author sabevzenko <sabevzenko@yandex-team.com> 2022-09-30 11:15:45 +0300
committer sabevzenko <sabevzenko@yandex-team.com> 2022-09-30 11:15:45 +0300
commit b21d940555448792b2a12ecaf9d061ae469fde84 (patch)
tree 44565808faa26ad568400e52f023ece72fcb2871 /contrib
parent 4505d6b683cd7ebb1f6dacfb2fd3f7c5c22934e7 (diff)
download ydb-b21d940555448792b2a12ecaf9d061ae469fde84.tar.gz
switch GOSTD_VERSION=1.19.1
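The change itself is a one-line build-variable switch: GOSTD_VERSION selects which vendored copy of the Go standard library under contrib/go the build uses, and this commit moves it from the 1.18 line to 1.19.1, hence the parallel contrib/go/_std_1.18 and contrib/go/_std_1.19 trees in the diffstat below. A minimal sketch of that version-to-directory mapping, assuming the tree is keyed by the major.minor prefix only (as the paths below suggest); gostdRoot is a hypothetical helper for illustration, not part of the ydb build system:

package main

import (
	"fmt"
	"path"
	"strings"
)

// gostdRoot maps a GOSTD_VERSION value such as "1.19.1" to the vendored
// standard-library tree seen in this diffstat. Assumption: only the
// major.minor prefix names the directory; the patch level does not
// appear in the path.
func gostdRoot(gostdVersion string) string {
	first := strings.IndexByte(gostdVersion, '.')
	last := strings.LastIndexByte(gostdVersion, '.')
	majorMinor := gostdVersion
	if last > first {
		majorMinor = gostdVersion[:last] // "1.19.1" -> "1.19"
	}
	return path.Join("contrib", "go", "_std_"+majorMinor)
}

func main() {
	fmt.Println(gostdRoot("1.18"))   // contrib/go/_std_1.18
	fmt.Println(gostdRoot("1.19.1")) // contrib/go/_std_1.19
}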
Diffstat (limited to 'contrib')
-rw-r--r-- contrib/go/_std_1.18/src/bufio/bufio.go | 813
-rw-r--r-- contrib/go/_std_1.18/src/bufio/scan.go | 420
-rw-r--r-- contrib/go/_std_1.18/src/bytes/buffer.go | 461
-rw-r--r-- contrib/go/_std_1.18/src/bytes/bytes.go | 1296
-rw-r--r-- contrib/go/_std_1.18/src/bytes/reader.go | 160
-rw-r--r-- contrib/go/_std_1.18/src/compress/flate/deflate.go | 748
-rw-r--r-- contrib/go/_std_1.18/src/compress/flate/dict_decoder.go | 182
-rw-r--r-- contrib/go/_std_1.18/src/compress/flate/huffman_bit_writer.go | 704
-rw-r--r-- contrib/go/_std_1.18/src/compress/flate/huffman_code.go | 349
-rw-r--r-- contrib/go/_std_1.18/src/compress/gzip/gunzip.go | 290
-rw-r--r-- contrib/go/_std_1.18/src/container/list/list.go | 235
-rw-r--r-- contrib/go/_std_1.18/src/context/context.go | 593
-rw-r--r-- contrib/go/_std_1.18/src/crypto/aes/aes_gcm.go | 193
-rw-r--r-- contrib/go/_std_1.18/src/crypto/aes/cipher.go | 78
-rw-r--r-- contrib/go/_std_1.18/src/crypto/aes/cipher_asm.go | 99
-rw-r--r-- contrib/go/_std_1.18/src/crypto/cipher/cbc.go | 163
-rw-r--r-- contrib/go/_std_1.18/src/crypto/cipher/gcm.go | 426
-rw-r--r-- contrib/go/_std_1.18/src/crypto/crypto.go | 223
-rw-r--r-- contrib/go/_std_1.18/src/crypto/des/block.go | 263
-rw-r--r-- contrib/go/_std_1.18/src/crypto/ecdsa/ecdsa.go | 368
-rw-r--r-- contrib/go/_std_1.18/src/crypto/ed25519/ed25519.go | 215
-rw-r--r-- contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/doc.go | 22
-rw-r--r-- contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/edwards25519.go | 427
-rw-r--r-- contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe.go | 416
-rw-r--r-- contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe_amd64.go | 13
-rw-r--r-- contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe_generic.go | 264
-rw-r--r-- contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/scalar.go | 1025
-rw-r--r-- contrib/go/_std_1.18/src/crypto/elliptic/elliptic.go | 496
-rw-r--r-- contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p224_fiat64.go | 1429
-rw-r--r-- contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p384_fiat64.go | 3004
-rw-r--r-- contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p521_fiat64.go | 5509
-rw-r--r-- contrib/go/_std_1.18/src/crypto/elliptic/internal/nistec/p224.go | 293
-rw-r--r-- contrib/go/_std_1.18/src/crypto/elliptic/internal/nistec/p384.go | 298
-rw-r--r-- contrib/go/_std_1.18/src/crypto/elliptic/internal/nistec/p521.go | 310
-rw-r--r-- contrib/go/_std_1.18/src/crypto/elliptic/p224.go | 139
-rw-r--r-- contrib/go/_std_1.18/src/crypto/elliptic/p256_asm.go | 544
-rw-r--r-- contrib/go/_std_1.18/src/crypto/elliptic/p256_asm_amd64.s | 2347
-rw-r--r-- contrib/go/_std_1.18/src/crypto/elliptic/p384.go | 144
-rw-r--r-- contrib/go/_std_1.18/src/crypto/elliptic/p521.go | 165
-rw-r--r-- contrib/go/_std_1.18/src/crypto/hmac/hmac.go | 172
-rw-r--r-- contrib/go/_std_1.18/src/crypto/rand/eagain.go | 27
-rw-r--r-- contrib/go/_std_1.18/src/crypto/rand/rand_batched.go | 32
-rw-r--r-- contrib/go/_std_1.18/src/crypto/rand/rand_getentropy.go | 30
-rw-r--r-- contrib/go/_std_1.18/src/crypto/rand/rand_linux.go | 14
-rw-r--r-- contrib/go/_std_1.18/src/crypto/rand/rand_unix.go | 169
-rw-r--r-- contrib/go/_std_1.18/src/crypto/rand/util.go | 143
-rw-r--r-- contrib/go/_std_1.18/src/crypto/rsa/pkcs1v15.go | 323
-rw-r--r-- contrib/go/_std_1.18/src/crypto/rsa/pss.go | 303
-rw-r--r-- contrib/go/_std_1.18/src/crypto/rsa/rsa.go | 659
-rw-r--r-- contrib/go/_std_1.18/src/crypto/sha1/sha1.go | 266
-rw-r--r-- contrib/go/_std_1.18/src/crypto/sha256/sha256.go | 270
-rw-r--r-- contrib/go/_std_1.18/src/crypto/sha256/sha256block_decl.go | 11
-rw-r--r-- contrib/go/_std_1.18/src/crypto/sha512/sha512.go | 367
-rw-r--r-- contrib/go/_std_1.18/src/crypto/subtle/constant_time.go | 61
-rw-r--r-- contrib/go/_std_1.18/src/crypto/tls/auth.go | 289
-rw-r--r-- contrib/go/_std_1.18/src/crypto/tls/cipher_suites.go | 689
-rw-r--r-- contrib/go/_std_1.18/src/crypto/tls/common.go | 1480
-rw-r--r-- contrib/go/_std_1.18/src/crypto/tls/conn.go | 1543
-rw-r--r-- contrib/go/_std_1.18/src/crypto/tls/handshake_client.go | 1011
-rw-r--r-- contrib/go/_std_1.18/src/crypto/tls/handshake_client_tls13.go | 682
-rw-r--r-- contrib/go/_std_1.18/src/crypto/tls/handshake_messages.go | 1808
-rw-r--r-- contrib/go/_std_1.18/src/crypto/tls/handshake_server.go | 875
-rw-r--r-- contrib/go/_std_1.18/src/crypto/tls/handshake_server_tls13.go | 872
-rw-r--r-- contrib/go/_std_1.18/src/crypto/x509/cert_pool.go | 251
-rw-r--r-- contrib/go/_std_1.18/src/crypto/x509/internal/macos/corefoundation.go | 207
-rw-r--r-- contrib/go/_std_1.18/src/crypto/x509/internal/macos/security.go | 234
-rw-r--r-- contrib/go/_std_1.18/src/crypto/x509/parser.go | 1013
-rw-r--r-- contrib/go/_std_1.18/src/crypto/x509/pkix/pkix.go | 316
-rw-r--r-- contrib/go/_std_1.18/src/crypto/x509/root.go | 31
-rw-r--r-- contrib/go/_std_1.18/src/crypto/x509/root_darwin.go | 113
-rw-r--r-- contrib/go/_std_1.18/src/crypto/x509/sec1.go | 120
-rw-r--r-- contrib/go/_std_1.18/src/crypto/x509/verify.go | 1114
-rw-r--r-- contrib/go/_std_1.18/src/crypto/x509/x509.go | 2224
-rw-r--r-- contrib/go/_std_1.18/src/embed/embed.go | 432
-rw-r--r-- contrib/go/_std_1.18/src/encoding/asn1/asn1.go | 1122
-rw-r--r-- contrib/go/_std_1.18/src/encoding/binary/binary.go | 737
-rw-r--r-- contrib/go/_std_1.18/src/encoding/binary/varint.go | 139
-rw-r--r-- contrib/go/_std_1.18/src/encoding/csv/reader.go | 450
-rw-r--r-- contrib/go/_std_1.18/src/encoding/hex/hex.go | 326
-rw-r--r-- contrib/go/_std_1.18/src/encoding/pem/pem.go | 314
-rw-r--r-- contrib/go/_std_1.18/src/flag/flag.go | 1079
-rw-r--r-- contrib/go/_std_1.18/src/fmt/doc.go | 343
-rw-r--r-- contrib/go/_std_1.18/src/fmt/print.go | 1172
-rw-r--r-- contrib/go/_std_1.18/src/hash/crc32/crc32_amd64.go | 222
-rw-r--r-- contrib/go/_std_1.18/src/internal/bytealg/compare_native.go | 19
-rw-r--r-- contrib/go/_std_1.18/src/internal/bytealg/indexbyte_native.go | 13
-rw-r--r-- contrib/go/_std_1.18/src/internal/cpu/cpu.go | 220
-rw-r--r-- contrib/go/_std_1.18/src/internal/cpu/cpu_x86.go | 173
-rw-r--r-- contrib/go/_std_1.18/src/internal/cpu/cpu_x86.s | 26
-rw-r--r-- contrib/go/_std_1.18/src/internal/fmtsort/sort.go | 220
-rw-r--r-- contrib/go/_std_1.18/src/internal/goarch/goarch.go | 58
-rw-r--r-- contrib/go/_std_1.18/src/internal/goexperiment/exp_pacerredesign_on.go | 9
-rw-r--r-- contrib/go/_std_1.18/src/internal/goexperiment/exp_regabireflect_on.go | 9
-rw-r--r-- contrib/go/_std_1.18/src/internal/goexperiment/flags.go | 99
-rw-r--r-- contrib/go/_std_1.18/src/internal/goos/goos.go | 12
-rw-r--r-- contrib/go/_std_1.18/src/internal/intern/intern.go | 178
-rw-r--r-- contrib/go/_std_1.18/src/internal/nettrace/nettrace.go | 45
-rw-r--r-- contrib/go/_std_1.18/src/internal/poll/errno_unix.go | 33
-rw-r--r-- contrib/go/_std_1.18/src/internal/poll/fcntl_libc.go | 13
-rw-r--r-- contrib/go/_std_1.18/src/internal/poll/fd.go | 82
-rw-r--r-- contrib/go/_std_1.18/src/internal/poll/fd_opendir_darwin.go | 38
-rw-r--r-- contrib/go/_std_1.18/src/internal/poll/fd_poll_runtime.go | 168
-rw-r--r-- contrib/go/_std_1.18/src/internal/poll/fd_posix.go | 79
-rw-r--r-- contrib/go/_std_1.18/src/internal/poll/fd_unix.go | 799
-rw-r--r-- contrib/go/_std_1.18/src/internal/poll/fd_writev_darwin.go | 16
-rw-r--r-- contrib/go/_std_1.18/src/internal/poll/hook_cloexec.go | 12
-rw-r--r-- contrib/go/_std_1.18/src/internal/poll/hook_unix.go | 15
-rw-r--r-- contrib/go/_std_1.18/src/internal/poll/sock_cloexec.go | 50
-rw-r--r-- contrib/go/_std_1.18/src/internal/poll/sockopt.go | 36
-rw-r--r-- contrib/go/_std_1.18/src/internal/poll/sockopt_unix.go | 18
-rw-r--r-- contrib/go/_std_1.18/src/internal/poll/sockoptip.go | 27
-rw-r--r-- contrib/go/_std_1.18/src/internal/poll/sys_cloexec.go | 36
-rw-r--r-- contrib/go/_std_1.18/src/internal/reflectlite/type.go | 972
-rw-r--r-- contrib/go/_std_1.18/src/internal/reflectlite/value.go | 476
-rw-r--r-- contrib/go/_std_1.18/src/internal/syscall/unix/at.go | 58
-rw-r--r-- contrib/go/_std_1.18/src/internal/syscall/unix/net.go | 44
-rw-r--r-- contrib/go/_std_1.18/src/internal/syscall/unix/nonblocking_libc.go | 24
-rw-r--r-- contrib/go/_std_1.18/src/io/fs/fs.go | 257
-rw-r--r-- contrib/go/_std_1.18/src/io/fs/walk.go | 127
-rw-r--r-- contrib/go/_std_1.18/src/io/io.go | 654
-rw-r--r-- contrib/go/_std_1.18/src/io/ioutil/ioutil.go | 84
-rw-r--r-- contrib/go/_std_1.18/src/io/ioutil/tempfile.go | 41
-rw-r--r-- contrib/go/_std_1.18/src/io/multi.go | 112
-rw-r--r-- contrib/go/_std_1.18/src/log/log.go | 411
-rw-r--r-- contrib/go/_std_1.18/src/math/abs.go | 14
-rw-r--r-- contrib/go/_std_1.18/src/math/acosh.go | 64
-rw-r--r-- contrib/go/_std_1.18/src/math/asin.go | 65
-rw-r--r-- contrib/go/_std_1.18/src/math/asinh.go | 76
-rw-r--r-- contrib/go/_std_1.18/src/math/atan.go | 110
-rw-r--r-- contrib/go/_std_1.18/src/math/atan2.go | 76
-rw-r--r-- contrib/go/_std_1.18/src/math/atanh.go | 84
-rw-r--r-- contrib/go/_std_1.18/src/math/big/arith.go | 277
-rw-r--r-- contrib/go/_std_1.18/src/math/big/arith_amd64.s | 526
-rw-r--r-- contrib/go/_std_1.18/src/math/big/arith_decl.go | 19
-rw-r--r-- contrib/go/_std_1.18/src/math/big/float.go | 1732
-rw-r--r-- contrib/go/_std_1.18/src/math/big/floatconv.go | 304
-rw-r--r-- contrib/go/_std_1.18/src/math/big/floatmarsh.go | 120
-rw-r--r-- contrib/go/_std_1.18/src/math/big/int.go | 1218
-rw-r--r-- contrib/go/_std_1.18/src/math/big/intconv.go | 257
-rw-r--r-- contrib/go/_std_1.18/src/math/big/intmarsh.go | 80
-rw-r--r-- contrib/go/_std_1.18/src/math/big/nat.go | 1244
-rw-r--r-- contrib/go/_std_1.18/src/math/big/natconv.go | 512
-rw-r--r-- contrib/go/_std_1.18/src/math/big/rat.go | 544
-rw-r--r-- contrib/go/_std_1.18/src/math/big/ratconv.go | 380
-rw-r--r-- contrib/go/_std_1.18/src/math/big/ratmarsh.go | 75
-rw-r--r-- contrib/go/_std_1.18/src/math/big/sqrt.go | 128
-rw-r--r-- contrib/go/_std_1.18/src/math/bits.go | 62
-rw-r--r-- contrib/go/_std_1.18/src/math/cbrt.go | 84
-rw-r--r-- contrib/go/_std_1.18/src/math/copysign.go | 12
-rw-r--r-- contrib/go/_std_1.18/src/math/dim.go | 91
-rw-r--r-- contrib/go/_std_1.18/src/math/erf.go | 349
-rw-r--r-- contrib/go/_std_1.18/src/math/erfinv.go | 127
-rw-r--r-- contrib/go/_std_1.18/src/math/exp.go | 201
-rw-r--r-- contrib/go/_std_1.18/src/math/expm1.go | 242
-rw-r--r-- contrib/go/_std_1.18/src/math/floor.go | 146
-rw-r--r-- contrib/go/_std_1.18/src/math/frexp.go | 38
-rw-r--r-- contrib/go/_std_1.18/src/math/gamma.go | 221
-rw-r--r-- contrib/go/_std_1.18/src/math/hypot.go | 43
-rw-r--r-- contrib/go/_std_1.18/src/math/j0.go | 427
-rw-r--r-- contrib/go/_std_1.18/src/math/j1.go | 422
-rw-r--r-- contrib/go/_std_1.18/src/math/jn.go | 304
-rw-r--r-- contrib/go/_std_1.18/src/math/ldexp.go | 50
-rw-r--r-- contrib/go/_std_1.18/src/math/lgamma.go | 365
-rw-r--r-- contrib/go/_std_1.18/src/math/log.go | 128
-rw-r--r-- contrib/go/_std_1.18/src/math/log1p.go | 202
-rw-r--r-- contrib/go/_std_1.18/src/math/logb.go | 50
-rw-r--r-- contrib/go/_std_1.18/src/math/mod.go | 51
-rw-r--r-- contrib/go/_std_1.18/src/math/modf.go | 42
-rw-r--r-- contrib/go/_std_1.18/src/math/nextafter.go | 49
-rw-r--r-- contrib/go/_std_1.18/src/math/pow.go | 156
-rw-r--r-- contrib/go/_std_1.18/src/math/pow10.go | 46
-rw-r--r-- contrib/go/_std_1.18/src/math/rand/exp.go | 222
-rw-r--r-- contrib/go/_std_1.18/src/math/rand/normal.go | 157
-rw-r--r-- contrib/go/_std_1.18/src/math/rand/rand.go | 421
-rw-r--r-- contrib/go/_std_1.18/src/math/remainder.go | 94
-rw-r--r-- contrib/go/_std_1.18/src/math/sin.go | 242
-rw-r--r-- contrib/go/_std_1.18/src/math/sincos.go | 72
-rw-r--r-- contrib/go/_std_1.18/src/math/sinh.go | 91
-rw-r--r-- contrib/go/_std_1.18/src/math/sqrt.go | 149
-rw-r--r-- contrib/go/_std_1.18/src/math/tan.go | 139
-rw-r--r-- contrib/go/_std_1.18/src/math/tanh.go | 104
-rw-r--r-- contrib/go/_std_1.18/src/math/trig_reduce.go | 100
-rw-r--r-- contrib/go/_std_1.18/src/mime/multipart/multipart.go | 429
-rw-r--r-- contrib/go/_std_1.18/src/mime/type.go | 202
-rw-r--r-- contrib/go/_std_1.18/src/mime/type_unix.go | 114
-rw-r--r-- contrib/go/_std_1.18/src/net/addrselect.go | 390
-rw-r--r-- contrib/go/_std_1.18/src/net/cgo_unix.go | 348
-rw-r--r-- contrib/go/_std_1.18/src/net/conf.go | 320
-rw-r--r-- contrib/go/_std_1.18/src/net/dial.go | 743
-rw-r--r-- contrib/go/_std_1.18/src/net/dnsclient.go | 231
-rw-r--r-- contrib/go/_std_1.18/src/net/dnsclient_unix.go | 800
-rw-r--r-- contrib/go/_std_1.18/src/net/dnsconfig_unix.go | 191
-rw-r--r-- contrib/go/_std_1.18/src/net/error_posix.go | 21
-rw-r--r-- contrib/go/_std_1.18/src/net/error_unix.go | 16
-rw-r--r-- contrib/go/_std_1.18/src/net/fd_posix.go | 147
-rw-r--r-- contrib/go/_std_1.18/src/net/fd_unix.go | 203
-rw-r--r-- contrib/go/_std_1.18/src/net/file_unix.go | 119
-rw-r--r-- contrib/go/_std_1.18/src/net/hook_unix.go | 20
-rw-r--r-- contrib/go/_std_1.18/src/net/http/client.go | 1033
-rw-r--r-- contrib/go/_std_1.18/src/net/http/cookie.go | 464
-rw-r--r-- contrib/go/_std_1.18/src/net/http/doc.go | 107
-rw-r--r-- contrib/go/_std_1.18/src/net/http/filetransport.go | 123
-rw-r--r-- contrib/go/_std_1.18/src/net/http/fs.go | 972
-rw-r--r-- contrib/go/_std_1.18/src/net/http/h2_bundle.go | 10858
-rw-r--r-- contrib/go/_std_1.18/src/net/http/header.go | 279
-rw-r--r-- contrib/go/_std_1.18/src/net/http/internal/chunked.go | 261
-rw-r--r-- contrib/go/_std_1.18/src/net/http/request.go | 1463
-rw-r--r-- contrib/go/_std_1.18/src/net/http/response.go | 369
-rw-r--r-- contrib/go/_std_1.18/src/net/http/server.go | 3622
-rw-r--r-- contrib/go/_std_1.18/src/net/http/sniff.go | 309
-rw-r--r-- contrib/go/_std_1.18/src/net/http/status.go | 152
-rw-r--r-- contrib/go/_std_1.18/src/net/http/transfer.go | 1114
-rw-r--r-- contrib/go/_std_1.18/src/net/http/transport.go | 2906
-rw-r--r-- contrib/go/_std_1.18/src/net/iprawsock_posix.go | 147
-rw-r--r-- contrib/go/_std_1.18/src/net/ipsock_posix.go | 228
-rw-r--r-- contrib/go/_std_1.18/src/net/lookup.go | 667
-rw-r--r-- contrib/go/_std_1.18/src/net/lookup_unix.go | 353
-rw-r--r-- contrib/go/_std_1.18/src/net/mac.go | 85
-rw-r--r-- contrib/go/_std_1.18/src/net/net.go | 758
-rw-r--r-- contrib/go/_std_1.18/src/net/netip/netip.go | 1498
-rw-r--r-- contrib/go/_std_1.18/src/net/nss.go | 160
-rw-r--r-- contrib/go/_std_1.18/src/net/port_unix.go | 57
-rw-r--r-- contrib/go/_std_1.18/src/net/sock_cloexec.go | 50
-rw-r--r-- contrib/go/_std_1.18/src/net/sock_linux.go | 86
-rw-r--r-- contrib/go/_std_1.18/src/net/sock_posix.go | 254
-rw-r--r-- contrib/go/_std_1.18/src/net/sockaddr_posix.go | 34
-rw-r--r-- contrib/go/_std_1.18/src/net/sockopt_posix.go | 134
-rw-r--r-- contrib/go/_std_1.18/src/net/sockoptip_posix.go | 49
-rw-r--r-- contrib/go/_std_1.18/src/net/sys_cloexec.go | 36
-rw-r--r-- contrib/go/_std_1.18/src/net/tcpsock_posix.go | 173
-rw-r--r-- contrib/go/_std_1.18/src/net/tcpsockopt_posix.go | 18
-rw-r--r-- contrib/go/_std_1.18/src/net/textproto/reader.go | 790
-rw-r--r-- contrib/go/_std_1.18/src/net/textproto/textproto.go | 154
-rw-r--r-- contrib/go/_std_1.18/src/net/udpsock.go | 364
-rw-r--r-- contrib/go/_std_1.18/src/net/udpsock_posix.go | 269
-rw-r--r-- contrib/go/_std_1.18/src/net/unixsock_posix.go | 227
-rw-r--r-- contrib/go/_std_1.18/src/net/url/url.go | 1218
-rw-r--r-- contrib/go/_std_1.18/src/os/endian_little.go | 9
-rw-r--r-- contrib/go/_std_1.18/src/os/error_posix.go | 18
-rw-r--r-- contrib/go/_std_1.18/src/os/exec_posix.go | 136
-rw-r--r-- contrib/go/_std_1.18/src/os/exec_unix.go | 106
-rw-r--r-- contrib/go/_std_1.18/src/os/file.go | 723
-rw-r--r-- contrib/go/_std_1.18/src/os/file_posix.go | 250
-rw-r--r-- contrib/go/_std_1.18/src/os/file_unix.go | 430
-rw-r--r-- contrib/go/_std_1.18/src/os/path_unix.go | 75
-rw-r--r-- contrib/go/_std_1.18/src/os/pipe_bsd.go | 28
-rw-r--r-- contrib/go/_std_1.18/src/os/pipe_linux.go | 20
-rw-r--r-- contrib/go/_std_1.18/src/os/removeall_at.go | 192
-rw-r--r-- contrib/go/_std_1.18/src/os/stat_darwin.go | 51
-rw-r--r-- contrib/go/_std_1.18/src/os/stat_linux.go | 51
-rw-r--r-- contrib/go/_std_1.18/src/os/stat_unix.go | 52
-rw-r--r-- contrib/go/_std_1.18/src/os/sys_unix.go | 14
-rw-r--r-- contrib/go/_std_1.18/src/path/filepath/match.go | 370
-rw-r--r-- contrib/go/_std_1.18/src/path/filepath/path.go | 612
-rw-r--r-- contrib/go/_std_1.18/src/path/filepath/path_unix.go | 53
-rw-r--r-- contrib/go/_std_1.18/src/path/filepath/symlink_unix.go | 7
-rw-r--r-- contrib/go/_std_1.18/src/path/match.go | 231
-rw-r--r-- contrib/go/_std_1.18/src/path/path.go | 233
-rw-r--r-- contrib/go/_std_1.18/src/reflect/abi.go | 511
-rw-r--r-- contrib/go/_std_1.18/src/reflect/deepequal.go | 238
-rw-r--r-- contrib/go/_std_1.18/src/reflect/float32reg_generic.go | 23
-rw-r--r-- contrib/go/_std_1.18/src/reflect/makefunc.go | 176
-rw-r--r-- contrib/go/_std_1.18/src/reflect/type.go | 3173
-rw-r--r-- contrib/go/_std_1.18/src/reflect/value.go | 3532
-rw-r--r-- contrib/go/_std_1.18/src/runtime/alg.go | 353
-rw-r--r-- contrib/go/_std_1.18/src/runtime/asan0.go | 22
-rw-r--r-- contrib/go/_std_1.18/src/runtime/asm_amd64.s | 2036
-rw-r--r-- contrib/go/_std_1.18/src/runtime/cgo/callbacks.go | 106
-rw-r--r-- contrib/go/_std_1.18/src/runtime/cgo/cgo.go | 34
-rw-r--r-- contrib/go/_std_1.18/src/runtime/cgo/gcc_linux_amd64.c | 94
-rw-r--r-- contrib/go/_std_1.18/src/runtime/cgo/setenv.go | 21
-rw-r--r-- contrib/go/_std_1.18/src/runtime/cgo_mmap.go | 67
-rw-r--r-- contrib/go/_std_1.18/src/runtime/cgo_sigaction.go | 92
-rw-r--r-- contrib/go/_std_1.18/src/runtime/cgocall.go | 639
-rw-r--r-- contrib/go/_std_1.18/src/runtime/cgocheck.go | 263
-rw-r--r-- contrib/go/_std_1.18/src/runtime/chan.go | 846
-rw-r--r-- contrib/go/_std_1.18/src/runtime/compiler.go | 13
-rw-r--r-- contrib/go/_std_1.18/src/runtime/cpuprof.go | 218
-rw-r--r-- contrib/go/_std_1.18/src/runtime/cputicks.go | 11
-rw-r--r-- contrib/go/_std_1.18/src/runtime/debug.go | 116
-rw-r--r-- contrib/go/_std_1.18/src/runtime/debuglog.go | 820
-rw-r--r-- contrib/go/_std_1.18/src/runtime/defs_linux_amd64.go | 302
-rw-r--r-- contrib/go/_std_1.18/src/runtime/env_posix.go | 76
-rw-r--r-- contrib/go/_std_1.18/src/runtime/error.go | 329
-rw-r--r-- contrib/go/_std_1.18/src/runtime/extern.go | 275
-rw-r--r-- contrib/go/_std_1.18/src/runtime/float.go | 53
-rw-r--r-- contrib/go/_std_1.18/src/runtime/hash64.go | 92
-rw-r--r-- contrib/go/_std_1.18/src/runtime/heapdump.go | 757
-rw-r--r-- contrib/go/_std_1.18/src/runtime/histogram.go | 170
-rw-r--r-- contrib/go/_std_1.18/src/runtime/internal/atomic/atomic_amd64.go | 116
-rw-r--r-- contrib/go/_std_1.18/src/runtime/internal/atomic/types.go | 395
-rw-r--r-- contrib/go/_std_1.18/src/runtime/internal/syscall/asm_linux_amd64.s | 33
-rw-r--r-- contrib/go/_std_1.18/src/runtime/internal/syscall/syscall_linux.go | 12
-rw-r--r-- contrib/go/_std_1.18/src/runtime/lfstack_64bit.go | 58
-rw-r--r-- contrib/go/_std_1.18/src/runtime/lock_futex.go | 245
-rw-r--r-- contrib/go/_std_1.18/src/runtime/lock_sema.go | 304
-rw-r--r-- contrib/go/_std_1.18/src/runtime/lockrank.go | 251
-rw-r--r-- contrib/go/_std_1.18/src/runtime/lockrank_off.go | 64
-rw-r--r-- contrib/go/_std_1.18/src/runtime/malloc.go | 1564
-rw-r--r-- contrib/go/_std_1.18/src/runtime/map.go | 1416
-rw-r--r-- contrib/go/_std_1.18/src/runtime/map_fast32.go | 462
-rw-r--r-- contrib/go/_std_1.18/src/runtime/map_fast64.go | 470
-rw-r--r-- contrib/go/_std_1.18/src/runtime/map_faststr.go | 485
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mbarrier.go | 343
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mbitmap.go | 2043
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mcache.go | 298
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mem_darwin.go | 72
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mem_linux.go | 195
-rw-r--r-- contrib/go/_std_1.18/src/runtime/memclr_amd64.s | 179
-rw-r--r-- contrib/go/_std_1.18/src/runtime/memmove_amd64.s | 532
-rw-r--r-- contrib/go/_std_1.18/src/runtime/metrics.go | 594
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mfinal.go | 474
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mgc.go | 1714
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mgcmark.go | 1591
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mgcpacer.go | 1348
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mgcscavenge.go | 1008
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mgcstack.go | 352
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mgcsweep.go | 878
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mgcwork.go | 483
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mheap.go | 2119
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mpagealloc.go | 1026
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mpagealloc_64bit.go | 178
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mpagecache.go | 177
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mprof.go | 937
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mranges.go | 372
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mspanset.go | 354
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mstats.go | 928
-rw-r--r-- contrib/go/_std_1.18/src/runtime/mwbbuf.go | 290
-rw-r--r-- contrib/go/_std_1.18/src/runtime/nbpipe_pipe2.go | 22
-rw-r--r-- contrib/go/_std_1.18/src/runtime/netpoll.go | 652
-rw-r--r-- contrib/go/_std_1.18/src/runtime/os_darwin.go | 470
-rw-r--r-- contrib/go/_std_1.18/src/runtime/os_linux.go | 878
-rw-r--r-- contrib/go/_std_1.18/src/runtime/os_linux_noauxv.go | 10
-rw-r--r-- contrib/go/_std_1.18/src/runtime/panic.go | 1292
-rw-r--r-- contrib/go/_std_1.18/src/runtime/proc.go | 6244
-rw-r--r-- contrib/go/_std_1.18/src/runtime/profbuf.go | 561
-rw-r--r-- contrib/go/_std_1.18/src/runtime/runtime.go | 65
-rw-r--r-- contrib/go/_std_1.18/src/runtime/runtime1.go | 544
-rw-r--r-- contrib/go/_std_1.18/src/runtime/runtime2.go | 1134
-rw-r--r-- contrib/go/_std_1.18/src/runtime/sema.go | 617
-rw-r--r-- contrib/go/_std_1.18/src/runtime/signal_darwin_amd64.go | 92
-rw-r--r-- contrib/go/_std_1.18/src/runtime/signal_unix.go | 1320
-rw-r--r-- contrib/go/_std_1.18/src/runtime/sigqueue.go | 268
-rw-r--r-- contrib/go/_std_1.18/src/runtime/slice.go | 332
-rw-r--r-- contrib/go/_std_1.18/src/runtime/stack.go | 1434
-rw-r--r-- contrib/go/_std_1.18/src/runtime/string.go | 495
-rw-r--r-- contrib/go/_std_1.18/src/runtime/stubs.go | 437
-rw-r--r-- contrib/go/_std_1.18/src/runtime/stubs2.go | 40
-rw-r--r-- contrib/go/_std_1.18/src/runtime/stubs_linux.go | 19
-rw-r--r-- contrib/go/_std_1.18/src/runtime/symtab.go | 1180
-rw-r--r-- contrib/go/_std_1.18/src/runtime/sys_darwin.go | 536
-rw-r--r-- contrib/go/_std_1.18/src/runtime/sys_darwin_amd64.s | 859
-rw-r--r-- contrib/go/_std_1.18/src/runtime/sys_libc.go | 53
-rw-r--r-- contrib/go/_std_1.18/src/runtime/sys_linux_amd64.s | 765
-rw-r--r-- contrib/go/_std_1.18/src/runtime/time.go | 1128
-rw-r--r-- contrib/go/_std_1.18/src/runtime/trace.go | 1260
-rw-r--r-- contrib/go/_std_1.18/src/runtime/traceback.go | 1436
-rw-r--r-- contrib/go/_std_1.18/src/runtime/type.go | 708
-rw-r--r-- contrib/go/_std_1.18/src/runtime/vdso_elf64.go | 79
-rw-r--r-- contrib/go/_std_1.18/src/runtime/vdso_in_none.go | 13
-rw-r--r-- contrib/go/_std_1.18/src/runtime/vdso_linux.go | 292
-rw-r--r-- contrib/go/_std_1.18/src/sort/search.go | 112
-rw-r--r-- contrib/go/_std_1.18/src/sort/slice.go | 46
-rw-r--r-- contrib/go/_std_1.18/src/sort/sort.go | 579
-rw-r--r-- contrib/go/_std_1.18/src/sort/zfuncversion.go | 265
-rw-r--r-- contrib/go/_std_1.18/src/strconv/atof.go | 704
-rw-r--r-- contrib/go/_std_1.18/src/strconv/atoi.go | 314
-rw-r--r-- contrib/go/_std_1.18/src/strconv/doc.go | 57
-rw-r--r-- contrib/go/_std_1.18/src/strconv/eisel_lemire.go | 884
-rw-r--r-- contrib/go/_std_1.18/src/strconv/ftoa.go | 582
-rw-r--r-- contrib/go/_std_1.18/src/strconv/ftoaryu.go | 567
-rw-r--r-- contrib/go/_std_1.18/src/strconv/itoa.go | 206
-rw-r--r-- contrib/go/_std_1.18/src/strconv/quote.go | 598
-rw-r--r-- contrib/go/_std_1.18/src/strings/builder.go | 125
-rw-r--r-- contrib/go/_std_1.18/src/strings/replace.go | 569
-rw-r--r-- contrib/go/_std_1.18/src/strings/strings.go | 1186
-rw-r--r-- contrib/go/_std_1.18/src/sync/atomic/doc.go | 144
-rw-r--r-- contrib/go/_std_1.18/src/sync/cond.go | 98
-rw-r--r-- contrib/go/_std_1.18/src/sync/map.go | 386
-rw-r--r-- contrib/go/_std_1.18/src/sync/mutex.go | 250
-rw-r--r-- contrib/go/_std_1.18/src/sync/once.go | 70
-rw-r--r-- contrib/go/_std_1.18/src/sync/pool.go | 294
-rw-r--r-- contrib/go/_std_1.18/src/sync/rwmutex.go | 223
-rw-r--r-- contrib/go/_std_1.18/src/sync/waitgroup.go | 147
-rw-r--r-- contrib/go/_std_1.18/src/syscall/asm_darwin_amd64.s | 134
-rw-r--r-- contrib/go/_std_1.18/src/syscall/asm_linux_amd64.s | 164
-rw-r--r-- contrib/go/_std_1.18/src/syscall/dirent.go | 102
-rw-r--r-- contrib/go/_std_1.18/src/syscall/endian_little.go | 9
-rw-r--r-- contrib/go/_std_1.18/src/syscall/env_unix.go | 155
-rw-r--r-- contrib/go/_std_1.18/src/syscall/exec_libc2.go | 265
-rw-r--r-- contrib/go/_std_1.18/src/syscall/exec_linux.go | 621
-rw-r--r-- contrib/go/_std_1.18/src/syscall/exec_unix.go | 306
-rw-r--r-- contrib/go/_std_1.18/src/syscall/forkpipe.go | 21
-rw-r--r-- contrib/go/_std_1.18/src/syscall/sockcmsg_unix.go | 92
-rw-r--r-- contrib/go/_std_1.18/src/syscall/syscall.go | 103
-rw-r--r-- contrib/go/_std_1.18/src/syscall/syscall_darwin.go | 332
-rw-r--r-- contrib/go/_std_1.18/src/syscall/syscall_linux.go | 1150
-rw-r--r-- contrib/go/_std_1.18/src/syscall/syscall_linux_amd64.go | 123
-rw-r--r-- contrib/go/_std_1.18/src/syscall/syscall_unix.go | 487
-rw-r--r-- contrib/go/_std_1.18/src/syscall/timestruct.go | 36
-rw-r--r-- contrib/go/_std_1.18/src/syscall/zsyscall_darwin_amd64.go | 2004
-rw-r--r-- contrib/go/_std_1.18/src/syscall/zsyscall_linux_amd64.go | 1655
-rw-r--r-- contrib/go/_std_1.18/src/text/template/doc.go | 465
-rw-r--r-- contrib/go/_std_1.18/src/text/template/funcs.go | 753
-rw-r--r-- contrib/go/_std_1.18/src/text/template/helper.go | 177
-rw-r--r-- contrib/go/_std_1.18/src/text/template/option.go | 72
-rw-r--r-- contrib/go/_std_1.18/src/text/template/parse/lex.go | 682
-rw-r--r-- contrib/go/_std_1.18/src/text/template/parse/parse.go | 795
-rw-r--r-- contrib/go/_std_1.18/src/time/format.go | 1608
-rw-r--r-- contrib/go/_std_1.18/src/time/sleep.go | 177
-rw-r--r-- contrib/go/_std_1.18/src/time/sys_unix.go | 54
-rw-r--r-- contrib/go/_std_1.18/src/time/tick.go | 73
-rw-r--r-- contrib/go/_std_1.18/src/time/time.go | 1581
-rw-r--r-- contrib/go/_std_1.18/src/time/zoneinfo.go | 687
-rw-r--r-- contrib/go/_std_1.18/src/time/zoneinfo_read.go | 586
-rw-r--r-- contrib/go/_std_1.18/src/time/zoneinfo_unix.go | 69
-rw-r--r-- contrib/go/_std_1.18/src/unicode/graphic.go | 144
-rw-r--r-- contrib/go/_std_1.18/src/unicode/letter.go | 369
-rw-r--r-- contrib/go/_std_1.18/src/unicode/utf8/utf8.go | 578
-rw-r--r-- contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/cryptobyte/builder.go | 337
-rw-r--r-- contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go | 13
-rw-r--r-- contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go | 310
-rw-r--r-- contrib/go/_std_1.18/src/vendor/golang.org/x/net/dns/dnsmessage/message.go | 2664
-rw-r--r-- contrib/go/_std_1.18/src/vendor/golang.org/x/net/http/httpguts/httplex.go | 348
-rw-r--r-- contrib/go/_std_1.18/src/vendor/golang.org/x/net/http/httpproxy/proxy.go | 368
-rw-r--r-- contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/trieval.go | 119
-rw-r--r-- contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/byteorder.go | 65
-rw-r--r-- contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/cpu.go | 287
-rw-r--r-- contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/core.go | 1071
-rw-r--r-- contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/forminfo.go | 278
-rw-r--r-- contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/normalize.go | 609
-rw-r--r-- contrib/go/_std_1.19/src/bufio/bufio.go | 829
-rw-r--r-- contrib/go/_std_1.19/src/bufio/scan.go | 419
-rw-r--r-- contrib/go/_std_1.19/src/bytes/buffer.go | 474
-rw-r--r-- contrib/go/_std_1.19/src/bytes/bytes.go | 1301
-rw-r--r-- contrib/go/_std_1.19/src/bytes/reader.go | 159
-rw-r--r-- contrib/go/_std_1.19/src/compress/flate/deflate.go | 746
-rw-r--r-- contrib/go/_std_1.19/src/compress/flate/deflatefast.go (renamed from contrib/go/_std_1.18/src/compress/flate/deflatefast.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/compress/flate/dict_decoder.go | 182
-rw-r--r-- contrib/go/_std_1.19/src/compress/flate/huffman_bit_writer.go | 704
-rw-r--r-- contrib/go/_std_1.19/src/compress/flate/huffman_code.go | 348
-rw-r--r-- contrib/go/_std_1.19/src/compress/flate/inflate.go (renamed from contrib/go/_std_1.18/src/compress/flate/inflate.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/compress/flate/token.go (renamed from contrib/go/_std_1.18/src/compress/flate/token.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/compress/gzip/gunzip.go | 290
-rw-r--r-- contrib/go/_std_1.19/src/compress/gzip/gzip.go (renamed from contrib/go/_std_1.18/src/compress/gzip/gzip.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/container/list/list.go | 235
-rw-r--r-- contrib/go/_std_1.19/src/context/context.go | 593
-rw-r--r-- contrib/go/_std_1.19/src/crypto/aes/aes_gcm.go | 186
-rw-r--r-- contrib/go/_std_1.19/src/crypto/aes/asm_amd64.s (renamed from contrib/go/_std_1.18/src/crypto/aes/asm_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/aes/block.go (renamed from contrib/go/_std_1.18/src/crypto/aes/block.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/aes/cipher.go | 82
-rw-r--r-- contrib/go/_std_1.19/src/crypto/aes/cipher_asm.go | 113
-rw-r--r-- contrib/go/_std_1.19/src/crypto/aes/const.go (renamed from contrib/go/_std_1.18/src/crypto/aes/const.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/aes/gcm_amd64.s (renamed from contrib/go/_std_1.18/src/crypto/aes/gcm_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/aes/modes.go (renamed from contrib/go/_std_1.18/src/crypto/aes/modes.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/cipher/cbc.go | 185
-rw-r--r-- contrib/go/_std_1.19/src/crypto/cipher/cfb.go (renamed from contrib/go/_std_1.18/src/crypto/cipher/cfb.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/cipher/cipher.go (renamed from contrib/go/_std_1.18/src/crypto/cipher/cipher.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/cipher/ctr.go (renamed from contrib/go/_std_1.18/src/crypto/cipher/ctr.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/cipher/gcm.go | 427
-rw-r--r-- contrib/go/_std_1.19/src/crypto/cipher/io.go (renamed from contrib/go/_std_1.18/src/crypto/cipher/io.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/cipher/ofb.go (renamed from contrib/go/_std_1.18/src/crypto/cipher/ofb.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/cipher/xor_amd64.go (renamed from contrib/go/_std_1.18/src/crypto/cipher/xor_amd64.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/cipher/xor_amd64.s (renamed from contrib/go/_std_1.18/src/crypto/cipher/xor_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/crypto.go | 223
-rw-r--r-- contrib/go/_std_1.19/src/crypto/des/block.go | 259
-rw-r--r-- contrib/go/_std_1.19/src/crypto/des/cipher.go (renamed from contrib/go/_std_1.18/src/crypto/des/cipher.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/des/const.go (renamed from contrib/go/_std_1.18/src/crypto/des/const.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/dsa/dsa.go (renamed from contrib/go/_std_1.18/src/crypto/dsa/dsa.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/ecdsa/ecdsa.go | 427
-rw-r--r-- contrib/go/_std_1.19/src/crypto/ecdsa/ecdsa_noasm.go (renamed from contrib/go/_std_1.18/src/crypto/ecdsa/ecdsa_noasm.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/ecdsa/notboring.go | 16
-rw-r--r-- contrib/go/_std_1.19/src/crypto/ed25519/ed25519.go | 230
-rw-r--r-- contrib/go/_std_1.19/src/crypto/elliptic/elliptic.go | 242
-rw-r--r-- contrib/go/_std_1.19/src/crypto/elliptic/nistec.go | 295
-rw-r--r-- contrib/go/_std_1.19/src/crypto/elliptic/nistec_p256.go | 29
-rw-r--r-- contrib/go/_std_1.19/src/crypto/elliptic/params.go | 300
-rw-r--r-- contrib/go/_std_1.19/src/crypto/hmac/hmac.go | 180
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/boring/bbig/big.go | 33
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/boring/doc.go | 19
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/boring/notboring.go | 113
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/boring/sig/sig.go | 17
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/boring/sig/sig_amd64.s | 54
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/edwards25519/doc.go | 22
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/edwards25519/edwards25519.go | 426
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe.go | 420
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe_amd64.go | 15
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe_amd64.s (renamed from contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe_arm64_noasm.go (renamed from contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe_arm64_noasm.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe_generic.go | 266
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/edwards25519/scalar.go | 1034
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/edwards25519/scalarmult.go (renamed from contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/scalarmult.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/edwards25519/tables.go (renamed from contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/tables.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p224.go (renamed from contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p224.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p224_fiat64.go | 1461
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p224_invert.go (renamed from contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p224_invert.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p256.go | 135
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p256_fiat64.go | 1400
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p256_invert.go | 84
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p384.go (renamed from contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p384.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p384_fiat64.go | 3036
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p384_invert.go (renamed from contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p384_invert.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p521.go (renamed from contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p521.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p521_fiat64.go | 5541
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p521_invert.go (renamed from contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p521_invert.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/nistec.go | 15
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/p224.go | 428
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/p224_sqrt.go | 132
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/p256_asm.go | 704
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/p256_asm_amd64.s | 2350
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/p256_asm_ordinv.go | 101
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/p256_asm_table.bin (renamed from contrib/go/_std_1.18/src/crypto/elliptic/p256_asm_table.bin) | bin 88064 -> 88064 bytes
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/p384.go | 515
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/nistec/p521.go | 444
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/randutil/randutil.go (renamed from contrib/go/_std_1.18/src/crypto/internal/randutil/randutil.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/internal/subtle/aliasing.go (renamed from contrib/go/_std_1.18/src/crypto/internal/subtle/aliasing.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/md5/md5.go (renamed from contrib/go/_std_1.18/src/crypto/md5/md5.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/md5/md5block.go (renamed from contrib/go/_std_1.18/src/crypto/md5/md5block.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/md5/md5block_amd64.s (renamed from contrib/go/_std_1.18/src/crypto/md5/md5block_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/md5/md5block_decl.go (renamed from contrib/go/_std_1.18/src/crypto/md5/md5block_decl.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/rand/rand.go (renamed from contrib/go/_std_1.18/src/crypto/rand/rand.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/rand/rand_getentropy.go | 14
-rw-r--r-- contrib/go/_std_1.19/src/crypto/rand/rand_getrandom.go | 48
-rw-r--r-- contrib/go/_std_1.19/src/crypto/rand/rand_unix.go | 87
-rw-r--r-- contrib/go/_std_1.19/src/crypto/rand/util.go | 99
-rw-r--r-- contrib/go/_std_1.19/src/crypto/rc4/rc4.go (renamed from contrib/go/_std_1.18/src/crypto/rc4/rc4.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/rsa/notboring.go | 16
-rw-r--r-- contrib/go/_std_1.19/src/crypto/rsa/pkcs1v15.go | 387
-rw-r--r-- contrib/go/_std_1.19/src/crypto/rsa/pss.go | 338
-rw-r--r-- contrib/go/_std_1.19/src/crypto/rsa/rsa.go | 727
-rw-r--r-- contrib/go/_std_1.19/src/crypto/sha1/boring.go | 25
-rw-r--r-- contrib/go/_std_1.19/src/crypto/sha1/sha1.go | 274
-rw-r--r-- contrib/go/_std_1.19/src/crypto/sha1/sha1block.go (renamed from contrib/go/_std_1.18/src/crypto/sha1/sha1block.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/sha1/sha1block_amd64.go (renamed from contrib/go/_std_1.18/src/crypto/sha1/sha1block_amd64.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/sha1/sha1block_amd64.s (renamed from contrib/go/_std_1.18/src/crypto/sha1/sha1block_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/sha256/sha256.go | 285
-rw-r--r-- contrib/go/_std_1.19/src/crypto/sha256/sha256block.go (renamed from contrib/go/_std_1.18/src/crypto/sha256/sha256block.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/sha256/sha256block_amd64.go (renamed from contrib/go/_std_1.18/src/crypto/sha256/sha256block_amd64.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/sha256/sha256block_amd64.s (renamed from contrib/go/_std_1.18/src/crypto/sha256/sha256block_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/sha256/sha256block_decl.go | 11
-rw-r--r-- contrib/go/_std_1.19/src/crypto/sha512/sha512.go | 386
-rw-r--r-- contrib/go/_std_1.19/src/crypto/sha512/sha512block.go (renamed from contrib/go/_std_1.18/src/crypto/sha512/sha512block.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/sha512/sha512block_amd64.go (renamed from contrib/go/_std_1.18/src/crypto/sha512/sha512block_amd64.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/sha512/sha512block_amd64.s (renamed from contrib/go/_std_1.18/src/crypto/sha512/sha512block_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/subtle/constant_time.go | 62
-rw-r--r-- contrib/go/_std_1.19/src/crypto/tls/alert.go (renamed from contrib/go/_std_1.18/src/crypto/tls/alert.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/tls/auth.go | 293
-rw-r--r-- contrib/go/_std_1.19/src/crypto/tls/cipher_suites.go | 702
-rw-r--r-- contrib/go/_std_1.19/src/crypto/tls/common.go | 1485
-rw-r--r-- contrib/go/_std_1.19/src/crypto/tls/common_string.go (renamed from contrib/go/_std_1.18/src/crypto/tls/common_string.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/tls/conn.go | 1545
-rw-r--r-- contrib/go/_std_1.19/src/crypto/tls/handshake_client.go | 1017
-rw-r--r-- contrib/go/_std_1.19/src/crypto/tls/handshake_client_tls13.go | 686
-rw-r--r-- contrib/go/_std_1.19/src/crypto/tls/handshake_messages.go | 1820
-rw-r--r-- contrib/go/_std_1.19/src/crypto/tls/handshake_server.go | 882
-rw-r--r-- contrib/go/_std_1.19/src/crypto/tls/handshake_server_tls13.go | 876
-rw-r--r-- contrib/go/_std_1.19/src/crypto/tls/key_agreement.go (renamed from contrib/go/_std_1.18/src/crypto/tls/key_agreement.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/tls/key_schedule.go (renamed from contrib/go/_std_1.18/src/crypto/tls/key_schedule.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/tls/notboring.go | 20
-rw-r--r-- contrib/go/_std_1.19/src/crypto/tls/prf.go (renamed from contrib/go/_std_1.18/src/crypto/tls/prf.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/tls/ticket.go (renamed from contrib/go/_std_1.18/src/crypto/tls/ticket.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/tls/tls.go (renamed from contrib/go/_std_1.18/src/crypto/tls/tls.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/cert_pool.go | 268
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/internal/macos/corefoundation.go | 214
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/internal/macos/corefoundation.s (renamed from contrib/go/_std_1.18/src/crypto/x509/internal/macos/corefoundation.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/internal/macos/security.go | 240
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/internal/macos/security.s (renamed from contrib/go/_std_1.18/src/crypto/x509/internal/macos/security.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/notboring.go | 9
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/parser.go | 1162
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/pem_decrypt.go (renamed from contrib/go/_std_1.18/src/crypto/x509/pem_decrypt.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/pkcs1.go (renamed from contrib/go/_std_1.18/src/crypto/x509/pkcs1.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/pkcs8.go (renamed from contrib/go/_std_1.18/src/crypto/x509/pkcs8.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/pkix/pkix.go | 320
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/root.go | 32
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/root_darwin.go | 113
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/root_linux.go (renamed from contrib/go/_std_1.18/src/crypto/x509/root_linux.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/root_unix.go (renamed from contrib/go/_std_1.18/src/crypto/x509/root_unix.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/sec1.go | 125
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/verify.go | 1170
-rw-r--r-- contrib/go/_std_1.19/src/crypto/x509/x509.go | 2284
-rw-r--r-- contrib/go/_std_1.19/src/embed/embed.go | 432
-rw-r--r-- contrib/go/_std_1.19/src/encoding/asn1/asn1.go | 1122
-rw-r--r-- contrib/go/_std_1.19/src/encoding/asn1/common.go (renamed from contrib/go/_std_1.18/src/encoding/asn1/common.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/encoding/asn1/marshal.go (renamed from contrib/go/_std_1.18/src/encoding/asn1/marshal.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/encoding/base64/base64.go (renamed from contrib/go/_std_1.18/src/encoding/base64/base64.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/encoding/binary/binary.go | 804
-rw-r--r-- contrib/go/_std_1.19/src/encoding/binary/varint.go | 157
-rw-r--r-- contrib/go/_std_1.19/src/encoding/csv/reader.go | 462
-rw-r--r-- contrib/go/_std_1.19/src/encoding/csv/writer.go (renamed from contrib/go/_std_1.18/src/encoding/csv/writer.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/encoding/encoding.go | 48
-rw-r--r-- contrib/go/_std_1.19/src/encoding/hex/hex.go | 335
-rw-r--r-- contrib/go/_std_1.19/src/encoding/pem/pem.go | 316
-rw-r--r-- contrib/go/_std_1.19/src/errors/errors.go (renamed from contrib/go/_std_1.18/src/errors/errors.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/errors/wrap.go (renamed from contrib/go/_std_1.18/src/errors/wrap.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/flag/flag.go | 1180
-rw-r--r-- contrib/go/_std_1.19/src/fmt/doc.go | 383
-rw-r--r-- contrib/go/_std_1.19/src/fmt/errors.go (renamed from contrib/go/_std_1.18/src/fmt/errors.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/fmt/format.go (renamed from contrib/go/_std_1.18/src/fmt/format.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/fmt/print.go | 1203
-rw-r--r-- contrib/go/_std_1.19/src/fmt/scan.go (renamed from contrib/go/_std_1.18/src/fmt/scan.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/hash/crc32/crc32.go (renamed from contrib/go/_std_1.18/src/hash/crc32/crc32.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/hash/crc32/crc32_amd64.go | 225
-rw-r--r-- contrib/go/_std_1.19/src/hash/crc32/crc32_amd64.s (renamed from contrib/go/_std_1.18/src/hash/crc32/crc32_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/hash/crc32/crc32_generic.go (renamed from contrib/go/_std_1.18/src/hash/crc32/crc32_generic.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/hash/hash.go (renamed from contrib/go/_std_1.18/src/hash/hash.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/abi/abi.go (renamed from contrib/go/_std_1.18/src/internal/abi/abi.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/abi/abi_amd64.go (renamed from contrib/go/_std_1.18/src/internal/abi/abi_amd64.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/abi/abi_test.s (renamed from contrib/go/_std_1.18/src/internal/abi/abi_test.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/bytealg/bytealg.go (renamed from contrib/go/_std_1.18/src/internal/bytealg/bytealg.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/bytealg/compare_amd64.s (renamed from contrib/go/_std_1.18/src/internal/bytealg/compare_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/bytealg/compare_native.go | 19
-rw-r--r-- contrib/go/_std_1.19/src/internal/bytealg/count_amd64.s (renamed from contrib/go/_std_1.18/src/internal/bytealg/count_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/bytealg/count_native.go (renamed from contrib/go/_std_1.18/src/internal/bytealg/count_native.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/bytealg/equal_amd64.s (renamed from contrib/go/_std_1.18/src/internal/bytealg/equal_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/bytealg/equal_generic.go (renamed from contrib/go/_std_1.18/src/internal/bytealg/equal_generic.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/bytealg/equal_native.go (renamed from contrib/go/_std_1.18/src/internal/bytealg/equal_native.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/bytealg/index_amd64.go (renamed from contrib/go/_std_1.18/src/internal/bytealg/index_amd64.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/bytealg/index_amd64.s (renamed from contrib/go/_std_1.18/src/internal/bytealg/index_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/bytealg/index_native.go (renamed from contrib/go/_std_1.18/src/internal/bytealg/index_native.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/bytealg/indexbyte_amd64.s (renamed from contrib/go/_std_1.18/src/internal/bytealg/indexbyte_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/bytealg/indexbyte_native.go | 13
-rw-r--r-- contrib/go/_std_1.19/src/internal/cpu/cpu.go | 221
-rw-r--r-- contrib/go/_std_1.19/src/internal/cpu/cpu.s (renamed from contrib/go/_std_1.18/src/internal/cpu/cpu.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/cpu/cpu_x86.go | 187
-rw-r--r-- contrib/go/_std_1.19/src/internal/cpu/cpu_x86.s | 43
-rw-r--r-- contrib/go/_std_1.19/src/internal/fmtsort/sort.go | 219
-rw-r--r-- contrib/go/_std_1.19/src/internal/goarch/goarch.go | 60
-rw-r--r-- contrib/go/_std_1.19/src/internal/goarch/goarch_amd64.go (renamed from contrib/go/_std_1.18/src/internal/goarch/goarch_amd64.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/goarch/zgoarch_amd64.go (renamed from contrib/go/_std_1.18/src/internal/goarch/zgoarch_amd64.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/godebug/godebug.go (renamed from contrib/go/_std_1.18/src/internal/godebug/godebug.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/goexperiment/exp_boringcrypto_off.go | 9
-rw-r--r-- contrib/go/_std_1.19/src/internal/goexperiment/exp_fieldtrack_off.go (renamed from contrib/go/_std_1.18/src/internal/goexperiment/exp_fieldtrack_off.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/goexperiment/exp_heapminimum512kib_off.go (renamed from contrib/go/_std_1.18/src/internal/goexperiment/exp_heapminimum512kib_off.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/goexperiment/exp_preemptibleloops_off.go (renamed from contrib/go/_std_1.18/src/internal/goexperiment/exp_preemptibleloops_off.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/goexperiment/exp_regabiargs_on.go (renamed from contrib/go/_std_1.18/src/internal/goexperiment/exp_regabiargs_on.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/goexperiment/exp_regabiwrappers_on.go (renamed from contrib/go/_std_1.18/src/internal/goexperiment/exp_regabiwrappers_on.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/goexperiment/exp_staticlockranking_off.go (renamed from contrib/go/_std_1.18/src/internal/goexperiment/exp_staticlockranking_off.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/goexperiment/exp_unified_off.go (renamed from contrib/go/_std_1.18/src/internal/goexperiment/exp_unified_off.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/goexperiment/flags.go | 89
-rw-r--r-- contrib/go/_std_1.19/src/internal/goos/goos.go | 13
-rw-r--r-- contrib/go/_std_1.19/src/internal/goos/zgoos_darwin.go (renamed from contrib/go/_std_1.18/src/internal/goos/zgoos_darwin.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/goos/zgoos_linux.go (renamed from contrib/go/_std_1.18/src/internal/goos/zgoos_linux.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/intern/intern.go | 179
-rw-r--r-- contrib/go/_std_1.19/src/internal/itoa/itoa.go (renamed from contrib/go/_std_1.18/src/internal/itoa/itoa.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/nettrace/nettrace.go | 46
-rw-r--r-- contrib/go/_std_1.19/src/internal/oserror/errors.go (renamed from contrib/go/_std_1.18/src/internal/oserror/errors.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/copy_file_range_linux.go (renamed from contrib/go/_std_1.18/src/internal/poll/copy_file_range_linux.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/errno_unix.go | 33
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/fcntl_libc.go | 14
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/fcntl_syscall.go (renamed from contrib/go/_std_1.18/src/internal/poll/fcntl_syscall.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/fd.go | 83
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/fd_fsync_darwin.go (renamed from contrib/go/_std_1.18/src/internal/poll/fd_fsync_darwin.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/fd_fsync_posix.go (renamed from contrib/go/_std_1.18/src/internal/poll/fd_fsync_posix.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/fd_mutex.go (renamed from contrib/go/_std_1.18/src/internal/poll/fd_mutex.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/fd_opendir_darwin.go | 39
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/fd_poll_runtime.go | 169
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/fd_posix.go | 79
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/fd_unix.go | 799
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/fd_writev_darwin.go | 17
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/fd_writev_unix.go (renamed from contrib/go/_std_1.18/src/internal/poll/fd_writev_unix.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/hook_cloexec.go | 12
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/hook_unix.go | 15
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/iovec_unix.go (renamed from contrib/go/_std_1.18/src/internal/poll/iovec_unix.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/sendfile_bsd.go (renamed from contrib/go/_std_1.18/src/internal/poll/sendfile_bsd.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/sendfile_linux.go (renamed from contrib/go/_std_1.18/src/internal/poll/sendfile_linux.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/sock_cloexec.go | 50
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/sockopt.go | 36
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/sockopt_linux.go (renamed from contrib/go/_std_1.18/src/internal/poll/sockopt_linux.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/sockopt_unix.go | 18
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/sockoptip.go | 27
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/splice_linux.go (renamed from contrib/go/_std_1.18/src/internal/poll/splice_linux.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/sys_cloexec.go | 36
-rw-r--r-- contrib/go/_std_1.19/src/internal/poll/writev.go (renamed from contrib/go/_std_1.18/src/internal/poll/writev.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/race/doc.go (renamed from contrib/go/_std_1.18/src/internal/race/doc.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/race/norace.go (renamed from contrib/go/_std_1.18/src/internal/race/norace.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/reflectlite/asm.s (renamed from contrib/go/_std_1.18/src/internal/reflectlite/asm.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/reflectlite/swapper.go (renamed from contrib/go/_std_1.18/src/internal/reflectlite/swapper.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/reflectlite/type.go | 983
-rw-r--r-- contrib/go/_std_1.19/src/internal/reflectlite/value.go | 477
-rw-r--r-- contrib/go/_std_1.19/src/internal/singleflight/singleflight.go (renamed from contrib/go/_std_1.18/src/internal/singleflight/singleflight.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/syscall/execenv/execenv_default.go (renamed from contrib/go/_std_1.18/src/internal/syscall/execenv/execenv_default.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/syscall/unix/asm_darwin.s (renamed from contrib/go/_std_1.18/src/internal/syscall/unix/asm_darwin.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/syscall/unix/at.go | 42
-rw-r--r-- contrib/go/_std_1.19/src/internal/syscall/unix/at_darwin.go (renamed from contrib/go/_std_1.18/src/internal/syscall/unix/at_darwin.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/syscall/unix/at_fstatat.go | 28
-rw-r--r-- contrib/go/_std_1.19/src/internal/syscall/unix/at_sysnum_darwin.go (renamed from contrib/go/_std_1.18/src/internal/syscall/unix/at_sysnum_darwin.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/syscall/unix/at_sysnum_linux.go (renamed from contrib/go/_std_1.18/src/internal/syscall/unix/at_sysnum_linux.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/syscall/unix/at_sysnum_newfstatat_linux.go (renamed from contrib/go/_std_1.18/src/internal/syscall/unix/at_sysnum_newfstatat_linux.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/syscall/unix/copy_file_range_linux.go (renamed from contrib/go/_std_1.18/src/internal/syscall/unix/copy_file_range_linux.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/syscall/unix/getentropy_darwin.go (renamed from contrib/go/_std_1.18/src/internal/syscall/unix/getentropy_darwin.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/syscall/unix/getrandom.go (renamed from contrib/go/_std_1.18/src/internal/syscall/unix/getrandom.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/syscall/unix/getrandom_linux.go (renamed from contrib/go/_std_1.18/src/internal/syscall/unix/getrandom_linux.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/syscall/unix/net.go | 44
-rw-r--r-- contrib/go/_std_1.19/src/internal/syscall/unix/nonblocking.go (renamed from contrib/go/_std_1.18/src/internal/syscall/unix/nonblocking.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/syscall/unix/nonblocking_libc.go | 25
-rw-r--r-- contrib/go/_std_1.19/src/internal/syscall/unix/sysnum_linux_amd64.go (renamed from contrib/go/_std_1.18/src/internal/syscall/unix/sysnum_linux_amd64.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/testlog/exit.go (renamed from contrib/go/_std_1.18/src/internal/testlog/exit.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/testlog/log.go (renamed from contrib/go/_std_1.18/src/internal/testlog/log.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/internal/unsafeheader/unsafeheader.go (renamed from contrib/go/_std_1.18/src/internal/unsafeheader/unsafeheader.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/io/fs/fs.go | 258
-rw-r--r-- contrib/go/_std_1.19/src/io/fs/glob.go (renamed from contrib/go/_std_1.18/src/io/fs/glob.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/io/fs/readdir.go (renamed from contrib/go/_std_1.18/src/io/fs/readdir.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/io/fs/readfile.go (renamed from contrib/go/_std_1.18/src/io/fs/readfile.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/io/fs/stat.go (renamed from contrib/go/_std_1.18/src/io/fs/stat.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/io/fs/sub.go (renamed from contrib/go/_std_1.18/src/io/fs/sub.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/io/fs/walk.go | 129
-rw-r--r-- contrib/go/_std_1.19/src/io/io.go | 670
-rw-r--r-- contrib/go/_std_1.19/src/io/ioutil/ioutil.go | 95
-rw-r--r-- contrib/go/_std_1.19/src/io/ioutil/tempfile.go | 41
-rw-r--r-- contrib/go/_std_1.19/src/io/multi.go | 137
-rw-r--r-- contrib/go/_std_1.19/src/io/pipe.go (renamed from contrib/go/_std_1.18/src/io/pipe.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/log/log.go | 414
-rw-r--r-- contrib/go/_std_1.19/src/math/abs.go | 15
-rw-r--r-- contrib/go/_std_1.19/src/math/acosh.go | 65
-rw-r--r-- contrib/go/_std_1.19/src/math/asin.go | 67
-rw-r--r-- contrib/go/_std_1.19/src/math/asinh.go | 77
-rw-r--r-- contrib/go/_std_1.19/src/math/atan.go | 111
-rw-r--r-- contrib/go/_std_1.19/src/math/atan2.go | 77
-rw-r--r-- contrib/go/_std_1.19/src/math/atanh.go | 85
-rw-r--r-- contrib/go/_std_1.19/src/math/big/accuracy_string.go (renamed from contrib/go/_std_1.18/src/math/big/accuracy_string.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/big/arith.go | 277
-rw-r--r-- contrib/go/_std_1.19/src/math/big/arith_amd64.go (renamed from contrib/go/_std_1.18/src/math/big/arith_amd64.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/big/arith_amd64.s | 516
-rw-r--r-- contrib/go/_std_1.19/src/math/big/arith_decl.go | 18
-rw-r--r-- contrib/go/_std_1.19/src/math/big/decimal.go (renamed from contrib/go/_std_1.18/src/math/big/decimal.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/big/doc.go (renamed from contrib/go/_std_1.18/src/math/big/doc.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/big/float.go | 1729
-rw-r--r-- contrib/go/_std_1.19/src/math/big/floatconv.go | 302
-rw-r--r-- contrib/go/_std_1.19/src/math/big/floatmarsh.go | 127
-rw-r--r-- contrib/go/_std_1.19/src/math/big/ftoa.go (renamed from contrib/go/_std_1.18/src/math/big/ftoa.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/big/int.go | 1225
-rw-r--r-- contrib/go/_std_1.19/src/math/big/intconv.go | 255
-rw-r--r-- contrib/go/_std_1.19/src/math/big/intmarsh.go | 83
-rw-r--r-- contrib/go/_std_1.19/src/math/big/nat.go | 1244
-rw-r--r-- contrib/go/_std_1.19/src/math/big/natconv.go | 511
-rw-r--r-- contrib/go/_std_1.19/src/math/big/natdiv.go (renamed from contrib/go/_std_1.18/src/math/big/natdiv.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/big/prime.go (renamed from contrib/go/_std_1.18/src/math/big/prime.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/big/rat.go | 542
-rw-r--r-- contrib/go/_std_1.19/src/math/big/ratconv.go | 380
-rw-r--r-- contrib/go/_std_1.19/src/math/big/ratmarsh.go | 81
-rw-r--r-- contrib/go/_std_1.19/src/math/big/roundingmode_string.go (renamed from contrib/go/_std_1.18/src/math/big/roundingmode_string.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/big/sqrt.go | 130
-rw-r--r-- contrib/go/_std_1.19/src/math/bits.go | 62
-rw-r--r-- contrib/go/_std_1.19/src/math/bits/bits.go (renamed from contrib/go/_std_1.18/src/math/bits/bits.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/bits/bits_errors.go (renamed from contrib/go/_std_1.18/src/math/bits/bits_errors.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/bits/bits_tables.go (renamed from contrib/go/_std_1.18/src/math/bits/bits_tables.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/cbrt.go | 85
-rw-r--r-- contrib/go/_std_1.19/src/math/const.go (renamed from contrib/go/_std_1.18/src/math/const.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/copysign.go | 12
-rw-r--r-- contrib/go/_std_1.19/src/math/dim.go | 94
-rw-r--r-- contrib/go/_std_1.19/src/math/dim_amd64.s (renamed from contrib/go/_std_1.18/src/math/dim_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/dim_asm.go (renamed from contrib/go/_std_1.18/src/math/dim_asm.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/erf.go | 351
-rw-r--r-- contrib/go/_std_1.19/src/math/erfinv.go | 129
-rw-r--r-- contrib/go/_std_1.19/src/math/exp.go | 203
-rw-r--r-- contrib/go/_std_1.19/src/math/exp2_noasm.go (renamed from contrib/go/_std_1.18/src/math/exp2_noasm.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/exp_amd64.go (renamed from contrib/go/_std_1.18/src/math/exp_amd64.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/exp_amd64.s (renamed from contrib/go/_std_1.18/src/math/exp_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/exp_asm.go (renamed from contrib/go/_std_1.18/src/math/exp_asm.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/expm1.go | 244
-rw-r--r-- contrib/go/_std_1.19/src/math/floor.go | 151
-rw-r--r-- contrib/go/_std_1.19/src/math/floor_amd64.s (renamed from contrib/go/_std_1.18/src/math/floor_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/floor_asm.go (renamed from contrib/go/_std_1.18/src/math/floor_asm.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/fma.go (renamed from contrib/go/_std_1.18/src/math/fma.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/frexp.go | 39
-rw-r--r-- contrib/go/_std_1.19/src/math/gamma.go | 222
-rw-r--r-- contrib/go/_std_1.19/src/math/hypot.go | 44
-rw-r--r-- contrib/go/_std_1.19/src/math/hypot_amd64.s (renamed from contrib/go/_std_1.18/src/math/hypot_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/hypot_asm.go (renamed from contrib/go/_std_1.18/src/math/hypot_asm.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/j0.go | 429
-rw-r--r-- contrib/go/_std_1.19/src/math/j1.go | 424
-rw-r--r-- contrib/go/_std_1.19/src/math/jn.go | 306
-rw-r--r-- contrib/go/_std_1.19/src/math/ldexp.go | 51
-rw-r--r-- contrib/go/_std_1.19/src/math/lgamma.go | 366
-rw-r--r-- contrib/go/_std_1.19/src/math/log.go | 129
-rw-r--r-- contrib/go/_std_1.19/src/math/log10.go (renamed from contrib/go/_std_1.18/src/math/log10.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/log1p.go | 203
-rw-r--r-- contrib/go/_std_1.19/src/math/log_amd64.s (renamed from contrib/go/_std_1.18/src/math/log_amd64.s) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/log_asm.go (renamed from contrib/go/_std_1.18/src/math/log_asm.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/logb.go | 52
-rw-r--r-- contrib/go/_std_1.19/src/math/mod.go | 52
-rw-r--r-- contrib/go/_std_1.19/src/math/modf.go | 43
-rw-r--r-- contrib/go/_std_1.19/src/math/modf_noasm.go (renamed from contrib/go/_std_1.18/src/math/modf_noasm.go) | 0
-rw-r--r-- contrib/go/_std_1.19/src/math/nextafter.go | 51
-rw-r--r-- contrib/go/_std_1.19/src/math/pow.go | 157
-rw-r--r-- contrib/go/_std_1.19/src/math/pow10.go | 47
-rw-r--r-- contrib/go/_std_1.19/src/math/rand/exp.go | 221
-rw-r--r-- contrib/go/_std_1.19/src/math/rand/normal.go | 156
-rw-r--r-- contrib/go/_std_1.19/src/math/rand/rand.go | 419
-rw-r--r-- contrib/go/_std_1.19/src/math/rand/rng.go (renamed from contrib/go/_std_1.18/src/math/rand/rng.go) | 0
-rw-r--r--contrib/go/_std_1.19/src/math/rand/zipf.go (renamed from contrib/go/_std_1.18/src/math/rand/zipf.go)0
-rw-r--r--contrib/go/_std_1.19/src/math/remainder.go95
-rw-r--r--contrib/go/_std_1.19/src/math/signbit.go (renamed from contrib/go/_std_1.18/src/math/signbit.go)0
-rw-r--r--contrib/go/_std_1.19/src/math/sin.go244
-rw-r--r--contrib/go/_std_1.19/src/math/sincos.go73
-rw-r--r--contrib/go/_std_1.19/src/math/sinh.go93
-rw-r--r--contrib/go/_std_1.19/src/math/sqrt.go150
-rw-r--r--contrib/go/_std_1.19/src/math/sqrt_amd64.s (renamed from contrib/go/_std_1.18/src/math/sqrt_amd64.s)0
-rw-r--r--contrib/go/_std_1.19/src/math/sqrt_asm.go (renamed from contrib/go/_std_1.18/src/math/sqrt_asm.go)0
-rw-r--r--contrib/go/_std_1.19/src/math/stubs.go (renamed from contrib/go/_std_1.18/src/math/stubs.go)0
-rw-r--r--contrib/go/_std_1.19/src/math/tan.go140
-rw-r--r--contrib/go/_std_1.19/src/math/tanh.go105
-rw-r--r--contrib/go/_std_1.19/src/math/trig_reduce.go102
-rw-r--r--contrib/go/_std_1.19/src/math/unsafe.go (renamed from contrib/go/_std_1.18/src/math/unsafe.go)0
-rw-r--r--contrib/go/_std_1.19/src/mime/encodedword.go (renamed from contrib/go/_std_1.18/src/mime/encodedword.go)0
-rw-r--r--contrib/go/_std_1.19/src/mime/grammar.go (renamed from contrib/go/_std_1.18/src/mime/grammar.go)0
-rw-r--r--contrib/go/_std_1.19/src/mime/mediatype.go (renamed from contrib/go/_std_1.18/src/mime/mediatype.go)0
-rw-r--r--contrib/go/_std_1.19/src/mime/multipart/formdata.go (renamed from contrib/go/_std_1.18/src/mime/multipart/formdata.go)0
-rw-r--r--contrib/go/_std_1.19/src/mime/multipart/multipart.go447
-rw-r--r--contrib/go/_std_1.19/src/mime/multipart/writer.go (renamed from contrib/go/_std_1.18/src/mime/multipart/writer.go)0
-rw-r--r--contrib/go/_std_1.19/src/mime/quotedprintable/reader.go (renamed from contrib/go/_std_1.18/src/mime/quotedprintable/reader.go)0
-rw-r--r--contrib/go/_std_1.19/src/mime/quotedprintable/writer.go (renamed from contrib/go/_std_1.18/src/mime/quotedprintable/writer.go)0
-rw-r--r--contrib/go/_std_1.19/src/mime/type.go202
-rw-r--r--contrib/go/_std_1.19/src/mime/type_unix.go126
-rw-r--r--contrib/go/_std_1.19/src/net/addrselect.go388
-rw-r--r--contrib/go/_std_1.19/src/net/cgo_bsd.go (renamed from contrib/go/_std_1.18/src/net/cgo_bsd.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/cgo_linux.go (renamed from contrib/go/_std_1.18/src/net/cgo_linux.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/cgo_resnew.go (renamed from contrib/go/_std_1.18/src/net/cgo_resnew.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/cgo_socknew.go (renamed from contrib/go/_std_1.18/src/net/cgo_socknew.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/cgo_sockold.go (renamed from contrib/go/_std_1.18/src/net/cgo_sockold.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/cgo_unix.go348
-rw-r--r--contrib/go/_std_1.19/src/net/conf.go352
-rw-r--r--contrib/go/_std_1.19/src/net/dial.go742
-rw-r--r--contrib/go/_std_1.19/src/net/dnsclient.go228
-rw-r--r--contrib/go/_std_1.19/src/net/dnsclient_unix.go826
-rw-r--r--contrib/go/_std_1.19/src/net/dnsconfig.go43
-rw-r--r--contrib/go/_std_1.19/src/net/dnsconfig_unix.go157
-rw-r--r--contrib/go/_std_1.19/src/net/error_posix.go21
-rw-r--r--contrib/go/_std_1.19/src/net/error_unix.go16
-rw-r--r--contrib/go/_std_1.19/src/net/fd_posix.go147
-rw-r--r--contrib/go/_std_1.19/src/net/fd_unix.go203
-rw-r--r--contrib/go/_std_1.19/src/net/file.go (renamed from contrib/go/_std_1.18/src/net/file.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/file_unix.go119
-rw-r--r--contrib/go/_std_1.19/src/net/hook.go (renamed from contrib/go/_std_1.18/src/net/hook.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/hook_unix.go20
-rw-r--r--contrib/go/_std_1.19/src/net/hosts.go (renamed from contrib/go/_std_1.18/src/net/hosts.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/http/client.go1023
-rw-r--r--contrib/go/_std_1.19/src/net/http/clone.go (renamed from contrib/go/_std_1.18/src/net/http/clone.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/http/cookie.go466
-rw-r--r--contrib/go/_std_1.19/src/net/http/doc.go106
-rw-r--r--contrib/go/_std_1.19/src/net/http/filetransport.go123
-rw-r--r--contrib/go/_std_1.19/src/net/http/fs.go972
-rw-r--r--contrib/go/_std_1.19/src/net/http/h2_bundle.go10924
-rw-r--r--contrib/go/_std_1.19/src/net/http/header.go280
-rw-r--r--contrib/go/_std_1.19/src/net/http/http.go (renamed from contrib/go/_std_1.18/src/net/http/http.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/http/httptrace/trace.go (renamed from contrib/go/_std_1.18/src/net/http/httptrace/trace.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/http/internal/ascii/print.go (renamed from contrib/go/_std_1.18/src/net/http/internal/ascii/print.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/http/internal/chunked.go262
-rw-r--r--contrib/go/_std_1.19/src/net/http/jar.go (renamed from contrib/go/_std_1.18/src/net/http/jar.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/http/method.go (renamed from contrib/go/_std_1.18/src/net/http/method.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/http/request.go1483
-rw-r--r--contrib/go/_std_1.19/src/net/http/response.go371
-rw-r--r--contrib/go/_std_1.19/src/net/http/roundtrip.go (renamed from contrib/go/_std_1.18/src/net/http/roundtrip.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/http/server.go3655
-rw-r--r--contrib/go/_std_1.19/src/net/http/sniff.go304
-rw-r--r--contrib/go/_std_1.19/src/net/http/socks_bundle.go (renamed from contrib/go/_std_1.18/src/net/http/socks_bundle.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/http/status.go210
-rw-r--r--contrib/go/_std_1.19/src/net/http/transfer.go1131
-rw-r--r--contrib/go/_std_1.19/src/net/http/transport.go2906
-rw-r--r--contrib/go/_std_1.19/src/net/http/transport_default_other.go (renamed from contrib/go/_std_1.18/src/net/http/transport_default_other.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/interface.go (renamed from contrib/go/_std_1.18/src/net/interface.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/interface_bsd.go (renamed from contrib/go/_std_1.18/src/net/interface_bsd.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/interface_darwin.go (renamed from contrib/go/_std_1.18/src/net/interface_darwin.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/interface_linux.go (renamed from contrib/go/_std_1.18/src/net/interface_linux.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/ip.go (renamed from contrib/go/_std_1.18/src/net/ip.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/iprawsock.go (renamed from contrib/go/_std_1.18/src/net/iprawsock.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/iprawsock_posix.go147
-rw-r--r--contrib/go/_std_1.19/src/net/ipsock.go (renamed from contrib/go/_std_1.18/src/net/ipsock.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/ipsock_posix.go232
-rw-r--r--contrib/go/_std_1.19/src/net/lookup.go893
-rw-r--r--contrib/go/_std_1.19/src/net/lookup_unix.go156
-rw-r--r--contrib/go/_std_1.19/src/net/mac.go86
-rw-r--r--contrib/go/_std_1.19/src/net/net.go771
-rw-r--r--contrib/go/_std_1.19/src/net/netip/leaf_alts.go (renamed from contrib/go/_std_1.18/src/net/netip/leaf_alts.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/netip/netip.go1504
-rw-r--r--contrib/go/_std_1.19/src/net/netip/uint128.go (renamed from contrib/go/_std_1.18/src/net/netip/uint128.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/nss.go158
-rw-r--r--contrib/go/_std_1.19/src/net/parse.go (renamed from contrib/go/_std_1.18/src/net/parse.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/pipe.go (renamed from contrib/go/_std_1.18/src/net/pipe.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/port.go (renamed from contrib/go/_std_1.18/src/net/port.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/port_unix.go57
-rw-r--r--contrib/go/_std_1.19/src/net/rawconn.go (renamed from contrib/go/_std_1.18/src/net/rawconn.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/sendfile_linux.go (renamed from contrib/go/_std_1.18/src/net/sendfile_linux.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/sendfile_unix_alt.go (renamed from contrib/go/_std_1.18/src/net/sendfile_unix_alt.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/sock_bsd.go (renamed from contrib/go/_std_1.18/src/net/sock_bsd.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/sock_cloexec.go25
-rw-r--r--contrib/go/_std_1.19/src/net/sock_linux.go86
-rw-r--r--contrib/go/_std_1.19/src/net/sock_posix.go254
-rw-r--r--contrib/go/_std_1.19/src/net/sockaddr_posix.go34
-rw-r--r--contrib/go/_std_1.19/src/net/sockopt_bsd.go (renamed from contrib/go/_std_1.18/src/net/sockopt_bsd.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/sockopt_linux.go (renamed from contrib/go/_std_1.18/src/net/sockopt_linux.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/sockopt_posix.go134
-rw-r--r--contrib/go/_std_1.19/src/net/sockoptip_bsdvar.go (renamed from contrib/go/_std_1.18/src/net/sockoptip_bsdvar.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/sockoptip_linux.go (renamed from contrib/go/_std_1.18/src/net/sockoptip_linux.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/sockoptip_posix.go49
-rw-r--r--contrib/go/_std_1.19/src/net/splice_linux.go (renamed from contrib/go/_std_1.18/src/net/splice_linux.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/splice_stub.go (renamed from contrib/go/_std_1.18/src/net/splice_stub.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/sys_cloexec.go36
-rw-r--r--contrib/go/_std_1.19/src/net/tcpsock.go (renamed from contrib/go/_std_1.18/src/net/tcpsock.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/tcpsock_posix.go176
-rw-r--r--contrib/go/_std_1.19/src/net/tcpsockopt_darwin.go (renamed from contrib/go/_std_1.18/src/net/tcpsockopt_darwin.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/tcpsockopt_posix.go18
-rw-r--r--contrib/go/_std_1.19/src/net/tcpsockopt_unix.go (renamed from contrib/go/_std_1.18/src/net/tcpsockopt_unix.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/textproto/header.go (renamed from contrib/go/_std_1.18/src/net/textproto/header.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/textproto/pipeline.go (renamed from contrib/go/_std_1.18/src/net/textproto/pipeline.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/textproto/reader.go788
-rw-r--r--contrib/go/_std_1.19/src/net/textproto/textproto.go152
-rw-r--r--contrib/go/_std_1.19/src/net/textproto/writer.go (renamed from contrib/go/_std_1.18/src/net/textproto/writer.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/udpsock.go368
-rw-r--r--contrib/go/_std_1.19/src/net/udpsock_posix.go269
-rw-r--r--contrib/go/_std_1.19/src/net/unixsock.go (renamed from contrib/go/_std_1.18/src/net/unixsock.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/unixsock_posix.go227
-rw-r--r--contrib/go/_std_1.19/src/net/unixsock_readmsg_cloexec.go (renamed from contrib/go/_std_1.18/src/net/unixsock_readmsg_cloexec.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/unixsock_readmsg_cmsg_cloexec.go (renamed from contrib/go/_std_1.18/src/net/unixsock_readmsg_cmsg_cloexec.go)0
-rw-r--r--contrib/go/_std_1.19/src/net/url/url.go1265
-rw-r--r--contrib/go/_std_1.19/src/net/writev_unix.go (renamed from contrib/go/_std_1.18/src/net/writev_unix.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/dir.go (renamed from contrib/go/_std_1.18/src/os/dir.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/dir_darwin.go (renamed from contrib/go/_std_1.18/src/os/dir_darwin.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/dir_unix.go (renamed from contrib/go/_std_1.18/src/os/dir_unix.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/dirent_linux.go (renamed from contrib/go/_std_1.18/src/os/dirent_linux.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/endian_little.go9
-rw-r--r--contrib/go/_std_1.19/src/os/env.go (renamed from contrib/go/_std_1.18/src/os/env.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/error.go (renamed from contrib/go/_std_1.18/src/os/error.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/error_errno.go (renamed from contrib/go/_std_1.18/src/os/error_errno.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/error_posix.go18
-rw-r--r--contrib/go/_std_1.19/src/os/exec.go (renamed from contrib/go/_std_1.18/src/os/exec.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/exec_posix.go136
-rw-r--r--contrib/go/_std_1.19/src/os/exec_unix.go106
-rw-r--r--contrib/go/_std_1.19/src/os/executable.go (renamed from contrib/go/_std_1.18/src/os/executable.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/executable_darwin.go (renamed from contrib/go/_std_1.18/src/os/executable_darwin.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/executable_procfs.go (renamed from contrib/go/_std_1.18/src/os/executable_procfs.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/file.go726
-rw-r--r--contrib/go/_std_1.19/src/os/file_posix.go250
-rw-r--r--contrib/go/_std_1.19/src/os/file_unix.go430
-rw-r--r--contrib/go/_std_1.19/src/os/getwd.go (renamed from contrib/go/_std_1.18/src/os/getwd.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/path.go (renamed from contrib/go/_std_1.18/src/os/path.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/path_unix.go75
-rw-r--r--contrib/go/_std_1.19/src/os/pipe2_unix.go22
-rw-r--r--contrib/go/_std_1.19/src/os/pipe_unix.go28
-rw-r--r--contrib/go/_std_1.19/src/os/proc.go (renamed from contrib/go/_std_1.18/src/os/proc.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/rawconn.go (renamed from contrib/go/_std_1.18/src/os/rawconn.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/readfrom_linux.go (renamed from contrib/go/_std_1.18/src/os/readfrom_linux.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/readfrom_stub.go (renamed from contrib/go/_std_1.18/src/os/readfrom_stub.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/removeall_at.go192
-rw-r--r--contrib/go/_std_1.19/src/os/rlimit.go32
-rw-r--r--contrib/go/_std_1.19/src/os/rlimit_darwin.go22
-rw-r--r--contrib/go/_std_1.19/src/os/rlimit_stub.go12
-rw-r--r--contrib/go/_std_1.19/src/os/stat.go (renamed from contrib/go/_std_1.18/src/os/stat.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/stat_darwin.go47
-rw-r--r--contrib/go/_std_1.19/src/os/stat_linux.go47
-rw-r--r--contrib/go/_std_1.19/src/os/stat_unix.go52
-rw-r--r--contrib/go/_std_1.19/src/os/sticky_bsd.go (renamed from contrib/go/_std_1.18/src/os/sticky_bsd.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/sticky_notbsd.go (renamed from contrib/go/_std_1.18/src/os/sticky_notbsd.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/str.go (renamed from contrib/go/_std_1.18/src/os/str.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/sys.go (renamed from contrib/go/_std_1.18/src/os/sys.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/sys_bsd.go (renamed from contrib/go/_std_1.18/src/os/sys_bsd.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/sys_linux.go (renamed from contrib/go/_std_1.18/src/os/sys_linux.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/sys_unix.go14
-rw-r--r--contrib/go/_std_1.19/src/os/tempfile.go (renamed from contrib/go/_std_1.18/src/os/tempfile.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/types.go (renamed from contrib/go/_std_1.18/src/os/types.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/types_unix.go (renamed from contrib/go/_std_1.18/src/os/types_unix.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/wait_unimp.go (renamed from contrib/go/_std_1.18/src/os/wait_unimp.go)0
-rw-r--r--contrib/go/_std_1.19/src/os/wait_waitid.go (renamed from contrib/go/_std_1.18/src/os/wait_waitid.go)0
-rw-r--r--contrib/go/_std_1.19/src/path/filepath/match.go369
-rw-r--r--contrib/go/_std_1.19/src/path/filepath/path.go615
-rw-r--r--contrib/go/_std_1.19/src/path/filepath/path_unix.go53
-rw-r--r--contrib/go/_std_1.19/src/path/filepath/symlink.go (renamed from contrib/go/_std_1.18/src/path/filepath/symlink.go)0
-rw-r--r--contrib/go/_std_1.19/src/path/filepath/symlink_unix.go7
-rw-r--r--contrib/go/_std_1.19/src/path/match.go230
-rw-r--r--contrib/go/_std_1.19/src/path/path.go233
-rw-r--r--contrib/go/_std_1.19/src/reflect/abi.go510
-rw-r--r--contrib/go/_std_1.19/src/reflect/asm_amd64.s (renamed from contrib/go/_std_1.18/src/reflect/asm_amd64.s)0
-rw-r--r--contrib/go/_std_1.19/src/reflect/deepequal.go238
-rw-r--r--contrib/go/_std_1.19/src/reflect/float32reg_generic.go23
-rw-r--r--contrib/go/_std_1.19/src/reflect/makefunc.go176
-rw-r--r--contrib/go/_std_1.19/src/reflect/swapper.go (renamed from contrib/go/_std_1.18/src/reflect/swapper.go)0
-rw-r--r--contrib/go/_std_1.19/src/reflect/type.go3186
-rw-r--r--contrib/go/_std_1.19/src/reflect/value.go3620
-rw-r--r--contrib/go/_std_1.19/src/reflect/visiblefields.go (renamed from contrib/go/_std_1.18/src/reflect/visiblefields.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/alg.go353
-rw-r--r--contrib/go/_std_1.19/src/runtime/asan0.go23
-rw-r--r--contrib/go/_std_1.19/src/runtime/asm.s (renamed from contrib/go/_std_1.18/src/runtime/asm.s)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/asm_amd64.h14
-rw-r--r--contrib/go/_std_1.19/src/runtime/asm_amd64.s2059
-rw-r--r--contrib/go/_std_1.19/src/runtime/atomic_pointer.go (renamed from contrib/go/_std_1.18/src/runtime/atomic_pointer.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo.go (renamed from contrib/go/_std_1.18/src/runtime/cgo.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/abi_amd64.h (renamed from contrib/go/_std_1.18/src/runtime/cgo/abi_amd64.h)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/asm_amd64.s (renamed from contrib/go/_std_1.18/src/runtime/cgo/asm_amd64.s)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/callbacks.go107
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/callbacks_traceback.go (renamed from contrib/go/_std_1.18/src/runtime/cgo/callbacks_traceback.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/cgo.go31
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/gcc_amd64.S (renamed from contrib/go/_std_1.18/src/runtime/cgo/gcc_amd64.S)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/gcc_context.c (renamed from contrib/go/_std_1.18/src/runtime/cgo/gcc_context.c)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/gcc_darwin_amd64.c (renamed from contrib/go/_std_1.18/src/runtime/cgo/gcc_darwin_amd64.c)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/gcc_fatalf.c (renamed from contrib/go/_std_1.18/src/runtime/cgo/gcc_fatalf.c)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/gcc_libinit.c (renamed from contrib/go/_std_1.18/src/runtime/cgo/gcc_libinit.c)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/gcc_linux_amd64.c96
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/gcc_mmap.c (renamed from contrib/go/_std_1.18/src/runtime/cgo/gcc_mmap.c)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/gcc_setenv.c (renamed from contrib/go/_std_1.18/src/runtime/cgo/gcc_setenv.c)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/gcc_sigaction.c (renamed from contrib/go/_std_1.18/src/runtime/cgo/gcc_sigaction.c)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/gcc_traceback.c (renamed from contrib/go/_std_1.18/src/runtime/cgo/gcc_traceback.c)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/gcc_util.c (renamed from contrib/go/_std_1.18/src/runtime/cgo/gcc_util.c)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/handle.go (renamed from contrib/go/_std_1.18/src/runtime/cgo/handle.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/iscgo.go (renamed from contrib/go/_std_1.18/src/runtime/cgo/iscgo.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/libcgo.h (renamed from contrib/go/_std_1.18/src/runtime/cgo/libcgo.h)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/libcgo_unix.h (renamed from contrib/go/_std_1.18/src/runtime/cgo/libcgo_unix.h)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/linux.go (renamed from contrib/go/_std_1.18/src/runtime/cgo/linux.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/linux_syscall.c (renamed from contrib/go/_std_1.18/src/runtime/cgo/linux_syscall.c)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/mmap.go (renamed from contrib/go/_std_1.18/src/runtime/cgo/mmap.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/setenv.go21
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo/sigaction.go (renamed from contrib/go/_std_1.18/src/runtime/cgo/sigaction.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo_mmap.go70
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgo_sigaction.go94
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgocall.go643
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgocallback.go (renamed from contrib/go/_std_1.18/src/runtime/cgocallback.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cgocheck.go268
-rw-r--r--contrib/go/_std_1.19/src/runtime/chan.go851
-rw-r--r--contrib/go/_std_1.19/src/runtime/checkptr.go (renamed from contrib/go/_std_1.18/src/runtime/checkptr.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/compiler.go12
-rw-r--r--contrib/go/_std_1.19/src/runtime/complex.go (renamed from contrib/go/_std_1.18/src/runtime/complex.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cpuflags.go (renamed from contrib/go/_std_1.18/src/runtime/cpuflags.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cpuflags_amd64.go (renamed from contrib/go/_std_1.18/src/runtime/cpuflags_amd64.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/cpuprof.go238
-rw-r--r--contrib/go/_std_1.19/src/runtime/cputicks.go11
-rw-r--r--contrib/go/_std_1.19/src/runtime/debug.go115
-rw-r--r--contrib/go/_std_1.19/src/runtime/debugcall.go (renamed from contrib/go/_std_1.18/src/runtime/debugcall.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/debuglog.go825
-rw-r--r--contrib/go/_std_1.19/src/runtime/debuglog_off.go (renamed from contrib/go/_std_1.18/src/runtime/debuglog_off.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/defs_darwin_amd64.go (renamed from contrib/go/_std_1.18/src/runtime/defs_darwin_amd64.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/defs_linux_amd64.go301
-rw-r--r--contrib/go/_std_1.19/src/runtime/duff_amd64.s (renamed from contrib/go/_std_1.18/src/runtime/duff_amd64.s)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/env_posix.go78
-rw-r--r--contrib/go/_std_1.19/src/runtime/error.go330
-rw-r--r--contrib/go/_std_1.19/src/runtime/extern.go287
-rw-r--r--contrib/go/_std_1.19/src/runtime/fastlog2.go (renamed from contrib/go/_std_1.18/src/runtime/fastlog2.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/fastlog2table.go (renamed from contrib/go/_std_1.18/src/runtime/fastlog2table.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/float.go54
-rw-r--r--contrib/go/_std_1.19/src/runtime/funcdata.h (renamed from contrib/go/_std_1.18/src/runtime/funcdata.h)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/go_tls.h (renamed from contrib/go/_std_1.18/src/runtime/go_tls.h)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/hash64.go92
-rw-r--r--contrib/go/_std_1.19/src/runtime/heapdump.go752
-rw-r--r--contrib/go/_std_1.19/src/runtime/histogram.go171
-rw-r--r--contrib/go/_std_1.19/src/runtime/iface.go (renamed from contrib/go/_std_1.18/src/runtime/iface.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/internal/atomic/atomic_amd64.go117
-rw-r--r--contrib/go/_std_1.19/src/runtime/internal/atomic/atomic_amd64.s (renamed from contrib/go/_std_1.18/src/runtime/internal/atomic/atomic_amd64.s)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/internal/atomic/doc.go (renamed from contrib/go/_std_1.18/src/runtime/internal/atomic/doc.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/internal/atomic/stubs.go (renamed from contrib/go/_std_1.18/src/runtime/internal/atomic/stubs.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/internal/atomic/types.go431
-rw-r--r--contrib/go/_std_1.19/src/runtime/internal/atomic/types_64bit.go (renamed from contrib/go/_std_1.18/src/runtime/internal/atomic/types_64bit.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/internal/atomic/unaligned.go (renamed from contrib/go/_std_1.18/src/runtime/internal/atomic/unaligned.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/internal/math/math.go (renamed from contrib/go/_std_1.18/src/runtime/internal/math/math.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/internal/sys/consts.go (renamed from contrib/go/_std_1.18/src/runtime/internal/sys/consts.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/internal/sys/intrinsics.go (renamed from contrib/go/_std_1.18/src/runtime/internal/sys/intrinsics.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/internal/sys/intrinsics_common.go (renamed from contrib/go/_std_1.18/src/runtime/internal/sys/intrinsics_common.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/internal/sys/sys.go (renamed from contrib/go/_std_1.18/src/runtime/internal/sys/sys.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/internal/sys/zversion.go (renamed from contrib/go/_std_1.18/src/runtime/internal/sys/zversion.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/internal/syscall/asm_linux_amd64.s47
-rw-r--r--contrib/go/_std_1.19/src/runtime/internal/syscall/syscall_linux.go39
-rw-r--r--contrib/go/_std_1.19/src/runtime/lfstack.go (renamed from contrib/go/_std_1.18/src/runtime/lfstack.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/lfstack_64bit.go58
-rw-r--r--contrib/go/_std_1.19/src/runtime/lock_futex.go246
-rw-r--r--contrib/go/_std_1.19/src/runtime/lock_sema.go304
-rw-r--r--contrib/go/_std_1.19/src/runtime/lockrank.go260
-rw-r--r--contrib/go/_std_1.19/src/runtime/lockrank_off.go66
-rw-r--r--contrib/go/_std_1.19/src/runtime/malloc.go1501
-rw-r--r--contrib/go/_std_1.19/src/runtime/map.go1418
-rw-r--r--contrib/go/_std_1.19/src/runtime/map_fast32.go462
-rw-r--r--contrib/go/_std_1.19/src/runtime/map_fast64.go470
-rw-r--r--contrib/go/_std_1.19/src/runtime/map_faststr.go485
-rw-r--r--contrib/go/_std_1.19/src/runtime/mbarrier.go344
-rw-r--r--contrib/go/_std_1.19/src/runtime/mbitmap.go2056
-rw-r--r--contrib/go/_std_1.19/src/runtime/mcache.go329
-rw-r--r--contrib/go/_std_1.19/src/runtime/mcentral.go (renamed from contrib/go/_std_1.18/src/runtime/mcentral.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/mcheckmark.go (renamed from contrib/go/_std_1.18/src/runtime/mcheckmark.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/mem.go143
-rw-r--r--contrib/go/_std_1.19/src/runtime/mem_darwin.go70
-rw-r--r--contrib/go/_std_1.19/src/runtime/mem_linux.go193
-rw-r--r--contrib/go/_std_1.19/src/runtime/memclr_amd64.s218
-rw-r--r--contrib/go/_std_1.19/src/runtime/memmove_amd64.s532
-rw-r--r--contrib/go/_std_1.19/src/runtime/metrics.go618
-rw-r--r--contrib/go/_std_1.19/src/runtime/mfinal.go491
-rw-r--r--contrib/go/_std_1.19/src/runtime/mfixalloc.go (renamed from contrib/go/_std_1.18/src/runtime/mfixalloc.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/mgc.go1761
-rw-r--r--contrib/go/_std_1.19/src/runtime/mgclimit.go484
-rw-r--r--contrib/go/_std_1.19/src/runtime/mgcmark.go1601
-rw-r--r--contrib/go/_std_1.19/src/runtime/mgcpacer.go1579
-rw-r--r--contrib/go/_std_1.19/src/runtime/mgcscavenge.go1105
-rw-r--r--contrib/go/_std_1.19/src/runtime/mgcstack.go353
-rw-r--r--contrib/go/_std_1.19/src/runtime/mgcsweep.go889
-rw-r--r--contrib/go/_std_1.19/src/runtime/mgcwork.go488
-rw-r--r--contrib/go/_std_1.19/src/runtime/mheap.go2156
-rw-r--r--contrib/go/_std_1.19/src/runtime/mpagealloc.go1013
-rw-r--r--contrib/go/_std_1.19/src/runtime/mpagealloc_64bit.go257
-rw-r--r--contrib/go/_std_1.19/src/runtime/mpagecache.go177
-rw-r--r--contrib/go/_std_1.19/src/runtime/mpallocbits.go (renamed from contrib/go/_std_1.18/src/runtime/mpallocbits.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/mprof.go1283
-rw-r--r--contrib/go/_std_1.19/src/runtime/mranges.go436
-rw-r--r--contrib/go/_std_1.19/src/runtime/msan0.go (renamed from contrib/go/_std_1.18/src/runtime/msan0.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/msize.go (renamed from contrib/go/_std_1.18/src/runtime/msize.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/mspanset.go354
-rw-r--r--contrib/go/_std_1.19/src/runtime/mstats.go888
-rw-r--r--contrib/go/_std_1.19/src/runtime/mwbbuf.go290
-rw-r--r--contrib/go/_std_1.19/src/runtime/nbpipe_pipe.go (renamed from contrib/go/_std_1.18/src/runtime/nbpipe_pipe.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/nbpipe_pipe2.go11
-rw-r--r--contrib/go/_std_1.19/src/runtime/netpoll.go656
-rw-r--r--contrib/go/_std_1.19/src/runtime/netpoll_epoll.go (renamed from contrib/go/_std_1.18/src/runtime/netpoll_epoll.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/netpoll_kqueue.go (renamed from contrib/go/_std_1.18/src/runtime/netpoll_kqueue.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/os_darwin.go474
-rw-r--r--contrib/go/_std_1.19/src/runtime/os_linux.go888
-rw-r--r--contrib/go/_std_1.19/src/runtime/os_linux_generic.go (renamed from contrib/go/_std_1.18/src/runtime/os_linux_generic.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/os_linux_noauxv.go10
-rw-r--r--contrib/go/_std_1.19/src/runtime/os_linux_x86.go (renamed from contrib/go/_std_1.18/src/runtime/os_linux_x86.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/os_nonopenbsd.go (renamed from contrib/go/_std_1.18/src/runtime/os_nonopenbsd.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/panic.go1370
-rw-r--r--contrib/go/_std_1.19/src/runtime/plugin.go (renamed from contrib/go/_std_1.18/src/runtime/plugin.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/preempt.go (renamed from contrib/go/_std_1.18/src/runtime/preempt.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/preempt_amd64.s (renamed from contrib/go/_std_1.18/src/runtime/preempt_amd64.s)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/preempt_nonwindows.go (renamed from contrib/go/_std_1.18/src/runtime/preempt_nonwindows.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/print.go (renamed from contrib/go/_std_1.18/src/runtime/print.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/proc.go6343
-rw-r--r--contrib/go/_std_1.19/src/runtime/profbuf.go560
-rw-r--r--contrib/go/_std_1.19/src/runtime/proflabel.go (renamed from contrib/go/_std_1.18/src/runtime/proflabel.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/race0.go (renamed from contrib/go/_std_1.18/src/runtime/race0.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/rdebug.go (renamed from contrib/go/_std_1.18/src/runtime/rdebug.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/relax_stub.go (renamed from contrib/go/_std_1.18/src/runtime/relax_stub.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/rt0_darwin_amd64.s (renamed from contrib/go/_std_1.18/src/runtime/rt0_darwin_amd64.s)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/rt0_linux_amd64.s (renamed from contrib/go/_std_1.18/src/runtime/rt0_linux_amd64.s)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/runtime.go67
-rw-r--r--contrib/go/_std_1.19/src/runtime/runtime1.go559
-rw-r--r--contrib/go/_std_1.19/src/runtime/runtime2.go1151
-rw-r--r--contrib/go/_std_1.19/src/runtime/runtime_boring.go19
-rw-r--r--contrib/go/_std_1.19/src/runtime/rwmutex.go (renamed from contrib/go/_std_1.18/src/runtime/rwmutex.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/select.go (renamed from contrib/go/_std_1.18/src/runtime/select.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/sema.go623
-rw-r--r--contrib/go/_std_1.19/src/runtime/signal_amd64.go (renamed from contrib/go/_std_1.18/src/runtime/signal_amd64.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/signal_darwin.go (renamed from contrib/go/_std_1.18/src/runtime/signal_darwin.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/signal_darwin_amd64.go96
-rw-r--r--contrib/go/_std_1.19/src/runtime/signal_linux_amd64.go (renamed from contrib/go/_std_1.18/src/runtime/signal_linux_amd64.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/signal_unix.go1348
-rw-r--r--contrib/go/_std_1.19/src/runtime/sigqueue.go275
-rw-r--r--contrib/go/_std_1.19/src/runtime/sigqueue_note.go (renamed from contrib/go/_std_1.18/src/runtime/sigqueue_note.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/sigtab_linux_generic.go (renamed from contrib/go/_std_1.18/src/runtime/sigtab_linux_generic.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/sizeclasses.go (renamed from contrib/go/_std_1.18/src/runtime/sizeclasses.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/slice.go344
-rw-r--r--contrib/go/_std_1.19/src/runtime/softfloat64.go (renamed from contrib/go/_std_1.18/src/runtime/softfloat64.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/stack.go1484
-rw-r--r--contrib/go/_std_1.19/src/runtime/string.go589
-rw-r--r--contrib/go/_std_1.19/src/runtime/stubs.go480
-rw-r--r--contrib/go/_std_1.19/src/runtime/stubs2.go41
-rw-r--r--contrib/go/_std_1.19/src/runtime/stubs3.go (renamed from contrib/go/_std_1.18/src/runtime/stubs3.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/stubs_amd64.go (renamed from contrib/go/_std_1.18/src/runtime/stubs_amd64.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/stubs_linux.go20
-rw-r--r--contrib/go/_std_1.19/src/runtime/stubs_nonlinux.go (renamed from contrib/go/_std_1.18/src/runtime/stubs_nonlinux.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/symtab.go1183
-rw-r--r--contrib/go/_std_1.19/src/runtime/sys_darwin.go537
-rw-r--r--contrib/go/_std_1.19/src/runtime/sys_darwin_amd64.s867
-rw-r--r--contrib/go/_std_1.19/src/runtime/sys_libc.go54
-rw-r--r--contrib/go/_std_1.19/src/runtime/sys_linux_amd64.s757
-rw-r--r--contrib/go/_std_1.19/src/runtime/sys_nonppc64x.go (renamed from contrib/go/_std_1.18/src/runtime/sys_nonppc64x.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/sys_x86.go (renamed from contrib/go/_std_1.18/src/runtime/sys_x86.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/textflag.h (renamed from contrib/go/_std_1.18/src/runtime/textflag.h)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/time.go1142
-rw-r--r--contrib/go/_std_1.19/src/runtime/time_linux_amd64.s (renamed from contrib/go/_std_1.18/src/runtime/time_linux_amd64.s)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/time_nofake.go (renamed from contrib/go/_std_1.18/src/runtime/time_nofake.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/timeasm.go (renamed from contrib/go/_std_1.18/src/runtime/timeasm.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/timestub.go (renamed from contrib/go/_std_1.18/src/runtime/timestub.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/tls_stub.go (renamed from contrib/go/_std_1.18/src/runtime/tls_stub.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/trace.go1442
-rw-r--r--contrib/go/_std_1.19/src/runtime/traceback.go1447
-rw-r--r--contrib/go/_std_1.19/src/runtime/type.go719
-rw-r--r--contrib/go/_std_1.19/src/runtime/typekind.go (renamed from contrib/go/_std_1.18/src/runtime/typekind.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/utf8.go (renamed from contrib/go/_std_1.18/src/runtime/utf8.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/vdso_elf64.go79
-rw-r--r--contrib/go/_std_1.19/src/runtime/vdso_in_none.go13
-rw-r--r--contrib/go/_std_1.19/src/runtime/vdso_linux.go295
-rw-r--r--contrib/go/_std_1.19/src/runtime/vdso_linux_amd64.go (renamed from contrib/go/_std_1.18/src/runtime/vdso_linux_amd64.go)0
-rw-r--r--contrib/go/_std_1.19/src/runtime/write_err.go (renamed from contrib/go/_std_1.18/src/runtime/write_err.go)0
-rw-r--r--contrib/go/_std_1.19/src/sort/search.go150
-rw-r--r--contrib/go/_std_1.19/src/sort/slice.go49
-rw-r--r--contrib/go/_std_1.19/src/sort/slice_go113.go (renamed from contrib/go/_std_1.18/src/sort/slice_go113.go)0
-rw-r--r--contrib/go/_std_1.19/src/sort/sort.go262
-rw-r--r--contrib/go/_std_1.19/src/sort/zsortfunc.go479
-rw-r--r--contrib/go/_std_1.19/src/sort/zsortinterface.go479
-rw-r--r--contrib/go/_std_1.19/src/strconv/atob.go (renamed from contrib/go/_std_1.18/src/strconv/atob.go)0
-rw-r--r--contrib/go/_std_1.19/src/strconv/atoc.go (renamed from contrib/go/_std_1.18/src/strconv/atoc.go)0
-rw-r--r--contrib/go/_std_1.19/src/strconv/atof.go709
-rw-r--r--contrib/go/_std_1.19/src/strconv/atoi.go316
-rw-r--r--contrib/go/_std_1.19/src/strconv/bytealg.go (renamed from contrib/go/_std_1.18/src/strconv/bytealg.go)0
-rw-r--r--contrib/go/_std_1.19/src/strconv/ctoa.go (renamed from contrib/go/_std_1.18/src/strconv/ctoa.go)0
-rw-r--r--contrib/go/_std_1.19/src/strconv/decimal.go (renamed from contrib/go/_std_1.18/src/strconv/decimal.go)0
-rw-r--r--contrib/go/_std_1.19/src/strconv/doc.go56
-rw-r--r--contrib/go/_std_1.19/src/strconv/eisel_lemire.go884
-rw-r--r--contrib/go/_std_1.19/src/strconv/ftoa.go585
-rw-r--r--contrib/go/_std_1.19/src/strconv/ftoaryu.go569
-rw-r--r--contrib/go/_std_1.19/src/strconv/isprint.go (renamed from contrib/go/_std_1.18/src/strconv/isprint.go)0
-rw-r--r--contrib/go/_std_1.19/src/strconv/itoa.go205
-rw-r--r--contrib/go/_std_1.19/src/strconv/quote.go604
-rw-r--r--contrib/go/_std_1.19/src/strings/builder.go126
-rw-r--r--contrib/go/_std_1.19/src/strings/clone.go (renamed from contrib/go/_std_1.18/src/strings/clone.go)0
-rw-r--r--contrib/go/_std_1.19/src/strings/compare.go (renamed from contrib/go/_std_1.18/src/strings/compare.go)0
-rw-r--r--contrib/go/_std_1.19/src/strings/reader.go (renamed from contrib/go/_std_1.18/src/strings/reader.go)0
-rw-r--r--contrib/go/_std_1.19/src/strings/replace.go569
-rw-r--r--contrib/go/_std_1.19/src/strings/search.go (renamed from contrib/go/_std_1.18/src/strings/search.go)0
-rw-r--r--contrib/go/_std_1.19/src/strings/strings.go1192
-rw-r--r--contrib/go/_std_1.19/src/sync/atomic/asm.s (renamed from contrib/go/_std_1.18/src/sync/atomic/asm.s)0
-rw-r--r--contrib/go/_std_1.19/src/sync/atomic/doc.go153
-rw-r--r--contrib/go/_std_1.19/src/sync/atomic/type.go191
-rw-r--r--contrib/go/_std_1.19/src/sync/atomic/value.go (renamed from contrib/go/_std_1.18/src/sync/atomic/value.go)0
-rw-r--r--contrib/go/_std_1.19/src/sync/cond.go117
-rw-r--r--contrib/go/_std_1.19/src/sync/map.go393
-rw-r--r--contrib/go/_std_1.19/src/sync/mutex.go259
-rw-r--r--contrib/go/_std_1.19/src/sync/once.go76
-rw-r--r--contrib/go/_std_1.19/src/sync/pool.go297
-rw-r--r--contrib/go/_std_1.19/src/sync/poolqueue.go (renamed from contrib/go/_std_1.18/src/sync/poolqueue.go)0
-rw-r--r--contrib/go/_std_1.19/src/sync/runtime.go (renamed from contrib/go/_std_1.18/src/sync/runtime.go)0
-rw-r--r--contrib/go/_std_1.19/src/sync/runtime2.go (renamed from contrib/go/_std_1.18/src/sync/runtime2.go)0
-rw-r--r--contrib/go/_std_1.19/src/sync/rwmutex.go231
-rw-r--r--contrib/go/_std_1.19/src/sync/waitgroup.go150
-rw-r--r--contrib/go/_std_1.19/src/syscall/asan0.go (renamed from contrib/go/_std_1.18/src/syscall/asan0.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/asm_darwin_amd64.s134
-rw-r--r--contrib/go/_std_1.19/src/syscall/asm_linux_amd64.s68
-rw-r--r--contrib/go/_std_1.19/src/syscall/bpf_darwin.go (renamed from contrib/go/_std_1.18/src/syscall/bpf_darwin.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/dirent.go102
-rw-r--r--contrib/go/_std_1.19/src/syscall/endian_little.go9
-rw-r--r--contrib/go/_std_1.19/src/syscall/env_unix.go155
-rw-r--r--contrib/go/_std_1.19/src/syscall/exec_libc2.go281
-rw-r--r--contrib/go/_std_1.19/src/syscall/exec_linux.go626
-rw-r--r--contrib/go/_std_1.19/src/syscall/exec_unix.go306
-rw-r--r--contrib/go/_std_1.19/src/syscall/flock.go (renamed from contrib/go/_std_1.18/src/syscall/flock.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/flock_darwin.go (renamed from contrib/go/_std_1.18/src/syscall/flock_darwin.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/forkpipe.go21
-rw-r--r--contrib/go/_std_1.19/src/syscall/lsf_linux.go (renamed from contrib/go/_std_1.18/src/syscall/lsf_linux.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/msan0.go (renamed from contrib/go/_std_1.18/src/syscall/msan0.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/net.go (renamed from contrib/go/_std_1.18/src/syscall/net.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/netlink_linux.go (renamed from contrib/go/_std_1.18/src/syscall/netlink_linux.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/ptrace_darwin.go (renamed from contrib/go/_std_1.18/src/syscall/ptrace_darwin.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/route_bsd.go (renamed from contrib/go/_std_1.18/src/syscall/route_bsd.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/route_darwin.go (renamed from contrib/go/_std_1.18/src/syscall/route_darwin.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/setuidgid_linux.go (renamed from contrib/go/_std_1.18/src/syscall/setuidgid_linux.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/sock_cloexec_linux.go (renamed from contrib/go/_std_1.18/src/syscall/sock_cloexec_linux.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/sockcmsg_linux.go (renamed from contrib/go/_std_1.18/src/syscall/sockcmsg_linux.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/sockcmsg_unix.go92
-rw-r--r--contrib/go/_std_1.19/src/syscall/sockcmsg_unix_other.go (renamed from contrib/go/_std_1.18/src/syscall/sockcmsg_unix_other.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/syscall.go102
-rw-r--r--contrib/go/_std_1.19/src/syscall/syscall_bsd.go (renamed from contrib/go/_std_1.18/src/syscall/syscall_bsd.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/syscall_darwin.go339
-rw-r--r--contrib/go/_std_1.19/src/syscall/syscall_darwin_amd64.go (renamed from contrib/go/_std_1.18/src/syscall/syscall_darwin_amd64.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/syscall_linux.go1238
-rw-r--r--contrib/go/_std_1.19/src/syscall/syscall_linux_amd64.go122
-rw-r--r--contrib/go/_std_1.19/src/syscall/syscall_unix.go518
-rw-r--r--contrib/go/_std_1.19/src/syscall/time_nofake.go (renamed from contrib/go/_std_1.18/src/syscall/time_nofake.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/timestruct.go36
-rw-r--r--contrib/go/_std_1.19/src/syscall/zerrors_darwin_amd64.go (renamed from contrib/go/_std_1.18/src/syscall/zerrors_darwin_amd64.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/zerrors_linux_amd64.go (renamed from contrib/go/_std_1.18/src/syscall/zerrors_linux_amd64.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/zsyscall_darwin_amd64.go2004
-rw-r--r--contrib/go/_std_1.19/src/syscall/zsyscall_darwin_amd64.s (renamed from contrib/go/_std_1.18/src/syscall/zsyscall_darwin_amd64.s)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/zsyscall_linux_amd64.go1644
-rw-r--r--contrib/go/_std_1.19/src/syscall/zsysnum_darwin_amd64.go (renamed from contrib/go/_std_1.18/src/syscall/zsysnum_darwin_amd64.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/zsysnum_linux_amd64.go (renamed from contrib/go/_std_1.18/src/syscall/zsysnum_linux_amd64.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/ztypes_darwin_amd64.go (renamed from contrib/go/_std_1.18/src/syscall/ztypes_darwin_amd64.go)0
-rw-r--r--contrib/go/_std_1.19/src/syscall/ztypes_linux_amd64.go (renamed from contrib/go/_std_1.18/src/syscall/ztypes_linux_amd64.go)0
-rw-r--r--contrib/go/_std_1.19/src/text/template/doc.go464
-rw-r--r--contrib/go/_std_1.19/src/text/template/exec.go (renamed from contrib/go/_std_1.18/src/text/template/exec.go)0
-rw-r--r--contrib/go/_std_1.19/src/text/template/funcs.go777
-rw-r--r--contrib/go/_std_1.19/src/text/template/helper.go178
-rw-r--r--contrib/go/_std_1.19/src/text/template/option.go72
-rw-r--r--contrib/go/_std_1.19/src/text/template/parse/lex.go679
-rw-r--r--contrib/go/_std_1.19/src/text/template/parse/node.go (renamed from contrib/go/_std_1.18/src/text/template/parse/node.go)0
-rw-r--r--contrib/go/_std_1.19/src/text/template/parse/parse.go826
-rw-r--r--contrib/go/_std_1.19/src/text/template/template.go (renamed from contrib/go/_std_1.18/src/text/template/template.go)0
-rw-r--r--contrib/go/_std_1.19/src/time/format.go1619
-rw-r--r--contrib/go/_std_1.19/src/time/sleep.go177
-rw-r--r--contrib/go/_std_1.19/src/time/sys_unix.go54
-rw-r--r--contrib/go/_std_1.19/src/time/tick.go73
-rw-r--r--contrib/go/_std_1.19/src/time/time.go1619
-rw-r--r--contrib/go/_std_1.19/src/time/zoneinfo.go687
-rw-r--r--contrib/go/_std_1.19/src/time/zoneinfo_goroot.go14
-rw-r--r--contrib/go/_std_1.19/src/time/zoneinfo_read.go597
-rw-r--r--contrib/go/_std_1.19/src/time/zoneinfo_unix.go67
-rw-r--r--contrib/go/_std_1.19/src/unicode/casetables.go (renamed from contrib/go/_std_1.18/src/unicode/casetables.go)0
-rw-r--r--contrib/go/_std_1.19/src/unicode/digit.go (renamed from contrib/go/_std_1.18/src/unicode/digit.go)0
-rw-r--r--contrib/go/_std_1.19/src/unicode/graphic.go146
-rw-r--r--contrib/go/_std_1.19/src/unicode/letter.go371
-rw-r--r--contrib/go/_std_1.19/src/unicode/tables.go (renamed from contrib/go/_std_1.18/src/unicode/tables.go)0
-rw-r--r--contrib/go/_std_1.19/src/unicode/utf16/utf16.go (renamed from contrib/go/_std_1.18/src/unicode/utf16/utf16.go)0
-rw-r--r--contrib/go/_std_1.19/src/unicode/utf8/utf8.go583
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20/chacha_generic.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20/chacha_generic.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20/xor.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20/xor.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/cryptobyte/builder.go337
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/cryptobyte/string.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/cryptobyte/string.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/curve25519.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/curve25519.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go16
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/hkdf/hkdf.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/hkdf/hkdf.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go309
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/subtle/aliasing.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/subtle/aliasing.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/dns/dnsmessage/message.go2665
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/http/httpguts/guts.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/http/httpguts/guts.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/http/httpguts/httplex.go352
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/http/httpproxy/proxy.go371
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/http2/hpack/encode.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/http2/hpack/encode.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/http2/hpack/hpack.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/http2/hpack/hpack.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/http2/hpack/huffman.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/http2/hpack/huffman.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/http2/hpack/tables.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/http2/hpack/tables.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/go118.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/go118.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/idna10.0.0.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/idna10.0.0.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/punycode.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/punycode.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/tables13.0.0.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/tables13.0.0.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/trie.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/trie.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/trieval.go119
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/address.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/address.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/binary.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/binary.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/empty.s (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/empty.s)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/interface.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/interface.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/interface_classic.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/interface_classic.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/interface_multicast.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/interface_multicast.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/message.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/message.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/route.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/route.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/route_classic.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/route_classic.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/sys.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/sys.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/sys_darwin.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/sys_darwin.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/syscall.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/syscall.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/zsys_darwin.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/zsys_darwin.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/byteorder.go66
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/cpu.go287
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/cpu_x86.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/cpu_x86.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/cpu_x86.s (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/cpu_x86.s)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/hwcap_linux.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/hwcap_linux.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/secure/bidirule/bidirule.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/text/secure/bidirule/bidirule.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/transform/transform.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/text/transform/transform.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/bidi.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/bidi.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/bracket.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/bracket.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/core.go1071
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/prop.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/prop.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/trieval.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/trieval.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/composition.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/composition.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/forminfo.go279
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/input.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/input.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/iter.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/iter.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/normalize.go610
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/readwriter.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/readwriter.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/transform.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/transform.go)0
-rw-r--r--contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/trie.go (renamed from contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/trie.go)0
1363 files changed, 212655 insertions, 204899 deletions
diff --git a/contrib/go/_std_1.18/src/bufio/bufio.go b/contrib/go/_std_1.18/src/bufio/bufio.go
deleted file mode 100644
index 7483946fc0..0000000000
--- a/contrib/go/_std_1.18/src/bufio/bufio.go
+++ /dev/null
@@ -1,813 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer
-// object, creating another object (Reader or Writer) that also implements
-// the interface but provides buffering and some help for textual I/O.
-package bufio
-
-import (
- "bytes"
- "errors"
- "io"
- "strings"
- "unicode/utf8"
-)
-
-const (
- defaultBufSize = 4096
-)
-
-var (
- ErrInvalidUnreadByte = errors.New("bufio: invalid use of UnreadByte")
- ErrInvalidUnreadRune = errors.New("bufio: invalid use of UnreadRune")
- ErrBufferFull = errors.New("bufio: buffer full")
- ErrNegativeCount = errors.New("bufio: negative count")
-)
-
-// Buffered input.
-
-// Reader implements buffering for an io.Reader object.
-type Reader struct {
- buf []byte
- rd io.Reader // reader provided by the client
- r, w int // buf read and write positions
- err error
- lastByte int // last byte read for UnreadByte; -1 means invalid
- lastRuneSize int // size of last rune read for UnreadRune; -1 means invalid
-}
-
-const minReadBufferSize = 16
-const maxConsecutiveEmptyReads = 100
-
-// NewReaderSize returns a new Reader whose buffer has at least the specified
-// size. If the argument io.Reader is already a Reader with large enough
-// size, it returns the underlying Reader.
-func NewReaderSize(rd io.Reader, size int) *Reader {
- // Is it already a Reader?
- b, ok := rd.(*Reader)
- if ok && len(b.buf) >= size {
- return b
- }
- if size < minReadBufferSize {
- size = minReadBufferSize
- }
- r := new(Reader)
- r.reset(make([]byte, size), rd)
- return r
-}
-
-// NewReader returns a new Reader whose buffer has the default size.
-func NewReader(rd io.Reader) *Reader {
- return NewReaderSize(rd, defaultBufSize)
-}
-
-// Size returns the size of the underlying buffer in bytes.
-func (b *Reader) Size() int { return len(b.buf) }
-
-// Reset discards any buffered data, resets all state, and switches
-// the buffered reader to read from r.
-// Calling Reset on the zero value of Reader initializes the internal buffer
-// to the default size.
-func (b *Reader) Reset(r io.Reader) {
- if b.buf == nil {
- b.buf = make([]byte, defaultBufSize)
- }
- b.reset(b.buf, r)
-}
-
-func (b *Reader) reset(buf []byte, r io.Reader) {
- *b = Reader{
- buf: buf,
- rd: r,
- lastByte: -1,
- lastRuneSize: -1,
- }
-}
-
-var errNegativeRead = errors.New("bufio: reader returned negative count from Read")
-
-// fill reads a new chunk into the buffer.
-func (b *Reader) fill() {
- // Slide existing data to beginning.
- if b.r > 0 {
- copy(b.buf, b.buf[b.r:b.w])
- b.w -= b.r
- b.r = 0
- }
-
- if b.w >= len(b.buf) {
- panic("bufio: tried to fill full buffer")
- }
-
- // Read new data: try a limited number of times.
- for i := maxConsecutiveEmptyReads; i > 0; i-- {
- n, err := b.rd.Read(b.buf[b.w:])
- if n < 0 {
- panic(errNegativeRead)
- }
- b.w += n
- if err != nil {
- b.err = err
- return
- }
- if n > 0 {
- return
- }
- }
- b.err = io.ErrNoProgress
-}
-
-func (b *Reader) readErr() error {
- err := b.err
- b.err = nil
- return err
-}
-
-// Peek returns the next n bytes without advancing the reader. The bytes stop
-// being valid at the next read call. If Peek returns fewer than n bytes, it
-// also returns an error explaining why the read is short. The error is
-// ErrBufferFull if n is larger than b's buffer size.
-//
-// Calling Peek prevents an UnreadByte or UnreadRune call from succeeding
-// until the next read operation.
-func (b *Reader) Peek(n int) ([]byte, error) {
- if n < 0 {
- return nil, ErrNegativeCount
- }
-
- b.lastByte = -1
- b.lastRuneSize = -1
-
- for b.w-b.r < n && b.w-b.r < len(b.buf) && b.err == nil {
- b.fill() // b.w-b.r < len(b.buf) => buffer is not full
- }
-
- if n > len(b.buf) {
- return b.buf[b.r:b.w], ErrBufferFull
- }
-
- // 0 <= n <= len(b.buf)
- var err error
- if avail := b.w - b.r; avail < n {
- // not enough data in buffer
- n = avail
- err = b.readErr()
- if err == nil {
- err = ErrBufferFull
- }
- }
- return b.buf[b.r : b.r+n], err
-}
-
-// Discard skips the next n bytes, returning the number of bytes discarded.
-//
-// If Discard skips fewer than n bytes, it also returns an error.
-// If 0 <= n <= b.Buffered(), Discard is guaranteed to succeed without
-// reading from the underlying io.Reader.
-func (b *Reader) Discard(n int) (discarded int, err error) {
- if n < 0 {
- return 0, ErrNegativeCount
- }
- if n == 0 {
- return
- }
-
- b.lastByte = -1
- b.lastRuneSize = -1
-
- remain := n
- for {
- skip := b.Buffered()
- if skip == 0 {
- b.fill()
- skip = b.Buffered()
- }
- if skip > remain {
- skip = remain
- }
- b.r += skip
- remain -= skip
- if remain == 0 {
- return n, nil
- }
- if b.err != nil {
- return n - remain, b.readErr()
- }
- }
-}
-
-// Read reads data into p.
-// It returns the number of bytes read into p.
-// The bytes are taken from at most one Read on the underlying Reader,
-// hence n may be less than len(p).
-// To read exactly len(p) bytes, use io.ReadFull(b, p).
-// At EOF, the count will be zero and err will be io.EOF.
-func (b *Reader) Read(p []byte) (n int, err error) {
- n = len(p)
- if n == 0 {
- if b.Buffered() > 0 {
- return 0, nil
- }
- return 0, b.readErr()
- }
- if b.r == b.w {
- if b.err != nil {
- return 0, b.readErr()
- }
- if len(p) >= len(b.buf) {
- // Large read, empty buffer.
- // Read directly into p to avoid copy.
- n, b.err = b.rd.Read(p)
- if n < 0 {
- panic(errNegativeRead)
- }
- if n > 0 {
- b.lastByte = int(p[n-1])
- b.lastRuneSize = -1
- }
- return n, b.readErr()
- }
- // One read.
- // Do not use b.fill, which will loop.
- b.r = 0
- b.w = 0
- n, b.err = b.rd.Read(b.buf)
- if n < 0 {
- panic(errNegativeRead)
- }
- if n == 0 {
- return 0, b.readErr()
- }
- b.w += n
- }
-
- // copy as much as we can
- // Note: if the slice panics here, it is probably because
- // the underlying reader returned a bad count. See issue 49795.
- n = copy(p, b.buf[b.r:b.w])
- b.r += n
- b.lastByte = int(b.buf[b.r-1])
- b.lastRuneSize = -1
- return n, nil
-}
-
-// ReadByte reads and returns a single byte.
-// If no byte is available, it returns an error.
-func (b *Reader) ReadByte() (byte, error) {
- b.lastRuneSize = -1
- for b.r == b.w {
- if b.err != nil {
- return 0, b.readErr()
- }
- b.fill() // buffer is empty
- }
- c := b.buf[b.r]
- b.r++
- b.lastByte = int(c)
- return c, nil
-}
-
-// UnreadByte unreads the last byte. Only the most recently read byte can be unread.
-//
-// UnreadByte returns an error if the most recent method called on the
-// Reader was not a read operation. Notably, Peek, Discard, and WriteTo are not
-// considered read operations.
-func (b *Reader) UnreadByte() error {
- if b.lastByte < 0 || b.r == 0 && b.w > 0 {
- return ErrInvalidUnreadByte
- }
- // b.r > 0 || b.w == 0
- if b.r > 0 {
- b.r--
- } else {
- // b.r == 0 && b.w == 0
- b.w = 1
- }
- b.buf[b.r] = byte(b.lastByte)
- b.lastByte = -1
- b.lastRuneSize = -1
- return nil
-}
-
-// ReadRune reads a single UTF-8 encoded Unicode character and returns the
-// rune and its size in bytes. If the encoded rune is invalid, it consumes one byte
-// and returns unicode.ReplacementChar (U+FFFD) with a size of 1.
-func (b *Reader) ReadRune() (r rune, size int, err error) {
- for b.r+utf8.UTFMax > b.w && !utf8.FullRune(b.buf[b.r:b.w]) && b.err == nil && b.w-b.r < len(b.buf) {
- b.fill() // b.w-b.r < len(buf) => buffer is not full
- }
- b.lastRuneSize = -1
- if b.r == b.w {
- return 0, 0, b.readErr()
- }
- r, size = rune(b.buf[b.r]), 1
- if r >= utf8.RuneSelf {
- r, size = utf8.DecodeRune(b.buf[b.r:b.w])
- }
- b.r += size
- b.lastByte = int(b.buf[b.r-1])
- b.lastRuneSize = size
- return r, size, nil
-}
-
-// UnreadRune unreads the last rune. If the most recent method called on
-// the Reader was not a ReadRune, UnreadRune returns an error. (In this
-// regard it is stricter than UnreadByte, which will unread the last byte
-// from any read operation.)
-func (b *Reader) UnreadRune() error {
- if b.lastRuneSize < 0 || b.r < b.lastRuneSize {
- return ErrInvalidUnreadRune
- }
- b.r -= b.lastRuneSize
- b.lastByte = -1
- b.lastRuneSize = -1
- return nil
-}
-
-// Buffered returns the number of bytes that can be read from the current buffer.
-func (b *Reader) Buffered() int { return b.w - b.r }
-
-// ReadSlice reads until the first occurrence of delim in the input,
-// returning a slice pointing at the bytes in the buffer.
-// The bytes stop being valid at the next read.
-// If ReadSlice encounters an error before finding a delimiter,
-// it returns all the data in the buffer and the error itself (often io.EOF).
-// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim.
-// Because the data returned from ReadSlice will be overwritten
-// by the next I/O operation, most clients should use
-// ReadBytes or ReadString instead.
-// ReadSlice returns err != nil if and only if line does not end in delim.
-func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
- s := 0 // search start index
- for {
- // Search buffer.
- if i := bytes.IndexByte(b.buf[b.r+s:b.w], delim); i >= 0 {
- i += s
- line = b.buf[b.r : b.r+i+1]
- b.r += i + 1
- break
- }
-
- // Pending error?
- if b.err != nil {
- line = b.buf[b.r:b.w]
- b.r = b.w
- err = b.readErr()
- break
- }
-
- // Buffer full?
- if b.Buffered() >= len(b.buf) {
- b.r = b.w
- line = b.buf
- err = ErrBufferFull
- break
- }
-
- s = b.w - b.r // do not rescan area we scanned before
-
- b.fill() // buffer is not full
- }
-
- // Handle last byte, if any.
- if i := len(line) - 1; i >= 0 {
- b.lastByte = int(line[i])
- b.lastRuneSize = -1
- }
-
- return
-}
-
-// ReadLine is a low-level line-reading primitive. Most callers should use
-// ReadBytes('\n') or ReadString('\n') instead or use a Scanner.
-//
-// ReadLine tries to return a single line, not including the end-of-line bytes.
-// If the line was too long for the buffer then isPrefix is set and the
-// beginning of the line is returned. The rest of the line will be returned
-// from future calls. isPrefix will be false when returning the last fragment
-// of the line. The returned buffer is only valid until the next call to
-// ReadLine. ReadLine either returns a non-nil line or it returns an error,
-// never both.
-//
-// The text returned from ReadLine does not include the line end ("\r\n" or "\n").
-// No indication or error is given if the input ends without a final line end.
-// Calling UnreadByte after ReadLine will always unread the last byte read
-// (possibly a character belonging to the line end) even if that byte is not
-// part of the line returned by ReadLine.
-func (b *Reader) ReadLine() (line []byte, isPrefix bool, err error) {
- line, err = b.ReadSlice('\n')
- if err == ErrBufferFull {
- // Handle the case where "\r\n" straddles the buffer.
- if len(line) > 0 && line[len(line)-1] == '\r' {
- // Put the '\r' back on buf and drop it from line.
- // Let the next call to ReadLine check for "\r\n".
- if b.r == 0 {
- // should be unreachable
- panic("bufio: tried to rewind past start of buffer")
- }
- b.r--
- line = line[:len(line)-1]
- }
- return line, true, nil
- }
-
- if len(line) == 0 {
- if err != nil {
- line = nil
- }
- return
- }
- err = nil
-
- if line[len(line)-1] == '\n' {
- drop := 1
- if len(line) > 1 && line[len(line)-2] == '\r' {
- drop = 2
- }
- line = line[:len(line)-drop]
- }
- return
-}
-
-// collectFragments reads until the first occurrence of delim in the input. It
-// returns (slice of full buffers, remaining bytes before delim, total number
-// of bytes in the combined first two elements, error).
-// The complete result is equal to
-// `bytes.Join(append(fullBuffers, finalFragment), nil)`, which has a
-// length of `totalLen`. The result is structured in this way to allow callers
-// to minimize allocations and copies.
-func (b *Reader) collectFragments(delim byte) (fullBuffers [][]byte, finalFragment []byte, totalLen int, err error) {
- var frag []byte
- // Use ReadSlice to look for delim, accumulating full buffers.
- for {
- var e error
- frag, e = b.ReadSlice(delim)
- if e == nil { // got final fragment
- break
- }
- if e != ErrBufferFull { // unexpected error
- err = e
- break
- }
-
- // Make a copy of the buffer.
- buf := make([]byte, len(frag))
- copy(buf, frag)
- fullBuffers = append(fullBuffers, buf)
- totalLen += len(buf)
- }
-
- totalLen += len(frag)
- return fullBuffers, frag, totalLen, err
-}
-
-// ReadBytes reads until the first occurrence of delim in the input,
-// returning a slice containing the data up to and including the delimiter.
-// If ReadBytes encounters an error before finding a delimiter,
-// it returns the data read before the error and the error itself (often io.EOF).
-// ReadBytes returns err != nil if and only if the returned data does not end in
-// delim.
-// For simple uses, a Scanner may be more convenient.
-func (b *Reader) ReadBytes(delim byte) ([]byte, error) {
- full, frag, n, err := b.collectFragments(delim)
- // Allocate new buffer to hold the full pieces and the fragment.
- buf := make([]byte, n)
- n = 0
- // Copy full pieces and fragment in.
- for i := range full {
- n += copy(buf[n:], full[i])
- }
- copy(buf[n:], frag)
- return buf, err
-}
-
-// ReadString reads until the first occurrence of delim in the input,
-// returning a string containing the data up to and including the delimiter.
-// If ReadString encounters an error before finding a delimiter,
-// it returns the data read before the error and the error itself (often io.EOF).
-// ReadString returns err != nil if and only if the returned data does not end in
-// delim.
-// For simple uses, a Scanner may be more convenient.
-func (b *Reader) ReadString(delim byte) (string, error) {
- full, frag, n, err := b.collectFragments(delim)
- // Allocate new buffer to hold the full pieces and the fragment.
- var buf strings.Builder
- buf.Grow(n)
- // Copy full pieces and fragment in.
- for _, fb := range full {
- buf.Write(fb)
- }
- buf.Write(frag)
- return buf.String(), err
-}
-
-// WriteTo implements io.WriterTo.
-// This may make multiple calls to the Read method of the underlying Reader.
-// If the underlying reader supports the WriteTo method,
-// this calls the underlying WriteTo without buffering.
-func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
- b.lastByte = -1
- b.lastRuneSize = -1
-
- n, err = b.writeBuf(w)
- if err != nil {
- return
- }
-
- if r, ok := b.rd.(io.WriterTo); ok {
- m, err := r.WriteTo(w)
- n += m
- return n, err
- }
-
- if w, ok := w.(io.ReaderFrom); ok {
- m, err := w.ReadFrom(b.rd)
- n += m
- return n, err
- }
-
- if b.w-b.r < len(b.buf) {
- b.fill() // buffer not full
- }
-
- for b.r < b.w {
- // b.r < b.w => buffer is not empty
- m, err := b.writeBuf(w)
- n += m
- if err != nil {
- return n, err
- }
- b.fill() // buffer is empty
- }
-
- if b.err == io.EOF {
- b.err = nil
- }
-
- return n, b.readErr()
-}
-
-var errNegativeWrite = errors.New("bufio: writer returned negative count from Write")
-
-// writeBuf writes the Reader's buffer to the writer.
-func (b *Reader) writeBuf(w io.Writer) (int64, error) {
- n, err := w.Write(b.buf[b.r:b.w])
- if n < 0 {
- panic(errNegativeWrite)
- }
- b.r += n
- return int64(n), err
-}
-
-// buffered output
-
-// Writer implements buffering for an io.Writer object.
-// If an error occurs writing to a Writer, no more data will be
-// accepted and all subsequent writes, and Flush, will return the error.
-// After all data has been written, the client should call the
-// Flush method to guarantee all data has been forwarded to
-// the underlying io.Writer.
-type Writer struct {
- err error
- buf []byte
- n int
- wr io.Writer
-}
-
-// NewWriterSize returns a new Writer whose buffer has at least the specified
-// size. If the argument io.Writer is already a Writer with large enough
-// size, it returns the underlying Writer.
-func NewWriterSize(w io.Writer, size int) *Writer {
- // Is it already a Writer?
- b, ok := w.(*Writer)
- if ok && len(b.buf) >= size {
- return b
- }
- if size <= 0 {
- size = defaultBufSize
- }
- return &Writer{
- buf: make([]byte, size),
- wr: w,
- }
-}
-
-// NewWriter returns a new Writer whose buffer has the default size.
-// If the argument io.Writer is already a Writer with large enough buffer size,
-// it returns the underlying Writer.
-func NewWriter(w io.Writer) *Writer {
- return NewWriterSize(w, defaultBufSize)
-}
-
-// Size returns the size of the underlying buffer in bytes.
-func (b *Writer) Size() int { return len(b.buf) }
-
-// Reset discards any unflushed buffered data, clears any error, and
-// resets b to write its output to w.
-// Calling Reset on the zero value of Writer initializes the internal buffer
-// to the default size.
-func (b *Writer) Reset(w io.Writer) {
- if b.buf == nil {
- b.buf = make([]byte, defaultBufSize)
- }
- b.err = nil
- b.n = 0
- b.wr = w
-}
-
-// Flush writes any buffered data to the underlying io.Writer.
-func (b *Writer) Flush() error {
- if b.err != nil {
- return b.err
- }
- if b.n == 0 {
- return nil
- }
- n, err := b.wr.Write(b.buf[0:b.n])
- if n < b.n && err == nil {
- err = io.ErrShortWrite
- }
- if err != nil {
- if n > 0 && n < b.n {
- copy(b.buf[0:b.n-n], b.buf[n:b.n])
- }
- b.n -= n
- b.err = err
- return err
- }
- b.n = 0
- return nil
-}
-
-// Available returns how many bytes are unused in the buffer.
-func (b *Writer) Available() int { return len(b.buf) - b.n }
-
-// AvailableBuffer returns an empty buffer with b.Available() capacity.
-// This buffer is intended to be appended to and
-// passed to an immediately succeeding Write call.
-// The buffer is only valid until the next write operation on b.
-func (b *Writer) AvailableBuffer() []byte {
- return b.buf[b.n:][:0]
-}
-
-// Buffered returns the number of bytes that have been written into the current buffer.
-func (b *Writer) Buffered() int { return b.n }
-
-// Write writes the contents of p into the buffer.
-// It returns the number of bytes written.
-// If nn < len(p), it also returns an error explaining
-// why the write is short.
-func (b *Writer) Write(p []byte) (nn int, err error) {
- for len(p) > b.Available() && b.err == nil {
- var n int
- if b.Buffered() == 0 {
- // Large write, empty buffer.
- // Write directly from p to avoid copy.
- n, b.err = b.wr.Write(p)
- } else {
- n = copy(b.buf[b.n:], p)
- b.n += n
- b.Flush()
- }
- nn += n
- p = p[n:]
- }
- if b.err != nil {
- return nn, b.err
- }
- n := copy(b.buf[b.n:], p)
- b.n += n
- nn += n
- return nn, nil
-}
-
-// WriteByte writes a single byte.
-func (b *Writer) WriteByte(c byte) error {
- if b.err != nil {
- return b.err
- }
- if b.Available() <= 0 && b.Flush() != nil {
- return b.err
- }
- b.buf[b.n] = c
- b.n++
- return nil
-}
-
-// WriteRune writes a single Unicode code point, returning
-// the number of bytes written and any error.
-func (b *Writer) WriteRune(r rune) (size int, err error) {
- // Compare as uint32 to correctly handle negative runes.
- if uint32(r) < utf8.RuneSelf {
- err = b.WriteByte(byte(r))
- if err != nil {
- return 0, err
- }
- return 1, nil
- }
- if b.err != nil {
- return 0, b.err
- }
- n := b.Available()
- if n < utf8.UTFMax {
- if b.Flush(); b.err != nil {
- return 0, b.err
- }
- n = b.Available()
- if n < utf8.UTFMax {
-			// Can only happen if the buffer is absurdly small.
- return b.WriteString(string(r))
- }
- }
- size = utf8.EncodeRune(b.buf[b.n:], r)
- b.n += size
- return size, nil
-}
-
-// WriteString writes a string.
-// It returns the number of bytes written.
-// If the count is less than len(s), it also returns an error explaining
-// why the write is short.
-func (b *Writer) WriteString(s string) (int, error) {
- nn := 0
- for len(s) > b.Available() && b.err == nil {
- n := copy(b.buf[b.n:], s)
- b.n += n
- nn += n
- s = s[n:]
- b.Flush()
- }
- if b.err != nil {
- return nn, b.err
- }
- n := copy(b.buf[b.n:], s)
- b.n += n
- nn += n
- return nn, nil
-}
-
-// ReadFrom implements io.ReaderFrom. If the underlying writer
-// supports the ReadFrom method, this calls the underlying ReadFrom.
-// If there is buffered data and an underlying ReadFrom, this fills
-// the buffer and writes it before calling ReadFrom.
-func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
- if b.err != nil {
- return 0, b.err
- }
- readerFrom, readerFromOK := b.wr.(io.ReaderFrom)
- var m int
- for {
- if b.Available() == 0 {
- if err1 := b.Flush(); err1 != nil {
- return n, err1
- }
- }
- if readerFromOK && b.Buffered() == 0 {
- nn, err := readerFrom.ReadFrom(r)
- b.err = err
- n += nn
- return n, err
- }
- nr := 0
- for nr < maxConsecutiveEmptyReads {
- m, err = r.Read(b.buf[b.n:])
- if m != 0 || err != nil {
- break
- }
- nr++
- }
- if nr == maxConsecutiveEmptyReads {
- return n, io.ErrNoProgress
- }
- b.n += m
- n += int64(m)
- if err != nil {
- break
- }
- }
- if err == io.EOF {
- // If we filled the buffer exactly, flush preemptively.
- if b.Available() == 0 {
- err = b.Flush()
- } else {
- err = nil
- }
- }
- return n, err
-}
-
-// buffered input and output
-
-// ReadWriter stores pointers to a Reader and a Writer.
-// It implements io.ReadWriter.
-type ReadWriter struct {
- *Reader
- *Writer
-}
-
-// NewReadWriter allocates a new ReadWriter that dispatches to r and w.
-func NewReadWriter(r *Reader, w *Writer) *ReadWriter {
- return &ReadWriter{r, w}
-}
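
The bufio.go removed above (deleted here as part of the switch to Go 1.19.1) is the standard buffered Reader/Writer implementation. As a quick orientation to the API the file defines, a minimal sketch using only the exported bufio functions shown in this diff; it is illustrative and not code from the change itself:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	// Reader side: Peek inspects buffered bytes without consuming them;
	// ReadString then consumes up to and including the delimiter.
	r := bufio.NewReaderSize(strings.NewReader("alpha\nbeta\n"), 16)
	if p, err := r.Peek(5); err == nil {
		fmt.Printf("peek: %q\n", p) // "alpha"
	}
	line, _ := r.ReadString('\n')
	fmt.Printf("line: %q\n", line) // "alpha\n"

	// Writer side: writes accumulate in the buffer (default 4096 bytes)
	// and reach the underlying io.Writer only on Flush or overflow.
	w := bufio.NewWriter(os.Stdout)
	w.WriteString("buffered hello\n")
	w.Flush()
}
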
diff --git a/contrib/go/_std_1.18/src/bufio/scan.go b/contrib/go/_std_1.18/src/bufio/scan.go
deleted file mode 100644
index 4846d4f733..0000000000
--- a/contrib/go/_std_1.18/src/bufio/scan.go
+++ /dev/null
@@ -1,420 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package bufio
-
-import (
- "bytes"
- "errors"
- "io"
- "unicode/utf8"
-)
-
-// Scanner provides a convenient interface for reading data such as
-// a file of newline-delimited lines of text. Successive calls to
-// the Scan method will step through the 'tokens' of a file, skipping
-// the bytes between the tokens. The specification of a token is
-// defined by a split function of type SplitFunc; the default split
-// function breaks the input into lines with line termination stripped. Split
-// functions are defined in this package for scanning a file into
-// lines, bytes, UTF-8-encoded runes, and space-delimited words. The
-// client may instead provide a custom split function.
-//
-// Scanning stops unrecoverably at EOF, the first I/O error, or a token too
-// large to fit in the buffer. When a scan stops, the reader may have
-// advanced arbitrarily far past the last token. Programs that need more
-// control over error handling or large tokens, or must run sequential scans
-// on a reader, should use bufio.Reader instead.
-//
-type Scanner struct {
- r io.Reader // The reader provided by the client.
- split SplitFunc // The function to split the tokens.
- maxTokenSize int // Maximum size of a token; modified by tests.
- token []byte // Last token returned by split.
- buf []byte // Buffer used as argument to split.
- start int // First non-processed byte in buf.
- end int // End of data in buf.
- err error // Sticky error.
- empties int // Count of successive empty tokens.
- scanCalled bool // Scan has been called; buffer is in use.
- done bool // Scan has finished.
-}
-
-// SplitFunc is the signature of the split function used to tokenize the
-// input. The arguments are an initial substring of the remaining unprocessed
-// data and a flag, atEOF, that reports whether the Reader has no more data
-// to give. The return values are the number of bytes to advance the input
-// and the next token to return to the user, if any, plus an error, if any.
-//
-// Scanning stops if the function returns an error, in which case some of
-// the input may be discarded. If that error is ErrFinalToken, scanning
-// stops with no error.
-//
-// Otherwise, the Scanner advances the input. If the token is not nil,
-// the Scanner returns it to the user. If the token is nil, the
-// Scanner reads more data and continues scanning; if there is no more
-// data--if atEOF was true--the Scanner returns. If the data does not
-// yet hold a complete token, for instance if it has no newline while
-// scanning lines, a SplitFunc can return (0, nil, nil) to signal the
-// Scanner to read more data into the slice and try again with a
-// longer slice starting at the same point in the input.
-//
-// The function is never called with an empty data slice unless atEOF
-// is true. If atEOF is true, however, data may be non-empty and,
-// as always, holds unprocessed text.
-type SplitFunc func(data []byte, atEOF bool) (advance int, token []byte, err error)
-
-// Errors returned by Scanner.
-var (
- ErrTooLong = errors.New("bufio.Scanner: token too long")
- ErrNegativeAdvance = errors.New("bufio.Scanner: SplitFunc returns negative advance count")
- ErrAdvanceTooFar = errors.New("bufio.Scanner: SplitFunc returns advance count beyond input")
- ErrBadReadCount = errors.New("bufio.Scanner: Read returned impossible count")
-)
-
-const (
- // MaxScanTokenSize is the maximum size used to buffer a token
- // unless the user provides an explicit buffer with Scanner.Buffer.
- // The actual maximum token size may be smaller as the buffer
- // may need to include, for instance, a newline.
- MaxScanTokenSize = 64 * 1024
-
- startBufSize = 4096 // Size of initial allocation for buffer.
-)
-
-// NewScanner returns a new Scanner to read from r.
-// The split function defaults to ScanLines.
-func NewScanner(r io.Reader) *Scanner {
- return &Scanner{
- r: r,
- split: ScanLines,
- maxTokenSize: MaxScanTokenSize,
- }
-}
-
-// Err returns the first non-EOF error that was encountered by the Scanner.
-func (s *Scanner) Err() error {
- if s.err == io.EOF {
- return nil
- }
- return s.err
-}
-
-// Bytes returns the most recent token generated by a call to Scan.
-// The underlying array may point to data that will be overwritten
-// by a subsequent call to Scan. It does no allocation.
-func (s *Scanner) Bytes() []byte {
- return s.token
-}
-
-// Text returns the most recent token generated by a call to Scan
-// as a newly allocated string holding its bytes.
-func (s *Scanner) Text() string {
- return string(s.token)
-}
-
-// ErrFinalToken is a special sentinel error value. It is intended to be
-// returned by a Split function to indicate that the token being delivered
-// with the error is the last token and scanning should stop after this one.
-// After ErrFinalToken is received by Scan, scanning stops with no error.
-// The value is useful to stop processing early or when it is necessary to
-// deliver a final empty token. One could achieve the same behavior
-// with a custom error value but providing one here is tidier.
-// See the emptyFinalToken example for a use of this value.
-var ErrFinalToken = errors.New("final token")
-
-// Scan advances the Scanner to the next token, which will then be
-// available through the Bytes or Text method. It returns false when the
-// scan stops, either by reaching the end of the input or an error.
-// After Scan returns false, the Err method will return any error that
-// occurred during scanning, except that if it was io.EOF, Err
-// will return nil.
-// Scan panics if the split function returns too many empty
-// tokens without advancing the input. This is a common error mode for
-// scanners.
-func (s *Scanner) Scan() bool {
- if s.done {
- return false
- }
- s.scanCalled = true
- // Loop until we have a token.
- for {
- // See if we can get a token with what we already have.
- // If we've run out of data but have an error, give the split function
- // a chance to recover any remaining, possibly empty token.
- if s.end > s.start || s.err != nil {
- advance, token, err := s.split(s.buf[s.start:s.end], s.err != nil)
- if err != nil {
- if err == ErrFinalToken {
- s.token = token
- s.done = true
- return true
- }
- s.setErr(err)
- return false
- }
- if !s.advance(advance) {
- return false
- }
- s.token = token
- if token != nil {
- if s.err == nil || advance > 0 {
- s.empties = 0
- } else {
-					// Returning tokens without advancing the input at EOF.
- s.empties++
- if s.empties > maxConsecutiveEmptyReads {
- panic("bufio.Scan: too many empty tokens without progressing")
- }
- }
- return true
- }
- }
- // We cannot generate a token with what we are holding.
- // If we've already hit EOF or an I/O error, we are done.
- if s.err != nil {
- // Shut it down.
- s.start = 0
- s.end = 0
- return false
- }
- // Must read more data.
- // First, shift data to beginning of buffer if there's lots of empty space
- // or space is needed.
- if s.start > 0 && (s.end == len(s.buf) || s.start > len(s.buf)/2) {
- copy(s.buf, s.buf[s.start:s.end])
- s.end -= s.start
- s.start = 0
- }
- // Is the buffer full? If so, resize.
- if s.end == len(s.buf) {
- // Guarantee no overflow in the multiplication below.
- const maxInt = int(^uint(0) >> 1)
- if len(s.buf) >= s.maxTokenSize || len(s.buf) > maxInt/2 {
- s.setErr(ErrTooLong)
- return false
- }
- newSize := len(s.buf) * 2
- if newSize == 0 {
- newSize = startBufSize
- }
- if newSize > s.maxTokenSize {
- newSize = s.maxTokenSize
- }
- newBuf := make([]byte, newSize)
- copy(newBuf, s.buf[s.start:s.end])
- s.buf = newBuf
- s.end -= s.start
- s.start = 0
- }
- // Finally we can read some input. Make sure we don't get stuck with
- // a misbehaving Reader. Officially we don't need to do this, but let's
- // be extra careful: Scanner is for safe, simple jobs.
- for loop := 0; ; {
- n, err := s.r.Read(s.buf[s.end:len(s.buf)])
- if n < 0 || len(s.buf)-s.end < n {
- s.setErr(ErrBadReadCount)
- break
- }
- s.end += n
- if err != nil {
- s.setErr(err)
- break
- }
- if n > 0 {
- s.empties = 0
- break
- }
- loop++
- if loop > maxConsecutiveEmptyReads {
- s.setErr(io.ErrNoProgress)
- break
- }
- }
- }
-}
-
-// advance consumes n bytes of the buffer. It reports whether the advance was legal.
-func (s *Scanner) advance(n int) bool {
- if n < 0 {
- s.setErr(ErrNegativeAdvance)
- return false
- }
- if n > s.end-s.start {
- s.setErr(ErrAdvanceTooFar)
- return false
- }
- s.start += n
- return true
-}
-
-// setErr records the first error encountered.
-func (s *Scanner) setErr(err error) {
- if s.err == nil || s.err == io.EOF {
- s.err = err
- }
-}
-
-// Buffer sets the initial buffer to use when scanning and the maximum
-// size of buffer that may be allocated during scanning. The maximum
-// token size is the larger of max and cap(buf). If max <= cap(buf),
-// Scan will use this buffer only and do no allocation.
-//
-// By default, Scan uses an internal buffer and sets the
-// maximum token size to MaxScanTokenSize.
-//
-// Buffer panics if it is called after scanning has started.
-func (s *Scanner) Buffer(buf []byte, max int) {
- if s.scanCalled {
- panic("Buffer called after Scan")
- }
- s.buf = buf[0:cap(buf)]
- s.maxTokenSize = max
-}
-
-// Split sets the split function for the Scanner.
-// The default split function is ScanLines.
-//
-// Split panics if it is called after scanning has started.
-func (s *Scanner) Split(split SplitFunc) {
- if s.scanCalled {
- panic("Split called after Scan")
- }
- s.split = split
-}
-
-// Split functions
-
-// ScanBytes is a split function for a Scanner that returns each byte as a token.
-func ScanBytes(data []byte, atEOF bool) (advance int, token []byte, err error) {
- if atEOF && len(data) == 0 {
- return 0, nil, nil
- }
- return 1, data[0:1], nil
-}
-
-var errorRune = []byte(string(utf8.RuneError))
-
-// ScanRunes is a split function for a Scanner that returns each
-// UTF-8-encoded rune as a token. The sequence of runes returned is
-// equivalent to that from a range loop over the input as a string, which
-// means that erroneous UTF-8 encodings translate to U+FFFD = "\xef\xbf\xbd".
-// Because of the Scan interface, this makes it impossible for the client to
-// distinguish correctly encoded replacement runes from encoding errors.
-func ScanRunes(data []byte, atEOF bool) (advance int, token []byte, err error) {
- if atEOF && len(data) == 0 {
- return 0, nil, nil
- }
-
- // Fast path 1: ASCII.
- if data[0] < utf8.RuneSelf {
- return 1, data[0:1], nil
- }
-
- // Fast path 2: Correct UTF-8 decode without error.
- _, width := utf8.DecodeRune(data)
- if width > 1 {
- // It's a valid encoding. Width cannot be one for a correctly encoded
- // non-ASCII rune.
- return width, data[0:width], nil
- }
-
- // We know it's an error: we have width==1 and implicitly r==utf8.RuneError.
- // Is the error because there wasn't a full rune to be decoded?
- // FullRune distinguishes correctly between erroneous and incomplete encodings.
- if !atEOF && !utf8.FullRune(data) {
- // Incomplete; get more bytes.
- return 0, nil, nil
- }
-
- // We have a real UTF-8 encoding error. Return a properly encoded error rune
- // but advance only one byte. This matches the behavior of a range loop over
- // an incorrectly encoded string.
- return 1, errorRune, nil
-}
-
-// dropCR drops a terminal \r from the data.
-func dropCR(data []byte) []byte {
- if len(data) > 0 && data[len(data)-1] == '\r' {
- return data[0 : len(data)-1]
- }
- return data
-}
-
-// ScanLines is a split function for a Scanner that returns each line of
-// text, stripped of any trailing end-of-line marker. The returned line may
-// be empty. The end-of-line marker is one optional carriage return followed
-// by one mandatory newline. In regular expression notation, it is `\r?\n`.
-// The last non-empty line of input will be returned even if it has no
-// newline.
-func ScanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
- if atEOF && len(data) == 0 {
- return 0, nil, nil
- }
- if i := bytes.IndexByte(data, '\n'); i >= 0 {
- // We have a full newline-terminated line.
- return i + 1, dropCR(data[0:i]), nil
- }
- // If we're at EOF, we have a final, non-terminated line. Return it.
- if atEOF {
- return len(data), dropCR(data), nil
- }
- // Request more data.
- return 0, nil, nil
-}
-
-// isSpace reports whether the character is a Unicode white space character.
-// We avoid dependency on the unicode package, but check validity of the implementation
-// in the tests.
-func isSpace(r rune) bool {
- if r <= '\u00FF' {
- // Obvious ASCII ones: \t through \r plus space. Plus two Latin-1 oddballs.
- switch r {
- case ' ', '\t', '\n', '\v', '\f', '\r':
- return true
- case '\u0085', '\u00A0':
- return true
- }
- return false
- }
- // High-valued ones.
- if '\u2000' <= r && r <= '\u200a' {
- return true
- }
- switch r {
- case '\u1680', '\u2028', '\u2029', '\u202f', '\u205f', '\u3000':
- return true
- }
- return false
-}
-
-// ScanWords is a split function for a Scanner that returns each
-// space-separated word of text, with surrounding spaces deleted. It will
-// never return an empty string. The definition of space is set by
-// unicode.IsSpace.
-func ScanWords(data []byte, atEOF bool) (advance int, token []byte, err error) {
- // Skip leading spaces.
- start := 0
- for width := 0; start < len(data); start += width {
- var r rune
- r, width = utf8.DecodeRune(data[start:])
- if !isSpace(r) {
- break
- }
- }
- // Scan until space, marking end of word.
- for width, i := 0, start; i < len(data); i += width {
- var r rune
- r, width = utf8.DecodeRune(data[i:])
- if isSpace(r) {
- return i + width, data[start:i], nil
- }
- }
- // If we're at EOF, we have a final, non-empty, non-terminated word. Return it.
- if atEOF && len(data) > start {
- return len(data), data[start:], nil
- }
- // Request more data.
- return start, nil, nil
-}
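
The scan.go removed above defines Scanner and the stock split functions (ScanLines, ScanBytes, ScanRunes, ScanWords). The SplitFunc contract documented in that file is easiest to see with a small example; the sketch below uses a hypothetical commaSplit function and is illustrative only:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"strings"
)

// commaSplit is a hypothetical SplitFunc: tokens are comma-separated fields.
func commaSplit(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	if i := bytes.IndexByte(data, ','); i >= 0 {
		return i + 1, data[:i], nil // consume the comma, return the field
	}
	if atEOF {
		return len(data), data, nil // final, unterminated field
	}
	return 0, nil, nil // request more data
}

func main() {
	s := bufio.NewScanner(strings.NewReader("a,b,c"))
	s.Split(commaSplit) // the default would be ScanLines
	for s.Scan() {
		fmt.Println(s.Text())
	}
	if err := s.Err(); err != nil { // io.EOF is filtered out by Err
		fmt.Println("scan error:", err)
	}
}
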
diff --git a/contrib/go/_std_1.18/src/bytes/buffer.go b/contrib/go/_std_1.18/src/bytes/buffer.go
deleted file mode 100644
index 549b077708..0000000000
--- a/contrib/go/_std_1.18/src/bytes/buffer.go
+++ /dev/null
@@ -1,461 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package bytes
-
-// Simple byte buffer for marshaling data.
-
-import (
- "errors"
- "io"
- "unicode/utf8"
-)
-
-// smallBufferSize is the minimal capacity of the buffer's initial allocation.
-const smallBufferSize = 64
-
-// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
-// The zero value for Buffer is an empty buffer ready to use.
-type Buffer struct {
- buf []byte // contents are the bytes buf[off : len(buf)]
- off int // read at &buf[off], write at &buf[len(buf)]
- lastRead readOp // last read operation, so that Unread* can work correctly.
-}
-
-// The readOp constants describe the last action performed on
-// the buffer, so that UnreadRune and UnreadByte can check for
-// invalid usage. opReadRuneX constants are chosen such that,
-// converted to int, they correspond to the rune size that was read.
-type readOp int8
-
-// Don't use iota for these, as the values need to correspond with the
-// names and comments, which is easier to see when being explicit.
-const (
- opRead readOp = -1 // Any other read operation.
- opInvalid readOp = 0 // Non-read operation.
- opReadRune1 readOp = 1 // Read rune of size 1.
- opReadRune2 readOp = 2 // Read rune of size 2.
- opReadRune3 readOp = 3 // Read rune of size 3.
- opReadRune4 readOp = 4 // Read rune of size 4.
-)
-
-// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer.
-var ErrTooLarge = errors.New("bytes.Buffer: too large")
-var errNegativeRead = errors.New("bytes.Buffer: reader returned negative count from Read")
-
-const maxInt = int(^uint(0) >> 1)
-
-// Bytes returns a slice of length b.Len() holding the unread portion of the buffer.
-// The slice is valid for use only until the next buffer modification (that is,
-// only until the next call to a method like Read, Write, Reset, or Truncate).
-// The slice aliases the buffer content at least until the next buffer modification,
-// so immediate changes to the slice will affect the result of future reads.
-func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
-
-// String returns the contents of the unread portion of the buffer
-// as a string. If the Buffer is a nil pointer, it returns "<nil>".
-//
-// To build strings more efficiently, see the strings.Builder type.
-func (b *Buffer) String() string {
- if b == nil {
- // Special case, useful in debugging.
- return "<nil>"
- }
- return string(b.buf[b.off:])
-}
-
-// empty reports whether the unread portion of the buffer is empty.
-func (b *Buffer) empty() bool { return len(b.buf) <= b.off }
-
-// Len returns the number of bytes of the unread portion of the buffer;
-// b.Len() == len(b.Bytes()).
-func (b *Buffer) Len() int { return len(b.buf) - b.off }
-
-// Cap returns the capacity of the buffer's underlying byte slice, that is, the
-// total space allocated for the buffer's data.
-func (b *Buffer) Cap() int { return cap(b.buf) }
-
-// Truncate discards all but the first n unread bytes from the buffer
-// but continues to use the same allocated storage.
-// It panics if n is negative or greater than the length of the buffer.
-func (b *Buffer) Truncate(n int) {
- if n == 0 {
- b.Reset()
- return
- }
- b.lastRead = opInvalid
- if n < 0 || n > b.Len() {
- panic("bytes.Buffer: truncation out of range")
- }
- b.buf = b.buf[:b.off+n]
-}
-
-// Reset resets the buffer to be empty,
-// but it retains the underlying storage for use by future writes.
-// Reset is the same as Truncate(0).
-func (b *Buffer) Reset() {
- b.buf = b.buf[:0]
- b.off = 0
- b.lastRead = opInvalid
-}
-
-// tryGrowByReslice is an inlineable version of grow for the fast case where the
-// internal buffer only needs to be resliced.
-// It returns the index where bytes should be written and whether it succeeded.
-func (b *Buffer) tryGrowByReslice(n int) (int, bool) {
- if l := len(b.buf); n <= cap(b.buf)-l {
- b.buf = b.buf[:l+n]
- return l, true
- }
- return 0, false
-}
-
-// grow grows the buffer to guarantee space for n more bytes.
-// It returns the index where bytes should be written.
-// If the buffer can't grow it will panic with ErrTooLarge.
-func (b *Buffer) grow(n int) int {
- m := b.Len()
- // If buffer is empty, reset to recover space.
- if m == 0 && b.off != 0 {
- b.Reset()
- }
- // Try to grow by means of a reslice.
- if i, ok := b.tryGrowByReslice(n); ok {
- return i
- }
- if b.buf == nil && n <= smallBufferSize {
- b.buf = make([]byte, n, smallBufferSize)
- return 0
- }
- c := cap(b.buf)
- if n <= c/2-m {
- // We can slide things down instead of allocating a new
- // slice. We only need m+n <= c to slide, but
- // we instead let capacity get twice as large so we
- // don't spend all our time copying.
- copy(b.buf, b.buf[b.off:])
- } else if c > maxInt-c-n {
- panic(ErrTooLarge)
- } else {
- // Not enough space anywhere, we need to allocate.
- buf := makeSlice(2*c + n)
- copy(buf, b.buf[b.off:])
- b.buf = buf
- }
- // Restore b.off and len(b.buf).
- b.off = 0
- b.buf = b.buf[:m+n]
- return m
-}
-
-// Grow grows the buffer's capacity, if necessary, to guarantee space for
-// another n bytes. After Grow(n), at least n bytes can be written to the
-// buffer without another allocation.
-// If n is negative, Grow will panic.
-// If the buffer can't grow it will panic with ErrTooLarge.
-func (b *Buffer) Grow(n int) {
- if n < 0 {
- panic("bytes.Buffer.Grow: negative count")
- }
- m := b.grow(n)
- b.buf = b.buf[:m]
-}
-
-// Write appends the contents of p to the buffer, growing the buffer as
-// needed. The return value n is the length of p; err is always nil. If the
-// buffer becomes too large, Write will panic with ErrTooLarge.
-func (b *Buffer) Write(p []byte) (n int, err error) {
- b.lastRead = opInvalid
- m, ok := b.tryGrowByReslice(len(p))
- if !ok {
- m = b.grow(len(p))
- }
- return copy(b.buf[m:], p), nil
-}
-
-// WriteString appends the contents of s to the buffer, growing the buffer as
-// needed. The return value n is the length of s; err is always nil. If the
-// buffer becomes too large, WriteString will panic with ErrTooLarge.
-func (b *Buffer) WriteString(s string) (n int, err error) {
- b.lastRead = opInvalid
- m, ok := b.tryGrowByReslice(len(s))
- if !ok {
- m = b.grow(len(s))
- }
- return copy(b.buf[m:], s), nil
-}
-
-// MinRead is the minimum slice size passed to a Read call by
-// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
-// what is required to hold the contents of r, ReadFrom will not grow the
-// underlying buffer.
-const MinRead = 512
-
-// ReadFrom reads data from r until EOF and appends it to the buffer, growing
-// the buffer as needed. The return value n is the number of bytes read. Any
-// error except io.EOF encountered during the read is also returned. If the
-// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
-func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
- b.lastRead = opInvalid
- for {
- i := b.grow(MinRead)
- b.buf = b.buf[:i]
- m, e := r.Read(b.buf[i:cap(b.buf)])
- if m < 0 {
- panic(errNegativeRead)
- }
-
- b.buf = b.buf[:i+m]
- n += int64(m)
- if e == io.EOF {
- return n, nil // e is EOF, so return nil explicitly
- }
- if e != nil {
- return n, e
- }
- }
-}
-
-// makeSlice allocates a slice of size n. If the allocation fails, it panics
-// with ErrTooLarge.
-func makeSlice(n int) []byte {
- // If the make fails, give a known error.
- defer func() {
- if recover() != nil {
- panic(ErrTooLarge)
- }
- }()
- return make([]byte, n)
-}
-
-// WriteTo writes data to w until the buffer is drained or an error occurs.
-// The return value n is the number of bytes written; it always fits into an
-// int, but it is int64 to match the io.WriterTo interface. Any error
-// encountered during the write is also returned.
-func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
- b.lastRead = opInvalid
- if nBytes := b.Len(); nBytes > 0 {
- m, e := w.Write(b.buf[b.off:])
- if m > nBytes {
- panic("bytes.Buffer.WriteTo: invalid Write count")
- }
- b.off += m
- n = int64(m)
- if e != nil {
- return n, e
- }
- // all bytes should have been written, by definition of
- // Write method in io.Writer
- if m != nBytes {
- return n, io.ErrShortWrite
- }
- }
- // Buffer is now empty; reset.
- b.Reset()
- return n, nil
-}
-
-// WriteByte appends the byte c to the buffer, growing the buffer as needed.
-// The returned error is always nil, but is included to match bufio.Writer's
-// WriteByte. If the buffer becomes too large, WriteByte will panic with
-// ErrTooLarge.
-func (b *Buffer) WriteByte(c byte) error {
- b.lastRead = opInvalid
- m, ok := b.tryGrowByReslice(1)
- if !ok {
- m = b.grow(1)
- }
- b.buf[m] = c
- return nil
-}
-
-// WriteRune appends the UTF-8 encoding of Unicode code point r to the
-// buffer, returning its length and an error, which is always nil but is
-// included to match bufio.Writer's WriteRune. The buffer is grown as needed;
-// if it becomes too large, WriteRune will panic with ErrTooLarge.
-func (b *Buffer) WriteRune(r rune) (n int, err error) {
- // Compare as uint32 to correctly handle negative runes.
- if uint32(r) < utf8.RuneSelf {
- b.WriteByte(byte(r))
- return 1, nil
- }
- b.lastRead = opInvalid
- m, ok := b.tryGrowByReslice(utf8.UTFMax)
- if !ok {
- m = b.grow(utf8.UTFMax)
- }
- n = utf8.EncodeRune(b.buf[m:m+utf8.UTFMax], r)
- b.buf = b.buf[:m+n]
- return n, nil
-}
-
-// Read reads the next len(p) bytes from the buffer or until the buffer
-// is drained. The return value n is the number of bytes read. If the
-// buffer has no data to return, err is io.EOF (unless len(p) is zero);
-// otherwise it is nil.
-func (b *Buffer) Read(p []byte) (n int, err error) {
- b.lastRead = opInvalid
- if b.empty() {
- // Buffer is empty, reset to recover space.
- b.Reset()
- if len(p) == 0 {
- return 0, nil
- }
- return 0, io.EOF
- }
- n = copy(p, b.buf[b.off:])
- b.off += n
- if n > 0 {
- b.lastRead = opRead
- }
- return n, nil
-}
-
-// Next returns a slice containing the next n bytes from the buffer,
-// advancing the buffer as if the bytes had been returned by Read.
-// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
-// The slice is only valid until the next call to a read or write method.
-func (b *Buffer) Next(n int) []byte {
- b.lastRead = opInvalid
- m := b.Len()
- if n > m {
- n = m
- }
- data := b.buf[b.off : b.off+n]
- b.off += n
- if n > 0 {
- b.lastRead = opRead
- }
- return data
-}
-
-// ReadByte reads and returns the next byte from the buffer.
-// If no byte is available, it returns error io.EOF.
-func (b *Buffer) ReadByte() (byte, error) {
- if b.empty() {
- // Buffer is empty, reset to recover space.
- b.Reset()
- return 0, io.EOF
- }
- c := b.buf[b.off]
- b.off++
- b.lastRead = opRead
- return c, nil
-}
-
-// ReadRune reads and returns the next UTF-8-encoded
-// Unicode code point from the buffer.
-// If no bytes are available, the error returned is io.EOF.
-// If the bytes are an erroneous UTF-8 encoding, it
-// consumes one byte and returns U+FFFD, 1.
-func (b *Buffer) ReadRune() (r rune, size int, err error) {
- if b.empty() {
- // Buffer is empty, reset to recover space.
- b.Reset()
- return 0, 0, io.EOF
- }
- c := b.buf[b.off]
- if c < utf8.RuneSelf {
- b.off++
- b.lastRead = opReadRune1
- return rune(c), 1, nil
- }
- r, n := utf8.DecodeRune(b.buf[b.off:])
- b.off += n
- b.lastRead = readOp(n)
- return r, n, nil
-}
-
-// UnreadRune unreads the last rune returned by ReadRune.
-// If the most recent read or write operation on the buffer was
-// not a successful ReadRune, UnreadRune returns an error. (In this regard
-// it is stricter than UnreadByte, which will unread the last byte
-// from any read operation.)
-func (b *Buffer) UnreadRune() error {
- if b.lastRead <= opInvalid {
- return errors.New("bytes.Buffer: UnreadRune: previous operation was not a successful ReadRune")
- }
- if b.off >= int(b.lastRead) {
- b.off -= int(b.lastRead)
- }
- b.lastRead = opInvalid
- return nil
-}
-
-var errUnreadByte = errors.New("bytes.Buffer: UnreadByte: previous operation was not a successful read")
-
-// UnreadByte unreads the last byte returned by the most recent successful
-// read operation that read at least one byte. If a write has happened since
-// the last read, if the last read returned an error, or if the read read zero
-// bytes, UnreadByte returns an error.
-func (b *Buffer) UnreadByte() error {
- if b.lastRead == opInvalid {
- return errUnreadByte
- }
- b.lastRead = opInvalid
- if b.off > 0 {
- b.off--
- }
- return nil
-}
-
-// ReadBytes reads until the first occurrence of delim in the input,
-// returning a slice containing the data up to and including the delimiter.
-// If ReadBytes encounters an error before finding a delimiter,
-// it returns the data read before the error and the error itself (often io.EOF).
-// ReadBytes returns err != nil if and only if the returned data does not end in
-// delim.
-func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
- slice, err := b.readSlice(delim)
- // return a copy of slice. The buffer's backing array may
- // be overwritten by later calls.
- line = append(line, slice...)
- return line, err
-}
-
-// readSlice is like ReadBytes but returns a reference to internal buffer data.
-func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
- i := IndexByte(b.buf[b.off:], delim)
- end := b.off + i + 1
- if i < 0 {
- end = len(b.buf)
- err = io.EOF
- }
- line = b.buf[b.off:end]
- b.off = end
- b.lastRead = opRead
- return line, err
-}
-
-// ReadString reads until the first occurrence of delim in the input,
-// returning a string containing the data up to and including the delimiter.
-// If ReadString encounters an error before finding a delimiter,
-// it returns the data read before the error and the error itself (often io.EOF).
-// ReadString returns err != nil if and only if the returned data does not end
-// in delim.
-func (b *Buffer) ReadString(delim byte) (line string, err error) {
- slice, err := b.readSlice(delim)
- return string(slice), err
-}
-
-// NewBuffer creates and initializes a new Buffer using buf as its
-// initial contents. The new Buffer takes ownership of buf, and the
-// caller should not use buf after this call. NewBuffer is intended to
-// prepare a Buffer to read existing data. It can also be used to set
-// the initial size of the internal buffer for writing. To do that,
-// buf should have the desired capacity but a length of zero.
-//
-// In most cases, new(Buffer) (or just declaring a Buffer variable) is
-// sufficient to initialize a Buffer.
-func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
-
-// NewBufferString creates and initializes a new Buffer using string s as its
-// initial contents. It is intended to prepare a buffer to read an existing
-// string.
-//
-// In most cases, new(Buffer) (or just declaring a Buffer variable) is
-// sufficient to initialize a Buffer.
-func NewBufferString(s string) *Buffer {
- return &Buffer{buf: []byte(s)}
-}
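
The buffer.go removed above implements bytes.Buffer, whose zero value is an empty buffer ready to use; growth is handled internally, panicking with ErrTooLarge only when allocation fails. A minimal usage sketch (illustrative, not from the change):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	var b bytes.Buffer // zero value: empty buffer, no allocation yet
	b.Grow(64)         // optional pre-sizing to avoid regrowth while writing
	b.WriteString("key=")
	b.WriteString("value\n")

	line, err := b.ReadString('\n') // consumes up to and including '\n'
	fmt.Printf("%q err=%v unread=%d\n", line, err, b.Len())
}
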
diff --git a/contrib/go/_std_1.18/src/bytes/bytes.go b/contrib/go/_std_1.18/src/bytes/bytes.go
deleted file mode 100644
index e3dab4d035..0000000000
--- a/contrib/go/_std_1.18/src/bytes/bytes.go
+++ /dev/null
@@ -1,1296 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package bytes implements functions for the manipulation of byte slices.
-// It is analogous to the facilities of the strings package.
-package bytes
-
-import (
- "internal/bytealg"
- "unicode"
- "unicode/utf8"
-)
-
-// Equal reports whether a and b
-// are the same length and contain the same bytes.
-// A nil argument is equivalent to an empty slice.
-func Equal(a, b []byte) bool {
- // Neither cmd/compile nor gccgo allocates for these string conversions.
- return string(a) == string(b)
-}
-
-// Compare returns an integer comparing two byte slices lexicographically.
-// The result will be 0 if a == b, -1 if a < b, and +1 if a > b.
-// A nil argument is equivalent to an empty slice.
-func Compare(a, b []byte) int {
- return bytealg.Compare(a, b)
-}
-
-// explode splits s into a slice of UTF-8 sequences, one per Unicode code point (still slices of bytes),
-// up to a maximum of n byte slices. Invalid UTF-8 sequences are chopped into individual bytes.
-func explode(s []byte, n int) [][]byte {
- if n <= 0 {
- n = len(s)
- }
- a := make([][]byte, n)
- var size int
- na := 0
- for len(s) > 0 {
- if na+1 >= n {
- a[na] = s
- na++
- break
- }
- _, size = utf8.DecodeRune(s)
- a[na] = s[0:size:size]
- s = s[size:]
- na++
- }
- return a[0:na]
-}
-
-// Count counts the number of non-overlapping instances of sep in s.
-// If sep is an empty slice, Count returns 1 + the number of UTF-8-encoded code points in s.
-func Count(s, sep []byte) int {
- // special case
- if len(sep) == 0 {
- return utf8.RuneCount(s) + 1
- }
- if len(sep) == 1 {
- return bytealg.Count(s, sep[0])
- }
- n := 0
- for {
- i := Index(s, sep)
- if i == -1 {
- return n
- }
- n++
- s = s[i+len(sep):]
- }
-}
-
-// Contains reports whether subslice is within b.
-func Contains(b, subslice []byte) bool {
- return Index(b, subslice) != -1
-}
-
-// ContainsAny reports whether any of the UTF-8-encoded code points in chars are within b.
-func ContainsAny(b []byte, chars string) bool {
- return IndexAny(b, chars) >= 0
-}
-
-// ContainsRune reports whether the rune is contained in the UTF-8-encoded byte slice b.
-func ContainsRune(b []byte, r rune) bool {
- return IndexRune(b, r) >= 0
-}
-
-// IndexByte returns the index of the first instance of c in b, or -1 if c is not present in b.
-func IndexByte(b []byte, c byte) int {
- return bytealg.IndexByte(b, c)
-}
-
-func indexBytePortable(s []byte, c byte) int {
- for i, b := range s {
- if b == c {
- return i
- }
- }
- return -1
-}
-
-// LastIndex returns the index of the last instance of sep in s, or -1 if sep is not present in s.
-func LastIndex(s, sep []byte) int {
- n := len(sep)
- switch {
- case n == 0:
- return len(s)
- case n == 1:
- return LastIndexByte(s, sep[0])
- case n == len(s):
- if Equal(s, sep) {
- return 0
- }
- return -1
- case n > len(s):
- return -1
- }
- // Rabin-Karp search from the end of the string
- hashss, pow := bytealg.HashStrRevBytes(sep)
- last := len(s) - n
- var h uint32
- for i := len(s) - 1; i >= last; i-- {
- h = h*bytealg.PrimeRK + uint32(s[i])
- }
- if h == hashss && Equal(s[last:], sep) {
- return last
- }
- for i := last - 1; i >= 0; i-- {
- h *= bytealg.PrimeRK
- h += uint32(s[i])
- h -= pow * uint32(s[i+n])
- if h == hashss && Equal(s[i:i+n], sep) {
- return i
- }
- }
- return -1
-}
-
-// LastIndexByte returns the index of the last instance of c in s, or -1 if c is not present in s.
-func LastIndexByte(s []byte, c byte) int {
- for i := len(s) - 1; i >= 0; i-- {
- if s[i] == c {
- return i
- }
- }
- return -1
-}
-
-// IndexRune interprets s as a sequence of UTF-8-encoded code points.
-// It returns the byte index of the first occurrence in s of the given rune.
-// It returns -1 if r is not present in s.
-// If r is utf8.RuneError, it returns the first instance of any
-// invalid UTF-8 byte sequence.
-func IndexRune(s []byte, r rune) int {
- switch {
- case 0 <= r && r < utf8.RuneSelf:
- return IndexByte(s, byte(r))
- case r == utf8.RuneError:
- for i := 0; i < len(s); {
- r1, n := utf8.DecodeRune(s[i:])
- if r1 == utf8.RuneError {
- return i
- }
- i += n
- }
- return -1
- case !utf8.ValidRune(r):
- return -1
- default:
- var b [utf8.UTFMax]byte
- n := utf8.EncodeRune(b[:], r)
- return Index(s, b[:n])
- }
-}
-
-// IndexAny interprets s as a sequence of UTF-8-encoded Unicode code points.
-// It returns the byte index of the first occurrence in s of any of the Unicode
-// code points in chars. It returns -1 if chars is empty or if there is no code
-// point in common.
-func IndexAny(s []byte, chars string) int {
- if chars == "" {
- // Avoid scanning all of s.
- return -1
- }
- if len(s) == 1 {
- r := rune(s[0])
- if r >= utf8.RuneSelf {
- // search utf8.RuneError.
- for _, r = range chars {
- if r == utf8.RuneError {
- return 0
- }
- }
- return -1
- }
- if bytealg.IndexByteString(chars, s[0]) >= 0 {
- return 0
- }
- return -1
- }
- if len(chars) == 1 {
- r := rune(chars[0])
- if r >= utf8.RuneSelf {
- r = utf8.RuneError
- }
- return IndexRune(s, r)
- }
- if len(s) > 8 {
- if as, isASCII := makeASCIISet(chars); isASCII {
- for i, c := range s {
- if as.contains(c) {
- return i
- }
- }
- return -1
- }
- }
- var width int
- for i := 0; i < len(s); i += width {
- r := rune(s[i])
- if r < utf8.RuneSelf {
- if bytealg.IndexByteString(chars, s[i]) >= 0 {
- return i
- }
- width = 1
- continue
- }
- r, width = utf8.DecodeRune(s[i:])
- if r != utf8.RuneError {
- // r is 2 to 4 bytes
- if len(chars) == width {
- if chars == string(r) {
- return i
- }
- continue
- }
- // Use bytealg.IndexString for performance if available.
- if bytealg.MaxLen >= width {
- if bytealg.IndexString(chars, string(r)) >= 0 {
- return i
- }
- continue
- }
- }
- for _, ch := range chars {
- if r == ch {
- return i
- }
- }
- }
- return -1
-}
-
-// LastIndexAny interprets s as a sequence of UTF-8-encoded Unicode code
-// points. It returns the byte index of the last occurrence in s of any of
-// the Unicode code points in chars. It returns -1 if chars is empty or if
-// there is no code point in common.
-func LastIndexAny(s []byte, chars string) int {
- if chars == "" {
- // Avoid scanning all of s.
- return -1
- }
- if len(s) > 8 {
- if as, isASCII := makeASCIISet(chars); isASCII {
- for i := len(s) - 1; i >= 0; i-- {
- if as.contains(s[i]) {
- return i
- }
- }
- return -1
- }
- }
- if len(s) == 1 {
- r := rune(s[0])
- if r >= utf8.RuneSelf {
- for _, r = range chars {
- if r == utf8.RuneError {
- return 0
- }
- }
- return -1
- }
- if bytealg.IndexByteString(chars, s[0]) >= 0 {
- return 0
- }
- return -1
- }
- if len(chars) == 1 {
- cr := rune(chars[0])
- if cr >= utf8.RuneSelf {
- cr = utf8.RuneError
- }
- for i := len(s); i > 0; {
- r, size := utf8.DecodeLastRune(s[:i])
- i -= size
- if r == cr {
- return i
- }
- }
- return -1
- }
- for i := len(s); i > 0; {
- r := rune(s[i-1])
- if r < utf8.RuneSelf {
- if bytealg.IndexByteString(chars, s[i-1]) >= 0 {
- return i - 1
- }
- i--
- continue
- }
- r, size := utf8.DecodeLastRune(s[:i])
- i -= size
- if r != utf8.RuneError {
- // r is 2 to 4 bytes
- if len(chars) == size {
- if chars == string(r) {
- return i
- }
- continue
- }
- // Use bytealg.IndexString for performance if available.
- if bytealg.MaxLen >= size {
- if bytealg.IndexString(chars, string(r)) >= 0 {
- return i
- }
- continue
- }
- }
- for _, ch := range chars {
- if r == ch {
- return i
- }
- }
- }
- return -1
-}
-
-// Generic split: splits after each instance of sep,
-// including sepSave bytes of sep in the subslices.
-func genSplit(s, sep []byte, sepSave, n int) [][]byte {
- if n == 0 {
- return nil
- }
- if len(sep) == 0 {
- return explode(s, n)
- }
- if n < 0 {
- n = Count(s, sep) + 1
- }
-
- a := make([][]byte, n)
- n--
- i := 0
- for i < n {
- m := Index(s, sep)
- if m < 0 {
- break
- }
- a[i] = s[: m+sepSave : m+sepSave]
- s = s[m+len(sep):]
- i++
- }
- a[i] = s
- return a[:i+1]
-}
-
-// SplitN slices s into subslices separated by sep and returns a slice of
-// the subslices between those separators.
-// If sep is empty, SplitN splits after each UTF-8 sequence.
-// The count determines the number of subslices to return:
-// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
-// n == 0: the result is nil (zero subslices)
-// n < 0: all subslices
-//
-// To split around the first instance of a separator, see Cut.
-func SplitN(s, sep []byte, n int) [][]byte { return genSplit(s, sep, 0, n) }
-
-// SplitAfterN slices s into subslices after each instance of sep and
-// returns a slice of those subslices.
-// If sep is empty, SplitAfterN splits after each UTF-8 sequence.
-// The count determines the number of subslices to return:
-// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
-// n == 0: the result is nil (zero subslices)
-// n < 0: all subslices
-func SplitAfterN(s, sep []byte, n int) [][]byte {
- return genSplit(s, sep, len(sep), n)
-}
-
-// Split slices s into all subslices separated by sep and returns a slice of
-// the subslices between those separators.
-// If sep is empty, Split splits after each UTF-8 sequence.
-// It is equivalent to SplitN with a count of -1.
-//
-// To split around the first instance of a separator, see Cut.
-func Split(s, sep []byte) [][]byte { return genSplit(s, sep, 0, -1) }
-
-// SplitAfter slices s into all subslices after each instance of sep and
-// returns a slice of those subslices.
-// If sep is empty, SplitAfter splits after each UTF-8 sequence.
-// It is equivalent to SplitAfterN with a count of -1.
-func SplitAfter(s, sep []byte) [][]byte {
- return genSplit(s, sep, len(sep), -1)
-}
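
A sketch contrasting the Split variants above (assumes "bytes" and "fmt" are imported):

	fmt.Printf("%q\n", bytes.Split([]byte("a,b,c"), []byte(",")))      // ["a" "b" "c"]
	fmt.Printf("%q\n", bytes.SplitN([]byte("a,b,c"), []byte(","), 2))  // ["a" "b,c"]
	fmt.Printf("%q\n", bytes.SplitAfter([]byte("a,b,c"), []byte(","))) // ["a," "b," "c"]
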
-
-var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
-
-// Fields interprets s as a sequence of UTF-8-encoded code points.
-// It splits the slice s around each instance of one or more consecutive white space
-// characters, as defined by unicode.IsSpace, returning a slice of subslices of s or an
-// empty slice if s contains only white space.
-func Fields(s []byte) [][]byte {
- // First count the fields.
- // This is an exact count if s is ASCII, otherwise it is an approximation.
- n := 0
- wasSpace := 1
- // setBits is used to track which bits are set in the bytes of s.
- setBits := uint8(0)
- for i := 0; i < len(s); i++ {
- r := s[i]
- setBits |= r
- isSpace := int(asciiSpace[r])
- n += wasSpace & ^isSpace
- wasSpace = isSpace
- }
-
- if setBits >= utf8.RuneSelf {
- // Some runes in the input slice are not ASCII.
- return FieldsFunc(s, unicode.IsSpace)
- }
-
- // ASCII fast path
- a := make([][]byte, n)
- na := 0
- fieldStart := 0
- i := 0
- // Skip spaces in the front of the input.
- for i < len(s) && asciiSpace[s[i]] != 0 {
- i++
- }
- fieldStart = i
- for i < len(s) {
- if asciiSpace[s[i]] == 0 {
- i++
- continue
- }
- a[na] = s[fieldStart:i:i]
- na++
- i++
- // Skip spaces in between fields.
- for i < len(s) && asciiSpace[s[i]] != 0 {
- i++
- }
- fieldStart = i
- }
- if fieldStart < len(s) { // Last field might end at EOF.
- a[na] = s[fieldStart:len(s):len(s)]
- }
- return a
-}
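
For reference, Fields collapses any run of white space into a single separator (assumes "bytes" and "fmt"):

	fmt.Printf("%q\n", bytes.Fields([]byte("  foo bar  baz   "))) // ["foo" "bar" "baz"]
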
-
-// FieldsFunc interprets s as a sequence of UTF-8-encoded code points.
-// It splits the slice s at each run of code points c satisfying f(c) and
-// returns a slice of subslices of s. If all code points in s satisfy f(c), or
-// len(s) == 0, an empty slice is returned.
-//
-// FieldsFunc makes no guarantees about the order in which it calls f(c)
-// and assumes that f always returns the same value for a given c.
-func FieldsFunc(s []byte, f func(rune) bool) [][]byte {
- // A span is used to record a slice of s of the form s[start:end].
- // The start index is inclusive and the end index is exclusive.
- type span struct {
- start int
- end int
- }
- spans := make([]span, 0, 32)
-
- // Find the field start and end indices.
-	// Doing this in a separate pass (rather than slicing s and collecting
-	// the result subslices right away) is significantly more efficient,
-	// possibly due to cache effects.
- start := -1 // valid span start if >= 0
- for i := 0; i < len(s); {
- size := 1
- r := rune(s[i])
- if r >= utf8.RuneSelf {
- r, size = utf8.DecodeRune(s[i:])
- }
- if f(r) {
- if start >= 0 {
- spans = append(spans, span{start, i})
- start = -1
- }
- } else {
- if start < 0 {
- start = i
- }
- }
- i += size
- }
-
- // Last field might end at EOF.
- if start >= 0 {
- spans = append(spans, span{start, len(s)})
- }
-
- // Create subslices from recorded field indices.
- a := make([][]byte, len(spans))
- for i, span := range spans {
- a[i] = s[span.start:span.end:span.end]
- }
-
- return a
-}
-
-// Join concatenates the elements of s to create a new byte slice. The separator
-// sep is placed between elements in the resulting slice.
-func Join(s [][]byte, sep []byte) []byte {
- if len(s) == 0 {
- return []byte{}
- }
- if len(s) == 1 {
- // Just return a copy.
- return append([]byte(nil), s[0]...)
- }
- n := len(sep) * (len(s) - 1)
- for _, v := range s {
- n += len(v)
- }
-
- b := make([]byte, n)
- bp := copy(b, s[0])
- for _, v := range s[1:] {
- bp += copy(b[bp:], sep)
- bp += copy(b[bp:], v)
- }
- return b
-}
-
-// HasPrefix tests whether the byte slice s begins with prefix.
-func HasPrefix(s, prefix []byte) bool {
- return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
-}
-
-// HasSuffix tests whether the byte slice s ends with suffix.
-func HasSuffix(s, suffix []byte) bool {
- return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix)
-}
-
-// Map returns a copy of the byte slice s with all its characters modified
-// according to the mapping function. If mapping returns a negative value, the character is
-// dropped from the byte slice with no replacement. The characters in s and the
-// output are interpreted as UTF-8-encoded code points.
-func Map(mapping func(r rune) rune, s []byte) []byte {
- // In the worst case, the slice can grow when mapped, making
- // things unpleasant. But it's so rare we barge in assuming it's
- // fine. It could also shrink but that falls out naturally.
- maxbytes := len(s) // length of b
- nbytes := 0 // number of bytes encoded in b
- b := make([]byte, maxbytes)
- for i := 0; i < len(s); {
- wid := 1
- r := rune(s[i])
- if r >= utf8.RuneSelf {
- r, wid = utf8.DecodeRune(s[i:])
- }
- r = mapping(r)
- if r >= 0 {
- rl := utf8.RuneLen(r)
- if rl < 0 {
- rl = len(string(utf8.RuneError))
- }
- if nbytes+rl > maxbytes {
- // Grow the buffer.
- maxbytes = maxbytes*2 + utf8.UTFMax
- nb := make([]byte, maxbytes)
- copy(nb, b[0:nbytes])
- b = nb
- }
- nbytes += utf8.EncodeRune(b[nbytes:maxbytes], r)
- }
- i += wid
- }
- return b[0:nbytes]
-}
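
A sketch of the drop-on-negative behavior described above (assumes "bytes", "fmt", and "unicode" are imported):

	upperLetters := bytes.Map(func(r rune) rune {
		if unicode.IsLetter(r) {
			return unicode.ToUpper(r)
		}
		return -1 // negative result: drop the rune entirely
	}, []byte("a1b2c3"))
	fmt.Println(string(upperLetters)) // ABC
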
-
-// Repeat returns a new byte slice consisting of count copies of b.
-//
-// It panics if count is negative or if
-// the result of (len(b) * count) overflows.
-func Repeat(b []byte, count int) []byte {
- if count == 0 {
- return []byte{}
- }
- // Since we cannot return an error on overflow,
- // we should panic if the repeat will generate
- // an overflow.
- // See Issue golang.org/issue/16237.
- if count < 0 {
- panic("bytes: negative Repeat count")
- } else if len(b)*count/count != len(b) {
- panic("bytes: Repeat count causes overflow")
- }
-
- nb := make([]byte, len(b)*count)
- bp := copy(nb, b)
- for bp < len(nb) {
- copy(nb[bp:], nb[:bp])
- bp *= 2
- }
- return nb
-}
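
A one-line usage sketch of Repeat (assumes "bytes" and "fmt"):

	fmt.Println(string(bytes.Repeat([]byte("ab"), 3))) // ababab
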
-
-// ToUpper returns a copy of the byte slice s with all Unicode letters mapped to
-// their upper case.
-func ToUpper(s []byte) []byte {
- isASCII, hasLower := true, false
- for i := 0; i < len(s); i++ {
- c := s[i]
- if c >= utf8.RuneSelf {
- isASCII = false
- break
- }
- hasLower = hasLower || ('a' <= c && c <= 'z')
- }
-
- if isASCII { // optimize for ASCII-only byte slices.
- if !hasLower {
- // Just return a copy.
- return append([]byte(""), s...)
- }
- b := make([]byte, len(s))
- for i := 0; i < len(s); i++ {
- c := s[i]
- if 'a' <= c && c <= 'z' {
- c -= 'a' - 'A'
- }
- b[i] = c
- }
- return b
- }
- return Map(unicode.ToUpper, s)
-}
-
-// ToLower returns a copy of the byte slice s with all Unicode letters mapped to
-// their lower case.
-func ToLower(s []byte) []byte {
- isASCII, hasUpper := true, false
- for i := 0; i < len(s); i++ {
- c := s[i]
- if c >= utf8.RuneSelf {
- isASCII = false
- break
- }
- hasUpper = hasUpper || ('A' <= c && c <= 'Z')
- }
-
- if isASCII { // optimize for ASCII-only byte slices.
- if !hasUpper {
- return append([]byte(""), s...)
- }
- b := make([]byte, len(s))
- for i := 0; i < len(s); i++ {
- c := s[i]
- if 'A' <= c && c <= 'Z' {
- c += 'a' - 'A'
- }
- b[i] = c
- }
- return b
- }
- return Map(unicode.ToLower, s)
-}
-
-// ToTitle treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their title case.
-func ToTitle(s []byte) []byte { return Map(unicode.ToTitle, s) }
-
-// ToUpperSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
-// upper case, giving priority to the special casing rules.
-func ToUpperSpecial(c unicode.SpecialCase, s []byte) []byte {
- return Map(c.ToUpper, s)
-}
-
-// ToLowerSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
-// lower case, giving priority to the special casing rules.
-func ToLowerSpecial(c unicode.SpecialCase, s []byte) []byte {
- return Map(c.ToLower, s)
-}
-
-// ToTitleSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
-// title case, giving priority to the special casing rules.
-func ToTitleSpecial(c unicode.SpecialCase, s []byte) []byte {
- return Map(c.ToTitle, s)
-}
-
-// ToValidUTF8 treats s as UTF-8-encoded bytes and returns a copy with each run of bytes
-// representing invalid UTF-8 replaced with the bytes in replacement, which may be empty.
-func ToValidUTF8(s, replacement []byte) []byte {
- b := make([]byte, 0, len(s)+len(replacement))
- invalid := false // previous byte was from an invalid UTF-8 sequence
- for i := 0; i < len(s); {
- c := s[i]
- if c < utf8.RuneSelf {
- i++
- invalid = false
- b = append(b, c)
- continue
- }
- _, wid := utf8.DecodeRune(s[i:])
- if wid == 1 {
- i++
- if !invalid {
- invalid = true
- b = append(b, replacement...)
- }
- continue
- }
- invalid = false
- b = append(b, s[i:i+wid]...)
- i += wid
- }
- return b
-}
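
Note that a run of consecutive invalid bytes yields a single copy of replacement, as sketched here (assumes "bytes" and "fmt"):

	b := []byte{'a', 0xff, 0xfe, 'b'} // 0xff 0xfe is one invalid run
	fmt.Printf("%q\n", bytes.ToValidUTF8(b, []byte("?"))) // "a?b"
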
-
-// isSeparator reports whether the rune could mark a word boundary.
-// TODO: update when package unicode captures more of the properties.
-func isSeparator(r rune) bool {
- // ASCII alphanumerics and underscore are not separators
- if r <= 0x7F {
- switch {
- case '0' <= r && r <= '9':
- return false
- case 'a' <= r && r <= 'z':
- return false
- case 'A' <= r && r <= 'Z':
- return false
- case r == '_':
- return false
- }
- return true
- }
- // Letters and digits are not separators
- if unicode.IsLetter(r) || unicode.IsDigit(r) {
- return false
- }
- // Otherwise, all we can do for now is treat spaces as separators.
- return unicode.IsSpace(r)
-}
-
-// Title treats s as UTF-8-encoded bytes and returns a copy with all Unicode letters that begin
-// words mapped to their title case.
-//
-// Deprecated: The rule Title uses for word boundaries does not handle Unicode
-// punctuation properly. Use golang.org/x/text/cases instead.
-func Title(s []byte) []byte {
- // Use a closure here to remember state.
- // Hackish but effective. Depends on Map scanning in order and calling
- // the closure once per rune.
- prev := ' '
- return Map(
- func(r rune) rune {
- if isSeparator(prev) {
- prev = r
- return unicode.ToTitle(r)
- }
- prev = r
- return r
- },
- s)
-}
-
-// TrimLeftFunc treats s as UTF-8-encoded bytes and returns a subslice of s by slicing off
-// all leading UTF-8-encoded code points c that satisfy f(c).
-func TrimLeftFunc(s []byte, f func(r rune) bool) []byte {
- i := indexFunc(s, f, false)
- if i == -1 {
- return nil
- }
- return s[i:]
-}
-
-// TrimRightFunc returns a subslice of s by slicing off all trailing
-// UTF-8-encoded code points c that satisfy f(c).
-func TrimRightFunc(s []byte, f func(r rune) bool) []byte {
- i := lastIndexFunc(s, f, false)
- if i >= 0 && s[i] >= utf8.RuneSelf {
- _, wid := utf8.DecodeRune(s[i:])
- i += wid
- } else {
- i++
- }
- return s[0:i]
-}
-
-// TrimFunc returns a subslice of s by slicing off all leading and trailing
-// UTF-8-encoded code points c that satisfy f(c).
-func TrimFunc(s []byte, f func(r rune) bool) []byte {
- return TrimRightFunc(TrimLeftFunc(s, f), f)
-}
-
-// TrimPrefix returns s without the provided leading prefix string.
-// If s doesn't start with prefix, s is returned unchanged.
-func TrimPrefix(s, prefix []byte) []byte {
- if HasPrefix(s, prefix) {
- return s[len(prefix):]
- }
- return s
-}
-
-// TrimSuffix returns s without the provided trailing suffix string.
-// If s doesn't end with suffix, s is returned unchanged.
-func TrimSuffix(s, suffix []byte) []byte {
- if HasSuffix(s, suffix) {
- return s[:len(s)-len(suffix)]
- }
- return s
-}
-
-// IndexFunc interprets s as a sequence of UTF-8-encoded code points.
-// It returns the byte index in s of the first Unicode
-// code point satisfying f(c), or -1 if none do.
-func IndexFunc(s []byte, f func(r rune) bool) int {
- return indexFunc(s, f, true)
-}
-
-// LastIndexFunc interprets s as a sequence of UTF-8-encoded code points.
-// It returns the byte index in s of the last Unicode
-// code point satisfying f(c), or -1 if none do.
-func LastIndexFunc(s []byte, f func(r rune) bool) int {
- return lastIndexFunc(s, f, true)
-}
-
-// indexFunc is the same as IndexFunc except that if
-// truth==false, the sense of the predicate function is
-// inverted.
-func indexFunc(s []byte, f func(r rune) bool, truth bool) int {
- start := 0
- for start < len(s) {
- wid := 1
- r := rune(s[start])
- if r >= utf8.RuneSelf {
- r, wid = utf8.DecodeRune(s[start:])
- }
- if f(r) == truth {
- return start
- }
- start += wid
- }
- return -1
-}
-
-// lastIndexFunc is the same as LastIndexFunc except that if
-// truth==false, the sense of the predicate function is
-// inverted.
-func lastIndexFunc(s []byte, f func(r rune) bool, truth bool) int {
- for i := len(s); i > 0; {
- r, size := rune(s[i-1]), 1
- if r >= utf8.RuneSelf {
- r, size = utf8.DecodeLastRune(s[0:i])
- }
- i -= size
- if f(r) == truth {
- return i
- }
- }
- return -1
-}
-
-// asciiSet is a 32-byte value, where each bit represents the presence of a
-// given ASCII character in the set. The 128 bits of the lower 16 bytes,
-// starting with the least-significant bit of the lowest word to the
-// most-significant bit of the highest word, map to the full range of all
-// 128 ASCII characters. The 128 bits of the upper 16 bytes will be zeroed,
-// ensuring that any non-ASCII character will be reported as not in the set.
-// This allocates a total of 32 bytes even though the upper half
-// is unused, in order to avoid bounds checks in asciiSet.contains.
-type asciiSet [8]uint32
-
-// makeASCIISet creates a set of ASCII characters and reports whether all
-// characters in chars are ASCII.
-func makeASCIISet(chars string) (as asciiSet, ok bool) {
- for i := 0; i < len(chars); i++ {
- c := chars[i]
- if c >= utf8.RuneSelf {
- return as, false
- }
- as[c/32] |= 1 << (c % 32)
- }
- return as, true
-}
-
-// contains reports whether c is inside the set.
-func (as *asciiSet) contains(c byte) bool {
- return (as[c/32] & (1 << (c % 32))) != 0
-}
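
The bit layout described above can be illustrated with a standalone sketch (the names here are illustrative, not from this file; assumes "fmt"):

	var set [8]uint32 // 256 bits; only the low 128 are ever set for ASCII input
	for _, c := range []byte("\t\n ") {
		set[c/32] |= 1 << (c % 32) // bit c%32 of word c/32
	}
	has := func(c byte) bool { return set[c/32]&(1<<(c%32)) != 0 }
	fmt.Println(has(' '), has('x')) // true false
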
-
-// containsRune is a simplified version of strings.ContainsRune
-// to avoid importing the strings package.
-// We avoid bytes.ContainsRune to avoid allocating a temporary copy of s.
-func containsRune(s string, r rune) bool {
- for _, c := range s {
- if c == r {
- return true
- }
- }
- return false
-}
-
-// Trim returns a subslice of s by slicing off all leading and
-// trailing UTF-8-encoded code points contained in cutset.
-func Trim(s []byte, cutset string) []byte {
- if len(s) == 0 {
- // This is what we've historically done.
- return nil
- }
- if cutset == "" {
- return s
- }
- if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
- return trimLeftByte(trimRightByte(s, cutset[0]), cutset[0])
- }
- if as, ok := makeASCIISet(cutset); ok {
- return trimLeftASCII(trimRightASCII(s, &as), &as)
- }
- return trimLeftUnicode(trimRightUnicode(s, cutset), cutset)
-}
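
Note that cutset is a set of code points, not a prefix or suffix string (assumes "bytes" and "fmt"):

	fmt.Printf("%q\n", bytes.Trim([]byte("__hello__"), "_"))   // "hello"
	fmt.Printf("%q\n", bytes.TrimLeft([]byte("xyxya!"), "xy")) // "a!"
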
-
-// TrimLeft returns a subslice of s by slicing off all leading
-// UTF-8-encoded code points contained in cutset.
-func TrimLeft(s []byte, cutset string) []byte {
- if len(s) == 0 {
- // This is what we've historically done.
- return nil
- }
- if cutset == "" {
- return s
- }
- if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
- return trimLeftByte(s, cutset[0])
- }
- if as, ok := makeASCIISet(cutset); ok {
- return trimLeftASCII(s, &as)
- }
- return trimLeftUnicode(s, cutset)
-}
-
-func trimLeftByte(s []byte, c byte) []byte {
- for len(s) > 0 && s[0] == c {
- s = s[1:]
- }
- if len(s) == 0 {
- // This is what we've historically done.
- return nil
- }
- return s
-}
-
-func trimLeftASCII(s []byte, as *asciiSet) []byte {
- for len(s) > 0 {
- if !as.contains(s[0]) {
- break
- }
- s = s[1:]
- }
- if len(s) == 0 {
- // This is what we've historically done.
- return nil
- }
- return s
-}
-
-func trimLeftUnicode(s []byte, cutset string) []byte {
- for len(s) > 0 {
- r, n := rune(s[0]), 1
- if r >= utf8.RuneSelf {
- r, n = utf8.DecodeRune(s)
- }
- if !containsRune(cutset, r) {
- break
- }
- s = s[n:]
- }
- if len(s) == 0 {
- // This is what we've historically done.
- return nil
- }
- return s
-}
-
-// TrimRight returns a subslice of s by slicing off all trailing
-// UTF-8-encoded code points that are contained in cutset.
-func TrimRight(s []byte, cutset string) []byte {
- if len(s) == 0 || cutset == "" {
- return s
- }
- if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
- return trimRightByte(s, cutset[0])
- }
- if as, ok := makeASCIISet(cutset); ok {
- return trimRightASCII(s, &as)
- }
- return trimRightUnicode(s, cutset)
-}
-
-func trimRightByte(s []byte, c byte) []byte {
- for len(s) > 0 && s[len(s)-1] == c {
- s = s[:len(s)-1]
- }
- return s
-}
-
-func trimRightASCII(s []byte, as *asciiSet) []byte {
- for len(s) > 0 {
- if !as.contains(s[len(s)-1]) {
- break
- }
- s = s[:len(s)-1]
- }
- return s
-}
-
-func trimRightUnicode(s []byte, cutset string) []byte {
- for len(s) > 0 {
- r, n := rune(s[len(s)-1]), 1
- if r >= utf8.RuneSelf {
- r, n = utf8.DecodeLastRune(s)
- }
- if !containsRune(cutset, r) {
- break
- }
- s = s[:len(s)-n]
- }
- return s
-}
-
-// TrimSpace returns a subslice of s by slicing off all leading and
-// trailing white space, as defined by Unicode.
-func TrimSpace(s []byte) []byte {
- // Fast path for ASCII: look for the first ASCII non-space byte
- start := 0
- for ; start < len(s); start++ {
- c := s[start]
- if c >= utf8.RuneSelf {
- // If we run into a non-ASCII byte, fall back to the
- // slower unicode-aware method on the remaining bytes
- return TrimFunc(s[start:], unicode.IsSpace)
- }
- if asciiSpace[c] == 0 {
- break
- }
- }
-
- // Now look for the first ASCII non-space byte from the end
- stop := len(s)
- for ; stop > start; stop-- {
- c := s[stop-1]
- if c >= utf8.RuneSelf {
- return TrimFunc(s[start:stop], unicode.IsSpace)
- }
- if asciiSpace[c] == 0 {
- break
- }
- }
-
-	// At this point s[start:stop] starts and ends with an ASCII
-	// non-space byte, so we're done. Non-ASCII cases have already
-	// been handled above.
- if start == stop {
- // Special case to preserve previous TrimLeftFunc behavior,
- // returning nil instead of empty slice if all spaces.
- return nil
- }
- return s[start:stop]
-}
-
-// Runes interprets s as a sequence of UTF-8-encoded code points.
-// It returns a slice of runes (Unicode code points) equivalent to s.
-func Runes(s []byte) []rune {
- t := make([]rune, utf8.RuneCount(s))
- i := 0
- for len(s) > 0 {
- r, l := utf8.DecodeRune(s)
- t[i] = r
- i++
- s = s[l:]
- }
- return t
-}
-
-// Replace returns a copy of the slice s with the first n
-// non-overlapping instances of old replaced by new.
-// If old is empty, it matches at the beginning of the slice
-// and after each UTF-8 sequence, yielding up to k+1 replacements
-// for a k-rune slice.
-// If n < 0, there is no limit on the number of replacements.
-func Replace(s, old, new []byte, n int) []byte {
- m := 0
- if n != 0 {
- // Compute number of replacements.
- m = Count(s, old)
- }
- if m == 0 {
- // Just return a copy.
- return append([]byte(nil), s...)
- }
- if n < 0 || m < n {
- n = m
- }
-
- // Apply replacements to buffer.
- t := make([]byte, len(s)+n*(len(new)-len(old)))
- w := 0
- start := 0
- for i := 0; i < n; i++ {
- j := start
- if len(old) == 0 {
- if i > 0 {
- _, wid := utf8.DecodeRune(s[start:])
- j += wid
- }
- } else {
- j += Index(s[start:], old)
- }
- w += copy(t[w:], s[start:j])
- w += copy(t[w:], new)
- start = j + len(old)
- }
- w += copy(t[w:], s[start:])
- return t[0:w]
-}
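
A sketch of Replace, including the empty-old case described above (assumes "bytes" and "fmt"):

	fmt.Printf("%q\n", bytes.Replace([]byte("oink oink"), []byte("k"), []byte("ky"), 1)) // "oinky oink"
	fmt.Printf("%q\n", bytes.Replace([]byte("abc"), []byte(""), []byte("-"), -1))        // "-a-b-c-"
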
-
-// ReplaceAll returns a copy of the slice s with all
-// non-overlapping instances of old replaced by new.
-// If old is empty, it matches at the beginning of the slice
-// and after each UTF-8 sequence, yielding up to k+1 replacements
-// for a k-rune slice.
-func ReplaceAll(s, old, new []byte) []byte {
- return Replace(s, old, new, -1)
-}
-
-// EqualFold reports whether s and t, interpreted as UTF-8 strings,
-// are equal under Unicode case-folding, which is a more general
-// form of case-insensitivity.
-func EqualFold(s, t []byte) bool {
- for len(s) != 0 && len(t) != 0 {
- // Extract first rune from each.
- var sr, tr rune
- if s[0] < utf8.RuneSelf {
- sr, s = rune(s[0]), s[1:]
- } else {
- r, size := utf8.DecodeRune(s)
- sr, s = r, s[size:]
- }
- if t[0] < utf8.RuneSelf {
- tr, t = rune(t[0]), t[1:]
- } else {
- r, size := utf8.DecodeRune(t)
- tr, t = r, t[size:]
- }
-
- // If they match, keep going; if not, return false.
-
- // Easy case.
- if tr == sr {
- continue
- }
-
- // Make sr < tr to simplify what follows.
- if tr < sr {
- tr, sr = sr, tr
- }
- // Fast check for ASCII.
- if tr < utf8.RuneSelf {
- // ASCII only, sr/tr must be upper/lower case
- if 'A' <= sr && sr <= 'Z' && tr == sr+'a'-'A' {
- continue
- }
- return false
- }
-
- // General case. SimpleFold(x) returns the next equivalent rune > x
- // or wraps around to smaller values.
- r := unicode.SimpleFold(sr)
- for r != sr && r < tr {
- r = unicode.SimpleFold(r)
- }
- if r == tr {
- continue
- }
- return false
- }
-
- // One string is empty. Are both?
- return len(s) == len(t)
-}
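
EqualFold goes beyond ASCII case mapping; for example, the Kelvin sign folds to 'k' (assumes "bytes" and "fmt"):

	fmt.Println(bytes.EqualFold([]byte("Go"), []byte("GO")))    // true
	fmt.Println(bytes.EqualFold([]byte("\u212a"), []byte("k"))) // true: U+212A KELVIN SIGN
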
-
-// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
-func Index(s, sep []byte) int {
- n := len(sep)
- switch {
- case n == 0:
- return 0
- case n == 1:
- return IndexByte(s, sep[0])
- case n == len(s):
- if Equal(sep, s) {
- return 0
- }
- return -1
- case n > len(s):
- return -1
- case n <= bytealg.MaxLen:
- // Use brute force when s and sep both are small
- if len(s) <= bytealg.MaxBruteForce {
- return bytealg.Index(s, sep)
- }
- c0 := sep[0]
- c1 := sep[1]
- i := 0
- t := len(s) - n + 1
- fails := 0
- for i < t {
- if s[i] != c0 {
- // IndexByte is faster than bytealg.Index, so use it as long as
- // we're not getting lots of false positives.
- o := IndexByte(s[i+1:t], c0)
- if o < 0 {
- return -1
- }
- i += o + 1
- }
- if s[i+1] == c1 && Equal(s[i:i+n], sep) {
- return i
- }
- fails++
- i++
- // Switch to bytealg.Index when IndexByte produces too many false positives.
- if fails > bytealg.Cutover(i) {
- r := bytealg.Index(s[i:], sep)
- if r >= 0 {
- return r + i
- }
- return -1
- }
- }
- return -1
- }
- c0 := sep[0]
- c1 := sep[1]
- i := 0
- fails := 0
- t := len(s) - n + 1
- for i < t {
- if s[i] != c0 {
- o := IndexByte(s[i+1:t], c0)
- if o < 0 {
- break
- }
- i += o + 1
- }
- if s[i+1] == c1 && Equal(s[i:i+n], sep) {
- return i
- }
- i++
- fails++
- if fails >= 4+i>>4 && i < t {
- // Give up on IndexByte, it isn't skipping ahead
- // far enough to be better than Rabin-Karp.
- // Experiments (using IndexPeriodic) suggest
- // the cutover is about 16 byte skips.
- // TODO: if large prefixes of sep are matching
- // we should cutover at even larger average skips,
- // because Equal becomes that much more expensive.
- // This code does not take that effect into account.
- j := bytealg.IndexRabinKarpBytes(s[i:], sep)
- if j < 0 {
- return -1
- }
- return i + j
- }
- }
- return -1
-}
-
-// Cut slices s around the first instance of sep,
-// returning the text before and after sep.
-// The found result reports whether sep appears in s.
-// If sep does not appear in s, Cut returns s, nil, false.
-//
-// Cut returns slices of the original slice s, not copies.
-func Cut(s, sep []byte) (before, after []byte, found bool) {
- if i := Index(s, sep); i >= 0 {
- return s[:i], s[i+len(sep):], true
- }
- return s, nil, false
-}
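
A typical Cut call for key/value parsing (assumes "bytes" and "fmt"):

	before, after, found := bytes.Cut([]byte("key=value"), []byte("="))
	fmt.Printf("%q %q %v\n", before, after, found) // "key" "value" true
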
diff --git a/contrib/go/_std_1.18/src/bytes/reader.go b/contrib/go/_std_1.18/src/bytes/reader.go
deleted file mode 100644
index 5946cf9780..0000000000
--- a/contrib/go/_std_1.18/src/bytes/reader.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package bytes
-
-import (
- "errors"
- "io"
- "unicode/utf8"
-)
-
-// A Reader implements the io.Reader, io.ReaderAt, io.WriterTo, io.Seeker,
-// io.ByteScanner, and io.RuneScanner interfaces by reading from
-// a byte slice.
-// Unlike a Buffer, a Reader is read-only and supports seeking.
-// The zero value for Reader operates like a Reader of an empty slice.
-type Reader struct {
- s []byte
- i int64 // current reading index
- prevRune int // index of previous rune; or < 0
-}
-
-// Len returns the number of bytes of the unread portion of the
-// slice.
-func (r *Reader) Len() int {
- if r.i >= int64(len(r.s)) {
- return 0
- }
- return int(int64(len(r.s)) - r.i)
-}
-
-// Size returns the original length of the underlying byte slice.
-// Size is the number of bytes available for reading via ReadAt.
-// The returned value is always the same and is not affected by calls
-// to any other method.
-func (r *Reader) Size() int64 { return int64(len(r.s)) }
-
-// Read implements the io.Reader interface.
-func (r *Reader) Read(b []byte) (n int, err error) {
- if r.i >= int64(len(r.s)) {
- return 0, io.EOF
- }
- r.prevRune = -1
- n = copy(b, r.s[r.i:])
- r.i += int64(n)
- return
-}
-
-// ReadAt implements the io.ReaderAt interface.
-func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) {
- // cannot modify state - see io.ReaderAt
- if off < 0 {
- return 0, errors.New("bytes.Reader.ReadAt: negative offset")
- }
- if off >= int64(len(r.s)) {
- return 0, io.EOF
- }
- n = copy(b, r.s[off:])
- if n < len(b) {
- err = io.EOF
- }
- return
-}
-
-// ReadByte implements the io.ByteReader interface.
-func (r *Reader) ReadByte() (byte, error) {
- r.prevRune = -1
- if r.i >= int64(len(r.s)) {
- return 0, io.EOF
- }
- b := r.s[r.i]
- r.i++
- return b, nil
-}
-
-// UnreadByte complements ReadByte in implementing the io.ByteScanner interface.
-func (r *Reader) UnreadByte() error {
- if r.i <= 0 {
- return errors.New("bytes.Reader.UnreadByte: at beginning of slice")
- }
- r.prevRune = -1
- r.i--
- return nil
-}
-
-// ReadRune implements the io.RuneReader interface.
-func (r *Reader) ReadRune() (ch rune, size int, err error) {
- if r.i >= int64(len(r.s)) {
- r.prevRune = -1
- return 0, 0, io.EOF
- }
- r.prevRune = int(r.i)
- if c := r.s[r.i]; c < utf8.RuneSelf {
- r.i++
- return rune(c), 1, nil
- }
- ch, size = utf8.DecodeRune(r.s[r.i:])
- r.i += int64(size)
- return
-}
-
-// UnreadRune complements ReadRune in implementing the io.RuneScanner interface.
-func (r *Reader) UnreadRune() error {
- if r.i <= 0 {
- return errors.New("bytes.Reader.UnreadRune: at beginning of slice")
- }
- if r.prevRune < 0 {
- return errors.New("bytes.Reader.UnreadRune: previous operation was not ReadRune")
- }
- r.i = int64(r.prevRune)
- r.prevRune = -1
- return nil
-}
-
-// Seek implements the io.Seeker interface.
-func (r *Reader) Seek(offset int64, whence int) (int64, error) {
- r.prevRune = -1
- var abs int64
- switch whence {
- case io.SeekStart:
- abs = offset
- case io.SeekCurrent:
- abs = r.i + offset
- case io.SeekEnd:
- abs = int64(len(r.s)) + offset
- default:
- return 0, errors.New("bytes.Reader.Seek: invalid whence")
- }
- if abs < 0 {
- return 0, errors.New("bytes.Reader.Seek: negative position")
- }
- r.i = abs
- return abs, nil
-}
-
-// WriteTo implements the io.WriterTo interface.
-func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
- r.prevRune = -1
- if r.i >= int64(len(r.s)) {
- return 0, nil
- }
- b := r.s[r.i:]
- m, err := w.Write(b)
- if m > len(b) {
- panic("bytes.Reader.WriteTo: invalid Write count")
- }
- r.i += int64(m)
- n = int64(m)
- if m != len(b) && err == nil {
- err = io.ErrShortWrite
- }
- return
-}
-
-// Reset resets the Reader to be reading from b.
-func (r *Reader) Reset(b []byte) { *r = Reader{b, 0, -1} }
-
-// NewReader returns a new Reader reading from b.
-func NewReader(b []byte) *Reader { return &Reader{b, 0, -1} }
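
A short sketch of the Reader above; note that ReadAt does not advance the read index (assumes "bytes", "fmt", "io", and "os"):

	r := bytes.NewReader([]byte("hello"))
	b := make([]byte, 2)
	r.ReadAt(b, 3)        // reads "lo" without moving the index
	fmt.Printf("%q\n", b) // "lo"
	io.Copy(os.Stdout, r) // still writes all of "hello"
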
diff --git a/contrib/go/_std_1.18/src/compress/flate/deflate.go b/contrib/go/_std_1.18/src/compress/flate/deflate.go
deleted file mode 100644
index 550032176d..0000000000
--- a/contrib/go/_std_1.18/src/compress/flate/deflate.go
+++ /dev/null
@@ -1,748 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-import (
- "fmt"
- "io"
- "math"
-)
-
-const (
- NoCompression = 0
- BestSpeed = 1
- BestCompression = 9
- DefaultCompression = -1
-
- // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
- // entropy encoding. This mode is useful in compressing data that has
- // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
- // that lacks an entropy encoder. Compression gains are achieved when
- // certain bytes in the input stream occur more frequently than others.
- //
- // Note that HuffmanOnly produces a compressed output that is
- // RFC 1951 compliant. That is, any valid DEFLATE decompressor will
- // continue to be able to decompress this output.
- HuffmanOnly = -2
-)
-
-const (
- logWindowSize = 15
- windowSize = 1 << logWindowSize
- windowMask = windowSize - 1
-
- // The LZ77 step produces a sequence of literal tokens and <length, offset>
- // pair tokens. The offset is also known as distance. The underlying wire
- // format limits the range of lengths and offsets. For example, there are
- // 256 legitimate lengths: those in the range [3, 258]. This package's
- // compressor uses a higher minimum match length, enabling optimizations
- // such as finding matches via 32-bit loads and compares.
- baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
- minMatchLength = 4 // The smallest match length that the compressor actually emits
- maxMatchLength = 258 // The largest match length
- baseMatchOffset = 1 // The smallest match offset
- maxMatchOffset = 1 << 15 // The largest match offset
-
- // The maximum number of tokens we put into a single flate block, just to
- // stop things from getting too large.
- maxFlateBlockTokens = 1 << 14
- maxStoreBlockSize = 65535
- hashBits = 17 // After 17 performance degrades
- hashSize = 1 << hashBits
- hashMask = (1 << hashBits) - 1
- maxHashOffset = 1 << 24
-
- skipNever = math.MaxInt32
-)
-
-type compressionLevel struct {
- level, good, lazy, nice, chain, fastSkipHashing int
-}
-
-var levels = []compressionLevel{
- {0, 0, 0, 0, 0, 0}, // NoCompression.
- {1, 0, 0, 0, 0, 0}, // BestSpeed uses a custom algorithm; see deflatefast.go.
- // For levels 2-3 we don't bother trying with lazy matches.
- {2, 4, 0, 16, 8, 5},
- {3, 4, 0, 32, 32, 6},
- // Levels 4-9 use increasingly more lazy matching
- // and increasingly stringent conditions for "good enough".
- {4, 4, 4, 16, 16, skipNever},
- {5, 8, 16, 32, 32, skipNever},
- {6, 8, 16, 128, 128, skipNever},
- {7, 8, 32, 128, 256, skipNever},
- {8, 32, 128, 258, 1024, skipNever},
- {9, 32, 258, 258, 4096, skipNever},
-}
-
-type compressor struct {
- compressionLevel
-
- w *huffmanBitWriter
- bulkHasher func([]byte, []uint32)
-
- // compression algorithm
- fill func(*compressor, []byte) int // copy data to window
- step func(*compressor) // process window
- sync bool // requesting flush
- bestSpeed *deflateFast // Encoder for BestSpeed
-
- // Input hash chains
- // hashHead[hashValue] contains the largest inputIndex with the specified hash value
- // If hashHead[hashValue] is within the current window, then
- // hashPrev[hashHead[hashValue] & windowMask] contains the previous index
- // with the same hash value.
- chainHead int
- hashHead [hashSize]uint32
- hashPrev [windowSize]uint32
- hashOffset int
-
- // input window: unprocessed data is window[index:windowEnd]
- index int
- window []byte
- windowEnd int
- blockStart int // window index where current tokens start
- byteAvailable bool // if true, still need to process window[index-1].
-
- // queued output tokens
- tokens []token
-
- // deflate state
- length int
- offset int
- hash uint32
- maxInsertIndex int
- err error
-
- // hashMatch must be able to contain hashes for the maximum match length.
- hashMatch [maxMatchLength - 1]uint32
-}
-
-func (d *compressor) fillDeflate(b []byte) int {
- if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
- // shift the window by windowSize
- copy(d.window, d.window[windowSize:2*windowSize])
- d.index -= windowSize
- d.windowEnd -= windowSize
- if d.blockStart >= windowSize {
- d.blockStart -= windowSize
- } else {
- d.blockStart = math.MaxInt32
- }
- d.hashOffset += windowSize
- if d.hashOffset > maxHashOffset {
- delta := d.hashOffset - 1
- d.hashOffset -= delta
- d.chainHead -= delta
-
- // Iterate over slices instead of arrays to avoid copying
- // the entire table onto the stack (Issue #18625).
- for i, v := range d.hashPrev[:] {
- if int(v) > delta {
- d.hashPrev[i] = uint32(int(v) - delta)
- } else {
- d.hashPrev[i] = 0
- }
- }
- for i, v := range d.hashHead[:] {
- if int(v) > delta {
- d.hashHead[i] = uint32(int(v) - delta)
- } else {
- d.hashHead[i] = 0
- }
- }
- }
- }
- n := copy(d.window[d.windowEnd:], b)
- d.windowEnd += n
- return n
-}
-
-func (d *compressor) writeBlock(tokens []token, index int) error {
- if index > 0 {
- var window []byte
- if d.blockStart <= index {
- window = d.window[d.blockStart:index]
- }
- d.blockStart = index
- d.w.writeBlock(tokens, false, window)
- return d.w.err
- }
- return nil
-}
-
-// fillWindow will fill the current window with the supplied
-// dictionary and calculate all hashes.
-// This is much faster than doing a full encode.
-// Should only be used after a reset.
-func (d *compressor) fillWindow(b []byte) {
- // Do not fill window if we are in store-only mode.
- if d.compressionLevel.level < 2 {
- return
- }
- if d.index != 0 || d.windowEnd != 0 {
- panic("internal error: fillWindow called with stale data")
- }
-
- // If we are given too much, cut it.
- if len(b) > windowSize {
- b = b[len(b)-windowSize:]
- }
- // Add all to window.
- n := copy(d.window, b)
-
-	// Calculate 256 hashes at a time (more L1 cache hits)
- loops := (n + 256 - minMatchLength) / 256
- for j := 0; j < loops; j++ {
- index := j * 256
- end := index + 256 + minMatchLength - 1
- if end > n {
- end = n
- }
- toCheck := d.window[index:end]
- dstSize := len(toCheck) - minMatchLength + 1
-
- if dstSize <= 0 {
- continue
- }
-
- dst := d.hashMatch[:dstSize]
- d.bulkHasher(toCheck, dst)
- var newH uint32
- for i, val := range dst {
- di := i + index
- newH = val
- hh := &d.hashHead[newH&hashMask]
- // Get previous value with the same hash.
- // Our chain should point to the previous value.
- d.hashPrev[di&windowMask] = *hh
- // Set the head of the hash chain to us.
- *hh = uint32(di + d.hashOffset)
- }
- d.hash = newH
- }
- // Update window information.
- d.windowEnd = n
- d.index = n
-}
-
-// findMatch tries to find a match starting at pos whose length is greater
-// than prevLength. It examines at most d.chain candidates before giving up.
-func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
- minMatchLook := maxMatchLength
- if lookahead < minMatchLook {
- minMatchLook = lookahead
- }
-
- win := d.window[0 : pos+minMatchLook]
-
-	// We quit when we get a match that's at least nice bytes long
- nice := len(win) - pos
- if d.nice < nice {
- nice = d.nice
- }
-
- // If we've got a match that's good enough, only look in 1/4 the chain.
- tries := d.chain
- length = prevLength
- if length >= d.good {
- tries >>= 2
- }
-
- wEnd := win[pos+length]
- wPos := win[pos:]
- minIndex := pos - windowSize
-
- for i := prevHead; tries > 0; tries-- {
- if wEnd == win[i+length] {
- n := matchLen(win[i:], wPos, minMatchLook)
-
- if n > length && (n > minMatchLength || pos-i <= 4096) {
- length = n
- offset = pos - i
- ok = true
- if n >= nice {
- // The match is good enough that we don't try to find a better one.
- break
- }
- wEnd = win[pos+n]
- }
- }
- if i == minIndex {
- // hashPrev[i & windowMask] has already been overwritten, so stop now.
- break
- }
- i = int(d.hashPrev[i&windowMask]) - d.hashOffset
- if i < minIndex || i < 0 {
- break
- }
- }
- return
-}
-
-func (d *compressor) writeStoredBlock(buf []byte) error {
- if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
- return d.w.err
- }
- d.w.writeBytes(buf)
- return d.w.err
-}
-
-const hashmul = 0x1e35a7bd
-
-// hash4 returns a hash representation of the first 4 bytes
-// of the supplied slice.
-// The caller must ensure that len(b) >= 4.
-func hash4(b []byte) uint32 {
- return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits)
-}
-
-// bulkHash4 computes hashes for all 4-byte windows of b, using the same
-// algorithm as hash4.
-func bulkHash4(b []byte, dst []uint32) {
- if len(b) < minMatchLength {
- return
- }
- hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
- dst[0] = (hb * hashmul) >> (32 - hashBits)
- end := len(b) - minMatchLength + 1
- for i := 1; i < end; i++ {
- hb = (hb << 8) | uint32(b[i+3])
- dst[i] = (hb * hashmul) >> (32 - hashBits)
- }
-}
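
The rolling update in bulkHash4 keeps the same 4-byte window hash as recomputing hash4 from scratch; a standalone sketch of the idea (constants copied from above, helper names are illustrative; assumes "fmt"):

	const hashmul, hashBits = 0x1e35a7bd, 17
	h4 := func(b []byte) uint32 { // hash of b[0:4], as in hash4
		return (uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul >> (32 - hashBits)
	}
	b := []byte("abcdefgh")
	hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
	for i := 1; i+3 < len(b); i++ {
		hb = hb<<8 | uint32(b[i+3])                         // slide the window one byte right
		fmt.Println(hb*hashmul>>(32-hashBits) == h4(b[i:])) // always true
	}
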
-
-// matchLen returns the number of matching bytes in a and b
-// up to length 'max'. Both slices must be at least 'max'
-// bytes in size.
-func matchLen(a, b []byte, max int) int {
- a = a[:max]
- b = b[:len(a)]
- for i, av := range a {
- if b[i] != av {
- return i
- }
- }
- return max
-}
-
-// encSpeed will compress and store the currently added data
-// if enough has been accumulated or we are at the end of the stream.
-// Any error that occurs will be stored in d.err.
-func (d *compressor) encSpeed() {
-	// We only compress once maxStoreBlockSize bytes have been accumulated.
- if d.windowEnd < maxStoreBlockSize {
- if !d.sync {
- return
- }
-
- // Handle small sizes.
- if d.windowEnd < 128 {
- switch {
- case d.windowEnd == 0:
- return
- case d.windowEnd <= 16:
- d.err = d.writeStoredBlock(d.window[:d.windowEnd])
- default:
- d.w.writeBlockHuff(false, d.window[:d.windowEnd])
- d.err = d.w.err
- }
- d.windowEnd = 0
- d.bestSpeed.reset()
- return
- }
-
- }
- // Encode the block.
- d.tokens = d.bestSpeed.encode(d.tokens[:0], d.window[:d.windowEnd])
-
- // If we removed less than 1/16th, Huffman compress the block.
- if len(d.tokens) > d.windowEnd-(d.windowEnd>>4) {
- d.w.writeBlockHuff(false, d.window[:d.windowEnd])
- } else {
- d.w.writeBlockDynamic(d.tokens, false, d.window[:d.windowEnd])
- }
- d.err = d.w.err
- d.windowEnd = 0
-}
-
-func (d *compressor) initDeflate() {
- d.window = make([]byte, 2*windowSize)
- d.hashOffset = 1
- d.tokens = make([]token, 0, maxFlateBlockTokens+1)
- d.length = minMatchLength - 1
- d.offset = 0
- d.byteAvailable = false
- d.index = 0
- d.hash = 0
- d.chainHead = -1
- d.bulkHasher = bulkHash4
-}
-
-func (d *compressor) deflate() {
- if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
- return
- }
-
- d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
- if d.index < d.maxInsertIndex {
- d.hash = hash4(d.window[d.index : d.index+minMatchLength])
- }
-
-Loop:
- for {
- if d.index > d.windowEnd {
- panic("index > windowEnd")
- }
- lookahead := d.windowEnd - d.index
- if lookahead < minMatchLength+maxMatchLength {
- if !d.sync {
- break Loop
- }
- if d.index > d.windowEnd {
- panic("index > windowEnd")
- }
- if lookahead == 0 {
- // Flush current output block if any.
- if d.byteAvailable {
- // There is still one pending token that needs to be flushed
- d.tokens = append(d.tokens, literalToken(uint32(d.window[d.index-1])))
- d.byteAvailable = false
- }
- if len(d.tokens) > 0 {
- if d.err = d.writeBlock(d.tokens, d.index); d.err != nil {
- return
- }
- d.tokens = d.tokens[:0]
- }
- break Loop
- }
- }
- if d.index < d.maxInsertIndex {
- // Update the hash
- d.hash = hash4(d.window[d.index : d.index+minMatchLength])
- hh := &d.hashHead[d.hash&hashMask]
- d.chainHead = int(*hh)
- d.hashPrev[d.index&windowMask] = uint32(d.chainHead)
- *hh = uint32(d.index + d.hashOffset)
- }
- prevLength := d.length
- prevOffset := d.offset
- d.length = minMatchLength - 1
- d.offset = 0
- minIndex := d.index - windowSize
- if minIndex < 0 {
- minIndex = 0
- }
-
- if d.chainHead-d.hashOffset >= minIndex &&
- (d.fastSkipHashing != skipNever && lookahead > minMatchLength-1 ||
- d.fastSkipHashing == skipNever && lookahead > prevLength && prevLength < d.lazy) {
- if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
- d.length = newLength
- d.offset = newOffset
- }
- }
- if d.fastSkipHashing != skipNever && d.length >= minMatchLength ||
- d.fastSkipHashing == skipNever && prevLength >= minMatchLength && d.length <= prevLength {
- // There was a match at the previous step, and the current match is
- // not better. Output the previous match.
- if d.fastSkipHashing != skipNever {
- d.tokens = append(d.tokens, matchToken(uint32(d.length-baseMatchLength), uint32(d.offset-baseMatchOffset)))
- } else {
- d.tokens = append(d.tokens, matchToken(uint32(prevLength-baseMatchLength), uint32(prevOffset-baseMatchOffset)))
- }
- // Insert in the hash table all strings up to the end of the match.
- // index and index-1 are already inserted. If there is not enough
- // lookahead, the last two strings are not inserted into the hash
- // table.
- if d.length <= d.fastSkipHashing {
- var newIndex int
- if d.fastSkipHashing != skipNever {
- newIndex = d.index + d.length
- } else {
- newIndex = d.index + prevLength - 1
- }
- index := d.index
- for index++; index < newIndex; index++ {
- if index < d.maxInsertIndex {
- d.hash = hash4(d.window[index : index+minMatchLength])
- // Get previous value with the same hash.
- // Our chain should point to the previous value.
- hh := &d.hashHead[d.hash&hashMask]
- d.hashPrev[index&windowMask] = *hh
- // Set the head of the hash chain to us.
- *hh = uint32(index + d.hashOffset)
- }
- }
- d.index = index
-
- if d.fastSkipHashing == skipNever {
- d.byteAvailable = false
- d.length = minMatchLength - 1
- }
- } else {
- // For matches this long, we don't bother inserting each individual
- // item into the table.
- d.index += d.length
- if d.index < d.maxInsertIndex {
- d.hash = hash4(d.window[d.index : d.index+minMatchLength])
- }
- }
- if len(d.tokens) == maxFlateBlockTokens {
- // The block includes the current character
- if d.err = d.writeBlock(d.tokens, d.index); d.err != nil {
- return
- }
- d.tokens = d.tokens[:0]
- }
- } else {
- if d.fastSkipHashing != skipNever || d.byteAvailable {
- i := d.index - 1
- if d.fastSkipHashing != skipNever {
- i = d.index
- }
- d.tokens = append(d.tokens, literalToken(uint32(d.window[i])))
- if len(d.tokens) == maxFlateBlockTokens {
- if d.err = d.writeBlock(d.tokens, i+1); d.err != nil {
- return
- }
- d.tokens = d.tokens[:0]
- }
- }
- d.index++
- if d.fastSkipHashing == skipNever {
- d.byteAvailable = true
- }
- }
- }
-}
-
-func (d *compressor) fillStore(b []byte) int {
- n := copy(d.window[d.windowEnd:], b)
- d.windowEnd += n
- return n
-}
-
-func (d *compressor) store() {
- if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
- d.err = d.writeStoredBlock(d.window[:d.windowEnd])
- d.windowEnd = 0
- }
-}
-
-// storeHuff compresses and stores the currently added data
-// when d.window is full or we are at the end of the stream.
-// Any error that occurs will be stored in d.err.
-func (d *compressor) storeHuff() {
- if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
- return
- }
- d.w.writeBlockHuff(false, d.window[:d.windowEnd])
- d.err = d.w.err
- d.windowEnd = 0
-}
-
-func (d *compressor) write(b []byte) (n int, err error) {
- if d.err != nil {
- return 0, d.err
- }
- n = len(b)
- for len(b) > 0 {
- d.step(d)
- b = b[d.fill(d, b):]
- if d.err != nil {
- return 0, d.err
- }
- }
- return n, nil
-}
-
-func (d *compressor) syncFlush() error {
- if d.err != nil {
- return d.err
- }
- d.sync = true
- d.step(d)
- if d.err == nil {
- d.w.writeStoredHeader(0, false)
- d.w.flush()
- d.err = d.w.err
- }
- d.sync = false
- return d.err
-}
-
-func (d *compressor) init(w io.Writer, level int) (err error) {
- d.w = newHuffmanBitWriter(w)
-
- switch {
- case level == NoCompression:
- d.window = make([]byte, maxStoreBlockSize)
- d.fill = (*compressor).fillStore
- d.step = (*compressor).store
- case level == HuffmanOnly:
- d.window = make([]byte, maxStoreBlockSize)
- d.fill = (*compressor).fillStore
- d.step = (*compressor).storeHuff
- case level == BestSpeed:
- d.compressionLevel = levels[level]
- d.window = make([]byte, maxStoreBlockSize)
- d.fill = (*compressor).fillStore
- d.step = (*compressor).encSpeed
- d.bestSpeed = newDeflateFast()
- d.tokens = make([]token, maxStoreBlockSize)
- case level == DefaultCompression:
- level = 6
- fallthrough
- case 2 <= level && level <= 9:
- d.compressionLevel = levels[level]
- d.initDeflate()
- d.fill = (*compressor).fillDeflate
- d.step = (*compressor).deflate
- default:
- return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
- }
- return nil
-}
-
-func (d *compressor) reset(w io.Writer) {
- d.w.reset(w)
- d.sync = false
- d.err = nil
- switch d.compressionLevel.level {
- case NoCompression:
- d.windowEnd = 0
- case BestSpeed:
- d.windowEnd = 0
- d.tokens = d.tokens[:0]
- d.bestSpeed.reset()
- default:
- d.chainHead = -1
- for i := range d.hashHead {
- d.hashHead[i] = 0
- }
- for i := range d.hashPrev {
- d.hashPrev[i] = 0
- }
- d.hashOffset = 1
- d.index, d.windowEnd = 0, 0
- d.blockStart, d.byteAvailable = 0, false
- d.tokens = d.tokens[:0]
- d.length = minMatchLength - 1
- d.offset = 0
- d.hash = 0
- d.maxInsertIndex = 0
- }
-}
-
-func (d *compressor) close() error {
- if d.err != nil {
- return d.err
- }
- d.sync = true
- d.step(d)
- if d.err != nil {
- return d.err
- }
- if d.w.writeStoredHeader(0, true); d.w.err != nil {
- return d.w.err
- }
- d.w.flush()
- return d.w.err
-}
-
-// NewWriter returns a new Writer compressing data at the given level.
-// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
-// higher levels typically run slower but compress more. Level 0
-// (NoCompression) does not attempt any compression; it only adds the
-// necessary DEFLATE framing.
-// Level -1 (DefaultCompression) uses the default compression level.
-// Level -2 (HuffmanOnly) will use Huffman compression only, giving
-// a very fast compression for all types of input, but sacrificing considerable
-// compression efficiency.
-//
-// If level is in the range [-2, 9] then the error returned will be nil.
-// Otherwise the error returned will be non-nil.
-func NewWriter(w io.Writer, level int) (*Writer, error) {
- var dw Writer
- if err := dw.d.init(w, level); err != nil {
- return nil, err
- }
- return &dw, nil
-}
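
A round trip through this package, pairing the Writer with flate.NewReader (assumes "bytes", "compress/flate", "io", and "os" are imported; error handling elided for brevity):

	var buf bytes.Buffer
	zw, _ := flate.NewWriter(&buf, flate.BestSpeed)
	zw.Write([]byte("hello, hello, hello"))
	zw.Close()

	zr := flate.NewReader(&buf)
	io.Copy(os.Stdout, zr) // hello, hello, hello
	zr.Close()
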
-
-// NewWriterDict is like NewWriter but initializes the new
-// Writer with a preset dictionary. The returned Writer behaves
-// as if the dictionary had been written to it without producing
-// any compressed output. The compressed data written to w
-// can only be decompressed by a Reader initialized with the
-// same dictionary.
-func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
- dw := &dictWriter{w}
- zw, err := NewWriter(dw, level)
- if err != nil {
- return nil, err
- }
- zw.d.fillWindow(dict)
- zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
- return zw, err
-}
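
The dictionary variant must be mirrored on the reading side with flate.NewReaderDict (same assumptions as the sketch above):

	dict := []byte("hello world")
	var buf bytes.Buffer
	zw, _ := flate.NewWriterDict(&buf, flate.DefaultCompression, dict)
	zw.Write([]byte("hello world says hello world"))
	zw.Close()

	zr := flate.NewReaderDict(&buf, dict) // must use the identical dict
	io.Copy(os.Stdout, zr)
	zr.Close()
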
-
-type dictWriter struct {
- w io.Writer
-}
-
-func (w *dictWriter) Write(b []byte) (n int, err error) {
- return w.w.Write(b)
-}
-
-// A Writer takes data written to it and writes the compressed
-// form of that data to an underlying writer (see NewWriter).
-type Writer struct {
- d compressor
- dict []byte
-}
-
-// Write writes data to w, which will eventually write the
-// compressed form of data to its underlying writer.
-func (w *Writer) Write(data []byte) (n int, err error) {
- return w.d.write(data)
-}
-
-// Flush flushes any pending data to the underlying writer.
-// It is useful mainly in compressed network protocols, to ensure that
-// a remote reader has enough data to reconstruct a packet.
-// Flush does not return until the data has been written.
-// Calling Flush when there is no pending data still causes the Writer
-// to emit a sync marker of at least 4 bytes.
-// If the underlying writer returns an error, Flush returns that error.
-//
-// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
-func (w *Writer) Flush() error {
- // For more about flushing:
- // https://www.bolet.org/~pornin/deflate-flush.html
- return w.d.syncFlush()
-}
-
-// Close flushes and closes the writer.
-func (w *Writer) Close() error {
- return w.d.close()
-}
-
-// Reset discards the writer's state and makes it equivalent to
-// the result of NewWriter or NewWriterDict called with dst
-// and w's level and dictionary.
-func (w *Writer) Reset(dst io.Writer) {
- if dw, ok := w.d.w.writer.(*dictWriter); ok {
- // w was created with NewWriterDict
- dw.w = dst
- w.d.reset(dw)
- w.d.fillWindow(w.dict)
- } else {
- // w was created with NewWriter
- w.d.reset(dst)
- }
-}
diff --git a/contrib/go/_std_1.18/src/compress/flate/dict_decoder.go b/contrib/go/_std_1.18/src/compress/flate/dict_decoder.go
deleted file mode 100644
index 3b59d48351..0000000000
--- a/contrib/go/_std_1.18/src/compress/flate/dict_decoder.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
-// LZ77 decompresses data through sequences of two forms of commands:
-//
-// * Literal insertions: Runs of one or more symbols are inserted into the data
-// stream as is. This is accomplished through the writeByte method for a
-// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
-// Any valid stream must start with a literal insertion if no preset dictionary
-// is used.
-//
-// * Backward copies: Runs of one or more symbols are copied from previously
-// emitted data. Backward copies come as the tuple (dist, length) where dist
-// determines how far back in the stream to copy from and length determines how
-// many bytes to copy. Note that it is valid for the length to be greater than
-// the distance. Since LZ77 uses forward copies, that situation is used to
-// perform a form of run-length encoding on repeated runs of symbols.
-// The writeCopy and tryWriteCopy methods are used to implement this command.
-//
-// For performance reasons, this implementation does little to no sanity
-// checking of the arguments. As such, the invariants documented for each
-// method call must be respected.
-type dictDecoder struct {
- hist []byte // Sliding window history
-
- // Invariant: 0 <= rdPos <= wrPos <= len(hist)
- wrPos int // Current output position in buffer
- rdPos int // Have emitted hist[:rdPos] already
- full bool // Has a full window length been written yet?
-}
-
-// init initializes dictDecoder to have a sliding window dictionary of the given
-// size. If a preset dict is provided, it will initialize the dictionary with
-// the contents of dict.
-func (dd *dictDecoder) init(size int, dict []byte) {
- *dd = dictDecoder{hist: dd.hist}
-
- if cap(dd.hist) < size {
- dd.hist = make([]byte, size)
- }
- dd.hist = dd.hist[:size]
-
- if len(dict) > len(dd.hist) {
- dict = dict[len(dict)-len(dd.hist):]
- }
- dd.wrPos = copy(dd.hist, dict)
- if dd.wrPos == len(dd.hist) {
- dd.wrPos = 0
- dd.full = true
- }
- dd.rdPos = dd.wrPos
-}
-
-// histSize reports the total amount of historical data in the dictionary.
-func (dd *dictDecoder) histSize() int {
- if dd.full {
- return len(dd.hist)
- }
- return dd.wrPos
-}
-
-// availRead reports the number of bytes that can be flushed by readFlush.
-func (dd *dictDecoder) availRead() int {
- return dd.wrPos - dd.rdPos
-}
-
-// availWrite reports the available amount of output buffer space.
-func (dd *dictDecoder) availWrite() int {
- return len(dd.hist) - dd.wrPos
-}
-
-// writeSlice returns a slice of the available buffer to write data to.
-//
-// This invariant will be kept: len(s) <= availWrite()
-func (dd *dictDecoder) writeSlice() []byte {
- return dd.hist[dd.wrPos:]
-}
-
-// writeMark advances the writer pointer by cnt.
-//
-// This invariant must be kept: 0 <= cnt <= availWrite()
-func (dd *dictDecoder) writeMark(cnt int) {
- dd.wrPos += cnt
-}
-
-// writeByte writes a single byte to the dictionary.
-//
-// This invariant must be kept: 0 < availWrite()
-func (dd *dictDecoder) writeByte(c byte) {
- dd.hist[dd.wrPos] = c
- dd.wrPos++
-}
-
-// writeCopy copies a string at a given (dist, length) to the output.
-// This returns the number of bytes copied and may be less than the requested
-// length if the available space in the output buffer is too small.
-//
-// This invariant must be kept: 0 < dist <= histSize()
-func (dd *dictDecoder) writeCopy(dist, length int) int {
- dstBase := dd.wrPos
- dstPos := dstBase
- srcPos := dstPos - dist
- endPos := dstPos + length
- if endPos > len(dd.hist) {
- endPos = len(dd.hist)
- }
-
- // Copy non-overlapping section after destination position.
- //
- // This section is non-overlapping in that the copy length for this section
- // is always less than or equal to the backwards distance. This can occur
- // if a distance refers to data that wraps-around in the buffer.
- // Thus, a backwards copy is performed here; that is, the exact bytes in
- // the source prior to the copy are placed in the destination.
- if srcPos < 0 {
- srcPos += len(dd.hist)
- dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
- srcPos = 0
- }
-
- // Copy possibly overlapping section before destination position.
- //
- // This section can overlap if the copy length for this section is larger
- // than the backwards distance. This is allowed by LZ77 so that repeated
- // strings can be succinctly represented using (dist, length) pairs.
- // Thus, a forwards copy is performed here; that is, the bytes copied are
- // possibly dependent on the resulting bytes in the destination as the copy
- // progresses along. This is functionally equivalent to the following:
- //
- // for i := 0; i < endPos-dstPos; i++ {
- // dd.hist[dstPos+i] = dd.hist[srcPos+i]
- // }
- // dstPos = endPos
- //
- for dstPos < endPos {
- dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
- }
-
- dd.wrPos = dstPos
- return dstPos - dstBase
-}
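
The loop above relies on copy's handling of overlapping slices to double the
amount of valid data per iteration. A standalone sketch of that expansion,
with an illustrative two-byte seed and a copy of length 6 at distance 2:

package main

import "fmt"

func main() {
	hist := make([]byte, 8)
	copy(hist, "ab") // history so far; now copy 6 bytes from dist=2
	dstPos, srcPos, endPos := 2, 0, 8
	for dstPos < endPos {
		// Each copy is capped at dstPos-srcPos bytes, so the chunk
		// of valid data doubles on every iteration: 2, 4, 8, ...
		dstPos += copy(hist[dstPos:endPos], hist[srcPos:dstPos])
	}
	fmt.Printf("%s\n", hist) // abababab
}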
-
-// tryWriteCopy tries to copy a string at a given (distance, length) to the
-// output. This specialized version is optimized for short distances.
-//
-// This method is designed to be inlined for performance reasons.
-//
-// This invariant must be kept: 0 < dist <= histSize()
-func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
- dstPos := dd.wrPos
- endPos := dstPos + length
- if dstPos < dist || endPos > len(dd.hist) {
- return 0
- }
- dstBase := dstPos
- srcPos := dstPos - dist
-
- // Copy possibly overlapping section before destination position.
- for dstPos < endPos {
- dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
- }
-
- dd.wrPos = dstPos
- return dstPos - dstBase
-}
-
-// readFlush returns a slice of the historical buffer that is ready to be
-// emitted to the user. The data returned by readFlush must be fully consumed
-// before calling any other dictDecoder methods.
-func (dd *dictDecoder) readFlush() []byte {
- toRead := dd.hist[dd.rdPos:dd.wrPos]
- dd.rdPos = dd.wrPos
- if dd.wrPos == len(dd.hist) {
- dd.wrPos, dd.rdPos = 0, 0
- dd.full = true
- }
- return toRead
-}
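
Taken together, the methods above decode the two LZ77 command forms. A minimal
in-package sketch (dictDecoder is unexported, so this would have to live in
package flate, e.g. in a test), ending with a copy whose length exceeds its
distance:

func dictDecoderDemo() string {
	var dd dictDecoder
	dd.init(16, nil) // small window, no preset dictionary

	for _, c := range []byte("abc") { // literal insertions
		dd.writeByte(c)
	}
	dd.writeCopy(3, 6) // backward copy: length > dist is legal (RLE)

	return string(dd.readFlush()) // "abcabcabc"
}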
diff --git a/contrib/go/_std_1.18/src/compress/flate/huffman_bit_writer.go b/contrib/go/_std_1.18/src/compress/flate/huffman_bit_writer.go
deleted file mode 100644
index b3ae76d082..0000000000
--- a/contrib/go/_std_1.18/src/compress/flate/huffman_bit_writer.go
+++ /dev/null
@@ -1,704 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-import (
- "io"
-)
-
-const (
- // The largest offset code.
- offsetCodeCount = 30
-
- // The special code used to mark the end of a block.
- endBlockMarker = 256
-
- // The first length code.
- lengthCodesStart = 257
-
- // The number of codegen codes.
- codegenCodeCount = 19
- badCode = 255
-
- // bufferFlushSize indicates the buffer size
- // after which bytes are flushed to the writer.
- // Should preferably be a multiple of 6, since
- // we accumulate 6 bytes between writes to the buffer.
- bufferFlushSize = 240
-
- // bufferSize is the actual output byte buffer size.
- // It must have additional headroom for a flush
- // which can contain up to 8 bytes.
- bufferSize = bufferFlushSize + 8
-)
-
-// The number of extra bits needed by length code X - lengthCodesStart.
-var lengthExtraBits = []int8{
- /* 257 */ 0, 0, 0,
- /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
- /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
- /* 280 */ 4, 5, 5, 5, 5, 0,
-}
-
-// The length indicated by length code X - lengthCodesStart.
-var lengthBase = []uint32{
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
- 12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
- 64, 80, 96, 112, 128, 160, 192, 224, 255,
-}
-
-// offset code word extra bits.
-var offsetExtraBits = []int8{
- 0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
- 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
- 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
-}
-
-var offsetBase = []uint32{
- 0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
- 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
- 0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
- 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
- 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
- 0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
-}
-
-// The odd order in which the codegen code sizes are written.
-var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
-
-type huffmanBitWriter struct {
- // writer is the underlying writer.
- // Do not use it directly; use the write method, which ensures
- // that Write errors are sticky.
- writer io.Writer
-
- // Data waiting to be written is bytes[0:nbytes]
- // and then the low nbits of bits. Data is always written
- // sequentially into the bytes array.
- bits uint64
- nbits uint
- bytes [bufferSize]byte
- codegenFreq [codegenCodeCount]int32
- nbytes int
- literalFreq []int32
- offsetFreq []int32
- codegen []uint8
- literalEncoding *huffmanEncoder
- offsetEncoding *huffmanEncoder
- codegenEncoding *huffmanEncoder
- err error
-}
-
-func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
- return &huffmanBitWriter{
- writer: w,
- literalFreq: make([]int32, maxNumLit),
- offsetFreq: make([]int32, offsetCodeCount),
- codegen: make([]uint8, maxNumLit+offsetCodeCount+1),
- literalEncoding: newHuffmanEncoder(maxNumLit),
- codegenEncoding: newHuffmanEncoder(codegenCodeCount),
- offsetEncoding: newHuffmanEncoder(offsetCodeCount),
- }
-}
-
-func (w *huffmanBitWriter) reset(writer io.Writer) {
- w.writer = writer
- w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
-}
-
-func (w *huffmanBitWriter) flush() {
- if w.err != nil {
- w.nbits = 0
- return
- }
- n := w.nbytes
- for w.nbits != 0 {
- w.bytes[n] = byte(w.bits)
- w.bits >>= 8
- if w.nbits > 8 { // Avoid underflow
- w.nbits -= 8
- } else {
- w.nbits = 0
- }
- n++
- }
- w.bits = 0
- w.write(w.bytes[:n])
- w.nbytes = 0
-}
-
-func (w *huffmanBitWriter) write(b []byte) {
- if w.err != nil {
- return
- }
- _, w.err = w.writer.Write(b)
-}
-
-func (w *huffmanBitWriter) writeBits(b int32, nb uint) {
- if w.err != nil {
- return
- }
- w.bits |= uint64(b) << w.nbits
- w.nbits += nb
- if w.nbits >= 48 {
- bits := w.bits
- w.bits >>= 48
- w.nbits -= 48
- n := w.nbytes
- bytes := w.bytes[n : n+6]
- bytes[0] = byte(bits)
- bytes[1] = byte(bits >> 8)
- bytes[2] = byte(bits >> 16)
- bytes[3] = byte(bits >> 24)
- bytes[4] = byte(bits >> 32)
- bytes[5] = byte(bits >> 40)
- n += 6
- if n >= bufferFlushSize {
- w.write(w.bytes[:n])
- n = 0
- }
- w.nbytes = n
- }
-}
-
-func (w *huffmanBitWriter) writeBytes(bytes []byte) {
- if w.err != nil {
- return
- }
- n := w.nbytes
- if w.nbits&7 != 0 {
- w.err = InternalError("writeBytes with unfinished bits")
- return
- }
- for w.nbits != 0 {
- w.bytes[n] = byte(w.bits)
- w.bits >>= 8
- w.nbits -= 8
- n++
- }
- if n != 0 {
- w.write(w.bytes[:n])
- }
- w.nbytes = 0
- w.write(bytes)
-}
-
-// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
-// the literal and offset lengths arrays (which are concatenated into a single
-// array). This method generates that run-length encoding.
-//
-// The result is written into the codegen array, and the frequency
-// of each code is written into the codegenFreq array.
-// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
-// information. Code badCode is an end marker.
-//
-// numLiterals The number of literals in literalEncoding
-// numOffsets The number of offsets in offsetEncoding
-// litenc, offenc The literal and offset encoder to use
-func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
- for i := range w.codegenFreq {
- w.codegenFreq[i] = 0
- }
- // Note that we are using codegen both as a temporary variable for holding
- // a copy of the frequencies, and as the place where we put the result.
- // This is fine because the output is always shorter than the input used
- // so far.
- codegen := w.codegen // cache
- // Copy the concatenated code sizes to codegen. Put a marker at the end.
- cgnl := codegen[:numLiterals]
- for i := range cgnl {
- cgnl[i] = uint8(litEnc.codes[i].len)
- }
-
- cgnl = codegen[numLiterals : numLiterals+numOffsets]
- for i := range cgnl {
- cgnl[i] = uint8(offEnc.codes[i].len)
- }
- codegen[numLiterals+numOffsets] = badCode
-
- size := codegen[0]
- count := 1
- outIndex := 0
- for inIndex := 1; size != badCode; inIndex++ {
- // INVARIANT: We have seen "count" copies of size that have not yet
- // had output generated for them.
- nextSize := codegen[inIndex]
- if nextSize == size {
- count++
- continue
- }
- // We need to generate codegen indicating "count" of size.
- if size != 0 {
- codegen[outIndex] = size
- outIndex++
- w.codegenFreq[size]++
- count--
- for count >= 3 {
- n := 6
- if n > count {
- n = count
- }
- codegen[outIndex] = 16
- outIndex++
- codegen[outIndex] = uint8(n - 3)
- outIndex++
- w.codegenFreq[16]++
- count -= n
- }
- } else {
- for count >= 11 {
- n := 138
- if n > count {
- n = count
- }
- codegen[outIndex] = 18
- outIndex++
- codegen[outIndex] = uint8(n - 11)
- outIndex++
- w.codegenFreq[18]++
- count -= n
- }
- if count >= 3 {
- // count >= 3 && count <= 10
- codegen[outIndex] = 17
- outIndex++
- codegen[outIndex] = uint8(count - 3)
- outIndex++
- w.codegenFreq[17]++
- count = 0
- }
- }
- count--
- for ; count >= 0; count-- {
- codegen[outIndex] = size
- outIndex++
- w.codegenFreq[size]++
- }
- // Set up invariant for next time through the loop.
- size = nextSize
- count = 1
- }
- // Marker indicating the end of the codegen.
- codegen[outIndex] = badCode
-}
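
As a worked illustration of the run-length encoding above, a hypothetical
rleCodeLengths helper below emits the same code stream symbolically: "16(n)"
repeats the previous length n times, "17(n)" and "18(n)" emit n zeros (the
real encoder transmits n-3, n-3 and n-11 as extra bits). This is a simplified
sketch, not the production algorithm:

package main

import "fmt"

func rleCodeLengths(lengths []uint8) []string {
	var out []string
	for i := 0; i < len(lengths); {
		v, j := lengths[i], i
		for j < len(lengths) && lengths[j] == v {
			j++
		}
		run := j - i
		switch {
		case v == 0 && run >= 11:
			n := run
			if n > 138 {
				n = 138 // code 18 caps at 138 zeros
			}
			out = append(out, fmt.Sprintf("18(%d)", n))
			i += n
		case v == 0 && run >= 3:
			out = append(out, fmt.Sprintf("17(%d)", run))
			i = j
		case v != 0 && run >= 4:
			// One literal copy of v, then 16 repeats the previous length.
			out = append(out, fmt.Sprint(v))
			for rem := run - 1; rem > 0; {
				if rem >= 3 {
					n := rem
					if n > 6 {
						n = 6 // code 16 caps at 6 repeats
					}
					out = append(out, fmt.Sprintf("16(%d)", n))
					rem -= n
				} else {
					out = append(out, fmt.Sprint(v))
					rem--
				}
			}
			i = j
		default:
			out = append(out, fmt.Sprint(v))
			i++
		}
	}
	return out
}

func main() {
	// Seven 3s, eleven 0s, one 2:
	in := []uint8{3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2}
	fmt.Println(rleCodeLengths(in)) // [3 16(6) 18(11) 2]
}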
-
-// dynamicSize returns the size of dynamically encoded data in bits.
-func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
- numCodegens = len(w.codegenFreq)
- for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
- numCodegens--
- }
- header := 3 + 5 + 5 + 4 + (3 * numCodegens) +
- w.codegenEncoding.bitLength(w.codegenFreq[:]) +
- int(w.codegenFreq[16])*2 +
- int(w.codegenFreq[17])*3 +
- int(w.codegenFreq[18])*7
- size = header +
- litEnc.bitLength(w.literalFreq) +
- offEnc.bitLength(w.offsetFreq) +
- extraBits
-
- return size, numCodegens
-}
-
-// fixedSize returns the size of data encoded with the fixed Huffman tables, in bits.
-func (w *huffmanBitWriter) fixedSize(extraBits int) int {
- return 3 +
- fixedLiteralEncoding.bitLength(w.literalFreq) +
- fixedOffsetEncoding.bitLength(w.offsetFreq) +
- extraBits
-}
-
-// storedSize calculates the stored size, including header.
-// The function returns the size in bits and whether the data
-// fits inside a single stored block.
-func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
- if in == nil {
- return 0, false
- }
- if len(in) <= maxStoreBlockSize {
- return (len(in) + 5) * 8, true
- }
- return 0, false
-}
-
-func (w *huffmanBitWriter) writeCode(c hcode) {
- if w.err != nil {
- return
- }
- w.bits |= uint64(c.code) << w.nbits
- w.nbits += uint(c.len)
- if w.nbits >= 48 {
- bits := w.bits
- w.bits >>= 48
- w.nbits -= 48
- n := w.nbytes
- bytes := w.bytes[n : n+6]
- bytes[0] = byte(bits)
- bytes[1] = byte(bits >> 8)
- bytes[2] = byte(bits >> 16)
- bytes[3] = byte(bits >> 24)
- bytes[4] = byte(bits >> 32)
- bytes[5] = byte(bits >> 40)
- n += 6
- if n >= bufferFlushSize {
- w.write(w.bytes[:n])
- n = 0
- }
- w.nbytes = n
- }
-}
-
-// Write the header of a dynamic Huffman block to the output stream.
-//
-// numLiterals The number of literals specified in codegen
-// numOffsets The number of offsets specified in codegen
-// numCodegens The number of codegens used in codegen
-func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
- if w.err != nil {
- return
- }
- var firstBits int32 = 4
- if isEof {
- firstBits = 5
- }
- w.writeBits(firstBits, 3)
- w.writeBits(int32(numLiterals-257), 5)
- w.writeBits(int32(numOffsets-1), 5)
- w.writeBits(int32(numCodegens-4), 4)
-
- for i := 0; i < numCodegens; i++ {
- value := uint(w.codegenEncoding.codes[codegenOrder[i]].len)
- w.writeBits(int32(value), 3)
- }
-
- i := 0
- for {
- var codeWord int = int(w.codegen[i])
- i++
- if codeWord == badCode {
- break
- }
- w.writeCode(w.codegenEncoding.codes[uint32(codeWord)])
-
- switch codeWord {
- case 16:
- w.writeBits(int32(w.codegen[i]), 2)
- i++
- case 17:
- w.writeBits(int32(w.codegen[i]), 3)
- i++
- case 18:
- w.writeBits(int32(w.codegen[i]), 7)
- i++
- }
- }
-}
-
-func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
- if w.err != nil {
- return
- }
- var flag int32
- if isEof {
- flag = 1
- }
- w.writeBits(flag, 3)
- w.flush()
- w.writeBits(int32(length), 16)
- w.writeBits(int32(^uint16(length)), 16)
-}
-
-func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
- if w.err != nil {
- return
- }
- // Indicate that we are a fixed Huffman block
- var value int32 = 2
- if isEof {
- value = 3
- }
- w.writeBits(value, 3)
-}
-
-// writeBlock will write a block of tokens with the smallest encoding.
-// The original input can be supplied, and if the huffman encoded data
-// is larger than the original bytes, the data will be written as a
-// stored block.
-// If the input is nil, the tokens will always be Huffman encoded.
-func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
- if w.err != nil {
- return
- }
-
- tokens = append(tokens, endBlockMarker)
- numLiterals, numOffsets := w.indexTokens(tokens)
-
- var extraBits int
- storedSize, storable := w.storedSize(input)
- if storable {
- // We only bother calculating the costs of the extra bits required by
- // the length or offset fields (which will be the same for both fixed
- // and dynamic encoding), if we need to compare those two encodings
- // against stored encoding.
- for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
- // First eight length codes have extra size = 0.
- extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart])
- }
- for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
- // First four offset codes have extra size = 0.
- extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode])
- }
- }
-
- // Figure out smallest code.
- // Fixed Huffman baseline.
- var literalEncoding = fixedLiteralEncoding
- var offsetEncoding = fixedOffsetEncoding
- var size = w.fixedSize(extraBits)
-
- // Dynamic Huffman?
- var numCodegens int
-
- // Generate codegen and codegenFrequencies, which indicates how to encode
- // the literalEncoding and the offsetEncoding.
- w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
- w.codegenEncoding.generate(w.codegenFreq[:], 7)
- dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
-
- if dynamicSize < size {
- size = dynamicSize
- literalEncoding = w.literalEncoding
- offsetEncoding = w.offsetEncoding
- }
-
- // Stored bytes?
- if storable && storedSize < size {
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
- return
- }
-
- // Huffman.
- if literalEncoding == fixedLiteralEncoding {
- w.writeFixedHeader(eof)
- } else {
- w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
- }
-
- // Write the tokens.
- w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes)
-}
-
-// writeBlockDynamic encodes a block using a dynamic Huffman table.
-// This should be used if the symbol histogram is strongly
-// non-uniform.
-// If input is supplied and the compression savings are below 1/16th of the
-// input size the block is stored.
-func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) {
- if w.err != nil {
- return
- }
-
- tokens = append(tokens, endBlockMarker)
- numLiterals, numOffsets := w.indexTokens(tokens)
-
- // Generate codegen and codegenFrequencies, which indicates how to encode
- // the literalEncoding and the offsetEncoding.
- w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
- w.codegenEncoding.generate(w.codegenFreq[:], 7)
- size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0)
-
- // Store bytes, if we don't get a reasonable improvement.
- if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
- return
- }
-
- // Write Huffman table.
- w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
-
- // Write the tokens.
- w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes)
-}
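
The "reasonable improvement" test above is a simple margin check. A sketch
with hypothetical sizes, in bits (note that >> binds tighter than +, so the
threshold is size + size/16):

package main

import "fmt"

func main() {
	size := 8000  // hypothetical dynamic-Huffman block size, in bits
	ssize := 8400 // hypothetical stored size, in bits
	fmt.Println(ssize < size+size>>4) // true: 8400 < 8500, so store
}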
-
-// indexTokens indexes a slice of tokens, and updates
-// literalFreq and offsetFreq, and generates literalEncoding
-// and offsetEncoding.
-// The number of literal and offset tokens is returned.
-func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) {
- for i := range w.literalFreq {
- w.literalFreq[i] = 0
- }
- for i := range w.offsetFreq {
- w.offsetFreq[i] = 0
- }
-
- for _, t := range tokens {
- if t < matchType {
- w.literalFreq[t.literal()]++
- continue
- }
- length := t.length()
- offset := t.offset()
- w.literalFreq[lengthCodesStart+lengthCode(length)]++
- w.offsetFreq[offsetCode(offset)]++
- }
-
- // get the number of literals
- numLiterals = len(w.literalFreq)
- for w.literalFreq[numLiterals-1] == 0 {
- numLiterals--
- }
- // get the number of offsets
- numOffsets = len(w.offsetFreq)
- for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
- numOffsets--
- }
- if numOffsets == 0 {
- // We haven't found a single match. If we want to go with the dynamic encoding,
- // we should count at least one offset to be sure that the offset Huffman tree can be encoded.
- w.offsetFreq[0] = 1
- numOffsets = 1
- }
- w.literalEncoding.generate(w.literalFreq, 15)
- w.offsetEncoding.generate(w.offsetFreq, 15)
- return
-}
-
-// writeTokens writes a slice of tokens to the output.
-// codes for literal and offset encoding must be supplied.
-func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
- if w.err != nil {
- return
- }
- for _, t := range tokens {
- if t < matchType {
- w.writeCode(leCodes[t.literal()])
- continue
- }
- // Write the length
- length := t.length()
- lengthCode := lengthCode(length)
- w.writeCode(leCodes[lengthCode+lengthCodesStart])
- extraLengthBits := uint(lengthExtraBits[lengthCode])
- if extraLengthBits > 0 {
- extraLength := int32(length - lengthBase[lengthCode])
- w.writeBits(extraLength, extraLengthBits)
- }
- // Write the offset
- offset := t.offset()
- offsetCode := offsetCode(offset)
- w.writeCode(oeCodes[offsetCode])
- extraOffsetBits := uint(offsetExtraBits[offsetCode])
- if extraOffsetBits > 0 {
- extraOffset := int32(offset - offsetBase[offsetCode])
- w.writeBits(extraOffset, extraOffsetBits)
- }
- }
-}
-
-// huffOffset is a static offset encoder used for huffman only encoding.
-// It can be reused since we will not be encoding offset values.
-var huffOffset *huffmanEncoder
-
-func init() {
- offsetFreq := make([]int32, offsetCodeCount)
- offsetFreq[0] = 1
- huffOffset = newHuffmanEncoder(offsetCodeCount)
- huffOffset.generate(offsetFreq, 15)
-}
-
-// writeBlockHuff encodes a block of bytes as either
-// Huffman encoded literals or uncompressed bytes if the
-// result gains only very little from compression.
-func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) {
- if w.err != nil {
- return
- }
-
- // Clear histogram
- for i := range w.literalFreq {
- w.literalFreq[i] = 0
- }
-
- // Add everything as literals
- histogram(input, w.literalFreq)
-
- w.literalFreq[endBlockMarker] = 1
-
- const numLiterals = endBlockMarker + 1
- w.offsetFreq[0] = 1
- const numOffsets = 1
-
- w.literalEncoding.generate(w.literalFreq, 15)
-
- // Figure out smallest code.
- // Always use dynamic Huffman or Store
- var numCodegens int
-
- // Generate codegen and codegenFrequencies, which indicates how to encode
- // the literalEncoding and the offsetEncoding.
- w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
- w.codegenEncoding.generate(w.codegenFreq[:], 7)
- size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0)
-
- // Store bytes, if we don't get a reasonable improvement.
- if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
- return
- }
-
- // Huffman.
- w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
- encoding := w.literalEncoding.codes[:257]
- n := w.nbytes
- for _, t := range input {
- // Bitwriting inlined, ~30% speedup
- c := encoding[t]
- w.bits |= uint64(c.code) << w.nbits
- w.nbits += uint(c.len)
- if w.nbits < 48 {
- continue
- }
- // Store 6 bytes
- bits := w.bits
- w.bits >>= 48
- w.nbits -= 48
- bytes := w.bytes[n : n+6]
- bytes[0] = byte(bits)
- bytes[1] = byte(bits >> 8)
- bytes[2] = byte(bits >> 16)
- bytes[3] = byte(bits >> 24)
- bytes[4] = byte(bits >> 32)
- bytes[5] = byte(bits >> 40)
- n += 6
- if n < bufferFlushSize {
- continue
- }
- w.write(w.bytes[:n])
- if w.err != nil {
- return // Return early in the event of write failures
- }
- n = 0
- }
- w.nbytes = n
- w.writeCode(encoding[endBlockMarker])
-}
-
-// histogram accumulates a histogram of b in h.
-//
-// len(h) must be >= 256, and h's elements must be all zeroes.
-func histogram(b []byte, h []int32) {
- h = h[:256]
- for _, t := range b {
- h[t]++
- }
-}
diff --git a/contrib/go/_std_1.18/src/compress/flate/huffman_code.go b/contrib/go/_std_1.18/src/compress/flate/huffman_code.go
deleted file mode 100644
index 891537ed5e..0000000000
--- a/contrib/go/_std_1.18/src/compress/flate/huffman_code.go
+++ /dev/null
@@ -1,349 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-import (
- "math"
- "math/bits"
- "sort"
-)
-
-// hcode is a huffman code with a bit code and bit length.
-type hcode struct {
- code, len uint16
-}
-
-type huffmanEncoder struct {
- codes []hcode
- freqcache []literalNode
- bitCount [17]int32
- lns byLiteral // stored to avoid repeated allocation in generate
- lfs byFreq // stored to avoid repeated allocation in generate
-}
-
-type literalNode struct {
- literal uint16
- freq int32
-}
-
-// A levelInfo describes the state of the constructed tree for a given depth.
-type levelInfo struct {
- // Our level. for better printing
- level int32
-
- // The frequency of the last node at this level
- lastFreq int32
-
- // The frequency of the next character to add to this level
- nextCharFreq int32
-
- // The frequency of the next pair (from level below) to add to this level.
- // Only valid if the "needed" value of the next lower level is 0.
- nextPairFreq int32
-
- // The number of chains remaining to generate for this level before moving
- // up to the next level
- needed int32
-}
-
-// set sets the code and length of an hcode.
-func (h *hcode) set(code uint16, length uint16) {
- h.len = length
- h.code = code
-}
-
-func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} }
-
-func newHuffmanEncoder(size int) *huffmanEncoder {
- return &huffmanEncoder{codes: make([]hcode, size)}
-}
-
-// generateFixedLiteralEncoding returns a huffmanEncoder for the fixed literal table.
-func generateFixedLiteralEncoding() *huffmanEncoder {
- h := newHuffmanEncoder(maxNumLit)
- codes := h.codes
- var ch uint16
- for ch = 0; ch < maxNumLit; ch++ {
- var bits uint16
- var size uint16
- switch {
- case ch < 144:
- // size 8, 00110000 .. 10111111
- bits = ch + 48
- size = 8
- case ch < 256:
- // size 9, 110010000 .. 111111111
- bits = ch + 400 - 144
- size = 9
- case ch < 280:
- // size 7, 0000000 .. 0010111
- bits = ch - 256
- size = 7
- default:
- // size 8, 11000000 .. 11000111
- bits = ch + 192 - 280
- size = 8
- }
- codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size}
- }
- return h
-}
-
-func generateFixedOffsetEncoding() *huffmanEncoder {
- h := newHuffmanEncoder(30)
- codes := h.codes
- for ch := range codes {
- codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5}
- }
- return h
-}
-
-var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding()
-var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding()
-
-func (h *huffmanEncoder) bitLength(freq []int32) int {
- var total int
- for i, f := range freq {
- if f != 0 {
- total += int(f) * int(h.codes[i].len)
- }
- }
- return total
-}
-
-const maxBitsLimit = 16
-
-// Return the number of literals assigned to each bit size in the Huffman encoding.
-//
-// This method is only called when len(list) >= 3.
-// The cases of 0, 1, and 2 literals are handled by special case code.
-//
-// list An array of the literals with non-zero frequencies
-// and their associated frequencies. The array is in order of increasing
-// frequency, and has as its last element a special element with frequency
-// MaxInt32
-// maxBits The maximum number of bits that should be used to encode any literal.
-// Must be less than 16.
-// return An integer array in which array[i] indicates the number of literals
-// that should be encoded in i bits.
-func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
- if maxBits >= maxBitsLimit {
- panic("flate: maxBits too large")
- }
- n := int32(len(list))
- list = list[0 : n+1]
- list[n] = maxNode()
-
- // The tree can't have greater depth than n - 1, no matter what. This
- // saves a little bit of work in some small cases
- if maxBits > n-1 {
- maxBits = n - 1
- }
-
- // Create information about each of the levels.
- // A bogus "Level 0" whose sole purpose is so that
- // level1.prev.needed==0. This makes level1.nextPairFreq
- // be a legitimate value that never gets chosen.
- var levels [maxBitsLimit]levelInfo
- // leafCounts[i] counts the number of literals at the left
- // of ancestors of the rightmost node at level i.
- // leafCounts[i][j] is the number of literals at the left
- // of the level j ancestor.
- var leafCounts [maxBitsLimit][maxBitsLimit]int32
-
- for level := int32(1); level <= maxBits; level++ {
- // For every level, the first two items are the first two characters.
- // We initialize the levels as if we had already figured this out.
- levels[level] = levelInfo{
- level: level,
- lastFreq: list[1].freq,
- nextCharFreq: list[2].freq,
- nextPairFreq: list[0].freq + list[1].freq,
- }
- leafCounts[level][level] = 2
- if level == 1 {
- levels[level].nextPairFreq = math.MaxInt32
- }
- }
-
- // We need a total of 2*n - 2 items at top level and have already generated 2.
- levels[maxBits].needed = 2*n - 4
-
- level := maxBits
- for {
- l := &levels[level]
- if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
- // We've run out of both leafs and pairs.
- // End all calculations for this level.
- // To make sure we never come back to this level or any lower level,
- // set nextPairFreq impossibly large.
- l.needed = 0
- levels[level+1].nextPairFreq = math.MaxInt32
- level++
- continue
- }
-
- prevFreq := l.lastFreq
- if l.nextCharFreq < l.nextPairFreq {
- // The next item on this row is a leaf node.
- n := leafCounts[level][level] + 1
- l.lastFreq = l.nextCharFreq
- // Lower leafCounts are the same as for the previous node.
- leafCounts[level][level] = n
- l.nextCharFreq = list[n].freq
- } else {
- // The next item on this row is a pair from the previous row.
- // nextPairFreq isn't valid until we generate two
- // more values in the level below
- l.lastFreq = l.nextPairFreq
- // Take leaf counts from the lower level, except counts[level] remains the same.
- copy(leafCounts[level][:level], leafCounts[level-1][:level])
- levels[l.level-1].needed = 2
- }
-
- if l.needed--; l.needed == 0 {
- // We've done everything we need to do for this level.
- // Continue calculating one level up. Fill in nextPairFreq
- // of that level with the sum of the two nodes we've just calculated on
- // this level.
- if l.level == maxBits {
- // All done!
- break
- }
- levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
- level++
- } else {
- // If we stole from below, move down temporarily to replenish it.
- for levels[level-1].needed > 0 {
- level--
- }
- }
- }
-
- // Something is wrong if, at the end, the top level is empty or hasn't
- // used all of the leaves.
- if leafCounts[maxBits][maxBits] != n {
- panic("leafCounts[maxBits][maxBits] != n")
- }
-
- bitCount := h.bitCount[:maxBits+1]
- bits := 1
- counts := &leafCounts[maxBits]
- for level := maxBits; level > 0; level-- {
- // chain.leafCount gives the number of literals requiring at least "bits"
- // bits to encode.
- bitCount[bits] = counts[level] - counts[level-1]
- bits++
- }
- return bitCount
-}
-
-// Look at the leaves and assign them a bit count and an encoding as specified
-// in RFC 1951 3.2.2
-func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
- code := uint16(0)
- for n, bits := range bitCount {
- code <<= 1
- if n == 0 || bits == 0 {
- continue
- }
- // The literals list[len(list)-bits] .. list[len(list)-1]
- // are encoded using "bits" bits, and get the values
- // code, code + 1, .... The code values are
- // assigned in literal order (not frequency order).
- chunk := list[len(list)-int(bits):]
-
- h.lns.sort(chunk)
- for _, node := range chunk {
- h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
- code++
- }
- list = list[0 : len(list)-int(bits)]
- }
-}
-
-// Update this Huffman Code object to be the minimum code for the specified frequency count.
-//
-// freq An array of frequencies, in which frequency[i] gives the frequency of literal i.
-// maxBits The maximum number of bits to use for any literal.
-func (h *huffmanEncoder) generate(freq []int32, maxBits int32) {
- if h.freqcache == nil {
- // Allocate a reusable buffer with the longest possible frequency table.
- // Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit.
- // The largest of these is maxNumLit, so we allocate for that case.
- h.freqcache = make([]literalNode, maxNumLit+1)
- }
- list := h.freqcache[:len(freq)+1]
- // Number of non-zero literals
- count := 0
- // Set list to be the set of all non-zero literals and their frequencies
- for i, f := range freq {
- if f != 0 {
- list[count] = literalNode{uint16(i), f}
- count++
- } else {
- list[count] = literalNode{}
- h.codes[i].len = 0
- }
- }
- list[len(freq)] = literalNode{}
-
- list = list[:count]
- if count <= 2 {
- // Handle the small cases here, because they are awkward for the general case code. With
- // two or fewer literals, everything has bit length 1.
- for i, node := range list {
- // "list" is in order of increasing literal value.
- h.codes[node.literal].set(uint16(i), 1)
- }
- return
- }
- h.lfs.sort(list)
-
- // Get the number of literals for each bit count
- bitCount := h.bitCounts(list, maxBits)
- // And do the assignment
- h.assignEncodingAndSize(bitCount, list)
-}
-
-type byLiteral []literalNode
-
-func (s *byLiteral) sort(a []literalNode) {
- *s = byLiteral(a)
- sort.Sort(s)
-}
-
-func (s byLiteral) Len() int { return len(s) }
-
-func (s byLiteral) Less(i, j int) bool {
- return s[i].literal < s[j].literal
-}
-
-func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-type byFreq []literalNode
-
-func (s *byFreq) sort(a []literalNode) {
- *s = byFreq(a)
- sort.Sort(s)
-}
-
-func (s byFreq) Len() int { return len(s) }
-
-func (s byFreq) Less(i, j int) bool {
- if s[i].freq == s[j].freq {
- return s[i].literal < s[j].literal
- }
- return s[i].freq < s[j].freq
-}
-
-func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-func reverseBits(number uint16, bitLength byte) uint16 {
- return bits.Reverse16(number << (16 - bitLength))
-}
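
DEFLATE defines Huffman codes most-significant-bit first, while the bit writer
packs output least-significant-bit first, so every code is stored pre-reversed.
A standalone sketch of the reverseBits trick above:

package main

import (
	"fmt"
	"math/bits"
)

func reverseBits(number uint16, bitLength byte) uint16 {
	// Shift the code into the top bitLength bits, then mirror all 16.
	return bits.Reverse16(number << (16 - bitLength))
}

func main() {
	fmt.Printf("%05b\n", reverseBits(0b00110, 5)) // 01100
}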
diff --git a/contrib/go/_std_1.18/src/compress/gzip/gunzip.go b/contrib/go/_std_1.18/src/compress/gzip/gunzip.go
deleted file mode 100644
index 237b2b928b..0000000000
--- a/contrib/go/_std_1.18/src/compress/gzip/gunzip.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package gzip implements reading and writing of gzip format compressed files,
-// as specified in RFC 1952.
-package gzip
-
-import (
- "bufio"
- "compress/flate"
- "encoding/binary"
- "errors"
- "hash/crc32"
- "io"
- "time"
-)
-
-const (
- gzipID1 = 0x1f
- gzipID2 = 0x8b
- gzipDeflate = 8
- flagText = 1 << 0
- flagHdrCrc = 1 << 1
- flagExtra = 1 << 2
- flagName = 1 << 3
- flagComment = 1 << 4
-)
-
-var (
- // ErrChecksum is returned when reading GZIP data that has an invalid checksum.
- ErrChecksum = errors.New("gzip: invalid checksum")
- // ErrHeader is returned when reading GZIP data that has an invalid header.
- ErrHeader = errors.New("gzip: invalid header")
-)
-
-var le = binary.LittleEndian
-
-// noEOF converts io.EOF to io.ErrUnexpectedEOF.
-func noEOF(err error) error {
- if err == io.EOF {
- return io.ErrUnexpectedEOF
- }
- return err
-}
-
-// The gzip file stores a header giving metadata about the compressed file.
-// That header is exposed as the fields of the Writer and Reader structs.
-//
-// Strings must be UTF-8 encoded and may only contain Unicode code points
-// U+0001 through U+00FF, due to limitations of the GZIP file format.
-type Header struct {
- Comment string // comment
- Extra []byte // "extra data"
- ModTime time.Time // modification time
- Name string // file name
- OS byte // operating system type
-}
-
-// A Reader is an io.Reader that can be read to retrieve
-// uncompressed data from a gzip-format compressed file.
-//
-// In general, a gzip file can be a concatenation of gzip files,
-// each with its own header. Reads from the Reader
-// return the concatenation of the uncompressed data of each.
-// Only the first header is recorded in the Reader fields.
-//
-// Gzip files store a length and checksum of the uncompressed data.
-// The Reader will return an ErrChecksum when Read
-// reaches the end of the uncompressed data if it does not
-// have the expected length or checksum. Clients should treat data
-// returned by Read as tentative until they receive the io.EOF
-// marking the end of the data.
-type Reader struct {
- Header // valid after NewReader or Reader.Reset
- r flate.Reader
- decompressor io.ReadCloser
- digest uint32 // CRC-32, IEEE polynomial (section 8)
- size uint32 // Uncompressed size (section 2.3.1)
- buf [512]byte
- err error
- multistream bool
-}
-
-// NewReader creates a new Reader reading the given reader.
-// If r does not also implement io.ByteReader,
-// the decompressor may read more data than necessary from r.
-//
-// It is the caller's responsibility to call Close on the Reader when done.
-//
-// The Reader.Header fields will be valid in the Reader returned.
-func NewReader(r io.Reader) (*Reader, error) {
- z := new(Reader)
- if err := z.Reset(r); err != nil {
- return nil, err
- }
- return z, nil
-}
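
A minimal round-trip sketch of the Reader API described above; the payload and
file name are illustrative. Reading to EOF is what triggers the checksum and
size verification:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"log"
)

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Name = "greeting.txt" // header metadata travels with the stream
	zw.Write([]byte("hello, gzip"))
	zw.Close()

	zr, err := gzip.NewReader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	defer zr.Close()
	out, err := io.ReadAll(zr) // must reach io.EOF to verify the trailer
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s from %s\n", out, zr.Name)
}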
-
-// Reset discards the Reader z's state and makes it equivalent to the
-// result of its original state from NewReader, but reading from r instead.
-// This permits reusing a Reader rather than allocating a new one.
-func (z *Reader) Reset(r io.Reader) error {
- *z = Reader{
- decompressor: z.decompressor,
- multistream: true,
- }
- if rr, ok := r.(flate.Reader); ok {
- z.r = rr
- } else {
- z.r = bufio.NewReader(r)
- }
- z.Header, z.err = z.readHeader()
- return z.err
-}
-
-// Multistream controls whether the reader supports multistream files.
-//
-// If enabled (the default), the Reader expects the input to be a sequence
-// of individually gzipped data streams, each with its own header and
-// trailer, ending at EOF. The effect is that the concatenation of a sequence
-// of gzipped files is treated as equivalent to the gzip of the concatenation
-// of the sequence. This is standard behavior for gzip readers.
-//
-// Calling Multistream(false) disables this behavior; disabling the behavior
-// can be useful when reading file formats that distinguish individual gzip
-// data streams or mix gzip data streams with other data streams.
-// In this mode, when the Reader reaches the end of the data stream,
-// Read returns io.EOF. The underlying reader must implement io.ByteReader
-// in order to be left positioned just after the gzip stream.
-// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
-// If there is no next stream, z.Reset(r) will return io.EOF.
-func (z *Reader) Multistream(ok bool) {
- z.multistream = ok
-}
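
A sketch of the per-stream reading pattern described above. The input must be
wrapped in a bufio.Reader (an io.ByteReader) so the Reader is left positioned
exactly after each member; the helper name and package are hypothetical:

package gzipsplit

import (
	"bufio"
	"compress/gzip"
	"io"
)

// copyMembers writes each gzip member in r to dst, one io.Copy per member.
func copyMembers(dst io.Writer, r *bufio.Reader) error {
	zr, err := gzip.NewReader(r)
	if err != nil {
		return err
	}
	for {
		zr.Multistream(false)
		if _, err := io.Copy(dst, zr); err != nil {
			return err
		}
		switch err := zr.Reset(r); err {
		case nil:
			// Another member follows; loop and disable multistream again.
		case io.EOF:
			return nil // no next stream
		default:
			return err
		}
	}
}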
-
-// readString reads a NUL-terminated string from z.r.
-// It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and
-// will output a string encoded using UTF-8.
-// This method always updates z.digest with the data read.
-func (z *Reader) readString() (string, error) {
- var err error
- needConv := false
- for i := 0; ; i++ {
- if i >= len(z.buf) {
- return "", ErrHeader
- }
- z.buf[i], err = z.r.ReadByte()
- if err != nil {
- return "", err
- }
- if z.buf[i] > 0x7f {
- needConv = true
- }
- if z.buf[i] == 0 {
- // Digest covers the NUL terminator.
- z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1])
-
- // Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1).
- if needConv {
- s := make([]rune, 0, i)
- for _, v := range z.buf[:i] {
- s = append(s, rune(v))
- }
- return string(s), nil
- }
- return string(z.buf[:i]), nil
- }
- }
-}
-
-// readHeader reads the GZIP header according to section 2.3.1.
-// This method does not set z.err.
-func (z *Reader) readHeader() (hdr Header, err error) {
- if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil {
- // RFC 1952, section 2.2, says the following:
- // A gzip file consists of a series of "members" (compressed data sets).
- //
- // Other than this, the specification does not clarify whether a
- // "series" is defined as "one or more" or "zero or more". To err on the
- // side of caution, Go interprets this to mean "zero or more".
- // Thus, it is okay to return io.EOF here.
- return hdr, err
- }
- if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
- return hdr, ErrHeader
- }
- flg := z.buf[3]
- if t := int64(le.Uint32(z.buf[4:8])); t > 0 {
- // Section 2.3.1, the zero value for MTIME means that the
- // modified time is not set.
- hdr.ModTime = time.Unix(t, 0)
- }
- // z.buf[8] is XFL and is currently ignored.
- hdr.OS = z.buf[9]
- z.digest = crc32.ChecksumIEEE(z.buf[:10])
-
- if flg&flagExtra != 0 {
- if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
- return hdr, noEOF(err)
- }
- z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2])
- data := make([]byte, le.Uint16(z.buf[:2]))
- if _, err = io.ReadFull(z.r, data); err != nil {
- return hdr, noEOF(err)
- }
- z.digest = crc32.Update(z.digest, crc32.IEEETable, data)
- hdr.Extra = data
- }
-
- var s string
- if flg&flagName != 0 {
- if s, err = z.readString(); err != nil {
- return hdr, err
- }
- hdr.Name = s
- }
-
- if flg&flagComment != 0 {
- if s, err = z.readString(); err != nil {
- return hdr, err
- }
- hdr.Comment = s
- }
-
- if flg&flagHdrCrc != 0 {
- if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
- return hdr, noEOF(err)
- }
- digest := le.Uint16(z.buf[:2])
- if digest != uint16(z.digest) {
- return hdr, ErrHeader
- }
- }
-
- z.digest = 0
- if z.decompressor == nil {
- z.decompressor = flate.NewReader(z.r)
- } else {
- z.decompressor.(flate.Resetter).Reset(z.r, nil)
- }
- return hdr, nil
-}
-
-// Read implements io.Reader, reading uncompressed bytes from its underlying Reader.
-func (z *Reader) Read(p []byte) (n int, err error) {
- if z.err != nil {
- return 0, z.err
- }
-
- for n == 0 {
- n, z.err = z.decompressor.Read(p)
- z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
- z.size += uint32(n)
- if z.err != io.EOF {
- // In the normal case we return here.
- return n, z.err
- }
-
- // Finished file; check checksum and size.
- if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
- z.err = noEOF(err)
- return n, z.err
- }
- digest := le.Uint32(z.buf[:4])
- size := le.Uint32(z.buf[4:8])
- if digest != z.digest || size != z.size {
- z.err = ErrChecksum
- return n, z.err
- }
- z.digest, z.size = 0, 0
-
- // File is ok; check if there is another.
- if !z.multistream {
- return n, io.EOF
- }
- z.err = nil // Remove io.EOF
-
- if _, z.err = z.readHeader(); z.err != nil {
- return n, z.err
- }
- }
-
- return n, nil
-}
-
-// Close closes the Reader. It does not close the underlying io.Reader.
-// In order for the GZIP checksum to be verified, the reader must be
-// fully consumed until the io.EOF.
-func (z *Reader) Close() error { return z.decompressor.Close() }
diff --git a/contrib/go/_std_1.18/src/container/list/list.go b/contrib/go/_std_1.18/src/container/list/list.go
deleted file mode 100644
index 9555ad3900..0000000000
--- a/contrib/go/_std_1.18/src/container/list/list.go
+++ /dev/null
@@ -1,235 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package list implements a doubly linked list.
-//
-// To iterate over a list (where l is a *List):
-// for e := l.Front(); e != nil; e = e.Next() {
-// // do something with e.Value
-// }
-//
-package list
-
-// Element is an element of a linked list.
-type Element struct {
- // Next and previous pointers in the doubly-linked list of elements.
- // To simplify the implementation, internally a list l is implemented
- // as a ring, such that &l.root is both the next element of the last
- // list element (l.Back()) and the previous element of the first list
- // element (l.Front()).
- next, prev *Element
-
- // The list to which this element belongs.
- list *List
-
- // The value stored with this element.
- Value any
-}
-
-// Next returns the next list element or nil.
-func (e *Element) Next() *Element {
- if p := e.next; e.list != nil && p != &e.list.root {
- return p
- }
- return nil
-}
-
-// Prev returns the previous list element or nil.
-func (e *Element) Prev() *Element {
- if p := e.prev; e.list != nil && p != &e.list.root {
- return p
- }
- return nil
-}
-
-// List represents a doubly linked list.
-// The zero value for List is an empty list ready to use.
-type List struct {
- root Element // sentinel list element, only &root, root.prev, and root.next are used
- len int // current list length excluding (this) sentinel element
-}
-
-// Init initializes or clears list l.
-func (l *List) Init() *List {
- l.root.next = &l.root
- l.root.prev = &l.root
- l.len = 0
- return l
-}
-
-// New returns an initialized list.
-func New() *List { return new(List).Init() }
-
-// Len returns the number of elements of list l.
-// The complexity is O(1).
-func (l *List) Len() int { return l.len }
-
-// Front returns the first element of list l or nil if the list is empty.
-func (l *List) Front() *Element {
- if l.len == 0 {
- return nil
- }
- return l.root.next
-}
-
-// Back returns the last element of list l or nil if the list is empty.
-func (l *List) Back() *Element {
- if l.len == 0 {
- return nil
- }
- return l.root.prev
-}
-
-// lazyInit lazily initializes a zero List value.
-func (l *List) lazyInit() {
- if l.root.next == nil {
- l.Init()
- }
-}
-
-// insert inserts e after at, increments l.len, and returns e.
-func (l *List) insert(e, at *Element) *Element {
- e.prev = at
- e.next = at.next
- e.prev.next = e
- e.next.prev = e
- e.list = l
- l.len++
- return e
-}
-
-// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
-func (l *List) insertValue(v any, at *Element) *Element {
- return l.insert(&Element{Value: v}, at)
-}
-
-// remove removes e from its list and decrements l.len.
-func (l *List) remove(e *Element) {
- e.prev.next = e.next
- e.next.prev = e.prev
- e.next = nil // avoid memory leaks
- e.prev = nil // avoid memory leaks
- e.list = nil
- l.len--
-}
-
-// move moves e next to at.
-func (l *List) move(e, at *Element) {
- if e == at {
- return
- }
- e.prev.next = e.next
- e.next.prev = e.prev
-
- e.prev = at
- e.next = at.next
- e.prev.next = e
- e.next.prev = e
-}
-
-// Remove removes e from l if e is an element of list l.
-// It returns the element value e.Value.
-// The element must not be nil.
-func (l *List) Remove(e *Element) any {
- if e.list == l {
- // If e.list == l, then l must have been initialized when e was inserted
- // into l, or both are nil (e is a zero Element), in which case l.remove
- // will crash.
- l.remove(e)
- }
- return e.Value
-}
-
-// PushFront inserts a new element e with value v at the front of list l and returns e.
-func (l *List) PushFront(v any) *Element {
- l.lazyInit()
- return l.insertValue(v, &l.root)
-}
-
-// PushBack inserts a new element e with value v at the back of list l and returns e.
-func (l *List) PushBack(v any) *Element {
- l.lazyInit()
- return l.insertValue(v, l.root.prev)
-}
-
-// InsertBefore inserts a new element e with value v immediately before mark and returns e.
-// If mark is not an element of l, the list is not modified.
-// The mark must not be nil.
-func (l *List) InsertBefore(v any, mark *Element) *Element {
- if mark.list != l {
- return nil
- }
- // see comment in List.Remove about initialization of l
- return l.insertValue(v, mark.prev)
-}
-
-// InsertAfter inserts a new element e with value v immediately after mark and returns e.
-// If mark is not an element of l, the list is not modified.
-// The mark must not be nil.
-func (l *List) InsertAfter(v any, mark *Element) *Element {
- if mark.list != l {
- return nil
- }
- // see comment in List.Remove about initialization of l
- return l.insertValue(v, mark)
-}
-
-// MoveToFront moves element e to the front of list l.
-// If e is not an element of l, the list is not modified.
-// The element must not be nil.
-func (l *List) MoveToFront(e *Element) {
- if e.list != l || l.root.next == e {
- return
- }
- // see comment in List.Remove about initialization of l
- l.move(e, &l.root)
-}
-
-// MoveToBack moves element e to the back of list l.
-// If e is not an element of l, the list is not modified.
-// The element must not be nil.
-func (l *List) MoveToBack(e *Element) {
- if e.list != l || l.root.prev == e {
- return
- }
- // see comment in List.Remove about initialization of l
- l.move(e, l.root.prev)
-}
-
-// MoveBefore moves element e to its new position before mark.
-// If e or mark is not an element of l, or e == mark, the list is not modified.
-// The element and mark must not be nil.
-func (l *List) MoveBefore(e, mark *Element) {
- if e.list != l || e == mark || mark.list != l {
- return
- }
- l.move(e, mark.prev)
-}
-
-// MoveAfter moves element e to its new position after mark.
-// If e or mark is not an element of l, or e == mark, the list is not modified.
-// The element and mark must not be nil.
-func (l *List) MoveAfter(e, mark *Element) {
- if e.list != l || e == mark || mark.list != l {
- return
- }
- l.move(e, mark)
-}
-
-// PushBackList inserts a copy of another list at the back of list l.
-// The lists l and other may be the same. They must not be nil.
-func (l *List) PushBackList(other *List) {
- l.lazyInit()
- for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
- l.insertValue(e.Value, l.root.prev)
- }
-}
-
-// PushFrontList inserts a copy of another list at the front of list l.
-// The lists l and other may be the same. They must not be nil.
-func (l *List) PushFrontList(other *List) {
- l.lazyInit()
- for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
- l.insertValue(e.Value, &l.root)
- }
-}
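
A minimal sketch of the API above, mirroring the iteration idiom from the
package comment (the values are illustrative):

package main

import (
	"container/list"
	"fmt"
)

func main() {
	l := list.New()
	e4 := l.PushBack(4)
	e1 := l.PushFront(1)
	l.InsertBefore(3, e4)
	l.InsertAfter(2, e1)

	for e := l.Front(); e != nil; e = e.Next() {
		fmt.Println(e.Value) // 1, 2, 3, 4 on successive lines
	}
}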
diff --git a/contrib/go/_std_1.18/src/context/context.go b/contrib/go/_std_1.18/src/context/context.go
deleted file mode 100644
index cf010b2a69..0000000000
--- a/contrib/go/_std_1.18/src/context/context.go
+++ /dev/null
@@ -1,593 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package context defines the Context type, which carries deadlines,
-// cancellation signals, and other request-scoped values across API boundaries
-// and between processes.
-//
-// Incoming requests to a server should create a Context, and outgoing
-// calls to servers should accept a Context. The chain of function
-// calls between them must propagate the Context, optionally replacing
-// it with a derived Context created using WithCancel, WithDeadline,
-// WithTimeout, or WithValue. When a Context is canceled, all
-// Contexts derived from it are also canceled.
-//
-// The WithCancel, WithDeadline, and WithTimeout functions take a
-// Context (the parent) and return a derived Context (the child) and a
-// CancelFunc. Calling the CancelFunc cancels the child and its
-// children, removes the parent's reference to the child, and stops
-// any associated timers. Failing to call the CancelFunc leaks the
-// child and its children until the parent is canceled or the timer
-// fires. The go vet tool checks that CancelFuncs are used on all
-// control-flow paths.
-//
-// Programs that use Contexts should follow these rules to keep interfaces
-// consistent across packages and enable static analysis tools to check context
-// propagation:
-//
-// Do not store Contexts inside a struct type; instead, pass a Context
-// explicitly to each function that needs it. The Context should be the first
-// parameter, typically named ctx:
-//
-// func DoSomething(ctx context.Context, arg Arg) error {
-// // ... use ctx ...
-// }
-//
-// Do not pass a nil Context, even if a function permits it. Pass context.TODO
-// if you are unsure about which Context to use.
-//
-// Use context Values only for request-scoped data that transits processes and
-// APIs, not for passing optional parameters to functions.
-//
-// The same Context may be passed to functions running in different goroutines;
-// Contexts are safe for simultaneous use by multiple goroutines.
-//
-// See https://blog.golang.org/context for example code for a server that uses
-// Contexts.
-package context
-
-import (
- "errors"
- "internal/reflectlite"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// A Context carries a deadline, a cancellation signal, and other values across
-// API boundaries.
-//
-// Context's methods may be called by multiple goroutines simultaneously.
-type Context interface {
- // Deadline returns the time when work done on behalf of this context
- // should be canceled. Deadline returns ok==false when no deadline is
- // set. Successive calls to Deadline return the same results.
- Deadline() (deadline time.Time, ok bool)
-
- // Done returns a channel that's closed when work done on behalf of this
- // context should be canceled. Done may return nil if this context can
- // never be canceled. Successive calls to Done return the same value.
- // The close of the Done channel may happen asynchronously,
- // after the cancel function returns.
- //
- // WithCancel arranges for Done to be closed when cancel is called;
- // WithDeadline arranges for Done to be closed when the deadline
- // expires; WithTimeout arranges for Done to be closed when the timeout
- // elapses.
- //
- // Done is provided for use in select statements:
- //
- // // Stream generates values with DoSomething and sends them to out
- // // until DoSomething returns an error or ctx.Done is closed.
- // func Stream(ctx context.Context, out chan<- Value) error {
- // for {
- // v, err := DoSomething(ctx)
- // if err != nil {
- // return err
- // }
- // select {
- // case <-ctx.Done():
- // return ctx.Err()
- // case out <- v:
- // }
- // }
- // }
- //
- // See https://blog.golang.org/pipelines for more examples of how to use
- // a Done channel for cancellation.
- Done() <-chan struct{}
-
- // If Done is not yet closed, Err returns nil.
- // If Done is closed, Err returns a non-nil error explaining why:
- // Canceled if the context was canceled
- // or DeadlineExceeded if the context's deadline passed.
- // After Err returns a non-nil error, successive calls to Err return the same error.
- Err() error
-
- // Value returns the value associated with this context for key, or nil
- // if no value is associated with key. Successive calls to Value with
- // the same key returns the same result.
- //
- // Use context values only for request-scoped data that transits
- // processes and API boundaries, not for passing optional parameters to
- // functions.
- //
- // A key identifies a specific value in a Context. Functions that wish
- // to store values in Context typically allocate a key in a global
- // variable then use that key as the argument to context.WithValue and
- // Context.Value. A key can be any type that supports equality;
- // packages should define keys as an unexported type to avoid
- // collisions.
- //
- // Packages that define a Context key should provide type-safe accessors
- // for the values stored using that key:
- //
- // // Package user defines a User type that's stored in Contexts.
- // package user
- //
- // import "context"
- //
- // // User is the type of value stored in the Contexts.
- // type User struct {...}
- //
- // // key is an unexported type for keys defined in this package.
- // // This prevents collisions with keys defined in other packages.
- // type key int
- //
- // // userKey is the key for user.User values in Contexts. It is
- // // unexported; clients use user.NewContext and user.FromContext
- // // instead of using this key directly.
- // var userKey key
- //
- // // NewContext returns a new Context that carries value u.
- // func NewContext(ctx context.Context, u *User) context.Context {
- // return context.WithValue(ctx, userKey, u)
- // }
- //
- // // FromContext returns the User value stored in ctx, if any.
- // func FromContext(ctx context.Context) (*User, bool) {
- // u, ok := ctx.Value(userKey).(*User)
- // return u, ok
- // }
- Value(key any) any
-}
-
-// Canceled is the error returned by Context.Err when the context is canceled.
-var Canceled = errors.New("context canceled")
-
-// DeadlineExceeded is the error returned by Context.Err when the context's
-// deadline passes.
-var DeadlineExceeded error = deadlineExceededError{}
-
-type deadlineExceededError struct{}
-
-func (deadlineExceededError) Error() string { return "context deadline exceeded" }
-func (deadlineExceededError) Timeout() bool { return true }
-func (deadlineExceededError) Temporary() bool { return true }
-
-// An emptyCtx is never canceled, has no values, and has no deadline. It is not
-// struct{}, since vars of this type must have distinct addresses.
-type emptyCtx int
-
-func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
- return
-}
-
-func (*emptyCtx) Done() <-chan struct{} {
- return nil
-}
-
-func (*emptyCtx) Err() error {
- return nil
-}
-
-func (*emptyCtx) Value(key any) any {
- return nil
-}
-
-func (e *emptyCtx) String() string {
- switch e {
- case background:
- return "context.Background"
- case todo:
- return "context.TODO"
- }
- return "unknown empty Context"
-}
-
-var (
- background = new(emptyCtx)
- todo = new(emptyCtx)
-)
-
-// Background returns a non-nil, empty Context. It is never canceled, has no
-// values, and has no deadline. It is typically used by the main function,
-// initialization, and tests, and as the top-level Context for incoming
-// requests.
-func Background() Context {
- return background
-}
-
-// TODO returns a non-nil, empty Context. Code should use context.TODO when
-// it's unclear which Context to use or it is not yet available (because the
-// surrounding function has not yet been extended to accept a Context
-// parameter).
-func TODO() Context {
- return todo
-}
-
-// A CancelFunc tells an operation to abandon its work.
-// A CancelFunc does not wait for the work to stop.
-// A CancelFunc may be called by multiple goroutines simultaneously.
-// After the first call, subsequent calls to a CancelFunc do nothing.
-type CancelFunc func()
-
-// WithCancel returns a copy of parent with a new Done channel. The returned
-// context's Done channel is closed when the returned cancel function is called
-// or when the parent context's Done channel is closed, whichever happens first.
-//
-// Canceling this context releases resources associated with it, so code should
-// call cancel as soon as the operations running in this Context complete.
-func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
- if parent == nil {
- panic("cannot create context from nil parent")
- }
- c := newCancelCtx(parent)
- propagateCancel(parent, &c)
- return &c, func() { c.cancel(true, Canceled) }
-}
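Editor note: callers typically defer the returned cancel immediately so the context's resources are always released. A minimal sketch against the API above (the 10 ms delay is illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // cancel is idempotent; the deferred call is a no-op here

	go func() {
		time.Sleep(10 * time.Millisecond)
		cancel() // release the context's resources and unblock waiters
	}()

	<-ctx.Done()
	fmt.Println(ctx.Err()) // context.Canceled
}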
-
-// newCancelCtx returns an initialized cancelCtx.
-func newCancelCtx(parent Context) cancelCtx {
- return cancelCtx{Context: parent}
-}
-
-// goroutines counts the number of goroutines ever created; for testing.
-var goroutines int32
-
-// propagateCancel arranges for child to be canceled when parent is.
-func propagateCancel(parent Context, child canceler) {
- done := parent.Done()
- if done == nil {
- return // parent is never canceled
- }
-
- select {
- case <-done:
- // parent is already canceled
- child.cancel(false, parent.Err())
- return
- default:
- }
-
- if p, ok := parentCancelCtx(parent); ok {
- p.mu.Lock()
- if p.err != nil {
- // parent has already been canceled
- child.cancel(false, p.err)
- } else {
- if p.children == nil {
- p.children = make(map[canceler]struct{})
- }
- p.children[child] = struct{}{}
- }
- p.mu.Unlock()
- } else {
- atomic.AddInt32(&goroutines, +1)
- go func() {
- select {
- case <-parent.Done():
- child.cancel(false, parent.Err())
- case <-child.Done():
- }
- }()
- }
-}
-
-// &cancelCtxKey is the key that a cancelCtx returns itself for.
-var cancelCtxKey int
-
-// parentCancelCtx returns the underlying *cancelCtx for parent.
-// It does this by looking up parent.Value(&cancelCtxKey) to find
-// the innermost enclosing *cancelCtx and then checking whether
-// parent.Done() matches that *cancelCtx. (If not, the *cancelCtx
-// has been wrapped in a custom implementation providing a
-// different done channel, in which case we should not bypass it.)
-func parentCancelCtx(parent Context) (*cancelCtx, bool) {
- done := parent.Done()
- if done == closedchan || done == nil {
- return nil, false
- }
- p, ok := parent.Value(&cancelCtxKey).(*cancelCtx)
- if !ok {
- return nil, false
- }
- pdone, _ := p.done.Load().(chan struct{})
- if pdone != done {
- return nil, false
- }
- return p, true
-}
-
-// removeChild removes a context from its parent.
-func removeChild(parent Context, child canceler) {
- p, ok := parentCancelCtx(parent)
- if !ok {
- return
- }
- p.mu.Lock()
- if p.children != nil {
- delete(p.children, child)
- }
- p.mu.Unlock()
-}
-
-// A canceler is a context type that can be canceled directly. The
-// implementations are *cancelCtx and *timerCtx.
-type canceler interface {
- cancel(removeFromParent bool, err error)
- Done() <-chan struct{}
-}
-
-// closedchan is a reusable closed channel.
-var closedchan = make(chan struct{})
-
-func init() {
- close(closedchan)
-}
-
-// A cancelCtx can be canceled. When canceled, it also cancels any children
-// that implement canceler.
-type cancelCtx struct {
- Context
-
- mu sync.Mutex // protects following fields
- done atomic.Value // of chan struct{}, created lazily, closed by first cancel call
- children map[canceler]struct{} // set to nil by the first cancel call
- err error // set to non-nil by the first cancel call
-}
-
-func (c *cancelCtx) Value(key any) any {
- if key == &cancelCtxKey {
- return c
- }
- return value(c.Context, key)
-}
-
-func (c *cancelCtx) Done() <-chan struct{} {
- d := c.done.Load()
- if d != nil {
- return d.(chan struct{})
- }
- c.mu.Lock()
- defer c.mu.Unlock()
- d = c.done.Load()
- if d == nil {
- d = make(chan struct{})
- c.done.Store(d)
- }
- return d.(chan struct{})
-}
-
-func (c *cancelCtx) Err() error {
- c.mu.Lock()
- err := c.err
- c.mu.Unlock()
- return err
-}
-
-type stringer interface {
- String() string
-}
-
-func contextName(c Context) string {
- if s, ok := c.(stringer); ok {
- return s.String()
- }
- return reflectlite.TypeOf(c).String()
-}
-
-func (c *cancelCtx) String() string {
- return contextName(c.Context) + ".WithCancel"
-}
-
-// cancel closes c.done, cancels each of c's children, and, if
-// removeFromParent is true, removes c from its parent's children.
-func (c *cancelCtx) cancel(removeFromParent bool, err error) {
- if err == nil {
- panic("context: internal error: missing cancel error")
- }
- c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return // already canceled
- }
- c.err = err
- d, _ := c.done.Load().(chan struct{})
- if d == nil {
- c.done.Store(closedchan)
- } else {
- close(d)
- }
- for child := range c.children {
- // NOTE: acquiring the child's lock while holding parent's lock.
- child.cancel(false, err)
- }
- c.children = nil
- c.mu.Unlock()
-
- if removeFromParent {
- removeChild(c.Context, c)
- }
-}
-
-// WithDeadline returns a copy of the parent context with the deadline adjusted
-// to be no later than d. If the parent's deadline is already earlier than d,
-// WithDeadline(parent, d) is semantically equivalent to parent. The returned
-// context's Done channel is closed when the deadline expires, when the returned
-// cancel function is called, or when the parent context's Done channel is
-// closed, whichever happens first.
-//
-// Canceling this context releases resources associated with it, so code should
-// call cancel as soon as the operations running in this Context complete.
-func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) {
- if parent == nil {
- panic("cannot create context from nil parent")
- }
- if cur, ok := parent.Deadline(); ok && cur.Before(d) {
- // The current deadline is already sooner than the new one.
- return WithCancel(parent)
- }
- c := &timerCtx{
- cancelCtx: newCancelCtx(parent),
- deadline: d,
- }
- propagateCancel(parent, c)
- dur := time.Until(d)
- if dur <= 0 {
- c.cancel(true, DeadlineExceeded) // deadline has already passed
- return c, func() { c.cancel(false, Canceled) }
- }
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.err == nil {
- c.timer = time.AfterFunc(dur, func() {
- c.cancel(true, DeadlineExceeded)
- })
- }
- return c, func() { c.cancel(true, Canceled) }
-}
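Editor note: a minimal WithDeadline sketch, mirroring the WithTimeout example later in this file (the 50 ms deadline is illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	d := time.Now().Add(50 * time.Millisecond)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel() // release the timer even if the deadline has passed

	select {
	case <-time.After(time.Second):
		fmt.Println("overslept")
	case <-ctx.Done():
		fmt.Println(ctx.Err()) // context.DeadlineExceeded
	}
}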
-
-// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
-// implement Done and Err. It implements cancel by stopping its timer then
-// delegating to cancelCtx.cancel.
-type timerCtx struct {
- cancelCtx
- timer *time.Timer // Under cancelCtx.mu.
-
- deadline time.Time
-}
-
-func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
- return c.deadline, true
-}
-
-func (c *timerCtx) String() string {
- return contextName(c.cancelCtx.Context) + ".WithDeadline(" +
- c.deadline.String() + " [" +
- time.Until(c.deadline).String() + "])"
-}
-
-func (c *timerCtx) cancel(removeFromParent bool, err error) {
- c.cancelCtx.cancel(false, err)
- if removeFromParent {
- // Remove this timerCtx from its parent cancelCtx's children.
- removeChild(c.cancelCtx.Context, c)
- }
- c.mu.Lock()
- if c.timer != nil {
- c.timer.Stop()
- c.timer = nil
- }
- c.mu.Unlock()
-}
-
-// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
-//
-// Canceling this context releases resources associated with it, so code should
-// call cancel as soon as the operations running in this Context complete:
-//
-// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
-// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
-// defer cancel() // releases resources if slowOperation completes before timeout elapses
-// return slowOperation(ctx)
-// }
-func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
- return WithDeadline(parent, time.Now().Add(timeout))
-}
-
-// WithValue returns a copy of parent in which the value associated with key is
-// val.
-//
-// Use context Values only for request-scoped data that transits processes and
-// APIs, not for passing optional parameters to functions.
-//
-// The provided key must be comparable and should not be of type
-// string or any other built-in type to avoid collisions between
-// packages using context. Users of WithValue should define their own
-// types for keys. To avoid allocating when assigning to an
-// interface{}, context keys often have concrete type
-// struct{}. Alternatively, exported context key variables' static
-// type should be a pointer or interface.
-func WithValue(parent Context, key, val any) Context {
- if parent == nil {
- panic("cannot create context from nil parent")
- }
- if key == nil {
- panic("nil key")
- }
- if !reflectlite.TypeOf(key).Comparable() {
- panic("key is not comparable")
- }
- return &valueCtx{parent, key, val}
-}
-
-// A valueCtx carries a key-value pair. It implements Value for that key and
-// delegates all other calls to the embedded Context.
-type valueCtx struct {
- Context
- key, val any
-}
-
-// stringify tries a bit to stringify v, without using fmt, since we don't
-// want context depending on the unicode tables. This is only used by
-// *valueCtx.String().
-func stringify(v any) string {
- switch s := v.(type) {
- case stringer:
- return s.String()
- case string:
- return s
- }
- return "<not Stringer>"
-}
-
-func (c *valueCtx) String() string {
- return contextName(c.Context) + ".WithValue(type " +
- reflectlite.TypeOf(c.key).String() +
- ", val " + stringify(c.val) + ")"
-}
-
-func (c *valueCtx) Value(key any) any {
- if c.key == key {
- return c.val
- }
- return value(c.Context, key)
-}
-
-func value(c Context, key any) any {
- for {
- switch ctx := c.(type) {
- case *valueCtx:
- if key == ctx.key {
- return ctx.val
- }
- c = ctx.Context
- case *cancelCtx:
- if key == &cancelCtxKey {
- return c
- }
- c = ctx.Context
- case *timerCtx:
- if key == &cancelCtxKey {
- return &ctx.cancelCtx
- }
- c = ctx.Context
- case *emptyCtx:
- return nil
- default:
- return c.Value(key)
- }
- }
-}
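Editor note: the value function above walks outward through the context chain, so the innermost matching key wins. A small sketch of that shadowing (key type and values are illustrative):

package main

import (
	"context"
	"fmt"
)

// key is an unexported key type, as the Context.Value docs above recommend.
type key int

const userKey key = 0

func main() {
	parent := context.WithValue(context.Background(), userKey, "alice")
	child := context.WithValue(parent, userKey, "bob")

	fmt.Println(parent.Value(userKey)) // alice
	fmt.Println(child.Value(userKey))  // bob: the innermost value shadows the parent
}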
diff --git a/contrib/go/_std_1.18/src/crypto/aes/aes_gcm.go b/contrib/go/_std_1.18/src/crypto/aes/aes_gcm.go
deleted file mode 100644
index 98fb6d8e9b..0000000000
--- a/contrib/go/_std_1.18/src/crypto/aes/aes_gcm.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build amd64 || arm64
-
-package aes
-
-import (
- "crypto/cipher"
- subtleoverlap "crypto/internal/subtle"
- "crypto/subtle"
- "errors"
-)
-
-// The following functions are defined in gcm_*.s.
-
-//go:noescape
-func gcmAesInit(productTable *[256]byte, ks []uint32)
-
-//go:noescape
-func gcmAesData(productTable *[256]byte, data []byte, T *[16]byte)
-
-//go:noescape
-func gcmAesEnc(productTable *[256]byte, dst, src []byte, ctr, T *[16]byte, ks []uint32)
-
-//go:noescape
-func gcmAesDec(productTable *[256]byte, dst, src []byte, ctr, T *[16]byte, ks []uint32)
-
-//go:noescape
-func gcmAesFinish(productTable *[256]byte, tagMask, T *[16]byte, pLen, dLen uint64)
-
-const (
- gcmBlockSize = 16
- gcmTagSize = 16
- gcmMinimumTagSize = 12 // NIST SP 800-38D recommends tags with 12 or more bytes.
- gcmStandardNonceSize = 12
-)
-
-var errOpen = errors.New("cipher: message authentication failed")
-
-// aesCipherGCM implements crypto/cipher.gcmAble so that crypto/cipher.NewGCM
-// will use the optimised implementation in this file when possible. Instances
- // of this type only exist when both supportsAES and supportsGFMUL are true
- // (see cipher_asm.go).
-type aesCipherGCM struct {
- aesCipherAsm
-}
-
-// Assert that aesCipherGCM implements the gcmAble interface.
-var _ gcmAble = (*aesCipherGCM)(nil)
-
-// NewGCM returns the AES cipher wrapped in Galois Counter Mode. This is only
-// called by crypto/cipher.NewGCM via the gcmAble interface.
-func (c *aesCipherGCM) NewGCM(nonceSize, tagSize int) (cipher.AEAD, error) {
- g := &gcmAsm{ks: c.enc, nonceSize: nonceSize, tagSize: tagSize}
- gcmAesInit(&g.productTable, g.ks)
- return g, nil
-}
-
-type gcmAsm struct {
- // ks is the key schedule, the length of which depends on the size of
- // the AES key.
- ks []uint32
- // productTable contains pre-computed multiples of the binary-field
- // element used in GHASH.
- productTable [256]byte
- // nonceSize contains the expected size of the nonce, in bytes.
- nonceSize int
- // tagSize contains the size of the tag, in bytes.
- tagSize int
-}
-
-func (g *gcmAsm) NonceSize() int {
- return g.nonceSize
-}
-
-func (g *gcmAsm) Overhead() int {
- return g.tagSize
-}
-
-// sliceForAppend takes a slice and a requested number of bytes. It returns a
-// slice with the contents of the given slice followed by that many bytes and a
-// second slice that aliases into it and contains only the extra bytes. If the
-// original slice has sufficient capacity then no allocation is performed.
-func sliceForAppend(in []byte, n int) (head, tail []byte) {
- if total := len(in) + n; cap(in) >= total {
- head = in[:total]
- } else {
- head = make([]byte, total)
- copy(head, in)
- }
- tail = head[len(in):]
- return
-}
-
-// Seal encrypts and authenticates plaintext. See the cipher.AEAD interface for
-// details.
-func (g *gcmAsm) Seal(dst, nonce, plaintext, data []byte) []byte {
- if len(nonce) != g.nonceSize {
- panic("crypto/cipher: incorrect nonce length given to GCM")
- }
- if uint64(len(plaintext)) > ((1<<32)-2)*BlockSize {
- panic("crypto/cipher: message too large for GCM")
- }
-
- var counter, tagMask [gcmBlockSize]byte
-
- if len(nonce) == gcmStandardNonceSize {
- // Init counter to nonce||1
- copy(counter[:], nonce)
- counter[gcmBlockSize-1] = 1
- } else {
- // Otherwise counter = GHASH(nonce)
- gcmAesData(&g.productTable, nonce, &counter)
- gcmAesFinish(&g.productTable, &tagMask, &counter, uint64(len(nonce)), uint64(0))
- }
-
- encryptBlockAsm(len(g.ks)/4-1, &g.ks[0], &tagMask[0], &counter[0])
-
- var tagOut [gcmTagSize]byte
- gcmAesData(&g.productTable, data, &tagOut)
-
- ret, out := sliceForAppend(dst, len(plaintext)+g.tagSize)
- if subtleoverlap.InexactOverlap(out[:len(plaintext)], plaintext) {
- panic("crypto/cipher: invalid buffer overlap")
- }
- if len(plaintext) > 0 {
- gcmAesEnc(&g.productTable, out, plaintext, &counter, &tagOut, g.ks)
- }
- gcmAesFinish(&g.productTable, &tagMask, &tagOut, uint64(len(plaintext)), uint64(len(data)))
- copy(out[len(plaintext):], tagOut[:])
-
- return ret
-}
-
-// Open authenticates and decrypts ciphertext. See the cipher.AEAD interface
-// for details.
-func (g *gcmAsm) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
- if len(nonce) != g.nonceSize {
- panic("crypto/cipher: incorrect nonce length given to GCM")
- }
- // Sanity check to prevent the authentication from always succeeding if an implementation
- // leaves tagSize uninitialized, for example.
- if g.tagSize < gcmMinimumTagSize {
- panic("crypto/cipher: incorrect GCM tag size")
- }
-
- if len(ciphertext) < g.tagSize {
- return nil, errOpen
- }
- if uint64(len(ciphertext)) > ((1<<32)-2)*uint64(BlockSize)+uint64(g.tagSize) {
- return nil, errOpen
- }
-
- tag := ciphertext[len(ciphertext)-g.tagSize:]
- ciphertext = ciphertext[:len(ciphertext)-g.tagSize]
-
- // See GCM spec, section 7.1.
- var counter, tagMask [gcmBlockSize]byte
-
- if len(nonce) == gcmStandardNonceSize {
- // Init counter to nonce||1
- copy(counter[:], nonce)
- counter[gcmBlockSize-1] = 1
- } else {
- // Otherwise counter = GHASH(nonce)
- gcmAesData(&g.productTable, nonce, &counter)
- gcmAesFinish(&g.productTable, &tagMask, &counter, uint64(len(nonce)), uint64(0))
- }
-
- encryptBlockAsm(len(g.ks)/4-1, &g.ks[0], &tagMask[0], &counter[0])
-
- var expectedTag [gcmTagSize]byte
- gcmAesData(&g.productTable, data, &expectedTag)
-
- ret, out := sliceForAppend(dst, len(ciphertext))
- if subtleoverlap.InexactOverlap(out, ciphertext) {
- panic("crypto/cipher: invalid buffer overlap")
- }
- if len(ciphertext) > 0 {
- gcmAesDec(&g.productTable, out, ciphertext, &counter, &expectedTag, g.ks)
- }
- gcmAesFinish(&g.productTable, &tagMask, &expectedTag, uint64(len(ciphertext)), uint64(len(data)))
-
- if subtle.ConstantTimeCompare(expectedTag[:g.tagSize], tag) != 1 {
- for i := range out {
- out[i] = 0
- }
- return nil, errOpen
- }
-
- return ret, nil
-}
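Editor note: callers never reach this file directly; they go through crypto/cipher's AEAD interface, which dispatches here via gcmAble on capable hardware. A minimal round-trip sketch using that public API (key and nonce are generated fresh, since GCM requires a unique nonce per key):

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
)

func main() {
	key := make([]byte, 16) // 16/24/32 bytes select AES-128/192/256
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		panic(err)
	}

	nonce := make([]byte, aead.NonceSize()) // must be unique per key
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}

	ct := aead.Seal(nil, nonce, []byte("hello"), nil)
	pt, err := aead.Open(nil, nonce, ct, nil)
	if err != nil {
		panic(err) // "cipher: message authentication failed" on tampering
	}
	fmt.Printf("%s\n", pt)
}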
diff --git a/contrib/go/_std_1.18/src/crypto/aes/cipher.go b/contrib/go/_std_1.18/src/crypto/aes/cipher.go
deleted file mode 100644
index bb93fbb36e..0000000000
--- a/contrib/go/_std_1.18/src/crypto/aes/cipher.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package aes
-
-import (
- "crypto/cipher"
- "crypto/internal/subtle"
- "strconv"
-)
-
-// The AES block size in bytes.
-const BlockSize = 16
-
-// A cipher is an instance of AES encryption using a particular key.
-type aesCipher struct {
- enc []uint32
- dec []uint32
-}
-
-type KeySizeError int
-
-func (k KeySizeError) Error() string {
- return "crypto/aes: invalid key size " + strconv.Itoa(int(k))
-}
-
-// NewCipher creates and returns a new cipher.Block.
-// The key argument should be the AES key,
-// either 16, 24, or 32 bytes to select
-// AES-128, AES-192, or AES-256.
-func NewCipher(key []byte) (cipher.Block, error) {
- k := len(key)
- switch k {
- default:
- return nil, KeySizeError(k)
- case 16, 24, 32:
- break
- }
- return newCipher(key)
-}
-
-// newCipherGeneric creates and returns a new cipher.Block
-// implemented in pure Go.
-func newCipherGeneric(key []byte) (cipher.Block, error) {
- n := len(key) + 28
- c := aesCipher{make([]uint32, n), make([]uint32, n)}
- expandKeyGo(key, c.enc, c.dec)
- return &c, nil
-}
-
-func (c *aesCipher) BlockSize() int { return BlockSize }
-
-func (c *aesCipher) Encrypt(dst, src []byte) {
- if len(src) < BlockSize {
- panic("crypto/aes: input not full block")
- }
- if len(dst) < BlockSize {
- panic("crypto/aes: output not full block")
- }
- if subtle.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
- panic("crypto/aes: invalid buffer overlap")
- }
- encryptBlockGo(c.enc, dst, src)
-}
-
-func (c *aesCipher) Decrypt(dst, src []byte) {
- if len(src) < BlockSize {
- panic("crypto/aes: input not full block")
- }
- if len(dst) < BlockSize {
- panic("crypto/aes: output not full block")
- }
- if subtle.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
- panic("crypto/aes: invalid buffer overlap")
- }
- decryptBlockGo(c.dec, dst, src)
-}
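Editor note: as the NewCipher doc says, the key length alone selects the AES variant, and any other length yields KeySizeError. A quick sketch:

package main

import (
	"crypto/aes"
	"fmt"
)

func main() {
	for _, n := range []int{15, 16, 24, 32} {
		_, err := aes.NewCipher(make([]byte, n))
		fmt.Printf("%2d-byte key: err = %v\n", n, err)
	}
	// 15-byte key: err = crypto/aes: invalid key size 15
	// the 16-, 24- and 32-byte keys succeed (AES-128/192/256)
}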
diff --git a/contrib/go/_std_1.18/src/crypto/aes/cipher_asm.go b/contrib/go/_std_1.18/src/crypto/aes/cipher_asm.go
deleted file mode 100644
index c948f1a579..0000000000
--- a/contrib/go/_std_1.18/src/crypto/aes/cipher_asm.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build amd64 || arm64
-
-package aes
-
-import (
- "crypto/cipher"
- "crypto/internal/subtle"
- "internal/cpu"
-)
-
-// defined in asm_*.s
-
-//go:noescape
-func encryptBlockAsm(nr int, xk *uint32, dst, src *byte)
-
-//go:noescape
-func decryptBlockAsm(nr int, xk *uint32, dst, src *byte)
-
-//go:noescape
-func expandKeyAsm(nr int, key *byte, enc *uint32, dec *uint32)
-
-type aesCipherAsm struct {
- aesCipher
-}
-
-var supportsAES = cpu.X86.HasAES || cpu.ARM64.HasAES
-var supportsGFMUL = cpu.X86.HasPCLMULQDQ || cpu.ARM64.HasPMULL
-
-func newCipher(key []byte) (cipher.Block, error) {
- if !supportsAES {
- return newCipherGeneric(key)
- }
- n := len(key) + 28
- c := aesCipherAsm{aesCipher{make([]uint32, n), make([]uint32, n)}}
- var rounds int
- switch len(key) {
- case 128 / 8:
- rounds = 10
- case 192 / 8:
- rounds = 12
- case 256 / 8:
- rounds = 14
- }
-
- expandKeyAsm(rounds, &key[0], &c.enc[0], &c.dec[0])
- if supportsAES && supportsGFMUL {
- return &aesCipherGCM{c}, nil
- }
- return &c, nil
-}
-
-func (c *aesCipherAsm) BlockSize() int { return BlockSize }
-
-func (c *aesCipherAsm) Encrypt(dst, src []byte) {
- if len(src) < BlockSize {
- panic("crypto/aes: input not full block")
- }
- if len(dst) < BlockSize {
- panic("crypto/aes: output not full block")
- }
- if subtle.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
- panic("crypto/aes: invalid buffer overlap")
- }
- encryptBlockAsm(len(c.enc)/4-1, &c.enc[0], &dst[0], &src[0])
-}
-
-func (c *aesCipherAsm) Decrypt(dst, src []byte) {
- if len(src) < BlockSize {
- panic("crypto/aes: input not full block")
- }
- if len(dst) < BlockSize {
- panic("crypto/aes: output not full block")
- }
- if subtle.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
- panic("crypto/aes: invalid buffer overlap")
- }
- decryptBlockAsm(len(c.dec)/4-1, &c.dec[0], &dst[0], &src[0])
-}
-
-// expandKey is used by BenchmarkExpand to ensure that the asm implementation
-// of key expansion is used for the benchmark when it is available.
-func expandKey(key []byte, enc, dec []uint32) {
- if supportsAES {
- rounds := 10 // rounds needed for AES128
- switch len(key) {
- case 192 / 8:
- rounds = 12
- case 256 / 8:
- rounds = 14
- }
- expandKeyAsm(rounds, &key[0], &enc[0], &dec[0])
- } else {
- expandKeyGo(key, enc, dec)
- }
-}
diff --git a/contrib/go/_std_1.18/src/crypto/cipher/cbc.go b/contrib/go/_std_1.18/src/crypto/cipher/cbc.go
deleted file mode 100644
index 0d07192e29..0000000000
--- a/contrib/go/_std_1.18/src/crypto/cipher/cbc.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Cipher block chaining (CBC) mode.
-
-// CBC provides confidentiality by xoring (chaining) each plaintext block
-// with the previous ciphertext block before applying the block cipher.
-
-// See NIST SP 800-38A, pp 10-11
-
-package cipher
-
-import "crypto/internal/subtle"
-
-type cbc struct {
- b Block
- blockSize int
- iv []byte
- tmp []byte
-}
-
-func newCBC(b Block, iv []byte) *cbc {
- return &cbc{
- b: b,
- blockSize: b.BlockSize(),
- iv: dup(iv),
- tmp: make([]byte, b.BlockSize()),
- }
-}
-
-type cbcEncrypter cbc
-
-// cbcEncAble is an interface implemented by ciphers that have a specific
-// optimized implementation of CBC encryption, like crypto/aes.
-// NewCBCEncrypter will check for this interface and return the specific
-// BlockMode if found.
-type cbcEncAble interface {
- NewCBCEncrypter(iv []byte) BlockMode
-}
-
-// NewCBCEncrypter returns a BlockMode which encrypts in cipher block chaining
-// mode, using the given Block. The length of iv must be the same as the
-// Block's block size.
-func NewCBCEncrypter(b Block, iv []byte) BlockMode {
- if len(iv) != b.BlockSize() {
- panic("cipher.NewCBCEncrypter: IV length must equal block size")
- }
- if cbc, ok := b.(cbcEncAble); ok {
- return cbc.NewCBCEncrypter(iv)
- }
- return (*cbcEncrypter)(newCBC(b, iv))
-}
-
-func (x *cbcEncrypter) BlockSize() int { return x.blockSize }
-
-func (x *cbcEncrypter) CryptBlocks(dst, src []byte) {
- if len(src)%x.blockSize != 0 {
- panic("crypto/cipher: input not full blocks")
- }
- if len(dst) < len(src) {
- panic("crypto/cipher: output smaller than input")
- }
- if subtle.InexactOverlap(dst[:len(src)], src) {
- panic("crypto/cipher: invalid buffer overlap")
- }
-
- iv := x.iv
-
- for len(src) > 0 {
- // Write the xor to dst, then encrypt in place.
- xorBytes(dst[:x.blockSize], src[:x.blockSize], iv)
- x.b.Encrypt(dst[:x.blockSize], dst[:x.blockSize])
-
- // Move to the next block with this block as the next iv.
- iv = dst[:x.blockSize]
- src = src[x.blockSize:]
- dst = dst[x.blockSize:]
- }
-
- // Save the iv for the next CryptBlocks call.
- copy(x.iv, iv)
-}
-
-func (x *cbcEncrypter) SetIV(iv []byte) {
- if len(iv) != len(x.iv) {
- panic("cipher: incorrect length IV")
- }
- copy(x.iv, iv)
-}
-
-type cbcDecrypter cbc
-
-// cbcDecAble is an interface implemented by ciphers that have a specific
-// optimized implementation of CBC decryption, like crypto/aes.
-// NewCBCDecrypter will check for this interface and return the specific
-// BlockMode if found.
-type cbcDecAble interface {
- NewCBCDecrypter(iv []byte) BlockMode
-}
-
-// NewCBCDecrypter returns a BlockMode which decrypts in cipher block chaining
-// mode, using the given Block. The length of iv must be the same as the
-// Block's block size and must match the iv used to encrypt the data.
-func NewCBCDecrypter(b Block, iv []byte) BlockMode {
- if len(iv) != b.BlockSize() {
- panic("cipher.NewCBCDecrypter: IV length must equal block size")
- }
- if cbc, ok := b.(cbcDecAble); ok {
- return cbc.NewCBCDecrypter(iv)
- }
- return (*cbcDecrypter)(newCBC(b, iv))
-}
-
-func (x *cbcDecrypter) BlockSize() int { return x.blockSize }
-
-func (x *cbcDecrypter) CryptBlocks(dst, src []byte) {
- if len(src)%x.blockSize != 0 {
- panic("crypto/cipher: input not full blocks")
- }
- if len(dst) < len(src) {
- panic("crypto/cipher: output smaller than input")
- }
- if subtle.InexactOverlap(dst[:len(src)], src) {
- panic("crypto/cipher: invalid buffer overlap")
- }
- if len(src) == 0 {
- return
- }
-
- // For each block, we need to xor the decrypted data with the previous block's ciphertext (the iv).
- // To avoid making a copy each time, we loop over the blocks BACKWARDS.
- end := len(src)
- start := end - x.blockSize
- prev := start - x.blockSize
-
- // Copy the last block of ciphertext in preparation as the new iv.
- copy(x.tmp, src[start:end])
-
- // Loop over all but the first block.
- for start > 0 {
- x.b.Decrypt(dst[start:end], src[start:end])
- xorBytes(dst[start:end], dst[start:end], src[prev:start])
-
- end = start
- start = prev
- prev -= x.blockSize
- }
-
- // The first block is special because it uses the saved iv.
- x.b.Decrypt(dst[start:end], src[start:end])
- xorBytes(dst[start:end], dst[start:end], x.iv)
-
- // Set the new iv to the first block we copied earlier.
- x.iv, x.tmp = x.tmp, x.iv
-}
-
-func (x *cbcDecrypter) SetIV(iv []byte) {
- if len(iv) != len(x.iv) {
- panic("cipher: incorrect length IV")
- }
- copy(x.iv, iv)
-}
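Editor note: a minimal CBC round-trip sketch using the constructors above. Two assumptions to flag: the message is already block-aligned (real code must pad, e.g. PKCS#7), and CBC alone provides no authentication, so real protocols pair it with a MAC:

package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
)

func main() {
	key := make([]byte, 16)
	iv := make([]byte, aes.BlockSize)
	rand.Read(key) // error handling elided for brevity
	rand.Read(iv)  // the IV must be random but may be public

	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}

	plaintext := []byte("exactly 16 bytes") // CBC needs whole blocks

	ct := make([]byte, len(plaintext))
	cipher.NewCBCEncrypter(block, iv).CryptBlocks(ct, plaintext)

	pt := make([]byte, len(ct))
	cipher.NewCBCDecrypter(block, iv).CryptBlocks(pt, ct)

	fmt.Println(bytes.Equal(pt, plaintext)) // true
}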
diff --git a/contrib/go/_std_1.18/src/crypto/cipher/gcm.go b/contrib/go/_std_1.18/src/crypto/cipher/gcm.go
deleted file mode 100644
index ba0af84a9d..0000000000
--- a/contrib/go/_std_1.18/src/crypto/cipher/gcm.go
+++ /dev/null
@@ -1,426 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cipher
-
-import (
- subtleoverlap "crypto/internal/subtle"
- "crypto/subtle"
- "encoding/binary"
- "errors"
-)
-
-// AEAD is a cipher mode providing authenticated encryption with associated
-// data. For a description of the methodology, see
-// https://en.wikipedia.org/wiki/Authenticated_encryption
-type AEAD interface {
- // NonceSize returns the size of the nonce that must be passed to Seal
- // and Open.
- NonceSize() int
-
- // Overhead returns the maximum difference between the lengths of a
- // plaintext and its ciphertext.
- Overhead() int
-
- // Seal encrypts and authenticates plaintext, authenticates the
- // additional data and appends the result to dst, returning the updated
- // slice. The nonce must be NonceSize() bytes long and unique for all
- // time, for a given key.
- //
- // To reuse plaintext's storage for the encrypted output, use plaintext[:0]
- // as dst. Otherwise, the remaining capacity of dst must not overlap plaintext.
- Seal(dst, nonce, plaintext, additionalData []byte) []byte
-
- // Open decrypts and authenticates ciphertext, authenticates the
- // additional data and, if successful, appends the resulting plaintext
- // to dst, returning the updated slice. The nonce must be NonceSize()
- // bytes long and both it and the additional data must match the
- // value passed to Seal.
- //
- // To reuse ciphertext's storage for the decrypted output, use ciphertext[:0]
- // as dst. Otherwise, the remaining capacity of dst must not overlap plaintext.
- //
- // Even if the function fails, the contents of dst, up to its capacity,
- // may be overwritten.
- Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error)
-}
-
-// gcmAble is an interface implemented by ciphers that have a specific optimized
-// implementation of GCM, like crypto/aes. NewGCM will check for this interface
-// and return the specific AEAD if found.
-type gcmAble interface {
- NewGCM(nonceSize, tagSize int) (AEAD, error)
-}
-
-// gcmFieldElement represents a value in GF(2¹²⁸). In order to reflect the GCM
-// standard and make binary.BigEndian suitable for marshaling these values, the
-// bits are stored in big endian order. For example:
-// the coefficient of x⁰ can be obtained by v.low >> 63.
-// the coefficient of x⁶³ can be obtained by v.low & 1.
-// the coefficient of x⁶⁴ can be obtained by v.high >> 63.
-// the coefficient of x¹²⁷ can be obtained by v.high & 1.
-type gcmFieldElement struct {
- low, high uint64
-}
-
-// gcm represents a Galois Counter Mode with a specific key. See
-// https://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf
-type gcm struct {
- cipher Block
- nonceSize int
- tagSize int
- // productTable contains the first sixteen powers of the key, H.
- // However, they are in bit reversed order. See NewGCMWithNonceSize.
- productTable [16]gcmFieldElement
-}
-
-// NewGCM returns the given 128-bit block cipher wrapped in Galois Counter Mode
-// with the standard nonce length.
-//
-// In general, the GHASH operation performed by this implementation of GCM is not constant-time.
-// An exception is when the underlying Block was created by aes.NewCipher
-// on systems with hardware support for AES. See the crypto/aes package documentation for details.
-func NewGCM(cipher Block) (AEAD, error) {
- return newGCMWithNonceAndTagSize(cipher, gcmStandardNonceSize, gcmTagSize)
-}
-
-// NewGCMWithNonceSize returns the given 128-bit block cipher wrapped in Galois
-// Counter Mode, which accepts nonces of the given length. The length must not
-// be zero.
-//
-// Only use this function if you require compatibility with an existing
-// cryptosystem that uses non-standard nonce lengths. All other users should use
-// NewGCM, which is faster and more resistant to misuse.
-func NewGCMWithNonceSize(cipher Block, size int) (AEAD, error) {
- return newGCMWithNonceAndTagSize(cipher, size, gcmTagSize)
-}
-
-// NewGCMWithTagSize returns the given 128-bit block cipher wrapped in Galois
-// Counter Mode, which generates tags with the given length.
-//
-// Tag sizes between 12 and 16 bytes are allowed.
-//
-// Only use this function if you require compatibility with an existing
-// cryptosystem that uses non-standard tag lengths. All other users should use
-// NewGCM, which is more resistant to misuse.
-func NewGCMWithTagSize(cipher Block, tagSize int) (AEAD, error) {
- return newGCMWithNonceAndTagSize(cipher, gcmStandardNonceSize, tagSize)
-}
-
-func newGCMWithNonceAndTagSize(cipher Block, nonceSize, tagSize int) (AEAD, error) {
- if tagSize < gcmMinimumTagSize || tagSize > gcmBlockSize {
- return nil, errors.New("cipher: incorrect tag size given to GCM")
- }
-
- if nonceSize <= 0 {
- return nil, errors.New("cipher: the nonce can't have zero length, or the security of the key will be immediately compromised")
- }
-
- if cipher, ok := cipher.(gcmAble); ok {
- return cipher.NewGCM(nonceSize, tagSize)
- }
-
- if cipher.BlockSize() != gcmBlockSize {
- return nil, errors.New("cipher: NewGCM requires 128-bit block cipher")
- }
-
- var key [gcmBlockSize]byte
- cipher.Encrypt(key[:], key[:])
-
- g := &gcm{cipher: cipher, nonceSize: nonceSize, tagSize: tagSize}
-
- // We precompute 16 multiples of |key|. However, when we do lookups
- // into this table we'll be using bits from a field element and
- // therefore the bits will be in the reverse order. So normally one
- // would expect, say, 4*key to be in index 4 of the table but due to
- // this bit ordering it will actually be in index 0010 (base 2) = 2.
- x := gcmFieldElement{
- binary.BigEndian.Uint64(key[:8]),
- binary.BigEndian.Uint64(key[8:]),
- }
- g.productTable[reverseBits(1)] = x
-
- for i := 2; i < 16; i += 2 {
- g.productTable[reverseBits(i)] = gcmDouble(&g.productTable[reverseBits(i/2)])
- g.productTable[reverseBits(i+1)] = gcmAdd(&g.productTable[reverseBits(i)], &x)
- }
-
- return g, nil
-}
-
-const (
- gcmBlockSize = 16
- gcmTagSize = 16
- gcmMinimumTagSize = 12 // NIST SP 800-38D recommends tags with 12 or more bytes.
- gcmStandardNonceSize = 12
-)
-
-func (g *gcm) NonceSize() int {
- return g.nonceSize
-}
-
-func (g *gcm) Overhead() int {
- return g.tagSize
-}
-
-func (g *gcm) Seal(dst, nonce, plaintext, data []byte) []byte {
- if len(nonce) != g.nonceSize {
- panic("crypto/cipher: incorrect nonce length given to GCM")
- }
- if uint64(len(plaintext)) > ((1<<32)-2)*uint64(g.cipher.BlockSize()) {
- panic("crypto/cipher: message too large for GCM")
- }
-
- ret, out := sliceForAppend(dst, len(plaintext)+g.tagSize)
- if subtleoverlap.InexactOverlap(out, plaintext) {
- panic("crypto/cipher: invalid buffer overlap")
- }
-
- var counter, tagMask [gcmBlockSize]byte
- g.deriveCounter(&counter, nonce)
-
- g.cipher.Encrypt(tagMask[:], counter[:])
- gcmInc32(&counter)
-
- g.counterCrypt(out, plaintext, &counter)
-
- var tag [gcmTagSize]byte
- g.auth(tag[:], out[:len(plaintext)], data, &tagMask)
- copy(out[len(plaintext):], tag[:])
-
- return ret
-}
-
-var errOpen = errors.New("cipher: message authentication failed")
-
-func (g *gcm) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
- if len(nonce) != g.nonceSize {
- panic("crypto/cipher: incorrect nonce length given to GCM")
- }
- // Sanity check to prevent the authentication from always succeeding if an implementation
- // leaves tagSize uninitialized, for example.
- if g.tagSize < gcmMinimumTagSize {
- panic("crypto/cipher: incorrect GCM tag size")
- }
-
- if len(ciphertext) < g.tagSize {
- return nil, errOpen
- }
- if uint64(len(ciphertext)) > ((1<<32)-2)*uint64(g.cipher.BlockSize())+uint64(g.tagSize) {
- return nil, errOpen
- }
-
- tag := ciphertext[len(ciphertext)-g.tagSize:]
- ciphertext = ciphertext[:len(ciphertext)-g.tagSize]
-
- var counter, tagMask [gcmBlockSize]byte
- g.deriveCounter(&counter, nonce)
-
- g.cipher.Encrypt(tagMask[:], counter[:])
- gcmInc32(&counter)
-
- var expectedTag [gcmTagSize]byte
- g.auth(expectedTag[:], ciphertext, data, &tagMask)
-
- ret, out := sliceForAppend(dst, len(ciphertext))
- if subtleoverlap.InexactOverlap(out, ciphertext) {
- panic("crypto/cipher: invalid buffer overlap")
- }
-
- if subtle.ConstantTimeCompare(expectedTag[:g.tagSize], tag) != 1 {
- // The AESNI code decrypts and authenticates concurrently, and
- // so overwrites dst in the event of a tag mismatch. That
- // behavior is mimicked here in order to be consistent across
- // platforms.
- for i := range out {
- out[i] = 0
- }
- return nil, errOpen
- }
-
- g.counterCrypt(out, ciphertext, &counter)
-
- return ret, nil
-}
-
-// reverseBits reverses the order of the bits of the 4-bit number i.
-func reverseBits(i int) int {
- i = ((i << 2) & 0xc) | ((i >> 2) & 0x3)
- i = ((i << 1) & 0xa) | ((i >> 1) & 0x5)
- return i
-}
-
-// gcmAdd adds two elements of GF(2¹²⁸) and returns the sum.
-func gcmAdd(x, y *gcmFieldElement) gcmFieldElement {
- // Addition in a characteristic 2 field is just XOR.
- return gcmFieldElement{x.low ^ y.low, x.high ^ y.high}
-}
-
-// gcmDouble returns the result of doubling an element of GF(2¹²⁸).
-func gcmDouble(x *gcmFieldElement) (double gcmFieldElement) {
- msbSet := x.high&1 == 1
-
- // Because of the bit-ordering, doubling is actually a right shift.
- double.high = x.high >> 1
- double.high |= x.low << 63
- double.low = x.low >> 1
-
- // If the most-significant bit was set before shifting then it,
- // conceptually, becomes a term of x^128. This is greater than the
- // irreducible polynomial so the result has to be reduced. The
- // irreducible polynomial is 1+x+x^2+x^7+x^128. We can subtract that to
- // eliminate the term at x^128 which also means subtracting the other
- // four terms. In characteristic 2 fields, subtraction == addition ==
- // XOR.
- if msbSet {
- double.low ^= 0xe100000000000000
- }
-
- return
-}
-
-var gcmReductionTable = []uint16{
- 0x0000, 0x1c20, 0x3840, 0x2460, 0x7080, 0x6ca0, 0x48c0, 0x54e0,
- 0xe100, 0xfd20, 0xd940, 0xc560, 0x9180, 0x8da0, 0xa9c0, 0xb5e0,
-}
-
-// mul sets y to y*H, where H is the GCM key, fixed during NewGCMWithNonceSize.
-func (g *gcm) mul(y *gcmFieldElement) {
- var z gcmFieldElement
-
- for i := 0; i < 2; i++ {
- word := y.high
- if i == 1 {
- word = y.low
- }
-
- // Multiplication works by multiplying z by 16 and adding in
- // one of the precomputed multiples of H.
- for j := 0; j < 64; j += 4 {
- msw := z.high & 0xf
- z.high >>= 4
- z.high |= z.low << 60
- z.low >>= 4
- z.low ^= uint64(gcmReductionTable[msw]) << 48
-
- // the values in |table| are ordered for
- // little-endian bit positions. See the comment
- // in NewGCMWithNonceSize.
- t := &g.productTable[word&0xf]
-
- z.low ^= t.low
- z.high ^= t.high
- word >>= 4
- }
- }
-
- *y = z
-}
-
-// updateBlocks extends y with more polynomial terms from blocks, based on
-// Horner's rule. There must be a multiple of gcmBlockSize bytes in blocks.
-func (g *gcm) updateBlocks(y *gcmFieldElement, blocks []byte) {
- for len(blocks) > 0 {
- y.low ^= binary.BigEndian.Uint64(blocks)
- y.high ^= binary.BigEndian.Uint64(blocks[8:])
- g.mul(y)
- blocks = blocks[gcmBlockSize:]
- }
-}
-
-// update extends y with more polynomial terms from data. If data is not a
-// multiple of gcmBlockSize bytes long then the remainder is zero padded.
-func (g *gcm) update(y *gcmFieldElement, data []byte) {
- fullBlocks := (len(data) >> 4) << 4
- g.updateBlocks(y, data[:fullBlocks])
-
- if len(data) != fullBlocks {
- var partialBlock [gcmBlockSize]byte
- copy(partialBlock[:], data[fullBlocks:])
- g.updateBlocks(y, partialBlock[:])
- }
-}
-
-// gcmInc32 treats the final four bytes of counterBlock as a big-endian value
-// and increments it.
-func gcmInc32(counterBlock *[16]byte) {
- ctr := counterBlock[len(counterBlock)-4:]
- binary.BigEndian.PutUint32(ctr, binary.BigEndian.Uint32(ctr)+1)
-}
-
-// sliceForAppend takes a slice and a requested number of bytes. It returns a
-// slice with the contents of the given slice followed by that many bytes and a
-// second slice that aliases into it and contains only the extra bytes. If the
-// original slice has sufficient capacity then no allocation is performed.
-func sliceForAppend(in []byte, n int) (head, tail []byte) {
- if total := len(in) + n; cap(in) >= total {
- head = in[:total]
- } else {
- head = make([]byte, total)
- copy(head, in)
- }
- tail = head[len(in):]
- return
-}
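Editor note: sliceForAppend behaves like a two-result append: when the destination has spare capacity, no allocation occurs and the returned head aliases the input. A standalone sketch of the same helper:

package main

import "fmt"

// sliceForAppend mirrors the helper above: it extends in by n bytes, reusing
// in's spare capacity when possible, and returns both the full slice and the
// freshly added tail.
func sliceForAppend(in []byte, n int) (head, tail []byte) {
	if total := len(in) + n; cap(in) >= total {
		head = in[:total]
	} else {
		head = make([]byte, total)
		copy(head, in)
	}
	tail = head[len(in):]
	return
}

func main() {
	buf := make([]byte, 3, 16) // spare capacity, so no allocation below
	head, tail := sliceForAppend(buf, 4)
	fmt.Println(len(head), len(tail)) // 7 4
	fmt.Println(&head[0] == &buf[0])  // true: the original storage was reused
}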
-
-// counterCrypt crypts in to out using g.cipher in counter mode.
-func (g *gcm) counterCrypt(out, in []byte, counter *[gcmBlockSize]byte) {
- var mask [gcmBlockSize]byte
-
- for len(in) >= gcmBlockSize {
- g.cipher.Encrypt(mask[:], counter[:])
- gcmInc32(counter)
-
- xorWords(out, in, mask[:])
- out = out[gcmBlockSize:]
- in = in[gcmBlockSize:]
- }
-
- if len(in) > 0 {
- g.cipher.Encrypt(mask[:], counter[:])
- gcmInc32(counter)
- xorBytes(out, in, mask[:])
- }
-}
-
-// deriveCounter computes the initial GCM counter state from the given nonce.
-// See NIST SP 800-38D, section 7.1. This assumes that counter is filled with
-// zeros on entry.
-func (g *gcm) deriveCounter(counter *[gcmBlockSize]byte, nonce []byte) {
- // GCM has two modes of operation with respect to the initial counter
- // state: a "fast path" for 96-bit (12-byte) nonces, and a "slow path"
- // for nonces of other lengths. For a 96-bit nonce, the nonce, along
- // with a four-byte big-endian counter starting at one, is used
- // directly as the starting counter. For other nonce sizes, the counter
- // is computed by passing it through the GHASH function.
- if len(nonce) == gcmStandardNonceSize {
- copy(counter[:], nonce)
- counter[gcmBlockSize-1] = 1
- } else {
- var y gcmFieldElement
- g.update(&y, nonce)
- y.high ^= uint64(len(nonce)) * 8
- g.mul(&y)
- binary.BigEndian.PutUint64(counter[:8], y.low)
- binary.BigEndian.PutUint64(counter[8:], y.high)
- }
-}
-
-// auth calculates GHASH(ciphertext, additionalData), masks the result with
-// tagMask and writes the result to out.
-func (g *gcm) auth(out, ciphertext, additionalData []byte, tagMask *[gcmTagSize]byte) {
- var y gcmFieldElement
- g.update(&y, additionalData)
- g.update(&y, ciphertext)
-
- y.low ^= uint64(len(additionalData)) * 8
- y.high ^= uint64(len(ciphertext)) * 8
-
- g.mul(&y)
-
- binary.BigEndian.PutUint64(out, y.low)
- binary.BigEndian.PutUint64(out[8:], y.high)
-
- xorWords(out, out, tagMask[:])
-}
diff --git a/contrib/go/_std_1.18/src/crypto/crypto.go b/contrib/go/_std_1.18/src/crypto/crypto.go
deleted file mode 100644
index fe1c0690bc..0000000000
--- a/contrib/go/_std_1.18/src/crypto/crypto.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package crypto collects common cryptographic constants.
-package crypto
-
-import (
- "hash"
- "io"
- "strconv"
-)
-
-// Hash identifies a cryptographic hash function that is implemented in another
-// package.
-type Hash uint
-
-// HashFunc simply returns the value of h so that Hash implements SignerOpts.
-func (h Hash) HashFunc() Hash {
- return h
-}
-
-func (h Hash) String() string {
- switch h {
- case MD4:
- return "MD4"
- case MD5:
- return "MD5"
- case SHA1:
- return "SHA-1"
- case SHA224:
- return "SHA-224"
- case SHA256:
- return "SHA-256"
- case SHA384:
- return "SHA-384"
- case SHA512:
- return "SHA-512"
- case MD5SHA1:
- return "MD5+SHA1"
- case RIPEMD160:
- return "RIPEMD-160"
- case SHA3_224:
- return "SHA3-224"
- case SHA3_256:
- return "SHA3-256"
- case SHA3_384:
- return "SHA3-384"
- case SHA3_512:
- return "SHA3-512"
- case SHA512_224:
- return "SHA-512/224"
- case SHA512_256:
- return "SHA-512/256"
- case BLAKE2s_256:
- return "BLAKE2s-256"
- case BLAKE2b_256:
- return "BLAKE2b-256"
- case BLAKE2b_384:
- return "BLAKE2b-384"
- case BLAKE2b_512:
- return "BLAKE2b-512"
- default:
- return "unknown hash value " + strconv.Itoa(int(h))
- }
-}
-
-const (
- MD4 Hash = 1 + iota // import golang.org/x/crypto/md4
- MD5 // import crypto/md5
- SHA1 // import crypto/sha1
- SHA224 // import crypto/sha256
- SHA256 // import crypto/sha256
- SHA384 // import crypto/sha512
- SHA512 // import crypto/sha512
- MD5SHA1 // no implementation; MD5+SHA1 used for TLS RSA
- RIPEMD160 // import golang.org/x/crypto/ripemd160
- SHA3_224 // import golang.org/x/crypto/sha3
- SHA3_256 // import golang.org/x/crypto/sha3
- SHA3_384 // import golang.org/x/crypto/sha3
- SHA3_512 // import golang.org/x/crypto/sha3
- SHA512_224 // import crypto/sha512
- SHA512_256 // import crypto/sha512
- BLAKE2s_256 // import golang.org/x/crypto/blake2s
- BLAKE2b_256 // import golang.org/x/crypto/blake2b
- BLAKE2b_384 // import golang.org/x/crypto/blake2b
- BLAKE2b_512 // import golang.org/x/crypto/blake2b
- maxHash
-)
-
-var digestSizes = []uint8{
- MD4: 16,
- MD5: 16,
- SHA1: 20,
- SHA224: 28,
- SHA256: 32,
- SHA384: 48,
- SHA512: 64,
- SHA512_224: 28,
- SHA512_256: 32,
- SHA3_224: 28,
- SHA3_256: 32,
- SHA3_384: 48,
- SHA3_512: 64,
- MD5SHA1: 36,
- RIPEMD160: 20,
- BLAKE2s_256: 32,
- BLAKE2b_256: 32,
- BLAKE2b_384: 48,
- BLAKE2b_512: 64,
-}
-
-// Size returns the length, in bytes, of a digest resulting from the given hash
-// function. It doesn't require that the hash function in question be linked
-// into the program.
-func (h Hash) Size() int {
- if h > 0 && h < maxHash {
- return int(digestSizes[h])
- }
- panic("crypto: Size of unknown hash function")
-}
-
-var hashes = make([]func() hash.Hash, maxHash)
-
-// New returns a new hash.Hash calculating the given hash function. New panics
-// if the hash function is not linked into the binary.
-func (h Hash) New() hash.Hash {
- if h > 0 && h < maxHash {
- f := hashes[h]
- if f != nil {
- return f()
- }
- }
- panic("crypto: requested hash function #" + strconv.Itoa(int(h)) + " is unavailable")
-}
-
-// Available reports whether the given hash function is linked into the binary.
-func (h Hash) Available() bool {
- return h < maxHash && hashes[h] != nil
-}
-
-// RegisterHash registers a function that returns a new instance of the given
-// hash function. This is intended to be called from the init function in
-// packages that implement hash functions.
-func RegisterHash(h Hash, f func() hash.Hash) {
- if h >= maxHash {
- panic("crypto: RegisterHash of unknown hash function")
- }
- hashes[h] = f
-}
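Editor note: hash packages register themselves via RegisterHash from their init functions, which is why a blank import is enough to make a Hash Available. A minimal sketch:

package main

import (
	"crypto"
	_ "crypto/sha256" // its init calls RegisterHash for SHA-224 and SHA-256
	"fmt"
)

func main() {
	h := crypto.SHA256
	fmt.Println(h.Available()) // true, thanks to the blank import above
	fmt.Println(h.Size())      // 32; works even when the hash is not linked in

	d := h.New() // would panic without the import
	d.Write([]byte("abc"))
	fmt.Printf("%x\n", d.Sum(nil))
}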
-
-// PublicKey represents a public key using an unspecified algorithm.
-//
-// Although this type is an empty interface for backwards compatibility reasons,
-// all public key types in the standard library implement the following interface
-//
-// interface{
-// Equal(x crypto.PublicKey) bool
-// }
-//
-// which can be used for increased type safety within applications.
-type PublicKey any
-
-// PrivateKey represents a private key using an unspecified algorithm.
-//
-// Although this type is an empty interface for backwards compatibility reasons,
-// all private key types in the standard library implement the following interface
-//
-// interface{
-// Public() crypto.PublicKey
-// Equal(x crypto.PrivateKey) bool
-// }
-//
-// as well as purpose-specific interfaces such as Signer and Decrypter, which
-// can be used for increased type safety within applications.
-type PrivateKey any
-
-// Signer is an interface for an opaque private key that can be used for
-// signing operations. For example, an RSA key kept in a hardware module.
-type Signer interface {
- // Public returns the public key corresponding to the opaque,
- // private key.
- Public() PublicKey
-
- // Sign signs digest with the private key, possibly using entropy from
- // rand. For an RSA key, the resulting signature should be either a
- // PKCS #1 v1.5 or PSS signature (as indicated by opts). For an (EC)DSA
- // key, it should be a DER-serialised, ASN.1 signature structure.
- //
- // Hash implements the SignerOpts interface and, in most cases, one can
- // simply pass in the hash function used as opts. Sign may also attempt
- // to type assert opts to other types in order to obtain algorithm
- // specific values. See the documentation in each package for details.
- //
- // Note that when a signature of a hash of a larger message is needed,
- // the caller is responsible for hashing the larger message and passing
- // the hash (as digest) and the hash function (as opts) to Sign.
- Sign(rand io.Reader, digest []byte, opts SignerOpts) (signature []byte, err error)
-}
-
-// SignerOpts contains options for signing with a Signer.
-type SignerOpts interface {
- // HashFunc returns an identifier for the hash function used to produce
- // the message passed to Signer.Sign, or else zero to indicate that no
- // hashing was done.
- HashFunc() Hash
-}
-
-// Decrypter is an interface for an opaque private key that can be used for
-// asymmetric decryption operations. An example would be an RSA key
-// kept in a hardware module.
-type Decrypter interface {
- // Public returns the public key corresponding to the opaque,
- // private key.
- Public() PublicKey
-
- // Decrypt decrypts msg. The opts argument should be appropriate for
- // the primitive used. See the documentation in each implementation for
- // details.
- Decrypt(rand io.Reader, msg []byte, opts DecrypterOpts) (plaintext []byte, err error)
-}
-
-type DecrypterOpts any
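Editor note: concretely, *ecdsa.PrivateKey (defined later in this diff) satisfies Signer. A minimal sketch; per the Signer docs, the caller hashes the message and passes the digest plus the hash function as SignerOpts:

package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	var signer crypto.Signer = priv // *ecdsa.PrivateKey satisfies Signer

	digest := sha256.Sum256([]byte("message"))
	sig, err := signer.Sign(rand.Reader, digest[:], crypto.SHA256)
	if err != nil {
		panic(err)
	}
	fmt.Printf("DER-encoded ECDSA signature, %d bytes\n", len(sig))
}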
diff --git a/contrib/go/_std_1.18/src/crypto/des/block.go b/contrib/go/_std_1.18/src/crypto/des/block.go
deleted file mode 100644
index cc2888e2c7..0000000000
--- a/contrib/go/_std_1.18/src/crypto/des/block.go
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package des
-
-import (
- "encoding/binary"
- "sync"
-)
-
-func cryptBlock(subkeys []uint64, dst, src []byte, decrypt bool) {
- b := binary.BigEndian.Uint64(src)
- b = permuteInitialBlock(b)
- left, right := uint32(b>>32), uint32(b)
-
- left = (left << 1) | (left >> 31)
- right = (right << 1) | (right >> 31)
-
- if decrypt {
- for i := 0; i < 8; i++ {
- left, right = feistel(left, right, subkeys[15-2*i], subkeys[15-(2*i+1)])
- }
- } else {
- for i := 0; i < 8; i++ {
- left, right = feistel(left, right, subkeys[2*i], subkeys[2*i+1])
- }
- }
-
- left = (left << 31) | (left >> 1)
- right = (right << 31) | (right >> 1)
-
- // switch left & right and perform final permutation
- preOutput := (uint64(right) << 32) | uint64(left)
- binary.BigEndian.PutUint64(dst, permuteFinalBlock(preOutput))
-}
-
-// Encrypt one block from src into dst, using the subkeys.
-func encryptBlock(subkeys []uint64, dst, src []byte) {
- cryptBlock(subkeys, dst, src, false)
-}
-
-// Decrypt one block from src into dst, using the subkeys.
-func decryptBlock(subkeys []uint64, dst, src []byte) {
- cryptBlock(subkeys, dst, src, true)
-}
-
-// DES Feistel function. feistelBox must be initialized via
-// feistelBoxOnce.Do(initFeistelBox) first.
-func feistel(l, r uint32, k0, k1 uint64) (lout, rout uint32) {
- var t uint32
-
- t = r ^ uint32(k0>>32)
- l ^= feistelBox[7][t&0x3f] ^
- feistelBox[5][(t>>8)&0x3f] ^
- feistelBox[3][(t>>16)&0x3f] ^
- feistelBox[1][(t>>24)&0x3f]
-
- t = ((r << 28) | (r >> 4)) ^ uint32(k0)
- l ^= feistelBox[6][(t)&0x3f] ^
- feistelBox[4][(t>>8)&0x3f] ^
- feistelBox[2][(t>>16)&0x3f] ^
- feistelBox[0][(t>>24)&0x3f]
-
- t = l ^ uint32(k1>>32)
- r ^= feistelBox[7][t&0x3f] ^
- feistelBox[5][(t>>8)&0x3f] ^
- feistelBox[3][(t>>16)&0x3f] ^
- feistelBox[1][(t>>24)&0x3f]
-
- t = ((l << 28) | (l >> 4)) ^ uint32(k1)
- r ^= feistelBox[6][(t)&0x3f] ^
- feistelBox[4][(t>>8)&0x3f] ^
- feistelBox[2][(t>>16)&0x3f] ^
- feistelBox[0][(t>>24)&0x3f]
-
- return l, r
-}
-
-// feistelBox[s][16*i+j] contains the output of permutationFunction
-// for sBoxes[s][i][j] << 4*(7-s)
-var feistelBox [8][64]uint32
-
-var feistelBoxOnce sync.Once
-
-// permuteBlock is a general-purpose function to perform DES block permutations.
-func permuteBlock(src uint64, permutation []uint8) (block uint64) {
- for position, n := range permutation {
- bit := (src >> n) & 1
- block |= bit << uint((len(permutation)-1)-position)
- }
- return
-}
-
-func initFeistelBox() {
- for s := range sBoxes {
- for i := 0; i < 4; i++ {
- for j := 0; j < 16; j++ {
- f := uint64(sBoxes[s][i][j]) << (4 * (7 - uint(s)))
- f = permuteBlock(f, permutationFunction[:])
-
- // Row is determined by the 1st and 6th bit.
- // Column is the middle four bits.
- row := uint8(((i & 2) << 4) | i&1)
- col := uint8(j << 1)
- t := row | col
-
- // The rotation formerly performed in the Feistel rounds has been factored out and mixed into feistelBox.
- f = (f << 1) | (f >> 31)
-
- feistelBox[s][t] = uint32(f)
- }
- }
- }
-}
-
-// permuteInitialBlock is equivalent to the permutation defined
-// by initialPermutation.
-func permuteInitialBlock(block uint64) uint64 {
- // block = b7 b6 b5 b4 b3 b2 b1 b0 (8 bytes)
- b1 := block >> 48
- b2 := block << 48
- block ^= b1 ^ b2 ^ b1<<48 ^ b2>>48
-
- // block = b1 b0 b5 b4 b3 b2 b7 b6
- b1 = block >> 32 & 0xff00ff
- b2 = (block & 0xff00ff00)
- block ^= b1<<32 ^ b2 ^ b1<<8 ^ b2<<24 // exchange b0 b4 with b3 b7
-
- // block is now b1 b3 b5 b7 b0 b2 b4 b6, the permutation:
- // ... 8
- // ... 24
- // ... 40
- // ... 56
- // 7 6 5 4 3 2 1 0
- // 23 22 21 20 19 18 17 16
- // ... 32
- // ... 48
-
- // exchange 4,5,6,7 with 32,33,34,35 etc.
- b1 = block & 0x0f0f00000f0f0000
- b2 = block & 0x0000f0f00000f0f0
- block ^= b1 ^ b2 ^ b1>>12 ^ b2<<12
-
- // block is the permutation:
- //
- // [+8] [+40]
- //
- // 7 6 5 4
- // 23 22 21 20
- // 3 2 1 0
- // 19 18 17 16 [+32]
-
- // exchange 0,1,4,5 with 18,19,22,23
- b1 = block & 0x3300330033003300
- b2 = block & 0x00cc00cc00cc00cc
- block ^= b1 ^ b2 ^ b1>>6 ^ b2<<6
-
- // block is the permutation:
- // 15 14
- // 13 12
- // 11 10
- // 9 8
- // 7 6
- // 5 4
- // 3 2
- // 1 0 [+16] [+32] [+64]
-
- // exchange 0,2,4,6 with 9,11,13,15:
- b1 = block & 0xaaaaaaaa55555555
- block ^= b1 ^ b1>>33 ^ b1<<33
-
- // block is the permutation:
- // 6 14 22 30 38 46 54 62
- // 4 12 20 28 36 44 52 60
- // 2 10 18 26 34 42 50 58
- // 0 8 16 24 32 40 48 56
- // 7 15 23 31 39 47 55 63
- // 5 13 21 29 37 45 53 61
- // 3 11 19 27 35 43 51 59
- // 1 9 17 25 33 41 49 57
- return block
-}
-
-// permuteFinalBlock is equivalent to the permutation defined
-// by finalPermutation.
-func permuteFinalBlock(block uint64) uint64 {
- // Perform the same bit exchanges as permuteInitialBlock
- // but in reverse order.
- b1 := block & 0xaaaaaaaa55555555
- block ^= b1 ^ b1>>33 ^ b1<<33
-
- b1 = block & 0x3300330033003300
- b2 := block & 0x00cc00cc00cc00cc
- block ^= b1 ^ b2 ^ b1>>6 ^ b2<<6
-
- b1 = block & 0x0f0f00000f0f0000
- b2 = block & 0x0000f0f00000f0f0
- block ^= b1 ^ b2 ^ b1>>12 ^ b2<<12
-
- b1 = block >> 32 & 0xff00ff
- b2 = (block & 0xff00ff00)
- block ^= b1<<32 ^ b2 ^ b1<<8 ^ b2<<24
-
- b1 = block >> 48
- b2 = block << 48
- block ^= b1 ^ b2 ^ b1<<48 ^ b2>>48
- return block
-}
-
-// ksRotate creates 16 28-bit blocks rotated according
-// to the rotation schedule.
-func ksRotate(in uint32) (out []uint32) {
- out = make([]uint32, 16)
- last := in
- for i := 0; i < 16; i++ {
- // 28-bit circular left shift
- left := (last << (4 + ksRotations[i])) >> 4
- right := (last << 4) >> (32 - ksRotations[i])
- out[i] = left | right
- last = out[i]
- }
- return
-}
-
-// generateSubkeys creates 16 56-bit subkeys from the original key.
-func (c *desCipher) generateSubkeys(keyBytes []byte) {
- feistelBoxOnce.Do(initFeistelBox)
-
- // apply PC1 permutation to key
- key := binary.BigEndian.Uint64(keyBytes)
- permutedKey := permuteBlock(key, permutedChoice1[:])
-
- // rotate halves of permuted key according to the rotation schedule
- leftRotations := ksRotate(uint32(permutedKey >> 28))
- rightRotations := ksRotate(uint32(permutedKey<<4) >> 4)
-
- // generate subkeys
- for i := 0; i < 16; i++ {
- // combine halves to form 56-bit input to PC2
- pc2Input := uint64(leftRotations[i])<<28 | uint64(rightRotations[i])
- // apply PC2 permutation to 7 byte input
- c.subkeys[i] = unpack(permuteBlock(pc2Input, permutedChoice2[:]))
- }
-}
-
-// unpack expands a 48-bit input to 64 bits, with each 6-bit block padded with
-// two extra bits at the top. This keeps the input blocks (four bits each) and
-// the key blocks (six bits each) well aligned without extra shifts or rotations.
-func unpack(x uint64) uint64 {
- var result uint64
-
- result = ((x>>(6*1))&0xff)<<(8*0) |
- ((x>>(6*3))&0xff)<<(8*1) |
- ((x>>(6*5))&0xff)<<(8*2) |
- ((x>>(6*7))&0xff)<<(8*3) |
- ((x>>(6*0))&0xff)<<(8*4) |
- ((x>>(6*2))&0xff)<<(8*5) |
- ((x>>(6*4))&0xff)<<(8*6) |
- ((x>>(6*6))&0xff)<<(8*7)
-
- return result
-}
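Editor note: these block primitives are internal; callers go through the package's cipher.Block constructors. A minimal single-block sketch assuming the standard crypto/des API (single DES is cryptographically broken and shown only to illustrate the block functions above; prefer AES for new designs):

package main

import (
	"crypto/des"
	"fmt"
)

func main() {
	block, err := des.NewCipher([]byte("8bytekey")) // DES keys are exactly 8 bytes
	if err != nil {
		panic(err)
	}

	src := []byte("8bytemsg") // one 8-byte block
	dst := make([]byte, des.BlockSize)
	block.Encrypt(dst, src)

	out := make([]byte, des.BlockSize)
	block.Decrypt(out, dst)
	fmt.Printf("%s\n", out) // 8bytemsg
}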
diff --git a/contrib/go/_std_1.18/src/crypto/ecdsa/ecdsa.go b/contrib/go/_std_1.18/src/crypto/ecdsa/ecdsa.go
deleted file mode 100644
index 9f9a09a884..0000000000
--- a/contrib/go/_std_1.18/src/crypto/ecdsa/ecdsa.go
+++ /dev/null
@@ -1,368 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ecdsa implements the Elliptic Curve Digital Signature Algorithm, as
-// defined in FIPS 186-4 and SEC 1, Version 2.0.
-//
-// Signatures generated by this package are not deterministic, but entropy is
-// mixed with the private key and the message, achieving the same level of
-// security in case of randomness source failure.
-package ecdsa
-
-// [FIPS 186-4] references ANSI X9.62-2005 for the bulk of the ECDSA algorithm.
-// That standard is not freely available, which is a problem in an open source
-// implementation, because not only the implementer, but also any maintainer,
-// contributor, reviewer, auditor, and learner needs access to it. Instead, this
-// package references and follows the equivalent [SEC 1, Version 2.0].
-//
-// [FIPS 186-4]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
-// [SEC 1, Version 2.0]: https://www.secg.org/sec1-v2.pdf
-
-import (
- "crypto"
- "crypto/aes"
- "crypto/cipher"
- "crypto/elliptic"
- "crypto/internal/randutil"
- "crypto/sha512"
- "errors"
- "io"
- "math/big"
-
- "golang.org/x/crypto/cryptobyte"
- "golang.org/x/crypto/cryptobyte/asn1"
-)
-
-// An invertible implements fast inverse in GF(N).
-type invertible interface {
- // Inverse returns the inverse of k mod Params().N.
- Inverse(k *big.Int) *big.Int
-}
-
-// A combinedMult implements fast combined multiplication for verification.
-type combinedMult interface {
- // CombinedMult returns [s1]G + [s2]P where G is the generator.
- CombinedMult(Px, Py *big.Int, s1, s2 []byte) (x, y *big.Int)
-}
-
-const (
- aesIV = "IV for ECDSA CTR"
-)
-
-// PublicKey represents an ECDSA public key.
-type PublicKey struct {
- elliptic.Curve
- X, Y *big.Int
-}
-
-// Any methods implemented on PublicKey might need to also be implemented on
-// PrivateKey, as the latter embeds the former and will expose its methods.
-
-// Equal reports whether pub and x have the same value.
-//
-// Two keys are only considered to have the same value if they have the same Curve value.
-// Note that for example elliptic.P256() and elliptic.P256().Params() are different
-// values, as the latter is a generic, non-constant-time implementation.
-func (pub *PublicKey) Equal(x crypto.PublicKey) bool {
- xx, ok := x.(*PublicKey)
- if !ok {
- return false
- }
- return pub.X.Cmp(xx.X) == 0 && pub.Y.Cmp(xx.Y) == 0 &&
- // Standard library Curve implementations are singletons, so this check
- // will work for those. Other Curves might be equivalent even if not
- // singletons, but there is no definitive way to check for that, and
- // better to err on the side of safety.
- pub.Curve == xx.Curve
-}
-
-// PrivateKey represents an ECDSA private key.
-type PrivateKey struct {
- PublicKey
- D *big.Int
-}
-
-// Public returns the public key corresponding to priv.
-func (priv *PrivateKey) Public() crypto.PublicKey {
- return &priv.PublicKey
-}
-
-// Equal reports whether priv and x have the same value.
-//
-// See PublicKey.Equal for details on how Curve is compared.
-func (priv *PrivateKey) Equal(x crypto.PrivateKey) bool {
- xx, ok := x.(*PrivateKey)
- if !ok {
- return false
- }
- return priv.PublicKey.Equal(&xx.PublicKey) && priv.D.Cmp(xx.D) == 0
-}
-
-// Sign signs digest with priv, reading randomness from rand. The opts argument
-// is not currently used but, in keeping with the crypto.Signer interface,
-// should be the hash function used to digest the message.
-//
-// This method implements crypto.Signer, which is an interface to support keys
-// where the private part is kept in, for example, a hardware module. Common
-// uses can use the SignASN1 function in this package directly.
-func (priv *PrivateKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
- r, s, err := Sign(rand, priv, digest)
- if err != nil {
- return nil, err
- }
-
- var b cryptobyte.Builder
- b.AddASN1(asn1.SEQUENCE, func(b *cryptobyte.Builder) {
- b.AddASN1BigInt(r)
- b.AddASN1BigInt(s)
- })
- return b.Bytes()
-}
-
-var one = new(big.Int).SetInt64(1)
-
-// randFieldElement returns a random element in [1, N-1], where N is the
-// order of the given curve, using the procedure given in FIPS 186-4,
-// Appendix B.5.1.
-func randFieldElement(c elliptic.Curve, rand io.Reader) (k *big.Int, err error) {
- params := c.Params()
- // Note that for P-521 this will actually be 63 bits more than the order, as
- // division rounds down, but the extra bit is inconsequential.
- b := make([]byte, params.BitSize/8+8) // TODO: use params.N.BitLen()
- _, err = io.ReadFull(rand, b)
- if err != nil {
- return
- }
-
- k = new(big.Int).SetBytes(b)
- n := new(big.Int).Sub(params.N, one)
- k.Mod(k, n)
- k.Add(k, one)
- return
-}
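The sketch below isolates the sampling idea used by randFieldElement, under the assumption that oversampling by 64 extra bits makes the bias from the final reduction negligible (randInRange is a hypothetical helper, not part of the package):

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

// randInRange returns a value in [1, N-1]: oversample by 64 extra bits,
// reduce modulo N-1 (making the modular bias negligible), then add one.
func randInRange(N *big.Int) (*big.Int, error) {
	b := make([]byte, (N.BitLen()+7)/8+8)
	if _, err := rand.Read(b); err != nil {
		return nil, err
	}
	k := new(big.Int).SetBytes(b)
	k.Mod(k, new(big.Int).Sub(N, big.NewInt(1)))
	return k.Add(k, big.NewInt(1)), nil
}

func main() {
	N := big.NewInt(1000003)
	k, err := randInRange(N)
	if err != nil {
		panic(err)
	}
	fmt.Println(k.Sign() > 0 && k.Cmp(N) < 0) // true
}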
-
-// GenerateKey generates a public and private key pair.
-func GenerateKey(c elliptic.Curve, rand io.Reader) (*PrivateKey, error) {
- k, err := randFieldElement(c, rand)
- if err != nil {
- return nil, err
- }
-
- priv := new(PrivateKey)
- priv.PublicKey.Curve = c
- priv.D = k
- priv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(k.Bytes())
- return priv, nil
-}
-
-// hashToInt converts a hash value to an integer. Per FIPS 186-4, Section 6.4,
-// we use the left-most bits of the hash to match the bit-length of the order of
-// the curve. This also performs Step 5 of SEC 1, Version 2.0, Section 4.1.3.
-func hashToInt(hash []byte, c elliptic.Curve) *big.Int {
- orderBits := c.Params().N.BitLen()
- orderBytes := (orderBits + 7) / 8
- if len(hash) > orderBytes {
- hash = hash[:orderBytes]
- }
-
- ret := new(big.Int).SetBytes(hash)
- excess := len(hash)*8 - orderBits
- if excess > 0 {
- ret.Rsh(ret, uint(excess))
- }
- return ret
-}
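A standalone sketch of the same left-most-bits truncation, assuming a 521-bit order as in P-521 (truncateToOrder is a hypothetical helper): the result never exceeds the order's bit length, however long the hash is.

package main

import (
	"fmt"
	"math/big"
)

// truncateToOrder keeps the left-most orderBits bits of hash, as hashToInt does.
func truncateToOrder(hash []byte, orderBits int) *big.Int {
	orderBytes := (orderBits + 7) / 8
	if len(hash) > orderBytes {
		hash = hash[:orderBytes] // keep the left-most bytes
	}
	ret := new(big.Int).SetBytes(hash)
	if excess := len(hash)*8 - orderBits; excess > 0 {
		ret.Rsh(ret, uint(excess)) // drop the extra low bits
	}
	return ret
}

func main() {
	hash := make([]byte, 80) // longer than any supported order
	for i := range hash {
		hash[i] = 0xff
	}
	fmt.Println(truncateToOrder(hash, 521).BitLen()) // 521
}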
-
-// fermatInverse calculates the inverse of k in GF(P) using Fermat's method
-// (exponentiation modulo P - 2, per Euler's theorem). This has better
-// constant-time properties than Euclid's method (implemented in
-// math/big.Int.ModInverse and FIPS 186-4, Appendix C.1) although math/big
-// itself isn't strictly constant-time so it's not perfect.
-func fermatInverse(k, N *big.Int) *big.Int {
- two := big.NewInt(2)
- nMinus2 := new(big.Int).Sub(N, two)
- return new(big.Int).Exp(k, nMinus2, N)
-}
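A quick sanity check of Fermat's method on a toy modulus: for prime N, k^(N-2) mod N agrees with the extended-Euclid inverse computed by math/big.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	N := big.NewInt(101) // a toy prime; the real code uses the curve order
	k := big.NewInt(7)
	fermat := new(big.Int).Exp(k, new(big.Int).Sub(N, big.NewInt(2)), N)
	euclid := new(big.Int).ModInverse(k, N)
	fmt.Println(fermat, euclid) // 29 29
}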
-
-var errZeroParam = errors.New("zero parameter")
-
-// Sign signs a hash (which should be the result of hashing a larger message)
-// using the private key, priv. If the hash is longer than the bit-length of the
-// private key's curve order, the hash will be truncated to that length. It
-// returns the signature as a pair of integers. Most applications should use
-// SignASN1 instead of dealing directly with r, s.
-func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) {
- randutil.MaybeReadByte(rand)
-
- // This implementation derives the nonce from an AES-CTR CSPRNG keyed by:
- //
- // SHA2-512(priv.D || entropy || hash)[:32]
- //
- // The CSPRNG key is indifferentiable from a random oracle as shown in
- // [Coron], and the AES-CTR stream is indifferentiable from a random oracle
- // under standard cryptographic assumptions (see [Larsson] for examples).
- //
- // [Coron]: https://cs.nyu.edu/~dodis/ps/merkle.pdf
- // [Larsson]: https://web.archive.org/web/20040719170906/https://www.nada.kth.se/kurser/kth/2D1441/semteo03/lecturenotes/assump.pdf
-
- // Get 256 bits of entropy from rand.
- entropy := make([]byte, 32)
- _, err = io.ReadFull(rand, entropy)
- if err != nil {
- return
- }
-
- // Initialize an SHA-512 hash context; digest...
- md := sha512.New()
- md.Write(priv.D.Bytes()) // the private key,
- md.Write(entropy) // the entropy,
- md.Write(hash) // and the input hash;
- key := md.Sum(nil)[:32] // and compute ChopMD-256(SHA-512),
- // which is an indifferentiable MAC.
-
- // Create an AES-CTR instance to use as a CSPRNG.
- block, err := aes.NewCipher(key)
- if err != nil {
- return nil, nil, err
- }
-
- // Create a CSPRNG that xors a stream of zeros with
- // the output of the AES-CTR instance.
- csprng := cipher.StreamReader{
- R: zeroReader,
- S: cipher.NewCTR(block, []byte(aesIV)),
- }
-
- c := priv.PublicKey.Curve
- return sign(priv, &csprng, c, hash)
-}
-
-func signGeneric(priv *PrivateKey, csprng *cipher.StreamReader, c elliptic.Curve, hash []byte) (r, s *big.Int, err error) {
- // SEC 1, Version 2.0, Section 4.1.3
- N := c.Params().N
- if N.Sign() == 0 {
- return nil, nil, errZeroParam
- }
- var k, kInv *big.Int
- for {
- for {
- k, err = randFieldElement(c, *csprng)
- if err != nil {
- r = nil
- return
- }
-
- if in, ok := priv.Curve.(invertible); ok {
- kInv = in.Inverse(k)
- } else {
- kInv = fermatInverse(k, N) // N != 0
- }
-
- r, _ = priv.Curve.ScalarBaseMult(k.Bytes())
- r.Mod(r, N)
- if r.Sign() != 0 {
- break
- }
- }
-
- e := hashToInt(hash, c)
- s = new(big.Int).Mul(priv.D, r)
- s.Add(s, e)
- s.Mul(s, kInv)
- s.Mod(s, N) // N != 0
- if s.Sign() != 0 {
- break
- }
- }
-
- return
-}
-
-// SignASN1 signs a hash (which should be the result of hashing a larger message)
-// using the private key, priv. If the hash is longer than the bit-length of the
-// private key's curve order, the hash will be truncated to that length. It
-// returns the ASN.1 encoded signature.
-func SignASN1(rand io.Reader, priv *PrivateKey, hash []byte) ([]byte, error) {
- return priv.Sign(rand, hash, nil)
-}
-
-// Verify verifies the signature in r, s of hash using the public key, pub. Its
-// return value records whether the signature is valid. Most applications should
-// use VerifyASN1 instead of dealing directly with r, s.
-func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool {
- c := pub.Curve
- N := c.Params().N
-
- if r.Sign() <= 0 || s.Sign() <= 0 {
- return false
- }
- if r.Cmp(N) >= 0 || s.Cmp(N) >= 0 {
- return false
- }
- return verify(pub, c, hash, r, s)
-}
-
-func verifyGeneric(pub *PublicKey, c elliptic.Curve, hash []byte, r, s *big.Int) bool {
- // SEC 1, Version 2.0, Section 4.1.4
- e := hashToInt(hash, c)
- var w *big.Int
- N := c.Params().N
- if in, ok := c.(invertible); ok {
- w = in.Inverse(s)
- } else {
- w = new(big.Int).ModInverse(s, N)
- }
-
- u1 := e.Mul(e, w)
- u1.Mod(u1, N)
- u2 := w.Mul(r, w)
- u2.Mod(u2, N)
-
- // Check whether the curve implements the combined multiplication [u1]G + [u2]P
- var x, y *big.Int
- if opt, ok := c.(combinedMult); ok {
- x, y = opt.CombinedMult(pub.X, pub.Y, u1.Bytes(), u2.Bytes())
- } else {
- x1, y1 := c.ScalarBaseMult(u1.Bytes())
- x2, y2 := c.ScalarMult(pub.X, pub.Y, u2.Bytes())
- x, y = c.Add(x1, y1, x2, y2)
- }
-
- if x.Sign() == 0 && y.Sign() == 0 {
- return false
- }
- x.Mod(x, N)
- return x.Cmp(r) == 0
-}
-
-// VerifyASN1 verifies the ASN.1 encoded signature, sig, of hash using the
-// public key, pub. Its return value records whether the signature is valid.
-func VerifyASN1(pub *PublicKey, hash, sig []byte) bool {
- var (
- r, s = &big.Int{}, &big.Int{}
- inner cryptobyte.String
- )
- input := cryptobyte.String(sig)
- if !input.ReadASN1(&inner, asn1.SEQUENCE) ||
- !input.Empty() ||
- !inner.ReadASN1Integer(r) ||
- !inner.ReadASN1Integer(s) ||
- !inner.Empty() {
- return false
- }
- return Verify(pub, hash, r, s)
-}
-
-type zr struct {
- io.Reader
-}
-
-// Read replaces the contents of dst with zeros.
-func (z *zr) Read(dst []byte) (n int, err error) {
- for i := range dst {
- dst[i] = 0
- }
- return len(dst), nil
-}
-
-var zeroReader = &zr{}
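For context, callers normally only touch the package-level API deleted above (GenerateKey, SignASN1, VerifyASN1) rather than the r, s internals. A minimal usage sketch:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	digest := sha256.Sum256([]byte("sign me")) // sign the hash, not the message
	sig, err := ecdsa.SignASN1(rand.Reader, priv, digest[:])
	if err != nil {
		panic(err)
	}
	fmt.Println(ecdsa.VerifyASN1(&priv.PublicKey, digest[:], sig)) // true
}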
diff --git a/contrib/go/_std_1.18/src/crypto/ed25519/ed25519.go b/contrib/go/_std_1.18/src/crypto/ed25519/ed25519.go
deleted file mode 100644
index 09c5269d0c..0000000000
--- a/contrib/go/_std_1.18/src/crypto/ed25519/ed25519.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ed25519 implements the Ed25519 signature algorithm. See
-// https://ed25519.cr.yp.to/.
-//
-// These functions are also compatible with the “Ed25519” function defined in
-// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
-// representation includes a public key suffix to make multiple signing
-// operations with the same key more efficient. This package refers to the RFC
-// 8032 private key as the “seed”.
-package ed25519
-
-import (
- "bytes"
- "crypto"
- "crypto/ed25519/internal/edwards25519"
- cryptorand "crypto/rand"
- "crypto/sha512"
- "errors"
- "io"
- "strconv"
-)
-
-const (
- // PublicKeySize is the size, in bytes, of public keys as used in this package.
- PublicKeySize = 32
- // PrivateKeySize is the size, in bytes, of private keys as used in this package.
- PrivateKeySize = 64
- // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
- SignatureSize = 64
- // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
- SeedSize = 32
-)
-
-// PublicKey is the type of Ed25519 public keys.
-type PublicKey []byte
-
-// Any methods implemented on PublicKey might need to also be implemented on
-// PrivateKey, as the latter embeds the former and will expose its methods.
-
-// Equal reports whether pub and x have the same value.
-func (pub PublicKey) Equal(x crypto.PublicKey) bool {
- xx, ok := x.(PublicKey)
- if !ok {
- return false
- }
- return bytes.Equal(pub, xx)
-}
-
-// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
-type PrivateKey []byte
-
-// Public returns the PublicKey corresponding to priv.
-func (priv PrivateKey) Public() crypto.PublicKey {
- publicKey := make([]byte, PublicKeySize)
- copy(publicKey, priv[32:])
- return PublicKey(publicKey)
-}
-
-// Equal reports whether priv and x have the same value.
-func (priv PrivateKey) Equal(x crypto.PrivateKey) bool {
- xx, ok := x.(PrivateKey)
- if !ok {
- return false
- }
- return bytes.Equal(priv, xx)
-}
-
-// Seed returns the private key seed corresponding to priv. It is provided for
-// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
-// in this package.
-func (priv PrivateKey) Seed() []byte {
- seed := make([]byte, SeedSize)
- copy(seed, priv[:32])
- return seed
-}
-
-// Sign signs the given message with priv.
-// Ed25519 performs two passes over messages to be signed and therefore cannot
-// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
-// indicate the message hasn't been hashed. This can be achieved by passing
-// crypto.Hash(0) as the value for opts.
-func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
- if opts.HashFunc() != crypto.Hash(0) {
- return nil, errors.New("ed25519: cannot sign hashed message")
- }
-
- return Sign(priv, message), nil
-}
-
-// GenerateKey generates a public/private key pair using entropy from rand.
-// If rand is nil, crypto/rand.Reader will be used.
-func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
- if rand == nil {
- rand = cryptorand.Reader
- }
-
- seed := make([]byte, SeedSize)
- if _, err := io.ReadFull(rand, seed); err != nil {
- return nil, nil, err
- }
-
- privateKey := NewKeyFromSeed(seed)
- publicKey := make([]byte, PublicKeySize)
- copy(publicKey, privateKey[32:])
-
- return publicKey, privateKey, nil
-}
-
-// NewKeyFromSeed calculates a private key from a seed. It will panic if
-// len(seed) is not SeedSize. This function is provided for interoperability
-// with RFC 8032. RFC 8032's private keys correspond to seeds in this
-// package.
-func NewKeyFromSeed(seed []byte) PrivateKey {
- // Outline the function body so that the returned key can be stack-allocated.
- privateKey := make([]byte, PrivateKeySize)
- newKeyFromSeed(privateKey, seed)
- return privateKey
-}
-
-func newKeyFromSeed(privateKey, seed []byte) {
- if l := len(seed); l != SeedSize {
- panic("ed25519: bad seed length: " + strconv.Itoa(l))
- }
-
- h := sha512.Sum512(seed)
- s := edwards25519.NewScalar().SetBytesWithClamping(h[:32])
- A := (&edwards25519.Point{}).ScalarBaseMult(s)
-
- publicKey := A.Bytes()
-
- copy(privateKey, seed)
- copy(privateKey[32:], publicKey)
-}
-
-// Sign signs the message with privateKey and returns a signature. It will
-// panic if len(privateKey) is not PrivateKeySize.
-func Sign(privateKey PrivateKey, message []byte) []byte {
- // Outline the function body so that the returned signature can be
- // stack-allocated.
- signature := make([]byte, SignatureSize)
- sign(signature, privateKey, message)
- return signature
-}
-
-func sign(signature, privateKey, message []byte) {
- if l := len(privateKey); l != PrivateKeySize {
- panic("ed25519: bad private key length: " + strconv.Itoa(l))
- }
- seed, publicKey := privateKey[:SeedSize], privateKey[SeedSize:]
-
- h := sha512.Sum512(seed)
- s := edwards25519.NewScalar().SetBytesWithClamping(h[:32])
- prefix := h[32:]
-
- mh := sha512.New()
- mh.Write(prefix)
- mh.Write(message)
- messageDigest := make([]byte, 0, sha512.Size)
- messageDigest = mh.Sum(messageDigest)
- r := edwards25519.NewScalar().SetUniformBytes(messageDigest)
-
- R := (&edwards25519.Point{}).ScalarBaseMult(r)
-
- kh := sha512.New()
- kh.Write(R.Bytes())
- kh.Write(publicKey)
- kh.Write(message)
- hramDigest := make([]byte, 0, sha512.Size)
- hramDigest = kh.Sum(hramDigest)
- k := edwards25519.NewScalar().SetUniformBytes(hramDigest)
-
- S := edwards25519.NewScalar().MultiplyAdd(k, s, r)
-
- copy(signature[:32], R.Bytes())
- copy(signature[32:], S.Bytes())
-}
-
-// Verify reports whether sig is a valid signature of message by publicKey. It
-// will panic if len(publicKey) is not PublicKeySize.
-func Verify(publicKey PublicKey, message, sig []byte) bool {
- if l := len(publicKey); l != PublicKeySize {
- panic("ed25519: bad public key length: " + strconv.Itoa(l))
- }
-
- if len(sig) != SignatureSize || sig[63]&224 != 0 {
- return false
- }
-
- A, err := (&edwards25519.Point{}).SetBytes(publicKey)
- if err != nil {
- return false
- }
-
- kh := sha512.New()
- kh.Write(sig[:32])
- kh.Write(publicKey)
- kh.Write(message)
- hramDigest := make([]byte, 0, sha512.Size)
- hramDigest = kh.Sum(hramDigest)
- k := edwards25519.NewScalar().SetUniformBytes(hramDigest)
-
- S, err := edwards25519.NewScalar().SetCanonicalBytes(sig[32:])
- if err != nil {
- return false
- }
-
- // [S]B = R + [k]A --> [k](-A) + [S]B = R
- minusA := (&edwards25519.Point{}).Negate(A)
- R := (&edwards25519.Point{}).VarTimeDoubleScalarBaseMult(k, minusA, S)
-
- return bytes.Equal(sig[:32], R.Bytes())
-}
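A minimal usage sketch of the package-level API deleted above, including the RFC 8032 seed round-trip described in the package comment:

package main

import (
	"bytes"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	msg := []byte("attack at dawn")
	sig := ed25519.Sign(priv, msg)
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true

	// The RFC 8032 "private key" is the 32-byte seed; it regenerates priv.
	same := ed25519.NewKeyFromSeed(priv.Seed())
	fmt.Println(bytes.Equal(priv, same)) // true
}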
diff --git a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/doc.go b/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/doc.go
deleted file mode 100644
index ff31cd23da..0000000000
--- a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/doc.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package edwards25519 implements group logic for the twisted Edwards curve
-//
-// -x^2 + y^2 = 1 + -(121665/121666)*x^2*y^2
-//
-// This is better known as the Edwards curve equivalent to Curve25519, and is
-// the curve used by the Ed25519 signature scheme.
-//
-// Most users don't need this package, and should instead use crypto/ed25519 for
-// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or
-// github.com/gtank/ristretto255 for prime order group logic.
-//
-// However, developers who do need to interact with low-level edwards25519
-// operations can use filippo.io/edwards25519, an extended version of this
-// package repackaged as an importable module.
-//
-// (Note that filippo.io/edwards25519 and github.com/gtank/ristretto255 are not
-// maintained by the Go team and are not covered by the Go 1 Compatibility Promise.)
-package edwards25519
diff --git a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/edwards25519.go b/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/edwards25519.go
deleted file mode 100644
index 313e6c281c..0000000000
--- a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/edwards25519.go
+++ /dev/null
@@ -1,427 +0,0 @@
-// Copyright (c) 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package edwards25519
-
-import (
- "crypto/ed25519/internal/edwards25519/field"
- "errors"
-)
-
-// Point types.
-
-type projP1xP1 struct {
- X, Y, Z, T field.Element
-}
-
-type projP2 struct {
- X, Y, Z field.Element
-}
-
-// Point represents a point on the edwards25519 curve.
-//
-// This type works similarly to math/big.Int, and all arguments and receivers
-// are allowed to alias.
-//
-// The zero value is NOT valid, and it may be used only as a receiver.
-type Point struct {
- // The point is internally represented in extended coordinates (X, Y, Z, T)
- // where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522.
- x, y, z, t field.Element
-
- // Make the type not comparable (i.e. used with == or as a map key), as
- // equivalent points can be represented by different Go values.
- _ incomparable
-}
-
-type incomparable [0]func()
-
-func checkInitialized(points ...*Point) {
- for _, p := range points {
- if p.x == (field.Element{}) && p.y == (field.Element{}) {
- panic("edwards25519: use of uninitialized Point")
- }
- }
-}
-
-type projCached struct {
- YplusX, YminusX, Z, T2d field.Element
-}
-
-type affineCached struct {
- YplusX, YminusX, T2d field.Element
-}
-
-// Constructors.
-
-func (v *projP2) Zero() *projP2 {
- v.X.Zero()
- v.Y.One()
- v.Z.One()
- return v
-}
-
-// identity is the identity element of the group, the affine point (0, 1).
-var identity, _ = new(Point).SetBytes([]byte{
- 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
-
-// NewIdentityPoint returns a new Point set to the identity.
-func NewIdentityPoint() *Point {
- return new(Point).Set(identity)
-}
-
-// generator is the canonical curve basepoint. See TestGenerator for the
-// correspondence of this encoding with the values in RFC 8032.
-var generator, _ = new(Point).SetBytes([]byte{
- 0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
- 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
- 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
- 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66})
-
-// NewGeneratorPoint returns a new Point set to the canonical generator.
-func NewGeneratorPoint() *Point {
- return new(Point).Set(generator)
-}
-
-func (v *projCached) Zero() *projCached {
- v.YplusX.One()
- v.YminusX.One()
- v.Z.One()
- v.T2d.Zero()
- return v
-}
-
-func (v *affineCached) Zero() *affineCached {
- v.YplusX.One()
- v.YminusX.One()
- v.T2d.Zero()
- return v
-}
-
-// Assignments.
-
-// Set sets v = u, and returns v.
-func (v *Point) Set(u *Point) *Point {
- *v = *u
- return v
-}
-
-// Encoding.
-
-// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032,
-// Section 5.1.2.
-func (v *Point) Bytes() []byte {
- // This function is outlined to make the allocations inline in the caller
- // rather than happen on the heap.
- var buf [32]byte
- return v.bytes(&buf)
-}
-
-func (v *Point) bytes(buf *[32]byte) []byte {
- checkInitialized(v)
-
- var zInv, x, y field.Element
- zInv.Invert(&v.z) // zInv = 1 / Z
- x.Multiply(&v.x, &zInv) // x = X / Z
- y.Multiply(&v.y, &zInv) // y = Y / Z
-
- out := copyFieldElement(buf, &y)
- out[31] |= byte(x.IsNegative() << 7)
- return out
-}
-
-var feOne = new(field.Element).One()
-
-// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not
-// represent a valid point on the curve, SetBytes returns nil and an error and
-// the receiver is unchanged. Otherwise, SetBytes returns v.
-//
-// Note that SetBytes accepts all non-canonical encodings of valid points.
-// That is, it follows decoding rules that match most implementations in
-// the ecosystem rather than RFC 8032.
-func (v *Point) SetBytes(x []byte) (*Point, error) {
- // Specifically, the non-canonical encodings that are accepted are
- // 1) the ones where the field element is not reduced (see the
- // (*field.Element).SetBytes docs) and
- // 2) the ones where the x-coordinate is zero and the sign bit is set.
- //
- // This is consistent with crypto/ed25519/internal/edwards25519. Read more
- // at https://hdevalence.ca/blog/2020-10-04-its-25519am, specifically the
- // "Canonical A, R" section.
-
- if len(x) != 32 {
- return nil, errors.New("edwards25519: invalid point encoding length")
- }
- y := new(field.Element).SetBytes(x)
-
- // -x² + y² = 1 + dx²y²
- // x² + dx²y² = x²(dy² + 1) = y² - 1
- // x² = (y² - 1) / (dy² + 1)
-
- // u = y² - 1
- y2 := new(field.Element).Square(y)
- u := new(field.Element).Subtract(y2, feOne)
-
- // v = dy² + 1
- vv := new(field.Element).Multiply(y2, d)
- vv = vv.Add(vv, feOne)
-
- // x = +√(u/v)
- xx, wasSquare := new(field.Element).SqrtRatio(u, vv)
- if wasSquare == 0 {
- return nil, errors.New("edwards25519: invalid point encoding")
- }
-
- // Select the negative square root if the sign bit is set.
- xxNeg := new(field.Element).Negate(xx)
- xx = xx.Select(xxNeg, xx, int(x[31]>>7))
-
- v.x.Set(xx)
- v.y.Set(y)
- v.z.One()
- v.t.Multiply(xx, y) // xy = T / Z
-
- return v, nil
-}
-
-func copyFieldElement(buf *[32]byte, v *field.Element) []byte {
- copy(buf[:], v.Bytes())
- return buf[:]
-}
-
-// Conversions.
-
-func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 {
- v.X.Multiply(&p.X, &p.T)
- v.Y.Multiply(&p.Y, &p.Z)
- v.Z.Multiply(&p.Z, &p.T)
- return v
-}
-
-func (v *projP2) FromP3(p *Point) *projP2 {
- v.X.Set(&p.x)
- v.Y.Set(&p.y)
- v.Z.Set(&p.z)
- return v
-}
-
-func (v *Point) fromP1xP1(p *projP1xP1) *Point {
- v.x.Multiply(&p.X, &p.T)
- v.y.Multiply(&p.Y, &p.Z)
- v.z.Multiply(&p.Z, &p.T)
- v.t.Multiply(&p.X, &p.Y)
- return v
-}
-
-func (v *Point) fromP2(p *projP2) *Point {
- v.x.Multiply(&p.X, &p.Z)
- v.y.Multiply(&p.Y, &p.Z)
- v.z.Square(&p.Z)
- v.t.Multiply(&p.X, &p.Y)
- return v
-}
-
-// d is a constant in the curve equation.
-var d = new(field.Element).SetBytes([]byte{
- 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
- 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
- 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
- 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52})
-var d2 = new(field.Element).Add(d, d)
-
-func (v *projCached) FromP3(p *Point) *projCached {
- v.YplusX.Add(&p.y, &p.x)
- v.YminusX.Subtract(&p.y, &p.x)
- v.Z.Set(&p.z)
- v.T2d.Multiply(&p.t, d2)
- return v
-}
-
-func (v *affineCached) FromP3(p *Point) *affineCached {
- v.YplusX.Add(&p.y, &p.x)
- v.YminusX.Subtract(&p.y, &p.x)
- v.T2d.Multiply(&p.t, d2)
-
- var invZ field.Element
- invZ.Invert(&p.z)
- v.YplusX.Multiply(&v.YplusX, &invZ)
- v.YminusX.Multiply(&v.YminusX, &invZ)
- v.T2d.Multiply(&v.T2d, &invZ)
- return v
-}
-
-// (Re)addition and subtraction.
-
-// Add sets v = p + q, and returns v.
-func (v *Point) Add(p, q *Point) *Point {
- checkInitialized(p, q)
- qCached := new(projCached).FromP3(q)
- result := new(projP1xP1).Add(p, qCached)
- return v.fromP1xP1(result)
-}
-
-// Subtract sets v = p - q, and returns v.
-func (v *Point) Subtract(p, q *Point) *Point {
- checkInitialized(p, q)
- qCached := new(projCached).FromP3(q)
- result := new(projP1xP1).Sub(p, qCached)
- return v.fromP1xP1(result)
-}
-
-func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 {
- var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
-
- YplusX.Add(&p.y, &p.x)
- YminusX.Subtract(&p.y, &p.x)
-
- PP.Multiply(&YplusX, &q.YplusX)
- MM.Multiply(&YminusX, &q.YminusX)
- TT2d.Multiply(&p.t, &q.T2d)
- ZZ2.Multiply(&p.z, &q.Z)
-
- ZZ2.Add(&ZZ2, &ZZ2)
-
- v.X.Subtract(&PP, &MM)
- v.Y.Add(&PP, &MM)
- v.Z.Add(&ZZ2, &TT2d)
- v.T.Subtract(&ZZ2, &TT2d)
- return v
-}
-
-func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 {
- var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
-
- YplusX.Add(&p.y, &p.x)
- YminusX.Subtract(&p.y, &p.x)
-
- PP.Multiply(&YplusX, &q.YminusX) // flipped sign
- MM.Multiply(&YminusX, &q.YplusX) // flipped sign
- TT2d.Multiply(&p.t, &q.T2d)
- ZZ2.Multiply(&p.z, &q.Z)
-
- ZZ2.Add(&ZZ2, &ZZ2)
-
- v.X.Subtract(&PP, &MM)
- v.Y.Add(&PP, &MM)
- v.Z.Subtract(&ZZ2, &TT2d) // flipped sign
- v.T.Add(&ZZ2, &TT2d) // flipped sign
- return v
-}
-
-func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 {
- var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
-
- YplusX.Add(&p.y, &p.x)
- YminusX.Subtract(&p.y, &p.x)
-
- PP.Multiply(&YplusX, &q.YplusX)
- MM.Multiply(&YminusX, &q.YminusX)
- TT2d.Multiply(&p.t, &q.T2d)
-
- Z2.Add(&p.z, &p.z)
-
- v.X.Subtract(&PP, &MM)
- v.Y.Add(&PP, &MM)
- v.Z.Add(&Z2, &TT2d)
- v.T.Subtract(&Z2, &TT2d)
- return v
-}
-
-func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 {
- var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
-
- YplusX.Add(&p.y, &p.x)
- YminusX.Subtract(&p.y, &p.x)
-
- PP.Multiply(&YplusX, &q.YminusX) // flipped sign
- MM.Multiply(&YminusX, &q.YplusX) // flipped sign
- TT2d.Multiply(&p.t, &q.T2d)
-
- Z2.Add(&p.z, &p.z)
-
- v.X.Subtract(&PP, &MM)
- v.Y.Add(&PP, &MM)
- v.Z.Subtract(&Z2, &TT2d) // flipped sign
- v.T.Add(&Z2, &TT2d) // flipped sign
- return v
-}
-
-// Doubling.
-
-func (v *projP1xP1) Double(p *projP2) *projP1xP1 {
- var XX, YY, ZZ2, XplusYsq field.Element
-
- XX.Square(&p.X)
- YY.Square(&p.Y)
- ZZ2.Square(&p.Z)
- ZZ2.Add(&ZZ2, &ZZ2)
- XplusYsq.Add(&p.X, &p.Y)
- XplusYsq.Square(&XplusYsq)
-
- v.Y.Add(&YY, &XX)
- v.Z.Subtract(&YY, &XX)
-
- v.X.Subtract(&XplusYsq, &v.Y)
- v.T.Subtract(&ZZ2, &v.Z)
- return v
-}
-
-// Negation.
-
-// Negate sets v = -p, and returns v.
-func (v *Point) Negate(p *Point) *Point {
- checkInitialized(p)
- v.x.Negate(&p.x)
- v.y.Set(&p.y)
- v.z.Set(&p.z)
- v.t.Negate(&p.t)
- return v
-}
-
-// Equal returns 1 if v is equivalent to u, and 0 otherwise.
-func (v *Point) Equal(u *Point) int {
- checkInitialized(v, u)
-
- var t1, t2, t3, t4 field.Element
- t1.Multiply(&v.x, &u.z)
- t2.Multiply(&u.x, &v.z)
- t3.Multiply(&v.y, &u.z)
- t4.Multiply(&u.y, &v.z)
-
- return t1.Equal(&t2) & t3.Equal(&t4)
-}
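Equal cross-multiplies because points are stored projectively: x1/z1 == x2/z2 exactly when x1*z2 == x2*z1, so no field inversion is needed. The same idea over plain integers:

package main

import "fmt"

func main() {
	// 2/4 and 3/6 are the same ratio; compare without dividing.
	x1, z1, x2, z2 := 2, 4, 3, 6
	fmt.Println(x1*z2 == x2*z1) // true: both represent 1/2
}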
-
-// Constant-time operations
-
-// Select sets v to a if cond == 1 and to b if cond == 0.
-func (v *projCached) Select(a, b *projCached, cond int) *projCached {
- v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
- v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
- v.Z.Select(&a.Z, &b.Z, cond)
- v.T2d.Select(&a.T2d, &b.T2d, cond)
- return v
-}
-
-// Select sets v to a if cond == 1 and to b if cond == 0.
-func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached {
- v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
- v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
- v.T2d.Select(&a.T2d, &b.T2d, cond)
- return v
-}
-
-// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
-func (v *projCached) CondNeg(cond int) *projCached {
- v.YplusX.Swap(&v.YminusX, cond)
- v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
- return v
-}
-
-// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
-func (v *affineCached) CondNeg(cond int) *affineCached {
- v.YplusX.Swap(&v.YminusX, cond)
- v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
- return v
-}
diff --git a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe.go b/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe.go
deleted file mode 100644
index dbe86599b3..0000000000
--- a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe.go
+++ /dev/null
@@ -1,416 +0,0 @@
-// Copyright (c) 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package field implements fast arithmetic modulo 2^255-19.
-package field
-
-import (
- "crypto/subtle"
- "encoding/binary"
- "math/bits"
-)
-
-// Element represents an element of the field GF(2^255-19). Note that this
-// is not a cryptographically secure group, and should only be used to interact
-// with edwards25519.Point coordinates.
-//
-// This type works similarly to math/big.Int, and all arguments and receivers
-// are allowed to alias.
-//
-// The zero value is a valid zero element.
-type Element struct {
- // An element t represents the integer
- // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204
- //
- // Between operations, all limbs are expected to be lower than 2^52.
- l0 uint64
- l1 uint64
- l2 uint64
- l3 uint64
- l4 uint64
-}
-
-const maskLow51Bits uint64 = (1 << 51) - 1
-
-var feZero = &Element{0, 0, 0, 0, 0}
-
-// Zero sets v = 0, and returns v.
-func (v *Element) Zero() *Element {
- *v = *feZero
- return v
-}
-
-var feOne = &Element{1, 0, 0, 0, 0}
-
-// One sets v = 1, and returns v.
-func (v *Element) One() *Element {
- *v = *feOne
- return v
-}
-
-// reduce reduces v modulo 2^255 - 19 and returns it.
-func (v *Element) reduce() *Element {
- v.carryPropagate()
-
- // After the light reduction we now have a field element representation
- // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19.
-
- // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1,
- // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise.
- c := (v.l0 + 19) >> 51
- c = (v.l1 + c) >> 51
- c = (v.l2 + c) >> 51
- c = (v.l3 + c) >> 51
- c = (v.l4 + c) >> 51
-
- // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's
- // effectively applying the reduction identity to the carry.
- v.l0 += 19 * c
-
- v.l1 += v.l0 >> 51
- v.l0 = v.l0 & maskLow51Bits
- v.l2 += v.l1 >> 51
- v.l1 = v.l1 & maskLow51Bits
- v.l3 += v.l2 >> 51
- v.l2 = v.l2 & maskLow51Bits
- v.l4 += v.l3 >> 51
- v.l3 = v.l3 & maskLow51Bits
- // no additional carry
- v.l4 = v.l4 & maskLow51Bits
-
- return v
-}
-
-// Add sets v = a + b, and returns v.
-func (v *Element) Add(a, b *Element) *Element {
- v.l0 = a.l0 + b.l0
- v.l1 = a.l1 + b.l1
- v.l2 = a.l2 + b.l2
- v.l3 = a.l3 + b.l3
- v.l4 = a.l4 + b.l4
- // Using the generic implementation here is actually faster than the
- // assembly. Probably because the body of this function is so simple that
- // the compiler can figure out better optimizations by inlining the carry
- // propagation.
- return v.carryPropagateGeneric()
-}
-
-// Subtract sets v = a - b, and returns v.
-func (v *Element) Subtract(a, b *Element) *Element {
- // We first add 2 * p, to guarantee the subtraction won't underflow, and
- // then subtract b (which can be up to 2^255 + 2^13 * 19).
- v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0
- v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1
- v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2
- v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3
- v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4
- return v.carryPropagate()
-}
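The magic constants in Subtract are 2p split into 51-bit limbs: limb 0 is 2^52 - 38 and limbs 1-4 are 2^52 - 2. A math/big check that they recombine to 2*(2^255 - 19):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	limbs := []uint64{0xFFFFFFFFFFFDA, 0xFFFFFFFFFFFFE,
		0xFFFFFFFFFFFFE, 0xFFFFFFFFFFFFE, 0xFFFFFFFFFFFFE}
	sum := new(big.Int)
	for i, l := range limbs {
		// Each limb i contributes limb * 2^(51*i).
		sum.Add(sum, new(big.Int).Lsh(new(big.Int).SetUint64(l), uint(51*i)))
	}
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	fmt.Println(sum.Cmp(new(big.Int).Lsh(p, 1)) == 0) // true: the limbs encode 2p
}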
-
-// Negate sets v = -a, and returns v.
-func (v *Element) Negate(a *Element) *Element {
- return v.Subtract(feZero, a)
-}
-
-// Invert sets v = 1/z mod p, and returns v.
-//
-// If z == 0, Invert returns v = 0.
-func (v *Element) Invert(z *Element) *Element {
- // Inversion is implemented as exponentiation with exponent p − 2. It uses the
- // same sequence of 255 squarings and 11 multiplications as [Curve25519].
- var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element
-
- z2.Square(z) // 2
- t.Square(&z2) // 4
- t.Square(&t) // 8
- z9.Multiply(&t, z) // 9
- z11.Multiply(&z9, &z2) // 11
- t.Square(&z11) // 22
- z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0
-
- t.Square(&z2_5_0) // 2^6 - 2^1
- for i := 0; i < 4; i++ {
- t.Square(&t) // 2^10 - 2^5
- }
- z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0
-
- t.Square(&z2_10_0) // 2^11 - 2^1
- for i := 0; i < 9; i++ {
- t.Square(&t) // 2^20 - 2^10
- }
- z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0
-
- t.Square(&z2_20_0) // 2^21 - 2^1
- for i := 0; i < 19; i++ {
- t.Square(&t) // 2^40 - 2^20
- }
- t.Multiply(&t, &z2_20_0) // 2^40 - 2^0
-
- t.Square(&t) // 2^41 - 2^1
- for i := 0; i < 9; i++ {
- t.Square(&t) // 2^50 - 2^10
- }
- z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0
-
- t.Square(&z2_50_0) // 2^51 - 2^1
- for i := 0; i < 49; i++ {
- t.Square(&t) // 2^100 - 2^50
- }
- z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0
-
- t.Square(&z2_100_0) // 2^101 - 2^1
- for i := 0; i < 99; i++ {
- t.Square(&t) // 2^200 - 2^100
- }
- t.Multiply(&t, &z2_100_0) // 2^200 - 2^0
-
- t.Square(&t) // 2^201 - 2^1
- for i := 0; i < 49; i++ {
- t.Square(&t) // 2^250 - 2^50
- }
- t.Multiply(&t, &z2_50_0) // 2^250 - 2^0
-
- t.Square(&t) // 2^251 - 2^1
- t.Square(&t) // 2^252 - 2^2
- t.Square(&t) // 2^253 - 2^3
- t.Square(&t) // 2^254 - 2^4
- t.Square(&t) // 2^255 - 2^5
-
- return v.Multiply(&t, &z11) // 2^255 - 21
-}
-
-// Set sets v = a, and returns v.
-func (v *Element) Set(a *Element) *Element {
- *v = *a
- return v
-}
-
-// SetBytes sets v to x, which must be a 32-byte little-endian encoding.
-//
-// Consistent with RFC 7748, the most significant bit (the high bit of the
-// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
-// are accepted. Note that this is laxer than specified by RFC 8032.
-func (v *Element) SetBytes(x []byte) *Element {
- if len(x) != 32 {
- panic("edwards25519: invalid field element input size")
- }
-
- // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
- v.l0 = binary.LittleEndian.Uint64(x[0:8])
- v.l0 &= maskLow51Bits
- // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
- v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
- v.l1 &= maskLow51Bits
- // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
- v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
- v.l2 &= maskLow51Bits
- // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
- v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
- v.l3 &= maskLow51Bits
- // Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51).
- // Note: not bytes 25:33, shift 4, to avoid overread.
- v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
- v.l4 &= maskLow51Bits
-
- return v
-}
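The byte ranges and shifts in the comments follow directly from limb i starting at bit 51*i. A short sketch that recomputes them; note that SetBytes special-cases limb 4 to bytes 24:32 with shift 12, rather than the natural 25:33 with shift 4, to avoid reading past the 32-byte buffer.

package main

import "fmt"

func main() {
	for i := 0; i < 5; i++ {
		start := 51 * i // bit offset of limb i
		fmt.Printf("limb %d: bytes %d:%d, shift %d\n",
			i, start/8, start/8+8, start%8)
	}
}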
-
-// Bytes returns the canonical 32-byte little-endian encoding of v.
-func (v *Element) Bytes() []byte {
- // This function is outlined to make the allocations inline in the caller
- // rather than happen on the heap.
- var out [32]byte
- return v.bytes(&out)
-}
-
-func (v *Element) bytes(out *[32]byte) []byte {
- t := *v
- t.reduce()
-
- var buf [8]byte
- for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
- bitsOffset := i * 51
- binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
- for i, bb := range buf {
- off := bitsOffset/8 + i
- if off >= len(out) {
- break
- }
- out[off] |= bb
- }
- }
-
- return out[:]
-}
-
-// Equal returns 1 if v and u are equal, and 0 otherwise.
-func (v *Element) Equal(u *Element) int {
- sa, sv := u.Bytes(), v.Bytes()
- return subtle.ConstantTimeCompare(sa, sv)
-}
-
-// mask64Bits returns 0xffffffffffffffff if cond is 1, and 0 otherwise.
-func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
-
-// Select sets v to a if cond == 1, and to b if cond == 0.
-func (v *Element) Select(a, b *Element, cond int) *Element {
- m := mask64Bits(cond)
- v.l0 = (m & a.l0) | (^m & b.l0)
- v.l1 = (m & a.l1) | (^m & b.l1)
- v.l2 = (m & a.l2) | (^m & b.l2)
- v.l3 = (m & a.l3) | (^m & b.l3)
- v.l4 = (m & a.l4) | (^m & b.l4)
- return v
-}
-
-// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0.
-func (v *Element) Swap(u *Element, cond int) {
- m := mask64Bits(cond)
- t := m & (v.l0 ^ u.l0)
- v.l0 ^= t
- u.l0 ^= t
- t = m & (v.l1 ^ u.l1)
- v.l1 ^= t
- u.l1 ^= t
- t = m & (v.l2 ^ u.l2)
- v.l2 ^= t
- u.l2 ^= t
- t = m & (v.l3 ^ u.l3)
- v.l3 ^= t
- u.l3 ^= t
- t = m & (v.l4 ^ u.l4)
- v.l4 ^= t
- u.l4 ^= t
-}
-
-// IsNegative returns 1 if v is negative, and 0 otherwise.
-func (v *Element) IsNegative() int {
- return int(v.Bytes()[0] & 1)
-}
-
-// Absolute sets v to |u|, and returns v.
-func (v *Element) Absolute(u *Element) *Element {
- return v.Select(new(Element).Negate(u), u, u.IsNegative())
-}
-
-// Multiply sets v = x * y, and returns v.
-func (v *Element) Multiply(x, y *Element) *Element {
- feMul(v, x, y)
- return v
-}
-
-// Square sets v = x * x, and returns v.
-func (v *Element) Square(x *Element) *Element {
- feSquare(v, x)
- return v
-}
-
-// Mult32 sets v = x * y, and returns v.
-func (v *Element) Mult32(x *Element, y uint32) *Element {
- x0lo, x0hi := mul51(x.l0, y)
- x1lo, x1hi := mul51(x.l1, y)
- x2lo, x2hi := mul51(x.l2, y)
- x3lo, x3hi := mul51(x.l3, y)
- x4lo, x4hi := mul51(x.l4, y)
- v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
- v.l1 = x1lo + x0hi
- v.l2 = x2lo + x1hi
- v.l3 = x3lo + x2hi
- v.l4 = x4lo + x3hi
- // The hi portions are going to be only 32 bits, plus any previous excess,
- // so we can skip the carry propagation.
- return v
-}
-
-// mul51 returns lo + hi * 2⁵¹ = a * b.
-func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
- mh, ml := bits.Mul64(a, uint64(b))
- lo = ml & maskLow51Bits
- hi = (mh << 13) | (ml >> 51)
- return
-}
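mul51's contract is that the full product reassembles as lo + hi*2^51. A tiny check (mul51 reimplemented locally) with a product that straddles the split:

package main

import (
	"fmt"
	"math/bits"
)

const maskLow51Bits = 1<<51 - 1

// mul51 returns lo + hi*2^51 = a*b, exactly as above.
func mul51(a uint64, b uint32) (lo, hi uint64) {
	mh, ml := bits.Mul64(a, uint64(b))
	return ml & maskLow51Bits, (mh << 13) | (ml >> 51)
}

func main() {
	lo, hi := mul51(1<<51, 3) // (2^51)*3
	fmt.Println(lo, hi)       // 0 3: the product is 0 + 3*2^51
}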
-
-// Pow22523 sets v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
-func (v *Element) Pow22523(x *Element) *Element {
- var t0, t1, t2 Element
-
- t0.Square(x) // x^2
- t1.Square(&t0) // x^4
- t1.Square(&t1) // x^8
- t1.Multiply(x, &t1) // x^9
- t0.Multiply(&t0, &t1) // x^11
- t0.Square(&t0) // x^22
- t0.Multiply(&t1, &t0) // x^31
- t1.Square(&t0) // x^62
- for i := 1; i < 5; i++ { // x^992
- t1.Square(&t1)
- }
- t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1
- t1.Square(&t0) // 2^11 - 2
- for i := 1; i < 10; i++ { // 2^20 - 2^10
- t1.Square(&t1)
- }
- t1.Multiply(&t1, &t0) // 2^20 - 1
- t2.Square(&t1) // 2^21 - 2
- for i := 1; i < 20; i++ { // 2^40 - 2^20
- t2.Square(&t2)
- }
- t1.Multiply(&t2, &t1) // 2^40 - 1
- t1.Square(&t1) // 2^41 - 2
- for i := 1; i < 10; i++ { // 2^50 - 2^10
- t1.Square(&t1)
- }
- t0.Multiply(&t1, &t0) // 2^50 - 1
- t1.Square(&t0) // 2^51 - 2
- for i := 1; i < 50; i++ { // 2^100 - 2^50
- t1.Square(&t1)
- }
- t1.Multiply(&t1, &t0) // 2^100 - 1
- t2.Square(&t1) // 2^101 - 2
- for i := 1; i < 100; i++ { // 2^200 - 2^100
- t2.Square(&t2)
- }
- t1.Multiply(&t2, &t1) // 2^200 - 1
- t1.Square(&t1) // 2^201 - 2
- for i := 1; i < 50; i++ { // 2^250 - 2^50
- t1.Square(&t1)
- }
- t0.Multiply(&t1, &t0) // 2^250 - 1
- t0.Square(&t0) // 2^251 - 2
- t0.Square(&t0) // 2^252 - 4
- return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
-}
-
-// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
-var sqrtM1 = &Element{1718705420411056, 234908883556509,
- 2233514472574048, 2117202627021982, 765476049583133}
-
-// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
-//
-// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
-// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
-// and returns r and 0.
-func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) {
- var a, b Element
-
- // r = (u * v3) * (u * v7)^((p-5)/8)
- v2 := a.Square(v)
- uv3 := b.Multiply(u, b.Multiply(v2, v))
- uv7 := a.Multiply(uv3, a.Square(v2))
- r.Multiply(uv3, r.Pow22523(uv7))
-
- check := a.Multiply(v, a.Square(r)) // check = v * r^2
-
- uNeg := b.Negate(u)
- correctSignSqrt := check.Equal(u)
- flippedSignSqrt := check.Equal(uNeg)
- flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1))
-
- rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r
- // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
- r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI)
-
- r.Absolute(r) // Choose the nonnegative square root.
- return r, correctSignSqrt | flippedSignSqrt
-}
diff --git a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe_amd64.go b/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe_amd64.go
deleted file mode 100644
index 363020bd6b..0000000000
--- a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe_amd64.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
-
-//go:build amd64 && gc && !purego
-
-package field
-
-// feMul sets out = a * b. It works like feMulGeneric.
-//go:noescape
-func feMul(out *Element, a *Element, b *Element)
-
-// feSquare sets out = a * a. It works like feSquareGeneric.
-//go:noescape
-func feSquare(out *Element, a *Element)
diff --git a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe_generic.go b/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe_generic.go
deleted file mode 100644
index bccf8511ac..0000000000
--- a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe_generic.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright (c) 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package field
-
-import "math/bits"
-
-// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
-// bits.Mul64 and bits.Add64 intrinsics.
-type uint128 struct {
- lo, hi uint64
-}
-
-// mul64 returns a * b.
-func mul64(a, b uint64) uint128 {
- hi, lo := bits.Mul64(a, b)
- return uint128{lo, hi}
-}
-
-// addMul64 returns v + a * b.
-func addMul64(v uint128, a, b uint64) uint128 {
- hi, lo := bits.Mul64(a, b)
- lo, c := bits.Add64(lo, v.lo, 0)
- hi, _ = bits.Add64(hi, v.hi, c)
- return uint128{lo, hi}
-}
-
-// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
-func shiftRightBy51(a uint128) uint64 {
- return (a.hi << (64 - 51)) | (a.lo >> 51)
-}
-
-func feMulGeneric(v, a, b *Element) {
- a0 := a.l0
- a1 := a.l1
- a2 := a.l2
- a3 := a.l3
- a4 := a.l4
-
- b0 := b.l0
- b1 := b.l1
- b2 := b.l2
- b3 := b.l3
- b4 := b.l4
-
- // Limb multiplication works like pen-and-paper columnar multiplication, but
- // with 51-bit limbs instead of digits.
- //
- // a4 a3 a2 a1 a0 x
- // b4 b3 b2 b1 b0 =
- // ------------------------
- // a4b0 a3b0 a2b0 a1b0 a0b0 +
- // a4b1 a3b1 a2b1 a1b1 a0b1 +
- // a4b2 a3b2 a2b2 a1b2 a0b2 +
- // a4b3 a3b3 a2b3 a1b3 a0b3 +
- // a4b4 a3b4 a2b4 a1b4 a0b4 =
- // ----------------------------------------------
- // r8 r7 r6 r5 r4 r3 r2 r1 r0
- //
- // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
- // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
- // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
- //
- // Reduction can be carried out simultaneously to multiplication. For
- // example, we do not compute r5: whenever the result of a multiplication
- // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
- //
- // a4b0 a3b0 a2b0 a1b0 a0b0 +
- // a3b1 a2b1 a1b1 a0b1 19×a4b1 +
- // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 +
- // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 +
- // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 =
- // --------------------------------------
- // r4 r3 r2 r1 r0
- //
- // Finally we add up the columns into wide, overlapping limbs.
-
- a1_19 := a1 * 19
- a2_19 := a2 * 19
- a3_19 := a3 * 19
- a4_19 := a4 * 19
-
- // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
- r0 := mul64(a0, b0)
- r0 = addMul64(r0, a1_19, b4)
- r0 = addMul64(r0, a2_19, b3)
- r0 = addMul64(r0, a3_19, b2)
- r0 = addMul64(r0, a4_19, b1)
-
- // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
- r1 := mul64(a0, b1)
- r1 = addMul64(r1, a1, b0)
- r1 = addMul64(r1, a2_19, b4)
- r1 = addMul64(r1, a3_19, b3)
- r1 = addMul64(r1, a4_19, b2)
-
- // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
- r2 := mul64(a0, b2)
- r2 = addMul64(r2, a1, b1)
- r2 = addMul64(r2, a2, b0)
- r2 = addMul64(r2, a3_19, b4)
- r2 = addMul64(r2, a4_19, b3)
-
- // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
- r3 := mul64(a0, b3)
- r3 = addMul64(r3, a1, b2)
- r3 = addMul64(r3, a2, b1)
- r3 = addMul64(r3, a3, b0)
- r3 = addMul64(r3, a4_19, b4)
-
- // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
- r4 := mul64(a0, b4)
- r4 = addMul64(r4, a1, b3)
- r4 = addMul64(r4, a2, b2)
- r4 = addMul64(r4, a3, b1)
- r4 = addMul64(r4, a4, b0)
-
- // After the multiplication, we need to reduce (carry) the five coefficients
- // to obtain a result with limbs that are at most slightly larger than 2⁵¹,
- // to respect the Element invariant.
- //
- // Overall, the reduction works the same as carryPropagate, except with
- // wider inputs: we take the carry for each coefficient by shifting it right
- // by 51, and add it to the limb above it. The top carry is multiplied by 19
- // according to the reduction identity and added to the lowest limb.
- //
- // The largest coefficient (r0) will be at most 111 bits, which guarantees
- // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
- //
- // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
- // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
- // r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
- // r0 < 2⁷ × 2⁵² × 2⁵²
- // r0 < 2¹¹¹
- //
- // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
- // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
- // allows us to easily apply the reduction identity.
- //
- // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
- // r4 < 5 × 2⁵² × 2⁵²
- // r4 < 2¹⁰⁷
- //
-
- c0 := shiftRightBy51(r0)
- c1 := shiftRightBy51(r1)
- c2 := shiftRightBy51(r2)
- c3 := shiftRightBy51(r3)
- c4 := shiftRightBy51(r4)
-
- rr0 := r0.lo&maskLow51Bits + c4*19
- rr1 := r1.lo&maskLow51Bits + c0
- rr2 := r2.lo&maskLow51Bits + c1
- rr3 := r3.lo&maskLow51Bits + c2
- rr4 := r4.lo&maskLow51Bits + c3
-
- // Now all coefficients fit into 64-bit registers but are still too large to
- // be passed around as a Element. We therefore do one last carry chain,
- // where the carries will be small enough to fit in the wiggle room above 2⁵¹.
- *v = Element{rr0, rr1, rr2, rr3, rr4}
- v.carryPropagate()
-}
-
-func feSquareGeneric(v, a *Element) {
- l0 := a.l0
- l1 := a.l1
- l2 := a.l2
- l3 := a.l3
- l4 := a.l4
-
- // Squaring works precisely like multiplication above, but thanks to its
- // symmetry we get to group a few terms together.
- //
- // l4 l3 l2 l1 l0 x
- // l4 l3 l2 l1 l0 =
- // ------------------------
- // l4l0 l3l0 l2l0 l1l0 l0l0 +
- // l4l1 l3l1 l2l1 l1l1 l0l1 +
- // l4l2 l3l2 l2l2 l1l2 l0l2 +
- // l4l3 l3l3 l2l3 l1l3 l0l3 +
- // l4l4 l3l4 l2l4 l1l4 l0l4 =
- // ----------------------------------------------
- // r8 r7 r6 r5 r4 r3 r2 r1 r0
- //
- // l4l0 l3l0 l2l0 l1l0 l0l0 +
- // l3l1 l2l1 l1l1 l0l1 19×l4l1 +
- // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 +
- // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 +
- // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 =
- // --------------------------------------
- // r4 r3 r2 r1 r0
- //
- // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
- // only three Mul64 and four Add64, instead of five and eight.
-
- l0_2 := l0 * 2
- l1_2 := l1 * 2
-
- l1_38 := l1 * 38
- l2_38 := l2 * 38
- l3_38 := l3 * 38
-
- l3_19 := l3 * 19
- l4_19 := l4 * 19
-
- // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
- r0 := mul64(l0, l0)
- r0 = addMul64(r0, l1_38, l4)
- r0 = addMul64(r0, l2_38, l3)
-
- // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
- r1 := mul64(l0_2, l1)
- r1 = addMul64(r1, l2_38, l4)
- r1 = addMul64(r1, l3_19, l3)
-
- // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
- r2 := mul64(l0_2, l2)
- r2 = addMul64(r2, l1, l1)
- r2 = addMul64(r2, l3_38, l4)
-
- // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
- r3 := mul64(l0_2, l3)
- r3 = addMul64(r3, l1_2, l2)
- r3 = addMul64(r3, l4_19, l4)
-
- // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
- r4 := mul64(l0_2, l4)
- r4 = addMul64(r4, l1_2, l3)
- r4 = addMul64(r4, l2, l2)
-
- c0 := shiftRightBy51(r0)
- c1 := shiftRightBy51(r1)
- c2 := shiftRightBy51(r2)
- c3 := shiftRightBy51(r3)
- c4 := shiftRightBy51(r4)
-
- rr0 := r0.lo&maskLow51Bits + c4*19
- rr1 := r1.lo&maskLow51Bits + c0
- rr2 := r2.lo&maskLow51Bits + c1
- rr3 := r3.lo&maskLow51Bits + c2
- rr4 := r4.lo&maskLow51Bits + c3
-
- *v = Element{rr0, rr1, rr2, rr3, rr4}
- v.carryPropagate()
-}
-
-// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction
-// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry.
-func (v *Element) carryPropagateGeneric() *Element {
- c0 := v.l0 >> 51
- c1 := v.l1 >> 51
- c2 := v.l2 >> 51
- c3 := v.l3 >> 51
- c4 := v.l4 >> 51
-
- v.l0 = v.l0&maskLow51Bits + c4*19
- v.l1 = v.l1&maskLow51Bits + c0
- v.l2 = v.l2&maskLow51Bits + c1
- v.l3 = v.l3&maskLow51Bits + c2
- v.l4 = v.l4&maskLow51Bits + c3
-
- return v
-}
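The reduction identity holds because 2^255 ≡ 19 (mod 2^255 - 19): a carry out of the top limb re-enters at the bottom multiplied by 19. A one-line math/big confirmation:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	r := new(big.Int).Mod(new(big.Int).Lsh(big.NewInt(1), 255), p)
	fmt.Println(r) // 19
}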
diff --git a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/scalar.go b/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/scalar.go
deleted file mode 100644
index 889acaa0f1..0000000000
--- a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/scalar.go
+++ /dev/null
@@ -1,1025 +0,0 @@
-// Copyright (c) 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package edwards25519
-
-import (
- "crypto/subtle"
- "encoding/binary"
- "errors"
-)
-
-// A Scalar is an integer modulo
-//
-// l = 2^252 + 27742317777372353535851937790883648493
-//
-// which is the prime order of the edwards25519 group.
-//
-// This type works similarly to math/big.Int, and all arguments and
-// receivers are allowed to alias.
-//
-// The zero value is a valid zero element.
-type Scalar struct {
- // s is the Scalar value in little-endian. The value is always reduced
- // between operations.
- s [32]byte
-}
-
-var (
- scZero = Scalar{[32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
-
- scOne = Scalar{[32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
-
- scMinusOne = Scalar{[32]byte{236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16}}
-)
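scMinusOne is the little-endian encoding of l - 1. A math/big cross-check of its lowest and highest bytes against the constant above:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// l = 2^252 + 27742317777372353535851937790883648493
	l := new(big.Int).Lsh(big.NewInt(1), 252)
	c, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
	l.Add(l, c)
	b := new(big.Int).Sub(l, big.NewInt(1)).Bytes() // big-endian, 32 bytes
	fmt.Println(b[len(b)-1], b[0]) // 236 16: matches scMinusOne.s[0] and s[31]
}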
-
-// NewScalar returns a new zero Scalar.
-func NewScalar() *Scalar {
- return &Scalar{}
-}
-
-// MultiplyAdd sets s = x * y + z mod l, and returns s.
-func (s *Scalar) MultiplyAdd(x, y, z *Scalar) *Scalar {
- scMulAdd(&s.s, &x.s, &y.s, &z.s)
- return s
-}
-
-// Add sets s = x + y mod l, and returns s.
-func (s *Scalar) Add(x, y *Scalar) *Scalar {
- // s = 1 * x + y mod l
- scMulAdd(&s.s, &scOne.s, &x.s, &y.s)
- return s
-}
-
-// Subtract sets s = x - y mod l, and returns s.
-func (s *Scalar) Subtract(x, y *Scalar) *Scalar {
- // s = -1 * y + x mod l
- scMulAdd(&s.s, &scMinusOne.s, &y.s, &x.s)
- return s
-}
-
-// Negate sets s = -x mod l, and returns s.
-func (s *Scalar) Negate(x *Scalar) *Scalar {
- // s = -1 * x + 0 mod l
- scMulAdd(&s.s, &scMinusOne.s, &x.s, &scZero.s)
- return s
-}
-
-// Multiply sets s = x * y mod l, and returns s.
-func (s *Scalar) Multiply(x, y *Scalar) *Scalar {
- // s = x * y + 0 mod l
- scMulAdd(&s.s, &x.s, &y.s, &scZero.s)
- return s
-}
-
-// Set sets s = x, and returns s.
-func (s *Scalar) Set(x *Scalar) *Scalar {
- *s = *x
- return s
-}
-
-// SetUniformBytes sets s to a uniformly distributed value given 64 uniformly
-// distributed random bytes.
-func (s *Scalar) SetUniformBytes(x []byte) *Scalar {
- if len(x) != 64 {
- panic("edwards25519: invalid SetUniformBytes input length")
- }
- var wideBytes [64]byte
- copy(wideBytes[:], x[:])
- scReduce(&s.s, &wideBytes)
- return s
-}
-
-// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of
-// s, and returns s. If x is not a canonical encoding of s, SetCanonicalBytes
-// returns nil and an error, and the receiver is unchanged.
-func (s *Scalar) SetCanonicalBytes(x []byte) (*Scalar, error) {
- if len(x) != 32 {
- return nil, errors.New("invalid scalar length")
- }
- ss := &Scalar{}
- copy(ss.s[:], x)
- if !isReduced(ss) {
- return nil, errors.New("invalid scalar encoding")
- }
- s.s = ss.s
- return s, nil
-}
-
-// isReduced returns whether the given scalar is reduced modulo l.
-func isReduced(s *Scalar) bool {
- for i := len(s.s) - 1; i >= 0; i-- {
- switch {
- case s.s[i] > scMinusOne.s[i]:
- return false
- case s.s[i] < scMinusOne.s[i]:
- return true
- }
- }
- return true
-}
-
-// SetBytesWithClamping applies the buffer pruning described in RFC 8032,
-// Section 5.1.5 (also known as clamping) and sets s to the result. The input
-// must be 32 bytes, and it is not modified.
-//
-// Note that since Scalar values are always reduced modulo the prime order of
-// the curve, the resulting value will not preserve any of the cofactor-clearing
-// properties that clamping is meant to provide. It will however work as
-// expected as long as it is applied to points on the prime order subgroup, like
-// in Ed25519. In fact, it is lost to history why RFC 8032 adopted the
-// irrelevant RFC 7748 clamping, but it is now required for compatibility.
-func (s *Scalar) SetBytesWithClamping(x []byte) *Scalar {
- // The description above omits the purpose of the high bits of the clamping
- // for brevity, but those are also lost to reductions, and are also
- // irrelevant to edwards25519 as they protect against a specific
- // implementation bug that was once observed in a generic Montgomery ladder.
- if len(x) != 32 {
- panic("edwards25519: invalid SetBytesWithClamping input length")
- }
- var wideBytes [64]byte
- copy(wideBytes[:], x[:])
- wideBytes[0] &= 248
- wideBytes[31] &= 63
- wideBytes[31] |= 64
- scReduce(&s.s, &wideBytes)
- return s
-}
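
Editor's note: a small standalone illustration of the bit manipulation above. The low 3 bits of byte 0 are cleared, and the top two bits of byte 31 are forced to 01, so bit 254 of the scalar is set and bit 255 is clear.

```go
package main

import "fmt"

func main() {
	b0, b31 := byte(0xff), byte(0xff) // worst-case input bytes
	b0 &= 248                          // clear the low 3 bits
	b31 &= 63                          // clear bits 254 and 255
	b31 |= 64                          // set bit 254
	fmt.Printf("%08b %08b\n", b0, b31) // 11111000 01111111
}
```
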
-
-// Bytes returns the canonical 32-byte little-endian encoding of s.
-func (s *Scalar) Bytes() []byte {
- buf := make([]byte, 32)
- copy(buf, s.s[:])
- return buf
-}
-
-// Equal returns 1 if s and t are equal, and 0 otherwise.
-func (s *Scalar) Equal(t *Scalar) int {
- return subtle.ConstantTimeCompare(s.s[:], t.s[:])
-}
-
-// scMulAdd and scReduce are ported from the public domain, “ref10”
-// implementation of ed25519 from SUPERCOP.
-
-func load3(in []byte) int64 {
- r := int64(in[0])
- r |= int64(in[1]) << 8
- r |= int64(in[2]) << 16
- return r
-}
-
-func load4(in []byte) int64 {
- r := int64(in[0])
- r |= int64(in[1]) << 8
- r |= int64(in[2]) << 16
- r |= int64(in[3]) << 24
- return r
-}
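
Editor's note: an illustrative standalone check (not from the source). load3/load4 assemble little-endian bytes, and the 2097151 (= 2^21 − 1) masks seen below split the 256-bit scalar into 21-bit limbs.

```go
package main

import "fmt"

// load3 mirrors the helper above: three little-endian bytes into an int64.
func load3(in []byte) int64 {
	return int64(in[0]) | int64(in[1])<<8 | int64(in[2])<<16
}

func main() {
	v := load3([]byte{0xff, 0xff, 0xff})
	fmt.Printf("%#x\n", v)         // 0xffffff
	fmt.Printf("%#x\n", v&2097151) // 0x1fffff: the low 21 bits, one limb
}
```
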
-
-// Input:
-// a[0]+256*a[1]+...+256^31*a[31] = a
-// b[0]+256*b[1]+...+256^31*b[31] = b
-// c[0]+256*c[1]+...+256^31*c[31] = c
-//
-// Output:
-// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l
-// where l = 2^252 + 27742317777372353535851937790883648493.
-func scMulAdd(s, a, b, c *[32]byte) {
- a0 := 2097151 & load3(a[:])
- a1 := 2097151 & (load4(a[2:]) >> 5)
- a2 := 2097151 & (load3(a[5:]) >> 2)
- a3 := 2097151 & (load4(a[7:]) >> 7)
- a4 := 2097151 & (load4(a[10:]) >> 4)
- a5 := 2097151 & (load3(a[13:]) >> 1)
- a6 := 2097151 & (load4(a[15:]) >> 6)
- a7 := 2097151 & (load3(a[18:]) >> 3)
- a8 := 2097151 & load3(a[21:])
- a9 := 2097151 & (load4(a[23:]) >> 5)
- a10 := 2097151 & (load3(a[26:]) >> 2)
- a11 := (load4(a[28:]) >> 7)
- b0 := 2097151 & load3(b[:])
- b1 := 2097151 & (load4(b[2:]) >> 5)
- b2 := 2097151 & (load3(b[5:]) >> 2)
- b3 := 2097151 & (load4(b[7:]) >> 7)
- b4 := 2097151 & (load4(b[10:]) >> 4)
- b5 := 2097151 & (load3(b[13:]) >> 1)
- b6 := 2097151 & (load4(b[15:]) >> 6)
- b7 := 2097151 & (load3(b[18:]) >> 3)
- b8 := 2097151 & load3(b[21:])
- b9 := 2097151 & (load4(b[23:]) >> 5)
- b10 := 2097151 & (load3(b[26:]) >> 2)
- b11 := (load4(b[28:]) >> 7)
- c0 := 2097151 & load3(c[:])
- c1 := 2097151 & (load4(c[2:]) >> 5)
- c2 := 2097151 & (load3(c[5:]) >> 2)
- c3 := 2097151 & (load4(c[7:]) >> 7)
- c4 := 2097151 & (load4(c[10:]) >> 4)
- c5 := 2097151 & (load3(c[13:]) >> 1)
- c6 := 2097151 & (load4(c[15:]) >> 6)
- c7 := 2097151 & (load3(c[18:]) >> 3)
- c8 := 2097151 & load3(c[21:])
- c9 := 2097151 & (load4(c[23:]) >> 5)
- c10 := 2097151 & (load3(c[26:]) >> 2)
- c11 := (load4(c[28:]) >> 7)
- var carry [23]int64
-
- s0 := c0 + a0*b0
- s1 := c1 + a0*b1 + a1*b0
- s2 := c2 + a0*b2 + a1*b1 + a2*b0
- s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0
- s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0
- s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0
- s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0
- s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0
- s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0
- s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0
- s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0
- s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0
- s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1
- s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2
- s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3
- s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4
- s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5
- s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6
- s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7
- s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8
- s20 := a9*b11 + a10*b10 + a11*b9
- s21 := a10*b11 + a11*b10
- s22 := a11 * b11
- s23 := int64(0)
-
- carry[0] = (s0 + (1 << 20)) >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[2] = (s2 + (1 << 20)) >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[4] = (s4 + (1 << 20)) >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[6] = (s6 + (1 << 20)) >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[8] = (s8 + (1 << 20)) >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[10] = (s10 + (1 << 20)) >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
- carry[12] = (s12 + (1 << 20)) >> 21
- s13 += carry[12]
- s12 -= carry[12] << 21
- carry[14] = (s14 + (1 << 20)) >> 21
- s15 += carry[14]
- s14 -= carry[14] << 21
- carry[16] = (s16 + (1 << 20)) >> 21
- s17 += carry[16]
- s16 -= carry[16] << 21
- carry[18] = (s18 + (1 << 20)) >> 21
- s19 += carry[18]
- s18 -= carry[18] << 21
- carry[20] = (s20 + (1 << 20)) >> 21
- s21 += carry[20]
- s20 -= carry[20] << 21
- carry[22] = (s22 + (1 << 20)) >> 21
- s23 += carry[22]
- s22 -= carry[22] << 21
-
- carry[1] = (s1 + (1 << 20)) >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[3] = (s3 + (1 << 20)) >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[5] = (s5 + (1 << 20)) >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[7] = (s7 + (1 << 20)) >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[9] = (s9 + (1 << 20)) >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[11] = (s11 + (1 << 20)) >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
- carry[13] = (s13 + (1 << 20)) >> 21
- s14 += carry[13]
- s13 -= carry[13] << 21
- carry[15] = (s15 + (1 << 20)) >> 21
- s16 += carry[15]
- s15 -= carry[15] << 21
- carry[17] = (s17 + (1 << 20)) >> 21
- s18 += carry[17]
- s17 -= carry[17] << 21
- carry[19] = (s19 + (1 << 20)) >> 21
- s20 += carry[19]
- s19 -= carry[19] << 21
- carry[21] = (s21 + (1 << 20)) >> 21
- s22 += carry[21]
- s21 -= carry[21] << 21
-
- s11 += s23 * 666643
- s12 += s23 * 470296
- s13 += s23 * 654183
- s14 -= s23 * 997805
- s15 += s23 * 136657
- s16 -= s23 * 683901
- s23 = 0
-
- s10 += s22 * 666643
- s11 += s22 * 470296
- s12 += s22 * 654183
- s13 -= s22 * 997805
- s14 += s22 * 136657
- s15 -= s22 * 683901
- s22 = 0
-
- s9 += s21 * 666643
- s10 += s21 * 470296
- s11 += s21 * 654183
- s12 -= s21 * 997805
- s13 += s21 * 136657
- s14 -= s21 * 683901
- s21 = 0
-
- s8 += s20 * 666643
- s9 += s20 * 470296
- s10 += s20 * 654183
- s11 -= s20 * 997805
- s12 += s20 * 136657
- s13 -= s20 * 683901
- s20 = 0
-
- s7 += s19 * 666643
- s8 += s19 * 470296
- s9 += s19 * 654183
- s10 -= s19 * 997805
- s11 += s19 * 136657
- s12 -= s19 * 683901
- s19 = 0
-
- s6 += s18 * 666643
- s7 += s18 * 470296
- s8 += s18 * 654183
- s9 -= s18 * 997805
- s10 += s18 * 136657
- s11 -= s18 * 683901
- s18 = 0
-
- carry[6] = (s6 + (1 << 20)) >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[8] = (s8 + (1 << 20)) >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[10] = (s10 + (1 << 20)) >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
- carry[12] = (s12 + (1 << 20)) >> 21
- s13 += carry[12]
- s12 -= carry[12] << 21
- carry[14] = (s14 + (1 << 20)) >> 21
- s15 += carry[14]
- s14 -= carry[14] << 21
- carry[16] = (s16 + (1 << 20)) >> 21
- s17 += carry[16]
- s16 -= carry[16] << 21
-
- carry[7] = (s7 + (1 << 20)) >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[9] = (s9 + (1 << 20)) >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[11] = (s11 + (1 << 20)) >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
- carry[13] = (s13 + (1 << 20)) >> 21
- s14 += carry[13]
- s13 -= carry[13] << 21
- carry[15] = (s15 + (1 << 20)) >> 21
- s16 += carry[15]
- s15 -= carry[15] << 21
-
- s5 += s17 * 666643
- s6 += s17 * 470296
- s7 += s17 * 654183
- s8 -= s17 * 997805
- s9 += s17 * 136657
- s10 -= s17 * 683901
- s17 = 0
-
- s4 += s16 * 666643
- s5 += s16 * 470296
- s6 += s16 * 654183
- s7 -= s16 * 997805
- s8 += s16 * 136657
- s9 -= s16 * 683901
- s16 = 0
-
- s3 += s15 * 666643
- s4 += s15 * 470296
- s5 += s15 * 654183
- s6 -= s15 * 997805
- s7 += s15 * 136657
- s8 -= s15 * 683901
- s15 = 0
-
- s2 += s14 * 666643
- s3 += s14 * 470296
- s4 += s14 * 654183
- s5 -= s14 * 997805
- s6 += s14 * 136657
- s7 -= s14 * 683901
- s14 = 0
-
- s1 += s13 * 666643
- s2 += s13 * 470296
- s3 += s13 * 654183
- s4 -= s13 * 997805
- s5 += s13 * 136657
- s6 -= s13 * 683901
- s13 = 0
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = (s0 + (1 << 20)) >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[2] = (s2 + (1 << 20)) >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[4] = (s4 + (1 << 20)) >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[6] = (s6 + (1 << 20)) >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[8] = (s8 + (1 << 20)) >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[10] = (s10 + (1 << 20)) >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
-
- carry[1] = (s1 + (1 << 20)) >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[3] = (s3 + (1 << 20)) >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[5] = (s5 + (1 << 20)) >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[7] = (s7 + (1 << 20)) >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[9] = (s9 + (1 << 20)) >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[11] = (s11 + (1 << 20)) >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = s0 >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[1] = s1 >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[2] = s2 >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[3] = s3 >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[4] = s4 >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[5] = s5 >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[6] = s6 >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[7] = s7 >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[8] = s8 >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[9] = s9 >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[10] = s10 >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
- carry[11] = s11 >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = s0 >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[1] = s1 >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[2] = s2 >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[3] = s3 >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[4] = s4 >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[5] = s5 >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[6] = s6 >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[7] = s7 >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[8] = s8 >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[9] = s9 >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[10] = s10 >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
-
- s[0] = byte(s0 >> 0)
- s[1] = byte(s0 >> 8)
- s[2] = byte((s0 >> 16) | (s1 << 5))
- s[3] = byte(s1 >> 3)
- s[4] = byte(s1 >> 11)
- s[5] = byte((s1 >> 19) | (s2 << 2))
- s[6] = byte(s2 >> 6)
- s[7] = byte((s2 >> 14) | (s3 << 7))
- s[8] = byte(s3 >> 1)
- s[9] = byte(s3 >> 9)
- s[10] = byte((s3 >> 17) | (s4 << 4))
- s[11] = byte(s4 >> 4)
- s[12] = byte(s4 >> 12)
- s[13] = byte((s4 >> 20) | (s5 << 1))
- s[14] = byte(s5 >> 7)
- s[15] = byte((s5 >> 15) | (s6 << 6))
- s[16] = byte(s6 >> 2)
- s[17] = byte(s6 >> 10)
- s[18] = byte((s6 >> 18) | (s7 << 3))
- s[19] = byte(s7 >> 5)
- s[20] = byte(s7 >> 13)
- s[21] = byte(s8 >> 0)
- s[22] = byte(s8 >> 8)
- s[23] = byte((s8 >> 16) | (s9 << 5))
- s[24] = byte(s9 >> 3)
- s[25] = byte(s9 >> 11)
- s[26] = byte((s9 >> 19) | (s10 << 2))
- s[27] = byte(s10 >> 6)
- s[28] = byte((s10 >> 14) | (s11 << 7))
- s[29] = byte(s11 >> 1)
- s[30] = byte(s11 >> 9)
- s[31] = byte(s11 >> 17)
-}
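
Editor's note: the limb arithmetic above implements a single modular operation. As a hedged cross-check (the helper name mulAddModL is illustrative, not from the source), the same value via math/big:

```go
package main

import (
	"fmt"
	"math/big"
)

// mulAddModL computes (a*b + c) mod l with big integers, the same
// function scMulAdd implements on 21-bit limbs.
func mulAddModL(a, b, c *big.Int) *big.Int {
	l := new(big.Int).Lsh(big.NewInt(1), 252)
	k, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
	l.Add(l, k)
	s := new(big.Int).Mul(a, b)
	return s.Mod(s.Add(s, c), l)
}

func main() {
	fmt.Println(mulAddModL(big.NewInt(3), big.NewInt(5), big.NewInt(7))) // 22
}
```
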
-
-// Input:
-// s[0]+256*s[1]+...+256^63*s[63] = s
-//
-// Output:
-// s[0]+256*s[1]+...+256^31*s[31] = s mod l
-// where l = 2^252 + 27742317777372353535851937790883648493.
-func scReduce(out *[32]byte, s *[64]byte) {
- s0 := 2097151 & load3(s[:])
- s1 := 2097151 & (load4(s[2:]) >> 5)
- s2 := 2097151 & (load3(s[5:]) >> 2)
- s3 := 2097151 & (load4(s[7:]) >> 7)
- s4 := 2097151 & (load4(s[10:]) >> 4)
- s5 := 2097151 & (load3(s[13:]) >> 1)
- s6 := 2097151 & (load4(s[15:]) >> 6)
- s7 := 2097151 & (load3(s[18:]) >> 3)
- s8 := 2097151 & load3(s[21:])
- s9 := 2097151 & (load4(s[23:]) >> 5)
- s10 := 2097151 & (load3(s[26:]) >> 2)
- s11 := 2097151 & (load4(s[28:]) >> 7)
- s12 := 2097151 & (load4(s[31:]) >> 4)
- s13 := 2097151 & (load3(s[34:]) >> 1)
- s14 := 2097151 & (load4(s[36:]) >> 6)
- s15 := 2097151 & (load3(s[39:]) >> 3)
- s16 := 2097151 & load3(s[42:])
- s17 := 2097151 & (load4(s[44:]) >> 5)
- s18 := 2097151 & (load3(s[47:]) >> 2)
- s19 := 2097151 & (load4(s[49:]) >> 7)
- s20 := 2097151 & (load4(s[52:]) >> 4)
- s21 := 2097151 & (load3(s[55:]) >> 1)
- s22 := 2097151 & (load4(s[57:]) >> 6)
- s23 := (load4(s[60:]) >> 3)
-
- s11 += s23 * 666643
- s12 += s23 * 470296
- s13 += s23 * 654183
- s14 -= s23 * 997805
- s15 += s23 * 136657
- s16 -= s23 * 683901
- s23 = 0
-
- s10 += s22 * 666643
- s11 += s22 * 470296
- s12 += s22 * 654183
- s13 -= s22 * 997805
- s14 += s22 * 136657
- s15 -= s22 * 683901
- s22 = 0
-
- s9 += s21 * 666643
- s10 += s21 * 470296
- s11 += s21 * 654183
- s12 -= s21 * 997805
- s13 += s21 * 136657
- s14 -= s21 * 683901
- s21 = 0
-
- s8 += s20 * 666643
- s9 += s20 * 470296
- s10 += s20 * 654183
- s11 -= s20 * 997805
- s12 += s20 * 136657
- s13 -= s20 * 683901
- s20 = 0
-
- s7 += s19 * 666643
- s8 += s19 * 470296
- s9 += s19 * 654183
- s10 -= s19 * 997805
- s11 += s19 * 136657
- s12 -= s19 * 683901
- s19 = 0
-
- s6 += s18 * 666643
- s7 += s18 * 470296
- s8 += s18 * 654183
- s9 -= s18 * 997805
- s10 += s18 * 136657
- s11 -= s18 * 683901
- s18 = 0
-
- var carry [17]int64
-
- carry[6] = (s6 + (1 << 20)) >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[8] = (s8 + (1 << 20)) >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[10] = (s10 + (1 << 20)) >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
- carry[12] = (s12 + (1 << 20)) >> 21
- s13 += carry[12]
- s12 -= carry[12] << 21
- carry[14] = (s14 + (1 << 20)) >> 21
- s15 += carry[14]
- s14 -= carry[14] << 21
- carry[16] = (s16 + (1 << 20)) >> 21
- s17 += carry[16]
- s16 -= carry[16] << 21
-
- carry[7] = (s7 + (1 << 20)) >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[9] = (s9 + (1 << 20)) >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[11] = (s11 + (1 << 20)) >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
- carry[13] = (s13 + (1 << 20)) >> 21
- s14 += carry[13]
- s13 -= carry[13] << 21
- carry[15] = (s15 + (1 << 20)) >> 21
- s16 += carry[15]
- s15 -= carry[15] << 21
-
- s5 += s17 * 666643
- s6 += s17 * 470296
- s7 += s17 * 654183
- s8 -= s17 * 997805
- s9 += s17 * 136657
- s10 -= s17 * 683901
- s17 = 0
-
- s4 += s16 * 666643
- s5 += s16 * 470296
- s6 += s16 * 654183
- s7 -= s16 * 997805
- s8 += s16 * 136657
- s9 -= s16 * 683901
- s16 = 0
-
- s3 += s15 * 666643
- s4 += s15 * 470296
- s5 += s15 * 654183
- s6 -= s15 * 997805
- s7 += s15 * 136657
- s8 -= s15 * 683901
- s15 = 0
-
- s2 += s14 * 666643
- s3 += s14 * 470296
- s4 += s14 * 654183
- s5 -= s14 * 997805
- s6 += s14 * 136657
- s7 -= s14 * 683901
- s14 = 0
-
- s1 += s13 * 666643
- s2 += s13 * 470296
- s3 += s13 * 654183
- s4 -= s13 * 997805
- s5 += s13 * 136657
- s6 -= s13 * 683901
- s13 = 0
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = (s0 + (1 << 20)) >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[2] = (s2 + (1 << 20)) >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[4] = (s4 + (1 << 20)) >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[6] = (s6 + (1 << 20)) >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[8] = (s8 + (1 << 20)) >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[10] = (s10 + (1 << 20)) >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
-
- carry[1] = (s1 + (1 << 20)) >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[3] = (s3 + (1 << 20)) >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[5] = (s5 + (1 << 20)) >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[7] = (s7 + (1 << 20)) >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[9] = (s9 + (1 << 20)) >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[11] = (s11 + (1 << 20)) >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = s0 >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[1] = s1 >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[2] = s2 >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[3] = s3 >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[4] = s4 >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[5] = s5 >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[6] = s6 >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[7] = s7 >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[8] = s8 >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[9] = s9 >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[10] = s10 >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
- carry[11] = s11 >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = s0 >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[1] = s1 >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[2] = s2 >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[3] = s3 >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[4] = s4 >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[5] = s5 >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[6] = s6 >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[7] = s7 >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[8] = s8 >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[9] = s9 >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[10] = s10 >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
-
- out[0] = byte(s0 >> 0)
- out[1] = byte(s0 >> 8)
- out[2] = byte((s0 >> 16) | (s1 << 5))
- out[3] = byte(s1 >> 3)
- out[4] = byte(s1 >> 11)
- out[5] = byte((s1 >> 19) | (s2 << 2))
- out[6] = byte(s2 >> 6)
- out[7] = byte((s2 >> 14) | (s3 << 7))
- out[8] = byte(s3 >> 1)
- out[9] = byte(s3 >> 9)
- out[10] = byte((s3 >> 17) | (s4 << 4))
- out[11] = byte(s4 >> 4)
- out[12] = byte(s4 >> 12)
- out[13] = byte((s4 >> 20) | (s5 << 1))
- out[14] = byte(s5 >> 7)
- out[15] = byte((s5 >> 15) | (s6 << 6))
- out[16] = byte(s6 >> 2)
- out[17] = byte(s6 >> 10)
- out[18] = byte((s6 >> 18) | (s7 << 3))
- out[19] = byte(s7 >> 5)
- out[20] = byte(s7 >> 13)
- out[21] = byte(s8 >> 0)
- out[22] = byte(s8 >> 8)
- out[23] = byte((s8 >> 16) | (s9 << 5))
- out[24] = byte(s9 >> 3)
- out[25] = byte(s9 >> 11)
- out[26] = byte((s9 >> 19) | (s10 << 2))
- out[27] = byte(s10 >> 6)
- out[28] = byte((s10 >> 14) | (s11 << 7))
- out[29] = byte(s11 >> 1)
- out[30] = byte(s11 >> 9)
- out[31] = byte(s11 >> 17)
-}
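
Editor's note: the magic constants in the folds above come from l = 2^252 + k. Each high limb s_{12+i}·2^(252+21i) is replaced by its residue, using 2^252 ≡ 666643 + 470296·2^21 + 654183·2^42 − 997805·2^63 + 136657·2^84 − 683901·2^105 (mod l). A standalone sketch verifying that congruence:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	k, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
	l := new(big.Int).Lsh(big.NewInt(1), 252)
	l.Add(l, k) // l = 2^252 + k

	// The signed limb constants used by the folds, at 21-bit offsets.
	limbs := []int64{666643, 470296, 654183, -997805, 136657, -683901}
	sum := new(big.Int)
	for i, v := range limbs {
		sum.Add(sum, new(big.Int).Lsh(big.NewInt(v), uint(21*i)))
	}
	sum.Mod(sum, l)

	want := new(big.Int).Lsh(big.NewInt(1), 252) // 2^252, already < l
	fmt.Println(sum.Cmp(want) == 0)              // true
}
```
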
-
-// nonAdjacentForm computes a width-w non-adjacent form for this scalar.
-//
-// w must be between 2 and 8, or nonAdjacentForm will panic.
-func (s *Scalar) nonAdjacentForm(w uint) [256]int8 {
- // This implementation is adapted from the one
- // in curve25519-dalek and is documented there:
- // https://github.com/dalek-cryptography/curve25519-dalek/blob/f630041af28e9a405255f98a8a93adca18e4315b/src/scalar.rs#L800-L871
- if s.s[31] > 127 {
- panic("scalar has high bit set illegally")
- }
- if w < 2 {
- panic("w must be at least 2 by the definition of NAF")
- } else if w > 8 {
- panic("NAF digits must fit in int8")
- }
-
- var naf [256]int8
- var digits [5]uint64
-
- for i := 0; i < 4; i++ {
- digits[i] = binary.LittleEndian.Uint64(s.s[i*8:])
- }
-
- width := uint64(1 << w)
- windowMask := uint64(width - 1)
-
- pos := uint(0)
- carry := uint64(0)
- for pos < 256 {
- indexU64 := pos / 64
- indexBit := pos % 64
- var bitBuf uint64
- if indexBit < 64-w {
- // This window's bits are contained in a single u64
- bitBuf = digits[indexU64] >> indexBit
- } else {
- // Combine the current 64 bits with bits from the next 64
- bitBuf = (digits[indexU64] >> indexBit) | (digits[1+indexU64] << (64 - indexBit))
- }
-
- // Add carry into the current window
- window := carry + (bitBuf & windowMask)
-
- if window&1 == 0 {
- // If the window value is even, preserve the carry and continue.
- // Why is the carry preserved?
- // If carry == 0 and window & 1 == 0,
- // then the next carry should be 0
- // If carry == 1 and window & 1 == 0,
- // then bit_buf & 1 == 1 so the next carry should be 1
- pos += 1
- continue
- }
-
- if window < width/2 {
- carry = 0
- naf[pos] = int8(window)
- } else {
- carry = 1
- naf[pos] = int8(window) - int8(width)
- }
-
- pos += w
- }
- return naf
-}
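
Editor's note: a hedged standalone sketch (the helper naf is illustrative, not from the source) of the same recoding on a small integer. Every nonzero digit is odd, lies in (−2^(w−1), 2^(w−1)), and is followed by at least w−1 zeros, which is what lets a scalar multiplication skip ahead by w bits at a time.

```go
package main

import "fmt"

// naf computes a width-w non-adjacent form of n, mirroring the method above.
func naf(n uint64, w uint) []int8 {
	var digits []int8
	width := int64(1) << w
	for n > 0 {
		if n&1 == 1 {
			d := int64(n) & (width - 1) // n mod 2^w
			if d >= width/2 {
				d -= width // recenter into (-2^(w-1), 2^(w-1))
			}
			digits = append(digits, int8(d))
			n = uint64(int64(n) - d)
		} else {
			digits = append(digits, 0)
		}
		n >>= 1
	}
	return digits
}

func main() {
	fmt.Println(naf(7, 2)) // [-1 0 0 1]: 7 = -1 + 8
}
```
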
-
-func (s *Scalar) signedRadix16() [64]int8 {
- if s.s[31] > 127 {
- panic("scalar has high bit set illegally")
- }
-
- var digits [64]int8
-
- // Compute unsigned radix-16 digits:
- for i := 0; i < 32; i++ {
- digits[2*i] = int8(s.s[i] & 15)
- digits[2*i+1] = int8((s.s[i] >> 4) & 15)
- }
-
- // Recenter coefficients:
- for i := 0; i < 63; i++ {
- carry := (digits[i] + 8) >> 4
- digits[i] -= carry << 4
- digits[i+1] += carry
- }
-
- return digits
-}
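
Editor's note: a standalone illustration (not from the source) of the recentering on a one-byte value; an extra digit absorbs the final carry, just as digits[63] can reach 8 in the method above.

```go
package main

import "fmt"

func main() {
	v := byte(0x7f)
	digits := []int8{int8(v & 15), int8(v >> 4), 0} // unsigned radix-16 + carry slot
	for i := 0; i < 2; i++ {
		carry := (digits[i] + 8) >> 4
		digits[i] -= carry << 4
		digits[i+1] += carry
	}
	sum := 0
	for i, d := range digits {
		sum += int(d) << (4 * i)
	}
	fmt.Println(digits, sum) // [-1 -8 1] 127
}
```
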
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/elliptic.go b/contrib/go/_std_1.18/src/crypto/elliptic/elliptic.go
deleted file mode 100644
index 7ead09f8d3..0000000000
--- a/contrib/go/_std_1.18/src/crypto/elliptic/elliptic.go
+++ /dev/null
@@ -1,496 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package elliptic implements the standard NIST P-224, P-256, P-384, and P-521
-// elliptic curves over prime fields.
-package elliptic
-
-import (
- "io"
- "math/big"
- "sync"
-)
-
-// A Curve represents a short-form Weierstrass curve with a=-3.
-//
-// The behavior of Add, Double, and ScalarMult when the input is not a point on
-// the curve is undefined.
-//
-// Note that the conventional point at infinity (0, 0) is not considered on the
-// curve, although it can be returned by Add, Double, ScalarMult, or
-// ScalarBaseMult (but not the Unmarshal or UnmarshalCompressed functions).
-type Curve interface {
- // Params returns the parameters for the curve.
- Params() *CurveParams
- // IsOnCurve reports whether the given (x,y) lies on the curve.
- IsOnCurve(x, y *big.Int) bool
- // Add returns the sum of (x1,y1) and (x2,y2)
- Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int)
- // Double returns 2*(x,y)
- Double(x1, y1 *big.Int) (x, y *big.Int)
- // ScalarMult returns k*(Bx,By) where k is a number in big-endian form.
- ScalarMult(x1, y1 *big.Int, k []byte) (x, y *big.Int)
- // ScalarBaseMult returns k*G, where G is the base point of the group
- // and k is an integer in big-endian form.
- ScalarBaseMult(k []byte) (x, y *big.Int)
-}
-
-func matchesSpecificCurve(params *CurveParams, available ...Curve) (Curve, bool) {
- for _, c := range available {
- if params == c.Params() {
- return c, true
- }
- }
- return nil, false
-}
-
-// CurveParams contains the parameters of an elliptic curve and also provides
-// a generic, non-constant time implementation of Curve.
-type CurveParams struct {
- P *big.Int // the order of the underlying field
- N *big.Int // the order of the base point
- B *big.Int // the constant of the curve equation
- Gx, Gy *big.Int // (x,y) of the base point
- BitSize int // the size of the underlying field
- Name string // the canonical name of the curve
-}
-
-func (curve *CurveParams) Params() *CurveParams {
- return curve
-}
-
-// CurveParams operates, internally, on Jacobian coordinates. For a given
-// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1)
-// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole
-// calculation can be performed within the transform (as in ScalarMult and
-// ScalarBaseMult). But even for Add and Double, it's faster to apply and
-// reverse the transform than to operate in affine coordinates.
-
-// polynomial returns x³ - 3x + b.
-func (curve *CurveParams) polynomial(x *big.Int) *big.Int {
- x3 := new(big.Int).Mul(x, x)
- x3.Mul(x3, x)
-
- threeX := new(big.Int).Lsh(x, 1)
- threeX.Add(threeX, x)
-
- x3.Sub(x3, threeX)
- x3.Add(x3, curve.B)
- x3.Mod(x3, curve.P)
-
- return x3
-}
-
-func (curve *CurveParams) IsOnCurve(x, y *big.Int) bool {
- // If there is a dedicated constant-time implementation for this curve operation,
- // use that instead of the generic one.
- if specific, ok := matchesSpecificCurve(curve, p224, p384, p521); ok {
- return specific.IsOnCurve(x, y)
- }
-
- if x.Sign() < 0 || x.Cmp(curve.P) >= 0 ||
- y.Sign() < 0 || y.Cmp(curve.P) >= 0 {
- return false
- }
-
- // y² = x³ - 3x + b
- y2 := new(big.Int).Mul(y, y)
- y2.Mod(y2, curve.P)
-
- return curve.polynomial(x).Cmp(y2) == 0
-}
-
-// zForAffine returns a Jacobian Z value for the affine point (x, y). If x and
-// y are zero, it assumes that they represent the point at infinity because
-// (0, 0) is not on any of the curves handled here.
-func zForAffine(x, y *big.Int) *big.Int {
- z := new(big.Int)
- if x.Sign() != 0 || y.Sign() != 0 {
- z.SetInt64(1)
- }
- return z
-}
-
-// affineFromJacobian reverses the Jacobian transform. See the comment at the
-// top of the file. If the point is ∞ it returns 0, 0.
-func (curve *CurveParams) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
- if z.Sign() == 0 {
- return new(big.Int), new(big.Int)
- }
-
- zinv := new(big.Int).ModInverse(z, curve.P)
- zinvsq := new(big.Int).Mul(zinv, zinv)
-
- xOut = new(big.Int).Mul(x, zinvsq)
- xOut.Mod(xOut, curve.P)
- zinvsq.Mul(zinvsq, zinv)
- yOut = new(big.Int).Mul(y, zinvsq)
- yOut.Mod(yOut, curve.P)
- return
-}
-
-func (curve *CurveParams) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
- // If there is a dedicated constant-time implementation for this curve operation,
- // use that instead of the generic one.
- if specific, ok := matchesSpecificCurve(curve, p224, p384, p521); ok {
- return specific.Add(x1, y1, x2, y2)
- }
-
- z1 := zForAffine(x1, y1)
- z2 := zForAffine(x2, y2)
- return curve.affineFromJacobian(curve.addJacobian(x1, y1, z1, x2, y2, z2))
-}
-
-// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and
-// (x2, y2, z2) and returns their sum, also in Jacobian form.
-func (curve *CurveParams) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) {
- // See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
- x3, y3, z3 := new(big.Int), new(big.Int), new(big.Int)
- if z1.Sign() == 0 {
- x3.Set(x2)
- y3.Set(y2)
- z3.Set(z2)
- return x3, y3, z3
- }
- if z2.Sign() == 0 {
- x3.Set(x1)
- y3.Set(y1)
- z3.Set(z1)
- return x3, y3, z3
- }
-
- z1z1 := new(big.Int).Mul(z1, z1)
- z1z1.Mod(z1z1, curve.P)
- z2z2 := new(big.Int).Mul(z2, z2)
- z2z2.Mod(z2z2, curve.P)
-
- u1 := new(big.Int).Mul(x1, z2z2)
- u1.Mod(u1, curve.P)
- u2 := new(big.Int).Mul(x2, z1z1)
- u2.Mod(u2, curve.P)
- h := new(big.Int).Sub(u2, u1)
- xEqual := h.Sign() == 0
- if h.Sign() == -1 {
- h.Add(h, curve.P)
- }
- i := new(big.Int).Lsh(h, 1)
- i.Mul(i, i)
- j := new(big.Int).Mul(h, i)
-
- s1 := new(big.Int).Mul(y1, z2)
- s1.Mul(s1, z2z2)
- s1.Mod(s1, curve.P)
- s2 := new(big.Int).Mul(y2, z1)
- s2.Mul(s2, z1z1)
- s2.Mod(s2, curve.P)
- r := new(big.Int).Sub(s2, s1)
- if r.Sign() == -1 {
- r.Add(r, curve.P)
- }
- yEqual := r.Sign() == 0
- if xEqual && yEqual {
- return curve.doubleJacobian(x1, y1, z1)
- }
- r.Lsh(r, 1)
- v := new(big.Int).Mul(u1, i)
-
- x3.Set(r)
- x3.Mul(x3, x3)
- x3.Sub(x3, j)
- x3.Sub(x3, v)
- x3.Sub(x3, v)
- x3.Mod(x3, curve.P)
-
- y3.Set(r)
- v.Sub(v, x3)
- y3.Mul(y3, v)
- s1.Mul(s1, j)
- s1.Lsh(s1, 1)
- y3.Sub(y3, s1)
- y3.Mod(y3, curve.P)
-
- z3.Add(z1, z2)
- z3.Mul(z3, z3)
- z3.Sub(z3, z1z1)
- z3.Sub(z3, z2z2)
- z3.Mul(z3, h)
- z3.Mod(z3, curve.P)
-
- return x3, y3, z3
-}
-
-func (curve *CurveParams) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
- // If there is a dedicated constant-time implementation for this curve operation,
- // use that instead of the generic one.
- if specific, ok := matchesSpecificCurve(curve, p224, p384, p521); ok {
- return specific.Double(x1, y1)
- }
-
- z1 := zForAffine(x1, y1)
- return curve.affineFromJacobian(curve.doubleJacobian(x1, y1, z1))
-}
-
-// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and
-// returns its double, also in Jacobian form.
-func (curve *CurveParams) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) {
- // See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
- delta := new(big.Int).Mul(z, z)
- delta.Mod(delta, curve.P)
- gamma := new(big.Int).Mul(y, y)
- gamma.Mod(gamma, curve.P)
- alpha := new(big.Int).Sub(x, delta)
- if alpha.Sign() == -1 {
- alpha.Add(alpha, curve.P)
- }
- alpha2 := new(big.Int).Add(x, delta)
- alpha.Mul(alpha, alpha2)
- alpha2.Set(alpha)
- alpha.Lsh(alpha, 1)
- alpha.Add(alpha, alpha2)
-
- beta := alpha2.Mul(x, gamma)
-
- x3 := new(big.Int).Mul(alpha, alpha)
- beta8 := new(big.Int).Lsh(beta, 3)
- beta8.Mod(beta8, curve.P)
- x3.Sub(x3, beta8)
- if x3.Sign() == -1 {
- x3.Add(x3, curve.P)
- }
- x3.Mod(x3, curve.P)
-
- z3 := new(big.Int).Add(y, z)
- z3.Mul(z3, z3)
- z3.Sub(z3, gamma)
- if z3.Sign() == -1 {
- z3.Add(z3, curve.P)
- }
- z3.Sub(z3, delta)
- if z3.Sign() == -1 {
- z3.Add(z3, curve.P)
- }
- z3.Mod(z3, curve.P)
-
- beta.Lsh(beta, 2)
- beta.Sub(beta, x3)
- if beta.Sign() == -1 {
- beta.Add(beta, curve.P)
- }
- y3 := alpha.Mul(alpha, beta)
-
- gamma.Mul(gamma, gamma)
- gamma.Lsh(gamma, 3)
- gamma.Mod(gamma, curve.P)
-
- y3.Sub(y3, gamma)
- if y3.Sign() == -1 {
- y3.Add(y3, curve.P)
- }
- y3.Mod(y3, curve.P)
-
- return x3, y3, z3
-}
-
-func (curve *CurveParams) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
- // If there is a dedicated constant-time implementation for this curve operation,
- // use that instead of the generic one.
- if specific, ok := matchesSpecificCurve(curve, p224, p256, p384, p521); ok {
- return specific.ScalarMult(Bx, By, k)
- }
-
- Bz := new(big.Int).SetInt64(1)
- x, y, z := new(big.Int), new(big.Int), new(big.Int)
-
- for _, b := range k {
- for bitNum := 0; bitNum < 8; bitNum++ {
- x, y, z = curve.doubleJacobian(x, y, z)
- if b&0x80 == 0x80 {
- x, y, z = curve.addJacobian(Bx, By, Bz, x, y, z)
- }
- b <<= 1
- }
- }
-
- return curve.affineFromJacobian(x, y, z)
-}
-
-func (curve *CurveParams) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
- // If there is a dedicated constant-time implementation for this curve operation,
- // use that instead of the generic one.
- if specific, ok := matchesSpecificCurve(curve, p224, p256, p384, p521); ok {
- return specific.ScalarBaseMult(k)
- }
-
- return curve.ScalarMult(curve.Gx, curve.Gy, k)
-}
-
-var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f}
-
-// GenerateKey returns a public/private key pair. The private key is
-// generated using the given reader, which must return random data.
-func GenerateKey(curve Curve, rand io.Reader) (priv []byte, x, y *big.Int, err error) {
- N := curve.Params().N
- bitSize := N.BitLen()
- byteLen := (bitSize + 7) / 8
- priv = make([]byte, byteLen)
-
- for x == nil {
- _, err = io.ReadFull(rand, priv)
- if err != nil {
- return
- }
- // We have to mask off any excess bits in the case that the size of the
- // underlying field is not a whole number of bytes.
- priv[0] &= mask[bitSize%8]
- // This is because, in tests, rand will return all zeros and we don't
- // want to get the point at infinity and loop forever.
- priv[1] ^= 0x42
-
- // If the scalar is out of range, sample another random number.
- if new(big.Int).SetBytes(priv).Cmp(N) >= 0 {
- continue
- }
-
- x, y = curve.ScalarBaseMult(priv)
- }
- return
-}
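
Editor's note: typical use of this API through one of the curve singletons defined below; a minimal example using only the standard library.

```go
package main

import (
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	priv, x, y, err := elliptic.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	fmt.Printf("priv: %x\npub: (%x, %x)\n", priv, x, y)
}
```
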
-
-// Marshal converts a point on the curve into the uncompressed form specified in
-// SEC 1, Version 2.0, Section 2.3.3. If the point is not on the curve (or is
-// the conventional point at infinity), the behavior is undefined.
-func Marshal(curve Curve, x, y *big.Int) []byte {
- byteLen := (curve.Params().BitSize + 7) / 8
-
- ret := make([]byte, 1+2*byteLen)
- ret[0] = 4 // uncompressed point
-
- x.FillBytes(ret[1 : 1+byteLen])
- y.FillBytes(ret[1+byteLen : 1+2*byteLen])
-
- return ret
-}
-
-// MarshalCompressed converts a point on the curve into the compressed form
-// specified in SEC 1, Version 2.0, Section 2.3.3. If the point is not on the
-// curve (or is the conventional point at infinity), the behavior is undefined.
-func MarshalCompressed(curve Curve, x, y *big.Int) []byte {
- byteLen := (curve.Params().BitSize + 7) / 8
- compressed := make([]byte, 1+byteLen)
- compressed[0] = byte(y.Bit(0)) | 2
- x.FillBytes(compressed[1:])
- return compressed
-}
-
-// Unmarshal converts a point, serialized by Marshal, into an x, y pair. It is
-// an error if the point is not in uncompressed form, is not on the curve, or is
-// the point at infinity. On error, x = nil.
-func Unmarshal(curve Curve, data []byte) (x, y *big.Int) {
- byteLen := (curve.Params().BitSize + 7) / 8
- if len(data) != 1+2*byteLen {
- return nil, nil
- }
- if data[0] != 4 { // uncompressed form
- return nil, nil
- }
- p := curve.Params().P
- x = new(big.Int).SetBytes(data[1 : 1+byteLen])
- y = new(big.Int).SetBytes(data[1+byteLen:])
- if x.Cmp(p) >= 0 || y.Cmp(p) >= 0 {
- return nil, nil
- }
- if !curve.IsOnCurve(x, y) {
- return nil, nil
- }
- return
-}
-
-// UnmarshalCompressed converts a point, serialized by MarshalCompressed, into
-// an x, y pair. It is an error if the point is not in compressed form, is not
-// on the curve, or is the point at infinity. On error, x = nil.
-func UnmarshalCompressed(curve Curve, data []byte) (x, y *big.Int) {
- byteLen := (curve.Params().BitSize + 7) / 8
- if len(data) != 1+byteLen {
- return nil, nil
- }
- if data[0] != 2 && data[0] != 3 { // compressed form
- return nil, nil
- }
- p := curve.Params().P
- x = new(big.Int).SetBytes(data[1:])
- if x.Cmp(p) >= 0 {
- return nil, nil
- }
- // y² = x³ - 3x + b
- y = curve.Params().polynomial(x)
- y = y.ModSqrt(y, p)
- if y == nil {
- return nil, nil
- }
- if byte(y.Bit(0)) != data[0]&1 {
- y.Neg(y).Mod(y, p)
- }
- if !curve.IsOnCurve(x, y) {
- return nil, nil
- }
- return
-}
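
Editor's note: a round-trip example of the four (un)marshal functions above on P-256. The uncompressed form is 0x04 || X || Y (65 bytes), the compressed form is 0x02/0x03 || X (33 bytes).

```go
package main

import (
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	curve := elliptic.P256()
	_, x, y, err := elliptic.GenerateKey(curve, rand.Reader)
	if err != nil {
		panic(err)
	}

	u := elliptic.Marshal(curve, x, y)           // 0x04 || X || Y
	c := elliptic.MarshalCompressed(curve, x, y) // 0x02/0x03 || X

	ux, uy := elliptic.Unmarshal(curve, u)
	cx, cy := elliptic.UnmarshalCompressed(curve, c)

	fmt.Println(len(u), len(c)) // 65 33
	fmt.Println(ux.Cmp(x) == 0 && uy.Cmp(y) == 0 &&
		cx.Cmp(x) == 0 && cy.Cmp(y) == 0) // true
}
```
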
-
-var initonce sync.Once
-
-func initAll() {
- initP224()
- initP256()
- initP384()
- initP521()
-}
-
-// P224 returns a Curve which implements NIST P-224 (FIPS 186-3, section D.2.2),
-// also known as secp224r1. The CurveParams.Name of this Curve is "P-224".
-//
-// Multiple invocations of this function will return the same value, so it can
-// be used for equality checks and switch statements.
-//
-// The cryptographic operations are implemented using constant-time algorithms.
-func P224() Curve {
- initonce.Do(initAll)
- return p224
-}
-
-// P256 returns a Curve which implements NIST P-256 (FIPS 186-3, section D.2.3),
-// also known as secp256r1 or prime256v1. The CurveParams.Name of this Curve is
-// "P-256".
-//
-// Multiple invocations of this function will return the same value, so it can
-// be used for equality checks and switch statements.
-//
-// ScalarMult and ScalarBaseMult are implemented using constant-time algorithms.
-func P256() Curve {
- initonce.Do(initAll)
- return p256
-}
-
-// P384 returns a Curve which implements NIST P-384 (FIPS 186-3, section D.2.4),
-// also known as secp384r1. The CurveParams.Name of this Curve is "P-384".
-//
-// Multiple invocations of this function will return the same value, so it can
-// be used for equality checks and switch statements.
-//
-// The cryptographic operations are implemented using constant-time algorithms.
-func P384() Curve {
- initonce.Do(initAll)
- return p384
-}
-
-// P521 returns a Curve which implements NIST P-521 (FIPS 186-3, section D.2.5),
-// also known as secp521r1. The CurveParams.Name of this Curve is "P-521".
-//
-// Multiple invocations of this function will return the same value, so it can
-// be used for equality checks and switch statements.
-//
-// The cryptographic operations are implemented using constant-time algorithms.
-func P521() Curve {
- initonce.Do(initAll)
- return p521
-}
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p224_fiat64.go b/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p224_fiat64.go
deleted file mode 100644
index 4ece3e9220..0000000000
--- a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p224_fiat64.go
+++ /dev/null
@@ -1,1429 +0,0 @@
-// Code generated by Fiat Cryptography. DO NOT EDIT.
-//
-// Autogenerated: word_by_word_montgomery --lang Go --no-wide-int --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --internal-static --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name fiat --no-prefix-fiat p224 64 '2^224 - 2^96 + 1' mul square add sub one from_montgomery to_montgomery selectznz to_bytes from_bytes
-//
-// curve description: p224
-//
-// machine_wordsize = 64 (from "64")
-//
-// requested operations: mul, square, add, sub, one, from_montgomery, to_montgomery, selectznz, to_bytes, from_bytes
-//
-// m = 0xffffffffffffffffffffffffffffffff000000000000000000000001 (from "2^224 - 2^96 + 1")
-//
-//
-//
-// NOTE: In addition to the bounds specified above each function, all
-//
-// functions synthesized for this Montgomery arithmetic require the
-//
-// input to be strictly less than the prime modulus (m), and also
-//
-// require the input to be in the unique saturated representation.
-//
-// All functions also ensure that these two properties are true of
-//
-// return values.
-//
-//
-//
-// Computed values:
-//
-// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192)
-//
-// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216)
-//
-// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in
-//
-// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256
-
-package fiat
-
-import "math/bits"
-
-type p224Uint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
-type p224Int1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
-
-// The type p224MontgomeryDomainFieldElement is a field element in the Montgomery domain.
-//
-// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-type p224MontgomeryDomainFieldElement [4]uint64
-
-// The type p224NonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
-//
-// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-type p224NonMontgomeryDomainFieldElement [4]uint64
-
-// p224CmovznzU64 is a single-word conditional move.
-//
-// Postconditions:
-// out1 = (if arg1 = 0 then arg2 else arg3)
-//
-// Input Bounds:
-// arg1: [0x0 ~> 0x1]
-// arg2: [0x0 ~> 0xffffffffffffffff]
-// arg3: [0x0 ~> 0xffffffffffffffff]
-// Output Bounds:
-// out1: [0x0 ~> 0xffffffffffffffff]
-func p224CmovznzU64(out1 *uint64, arg1 p224Uint1, arg2 uint64, arg3 uint64) {
- x1 := (uint64(arg1) * 0xffffffffffffffff)
- x2 := ((x1 & arg3) | ((^x1) & arg2))
- *out1 = x2
-}
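
Editor's note: a standalone sketch (the name cmov is illustrative) of the branch-free select above. Multiplying the 0/1 condition by 2^64 − 1 yields an all-zeros or all-ones mask, so the selection takes the same time regardless of the condition.

```go
package main

import "fmt"

// cmov mirrors p224CmovznzU64: returns ifZero when cond == 0, ifNonzero
// when cond == 1, without branching on cond.
func cmov(cond, ifZero, ifNonzero uint64) uint64 {
	mask := cond * 0xffffffffffffffff // 0x000...0 or 0xfff...f
	return (mask & ifNonzero) | (^mask & ifZero)
}

func main() {
	fmt.Println(cmov(0, 10, 20)) // 10
	fmt.Println(cmov(1, 10, 20)) // 20
}
```
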
-
-// p224Mul multiplies two field elements in the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// 0 ≤ eval arg2 < m
-// Postconditions:
-// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
-// 0 ≤ eval out1 < m
-//
-func p224Mul(out1 *p224MontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement, arg2 *p224MontgomeryDomainFieldElement) {
- x1 := arg1[1]
- x2 := arg1[2]
- x3 := arg1[3]
- x4 := arg1[0]
- var x5 uint64
- var x6 uint64
- x6, x5 = bits.Mul64(x4, arg2[3])
- var x7 uint64
- var x8 uint64
- x8, x7 = bits.Mul64(x4, arg2[2])
- var x9 uint64
- var x10 uint64
- x10, x9 = bits.Mul64(x4, arg2[1])
- var x11 uint64
- var x12 uint64
- x12, x11 = bits.Mul64(x4, arg2[0])
- var x13 uint64
- var x14 uint64
- x13, x14 = bits.Add64(x12, x9, uint64(0x0))
- var x15 uint64
- var x16 uint64
- x15, x16 = bits.Add64(x10, x7, uint64(p224Uint1(x14)))
- var x17 uint64
- var x18 uint64
- x17, x18 = bits.Add64(x8, x5, uint64(p224Uint1(x16)))
- x19 := (uint64(p224Uint1(x18)) + x6)
- var x20 uint64
- _, x20 = bits.Mul64(x11, 0xffffffffffffffff)
- var x22 uint64
- var x23 uint64
- x23, x22 = bits.Mul64(x20, 0xffffffff)
- var x24 uint64
- var x25 uint64
- x25, x24 = bits.Mul64(x20, 0xffffffffffffffff)
- var x26 uint64
- var x27 uint64
- x27, x26 = bits.Mul64(x20, 0xffffffff00000000)
- var x28 uint64
- var x29 uint64
- x28, x29 = bits.Add64(x27, x24, uint64(0x0))
- var x30 uint64
- var x31 uint64
- x30, x31 = bits.Add64(x25, x22, uint64(p224Uint1(x29)))
- x32 := (uint64(p224Uint1(x31)) + x23)
- var x34 uint64
- _, x34 = bits.Add64(x11, x20, uint64(0x0))
- var x35 uint64
- var x36 uint64
- x35, x36 = bits.Add64(x13, x26, uint64(p224Uint1(x34)))
- var x37 uint64
- var x38 uint64
- x37, x38 = bits.Add64(x15, x28, uint64(p224Uint1(x36)))
- var x39 uint64
- var x40 uint64
- x39, x40 = bits.Add64(x17, x30, uint64(p224Uint1(x38)))
- var x41 uint64
- var x42 uint64
- x41, x42 = bits.Add64(x19, x32, uint64(p224Uint1(x40)))
- var x43 uint64
- var x44 uint64
- x44, x43 = bits.Mul64(x1, arg2[3])
- var x45 uint64
- var x46 uint64
- x46, x45 = bits.Mul64(x1, arg2[2])
- var x47 uint64
- var x48 uint64
- x48, x47 = bits.Mul64(x1, arg2[1])
- var x49 uint64
- var x50 uint64
- x50, x49 = bits.Mul64(x1, arg2[0])
- var x51 uint64
- var x52 uint64
- x51, x52 = bits.Add64(x50, x47, uint64(0x0))
- var x53 uint64
- var x54 uint64
- x53, x54 = bits.Add64(x48, x45, uint64(p224Uint1(x52)))
- var x55 uint64
- var x56 uint64
- x55, x56 = bits.Add64(x46, x43, uint64(p224Uint1(x54)))
- x57 := (uint64(p224Uint1(x56)) + x44)
- var x58 uint64
- var x59 uint64
- x58, x59 = bits.Add64(x35, x49, uint64(0x0))
- var x60 uint64
- var x61 uint64
- x60, x61 = bits.Add64(x37, x51, uint64(p224Uint1(x59)))
- var x62 uint64
- var x63 uint64
- x62, x63 = bits.Add64(x39, x53, uint64(p224Uint1(x61)))
- var x64 uint64
- var x65 uint64
- x64, x65 = bits.Add64(x41, x55, uint64(p224Uint1(x63)))
- var x66 uint64
- var x67 uint64
- x66, x67 = bits.Add64(uint64(p224Uint1(x42)), x57, uint64(p224Uint1(x65)))
- var x68 uint64
- _, x68 = bits.Mul64(x58, 0xffffffffffffffff)
- var x70 uint64
- var x71 uint64
- x71, x70 = bits.Mul64(x68, 0xffffffff)
- var x72 uint64
- var x73 uint64
- x73, x72 = bits.Mul64(x68, 0xffffffffffffffff)
- var x74 uint64
- var x75 uint64
- x75, x74 = bits.Mul64(x68, 0xffffffff00000000)
- var x76 uint64
- var x77 uint64
- x76, x77 = bits.Add64(x75, x72, uint64(0x0))
- var x78 uint64
- var x79 uint64
- x78, x79 = bits.Add64(x73, x70, uint64(p224Uint1(x77)))
- x80 := (uint64(p224Uint1(x79)) + x71)
- var x82 uint64
- _, x82 = bits.Add64(x58, x68, uint64(0x0))
- var x83 uint64
- var x84 uint64
- x83, x84 = bits.Add64(x60, x74, uint64(p224Uint1(x82)))
- var x85 uint64
- var x86 uint64
- x85, x86 = bits.Add64(x62, x76, uint64(p224Uint1(x84)))
- var x87 uint64
- var x88 uint64
- x87, x88 = bits.Add64(x64, x78, uint64(p224Uint1(x86)))
- var x89 uint64
- var x90 uint64
- x89, x90 = bits.Add64(x66, x80, uint64(p224Uint1(x88)))
- x91 := (uint64(p224Uint1(x90)) + uint64(p224Uint1(x67)))
- var x92 uint64
- var x93 uint64
- x93, x92 = bits.Mul64(x2, arg2[3])
- var x94 uint64
- var x95 uint64
- x95, x94 = bits.Mul64(x2, arg2[2])
- var x96 uint64
- var x97 uint64
- x97, x96 = bits.Mul64(x2, arg2[1])
- var x98 uint64
- var x99 uint64
- x99, x98 = bits.Mul64(x2, arg2[0])
- var x100 uint64
- var x101 uint64
- x100, x101 = bits.Add64(x99, x96, uint64(0x0))
- var x102 uint64
- var x103 uint64
- x102, x103 = bits.Add64(x97, x94, uint64(p224Uint1(x101)))
- var x104 uint64
- var x105 uint64
- x104, x105 = bits.Add64(x95, x92, uint64(p224Uint1(x103)))
- x106 := (uint64(p224Uint1(x105)) + x93)
- var x107 uint64
- var x108 uint64
- x107, x108 = bits.Add64(x83, x98, uint64(0x0))
- var x109 uint64
- var x110 uint64
- x109, x110 = bits.Add64(x85, x100, uint64(p224Uint1(x108)))
- var x111 uint64
- var x112 uint64
- x111, x112 = bits.Add64(x87, x102, uint64(p224Uint1(x110)))
- var x113 uint64
- var x114 uint64
- x113, x114 = bits.Add64(x89, x104, uint64(p224Uint1(x112)))
- var x115 uint64
- var x116 uint64
- x115, x116 = bits.Add64(x91, x106, uint64(p224Uint1(x114)))
- var x117 uint64
- _, x117 = bits.Mul64(x107, 0xffffffffffffffff)
- var x119 uint64
- var x120 uint64
- x120, x119 = bits.Mul64(x117, 0xffffffff)
- var x121 uint64
- var x122 uint64
- x122, x121 = bits.Mul64(x117, 0xffffffffffffffff)
- var x123 uint64
- var x124 uint64
- x124, x123 = bits.Mul64(x117, 0xffffffff00000000)
- var x125 uint64
- var x126 uint64
- x125, x126 = bits.Add64(x124, x121, uint64(0x0))
- var x127 uint64
- var x128 uint64
- x127, x128 = bits.Add64(x122, x119, uint64(p224Uint1(x126)))
- x129 := (uint64(p224Uint1(x128)) + x120)
- var x131 uint64
- _, x131 = bits.Add64(x107, x117, uint64(0x0))
- var x132 uint64
- var x133 uint64
- x132, x133 = bits.Add64(x109, x123, uint64(p224Uint1(x131)))
- var x134 uint64
- var x135 uint64
- x134, x135 = bits.Add64(x111, x125, uint64(p224Uint1(x133)))
- var x136 uint64
- var x137 uint64
- x136, x137 = bits.Add64(x113, x127, uint64(p224Uint1(x135)))
- var x138 uint64
- var x139 uint64
- x138, x139 = bits.Add64(x115, x129, uint64(p224Uint1(x137)))
- x140 := (uint64(p224Uint1(x139)) + uint64(p224Uint1(x116)))
- var x141 uint64
- var x142 uint64
- x142, x141 = bits.Mul64(x3, arg2[3])
- var x143 uint64
- var x144 uint64
- x144, x143 = bits.Mul64(x3, arg2[2])
- var x145 uint64
- var x146 uint64
- x146, x145 = bits.Mul64(x3, arg2[1])
- var x147 uint64
- var x148 uint64
- x148, x147 = bits.Mul64(x3, arg2[0])
- var x149 uint64
- var x150 uint64
- x149, x150 = bits.Add64(x148, x145, uint64(0x0))
- var x151 uint64
- var x152 uint64
- x151, x152 = bits.Add64(x146, x143, uint64(p224Uint1(x150)))
- var x153 uint64
- var x154 uint64
- x153, x154 = bits.Add64(x144, x141, uint64(p224Uint1(x152)))
- x155 := (uint64(p224Uint1(x154)) + x142)
- var x156 uint64
- var x157 uint64
- x156, x157 = bits.Add64(x132, x147, uint64(0x0))
- var x158 uint64
- var x159 uint64
- x158, x159 = bits.Add64(x134, x149, uint64(p224Uint1(x157)))
- var x160 uint64
- var x161 uint64
- x160, x161 = bits.Add64(x136, x151, uint64(p224Uint1(x159)))
- var x162 uint64
- var x163 uint64
- x162, x163 = bits.Add64(x138, x153, uint64(p224Uint1(x161)))
- var x164 uint64
- var x165 uint64
- x164, x165 = bits.Add64(x140, x155, uint64(p224Uint1(x163)))
- var x166 uint64
- _, x166 = bits.Mul64(x156, 0xffffffffffffffff)
- var x168 uint64
- var x169 uint64
- x169, x168 = bits.Mul64(x166, 0xffffffff)
- var x170 uint64
- var x171 uint64
- x171, x170 = bits.Mul64(x166, 0xffffffffffffffff)
- var x172 uint64
- var x173 uint64
- x173, x172 = bits.Mul64(x166, 0xffffffff00000000)
- var x174 uint64
- var x175 uint64
- x174, x175 = bits.Add64(x173, x170, uint64(0x0))
- var x176 uint64
- var x177 uint64
- x176, x177 = bits.Add64(x171, x168, uint64(p224Uint1(x175)))
- x178 := (uint64(p224Uint1(x177)) + x169)
- var x180 uint64
- _, x180 = bits.Add64(x156, x166, uint64(0x0))
- var x181 uint64
- var x182 uint64
- x181, x182 = bits.Add64(x158, x172, uint64(p224Uint1(x180)))
- var x183 uint64
- var x184 uint64
- x183, x184 = bits.Add64(x160, x174, uint64(p224Uint1(x182)))
- var x185 uint64
- var x186 uint64
- x185, x186 = bits.Add64(x162, x176, uint64(p224Uint1(x184)))
- var x187 uint64
- var x188 uint64
- x187, x188 = bits.Add64(x164, x178, uint64(p224Uint1(x186)))
- x189 := (uint64(p224Uint1(x188)) + uint64(p224Uint1(x165)))
- var x190 uint64
- var x191 uint64
- x190, x191 = bits.Sub64(x181, uint64(0x1), uint64(0x0))
- var x192 uint64
- var x193 uint64
- x192, x193 = bits.Sub64(x183, 0xffffffff00000000, uint64(p224Uint1(x191)))
- var x194 uint64
- var x195 uint64
- x194, x195 = bits.Sub64(x185, 0xffffffffffffffff, uint64(p224Uint1(x193)))
- var x196 uint64
- var x197 uint64
- x196, x197 = bits.Sub64(x187, 0xffffffff, uint64(p224Uint1(x195)))
- var x199 uint64
- _, x199 = bits.Sub64(x189, uint64(0x0), uint64(p224Uint1(x197)))
- var x200 uint64
- p224CmovznzU64(&x200, p224Uint1(x199), x190, x181)
- var x201 uint64
- p224CmovznzU64(&x201, p224Uint1(x199), x192, x183)
- var x202 uint64
- p224CmovznzU64(&x202, p224Uint1(x199), x194, x185)
- var x203 uint64
- p224CmovznzU64(&x203, p224Uint1(x199), x196, x187)
- out1[0] = x200
- out1[1] = x201
- out1[2] = x202
- out1[3] = x203
-}
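
Editor's note: the generated code operates in the Montgomery domain. An element a is stored as a·R mod m with R = 2^256 (four 64-bit words), and p224Mul returns a·b·R⁻¹ mod m, which is the Montgomery form of a·b. A hedged math/big sketch of that invariant (not from the source):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// m = 2^224 - 2^96 + 1, R = 2^256.
	m, _ := new(big.Int).SetString("ffffffffffffffffffffffffffffffff000000000000000000000001", 16)
	R := new(big.Int).Lsh(big.NewInt(1), 256)
	Rinv := new(big.Int).ModInverse(R, m)

	a, b := big.NewInt(12345), big.NewInt(67890)
	aM := new(big.Int).Mod(new(big.Int).Mul(a, R), m) // Montgomery form of a
	bM := new(big.Int).Mod(new(big.Int).Mul(b, R), m) // Montgomery form of b

	// What p224Mul computes on the word arrays: aM*bM*R^-1 mod m.
	prodM := new(big.Int).Mul(aM, bM)
	prodM.Mul(prodM, Rinv).Mod(prodM, m)

	// Leaving the Montgomery domain recovers a*b mod m.
	got := new(big.Int).Mod(new(big.Int).Mul(prodM, Rinv), m)
	fmt.Println(got.Cmp(new(big.Int).Mul(a, b)) == 0) // true
}
```
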
-
-// p224Square squares a field element in the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// Postconditions:
-// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg1)) mod m
-// 0 ≤ eval out1 < m
-//
-func p224Square(out1 *p224MontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement) {
- x1 := arg1[1]
- x2 := arg1[2]
- x3 := arg1[3]
- x4 := arg1[0]
- var x5 uint64
- var x6 uint64
- x6, x5 = bits.Mul64(x4, arg1[3])
- var x7 uint64
- var x8 uint64
- x8, x7 = bits.Mul64(x4, arg1[2])
- var x9 uint64
- var x10 uint64
- x10, x9 = bits.Mul64(x4, arg1[1])
- var x11 uint64
- var x12 uint64
- x12, x11 = bits.Mul64(x4, arg1[0])
- var x13 uint64
- var x14 uint64
- x13, x14 = bits.Add64(x12, x9, uint64(0x0))
- var x15 uint64
- var x16 uint64
- x15, x16 = bits.Add64(x10, x7, uint64(p224Uint1(x14)))
- var x17 uint64
- var x18 uint64
- x17, x18 = bits.Add64(x8, x5, uint64(p224Uint1(x16)))
- x19 := (uint64(p224Uint1(x18)) + x6)
- var x20 uint64
- _, x20 = bits.Mul64(x11, 0xffffffffffffffff)
- var x22 uint64
- var x23 uint64
- x23, x22 = bits.Mul64(x20, 0xffffffff)
- var x24 uint64
- var x25 uint64
- x25, x24 = bits.Mul64(x20, 0xffffffffffffffff)
- var x26 uint64
- var x27 uint64
- x27, x26 = bits.Mul64(x20, 0xffffffff00000000)
- var x28 uint64
- var x29 uint64
- x28, x29 = bits.Add64(x27, x24, uint64(0x0))
- var x30 uint64
- var x31 uint64
- x30, x31 = bits.Add64(x25, x22, uint64(p224Uint1(x29)))
- x32 := (uint64(p224Uint1(x31)) + x23)
- var x34 uint64
- _, x34 = bits.Add64(x11, x20, uint64(0x0))
- var x35 uint64
- var x36 uint64
- x35, x36 = bits.Add64(x13, x26, uint64(p224Uint1(x34)))
- var x37 uint64
- var x38 uint64
- x37, x38 = bits.Add64(x15, x28, uint64(p224Uint1(x36)))
- var x39 uint64
- var x40 uint64
- x39, x40 = bits.Add64(x17, x30, uint64(p224Uint1(x38)))
- var x41 uint64
- var x42 uint64
- x41, x42 = bits.Add64(x19, x32, uint64(p224Uint1(x40)))
- var x43 uint64
- var x44 uint64
- x44, x43 = bits.Mul64(x1, arg1[3])
- var x45 uint64
- var x46 uint64
- x46, x45 = bits.Mul64(x1, arg1[2])
- var x47 uint64
- var x48 uint64
- x48, x47 = bits.Mul64(x1, arg1[1])
- var x49 uint64
- var x50 uint64
- x50, x49 = bits.Mul64(x1, arg1[0])
- var x51 uint64
- var x52 uint64
- x51, x52 = bits.Add64(x50, x47, uint64(0x0))
- var x53 uint64
- var x54 uint64
- x53, x54 = bits.Add64(x48, x45, uint64(p224Uint1(x52)))
- var x55 uint64
- var x56 uint64
- x55, x56 = bits.Add64(x46, x43, uint64(p224Uint1(x54)))
- x57 := (uint64(p224Uint1(x56)) + x44)
- var x58 uint64
- var x59 uint64
- x58, x59 = bits.Add64(x35, x49, uint64(0x0))
- var x60 uint64
- var x61 uint64
- x60, x61 = bits.Add64(x37, x51, uint64(p224Uint1(x59)))
- var x62 uint64
- var x63 uint64
- x62, x63 = bits.Add64(x39, x53, uint64(p224Uint1(x61)))
- var x64 uint64
- var x65 uint64
- x64, x65 = bits.Add64(x41, x55, uint64(p224Uint1(x63)))
- var x66 uint64
- var x67 uint64
- x66, x67 = bits.Add64(uint64(p224Uint1(x42)), x57, uint64(p224Uint1(x65)))
- var x68 uint64
- _, x68 = bits.Mul64(x58, 0xffffffffffffffff)
- var x70 uint64
- var x71 uint64
- x71, x70 = bits.Mul64(x68, 0xffffffff)
- var x72 uint64
- var x73 uint64
- x73, x72 = bits.Mul64(x68, 0xffffffffffffffff)
- var x74 uint64
- var x75 uint64
- x75, x74 = bits.Mul64(x68, 0xffffffff00000000)
- var x76 uint64
- var x77 uint64
- x76, x77 = bits.Add64(x75, x72, uint64(0x0))
- var x78 uint64
- var x79 uint64
- x78, x79 = bits.Add64(x73, x70, uint64(p224Uint1(x77)))
- x80 := (uint64(p224Uint1(x79)) + x71)
- var x82 uint64
- _, x82 = bits.Add64(x58, x68, uint64(0x0))
- var x83 uint64
- var x84 uint64
- x83, x84 = bits.Add64(x60, x74, uint64(p224Uint1(x82)))
- var x85 uint64
- var x86 uint64
- x85, x86 = bits.Add64(x62, x76, uint64(p224Uint1(x84)))
- var x87 uint64
- var x88 uint64
- x87, x88 = bits.Add64(x64, x78, uint64(p224Uint1(x86)))
- var x89 uint64
- var x90 uint64
- x89, x90 = bits.Add64(x66, x80, uint64(p224Uint1(x88)))
- x91 := (uint64(p224Uint1(x90)) + uint64(p224Uint1(x67)))
- var x92 uint64
- var x93 uint64
- x93, x92 = bits.Mul64(x2, arg1[3])
- var x94 uint64
- var x95 uint64
- x95, x94 = bits.Mul64(x2, arg1[2])
- var x96 uint64
- var x97 uint64
- x97, x96 = bits.Mul64(x2, arg1[1])
- var x98 uint64
- var x99 uint64
- x99, x98 = bits.Mul64(x2, arg1[0])
- var x100 uint64
- var x101 uint64
- x100, x101 = bits.Add64(x99, x96, uint64(0x0))
- var x102 uint64
- var x103 uint64
- x102, x103 = bits.Add64(x97, x94, uint64(p224Uint1(x101)))
- var x104 uint64
- var x105 uint64
- x104, x105 = bits.Add64(x95, x92, uint64(p224Uint1(x103)))
- x106 := (uint64(p224Uint1(x105)) + x93)
- var x107 uint64
- var x108 uint64
- x107, x108 = bits.Add64(x83, x98, uint64(0x0))
- var x109 uint64
- var x110 uint64
- x109, x110 = bits.Add64(x85, x100, uint64(p224Uint1(x108)))
- var x111 uint64
- var x112 uint64
- x111, x112 = bits.Add64(x87, x102, uint64(p224Uint1(x110)))
- var x113 uint64
- var x114 uint64
- x113, x114 = bits.Add64(x89, x104, uint64(p224Uint1(x112)))
- var x115 uint64
- var x116 uint64
- x115, x116 = bits.Add64(x91, x106, uint64(p224Uint1(x114)))
- var x117 uint64
- _, x117 = bits.Mul64(x107, 0xffffffffffffffff)
- var x119 uint64
- var x120 uint64
- x120, x119 = bits.Mul64(x117, 0xffffffff)
- var x121 uint64
- var x122 uint64
- x122, x121 = bits.Mul64(x117, 0xffffffffffffffff)
- var x123 uint64
- var x124 uint64
- x124, x123 = bits.Mul64(x117, 0xffffffff00000000)
- var x125 uint64
- var x126 uint64
- x125, x126 = bits.Add64(x124, x121, uint64(0x0))
- var x127 uint64
- var x128 uint64
- x127, x128 = bits.Add64(x122, x119, uint64(p224Uint1(x126)))
- x129 := (uint64(p224Uint1(x128)) + x120)
- var x131 uint64
- _, x131 = bits.Add64(x107, x117, uint64(0x0))
- var x132 uint64
- var x133 uint64
- x132, x133 = bits.Add64(x109, x123, uint64(p224Uint1(x131)))
- var x134 uint64
- var x135 uint64
- x134, x135 = bits.Add64(x111, x125, uint64(p224Uint1(x133)))
- var x136 uint64
- var x137 uint64
- x136, x137 = bits.Add64(x113, x127, uint64(p224Uint1(x135)))
- var x138 uint64
- var x139 uint64
- x138, x139 = bits.Add64(x115, x129, uint64(p224Uint1(x137)))
- x140 := (uint64(p224Uint1(x139)) + uint64(p224Uint1(x116)))
- var x141 uint64
- var x142 uint64
- x142, x141 = bits.Mul64(x3, arg1[3])
- var x143 uint64
- var x144 uint64
- x144, x143 = bits.Mul64(x3, arg1[2])
- var x145 uint64
- var x146 uint64
- x146, x145 = bits.Mul64(x3, arg1[1])
- var x147 uint64
- var x148 uint64
- x148, x147 = bits.Mul64(x3, arg1[0])
- var x149 uint64
- var x150 uint64
- x149, x150 = bits.Add64(x148, x145, uint64(0x0))
- var x151 uint64
- var x152 uint64
- x151, x152 = bits.Add64(x146, x143, uint64(p224Uint1(x150)))
- var x153 uint64
- var x154 uint64
- x153, x154 = bits.Add64(x144, x141, uint64(p224Uint1(x152)))
- x155 := (uint64(p224Uint1(x154)) + x142)
- var x156 uint64
- var x157 uint64
- x156, x157 = bits.Add64(x132, x147, uint64(0x0))
- var x158 uint64
- var x159 uint64
- x158, x159 = bits.Add64(x134, x149, uint64(p224Uint1(x157)))
- var x160 uint64
- var x161 uint64
- x160, x161 = bits.Add64(x136, x151, uint64(p224Uint1(x159)))
- var x162 uint64
- var x163 uint64
- x162, x163 = bits.Add64(x138, x153, uint64(p224Uint1(x161)))
- var x164 uint64
- var x165 uint64
- x164, x165 = bits.Add64(x140, x155, uint64(p224Uint1(x163)))
- var x166 uint64
- _, x166 = bits.Mul64(x156, 0xffffffffffffffff)
- var x168 uint64
- var x169 uint64
- x169, x168 = bits.Mul64(x166, 0xffffffff)
- var x170 uint64
- var x171 uint64
- x171, x170 = bits.Mul64(x166, 0xffffffffffffffff)
- var x172 uint64
- var x173 uint64
- x173, x172 = bits.Mul64(x166, 0xffffffff00000000)
- var x174 uint64
- var x175 uint64
- x174, x175 = bits.Add64(x173, x170, uint64(0x0))
- var x176 uint64
- var x177 uint64
- x176, x177 = bits.Add64(x171, x168, uint64(p224Uint1(x175)))
- x178 := (uint64(p224Uint1(x177)) + x169)
- var x180 uint64
- _, x180 = bits.Add64(x156, x166, uint64(0x0))
- var x181 uint64
- var x182 uint64
- x181, x182 = bits.Add64(x158, x172, uint64(p224Uint1(x180)))
- var x183 uint64
- var x184 uint64
- x183, x184 = bits.Add64(x160, x174, uint64(p224Uint1(x182)))
- var x185 uint64
- var x186 uint64
- x185, x186 = bits.Add64(x162, x176, uint64(p224Uint1(x184)))
- var x187 uint64
- var x188 uint64
- x187, x188 = bits.Add64(x164, x178, uint64(p224Uint1(x186)))
- x189 := (uint64(p224Uint1(x188)) + uint64(p224Uint1(x165)))
- var x190 uint64
- var x191 uint64
- x190, x191 = bits.Sub64(x181, uint64(0x1), uint64(0x0))
- var x192 uint64
- var x193 uint64
- x192, x193 = bits.Sub64(x183, 0xffffffff00000000, uint64(p224Uint1(x191)))
- var x194 uint64
- var x195 uint64
- x194, x195 = bits.Sub64(x185, 0xffffffffffffffff, uint64(p224Uint1(x193)))
- var x196 uint64
- var x197 uint64
- x196, x197 = bits.Sub64(x187, 0xffffffff, uint64(p224Uint1(x195)))
- var x199 uint64
- _, x199 = bits.Sub64(x189, uint64(0x0), uint64(p224Uint1(x197)))
- var x200 uint64
- p224CmovznzU64(&x200, p224Uint1(x199), x190, x181)
- var x201 uint64
- p224CmovznzU64(&x201, p224Uint1(x199), x192, x183)
- var x202 uint64
- p224CmovznzU64(&x202, p224Uint1(x199), x194, x185)
- var x203 uint64
- p224CmovznzU64(&x203, p224Uint1(x199), x196, x187)
- out1[0] = x200
- out1[1] = x201
- out1[2] = x202
- out1[3] = x203
-}
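The arithmetic above ends with fiat-crypto's standard word-by-word Montgomery shape: each round multiplies the running low limb by m' = -m⁻¹ mod 2⁶⁴ (here the constant 0xffffffffffffffff, since the p224 modulus is ≡ 1 mod 2⁶⁴), adds that multiple of m so the low word cancels exactly, and shifts right one word; a final trial subtraction of m plus a conditional select brings the result under m. A minimal standalone sketch of one such reduction round, with an illustrative helper name and a sample modulus that are not taken from this file:

package main

import (
    "fmt"
    "math/bits"
)

// redcRound returns (t + q*m) / 2^64 for q = tLo * mPrime mod 2^64,
// which is congruent to t * 2^-64 (mod m). The low word of t + q*m is
// zero by construction, so dividing by 2^64 is an exact word shift.
// Assumes tHi < m and small enough that the final add cannot overflow;
// the generated code above tracks that extra carry bit explicitly.
func redcRound(tHi, tLo, m, mPrime uint64) uint64 {
    q := tLo * mPrime
    hi, lo := bits.Mul64(q, m)
    _, carry := bits.Add64(tLo, lo, 0) // low word cancels to zero
    r, _ := bits.Add64(tHi, hi, carry)
    return r
}

func main() {
    const m uint64 = 0xffffffff00000001 // a sample odd modulus
    inv := m                            // Newton's iteration for m^-1 mod 2^64
    for i := 0; i < 5; i++ {
        inv *= 2 - m*inv
    }
    r := redcRound(123, 456, m, -inv) // -inv = -m^-1 mod 2^64
    if r >= m {
        r -= m
    }
    _, want := bits.Div64(123, 456, m) // t mod m
    _, got := bits.Div64(r, 0, m)      // r * 2^64 mod m
    fmt.Println(got == want)           // true
}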
-
-// p224Add adds two field elements in the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// 0 ≤ eval arg2 < m
-// Postconditions:
-// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
-// 0 ≤ eval out1 < m
-//
-func p224Add(out1 *p224MontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement, arg2 *p224MontgomeryDomainFieldElement) {
- var x1 uint64
- var x2 uint64
- x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
- var x3 uint64
- var x4 uint64
- x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(p224Uint1(x2)))
- var x5 uint64
- var x6 uint64
- x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(p224Uint1(x4)))
- var x7 uint64
- var x8 uint64
- x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(p224Uint1(x6)))
- var x9 uint64
- var x10 uint64
- x9, x10 = bits.Sub64(x1, uint64(0x1), uint64(0x0))
- var x11 uint64
- var x12 uint64
- x11, x12 = bits.Sub64(x3, 0xffffffff00000000, uint64(p224Uint1(x10)))
- var x13 uint64
- var x14 uint64
- x13, x14 = bits.Sub64(x5, 0xffffffffffffffff, uint64(p224Uint1(x12)))
- var x15 uint64
- var x16 uint64
- x15, x16 = bits.Sub64(x7, 0xffffffff, uint64(p224Uint1(x14)))
- var x18 uint64
- _, x18 = bits.Sub64(uint64(p224Uint1(x8)), uint64(0x0), uint64(p224Uint1(x16)))
- var x19 uint64
- p224CmovznzU64(&x19, p224Uint1(x18), x9, x1)
- var x20 uint64
- p224CmovznzU64(&x20, p224Uint1(x18), x11, x3)
- var x21 uint64
- p224CmovznzU64(&x21, p224Uint1(x18), x13, x5)
- var x22 uint64
- p224CmovznzU64(&x22, p224Uint1(x18), x15, x7)
- out1[0] = x19
- out1[1] = x20
- out1[2] = x21
- out1[3] = x22
-}
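p224Add is the add-then-trial-subtract idiom: a full carry-chain addition of the limbs, a borrow-chain subtraction of the modulus limbs (0x1, 0xffffffff00000000, 0xffffffffffffffff, 0xffffffff are m = 2²²⁴ - 2⁹⁶ + 1 in little-endian 64-bit limbs), and a constant-time select keyed on whether the subtraction borrowed past the addition's carry. A generic sketch of the same shape, with illustrative helper names and a toy modulus:

package main

import (
    "fmt"
    "math/bits"
)

// cmov returns ifZero when cond is 0 and ifOne when cond is 1, branch-free.
func cmov(cond, ifZero, ifOne uint64) uint64 {
    mask := -cond
    return (mask & ifOne) | (^mask & ifZero)
}

// addMod returns (a + b) mod m for little-endian limbs, assuming a, b < m.
func addMod(a, b, m [4]uint64) (out [4]uint64) {
    var s, d [4]uint64
    var c, brw uint64
    for i := range s {
        s[i], c = bits.Add64(a[i], b[i], c)
    }
    for i := range d {
        d[i], brw = bits.Sub64(s[i], m[i], brw)
    }
    _, under := bits.Sub64(c, 0, brw) // 1 iff a+b < m: keep the raw sum
    for i := range out {
        out[i] = cmov(under, d[i], s[i])
    }
    return
}

func main() {
    m := [4]uint64{13} // toy modulus, low limb only
    fmt.Println(addMod([4]uint64{9}, [4]uint64{7}, m)) // [3 0 0 0]
}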
-
-// p224Sub subtracts two field elements in the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// 0 ≤ eval arg2 < m
-// Postconditions:
-// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
-// 0 ≤ eval out1 < m
-//
-func p224Sub(out1 *p224MontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement, arg2 *p224MontgomeryDomainFieldElement) {
- var x1 uint64
- var x2 uint64
- x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
- var x3 uint64
- var x4 uint64
- x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(p224Uint1(x2)))
- var x5 uint64
- var x6 uint64
- x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(p224Uint1(x4)))
- var x7 uint64
- var x8 uint64
- x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(p224Uint1(x6)))
- var x9 uint64
- p224CmovznzU64(&x9, p224Uint1(x8), uint64(0x0), 0xffffffffffffffff)
- var x10 uint64
- var x11 uint64
- x10, x11 = bits.Add64(x1, uint64((p224Uint1(x9) & 0x1)), uint64(0x0))
- var x12 uint64
- var x13 uint64
- x12, x13 = bits.Add64(x3, (x9 & 0xffffffff00000000), uint64(p224Uint1(x11)))
- var x14 uint64
- var x15 uint64
- x14, x15 = bits.Add64(x5, x9, uint64(p224Uint1(x13)))
- var x16 uint64
- x16, _ = bits.Add64(x7, (x9 & 0xffffffff), uint64(p224Uint1(x15)))
- out1[0] = x10
- out1[1] = x12
- out1[2] = x14
- out1[3] = x16
-}
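p224Sub uses the complementary trick: subtract limb-wise, then turn the final borrow into an all-ones/all-zeros mask (x9) and add back exactly the limbs of m under that mask, so the result is corrected only when the raw difference went negative. The same shape in a standalone sketch (illustrative names, toy modulus):

package main

import (
    "fmt"
    "math/bits"
)

// subMod returns (a - b) mod m for little-endian limbs, assuming a, b < m.
func subMod(a, b, m [4]uint64) (out [4]uint64) {
    var d [4]uint64
    var brw, c uint64
    for i := range d {
        d[i], brw = bits.Sub64(a[i], b[i], brw)
    }
    mask := -brw // all ones iff a < b, computed without a branch
    for i := range out {
        out[i], c = bits.Add64(d[i], mask&m[i], c)
    }
    return
}

func main() {
    m := [4]uint64{13}
    fmt.Println(subMod([4]uint64{5}, [4]uint64{9}, m)) // [9 0 0 0]
}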
-
-// p224SetOne returns the field element one in the Montgomery domain.
-//
-// Postconditions:
-// eval (from_montgomery out1) mod m = 1 mod m
-// 0 ≤ eval out1 < m
-//
-func p224SetOne(out1 *p224MontgomeryDomainFieldElement) {
- out1[0] = 0xffffffff00000000
- out1[1] = 0xffffffffffffffff
- out1[2] = uint64(0x0)
- out1[3] = uint64(0x0)
-}
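The two magic limbs in p224SetOne are not arbitrary: the Montgomery representation of 1 is R mod m with R = 2²⁵⁶, and for m = 2²²⁴ - 2⁹⁶ + 1 that is 2¹²⁸ - 2³², i.e. limbs [0xffffffff00000000, 0xffffffffffffffff, 0, 0]. A quick math/big check:

package main

import (
    "fmt"
    "math/big"
)

func main() {
    m := new(big.Int).Lsh(big.NewInt(1), 224)
    m.Sub(m, new(big.Int).Lsh(big.NewInt(1), 96))
    m.Add(m, big.NewInt(1)) // p224 = 2^224 - 2^96 + 1
    one := new(big.Int).Lsh(big.NewInt(1), 256)
    one.Mod(one, m)          // R mod m, the Montgomery form of 1
    fmt.Printf("%#x\n", one) // 0xffffffffffffffffffffffff00000000
}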
-
-// p224FromMontgomery translates a field element out of the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// Postconditions:
-// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m
-// 0 ≤ eval out1 < m
-//
-func p224FromMontgomery(out1 *p224NonMontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement) {
- x1 := arg1[0]
- var x2 uint64
- _, x2 = bits.Mul64(x1, 0xffffffffffffffff)
- var x4 uint64
- var x5 uint64
- x5, x4 = bits.Mul64(x2, 0xffffffff)
- var x6 uint64
- var x7 uint64
- x7, x6 = bits.Mul64(x2, 0xffffffffffffffff)
- var x8 uint64
- var x9 uint64
- x9, x8 = bits.Mul64(x2, 0xffffffff00000000)
- var x10 uint64
- var x11 uint64
- x10, x11 = bits.Add64(x9, x6, uint64(0x0))
- var x12 uint64
- var x13 uint64
- x12, x13 = bits.Add64(x7, x4, uint64(p224Uint1(x11)))
- var x15 uint64
- _, x15 = bits.Add64(x1, x2, uint64(0x0))
- var x16 uint64
- var x17 uint64
- x16, x17 = bits.Add64(uint64(0x0), x8, uint64(p224Uint1(x15)))
- var x18 uint64
- var x19 uint64
- x18, x19 = bits.Add64(uint64(0x0), x10, uint64(p224Uint1(x17)))
- var x20 uint64
- var x21 uint64
- x20, x21 = bits.Add64(uint64(0x0), x12, uint64(p224Uint1(x19)))
- var x22 uint64
- var x23 uint64
- x22, x23 = bits.Add64(x16, arg1[1], uint64(0x0))
- var x24 uint64
- var x25 uint64
- x24, x25 = bits.Add64(x18, uint64(0x0), uint64(p224Uint1(x23)))
- var x26 uint64
- var x27 uint64
- x26, x27 = bits.Add64(x20, uint64(0x0), uint64(p224Uint1(x25)))
- var x28 uint64
- _, x28 = bits.Mul64(x22, 0xffffffffffffffff)
- var x30 uint64
- var x31 uint64
- x31, x30 = bits.Mul64(x28, 0xffffffff)
- var x32 uint64
- var x33 uint64
- x33, x32 = bits.Mul64(x28, 0xffffffffffffffff)
- var x34 uint64
- var x35 uint64
- x35, x34 = bits.Mul64(x28, 0xffffffff00000000)
- var x36 uint64
- var x37 uint64
- x36, x37 = bits.Add64(x35, x32, uint64(0x0))
- var x38 uint64
- var x39 uint64
- x38, x39 = bits.Add64(x33, x30, uint64(p224Uint1(x37)))
- var x41 uint64
- _, x41 = bits.Add64(x22, x28, uint64(0x0))
- var x42 uint64
- var x43 uint64
- x42, x43 = bits.Add64(x24, x34, uint64(p224Uint1(x41)))
- var x44 uint64
- var x45 uint64
- x44, x45 = bits.Add64(x26, x36, uint64(p224Uint1(x43)))
- var x46 uint64
- var x47 uint64
- x46, x47 = bits.Add64((uint64(p224Uint1(x27)) + (uint64(p224Uint1(x21)) + (uint64(p224Uint1(x13)) + x5))), x38, uint64(p224Uint1(x45)))
- var x48 uint64
- var x49 uint64
- x48, x49 = bits.Add64(x42, arg1[2], uint64(0x0))
- var x50 uint64
- var x51 uint64
- x50, x51 = bits.Add64(x44, uint64(0x0), uint64(p224Uint1(x49)))
- var x52 uint64
- var x53 uint64
- x52, x53 = bits.Add64(x46, uint64(0x0), uint64(p224Uint1(x51)))
- var x54 uint64
- _, x54 = bits.Mul64(x48, 0xffffffffffffffff)
- var x56 uint64
- var x57 uint64
- x57, x56 = bits.Mul64(x54, 0xffffffff)
- var x58 uint64
- var x59 uint64
- x59, x58 = bits.Mul64(x54, 0xffffffffffffffff)
- var x60 uint64
- var x61 uint64
- x61, x60 = bits.Mul64(x54, 0xffffffff00000000)
- var x62 uint64
- var x63 uint64
- x62, x63 = bits.Add64(x61, x58, uint64(0x0))
- var x64 uint64
- var x65 uint64
- x64, x65 = bits.Add64(x59, x56, uint64(p224Uint1(x63)))
- var x67 uint64
- _, x67 = bits.Add64(x48, x54, uint64(0x0))
- var x68 uint64
- var x69 uint64
- x68, x69 = bits.Add64(x50, x60, uint64(p224Uint1(x67)))
- var x70 uint64
- var x71 uint64
- x70, x71 = bits.Add64(x52, x62, uint64(p224Uint1(x69)))
- var x72 uint64
- var x73 uint64
- x72, x73 = bits.Add64((uint64(p224Uint1(x53)) + (uint64(p224Uint1(x47)) + (uint64(p224Uint1(x39)) + x31))), x64, uint64(p224Uint1(x71)))
- var x74 uint64
- var x75 uint64
- x74, x75 = bits.Add64(x68, arg1[3], uint64(0x0))
- var x76 uint64
- var x77 uint64
- x76, x77 = bits.Add64(x70, uint64(0x0), uint64(p224Uint1(x75)))
- var x78 uint64
- var x79 uint64
- x78, x79 = bits.Add64(x72, uint64(0x0), uint64(p224Uint1(x77)))
- var x80 uint64
- _, x80 = bits.Mul64(x74, 0xffffffffffffffff)
- var x82 uint64
- var x83 uint64
- x83, x82 = bits.Mul64(x80, 0xffffffff)
- var x84 uint64
- var x85 uint64
- x85, x84 = bits.Mul64(x80, 0xffffffffffffffff)
- var x86 uint64
- var x87 uint64
- x87, x86 = bits.Mul64(x80, 0xffffffff00000000)
- var x88 uint64
- var x89 uint64
- x88, x89 = bits.Add64(x87, x84, uint64(0x0))
- var x90 uint64
- var x91 uint64
- x90, x91 = bits.Add64(x85, x82, uint64(p224Uint1(x89)))
- var x93 uint64
- _, x93 = bits.Add64(x74, x80, uint64(0x0))
- var x94 uint64
- var x95 uint64
- x94, x95 = bits.Add64(x76, x86, uint64(p224Uint1(x93)))
- var x96 uint64
- var x97 uint64
- x96, x97 = bits.Add64(x78, x88, uint64(p224Uint1(x95)))
- var x98 uint64
- var x99 uint64
- x98, x99 = bits.Add64((uint64(p224Uint1(x79)) + (uint64(p224Uint1(x73)) + (uint64(p224Uint1(x65)) + x57))), x90, uint64(p224Uint1(x97)))
- x100 := (uint64(p224Uint1(x99)) + (uint64(p224Uint1(x91)) + x83))
- var x101 uint64
- var x102 uint64
- x101, x102 = bits.Sub64(x94, uint64(0x1), uint64(0x0))
- var x103 uint64
- var x104 uint64
- x103, x104 = bits.Sub64(x96, 0xffffffff00000000, uint64(p224Uint1(x102)))
- var x105 uint64
- var x106 uint64
- x105, x106 = bits.Sub64(x98, 0xffffffffffffffff, uint64(p224Uint1(x104)))
- var x107 uint64
- var x108 uint64
- x107, x108 = bits.Sub64(x100, 0xffffffff, uint64(p224Uint1(x106)))
- var x110 uint64
- _, x110 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p224Uint1(x108)))
- var x111 uint64
- p224CmovznzU64(&x111, p224Uint1(x110), x101, x94)
- var x112 uint64
- p224CmovznzU64(&x112, p224Uint1(x110), x103, x96)
- var x113 uint64
- p224CmovznzU64(&x113, p224Uint1(x110), x105, x98)
- var x114 uint64
- p224CmovznzU64(&x114, p224Uint1(x110), x107, x100)
- out1[0] = x111
- out1[1] = x112
- out1[2] = x113
- out1[3] = x114
-}
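p224FromMontgomery is four reduction rounds with no second operand: each round folds in the next limb of arg1 and divides by 2⁶⁴, so the net effect is multiplication by R⁻¹ = 2⁻²⁵⁶ mod m, matching the postcondition above. The domain round-trip, demonstrated with math/big rather than the fixed-limb code:

package main

import (
    "fmt"
    "math/big"
)

func main() {
    m := new(big.Int).Lsh(big.NewInt(1), 224)
    m.Sub(m, new(big.Int).Lsh(big.NewInt(1), 96))
    m.Add(m, big.NewInt(1)) // p224
    r := new(big.Int).Lsh(big.NewInt(1), 256)
    rInv := new(big.Int).ModInverse(r, m)

    a := big.NewInt(42)
    aMont := new(big.Int).Mul(a, r)
    aMont.Mod(aMont, m) // to Montgomery: a*R mod m
    back := new(big.Int).Mul(aMont, rInv)
    back.Mod(back, m)             // from Montgomery: a*R*R^-1 = a mod m
    fmt.Println(back.Cmp(a) == 0) // true
}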
-
-// p224ToMontgomery translates a field element into the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// Postconditions:
-// eval (from_montgomery out1) mod m = eval arg1 mod m
-// 0 ≤ eval out1 < m
-//
-func p224ToMontgomery(out1 *p224MontgomeryDomainFieldElement, arg1 *p224NonMontgomeryDomainFieldElement) {
- x1 := arg1[1]
- x2 := arg1[2]
- x3 := arg1[3]
- x4 := arg1[0]
- var x5 uint64
- var x6 uint64
- x6, x5 = bits.Mul64(x4, 0xffffffff)
- var x7 uint64
- var x8 uint64
- x8, x7 = bits.Mul64(x4, 0xfffffffe00000000)
- var x9 uint64
- var x10 uint64
- x10, x9 = bits.Mul64(x4, 0xffffffff00000000)
- var x11 uint64
- var x12 uint64
- x12, x11 = bits.Mul64(x4, 0xffffffff00000001)
- var x13 uint64
- var x14 uint64
- x13, x14 = bits.Add64(x12, x9, uint64(0x0))
- var x15 uint64
- var x16 uint64
- x15, x16 = bits.Add64(x10, x7, uint64(p224Uint1(x14)))
- var x17 uint64
- var x18 uint64
- x17, x18 = bits.Add64(x8, x5, uint64(p224Uint1(x16)))
- var x19 uint64
- _, x19 = bits.Mul64(x11, 0xffffffffffffffff)
- var x21 uint64
- var x22 uint64
- x22, x21 = bits.Mul64(x19, 0xffffffff)
- var x23 uint64
- var x24 uint64
- x24, x23 = bits.Mul64(x19, 0xffffffffffffffff)
- var x25 uint64
- var x26 uint64
- x26, x25 = bits.Mul64(x19, 0xffffffff00000000)
- var x27 uint64
- var x28 uint64
- x27, x28 = bits.Add64(x26, x23, uint64(0x0))
- var x29 uint64
- var x30 uint64
- x29, x30 = bits.Add64(x24, x21, uint64(p224Uint1(x28)))
- var x32 uint64
- _, x32 = bits.Add64(x11, x19, uint64(0x0))
- var x33 uint64
- var x34 uint64
- x33, x34 = bits.Add64(x13, x25, uint64(p224Uint1(x32)))
- var x35 uint64
- var x36 uint64
- x35, x36 = bits.Add64(x15, x27, uint64(p224Uint1(x34)))
- var x37 uint64
- var x38 uint64
- x37, x38 = bits.Add64(x17, x29, uint64(p224Uint1(x36)))
- var x39 uint64
- var x40 uint64
- x40, x39 = bits.Mul64(x1, 0xffffffff)
- var x41 uint64
- var x42 uint64
- x42, x41 = bits.Mul64(x1, 0xfffffffe00000000)
- var x43 uint64
- var x44 uint64
- x44, x43 = bits.Mul64(x1, 0xffffffff00000000)
- var x45 uint64
- var x46 uint64
- x46, x45 = bits.Mul64(x1, 0xffffffff00000001)
- var x47 uint64
- var x48 uint64
- x47, x48 = bits.Add64(x46, x43, uint64(0x0))
- var x49 uint64
- var x50 uint64
- x49, x50 = bits.Add64(x44, x41, uint64(p224Uint1(x48)))
- var x51 uint64
- var x52 uint64
- x51, x52 = bits.Add64(x42, x39, uint64(p224Uint1(x50)))
- var x53 uint64
- var x54 uint64
- x53, x54 = bits.Add64(x33, x45, uint64(0x0))
- var x55 uint64
- var x56 uint64
- x55, x56 = bits.Add64(x35, x47, uint64(p224Uint1(x54)))
- var x57 uint64
- var x58 uint64
- x57, x58 = bits.Add64(x37, x49, uint64(p224Uint1(x56)))
- var x59 uint64
- var x60 uint64
- x59, x60 = bits.Add64(((uint64(p224Uint1(x38)) + (uint64(p224Uint1(x18)) + x6)) + (uint64(p224Uint1(x30)) + x22)), x51, uint64(p224Uint1(x58)))
- var x61 uint64
- _, x61 = bits.Mul64(x53, 0xffffffffffffffff)
- var x63 uint64
- var x64 uint64
- x64, x63 = bits.Mul64(x61, 0xffffffff)
- var x65 uint64
- var x66 uint64
- x66, x65 = bits.Mul64(x61, 0xffffffffffffffff)
- var x67 uint64
- var x68 uint64
- x68, x67 = bits.Mul64(x61, 0xffffffff00000000)
- var x69 uint64
- var x70 uint64
- x69, x70 = bits.Add64(x68, x65, uint64(0x0))
- var x71 uint64
- var x72 uint64
- x71, x72 = bits.Add64(x66, x63, uint64(p224Uint1(x70)))
- var x74 uint64
- _, x74 = bits.Add64(x53, x61, uint64(0x0))
- var x75 uint64
- var x76 uint64
- x75, x76 = bits.Add64(x55, x67, uint64(p224Uint1(x74)))
- var x77 uint64
- var x78 uint64
- x77, x78 = bits.Add64(x57, x69, uint64(p224Uint1(x76)))
- var x79 uint64
- var x80 uint64
- x79, x80 = bits.Add64(x59, x71, uint64(p224Uint1(x78)))
- var x81 uint64
- var x82 uint64
- x82, x81 = bits.Mul64(x2, 0xffffffff)
- var x83 uint64
- var x84 uint64
- x84, x83 = bits.Mul64(x2, 0xfffffffe00000000)
- var x85 uint64
- var x86 uint64
- x86, x85 = bits.Mul64(x2, 0xffffffff00000000)
- var x87 uint64
- var x88 uint64
- x88, x87 = bits.Mul64(x2, 0xffffffff00000001)
- var x89 uint64
- var x90 uint64
- x89, x90 = bits.Add64(x88, x85, uint64(0x0))
- var x91 uint64
- var x92 uint64
- x91, x92 = bits.Add64(x86, x83, uint64(p224Uint1(x90)))
- var x93 uint64
- var x94 uint64
- x93, x94 = bits.Add64(x84, x81, uint64(p224Uint1(x92)))
- var x95 uint64
- var x96 uint64
- x95, x96 = bits.Add64(x75, x87, uint64(0x0))
- var x97 uint64
- var x98 uint64
- x97, x98 = bits.Add64(x77, x89, uint64(p224Uint1(x96)))
- var x99 uint64
- var x100 uint64
- x99, x100 = bits.Add64(x79, x91, uint64(p224Uint1(x98)))
- var x101 uint64
- var x102 uint64
- x101, x102 = bits.Add64(((uint64(p224Uint1(x80)) + (uint64(p224Uint1(x60)) + (uint64(p224Uint1(x52)) + x40))) + (uint64(p224Uint1(x72)) + x64)), x93, uint64(p224Uint1(x100)))
- var x103 uint64
- _, x103 = bits.Mul64(x95, 0xffffffffffffffff)
- var x105 uint64
- var x106 uint64
- x106, x105 = bits.Mul64(x103, 0xffffffff)
- var x107 uint64
- var x108 uint64
- x108, x107 = bits.Mul64(x103, 0xffffffffffffffff)
- var x109 uint64
- var x110 uint64
- x110, x109 = bits.Mul64(x103, 0xffffffff00000000)
- var x111 uint64
- var x112 uint64
- x111, x112 = bits.Add64(x110, x107, uint64(0x0))
- var x113 uint64
- var x114 uint64
- x113, x114 = bits.Add64(x108, x105, uint64(p224Uint1(x112)))
- var x116 uint64
- _, x116 = bits.Add64(x95, x103, uint64(0x0))
- var x117 uint64
- var x118 uint64
- x117, x118 = bits.Add64(x97, x109, uint64(p224Uint1(x116)))
- var x119 uint64
- var x120 uint64
- x119, x120 = bits.Add64(x99, x111, uint64(p224Uint1(x118)))
- var x121 uint64
- var x122 uint64
- x121, x122 = bits.Add64(x101, x113, uint64(p224Uint1(x120)))
- var x123 uint64
- var x124 uint64
- x124, x123 = bits.Mul64(x3, 0xffffffff)
- var x125 uint64
- var x126 uint64
- x126, x125 = bits.Mul64(x3, 0xfffffffe00000000)
- var x127 uint64
- var x128 uint64
- x128, x127 = bits.Mul64(x3, 0xffffffff00000000)
- var x129 uint64
- var x130 uint64
- x130, x129 = bits.Mul64(x3, 0xffffffff00000001)
- var x131 uint64
- var x132 uint64
- x131, x132 = bits.Add64(x130, x127, uint64(0x0))
- var x133 uint64
- var x134 uint64
- x133, x134 = bits.Add64(x128, x125, uint64(p224Uint1(x132)))
- var x135 uint64
- var x136 uint64
- x135, x136 = bits.Add64(x126, x123, uint64(p224Uint1(x134)))
- var x137 uint64
- var x138 uint64
- x137, x138 = bits.Add64(x117, x129, uint64(0x0))
- var x139 uint64
- var x140 uint64
- x139, x140 = bits.Add64(x119, x131, uint64(p224Uint1(x138)))
- var x141 uint64
- var x142 uint64
- x141, x142 = bits.Add64(x121, x133, uint64(p224Uint1(x140)))
- var x143 uint64
- var x144 uint64
- x143, x144 = bits.Add64(((uint64(p224Uint1(x122)) + (uint64(p224Uint1(x102)) + (uint64(p224Uint1(x94)) + x82))) + (uint64(p224Uint1(x114)) + x106)), x135, uint64(p224Uint1(x142)))
- var x145 uint64
- _, x145 = bits.Mul64(x137, 0xffffffffffffffff)
- var x147 uint64
- var x148 uint64
- x148, x147 = bits.Mul64(x145, 0xffffffff)
- var x149 uint64
- var x150 uint64
- x150, x149 = bits.Mul64(x145, 0xffffffffffffffff)
- var x151 uint64
- var x152 uint64
- x152, x151 = bits.Mul64(x145, 0xffffffff00000000)
- var x153 uint64
- var x154 uint64
- x153, x154 = bits.Add64(x152, x149, uint64(0x0))
- var x155 uint64
- var x156 uint64
- x155, x156 = bits.Add64(x150, x147, uint64(p224Uint1(x154)))
- var x158 uint64
- _, x158 = bits.Add64(x137, x145, uint64(0x0))
- var x159 uint64
- var x160 uint64
- x159, x160 = bits.Add64(x139, x151, uint64(p224Uint1(x158)))
- var x161 uint64
- var x162 uint64
- x161, x162 = bits.Add64(x141, x153, uint64(p224Uint1(x160)))
- var x163 uint64
- var x164 uint64
- x163, x164 = bits.Add64(x143, x155, uint64(p224Uint1(x162)))
- x165 := ((uint64(p224Uint1(x164)) + (uint64(p224Uint1(x144)) + (uint64(p224Uint1(x136)) + x124))) + (uint64(p224Uint1(x156)) + x148))
- var x166 uint64
- var x167 uint64
- x166, x167 = bits.Sub64(x159, uint64(0x1), uint64(0x0))
- var x168 uint64
- var x169 uint64
- x168, x169 = bits.Sub64(x161, 0xffffffff00000000, uint64(p224Uint1(x167)))
- var x170 uint64
- var x171 uint64
- x170, x171 = bits.Sub64(x163, 0xffffffffffffffff, uint64(p224Uint1(x169)))
- var x172 uint64
- var x173 uint64
- x172, x173 = bits.Sub64(x165, 0xffffffff, uint64(p224Uint1(x171)))
- var x175 uint64
- _, x175 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p224Uint1(x173)))
- var x176 uint64
- p224CmovznzU64(&x176, p224Uint1(x175), x166, x159)
- var x177 uint64
- p224CmovznzU64(&x177, p224Uint1(x175), x168, x161)
- var x178 uint64
- p224CmovznzU64(&x178, p224Uint1(x175), x170, x163)
- var x179 uint64
- p224CmovznzU64(&x179, p224Uint1(x175), x172, x165)
- out1[0] = x176
- out1[1] = x177
- out1[2] = x178
- out1[3] = x179
-}
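p224ToMontgomery is a Montgomery multiplication by a constant: the per-limb multipliers 0xffffffff00000001, 0xffffffff00000000, 0xfffffffe00000000 and 0xffffffff are the little-endian limbs of R² mod m, so the net map is a ↦ a·R²·R⁻¹ = a·R. The constants can be reproduced with math/big:

package main

import (
    "fmt"
    "math/big"
)

func main() {
    m := new(big.Int).Lsh(big.NewInt(1), 224)
    m.Sub(m, new(big.Int).Lsh(big.NewInt(1), 96))
    m.Add(m, big.NewInt(1)) // p224
    r2 := new(big.Int).Lsh(big.NewInt(1), 512)
    r2.Mod(r2, m) // R^2 mod m, R = 2^256
    mask := new(big.Int).SetUint64(^uint64(0))
    for i := 0; i < 4; i++ { // print little-endian 64-bit limbs
        limb := new(big.Int).Rsh(r2, uint(64*i))
        limb.And(limb, mask)
        fmt.Printf("limb%d = %#x\n", i, limb)
    }
}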
-
-// p224Selectznz is a multi-limb conditional select.
-//
-// Postconditions:
-// eval out1 = (if arg1 = 0 then eval arg2 else eval arg3)
-//
-// Input Bounds:
-// arg1: [0x0 ~> 0x1]
-// arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-// arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-// Output Bounds:
-// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-func p224Selectznz(out1 *[4]uint64, arg1 p224Uint1, arg2 *[4]uint64, arg3 *[4]uint64) {
- var x1 uint64
- p224CmovznzU64(&x1, arg1, arg2[0], arg3[0])
- var x2 uint64
- p224CmovznzU64(&x2, arg1, arg2[1], arg3[1])
- var x3 uint64
- p224CmovznzU64(&x3, arg1, arg2[2], arg3[2])
- var x4 uint64
- p224CmovznzU64(&x4, arg1, arg2[3], arg3[3])
- out1[0] = x1
- out1[1] = x2
- out1[2] = x3
- out1[3] = x4
-}
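p224Selectznz lifts the single-word conditional move across all four limbs; callers use it wherever a secret bit must choose between two field elements without a data-dependent branch or index. A generic sketch of the same shape (illustrative name):

package main

import "fmt"

// selectLimbs returns a when bit is 0 and b when bit is 1, touching both
// inputs regardless of the condition so timing does not leak the bit.
func selectLimbs(bit uint64, a, b [4]uint64) (out [4]uint64) {
    mask := -bit // 0x0 or all ones for bit in {0, 1}
    for i := range out {
        out[i] = (^mask & a[i]) | (mask & b[i])
    }
    return
}

func main() {
    a := [4]uint64{1, 1, 1, 1}
    b := [4]uint64{2, 2, 2, 2}
    fmt.Println(selectLimbs(0, a, b), selectLimbs(1, a, b)) // [1 1 1 1] [2 2 2 2]
}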
-
-// p224ToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// Postconditions:
-// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..27]
-//
-// Input Bounds:
-// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffff]]
-// Output Bounds:
-// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
-func p224ToBytes(out1 *[28]uint8, arg1 *[4]uint64) {
- x1 := arg1[3]
- x2 := arg1[2]
- x3 := arg1[1]
- x4 := arg1[0]
- x5 := (uint8(x4) & 0xff)
- x6 := (x4 >> 8)
- x7 := (uint8(x6) & 0xff)
- x8 := (x6 >> 8)
- x9 := (uint8(x8) & 0xff)
- x10 := (x8 >> 8)
- x11 := (uint8(x10) & 0xff)
- x12 := (x10 >> 8)
- x13 := (uint8(x12) & 0xff)
- x14 := (x12 >> 8)
- x15 := (uint8(x14) & 0xff)
- x16 := (x14 >> 8)
- x17 := (uint8(x16) & 0xff)
- x18 := uint8((x16 >> 8))
- x19 := (uint8(x3) & 0xff)
- x20 := (x3 >> 8)
- x21 := (uint8(x20) & 0xff)
- x22 := (x20 >> 8)
- x23 := (uint8(x22) & 0xff)
- x24 := (x22 >> 8)
- x25 := (uint8(x24) & 0xff)
- x26 := (x24 >> 8)
- x27 := (uint8(x26) & 0xff)
- x28 := (x26 >> 8)
- x29 := (uint8(x28) & 0xff)
- x30 := (x28 >> 8)
- x31 := (uint8(x30) & 0xff)
- x32 := uint8((x30 >> 8))
- x33 := (uint8(x2) & 0xff)
- x34 := (x2 >> 8)
- x35 := (uint8(x34) & 0xff)
- x36 := (x34 >> 8)
- x37 := (uint8(x36) & 0xff)
- x38 := (x36 >> 8)
- x39 := (uint8(x38) & 0xff)
- x40 := (x38 >> 8)
- x41 := (uint8(x40) & 0xff)
- x42 := (x40 >> 8)
- x43 := (uint8(x42) & 0xff)
- x44 := (x42 >> 8)
- x45 := (uint8(x44) & 0xff)
- x46 := uint8((x44 >> 8))
- x47 := (uint8(x1) & 0xff)
- x48 := (x1 >> 8)
- x49 := (uint8(x48) & 0xff)
- x50 := (x48 >> 8)
- x51 := (uint8(x50) & 0xff)
- x52 := uint8((x50 >> 8))
- out1[0] = x5
- out1[1] = x7
- out1[2] = x9
- out1[3] = x11
- out1[4] = x13
- out1[5] = x15
- out1[6] = x17
- out1[7] = x18
- out1[8] = x19
- out1[9] = x21
- out1[10] = x23
- out1[11] = x25
- out1[12] = x27
- out1[13] = x29
- out1[14] = x31
- out1[15] = x32
- out1[16] = x33
- out1[17] = x35
- out1[18] = x37
- out1[19] = x39
- out1[20] = x41
- out1[21] = x43
- out1[22] = x45
- out1[23] = x46
- out1[24] = x47
- out1[25] = x49
- out1[26] = x51
- out1[27] = x52
-}
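The shift-and-mask ladder in p224ToBytes is byte-wise little-endian serialization of three full limbs plus a 32-bit top limb, hence 28 output bytes. The equivalent using encoding/binary, offered as a readability aid only (hypothetical helper, not this file's API):

package main

import (
    "encoding/binary"
    "fmt"
)

// toBytes28 mirrors the layout of p224ToBytes: limbs 0-2 as eight
// little-endian bytes each, limb 3 (at most 32 bits) as four bytes.
func toBytes28(limbs [4]uint64) (out [28]byte) {
    for i := 0; i < 3; i++ {
        binary.LittleEndian.PutUint64(out[8*i:8*i+8], limbs[i])
    }
    binary.LittleEndian.PutUint32(out[24:], uint32(limbs[3]))
    return
}

func main() {
    b := toBytes28([4]uint64{0x0807060504030201, 0, 0, 0})
    fmt.Println(b[:8]) // [1 2 3 4 5 6 7 8]
}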
-
-// p224FromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
-//
-// Preconditions:
-// 0 ≤ bytes_eval arg1 < m
-// Postconditions:
-// eval out1 mod m = bytes_eval arg1 mod m
-// 0 ≤ eval out1 < m
-//
-// Input Bounds:
-// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
-// Output Bounds:
-// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffff]]
-func p224FromBytes(out1 *[4]uint64, arg1 *[28]uint8) {
- x1 := (uint64(arg1[27]) << 24)
- x2 := (uint64(arg1[26]) << 16)
- x3 := (uint64(arg1[25]) << 8)
- x4 := arg1[24]
- x5 := (uint64(arg1[23]) << 56)
- x6 := (uint64(arg1[22]) << 48)
- x7 := (uint64(arg1[21]) << 40)
- x8 := (uint64(arg1[20]) << 32)
- x9 := (uint64(arg1[19]) << 24)
- x10 := (uint64(arg1[18]) << 16)
- x11 := (uint64(arg1[17]) << 8)
- x12 := arg1[16]
- x13 := (uint64(arg1[15]) << 56)
- x14 := (uint64(arg1[14]) << 48)
- x15 := (uint64(arg1[13]) << 40)
- x16 := (uint64(arg1[12]) << 32)
- x17 := (uint64(arg1[11]) << 24)
- x18 := (uint64(arg1[10]) << 16)
- x19 := (uint64(arg1[9]) << 8)
- x20 := arg1[8]
- x21 := (uint64(arg1[7]) << 56)
- x22 := (uint64(arg1[6]) << 48)
- x23 := (uint64(arg1[5]) << 40)
- x24 := (uint64(arg1[4]) << 32)
- x25 := (uint64(arg1[3]) << 24)
- x26 := (uint64(arg1[2]) << 16)
- x27 := (uint64(arg1[1]) << 8)
- x28 := arg1[0]
- x29 := (x27 + uint64(x28))
- x30 := (x26 + x29)
- x31 := (x25 + x30)
- x32 := (x24 + x31)
- x33 := (x23 + x32)
- x34 := (x22 + x33)
- x35 := (x21 + x34)
- x36 := (x19 + uint64(x20))
- x37 := (x18 + x36)
- x38 := (x17 + x37)
- x39 := (x16 + x38)
- x40 := (x15 + x39)
- x41 := (x14 + x40)
- x42 := (x13 + x41)
- x43 := (x11 + uint64(x12))
- x44 := (x10 + x43)
- x45 := (x9 + x44)
- x46 := (x8 + x45)
- x47 := (x7 + x46)
- x48 := (x6 + x47)
- x49 := (x5 + x48)
- x50 := (x3 + uint64(x4))
- x51 := (x2 + x50)
- x52 := (x1 + x51)
- out1[0] = x35
- out1[1] = x42
- out1[2] = x49
- out1[3] = x52
-}
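p224FromBytes is the exact inverse ladder: each limb is reassembled from its little-endian bytes by shifted additions. With encoding/binary the correspondence is easier to see (again a hypothetical helper, not this file's API):

package main

import (
    "encoding/binary"
    "fmt"
)

// fromBytes28 mirrors p224FromBytes: three full little-endian limbs,
// then the 32-bit top limb from the last four bytes.
func fromBytes28(in [28]byte) (limbs [4]uint64) {
    for i := 0; i < 3; i++ {
        limbs[i] = binary.LittleEndian.Uint64(in[8*i : 8*i+8])
    }
    limbs[3] = uint64(binary.LittleEndian.Uint32(in[24:]))
    return
}

func main() {
    var in [28]byte
    for i := range in {
        in[i] = byte(i)
    }
    fmt.Printf("%#x\n", fromBytes28(in)[0]) // 0x706050403020100
}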
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p384_fiat64.go b/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p384_fiat64.go
deleted file mode 100644
index 493bed47e1..0000000000
--- a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p384_fiat64.go
+++ /dev/null
@@ -1,3004 +0,0 @@
-// Code generated by Fiat Cryptography. DO NOT EDIT.
-//
-// Autogenerated: word_by_word_montgomery --lang Go --no-wide-int --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --internal-static --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name fiat --no-prefix-fiat p384 64 '2^384 - 2^128 - 2^96 + 2^32 - 1' mul square add sub one from_montgomery to_montgomery selectznz to_bytes from_bytes
-//
-// curve description: p384
-//
-// machine_wordsize = 64 (from "64")
-//
-// requested operations: mul, square, add, sub, one, from_montgomery, to_montgomery, selectznz, to_bytes, from_bytes
-//
-// m = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff (from "2^384 - 2^128 - 2^96 + 2^32 - 1")
-//
-//
-//
-// NOTE: In addition to the bounds specified above each function, all
-//
-// functions synthesized for this Montgomery arithmetic require the
-//
-// input to be strictly less than the prime modulus (m), and also
-//
-// require the input to be in the unique saturated representation.
-//
-// All functions also ensure that these two properties are true of
-//
-// return values.
-//
-//
-//
-// Computed values:
-//
-// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) + (z[4] << 256) + (z[5] << 0x140)
-//
-// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248) + (z[32] << 256) + (z[33] << 0x108) + (z[34] << 0x110) + (z[35] << 0x118) + (z[36] << 0x120) + (z[37] << 0x128) + (z[38] << 0x130) + (z[39] << 0x138) + (z[40] << 0x140) + (z[41] << 0x148) + (z[42] << 0x150) + (z[43] << 0x158) + (z[44] << 0x160) + (z[45] << 0x168) + (z[46] << 0x170) + (z[47] << 0x178)
-//
-// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) + (z[4] << 256) + (z[5] << 0x140) in
-//
-// if x1 & (2^384-1) < 2^383 then x1 & (2^384-1) else (x1 & (2^384-1)) - 2^384
-
-package fiat
-
-import "math/bits"
-
-type p384Uint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
-type p384Int1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
-
-// The type p384MontgomeryDomainFieldElement is a field element in the Montgomery domain.
-//
-// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-type p384MontgomeryDomainFieldElement [6]uint64
-
-// The type p384NonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
-//
-// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-type p384NonMontgomeryDomainFieldElement [6]uint64
-
-// p384CmovznzU64 is a single-word conditional move.
-//
-// Postconditions:
-// out1 = (if arg1 = 0 then arg2 else arg3)
-//
-// Input Bounds:
-// arg1: [0x0 ~> 0x1]
-// arg2: [0x0 ~> 0xffffffffffffffff]
-// arg3: [0x0 ~> 0xffffffffffffffff]
-// Output Bounds:
-// out1: [0x0 ~> 0xffffffffffffffff]
-func p384CmovznzU64(out1 *uint64, arg1 p384Uint1, arg2 uint64, arg3 uint64) {
- x1 := (uint64(arg1) * 0xffffffffffffffff)
- x2 := ((x1 & arg3) | ((^x1) & arg2))
- *out1 = x2
-}
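The generator was invoked with --cmovznz-by-mul (see the header above), so the mask is formed by multiplying the 0/1 condition by 0xffffffffffffffff rather than by negation; either way yields all-zeros or all-ones without a secret-dependent branch. In isolation:

package main

import "fmt"

// cmovznz returns ifZero when cond == 0 and ifNonzero when cond == 1,
// with the mask built by multiplication as in p384CmovznzU64.
func cmovznz(cond, ifZero, ifNonzero uint64) uint64 {
    mask := cond * 0xffffffffffffffff // 0x0 or all ones
    return (mask & ifNonzero) | (^mask & ifZero)
}

func main() {
    fmt.Println(cmovznz(0, 10, 20), cmovznz(1, 10, 20)) // 10 20
}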
-
-// p384Mul multiplies two field elements in the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// 0 ≤ eval arg2 < m
-// Postconditions:
-// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
-// 0 ≤ eval out1 < m
-//
-func p384Mul(out1 *p384MontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement, arg2 *p384MontgomeryDomainFieldElement) {
- x1 := arg1[1]
- x2 := arg1[2]
- x3 := arg1[3]
- x4 := arg1[4]
- x5 := arg1[5]
- x6 := arg1[0]
- var x7 uint64
- var x8 uint64
- x8, x7 = bits.Mul64(x6, arg2[5])
- var x9 uint64
- var x10 uint64
- x10, x9 = bits.Mul64(x6, arg2[4])
- var x11 uint64
- var x12 uint64
- x12, x11 = bits.Mul64(x6, arg2[3])
- var x13 uint64
- var x14 uint64
- x14, x13 = bits.Mul64(x6, arg2[2])
- var x15 uint64
- var x16 uint64
- x16, x15 = bits.Mul64(x6, arg2[1])
- var x17 uint64
- var x18 uint64
- x18, x17 = bits.Mul64(x6, arg2[0])
- var x19 uint64
- var x20 uint64
- x19, x20 = bits.Add64(x18, x15, uint64(0x0))
- var x21 uint64
- var x22 uint64
- x21, x22 = bits.Add64(x16, x13, uint64(p384Uint1(x20)))
- var x23 uint64
- var x24 uint64
- x23, x24 = bits.Add64(x14, x11, uint64(p384Uint1(x22)))
- var x25 uint64
- var x26 uint64
- x25, x26 = bits.Add64(x12, x9, uint64(p384Uint1(x24)))
- var x27 uint64
- var x28 uint64
- x27, x28 = bits.Add64(x10, x7, uint64(p384Uint1(x26)))
- x29 := (uint64(p384Uint1(x28)) + x8)
- var x30 uint64
- _, x30 = bits.Mul64(x17, 0x100000001)
- var x32 uint64
- var x33 uint64
- x33, x32 = bits.Mul64(x30, 0xffffffffffffffff)
- var x34 uint64
- var x35 uint64
- x35, x34 = bits.Mul64(x30, 0xffffffffffffffff)
- var x36 uint64
- var x37 uint64
- x37, x36 = bits.Mul64(x30, 0xffffffffffffffff)
- var x38 uint64
- var x39 uint64
- x39, x38 = bits.Mul64(x30, 0xfffffffffffffffe)
- var x40 uint64
- var x41 uint64
- x41, x40 = bits.Mul64(x30, 0xffffffff00000000)
- var x42 uint64
- var x43 uint64
- x43, x42 = bits.Mul64(x30, 0xffffffff)
- var x44 uint64
- var x45 uint64
- x44, x45 = bits.Add64(x43, x40, uint64(0x0))
- var x46 uint64
- var x47 uint64
- x46, x47 = bits.Add64(x41, x38, uint64(p384Uint1(x45)))
- var x48 uint64
- var x49 uint64
- x48, x49 = bits.Add64(x39, x36, uint64(p384Uint1(x47)))
- var x50 uint64
- var x51 uint64
- x50, x51 = bits.Add64(x37, x34, uint64(p384Uint1(x49)))
- var x52 uint64
- var x53 uint64
- x52, x53 = bits.Add64(x35, x32, uint64(p384Uint1(x51)))
- x54 := (uint64(p384Uint1(x53)) + x33)
- var x56 uint64
- _, x56 = bits.Add64(x17, x42, uint64(0x0))
- var x57 uint64
- var x58 uint64
- x57, x58 = bits.Add64(x19, x44, uint64(p384Uint1(x56)))
- var x59 uint64
- var x60 uint64
- x59, x60 = bits.Add64(x21, x46, uint64(p384Uint1(x58)))
- var x61 uint64
- var x62 uint64
- x61, x62 = bits.Add64(x23, x48, uint64(p384Uint1(x60)))
- var x63 uint64
- var x64 uint64
- x63, x64 = bits.Add64(x25, x50, uint64(p384Uint1(x62)))
- var x65 uint64
- var x66 uint64
- x65, x66 = bits.Add64(x27, x52, uint64(p384Uint1(x64)))
- var x67 uint64
- var x68 uint64
- x67, x68 = bits.Add64(x29, x54, uint64(p384Uint1(x66)))
- var x69 uint64
- var x70 uint64
- x70, x69 = bits.Mul64(x1, arg2[5])
- var x71 uint64
- var x72 uint64
- x72, x71 = bits.Mul64(x1, arg2[4])
- var x73 uint64
- var x74 uint64
- x74, x73 = bits.Mul64(x1, arg2[3])
- var x75 uint64
- var x76 uint64
- x76, x75 = bits.Mul64(x1, arg2[2])
- var x77 uint64
- var x78 uint64
- x78, x77 = bits.Mul64(x1, arg2[1])
- var x79 uint64
- var x80 uint64
- x80, x79 = bits.Mul64(x1, arg2[0])
- var x81 uint64
- var x82 uint64
- x81, x82 = bits.Add64(x80, x77, uint64(0x0))
- var x83 uint64
- var x84 uint64
- x83, x84 = bits.Add64(x78, x75, uint64(p384Uint1(x82)))
- var x85 uint64
- var x86 uint64
- x85, x86 = bits.Add64(x76, x73, uint64(p384Uint1(x84)))
- var x87 uint64
- var x88 uint64
- x87, x88 = bits.Add64(x74, x71, uint64(p384Uint1(x86)))
- var x89 uint64
- var x90 uint64
- x89, x90 = bits.Add64(x72, x69, uint64(p384Uint1(x88)))
- x91 := (uint64(p384Uint1(x90)) + x70)
- var x92 uint64
- var x93 uint64
- x92, x93 = bits.Add64(x57, x79, uint64(0x0))
- var x94 uint64
- var x95 uint64
- x94, x95 = bits.Add64(x59, x81, uint64(p384Uint1(x93)))
- var x96 uint64
- var x97 uint64
- x96, x97 = bits.Add64(x61, x83, uint64(p384Uint1(x95)))
- var x98 uint64
- var x99 uint64
- x98, x99 = bits.Add64(x63, x85, uint64(p384Uint1(x97)))
- var x100 uint64
- var x101 uint64
- x100, x101 = bits.Add64(x65, x87, uint64(p384Uint1(x99)))
- var x102 uint64
- var x103 uint64
- x102, x103 = bits.Add64(x67, x89, uint64(p384Uint1(x101)))
- var x104 uint64
- var x105 uint64
- x104, x105 = bits.Add64(uint64(p384Uint1(x68)), x91, uint64(p384Uint1(x103)))
- var x106 uint64
- _, x106 = bits.Mul64(x92, 0x100000001)
- var x108 uint64
- var x109 uint64
- x109, x108 = bits.Mul64(x106, 0xffffffffffffffff)
- var x110 uint64
- var x111 uint64
- x111, x110 = bits.Mul64(x106, 0xffffffffffffffff)
- var x112 uint64
- var x113 uint64
- x113, x112 = bits.Mul64(x106, 0xffffffffffffffff)
- var x114 uint64
- var x115 uint64
- x115, x114 = bits.Mul64(x106, 0xfffffffffffffffe)
- var x116 uint64
- var x117 uint64
- x117, x116 = bits.Mul64(x106, 0xffffffff00000000)
- var x118 uint64
- var x119 uint64
- x119, x118 = bits.Mul64(x106, 0xffffffff)
- var x120 uint64
- var x121 uint64
- x120, x121 = bits.Add64(x119, x116, uint64(0x0))
- var x122 uint64
- var x123 uint64
- x122, x123 = bits.Add64(x117, x114, uint64(p384Uint1(x121)))
- var x124 uint64
- var x125 uint64
- x124, x125 = bits.Add64(x115, x112, uint64(p384Uint1(x123)))
- var x126 uint64
- var x127 uint64
- x126, x127 = bits.Add64(x113, x110, uint64(p384Uint1(x125)))
- var x128 uint64
- var x129 uint64
- x128, x129 = bits.Add64(x111, x108, uint64(p384Uint1(x127)))
- x130 := (uint64(p384Uint1(x129)) + x109)
- var x132 uint64
- _, x132 = bits.Add64(x92, x118, uint64(0x0))
- var x133 uint64
- var x134 uint64
- x133, x134 = bits.Add64(x94, x120, uint64(p384Uint1(x132)))
- var x135 uint64
- var x136 uint64
- x135, x136 = bits.Add64(x96, x122, uint64(p384Uint1(x134)))
- var x137 uint64
- var x138 uint64
- x137, x138 = bits.Add64(x98, x124, uint64(p384Uint1(x136)))
- var x139 uint64
- var x140 uint64
- x139, x140 = bits.Add64(x100, x126, uint64(p384Uint1(x138)))
- var x141 uint64
- var x142 uint64
- x141, x142 = bits.Add64(x102, x128, uint64(p384Uint1(x140)))
- var x143 uint64
- var x144 uint64
- x143, x144 = bits.Add64(x104, x130, uint64(p384Uint1(x142)))
- x145 := (uint64(p384Uint1(x144)) + uint64(p384Uint1(x105)))
- var x146 uint64
- var x147 uint64
- x147, x146 = bits.Mul64(x2, arg2[5])
- var x148 uint64
- var x149 uint64
- x149, x148 = bits.Mul64(x2, arg2[4])
- var x150 uint64
- var x151 uint64
- x151, x150 = bits.Mul64(x2, arg2[3])
- var x152 uint64
- var x153 uint64
- x153, x152 = bits.Mul64(x2, arg2[2])
- var x154 uint64
- var x155 uint64
- x155, x154 = bits.Mul64(x2, arg2[1])
- var x156 uint64
- var x157 uint64
- x157, x156 = bits.Mul64(x2, arg2[0])
- var x158 uint64
- var x159 uint64
- x158, x159 = bits.Add64(x157, x154, uint64(0x0))
- var x160 uint64
- var x161 uint64
- x160, x161 = bits.Add64(x155, x152, uint64(p384Uint1(x159)))
- var x162 uint64
- var x163 uint64
- x162, x163 = bits.Add64(x153, x150, uint64(p384Uint1(x161)))
- var x164 uint64
- var x165 uint64
- x164, x165 = bits.Add64(x151, x148, uint64(p384Uint1(x163)))
- var x166 uint64
- var x167 uint64
- x166, x167 = bits.Add64(x149, x146, uint64(p384Uint1(x165)))
- x168 := (uint64(p384Uint1(x167)) + x147)
- var x169 uint64
- var x170 uint64
- x169, x170 = bits.Add64(x133, x156, uint64(0x0))
- var x171 uint64
- var x172 uint64
- x171, x172 = bits.Add64(x135, x158, uint64(p384Uint1(x170)))
- var x173 uint64
- var x174 uint64
- x173, x174 = bits.Add64(x137, x160, uint64(p384Uint1(x172)))
- var x175 uint64
- var x176 uint64
- x175, x176 = bits.Add64(x139, x162, uint64(p384Uint1(x174)))
- var x177 uint64
- var x178 uint64
- x177, x178 = bits.Add64(x141, x164, uint64(p384Uint1(x176)))
- var x179 uint64
- var x180 uint64
- x179, x180 = bits.Add64(x143, x166, uint64(p384Uint1(x178)))
- var x181 uint64
- var x182 uint64
- x181, x182 = bits.Add64(x145, x168, uint64(p384Uint1(x180)))
- var x183 uint64
- _, x183 = bits.Mul64(x169, 0x100000001)
- var x185 uint64
- var x186 uint64
- x186, x185 = bits.Mul64(x183, 0xffffffffffffffff)
- var x187 uint64
- var x188 uint64
- x188, x187 = bits.Mul64(x183, 0xffffffffffffffff)
- var x189 uint64
- var x190 uint64
- x190, x189 = bits.Mul64(x183, 0xffffffffffffffff)
- var x191 uint64
- var x192 uint64
- x192, x191 = bits.Mul64(x183, 0xfffffffffffffffe)
- var x193 uint64
- var x194 uint64
- x194, x193 = bits.Mul64(x183, 0xffffffff00000000)
- var x195 uint64
- var x196 uint64
- x196, x195 = bits.Mul64(x183, 0xffffffff)
- var x197 uint64
- var x198 uint64
- x197, x198 = bits.Add64(x196, x193, uint64(0x0))
- var x199 uint64
- var x200 uint64
- x199, x200 = bits.Add64(x194, x191, uint64(p384Uint1(x198)))
- var x201 uint64
- var x202 uint64
- x201, x202 = bits.Add64(x192, x189, uint64(p384Uint1(x200)))
- var x203 uint64
- var x204 uint64
- x203, x204 = bits.Add64(x190, x187, uint64(p384Uint1(x202)))
- var x205 uint64
- var x206 uint64
- x205, x206 = bits.Add64(x188, x185, uint64(p384Uint1(x204)))
- x207 := (uint64(p384Uint1(x206)) + x186)
- var x209 uint64
- _, x209 = bits.Add64(x169, x195, uint64(0x0))
- var x210 uint64
- var x211 uint64
- x210, x211 = bits.Add64(x171, x197, uint64(p384Uint1(x209)))
- var x212 uint64
- var x213 uint64
- x212, x213 = bits.Add64(x173, x199, uint64(p384Uint1(x211)))
- var x214 uint64
- var x215 uint64
- x214, x215 = bits.Add64(x175, x201, uint64(p384Uint1(x213)))
- var x216 uint64
- var x217 uint64
- x216, x217 = bits.Add64(x177, x203, uint64(p384Uint1(x215)))
- var x218 uint64
- var x219 uint64
- x218, x219 = bits.Add64(x179, x205, uint64(p384Uint1(x217)))
- var x220 uint64
- var x221 uint64
- x220, x221 = bits.Add64(x181, x207, uint64(p384Uint1(x219)))
- x222 := (uint64(p384Uint1(x221)) + uint64(p384Uint1(x182)))
- var x223 uint64
- var x224 uint64
- x224, x223 = bits.Mul64(x3, arg2[5])
- var x225 uint64
- var x226 uint64
- x226, x225 = bits.Mul64(x3, arg2[4])
- var x227 uint64
- var x228 uint64
- x228, x227 = bits.Mul64(x3, arg2[3])
- var x229 uint64
- var x230 uint64
- x230, x229 = bits.Mul64(x3, arg2[2])
- var x231 uint64
- var x232 uint64
- x232, x231 = bits.Mul64(x3, arg2[1])
- var x233 uint64
- var x234 uint64
- x234, x233 = bits.Mul64(x3, arg2[0])
- var x235 uint64
- var x236 uint64
- x235, x236 = bits.Add64(x234, x231, uint64(0x0))
- var x237 uint64
- var x238 uint64
- x237, x238 = bits.Add64(x232, x229, uint64(p384Uint1(x236)))
- var x239 uint64
- var x240 uint64
- x239, x240 = bits.Add64(x230, x227, uint64(p384Uint1(x238)))
- var x241 uint64
- var x242 uint64
- x241, x242 = bits.Add64(x228, x225, uint64(p384Uint1(x240)))
- var x243 uint64
- var x244 uint64
- x243, x244 = bits.Add64(x226, x223, uint64(p384Uint1(x242)))
- x245 := (uint64(p384Uint1(x244)) + x224)
- var x246 uint64
- var x247 uint64
- x246, x247 = bits.Add64(x210, x233, uint64(0x0))
- var x248 uint64
- var x249 uint64
- x248, x249 = bits.Add64(x212, x235, uint64(p384Uint1(x247)))
- var x250 uint64
- var x251 uint64
- x250, x251 = bits.Add64(x214, x237, uint64(p384Uint1(x249)))
- var x252 uint64
- var x253 uint64
- x252, x253 = bits.Add64(x216, x239, uint64(p384Uint1(x251)))
- var x254 uint64
- var x255 uint64
- x254, x255 = bits.Add64(x218, x241, uint64(p384Uint1(x253)))
- var x256 uint64
- var x257 uint64
- x256, x257 = bits.Add64(x220, x243, uint64(p384Uint1(x255)))
- var x258 uint64
- var x259 uint64
- x258, x259 = bits.Add64(x222, x245, uint64(p384Uint1(x257)))
- var x260 uint64
- _, x260 = bits.Mul64(x246, 0x100000001)
- var x262 uint64
- var x263 uint64
- x263, x262 = bits.Mul64(x260, 0xffffffffffffffff)
- var x264 uint64
- var x265 uint64
- x265, x264 = bits.Mul64(x260, 0xffffffffffffffff)
- var x266 uint64
- var x267 uint64
- x267, x266 = bits.Mul64(x260, 0xffffffffffffffff)
- var x268 uint64
- var x269 uint64
- x269, x268 = bits.Mul64(x260, 0xfffffffffffffffe)
- var x270 uint64
- var x271 uint64
- x271, x270 = bits.Mul64(x260, 0xffffffff00000000)
- var x272 uint64
- var x273 uint64
- x273, x272 = bits.Mul64(x260, 0xffffffff)
- var x274 uint64
- var x275 uint64
- x274, x275 = bits.Add64(x273, x270, uint64(0x0))
- var x276 uint64
- var x277 uint64
- x276, x277 = bits.Add64(x271, x268, uint64(p384Uint1(x275)))
- var x278 uint64
- var x279 uint64
- x278, x279 = bits.Add64(x269, x266, uint64(p384Uint1(x277)))
- var x280 uint64
- var x281 uint64
- x280, x281 = bits.Add64(x267, x264, uint64(p384Uint1(x279)))
- var x282 uint64
- var x283 uint64
- x282, x283 = bits.Add64(x265, x262, uint64(p384Uint1(x281)))
- x284 := (uint64(p384Uint1(x283)) + x263)
- var x286 uint64
- _, x286 = bits.Add64(x246, x272, uint64(0x0))
- var x287 uint64
- var x288 uint64
- x287, x288 = bits.Add64(x248, x274, uint64(p384Uint1(x286)))
- var x289 uint64
- var x290 uint64
- x289, x290 = bits.Add64(x250, x276, uint64(p384Uint1(x288)))
- var x291 uint64
- var x292 uint64
- x291, x292 = bits.Add64(x252, x278, uint64(p384Uint1(x290)))
- var x293 uint64
- var x294 uint64
- x293, x294 = bits.Add64(x254, x280, uint64(p384Uint1(x292)))
- var x295 uint64
- var x296 uint64
- x295, x296 = bits.Add64(x256, x282, uint64(p384Uint1(x294)))
- var x297 uint64
- var x298 uint64
- x297, x298 = bits.Add64(x258, x284, uint64(p384Uint1(x296)))
- x299 := (uint64(p384Uint1(x298)) + uint64(p384Uint1(x259)))
- var x300 uint64
- var x301 uint64
- x301, x300 = bits.Mul64(x4, arg2[5])
- var x302 uint64
- var x303 uint64
- x303, x302 = bits.Mul64(x4, arg2[4])
- var x304 uint64
- var x305 uint64
- x305, x304 = bits.Mul64(x4, arg2[3])
- var x306 uint64
- var x307 uint64
- x307, x306 = bits.Mul64(x4, arg2[2])
- var x308 uint64
- var x309 uint64
- x309, x308 = bits.Mul64(x4, arg2[1])
- var x310 uint64
- var x311 uint64
- x311, x310 = bits.Mul64(x4, arg2[0])
- var x312 uint64
- var x313 uint64
- x312, x313 = bits.Add64(x311, x308, uint64(0x0))
- var x314 uint64
- var x315 uint64
- x314, x315 = bits.Add64(x309, x306, uint64(p384Uint1(x313)))
- var x316 uint64
- var x317 uint64
- x316, x317 = bits.Add64(x307, x304, uint64(p384Uint1(x315)))
- var x318 uint64
- var x319 uint64
- x318, x319 = bits.Add64(x305, x302, uint64(p384Uint1(x317)))
- var x320 uint64
- var x321 uint64
- x320, x321 = bits.Add64(x303, x300, uint64(p384Uint1(x319)))
- x322 := (uint64(p384Uint1(x321)) + x301)
- var x323 uint64
- var x324 uint64
- x323, x324 = bits.Add64(x287, x310, uint64(0x0))
- var x325 uint64
- var x326 uint64
- x325, x326 = bits.Add64(x289, x312, uint64(p384Uint1(x324)))
- var x327 uint64
- var x328 uint64
- x327, x328 = bits.Add64(x291, x314, uint64(p384Uint1(x326)))
- var x329 uint64
- var x330 uint64
- x329, x330 = bits.Add64(x293, x316, uint64(p384Uint1(x328)))
- var x331 uint64
- var x332 uint64
- x331, x332 = bits.Add64(x295, x318, uint64(p384Uint1(x330)))
- var x333 uint64
- var x334 uint64
- x333, x334 = bits.Add64(x297, x320, uint64(p384Uint1(x332)))
- var x335 uint64
- var x336 uint64
- x335, x336 = bits.Add64(x299, x322, uint64(p384Uint1(x334)))
- var x337 uint64
- _, x337 = bits.Mul64(x323, 0x100000001)
- var x339 uint64
- var x340 uint64
- x340, x339 = bits.Mul64(x337, 0xffffffffffffffff)
- var x341 uint64
- var x342 uint64
- x342, x341 = bits.Mul64(x337, 0xffffffffffffffff)
- var x343 uint64
- var x344 uint64
- x344, x343 = bits.Mul64(x337, 0xffffffffffffffff)
- var x345 uint64
- var x346 uint64
- x346, x345 = bits.Mul64(x337, 0xfffffffffffffffe)
- var x347 uint64
- var x348 uint64
- x348, x347 = bits.Mul64(x337, 0xffffffff00000000)
- var x349 uint64
- var x350 uint64
- x350, x349 = bits.Mul64(x337, 0xffffffff)
- var x351 uint64
- var x352 uint64
- x351, x352 = bits.Add64(x350, x347, uint64(0x0))
- var x353 uint64
- var x354 uint64
- x353, x354 = bits.Add64(x348, x345, uint64(p384Uint1(x352)))
- var x355 uint64
- var x356 uint64
- x355, x356 = bits.Add64(x346, x343, uint64(p384Uint1(x354)))
- var x357 uint64
- var x358 uint64
- x357, x358 = bits.Add64(x344, x341, uint64(p384Uint1(x356)))
- var x359 uint64
- var x360 uint64
- x359, x360 = bits.Add64(x342, x339, uint64(p384Uint1(x358)))
- x361 := (uint64(p384Uint1(x360)) + x340)
- var x363 uint64
- _, x363 = bits.Add64(x323, x349, uint64(0x0))
- var x364 uint64
- var x365 uint64
- x364, x365 = bits.Add64(x325, x351, uint64(p384Uint1(x363)))
- var x366 uint64
- var x367 uint64
- x366, x367 = bits.Add64(x327, x353, uint64(p384Uint1(x365)))
- var x368 uint64
- var x369 uint64
- x368, x369 = bits.Add64(x329, x355, uint64(p384Uint1(x367)))
- var x370 uint64
- var x371 uint64
- x370, x371 = bits.Add64(x331, x357, uint64(p384Uint1(x369)))
- var x372 uint64
- var x373 uint64
- x372, x373 = bits.Add64(x333, x359, uint64(p384Uint1(x371)))
- var x374 uint64
- var x375 uint64
- x374, x375 = bits.Add64(x335, x361, uint64(p384Uint1(x373)))
- x376 := (uint64(p384Uint1(x375)) + uint64(p384Uint1(x336)))
- var x377 uint64
- var x378 uint64
- x378, x377 = bits.Mul64(x5, arg2[5])
- var x379 uint64
- var x380 uint64
- x380, x379 = bits.Mul64(x5, arg2[4])
- var x381 uint64
- var x382 uint64
- x382, x381 = bits.Mul64(x5, arg2[3])
- var x383 uint64
- var x384 uint64
- x384, x383 = bits.Mul64(x5, arg2[2])
- var x385 uint64
- var x386 uint64
- x386, x385 = bits.Mul64(x5, arg2[1])
- var x387 uint64
- var x388 uint64
- x388, x387 = bits.Mul64(x5, arg2[0])
- var x389 uint64
- var x390 uint64
- x389, x390 = bits.Add64(x388, x385, uint64(0x0))
- var x391 uint64
- var x392 uint64
- x391, x392 = bits.Add64(x386, x383, uint64(p384Uint1(x390)))
- var x393 uint64
- var x394 uint64
- x393, x394 = bits.Add64(x384, x381, uint64(p384Uint1(x392)))
- var x395 uint64
- var x396 uint64
- x395, x396 = bits.Add64(x382, x379, uint64(p384Uint1(x394)))
- var x397 uint64
- var x398 uint64
- x397, x398 = bits.Add64(x380, x377, uint64(p384Uint1(x396)))
- x399 := (uint64(p384Uint1(x398)) + x378)
- var x400 uint64
- var x401 uint64
- x400, x401 = bits.Add64(x364, x387, uint64(0x0))
- var x402 uint64
- var x403 uint64
- x402, x403 = bits.Add64(x366, x389, uint64(p384Uint1(x401)))
- var x404 uint64
- var x405 uint64
- x404, x405 = bits.Add64(x368, x391, uint64(p384Uint1(x403)))
- var x406 uint64
- var x407 uint64
- x406, x407 = bits.Add64(x370, x393, uint64(p384Uint1(x405)))
- var x408 uint64
- var x409 uint64
- x408, x409 = bits.Add64(x372, x395, uint64(p384Uint1(x407)))
- var x410 uint64
- var x411 uint64
- x410, x411 = bits.Add64(x374, x397, uint64(p384Uint1(x409)))
- var x412 uint64
- var x413 uint64
- x412, x413 = bits.Add64(x376, x399, uint64(p384Uint1(x411)))
- var x414 uint64
- _, x414 = bits.Mul64(x400, 0x100000001)
- var x416 uint64
- var x417 uint64
- x417, x416 = bits.Mul64(x414, 0xffffffffffffffff)
- var x418 uint64
- var x419 uint64
- x419, x418 = bits.Mul64(x414, 0xffffffffffffffff)
- var x420 uint64
- var x421 uint64
- x421, x420 = bits.Mul64(x414, 0xffffffffffffffff)
- var x422 uint64
- var x423 uint64
- x423, x422 = bits.Mul64(x414, 0xfffffffffffffffe)
- var x424 uint64
- var x425 uint64
- x425, x424 = bits.Mul64(x414, 0xffffffff00000000)
- var x426 uint64
- var x427 uint64
- x427, x426 = bits.Mul64(x414, 0xffffffff)
- var x428 uint64
- var x429 uint64
- x428, x429 = bits.Add64(x427, x424, uint64(0x0))
- var x430 uint64
- var x431 uint64
- x430, x431 = bits.Add64(x425, x422, uint64(p384Uint1(x429)))
- var x432 uint64
- var x433 uint64
- x432, x433 = bits.Add64(x423, x420, uint64(p384Uint1(x431)))
- var x434 uint64
- var x435 uint64
- x434, x435 = bits.Add64(x421, x418, uint64(p384Uint1(x433)))
- var x436 uint64
- var x437 uint64
- x436, x437 = bits.Add64(x419, x416, uint64(p384Uint1(x435)))
- x438 := (uint64(p384Uint1(x437)) + x417)
- var x440 uint64
- _, x440 = bits.Add64(x400, x426, uint64(0x0))
- var x441 uint64
- var x442 uint64
- x441, x442 = bits.Add64(x402, x428, uint64(p384Uint1(x440)))
- var x443 uint64
- var x444 uint64
- x443, x444 = bits.Add64(x404, x430, uint64(p384Uint1(x442)))
- var x445 uint64
- var x446 uint64
- x445, x446 = bits.Add64(x406, x432, uint64(p384Uint1(x444)))
- var x447 uint64
- var x448 uint64
- x447, x448 = bits.Add64(x408, x434, uint64(p384Uint1(x446)))
- var x449 uint64
- var x450 uint64
- x449, x450 = bits.Add64(x410, x436, uint64(p384Uint1(x448)))
- var x451 uint64
- var x452 uint64
- x451, x452 = bits.Add64(x412, x438, uint64(p384Uint1(x450)))
- x453 := (uint64(p384Uint1(x452)) + uint64(p384Uint1(x413)))
- var x454 uint64
- var x455 uint64
- x454, x455 = bits.Sub64(x441, 0xffffffff, uint64(0x0))
- var x456 uint64
- var x457 uint64
- x456, x457 = bits.Sub64(x443, 0xffffffff00000000, uint64(p384Uint1(x455)))
- var x458 uint64
- var x459 uint64
- x458, x459 = bits.Sub64(x445, 0xfffffffffffffffe, uint64(p384Uint1(x457)))
- var x460 uint64
- var x461 uint64
- x460, x461 = bits.Sub64(x447, 0xffffffffffffffff, uint64(p384Uint1(x459)))
- var x462 uint64
- var x463 uint64
- x462, x463 = bits.Sub64(x449, 0xffffffffffffffff, uint64(p384Uint1(x461)))
- var x464 uint64
- var x465 uint64
- x464, x465 = bits.Sub64(x451, 0xffffffffffffffff, uint64(p384Uint1(x463)))
- var x467 uint64
- _, x467 = bits.Sub64(x453, uint64(0x0), uint64(p384Uint1(x465)))
- var x468 uint64
- p384CmovznzU64(&x468, p384Uint1(x467), x454, x441)
- var x469 uint64
- p384CmovznzU64(&x469, p384Uint1(x467), x456, x443)
- var x470 uint64
- p384CmovznzU64(&x470, p384Uint1(x467), x458, x445)
- var x471 uint64
- p384CmovznzU64(&x471, p384Uint1(x467), x460, x447)
- var x472 uint64
- p384CmovznzU64(&x472, p384Uint1(x467), x462, x449)
- var x473 uint64
- p384CmovznzU64(&x473, p384Uint1(x467), x464, x451)
- out1[0] = x468
- out1[1] = x469
- out1[2] = x470
- out1[3] = x471
- out1[4] = x472
- out1[5] = x473
-}
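p384Mul follows the same round structure as the p224 code, now over six limbs and with the reduction multiplier 0x100000001, which is -m⁻¹ mod 2⁶⁴ for this modulus (its low limb is 0xffffffff). That constant can be reproduced with math/big:

package main

import (
    "fmt"
    "math/big"
)

func main() {
    // p384 = 2^384 - 2^128 - 2^96 + 2^32 - 1
    m := new(big.Int).Lsh(big.NewInt(1), 384)
    m.Sub(m, new(big.Int).Lsh(big.NewInt(1), 128))
    m.Sub(m, new(big.Int).Lsh(big.NewInt(1), 96))
    m.Add(m, new(big.Int).Lsh(big.NewInt(1), 32))
    m.Sub(m, big.NewInt(1))
    w := new(big.Int).Lsh(big.NewInt(1), 64)
    mPrime := new(big.Int).ModInverse(m, w)
    mPrime.Neg(mPrime).Mod(mPrime, w) // -m^-1 mod 2^64
    fmt.Printf("%#x\n", mPrime)       // 0x100000001
}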
-
-// p384Square squares a field element in the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// Postconditions:
-// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg1)) mod m
-// 0 ≤ eval out1 < m
-//
-func p384Square(out1 *p384MontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement) {
- x1 := arg1[1]
- x2 := arg1[2]
- x3 := arg1[3]
- x4 := arg1[4]
- x5 := arg1[5]
- x6 := arg1[0]
- var x7 uint64
- var x8 uint64
- x8, x7 = bits.Mul64(x6, arg1[5])
- var x9 uint64
- var x10 uint64
- x10, x9 = bits.Mul64(x6, arg1[4])
- var x11 uint64
- var x12 uint64
- x12, x11 = bits.Mul64(x6, arg1[3])
- var x13 uint64
- var x14 uint64
- x14, x13 = bits.Mul64(x6, arg1[2])
- var x15 uint64
- var x16 uint64
- x16, x15 = bits.Mul64(x6, arg1[1])
- var x17 uint64
- var x18 uint64
- x18, x17 = bits.Mul64(x6, arg1[0])
- var x19 uint64
- var x20 uint64
- x19, x20 = bits.Add64(x18, x15, uint64(0x0))
- var x21 uint64
- var x22 uint64
- x21, x22 = bits.Add64(x16, x13, uint64(p384Uint1(x20)))
- var x23 uint64
- var x24 uint64
- x23, x24 = bits.Add64(x14, x11, uint64(p384Uint1(x22)))
- var x25 uint64
- var x26 uint64
- x25, x26 = bits.Add64(x12, x9, uint64(p384Uint1(x24)))
- var x27 uint64
- var x28 uint64
- x27, x28 = bits.Add64(x10, x7, uint64(p384Uint1(x26)))
- x29 := (uint64(p384Uint1(x28)) + x8)
- var x30 uint64
- _, x30 = bits.Mul64(x17, 0x100000001)
- var x32 uint64
- var x33 uint64
- x33, x32 = bits.Mul64(x30, 0xffffffffffffffff)
- var x34 uint64
- var x35 uint64
- x35, x34 = bits.Mul64(x30, 0xffffffffffffffff)
- var x36 uint64
- var x37 uint64
- x37, x36 = bits.Mul64(x30, 0xffffffffffffffff)
- var x38 uint64
- var x39 uint64
- x39, x38 = bits.Mul64(x30, 0xfffffffffffffffe)
- var x40 uint64
- var x41 uint64
- x41, x40 = bits.Mul64(x30, 0xffffffff00000000)
- var x42 uint64
- var x43 uint64
- x43, x42 = bits.Mul64(x30, 0xffffffff)
- var x44 uint64
- var x45 uint64
- x44, x45 = bits.Add64(x43, x40, uint64(0x0))
- var x46 uint64
- var x47 uint64
- x46, x47 = bits.Add64(x41, x38, uint64(p384Uint1(x45)))
- var x48 uint64
- var x49 uint64
- x48, x49 = bits.Add64(x39, x36, uint64(p384Uint1(x47)))
- var x50 uint64
- var x51 uint64
- x50, x51 = bits.Add64(x37, x34, uint64(p384Uint1(x49)))
- var x52 uint64
- var x53 uint64
- x52, x53 = bits.Add64(x35, x32, uint64(p384Uint1(x51)))
- x54 := (uint64(p384Uint1(x53)) + x33)
- var x56 uint64
- _, x56 = bits.Add64(x17, x42, uint64(0x0))
- var x57 uint64
- var x58 uint64
- x57, x58 = bits.Add64(x19, x44, uint64(p384Uint1(x56)))
- var x59 uint64
- var x60 uint64
- x59, x60 = bits.Add64(x21, x46, uint64(p384Uint1(x58)))
- var x61 uint64
- var x62 uint64
- x61, x62 = bits.Add64(x23, x48, uint64(p384Uint1(x60)))
- var x63 uint64
- var x64 uint64
- x63, x64 = bits.Add64(x25, x50, uint64(p384Uint1(x62)))
- var x65 uint64
- var x66 uint64
- x65, x66 = bits.Add64(x27, x52, uint64(p384Uint1(x64)))
- var x67 uint64
- var x68 uint64
- x67, x68 = bits.Add64(x29, x54, uint64(p384Uint1(x66)))
- var x69 uint64
- var x70 uint64
- x70, x69 = bits.Mul64(x1, arg1[5])
- var x71 uint64
- var x72 uint64
- x72, x71 = bits.Mul64(x1, arg1[4])
- var x73 uint64
- var x74 uint64
- x74, x73 = bits.Mul64(x1, arg1[3])
- var x75 uint64
- var x76 uint64
- x76, x75 = bits.Mul64(x1, arg1[2])
- var x77 uint64
- var x78 uint64
- x78, x77 = bits.Mul64(x1, arg1[1])
- var x79 uint64
- var x80 uint64
- x80, x79 = bits.Mul64(x1, arg1[0])
- var x81 uint64
- var x82 uint64
- x81, x82 = bits.Add64(x80, x77, uint64(0x0))
- var x83 uint64
- var x84 uint64
- x83, x84 = bits.Add64(x78, x75, uint64(p384Uint1(x82)))
- var x85 uint64
- var x86 uint64
- x85, x86 = bits.Add64(x76, x73, uint64(p384Uint1(x84)))
- var x87 uint64
- var x88 uint64
- x87, x88 = bits.Add64(x74, x71, uint64(p384Uint1(x86)))
- var x89 uint64
- var x90 uint64
- x89, x90 = bits.Add64(x72, x69, uint64(p384Uint1(x88)))
- x91 := (uint64(p384Uint1(x90)) + x70)
- var x92 uint64
- var x93 uint64
- x92, x93 = bits.Add64(x57, x79, uint64(0x0))
- var x94 uint64
- var x95 uint64
- x94, x95 = bits.Add64(x59, x81, uint64(p384Uint1(x93)))
- var x96 uint64
- var x97 uint64
- x96, x97 = bits.Add64(x61, x83, uint64(p384Uint1(x95)))
- var x98 uint64
- var x99 uint64
- x98, x99 = bits.Add64(x63, x85, uint64(p384Uint1(x97)))
- var x100 uint64
- var x101 uint64
- x100, x101 = bits.Add64(x65, x87, uint64(p384Uint1(x99)))
- var x102 uint64
- var x103 uint64
- x102, x103 = bits.Add64(x67, x89, uint64(p384Uint1(x101)))
- var x104 uint64
- var x105 uint64
- x104, x105 = bits.Add64(uint64(p384Uint1(x68)), x91, uint64(p384Uint1(x103)))
- var x106 uint64
- _, x106 = bits.Mul64(x92, 0x100000001)
- var x108 uint64
- var x109 uint64
- x109, x108 = bits.Mul64(x106, 0xffffffffffffffff)
- var x110 uint64
- var x111 uint64
- x111, x110 = bits.Mul64(x106, 0xffffffffffffffff)
- var x112 uint64
- var x113 uint64
- x113, x112 = bits.Mul64(x106, 0xffffffffffffffff)
- var x114 uint64
- var x115 uint64
- x115, x114 = bits.Mul64(x106, 0xfffffffffffffffe)
- var x116 uint64
- var x117 uint64
- x117, x116 = bits.Mul64(x106, 0xffffffff00000000)
- var x118 uint64
- var x119 uint64
- x119, x118 = bits.Mul64(x106, 0xffffffff)
- var x120 uint64
- var x121 uint64
- x120, x121 = bits.Add64(x119, x116, uint64(0x0))
- var x122 uint64
- var x123 uint64
- x122, x123 = bits.Add64(x117, x114, uint64(p384Uint1(x121)))
- var x124 uint64
- var x125 uint64
- x124, x125 = bits.Add64(x115, x112, uint64(p384Uint1(x123)))
- var x126 uint64
- var x127 uint64
- x126, x127 = bits.Add64(x113, x110, uint64(p384Uint1(x125)))
- var x128 uint64
- var x129 uint64
- x128, x129 = bits.Add64(x111, x108, uint64(p384Uint1(x127)))
- x130 := (uint64(p384Uint1(x129)) + x109)
- var x132 uint64
- _, x132 = bits.Add64(x92, x118, uint64(0x0))
- var x133 uint64
- var x134 uint64
- x133, x134 = bits.Add64(x94, x120, uint64(p384Uint1(x132)))
- var x135 uint64
- var x136 uint64
- x135, x136 = bits.Add64(x96, x122, uint64(p384Uint1(x134)))
- var x137 uint64
- var x138 uint64
- x137, x138 = bits.Add64(x98, x124, uint64(p384Uint1(x136)))
- var x139 uint64
- var x140 uint64
- x139, x140 = bits.Add64(x100, x126, uint64(p384Uint1(x138)))
- var x141 uint64
- var x142 uint64
- x141, x142 = bits.Add64(x102, x128, uint64(p384Uint1(x140)))
- var x143 uint64
- var x144 uint64
- x143, x144 = bits.Add64(x104, x130, uint64(p384Uint1(x142)))
- x145 := (uint64(p384Uint1(x144)) + uint64(p384Uint1(x105)))
- var x146 uint64
- var x147 uint64
- x147, x146 = bits.Mul64(x2, arg1[5])
- var x148 uint64
- var x149 uint64
- x149, x148 = bits.Mul64(x2, arg1[4])
- var x150 uint64
- var x151 uint64
- x151, x150 = bits.Mul64(x2, arg1[3])
- var x152 uint64
- var x153 uint64
- x153, x152 = bits.Mul64(x2, arg1[2])
- var x154 uint64
- var x155 uint64
- x155, x154 = bits.Mul64(x2, arg1[1])
- var x156 uint64
- var x157 uint64
- x157, x156 = bits.Mul64(x2, arg1[0])
- var x158 uint64
- var x159 uint64
- x158, x159 = bits.Add64(x157, x154, uint64(0x0))
- var x160 uint64
- var x161 uint64
- x160, x161 = bits.Add64(x155, x152, uint64(p384Uint1(x159)))
- var x162 uint64
- var x163 uint64
- x162, x163 = bits.Add64(x153, x150, uint64(p384Uint1(x161)))
- var x164 uint64
- var x165 uint64
- x164, x165 = bits.Add64(x151, x148, uint64(p384Uint1(x163)))
- var x166 uint64
- var x167 uint64
- x166, x167 = bits.Add64(x149, x146, uint64(p384Uint1(x165)))
- x168 := (uint64(p384Uint1(x167)) + x147)
- var x169 uint64
- var x170 uint64
- x169, x170 = bits.Add64(x133, x156, uint64(0x0))
- var x171 uint64
- var x172 uint64
- x171, x172 = bits.Add64(x135, x158, uint64(p384Uint1(x170)))
- var x173 uint64
- var x174 uint64
- x173, x174 = bits.Add64(x137, x160, uint64(p384Uint1(x172)))
- var x175 uint64
- var x176 uint64
- x175, x176 = bits.Add64(x139, x162, uint64(p384Uint1(x174)))
- var x177 uint64
- var x178 uint64
- x177, x178 = bits.Add64(x141, x164, uint64(p384Uint1(x176)))
- var x179 uint64
- var x180 uint64
- x179, x180 = bits.Add64(x143, x166, uint64(p384Uint1(x178)))
- var x181 uint64
- var x182 uint64
- x181, x182 = bits.Add64(x145, x168, uint64(p384Uint1(x180)))
- var x183 uint64
- _, x183 = bits.Mul64(x169, 0x100000001)
- var x185 uint64
- var x186 uint64
- x186, x185 = bits.Mul64(x183, 0xffffffffffffffff)
- var x187 uint64
- var x188 uint64
- x188, x187 = bits.Mul64(x183, 0xffffffffffffffff)
- var x189 uint64
- var x190 uint64
- x190, x189 = bits.Mul64(x183, 0xffffffffffffffff)
- var x191 uint64
- var x192 uint64
- x192, x191 = bits.Mul64(x183, 0xfffffffffffffffe)
- var x193 uint64
- var x194 uint64
- x194, x193 = bits.Mul64(x183, 0xffffffff00000000)
- var x195 uint64
- var x196 uint64
- x196, x195 = bits.Mul64(x183, 0xffffffff)
- var x197 uint64
- var x198 uint64
- x197, x198 = bits.Add64(x196, x193, uint64(0x0))
- var x199 uint64
- var x200 uint64
- x199, x200 = bits.Add64(x194, x191, uint64(p384Uint1(x198)))
- var x201 uint64
- var x202 uint64
- x201, x202 = bits.Add64(x192, x189, uint64(p384Uint1(x200)))
- var x203 uint64
- var x204 uint64
- x203, x204 = bits.Add64(x190, x187, uint64(p384Uint1(x202)))
- var x205 uint64
- var x206 uint64
- x205, x206 = bits.Add64(x188, x185, uint64(p384Uint1(x204)))
- x207 := (uint64(p384Uint1(x206)) + x186)
- var x209 uint64
- _, x209 = bits.Add64(x169, x195, uint64(0x0))
- var x210 uint64
- var x211 uint64
- x210, x211 = bits.Add64(x171, x197, uint64(p384Uint1(x209)))
- var x212 uint64
- var x213 uint64
- x212, x213 = bits.Add64(x173, x199, uint64(p384Uint1(x211)))
- var x214 uint64
- var x215 uint64
- x214, x215 = bits.Add64(x175, x201, uint64(p384Uint1(x213)))
- var x216 uint64
- var x217 uint64
- x216, x217 = bits.Add64(x177, x203, uint64(p384Uint1(x215)))
- var x218 uint64
- var x219 uint64
- x218, x219 = bits.Add64(x179, x205, uint64(p384Uint1(x217)))
- var x220 uint64
- var x221 uint64
- x220, x221 = bits.Add64(x181, x207, uint64(p384Uint1(x219)))
- x222 := (uint64(p384Uint1(x221)) + uint64(p384Uint1(x182)))
- var x223 uint64
- var x224 uint64
- x224, x223 = bits.Mul64(x3, arg1[5])
- var x225 uint64
- var x226 uint64
- x226, x225 = bits.Mul64(x3, arg1[4])
- var x227 uint64
- var x228 uint64
- x228, x227 = bits.Mul64(x3, arg1[3])
- var x229 uint64
- var x230 uint64
- x230, x229 = bits.Mul64(x3, arg1[2])
- var x231 uint64
- var x232 uint64
- x232, x231 = bits.Mul64(x3, arg1[1])
- var x233 uint64
- var x234 uint64
- x234, x233 = bits.Mul64(x3, arg1[0])
- var x235 uint64
- var x236 uint64
- x235, x236 = bits.Add64(x234, x231, uint64(0x0))
- var x237 uint64
- var x238 uint64
- x237, x238 = bits.Add64(x232, x229, uint64(p384Uint1(x236)))
- var x239 uint64
- var x240 uint64
- x239, x240 = bits.Add64(x230, x227, uint64(p384Uint1(x238)))
- var x241 uint64
- var x242 uint64
- x241, x242 = bits.Add64(x228, x225, uint64(p384Uint1(x240)))
- var x243 uint64
- var x244 uint64
- x243, x244 = bits.Add64(x226, x223, uint64(p384Uint1(x242)))
- x245 := (uint64(p384Uint1(x244)) + x224)
- var x246 uint64
- var x247 uint64
- x246, x247 = bits.Add64(x210, x233, uint64(0x0))
- var x248 uint64
- var x249 uint64
- x248, x249 = bits.Add64(x212, x235, uint64(p384Uint1(x247)))
- var x250 uint64
- var x251 uint64
- x250, x251 = bits.Add64(x214, x237, uint64(p384Uint1(x249)))
- var x252 uint64
- var x253 uint64
- x252, x253 = bits.Add64(x216, x239, uint64(p384Uint1(x251)))
- var x254 uint64
- var x255 uint64
- x254, x255 = bits.Add64(x218, x241, uint64(p384Uint1(x253)))
- var x256 uint64
- var x257 uint64
- x256, x257 = bits.Add64(x220, x243, uint64(p384Uint1(x255)))
- var x258 uint64
- var x259 uint64
- x258, x259 = bits.Add64(x222, x245, uint64(p384Uint1(x257)))
- var x260 uint64
- _, x260 = bits.Mul64(x246, 0x100000001)
- var x262 uint64
- var x263 uint64
- x263, x262 = bits.Mul64(x260, 0xffffffffffffffff)
- var x264 uint64
- var x265 uint64
- x265, x264 = bits.Mul64(x260, 0xffffffffffffffff)
- var x266 uint64
- var x267 uint64
- x267, x266 = bits.Mul64(x260, 0xffffffffffffffff)
- var x268 uint64
- var x269 uint64
- x269, x268 = bits.Mul64(x260, 0xfffffffffffffffe)
- var x270 uint64
- var x271 uint64
- x271, x270 = bits.Mul64(x260, 0xffffffff00000000)
- var x272 uint64
- var x273 uint64
- x273, x272 = bits.Mul64(x260, 0xffffffff)
- var x274 uint64
- var x275 uint64
- x274, x275 = bits.Add64(x273, x270, uint64(0x0))
- var x276 uint64
- var x277 uint64
- x276, x277 = bits.Add64(x271, x268, uint64(p384Uint1(x275)))
- var x278 uint64
- var x279 uint64
- x278, x279 = bits.Add64(x269, x266, uint64(p384Uint1(x277)))
- var x280 uint64
- var x281 uint64
- x280, x281 = bits.Add64(x267, x264, uint64(p384Uint1(x279)))
- var x282 uint64
- var x283 uint64
- x282, x283 = bits.Add64(x265, x262, uint64(p384Uint1(x281)))
- x284 := (uint64(p384Uint1(x283)) + x263)
- var x286 uint64
- _, x286 = bits.Add64(x246, x272, uint64(0x0))
- var x287 uint64
- var x288 uint64
- x287, x288 = bits.Add64(x248, x274, uint64(p384Uint1(x286)))
- var x289 uint64
- var x290 uint64
- x289, x290 = bits.Add64(x250, x276, uint64(p384Uint1(x288)))
- var x291 uint64
- var x292 uint64
- x291, x292 = bits.Add64(x252, x278, uint64(p384Uint1(x290)))
- var x293 uint64
- var x294 uint64
- x293, x294 = bits.Add64(x254, x280, uint64(p384Uint1(x292)))
- var x295 uint64
- var x296 uint64
- x295, x296 = bits.Add64(x256, x282, uint64(p384Uint1(x294)))
- var x297 uint64
- var x298 uint64
- x297, x298 = bits.Add64(x258, x284, uint64(p384Uint1(x296)))
- x299 := (uint64(p384Uint1(x298)) + uint64(p384Uint1(x259)))
- var x300 uint64
- var x301 uint64
- x301, x300 = bits.Mul64(x4, arg1[5])
- var x302 uint64
- var x303 uint64
- x303, x302 = bits.Mul64(x4, arg1[4])
- var x304 uint64
- var x305 uint64
- x305, x304 = bits.Mul64(x4, arg1[3])
- var x306 uint64
- var x307 uint64
- x307, x306 = bits.Mul64(x4, arg1[2])
- var x308 uint64
- var x309 uint64
- x309, x308 = bits.Mul64(x4, arg1[1])
- var x310 uint64
- var x311 uint64
- x311, x310 = bits.Mul64(x4, arg1[0])
- var x312 uint64
- var x313 uint64
- x312, x313 = bits.Add64(x311, x308, uint64(0x0))
- var x314 uint64
- var x315 uint64
- x314, x315 = bits.Add64(x309, x306, uint64(p384Uint1(x313)))
- var x316 uint64
- var x317 uint64
- x316, x317 = bits.Add64(x307, x304, uint64(p384Uint1(x315)))
- var x318 uint64
- var x319 uint64
- x318, x319 = bits.Add64(x305, x302, uint64(p384Uint1(x317)))
- var x320 uint64
- var x321 uint64
- x320, x321 = bits.Add64(x303, x300, uint64(p384Uint1(x319)))
- x322 := (uint64(p384Uint1(x321)) + x301)
- var x323 uint64
- var x324 uint64
- x323, x324 = bits.Add64(x287, x310, uint64(0x0))
- var x325 uint64
- var x326 uint64
- x325, x326 = bits.Add64(x289, x312, uint64(p384Uint1(x324)))
- var x327 uint64
- var x328 uint64
- x327, x328 = bits.Add64(x291, x314, uint64(p384Uint1(x326)))
- var x329 uint64
- var x330 uint64
- x329, x330 = bits.Add64(x293, x316, uint64(p384Uint1(x328)))
- var x331 uint64
- var x332 uint64
- x331, x332 = bits.Add64(x295, x318, uint64(p384Uint1(x330)))
- var x333 uint64
- var x334 uint64
- x333, x334 = bits.Add64(x297, x320, uint64(p384Uint1(x332)))
- var x335 uint64
- var x336 uint64
- x335, x336 = bits.Add64(x299, x322, uint64(p384Uint1(x334)))
- var x337 uint64
- _, x337 = bits.Mul64(x323, 0x100000001)
- var x339 uint64
- var x340 uint64
- x340, x339 = bits.Mul64(x337, 0xffffffffffffffff)
- var x341 uint64
- var x342 uint64
- x342, x341 = bits.Mul64(x337, 0xffffffffffffffff)
- var x343 uint64
- var x344 uint64
- x344, x343 = bits.Mul64(x337, 0xffffffffffffffff)
- var x345 uint64
- var x346 uint64
- x346, x345 = bits.Mul64(x337, 0xfffffffffffffffe)
- var x347 uint64
- var x348 uint64
- x348, x347 = bits.Mul64(x337, 0xffffffff00000000)
- var x349 uint64
- var x350 uint64
- x350, x349 = bits.Mul64(x337, 0xffffffff)
- var x351 uint64
- var x352 uint64
- x351, x352 = bits.Add64(x350, x347, uint64(0x0))
- var x353 uint64
- var x354 uint64
- x353, x354 = bits.Add64(x348, x345, uint64(p384Uint1(x352)))
- var x355 uint64
- var x356 uint64
- x355, x356 = bits.Add64(x346, x343, uint64(p384Uint1(x354)))
- var x357 uint64
- var x358 uint64
- x357, x358 = bits.Add64(x344, x341, uint64(p384Uint1(x356)))
- var x359 uint64
- var x360 uint64
- x359, x360 = bits.Add64(x342, x339, uint64(p384Uint1(x358)))
- x361 := (uint64(p384Uint1(x360)) + x340)
- var x363 uint64
- _, x363 = bits.Add64(x323, x349, uint64(0x0))
- var x364 uint64
- var x365 uint64
- x364, x365 = bits.Add64(x325, x351, uint64(p384Uint1(x363)))
- var x366 uint64
- var x367 uint64
- x366, x367 = bits.Add64(x327, x353, uint64(p384Uint1(x365)))
- var x368 uint64
- var x369 uint64
- x368, x369 = bits.Add64(x329, x355, uint64(p384Uint1(x367)))
- var x370 uint64
- var x371 uint64
- x370, x371 = bits.Add64(x331, x357, uint64(p384Uint1(x369)))
- var x372 uint64
- var x373 uint64
- x372, x373 = bits.Add64(x333, x359, uint64(p384Uint1(x371)))
- var x374 uint64
- var x375 uint64
- x374, x375 = bits.Add64(x335, x361, uint64(p384Uint1(x373)))
- x376 := (uint64(p384Uint1(x375)) + uint64(p384Uint1(x336)))
- var x377 uint64
- var x378 uint64
- x378, x377 = bits.Mul64(x5, arg1[5])
- var x379 uint64
- var x380 uint64
- x380, x379 = bits.Mul64(x5, arg1[4])
- var x381 uint64
- var x382 uint64
- x382, x381 = bits.Mul64(x5, arg1[3])
- var x383 uint64
- var x384 uint64
- x384, x383 = bits.Mul64(x5, arg1[2])
- var x385 uint64
- var x386 uint64
- x386, x385 = bits.Mul64(x5, arg1[1])
- var x387 uint64
- var x388 uint64
- x388, x387 = bits.Mul64(x5, arg1[0])
- var x389 uint64
- var x390 uint64
- x389, x390 = bits.Add64(x388, x385, uint64(0x0))
- var x391 uint64
- var x392 uint64
- x391, x392 = bits.Add64(x386, x383, uint64(p384Uint1(x390)))
- var x393 uint64
- var x394 uint64
- x393, x394 = bits.Add64(x384, x381, uint64(p384Uint1(x392)))
- var x395 uint64
- var x396 uint64
- x395, x396 = bits.Add64(x382, x379, uint64(p384Uint1(x394)))
- var x397 uint64
- var x398 uint64
- x397, x398 = bits.Add64(x380, x377, uint64(p384Uint1(x396)))
- x399 := (uint64(p384Uint1(x398)) + x378)
- var x400 uint64
- var x401 uint64
- x400, x401 = bits.Add64(x364, x387, uint64(0x0))
- var x402 uint64
- var x403 uint64
- x402, x403 = bits.Add64(x366, x389, uint64(p384Uint1(x401)))
- var x404 uint64
- var x405 uint64
- x404, x405 = bits.Add64(x368, x391, uint64(p384Uint1(x403)))
- var x406 uint64
- var x407 uint64
- x406, x407 = bits.Add64(x370, x393, uint64(p384Uint1(x405)))
- var x408 uint64
- var x409 uint64
- x408, x409 = bits.Add64(x372, x395, uint64(p384Uint1(x407)))
- var x410 uint64
- var x411 uint64
- x410, x411 = bits.Add64(x374, x397, uint64(p384Uint1(x409)))
- var x412 uint64
- var x413 uint64
- x412, x413 = bits.Add64(x376, x399, uint64(p384Uint1(x411)))
- var x414 uint64
- _, x414 = bits.Mul64(x400, 0x100000001)
- var x416 uint64
- var x417 uint64
- x417, x416 = bits.Mul64(x414, 0xffffffffffffffff)
- var x418 uint64
- var x419 uint64
- x419, x418 = bits.Mul64(x414, 0xffffffffffffffff)
- var x420 uint64
- var x421 uint64
- x421, x420 = bits.Mul64(x414, 0xffffffffffffffff)
- var x422 uint64
- var x423 uint64
- x423, x422 = bits.Mul64(x414, 0xfffffffffffffffe)
- var x424 uint64
- var x425 uint64
- x425, x424 = bits.Mul64(x414, 0xffffffff00000000)
- var x426 uint64
- var x427 uint64
- x427, x426 = bits.Mul64(x414, 0xffffffff)
- var x428 uint64
- var x429 uint64
- x428, x429 = bits.Add64(x427, x424, uint64(0x0))
- var x430 uint64
- var x431 uint64
- x430, x431 = bits.Add64(x425, x422, uint64(p384Uint1(x429)))
- var x432 uint64
- var x433 uint64
- x432, x433 = bits.Add64(x423, x420, uint64(p384Uint1(x431)))
- var x434 uint64
- var x435 uint64
- x434, x435 = bits.Add64(x421, x418, uint64(p384Uint1(x433)))
- var x436 uint64
- var x437 uint64
- x436, x437 = bits.Add64(x419, x416, uint64(p384Uint1(x435)))
- x438 := (uint64(p384Uint1(x437)) + x417)
- var x440 uint64
- _, x440 = bits.Add64(x400, x426, uint64(0x0))
- var x441 uint64
- var x442 uint64
- x441, x442 = bits.Add64(x402, x428, uint64(p384Uint1(x440)))
- var x443 uint64
- var x444 uint64
- x443, x444 = bits.Add64(x404, x430, uint64(p384Uint1(x442)))
- var x445 uint64
- var x446 uint64
- x445, x446 = bits.Add64(x406, x432, uint64(p384Uint1(x444)))
- var x447 uint64
- var x448 uint64
- x447, x448 = bits.Add64(x408, x434, uint64(p384Uint1(x446)))
- var x449 uint64
- var x450 uint64
- x449, x450 = bits.Add64(x410, x436, uint64(p384Uint1(x448)))
- var x451 uint64
- var x452 uint64
- x451, x452 = bits.Add64(x412, x438, uint64(p384Uint1(x450)))
- x453 := (uint64(p384Uint1(x452)) + uint64(p384Uint1(x413)))
- var x454 uint64
- var x455 uint64
- x454, x455 = bits.Sub64(x441, 0xffffffff, uint64(0x0))
- var x456 uint64
- var x457 uint64
- x456, x457 = bits.Sub64(x443, 0xffffffff00000000, uint64(p384Uint1(x455)))
- var x458 uint64
- var x459 uint64
- x458, x459 = bits.Sub64(x445, 0xfffffffffffffffe, uint64(p384Uint1(x457)))
- var x460 uint64
- var x461 uint64
- x460, x461 = bits.Sub64(x447, 0xffffffffffffffff, uint64(p384Uint1(x459)))
- var x462 uint64
- var x463 uint64
- x462, x463 = bits.Sub64(x449, 0xffffffffffffffff, uint64(p384Uint1(x461)))
- var x464 uint64
- var x465 uint64
- x464, x465 = bits.Sub64(x451, 0xffffffffffffffff, uint64(p384Uint1(x463)))
- var x467 uint64
- _, x467 = bits.Sub64(x453, uint64(0x0), uint64(p384Uint1(x465)))
- var x468 uint64
- p384CmovznzU64(&x468, p384Uint1(x467), x454, x441)
- var x469 uint64
- p384CmovznzU64(&x469, p384Uint1(x467), x456, x443)
- var x470 uint64
- p384CmovznzU64(&x470, p384Uint1(x467), x458, x445)
- var x471 uint64
- p384CmovznzU64(&x471, p384Uint1(x467), x460, x447)
- var x472 uint64
- p384CmovznzU64(&x472, p384Uint1(x467), x462, x449)
- var x473 uint64
- p384CmovznzU64(&x473, p384Uint1(x467), x464, x451)
- out1[0] = x468
- out1[1] = x469
- out1[2] = x470
- out1[3] = x471
- out1[4] = x472
- out1[5] = x473
-}
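
The tail of the Montgomery multiplication above (the Sub64/p384CmovznzU64 run) is a single conditional subtraction: after the last reduction round the value is below 2·m, so subtracting m once and keying on the final borrow restores the canonical range. Below is a minimal standalone sketch of that step, assuming the value already fits in six limbs (the code above also folds in a seventh carry word, x453); condSubP384 and the limb array m are illustrative names, not part of this file.

package main

import (
	"fmt"
	"math/bits"
)

// The P-384 prime m as little-endian 64-bit limbs, matching the Sub64 constants above.
var m = [6]uint64{
	0x00000000ffffffff, 0xffffffff00000000, 0xfffffffffffffffe,
	0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff,
}

// condSubP384 returns v mod m for any v < 2*m, in constant time.
func condSubP384(v [6]uint64) [6]uint64 {
	var d [6]uint64
	var borrow uint64
	for i := range v {
		d[i], borrow = bits.Sub64(v[i], m[i], borrow)
	}
	// borrow == 1 means v < m: keep v. borrow == 0 means v >= m: keep v - m.
	mask := borrow - 1 // all-ones when borrow == 0, zero when borrow == 1
	var out [6]uint64
	for i := range v {
		out[i] = (d[i] & mask) | (v[i] &^ mask)
	}
	return out
}

func main() {
	fmt.Println(condSubP384(m)) // m itself reduces to [0 0 0 0 0 0]
}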
-
-// p384Add adds two field elements in the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// 0 ≤ eval arg2 < m
-// Postconditions:
-// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
-// 0 ≤ eval out1 < m
-//
-func p384Add(out1 *p384MontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement, arg2 *p384MontgomeryDomainFieldElement) {
- var x1 uint64
- var x2 uint64
- x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
- var x3 uint64
- var x4 uint64
- x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(p384Uint1(x2)))
- var x5 uint64
- var x6 uint64
- x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(p384Uint1(x4)))
- var x7 uint64
- var x8 uint64
- x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(p384Uint1(x6)))
- var x9 uint64
- var x10 uint64
- x9, x10 = bits.Add64(arg1[4], arg2[4], uint64(p384Uint1(x8)))
- var x11 uint64
- var x12 uint64
- x11, x12 = bits.Add64(arg1[5], arg2[5], uint64(p384Uint1(x10)))
- var x13 uint64
- var x14 uint64
- x13, x14 = bits.Sub64(x1, 0xffffffff, uint64(0x0))
- var x15 uint64
- var x16 uint64
- x15, x16 = bits.Sub64(x3, 0xffffffff00000000, uint64(p384Uint1(x14)))
- var x17 uint64
- var x18 uint64
- x17, x18 = bits.Sub64(x5, 0xfffffffffffffffe, uint64(p384Uint1(x16)))
- var x19 uint64
- var x20 uint64
- x19, x20 = bits.Sub64(x7, 0xffffffffffffffff, uint64(p384Uint1(x18)))
- var x21 uint64
- var x22 uint64
- x21, x22 = bits.Sub64(x9, 0xffffffffffffffff, uint64(p384Uint1(x20)))
- var x23 uint64
- var x24 uint64
- x23, x24 = bits.Sub64(x11, 0xffffffffffffffff, uint64(p384Uint1(x22)))
- var x26 uint64
- _, x26 = bits.Sub64(uint64(p384Uint1(x12)), uint64(0x0), uint64(p384Uint1(x24)))
- var x27 uint64
- p384CmovznzU64(&x27, p384Uint1(x26), x13, x1)
- var x28 uint64
- p384CmovznzU64(&x28, p384Uint1(x26), x15, x3)
- var x29 uint64
- p384CmovznzU64(&x29, p384Uint1(x26), x17, x5)
- var x30 uint64
- p384CmovznzU64(&x30, p384Uint1(x26), x19, x7)
- var x31 uint64
- p384CmovznzU64(&x31, p384Uint1(x26), x21, x9)
- var x32 uint64
- p384CmovznzU64(&x32, p384Uint1(x26), x23, x11)
- out1[0] = x27
- out1[1] = x28
- out1[2] = x29
- out1[3] = x30
- out1[4] = x31
- out1[5] = x32
-}
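
p384Add keeps both operands below m, so the raw sum is below 2·m and the same subtract-then-select tail suffices. A hedged math/big cross-check of that contract; p384Prime and the test values are illustrative, not part of this file.

package main

import (
	"fmt"
	"math/big"
)

func p384Prime() *big.Int {
	// m = 2^384 - 2^128 - 2^96 + 2^32 - 1
	p := new(big.Int).Lsh(big.NewInt(1), 384)
	p.Sub(p, new(big.Int).Lsh(big.NewInt(1), 128))
	p.Sub(p, new(big.Int).Lsh(big.NewInt(1), 96))
	p.Add(p, new(big.Int).Lsh(big.NewInt(1), 32))
	return p.Sub(p, big.NewInt(1))
}

func main() {
	m := p384Prime()
	a := new(big.Int).Sub(m, big.NewInt(1)) // largest valid input, m-1
	b := big.NewInt(2)
	sum := new(big.Int).Add(a, b)
	// a+b < 2m, so a single conditional subtraction of m is always enough.
	if sum.Cmp(m) >= 0 {
		sum.Sub(sum, m)
	}
	fmt.Println(sum) // 1
}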
-
-// p384Sub subtracts two field elements in the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// 0 ≤ eval arg2 < m
-// Postconditions:
-// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
-// 0 ≤ eval out1 < m
-//
-func p384Sub(out1 *p384MontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement, arg2 *p384MontgomeryDomainFieldElement) {
- var x1 uint64
- var x2 uint64
- x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
- var x3 uint64
- var x4 uint64
- x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(p384Uint1(x2)))
- var x5 uint64
- var x6 uint64
- x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(p384Uint1(x4)))
- var x7 uint64
- var x8 uint64
- x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(p384Uint1(x6)))
- var x9 uint64
- var x10 uint64
- x9, x10 = bits.Sub64(arg1[4], arg2[4], uint64(p384Uint1(x8)))
- var x11 uint64
- var x12 uint64
- x11, x12 = bits.Sub64(arg1[5], arg2[5], uint64(p384Uint1(x10)))
- var x13 uint64
- p384CmovznzU64(&x13, p384Uint1(x12), uint64(0x0), 0xffffffffffffffff)
- var x14 uint64
- var x15 uint64
- x14, x15 = bits.Add64(x1, (x13 & 0xffffffff), uint64(0x0))
- var x16 uint64
- var x17 uint64
- x16, x17 = bits.Add64(x3, (x13 & 0xffffffff00000000), uint64(p384Uint1(x15)))
- var x18 uint64
- var x19 uint64
- x18, x19 = bits.Add64(x5, (x13 & 0xfffffffffffffffe), uint64(p384Uint1(x17)))
- var x20 uint64
- var x21 uint64
- x20, x21 = bits.Add64(x7, x13, uint64(p384Uint1(x19)))
- var x22 uint64
- var x23 uint64
- x22, x23 = bits.Add64(x9, x13, uint64(p384Uint1(x21)))
- var x24 uint64
- x24, _ = bits.Add64(x11, x13, uint64(p384Uint1(x23)))
- out1[0] = x14
- out1[1] = x16
- out1[2] = x18
- out1[3] = x20
- out1[4] = x22
- out1[5] = x24
-}
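
p384Sub goes the other way: if the limb-wise subtraction borrows, x13 becomes an all-ones mask and the following Add64 chain adds m back exactly once (for the three all-ones limbs of m the mask is added directly). A sketch of that mechanism, assuming both inputs are below m so at most one add-back is needed; condAddBack is a hypothetical helper.

package main

import (
	"fmt"
	"math/bits"
)

var m = [6]uint64{
	0x00000000ffffffff, 0xffffffff00000000, 0xfffffffffffffffe,
	0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff,
}

// condAddBack computes a - b mod m for a, b < m, in constant time.
func condAddBack(a, b [6]uint64) [6]uint64 {
	var d [6]uint64
	var borrow uint64
	for i := range a {
		d[i], borrow = bits.Sub64(a[i], b[i], borrow)
	}
	mask := -borrow // all-ones exactly when the subtraction wrapped below zero
	var carry uint64
	for i := range d {
		d[i], carry = bits.Add64(d[i], mask&m[i], carry)
	}
	return d
}

func main() {
	zero := [6]uint64{}
	one := [6]uint64{1}
	fmt.Println(condAddBack(zero, one)) // the limbs of m - 1
}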
-
-// p384SetOne returns the field element one in the Montgomery domain.
-//
-// Postconditions:
-// eval (from_montgomery out1) mod m = 1 mod m
-// 0 ≤ eval out1 < m
-//
-func p384SetOne(out1 *p384MontgomeryDomainFieldElement) {
- out1[0] = 0xffffffff00000001
- out1[1] = 0xffffffff
- out1[2] = uint64(0x1)
- out1[3] = uint64(0x0)
- out1[4] = uint64(0x0)
- out1[5] = uint64(0x0)
-}
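
The constants in p384SetOne are R mod m with R = 2^384, i.e. the Montgomery representation of 1. A quick math/big check:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Lsh(big.NewInt(1), 384) // start from 2^384
	p.Sub(p, new(big.Int).Lsh(big.NewInt(1), 128))
	p.Sub(p, new(big.Int).Lsh(big.NewInt(1), 96))
	p.Add(p, new(big.Int).Lsh(big.NewInt(1), 32))
	p.Sub(p, big.NewInt(1)) // p is now the P-384 prime m

	r := new(big.Int).Lsh(big.NewInt(1), 384)
	r.Mod(r, p)
	// 0x1_00000000ffffffff_ffffffff00000001: limbs 0x1, 0xffffffff,
	// 0xffffffff00000001 — exactly what p384SetOne stores.
	fmt.Printf("%#x\n", r)
}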
-
-// p384FromMontgomery translates a field element out of the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// Postconditions:
-// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^6) mod m
-// 0 ≤ eval out1 < m
-//
-func p384FromMontgomery(out1 *p384NonMontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement) {
- x1 := arg1[0]
- var x2 uint64
- _, x2 = bits.Mul64(x1, 0x100000001)
- var x4 uint64
- var x5 uint64
- x5, x4 = bits.Mul64(x2, 0xffffffffffffffff)
- var x6 uint64
- var x7 uint64
- x7, x6 = bits.Mul64(x2, 0xffffffffffffffff)
- var x8 uint64
- var x9 uint64
- x9, x8 = bits.Mul64(x2, 0xffffffffffffffff)
- var x10 uint64
- var x11 uint64
- x11, x10 = bits.Mul64(x2, 0xfffffffffffffffe)
- var x12 uint64
- var x13 uint64
- x13, x12 = bits.Mul64(x2, 0xffffffff00000000)
- var x14 uint64
- var x15 uint64
- x15, x14 = bits.Mul64(x2, 0xffffffff)
- var x16 uint64
- var x17 uint64
- x16, x17 = bits.Add64(x15, x12, uint64(0x0))
- var x18 uint64
- var x19 uint64
- x18, x19 = bits.Add64(x13, x10, uint64(p384Uint1(x17)))
- var x20 uint64
- var x21 uint64
- x20, x21 = bits.Add64(x11, x8, uint64(p384Uint1(x19)))
- var x22 uint64
- var x23 uint64
- x22, x23 = bits.Add64(x9, x6, uint64(p384Uint1(x21)))
- var x24 uint64
- var x25 uint64
- x24, x25 = bits.Add64(x7, x4, uint64(p384Uint1(x23)))
- var x27 uint64
- _, x27 = bits.Add64(x1, x14, uint64(0x0))
- var x28 uint64
- var x29 uint64
- x28, x29 = bits.Add64(uint64(0x0), x16, uint64(p384Uint1(x27)))
- var x30 uint64
- var x31 uint64
- x30, x31 = bits.Add64(uint64(0x0), x18, uint64(p384Uint1(x29)))
- var x32 uint64
- var x33 uint64
- x32, x33 = bits.Add64(uint64(0x0), x20, uint64(p384Uint1(x31)))
- var x34 uint64
- var x35 uint64
- x34, x35 = bits.Add64(uint64(0x0), x22, uint64(p384Uint1(x33)))
- var x36 uint64
- var x37 uint64
- x36, x37 = bits.Add64(uint64(0x0), x24, uint64(p384Uint1(x35)))
- var x38 uint64
- var x39 uint64
- x38, x39 = bits.Add64(uint64(0x0), (uint64(p384Uint1(x25)) + x5), uint64(p384Uint1(x37)))
- var x40 uint64
- var x41 uint64
- x40, x41 = bits.Add64(x28, arg1[1], uint64(0x0))
- var x42 uint64
- var x43 uint64
- x42, x43 = bits.Add64(x30, uint64(0x0), uint64(p384Uint1(x41)))
- var x44 uint64
- var x45 uint64
- x44, x45 = bits.Add64(x32, uint64(0x0), uint64(p384Uint1(x43)))
- var x46 uint64
- var x47 uint64
- x46, x47 = bits.Add64(x34, uint64(0x0), uint64(p384Uint1(x45)))
- var x48 uint64
- var x49 uint64
- x48, x49 = bits.Add64(x36, uint64(0x0), uint64(p384Uint1(x47)))
- var x50 uint64
- var x51 uint64
- x50, x51 = bits.Add64(x38, uint64(0x0), uint64(p384Uint1(x49)))
- var x52 uint64
- _, x52 = bits.Mul64(x40, 0x100000001)
- var x54 uint64
- var x55 uint64
- x55, x54 = bits.Mul64(x52, 0xffffffffffffffff)
- var x56 uint64
- var x57 uint64
- x57, x56 = bits.Mul64(x52, 0xffffffffffffffff)
- var x58 uint64
- var x59 uint64
- x59, x58 = bits.Mul64(x52, 0xffffffffffffffff)
- var x60 uint64
- var x61 uint64
- x61, x60 = bits.Mul64(x52, 0xfffffffffffffffe)
- var x62 uint64
- var x63 uint64
- x63, x62 = bits.Mul64(x52, 0xffffffff00000000)
- var x64 uint64
- var x65 uint64
- x65, x64 = bits.Mul64(x52, 0xffffffff)
- var x66 uint64
- var x67 uint64
- x66, x67 = bits.Add64(x65, x62, uint64(0x0))
- var x68 uint64
- var x69 uint64
- x68, x69 = bits.Add64(x63, x60, uint64(p384Uint1(x67)))
- var x70 uint64
- var x71 uint64
- x70, x71 = bits.Add64(x61, x58, uint64(p384Uint1(x69)))
- var x72 uint64
- var x73 uint64
- x72, x73 = bits.Add64(x59, x56, uint64(p384Uint1(x71)))
- var x74 uint64
- var x75 uint64
- x74, x75 = bits.Add64(x57, x54, uint64(p384Uint1(x73)))
- var x77 uint64
- _, x77 = bits.Add64(x40, x64, uint64(0x0))
- var x78 uint64
- var x79 uint64
- x78, x79 = bits.Add64(x42, x66, uint64(p384Uint1(x77)))
- var x80 uint64
- var x81 uint64
- x80, x81 = bits.Add64(x44, x68, uint64(p384Uint1(x79)))
- var x82 uint64
- var x83 uint64
- x82, x83 = bits.Add64(x46, x70, uint64(p384Uint1(x81)))
- var x84 uint64
- var x85 uint64
- x84, x85 = bits.Add64(x48, x72, uint64(p384Uint1(x83)))
- var x86 uint64
- var x87 uint64
- x86, x87 = bits.Add64(x50, x74, uint64(p384Uint1(x85)))
- var x88 uint64
- var x89 uint64
- x88, x89 = bits.Add64((uint64(p384Uint1(x51)) + uint64(p384Uint1(x39))), (uint64(p384Uint1(x75)) + x55), uint64(p384Uint1(x87)))
- var x90 uint64
- var x91 uint64
- x90, x91 = bits.Add64(x78, arg1[2], uint64(0x0))
- var x92 uint64
- var x93 uint64
- x92, x93 = bits.Add64(x80, uint64(0x0), uint64(p384Uint1(x91)))
- var x94 uint64
- var x95 uint64
- x94, x95 = bits.Add64(x82, uint64(0x0), uint64(p384Uint1(x93)))
- var x96 uint64
- var x97 uint64
- x96, x97 = bits.Add64(x84, uint64(0x0), uint64(p384Uint1(x95)))
- var x98 uint64
- var x99 uint64
- x98, x99 = bits.Add64(x86, uint64(0x0), uint64(p384Uint1(x97)))
- var x100 uint64
- var x101 uint64
- x100, x101 = bits.Add64(x88, uint64(0x0), uint64(p384Uint1(x99)))
- var x102 uint64
- _, x102 = bits.Mul64(x90, 0x100000001)
- var x104 uint64
- var x105 uint64
- x105, x104 = bits.Mul64(x102, 0xffffffffffffffff)
- var x106 uint64
- var x107 uint64
- x107, x106 = bits.Mul64(x102, 0xffffffffffffffff)
- var x108 uint64
- var x109 uint64
- x109, x108 = bits.Mul64(x102, 0xffffffffffffffff)
- var x110 uint64
- var x111 uint64
- x111, x110 = bits.Mul64(x102, 0xfffffffffffffffe)
- var x112 uint64
- var x113 uint64
- x113, x112 = bits.Mul64(x102, 0xffffffff00000000)
- var x114 uint64
- var x115 uint64
- x115, x114 = bits.Mul64(x102, 0xffffffff)
- var x116 uint64
- var x117 uint64
- x116, x117 = bits.Add64(x115, x112, uint64(0x0))
- var x118 uint64
- var x119 uint64
- x118, x119 = bits.Add64(x113, x110, uint64(p384Uint1(x117)))
- var x120 uint64
- var x121 uint64
- x120, x121 = bits.Add64(x111, x108, uint64(p384Uint1(x119)))
- var x122 uint64
- var x123 uint64
- x122, x123 = bits.Add64(x109, x106, uint64(p384Uint1(x121)))
- var x124 uint64
- var x125 uint64
- x124, x125 = bits.Add64(x107, x104, uint64(p384Uint1(x123)))
- var x127 uint64
- _, x127 = bits.Add64(x90, x114, uint64(0x0))
- var x128 uint64
- var x129 uint64
- x128, x129 = bits.Add64(x92, x116, uint64(p384Uint1(x127)))
- var x130 uint64
- var x131 uint64
- x130, x131 = bits.Add64(x94, x118, uint64(p384Uint1(x129)))
- var x132 uint64
- var x133 uint64
- x132, x133 = bits.Add64(x96, x120, uint64(p384Uint1(x131)))
- var x134 uint64
- var x135 uint64
- x134, x135 = bits.Add64(x98, x122, uint64(p384Uint1(x133)))
- var x136 uint64
- var x137 uint64
- x136, x137 = bits.Add64(x100, x124, uint64(p384Uint1(x135)))
- var x138 uint64
- var x139 uint64
- x138, x139 = bits.Add64((uint64(p384Uint1(x101)) + uint64(p384Uint1(x89))), (uint64(p384Uint1(x125)) + x105), uint64(p384Uint1(x137)))
- var x140 uint64
- var x141 uint64
- x140, x141 = bits.Add64(x128, arg1[3], uint64(0x0))
- var x142 uint64
- var x143 uint64
- x142, x143 = bits.Add64(x130, uint64(0x0), uint64(p384Uint1(x141)))
- var x144 uint64
- var x145 uint64
- x144, x145 = bits.Add64(x132, uint64(0x0), uint64(p384Uint1(x143)))
- var x146 uint64
- var x147 uint64
- x146, x147 = bits.Add64(x134, uint64(0x0), uint64(p384Uint1(x145)))
- var x148 uint64
- var x149 uint64
- x148, x149 = bits.Add64(x136, uint64(0x0), uint64(p384Uint1(x147)))
- var x150 uint64
- var x151 uint64
- x150, x151 = bits.Add64(x138, uint64(0x0), uint64(p384Uint1(x149)))
- var x152 uint64
- _, x152 = bits.Mul64(x140, 0x100000001)
- var x154 uint64
- var x155 uint64
- x155, x154 = bits.Mul64(x152, 0xffffffffffffffff)
- var x156 uint64
- var x157 uint64
- x157, x156 = bits.Mul64(x152, 0xffffffffffffffff)
- var x158 uint64
- var x159 uint64
- x159, x158 = bits.Mul64(x152, 0xffffffffffffffff)
- var x160 uint64
- var x161 uint64
- x161, x160 = bits.Mul64(x152, 0xfffffffffffffffe)
- var x162 uint64
- var x163 uint64
- x163, x162 = bits.Mul64(x152, 0xffffffff00000000)
- var x164 uint64
- var x165 uint64
- x165, x164 = bits.Mul64(x152, 0xffffffff)
- var x166 uint64
- var x167 uint64
- x166, x167 = bits.Add64(x165, x162, uint64(0x0))
- var x168 uint64
- var x169 uint64
- x168, x169 = bits.Add64(x163, x160, uint64(p384Uint1(x167)))
- var x170 uint64
- var x171 uint64
- x170, x171 = bits.Add64(x161, x158, uint64(p384Uint1(x169)))
- var x172 uint64
- var x173 uint64
- x172, x173 = bits.Add64(x159, x156, uint64(p384Uint1(x171)))
- var x174 uint64
- var x175 uint64
- x174, x175 = bits.Add64(x157, x154, uint64(p384Uint1(x173)))
- var x177 uint64
- _, x177 = bits.Add64(x140, x164, uint64(0x0))
- var x178 uint64
- var x179 uint64
- x178, x179 = bits.Add64(x142, x166, uint64(p384Uint1(x177)))
- var x180 uint64
- var x181 uint64
- x180, x181 = bits.Add64(x144, x168, uint64(p384Uint1(x179)))
- var x182 uint64
- var x183 uint64
- x182, x183 = bits.Add64(x146, x170, uint64(p384Uint1(x181)))
- var x184 uint64
- var x185 uint64
- x184, x185 = bits.Add64(x148, x172, uint64(p384Uint1(x183)))
- var x186 uint64
- var x187 uint64
- x186, x187 = bits.Add64(x150, x174, uint64(p384Uint1(x185)))
- var x188 uint64
- var x189 uint64
- x188, x189 = bits.Add64((uint64(p384Uint1(x151)) + uint64(p384Uint1(x139))), (uint64(p384Uint1(x175)) + x155), uint64(p384Uint1(x187)))
- var x190 uint64
- var x191 uint64
- x190, x191 = bits.Add64(x178, arg1[4], uint64(0x0))
- var x192 uint64
- var x193 uint64
- x192, x193 = bits.Add64(x180, uint64(0x0), uint64(p384Uint1(x191)))
- var x194 uint64
- var x195 uint64
- x194, x195 = bits.Add64(x182, uint64(0x0), uint64(p384Uint1(x193)))
- var x196 uint64
- var x197 uint64
- x196, x197 = bits.Add64(x184, uint64(0x0), uint64(p384Uint1(x195)))
- var x198 uint64
- var x199 uint64
- x198, x199 = bits.Add64(x186, uint64(0x0), uint64(p384Uint1(x197)))
- var x200 uint64
- var x201 uint64
- x200, x201 = bits.Add64(x188, uint64(0x0), uint64(p384Uint1(x199)))
- var x202 uint64
- _, x202 = bits.Mul64(x190, 0x100000001)
- var x204 uint64
- var x205 uint64
- x205, x204 = bits.Mul64(x202, 0xffffffffffffffff)
- var x206 uint64
- var x207 uint64
- x207, x206 = bits.Mul64(x202, 0xffffffffffffffff)
- var x208 uint64
- var x209 uint64
- x209, x208 = bits.Mul64(x202, 0xffffffffffffffff)
- var x210 uint64
- var x211 uint64
- x211, x210 = bits.Mul64(x202, 0xfffffffffffffffe)
- var x212 uint64
- var x213 uint64
- x213, x212 = bits.Mul64(x202, 0xffffffff00000000)
- var x214 uint64
- var x215 uint64
- x215, x214 = bits.Mul64(x202, 0xffffffff)
- var x216 uint64
- var x217 uint64
- x216, x217 = bits.Add64(x215, x212, uint64(0x0))
- var x218 uint64
- var x219 uint64
- x218, x219 = bits.Add64(x213, x210, uint64(p384Uint1(x217)))
- var x220 uint64
- var x221 uint64
- x220, x221 = bits.Add64(x211, x208, uint64(p384Uint1(x219)))
- var x222 uint64
- var x223 uint64
- x222, x223 = bits.Add64(x209, x206, uint64(p384Uint1(x221)))
- var x224 uint64
- var x225 uint64
- x224, x225 = bits.Add64(x207, x204, uint64(p384Uint1(x223)))
- var x227 uint64
- _, x227 = bits.Add64(x190, x214, uint64(0x0))
- var x228 uint64
- var x229 uint64
- x228, x229 = bits.Add64(x192, x216, uint64(p384Uint1(x227)))
- var x230 uint64
- var x231 uint64
- x230, x231 = bits.Add64(x194, x218, uint64(p384Uint1(x229)))
- var x232 uint64
- var x233 uint64
- x232, x233 = bits.Add64(x196, x220, uint64(p384Uint1(x231)))
- var x234 uint64
- var x235 uint64
- x234, x235 = bits.Add64(x198, x222, uint64(p384Uint1(x233)))
- var x236 uint64
- var x237 uint64
- x236, x237 = bits.Add64(x200, x224, uint64(p384Uint1(x235)))
- var x238 uint64
- var x239 uint64
- x238, x239 = bits.Add64((uint64(p384Uint1(x201)) + uint64(p384Uint1(x189))), (uint64(p384Uint1(x225)) + x205), uint64(p384Uint1(x237)))
- var x240 uint64
- var x241 uint64
- x240, x241 = bits.Add64(x228, arg1[5], uint64(0x0))
- var x242 uint64
- var x243 uint64
- x242, x243 = bits.Add64(x230, uint64(0x0), uint64(p384Uint1(x241)))
- var x244 uint64
- var x245 uint64
- x244, x245 = bits.Add64(x232, uint64(0x0), uint64(p384Uint1(x243)))
- var x246 uint64
- var x247 uint64
- x246, x247 = bits.Add64(x234, uint64(0x0), uint64(p384Uint1(x245)))
- var x248 uint64
- var x249 uint64
- x248, x249 = bits.Add64(x236, uint64(0x0), uint64(p384Uint1(x247)))
- var x250 uint64
- var x251 uint64
- x250, x251 = bits.Add64(x238, uint64(0x0), uint64(p384Uint1(x249)))
- var x252 uint64
- _, x252 = bits.Mul64(x240, 0x100000001)
- var x254 uint64
- var x255 uint64
- x255, x254 = bits.Mul64(x252, 0xffffffffffffffff)
- var x256 uint64
- var x257 uint64
- x257, x256 = bits.Mul64(x252, 0xffffffffffffffff)
- var x258 uint64
- var x259 uint64
- x259, x258 = bits.Mul64(x252, 0xffffffffffffffff)
- var x260 uint64
- var x261 uint64
- x261, x260 = bits.Mul64(x252, 0xfffffffffffffffe)
- var x262 uint64
- var x263 uint64
- x263, x262 = bits.Mul64(x252, 0xffffffff00000000)
- var x264 uint64
- var x265 uint64
- x265, x264 = bits.Mul64(x252, 0xffffffff)
- var x266 uint64
- var x267 uint64
- x266, x267 = bits.Add64(x265, x262, uint64(0x0))
- var x268 uint64
- var x269 uint64
- x268, x269 = bits.Add64(x263, x260, uint64(p384Uint1(x267)))
- var x270 uint64
- var x271 uint64
- x270, x271 = bits.Add64(x261, x258, uint64(p384Uint1(x269)))
- var x272 uint64
- var x273 uint64
- x272, x273 = bits.Add64(x259, x256, uint64(p384Uint1(x271)))
- var x274 uint64
- var x275 uint64
- x274, x275 = bits.Add64(x257, x254, uint64(p384Uint1(x273)))
- var x277 uint64
- _, x277 = bits.Add64(x240, x264, uint64(0x0))
- var x278 uint64
- var x279 uint64
- x278, x279 = bits.Add64(x242, x266, uint64(p384Uint1(x277)))
- var x280 uint64
- var x281 uint64
- x280, x281 = bits.Add64(x244, x268, uint64(p384Uint1(x279)))
- var x282 uint64
- var x283 uint64
- x282, x283 = bits.Add64(x246, x270, uint64(p384Uint1(x281)))
- var x284 uint64
- var x285 uint64
- x284, x285 = bits.Add64(x248, x272, uint64(p384Uint1(x283)))
- var x286 uint64
- var x287 uint64
- x286, x287 = bits.Add64(x250, x274, uint64(p384Uint1(x285)))
- var x288 uint64
- var x289 uint64
- x288, x289 = bits.Add64((uint64(p384Uint1(x251)) + uint64(p384Uint1(x239))), (uint64(p384Uint1(x275)) + x255), uint64(p384Uint1(x287)))
- var x290 uint64
- var x291 uint64
- x290, x291 = bits.Sub64(x278, 0xffffffff, uint64(0x0))
- var x292 uint64
- var x293 uint64
- x292, x293 = bits.Sub64(x280, 0xffffffff00000000, uint64(p384Uint1(x291)))
- var x294 uint64
- var x295 uint64
- x294, x295 = bits.Sub64(x282, 0xfffffffffffffffe, uint64(p384Uint1(x293)))
- var x296 uint64
- var x297 uint64
- x296, x297 = bits.Sub64(x284, 0xffffffffffffffff, uint64(p384Uint1(x295)))
- var x298 uint64
- var x299 uint64
- x298, x299 = bits.Sub64(x286, 0xffffffffffffffff, uint64(p384Uint1(x297)))
- var x300 uint64
- var x301 uint64
- x300, x301 = bits.Sub64(x288, 0xffffffffffffffff, uint64(p384Uint1(x299)))
- var x303 uint64
- _, x303 = bits.Sub64(uint64(p384Uint1(x289)), uint64(0x0), uint64(p384Uint1(x301)))
- var x304 uint64
- p384CmovznzU64(&x304, p384Uint1(x303), x290, x278)
- var x305 uint64
- p384CmovznzU64(&x305, p384Uint1(x303), x292, x280)
- var x306 uint64
- p384CmovznzU64(&x306, p384Uint1(x303), x294, x282)
- var x307 uint64
- p384CmovznzU64(&x307, p384Uint1(x303), x296, x284)
- var x308 uint64
- p384CmovznzU64(&x308, p384Uint1(x303), x298, x286)
- var x309 uint64
- p384CmovznzU64(&x309, p384Uint1(x303), x300, x288)
- out1[0] = x304
- out1[1] = x305
- out1[2] = x306
- out1[3] = x307
- out1[4] = x308
- out1[5] = x309
-}
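
Each round of p384FromMontgomery multiplies the current low limb by 0x100000001, which is -m^-1 mod 2^64 for this prime, so adding q·m zeroes that limb and the value can shift down one word; six rounds divide by R = 2^384, matching the postcondition eval arg1 * ((2^64)^-1 mod m)^6. A tiny demo of the low-limb cancellation; the names are illustrative, not part of this file.

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	const mPrime = 0x100000001 // -m^-1 mod 2^64; check: 0xffffffff * 0x100000001 = 2^64 - 1
	const m0 = 0xffffffff      // lowest limb of the P-384 prime m
	t0 := uint64(0x0123456789abcdef) // arbitrary low limb of the running value
	q := t0 * mPrime                 // mirrors "_, x2 = bits.Mul64(x1, 0x100000001)" above
	_, lo := bits.Mul64(q, m0)
	fmt.Printf("%#x\n", t0+lo) // 0x0: the low limb of t + q*m is always zero
}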
-
-// p384ToMontgomery translates a field element into the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// Postconditions:
-// eval (from_montgomery out1) mod m = eval arg1 mod m
-// 0 ≤ eval out1 < m
-//
-func p384ToMontgomery(out1 *p384MontgomeryDomainFieldElement, arg1 *p384NonMontgomeryDomainFieldElement) {
- x1 := arg1[1]
- x2 := arg1[2]
- x3 := arg1[3]
- x4 := arg1[4]
- x5 := arg1[5]
- x6 := arg1[0]
- var x7 uint64
- var x8 uint64
- x8, x7 = bits.Mul64(x6, 0x200000000)
- var x9 uint64
- var x10 uint64
- x10, x9 = bits.Mul64(x6, 0xfffffffe00000000)
- var x11 uint64
- var x12 uint64
- x12, x11 = bits.Mul64(x6, 0x200000000)
- var x13 uint64
- var x14 uint64
- x14, x13 = bits.Mul64(x6, 0xfffffffe00000001)
- var x15 uint64
- var x16 uint64
- x15, x16 = bits.Add64(x14, x11, uint64(0x0))
- var x17 uint64
- var x18 uint64
- x17, x18 = bits.Add64(x12, x9, uint64(p384Uint1(x16)))
- var x19 uint64
- var x20 uint64
- x19, x20 = bits.Add64(x10, x7, uint64(p384Uint1(x18)))
- var x21 uint64
- var x22 uint64
- x21, x22 = bits.Add64(x8, x6, uint64(p384Uint1(x20)))
- var x23 uint64
- _, x23 = bits.Mul64(x13, 0x100000001)
- var x25 uint64
- var x26 uint64
- x26, x25 = bits.Mul64(x23, 0xffffffffffffffff)
- var x27 uint64
- var x28 uint64
- x28, x27 = bits.Mul64(x23, 0xffffffffffffffff)
- var x29 uint64
- var x30 uint64
- x30, x29 = bits.Mul64(x23, 0xffffffffffffffff)
- var x31 uint64
- var x32 uint64
- x32, x31 = bits.Mul64(x23, 0xfffffffffffffffe)
- var x33 uint64
- var x34 uint64
- x34, x33 = bits.Mul64(x23, 0xffffffff00000000)
- var x35 uint64
- var x36 uint64
- x36, x35 = bits.Mul64(x23, 0xffffffff)
- var x37 uint64
- var x38 uint64
- x37, x38 = bits.Add64(x36, x33, uint64(0x0))
- var x39 uint64
- var x40 uint64
- x39, x40 = bits.Add64(x34, x31, uint64(p384Uint1(x38)))
- var x41 uint64
- var x42 uint64
- x41, x42 = bits.Add64(x32, x29, uint64(p384Uint1(x40)))
- var x43 uint64
- var x44 uint64
- x43, x44 = bits.Add64(x30, x27, uint64(p384Uint1(x42)))
- var x45 uint64
- var x46 uint64
- x45, x46 = bits.Add64(x28, x25, uint64(p384Uint1(x44)))
- var x48 uint64
- _, x48 = bits.Add64(x13, x35, uint64(0x0))
- var x49 uint64
- var x50 uint64
- x49, x50 = bits.Add64(x15, x37, uint64(p384Uint1(x48)))
- var x51 uint64
- var x52 uint64
- x51, x52 = bits.Add64(x17, x39, uint64(p384Uint1(x50)))
- var x53 uint64
- var x54 uint64
- x53, x54 = bits.Add64(x19, x41, uint64(p384Uint1(x52)))
- var x55 uint64
- var x56 uint64
- x55, x56 = bits.Add64(x21, x43, uint64(p384Uint1(x54)))
- var x57 uint64
- var x58 uint64
- x57, x58 = bits.Add64(uint64(p384Uint1(x22)), x45, uint64(p384Uint1(x56)))
- var x59 uint64
- var x60 uint64
- x59, x60 = bits.Add64(uint64(0x0), (uint64(p384Uint1(x46)) + x26), uint64(p384Uint1(x58)))
- var x61 uint64
- var x62 uint64
- x62, x61 = bits.Mul64(x1, 0x200000000)
- var x63 uint64
- var x64 uint64
- x64, x63 = bits.Mul64(x1, 0xfffffffe00000000)
- var x65 uint64
- var x66 uint64
- x66, x65 = bits.Mul64(x1, 0x200000000)
- var x67 uint64
- var x68 uint64
- x68, x67 = bits.Mul64(x1, 0xfffffffe00000001)
- var x69 uint64
- var x70 uint64
- x69, x70 = bits.Add64(x68, x65, uint64(0x0))
- var x71 uint64
- var x72 uint64
- x71, x72 = bits.Add64(x66, x63, uint64(p384Uint1(x70)))
- var x73 uint64
- var x74 uint64
- x73, x74 = bits.Add64(x64, x61, uint64(p384Uint1(x72)))
- var x75 uint64
- var x76 uint64
- x75, x76 = bits.Add64(x62, x1, uint64(p384Uint1(x74)))
- var x77 uint64
- var x78 uint64
- x77, x78 = bits.Add64(x49, x67, uint64(0x0))
- var x79 uint64
- var x80 uint64
- x79, x80 = bits.Add64(x51, x69, uint64(p384Uint1(x78)))
- var x81 uint64
- var x82 uint64
- x81, x82 = bits.Add64(x53, x71, uint64(p384Uint1(x80)))
- var x83 uint64
- var x84 uint64
- x83, x84 = bits.Add64(x55, x73, uint64(p384Uint1(x82)))
- var x85 uint64
- var x86 uint64
- x85, x86 = bits.Add64(x57, x75, uint64(p384Uint1(x84)))
- var x87 uint64
- var x88 uint64
- x87, x88 = bits.Add64(x59, uint64(p384Uint1(x76)), uint64(p384Uint1(x86)))
- var x89 uint64
- _, x89 = bits.Mul64(x77, 0x100000001)
- var x91 uint64
- var x92 uint64
- x92, x91 = bits.Mul64(x89, 0xffffffffffffffff)
- var x93 uint64
- var x94 uint64
- x94, x93 = bits.Mul64(x89, 0xffffffffffffffff)
- var x95 uint64
- var x96 uint64
- x96, x95 = bits.Mul64(x89, 0xffffffffffffffff)
- var x97 uint64
- var x98 uint64
- x98, x97 = bits.Mul64(x89, 0xfffffffffffffffe)
- var x99 uint64
- var x100 uint64
- x100, x99 = bits.Mul64(x89, 0xffffffff00000000)
- var x101 uint64
- var x102 uint64
- x102, x101 = bits.Mul64(x89, 0xffffffff)
- var x103 uint64
- var x104 uint64
- x103, x104 = bits.Add64(x102, x99, uint64(0x0))
- var x105 uint64
- var x106 uint64
- x105, x106 = bits.Add64(x100, x97, uint64(p384Uint1(x104)))
- var x107 uint64
- var x108 uint64
- x107, x108 = bits.Add64(x98, x95, uint64(p384Uint1(x106)))
- var x109 uint64
- var x110 uint64
- x109, x110 = bits.Add64(x96, x93, uint64(p384Uint1(x108)))
- var x111 uint64
- var x112 uint64
- x111, x112 = bits.Add64(x94, x91, uint64(p384Uint1(x110)))
- var x114 uint64
- _, x114 = bits.Add64(x77, x101, uint64(0x0))
- var x115 uint64
- var x116 uint64
- x115, x116 = bits.Add64(x79, x103, uint64(p384Uint1(x114)))
- var x117 uint64
- var x118 uint64
- x117, x118 = bits.Add64(x81, x105, uint64(p384Uint1(x116)))
- var x119 uint64
- var x120 uint64
- x119, x120 = bits.Add64(x83, x107, uint64(p384Uint1(x118)))
- var x121 uint64
- var x122 uint64
- x121, x122 = bits.Add64(x85, x109, uint64(p384Uint1(x120)))
- var x123 uint64
- var x124 uint64
- x123, x124 = bits.Add64(x87, x111, uint64(p384Uint1(x122)))
- var x125 uint64
- var x126 uint64
- x125, x126 = bits.Add64((uint64(p384Uint1(x88)) + uint64(p384Uint1(x60))), (uint64(p384Uint1(x112)) + x92), uint64(p384Uint1(x124)))
- var x127 uint64
- var x128 uint64
- x128, x127 = bits.Mul64(x2, 0x200000000)
- var x129 uint64
- var x130 uint64
- x130, x129 = bits.Mul64(x2, 0xfffffffe00000000)
- var x131 uint64
- var x132 uint64
- x132, x131 = bits.Mul64(x2, 0x200000000)
- var x133 uint64
- var x134 uint64
- x134, x133 = bits.Mul64(x2, 0xfffffffe00000001)
- var x135 uint64
- var x136 uint64
- x135, x136 = bits.Add64(x134, x131, uint64(0x0))
- var x137 uint64
- var x138 uint64
- x137, x138 = bits.Add64(x132, x129, uint64(p384Uint1(x136)))
- var x139 uint64
- var x140 uint64
- x139, x140 = bits.Add64(x130, x127, uint64(p384Uint1(x138)))
- var x141 uint64
- var x142 uint64
- x141, x142 = bits.Add64(x128, x2, uint64(p384Uint1(x140)))
- var x143 uint64
- var x144 uint64
- x143, x144 = bits.Add64(x115, x133, uint64(0x0))
- var x145 uint64
- var x146 uint64
- x145, x146 = bits.Add64(x117, x135, uint64(p384Uint1(x144)))
- var x147 uint64
- var x148 uint64
- x147, x148 = bits.Add64(x119, x137, uint64(p384Uint1(x146)))
- var x149 uint64
- var x150 uint64
- x149, x150 = bits.Add64(x121, x139, uint64(p384Uint1(x148)))
- var x151 uint64
- var x152 uint64
- x151, x152 = bits.Add64(x123, x141, uint64(p384Uint1(x150)))
- var x153 uint64
- var x154 uint64
- x153, x154 = bits.Add64(x125, uint64(p384Uint1(x142)), uint64(p384Uint1(x152)))
- var x155 uint64
- _, x155 = bits.Mul64(x143, 0x100000001)
- var x157 uint64
- var x158 uint64
- x158, x157 = bits.Mul64(x155, 0xffffffffffffffff)
- var x159 uint64
- var x160 uint64
- x160, x159 = bits.Mul64(x155, 0xffffffffffffffff)
- var x161 uint64
- var x162 uint64
- x162, x161 = bits.Mul64(x155, 0xffffffffffffffff)
- var x163 uint64
- var x164 uint64
- x164, x163 = bits.Mul64(x155, 0xfffffffffffffffe)
- var x165 uint64
- var x166 uint64
- x166, x165 = bits.Mul64(x155, 0xffffffff00000000)
- var x167 uint64
- var x168 uint64
- x168, x167 = bits.Mul64(x155, 0xffffffff)
- var x169 uint64
- var x170 uint64
- x169, x170 = bits.Add64(x168, x165, uint64(0x0))
- var x171 uint64
- var x172 uint64
- x171, x172 = bits.Add64(x166, x163, uint64(p384Uint1(x170)))
- var x173 uint64
- var x174 uint64
- x173, x174 = bits.Add64(x164, x161, uint64(p384Uint1(x172)))
- var x175 uint64
- var x176 uint64
- x175, x176 = bits.Add64(x162, x159, uint64(p384Uint1(x174)))
- var x177 uint64
- var x178 uint64
- x177, x178 = bits.Add64(x160, x157, uint64(p384Uint1(x176)))
- var x180 uint64
- _, x180 = bits.Add64(x143, x167, uint64(0x0))
- var x181 uint64
- var x182 uint64
- x181, x182 = bits.Add64(x145, x169, uint64(p384Uint1(x180)))
- var x183 uint64
- var x184 uint64
- x183, x184 = bits.Add64(x147, x171, uint64(p384Uint1(x182)))
- var x185 uint64
- var x186 uint64
- x185, x186 = bits.Add64(x149, x173, uint64(p384Uint1(x184)))
- var x187 uint64
- var x188 uint64
- x187, x188 = bits.Add64(x151, x175, uint64(p384Uint1(x186)))
- var x189 uint64
- var x190 uint64
- x189, x190 = bits.Add64(x153, x177, uint64(p384Uint1(x188)))
- var x191 uint64
- var x192 uint64
- x191, x192 = bits.Add64((uint64(p384Uint1(x154)) + uint64(p384Uint1(x126))), (uint64(p384Uint1(x178)) + x158), uint64(p384Uint1(x190)))
- var x193 uint64
- var x194 uint64
- x194, x193 = bits.Mul64(x3, 0x200000000)
- var x195 uint64
- var x196 uint64
- x196, x195 = bits.Mul64(x3, 0xfffffffe00000000)
- var x197 uint64
- var x198 uint64
- x198, x197 = bits.Mul64(x3, 0x200000000)
- var x199 uint64
- var x200 uint64
- x200, x199 = bits.Mul64(x3, 0xfffffffe00000001)
- var x201 uint64
- var x202 uint64
- x201, x202 = bits.Add64(x200, x197, uint64(0x0))
- var x203 uint64
- var x204 uint64
- x203, x204 = bits.Add64(x198, x195, uint64(p384Uint1(x202)))
- var x205 uint64
- var x206 uint64
- x205, x206 = bits.Add64(x196, x193, uint64(p384Uint1(x204)))
- var x207 uint64
- var x208 uint64
- x207, x208 = bits.Add64(x194, x3, uint64(p384Uint1(x206)))
- var x209 uint64
- var x210 uint64
- x209, x210 = bits.Add64(x181, x199, uint64(0x0))
- var x211 uint64
- var x212 uint64
- x211, x212 = bits.Add64(x183, x201, uint64(p384Uint1(x210)))
- var x213 uint64
- var x214 uint64
- x213, x214 = bits.Add64(x185, x203, uint64(p384Uint1(x212)))
- var x215 uint64
- var x216 uint64
- x215, x216 = bits.Add64(x187, x205, uint64(p384Uint1(x214)))
- var x217 uint64
- var x218 uint64
- x217, x218 = bits.Add64(x189, x207, uint64(p384Uint1(x216)))
- var x219 uint64
- var x220 uint64
- x219, x220 = bits.Add64(x191, uint64(p384Uint1(x208)), uint64(p384Uint1(x218)))
- var x221 uint64
- _, x221 = bits.Mul64(x209, 0x100000001)
- var x223 uint64
- var x224 uint64
- x224, x223 = bits.Mul64(x221, 0xffffffffffffffff)
- var x225 uint64
- var x226 uint64
- x226, x225 = bits.Mul64(x221, 0xffffffffffffffff)
- var x227 uint64
- var x228 uint64
- x228, x227 = bits.Mul64(x221, 0xffffffffffffffff)
- var x229 uint64
- var x230 uint64
- x230, x229 = bits.Mul64(x221, 0xfffffffffffffffe)
- var x231 uint64
- var x232 uint64
- x232, x231 = bits.Mul64(x221, 0xffffffff00000000)
- var x233 uint64
- var x234 uint64
- x234, x233 = bits.Mul64(x221, 0xffffffff)
- var x235 uint64
- var x236 uint64
- x235, x236 = bits.Add64(x234, x231, uint64(0x0))
- var x237 uint64
- var x238 uint64
- x237, x238 = bits.Add64(x232, x229, uint64(p384Uint1(x236)))
- var x239 uint64
- var x240 uint64
- x239, x240 = bits.Add64(x230, x227, uint64(p384Uint1(x238)))
- var x241 uint64
- var x242 uint64
- x241, x242 = bits.Add64(x228, x225, uint64(p384Uint1(x240)))
- var x243 uint64
- var x244 uint64
- x243, x244 = bits.Add64(x226, x223, uint64(p384Uint1(x242)))
- var x246 uint64
- _, x246 = bits.Add64(x209, x233, uint64(0x0))
- var x247 uint64
- var x248 uint64
- x247, x248 = bits.Add64(x211, x235, uint64(p384Uint1(x246)))
- var x249 uint64
- var x250 uint64
- x249, x250 = bits.Add64(x213, x237, uint64(p384Uint1(x248)))
- var x251 uint64
- var x252 uint64
- x251, x252 = bits.Add64(x215, x239, uint64(p384Uint1(x250)))
- var x253 uint64
- var x254 uint64
- x253, x254 = bits.Add64(x217, x241, uint64(p384Uint1(x252)))
- var x255 uint64
- var x256 uint64
- x255, x256 = bits.Add64(x219, x243, uint64(p384Uint1(x254)))
- var x257 uint64
- var x258 uint64
- x257, x258 = bits.Add64((uint64(p384Uint1(x220)) + uint64(p384Uint1(x192))), (uint64(p384Uint1(x244)) + x224), uint64(p384Uint1(x256)))
- var x259 uint64
- var x260 uint64
- x260, x259 = bits.Mul64(x4, 0x200000000)
- var x261 uint64
- var x262 uint64
- x262, x261 = bits.Mul64(x4, 0xfffffffe00000000)
- var x263 uint64
- var x264 uint64
- x264, x263 = bits.Mul64(x4, 0x200000000)
- var x265 uint64
- var x266 uint64
- x266, x265 = bits.Mul64(x4, 0xfffffffe00000001)
- var x267 uint64
- var x268 uint64
- x267, x268 = bits.Add64(x266, x263, uint64(0x0))
- var x269 uint64
- var x270 uint64
- x269, x270 = bits.Add64(x264, x261, uint64(p384Uint1(x268)))
- var x271 uint64
- var x272 uint64
- x271, x272 = bits.Add64(x262, x259, uint64(p384Uint1(x270)))
- var x273 uint64
- var x274 uint64
- x273, x274 = bits.Add64(x260, x4, uint64(p384Uint1(x272)))
- var x275 uint64
- var x276 uint64
- x275, x276 = bits.Add64(x247, x265, uint64(0x0))
- var x277 uint64
- var x278 uint64
- x277, x278 = bits.Add64(x249, x267, uint64(p384Uint1(x276)))
- var x279 uint64
- var x280 uint64
- x279, x280 = bits.Add64(x251, x269, uint64(p384Uint1(x278)))
- var x281 uint64
- var x282 uint64
- x281, x282 = bits.Add64(x253, x271, uint64(p384Uint1(x280)))
- var x283 uint64
- var x284 uint64
- x283, x284 = bits.Add64(x255, x273, uint64(p384Uint1(x282)))
- var x285 uint64
- var x286 uint64
- x285, x286 = bits.Add64(x257, uint64(p384Uint1(x274)), uint64(p384Uint1(x284)))
- var x287 uint64
- _, x287 = bits.Mul64(x275, 0x100000001)
- var x289 uint64
- var x290 uint64
- x290, x289 = bits.Mul64(x287, 0xffffffffffffffff)
- var x291 uint64
- var x292 uint64
- x292, x291 = bits.Mul64(x287, 0xffffffffffffffff)
- var x293 uint64
- var x294 uint64
- x294, x293 = bits.Mul64(x287, 0xffffffffffffffff)
- var x295 uint64
- var x296 uint64
- x296, x295 = bits.Mul64(x287, 0xfffffffffffffffe)
- var x297 uint64
- var x298 uint64
- x298, x297 = bits.Mul64(x287, 0xffffffff00000000)
- var x299 uint64
- var x300 uint64
- x300, x299 = bits.Mul64(x287, 0xffffffff)
- var x301 uint64
- var x302 uint64
- x301, x302 = bits.Add64(x300, x297, uint64(0x0))
- var x303 uint64
- var x304 uint64
- x303, x304 = bits.Add64(x298, x295, uint64(p384Uint1(x302)))
- var x305 uint64
- var x306 uint64
- x305, x306 = bits.Add64(x296, x293, uint64(p384Uint1(x304)))
- var x307 uint64
- var x308 uint64
- x307, x308 = bits.Add64(x294, x291, uint64(p384Uint1(x306)))
- var x309 uint64
- var x310 uint64
- x309, x310 = bits.Add64(x292, x289, uint64(p384Uint1(x308)))
- var x312 uint64
- _, x312 = bits.Add64(x275, x299, uint64(0x0))
- var x313 uint64
- var x314 uint64
- x313, x314 = bits.Add64(x277, x301, uint64(p384Uint1(x312)))
- var x315 uint64
- var x316 uint64
- x315, x316 = bits.Add64(x279, x303, uint64(p384Uint1(x314)))
- var x317 uint64
- var x318 uint64
- x317, x318 = bits.Add64(x281, x305, uint64(p384Uint1(x316)))
- var x319 uint64
- var x320 uint64
- x319, x320 = bits.Add64(x283, x307, uint64(p384Uint1(x318)))
- var x321 uint64
- var x322 uint64
- x321, x322 = bits.Add64(x285, x309, uint64(p384Uint1(x320)))
- var x323 uint64
- var x324 uint64
- x323, x324 = bits.Add64((uint64(p384Uint1(x286)) + uint64(p384Uint1(x258))), (uint64(p384Uint1(x310)) + x290), uint64(p384Uint1(x322)))
- var x325 uint64
- var x326 uint64
- x326, x325 = bits.Mul64(x5, 0x200000000)
- var x327 uint64
- var x328 uint64
- x328, x327 = bits.Mul64(x5, 0xfffffffe00000000)
- var x329 uint64
- var x330 uint64
- x330, x329 = bits.Mul64(x5, 0x200000000)
- var x331 uint64
- var x332 uint64
- x332, x331 = bits.Mul64(x5, 0xfffffffe00000001)
- var x333 uint64
- var x334 uint64
- x333, x334 = bits.Add64(x332, x329, uint64(0x0))
- var x335 uint64
- var x336 uint64
- x335, x336 = bits.Add64(x330, x327, uint64(p384Uint1(x334)))
- var x337 uint64
- var x338 uint64
- x337, x338 = bits.Add64(x328, x325, uint64(p384Uint1(x336)))
- var x339 uint64
- var x340 uint64
- x339, x340 = bits.Add64(x326, x5, uint64(p384Uint1(x338)))
- var x341 uint64
- var x342 uint64
- x341, x342 = bits.Add64(x313, x331, uint64(0x0))
- var x343 uint64
- var x344 uint64
- x343, x344 = bits.Add64(x315, x333, uint64(p384Uint1(x342)))
- var x345 uint64
- var x346 uint64
- x345, x346 = bits.Add64(x317, x335, uint64(p384Uint1(x344)))
- var x347 uint64
- var x348 uint64
- x347, x348 = bits.Add64(x319, x337, uint64(p384Uint1(x346)))
- var x349 uint64
- var x350 uint64
- x349, x350 = bits.Add64(x321, x339, uint64(p384Uint1(x348)))
- var x351 uint64
- var x352 uint64
- x351, x352 = bits.Add64(x323, uint64(p384Uint1(x340)), uint64(p384Uint1(x350)))
- var x353 uint64
- _, x353 = bits.Mul64(x341, 0x100000001)
- var x355 uint64
- var x356 uint64
- x356, x355 = bits.Mul64(x353, 0xffffffffffffffff)
- var x357 uint64
- var x358 uint64
- x358, x357 = bits.Mul64(x353, 0xffffffffffffffff)
- var x359 uint64
- var x360 uint64
- x360, x359 = bits.Mul64(x353, 0xffffffffffffffff)
- var x361 uint64
- var x362 uint64
- x362, x361 = bits.Mul64(x353, 0xfffffffffffffffe)
- var x363 uint64
- var x364 uint64
- x364, x363 = bits.Mul64(x353, 0xffffffff00000000)
- var x365 uint64
- var x366 uint64
- x366, x365 = bits.Mul64(x353, 0xffffffff)
- var x367 uint64
- var x368 uint64
- x367, x368 = bits.Add64(x366, x363, uint64(0x0))
- var x369 uint64
- var x370 uint64
- x369, x370 = bits.Add64(x364, x361, uint64(p384Uint1(x368)))
- var x371 uint64
- var x372 uint64
- x371, x372 = bits.Add64(x362, x359, uint64(p384Uint1(x370)))
- var x373 uint64
- var x374 uint64
- x373, x374 = bits.Add64(x360, x357, uint64(p384Uint1(x372)))
- var x375 uint64
- var x376 uint64
- x375, x376 = bits.Add64(x358, x355, uint64(p384Uint1(x374)))
- var x378 uint64
- _, x378 = bits.Add64(x341, x365, uint64(0x0))
- var x379 uint64
- var x380 uint64
- x379, x380 = bits.Add64(x343, x367, uint64(p384Uint1(x378)))
- var x381 uint64
- var x382 uint64
- x381, x382 = bits.Add64(x345, x369, uint64(p384Uint1(x380)))
- var x383 uint64
- var x384 uint64
- x383, x384 = bits.Add64(x347, x371, uint64(p384Uint1(x382)))
- var x385 uint64
- var x386 uint64
- x385, x386 = bits.Add64(x349, x373, uint64(p384Uint1(x384)))
- var x387 uint64
- var x388 uint64
- x387, x388 = bits.Add64(x351, x375, uint64(p384Uint1(x386)))
- var x389 uint64
- var x390 uint64
- x389, x390 = bits.Add64((uint64(p384Uint1(x352)) + uint64(p384Uint1(x324))), (uint64(p384Uint1(x376)) + x356), uint64(p384Uint1(x388)))
- var x391 uint64
- var x392 uint64
- x391, x392 = bits.Sub64(x379, 0xffffffff, uint64(0x0))
- var x393 uint64
- var x394 uint64
- x393, x394 = bits.Sub64(x381, 0xffffffff00000000, uint64(p384Uint1(x392)))
- var x395 uint64
- var x396 uint64
- x395, x396 = bits.Sub64(x383, 0xfffffffffffffffe, uint64(p384Uint1(x394)))
- var x397 uint64
- var x398 uint64
- x397, x398 = bits.Sub64(x385, 0xffffffffffffffff, uint64(p384Uint1(x396)))
- var x399 uint64
- var x400 uint64
- x399, x400 = bits.Sub64(x387, 0xffffffffffffffff, uint64(p384Uint1(x398)))
- var x401 uint64
- var x402 uint64
- x401, x402 = bits.Sub64(x389, 0xffffffffffffffff, uint64(p384Uint1(x400)))
- var x404 uint64
- _, x404 = bits.Sub64(uint64(p384Uint1(x390)), uint64(0x0), uint64(p384Uint1(x402)))
- var x405 uint64
- p384CmovznzU64(&x405, p384Uint1(x404), x391, x379)
- var x406 uint64
- p384CmovznzU64(&x406, p384Uint1(x404), x393, x381)
- var x407 uint64
- p384CmovznzU64(&x407, p384Uint1(x404), x395, x383)
- var x408 uint64
- p384CmovznzU64(&x408, p384Uint1(x404), x397, x385)
- var x409 uint64
- p384CmovznzU64(&x409, p384Uint1(x404), x399, x387)
- var x410 uint64
- p384CmovznzU64(&x410, p384Uint1(x404), x401, x389)
- out1[0] = x405
- out1[1] = x406
- out1[2] = x407
- out1[3] = x408
- out1[4] = x409
- out1[5] = x410
-}
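
p384ToMontgomery is a Montgomery multiplication by the fixed constant R^2 mod m: the multiplier limbs 0xfffffffe00000001, 0x200000000, 0xfffffffe00000000, 0x200000000 (plus the bare additions of the input word, which stand in for a fifth limb of 1) appear to be exactly that constant, since REDC(x * R^2) = x*R mod m. A math/big check of the constant; it is hand-derived here, so worth re-running.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	m := new(big.Int).Lsh(big.NewInt(1), 384)
	m.Sub(m, new(big.Int).Lsh(big.NewInt(1), 128))
	m.Sub(m, new(big.Int).Lsh(big.NewInt(1), 96))
	m.Add(m, new(big.Int).Lsh(big.NewInt(1), 32))
	m.Sub(m, big.NewInt(1)) // the P-384 prime

	r2 := new(big.Int).Exp(big.NewInt(2), big.NewInt(768), m) // R^2 = 2^768 mod m
	// Expected: 10000000200000000fffffffe000000000000000200000000fffffffe00000001,
	// i.e. little-endian limbs 0xfffffffe00000001, 0x200000000,
	// 0xfffffffe00000000, 0x200000000, 0x1, 0x0 — the constants used above.
	fmt.Printf("%x\n", r2)
}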
-
-// p384Selectznz is a multi-limb conditional select.
-//
-// Postconditions:
-// eval out1 = (if arg1 = 0 then eval arg2 else eval arg3)
-//
-// Input Bounds:
-// arg1: [0x0 ~> 0x1]
-// arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-// arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-// Output Bounds:
-// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-func p384Selectznz(out1 *[6]uint64, arg1 p384Uint1, arg2 *[6]uint64, arg3 *[6]uint64) {
- var x1 uint64
- p384CmovznzU64(&x1, arg1, arg2[0], arg3[0])
- var x2 uint64
- p384CmovznzU64(&x2, arg1, arg2[1], arg3[1])
- var x3 uint64
- p384CmovznzU64(&x3, arg1, arg2[2], arg3[2])
- var x4 uint64
- p384CmovznzU64(&x4, arg1, arg2[3], arg3[3])
- var x5 uint64
- p384CmovznzU64(&x5, arg1, arg2[4], arg3[4])
- var x6 uint64
- p384CmovznzU64(&x6, arg1, arg2[5], arg3[5])
- out1[0] = x1
- out1[1] = x2
- out1[2] = x3
- out1[3] = x4
- out1[4] = x5
- out1[5] = x6
-}
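
p384Selectznz simply applies the single-word conditional move across all six limbs, which is how callers pick one of two field elements in constant time (for example, in precomputed-table lookups). A hypothetical stand-alone equivalent, using the same multiply-by-all-ones mask as the generated code (ctSelect is our name, not part of the generated API):

package main

import "fmt"

// ctSelect returns a when bit == 0 and b when bit == 1, limb-wise and
// branch-free, mirroring the structure of p384Selectznz.
func ctSelect(bit uint64, a, b *[6]uint64) (out [6]uint64) {
	mask := bit * 0xffffffffffffffff // 0x0 or all ones
	for i := range out {
		out[i] = (^mask & a[i]) | (mask & b[i])
	}
	return
}

func main() {
	a := [6]uint64{1, 2, 3, 4, 5, 6}
	b := [6]uint64{7, 8, 9, 10, 11, 12}
	fmt.Println(ctSelect(0, &a, &b)) // prints a's limbs
	fmt.Println(ctSelect(1, &a, &b)) // prints b's limbs
}
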
-
-// p384ToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// Postconditions:
-// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..47]
-//
-// Input Bounds:
-// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-// Output Bounds:
-// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
-func p384ToBytes(out1 *[48]uint8, arg1 *[6]uint64) {
- x1 := arg1[5]
- x2 := arg1[4]
- x3 := arg1[3]
- x4 := arg1[2]
- x5 := arg1[1]
- x6 := arg1[0]
- x7 := (uint8(x6) & 0xff)
- x8 := (x6 >> 8)
- x9 := (uint8(x8) & 0xff)
- x10 := (x8 >> 8)
- x11 := (uint8(x10) & 0xff)
- x12 := (x10 >> 8)
- x13 := (uint8(x12) & 0xff)
- x14 := (x12 >> 8)
- x15 := (uint8(x14) & 0xff)
- x16 := (x14 >> 8)
- x17 := (uint8(x16) & 0xff)
- x18 := (x16 >> 8)
- x19 := (uint8(x18) & 0xff)
- x20 := uint8((x18 >> 8))
- x21 := (uint8(x5) & 0xff)
- x22 := (x5 >> 8)
- x23 := (uint8(x22) & 0xff)
- x24 := (x22 >> 8)
- x25 := (uint8(x24) & 0xff)
- x26 := (x24 >> 8)
- x27 := (uint8(x26) & 0xff)
- x28 := (x26 >> 8)
- x29 := (uint8(x28) & 0xff)
- x30 := (x28 >> 8)
- x31 := (uint8(x30) & 0xff)
- x32 := (x30 >> 8)
- x33 := (uint8(x32) & 0xff)
- x34 := uint8((x32 >> 8))
- x35 := (uint8(x4) & 0xff)
- x36 := (x4 >> 8)
- x37 := (uint8(x36) & 0xff)
- x38 := (x36 >> 8)
- x39 := (uint8(x38) & 0xff)
- x40 := (x38 >> 8)
- x41 := (uint8(x40) & 0xff)
- x42 := (x40 >> 8)
- x43 := (uint8(x42) & 0xff)
- x44 := (x42 >> 8)
- x45 := (uint8(x44) & 0xff)
- x46 := (x44 >> 8)
- x47 := (uint8(x46) & 0xff)
- x48 := uint8((x46 >> 8))
- x49 := (uint8(x3) & 0xff)
- x50 := (x3 >> 8)
- x51 := (uint8(x50) & 0xff)
- x52 := (x50 >> 8)
- x53 := (uint8(x52) & 0xff)
- x54 := (x52 >> 8)
- x55 := (uint8(x54) & 0xff)
- x56 := (x54 >> 8)
- x57 := (uint8(x56) & 0xff)
- x58 := (x56 >> 8)
- x59 := (uint8(x58) & 0xff)
- x60 := (x58 >> 8)
- x61 := (uint8(x60) & 0xff)
- x62 := uint8((x60 >> 8))
- x63 := (uint8(x2) & 0xff)
- x64 := (x2 >> 8)
- x65 := (uint8(x64) & 0xff)
- x66 := (x64 >> 8)
- x67 := (uint8(x66) & 0xff)
- x68 := (x66 >> 8)
- x69 := (uint8(x68) & 0xff)
- x70 := (x68 >> 8)
- x71 := (uint8(x70) & 0xff)
- x72 := (x70 >> 8)
- x73 := (uint8(x72) & 0xff)
- x74 := (x72 >> 8)
- x75 := (uint8(x74) & 0xff)
- x76 := uint8((x74 >> 8))
- x77 := (uint8(x1) & 0xff)
- x78 := (x1 >> 8)
- x79 := (uint8(x78) & 0xff)
- x80 := (x78 >> 8)
- x81 := (uint8(x80) & 0xff)
- x82 := (x80 >> 8)
- x83 := (uint8(x82) & 0xff)
- x84 := (x82 >> 8)
- x85 := (uint8(x84) & 0xff)
- x86 := (x84 >> 8)
- x87 := (uint8(x86) & 0xff)
- x88 := (x86 >> 8)
- x89 := (uint8(x88) & 0xff)
- x90 := uint8((x88 >> 8))
- out1[0] = x7
- out1[1] = x9
- out1[2] = x11
- out1[3] = x13
- out1[4] = x15
- out1[5] = x17
- out1[6] = x19
- out1[7] = x20
- out1[8] = x21
- out1[9] = x23
- out1[10] = x25
- out1[11] = x27
- out1[12] = x29
- out1[13] = x31
- out1[14] = x33
- out1[15] = x34
- out1[16] = x35
- out1[17] = x37
- out1[18] = x39
- out1[19] = x41
- out1[20] = x43
- out1[21] = x45
- out1[22] = x47
- out1[23] = x48
- out1[24] = x49
- out1[25] = x51
- out1[26] = x53
- out1[27] = x55
- out1[28] = x57
- out1[29] = x59
- out1[30] = x61
- out1[31] = x62
- out1[32] = x63
- out1[33] = x65
- out1[34] = x67
- out1[35] = x69
- out1[36] = x71
- out1[37] = x73
- out1[38] = x75
- out1[39] = x76
- out1[40] = x77
- out1[41] = x79
- out1[42] = x81
- out1[43] = x83
- out1[44] = x85
- out1[45] = x87
- out1[46] = x89
- out1[47] = x90
-}
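
The unrolled shift-and-mask chain above extracts each 64-bit limb one byte at a time; it is equivalent to writing the limbs with encoding/binary in little-endian order. A compact sketch (toBytes is our name, not part of the generated API):

package main

import (
	"encoding/binary"
	"fmt"
)

// toBytes writes six little-endian limbs into a 48-byte array,
// byte order matching p384ToBytes.
func toBytes(limbs *[6]uint64) (out [48]byte) {
	for i, l := range limbs {
		binary.LittleEndian.PutUint64(out[8*i:], l)
	}
	return
}

func main() {
	x := [6]uint64{0x0706050403020100}
	b := toBytes(&x)
	fmt.Println(b[:8]) // [0 1 2 3 4 5 6 7]
}
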
-
-// p384FromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
-//
-// Preconditions:
-// 0 ≤ bytes_eval arg1 < m
-// Postconditions:
-// eval out1 mod m = bytes_eval arg1 mod m
-// 0 ≤ eval out1 < m
-//
-// Input Bounds:
-// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
-// Output Bounds:
-// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-func p384FromBytes(out1 *[6]uint64, arg1 *[48]uint8) {
- x1 := (uint64(arg1[47]) << 56)
- x2 := (uint64(arg1[46]) << 48)
- x3 := (uint64(arg1[45]) << 40)
- x4 := (uint64(arg1[44]) << 32)
- x5 := (uint64(arg1[43]) << 24)
- x6 := (uint64(arg1[42]) << 16)
- x7 := (uint64(arg1[41]) << 8)
- x8 := arg1[40]
- x9 := (uint64(arg1[39]) << 56)
- x10 := (uint64(arg1[38]) << 48)
- x11 := (uint64(arg1[37]) << 40)
- x12 := (uint64(arg1[36]) << 32)
- x13 := (uint64(arg1[35]) << 24)
- x14 := (uint64(arg1[34]) << 16)
- x15 := (uint64(arg1[33]) << 8)
- x16 := arg1[32]
- x17 := (uint64(arg1[31]) << 56)
- x18 := (uint64(arg1[30]) << 48)
- x19 := (uint64(arg1[29]) << 40)
- x20 := (uint64(arg1[28]) << 32)
- x21 := (uint64(arg1[27]) << 24)
- x22 := (uint64(arg1[26]) << 16)
- x23 := (uint64(arg1[25]) << 8)
- x24 := arg1[24]
- x25 := (uint64(arg1[23]) << 56)
- x26 := (uint64(arg1[22]) << 48)
- x27 := (uint64(arg1[21]) << 40)
- x28 := (uint64(arg1[20]) << 32)
- x29 := (uint64(arg1[19]) << 24)
- x30 := (uint64(arg1[18]) << 16)
- x31 := (uint64(arg1[17]) << 8)
- x32 := arg1[16]
- x33 := (uint64(arg1[15]) << 56)
- x34 := (uint64(arg1[14]) << 48)
- x35 := (uint64(arg1[13]) << 40)
- x36 := (uint64(arg1[12]) << 32)
- x37 := (uint64(arg1[11]) << 24)
- x38 := (uint64(arg1[10]) << 16)
- x39 := (uint64(arg1[9]) << 8)
- x40 := arg1[8]
- x41 := (uint64(arg1[7]) << 56)
- x42 := (uint64(arg1[6]) << 48)
- x43 := (uint64(arg1[5]) << 40)
- x44 := (uint64(arg1[4]) << 32)
- x45 := (uint64(arg1[3]) << 24)
- x46 := (uint64(arg1[2]) << 16)
- x47 := (uint64(arg1[1]) << 8)
- x48 := arg1[0]
- x49 := (x47 + uint64(x48))
- x50 := (x46 + x49)
- x51 := (x45 + x50)
- x52 := (x44 + x51)
- x53 := (x43 + x52)
- x54 := (x42 + x53)
- x55 := (x41 + x54)
- x56 := (x39 + uint64(x40))
- x57 := (x38 + x56)
- x58 := (x37 + x57)
- x59 := (x36 + x58)
- x60 := (x35 + x59)
- x61 := (x34 + x60)
- x62 := (x33 + x61)
- x63 := (x31 + uint64(x32))
- x64 := (x30 + x63)
- x65 := (x29 + x64)
- x66 := (x28 + x65)
- x67 := (x27 + x66)
- x68 := (x26 + x67)
- x69 := (x25 + x68)
- x70 := (x23 + uint64(x24))
- x71 := (x22 + x70)
- x72 := (x21 + x71)
- x73 := (x20 + x72)
- x74 := (x19 + x73)
- x75 := (x18 + x74)
- x76 := (x17 + x75)
- x77 := (x15 + uint64(x16))
- x78 := (x14 + x77)
- x79 := (x13 + x78)
- x80 := (x12 + x79)
- x81 := (x11 + x80)
- x82 := (x10 + x81)
- x83 := (x9 + x82)
- x84 := (x7 + uint64(x8))
- x85 := (x6 + x84)
- x86 := (x5 + x85)
- x87 := (x4 + x86)
- x88 := (x3 + x87)
- x89 := (x2 + x88)
- x90 := (x1 + x89)
- out1[0] = x55
- out1[1] = x62
- out1[2] = x69
- out1[3] = x76
- out1[4] = x83
- out1[5] = x90
-}
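
Conversely, each limb here is rebuilt as a sum of shifted bytes, which is the same positional reconstruction that binary.LittleEndian.Uint64 performs. A one-limb sketch of that identity:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := [8]byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07}
	var limb uint64
	for i := 7; i >= 0; i-- { // accumulate bytes high to low, as shifted sums
		limb = limb<<8 | uint64(b[i])
	}
	fmt.Println(limb == binary.LittleEndian.Uint64(b[:])) // true
}
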
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p521_fiat64.go b/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p521_fiat64.go
deleted file mode 100644
index 9f4f290f4c..0000000000
--- a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p521_fiat64.go
+++ /dev/null
@@ -1,5509 +0,0 @@
-// Code generated by Fiat Cryptography. DO NOT EDIT.
-//
-// Autogenerated: word_by_word_montgomery --lang Go --no-wide-int --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --internal-static --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name fiat --no-prefix-fiat p521 64 '2^521 - 1' mul square add sub one from_montgomery to_montgomery selectznz to_bytes from_bytes
-//
-// curve description: p521
-//
-// machine_wordsize = 64 (from "64")
-//
-// requested operations: mul, square, add, sub, one, from_montgomery, to_montgomery, selectznz, to_bytes, from_bytes
-//
-// m = 0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff (from "2^521 - 1")
-//
-//
-//
-// NOTE: In addition to the bounds specified above each function, all
-//
-// functions synthesized for this Montgomery arithmetic require the
-//
-// input to be strictly less than the prime modulus (m), and also
-//
-// require the input to be in the unique saturated representation.
-//
-// All functions also ensure that these two properties are true of
-//
-// return values.
-//
-//
-//
-// Computed values:
-//
-// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) + (z[4] << 256) + (z[5] << 0x140) + (z[6] << 0x180) + (z[7] << 0x1c0) + (z[8] << 2^9)
-//
-// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248) + (z[32] << 256) + (z[33] << 0x108) + (z[34] << 0x110) + (z[35] << 0x118) + (z[36] << 0x120) + (z[37] << 0x128) + (z[38] << 0x130) + (z[39] << 0x138) + (z[40] << 0x140) + (z[41] << 0x148) + (z[42] << 0x150) + (z[43] << 0x158) + (z[44] << 0x160) + (z[45] << 0x168) + (z[46] << 0x170) + (z[47] << 0x178) + (z[48] << 0x180) + (z[49] << 0x188) + (z[50] << 0x190) + (z[51] << 0x198) + (z[52] << 0x1a0) + (z[53] << 0x1a8) + (z[54] << 0x1b0) + (z[55] << 0x1b8) + (z[56] << 0x1c0) + (z[57] << 0x1c8) + (z[58] << 0x1d0) + (z[59] << 0x1d8) + (z[60] << 0x1e0) + (z[61] << 0x1e8) + (z[62] << 0x1f0) + (z[63] << 0x1f8) + (z[64] << 2^9) + (z[65] << 0x208)
-//
-// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) + (z[4] << 256) + (z[5] << 0x140) + (z[6] << 0x180) + (z[7] << 0x1c0) + (z[8] << 2^9) in
-//
-// if x1 & (2^576-1) < 2^575 then x1 & (2^576-1) else (x1 & (2^576-1)) - 2^576
-
-package fiat
-
-import "math/bits"
-
-type p521Uint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
-type p521Int1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
-
-// The type p521MontgomeryDomainFieldElement is a field element in the Montgomery domain.
-//
-// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-type p521MontgomeryDomainFieldElement [9]uint64
-
-// The type p521NonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
-//
-// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-type p521NonMontgomeryDomainFieldElement [9]uint64
-
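
The eval formula in the file header is ordinary base-2^64 positional notation over the nine limbs. A math/big sketch (eval is our helper name):

package main

import (
	"fmt"
	"math/big"
)

// eval z = z[0] + (z[1] << 64) + ... + (z[8] << 512), as in the file header.
func eval(z *[9]uint64) *big.Int {
	r := new(big.Int)
	for i := 8; i >= 0; i-- {
		r.Lsh(r, 64)
		r.Or(r, new(big.Int).SetUint64(z[i]))
	}
	return r
}

func main() {
	one := [9]uint64{1}
	fmt.Println(eval(&one)) // 1
}
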
-// p521CmovznzU64 is a single-word conditional move.
-//
-// Postconditions:
-// out1 = (if arg1 = 0 then arg2 else arg3)
-//
-// Input Bounds:
-// arg1: [0x0 ~> 0x1]
-// arg2: [0x0 ~> 0xffffffffffffffff]
-// arg3: [0x0 ~> 0xffffffffffffffff]
-// Output Bounds:
-// out1: [0x0 ~> 0xffffffffffffffff]
-func p521CmovznzU64(out1 *uint64, arg1 p521Uint1, arg2 uint64, arg3 uint64) {
- x1 := (uint64(arg1) * 0xffffffffffffffff)
- x2 := ((x1 & arg3) | ((^x1) & arg2))
- *out1 = x2
-}
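
The multiplication by 0xffffffffffffffff turns the 0/1 flag into an all-zeros or all-ones mask without a branch, and the blend on the next line then uses that mask to pick arg2 or arg3. A two-line demonstration of the mask identity:

package main

import "fmt"

func main() {
	// A 0/1 flag times 2^64-1 is 0x0 or all ones, with no branch.
	for _, bit := range []uint64{0, 1} {
		fmt.Printf("%#016x\n", bit*0xffffffffffffffff)
	}
}
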
-
-// p521Mul multiplies two field elements in the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// 0 ≤ eval arg2 < m
-// Postconditions:
-// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
-// 0 ≤ eval out1 < m
-//
-func p521Mul(out1 *p521MontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement, arg2 *p521MontgomeryDomainFieldElement) {
- x1 := arg1[1]
- x2 := arg1[2]
- x3 := arg1[3]
- x4 := arg1[4]
- x5 := arg1[5]
- x6 := arg1[6]
- x7 := arg1[7]
- x8 := arg1[8]
- x9 := arg1[0]
- var x10 uint64
- var x11 uint64
- x11, x10 = bits.Mul64(x9, arg2[8])
- var x12 uint64
- var x13 uint64
- x13, x12 = bits.Mul64(x9, arg2[7])
- var x14 uint64
- var x15 uint64
- x15, x14 = bits.Mul64(x9, arg2[6])
- var x16 uint64
- var x17 uint64
- x17, x16 = bits.Mul64(x9, arg2[5])
- var x18 uint64
- var x19 uint64
- x19, x18 = bits.Mul64(x9, arg2[4])
- var x20 uint64
- var x21 uint64
- x21, x20 = bits.Mul64(x9, arg2[3])
- var x22 uint64
- var x23 uint64
- x23, x22 = bits.Mul64(x9, arg2[2])
- var x24 uint64
- var x25 uint64
- x25, x24 = bits.Mul64(x9, arg2[1])
- var x26 uint64
- var x27 uint64
- x27, x26 = bits.Mul64(x9, arg2[0])
- var x28 uint64
- var x29 uint64
- x28, x29 = bits.Add64(x27, x24, uint64(0x0))
- var x30 uint64
- var x31 uint64
- x30, x31 = bits.Add64(x25, x22, uint64(p521Uint1(x29)))
- var x32 uint64
- var x33 uint64
- x32, x33 = bits.Add64(x23, x20, uint64(p521Uint1(x31)))
- var x34 uint64
- var x35 uint64
- x34, x35 = bits.Add64(x21, x18, uint64(p521Uint1(x33)))
- var x36 uint64
- var x37 uint64
- x36, x37 = bits.Add64(x19, x16, uint64(p521Uint1(x35)))
- var x38 uint64
- var x39 uint64
- x38, x39 = bits.Add64(x17, x14, uint64(p521Uint1(x37)))
- var x40 uint64
- var x41 uint64
- x40, x41 = bits.Add64(x15, x12, uint64(p521Uint1(x39)))
- var x42 uint64
- var x43 uint64
- x42, x43 = bits.Add64(x13, x10, uint64(p521Uint1(x41)))
- x44 := (uint64(p521Uint1(x43)) + x11)
- var x45 uint64
- var x46 uint64
- x46, x45 = bits.Mul64(x26, 0x1ff)
- var x47 uint64
- var x48 uint64
- x48, x47 = bits.Mul64(x26, 0xffffffffffffffff)
- var x49 uint64
- var x50 uint64
- x50, x49 = bits.Mul64(x26, 0xffffffffffffffff)
- var x51 uint64
- var x52 uint64
- x52, x51 = bits.Mul64(x26, 0xffffffffffffffff)
- var x53 uint64
- var x54 uint64
- x54, x53 = bits.Mul64(x26, 0xffffffffffffffff)
- var x55 uint64
- var x56 uint64
- x56, x55 = bits.Mul64(x26, 0xffffffffffffffff)
- var x57 uint64
- var x58 uint64
- x58, x57 = bits.Mul64(x26, 0xffffffffffffffff)
- var x59 uint64
- var x60 uint64
- x60, x59 = bits.Mul64(x26, 0xffffffffffffffff)
- var x61 uint64
- var x62 uint64
- x62, x61 = bits.Mul64(x26, 0xffffffffffffffff)
- var x63 uint64
- var x64 uint64
- x63, x64 = bits.Add64(x62, x59, uint64(0x0))
- var x65 uint64
- var x66 uint64
- x65, x66 = bits.Add64(x60, x57, uint64(p521Uint1(x64)))
- var x67 uint64
- var x68 uint64
- x67, x68 = bits.Add64(x58, x55, uint64(p521Uint1(x66)))
- var x69 uint64
- var x70 uint64
- x69, x70 = bits.Add64(x56, x53, uint64(p521Uint1(x68)))
- var x71 uint64
- var x72 uint64
- x71, x72 = bits.Add64(x54, x51, uint64(p521Uint1(x70)))
- var x73 uint64
- var x74 uint64
- x73, x74 = bits.Add64(x52, x49, uint64(p521Uint1(x72)))
- var x75 uint64
- var x76 uint64
- x75, x76 = bits.Add64(x50, x47, uint64(p521Uint1(x74)))
- var x77 uint64
- var x78 uint64
- x77, x78 = bits.Add64(x48, x45, uint64(p521Uint1(x76)))
- x79 := (uint64(p521Uint1(x78)) + x46)
- var x81 uint64
- _, x81 = bits.Add64(x26, x61, uint64(0x0))
- var x82 uint64
- var x83 uint64
- x82, x83 = bits.Add64(x28, x63, uint64(p521Uint1(x81)))
- var x84 uint64
- var x85 uint64
- x84, x85 = bits.Add64(x30, x65, uint64(p521Uint1(x83)))
- var x86 uint64
- var x87 uint64
- x86, x87 = bits.Add64(x32, x67, uint64(p521Uint1(x85)))
- var x88 uint64
- var x89 uint64
- x88, x89 = bits.Add64(x34, x69, uint64(p521Uint1(x87)))
- var x90 uint64
- var x91 uint64
- x90, x91 = bits.Add64(x36, x71, uint64(p521Uint1(x89)))
- var x92 uint64
- var x93 uint64
- x92, x93 = bits.Add64(x38, x73, uint64(p521Uint1(x91)))
- var x94 uint64
- var x95 uint64
- x94, x95 = bits.Add64(x40, x75, uint64(p521Uint1(x93)))
- var x96 uint64
- var x97 uint64
- x96, x97 = bits.Add64(x42, x77, uint64(p521Uint1(x95)))
- var x98 uint64
- var x99 uint64
- x98, x99 = bits.Add64(x44, x79, uint64(p521Uint1(x97)))
- var x100 uint64
- var x101 uint64
- x101, x100 = bits.Mul64(x1, arg2[8])
- var x102 uint64
- var x103 uint64
- x103, x102 = bits.Mul64(x1, arg2[7])
- var x104 uint64
- var x105 uint64
- x105, x104 = bits.Mul64(x1, arg2[6])
- var x106 uint64
- var x107 uint64
- x107, x106 = bits.Mul64(x1, arg2[5])
- var x108 uint64
- var x109 uint64
- x109, x108 = bits.Mul64(x1, arg2[4])
- var x110 uint64
- var x111 uint64
- x111, x110 = bits.Mul64(x1, arg2[3])
- var x112 uint64
- var x113 uint64
- x113, x112 = bits.Mul64(x1, arg2[2])
- var x114 uint64
- var x115 uint64
- x115, x114 = bits.Mul64(x1, arg2[1])
- var x116 uint64
- var x117 uint64
- x117, x116 = bits.Mul64(x1, arg2[0])
- var x118 uint64
- var x119 uint64
- x118, x119 = bits.Add64(x117, x114, uint64(0x0))
- var x120 uint64
- var x121 uint64
- x120, x121 = bits.Add64(x115, x112, uint64(p521Uint1(x119)))
- var x122 uint64
- var x123 uint64
- x122, x123 = bits.Add64(x113, x110, uint64(p521Uint1(x121)))
- var x124 uint64
- var x125 uint64
- x124, x125 = bits.Add64(x111, x108, uint64(p521Uint1(x123)))
- var x126 uint64
- var x127 uint64
- x126, x127 = bits.Add64(x109, x106, uint64(p521Uint1(x125)))
- var x128 uint64
- var x129 uint64
- x128, x129 = bits.Add64(x107, x104, uint64(p521Uint1(x127)))
- var x130 uint64
- var x131 uint64
- x130, x131 = bits.Add64(x105, x102, uint64(p521Uint1(x129)))
- var x132 uint64
- var x133 uint64
- x132, x133 = bits.Add64(x103, x100, uint64(p521Uint1(x131)))
- x134 := (uint64(p521Uint1(x133)) + x101)
- var x135 uint64
- var x136 uint64
- x135, x136 = bits.Add64(x82, x116, uint64(0x0))
- var x137 uint64
- var x138 uint64
- x137, x138 = bits.Add64(x84, x118, uint64(p521Uint1(x136)))
- var x139 uint64
- var x140 uint64
- x139, x140 = bits.Add64(x86, x120, uint64(p521Uint1(x138)))
- var x141 uint64
- var x142 uint64
- x141, x142 = bits.Add64(x88, x122, uint64(p521Uint1(x140)))
- var x143 uint64
- var x144 uint64
- x143, x144 = bits.Add64(x90, x124, uint64(p521Uint1(x142)))
- var x145 uint64
- var x146 uint64
- x145, x146 = bits.Add64(x92, x126, uint64(p521Uint1(x144)))
- var x147 uint64
- var x148 uint64
- x147, x148 = bits.Add64(x94, x128, uint64(p521Uint1(x146)))
- var x149 uint64
- var x150 uint64
- x149, x150 = bits.Add64(x96, x130, uint64(p521Uint1(x148)))
- var x151 uint64
- var x152 uint64
- x151, x152 = bits.Add64(x98, x132, uint64(p521Uint1(x150)))
- var x153 uint64
- var x154 uint64
- x153, x154 = bits.Add64(uint64(p521Uint1(x99)), x134, uint64(p521Uint1(x152)))
- var x155 uint64
- var x156 uint64
- x156, x155 = bits.Mul64(x135, 0x1ff)
- var x157 uint64
- var x158 uint64
- x158, x157 = bits.Mul64(x135, 0xffffffffffffffff)
- var x159 uint64
- var x160 uint64
- x160, x159 = bits.Mul64(x135, 0xffffffffffffffff)
- var x161 uint64
- var x162 uint64
- x162, x161 = bits.Mul64(x135, 0xffffffffffffffff)
- var x163 uint64
- var x164 uint64
- x164, x163 = bits.Mul64(x135, 0xffffffffffffffff)
- var x165 uint64
- var x166 uint64
- x166, x165 = bits.Mul64(x135, 0xffffffffffffffff)
- var x167 uint64
- var x168 uint64
- x168, x167 = bits.Mul64(x135, 0xffffffffffffffff)
- var x169 uint64
- var x170 uint64
- x170, x169 = bits.Mul64(x135, 0xffffffffffffffff)
- var x171 uint64
- var x172 uint64
- x172, x171 = bits.Mul64(x135, 0xffffffffffffffff)
- var x173 uint64
- var x174 uint64
- x173, x174 = bits.Add64(x172, x169, uint64(0x0))
- var x175 uint64
- var x176 uint64
- x175, x176 = bits.Add64(x170, x167, uint64(p521Uint1(x174)))
- var x177 uint64
- var x178 uint64
- x177, x178 = bits.Add64(x168, x165, uint64(p521Uint1(x176)))
- var x179 uint64
- var x180 uint64
- x179, x180 = bits.Add64(x166, x163, uint64(p521Uint1(x178)))
- var x181 uint64
- var x182 uint64
- x181, x182 = bits.Add64(x164, x161, uint64(p521Uint1(x180)))
- var x183 uint64
- var x184 uint64
- x183, x184 = bits.Add64(x162, x159, uint64(p521Uint1(x182)))
- var x185 uint64
- var x186 uint64
- x185, x186 = bits.Add64(x160, x157, uint64(p521Uint1(x184)))
- var x187 uint64
- var x188 uint64
- x187, x188 = bits.Add64(x158, x155, uint64(p521Uint1(x186)))
- x189 := (uint64(p521Uint1(x188)) + x156)
- var x191 uint64
- _, x191 = bits.Add64(x135, x171, uint64(0x0))
- var x192 uint64
- var x193 uint64
- x192, x193 = bits.Add64(x137, x173, uint64(p521Uint1(x191)))
- var x194 uint64
- var x195 uint64
- x194, x195 = bits.Add64(x139, x175, uint64(p521Uint1(x193)))
- var x196 uint64
- var x197 uint64
- x196, x197 = bits.Add64(x141, x177, uint64(p521Uint1(x195)))
- var x198 uint64
- var x199 uint64
- x198, x199 = bits.Add64(x143, x179, uint64(p521Uint1(x197)))
- var x200 uint64
- var x201 uint64
- x200, x201 = bits.Add64(x145, x181, uint64(p521Uint1(x199)))
- var x202 uint64
- var x203 uint64
- x202, x203 = bits.Add64(x147, x183, uint64(p521Uint1(x201)))
- var x204 uint64
- var x205 uint64
- x204, x205 = bits.Add64(x149, x185, uint64(p521Uint1(x203)))
- var x206 uint64
- var x207 uint64
- x206, x207 = bits.Add64(x151, x187, uint64(p521Uint1(x205)))
- var x208 uint64
- var x209 uint64
- x208, x209 = bits.Add64(x153, x189, uint64(p521Uint1(x207)))
- x210 := (uint64(p521Uint1(x209)) + uint64(p521Uint1(x154)))
- var x211 uint64
- var x212 uint64
- x212, x211 = bits.Mul64(x2, arg2[8])
- var x213 uint64
- var x214 uint64
- x214, x213 = bits.Mul64(x2, arg2[7])
- var x215 uint64
- var x216 uint64
- x216, x215 = bits.Mul64(x2, arg2[6])
- var x217 uint64
- var x218 uint64
- x218, x217 = bits.Mul64(x2, arg2[5])
- var x219 uint64
- var x220 uint64
- x220, x219 = bits.Mul64(x2, arg2[4])
- var x221 uint64
- var x222 uint64
- x222, x221 = bits.Mul64(x2, arg2[3])
- var x223 uint64
- var x224 uint64
- x224, x223 = bits.Mul64(x2, arg2[2])
- var x225 uint64
- var x226 uint64
- x226, x225 = bits.Mul64(x2, arg2[1])
- var x227 uint64
- var x228 uint64
- x228, x227 = bits.Mul64(x2, arg2[0])
- var x229 uint64
- var x230 uint64
- x229, x230 = bits.Add64(x228, x225, uint64(0x0))
- var x231 uint64
- var x232 uint64
- x231, x232 = bits.Add64(x226, x223, uint64(p521Uint1(x230)))
- var x233 uint64
- var x234 uint64
- x233, x234 = bits.Add64(x224, x221, uint64(p521Uint1(x232)))
- var x235 uint64
- var x236 uint64
- x235, x236 = bits.Add64(x222, x219, uint64(p521Uint1(x234)))
- var x237 uint64
- var x238 uint64
- x237, x238 = bits.Add64(x220, x217, uint64(p521Uint1(x236)))
- var x239 uint64
- var x240 uint64
- x239, x240 = bits.Add64(x218, x215, uint64(p521Uint1(x238)))
- var x241 uint64
- var x242 uint64
- x241, x242 = bits.Add64(x216, x213, uint64(p521Uint1(x240)))
- var x243 uint64
- var x244 uint64
- x243, x244 = bits.Add64(x214, x211, uint64(p521Uint1(x242)))
- x245 := (uint64(p521Uint1(x244)) + x212)
- var x246 uint64
- var x247 uint64
- x246, x247 = bits.Add64(x192, x227, uint64(0x0))
- var x248 uint64
- var x249 uint64
- x248, x249 = bits.Add64(x194, x229, uint64(p521Uint1(x247)))
- var x250 uint64
- var x251 uint64
- x250, x251 = bits.Add64(x196, x231, uint64(p521Uint1(x249)))
- var x252 uint64
- var x253 uint64
- x252, x253 = bits.Add64(x198, x233, uint64(p521Uint1(x251)))
- var x254 uint64
- var x255 uint64
- x254, x255 = bits.Add64(x200, x235, uint64(p521Uint1(x253)))
- var x256 uint64
- var x257 uint64
- x256, x257 = bits.Add64(x202, x237, uint64(p521Uint1(x255)))
- var x258 uint64
- var x259 uint64
- x258, x259 = bits.Add64(x204, x239, uint64(p521Uint1(x257)))
- var x260 uint64
- var x261 uint64
- x260, x261 = bits.Add64(x206, x241, uint64(p521Uint1(x259)))
- var x262 uint64
- var x263 uint64
- x262, x263 = bits.Add64(x208, x243, uint64(p521Uint1(x261)))
- var x264 uint64
- var x265 uint64
- x264, x265 = bits.Add64(x210, x245, uint64(p521Uint1(x263)))
- var x266 uint64
- var x267 uint64
- x267, x266 = bits.Mul64(x246, 0x1ff)
- var x268 uint64
- var x269 uint64
- x269, x268 = bits.Mul64(x246, 0xffffffffffffffff)
- var x270 uint64
- var x271 uint64
- x271, x270 = bits.Mul64(x246, 0xffffffffffffffff)
- var x272 uint64
- var x273 uint64
- x273, x272 = bits.Mul64(x246, 0xffffffffffffffff)
- var x274 uint64
- var x275 uint64
- x275, x274 = bits.Mul64(x246, 0xffffffffffffffff)
- var x276 uint64
- var x277 uint64
- x277, x276 = bits.Mul64(x246, 0xffffffffffffffff)
- var x278 uint64
- var x279 uint64
- x279, x278 = bits.Mul64(x246, 0xffffffffffffffff)
- var x280 uint64
- var x281 uint64
- x281, x280 = bits.Mul64(x246, 0xffffffffffffffff)
- var x282 uint64
- var x283 uint64
- x283, x282 = bits.Mul64(x246, 0xffffffffffffffff)
- var x284 uint64
- var x285 uint64
- x284, x285 = bits.Add64(x283, x280, uint64(0x0))
- var x286 uint64
- var x287 uint64
- x286, x287 = bits.Add64(x281, x278, uint64(p521Uint1(x285)))
- var x288 uint64
- var x289 uint64
- x288, x289 = bits.Add64(x279, x276, uint64(p521Uint1(x287)))
- var x290 uint64
- var x291 uint64
- x290, x291 = bits.Add64(x277, x274, uint64(p521Uint1(x289)))
- var x292 uint64
- var x293 uint64
- x292, x293 = bits.Add64(x275, x272, uint64(p521Uint1(x291)))
- var x294 uint64
- var x295 uint64
- x294, x295 = bits.Add64(x273, x270, uint64(p521Uint1(x293)))
- var x296 uint64
- var x297 uint64
- x296, x297 = bits.Add64(x271, x268, uint64(p521Uint1(x295)))
- var x298 uint64
- var x299 uint64
- x298, x299 = bits.Add64(x269, x266, uint64(p521Uint1(x297)))
- x300 := (uint64(p521Uint1(x299)) + x267)
- var x302 uint64
- _, x302 = bits.Add64(x246, x282, uint64(0x0))
- var x303 uint64
- var x304 uint64
- x303, x304 = bits.Add64(x248, x284, uint64(p521Uint1(x302)))
- var x305 uint64
- var x306 uint64
- x305, x306 = bits.Add64(x250, x286, uint64(p521Uint1(x304)))
- var x307 uint64
- var x308 uint64
- x307, x308 = bits.Add64(x252, x288, uint64(p521Uint1(x306)))
- var x309 uint64
- var x310 uint64
- x309, x310 = bits.Add64(x254, x290, uint64(p521Uint1(x308)))
- var x311 uint64
- var x312 uint64
- x311, x312 = bits.Add64(x256, x292, uint64(p521Uint1(x310)))
- var x313 uint64
- var x314 uint64
- x313, x314 = bits.Add64(x258, x294, uint64(p521Uint1(x312)))
- var x315 uint64
- var x316 uint64
- x315, x316 = bits.Add64(x260, x296, uint64(p521Uint1(x314)))
- var x317 uint64
- var x318 uint64
- x317, x318 = bits.Add64(x262, x298, uint64(p521Uint1(x316)))
- var x319 uint64
- var x320 uint64
- x319, x320 = bits.Add64(x264, x300, uint64(p521Uint1(x318)))
- x321 := (uint64(p521Uint1(x320)) + uint64(p521Uint1(x265)))
- var x322 uint64
- var x323 uint64
- x323, x322 = bits.Mul64(x3, arg2[8])
- var x324 uint64
- var x325 uint64
- x325, x324 = bits.Mul64(x3, arg2[7])
- var x326 uint64
- var x327 uint64
- x327, x326 = bits.Mul64(x3, arg2[6])
- var x328 uint64
- var x329 uint64
- x329, x328 = bits.Mul64(x3, arg2[5])
- var x330 uint64
- var x331 uint64
- x331, x330 = bits.Mul64(x3, arg2[4])
- var x332 uint64
- var x333 uint64
- x333, x332 = bits.Mul64(x3, arg2[3])
- var x334 uint64
- var x335 uint64
- x335, x334 = bits.Mul64(x3, arg2[2])
- var x336 uint64
- var x337 uint64
- x337, x336 = bits.Mul64(x3, arg2[1])
- var x338 uint64
- var x339 uint64
- x339, x338 = bits.Mul64(x3, arg2[0])
- var x340 uint64
- var x341 uint64
- x340, x341 = bits.Add64(x339, x336, uint64(0x0))
- var x342 uint64
- var x343 uint64
- x342, x343 = bits.Add64(x337, x334, uint64(p521Uint1(x341)))
- var x344 uint64
- var x345 uint64
- x344, x345 = bits.Add64(x335, x332, uint64(p521Uint1(x343)))
- var x346 uint64
- var x347 uint64
- x346, x347 = bits.Add64(x333, x330, uint64(p521Uint1(x345)))
- var x348 uint64
- var x349 uint64
- x348, x349 = bits.Add64(x331, x328, uint64(p521Uint1(x347)))
- var x350 uint64
- var x351 uint64
- x350, x351 = bits.Add64(x329, x326, uint64(p521Uint1(x349)))
- var x352 uint64
- var x353 uint64
- x352, x353 = bits.Add64(x327, x324, uint64(p521Uint1(x351)))
- var x354 uint64
- var x355 uint64
- x354, x355 = bits.Add64(x325, x322, uint64(p521Uint1(x353)))
- x356 := (uint64(p521Uint1(x355)) + x323)
- var x357 uint64
- var x358 uint64
- x357, x358 = bits.Add64(x303, x338, uint64(0x0))
- var x359 uint64
- var x360 uint64
- x359, x360 = bits.Add64(x305, x340, uint64(p521Uint1(x358)))
- var x361 uint64
- var x362 uint64
- x361, x362 = bits.Add64(x307, x342, uint64(p521Uint1(x360)))
- var x363 uint64
- var x364 uint64
- x363, x364 = bits.Add64(x309, x344, uint64(p521Uint1(x362)))
- var x365 uint64
- var x366 uint64
- x365, x366 = bits.Add64(x311, x346, uint64(p521Uint1(x364)))
- var x367 uint64
- var x368 uint64
- x367, x368 = bits.Add64(x313, x348, uint64(p521Uint1(x366)))
- var x369 uint64
- var x370 uint64
- x369, x370 = bits.Add64(x315, x350, uint64(p521Uint1(x368)))
- var x371 uint64
- var x372 uint64
- x371, x372 = bits.Add64(x317, x352, uint64(p521Uint1(x370)))
- var x373 uint64
- var x374 uint64
- x373, x374 = bits.Add64(x319, x354, uint64(p521Uint1(x372)))
- var x375 uint64
- var x376 uint64
- x375, x376 = bits.Add64(x321, x356, uint64(p521Uint1(x374)))
- var x377 uint64
- var x378 uint64
- x378, x377 = bits.Mul64(x357, 0x1ff)
- var x379 uint64
- var x380 uint64
- x380, x379 = bits.Mul64(x357, 0xffffffffffffffff)
- var x381 uint64
- var x382 uint64
- x382, x381 = bits.Mul64(x357, 0xffffffffffffffff)
- var x383 uint64
- var x384 uint64
- x384, x383 = bits.Mul64(x357, 0xffffffffffffffff)
- var x385 uint64
- var x386 uint64
- x386, x385 = bits.Mul64(x357, 0xffffffffffffffff)
- var x387 uint64
- var x388 uint64
- x388, x387 = bits.Mul64(x357, 0xffffffffffffffff)
- var x389 uint64
- var x390 uint64
- x390, x389 = bits.Mul64(x357, 0xffffffffffffffff)
- var x391 uint64
- var x392 uint64
- x392, x391 = bits.Mul64(x357, 0xffffffffffffffff)
- var x393 uint64
- var x394 uint64
- x394, x393 = bits.Mul64(x357, 0xffffffffffffffff)
- var x395 uint64
- var x396 uint64
- x395, x396 = bits.Add64(x394, x391, uint64(0x0))
- var x397 uint64
- var x398 uint64
- x397, x398 = bits.Add64(x392, x389, uint64(p521Uint1(x396)))
- var x399 uint64
- var x400 uint64
- x399, x400 = bits.Add64(x390, x387, uint64(p521Uint1(x398)))
- var x401 uint64
- var x402 uint64
- x401, x402 = bits.Add64(x388, x385, uint64(p521Uint1(x400)))
- var x403 uint64
- var x404 uint64
- x403, x404 = bits.Add64(x386, x383, uint64(p521Uint1(x402)))
- var x405 uint64
- var x406 uint64
- x405, x406 = bits.Add64(x384, x381, uint64(p521Uint1(x404)))
- var x407 uint64
- var x408 uint64
- x407, x408 = bits.Add64(x382, x379, uint64(p521Uint1(x406)))
- var x409 uint64
- var x410 uint64
- x409, x410 = bits.Add64(x380, x377, uint64(p521Uint1(x408)))
- x411 := (uint64(p521Uint1(x410)) + x378)
- var x413 uint64
- _, x413 = bits.Add64(x357, x393, uint64(0x0))
- var x414 uint64
- var x415 uint64
- x414, x415 = bits.Add64(x359, x395, uint64(p521Uint1(x413)))
- var x416 uint64
- var x417 uint64
- x416, x417 = bits.Add64(x361, x397, uint64(p521Uint1(x415)))
- var x418 uint64
- var x419 uint64
- x418, x419 = bits.Add64(x363, x399, uint64(p521Uint1(x417)))
- var x420 uint64
- var x421 uint64
- x420, x421 = bits.Add64(x365, x401, uint64(p521Uint1(x419)))
- var x422 uint64
- var x423 uint64
- x422, x423 = bits.Add64(x367, x403, uint64(p521Uint1(x421)))
- var x424 uint64
- var x425 uint64
- x424, x425 = bits.Add64(x369, x405, uint64(p521Uint1(x423)))
- var x426 uint64
- var x427 uint64
- x426, x427 = bits.Add64(x371, x407, uint64(p521Uint1(x425)))
- var x428 uint64
- var x429 uint64
- x428, x429 = bits.Add64(x373, x409, uint64(p521Uint1(x427)))
- var x430 uint64
- var x431 uint64
- x430, x431 = bits.Add64(x375, x411, uint64(p521Uint1(x429)))
- x432 := (uint64(p521Uint1(x431)) + uint64(p521Uint1(x376)))
- var x433 uint64
- var x434 uint64
- x434, x433 = bits.Mul64(x4, arg2[8])
- var x435 uint64
- var x436 uint64
- x436, x435 = bits.Mul64(x4, arg2[7])
- var x437 uint64
- var x438 uint64
- x438, x437 = bits.Mul64(x4, arg2[6])
- var x439 uint64
- var x440 uint64
- x440, x439 = bits.Mul64(x4, arg2[5])
- var x441 uint64
- var x442 uint64
- x442, x441 = bits.Mul64(x4, arg2[4])
- var x443 uint64
- var x444 uint64
- x444, x443 = bits.Mul64(x4, arg2[3])
- var x445 uint64
- var x446 uint64
- x446, x445 = bits.Mul64(x4, arg2[2])
- var x447 uint64
- var x448 uint64
- x448, x447 = bits.Mul64(x4, arg2[1])
- var x449 uint64
- var x450 uint64
- x450, x449 = bits.Mul64(x4, arg2[0])
- var x451 uint64
- var x452 uint64
- x451, x452 = bits.Add64(x450, x447, uint64(0x0))
- var x453 uint64
- var x454 uint64
- x453, x454 = bits.Add64(x448, x445, uint64(p521Uint1(x452)))
- var x455 uint64
- var x456 uint64
- x455, x456 = bits.Add64(x446, x443, uint64(p521Uint1(x454)))
- var x457 uint64
- var x458 uint64
- x457, x458 = bits.Add64(x444, x441, uint64(p521Uint1(x456)))
- var x459 uint64
- var x460 uint64
- x459, x460 = bits.Add64(x442, x439, uint64(p521Uint1(x458)))
- var x461 uint64
- var x462 uint64
- x461, x462 = bits.Add64(x440, x437, uint64(p521Uint1(x460)))
- var x463 uint64
- var x464 uint64
- x463, x464 = bits.Add64(x438, x435, uint64(p521Uint1(x462)))
- var x465 uint64
- var x466 uint64
- x465, x466 = bits.Add64(x436, x433, uint64(p521Uint1(x464)))
- x467 := (uint64(p521Uint1(x466)) + x434)
- var x468 uint64
- var x469 uint64
- x468, x469 = bits.Add64(x414, x449, uint64(0x0))
- var x470 uint64
- var x471 uint64
- x470, x471 = bits.Add64(x416, x451, uint64(p521Uint1(x469)))
- var x472 uint64
- var x473 uint64
- x472, x473 = bits.Add64(x418, x453, uint64(p521Uint1(x471)))
- var x474 uint64
- var x475 uint64
- x474, x475 = bits.Add64(x420, x455, uint64(p521Uint1(x473)))
- var x476 uint64
- var x477 uint64
- x476, x477 = bits.Add64(x422, x457, uint64(p521Uint1(x475)))
- var x478 uint64
- var x479 uint64
- x478, x479 = bits.Add64(x424, x459, uint64(p521Uint1(x477)))
- var x480 uint64
- var x481 uint64
- x480, x481 = bits.Add64(x426, x461, uint64(p521Uint1(x479)))
- var x482 uint64
- var x483 uint64
- x482, x483 = bits.Add64(x428, x463, uint64(p521Uint1(x481)))
- var x484 uint64
- var x485 uint64
- x484, x485 = bits.Add64(x430, x465, uint64(p521Uint1(x483)))
- var x486 uint64
- var x487 uint64
- x486, x487 = bits.Add64(x432, x467, uint64(p521Uint1(x485)))
- var x488 uint64
- var x489 uint64
- x489, x488 = bits.Mul64(x468, 0x1ff)
- var x490 uint64
- var x491 uint64
- x491, x490 = bits.Mul64(x468, 0xffffffffffffffff)
- var x492 uint64
- var x493 uint64
- x493, x492 = bits.Mul64(x468, 0xffffffffffffffff)
- var x494 uint64
- var x495 uint64
- x495, x494 = bits.Mul64(x468, 0xffffffffffffffff)
- var x496 uint64
- var x497 uint64
- x497, x496 = bits.Mul64(x468, 0xffffffffffffffff)
- var x498 uint64
- var x499 uint64
- x499, x498 = bits.Mul64(x468, 0xffffffffffffffff)
- var x500 uint64
- var x501 uint64
- x501, x500 = bits.Mul64(x468, 0xffffffffffffffff)
- var x502 uint64
- var x503 uint64
- x503, x502 = bits.Mul64(x468, 0xffffffffffffffff)
- var x504 uint64
- var x505 uint64
- x505, x504 = bits.Mul64(x468, 0xffffffffffffffff)
- var x506 uint64
- var x507 uint64
- x506, x507 = bits.Add64(x505, x502, uint64(0x0))
- var x508 uint64
- var x509 uint64
- x508, x509 = bits.Add64(x503, x500, uint64(p521Uint1(x507)))
- var x510 uint64
- var x511 uint64
- x510, x511 = bits.Add64(x501, x498, uint64(p521Uint1(x509)))
- var x512 uint64
- var x513 uint64
- x512, x513 = bits.Add64(x499, x496, uint64(p521Uint1(x511)))
- var x514 uint64
- var x515 uint64
- x514, x515 = bits.Add64(x497, x494, uint64(p521Uint1(x513)))
- var x516 uint64
- var x517 uint64
- x516, x517 = bits.Add64(x495, x492, uint64(p521Uint1(x515)))
- var x518 uint64
- var x519 uint64
- x518, x519 = bits.Add64(x493, x490, uint64(p521Uint1(x517)))
- var x520 uint64
- var x521 uint64
- x520, x521 = bits.Add64(x491, x488, uint64(p521Uint1(x519)))
- x522 := (uint64(p521Uint1(x521)) + x489)
- var x524 uint64
- _, x524 = bits.Add64(x468, x504, uint64(0x0))
- var x525 uint64
- var x526 uint64
- x525, x526 = bits.Add64(x470, x506, uint64(p521Uint1(x524)))
- var x527 uint64
- var x528 uint64
- x527, x528 = bits.Add64(x472, x508, uint64(p521Uint1(x526)))
- var x529 uint64
- var x530 uint64
- x529, x530 = bits.Add64(x474, x510, uint64(p521Uint1(x528)))
- var x531 uint64
- var x532 uint64
- x531, x532 = bits.Add64(x476, x512, uint64(p521Uint1(x530)))
- var x533 uint64
- var x534 uint64
- x533, x534 = bits.Add64(x478, x514, uint64(p521Uint1(x532)))
- var x535 uint64
- var x536 uint64
- x535, x536 = bits.Add64(x480, x516, uint64(p521Uint1(x534)))
- var x537 uint64
- var x538 uint64
- x537, x538 = bits.Add64(x482, x518, uint64(p521Uint1(x536)))
- var x539 uint64
- var x540 uint64
- x539, x540 = bits.Add64(x484, x520, uint64(p521Uint1(x538)))
- var x541 uint64
- var x542 uint64
- x541, x542 = bits.Add64(x486, x522, uint64(p521Uint1(x540)))
- x543 := (uint64(p521Uint1(x542)) + uint64(p521Uint1(x487)))
- var x544 uint64
- var x545 uint64
- x545, x544 = bits.Mul64(x5, arg2[8])
- var x546 uint64
- var x547 uint64
- x547, x546 = bits.Mul64(x5, arg2[7])
- var x548 uint64
- var x549 uint64
- x549, x548 = bits.Mul64(x5, arg2[6])
- var x550 uint64
- var x551 uint64
- x551, x550 = bits.Mul64(x5, arg2[5])
- var x552 uint64
- var x553 uint64
- x553, x552 = bits.Mul64(x5, arg2[4])
- var x554 uint64
- var x555 uint64
- x555, x554 = bits.Mul64(x5, arg2[3])
- var x556 uint64
- var x557 uint64
- x557, x556 = bits.Mul64(x5, arg2[2])
- var x558 uint64
- var x559 uint64
- x559, x558 = bits.Mul64(x5, arg2[1])
- var x560 uint64
- var x561 uint64
- x561, x560 = bits.Mul64(x5, arg2[0])
- var x562 uint64
- var x563 uint64
- x562, x563 = bits.Add64(x561, x558, uint64(0x0))
- var x564 uint64
- var x565 uint64
- x564, x565 = bits.Add64(x559, x556, uint64(p521Uint1(x563)))
- var x566 uint64
- var x567 uint64
- x566, x567 = bits.Add64(x557, x554, uint64(p521Uint1(x565)))
- var x568 uint64
- var x569 uint64
- x568, x569 = bits.Add64(x555, x552, uint64(p521Uint1(x567)))
- var x570 uint64
- var x571 uint64
- x570, x571 = bits.Add64(x553, x550, uint64(p521Uint1(x569)))
- var x572 uint64
- var x573 uint64
- x572, x573 = bits.Add64(x551, x548, uint64(p521Uint1(x571)))
- var x574 uint64
- var x575 uint64
- x574, x575 = bits.Add64(x549, x546, uint64(p521Uint1(x573)))
- var x576 uint64
- var x577 uint64
- x576, x577 = bits.Add64(x547, x544, uint64(p521Uint1(x575)))
- x578 := (uint64(p521Uint1(x577)) + x545)
- var x579 uint64
- var x580 uint64
- x579, x580 = bits.Add64(x525, x560, uint64(0x0))
- var x581 uint64
- var x582 uint64
- x581, x582 = bits.Add64(x527, x562, uint64(p521Uint1(x580)))
- var x583 uint64
- var x584 uint64
- x583, x584 = bits.Add64(x529, x564, uint64(p521Uint1(x582)))
- var x585 uint64
- var x586 uint64
- x585, x586 = bits.Add64(x531, x566, uint64(p521Uint1(x584)))
- var x587 uint64
- var x588 uint64
- x587, x588 = bits.Add64(x533, x568, uint64(p521Uint1(x586)))
- var x589 uint64
- var x590 uint64
- x589, x590 = bits.Add64(x535, x570, uint64(p521Uint1(x588)))
- var x591 uint64
- var x592 uint64
- x591, x592 = bits.Add64(x537, x572, uint64(p521Uint1(x590)))
- var x593 uint64
- var x594 uint64
- x593, x594 = bits.Add64(x539, x574, uint64(p521Uint1(x592)))
- var x595 uint64
- var x596 uint64
- x595, x596 = bits.Add64(x541, x576, uint64(p521Uint1(x594)))
- var x597 uint64
- var x598 uint64
- x597, x598 = bits.Add64(x543, x578, uint64(p521Uint1(x596)))
- var x599 uint64
- var x600 uint64
- x600, x599 = bits.Mul64(x579, 0x1ff)
- var x601 uint64
- var x602 uint64
- x602, x601 = bits.Mul64(x579, 0xffffffffffffffff)
- var x603 uint64
- var x604 uint64
- x604, x603 = bits.Mul64(x579, 0xffffffffffffffff)
- var x605 uint64
- var x606 uint64
- x606, x605 = bits.Mul64(x579, 0xffffffffffffffff)
- var x607 uint64
- var x608 uint64
- x608, x607 = bits.Mul64(x579, 0xffffffffffffffff)
- var x609 uint64
- var x610 uint64
- x610, x609 = bits.Mul64(x579, 0xffffffffffffffff)
- var x611 uint64
- var x612 uint64
- x612, x611 = bits.Mul64(x579, 0xffffffffffffffff)
- var x613 uint64
- var x614 uint64
- x614, x613 = bits.Mul64(x579, 0xffffffffffffffff)
- var x615 uint64
- var x616 uint64
- x616, x615 = bits.Mul64(x579, 0xffffffffffffffff)
- var x617 uint64
- var x618 uint64
- x617, x618 = bits.Add64(x616, x613, uint64(0x0))
- var x619 uint64
- var x620 uint64
- x619, x620 = bits.Add64(x614, x611, uint64(p521Uint1(x618)))
- var x621 uint64
- var x622 uint64
- x621, x622 = bits.Add64(x612, x609, uint64(p521Uint1(x620)))
- var x623 uint64
- var x624 uint64
- x623, x624 = bits.Add64(x610, x607, uint64(p521Uint1(x622)))
- var x625 uint64
- var x626 uint64
- x625, x626 = bits.Add64(x608, x605, uint64(p521Uint1(x624)))
- var x627 uint64
- var x628 uint64
- x627, x628 = bits.Add64(x606, x603, uint64(p521Uint1(x626)))
- var x629 uint64
- var x630 uint64
- x629, x630 = bits.Add64(x604, x601, uint64(p521Uint1(x628)))
- var x631 uint64
- var x632 uint64
- x631, x632 = bits.Add64(x602, x599, uint64(p521Uint1(x630)))
- x633 := (uint64(p521Uint1(x632)) + x600)
- var x635 uint64
- _, x635 = bits.Add64(x579, x615, uint64(0x0))
- var x636 uint64
- var x637 uint64
- x636, x637 = bits.Add64(x581, x617, uint64(p521Uint1(x635)))
- var x638 uint64
- var x639 uint64
- x638, x639 = bits.Add64(x583, x619, uint64(p521Uint1(x637)))
- var x640 uint64
- var x641 uint64
- x640, x641 = bits.Add64(x585, x621, uint64(p521Uint1(x639)))
- var x642 uint64
- var x643 uint64
- x642, x643 = bits.Add64(x587, x623, uint64(p521Uint1(x641)))
- var x644 uint64
- var x645 uint64
- x644, x645 = bits.Add64(x589, x625, uint64(p521Uint1(x643)))
- var x646 uint64
- var x647 uint64
- x646, x647 = bits.Add64(x591, x627, uint64(p521Uint1(x645)))
- var x648 uint64
- var x649 uint64
- x648, x649 = bits.Add64(x593, x629, uint64(p521Uint1(x647)))
- var x650 uint64
- var x651 uint64
- x650, x651 = bits.Add64(x595, x631, uint64(p521Uint1(x649)))
- var x652 uint64
- var x653 uint64
- x652, x653 = bits.Add64(x597, x633, uint64(p521Uint1(x651)))
- x654 := (uint64(p521Uint1(x653)) + uint64(p521Uint1(x598)))
- var x655 uint64
- var x656 uint64
- x656, x655 = bits.Mul64(x6, arg2[8])
- var x657 uint64
- var x658 uint64
- x658, x657 = bits.Mul64(x6, arg2[7])
- var x659 uint64
- var x660 uint64
- x660, x659 = bits.Mul64(x6, arg2[6])
- var x661 uint64
- var x662 uint64
- x662, x661 = bits.Mul64(x6, arg2[5])
- var x663 uint64
- var x664 uint64
- x664, x663 = bits.Mul64(x6, arg2[4])
- var x665 uint64
- var x666 uint64
- x666, x665 = bits.Mul64(x6, arg2[3])
- var x667 uint64
- var x668 uint64
- x668, x667 = bits.Mul64(x6, arg2[2])
- var x669 uint64
- var x670 uint64
- x670, x669 = bits.Mul64(x6, arg2[1])
- var x671 uint64
- var x672 uint64
- x672, x671 = bits.Mul64(x6, arg2[0])
- var x673 uint64
- var x674 uint64
- x673, x674 = bits.Add64(x672, x669, uint64(0x0))
- var x675 uint64
- var x676 uint64
- x675, x676 = bits.Add64(x670, x667, uint64(p521Uint1(x674)))
- var x677 uint64
- var x678 uint64
- x677, x678 = bits.Add64(x668, x665, uint64(p521Uint1(x676)))
- var x679 uint64
- var x680 uint64
- x679, x680 = bits.Add64(x666, x663, uint64(p521Uint1(x678)))
- var x681 uint64
- var x682 uint64
- x681, x682 = bits.Add64(x664, x661, uint64(p521Uint1(x680)))
- var x683 uint64
- var x684 uint64
- x683, x684 = bits.Add64(x662, x659, uint64(p521Uint1(x682)))
- var x685 uint64
- var x686 uint64
- x685, x686 = bits.Add64(x660, x657, uint64(p521Uint1(x684)))
- var x687 uint64
- var x688 uint64
- x687, x688 = bits.Add64(x658, x655, uint64(p521Uint1(x686)))
- x689 := (uint64(p521Uint1(x688)) + x656)
- var x690 uint64
- var x691 uint64
- x690, x691 = bits.Add64(x636, x671, uint64(0x0))
- var x692 uint64
- var x693 uint64
- x692, x693 = bits.Add64(x638, x673, uint64(p521Uint1(x691)))
- var x694 uint64
- var x695 uint64
- x694, x695 = bits.Add64(x640, x675, uint64(p521Uint1(x693)))
- var x696 uint64
- var x697 uint64
- x696, x697 = bits.Add64(x642, x677, uint64(p521Uint1(x695)))
- var x698 uint64
- var x699 uint64
- x698, x699 = bits.Add64(x644, x679, uint64(p521Uint1(x697)))
- var x700 uint64
- var x701 uint64
- x700, x701 = bits.Add64(x646, x681, uint64(p521Uint1(x699)))
- var x702 uint64
- var x703 uint64
- x702, x703 = bits.Add64(x648, x683, uint64(p521Uint1(x701)))
- var x704 uint64
- var x705 uint64
- x704, x705 = bits.Add64(x650, x685, uint64(p521Uint1(x703)))
- var x706 uint64
- var x707 uint64
- x706, x707 = bits.Add64(x652, x687, uint64(p521Uint1(x705)))
- var x708 uint64
- var x709 uint64
- x708, x709 = bits.Add64(x654, x689, uint64(p521Uint1(x707)))
- var x710 uint64
- var x711 uint64
- x711, x710 = bits.Mul64(x690, 0x1ff)
- var x712 uint64
- var x713 uint64
- x713, x712 = bits.Mul64(x690, 0xffffffffffffffff)
- var x714 uint64
- var x715 uint64
- x715, x714 = bits.Mul64(x690, 0xffffffffffffffff)
- var x716 uint64
- var x717 uint64
- x717, x716 = bits.Mul64(x690, 0xffffffffffffffff)
- var x718 uint64
- var x719 uint64
- x719, x718 = bits.Mul64(x690, 0xffffffffffffffff)
- var x720 uint64
- var x721 uint64
- x721, x720 = bits.Mul64(x690, 0xffffffffffffffff)
- var x722 uint64
- var x723 uint64
- x723, x722 = bits.Mul64(x690, 0xffffffffffffffff)
- var x724 uint64
- var x725 uint64
- x725, x724 = bits.Mul64(x690, 0xffffffffffffffff)
- var x726 uint64
- var x727 uint64
- x727, x726 = bits.Mul64(x690, 0xffffffffffffffff)
- var x728 uint64
- var x729 uint64
- x728, x729 = bits.Add64(x727, x724, uint64(0x0))
- var x730 uint64
- var x731 uint64
- x730, x731 = bits.Add64(x725, x722, uint64(p521Uint1(x729)))
- var x732 uint64
- var x733 uint64
- x732, x733 = bits.Add64(x723, x720, uint64(p521Uint1(x731)))
- var x734 uint64
- var x735 uint64
- x734, x735 = bits.Add64(x721, x718, uint64(p521Uint1(x733)))
- var x736 uint64
- var x737 uint64
- x736, x737 = bits.Add64(x719, x716, uint64(p521Uint1(x735)))
- var x738 uint64
- var x739 uint64
- x738, x739 = bits.Add64(x717, x714, uint64(p521Uint1(x737)))
- var x740 uint64
- var x741 uint64
- x740, x741 = bits.Add64(x715, x712, uint64(p521Uint1(x739)))
- var x742 uint64
- var x743 uint64
- x742, x743 = bits.Add64(x713, x710, uint64(p521Uint1(x741)))
- x744 := (uint64(p521Uint1(x743)) + x711)
- var x746 uint64
- _, x746 = bits.Add64(x690, x726, uint64(0x0))
- var x747 uint64
- var x748 uint64
- x747, x748 = bits.Add64(x692, x728, uint64(p521Uint1(x746)))
- var x749 uint64
- var x750 uint64
- x749, x750 = bits.Add64(x694, x730, uint64(p521Uint1(x748)))
- var x751 uint64
- var x752 uint64
- x751, x752 = bits.Add64(x696, x732, uint64(p521Uint1(x750)))
- var x753 uint64
- var x754 uint64
- x753, x754 = bits.Add64(x698, x734, uint64(p521Uint1(x752)))
- var x755 uint64
- var x756 uint64
- x755, x756 = bits.Add64(x700, x736, uint64(p521Uint1(x754)))
- var x757 uint64
- var x758 uint64
- x757, x758 = bits.Add64(x702, x738, uint64(p521Uint1(x756)))
- var x759 uint64
- var x760 uint64
- x759, x760 = bits.Add64(x704, x740, uint64(p521Uint1(x758)))
- var x761 uint64
- var x762 uint64
- x761, x762 = bits.Add64(x706, x742, uint64(p521Uint1(x760)))
- var x763 uint64
- var x764 uint64
- x763, x764 = bits.Add64(x708, x744, uint64(p521Uint1(x762)))
- x765 := (uint64(p521Uint1(x764)) + uint64(p521Uint1(x709)))
- var x766 uint64
- var x767 uint64
- x767, x766 = bits.Mul64(x7, arg2[8])
- var x768 uint64
- var x769 uint64
- x769, x768 = bits.Mul64(x7, arg2[7])
- var x770 uint64
- var x771 uint64
- x771, x770 = bits.Mul64(x7, arg2[6])
- var x772 uint64
- var x773 uint64
- x773, x772 = bits.Mul64(x7, arg2[5])
- var x774 uint64
- var x775 uint64
- x775, x774 = bits.Mul64(x7, arg2[4])
- var x776 uint64
- var x777 uint64
- x777, x776 = bits.Mul64(x7, arg2[3])
- var x778 uint64
- var x779 uint64
- x779, x778 = bits.Mul64(x7, arg2[2])
- var x780 uint64
- var x781 uint64
- x781, x780 = bits.Mul64(x7, arg2[1])
- var x782 uint64
- var x783 uint64
- x783, x782 = bits.Mul64(x7, arg2[0])
- var x784 uint64
- var x785 uint64
- x784, x785 = bits.Add64(x783, x780, uint64(0x0))
- var x786 uint64
- var x787 uint64
- x786, x787 = bits.Add64(x781, x778, uint64(p521Uint1(x785)))
- var x788 uint64
- var x789 uint64
- x788, x789 = bits.Add64(x779, x776, uint64(p521Uint1(x787)))
- var x790 uint64
- var x791 uint64
- x790, x791 = bits.Add64(x777, x774, uint64(p521Uint1(x789)))
- var x792 uint64
- var x793 uint64
- x792, x793 = bits.Add64(x775, x772, uint64(p521Uint1(x791)))
- var x794 uint64
- var x795 uint64
- x794, x795 = bits.Add64(x773, x770, uint64(p521Uint1(x793)))
- var x796 uint64
- var x797 uint64
- x796, x797 = bits.Add64(x771, x768, uint64(p521Uint1(x795)))
- var x798 uint64
- var x799 uint64
- x798, x799 = bits.Add64(x769, x766, uint64(p521Uint1(x797)))
- x800 := (uint64(p521Uint1(x799)) + x767)
- var x801 uint64
- var x802 uint64
- x801, x802 = bits.Add64(x747, x782, uint64(0x0))
- var x803 uint64
- var x804 uint64
- x803, x804 = bits.Add64(x749, x784, uint64(p521Uint1(x802)))
- var x805 uint64
- var x806 uint64
- x805, x806 = bits.Add64(x751, x786, uint64(p521Uint1(x804)))
- var x807 uint64
- var x808 uint64
- x807, x808 = bits.Add64(x753, x788, uint64(p521Uint1(x806)))
- var x809 uint64
- var x810 uint64
- x809, x810 = bits.Add64(x755, x790, uint64(p521Uint1(x808)))
- var x811 uint64
- var x812 uint64
- x811, x812 = bits.Add64(x757, x792, uint64(p521Uint1(x810)))
- var x813 uint64
- var x814 uint64
- x813, x814 = bits.Add64(x759, x794, uint64(p521Uint1(x812)))
- var x815 uint64
- var x816 uint64
- x815, x816 = bits.Add64(x761, x796, uint64(p521Uint1(x814)))
- var x817 uint64
- var x818 uint64
- x817, x818 = bits.Add64(x763, x798, uint64(p521Uint1(x816)))
- var x819 uint64
- var x820 uint64
- x819, x820 = bits.Add64(x765, x800, uint64(p521Uint1(x818)))
- var x821 uint64
- var x822 uint64
- x822, x821 = bits.Mul64(x801, 0x1ff)
- var x823 uint64
- var x824 uint64
- x824, x823 = bits.Mul64(x801, 0xffffffffffffffff)
- var x825 uint64
- var x826 uint64
- x826, x825 = bits.Mul64(x801, 0xffffffffffffffff)
- var x827 uint64
- var x828 uint64
- x828, x827 = bits.Mul64(x801, 0xffffffffffffffff)
- var x829 uint64
- var x830 uint64
- x830, x829 = bits.Mul64(x801, 0xffffffffffffffff)
- var x831 uint64
- var x832 uint64
- x832, x831 = bits.Mul64(x801, 0xffffffffffffffff)
- var x833 uint64
- var x834 uint64
- x834, x833 = bits.Mul64(x801, 0xffffffffffffffff)
- var x835 uint64
- var x836 uint64
- x836, x835 = bits.Mul64(x801, 0xffffffffffffffff)
- var x837 uint64
- var x838 uint64
- x838, x837 = bits.Mul64(x801, 0xffffffffffffffff)
- var x839 uint64
- var x840 uint64
- x839, x840 = bits.Add64(x838, x835, uint64(0x0))
- var x841 uint64
- var x842 uint64
- x841, x842 = bits.Add64(x836, x833, uint64(p521Uint1(x840)))
- var x843 uint64
- var x844 uint64
- x843, x844 = bits.Add64(x834, x831, uint64(p521Uint1(x842)))
- var x845 uint64
- var x846 uint64
- x845, x846 = bits.Add64(x832, x829, uint64(p521Uint1(x844)))
- var x847 uint64
- var x848 uint64
- x847, x848 = bits.Add64(x830, x827, uint64(p521Uint1(x846)))
- var x849 uint64
- var x850 uint64
- x849, x850 = bits.Add64(x828, x825, uint64(p521Uint1(x848)))
- var x851 uint64
- var x852 uint64
- x851, x852 = bits.Add64(x826, x823, uint64(p521Uint1(x850)))
- var x853 uint64
- var x854 uint64
- x853, x854 = bits.Add64(x824, x821, uint64(p521Uint1(x852)))
- x855 := (uint64(p521Uint1(x854)) + x822)
- var x857 uint64
- _, x857 = bits.Add64(x801, x837, uint64(0x0))
- var x858 uint64
- var x859 uint64
- x858, x859 = bits.Add64(x803, x839, uint64(p521Uint1(x857)))
- var x860 uint64
- var x861 uint64
- x860, x861 = bits.Add64(x805, x841, uint64(p521Uint1(x859)))
- var x862 uint64
- var x863 uint64
- x862, x863 = bits.Add64(x807, x843, uint64(p521Uint1(x861)))
- var x864 uint64
- var x865 uint64
- x864, x865 = bits.Add64(x809, x845, uint64(p521Uint1(x863)))
- var x866 uint64
- var x867 uint64
- x866, x867 = bits.Add64(x811, x847, uint64(p521Uint1(x865)))
- var x868 uint64
- var x869 uint64
- x868, x869 = bits.Add64(x813, x849, uint64(p521Uint1(x867)))
- var x870 uint64
- var x871 uint64
- x870, x871 = bits.Add64(x815, x851, uint64(p521Uint1(x869)))
- var x872 uint64
- var x873 uint64
- x872, x873 = bits.Add64(x817, x853, uint64(p521Uint1(x871)))
- var x874 uint64
- var x875 uint64
- x874, x875 = bits.Add64(x819, x855, uint64(p521Uint1(x873)))
- x876 := (uint64(p521Uint1(x875)) + uint64(p521Uint1(x820)))
- var x877 uint64
- var x878 uint64
- x878, x877 = bits.Mul64(x8, arg2[8])
- var x879 uint64
- var x880 uint64
- x880, x879 = bits.Mul64(x8, arg2[7])
- var x881 uint64
- var x882 uint64
- x882, x881 = bits.Mul64(x8, arg2[6])
- var x883 uint64
- var x884 uint64
- x884, x883 = bits.Mul64(x8, arg2[5])
- var x885 uint64
- var x886 uint64
- x886, x885 = bits.Mul64(x8, arg2[4])
- var x887 uint64
- var x888 uint64
- x888, x887 = bits.Mul64(x8, arg2[3])
- var x889 uint64
- var x890 uint64
- x890, x889 = bits.Mul64(x8, arg2[2])
- var x891 uint64
- var x892 uint64
- x892, x891 = bits.Mul64(x8, arg2[1])
- var x893 uint64
- var x894 uint64
- x894, x893 = bits.Mul64(x8, arg2[0])
- var x895 uint64
- var x896 uint64
- x895, x896 = bits.Add64(x894, x891, uint64(0x0))
- var x897 uint64
- var x898 uint64
- x897, x898 = bits.Add64(x892, x889, uint64(p521Uint1(x896)))
- var x899 uint64
- var x900 uint64
- x899, x900 = bits.Add64(x890, x887, uint64(p521Uint1(x898)))
- var x901 uint64
- var x902 uint64
- x901, x902 = bits.Add64(x888, x885, uint64(p521Uint1(x900)))
- var x903 uint64
- var x904 uint64
- x903, x904 = bits.Add64(x886, x883, uint64(p521Uint1(x902)))
- var x905 uint64
- var x906 uint64
- x905, x906 = bits.Add64(x884, x881, uint64(p521Uint1(x904)))
- var x907 uint64
- var x908 uint64
- x907, x908 = bits.Add64(x882, x879, uint64(p521Uint1(x906)))
- var x909 uint64
- var x910 uint64
- x909, x910 = bits.Add64(x880, x877, uint64(p521Uint1(x908)))
- x911 := (uint64(p521Uint1(x910)) + x878)
- var x912 uint64
- var x913 uint64
- x912, x913 = bits.Add64(x858, x893, uint64(0x0))
- var x914 uint64
- var x915 uint64
- x914, x915 = bits.Add64(x860, x895, uint64(p521Uint1(x913)))
- var x916 uint64
- var x917 uint64
- x916, x917 = bits.Add64(x862, x897, uint64(p521Uint1(x915)))
- var x918 uint64
- var x919 uint64
- x918, x919 = bits.Add64(x864, x899, uint64(p521Uint1(x917)))
- var x920 uint64
- var x921 uint64
- x920, x921 = bits.Add64(x866, x901, uint64(p521Uint1(x919)))
- var x922 uint64
- var x923 uint64
- x922, x923 = bits.Add64(x868, x903, uint64(p521Uint1(x921)))
- var x924 uint64
- var x925 uint64
- x924, x925 = bits.Add64(x870, x905, uint64(p521Uint1(x923)))
- var x926 uint64
- var x927 uint64
- x926, x927 = bits.Add64(x872, x907, uint64(p521Uint1(x925)))
- var x928 uint64
- var x929 uint64
- x928, x929 = bits.Add64(x874, x909, uint64(p521Uint1(x927)))
- var x930 uint64
- var x931 uint64
- x930, x931 = bits.Add64(x876, x911, uint64(p521Uint1(x929)))
- var x932 uint64
- var x933 uint64
- x933, x932 = bits.Mul64(x912, 0x1ff)
- var x934 uint64
- var x935 uint64
- x935, x934 = bits.Mul64(x912, 0xffffffffffffffff)
- var x936 uint64
- var x937 uint64
- x937, x936 = bits.Mul64(x912, 0xffffffffffffffff)
- var x938 uint64
- var x939 uint64
- x939, x938 = bits.Mul64(x912, 0xffffffffffffffff)
- var x940 uint64
- var x941 uint64
- x941, x940 = bits.Mul64(x912, 0xffffffffffffffff)
- var x942 uint64
- var x943 uint64
- x943, x942 = bits.Mul64(x912, 0xffffffffffffffff)
- var x944 uint64
- var x945 uint64
- x945, x944 = bits.Mul64(x912, 0xffffffffffffffff)
- var x946 uint64
- var x947 uint64
- x947, x946 = bits.Mul64(x912, 0xffffffffffffffff)
- var x948 uint64
- var x949 uint64
- x949, x948 = bits.Mul64(x912, 0xffffffffffffffff)
- var x950 uint64
- var x951 uint64
- x950, x951 = bits.Add64(x949, x946, uint64(0x0))
- var x952 uint64
- var x953 uint64
- x952, x953 = bits.Add64(x947, x944, uint64(p521Uint1(x951)))
- var x954 uint64
- var x955 uint64
- x954, x955 = bits.Add64(x945, x942, uint64(p521Uint1(x953)))
- var x956 uint64
- var x957 uint64
- x956, x957 = bits.Add64(x943, x940, uint64(p521Uint1(x955)))
- var x958 uint64
- var x959 uint64
- x958, x959 = bits.Add64(x941, x938, uint64(p521Uint1(x957)))
- var x960 uint64
- var x961 uint64
- x960, x961 = bits.Add64(x939, x936, uint64(p521Uint1(x959)))
- var x962 uint64
- var x963 uint64
- x962, x963 = bits.Add64(x937, x934, uint64(p521Uint1(x961)))
- var x964 uint64
- var x965 uint64
- x964, x965 = bits.Add64(x935, x932, uint64(p521Uint1(x963)))
- x966 := (uint64(p521Uint1(x965)) + x933)
- var x968 uint64
- _, x968 = bits.Add64(x912, x948, uint64(0x0))
- var x969 uint64
- var x970 uint64
- x969, x970 = bits.Add64(x914, x950, uint64(p521Uint1(x968)))
- var x971 uint64
- var x972 uint64
- x971, x972 = bits.Add64(x916, x952, uint64(p521Uint1(x970)))
- var x973 uint64
- var x974 uint64
- x973, x974 = bits.Add64(x918, x954, uint64(p521Uint1(x972)))
- var x975 uint64
- var x976 uint64
- x975, x976 = bits.Add64(x920, x956, uint64(p521Uint1(x974)))
- var x977 uint64
- var x978 uint64
- x977, x978 = bits.Add64(x922, x958, uint64(p521Uint1(x976)))
- var x979 uint64
- var x980 uint64
- x979, x980 = bits.Add64(x924, x960, uint64(p521Uint1(x978)))
- var x981 uint64
- var x982 uint64
- x981, x982 = bits.Add64(x926, x962, uint64(p521Uint1(x980)))
- var x983 uint64
- var x984 uint64
- x983, x984 = bits.Add64(x928, x964, uint64(p521Uint1(x982)))
- var x985 uint64
- var x986 uint64
- x985, x986 = bits.Add64(x930, x966, uint64(p521Uint1(x984)))
- x987 := (uint64(p521Uint1(x986)) + uint64(p521Uint1(x931)))
- var x988 uint64
- var x989 uint64
- x988, x989 = bits.Sub64(x969, 0xffffffffffffffff, uint64(0x0))
- var x990 uint64
- var x991 uint64
- x990, x991 = bits.Sub64(x971, 0xffffffffffffffff, uint64(p521Uint1(x989)))
- var x992 uint64
- var x993 uint64
- x992, x993 = bits.Sub64(x973, 0xffffffffffffffff, uint64(p521Uint1(x991)))
- var x994 uint64
- var x995 uint64
- x994, x995 = bits.Sub64(x975, 0xffffffffffffffff, uint64(p521Uint1(x993)))
- var x996 uint64
- var x997 uint64
- x996, x997 = bits.Sub64(x977, 0xffffffffffffffff, uint64(p521Uint1(x995)))
- var x998 uint64
- var x999 uint64
- x998, x999 = bits.Sub64(x979, 0xffffffffffffffff, uint64(p521Uint1(x997)))
- var x1000 uint64
- var x1001 uint64
- x1000, x1001 = bits.Sub64(x981, 0xffffffffffffffff, uint64(p521Uint1(x999)))
- var x1002 uint64
- var x1003 uint64
- x1002, x1003 = bits.Sub64(x983, 0xffffffffffffffff, uint64(p521Uint1(x1001)))
- var x1004 uint64
- var x1005 uint64
- x1004, x1005 = bits.Sub64(x985, 0x1ff, uint64(p521Uint1(x1003)))
- var x1007 uint64
- _, x1007 = bits.Sub64(x987, uint64(0x0), uint64(p521Uint1(x1005)))
- var x1008 uint64
- p521CmovznzU64(&x1008, p521Uint1(x1007), x988, x969)
- var x1009 uint64
- p521CmovznzU64(&x1009, p521Uint1(x1007), x990, x971)
- var x1010 uint64
- p521CmovznzU64(&x1010, p521Uint1(x1007), x992, x973)
- var x1011 uint64
- p521CmovznzU64(&x1011, p521Uint1(x1007), x994, x975)
- var x1012 uint64
- p521CmovznzU64(&x1012, p521Uint1(x1007), x996, x977)
- var x1013 uint64
- p521CmovznzU64(&x1013, p521Uint1(x1007), x998, x979)
- var x1014 uint64
- p521CmovznzU64(&x1014, p521Uint1(x1007), x1000, x981)
- var x1015 uint64
- p521CmovznzU64(&x1015, p521Uint1(x1007), x1002, x983)
- var x1016 uint64
- p521CmovznzU64(&x1016, p521Uint1(x1007), x1004, x985)
- out1[0] = x1008
- out1[1] = x1009
- out1[2] = x1010
- out1[3] = x1011
- out1[4] = x1012
- out1[5] = x1013
- out1[6] = x1014
- out1[7] = x1015
- out1[8] = x1016
-}
-
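The tail of p521Mul above ends every Montgomery multiply the same way: the reduced value in x969..x985 can still be one modulus too large, so the code trial-subtracts m limb by limb with bits.Sub64 and then uses the final borrow x1007 to pick, in constant time, between the subtracted and unsubtracted limbs via p521CmovznzU64. A minimal standalone sketch of that borrow-and-select pattern, assuming a toy two-limb modulus (condSelect is an illustrative stand-in for p521CmovznzU64, not part of the package):

package main

import (
	"fmt"
	"math/bits"
)

// condSelect is an illustrative stand-in for p521CmovznzU64: it returns a
// when flag == 0 and b when flag == 1, without branching on flag.
func condSelect(flag, a, b uint64) uint64 {
	mask := uint64(0) - flag // all zeros or all ones
	return (a &^ mask) | (b & mask)
}

func main() {
	// Assumed toy two-limb modulus m = 2^64 + 5, limbs little-endian.
	m := [2]uint64{5, 1}
	// A value in [0, 2m) that may or may not need one final subtraction.
	x := [2]uint64{3, 1} // 2^64 + 3, already below m

	// Trial-subtract m with a borrow chain, limb by limb.
	s0, b0 := bits.Sub64(x[0], m[0], 0)
	s1, b1 := bits.Sub64(x[1], m[1], b0)

	// Final borrow 1 means x < m: keep x. Borrow 0 means x >= m: keep x - m.
	r0 := condSelect(b1, s0, x[0])
	r1 := condSelect(b1, s1, x[1])
	fmt.Println(r1, r0) // 1 3: x was already reduced
}

Selecting through a mask rather than an if keeps the control flow and the chosen value independent of secret data, which is what the cmovznz helpers exist for.
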
-// p521Square squares a field element in the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// Postconditions:
-// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg1)) mod m
-// 0 ≤ eval out1 < m
-//
-func p521Square(out1 *p521MontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement) {
- x1 := arg1[1]
- x2 := arg1[2]
- x3 := arg1[3]
- x4 := arg1[4]
- x5 := arg1[5]
- x6 := arg1[6]
- x7 := arg1[7]
- x8 := arg1[8]
- x9 := arg1[0]
- var x10 uint64
- var x11 uint64
- x11, x10 = bits.Mul64(x9, arg1[8])
- var x12 uint64
- var x13 uint64
- x13, x12 = bits.Mul64(x9, arg1[7])
- var x14 uint64
- var x15 uint64
- x15, x14 = bits.Mul64(x9, arg1[6])
- var x16 uint64
- var x17 uint64
- x17, x16 = bits.Mul64(x9, arg1[5])
- var x18 uint64
- var x19 uint64
- x19, x18 = bits.Mul64(x9, arg1[4])
- var x20 uint64
- var x21 uint64
- x21, x20 = bits.Mul64(x9, arg1[3])
- var x22 uint64
- var x23 uint64
- x23, x22 = bits.Mul64(x9, arg1[2])
- var x24 uint64
- var x25 uint64
- x25, x24 = bits.Mul64(x9, arg1[1])
- var x26 uint64
- var x27 uint64
- x27, x26 = bits.Mul64(x9, arg1[0])
- var x28 uint64
- var x29 uint64
- x28, x29 = bits.Add64(x27, x24, uint64(0x0))
- var x30 uint64
- var x31 uint64
- x30, x31 = bits.Add64(x25, x22, uint64(p521Uint1(x29)))
- var x32 uint64
- var x33 uint64
- x32, x33 = bits.Add64(x23, x20, uint64(p521Uint1(x31)))
- var x34 uint64
- var x35 uint64
- x34, x35 = bits.Add64(x21, x18, uint64(p521Uint1(x33)))
- var x36 uint64
- var x37 uint64
- x36, x37 = bits.Add64(x19, x16, uint64(p521Uint1(x35)))
- var x38 uint64
- var x39 uint64
- x38, x39 = bits.Add64(x17, x14, uint64(p521Uint1(x37)))
- var x40 uint64
- var x41 uint64
- x40, x41 = bits.Add64(x15, x12, uint64(p521Uint1(x39)))
- var x42 uint64
- var x43 uint64
- x42, x43 = bits.Add64(x13, x10, uint64(p521Uint1(x41)))
- x44 := (uint64(p521Uint1(x43)) + x11)
- var x45 uint64
- var x46 uint64
- x46, x45 = bits.Mul64(x26, 0x1ff)
- var x47 uint64
- var x48 uint64
- x48, x47 = bits.Mul64(x26, 0xffffffffffffffff)
- var x49 uint64
- var x50 uint64
- x50, x49 = bits.Mul64(x26, 0xffffffffffffffff)
- var x51 uint64
- var x52 uint64
- x52, x51 = bits.Mul64(x26, 0xffffffffffffffff)
- var x53 uint64
- var x54 uint64
- x54, x53 = bits.Mul64(x26, 0xffffffffffffffff)
- var x55 uint64
- var x56 uint64
- x56, x55 = bits.Mul64(x26, 0xffffffffffffffff)
- var x57 uint64
- var x58 uint64
- x58, x57 = bits.Mul64(x26, 0xffffffffffffffff)
- var x59 uint64
- var x60 uint64
- x60, x59 = bits.Mul64(x26, 0xffffffffffffffff)
- var x61 uint64
- var x62 uint64
- x62, x61 = bits.Mul64(x26, 0xffffffffffffffff)
- var x63 uint64
- var x64 uint64
- x63, x64 = bits.Add64(x62, x59, uint64(0x0))
- var x65 uint64
- var x66 uint64
- x65, x66 = bits.Add64(x60, x57, uint64(p521Uint1(x64)))
- var x67 uint64
- var x68 uint64
- x67, x68 = bits.Add64(x58, x55, uint64(p521Uint1(x66)))
- var x69 uint64
- var x70 uint64
- x69, x70 = bits.Add64(x56, x53, uint64(p521Uint1(x68)))
- var x71 uint64
- var x72 uint64
- x71, x72 = bits.Add64(x54, x51, uint64(p521Uint1(x70)))
- var x73 uint64
- var x74 uint64
- x73, x74 = bits.Add64(x52, x49, uint64(p521Uint1(x72)))
- var x75 uint64
- var x76 uint64
- x75, x76 = bits.Add64(x50, x47, uint64(p521Uint1(x74)))
- var x77 uint64
- var x78 uint64
- x77, x78 = bits.Add64(x48, x45, uint64(p521Uint1(x76)))
- x79 := (uint64(p521Uint1(x78)) + x46)
- var x81 uint64
- _, x81 = bits.Add64(x26, x61, uint64(0x0))
- var x82 uint64
- var x83 uint64
- x82, x83 = bits.Add64(x28, x63, uint64(p521Uint1(x81)))
- var x84 uint64
- var x85 uint64
- x84, x85 = bits.Add64(x30, x65, uint64(p521Uint1(x83)))
- var x86 uint64
- var x87 uint64
- x86, x87 = bits.Add64(x32, x67, uint64(p521Uint1(x85)))
- var x88 uint64
- var x89 uint64
- x88, x89 = bits.Add64(x34, x69, uint64(p521Uint1(x87)))
- var x90 uint64
- var x91 uint64
- x90, x91 = bits.Add64(x36, x71, uint64(p521Uint1(x89)))
- var x92 uint64
- var x93 uint64
- x92, x93 = bits.Add64(x38, x73, uint64(p521Uint1(x91)))
- var x94 uint64
- var x95 uint64
- x94, x95 = bits.Add64(x40, x75, uint64(p521Uint1(x93)))
- var x96 uint64
- var x97 uint64
- x96, x97 = bits.Add64(x42, x77, uint64(p521Uint1(x95)))
- var x98 uint64
- var x99 uint64
- x98, x99 = bits.Add64(x44, x79, uint64(p521Uint1(x97)))
- var x100 uint64
- var x101 uint64
- x101, x100 = bits.Mul64(x1, arg1[8])
- var x102 uint64
- var x103 uint64
- x103, x102 = bits.Mul64(x1, arg1[7])
- var x104 uint64
- var x105 uint64
- x105, x104 = bits.Mul64(x1, arg1[6])
- var x106 uint64
- var x107 uint64
- x107, x106 = bits.Mul64(x1, arg1[5])
- var x108 uint64
- var x109 uint64
- x109, x108 = bits.Mul64(x1, arg1[4])
- var x110 uint64
- var x111 uint64
- x111, x110 = bits.Mul64(x1, arg1[3])
- var x112 uint64
- var x113 uint64
- x113, x112 = bits.Mul64(x1, arg1[2])
- var x114 uint64
- var x115 uint64
- x115, x114 = bits.Mul64(x1, arg1[1])
- var x116 uint64
- var x117 uint64
- x117, x116 = bits.Mul64(x1, arg1[0])
- var x118 uint64
- var x119 uint64
- x118, x119 = bits.Add64(x117, x114, uint64(0x0))
- var x120 uint64
- var x121 uint64
- x120, x121 = bits.Add64(x115, x112, uint64(p521Uint1(x119)))
- var x122 uint64
- var x123 uint64
- x122, x123 = bits.Add64(x113, x110, uint64(p521Uint1(x121)))
- var x124 uint64
- var x125 uint64
- x124, x125 = bits.Add64(x111, x108, uint64(p521Uint1(x123)))
- var x126 uint64
- var x127 uint64
- x126, x127 = bits.Add64(x109, x106, uint64(p521Uint1(x125)))
- var x128 uint64
- var x129 uint64
- x128, x129 = bits.Add64(x107, x104, uint64(p521Uint1(x127)))
- var x130 uint64
- var x131 uint64
- x130, x131 = bits.Add64(x105, x102, uint64(p521Uint1(x129)))
- var x132 uint64
- var x133 uint64
- x132, x133 = bits.Add64(x103, x100, uint64(p521Uint1(x131)))
- x134 := (uint64(p521Uint1(x133)) + x101)
- var x135 uint64
- var x136 uint64
- x135, x136 = bits.Add64(x82, x116, uint64(0x0))
- var x137 uint64
- var x138 uint64
- x137, x138 = bits.Add64(x84, x118, uint64(p521Uint1(x136)))
- var x139 uint64
- var x140 uint64
- x139, x140 = bits.Add64(x86, x120, uint64(p521Uint1(x138)))
- var x141 uint64
- var x142 uint64
- x141, x142 = bits.Add64(x88, x122, uint64(p521Uint1(x140)))
- var x143 uint64
- var x144 uint64
- x143, x144 = bits.Add64(x90, x124, uint64(p521Uint1(x142)))
- var x145 uint64
- var x146 uint64
- x145, x146 = bits.Add64(x92, x126, uint64(p521Uint1(x144)))
- var x147 uint64
- var x148 uint64
- x147, x148 = bits.Add64(x94, x128, uint64(p521Uint1(x146)))
- var x149 uint64
- var x150 uint64
- x149, x150 = bits.Add64(x96, x130, uint64(p521Uint1(x148)))
- var x151 uint64
- var x152 uint64
- x151, x152 = bits.Add64(x98, x132, uint64(p521Uint1(x150)))
- var x153 uint64
- var x154 uint64
- x153, x154 = bits.Add64(uint64(p521Uint1(x99)), x134, uint64(p521Uint1(x152)))
- var x155 uint64
- var x156 uint64
- x156, x155 = bits.Mul64(x135, 0x1ff)
- var x157 uint64
- var x158 uint64
- x158, x157 = bits.Mul64(x135, 0xffffffffffffffff)
- var x159 uint64
- var x160 uint64
- x160, x159 = bits.Mul64(x135, 0xffffffffffffffff)
- var x161 uint64
- var x162 uint64
- x162, x161 = bits.Mul64(x135, 0xffffffffffffffff)
- var x163 uint64
- var x164 uint64
- x164, x163 = bits.Mul64(x135, 0xffffffffffffffff)
- var x165 uint64
- var x166 uint64
- x166, x165 = bits.Mul64(x135, 0xffffffffffffffff)
- var x167 uint64
- var x168 uint64
- x168, x167 = bits.Mul64(x135, 0xffffffffffffffff)
- var x169 uint64
- var x170 uint64
- x170, x169 = bits.Mul64(x135, 0xffffffffffffffff)
- var x171 uint64
- var x172 uint64
- x172, x171 = bits.Mul64(x135, 0xffffffffffffffff)
- var x173 uint64
- var x174 uint64
- x173, x174 = bits.Add64(x172, x169, uint64(0x0))
- var x175 uint64
- var x176 uint64
- x175, x176 = bits.Add64(x170, x167, uint64(p521Uint1(x174)))
- var x177 uint64
- var x178 uint64
- x177, x178 = bits.Add64(x168, x165, uint64(p521Uint1(x176)))
- var x179 uint64
- var x180 uint64
- x179, x180 = bits.Add64(x166, x163, uint64(p521Uint1(x178)))
- var x181 uint64
- var x182 uint64
- x181, x182 = bits.Add64(x164, x161, uint64(p521Uint1(x180)))
- var x183 uint64
- var x184 uint64
- x183, x184 = bits.Add64(x162, x159, uint64(p521Uint1(x182)))
- var x185 uint64
- var x186 uint64
- x185, x186 = bits.Add64(x160, x157, uint64(p521Uint1(x184)))
- var x187 uint64
- var x188 uint64
- x187, x188 = bits.Add64(x158, x155, uint64(p521Uint1(x186)))
- x189 := (uint64(p521Uint1(x188)) + x156)
- var x191 uint64
- _, x191 = bits.Add64(x135, x171, uint64(0x0))
- var x192 uint64
- var x193 uint64
- x192, x193 = bits.Add64(x137, x173, uint64(p521Uint1(x191)))
- var x194 uint64
- var x195 uint64
- x194, x195 = bits.Add64(x139, x175, uint64(p521Uint1(x193)))
- var x196 uint64
- var x197 uint64
- x196, x197 = bits.Add64(x141, x177, uint64(p521Uint1(x195)))
- var x198 uint64
- var x199 uint64
- x198, x199 = bits.Add64(x143, x179, uint64(p521Uint1(x197)))
- var x200 uint64
- var x201 uint64
- x200, x201 = bits.Add64(x145, x181, uint64(p521Uint1(x199)))
- var x202 uint64
- var x203 uint64
- x202, x203 = bits.Add64(x147, x183, uint64(p521Uint1(x201)))
- var x204 uint64
- var x205 uint64
- x204, x205 = bits.Add64(x149, x185, uint64(p521Uint1(x203)))
- var x206 uint64
- var x207 uint64
- x206, x207 = bits.Add64(x151, x187, uint64(p521Uint1(x205)))
- var x208 uint64
- var x209 uint64
- x208, x209 = bits.Add64(x153, x189, uint64(p521Uint1(x207)))
- x210 := (uint64(p521Uint1(x209)) + uint64(p521Uint1(x154)))
- var x211 uint64
- var x212 uint64
- x212, x211 = bits.Mul64(x2, arg1[8])
- var x213 uint64
- var x214 uint64
- x214, x213 = bits.Mul64(x2, arg1[7])
- var x215 uint64
- var x216 uint64
- x216, x215 = bits.Mul64(x2, arg1[6])
- var x217 uint64
- var x218 uint64
- x218, x217 = bits.Mul64(x2, arg1[5])
- var x219 uint64
- var x220 uint64
- x220, x219 = bits.Mul64(x2, arg1[4])
- var x221 uint64
- var x222 uint64
- x222, x221 = bits.Mul64(x2, arg1[3])
- var x223 uint64
- var x224 uint64
- x224, x223 = bits.Mul64(x2, arg1[2])
- var x225 uint64
- var x226 uint64
- x226, x225 = bits.Mul64(x2, arg1[1])
- var x227 uint64
- var x228 uint64
- x228, x227 = bits.Mul64(x2, arg1[0])
- var x229 uint64
- var x230 uint64
- x229, x230 = bits.Add64(x228, x225, uint64(0x0))
- var x231 uint64
- var x232 uint64
- x231, x232 = bits.Add64(x226, x223, uint64(p521Uint1(x230)))
- var x233 uint64
- var x234 uint64
- x233, x234 = bits.Add64(x224, x221, uint64(p521Uint1(x232)))
- var x235 uint64
- var x236 uint64
- x235, x236 = bits.Add64(x222, x219, uint64(p521Uint1(x234)))
- var x237 uint64
- var x238 uint64
- x237, x238 = bits.Add64(x220, x217, uint64(p521Uint1(x236)))
- var x239 uint64
- var x240 uint64
- x239, x240 = bits.Add64(x218, x215, uint64(p521Uint1(x238)))
- var x241 uint64
- var x242 uint64
- x241, x242 = bits.Add64(x216, x213, uint64(p521Uint1(x240)))
- var x243 uint64
- var x244 uint64
- x243, x244 = bits.Add64(x214, x211, uint64(p521Uint1(x242)))
- x245 := (uint64(p521Uint1(x244)) + x212)
- var x246 uint64
- var x247 uint64
- x246, x247 = bits.Add64(x192, x227, uint64(0x0))
- var x248 uint64
- var x249 uint64
- x248, x249 = bits.Add64(x194, x229, uint64(p521Uint1(x247)))
- var x250 uint64
- var x251 uint64
- x250, x251 = bits.Add64(x196, x231, uint64(p521Uint1(x249)))
- var x252 uint64
- var x253 uint64
- x252, x253 = bits.Add64(x198, x233, uint64(p521Uint1(x251)))
- var x254 uint64
- var x255 uint64
- x254, x255 = bits.Add64(x200, x235, uint64(p521Uint1(x253)))
- var x256 uint64
- var x257 uint64
- x256, x257 = bits.Add64(x202, x237, uint64(p521Uint1(x255)))
- var x258 uint64
- var x259 uint64
- x258, x259 = bits.Add64(x204, x239, uint64(p521Uint1(x257)))
- var x260 uint64
- var x261 uint64
- x260, x261 = bits.Add64(x206, x241, uint64(p521Uint1(x259)))
- var x262 uint64
- var x263 uint64
- x262, x263 = bits.Add64(x208, x243, uint64(p521Uint1(x261)))
- var x264 uint64
- var x265 uint64
- x264, x265 = bits.Add64(x210, x245, uint64(p521Uint1(x263)))
- var x266 uint64
- var x267 uint64
- x267, x266 = bits.Mul64(x246, 0x1ff)
- var x268 uint64
- var x269 uint64
- x269, x268 = bits.Mul64(x246, 0xffffffffffffffff)
- var x270 uint64
- var x271 uint64
- x271, x270 = bits.Mul64(x246, 0xffffffffffffffff)
- var x272 uint64
- var x273 uint64
- x273, x272 = bits.Mul64(x246, 0xffffffffffffffff)
- var x274 uint64
- var x275 uint64
- x275, x274 = bits.Mul64(x246, 0xffffffffffffffff)
- var x276 uint64
- var x277 uint64
- x277, x276 = bits.Mul64(x246, 0xffffffffffffffff)
- var x278 uint64
- var x279 uint64
- x279, x278 = bits.Mul64(x246, 0xffffffffffffffff)
- var x280 uint64
- var x281 uint64
- x281, x280 = bits.Mul64(x246, 0xffffffffffffffff)
- var x282 uint64
- var x283 uint64
- x283, x282 = bits.Mul64(x246, 0xffffffffffffffff)
- var x284 uint64
- var x285 uint64
- x284, x285 = bits.Add64(x283, x280, uint64(0x0))
- var x286 uint64
- var x287 uint64
- x286, x287 = bits.Add64(x281, x278, uint64(p521Uint1(x285)))
- var x288 uint64
- var x289 uint64
- x288, x289 = bits.Add64(x279, x276, uint64(p521Uint1(x287)))
- var x290 uint64
- var x291 uint64
- x290, x291 = bits.Add64(x277, x274, uint64(p521Uint1(x289)))
- var x292 uint64
- var x293 uint64
- x292, x293 = bits.Add64(x275, x272, uint64(p521Uint1(x291)))
- var x294 uint64
- var x295 uint64
- x294, x295 = bits.Add64(x273, x270, uint64(p521Uint1(x293)))
- var x296 uint64
- var x297 uint64
- x296, x297 = bits.Add64(x271, x268, uint64(p521Uint1(x295)))
- var x298 uint64
- var x299 uint64
- x298, x299 = bits.Add64(x269, x266, uint64(p521Uint1(x297)))
- x300 := (uint64(p521Uint1(x299)) + x267)
- var x302 uint64
- _, x302 = bits.Add64(x246, x282, uint64(0x0))
- var x303 uint64
- var x304 uint64
- x303, x304 = bits.Add64(x248, x284, uint64(p521Uint1(x302)))
- var x305 uint64
- var x306 uint64
- x305, x306 = bits.Add64(x250, x286, uint64(p521Uint1(x304)))
- var x307 uint64
- var x308 uint64
- x307, x308 = bits.Add64(x252, x288, uint64(p521Uint1(x306)))
- var x309 uint64
- var x310 uint64
- x309, x310 = bits.Add64(x254, x290, uint64(p521Uint1(x308)))
- var x311 uint64
- var x312 uint64
- x311, x312 = bits.Add64(x256, x292, uint64(p521Uint1(x310)))
- var x313 uint64
- var x314 uint64
- x313, x314 = bits.Add64(x258, x294, uint64(p521Uint1(x312)))
- var x315 uint64
- var x316 uint64
- x315, x316 = bits.Add64(x260, x296, uint64(p521Uint1(x314)))
- var x317 uint64
- var x318 uint64
- x317, x318 = bits.Add64(x262, x298, uint64(p521Uint1(x316)))
- var x319 uint64
- var x320 uint64
- x319, x320 = bits.Add64(x264, x300, uint64(p521Uint1(x318)))
- x321 := (uint64(p521Uint1(x320)) + uint64(p521Uint1(x265)))
- var x322 uint64
- var x323 uint64
- x323, x322 = bits.Mul64(x3, arg1[8])
- var x324 uint64
- var x325 uint64
- x325, x324 = bits.Mul64(x3, arg1[7])
- var x326 uint64
- var x327 uint64
- x327, x326 = bits.Mul64(x3, arg1[6])
- var x328 uint64
- var x329 uint64
- x329, x328 = bits.Mul64(x3, arg1[5])
- var x330 uint64
- var x331 uint64
- x331, x330 = bits.Mul64(x3, arg1[4])
- var x332 uint64
- var x333 uint64
- x333, x332 = bits.Mul64(x3, arg1[3])
- var x334 uint64
- var x335 uint64
- x335, x334 = bits.Mul64(x3, arg1[2])
- var x336 uint64
- var x337 uint64
- x337, x336 = bits.Mul64(x3, arg1[1])
- var x338 uint64
- var x339 uint64
- x339, x338 = bits.Mul64(x3, arg1[0])
- var x340 uint64
- var x341 uint64
- x340, x341 = bits.Add64(x339, x336, uint64(0x0))
- var x342 uint64
- var x343 uint64
- x342, x343 = bits.Add64(x337, x334, uint64(p521Uint1(x341)))
- var x344 uint64
- var x345 uint64
- x344, x345 = bits.Add64(x335, x332, uint64(p521Uint1(x343)))
- var x346 uint64
- var x347 uint64
- x346, x347 = bits.Add64(x333, x330, uint64(p521Uint1(x345)))
- var x348 uint64
- var x349 uint64
- x348, x349 = bits.Add64(x331, x328, uint64(p521Uint1(x347)))
- var x350 uint64
- var x351 uint64
- x350, x351 = bits.Add64(x329, x326, uint64(p521Uint1(x349)))
- var x352 uint64
- var x353 uint64
- x352, x353 = bits.Add64(x327, x324, uint64(p521Uint1(x351)))
- var x354 uint64
- var x355 uint64
- x354, x355 = bits.Add64(x325, x322, uint64(p521Uint1(x353)))
- x356 := (uint64(p521Uint1(x355)) + x323)
- var x357 uint64
- var x358 uint64
- x357, x358 = bits.Add64(x303, x338, uint64(0x0))
- var x359 uint64
- var x360 uint64
- x359, x360 = bits.Add64(x305, x340, uint64(p521Uint1(x358)))
- var x361 uint64
- var x362 uint64
- x361, x362 = bits.Add64(x307, x342, uint64(p521Uint1(x360)))
- var x363 uint64
- var x364 uint64
- x363, x364 = bits.Add64(x309, x344, uint64(p521Uint1(x362)))
- var x365 uint64
- var x366 uint64
- x365, x366 = bits.Add64(x311, x346, uint64(p521Uint1(x364)))
- var x367 uint64
- var x368 uint64
- x367, x368 = bits.Add64(x313, x348, uint64(p521Uint1(x366)))
- var x369 uint64
- var x370 uint64
- x369, x370 = bits.Add64(x315, x350, uint64(p521Uint1(x368)))
- var x371 uint64
- var x372 uint64
- x371, x372 = bits.Add64(x317, x352, uint64(p521Uint1(x370)))
- var x373 uint64
- var x374 uint64
- x373, x374 = bits.Add64(x319, x354, uint64(p521Uint1(x372)))
- var x375 uint64
- var x376 uint64
- x375, x376 = bits.Add64(x321, x356, uint64(p521Uint1(x374)))
- var x377 uint64
- var x378 uint64
- x378, x377 = bits.Mul64(x357, 0x1ff)
- var x379 uint64
- var x380 uint64
- x380, x379 = bits.Mul64(x357, 0xffffffffffffffff)
- var x381 uint64
- var x382 uint64
- x382, x381 = bits.Mul64(x357, 0xffffffffffffffff)
- var x383 uint64
- var x384 uint64
- x384, x383 = bits.Mul64(x357, 0xffffffffffffffff)
- var x385 uint64
- var x386 uint64
- x386, x385 = bits.Mul64(x357, 0xffffffffffffffff)
- var x387 uint64
- var x388 uint64
- x388, x387 = bits.Mul64(x357, 0xffffffffffffffff)
- var x389 uint64
- var x390 uint64
- x390, x389 = bits.Mul64(x357, 0xffffffffffffffff)
- var x391 uint64
- var x392 uint64
- x392, x391 = bits.Mul64(x357, 0xffffffffffffffff)
- var x393 uint64
- var x394 uint64
- x394, x393 = bits.Mul64(x357, 0xffffffffffffffff)
- var x395 uint64
- var x396 uint64
- x395, x396 = bits.Add64(x394, x391, uint64(0x0))
- var x397 uint64
- var x398 uint64
- x397, x398 = bits.Add64(x392, x389, uint64(p521Uint1(x396)))
- var x399 uint64
- var x400 uint64
- x399, x400 = bits.Add64(x390, x387, uint64(p521Uint1(x398)))
- var x401 uint64
- var x402 uint64
- x401, x402 = bits.Add64(x388, x385, uint64(p521Uint1(x400)))
- var x403 uint64
- var x404 uint64
- x403, x404 = bits.Add64(x386, x383, uint64(p521Uint1(x402)))
- var x405 uint64
- var x406 uint64
- x405, x406 = bits.Add64(x384, x381, uint64(p521Uint1(x404)))
- var x407 uint64
- var x408 uint64
- x407, x408 = bits.Add64(x382, x379, uint64(p521Uint1(x406)))
- var x409 uint64
- var x410 uint64
- x409, x410 = bits.Add64(x380, x377, uint64(p521Uint1(x408)))
- x411 := (uint64(p521Uint1(x410)) + x378)
- var x413 uint64
- _, x413 = bits.Add64(x357, x393, uint64(0x0))
- var x414 uint64
- var x415 uint64
- x414, x415 = bits.Add64(x359, x395, uint64(p521Uint1(x413)))
- var x416 uint64
- var x417 uint64
- x416, x417 = bits.Add64(x361, x397, uint64(p521Uint1(x415)))
- var x418 uint64
- var x419 uint64
- x418, x419 = bits.Add64(x363, x399, uint64(p521Uint1(x417)))
- var x420 uint64
- var x421 uint64
- x420, x421 = bits.Add64(x365, x401, uint64(p521Uint1(x419)))
- var x422 uint64
- var x423 uint64
- x422, x423 = bits.Add64(x367, x403, uint64(p521Uint1(x421)))
- var x424 uint64
- var x425 uint64
- x424, x425 = bits.Add64(x369, x405, uint64(p521Uint1(x423)))
- var x426 uint64
- var x427 uint64
- x426, x427 = bits.Add64(x371, x407, uint64(p521Uint1(x425)))
- var x428 uint64
- var x429 uint64
- x428, x429 = bits.Add64(x373, x409, uint64(p521Uint1(x427)))
- var x430 uint64
- var x431 uint64
- x430, x431 = bits.Add64(x375, x411, uint64(p521Uint1(x429)))
- x432 := (uint64(p521Uint1(x431)) + uint64(p521Uint1(x376)))
- var x433 uint64
- var x434 uint64
- x434, x433 = bits.Mul64(x4, arg1[8])
- var x435 uint64
- var x436 uint64
- x436, x435 = bits.Mul64(x4, arg1[7])
- var x437 uint64
- var x438 uint64
- x438, x437 = bits.Mul64(x4, arg1[6])
- var x439 uint64
- var x440 uint64
- x440, x439 = bits.Mul64(x4, arg1[5])
- var x441 uint64
- var x442 uint64
- x442, x441 = bits.Mul64(x4, arg1[4])
- var x443 uint64
- var x444 uint64
- x444, x443 = bits.Mul64(x4, arg1[3])
- var x445 uint64
- var x446 uint64
- x446, x445 = bits.Mul64(x4, arg1[2])
- var x447 uint64
- var x448 uint64
- x448, x447 = bits.Mul64(x4, arg1[1])
- var x449 uint64
- var x450 uint64
- x450, x449 = bits.Mul64(x4, arg1[0])
- var x451 uint64
- var x452 uint64
- x451, x452 = bits.Add64(x450, x447, uint64(0x0))
- var x453 uint64
- var x454 uint64
- x453, x454 = bits.Add64(x448, x445, uint64(p521Uint1(x452)))
- var x455 uint64
- var x456 uint64
- x455, x456 = bits.Add64(x446, x443, uint64(p521Uint1(x454)))
- var x457 uint64
- var x458 uint64
- x457, x458 = bits.Add64(x444, x441, uint64(p521Uint1(x456)))
- var x459 uint64
- var x460 uint64
- x459, x460 = bits.Add64(x442, x439, uint64(p521Uint1(x458)))
- var x461 uint64
- var x462 uint64
- x461, x462 = bits.Add64(x440, x437, uint64(p521Uint1(x460)))
- var x463 uint64
- var x464 uint64
- x463, x464 = bits.Add64(x438, x435, uint64(p521Uint1(x462)))
- var x465 uint64
- var x466 uint64
- x465, x466 = bits.Add64(x436, x433, uint64(p521Uint1(x464)))
- x467 := (uint64(p521Uint1(x466)) + x434)
- var x468 uint64
- var x469 uint64
- x468, x469 = bits.Add64(x414, x449, uint64(0x0))
- var x470 uint64
- var x471 uint64
- x470, x471 = bits.Add64(x416, x451, uint64(p521Uint1(x469)))
- var x472 uint64
- var x473 uint64
- x472, x473 = bits.Add64(x418, x453, uint64(p521Uint1(x471)))
- var x474 uint64
- var x475 uint64
- x474, x475 = bits.Add64(x420, x455, uint64(p521Uint1(x473)))
- var x476 uint64
- var x477 uint64
- x476, x477 = bits.Add64(x422, x457, uint64(p521Uint1(x475)))
- var x478 uint64
- var x479 uint64
- x478, x479 = bits.Add64(x424, x459, uint64(p521Uint1(x477)))
- var x480 uint64
- var x481 uint64
- x480, x481 = bits.Add64(x426, x461, uint64(p521Uint1(x479)))
- var x482 uint64
- var x483 uint64
- x482, x483 = bits.Add64(x428, x463, uint64(p521Uint1(x481)))
- var x484 uint64
- var x485 uint64
- x484, x485 = bits.Add64(x430, x465, uint64(p521Uint1(x483)))
- var x486 uint64
- var x487 uint64
- x486, x487 = bits.Add64(x432, x467, uint64(p521Uint1(x485)))
- var x488 uint64
- var x489 uint64
- x489, x488 = bits.Mul64(x468, 0x1ff)
- var x490 uint64
- var x491 uint64
- x491, x490 = bits.Mul64(x468, 0xffffffffffffffff)
- var x492 uint64
- var x493 uint64
- x493, x492 = bits.Mul64(x468, 0xffffffffffffffff)
- var x494 uint64
- var x495 uint64
- x495, x494 = bits.Mul64(x468, 0xffffffffffffffff)
- var x496 uint64
- var x497 uint64
- x497, x496 = bits.Mul64(x468, 0xffffffffffffffff)
- var x498 uint64
- var x499 uint64
- x499, x498 = bits.Mul64(x468, 0xffffffffffffffff)
- var x500 uint64
- var x501 uint64
- x501, x500 = bits.Mul64(x468, 0xffffffffffffffff)
- var x502 uint64
- var x503 uint64
- x503, x502 = bits.Mul64(x468, 0xffffffffffffffff)
- var x504 uint64
- var x505 uint64
- x505, x504 = bits.Mul64(x468, 0xffffffffffffffff)
- var x506 uint64
- var x507 uint64
- x506, x507 = bits.Add64(x505, x502, uint64(0x0))
- var x508 uint64
- var x509 uint64
- x508, x509 = bits.Add64(x503, x500, uint64(p521Uint1(x507)))
- var x510 uint64
- var x511 uint64
- x510, x511 = bits.Add64(x501, x498, uint64(p521Uint1(x509)))
- var x512 uint64
- var x513 uint64
- x512, x513 = bits.Add64(x499, x496, uint64(p521Uint1(x511)))
- var x514 uint64
- var x515 uint64
- x514, x515 = bits.Add64(x497, x494, uint64(p521Uint1(x513)))
- var x516 uint64
- var x517 uint64
- x516, x517 = bits.Add64(x495, x492, uint64(p521Uint1(x515)))
- var x518 uint64
- var x519 uint64
- x518, x519 = bits.Add64(x493, x490, uint64(p521Uint1(x517)))
- var x520 uint64
- var x521 uint64
- x520, x521 = bits.Add64(x491, x488, uint64(p521Uint1(x519)))
- x522 := (uint64(p521Uint1(x521)) + x489)
- var x524 uint64
- _, x524 = bits.Add64(x468, x504, uint64(0x0))
- var x525 uint64
- var x526 uint64
- x525, x526 = bits.Add64(x470, x506, uint64(p521Uint1(x524)))
- var x527 uint64
- var x528 uint64
- x527, x528 = bits.Add64(x472, x508, uint64(p521Uint1(x526)))
- var x529 uint64
- var x530 uint64
- x529, x530 = bits.Add64(x474, x510, uint64(p521Uint1(x528)))
- var x531 uint64
- var x532 uint64
- x531, x532 = bits.Add64(x476, x512, uint64(p521Uint1(x530)))
- var x533 uint64
- var x534 uint64
- x533, x534 = bits.Add64(x478, x514, uint64(p521Uint1(x532)))
- var x535 uint64
- var x536 uint64
- x535, x536 = bits.Add64(x480, x516, uint64(p521Uint1(x534)))
- var x537 uint64
- var x538 uint64
- x537, x538 = bits.Add64(x482, x518, uint64(p521Uint1(x536)))
- var x539 uint64
- var x540 uint64
- x539, x540 = bits.Add64(x484, x520, uint64(p521Uint1(x538)))
- var x541 uint64
- var x542 uint64
- x541, x542 = bits.Add64(x486, x522, uint64(p521Uint1(x540)))
- x543 := (uint64(p521Uint1(x542)) + uint64(p521Uint1(x487)))
- var x544 uint64
- var x545 uint64
- x545, x544 = bits.Mul64(x5, arg1[8])
- var x546 uint64
- var x547 uint64
- x547, x546 = bits.Mul64(x5, arg1[7])
- var x548 uint64
- var x549 uint64
- x549, x548 = bits.Mul64(x5, arg1[6])
- var x550 uint64
- var x551 uint64
- x551, x550 = bits.Mul64(x5, arg1[5])
- var x552 uint64
- var x553 uint64
- x553, x552 = bits.Mul64(x5, arg1[4])
- var x554 uint64
- var x555 uint64
- x555, x554 = bits.Mul64(x5, arg1[3])
- var x556 uint64
- var x557 uint64
- x557, x556 = bits.Mul64(x5, arg1[2])
- var x558 uint64
- var x559 uint64
- x559, x558 = bits.Mul64(x5, arg1[1])
- var x560 uint64
- var x561 uint64
- x561, x560 = bits.Mul64(x5, arg1[0])
- var x562 uint64
- var x563 uint64
- x562, x563 = bits.Add64(x561, x558, uint64(0x0))
- var x564 uint64
- var x565 uint64
- x564, x565 = bits.Add64(x559, x556, uint64(p521Uint1(x563)))
- var x566 uint64
- var x567 uint64
- x566, x567 = bits.Add64(x557, x554, uint64(p521Uint1(x565)))
- var x568 uint64
- var x569 uint64
- x568, x569 = bits.Add64(x555, x552, uint64(p521Uint1(x567)))
- var x570 uint64
- var x571 uint64
- x570, x571 = bits.Add64(x553, x550, uint64(p521Uint1(x569)))
- var x572 uint64
- var x573 uint64
- x572, x573 = bits.Add64(x551, x548, uint64(p521Uint1(x571)))
- var x574 uint64
- var x575 uint64
- x574, x575 = bits.Add64(x549, x546, uint64(p521Uint1(x573)))
- var x576 uint64
- var x577 uint64
- x576, x577 = bits.Add64(x547, x544, uint64(p521Uint1(x575)))
- x578 := (uint64(p521Uint1(x577)) + x545)
- var x579 uint64
- var x580 uint64
- x579, x580 = bits.Add64(x525, x560, uint64(0x0))
- var x581 uint64
- var x582 uint64
- x581, x582 = bits.Add64(x527, x562, uint64(p521Uint1(x580)))
- var x583 uint64
- var x584 uint64
- x583, x584 = bits.Add64(x529, x564, uint64(p521Uint1(x582)))
- var x585 uint64
- var x586 uint64
- x585, x586 = bits.Add64(x531, x566, uint64(p521Uint1(x584)))
- var x587 uint64
- var x588 uint64
- x587, x588 = bits.Add64(x533, x568, uint64(p521Uint1(x586)))
- var x589 uint64
- var x590 uint64
- x589, x590 = bits.Add64(x535, x570, uint64(p521Uint1(x588)))
- var x591 uint64
- var x592 uint64
- x591, x592 = bits.Add64(x537, x572, uint64(p521Uint1(x590)))
- var x593 uint64
- var x594 uint64
- x593, x594 = bits.Add64(x539, x574, uint64(p521Uint1(x592)))
- var x595 uint64
- var x596 uint64
- x595, x596 = bits.Add64(x541, x576, uint64(p521Uint1(x594)))
- var x597 uint64
- var x598 uint64
- x597, x598 = bits.Add64(x543, x578, uint64(p521Uint1(x596)))
- var x599 uint64
- var x600 uint64
- x600, x599 = bits.Mul64(x579, 0x1ff)
- var x601 uint64
- var x602 uint64
- x602, x601 = bits.Mul64(x579, 0xffffffffffffffff)
- var x603 uint64
- var x604 uint64
- x604, x603 = bits.Mul64(x579, 0xffffffffffffffff)
- var x605 uint64
- var x606 uint64
- x606, x605 = bits.Mul64(x579, 0xffffffffffffffff)
- var x607 uint64
- var x608 uint64
- x608, x607 = bits.Mul64(x579, 0xffffffffffffffff)
- var x609 uint64
- var x610 uint64
- x610, x609 = bits.Mul64(x579, 0xffffffffffffffff)
- var x611 uint64
- var x612 uint64
- x612, x611 = bits.Mul64(x579, 0xffffffffffffffff)
- var x613 uint64
- var x614 uint64
- x614, x613 = bits.Mul64(x579, 0xffffffffffffffff)
- var x615 uint64
- var x616 uint64
- x616, x615 = bits.Mul64(x579, 0xffffffffffffffff)
- var x617 uint64
- var x618 uint64
- x617, x618 = bits.Add64(x616, x613, uint64(0x0))
- var x619 uint64
- var x620 uint64
- x619, x620 = bits.Add64(x614, x611, uint64(p521Uint1(x618)))
- var x621 uint64
- var x622 uint64
- x621, x622 = bits.Add64(x612, x609, uint64(p521Uint1(x620)))
- var x623 uint64
- var x624 uint64
- x623, x624 = bits.Add64(x610, x607, uint64(p521Uint1(x622)))
- var x625 uint64
- var x626 uint64
- x625, x626 = bits.Add64(x608, x605, uint64(p521Uint1(x624)))
- var x627 uint64
- var x628 uint64
- x627, x628 = bits.Add64(x606, x603, uint64(p521Uint1(x626)))
- var x629 uint64
- var x630 uint64
- x629, x630 = bits.Add64(x604, x601, uint64(p521Uint1(x628)))
- var x631 uint64
- var x632 uint64
- x631, x632 = bits.Add64(x602, x599, uint64(p521Uint1(x630)))
- x633 := (uint64(p521Uint1(x632)) + x600)
- var x635 uint64
- _, x635 = bits.Add64(x579, x615, uint64(0x0))
- var x636 uint64
- var x637 uint64
- x636, x637 = bits.Add64(x581, x617, uint64(p521Uint1(x635)))
- var x638 uint64
- var x639 uint64
- x638, x639 = bits.Add64(x583, x619, uint64(p521Uint1(x637)))
- var x640 uint64
- var x641 uint64
- x640, x641 = bits.Add64(x585, x621, uint64(p521Uint1(x639)))
- var x642 uint64
- var x643 uint64
- x642, x643 = bits.Add64(x587, x623, uint64(p521Uint1(x641)))
- var x644 uint64
- var x645 uint64
- x644, x645 = bits.Add64(x589, x625, uint64(p521Uint1(x643)))
- var x646 uint64
- var x647 uint64
- x646, x647 = bits.Add64(x591, x627, uint64(p521Uint1(x645)))
- var x648 uint64
- var x649 uint64
- x648, x649 = bits.Add64(x593, x629, uint64(p521Uint1(x647)))
- var x650 uint64
- var x651 uint64
- x650, x651 = bits.Add64(x595, x631, uint64(p521Uint1(x649)))
- var x652 uint64
- var x653 uint64
- x652, x653 = bits.Add64(x597, x633, uint64(p521Uint1(x651)))
- x654 := (uint64(p521Uint1(x653)) + uint64(p521Uint1(x598)))
- var x655 uint64
- var x656 uint64
- x656, x655 = bits.Mul64(x6, arg1[8])
- var x657 uint64
- var x658 uint64
- x658, x657 = bits.Mul64(x6, arg1[7])
- var x659 uint64
- var x660 uint64
- x660, x659 = bits.Mul64(x6, arg1[6])
- var x661 uint64
- var x662 uint64
- x662, x661 = bits.Mul64(x6, arg1[5])
- var x663 uint64
- var x664 uint64
- x664, x663 = bits.Mul64(x6, arg1[4])
- var x665 uint64
- var x666 uint64
- x666, x665 = bits.Mul64(x6, arg1[3])
- var x667 uint64
- var x668 uint64
- x668, x667 = bits.Mul64(x6, arg1[2])
- var x669 uint64
- var x670 uint64
- x670, x669 = bits.Mul64(x6, arg1[1])
- var x671 uint64
- var x672 uint64
- x672, x671 = bits.Mul64(x6, arg1[0])
- var x673 uint64
- var x674 uint64
- x673, x674 = bits.Add64(x672, x669, uint64(0x0))
- var x675 uint64
- var x676 uint64
- x675, x676 = bits.Add64(x670, x667, uint64(p521Uint1(x674)))
- var x677 uint64
- var x678 uint64
- x677, x678 = bits.Add64(x668, x665, uint64(p521Uint1(x676)))
- var x679 uint64
- var x680 uint64
- x679, x680 = bits.Add64(x666, x663, uint64(p521Uint1(x678)))
- var x681 uint64
- var x682 uint64
- x681, x682 = bits.Add64(x664, x661, uint64(p521Uint1(x680)))
- var x683 uint64
- var x684 uint64
- x683, x684 = bits.Add64(x662, x659, uint64(p521Uint1(x682)))
- var x685 uint64
- var x686 uint64
- x685, x686 = bits.Add64(x660, x657, uint64(p521Uint1(x684)))
- var x687 uint64
- var x688 uint64
- x687, x688 = bits.Add64(x658, x655, uint64(p521Uint1(x686)))
- x689 := (uint64(p521Uint1(x688)) + x656)
- var x690 uint64
- var x691 uint64
- x690, x691 = bits.Add64(x636, x671, uint64(0x0))
- var x692 uint64
- var x693 uint64
- x692, x693 = bits.Add64(x638, x673, uint64(p521Uint1(x691)))
- var x694 uint64
- var x695 uint64
- x694, x695 = bits.Add64(x640, x675, uint64(p521Uint1(x693)))
- var x696 uint64
- var x697 uint64
- x696, x697 = bits.Add64(x642, x677, uint64(p521Uint1(x695)))
- var x698 uint64
- var x699 uint64
- x698, x699 = bits.Add64(x644, x679, uint64(p521Uint1(x697)))
- var x700 uint64
- var x701 uint64
- x700, x701 = bits.Add64(x646, x681, uint64(p521Uint1(x699)))
- var x702 uint64
- var x703 uint64
- x702, x703 = bits.Add64(x648, x683, uint64(p521Uint1(x701)))
- var x704 uint64
- var x705 uint64
- x704, x705 = bits.Add64(x650, x685, uint64(p521Uint1(x703)))
- var x706 uint64
- var x707 uint64
- x706, x707 = bits.Add64(x652, x687, uint64(p521Uint1(x705)))
- var x708 uint64
- var x709 uint64
- x708, x709 = bits.Add64(x654, x689, uint64(p521Uint1(x707)))
- var x710 uint64
- var x711 uint64
- x711, x710 = bits.Mul64(x690, 0x1ff)
- var x712 uint64
- var x713 uint64
- x713, x712 = bits.Mul64(x690, 0xffffffffffffffff)
- var x714 uint64
- var x715 uint64
- x715, x714 = bits.Mul64(x690, 0xffffffffffffffff)
- var x716 uint64
- var x717 uint64
- x717, x716 = bits.Mul64(x690, 0xffffffffffffffff)
- var x718 uint64
- var x719 uint64
- x719, x718 = bits.Mul64(x690, 0xffffffffffffffff)
- var x720 uint64
- var x721 uint64
- x721, x720 = bits.Mul64(x690, 0xffffffffffffffff)
- var x722 uint64
- var x723 uint64
- x723, x722 = bits.Mul64(x690, 0xffffffffffffffff)
- var x724 uint64
- var x725 uint64
- x725, x724 = bits.Mul64(x690, 0xffffffffffffffff)
- var x726 uint64
- var x727 uint64
- x727, x726 = bits.Mul64(x690, 0xffffffffffffffff)
- var x728 uint64
- var x729 uint64
- x728, x729 = bits.Add64(x727, x724, uint64(0x0))
- var x730 uint64
- var x731 uint64
- x730, x731 = bits.Add64(x725, x722, uint64(p521Uint1(x729)))
- var x732 uint64
- var x733 uint64
- x732, x733 = bits.Add64(x723, x720, uint64(p521Uint1(x731)))
- var x734 uint64
- var x735 uint64
- x734, x735 = bits.Add64(x721, x718, uint64(p521Uint1(x733)))
- var x736 uint64
- var x737 uint64
- x736, x737 = bits.Add64(x719, x716, uint64(p521Uint1(x735)))
- var x738 uint64
- var x739 uint64
- x738, x739 = bits.Add64(x717, x714, uint64(p521Uint1(x737)))
- var x740 uint64
- var x741 uint64
- x740, x741 = bits.Add64(x715, x712, uint64(p521Uint1(x739)))
- var x742 uint64
- var x743 uint64
- x742, x743 = bits.Add64(x713, x710, uint64(p521Uint1(x741)))
- x744 := (uint64(p521Uint1(x743)) + x711)
- var x746 uint64
- _, x746 = bits.Add64(x690, x726, uint64(0x0))
- var x747 uint64
- var x748 uint64
- x747, x748 = bits.Add64(x692, x728, uint64(p521Uint1(x746)))
- var x749 uint64
- var x750 uint64
- x749, x750 = bits.Add64(x694, x730, uint64(p521Uint1(x748)))
- var x751 uint64
- var x752 uint64
- x751, x752 = bits.Add64(x696, x732, uint64(p521Uint1(x750)))
- var x753 uint64
- var x754 uint64
- x753, x754 = bits.Add64(x698, x734, uint64(p521Uint1(x752)))
- var x755 uint64
- var x756 uint64
- x755, x756 = bits.Add64(x700, x736, uint64(p521Uint1(x754)))
- var x757 uint64
- var x758 uint64
- x757, x758 = bits.Add64(x702, x738, uint64(p521Uint1(x756)))
- var x759 uint64
- var x760 uint64
- x759, x760 = bits.Add64(x704, x740, uint64(p521Uint1(x758)))
- var x761 uint64
- var x762 uint64
- x761, x762 = bits.Add64(x706, x742, uint64(p521Uint1(x760)))
- var x763 uint64
- var x764 uint64
- x763, x764 = bits.Add64(x708, x744, uint64(p521Uint1(x762)))
- x765 := (uint64(p521Uint1(x764)) + uint64(p521Uint1(x709)))
- var x766 uint64
- var x767 uint64
- x767, x766 = bits.Mul64(x7, arg1[8])
- var x768 uint64
- var x769 uint64
- x769, x768 = bits.Mul64(x7, arg1[7])
- var x770 uint64
- var x771 uint64
- x771, x770 = bits.Mul64(x7, arg1[6])
- var x772 uint64
- var x773 uint64
- x773, x772 = bits.Mul64(x7, arg1[5])
- var x774 uint64
- var x775 uint64
- x775, x774 = bits.Mul64(x7, arg1[4])
- var x776 uint64
- var x777 uint64
- x777, x776 = bits.Mul64(x7, arg1[3])
- var x778 uint64
- var x779 uint64
- x779, x778 = bits.Mul64(x7, arg1[2])
- var x780 uint64
- var x781 uint64
- x781, x780 = bits.Mul64(x7, arg1[1])
- var x782 uint64
- var x783 uint64
- x783, x782 = bits.Mul64(x7, arg1[0])
- var x784 uint64
- var x785 uint64
- x784, x785 = bits.Add64(x783, x780, uint64(0x0))
- var x786 uint64
- var x787 uint64
- x786, x787 = bits.Add64(x781, x778, uint64(p521Uint1(x785)))
- var x788 uint64
- var x789 uint64
- x788, x789 = bits.Add64(x779, x776, uint64(p521Uint1(x787)))
- var x790 uint64
- var x791 uint64
- x790, x791 = bits.Add64(x777, x774, uint64(p521Uint1(x789)))
- var x792 uint64
- var x793 uint64
- x792, x793 = bits.Add64(x775, x772, uint64(p521Uint1(x791)))
- var x794 uint64
- var x795 uint64
- x794, x795 = bits.Add64(x773, x770, uint64(p521Uint1(x793)))
- var x796 uint64
- var x797 uint64
- x796, x797 = bits.Add64(x771, x768, uint64(p521Uint1(x795)))
- var x798 uint64
- var x799 uint64
- x798, x799 = bits.Add64(x769, x766, uint64(p521Uint1(x797)))
- x800 := (uint64(p521Uint1(x799)) + x767)
- var x801 uint64
- var x802 uint64
- x801, x802 = bits.Add64(x747, x782, uint64(0x0))
- var x803 uint64
- var x804 uint64
- x803, x804 = bits.Add64(x749, x784, uint64(p521Uint1(x802)))
- var x805 uint64
- var x806 uint64
- x805, x806 = bits.Add64(x751, x786, uint64(p521Uint1(x804)))
- var x807 uint64
- var x808 uint64
- x807, x808 = bits.Add64(x753, x788, uint64(p521Uint1(x806)))
- var x809 uint64
- var x810 uint64
- x809, x810 = bits.Add64(x755, x790, uint64(p521Uint1(x808)))
- var x811 uint64
- var x812 uint64
- x811, x812 = bits.Add64(x757, x792, uint64(p521Uint1(x810)))
- var x813 uint64
- var x814 uint64
- x813, x814 = bits.Add64(x759, x794, uint64(p521Uint1(x812)))
- var x815 uint64
- var x816 uint64
- x815, x816 = bits.Add64(x761, x796, uint64(p521Uint1(x814)))
- var x817 uint64
- var x818 uint64
- x817, x818 = bits.Add64(x763, x798, uint64(p521Uint1(x816)))
- var x819 uint64
- var x820 uint64
- x819, x820 = bits.Add64(x765, x800, uint64(p521Uint1(x818)))
- var x821 uint64
- var x822 uint64
- x822, x821 = bits.Mul64(x801, 0x1ff)
- var x823 uint64
- var x824 uint64
- x824, x823 = bits.Mul64(x801, 0xffffffffffffffff)
- var x825 uint64
- var x826 uint64
- x826, x825 = bits.Mul64(x801, 0xffffffffffffffff)
- var x827 uint64
- var x828 uint64
- x828, x827 = bits.Mul64(x801, 0xffffffffffffffff)
- var x829 uint64
- var x830 uint64
- x830, x829 = bits.Mul64(x801, 0xffffffffffffffff)
- var x831 uint64
- var x832 uint64
- x832, x831 = bits.Mul64(x801, 0xffffffffffffffff)
- var x833 uint64
- var x834 uint64
- x834, x833 = bits.Mul64(x801, 0xffffffffffffffff)
- var x835 uint64
- var x836 uint64
- x836, x835 = bits.Mul64(x801, 0xffffffffffffffff)
- var x837 uint64
- var x838 uint64
- x838, x837 = bits.Mul64(x801, 0xffffffffffffffff)
- var x839 uint64
- var x840 uint64
- x839, x840 = bits.Add64(x838, x835, uint64(0x0))
- var x841 uint64
- var x842 uint64
- x841, x842 = bits.Add64(x836, x833, uint64(p521Uint1(x840)))
- var x843 uint64
- var x844 uint64
- x843, x844 = bits.Add64(x834, x831, uint64(p521Uint1(x842)))
- var x845 uint64
- var x846 uint64
- x845, x846 = bits.Add64(x832, x829, uint64(p521Uint1(x844)))
- var x847 uint64
- var x848 uint64
- x847, x848 = bits.Add64(x830, x827, uint64(p521Uint1(x846)))
- var x849 uint64
- var x850 uint64
- x849, x850 = bits.Add64(x828, x825, uint64(p521Uint1(x848)))
- var x851 uint64
- var x852 uint64
- x851, x852 = bits.Add64(x826, x823, uint64(p521Uint1(x850)))
- var x853 uint64
- var x854 uint64
- x853, x854 = bits.Add64(x824, x821, uint64(p521Uint1(x852)))
- x855 := (uint64(p521Uint1(x854)) + x822)
- var x857 uint64
- _, x857 = bits.Add64(x801, x837, uint64(0x0))
- var x858 uint64
- var x859 uint64
- x858, x859 = bits.Add64(x803, x839, uint64(p521Uint1(x857)))
- var x860 uint64
- var x861 uint64
- x860, x861 = bits.Add64(x805, x841, uint64(p521Uint1(x859)))
- var x862 uint64
- var x863 uint64
- x862, x863 = bits.Add64(x807, x843, uint64(p521Uint1(x861)))
- var x864 uint64
- var x865 uint64
- x864, x865 = bits.Add64(x809, x845, uint64(p521Uint1(x863)))
- var x866 uint64
- var x867 uint64
- x866, x867 = bits.Add64(x811, x847, uint64(p521Uint1(x865)))
- var x868 uint64
- var x869 uint64
- x868, x869 = bits.Add64(x813, x849, uint64(p521Uint1(x867)))
- var x870 uint64
- var x871 uint64
- x870, x871 = bits.Add64(x815, x851, uint64(p521Uint1(x869)))
- var x872 uint64
- var x873 uint64
- x872, x873 = bits.Add64(x817, x853, uint64(p521Uint1(x871)))
- var x874 uint64
- var x875 uint64
- x874, x875 = bits.Add64(x819, x855, uint64(p521Uint1(x873)))
- x876 := (uint64(p521Uint1(x875)) + uint64(p521Uint1(x820)))
- var x877 uint64
- var x878 uint64
- x878, x877 = bits.Mul64(x8, arg1[8])
- var x879 uint64
- var x880 uint64
- x880, x879 = bits.Mul64(x8, arg1[7])
- var x881 uint64
- var x882 uint64
- x882, x881 = bits.Mul64(x8, arg1[6])
- var x883 uint64
- var x884 uint64
- x884, x883 = bits.Mul64(x8, arg1[5])
- var x885 uint64
- var x886 uint64
- x886, x885 = bits.Mul64(x8, arg1[4])
- var x887 uint64
- var x888 uint64
- x888, x887 = bits.Mul64(x8, arg1[3])
- var x889 uint64
- var x890 uint64
- x890, x889 = bits.Mul64(x8, arg1[2])
- var x891 uint64
- var x892 uint64
- x892, x891 = bits.Mul64(x8, arg1[1])
- var x893 uint64
- var x894 uint64
- x894, x893 = bits.Mul64(x8, arg1[0])
- var x895 uint64
- var x896 uint64
- x895, x896 = bits.Add64(x894, x891, uint64(0x0))
- var x897 uint64
- var x898 uint64
- x897, x898 = bits.Add64(x892, x889, uint64(p521Uint1(x896)))
- var x899 uint64
- var x900 uint64
- x899, x900 = bits.Add64(x890, x887, uint64(p521Uint1(x898)))
- var x901 uint64
- var x902 uint64
- x901, x902 = bits.Add64(x888, x885, uint64(p521Uint1(x900)))
- var x903 uint64
- var x904 uint64
- x903, x904 = bits.Add64(x886, x883, uint64(p521Uint1(x902)))
- var x905 uint64
- var x906 uint64
- x905, x906 = bits.Add64(x884, x881, uint64(p521Uint1(x904)))
- var x907 uint64
- var x908 uint64
- x907, x908 = bits.Add64(x882, x879, uint64(p521Uint1(x906)))
- var x909 uint64
- var x910 uint64
- x909, x910 = bits.Add64(x880, x877, uint64(p521Uint1(x908)))
- x911 := (uint64(p521Uint1(x910)) + x878)
- var x912 uint64
- var x913 uint64
- x912, x913 = bits.Add64(x858, x893, uint64(0x0))
- var x914 uint64
- var x915 uint64
- x914, x915 = bits.Add64(x860, x895, uint64(p521Uint1(x913)))
- var x916 uint64
- var x917 uint64
- x916, x917 = bits.Add64(x862, x897, uint64(p521Uint1(x915)))
- var x918 uint64
- var x919 uint64
- x918, x919 = bits.Add64(x864, x899, uint64(p521Uint1(x917)))
- var x920 uint64
- var x921 uint64
- x920, x921 = bits.Add64(x866, x901, uint64(p521Uint1(x919)))
- var x922 uint64
- var x923 uint64
- x922, x923 = bits.Add64(x868, x903, uint64(p521Uint1(x921)))
- var x924 uint64
- var x925 uint64
- x924, x925 = bits.Add64(x870, x905, uint64(p521Uint1(x923)))
- var x926 uint64
- var x927 uint64
- x926, x927 = bits.Add64(x872, x907, uint64(p521Uint1(x925)))
- var x928 uint64
- var x929 uint64
- x928, x929 = bits.Add64(x874, x909, uint64(p521Uint1(x927)))
- var x930 uint64
- var x931 uint64
- x930, x931 = bits.Add64(x876, x911, uint64(p521Uint1(x929)))
- var x932 uint64
- var x933 uint64
- x933, x932 = bits.Mul64(x912, 0x1ff)
- var x934 uint64
- var x935 uint64
- x935, x934 = bits.Mul64(x912, 0xffffffffffffffff)
- var x936 uint64
- var x937 uint64
- x937, x936 = bits.Mul64(x912, 0xffffffffffffffff)
- var x938 uint64
- var x939 uint64
- x939, x938 = bits.Mul64(x912, 0xffffffffffffffff)
- var x940 uint64
- var x941 uint64
- x941, x940 = bits.Mul64(x912, 0xffffffffffffffff)
- var x942 uint64
- var x943 uint64
- x943, x942 = bits.Mul64(x912, 0xffffffffffffffff)
- var x944 uint64
- var x945 uint64
- x945, x944 = bits.Mul64(x912, 0xffffffffffffffff)
- var x946 uint64
- var x947 uint64
- x947, x946 = bits.Mul64(x912, 0xffffffffffffffff)
- var x948 uint64
- var x949 uint64
- x949, x948 = bits.Mul64(x912, 0xffffffffffffffff)
- var x950 uint64
- var x951 uint64
- x950, x951 = bits.Add64(x949, x946, uint64(0x0))
- var x952 uint64
- var x953 uint64
- x952, x953 = bits.Add64(x947, x944, uint64(p521Uint1(x951)))
- var x954 uint64
- var x955 uint64
- x954, x955 = bits.Add64(x945, x942, uint64(p521Uint1(x953)))
- var x956 uint64
- var x957 uint64
- x956, x957 = bits.Add64(x943, x940, uint64(p521Uint1(x955)))
- var x958 uint64
- var x959 uint64
- x958, x959 = bits.Add64(x941, x938, uint64(p521Uint1(x957)))
- var x960 uint64
- var x961 uint64
- x960, x961 = bits.Add64(x939, x936, uint64(p521Uint1(x959)))
- var x962 uint64
- var x963 uint64
- x962, x963 = bits.Add64(x937, x934, uint64(p521Uint1(x961)))
- var x964 uint64
- var x965 uint64
- x964, x965 = bits.Add64(x935, x932, uint64(p521Uint1(x963)))
- x966 := (uint64(p521Uint1(x965)) + x933)
- var x968 uint64
- _, x968 = bits.Add64(x912, x948, uint64(0x0))
- var x969 uint64
- var x970 uint64
- x969, x970 = bits.Add64(x914, x950, uint64(p521Uint1(x968)))
- var x971 uint64
- var x972 uint64
- x971, x972 = bits.Add64(x916, x952, uint64(p521Uint1(x970)))
- var x973 uint64
- var x974 uint64
- x973, x974 = bits.Add64(x918, x954, uint64(p521Uint1(x972)))
- var x975 uint64
- var x976 uint64
- x975, x976 = bits.Add64(x920, x956, uint64(p521Uint1(x974)))
- var x977 uint64
- var x978 uint64
- x977, x978 = bits.Add64(x922, x958, uint64(p521Uint1(x976)))
- var x979 uint64
- var x980 uint64
- x979, x980 = bits.Add64(x924, x960, uint64(p521Uint1(x978)))
- var x981 uint64
- var x982 uint64
- x981, x982 = bits.Add64(x926, x962, uint64(p521Uint1(x980)))
- var x983 uint64
- var x984 uint64
- x983, x984 = bits.Add64(x928, x964, uint64(p521Uint1(x982)))
- var x985 uint64
- var x986 uint64
- x985, x986 = bits.Add64(x930, x966, uint64(p521Uint1(x984)))
- x987 := (uint64(p521Uint1(x986)) + uint64(p521Uint1(x931)))
- var x988 uint64
- var x989 uint64
- x988, x989 = bits.Sub64(x969, 0xffffffffffffffff, uint64(0x0))
- var x990 uint64
- var x991 uint64
- x990, x991 = bits.Sub64(x971, 0xffffffffffffffff, uint64(p521Uint1(x989)))
- var x992 uint64
- var x993 uint64
- x992, x993 = bits.Sub64(x973, 0xffffffffffffffff, uint64(p521Uint1(x991)))
- var x994 uint64
- var x995 uint64
- x994, x995 = bits.Sub64(x975, 0xffffffffffffffff, uint64(p521Uint1(x993)))
- var x996 uint64
- var x997 uint64
- x996, x997 = bits.Sub64(x977, 0xffffffffffffffff, uint64(p521Uint1(x995)))
- var x998 uint64
- var x999 uint64
- x998, x999 = bits.Sub64(x979, 0xffffffffffffffff, uint64(p521Uint1(x997)))
- var x1000 uint64
- var x1001 uint64
- x1000, x1001 = bits.Sub64(x981, 0xffffffffffffffff, uint64(p521Uint1(x999)))
- var x1002 uint64
- var x1003 uint64
- x1002, x1003 = bits.Sub64(x983, 0xffffffffffffffff, uint64(p521Uint1(x1001)))
- var x1004 uint64
- var x1005 uint64
- x1004, x1005 = bits.Sub64(x985, 0x1ff, uint64(p521Uint1(x1003)))
- var x1007 uint64
- _, x1007 = bits.Sub64(x987, uint64(0x0), uint64(p521Uint1(x1005)))
- var x1008 uint64
- p521CmovznzU64(&x1008, p521Uint1(x1007), x988, x969)
- var x1009 uint64
- p521CmovznzU64(&x1009, p521Uint1(x1007), x990, x971)
- var x1010 uint64
- p521CmovznzU64(&x1010, p521Uint1(x1007), x992, x973)
- var x1011 uint64
- p521CmovznzU64(&x1011, p521Uint1(x1007), x994, x975)
- var x1012 uint64
- p521CmovznzU64(&x1012, p521Uint1(x1007), x996, x977)
- var x1013 uint64
- p521CmovznzU64(&x1013, p521Uint1(x1007), x998, x979)
- var x1014 uint64
- p521CmovznzU64(&x1014, p521Uint1(x1007), x1000, x981)
- var x1015 uint64
- p521CmovznzU64(&x1015, p521Uint1(x1007), x1002, x983)
- var x1016 uint64
- p521CmovznzU64(&x1016, p521Uint1(x1007), x1004, x985)
- out1[0] = x1008
- out1[1] = x1009
- out1[2] = x1010
- out1[3] = x1011
- out1[4] = x1012
- out1[5] = x1013
- out1[6] = x1014
- out1[7] = x1015
- out1[8] = x1016
-}
-
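p521Square above is the multiply specialized to arg2 == arg1: each round multiplies one limb of arg1 across the whole operand and immediately folds in a Montgomery reduction step. Those reduction rounds multiply the running low limb by the limbs of m itself (0xffffffffffffffff eight times and 0x1ff) with no separate m' factor, because for m = 2^521 - 1 the low limb of m is all ones, so the Montgomery constant -m^{-1} mod 2^64 is exactly 1. A quick math/big check of that fact:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	two64 := new(big.Int).Lsh(one, 64)

	// m = 2^521 - 1, the P-521 field prime.
	m := new(big.Int).Sub(new(big.Int).Lsh(one, 521), one)

	// Montgomery reduction needs m' = -m^{-1} mod 2^64. Since m ≡ -1 mod 2^64,
	// m' is 1, so the generated code can multiply directly by m's limbs.
	mInv := new(big.Int).ModInverse(m, two64)
	mPrime := new(big.Int).Mod(new(big.Int).Neg(mInv), two64)
	fmt.Println(mPrime) // 1
}
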
-// p521Add adds two field elements in the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// 0 ≤ eval arg2 < m
-// Postconditions:
-// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
-// 0 ≤ eval out1 < m
-//
-func p521Add(out1 *p521MontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement, arg2 *p521MontgomeryDomainFieldElement) {
- var x1 uint64
- var x2 uint64
- x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
- var x3 uint64
- var x4 uint64
- x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(p521Uint1(x2)))
- var x5 uint64
- var x6 uint64
- x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(p521Uint1(x4)))
- var x7 uint64
- var x8 uint64
- x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(p521Uint1(x6)))
- var x9 uint64
- var x10 uint64
- x9, x10 = bits.Add64(arg1[4], arg2[4], uint64(p521Uint1(x8)))
- var x11 uint64
- var x12 uint64
- x11, x12 = bits.Add64(arg1[5], arg2[5], uint64(p521Uint1(x10)))
- var x13 uint64
- var x14 uint64
- x13, x14 = bits.Add64(arg1[6], arg2[6], uint64(p521Uint1(x12)))
- var x15 uint64
- var x16 uint64
- x15, x16 = bits.Add64(arg1[7], arg2[7], uint64(p521Uint1(x14)))
- var x17 uint64
- var x18 uint64
- x17, x18 = bits.Add64(arg1[8], arg2[8], uint64(p521Uint1(x16)))
- var x19 uint64
- var x20 uint64
- x19, x20 = bits.Sub64(x1, 0xffffffffffffffff, uint64(0x0))
- var x21 uint64
- var x22 uint64
- x21, x22 = bits.Sub64(x3, 0xffffffffffffffff, uint64(p521Uint1(x20)))
- var x23 uint64
- var x24 uint64
- x23, x24 = bits.Sub64(x5, 0xffffffffffffffff, uint64(p521Uint1(x22)))
- var x25 uint64
- var x26 uint64
- x25, x26 = bits.Sub64(x7, 0xffffffffffffffff, uint64(p521Uint1(x24)))
- var x27 uint64
- var x28 uint64
- x27, x28 = bits.Sub64(x9, 0xffffffffffffffff, uint64(p521Uint1(x26)))
- var x29 uint64
- var x30 uint64
- x29, x30 = bits.Sub64(x11, 0xffffffffffffffff, uint64(p521Uint1(x28)))
- var x31 uint64
- var x32 uint64
- x31, x32 = bits.Sub64(x13, 0xffffffffffffffff, uint64(p521Uint1(x30)))
- var x33 uint64
- var x34 uint64
- x33, x34 = bits.Sub64(x15, 0xffffffffffffffff, uint64(p521Uint1(x32)))
- var x35 uint64
- var x36 uint64
- x35, x36 = bits.Sub64(x17, 0x1ff, uint64(p521Uint1(x34)))
- var x38 uint64
- _, x38 = bits.Sub64(uint64(p521Uint1(x18)), uint64(0x0), uint64(p521Uint1(x36)))
- var x39 uint64
- p521CmovznzU64(&x39, p521Uint1(x38), x19, x1)
- var x40 uint64
- p521CmovznzU64(&x40, p521Uint1(x38), x21, x3)
- var x41 uint64
- p521CmovznzU64(&x41, p521Uint1(x38), x23, x5)
- var x42 uint64
- p521CmovznzU64(&x42, p521Uint1(x38), x25, x7)
- var x43 uint64
- p521CmovznzU64(&x43, p521Uint1(x38), x27, x9)
- var x44 uint64
- p521CmovznzU64(&x44, p521Uint1(x38), x29, x11)
- var x45 uint64
- p521CmovznzU64(&x45, p521Uint1(x38), x31, x13)
- var x46 uint64
- p521CmovznzU64(&x46, p521Uint1(x38), x33, x15)
- var x47 uint64
- p521CmovznzU64(&x47, p521Uint1(x38), x35, x17)
- out1[0] = x39
- out1[1] = x40
- out1[2] = x41
- out1[3] = x42
- out1[4] = x43
- out1[5] = x44
- out1[6] = x45
- out1[7] = x46
- out1[8] = x47
-}
-
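p521Add above is a plain nine-limb carry chain followed by one conditional subtraction of m, ending with the same borrow-and-select as the multiply. The wide addition rests on bits.Add64 returning (sum, carryOut) so the carry threads from limb to limb; a two-limb sketch of that chaining, with illustrative values:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// bits.Add64 returns (sum, carryOut); feeding carryOut into the next call
	// chains 64-bit limbs into one wide addition, exactly as p521Add does.
	a := [2]uint64{0xffffffffffffffff, 0x1}
	b := [2]uint64{0x1, 0x2}

	s0, c := bits.Add64(a[0], b[0], 0)
	s1, c := bits.Add64(a[1], b[1], c)
	fmt.Printf("%#x %#x carry=%d\n", s1, s0, c) // 0x4 0x0 carry=0
}
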
-// p521Sub subtracts two field elements in the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// 0 ≤ eval arg2 < m
-// Postconditions:
-// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
-// 0 ≤ eval out1 < m
-//
-func p521Sub(out1 *p521MontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement, arg2 *p521MontgomeryDomainFieldElement) {
- var x1 uint64
- var x2 uint64
- x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
- var x3 uint64
- var x4 uint64
- x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(p521Uint1(x2)))
- var x5 uint64
- var x6 uint64
- x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(p521Uint1(x4)))
- var x7 uint64
- var x8 uint64
- x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(p521Uint1(x6)))
- var x9 uint64
- var x10 uint64
- x9, x10 = bits.Sub64(arg1[4], arg2[4], uint64(p521Uint1(x8)))
- var x11 uint64
- var x12 uint64
- x11, x12 = bits.Sub64(arg1[5], arg2[5], uint64(p521Uint1(x10)))
- var x13 uint64
- var x14 uint64
- x13, x14 = bits.Sub64(arg1[6], arg2[6], uint64(p521Uint1(x12)))
- var x15 uint64
- var x16 uint64
- x15, x16 = bits.Sub64(arg1[7], arg2[7], uint64(p521Uint1(x14)))
- var x17 uint64
- var x18 uint64
- x17, x18 = bits.Sub64(arg1[8], arg2[8], uint64(p521Uint1(x16)))
- var x19 uint64
- p521CmovznzU64(&x19, p521Uint1(x18), uint64(0x0), 0xffffffffffffffff)
- var x20 uint64
- var x21 uint64
- x20, x21 = bits.Add64(x1, x19, uint64(0x0))
- var x22 uint64
- var x23 uint64
- x22, x23 = bits.Add64(x3, x19, uint64(p521Uint1(x21)))
- var x24 uint64
- var x25 uint64
- x24, x25 = bits.Add64(x5, x19, uint64(p521Uint1(x23)))
- var x26 uint64
- var x27 uint64
- x26, x27 = bits.Add64(x7, x19, uint64(p521Uint1(x25)))
- var x28 uint64
- var x29 uint64
- x28, x29 = bits.Add64(x9, x19, uint64(p521Uint1(x27)))
- var x30 uint64
- var x31 uint64
- x30, x31 = bits.Add64(x11, x19, uint64(p521Uint1(x29)))
- var x32 uint64
- var x33 uint64
- x32, x33 = bits.Add64(x13, x19, uint64(p521Uint1(x31)))
- var x34 uint64
- var x35 uint64
- x34, x35 = bits.Add64(x15, x19, uint64(p521Uint1(x33)))
- var x36 uint64
- x36, _ = bits.Add64(x17, (x19 & 0x1ff), uint64(p521Uint1(x35)))
- out1[0] = x20
- out1[1] = x22
- out1[2] = x24
- out1[3] = x26
- out1[4] = x28
- out1[5] = x30
- out1[6] = x32
- out1[7] = x34
- out1[8] = x36
-}
-
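p521Sub above runs the borrow chain first and then, instead of a select, turns the final borrow into an all-zero or all-one mask (x19) and adds m back under that mask, with the top limb masked to m's 0x1ff high limb. A standalone sketch of the same mask trick, assuming a toy two-limb modulus:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Assumed toy two-limb modulus m = 2^64 + 5, limbs little-endian.
	mLo, mHi := uint64(5), uint64(1)
	a := [2]uint64{2, 0}
	b := [2]uint64{7, 0}

	// Subtract with a borrow chain; a < b here, so the final borrow is 1.
	d0, brw := bits.Sub64(a[0], b[0], 0)
	d1, brw := bits.Sub64(a[1], b[1], brw)

	// Turn the borrow into an all-zero or all-one mask and add m back once,
	// mirroring how p521Sub masks each limb of m (x19, and x19 & 0x1ff on top).
	mask := uint64(0) - brw
	r0, c := bits.Add64(d0, mLo&mask, 0)
	r1, _ := bits.Add64(d1, mHi&mask, c)
	fmt.Println(r1, r0) // 1 0: (2 - 7) mod (2^64 + 5) = 2^64
}
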
-// p521SetOne returns the field element one in the Montgomery domain.
-//
-// Postconditions:
-// eval (from_montgomery out1) mod m = 1 mod m
-// 0 ≤ eval out1 < m
-//
-func p521SetOne(out1 *p521MontgomeryDomainFieldElement) {
- out1[0] = 0x80000000000000
- out1[1] = uint64(0x0)
- out1[2] = uint64(0x0)
- out1[3] = uint64(0x0)
- out1[4] = uint64(0x0)
- out1[5] = uint64(0x0)
- out1[6] = uint64(0x0)
- out1[7] = uint64(0x0)
- out1[8] = uint64(0x0)
-}
-
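The constant in p521SetOne is R mod m, the Montgomery representation of 1 with R = 2^(9·64) = 2^576; since m = 2^521 - 1, that reduces to 2^(576-521) = 2^55 = 0x80000000000000 in the low limb and zeros elsewhere. A math/big one-liner confirming the constant:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	m := new(big.Int).Sub(new(big.Int).Lsh(one, 521), one) // 2^521 - 1
	r := new(big.Int).Lsh(one, 576)                        // R = 2^(9*64)

	// The Montgomery form of 1 is R mod m, the value stored in out1[0].
	fmt.Printf("%#x\n", new(big.Int).Mod(r, m)) // 0x80000000000000
}
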
-// p521FromMontgomery translates a field element out of the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// Postconditions:
-// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^9) mod m
-// 0 ≤ eval out1 < m
-//
-func p521FromMontgomery(out1 *p521NonMontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement) {
- x1 := arg1[0]
- var x2 uint64
- var x3 uint64
- x3, x2 = bits.Mul64(x1, 0x1ff)
- var x4 uint64
- var x5 uint64
- x5, x4 = bits.Mul64(x1, 0xffffffffffffffff)
- var x6 uint64
- var x7 uint64
- x7, x6 = bits.Mul64(x1, 0xffffffffffffffff)
- var x8 uint64
- var x9 uint64
- x9, x8 = bits.Mul64(x1, 0xffffffffffffffff)
- var x10 uint64
- var x11 uint64
- x11, x10 = bits.Mul64(x1, 0xffffffffffffffff)
- var x12 uint64
- var x13 uint64
- x13, x12 = bits.Mul64(x1, 0xffffffffffffffff)
- var x14 uint64
- var x15 uint64
- x15, x14 = bits.Mul64(x1, 0xffffffffffffffff)
- var x16 uint64
- var x17 uint64
- x17, x16 = bits.Mul64(x1, 0xffffffffffffffff)
- var x18 uint64
- var x19 uint64
- x19, x18 = bits.Mul64(x1, 0xffffffffffffffff)
- var x20 uint64
- var x21 uint64
- x20, x21 = bits.Add64(x19, x16, uint64(0x0))
- var x22 uint64
- var x23 uint64
- x22, x23 = bits.Add64(x17, x14, uint64(p521Uint1(x21)))
- var x24 uint64
- var x25 uint64
- x24, x25 = bits.Add64(x15, x12, uint64(p521Uint1(x23)))
- var x26 uint64
- var x27 uint64
- x26, x27 = bits.Add64(x13, x10, uint64(p521Uint1(x25)))
- var x28 uint64
- var x29 uint64
- x28, x29 = bits.Add64(x11, x8, uint64(p521Uint1(x27)))
- var x30 uint64
- var x31 uint64
- x30, x31 = bits.Add64(x9, x6, uint64(p521Uint1(x29)))
- var x32 uint64
- var x33 uint64
- x32, x33 = bits.Add64(x7, x4, uint64(p521Uint1(x31)))
- var x34 uint64
- var x35 uint64
- x34, x35 = bits.Add64(x5, x2, uint64(p521Uint1(x33)))
- var x37 uint64
- _, x37 = bits.Add64(x1, x18, uint64(0x0))
- var x38 uint64
- var x39 uint64
- x38, x39 = bits.Add64(uint64(0x0), x20, uint64(p521Uint1(x37)))
- var x40 uint64
- var x41 uint64
- x40, x41 = bits.Add64(uint64(0x0), x22, uint64(p521Uint1(x39)))
- var x42 uint64
- var x43 uint64
- x42, x43 = bits.Add64(uint64(0x0), x24, uint64(p521Uint1(x41)))
- var x44 uint64
- var x45 uint64
- x44, x45 = bits.Add64(uint64(0x0), x26, uint64(p521Uint1(x43)))
- var x46 uint64
- var x47 uint64
- x46, x47 = bits.Add64(uint64(0x0), x28, uint64(p521Uint1(x45)))
- var x48 uint64
- var x49 uint64
- x48, x49 = bits.Add64(uint64(0x0), x30, uint64(p521Uint1(x47)))
- var x50 uint64
- var x51 uint64
- x50, x51 = bits.Add64(uint64(0x0), x32, uint64(p521Uint1(x49)))
- var x52 uint64
- var x53 uint64
- x52, x53 = bits.Add64(uint64(0x0), x34, uint64(p521Uint1(x51)))
- var x54 uint64
- var x55 uint64
- x54, x55 = bits.Add64(x38, arg1[1], uint64(0x0))
- var x56 uint64
- var x57 uint64
- x56, x57 = bits.Add64(x40, uint64(0x0), uint64(p521Uint1(x55)))
- var x58 uint64
- var x59 uint64
- x58, x59 = bits.Add64(x42, uint64(0x0), uint64(p521Uint1(x57)))
- var x60 uint64
- var x61 uint64
- x60, x61 = bits.Add64(x44, uint64(0x0), uint64(p521Uint1(x59)))
- var x62 uint64
- var x63 uint64
- x62, x63 = bits.Add64(x46, uint64(0x0), uint64(p521Uint1(x61)))
- var x64 uint64
- var x65 uint64
- x64, x65 = bits.Add64(x48, uint64(0x0), uint64(p521Uint1(x63)))
- var x66 uint64
- var x67 uint64
- x66, x67 = bits.Add64(x50, uint64(0x0), uint64(p521Uint1(x65)))
- var x68 uint64
- var x69 uint64
- x68, x69 = bits.Add64(x52, uint64(0x0), uint64(p521Uint1(x67)))
- var x70 uint64
- var x71 uint64
- x71, x70 = bits.Mul64(x54, 0x1ff)
- var x72 uint64
- var x73 uint64
- x73, x72 = bits.Mul64(x54, 0xffffffffffffffff)
- var x74 uint64
- var x75 uint64
- x75, x74 = bits.Mul64(x54, 0xffffffffffffffff)
- var x76 uint64
- var x77 uint64
- x77, x76 = bits.Mul64(x54, 0xffffffffffffffff)
- var x78 uint64
- var x79 uint64
- x79, x78 = bits.Mul64(x54, 0xffffffffffffffff)
- var x80 uint64
- var x81 uint64
- x81, x80 = bits.Mul64(x54, 0xffffffffffffffff)
- var x82 uint64
- var x83 uint64
- x83, x82 = bits.Mul64(x54, 0xffffffffffffffff)
- var x84 uint64
- var x85 uint64
- x85, x84 = bits.Mul64(x54, 0xffffffffffffffff)
- var x86 uint64
- var x87 uint64
- x87, x86 = bits.Mul64(x54, 0xffffffffffffffff)
- var x88 uint64
- var x89 uint64
- x88, x89 = bits.Add64(x87, x84, uint64(0x0))
- var x90 uint64
- var x91 uint64
- x90, x91 = bits.Add64(x85, x82, uint64(p521Uint1(x89)))
- var x92 uint64
- var x93 uint64
- x92, x93 = bits.Add64(x83, x80, uint64(p521Uint1(x91)))
- var x94 uint64
- var x95 uint64
- x94, x95 = bits.Add64(x81, x78, uint64(p521Uint1(x93)))
- var x96 uint64
- var x97 uint64
- x96, x97 = bits.Add64(x79, x76, uint64(p521Uint1(x95)))
- var x98 uint64
- var x99 uint64
- x98, x99 = bits.Add64(x77, x74, uint64(p521Uint1(x97)))
- var x100 uint64
- var x101 uint64
- x100, x101 = bits.Add64(x75, x72, uint64(p521Uint1(x99)))
- var x102 uint64
- var x103 uint64
- x102, x103 = bits.Add64(x73, x70, uint64(p521Uint1(x101)))
- var x105 uint64
- _, x105 = bits.Add64(x54, x86, uint64(0x0))
- var x106 uint64
- var x107 uint64
- x106, x107 = bits.Add64(x56, x88, uint64(p521Uint1(x105)))
- var x108 uint64
- var x109 uint64
- x108, x109 = bits.Add64(x58, x90, uint64(p521Uint1(x107)))
- var x110 uint64
- var x111 uint64
- x110, x111 = bits.Add64(x60, x92, uint64(p521Uint1(x109)))
- var x112 uint64
- var x113 uint64
- x112, x113 = bits.Add64(x62, x94, uint64(p521Uint1(x111)))
- var x114 uint64
- var x115 uint64
- x114, x115 = bits.Add64(x64, x96, uint64(p521Uint1(x113)))
- var x116 uint64
- var x117 uint64
- x116, x117 = bits.Add64(x66, x98, uint64(p521Uint1(x115)))
- var x118 uint64
- var x119 uint64
- x118, x119 = bits.Add64(x68, x100, uint64(p521Uint1(x117)))
- var x120 uint64
- var x121 uint64
- x120, x121 = bits.Add64((uint64(p521Uint1(x69)) + (uint64(p521Uint1(x53)) + (uint64(p521Uint1(x35)) + x3))), x102, uint64(p521Uint1(x119)))
- var x122 uint64
- var x123 uint64
- x122, x123 = bits.Add64(x106, arg1[2], uint64(0x0))
- var x124 uint64
- var x125 uint64
- x124, x125 = bits.Add64(x108, uint64(0x0), uint64(p521Uint1(x123)))
- var x126 uint64
- var x127 uint64
- x126, x127 = bits.Add64(x110, uint64(0x0), uint64(p521Uint1(x125)))
- var x128 uint64
- var x129 uint64
- x128, x129 = bits.Add64(x112, uint64(0x0), uint64(p521Uint1(x127)))
- var x130 uint64
- var x131 uint64
- x130, x131 = bits.Add64(x114, uint64(0x0), uint64(p521Uint1(x129)))
- var x132 uint64
- var x133 uint64
- x132, x133 = bits.Add64(x116, uint64(0x0), uint64(p521Uint1(x131)))
- var x134 uint64
- var x135 uint64
- x134, x135 = bits.Add64(x118, uint64(0x0), uint64(p521Uint1(x133)))
- var x136 uint64
- var x137 uint64
- x136, x137 = bits.Add64(x120, uint64(0x0), uint64(p521Uint1(x135)))
- var x138 uint64
- var x139 uint64
- x139, x138 = bits.Mul64(x122, 0x1ff)
- var x140 uint64
- var x141 uint64
- x141, x140 = bits.Mul64(x122, 0xffffffffffffffff)
- var x142 uint64
- var x143 uint64
- x143, x142 = bits.Mul64(x122, 0xffffffffffffffff)
- var x144 uint64
- var x145 uint64
- x145, x144 = bits.Mul64(x122, 0xffffffffffffffff)
- var x146 uint64
- var x147 uint64
- x147, x146 = bits.Mul64(x122, 0xffffffffffffffff)
- var x148 uint64
- var x149 uint64
- x149, x148 = bits.Mul64(x122, 0xffffffffffffffff)
- var x150 uint64
- var x151 uint64
- x151, x150 = bits.Mul64(x122, 0xffffffffffffffff)
- var x152 uint64
- var x153 uint64
- x153, x152 = bits.Mul64(x122, 0xffffffffffffffff)
- var x154 uint64
- var x155 uint64
- x155, x154 = bits.Mul64(x122, 0xffffffffffffffff)
- var x156 uint64
- var x157 uint64
- x156, x157 = bits.Add64(x155, x152, uint64(0x0))
- var x158 uint64
- var x159 uint64
- x158, x159 = bits.Add64(x153, x150, uint64(p521Uint1(x157)))
- var x160 uint64
- var x161 uint64
- x160, x161 = bits.Add64(x151, x148, uint64(p521Uint1(x159)))
- var x162 uint64
- var x163 uint64
- x162, x163 = bits.Add64(x149, x146, uint64(p521Uint1(x161)))
- var x164 uint64
- var x165 uint64
- x164, x165 = bits.Add64(x147, x144, uint64(p521Uint1(x163)))
- var x166 uint64
- var x167 uint64
- x166, x167 = bits.Add64(x145, x142, uint64(p521Uint1(x165)))
- var x168 uint64
- var x169 uint64
- x168, x169 = bits.Add64(x143, x140, uint64(p521Uint1(x167)))
- var x170 uint64
- var x171 uint64
- x170, x171 = bits.Add64(x141, x138, uint64(p521Uint1(x169)))
- var x173 uint64
- _, x173 = bits.Add64(x122, x154, uint64(0x0))
- var x174 uint64
- var x175 uint64
- x174, x175 = bits.Add64(x124, x156, uint64(p521Uint1(x173)))
- var x176 uint64
- var x177 uint64
- x176, x177 = bits.Add64(x126, x158, uint64(p521Uint1(x175)))
- var x178 uint64
- var x179 uint64
- x178, x179 = bits.Add64(x128, x160, uint64(p521Uint1(x177)))
- var x180 uint64
- var x181 uint64
- x180, x181 = bits.Add64(x130, x162, uint64(p521Uint1(x179)))
- var x182 uint64
- var x183 uint64
- x182, x183 = bits.Add64(x132, x164, uint64(p521Uint1(x181)))
- var x184 uint64
- var x185 uint64
- x184, x185 = bits.Add64(x134, x166, uint64(p521Uint1(x183)))
- var x186 uint64
- var x187 uint64
- x186, x187 = bits.Add64(x136, x168, uint64(p521Uint1(x185)))
- var x188 uint64
- var x189 uint64
- x188, x189 = bits.Add64((uint64(p521Uint1(x137)) + (uint64(p521Uint1(x121)) + (uint64(p521Uint1(x103)) + x71))), x170, uint64(p521Uint1(x187)))
- var x190 uint64
- var x191 uint64
- x190, x191 = bits.Add64(x174, arg1[3], uint64(0x0))
- var x192 uint64
- var x193 uint64
- x192, x193 = bits.Add64(x176, uint64(0x0), uint64(p521Uint1(x191)))
- var x194 uint64
- var x195 uint64
- x194, x195 = bits.Add64(x178, uint64(0x0), uint64(p521Uint1(x193)))
- var x196 uint64
- var x197 uint64
- x196, x197 = bits.Add64(x180, uint64(0x0), uint64(p521Uint1(x195)))
- var x198 uint64
- var x199 uint64
- x198, x199 = bits.Add64(x182, uint64(0x0), uint64(p521Uint1(x197)))
- var x200 uint64
- var x201 uint64
- x200, x201 = bits.Add64(x184, uint64(0x0), uint64(p521Uint1(x199)))
- var x202 uint64
- var x203 uint64
- x202, x203 = bits.Add64(x186, uint64(0x0), uint64(p521Uint1(x201)))
- var x204 uint64
- var x205 uint64
- x204, x205 = bits.Add64(x188, uint64(0x0), uint64(p521Uint1(x203)))
- var x206 uint64
- var x207 uint64
- x207, x206 = bits.Mul64(x190, 0x1ff)
- var x208 uint64
- var x209 uint64
- x209, x208 = bits.Mul64(x190, 0xffffffffffffffff)
- var x210 uint64
- var x211 uint64
- x211, x210 = bits.Mul64(x190, 0xffffffffffffffff)
- var x212 uint64
- var x213 uint64
- x213, x212 = bits.Mul64(x190, 0xffffffffffffffff)
- var x214 uint64
- var x215 uint64
- x215, x214 = bits.Mul64(x190, 0xffffffffffffffff)
- var x216 uint64
- var x217 uint64
- x217, x216 = bits.Mul64(x190, 0xffffffffffffffff)
- var x218 uint64
- var x219 uint64
- x219, x218 = bits.Mul64(x190, 0xffffffffffffffff)
- var x220 uint64
- var x221 uint64
- x221, x220 = bits.Mul64(x190, 0xffffffffffffffff)
- var x222 uint64
- var x223 uint64
- x223, x222 = bits.Mul64(x190, 0xffffffffffffffff)
- var x224 uint64
- var x225 uint64
- x224, x225 = bits.Add64(x223, x220, uint64(0x0))
- var x226 uint64
- var x227 uint64
- x226, x227 = bits.Add64(x221, x218, uint64(p521Uint1(x225)))
- var x228 uint64
- var x229 uint64
- x228, x229 = bits.Add64(x219, x216, uint64(p521Uint1(x227)))
- var x230 uint64
- var x231 uint64
- x230, x231 = bits.Add64(x217, x214, uint64(p521Uint1(x229)))
- var x232 uint64
- var x233 uint64
- x232, x233 = bits.Add64(x215, x212, uint64(p521Uint1(x231)))
- var x234 uint64
- var x235 uint64
- x234, x235 = bits.Add64(x213, x210, uint64(p521Uint1(x233)))
- var x236 uint64
- var x237 uint64
- x236, x237 = bits.Add64(x211, x208, uint64(p521Uint1(x235)))
- var x238 uint64
- var x239 uint64
- x238, x239 = bits.Add64(x209, x206, uint64(p521Uint1(x237)))
- var x241 uint64
- _, x241 = bits.Add64(x190, x222, uint64(0x0))
- var x242 uint64
- var x243 uint64
- x242, x243 = bits.Add64(x192, x224, uint64(p521Uint1(x241)))
- var x244 uint64
- var x245 uint64
- x244, x245 = bits.Add64(x194, x226, uint64(p521Uint1(x243)))
- var x246 uint64
- var x247 uint64
- x246, x247 = bits.Add64(x196, x228, uint64(p521Uint1(x245)))
- var x248 uint64
- var x249 uint64
- x248, x249 = bits.Add64(x198, x230, uint64(p521Uint1(x247)))
- var x250 uint64
- var x251 uint64
- x250, x251 = bits.Add64(x200, x232, uint64(p521Uint1(x249)))
- var x252 uint64
- var x253 uint64
- x252, x253 = bits.Add64(x202, x234, uint64(p521Uint1(x251)))
- var x254 uint64
- var x255 uint64
- x254, x255 = bits.Add64(x204, x236, uint64(p521Uint1(x253)))
- var x256 uint64
- var x257 uint64
- x256, x257 = bits.Add64((uint64(p521Uint1(x205)) + (uint64(p521Uint1(x189)) + (uint64(p521Uint1(x171)) + x139))), x238, uint64(p521Uint1(x255)))
- var x258 uint64
- var x259 uint64
- x258, x259 = bits.Add64(x242, arg1[4], uint64(0x0))
- var x260 uint64
- var x261 uint64
- x260, x261 = bits.Add64(x244, uint64(0x0), uint64(p521Uint1(x259)))
- var x262 uint64
- var x263 uint64
- x262, x263 = bits.Add64(x246, uint64(0x0), uint64(p521Uint1(x261)))
- var x264 uint64
- var x265 uint64
- x264, x265 = bits.Add64(x248, uint64(0x0), uint64(p521Uint1(x263)))
- var x266 uint64
- var x267 uint64
- x266, x267 = bits.Add64(x250, uint64(0x0), uint64(p521Uint1(x265)))
- var x268 uint64
- var x269 uint64
- x268, x269 = bits.Add64(x252, uint64(0x0), uint64(p521Uint1(x267)))
- var x270 uint64
- var x271 uint64
- x270, x271 = bits.Add64(x254, uint64(0x0), uint64(p521Uint1(x269)))
- var x272 uint64
- var x273 uint64
- x272, x273 = bits.Add64(x256, uint64(0x0), uint64(p521Uint1(x271)))
- var x274 uint64
- var x275 uint64
- x275, x274 = bits.Mul64(x258, 0x1ff)
- var x276 uint64
- var x277 uint64
- x277, x276 = bits.Mul64(x258, 0xffffffffffffffff)
- var x278 uint64
- var x279 uint64
- x279, x278 = bits.Mul64(x258, 0xffffffffffffffff)
- var x280 uint64
- var x281 uint64
- x281, x280 = bits.Mul64(x258, 0xffffffffffffffff)
- var x282 uint64
- var x283 uint64
- x283, x282 = bits.Mul64(x258, 0xffffffffffffffff)
- var x284 uint64
- var x285 uint64
- x285, x284 = bits.Mul64(x258, 0xffffffffffffffff)
- var x286 uint64
- var x287 uint64
- x287, x286 = bits.Mul64(x258, 0xffffffffffffffff)
- var x288 uint64
- var x289 uint64
- x289, x288 = bits.Mul64(x258, 0xffffffffffffffff)
- var x290 uint64
- var x291 uint64
- x291, x290 = bits.Mul64(x258, 0xffffffffffffffff)
- var x292 uint64
- var x293 uint64
- x292, x293 = bits.Add64(x291, x288, uint64(0x0))
- var x294 uint64
- var x295 uint64
- x294, x295 = bits.Add64(x289, x286, uint64(p521Uint1(x293)))
- var x296 uint64
- var x297 uint64
- x296, x297 = bits.Add64(x287, x284, uint64(p521Uint1(x295)))
- var x298 uint64
- var x299 uint64
- x298, x299 = bits.Add64(x285, x282, uint64(p521Uint1(x297)))
- var x300 uint64
- var x301 uint64
- x300, x301 = bits.Add64(x283, x280, uint64(p521Uint1(x299)))
- var x302 uint64
- var x303 uint64
- x302, x303 = bits.Add64(x281, x278, uint64(p521Uint1(x301)))
- var x304 uint64
- var x305 uint64
- x304, x305 = bits.Add64(x279, x276, uint64(p521Uint1(x303)))
- var x306 uint64
- var x307 uint64
- x306, x307 = bits.Add64(x277, x274, uint64(p521Uint1(x305)))
- var x309 uint64
- _, x309 = bits.Add64(x258, x290, uint64(0x0))
- var x310 uint64
- var x311 uint64
- x310, x311 = bits.Add64(x260, x292, uint64(p521Uint1(x309)))
- var x312 uint64
- var x313 uint64
- x312, x313 = bits.Add64(x262, x294, uint64(p521Uint1(x311)))
- var x314 uint64
- var x315 uint64
- x314, x315 = bits.Add64(x264, x296, uint64(p521Uint1(x313)))
- var x316 uint64
- var x317 uint64
- x316, x317 = bits.Add64(x266, x298, uint64(p521Uint1(x315)))
- var x318 uint64
- var x319 uint64
- x318, x319 = bits.Add64(x268, x300, uint64(p521Uint1(x317)))
- var x320 uint64
- var x321 uint64
- x320, x321 = bits.Add64(x270, x302, uint64(p521Uint1(x319)))
- var x322 uint64
- var x323 uint64
- x322, x323 = bits.Add64(x272, x304, uint64(p521Uint1(x321)))
- var x324 uint64
- var x325 uint64
- x324, x325 = bits.Add64((uint64(p521Uint1(x273)) + (uint64(p521Uint1(x257)) + (uint64(p521Uint1(x239)) + x207))), x306, uint64(p521Uint1(x323)))
- var x326 uint64
- var x327 uint64
- x326, x327 = bits.Add64(x310, arg1[5], uint64(0x0))
- var x328 uint64
- var x329 uint64
- x328, x329 = bits.Add64(x312, uint64(0x0), uint64(p521Uint1(x327)))
- var x330 uint64
- var x331 uint64
- x330, x331 = bits.Add64(x314, uint64(0x0), uint64(p521Uint1(x329)))
- var x332 uint64
- var x333 uint64
- x332, x333 = bits.Add64(x316, uint64(0x0), uint64(p521Uint1(x331)))
- var x334 uint64
- var x335 uint64
- x334, x335 = bits.Add64(x318, uint64(0x0), uint64(p521Uint1(x333)))
- var x336 uint64
- var x337 uint64
- x336, x337 = bits.Add64(x320, uint64(0x0), uint64(p521Uint1(x335)))
- var x338 uint64
- var x339 uint64
- x338, x339 = bits.Add64(x322, uint64(0x0), uint64(p521Uint1(x337)))
- var x340 uint64
- var x341 uint64
- x340, x341 = bits.Add64(x324, uint64(0x0), uint64(p521Uint1(x339)))
- var x342 uint64
- var x343 uint64
- x343, x342 = bits.Mul64(x326, 0x1ff)
- var x344 uint64
- var x345 uint64
- x345, x344 = bits.Mul64(x326, 0xffffffffffffffff)
- var x346 uint64
- var x347 uint64
- x347, x346 = bits.Mul64(x326, 0xffffffffffffffff)
- var x348 uint64
- var x349 uint64
- x349, x348 = bits.Mul64(x326, 0xffffffffffffffff)
- var x350 uint64
- var x351 uint64
- x351, x350 = bits.Mul64(x326, 0xffffffffffffffff)
- var x352 uint64
- var x353 uint64
- x353, x352 = bits.Mul64(x326, 0xffffffffffffffff)
- var x354 uint64
- var x355 uint64
- x355, x354 = bits.Mul64(x326, 0xffffffffffffffff)
- var x356 uint64
- var x357 uint64
- x357, x356 = bits.Mul64(x326, 0xffffffffffffffff)
- var x358 uint64
- var x359 uint64
- x359, x358 = bits.Mul64(x326, 0xffffffffffffffff)
- var x360 uint64
- var x361 uint64
- x360, x361 = bits.Add64(x359, x356, uint64(0x0))
- var x362 uint64
- var x363 uint64
- x362, x363 = bits.Add64(x357, x354, uint64(p521Uint1(x361)))
- var x364 uint64
- var x365 uint64
- x364, x365 = bits.Add64(x355, x352, uint64(p521Uint1(x363)))
- var x366 uint64
- var x367 uint64
- x366, x367 = bits.Add64(x353, x350, uint64(p521Uint1(x365)))
- var x368 uint64
- var x369 uint64
- x368, x369 = bits.Add64(x351, x348, uint64(p521Uint1(x367)))
- var x370 uint64
- var x371 uint64
- x370, x371 = bits.Add64(x349, x346, uint64(p521Uint1(x369)))
- var x372 uint64
- var x373 uint64
- x372, x373 = bits.Add64(x347, x344, uint64(p521Uint1(x371)))
- var x374 uint64
- var x375 uint64
- x374, x375 = bits.Add64(x345, x342, uint64(p521Uint1(x373)))
- var x377 uint64
- _, x377 = bits.Add64(x326, x358, uint64(0x0))
- var x378 uint64
- var x379 uint64
- x378, x379 = bits.Add64(x328, x360, uint64(p521Uint1(x377)))
- var x380 uint64
- var x381 uint64
- x380, x381 = bits.Add64(x330, x362, uint64(p521Uint1(x379)))
- var x382 uint64
- var x383 uint64
- x382, x383 = bits.Add64(x332, x364, uint64(p521Uint1(x381)))
- var x384 uint64
- var x385 uint64
- x384, x385 = bits.Add64(x334, x366, uint64(p521Uint1(x383)))
- var x386 uint64
- var x387 uint64
- x386, x387 = bits.Add64(x336, x368, uint64(p521Uint1(x385)))
- var x388 uint64
- var x389 uint64
- x388, x389 = bits.Add64(x338, x370, uint64(p521Uint1(x387)))
- var x390 uint64
- var x391 uint64
- x390, x391 = bits.Add64(x340, x372, uint64(p521Uint1(x389)))
- var x392 uint64
- var x393 uint64
- x392, x393 = bits.Add64((uint64(p521Uint1(x341)) + (uint64(p521Uint1(x325)) + (uint64(p521Uint1(x307)) + x275))), x374, uint64(p521Uint1(x391)))
- var x394 uint64
- var x395 uint64
- x394, x395 = bits.Add64(x378, arg1[6], uint64(0x0))
- var x396 uint64
- var x397 uint64
- x396, x397 = bits.Add64(x380, uint64(0x0), uint64(p521Uint1(x395)))
- var x398 uint64
- var x399 uint64
- x398, x399 = bits.Add64(x382, uint64(0x0), uint64(p521Uint1(x397)))
- var x400 uint64
- var x401 uint64
- x400, x401 = bits.Add64(x384, uint64(0x0), uint64(p521Uint1(x399)))
- var x402 uint64
- var x403 uint64
- x402, x403 = bits.Add64(x386, uint64(0x0), uint64(p521Uint1(x401)))
- var x404 uint64
- var x405 uint64
- x404, x405 = bits.Add64(x388, uint64(0x0), uint64(p521Uint1(x403)))
- var x406 uint64
- var x407 uint64
- x406, x407 = bits.Add64(x390, uint64(0x0), uint64(p521Uint1(x405)))
- var x408 uint64
- var x409 uint64
- x408, x409 = bits.Add64(x392, uint64(0x0), uint64(p521Uint1(x407)))
- var x410 uint64
- var x411 uint64
- x411, x410 = bits.Mul64(x394, 0x1ff)
- var x412 uint64
- var x413 uint64
- x413, x412 = bits.Mul64(x394, 0xffffffffffffffff)
- var x414 uint64
- var x415 uint64
- x415, x414 = bits.Mul64(x394, 0xffffffffffffffff)
- var x416 uint64
- var x417 uint64
- x417, x416 = bits.Mul64(x394, 0xffffffffffffffff)
- var x418 uint64
- var x419 uint64
- x419, x418 = bits.Mul64(x394, 0xffffffffffffffff)
- var x420 uint64
- var x421 uint64
- x421, x420 = bits.Mul64(x394, 0xffffffffffffffff)
- var x422 uint64
- var x423 uint64
- x423, x422 = bits.Mul64(x394, 0xffffffffffffffff)
- var x424 uint64
- var x425 uint64
- x425, x424 = bits.Mul64(x394, 0xffffffffffffffff)
- var x426 uint64
- var x427 uint64
- x427, x426 = bits.Mul64(x394, 0xffffffffffffffff)
- var x428 uint64
- var x429 uint64
- x428, x429 = bits.Add64(x427, x424, uint64(0x0))
- var x430 uint64
- var x431 uint64
- x430, x431 = bits.Add64(x425, x422, uint64(p521Uint1(x429)))
- var x432 uint64
- var x433 uint64
- x432, x433 = bits.Add64(x423, x420, uint64(p521Uint1(x431)))
- var x434 uint64
- var x435 uint64
- x434, x435 = bits.Add64(x421, x418, uint64(p521Uint1(x433)))
- var x436 uint64
- var x437 uint64
- x436, x437 = bits.Add64(x419, x416, uint64(p521Uint1(x435)))
- var x438 uint64
- var x439 uint64
- x438, x439 = bits.Add64(x417, x414, uint64(p521Uint1(x437)))
- var x440 uint64
- var x441 uint64
- x440, x441 = bits.Add64(x415, x412, uint64(p521Uint1(x439)))
- var x442 uint64
- var x443 uint64
- x442, x443 = bits.Add64(x413, x410, uint64(p521Uint1(x441)))
- var x445 uint64
- _, x445 = bits.Add64(x394, x426, uint64(0x0))
- var x446 uint64
- var x447 uint64
- x446, x447 = bits.Add64(x396, x428, uint64(p521Uint1(x445)))
- var x448 uint64
- var x449 uint64
- x448, x449 = bits.Add64(x398, x430, uint64(p521Uint1(x447)))
- var x450 uint64
- var x451 uint64
- x450, x451 = bits.Add64(x400, x432, uint64(p521Uint1(x449)))
- var x452 uint64
- var x453 uint64
- x452, x453 = bits.Add64(x402, x434, uint64(p521Uint1(x451)))
- var x454 uint64
- var x455 uint64
- x454, x455 = bits.Add64(x404, x436, uint64(p521Uint1(x453)))
- var x456 uint64
- var x457 uint64
- x456, x457 = bits.Add64(x406, x438, uint64(p521Uint1(x455)))
- var x458 uint64
- var x459 uint64
- x458, x459 = bits.Add64(x408, x440, uint64(p521Uint1(x457)))
- var x460 uint64
- var x461 uint64
- x460, x461 = bits.Add64((uint64(p521Uint1(x409)) + (uint64(p521Uint1(x393)) + (uint64(p521Uint1(x375)) + x343))), x442, uint64(p521Uint1(x459)))
- var x462 uint64
- var x463 uint64
- x462, x463 = bits.Add64(x446, arg1[7], uint64(0x0))
- var x464 uint64
- var x465 uint64
- x464, x465 = bits.Add64(x448, uint64(0x0), uint64(p521Uint1(x463)))
- var x466 uint64
- var x467 uint64
- x466, x467 = bits.Add64(x450, uint64(0x0), uint64(p521Uint1(x465)))
- var x468 uint64
- var x469 uint64
- x468, x469 = bits.Add64(x452, uint64(0x0), uint64(p521Uint1(x467)))
- var x470 uint64
- var x471 uint64
- x470, x471 = bits.Add64(x454, uint64(0x0), uint64(p521Uint1(x469)))
- var x472 uint64
- var x473 uint64
- x472, x473 = bits.Add64(x456, uint64(0x0), uint64(p521Uint1(x471)))
- var x474 uint64
- var x475 uint64
- x474, x475 = bits.Add64(x458, uint64(0x0), uint64(p521Uint1(x473)))
- var x476 uint64
- var x477 uint64
- x476, x477 = bits.Add64(x460, uint64(0x0), uint64(p521Uint1(x475)))
- var x478 uint64
- var x479 uint64
- x479, x478 = bits.Mul64(x462, 0x1ff)
- var x480 uint64
- var x481 uint64
- x481, x480 = bits.Mul64(x462, 0xffffffffffffffff)
- var x482 uint64
- var x483 uint64
- x483, x482 = bits.Mul64(x462, 0xffffffffffffffff)
- var x484 uint64
- var x485 uint64
- x485, x484 = bits.Mul64(x462, 0xffffffffffffffff)
- var x486 uint64
- var x487 uint64
- x487, x486 = bits.Mul64(x462, 0xffffffffffffffff)
- var x488 uint64
- var x489 uint64
- x489, x488 = bits.Mul64(x462, 0xffffffffffffffff)
- var x490 uint64
- var x491 uint64
- x491, x490 = bits.Mul64(x462, 0xffffffffffffffff)
- var x492 uint64
- var x493 uint64
- x493, x492 = bits.Mul64(x462, 0xffffffffffffffff)
- var x494 uint64
- var x495 uint64
- x495, x494 = bits.Mul64(x462, 0xffffffffffffffff)
- var x496 uint64
- var x497 uint64
- x496, x497 = bits.Add64(x495, x492, uint64(0x0))
- var x498 uint64
- var x499 uint64
- x498, x499 = bits.Add64(x493, x490, uint64(p521Uint1(x497)))
- var x500 uint64
- var x501 uint64
- x500, x501 = bits.Add64(x491, x488, uint64(p521Uint1(x499)))
- var x502 uint64
- var x503 uint64
- x502, x503 = bits.Add64(x489, x486, uint64(p521Uint1(x501)))
- var x504 uint64
- var x505 uint64
- x504, x505 = bits.Add64(x487, x484, uint64(p521Uint1(x503)))
- var x506 uint64
- var x507 uint64
- x506, x507 = bits.Add64(x485, x482, uint64(p521Uint1(x505)))
- var x508 uint64
- var x509 uint64
- x508, x509 = bits.Add64(x483, x480, uint64(p521Uint1(x507)))
- var x510 uint64
- var x511 uint64
- x510, x511 = bits.Add64(x481, x478, uint64(p521Uint1(x509)))
- var x513 uint64
- _, x513 = bits.Add64(x462, x494, uint64(0x0))
- var x514 uint64
- var x515 uint64
- x514, x515 = bits.Add64(x464, x496, uint64(p521Uint1(x513)))
- var x516 uint64
- var x517 uint64
- x516, x517 = bits.Add64(x466, x498, uint64(p521Uint1(x515)))
- var x518 uint64
- var x519 uint64
- x518, x519 = bits.Add64(x468, x500, uint64(p521Uint1(x517)))
- var x520 uint64
- var x521 uint64
- x520, x521 = bits.Add64(x470, x502, uint64(p521Uint1(x519)))
- var x522 uint64
- var x523 uint64
- x522, x523 = bits.Add64(x472, x504, uint64(p521Uint1(x521)))
- var x524 uint64
- var x525 uint64
- x524, x525 = bits.Add64(x474, x506, uint64(p521Uint1(x523)))
- var x526 uint64
- var x527 uint64
- x526, x527 = bits.Add64(x476, x508, uint64(p521Uint1(x525)))
- var x528 uint64
- var x529 uint64
- x528, x529 = bits.Add64((uint64(p521Uint1(x477)) + (uint64(p521Uint1(x461)) + (uint64(p521Uint1(x443)) + x411))), x510, uint64(p521Uint1(x527)))
- var x530 uint64
- var x531 uint64
- x530, x531 = bits.Add64(x514, arg1[8], uint64(0x0))
- var x532 uint64
- var x533 uint64
- x532, x533 = bits.Add64(x516, uint64(0x0), uint64(p521Uint1(x531)))
- var x534 uint64
- var x535 uint64
- x534, x535 = bits.Add64(x518, uint64(0x0), uint64(p521Uint1(x533)))
- var x536 uint64
- var x537 uint64
- x536, x537 = bits.Add64(x520, uint64(0x0), uint64(p521Uint1(x535)))
- var x538 uint64
- var x539 uint64
- x538, x539 = bits.Add64(x522, uint64(0x0), uint64(p521Uint1(x537)))
- var x540 uint64
- var x541 uint64
- x540, x541 = bits.Add64(x524, uint64(0x0), uint64(p521Uint1(x539)))
- var x542 uint64
- var x543 uint64
- x542, x543 = bits.Add64(x526, uint64(0x0), uint64(p521Uint1(x541)))
- var x544 uint64
- var x545 uint64
- x544, x545 = bits.Add64(x528, uint64(0x0), uint64(p521Uint1(x543)))
- var x546 uint64
- var x547 uint64
- x547, x546 = bits.Mul64(x530, 0x1ff)
- var x548 uint64
- var x549 uint64
- x549, x548 = bits.Mul64(x530, 0xffffffffffffffff)
- var x550 uint64
- var x551 uint64
- x551, x550 = bits.Mul64(x530, 0xffffffffffffffff)
- var x552 uint64
- var x553 uint64
- x553, x552 = bits.Mul64(x530, 0xffffffffffffffff)
- var x554 uint64
- var x555 uint64
- x555, x554 = bits.Mul64(x530, 0xffffffffffffffff)
- var x556 uint64
- var x557 uint64
- x557, x556 = bits.Mul64(x530, 0xffffffffffffffff)
- var x558 uint64
- var x559 uint64
- x559, x558 = bits.Mul64(x530, 0xffffffffffffffff)
- var x560 uint64
- var x561 uint64
- x561, x560 = bits.Mul64(x530, 0xffffffffffffffff)
- var x562 uint64
- var x563 uint64
- x563, x562 = bits.Mul64(x530, 0xffffffffffffffff)
- var x564 uint64
- var x565 uint64
- x564, x565 = bits.Add64(x563, x560, uint64(0x0))
- var x566 uint64
- var x567 uint64
- x566, x567 = bits.Add64(x561, x558, uint64(p521Uint1(x565)))
- var x568 uint64
- var x569 uint64
- x568, x569 = bits.Add64(x559, x556, uint64(p521Uint1(x567)))
- var x570 uint64
- var x571 uint64
- x570, x571 = bits.Add64(x557, x554, uint64(p521Uint1(x569)))
- var x572 uint64
- var x573 uint64
- x572, x573 = bits.Add64(x555, x552, uint64(p521Uint1(x571)))
- var x574 uint64
- var x575 uint64
- x574, x575 = bits.Add64(x553, x550, uint64(p521Uint1(x573)))
- var x576 uint64
- var x577 uint64
- x576, x577 = bits.Add64(x551, x548, uint64(p521Uint1(x575)))
- var x578 uint64
- var x579 uint64
- x578, x579 = bits.Add64(x549, x546, uint64(p521Uint1(x577)))
- var x581 uint64
- _, x581 = bits.Add64(x530, x562, uint64(0x0))
- var x582 uint64
- var x583 uint64
- x582, x583 = bits.Add64(x532, x564, uint64(p521Uint1(x581)))
- var x584 uint64
- var x585 uint64
- x584, x585 = bits.Add64(x534, x566, uint64(p521Uint1(x583)))
- var x586 uint64
- var x587 uint64
- x586, x587 = bits.Add64(x536, x568, uint64(p521Uint1(x585)))
- var x588 uint64
- var x589 uint64
- x588, x589 = bits.Add64(x538, x570, uint64(p521Uint1(x587)))
- var x590 uint64
- var x591 uint64
- x590, x591 = bits.Add64(x540, x572, uint64(p521Uint1(x589)))
- var x592 uint64
- var x593 uint64
- x592, x593 = bits.Add64(x542, x574, uint64(p521Uint1(x591)))
- var x594 uint64
- var x595 uint64
- x594, x595 = bits.Add64(x544, x576, uint64(p521Uint1(x593)))
- var x596 uint64
- var x597 uint64
- x596, x597 = bits.Add64((uint64(p521Uint1(x545)) + (uint64(p521Uint1(x529)) + (uint64(p521Uint1(x511)) + x479))), x578, uint64(p521Uint1(x595)))
- x598 := (uint64(p521Uint1(x597)) + (uint64(p521Uint1(x579)) + x547))
- var x599 uint64
- var x600 uint64
- x599, x600 = bits.Sub64(x582, 0xffffffffffffffff, uint64(0x0))
- var x601 uint64
- var x602 uint64
- x601, x602 = bits.Sub64(x584, 0xffffffffffffffff, uint64(p521Uint1(x600)))
- var x603 uint64
- var x604 uint64
- x603, x604 = bits.Sub64(x586, 0xffffffffffffffff, uint64(p521Uint1(x602)))
- var x605 uint64
- var x606 uint64
- x605, x606 = bits.Sub64(x588, 0xffffffffffffffff, uint64(p521Uint1(x604)))
- var x607 uint64
- var x608 uint64
- x607, x608 = bits.Sub64(x590, 0xffffffffffffffff, uint64(p521Uint1(x606)))
- var x609 uint64
- var x610 uint64
- x609, x610 = bits.Sub64(x592, 0xffffffffffffffff, uint64(p521Uint1(x608)))
- var x611 uint64
- var x612 uint64
- x611, x612 = bits.Sub64(x594, 0xffffffffffffffff, uint64(p521Uint1(x610)))
- var x613 uint64
- var x614 uint64
- x613, x614 = bits.Sub64(x596, 0xffffffffffffffff, uint64(p521Uint1(x612)))
- var x615 uint64
- var x616 uint64
- x615, x616 = bits.Sub64(x598, 0x1ff, uint64(p521Uint1(x614)))
- var x618 uint64
- _, x618 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p521Uint1(x616)))
- var x619 uint64
- p521CmovznzU64(&x619, p521Uint1(x618), x599, x582)
- var x620 uint64
- p521CmovznzU64(&x620, p521Uint1(x618), x601, x584)
- var x621 uint64
- p521CmovznzU64(&x621, p521Uint1(x618), x603, x586)
- var x622 uint64
- p521CmovznzU64(&x622, p521Uint1(x618), x605, x588)
- var x623 uint64
- p521CmovznzU64(&x623, p521Uint1(x618), x607, x590)
- var x624 uint64
- p521CmovznzU64(&x624, p521Uint1(x618), x609, x592)
- var x625 uint64
- p521CmovznzU64(&x625, p521Uint1(x618), x611, x594)
- var x626 uint64
- p521CmovznzU64(&x626, p521Uint1(x618), x613, x596)
- var x627 uint64
- p521CmovznzU64(&x627, p521Uint1(x618), x615, x598)
- out1[0] = x619
- out1[1] = x620
- out1[2] = x621
- out1[3] = x622
- out1[4] = x623
- out1[5] = x624
- out1[6] = x625
- out1[7] = x626
- out1[8] = x627
-}
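
The reduction rounds above multiply the running low limb directly by the modulus limbs (eight 0xffffffffffffffff words and the top 0x1ff) because for p = 2^521 - 1 ≡ -1 (mod 2^64) the Montgomery constant m' = -p^(-1) mod 2^64 equals 1, so no separate m' multiplication is needed. Functionally the routine computes arg1 * R^(-1) mod p with R = 2^576; a math/big restatement of that contract (a sketch with a made-up input, not this code path):

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		// out = arg * R^{-1} mod p, i.e. leaving the Montgomery domain.
		p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 521), big.NewInt(1))
		r := new(big.Int).Lsh(big.NewInt(1), 576)
		rInv := new(big.Int).ModInverse(r, p)

		arg := big.NewInt(42) // hypothetical Montgomery-domain value
		out := new(big.Int).Mod(new(big.Int).Mul(arg, rInv), p)

		// Multiplying back by R recovers the original value.
		fmt.Println(new(big.Int).Mod(new(big.Int).Mul(out, r), p)) // 42
	}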
-
-// p521ToMontgomery translates a field element into the Montgomery domain.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// Postconditions:
-// eval (from_montgomery out1) mod m = eval arg1 mod m
-// 0 ≤ eval out1 < m
-//
-func p521ToMontgomery(out1 *p521MontgomeryDomainFieldElement, arg1 *p521NonMontgomeryDomainFieldElement) {
- var x1 uint64
- var x2 uint64
- x2, x1 = bits.Mul64(arg1[0], 0x400000000000)
- var x3 uint64
- var x4 uint64
- x4, x3 = bits.Mul64(arg1[1], 0x400000000000)
- var x5 uint64
- var x6 uint64
- x5, x6 = bits.Add64(x2, x3, uint64(0x0))
- var x7 uint64
- var x8 uint64
- x8, x7 = bits.Mul64(x1, 0x1ff)
- var x9 uint64
- var x10 uint64
- x10, x9 = bits.Mul64(x1, 0xffffffffffffffff)
- var x11 uint64
- var x12 uint64
- x12, x11 = bits.Mul64(x1, 0xffffffffffffffff)
- var x13 uint64
- var x14 uint64
- x14, x13 = bits.Mul64(x1, 0xffffffffffffffff)
- var x15 uint64
- var x16 uint64
- x16, x15 = bits.Mul64(x1, 0xffffffffffffffff)
- var x17 uint64
- var x18 uint64
- x18, x17 = bits.Mul64(x1, 0xffffffffffffffff)
- var x19 uint64
- var x20 uint64
- x20, x19 = bits.Mul64(x1, 0xffffffffffffffff)
- var x21 uint64
- var x22 uint64
- x22, x21 = bits.Mul64(x1, 0xffffffffffffffff)
- var x23 uint64
- var x24 uint64
- x24, x23 = bits.Mul64(x1, 0xffffffffffffffff)
- var x25 uint64
- var x26 uint64
- x25, x26 = bits.Add64(x24, x21, uint64(0x0))
- var x27 uint64
- var x28 uint64
- x27, x28 = bits.Add64(x22, x19, uint64(p521Uint1(x26)))
- var x29 uint64
- var x30 uint64
- x29, x30 = bits.Add64(x20, x17, uint64(p521Uint1(x28)))
- var x31 uint64
- var x32 uint64
- x31, x32 = bits.Add64(x18, x15, uint64(p521Uint1(x30)))
- var x33 uint64
- var x34 uint64
- x33, x34 = bits.Add64(x16, x13, uint64(p521Uint1(x32)))
- var x35 uint64
- var x36 uint64
- x35, x36 = bits.Add64(x14, x11, uint64(p521Uint1(x34)))
- var x37 uint64
- var x38 uint64
- x37, x38 = bits.Add64(x12, x9, uint64(p521Uint1(x36)))
- var x39 uint64
- var x40 uint64
- x39, x40 = bits.Add64(x10, x7, uint64(p521Uint1(x38)))
- var x42 uint64
- _, x42 = bits.Add64(x1, x23, uint64(0x0))
- var x43 uint64
- var x44 uint64
- x43, x44 = bits.Add64(x5, x25, uint64(p521Uint1(x42)))
- var x45 uint64
- var x46 uint64
- x45, x46 = bits.Add64((uint64(p521Uint1(x6)) + x4), x27, uint64(p521Uint1(x44)))
- var x47 uint64
- var x48 uint64
- x47, x48 = bits.Add64(uint64(0x0), x29, uint64(p521Uint1(x46)))
- var x49 uint64
- var x50 uint64
- x49, x50 = bits.Add64(uint64(0x0), x31, uint64(p521Uint1(x48)))
- var x51 uint64
- var x52 uint64
- x51, x52 = bits.Add64(uint64(0x0), x33, uint64(p521Uint1(x50)))
- var x53 uint64
- var x54 uint64
- x53, x54 = bits.Add64(uint64(0x0), x35, uint64(p521Uint1(x52)))
- var x55 uint64
- var x56 uint64
- x55, x56 = bits.Add64(uint64(0x0), x37, uint64(p521Uint1(x54)))
- var x57 uint64
- var x58 uint64
- x57, x58 = bits.Add64(uint64(0x0), x39, uint64(p521Uint1(x56)))
- var x59 uint64
- var x60 uint64
- x60, x59 = bits.Mul64(arg1[2], 0x400000000000)
- var x61 uint64
- var x62 uint64
- x61, x62 = bits.Add64(x45, x59, uint64(0x0))
- var x63 uint64
- var x64 uint64
- x63, x64 = bits.Add64(x47, x60, uint64(p521Uint1(x62)))
- var x65 uint64
- var x66 uint64
- x65, x66 = bits.Add64(x49, uint64(0x0), uint64(p521Uint1(x64)))
- var x67 uint64
- var x68 uint64
- x67, x68 = bits.Add64(x51, uint64(0x0), uint64(p521Uint1(x66)))
- var x69 uint64
- var x70 uint64
- x69, x70 = bits.Add64(x53, uint64(0x0), uint64(p521Uint1(x68)))
- var x71 uint64
- var x72 uint64
- x71, x72 = bits.Add64(x55, uint64(0x0), uint64(p521Uint1(x70)))
- var x73 uint64
- var x74 uint64
- x73, x74 = bits.Add64(x57, uint64(0x0), uint64(p521Uint1(x72)))
- var x75 uint64
- var x76 uint64
- x76, x75 = bits.Mul64(x43, 0x1ff)
- var x77 uint64
- var x78 uint64
- x78, x77 = bits.Mul64(x43, 0xffffffffffffffff)
- var x79 uint64
- var x80 uint64
- x80, x79 = bits.Mul64(x43, 0xffffffffffffffff)
- var x81 uint64
- var x82 uint64
- x82, x81 = bits.Mul64(x43, 0xffffffffffffffff)
- var x83 uint64
- var x84 uint64
- x84, x83 = bits.Mul64(x43, 0xffffffffffffffff)
- var x85 uint64
- var x86 uint64
- x86, x85 = bits.Mul64(x43, 0xffffffffffffffff)
- var x87 uint64
- var x88 uint64
- x88, x87 = bits.Mul64(x43, 0xffffffffffffffff)
- var x89 uint64
- var x90 uint64
- x90, x89 = bits.Mul64(x43, 0xffffffffffffffff)
- var x91 uint64
- var x92 uint64
- x92, x91 = bits.Mul64(x43, 0xffffffffffffffff)
- var x93 uint64
- var x94 uint64
- x93, x94 = bits.Add64(x92, x89, uint64(0x0))
- var x95 uint64
- var x96 uint64
- x95, x96 = bits.Add64(x90, x87, uint64(p521Uint1(x94)))
- var x97 uint64
- var x98 uint64
- x97, x98 = bits.Add64(x88, x85, uint64(p521Uint1(x96)))
- var x99 uint64
- var x100 uint64
- x99, x100 = bits.Add64(x86, x83, uint64(p521Uint1(x98)))
- var x101 uint64
- var x102 uint64
- x101, x102 = bits.Add64(x84, x81, uint64(p521Uint1(x100)))
- var x103 uint64
- var x104 uint64
- x103, x104 = bits.Add64(x82, x79, uint64(p521Uint1(x102)))
- var x105 uint64
- var x106 uint64
- x105, x106 = bits.Add64(x80, x77, uint64(p521Uint1(x104)))
- var x107 uint64
- var x108 uint64
- x107, x108 = bits.Add64(x78, x75, uint64(p521Uint1(x106)))
- var x110 uint64
- _, x110 = bits.Add64(x43, x91, uint64(0x0))
- var x111 uint64
- var x112 uint64
- x111, x112 = bits.Add64(x61, x93, uint64(p521Uint1(x110)))
- var x113 uint64
- var x114 uint64
- x113, x114 = bits.Add64(x63, x95, uint64(p521Uint1(x112)))
- var x115 uint64
- var x116 uint64
- x115, x116 = bits.Add64(x65, x97, uint64(p521Uint1(x114)))
- var x117 uint64
- var x118 uint64
- x117, x118 = bits.Add64(x67, x99, uint64(p521Uint1(x116)))
- var x119 uint64
- var x120 uint64
- x119, x120 = bits.Add64(x69, x101, uint64(p521Uint1(x118)))
- var x121 uint64
- var x122 uint64
- x121, x122 = bits.Add64(x71, x103, uint64(p521Uint1(x120)))
- var x123 uint64
- var x124 uint64
- x123, x124 = bits.Add64(x73, x105, uint64(p521Uint1(x122)))
- var x125 uint64
- var x126 uint64
- x125, x126 = bits.Add64((uint64(p521Uint1(x74)) + (uint64(p521Uint1(x58)) + (uint64(p521Uint1(x40)) + x8))), x107, uint64(p521Uint1(x124)))
- var x127 uint64
- var x128 uint64
- x128, x127 = bits.Mul64(arg1[3], 0x400000000000)
- var x129 uint64
- var x130 uint64
- x129, x130 = bits.Add64(x113, x127, uint64(0x0))
- var x131 uint64
- var x132 uint64
- x131, x132 = bits.Add64(x115, x128, uint64(p521Uint1(x130)))
- var x133 uint64
- var x134 uint64
- x133, x134 = bits.Add64(x117, uint64(0x0), uint64(p521Uint1(x132)))
- var x135 uint64
- var x136 uint64
- x135, x136 = bits.Add64(x119, uint64(0x0), uint64(p521Uint1(x134)))
- var x137 uint64
- var x138 uint64
- x137, x138 = bits.Add64(x121, uint64(0x0), uint64(p521Uint1(x136)))
- var x139 uint64
- var x140 uint64
- x139, x140 = bits.Add64(x123, uint64(0x0), uint64(p521Uint1(x138)))
- var x141 uint64
- var x142 uint64
- x141, x142 = bits.Add64(x125, uint64(0x0), uint64(p521Uint1(x140)))
- var x143 uint64
- var x144 uint64
- x144, x143 = bits.Mul64(x111, 0x1ff)
- var x145 uint64
- var x146 uint64
- x146, x145 = bits.Mul64(x111, 0xffffffffffffffff)
- var x147 uint64
- var x148 uint64
- x148, x147 = bits.Mul64(x111, 0xffffffffffffffff)
- var x149 uint64
- var x150 uint64
- x150, x149 = bits.Mul64(x111, 0xffffffffffffffff)
- var x151 uint64
- var x152 uint64
- x152, x151 = bits.Mul64(x111, 0xffffffffffffffff)
- var x153 uint64
- var x154 uint64
- x154, x153 = bits.Mul64(x111, 0xffffffffffffffff)
- var x155 uint64
- var x156 uint64
- x156, x155 = bits.Mul64(x111, 0xffffffffffffffff)
- var x157 uint64
- var x158 uint64
- x158, x157 = bits.Mul64(x111, 0xffffffffffffffff)
- var x159 uint64
- var x160 uint64
- x160, x159 = bits.Mul64(x111, 0xffffffffffffffff)
- var x161 uint64
- var x162 uint64
- x161, x162 = bits.Add64(x160, x157, uint64(0x0))
- var x163 uint64
- var x164 uint64
- x163, x164 = bits.Add64(x158, x155, uint64(p521Uint1(x162)))
- var x165 uint64
- var x166 uint64
- x165, x166 = bits.Add64(x156, x153, uint64(p521Uint1(x164)))
- var x167 uint64
- var x168 uint64
- x167, x168 = bits.Add64(x154, x151, uint64(p521Uint1(x166)))
- var x169 uint64
- var x170 uint64
- x169, x170 = bits.Add64(x152, x149, uint64(p521Uint1(x168)))
- var x171 uint64
- var x172 uint64
- x171, x172 = bits.Add64(x150, x147, uint64(p521Uint1(x170)))
- var x173 uint64
- var x174 uint64
- x173, x174 = bits.Add64(x148, x145, uint64(p521Uint1(x172)))
- var x175 uint64
- var x176 uint64
- x175, x176 = bits.Add64(x146, x143, uint64(p521Uint1(x174)))
- var x178 uint64
- _, x178 = bits.Add64(x111, x159, uint64(0x0))
- var x179 uint64
- var x180 uint64
- x179, x180 = bits.Add64(x129, x161, uint64(p521Uint1(x178)))
- var x181 uint64
- var x182 uint64
- x181, x182 = bits.Add64(x131, x163, uint64(p521Uint1(x180)))
- var x183 uint64
- var x184 uint64
- x183, x184 = bits.Add64(x133, x165, uint64(p521Uint1(x182)))
- var x185 uint64
- var x186 uint64
- x185, x186 = bits.Add64(x135, x167, uint64(p521Uint1(x184)))
- var x187 uint64
- var x188 uint64
- x187, x188 = bits.Add64(x137, x169, uint64(p521Uint1(x186)))
- var x189 uint64
- var x190 uint64
- x189, x190 = bits.Add64(x139, x171, uint64(p521Uint1(x188)))
- var x191 uint64
- var x192 uint64
- x191, x192 = bits.Add64(x141, x173, uint64(p521Uint1(x190)))
- var x193 uint64
- var x194 uint64
- x193, x194 = bits.Add64((uint64(p521Uint1(x142)) + (uint64(p521Uint1(x126)) + (uint64(p521Uint1(x108)) + x76))), x175, uint64(p521Uint1(x192)))
- var x195 uint64
- var x196 uint64
- x196, x195 = bits.Mul64(arg1[4], 0x400000000000)
- var x197 uint64
- var x198 uint64
- x197, x198 = bits.Add64(x181, x195, uint64(0x0))
- var x199 uint64
- var x200 uint64
- x199, x200 = bits.Add64(x183, x196, uint64(p521Uint1(x198)))
- var x201 uint64
- var x202 uint64
- x201, x202 = bits.Add64(x185, uint64(0x0), uint64(p521Uint1(x200)))
- var x203 uint64
- var x204 uint64
- x203, x204 = bits.Add64(x187, uint64(0x0), uint64(p521Uint1(x202)))
- var x205 uint64
- var x206 uint64
- x205, x206 = bits.Add64(x189, uint64(0x0), uint64(p521Uint1(x204)))
- var x207 uint64
- var x208 uint64
- x207, x208 = bits.Add64(x191, uint64(0x0), uint64(p521Uint1(x206)))
- var x209 uint64
- var x210 uint64
- x209, x210 = bits.Add64(x193, uint64(0x0), uint64(p521Uint1(x208)))
- var x211 uint64
- var x212 uint64
- x212, x211 = bits.Mul64(x179, 0x1ff)
- var x213 uint64
- var x214 uint64
- x214, x213 = bits.Mul64(x179, 0xffffffffffffffff)
- var x215 uint64
- var x216 uint64
- x216, x215 = bits.Mul64(x179, 0xffffffffffffffff)
- var x217 uint64
- var x218 uint64
- x218, x217 = bits.Mul64(x179, 0xffffffffffffffff)
- var x219 uint64
- var x220 uint64
- x220, x219 = bits.Mul64(x179, 0xffffffffffffffff)
- var x221 uint64
- var x222 uint64
- x222, x221 = bits.Mul64(x179, 0xffffffffffffffff)
- var x223 uint64
- var x224 uint64
- x224, x223 = bits.Mul64(x179, 0xffffffffffffffff)
- var x225 uint64
- var x226 uint64
- x226, x225 = bits.Mul64(x179, 0xffffffffffffffff)
- var x227 uint64
- var x228 uint64
- x228, x227 = bits.Mul64(x179, 0xffffffffffffffff)
- var x229 uint64
- var x230 uint64
- x229, x230 = bits.Add64(x228, x225, uint64(0x0))
- var x231 uint64
- var x232 uint64
- x231, x232 = bits.Add64(x226, x223, uint64(p521Uint1(x230)))
- var x233 uint64
- var x234 uint64
- x233, x234 = bits.Add64(x224, x221, uint64(p521Uint1(x232)))
- var x235 uint64
- var x236 uint64
- x235, x236 = bits.Add64(x222, x219, uint64(p521Uint1(x234)))
- var x237 uint64
- var x238 uint64
- x237, x238 = bits.Add64(x220, x217, uint64(p521Uint1(x236)))
- var x239 uint64
- var x240 uint64
- x239, x240 = bits.Add64(x218, x215, uint64(p521Uint1(x238)))
- var x241 uint64
- var x242 uint64
- x241, x242 = bits.Add64(x216, x213, uint64(p521Uint1(x240)))
- var x243 uint64
- var x244 uint64
- x243, x244 = bits.Add64(x214, x211, uint64(p521Uint1(x242)))
- var x246 uint64
- _, x246 = bits.Add64(x179, x227, uint64(0x0))
- var x247 uint64
- var x248 uint64
- x247, x248 = bits.Add64(x197, x229, uint64(p521Uint1(x246)))
- var x249 uint64
- var x250 uint64
- x249, x250 = bits.Add64(x199, x231, uint64(p521Uint1(x248)))
- var x251 uint64
- var x252 uint64
- x251, x252 = bits.Add64(x201, x233, uint64(p521Uint1(x250)))
- var x253 uint64
- var x254 uint64
- x253, x254 = bits.Add64(x203, x235, uint64(p521Uint1(x252)))
- var x255 uint64
- var x256 uint64
- x255, x256 = bits.Add64(x205, x237, uint64(p521Uint1(x254)))
- var x257 uint64
- var x258 uint64
- x257, x258 = bits.Add64(x207, x239, uint64(p521Uint1(x256)))
- var x259 uint64
- var x260 uint64
- x259, x260 = bits.Add64(x209, x241, uint64(p521Uint1(x258)))
- var x261 uint64
- var x262 uint64
- x261, x262 = bits.Add64((uint64(p521Uint1(x210)) + (uint64(p521Uint1(x194)) + (uint64(p521Uint1(x176)) + x144))), x243, uint64(p521Uint1(x260)))
- var x263 uint64
- var x264 uint64
- x264, x263 = bits.Mul64(arg1[5], 0x400000000000)
- var x265 uint64
- var x266 uint64
- x265, x266 = bits.Add64(x249, x263, uint64(0x0))
- var x267 uint64
- var x268 uint64
- x267, x268 = bits.Add64(x251, x264, uint64(p521Uint1(x266)))
- var x269 uint64
- var x270 uint64
- x269, x270 = bits.Add64(x253, uint64(0x0), uint64(p521Uint1(x268)))
- var x271 uint64
- var x272 uint64
- x271, x272 = bits.Add64(x255, uint64(0x0), uint64(p521Uint1(x270)))
- var x273 uint64
- var x274 uint64
- x273, x274 = bits.Add64(x257, uint64(0x0), uint64(p521Uint1(x272)))
- var x275 uint64
- var x276 uint64
- x275, x276 = bits.Add64(x259, uint64(0x0), uint64(p521Uint1(x274)))
- var x277 uint64
- var x278 uint64
- x277, x278 = bits.Add64(x261, uint64(0x0), uint64(p521Uint1(x276)))
- var x279 uint64
- var x280 uint64
- x280, x279 = bits.Mul64(x247, 0x1ff)
- var x281 uint64
- var x282 uint64
- x282, x281 = bits.Mul64(x247, 0xffffffffffffffff)
- var x283 uint64
- var x284 uint64
- x284, x283 = bits.Mul64(x247, 0xffffffffffffffff)
- var x285 uint64
- var x286 uint64
- x286, x285 = bits.Mul64(x247, 0xffffffffffffffff)
- var x287 uint64
- var x288 uint64
- x288, x287 = bits.Mul64(x247, 0xffffffffffffffff)
- var x289 uint64
- var x290 uint64
- x290, x289 = bits.Mul64(x247, 0xffffffffffffffff)
- var x291 uint64
- var x292 uint64
- x292, x291 = bits.Mul64(x247, 0xffffffffffffffff)
- var x293 uint64
- var x294 uint64
- x294, x293 = bits.Mul64(x247, 0xffffffffffffffff)
- var x295 uint64
- var x296 uint64
- x296, x295 = bits.Mul64(x247, 0xffffffffffffffff)
- var x297 uint64
- var x298 uint64
- x297, x298 = bits.Add64(x296, x293, uint64(0x0))
- var x299 uint64
- var x300 uint64
- x299, x300 = bits.Add64(x294, x291, uint64(p521Uint1(x298)))
- var x301 uint64
- var x302 uint64
- x301, x302 = bits.Add64(x292, x289, uint64(p521Uint1(x300)))
- var x303 uint64
- var x304 uint64
- x303, x304 = bits.Add64(x290, x287, uint64(p521Uint1(x302)))
- var x305 uint64
- var x306 uint64
- x305, x306 = bits.Add64(x288, x285, uint64(p521Uint1(x304)))
- var x307 uint64
- var x308 uint64
- x307, x308 = bits.Add64(x286, x283, uint64(p521Uint1(x306)))
- var x309 uint64
- var x310 uint64
- x309, x310 = bits.Add64(x284, x281, uint64(p521Uint1(x308)))
- var x311 uint64
- var x312 uint64
- x311, x312 = bits.Add64(x282, x279, uint64(p521Uint1(x310)))
- var x314 uint64
- _, x314 = bits.Add64(x247, x295, uint64(0x0))
- var x315 uint64
- var x316 uint64
- x315, x316 = bits.Add64(x265, x297, uint64(p521Uint1(x314)))
- var x317 uint64
- var x318 uint64
- x317, x318 = bits.Add64(x267, x299, uint64(p521Uint1(x316)))
- var x319 uint64
- var x320 uint64
- x319, x320 = bits.Add64(x269, x301, uint64(p521Uint1(x318)))
- var x321 uint64
- var x322 uint64
- x321, x322 = bits.Add64(x271, x303, uint64(p521Uint1(x320)))
- var x323 uint64
- var x324 uint64
- x323, x324 = bits.Add64(x273, x305, uint64(p521Uint1(x322)))
- var x325 uint64
- var x326 uint64
- x325, x326 = bits.Add64(x275, x307, uint64(p521Uint1(x324)))
- var x327 uint64
- var x328 uint64
- x327, x328 = bits.Add64(x277, x309, uint64(p521Uint1(x326)))
- var x329 uint64
- var x330 uint64
- x329, x330 = bits.Add64((uint64(p521Uint1(x278)) + (uint64(p521Uint1(x262)) + (uint64(p521Uint1(x244)) + x212))), x311, uint64(p521Uint1(x328)))
- var x331 uint64
- var x332 uint64
- x332, x331 = bits.Mul64(arg1[6], 0x400000000000)
- var x333 uint64
- var x334 uint64
- x333, x334 = bits.Add64(x317, x331, uint64(0x0))
- var x335 uint64
- var x336 uint64
- x335, x336 = bits.Add64(x319, x332, uint64(p521Uint1(x334)))
- var x337 uint64
- var x338 uint64
- x337, x338 = bits.Add64(x321, uint64(0x0), uint64(p521Uint1(x336)))
- var x339 uint64
- var x340 uint64
- x339, x340 = bits.Add64(x323, uint64(0x0), uint64(p521Uint1(x338)))
- var x341 uint64
- var x342 uint64
- x341, x342 = bits.Add64(x325, uint64(0x0), uint64(p521Uint1(x340)))
- var x343 uint64
- var x344 uint64
- x343, x344 = bits.Add64(x327, uint64(0x0), uint64(p521Uint1(x342)))
- var x345 uint64
- var x346 uint64
- x345, x346 = bits.Add64(x329, uint64(0x0), uint64(p521Uint1(x344)))
- var x347 uint64
- var x348 uint64
- x348, x347 = bits.Mul64(x315, 0x1ff)
- var x349 uint64
- var x350 uint64
- x350, x349 = bits.Mul64(x315, 0xffffffffffffffff)
- var x351 uint64
- var x352 uint64
- x352, x351 = bits.Mul64(x315, 0xffffffffffffffff)
- var x353 uint64
- var x354 uint64
- x354, x353 = bits.Mul64(x315, 0xffffffffffffffff)
- var x355 uint64
- var x356 uint64
- x356, x355 = bits.Mul64(x315, 0xffffffffffffffff)
- var x357 uint64
- var x358 uint64
- x358, x357 = bits.Mul64(x315, 0xffffffffffffffff)
- var x359 uint64
- var x360 uint64
- x360, x359 = bits.Mul64(x315, 0xffffffffffffffff)
- var x361 uint64
- var x362 uint64
- x362, x361 = bits.Mul64(x315, 0xffffffffffffffff)
- var x363 uint64
- var x364 uint64
- x364, x363 = bits.Mul64(x315, 0xffffffffffffffff)
- var x365 uint64
- var x366 uint64
- x365, x366 = bits.Add64(x364, x361, uint64(0x0))
- var x367 uint64
- var x368 uint64
- x367, x368 = bits.Add64(x362, x359, uint64(p521Uint1(x366)))
- var x369 uint64
- var x370 uint64
- x369, x370 = bits.Add64(x360, x357, uint64(p521Uint1(x368)))
- var x371 uint64
- var x372 uint64
- x371, x372 = bits.Add64(x358, x355, uint64(p521Uint1(x370)))
- var x373 uint64
- var x374 uint64
- x373, x374 = bits.Add64(x356, x353, uint64(p521Uint1(x372)))
- var x375 uint64
- var x376 uint64
- x375, x376 = bits.Add64(x354, x351, uint64(p521Uint1(x374)))
- var x377 uint64
- var x378 uint64
- x377, x378 = bits.Add64(x352, x349, uint64(p521Uint1(x376)))
- var x379 uint64
- var x380 uint64
- x379, x380 = bits.Add64(x350, x347, uint64(p521Uint1(x378)))
- var x382 uint64
- _, x382 = bits.Add64(x315, x363, uint64(0x0))
- var x383 uint64
- var x384 uint64
- x383, x384 = bits.Add64(x333, x365, uint64(p521Uint1(x382)))
- var x385 uint64
- var x386 uint64
- x385, x386 = bits.Add64(x335, x367, uint64(p521Uint1(x384)))
- var x387 uint64
- var x388 uint64
- x387, x388 = bits.Add64(x337, x369, uint64(p521Uint1(x386)))
- var x389 uint64
- var x390 uint64
- x389, x390 = bits.Add64(x339, x371, uint64(p521Uint1(x388)))
- var x391 uint64
- var x392 uint64
- x391, x392 = bits.Add64(x341, x373, uint64(p521Uint1(x390)))
- var x393 uint64
- var x394 uint64
- x393, x394 = bits.Add64(x343, x375, uint64(p521Uint1(x392)))
- var x395 uint64
- var x396 uint64
- x395, x396 = bits.Add64(x345, x377, uint64(p521Uint1(x394)))
- var x397 uint64
- var x398 uint64
- x397, x398 = bits.Add64((uint64(p521Uint1(x346)) + (uint64(p521Uint1(x330)) + (uint64(p521Uint1(x312)) + x280))), x379, uint64(p521Uint1(x396)))
- var x399 uint64
- var x400 uint64
- x400, x399 = bits.Mul64(arg1[7], 0x400000000000)
- var x401 uint64
- var x402 uint64
- x401, x402 = bits.Add64(x385, x399, uint64(0x0))
- var x403 uint64
- var x404 uint64
- x403, x404 = bits.Add64(x387, x400, uint64(p521Uint1(x402)))
- var x405 uint64
- var x406 uint64
- x405, x406 = bits.Add64(x389, uint64(0x0), uint64(p521Uint1(x404)))
- var x407 uint64
- var x408 uint64
- x407, x408 = bits.Add64(x391, uint64(0x0), uint64(p521Uint1(x406)))
- var x409 uint64
- var x410 uint64
- x409, x410 = bits.Add64(x393, uint64(0x0), uint64(p521Uint1(x408)))
- var x411 uint64
- var x412 uint64
- x411, x412 = bits.Add64(x395, uint64(0x0), uint64(p521Uint1(x410)))
- var x413 uint64
- var x414 uint64
- x413, x414 = bits.Add64(x397, uint64(0x0), uint64(p521Uint1(x412)))
- var x415 uint64
- var x416 uint64
- x416, x415 = bits.Mul64(x383, 0x1ff)
- var x417 uint64
- var x418 uint64
- x418, x417 = bits.Mul64(x383, 0xffffffffffffffff)
- var x419 uint64
- var x420 uint64
- x420, x419 = bits.Mul64(x383, 0xffffffffffffffff)
- var x421 uint64
- var x422 uint64
- x422, x421 = bits.Mul64(x383, 0xffffffffffffffff)
- var x423 uint64
- var x424 uint64
- x424, x423 = bits.Mul64(x383, 0xffffffffffffffff)
- var x425 uint64
- var x426 uint64
- x426, x425 = bits.Mul64(x383, 0xffffffffffffffff)
- var x427 uint64
- var x428 uint64
- x428, x427 = bits.Mul64(x383, 0xffffffffffffffff)
- var x429 uint64
- var x430 uint64
- x430, x429 = bits.Mul64(x383, 0xffffffffffffffff)
- var x431 uint64
- var x432 uint64
- x432, x431 = bits.Mul64(x383, 0xffffffffffffffff)
- var x433 uint64
- var x434 uint64
- x433, x434 = bits.Add64(x432, x429, uint64(0x0))
- var x435 uint64
- var x436 uint64
- x435, x436 = bits.Add64(x430, x427, uint64(p521Uint1(x434)))
- var x437 uint64
- var x438 uint64
- x437, x438 = bits.Add64(x428, x425, uint64(p521Uint1(x436)))
- var x439 uint64
- var x440 uint64
- x439, x440 = bits.Add64(x426, x423, uint64(p521Uint1(x438)))
- var x441 uint64
- var x442 uint64
- x441, x442 = bits.Add64(x424, x421, uint64(p521Uint1(x440)))
- var x443 uint64
- var x444 uint64
- x443, x444 = bits.Add64(x422, x419, uint64(p521Uint1(x442)))
- var x445 uint64
- var x446 uint64
- x445, x446 = bits.Add64(x420, x417, uint64(p521Uint1(x444)))
- var x447 uint64
- var x448 uint64
- x447, x448 = bits.Add64(x418, x415, uint64(p521Uint1(x446)))
- var x450 uint64
- _, x450 = bits.Add64(x383, x431, uint64(0x0))
- var x451 uint64
- var x452 uint64
- x451, x452 = bits.Add64(x401, x433, uint64(p521Uint1(x450)))
- var x453 uint64
- var x454 uint64
- x453, x454 = bits.Add64(x403, x435, uint64(p521Uint1(x452)))
- var x455 uint64
- var x456 uint64
- x455, x456 = bits.Add64(x405, x437, uint64(p521Uint1(x454)))
- var x457 uint64
- var x458 uint64
- x457, x458 = bits.Add64(x407, x439, uint64(p521Uint1(x456)))
- var x459 uint64
- var x460 uint64
- x459, x460 = bits.Add64(x409, x441, uint64(p521Uint1(x458)))
- var x461 uint64
- var x462 uint64
- x461, x462 = bits.Add64(x411, x443, uint64(p521Uint1(x460)))
- var x463 uint64
- var x464 uint64
- x463, x464 = bits.Add64(x413, x445, uint64(p521Uint1(x462)))
- var x465 uint64
- var x466 uint64
- x465, x466 = bits.Add64((uint64(p521Uint1(x414)) + (uint64(p521Uint1(x398)) + (uint64(p521Uint1(x380)) + x348))), x447, uint64(p521Uint1(x464)))
- var x467 uint64
- var x468 uint64
- x468, x467 = bits.Mul64(arg1[8], 0x400000000000)
- var x469 uint64
- var x470 uint64
- x469, x470 = bits.Add64(x453, x467, uint64(0x0))
- var x471 uint64
- var x472 uint64
- x471, x472 = bits.Add64(x455, x468, uint64(p521Uint1(x470)))
- var x473 uint64
- var x474 uint64
- x473, x474 = bits.Add64(x457, uint64(0x0), uint64(p521Uint1(x472)))
- var x475 uint64
- var x476 uint64
- x475, x476 = bits.Add64(x459, uint64(0x0), uint64(p521Uint1(x474)))
- var x477 uint64
- var x478 uint64
- x477, x478 = bits.Add64(x461, uint64(0x0), uint64(p521Uint1(x476)))
- var x479 uint64
- var x480 uint64
- x479, x480 = bits.Add64(x463, uint64(0x0), uint64(p521Uint1(x478)))
- var x481 uint64
- var x482 uint64
- x481, x482 = bits.Add64(x465, uint64(0x0), uint64(p521Uint1(x480)))
- var x483 uint64
- var x484 uint64
- x484, x483 = bits.Mul64(x451, 0x1ff)
- var x485 uint64
- var x486 uint64
- x486, x485 = bits.Mul64(x451, 0xffffffffffffffff)
- var x487 uint64
- var x488 uint64
- x488, x487 = bits.Mul64(x451, 0xffffffffffffffff)
- var x489 uint64
- var x490 uint64
- x490, x489 = bits.Mul64(x451, 0xffffffffffffffff)
- var x491 uint64
- var x492 uint64
- x492, x491 = bits.Mul64(x451, 0xffffffffffffffff)
- var x493 uint64
- var x494 uint64
- x494, x493 = bits.Mul64(x451, 0xffffffffffffffff)
- var x495 uint64
- var x496 uint64
- x496, x495 = bits.Mul64(x451, 0xffffffffffffffff)
- var x497 uint64
- var x498 uint64
- x498, x497 = bits.Mul64(x451, 0xffffffffffffffff)
- var x499 uint64
- var x500 uint64
- x500, x499 = bits.Mul64(x451, 0xffffffffffffffff)
- var x501 uint64
- var x502 uint64
- x501, x502 = bits.Add64(x500, x497, uint64(0x0))
- var x503 uint64
- var x504 uint64
- x503, x504 = bits.Add64(x498, x495, uint64(p521Uint1(x502)))
- var x505 uint64
- var x506 uint64
- x505, x506 = bits.Add64(x496, x493, uint64(p521Uint1(x504)))
- var x507 uint64
- var x508 uint64
- x507, x508 = bits.Add64(x494, x491, uint64(p521Uint1(x506)))
- var x509 uint64
- var x510 uint64
- x509, x510 = bits.Add64(x492, x489, uint64(p521Uint1(x508)))
- var x511 uint64
- var x512 uint64
- x511, x512 = bits.Add64(x490, x487, uint64(p521Uint1(x510)))
- var x513 uint64
- var x514 uint64
- x513, x514 = bits.Add64(x488, x485, uint64(p521Uint1(x512)))
- var x515 uint64
- var x516 uint64
- x515, x516 = bits.Add64(x486, x483, uint64(p521Uint1(x514)))
- var x518 uint64
- _, x518 = bits.Add64(x451, x499, uint64(0x0))
- var x519 uint64
- var x520 uint64
- x519, x520 = bits.Add64(x469, x501, uint64(p521Uint1(x518)))
- var x521 uint64
- var x522 uint64
- x521, x522 = bits.Add64(x471, x503, uint64(p521Uint1(x520)))
- var x523 uint64
- var x524 uint64
- x523, x524 = bits.Add64(x473, x505, uint64(p521Uint1(x522)))
- var x525 uint64
- var x526 uint64
- x525, x526 = bits.Add64(x475, x507, uint64(p521Uint1(x524)))
- var x527 uint64
- var x528 uint64
- x527, x528 = bits.Add64(x477, x509, uint64(p521Uint1(x526)))
- var x529 uint64
- var x530 uint64
- x529, x530 = bits.Add64(x479, x511, uint64(p521Uint1(x528)))
- var x531 uint64
- var x532 uint64
- x531, x532 = bits.Add64(x481, x513, uint64(p521Uint1(x530)))
- var x533 uint64
- var x534 uint64
- x533, x534 = bits.Add64((uint64(p521Uint1(x482)) + (uint64(p521Uint1(x466)) + (uint64(p521Uint1(x448)) + x416))), x515, uint64(p521Uint1(x532)))
- x535 := (uint64(p521Uint1(x534)) + (uint64(p521Uint1(x516)) + x484))
- var x536 uint64
- var x537 uint64
- x536, x537 = bits.Sub64(x519, 0xffffffffffffffff, uint64(0x0))
- var x538 uint64
- var x539 uint64
- x538, x539 = bits.Sub64(x521, 0xffffffffffffffff, uint64(p521Uint1(x537)))
- var x540 uint64
- var x541 uint64
- x540, x541 = bits.Sub64(x523, 0xffffffffffffffff, uint64(p521Uint1(x539)))
- var x542 uint64
- var x543 uint64
- x542, x543 = bits.Sub64(x525, 0xffffffffffffffff, uint64(p521Uint1(x541)))
- var x544 uint64
- var x545 uint64
- x544, x545 = bits.Sub64(x527, 0xffffffffffffffff, uint64(p521Uint1(x543)))
- var x546 uint64
- var x547 uint64
- x546, x547 = bits.Sub64(x529, 0xffffffffffffffff, uint64(p521Uint1(x545)))
- var x548 uint64
- var x549 uint64
- x548, x549 = bits.Sub64(x531, 0xffffffffffffffff, uint64(p521Uint1(x547)))
- var x550 uint64
- var x551 uint64
- x550, x551 = bits.Sub64(x533, 0xffffffffffffffff, uint64(p521Uint1(x549)))
- var x552 uint64
- var x553 uint64
- x552, x553 = bits.Sub64(x535, 0x1ff, uint64(p521Uint1(x551)))
- var x555 uint64
- _, x555 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p521Uint1(x553)))
- var x556 uint64
- p521CmovznzU64(&x556, p521Uint1(x555), x536, x519)
- var x557 uint64
- p521CmovznzU64(&x557, p521Uint1(x555), x538, x521)
- var x558 uint64
- p521CmovznzU64(&x558, p521Uint1(x555), x540, x523)
- var x559 uint64
- p521CmovznzU64(&x559, p521Uint1(x555), x542, x525)
- var x560 uint64
- p521CmovznzU64(&x560, p521Uint1(x555), x544, x527)
- var x561 uint64
- p521CmovznzU64(&x561, p521Uint1(x555), x546, x529)
- var x562 uint64
- p521CmovznzU64(&x562, p521Uint1(x555), x548, x531)
- var x563 uint64
- p521CmovznzU64(&x563, p521Uint1(x555), x550, x533)
- var x564 uint64
- p521CmovznzU64(&x564, p521Uint1(x555), x552, x535)
- out1[0] = x556
- out1[1] = x557
- out1[2] = x558
- out1[3] = x559
- out1[4] = x560
- out1[5] = x561
- out1[6] = x562
- out1[7] = x563
- out1[8] = x564
-}
-
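// A branch-free select sketch: the p521CmovznzU64 calls above pick between
// the reduced and unreduced value without a secret-dependent branch. A
// minimal stand-alone equivalent (cmovznzU64 is an illustrative name, not
// the generated fiat API) expands the one-bit condition into a mask:
//
//	func cmovznzU64(out *uint64, b uint64, z, nz uint64) {
//		mask := uint64(0) - (b & 1) // 0x00...0 if b == 0, 0xff...f if b == 1
//		*out = (z &^ mask) | (nz & mask)
//	}
//
// Above, b is the borrow out of the final subtraction of the modulus: when
// it is 1 the subtraction underflowed and the original value is kept,
// otherwise the reduced value is selected.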
-// p521Selectznz is a multi-limb conditional select.
-//
-// Postconditions:
-// eval out1 = (if arg1 = 0 then eval arg2 else eval arg3)
-//
-// Input Bounds:
-// arg1: [0x0 ~> 0x1]
-// arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-// arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-// Output Bounds:
-// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-func p521Selectznz(out1 *[9]uint64, arg1 p521Uint1, arg2 *[9]uint64, arg3 *[9]uint64) {
- var x1 uint64
- p521CmovznzU64(&x1, arg1, arg2[0], arg3[0])
- var x2 uint64
- p521CmovznzU64(&x2, arg1, arg2[1], arg3[1])
- var x3 uint64
- p521CmovznzU64(&x3, arg1, arg2[2], arg3[2])
- var x4 uint64
- p521CmovznzU64(&x4, arg1, arg2[3], arg3[3])
- var x5 uint64
- p521CmovznzU64(&x5, arg1, arg2[4], arg3[4])
- var x6 uint64
- p521CmovznzU64(&x6, arg1, arg2[5], arg3[5])
- var x7 uint64
- p521CmovznzU64(&x7, arg1, arg2[6], arg3[6])
- var x8 uint64
- p521CmovznzU64(&x8, arg1, arg2[7], arg3[7])
- var x9 uint64
- p521CmovznzU64(&x9, arg1, arg2[8], arg3[8])
- out1[0] = x1
- out1[1] = x2
- out1[2] = x3
- out1[3] = x4
- out1[4] = x5
- out1[5] = x6
- out1[6] = x7
- out1[7] = x8
- out1[8] = x9
-}
-
-// p521ToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
-//
-// Preconditions:
-// 0 ≤ eval arg1 < m
-// Postconditions:
-// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..65]
-//
-// Input Bounds:
-// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1ff]]
-// Output Bounds:
-// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1]]
-func p521ToBytes(out1 *[66]uint8, arg1 *[9]uint64) {
- x1 := arg1[8]
- x2 := arg1[7]
- x3 := arg1[6]
- x4 := arg1[5]
- x5 := arg1[4]
- x6 := arg1[3]
- x7 := arg1[2]
- x8 := arg1[1]
- x9 := arg1[0]
- x10 := (uint8(x9) & 0xff)
- x11 := (x9 >> 8)
- x12 := (uint8(x11) & 0xff)
- x13 := (x11 >> 8)
- x14 := (uint8(x13) & 0xff)
- x15 := (x13 >> 8)
- x16 := (uint8(x15) & 0xff)
- x17 := (x15 >> 8)
- x18 := (uint8(x17) & 0xff)
- x19 := (x17 >> 8)
- x20 := (uint8(x19) & 0xff)
- x21 := (x19 >> 8)
- x22 := (uint8(x21) & 0xff)
- x23 := uint8((x21 >> 8))
- x24 := (uint8(x8) & 0xff)
- x25 := (x8 >> 8)
- x26 := (uint8(x25) & 0xff)
- x27 := (x25 >> 8)
- x28 := (uint8(x27) & 0xff)
- x29 := (x27 >> 8)
- x30 := (uint8(x29) & 0xff)
- x31 := (x29 >> 8)
- x32 := (uint8(x31) & 0xff)
- x33 := (x31 >> 8)
- x34 := (uint8(x33) & 0xff)
- x35 := (x33 >> 8)
- x36 := (uint8(x35) & 0xff)
- x37 := uint8((x35 >> 8))
- x38 := (uint8(x7) & 0xff)
- x39 := (x7 >> 8)
- x40 := (uint8(x39) & 0xff)
- x41 := (x39 >> 8)
- x42 := (uint8(x41) & 0xff)
- x43 := (x41 >> 8)
- x44 := (uint8(x43) & 0xff)
- x45 := (x43 >> 8)
- x46 := (uint8(x45) & 0xff)
- x47 := (x45 >> 8)
- x48 := (uint8(x47) & 0xff)
- x49 := (x47 >> 8)
- x50 := (uint8(x49) & 0xff)
- x51 := uint8((x49 >> 8))
- x52 := (uint8(x6) & 0xff)
- x53 := (x6 >> 8)
- x54 := (uint8(x53) & 0xff)
- x55 := (x53 >> 8)
- x56 := (uint8(x55) & 0xff)
- x57 := (x55 >> 8)
- x58 := (uint8(x57) & 0xff)
- x59 := (x57 >> 8)
- x60 := (uint8(x59) & 0xff)
- x61 := (x59 >> 8)
- x62 := (uint8(x61) & 0xff)
- x63 := (x61 >> 8)
- x64 := (uint8(x63) & 0xff)
- x65 := uint8((x63 >> 8))
- x66 := (uint8(x5) & 0xff)
- x67 := (x5 >> 8)
- x68 := (uint8(x67) & 0xff)
- x69 := (x67 >> 8)
- x70 := (uint8(x69) & 0xff)
- x71 := (x69 >> 8)
- x72 := (uint8(x71) & 0xff)
- x73 := (x71 >> 8)
- x74 := (uint8(x73) & 0xff)
- x75 := (x73 >> 8)
- x76 := (uint8(x75) & 0xff)
- x77 := (x75 >> 8)
- x78 := (uint8(x77) & 0xff)
- x79 := uint8((x77 >> 8))
- x80 := (uint8(x4) & 0xff)
- x81 := (x4 >> 8)
- x82 := (uint8(x81) & 0xff)
- x83 := (x81 >> 8)
- x84 := (uint8(x83) & 0xff)
- x85 := (x83 >> 8)
- x86 := (uint8(x85) & 0xff)
- x87 := (x85 >> 8)
- x88 := (uint8(x87) & 0xff)
- x89 := (x87 >> 8)
- x90 := (uint8(x89) & 0xff)
- x91 := (x89 >> 8)
- x92 := (uint8(x91) & 0xff)
- x93 := uint8((x91 >> 8))
- x94 := (uint8(x3) & 0xff)
- x95 := (x3 >> 8)
- x96 := (uint8(x95) & 0xff)
- x97 := (x95 >> 8)
- x98 := (uint8(x97) & 0xff)
- x99 := (x97 >> 8)
- x100 := (uint8(x99) & 0xff)
- x101 := (x99 >> 8)
- x102 := (uint8(x101) & 0xff)
- x103 := (x101 >> 8)
- x104 := (uint8(x103) & 0xff)
- x105 := (x103 >> 8)
- x106 := (uint8(x105) & 0xff)
- x107 := uint8((x105 >> 8))
- x108 := (uint8(x2) & 0xff)
- x109 := (x2 >> 8)
- x110 := (uint8(x109) & 0xff)
- x111 := (x109 >> 8)
- x112 := (uint8(x111) & 0xff)
- x113 := (x111 >> 8)
- x114 := (uint8(x113) & 0xff)
- x115 := (x113 >> 8)
- x116 := (uint8(x115) & 0xff)
- x117 := (x115 >> 8)
- x118 := (uint8(x117) & 0xff)
- x119 := (x117 >> 8)
- x120 := (uint8(x119) & 0xff)
- x121 := uint8((x119 >> 8))
- x122 := (uint8(x1) & 0xff)
- x123 := p521Uint1((x1 >> 8))
- out1[0] = x10
- out1[1] = x12
- out1[2] = x14
- out1[3] = x16
- out1[4] = x18
- out1[5] = x20
- out1[6] = x22
- out1[7] = x23
- out1[8] = x24
- out1[9] = x26
- out1[10] = x28
- out1[11] = x30
- out1[12] = x32
- out1[13] = x34
- out1[14] = x36
- out1[15] = x37
- out1[16] = x38
- out1[17] = x40
- out1[18] = x42
- out1[19] = x44
- out1[20] = x46
- out1[21] = x48
- out1[22] = x50
- out1[23] = x51
- out1[24] = x52
- out1[25] = x54
- out1[26] = x56
- out1[27] = x58
- out1[28] = x60
- out1[29] = x62
- out1[30] = x64
- out1[31] = x65
- out1[32] = x66
- out1[33] = x68
- out1[34] = x70
- out1[35] = x72
- out1[36] = x74
- out1[37] = x76
- out1[38] = x78
- out1[39] = x79
- out1[40] = x80
- out1[41] = x82
- out1[42] = x84
- out1[43] = x86
- out1[44] = x88
- out1[45] = x90
- out1[46] = x92
- out1[47] = x93
- out1[48] = x94
- out1[49] = x96
- out1[50] = x98
- out1[51] = x100
- out1[52] = x102
- out1[53] = x104
- out1[54] = x106
- out1[55] = x107
- out1[56] = x108
- out1[57] = x110
- out1[58] = x112
- out1[59] = x114
- out1[60] = x116
- out1[61] = x118
- out1[62] = x120
- out1[63] = x121
- out1[64] = x122
- out1[65] = uint8(x123)
-}
-
-// p521FromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
-//
-// Preconditions:
-// 0 ≤ bytes_eval arg1 < m
-// Postconditions:
-// eval out1 mod m = bytes_eval arg1 mod m
-// 0 ≤ eval out1 < m
-//
-// Input Bounds:
-// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1]]
-// Output Bounds:
-// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1ff]]
-func p521FromBytes(out1 *[9]uint64, arg1 *[66]uint8) {
- x1 := (uint64(p521Uint1(arg1[65])) << 8)
- x2 := arg1[64]
- x3 := (uint64(arg1[63]) << 56)
- x4 := (uint64(arg1[62]) << 48)
- x5 := (uint64(arg1[61]) << 40)
- x6 := (uint64(arg1[60]) << 32)
- x7 := (uint64(arg1[59]) << 24)
- x8 := (uint64(arg1[58]) << 16)
- x9 := (uint64(arg1[57]) << 8)
- x10 := arg1[56]
- x11 := (uint64(arg1[55]) << 56)
- x12 := (uint64(arg1[54]) << 48)
- x13 := (uint64(arg1[53]) << 40)
- x14 := (uint64(arg1[52]) << 32)
- x15 := (uint64(arg1[51]) << 24)
- x16 := (uint64(arg1[50]) << 16)
- x17 := (uint64(arg1[49]) << 8)
- x18 := arg1[48]
- x19 := (uint64(arg1[47]) << 56)
- x20 := (uint64(arg1[46]) << 48)
- x21 := (uint64(arg1[45]) << 40)
- x22 := (uint64(arg1[44]) << 32)
- x23 := (uint64(arg1[43]) << 24)
- x24 := (uint64(arg1[42]) << 16)
- x25 := (uint64(arg1[41]) << 8)
- x26 := arg1[40]
- x27 := (uint64(arg1[39]) << 56)
- x28 := (uint64(arg1[38]) << 48)
- x29 := (uint64(arg1[37]) << 40)
- x30 := (uint64(arg1[36]) << 32)
- x31 := (uint64(arg1[35]) << 24)
- x32 := (uint64(arg1[34]) << 16)
- x33 := (uint64(arg1[33]) << 8)
- x34 := arg1[32]
- x35 := (uint64(arg1[31]) << 56)
- x36 := (uint64(arg1[30]) << 48)
- x37 := (uint64(arg1[29]) << 40)
- x38 := (uint64(arg1[28]) << 32)
- x39 := (uint64(arg1[27]) << 24)
- x40 := (uint64(arg1[26]) << 16)
- x41 := (uint64(arg1[25]) << 8)
- x42 := arg1[24]
- x43 := (uint64(arg1[23]) << 56)
- x44 := (uint64(arg1[22]) << 48)
- x45 := (uint64(arg1[21]) << 40)
- x46 := (uint64(arg1[20]) << 32)
- x47 := (uint64(arg1[19]) << 24)
- x48 := (uint64(arg1[18]) << 16)
- x49 := (uint64(arg1[17]) << 8)
- x50 := arg1[16]
- x51 := (uint64(arg1[15]) << 56)
- x52 := (uint64(arg1[14]) << 48)
- x53 := (uint64(arg1[13]) << 40)
- x54 := (uint64(arg1[12]) << 32)
- x55 := (uint64(arg1[11]) << 24)
- x56 := (uint64(arg1[10]) << 16)
- x57 := (uint64(arg1[9]) << 8)
- x58 := arg1[8]
- x59 := (uint64(arg1[7]) << 56)
- x60 := (uint64(arg1[6]) << 48)
- x61 := (uint64(arg1[5]) << 40)
- x62 := (uint64(arg1[4]) << 32)
- x63 := (uint64(arg1[3]) << 24)
- x64 := (uint64(arg1[2]) << 16)
- x65 := (uint64(arg1[1]) << 8)
- x66 := arg1[0]
- x67 := (x65 + uint64(x66))
- x68 := (x64 + x67)
- x69 := (x63 + x68)
- x70 := (x62 + x69)
- x71 := (x61 + x70)
- x72 := (x60 + x71)
- x73 := (x59 + x72)
- x74 := (x57 + uint64(x58))
- x75 := (x56 + x74)
- x76 := (x55 + x75)
- x77 := (x54 + x76)
- x78 := (x53 + x77)
- x79 := (x52 + x78)
- x80 := (x51 + x79)
- x81 := (x49 + uint64(x50))
- x82 := (x48 + x81)
- x83 := (x47 + x82)
- x84 := (x46 + x83)
- x85 := (x45 + x84)
- x86 := (x44 + x85)
- x87 := (x43 + x86)
- x88 := (x41 + uint64(x42))
- x89 := (x40 + x88)
- x90 := (x39 + x89)
- x91 := (x38 + x90)
- x92 := (x37 + x91)
- x93 := (x36 + x92)
- x94 := (x35 + x93)
- x95 := (x33 + uint64(x34))
- x96 := (x32 + x95)
- x97 := (x31 + x96)
- x98 := (x30 + x97)
- x99 := (x29 + x98)
- x100 := (x28 + x99)
- x101 := (x27 + x100)
- x102 := (x25 + uint64(x26))
- x103 := (x24 + x102)
- x104 := (x23 + x103)
- x105 := (x22 + x104)
- x106 := (x21 + x105)
- x107 := (x20 + x106)
- x108 := (x19 + x107)
- x109 := (x17 + uint64(x18))
- x110 := (x16 + x109)
- x111 := (x15 + x110)
- x112 := (x14 + x111)
- x113 := (x13 + x112)
- x114 := (x12 + x113)
- x115 := (x11 + x114)
- x116 := (x9 + uint64(x10))
- x117 := (x8 + x116)
- x118 := (x7 + x117)
- x119 := (x6 + x118)
- x120 := (x5 + x119)
- x121 := (x4 + x120)
- x122 := (x3 + x121)
- x123 := (x1 + uint64(x2))
- out1[0] = x73
- out1[1] = x80
- out1[2] = x87
- out1[3] = x94
- out1[4] = x101
- out1[5] = x108
- out1[6] = x115
- out1[7] = x122
- out1[8] = x123
-}
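The unrolled serialization above is easier to follow as a loop. Below is a
minimal sketch, using encoding/binary instead of the generated code, of the
little-endian limb packing that p521ToBytes and p521FromBytes perform: eight
full 64-bit limbs plus a ninth limb that carries only 9 bits, 66 bytes in
total (toBytes and fromBytes are illustrative names, not the fiat API).

package main

import (
	"encoding/binary"
	"fmt"
)

// toBytes packs nine little-endian limbs into 66 bytes; the ninth limb is
// bounded by 0x1ff, so it contributes one full byte and a single extra bit.
func toBytes(limbs *[9]uint64) (out [66]byte) {
	for i := 0; i < 8; i++ {
		binary.LittleEndian.PutUint64(out[8*i:8*i+8], limbs[i])
	}
	out[64] = byte(limbs[8])      // low 8 bits of the top limb
	out[65] = byte(limbs[8] >> 8) // 9th bit (0 or 1)
	return
}

// fromBytes is the inverse: it rebuilds the nine limbs from 66 bytes.
func fromBytes(b *[66]byte) (limbs [9]uint64) {
	for i := 0; i < 8; i++ {
		limbs[i] = binary.LittleEndian.Uint64(b[8*i : 8*i+8])
	}
	limbs[8] = uint64(b[64]) | uint64(b[65])<<8
	return
}

func main() {
	in := [9]uint64{1, 2, 3, 4, 5, 6, 7, 8, 0x1ff}
	enc := toBytes(&in)
	fmt.Println(fromBytes(&enc) == in) // true: the packing round-trips
}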
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/internal/nistec/p224.go b/contrib/go/_std_1.18/src/crypto/elliptic/internal/nistec/p224.go
deleted file mode 100644
index 74dbc184dd..0000000000
--- a/contrib/go/_std_1.18/src/crypto/elliptic/internal/nistec/p224.go
+++ /dev/null
@@ -1,293 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package nistec
-
-import (
- "crypto/elliptic/internal/fiat"
- "crypto/subtle"
- "errors"
-)
-
-var p224B, _ = new(fiat.P224Element).SetBytes([]byte{0xb4, 0x05, 0x0a, 0x85,
- 0x0c, 0x04, 0xb3, 0xab, 0xf5, 0x41, 0x32, 0x56, 0x50, 0x44, 0xb0, 0xb7,
- 0xd7, 0xbf, 0xd8, 0xba, 0x27, 0x0b, 0x39, 0x43, 0x23, 0x55, 0xff, 0xb4})
-
-var p224G, _ = NewP224Point().SetBytes([]byte{0x04,
- 0xb7, 0x0e, 0x0c, 0xbd, 0x6b, 0xb4, 0xbf, 0x7f, 0x32, 0x13, 0x90, 0xb9,
- 0x4a, 0x03, 0xc1, 0xd3, 0x56, 0xc2, 0x11, 0x22, 0x34, 0x32, 0x80, 0xd6,
- 0x11, 0x5c, 0x1d, 0x21, 0xbd, 0x37, 0x63, 0x88, 0xb5, 0xf7, 0x23, 0xfb,
- 0x4c, 0x22, 0xdf, 0xe6, 0xcd, 0x43, 0x75, 0xa0, 0x5a, 0x07, 0x47, 0x64,
- 0x44, 0xd5, 0x81, 0x99, 0x85, 0x0, 0x7e, 0x34})
-
-const p224ElementLength = 28
-
-// P224Point is a P-224 point. The zero value is NOT valid.
-type P224Point struct {
- // The point is represented in projective coordinates (X:Y:Z),
- // where x = X/Z and y = Y/Z.
- x, y, z *fiat.P224Element
-}
-
-// NewP224Point returns a new P224Point representing the point at infinity.
-func NewP224Point() *P224Point {
- return &P224Point{
- x: new(fiat.P224Element),
- y: new(fiat.P224Element).One(),
- z: new(fiat.P224Element),
- }
-}
-
-// NewP224Generator returns a new P224Point set to the canonical generator.
-func NewP224Generator() *P224Point {
- return (&P224Point{
- x: new(fiat.P224Element),
- y: new(fiat.P224Element),
- z: new(fiat.P224Element),
- }).Set(p224G)
-}
-
-// Set sets p = q and returns p.
-func (p *P224Point) Set(q *P224Point) *P224Point {
- p.x.Set(q.x)
- p.y.Set(q.y)
- p.z.Set(q.z)
- return p
-}
-
-// SetBytes sets p to the compressed, uncompressed, or infinity value encoded in
-// b, as specified in SEC 1, Version 2.0, Section 2.3.4. If the point is not on
-// the curve, it returns nil and an error, and the receiver is unchanged.
-// Otherwise, it returns p.
-func (p *P224Point) SetBytes(b []byte) (*P224Point, error) {
- switch {
- // Point at infinity.
- case len(b) == 1 && b[0] == 0:
- return p.Set(NewP224Point()), nil
-
- // Uncompressed form.
- case len(b) == 1+2*p224ElementLength && b[0] == 4:
- x, err := new(fiat.P224Element).SetBytes(b[1 : 1+p224ElementLength])
- if err != nil {
- return nil, err
- }
- y, err := new(fiat.P224Element).SetBytes(b[1+p224ElementLength:])
- if err != nil {
- return nil, err
- }
- if err := p224CheckOnCurve(x, y); err != nil {
- return nil, err
- }
- p.x.Set(x)
- p.y.Set(y)
- p.z.One()
- return p, nil
-
-	// Compressed form.
- case len(b) == 1+p224ElementLength && b[0] == 0:
- return nil, errors.New("unimplemented") // TODO(filippo)
-
- default:
- return nil, errors.New("invalid P224 point encoding")
- }
-}
-
-func p224CheckOnCurve(x, y *fiat.P224Element) error {
- // x³ - 3x + b.
- x3 := new(fiat.P224Element).Square(x)
- x3.Mul(x3, x)
-
- threeX := new(fiat.P224Element).Add(x, x)
- threeX.Add(threeX, x)
-
- x3.Sub(x3, threeX)
- x3.Add(x3, p224B)
-
- // y² = x³ - 3x + b
- y2 := new(fiat.P224Element).Square(y)
-
- if x3.Equal(y2) != 1 {
- return errors.New("P224 point not on curve")
- }
- return nil
-}
-
-// Bytes returns the uncompressed or infinity encoding of p, as specified in
-// SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the point at
-// infinity is shorter than all other encodings.
-func (p *P224Point) Bytes() []byte {
- // This function is outlined to make the allocations inline in the caller
- // rather than happen on the heap.
- var out [133]byte
- return p.bytes(&out)
-}
-
-func (p *P224Point) bytes(out *[133]byte) []byte {
- if p.z.IsZero() == 1 {
- return append(out[:0], 0)
- }
-
- zinv := new(fiat.P224Element).Invert(p.z)
- xx := new(fiat.P224Element).Mul(p.x, zinv)
- yy := new(fiat.P224Element).Mul(p.y, zinv)
-
- buf := append(out[:0], 4)
- buf = append(buf, xx.Bytes()...)
- buf = append(buf, yy.Bytes()...)
- return buf
-}
-
-// Add sets q = p1 + p2, and returns q. The points may overlap.
-func (q *P224Point) Add(p1, p2 *P224Point) *P224Point {
- // Complete addition formula for a = -3 from "Complete addition formulas for
- // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
-
- t0 := new(fiat.P224Element).Mul(p1.x, p2.x) // t0 := X1 * X2
- t1 := new(fiat.P224Element).Mul(p1.y, p2.y) // t1 := Y1 * Y2
- t2 := new(fiat.P224Element).Mul(p1.z, p2.z) // t2 := Z1 * Z2
- t3 := new(fiat.P224Element).Add(p1.x, p1.y) // t3 := X1 + Y1
- t4 := new(fiat.P224Element).Add(p2.x, p2.y) // t4 := X2 + Y2
- t3.Mul(t3, t4) // t3 := t3 * t4
- t4.Add(t0, t1) // t4 := t0 + t1
- t3.Sub(t3, t4) // t3 := t3 - t4
- t4.Add(p1.y, p1.z) // t4 := Y1 + Z1
- x3 := new(fiat.P224Element).Add(p2.y, p2.z) // X3 := Y2 + Z2
- t4.Mul(t4, x3) // t4 := t4 * X3
- x3.Add(t1, t2) // X3 := t1 + t2
- t4.Sub(t4, x3) // t4 := t4 - X3
- x3.Add(p1.x, p1.z) // X3 := X1 + Z1
- y3 := new(fiat.P224Element).Add(p2.x, p2.z) // Y3 := X2 + Z2
- x3.Mul(x3, y3) // X3 := X3 * Y3
- y3.Add(t0, t2) // Y3 := t0 + t2
- y3.Sub(x3, y3) // Y3 := X3 - Y3
- z3 := new(fiat.P224Element).Mul(p224B, t2) // Z3 := b * t2
- x3.Sub(y3, z3) // X3 := Y3 - Z3
- z3.Add(x3, x3) // Z3 := X3 + X3
- x3.Add(x3, z3) // X3 := X3 + Z3
- z3.Sub(t1, x3) // Z3 := t1 - X3
- x3.Add(t1, x3) // X3 := t1 + X3
- y3.Mul(p224B, y3) // Y3 := b * Y3
- t1.Add(t2, t2) // t1 := t2 + t2
- t2.Add(t1, t2) // t2 := t1 + t2
- y3.Sub(y3, t2) // Y3 := Y3 - t2
- y3.Sub(y3, t0) // Y3 := Y3 - t0
- t1.Add(y3, y3) // t1 := Y3 + Y3
- y3.Add(t1, y3) // Y3 := t1 + Y3
- t1.Add(t0, t0) // t1 := t0 + t0
- t0.Add(t1, t0) // t0 := t1 + t0
- t0.Sub(t0, t2) // t0 := t0 - t2
- t1.Mul(t4, y3) // t1 := t4 * Y3
- t2.Mul(t0, y3) // t2 := t0 * Y3
- y3.Mul(x3, z3) // Y3 := X3 * Z3
- y3.Add(y3, t2) // Y3 := Y3 + t2
- x3.Mul(t3, x3) // X3 := t3 * X3
- x3.Sub(x3, t1) // X3 := X3 - t1
- z3.Mul(t4, z3) // Z3 := t4 * Z3
- t1.Mul(t3, t0) // t1 := t3 * t0
- z3.Add(z3, t1) // Z3 := Z3 + t1
-
- q.x.Set(x3)
- q.y.Set(y3)
- q.z.Set(z3)
- return q
-}
-
-// Double sets q = p + p, and returns q. The points may overlap.
-func (q *P224Point) Double(p *P224Point) *P224Point {
- // Complete addition formula for a = -3 from "Complete addition formulas for
- // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
-
- t0 := new(fiat.P224Element).Square(p.x) // t0 := X ^ 2
- t1 := new(fiat.P224Element).Square(p.y) // t1 := Y ^ 2
- t2 := new(fiat.P224Element).Square(p.z) // t2 := Z ^ 2
- t3 := new(fiat.P224Element).Mul(p.x, p.y) // t3 := X * Y
- t3.Add(t3, t3) // t3 := t3 + t3
- z3 := new(fiat.P224Element).Mul(p.x, p.z) // Z3 := X * Z
- z3.Add(z3, z3) // Z3 := Z3 + Z3
- y3 := new(fiat.P224Element).Mul(p224B, t2) // Y3 := b * t2
- y3.Sub(y3, z3) // Y3 := Y3 - Z3
- x3 := new(fiat.P224Element).Add(y3, y3) // X3 := Y3 + Y3
- y3.Add(x3, y3) // Y3 := X3 + Y3
- x3.Sub(t1, y3) // X3 := t1 - Y3
- y3.Add(t1, y3) // Y3 := t1 + Y3
- y3.Mul(x3, y3) // Y3 := X3 * Y3
- x3.Mul(x3, t3) // X3 := X3 * t3
- t3.Add(t2, t2) // t3 := t2 + t2
- t2.Add(t2, t3) // t2 := t2 + t3
- z3.Mul(p224B, z3) // Z3 := b * Z3
- z3.Sub(z3, t2) // Z3 := Z3 - t2
- z3.Sub(z3, t0) // Z3 := Z3 - t0
- t3.Add(z3, z3) // t3 := Z3 + Z3
- z3.Add(z3, t3) // Z3 := Z3 + t3
- t3.Add(t0, t0) // t3 := t0 + t0
- t0.Add(t3, t0) // t0 := t3 + t0
- t0.Sub(t0, t2) // t0 := t0 - t2
- t0.Mul(t0, z3) // t0 := t0 * Z3
- y3.Add(y3, t0) // Y3 := Y3 + t0
- t0.Mul(p.y, p.z) // t0 := Y * Z
- t0.Add(t0, t0) // t0 := t0 + t0
- z3.Mul(t0, z3) // Z3 := t0 * Z3
- x3.Sub(x3, z3) // X3 := X3 - Z3
- z3.Mul(t0, t1) // Z3 := t0 * t1
- z3.Add(z3, z3) // Z3 := Z3 + Z3
- z3.Add(z3, z3) // Z3 := Z3 + Z3
-
- q.x.Set(x3)
- q.y.Set(y3)
- q.z.Set(z3)
- return q
-}
-
-// Select sets q to p1 if cond == 1, and to p2 if cond == 0.
-func (q *P224Point) Select(p1, p2 *P224Point, cond int) *P224Point {
- q.x.Select(p1.x, p2.x, cond)
- q.y.Select(p1.y, p2.y, cond)
- q.z.Select(p1.z, p2.z, cond)
- return q
-}
-
-// ScalarMult sets p = scalar * q, and returns p.
-func (p *P224Point) ScalarMult(q *P224Point, scalar []byte) *P224Point {
-	// table holds the first 16 multiples of q. The explicit NewP224Point calls
- // get inlined, letting the allocations live on the stack.
- var table = [16]*P224Point{
- NewP224Point(), NewP224Point(), NewP224Point(), NewP224Point(),
- NewP224Point(), NewP224Point(), NewP224Point(), NewP224Point(),
- NewP224Point(), NewP224Point(), NewP224Point(), NewP224Point(),
- NewP224Point(), NewP224Point(), NewP224Point(), NewP224Point(),
- }
- for i := 1; i < 16; i++ {
- table[i].Add(table[i-1], q)
- }
-
- // Instead of doing the classic double-and-add chain, we do it with a
- // four-bit window: we double four times, and then add [0-15]P.
- t := NewP224Point()
- p.Set(NewP224Point())
- for _, byte := range scalar {
- p.Double(p)
- p.Double(p)
- p.Double(p)
- p.Double(p)
-
- for i := uint8(0); i < 16; i++ {
- cond := subtle.ConstantTimeByteEq(byte>>4, i)
- t.Select(table[i], t, cond)
- }
- p.Add(p, t)
-
- p.Double(p)
- p.Double(p)
- p.Double(p)
- p.Double(p)
-
- for i := uint8(0); i < 16; i++ {
- cond := subtle.ConstantTimeByteEq(byte&0b1111, i)
- t.Select(table[i], t, cond)
- }
- p.Add(p, t)
- }
-
- return p
-}
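The Select loop in ScalarMult above is the classic constant-time table scan:
every entry is read, and subtle.ConstantTimeByteEq masks in the one that
matches the secret window. A minimal sketch over plain uint64 values (lookup
is an illustrative helper, not the nistec API):

package main

import (
	"crypto/subtle"
	"fmt"
)

// lookup returns table[idx] while touching every entry, so the memory access
// pattern does not depend on the secret index.
func lookup(table *[16]uint64, idx uint8) uint64 {
	var r uint64
	for i := uint8(0); i < 16; i++ {
		// mask is all ones when i == idx and zero otherwise.
		mask := uint64(0) - uint64(subtle.ConstantTimeByteEq(idx, i))
		r = (table[i] & mask) | (r &^ mask)
	}
	return r
}

func main() {
	var table [16]uint64
	for i := range table {
		table[i] = uint64(i * i)
	}
	fmt.Println(lookup(&table, 7)) // 49
}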
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/internal/nistec/p384.go b/contrib/go/_std_1.18/src/crypto/elliptic/internal/nistec/p384.go
deleted file mode 100644
index 24a166de0a..0000000000
--- a/contrib/go/_std_1.18/src/crypto/elliptic/internal/nistec/p384.go
+++ /dev/null
@@ -1,298 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package nistec
-
-import (
- "crypto/elliptic/internal/fiat"
- "crypto/subtle"
- "errors"
-)
-
-var p384B, _ = new(fiat.P384Element).SetBytes([]byte{
- 0xb3, 0x31, 0x2f, 0xa7, 0xe2, 0x3e, 0xe7, 0xe4, 0x98, 0x8e, 0x05, 0x6b,
- 0xe3, 0xf8, 0x2d, 0x19, 0x18, 0x1d, 0x9c, 0x6e, 0xfe, 0x81, 0x41, 0x12,
- 0x03, 0x14, 0x08, 0x8f, 0x50, 0x13, 0x87, 0x5a, 0xc6, 0x56, 0x39, 0x8d,
- 0x8a, 0x2e, 0xd1, 0x9d, 0x2a, 0x85, 0xc8, 0xed, 0xd3, 0xec, 0x2a, 0xef})
-
-var p384G, _ = NewP384Point().SetBytes([]byte{0x4,
- 0xaa, 0x87, 0xca, 0x22, 0xbe, 0x8b, 0x05, 0x37, 0x8e, 0xb1, 0xc7, 0x1e,
- 0xf3, 0x20, 0xad, 0x74, 0x6e, 0x1d, 0x3b, 0x62, 0x8b, 0xa7, 0x9b, 0x98,
- 0x59, 0xf7, 0x41, 0xe0, 0x82, 0x54, 0x2a, 0x38, 0x55, 0x02, 0xf2, 0x5d,
- 0xbf, 0x55, 0x29, 0x6c, 0x3a, 0x54, 0x5e, 0x38, 0x72, 0x76, 0x0a, 0xb7,
- 0x36, 0x17, 0xde, 0x4a, 0x96, 0x26, 0x2c, 0x6f, 0x5d, 0x9e, 0x98, 0xbf,
- 0x92, 0x92, 0xdc, 0x29, 0xf8, 0xf4, 0x1d, 0xbd, 0x28, 0x9a, 0x14, 0x7c,
- 0xe9, 0xda, 0x31, 0x13, 0xb5, 0xf0, 0xb8, 0xc0, 0x0a, 0x60, 0xb1, 0xce,
- 0x1d, 0x7e, 0x81, 0x9d, 0x7a, 0x43, 0x1d, 0x7c, 0x90, 0xea, 0x0e, 0x5f})
-
-const p384ElementLength = 48
-
-// P384Point is a P-384 point. The zero value is NOT valid.
-type P384Point struct {
- // The point is represented in projective coordinates (X:Y:Z),
- // where x = X/Z and y = Y/Z.
- x, y, z *fiat.P384Element
-}
-
-// NewP384Point returns a new P384Point representing the point at infinity.
-func NewP384Point() *P384Point {
- return &P384Point{
- x: new(fiat.P384Element),
- y: new(fiat.P384Element).One(),
- z: new(fiat.P384Element),
- }
-}
-
-// NewP384Generator returns a new P384Point set to the canonical generator.
-func NewP384Generator() *P384Point {
- return (&P384Point{
- x: new(fiat.P384Element),
- y: new(fiat.P384Element),
- z: new(fiat.P384Element),
- }).Set(p384G)
-}
-
-// Set sets p = q and returns p.
-func (p *P384Point) Set(q *P384Point) *P384Point {
- p.x.Set(q.x)
- p.y.Set(q.y)
- p.z.Set(q.z)
- return p
-}
-
-// SetBytes sets p to the compressed, uncompressed, or infinity value encoded in
-// b, as specified in SEC 1, Version 2.0, Section 2.3.4. If the point is not on
-// the curve, it returns nil and an error, and the receiver is unchanged.
-// Otherwise, it returns p.
-func (p *P384Point) SetBytes(b []byte) (*P384Point, error) {
- switch {
- // Point at infinity.
- case len(b) == 1 && b[0] == 0:
- return p.Set(NewP384Point()), nil
-
- // Uncompressed form.
- case len(b) == 1+2*p384ElementLength && b[0] == 4:
- x, err := new(fiat.P384Element).SetBytes(b[1 : 1+p384ElementLength])
- if err != nil {
- return nil, err
- }
- y, err := new(fiat.P384Element).SetBytes(b[1+p384ElementLength:])
- if err != nil {
- return nil, err
- }
- if err := p384CheckOnCurve(x, y); err != nil {
- return nil, err
- }
- p.x.Set(x)
- p.y.Set(y)
- p.z.One()
- return p, nil
-
-	// Compressed form.
- case len(b) == 1+p384ElementLength && b[0] == 0:
- return nil, errors.New("unimplemented") // TODO(filippo)
-
- default:
- return nil, errors.New("invalid P384 point encoding")
- }
-}
-
-func p384CheckOnCurve(x, y *fiat.P384Element) error {
- // x³ - 3x + b.
- x3 := new(fiat.P384Element).Square(x)
- x3.Mul(x3, x)
-
- threeX := new(fiat.P384Element).Add(x, x)
- threeX.Add(threeX, x)
-
- x3.Sub(x3, threeX)
- x3.Add(x3, p384B)
-
- // y² = x³ - 3x + b
- y2 := new(fiat.P384Element).Square(y)
-
- if x3.Equal(y2) != 1 {
- return errors.New("P384 point not on curve")
- }
- return nil
-}
-
-// Bytes returns the uncompressed or infinity encoding of p, as specified in
-// SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the point at
-// infinity is shorter than all other encodings.
-func (p *P384Point) Bytes() []byte {
- // This function is outlined to make the allocations inline in the caller
- // rather than happen on the heap.
- var out [133]byte
- return p.bytes(&out)
-}
-
-func (p *P384Point) bytes(out *[133]byte) []byte {
- if p.z.IsZero() == 1 {
- return append(out[:0], 0)
- }
-
- zinv := new(fiat.P384Element).Invert(p.z)
- xx := new(fiat.P384Element).Mul(p.x, zinv)
- yy := new(fiat.P384Element).Mul(p.y, zinv)
-
- buf := append(out[:0], 4)
- buf = append(buf, xx.Bytes()...)
- buf = append(buf, yy.Bytes()...)
- return buf
-}
-
-// Add sets q = p1 + p2, and returns q. The points may overlap.
-func (q *P384Point) Add(p1, p2 *P384Point) *P384Point {
- // Complete addition formula for a = -3 from "Complete addition formulas for
- // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
-
- t0 := new(fiat.P384Element).Mul(p1.x, p2.x) // t0 := X1 * X2
- t1 := new(fiat.P384Element).Mul(p1.y, p2.y) // t1 := Y1 * Y2
- t2 := new(fiat.P384Element).Mul(p1.z, p2.z) // t2 := Z1 * Z2
- t3 := new(fiat.P384Element).Add(p1.x, p1.y) // t3 := X1 + Y1
- t4 := new(fiat.P384Element).Add(p2.x, p2.y) // t4 := X2 + Y2
- t3.Mul(t3, t4) // t3 := t3 * t4
- t4.Add(t0, t1) // t4 := t0 + t1
- t3.Sub(t3, t4) // t3 := t3 - t4
- t4.Add(p1.y, p1.z) // t4 := Y1 + Z1
- x3 := new(fiat.P384Element).Add(p2.y, p2.z) // X3 := Y2 + Z2
- t4.Mul(t4, x3) // t4 := t4 * X3
- x3.Add(t1, t2) // X3 := t1 + t2
- t4.Sub(t4, x3) // t4 := t4 - X3
- x3.Add(p1.x, p1.z) // X3 := X1 + Z1
- y3 := new(fiat.P384Element).Add(p2.x, p2.z) // Y3 := X2 + Z2
- x3.Mul(x3, y3) // X3 := X3 * Y3
- y3.Add(t0, t2) // Y3 := t0 + t2
- y3.Sub(x3, y3) // Y3 := X3 - Y3
- z3 := new(fiat.P384Element).Mul(p384B, t2) // Z3 := b * t2
- x3.Sub(y3, z3) // X3 := Y3 - Z3
- z3.Add(x3, x3) // Z3 := X3 + X3
- x3.Add(x3, z3) // X3 := X3 + Z3
- z3.Sub(t1, x3) // Z3 := t1 - X3
- x3.Add(t1, x3) // X3 := t1 + X3
- y3.Mul(p384B, y3) // Y3 := b * Y3
- t1.Add(t2, t2) // t1 := t2 + t2
- t2.Add(t1, t2) // t2 := t1 + t2
- y3.Sub(y3, t2) // Y3 := Y3 - t2
- y3.Sub(y3, t0) // Y3 := Y3 - t0
- t1.Add(y3, y3) // t1 := Y3 + Y3
- y3.Add(t1, y3) // Y3 := t1 + Y3
- t1.Add(t0, t0) // t1 := t0 + t0
- t0.Add(t1, t0) // t0 := t1 + t0
- t0.Sub(t0, t2) // t0 := t0 - t2
- t1.Mul(t4, y3) // t1 := t4 * Y3
- t2.Mul(t0, y3) // t2 := t0 * Y3
- y3.Mul(x3, z3) // Y3 := X3 * Z3
- y3.Add(y3, t2) // Y3 := Y3 + t2
- x3.Mul(t3, x3) // X3 := t3 * X3
- x3.Sub(x3, t1) // X3 := X3 - t1
- z3.Mul(t4, z3) // Z3 := t4 * Z3
- t1.Mul(t3, t0) // t1 := t3 * t0
- z3.Add(z3, t1) // Z3 := Z3 + t1
-
- q.x.Set(x3)
- q.y.Set(y3)
- q.z.Set(z3)
- return q
-}
-
-// Double sets q = p + p, and returns q. The points may overlap.
-func (q *P384Point) Double(p *P384Point) *P384Point {
- // Complete addition formula for a = -3 from "Complete addition formulas for
- // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
-
- t0 := new(fiat.P384Element).Square(p.x) // t0 := X ^ 2
- t1 := new(fiat.P384Element).Square(p.y) // t1 := Y ^ 2
- t2 := new(fiat.P384Element).Square(p.z) // t2 := Z ^ 2
- t3 := new(fiat.P384Element).Mul(p.x, p.y) // t3 := X * Y
- t3.Add(t3, t3) // t3 := t3 + t3
- z3 := new(fiat.P384Element).Mul(p.x, p.z) // Z3 := X * Z
- z3.Add(z3, z3) // Z3 := Z3 + Z3
- y3 := new(fiat.P384Element).Mul(p384B, t2) // Y3 := b * t2
- y3.Sub(y3, z3) // Y3 := Y3 - Z3
- x3 := new(fiat.P384Element).Add(y3, y3) // X3 := Y3 + Y3
- y3.Add(x3, y3) // Y3 := X3 + Y3
- x3.Sub(t1, y3) // X3 := t1 - Y3
- y3.Add(t1, y3) // Y3 := t1 + Y3
- y3.Mul(x3, y3) // Y3 := X3 * Y3
- x3.Mul(x3, t3) // X3 := X3 * t3
- t3.Add(t2, t2) // t3 := t2 + t2
- t2.Add(t2, t3) // t2 := t2 + t3
- z3.Mul(p384B, z3) // Z3 := b * Z3
- z3.Sub(z3, t2) // Z3 := Z3 - t2
- z3.Sub(z3, t0) // Z3 := Z3 - t0
- t3.Add(z3, z3) // t3 := Z3 + Z3
- z3.Add(z3, t3) // Z3 := Z3 + t3
- t3.Add(t0, t0) // t3 := t0 + t0
- t0.Add(t3, t0) // t0 := t3 + t0
- t0.Sub(t0, t2) // t0 := t0 - t2
- t0.Mul(t0, z3) // t0 := t0 * Z3
- y3.Add(y3, t0) // Y3 := Y3 + t0
- t0.Mul(p.y, p.z) // t0 := Y * Z
- t0.Add(t0, t0) // t0 := t0 + t0
- z3.Mul(t0, z3) // Z3 := t0 * Z3
- x3.Sub(x3, z3) // X3 := X3 - Z3
- z3.Mul(t0, t1) // Z3 := t0 * t1
- z3.Add(z3, z3) // Z3 := Z3 + Z3
- z3.Add(z3, z3) // Z3 := Z3 + Z3
-
- q.x.Set(x3)
- q.y.Set(y3)
- q.z.Set(z3)
- return q
-}
-
-// Select sets q to p1 if cond == 1, and to p2 if cond == 0.
-func (q *P384Point) Select(p1, p2 *P384Point, cond int) *P384Point {
- q.x.Select(p1.x, p2.x, cond)
- q.y.Select(p1.y, p2.y, cond)
- q.z.Select(p1.z, p2.z, cond)
- return q
-}
-
-// ScalarMult sets p = scalar * q, and returns p.
-func (p *P384Point) ScalarMult(q *P384Point, scalar []byte) *P384Point {
-	// table holds the first 16 multiples of q. The explicit NewP384Point calls
- // get inlined, letting the allocations live on the stack.
- var table = [16]*P384Point{
- NewP384Point(), NewP384Point(), NewP384Point(), NewP384Point(),
- NewP384Point(), NewP384Point(), NewP384Point(), NewP384Point(),
- NewP384Point(), NewP384Point(), NewP384Point(), NewP384Point(),
- NewP384Point(), NewP384Point(), NewP384Point(), NewP384Point(),
- }
- for i := 1; i < 16; i++ {
- table[i].Add(table[i-1], q)
- }
-
- // Instead of doing the classic double-and-add chain, we do it with a
- // four-bit window: we double four times, and then add [0-15]P.
- t := NewP384Point()
- p.Set(NewP384Point())
- for _, byte := range scalar {
- p.Double(p)
- p.Double(p)
- p.Double(p)
- p.Double(p)
-
- for i := uint8(0); i < 16; i++ {
- cond := subtle.ConstantTimeByteEq(byte>>4, i)
- t.Select(table[i], t, cond)
- }
- p.Add(p, t)
-
- p.Double(p)
- p.Double(p)
- p.Double(p)
- p.Double(p)
-
- for i := uint8(0); i < 16; i++ {
- cond := subtle.ConstantTimeByteEq(byte&0b1111, i)
- t.Select(table[i], t, cond)
- }
- p.Add(p, t)
- }
-
- return p
-}
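SetBytes above dispatches on the SEC 1 encodings by length and tag byte. A
minimal sketch of the uncompressed layout it parses, 0x04 || X || Y with
48-byte coordinates for P-384 (splitUncompressed is a hypothetical helper,
shown only to make the offsets concrete):

package main

import (
	"errors"
	"fmt"
)

const elementLength = 48 // matches p384ElementLength

// splitUncompressed rejects anything that is not a well-formed uncompressed
// encoding and returns the raw X and Y coordinate bytes.
func splitUncompressed(b []byte) (x, y []byte, err error) {
	if len(b) != 1+2*elementLength || b[0] != 4 {
		return nil, nil, errors.New("not an uncompressed P-384 point")
	}
	return b[1 : 1+elementLength], b[1+elementLength:], nil
}

func main() {
	enc := make([]byte, 1+2*elementLength)
	enc[0] = 4
	x, y, err := splitUncompressed(enc)
	fmt.Println(len(x), len(y), err) // 48 48 <nil>
}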
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/internal/nistec/p521.go b/contrib/go/_std_1.18/src/crypto/elliptic/internal/nistec/p521.go
deleted file mode 100644
index cdbd195cf4..0000000000
--- a/contrib/go/_std_1.18/src/crypto/elliptic/internal/nistec/p521.go
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package nistec implements the NIST P elliptic curves from FIPS 186-4.
-//
-// This package uses fiat-crypto for its backend field arithmetic (not math/big)
-// and exposes constant-time, heap allocation-free, byte slice-based safe APIs.
-// Group operations use modern and safe complete addition formulas. The point at
-// infinity is handled and encoded according to SEC 1, Version 2.0, and invalid
-// curve points can't be represented.
-package nistec
-
-import (
- "crypto/elliptic/internal/fiat"
- "crypto/subtle"
- "errors"
-)
-
-var p521B, _ = new(fiat.P521Element).SetBytes([]byte{
- 0x00, 0x51, 0x95, 0x3e, 0xb9, 0x61, 0x8e, 0x1c, 0x9a, 0x1f, 0x92, 0x9a,
- 0x21, 0xa0, 0xb6, 0x85, 0x40, 0xee, 0xa2, 0xda, 0x72, 0x5b, 0x99, 0xb3,
- 0x15, 0xf3, 0xb8, 0xb4, 0x89, 0x91, 0x8e, 0xf1, 0x09, 0xe1, 0x56, 0x19,
- 0x39, 0x51, 0xec, 0x7e, 0x93, 0x7b, 0x16, 0x52, 0xc0, 0xbd, 0x3b, 0xb1,
- 0xbf, 0x07, 0x35, 0x73, 0xdf, 0x88, 0x3d, 0x2c, 0x34, 0xf1, 0xef, 0x45,
- 0x1f, 0xd4, 0x6b, 0x50, 0x3f, 0x00})
-
-var p521G, _ = NewP521Point().SetBytes([]byte{0x04,
- 0x00, 0xc6, 0x85, 0x8e, 0x06, 0xb7, 0x04, 0x04, 0xe9, 0xcd, 0x9e, 0x3e,
- 0xcb, 0x66, 0x23, 0x95, 0xb4, 0x42, 0x9c, 0x64, 0x81, 0x39, 0x05, 0x3f,
- 0xb5, 0x21, 0xf8, 0x28, 0xaf, 0x60, 0x6b, 0x4d, 0x3d, 0xba, 0xa1, 0x4b,
- 0x5e, 0x77, 0xef, 0xe7, 0x59, 0x28, 0xfe, 0x1d, 0xc1, 0x27, 0xa2, 0xff,
- 0xa8, 0xde, 0x33, 0x48, 0xb3, 0xc1, 0x85, 0x6a, 0x42, 0x9b, 0xf9, 0x7e,
- 0x7e, 0x31, 0xc2, 0xe5, 0xbd, 0x66, 0x01, 0x18, 0x39, 0x29, 0x6a, 0x78,
- 0x9a, 0x3b, 0xc0, 0x04, 0x5c, 0x8a, 0x5f, 0xb4, 0x2c, 0x7d, 0x1b, 0xd9,
- 0x98, 0xf5, 0x44, 0x49, 0x57, 0x9b, 0x44, 0x68, 0x17, 0xaf, 0xbd, 0x17,
- 0x27, 0x3e, 0x66, 0x2c, 0x97, 0xee, 0x72, 0x99, 0x5e, 0xf4, 0x26, 0x40,
- 0xc5, 0x50, 0xb9, 0x01, 0x3f, 0xad, 0x07, 0x61, 0x35, 0x3c, 0x70, 0x86,
- 0xa2, 0x72, 0xc2, 0x40, 0x88, 0xbe, 0x94, 0x76, 0x9f, 0xd1, 0x66, 0x50})
-
-const p521ElementLength = 66
-
-// P521Point is a P-521 point. The zero value is NOT valid.
-type P521Point struct {
- // The point is represented in projective coordinates (X:Y:Z),
- // where x = X/Z and y = Y/Z.
- x, y, z *fiat.P521Element
-}
-
-// NewP521Point returns a new P521Point representing the point at infinity.
-func NewP521Point() *P521Point {
- return &P521Point{
- x: new(fiat.P521Element),
- y: new(fiat.P521Element).One(),
- z: new(fiat.P521Element),
- }
-}
-
-// NewP521Generator returns a new P521Point set to the canonical generator.
-func NewP521Generator() *P521Point {
- return (&P521Point{
- x: new(fiat.P521Element),
- y: new(fiat.P521Element),
- z: new(fiat.P521Element),
- }).Set(p521G)
-}
-
-// Set sets p = q and returns p.
-func (p *P521Point) Set(q *P521Point) *P521Point {
- p.x.Set(q.x)
- p.y.Set(q.y)
- p.z.Set(q.z)
- return p
-}
-
-// SetBytes sets p to the compressed, uncompressed, or infinity value encoded in
-// b, as specified in SEC 1, Version 2.0, Section 2.3.4. If the point is not on
-// the curve, it returns nil and an error, and the receiver is unchanged.
-// Otherwise, it returns p.
-func (p *P521Point) SetBytes(b []byte) (*P521Point, error) {
- switch {
- // Point at infinity.
- case len(b) == 1 && b[0] == 0:
- return p.Set(NewP521Point()), nil
-
- // Uncompressed form.
- case len(b) == 1+2*p521ElementLength && b[0] == 4:
- x, err := new(fiat.P521Element).SetBytes(b[1 : 1+p521ElementLength])
- if err != nil {
- return nil, err
- }
- y, err := new(fiat.P521Element).SetBytes(b[1+p521ElementLength:])
- if err != nil {
- return nil, err
- }
- if err := p521CheckOnCurve(x, y); err != nil {
- return nil, err
- }
- p.x.Set(x)
- p.y.Set(y)
- p.z.One()
- return p, nil
-
-	// Compressed form.
- case len(b) == 1+p521ElementLength && b[0] == 0:
- return nil, errors.New("unimplemented") // TODO(filippo)
-
- default:
- return nil, errors.New("invalid P521 point encoding")
- }
-}
-
-func p521CheckOnCurve(x, y *fiat.P521Element) error {
- // x³ - 3x + b.
- x3 := new(fiat.P521Element).Square(x)
- x3.Mul(x3, x)
-
- threeX := new(fiat.P521Element).Add(x, x)
- threeX.Add(threeX, x)
-
- x3.Sub(x3, threeX)
- x3.Add(x3, p521B)
-
- // y² = x³ - 3x + b
- y2 := new(fiat.P521Element).Square(y)
-
- if x3.Equal(y2) != 1 {
- return errors.New("P521 point not on curve")
- }
- return nil
-}
-
-// Bytes returns the uncompressed or infinity encoding of p, as specified in
-// SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the point at
-// infinity is shorter than all other encodings.
-func (p *P521Point) Bytes() []byte {
- // This function is outlined to make the allocations inline in the caller
- // rather than happen on the heap.
- var out [133]byte
- return p.bytes(&out)
-}
-
-func (p *P521Point) bytes(out *[133]byte) []byte {
- if p.z.IsZero() == 1 {
- return append(out[:0], 0)
- }
-
- zinv := new(fiat.P521Element).Invert(p.z)
- xx := new(fiat.P521Element).Mul(p.x, zinv)
- yy := new(fiat.P521Element).Mul(p.y, zinv)
-
- buf := append(out[:0], 4)
- buf = append(buf, xx.Bytes()...)
- buf = append(buf, yy.Bytes()...)
- return buf
-}
-
-// Add sets q = p1 + p2, and returns q. The points may overlap.
-func (q *P521Point) Add(p1, p2 *P521Point) *P521Point {
- // Complete addition formula for a = -3 from "Complete addition formulas for
- // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
-
- t0 := new(fiat.P521Element).Mul(p1.x, p2.x) // t0 := X1 * X2
- t1 := new(fiat.P521Element).Mul(p1.y, p2.y) // t1 := Y1 * Y2
- t2 := new(fiat.P521Element).Mul(p1.z, p2.z) // t2 := Z1 * Z2
- t3 := new(fiat.P521Element).Add(p1.x, p1.y) // t3 := X1 + Y1
- t4 := new(fiat.P521Element).Add(p2.x, p2.y) // t4 := X2 + Y2
- t3.Mul(t3, t4) // t3 := t3 * t4
- t4.Add(t0, t1) // t4 := t0 + t1
- t3.Sub(t3, t4) // t3 := t3 - t4
- t4.Add(p1.y, p1.z) // t4 := Y1 + Z1
- x3 := new(fiat.P521Element).Add(p2.y, p2.z) // X3 := Y2 + Z2
- t4.Mul(t4, x3) // t4 := t4 * X3
- x3.Add(t1, t2) // X3 := t1 + t2
- t4.Sub(t4, x3) // t4 := t4 - X3
- x3.Add(p1.x, p1.z) // X3 := X1 + Z1
- y3 := new(fiat.P521Element).Add(p2.x, p2.z) // Y3 := X2 + Z2
- x3.Mul(x3, y3) // X3 := X3 * Y3
- y3.Add(t0, t2) // Y3 := t0 + t2
- y3.Sub(x3, y3) // Y3 := X3 - Y3
- z3 := new(fiat.P521Element).Mul(p521B, t2) // Z3 := b * t2
- x3.Sub(y3, z3) // X3 := Y3 - Z3
- z3.Add(x3, x3) // Z3 := X3 + X3
- x3.Add(x3, z3) // X3 := X3 + Z3
- z3.Sub(t1, x3) // Z3 := t1 - X3
- x3.Add(t1, x3) // X3 := t1 + X3
- y3.Mul(p521B, y3) // Y3 := b * Y3
- t1.Add(t2, t2) // t1 := t2 + t2
- t2.Add(t1, t2) // t2 := t1 + t2
- y3.Sub(y3, t2) // Y3 := Y3 - t2
- y3.Sub(y3, t0) // Y3 := Y3 - t0
- t1.Add(y3, y3) // t1 := Y3 + Y3
- y3.Add(t1, y3) // Y3 := t1 + Y3
- t1.Add(t0, t0) // t1 := t0 + t0
- t0.Add(t1, t0) // t0 := t1 + t0
- t0.Sub(t0, t2) // t0 := t0 - t2
- t1.Mul(t4, y3) // t1 := t4 * Y3
- t2.Mul(t0, y3) // t2 := t0 * Y3
- y3.Mul(x3, z3) // Y3 := X3 * Z3
- y3.Add(y3, t2) // Y3 := Y3 + t2
- x3.Mul(t3, x3) // X3 := t3 * X3
- x3.Sub(x3, t1) // X3 := X3 - t1
- z3.Mul(t4, z3) // Z3 := t4 * Z3
- t1.Mul(t3, t0) // t1 := t3 * t0
- z3.Add(z3, t1) // Z3 := Z3 + t1
-
- q.x.Set(x3)
- q.y.Set(y3)
- q.z.Set(z3)
- return q
-}
-
-// Double sets q = p + p, and returns q. The points may overlap.
-func (q *P521Point) Double(p *P521Point) *P521Point {
- // Complete addition formula for a = -3 from "Complete addition formulas for
- // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
-
- t0 := new(fiat.P521Element).Square(p.x) // t0 := X ^ 2
- t1 := new(fiat.P521Element).Square(p.y) // t1 := Y ^ 2
- t2 := new(fiat.P521Element).Square(p.z) // t2 := Z ^ 2
- t3 := new(fiat.P521Element).Mul(p.x, p.y) // t3 := X * Y
- t3.Add(t3, t3) // t3 := t3 + t3
- z3 := new(fiat.P521Element).Mul(p.x, p.z) // Z3 := X * Z
- z3.Add(z3, z3) // Z3 := Z3 + Z3
- y3 := new(fiat.P521Element).Mul(p521B, t2) // Y3 := b * t2
- y3.Sub(y3, z3) // Y3 := Y3 - Z3
- x3 := new(fiat.P521Element).Add(y3, y3) // X3 := Y3 + Y3
- y3.Add(x3, y3) // Y3 := X3 + Y3
- x3.Sub(t1, y3) // X3 := t1 - Y3
- y3.Add(t1, y3) // Y3 := t1 + Y3
- y3.Mul(x3, y3) // Y3 := X3 * Y3
- x3.Mul(x3, t3) // X3 := X3 * t3
- t3.Add(t2, t2) // t3 := t2 + t2
- t2.Add(t2, t3) // t2 := t2 + t3
- z3.Mul(p521B, z3) // Z3 := b * Z3
- z3.Sub(z3, t2) // Z3 := Z3 - t2
- z3.Sub(z3, t0) // Z3 := Z3 - t0
- t3.Add(z3, z3) // t3 := Z3 + Z3
- z3.Add(z3, t3) // Z3 := Z3 + t3
- t3.Add(t0, t0) // t3 := t0 + t0
- t0.Add(t3, t0) // t0 := t3 + t0
- t0.Sub(t0, t2) // t0 := t0 - t2
- t0.Mul(t0, z3) // t0 := t0 * Z3
- y3.Add(y3, t0) // Y3 := Y3 + t0
- t0.Mul(p.y, p.z) // t0 := Y * Z
- t0.Add(t0, t0) // t0 := t0 + t0
- z3.Mul(t0, z3) // Z3 := t0 * Z3
- x3.Sub(x3, z3) // X3 := X3 - Z3
- z3.Mul(t0, t1) // Z3 := t0 * t1
- z3.Add(z3, z3) // Z3 := Z3 + Z3
- z3.Add(z3, z3) // Z3 := Z3 + Z3
-
- q.x.Set(x3)
- q.y.Set(y3)
- q.z.Set(z3)
- return q
-}
-
-// Select sets q to p1 if cond == 1, and to p2 if cond == 0.
-func (q *P521Point) Select(p1, p2 *P521Point, cond int) *P521Point {
- q.x.Select(p1.x, p2.x, cond)
- q.y.Select(p1.y, p2.y, cond)
- q.z.Select(p1.z, p2.z, cond)
- return q
-}
-
-// ScalarMult sets p = scalar * q, and returns p.
-func (p *P521Point) ScalarMult(q *P521Point, scalar []byte) *P521Point {
-	// table holds the first 16 multiples of q. The explicit NewP521Point calls
- // get inlined, letting the allocations live on the stack.
- var table = [16]*P521Point{
- NewP521Point(), NewP521Point(), NewP521Point(), NewP521Point(),
- NewP521Point(), NewP521Point(), NewP521Point(), NewP521Point(),
- NewP521Point(), NewP521Point(), NewP521Point(), NewP521Point(),
- NewP521Point(), NewP521Point(), NewP521Point(), NewP521Point(),
- }
- for i := 1; i < 16; i++ {
- table[i].Add(table[i-1], q)
- }
-
- // Instead of doing the classic double-and-add chain, we do it with a
- // four-bit window: we double four times, and then add [0-15]P.
- t := NewP521Point()
- p.Set(NewP521Point())
- for _, byte := range scalar {
- p.Double(p)
- p.Double(p)
- p.Double(p)
- p.Double(p)
-
- for i := uint8(0); i < 16; i++ {
- cond := subtle.ConstantTimeByteEq(byte>>4, i)
- t.Select(table[i], t, cond)
- }
- p.Add(p, t)
-
- p.Double(p)
- p.Double(p)
- p.Double(p)
- p.Double(p)
-
- for i := uint8(0); i < 16; i++ {
- cond := subtle.ConstantTimeByteEq(byte&0b1111, i)
- t.Select(table[i], t, cond)
- }
- p.Add(p, t)
- }
-
- return p
-}
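p521CheckOnCurve above evaluates the short Weierstrass equation with the fiat
field types. The same check written against math/big makes the algebra
y² ≡ x³ - 3x + b (mod p) explicit (onCurve is an illustrative helper, not
the nistec API):

package main

import (
	"crypto/elliptic"
	"fmt"
	"math/big"
)

// onCurve reports whether (x, y) satisfies y² = x³ - 3x + b mod p.
func onCurve(c *elliptic.CurveParams, x, y *big.Int) bool {
	rhs := new(big.Int).Exp(x, big.NewInt(3), c.P) // x³ mod p
	threeX := new(big.Int).Lsh(x, 1)
	threeX.Add(threeX, x) // 3x
	rhs.Sub(rhs, threeX)
	rhs.Add(rhs, c.B)
	rhs.Mod(rhs, c.P)
	lhs := new(big.Int).Exp(y, big.NewInt(2), c.P) // y² mod p
	return lhs.Cmp(rhs) == 0
}

func main() {
	params := elliptic.P521().Params()
	fmt.Println(onCurve(params, params.Gx, params.Gy)) // true
}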
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/p224.go b/contrib/go/_std_1.18/src/crypto/elliptic/p224.go
deleted file mode 100644
index 8a431c4769..0000000000
--- a/contrib/go/_std_1.18/src/crypto/elliptic/p224.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package elliptic
-
-import (
- "crypto/elliptic/internal/nistec"
- "crypto/rand"
- "math/big"
-)
-
-// p224Curve is a Curve implementation based on nistec.P224Point.
-//
-// It's a wrapper that exposes the big.Int-based Curve interface and encodes the
-// legacy idiosyncrasies it requires, such as invalid and infinity point
-// handling.
-//
-// To interact with the nistec package, points are encoded into and decoded from
-// properly formatted byte slices. All big.Int use is limited to this package.
-// Encoding and decoding are 1/1000th of the runtime of a scalar multiplication,
-// so the overhead is acceptable.
-type p224Curve struct {
- params *CurveParams
-}
-
-var p224 p224Curve
-var _ Curve = p224
-
-func initP224() {
- p224.params = &CurveParams{
- Name: "P-224",
- BitSize: 224,
- // FIPS 186-4, section D.1.2.2
- P: bigFromDecimal("26959946667150639794667015087019630673557916260026308143510066298881"),
- N: bigFromDecimal("26959946667150639794667015087019625940457807714424391721682722368061"),
- B: bigFromHex("b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4"),
- Gx: bigFromHex("b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21"),
- Gy: bigFromHex("bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34"),
- }
-}
-
-func (curve p224Curve) Params() *CurveParams {
- return curve.params
-}
-
-func (curve p224Curve) IsOnCurve(x, y *big.Int) bool {
- // IsOnCurve is documented to reject (0, 0), the conventional point at
-	// infinity, which, however, is accepted by p224PointFromAffine.
- if x.Sign() == 0 && y.Sign() == 0 {
- return false
- }
- _, ok := p224PointFromAffine(x, y)
- return ok
-}
-
-func p224PointFromAffine(x, y *big.Int) (p *nistec.P224Point, ok bool) {
- // (0, 0) is by convention the point at infinity, which can't be represented
- // in affine coordinates. Marshal incorrectly encodes it as an uncompressed
- // point, which SetBytes would correctly reject. See Issue 37294.
- if x.Sign() == 0 && y.Sign() == 0 {
- return nistec.NewP224Point(), true
- }
- if x.Sign() < 0 || y.Sign() < 0 {
- return nil, false
- }
- if x.BitLen() > 224 || y.BitLen() > 224 {
- return nil, false
- }
- p, err := nistec.NewP224Point().SetBytes(Marshal(P224(), x, y))
- if err != nil {
- return nil, false
- }
- return p, true
-}
-
-func p224PointToAffine(p *nistec.P224Point) (x, y *big.Int) {
- out := p.Bytes()
- if len(out) == 1 && out[0] == 0 {
- // This is the correct encoding of the point at infinity, which
- // Unmarshal does not support. See Issue 37294.
- return new(big.Int), new(big.Int)
- }
- x, y = Unmarshal(P224(), out)
- if x == nil {
- panic("crypto/elliptic: internal error: Unmarshal rejected a valid point encoding")
- }
- return x, y
-}
-
-// p224RandomPoint returns a random point on the curve. It's used when Add,
-// Double, or ScalarMult are fed a point not on the curve, which is undefined
-// behavior. Originally, we did the math on it anyway (which allows
-// invalid curve attacks) and relied on the caller and Unmarshal to avoid this
-// happening in the first place. Now, we just can't construct a nistec.P224Point
-// for an invalid pair of coordinates, because that API is safer. If we panic,
-// we risk introducing a DoS. If we return nil, we risk a panic. If we return
-// the input, ecdsa.Verify might fail open. The safest course seems to be to
-// return a valid, random point, which hopefully won't help the attacker.
-func p224RandomPoint() (x, y *big.Int) {
- _, x, y, err := GenerateKey(P224(), rand.Reader)
- if err != nil {
- panic("crypto/elliptic: failed to generate random point")
- }
- return x, y
-}
-
-func (p224Curve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
- p1, ok := p224PointFromAffine(x1, y1)
- if !ok {
- return p224RandomPoint()
- }
- p2, ok := p224PointFromAffine(x2, y2)
- if !ok {
- return p224RandomPoint()
- }
- return p224PointToAffine(p1.Add(p1, p2))
-}
-
-func (p224Curve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
- p, ok := p224PointFromAffine(x1, y1)
- if !ok {
- return p224RandomPoint()
- }
- return p224PointToAffine(p.Double(p))
-}
-
-func (p224Curve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) {
- p, ok := p224PointFromAffine(Bx, By)
- if !ok {
- return p224RandomPoint()
- }
- return p224PointToAffine(p.ScalarMult(p, scalar))
-}
-
-func (p224Curve) ScalarBaseMult(scalar []byte) (*big.Int, *big.Int) {
- p := nistec.NewP224Generator()
- return p224PointToAffine(p.ScalarMult(p, scalar))
-}
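For callers, all of the above stays behind the legacy big.Int-based Curve
interface. A short usage sketch:

package main

import (
	"crypto/elliptic"
	"fmt"
)

func main() {
	curve := elliptic.P224()
	// Multiply the generator by a small scalar; the result is on the curve.
	x, y := curve.ScalarBaseMult([]byte{42})
	fmt.Println(curve.IsOnCurve(x, y)) // true
}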
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/p256_asm.go b/contrib/go/_std_1.18/src/crypto/elliptic/p256_asm.go
deleted file mode 100644
index 8624e031a3..0000000000
--- a/contrib/go/_std_1.18/src/crypto/elliptic/p256_asm.go
+++ /dev/null
@@ -1,544 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file contains the Go wrapper for the constant-time, 64-bit assembly
-// implementation of P256. The optimizations performed here are described in
-// detail in:
-// S. Gueron and V. Krasnov, "Fast prime field elliptic-curve cryptography with
-// 256-bit primes"
-// https://link.springer.com/article/10.1007%2Fs13389-014-0090-x
-// https://eprint.iacr.org/2013/816.pdf
-
-//go:build amd64 || arm64
-
-package elliptic
-
-import (
- _ "embed"
- "math/big"
-)
-
-//go:generate go run -tags=tablegen gen_p256_table.go
-
-//go:embed p256_asm_table.bin
-var p256Precomputed string
-
-type (
- p256Curve struct {
- *CurveParams
- }
-
- p256Point struct {
- xyz [12]uint64
- }
-)
-
-var p256 p256Curve
-
-func initP256() {
- // See FIPS 186-3, section D.2.3
- p256.CurveParams = &CurveParams{Name: "P-256"}
- p256.P, _ = new(big.Int).SetString("115792089210356248762697446949407573530086143415290314195533631308867097853951", 10)
- p256.N, _ = new(big.Int).SetString("115792089210356248762697446949407573529996955224135760342422259061068512044369", 10)
- p256.B, _ = new(big.Int).SetString("5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b", 16)
- p256.Gx, _ = new(big.Int).SetString("6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296", 16)
- p256.Gy, _ = new(big.Int).SetString("4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5", 16)
- p256.BitSize = 256
-}
-
-func (curve p256Curve) Params() *CurveParams {
- return curve.CurveParams
-}
-
-// Functions implemented in p256_asm_*64.s
-// Montgomery multiplication modulo P256
-//go:noescape
-func p256Mul(res, in1, in2 []uint64)
-
-// Montgomery square modulo P256, repeated n times (n >= 1)
-//go:noescape
-func p256Sqr(res, in []uint64, n int)
-
-// Montgomery multiplication by 1
-//go:noescape
-func p256FromMont(res, in []uint64)
-
-// iff cond == 1 val <- -val
-//go:noescape
-func p256NegCond(val []uint64, cond int)
-
-// if cond == 0 res <- b; else res <- a
-//go:noescape
-func p256MovCond(res, a, b []uint64, cond int)
-
-// Endianness swap
-//go:noescape
-func p256BigToLittle(res []uint64, in []byte)
-
-//go:noescape
-func p256LittleToBig(res []byte, in []uint64)
-
-// Constant time table access
-//go:noescape
-func p256Select(point, table []uint64, idx int)
-
-//go:noescape
-func p256SelectBase(point *[12]uint64, table string, idx int)
-
-// Montgomery multiplication modulo Ord(G)
-//go:noescape
-func p256OrdMul(res, in1, in2 []uint64)
-
-// Montgomery square modulo Ord(G), repeated n times
-//go:noescape
-func p256OrdSqr(res, in []uint64, n int)
-
-// Point add with in2 being affine point
-// If sign == 1 -> in2 = -in2
-// If sel == 0 -> res = in1
-// If zero == 0 -> res = in2
-//go:noescape
-func p256PointAddAffineAsm(res, in1, in2 []uint64, sign, sel, zero int)
-
-// Point add. Returns one if the two input points were equal and zero
-// otherwise. (Note that, due to the way that the equations work out, some
-// representations of ∞ are considered equal to everything by this function.)
-//go:noescape
-func p256PointAddAsm(res, in1, in2 []uint64) int
-
-// Point double
-//go:noescape
-func p256PointDoubleAsm(res, in []uint64)
-
-func (curve p256Curve) Inverse(k *big.Int) *big.Int {
- if k.Sign() < 0 {
- // This should never happen.
- k = new(big.Int).Neg(k)
- }
-
- if k.Cmp(p256.N) >= 0 {
- // This should never happen.
- k = new(big.Int).Mod(k, p256.N)
- }
-
- // table will store precomputed powers of x.
- var table [4 * 9]uint64
- var (
- _1 = table[4*0 : 4*1]
- _11 = table[4*1 : 4*2]
- _101 = table[4*2 : 4*3]
- _111 = table[4*3 : 4*4]
- _1111 = table[4*4 : 4*5]
- _10101 = table[4*5 : 4*6]
- _101111 = table[4*6 : 4*7]
- x = table[4*7 : 4*8]
- t = table[4*8 : 4*9]
- )
-
- fromBig(x[:], k)
- // This code operates in the Montgomery domain where R = 2^256 mod n
- // and n is the order of the scalar field. (See initP256 for the
- // value.) Elements in the Montgomery domain take the form a×R and
-	// multiplication of x and y in the domain calculates (x × y × R^-1)
-	// mod n. RR is R×R mod n, thus the Montgomery multiplication of x and
-	// RR gives x×R, i.e. converts x into the Montgomery domain.
- // Window values borrowed from https://briansmith.org/ecc-inversion-addition-chains-01#p256_scalar_inversion
- RR := []uint64{0x83244c95be79eea2, 0x4699799c49bd6fa6, 0x2845b2392b6bec59, 0x66e12d94f3d95620}
- p256OrdMul(_1, x, RR) // _1
- p256OrdSqr(x, _1, 1) // _10
- p256OrdMul(_11, x, _1) // _11
- p256OrdMul(_101, x, _11) // _101
- p256OrdMul(_111, x, _101) // _111
- p256OrdSqr(x, _101, 1) // _1010
- p256OrdMul(_1111, _101, x) // _1111
-
- p256OrdSqr(t, x, 1) // _10100
- p256OrdMul(_10101, t, _1) // _10101
- p256OrdSqr(x, _10101, 1) // _101010
- p256OrdMul(_101111, _101, x) // _101111
- p256OrdMul(x, _10101, x) // _111111 = x6
- p256OrdSqr(t, x, 2) // _11111100
- p256OrdMul(t, t, _11) // _11111111 = x8
- p256OrdSqr(x, t, 8) // _ff00
- p256OrdMul(x, x, t) // _ffff = x16
- p256OrdSqr(t, x, 16) // _ffff0000
- p256OrdMul(t, t, x) // _ffffffff = x32
-
- p256OrdSqr(x, t, 64)
- p256OrdMul(x, x, t)
- p256OrdSqr(x, x, 32)
- p256OrdMul(x, x, t)
-
- sqrs := []uint8{
- 6, 5, 4, 5, 5,
- 4, 3, 3, 5, 9,
- 6, 2, 5, 6, 5,
- 4, 5, 5, 3, 10,
- 2, 5, 5, 3, 7, 6}
- muls := [][]uint64{
- _101111, _111, _11, _1111, _10101,
- _101, _101, _101, _111, _101111,
- _1111, _1, _1, _1111, _111,
- _111, _111, _101, _11, _101111,
- _11, _11, _11, _1, _10101, _1111}
-
- for i, s := range sqrs {
- p256OrdSqr(x, x, int(s))
- p256OrdMul(x, x, muls[i])
- }
-
- // Multiplying by one in the Montgomery domain converts a Montgomery
- // value out of the domain.
- one := []uint64{1, 0, 0, 0}
- p256OrdMul(x, x, one)
-
- xOut := make([]byte, 32)
- p256LittleToBig(xOut, x)
- return new(big.Int).SetBytes(xOut)
-}
-
-// fromBig converts a *big.Int into a format used by this code.
-func fromBig(out []uint64, big *big.Int) {
- for i := range out {
- out[i] = 0
- }
-
- for i, v := range big.Bits() {
- out[i] = uint64(v)
- }
-}
-
-// p256GetScalar endian-swaps the big-endian scalar value from in and writes it
-// to out. If the scalar is equal to or greater than the order of the group, it's
-// reduced modulo that order.
-func p256GetScalar(out []uint64, in []byte) {
- n := new(big.Int).SetBytes(in)
-
- if n.Cmp(p256.N) >= 0 {
- n.Mod(n, p256.N)
- }
- fromBig(out, n)
-}
-
-// p256Mul operates in a Montgomery domain with R = 2^256 mod p, where p is
-// the prime of the curve's underlying field. (See initP256 for the value.)
-// Thus rr here is R×R mod p. See comment in Inverse about how this is used.
-var rr = []uint64{0x0000000000000003, 0xfffffffbffffffff, 0xfffffffffffffffe, 0x00000004fffffffd}
-
-func maybeReduceModP(in *big.Int) *big.Int {
- if in.Cmp(p256.P) < 0 {
- return in
- }
- return new(big.Int).Mod(in, p256.P)
-}
-
-func (curve p256Curve) CombinedMult(bigX, bigY *big.Int, baseScalar, scalar []byte) (x, y *big.Int) {
- scalarReversed := make([]uint64, 4)
- var r1, r2 p256Point
- p256GetScalar(scalarReversed, baseScalar)
- r1IsInfinity := scalarIsZero(scalarReversed)
- r1.p256BaseMult(scalarReversed)
-
- p256GetScalar(scalarReversed, scalar)
- r2IsInfinity := scalarIsZero(scalarReversed)
- fromBig(r2.xyz[0:4], maybeReduceModP(bigX))
- fromBig(r2.xyz[4:8], maybeReduceModP(bigY))
- p256Mul(r2.xyz[0:4], r2.xyz[0:4], rr[:])
- p256Mul(r2.xyz[4:8], r2.xyz[4:8], rr[:])
-
- // This sets r2's Z value to 1, in the Montgomery domain.
- r2.xyz[8] = 0x0000000000000001
- r2.xyz[9] = 0xffffffff00000000
- r2.xyz[10] = 0xffffffffffffffff
- r2.xyz[11] = 0x00000000fffffffe
-
- r2.p256ScalarMult(scalarReversed)
-
- var sum, double p256Point
- pointsEqual := p256PointAddAsm(sum.xyz[:], r1.xyz[:], r2.xyz[:])
- p256PointDoubleAsm(double.xyz[:], r1.xyz[:])
- sum.CopyConditional(&double, pointsEqual)
- sum.CopyConditional(&r1, r2IsInfinity)
- sum.CopyConditional(&r2, r1IsInfinity)
-
- return sum.p256PointToAffine()
-}
-
-func (curve p256Curve) ScalarBaseMult(scalar []byte) (x, y *big.Int) {
- scalarReversed := make([]uint64, 4)
- p256GetScalar(scalarReversed, scalar)
-
- var r p256Point
- r.p256BaseMult(scalarReversed)
- return r.p256PointToAffine()
-}
-
-func (curve p256Curve) ScalarMult(bigX, bigY *big.Int, scalar []byte) (x, y *big.Int) {
- scalarReversed := make([]uint64, 4)
- p256GetScalar(scalarReversed, scalar)
-
- var r p256Point
- fromBig(r.xyz[0:4], maybeReduceModP(bigX))
- fromBig(r.xyz[4:8], maybeReduceModP(bigY))
- p256Mul(r.xyz[0:4], r.xyz[0:4], rr[:])
- p256Mul(r.xyz[4:8], r.xyz[4:8], rr[:])
-	// This sets r's Z value to 1, in the Montgomery domain.
- r.xyz[8] = 0x0000000000000001
- r.xyz[9] = 0xffffffff00000000
- r.xyz[10] = 0xffffffffffffffff
- r.xyz[11] = 0x00000000fffffffe
-
- r.p256ScalarMult(scalarReversed)
- return r.p256PointToAffine()
-}
-
-// uint64IsZero returns 1 if x is zero, and 0 otherwise.
-func uint64IsZero(x uint64) int {
- x = ^x
- x &= x >> 32
- x &= x >> 16
- x &= x >> 8
- x &= x >> 4
- x &= x >> 2
- x &= x >> 1
- return int(x & 1)
-}
-
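
The cascade above is a branch-free zero test: after x = ^x, each
shift-and-AND folds the upper half of the surviving bits into the lower half,
so bit 0 ends up set only when every bit of ^x was set, i.e. only when x was
zero. A small sketch exercising it (the test values are arbitrary):

    package main

    import "fmt"

    func uint64IsZero(x uint64) int {
        x = ^x
        x &= x >> 32
        x &= x >> 16
        x &= x >> 8
        x &= x >> 4
        x &= x >> 2
        x &= x >> 1
        return int(x & 1)
    }

    func main() {
        for _, v := range []uint64{0, 1, 1 << 63, ^uint64(0)} {
            fmt.Println(v, "->", uint64IsZero(v)) // 1 only for v == 0
        }
    }
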
-// scalarIsZero returns 1 if scalar represents the zero value, and 0
-// otherwise.
-func scalarIsZero(scalar []uint64) int {
- return uint64IsZero(scalar[0] | scalar[1] | scalar[2] | scalar[3])
-}
-
-func (p *p256Point) p256PointToAffine() (x, y *big.Int) {
- zInv := make([]uint64, 4)
- zInvSq := make([]uint64, 4)
- p256Inverse(zInv, p.xyz[8:12])
- p256Sqr(zInvSq, zInv, 1)
- p256Mul(zInv, zInv, zInvSq)
-
- p256Mul(zInvSq, p.xyz[0:4], zInvSq)
- p256Mul(zInv, p.xyz[4:8], zInv)
-
- p256FromMont(zInvSq, zInvSq)
- p256FromMont(zInv, zInv)
-
- xOut := make([]byte, 32)
- yOut := make([]byte, 32)
- p256LittleToBig(xOut, zInvSq)
- p256LittleToBig(yOut, zInv)
-
- return new(big.Int).SetBytes(xOut), new(big.Int).SetBytes(yOut)
-}
-
-// CopyConditional overwrites p with src if v == 1, and leaves p
-// unchanged if v == 0.
-func (p *p256Point) CopyConditional(src *p256Point, v int) {
- pMask := uint64(v) - 1
- srcMask := ^pMask
-
- for i, n := range p.xyz {
- p.xyz[i] = (n & pMask) | (src.xyz[i] & srcMask)
- }
-}
-
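
Since v is exactly 0 or 1, pMask = v - 1 is all ones when v == 0 and all
zeros when v == 1, making each limb assignment a branch-free two-way select.
The same idiom on a single word, as a sketch (ctSelect64 is a hypothetical
name, not something defined in this file):

    // ctSelect64 returns a when v == 0 and b when v == 1, without
    // branching on v. v must be exactly 0 or 1.
    func ctSelect64(a, b uint64, v int) uint64 {
        keepA := uint64(v) - 1 // all ones when v == 0, all zeros when v == 1
        return (a & keepA) | (b &^ keepA)
    }
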
-// p256Inverse sets out to in^-1 mod p.
-func p256Inverse(out, in []uint64) {
- var stack [6 * 4]uint64
- p2 := stack[4*0 : 4*0+4]
- p4 := stack[4*1 : 4*1+4]
- p8 := stack[4*2 : 4*2+4]
- p16 := stack[4*3 : 4*3+4]
- p32 := stack[4*4 : 4*4+4]
-
- p256Sqr(out, in, 1)
- p256Mul(p2, out, in) // 3*p
-
- p256Sqr(out, p2, 2)
- p256Mul(p4, out, p2) // f*p
-
- p256Sqr(out, p4, 4)
- p256Mul(p8, out, p4) // ff*p
-
- p256Sqr(out, p8, 8)
- p256Mul(p16, out, p8) // ffff*p
-
- p256Sqr(out, p16, 16)
- p256Mul(p32, out, p16) // ffffffff*p
-
- p256Sqr(out, p32, 32)
- p256Mul(out, out, in)
-
- p256Sqr(out, out, 128)
- p256Mul(out, out, p32)
-
- p256Sqr(out, out, 32)
- p256Mul(out, out, p32)
-
- p256Sqr(out, out, 16)
- p256Mul(out, out, p16)
-
- p256Sqr(out, out, 8)
- p256Mul(out, out, p8)
-
- p256Sqr(out, out, 4)
- p256Mul(out, out, p4)
-
- p256Sqr(out, out, 2)
- p256Mul(out, out, p2)
-
- p256Sqr(out, out, 2)
- p256Mul(out, out, in)
-}
-
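
p256Inverse raises its input to the power p-2 via an addition chain (the
Montgomery R factors cancel along the way): p256Sqr k times multiplies the
running exponent by 2^k, and p256Mul by a stored power adds that power's
exponent. Replaying the exponent arithmetic with plain integers (a sketch,
not part of the file above) confirms the chain lands exactly on p - 2:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        one := big.NewInt(1)
        ones := func(n uint) *big.Int { // 2^n - 1, i.e. n binary ones
            return new(big.Int).Sub(new(big.Int).Lsh(one, n), one)
        }
        p2, p4, p8, p16, p32 := ones(2), ones(4), ones(8), ones(16), ones(32)

        e := new(big.Int).Set(p32)
        step := func(shift uint, add *big.Int) { e.Lsh(e, shift).Add(e, add) }
        step(32, one) // p256Sqr(out, p32, 32); p256Mul(out, out, in)
        step(128, p32)
        step(32, p32)
        step(16, p16)
        step(8, p8)
        step(4, p4)
        step(2, p2)
        step(2, one)

        p, _ := new(big.Int).SetString(
            "ffffffff00000001000000000000000000000000ffffffffffffffffffffffff", 16)
        fmt.Println(e.Cmp(new(big.Int).Sub(p, big.NewInt(2))) == 0) // true
    }
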
-func (p *p256Point) p256StorePoint(r *[16 * 4 * 3]uint64, index int) {
- copy(r[index*12:], p.xyz[:])
-}
-
-func boothW5(in uint) (int, int) {
- var s uint = ^((in >> 5) - 1)
- var d uint = (1 << 6) - in - 1
- d = (d & s) | (in & (^s))
- d = (d >> 1) + (d & 1)
- return int(d), int(s & 1)
-}
-
-func boothW6(in uint) (int, int) {
- var s uint = ^((in >> 6) - 1)
- var d uint = (1 << 7) - in - 1
- d = (d & s) | (in & (^s))
- d = (d >> 1) + (d & 1)
- return int(d), int(s & 1)
-}
-
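
boothW5 and boothW6 implement signed (Booth) window recoding: a w-bit window
plus one bit of overlap becomes a signed digit of magnitude at most 2^(w-1),
so the precomputed tables only need positive multiples and negation is
applied separately (p256NegCond). A sketch with an arbitrary test scalar,
not from this file, showing that the recoded digits rebuild the scalar:

    package main

    import "fmt"

    func boothW5(in uint) (int, int) {
        var s uint = ^((in >> 5) - 1)
        var d uint = (1 << 6) - in - 1
        d = (d & s) | (in & (^s))
        d = (d >> 1) + (d & 1)
        return int(d), int(s & 1)
    }

    func main() {
        scalar := uint64(0x1d3f9a2b) // arbitrary, well below 2^30
        var sum int64
        for i := 0; i < 7; i++ { // enough 5-bit windows to cover the scalar
            var w uint64
            if i == 0 {
                w = (scalar << 1) & 0x3f // bottom window, as in p256ScalarMult
            } else {
                w = (scalar >> uint(i*5-1)) & 0x3f
            }
            d, sign := boothW5(uint(w))
            v := int64(d)
            if sign == 1 {
                v = -v
            }
            sum += v << uint(i*5)
        }
        fmt.Println(uint64(sum) == scalar) // true
    }
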
-func (p *p256Point) p256BaseMult(scalar []uint64) {
- wvalue := (scalar[0] << 1) & 0x7f
- sel, sign := boothW6(uint(wvalue))
- p256SelectBase(&p.xyz, p256Precomputed, sel)
- p256NegCond(p.xyz[4:8], sign)
-
- // (This is one, in the Montgomery domain.)
- p.xyz[8] = 0x0000000000000001
- p.xyz[9] = 0xffffffff00000000
- p.xyz[10] = 0xffffffffffffffff
- p.xyz[11] = 0x00000000fffffffe
-
- var t0 p256Point
- // (This is one, in the Montgomery domain.)
- t0.xyz[8] = 0x0000000000000001
- t0.xyz[9] = 0xffffffff00000000
- t0.xyz[10] = 0xffffffffffffffff
- t0.xyz[11] = 0x00000000fffffffe
-
- index := uint(5)
- zero := sel
-
- for i := 1; i < 43; i++ {
- if index < 192 {
- wvalue = ((scalar[index/64] >> (index % 64)) + (scalar[index/64+1] << (64 - (index % 64)))) & 0x7f
- } else {
- wvalue = (scalar[index/64] >> (index % 64)) & 0x7f
- }
- index += 6
- sel, sign = boothW6(uint(wvalue))
- p256SelectBase(&t0.xyz, p256Precomputed[i*32*8*8:], sel)
- p256PointAddAffineAsm(p.xyz[0:12], p.xyz[0:12], t0.xyz[0:8], sign, sel, zero)
- zero |= sel
- }
-}
-
-func (p *p256Point) p256ScalarMult(scalar []uint64) {
-	// precomp is a table of precomputed points that stores the multiples
-	// of p from 1×p to 16×p.
- var precomp [16 * 4 * 3]uint64
- var t0, t1, t2, t3 p256Point
-
- // Prepare the table
- p.p256StorePoint(&precomp, 0) // 1
-
- p256PointDoubleAsm(t0.xyz[:], p.xyz[:])
- p256PointDoubleAsm(t1.xyz[:], t0.xyz[:])
- p256PointDoubleAsm(t2.xyz[:], t1.xyz[:])
- p256PointDoubleAsm(t3.xyz[:], t2.xyz[:])
- t0.p256StorePoint(&precomp, 1) // 2
- t1.p256StorePoint(&precomp, 3) // 4
- t2.p256StorePoint(&precomp, 7) // 8
- t3.p256StorePoint(&precomp, 15) // 16
-
- p256PointAddAsm(t0.xyz[:], t0.xyz[:], p.xyz[:])
- p256PointAddAsm(t1.xyz[:], t1.xyz[:], p.xyz[:])
- p256PointAddAsm(t2.xyz[:], t2.xyz[:], p.xyz[:])
- t0.p256StorePoint(&precomp, 2) // 3
- t1.p256StorePoint(&precomp, 4) // 5
- t2.p256StorePoint(&precomp, 8) // 9
-
- p256PointDoubleAsm(t0.xyz[:], t0.xyz[:])
- p256PointDoubleAsm(t1.xyz[:], t1.xyz[:])
- t0.p256StorePoint(&precomp, 5) // 6
- t1.p256StorePoint(&precomp, 9) // 10
-
- p256PointAddAsm(t2.xyz[:], t0.xyz[:], p.xyz[:])
- p256PointAddAsm(t1.xyz[:], t1.xyz[:], p.xyz[:])
- t2.p256StorePoint(&precomp, 6) // 7
- t1.p256StorePoint(&precomp, 10) // 11
-
- p256PointDoubleAsm(t0.xyz[:], t0.xyz[:])
- p256PointDoubleAsm(t2.xyz[:], t2.xyz[:])
- t0.p256StorePoint(&precomp, 11) // 12
- t2.p256StorePoint(&precomp, 13) // 14
-
- p256PointAddAsm(t0.xyz[:], t0.xyz[:], p.xyz[:])
- p256PointAddAsm(t2.xyz[:], t2.xyz[:], p.xyz[:])
- t0.p256StorePoint(&precomp, 12) // 13
- t2.p256StorePoint(&precomp, 14) // 15
-
-	// Start scanning the window from the top bit
- index := uint(254)
- var sel, sign int
-
- wvalue := (scalar[index/64] >> (index % 64)) & 0x3f
- sel, _ = boothW5(uint(wvalue))
-
- p256Select(p.xyz[0:12], precomp[0:], sel)
- zero := sel
-
- for index > 4 {
- index -= 5
- p256PointDoubleAsm(p.xyz[:], p.xyz[:])
- p256PointDoubleAsm(p.xyz[:], p.xyz[:])
- p256PointDoubleAsm(p.xyz[:], p.xyz[:])
- p256PointDoubleAsm(p.xyz[:], p.xyz[:])
- p256PointDoubleAsm(p.xyz[:], p.xyz[:])
-
- if index < 192 {
- wvalue = ((scalar[index/64] >> (index % 64)) + (scalar[index/64+1] << (64 - (index % 64)))) & 0x3f
- } else {
- wvalue = (scalar[index/64] >> (index % 64)) & 0x3f
- }
-
- sel, sign = boothW5(uint(wvalue))
-
- p256Select(t0.xyz[0:], precomp[0:], sel)
- p256NegCond(t0.xyz[4:8], sign)
- p256PointAddAsm(t1.xyz[:], p.xyz[:], t0.xyz[:])
- p256MovCond(t1.xyz[0:12], t1.xyz[0:12], p.xyz[0:12], sel)
- p256MovCond(p.xyz[0:12], t1.xyz[0:12], t0.xyz[0:12], zero)
- zero |= sel
- }
-
- p256PointDoubleAsm(p.xyz[:], p.xyz[:])
- p256PointDoubleAsm(p.xyz[:], p.xyz[:])
- p256PointDoubleAsm(p.xyz[:], p.xyz[:])
- p256PointDoubleAsm(p.xyz[:], p.xyz[:])
- p256PointDoubleAsm(p.xyz[:], p.xyz[:])
-
- wvalue = (scalar[0] << 1) & 0x3f
- sel, sign = boothW5(uint(wvalue))
-
- p256Select(t0.xyz[0:], precomp[0:], sel)
- p256NegCond(t0.xyz[4:8], sign)
- p256PointAddAsm(t1.xyz[:], p.xyz[:], t0.xyz[:])
- p256MovCond(t1.xyz[0:12], t1.xyz[0:12], p.xyz[0:12], sel)
- p256MovCond(p.xyz[0:12], t1.xyz[0:12], t0.xyz[0:12], zero)
-}
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/p256_asm_amd64.s b/contrib/go/_std_1.18/src/crypto/elliptic/p256_asm_amd64.s
deleted file mode 100644
index bd16add241..0000000000
--- a/contrib/go/_std_1.18/src/crypto/elliptic/p256_asm_amd64.s
+++ /dev/null
@@ -1,2347 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file contains a constant-time, 64-bit assembly implementation of
-// P-256. The optimizations performed here are described in detail in:
-// S. Gueron and V. Krasnov, "Fast prime field elliptic-curve cryptography with
-// 256-bit primes"
-// https://link.springer.com/article/10.1007%2Fs13389-014-0090-x
-// https://eprint.iacr.org/2013/816.pdf
-
-#include "textflag.h"
-
-#define res_ptr DI
-#define x_ptr SI
-#define y_ptr CX
-
-#define acc0 R8
-#define acc1 R9
-#define acc2 R10
-#define acc3 R11
-#define acc4 R12
-#define acc5 R13
-#define t0 R14
-#define t1 R15
-
-DATA p256const0<>+0x00(SB)/8, $0x00000000ffffffff
-DATA p256const1<>+0x00(SB)/8, $0xffffffff00000001
-DATA p256ordK0<>+0x00(SB)/8, $0xccd1c8aaee00bc4f
-DATA p256ord<>+0x00(SB)/8, $0xf3b9cac2fc632551
-DATA p256ord<>+0x08(SB)/8, $0xbce6faada7179e84
-DATA p256ord<>+0x10(SB)/8, $0xffffffffffffffff
-DATA p256ord<>+0x18(SB)/8, $0xffffffff00000000
-DATA p256one<>+0x00(SB)/8, $0x0000000000000001
-DATA p256one<>+0x08(SB)/8, $0xffffffff00000000
-DATA p256one<>+0x10(SB)/8, $0xffffffffffffffff
-DATA p256one<>+0x18(SB)/8, $0x00000000fffffffe
-GLOBL p256const0<>(SB), 8, $8
-GLOBL p256const1<>(SB), 8, $8
-GLOBL p256ordK0<>(SB), 8, $8
-GLOBL p256ord<>(SB), 8, $32
-GLOBL p256one<>(SB), 8, $32
-
-/* ---------------------------------------*/
-// func p256LittleToBig(res []byte, in []uint64)
-TEXT ·p256LittleToBig(SB),NOSPLIT,$0
- JMP ·p256BigToLittle(SB)
-/* ---------------------------------------*/
-// func p256BigToLittle(res []uint64, in []byte)
-TEXT ·p256BigToLittle(SB),NOSPLIT,$0
- MOVQ res+0(FP), res_ptr
- MOVQ in+24(FP), x_ptr
-
- MOVQ (8*0)(x_ptr), acc0
- MOVQ (8*1)(x_ptr), acc1
- MOVQ (8*2)(x_ptr), acc2
- MOVQ (8*3)(x_ptr), acc3
-
- BSWAPQ acc0
- BSWAPQ acc1
- BSWAPQ acc2
- BSWAPQ acc3
-
- MOVQ acc3, (8*0)(res_ptr)
- MOVQ acc2, (8*1)(res_ptr)
- MOVQ acc1, (8*2)(res_ptr)
- MOVQ acc0, (8*3)(res_ptr)
-
- RET
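
The conversion is a byte swap plus limb reversal: 32 big-endian bytes become
four little-endian 64-bit limbs, limb 0 being least significant. The same
operation in Go, as a sketch (bigToLittle is a hypothetical helper, not the
real implementation):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func bigToLittle(res *[4]uint64, in *[32]byte) {
        for i := 0; i < 4; i++ {
            res[i] = binary.BigEndian.Uint64(in[(3-i)*8:])
        }
    }

    func main() {
        var in [32]byte
        in[31] = 0x2a // the big-endian value 42
        var res [4]uint64
        bigToLittle(&res, &in)
        fmt.Println(res) // [42 0 0 0]
    }
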
-/* ---------------------------------------*/
-// func p256MovCond(res, a, b []uint64, cond int)
-// If cond == 0, res = b; otherwise res = a
-TEXT ·p256MovCond(SB),NOSPLIT,$0
- MOVQ res+0(FP), res_ptr
- MOVQ a+24(FP), x_ptr
- MOVQ b+48(FP), y_ptr
- MOVQ cond+72(FP), X12
-
- PXOR X13, X13
- PSHUFD $0, X12, X12
- PCMPEQL X13, X12
-
- MOVOU X12, X0
- MOVOU (16*0)(x_ptr), X6
- PANDN X6, X0
- MOVOU X12, X1
- MOVOU (16*1)(x_ptr), X7
- PANDN X7, X1
- MOVOU X12, X2
- MOVOU (16*2)(x_ptr), X8
- PANDN X8, X2
- MOVOU X12, X3
- MOVOU (16*3)(x_ptr), X9
- PANDN X9, X3
- MOVOU X12, X4
- MOVOU (16*4)(x_ptr), X10
- PANDN X10, X4
- MOVOU X12, X5
- MOVOU (16*5)(x_ptr), X11
- PANDN X11, X5
-
- MOVOU (16*0)(y_ptr), X6
- MOVOU (16*1)(y_ptr), X7
- MOVOU (16*2)(y_ptr), X8
- MOVOU (16*3)(y_ptr), X9
- MOVOU (16*4)(y_ptr), X10
- MOVOU (16*5)(y_ptr), X11
-
- PAND X12, X6
- PAND X12, X7
- PAND X12, X8
- PAND X12, X9
- PAND X12, X10
- PAND X12, X11
-
- PXOR X6, X0
- PXOR X7, X1
- PXOR X8, X2
- PXOR X9, X3
- PXOR X10, X4
- PXOR X11, X5
-
- MOVOU X0, (16*0)(res_ptr)
- MOVOU X1, (16*1)(res_ptr)
- MOVOU X2, (16*2)(res_ptr)
- MOVOU X3, (16*3)(res_ptr)
- MOVOU X4, (16*4)(res_ptr)
- MOVOU X5, (16*5)(res_ptr)
-
- RET
-/* ---------------------------------------*/
-// func p256NegCond(val []uint64, cond int)
-TEXT ·p256NegCond(SB),NOSPLIT,$0
- MOVQ val+0(FP), res_ptr
- MOVQ cond+24(FP), t0
- // acc = poly
- MOVQ $-1, acc0
- MOVQ p256const0<>(SB), acc1
- MOVQ $0, acc2
- MOVQ p256const1<>(SB), acc3
- // Load the original value
- MOVQ (8*0)(res_ptr), acc5
- MOVQ (8*1)(res_ptr), x_ptr
- MOVQ (8*2)(res_ptr), y_ptr
- MOVQ (8*3)(res_ptr), t1
- // Speculatively subtract
- SUBQ acc5, acc0
- SBBQ x_ptr, acc1
- SBBQ y_ptr, acc2
- SBBQ t1, acc3
- // If condition is 0, keep original value
- TESTQ t0, t0
- CMOVQEQ acc5, acc0
- CMOVQEQ x_ptr, acc1
- CMOVQEQ y_ptr, acc2
- CMOVQEQ t1, acc3
- // Store result
- MOVQ acc0, (8*0)(res_ptr)
- MOVQ acc1, (8*1)(res_ptr)
- MOVQ acc2, (8*2)(res_ptr)
- MOVQ acc3, (8*3)(res_ptr)
-
- RET
-/* ---------------------------------------*/
-// func p256Sqr(res, in []uint64, n int)
-TEXT ·p256Sqr(SB),NOSPLIT,$0
- MOVQ res+0(FP), res_ptr
- MOVQ in+24(FP), x_ptr
- MOVQ n+48(FP), BX
-
-sqrLoop:
-
- // y[1:] * y[0]
- MOVQ (8*0)(x_ptr), t0
-
- MOVQ (8*1)(x_ptr), AX
- MULQ t0
- MOVQ AX, acc1
- MOVQ DX, acc2
-
- MOVQ (8*2)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc2
- ADCQ $0, DX
- MOVQ DX, acc3
-
- MOVQ (8*3)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc3
- ADCQ $0, DX
- MOVQ DX, acc4
- // y[2:] * y[1]
- MOVQ (8*1)(x_ptr), t0
-
- MOVQ (8*2)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc3
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*3)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc4
- ADCQ $0, DX
- ADDQ AX, acc4
- ADCQ $0, DX
- MOVQ DX, acc5
- // y[3] * y[2]
- MOVQ (8*2)(x_ptr), t0
-
- MOVQ (8*3)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc5
- ADCQ $0, DX
- MOVQ DX, y_ptr
- XORQ t1, t1
- // *2
- ADDQ acc1, acc1
- ADCQ acc2, acc2
- ADCQ acc3, acc3
- ADCQ acc4, acc4
- ADCQ acc5, acc5
- ADCQ y_ptr, y_ptr
- ADCQ $0, t1
- // Missing products
- MOVQ (8*0)(x_ptr), AX
- MULQ AX
- MOVQ AX, acc0
- MOVQ DX, t0
-
- MOVQ (8*1)(x_ptr), AX
- MULQ AX
- ADDQ t0, acc1
- ADCQ AX, acc2
- ADCQ $0, DX
- MOVQ DX, t0
-
- MOVQ (8*2)(x_ptr), AX
- MULQ AX
- ADDQ t0, acc3
- ADCQ AX, acc4
- ADCQ $0, DX
- MOVQ DX, t0
-
- MOVQ (8*3)(x_ptr), AX
- MULQ AX
- ADDQ t0, acc5
- ADCQ AX, y_ptr
- ADCQ DX, t1
- MOVQ t1, x_ptr
- // First reduction step
- MOVQ acc0, AX
- MOVQ acc0, t1
- SHLQ $32, acc0
- MULQ p256const1<>(SB)
- SHRQ $32, t1
- ADDQ acc0, acc1
- ADCQ t1, acc2
- ADCQ AX, acc3
- ADCQ $0, DX
- MOVQ DX, acc0
- // Second reduction step
- MOVQ acc1, AX
- MOVQ acc1, t1
- SHLQ $32, acc1
- MULQ p256const1<>(SB)
- SHRQ $32, t1
- ADDQ acc1, acc2
- ADCQ t1, acc3
- ADCQ AX, acc0
- ADCQ $0, DX
- MOVQ DX, acc1
- // Third reduction step
- MOVQ acc2, AX
- MOVQ acc2, t1
- SHLQ $32, acc2
- MULQ p256const1<>(SB)
- SHRQ $32, t1
- ADDQ acc2, acc3
- ADCQ t1, acc0
- ADCQ AX, acc1
- ADCQ $0, DX
- MOVQ DX, acc2
- // Last reduction step
- XORQ t0, t0
- MOVQ acc3, AX
- MOVQ acc3, t1
- SHLQ $32, acc3
- MULQ p256const1<>(SB)
- SHRQ $32, t1
- ADDQ acc3, acc0
- ADCQ t1, acc1
- ADCQ AX, acc2
- ADCQ $0, DX
- MOVQ DX, acc3
- // Add bits [511:256] of the sqr result
- ADCQ acc4, acc0
- ADCQ acc5, acc1
- ADCQ y_ptr, acc2
- ADCQ x_ptr, acc3
- ADCQ $0, t0
-
- MOVQ acc0, acc4
- MOVQ acc1, acc5
- MOVQ acc2, y_ptr
- MOVQ acc3, t1
- // Subtract p256
- SUBQ $-1, acc0
-	SBBQ p256const0<>(SB), acc1
- SBBQ $0, acc2
- SBBQ p256const1<>(SB), acc3
- SBBQ $0, t0
-
- CMOVQCS acc4, acc0
- CMOVQCS acc5, acc1
- CMOVQCS y_ptr, acc2
- CMOVQCS t1, acc3
-
- MOVQ acc0, (8*0)(res_ptr)
- MOVQ acc1, (8*1)(res_ptr)
- MOVQ acc2, (8*2)(res_ptr)
- MOVQ acc3, (8*3)(res_ptr)
- MOVQ res_ptr, x_ptr
- DECQ BX
- JNE sqrLoop
-
- RET
-/* ---------------------------------------*/
-// func p256Mul(res, in1, in2 []uint64)
-TEXT ·p256Mul(SB),NOSPLIT,$0
- MOVQ res+0(FP), res_ptr
- MOVQ in1+24(FP), x_ptr
- MOVQ in2+48(FP), y_ptr
- // x * y[0]
- MOVQ (8*0)(y_ptr), t0
-
- MOVQ (8*0)(x_ptr), AX
- MULQ t0
- MOVQ AX, acc0
- MOVQ DX, acc1
-
- MOVQ (8*1)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc1
- ADCQ $0, DX
- MOVQ DX, acc2
-
- MOVQ (8*2)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc2
- ADCQ $0, DX
- MOVQ DX, acc3
-
- MOVQ (8*3)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc3
- ADCQ $0, DX
- MOVQ DX, acc4
- XORQ acc5, acc5
- // First reduction step
- MOVQ acc0, AX
- MOVQ acc0, t1
- SHLQ $32, acc0
- MULQ p256const1<>(SB)
- SHRQ $32, t1
- ADDQ acc0, acc1
- ADCQ t1, acc2
- ADCQ AX, acc3
- ADCQ DX, acc4
- ADCQ $0, acc5
- XORQ acc0, acc0
- // x * y[1]
- MOVQ (8*1)(y_ptr), t0
-
- MOVQ (8*0)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc1
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*1)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc2
- ADCQ $0, DX
- ADDQ AX, acc2
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*2)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc3
- ADCQ $0, DX
- ADDQ AX, acc3
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*3)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc4
- ADCQ $0, DX
- ADDQ AX, acc4
- ADCQ DX, acc5
- ADCQ $0, acc0
- // Second reduction step
- MOVQ acc1, AX
- MOVQ acc1, t1
- SHLQ $32, acc1
- MULQ p256const1<>(SB)
- SHRQ $32, t1
- ADDQ acc1, acc2
- ADCQ t1, acc3
- ADCQ AX, acc4
- ADCQ DX, acc5
- ADCQ $0, acc0
- XORQ acc1, acc1
- // x * y[2]
- MOVQ (8*2)(y_ptr), t0
-
- MOVQ (8*0)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc2
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*1)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc3
- ADCQ $0, DX
- ADDQ AX, acc3
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*2)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc4
- ADCQ $0, DX
- ADDQ AX, acc4
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*3)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc5
- ADCQ $0, DX
- ADDQ AX, acc5
- ADCQ DX, acc0
- ADCQ $0, acc1
- // Third reduction step
- MOVQ acc2, AX
- MOVQ acc2, t1
- SHLQ $32, acc2
- MULQ p256const1<>(SB)
- SHRQ $32, t1
- ADDQ acc2, acc3
- ADCQ t1, acc4
- ADCQ AX, acc5
- ADCQ DX, acc0
- ADCQ $0, acc1
- XORQ acc2, acc2
- // x * y[3]
- MOVQ (8*3)(y_ptr), t0
-
- MOVQ (8*0)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc3
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*1)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc4
- ADCQ $0, DX
- ADDQ AX, acc4
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*2)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc5
- ADCQ $0, DX
- ADDQ AX, acc5
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*3)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc0
- ADCQ $0, DX
- ADDQ AX, acc0
- ADCQ DX, acc1
- ADCQ $0, acc2
- // Last reduction step
- MOVQ acc3, AX
- MOVQ acc3, t1
- SHLQ $32, acc3
- MULQ p256const1<>(SB)
- SHRQ $32, t1
- ADDQ acc3, acc4
- ADCQ t1, acc5
- ADCQ AX, acc0
- ADCQ DX, acc1
- ADCQ $0, acc2
- // Copy result [255:0]
- MOVQ acc4, x_ptr
- MOVQ acc5, acc3
- MOVQ acc0, t0
- MOVQ acc1, t1
- // Subtract p256
- SUBQ $-1, acc4
-	SBBQ p256const0<>(SB), acc5
- SBBQ $0, acc0
- SBBQ p256const1<>(SB), acc1
- SBBQ $0, acc2
-
- CMOVQCS x_ptr, acc4
- CMOVQCS acc3, acc5
- CMOVQCS t0, acc0
- CMOVQCS t1, acc1
-
- MOVQ acc4, (8*0)(res_ptr)
- MOVQ acc5, (8*1)(res_ptr)
- MOVQ acc0, (8*2)(res_ptr)
- MOVQ acc1, (8*3)(res_ptr)
-
- RET
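
Each of the four reduction steps above exploits the special form of the
P-256 prime (its 64-bit words are 2^64-1, 2^32-1, 0 and 2^64-2^32+1), so a
word is reduced with shifts and a single multiply. At the function level,
p256Mul(res, a, b) is Montgomery multiplication with R = 2^256: it computes
a*b*R^(-1) mod p. A math/big model of that contract, as a sketch (not from
this file):

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        p, _ := new(big.Int).SetString(
            "ffffffff00000001000000000000000000000000ffffffffffffffffffffffff", 16)
        R := new(big.Int).Lsh(big.NewInt(1), 256)
        Rinv := new(big.Int).ModInverse(R, p)

        montMul := func(a, b *big.Int) *big.Int { // a*b*R^-1 mod p
            t := new(big.Int).Mul(a, b)
            return t.Mod(t.Mul(t, Rinv), p)
        }

        // Round trip: multiplying by R*R mod p enters the domain (see the
        // rr variable in p256_asm.go), multiplying by 1 leaves it.
        rr := new(big.Int).Mod(new(big.Int).Mul(R, R), p)
        x := big.NewInt(42)
        xMont := montMul(x, rr)
        fmt.Println(montMul(xMont, big.NewInt(1)).Cmp(x) == 0) // true
    }
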
-/* ---------------------------------------*/
-// func p256FromMont(res, in []uint64)
-TEXT ·p256FromMont(SB),NOSPLIT,$0
- MOVQ res+0(FP), res_ptr
- MOVQ in+24(FP), x_ptr
-
- MOVQ (8*0)(x_ptr), acc0
- MOVQ (8*1)(x_ptr), acc1
- MOVQ (8*2)(x_ptr), acc2
- MOVQ (8*3)(x_ptr), acc3
- XORQ acc4, acc4
-
-	// Only reduction is needed; no multiplications
- // First stage
- MOVQ acc0, AX
- MOVQ acc0, t1
- SHLQ $32, acc0
- MULQ p256const1<>(SB)
- SHRQ $32, t1
- ADDQ acc0, acc1
- ADCQ t1, acc2
- ADCQ AX, acc3
- ADCQ DX, acc4
- XORQ acc5, acc5
- // Second stage
- MOVQ acc1, AX
- MOVQ acc1, t1
- SHLQ $32, acc1
- MULQ p256const1<>(SB)
- SHRQ $32, t1
- ADDQ acc1, acc2
- ADCQ t1, acc3
- ADCQ AX, acc4
- ADCQ DX, acc5
- XORQ acc0, acc0
- // Third stage
- MOVQ acc2, AX
- MOVQ acc2, t1
- SHLQ $32, acc2
- MULQ p256const1<>(SB)
- SHRQ $32, t1
- ADDQ acc2, acc3
- ADCQ t1, acc4
- ADCQ AX, acc5
- ADCQ DX, acc0
- XORQ acc1, acc1
- // Last stage
- MOVQ acc3, AX
- MOVQ acc3, t1
- SHLQ $32, acc3
- MULQ p256const1<>(SB)
- SHRQ $32, t1
- ADDQ acc3, acc4
- ADCQ t1, acc5
- ADCQ AX, acc0
- ADCQ DX, acc1
-
- MOVQ acc4, x_ptr
- MOVQ acc5, acc3
- MOVQ acc0, t0
- MOVQ acc1, t1
-
- SUBQ $-1, acc4
- SBBQ p256const0<>(SB), acc5
- SBBQ $0, acc0
- SBBQ p256const1<>(SB), acc1
-
- CMOVQCS x_ptr, acc4
- CMOVQCS acc3, acc5
- CMOVQCS t0, acc0
- CMOVQCS t1, acc1
-
- MOVQ acc4, (8*0)(res_ptr)
- MOVQ acc5, (8*1)(res_ptr)
- MOVQ acc0, (8*2)(res_ptr)
- MOVQ acc1, (8*3)(res_ptr)
-
- RET
-/* ---------------------------------------*/
-// Constant-time point access to an arbitrary point table.
-// Indexed from 1 to 15, with a -1 offset
-// (index 0 is implicitly the point at infinity).
-// func p256Select(point, table []uint64, idx int)
-TEXT ·p256Select(SB),NOSPLIT,$0
- MOVQ idx+48(FP),AX
- MOVQ table+24(FP),DI
- MOVQ point+0(FP),DX
-
- PXOR X15, X15 // X15 = 0
- PCMPEQL X14, X14 // X14 = -1
- PSUBL X14, X15 // X15 = 1
- MOVL AX, X14
- PSHUFD $0, X14, X14
-
- PXOR X0, X0
- PXOR X1, X1
- PXOR X2, X2
- PXOR X3, X3
- PXOR X4, X4
- PXOR X5, X5
- MOVQ $16, AX
-
- MOVOU X15, X13
-
-loop_select:
-
- MOVOU X13, X12
- PADDL X15, X13
- PCMPEQL X14, X12
-
- MOVOU (16*0)(DI), X6
- MOVOU (16*1)(DI), X7
- MOVOU (16*2)(DI), X8
- MOVOU (16*3)(DI), X9
- MOVOU (16*4)(DI), X10
- MOVOU (16*5)(DI), X11
- ADDQ $(16*6), DI
-
- PAND X12, X6
- PAND X12, X7
- PAND X12, X8
- PAND X12, X9
- PAND X12, X10
- PAND X12, X11
-
- PXOR X6, X0
- PXOR X7, X1
- PXOR X8, X2
- PXOR X9, X3
- PXOR X10, X4
- PXOR X11, X5
-
- DECQ AX
- JNE loop_select
-
- MOVOU X0, (16*0)(DX)
- MOVOU X1, (16*1)(DX)
- MOVOU X2, (16*2)(DX)
- MOVOU X3, (16*3)(DX)
- MOVOU X4, (16*4)(DX)
- MOVOU X5, (16*5)(DX)
-
- RET
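
p256Select touches all 16 table entries and accumulates only the one whose
index matches, so the memory access pattern is independent of the secret
index. The same pattern in Go, as a sketch (ctSelect is a hypothetical
helper built on crypto/subtle):

    package main

    import (
        "crypto/subtle"
        "fmt"
    )

    // ctSelect copies table[idx-1] into out while reading every entry.
    // idx == 0 selects nothing, leaving out zeroed (the implicit point
    // at infinity), mirroring p256Select's indexing.
    func ctSelect(out []uint64, table [][]uint64, idx int) {
        for i := range out {
            out[i] = 0
        }
        for j, entry := range table {
            mask := -uint64(subtle.ConstantTimeEq(int32(j+1), int32(idx)))
            for i := range out {
                out[i] |= entry[i] & mask
            }
        }
    }

    func main() {
        table := [][]uint64{{10, 11}, {20, 21}, {30, 31}}
        out := make([]uint64, 2)
        ctSelect(out, table, 2)
        fmt.Println(out) // [20 21]
    }
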
-/* ---------------------------------------*/
-// Constant-time point access to the base point table.
-// func p256SelectBase(point *[12]uint64, table string, idx int)
-TEXT ·p256SelectBase(SB),NOSPLIT,$0
- MOVQ idx+24(FP),AX
- MOVQ table+8(FP),DI
- MOVQ point+0(FP),DX
-
- PXOR X15, X15 // X15 = 0
- PCMPEQL X14, X14 // X14 = -1
- PSUBL X14, X15 // X15 = 1
- MOVL AX, X14
- PSHUFD $0, X14, X14
-
- PXOR X0, X0
- PXOR X1, X1
- PXOR X2, X2
- PXOR X3, X3
- MOVQ $16, AX
-
- MOVOU X15, X13
-
-loop_select_base:
-
- MOVOU X13, X12
- PADDL X15, X13
- PCMPEQL X14, X12
-
- MOVOU (16*0)(DI), X4
- MOVOU (16*1)(DI), X5
- MOVOU (16*2)(DI), X6
- MOVOU (16*3)(DI), X7
-
- MOVOU (16*4)(DI), X8
- MOVOU (16*5)(DI), X9
- MOVOU (16*6)(DI), X10
- MOVOU (16*7)(DI), X11
-
- ADDQ $(16*8), DI
-
- PAND X12, X4
- PAND X12, X5
- PAND X12, X6
- PAND X12, X7
-
- MOVOU X13, X12
- PADDL X15, X13
- PCMPEQL X14, X12
-
- PAND X12, X8
- PAND X12, X9
- PAND X12, X10
- PAND X12, X11
-
- PXOR X4, X0
- PXOR X5, X1
- PXOR X6, X2
- PXOR X7, X3
-
- PXOR X8, X0
- PXOR X9, X1
- PXOR X10, X2
- PXOR X11, X3
-
- DECQ AX
- JNE loop_select_base
-
- MOVOU X0, (16*0)(DX)
- MOVOU X1, (16*1)(DX)
- MOVOU X2, (16*2)(DX)
- MOVOU X3, (16*3)(DX)
-
- RET
-/* ---------------------------------------*/
-// func p256OrdMul(res, in1, in2 []uint64)
-TEXT ·p256OrdMul(SB),NOSPLIT,$0
- MOVQ res+0(FP), res_ptr
- MOVQ in1+24(FP), x_ptr
- MOVQ in2+48(FP), y_ptr
- // x * y[0]
- MOVQ (8*0)(y_ptr), t0
-
- MOVQ (8*0)(x_ptr), AX
- MULQ t0
- MOVQ AX, acc0
- MOVQ DX, acc1
-
- MOVQ (8*1)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc1
- ADCQ $0, DX
- MOVQ DX, acc2
-
- MOVQ (8*2)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc2
- ADCQ $0, DX
- MOVQ DX, acc3
-
- MOVQ (8*3)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc3
- ADCQ $0, DX
- MOVQ DX, acc4
- XORQ acc5, acc5
- // First reduction step
- MOVQ acc0, AX
- MULQ p256ordK0<>(SB)
- MOVQ AX, t0
-
- MOVQ p256ord<>+0x00(SB), AX
- MULQ t0
- ADDQ AX, acc0
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ p256ord<>+0x08(SB), AX
- MULQ t0
- ADDQ t1, acc1
- ADCQ $0, DX
- ADDQ AX, acc1
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ p256ord<>+0x10(SB), AX
- MULQ t0
- ADDQ t1, acc2
- ADCQ $0, DX
- ADDQ AX, acc2
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ p256ord<>+0x18(SB), AX
- MULQ t0
- ADDQ t1, acc3
- ADCQ $0, DX
- ADDQ AX, acc3
- ADCQ DX, acc4
- ADCQ $0, acc5
- // x * y[1]
- MOVQ (8*1)(y_ptr), t0
-
- MOVQ (8*0)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc1
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*1)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc2
- ADCQ $0, DX
- ADDQ AX, acc2
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*2)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc3
- ADCQ $0, DX
- ADDQ AX, acc3
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*3)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc4
- ADCQ $0, DX
- ADDQ AX, acc4
- ADCQ DX, acc5
- ADCQ $0, acc0
- // Second reduction step
- MOVQ acc1, AX
- MULQ p256ordK0<>(SB)
- MOVQ AX, t0
-
- MOVQ p256ord<>+0x00(SB), AX
- MULQ t0
- ADDQ AX, acc1
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ p256ord<>+0x08(SB), AX
- MULQ t0
- ADDQ t1, acc2
- ADCQ $0, DX
- ADDQ AX, acc2
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ p256ord<>+0x10(SB), AX
- MULQ t0
- ADDQ t1, acc3
- ADCQ $0, DX
- ADDQ AX, acc3
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ p256ord<>+0x18(SB), AX
- MULQ t0
- ADDQ t1, acc4
- ADCQ $0, DX
- ADDQ AX, acc4
- ADCQ DX, acc5
- ADCQ $0, acc0
- // x * y[2]
- MOVQ (8*2)(y_ptr), t0
-
- MOVQ (8*0)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc2
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*1)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc3
- ADCQ $0, DX
- ADDQ AX, acc3
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*2)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc4
- ADCQ $0, DX
- ADDQ AX, acc4
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*3)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc5
- ADCQ $0, DX
- ADDQ AX, acc5
- ADCQ DX, acc0
- ADCQ $0, acc1
- // Third reduction step
- MOVQ acc2, AX
- MULQ p256ordK0<>(SB)
- MOVQ AX, t0
-
- MOVQ p256ord<>+0x00(SB), AX
- MULQ t0
- ADDQ AX, acc2
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ p256ord<>+0x08(SB), AX
- MULQ t0
- ADDQ t1, acc3
- ADCQ $0, DX
- ADDQ AX, acc3
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ p256ord<>+0x10(SB), AX
- MULQ t0
- ADDQ t1, acc4
- ADCQ $0, DX
- ADDQ AX, acc4
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ p256ord<>+0x18(SB), AX
- MULQ t0
- ADDQ t1, acc5
- ADCQ $0, DX
- ADDQ AX, acc5
- ADCQ DX, acc0
- ADCQ $0, acc1
- // x * y[3]
- MOVQ (8*3)(y_ptr), t0
-
- MOVQ (8*0)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc3
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*1)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc4
- ADCQ $0, DX
- ADDQ AX, acc4
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*2)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc5
- ADCQ $0, DX
- ADDQ AX, acc5
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*3)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc0
- ADCQ $0, DX
- ADDQ AX, acc0
- ADCQ DX, acc1
- ADCQ $0, acc2
- // Last reduction step
- MOVQ acc3, AX
- MULQ p256ordK0<>(SB)
- MOVQ AX, t0
-
- MOVQ p256ord<>+0x00(SB), AX
- MULQ t0
- ADDQ AX, acc3
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ p256ord<>+0x08(SB), AX
- MULQ t0
- ADDQ t1, acc4
- ADCQ $0, DX
- ADDQ AX, acc4
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ p256ord<>+0x10(SB), AX
- MULQ t0
- ADDQ t1, acc5
- ADCQ $0, DX
- ADDQ AX, acc5
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ p256ord<>+0x18(SB), AX
- MULQ t0
- ADDQ t1, acc0
- ADCQ $0, DX
- ADDQ AX, acc0
- ADCQ DX, acc1
- ADCQ $0, acc2
- // Copy result [255:0]
- MOVQ acc4, x_ptr
- MOVQ acc5, acc3
- MOVQ acc0, t0
- MOVQ acc1, t1
- // Subtract p256
- SUBQ p256ord<>+0x00(SB), acc4
-	SBBQ p256ord<>+0x08(SB), acc5
- SBBQ p256ord<>+0x10(SB), acc0
- SBBQ p256ord<>+0x18(SB), acc1
- SBBQ $0, acc2
-
- CMOVQCS x_ptr, acc4
- CMOVQCS acc3, acc5
- CMOVQCS t0, acc0
- CMOVQCS t1, acc1
-
- MOVQ acc4, (8*0)(res_ptr)
- MOVQ acc5, (8*1)(res_ptr)
- MOVQ acc0, (8*2)(res_ptr)
- MOVQ acc1, (8*3)(res_ptr)
-
- RET
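
Unlike the field reduction, the group order n has no exploitable shape, so
p256OrdMul performs textbook word-by-word Montgomery reduction: each step
computes m = acc0 * k0 mod 2^64 with k0 = -n^(-1) mod 2^64, and adding m*n
then clears the low word. The p256ordK0 constant declared above can be
reproduced with math/big (a sketch, not from this file):

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        n, _ := new(big.Int).SetString(
            "ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551", 16)
        r64 := new(big.Int).Lsh(big.NewInt(1), 64)
        k0 := new(big.Int).Sub(r64, new(big.Int).ModInverse(n, r64))
        fmt.Printf("%#x\n", k0) // 0xccd1c8aaee00bc4f, matching p256ordK0
    }
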
-/* ---------------------------------------*/
-// func p256OrdSqr(res, in []uint64, n int)
-TEXT ·p256OrdSqr(SB),NOSPLIT,$0
- MOVQ res+0(FP), res_ptr
- MOVQ in+24(FP), x_ptr
- MOVQ n+48(FP), BX
-
-ordSqrLoop:
-
- // y[1:] * y[0]
- MOVQ (8*0)(x_ptr), t0
-
- MOVQ (8*1)(x_ptr), AX
- MULQ t0
- MOVQ AX, acc1
- MOVQ DX, acc2
-
- MOVQ (8*2)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc2
- ADCQ $0, DX
- MOVQ DX, acc3
-
- MOVQ (8*3)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc3
- ADCQ $0, DX
- MOVQ DX, acc4
- // y[2:] * y[1]
- MOVQ (8*1)(x_ptr), t0
-
- MOVQ (8*2)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc3
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ (8*3)(x_ptr), AX
- MULQ t0
- ADDQ t1, acc4
- ADCQ $0, DX
- ADDQ AX, acc4
- ADCQ $0, DX
- MOVQ DX, acc5
- // y[3] * y[2]
- MOVQ (8*2)(x_ptr), t0
-
- MOVQ (8*3)(x_ptr), AX
- MULQ t0
- ADDQ AX, acc5
- ADCQ $0, DX
- MOVQ DX, y_ptr
- XORQ t1, t1
- // *2
- ADDQ acc1, acc1
- ADCQ acc2, acc2
- ADCQ acc3, acc3
- ADCQ acc4, acc4
- ADCQ acc5, acc5
- ADCQ y_ptr, y_ptr
- ADCQ $0, t1
- // Missing products
- MOVQ (8*0)(x_ptr), AX
- MULQ AX
- MOVQ AX, acc0
- MOVQ DX, t0
-
- MOVQ (8*1)(x_ptr), AX
- MULQ AX
- ADDQ t0, acc1
- ADCQ AX, acc2
- ADCQ $0, DX
- MOVQ DX, t0
-
- MOVQ (8*2)(x_ptr), AX
- MULQ AX
- ADDQ t0, acc3
- ADCQ AX, acc4
- ADCQ $0, DX
- MOVQ DX, t0
-
- MOVQ (8*3)(x_ptr), AX
- MULQ AX
- ADDQ t0, acc5
- ADCQ AX, y_ptr
- ADCQ DX, t1
- MOVQ t1, x_ptr
- // First reduction step
- MOVQ acc0, AX
- MULQ p256ordK0<>(SB)
- MOVQ AX, t0
-
- MOVQ p256ord<>+0x00(SB), AX
- MULQ t0
- ADDQ AX, acc0
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ p256ord<>+0x08(SB), AX
- MULQ t0
- ADDQ t1, acc1
- ADCQ $0, DX
- ADDQ AX, acc1
-
- MOVQ t0, t1
- ADCQ DX, acc2
- ADCQ $0, t1
- SUBQ t0, acc2
- SBBQ $0, t1
-
- MOVQ t0, AX
- MOVQ t0, DX
- MOVQ t0, acc0
- SHLQ $32, AX
- SHRQ $32, DX
-
- ADDQ t1, acc3
- ADCQ $0, acc0
- SUBQ AX, acc3
- SBBQ DX, acc0
- // Second reduction step
- MOVQ acc1, AX
- MULQ p256ordK0<>(SB)
- MOVQ AX, t0
-
- MOVQ p256ord<>+0x00(SB), AX
- MULQ t0
- ADDQ AX, acc1
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ p256ord<>+0x08(SB), AX
- MULQ t0
- ADDQ t1, acc2
- ADCQ $0, DX
- ADDQ AX, acc2
-
- MOVQ t0, t1
- ADCQ DX, acc3
- ADCQ $0, t1
- SUBQ t0, acc3
- SBBQ $0, t1
-
- MOVQ t0, AX
- MOVQ t0, DX
- MOVQ t0, acc1
- SHLQ $32, AX
- SHRQ $32, DX
-
- ADDQ t1, acc0
- ADCQ $0, acc1
- SUBQ AX, acc0
- SBBQ DX, acc1
- // Third reduction step
- MOVQ acc2, AX
- MULQ p256ordK0<>(SB)
- MOVQ AX, t0
-
- MOVQ p256ord<>+0x00(SB), AX
- MULQ t0
- ADDQ AX, acc2
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ p256ord<>+0x08(SB), AX
- MULQ t0
- ADDQ t1, acc3
- ADCQ $0, DX
- ADDQ AX, acc3
-
- MOVQ t0, t1
- ADCQ DX, acc0
- ADCQ $0, t1
- SUBQ t0, acc0
- SBBQ $0, t1
-
- MOVQ t0, AX
- MOVQ t0, DX
- MOVQ t0, acc2
- SHLQ $32, AX
- SHRQ $32, DX
-
- ADDQ t1, acc1
- ADCQ $0, acc2
- SUBQ AX, acc1
- SBBQ DX, acc2
- // Last reduction step
- MOVQ acc3, AX
- MULQ p256ordK0<>(SB)
- MOVQ AX, t0
-
- MOVQ p256ord<>+0x00(SB), AX
- MULQ t0
- ADDQ AX, acc3
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ p256ord<>+0x08(SB), AX
- MULQ t0
- ADDQ t1, acc0
- ADCQ $0, DX
- ADDQ AX, acc0
- ADCQ $0, DX
- MOVQ DX, t1
-
- MOVQ t0, t1
- ADCQ DX, acc1
- ADCQ $0, t1
- SUBQ t0, acc1
- SBBQ $0, t1
-
- MOVQ t0, AX
- MOVQ t0, DX
- MOVQ t0, acc3
- SHLQ $32, AX
- SHRQ $32, DX
-
- ADDQ t1, acc2
- ADCQ $0, acc3
- SUBQ AX, acc2
- SBBQ DX, acc3
- XORQ t0, t0
- // Add bits [511:256] of the sqr result
- ADCQ acc4, acc0
- ADCQ acc5, acc1
- ADCQ y_ptr, acc2
- ADCQ x_ptr, acc3
- ADCQ $0, t0
-
- MOVQ acc0, acc4
- MOVQ acc1, acc5
- MOVQ acc2, y_ptr
- MOVQ acc3, t1
- // Subtract p256
- SUBQ p256ord<>+0x00(SB), acc0
-	SBBQ p256ord<>+0x08(SB), acc1
- SBBQ p256ord<>+0x10(SB), acc2
- SBBQ p256ord<>+0x18(SB), acc3
- SBBQ $0, t0
-
- CMOVQCS acc4, acc0
- CMOVQCS acc5, acc1
- CMOVQCS y_ptr, acc2
- CMOVQCS t1, acc3
-
- MOVQ acc0, (8*0)(res_ptr)
- MOVQ acc1, (8*1)(res_ptr)
- MOVQ acc2, (8*2)(res_ptr)
- MOVQ acc3, (8*3)(res_ptr)
- MOVQ res_ptr, x_ptr
- DECQ BX
- JNE ordSqrLoop
-
- RET
-/* ---------------------------------------*/
-#undef res_ptr
-#undef x_ptr
-#undef y_ptr
-
-#undef acc0
-#undef acc1
-#undef acc2
-#undef acc3
-#undef acc4
-#undef acc5
-#undef t0
-#undef t1
-/* ---------------------------------------*/
-#define mul0 AX
-#define mul1 DX
-#define acc0 BX
-#define acc1 CX
-#define acc2 R8
-#define acc3 R9
-#define acc4 R10
-#define acc5 R11
-#define acc6 R12
-#define acc7 R13
-#define t0 R14
-#define t1 R15
-#define t2 DI
-#define t3 SI
-#define hlp BP
-/* ---------------------------------------*/
-TEXT p256SubInternal(SB),NOSPLIT,$0
- XORQ mul0, mul0
- SUBQ t0, acc4
- SBBQ t1, acc5
- SBBQ t2, acc6
- SBBQ t3, acc7
- SBBQ $0, mul0
-
- MOVQ acc4, acc0
- MOVQ acc5, acc1
- MOVQ acc6, acc2
- MOVQ acc7, acc3
-
- ADDQ $-1, acc4
- ADCQ p256const0<>(SB), acc5
- ADCQ $0, acc6
- ADCQ p256const1<>(SB), acc7
- ANDQ $1, mul0
-
- CMOVQEQ acc0, acc4
- CMOVQEQ acc1, acc5
- CMOVQEQ acc2, acc6
- CMOVQEQ acc3, acc7
-
- RET
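
p256SubInternal subtracts, and if the subtraction borrowed it adds p back,
picking the corrected value with CMOV instead of a branch. An equivalent
formulation in Go using math/bits, as a sketch (subModP is a hypothetical
helper):

    package main

    import (
        "fmt"
        "math/bits"
    )

    // p holds the little-endian 64-bit limbs of the P-256 prime.
    var p = [4]uint64{0xffffffffffffffff, 0x00000000ffffffff, 0, 0xffffffff00000001}

    func subModP(a, b [4]uint64) [4]uint64 {
        var out [4]uint64
        var borrow uint64
        for i := 0; i < 4; i++ {
            out[i], borrow = bits.Sub64(a[i], b[i], borrow)
        }
        mask := -borrow // all ones only if the subtraction went negative
        var carry uint64
        for i := 0; i < 4; i++ {
            out[i], carry = bits.Add64(out[i], p[i]&mask, carry)
        }
        return out
    }

    func main() {
        fmt.Println(subModP([4]uint64{5}, [4]uint64{7})) // the limbs of p - 2
    }
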
-/* ---------------------------------------*/
-TEXT p256MulInternal(SB),NOSPLIT,$8
- MOVQ acc4, mul0
- MULQ t0
- MOVQ mul0, acc0
- MOVQ mul1, acc1
-
- MOVQ acc4, mul0
- MULQ t1
- ADDQ mul0, acc1
- ADCQ $0, mul1
- MOVQ mul1, acc2
-
- MOVQ acc4, mul0
- MULQ t2
- ADDQ mul0, acc2
- ADCQ $0, mul1
- MOVQ mul1, acc3
-
- MOVQ acc4, mul0
- MULQ t3
- ADDQ mul0, acc3
- ADCQ $0, mul1
- MOVQ mul1, acc4
-
- MOVQ acc5, mul0
- MULQ t0
- ADDQ mul0, acc1
- ADCQ $0, mul1
- MOVQ mul1, hlp
-
- MOVQ acc5, mul0
- MULQ t1
- ADDQ hlp, acc2
- ADCQ $0, mul1
- ADDQ mul0, acc2
- ADCQ $0, mul1
- MOVQ mul1, hlp
-
- MOVQ acc5, mul0
- MULQ t2
- ADDQ hlp, acc3
- ADCQ $0, mul1
- ADDQ mul0, acc3
- ADCQ $0, mul1
- MOVQ mul1, hlp
-
- MOVQ acc5, mul0
- MULQ t3
- ADDQ hlp, acc4
- ADCQ $0, mul1
- ADDQ mul0, acc4
- ADCQ $0, mul1
- MOVQ mul1, acc5
-
- MOVQ acc6, mul0
- MULQ t0
- ADDQ mul0, acc2
- ADCQ $0, mul1
- MOVQ mul1, hlp
-
- MOVQ acc6, mul0
- MULQ t1
- ADDQ hlp, acc3
- ADCQ $0, mul1
- ADDQ mul0, acc3
- ADCQ $0, mul1
- MOVQ mul1, hlp
-
- MOVQ acc6, mul0
- MULQ t2
- ADDQ hlp, acc4
- ADCQ $0, mul1
- ADDQ mul0, acc4
- ADCQ $0, mul1
- MOVQ mul1, hlp
-
- MOVQ acc6, mul0
- MULQ t3
- ADDQ hlp, acc5
- ADCQ $0, mul1
- ADDQ mul0, acc5
- ADCQ $0, mul1
- MOVQ mul1, acc6
-
- MOVQ acc7, mul0
- MULQ t0
- ADDQ mul0, acc3
- ADCQ $0, mul1
- MOVQ mul1, hlp
-
- MOVQ acc7, mul0
- MULQ t1
- ADDQ hlp, acc4
- ADCQ $0, mul1
- ADDQ mul0, acc4
- ADCQ $0, mul1
- MOVQ mul1, hlp
-
- MOVQ acc7, mul0
- MULQ t2
- ADDQ hlp, acc5
- ADCQ $0, mul1
- ADDQ mul0, acc5
- ADCQ $0, mul1
- MOVQ mul1, hlp
-
- MOVQ acc7, mul0
- MULQ t3
- ADDQ hlp, acc6
- ADCQ $0, mul1
- ADDQ mul0, acc6
- ADCQ $0, mul1
- MOVQ mul1, acc7
- // First reduction step
- MOVQ acc0, mul0
- MOVQ acc0, hlp
- SHLQ $32, acc0
- MULQ p256const1<>(SB)
- SHRQ $32, hlp
- ADDQ acc0, acc1
- ADCQ hlp, acc2
- ADCQ mul0, acc3
- ADCQ $0, mul1
- MOVQ mul1, acc0
- // Second reduction step
- MOVQ acc1, mul0
- MOVQ acc1, hlp
- SHLQ $32, acc1
- MULQ p256const1<>(SB)
- SHRQ $32, hlp
- ADDQ acc1, acc2
- ADCQ hlp, acc3
- ADCQ mul0, acc0
- ADCQ $0, mul1
- MOVQ mul1, acc1
- // Third reduction step
- MOVQ acc2, mul0
- MOVQ acc2, hlp
- SHLQ $32, acc2
- MULQ p256const1<>(SB)
- SHRQ $32, hlp
- ADDQ acc2, acc3
- ADCQ hlp, acc0
- ADCQ mul0, acc1
- ADCQ $0, mul1
- MOVQ mul1, acc2
- // Last reduction step
- MOVQ acc3, mul0
- MOVQ acc3, hlp
- SHLQ $32, acc3
- MULQ p256const1<>(SB)
- SHRQ $32, hlp
- ADDQ acc3, acc0
- ADCQ hlp, acc1
- ADCQ mul0, acc2
- ADCQ $0, mul1
- MOVQ mul1, acc3
- MOVQ $0, BP
- // Add bits [511:256] of the result
- ADCQ acc0, acc4
- ADCQ acc1, acc5
- ADCQ acc2, acc6
- ADCQ acc3, acc7
- ADCQ $0, hlp
- // Copy result
- MOVQ acc4, acc0
- MOVQ acc5, acc1
- MOVQ acc6, acc2
- MOVQ acc7, acc3
- // Subtract p256
- SUBQ $-1, acc4
-	SBBQ p256const0<>(SB), acc5
- SBBQ $0, acc6
- SBBQ p256const1<>(SB), acc7
- SBBQ $0, hlp
- // If the result of the subtraction is negative, restore the previous result
- CMOVQCS acc0, acc4
- CMOVQCS acc1, acc5
- CMOVQCS acc2, acc6
- CMOVQCS acc3, acc7
-
- RET
-/* ---------------------------------------*/
-TEXT p256SqrInternal(SB),NOSPLIT,$8
-
- MOVQ acc4, mul0
- MULQ acc5
- MOVQ mul0, acc1
- MOVQ mul1, acc2
-
- MOVQ acc4, mul0
- MULQ acc6
- ADDQ mul0, acc2
- ADCQ $0, mul1
- MOVQ mul1, acc3
-
- MOVQ acc4, mul0
- MULQ acc7
- ADDQ mul0, acc3
- ADCQ $0, mul1
- MOVQ mul1, t0
-
- MOVQ acc5, mul0
- MULQ acc6
- ADDQ mul0, acc3
- ADCQ $0, mul1
- MOVQ mul1, hlp
-
- MOVQ acc5, mul0
- MULQ acc7
- ADDQ hlp, t0
- ADCQ $0, mul1
- ADDQ mul0, t0
- ADCQ $0, mul1
- MOVQ mul1, t1
-
- MOVQ acc6, mul0
- MULQ acc7
- ADDQ mul0, t1
- ADCQ $0, mul1
- MOVQ mul1, t2
- XORQ t3, t3
- // *2
- ADDQ acc1, acc1
- ADCQ acc2, acc2
- ADCQ acc3, acc3
- ADCQ t0, t0
- ADCQ t1, t1
- ADCQ t2, t2
- ADCQ $0, t3
- // Missing products
- MOVQ acc4, mul0
- MULQ mul0
- MOVQ mul0, acc0
- MOVQ DX, acc4
-
- MOVQ acc5, mul0
- MULQ mul0
- ADDQ acc4, acc1
- ADCQ mul0, acc2
- ADCQ $0, DX
- MOVQ DX, acc4
-
- MOVQ acc6, mul0
- MULQ mul0
- ADDQ acc4, acc3
- ADCQ mul0, t0
- ADCQ $0, DX
- MOVQ DX, acc4
-
- MOVQ acc7, mul0
- MULQ mul0
- ADDQ acc4, t1
- ADCQ mul0, t2
- ADCQ DX, t3
- // First reduction step
- MOVQ acc0, mul0
- MOVQ acc0, hlp
- SHLQ $32, acc0
- MULQ p256const1<>(SB)
- SHRQ $32, hlp
- ADDQ acc0, acc1
- ADCQ hlp, acc2
- ADCQ mul0, acc3
- ADCQ $0, mul1
- MOVQ mul1, acc0
- // Second reduction step
- MOVQ acc1, mul0
- MOVQ acc1, hlp
- SHLQ $32, acc1
- MULQ p256const1<>(SB)
- SHRQ $32, hlp
- ADDQ acc1, acc2
- ADCQ hlp, acc3
- ADCQ mul0, acc0
- ADCQ $0, mul1
- MOVQ mul1, acc1
- // Third reduction step
- MOVQ acc2, mul0
- MOVQ acc2, hlp
- SHLQ $32, acc2
- MULQ p256const1<>(SB)
- SHRQ $32, hlp
- ADDQ acc2, acc3
- ADCQ hlp, acc0
- ADCQ mul0, acc1
- ADCQ $0, mul1
- MOVQ mul1, acc2
- // Last reduction step
- MOVQ acc3, mul0
- MOVQ acc3, hlp
- SHLQ $32, acc3
- MULQ p256const1<>(SB)
- SHRQ $32, hlp
- ADDQ acc3, acc0
- ADCQ hlp, acc1
- ADCQ mul0, acc2
- ADCQ $0, mul1
- MOVQ mul1, acc3
- MOVQ $0, BP
- // Add bits [511:256] of the result
- ADCQ acc0, t0
- ADCQ acc1, t1
- ADCQ acc2, t2
- ADCQ acc3, t3
- ADCQ $0, hlp
- // Copy result
- MOVQ t0, acc4
- MOVQ t1, acc5
- MOVQ t2, acc6
- MOVQ t3, acc7
- // Subtract p256
- SUBQ $-1, acc4
-	SBBQ p256const0<>(SB), acc5
- SBBQ $0, acc6
- SBBQ p256const1<>(SB), acc7
- SBBQ $0, hlp
- // If the result of the subtraction is negative, restore the previous result
- CMOVQCS t0, acc4
- CMOVQCS t1, acc5
- CMOVQCS t2, acc6
- CMOVQCS t3, acc7
-
- RET
-/* ---------------------------------------*/
-#define p256MulBy2Inline\
- XORQ mul0, mul0;\
- ADDQ acc4, acc4;\
- ADCQ acc5, acc5;\
- ADCQ acc6, acc6;\
- ADCQ acc7, acc7;\
- ADCQ $0, mul0;\
- MOVQ acc4, t0;\
- MOVQ acc5, t1;\
- MOVQ acc6, t2;\
- MOVQ acc7, t3;\
- SUBQ $-1, t0;\
- SBBQ p256const0<>(SB), t1;\
- SBBQ $0, t2;\
- SBBQ p256const1<>(SB), t3;\
- SBBQ $0, mul0;\
- CMOVQCS acc4, t0;\
- CMOVQCS acc5, t1;\
- CMOVQCS acc6, t2;\
- CMOVQCS acc7, t3;
-/* ---------------------------------------*/
-#define p256AddInline \
- XORQ mul0, mul0;\
- ADDQ t0, acc4;\
- ADCQ t1, acc5;\
- ADCQ t2, acc6;\
- ADCQ t3, acc7;\
- ADCQ $0, mul0;\
- MOVQ acc4, t0;\
- MOVQ acc5, t1;\
- MOVQ acc6, t2;\
- MOVQ acc7, t3;\
- SUBQ $-1, t0;\
- SBBQ p256const0<>(SB), t1;\
- SBBQ $0, t2;\
- SBBQ p256const1<>(SB), t3;\
- SBBQ $0, mul0;\
- CMOVQCS acc4, t0;\
- CMOVQCS acc5, t1;\
- CMOVQCS acc6, t2;\
- CMOVQCS acc7, t3;
-/* ---------------------------------------*/
-#define LDacc(src) MOVQ src(8*0), acc4; MOVQ src(8*1), acc5; MOVQ src(8*2), acc6; MOVQ src(8*3), acc7
-#define LDt(src) MOVQ src(8*0), t0; MOVQ src(8*1), t1; MOVQ src(8*2), t2; MOVQ src(8*3), t3
-#define ST(dst) MOVQ acc4, dst(8*0); MOVQ acc5, dst(8*1); MOVQ acc6, dst(8*2); MOVQ acc7, dst(8*3)
-#define STt(dst) MOVQ t0, dst(8*0); MOVQ t1, dst(8*1); MOVQ t2, dst(8*2); MOVQ t3, dst(8*3)
-#define acc2t MOVQ acc4, t0; MOVQ acc5, t1; MOVQ acc6, t2; MOVQ acc7, t3
-#define t2acc MOVQ t0, acc4; MOVQ t1, acc5; MOVQ t2, acc6; MOVQ t3, acc7
-/* ---------------------------------------*/
-#define x1in(off) (32*0 + off)(SP)
-#define y1in(off) (32*1 + off)(SP)
-#define z1in(off) (32*2 + off)(SP)
-#define x2in(off) (32*3 + off)(SP)
-#define y2in(off) (32*4 + off)(SP)
-#define xout(off) (32*5 + off)(SP)
-#define yout(off) (32*6 + off)(SP)
-#define zout(off) (32*7 + off)(SP)
-#define s2(off) (32*8 + off)(SP)
-#define z1sqr(off) (32*9 + off)(SP)
-#define h(off) (32*10 + off)(SP)
-#define r(off) (32*11 + off)(SP)
-#define hsqr(off) (32*12 + off)(SP)
-#define rsqr(off) (32*13 + off)(SP)
-#define hcub(off) (32*14 + off)(SP)
-#define rptr (32*15)(SP)
-#define sel_save (32*15 + 8)(SP)
-#define zero_save (32*15 + 8 + 4)(SP)
-
-// func p256PointAddAffineAsm(res, in1, in2 []uint64, sign, sel, zero int)
-TEXT ·p256PointAddAffineAsm(SB),0,$512-96
- // Move input to stack in order to free registers
- MOVQ res+0(FP), AX
- MOVQ in1+24(FP), BX
- MOVQ in2+48(FP), CX
- MOVQ sign+72(FP), DX
- MOVQ sel+80(FP), t1
- MOVQ zero+88(FP), t2
-
- MOVOU (16*0)(BX), X0
- MOVOU (16*1)(BX), X1
- MOVOU (16*2)(BX), X2
- MOVOU (16*3)(BX), X3
- MOVOU (16*4)(BX), X4
- MOVOU (16*5)(BX), X5
-
- MOVOU X0, x1in(16*0)
- MOVOU X1, x1in(16*1)
- MOVOU X2, y1in(16*0)
- MOVOU X3, y1in(16*1)
- MOVOU X4, z1in(16*0)
- MOVOU X5, z1in(16*1)
-
- MOVOU (16*0)(CX), X0
- MOVOU (16*1)(CX), X1
-
- MOVOU X0, x2in(16*0)
- MOVOU X1, x2in(16*1)
- // Store pointer to result
- MOVQ mul0, rptr
- MOVL t1, sel_save
- MOVL t2, zero_save
- // Negate y2in based on sign
- MOVQ (16*2 + 8*0)(CX), acc4
- MOVQ (16*2 + 8*1)(CX), acc5
- MOVQ (16*2 + 8*2)(CX), acc6
- MOVQ (16*2 + 8*3)(CX), acc7
- MOVQ $-1, acc0
- MOVQ p256const0<>(SB), acc1
- MOVQ $0, acc2
- MOVQ p256const1<>(SB), acc3
- XORQ mul0, mul0
- // Speculatively subtract
- SUBQ acc4, acc0
- SBBQ acc5, acc1
- SBBQ acc6, acc2
- SBBQ acc7, acc3
- SBBQ $0, mul0
- MOVQ acc0, t0
- MOVQ acc1, t1
- MOVQ acc2, t2
- MOVQ acc3, t3
- // Add in case the operand was > p256
- ADDQ $-1, acc0
- ADCQ p256const0<>(SB), acc1
- ADCQ $0, acc2
- ADCQ p256const1<>(SB), acc3
- ADCQ $0, mul0
- CMOVQNE t0, acc0
- CMOVQNE t1, acc1
- CMOVQNE t2, acc2
- CMOVQNE t3, acc3
- // If condition is 0, keep original value
- TESTQ DX, DX
- CMOVQEQ acc4, acc0
- CMOVQEQ acc5, acc1
- CMOVQEQ acc6, acc2
- CMOVQEQ acc7, acc3
- // Store result
- MOVQ acc0, y2in(8*0)
- MOVQ acc1, y2in(8*1)
- MOVQ acc2, y2in(8*2)
- MOVQ acc3, y2in(8*3)
- // Begin point add
- LDacc (z1in)
- CALL p256SqrInternal(SB) // z1ˆ2
- ST (z1sqr)
-
- LDt (x2in)
- CALL p256MulInternal(SB) // x2 * z1ˆ2
-
- LDt (x1in)
- CALL p256SubInternal(SB) // h = u2 - u1
- ST (h)
-
- LDt (z1in)
- CALL p256MulInternal(SB) // z3 = h * z1
- ST (zout)
-
- LDacc (z1sqr)
- CALL p256MulInternal(SB) // z1ˆ3
-
- LDt (y2in)
- CALL p256MulInternal(SB) // s2 = y2 * z1ˆ3
- ST (s2)
-
- LDt (y1in)
- CALL p256SubInternal(SB) // r = s2 - s1
- ST (r)
-
- CALL p256SqrInternal(SB) // rsqr = rˆ2
- ST (rsqr)
-
- LDacc (h)
- CALL p256SqrInternal(SB) // hsqr = hˆ2
- ST (hsqr)
-
- LDt (h)
- CALL p256MulInternal(SB) // hcub = hˆ3
- ST (hcub)
-
- LDt (y1in)
- CALL p256MulInternal(SB) // y1 * hˆ3
- ST (s2)
-
- LDacc (x1in)
- LDt (hsqr)
- CALL p256MulInternal(SB) // u1 * hˆ2
- ST (h)
-
- p256MulBy2Inline // u1 * hˆ2 * 2, inline
- LDacc (rsqr)
- CALL p256SubInternal(SB) // rˆ2 - u1 * hˆ2 * 2
-
- LDt (hcub)
- CALL p256SubInternal(SB)
- ST (xout)
-
- MOVQ acc4, t0
- MOVQ acc5, t1
- MOVQ acc6, t2
- MOVQ acc7, t3
- LDacc (h)
- CALL p256SubInternal(SB)
-
- LDt (r)
- CALL p256MulInternal(SB)
-
- LDt (s2)
- CALL p256SubInternal(SB)
- ST (yout)
- // Load stored values from stack
- MOVQ rptr, AX
- MOVL sel_save, BX
- MOVL zero_save, CX
-	// The result is not valid if sel == 0, so conditionally keep the original point
- MOVOU xout(16*0), X0
- MOVOU xout(16*1), X1
- MOVOU yout(16*0), X2
- MOVOU yout(16*1), X3
- MOVOU zout(16*0), X4
- MOVOU zout(16*1), X5
-
- MOVL BX, X6
- MOVL CX, X7
-
- PXOR X8, X8
- PCMPEQL X9, X9
-
- PSHUFD $0, X6, X6
- PSHUFD $0, X7, X7
-
- PCMPEQL X8, X6
- PCMPEQL X8, X7
-
- MOVOU X6, X15
- PANDN X9, X15
-
- MOVOU x1in(16*0), X9
- MOVOU x1in(16*1), X10
- MOVOU y1in(16*0), X11
- MOVOU y1in(16*1), X12
- MOVOU z1in(16*0), X13
- MOVOU z1in(16*1), X14
-
- PAND X15, X0
- PAND X15, X1
- PAND X15, X2
- PAND X15, X3
- PAND X15, X4
- PAND X15, X5
-
- PAND X6, X9
- PAND X6, X10
- PAND X6, X11
- PAND X6, X12
- PAND X6, X13
- PAND X6, X14
-
- PXOR X9, X0
- PXOR X10, X1
- PXOR X11, X2
- PXOR X12, X3
- PXOR X13, X4
- PXOR X14, X5
- // Similarly if zero == 0
- PCMPEQL X9, X9
- MOVOU X7, X15
- PANDN X9, X15
-
- MOVOU x2in(16*0), X9
- MOVOU x2in(16*1), X10
- MOVOU y2in(16*0), X11
- MOVOU y2in(16*1), X12
- MOVOU p256one<>+0x00(SB), X13
- MOVOU p256one<>+0x10(SB), X14
-
- PAND X15, X0
- PAND X15, X1
- PAND X15, X2
- PAND X15, X3
- PAND X15, X4
- PAND X15, X5
-
- PAND X7, X9
- PAND X7, X10
- PAND X7, X11
- PAND X7, X12
- PAND X7, X13
- PAND X7, X14
-
- PXOR X9, X0
- PXOR X10, X1
- PXOR X11, X2
- PXOR X12, X3
- PXOR X13, X4
- PXOR X14, X5
- // Finally output the result
- MOVOU X0, (16*0)(AX)
- MOVOU X1, (16*1)(AX)
- MOVOU X2, (16*2)(AX)
- MOVOU X3, (16*3)(AX)
- MOVOU X4, (16*4)(AX)
- MOVOU X5, (16*5)(AX)
- MOVQ $0, rptr
-
- RET
-#undef x1in
-#undef y1in
-#undef z1in
-#undef x2in
-#undef y2in
-#undef xout
-#undef yout
-#undef zout
-#undef s2
-#undef z1sqr
-#undef h
-#undef r
-#undef hsqr
-#undef rsqr
-#undef hcub
-#undef rptr
-#undef sel_save
-#undef zero_save
-
-// p256IsZero returns 1 in AX if [acc4..acc7] represents zero, and 0
-// otherwise. It writes to [acc4..acc7], t0 and t1.
-TEXT p256IsZero(SB),NOSPLIT,$0
- // AX contains a flag that is set if the input is zero.
- XORQ AX, AX
- MOVQ $1, t1
-
- // Check whether [acc4..acc7] are all zero.
- MOVQ acc4, t0
- ORQ acc5, t0
- ORQ acc6, t0
- ORQ acc7, t0
-
- // Set the zero flag if so. (CMOV of a constant to a register doesn't
- // appear to be supported in Go. Thus t1 = 1.)
- CMOVQEQ t1, AX
-
- // XOR [acc4..acc7] with P and compare with zero again.
- XORQ $-1, acc4
- XORQ p256const0<>(SB), acc5
- XORQ p256const1<>(SB), acc7
- ORQ acc5, acc4
- ORQ acc6, acc4
- ORQ acc7, acc4
-
- // Set the zero flag if so.
- CMOVQEQ t1, AX
- RET
-
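
Intermediate values here may be reduced only into [0, p], so the field's
zero has two encodings, 0 and p; p256IsZero accepts both, again without
branching on the data. A functional model in Go, as a sketch (isZero64 and
fieldIsZero are hypothetical names):

    package main

    import "fmt"

    func isZero64(x uint64) uint64 { // 1 when x == 0, computed branch-free
        return ((x | -x) >> 63) ^ 1
    }

    func fieldIsZero(x [4]uint64) uint64 {
        p := [4]uint64{0xffffffffffffffff, 0x00000000ffffffff, 0, 0xffffffff00000001}
        asIs := x[0] | x[1] | x[2] | x[3]
        xorP := (x[0] ^ p[0]) | (x[1] ^ p[1]) | (x[2] ^ p[2]) | (x[3] ^ p[3])
        return isZero64(asIs) | isZero64(xorP)
    }

    func main() {
        fmt.Println(fieldIsZero([4]uint64{0, 0, 0, 0})) // 1
        fmt.Println(fieldIsZero([4]uint64{1, 0, 0, 0})) // 0
    }
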
-/* ---------------------------------------*/
-#define x1in(off) (32*0 + off)(SP)
-#define y1in(off) (32*1 + off)(SP)
-#define z1in(off) (32*2 + off)(SP)
-#define x2in(off) (32*3 + off)(SP)
-#define y2in(off) (32*4 + off)(SP)
-#define z2in(off) (32*5 + off)(SP)
-
-#define xout(off) (32*6 + off)(SP)
-#define yout(off) (32*7 + off)(SP)
-#define zout(off) (32*8 + off)(SP)
-
-#define u1(off) (32*9 + off)(SP)
-#define u2(off) (32*10 + off)(SP)
-#define s1(off) (32*11 + off)(SP)
-#define s2(off) (32*12 + off)(SP)
-#define z1sqr(off) (32*13 + off)(SP)
-#define z2sqr(off) (32*14 + off)(SP)
-#define h(off) (32*15 + off)(SP)
-#define r(off) (32*16 + off)(SP)
-#define hsqr(off) (32*17 + off)(SP)
-#define rsqr(off) (32*18 + off)(SP)
-#define hcub(off) (32*19 + off)(SP)
-#define rptr (32*20)(SP)
-#define points_eq (32*20+8)(SP)
-
-// func p256PointAddAsm(res, in1, in2 []uint64) int
-TEXT ·p256PointAddAsm(SB),0,$680-80
- // See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
- // Move input to stack in order to free registers
- MOVQ res+0(FP), AX
- MOVQ in1+24(FP), BX
- MOVQ in2+48(FP), CX
-
- MOVOU (16*0)(BX), X0
- MOVOU (16*1)(BX), X1
- MOVOU (16*2)(BX), X2
- MOVOU (16*3)(BX), X3
- MOVOU (16*4)(BX), X4
- MOVOU (16*5)(BX), X5
-
- MOVOU X0, x1in(16*0)
- MOVOU X1, x1in(16*1)
- MOVOU X2, y1in(16*0)
- MOVOU X3, y1in(16*1)
- MOVOU X4, z1in(16*0)
- MOVOU X5, z1in(16*1)
-
- MOVOU (16*0)(CX), X0
- MOVOU (16*1)(CX), X1
- MOVOU (16*2)(CX), X2
- MOVOU (16*3)(CX), X3
- MOVOU (16*4)(CX), X4
- MOVOU (16*5)(CX), X5
-
- MOVOU X0, x2in(16*0)
- MOVOU X1, x2in(16*1)
- MOVOU X2, y2in(16*0)
- MOVOU X3, y2in(16*1)
- MOVOU X4, z2in(16*0)
- MOVOU X5, z2in(16*1)
- // Store pointer to result
- MOVQ AX, rptr
- // Begin point add
- LDacc (z2in)
- CALL p256SqrInternal(SB) // z2ˆ2
- ST (z2sqr)
- LDt (z2in)
- CALL p256MulInternal(SB) // z2ˆ3
- LDt (y1in)
- CALL p256MulInternal(SB) // s1 = z2ˆ3*y1
- ST (s1)
-
- LDacc (z1in)
- CALL p256SqrInternal(SB) // z1ˆ2
- ST (z1sqr)
- LDt (z1in)
- CALL p256MulInternal(SB) // z1ˆ3
- LDt (y2in)
- CALL p256MulInternal(SB) // s2 = z1ˆ3*y2
- ST (s2)
-
- LDt (s1)
- CALL p256SubInternal(SB) // r = s2 - s1
- ST (r)
- CALL p256IsZero(SB)
- MOVQ AX, points_eq
-
- LDacc (z2sqr)
- LDt (x1in)
- CALL p256MulInternal(SB) // u1 = x1 * z2ˆ2
- ST (u1)
- LDacc (z1sqr)
- LDt (x2in)
- CALL p256MulInternal(SB) // u2 = x2 * z1ˆ2
- ST (u2)
-
- LDt (u1)
- CALL p256SubInternal(SB) // h = u2 - u1
- ST (h)
- CALL p256IsZero(SB)
- ANDQ points_eq, AX
- MOVQ AX, points_eq
-
- LDacc (r)
- CALL p256SqrInternal(SB) // rsqr = rˆ2
- ST (rsqr)
-
- LDacc (h)
- CALL p256SqrInternal(SB) // hsqr = hˆ2
- ST (hsqr)
-
- LDt (h)
- CALL p256MulInternal(SB) // hcub = hˆ3
- ST (hcub)
-
- LDt (s1)
- CALL p256MulInternal(SB)
- ST (s2)
-
- LDacc (z1in)
- LDt (z2in)
- CALL p256MulInternal(SB) // z1 * z2
- LDt (h)
- CALL p256MulInternal(SB) // z1 * z2 * h
- ST (zout)
-
- LDacc (hsqr)
- LDt (u1)
- CALL p256MulInternal(SB) // hˆ2 * u1
- ST (u2)
-
- p256MulBy2Inline // u1 * hˆ2 * 2, inline
- LDacc (rsqr)
- CALL p256SubInternal(SB) // rˆ2 - u1 * hˆ2 * 2
-
- LDt (hcub)
- CALL p256SubInternal(SB)
- ST (xout)
-
- MOVQ acc4, t0
- MOVQ acc5, t1
- MOVQ acc6, t2
- MOVQ acc7, t3
- LDacc (u2)
- CALL p256SubInternal(SB)
-
- LDt (r)
- CALL p256MulInternal(SB)
-
- LDt (s2)
- CALL p256SubInternal(SB)
- ST (yout)
-
- MOVOU xout(16*0), X0
- MOVOU xout(16*1), X1
- MOVOU yout(16*0), X2
- MOVOU yout(16*1), X3
- MOVOU zout(16*0), X4
- MOVOU zout(16*1), X5
- // Finally output the result
- MOVQ rptr, AX
- MOVQ $0, rptr
- MOVOU X0, (16*0)(AX)
- MOVOU X1, (16*1)(AX)
- MOVOU X2, (16*2)(AX)
- MOVOU X3, (16*3)(AX)
- MOVOU X4, (16*4)(AX)
- MOVOU X5, (16*5)(AX)
-
- MOVQ points_eq, AX
- MOVQ AX, ret+72(FP)
-
- RET
-#undef x1in
-#undef y1in
-#undef z1in
-#undef x2in
-#undef y2in
-#undef z2in
-#undef xout
-#undef yout
-#undef zout
-#undef s1
-#undef s2
-#undef u1
-#undef u2
-#undef z1sqr
-#undef z2sqr
-#undef h
-#undef r
-#undef hsqr
-#undef rsqr
-#undef hcub
-#undef rptr
-/* ---------------------------------------*/
-#define x(off) (32*0 + off)(SP)
-#define y(off) (32*1 + off)(SP)
-#define z(off) (32*2 + off)(SP)
-
-#define s(off) (32*3 + off)(SP)
-#define m(off) (32*4 + off)(SP)
-#define zsqr(off) (32*5 + off)(SP)
-#define tmp(off) (32*6 + off)(SP)
-#define rptr (32*7)(SP)
-
-// func p256PointDoubleAsm(res, in []uint64)
-TEXT ·p256PointDoubleAsm(SB),NOSPLIT,$256-48
- // Move input to stack in order to free registers
- MOVQ res+0(FP), AX
- MOVQ in+24(FP), BX
-
- MOVOU (16*0)(BX), X0
- MOVOU (16*1)(BX), X1
- MOVOU (16*2)(BX), X2
- MOVOU (16*3)(BX), X3
- MOVOU (16*4)(BX), X4
- MOVOU (16*5)(BX), X5
-
- MOVOU X0, x(16*0)
- MOVOU X1, x(16*1)
- MOVOU X2, y(16*0)
- MOVOU X3, y(16*1)
- MOVOU X4, z(16*0)
- MOVOU X5, z(16*1)
- // Store pointer to result
- MOVQ AX, rptr
- // Begin point double
- LDacc (z)
- CALL p256SqrInternal(SB)
- ST (zsqr)
-
- LDt (x)
- p256AddInline
- STt (m)
-
- LDacc (z)
- LDt (y)
- CALL p256MulInternal(SB)
- p256MulBy2Inline
- MOVQ rptr, AX
- // Store z
- MOVQ t0, (16*4 + 8*0)(AX)
- MOVQ t1, (16*4 + 8*1)(AX)
- MOVQ t2, (16*4 + 8*2)(AX)
- MOVQ t3, (16*4 + 8*3)(AX)
-
- LDacc (x)
- LDt (zsqr)
- CALL p256SubInternal(SB)
- LDt (m)
- CALL p256MulInternal(SB)
- ST (m)
- // Multiply by 3
- p256MulBy2Inline
- LDacc (m)
- p256AddInline
- STt (m)
- ////////////////////////
- LDacc (y)
- p256MulBy2Inline
- t2acc
- CALL p256SqrInternal(SB)
- ST (s)
- CALL p256SqrInternal(SB)
- // Divide by 2
- XORQ mul0, mul0
- MOVQ acc4, t0
- MOVQ acc5, t1
- MOVQ acc6, t2
- MOVQ acc7, t3
-
- ADDQ $-1, acc4
- ADCQ p256const0<>(SB), acc5
- ADCQ $0, acc6
- ADCQ p256const1<>(SB), acc7
- ADCQ $0, mul0
- TESTQ $1, t0
-
- CMOVQEQ t0, acc4
- CMOVQEQ t1, acc5
- CMOVQEQ t2, acc6
- CMOVQEQ t3, acc7
- ANDQ t0, mul0
-
- SHRQ $1, acc5, acc4
- SHRQ $1, acc6, acc5
- SHRQ $1, acc7, acc6
- SHRQ $1, mul0, acc7
- ST (y)
- /////////////////////////
- LDacc (x)
- LDt (s)
- CALL p256MulInternal(SB)
- ST (s)
- p256MulBy2Inline
- STt (tmp)
-
- LDacc (m)
- CALL p256SqrInternal(SB)
- LDt (tmp)
- CALL p256SubInternal(SB)
-
- MOVQ rptr, AX
- // Store x
- MOVQ acc4, (16*0 + 8*0)(AX)
- MOVQ acc5, (16*0 + 8*1)(AX)
- MOVQ acc6, (16*0 + 8*2)(AX)
- MOVQ acc7, (16*0 + 8*3)(AX)
-
- acc2t
- LDacc (s)
- CALL p256SubInternal(SB)
-
- LDt (m)
- CALL p256MulInternal(SB)
-
- LDt (y)
- CALL p256SubInternal(SB)
- MOVQ rptr, AX
- // Store y
- MOVQ acc4, (16*2 + 8*0)(AX)
- MOVQ acc5, (16*2 + 8*1)(AX)
- MOVQ acc6, (16*2 + 8*2)(AX)
- MOVQ acc7, (16*2 + 8*3)(AX)
- ///////////////////////
- MOVQ $0, rptr
-
- RET
-/* ---------------------------------------*/
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/p384.go b/contrib/go/_std_1.18/src/crypto/elliptic/p384.go
deleted file mode 100644
index 33a441d090..0000000000
--- a/contrib/go/_std_1.18/src/crypto/elliptic/p384.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package elliptic
-
-import (
- "crypto/elliptic/internal/nistec"
- "crypto/rand"
- "math/big"
-)
-
-// p384Curve is a Curve implementation based on nistec.P384Point.
-//
-// It's a wrapper that exposes the big.Int-based Curve interface and encodes the
-// legacy idiosyncrasies it requires, such as invalid and infinity point
-// handling.
-//
-// To interact with the nistec package, points are encoded into and decoded from
-// properly formatted byte slices. All big.Int use is limited to this package.
-// Encoding and decoding cost about 1/1000th of the runtime of a scalar multiplication,
-// so the overhead is acceptable.
-type p384Curve struct {
- params *CurveParams
-}
-
-var p384 p384Curve
-var _ Curve = p384
-
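
The byte-slice bridge described above round-trips points through the
uncompressed encoding (0x04 || X || Y, 97 bytes for P-384). Hypothetical use
of the public API, as a sketch (not from this file):

    package main

    import (
        "crypto/elliptic"
        "crypto/rand"
        "fmt"
    )

    func main() {
        _, x, y, _ := elliptic.GenerateKey(elliptic.P384(), rand.Reader)
        buf := elliptic.Marshal(elliptic.P384(), x, y) // 0x04 || X || Y
        x2, y2 := elliptic.Unmarshal(elliptic.P384(), buf)
        fmt.Println(x.Cmp(x2) == 0 && y.Cmp(y2) == 0) // true
    }
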
-func initP384() {
- p384.params = &CurveParams{
- Name: "P-384",
- BitSize: 384,
- // FIPS 186-4, section D.1.2.4
- P: bigFromDecimal("394020061963944792122790401001436138050797392704654" +
- "46667948293404245721771496870329047266088258938001861606973112319"),
- N: bigFromDecimal("394020061963944792122790401001436138050797392704654" +
- "46667946905279627659399113263569398956308152294913554433653942643"),
- B: bigFromHex("b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088" +
- "f5013875ac656398d8a2ed19d2a85c8edd3ec2aef"),
- Gx: bigFromHex("aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741" +
- "e082542a385502f25dbf55296c3a545e3872760ab7"),
- Gy: bigFromHex("3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da31" +
- "13b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f"),
- }
-}
-
-func (curve p384Curve) Params() *CurveParams {
- return curve.params
-}
-
-func (curve p384Curve) IsOnCurve(x, y *big.Int) bool {
- // IsOnCurve is documented to reject (0, 0), the conventional point at
-	// infinity, which p384PointFromAffine would otherwise accept.
- if x.Sign() == 0 && y.Sign() == 0 {
- return false
- }
- _, ok := p384PointFromAffine(x, y)
- return ok
-}
-
-func p384PointFromAffine(x, y *big.Int) (p *nistec.P384Point, ok bool) {
- // (0, 0) is by convention the point at infinity, which can't be represented
- // in affine coordinates. Marshal incorrectly encodes it as an uncompressed
- // point, which SetBytes would correctly reject. See Issue 37294.
- if x.Sign() == 0 && y.Sign() == 0 {
- return nistec.NewP384Point(), true
- }
- if x.Sign() < 0 || y.Sign() < 0 {
- return nil, false
- }
- if x.BitLen() > 384 || y.BitLen() > 384 {
- return nil, false
- }
- p, err := nistec.NewP384Point().SetBytes(Marshal(P384(), x, y))
- if err != nil {
- return nil, false
- }
- return p, true
-}
-
-func p384PointToAffine(p *nistec.P384Point) (x, y *big.Int) {
- out := p.Bytes()
- if len(out) == 1 && out[0] == 0 {
- // This is the correct encoding of the point at infinity, which
- // Unmarshal does not support. See Issue 37294.
- return new(big.Int), new(big.Int)
- }
- x, y = Unmarshal(P384(), out)
- if x == nil {
- panic("crypto/elliptic: internal error: Unmarshal rejected a valid point encoding")
- }
- return x, y
-}
-
-// p384RandomPoint returns a random point on the curve. It's used when Add,
-// Double, or ScalarMult are fed a point not on the curve, which is undefined
-// behavior. Originally, we did the math on it anyway (which allows
-// invalid curve attacks) and relied on the caller and Unmarshal to avoid this
-// happening in the first place. Now, we just can't construct a nistec.P384Point
-// for an invalid pair of coordinates, because that API is safer. If we panic,
-// we risk introducing a DoS. If we return nil, we risk a panic. If we return
-// the input, ecdsa.Verify might fail open. The safest course seems to be to
-// return a valid, random point, which hopefully won't help the attacker.
-func p384RandomPoint() (x, y *big.Int) {
- _, x, y, err := GenerateKey(P384(), rand.Reader)
- if err != nil {
- panic("crypto/elliptic: failed to generate random point")
- }
- return x, y
-}
-
-func (p384Curve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
- p1, ok := p384PointFromAffine(x1, y1)
- if !ok {
- return p384RandomPoint()
- }
- p2, ok := p384PointFromAffine(x2, y2)
- if !ok {
- return p384RandomPoint()
- }
- return p384PointToAffine(p1.Add(p1, p2))
-}
-
-func (p384Curve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
- p, ok := p384PointFromAffine(x1, y1)
- if !ok {
- return p384RandomPoint()
- }
- return p384PointToAffine(p.Double(p))
-}
-
-func (p384Curve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) {
- p, ok := p384PointFromAffine(Bx, By)
- if !ok {
- return p384RandomPoint()
- }
- return p384PointToAffine(p.ScalarMult(p, scalar))
-}
-
-func (p384Curve) ScalarBaseMult(scalar []byte) (*big.Int, *big.Int) {
- p := nistec.NewP384Generator()
- return p384PointToAffine(p.ScalarMult(p, scalar))
-}
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/p521.go b/contrib/go/_std_1.18/src/crypto/elliptic/p521.go
deleted file mode 100644
index 6a3ade3c36..0000000000
--- a/contrib/go/_std_1.18/src/crypto/elliptic/p521.go
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package elliptic
-
-import (
- "crypto/elliptic/internal/nistec"
- "crypto/rand"
- "math/big"
-)
-
-// p521Curve is a Curve implementation based on nistec.P521Point.
-//
-// It's a wrapper that exposes the big.Int-based Curve interface and encodes the
-// legacy idiosyncrasies it requires, such as invalid and infinity point
-// handling.
-//
-// To interact with the nistec package, points are encoded into and decoded from
-// properly formatted byte slices. All big.Int use is limited to this package.
-// Encoding and decoding cost about 1/1000th of the runtime of a scalar multiplication,
-// so the overhead is acceptable.
-type p521Curve struct {
- params *CurveParams
-}
-
-var p521 p521Curve
-var _ Curve = p521
-
-func initP521() {
- p521.params = &CurveParams{
- Name: "P-521",
- BitSize: 521,
- // FIPS 186-4, section D.1.2.5
- P: bigFromDecimal("68647976601306097149819007990813932172694353001433" +
- "0540939446345918554318339765605212255964066145455497729631139148" +
- "0858037121987999716643812574028291115057151"),
- N: bigFromDecimal("68647976601306097149819007990813932172694353001433" +
- "0540939446345918554318339765539424505774633321719753296399637136" +
- "3321113864768612440380340372808892707005449"),
- B: bigFromHex("0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8" +
- "b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef" +
- "451fd46b503f00"),
- Gx: bigFromHex("00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f8" +
- "28af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf9" +
- "7e7e31c2e5bd66"),
- Gy: bigFromHex("011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817" +
- "afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088" +
- "be94769fd16650"),
- }
-}
-
-func (curve p521Curve) Params() *CurveParams {
- return curve.params
-}
-
-func (curve p521Curve) IsOnCurve(x, y *big.Int) bool {
-	// IsOnCurve is documented to reject (0, 0), the conventional point at
-	// infinity, even though p521PointFromAffine accepts it.
- if x.Sign() == 0 && y.Sign() == 0 {
- return false
- }
- _, ok := p521PointFromAffine(x, y)
- return ok
-}
-
-func p521PointFromAffine(x, y *big.Int) (p *nistec.P521Point, ok bool) {
- // (0, 0) is by convention the point at infinity, which can't be represented
- // in affine coordinates. Marshal incorrectly encodes it as an uncompressed
- // point, which SetBytes would correctly reject. See Issue 37294.
- if x.Sign() == 0 && y.Sign() == 0 {
- return nistec.NewP521Point(), true
- }
- if x.Sign() < 0 || y.Sign() < 0 {
- return nil, false
- }
- if x.BitLen() > 521 || y.BitLen() > 521 {
- return nil, false
- }
- p, err := nistec.NewP521Point().SetBytes(Marshal(P521(), x, y))
- if err != nil {
- return nil, false
- }
- return p, true
-}
-
-func p521PointToAffine(p *nistec.P521Point) (x, y *big.Int) {
- out := p.Bytes()
- if len(out) == 1 && out[0] == 0 {
- // This is the correct encoding of the point at infinity, which
- // Unmarshal does not support. See Issue 37294.
- return new(big.Int), new(big.Int)
- }
- x, y = Unmarshal(P521(), out)
- if x == nil {
- panic("crypto/elliptic: internal error: Unmarshal rejected a valid point encoding")
- }
- return x, y
-}
-
-// p521RandomPoint returns a random point on the curve. It's used when Add,
-// Double, or ScalarMult are fed a point not on the curve, which is undefined
-// behavior. Originally, we did the math on it anyway (which allows
-// invalid curve attacks) and relied on the caller and Unmarshal to avoid this
-// happening in the first place. Now, we just can't construct a nistec.P521Point
-// for an invalid pair of coordinates, because that API is safer. If we panic,
-// we risk introducing a DoS. If we return nil, we risk a panic. If we return
-// the input, ecdsa.Verify might fail open. The safest course seems to be to
-// return a valid, random point, which hopefully won't help the attacker.
-func p521RandomPoint() (x, y *big.Int) {
- _, x, y, err := GenerateKey(P521(), rand.Reader)
- if err != nil {
- panic("crypto/elliptic: failed to generate random point")
- }
- return x, y
-}
-
-func (p521Curve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
- p1, ok := p521PointFromAffine(x1, y1)
- if !ok {
- return p521RandomPoint()
- }
- p2, ok := p521PointFromAffine(x2, y2)
- if !ok {
- return p521RandomPoint()
- }
- return p521PointToAffine(p1.Add(p1, p2))
-}
-
-func (p521Curve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
- p, ok := p521PointFromAffine(x1, y1)
- if !ok {
- return p521RandomPoint()
- }
- return p521PointToAffine(p.Double(p))
-}
-
-func (p521Curve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) {
- p, ok := p521PointFromAffine(Bx, By)
- if !ok {
- return p521RandomPoint()
- }
- return p521PointToAffine(p.ScalarMult(p, scalar))
-}
-
-func (p521Curve) ScalarBaseMult(scalar []byte) (*big.Int, *big.Int) {
- p := nistec.NewP521Generator()
- return p521PointToAffine(p.ScalarMult(p, scalar))
-}
-
-func bigFromDecimal(s string) *big.Int {
- b, ok := new(big.Int).SetString(s, 10)
- if !ok {
- panic("invalid encoding")
- }
- return b
-}
-
-func bigFromHex(s string) *big.Int {
- b, ok := new(big.Int).SetString(s, 16)
- if !ok {
- panic("invalid encoding")
- }
- return b
-}
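Since the wrapper round-trips every point through Marshal and Unmarshal, a quick sketch of that encoding round trip using the public API:

	package main

	import (
		"crypto/elliptic"
		"crypto/rand"
		"fmt"
	)

	func main() {
		curve := elliptic.P521()
		_, x, y, err := elliptic.GenerateKey(curve, rand.Reader)
		if err != nil {
			panic(err)
		}
		buf := elliptic.Marshal(curve, x, y) // uncompressed form: 0x04 || X || Y
		x2, y2 := elliptic.Unmarshal(curve, buf)
		fmt.Println(x.Cmp(x2) == 0 && y.Cmp(y2) == 0) // true
	}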
diff --git a/contrib/go/_std_1.18/src/crypto/hmac/hmac.go b/contrib/go/_std_1.18/src/crypto/hmac/hmac.go
deleted file mode 100644
index cdda33c2cb..0000000000
--- a/contrib/go/_std_1.18/src/crypto/hmac/hmac.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package hmac implements the Keyed-Hash Message Authentication Code (HMAC) as
-defined in U.S. Federal Information Processing Standards Publication 198.
-An HMAC is a cryptographic hash that uses a key to sign a message.
-The receiver verifies the hash by recomputing it using the same key.
-
-Receivers should be careful to use Equal to compare MACs in order to avoid
-timing side-channels:
-
- // ValidMAC reports whether messageMAC is a valid HMAC tag for message.
- func ValidMAC(message, messageMAC, key []byte) bool {
- mac := hmac.New(sha256.New, key)
- mac.Write(message)
- expectedMAC := mac.Sum(nil)
- return hmac.Equal(messageMAC, expectedMAC)
- }
-*/
-package hmac
-
-import (
- "crypto/subtle"
- "hash"
-)
-
-// FIPS 198-1:
-// https://csrc.nist.gov/publications/fips/fips198-1/FIPS-198-1_final.pdf
-
-// key is zero padded to the block size of the hash function
-// ipad = 0x36 byte repeated for key length
-// opad = 0x5c byte repeated for key length
-// hmac = H([key ^ opad] H([key ^ ipad] text))
-
-// marshalable is the combination of encoding.BinaryMarshaler and
-// encoding.BinaryUnmarshaler. Their method definitions are repeated here to
-// avoid a dependency on the encoding package.
-type marshalable interface {
- MarshalBinary() ([]byte, error)
- UnmarshalBinary([]byte) error
-}
-
-type hmac struct {
- opad, ipad []byte
- outer, inner hash.Hash
-
- // If marshaled is true, then opad and ipad do not contain a padded
- // copy of the key, but rather the marshaled state of outer/inner after
- // opad/ipad has been fed into it.
- marshaled bool
-}
-
-func (h *hmac) Sum(in []byte) []byte {
- origLen := len(in)
- in = h.inner.Sum(in)
-
- if h.marshaled {
- if err := h.outer.(marshalable).UnmarshalBinary(h.opad); err != nil {
- panic(err)
- }
- } else {
- h.outer.Reset()
- h.outer.Write(h.opad)
- }
- h.outer.Write(in[origLen:])
- return h.outer.Sum(in[:origLen])
-}
-
-func (h *hmac) Write(p []byte) (n int, err error) {
- return h.inner.Write(p)
-}
-
-func (h *hmac) Size() int { return h.outer.Size() }
-func (h *hmac) BlockSize() int { return h.inner.BlockSize() }
-
-func (h *hmac) Reset() {
- if h.marshaled {
- if err := h.inner.(marshalable).UnmarshalBinary(h.ipad); err != nil {
- panic(err)
- }
- return
- }
-
- h.inner.Reset()
- h.inner.Write(h.ipad)
-
- // If the underlying hash is marshalable, we can save some time by
- // saving a copy of the hash state now, and restoring it on future
- // calls to Reset and Sum instead of writing ipad/opad every time.
- //
- // If either hash is unmarshalable for whatever reason,
- // it's safe to bail out here.
- marshalableInner, innerOK := h.inner.(marshalable)
- if !innerOK {
- return
- }
- marshalableOuter, outerOK := h.outer.(marshalable)
- if !outerOK {
- return
- }
-
- imarshal, err := marshalableInner.MarshalBinary()
- if err != nil {
- return
- }
-
- h.outer.Reset()
- h.outer.Write(h.opad)
- omarshal, err := marshalableOuter.MarshalBinary()
- if err != nil {
- return
- }
-
- // Marshaling succeeded; save the marshaled state for later
- h.ipad = imarshal
- h.opad = omarshal
- h.marshaled = true
-}
-
-// New returns a new HMAC hash using the given hash.Hash type and key.
-// New functions like sha256.New from crypto/sha256 can be used as h.
-// h must return a new Hash every time it is called.
-// Note that unlike other hash implementations in the standard library,
-// the returned Hash does not implement encoding.BinaryMarshaler
-// or encoding.BinaryUnmarshaler.
-func New(h func() hash.Hash, key []byte) hash.Hash {
- hm := new(hmac)
- hm.outer = h()
- hm.inner = h()
- unique := true
- func() {
- defer func() {
- // The comparison might panic if the underlying types are not comparable.
- _ = recover()
- }()
- if hm.outer == hm.inner {
- unique = false
- }
- }()
- if !unique {
- panic("crypto/hmac: hash generation function does not produce unique values")
- }
- blocksize := hm.inner.BlockSize()
- hm.ipad = make([]byte, blocksize)
- hm.opad = make([]byte, blocksize)
- if len(key) > blocksize {
- // If key is too big, hash it.
- hm.outer.Write(key)
- key = hm.outer.Sum(nil)
- }
- copy(hm.ipad, key)
- copy(hm.opad, key)
- for i := range hm.ipad {
- hm.ipad[i] ^= 0x36
- }
- for i := range hm.opad {
- hm.opad[i] ^= 0x5c
- }
- hm.inner.Write(hm.ipad)
-
- return hm
-}
-
-// Equal compares two MACs for equality without leaking timing information.
-func Equal(mac1, mac2 []byte) bool {
- // We don't have to be constant time if the lengths of the MACs are
- // different as that suggests that a completely different hash function
- // was used.
- return subtle.ConstantTimeCompare(mac1, mac2) == 1
-}
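A minimal usage sketch of the package, mirroring the ValidMAC example from the package documentation (key and message values are illustrative):

	package main

	import (
		"crypto/hmac"
		"crypto/sha256"
		"fmt"
	)

	func main() {
		key := []byte("illustrative key")
		msg := []byte("illustrative message")

		mac := hmac.New(sha256.New, key)
		mac.Write(msg) // hash.Hash writes never return an error
		tag := mac.Sum(nil)

		// Equal compares MACs in constant time; never use bytes.Equal here.
		fmt.Println(hmac.Equal(tag, tag)) // true
	}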
diff --git a/contrib/go/_std_1.18/src/crypto/rand/eagain.go b/contrib/go/_std_1.18/src/crypto/rand/eagain.go
deleted file mode 100644
index f018e75931..0000000000
--- a/contrib/go/_std_1.18/src/crypto/rand/eagain.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-package rand
-
-import (
- "io/fs"
- "syscall"
-)
-
-func init() {
- isEAGAIN = unixIsEAGAIN
-}
-
-// unixIsEAGAIN reports whether err is a syscall.EAGAIN wrapped in a PathError.
-// See golang.org/issue/9205
-func unixIsEAGAIN(err error) bool {
- if pe, ok := err.(*fs.PathError); ok {
- if errno, ok := pe.Err.(syscall.Errno); ok && errno == syscall.EAGAIN {
- return true
- }
- }
- return false
-}
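The type assertions above predate the errors package helpers; on a modern toolchain an equivalent Unix-only check could plausibly be written with errors.As and errors.Is, sketched here for comparison (isEAGAINSketch is an illustrative name, not part of the package):

	package main

	import (
		"errors"
		"fmt"
		"io/fs"
		"syscall"
	)

	// isEAGAINSketch mirrors unixIsEAGAIN using errors.As/errors.Is.
	func isEAGAINSketch(err error) bool {
		var pe *fs.PathError
		return errors.As(err, &pe) && errors.Is(pe.Err, syscall.EAGAIN)
	}

	func main() {
		err := &fs.PathError{Op: "read", Path: "/dev/urandom", Err: syscall.EAGAIN}
		fmt.Println(isEAGAINSketch(err)) // true
	}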
diff --git a/contrib/go/_std_1.18/src/crypto/rand/rand_batched.go b/contrib/go/_std_1.18/src/crypto/rand/rand_batched.go
deleted file mode 100644
index c267f6a31a..0000000000
--- a/contrib/go/_std_1.18/src/crypto/rand/rand_batched.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux || freebsd || dragonfly || solaris
-
-package rand
-
-import (
- "errors"
- "internal/syscall/unix"
-)
-
-// maxGetRandomRead is platform dependent.
-func init() {
- altGetRandom = batched(getRandomBatch, maxGetRandomRead)
-}
-
-// If the kernel is too old to support the getrandom() syscall,
-// unix.GetRandom will immediately return ENOSYS and we will then fall back to
-// reading from /dev/urandom in rand_unix.go. unix.GetRandom caches the ENOSYS
-// result so we only suffer the syscall overhead once in this case.
-// If the kernel supports the getrandom() syscall, unix.GetRandom will block
-// until the kernel has sufficient randomness (as we don't use GRND_NONBLOCK).
-// In this case, unix.GetRandom will not return an error.
-func getRandomBatch(p []byte) (err error) {
- n, err := unix.GetRandom(p, 0)
- if n != len(p) {
- return errors.New("short read")
- }
- return err
-}
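The batched helper referenced in init lives elsewhere in the package; a hedged sketch of what such a chunking wrapper might look like (illustrative only, the real helper's signature may differ):

	package main

	import "fmt"

	// batchedSketch wraps f so that no single call receives more than max
	// bytes, matching the per-call limit of the getrandom() syscall.
	func batchedSketch(f func([]byte) error, max int) func([]byte) error {
		return func(p []byte) error {
			for len(p) > max {
				if err := f(p[:max]); err != nil {
					return err
				}
				p = p[max:]
			}
			if len(p) > 0 {
				return f(p)
			}
			return nil
		}
	}

	func main() {
		var sizes []int
		read := batchedSketch(func(b []byte) error {
			sizes = append(sizes, len(b))
			return nil
		}, 4)
		_ = read(make([]byte, 10))
		fmt.Println(sizes) // [4 4 2]
	}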
diff --git a/contrib/go/_std_1.18/src/crypto/rand/rand_getentropy.go b/contrib/go/_std_1.18/src/crypto/rand/rand_getentropy.go
deleted file mode 100644
index d9e551097f..0000000000
--- a/contrib/go/_std_1.18/src/crypto/rand/rand_getentropy.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (darwin && !ios) || openbsd
-
-package rand
-
-import (
- "internal/syscall/unix"
-)
-
-func init() {
- altGetRandom = getEntropy
-}
-
-func getEntropy(p []byte) error {
- // getentropy(2) returns a maximum of 256 bytes per call
- for i := 0; i < len(p); i += 256 {
- end := i + 256
- if len(p) < end {
- end = len(p)
- }
- err := unix.GetEntropy(p[i:end])
- if err != nil {
- return err
- }
- }
- return nil
-}
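Because of this chunking, a caller can ask for more than 256 bytes in a single read; a minimal sketch:

	package main

	import (
		"crypto/rand"
		"fmt"
	)

	func main() {
		buf := make([]byte, 1024) // well past the 256-byte getentropy(2) limit
		n, err := rand.Read(buf)
		fmt.Println(n, err) // 1024 <nil>
	}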
diff --git a/contrib/go/_std_1.18/src/crypto/rand/rand_linux.go b/contrib/go/_std_1.18/src/crypto/rand/rand_linux.go
deleted file mode 100644
index 26b93c54d2..0000000000
--- a/contrib/go/_std_1.18/src/crypto/rand/rand_linux.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rand
-
-// maxGetRandomRead is the maximum number of bytes to ask for in one call to the
-// getrandom() syscall. On Linux at most 2^25-1 bytes will be returned per call.
-// From the manpage:
-//
-// * When reading from the urandom source, a maximum of 33554431 bytes
-// is returned by a single call to getrandom() on systems where int
-// has a size of 32 bits.
-const maxGetRandomRead = (1 << 25) - 1
diff --git a/contrib/go/_std_1.18/src/crypto/rand/rand_unix.go b/contrib/go/_std_1.18/src/crypto/rand/rand_unix.go
deleted file mode 100644
index 811c65f0e9..0000000000
--- a/contrib/go/_std_1.18/src/crypto/rand/rand_unix.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || plan9 || solaris
-
-// Unix cryptographically secure pseudorandom number
-// generator.
-
-package rand
-
-import (
- "bufio"
- "crypto/aes"
- "crypto/cipher"
- "encoding/binary"
- "io"
- "os"
- "runtime"
- "sync"
- "sync/atomic"
- "time"
-)
-
-const urandomDevice = "/dev/urandom"
-
-// Easy implementation: read from /dev/urandom.
-// This is sufficient on Linux, OS X, and FreeBSD.
-
-func init() {
- if runtime.GOOS == "plan9" {
- Reader = newReader(nil)
- } else {
- Reader = &devReader{name: urandomDevice}
- }
-}
-
-// A devReader satisfies reads by reading the file named name.
-type devReader struct {
- name string
- f io.Reader
- mu sync.Mutex
- used int32 // atomic; whether this devReader has been used
-}
-
-// altGetRandom if non-nil specifies an OS-specific function to get
-// urandom-style randomness.
-var altGetRandom func([]byte) (err error)
-
-func warnBlocked() {
- println("crypto/rand: blocked for 60 seconds waiting to read random data from the kernel")
-}
-
-func (r *devReader) Read(b []byte) (n int, err error) {
- if atomic.CompareAndSwapInt32(&r.used, 0, 1) {
- // First use of randomness. Start timer to warn about
- // being blocked on entropy not being available.
- t := time.AfterFunc(60*time.Second, warnBlocked)
- defer t.Stop()
- }
- if altGetRandom != nil && r.name == urandomDevice && altGetRandom(b) == nil {
- return len(b), nil
- }
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.f == nil {
- f, err := os.Open(r.name)
- if f == nil {
- return 0, err
- }
- if runtime.GOOS == "plan9" {
- r.f = f
- } else {
- r.f = bufio.NewReader(hideAgainReader{f})
- }
- }
- return r.f.Read(b)
-}
-
-var isEAGAIN func(error) bool // set by eagain.go on unix systems
-
-// hideAgainReader masks EAGAIN reads from /dev/urandom.
-// See golang.org/issue/9205
-type hideAgainReader struct {
- r io.Reader
-}
-
-func (hr hideAgainReader) Read(p []byte) (n int, err error) {
- n, err = hr.r.Read(p)
- if err != nil && isEAGAIN != nil && isEAGAIN(err) {
- err = nil
- }
- return
-}
-
-// Alternate pseudo-random implementation for use on
-// systems without a reliable /dev/urandom.
-
-// newReader returns a new pseudorandom generator that
-// seeds itself by reading from entropy. If entropy == nil,
-// the generator seeds itself by reading from the system's
-// random number generator, typically /dev/random.
-// The Read method on the returned reader always returns
-// the full amount asked for, or else it returns an error.
-//
-// The generator uses the X9.31 algorithm with AES-128,
-// reseeding after every 1 MB of generated data.
-func newReader(entropy io.Reader) io.Reader {
- if entropy == nil {
- entropy = &devReader{name: "/dev/random"}
- }
- return &reader{entropy: entropy}
-}
-
-type reader struct {
- mu sync.Mutex
- budget int // number of bytes that can be generated
- cipher cipher.Block
- entropy io.Reader
- time, seed, dst, key [aes.BlockSize]byte
-}
-
-func (r *reader) Read(b []byte) (n int, err error) {
- r.mu.Lock()
- defer r.mu.Unlock()
- n = len(b)
-
- for len(b) > 0 {
- if r.budget == 0 {
- _, err := io.ReadFull(r.entropy, r.seed[0:])
- if err != nil {
- return n - len(b), err
- }
- _, err = io.ReadFull(r.entropy, r.key[0:])
- if err != nil {
- return n - len(b), err
- }
- r.cipher, err = aes.NewCipher(r.key[0:])
- if err != nil {
- return n - len(b), err
- }
- r.budget = 1 << 20 // reseed after generating 1MB
- }
- r.budget -= aes.BlockSize
-
- // ANSI X9.31 (== X9.17) algorithm, but using AES in place of 3DES.
- //
- // single block:
- // t = encrypt(time)
- // dst = encrypt(t^seed)
- // seed = encrypt(t^dst)
- ns := time.Now().UnixNano()
- binary.BigEndian.PutUint64(r.time[:], uint64(ns))
- r.cipher.Encrypt(r.time[0:], r.time[0:])
- for i := 0; i < aes.BlockSize; i++ {
- r.dst[i] = r.time[i] ^ r.seed[i]
- }
- r.cipher.Encrypt(r.dst[0:], r.dst[0:])
- for i := 0; i < aes.BlockSize; i++ {
- r.seed[i] = r.time[i] ^ r.dst[i]
- }
- r.cipher.Encrypt(r.seed[0:], r.seed[0:])
-
- m := copy(b, r.dst[0:])
- b = b[m:]
- }
-
- return n, nil
-}
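All of this machinery is reached through the package-level Reader; a minimal consumer sketch:

	package main

	import (
		"crypto/rand"
		"fmt"
		"io"
	)

	func main() {
		key := make([]byte, 32)
		if _, err := io.ReadFull(rand.Reader, key); err != nil {
			panic(err)
		}
		fmt.Printf("%x\n", key)
	}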
diff --git a/contrib/go/_std_1.18/src/crypto/rand/util.go b/contrib/go/_std_1.18/src/crypto/rand/util.go
deleted file mode 100644
index 4dd1711203..0000000000
--- a/contrib/go/_std_1.18/src/crypto/rand/util.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rand
-
-import (
- "errors"
- "io"
- "math/big"
-)
-
-// smallPrimes is a list of small, prime numbers that allows us to rapidly
-// exclude some fraction of composite candidates when searching for a random
-// prime. This list is truncated at the point where smallPrimesProduct exceeds
-// a uint64. It does not include two because we ensure that the candidates are
-// odd by construction.
-var smallPrimes = []uint8{
- 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53,
-}
-
-// smallPrimesProduct is the product of the values in smallPrimes and allows us
-// to reduce a candidate prime by this number and then determine whether it's
-// coprime to all the elements of smallPrimes without further big.Int
-// operations.
-var smallPrimesProduct = new(big.Int).SetUint64(16294579238595022365)
-
-// Prime returns a number, p, of the given size, such that p is prime
-// with high probability.
-// Prime will return an error for any error returned by rand.Read or if bits < 2.
-func Prime(rand io.Reader, bits int) (p *big.Int, err error) {
- if bits < 2 {
- err = errors.New("crypto/rand: prime size must be at least 2-bit")
- return
- }
-
- b := uint(bits % 8)
- if b == 0 {
- b = 8
- }
-
- bytes := make([]byte, (bits+7)/8)
- p = new(big.Int)
-
- bigMod := new(big.Int)
-
- for {
- _, err = io.ReadFull(rand, bytes)
- if err != nil {
- return nil, err
- }
-
- // Clear bits in the first byte to make sure the candidate has a size <= bits.
- bytes[0] &= uint8(int(1<<b) - 1)
- // Don't let the value be too small, i.e, set the most significant two bits.
- // Setting the top two bits, rather than just the top bit,
- // means that when two of these values are multiplied together,
- // the result isn't ever one bit short.
- if b >= 2 {
- bytes[0] |= 3 << (b - 2)
- } else {
- // Here b==1, because b cannot be zero.
- bytes[0] |= 1
- if len(bytes) > 1 {
- bytes[1] |= 0x80
- }
- }
- // Make the value odd since an even number this large certainly isn't prime.
- bytes[len(bytes)-1] |= 1
-
- p.SetBytes(bytes)
-
- // Calculate the value mod the product of smallPrimes. If it's
- // a multiple of any of these primes we add two until it isn't.
- // The probability of overflowing is minimal and can be ignored
- // because we still perform Miller-Rabin tests on the result.
- bigMod.Mod(p, smallPrimesProduct)
- mod := bigMod.Uint64()
-
- NextDelta:
- for delta := uint64(0); delta < 1<<20; delta += 2 {
- m := mod + delta
- for _, prime := range smallPrimes {
- if m%uint64(prime) == 0 && (bits > 6 || m != uint64(prime)) {
- continue NextDelta
- }
- }
-
- if delta > 0 {
- bigMod.SetUint64(delta)
- p.Add(p, bigMod)
- }
- break
- }
-
- // There is a tiny possibility that, by adding delta, we caused
- // the number to be one bit too long. Thus we check BitLen
- // here.
- if p.ProbablyPrime(20) && p.BitLen() == bits {
- return
- }
- }
-}
-
-// Int returns a uniform random value in [0, max). It panics if max <= 0.
-func Int(rand io.Reader, max *big.Int) (n *big.Int, err error) {
- if max.Sign() <= 0 {
- panic("crypto/rand: argument to Int is <= 0")
- }
- n = new(big.Int)
- n.Sub(max, n.SetUint64(1))
- // bitLen is the maximum bit length needed to encode a value < max.
- bitLen := n.BitLen()
- if bitLen == 0 {
- // the only valid result is 0
- return
- }
- // k is the maximum byte length needed to encode a value < max.
- k := (bitLen + 7) / 8
- // b is the number of bits in the most significant byte of max-1.
- b := uint(bitLen % 8)
- if b == 0 {
- b = 8
- }
-
- bytes := make([]byte, k)
-
- for {
- _, err = io.ReadFull(rand, bytes)
- if err != nil {
- return nil, err
- }
-
- // Clear bits in the first byte to increase the probability
- // that the candidate is < max.
- bytes[0] &= uint8(int(1<<b) - 1)
-
- n.SetBytes(bytes)
- if n.Cmp(max) < 0 {
- return
- }
- }
-}
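A short usage sketch of both helpers (the bit size and bound are arbitrary):

	package main

	import (
		"crypto/rand"
		"fmt"
		"math/big"
	)

	func main() {
		p, err := rand.Prime(rand.Reader, 256) // 256-bit probable prime
		if err != nil {
			panic(err)
		}
		n, err := rand.Int(rand.Reader, big.NewInt(100)) // uniform in [0, 100)
		if err != nil {
			panic(err)
		}
		fmt.Println(p.BitLen() == 256, n.Cmp(big.NewInt(100)) < 0) // true true
	}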
diff --git a/contrib/go/_std_1.18/src/crypto/rsa/pkcs1v15.go b/contrib/go/_std_1.18/src/crypto/rsa/pkcs1v15.go
deleted file mode 100644
index 0cbd6d0045..0000000000
--- a/contrib/go/_std_1.18/src/crypto/rsa/pkcs1v15.go
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rsa
-
-import (
- "crypto"
- "crypto/subtle"
- "errors"
- "io"
- "math/big"
-
- "crypto/internal/randutil"
-)
-
-// This file implements encryption and decryption using PKCS #1 v1.5 padding.
-
-// PKCS1v15DecryptOptions is for passing options to PKCS #1 v1.5 decryption using
-// the crypto.Decrypter interface.
-type PKCS1v15DecryptOptions struct {
- // SessionKeyLen is the length of the session key that is being
- // decrypted. If not zero, then a padding error during decryption will
- // cause a random plaintext of this length to be returned rather than
- // an error. These alternatives happen in constant time.
- SessionKeyLen int
-}
-
-// EncryptPKCS1v15 encrypts the given message with RSA and the padding
-// scheme from PKCS #1 v1.5. The message must be no longer than the
-// length of the public modulus minus 11 bytes.
-//
-// The rand parameter is used as a source of entropy to ensure that
-// encrypting the same message twice doesn't result in the same
-// ciphertext.
-//
-// WARNING: use of this function to encrypt plaintexts other than
-// session keys is dangerous. Use RSA OAEP in new protocols.
-func EncryptPKCS1v15(rand io.Reader, pub *PublicKey, msg []byte) ([]byte, error) {
- randutil.MaybeReadByte(rand)
-
- if err := checkPub(pub); err != nil {
- return nil, err
- }
- k := pub.Size()
- if len(msg) > k-11 {
- return nil, ErrMessageTooLong
- }
-
- // EM = 0x00 || 0x02 || PS || 0x00 || M
- em := make([]byte, k)
- em[1] = 2
- ps, mm := em[2:len(em)-len(msg)-1], em[len(em)-len(msg):]
- err := nonZeroRandomBytes(ps, rand)
- if err != nil {
- return nil, err
- }
- em[len(em)-len(msg)-1] = 0
- copy(mm, msg)
-
- m := new(big.Int).SetBytes(em)
- c := encrypt(new(big.Int), pub, m)
-
- return c.FillBytes(em), nil
-}
-
-// DecryptPKCS1v15 decrypts a plaintext using RSA and the padding scheme from PKCS #1 v1.5.
-// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks.
-//
-// Note that whether this function returns an error or not discloses secret
-// information. If an attacker can cause this function to run repeatedly and
-// learn whether each instance returned an error then they can decrypt and
-// forge signatures as if they had the private key. See
-// DecryptPKCS1v15SessionKey for a way of solving this problem.
-func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error) {
- if err := checkPub(&priv.PublicKey); err != nil {
- return nil, err
- }
- valid, out, index, err := decryptPKCS1v15(rand, priv, ciphertext)
- if err != nil {
- return nil, err
- }
- if valid == 0 {
- return nil, ErrDecryption
- }
- return out[index:], nil
-}
-
-// DecryptPKCS1v15SessionKey decrypts a session key using RSA and the padding scheme from PKCS #1 v1.5.
-// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks.
-// It returns an error if the ciphertext is the wrong length or if the
-// ciphertext is greater than the public modulus. Otherwise, no error is
-// returned. If the padding is valid, the resulting plaintext message is copied
-// into key. Otherwise, key is unchanged. These alternatives occur in constant
-// time. It is intended that the user of this function generate a random
-// session key beforehand and continue the protocol with the resulting value.
-// This will remove any possibility that an attacker can learn any information
-// about the plaintext.
-// See "Chosen Ciphertext Attacks Against Protocols Based on the RSA
-// Encryption Standard PKCS #1", Daniel Bleichenbacher, Advances in Cryptology
-// (Crypto '98).
-//
-// Note that if the session key is too small then it may be possible for an
-// attacker to brute-force it. If they can do that then they can learn whether
-// a random value was used (because it'll be different for the same ciphertext)
-// and thus whether the padding was correct. This defeats the point of this
-// function. Using at least a 16-byte key will protect against this attack.
-func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error {
- if err := checkPub(&priv.PublicKey); err != nil {
- return err
- }
- k := priv.Size()
- if k-(len(key)+3+8) < 0 {
- return ErrDecryption
- }
-
- valid, em, index, err := decryptPKCS1v15(rand, priv, ciphertext)
- if err != nil {
- return err
- }
-
- if len(em) != k {
- // This should be impossible because decryptPKCS1v15 always
- // returns the full slice.
- return ErrDecryption
- }
-
- valid &= subtle.ConstantTimeEq(int32(len(em)-index), int32(len(key)))
- subtle.ConstantTimeCopy(valid, key, em[len(em)-len(key):])
- return nil
-}
-
-// decryptPKCS1v15 decrypts ciphertext using priv and blinds the operation if
-// rand is not nil. It returns one or zero in valid that indicates whether the
-// plaintext was correctly structured. In either case, the plaintext is
-// returned in em so that it may be read independently of whether it was valid
-// in order to maintain constant memory access patterns. If the plaintext was
-// valid then index contains the index of the original message in em.
-func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) {
- k := priv.Size()
- if k < 11 {
- err = ErrDecryption
- return
- }
-
- c := new(big.Int).SetBytes(ciphertext)
- m, err := decrypt(rand, priv, c)
- if err != nil {
- return
- }
-
- em = m.FillBytes(make([]byte, k))
- firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
- secondByteIsTwo := subtle.ConstantTimeByteEq(em[1], 2)
-
- // The remainder of the plaintext must be a string of non-zero random
- // octets, followed by a 0, followed by the message.
- // lookingForIndex: 1 iff we are still looking for the zero.
- // index: the offset of the first zero byte.
- lookingForIndex := 1
-
- for i := 2; i < len(em); i++ {
- equals0 := subtle.ConstantTimeByteEq(em[i], 0)
- index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
- lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
- }
-
- // The PS padding must be at least 8 bytes long, and it starts two
- // bytes into em.
- validPS := subtle.ConstantTimeLessOrEq(2+8, index)
-
- valid = firstByteIsZero & secondByteIsTwo & (^lookingForIndex & 1) & validPS
- index = subtle.ConstantTimeSelect(valid, index+1, 0)
- return valid, em, index, nil
-}
-
-// nonZeroRandomBytes fills the given slice with non-zero random octets.
-func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
- _, err = io.ReadFull(rand, s)
- if err != nil {
- return
- }
-
- for i := 0; i < len(s); i++ {
- for s[i] == 0 {
- _, err = io.ReadFull(rand, s[i:i+1])
- if err != nil {
- return
- }
- // In tests, the PRNG may return all zeros so we do
- // this to break the loop.
- s[i] ^= 0x42
- }
- }
-
- return
-}
-
-// These are ASN1 DER structures:
-// DigestInfo ::= SEQUENCE {
-// digestAlgorithm AlgorithmIdentifier,
-// digest OCTET STRING
-// }
-// For performance, we don't use the generic ASN1 encoder. Rather, we
-// precompute a prefix of the digest value that makes a valid ASN1 DER string
-// with the correct contents.
-var hashPrefixes = map[crypto.Hash][]byte{
- crypto.MD5: {0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, 0x05, 0x00, 0x04, 0x10},
- crypto.SHA1: {0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03, 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14},
- crypto.SHA224: {0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1c},
- crypto.SHA256: {0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20},
- crypto.SHA384: {0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30},
- crypto.SHA512: {0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40},
- crypto.MD5SHA1: {}, // A special TLS case which doesn't use an ASN1 prefix.
- crypto.RIPEMD160: {0x30, 0x20, 0x30, 0x08, 0x06, 0x06, 0x28, 0xcf, 0x06, 0x03, 0x00, 0x31, 0x04, 0x14},
-}
-
-// SignPKCS1v15 calculates the signature of hashed using
-// RSASSA-PKCS1-V1_5-SIGN from RSA PKCS #1 v1.5. Note that hashed must
-// be the result of hashing the input message using the given hash
-// function. If hash is zero, hashed is signed directly. This isn't
-// advisable except for interoperability.
-//
-// If rand is not nil then RSA blinding will be used to avoid timing
-// side-channel attacks.
-//
-// This function is deterministic. Thus, if the set of possible
-// messages is small, an attacker may be able to build a map from
-// messages to signatures and identify the signed messages. As ever,
-// signatures provide authenticity, not confidentiality.
-func SignPKCS1v15(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error) {
- hashLen, prefix, err := pkcs1v15HashInfo(hash, len(hashed))
- if err != nil {
- return nil, err
- }
-
- tLen := len(prefix) + hashLen
- k := priv.Size()
- if k < tLen+11 {
- return nil, ErrMessageTooLong
- }
-
- // EM = 0x00 || 0x01 || PS || 0x00 || T
- em := make([]byte, k)
- em[1] = 1
- for i := 2; i < k-tLen-1; i++ {
- em[i] = 0xff
- }
- copy(em[k-tLen:k-hashLen], prefix)
- copy(em[k-hashLen:k], hashed)
-
- m := new(big.Int).SetBytes(em)
- c, err := decryptAndCheck(rand, priv, m)
- if err != nil {
- return nil, err
- }
-
- return c.FillBytes(em), nil
-}
-
-// VerifyPKCS1v15 verifies an RSA PKCS #1 v1.5 signature.
-// hashed is the result of hashing the input message using the given hash
-// function and sig is the signature. A valid signature is indicated by
-// returning a nil error. If hash is zero then hashed is used directly. This
-// isn't advisable except for interoperability.
-func VerifyPKCS1v15(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) error {
- hashLen, prefix, err := pkcs1v15HashInfo(hash, len(hashed))
- if err != nil {
- return err
- }
-
- tLen := len(prefix) + hashLen
- k := pub.Size()
- if k < tLen+11 {
- return ErrVerification
- }
-
- // RFC 8017 Section 8.2.2: If the length of the signature S is not k
- // octets (where k is the length in octets of the RSA modulus n), output
- // "invalid signature" and stop.
- if k != len(sig) {
- return ErrVerification
- }
-
- c := new(big.Int).SetBytes(sig)
- m := encrypt(new(big.Int), pub, c)
- em := m.FillBytes(make([]byte, k))
- // EM = 0x00 || 0x01 || PS || 0x00 || T
-
- ok := subtle.ConstantTimeByteEq(em[0], 0)
- ok &= subtle.ConstantTimeByteEq(em[1], 1)
- ok &= subtle.ConstantTimeCompare(em[k-hashLen:k], hashed)
- ok &= subtle.ConstantTimeCompare(em[k-tLen:k-hashLen], prefix)
- ok &= subtle.ConstantTimeByteEq(em[k-tLen-1], 0)
-
- for i := 2; i < k-tLen-1; i++ {
- ok &= subtle.ConstantTimeByteEq(em[i], 0xff)
- }
-
- if ok != 1 {
- return ErrVerification
- }
-
- return nil
-}
-
-func pkcs1v15HashInfo(hash crypto.Hash, inLen int) (hashLen int, prefix []byte, err error) {
- // Special case: crypto.Hash(0) is used to indicate that the data is
- // signed directly.
- if hash == 0 {
- return inLen, nil, nil
- }
-
- hashLen = hash.Size()
- if inLen != hashLen {
- return 0, nil, errors.New("crypto/rsa: input must be hashed message")
- }
- prefix, ok := hashPrefixes[hash]
- if !ok {
- return 0, nil, errors.New("crypto/rsa: unsupported hash function")
- }
- return
-}
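An encrypt/decrypt round trip over these functions, sketched with a freshly generated key (2048 bits is an arbitrary but common choice):

	package main

	import (
		"crypto/rand"
		"crypto/rsa"
		"fmt"
	)

	func main() {
		priv, err := rsa.GenerateKey(rand.Reader, 2048)
		if err != nil {
			panic(err)
		}
		ct, err := rsa.EncryptPKCS1v15(rand.Reader, &priv.PublicKey, []byte("session key"))
		if err != nil {
			panic(err)
		}
		pt, err := rsa.DecryptPKCS1v15(rand.Reader, priv, ct)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s\n", pt) // session key
	}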
diff --git a/contrib/go/_std_1.18/src/crypto/rsa/pss.go b/contrib/go/_std_1.18/src/crypto/rsa/pss.go
deleted file mode 100644
index 814522de81..0000000000
--- a/contrib/go/_std_1.18/src/crypto/rsa/pss.go
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rsa
-
-// This file implements the RSASSA-PSS signature scheme according to RFC 8017.
-
-import (
- "bytes"
- "crypto"
- "errors"
- "hash"
- "io"
- "math/big"
-)
-
-// Per RFC 8017, Section 9.1
-//
-// EM = MGF1 xor DB || H( 8*0x00 || mHash || salt ) || 0xbc
-//
-// where
-//
-// DB = PS || 0x01 || salt
-//
-// and PS can be empty so
-//
-// emLen = dbLen + hLen + 1 = psLen + sLen + hLen + 2
-//
-
-func emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byte, error) {
- // See RFC 8017, Section 9.1.1.
-
- hLen := hash.Size()
- sLen := len(salt)
- emLen := (emBits + 7) / 8
-
- // 1. If the length of M is greater than the input limitation for the
- // hash function (2^61 - 1 octets for SHA-1), output "message too
- // long" and stop.
- //
- // 2. Let mHash = Hash(M), an octet string of length hLen.
-
- if len(mHash) != hLen {
- return nil, errors.New("crypto/rsa: input must be hashed with given hash")
- }
-
- // 3. If emLen < hLen + sLen + 2, output "encoding error" and stop.
-
- if emLen < hLen+sLen+2 {
- return nil, errors.New("crypto/rsa: key size too small for PSS signature")
- }
-
- em := make([]byte, emLen)
- psLen := emLen - sLen - hLen - 2
- db := em[:psLen+1+sLen]
- h := em[psLen+1+sLen : emLen-1]
-
- // 4. Generate a random octet string salt of length sLen; if sLen = 0,
- // then salt is the empty string.
- //
- // 5. Let
- // M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt;
- //
- // M' is an octet string of length 8 + hLen + sLen with eight
- // initial zero octets.
- //
- // 6. Let H = Hash(M'), an octet string of length hLen.
-
- var prefix [8]byte
-
- hash.Write(prefix[:])
- hash.Write(mHash)
- hash.Write(salt)
-
- h = hash.Sum(h[:0])
- hash.Reset()
-
- // 7. Generate an octet string PS consisting of emLen - sLen - hLen - 2
- // zero octets. The length of PS may be 0.
- //
- // 8. Let DB = PS || 0x01 || salt; DB is an octet string of length
- // emLen - hLen - 1.
-
- db[psLen] = 0x01
- copy(db[psLen+1:], salt)
-
- // 9. Let dbMask = MGF(H, emLen - hLen - 1).
- //
- // 10. Let maskedDB = DB \xor dbMask.
-
- mgf1XOR(db, hash, h)
-
- // 11. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in
- // maskedDB to zero.
-
- db[0] &= 0xff >> (8*emLen - emBits)
-
- // 12. Let EM = maskedDB || H || 0xbc.
- em[emLen-1] = 0xbc
-
- // 13. Output EM.
- return em, nil
-}
-
-func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
- // See RFC 8017, Section 9.1.2.
-
- hLen := hash.Size()
- if sLen == PSSSaltLengthEqualsHash {
- sLen = hLen
- }
- emLen := (emBits + 7) / 8
- if emLen != len(em) {
- return errors.New("rsa: internal error: inconsistent length")
- }
-
- // 1. If the length of M is greater than the input limitation for the
- // hash function (2^61 - 1 octets for SHA-1), output "inconsistent"
- // and stop.
- //
- // 2. Let mHash = Hash(M), an octet string of length hLen.
- if hLen != len(mHash) {
- return ErrVerification
- }
-
- // 3. If emLen < hLen + sLen + 2, output "inconsistent" and stop.
- if emLen < hLen+sLen+2 {
- return ErrVerification
- }
-
- // 4. If the rightmost octet of EM does not have hexadecimal value
- // 0xbc, output "inconsistent" and stop.
- if em[emLen-1] != 0xbc {
- return ErrVerification
- }
-
- // 5. Let maskedDB be the leftmost emLen - hLen - 1 octets of EM, and
- // let H be the next hLen octets.
- db := em[:emLen-hLen-1]
- h := em[emLen-hLen-1 : emLen-1]
-
- // 6. If the leftmost 8 * emLen - emBits bits of the leftmost octet in
- // maskedDB are not all equal to zero, output "inconsistent" and
- // stop.
- var bitMask byte = 0xff >> (8*emLen - emBits)
- if em[0] & ^bitMask != 0 {
- return ErrVerification
- }
-
- // 7. Let dbMask = MGF(H, emLen - hLen - 1).
- //
- // 8. Let DB = maskedDB \xor dbMask.
- mgf1XOR(db, hash, h)
-
- // 9. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in DB
- // to zero.
- db[0] &= bitMask
-
- // If we don't know the salt length, look for the 0x01 delimiter.
- if sLen == PSSSaltLengthAuto {
- psLen := bytes.IndexByte(db, 0x01)
- if psLen < 0 {
- return ErrVerification
- }
- sLen = len(db) - psLen - 1
- }
-
- // 10. If the emLen - hLen - sLen - 2 leftmost octets of DB are not zero
- // or if the octet at position emLen - hLen - sLen - 1 (the leftmost
- // position is "position 1") does not have hexadecimal value 0x01,
- // output "inconsistent" and stop.
- psLen := emLen - hLen - sLen - 2
- for _, e := range db[:psLen] {
- if e != 0x00 {
- return ErrVerification
- }
- }
- if db[psLen] != 0x01 {
- return ErrVerification
- }
-
- // 11. Let salt be the last sLen octets of DB.
- salt := db[len(db)-sLen:]
-
- // 12. Let
- // M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt ;
- // M' is an octet string of length 8 + hLen + sLen with eight
- // initial zero octets.
- //
- // 13. Let H' = Hash(M'), an octet string of length hLen.
- var prefix [8]byte
- hash.Write(prefix[:])
- hash.Write(mHash)
- hash.Write(salt)
-
- h0 := hash.Sum(nil)
-
- // 14. If H = H', output "consistent." Otherwise, output "inconsistent."
- if !bytes.Equal(h0, h) { // TODO: constant time?
- return ErrVerification
- }
- return nil
-}
-
-// signPSSWithSalt calculates the signature of hashed using PSS with specified salt.
-// Note that hashed must be the result of hashing the input message using the
-// given hash function. salt is a random sequence of bytes whose length will be
-// later used to verify the signature.
-func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) ([]byte, error) {
- emBits := priv.N.BitLen() - 1
- em, err := emsaPSSEncode(hashed, emBits, salt, hash.New())
- if err != nil {
- return nil, err
- }
- m := new(big.Int).SetBytes(em)
- c, err := decryptAndCheck(rand, priv, m)
- if err != nil {
- return nil, err
- }
- s := make([]byte, priv.Size())
- return c.FillBytes(s), nil
-}
-
-const (
- // PSSSaltLengthAuto causes the salt in a PSS signature to be as large
- // as possible when signing, and to be auto-detected when verifying.
- PSSSaltLengthAuto = 0
- // PSSSaltLengthEqualsHash causes the salt length to equal the length
- // of the hash used in the signature.
- PSSSaltLengthEqualsHash = -1
-)
-
-// PSSOptions contains options for creating and verifying PSS signatures.
-type PSSOptions struct {
- // SaltLength controls the length of the salt used in the PSS
- // signature. It can either be a number of bytes, or one of the special
- // PSSSaltLength constants.
- SaltLength int
-
- // Hash is the hash function used to generate the message digest. If not
- // zero, it overrides the hash function passed to SignPSS. It's required
- // when using PrivateKey.Sign.
- Hash crypto.Hash
-}
-
-// HashFunc returns opts.Hash so that PSSOptions implements crypto.SignerOpts.
-func (opts *PSSOptions) HashFunc() crypto.Hash {
- return opts.Hash
-}
-
-func (opts *PSSOptions) saltLength() int {
- if opts == nil {
- return PSSSaltLengthAuto
- }
- return opts.SaltLength
-}
-
-// SignPSS calculates the signature of digest using PSS.
-//
-// digest must be the result of hashing the input message using the given hash
-// function. The opts argument may be nil, in which case sensible defaults are
-// used. If opts.Hash is set, it overrides hash.
-func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte, opts *PSSOptions) ([]byte, error) {
- if opts != nil && opts.Hash != 0 {
- hash = opts.Hash
- }
-
- saltLength := opts.saltLength()
- switch saltLength {
- case PSSSaltLengthAuto:
- saltLength = (priv.N.BitLen()-1+7)/8 - 2 - hash.Size()
- case PSSSaltLengthEqualsHash:
- saltLength = hash.Size()
- }
-
- salt := make([]byte, saltLength)
- if _, err := io.ReadFull(rand, salt); err != nil {
- return nil, err
- }
- return signPSSWithSalt(rand, priv, hash, digest, salt)
-}
-
-// VerifyPSS verifies a PSS signature.
-//
-// A valid signature is indicated by returning a nil error. digest must be the
-// result of hashing the input message using the given hash function. The opts
-// argument may be nil, in which case sensible defaults are used. opts.Hash is
-// ignored.
-func VerifyPSS(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts *PSSOptions) error {
- if len(sig) != pub.Size() {
- return ErrVerification
- }
- s := new(big.Int).SetBytes(sig)
- m := encrypt(new(big.Int), pub, s)
- emBits := pub.N.BitLen() - 1
- emLen := (emBits + 7) / 8
- if m.BitLen() > emLen*8 {
- return ErrVerification
- }
- em := m.FillBytes(make([]byte, emLen))
- return emsaPSSVerify(digest, em, emBits, opts.saltLength(), hash.New())
-}
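A sign/verify round trip for PSS; passing nil opts selects PSSSaltLengthAuto, as the saltLength accessor above shows:

	package main

	import (
		"crypto"
		"crypto/rand"
		"crypto/rsa"
		"crypto/sha256"
		"fmt"
	)

	func main() {
		priv, err := rsa.GenerateKey(rand.Reader, 2048)
		if err != nil {
			panic(err)
		}
		digest := sha256.Sum256([]byte("message"))
		sig, err := rsa.SignPSS(rand.Reader, priv, crypto.SHA256, digest[:], nil)
		if err != nil {
			panic(err)
		}
		err = rsa.VerifyPSS(&priv.PublicKey, crypto.SHA256, digest[:], sig, nil)
		fmt.Println(err == nil) // true
	}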
diff --git a/contrib/go/_std_1.18/src/crypto/rsa/rsa.go b/contrib/go/_std_1.18/src/crypto/rsa/rsa.go
deleted file mode 100644
index 6fd59b3940..0000000000
--- a/contrib/go/_std_1.18/src/crypto/rsa/rsa.go
+++ /dev/null
@@ -1,659 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package rsa implements RSA encryption as specified in PKCS #1 and RFC 8017.
-//
-// RSA is a single, fundamental operation that is used in this package to
-// implement either public-key encryption or public-key signatures.
-//
-// The original specification for encryption and signatures with RSA is PKCS #1
-// and the terms "RSA encryption" and "RSA signatures" by default refer to
-// PKCS #1 version 1.5. However, that specification has flaws and new designs
-// should use version 2, usually referred to simply as OAEP and PSS, where
-// possible.
-//
-// Two sets of interfaces are included in this package. When a more abstract
-// interface isn't necessary, there are functions for encrypting/decrypting
-// with v1.5/OAEP and signing/verifying with v1.5/PSS. If one needs to abstract
-// over the public key primitive, the PrivateKey type implements the
-// Decrypter and Signer interfaces from the crypto package.
-//
-// The RSA operations in this package are not implemented using constant-time algorithms.
-package rsa
-
-import (
- "crypto"
- "crypto/rand"
- "crypto/subtle"
- "errors"
- "hash"
- "io"
- "math"
- "math/big"
-
- "crypto/internal/randutil"
-)
-
-var bigZero = big.NewInt(0)
-var bigOne = big.NewInt(1)
-
-// A PublicKey represents the public part of an RSA key.
-type PublicKey struct {
- N *big.Int // modulus
- E int // public exponent
-}
-
-// Any methods implemented on PublicKey might need to also be implemented on
-// PrivateKey, as the latter embeds the former and will expose its methods.
-
-// Size returns the modulus size in bytes. Raw signatures and ciphertexts
-// for or by this public key will have the same size.
-func (pub *PublicKey) Size() int {
- return (pub.N.BitLen() + 7) / 8
-}
-
-// Equal reports whether pub and x have the same value.
-func (pub *PublicKey) Equal(x crypto.PublicKey) bool {
- xx, ok := x.(*PublicKey)
- if !ok {
- return false
- }
- return pub.N.Cmp(xx.N) == 0 && pub.E == xx.E
-}
-
-// OAEPOptions is an interface for passing options to OAEP decryption using the
-// crypto.Decrypter interface.
-type OAEPOptions struct {
- // Hash is the hash function that will be used when generating the mask.
- Hash crypto.Hash
- // Label is an arbitrary byte string that must be equal to the value
- // used when encrypting.
- Label []byte
-}
-
-var (
- errPublicModulus = errors.New("crypto/rsa: missing public modulus")
- errPublicExponentSmall = errors.New("crypto/rsa: public exponent too small")
- errPublicExponentLarge = errors.New("crypto/rsa: public exponent too large")
-)
-
-// checkPub sanity checks the public key before we use it.
-// We require pub.E to fit into a 32-bit integer so that we
-// do not have different behavior depending on whether
-// int is 32 or 64 bits. See also
-// https://www.imperialviolet.org/2012/03/16/rsae.html.
-func checkPub(pub *PublicKey) error {
- if pub.N == nil {
- return errPublicModulus
- }
- if pub.E < 2 {
- return errPublicExponentSmall
- }
- if pub.E > 1<<31-1 {
- return errPublicExponentLarge
- }
- return nil
-}
-
-// A PrivateKey represents an RSA key.
-type PrivateKey struct {
- PublicKey // public part.
- D *big.Int // private exponent
- Primes []*big.Int // prime factors of N, has >= 2 elements.
-
- // Precomputed contains precomputed values that speed up private
- // operations, if available.
- Precomputed PrecomputedValues
-}
-
-// Public returns the public key corresponding to priv.
-func (priv *PrivateKey) Public() crypto.PublicKey {
- return &priv.PublicKey
-}
-
-// Equal reports whether priv and x have equivalent values. It ignores
-// Precomputed values.
-func (priv *PrivateKey) Equal(x crypto.PrivateKey) bool {
- xx, ok := x.(*PrivateKey)
- if !ok {
- return false
- }
- if !priv.PublicKey.Equal(&xx.PublicKey) || priv.D.Cmp(xx.D) != 0 {
- return false
- }
- if len(priv.Primes) != len(xx.Primes) {
- return false
- }
- for i := range priv.Primes {
- if priv.Primes[i].Cmp(xx.Primes[i]) != 0 {
- return false
- }
- }
- return true
-}
-
-// Sign signs digest with priv, reading randomness from rand. If opts is a
-// *PSSOptions then the PSS algorithm will be used; otherwise PKCS #1 v1.5 will
-// be used. digest must be the result of hashing the input message using
-// opts.HashFunc().
-//
-// This method implements crypto.Signer, which is an interface to support keys
-// where the private part is kept in, for example, a hardware module. Common
-// uses should use the Sign* functions in this package directly.
-func (priv *PrivateKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
- if pssOpts, ok := opts.(*PSSOptions); ok {
- return SignPSS(rand, priv, pssOpts.Hash, digest, pssOpts)
- }
-
- return SignPKCS1v15(rand, priv, opts.HashFunc(), digest)
-}
-
-// Decrypt decrypts ciphertext with priv. If opts is nil or of type
-// *PKCS1v15DecryptOptions then PKCS #1 v1.5 decryption is performed. Otherwise
-// opts must have type *OAEPOptions and OAEP decryption is done.
-func (priv *PrivateKey) Decrypt(rand io.Reader, ciphertext []byte, opts crypto.DecrypterOpts) (plaintext []byte, err error) {
- if opts == nil {
- return DecryptPKCS1v15(rand, priv, ciphertext)
- }
-
- switch opts := opts.(type) {
- case *OAEPOptions:
- return DecryptOAEP(opts.Hash.New(), rand, priv, ciphertext, opts.Label)
-
- case *PKCS1v15DecryptOptions:
- if l := opts.SessionKeyLen; l > 0 {
- plaintext = make([]byte, l)
- if _, err := io.ReadFull(rand, plaintext); err != nil {
- return nil, err
- }
- if err := DecryptPKCS1v15SessionKey(rand, priv, ciphertext, plaintext); err != nil {
- return nil, err
- }
- return plaintext, nil
- } else {
- return DecryptPKCS1v15(rand, priv, ciphertext)
- }
-
- default:
- return nil, errors.New("crypto/rsa: invalid options for Decrypt")
- }
-}
-
-type PrecomputedValues struct {
- Dp, Dq *big.Int // D mod (P-1) (or mod Q-1)
- Qinv *big.Int // Q^-1 mod P
-
- // CRTValues is used for the 3rd and subsequent primes. Due to a
- // historical accident, the CRT for the first two primes is handled
- // differently in PKCS #1 and interoperability is sufficiently
- // important that we mirror this.
- CRTValues []CRTValue
-}
-
-// CRTValue contains the precomputed Chinese remainder theorem values.
-type CRTValue struct {
- Exp *big.Int // D mod (prime-1).
- Coeff *big.Int // R·Coeff ≡ 1 mod Prime.
- R *big.Int // product of primes prior to this (inc p and q).
-}
-
-// Validate performs basic sanity checks on the key.
-// It returns nil if the key is valid, or else an error describing a problem.
-func (priv *PrivateKey) Validate() error {
- if err := checkPub(&priv.PublicKey); err != nil {
- return err
- }
-
- // Check that Πprimes == n.
- modulus := new(big.Int).Set(bigOne)
- for _, prime := range priv.Primes {
- // Any primes ≤ 1 will cause divide-by-zero panics later.
- if prime.Cmp(bigOne) <= 0 {
- return errors.New("crypto/rsa: invalid prime value")
- }
- modulus.Mul(modulus, prime)
- }
- if modulus.Cmp(priv.N) != 0 {
- return errors.New("crypto/rsa: invalid modulus")
- }
-
- // Check that de ≡ 1 mod p-1, for each prime.
- // This implies that e is coprime to each p-1 as e has a multiplicative
- // inverse. Therefore e is coprime to lcm(p-1,q-1,r-1,...) =
- // exponent(ℤ/nℤ). It also implies that a^de ≡ a mod p as a^(p-1) ≡ 1
- // mod p. Thus a^de ≡ a mod n for all a coprime to n, as required.
- congruence := new(big.Int)
- de := new(big.Int).SetInt64(int64(priv.E))
- de.Mul(de, priv.D)
- for _, prime := range priv.Primes {
- pminus1 := new(big.Int).Sub(prime, bigOne)
- congruence.Mod(de, pminus1)
- if congruence.Cmp(bigOne) != 0 {
- return errors.New("crypto/rsa: invalid exponents")
- }
- }
- return nil
-}
-
-// GenerateKey generates an RSA keypair of the given bit size using the
-// random source random (for example, crypto/rand.Reader).
-func GenerateKey(random io.Reader, bits int) (*PrivateKey, error) {
- return GenerateMultiPrimeKey(random, 2, bits)
-}
-
-// GenerateMultiPrimeKey generates a multi-prime RSA keypair of the given bit
-// size and the given random source, as suggested in [1]. Although the public
-// keys are compatible with (actually, indistinguishable from) the 2-prime case,
-// the private keys are not. Thus it may not be possible to export multi-prime
-// private keys in certain formats or to subsequently import them into other
-// code.
-//
-// Table 1 in [2] suggests maximum numbers of primes for a given size.
-//
-// [1] US patent 4405829 (granted 1983, expired 2000)
-// [2] http://www.cacr.math.uwaterloo.ca/techreports/2006/cacr2006-16.pdf
-func GenerateMultiPrimeKey(random io.Reader, nprimes int, bits int) (*PrivateKey, error) {
- randutil.MaybeReadByte(random)
-
- priv := new(PrivateKey)
- priv.E = 65537
-
- if nprimes < 2 {
- return nil, errors.New("crypto/rsa: GenerateMultiPrimeKey: nprimes must be >= 2")
- }
-
- if bits < 64 {
- primeLimit := float64(uint64(1) << uint(bits/nprimes))
- // pi approximates the number of primes less than primeLimit
- pi := primeLimit / (math.Log(primeLimit) - 1)
- // Generated primes start with 11 (in binary) so we can only
- // use a quarter of them.
- pi /= 4
- // Use a factor of two to ensure that key generation terminates
- // in a reasonable amount of time.
- pi /= 2
- if pi <= float64(nprimes) {
- return nil, errors.New("crypto/rsa: too few primes of given length to generate an RSA key")
- }
- }
-
- primes := make([]*big.Int, nprimes)
-
-NextSetOfPrimes:
- for {
- todo := bits
- // crypto/rand should set the top two bits in each prime.
- // Thus each prime has the form
- // p_i = 2^bitlen(p_i) × 0.11... (in base 2).
- // And the product is:
- // P = 2^todo × α
- // where α is the product of nprimes numbers of the form 0.11...
- //
- // If α < 1/2 (which can happen for nprimes > 2), we need to
- // shift todo to compensate for lost bits: the mean value of 0.11...
- // is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2
- // will give good results.
- if nprimes >= 7 {
- todo += (nprimes - 2) / 5
- }
- for i := 0; i < nprimes; i++ {
- var err error
- primes[i], err = rand.Prime(random, todo/(nprimes-i))
- if err != nil {
- return nil, err
- }
- todo -= primes[i].BitLen()
- }
-
- // Make sure that primes is pairwise unequal.
- for i, prime := range primes {
- for j := 0; j < i; j++ {
- if prime.Cmp(primes[j]) == 0 {
- continue NextSetOfPrimes
- }
- }
- }
-
- n := new(big.Int).Set(bigOne)
- totient := new(big.Int).Set(bigOne)
- pminus1 := new(big.Int)
- for _, prime := range primes {
- n.Mul(n, prime)
- pminus1.Sub(prime, bigOne)
- totient.Mul(totient, pminus1)
- }
- if n.BitLen() != bits {
- // This should never happen for nprimes == 2 because
- // crypto/rand should set the top two bits in each prime.
- // For nprimes > 2 we hope it does not happen often.
- continue NextSetOfPrimes
- }
-
- priv.D = new(big.Int)
- e := big.NewInt(int64(priv.E))
- ok := priv.D.ModInverse(e, totient)
-
- if ok != nil {
- priv.Primes = primes
- priv.N = n
- break
- }
- }
-
- priv.Precompute()
- return priv, nil
-}
-
-// incCounter increments a four byte, big-endian counter.
-func incCounter(c *[4]byte) {
- if c[3]++; c[3] != 0 {
- return
- }
- if c[2]++; c[2] != 0 {
- return
- }
- if c[1]++; c[1] != 0 {
- return
- }
- c[0]++
-}
-
-// mgf1XOR XORs the bytes in out with a mask generated using the MGF1 function
-// specified in PKCS #1 v2.1.
-func mgf1XOR(out []byte, hash hash.Hash, seed []byte) {
- var counter [4]byte
- var digest []byte
-
- done := 0
- for done < len(out) {
- hash.Write(seed)
- hash.Write(counter[0:4])
- digest = hash.Sum(digest[:0])
- hash.Reset()
-
- for i := 0; i < len(digest) && done < len(out); i++ {
- out[done] ^= digest[i]
- done++
- }
- incCounter(&counter)
- }
-}
-
-// ErrMessageTooLong is returned when attempting to encrypt a message which is
-// too large for the size of the public key.
-var ErrMessageTooLong = errors.New("crypto/rsa: message too long for RSA public key size")
-
-func encrypt(c *big.Int, pub *PublicKey, m *big.Int) *big.Int {
- e := big.NewInt(int64(pub.E))
- c.Exp(m, e, pub.N)
- return c
-}
-
-// EncryptOAEP encrypts the given message with RSA-OAEP.
-//
-// OAEP is parameterised by a hash function that is used as a random oracle.
-// Encryption and decryption of a given message must use the same hash function
-// and sha256.New() is a reasonable choice.
-//
-// The random parameter is used as a source of entropy to ensure that
-// encrypting the same message twice doesn't result in the same ciphertext.
-//
-// The label parameter may contain arbitrary data that will not be encrypted,
-// but which gives important context to the message. For example, if a given
-// public key is used to encrypt two types of messages then distinct label
-// values could be used to ensure that a ciphertext for one purpose cannot be
-// used for another by an attacker. If not required, it can be empty.
-//
-// The message must be no longer than the length of the public modulus minus
-// twice the hash length, minus a further 2.
-func EncryptOAEP(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) ([]byte, error) {
- if err := checkPub(pub); err != nil {
- return nil, err
- }
- hash.Reset()
- k := pub.Size()
- if len(msg) > k-2*hash.Size()-2 {
- return nil, ErrMessageTooLong
- }
-
- hash.Write(label)
- lHash := hash.Sum(nil)
- hash.Reset()
-
- em := make([]byte, k)
- seed := em[1 : 1+hash.Size()]
- db := em[1+hash.Size():]
-
- copy(db[0:hash.Size()], lHash)
- db[len(db)-len(msg)-1] = 1
- copy(db[len(db)-len(msg):], msg)
-
- _, err := io.ReadFull(random, seed)
- if err != nil {
- return nil, err
- }
-
- mgf1XOR(db, hash, seed)
- mgf1XOR(seed, hash, db)
-
- m := new(big.Int)
- m.SetBytes(em)
- c := encrypt(new(big.Int), pub, m)
-
- out := make([]byte, k)
- return c.FillBytes(out), nil
-}
-
-// ErrDecryption represents a failure to decrypt a message.
-// It is deliberately vague to avoid adaptive attacks.
-var ErrDecryption = errors.New("crypto/rsa: decryption error")
-
-// ErrVerification represents a failure to verify a signature.
-// It is deliberately vague to avoid adaptive attacks.
-var ErrVerification = errors.New("crypto/rsa: verification error")
-
-// Precompute performs some calculations that speed up private key operations
-// in the future.
-func (priv *PrivateKey) Precompute() {
- if priv.Precomputed.Dp != nil {
- return
- }
-
- priv.Precomputed.Dp = new(big.Int).Sub(priv.Primes[0], bigOne)
- priv.Precomputed.Dp.Mod(priv.D, priv.Precomputed.Dp)
-
- priv.Precomputed.Dq = new(big.Int).Sub(priv.Primes[1], bigOne)
- priv.Precomputed.Dq.Mod(priv.D, priv.Precomputed.Dq)
-
- priv.Precomputed.Qinv = new(big.Int).ModInverse(priv.Primes[1], priv.Primes[0])
-
- r := new(big.Int).Mul(priv.Primes[0], priv.Primes[1])
- priv.Precomputed.CRTValues = make([]CRTValue, len(priv.Primes)-2)
- for i := 2; i < len(priv.Primes); i++ {
- prime := priv.Primes[i]
- values := &priv.Precomputed.CRTValues[i-2]
-
- values.Exp = new(big.Int).Sub(prime, bigOne)
- values.Exp.Mod(priv.D, values.Exp)
-
- values.R = new(big.Int).Set(r)
- values.Coeff = new(big.Int).ModInverse(r, prime)
-
- r.Mul(r, prime)
- }
-}
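
What Precompute stores, restated as the standard RSA-CRT identities (an editorial note, not text from the Go sources): with n = p·q,

    Dp   = d mod (p−1)   // private exponent reduced mod p−1
    Dq   = d mod (q−1)   // private exponent reduced mod q−1
    Qinv = q⁻¹ mod p     // Garner recombination coefficient

and decrypt below recombines the two half-size exponentiations as

    m1 = c^Dp mod p
    m2 = c^Dq mod q
    m  = m2 + q · (Qinv · (m1 − m2) mod p)

Each additional prime beyond the first two contributes an analogous CRTValue triple (Exp, Coeff, R).
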
-
-// decrypt performs an RSA decryption, resulting in a plaintext integer. If a
-// random source is given, RSA blinding is used.
-func decrypt(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) {
- // TODO(agl): can we get away with reusing blinds?
- if c.Cmp(priv.N) > 0 {
- err = ErrDecryption
- return
- }
- if priv.N.Sign() == 0 {
- return nil, ErrDecryption
- }
-
- var ir *big.Int
- if random != nil {
- randutil.MaybeReadByte(random)
-
- // Blinding enabled. Blinding involves multiplying c by r^e.
- // Then the decryption operation performs (m^e * r^e)^d mod n
- // which equals mr mod n. The factor of r can then be removed
- // by multiplying by the multiplicative inverse of r.
-
- var r *big.Int
- ir = new(big.Int)
- for {
- r, err = rand.Int(random, priv.N)
- if err != nil {
- return
- }
- if r.Cmp(bigZero) == 0 {
- r = bigOne
- }
- ok := ir.ModInverse(r, priv.N)
- if ok != nil {
- break
- }
- }
- bigE := big.NewInt(int64(priv.E))
- rpowe := new(big.Int).Exp(r, bigE, priv.N) // N != 0
- cCopy := new(big.Int).Set(c)
- cCopy.Mul(cCopy, rpowe)
- cCopy.Mod(cCopy, priv.N)
- c = cCopy
- }
-
- if priv.Precomputed.Dp == nil {
- m = new(big.Int).Exp(c, priv.D, priv.N)
- } else {
- // We have the precalculated values needed for the CRT.
- m = new(big.Int).Exp(c, priv.Precomputed.Dp, priv.Primes[0])
- m2 := new(big.Int).Exp(c, priv.Precomputed.Dq, priv.Primes[1])
- m.Sub(m, m2)
- if m.Sign() < 0 {
- m.Add(m, priv.Primes[0])
- }
- m.Mul(m, priv.Precomputed.Qinv)
- m.Mod(m, priv.Primes[0])
- m.Mul(m, priv.Primes[1])
- m.Add(m, m2)
-
- for i, values := range priv.Precomputed.CRTValues {
- prime := priv.Primes[2+i]
- m2.Exp(c, values.Exp, prime)
- m2.Sub(m2, m)
- m2.Mul(m2, values.Coeff)
- m2.Mod(m2, prime)
- if m2.Sign() < 0 {
- m2.Add(m2, prime)
- }
- m2.Mul(m2, values.R)
- m.Add(m, m2)
- }
- }
-
- if ir != nil {
- // Unblind.
- m.Mul(m, ir)
- m.Mod(m, priv.N)
- }
-
- return
-}
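
To make the blinding above concrete (standard RSA-blinding algebra, added as an editorial note): for a uniformly random r that is invertible mod n,

    c' = c · r^e mod n                      // blind the ciphertext
    (c')^d = c^d · r^(e·d) = m · r mod n    // private operation sees c', never c
    m = (m · r) · ir mod n                  // unblind with ir = r⁻¹ mod n

so the secret-exponent computation never runs directly on the attacker-supplied ciphertext, which frustrates timing side channels.
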
-
-func decryptAndCheck(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) {
- m, err = decrypt(random, priv, c)
- if err != nil {
- return nil, err
- }
-
- // In order to defend against errors in the CRT computation, m^e is
- // calculated, which should match the original ciphertext.
- check := encrypt(new(big.Int), &priv.PublicKey, m)
- if c.Cmp(check) != 0 {
- return nil, errors.New("rsa: internal error")
- }
- return m, nil
-}
-
-// DecryptOAEP decrypts ciphertext using RSA-OAEP.
-//
-// OAEP is parameterised by a hash function that is used as a random oracle.
-// Encryption and decryption of a given message must use the same hash function,
-// and sha256.New() is a reasonable choice.
-//
-// The random parameter, if not nil, is used to blind the private-key operation
-// and avoid timing side-channel attacks. Blinding is purely internal to this
-// function – the random data need not match that used when encrypting.
-//
-// The label parameter must match the value given when encrypting. See
-// EncryptOAEP for details.
-func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) ([]byte, error) {
- if err := checkPub(&priv.PublicKey); err != nil {
- return nil, err
- }
- k := priv.Size()
- if len(ciphertext) > k ||
- k < hash.Size()*2+2 {
- return nil, ErrDecryption
- }
-
- c := new(big.Int).SetBytes(ciphertext)
-
- m, err := decrypt(random, priv, c)
- if err != nil {
- return nil, err
- }
-
- hash.Write(label)
- lHash := hash.Sum(nil)
- hash.Reset()
-
- // We probably leak the number of leading zeros.
- // It's not clear that we can do anything about this.
- em := m.FillBytes(make([]byte, k))
-
- firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
-
- seed := em[1 : hash.Size()+1]
- db := em[hash.Size()+1:]
-
- mgf1XOR(seed, hash, db)
- mgf1XOR(db, hash, seed)
-
- lHash2 := db[0:hash.Size()]
-
- // We have to validate the plaintext in constant time in order to avoid
- // attacks like: J. Manger. A Chosen Ciphertext Attack on RSA Optimal
- // Asymmetric Encryption Padding (OAEP) as Standardized in PKCS #1
- // v2.0. In J. Kilian, editor, Advances in Cryptology.
- lHash2Good := subtle.ConstantTimeCompare(lHash, lHash2)
-
- // The remainder of the plaintext must be zero or more 0x00, followed
- // by 0x01, followed by the message.
- // lookingForIndex: 1 iff we are still looking for the 0x01
- // index: the offset of the first 0x01 byte
- // invalid: 1 iff we saw a non-zero byte before the 0x01.
- var lookingForIndex, index, invalid int
- lookingForIndex = 1
- rest := db[hash.Size():]
-
- for i := 0; i < len(rest); i++ {
- equals0 := subtle.ConstantTimeByteEq(rest[i], 0)
- equals1 := subtle.ConstantTimeByteEq(rest[i], 1)
- index = subtle.ConstantTimeSelect(lookingForIndex&equals1, i, index)
- lookingForIndex = subtle.ConstantTimeSelect(equals1, 0, lookingForIndex)
- invalid = subtle.ConstantTimeSelect(lookingForIndex&^equals0, 1, invalid)
- }
-
- if firstByteIsZero&lHash2Good&^invalid&^lookingForIndex != 1 {
- return nil, ErrDecryption
- }
-
- return rest[index+1:], nil
-}
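
A minimal round trip through the two functions above (a usage sketch, not part of the deleted file; the 2048-bit key size and the "orders" label are arbitrary choices):

    package main

    import (
        "crypto/rand"
        "crypto/rsa"
        "crypto/sha256"
        "fmt"
    )

    func main() {
        priv, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            panic(err)
        }
        label := []byte("orders") // must be identical for encrypt and decrypt
        ct, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, &priv.PublicKey, []byte("hello"), label)
        if err != nil {
            panic(err)
        }
        pt, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, priv, ct, label)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s\n", pt) // hello
    }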
diff --git a/contrib/go/_std_1.18/src/crypto/sha1/sha1.go b/contrib/go/_std_1.18/src/crypto/sha1/sha1.go
deleted file mode 100644
index 286a59d33d..0000000000
--- a/contrib/go/_std_1.18/src/crypto/sha1/sha1.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package sha1 implements the SHA-1 hash algorithm as defined in RFC 3174.
-//
-// SHA-1 is cryptographically broken and should not be used for secure
-// applications.
-package sha1
-
-import (
- "crypto"
- "encoding/binary"
- "errors"
- "hash"
-)
-
-func init() {
- crypto.RegisterHash(crypto.SHA1, New)
-}
-
-// The size of a SHA-1 checksum in bytes.
-const Size = 20
-
-// The blocksize of SHA-1 in bytes.
-const BlockSize = 64
-
-const (
- chunk = 64
- init0 = 0x67452301
- init1 = 0xEFCDAB89
- init2 = 0x98BADCFE
- init3 = 0x10325476
- init4 = 0xC3D2E1F0
-)
-
-// digest represents the partial evaluation of a checksum.
-type digest struct {
- h [5]uint32
- x [chunk]byte
- nx int
- len uint64
-}
-
-const (
- magic = "sha\x01"
- marshaledSize = len(magic) + 5*4 + chunk + 8
-)
-
-func (d *digest) MarshalBinary() ([]byte, error) {
- b := make([]byte, 0, marshaledSize)
- b = append(b, magic...)
- b = appendUint32(b, d.h[0])
- b = appendUint32(b, d.h[1])
- b = appendUint32(b, d.h[2])
- b = appendUint32(b, d.h[3])
- b = appendUint32(b, d.h[4])
- b = append(b, d.x[:d.nx]...)
- b = b[:len(b)+len(d.x)-int(d.nx)] // already zero
- b = appendUint64(b, d.len)
- return b, nil
-}
-
-func (d *digest) UnmarshalBinary(b []byte) error {
- if len(b) < len(magic) || string(b[:len(magic)]) != magic {
- return errors.New("crypto/sha1: invalid hash state identifier")
- }
- if len(b) != marshaledSize {
- return errors.New("crypto/sha1: invalid hash state size")
- }
- b = b[len(magic):]
- b, d.h[0] = consumeUint32(b)
- b, d.h[1] = consumeUint32(b)
- b, d.h[2] = consumeUint32(b)
- b, d.h[3] = consumeUint32(b)
- b, d.h[4] = consumeUint32(b)
- b = b[copy(d.x[:], b):]
- b, d.len = consumeUint64(b)
- d.nx = int(d.len % chunk)
- return nil
-}
-
-func appendUint64(b []byte, x uint64) []byte {
- var a [8]byte
- binary.BigEndian.PutUint64(a[:], x)
- return append(b, a[:]...)
-}
-
-func appendUint32(b []byte, x uint32) []byte {
- var a [4]byte
- binary.BigEndian.PutUint32(a[:], x)
- return append(b, a[:]...)
-}
-
-func consumeUint64(b []byte) ([]byte, uint64) {
- _ = b[7]
- x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
- uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
- return b[8:], x
-}
-
-func consumeUint32(b []byte) ([]byte, uint32) {
- _ = b[3]
- x := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
- return b[4:], x
-}
-
-func (d *digest) Reset() {
- d.h[0] = init0
- d.h[1] = init1
- d.h[2] = init2
- d.h[3] = init3
- d.h[4] = init4
- d.nx = 0
- d.len = 0
-}
-
-// New returns a new hash.Hash computing the SHA1 checksum. The Hash also
-// implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler to
-// marshal and unmarshal the internal state of the hash.
-func New() hash.Hash {
- d := new(digest)
- d.Reset()
- return d
-}
-
-func (d *digest) Size() int { return Size }
-
-func (d *digest) BlockSize() int { return BlockSize }
-
-func (d *digest) Write(p []byte) (nn int, err error) {
- nn = len(p)
- d.len += uint64(nn)
- if d.nx > 0 {
- n := copy(d.x[d.nx:], p)
- d.nx += n
- if d.nx == chunk {
- block(d, d.x[:])
- d.nx = 0
- }
- p = p[n:]
- }
- if len(p) >= chunk {
- n := len(p) &^ (chunk - 1)
- block(d, p[:n])
- p = p[n:]
- }
- if len(p) > 0 {
- d.nx = copy(d.x[:], p)
- }
- return
-}
-
-func (d *digest) Sum(in []byte) []byte {
- // Make a copy of d so that caller can keep writing and summing.
- d0 := *d
- hash := d0.checkSum()
- return append(in, hash[:]...)
-}
-
-func (d *digest) checkSum() [Size]byte {
- len := d.len
- // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
- var tmp [64]byte
- tmp[0] = 0x80
- if len%64 < 56 {
- d.Write(tmp[0 : 56-len%64])
- } else {
- d.Write(tmp[0 : 64+56-len%64])
- }
-
- // Length in bits.
- len <<= 3
- binary.BigEndian.PutUint64(tmp[:], len)
- d.Write(tmp[0:8])
-
- if d.nx != 0 {
- panic("d.nx != 0")
- }
-
- var digest [Size]byte
-
- binary.BigEndian.PutUint32(digest[0:], d.h[0])
- binary.BigEndian.PutUint32(digest[4:], d.h[1])
- binary.BigEndian.PutUint32(digest[8:], d.h[2])
- binary.BigEndian.PutUint32(digest[12:], d.h[3])
- binary.BigEndian.PutUint32(digest[16:], d.h[4])
-
- return digest
-}
-
-// ConstantTimeSum computes the same result as Sum(), but in constant time.
-func (d *digest) ConstantTimeSum(in []byte) []byte {
- d0 := *d
- hash := d0.constSum()
- return append(in, hash[:]...)
-}
-
-func (d *digest) constSum() [Size]byte {
- var length [8]byte
- l := d.len << 3
- for i := uint(0); i < 8; i++ {
- length[i] = byte(l >> (56 - 8*i))
- }
-
- nx := byte(d.nx)
- t := nx - 56 // if nx < 56 then the MSB of t is one
- mask1b := byte(int8(t) >> 7) // mask1b is 0xFF iff one block is enough
-
- separator := byte(0x80) // gets reset to 0x00 once used
- for i := byte(0); i < chunk; i++ {
- mask := byte(int8(i-nx) >> 7) // 0x00 after the end of data
-
- // if we reached the end of the data, replace with 0x80 or 0x00
- d.x[i] = (^mask & separator) | (mask & d.x[i])
-
- // zero the separator once used
- separator &= mask
-
- if i >= 56 {
- // we might have to write the length here if all fit in one block
- d.x[i] |= mask1b & length[i-56]
- }
- }
-
- // compress, and only keep the digest if all fit in one block
- block(d, d.x[:])
-
- var digest [Size]byte
- for i, s := range d.h {
- digest[i*4] = mask1b & byte(s>>24)
- digest[i*4+1] = mask1b & byte(s>>16)
- digest[i*4+2] = mask1b & byte(s>>8)
- digest[i*4+3] = mask1b & byte(s)
- }
-
- for i := byte(0); i < chunk; i++ {
- // second block, it's always past the end of data, might start with 0x80
- if i < 56 {
- d.x[i] = separator
- separator = 0
- } else {
- d.x[i] = length[i-56]
- }
- }
-
- // compress, and only keep the digest if we actually needed the second block
- block(d, d.x[:])
-
- for i, s := range d.h {
- digest[i*4] |= ^mask1b & byte(s>>24)
- digest[i*4+1] |= ^mask1b & byte(s>>16)
- digest[i*4+2] |= ^mask1b & byte(s>>8)
- digest[i*4+3] |= ^mask1b & byte(s)
- }
-
- return digest
-}
-
-// Sum returns the SHA-1 checksum of the data.
-func Sum(data []byte) [Size]byte {
- var d digest
- d.Reset()
- d.Write(data)
- return d.checkSum()
-}
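
For reference, the two usual entry points of the package, as a short sketch (not part of the deleted file) — and per the package comment above, SHA-1 is cryptographically broken, so this is for legacy interoperability only:

    package main

    import (
        "crypto/sha1"
        "fmt"
        "io"
    )

    func main() {
        // One-shot: hash a complete message in a single call.
        fmt.Printf("%x\n", sha1.Sum([]byte("hello world")))

        // Streaming: New returns a hash.Hash that accumulates writes.
        h := sha1.New()
        io.WriteString(h, "hello ")
        io.WriteString(h, "world")
        fmt.Printf("%x\n", h.Sum(nil)) // same digest as above
    }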
diff --git a/contrib/go/_std_1.18/src/crypto/sha256/sha256.go b/contrib/go/_std_1.18/src/crypto/sha256/sha256.go
deleted file mode 100644
index 659531dc71..0000000000
--- a/contrib/go/_std_1.18/src/crypto/sha256/sha256.go
+++ /dev/null
@@ -1,270 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package sha256 implements the SHA224 and SHA256 hash algorithms as defined
-// in FIPS 180-4.
-package sha256
-
-import (
- "crypto"
- "encoding/binary"
- "errors"
- "hash"
-)
-
-func init() {
- crypto.RegisterHash(crypto.SHA224, New224)
- crypto.RegisterHash(crypto.SHA256, New)
-}
-
-// The size of a SHA256 checksum in bytes.
-const Size = 32
-
-// The size of a SHA224 checksum in bytes.
-const Size224 = 28
-
-// The blocksize of SHA256 and SHA224 in bytes.
-const BlockSize = 64
-
-const (
- chunk = 64
- init0 = 0x6A09E667
- init1 = 0xBB67AE85
- init2 = 0x3C6EF372
- init3 = 0xA54FF53A
- init4 = 0x510E527F
- init5 = 0x9B05688C
- init6 = 0x1F83D9AB
- init7 = 0x5BE0CD19
- init0_224 = 0xC1059ED8
- init1_224 = 0x367CD507
- init2_224 = 0x3070DD17
- init3_224 = 0xF70E5939
- init4_224 = 0xFFC00B31
- init5_224 = 0x68581511
- init6_224 = 0x64F98FA7
- init7_224 = 0xBEFA4FA4
-)
-
-// digest represents the partial evaluation of a checksum.
-type digest struct {
- h [8]uint32
- x [chunk]byte
- nx int
- len uint64
- is224 bool // mark if this digest is SHA-224
-}
-
-const (
- magic224 = "sha\x02"
- magic256 = "sha\x03"
- marshaledSize = len(magic256) + 8*4 + chunk + 8
-)
-
-func (d *digest) MarshalBinary() ([]byte, error) {
- b := make([]byte, 0, marshaledSize)
- if d.is224 {
- b = append(b, magic224...)
- } else {
- b = append(b, magic256...)
- }
- b = appendUint32(b, d.h[0])
- b = appendUint32(b, d.h[1])
- b = appendUint32(b, d.h[2])
- b = appendUint32(b, d.h[3])
- b = appendUint32(b, d.h[4])
- b = appendUint32(b, d.h[5])
- b = appendUint32(b, d.h[6])
- b = appendUint32(b, d.h[7])
- b = append(b, d.x[:d.nx]...)
- b = b[:len(b)+len(d.x)-int(d.nx)] // already zero
- b = appendUint64(b, d.len)
- return b, nil
-}
-
-func (d *digest) UnmarshalBinary(b []byte) error {
- if len(b) < len(magic224) || (d.is224 && string(b[:len(magic224)]) != magic224) || (!d.is224 && string(b[:len(magic256)]) != magic256) {
- return errors.New("crypto/sha256: invalid hash state identifier")
- }
- if len(b) != marshaledSize {
- return errors.New("crypto/sha256: invalid hash state size")
- }
- b = b[len(magic224):]
- b, d.h[0] = consumeUint32(b)
- b, d.h[1] = consumeUint32(b)
- b, d.h[2] = consumeUint32(b)
- b, d.h[3] = consumeUint32(b)
- b, d.h[4] = consumeUint32(b)
- b, d.h[5] = consumeUint32(b)
- b, d.h[6] = consumeUint32(b)
- b, d.h[7] = consumeUint32(b)
- b = b[copy(d.x[:], b):]
- b, d.len = consumeUint64(b)
- d.nx = int(d.len % chunk)
- return nil
-}
-
-func appendUint64(b []byte, x uint64) []byte {
- var a [8]byte
- binary.BigEndian.PutUint64(a[:], x)
- return append(b, a[:]...)
-}
-
-func appendUint32(b []byte, x uint32) []byte {
- var a [4]byte
- binary.BigEndian.PutUint32(a[:], x)
- return append(b, a[:]...)
-}
-
-func consumeUint64(b []byte) ([]byte, uint64) {
- _ = b[7]
- x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
- uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
- return b[8:], x
-}
-
-func consumeUint32(b []byte) ([]byte, uint32) {
- _ = b[3]
- x := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
- return b[4:], x
-}
-
-func (d *digest) Reset() {
- if !d.is224 {
- d.h[0] = init0
- d.h[1] = init1
- d.h[2] = init2
- d.h[3] = init3
- d.h[4] = init4
- d.h[5] = init5
- d.h[6] = init6
- d.h[7] = init7
- } else {
- d.h[0] = init0_224
- d.h[1] = init1_224
- d.h[2] = init2_224
- d.h[3] = init3_224
- d.h[4] = init4_224
- d.h[5] = init5_224
- d.h[6] = init6_224
- d.h[7] = init7_224
- }
- d.nx = 0
- d.len = 0
-}
-
-// New returns a new hash.Hash computing the SHA256 checksum. The Hash
-// also implements encoding.BinaryMarshaler and
-// encoding.BinaryUnmarshaler to marshal and unmarshal the internal
-// state of the hash.
-func New() hash.Hash {
- d := new(digest)
- d.Reset()
- return d
-}
-
-// New224 returns a new hash.Hash computing the SHA224 checksum.
-func New224() hash.Hash {
- d := new(digest)
- d.is224 = true
- d.Reset()
- return d
-}
-
-func (d *digest) Size() int {
- if !d.is224 {
- return Size
- }
- return Size224
-}
-
-func (d *digest) BlockSize() int { return BlockSize }
-
-func (d *digest) Write(p []byte) (nn int, err error) {
- nn = len(p)
- d.len += uint64(nn)
- if d.nx > 0 {
- n := copy(d.x[d.nx:], p)
- d.nx += n
- if d.nx == chunk {
- block(d, d.x[:])
- d.nx = 0
- }
- p = p[n:]
- }
- if len(p) >= chunk {
- n := len(p) &^ (chunk - 1)
- block(d, p[:n])
- p = p[n:]
- }
- if len(p) > 0 {
- d.nx = copy(d.x[:], p)
- }
- return
-}
-
-func (d *digest) Sum(in []byte) []byte {
- // Make a copy of d so that caller can keep writing and summing.
- d0 := *d
- hash := d0.checkSum()
- if d0.is224 {
- return append(in, hash[:Size224]...)
- }
- return append(in, hash[:]...)
-}
-
-func (d *digest) checkSum() [Size]byte {
- len := d.len
- // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
- var tmp [64]byte
- tmp[0] = 0x80
- if len%64 < 56 {
- d.Write(tmp[0 : 56-len%64])
- } else {
- d.Write(tmp[0 : 64+56-len%64])
- }
-
- // Length in bits.
- len <<= 3
- binary.BigEndian.PutUint64(tmp[:], len)
- d.Write(tmp[0:8])
-
- if d.nx != 0 {
- panic("d.nx != 0")
- }
-
- var digest [Size]byte
-
- binary.BigEndian.PutUint32(digest[0:], d.h[0])
- binary.BigEndian.PutUint32(digest[4:], d.h[1])
- binary.BigEndian.PutUint32(digest[8:], d.h[2])
- binary.BigEndian.PutUint32(digest[12:], d.h[3])
- binary.BigEndian.PutUint32(digest[16:], d.h[4])
- binary.BigEndian.PutUint32(digest[20:], d.h[5])
- binary.BigEndian.PutUint32(digest[24:], d.h[6])
- if !d.is224 {
- binary.BigEndian.PutUint32(digest[28:], d.h[7])
- }
-
- return digest
-}
-
-// Sum256 returns the SHA256 checksum of the data.
-func Sum256(data []byte) [Size]byte {
- var d digest
- d.Reset()
- d.Write(data)
- return d.checkSum()
-}
-
-// Sum224 returns the SHA224 checksum of the data.
-func Sum224(data []byte) [Size224]byte {
- var d digest
- d.is224 = true
- d.Reset()
- d.Write(data)
- sum := d.checkSum()
- ap := (*[Size224]byte)(sum[:])
- return *ap
-}
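
The MarshalBinary/UnmarshalBinary pair above lets a partially fed hash be checkpointed and resumed later. A sketch of that flow through the encoding interfaces (not part of the deleted file):

    package main

    import (
        "bytes"
        "crypto/sha256"
        "encoding"
        "fmt"
    )

    func main() {
        h := sha256.New()
        h.Write([]byte("part one, "))

        // Checkpoint the internal state (magic, h[0..7], buffered block, length).
        state, err := h.(encoding.BinaryMarshaler).MarshalBinary()
        if err != nil {
            panic(err)
        }

        // Restore into a fresh digest and continue writing.
        h2 := sha256.New()
        if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
            panic(err)
        }
        h2.Write([]byte("part two"))

        want := sha256.Sum256([]byte("part one, part two"))
        fmt.Println(bytes.Equal(h2.Sum(nil), want[:])) // true
    }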
diff --git a/contrib/go/_std_1.18/src/crypto/sha256/sha256block_decl.go b/contrib/go/_std_1.18/src/crypto/sha256/sha256block_decl.go
deleted file mode 100644
index c9c1194487..0000000000
--- a/contrib/go/_std_1.18/src/crypto/sha256/sha256block_decl.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build 386 || amd64 || s390x || ppc64le
-
-package sha256
-
-//go:noescape
-
-func block(dig *digest, p []byte)
diff --git a/contrib/go/_std_1.18/src/crypto/sha512/sha512.go b/contrib/go/_std_1.18/src/crypto/sha512/sha512.go
deleted file mode 100644
index d5715558c0..0000000000
--- a/contrib/go/_std_1.18/src/crypto/sha512/sha512.go
+++ /dev/null
@@ -1,367 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package sha512 implements the SHA-384, SHA-512, SHA-512/224, and SHA-512/256
-// hash algorithms as defined in FIPS 180-4.
-//
-// All the hash.Hash implementations returned by this package also
-// implement encoding.BinaryMarshaler and encoding.BinaryUnmarshaler to
-// marshal and unmarshal the internal state of the hash.
-package sha512
-
-import (
- "crypto"
- "encoding/binary"
- "errors"
- "hash"
-)
-
-func init() {
- crypto.RegisterHash(crypto.SHA384, New384)
- crypto.RegisterHash(crypto.SHA512, New)
- crypto.RegisterHash(crypto.SHA512_224, New512_224)
- crypto.RegisterHash(crypto.SHA512_256, New512_256)
-}
-
-const (
- // Size is the size, in bytes, of a SHA-512 checksum.
- Size = 64
-
- // Size224 is the size, in bytes, of a SHA-512/224 checksum.
- Size224 = 28
-
- // Size256 is the size, in bytes, of a SHA-512/256 checksum.
- Size256 = 32
-
- // Size384 is the size, in bytes, of a SHA-384 checksum.
- Size384 = 48
-
- // BlockSize is the block size, in bytes, of the SHA-512/224,
- // SHA-512/256, SHA-384 and SHA-512 hash functions.
- BlockSize = 128
-)
-
-const (
- chunk = 128
- init0 = 0x6a09e667f3bcc908
- init1 = 0xbb67ae8584caa73b
- init2 = 0x3c6ef372fe94f82b
- init3 = 0xa54ff53a5f1d36f1
- init4 = 0x510e527fade682d1
- init5 = 0x9b05688c2b3e6c1f
- init6 = 0x1f83d9abfb41bd6b
- init7 = 0x5be0cd19137e2179
- init0_224 = 0x8c3d37c819544da2
- init1_224 = 0x73e1996689dcd4d6
- init2_224 = 0x1dfab7ae32ff9c82
- init3_224 = 0x679dd514582f9fcf
- init4_224 = 0x0f6d2b697bd44da8
- init5_224 = 0x77e36f7304c48942
- init6_224 = 0x3f9d85a86a1d36c8
- init7_224 = 0x1112e6ad91d692a1
- init0_256 = 0x22312194fc2bf72c
- init1_256 = 0x9f555fa3c84c64c2
- init2_256 = 0x2393b86b6f53b151
- init3_256 = 0x963877195940eabd
- init4_256 = 0x96283ee2a88effe3
- init5_256 = 0xbe5e1e2553863992
- init6_256 = 0x2b0199fc2c85b8aa
- init7_256 = 0x0eb72ddc81c52ca2
- init0_384 = 0xcbbb9d5dc1059ed8
- init1_384 = 0x629a292a367cd507
- init2_384 = 0x9159015a3070dd17
- init3_384 = 0x152fecd8f70e5939
- init4_384 = 0x67332667ffc00b31
- init5_384 = 0x8eb44a8768581511
- init6_384 = 0xdb0c2e0d64f98fa7
- init7_384 = 0x47b5481dbefa4fa4
-)
-
-// digest represents the partial evaluation of a checksum.
-type digest struct {
- h [8]uint64
- x [chunk]byte
- nx int
- len uint64
- function crypto.Hash
-}
-
-func (d *digest) Reset() {
- switch d.function {
- case crypto.SHA384:
- d.h[0] = init0_384
- d.h[1] = init1_384
- d.h[2] = init2_384
- d.h[3] = init3_384
- d.h[4] = init4_384
- d.h[5] = init5_384
- d.h[6] = init6_384
- d.h[7] = init7_384
- case crypto.SHA512_224:
- d.h[0] = init0_224
- d.h[1] = init1_224
- d.h[2] = init2_224
- d.h[3] = init3_224
- d.h[4] = init4_224
- d.h[5] = init5_224
- d.h[6] = init6_224
- d.h[7] = init7_224
- case crypto.SHA512_256:
- d.h[0] = init0_256
- d.h[1] = init1_256
- d.h[2] = init2_256
- d.h[3] = init3_256
- d.h[4] = init4_256
- d.h[5] = init5_256
- d.h[6] = init6_256
- d.h[7] = init7_256
- default:
- d.h[0] = init0
- d.h[1] = init1
- d.h[2] = init2
- d.h[3] = init3
- d.h[4] = init4
- d.h[5] = init5
- d.h[6] = init6
- d.h[7] = init7
- }
- d.nx = 0
- d.len = 0
-}
-
-const (
- magic384 = "sha\x04"
- magic512_224 = "sha\x05"
- magic512_256 = "sha\x06"
- magic512 = "sha\x07"
- marshaledSize = len(magic512) + 8*8 + chunk + 8
-)
-
-func (d *digest) MarshalBinary() ([]byte, error) {
- b := make([]byte, 0, marshaledSize)
- switch d.function {
- case crypto.SHA384:
- b = append(b, magic384...)
- case crypto.SHA512_224:
- b = append(b, magic512_224...)
- case crypto.SHA512_256:
- b = append(b, magic512_256...)
- case crypto.SHA512:
- b = append(b, magic512...)
- default:
- return nil, errors.New("crypto/sha512: invalid hash function")
- }
- b = appendUint64(b, d.h[0])
- b = appendUint64(b, d.h[1])
- b = appendUint64(b, d.h[2])
- b = appendUint64(b, d.h[3])
- b = appendUint64(b, d.h[4])
- b = appendUint64(b, d.h[5])
- b = appendUint64(b, d.h[6])
- b = appendUint64(b, d.h[7])
- b = append(b, d.x[:d.nx]...)
- b = b[:len(b)+len(d.x)-int(d.nx)] // already zero
- b = appendUint64(b, d.len)
- return b, nil
-}
-
-func (d *digest) UnmarshalBinary(b []byte) error {
- if len(b) < len(magic512) {
- return errors.New("crypto/sha512: invalid hash state identifier")
- }
- switch {
- case d.function == crypto.SHA384 && string(b[:len(magic384)]) == magic384:
- case d.function == crypto.SHA512_224 && string(b[:len(magic512_224)]) == magic512_224:
- case d.function == crypto.SHA512_256 && string(b[:len(magic512_256)]) == magic512_256:
- case d.function == crypto.SHA512 && string(b[:len(magic512)]) == magic512:
- default:
- return errors.New("crypto/sha512: invalid hash state identifier")
- }
- if len(b) != marshaledSize {
- return errors.New("crypto/sha512: invalid hash state size")
- }
- b = b[len(magic512):]
- b, d.h[0] = consumeUint64(b)
- b, d.h[1] = consumeUint64(b)
- b, d.h[2] = consumeUint64(b)
- b, d.h[3] = consumeUint64(b)
- b, d.h[4] = consumeUint64(b)
- b, d.h[5] = consumeUint64(b)
- b, d.h[6] = consumeUint64(b)
- b, d.h[7] = consumeUint64(b)
- b = b[copy(d.x[:], b):]
- b, d.len = consumeUint64(b)
- d.nx = int(d.len % chunk)
- return nil
-}
-
-func appendUint64(b []byte, x uint64) []byte {
- var a [8]byte
- binary.BigEndian.PutUint64(a[:], x)
- return append(b, a[:]...)
-}
-
-func consumeUint64(b []byte) ([]byte, uint64) {
- _ = b[7]
- x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
- uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
- return b[8:], x
-}
-
-// New returns a new hash.Hash computing the SHA-512 checksum.
-func New() hash.Hash {
- d := &digest{function: crypto.SHA512}
- d.Reset()
- return d
-}
-
-// New512_224 returns a new hash.Hash computing the SHA-512/224 checksum.
-func New512_224() hash.Hash {
- d := &digest{function: crypto.SHA512_224}
- d.Reset()
- return d
-}
-
-// New512_256 returns a new hash.Hash computing the SHA-512/256 checksum.
-func New512_256() hash.Hash {
- d := &digest{function: crypto.SHA512_256}
- d.Reset()
- return d
-}
-
-// New384 returns a new hash.Hash computing the SHA-384 checksum.
-func New384() hash.Hash {
- d := &digest{function: crypto.SHA384}
- d.Reset()
- return d
-}
-
-func (d *digest) Size() int {
- switch d.function {
- case crypto.SHA512_224:
- return Size224
- case crypto.SHA512_256:
- return Size256
- case crypto.SHA384:
- return Size384
- default:
- return Size
- }
-}
-
-func (d *digest) BlockSize() int { return BlockSize }
-
-func (d *digest) Write(p []byte) (nn int, err error) {
- nn = len(p)
- d.len += uint64(nn)
- if d.nx > 0 {
- n := copy(d.x[d.nx:], p)
- d.nx += n
- if d.nx == chunk {
- block(d, d.x[:])
- d.nx = 0
- }
- p = p[n:]
- }
- if len(p) >= chunk {
- n := len(p) &^ (chunk - 1)
- block(d, p[:n])
- p = p[n:]
- }
- if len(p) > 0 {
- d.nx = copy(d.x[:], p)
- }
- return
-}
-
-func (d *digest) Sum(in []byte) []byte {
- // Make a copy of d so that caller can keep writing and summing.
- d0 := new(digest)
- *d0 = *d
- hash := d0.checkSum()
- switch d0.function {
- case crypto.SHA384:
- return append(in, hash[:Size384]...)
- case crypto.SHA512_224:
- return append(in, hash[:Size224]...)
- case crypto.SHA512_256:
- return append(in, hash[:Size256]...)
- default:
- return append(in, hash[:]...)
- }
-}
-
-func (d *digest) checkSum() [Size]byte {
- // Padding. Add a 1 bit and 0 bits until 112 bytes mod 128.
- len := d.len
- var tmp [128]byte
- tmp[0] = 0x80
- if len%128 < 112 {
- d.Write(tmp[0 : 112-len%128])
- } else {
- d.Write(tmp[0 : 128+112-len%128])
- }
-
- // Length in bits.
- len <<= 3
-	binary.BigEndian.PutUint64(tmp[0:], 0) // upper 64 bits are always zero, because the len variable has type uint64
- binary.BigEndian.PutUint64(tmp[8:], len)
- d.Write(tmp[0:16])
-
- if d.nx != 0 {
- panic("d.nx != 0")
- }
-
- var digest [Size]byte
- binary.BigEndian.PutUint64(digest[0:], d.h[0])
- binary.BigEndian.PutUint64(digest[8:], d.h[1])
- binary.BigEndian.PutUint64(digest[16:], d.h[2])
- binary.BigEndian.PutUint64(digest[24:], d.h[3])
- binary.BigEndian.PutUint64(digest[32:], d.h[4])
- binary.BigEndian.PutUint64(digest[40:], d.h[5])
- if d.function != crypto.SHA384 {
- binary.BigEndian.PutUint64(digest[48:], d.h[6])
- binary.BigEndian.PutUint64(digest[56:], d.h[7])
- }
-
- return digest
-}
-
-// Sum512 returns the SHA512 checksum of the data.
-func Sum512(data []byte) [Size]byte {
- d := digest{function: crypto.SHA512}
- d.Reset()
- d.Write(data)
- return d.checkSum()
-}
-
-// Sum384 returns the SHA384 checksum of the data.
-func Sum384(data []byte) [Size384]byte {
- d := digest{function: crypto.SHA384}
- d.Reset()
- d.Write(data)
- sum := d.checkSum()
- ap := (*[Size384]byte)(sum[:])
- return *ap
-}
-
-// Sum512_224 returns the SHA-512/224 checksum of the data.
-func Sum512_224(data []byte) [Size224]byte {
- d := digest{function: crypto.SHA512_224}
- d.Reset()
- d.Write(data)
- sum := d.checkSum()
- ap := (*[Size224]byte)(sum[:])
- return *ap
-}
-
-// Sum512_256 returns the SHA-512/256 checksum of the data.
-func Sum512_256(data []byte) [Size256]byte {
- d := digest{function: crypto.SHA512_256}
- d.Reset()
- d.Write(data)
- sum := d.checkSum()
- ap := (*[Size256]byte)(sum[:])
- return *ap
-}
diff --git a/contrib/go/_std_1.18/src/crypto/subtle/constant_time.go b/contrib/go/_std_1.18/src/crypto/subtle/constant_time.go
deleted file mode 100644
index 7c3cf05c46..0000000000
--- a/contrib/go/_std_1.18/src/crypto/subtle/constant_time.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package subtle implements functions that are often useful in cryptographic
-// code but require careful thought to use correctly.
-package subtle
-
-// ConstantTimeCompare returns 1 if the two slices, x and y, have equal contents
-// and 0 otherwise. The time taken is a function of the length of the slices and
-// is independent of the contents.
-func ConstantTimeCompare(x, y []byte) int {
- if len(x) != len(y) {
- return 0
- }
-
- var v byte
-
- for i := 0; i < len(x); i++ {
- v |= x[i] ^ y[i]
- }
-
- return ConstantTimeByteEq(v, 0)
-}
-
-// ConstantTimeSelect returns x if v == 1 and y if v == 0.
-// Its behavior is undefined if v takes any other value.
-func ConstantTimeSelect(v, x, y int) int { return ^(v-1)&x | (v-1)&y }
-
-// ConstantTimeByteEq returns 1 if x == y and 0 otherwise.
-func ConstantTimeByteEq(x, y uint8) int {
- return int((uint32(x^y) - 1) >> 31)
-}
-
-// ConstantTimeEq returns 1 if x == y and 0 otherwise.
-func ConstantTimeEq(x, y int32) int {
- return int((uint64(uint32(x^y)) - 1) >> 63)
-}
-
-// ConstantTimeCopy copies the contents of y into x (a slice of equal length)
-// if v == 1. If v == 0, x is left unchanged. Its behavior is undefined if v
-// takes any other value.
-func ConstantTimeCopy(v int, x, y []byte) {
- if len(x) != len(y) {
- panic("subtle: slices have different lengths")
- }
-
- xmask := byte(v - 1)
- ymask := byte(^(v - 1))
- for i := 0; i < len(x); i++ {
- x[i] = x[i]&xmask | y[i]&ymask
- }
-}
-
-// ConstantTimeLessOrEq returns 1 if x <= y and 0 otherwise.
-// Its behavior is undefined if x or y are negative or > 2**31 - 1.
-func ConstantTimeLessOrEq(x, y int) int {
- x32 := int32(x)
- y32 := int32(y)
- return int(((x32 - y32 - 1) >> 31) & 1)
-}
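
The canonical consumer of ConstantTimeCompare is MAC verification, where an early-exit bytes.Equal would leak how many leading bytes matched. A usage sketch (not part of the deleted file; crypto/hmac's own Equal wraps exactly this call):

    package main

    import (
        "crypto/hmac"
        "crypto/sha256"
        "crypto/subtle"
        "fmt"
    )

    // validMAC reports whether gotMAC is the correct HMAC-SHA256 tag for msg.
    // The comparison takes time independent of where the tags first differ.
    func validMAC(msg, gotMAC, key []byte) bool {
        mac := hmac.New(sha256.New, key)
        mac.Write(msg)
        return subtle.ConstantTimeCompare(gotMAC, mac.Sum(nil)) == 1
    }

    func main() {
        key := []byte("demo key")
        mac := hmac.New(sha256.New, key)
        mac.Write([]byte("msg"))
        fmt.Println(validMAC([]byte("msg"), mac.Sum(nil), key)) // true
    }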
diff --git a/contrib/go/_std_1.18/src/crypto/tls/auth.go b/contrib/go/_std_1.18/src/crypto/tls/auth.go
deleted file mode 100644
index a9df0da6d6..0000000000
--- a/contrib/go/_std_1.18/src/crypto/tls/auth.go
+++ /dev/null
@@ -1,289 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tls
-
-import (
- "bytes"
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/elliptic"
- "crypto/rsa"
- "errors"
- "fmt"
- "hash"
- "io"
-)
-
-// verifyHandshakeSignature verifies a signature against pre-hashed
-// (if required) handshake contents.
-func verifyHandshakeSignature(sigType uint8, pubkey crypto.PublicKey, hashFunc crypto.Hash, signed, sig []byte) error {
- switch sigType {
- case signatureECDSA:
- pubKey, ok := pubkey.(*ecdsa.PublicKey)
- if !ok {
- return fmt.Errorf("expected an ECDSA public key, got %T", pubkey)
- }
- if !ecdsa.VerifyASN1(pubKey, signed, sig) {
- return errors.New("ECDSA verification failure")
- }
- case signatureEd25519:
- pubKey, ok := pubkey.(ed25519.PublicKey)
- if !ok {
- return fmt.Errorf("expected an Ed25519 public key, got %T", pubkey)
- }
- if !ed25519.Verify(pubKey, signed, sig) {
- return errors.New("Ed25519 verification failure")
- }
- case signaturePKCS1v15:
- pubKey, ok := pubkey.(*rsa.PublicKey)
- if !ok {
- return fmt.Errorf("expected an RSA public key, got %T", pubkey)
- }
- if err := rsa.VerifyPKCS1v15(pubKey, hashFunc, signed, sig); err != nil {
- return err
- }
- case signatureRSAPSS:
- pubKey, ok := pubkey.(*rsa.PublicKey)
- if !ok {
- return fmt.Errorf("expected an RSA public key, got %T", pubkey)
- }
- signOpts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash}
- if err := rsa.VerifyPSS(pubKey, hashFunc, signed, sig, signOpts); err != nil {
- return err
- }
- default:
- return errors.New("internal error: unknown signature type")
- }
- return nil
-}
-
-const (
- serverSignatureContext = "TLS 1.3, server CertificateVerify\x00"
- clientSignatureContext = "TLS 1.3, client CertificateVerify\x00"
-)
-
-var signaturePadding = []byte{
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
-}
-
-// signedMessage returns the pre-hashed (if necessary) message to be signed by
-// certificate keys in TLS 1.3. See RFC 8446, Section 4.4.3.
-func signedMessage(sigHash crypto.Hash, context string, transcript hash.Hash) []byte {
- if sigHash == directSigning {
- b := &bytes.Buffer{}
- b.Write(signaturePadding)
- io.WriteString(b, context)
- b.Write(transcript.Sum(nil))
- return b.Bytes()
- }
- h := sigHash.New()
- h.Write(signaturePadding)
- io.WriteString(h, context)
- h.Write(transcript.Sum(nil))
- return h.Sum(nil)
-}
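
For orientation (an editorial note), the byte string assembled above for TLS 1.3 CertificateVerify per RFC 8446, Section 4.4.3, is

    0x20 repeated 64 times || context string (with its trailing NUL) || Hash(transcript)

which is then pre-hashed with sigHash, except for algorithms such as Ed25519 that sign the raw message (represented here by directSigning).
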
-
-// typeAndHashFromSignatureScheme returns the corresponding signature type and
-// crypto.Hash for a given TLS SignatureScheme.
-func typeAndHashFromSignatureScheme(signatureAlgorithm SignatureScheme) (sigType uint8, hash crypto.Hash, err error) {
- switch signatureAlgorithm {
- case PKCS1WithSHA1, PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512:
- sigType = signaturePKCS1v15
- case PSSWithSHA256, PSSWithSHA384, PSSWithSHA512:
- sigType = signatureRSAPSS
- case ECDSAWithSHA1, ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512:
- sigType = signatureECDSA
- case Ed25519:
- sigType = signatureEd25519
- default:
- return 0, 0, fmt.Errorf("unsupported signature algorithm: %v", signatureAlgorithm)
- }
- switch signatureAlgorithm {
- case PKCS1WithSHA1, ECDSAWithSHA1:
- hash = crypto.SHA1
- case PKCS1WithSHA256, PSSWithSHA256, ECDSAWithP256AndSHA256:
- hash = crypto.SHA256
- case PKCS1WithSHA384, PSSWithSHA384, ECDSAWithP384AndSHA384:
- hash = crypto.SHA384
- case PKCS1WithSHA512, PSSWithSHA512, ECDSAWithP521AndSHA512:
- hash = crypto.SHA512
- case Ed25519:
- hash = directSigning
- default:
- return 0, 0, fmt.Errorf("unsupported signature algorithm: %v", signatureAlgorithm)
- }
- return sigType, hash, nil
-}
-
-// legacyTypeAndHashFromPublicKey returns the fixed signature type and crypto.Hash for
-// a given public key used with TLS 1.0 and 1.1, before the introduction of
-// signature algorithm negotiation.
-func legacyTypeAndHashFromPublicKey(pub crypto.PublicKey) (sigType uint8, hash crypto.Hash, err error) {
- switch pub.(type) {
- case *rsa.PublicKey:
- return signaturePKCS1v15, crypto.MD5SHA1, nil
- case *ecdsa.PublicKey:
- return signatureECDSA, crypto.SHA1, nil
- case ed25519.PublicKey:
- // RFC 8422 specifies support for Ed25519 in TLS 1.0 and 1.1,
- // but it requires holding on to a handshake transcript to do a
- // full signature, and not even OpenSSL bothers with the
- // complexity, so we can't even test it properly.
- return 0, 0, fmt.Errorf("tls: Ed25519 public keys are not supported before TLS 1.2")
- default:
- return 0, 0, fmt.Errorf("tls: unsupported public key: %T", pub)
- }
-}
-
-var rsaSignatureSchemes = []struct {
- scheme SignatureScheme
- minModulusBytes int
- maxVersion uint16
-}{
- // RSA-PSS is used with PSSSaltLengthEqualsHash, and requires
- // emLen >= hLen + sLen + 2
- {PSSWithSHA256, crypto.SHA256.Size()*2 + 2, VersionTLS13},
- {PSSWithSHA384, crypto.SHA384.Size()*2 + 2, VersionTLS13},
- {PSSWithSHA512, crypto.SHA512.Size()*2 + 2, VersionTLS13},
- // PKCS #1 v1.5 uses prefixes from hashPrefixes in crypto/rsa, and requires
- // emLen >= len(prefix) + hLen + 11
- // TLS 1.3 dropped support for PKCS #1 v1.5 in favor of RSA-PSS.
- {PKCS1WithSHA256, 19 + crypto.SHA256.Size() + 11, VersionTLS12},
- {PKCS1WithSHA384, 19 + crypto.SHA384.Size() + 11, VersionTLS12},
- {PKCS1WithSHA512, 19 + crypto.SHA512.Size() + 11, VersionTLS12},
- {PKCS1WithSHA1, 15 + crypto.SHA1.Size() + 11, VersionTLS12},
-}
-
-// signatureSchemesForCertificate returns the list of supported SignatureSchemes
-// for a given certificate, based on the public key and the protocol version,
-// and optionally filtered by its explicit SupportedSignatureAlgorithms.
-//
-// This function must be kept in sync with supportedSignatureAlgorithms.
-func signatureSchemesForCertificate(version uint16, cert *Certificate) []SignatureScheme {
- priv, ok := cert.PrivateKey.(crypto.Signer)
- if !ok {
- return nil
- }
-
- var sigAlgs []SignatureScheme
- switch pub := priv.Public().(type) {
- case *ecdsa.PublicKey:
- if version != VersionTLS13 {
- // In TLS 1.2 and earlier, ECDSA algorithms are not
- // constrained to a single curve.
- sigAlgs = []SignatureScheme{
- ECDSAWithP256AndSHA256,
- ECDSAWithP384AndSHA384,
- ECDSAWithP521AndSHA512,
- ECDSAWithSHA1,
- }
- break
- }
- switch pub.Curve {
- case elliptic.P256():
- sigAlgs = []SignatureScheme{ECDSAWithP256AndSHA256}
- case elliptic.P384():
- sigAlgs = []SignatureScheme{ECDSAWithP384AndSHA384}
- case elliptic.P521():
- sigAlgs = []SignatureScheme{ECDSAWithP521AndSHA512}
- default:
- return nil
- }
- case *rsa.PublicKey:
- size := pub.Size()
- sigAlgs = make([]SignatureScheme, 0, len(rsaSignatureSchemes))
- for _, candidate := range rsaSignatureSchemes {
- if size >= candidate.minModulusBytes && version <= candidate.maxVersion {
- sigAlgs = append(sigAlgs, candidate.scheme)
- }
- }
- case ed25519.PublicKey:
- sigAlgs = []SignatureScheme{Ed25519}
- default:
- return nil
- }
-
- if cert.SupportedSignatureAlgorithms != nil {
- var filteredSigAlgs []SignatureScheme
- for _, sigAlg := range sigAlgs {
- if isSupportedSignatureAlgorithm(sigAlg, cert.SupportedSignatureAlgorithms) {
- filteredSigAlgs = append(filteredSigAlgs, sigAlg)
- }
- }
- return filteredSigAlgs
- }
- return sigAlgs
-}
-
-// selectSignatureScheme picks a SignatureScheme from the peer's preference list
-// that works with the selected certificate. It's only called for protocol
-// versions that support signature algorithms, so TLS 1.2 and 1.3.
-func selectSignatureScheme(vers uint16, c *Certificate, peerAlgs []SignatureScheme) (SignatureScheme, error) {
- supportedAlgs := signatureSchemesForCertificate(vers, c)
- if len(supportedAlgs) == 0 {
- return 0, unsupportedCertificateError(c)
- }
- if len(peerAlgs) == 0 && vers == VersionTLS12 {
- // For TLS 1.2, if the client didn't send signature_algorithms then we
- // can assume that it supports SHA1. See RFC 5246, Section 7.4.1.4.1.
- peerAlgs = []SignatureScheme{PKCS1WithSHA1, ECDSAWithSHA1}
- }
- // Pick signature scheme in the peer's preference order, as our
- // preference order is not configurable.
- for _, preferredAlg := range peerAlgs {
- if isSupportedSignatureAlgorithm(preferredAlg, supportedAlgs) {
- return preferredAlg, nil
- }
- }
- return 0, errors.New("tls: peer doesn't support any of the certificate's signature algorithms")
-}
-
-// unsupportedCertificateError returns a helpful error for certificates with
-// an unsupported private key.
-func unsupportedCertificateError(cert *Certificate) error {
- switch cert.PrivateKey.(type) {
- case rsa.PrivateKey, ecdsa.PrivateKey:
- return fmt.Errorf("tls: unsupported certificate: private key is %T, expected *%T",
- cert.PrivateKey, cert.PrivateKey)
- case *ed25519.PrivateKey:
- return fmt.Errorf("tls: unsupported certificate: private key is *ed25519.PrivateKey, expected ed25519.PrivateKey")
- }
-
- signer, ok := cert.PrivateKey.(crypto.Signer)
- if !ok {
- return fmt.Errorf("tls: certificate private key (%T) does not implement crypto.Signer",
- cert.PrivateKey)
- }
-
- switch pub := signer.Public().(type) {
- case *ecdsa.PublicKey:
- switch pub.Curve {
- case elliptic.P256():
- case elliptic.P384():
- case elliptic.P521():
- default:
- return fmt.Errorf("tls: unsupported certificate curve (%s)", pub.Curve.Params().Name)
- }
- case *rsa.PublicKey:
- return fmt.Errorf("tls: certificate RSA key size too small for supported signature algorithms")
- case ed25519.PublicKey:
- default:
- return fmt.Errorf("tls: unsupported certificate key (%T)", pub)
- }
-
- if cert.SupportedSignatureAlgorithms != nil {
- return fmt.Errorf("tls: peer doesn't support the certificate custom signature algorithms")
- }
-
- return fmt.Errorf("tls: internal error: unsupported key (%T)", cert.PrivateKey)
-}
diff --git a/contrib/go/_std_1.18/src/crypto/tls/cipher_suites.go b/contrib/go/_std_1.18/src/crypto/tls/cipher_suites.go
deleted file mode 100644
index d164991eec..0000000000
--- a/contrib/go/_std_1.18/src/crypto/tls/cipher_suites.go
+++ /dev/null
@@ -1,689 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tls
-
-import (
- "crypto"
- "crypto/aes"
- "crypto/cipher"
- "crypto/des"
- "crypto/hmac"
- "crypto/rc4"
- "crypto/sha1"
- "crypto/sha256"
- "fmt"
- "hash"
- "internal/cpu"
- "runtime"
-
- "golang.org/x/crypto/chacha20poly1305"
-)
-
-// CipherSuite is a TLS cipher suite. Note that most functions in this package
-// accept and expose cipher suite IDs instead of this type.
-type CipherSuite struct {
- ID uint16
- Name string
-
-	// SupportedVersions is the list of TLS protocol versions that can
- // negotiate this cipher suite.
- SupportedVersions []uint16
-
- // Insecure is true if the cipher suite has known security issues
- // due to its primitives, design, or implementation.
- Insecure bool
-}
-
-var (
- supportedUpToTLS12 = []uint16{VersionTLS10, VersionTLS11, VersionTLS12}
- supportedOnlyTLS12 = []uint16{VersionTLS12}
- supportedOnlyTLS13 = []uint16{VersionTLS13}
-)
-
-// CipherSuites returns a list of cipher suites currently implemented by this
-// package, excluding those with security issues, which are returned by
-// InsecureCipherSuites.
-//
-// The list is sorted by ID. Note that the default cipher suites selected by
-// this package might depend on logic that can't be captured by a static list,
-// and might not match those returned by this function.
-func CipherSuites() []*CipherSuite {
- return []*CipherSuite{
- {TLS_RSA_WITH_AES_128_CBC_SHA, "TLS_RSA_WITH_AES_128_CBC_SHA", supportedUpToTLS12, false},
- {TLS_RSA_WITH_AES_256_CBC_SHA, "TLS_RSA_WITH_AES_256_CBC_SHA", supportedUpToTLS12, false},
- {TLS_RSA_WITH_AES_128_GCM_SHA256, "TLS_RSA_WITH_AES_128_GCM_SHA256", supportedOnlyTLS12, false},
- {TLS_RSA_WITH_AES_256_GCM_SHA384, "TLS_RSA_WITH_AES_256_GCM_SHA384", supportedOnlyTLS12, false},
-
- {TLS_AES_128_GCM_SHA256, "TLS_AES_128_GCM_SHA256", supportedOnlyTLS13, false},
- {TLS_AES_256_GCM_SHA384, "TLS_AES_256_GCM_SHA384", supportedOnlyTLS13, false},
- {TLS_CHACHA20_POLY1305_SHA256, "TLS_CHACHA20_POLY1305_SHA256", supportedOnlyTLS13, false},
-
- {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", supportedUpToTLS12, false},
- {TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", supportedUpToTLS12, false},
- {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", supportedUpToTLS12, false},
- {TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", supportedUpToTLS12, false},
- {TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", supportedOnlyTLS12, false},
- {TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", supportedOnlyTLS12, false},
- {TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", supportedOnlyTLS12, false},
- {TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", supportedOnlyTLS12, false},
- {TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", supportedOnlyTLS12, false},
- {TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", supportedOnlyTLS12, false},
- }
-}
-
-// InsecureCipherSuites returns a list of cipher suites currently implemented by
-// this package and which have security issues.
-//
-// Most applications should not use the cipher suites in this list, and should
-// only use those returned by CipherSuites.
-func InsecureCipherSuites() []*CipherSuite {
- // This list includes RC4, CBC_SHA256, and 3DES cipher suites. See
- // cipherSuitesPreferenceOrder for details.
- return []*CipherSuite{
- {TLS_RSA_WITH_RC4_128_SHA, "TLS_RSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
- {TLS_RSA_WITH_3DES_EDE_CBC_SHA, "TLS_RSA_WITH_3DES_EDE_CBC_SHA", supportedUpToTLS12, true},
- {TLS_RSA_WITH_AES_128_CBC_SHA256, "TLS_RSA_WITH_AES_128_CBC_SHA256", supportedOnlyTLS12, true},
- {TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
- {TLS_ECDHE_RSA_WITH_RC4_128_SHA, "TLS_ECDHE_RSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
- {TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", supportedUpToTLS12, true},
- {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", supportedOnlyTLS12, true},
- {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", supportedOnlyTLS12, true},
- }
-}
-
-// CipherSuiteName returns the standard name for the passed cipher suite ID
-// (e.g. "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"), or a fallback representation
-// of the ID value if the cipher suite is not implemented by this package.
-func CipherSuiteName(id uint16) string {
- for _, c := range CipherSuites() {
- if c.ID == id {
- return c.Name
- }
- }
- for _, c := range InsecureCipherSuites() {
- if c.ID == id {
- return c.Name
- }
- }
- return fmt.Sprintf("0x%04X", id)
-}
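
How the exported metadata above is typically consumed — a small sketch (not part of the deleted file) that lists the secure suites and shows the hex fallback of CipherSuiteName:

    package main

    import (
        "crypto/tls"
        "fmt"
    )

    func main() {
        for _, cs := range tls.CipherSuites() {
            fmt.Printf("%-50s 0x%04X  TLS versions: %v\n", cs.Name, cs.ID, cs.SupportedVersions)
        }
        // IDs this package does not implement fall back to a hex representation.
        fmt.Println(tls.CipherSuiteName(0xABCD)) // 0xABCD
    }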
-
-const (
- // suiteECDHE indicates that the cipher suite involves elliptic curve
- // Diffie-Hellman. This means that it should only be selected when the
- // client indicates that it supports ECC with a curve and point format
- // that we're happy with.
- suiteECDHE = 1 << iota
- // suiteECSign indicates that the cipher suite involves an ECDSA or
- // EdDSA signature and therefore may only be selected when the server's
- // certificate is ECDSA or EdDSA. If this is not set then the cipher suite
- // is RSA based.
- suiteECSign
- // suiteTLS12 indicates that the cipher suite should only be advertised
- // and accepted when using TLS 1.2.
- suiteTLS12
- // suiteSHA384 indicates that the cipher suite uses SHA384 as the
- // handshake hash.
- suiteSHA384
-)
-
-// A cipherSuite is a TLS 1.0–1.2 cipher suite, and defines the key exchange
-// mechanism, as well as the cipher+MAC pair or the AEAD.
-type cipherSuite struct {
- id uint16
- // the lengths, in bytes, of the key material needed for each component.
- keyLen int
- macLen int
- ivLen int
- ka func(version uint16) keyAgreement
- // flags is a bitmask of the suite* values, above.
- flags int
- cipher func(key, iv []byte, isRead bool) any
- mac func(key []byte) hash.Hash
- aead func(key, fixedNonce []byte) aead
-}
-
-var cipherSuites = []*cipherSuite{ // TODO: replace with a map, since the order doesn't matter.
- {TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, 32, 0, 12, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadChaCha20Poly1305},
- {TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, 32, 0, 12, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12, nil, nil, aeadChaCha20Poly1305},
- {TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadAESGCM},
- {TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12, nil, nil, aeadAESGCM},
- {TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM},
- {TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM},
- {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, ecdheRSAKA, suiteECDHE | suiteTLS12, cipherAES, macSHA256, nil},
- {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil},
- {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12, cipherAES, macSHA256, nil},
- {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECSign, cipherAES, macSHA1, nil},
- {TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil},
- {TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECSign, cipherAES, macSHA1, nil},
- {TLS_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, rsaKA, suiteTLS12, nil, nil, aeadAESGCM},
- {TLS_RSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, rsaKA, suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM},
- {TLS_RSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, rsaKA, suiteTLS12, cipherAES, macSHA256, nil},
- {TLS_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil},
- {TLS_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil},
- {TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, ecdheRSAKA, suiteECDHE, cipher3DES, macSHA1, nil},
- {TLS_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, rsaKA, 0, cipher3DES, macSHA1, nil},
- {TLS_RSA_WITH_RC4_128_SHA, 16, 20, 0, rsaKA, 0, cipherRC4, macSHA1, nil},
- {TLS_ECDHE_RSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheRSAKA, suiteECDHE, cipherRC4, macSHA1, nil},
- {TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheECDSAKA, suiteECDHE | suiteECSign, cipherRC4, macSHA1, nil},
-}
-
-// selectCipherSuite returns the first TLS 1.0–1.2 cipher suite from ids which
-// is also in supportedIDs and passes the ok filter.
-func selectCipherSuite(ids, supportedIDs []uint16, ok func(*cipherSuite) bool) *cipherSuite {
- for _, id := range ids {
- candidate := cipherSuiteByID(id)
- if candidate == nil || !ok(candidate) {
- continue
- }
-
- for _, suppID := range supportedIDs {
- if id == suppID {
- return candidate
- }
- }
- }
- return nil
-}
-
-// A cipherSuiteTLS13 defines only the pair of the AEAD algorithm and hash
-// algorithm to be used with HKDF. See RFC 8446, Appendix B.4.
-type cipherSuiteTLS13 struct {
- id uint16
- keyLen int
- aead func(key, fixedNonce []byte) aead
- hash crypto.Hash
-}
-
-var cipherSuitesTLS13 = []*cipherSuiteTLS13{ // TODO: replace with a map.
- {TLS_AES_128_GCM_SHA256, 16, aeadAESGCMTLS13, crypto.SHA256},
- {TLS_CHACHA20_POLY1305_SHA256, 32, aeadChaCha20Poly1305, crypto.SHA256},
- {TLS_AES_256_GCM_SHA384, 32, aeadAESGCMTLS13, crypto.SHA384},
-}
-
-// cipherSuitesPreferenceOrder is the order in which we'll select (on the
-// server) or advertise (on the client) TLS 1.0–1.2 cipher suites.
-//
-// Cipher suites are filtered but not reordered based on the application and
-// peer's preferences, meaning we'll never select a suite lower in this list if
-// any higher one is available. This makes it more defensible to keep weaker
-// cipher suites enabled, especially on the server side where we get the last
-// word, since there are no known downgrade attacks on cipher suites selection.
-//
-// The list is sorted by applying the following priority rules, stopping at the
-// first (most important) applicable one:
-//
-// - Anything else comes before RC4
-//
-// RC4 has practically exploitable biases. See https://www.rc4nomore.com.
-//
-// - Anything else comes before CBC_SHA256
-//
-// SHA-256 variants of the CBC ciphersuites don't implement any Lucky13
-// countermeasures. See http://www.isg.rhul.ac.uk/tls/Lucky13.html and
-// https://www.imperialviolet.org/2013/02/04/luckythirteen.html.
-//
-// - Anything else comes before 3DES
-//
-// 3DES has 64-bit blocks, which makes it fundamentally susceptible to
-// birthday attacks. See https://sweet32.info.
-//
-// - ECDHE comes before anything else
-//
-// Once we got the broken stuff out of the way, the most important
-// property a cipher suite can have is forward secrecy. We don't
-// implement FFDHE, so that means ECDHE.
-//
-// - AEADs come before CBC ciphers
-//
-// Even with Lucky13 countermeasures, MAC-then-Encrypt CBC cipher suites
-// are fundamentally fragile, and suffered from an endless sequence of
-// padding oracle attacks. See https://eprint.iacr.org/2015/1129,
-// https://www.imperialviolet.org/2014/12/08/poodleagain.html, and
-// https://blog.cloudflare.com/yet-another-padding-oracle-in-openssl-cbc-ciphersuites/.
-//
-// - AES comes before ChaCha20
-//
-// When AES hardware is available, AES-128-GCM and AES-256-GCM are faster
-// than ChaCha20Poly1305.
-//
-// When AES hardware is not available, AES-128-GCM is one or more of: much
-// slower, way more complex, and less safe (because not constant time)
-// than ChaCha20Poly1305.
-//
-// We use this list if we think both peers have AES hardware, and
-// cipherSuitesPreferenceOrderNoAES otherwise.
-//
-// - AES-128 comes before AES-256
-//
-// The only potential advantages of AES-256 are better multi-target
-// margins, and hypothetical post-quantum properties. Neither apply to
-// TLS, and AES-256 is slower due to its four extra rounds (which don't
-// contribute to the advantages above).
-//
-// - ECDSA comes before RSA
-//
-// The relative order of ECDSA and RSA cipher suites doesn't matter,
-// as they depend on the certificate. Pick one to get a stable order.
-//
-var cipherSuitesPreferenceOrder = []uint16{
- // AEADs w/ ECDHE
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
-
- // CBC w/ ECDHE
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
-
- // AEADs w/o ECDHE
- TLS_RSA_WITH_AES_128_GCM_SHA256,
- TLS_RSA_WITH_AES_256_GCM_SHA384,
-
- // CBC w/o ECDHE
- TLS_RSA_WITH_AES_128_CBC_SHA,
- TLS_RSA_WITH_AES_256_CBC_SHA,
-
- // 3DES
- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
- TLS_RSA_WITH_3DES_EDE_CBC_SHA,
-
- // CBC_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
- TLS_RSA_WITH_AES_128_CBC_SHA256,
-
- // RC4
- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- TLS_RSA_WITH_RC4_128_SHA,
-}
-
-var cipherSuitesPreferenceOrderNoAES = []uint16{
- // ChaCha20Poly1305
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
-
- // AES-GCM w/ ECDHE
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
-
- // The rest of cipherSuitesPreferenceOrder.
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
- TLS_RSA_WITH_AES_128_GCM_SHA256,
- TLS_RSA_WITH_AES_256_GCM_SHA384,
- TLS_RSA_WITH_AES_128_CBC_SHA,
- TLS_RSA_WITH_AES_256_CBC_SHA,
- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
- TLS_RSA_WITH_3DES_EDE_CBC_SHA,
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
- TLS_RSA_WITH_AES_128_CBC_SHA256,
- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- TLS_RSA_WITH_RC4_128_SHA,
-}
-
-// disabledCipherSuites are not used unless explicitly listed in
-// Config.CipherSuites. They MUST be at the end of cipherSuitesPreferenceOrder.
-var disabledCipherSuites = []uint16{
- // CBC_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
- TLS_RSA_WITH_AES_128_CBC_SHA256,
-
- // RC4
- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- TLS_RSA_WITH_RC4_128_SHA,
-}
-
-var (
- defaultCipherSuitesLen = len(cipherSuitesPreferenceOrder) - len(disabledCipherSuites)
- defaultCipherSuites = cipherSuitesPreferenceOrder[:defaultCipherSuitesLen]
-)
-
-// defaultCipherSuitesTLS13 is also the preference order, since there are no
-// disabled-by-default TLS 1.3 cipher suites. The same AES vs ChaCha20 logic as
-// in cipherSuitesPreferenceOrder applies.
-var defaultCipherSuitesTLS13 = []uint16{
- TLS_AES_128_GCM_SHA256,
- TLS_AES_256_GCM_SHA384,
- TLS_CHACHA20_POLY1305_SHA256,
-}
-
-var defaultCipherSuitesTLS13NoAES = []uint16{
- TLS_CHACHA20_POLY1305_SHA256,
- TLS_AES_128_GCM_SHA256,
- TLS_AES_256_GCM_SHA384,
-}
-
-var (
- hasGCMAsmAMD64 = cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ
- hasGCMAsmARM64 = cpu.ARM64.HasAES && cpu.ARM64.HasPMULL
- // Keep in sync with crypto/aes/cipher_s390x.go.
- hasGCMAsmS390X = cpu.S390X.HasAES && cpu.S390X.HasAESCBC && cpu.S390X.HasAESCTR &&
- (cpu.S390X.HasGHASH || cpu.S390X.HasAESGCM)
-
- hasAESGCMHardwareSupport = runtime.GOARCH == "amd64" && hasGCMAsmAMD64 ||
- runtime.GOARCH == "arm64" && hasGCMAsmARM64 ||
- runtime.GOARCH == "s390x" && hasGCMAsmS390X
-)
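-
-// Illustrative sketch, not part of the original file: the hardware flags above
-// are what selects between the AES-first and ChaCha20-first orders. A
-// hypothetical helper for the TLS 1.3 lists could look like this.
-func defaultTLS13Order() []uint16 {
- if hasAESGCMHardwareSupport {
- return defaultCipherSuitesTLS13
- }
- return defaultCipherSuitesTLS13NoAES
-}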
-
-var aesgcmCiphers = map[uint16]bool{
- // TLS 1.2
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: true,
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: true,
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: true,
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: true,
- // TLS 1.3
- TLS_AES_128_GCM_SHA256: true,
- TLS_AES_256_GCM_SHA384: true,
-}
-
-var nonAESGCMAEADCiphers = map[uint16]bool{
- // TLS 1.2
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: true,
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: true,
- // TLS 1.3
- TLS_CHACHA20_POLY1305_SHA256: true,
-}
-
-// aesgcmPreferred returns whether the first known cipher in the preference list
-// is an AES-GCM cipher, implying the peer has hardware support for it.
-func aesgcmPreferred(ciphers []uint16) bool {
- for _, cID := range ciphers {
- if c := cipherSuiteByID(cID); c != nil {
- return aesgcmCiphers[cID]
- }
- if c := cipherSuiteTLS13ByID(cID); c != nil {
- return aesgcmCiphers[cID]
- }
- }
- return false
-}
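-
-// Hedged usage sketch: a server could combine its own hardware flag with
-// aesgcmPreferred over the client's advertised list to choose between the two
-// TLS 1.2 preference orders above. serverOrder is a hypothetical name.
-func serverOrder(clientCiphers []uint16) []uint16 {
- if hasAESGCMHardwareSupport && aesgcmPreferred(clientCiphers) {
- return cipherSuitesPreferenceOrder
- }
- return cipherSuitesPreferenceOrderNoAES
-}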
-
-func cipherRC4(key, iv []byte, isRead bool) any {
- cipher, _ := rc4.NewCipher(key)
- return cipher
-}
-
-func cipher3DES(key, iv []byte, isRead bool) any {
- block, _ := des.NewTripleDESCipher(key)
- if isRead {
- return cipher.NewCBCDecrypter(block, iv)
- }
- return cipher.NewCBCEncrypter(block, iv)
-}
-
-func cipherAES(key, iv []byte, isRead bool) any {
- block, _ := aes.NewCipher(key)
- if isRead {
- return cipher.NewCBCDecrypter(block, iv)
- }
- return cipher.NewCBCEncrypter(block, iv)
-}
-
-// macSHA1 returns a SHA-1 based constant time MAC.
-func macSHA1(key []byte) hash.Hash {
- return hmac.New(newConstantTimeHash(sha1.New), key)
-}
-
-// macSHA256 returns a SHA-256 based MAC. This is only supported in TLS 1.2 and
-// is currently only used in disabled-by-default cipher suites.
-func macSHA256(key []byte) hash.Hash {
- return hmac.New(sha256.New, key)
-}
-
-type aead interface {
- cipher.AEAD
-
- // explicitNonceLen returns the number of bytes of explicit nonce
- // included in each record. This is eight for older AEADs and
- // zero for modern ones.
- explicitNonceLen() int
-}
-
-const (
- aeadNonceLength = 12
- noncePrefixLength = 4
-)
-
-// prefixNonceAEAD wraps an AEAD and prefixes a fixed portion of the nonce to
-// each call.
-type prefixNonceAEAD struct {
- // nonce contains the fixed part of the nonce in the first four bytes.
- nonce [aeadNonceLength]byte
- aead cipher.AEAD
-}
-
-func (f *prefixNonceAEAD) NonceSize() int { return aeadNonceLength - noncePrefixLength }
-func (f *prefixNonceAEAD) Overhead() int { return f.aead.Overhead() }
-func (f *prefixNonceAEAD) explicitNonceLen() int { return f.NonceSize() }
-
-func (f *prefixNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte {
- copy(f.nonce[4:], nonce)
- return f.aead.Seal(out, f.nonce[:], plaintext, additionalData)
-}
-
-func (f *prefixNonceAEAD) Open(out, nonce, ciphertext, additionalData []byte) ([]byte, error) {
- copy(f.nonce[4:], nonce)
- return f.aead.Open(out, f.nonce[:], ciphertext, additionalData)
-}
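-
-// Minimal sketch, not part of the original file: sealing one record with the
-// TLS 1.2 AES-GCM nonce layout, i.e. a 4-byte implicit prefix from the key
-// block plus an 8-byte explicit nonce (typically the sequence number).
-// Additional data is omitted here; real records authenticate their header.
-func sealTLS12GCMRecord(key, noncePrefix, seq, plaintext []byte) []byte {
- a := aeadAESGCM(key, noncePrefix) // key: 16 or 32 bytes; prefix: 4 bytes
- return a.Seal(nil, seq, plaintext, nil) // seq must be a.NonceSize() == 8 bytes
-}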
-
-// xorNonceAEAD wraps an AEAD by XORing a fixed pattern into the nonce before
-// each call.
-type xorNonceAEAD struct {
- nonceMask [aeadNonceLength]byte
- aead cipher.AEAD
-}
-
-func (f *xorNonceAEAD) NonceSize() int { return 8 } // 64-bit sequence number
-func (f *xorNonceAEAD) Overhead() int { return f.aead.Overhead() }
-func (f *xorNonceAEAD) explicitNonceLen() int { return 0 }
-
-func (f *xorNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte {
- for i, b := range nonce {
- f.nonceMask[4+i] ^= b
- }
- result := f.aead.Seal(out, f.nonceMask[:], plaintext, additionalData)
- for i, b := range nonce {
- f.nonceMask[4+i] ^= b
- }
-
- return result
-}
-
-func (f *xorNonceAEAD) Open(out, nonce, ciphertext, additionalData []byte) ([]byte, error) {
- for i, b := range nonce {
- f.nonceMask[4+i] ^= b
- }
- result, err := f.aead.Open(out, f.nonceMask[:], ciphertext, additionalData)
- for i, b := range nonce {
- f.nonceMask[4+i] ^= b
- }
-
- return result, err
-}
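-
-// Minimal sketch, not part of the original file: the RFC 8446, Section 5.3
-// nonce scheme implemented by xorNonceAEAD, where the 8-byte record sequence
-// number is XORed into the last 8 bytes of the per-connection IV. Real
-// records also authenticate the record header as additional data.
-func sealTLS13Record(key, iv, seq, plaintext []byte) []byte {
- a := aeadAESGCMTLS13(key, iv) // iv must be aeadNonceLength (12) bytes
- return a.Seal(nil, seq, plaintext, nil) // seq: the 8-byte sequence number
-}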
-
-func aeadAESGCM(key, noncePrefix []byte) aead {
- if len(noncePrefix) != noncePrefixLength {
- panic("tls: internal error: wrong nonce length")
- }
- aes, err := aes.NewCipher(key)
- if err != nil {
- panic(err)
- }
- aead, err := cipher.NewGCM(aes)
- if err != nil {
- panic(err)
- }
-
- ret := &prefixNonceAEAD{aead: aead}
- copy(ret.nonce[:], noncePrefix)
- return ret
-}
-
-func aeadAESGCMTLS13(key, nonceMask []byte) aead {
- if len(nonceMask) != aeadNonceLength {
- panic("tls: internal error: wrong nonce length")
- }
- aes, err := aes.NewCipher(key)
- if err != nil {
- panic(err)
- }
- aead, err := cipher.NewGCM(aes)
- if err != nil {
- panic(err)
- }
-
- ret := &xorNonceAEAD{aead: aead}
- copy(ret.nonceMask[:], nonceMask)
- return ret
-}
-
-func aeadChaCha20Poly1305(key, nonceMask []byte) aead {
- if len(nonceMask) != aeadNonceLength {
- panic("tls: internal error: wrong nonce length")
- }
- aead, err := chacha20poly1305.New(key)
- if err != nil {
- panic(err)
- }
-
- ret := &xorNonceAEAD{aead: aead}
- copy(ret.nonceMask[:], nonceMask)
- return ret
-}
-
-type constantTimeHash interface {
- hash.Hash
- ConstantTimeSum(b []byte) []byte
-}
-
-// cthWrapper wraps any hash.Hash that implements ConstantTimeSum, replacing
-// all calls to Sum with calls to ConstantTimeSum. It's used to obtain a
-// ConstantTimeSum-based HMAC.
-type cthWrapper struct {
- h constantTimeHash
-}
-
-func (c *cthWrapper) Size() int { return c.h.Size() }
-func (c *cthWrapper) BlockSize() int { return c.h.BlockSize() }
-func (c *cthWrapper) Reset() { c.h.Reset() }
-func (c *cthWrapper) Write(p []byte) (int, error) { return c.h.Write(p) }
-func (c *cthWrapper) Sum(b []byte) []byte { return c.h.ConstantTimeSum(b) }
-
-func newConstantTimeHash(h func() hash.Hash) func() hash.Hash {
- return func() hash.Hash {
- return &cthWrapper{h().(constantTimeHash)}
- }
-}
-
-// tls10MAC implements the TLS 1.0 MAC function. RFC 2246, Section 6.2.3.
-func tls10MAC(h hash.Hash, out, seq, header, data, extra []byte) []byte {
- h.Reset()
- h.Write(seq)
- h.Write(header)
- h.Write(data)
- res := h.Sum(out)
- if extra != nil {
- h.Write(extra)
- }
- return res
-}
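-
-// Illustrative sketch: computing a record MAC over seq || header || data with
-// the constant-time SHA-1 HMAC above. All argument names are placeholders.
-func exampleRecordMAC(macKey, seq, header, data []byte) []byte {
- h := macSHA1(macKey)
- return tls10MAC(h, nil, seq, header, data, nil)
-}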
-
-func rsaKA(version uint16) keyAgreement {
- return rsaKeyAgreement{}
-}
-
-func ecdheECDSAKA(version uint16) keyAgreement {
- return &ecdheKeyAgreement{
- isRSA: false,
- version: version,
- }
-}
-
-func ecdheRSAKA(version uint16) keyAgreement {
- return &ecdheKeyAgreement{
- isRSA: true,
- version: version,
- }
-}
-
-// mutualCipherSuite returns a cipherSuite given a list of supported
-// ciphersuites and the id requested by the peer.
-func mutualCipherSuite(have []uint16, want uint16) *cipherSuite {
- for _, id := range have {
- if id == want {
- return cipherSuiteByID(id)
- }
- }
- return nil
-}
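-
-// Usage sketch (hypothetical helper): a peer's requested id is acceptable
-// only if mutualCipherSuite finds it in our own enabled list.
-func suiteAgreed(ours []uint16, peerID uint16) bool {
- return mutualCipherSuite(ours, peerID) != nil
-}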
-
-func cipherSuiteByID(id uint16) *cipherSuite {
- for _, cipherSuite := range cipherSuites {
- if cipherSuite.id == id {
- return cipherSuite
- }
- }
- return nil
-}
-
-func mutualCipherSuiteTLS13(have []uint16, want uint16) *cipherSuiteTLS13 {
- for _, id := range have {
- if id == want {
- return cipherSuiteTLS13ByID(id)
- }
- }
- return nil
-}
-
-func cipherSuiteTLS13ByID(id uint16) *cipherSuiteTLS13 {
- for _, cipherSuite := range cipherSuitesTLS13 {
- if cipherSuite.id == id {
- return cipherSuite
- }
- }
- return nil
-}
-
-// A list of cipher suite IDs that are, or have been, implemented by this
-// package.
-//
-// See https://www.iana.org/assignments/tls-parameters/tls-parameters.xml
-const (
- // TLS 1.0 - 1.2 cipher suites.
- TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
- TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000a
- TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002f
- TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
- TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003c
- TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009c
- TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009d
- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xc007
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xc009
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xc00a
- TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xc011
- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xc012
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xc013
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xc014
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xc023
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xc027
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02f
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02b
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xc030
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xc02c
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xcca8
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xcca9
-
- // TLS 1.3 cipher suites.
- TLS_AES_128_GCM_SHA256 uint16 = 0x1301
- TLS_AES_256_GCM_SHA384 uint16 = 0x1302
- TLS_CHACHA20_POLY1305_SHA256 uint16 = 0x1303
-
- // TLS_FALLBACK_SCSV isn't a standard cipher suite but an indicator
- // that the client is doing version fallback. See RFC 7507.
- TLS_FALLBACK_SCSV uint16 = 0x5600
-
- // Legacy names for the corresponding cipher suites with the correct _SHA256
- // suffix, retained for backward compatibility.
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 = TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 = TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
-)
diff --git a/contrib/go/_std_1.18/src/crypto/tls/common.go b/contrib/go/_std_1.18/src/crypto/tls/common.go
deleted file mode 100644
index e6e7598ce9..0000000000
--- a/contrib/go/_std_1.18/src/crypto/tls/common.go
+++ /dev/null
@@ -1,1480 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tls
-
-import (
- "bytes"
- "container/list"
- "context"
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha512"
- "crypto/x509"
- "errors"
- "fmt"
- "internal/godebug"
- "io"
- "net"
- "strings"
- "sync"
- "time"
-)
-
-const (
- VersionTLS10 = 0x0301
- VersionTLS11 = 0x0302
- VersionTLS12 = 0x0303
- VersionTLS13 = 0x0304
-
- // Deprecated: SSLv3 is cryptographically broken, and is no longer
- // supported by this package. See golang.org/issue/32716.
- VersionSSL30 = 0x0300
-)
-
-const (
- maxPlaintext = 16384 // maximum plaintext payload length
- maxCiphertext = 16384 + 2048 // maximum ciphertext payload length
- maxCiphertextTLS13 = 16384 + 256 // maximum ciphertext length in TLS 1.3
- recordHeaderLen = 5 // record header length
- maxHandshake = 65536 // maximum handshake we support (protocol max is 16 MB)
- maxUselessRecords = 16 // maximum number of consecutive non-advancing records
-)
-
-// TLS record types.
-type recordType uint8
-
-const (
- recordTypeChangeCipherSpec recordType = 20
- recordTypeAlert recordType = 21
- recordTypeHandshake recordType = 22
- recordTypeApplicationData recordType = 23
-)
-
-// TLS handshake message types.
-const (
- typeHelloRequest uint8 = 0
- typeClientHello uint8 = 1
- typeServerHello uint8 = 2
- typeNewSessionTicket uint8 = 4
- typeEndOfEarlyData uint8 = 5
- typeEncryptedExtensions uint8 = 8
- typeCertificate uint8 = 11
- typeServerKeyExchange uint8 = 12
- typeCertificateRequest uint8 = 13
- typeServerHelloDone uint8 = 14
- typeCertificateVerify uint8 = 15
- typeClientKeyExchange uint8 = 16
- typeFinished uint8 = 20
- typeCertificateStatus uint8 = 22
- typeKeyUpdate uint8 = 24
- typeNextProtocol uint8 = 67 // Not IANA assigned
- typeMessageHash uint8 = 254 // synthetic message
-)
-
-// TLS compression types.
-const (
- compressionNone uint8 = 0
-)
-
-// TLS extension numbers
-const (
- extensionServerName uint16 = 0
- extensionStatusRequest uint16 = 5
- extensionSupportedCurves uint16 = 10 // supported_groups in TLS 1.3, see RFC 8446, Section 4.2.7
- extensionSupportedPoints uint16 = 11
- extensionSignatureAlgorithms uint16 = 13
- extensionALPN uint16 = 16
- extensionSCT uint16 = 18
- extensionSessionTicket uint16 = 35
- extensionPreSharedKey uint16 = 41
- extensionEarlyData uint16 = 42
- extensionSupportedVersions uint16 = 43
- extensionCookie uint16 = 44
- extensionPSKModes uint16 = 45
- extensionCertificateAuthorities uint16 = 47
- extensionSignatureAlgorithmsCert uint16 = 50
- extensionKeyShare uint16 = 51
- extensionRenegotiationInfo uint16 = 0xff01
-)
-
-// TLS signaling cipher suite values
-const (
- scsvRenegotiation uint16 = 0x00ff
-)
-
-// CurveID is the type of a TLS identifier for an elliptic curve. See
-// https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-8.
-//
-// In TLS 1.3, this type is called NamedGroup, but at this time this library
-// only supports Elliptic Curve based groups. See RFC 8446, Section 4.2.7.
-type CurveID uint16
-
-const (
- CurveP256 CurveID = 23
- CurveP384 CurveID = 24
- CurveP521 CurveID = 25
- X25519 CurveID = 29
-)
-
-// TLS 1.3 Key Share. See RFC 8446, Section 4.2.8.
-type keyShare struct {
- group CurveID
- data []byte
-}
-
-// TLS 1.3 PSK Key Exchange Modes. See RFC 8446, Section 4.2.9.
-const (
- pskModePlain uint8 = 0
- pskModeDHE uint8 = 1
-)
-
-// TLS 1.3 PSK Identity. Can be a Session Ticket, or a reference to a saved
-// session. See RFC 8446, Section 4.2.11.
-type pskIdentity struct {
- label []byte
- obfuscatedTicketAge uint32
-}
-
-// TLS Elliptic Curve Point Formats
-// https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-9
-const (
- pointFormatUncompressed uint8 = 0
-)
-
-// TLS CertificateStatusType (RFC 3546)
-const (
- statusTypeOCSP uint8 = 1
-)
-
-// Certificate types (for certificateRequestMsg)
-const (
- certTypeRSASign = 1
- certTypeECDSASign = 64 // ECDSA or EdDSA keys, see RFC 8422, Section 3.
-)
-
-// Signature algorithms (for internal signaling use). Starting at 225 to avoid overlap with
-// TLS 1.2 codepoints (RFC 5246, Appendix A.4.1), with which these have nothing to do.
-const (
- signaturePKCS1v15 uint8 = iota + 225
- signatureRSAPSS
- signatureECDSA
- signatureEd25519
-)
-
-// directSigning is a standard Hash value that signals that no pre-hashing
-// should be performed, and that the input should be signed directly. It is the
-// hash function associated with the Ed25519 signature scheme.
-var directSigning crypto.Hash = 0
-
-// supportedSignatureAlgorithms contains the signature and hash algorithms that
-// the code advertises as supported in a TLS 1.2+ ClientHello and in a TLS 1.2+
-// CertificateRequest. The two fields are merged to match with TLS 1.3.
-// Note that in TLS 1.2, the ECDSA algorithms are not constrained to P-256, etc.
-var supportedSignatureAlgorithms = []SignatureScheme{
- PSSWithSHA256,
- ECDSAWithP256AndSHA256,
- Ed25519,
- PSSWithSHA384,
- PSSWithSHA512,
- PKCS1WithSHA256,
- PKCS1WithSHA384,
- PKCS1WithSHA512,
- ECDSAWithP384AndSHA384,
- ECDSAWithP521AndSHA512,
- PKCS1WithSHA1,
- ECDSAWithSHA1,
-}
-
-// helloRetryRequestRandom is set as the Random value of a ServerHello
-// to signal that the message is actually a HelloRetryRequest.
-var helloRetryRequestRandom = []byte{ // See RFC 8446, Section 4.1.3.
- 0xCF, 0x21, 0xAD, 0x74, 0xE5, 0x9A, 0x61, 0x11,
- 0xBE, 0x1D, 0x8C, 0x02, 0x1E, 0x65, 0xB8, 0x91,
- 0xC2, 0xA2, 0x11, 0x16, 0x7A, 0xBB, 0x8C, 0x5E,
- 0x07, 0x9E, 0x09, 0xE2, 0xC8, 0xA8, 0x33, 0x9C,
-}
-
-const (
- // downgradeCanaryTLS12 or downgradeCanaryTLS11 is embedded in the server
- // random as a downgrade protection if the server would be capable of
- // negotiating a higher version. See RFC 8446, Section 4.1.3.
- downgradeCanaryTLS12 = "DOWNGRD\x01"
- downgradeCanaryTLS11 = "DOWNGRD\x00"
-)
-
-// testingOnlyForceDowngradeCanary is set in tests to force the server side to
-// include downgrade canaries even if it's using its highest supported version.
-var testingOnlyForceDowngradeCanary bool
-
-// ConnectionState records basic TLS details about the connection.
-type ConnectionState struct {
- // Version is the TLS version used by the connection (e.g. VersionTLS12).
- Version uint16
-
- // HandshakeComplete is true if the handshake has concluded.
- HandshakeComplete bool
-
- // DidResume is true if this connection was successfully resumed from a
- // previous session with a session ticket or similar mechanism.
- DidResume bool
-
- // CipherSuite is the cipher suite negotiated for the connection (e.g.
- // TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_AES_128_GCM_SHA256).
- CipherSuite uint16
-
- // NegotiatedProtocol is the application protocol negotiated with ALPN.
- NegotiatedProtocol string
-
- // NegotiatedProtocolIsMutual used to indicate a mutual NPN negotiation.
- //
- // Deprecated: this value is always true.
- NegotiatedProtocolIsMutual bool
-
- // ServerName is the value of the Server Name Indication extension sent by
- // the client. It's available both on the server and on the client side.
- ServerName string
-
- // PeerCertificates are the parsed certificates sent by the peer, in the
- // order in which they were sent. The first element is the leaf certificate
- // that the connection is verified against.
- //
- // On the client side, it can't be empty. On the server side, it can be
- // empty if Config.ClientAuth is not RequireAnyClientCert or
- // RequireAndVerifyClientCert.
- PeerCertificates []*x509.Certificate
-
- // VerifiedChains is a list of one or more chains where the first element is
- // PeerCertificates[0] and the last element is from Config.RootCAs (on the
- // client side) or Config.ClientCAs (on the server side).
- //
- // On the client side, it's set if Config.InsecureSkipVerify is false. On
- // the server side, it's set if Config.ClientAuth is VerifyClientCertIfGiven
- // (and the peer provided a certificate) or RequireAndVerifyClientCert.
- VerifiedChains [][]*x509.Certificate
-
- // SignedCertificateTimestamps is a list of SCTs provided by the peer
- // through the TLS handshake for the leaf certificate, if any.
- SignedCertificateTimestamps [][]byte
-
- // OCSPResponse is a stapled Online Certificate Status Protocol (OCSP)
- // response provided by the peer for the leaf certificate, if any.
- OCSPResponse []byte
-
- // TLSUnique contains the "tls-unique" channel binding value (see RFC 5929,
- // Section 3). This value will be nil for TLS 1.3 connections and for all
- // resumed connections.
- //
- // Deprecated: there are conditions in which this value might not be unique
- // to a connection. See the Security Considerations sections of RFC 5705 and
- // RFC 7627, and https://mitls.org/pages/attacks/3SHAKE#channelbindings.
- TLSUnique []byte
-
- // ekm is a closure exposed via ExportKeyingMaterial.
- ekm func(label string, context []byte, length int) ([]byte, error)
-}
-
-// ExportKeyingMaterial returns length bytes of exported key material in a new
-// slice as defined in RFC 5705. If context is nil, it is not used as part of
-// the seed. If the connection was set to allow renegotiation via
-// Config.Renegotiation, this function will return an error.
-func (cs *ConnectionState) ExportKeyingMaterial(label string, context []byte, length int) ([]byte, error) {
- return cs.ekm(label, context, length)
-}
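-
-// Hedged usage sketch, not part of the original file: deriving 32 bytes of
-// RFC 5705 keying material from a completed handshake's state. The label is
-// application-chosen; "EXPERIMENTAL example" is a placeholder.
-func exampleExportKM(cs ConnectionState) ([]byte, error) {
- return cs.ExportKeyingMaterial("EXPERIMENTAL example", nil, 32)
-}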
-
-// ClientAuthType declares the policy the server will follow for
-// TLS Client Authentication.
-type ClientAuthType int
-
-const (
- // NoClientCert indicates that no client certificate should be requested
- // during the handshake, and if any certificates are sent they will not
- // be verified.
- NoClientCert ClientAuthType = iota
- // RequestClientCert indicates that a client certificate should be requested
- // during the handshake, but does not require that the client send any
- // certificates.
- RequestClientCert
- // RequireAnyClientCert indicates that a client certificate should be requested
- // during the handshake, and that at least one certificate is required to be
- // sent by the client, but that certificate is not required to be valid.
- RequireAnyClientCert
- // VerifyClientCertIfGiven indicates that a client certificate should be requested
- // during the handshake, but does not require that the client sends a
- // certificate. If the client does send a certificate it is required to be
- // valid.
- VerifyClientCertIfGiven
- // RequireAndVerifyClientCert indicates that a client certificate should be requested
- // during the handshake, and that at least one valid certificate is required
- // to be sent by the client.
- RequireAndVerifyClientCert
-)
-
-// requiresClientCert reports whether the ClientAuthType requires a client
-// certificate to be provided.
-func requiresClientCert(c ClientAuthType) bool {
- switch c {
- case RequireAnyClientCert, RequireAndVerifyClientCert:
- return true
- default:
- return false
- }
-}
-
-// ClientSessionState contains the state needed by clients to resume TLS
-// sessions.
-type ClientSessionState struct {
- sessionTicket []uint8 // Encrypted ticket used for session resumption with server
- vers uint16 // TLS version negotiated for the session
- cipherSuite uint16 // Ciphersuite negotiated for the session
- masterSecret []byte // Full handshake MasterSecret, or TLS 1.3 resumption_master_secret
- serverCertificates []*x509.Certificate // Certificate chain presented by the server
- verifiedChains [][]*x509.Certificate // Certificate chains we built for verification
- receivedAt time.Time // When the session ticket was received from the server
- ocspResponse []byte // Stapled OCSP response presented by the server
- scts [][]byte // SCTs presented by the server
-
- // TLS 1.3 fields.
- nonce []byte // Ticket nonce sent by the server, to derive PSK
- useBy time.Time // Expiration of the ticket lifetime as set by the server
- ageAdd uint32 // Random obfuscation factor for sending the ticket age
-}
-
-// ClientSessionCache is a cache of ClientSessionState objects that can be used
-// by a client to resume a TLS session with a given server. ClientSessionCache
-// implementations should expect to be called concurrently from different
-// goroutines. Up to TLS 1.2, only ticket-based resumption is supported, not
-// SessionID-based resumption. In TLS 1.3 they were merged into PSK modes, which
-// are supported via this interface.
-type ClientSessionCache interface {
- // Get searches for a ClientSessionState associated with the given key.
- // On return, ok is true if one was found.
- Get(sessionKey string) (session *ClientSessionState, ok bool)
-
- // Put adds the ClientSessionState to the cache with the given key. It might
- // get called multiple times in a connection if a TLS 1.3 server provides
- // more than one session ticket. If called with a nil *ClientSessionState,
- // it should remove the cache entry.
- Put(sessionKey string, cs *ClientSessionState)
-}
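-
-// Minimal illustrative ClientSessionCache backed by a plain map; it only
-// demonstrates the contract. A real implementation would bound its size, as
-// the LRU cache returned by NewLRUClientSessionCache does.
-type mapSessionCache struct {
- mu sync.Mutex
- m map[string]*ClientSessionState
-}
-
-func (c *mapSessionCache) Get(key string) (*ClientSessionState, bool) {
- c.mu.Lock()
- defer c.mu.Unlock()
- s, ok := c.m[key]
- return s, ok
-}
-
-func (c *mapSessionCache) Put(key string, cs *ClientSessionState) {
- c.mu.Lock()
- defer c.mu.Unlock()
- if cs == nil {
- delete(c.m, key) // a nil entry means "remove", per the interface docs
- return
- }
- c.m[key] = cs
-}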
-
-//go:generate stringer -type=SignatureScheme,CurveID,ClientAuthType -output=common_string.go
-
-// SignatureScheme identifies a signature algorithm supported by TLS. See
-// RFC 8446, Section 4.2.3.
-type SignatureScheme uint16
-
-const (
- // RSASSA-PKCS1-v1_5 algorithms.
- PKCS1WithSHA256 SignatureScheme = 0x0401
- PKCS1WithSHA384 SignatureScheme = 0x0501
- PKCS1WithSHA512 SignatureScheme = 0x0601
-
- // RSASSA-PSS algorithms with public key OID rsaEncryption.
- PSSWithSHA256 SignatureScheme = 0x0804
- PSSWithSHA384 SignatureScheme = 0x0805
- PSSWithSHA512 SignatureScheme = 0x0806
-
- // ECDSA algorithms. Only constrained to a specific curve in TLS 1.3.
- ECDSAWithP256AndSHA256 SignatureScheme = 0x0403
- ECDSAWithP384AndSHA384 SignatureScheme = 0x0503
- ECDSAWithP521AndSHA512 SignatureScheme = 0x0603
-
- // EdDSA algorithms.
- Ed25519 SignatureScheme = 0x0807
-
- // Legacy signature and hash algorithms for TLS 1.2.
- PKCS1WithSHA1 SignatureScheme = 0x0201
- ECDSAWithSHA1 SignatureScheme = 0x0203
-)
-
-// ClientHelloInfo contains information from a ClientHello message in order to
-// guide application logic in the GetCertificate and GetConfigForClient callbacks.
-type ClientHelloInfo struct {
- // CipherSuites lists the CipherSuites supported by the client (e.g.
- // TLS_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).
- CipherSuites []uint16
-
- // ServerName indicates the name of the server requested by the client
- // in order to support virtual hosting. ServerName is only set if the
- // client is using SNI (see RFC 4366, Section 3.1).
- ServerName string
-
- // SupportedCurves lists the elliptic curves supported by the client.
- // SupportedCurves is set only if the Supported Elliptic Curves
- // Extension is being used (see RFC 4492, Section 5.1.1).
- SupportedCurves []CurveID
-
- // SupportedPoints lists the point formats supported by the client.
- // SupportedPoints is set only if the Supported Point Formats Extension
- // is being used (see RFC 4492, Section 5.1.2).
- SupportedPoints []uint8
-
- // SignatureSchemes lists the signature and hash schemes that the client
- // is willing to verify. SignatureSchemes is set only if the Signature
- // Algorithms Extension is being used (see RFC 5246, Section 7.4.1.4.1).
- SignatureSchemes []SignatureScheme
-
- // SupportedProtos lists the application protocols supported by the client.
- // SupportedProtos is set only if the Application-Layer Protocol
- // Negotiation Extension is being used (see RFC 7301, Section 3.1).
- //
- // Servers can select a protocol by setting Config.NextProtos in a
- // GetConfigForClient return value.
- SupportedProtos []string
-
- // SupportedVersions lists the TLS versions supported by the client.
- // For TLS versions less than 1.3, this is extrapolated from the max
- // version advertised by the client, so values other than the greatest
- // might be rejected if used.
- SupportedVersions []uint16
-
- // Conn is the underlying net.Conn for the connection. Do not read
- // from, or write to, this connection; that will cause the TLS
- // connection to fail.
- Conn net.Conn
-
- // config is embedded by the GetCertificate or GetConfigForClient caller,
- // for use with SupportsCertificate.
- config *Config
-
- // ctx is the context of the handshake that is in progress.
- ctx context.Context
-}
-
-// Context returns the context of the handshake that is in progress.
-// This context is a child of the context passed to HandshakeContext,
-// if any, and is canceled when the handshake concludes.
-func (c *ClientHelloInfo) Context() context.Context {
- return c.ctx
-}
-
-// CertificateRequestInfo contains information from a server's
-// CertificateRequest message, which is used to demand a certificate and proof
-// of control from a client.
-type CertificateRequestInfo struct {
- // AcceptableCAs contains zero or more DER-encoded X.501
- // Distinguished Names. These are the names of root or intermediate CAs
- // that the server wishes the returned certificate to be signed by. An
- // empty slice indicates that the server has no preference.
- AcceptableCAs [][]byte
-
- // SignatureSchemes lists the signature schemes that the server is
- // willing to verify.
- SignatureSchemes []SignatureScheme
-
- // Version is the TLS version that was negotiated for this connection.
- Version uint16
-
- // ctx is the context of the handshake that is in progress.
- ctx context.Context
-}
-
-// Context returns the context of the handshake that is in progress.
-// This context is a child of the context passed to HandshakeContext,
-// if any, and is canceled when the handshake concludes.
-func (c *CertificateRequestInfo) Context() context.Context {
- return c.ctx
-}
-
-// RenegotiationSupport enumerates the different levels of support for TLS
-// renegotiation. TLS renegotiation is the act of performing subsequent
-// handshakes on a connection after the first. This significantly complicates
-// the state machine and has been the source of numerous, subtle security
-// issues. Initiating a renegotiation is not supported, but support for
-// accepting renegotiation requests may be enabled.
-//
-// Even when enabled, the server may not change its identity between handshakes
-// (i.e. the leaf certificate must be the same). Additionally, concurrent
-// handshake and application data flow is not permitted so renegotiation can
-// only be used with protocols that synchronise with the renegotiation, such as
-// HTTPS.
-//
-// Renegotiation is not defined in TLS 1.3.
-type RenegotiationSupport int
-
-const (
- // RenegotiateNever disables renegotiation.
- RenegotiateNever RenegotiationSupport = iota
-
- // RenegotiateOnceAsClient allows a remote server to request
- // renegotiation once per connection.
- RenegotiateOnceAsClient
-
- // RenegotiateFreelyAsClient allows a remote server to repeatedly
- // request renegotiation.
- RenegotiateFreelyAsClient
-)
-
-// A Config structure is used to configure a TLS client or server.
-// After one has been passed to a TLS function it must not be
-// modified. A Config may be reused; the tls package will also not
-// modify it.
-type Config struct {
- // Rand provides the source of entropy for nonces and RSA blinding.
- // If Rand is nil, TLS uses the cryptographic random reader in package
- // crypto/rand.
- // The Reader must be safe for use by multiple goroutines.
- Rand io.Reader
-
- // Time returns the current time as the number of seconds since the epoch.
- // If Time is nil, TLS uses time.Now.
- Time func() time.Time
-
- // Certificates contains one or more certificate chains to present to the
- // other side of the connection. The first certificate compatible with the
- // peer's requirements is selected automatically.
- //
- // Server configurations must set one of Certificates, GetCertificate or
- // GetConfigForClient. Clients doing client-authentication may set either
- // Certificates or GetClientCertificate.
- //
- // Note: if there are multiple Certificates, and they don't have the
- // optional field Leaf set, certificate selection will incur a significant
- // per-handshake performance cost.
- Certificates []Certificate
-
- // NameToCertificate maps from a certificate name to an element of
- // Certificates. Note that a certificate name can be of the form
- // '*.example.com' and so doesn't have to be a domain name as such.
- //
- // Deprecated: NameToCertificate only allows associating a single
- // certificate with a given name. Leave this field nil to let the library
- // select the first compatible chain from Certificates.
- NameToCertificate map[string]*Certificate
-
- // GetCertificate returns a Certificate based on the given
- // ClientHelloInfo. It will only be called if the client supplies SNI
- // information or if Certificates is empty.
- //
- // If GetCertificate is nil or returns nil, then the certificate is
- // retrieved from NameToCertificate. If NameToCertificate is nil, the
- // best element of Certificates will be used.
- GetCertificate func(*ClientHelloInfo) (*Certificate, error)
-
- // GetClientCertificate, if not nil, is called when a server requests a
- // certificate from a client. If set, the contents of Certificates will
- // be ignored.
- //
- // If GetClientCertificate returns an error, the handshake will be
- // aborted and that error will be returned. Otherwise
- // GetClientCertificate must return a non-nil Certificate. If
- // Certificate.Certificate is empty then no certificate will be sent to
- // the server. If this is unacceptable to the server then it may abort
- // the handshake.
- //
- // GetClientCertificate may be called multiple times for the same
- // connection if renegotiation occurs or if TLS 1.3 is in use.
- GetClientCertificate func(*CertificateRequestInfo) (*Certificate, error)
-
- // GetConfigForClient, if not nil, is called after a ClientHello is
- // received from a client. It may return a non-nil Config in order to
- // change the Config that will be used to handle this connection. If
- // the returned Config is nil, the original Config will be used. The
- // Config returned by this callback may not be subsequently modified.
- //
- // If GetConfigForClient is nil, the Config passed to Server() will be
- // used for all connections.
- //
- // If SessionTicketKey was explicitly set on the returned Config, or if
- // SetSessionTicketKeys was called on the returned Config, those keys will
- // be used. Otherwise, the original Config keys will be used (and possibly
- // rotated if they are automatically managed).
- GetConfigForClient func(*ClientHelloInfo) (*Config, error)
-
- // VerifyPeerCertificate, if not nil, is called after normal
- // certificate verification by either a TLS client or server. It
- // receives the raw ASN.1 certificates provided by the peer and also
- // any verified chains that normal processing found. If it returns a
- // non-nil error, the handshake is aborted and that error results.
- //
- // If normal verification fails then the handshake will abort before
- // considering this callback. If normal verification is disabled by
- // setting InsecureSkipVerify, or (for a server) when ClientAuth is
- // RequestClientCert or RequireAnyClientCert, then this callback will
- // be considered but the verifiedChains argument will always be nil.
- VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error
-
- // VerifyConnection, if not nil, is called after normal certificate
- // verification and after VerifyPeerCertificate by either a TLS client
- // or server. If it returns a non-nil error, the handshake is aborted
- // and that error results.
- //
- // If normal verification fails then the handshake will abort before
- // considering this callback. This callback will run for all connections
- // regardless of InsecureSkipVerify or ClientAuth settings.
- VerifyConnection func(ConnectionState) error
-
- // RootCAs defines the set of root certificate authorities
- // that clients use when verifying server certificates.
- // If RootCAs is nil, TLS uses the host's root CA set.
- RootCAs *x509.CertPool
-
- // NextProtos is a list of supported application level protocols, in
- // order of preference. If both peers support ALPN, the selected
- // protocol will be one from this list, and the connection will fail
- // if there is no mutually supported protocol. If NextProtos is empty
- // or the peer doesn't support ALPN, the connection will succeed and
- // ConnectionState.NegotiatedProtocol will be empty.
- NextProtos []string
-
- // ServerName is used to verify the hostname on the returned
- // certificates unless InsecureSkipVerify is given. It is also included
- // in the client's handshake to support virtual hosting unless it is
- // an IP address.
- ServerName string
-
- // ClientAuth determines the server's policy for
- // TLS Client Authentication. The default is NoClientCert.
- ClientAuth ClientAuthType
-
- // ClientCAs defines the set of root certificate authorities
- // that servers use if required to verify a client certificate
- // by the policy in ClientAuth.
- ClientCAs *x509.CertPool
-
- // InsecureSkipVerify controls whether a client verifies the server's
- // certificate chain and host name. If InsecureSkipVerify is true, crypto/tls
- // accepts any certificate presented by the server and any host name in that
- // certificate. In this mode, TLS is susceptible to machine-in-the-middle
- // attacks unless custom verification is used. This should be used only for
- // testing or in combination with VerifyConnection or VerifyPeerCertificate.
- InsecureSkipVerify bool
-
- // CipherSuites is a list of enabled TLS 1.0–1.2 cipher suites. The order of
- // the list is ignored. Note that TLS 1.3 ciphersuites are not configurable.
- //
- // If CipherSuites is nil, a safe default list is used. The default cipher
- // suites might change over time.
- CipherSuites []uint16
-
- // PreferServerCipherSuites is a legacy field and has no effect.
- //
- // It used to control whether the server would follow the client's or the
- // server's preference. Servers now select the best mutually supported
- // cipher suite based on logic that takes into account inferred client
- // hardware, server hardware, and security.
- //
- // Deprecated: PreferServerCipherSuites is ignored.
- PreferServerCipherSuites bool
-
- // SessionTicketsDisabled may be set to true to disable session ticket and
- // PSK (resumption) support. Note that on clients, session ticket support is
- // also disabled if ClientSessionCache is nil.
- SessionTicketsDisabled bool
-
- // SessionTicketKey is used by TLS servers to provide session resumption.
- // See RFC 5077 and the PSK mode of RFC 8446. If zero, it will be filled
- // with random data before the first server handshake.
- //
- // Deprecated: if this field is left at zero, session ticket keys will be
- // automatically rotated every day and dropped after seven days. For
- // customizing the rotation schedule or synchronizing servers that are
- // terminating connections for the same host, use SetSessionTicketKeys.
- SessionTicketKey [32]byte
-
- // ClientSessionCache is a cache of ClientSessionState entries for TLS
- // session resumption. It is only used by clients.
- ClientSessionCache ClientSessionCache
-
- // MinVersion contains the minimum TLS version that is acceptable.
- //
- // By default, TLS 1.2 is currently used as the minimum when acting as a
- // client, and TLS 1.0 when acting as a server. TLS 1.0 is the minimum
- // supported by this package, both as a client and as a server.
- //
- // The client-side default can temporarily be reverted to TLS 1.0 by
- // including the value "tls10default=1" in the GODEBUG environment variable.
- // Note that this option will be removed in Go 1.19 (but it will still be
- // possible to set this field to VersionTLS10 explicitly).
- MinVersion uint16
-
- // MaxVersion contains the maximum TLS version that is acceptable.
- //
- // By default, the maximum version supported by this package is used,
- // which is currently TLS 1.3.
- MaxVersion uint16
-
- // CurvePreferences contains the elliptic curves that will be used in
- // an ECDHE handshake, in preference order. If empty, the default will
- // be used. The client will use the first preference as the type for
- // its key share in TLS 1.3. This may change in the future.
- CurvePreferences []CurveID
-
- // DynamicRecordSizingDisabled disables adaptive sizing of TLS records.
- // When true, the largest possible TLS record size is always used. When
- // false, the size of TLS records may be adjusted in an attempt to
- // improve latency.
- DynamicRecordSizingDisabled bool
-
- // Renegotiation controls what types of renegotiation are supported.
- // The default, none, is correct for the vast majority of applications.
- Renegotiation RenegotiationSupport
-
- // KeyLogWriter optionally specifies a destination for TLS master secrets
- // in NSS key log format that can be used to allow external programs
- // such as Wireshark to decrypt TLS connections.
- // See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format.
- // Use of KeyLogWriter compromises security and should only be
- // used for debugging.
- KeyLogWriter io.Writer
-
- // mutex protects sessionTicketKeys and autoSessionTicketKeys.
- mutex sync.RWMutex
- // sessionTicketKeys contains zero or more ticket keys. If set, it means
- // the keys were set with SessionTicketKey or SetSessionTicketKeys. The
- // first key is used for new tickets and any subsequent keys can be used to
- // decrypt old tickets. The slice contents are not protected by the mutex
- // and are immutable.
- sessionTicketKeys []ticketKey
- // autoSessionTicketKeys is like sessionTicketKeys but is owned by the
- // auto-rotation logic. See Config.ticketKeys.
- autoSessionTicketKeys []ticketKey
-}
-
-const (
- // ticketKeyNameLen is the number of bytes of identifier that is prepended to
- // an encrypted session ticket in order to identify the key used to encrypt it.
- ticketKeyNameLen = 16
-
- // ticketKeyLifetime is how long a ticket key remains valid and can be used to
- // resume a client connection.
- ticketKeyLifetime = 7 * 24 * time.Hour // 7 days
-
- // ticketKeyRotation is how often the server should rotate the session ticket key
- // that is used for new tickets.
- ticketKeyRotation = 24 * time.Hour
-)
-
-// ticketKey is the internal representation of a session ticket key.
-type ticketKey struct {
- // keyName is an opaque byte string that serves to identify the session
- // ticket key. It's exposed as plaintext in every session ticket.
- keyName [ticketKeyNameLen]byte
- aesKey [16]byte
- hmacKey [16]byte
- // created is the time at which this ticket key was created. See Config.ticketKeys.
- created time.Time
-}
-
-// ticketKeyFromBytes converts from the external representation of a session
-// ticket key to a ticketKey. Externally, session ticket keys are 32 random
-// bytes and this function expands that into sufficient name and key material.
-func (c *Config) ticketKeyFromBytes(b [32]byte) (key ticketKey) {
- hashed := sha512.Sum512(b[:])
- copy(key.keyName[:], hashed[:ticketKeyNameLen])
- copy(key.aesKey[:], hashed[ticketKeyNameLen:ticketKeyNameLen+16])
- copy(key.hmacKey[:], hashed[ticketKeyNameLen+16:ticketKeyNameLen+32])
- key.created = c.time()
- return key
-}
-
-// maxSessionTicketLifetime is the maximum allowed lifetime of a TLS 1.3 session
-// ticket, and the lifetime we set for tickets we send.
-const maxSessionTicketLifetime = 7 * 24 * time.Hour
-
-// Clone returns a shallow clone of c or nil if c is nil. It is safe to clone a Config that is
-// being used concurrently by a TLS client or server.
-func (c *Config) Clone() *Config {
- if c == nil {
- return nil
- }
- c.mutex.RLock()
- defer c.mutex.RUnlock()
- return &Config{
- Rand: c.Rand,
- Time: c.Time,
- Certificates: c.Certificates,
- NameToCertificate: c.NameToCertificate,
- GetCertificate: c.GetCertificate,
- GetClientCertificate: c.GetClientCertificate,
- GetConfigForClient: c.GetConfigForClient,
- VerifyPeerCertificate: c.VerifyPeerCertificate,
- VerifyConnection: c.VerifyConnection,
- RootCAs: c.RootCAs,
- NextProtos: c.NextProtos,
- ServerName: c.ServerName,
- ClientAuth: c.ClientAuth,
- ClientCAs: c.ClientCAs,
- InsecureSkipVerify: c.InsecureSkipVerify,
- CipherSuites: c.CipherSuites,
- PreferServerCipherSuites: c.PreferServerCipherSuites,
- SessionTicketsDisabled: c.SessionTicketsDisabled,
- SessionTicketKey: c.SessionTicketKey,
- ClientSessionCache: c.ClientSessionCache,
- MinVersion: c.MinVersion,
- MaxVersion: c.MaxVersion,
- CurvePreferences: c.CurvePreferences,
- DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
- Renegotiation: c.Renegotiation,
- KeyLogWriter: c.KeyLogWriter,
- sessionTicketKeys: c.sessionTicketKeys,
- autoSessionTicketKeys: c.autoSessionTicketKeys,
- }
-}
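-
-// Usage sketch: Clone is the safe way to derive a per-connection variant of a
-// shared Config, for example inside GetConfigForClient. base is assumed to be
-// non-nil; the helper name is hypothetical.
-func configWithServerName(base *Config, name string) *Config {
- cfg := base.Clone()
- cfg.ServerName = name
- return cfg
-}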
-
-// deprecatedSessionTicketKey is set as the prefix of SessionTicketKey if it was
-// randomized for backwards compatibility but is not in use.
-var deprecatedSessionTicketKey = []byte("DEPRECATED")
-
-// initLegacySessionTicketKeyRLocked ensures the legacy SessionTicketKey field is
-// randomized if empty, and that sessionTicketKeys is populated from it otherwise.
-func (c *Config) initLegacySessionTicketKeyRLocked() {
- // Don't write if SessionTicketKey is already defined as our deprecated string,
- // or if it is defined by the user but sessionTicketKeys is already set.
- if c.SessionTicketKey != [32]byte{} &&
- (bytes.HasPrefix(c.SessionTicketKey[:], deprecatedSessionTicketKey) || len(c.sessionTicketKeys) > 0) {
- return
- }
-
- // We need to write some data, so get an exclusive lock and re-check any conditions.
- c.mutex.RUnlock()
- defer c.mutex.RLock()
- c.mutex.Lock()
- defer c.mutex.Unlock()
- if c.SessionTicketKey == [32]byte{} {
- if _, err := io.ReadFull(c.rand(), c.SessionTicketKey[:]); err != nil {
- panic(fmt.Sprintf("tls: unable to generate random session ticket key: %v", err))
- }
- // Write the deprecated prefix at the beginning so we know we created
- // it. This key with the DEPRECATED prefix isn't used as an actual
- // session ticket key, and is only randomized in case the application
- // reuses it for some reason.
- copy(c.SessionTicketKey[:], deprecatedSessionTicketKey)
- } else if !bytes.HasPrefix(c.SessionTicketKey[:], deprecatedSessionTicketKey) && len(c.sessionTicketKeys) == 0 {
- c.sessionTicketKeys = []ticketKey{c.ticketKeyFromBytes(c.SessionTicketKey)}
- }
-}
-
-// ticketKeys returns the ticketKeys for this connection.
-// If configForClient has explicitly set keys, those will
-// be returned. Otherwise, the keys on c will be used and
-// may be rotated if auto-managed.
-// During rotation, any expired session ticket keys are deleted from
-// c.sessionTicketKeys. If the session ticket key that is currently
- // encrypting tickets (i.e. the first ticketKey in c.sessionTicketKeys)
-// is not fresh, then a new session ticket key will be
-// created and prepended to c.sessionTicketKeys.
-func (c *Config) ticketKeys(configForClient *Config) []ticketKey {
- // If the ConfigForClient callback returned a Config with explicitly set
- // keys, use those, otherwise just use the original Config.
- if configForClient != nil {
- configForClient.mutex.RLock()
- if configForClient.SessionTicketsDisabled {
- return nil
- }
- configForClient.initLegacySessionTicketKeyRLocked()
- if len(configForClient.sessionTicketKeys) != 0 {
- ret := configForClient.sessionTicketKeys
- configForClient.mutex.RUnlock()
- return ret
- }
- configForClient.mutex.RUnlock()
- }
-
- c.mutex.RLock()
- defer c.mutex.RUnlock()
- if c.SessionTicketsDisabled {
- return nil
- }
- c.initLegacySessionTicketKeyRLocked()
- if len(c.sessionTicketKeys) != 0 {
- return c.sessionTicketKeys
- }
- // Fast path for the common case where the key is fresh enough.
- if len(c.autoSessionTicketKeys) > 0 && c.time().Sub(c.autoSessionTicketKeys[0].created) < ticketKeyRotation {
- return c.autoSessionTicketKeys
- }
-
- // autoSessionTicketKeys are managed by auto-rotation.
- c.mutex.RUnlock()
- defer c.mutex.RLock()
- c.mutex.Lock()
- defer c.mutex.Unlock()
- // Re-check the condition in case it changed since obtaining the new lock.
- if len(c.autoSessionTicketKeys) == 0 || c.time().Sub(c.autoSessionTicketKeys[0].created) >= ticketKeyRotation {
- var newKey [32]byte
- if _, err := io.ReadFull(c.rand(), newKey[:]); err != nil {
- panic(fmt.Sprintf("unable to generate random session ticket key: %v", err))
- }
- valid := make([]ticketKey, 0, len(c.autoSessionTicketKeys)+1)
- valid = append(valid, c.ticketKeyFromBytes(newKey))
- for _, k := range c.autoSessionTicketKeys {
- // While rotating the current key, also remove any expired ones.
- if c.time().Sub(k.created) < ticketKeyLifetime {
- valid = append(valid, k)
- }
- }
- c.autoSessionTicketKeys = valid
- }
- return c.autoSessionTicketKeys
-}
-
-// SetSessionTicketKeys updates the session ticket keys for a server.
-//
-// The first key will be used when creating new tickets, while all keys can be
-// used for decrypting tickets. It is safe to call this function while the
-// server is running in order to rotate the session ticket keys. The function
-// will panic if keys is empty.
-//
-// Calling this function will turn off automatic session ticket key rotation.
-//
- // If multiple servers are terminating connections for the same host, they
- // should all have the same session ticket keys. If the session ticket keys
- // leak, previously recorded and future TLS connections using those keys might be
-// compromised.
-func (c *Config) SetSessionTicketKeys(keys [][32]byte) {
- if len(keys) == 0 {
- panic("tls: keys must have at least one key")
- }
-
- newKeys := make([]ticketKey, len(keys))
- for i, bytes := range keys {
- newKeys[i] = c.ticketKeyFromBytes(bytes)
- }
-
- c.mutex.Lock()
- c.sessionTicketKeys = newKeys
- c.mutex.Unlock()
-}
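-
-// Hedged sketch: manual rotation via SetSessionTicketKeys, keeping the old
-// key second so outstanding tickets stay decryptable. newKey and oldKey are
-// placeholders for 32-byte keys shared across all terminating servers.
-func rotateTicketKeys(c *Config, newKey, oldKey [32]byte) {
- c.SetSessionTicketKeys([][32]byte{newKey, oldKey})
-}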
-
-func (c *Config) rand() io.Reader {
- r := c.Rand
- if r == nil {
- return rand.Reader
- }
- return r
-}
-
-func (c *Config) time() time.Time {
- t := c.Time
- if t == nil {
- t = time.Now
- }
- return t()
-}
-
-func (c *Config) cipherSuites() []uint16 {
- if c.CipherSuites != nil {
- return c.CipherSuites
- }
- return defaultCipherSuites
-}
-
-var supportedVersions = []uint16{
- VersionTLS13,
- VersionTLS12,
- VersionTLS11,
- VersionTLS10,
-}
-
-// debugEnableTLS10 enables TLS 1.0. See issue 45428.
-var debugEnableTLS10 = godebug.Get("tls10default") == "1"
-
-// roleClient and roleServer are passed to supportedVersions and related
-// functions to make the boolean argument readable at the callsite.
-const roleClient = true
-const roleServer = false
-
-func (c *Config) supportedVersions(isClient bool) []uint16 {
- versions := make([]uint16, 0, len(supportedVersions))
- for _, v := range supportedVersions {
- if (c == nil || c.MinVersion == 0) && !debugEnableTLS10 &&
- isClient && v < VersionTLS12 {
- continue
- }
- if c != nil && c.MinVersion != 0 && v < c.MinVersion {
- continue
- }
- if c != nil && c.MaxVersion != 0 && v > c.MaxVersion {
- continue
- }
- versions = append(versions, v)
- }
- return versions
-}
-
-func (c *Config) maxSupportedVersion(isClient bool) uint16 {
- supportedVersions := c.supportedVersions(isClient)
- if len(supportedVersions) == 0 {
- return 0
- }
- return supportedVersions[0]
-}
-
-// supportedVersionsFromMax returns a list of supported versions derived from a
-// legacy maximum version value. Note that only versions supported by this
-// library are returned. Any newer peer will use supportedVersions anyway.
-func supportedVersionsFromMax(maxVersion uint16) []uint16 {
- versions := make([]uint16, 0, len(supportedVersions))
- for _, v := range supportedVersions {
- if v > maxVersion {
- continue
- }
- versions = append(versions, v)
- }
- return versions
-}
-
-var defaultCurvePreferences = []CurveID{X25519, CurveP256, CurveP384, CurveP521}
-
-func (c *Config) curvePreferences() []CurveID {
- if c == nil || len(c.CurvePreferences) == 0 {
- return defaultCurvePreferences
- }
- return c.CurvePreferences
-}
-
-func (c *Config) supportsCurve(curve CurveID) bool {
- for _, cc := range c.curvePreferences() {
- if cc == curve {
- return true
- }
- }
- return false
-}
-
-// mutualVersion returns the protocol version to use given the advertised
-// versions of the peer. Priority is given to the peer preference order.
-func (c *Config) mutualVersion(isClient bool, peerVersions []uint16) (uint16, bool) {
- supportedVersions := c.supportedVersions(isClient)
- for _, peerVersion := range peerVersions {
- for _, v := range supportedVersions {
- if v == peerVersion {
- return v, true
- }
- }
- }
- return 0, false
-}
-
-var errNoCertificates = errors.New("tls: no certificates configured")
-
-// getCertificate returns the best certificate for the given ClientHelloInfo,
-// defaulting to the first element of c.Certificates.
-func (c *Config) getCertificate(clientHello *ClientHelloInfo) (*Certificate, error) {
- if c.GetCertificate != nil &&
- (len(c.Certificates) == 0 || len(clientHello.ServerName) > 0) {
- cert, err := c.GetCertificate(clientHello)
- if cert != nil || err != nil {
- return cert, err
- }
- }
-
- if len(c.Certificates) == 0 {
- return nil, errNoCertificates
- }
-
- if len(c.Certificates) == 1 {
- // There's only one choice, so no point doing any work.
- return &c.Certificates[0], nil
- }
-
- if c.NameToCertificate != nil {
- name := strings.ToLower(clientHello.ServerName)
- if cert, ok := c.NameToCertificate[name]; ok {
- return cert, nil
- }
- if len(name) > 0 {
- labels := strings.Split(name, ".")
- labels[0] = "*"
- wildcardName := strings.Join(labels, ".")
- if cert, ok := c.NameToCertificate[wildcardName]; ok {
- return cert, nil
- }
- }
- }
-
- for _, cert := range c.Certificates {
- if err := clientHello.SupportsCertificate(&cert); err == nil {
- return &cert, nil
- }
- }
-
- // If nothing matches, return the first certificate.
- return &c.Certificates[0], nil
-}
-
-// SupportsCertificate returns nil if the provided certificate is supported by
-// the client that sent the ClientHello. Otherwise, it returns an error
-// describing the reason for the incompatibility.
-//
-// If this ClientHelloInfo was passed to a GetConfigForClient or GetCertificate
-// callback, this method will take into account the associated Config. Note that
-// if GetConfigForClient returns a different Config, the change can't be
-// accounted for by this method.
-//
-// This function will call x509.ParseCertificate unless c.Leaf is set, which can
-// incur a significant performance cost.
-func (chi *ClientHelloInfo) SupportsCertificate(c *Certificate) error {
- // Note we don't currently support certificate_authorities nor
- // signature_algorithms_cert, and don't check the algorithms of the
- // signatures on the chain (which anyway are a SHOULD, see RFC 8446,
- // Section 4.4.2.2).
-
- config := chi.config
- if config == nil {
- config = &Config{}
- }
- vers, ok := config.mutualVersion(roleServer, chi.SupportedVersions)
- if !ok {
- return errors.New("no mutually supported protocol versions")
- }
-
- // If the client specified the name they are trying to connect to, the
- // certificate needs to be valid for it.
- if chi.ServerName != "" {
- x509Cert, err := c.leaf()
- if err != nil {
- return fmt.Errorf("failed to parse certificate: %w", err)
- }
- if err := x509Cert.VerifyHostname(chi.ServerName); err != nil {
- return fmt.Errorf("certificate is not valid for requested server name: %w", err)
- }
- }
-
- // supportsRSAFallback returns nil if the certificate and connection support
- // the static RSA key exchange, and unsupported otherwise. The logic for
- // supporting static RSA is completely disjoint from the logic for
- // supporting signed key exchanges, so we just check it as a fallback.
- supportsRSAFallback := func(unsupported error) error {
- // TLS 1.3 dropped support for the static RSA key exchange.
- if vers == VersionTLS13 {
- return unsupported
- }
- // The static RSA key exchange works by decrypting a challenge with the
- // RSA private key, not by signing, so check the PrivateKey implements
- // crypto.Decrypter, like *rsa.PrivateKey does.
- if priv, ok := c.PrivateKey.(crypto.Decrypter); ok {
- if _, ok := priv.Public().(*rsa.PublicKey); !ok {
- return unsupported
- }
- } else {
- return unsupported
- }
- // Finally, there needs to be a mutual cipher suite that uses the static
- // RSA key exchange instead of ECDHE.
- rsaCipherSuite := selectCipherSuite(chi.CipherSuites, config.cipherSuites(), func(c *cipherSuite) bool {
- if c.flags&suiteECDHE != 0 {
- return false
- }
- if vers < VersionTLS12 && c.flags&suiteTLS12 != 0 {
- return false
- }
- return true
- })
- if rsaCipherSuite == nil {
- return unsupported
- }
- return nil
- }
-
- // If the client sent the signature_algorithms extension, ensure it supports
- // schemes we can use with this certificate and TLS version.
- if len(chi.SignatureSchemes) > 0 {
- if _, err := selectSignatureScheme(vers, c, chi.SignatureSchemes); err != nil {
- return supportsRSAFallback(err)
- }
- }
-
- // In TLS 1.3 we are done because supported_groups is only relevant to the
- // ECDHE computation, point format negotiation is removed, cipher suites are
- // only relevant to the AEAD choice, and static RSA does not exist.
- if vers == VersionTLS13 {
- return nil
- }
-
- // The only signed key exchange we support is ECDHE.
- if !supportsECDHE(config, chi.SupportedCurves, chi.SupportedPoints) {
- return supportsRSAFallback(errors.New("client doesn't support ECDHE, can only use legacy RSA key exchange"))
- }
-
- var ecdsaCipherSuite bool
- if priv, ok := c.PrivateKey.(crypto.Signer); ok {
- switch pub := priv.Public().(type) {
- case *ecdsa.PublicKey:
- var curve CurveID
- switch pub.Curve {
- case elliptic.P256():
- curve = CurveP256
- case elliptic.P384():
- curve = CurveP384
- case elliptic.P521():
- curve = CurveP521
- default:
- return supportsRSAFallback(unsupportedCertificateError(c))
- }
- var curveOk bool
- for _, c := range chi.SupportedCurves {
- if c == curve && config.supportsCurve(c) {
- curveOk = true
- break
- }
- }
- if !curveOk {
- return errors.New("client doesn't support certificate curve")
- }
- ecdsaCipherSuite = true
- case ed25519.PublicKey:
- if vers < VersionTLS12 || len(chi.SignatureSchemes) == 0 {
- return errors.New("connection doesn't support Ed25519")
- }
- ecdsaCipherSuite = true
- case *rsa.PublicKey:
- default:
- return supportsRSAFallback(unsupportedCertificateError(c))
- }
- } else {
- return supportsRSAFallback(unsupportedCertificateError(c))
- }
-
- // Make sure that there is a mutually supported cipher suite that works with
- // this certificate. Cipher suite selection will then apply the logic in
- // reverse to pick it. See also serverHandshakeState.cipherSuiteOk.
- cipherSuite := selectCipherSuite(chi.CipherSuites, config.cipherSuites(), func(c *cipherSuite) bool {
- if c.flags&suiteECDHE == 0 {
- return false
- }
- if c.flags&suiteECSign != 0 {
- if !ecdsaCipherSuite {
- return false
- }
- } else {
- if ecdsaCipherSuite {
- return false
- }
- }
- if vers < VersionTLS12 && c.flags&suiteTLS12 != 0 {
- return false
- }
- return true
- })
- if cipherSuite == nil {
- return supportsRSAFallback(errors.New("client doesn't support any cipher suites compatible with the certificate"))
- }
-
- return nil
-}
-
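A hedged sketch of how ClientHelloInfo.SupportsCertificate is typically used
from a GetCertificate callback to pick between chains; ecdsaCert and rsaCert
are hypothetical, pre-loaded *tls.Certificate values, not names from this file.

	import "crypto/tls"

	func pickCertificate(ecdsaCert, rsaCert *tls.Certificate) func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
		return func(chi *tls.ClientHelloInfo) (*tls.Certificate, error) {
			// Prefer ECDSA whenever the ClientHello is compatible with it.
			if err := chi.SupportsCertificate(ecdsaCert); err == nil {
				return ecdsaCert, nil
			}
			// Otherwise fall back to RSA, surfacing any incompatibility.
			if err := chi.SupportsCertificate(rsaCert); err != nil {
				return nil, err
			}
			return rsaCert, nil
		}
	}
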
-// SupportsCertificate returns nil if the provided certificate is supported by
-// the server that sent the CertificateRequest. Otherwise, it returns an error
-// describing the reason for the incompatibility.
-func (cri *CertificateRequestInfo) SupportsCertificate(c *Certificate) error {
- if _, err := selectSignatureScheme(cri.Version, c, cri.SignatureSchemes); err != nil {
- return err
- }
-
- if len(cri.AcceptableCAs) == 0 {
- return nil
- }
-
- for j, cert := range c.Certificate {
- x509Cert := c.Leaf
-		// Parse the certificate if this isn't the leaf node, or if
-		// c.Leaf was nil.
- if j != 0 || x509Cert == nil {
- var err error
- if x509Cert, err = x509.ParseCertificate(cert); err != nil {
- return fmt.Errorf("failed to parse certificate #%d in the chain: %w", j, err)
- }
- }
-
- for _, ca := range cri.AcceptableCAs {
- if bytes.Equal(x509Cert.RawIssuer, ca) {
- return nil
- }
- }
- }
- return errors.New("chain is not signed by an acceptable CA")
-}
-
-// BuildNameToCertificate parses c.Certificates and builds c.NameToCertificate
-// from the CommonName and Subject Alternative Name fields of each of the leaf
-// certificates.
-//
-// Deprecated: NameToCertificate only allows associating a single certificate
-// with a given name. Leave that field nil to let the library select the first
-// compatible chain from Certificates.
-func (c *Config) BuildNameToCertificate() {
- c.NameToCertificate = make(map[string]*Certificate)
- for i := range c.Certificates {
- cert := &c.Certificates[i]
- x509Cert, err := cert.leaf()
- if err != nil {
- continue
- }
- // If SANs are *not* present, some clients will consider the certificate
- // valid for the name in the Common Name.
- if x509Cert.Subject.CommonName != "" && len(x509Cert.DNSNames) == 0 {
- c.NameToCertificate[x509Cert.Subject.CommonName] = cert
- }
- for _, san := range x509Cert.DNSNames {
- c.NameToCertificate[san] = cert
- }
- }
-}
-
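A sketch of the replacement the deprecation note points at: leave
NameToCertificate nil and list every chain in Certificates, letting the
library pick the first compatible one per ClientHello. ecdsaCert and rsaCert
are hypothetical values.

	import "crypto/tls"

	func multiCertConfig(ecdsaCert, rsaCert tls.Certificate) *tls.Config {
		return &tls.Config{
			// NameToCertificate stays nil (deprecated): selection falls
			// through to the first chain compatible with each ClientHello.
			Certificates: []tls.Certificate{ecdsaCert, rsaCert},
		}
	}
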
-const (
- keyLogLabelTLS12 = "CLIENT_RANDOM"
- keyLogLabelClientHandshake = "CLIENT_HANDSHAKE_TRAFFIC_SECRET"
- keyLogLabelServerHandshake = "SERVER_HANDSHAKE_TRAFFIC_SECRET"
- keyLogLabelClientTraffic = "CLIENT_TRAFFIC_SECRET_0"
- keyLogLabelServerTraffic = "SERVER_TRAFFIC_SECRET_0"
-)
-
-func (c *Config) writeKeyLog(label string, clientRandom, secret []byte) error {
- if c.KeyLogWriter == nil {
- return nil
- }
-
- logLine := []byte(fmt.Sprintf("%s %x %x\n", label, clientRandom, secret))
-
- writerMutex.Lock()
- _, err := c.KeyLogWriter.Write(logLine)
- writerMutex.Unlock()
-
- return err
-}
-
-// writerMutex protects all KeyLogWriters globally. Key logging is rarely
-// enabled, and only used for debugging, so a global mutex saves space.
-var writerMutex sync.Mutex
-
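A minimal sketch, for debugging builds only, of wiring KeyLogWriter to a file
in the NSS key log format that tools like Wireshark understand. The file name
is illustrative; opening with O_APPEND matters because writeKeyLog appends
one line per secret and several Configs may share a destination.

	import (
		"crypto/tls"
		"log"
		"os"
	)

	func debugConfig() *tls.Config {
		f, err := os.OpenFile("sslkeys.log", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o600)
		if err != nil {
			log.Fatal(err)
		}
		// Never enable this outside of debugging: the file decrypts sessions.
		return &tls.Config{KeyLogWriter: f}
	}
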
-// A Certificate is a chain of one or more certificates, leaf first.
-type Certificate struct {
- Certificate [][]byte
- // PrivateKey contains the private key corresponding to the public key in
- // Leaf. This must implement crypto.Signer with an RSA, ECDSA or Ed25519 PublicKey.
- // For a server up to TLS 1.2, it can also implement crypto.Decrypter with
- // an RSA PublicKey.
- PrivateKey crypto.PrivateKey
- // SupportedSignatureAlgorithms is an optional list restricting what
- // signature algorithms the PrivateKey can be used for.
- SupportedSignatureAlgorithms []SignatureScheme
- // OCSPStaple contains an optional OCSP response which will be served
- // to clients that request it.
- OCSPStaple []byte
- // SignedCertificateTimestamps contains an optional list of Signed
- // Certificate Timestamps which will be served to clients that request it.
- SignedCertificateTimestamps [][]byte
- // Leaf is the parsed form of the leaf certificate, which may be initialized
- // using x509.ParseCertificate to reduce per-handshake processing. If nil,
- // the leaf certificate will be parsed as needed.
- Leaf *x509.Certificate
-}
-
-// leaf returns the parsed leaf certificate, either from c.Leaf or by parsing
-// the corresponding c.Certificate[0].
-func (c *Certificate) leaf() (*x509.Certificate, error) {
- if c.Leaf != nil {
- return c.Leaf, nil
- }
- return x509.ParseCertificate(c.Certificate[0])
-}
-
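Since leaf() reparses Certificate[0] on every call when Leaf is nil, a common
pattern is to populate Leaf once at load time. A sketch under that assumption
(LoadX509KeyPair in this Go version does not set Leaf itself):

	import (
		"crypto/tls"
		"crypto/x509"
	)

	func loadWithLeaf(certFile, keyFile string) (tls.Certificate, error) {
		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			return tls.Certificate{}, err
		}
		// Parse once here so per-handshake paths skip x509.ParseCertificate.
		cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
		return cert, err
	}
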
-type handshakeMessage interface {
- marshal() []byte
- unmarshal([]byte) bool
-}
-
-// lruSessionCache is a ClientSessionCache implementation that uses an LRU
-// caching strategy.
-type lruSessionCache struct {
- sync.Mutex
-
- m map[string]*list.Element
- q *list.List
- capacity int
-}
-
-type lruSessionCacheEntry struct {
- sessionKey string
- state *ClientSessionState
-}
-
-// NewLRUClientSessionCache returns a ClientSessionCache with the given
-// capacity that uses an LRU strategy. If capacity is < 1, a default capacity
-// is used instead.
-func NewLRUClientSessionCache(capacity int) ClientSessionCache {
- const defaultSessionCacheCapacity = 64
-
- if capacity < 1 {
- capacity = defaultSessionCacheCapacity
- }
- return &lruSessionCache{
- m: make(map[string]*list.Element),
- q: list.New(),
- capacity: capacity,
- }
-}
-
-// Put adds the provided (sessionKey, cs) pair to the cache. If cs is nil, the entry
-// corresponding to sessionKey is removed from the cache instead.
-func (c *lruSessionCache) Put(sessionKey string, cs *ClientSessionState) {
- c.Lock()
- defer c.Unlock()
-
- if elem, ok := c.m[sessionKey]; ok {
- if cs == nil {
- c.q.Remove(elem)
- delete(c.m, sessionKey)
- } else {
- entry := elem.Value.(*lruSessionCacheEntry)
- entry.state = cs
- c.q.MoveToFront(elem)
- }
- return
- }
-
- if c.q.Len() < c.capacity {
- entry := &lruSessionCacheEntry{sessionKey, cs}
- c.m[sessionKey] = c.q.PushFront(entry)
- return
- }
-
- elem := c.q.Back()
- entry := elem.Value.(*lruSessionCacheEntry)
- delete(c.m, entry.sessionKey)
- entry.sessionKey = sessionKey
- entry.state = cs
- c.q.MoveToFront(elem)
- c.m[sessionKey] = elem
-}
-
-// Get returns the ClientSessionState value associated with a given key. It
-// returns (nil, false) if no value is found.
-func (c *lruSessionCache) Get(sessionKey string) (*ClientSessionState, bool) {
- c.Lock()
- defer c.Unlock()
-
- if elem, ok := c.m[sessionKey]; ok {
- c.q.MoveToFront(elem)
- return elem.Value.(*lruSessionCacheEntry).state, true
- }
- return nil, false
-}
-
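For completeness, a short sketch of plugging the LRU cache into a client
Config to enable session resumption; the capacity and server name are
illustrative only.

	import "crypto/tls"

	func resumingClientConfig() *tls.Config {
		return &tls.Config{
			ServerName: "example.com",
			// Capacities below 1 fall back to the default of 64.
			ClientSessionCache: tls.NewLRUClientSessionCache(128),
		}
	}
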
-var emptyConfig Config
-
-func defaultConfig() *Config {
- return &emptyConfig
-}
-
-func unexpectedMessageError(wanted, got any) error {
- return fmt.Errorf("tls: received unexpected handshake message of type %T when waiting for %T", got, wanted)
-}
-
-func isSupportedSignatureAlgorithm(sigAlg SignatureScheme, supportedSignatureAlgorithms []SignatureScheme) bool {
- for _, s := range supportedSignatureAlgorithms {
- if s == sigAlg {
- return true
- }
- }
- return false
-}
diff --git a/contrib/go/_std_1.18/src/crypto/tls/conn.go b/contrib/go/_std_1.18/src/crypto/tls/conn.go
deleted file mode 100644
index 0dae8e34a1..0000000000
--- a/contrib/go/_std_1.18/src/crypto/tls/conn.go
+++ /dev/null
@@ -1,1543 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// TLS low level connection and record layer
-
-package tls
-
-import (
- "bytes"
- "context"
- "crypto/cipher"
- "crypto/subtle"
- "crypto/x509"
- "errors"
- "fmt"
- "hash"
- "io"
- "net"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// A Conn represents a secured connection.
-// It implements the net.Conn interface.
-type Conn struct {
- // constant
- conn net.Conn
- isClient bool
- handshakeFn func(context.Context) error // (*Conn).clientHandshake or serverHandshake
-
- // handshakeStatus is 1 if the connection is currently transferring
- // application data (i.e. is not currently processing a handshake).
- // handshakeStatus == 1 implies handshakeErr == nil.
- // This field is only to be accessed with sync/atomic.
- handshakeStatus uint32
- // constant after handshake; protected by handshakeMutex
- handshakeMutex sync.Mutex
- handshakeErr error // error resulting from handshake
- vers uint16 // TLS version
- haveVers bool // version has been negotiated
- config *Config // configuration passed to constructor
- // handshakes counts the number of handshakes performed on the
- // connection so far. If renegotiation is disabled then this is either
- // zero or one.
- handshakes int
- didResume bool // whether this connection was a session resumption
- cipherSuite uint16
- ocspResponse []byte // stapled OCSP response
- scts [][]byte // signed certificate timestamps from server
- peerCertificates []*x509.Certificate
- // verifiedChains contains the certificate chains that we built, as
- // opposed to the ones presented by the server.
- verifiedChains [][]*x509.Certificate
- // serverName contains the server name indicated by the client, if any.
- serverName string
- // secureRenegotiation is true if the server echoed the secure
- // renegotiation extension. (This is meaningless as a server because
- // renegotiation is not supported in that case.)
- secureRenegotiation bool
- // ekm is a closure for exporting keying material.
- ekm func(label string, context []byte, length int) ([]byte, error)
- // resumptionSecret is the resumption_master_secret for handling
- // NewSessionTicket messages. nil if config.SessionTicketsDisabled.
- resumptionSecret []byte
-
- // ticketKeys is the set of active session ticket keys for this
- // connection. The first one is used to encrypt new tickets and
- // all are tried to decrypt tickets.
- ticketKeys []ticketKey
-
- // clientFinishedIsFirst is true if the client sent the first Finished
- // message during the most recent handshake. This is recorded because
- // the first transmitted Finished message is the tls-unique
- // channel-binding value.
- clientFinishedIsFirst bool
-
- // closeNotifyErr is any error from sending the alertCloseNotify record.
- closeNotifyErr error
- // closeNotifySent is true if the Conn attempted to send an
- // alertCloseNotify record.
- closeNotifySent bool
-
- // clientFinished and serverFinished contain the Finished message sent
- // by the client or server in the most recent handshake. This is
- // retained to support the renegotiation extension and tls-unique
- // channel-binding.
- clientFinished [12]byte
- serverFinished [12]byte
-
- // clientProtocol is the negotiated ALPN protocol.
- clientProtocol string
-
- // input/output
- in, out halfConn
- rawInput bytes.Buffer // raw input, starting with a record header
- input bytes.Reader // application data waiting to be read, from rawInput.Next
- hand bytes.Buffer // handshake data waiting to be read
- buffering bool // whether records are buffered in sendBuf
- sendBuf []byte // a buffer of records waiting to be sent
-
- // bytesSent counts the bytes of application data sent.
- // packetsSent counts packets.
- bytesSent int64
- packetsSent int64
-
- // retryCount counts the number of consecutive non-advancing records
- // received by Conn.readRecord. That is, records that neither advance the
- // handshake, nor deliver application data. Protected by in.Mutex.
- retryCount int
-
-	// activeCall is an atomic int32; the low bit is whether Close has
-	// been called. The rest of the bits are the number of goroutines
-	// in Conn.Write.
- activeCall int32
-
- tmp [16]byte
-}
-
-// Access to net.Conn methods.
-// Cannot just embed net.Conn because that would
-// export the struct field too.
-
-// LocalAddr returns the local network address.
-func (c *Conn) LocalAddr() net.Addr {
- return c.conn.LocalAddr()
-}
-
-// RemoteAddr returns the remote network address.
-func (c *Conn) RemoteAddr() net.Addr {
- return c.conn.RemoteAddr()
-}
-
-// SetDeadline sets the read and write deadlines associated with the connection.
-// A zero value for t means Read and Write will not time out.
-// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
-func (c *Conn) SetDeadline(t time.Time) error {
- return c.conn.SetDeadline(t)
-}
-
-// SetReadDeadline sets the read deadline on the underlying connection.
-// A zero value for t means Read will not time out.
-func (c *Conn) SetReadDeadline(t time.Time) error {
- return c.conn.SetReadDeadline(t)
-}
-
-// SetWriteDeadline sets the write deadline on the underlying connection.
-// A zero value for t means Write will not time out.
-// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
-func (c *Conn) SetWriteDeadline(t time.Time) error {
- return c.conn.SetWriteDeadline(t)
-}
-
-// NetConn returns the underlying connection that is wrapped by c.
-// Note that writing to or reading from this connection directly will corrupt the
-// TLS session.
-func (c *Conn) NetConn() net.Conn {
- return c.conn
-}
-
-// A halfConn represents one direction of the record layer
-// connection, either sending or receiving.
-type halfConn struct {
- sync.Mutex
-
- err error // first permanent error
- version uint16 // protocol version
- cipher any // cipher algorithm
- mac hash.Hash
- seq [8]byte // 64-bit sequence number
-
- scratchBuf [13]byte // to avoid allocs; interface method args escape
-
- nextCipher any // next encryption state
- nextMac hash.Hash // next MAC algorithm
-
- trafficSecret []byte // current TLS 1.3 traffic secret
-}
-
-type permanentError struct {
- err net.Error
-}
-
-func (e *permanentError) Error() string { return e.err.Error() }
-func (e *permanentError) Unwrap() error { return e.err }
-func (e *permanentError) Timeout() bool { return e.err.Timeout() }
-func (e *permanentError) Temporary() bool { return false }
-
-func (hc *halfConn) setErrorLocked(err error) error {
- if e, ok := err.(net.Error); ok {
- hc.err = &permanentError{err: e}
- } else {
- hc.err = err
- }
- return hc.err
-}
-
-// prepareCipherSpec sets the encryption and MAC states
-// that a subsequent changeCipherSpec will use.
-func (hc *halfConn) prepareCipherSpec(version uint16, cipher any, mac hash.Hash) {
- hc.version = version
- hc.nextCipher = cipher
- hc.nextMac = mac
-}
-
-// changeCipherSpec changes the encryption and MAC states
-// to the ones previously passed to prepareCipherSpec.
-func (hc *halfConn) changeCipherSpec() error {
- if hc.nextCipher == nil || hc.version == VersionTLS13 {
- return alertInternalError
- }
- hc.cipher = hc.nextCipher
- hc.mac = hc.nextMac
- hc.nextCipher = nil
- hc.nextMac = nil
- for i := range hc.seq {
- hc.seq[i] = 0
- }
- return nil
-}
-
-func (hc *halfConn) setTrafficSecret(suite *cipherSuiteTLS13, secret []byte) {
- hc.trafficSecret = secret
- key, iv := suite.trafficKey(secret)
- hc.cipher = suite.aead(key, iv)
- for i := range hc.seq {
- hc.seq[i] = 0
- }
-}
-
-// incSeq increments the sequence number.
-func (hc *halfConn) incSeq() {
- for i := 7; i >= 0; i-- {
- hc.seq[i]++
- if hc.seq[i] != 0 {
- return
- }
- }
-
- // Not allowed to let sequence number wrap.
- // Instead, must renegotiate before it does.
- // Not likely enough to bother.
- panic("TLS: sequence number wraparound")
-}
-
-// explicitNonceLen returns the number of bytes of explicit nonce or IV included
-// in each record. Explicit nonces are present only in CBC modes after TLS 1.0
-// and in certain AEAD modes in TLS 1.2.
-func (hc *halfConn) explicitNonceLen() int {
- if hc.cipher == nil {
- return 0
- }
-
- switch c := hc.cipher.(type) {
- case cipher.Stream:
- return 0
- case aead:
- return c.explicitNonceLen()
- case cbcMode:
- // TLS 1.1 introduced a per-record explicit IV to fix the BEAST attack.
- if hc.version >= VersionTLS11 {
- return c.BlockSize()
- }
- return 0
- default:
- panic("unknown cipher type")
- }
-}
-
-// extractPadding returns, in constant time, the length of the padding to remove
-// from the end of payload. It also returns a byte which is equal to 255 if the
-// padding was valid and 0 otherwise. See RFC 2246, Section 6.2.3.2.
-func extractPadding(payload []byte) (toRemove int, good byte) {
- if len(payload) < 1 {
- return 0, 0
- }
-
- paddingLen := payload[len(payload)-1]
- t := uint(len(payload)-1) - uint(paddingLen)
-	// if len(payload)-1 >= paddingLen then the MSB of t is zero
- good = byte(int32(^t) >> 31)
-
- // The maximum possible padding length plus the actual length field
- toCheck := 256
- // The length of the padded data is public, so we can use an if here
- if toCheck > len(payload) {
- toCheck = len(payload)
- }
-
- for i := 0; i < toCheck; i++ {
- t := uint(paddingLen) - uint(i)
- // if i <= paddingLen then the MSB of t is zero
- mask := byte(int32(^t) >> 31)
- b := payload[len(payload)-1-i]
- good &^= mask&paddingLen ^ mask&b
- }
-
- // We AND together the bits of good and replicate the result across
- // all the bits.
- good &= good << 4
- good &= good << 2
- good &= good << 1
- good = uint8(int8(good) >> 7)
-
- // Zero the padding length on error. This ensures any unchecked bytes
- // are included in the MAC. Otherwise, an attacker that could
- // distinguish MAC failures from padding failures could mount an attack
- // similar to POODLE in SSL 3.0: given a good ciphertext that uses a
- // full block's worth of padding, replace the final block with another
- // block. If the MAC check passed but the padding check failed, the
- // last byte of that block decrypted to the block size.
- //
- // See also macAndPaddingGood logic below.
- paddingLen &= good
-
- toRemove = int(paddingLen) + 1
- return
-}
-
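The expression byte(int32(^t) >> 31) above deserves a worked example: it turns
"the unsigned subtraction did not underflow" into an all-ones byte without
branching. A minimal sketch, assuming operands small enough that underflow
sets the high bits; the helper name is hypothetical.

	// maskIfNoUnderflow returns 0xFF when t's top bits are clear (no
	// underflow) and 0x00 when they are set (underflow):
	//
	//	t = 3        -> ^t ends ...FFFC, int32(^t) = -4, -4>>31 = -1, byte = 0xFF
	//	t = ^uint(0) -> ^t = 0,          int32(0)>>31 = 0,           byte = 0x00
	func maskIfNoUnderflow(t uint) byte {
		return byte(int32(^t) >> 31)
	}
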
-func roundUp(a, b int) int {
- return a + (b-a%b)%b
-}
-
-// cbcMode is an interface for block ciphers using cipher block chaining.
-type cbcMode interface {
- cipher.BlockMode
- SetIV([]byte)
-}
-
-// decrypt authenticates and decrypts the record if protection is active at
-// this stage. The returned plaintext might overlap with the input.
-func (hc *halfConn) decrypt(record []byte) ([]byte, recordType, error) {
- var plaintext []byte
- typ := recordType(record[0])
- payload := record[recordHeaderLen:]
-
- // In TLS 1.3, change_cipher_spec messages are to be ignored without being
- // decrypted. See RFC 8446, Appendix D.4.
- if hc.version == VersionTLS13 && typ == recordTypeChangeCipherSpec {
- return payload, typ, nil
- }
-
- paddingGood := byte(255)
- paddingLen := 0
-
- explicitNonceLen := hc.explicitNonceLen()
-
- if hc.cipher != nil {
- switch c := hc.cipher.(type) {
- case cipher.Stream:
- c.XORKeyStream(payload, payload)
- case aead:
- if len(payload) < explicitNonceLen {
- return nil, 0, alertBadRecordMAC
- }
- nonce := payload[:explicitNonceLen]
- if len(nonce) == 0 {
- nonce = hc.seq[:]
- }
- payload = payload[explicitNonceLen:]
-
- var additionalData []byte
- if hc.version == VersionTLS13 {
- additionalData = record[:recordHeaderLen]
- } else {
- additionalData = append(hc.scratchBuf[:0], hc.seq[:]...)
- additionalData = append(additionalData, record[:3]...)
- n := len(payload) - c.Overhead()
- additionalData = append(additionalData, byte(n>>8), byte(n))
- }
-
- var err error
- plaintext, err = c.Open(payload[:0], nonce, payload, additionalData)
- if err != nil {
- return nil, 0, alertBadRecordMAC
- }
- case cbcMode:
- blockSize := c.BlockSize()
- minPayload := explicitNonceLen + roundUp(hc.mac.Size()+1, blockSize)
- if len(payload)%blockSize != 0 || len(payload) < minPayload {
- return nil, 0, alertBadRecordMAC
- }
-
- if explicitNonceLen > 0 {
- c.SetIV(payload[:explicitNonceLen])
- payload = payload[explicitNonceLen:]
- }
- c.CryptBlocks(payload, payload)
-
- // In a limited attempt to protect against CBC padding oracles like
- // Lucky13, the data past paddingLen (which is secret) is passed to
- // the MAC function as extra data, to be fed into the HMAC after
- // computing the digest. This makes the MAC roughly constant time as
- // long as the digest computation is constant time and does not
- // affect the subsequent write, modulo cache effects.
- paddingLen, paddingGood = extractPadding(payload)
- default:
- panic("unknown cipher type")
- }
-
- if hc.version == VersionTLS13 {
- if typ != recordTypeApplicationData {
- return nil, 0, alertUnexpectedMessage
- }
- if len(plaintext) > maxPlaintext+1 {
- return nil, 0, alertRecordOverflow
- }
- // Remove padding and find the ContentType scanning from the end.
- for i := len(plaintext) - 1; i >= 0; i-- {
- if plaintext[i] != 0 {
- typ = recordType(plaintext[i])
- plaintext = plaintext[:i]
- break
- }
- if i == 0 {
- return nil, 0, alertUnexpectedMessage
- }
- }
- }
- } else {
- plaintext = payload
- }
-
- if hc.mac != nil {
- macSize := hc.mac.Size()
- if len(payload) < macSize {
- return nil, 0, alertBadRecordMAC
- }
-
- n := len(payload) - macSize - paddingLen
- n = subtle.ConstantTimeSelect(int(uint32(n)>>31), 0, n) // if n < 0 { n = 0 }
- record[3] = byte(n >> 8)
- record[4] = byte(n)
- remoteMAC := payload[n : n+macSize]
- localMAC := tls10MAC(hc.mac, hc.scratchBuf[:0], hc.seq[:], record[:recordHeaderLen], payload[:n], payload[n+macSize:])
-
- // This is equivalent to checking the MACs and paddingGood
- // separately, but in constant-time to prevent distinguishing
- // padding failures from MAC failures. Depending on what value
- // of paddingLen was returned on bad padding, distinguishing
- // bad MAC from bad padding can lead to an attack.
- //
- // See also the logic at the end of extractPadding.
- macAndPaddingGood := subtle.ConstantTimeCompare(localMAC, remoteMAC) & int(paddingGood)
- if macAndPaddingGood != 1 {
- return nil, 0, alertBadRecordMAC
- }
-
- plaintext = payload[:n]
- }
-
- hc.incSeq()
- return plaintext, typ, nil
-}
-
-// sliceForAppend extends the input slice by n bytes. head is the full extended
-// slice, while tail is the appended part. If the original slice has sufficient
-// capacity no allocation is performed.
-func sliceForAppend(in []byte, n int) (head, tail []byte) {
- if total := len(in) + n; cap(in) >= total {
- head = in[:total]
- } else {
- head = make([]byte, total)
- copy(head, in)
- }
- tail = head[len(in):]
- return
-}
-
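A usage sketch: sliceForAppend is how encrypt reserves room for a MAC or
ciphertext tail and writes into it in place, instead of allocating and
appending twice. The helper name is illustrative.

	func reserveExample() []byte {
		buf := []byte("header")
		// head is the full (possibly reallocated) slice; tail is the new
		// 4-byte region at its end, ready to be filled in place.
		head, tail := sliceForAppend(buf, 4)
		copy(tail, []byte{0xde, 0xad, 0xbe, 0xef})
		return head
	}
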
-// encrypt encrypts payload, adding the appropriate nonce and/or MAC, and
-// appends it to record, which must already contain the record header.
-func (hc *halfConn) encrypt(record, payload []byte, rand io.Reader) ([]byte, error) {
- if hc.cipher == nil {
- return append(record, payload...), nil
- }
-
- var explicitNonce []byte
- if explicitNonceLen := hc.explicitNonceLen(); explicitNonceLen > 0 {
- record, explicitNonce = sliceForAppend(record, explicitNonceLen)
- if _, isCBC := hc.cipher.(cbcMode); !isCBC && explicitNonceLen < 16 {
- // The AES-GCM construction in TLS has an explicit nonce so that the
-			// nonce can be random. However, the nonce is only 8 bytes, which is
-			// too small for a secure, random nonce. Therefore we use the
-			// sequence number as the nonce. The 3DES-CBC construction also has
-			// an 8-byte nonce, but its nonces must be unpredictable (see RFC
- // 5246, Appendix F.3), forcing us to use randomness. That's not
- // 3DES' biggest problem anyway because the birthday bound on block
- // collision is reached first due to its similarly small block size
- // (see the Sweet32 attack).
- copy(explicitNonce, hc.seq[:])
- } else {
- if _, err := io.ReadFull(rand, explicitNonce); err != nil {
- return nil, err
- }
- }
- }
-
- var dst []byte
- switch c := hc.cipher.(type) {
- case cipher.Stream:
- mac := tls10MAC(hc.mac, hc.scratchBuf[:0], hc.seq[:], record[:recordHeaderLen], payload, nil)
- record, dst = sliceForAppend(record, len(payload)+len(mac))
- c.XORKeyStream(dst[:len(payload)], payload)
- c.XORKeyStream(dst[len(payload):], mac)
- case aead:
- nonce := explicitNonce
- if len(nonce) == 0 {
- nonce = hc.seq[:]
- }
-
- if hc.version == VersionTLS13 {
- record = append(record, payload...)
-
- // Encrypt the actual ContentType and replace the plaintext one.
- record = append(record, record[0])
- record[0] = byte(recordTypeApplicationData)
-
- n := len(payload) + 1 + c.Overhead()
- record[3] = byte(n >> 8)
- record[4] = byte(n)
-
- record = c.Seal(record[:recordHeaderLen],
- nonce, record[recordHeaderLen:], record[:recordHeaderLen])
- } else {
- additionalData := append(hc.scratchBuf[:0], hc.seq[:]...)
- additionalData = append(additionalData, record[:recordHeaderLen]...)
- record = c.Seal(record, nonce, payload, additionalData)
- }
- case cbcMode:
- mac := tls10MAC(hc.mac, hc.scratchBuf[:0], hc.seq[:], record[:recordHeaderLen], payload, nil)
- blockSize := c.BlockSize()
- plaintextLen := len(payload) + len(mac)
- paddingLen := blockSize - plaintextLen%blockSize
- record, dst = sliceForAppend(record, plaintextLen+paddingLen)
- copy(dst, payload)
- copy(dst[len(payload):], mac)
- for i := plaintextLen; i < len(dst); i++ {
- dst[i] = byte(paddingLen - 1)
- }
- if len(explicitNonce) > 0 {
- c.SetIV(explicitNonce)
- }
- c.CryptBlocks(dst, dst)
- default:
- panic("unknown cipher type")
- }
-
- // Update length to include nonce, MAC and any block padding needed.
- n := len(record) - recordHeaderLen
- record[3] = byte(n >> 8)
- record[4] = byte(n)
- hc.incSeq()
-
- return record, nil
-}
-
-// RecordHeaderError is returned when a TLS record header is invalid.
-type RecordHeaderError struct {
- // Msg contains a human readable string that describes the error.
- Msg string
- // RecordHeader contains the five bytes of TLS record header that
- // triggered the error.
- RecordHeader [5]byte
- // Conn provides the underlying net.Conn in the case that a client
- // sent an initial handshake that didn't look like TLS.
- // It is nil if there's already been a handshake or a TLS alert has
- // been written to the connection.
- Conn net.Conn
-}
-
-func (e RecordHeaderError) Error() string { return "tls: " + e.Msg }
-
-func (c *Conn) newRecordHeaderError(conn net.Conn, msg string) (err RecordHeaderError) {
- err.Msg = msg
- err.Conn = conn
- copy(err.RecordHeader[:], c.rawInput.Bytes())
- return err
-}
-
-func (c *Conn) readRecord() error {
- return c.readRecordOrCCS(false)
-}
-
-func (c *Conn) readChangeCipherSpec() error {
- return c.readRecordOrCCS(true)
-}
-
-// readRecordOrCCS reads one or more TLS records from the connection and
-// updates the record layer state. Some invariants:
-//   - c.in must be locked
-//   - c.input must be empty
-//
-// During the handshake one and only one of the following will happen:
-//   - c.hand grows
-//   - c.in.changeCipherSpec is called
-//   - an error is returned
-//
-// After the handshake one and only one of the following will happen:
-//   - c.hand grows
-//   - c.input is set
-//   - an error is returned
-func (c *Conn) readRecordOrCCS(expectChangeCipherSpec bool) error {
- if c.in.err != nil {
- return c.in.err
- }
- handshakeComplete := c.handshakeComplete()
-
- // This function modifies c.rawInput, which owns the c.input memory.
- if c.input.Len() != 0 {
- return c.in.setErrorLocked(errors.New("tls: internal error: attempted to read record with pending application data"))
- }
- c.input.Reset(nil)
-
- // Read header, payload.
- if err := c.readFromUntil(c.conn, recordHeaderLen); err != nil {
- // RFC 8446, Section 6.1 suggests that EOF without an alertCloseNotify
- // is an error, but popular web sites seem to do this, so we accept it
- // if and only if at the record boundary.
- if err == io.ErrUnexpectedEOF && c.rawInput.Len() == 0 {
- err = io.EOF
- }
- if e, ok := err.(net.Error); !ok || !e.Temporary() {
- c.in.setErrorLocked(err)
- }
- return err
- }
- hdr := c.rawInput.Bytes()[:recordHeaderLen]
- typ := recordType(hdr[0])
-
-	// No valid TLS record has a type of 0x80; however, SSLv2 handshakes
- // start with a uint16 length where the MSB is set and the first record
- // is always < 256 bytes long. Therefore typ == 0x80 strongly suggests
- // an SSLv2 client.
- if !handshakeComplete && typ == 0x80 {
- c.sendAlert(alertProtocolVersion)
- return c.in.setErrorLocked(c.newRecordHeaderError(nil, "unsupported SSLv2 handshake received"))
- }
-
- vers := uint16(hdr[1])<<8 | uint16(hdr[2])
- n := int(hdr[3])<<8 | int(hdr[4])
- if c.haveVers && c.vers != VersionTLS13 && vers != c.vers {
- c.sendAlert(alertProtocolVersion)
- msg := fmt.Sprintf("received record with version %x when expecting version %x", vers, c.vers)
- return c.in.setErrorLocked(c.newRecordHeaderError(nil, msg))
- }
- if !c.haveVers {
- // First message, be extra suspicious: this might not be a TLS
- // client. Bail out before reading a full 'body', if possible.
- // The current max version is 3.3 so if the version is >= 16.0,
- // it's probably not real.
- if (typ != recordTypeAlert && typ != recordTypeHandshake) || vers >= 0x1000 {
- return c.in.setErrorLocked(c.newRecordHeaderError(c.conn, "first record does not look like a TLS handshake"))
- }
- }
- if c.vers == VersionTLS13 && n > maxCiphertextTLS13 || n > maxCiphertext {
- c.sendAlert(alertRecordOverflow)
- msg := fmt.Sprintf("oversized record received with length %d", n)
- return c.in.setErrorLocked(c.newRecordHeaderError(nil, msg))
- }
- if err := c.readFromUntil(c.conn, recordHeaderLen+n); err != nil {
- if e, ok := err.(net.Error); !ok || !e.Temporary() {
- c.in.setErrorLocked(err)
- }
- return err
- }
-
- // Process message.
- record := c.rawInput.Next(recordHeaderLen + n)
- data, typ, err := c.in.decrypt(record)
- if err != nil {
- return c.in.setErrorLocked(c.sendAlert(err.(alert)))
- }
- if len(data) > maxPlaintext {
- return c.in.setErrorLocked(c.sendAlert(alertRecordOverflow))
- }
-
- // Application Data messages are always protected.
- if c.in.cipher == nil && typ == recordTypeApplicationData {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
-
- if typ != recordTypeAlert && typ != recordTypeChangeCipherSpec && len(data) > 0 {
- // This is a state-advancing message: reset the retry count.
- c.retryCount = 0
- }
-
- // Handshake messages MUST NOT be interleaved with other record types in TLS 1.3.
- if c.vers == VersionTLS13 && typ != recordTypeHandshake && c.hand.Len() > 0 {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
-
- switch typ {
- default:
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
-
- case recordTypeAlert:
- if len(data) != 2 {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- if alert(data[1]) == alertCloseNotify {
- return c.in.setErrorLocked(io.EOF)
- }
- if c.vers == VersionTLS13 {
- return c.in.setErrorLocked(&net.OpError{Op: "remote error", Err: alert(data[1])})
- }
- switch data[0] {
- case alertLevelWarning:
- // Drop the record on the floor and retry.
- return c.retryReadRecord(expectChangeCipherSpec)
- case alertLevelError:
- return c.in.setErrorLocked(&net.OpError{Op: "remote error", Err: alert(data[1])})
- default:
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
-
- case recordTypeChangeCipherSpec:
- if len(data) != 1 || data[0] != 1 {
- return c.in.setErrorLocked(c.sendAlert(alertDecodeError))
- }
- // Handshake messages are not allowed to fragment across the CCS.
- if c.hand.Len() > 0 {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- // In TLS 1.3, change_cipher_spec records are ignored until the
- // Finished. See RFC 8446, Appendix D.4. Note that according to Section
- // 5, a server can send a ChangeCipherSpec before its ServerHello, when
- // c.vers is still unset. That's not useful though and suspicious if the
- // server then selects a lower protocol version, so don't allow that.
- if c.vers == VersionTLS13 {
- return c.retryReadRecord(expectChangeCipherSpec)
- }
- if !expectChangeCipherSpec {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- if err := c.in.changeCipherSpec(); err != nil {
- return c.in.setErrorLocked(c.sendAlert(err.(alert)))
- }
-
- case recordTypeApplicationData:
- if !handshakeComplete || expectChangeCipherSpec {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- // Some OpenSSL servers send empty records in order to randomize the
- // CBC IV. Ignore a limited number of empty records.
- if len(data) == 0 {
- return c.retryReadRecord(expectChangeCipherSpec)
- }
- // Note that data is owned by c.rawInput, following the Next call above,
- // to avoid copying the plaintext. This is safe because c.rawInput is
- // not read from or written to until c.input is drained.
- c.input.Reset(data)
-
- case recordTypeHandshake:
- if len(data) == 0 || expectChangeCipherSpec {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- c.hand.Write(data)
- }
-
- return nil
-}
-
-// retryReadRecord recurses into readRecordOrCCS to drop a non-advancing record, like
-// a warning alert, empty application_data, or a change_cipher_spec in TLS 1.3.
-func (c *Conn) retryReadRecord(expectChangeCipherSpec bool) error {
- c.retryCount++
- if c.retryCount > maxUselessRecords {
- c.sendAlert(alertUnexpectedMessage)
- return c.in.setErrorLocked(errors.New("tls: too many ignored records"))
- }
- return c.readRecordOrCCS(expectChangeCipherSpec)
-}
-
-// atLeastReader reads from R, stopping with EOF once at least N bytes have been
-// read. It is different from an io.LimitedReader in that it doesn't cut short
-// the last Read call, and in that it considers an early EOF an error.
-type atLeastReader struct {
- R io.Reader
- N int64
-}
-
-func (r *atLeastReader) Read(p []byte) (int, error) {
- if r.N <= 0 {
- return 0, io.EOF
- }
- n, err := r.R.Read(p)
- r.N -= int64(n) // won't underflow unless len(p) >= n > 9223372036854775809
- if r.N > 0 && err == io.EOF {
- return n, io.ErrUnexpectedEOF
- }
- if r.N <= 0 && err == nil {
- return n, io.EOF
- }
- return n, err
-}
-
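The difference from io.LimitedReader is easiest to see against a source that
is too short; a sketch, assuming the usual imports, with an illustrative
helper name.

	import (
		"bytes"
		"strings"
	)

	func atLeastExample() error {
		// Five bytes are required but only three are available, so the
		// copy ends in io.ErrUnexpectedEOF rather than a silent short read.
		r := &atLeastReader{R: strings.NewReader("abc"), N: 5}
		var buf bytes.Buffer
		_, err := buf.ReadFrom(r)
		return err
	}
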
-// readFromUntil reads from r into c.rawInput until c.rawInput contains
-// at least n bytes or else returns an error.
-func (c *Conn) readFromUntil(r io.Reader, n int) error {
- if c.rawInput.Len() >= n {
- return nil
- }
- needs := n - c.rawInput.Len()
- // There might be extra input waiting on the wire. Make a best effort
- // attempt to fetch it so that it can be used in (*Conn).Read to
- // "predict" closeNotify alerts.
- c.rawInput.Grow(needs + bytes.MinRead)
- _, err := c.rawInput.ReadFrom(&atLeastReader{r, int64(needs)})
- return err
-}
-
-// sendAlertLocked sends a TLS alert message. The caller must hold c.out's Mutex.
-func (c *Conn) sendAlertLocked(err alert) error {
- switch err {
- case alertNoRenegotiation, alertCloseNotify:
- c.tmp[0] = alertLevelWarning
- default:
- c.tmp[0] = alertLevelError
- }
- c.tmp[1] = byte(err)
-
- _, writeErr := c.writeRecordLocked(recordTypeAlert, c.tmp[0:2])
- if err == alertCloseNotify {
- // closeNotify is a special case in that it isn't an error.
- return writeErr
- }
-
- return c.out.setErrorLocked(&net.OpError{Op: "local error", Err: err})
-}
-
-// sendAlert sends a TLS alert message.
-func (c *Conn) sendAlert(err alert) error {
- c.out.Lock()
- defer c.out.Unlock()
- return c.sendAlertLocked(err)
-}
-
-const (
- // tcpMSSEstimate is a conservative estimate of the TCP maximum segment
- // size (MSS). A constant is used, rather than querying the kernel for
- // the actual MSS, to avoid complexity. The value here is the IPv6
- // minimum MTU (1280 bytes) minus the overhead of an IPv6 header (40
- // bytes) and a TCP header with timestamps (32 bytes).
- tcpMSSEstimate = 1208
-
- // recordSizeBoostThreshold is the number of bytes of application data
- // sent after which the TLS record size will be increased to the
- // maximum.
- recordSizeBoostThreshold = 128 * 1024
-)
-
-// maxPayloadSizeForWrite returns the maximum TLS payload size to use for the
-// next application data record. There is the following trade-off:
-//
-// - For latency-sensitive applications, such as web browsing, each TLS
-// record should fit in one TCP segment.
-// - For throughput-sensitive applications, such as large file transfers,
-// larger TLS records better amortize framing and encryption overheads.
-//
-// A simple heuristic that works well in practice is to use small records for
-// the first 1MB of data, then use larger records for subsequent data, and
-// reset back to smaller records after the connection becomes idle. See "High
-// Performance Web Networking", Chapter 4, or:
-// https://www.igvita.com/2013/10/24/optimizing-tls-record-size-and-buffering-latency/
-//
-// In the interests of simplicity and determinism, this code does not attempt
-// to reset the record size once the connection is idle, however.
-func (c *Conn) maxPayloadSizeForWrite(typ recordType) int {
- if c.config.DynamicRecordSizingDisabled || typ != recordTypeApplicationData {
- return maxPlaintext
- }
-
- if c.bytesSent >= recordSizeBoostThreshold {
- return maxPlaintext
- }
-
- // Subtract TLS overheads to get the maximum payload size.
- payloadBytes := tcpMSSEstimate - recordHeaderLen - c.out.explicitNonceLen()
- if c.out.cipher != nil {
- switch ciph := c.out.cipher.(type) {
- case cipher.Stream:
- payloadBytes -= c.out.mac.Size()
- case cipher.AEAD:
- payloadBytes -= ciph.Overhead()
- case cbcMode:
- blockSize := ciph.BlockSize()
- // The payload must fit in a multiple of blockSize, with
- // room for at least one padding byte.
- payloadBytes = (payloadBytes & ^(blockSize - 1)) - 1
- // The MAC is appended before padding so affects the
- // payload size directly.
- payloadBytes -= c.out.mac.Size()
- default:
- panic("unknown cipher type")
- }
- }
- if c.vers == VersionTLS13 {
- payloadBytes-- // encrypted ContentType
- }
-
- // Allow packet growth in arithmetic progression up to max.
- pkt := c.packetsSent
- c.packetsSent++
- if pkt > 1000 {
- return maxPlaintext // avoid overflow in multiply below
- }
-
- n := payloadBytes * int(pkt+1)
- if n > maxPlaintext {
- n = maxPlaintext
- }
- return n
-}
-
-func (c *Conn) write(data []byte) (int, error) {
- if c.buffering {
- c.sendBuf = append(c.sendBuf, data...)
- return len(data), nil
- }
-
- n, err := c.conn.Write(data)
- c.bytesSent += int64(n)
- return n, err
-}
-
-func (c *Conn) flush() (int, error) {
- if len(c.sendBuf) == 0 {
- return 0, nil
- }
-
- n, err := c.conn.Write(c.sendBuf)
- c.bytesSent += int64(n)
- c.sendBuf = nil
- c.buffering = false
- return n, err
-}
-
-// outBufPool pools the record-sized scratch buffers used by writeRecordLocked.
-var outBufPool = sync.Pool{
- New: func() any {
- return new([]byte)
- },
-}
-
-// writeRecordLocked writes a TLS record with the given type and payload to the
-// connection and updates the record layer state.
-func (c *Conn) writeRecordLocked(typ recordType, data []byte) (int, error) {
- outBufPtr := outBufPool.Get().(*[]byte)
- outBuf := *outBufPtr
- defer func() {
- // You might be tempted to simplify this by just passing &outBuf to Put,
- // but that would make the local copy of the outBuf slice header escape
- // to the heap, causing an allocation. Instead, we keep around the
- // pointer to the slice header returned by Get, which is already on the
- // heap, and overwrite and return that.
- *outBufPtr = outBuf
- outBufPool.Put(outBufPtr)
- }()
-
- var n int
- for len(data) > 0 {
- m := len(data)
- if maxPayload := c.maxPayloadSizeForWrite(typ); m > maxPayload {
- m = maxPayload
- }
-
- _, outBuf = sliceForAppend(outBuf[:0], recordHeaderLen)
- outBuf[0] = byte(typ)
- vers := c.vers
- if vers == 0 {
- // Some TLS servers fail if the record version is
- // greater than TLS 1.0 for the initial ClientHello.
- vers = VersionTLS10
- } else if vers == VersionTLS13 {
- // TLS 1.3 froze the record layer version to 1.2.
- // See RFC 8446, Section 5.1.
- vers = VersionTLS12
- }
- outBuf[1] = byte(vers >> 8)
- outBuf[2] = byte(vers)
- outBuf[3] = byte(m >> 8)
- outBuf[4] = byte(m)
-
- var err error
- outBuf, err = c.out.encrypt(outBuf, data[:m], c.config.rand())
- if err != nil {
- return n, err
- }
- if _, err := c.write(outBuf); err != nil {
- return n, err
- }
- n += m
- data = data[m:]
- }
-
- if typ == recordTypeChangeCipherSpec && c.vers != VersionTLS13 {
- if err := c.out.changeCipherSpec(); err != nil {
- return n, c.sendAlertLocked(err.(alert))
- }
- }
-
- return n, nil
-}
-
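The pointer-to-slice-header dance in the defer above is a general sync.Pool
pattern; a sketch of it outside any TLS context, with hypothetical names.

	import "sync"

	var bufPool = sync.Pool{New: func() any { return new([]byte) }}

	func withPooledBuffer(use func([]byte) []byte) {
		p := bufPool.Get().(*[]byte) // the *[]byte itself lives on the heap
		buf := use((*p)[:0])         // may grow, i.e. reallocate, the buffer
		*p = buf                     // store the new header back through p...
		bufPool.Put(p)               // ...so buf's header never escapes at Put
	}
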
-// writeRecord writes a TLS record with the given type and payload to the
-// connection and updates the record layer state.
-func (c *Conn) writeRecord(typ recordType, data []byte) (int, error) {
- c.out.Lock()
- defer c.out.Unlock()
-
- return c.writeRecordLocked(typ, data)
-}
-
-// readHandshake reads the next handshake message from
-// the record layer.
-func (c *Conn) readHandshake() (any, error) {
- for c.hand.Len() < 4 {
- if err := c.readRecord(); err != nil {
- return nil, err
- }
- }
-
- data := c.hand.Bytes()
- n := int(data[1])<<16 | int(data[2])<<8 | int(data[3])
- if n > maxHandshake {
- c.sendAlertLocked(alertInternalError)
- return nil, c.in.setErrorLocked(fmt.Errorf("tls: handshake message of length %d bytes exceeds maximum of %d bytes", n, maxHandshake))
- }
- for c.hand.Len() < 4+n {
- if err := c.readRecord(); err != nil {
- return nil, err
- }
- }
- data = c.hand.Next(4 + n)
- var m handshakeMessage
- switch data[0] {
- case typeHelloRequest:
- m = new(helloRequestMsg)
- case typeClientHello:
- m = new(clientHelloMsg)
- case typeServerHello:
- m = new(serverHelloMsg)
- case typeNewSessionTicket:
- if c.vers == VersionTLS13 {
- m = new(newSessionTicketMsgTLS13)
- } else {
- m = new(newSessionTicketMsg)
- }
- case typeCertificate:
- if c.vers == VersionTLS13 {
- m = new(certificateMsgTLS13)
- } else {
- m = new(certificateMsg)
- }
- case typeCertificateRequest:
- if c.vers == VersionTLS13 {
- m = new(certificateRequestMsgTLS13)
- } else {
- m = &certificateRequestMsg{
- hasSignatureAlgorithm: c.vers >= VersionTLS12,
- }
- }
- case typeCertificateStatus:
- m = new(certificateStatusMsg)
- case typeServerKeyExchange:
- m = new(serverKeyExchangeMsg)
- case typeServerHelloDone:
- m = new(serverHelloDoneMsg)
- case typeClientKeyExchange:
- m = new(clientKeyExchangeMsg)
- case typeCertificateVerify:
- m = &certificateVerifyMsg{
- hasSignatureAlgorithm: c.vers >= VersionTLS12,
- }
- case typeFinished:
- m = new(finishedMsg)
- case typeEncryptedExtensions:
- m = new(encryptedExtensionsMsg)
- case typeEndOfEarlyData:
- m = new(endOfEarlyDataMsg)
- case typeKeyUpdate:
- m = new(keyUpdateMsg)
- default:
- return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
-
- // The handshake message unmarshalers
- // expect to be able to keep references to data,
- // so pass in a fresh copy that won't be overwritten.
- data = append([]byte(nil), data...)
-
- if !m.unmarshal(data) {
- return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- return m, nil
-}
-
-var (
- errShutdown = errors.New("tls: protocol is shutdown")
-)
-
-// Write writes data to the connection.
-//
-// As Write calls Handshake, a deadline must be set for both Read and Write
-// before Write is called when the handshake has not yet completed, in order
-// to prevent indefinite blocking. See SetDeadline, SetReadDeadline, and
-// SetWriteDeadline.
-func (c *Conn) Write(b []byte) (int, error) {
- // interlock with Close below
- for {
- x := atomic.LoadInt32(&c.activeCall)
- if x&1 != 0 {
- return 0, net.ErrClosed
- }
- if atomic.CompareAndSwapInt32(&c.activeCall, x, x+2) {
- break
- }
- }
- defer atomic.AddInt32(&c.activeCall, -2)
-
- if err := c.Handshake(); err != nil {
- return 0, err
- }
-
- c.out.Lock()
- defer c.out.Unlock()
-
- if err := c.out.err; err != nil {
- return 0, err
- }
-
- if !c.handshakeComplete() {
- return 0, alertInternalError
- }
-
- if c.closeNotifySent {
- return 0, errShutdown
- }
-
- // TLS 1.0 is susceptible to a chosen-plaintext
- // attack when using block mode ciphers due to predictable IVs.
- // This can be prevented by splitting each Application Data
- // record into two records, effectively randomizing the IV.
- //
- // https://www.openssl.org/~bodo/tls-cbc.txt
- // https://bugzilla.mozilla.org/show_bug.cgi?id=665814
- // https://www.imperialviolet.org/2012/01/15/beastfollowup.html
-
- var m int
- if len(b) > 1 && c.vers == VersionTLS10 {
- if _, ok := c.out.cipher.(cipher.BlockMode); ok {
- n, err := c.writeRecordLocked(recordTypeApplicationData, b[:1])
- if err != nil {
- return n, c.out.setErrorLocked(err)
- }
- m, b = 1, b[1:]
- }
- }
-
- n, err := c.writeRecordLocked(recordTypeApplicationData, b)
- return n + m, c.out.setErrorLocked(err)
-}
-
-// handleRenegotiation processes a HelloRequest handshake message.
-func (c *Conn) handleRenegotiation() error {
- if c.vers == VersionTLS13 {
- return errors.New("tls: internal error: unexpected renegotiation")
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- helloReq, ok := msg.(*helloRequestMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(helloReq, msg)
- }
-
- if !c.isClient {
- return c.sendAlert(alertNoRenegotiation)
- }
-
- switch c.config.Renegotiation {
- case RenegotiateNever:
- return c.sendAlert(alertNoRenegotiation)
- case RenegotiateOnceAsClient:
- if c.handshakes > 1 {
- return c.sendAlert(alertNoRenegotiation)
- }
- case RenegotiateFreelyAsClient:
- // Ok.
- default:
- c.sendAlert(alertInternalError)
- return errors.New("tls: unknown Renegotiation value")
- }
-
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
-
- atomic.StoreUint32(&c.handshakeStatus, 0)
- if c.handshakeErr = c.clientHandshake(context.Background()); c.handshakeErr == nil {
- c.handshakes++
- }
- return c.handshakeErr
-}
-
-// handlePostHandshakeMessage processes a handshake message that arrived after
-// the handshake is complete. Up to TLS 1.2, it indicates the start of a
-// renegotiation.
-func (c *Conn) handlePostHandshakeMessage() error {
- if c.vers != VersionTLS13 {
- return c.handleRenegotiation()
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- c.retryCount++
- if c.retryCount > maxUselessRecords {
- c.sendAlert(alertUnexpectedMessage)
- return c.in.setErrorLocked(errors.New("tls: too many non-advancing records"))
- }
-
- switch msg := msg.(type) {
- case *newSessionTicketMsgTLS13:
- return c.handleNewSessionTicket(msg)
- case *keyUpdateMsg:
- return c.handleKeyUpdate(msg)
- default:
- c.sendAlert(alertUnexpectedMessage)
- return fmt.Errorf("tls: received unexpected handshake message of type %T", msg)
- }
-}
-
-func (c *Conn) handleKeyUpdate(keyUpdate *keyUpdateMsg) error {
- cipherSuite := cipherSuiteTLS13ByID(c.cipherSuite)
- if cipherSuite == nil {
- return c.in.setErrorLocked(c.sendAlert(alertInternalError))
- }
-
- newSecret := cipherSuite.nextTrafficSecret(c.in.trafficSecret)
- c.in.setTrafficSecret(cipherSuite, newSecret)
-
- if keyUpdate.updateRequested {
- c.out.Lock()
- defer c.out.Unlock()
-
- msg := &keyUpdateMsg{}
- _, err := c.writeRecordLocked(recordTypeHandshake, msg.marshal())
- if err != nil {
- // Surface the error at the next write.
- c.out.setErrorLocked(err)
- return nil
- }
-
- newSecret := cipherSuite.nextTrafficSecret(c.out.trafficSecret)
- c.out.setTrafficSecret(cipherSuite, newSecret)
- }
-
- return nil
-}
-
-// Read reads data from the connection.
-//
-// As Read calls Handshake, a deadline must be set for both Read and Write
-// before Read is called when the handshake has not yet completed, in order
-// to prevent indefinite blocking. See SetDeadline, SetReadDeadline, and
-// SetWriteDeadline.
-func (c *Conn) Read(b []byte) (int, error) {
- if err := c.Handshake(); err != nil {
- return 0, err
- }
- if len(b) == 0 {
- // Put this after Handshake, in case people were calling
- // Read(nil) for the side effect of the Handshake.
- return 0, nil
- }
-
- c.in.Lock()
- defer c.in.Unlock()
-
- for c.input.Len() == 0 {
- if err := c.readRecord(); err != nil {
- return 0, err
- }
- for c.hand.Len() > 0 {
- if err := c.handlePostHandshakeMessage(); err != nil {
- return 0, err
- }
- }
- }
-
- n, _ := c.input.Read(b)
-
- // If a close-notify alert is waiting, read it so that we can return (n,
- // EOF) instead of (n, nil), to signal to the HTTP response reading
- // goroutine that the connection is now closed. This eliminates a race
- // where the HTTP response reading goroutine would otherwise not observe
- // the EOF until its next read, by which time a client goroutine might
- // have already tried to reuse the HTTP connection for a new request.
- // See https://golang.org/cl/76400046 and https://golang.org/issue/3514
- if n != 0 && c.input.Len() == 0 && c.rawInput.Len() > 0 &&
- recordType(c.rawInput.Bytes()[0]) == recordTypeAlert {
- if err := c.readRecord(); err != nil {
- return n, err // will be io.EOF on closeNotify
- }
- }
-
- return n, nil
-}
-
-// Close closes the connection.
-func (c *Conn) Close() error {
- // Interlock with Conn.Write above.
- var x int32
- for {
- x = atomic.LoadInt32(&c.activeCall)
- if x&1 != 0 {
- return net.ErrClosed
- }
- if atomic.CompareAndSwapInt32(&c.activeCall, x, x|1) {
- break
- }
- }
- if x != 0 {
- // io.Writer and io.Closer should not be used concurrently.
- // If Close is called while a Write is currently in-flight,
- // interpret that as a sign that this Close is really just
- // being used to break the Write and/or clean up resources and
- // avoid sending the alertCloseNotify, which may block
- // waiting on handshakeMutex or the c.out mutex.
- return c.conn.Close()
- }
-
- var alertErr error
- if c.handshakeComplete() {
- if err := c.closeNotify(); err != nil {
- alertErr = fmt.Errorf("tls: failed to send closeNotify alert (but connection was closed anyway): %w", err)
- }
- }
-
- if err := c.conn.Close(); err != nil {
- return err
- }
- return alertErr
-}
-
-var errEarlyCloseWrite = errors.New("tls: CloseWrite called before handshake complete")
-
-// CloseWrite shuts down the writing side of the connection. It should only be
-// called once the handshake has completed, and it does not call CloseWrite on
-// the underlying connection. Most callers should just use Close.
-func (c *Conn) CloseWrite() error {
- if !c.handshakeComplete() {
- return errEarlyCloseWrite
- }
-
- return c.closeNotify()
-}
-
-func (c *Conn) closeNotify() error {
- c.out.Lock()
- defer c.out.Unlock()
-
- if !c.closeNotifySent {
- // Set a Write Deadline to prevent possibly blocking forever.
- c.SetWriteDeadline(time.Now().Add(time.Second * 5))
- c.closeNotifyErr = c.sendAlertLocked(alertCloseNotify)
- c.closeNotifySent = true
- // Any subsequent writes will fail.
- c.SetWriteDeadline(time.Now())
- }
- return c.closeNotifyErr
-}
-
-// Handshake runs the client or server handshake
-// protocol if it has not yet been run.
-//
-// Most uses of this package need not call Handshake explicitly: the
-// first Read or Write will call it automatically.
-//
-// For control over canceling or setting a timeout on a handshake, use
-// HandshakeContext or the Dialer's DialContext method instead.
-func (c *Conn) Handshake() error {
- return c.HandshakeContext(context.Background())
-}
-
-// HandshakeContext runs the client or server handshake
-// protocol if it has not yet been run.
-//
-// The provided Context must be non-nil. If the context is canceled before
-// the handshake is complete, the handshake is interrupted and an error is returned.
-// Once the handshake has completed, cancellation of the context will not affect the
-// connection.
-//
-// Most uses of this package need not call HandshakeContext explicitly: the
-// first Read or Write will call it automatically.
-func (c *Conn) HandshakeContext(ctx context.Context) error {
- // Delegate to unexported method for named return
- // without confusing documented signature.
- return c.handshakeContext(ctx)
-}
-
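A dialing sketch that bounds only the handshake with a context, per the doc
comment above; the address, timeout, and helper name are illustrative.

	import (
		"context"
		"crypto/tls"
		"net"
		"time"
	)

	func dialWithHandshakeTimeout(addr string, cfg *tls.Config) (*tls.Conn, error) {
		raw, err := net.Dial("tcp", addr)
		if err != nil {
			return nil, err
		}
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		conn := tls.Client(raw, cfg)
		if err := conn.HandshakeContext(ctx); err != nil {
			raw.Close() // release the socket; the interrupter may have closed it already
			return nil, err
		}
		return conn, nil
	}
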
-func (c *Conn) handshakeContext(ctx context.Context) (ret error) {
- // Fast sync/atomic-based exit if there is no handshake in flight and the
- // last one succeeded without an error. Avoids the expensive context setup
- // and mutex for most Read and Write calls.
- if c.handshakeComplete() {
- return nil
- }
-
- handshakeCtx, cancel := context.WithCancel(ctx)
- // Note: defer this before starting the "interrupter" goroutine
- // so that we can tell the difference between the input being canceled and
- // this cancellation. In the former case, we need to close the connection.
- defer cancel()
-
- // Start the "interrupter" goroutine, if this context might be canceled.
-	// (The background context cannot be canceled.)
- //
- // The interrupter goroutine waits for the input context to be done and
- // closes the connection if this happens before the function returns.
- if ctx.Done() != nil {
- done := make(chan struct{})
- interruptRes := make(chan error, 1)
- defer func() {
- close(done)
- if ctxErr := <-interruptRes; ctxErr != nil {
- // Return context error to user.
- ret = ctxErr
- }
- }()
- go func() {
- select {
- case <-handshakeCtx.Done():
- // Close the connection, discarding the error
- _ = c.conn.Close()
- interruptRes <- handshakeCtx.Err()
- case <-done:
- interruptRes <- nil
- }
- }()
- }
-
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
-
- if err := c.handshakeErr; err != nil {
- return err
- }
- if c.handshakeComplete() {
- return nil
- }
-
- c.in.Lock()
- defer c.in.Unlock()
-
- c.handshakeErr = c.handshakeFn(handshakeCtx)
- if c.handshakeErr == nil {
- c.handshakes++
- } else {
-		// If an error occurred during the handshake, try to flush the
- // alert that might be left in the buffer.
- c.flush()
- }
-
- if c.handshakeErr == nil && !c.handshakeComplete() {
- c.handshakeErr = errors.New("tls: internal error: handshake should have had a result")
- }
- if c.handshakeErr != nil && c.handshakeComplete() {
- panic("tls: internal error: handshake returned an error but is marked successful")
- }
-
- return c.handshakeErr
-}
-
-// ConnectionState returns basic TLS details about the connection.
-func (c *Conn) ConnectionState() ConnectionState {
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
- return c.connectionStateLocked()
-}
-
-func (c *Conn) connectionStateLocked() ConnectionState {
- var state ConnectionState
- state.HandshakeComplete = c.handshakeComplete()
- state.Version = c.vers
- state.NegotiatedProtocol = c.clientProtocol
- state.DidResume = c.didResume
- state.NegotiatedProtocolIsMutual = true
- state.ServerName = c.serverName
- state.CipherSuite = c.cipherSuite
- state.PeerCertificates = c.peerCertificates
- state.VerifiedChains = c.verifiedChains
- state.SignedCertificateTimestamps = c.scts
- state.OCSPResponse = c.ocspResponse
- if !c.didResume && c.vers != VersionTLS13 {
- if c.clientFinishedIsFirst {
- state.TLSUnique = c.clientFinished[:]
- } else {
- state.TLSUnique = c.serverFinished[:]
- }
- }
- if c.config.Renegotiation != RenegotiateNever {
- state.ekm = noExportedKeyingMaterial
- } else {
- state.ekm = c.ekm
- }
- return state
-}
-
-// OCSPResponse returns the stapled OCSP response from the TLS server, if
-// any. (Only valid for client connections.)
-func (c *Conn) OCSPResponse() []byte {
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
-
- return c.ocspResponse
-}
-
-// VerifyHostname checks that the peer certificate chain is valid for
-// connecting to host. If so, it returns nil; if not, it returns an error
-// describing the problem.
-func (c *Conn) VerifyHostname(host string) error {
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
- if !c.isClient {
- return errors.New("tls: VerifyHostname called on TLS server connection")
- }
- if !c.handshakeComplete() {
- return errors.New("tls: handshake has not yet been performed")
- }
- if len(c.verifiedChains) == 0 {
- return errors.New("tls: handshake did not verify certificate chain")
- }
- return c.peerCertificates[0].VerifyHostname(host)
-}
-
-func (c *Conn) handshakeComplete() bool {
- return atomic.LoadUint32(&c.handshakeStatus) == 1
-}
diff --git a/contrib/go/_std_1.18/src/crypto/tls/handshake_client.go b/contrib/go/_std_1.18/src/crypto/tls/handshake_client.go
deleted file mode 100644
index a3e00777f1..0000000000
--- a/contrib/go/_std_1.18/src/crypto/tls/handshake_client.go
+++ /dev/null
@@ -1,1011 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tls
-
-import (
- "bytes"
- "context"
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/rsa"
- "crypto/subtle"
- "crypto/x509"
- "errors"
- "fmt"
- "hash"
- "io"
- "net"
- "strings"
- "sync/atomic"
- "time"
-)
-
-type clientHandshakeState struct {
- c *Conn
- ctx context.Context
- serverHello *serverHelloMsg
- hello *clientHelloMsg
- suite *cipherSuite
- finishedHash finishedHash
- masterSecret []byte
- session *ClientSessionState
-}
-
-func (c *Conn) makeClientHello() (*clientHelloMsg, ecdheParameters, error) {
- config := c.config
- if len(config.ServerName) == 0 && !config.InsecureSkipVerify {
- return nil, nil, errors.New("tls: either ServerName or InsecureSkipVerify must be specified in the tls.Config")
- }
-
- nextProtosLength := 0
- for _, proto := range config.NextProtos {
- if l := len(proto); l == 0 || l > 255 {
- return nil, nil, errors.New("tls: invalid NextProtos value")
- } else {
- nextProtosLength += 1 + l
- }
- }
- if nextProtosLength > 0xffff {
- return nil, nil, errors.New("tls: NextProtos values too large")
- }
-
- supportedVersions := config.supportedVersions(roleClient)
- if len(supportedVersions) == 0 {
- return nil, nil, errors.New("tls: no supported versions satisfy MinVersion and MaxVersion")
- }
-
- clientHelloVersion := config.maxSupportedVersion(roleClient)
- // The version at the beginning of the ClientHello was capped at TLS 1.2
- // for compatibility reasons. The supported_versions extension is used
- // to negotiate versions now. See RFC 8446, Section 4.2.1.
- if clientHelloVersion > VersionTLS12 {
- clientHelloVersion = VersionTLS12
- }
-
- hello := &clientHelloMsg{
- vers: clientHelloVersion,
- compressionMethods: []uint8{compressionNone},
- random: make([]byte, 32),
- sessionId: make([]byte, 32),
- ocspStapling: true,
- scts: true,
- serverName: hostnameInSNI(config.ServerName),
- supportedCurves: config.curvePreferences(),
- supportedPoints: []uint8{pointFormatUncompressed},
- secureRenegotiationSupported: true,
- alpnProtocols: config.NextProtos,
- supportedVersions: supportedVersions,
- }
-
- if c.handshakes > 0 {
- hello.secureRenegotiation = c.clientFinished[:]
- }
-
- preferenceOrder := cipherSuitesPreferenceOrder
- if !hasAESGCMHardwareSupport {
- preferenceOrder = cipherSuitesPreferenceOrderNoAES
- }
- configCipherSuites := config.cipherSuites()
- hello.cipherSuites = make([]uint16, 0, len(configCipherSuites))
-
- for _, suiteId := range preferenceOrder {
- suite := mutualCipherSuite(configCipherSuites, suiteId)
- if suite == nil {
- continue
- }
- // Don't advertise TLS 1.2-only cipher suites unless
- // we're attempting TLS 1.2.
- if hello.vers < VersionTLS12 && suite.flags&suiteTLS12 != 0 {
- continue
- }
- hello.cipherSuites = append(hello.cipherSuites, suiteId)
- }
-
- _, err := io.ReadFull(config.rand(), hello.random)
- if err != nil {
- return nil, nil, errors.New("tls: short read from Rand: " + err.Error())
- }
-
- // A random session ID is used to detect when the server accepted a ticket
- // and is resuming a session (see RFC 5077). In TLS 1.3, it's always set as
- // a compatibility measure (see RFC 8446, Section 4.1.2).
- if _, err := io.ReadFull(config.rand(), hello.sessionId); err != nil {
- return nil, nil, errors.New("tls: short read from Rand: " + err.Error())
- }
-
- if hello.vers >= VersionTLS12 {
- hello.supportedSignatureAlgorithms = supportedSignatureAlgorithms
- }
-
- var params ecdheParameters
- if hello.supportedVersions[0] == VersionTLS13 {
- if hasAESGCMHardwareSupport {
- hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13...)
- } else {
- hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13NoAES...)
- }
-
- curveID := config.curvePreferences()[0]
- if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
- return nil, nil, errors.New("tls: CurvePreferences includes unsupported curve")
- }
- params, err = generateECDHEParameters(config.rand(), curveID)
- if err != nil {
- return nil, nil, err
- }
- hello.keyShares = []keyShare{{group: curveID, data: params.PublicKey()}}
- }
-
- return hello, params, nil
-}
-
-func (c *Conn) clientHandshake(ctx context.Context) (err error) {
- if c.config == nil {
- c.config = defaultConfig()
- }
-
- // This may be a renegotiation handshake, in which case some fields
- // need to be reset.
- c.didResume = false
-
- hello, ecdheParams, err := c.makeClientHello()
- if err != nil {
- return err
- }
- c.serverName = hello.serverName
-
- cacheKey, session, earlySecret, binderKey := c.loadSession(hello)
- if cacheKey != "" && session != nil {
- defer func() {
- // If we got a handshake failure when resuming a session, throw away
- // the session ticket. See RFC 5077, Section 3.2.
- //
- // RFC 8446 makes no mention of dropping tickets on failure, but it
- // does require servers to abort on invalid binders, so we need to
- // delete tickets to recover from a corrupted PSK.
- if err != nil {
- c.config.ClientSessionCache.Put(cacheKey, nil)
- }
- }()
- }
-
- if _, err := c.writeRecord(recordTypeHandshake, hello.marshal()); err != nil {
- return err
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- serverHello, ok := msg.(*serverHelloMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(serverHello, msg)
- }
-
- if err := c.pickTLSVersion(serverHello); err != nil {
- return err
- }
-
- // If we are negotiating a protocol version that's lower than what we
- // support, check for the server downgrade canaries.
- // See RFC 8446, Section 4.1.3.
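-	// The canaries are fixed eight-byte values occupying the last eight bytes
-	// of the 32-byte server random, which is why only random[24:] is compared.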
- maxVers := c.config.maxSupportedVersion(roleClient)
- tls12Downgrade := string(serverHello.random[24:]) == downgradeCanaryTLS12
- tls11Downgrade := string(serverHello.random[24:]) == downgradeCanaryTLS11
- if maxVers == VersionTLS13 && c.vers <= VersionTLS12 && (tls12Downgrade || tls11Downgrade) ||
- maxVers == VersionTLS12 && c.vers <= VersionTLS11 && tls11Downgrade {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: downgrade attempt detected, possibly due to a MitM attack or a broken middlebox")
- }
-
- if c.vers == VersionTLS13 {
- hs := &clientHandshakeStateTLS13{
- c: c,
- ctx: ctx,
- serverHello: serverHello,
- hello: hello,
- ecdheParams: ecdheParams,
- session: session,
- earlySecret: earlySecret,
- binderKey: binderKey,
- }
-
- // In TLS 1.3, session tickets are delivered after the handshake.
- return hs.handshake()
- }
-
- hs := &clientHandshakeState{
- c: c,
- ctx: ctx,
- serverHello: serverHello,
- hello: hello,
- session: session,
- }
-
- if err := hs.handshake(); err != nil {
- return err
- }
-
-	// If we had a successful handshake and hs.session is different from
-	// the one already cached, cache the new one.
- if cacheKey != "" && hs.session != nil && session != hs.session {
- c.config.ClientSessionCache.Put(cacheKey, hs.session)
- }
-
- return nil
-}
-
-func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
- session *ClientSessionState, earlySecret, binderKey []byte) {
- if c.config.SessionTicketsDisabled || c.config.ClientSessionCache == nil {
- return "", nil, nil, nil
- }
-
- hello.ticketSupported = true
-
- if hello.supportedVersions[0] == VersionTLS13 {
- // Require DHE on resumption as it guarantees forward secrecy against
- // compromise of the session ticket key. See RFC 8446, Section 4.2.9.
- hello.pskModes = []uint8{pskModeDHE}
- }
-
- // Session resumption is not allowed if renegotiating because
- // renegotiation is primarily used to allow a client to send a client
- // certificate, which would be skipped if session resumption occurred.
- if c.handshakes != 0 {
- return "", nil, nil, nil
- }
-
- // Try to resume a previously negotiated TLS session, if available.
- cacheKey = clientSessionCacheKey(c.conn.RemoteAddr(), c.config)
- session, ok := c.config.ClientSessionCache.Get(cacheKey)
- if !ok || session == nil {
- return cacheKey, nil, nil, nil
- }
-
- // Check that version used for the previous session is still valid.
- versOk := false
- for _, v := range hello.supportedVersions {
- if v == session.vers {
- versOk = true
- break
- }
- }
- if !versOk {
- return cacheKey, nil, nil, nil
- }
-
- // Check that the cached server certificate is not expired, and that it's
- // valid for the ServerName. This should be ensured by the cache key, but
- // protect the application from a faulty ClientSessionCache implementation.
- if !c.config.InsecureSkipVerify {
- if len(session.verifiedChains) == 0 {
- // The original connection had InsecureSkipVerify, while this doesn't.
- return cacheKey, nil, nil, nil
- }
- serverCert := session.serverCertificates[0]
- if c.config.time().After(serverCert.NotAfter) {
- // Expired certificate, delete the entry.
- c.config.ClientSessionCache.Put(cacheKey, nil)
- return cacheKey, nil, nil, nil
- }
- if err := serverCert.VerifyHostname(c.config.ServerName); err != nil {
- return cacheKey, nil, nil, nil
- }
- }
-
- if session.vers != VersionTLS13 {
- // In TLS 1.2 the cipher suite must match the resumed session. Ensure we
- // are still offering it.
- if mutualCipherSuite(hello.cipherSuites, session.cipherSuite) == nil {
- return cacheKey, nil, nil, nil
- }
-
- hello.sessionTicket = session.sessionTicket
- return
- }
-
- // Check that the session ticket is not expired.
- if c.config.time().After(session.useBy) {
- c.config.ClientSessionCache.Put(cacheKey, nil)
- return cacheKey, nil, nil, nil
- }
-
- // In TLS 1.3 the KDF hash must match the resumed session. Ensure we
- // offer at least one cipher suite with that hash.
- cipherSuite := cipherSuiteTLS13ByID(session.cipherSuite)
- if cipherSuite == nil {
- return cacheKey, nil, nil, nil
- }
- cipherSuiteOk := false
- for _, offeredID := range hello.cipherSuites {
- offeredSuite := cipherSuiteTLS13ByID(offeredID)
- if offeredSuite != nil && offeredSuite.hash == cipherSuite.hash {
- cipherSuiteOk = true
- break
- }
- }
- if !cipherSuiteOk {
- return cacheKey, nil, nil, nil
- }
-
- // Set the pre_shared_key extension. See RFC 8446, Section 4.2.11.1.
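-	// The ticket age is measured in milliseconds and obfuscated by adding the
-	// server-provided ageAdd value; uint32 arithmetic wraps modulo 2^32 as the
-	// RFC requires.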
- ticketAge := uint32(c.config.time().Sub(session.receivedAt) / time.Millisecond)
- identity := pskIdentity{
- label: session.sessionTicket,
- obfuscatedTicketAge: ticketAge + session.ageAdd,
- }
- hello.pskIdentities = []pskIdentity{identity}
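-	// Reserve an all-zero binder of the right length so the message lengths
-	// are correct; the real value is computed below over the ClientHello
-	// transcript without the binders.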
- hello.pskBinders = [][]byte{make([]byte, cipherSuite.hash.Size())}
-
- // Compute the PSK binders. See RFC 8446, Section 4.2.11.2.
- psk := cipherSuite.expandLabel(session.masterSecret, "resumption",
- session.nonce, cipherSuite.hash.Size())
- earlySecret = cipherSuite.extract(psk, nil)
- binderKey = cipherSuite.deriveSecret(earlySecret, resumptionBinderLabel, nil)
- transcript := cipherSuite.hash.New()
- transcript.Write(hello.marshalWithoutBinders())
- pskBinders := [][]byte{cipherSuite.finishedHash(binderKey, transcript)}
- hello.updateBinders(pskBinders)
-
- return
-}
-
-func (c *Conn) pickTLSVersion(serverHello *serverHelloMsg) error {
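-	// The supported_versions extension, when present, takes precedence over
-	// the legacy version field. See RFC 8446, Section 4.2.1.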
- peerVersion := serverHello.vers
- if serverHello.supportedVersion != 0 {
- peerVersion = serverHello.supportedVersion
- }
-
- vers, ok := c.config.mutualVersion(roleClient, []uint16{peerVersion})
- if !ok {
- c.sendAlert(alertProtocolVersion)
- return fmt.Errorf("tls: server selected unsupported protocol version %x", peerVersion)
- }
-
- c.vers = vers
- c.haveVers = true
- c.in.version = vers
- c.out.version = vers
-
- return nil
-}
-
-// Does the handshake, either a full one or one that resumes an old session.
-// Requires hs.c, hs.hello, hs.serverHello, and, optionally, hs.session to be set.
-func (hs *clientHandshakeState) handshake() error {
- c := hs.c
-
- isResume, err := hs.processServerHello()
- if err != nil {
- return err
- }
-
- hs.finishedHash = newFinishedHash(c.vers, hs.suite)
-
- // No signatures of the handshake are needed in a resumption.
- // Otherwise, in a full handshake, if we don't have any certificates
- // configured then we will never send a CertificateVerify message and
- // thus no signatures are needed in that case either.
- if isResume || (len(c.config.Certificates) == 0 && c.config.GetClientCertificate == nil) {
- hs.finishedHash.discardHandshakeBuffer()
- }
-
- hs.finishedHash.Write(hs.hello.marshal())
- hs.finishedHash.Write(hs.serverHello.marshal())
-
- c.buffering = true
- c.didResume = isResume
- if isResume {
- if err := hs.establishKeys(); err != nil {
- return err
- }
- if err := hs.readSessionTicket(); err != nil {
- return err
- }
- if err := hs.readFinished(c.serverFinished[:]); err != nil {
- return err
- }
- c.clientFinishedIsFirst = false
- // Make sure the connection is still being verified whether or not this
- // is a resumption. Resumptions currently don't reverify certificates so
- // they don't call verifyServerCertificate. See Issue 31641.
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
- if err := hs.sendFinished(c.clientFinished[:]); err != nil {
- return err
- }
- if _, err := c.flush(); err != nil {
- return err
- }
- } else {
- if err := hs.doFullHandshake(); err != nil {
- return err
- }
- if err := hs.establishKeys(); err != nil {
- return err
- }
- if err := hs.sendFinished(c.clientFinished[:]); err != nil {
- return err
- }
- if _, err := c.flush(); err != nil {
- return err
- }
- c.clientFinishedIsFirst = true
- if err := hs.readSessionTicket(); err != nil {
- return err
- }
- if err := hs.readFinished(c.serverFinished[:]); err != nil {
- return err
- }
- }
-
- c.ekm = ekmFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.hello.random, hs.serverHello.random)
- atomic.StoreUint32(&c.handshakeStatus, 1)
-
- return nil
-}
-
-func (hs *clientHandshakeState) pickCipherSuite() error {
- if hs.suite = mutualCipherSuite(hs.hello.cipherSuites, hs.serverHello.cipherSuite); hs.suite == nil {
- hs.c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: server chose an unconfigured cipher suite")
- }
-
- hs.c.cipherSuite = hs.suite.id
- return nil
-}
-
-func (hs *clientHandshakeState) doFullHandshake() error {
- c := hs.c
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
- certMsg, ok := msg.(*certificateMsg)
- if !ok || len(certMsg.certificates) == 0 {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certMsg, msg)
- }
- hs.finishedHash.Write(certMsg.marshal())
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
-
- cs, ok := msg.(*certificateStatusMsg)
- if ok {
-		// RFC 4366 on Certificate Status Request:
- // The server MAY return a "certificate_status" message.
-
- if !hs.serverHello.ocspStapling {
- // If a server returns a "CertificateStatus" message, then the
- // server MUST have included an extension of type "status_request"
- // with empty "extension_data" in the extended server hello.
-
- c.sendAlert(alertUnexpectedMessage)
- return errors.New("tls: received unexpected CertificateStatus message")
- }
- hs.finishedHash.Write(cs.marshal())
-
- c.ocspResponse = cs.response
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- }
-
- if c.handshakes == 0 {
- // If this is the first handshake on a connection, process and
- // (optionally) verify the server's certificates.
- if err := c.verifyServerCertificate(certMsg.certificates); err != nil {
- return err
- }
- } else {
- // This is a renegotiation handshake. We require that the
- // server's identity (i.e. leaf certificate) is unchanged and
- // thus any previous trust decision is still valid.
- //
- // See https://mitls.org/pages/attacks/3SHAKE for the
- // motivation behind this requirement.
- if !bytes.Equal(c.peerCertificates[0].Raw, certMsg.certificates[0]) {
- c.sendAlert(alertBadCertificate)
- return errors.New("tls: server's identity changed during renegotiation")
- }
- }
-
- keyAgreement := hs.suite.ka(c.vers)
-
- skx, ok := msg.(*serverKeyExchangeMsg)
- if ok {
- hs.finishedHash.Write(skx.marshal())
- err = keyAgreement.processServerKeyExchange(c.config, hs.hello, hs.serverHello, c.peerCertificates[0], skx)
- if err != nil {
- c.sendAlert(alertUnexpectedMessage)
- return err
- }
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- }
-
- var chainToSend *Certificate
- var certRequested bool
- certReq, ok := msg.(*certificateRequestMsg)
- if ok {
- certRequested = true
- hs.finishedHash.Write(certReq.marshal())
-
- cri := certificateRequestInfoFromMsg(hs.ctx, c.vers, certReq)
- if chainToSend, err = c.getClientCertificate(cri); err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- }
-
- shd, ok := msg.(*serverHelloDoneMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(shd, msg)
- }
- hs.finishedHash.Write(shd.marshal())
-
- // If the server requested a certificate then we have to send a
- // Certificate message, even if it's empty because we don't have a
- // certificate to send.
- if certRequested {
- certMsg = new(certificateMsg)
- certMsg.certificates = chainToSend.Certificate
- hs.finishedHash.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
- return err
- }
- }
-
- preMasterSecret, ckx, err := keyAgreement.generateClientKeyExchange(c.config, hs.hello, c.peerCertificates[0])
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- if ckx != nil {
- hs.finishedHash.Write(ckx.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, ckx.marshal()); err != nil {
- return err
- }
- }
-
- if chainToSend != nil && len(chainToSend.Certificate) > 0 {
- certVerify := &certificateVerifyMsg{}
-
- key, ok := chainToSend.PrivateKey.(crypto.Signer)
- if !ok {
- c.sendAlert(alertInternalError)
- return fmt.Errorf("tls: client certificate private key of type %T does not implement crypto.Signer", chainToSend.PrivateKey)
- }
-
- var sigType uint8
- var sigHash crypto.Hash
- if c.vers >= VersionTLS12 {
- signatureAlgorithm, err := selectSignatureScheme(c.vers, chainToSend, certReq.supportedSignatureAlgorithms)
- if err != nil {
- c.sendAlert(alertIllegalParameter)
- return err
- }
- sigType, sigHash, err = typeAndHashFromSignatureScheme(signatureAlgorithm)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
- certVerify.hasSignatureAlgorithm = true
- certVerify.signatureAlgorithm = signatureAlgorithm
- } else {
- sigType, sigHash, err = legacyTypeAndHashFromPublicKey(key.Public())
- if err != nil {
- c.sendAlert(alertIllegalParameter)
- return err
- }
- }
-
- signed := hs.finishedHash.hashForClientCertificate(sigType, sigHash, hs.masterSecret)
- signOpts := crypto.SignerOpts(sigHash)
- if sigType == signatureRSAPSS {
- signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}
- }
- certVerify.signature, err = key.Sign(c.config.rand(), signed, signOpts)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- hs.finishedHash.Write(certVerify.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certVerify.marshal()); err != nil {
- return err
- }
- }
-
- hs.masterSecret = masterFromPreMasterSecret(c.vers, hs.suite, preMasterSecret, hs.hello.random, hs.serverHello.random)
- if err := c.config.writeKeyLog(keyLogLabelTLS12, hs.hello.random, hs.masterSecret); err != nil {
- c.sendAlert(alertInternalError)
- return errors.New("tls: failed to write to key log: " + err.Error())
- }
-
- hs.finishedHash.discardHandshakeBuffer()
-
- return nil
-}
-
-func (hs *clientHandshakeState) establishKeys() error {
- c := hs.c
-
- clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV :=
- keysFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.hello.random, hs.serverHello.random, hs.suite.macLen, hs.suite.keyLen, hs.suite.ivLen)
- var clientCipher, serverCipher any
- var clientHash, serverHash hash.Hash
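-	// CBC and stream cipher suites provide a cipher constructor plus an HMAC;
-	// AEAD suites provide only an aead constructor.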
- if hs.suite.cipher != nil {
- clientCipher = hs.suite.cipher(clientKey, clientIV, false /* not for reading */)
- clientHash = hs.suite.mac(clientMAC)
- serverCipher = hs.suite.cipher(serverKey, serverIV, true /* for reading */)
- serverHash = hs.suite.mac(serverMAC)
- } else {
- clientCipher = hs.suite.aead(clientKey, clientIV)
- serverCipher = hs.suite.aead(serverKey, serverIV)
- }
-
- c.in.prepareCipherSpec(c.vers, serverCipher, serverHash)
- c.out.prepareCipherSpec(c.vers, clientCipher, clientHash)
- return nil
-}
-
-func (hs *clientHandshakeState) serverResumedSession() bool {
- // If the server responded with the same sessionId then it means the
- // sessionTicket is being used to resume a TLS session.
- return hs.session != nil && hs.hello.sessionId != nil &&
- bytes.Equal(hs.serverHello.sessionId, hs.hello.sessionId)
-}
-
-func (hs *clientHandshakeState) processServerHello() (bool, error) {
- c := hs.c
-
- if err := hs.pickCipherSuite(); err != nil {
- return false, err
- }
-
- if hs.serverHello.compressionMethod != compressionNone {
- c.sendAlert(alertUnexpectedMessage)
- return false, errors.New("tls: server selected unsupported compression format")
- }
-
- if c.handshakes == 0 && hs.serverHello.secureRenegotiationSupported {
- c.secureRenegotiation = true
- if len(hs.serverHello.secureRenegotiation) != 0 {
- c.sendAlert(alertHandshakeFailure)
- return false, errors.New("tls: initial handshake had non-empty renegotiation extension")
- }
- }
-
- if c.handshakes > 0 && c.secureRenegotiation {
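-		// RFC 5746, Section 3.5: on renegotiation the server must echo the
-		// 12-byte client and server verify_data values from the previous
-		// handshake, concatenated.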
- var expectedSecureRenegotiation [24]byte
- copy(expectedSecureRenegotiation[:], c.clientFinished[:])
- copy(expectedSecureRenegotiation[12:], c.serverFinished[:])
- if !bytes.Equal(hs.serverHello.secureRenegotiation, expectedSecureRenegotiation[:]) {
- c.sendAlert(alertHandshakeFailure)
- return false, errors.New("tls: incorrect renegotiation extension contents")
- }
- }
-
- if err := checkALPN(hs.hello.alpnProtocols, hs.serverHello.alpnProtocol); err != nil {
- c.sendAlert(alertUnsupportedExtension)
- return false, err
- }
- c.clientProtocol = hs.serverHello.alpnProtocol
-
- c.scts = hs.serverHello.scts
-
- if !hs.serverResumedSession() {
- return false, nil
- }
-
- if hs.session.vers != c.vers {
- c.sendAlert(alertHandshakeFailure)
- return false, errors.New("tls: server resumed a session with a different version")
- }
-
- if hs.session.cipherSuite != hs.suite.id {
- c.sendAlert(alertHandshakeFailure)
- return false, errors.New("tls: server resumed a session with a different cipher suite")
- }
-
- // Restore masterSecret, peerCerts, and ocspResponse from previous state
- hs.masterSecret = hs.session.masterSecret
- c.peerCertificates = hs.session.serverCertificates
- c.verifiedChains = hs.session.verifiedChains
- c.ocspResponse = hs.session.ocspResponse
- // Let the ServerHello SCTs override the session SCTs from the original
- // connection, if any are provided
- if len(c.scts) == 0 && len(hs.session.scts) != 0 {
- c.scts = hs.session.scts
- }
-
- return true, nil
-}
-
-// checkALPN ensures that the server's choice of ALPN protocol is compatible
-// with the protocols that we advertised in the ClientHello.
-func checkALPN(clientProtos []string, serverProto string) error {
- if serverProto == "" {
- return nil
- }
- if len(clientProtos) == 0 {
- return errors.New("tls: server advertised unrequested ALPN extension")
- }
- for _, proto := range clientProtos {
- if proto == serverProto {
- return nil
- }
- }
- return errors.New("tls: server selected unadvertised ALPN protocol")
-}
-
-func (hs *clientHandshakeState) readFinished(out []byte) error {
- c := hs.c
-
- if err := c.readChangeCipherSpec(); err != nil {
- return err
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
- serverFinished, ok := msg.(*finishedMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(serverFinished, msg)
- }
-
- verify := hs.finishedHash.serverSum(hs.masterSecret)
- if len(verify) != len(serverFinished.verifyData) ||
- subtle.ConstantTimeCompare(verify, serverFinished.verifyData) != 1 {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: server's Finished message was incorrect")
- }
- hs.finishedHash.Write(serverFinished.marshal())
- copy(out, verify)
- return nil
-}
-
-func (hs *clientHandshakeState) readSessionTicket() error {
- if !hs.serverHello.ticketSupported {
- return nil
- }
-
- c := hs.c
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
- sessionTicketMsg, ok := msg.(*newSessionTicketMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(sessionTicketMsg, msg)
- }
- hs.finishedHash.Write(sessionTicketMsg.marshal())
-
- hs.session = &ClientSessionState{
- sessionTicket: sessionTicketMsg.ticket,
- vers: c.vers,
- cipherSuite: hs.suite.id,
- masterSecret: hs.masterSecret,
- serverCertificates: c.peerCertificates,
- verifiedChains: c.verifiedChains,
- receivedAt: c.config.time(),
- ocspResponse: c.ocspResponse,
- scts: c.scts,
- }
-
- return nil
-}
-
-func (hs *clientHandshakeState) sendFinished(out []byte) error {
- c := hs.c
-
- if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
- return err
- }
-
- finished := new(finishedMsg)
- finished.verifyData = hs.finishedHash.clientSum(hs.masterSecret)
- hs.finishedHash.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
- return err
- }
- copy(out, finished.verifyData)
- return nil
-}
-
-// verifyServerCertificate parses and verifies the provided chain, setting
-// c.verifiedChains and c.peerCertificates or sending the appropriate alert.
-func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
- certs := make([]*x509.Certificate, len(certificates))
- for i, asn1Data := range certificates {
- cert, err := x509.ParseCertificate(asn1Data)
- if err != nil {
- c.sendAlert(alertBadCertificate)
- return errors.New("tls: failed to parse certificate from server: " + err.Error())
- }
- certs[i] = cert
- }
-
- if !c.config.InsecureSkipVerify {
- opts := x509.VerifyOptions{
- Roots: c.config.RootCAs,
- CurrentTime: c.config.time(),
- DNSName: c.config.ServerName,
- Intermediates: x509.NewCertPool(),
- }
- for _, cert := range certs[1:] {
- opts.Intermediates.AddCert(cert)
- }
- var err error
- c.verifiedChains, err = certs[0].Verify(opts)
- if err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- switch certs[0].PublicKey.(type) {
- case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey:
- break
- default:
- c.sendAlert(alertUnsupportedCertificate)
- return fmt.Errorf("tls: server's certificate contains an unsupported type of public key: %T", certs[0].PublicKey)
- }
-
- c.peerCertificates = certs
-
- if c.config.VerifyPeerCertificate != nil {
- if err := c.config.VerifyPeerCertificate(certificates, c.verifiedChains); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- return nil
-}
-
-// certificateRequestInfoFromMsg generates a CertificateRequestInfo from a TLS
-// <= 1.2 CertificateRequest, making an effort to fill in missing information.
-func certificateRequestInfoFromMsg(ctx context.Context, vers uint16, certReq *certificateRequestMsg) *CertificateRequestInfo {
- cri := &CertificateRequestInfo{
- AcceptableCAs: certReq.certificateAuthorities,
- Version: vers,
- ctx: ctx,
- }
-
- var rsaAvail, ecAvail bool
- for _, certType := range certReq.certificateTypes {
- switch certType {
- case certTypeRSASign:
- rsaAvail = true
- case certTypeECDSASign:
- ecAvail = true
- }
- }
-
- if !certReq.hasSignatureAlgorithm {
- // Prior to TLS 1.2, signature schemes did not exist. In this case we
- // make up a list based on the acceptable certificate types, to help
- // GetClientCertificate and SupportsCertificate select the right certificate.
- // The hash part of the SignatureScheme is a lie here, because
- // TLS 1.0 and 1.1 always use MD5+SHA1 for RSA and SHA1 for ECDSA.
- switch {
- case rsaAvail && ecAvail:
- cri.SignatureSchemes = []SignatureScheme{
- ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512,
- PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512, PKCS1WithSHA1,
- }
- case rsaAvail:
- cri.SignatureSchemes = []SignatureScheme{
- PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512, PKCS1WithSHA1,
- }
- case ecAvail:
- cri.SignatureSchemes = []SignatureScheme{
- ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512,
- }
- }
- return cri
- }
-
- // Filter the signature schemes based on the certificate types.
- // See RFC 5246, Section 7.4.4 (where it calls this "somewhat complicated").
- cri.SignatureSchemes = make([]SignatureScheme, 0, len(certReq.supportedSignatureAlgorithms))
- for _, sigScheme := range certReq.supportedSignatureAlgorithms {
- sigType, _, err := typeAndHashFromSignatureScheme(sigScheme)
- if err != nil {
- continue
- }
- switch sigType {
- case signatureECDSA, signatureEd25519:
- if ecAvail {
- cri.SignatureSchemes = append(cri.SignatureSchemes, sigScheme)
- }
- case signatureRSAPSS, signaturePKCS1v15:
- if rsaAvail {
- cri.SignatureSchemes = append(cri.SignatureSchemes, sigScheme)
- }
- }
- }
-
- return cri
-}
-
-func (c *Conn) getClientCertificate(cri *CertificateRequestInfo) (*Certificate, error) {
- if c.config.GetClientCertificate != nil {
- return c.config.GetClientCertificate(cri)
- }
-
- for _, chain := range c.config.Certificates {
- if err := cri.SupportsCertificate(&chain); err != nil {
- continue
- }
- return &chain, nil
- }
-
- // No acceptable certificate found. Don't send a certificate.
- return new(Certificate), nil
-}
-
-// clientSessionCacheKey returns a key used to cache sessionTickets that could
-// be used to resume previously negotiated TLS sessions with a server.
-func clientSessionCacheKey(serverAddr net.Addr, config *Config) string {
- if len(config.ServerName) > 0 {
- return config.ServerName
- }
- return serverAddr.String()
-}
-
-// hostnameInSNI converts name into an appropriate hostname for SNI.
-// Literal IP addresses and absolute FQDNs are not permitted as SNI values.
-// See RFC 6066, Section 3.
-func hostnameInSNI(name string) string {
- host := name
-	// Strip the brackets from an IPv6 address literal such as "[::1]".
-	if len(host) > 0 && host[0] == '[' && host[len(host)-1] == ']' {
-		host = host[1 : len(host)-1]
-	}
-	// Strip an IPv6 zone identifier such as "%eth0".
-	if i := strings.LastIndex(host, "%"); i > 0 {
-		host = host[:i]
-	}
-	// Literal IP addresses are not permitted as SNI values.
-	if net.ParseIP(host) != nil {
-		return ""
-	}
-	// Strip any trailing dots from an absolute FQDN.
-	for len(name) > 0 && name[len(name)-1] == '.' {
-		name = name[:len(name)-1]
-	}
- return name
-}
diff --git a/contrib/go/_std_1.18/src/crypto/tls/handshake_client_tls13.go b/contrib/go/_std_1.18/src/crypto/tls/handshake_client_tls13.go
deleted file mode 100644
index eb59ac90d1..0000000000
--- a/contrib/go/_std_1.18/src/crypto/tls/handshake_client_tls13.go
+++ /dev/null
@@ -1,682 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tls
-
-import (
- "bytes"
- "context"
- "crypto"
- "crypto/hmac"
- "crypto/rsa"
- "errors"
- "hash"
- "sync/atomic"
- "time"
-)
-
-type clientHandshakeStateTLS13 struct {
- c *Conn
- ctx context.Context
- serverHello *serverHelloMsg
- hello *clientHelloMsg
- ecdheParams ecdheParameters
-
- session *ClientSessionState
- earlySecret []byte
- binderKey []byte
-
- certReq *certificateRequestMsgTLS13
- usingPSK bool
- sentDummyCCS bool
- suite *cipherSuiteTLS13
- transcript hash.Hash
- masterSecret []byte
- trafficSecret []byte // client_application_traffic_secret_0
-}
-
-// handshake requires hs.c, hs.hello, hs.serverHello, hs.ecdheParams, and,
-// optionally, hs.session, hs.earlySecret and hs.binderKey to be set.
-func (hs *clientHandshakeStateTLS13) handshake() error {
- c := hs.c
-
- // The server must not select TLS 1.3 in a renegotiation. See RFC 8446,
- // sections 4.1.2 and 4.1.3.
- if c.handshakes > 0 {
- c.sendAlert(alertProtocolVersion)
- return errors.New("tls: server selected TLS 1.3 in a renegotiation")
- }
-
- // Consistency check on the presence of a keyShare and its parameters.
- if hs.ecdheParams == nil || len(hs.hello.keyShares) != 1 {
- return c.sendAlert(alertInternalError)
- }
-
- if err := hs.checkServerHelloOrHRR(); err != nil {
- return err
- }
-
- hs.transcript = hs.suite.hash.New()
- hs.transcript.Write(hs.hello.marshal())
-
- if bytes.Equal(hs.serverHello.random, helloRetryRequestRandom) {
- if err := hs.sendDummyChangeCipherSpec(); err != nil {
- return err
- }
- if err := hs.processHelloRetryRequest(); err != nil {
- return err
- }
- }
-
- hs.transcript.Write(hs.serverHello.marshal())
-
- c.buffering = true
- if err := hs.processServerHello(); err != nil {
- return err
- }
- if err := hs.sendDummyChangeCipherSpec(); err != nil {
- return err
- }
- if err := hs.establishHandshakeKeys(); err != nil {
- return err
- }
- if err := hs.readServerParameters(); err != nil {
- return err
- }
- if err := hs.readServerCertificate(); err != nil {
- return err
- }
- if err := hs.readServerFinished(); err != nil {
- return err
- }
- if err := hs.sendClientCertificate(); err != nil {
- return err
- }
- if err := hs.sendClientFinished(); err != nil {
- return err
- }
- if _, err := c.flush(); err != nil {
- return err
- }
-
- atomic.StoreUint32(&c.handshakeStatus, 1)
-
- return nil
-}
-
-// checkServerHelloOrHRR does validity checks that apply to both ServerHello and
-// HelloRetryRequest messages. It sets hs.suite.
-func (hs *clientHandshakeStateTLS13) checkServerHelloOrHRR() error {
- c := hs.c
-
- if hs.serverHello.supportedVersion == 0 {
- c.sendAlert(alertMissingExtension)
- return errors.New("tls: server selected TLS 1.3 using the legacy version field")
- }
-
- if hs.serverHello.supportedVersion != VersionTLS13 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected an invalid version after a HelloRetryRequest")
- }
-
- if hs.serverHello.vers != VersionTLS12 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server sent an incorrect legacy version")
- }
-
- if hs.serverHello.ocspStapling ||
- hs.serverHello.ticketSupported ||
- hs.serverHello.secureRenegotiationSupported ||
- len(hs.serverHello.secureRenegotiation) != 0 ||
- len(hs.serverHello.alpnProtocol) != 0 ||
- len(hs.serverHello.scts) != 0 {
- c.sendAlert(alertUnsupportedExtension)
- return errors.New("tls: server sent a ServerHello extension forbidden in TLS 1.3")
- }
-
- if !bytes.Equal(hs.hello.sessionId, hs.serverHello.sessionId) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server did not echo the legacy session ID")
- }
-
- if hs.serverHello.compressionMethod != compressionNone {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected unsupported compression format")
- }
-
- selectedSuite := mutualCipherSuiteTLS13(hs.hello.cipherSuites, hs.serverHello.cipherSuite)
- if hs.suite != nil && selectedSuite != hs.suite {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server changed cipher suite after a HelloRetryRequest")
- }
- if selectedSuite == nil {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server chose an unconfigured cipher suite")
- }
- hs.suite = selectedSuite
- c.cipherSuite = hs.suite.id
-
- return nil
-}
-
-// sendDummyChangeCipherSpec sends a ChangeCipherSpec record for compatibility
-// with middleboxes that didn't implement TLS correctly. See RFC 8446, Appendix D.4.
-func (hs *clientHandshakeStateTLS13) sendDummyChangeCipherSpec() error {
- if hs.sentDummyCCS {
- return nil
- }
- hs.sentDummyCCS = true
-
- _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
- return err
-}
-
-// processHelloRetryRequest handles the HRR in hs.serverHello, modifies and
-// resends hs.hello, and reads the new ServerHello into hs.serverHello.
-func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error {
- c := hs.c
-
- // The first ClientHello gets double-hashed into the transcript upon a
- // HelloRetryRequest. (The idea is that the server might offload transcript
- // storage to the client in the cookie.) See RFC 8446, Section 4.4.1.
- chHash := hs.transcript.Sum(nil)
- hs.transcript.Reset()
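-	// Replace it with a synthetic message_hash handshake message: a one-byte
-	// type and a 24-bit length, followed by the hash of the first ClientHello.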
- hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
- hs.transcript.Write(chHash)
- hs.transcript.Write(hs.serverHello.marshal())
-
- // The only HelloRetryRequest extensions we support are key_share and
- // cookie, and clients must abort the handshake if the HRR would not result
- // in any change in the ClientHello.
- if hs.serverHello.selectedGroup == 0 && hs.serverHello.cookie == nil {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server sent an unnecessary HelloRetryRequest message")
- }
-
- if hs.serverHello.cookie != nil {
- hs.hello.cookie = hs.serverHello.cookie
- }
-
- if hs.serverHello.serverShare.group != 0 {
- c.sendAlert(alertDecodeError)
- return errors.New("tls: received malformed key_share extension")
- }
-
- // If the server sent a key_share extension selecting a group, ensure it's
- // a group we advertised but did not send a key share for, and send a key
- // share for it this time.
- if curveID := hs.serverHello.selectedGroup; curveID != 0 {
- curveOK := false
- for _, id := range hs.hello.supportedCurves {
- if id == curveID {
- curveOK = true
- break
- }
- }
- if !curveOK {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected unsupported group")
- }
- if hs.ecdheParams.CurveID() == curveID {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server sent an unnecessary HelloRetryRequest key_share")
- }
- if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
- c.sendAlert(alertInternalError)
- return errors.New("tls: CurvePreferences includes unsupported curve")
- }
- params, err := generateECDHEParameters(c.config.rand(), curveID)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- hs.ecdheParams = params
- hs.hello.keyShares = []keyShare{{group: curveID, data: params.PublicKey()}}
- }
-
- hs.hello.raw = nil
- if len(hs.hello.pskIdentities) > 0 {
- pskSuite := cipherSuiteTLS13ByID(hs.session.cipherSuite)
- if pskSuite == nil {
- return c.sendAlert(alertInternalError)
- }
- if pskSuite.hash == hs.suite.hash {
- // Update binders and obfuscated_ticket_age.
- ticketAge := uint32(c.config.time().Sub(hs.session.receivedAt) / time.Millisecond)
- hs.hello.pskIdentities[0].obfuscatedTicketAge = ticketAge + hs.session.ageAdd
-
- transcript := hs.suite.hash.New()
- transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
- transcript.Write(chHash)
- transcript.Write(hs.serverHello.marshal())
- transcript.Write(hs.hello.marshalWithoutBinders())
- pskBinders := [][]byte{hs.suite.finishedHash(hs.binderKey, transcript)}
- hs.hello.updateBinders(pskBinders)
- } else {
- // Server selected a cipher suite incompatible with the PSK.
- hs.hello.pskIdentities = nil
- hs.hello.pskBinders = nil
- }
- }
-
- hs.transcript.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
- return err
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- serverHello, ok := msg.(*serverHelloMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(serverHello, msg)
- }
- hs.serverHello = serverHello
-
- if err := hs.checkServerHelloOrHRR(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) processServerHello() error {
- c := hs.c
-
- if bytes.Equal(hs.serverHello.random, helloRetryRequestRandom) {
- c.sendAlert(alertUnexpectedMessage)
- return errors.New("tls: server sent two HelloRetryRequest messages")
- }
-
- if len(hs.serverHello.cookie) != 0 {
- c.sendAlert(alertUnsupportedExtension)
- return errors.New("tls: server sent a cookie in a normal ServerHello")
- }
-
- if hs.serverHello.selectedGroup != 0 {
- c.sendAlert(alertDecodeError)
- return errors.New("tls: malformed key_share extension")
- }
-
- if hs.serverHello.serverShare.group == 0 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server did not send a key share")
- }
- if hs.serverHello.serverShare.group != hs.ecdheParams.CurveID() {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected unsupported group")
- }
-
- if !hs.serverHello.selectedIdentityPresent {
- return nil
- }
-
- if int(hs.serverHello.selectedIdentity) >= len(hs.hello.pskIdentities) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected an invalid PSK")
- }
-
- if len(hs.hello.pskIdentities) != 1 || hs.session == nil {
- return c.sendAlert(alertInternalError)
- }
- pskSuite := cipherSuiteTLS13ByID(hs.session.cipherSuite)
- if pskSuite == nil {
- return c.sendAlert(alertInternalError)
- }
- if pskSuite.hash != hs.suite.hash {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected an invalid PSK and cipher suite pair")
- }
-
- hs.usingPSK = true
- c.didResume = true
- c.peerCertificates = hs.session.serverCertificates
- c.verifiedChains = hs.session.verifiedChains
- c.ocspResponse = hs.session.ocspResponse
- c.scts = hs.session.scts
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error {
- c := hs.c
-
- sharedKey := hs.ecdheParams.SharedKey(hs.serverHello.serverShare.data)
- if sharedKey == nil {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: invalid server key share")
- }
-
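-	// Without a PSK, the early secret is HKDF-Extract over zeroed inputs, per
-	// the key schedule in RFC 8446, Section 7.1.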
- earlySecret := hs.earlySecret
- if !hs.usingPSK {
- earlySecret = hs.suite.extract(nil, nil)
- }
- handshakeSecret := hs.suite.extract(sharedKey,
- hs.suite.deriveSecret(earlySecret, "derived", nil))
-
- clientSecret := hs.suite.deriveSecret(handshakeSecret,
- clientHandshakeTrafficLabel, hs.transcript)
- c.out.setTrafficSecret(hs.suite, clientSecret)
- serverSecret := hs.suite.deriveSecret(handshakeSecret,
- serverHandshakeTrafficLabel, hs.transcript)
- c.in.setTrafficSecret(hs.suite, serverSecret)
-
- err := c.config.writeKeyLog(keyLogLabelClientHandshake, hs.hello.random, clientSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- err = c.config.writeKeyLog(keyLogLabelServerHandshake, hs.hello.random, serverSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- hs.masterSecret = hs.suite.extract(nil,
- hs.suite.deriveSecret(handshakeSecret, "derived", nil))
-
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) readServerParameters() error {
- c := hs.c
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- encryptedExtensions, ok := msg.(*encryptedExtensionsMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(encryptedExtensions, msg)
- }
- hs.transcript.Write(encryptedExtensions.marshal())
-
- if err := checkALPN(hs.hello.alpnProtocols, encryptedExtensions.alpnProtocol); err != nil {
- c.sendAlert(alertUnsupportedExtension)
- return err
- }
- c.clientProtocol = encryptedExtensions.alpnProtocol
-
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) readServerCertificate() error {
- c := hs.c
-
- // Either a PSK or a certificate is always used, but not both.
- // See RFC 8446, Section 4.1.1.
- if hs.usingPSK {
- // Make sure the connection is still being verified whether or not this
- // is a resumption. Resumptions currently don't reverify certificates so
- // they don't call verifyServerCertificate. See Issue 31641.
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
- return nil
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- certReq, ok := msg.(*certificateRequestMsgTLS13)
- if ok {
- hs.transcript.Write(certReq.marshal())
-
- hs.certReq = certReq
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- }
-
- certMsg, ok := msg.(*certificateMsgTLS13)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certMsg, msg)
- }
- if len(certMsg.certificate.Certificate) == 0 {
- c.sendAlert(alertDecodeError)
- return errors.New("tls: received empty certificates message")
- }
- hs.transcript.Write(certMsg.marshal())
-
- c.scts = certMsg.certificate.SignedCertificateTimestamps
- c.ocspResponse = certMsg.certificate.OCSPStaple
-
- if err := c.verifyServerCertificate(certMsg.certificate.Certificate); err != nil {
- return err
- }
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
-
- certVerify, ok := msg.(*certificateVerifyMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certVerify, msg)
- }
-
- // See RFC 8446, Section 4.4.3.
- if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: certificate used with invalid signature algorithm")
- }
- sigType, sigHash, err := typeAndHashFromSignatureScheme(certVerify.signatureAlgorithm)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
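-	// PKCS#1 v1.5 and SHA-1 stay in supportedSignatureAlgorithms for TLS 1.2
-	// compatibility, but are forbidden in a TLS 1.3 CertificateVerify.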
- if sigType == signaturePKCS1v15 || sigHash == crypto.SHA1 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: certificate used with invalid signature algorithm")
- }
- signed := signedMessage(sigHash, serverSignatureContext, hs.transcript)
- if err := verifyHandshakeSignature(sigType, c.peerCertificates[0].PublicKey,
- sigHash, signed, certVerify.signature); err != nil {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid signature by the server certificate: " + err.Error())
- }
-
- hs.transcript.Write(certVerify.marshal())
-
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) readServerFinished() error {
- c := hs.c
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- finished, ok := msg.(*finishedMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(finished, msg)
- }
-
- expectedMAC := hs.suite.finishedHash(c.in.trafficSecret, hs.transcript)
- if !hmac.Equal(expectedMAC, finished.verifyData) {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid server finished hash")
- }
-
- hs.transcript.Write(finished.marshal())
-
- // Derive secrets that take context through the server Finished.
-
- hs.trafficSecret = hs.suite.deriveSecret(hs.masterSecret,
- clientApplicationTrafficLabel, hs.transcript)
- serverSecret := hs.suite.deriveSecret(hs.masterSecret,
- serverApplicationTrafficLabel, hs.transcript)
- c.in.setTrafficSecret(hs.suite, serverSecret)
-
- err = c.config.writeKeyLog(keyLogLabelClientTraffic, hs.hello.random, hs.trafficSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- err = c.config.writeKeyLog(keyLogLabelServerTraffic, hs.hello.random, serverSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- c.ekm = hs.suite.exportKeyingMaterial(hs.masterSecret, hs.transcript)
-
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) sendClientCertificate() error {
- c := hs.c
-
- if hs.certReq == nil {
- return nil
- }
-
- cert, err := c.getClientCertificate(&CertificateRequestInfo{
- AcceptableCAs: hs.certReq.certificateAuthorities,
- SignatureSchemes: hs.certReq.supportedSignatureAlgorithms,
- Version: c.vers,
- ctx: hs.ctx,
- })
- if err != nil {
- return err
- }
-
- certMsg := new(certificateMsgTLS13)
-
- certMsg.certificate = *cert
- certMsg.scts = hs.certReq.scts && len(cert.SignedCertificateTimestamps) > 0
- certMsg.ocspStapling = hs.certReq.ocspStapling && len(cert.OCSPStaple) > 0
-
- hs.transcript.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
- return err
- }
-
- // If we sent an empty certificate message, skip the CertificateVerify.
- if len(cert.Certificate) == 0 {
- return nil
- }
-
- certVerifyMsg := new(certificateVerifyMsg)
- certVerifyMsg.hasSignatureAlgorithm = true
-
- certVerifyMsg.signatureAlgorithm, err = selectSignatureScheme(c.vers, cert, hs.certReq.supportedSignatureAlgorithms)
- if err != nil {
- // getClientCertificate returned a certificate incompatible with the
- // CertificateRequestInfo supported signature algorithms.
- c.sendAlert(alertHandshakeFailure)
- return err
- }
-
- sigType, sigHash, err := typeAndHashFromSignatureScheme(certVerifyMsg.signatureAlgorithm)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
-
- signed := signedMessage(sigHash, clientSignatureContext, hs.transcript)
- signOpts := crypto.SignerOpts(sigHash)
- if sigType == signatureRSAPSS {
- signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}
- }
- sig, err := cert.PrivateKey.(crypto.Signer).Sign(c.config.rand(), signed, signOpts)
- if err != nil {
- c.sendAlert(alertInternalError)
- return errors.New("tls: failed to sign handshake: " + err.Error())
- }
- certVerifyMsg.signature = sig
-
- hs.transcript.Write(certVerifyMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) sendClientFinished() error {
- c := hs.c
-
- finished := &finishedMsg{
- verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript),
- }
-
- hs.transcript.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
- return err
- }
-
- c.out.setTrafficSecret(hs.suite, hs.trafficSecret)
-
- if !c.config.SessionTicketsDisabled && c.config.ClientSessionCache != nil {
- c.resumptionSecret = hs.suite.deriveSecret(hs.masterSecret,
- resumptionLabel, hs.transcript)
- }
-
- return nil
-}
-
-func (c *Conn) handleNewSessionTicket(msg *newSessionTicketMsgTLS13) error {
- if !c.isClient {
- c.sendAlert(alertUnexpectedMessage)
- return errors.New("tls: received new session ticket from a client")
- }
-
- if c.config.SessionTicketsDisabled || c.config.ClientSessionCache == nil {
- return nil
- }
-
- // See RFC 8446, Section 4.6.1.
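-	// A zero lifetime means the ticket should be discarded immediately.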
- if msg.lifetime == 0 {
- return nil
- }
- lifetime := time.Duration(msg.lifetime) * time.Second
- if lifetime > maxSessionTicketLifetime {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: received a session ticket with invalid lifetime")
- }
-
- cipherSuite := cipherSuiteTLS13ByID(c.cipherSuite)
- if cipherSuite == nil || c.resumptionSecret == nil {
- return c.sendAlert(alertInternalError)
- }
-
- // Save the resumption_master_secret and nonce instead of deriving the PSK
- // to do the least amount of work on NewSessionTicket messages before we
- // know if the ticket will be used. Forward secrecy of resumed connections
- // is guaranteed by the requirement for pskModeDHE.
- session := &ClientSessionState{
- sessionTicket: msg.label,
- vers: c.vers,
- cipherSuite: c.cipherSuite,
- masterSecret: c.resumptionSecret,
- serverCertificates: c.peerCertificates,
- verifiedChains: c.verifiedChains,
- receivedAt: c.config.time(),
- nonce: msg.nonce,
- useBy: c.config.time().Add(lifetime),
- ageAdd: msg.ageAdd,
- ocspResponse: c.ocspResponse,
- scts: c.scts,
- }
-
- cacheKey := clientSessionCacheKey(c.conn.RemoteAddr(), c.config)
- c.config.ClientSessionCache.Put(cacheKey, session)
-
- return nil
-}
diff --git a/contrib/go/_std_1.18/src/crypto/tls/handshake_messages.go b/contrib/go/_std_1.18/src/crypto/tls/handshake_messages.go
deleted file mode 100644
index 17cf85910f..0000000000
--- a/contrib/go/_std_1.18/src/crypto/tls/handshake_messages.go
+++ /dev/null
@@ -1,1808 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tls
-
-import (
- "fmt"
- "strings"
-
- "golang.org/x/crypto/cryptobyte"
-)
-
-// The marshalingFunction type is an adapter to allow the use of ordinary
-// functions as cryptobyte.MarshalingValue.
-type marshalingFunction func(b *cryptobyte.Builder) error
-
-func (f marshalingFunction) Marshal(b *cryptobyte.Builder) error {
- return f(b)
-}
-
-// addBytesWithLength appends a sequence of bytes to the cryptobyte.Builder. If
-// the length of the sequence is not the value specified, it produces an error.
-func addBytesWithLength(b *cryptobyte.Builder, v []byte, n int) {
- b.AddValue(marshalingFunction(func(b *cryptobyte.Builder) error {
- if len(v) != n {
- return fmt.Errorf("invalid value length: expected %d, got %d", n, len(v))
- }
- b.AddBytes(v)
- return nil
- }))
-}
-
-// addUint64 appends a big-endian, 64-bit value to the cryptobyte.Builder.
-func addUint64(b *cryptobyte.Builder, v uint64) {
- b.AddUint32(uint32(v >> 32))
- b.AddUint32(uint32(v))
-}
-
-// readUint64 decodes a big-endian, 64-bit value into out and advances over it.
-// It reports whether the read was successful.
-func readUint64(s *cryptobyte.String, out *uint64) bool {
- var hi, lo uint32
- if !s.ReadUint32(&hi) || !s.ReadUint32(&lo) {
- return false
- }
- *out = uint64(hi)<<32 | uint64(lo)
- return true
-}
-
-// readUint8LengthPrefixed acts like s.ReadUint8LengthPrefixed, but targets a
-// []byte instead of a cryptobyte.String.
-func readUint8LengthPrefixed(s *cryptobyte.String, out *[]byte) bool {
- return s.ReadUint8LengthPrefixed((*cryptobyte.String)(out))
-}
-
-// readUint16LengthPrefixed acts like s.ReadUint16LengthPrefixed, but targets a
-// []byte instead of a cryptobyte.String.
-func readUint16LengthPrefixed(s *cryptobyte.String, out *[]byte) bool {
- return s.ReadUint16LengthPrefixed((*cryptobyte.String)(out))
-}
-
-// readUint24LengthPrefixed acts like s.ReadUint24LengthPrefixed, but targets a
-// []byte instead of a cryptobyte.String.
-func readUint24LengthPrefixed(s *cryptobyte.String, out *[]byte) bool {
- return s.ReadUint24LengthPrefixed((*cryptobyte.String)(out))
-}
-
-type clientHelloMsg struct {
- raw []byte
- vers uint16
- random []byte
- sessionId []byte
- cipherSuites []uint16
- compressionMethods []uint8
- serverName string
- ocspStapling bool
- supportedCurves []CurveID
- supportedPoints []uint8
- ticketSupported bool
- sessionTicket []uint8
- supportedSignatureAlgorithms []SignatureScheme
- supportedSignatureAlgorithmsCert []SignatureScheme
- secureRenegotiationSupported bool
- secureRenegotiation []byte
- alpnProtocols []string
- scts bool
- supportedVersions []uint16
- cookie []byte
- keyShares []keyShare
- earlyData bool
- pskModes []uint8
- pskIdentities []pskIdentity
- pskBinders [][]byte
-}
-
-func (m *clientHelloMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeClientHello)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(m.vers)
- addBytesWithLength(b, m.random, 32)
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.sessionId)
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, suite := range m.cipherSuites {
- b.AddUint16(suite)
- }
- })
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.compressionMethods)
- })
-
- // If extensions aren't present, omit them.
- var extensionsPresent bool
- bWithoutExtensions := *b
-
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if len(m.serverName) > 0 {
- // RFC 6066, Section 3
- b.AddUint16(extensionServerName)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(0) // name_type = host_name
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(m.serverName))
- })
- })
- })
- }
- if m.ocspStapling {
- // RFC 4366, Section 3.6
- b.AddUint16(extensionStatusRequest)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(1) // status_type = ocsp
- b.AddUint16(0) // empty responder_id_list
- b.AddUint16(0) // empty request_extensions
- })
- }
- if len(m.supportedCurves) > 0 {
-				// RFC 4492, Section 5.1.1, and RFC 8446, Section 4.2.7
- b.AddUint16(extensionSupportedCurves)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, curve := range m.supportedCurves {
- b.AddUint16(uint16(curve))
- }
- })
- })
- }
- if len(m.supportedPoints) > 0 {
- // RFC 4492, Section 5.1.2
- b.AddUint16(extensionSupportedPoints)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.supportedPoints)
- })
- })
- }
- if m.ticketSupported {
- // RFC 5077, Section 3.2
- b.AddUint16(extensionSessionTicket)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.sessionTicket)
- })
- }
- if len(m.supportedSignatureAlgorithms) > 0 {
- // RFC 5246, Section 7.4.1.4.1
- b.AddUint16(extensionSignatureAlgorithms)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sigAlgo := range m.supportedSignatureAlgorithms {
- b.AddUint16(uint16(sigAlgo))
- }
- })
- })
- }
- if len(m.supportedSignatureAlgorithmsCert) > 0 {
- // RFC 8446, Section 4.2.3
- b.AddUint16(extensionSignatureAlgorithmsCert)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sigAlgo := range m.supportedSignatureAlgorithmsCert {
- b.AddUint16(uint16(sigAlgo))
- }
- })
- })
- }
- if m.secureRenegotiationSupported {
- // RFC 5746, Section 3.2
- b.AddUint16(extensionRenegotiationInfo)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.secureRenegotiation)
- })
- })
- }
- if len(m.alpnProtocols) > 0 {
- // RFC 7301, Section 3.1
- b.AddUint16(extensionALPN)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, proto := range m.alpnProtocols {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(proto))
- })
- }
- })
- })
- }
- if m.scts {
- // RFC 6962, Section 3.3.1
- b.AddUint16(extensionSCT)
- b.AddUint16(0) // empty extension_data
- }
- if len(m.supportedVersions) > 0 {
- // RFC 8446, Section 4.2.1
- b.AddUint16(extensionSupportedVersions)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, vers := range m.supportedVersions {
- b.AddUint16(vers)
- }
- })
- })
- }
- if len(m.cookie) > 0 {
- // RFC 8446, Section 4.2.2
- b.AddUint16(extensionCookie)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.cookie)
- })
- })
- }
- if len(m.keyShares) > 0 {
- // RFC 8446, Section 4.2.8
- b.AddUint16(extensionKeyShare)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, ks := range m.keyShares {
- b.AddUint16(uint16(ks.group))
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(ks.data)
- })
- }
- })
- })
- }
- if m.earlyData {
- // RFC 8446, Section 4.2.10
- b.AddUint16(extensionEarlyData)
- b.AddUint16(0) // empty extension_data
- }
- if len(m.pskModes) > 0 {
- // RFC 8446, Section 4.2.9
- b.AddUint16(extensionPSKModes)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.pskModes)
- })
- })
- }
- if len(m.pskIdentities) > 0 { // pre_shared_key must be the last extension
- // RFC 8446, Section 4.2.11
- b.AddUint16(extensionPreSharedKey)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, psk := range m.pskIdentities {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(psk.label)
- })
- b.AddUint32(psk.obfuscatedTicketAge)
- }
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, binder := range m.pskBinders {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(binder)
- })
- }
- })
- })
- }
-
- extensionsPresent = len(b.BytesOrPanic()) > 2
- })
-
- if !extensionsPresent {
- *b = bWithoutExtensions
- }
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-// marshalWithoutBinders returns the ClientHello through the
-// PreSharedKeyExtension.identities field, according to RFC 8446, Section
-// 4.2.11.2. Note that m.pskBinders must be set to slices of the correct length.
-func (m *clientHelloMsg) marshalWithoutBinders() []byte {
- bindersLen := 2 // uint16 length prefix
- for _, binder := range m.pskBinders {
- bindersLen += 1 // uint8 length prefix
- bindersLen += len(binder)
- }
-
- fullMessage := m.marshal()
- return fullMessage[:len(fullMessage)-bindersLen]
-}
-
-// updateBinders updates the m.pskBinders field, if necessary updating the
-// cached marshaled representation. The supplied binders must have the same
-// length as the current m.pskBinders.
-func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) {
- if len(pskBinders) != len(m.pskBinders) {
- panic("tls: internal error: pskBinders length mismatch")
- }
- for i := range m.pskBinders {
- if len(pskBinders[i]) != len(m.pskBinders[i]) {
- panic("tls: internal error: pskBinders length mismatch")
- }
- }
- m.pskBinders = pskBinders
- if m.raw != nil {
- lenWithoutBinders := len(m.marshalWithoutBinders())
- b := cryptobyte.NewFixedBuilder(m.raw[:lenWithoutBinders])
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, binder := range m.pskBinders {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(binder)
- })
- }
- })
- if out, err := b.Bytes(); err != nil || len(out) != len(m.raw) {
- panic("tls: internal error: failed to update binders")
- }
- }
-}
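
marshalWithoutBinders and updateBinders together implement the two-pass binder scheme of RFC 8446, Section 4.2.11.2: each binder is an HMAC over the transcript hash of the ClientHello truncated just past PreSharedKeyExtension.identities. A minimal standalone sketch of that final step, assuming a binderKey already derived from the PSK via the HKDF key schedule (computeBinder is a hypothetical name, not this package's code path):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
)

// computeBinder sketches RFC 8446, Section 4.2.11.2: a binder is
// HMAC(binder_key, Transcript-Hash(truncated ClientHello)). For an initial
// ClientHello the transcript is just the truncated message itself, so a
// plain SHA-256 stands in for the running transcript hash here. Deriving
// binderKey from the PSK (HKDF-Extract/Expand) is assumed to have happened
// already and is not shown.
func computeBinder(binderKey, truncatedHello []byte) []byte {
	transcript := sha256.Sum256(truncatedHello)
	mac := hmac.New(sha256.New, binderKey)
	mac.Write(transcript[:])
	return mac.Sum(nil)
}

func main() {
	binderKey := []byte("hypothetical binder key")
	truncated := []byte("ClientHello bytes through the identities list")
	fmt.Printf("binder: %x\n", computeBinder(binderKey, truncated))
}

This is why updateBinders can patch the cached m.raw in place: the binders sit at the very end of the message, after everything they authenticate.
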
-
-func (m *clientHelloMsg) unmarshal(data []byte) bool {
- *m = clientHelloMsg{raw: data}
- s := cryptobyte.String(data)
-
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint16(&m.vers) || !s.ReadBytes(&m.random, 32) ||
- !readUint8LengthPrefixed(&s, &m.sessionId) {
- return false
- }
-
- var cipherSuites cryptobyte.String
- if !s.ReadUint16LengthPrefixed(&cipherSuites) {
- return false
- }
- m.cipherSuites = []uint16{}
- m.secureRenegotiationSupported = false
- for !cipherSuites.Empty() {
- var suite uint16
- if !cipherSuites.ReadUint16(&suite) {
- return false
- }
- if suite == scsvRenegotiation {
- m.secureRenegotiationSupported = true
- }
- m.cipherSuites = append(m.cipherSuites, suite)
- }
-
- if !readUint8LengthPrefixed(&s, &m.compressionMethods) {
- return false
- }
-
- if s.Empty() {
- // ClientHello is optionally followed by extension data
- return true
- }
-
- var extensions cryptobyte.String
- if !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() {
- return false
- }
-
- for !extensions.Empty() {
- var extension uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&extension) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
-
- switch extension {
- case extensionServerName:
- // RFC 6066, Section 3
- var nameList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&nameList) || nameList.Empty() {
- return false
- }
- for !nameList.Empty() {
- var nameType uint8
- var serverName cryptobyte.String
- if !nameList.ReadUint8(&nameType) ||
- !nameList.ReadUint16LengthPrefixed(&serverName) ||
- serverName.Empty() {
- return false
- }
- if nameType != 0 {
- continue
- }
- if len(m.serverName) != 0 {
- // Multiple names of the same name_type are prohibited.
- return false
- }
- m.serverName = string(serverName)
- // An SNI value may not include a trailing dot.
- if strings.HasSuffix(m.serverName, ".") {
- return false
- }
- }
- case extensionStatusRequest:
- // RFC 4366, Section 3.6
- var statusType uint8
- var ignored cryptobyte.String
- if !extData.ReadUint8(&statusType) ||
- !extData.ReadUint16LengthPrefixed(&ignored) ||
- !extData.ReadUint16LengthPrefixed(&ignored) {
- return false
- }
- m.ocspStapling = statusType == statusTypeOCSP
- case extensionSupportedCurves:
-			// RFC 4492, Section 5.1.1, and RFC 8446, Section 4.2.7
- var curves cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&curves) || curves.Empty() {
- return false
- }
- for !curves.Empty() {
- var curve uint16
- if !curves.ReadUint16(&curve) {
- return false
- }
- m.supportedCurves = append(m.supportedCurves, CurveID(curve))
- }
- case extensionSupportedPoints:
- // RFC 4492, Section 5.1.2
- if !readUint8LengthPrefixed(&extData, &m.supportedPoints) ||
- len(m.supportedPoints) == 0 {
- return false
- }
- case extensionSessionTicket:
- // RFC 5077, Section 3.2
- m.ticketSupported = true
- extData.ReadBytes(&m.sessionTicket, len(extData))
- case extensionSignatureAlgorithms:
- // RFC 5246, Section 7.4.1.4.1
- var sigAndAlgs cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() {
- return false
- }
- for !sigAndAlgs.Empty() {
- var sigAndAlg uint16
- if !sigAndAlgs.ReadUint16(&sigAndAlg) {
- return false
- }
- m.supportedSignatureAlgorithms = append(
- m.supportedSignatureAlgorithms, SignatureScheme(sigAndAlg))
- }
- case extensionSignatureAlgorithmsCert:
- // RFC 8446, Section 4.2.3
- var sigAndAlgs cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() {
- return false
- }
- for !sigAndAlgs.Empty() {
- var sigAndAlg uint16
- if !sigAndAlgs.ReadUint16(&sigAndAlg) {
- return false
- }
- m.supportedSignatureAlgorithmsCert = append(
- m.supportedSignatureAlgorithmsCert, SignatureScheme(sigAndAlg))
- }
- case extensionRenegotiationInfo:
- // RFC 5746, Section 3.2
- if !readUint8LengthPrefixed(&extData, &m.secureRenegotiation) {
- return false
- }
- m.secureRenegotiationSupported = true
- case extensionALPN:
- // RFC 7301, Section 3.1
- var protoList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() {
- return false
- }
- for !protoList.Empty() {
- var proto cryptobyte.String
- if !protoList.ReadUint8LengthPrefixed(&proto) || proto.Empty() {
- return false
- }
- m.alpnProtocols = append(m.alpnProtocols, string(proto))
- }
- case extensionSCT:
- // RFC 6962, Section 3.3.1
- m.scts = true
- case extensionSupportedVersions:
- // RFC 8446, Section 4.2.1
- var versList cryptobyte.String
- if !extData.ReadUint8LengthPrefixed(&versList) || versList.Empty() {
- return false
- }
- for !versList.Empty() {
- var vers uint16
- if !versList.ReadUint16(&vers) {
- return false
- }
- m.supportedVersions = append(m.supportedVersions, vers)
- }
- case extensionCookie:
- // RFC 8446, Section 4.2.2
- if !readUint16LengthPrefixed(&extData, &m.cookie) ||
- len(m.cookie) == 0 {
- return false
- }
- case extensionKeyShare:
- // RFC 8446, Section 4.2.8
- var clientShares cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&clientShares) {
- return false
- }
- for !clientShares.Empty() {
- var ks keyShare
- if !clientShares.ReadUint16((*uint16)(&ks.group)) ||
- !readUint16LengthPrefixed(&clientShares, &ks.data) ||
- len(ks.data) == 0 {
- return false
- }
- m.keyShares = append(m.keyShares, ks)
- }
- case extensionEarlyData:
- // RFC 8446, Section 4.2.10
- m.earlyData = true
- case extensionPSKModes:
- // RFC 8446, Section 4.2.9
- if !readUint8LengthPrefixed(&extData, &m.pskModes) {
- return false
- }
- case extensionPreSharedKey:
- // RFC 8446, Section 4.2.11
- if !extensions.Empty() {
- return false // pre_shared_key must be the last extension
- }
- var identities cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&identities) || identities.Empty() {
- return false
- }
- for !identities.Empty() {
- var psk pskIdentity
- if !readUint16LengthPrefixed(&identities, &psk.label) ||
- !identities.ReadUint32(&psk.obfuscatedTicketAge) ||
- len(psk.label) == 0 {
- return false
- }
- m.pskIdentities = append(m.pskIdentities, psk)
- }
- var binders cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&binders) || binders.Empty() {
- return false
- }
- for !binders.Empty() {
- var binder []byte
- if !readUint8LengthPrefixed(&binders, &binder) ||
- len(binder) == 0 {
- return false
- }
- m.pskBinders = append(m.pskBinders, binder)
- }
- default:
- // Ignore unknown extensions.
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
-
- return true
-}
-
-type serverHelloMsg struct {
- raw []byte
- vers uint16
- random []byte
- sessionId []byte
- cipherSuite uint16
- compressionMethod uint8
- ocspStapling bool
- ticketSupported bool
- secureRenegotiationSupported bool
- secureRenegotiation []byte
- alpnProtocol string
- scts [][]byte
- supportedVersion uint16
- serverShare keyShare
- selectedIdentityPresent bool
- selectedIdentity uint16
- supportedPoints []uint8
-
- // HelloRetryRequest extensions
- cookie []byte
- selectedGroup CurveID
-}
-
-func (m *serverHelloMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeServerHello)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(m.vers)
- addBytesWithLength(b, m.random, 32)
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.sessionId)
- })
- b.AddUint16(m.cipherSuite)
- b.AddUint8(m.compressionMethod)
-
- // If extensions aren't present, omit them.
- var extensionsPresent bool
- bWithoutExtensions := *b
-
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if m.ocspStapling {
- b.AddUint16(extensionStatusRequest)
- b.AddUint16(0) // empty extension_data
- }
- if m.ticketSupported {
- b.AddUint16(extensionSessionTicket)
- b.AddUint16(0) // empty extension_data
- }
- if m.secureRenegotiationSupported {
- b.AddUint16(extensionRenegotiationInfo)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.secureRenegotiation)
- })
- })
- }
- if len(m.alpnProtocol) > 0 {
- b.AddUint16(extensionALPN)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(m.alpnProtocol))
- })
- })
- })
- }
- if len(m.scts) > 0 {
- b.AddUint16(extensionSCT)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sct := range m.scts {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(sct)
- })
- }
- })
- })
- }
- if m.supportedVersion != 0 {
- b.AddUint16(extensionSupportedVersions)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(m.supportedVersion)
- })
- }
- if m.serverShare.group != 0 {
- b.AddUint16(extensionKeyShare)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(uint16(m.serverShare.group))
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.serverShare.data)
- })
- })
- }
- if m.selectedIdentityPresent {
- b.AddUint16(extensionPreSharedKey)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(m.selectedIdentity)
- })
- }
-
- if len(m.cookie) > 0 {
- b.AddUint16(extensionCookie)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.cookie)
- })
- })
- }
- if m.selectedGroup != 0 {
- b.AddUint16(extensionKeyShare)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(uint16(m.selectedGroup))
- })
- }
- if len(m.supportedPoints) > 0 {
- b.AddUint16(extensionSupportedPoints)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.supportedPoints)
- })
- })
- }
-
- extensionsPresent = len(b.BytesOrPanic()) > 2
- })
-
- if !extensionsPresent {
- *b = bWithoutExtensions
- }
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *serverHelloMsg) unmarshal(data []byte) bool {
- *m = serverHelloMsg{raw: data}
- s := cryptobyte.String(data)
-
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint16(&m.vers) || !s.ReadBytes(&m.random, 32) ||
- !readUint8LengthPrefixed(&s, &m.sessionId) ||
- !s.ReadUint16(&m.cipherSuite) ||
- !s.ReadUint8(&m.compressionMethod) {
- return false
- }
-
- if s.Empty() {
- // ServerHello is optionally followed by extension data
- return true
- }
-
- var extensions cryptobyte.String
- if !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() {
- return false
- }
-
- for !extensions.Empty() {
- var extension uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&extension) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
-
- switch extension {
- case extensionStatusRequest:
- m.ocspStapling = true
- case extensionSessionTicket:
- m.ticketSupported = true
- case extensionRenegotiationInfo:
- if !readUint8LengthPrefixed(&extData, &m.secureRenegotiation) {
- return false
- }
- m.secureRenegotiationSupported = true
- case extensionALPN:
- var protoList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() {
- return false
- }
- var proto cryptobyte.String
- if !protoList.ReadUint8LengthPrefixed(&proto) ||
- proto.Empty() || !protoList.Empty() {
- return false
- }
- m.alpnProtocol = string(proto)
- case extensionSCT:
- var sctList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sctList) || sctList.Empty() {
- return false
- }
- for !sctList.Empty() {
- var sct []byte
- if !readUint16LengthPrefixed(&sctList, &sct) ||
- len(sct) == 0 {
- return false
- }
- m.scts = append(m.scts, sct)
- }
- case extensionSupportedVersions:
- if !extData.ReadUint16(&m.supportedVersion) {
- return false
- }
- case extensionCookie:
- if !readUint16LengthPrefixed(&extData, &m.cookie) ||
- len(m.cookie) == 0 {
- return false
- }
- case extensionKeyShare:
-			// This extension has different formats in SH and HRR; accept either
-			// and let the handshake logic decide. See RFC 8446, Section 4.2.8.
- if len(extData) == 2 {
- if !extData.ReadUint16((*uint16)(&m.selectedGroup)) {
- return false
- }
- } else {
- if !extData.ReadUint16((*uint16)(&m.serverShare.group)) ||
- !readUint16LengthPrefixed(&extData, &m.serverShare.data) {
- return false
- }
- }
- case extensionPreSharedKey:
- m.selectedIdentityPresent = true
- if !extData.ReadUint16(&m.selectedIdentity) {
- return false
- }
- case extensionSupportedPoints:
- // RFC 4492, Section 5.1.2
- if !readUint8LengthPrefixed(&extData, &m.supportedPoints) ||
- len(m.supportedPoints) == 0 {
- return false
- }
- default:
- // Ignore unknown extensions.
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
-
- return true
-}
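
The key_share case above disambiguates purely by length. A hypothetical standalone helper (parseKeyShare, again using the public cryptobyte package) makes the two wire forms concrete:

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

// parseKeyShare mirrors the branch above: in a HelloRetryRequest the
// extension body is exactly the two-byte selected group; in a ServerHello
// it is a group followed by a uint16 length-prefixed key_exchange value
// (RFC 8446, Section 4.2.8).
func parseKeyShare(extData []byte) {
	s := cryptobyte.String(extData)
	var group uint16
	if len(extData) == 2 {
		if s.ReadUint16(&group) {
			fmt.Printf("HRR form: selected_group=0x%04x\n", group)
		}
		return
	}
	var key cryptobyte.String
	if s.ReadUint16(&group) && s.ReadUint16LengthPrefixed(&key) && s.Empty() {
		fmt.Printf("SH form: group=0x%04x, %d key bytes\n", group, len(key))
	}
}

func main() {
	parseKeyShare([]byte{0x00, 0x17})                         // HRR: P-256 selected
	parseKeyShare([]byte{0x00, 0x17, 0x00, 0x02, 0xab, 0xcd}) // SH: group + 2-byte share
}
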
-
-type encryptedExtensionsMsg struct {
- raw []byte
- alpnProtocol string
-}
-
-func (m *encryptedExtensionsMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeEncryptedExtensions)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if len(m.alpnProtocol) > 0 {
- b.AddUint16(extensionALPN)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(m.alpnProtocol))
- })
- })
- })
- }
- })
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *encryptedExtensionsMsg) unmarshal(data []byte) bool {
- *m = encryptedExtensionsMsg{raw: data}
- s := cryptobyte.String(data)
-
- var extensions cryptobyte.String
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() {
- return false
- }
-
- for !extensions.Empty() {
- var extension uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&extension) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
-
- switch extension {
- case extensionALPN:
- var protoList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() {
- return false
- }
- var proto cryptobyte.String
- if !protoList.ReadUint8LengthPrefixed(&proto) ||
- proto.Empty() || !protoList.Empty() {
- return false
- }
- m.alpnProtocol = string(proto)
- default:
- // Ignore unknown extensions.
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
-
- return true
-}
-
-type endOfEarlyDataMsg struct{}
-
-func (m *endOfEarlyDataMsg) marshal() []byte {
- x := make([]byte, 4)
- x[0] = typeEndOfEarlyData
- return x
-}
-
-func (m *endOfEarlyDataMsg) unmarshal(data []byte) bool {
- return len(data) == 4
-}
-
-type keyUpdateMsg struct {
- raw []byte
- updateRequested bool
-}
-
-func (m *keyUpdateMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeKeyUpdate)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- if m.updateRequested {
- b.AddUint8(1)
- } else {
- b.AddUint8(0)
- }
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *keyUpdateMsg) unmarshal(data []byte) bool {
- m.raw = data
- s := cryptobyte.String(data)
-
- var updateRequested uint8
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint8(&updateRequested) || !s.Empty() {
- return false
- }
- switch updateRequested {
- case 0:
- m.updateRequested = false
- case 1:
- m.updateRequested = true
- default:
- return false
- }
- return true
-}
-
-type newSessionTicketMsgTLS13 struct {
- raw []byte
- lifetime uint32
- ageAdd uint32
- nonce []byte
- label []byte
- maxEarlyData uint32
-}
-
-func (m *newSessionTicketMsgTLS13) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeNewSessionTicket)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint32(m.lifetime)
- b.AddUint32(m.ageAdd)
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.nonce)
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.label)
- })
-
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if m.maxEarlyData > 0 {
- b.AddUint16(extensionEarlyData)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint32(m.maxEarlyData)
- })
- }
- })
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *newSessionTicketMsgTLS13) unmarshal(data []byte) bool {
- *m = newSessionTicketMsgTLS13{raw: data}
- s := cryptobyte.String(data)
-
- var extensions cryptobyte.String
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint32(&m.lifetime) ||
- !s.ReadUint32(&m.ageAdd) ||
- !readUint8LengthPrefixed(&s, &m.nonce) ||
- !readUint16LengthPrefixed(&s, &m.label) ||
- !s.ReadUint16LengthPrefixed(&extensions) ||
- !s.Empty() {
- return false
- }
-
- for !extensions.Empty() {
- var extension uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&extension) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
-
- switch extension {
- case extensionEarlyData:
- if !extData.ReadUint32(&m.maxEarlyData) {
- return false
- }
- default:
- // Ignore unknown extensions.
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
-
- return true
-}
-
-type certificateRequestMsgTLS13 struct {
- raw []byte
- ocspStapling bool
- scts bool
- supportedSignatureAlgorithms []SignatureScheme
- supportedSignatureAlgorithmsCert []SignatureScheme
- certificateAuthorities [][]byte
-}
-
-func (m *certificateRequestMsgTLS13) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeCertificateRequest)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- // certificate_request_context (SHALL be zero length unless used for
- // post-handshake authentication)
- b.AddUint8(0)
-
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if m.ocspStapling {
- b.AddUint16(extensionStatusRequest)
- b.AddUint16(0) // empty extension_data
- }
- if m.scts {
- // RFC 8446, Section 4.4.2.1 makes no mention of
- // signed_certificate_timestamp in CertificateRequest, but
- // "Extensions in the Certificate message from the client MUST
- // correspond to extensions in the CertificateRequest message
- // from the server." and it appears in the table in Section 4.2.
- b.AddUint16(extensionSCT)
- b.AddUint16(0) // empty extension_data
- }
- if len(m.supportedSignatureAlgorithms) > 0 {
- b.AddUint16(extensionSignatureAlgorithms)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sigAlgo := range m.supportedSignatureAlgorithms {
- b.AddUint16(uint16(sigAlgo))
- }
- })
- })
- }
- if len(m.supportedSignatureAlgorithmsCert) > 0 {
- b.AddUint16(extensionSignatureAlgorithmsCert)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sigAlgo := range m.supportedSignatureAlgorithmsCert {
- b.AddUint16(uint16(sigAlgo))
- }
- })
- })
- }
- if len(m.certificateAuthorities) > 0 {
- b.AddUint16(extensionCertificateAuthorities)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, ca := range m.certificateAuthorities {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(ca)
- })
- }
- })
- })
- }
- })
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *certificateRequestMsgTLS13) unmarshal(data []byte) bool {
- *m = certificateRequestMsgTLS13{raw: data}
- s := cryptobyte.String(data)
-
- var context, extensions cryptobyte.String
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint8LengthPrefixed(&context) || !context.Empty() ||
- !s.ReadUint16LengthPrefixed(&extensions) ||
- !s.Empty() {
- return false
- }
-
- for !extensions.Empty() {
- var extension uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&extension) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
-
- switch extension {
- case extensionStatusRequest:
- m.ocspStapling = true
- case extensionSCT:
- m.scts = true
- case extensionSignatureAlgorithms:
- var sigAndAlgs cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() {
- return false
- }
- for !sigAndAlgs.Empty() {
- var sigAndAlg uint16
- if !sigAndAlgs.ReadUint16(&sigAndAlg) {
- return false
- }
- m.supportedSignatureAlgorithms = append(
- m.supportedSignatureAlgorithms, SignatureScheme(sigAndAlg))
- }
- case extensionSignatureAlgorithmsCert:
- var sigAndAlgs cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() {
- return false
- }
- for !sigAndAlgs.Empty() {
- var sigAndAlg uint16
- if !sigAndAlgs.ReadUint16(&sigAndAlg) {
- return false
- }
- m.supportedSignatureAlgorithmsCert = append(
- m.supportedSignatureAlgorithmsCert, SignatureScheme(sigAndAlg))
- }
- case extensionCertificateAuthorities:
- var auths cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&auths) || auths.Empty() {
- return false
- }
- for !auths.Empty() {
- var ca []byte
- if !readUint16LengthPrefixed(&auths, &ca) || len(ca) == 0 {
- return false
- }
- m.certificateAuthorities = append(m.certificateAuthorities, ca)
- }
- default:
- // Ignore unknown extensions.
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
-
- return true
-}
-
-type certificateMsg struct {
- raw []byte
- certificates [][]byte
-}
-
-func (m *certificateMsg) marshal() (x []byte) {
- if m.raw != nil {
- return m.raw
- }
-
- var i int
- for _, slice := range m.certificates {
- i += len(slice)
- }
-
- length := 3 + 3*len(m.certificates) + i
- x = make([]byte, 4+length)
- x[0] = typeCertificate
- x[1] = uint8(length >> 16)
- x[2] = uint8(length >> 8)
- x[3] = uint8(length)
-
- certificateOctets := length - 3
- x[4] = uint8(certificateOctets >> 16)
- x[5] = uint8(certificateOctets >> 8)
- x[6] = uint8(certificateOctets)
-
- y := x[7:]
- for _, slice := range m.certificates {
- y[0] = uint8(len(slice) >> 16)
- y[1] = uint8(len(slice) >> 8)
- y[2] = uint8(len(slice))
- copy(y[3:], slice)
- y = y[3+len(slice):]
- }
-
- m.raw = x
- return
-}
-
-func (m *certificateMsg) unmarshal(data []byte) bool {
- if len(data) < 7 {
- return false
- }
-
- m.raw = data
- certsLen := uint32(data[4])<<16 | uint32(data[5])<<8 | uint32(data[6])
- if uint32(len(data)) != certsLen+7 {
- return false
- }
-
- numCerts := 0
- d := data[7:]
- for certsLen > 0 {
- if len(d) < 4 {
- return false
- }
- certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2])
- if uint32(len(d)) < 3+certLen {
- return false
- }
- d = d[3+certLen:]
- certsLen -= 3 + certLen
- numCerts++
- }
-
- m.certificates = make([][]byte, numCerts)
- d = data[7:]
- for i := 0; i < numCerts; i++ {
- certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2])
- m.certificates[i] = d[3 : 3+certLen]
- d = d[3+certLen:]
- }
-
- return true
-}
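
certificateMsg is one of the handful of messages still framed by hand rather than through cryptobyte; the recurring primitive is the 3-byte big-endian (uint24) length that TLS uses for handshake bodies and certificate entries. A minimal sketch of that encoding (putUint24 and getUint24 are illustrative helpers; encoding/binary has no uint24 variant, so TLS code spells it out):

package main

import "fmt"

// putUint24 writes v as a 3-byte big-endian length, the framing used by
// the manual marshal above for both the message and each certificate.
func putUint24(b []byte, v int) {
	b[0] = byte(v >> 16)
	b[1] = byte(v >> 8)
	b[2] = byte(v)
}

// getUint24 is the matching decode used by unmarshal.
func getUint24(b []byte) int {
	return int(b[0])<<16 | int(b[1])<<8 | int(b[2])
}

func main() {
	buf := make([]byte, 3)
	putUint24(buf, 0x0301af)
	fmt.Printf("% x -> %#x\n", buf, getUint24(buf)) // 03 01 af -> 0x301af
}
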
-
-type certificateMsgTLS13 struct {
- raw []byte
- certificate Certificate
- ocspStapling bool
- scts bool
-}
-
-func (m *certificateMsgTLS13) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeCertificate)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(0) // certificate_request_context
-
- certificate := m.certificate
- if !m.ocspStapling {
- certificate.OCSPStaple = nil
- }
- if !m.scts {
- certificate.SignedCertificateTimestamps = nil
- }
- marshalCertificate(b, certificate)
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func marshalCertificate(b *cryptobyte.Builder, certificate Certificate) {
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- for i, cert := range certificate.Certificate {
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(cert)
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if i > 0 {
- // This library only supports OCSP and SCT for leaf certificates.
- return
- }
- if certificate.OCSPStaple != nil {
- b.AddUint16(extensionStatusRequest)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(statusTypeOCSP)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(certificate.OCSPStaple)
- })
- })
- }
- if certificate.SignedCertificateTimestamps != nil {
- b.AddUint16(extensionSCT)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sct := range certificate.SignedCertificateTimestamps {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(sct)
- })
- }
- })
- })
- }
- })
- }
- })
-}
-
-func (m *certificateMsgTLS13) unmarshal(data []byte) bool {
- *m = certificateMsgTLS13{raw: data}
- s := cryptobyte.String(data)
-
- var context cryptobyte.String
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint8LengthPrefixed(&context) || !context.Empty() ||
- !unmarshalCertificate(&s, &m.certificate) ||
- !s.Empty() {
- return false
- }
-
- m.scts = m.certificate.SignedCertificateTimestamps != nil
- m.ocspStapling = m.certificate.OCSPStaple != nil
-
- return true
-}
-
-func unmarshalCertificate(s *cryptobyte.String, certificate *Certificate) bool {
- var certList cryptobyte.String
- if !s.ReadUint24LengthPrefixed(&certList) {
- return false
- }
- for !certList.Empty() {
- var cert []byte
- var extensions cryptobyte.String
- if !readUint24LengthPrefixed(&certList, &cert) ||
- !certList.ReadUint16LengthPrefixed(&extensions) {
- return false
- }
- certificate.Certificate = append(certificate.Certificate, cert)
- for !extensions.Empty() {
- var extension uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&extension) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
- if len(certificate.Certificate) > 1 {
- // This library only supports OCSP and SCT for leaf certificates.
- continue
- }
-
- switch extension {
- case extensionStatusRequest:
- var statusType uint8
- if !extData.ReadUint8(&statusType) || statusType != statusTypeOCSP ||
- !readUint24LengthPrefixed(&extData, &certificate.OCSPStaple) ||
- len(certificate.OCSPStaple) == 0 {
- return false
- }
- case extensionSCT:
- var sctList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sctList) || sctList.Empty() {
- return false
- }
- for !sctList.Empty() {
- var sct []byte
- if !readUint16LengthPrefixed(&sctList, &sct) ||
- len(sct) == 0 {
- return false
- }
- certificate.SignedCertificateTimestamps = append(
- certificate.SignedCertificateTimestamps, sct)
- }
- default:
- // Ignore unknown extensions.
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
- }
- return true
-}
-
-type serverKeyExchangeMsg struct {
- raw []byte
- key []byte
-}
-
-func (m *serverKeyExchangeMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
- length := len(m.key)
- x := make([]byte, length+4)
- x[0] = typeServerKeyExchange
- x[1] = uint8(length >> 16)
- x[2] = uint8(length >> 8)
- x[3] = uint8(length)
- copy(x[4:], m.key)
-
- m.raw = x
- return x
-}
-
-func (m *serverKeyExchangeMsg) unmarshal(data []byte) bool {
- m.raw = data
- if len(data) < 4 {
- return false
- }
- m.key = data[4:]
- return true
-}
-
-type certificateStatusMsg struct {
- raw []byte
- response []byte
-}
-
-func (m *certificateStatusMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeCertificateStatus)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(statusTypeOCSP)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.response)
- })
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *certificateStatusMsg) unmarshal(data []byte) bool {
- m.raw = data
- s := cryptobyte.String(data)
-
- var statusType uint8
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint8(&statusType) || statusType != statusTypeOCSP ||
- !readUint24LengthPrefixed(&s, &m.response) ||
- len(m.response) == 0 || !s.Empty() {
- return false
- }
- return true
-}
-
-type serverHelloDoneMsg struct{}
-
-func (m *serverHelloDoneMsg) marshal() []byte {
- x := make([]byte, 4)
- x[0] = typeServerHelloDone
- return x
-}
-
-func (m *serverHelloDoneMsg) unmarshal(data []byte) bool {
- return len(data) == 4
-}
-
-type clientKeyExchangeMsg struct {
- raw []byte
- ciphertext []byte
-}
-
-func (m *clientKeyExchangeMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
- length := len(m.ciphertext)
- x := make([]byte, length+4)
- x[0] = typeClientKeyExchange
- x[1] = uint8(length >> 16)
- x[2] = uint8(length >> 8)
- x[3] = uint8(length)
- copy(x[4:], m.ciphertext)
-
- m.raw = x
- return x
-}
-
-func (m *clientKeyExchangeMsg) unmarshal(data []byte) bool {
- m.raw = data
- if len(data) < 4 {
- return false
- }
- l := int(data[1])<<16 | int(data[2])<<8 | int(data[3])
- if l != len(data)-4 {
- return false
- }
- m.ciphertext = data[4:]
- return true
-}
-
-type finishedMsg struct {
- raw []byte
- verifyData []byte
-}
-
-func (m *finishedMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeFinished)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.verifyData)
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *finishedMsg) unmarshal(data []byte) bool {
- m.raw = data
- s := cryptobyte.String(data)
- return s.Skip(1) &&
- readUint24LengthPrefixed(&s, &m.verifyData) &&
- s.Empty()
-}
-
-type certificateRequestMsg struct {
- raw []byte
- // hasSignatureAlgorithm indicates whether this message includes a list of
- // supported signature algorithms. This change was introduced with TLS 1.2.
- hasSignatureAlgorithm bool
-
- certificateTypes []byte
- supportedSignatureAlgorithms []SignatureScheme
- certificateAuthorities [][]byte
-}
-
-func (m *certificateRequestMsg) marshal() (x []byte) {
- if m.raw != nil {
- return m.raw
- }
-
- // See RFC 4346, Section 7.4.4.
- length := 1 + len(m.certificateTypes) + 2
- casLength := 0
- for _, ca := range m.certificateAuthorities {
- casLength += 2 + len(ca)
- }
- length += casLength
-
- if m.hasSignatureAlgorithm {
- length += 2 + 2*len(m.supportedSignatureAlgorithms)
- }
-
- x = make([]byte, 4+length)
- x[0] = typeCertificateRequest
- x[1] = uint8(length >> 16)
- x[2] = uint8(length >> 8)
- x[3] = uint8(length)
-
- x[4] = uint8(len(m.certificateTypes))
-
- copy(x[5:], m.certificateTypes)
- y := x[5+len(m.certificateTypes):]
-
- if m.hasSignatureAlgorithm {
- n := len(m.supportedSignatureAlgorithms) * 2
- y[0] = uint8(n >> 8)
- y[1] = uint8(n)
- y = y[2:]
- for _, sigAlgo := range m.supportedSignatureAlgorithms {
- y[0] = uint8(sigAlgo >> 8)
- y[1] = uint8(sigAlgo)
- y = y[2:]
- }
- }
-
- y[0] = uint8(casLength >> 8)
- y[1] = uint8(casLength)
- y = y[2:]
- for _, ca := range m.certificateAuthorities {
- y[0] = uint8(len(ca) >> 8)
- y[1] = uint8(len(ca))
- y = y[2:]
- copy(y, ca)
- y = y[len(ca):]
- }
-
- m.raw = x
- return
-}
-
-func (m *certificateRequestMsg) unmarshal(data []byte) bool {
- m.raw = data
-
- if len(data) < 5 {
- return false
- }
-
- length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3])
- if uint32(len(data))-4 != length {
- return false
- }
-
- numCertTypes := int(data[4])
- data = data[5:]
- if numCertTypes == 0 || len(data) <= numCertTypes {
- return false
- }
-
- m.certificateTypes = make([]byte, numCertTypes)
- if copy(m.certificateTypes, data) != numCertTypes {
- return false
- }
-
- data = data[numCertTypes:]
-
- if m.hasSignatureAlgorithm {
- if len(data) < 2 {
- return false
- }
- sigAndHashLen := uint16(data[0])<<8 | uint16(data[1])
- data = data[2:]
- if sigAndHashLen&1 != 0 {
- return false
- }
- if len(data) < int(sigAndHashLen) {
- return false
- }
- numSigAlgos := sigAndHashLen / 2
- m.supportedSignatureAlgorithms = make([]SignatureScheme, numSigAlgos)
- for i := range m.supportedSignatureAlgorithms {
- m.supportedSignatureAlgorithms[i] = SignatureScheme(data[0])<<8 | SignatureScheme(data[1])
- data = data[2:]
- }
- }
-
- if len(data) < 2 {
- return false
- }
- casLength := uint16(data[0])<<8 | uint16(data[1])
- data = data[2:]
- if len(data) < int(casLength) {
- return false
- }
- cas := make([]byte, casLength)
- copy(cas, data)
- data = data[casLength:]
-
- m.certificateAuthorities = nil
- for len(cas) > 0 {
- if len(cas) < 2 {
- return false
- }
- caLen := uint16(cas[0])<<8 | uint16(cas[1])
- cas = cas[2:]
-
- if len(cas) < int(caLen) {
- return false
- }
-
- m.certificateAuthorities = append(m.certificateAuthorities, cas[:caLen])
- cas = cas[caLen:]
- }
-
- return len(data) == 0
-}
-
-type certificateVerifyMsg struct {
- raw []byte
- hasSignatureAlgorithm bool // format change introduced in TLS 1.2
- signatureAlgorithm SignatureScheme
- signature []byte
-}
-
-func (m *certificateVerifyMsg) marshal() (x []byte) {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeCertificateVerify)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- if m.hasSignatureAlgorithm {
- b.AddUint16(uint16(m.signatureAlgorithm))
- }
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.signature)
- })
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *certificateVerifyMsg) unmarshal(data []byte) bool {
- m.raw = data
- s := cryptobyte.String(data)
-
- if !s.Skip(4) { // message type and uint24 length field
- return false
- }
- if m.hasSignatureAlgorithm {
- if !s.ReadUint16((*uint16)(&m.signatureAlgorithm)) {
- return false
- }
- }
- return readUint16LengthPrefixed(&s, &m.signature) && s.Empty()
-}
-
-type newSessionTicketMsg struct {
- raw []byte
- ticket []byte
-}
-
-func (m *newSessionTicketMsg) marshal() (x []byte) {
- if m.raw != nil {
- return m.raw
- }
-
- // See RFC 5077, Section 3.3.
- ticketLen := len(m.ticket)
- length := 2 + 4 + ticketLen
- x = make([]byte, 4+length)
- x[0] = typeNewSessionTicket
- x[1] = uint8(length >> 16)
- x[2] = uint8(length >> 8)
- x[3] = uint8(length)
- x[8] = uint8(ticketLen >> 8)
- x[9] = uint8(ticketLen)
- copy(x[10:], m.ticket)
-
- m.raw = x
-
- return
-}
-
-func (m *newSessionTicketMsg) unmarshal(data []byte) bool {
- m.raw = data
-
- if len(data) < 10 {
- return false
- }
-
- length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3])
- if uint32(len(data))-4 != length {
- return false
- }
-
- ticketLen := int(data[8])<<8 + int(data[9])
- if len(data)-10 != ticketLen {
- return false
- }
-
- m.ticket = data[10:]
-
- return true
-}
-
-type helloRequestMsg struct {
-}
-
-func (*helloRequestMsg) marshal() []byte {
- return []byte{typeHelloRequest, 0, 0, 0}
-}
-
-func (*helloRequestMsg) unmarshal(data []byte) bool {
- return len(data) == 4
-}
diff --git a/contrib/go/_std_1.18/src/crypto/tls/handshake_server.go b/contrib/go/_std_1.18/src/crypto/tls/handshake_server.go
deleted file mode 100644
index 097046340b..0000000000
--- a/contrib/go/_std_1.18/src/crypto/tls/handshake_server.go
+++ /dev/null
@@ -1,875 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tls
-
-import (
- "context"
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/rsa"
- "crypto/subtle"
- "crypto/x509"
- "errors"
- "fmt"
- "hash"
- "io"
- "sync/atomic"
- "time"
-)
-
-// serverHandshakeState contains details of a server handshake in progress.
-// It's discarded once the handshake has completed.
-type serverHandshakeState struct {
- c *Conn
- ctx context.Context
- clientHello *clientHelloMsg
- hello *serverHelloMsg
- suite *cipherSuite
- ecdheOk bool
- ecSignOk bool
- rsaDecryptOk bool
- rsaSignOk bool
- sessionState *sessionState
- finishedHash finishedHash
- masterSecret []byte
- cert *Certificate
-}
-
-// serverHandshake performs a TLS handshake as a server.
-func (c *Conn) serverHandshake(ctx context.Context) error {
- clientHello, err := c.readClientHello(ctx)
- if err != nil {
- return err
- }
-
- if c.vers == VersionTLS13 {
- hs := serverHandshakeStateTLS13{
- c: c,
- ctx: ctx,
- clientHello: clientHello,
- }
- return hs.handshake()
- }
-
- hs := serverHandshakeState{
- c: c,
- ctx: ctx,
- clientHello: clientHello,
- }
- return hs.handshake()
-}
-
-func (hs *serverHandshakeState) handshake() error {
- c := hs.c
-
- if err := hs.processClientHello(); err != nil {
- return err
- }
-
- // For an overview of TLS handshaking, see RFC 5246, Section 7.3.
- c.buffering = true
- if hs.checkForResumption() {
-		// The client has included a session ticket, so we do an abbreviated handshake.
- c.didResume = true
- if err := hs.doResumeHandshake(); err != nil {
- return err
- }
- if err := hs.establishKeys(); err != nil {
- return err
- }
- if err := hs.sendSessionTicket(); err != nil {
- return err
- }
- if err := hs.sendFinished(c.serverFinished[:]); err != nil {
- return err
- }
- if _, err := c.flush(); err != nil {
- return err
- }
- c.clientFinishedIsFirst = false
- if err := hs.readFinished(nil); err != nil {
- return err
- }
- } else {
-		// The client didn't include a session ticket, or it wasn't
-		// valid, so we do a full handshake.
- if err := hs.pickCipherSuite(); err != nil {
- return err
- }
- if err := hs.doFullHandshake(); err != nil {
- return err
- }
- if err := hs.establishKeys(); err != nil {
- return err
- }
- if err := hs.readFinished(c.clientFinished[:]); err != nil {
- return err
- }
- c.clientFinishedIsFirst = true
- c.buffering = true
- if err := hs.sendSessionTicket(); err != nil {
- return err
- }
- if err := hs.sendFinished(nil); err != nil {
- return err
- }
- if _, err := c.flush(); err != nil {
- return err
- }
- }
-
- c.ekm = ekmFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.clientHello.random, hs.hello.random)
- atomic.StoreUint32(&c.handshakeStatus, 1)
-
- return nil
-}
-
-// readClientHello reads a ClientHello message and selects the protocol version.
-func (c *Conn) readClientHello(ctx context.Context) (*clientHelloMsg, error) {
- msg, err := c.readHandshake()
- if err != nil {
- return nil, err
- }
- clientHello, ok := msg.(*clientHelloMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return nil, unexpectedMessageError(clientHello, msg)
- }
-
- var configForClient *Config
- originalConfig := c.config
- if c.config.GetConfigForClient != nil {
- chi := clientHelloInfo(ctx, c, clientHello)
- if configForClient, err = c.config.GetConfigForClient(chi); err != nil {
- c.sendAlert(alertInternalError)
- return nil, err
- } else if configForClient != nil {
- c.config = configForClient
- }
- }
- c.ticketKeys = originalConfig.ticketKeys(configForClient)
-
- clientVersions := clientHello.supportedVersions
- if len(clientHello.supportedVersions) == 0 {
- clientVersions = supportedVersionsFromMax(clientHello.vers)
- }
- c.vers, ok = c.config.mutualVersion(roleServer, clientVersions)
- if !ok {
- c.sendAlert(alertProtocolVersion)
- return nil, fmt.Errorf("tls: client offered only unsupported versions: %x", clientVersions)
- }
- c.haveVers = true
- c.in.version = c.vers
- c.out.version = c.vers
-
- return clientHello, nil
-}
-
-func (hs *serverHandshakeState) processClientHello() error {
- c := hs.c
-
- hs.hello = new(serverHelloMsg)
- hs.hello.vers = c.vers
-
- foundCompression := false
- // We only support null compression, so check that the client offered it.
- for _, compression := range hs.clientHello.compressionMethods {
- if compression == compressionNone {
- foundCompression = true
- break
- }
- }
-
- if !foundCompression {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: client does not support uncompressed connections")
- }
-
- hs.hello.random = make([]byte, 32)
- serverRandom := hs.hello.random
- // Downgrade protection canaries. See RFC 8446, Section 4.1.3.
- maxVers := c.config.maxSupportedVersion(roleServer)
- if maxVers >= VersionTLS12 && c.vers < maxVers || testingOnlyForceDowngradeCanary {
- if c.vers == VersionTLS12 {
- copy(serverRandom[24:], downgradeCanaryTLS12)
- } else {
- copy(serverRandom[24:], downgradeCanaryTLS11)
- }
- serverRandom = serverRandom[:24]
- }
- _, err := io.ReadFull(c.config.rand(), serverRandom)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- if len(hs.clientHello.secureRenegotiation) != 0 {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: initial handshake had non-empty renegotiation extension")
- }
-
- hs.hello.secureRenegotiationSupported = hs.clientHello.secureRenegotiationSupported
- hs.hello.compressionMethod = compressionNone
- if len(hs.clientHello.serverName) > 0 {
- c.serverName = hs.clientHello.serverName
- }
-
- selectedProto, err := negotiateALPN(c.config.NextProtos, hs.clientHello.alpnProtocols)
- if err != nil {
- c.sendAlert(alertNoApplicationProtocol)
- return err
- }
- hs.hello.alpnProtocol = selectedProto
- c.clientProtocol = selectedProto
-
- hs.cert, err = c.config.getCertificate(clientHelloInfo(hs.ctx, c, hs.clientHello))
- if err != nil {
- if err == errNoCertificates {
- c.sendAlert(alertUnrecognizedName)
- } else {
- c.sendAlert(alertInternalError)
- }
- return err
- }
- if hs.clientHello.scts {
- hs.hello.scts = hs.cert.SignedCertificateTimestamps
- }
-
- hs.ecdheOk = supportsECDHE(c.config, hs.clientHello.supportedCurves, hs.clientHello.supportedPoints)
-
- if hs.ecdheOk {
-		// Although omitting the ec_point_formats extension is permitted, some
-		// old OpenSSL versions will refuse to handshake if it is not present.
- //
- // Per RFC 4492, section 5.1.2, implementations MUST support the
- // uncompressed point format. See golang.org/issue/31943.
- hs.hello.supportedPoints = []uint8{pointFormatUncompressed}
- }
-
- if priv, ok := hs.cert.PrivateKey.(crypto.Signer); ok {
- switch priv.Public().(type) {
- case *ecdsa.PublicKey:
- hs.ecSignOk = true
- case ed25519.PublicKey:
- hs.ecSignOk = true
- case *rsa.PublicKey:
- hs.rsaSignOk = true
- default:
- c.sendAlert(alertInternalError)
- return fmt.Errorf("tls: unsupported signing key type (%T)", priv.Public())
- }
- }
- if priv, ok := hs.cert.PrivateKey.(crypto.Decrypter); ok {
- switch priv.Public().(type) {
- case *rsa.PublicKey:
- hs.rsaDecryptOk = true
- default:
- c.sendAlert(alertInternalError)
- return fmt.Errorf("tls: unsupported decryption key type (%T)", priv.Public())
- }
- }
-
- return nil
-}
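
The downgrade canaries written into hs.hello.random above follow RFC 8446, Section 4.1.3: a server that could have negotiated TLS 1.3 but settled on an older version overwrites the last 8 bytes of its random with a fixed marker, so a TLS 1.3 client can detect the downgrade. A standalone sketch (serverRandomWithCanary is a hypothetical helper; the byte strings are the values the RFC specifies):

package main

import (
	"crypto/rand"
	"fmt"
)

// Canary values from RFC 8446, Section 4.1.3.
var (
	canaryTLS12 = []byte("DOWNGRD\x01") // negotiated TLS 1.2
	canaryTLS11 = []byte("DOWNGRD\x00") // negotiated TLS 1.1 or below
)

// serverRandomWithCanary fills a 32-byte server random: only the first
// 24 bytes stay random; the last 8 carry the canary.
func serverRandomWithCanary(negotiatedTLS12 bool) ([]byte, error) {
	random := make([]byte, 32)
	if _, err := rand.Read(random[:24]); err != nil {
		return nil, err
	}
	if negotiatedTLS12 {
		copy(random[24:], canaryTLS12)
	} else {
		copy(random[24:], canaryTLS11)
	}
	return random, nil
}

func main() {
	r, err := serverRandomWithCanary(true)
	if err != nil {
		panic(err)
	}
	fmt.Printf("last 8 bytes: %q\n", r[24:])
}
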
-
-// negotiateALPN picks a shared ALPN protocol that both sides support in server
-// preference order. If ALPN is not configured or the peer doesn't support it,
-// it returns "" and no error.
-func negotiateALPN(serverProtos, clientProtos []string) (string, error) {
- if len(serverProtos) == 0 || len(clientProtos) == 0 {
- return "", nil
- }
- var http11fallback bool
- for _, s := range serverProtos {
- for _, c := range clientProtos {
- if s == c {
- return s, nil
- }
- if s == "h2" && c == "http/1.1" {
- http11fallback = true
- }
- }
- }
- // As a special case, let http/1.1 clients connect to h2 servers as if they
- // didn't support ALPN. We used not to enforce protocol overlap, so over
- // time a number of HTTP servers were configured with only "h2", but
- // expected to accept connections from "http/1.1" clients. See Issue 46310.
- if http11fallback {
- return "", nil
- }
- return "", fmt.Errorf("tls: client requested unsupported application protocols (%s)", clientProtos)
-}
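
The h2/http1.1 special case is easiest to see with concrete inputs. pickALPN below is a standalone copy of the selection rules (a sketch for experimentation, not the unexported function itself):

package main

import "fmt"

// pickALPN mirrors the rules above: server preference order wins, and an
// "http/1.1"-only client may still reach an "h2"-only server as if neither
// side had used ALPN.
func pickALPN(serverProtos, clientProtos []string) (string, error) {
	if len(serverProtos) == 0 || len(clientProtos) == 0 {
		return "", nil // ALPN not in use on one side
	}
	http11fallback := false
	for _, s := range serverProtos {
		for _, c := range clientProtos {
			if s == c {
				return s, nil
			}
			if s == "h2" && c == "http/1.1" {
				http11fallback = true
			}
		}
	}
	if http11fallback {
		return "", nil
	}
	return "", fmt.Errorf("tls: no overlap with client protocols %v", clientProtos)
}

func main() {
	fmt.Println(pickALPN([]string{"h2", "http/1.1"}, []string{"http/1.1"})) // http/1.1 <nil>
	fmt.Println(pickALPN([]string{"h2"}, []string{"http/1.1"}))             // "" <nil> (fallback)
	fmt.Println(pickALPN([]string{"h2"}, []string{"spdy/3"}))               // "" error
}
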
-
-// supportsECDHE returns whether ECDHE key exchanges can be used with this
-// pre-TLS 1.3 client.
-func supportsECDHE(c *Config, supportedCurves []CurveID, supportedPoints []uint8) bool {
- supportsCurve := false
- for _, curve := range supportedCurves {
- if c.supportsCurve(curve) {
- supportsCurve = true
- break
- }
- }
-
- supportsPointFormat := false
- for _, pointFormat := range supportedPoints {
- if pointFormat == pointFormatUncompressed {
- supportsPointFormat = true
- break
- }
- }
-
- return supportsCurve && supportsPointFormat
-}
-
-func (hs *serverHandshakeState) pickCipherSuite() error {
- c := hs.c
-
- preferenceOrder := cipherSuitesPreferenceOrder
- if !hasAESGCMHardwareSupport || !aesgcmPreferred(hs.clientHello.cipherSuites) {
- preferenceOrder = cipherSuitesPreferenceOrderNoAES
- }
-
- configCipherSuites := c.config.cipherSuites()
- preferenceList := make([]uint16, 0, len(configCipherSuites))
- for _, suiteID := range preferenceOrder {
- for _, id := range configCipherSuites {
- if id == suiteID {
- preferenceList = append(preferenceList, id)
- break
- }
- }
- }
-
- hs.suite = selectCipherSuite(preferenceList, hs.clientHello.cipherSuites, hs.cipherSuiteOk)
- if hs.suite == nil {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: no cipher suite supported by both client and server")
- }
- c.cipherSuite = hs.suite.id
-
- for _, id := range hs.clientHello.cipherSuites {
- if id == TLS_FALLBACK_SCSV {
- // The client is doing a fallback connection. See RFC 7507.
- if hs.clientHello.vers < c.config.maxSupportedVersion(roleServer) {
- c.sendAlert(alertInappropriateFallback)
- return errors.New("tls: client using inappropriate protocol fallback")
- }
- break
- }
- }
-
- return nil
-}
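
The nested loop near the top of pickCipherSuite is an order-preserving intersection: it keeps only the configured suites, ranked by the global preference order. The same shape in isolation (intersectPreserving is an illustrative name):

package main

import "fmt"

// intersectPreserving keeps the members of candidates that also appear in
// preferenceOrder, ranked by preferenceOrder (the shape of the loop that
// builds preferenceList above).
func intersectPreserving(preferenceOrder, candidates []uint16) []uint16 {
	out := make([]uint16, 0, len(candidates))
	for _, p := range preferenceOrder {
		for _, c := range candidates {
			if c == p {
				out = append(out, c)
				break
			}
		}
	}
	return out
}

func main() {
	pref := []uint16{0x1301, 0x1303, 0x1302} // global ranking
	conf := []uint16{0x1302, 0x1301}         // configured suites
	fmt.Printf("%04x\n", intersectPreserving(pref, conf)) // [1301 1302]
}
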
-
-func (hs *serverHandshakeState) cipherSuiteOk(c *cipherSuite) bool {
- if c.flags&suiteECDHE != 0 {
- if !hs.ecdheOk {
- return false
- }
- if c.flags&suiteECSign != 0 {
- if !hs.ecSignOk {
- return false
- }
- } else if !hs.rsaSignOk {
- return false
- }
- } else if !hs.rsaDecryptOk {
- return false
- }
- if hs.c.vers < VersionTLS12 && c.flags&suiteTLS12 != 0 {
- return false
- }
- return true
-}
-
-// checkForResumption reports whether we should perform resumption on this connection.
-func (hs *serverHandshakeState) checkForResumption() bool {
- c := hs.c
-
- if c.config.SessionTicketsDisabled {
- return false
- }
-
- plaintext, usedOldKey := c.decryptTicket(hs.clientHello.sessionTicket)
- if plaintext == nil {
- return false
- }
- hs.sessionState = &sessionState{usedOldKey: usedOldKey}
- ok := hs.sessionState.unmarshal(plaintext)
- if !ok {
- return false
- }
-
- createdAt := time.Unix(int64(hs.sessionState.createdAt), 0)
- if c.config.time().Sub(createdAt) > maxSessionTicketLifetime {
- return false
- }
-
- // Never resume a session for a different TLS version.
- if c.vers != hs.sessionState.vers {
- return false
- }
-
- cipherSuiteOk := false
- // Check that the client is still offering the ciphersuite in the session.
- for _, id := range hs.clientHello.cipherSuites {
- if id == hs.sessionState.cipherSuite {
- cipherSuiteOk = true
- break
- }
- }
- if !cipherSuiteOk {
- return false
- }
-
- // Check that we also support the ciphersuite from the session.
- hs.suite = selectCipherSuite([]uint16{hs.sessionState.cipherSuite},
- c.config.cipherSuites(), hs.cipherSuiteOk)
- if hs.suite == nil {
- return false
- }
-
- sessionHasClientCerts := len(hs.sessionState.certificates) != 0
- needClientCerts := requiresClientCert(c.config.ClientAuth)
- if needClientCerts && !sessionHasClientCerts {
- return false
- }
- if sessionHasClientCerts && c.config.ClientAuth == NoClientCert {
- return false
- }
-
- return true
-}
-
-func (hs *serverHandshakeState) doResumeHandshake() error {
- c := hs.c
-
- hs.hello.cipherSuite = hs.suite.id
- c.cipherSuite = hs.suite.id
- // We echo the client's session ID in the ServerHello to let it know
- // that we're doing a resumption.
- hs.hello.sessionId = hs.clientHello.sessionId
- hs.hello.ticketSupported = hs.sessionState.usedOldKey
- hs.finishedHash = newFinishedHash(c.vers, hs.suite)
- hs.finishedHash.discardHandshakeBuffer()
- hs.finishedHash.Write(hs.clientHello.marshal())
- hs.finishedHash.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
- return err
- }
-
- if err := c.processCertsFromClient(Certificate{
- Certificate: hs.sessionState.certificates,
- }); err != nil {
- return err
- }
-
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- hs.masterSecret = hs.sessionState.masterSecret
-
- return nil
-}
-
-func (hs *serverHandshakeState) doFullHandshake() error {
- c := hs.c
-
- if hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0 {
- hs.hello.ocspStapling = true
- }
-
- hs.hello.ticketSupported = hs.clientHello.ticketSupported && !c.config.SessionTicketsDisabled
- hs.hello.cipherSuite = hs.suite.id
-
- hs.finishedHash = newFinishedHash(hs.c.vers, hs.suite)
- if c.config.ClientAuth == NoClientCert {
- // No need to keep a full record of the handshake if client
- // certificates won't be used.
- hs.finishedHash.discardHandshakeBuffer()
- }
- hs.finishedHash.Write(hs.clientHello.marshal())
- hs.finishedHash.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
- return err
- }
-
- certMsg := new(certificateMsg)
- certMsg.certificates = hs.cert.Certificate
- hs.finishedHash.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
- return err
- }
-
- if hs.hello.ocspStapling {
- certStatus := new(certificateStatusMsg)
- certStatus.response = hs.cert.OCSPStaple
- hs.finishedHash.Write(certStatus.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certStatus.marshal()); err != nil {
- return err
- }
- }
-
- keyAgreement := hs.suite.ka(c.vers)
- skx, err := keyAgreement.generateServerKeyExchange(c.config, hs.cert, hs.clientHello, hs.hello)
- if err != nil {
- c.sendAlert(alertHandshakeFailure)
- return err
- }
- if skx != nil {
- hs.finishedHash.Write(skx.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, skx.marshal()); err != nil {
- return err
- }
- }
-
- var certReq *certificateRequestMsg
- if c.config.ClientAuth >= RequestClientCert {
- // Request a client certificate
- certReq = new(certificateRequestMsg)
- certReq.certificateTypes = []byte{
- byte(certTypeRSASign),
- byte(certTypeECDSASign),
- }
- if c.vers >= VersionTLS12 {
- certReq.hasSignatureAlgorithm = true
- certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms
- }
-
- // An empty list of certificateAuthorities signals to
- // the client that it may send any certificate in response
- // to our request. When we know the CAs we trust, then
- // we can send them down, so that the client can choose
- // an appropriate certificate to give to us.
- if c.config.ClientCAs != nil {
- certReq.certificateAuthorities = c.config.ClientCAs.Subjects()
- }
- hs.finishedHash.Write(certReq.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil {
- return err
- }
- }
-
- helloDone := new(serverHelloDoneMsg)
- hs.finishedHash.Write(helloDone.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, helloDone.marshal()); err != nil {
- return err
- }
-
- if _, err := c.flush(); err != nil {
- return err
- }
-
- var pub crypto.PublicKey // public key for client auth, if any
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- // If we requested a client certificate, then the client must send a
- // certificate message, even if it's empty.
- if c.config.ClientAuth >= RequestClientCert {
- certMsg, ok := msg.(*certificateMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certMsg, msg)
- }
- hs.finishedHash.Write(certMsg.marshal())
-
- if err := c.processCertsFromClient(Certificate{
- Certificate: certMsg.certificates,
- }); err != nil {
- return err
- }
- if len(certMsg.certificates) != 0 {
- pub = c.peerCertificates[0].PublicKey
- }
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- }
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- // Get client key exchange
- ckx, ok := msg.(*clientKeyExchangeMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(ckx, msg)
- }
- hs.finishedHash.Write(ckx.marshal())
-
- preMasterSecret, err := keyAgreement.processClientKeyExchange(c.config, hs.cert, ckx, c.vers)
- if err != nil {
- c.sendAlert(alertHandshakeFailure)
- return err
- }
- hs.masterSecret = masterFromPreMasterSecret(c.vers, hs.suite, preMasterSecret, hs.clientHello.random, hs.hello.random)
- if err := c.config.writeKeyLog(keyLogLabelTLS12, hs.clientHello.random, hs.masterSecret); err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- // If we received a client cert in response to our certificate request message,
- // the client will send us a certificateVerifyMsg immediately after the
- // clientKeyExchangeMsg. This message is a digest of all preceding
- // handshake-layer messages that is signed using the private key corresponding
- // to the client's certificate. This allows us to verify that the client is in
- // possession of the private key of the certificate.
- if len(c.peerCertificates) > 0 {
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- certVerify, ok := msg.(*certificateVerifyMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certVerify, msg)
- }
-
- var sigType uint8
- var sigHash crypto.Hash
- if c.vers >= VersionTLS12 {
- if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, certReq.supportedSignatureAlgorithms) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client certificate used with invalid signature algorithm")
- }
- sigType, sigHash, err = typeAndHashFromSignatureScheme(certVerify.signatureAlgorithm)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
- } else {
- sigType, sigHash, err = legacyTypeAndHashFromPublicKey(pub)
- if err != nil {
- c.sendAlert(alertIllegalParameter)
- return err
- }
- }
-
- signed := hs.finishedHash.hashForClientCertificate(sigType, sigHash, hs.masterSecret)
- if err := verifyHandshakeSignature(sigType, pub, sigHash, signed, certVerify.signature); err != nil {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid signature by the client certificate: " + err.Error())
- }
-
- hs.finishedHash.Write(certVerify.marshal())
- }
-
- hs.finishedHash.discardHandshakeBuffer()
-
- return nil
-}
-
-func (hs *serverHandshakeState) establishKeys() error {
- c := hs.c
-
- clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV :=
- keysFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.clientHello.random, hs.hello.random, hs.suite.macLen, hs.suite.keyLen, hs.suite.ivLen)
-
- var clientCipher, serverCipher any
- var clientHash, serverHash hash.Hash
-
- if hs.suite.aead == nil {
- clientCipher = hs.suite.cipher(clientKey, clientIV, true /* for reading */)
- clientHash = hs.suite.mac(clientMAC)
- serverCipher = hs.suite.cipher(serverKey, serverIV, false /* not for reading */)
- serverHash = hs.suite.mac(serverMAC)
- } else {
- clientCipher = hs.suite.aead(clientKey, clientIV)
- serverCipher = hs.suite.aead(serverKey, serverIV)
- }
-
- c.in.prepareCipherSpec(c.vers, clientCipher, clientHash)
- c.out.prepareCipherSpec(c.vers, serverCipher, serverHash)
-
- return nil
-}
-
-func (hs *serverHandshakeState) readFinished(out []byte) error {
- c := hs.c
-
- if err := c.readChangeCipherSpec(); err != nil {
- return err
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
- clientFinished, ok := msg.(*finishedMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(clientFinished, msg)
- }
-
- verify := hs.finishedHash.clientSum(hs.masterSecret)
- if len(verify) != len(clientFinished.verifyData) ||
- subtle.ConstantTimeCompare(verify, clientFinished.verifyData) != 1 {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: client's Finished message is incorrect")
- }
-
- hs.finishedHash.Write(clientFinished.marshal())
- copy(out, verify)
- return nil
-}
-
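The Finished check above pairs an explicit length check with a constant-time comparison from crypto/subtle, so a mismatch reveals nothing through timing. A minimal, self-contained sketch of that pattern (the helper name is illustrative, not part of crypto/tls):

package main

import (
	"crypto/subtle"
	"fmt"
)

// equalVerifyData reports whether got matches want without leaking the
// position of the first differing byte through timing.
func equalVerifyData(want, got []byte) bool {
	// ConstantTimeCompare returns 1 only for equal contents and runs in
	// time that depends on the length alone.
	return len(want) == len(got) && subtle.ConstantTimeCompare(want, got) == 1
}

func main() {
	fmt.Println(equalVerifyData([]byte("abc"), []byte("abc"))) // true
	fmt.Println(equalVerifyData([]byte("abc"), []byte("abd"))) // false
}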
-func (hs *serverHandshakeState) sendSessionTicket() error {
- // ticketSupported is set in a resumption handshake if the
- // ticket from the client was encrypted with an old session
- // ticket key and thus a refreshed ticket should be sent.
- if !hs.hello.ticketSupported {
- return nil
- }
-
- c := hs.c
- m := new(newSessionTicketMsg)
-
- createdAt := uint64(c.config.time().Unix())
- if hs.sessionState != nil {
- // If this is re-wrapping an old key, then keep
- // the original time it was created.
- createdAt = hs.sessionState.createdAt
- }
-
- var certsFromClient [][]byte
- for _, cert := range c.peerCertificates {
- certsFromClient = append(certsFromClient, cert.Raw)
- }
- state := sessionState{
- vers: c.vers,
- cipherSuite: hs.suite.id,
- createdAt: createdAt,
- masterSecret: hs.masterSecret,
- certificates: certsFromClient,
- }
- var err error
- m.ticket, err = c.encryptTicket(state.marshal())
- if err != nil {
- return err
- }
-
- hs.finishedHash.Write(m.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil {
- return err
- }
-
- return nil
-}
-
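The refresh path above (re-wrapping a ticket that was encrypted under an old key) is driven by the key list installed through the public crypto/tls API: the first key encrypts new tickets, while every listed key remains valid for decryption. A minimal rotation sketch, assuming the caller decides the rotation policy; the helper is illustrative:

package main

import (
	"crypto/rand"
	"crypto/tls"
)

// installTicketKeys makes a fresh key the encryption key while keeping
// old (if any) valid for decryption, so tickets issued under old are
// accepted and re-wrapped instead of rejected.
func installTicketKeys(cfg *tls.Config, old *[32]byte) [32]byte {
	var current [32]byte
	if _, err := rand.Read(current[:]); err != nil {
		panic(err)
	}
	keys := [][32]byte{current}
	if old != nil {
		keys = append(keys, *old)
	}
	cfg.SetSessionTicketKeys(keys)
	return current
}

func main() {
	cfg := &tls.Config{}
	k1 := installTicketKeys(cfg, nil)
	installTicketKeys(cfg, &k1) // rotate: k1 becomes decrypt-only
}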
-func (hs *serverHandshakeState) sendFinished(out []byte) error {
- c := hs.c
-
- if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
- return err
- }
-
- finished := new(finishedMsg)
- finished.verifyData = hs.finishedHash.serverSum(hs.masterSecret)
- hs.finishedHash.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
- return err
- }
-
- copy(out, finished.verifyData)
-
- return nil
-}
-
-// processCertsFromClient takes a chain of client certificates either from a
-// Certificates message or from a sessionState and verifies them. It stores
-// the resulting certificates and verified chains on the connection.
-func (c *Conn) processCertsFromClient(certificate Certificate) error {
- certificates := certificate.Certificate
- certs := make([]*x509.Certificate, len(certificates))
- var err error
- for i, asn1Data := range certificates {
- if certs[i], err = x509.ParseCertificate(asn1Data); err != nil {
- c.sendAlert(alertBadCertificate)
- return errors.New("tls: failed to parse client certificate: " + err.Error())
- }
- }
-
- if len(certs) == 0 && requiresClientCert(c.config.ClientAuth) {
- c.sendAlert(alertBadCertificate)
- return errors.New("tls: client didn't provide a certificate")
- }
-
- if c.config.ClientAuth >= VerifyClientCertIfGiven && len(certs) > 0 {
- opts := x509.VerifyOptions{
- Roots: c.config.ClientCAs,
- CurrentTime: c.config.time(),
- Intermediates: x509.NewCertPool(),
- KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
- }
-
- for _, cert := range certs[1:] {
- opts.Intermediates.AddCert(cert)
- }
-
- chains, err := certs[0].Verify(opts)
- if err != nil {
- c.sendAlert(alertBadCertificate)
- return errors.New("tls: failed to verify client certificate: " + err.Error())
- }
-
- c.verifiedChains = chains
- }
-
- c.peerCertificates = certs
- c.ocspResponse = certificate.OCSPStaple
- c.scts = certificate.SignedCertificateTimestamps
-
- if len(certs) > 0 {
- switch certs[0].PublicKey.(type) {
- case *ecdsa.PublicKey, *rsa.PublicKey, ed25519.PublicKey:
- default:
- c.sendAlert(alertUnsupportedCertificate)
- return fmt.Errorf("tls: client certificate contains an unsupported public key of type %T", certs[0].PublicKey)
- }
- }
-
- if c.config.VerifyPeerCertificate != nil {
- if err := c.config.VerifyPeerCertificate(certificates, c.verifiedChains); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- return nil
-}
-
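processCertsFromClient enforces whatever policy Config.ClientAuth, ClientCAs, and VerifyPeerCertificate express. A minimal server-side configuration sketch using only the public API; the CA file path is hypothetical:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"errors"
	"os"
)

func serverConfig() (*tls.Config, error) {
	pool := x509.NewCertPool()
	pemBytes, err := os.ReadFile("client-ca.pem") // hypothetical path
	if err != nil {
		return nil, err
	}
	if !pool.AppendCertsFromPEM(pemBytes) {
		return nil, errors.New("no CA certificates parsed")
	}
	return &tls.Config{
		ClientAuth: tls.RequireAndVerifyClientCert,
		ClientCAs:  pool,
		// Runs after chain building, with the raw certs and the
		// verified chains, for extra application-level checks.
		VerifyPeerCertificate: func(raw [][]byte, chains [][]*x509.Certificate) error {
			if len(chains) == 0 {
				return errors.New("no verified chains")
			}
			return nil
		},
	}, nil
}

func main() {
	if _, err := serverConfig(); err != nil {
		println(err.Error())
	}
}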
-func clientHelloInfo(ctx context.Context, c *Conn, clientHello *clientHelloMsg) *ClientHelloInfo {
- supportedVersions := clientHello.supportedVersions
- if len(clientHello.supportedVersions) == 0 {
- supportedVersions = supportedVersionsFromMax(clientHello.vers)
- }
-
- return &ClientHelloInfo{
- CipherSuites: clientHello.cipherSuites,
- ServerName: clientHello.serverName,
- SupportedCurves: clientHello.supportedCurves,
- SupportedPoints: clientHello.supportedPoints,
- SignatureSchemes: clientHello.supportedSignatureAlgorithms,
- SupportedProtos: clientHello.alpnProtocols,
- SupportedVersions: supportedVersions,
- Conn: c.conn,
- config: c.config,
- ctx: ctx,
- }
-}
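The ClientHelloInfo built here is exactly what callbacks such as Config.GetCertificate receive, which is the usual hook for per-SNI certificate selection. A minimal sketch (the helper name is ours):

package main

import (
	"crypto/tls"
	"errors"
)

// newSNIConfig selects a certificate by the SNI name carried in the
// ClientHello, falling back to a handshake failure if none matches.
func newSNIConfig(certs map[string]*tls.Certificate) *tls.Config {
	return &tls.Config{
		GetCertificate: func(chi *tls.ClientHelloInfo) (*tls.Certificate, error) {
			if c, ok := certs[chi.ServerName]; ok {
				return c, nil
			}
			return nil, errors.New("no certificate for " + chi.ServerName)
		},
	}
}

func main() {
	_ = newSNIConfig(map[string]*tls.Certificate{})
}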
diff --git a/contrib/go/_std_1.18/src/crypto/tls/handshake_server_tls13.go b/contrib/go/_std_1.18/src/crypto/tls/handshake_server_tls13.go
deleted file mode 100644
index 54e612ae04..0000000000
--- a/contrib/go/_std_1.18/src/crypto/tls/handshake_server_tls13.go
+++ /dev/null
@@ -1,872 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tls
-
-import (
- "bytes"
- "context"
- "crypto"
- "crypto/hmac"
- "crypto/rsa"
- "encoding/binary"
- "errors"
- "hash"
- "io"
- "sync/atomic"
- "time"
-)
-
-// maxClientPSKIdentities is the number of client PSK identities the server will
-// attempt to validate. It will ignore the rest so that cheap ClientHello
-// messages cannot cause too much work in session ticket decryption attempts.
-const maxClientPSKIdentities = 5
-
-type serverHandshakeStateTLS13 struct {
- c *Conn
- ctx context.Context
- clientHello *clientHelloMsg
- hello *serverHelloMsg
- sentDummyCCS bool
- usingPSK bool
- suite *cipherSuiteTLS13
- cert *Certificate
- sigAlg SignatureScheme
- earlySecret []byte
- sharedKey []byte
- handshakeSecret []byte
- masterSecret []byte
- trafficSecret []byte // client_application_traffic_secret_0
- transcript hash.Hash
- clientFinished []byte
-}
-
-func (hs *serverHandshakeStateTLS13) handshake() error {
- c := hs.c
-
- // For an overview of the TLS 1.3 handshake, see RFC 8446, Section 2.
- if err := hs.processClientHello(); err != nil {
- return err
- }
- if err := hs.checkForResumption(); err != nil {
- return err
- }
- if err := hs.pickCertificate(); err != nil {
- return err
- }
- c.buffering = true
- if err := hs.sendServerParameters(); err != nil {
- return err
- }
- if err := hs.sendServerCertificate(); err != nil {
- return err
- }
- if err := hs.sendServerFinished(); err != nil {
- return err
- }
- // Note that at this point we could start sending application data without
- // waiting for the client's second flight, but the application might not
- // expect the lack of replay protection of the ClientHello parameters.
- if _, err := c.flush(); err != nil {
- return err
- }
- if err := hs.readClientCertificate(); err != nil {
- return err
- }
- if err := hs.readClientFinished(); err != nil {
- return err
- }
-
- atomic.StoreUint32(&c.handshakeStatus, 1)
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) processClientHello() error {
- c := hs.c
-
- hs.hello = new(serverHelloMsg)
-
- // TLS 1.3 froze the ServerHello.legacy_version field, and uses
- // supported_versions instead. See RFC 8446, sections 4.1.3 and 4.2.1.
- hs.hello.vers = VersionTLS12
- hs.hello.supportedVersion = c.vers
-
- if len(hs.clientHello.supportedVersions) == 0 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client used the legacy version field to negotiate TLS 1.3")
- }
-
- // Abort if the client is doing a fallback and landing lower than what we
- // support. See RFC 7507, which however does not specify the interaction
- // with supported_versions. The only difference is that with
- // supported_versions a client has a chance to attempt a [TLS 1.2, TLS 1.4]
- // handshake in case TLS 1.3 is broken but 1.2 is not. Alas, in that case,
- // it will have to drop the TLS_FALLBACK_SCSV protection if it falls back to
- // TLS 1.2, because a TLS 1.3 server would abort here. The situation before
- // supported_versions was not better because there was just no way to do a
- // TLS 1.4 handshake without risking the server selecting TLS 1.3.
- for _, id := range hs.clientHello.cipherSuites {
- if id == TLS_FALLBACK_SCSV {
- // Use c.vers instead of max(supported_versions) because an attacker
- // could otherwise defeat this by adding an arbitrarily high version.
- if c.vers < c.config.maxSupportedVersion(roleServer) {
- c.sendAlert(alertInappropriateFallback)
- return errors.New("tls: client using inappropriate protocol fallback")
- }
- break
- }
- }
-
- if len(hs.clientHello.compressionMethods) != 1 ||
- hs.clientHello.compressionMethods[0] != compressionNone {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: TLS 1.3 client supports illegal compression methods")
- }
-
- hs.hello.random = make([]byte, 32)
- if _, err := io.ReadFull(c.config.rand(), hs.hello.random); err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- if len(hs.clientHello.secureRenegotiation) != 0 {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: initial handshake had non-empty renegotiation extension")
- }
-
- if hs.clientHello.earlyData {
- // See RFC 8446, Section 4.2.10 for the complicated behavior required
- // here. The scenario is that a different server at our address offered
- // to accept early data in the past, which we can't handle. For now, all
- // 0-RTT enabled session tickets need to expire before a Go server can
- // replace a server or join a pool. That's the same requirement that
- // applies to mixing or replacing with any TLS 1.2 server.
- c.sendAlert(alertUnsupportedExtension)
- return errors.New("tls: client sent unexpected early data")
- }
-
- hs.hello.sessionId = hs.clientHello.sessionId
- hs.hello.compressionMethod = compressionNone
-
- preferenceList := defaultCipherSuitesTLS13
- if !hasAESGCMHardwareSupport || !aesgcmPreferred(hs.clientHello.cipherSuites) {
- preferenceList = defaultCipherSuitesTLS13NoAES
- }
- for _, suiteID := range preferenceList {
- hs.suite = mutualCipherSuiteTLS13(hs.clientHello.cipherSuites, suiteID)
- if hs.suite != nil {
- break
- }
- }
- if hs.suite == nil {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: no cipher suite supported by both client and server")
- }
- c.cipherSuite = hs.suite.id
- hs.hello.cipherSuite = hs.suite.id
- hs.transcript = hs.suite.hash.New()
-
- // Pick the ECDHE group in server preference order, but give priority to
- // groups with a key share, to avoid a HelloRetryRequest round-trip.
- var selectedGroup CurveID
- var clientKeyShare *keyShare
-GroupSelection:
- for _, preferredGroup := range c.config.curvePreferences() {
- for _, ks := range hs.clientHello.keyShares {
- if ks.group == preferredGroup {
- selectedGroup = ks.group
- clientKeyShare = &ks
- break GroupSelection
- }
- }
- if selectedGroup != 0 {
- continue
- }
- for _, group := range hs.clientHello.supportedCurves {
- if group == preferredGroup {
- selectedGroup = group
- break
- }
- }
- }
- if selectedGroup == 0 {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: no ECDHE curve supported by both client and server")
- }
- if clientKeyShare == nil {
- if err := hs.doHelloRetryRequest(selectedGroup); err != nil {
- return err
- }
- clientKeyShare = &hs.clientHello.keyShares[0]
- }
-
- if _, ok := curveForCurveID(selectedGroup); selectedGroup != X25519 && !ok {
- c.sendAlert(alertInternalError)
- return errors.New("tls: CurvePreferences includes unsupported curve")
- }
- params, err := generateECDHEParameters(c.config.rand(), selectedGroup)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- hs.hello.serverShare = keyShare{group: selectedGroup, data: params.PublicKey()}
- hs.sharedKey = params.SharedKey(clientKeyShare.data)
- if hs.sharedKey == nil {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: invalid client key share")
- }
-
- c.serverName = hs.clientHello.serverName
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) checkForResumption() error {
- c := hs.c
-
- if c.config.SessionTicketsDisabled {
- return nil
- }
-
- modeOK := false
- for _, mode := range hs.clientHello.pskModes {
- if mode == pskModeDHE {
- modeOK = true
- break
- }
- }
- if !modeOK {
- return nil
- }
-
- if len(hs.clientHello.pskIdentities) != len(hs.clientHello.pskBinders) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: invalid or missing PSK binders")
- }
- if len(hs.clientHello.pskIdentities) == 0 {
- return nil
- }
-
- for i, identity := range hs.clientHello.pskIdentities {
- if i >= maxClientPSKIdentities {
- break
- }
-
- plaintext, _ := c.decryptTicket(identity.label)
- if plaintext == nil {
- continue
- }
- sessionState := new(sessionStateTLS13)
- if ok := sessionState.unmarshal(plaintext); !ok {
- continue
- }
-
- createdAt := time.Unix(int64(sessionState.createdAt), 0)
- if c.config.time().Sub(createdAt) > maxSessionTicketLifetime {
- continue
- }
-
- // We don't check the obfuscated ticket age because it's affected by
- // clock skew and it's only a freshness signal useful for shrinking the
- // window for replay attacks, which don't affect us as we don't do 0-RTT.
-
- pskSuite := cipherSuiteTLS13ByID(sessionState.cipherSuite)
- if pskSuite == nil || pskSuite.hash != hs.suite.hash {
- continue
- }
-
- // PSK connections don't re-establish client certificates, but carry
- // them over in the session ticket. Ensure the presence of client certs
- // in the ticket is consistent with the configured requirements.
- sessionHasClientCerts := len(sessionState.certificate.Certificate) != 0
- needClientCerts := requiresClientCert(c.config.ClientAuth)
- if needClientCerts && !sessionHasClientCerts {
- continue
- }
- if sessionHasClientCerts && c.config.ClientAuth == NoClientCert {
- continue
- }
-
- psk := hs.suite.expandLabel(sessionState.resumptionSecret, "resumption",
- nil, hs.suite.hash.Size())
- hs.earlySecret = hs.suite.extract(psk, nil)
- binderKey := hs.suite.deriveSecret(hs.earlySecret, resumptionBinderLabel, nil)
- // Clone the transcript in case a HelloRetryRequest was recorded.
- transcript := cloneHash(hs.transcript, hs.suite.hash)
- if transcript == nil {
- c.sendAlert(alertInternalError)
- return errors.New("tls: internal error: failed to clone hash")
- }
- transcript.Write(hs.clientHello.marshalWithoutBinders())
- pskBinder := hs.suite.finishedHash(binderKey, transcript)
- if !hmac.Equal(hs.clientHello.pskBinders[i], pskBinder) {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid PSK binder")
- }
-
- c.didResume = true
- if err := c.processCertsFromClient(sessionState.certificate); err != nil {
- return err
- }
-
- hs.hello.selectedIdentityPresent = true
- hs.hello.selectedIdentity = uint16(i)
- hs.usingPSK = true
- return nil
- }
-
- return nil
-}
-
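The binder check above uses hmac.Equal, a constant-time MAC comparison. A small isolated sketch of computing and checking an HMAC the same way, with an illustrative key and message:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
)

func main() {
	key := []byte("binder key (illustrative)")
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte("transcript without binders"))
	binder := mac.Sum(nil)
	// hmac.Equal compares MACs without leaking timing information,
	// like the PSK binder check above.
	fmt.Println(hmac.Equal(binder, binder)) // true
}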
-// cloneHash uses the encoding.BinaryMarshaler and encoding.BinaryUnmarshaler
-// interfaces implemented by standard library hashes to clone the state of in
-// to a new instance of h. It returns nil if the operation fails.
-func cloneHash(in hash.Hash, h crypto.Hash) hash.Hash {
- // Recreate the interface to avoid importing encoding.
- type binaryMarshaler interface {
- MarshalBinary() (data []byte, err error)
- UnmarshalBinary(data []byte) error
- }
- marshaler, ok := in.(binaryMarshaler)
- if !ok {
- return nil
- }
- state, err := marshaler.MarshalBinary()
- if err != nil {
- return nil
- }
- out := h.New()
- unmarshaler, ok := out.(binaryMarshaler)
- if !ok {
- return nil
- }
- if err := unmarshaler.UnmarshalBinary(state); err != nil {
- return nil
- }
- return out
-}
-
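cloneHash works because the standard library hash implementations support encoding.BinaryMarshaler and encoding.BinaryUnmarshaler, so a running transcript can be snapshotted. The same trick in isolation, with an illustrative helper over sha256:

package main

import (
	"crypto/sha256"
	"encoding"
	"fmt"
	"hash"
)

// cloneState snapshots a running hash into a fresh instance by
// marshaling its internal state and unmarshaling it into a new one.
func cloneState(in hash.Hash, fresh func() hash.Hash) (hash.Hash, error) {
	m, ok := in.(encoding.BinaryMarshaler)
	if !ok {
		return nil, fmt.Errorf("hash does not support marshaling")
	}
	state, err := m.MarshalBinary()
	if err != nil {
		return nil, err
	}
	out := fresh()
	u, ok := out.(encoding.BinaryUnmarshaler)
	if !ok {
		return nil, fmt.Errorf("hash does not support unmarshaling")
	}
	if err := u.UnmarshalBinary(state); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	h := sha256.New()
	h.Write([]byte("transcript so far"))
	clone, err := cloneState(h, func() hash.Hash { return sha256.New() })
	if err != nil {
		panic(err)
	}
	h.Write([]byte(" plus a HelloRetryRequest"))
	fmt.Printf("%x\n%x\n", h.Sum(nil), clone.Sum(nil)) // now diverge
}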
-func (hs *serverHandshakeStateTLS13) pickCertificate() error {
- c := hs.c
-
- // Only one of PSK and certificates is used at a time.
- if hs.usingPSK {
- return nil
- }
-
- // signature_algorithms is required in TLS 1.3. See RFC 8446, Section 4.2.3.
- if len(hs.clientHello.supportedSignatureAlgorithms) == 0 {
- return c.sendAlert(alertMissingExtension)
- }
-
- certificate, err := c.config.getCertificate(clientHelloInfo(hs.ctx, c, hs.clientHello))
- if err != nil {
- if err == errNoCertificates {
- c.sendAlert(alertUnrecognizedName)
- } else {
- c.sendAlert(alertInternalError)
- }
- return err
- }
- hs.sigAlg, err = selectSignatureScheme(c.vers, certificate, hs.clientHello.supportedSignatureAlgorithms)
- if err != nil {
- // getCertificate returned a certificate that is unsupported or
- // incompatible with the client's signature algorithms.
- c.sendAlert(alertHandshakeFailure)
- return err
- }
- hs.cert = certificate
-
- return nil
-}
-
-// sendDummyChangeCipherSpec sends a ChangeCipherSpec record for compatibility
-// with middleboxes that didn't implement TLS correctly. See RFC 8446, Appendix D.4.
-func (hs *serverHandshakeStateTLS13) sendDummyChangeCipherSpec() error {
- if hs.sentDummyCCS {
- return nil
- }
- hs.sentDummyCCS = true
-
- _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
- return err
-}
-
-func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID) error {
- c := hs.c
-
- // The first ClientHello gets double-hashed into the transcript upon a
- // HelloRetryRequest. See RFC 8446, Section 4.4.1.
- hs.transcript.Write(hs.clientHello.marshal())
- chHash := hs.transcript.Sum(nil)
- hs.transcript.Reset()
- hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
- hs.transcript.Write(chHash)
-
- helloRetryRequest := &serverHelloMsg{
- vers: hs.hello.vers,
- random: helloRetryRequestRandom,
- sessionId: hs.hello.sessionId,
- cipherSuite: hs.hello.cipherSuite,
- compressionMethod: hs.hello.compressionMethod,
- supportedVersion: hs.hello.supportedVersion,
- selectedGroup: selectedGroup,
- }
-
- hs.transcript.Write(helloRetryRequest.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, helloRetryRequest.marshal()); err != nil {
- return err
- }
-
- if err := hs.sendDummyChangeCipherSpec(); err != nil {
- return err
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- clientHello, ok := msg.(*clientHelloMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(clientHello, msg)
- }
-
- if len(clientHello.keyShares) != 1 || clientHello.keyShares[0].group != selectedGroup {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client sent invalid key share in second ClientHello")
- }
-
- if clientHello.earlyData {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client indicated early data in second ClientHello")
- }
-
- if illegalClientHelloChange(clientHello, hs.clientHello) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client illegally modified second ClientHello")
- }
-
- hs.clientHello = clientHello
- return nil
-}
-
-// illegalClientHelloChange reports whether the two ClientHello messages are
-// different, with the exception of the changes allowed before and after a
-// HelloRetryRequest. See RFC 8446, Section 4.1.2.
-func illegalClientHelloChange(ch, ch1 *clientHelloMsg) bool {
- if len(ch.supportedVersions) != len(ch1.supportedVersions) ||
- len(ch.cipherSuites) != len(ch1.cipherSuites) ||
- len(ch.supportedCurves) != len(ch1.supportedCurves) ||
- len(ch.supportedSignatureAlgorithms) != len(ch1.supportedSignatureAlgorithms) ||
- len(ch.supportedSignatureAlgorithmsCert) != len(ch1.supportedSignatureAlgorithmsCert) ||
- len(ch.alpnProtocols) != len(ch1.alpnProtocols) {
- return true
- }
- for i := range ch.supportedVersions {
- if ch.supportedVersions[i] != ch1.supportedVersions[i] {
- return true
- }
- }
- for i := range ch.cipherSuites {
- if ch.cipherSuites[i] != ch1.cipherSuites[i] {
- return true
- }
- }
- for i := range ch.supportedCurves {
- if ch.supportedCurves[i] != ch1.supportedCurves[i] {
- return true
- }
- }
- for i := range ch.supportedSignatureAlgorithms {
- if ch.supportedSignatureAlgorithms[i] != ch1.supportedSignatureAlgorithms[i] {
- return true
- }
- }
- for i := range ch.supportedSignatureAlgorithmsCert {
- if ch.supportedSignatureAlgorithmsCert[i] != ch1.supportedSignatureAlgorithmsCert[i] {
- return true
- }
- }
- for i := range ch.alpnProtocols {
- if ch.alpnProtocols[i] != ch1.alpnProtocols[i] {
- return true
- }
- }
- return ch.vers != ch1.vers ||
- !bytes.Equal(ch.random, ch1.random) ||
- !bytes.Equal(ch.sessionId, ch1.sessionId) ||
- !bytes.Equal(ch.compressionMethods, ch1.compressionMethods) ||
- ch.serverName != ch1.serverName ||
- ch.ocspStapling != ch1.ocspStapling ||
- !bytes.Equal(ch.supportedPoints, ch1.supportedPoints) ||
- ch.ticketSupported != ch1.ticketSupported ||
- !bytes.Equal(ch.sessionTicket, ch1.sessionTicket) ||
- ch.secureRenegotiationSupported != ch1.secureRenegotiationSupported ||
- !bytes.Equal(ch.secureRenegotiation, ch1.secureRenegotiation) ||
- ch.scts != ch1.scts ||
- !bytes.Equal(ch.cookie, ch1.cookie) ||
- !bytes.Equal(ch.pskModes, ch1.pskModes)
-}
-
-func (hs *serverHandshakeStateTLS13) sendServerParameters() error {
- c := hs.c
-
- hs.transcript.Write(hs.clientHello.marshal())
- hs.transcript.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
- return err
- }
-
- if err := hs.sendDummyChangeCipherSpec(); err != nil {
- return err
- }
-
- earlySecret := hs.earlySecret
- if earlySecret == nil {
- earlySecret = hs.suite.extract(nil, nil)
- }
- hs.handshakeSecret = hs.suite.extract(hs.sharedKey,
- hs.suite.deriveSecret(earlySecret, "derived", nil))
-
- clientSecret := hs.suite.deriveSecret(hs.handshakeSecret,
- clientHandshakeTrafficLabel, hs.transcript)
- c.in.setTrafficSecret(hs.suite, clientSecret)
- serverSecret := hs.suite.deriveSecret(hs.handshakeSecret,
- serverHandshakeTrafficLabel, hs.transcript)
- c.out.setTrafficSecret(hs.suite, serverSecret)
-
- err := c.config.writeKeyLog(keyLogLabelClientHandshake, hs.clientHello.random, clientSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- err = c.config.writeKeyLog(keyLogLabelServerHandshake, hs.clientHello.random, serverSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- encryptedExtensions := new(encryptedExtensionsMsg)
-
- selectedProto, err := negotiateALPN(c.config.NextProtos, hs.clientHello.alpnProtocols)
- if err != nil {
- c.sendAlert(alertNoApplicationProtocol)
- return err
- }
- encryptedExtensions.alpnProtocol = selectedProto
- c.clientProtocol = selectedProto
-
- hs.transcript.Write(encryptedExtensions.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, encryptedExtensions.marshal()); err != nil {
- return err
- }
-
- return nil
-}
-
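negotiateALPN matches the server's NextProtos preference list against the protocols the client offered. A minimal pair of configurations; after a real handshake the outcome surfaces as ConnectionState().NegotiatedProtocol:

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// Server preference order; the negotiation picks the first
	// server-preferred entry that the client also offers.
	server := &tls.Config{NextProtos: []string{"h2", "http/1.1"}}
	// Client side: the offered protocols travel in the ClientHello.
	client := &tls.Config{NextProtos: []string{"http/1.1"}}
	fmt.Println(server.NextProtos, client.NextProtos)
	// For this pair, ConnectionState().NegotiatedProtocol would
	// report "http/1.1" after the handshake.
}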
-func (hs *serverHandshakeStateTLS13) requestClientCert() bool {
- return hs.c.config.ClientAuth >= RequestClientCert && !hs.usingPSK
-}
-
-func (hs *serverHandshakeStateTLS13) sendServerCertificate() error {
- c := hs.c
-
- // Only one of PSK and certificates is used at a time.
- if hs.usingPSK {
- return nil
- }
-
- if hs.requestClientCert() {
- // Request a client certificate
- certReq := new(certificateRequestMsgTLS13)
- certReq.ocspStapling = true
- certReq.scts = true
- certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms
- if c.config.ClientCAs != nil {
- certReq.certificateAuthorities = c.config.ClientCAs.Subjects()
- }
-
- hs.transcript.Write(certReq.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil {
- return err
- }
- }
-
- certMsg := new(certificateMsgTLS13)
-
- certMsg.certificate = *hs.cert
- certMsg.scts = hs.clientHello.scts && len(hs.cert.SignedCertificateTimestamps) > 0
- certMsg.ocspStapling = hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0
-
- hs.transcript.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
- return err
- }
-
- certVerifyMsg := new(certificateVerifyMsg)
- certVerifyMsg.hasSignatureAlgorithm = true
- certVerifyMsg.signatureAlgorithm = hs.sigAlg
-
- sigType, sigHash, err := typeAndHashFromSignatureScheme(hs.sigAlg)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
-
- signed := signedMessage(sigHash, serverSignatureContext, hs.transcript)
- signOpts := crypto.SignerOpts(sigHash)
- if sigType == signatureRSAPSS {
- signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}
- }
- sig, err := hs.cert.PrivateKey.(crypto.Signer).Sign(c.config.rand(), signed, signOpts)
- if err != nil {
- public := hs.cert.PrivateKey.(crypto.Signer).Public()
- if rsaKey, ok := public.(*rsa.PublicKey); ok && sigType == signatureRSAPSS &&
- rsaKey.N.BitLen()/8 < sigHash.Size()*2+2 { // key too small for RSA-PSS
- c.sendAlert(alertHandshakeFailure)
- } else {
- c.sendAlert(alertInternalError)
- }
- return errors.New("tls: failed to sign handshake: " + err.Error())
- }
- certVerifyMsg.signature = sig
-
- hs.transcript.Write(certVerifyMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) sendServerFinished() error {
- c := hs.c
-
- finished := &finishedMsg{
- verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript),
- }
-
- hs.transcript.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
- return err
- }
-
- // Derive secrets that take context through the server Finished.
-
- hs.masterSecret = hs.suite.extract(nil,
- hs.suite.deriveSecret(hs.handshakeSecret, "derived", nil))
-
- hs.trafficSecret = hs.suite.deriveSecret(hs.masterSecret,
- clientApplicationTrafficLabel, hs.transcript)
- serverSecret := hs.suite.deriveSecret(hs.masterSecret,
- serverApplicationTrafficLabel, hs.transcript)
- c.out.setTrafficSecret(hs.suite, serverSecret)
-
- err := c.config.writeKeyLog(keyLogLabelClientTraffic, hs.clientHello.random, hs.trafficSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- err = c.config.writeKeyLog(keyLogLabelServerTraffic, hs.clientHello.random, serverSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- c.ekm = hs.suite.exportKeyingMaterial(hs.masterSecret, hs.transcript)
-
- // If we did not request client certificates, at this point we can
- // precompute the client finished and roll the transcript forward to send
- // session tickets in our first flight.
- if !hs.requestClientCert() {
- if err := hs.sendSessionTickets(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) shouldSendSessionTickets() bool {
- if hs.c.config.SessionTicketsDisabled {
- return false
- }
-
- // Don't send tickets the client wouldn't use. See RFC 8446, Section 4.2.9.
- for _, pskMode := range hs.clientHello.pskModes {
- if pskMode == pskModeDHE {
- return true
- }
- }
- return false
-}
-
-func (hs *serverHandshakeStateTLS13) sendSessionTickets() error {
- c := hs.c
-
- hs.clientFinished = hs.suite.finishedHash(c.in.trafficSecret, hs.transcript)
- finishedMsg := &finishedMsg{
- verifyData: hs.clientFinished,
- }
- hs.transcript.Write(finishedMsg.marshal())
-
- if !hs.shouldSendSessionTickets() {
- return nil
- }
-
- resumptionSecret := hs.suite.deriveSecret(hs.masterSecret,
- resumptionLabel, hs.transcript)
-
- m := new(newSessionTicketMsgTLS13)
-
- var certsFromClient [][]byte
- for _, cert := range c.peerCertificates {
- certsFromClient = append(certsFromClient, cert.Raw)
- }
- state := sessionStateTLS13{
- cipherSuite: hs.suite.id,
- createdAt: uint64(c.config.time().Unix()),
- resumptionSecret: resumptionSecret,
- certificate: Certificate{
- Certificate: certsFromClient,
- OCSPStaple: c.ocspResponse,
- SignedCertificateTimestamps: c.scts,
- },
- }
- var err error
- m.label, err = c.encryptTicket(state.marshal())
- if err != nil {
- return err
- }
- m.lifetime = uint32(maxSessionTicketLifetime / time.Second)
-
- // ticket_age_add is a random 32-bit value. See RFC 8446, Section 4.6.1.
- // The value is not stored anywhere; we never need to check the ticket age
- // because 0-RTT is not supported.
- ageAdd := make([]byte, 4)
- _, err = hs.c.config.rand().Read(ageAdd)
- if err != nil {
- return err
- }
- m.ageAdd = binary.LittleEndian.Uint32(ageAdd)
-
- // ticket_nonce, which must be unique per connection, is always left at
- // zero because we only ever send one ticket per connection.
-
- if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil {
- return err
- }
-
- return nil
-}
-
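ticket_age_add above is four CSPRNG bytes reinterpreted as a uint32; byte order is irrelevant for a uniformly random value. The same pattern in isolation:

package main

import (
	"crypto/rand"
	"encoding/binary"
	"fmt"
)

func main() {
	// Draw 4 bytes from the CSPRNG and decode them as a 32-bit value.
	var b [4]byte
	if _, err := rand.Read(b[:]); err != nil {
		panic(err)
	}
	fmt.Println(binary.LittleEndian.Uint32(b[:]))
}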
-func (hs *serverHandshakeStateTLS13) readClientCertificate() error {
- c := hs.c
-
- if !hs.requestClientCert() {
- // Make sure the connection is still being verified whether or not
- // the server requested a client certificate.
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
- return nil
- }
-
- // If we requested a client certificate, then the client must send a
- // certificate message. If it's empty, no CertificateVerify is sent.
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- certMsg, ok := msg.(*certificateMsgTLS13)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certMsg, msg)
- }
- hs.transcript.Write(certMsg.marshal())
-
- if err := c.processCertsFromClient(certMsg.certificate); err != nil {
- return err
- }
-
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- if len(certMsg.certificate.Certificate) != 0 {
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
-
- certVerify, ok := msg.(*certificateVerifyMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certVerify, msg)
- }
-
- // See RFC 8446, Section 4.4.3.
- if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client certificate used with invalid signature algorithm")
- }
- sigType, sigHash, err := typeAndHashFromSignatureScheme(certVerify.signatureAlgorithm)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
- if sigType == signaturePKCS1v15 || sigHash == crypto.SHA1 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client certificate used with invalid signature algorithm")
- }
- signed := signedMessage(sigHash, clientSignatureContext, hs.transcript)
- if err := verifyHandshakeSignature(sigType, c.peerCertificates[0].PublicKey,
- sigHash, signed, certVerify.signature); err != nil {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid signature by the client certificate: " + err.Error())
- }
-
- hs.transcript.Write(certVerify.marshal())
- }
-
- // If we waited for the client certificates before sending session
- // tickets, we are ready to send them now.
- if err := hs.sendSessionTickets(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) readClientFinished() error {
- c := hs.c
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- finished, ok := msg.(*finishedMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(finished, msg)
- }
-
- if !hmac.Equal(hs.clientFinished, finished.verifyData) {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid client finished hash")
- }
-
- c.in.setTrafficSecret(hs.suite, hs.trafficSecret)
-
- return nil
-}
diff --git a/contrib/go/_std_1.18/src/crypto/x509/cert_pool.go b/contrib/go/_std_1.18/src/crypto/x509/cert_pool.go
deleted file mode 100644
index 873ffeee1d..0000000000
--- a/contrib/go/_std_1.18/src/crypto/x509/cert_pool.go
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package x509
-
-import (
- "bytes"
- "crypto/sha256"
- "encoding/pem"
- "sync"
-)
-
-type sum224 [sha256.Size224]byte
-
-// CertPool is a set of certificates.
-type CertPool struct {
- byName map[string][]int // cert.RawSubject => index into lazyCerts
-
- // lazyCerts contains funcs that return a certificate,
- // lazily parsing/decompressing it as needed.
- lazyCerts []lazyCert
-
- // haveSum maps from sum224(cert.Raw) to true. It's used only
- // for AddCert duplicate detection, to avoid CertPool.contains
- // calls in the AddCert path (because the contains method can
- // call getCert and otherwise negate savings from lazy getCert
- // funcs).
- haveSum map[sum224]bool
-
- // systemPool indicates whether this is a special pool derived from the
- // system roots. If it includes additional roots, it requires doing two
- // verifications, one using the roots provided by the caller, and one using
- // the system platform verifier.
- systemPool bool
-}
-
-// lazyCert is minimal metadata about a Cert and a func to retrieve it
-// in its normal expanded *Certificate form.
-type lazyCert struct {
- // rawSubject is the Certificate.RawSubject value.
- // It's the same as the CertPool.byName key, but in []byte
- // form to make CertPool.Subjects (as used by crypto/tls) do
- // fewer allocations.
- rawSubject []byte
-
- // getCert returns the certificate.
- //
- // It is not meant to do network operations or anything else
- // where a failure is likely; the func is meant to lazily
- // parse/decompress data that is already known to be good. The
- // error in the signature is primarily meant for the case where a
- // cert file that existed on local disk when the program started
- // up is deleted later, before it's read.
- getCert func() (*Certificate, error)
-}
-
-// NewCertPool returns a new, empty CertPool.
-func NewCertPool() *CertPool {
- return &CertPool{
- byName: make(map[string][]int),
- haveSum: make(map[sum224]bool),
- }
-}
-
-// len returns the number of certs in the set.
-// A nil set is a valid empty set.
-func (s *CertPool) len() int {
- if s == nil {
- return 0
- }
- return len(s.lazyCerts)
-}
-
-// cert returns cert index n in s.
-func (s *CertPool) cert(n int) (*Certificate, error) {
- return s.lazyCerts[n].getCert()
-}
-
-func (s *CertPool) copy() *CertPool {
- p := &CertPool{
- byName: make(map[string][]int, len(s.byName)),
- lazyCerts: make([]lazyCert, len(s.lazyCerts)),
- haveSum: make(map[sum224]bool, len(s.haveSum)),
- systemPool: s.systemPool,
- }
- for k, v := range s.byName {
- indexes := make([]int, len(v))
- copy(indexes, v)
- p.byName[k] = indexes
- }
- for k := range s.haveSum {
- p.haveSum[k] = true
- }
- copy(p.lazyCerts, s.lazyCerts)
- return p
-}
-
-// SystemCertPool returns a copy of the system cert pool.
-//
-// On Unix systems other than macOS the environment variables SSL_CERT_FILE and
-// SSL_CERT_DIR can be used to override the system default locations for the SSL
-// certificate file and SSL certificate files directory, respectively. The
-// latter can be a colon-separated list.
-//
-// Any mutations to the returned pool are not written to disk and do not affect
-// any other pool returned by SystemCertPool.
-//
-// New changes in the system cert pool might not be reflected in subsequent calls.
-func SystemCertPool() (*CertPool, error) {
- if sysRoots := systemRootsPool(); sysRoots != nil {
- return sysRoots.copy(), nil
- }
-
- return loadSystemRoots()
-}
-
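A typical consumer pairs SystemCertPool with AppendCertsFromPEM to layer a private CA over the system roots; since the pool is a copy, the additions stay local. A sketch with a hypothetical CA path:

package main

import (
	"crypto/x509"
	"fmt"
	"os"
)

func main() {
	pool, err := x509.SystemCertPool() // a copy; mutations stay local
	if err != nil {
		pool = x509.NewCertPool() // fall back to an empty pool
	}
	// Optionally extend it with a private CA; the path is illustrative.
	if pemBytes, err := os.ReadFile("internal-ca.pem"); err == nil {
		if !pool.AppendCertsFromPEM(pemBytes) {
			fmt.Fprintln(os.Stderr, "no certificates parsed")
		}
	}
	_ = pool // e.g. used as tls.Config.RootCAs
}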
-// findPotentialParents returns the certificates in s which might have
-// signed cert.
-func (s *CertPool) findPotentialParents(cert *Certificate) []*Certificate {
- if s == nil {
- return nil
- }
-
- // Consider all candidates where cert.Issuer matches the candidate's
- // Subject. When picking possible candidates, the list is built in
- // order of match plausibility so as to save cycles in buildChains:
- // AKID and SKID match
- // AKID present, SKID missing / AKID missing, SKID present
- // AKID and SKID don't match
- var matchingKeyID, oneKeyID, mismatchKeyID []*Certificate
- for _, c := range s.byName[string(cert.RawIssuer)] {
- candidate, err := s.cert(c)
- if err != nil {
- continue
- }
- kidMatch := bytes.Equal(candidate.SubjectKeyId, cert.AuthorityKeyId)
- switch {
- case kidMatch:
- matchingKeyID = append(matchingKeyID, candidate)
- case (len(candidate.SubjectKeyId) == 0 && len(cert.AuthorityKeyId) > 0) ||
- (len(candidate.SubjectKeyId) > 0 && len(cert.AuthorityKeyId) == 0):
- oneKeyID = append(oneKeyID, candidate)
- default:
- mismatchKeyID = append(mismatchKeyID, candidate)
- }
- }
-
- found := len(matchingKeyID) + len(oneKeyID) + len(mismatchKeyID)
- if found == 0 {
- return nil
- }
- candidates := make([]*Certificate, 0, found)
- candidates = append(candidates, matchingKeyID...)
- candidates = append(candidates, oneKeyID...)
- candidates = append(candidates, mismatchKeyID...)
- return candidates
-}
-
-func (s *CertPool) contains(cert *Certificate) bool {
- if s == nil {
- return false
- }
- return s.haveSum[sha256.Sum224(cert.Raw)]
-}
-
-// AddCert adds a certificate to a pool.
-func (s *CertPool) AddCert(cert *Certificate) {
- if cert == nil {
- panic("adding nil Certificate to CertPool")
- }
- s.addCertFunc(sha256.Sum224(cert.Raw), string(cert.RawSubject), func() (*Certificate, error) {
- return cert, nil
- })
-}
-
-// addCertFunc adds metadata about a certificate to a pool, along with
-// a func to fetch that certificate later when needed.
-//
-// The rawSubject is Certificate.RawSubject and must be non-empty.
-// The getCert func may be called 0 or more times.
-func (s *CertPool) addCertFunc(rawSum224 sum224, rawSubject string, getCert func() (*Certificate, error)) {
- if getCert == nil {
- panic("getCert can't be nil")
- }
-
- // Check that the certificate isn't being added twice.
- if s.haveSum[rawSum224] {
- return
- }
-
- s.haveSum[rawSum224] = true
- s.lazyCerts = append(s.lazyCerts, lazyCert{
- rawSubject: []byte(rawSubject),
- getCert: getCert,
- })
- s.byName[rawSubject] = append(s.byName[rawSubject], len(s.lazyCerts)-1)
-}
-
-// AppendCertsFromPEM attempts to parse a series of PEM encoded certificates.
-// It appends any certificates found to s and reports whether any certificates
-// were successfully parsed.
-//
-// On many Linux systems, /etc/ssl/cert.pem will contain the system-wide set
-// of root CAs in a format suitable for this function.
-func (s *CertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) {
- for len(pemCerts) > 0 {
- var block *pem.Block
- block, pemCerts = pem.Decode(pemCerts)
- if block == nil {
- break
- }
- if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
- continue
- }
-
- certBytes := block.Bytes
- cert, err := ParseCertificate(certBytes)
- if err != nil {
- continue
- }
- var lazyCert struct {
- sync.Once
- v *Certificate
- }
- s.addCertFunc(sha256.Sum224(cert.Raw), string(cert.RawSubject), func() (*Certificate, error) {
- lazyCert.Do(func() {
- // This can't fail, as the same bytes were already parsed above.
- lazyCert.v, _ = ParseCertificate(certBytes)
- certBytes = nil
- })
- return lazyCert.v, nil
- })
- ok = true
- }
-
- return ok
-}
-
-// Subjects returns a list of the DER-encoded subjects of
-// all of the certificates in the pool.
-//
-// Deprecated: if s was returned by SystemCertPool, Subjects
-// will not include the system roots.
-func (s *CertPool) Subjects() [][]byte {
- res := make([][]byte, s.len())
- for i, lc := range s.lazyCerts {
- res[i] = lc.rawSubject
- }
- return res
-}
diff --git a/contrib/go/_std_1.18/src/crypto/x509/internal/macos/corefoundation.go b/contrib/go/_std_1.18/src/crypto/x509/internal/macos/corefoundation.go
deleted file mode 100644
index 75c212910b..0000000000
--- a/contrib/go/_std_1.18/src/crypto/x509/internal/macos/corefoundation.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build darwin
-
-// Package macOS provides cgo-less wrappers for Core Foundation and
-// Security.framework, similarly to how package syscall provides access to
-// libSystem.dylib.
-package macOS
-
-import (
- "errors"
- "internal/abi"
- "reflect"
- "runtime"
- "time"
- "unsafe"
-)
-
-// Core Foundation linker flags for the external linker. See Issue 42459.
-//go:cgo_ldflag "-framework"
-//go:cgo_ldflag "CoreFoundation"
-
-// CFRef is an opaque reference to a Core Foundation object. It is a pointer,
-// but to memory not owned by Go, so not an unsafe.Pointer.
-type CFRef uintptr
-
-// CFDataToSlice returns a copy of the contents of data as a byte slice.
-func CFDataToSlice(data CFRef) []byte {
- length := CFDataGetLength(data)
- ptr := CFDataGetBytePtr(data)
- src := (*[1 << 20]byte)(unsafe.Pointer(ptr))[:length:length]
- out := make([]byte, length)
- copy(out, src)
- return out
-}
-
-// CFStringToString returns a Go string representation of the passed
-// in CFString.
-func CFStringToString(ref CFRef) string {
- data := CFStringCreateExternalRepresentation(ref)
- b := CFDataToSlice(data)
- CFRelease(data)
- return string(b)
-}
-
-// TimeToCFDateRef converts a time.Time into an Apple CFDateRef.
-func TimeToCFDateRef(t time.Time) CFRef {
- secs := t.Sub(time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC)).Seconds()
- ref := CFDateCreate(secs)
- return ref
-}
-
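The subtraction in TimeToCFDateRef exists because Core Foundation's absolute time counts seconds from 2001-01-01 UTC rather than the Unix epoch. The conversion in isolation:

package main

import (
	"fmt"
	"time"
)

// Core Foundation's reference date.
var cfEpoch = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC)

// toCFAbsoluteTime converts a time.Time to seconds since the Core
// Foundation reference date.
func toCFAbsoluteTime(t time.Time) float64 {
	return t.Sub(cfEpoch).Seconds()
}

func main() {
	fmt.Println(toCFAbsoluteTime(cfEpoch))                       // 0
	fmt.Println(toCFAbsoluteTime(time.Unix(978307200, 0).UTC())) // also 0
}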
-type CFString CFRef
-
-const kCFAllocatorDefault = 0
-const kCFStringEncodingUTF8 = 0x08000100
-
-//go:cgo_import_dynamic x509_CFDataCreate CFDataCreate "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
-
-func BytesToCFData(b []byte) CFRef {
- p := unsafe.Pointer((*reflect.SliceHeader)(unsafe.Pointer(&b)).Data)
- ret := syscall(abi.FuncPCABI0(x509_CFDataCreate_trampoline), kCFAllocatorDefault, uintptr(p), uintptr(len(b)), 0, 0, 0)
- runtime.KeepAlive(p)
- return CFRef(ret)
-}
-func x509_CFDataCreate_trampoline()
-
-//go:cgo_import_dynamic x509_CFStringCreateWithBytes CFStringCreateWithBytes "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
-
-// StringToCFString returns a copy of the UTF-8 contents of s as a new CFString.
-func StringToCFString(s string) CFString {
- p := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&s)).Data)
- ret := syscall(abi.FuncPCABI0(x509_CFStringCreateWithBytes_trampoline), kCFAllocatorDefault, uintptr(p),
- uintptr(len(s)), uintptr(kCFStringEncodingUTF8), 0 /* isExternalRepresentation */, 0)
- runtime.KeepAlive(p)
- return CFString(ret)
-}
-func x509_CFStringCreateWithBytes_trampoline()
-
-//go:cgo_import_dynamic x509_CFDictionaryGetValueIfPresent CFDictionaryGetValueIfPresent "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
-
-func CFDictionaryGetValueIfPresent(dict CFRef, key CFString) (value CFRef, ok bool) {
- ret := syscall(abi.FuncPCABI0(x509_CFDictionaryGetValueIfPresent_trampoline), uintptr(dict), uintptr(key),
- uintptr(unsafe.Pointer(&value)), 0, 0, 0)
- if ret == 0 {
- return 0, false
- }
- return value, true
-}
-func x509_CFDictionaryGetValueIfPresent_trampoline()
-
-const kCFNumberSInt32Type = 3
-
-//go:cgo_import_dynamic x509_CFNumberGetValue CFNumberGetValue "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
-
-func CFNumberGetValue(num CFRef) (int32, error) {
- var value int32
- ret := syscall(abi.FuncPCABI0(x509_CFNumberGetValue_trampoline), uintptr(num), uintptr(kCFNumberSInt32Type),
- uintptr(unsafe.Pointer(&value)), 0, 0, 0)
- if ret == 0 {
- return 0, errors.New("CFNumberGetValue call failed")
- }
- return value, nil
-}
-func x509_CFNumberGetValue_trampoline()
-
-//go:cgo_import_dynamic x509_CFDataGetLength CFDataGetLength "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
-
-func CFDataGetLength(data CFRef) int {
- ret := syscall(abi.FuncPCABI0(x509_CFDataGetLength_trampoline), uintptr(data), 0, 0, 0, 0, 0)
- return int(ret)
-}
-func x509_CFDataGetLength_trampoline()
-
-//go:cgo_import_dynamic x509_CFDataGetBytePtr CFDataGetBytePtr "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
-
-func CFDataGetBytePtr(data CFRef) uintptr {
- ret := syscall(abi.FuncPCABI0(x509_CFDataGetBytePtr_trampoline), uintptr(data), 0, 0, 0, 0, 0)
- return ret
-}
-func x509_CFDataGetBytePtr_trampoline()
-
-//go:cgo_import_dynamic x509_CFArrayGetCount CFArrayGetCount "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
-
-func CFArrayGetCount(array CFRef) int {
- ret := syscall(abi.FuncPCABI0(x509_CFArrayGetCount_trampoline), uintptr(array), 0, 0, 0, 0, 0)
- return int(ret)
-}
-func x509_CFArrayGetCount_trampoline()
-
-//go:cgo_import_dynamic x509_CFArrayGetValueAtIndex CFArrayGetValueAtIndex "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
-
-func CFArrayGetValueAtIndex(array CFRef, index int) CFRef {
- ret := syscall(abi.FuncPCABI0(x509_CFArrayGetValueAtIndex_trampoline), uintptr(array), uintptr(index), 0, 0, 0, 0)
- return CFRef(ret)
-}
-func x509_CFArrayGetValueAtIndex_trampoline()
-
-//go:cgo_import_dynamic x509_CFEqual CFEqual "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
-
-func CFEqual(a, b CFRef) bool {
- ret := syscall(abi.FuncPCABI0(x509_CFEqual_trampoline), uintptr(a), uintptr(b), 0, 0, 0, 0)
- return ret == 1
-}
-func x509_CFEqual_trampoline()
-
-//go:cgo_import_dynamic x509_CFRelease CFRelease "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
-
-func CFRelease(ref CFRef) {
- syscall(abi.FuncPCABI0(x509_CFRelease_trampoline), uintptr(ref), 0, 0, 0, 0, 0)
-}
-func x509_CFRelease_trampoline()
-
-//go:cgo_import_dynamic x509_CFArrayCreateMutable CFArrayCreateMutable "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
-
-func CFArrayCreateMutable() CFRef {
- ret := syscall(abi.FuncPCABI0(x509_CFArrayCreateMutable_trampoline), kCFAllocatorDefault, 0, 0 /* kCFTypeArrayCallBacks */, 0, 0, 0)
- return CFRef(ret)
-}
-func x509_CFArrayCreateMutable_trampoline()
-
-//go:cgo_import_dynamic x509_CFArrayAppendValue CFArrayAppendValue "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
-
-func CFArrayAppendValue(array CFRef, val CFRef) {
- syscall(abi.FuncPCABI0(x509_CFArrayAppendValue_trampoline), uintptr(array), uintptr(val), 0, 0, 0, 0)
-}
-func x509_CFArrayAppendValue_trampoline()
-
-//go:cgo_import_dynamic x509_CFDateCreate CFDateCreate "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
-
-func CFDateCreate(seconds float64) CFRef {
- ret := syscall(abi.FuncPCABI0(x509_CFDateCreate_trampoline), kCFAllocatorDefault, 0, 0, 0, 0, seconds)
- return CFRef(ret)
-}
-func x509_CFDateCreate_trampoline()
-
-//go:cgo_import_dynamic x509_CFErrorCopyDescription CFErrorCopyDescription "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
-
-func CFErrorCopyDescription(errRef CFRef) CFRef {
- ret := syscall(abi.FuncPCABI0(x509_CFErrorCopyDescription_trampoline), uintptr(errRef), 0, 0, 0, 0, 0)
- return CFRef(ret)
-}
-func x509_CFErrorCopyDescription_trampoline()
-
-//go:cgo_import_dynamic x509_CFStringCreateExternalRepresentation CFStringCreateExternalRepresentation "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
-
-func CFStringCreateExternalRepresentation(strRef CFRef) CFRef {
- ret := syscall(abi.FuncPCABI0(x509_CFStringCreateExternalRepresentation_trampoline), kCFAllocatorDefault, uintptr(strRef), kCFStringEncodingUTF8, 0, 0, 0)
- return CFRef(ret)
-}
-func x509_CFStringCreateExternalRepresentation_trampoline()
-
-// syscall is implemented in the runtime package (runtime/sys_darwin.go)
-func syscall(fn, a1, a2, a3, a4, a5 uintptr, f1 float64) uintptr
-
-// ReleaseCFArray iterates through an array, releasing its contents, and then
-// releases the array itself. This is necessary because we cannot easily set
-// the CFArrayCallBacks argument when creating CFArrays.
-func ReleaseCFArray(array CFRef) {
- for i := 0; i < CFArrayGetCount(array); i++ {
- ref := CFArrayGetValueAtIndex(array, i)
- CFRelease(ref)
- }
- CFRelease(array)
-}
diff --git a/contrib/go/_std_1.18/src/crypto/x509/internal/macos/security.go b/contrib/go/_std_1.18/src/crypto/x509/internal/macos/security.go
deleted file mode 100644
index ef64bda49f..0000000000
--- a/contrib/go/_std_1.18/src/crypto/x509/internal/macos/security.go
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build darwin
-
-package macOS
-
-import (
- "errors"
- "fmt"
- "internal/abi"
- "strconv"
- "unsafe"
-)
-
-// Security.framework linker flags for the external linker. See Issue 42459.
-//go:cgo_ldflag "-framework"
-//go:cgo_ldflag "Security"
-
-// Based on https://opensource.apple.com/source/Security/Security-59306.41.2/base/Security.h
-
-type SecTrustSettingsResult int32
-
-const (
- SecTrustSettingsResultInvalid SecTrustSettingsResult = iota
- SecTrustSettingsResultTrustRoot
- SecTrustSettingsResultTrustAsRoot
- SecTrustSettingsResultDeny
- SecTrustSettingsResultUnspecified
-)
-
-type SecTrustResultType int32
-
-const (
- SecTrustResultInvalid SecTrustResultType = iota
- SecTrustResultProceed
- SecTrustResultConfirm // deprecated
- SecTrustResultDeny
- SecTrustResultUnspecified
- SecTrustResultRecoverableTrustFailure
- SecTrustResultFatalTrustFailure
- SecTrustResultOtherError
-)
-
-type SecTrustSettingsDomain int32
-
-const (
- SecTrustSettingsDomainUser SecTrustSettingsDomain = iota
- SecTrustSettingsDomainAdmin
- SecTrustSettingsDomainSystem
-)
-
-type OSStatus struct {
- call string
- status int32
-}
-
-func (s OSStatus) Error() string {
- return s.call + " error: " + strconv.Itoa(int(s.status))
-}
-
-// Dictionary keys are defined as build-time strings with CFSTR, but the Go
-// linker's internal linking mode can't handle CFSTR relocations. Create our
-// own dynamic strings instead and just never release them.
-//
-// Note that this might be the only thing that can break over time if
-// these values change, as the ABI arguably requires using the strings
-// pointed to by the symbols, not values that happen to be equal to them.
-
-var SecTrustSettingsResultKey = StringToCFString("kSecTrustSettingsResult")
-var SecTrustSettingsPolicy = StringToCFString("kSecTrustSettingsPolicy")
-var SecTrustSettingsPolicyString = StringToCFString("kSecTrustSettingsPolicyString")
-var SecPolicyOid = StringToCFString("SecPolicyOid")
-var SecPolicyAppleSSL = StringToCFString("1.2.840.113635.100.1.3") // defined by POLICYMACRO
-
-var ErrNoTrustSettings = errors.New("no trust settings found")
-
-const errSecNoTrustSettings = -25263
-
-//go:cgo_import_dynamic x509_SecTrustSettingsCopyCertificates SecTrustSettingsCopyCertificates "/System/Library/Frameworks/Security.framework/Versions/A/Security"
-
-func SecTrustSettingsCopyCertificates(domain SecTrustSettingsDomain) (certArray CFRef, err error) {
- ret := syscall(abi.FuncPCABI0(x509_SecTrustSettingsCopyCertificates_trampoline), uintptr(domain),
- uintptr(unsafe.Pointer(&certArray)), 0, 0, 0, 0)
- if int32(ret) == errSecNoTrustSettings {
- return 0, ErrNoTrustSettings
- } else if ret != 0 {
- return 0, OSStatus{"SecTrustSettingsCopyCertificates", int32(ret)}
- }
- return certArray, nil
-}
-func x509_SecTrustSettingsCopyCertificates_trampoline()
-
-const errSecItemNotFound = -25300
-
-//go:cgo_import_dynamic x509_SecTrustSettingsCopyTrustSettings SecTrustSettingsCopyTrustSettings "/System/Library/Frameworks/Security.framework/Versions/A/Security"
-
-func SecTrustSettingsCopyTrustSettings(cert CFRef, domain SecTrustSettingsDomain) (trustSettings CFRef, err error) {
- ret := syscall(abi.FuncPCABI0(x509_SecTrustSettingsCopyTrustSettings_trampoline), uintptr(cert), uintptr(domain),
- uintptr(unsafe.Pointer(&trustSettings)), 0, 0, 0)
- if int32(ret) == errSecItemNotFound {
- return 0, ErrNoTrustSettings
- } else if ret != 0 {
- return 0, OSStatus{"SecTrustSettingsCopyTrustSettings", int32(ret)}
- }
- return trustSettings, nil
-}
-func x509_SecTrustSettingsCopyTrustSettings_trampoline()
-
-//go:cgo_import_dynamic x509_SecPolicyCopyProperties SecPolicyCopyProperties "/System/Library/Frameworks/Security.framework/Versions/A/Security"
-
-func SecPolicyCopyProperties(policy CFRef) CFRef {
- ret := syscall(abi.FuncPCABI0(x509_SecPolicyCopyProperties_trampoline), uintptr(policy), 0, 0, 0, 0, 0)
- return CFRef(ret)
-}
-func x509_SecPolicyCopyProperties_trampoline()
-
-//go:cgo_import_dynamic x509_SecTrustCreateWithCertificates SecTrustCreateWithCertificates "/System/Library/Frameworks/Security.framework/Versions/A/Security"
-
-func SecTrustCreateWithCertificates(certs CFRef, policies CFRef) (CFRef, error) {
- var trustObj CFRef
- ret := syscall(abi.FuncPCABI0(x509_SecTrustCreateWithCertificates_trampoline), uintptr(certs), uintptr(policies),
- uintptr(unsafe.Pointer(&trustObj)), 0, 0, 0)
- if int32(ret) != 0 {
- return 0, OSStatus{"SecTrustCreateWithCertificates", int32(ret)}
- }
- return trustObj, nil
-}
-func x509_SecTrustCreateWithCertificates_trampoline()
-
-//go:cgo_import_dynamic x509_SecCertificateCreateWithData SecCertificateCreateWithData "/System/Library/Frameworks/Security.framework/Versions/A/Security"
-
-func SecCertificateCreateWithData(b []byte) CFRef {
- data := BytesToCFData(b)
- ret := syscall(abi.FuncPCABI0(x509_SecCertificateCreateWithData_trampoline), kCFAllocatorDefault, uintptr(data), 0, 0, 0, 0)
- CFRelease(data)
- return CFRef(ret)
-}
-func x509_SecCertificateCreateWithData_trampoline()
-
-//go:cgo_import_dynamic x509_SecPolicyCreateSSL SecPolicyCreateSSL "/System/Library/Frameworks/Security.framework/Versions/A/Security"
-
-func SecPolicyCreateSSL(name string) CFRef {
- var hostname CFString
- if name != "" {
- hostname = StringToCFString(name)
- defer CFRelease(CFRef(hostname))
- }
- ret := syscall(abi.FuncPCABI0(x509_SecPolicyCreateSSL_trampoline), 1 /* true */, uintptr(hostname), 0, 0, 0, 0)
- return CFRef(ret)
-}
-func x509_SecPolicyCreateSSL_trampoline()
-
-//go:cgo_import_dynamic x509_SecTrustSetVerifyDate SecTrustSetVerifyDate "/System/Library/Frameworks/Security.framework/Versions/A/Security"
-
-func SecTrustSetVerifyDate(trustObj CFRef, dateRef CFRef) error {
- ret := syscall(abi.FuncPCABI0(x509_SecTrustSetVerifyDate_trampoline), uintptr(trustObj), uintptr(dateRef), 0, 0, 0, 0)
- if int32(ret) != 0 {
- return OSStatus{"SecTrustSetVerifyDate", int32(ret)}
- }
- return nil
-}
-func x509_SecTrustSetVerifyDate_trampoline()
-
-//go:cgo_import_dynamic x509_SecTrustEvaluate SecTrustEvaluate "/System/Library/Frameworks/Security.framework/Versions/A/Security"
-
-func SecTrustEvaluate(trustObj CFRef) (CFRef, error) {
- var result CFRef
- ret := syscall(abi.FuncPCABI0(x509_SecTrustEvaluate_trampoline), uintptr(trustObj), uintptr(unsafe.Pointer(&result)), 0, 0, 0, 0)
- if int32(ret) != 0 {
- return 0, OSStatus{"SecTrustEvaluate", int32(ret)}
- }
- return CFRef(result), nil
-}
-func x509_SecTrustEvaluate_trampoline()
-
-//go:cgo_import_dynamic x509_SecTrustGetResult SecTrustGetResult "/System/Library/Frameworks/Security.framework/Versions/A/Security"
-
-func SecTrustGetResult(trustObj CFRef, result CFRef) (CFRef, CFRef, error) {
- var chain, info CFRef
- ret := syscall(abi.FuncPCABI0(x509_SecTrustGetResult_trampoline), uintptr(trustObj), uintptr(unsafe.Pointer(&result)),
- uintptr(unsafe.Pointer(&chain)), uintptr(unsafe.Pointer(&info)), 0, 0)
- if int32(ret) != 0 {
- return 0, 0, OSStatus{"SecTrustGetResult", int32(ret)}
- }
- return chain, info, nil
-}
-func x509_SecTrustGetResult_trampoline()
-
-//go:cgo_import_dynamic x509_SecTrustEvaluateWithError SecTrustEvaluateWithError "/System/Library/Frameworks/Security.framework/Versions/A/Security"
-
-func SecTrustEvaluateWithError(trustObj CFRef) error {
- var errRef CFRef
- ret := syscall(abi.FuncPCABI0(x509_SecTrustEvaluateWithError_trampoline), uintptr(trustObj), uintptr(unsafe.Pointer(&errRef)), 0, 0, 0, 0)
- if int32(ret) != 1 {
- errStr := CFErrorCopyDescription(errRef)
- err := fmt.Errorf("x509: %s", CFStringToString(errStr))
- CFRelease(errRef)
- CFRelease(errStr)
- return err
- }
- return nil
-}
-func x509_SecTrustEvaluateWithError_trampoline()
-
-//go:cgo_import_dynamic x509_SecTrustGetCertificateCount SecTrustGetCertificateCount "/System/Library/Frameworks/Security.framework/Versions/A/Security"
-
-func SecTrustGetCertificateCount(trustObj CFRef) int {
- ret := syscall(abi.FuncPCABI0(x509_SecTrustGetCertificateCount_trampoline), uintptr(trustObj), 0, 0, 0, 0, 0)
- return int(ret)
-}
-func x509_SecTrustGetCertificateCount_trampoline()
-
-//go:cgo_import_dynamic x509_SecTrustGetCertificateAtIndex SecTrustGetCertificateAtIndex "/System/Library/Frameworks/Security.framework/Versions/A/Security"
-
-func SecTrustGetCertificateAtIndex(trustObj CFRef, i int) CFRef {
- ret := syscall(abi.FuncPCABI0(x509_SecTrustGetCertificateAtIndex_trampoline), uintptr(trustObj), uintptr(i), 0, 0, 0, 0)
- return CFRef(ret)
-}
-func x509_SecTrustGetCertificateAtIndex_trampoline()
-
-//go:cgo_import_dynamic x509_SecCertificateCopyData SecCertificateCopyData "/System/Library/Frameworks/Security.framework/Versions/A/Security"
-
-func SecCertificateCopyData(cert CFRef) ([]byte, error) {
- ret := syscall(abi.FuncPCABI0(x509_SecCertificateCopyData_trampoline), uintptr(cert), 0, 0, 0, 0, 0)
- if ret == 0 {
- return nil, errors.New("x509: invalid certificate object")
- }
- b := CFDataToSlice(CFRef(ret))
- CFRelease(CFRef(ret))
- return b, nil
-}
-func x509_SecCertificateCopyData_trampoline()
diff --git a/contrib/go/_std_1.18/src/crypto/x509/parser.go b/contrib/go/_std_1.18/src/crypto/x509/parser.go
deleted file mode 100644
index 2cb1ad2d7f..0000000000
--- a/contrib/go/_std_1.18/src/crypto/x509/parser.go
+++ /dev/null
@@ -1,1013 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package x509
-
-import (
- "bytes"
- "crypto/dsa"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/elliptic"
- "crypto/rsa"
- "crypto/x509/pkix"
- "encoding/asn1"
- "errors"
- "fmt"
- "math/big"
- "net"
- "net/url"
- "strconv"
- "strings"
- "time"
- "unicode/utf16"
- "unicode/utf8"
-
- "golang.org/x/crypto/cryptobyte"
- cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
-)
-
-// isPrintable reports whether the given b is in the ASN.1 PrintableString set.
-// This is a simplified version of encoding/asn1.isPrintable.
-func isPrintable(b byte) bool {
- return 'a' <= b && b <= 'z' ||
- 'A' <= b && b <= 'Z' ||
- '0' <= b && b <= '9' ||
- '\'' <= b && b <= ')' ||
- '+' <= b && b <= '/' ||
- b == ' ' ||
- b == ':' ||
- b == '=' ||
- b == '?' ||
- // This is technically not allowed in a PrintableString.
- // However, x509 certificates with wildcard strings don't
- // always use the correct string type so we permit it.
- b == '*' ||
- // This is not technically allowed either. However, not
- // only is it relatively common, but there are also a
- // handful of CA certificates that contain it. At least
- // one of which will not expire until 2027.
- b == '&'
-}
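The '*' and '&' carve-outs are what make this predicate accept real-world values that a strict PrintableString parser would reject. A standalone sketch, with isPrintable copied from above:

package main

import "fmt"

// isPrintable is copied verbatim from the parser above.
func isPrintable(b byte) bool {
	return 'a' <= b && b <= 'z' ||
		'A' <= b && b <= 'Z' ||
		'0' <= b && b <= '9' ||
		'\'' <= b && b <= ')' ||
		'+' <= b && b <= '/' ||
		b == ' ' ||
		b == ':' ||
		b == '=' ||
		b == '?' ||
		b == '*' || // wildcard carve-out
		b == '&' // legacy CA carve-out
}

func main() {
	for _, s := range []string{"*.example.com", "AT&T", "caf\u00e9"} {
		ok := true
		for i := 0; i < len(s); i++ {
			if !isPrintable(s[i]) {
				ok = false
				break
			}
		}
		fmt.Printf("%-16q %v\n", s, ok) // the UTF-8 value fails, the others pass
	}
}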
-
-// parseASN1String parses the ASN.1 string types T61String, PrintableString,
-// UTF8String, BMPString, IA5String, and NumericString. This is mostly copied
-// from the respective encoding/asn1.parse... methods, rather than just
-// increasing the API surface of that package.
-func parseASN1String(tag cryptobyte_asn1.Tag, value []byte) (string, error) {
- switch tag {
- case cryptobyte_asn1.T61String:
- return string(value), nil
- case cryptobyte_asn1.PrintableString:
- for _, b := range value {
- if !isPrintable(b) {
- return "", errors.New("invalid PrintableString")
- }
- }
- return string(value), nil
- case cryptobyte_asn1.UTF8String:
- if !utf8.Valid(value) {
- return "", errors.New("invalid UTF-8 string")
- }
- return string(value), nil
- case cryptobyte_asn1.Tag(asn1.TagBMPString):
- if len(value)%2 != 0 {
- return "", errors.New("invalid BMPString")
- }
-
- // Strip terminator if present.
- if l := len(value); l >= 2 && value[l-1] == 0 && value[l-2] == 0 {
- value = value[:l-2]
- }
-
- s := make([]uint16, 0, len(value)/2)
- for len(value) > 0 {
- s = append(s, uint16(value[0])<<8+uint16(value[1]))
- value = value[2:]
- }
-
- return string(utf16.Decode(s)), nil
- case cryptobyte_asn1.IA5String:
- s := string(value)
- if isIA5String(s) != nil {
- return "", errors.New("invalid IA5String")
- }
- return s, nil
- case cryptobyte_asn1.Tag(asn1.TagNumericString):
- for _, b := range value {
- if !('0' <= b && b <= '9' || b == ' ') {
- return "", errors.New("invalid NumericString")
- }
- }
- return string(value), nil
- }
- return "", fmt.Errorf("unsupported string type: %v", tag)
-}
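The BMPString branch is the only non-trivial decoding: big-endian UTF-16 code units, with an optional two-zero-byte terminator stripped first. A standalone sketch of just that loop:

package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	// "Go" as a BMPString body: big-endian UTF-16, with a terminator.
	value := []byte{0x00, 'G', 0x00, 'o', 0x00, 0x00}

	// Strip the terminator if present, as the parser above does.
	if l := len(value); l >= 2 && value[l-1] == 0 && value[l-2] == 0 {
		value = value[:l-2]
	}

	s := make([]uint16, 0, len(value)/2)
	for len(value) > 0 {
		s = append(s, uint16(value[0])<<8+uint16(value[1]))
		value = value[2:]
	}
	fmt.Println(string(utf16.Decode(s))) // Go
}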
-
-// parseName parses a DER encoded Name as defined in RFC 5280. We may
-// want to export this function in the future for use in crypto/tls.
-func parseName(raw cryptobyte.String) (*pkix.RDNSequence, error) {
- if !raw.ReadASN1(&raw, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: invalid RDNSequence")
- }
-
- var rdnSeq pkix.RDNSequence
- for !raw.Empty() {
- var rdnSet pkix.RelativeDistinguishedNameSET
- var set cryptobyte.String
- if !raw.ReadASN1(&set, cryptobyte_asn1.SET) {
- return nil, errors.New("x509: invalid RDNSequence")
- }
- for !set.Empty() {
- var atav cryptobyte.String
- if !set.ReadASN1(&atav, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: invalid RDNSequence: invalid attribute")
- }
- var attr pkix.AttributeTypeAndValue
- if !atav.ReadASN1ObjectIdentifier(&attr.Type) {
- return nil, errors.New("x509: invalid RDNSequence: invalid attribute type")
- }
- var rawValue cryptobyte.String
- var valueTag cryptobyte_asn1.Tag
- if !atav.ReadAnyASN1(&rawValue, &valueTag) {
- return nil, errors.New("x509: invalid RDNSequence: invalid attribute value")
- }
- var err error
- attr.Value, err = parseASN1String(valueTag, rawValue)
- if err != nil {
- return nil, fmt.Errorf("x509: invalid RDNSequence: invalid attribute value: %s", err)
- }
- rdnSet = append(rdnSet, attr)
- }
-
- rdnSeq = append(rdnSeq, rdnSet)
- }
-
- return &rdnSeq, nil
-}
-
-func parseAI(der cryptobyte.String) (pkix.AlgorithmIdentifier, error) {
- ai := pkix.AlgorithmIdentifier{}
- if !der.ReadASN1ObjectIdentifier(&ai.Algorithm) {
- return ai, errors.New("x509: malformed OID")
- }
- if der.Empty() {
- return ai, nil
- }
- var params cryptobyte.String
- var tag cryptobyte_asn1.Tag
- if !der.ReadAnyASN1Element(&params, &tag) {
- return ai, errors.New("x509: malformed parameters")
- }
- ai.Parameters.Tag = int(tag)
- ai.Parameters.FullBytes = params
- return ai, nil
-}
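As a rough, self-contained illustration of how cryptobyte.String consumes DER, the sketch below walks an AlgorithmIdentifier for an EC public key. Unlike parseAI, which keeps the parameters as raw bytes via ReadAnyASN1Element, this demo reads the parameters as a second OID because it knows the shape in advance:

package main

import (
	"encoding/asn1"
	"fmt"
	"log"

	"golang.org/x/crypto/cryptobyte"
	cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
	// Hand-encoded DER for AlgorithmIdentifier{ecPublicKey, P-256}.
	der := cryptobyte.String([]byte{
		0x30, 0x13,
		0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x02, 0x01,
		0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07,
	})
	var ai cryptobyte.String
	if !der.ReadASN1(&ai, cryptobyte_asn1.SEQUENCE) {
		log.Fatal("malformed AlgorithmIdentifier")
	}
	var algo, curve asn1.ObjectIdentifier
	if !ai.ReadASN1ObjectIdentifier(&algo) || !ai.ReadASN1ObjectIdentifier(&curve) {
		log.Fatal("malformed OID")
	}
	fmt.Println(algo, curve) // 1.2.840.10045.2.1 1.2.840.10045.3.1.7
}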
-
-func parseValidity(der cryptobyte.String) (time.Time, time.Time, error) {
- extract := func() (time.Time, error) {
- var t time.Time
- switch {
- case der.PeekASN1Tag(cryptobyte_asn1.UTCTime):
- // TODO(rolandshoemaker): once #45411 is fixed, the following code
- // should be replaced with a call to der.ReadASN1UTCTime.
- var utc cryptobyte.String
- if !der.ReadASN1(&utc, cryptobyte_asn1.UTCTime) {
- return t, errors.New("x509: malformed UTCTime")
- }
- s := string(utc)
-
- formatStr := "0601021504Z0700"
- var err error
- t, err = time.Parse(formatStr, s)
- if err != nil {
- formatStr = "060102150405Z0700"
- t, err = time.Parse(formatStr, s)
- }
- if err != nil {
- return t, err
- }
-
- if serialized := t.Format(formatStr); serialized != s {
- return t, errors.New("x509: malformed UTCTime")
- }
-
- if t.Year() >= 2050 {
- // UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
- t = t.AddDate(-100, 0, 0)
- }
- case der.PeekASN1Tag(cryptobyte_asn1.GeneralizedTime):
- if !der.ReadASN1GeneralizedTime(&t) {
- return t, errors.New("x509: malformed GeneralizedTime")
- }
- default:
- return t, errors.New("x509: unsupported time format")
- }
- return t, nil
- }
-
- notBefore, err := extract()
- if err != nil {
- return time.Time{}, time.Time{}, err
- }
- notAfter, err := extract()
- if err != nil {
- return time.Time{}, time.Time{}, err
- }
-
- return notBefore, notAfter, nil
-}
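A standalone sketch of the two-format UTCTime fallback and the pre-2050 rule above (it omits the round-trip serialization check the parser performs):

package main

import (
	"fmt"
	"time"
)

func main() {
	for _, s := range []string{"491231235959Z", "500101000000Z"} {
		// Try the seconds-less layout first, then the one with seconds.
		t, err := time.Parse("0601021504Z0700", s)
		if err != nil {
			t, err = time.Parse("060102150405Z0700", s)
		}
		if err != nil {
			panic(err)
		}
		if t.Year() >= 2050 {
			t = t.AddDate(-100, 0, 0) // RFC 5280: UTCTime years 50-99 mean 19xx
		}
		fmt.Println(s, "->", t.UTC()) // 2049-12-31 ... and 1950-01-01 ...
	}
}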
-
-func parseExtension(der cryptobyte.String) (pkix.Extension, error) {
- var ext pkix.Extension
- if !der.ReadASN1ObjectIdentifier(&ext.Id) {
- return ext, errors.New("x509: malformed extension OID field")
- }
- if der.PeekASN1Tag(cryptobyte_asn1.BOOLEAN) {
- if !der.ReadASN1Boolean(&ext.Critical) {
- return ext, errors.New("x509: malformed extension critical field")
- }
- }
- var val cryptobyte.String
- if !der.ReadASN1(&val, cryptobyte_asn1.OCTET_STRING) {
- return ext, errors.New("x509: malformed extension value field")
- }
- ext.Value = val
- return ext, nil
-}
-
-func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (any, error) {
- der := cryptobyte.String(keyData.PublicKey.RightAlign())
- switch algo {
- case RSA:
- // RSA public keys must have a NULL in the parameters.
- // See RFC 3279, Section 2.3.1.
- if !bytes.Equal(keyData.Algorithm.Parameters.FullBytes, asn1.NullBytes) {
- return nil, errors.New("x509: RSA key missing NULL parameters")
- }
-
- p := &pkcs1PublicKey{N: new(big.Int)}
- if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: invalid RSA public key")
- }
- if !der.ReadASN1Integer(p.N) {
- return nil, errors.New("x509: invalid RSA modulus")
- }
- if !der.ReadASN1Integer(&p.E) {
- return nil, errors.New("x509: invalid RSA public exponent")
- }
-
- if p.N.Sign() <= 0 {
- return nil, errors.New("x509: RSA modulus is not a positive number")
- }
- if p.E <= 0 {
- return nil, errors.New("x509: RSA public exponent is not a positive number")
- }
-
- pub := &rsa.PublicKey{
- E: p.E,
- N: p.N,
- }
- return pub, nil
- case ECDSA:
- paramsDer := cryptobyte.String(keyData.Algorithm.Parameters.FullBytes)
- namedCurveOID := new(asn1.ObjectIdentifier)
- if !paramsDer.ReadASN1ObjectIdentifier(namedCurveOID) {
- return nil, errors.New("x509: invalid ECDSA parameters")
- }
- namedCurve := namedCurveFromOID(*namedCurveOID)
- if namedCurve == nil {
- return nil, errors.New("x509: unsupported elliptic curve")
- }
- x, y := elliptic.Unmarshal(namedCurve, der)
- if x == nil {
- return nil, errors.New("x509: failed to unmarshal elliptic curve point")
- }
- pub := &ecdsa.PublicKey{
- Curve: namedCurve,
- X: x,
- Y: y,
- }
- return pub, nil
- case Ed25519:
- // RFC 8410, Section 3
- // > For all of the OIDs, the parameters MUST be absent.
- if len(keyData.Algorithm.Parameters.FullBytes) != 0 {
- return nil, errors.New("x509: Ed25519 key encoded with illegal parameters")
- }
- if len(der) != ed25519.PublicKeySize {
- return nil, errors.New("x509: wrong Ed25519 public key size")
- }
- return ed25519.PublicKey(der), nil
- case DSA:
- y := new(big.Int)
- if !der.ReadASN1Integer(y) {
- return nil, errors.New("x509: invalid DSA public key")
- }
- pub := &dsa.PublicKey{
- Y: y,
- Parameters: dsa.Parameters{
- P: new(big.Int),
- Q: new(big.Int),
- G: new(big.Int),
- },
- }
- paramsDer := cryptobyte.String(keyData.Algorithm.Parameters.FullBytes)
- if !paramsDer.ReadASN1(&paramsDer, cryptobyte_asn1.SEQUENCE) ||
- !paramsDer.ReadASN1Integer(pub.Parameters.P) ||
- !paramsDer.ReadASN1Integer(pub.Parameters.Q) ||
- !paramsDer.ReadASN1Integer(pub.Parameters.G) {
- return nil, errors.New("x509: invalid DSA parameters")
- }
- if pub.Y.Sign() <= 0 || pub.Parameters.P.Sign() <= 0 ||
- pub.Parameters.Q.Sign() <= 0 || pub.Parameters.G.Sign() <= 0 {
- return nil, errors.New("x509: zero or negative DSA parameter")
- }
- return pub, nil
- default:
- return nil, nil
- }
-}
-
-func parseKeyUsageExtension(der cryptobyte.String) (KeyUsage, error) {
- var usageBits asn1.BitString
- if !der.ReadASN1BitString(&usageBits) {
- return 0, errors.New("x509: invalid key usage")
- }
-
- var usage int
- for i := 0; i < 9; i++ {
- if usageBits.At(i) != 0 {
- usage |= 1 << uint(i)
- }
- }
- return KeyUsage(usage), nil
-}
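ASN.1 BIT STRING bits are numbered from the most significant bit of the first byte, which is why the loop above can map bit i directly to 1<<i. A tiny sketch with a hand-built bit string:

package main

import (
	"encoding/asn1"
	"fmt"
)

func main() {
	// 0xA0 = 1010 0000: bit 0 (digitalSignature) and bit 2
	// (keyEncipherment) are set, in RFC 5280 KeyUsage terms.
	usageBits := asn1.BitString{Bytes: []byte{0xA0}, BitLength: 3}

	var usage int
	for i := 0; i < 9; i++ {
		if usageBits.At(i) != 0 {
			usage |= 1 << uint(i)
		}
	}
	fmt.Printf("%b\n", usage) // 101: KeyUsageDigitalSignature|KeyUsageKeyEncipherment
}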
-
-func parseBasicConstraintsExtension(der cryptobyte.String) (bool, int, error) {
- var isCA bool
- if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) {
- return false, 0, errors.New("x509: invalid basic constraints")
- }
- if der.PeekASN1Tag(cryptobyte_asn1.BOOLEAN) {
- if !der.ReadASN1Boolean(&isCA) {
- return false, 0, errors.New("x509: invalid basic constraints (malformed cA field)")
- }
- }
- maxPathLen := -1
- if !der.Empty() && der.PeekASN1Tag(cryptobyte_asn1.INTEGER) {
- if !der.ReadASN1Integer(&maxPathLen) {
- return false, 0, errors.New("x509: invalid basic constraints (malformed pathLenConstraint)")
- }
- }
-
- // TODO: map out.MaxPathLen to 0 if it has the -1 default value? (Issue 19285)
- return isCA, maxPathLen, nil
-}
-
-func forEachSAN(der cryptobyte.String, callback func(tag int, data []byte) error) error {
- if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) {
- return errors.New("x509: invalid subject alternative names")
- }
- for !der.Empty() {
- var san cryptobyte.String
- var tag cryptobyte_asn1.Tag
- if !der.ReadAnyASN1(&san, &tag) {
- return errors.New("x509: invalid subject alternative name")
- }
- if err := callback(int(tag^0x80), san); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL, err error) {
- err = forEachSAN(der, func(tag int, data []byte) error {
- switch tag {
- case nameTypeEmail:
- email := string(data)
- if err := isIA5String(email); err != nil {
- return errors.New("x509: SAN rfc822Name is malformed")
- }
- emailAddresses = append(emailAddresses, email)
- case nameTypeDNS:
- name := string(data)
- if err := isIA5String(name); err != nil {
- return errors.New("x509: SAN dNSName is malformed")
- }
- dnsNames = append(dnsNames, name)
- case nameTypeURI:
- uriStr := string(data)
- if err := isIA5String(uriStr); err != nil {
- return errors.New("x509: SAN uniformResourceIdentifier is malformed")
- }
- uri, err := url.Parse(uriStr)
- if err != nil {
- return fmt.Errorf("x509: cannot parse URI %q: %s", uriStr, err)
- }
- if len(uri.Host) > 0 {
- if _, ok := domainToReverseLabels(uri.Host); !ok {
- return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr)
- }
- }
- uris = append(uris, uri)
- case nameTypeIP:
- switch len(data) {
- case net.IPv4len, net.IPv6len:
- ipAddresses = append(ipAddresses, data)
- default:
- return errors.New("x509: cannot parse IP address of length " + strconv.Itoa(len(data)))
- }
- }
-
- return nil
- })
-
- return
-}
-
-func parseExtKeyUsageExtension(der cryptobyte.String) ([]ExtKeyUsage, []asn1.ObjectIdentifier, error) {
- var extKeyUsages []ExtKeyUsage
- var unknownUsages []asn1.ObjectIdentifier
- if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) {
- return nil, nil, errors.New("x509: invalid extended key usages")
- }
- for !der.Empty() {
- var eku asn1.ObjectIdentifier
- if !der.ReadASN1ObjectIdentifier(&eku) {
- return nil, nil, errors.New("x509: invalid extended key usages")
- }
- if extKeyUsage, ok := extKeyUsageFromOID(eku); ok {
- extKeyUsages = append(extKeyUsages, extKeyUsage)
- } else {
- unknownUsages = append(unknownUsages, eku)
- }
- }
- return extKeyUsages, unknownUsages, nil
-}
-
-func parseCertificatePoliciesExtension(der cryptobyte.String) ([]asn1.ObjectIdentifier, error) {
- var oids []asn1.ObjectIdentifier
- if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: invalid certificate policies")
- }
- for !der.Empty() {
- var cp cryptobyte.String
- if !der.ReadASN1(&cp, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: invalid certificate policies")
- }
- var oid asn1.ObjectIdentifier
- if !cp.ReadASN1ObjectIdentifier(&oid) {
- return nil, errors.New("x509: invalid certificate policies")
- }
- oids = append(oids, oid)
- }
-
- return oids, nil
-}
-
-// isValidIPMask reports whether mask consists of zero or more 1 bits, followed by zero bits.
-func isValidIPMask(mask []byte) bool {
- seenZero := false
-
- for _, b := range mask {
- if seenZero {
- if b != 0 {
- return false
- }
-
- continue
- }
-
- switch b {
- case 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe:
- seenZero = true
- case 0xff:
- default:
- return false
- }
- }
-
- return true
-}
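A valid mask is a contiguous run of 1 bits followed only by 0 bits; the single-byte switch enumerates exactly the byte values that may end such a run. A standalone sketch, with isValidIPMask copied from above:

package main

import "fmt"

// isValidIPMask is copied verbatim from the parser above.
func isValidIPMask(mask []byte) bool {
	seenZero := false
	for _, b := range mask {
		if seenZero {
			if b != 0 {
				return false
			}
			continue
		}
		switch b {
		case 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe:
			seenZero = true
		case 0xff:
		default:
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isValidIPMask([]byte{0xff, 0xff, 0xff, 0x00})) // true: /24
	fmt.Println(isValidIPMask([]byte{0xff, 0xfe, 0x00, 0x00})) // true: /15
	fmt.Println(isValidIPMask([]byte{0xff, 0x00, 0xff, 0x00})) // false: 1 bits after a 0
}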
-
-func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandled bool, err error) {
- // RFC 5280, 4.2.1.10
-
- // NameConstraints ::= SEQUENCE {
- // permittedSubtrees [0] GeneralSubtrees OPTIONAL,
- // excludedSubtrees [1] GeneralSubtrees OPTIONAL }
- //
- // GeneralSubtrees ::= SEQUENCE SIZE (1..MAX) OF GeneralSubtree
- //
- // GeneralSubtree ::= SEQUENCE {
- // base GeneralName,
- // minimum [0] BaseDistance DEFAULT 0,
- // maximum [1] BaseDistance OPTIONAL }
- //
- // BaseDistance ::= INTEGER (0..MAX)
-
- outer := cryptobyte.String(e.Value)
- var toplevel, permitted, excluded cryptobyte.String
- var havePermitted, haveExcluded bool
- if !outer.ReadASN1(&toplevel, cryptobyte_asn1.SEQUENCE) ||
- !outer.Empty() ||
- !toplevel.ReadOptionalASN1(&permitted, &havePermitted, cryptobyte_asn1.Tag(0).ContextSpecific().Constructed()) ||
- !toplevel.ReadOptionalASN1(&excluded, &haveExcluded, cryptobyte_asn1.Tag(1).ContextSpecific().Constructed()) ||
- !toplevel.Empty() {
- return false, errors.New("x509: invalid NameConstraints extension")
- }
-
- if !havePermitted && !haveExcluded || len(permitted) == 0 && len(excluded) == 0 {
- // From RFC 5280, Section 4.2.1.10:
- // “either the permittedSubtrees field
- // or the excludedSubtrees MUST be
- // present”
- return false, errors.New("x509: empty name constraints extension")
- }
-
- getValues := func(subtrees cryptobyte.String) (dnsNames []string, ips []*net.IPNet, emails, uriDomains []string, err error) {
- for !subtrees.Empty() {
- var seq, value cryptobyte.String
- var tag cryptobyte_asn1.Tag
- if !subtrees.ReadASN1(&seq, cryptobyte_asn1.SEQUENCE) ||
- !seq.ReadAnyASN1(&value, &tag) {
- return nil, nil, nil, nil, fmt.Errorf("x509: invalid NameConstraints extension")
- }
-
- var (
- dnsTag = cryptobyte_asn1.Tag(2).ContextSpecific()
- emailTag = cryptobyte_asn1.Tag(1).ContextSpecific()
- ipTag = cryptobyte_asn1.Tag(7).ContextSpecific()
- uriTag = cryptobyte_asn1.Tag(6).ContextSpecific()
- )
-
- switch tag {
- case dnsTag:
- domain := string(value)
- if err := isIA5String(domain); err != nil {
- return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error())
- }
-
- trimmedDomain := domain
- if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' {
- // constraints can have a leading
- // period to exclude the domain
- // itself, but that's not valid in a
- // normal domain name.
- trimmedDomain = trimmedDomain[1:]
- }
- if _, ok := domainToReverseLabels(trimmedDomain); !ok {
- return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse dnsName constraint %q", domain)
- }
- dnsNames = append(dnsNames, domain)
-
- case ipTag:
- l := len(value)
- var ip, mask []byte
-
- switch l {
- case 8:
- ip = value[:4]
- mask = value[4:]
-
- case 32:
- ip = value[:16]
- mask = value[16:]
-
- default:
- return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained value of length %d", l)
- }
-
- if !isValidIPMask(mask) {
- return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained invalid mask %x", mask)
- }
-
- ips = append(ips, &net.IPNet{IP: net.IP(ip), Mask: net.IPMask(mask)})
-
- case emailTag:
- constraint := string(value)
- if err := isIA5String(constraint); err != nil {
- return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error())
- }
-
- // If the constraint contains an @ then
- // it specifies an exact mailbox name.
- if strings.Contains(constraint, "@") {
- if _, ok := parseRFC2821Mailbox(constraint); !ok {
- return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint)
- }
- } else {
- // Otherwise it's a domain name.
- domain := constraint
- if len(domain) > 0 && domain[0] == '.' {
- domain = domain[1:]
- }
- if _, ok := domainToReverseLabels(domain); !ok {
- return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint)
- }
- }
- emails = append(emails, constraint)
-
- case uriTag:
- domain := string(value)
- if err := isIA5String(domain); err != nil {
- return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error())
- }
-
- if net.ParseIP(domain) != nil {
- return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q: cannot be IP address", domain)
- }
-
- trimmedDomain := domain
- if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' {
- // constraints can have a leading
- // period to exclude the domain itself,
- // but that's not valid in a normal
- // domain name.
- trimmedDomain = trimmedDomain[1:]
- }
- if _, ok := domainToReverseLabels(trimmedDomain); !ok {
- return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q", domain)
- }
- uriDomains = append(uriDomains, domain)
-
- default:
- unhandled = true
- }
- }
-
- return dnsNames, ips, emails, uriDomains, nil
- }
-
- if out.PermittedDNSDomains, out.PermittedIPRanges, out.PermittedEmailAddresses, out.PermittedURIDomains, err = getValues(permitted); err != nil {
- return false, err
- }
- if out.ExcludedDNSDomains, out.ExcludedIPRanges, out.ExcludedEmailAddresses, out.ExcludedURIDomains, err = getValues(excluded); err != nil {
- return false, err
- }
- out.PermittedDNSDomainsCritical = e.Critical
-
- return unhandled, nil
-}
-
-func processExtensions(out *Certificate) error {
- var err error
- for _, e := range out.Extensions {
- unhandled := false
-
- if len(e.Id) == 4 && e.Id[0] == 2 && e.Id[1] == 5 && e.Id[2] == 29 {
- switch e.Id[3] {
- case 15:
- out.KeyUsage, err = parseKeyUsageExtension(e.Value)
- if err != nil {
- return err
- }
- case 19:
- out.IsCA, out.MaxPathLen, err = parseBasicConstraintsExtension(e.Value)
- if err != nil {
- return err
- }
- out.BasicConstraintsValid = true
- out.MaxPathLenZero = out.MaxPathLen == 0
- case 17:
- out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(e.Value)
- if err != nil {
- return err
- }
-
- if len(out.DNSNames) == 0 && len(out.EmailAddresses) == 0 && len(out.IPAddresses) == 0 && len(out.URIs) == 0 {
- // If we didn't parse anything then we do the critical check, below.
- unhandled = true
- }
-
- case 30:
- unhandled, err = parseNameConstraintsExtension(out, e)
- if err != nil {
- return err
- }
-
- case 31:
- // RFC 5280, 4.2.1.13
-
- // CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint
- //
- // DistributionPoint ::= SEQUENCE {
- // distributionPoint [0] DistributionPointName OPTIONAL,
- // reasons [1] ReasonFlags OPTIONAL,
- // cRLIssuer [2] GeneralNames OPTIONAL }
- //
- // DistributionPointName ::= CHOICE {
- // fullName [0] GeneralNames,
- // nameRelativeToCRLIssuer [1] RelativeDistinguishedName }
- val := cryptobyte.String(e.Value)
- if !val.ReadASN1(&val, cryptobyte_asn1.SEQUENCE) {
- return errors.New("x509: invalid CRL distribution points")
- }
- for !val.Empty() {
- var dpDER cryptobyte.String
- if !val.ReadASN1(&dpDER, cryptobyte_asn1.SEQUENCE) {
- return errors.New("x509: invalid CRL distribution point")
- }
- var dpNameDER cryptobyte.String
- var dpNamePresent bool
- if !dpDER.ReadOptionalASN1(&dpNameDER, &dpNamePresent, cryptobyte_asn1.Tag(0).Constructed().ContextSpecific()) {
- return errors.New("x509: invalid CRL distribution point")
- }
- if !dpNamePresent {
- continue
- }
- if !dpNameDER.ReadASN1(&dpNameDER, cryptobyte_asn1.Tag(0).Constructed().ContextSpecific()) {
- return errors.New("x509: invalid CRL distribution point")
- }
- for !dpNameDER.Empty() {
- if !dpNameDER.PeekASN1Tag(cryptobyte_asn1.Tag(6).ContextSpecific()) {
- break
- }
- var uri cryptobyte.String
- if !dpNameDER.ReadASN1(&uri, cryptobyte_asn1.Tag(6).ContextSpecific()) {
- return errors.New("x509: invalid CRL distribution point")
- }
- out.CRLDistributionPoints = append(out.CRLDistributionPoints, string(uri))
- }
- }
-
- case 35:
- // RFC 5280, 4.2.1.1
- val := cryptobyte.String(e.Value)
- var akid cryptobyte.String
- if !val.ReadASN1(&akid, cryptobyte_asn1.SEQUENCE) {
- return errors.New("x509: invalid authority key identifier")
- }
- if akid.PeekASN1Tag(cryptobyte_asn1.Tag(0).ContextSpecific()) {
- if !akid.ReadASN1(&akid, cryptobyte_asn1.Tag(0).ContextSpecific()) {
- return errors.New("x509: invalid authority key identifier")
- }
- out.AuthorityKeyId = akid
- }
- case 37:
- out.ExtKeyUsage, out.UnknownExtKeyUsage, err = parseExtKeyUsageExtension(e.Value)
- if err != nil {
- return err
- }
- case 14:
- // RFC 5280, 4.2.1.2
- val := cryptobyte.String(e.Value)
- var skid cryptobyte.String
- if !val.ReadASN1(&skid, cryptobyte_asn1.OCTET_STRING) {
- return errors.New("x509: invalid subject key identifier")
- }
- out.SubjectKeyId = skid
- case 32:
- out.PolicyIdentifiers, err = parseCertificatePoliciesExtension(e.Value)
- if err != nil {
- return err
- }
- default:
- // Unknown extensions are recorded if critical.
- unhandled = true
- }
- } else if e.Id.Equal(oidExtensionAuthorityInfoAccess) {
- // RFC 5280 4.2.2.1: Authority Information Access
- val := cryptobyte.String(e.Value)
- if !val.ReadASN1(&val, cryptobyte_asn1.SEQUENCE) {
- return errors.New("x509: invalid authority info access")
- }
- for !val.Empty() {
- var aiaDER cryptobyte.String
- if !val.ReadASN1(&aiaDER, cryptobyte_asn1.SEQUENCE) {
- return errors.New("x509: invalid authority info access")
- }
- var method asn1.ObjectIdentifier
- if !aiaDER.ReadASN1ObjectIdentifier(&method) {
- return errors.New("x509: invalid authority info access")
- }
- if !aiaDER.PeekASN1Tag(cryptobyte_asn1.Tag(6).ContextSpecific()) {
- continue
- }
- if !aiaDER.ReadASN1(&aiaDER, cryptobyte_asn1.Tag(6).ContextSpecific()) {
- return errors.New("x509: invalid authority info access")
- }
- switch {
- case method.Equal(oidAuthorityInfoAccessOcsp):
- out.OCSPServer = append(out.OCSPServer, string(aiaDER))
- case method.Equal(oidAuthorityInfoAccessIssuers):
- out.IssuingCertificateURL = append(out.IssuingCertificateURL, string(aiaDER))
- }
- }
- } else {
- // Unknown extensions are recorded if critical.
- unhandled = true
- }
-
- if e.Critical && unhandled {
- out.UnhandledCriticalExtensions = append(out.UnhandledCriticalExtensions, e.Id)
- }
- }
-
- return nil
-}
-
-func parseCertificate(der []byte) (*Certificate, error) {
- cert := &Certificate{}
-
- input := cryptobyte.String(der)
- // we read the SEQUENCE including length and tag bytes so that
- // we can populate Certificate.Raw, before unwrapping the
- // SEQUENCE so it can be operated on
- if !input.ReadASN1Element(&input, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed certificate")
- }
- cert.Raw = input
- if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed certificate")
- }
-
- var tbs cryptobyte.String
- // do the same trick again as above to extract the raw
- // bytes for Certificate.RawTBSCertificate
- if !input.ReadASN1Element(&tbs, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed tbs certificate")
- }
- cert.RawTBSCertificate = tbs
- if !tbs.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed tbs certificate")
- }
-
- if !tbs.ReadOptionalASN1Integer(&cert.Version, cryptobyte_asn1.Tag(0).Constructed().ContextSpecific(), 0) {
- return nil, errors.New("x509: malformed version")
- }
- if cert.Version < 0 {
- return nil, errors.New("x509: malformed version")
- }
- // for backwards compatibility reasons Version is one-indexed,
- // rather than zero-indexed as defined in RFC 5280
- cert.Version++
- if cert.Version > 3 {
- return nil, errors.New("x509: invalid version")
- }
-
- serial := new(big.Int)
- if !tbs.ReadASN1Integer(serial) {
- return nil, errors.New("x509: malformed serial number")
- }
- // we ignore the presence of negative serial numbers because
- // of their prevalence, despite them being invalid
- // TODO(rolandshoemaker): revisit this decision, there are currently
- // only 10 trusted certificates with negative serial numbers
- // according to censys.io.
- cert.SerialNumber = serial
-
- var sigAISeq cryptobyte.String
- if !tbs.ReadASN1(&sigAISeq, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed signature algorithm identifier")
- }
- // Before parsing the inner algorithm identifier, extract
- // the outer algorithm identifier and make sure that they
- // match.
- var outerSigAISeq cryptobyte.String
- if !input.ReadASN1(&outerSigAISeq, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed algorithm identifier")
- }
- if !bytes.Equal(outerSigAISeq, sigAISeq) {
- return nil, errors.New("x509: inner and outer signature algorithm identifiers don't match")
- }
- sigAI, err := parseAI(sigAISeq)
- if err != nil {
- return nil, err
- }
- cert.SignatureAlgorithm = getSignatureAlgorithmFromAI(sigAI)
-
- var issuerSeq cryptobyte.String
- if !tbs.ReadASN1Element(&issuerSeq, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed issuer")
- }
- cert.RawIssuer = issuerSeq
- issuerRDNs, err := parseName(issuerSeq)
- if err != nil {
- return nil, err
- }
- cert.Issuer.FillFromRDNSequence(issuerRDNs)
-
- var validity cryptobyte.String
- if !tbs.ReadASN1(&validity, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed validity")
- }
- cert.NotBefore, cert.NotAfter, err = parseValidity(validity)
- if err != nil {
- return nil, err
- }
-
- var subjectSeq cryptobyte.String
- if !tbs.ReadASN1Element(&subjectSeq, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed issuer")
- }
- cert.RawSubject = subjectSeq
- subjectRDNs, err := parseName(subjectSeq)
- if err != nil {
- return nil, err
- }
- cert.Subject.FillFromRDNSequence(subjectRDNs)
-
- var spki cryptobyte.String
- if !tbs.ReadASN1Element(&spki, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed spki")
- }
- cert.RawSubjectPublicKeyInfo = spki
- if !spki.ReadASN1(&spki, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed spki")
- }
- var pkAISeq cryptobyte.String
- if !spki.ReadASN1(&pkAISeq, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed public key algorithm identifier")
- }
- pkAI, err := parseAI(pkAISeq)
- if err != nil {
- return nil, err
- }
- cert.PublicKeyAlgorithm = getPublicKeyAlgorithmFromOID(pkAI.Algorithm)
- var spk asn1.BitString
- if !spki.ReadASN1BitString(&spk) {
- return nil, errors.New("x509: malformed subjectPublicKey")
- }
- cert.PublicKey, err = parsePublicKey(cert.PublicKeyAlgorithm, &publicKeyInfo{
- Algorithm: pkAI,
- PublicKey: spk,
- })
- if err != nil {
- return nil, err
- }
-
- if cert.Version > 1 {
- if !tbs.SkipOptionalASN1(cryptobyte_asn1.Tag(1).ContextSpecific()) {
- return nil, errors.New("x509: malformed issuerUniqueID")
- }
- if !tbs.SkipOptionalASN1(cryptobyte_asn1.Tag(2).ContextSpecific()) {
- return nil, errors.New("x509: malformed subjectUniqueID")
- }
- if cert.Version == 3 {
- var extensions cryptobyte.String
- var present bool
- if !tbs.ReadOptionalASN1(&extensions, &present, cryptobyte_asn1.Tag(3).Constructed().ContextSpecific()) {
- return nil, errors.New("x509: malformed extensions")
- }
- if present {
- if !extensions.ReadASN1(&extensions, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed extensions")
- }
- for !extensions.Empty() {
- var extension cryptobyte.String
- if !extensions.ReadASN1(&extension, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed extension")
- }
- ext, err := parseExtension(extension)
- if err != nil {
- return nil, err
- }
- cert.Extensions = append(cert.Extensions, ext)
- }
- err = processExtensions(cert)
- if err != nil {
- return nil, err
- }
- }
- }
- }
-
- var signature asn1.BitString
- if !input.ReadASN1BitString(&signature) {
- return nil, errors.New("x509: malformed signature")
- }
- cert.Signature = signature.RightAlign()
-
- return cert, nil
-}
-
-// ParseCertificate parses a single certificate from the given ASN.1 DER data.
-func ParseCertificate(der []byte) (*Certificate, error) {
- cert, err := parseCertificate(der)
- if err != nil {
- return nil, err
- }
- if len(der) != len(cert.Raw) {
- return nil, errors.New("x509: trailing data")
- }
- return cert, err
-}
-
-// ParseCertificates parses one or more certificates from the given ASN.1 DER
-// data. The certificates must be concatenated with no intermediate padding.
-func ParseCertificates(der []byte) ([]*Certificate, error) {
- var certs []*Certificate
- for len(der) > 0 {
- cert, err := parseCertificate(der)
- if err != nil {
- return nil, err
- }
- certs = append(certs, cert)
- der = der[len(cert.Raw):]
- }
- return certs, nil
-}
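For reference, a self-contained usage sketch of the exported entry point. It builds a throwaway self-signed certificate with this package's own CreateCertificate just to obtain DER bytes, then parses them back; "example.test" is a placeholder name:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"log"
	"math/big"
	"time"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "example.test"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(time.Hour),
		DNSNames:     []string{"example.test"},
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		log.Fatal(err)
	}
	cert, err := x509.ParseCertificate(der)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cert.Subject.CommonName, cert.DNSNames, cert.Version) // example.test [example.test] 3
}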
diff --git a/contrib/go/_std_1.18/src/crypto/x509/pkix/pkix.go b/contrib/go/_std_1.18/src/crypto/x509/pkix/pkix.go
deleted file mode 100644
index e9179ed067..0000000000
--- a/contrib/go/_std_1.18/src/crypto/x509/pkix/pkix.go
+++ /dev/null
@@ -1,316 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package pkix contains shared, low level structures used for ASN.1 parsing
-// and serialization of X.509 certificates, CRL and OCSP.
-package pkix
-
-import (
- "encoding/asn1"
- "encoding/hex"
- "fmt"
- "math/big"
- "time"
-)
-
-// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC
-// 5280, section 4.1.1.2.
-type AlgorithmIdentifier struct {
- Algorithm asn1.ObjectIdentifier
- Parameters asn1.RawValue `asn1:"optional"`
-}
-
-type RDNSequence []RelativeDistinguishedNameSET
-
-var attributeTypeNames = map[string]string{
- "2.5.4.6": "C",
- "2.5.4.10": "O",
- "2.5.4.11": "OU",
- "2.5.4.3": "CN",
- "2.5.4.5": "SERIALNUMBER",
- "2.5.4.7": "L",
- "2.5.4.8": "ST",
- "2.5.4.9": "STREET",
- "2.5.4.17": "POSTALCODE",
-}
-
-// String returns a string representation of the sequence r,
-// roughly following the RFC 2253 Distinguished Names syntax.
-func (r RDNSequence) String() string {
- s := ""
- for i := 0; i < len(r); i++ {
- rdn := r[len(r)-1-i]
- if i > 0 {
- s += ","
- }
- for j, tv := range rdn {
- if j > 0 {
- s += "+"
- }
-
- oidString := tv.Type.String()
- typeName, ok := attributeTypeNames[oidString]
- if !ok {
- derBytes, err := asn1.Marshal(tv.Value)
- if err == nil {
- s += oidString + "=#" + hex.EncodeToString(derBytes)
- continue // No value escaping necessary.
- }
-
- typeName = oidString
- }
-
- valueString := fmt.Sprint(tv.Value)
- escaped := make([]rune, 0, len(valueString))
-
- for k, c := range valueString {
- escape := false
-
- switch c {
- case ',', '+', '"', '\\', '<', '>', ';':
- escape = true
-
- case ' ':
- escape = k == 0 || k == len(valueString)-1
-
- case '#':
- escape = k == 0
- }
-
- if escape {
- escaped = append(escaped, '\\', c)
- } else {
- escaped = append(escaped, c)
- }
- }
-
- s += typeName + "=" + string(escaped)
- }
- }
-
- return s
-}
-
-type RelativeDistinguishedNameSET []AttributeTypeAndValue
-
-// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in
-// RFC 5280, Section 4.1.2.4.
-type AttributeTypeAndValue struct {
- Type asn1.ObjectIdentifier
- Value any
-}
-
-// AttributeTypeAndValueSET represents a set of ASN.1 sequences of
-// AttributeTypeAndValue sequences from RFC 2986 (PKCS #10).
-type AttributeTypeAndValueSET struct {
- Type asn1.ObjectIdentifier
- Value [][]AttributeTypeAndValue `asn1:"set"`
-}
-
-// Extension represents the ASN.1 structure of the same name. See RFC
-// 5280, section 4.2.
-type Extension struct {
- Id asn1.ObjectIdentifier
- Critical bool `asn1:"optional"`
- Value []byte
-}
-
-// Name represents an X.509 distinguished name. This only includes the common
-// elements of a DN. Note that Name is only an approximation of the X.509
-// structure. If an accurate representation is needed, asn1.Unmarshal the raw
-// subject or issuer as an RDNSequence.
-type Name struct {
- Country, Organization, OrganizationalUnit []string
- Locality, Province []string
- StreetAddress, PostalCode []string
- SerialNumber, CommonName string
-
- // Names contains all parsed attributes. When parsing distinguished names,
- // this can be used to extract non-standard attributes that are not parsed
- // by this package. When marshaling to RDNSequences, the Names field is
- // ignored, see ExtraNames.
- Names []AttributeTypeAndValue
-
- // ExtraNames contains attributes to be copied, raw, into any marshaled
- // distinguished names. Values override any attributes with the same OID.
- // The ExtraNames field is not populated when parsing, see Names.
- ExtraNames []AttributeTypeAndValue
-}
-
-// FillFromRDNSequence populates n from the provided RDNSequence.
-// Multi-entry RDNs are flattened, all entries are added to the
-// relevant n fields, and the grouping is not preserved.
-func (n *Name) FillFromRDNSequence(rdns *RDNSequence) {
- for _, rdn := range *rdns {
- if len(rdn) == 0 {
- continue
- }
-
- for _, atv := range rdn {
- n.Names = append(n.Names, atv)
- value, ok := atv.Value.(string)
- if !ok {
- continue
- }
-
- t := atv.Type
- if len(t) == 4 && t[0] == 2 && t[1] == 5 && t[2] == 4 {
- switch t[3] {
- case 3:
- n.CommonName = value
- case 5:
- n.SerialNumber = value
- case 6:
- n.Country = append(n.Country, value)
- case 7:
- n.Locality = append(n.Locality, value)
- case 8:
- n.Province = append(n.Province, value)
- case 9:
- n.StreetAddress = append(n.StreetAddress, value)
- case 10:
- n.Organization = append(n.Organization, value)
- case 11:
- n.OrganizationalUnit = append(n.OrganizationalUnit, value)
- case 17:
- n.PostalCode = append(n.PostalCode, value)
- }
- }
- }
- }
-}
-
-var (
- oidCountry = []int{2, 5, 4, 6}
- oidOrganization = []int{2, 5, 4, 10}
- oidOrganizationalUnit = []int{2, 5, 4, 11}
- oidCommonName = []int{2, 5, 4, 3}
- oidSerialNumber = []int{2, 5, 4, 5}
- oidLocality = []int{2, 5, 4, 7}
- oidProvince = []int{2, 5, 4, 8}
- oidStreetAddress = []int{2, 5, 4, 9}
- oidPostalCode = []int{2, 5, 4, 17}
-)
-
-// appendRDNs appends a relativeDistinguishedNameSET to the given RDNSequence
-// and returns the new value. The relativeDistinguishedNameSET contains an
-// attributeTypeAndValue for each of the given values. See RFC 5280, A.1, and
-// search for AttributeTypeAndValue.
-func (n Name) appendRDNs(in RDNSequence, values []string, oid asn1.ObjectIdentifier) RDNSequence {
- if len(values) == 0 || oidInAttributeTypeAndValue(oid, n.ExtraNames) {
- return in
- }
-
- s := make([]AttributeTypeAndValue, len(values))
- for i, value := range values {
- s[i].Type = oid
- s[i].Value = value
- }
-
- return append(in, s)
-}
-
-// ToRDNSequence converts n into a single RDNSequence. The following
-// attributes are encoded as multi-value RDNs:
-//
-// - Country
-// - Organization
-// - OrganizationalUnit
-// - Locality
-// - Province
-// - StreetAddress
-// - PostalCode
-//
-// Each ExtraNames entry is encoded as an individual RDN.
-func (n Name) ToRDNSequence() (ret RDNSequence) {
- ret = n.appendRDNs(ret, n.Country, oidCountry)
- ret = n.appendRDNs(ret, n.Province, oidProvince)
- ret = n.appendRDNs(ret, n.Locality, oidLocality)
- ret = n.appendRDNs(ret, n.StreetAddress, oidStreetAddress)
- ret = n.appendRDNs(ret, n.PostalCode, oidPostalCode)
- ret = n.appendRDNs(ret, n.Organization, oidOrganization)
- ret = n.appendRDNs(ret, n.OrganizationalUnit, oidOrganizationalUnit)
- if len(n.CommonName) > 0 {
- ret = n.appendRDNs(ret, []string{n.CommonName}, oidCommonName)
- }
- if len(n.SerialNumber) > 0 {
- ret = n.appendRDNs(ret, []string{n.SerialNumber}, oidSerialNumber)
- }
- for _, atv := range n.ExtraNames {
- ret = append(ret, []AttributeTypeAndValue{atv})
- }
-
- return ret
-}
-
-// String returns the string form of n, roughly following
-// the RFC 2253 Distinguished Names syntax.
-func (n Name) String() string {
- var rdns RDNSequence
- // If there are no ExtraNames, surface the parsed value (all entries in
- // Names) instead.
- if n.ExtraNames == nil {
- for _, atv := range n.Names {
- t := atv.Type
- if len(t) == 4 && t[0] == 2 && t[1] == 5 && t[2] == 4 {
- switch t[3] {
- case 3, 5, 6, 7, 8, 9, 10, 11, 17:
- // These attributes were already parsed into named fields.
- continue
- }
- }
- // Place non-standard parsed values at the beginning of the sequence
- // so they will be at the end of the string. See Issue 39924.
- rdns = append(rdns, []AttributeTypeAndValue{atv})
- }
- }
- rdns = append(rdns, n.ToRDNSequence()...)
- return rdns.String()
-}
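A short sketch of these escaping and ordering rules through the exported API: ',' and '+' are backslash-escaped, and because String walks the RDNSequence in reverse, CN prints first:

package main

import (
	"crypto/x509/pkix"
	"fmt"
)

func main() {
	n := pkix.Name{
		CommonName:   "J. Smith + Co",
		Organization: []string{"Example, Inc."},
		Country:      []string{"US"},
	}
	fmt.Println(n.String())
	// Output: CN=J. Smith \+ Co,O=Example\, Inc.,C=US
}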
-
-// oidInAttributeTypeAndValue reports whether a type with the given OID exists
-// in atv.
-func oidInAttributeTypeAndValue(oid asn1.ObjectIdentifier, atv []AttributeTypeAndValue) bool {
- for _, a := range atv {
- if a.Type.Equal(oid) {
- return true
- }
- }
- return false
-}
-
-// CertificateList represents the ASN.1 structure of the same name. See RFC
-// 5280, section 5.1. Use Certificate.CheckCRLSignature to verify the
-// signature.
-type CertificateList struct {
- TBSCertList TBSCertificateList
- SignatureAlgorithm AlgorithmIdentifier
- SignatureValue asn1.BitString
-}
-
-// HasExpired reports whether certList should have been updated by now.
-func (certList *CertificateList) HasExpired(now time.Time) bool {
- return !now.Before(certList.TBSCertList.NextUpdate)
-}
-
-// TBSCertificateList represents the ASN.1 structure of the same name. See RFC
-// 5280, section 5.1.
-type TBSCertificateList struct {
- Raw asn1.RawContent
- Version int `asn1:"optional,default:0"`
- Signature AlgorithmIdentifier
- Issuer RDNSequence
- ThisUpdate time.Time
- NextUpdate time.Time `asn1:"optional"`
- RevokedCertificates []RevokedCertificate `asn1:"optional"`
- Extensions []Extension `asn1:"tag:0,optional,explicit"`
-}
-
-// RevokedCertificate represents the ASN.1 structure of the same name. See RFC
-// 5280, section 5.1.
-type RevokedCertificate struct {
- SerialNumber *big.Int
- RevocationTime time.Time
- Extensions []Extension `asn1:"optional"`
-}
diff --git a/contrib/go/_std_1.18/src/crypto/x509/root.go b/contrib/go/_std_1.18/src/crypto/x509/root.go
deleted file mode 100644
index eef9c047b2..0000000000
--- a/contrib/go/_std_1.18/src/crypto/x509/root.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package x509
-
-// To update the embedded iOS root store, update the -version
-// argument to the latest security_certificates version from
-// https://opensource.apple.com/source/security_certificates/
-// and run "go generate". See https://golang.org/issue/38843.
-//go:generate go run root_ios_gen.go -version 55188.120.1.0.1
-
-import "sync"
-
-var (
- once sync.Once
- systemRoots *CertPool
- systemRootsErr error
-)
-
-func systemRootsPool() *CertPool {
- once.Do(initSystemRoots)
- return systemRoots
-}
-
-func initSystemRoots() {
- systemRoots, systemRootsErr = loadSystemRoots()
- if systemRootsErr != nil {
- systemRoots = nil
- }
-}
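The sync.Once gate means the platform root store is loaded at most once per process. Callers reach it through exported APIs such as SystemCertPool, or implicitly through Certificate.Verify when VerifyOptions.Roots is nil. A minimal sketch (whether an error is returned depends on the platform):

package main

import (
	"crypto/x509"
	"fmt"
	"log"
)

func main() {
	// Triggers the once.Do(initSystemRoots) path above on first use.
	pool, err := x509.SystemCertPool()
	if err != nil {
		log.Fatal(err) // e.g. the system store could not be loaded
	}
	fmt.Println("system pool loaded:", pool != nil)
}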
diff --git a/contrib/go/_std_1.18/src/crypto/x509/root_darwin.go b/contrib/go/_std_1.18/src/crypto/x509/root_darwin.go
deleted file mode 100644
index ad365f577e..0000000000
--- a/contrib/go/_std_1.18/src/crypto/x509/root_darwin.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package x509
-
-import (
- macOS "crypto/x509/internal/macos"
- "errors"
-)
-
-func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
- certs := macOS.CFArrayCreateMutable()
- defer macOS.ReleaseCFArray(certs)
- leaf := macOS.SecCertificateCreateWithData(c.Raw)
- if leaf == 0 {
- return nil, errors.New("invalid leaf certificate")
- }
- macOS.CFArrayAppendValue(certs, leaf)
- if opts.Intermediates != nil {
- for _, lc := range opts.Intermediates.lazyCerts {
- c, err := lc.getCert()
- if err != nil {
- return nil, err
- }
- sc := macOS.SecCertificateCreateWithData(c.Raw)
- if sc != 0 {
- macOS.CFArrayAppendValue(certs, sc)
- }
- }
- }
-
- policies := macOS.CFArrayCreateMutable()
- defer macOS.ReleaseCFArray(policies)
- sslPolicy := macOS.SecPolicyCreateSSL(opts.DNSName)
- macOS.CFArrayAppendValue(policies, sslPolicy)
-
- trustObj, err := macOS.SecTrustCreateWithCertificates(certs, policies)
- if err != nil {
- return nil, err
- }
- defer macOS.CFRelease(trustObj)
-
- if !opts.CurrentTime.IsZero() {
- dateRef := macOS.TimeToCFDateRef(opts.CurrentTime)
- defer macOS.CFRelease(dateRef)
- if err := macOS.SecTrustSetVerifyDate(trustObj, dateRef); err != nil {
- return nil, err
- }
- }
-
- // TODO(roland): we may want to allow passing in SCTs via VerifyOptions and
- // set them via SecTrustSetSignedCertificateTimestamps, since Apple will
- // always enforce its SCT requirements, and there are still _some_ people
- // using TLS or OCSP for that.
-
- if err := macOS.SecTrustEvaluateWithError(trustObj); err != nil {
- return nil, err
- }
-
- chain := [][]*Certificate{{}}
- numCerts := macOS.SecTrustGetCertificateCount(trustObj)
- for i := 0; i < numCerts; i++ {
- certRef := macOS.SecTrustGetCertificateAtIndex(trustObj, i)
- cert, err := exportCertificate(certRef)
- if err != nil {
- return nil, err
- }
- chain[0] = append(chain[0], cert)
- }
- if len(chain[0]) == 0 {
- // This should _never_ happen, but check to be safe.
- return nil, errors.New("x509: macOS certificate verification internal error")
- }
-
- if opts.DNSName != "" {
- // If we have a DNS name, apply our own name verification
- if err := chain[0][0].VerifyHostname(opts.DNSName); err != nil {
- return nil, err
- }
- }
-
- keyUsages := opts.KeyUsages
- if len(keyUsages) == 0 {
- keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth}
- }
-
- // If any key usage is acceptable then we're done.
- for _, usage := range keyUsages {
- if usage == ExtKeyUsageAny {
- return chain, nil
- }
- }
-
- if !checkChainForKeyUsage(chain[0], keyUsages) {
- return nil, CertificateInvalidError{c, IncompatibleUsage, ""}
- }
-
- return chain, nil
-}
-
-// exportCertificate returns a *Certificate for a SecCertificateRef.
-func exportCertificate(cert macOS.CFRef) (*Certificate, error) {
- data, err := macOS.SecCertificateCopyData(cert)
- if err != nil {
- return nil, err
- }
- return ParseCertificate(data)
-}
-
-func loadSystemRoots() (*CertPool, error) {
- return &CertPool{systemPool: true}, nil
-}
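Note that systemVerify is only reached when VerifyOptions.Roots is nil; with an explicit root pool the pure-Go verifier runs instead, on macOS included. The self-contained sketch below builds a two-certificate chain (a root CA signing a leaf) and verifies it against explicit roots; all names are placeholders:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"log"
	"math/big"
	"time"
)

// mustCert parses freshly created DER or aborts; a demo-only helper.
func mustCert(der []byte, err error) *x509.Certificate {
	if err != nil {
		log.Fatal(err)
	}
	cert, err := x509.ParseCertificate(der)
	if err != nil {
		log.Fatal(err)
	}
	return cert
}

func main() {
	caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	leafKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	now := time.Now()

	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "Test Root"},
		NotBefore:             now,
		NotAfter:              now.Add(time.Hour),
		IsCA:                  true,
		BasicConstraintsValid: true,
		KeyUsage:              x509.KeyUsageCertSign,
	}
	ca := mustCert(x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey))

	leafTmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{CommonName: "leaf"},
		NotBefore:    now,
		NotAfter:     now.Add(time.Hour),
		DNSNames:     []string{"example.test"},
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}
	leaf := mustCert(x509.CreateCertificate(rand.Reader, leafTmpl, caTmpl, &leafKey.PublicKey, caKey))

	roots := x509.NewCertPool()
	roots.AddCert(ca)
	chains, err := leaf.Verify(x509.VerifyOptions{DNSName: "example.test", Roots: roots})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("chain length:", len(chains[0])) // 2: leaf then root
}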
diff --git a/contrib/go/_std_1.18/src/crypto/x509/sec1.go b/contrib/go/_std_1.18/src/crypto/x509/sec1.go
deleted file mode 100644
index 52c108ff1d..0000000000
--- a/contrib/go/_std_1.18/src/crypto/x509/sec1.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package x509
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "encoding/asn1"
- "errors"
- "fmt"
- "math/big"
-)
-
-const ecPrivKeyVersion = 1
-
-// ecPrivateKey reflects an ASN.1 Elliptic Curve Private Key Structure.
-// References:
-// RFC 5915
-// SEC1 - http://www.secg.org/sec1-v2.pdf
-// Per RFC 5915 the NamedCurveOID is marked as ASN.1 OPTIONAL, but in
-// practice it is almost always present.
-type ecPrivateKey struct {
- Version int
- PrivateKey []byte
- NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"`
- PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"`
-}
-
-// ParseECPrivateKey parses an EC private key in SEC 1, ASN.1 DER form.
-//
-// This kind of key is commonly encoded in PEM blocks of type "EC PRIVATE KEY".
-func ParseECPrivateKey(der []byte) (*ecdsa.PrivateKey, error) {
- return parseECPrivateKey(nil, der)
-}
-
-// MarshalECPrivateKey converts an EC private key to SEC 1, ASN.1 DER form.
-//
-// This kind of key is commonly encoded in PEM blocks of type "EC PRIVATE KEY".
-// For a more flexible key format which is not EC specific, use
-// MarshalPKCS8PrivateKey.
-func MarshalECPrivateKey(key *ecdsa.PrivateKey) ([]byte, error) {
- oid, ok := oidFromNamedCurve(key.Curve)
- if !ok {
- return nil, errors.New("x509: unknown elliptic curve")
- }
-
- return marshalECPrivateKeyWithOID(key, oid)
-}
-
-// marshalECPrivateKeyWithOID marshals an EC private key into ASN.1, DER
-// format and sets the curve ID to the given OID, or omits it if OID is nil.
-func marshalECPrivateKeyWithOID(key *ecdsa.PrivateKey, oid asn1.ObjectIdentifier) ([]byte, error) {
- privateKey := make([]byte, (key.Curve.Params().N.BitLen()+7)/8)
- return asn1.Marshal(ecPrivateKey{
- Version: 1,
- PrivateKey: key.D.FillBytes(privateKey),
- NamedCurveOID: oid,
- PublicKey: asn1.BitString{Bytes: elliptic.Marshal(key.Curve, key.X, key.Y)},
- })
-}
-
-// parseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure.
-// The OID for the named curve may be provided from another source (such as
-// the PKCS8 container) - if it is provided then use this instead of the OID
-// that may exist in the EC private key structure.
-func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *ecdsa.PrivateKey, err error) {
- var privKey ecPrivateKey
- if _, err := asn1.Unmarshal(der, &privKey); err != nil {
- if _, err := asn1.Unmarshal(der, &pkcs8{}); err == nil {
- return nil, errors.New("x509: failed to parse private key (use ParsePKCS8PrivateKey instead for this key format)")
- }
- if _, err := asn1.Unmarshal(der, &pkcs1PrivateKey{}); err == nil {
- return nil, errors.New("x509: failed to parse private key (use ParsePKCS1PrivateKey instead for this key format)")
- }
- return nil, errors.New("x509: failed to parse EC private key: " + err.Error())
- }
- if privKey.Version != ecPrivKeyVersion {
- return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version)
- }
-
- var curve elliptic.Curve
- if namedCurveOID != nil {
- curve = namedCurveFromOID(*namedCurveOID)
- } else {
- curve = namedCurveFromOID(privKey.NamedCurveOID)
- }
- if curve == nil {
- return nil, errors.New("x509: unknown elliptic curve")
- }
-
- k := new(big.Int).SetBytes(privKey.PrivateKey)
- curveOrder := curve.Params().N
- if k.Cmp(curveOrder) >= 0 {
- return nil, errors.New("x509: invalid elliptic curve private key value")
- }
- priv := new(ecdsa.PrivateKey)
- priv.Curve = curve
- priv.D = k
-
- privateKey := make([]byte, (curveOrder.BitLen()+7)/8)
-
- // Some private keys have leading zero padding. This is invalid
- // according to [SEC1], but this code will ignore it.
- for len(privKey.PrivateKey) > len(privateKey) {
- if privKey.PrivateKey[0] != 0 {
- return nil, errors.New("x509: invalid private key length")
- }
- privKey.PrivateKey = privKey.PrivateKey[1:]
- }
-
- // Some private keys remove all leading zeros; this is also invalid
- // according to [SEC1], but since OpenSSL used to do this, we ignore
- // it too.
- copy(privateKey[len(privateKey)-len(privKey.PrivateKey):], privKey.PrivateKey)
- priv.X, priv.Y = curve.ScalarBaseMult(privateKey)
-
- return priv, nil
-}
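A round-trip sketch through the two exported functions, including the conventional "EC PRIVATE KEY" PEM framing mentioned in the doc comments:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	der, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		log.Fatal(err)
	}
	// SEC 1 keys conventionally travel in "EC PRIVATE KEY" PEM blocks.
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: der})
	block, _ := pem.Decode(pemBytes)
	parsed, err := x509.ParseECPrivateKey(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("round trip ok:", parsed.D.Cmp(key.D) == 0) // true
}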
diff --git a/contrib/go/_std_1.18/src/crypto/x509/verify.go b/contrib/go/_std_1.18/src/crypto/x509/verify.go
deleted file mode 100644
index 4be4eb6095..0000000000
--- a/contrib/go/_std_1.18/src/crypto/x509/verify.go
+++ /dev/null
@@ -1,1114 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package x509
-
-import (
- "bytes"
- "errors"
- "fmt"
- "net"
- "net/url"
- "reflect"
- "runtime"
- "strings"
- "time"
- "unicode/utf8"
-)
-
-type InvalidReason int
-
-const (
- // NotAuthorizedToSign results when a certificate is signed by another
- // which isn't marked as a CA certificate.
- NotAuthorizedToSign InvalidReason = iota
- // Expired results when a certificate has expired, based on the time
- // given in the VerifyOptions.
- Expired
- // CANotAuthorizedForThisName results when an intermediate or root
- // certificate has a name constraint which doesn't permit a DNS or
- // other name (including IP address) in the leaf certificate.
- CANotAuthorizedForThisName
- // TooManyIntermediates results when a path length constraint is
- // violated.
- TooManyIntermediates
- // IncompatibleUsage results when the certificate's key usage indicates
- // that it may only be used for a different purpose.
- IncompatibleUsage
- // NameMismatch results when the subject name of a parent certificate
- // does not match the issuer name in the child.
- NameMismatch
- // NameConstraintsWithoutSANs is a legacy error and is no longer returned.
- NameConstraintsWithoutSANs
- // UnconstrainedName results when a CA certificate contains permitted
- // name constraints, but leaf certificate contains a name of an
- // unsupported or unconstrained type.
- UnconstrainedName
- // TooManyConstraints results when the number of comparison operations
- // needed to check a certificate exceeds the limit set by
- // VerifyOptions.MaxConstraintComparisions. This limit exists to
- // prevent pathological certificates from consuming excessive amounts of
- // CPU time to verify.
- TooManyConstraints
- // CANotAuthorizedForExtKeyUsage results when an intermediate or root
- // certificate does not permit a requested extended key usage.
- CANotAuthorizedForExtKeyUsage
-)
-
-// CertificateInvalidError results when a certificate is invalid for one of
-// the InvalidReason values above. Users of this library probably want to
-// handle all these errors uniformly.
-type CertificateInvalidError struct {
- Cert *Certificate
- Reason InvalidReason
- Detail string
-}
-
-func (e CertificateInvalidError) Error() string {
- switch e.Reason {
- case NotAuthorizedToSign:
- return "x509: certificate is not authorized to sign other certificates"
- case Expired:
- return "x509: certificate has expired or is not yet valid: " + e.Detail
- case CANotAuthorizedForThisName:
- return "x509: a root or intermediate certificate is not authorized to sign for this name: " + e.Detail
- case CANotAuthorizedForExtKeyUsage:
- return "x509: a root or intermediate certificate is not authorized for an extended key usage: " + e.Detail
- case TooManyIntermediates:
- return "x509: too many intermediates for path length constraint"
- case IncompatibleUsage:
- return "x509: certificate specifies an incompatible key usage"
- case NameMismatch:
- return "x509: issuer name does not match subject from issuing certificate"
- case NameConstraintsWithoutSANs:
- return "x509: issuer has name constraints but leaf doesn't have a SAN extension"
- case UnconstrainedName:
- return "x509: issuer has name constraints but leaf contains unknown or unconstrained name: " + e.Detail
- }
- return "x509: unknown error"
-}
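
Since Verify returns these error types by value, a caller can branch on the Reason field with errors.As. A minimal sketch under that assumption; the package name and helper function are placeholders, not part of this file:

package x509util // hypothetical helper package

import (
	"crypto/x509"
	"errors"
	"fmt"
)

// classifyVerifyError assumes err came from (*x509.Certificate).Verify.
func classifyVerifyError(err error) {
	var cie x509.CertificateInvalidError
	if errors.As(err, &cie) {
		switch cie.Reason {
		case x509.Expired:
			fmt.Println("clock or certificate validity problem:", cie.Detail)
		case x509.IncompatibleUsage:
			fmt.Println("chain is valid but not for the requested EKU")
		default:
			fmt.Println("invalid certificate:", err)
		}
		return
	}
	var uae x509.UnknownAuthorityError
	if errors.As(err, &uae) {
		fmt.Println("no path to a trusted root:", err)
		return
	}
	fmt.Println("other verification error:", err)
}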
-
-// HostnameError results when the set of authorized names doesn't match the
-// requested name.
-type HostnameError struct {
- Certificate *Certificate
- Host string
-}
-
-func (h HostnameError) Error() string {
- c := h.Certificate
-
- if !c.hasSANExtension() && matchHostnames(c.Subject.CommonName, h.Host) {
- return "x509: certificate relies on legacy Common Name field, use SANs instead"
- }
-
- var valid string
- if ip := net.ParseIP(h.Host); ip != nil {
- // Trying to validate an IP
- if len(c.IPAddresses) == 0 {
- return "x509: cannot validate certificate for " + h.Host + " because it doesn't contain any IP SANs"
- }
- for _, san := range c.IPAddresses {
- if len(valid) > 0 {
- valid += ", "
- }
- valid += san.String()
- }
- } else {
- valid = strings.Join(c.DNSNames, ", ")
- }
-
- if len(valid) == 0 {
- return "x509: certificate is not valid for any names, but wanted to match " + h.Host
- }
- return "x509: certificate is valid for " + valid + ", not " + h.Host
-}
-
-// UnknownAuthorityError results when the certificate issuer is unknown.
-type UnknownAuthorityError struct {
- Cert *Certificate
- // hintErr contains an error that may be helpful in determining why an
- // authority wasn't found.
- hintErr error
- // hintCert contains a possible authority certificate that was rejected
- // because of the error in hintErr.
- hintCert *Certificate
-}
-
-func (e UnknownAuthorityError) Error() string {
- s := "x509: certificate signed by unknown authority"
- if e.hintErr != nil {
- certName := e.hintCert.Subject.CommonName
- if len(certName) == 0 {
- if len(e.hintCert.Subject.Organization) > 0 {
- certName = e.hintCert.Subject.Organization[0]
- } else {
- certName = "serial:" + e.hintCert.SerialNumber.String()
- }
- }
- s += fmt.Sprintf(" (possibly because of %q while trying to verify candidate authority certificate %q)", e.hintErr, certName)
- }
- return s
-}
-
-// SystemRootsError results when we fail to load the system root certificates.
-type SystemRootsError struct {
- Err error
-}
-
-func (se SystemRootsError) Error() string {
- msg := "x509: failed to load system roots and no roots provided"
- if se.Err != nil {
- return msg + "; " + se.Err.Error()
- }
- return msg
-}
-
-func (se SystemRootsError) Unwrap() error { return se.Err }
-
-// errNotParsed is returned when a certificate without ASN.1 contents is
-// verified. Platform-specific verification needs the ASN.1 contents.
-var errNotParsed = errors.New("x509: missing ASN.1 contents; use ParseCertificate")
-
-// VerifyOptions contains parameters for Certificate.Verify.
-type VerifyOptions struct {
- // DNSName, if set, is checked against the leaf certificate with
- // Certificate.VerifyHostname or the platform verifier.
- DNSName string
-
- // Intermediates is an optional pool of certificates that are not trust
- // anchors, but can be used to form a chain from the leaf certificate to a
- // root certificate.
- Intermediates *CertPool
- // Roots is the set of trusted root certificates the leaf certificate needs
- // to chain up to. If nil, the system roots or the platform verifier are used.
- Roots *CertPool
-
- // CurrentTime is used to check the validity of all certificates in the
- // chain. If zero, the current time is used.
- CurrentTime time.Time
-
- // KeyUsages specifies which Extended Key Usage values are acceptable. A
- // chain is accepted if it allows any of the listed values. An empty list
- // means ExtKeyUsageServerAuth. To accept any key usage, include ExtKeyUsageAny.
- KeyUsages []ExtKeyUsage
-
- // MaxConstraintComparisions is the maximum number of comparisons to
- // perform when checking a given certificate's name constraints. If
- // zero, a sensible default is used. This limit prevents pathological
- // certificates from consuming excessive amounts of CPU time when
- // validating. It does not apply to the platform verifier.
- MaxConstraintComparisions int
-}
-
-const (
- leafCertificate = iota
- intermediateCertificate
- rootCertificate
-)
-
-// rfc2821Mailbox represents a “mailbox” (which is an email address to most
-// people) by breaking it into the “local” (i.e. before the '@') and “domain”
-// parts.
-type rfc2821Mailbox struct {
- local, domain string
-}
-
-// parseRFC2821Mailbox parses an email address into local and domain parts,
-// based on the ABNF for a “Mailbox” from RFC 2821. According to RFC 5280,
-// Section 4.2.1.6 that's correct for an rfc822Name from a certificate: “The
-// format of an rfc822Name is a "Mailbox" as defined in RFC 2821, Section 4.1.2”.
-func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) {
- if len(in) == 0 {
- return mailbox, false
- }
-
- localPartBytes := make([]byte, 0, len(in)/2)
-
- if in[0] == '"' {
- // Quoted-string = DQUOTE *qcontent DQUOTE
- // non-whitespace-control = %d1-8 / %d11 / %d12 / %d14-31 / %d127
- // qcontent = qtext / quoted-pair
- // qtext = non-whitespace-control /
- // %d33 / %d35-91 / %d93-126
- // quoted-pair = ("\" text) / obs-qp
- // text = %d1-9 / %d11 / %d12 / %d14-127 / obs-text
- //
- // (Names beginning with “obs-” are the obsolete syntax from RFC 2822,
- // Section 4. Since it has been 16 years, we no longer accept that.)
- in = in[1:]
- QuotedString:
- for {
- if len(in) == 0 {
- return mailbox, false
- }
- c := in[0]
- in = in[1:]
-
- switch {
- case c == '"':
- break QuotedString
-
- case c == '\\':
- // quoted-pair
- if len(in) == 0 {
- return mailbox, false
- }
- if in[0] == 11 ||
- in[0] == 12 ||
- (1 <= in[0] && in[0] <= 9) ||
- (14 <= in[0] && in[0] <= 127) {
- localPartBytes = append(localPartBytes, in[0])
- in = in[1:]
- } else {
- return mailbox, false
- }
-
- case c == 11 ||
- c == 12 ||
- // Space (char 32) is not allowed based on the
- // BNF, but RFC 3696 gives an example that
- // assumes that it is. Several “verified”
- // errata continue to argue about this point.
- // We choose to accept it.
- c == 32 ||
- c == 33 ||
- c == 127 ||
- (1 <= c && c <= 8) ||
- (14 <= c && c <= 31) ||
- (35 <= c && c <= 91) ||
- (93 <= c && c <= 126):
- // qtext
- localPartBytes = append(localPartBytes, c)
-
- default:
- return mailbox, false
- }
- }
- } else {
- // Atom ("." Atom)*
- NextChar:
- for len(in) > 0 {
- // atext from RFC 2822, Section 3.2.4
- c := in[0]
-
- switch {
- case c == '\\':
- // Examples given in RFC 3696 suggest that
- // escaped characters can appear outside of a
- // quoted string. Several “verified” errata
- // continue to argue the point. We choose to
- // accept it.
- in = in[1:]
- if len(in) == 0 {
- return mailbox, false
- }
- fallthrough
-
- case ('0' <= c && c <= '9') ||
- ('a' <= c && c <= 'z') ||
- ('A' <= c && c <= 'Z') ||
- c == '!' || c == '#' || c == '$' || c == '%' ||
- c == '&' || c == '\'' || c == '*' || c == '+' ||
- c == '-' || c == '/' || c == '=' || c == '?' ||
- c == '^' || c == '_' || c == '`' || c == '{' ||
- c == '|' || c == '}' || c == '~' || c == '.':
- localPartBytes = append(localPartBytes, in[0])
- in = in[1:]
-
- default:
- break NextChar
- }
- }
-
- if len(localPartBytes) == 0 {
- return mailbox, false
- }
-
- // From RFC 3696, Section 3:
- // “period (".") may also appear, but may not be used to start
- // or end the local part, nor may two or more consecutive
- // periods appear.”
- twoDots := []byte{'.', '.'}
- if localPartBytes[0] == '.' ||
- localPartBytes[len(localPartBytes)-1] == '.' ||
- bytes.Contains(localPartBytes, twoDots) {
- return mailbox, false
- }
- }
-
- if len(in) == 0 || in[0] != '@' {
- return mailbox, false
- }
- in = in[1:]
-
-	// The RFC specifies a format for domains, but that's known to be
-	// violated in practice, so we accept that anything after an '@' is the
-	// domain part.
- if _, ok := domainToReverseLabels(in); !ok {
- return mailbox, false
- }
-
- mailbox.local = string(localPartBytes)
- mailbox.domain = in
- return mailbox, true
-}
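
parseRFC2821Mailbox is unexported, so the following standalone sketch only restates the simple dot-atom path (no quoted strings or escape handling) to show the intended split; splitMailbox and its rules are a simplification, not this file's API:

package main

import (
	"fmt"
	"strings"
)

// splitMailbox handles only the unquoted Atom ("." Atom)* form above.
func splitMailbox(in string) (local, domain string, ok bool) {
	at := strings.LastIndexByte(in, '@')
	if at <= 0 || at == len(in)-1 {
		return "", "", false // missing '@', or empty local/domain part
	}
	local, domain = in[:at], in[at+1:]
	// RFC 3696: no leading dot, trailing dot, or consecutive dots.
	if strings.HasPrefix(local, ".") || strings.HasSuffix(local, ".") ||
		strings.Contains(local, "..") {
		return "", "", false
	}
	return local, domain, true
}

func main() {
	fmt.Println(splitMailbox("alice@example.com")) // alice example.com true
	fmt.Println(splitMailbox(".bad@example.com"))  // rejected: leading dot
}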
-
-// domainToReverseLabels converts a textual domain name like foo.example.com to
-// the list of labels in reverse order, e.g. ["com", "example", "foo"].
-func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) {
- for len(domain) > 0 {
- if i := strings.LastIndexByte(domain, '.'); i == -1 {
- reverseLabels = append(reverseLabels, domain)
- domain = ""
- } else {
- reverseLabels = append(reverseLabels, domain[i+1:])
- domain = domain[:i]
- }
- }
-
- if len(reverseLabels) > 0 && len(reverseLabels[0]) == 0 {
-		// An empty label at the end (i.e. a trailing dot) indicates an
-		// absolute name, which we reject.
- return nil, false
- }
-
- for _, label := range reverseLabels {
- if len(label) == 0 {
- // Empty labels are otherwise invalid.
- return nil, false
- }
-
- for _, c := range label {
- if c < 33 || c > 126 {
- // Invalid character.
- return nil, false
- }
- }
- }
-
- return reverseLabels, true
-}
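
A standalone sketch of the same label-reversal loop, useful for seeing why the constraint matching below compares labels from the right; reverseLabels is a hypothetical restatement, not an export of this package:

package main

import (
	"fmt"
	"strings"
)

// reverseLabels peels labels off from the right, so "foo.example.com"
// becomes ["com", "example", "foo"], mirroring the loop above (minus
// the validity checks).
func reverseLabels(domain string) []string {
	var out []string
	for len(domain) > 0 {
		if i := strings.LastIndexByte(domain, '.'); i == -1 {
			out = append(out, domain)
			domain = ""
		} else {
			out = append(out, domain[i+1:])
			domain = domain[:i]
		}
	}
	return out
}

func main() {
	fmt.Println(reverseLabels("foo.example.com")) // [com example foo]
}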
-
-func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, error) {
- // If the constraint contains an @, then it specifies an exact mailbox
- // name.
- if strings.Contains(constraint, "@") {
- constraintMailbox, ok := parseRFC2821Mailbox(constraint)
- if !ok {
- return false, fmt.Errorf("x509: internal error: cannot parse constraint %q", constraint)
- }
- return mailbox.local == constraintMailbox.local && strings.EqualFold(mailbox.domain, constraintMailbox.domain), nil
- }
-
- // Otherwise the constraint is like a DNS constraint of the domain part
- // of the mailbox.
- return matchDomainConstraint(mailbox.domain, constraint)
-}
-
-func matchURIConstraint(uri *url.URL, constraint string) (bool, error) {
- // From RFC 5280, Section 4.2.1.10:
- // “a uniformResourceIdentifier that does not include an authority
- // component with a host name specified as a fully qualified domain
- // name (e.g., if the URI either does not include an authority
- // component or includes an authority component in which the host name
- // is specified as an IP address), then the application MUST reject the
- // certificate.”
-
- host := uri.Host
- if len(host) == 0 {
- return false, fmt.Errorf("URI with empty host (%q) cannot be matched against constraints", uri.String())
- }
-
- if strings.Contains(host, ":") && !strings.HasSuffix(host, "]") {
- var err error
- host, _, err = net.SplitHostPort(uri.Host)
- if err != nil {
- return false, err
- }
- }
-
- if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") ||
- net.ParseIP(host) != nil {
- return false, fmt.Errorf("URI with IP (%q) cannot be matched against constraints", uri.String())
- }
-
- return matchDomainConstraint(host, constraint)
-}
-
-func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) {
- if len(ip) != len(constraint.IP) {
- return false, nil
- }
-
- for i := range ip {
- if mask := constraint.Mask[i]; ip[i]&mask != constraint.IP[i]&mask {
- return false, nil
- }
- }
-
- return true, nil
-}
-
-func matchDomainConstraint(domain, constraint string) (bool, error) {
- // The meaning of zero length constraints is not specified, but this
- // code follows NSS and accepts them as matching everything.
- if len(constraint) == 0 {
- return true, nil
- }
-
- domainLabels, ok := domainToReverseLabels(domain)
- if !ok {
- return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain)
- }
-
- // RFC 5280 says that a leading period in a domain name means that at
- // least one label must be prepended, but only for URI and email
- // constraints, not DNS constraints. The code also supports that
- // behaviour for DNS constraints.
-
- mustHaveSubdomains := false
- if constraint[0] == '.' {
- mustHaveSubdomains = true
- constraint = constraint[1:]
- }
-
- constraintLabels, ok := domainToReverseLabels(constraint)
- if !ok {
- return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint)
- }
-
- if len(domainLabels) < len(constraintLabels) ||
- (mustHaveSubdomains && len(domainLabels) == len(constraintLabels)) {
- return false, nil
- }
-
- for i, constraintLabel := range constraintLabels {
- if !strings.EqualFold(constraintLabel, domainLabels[i]) {
- return false, nil
- }
- }
-
- return true, nil
-}
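
The semantics are easiest to see with concrete inputs. A simplified restatement (it skips the error handling and the empty-constraint case above); matchDomain is hypothetical:

package main

import (
	"fmt"
	"strings"
)

// matchDomain compares reversed labels, honoring a leading dot that
// demands at least one extra label, as in matchDomainConstraint.
func matchDomain(domain, constraint string) bool {
	mustHaveSub := strings.HasPrefix(constraint, ".")
	constraint = strings.TrimPrefix(constraint, ".")
	d := strings.Split(domain, ".")
	c := strings.Split(constraint, ".")
	if len(d) < len(c) || (mustHaveSub && len(d) == len(c)) {
		return false
	}
	// Compare the trailing (most-significant) labels case-insensitively.
	for i := 1; i <= len(c); i++ {
		if !strings.EqualFold(d[len(d)-i], c[len(c)-i]) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(matchDomain("www.example.com", "example.com"))  // true
	fmt.Println(matchDomain("example.com", ".example.com"))     // false
	fmt.Println(matchDomain("www.example.com", ".example.com")) // true
}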
-
-// checkNameConstraints checks that c permits a child certificate to claim the
-// given name, of type nameType. The argument parsedName contains the parsed
-// form of name, suitable for passing to the match function. The total number
-// of comparisons is tracked in the given count and should not exceed the given
-// limit.
-func (c *Certificate) checkNameConstraints(count *int,
- maxConstraintComparisons int,
- nameType string,
- name string,
- parsedName any,
- match func(parsedName, constraint any) (match bool, err error),
- permitted, excluded any) error {
-
- excludedValue := reflect.ValueOf(excluded)
-
- *count += excludedValue.Len()
- if *count > maxConstraintComparisons {
- return CertificateInvalidError{c, TooManyConstraints, ""}
- }
-
- for i := 0; i < excludedValue.Len(); i++ {
- constraint := excludedValue.Index(i).Interface()
- match, err := match(parsedName, constraint)
- if err != nil {
- return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()}
- }
-
- if match {
- return CertificateInvalidError{c, CANotAuthorizedForThisName, fmt.Sprintf("%s %q is excluded by constraint %q", nameType, name, constraint)}
- }
- }
-
- permittedValue := reflect.ValueOf(permitted)
-
- *count += permittedValue.Len()
- if *count > maxConstraintComparisons {
- return CertificateInvalidError{c, TooManyConstraints, ""}
- }
-
- ok := true
- for i := 0; i < permittedValue.Len(); i++ {
- constraint := permittedValue.Index(i).Interface()
-
- var err error
- if ok, err = match(parsedName, constraint); err != nil {
- return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()}
- }
-
- if ok {
- break
- }
- }
-
- if !ok {
- return CertificateInvalidError{c, CANotAuthorizedForThisName, fmt.Sprintf("%s %q is not permitted by any constraint", nameType, name)}
- }
-
- return nil
-}
-
-// isValid performs validity checks on c given that it is a candidate to append
-// to the chain in currentChain.
-func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error {
- if len(c.UnhandledCriticalExtensions) > 0 {
- return UnhandledCriticalExtension{}
- }
-
- if len(currentChain) > 0 {
- child := currentChain[len(currentChain)-1]
- if !bytes.Equal(child.RawIssuer, c.RawSubject) {
- return CertificateInvalidError{c, NameMismatch, ""}
- }
- }
-
- now := opts.CurrentTime
- if now.IsZero() {
- now = time.Now()
- }
- if now.Before(c.NotBefore) {
- return CertificateInvalidError{
- Cert: c,
- Reason: Expired,
- Detail: fmt.Sprintf("current time %s is before %s", now.Format(time.RFC3339), c.NotBefore.Format(time.RFC3339)),
- }
- } else if now.After(c.NotAfter) {
- return CertificateInvalidError{
- Cert: c,
- Reason: Expired,
- Detail: fmt.Sprintf("current time %s is after %s", now.Format(time.RFC3339), c.NotAfter.Format(time.RFC3339)),
- }
- }
-
- maxConstraintComparisons := opts.MaxConstraintComparisions
- if maxConstraintComparisons == 0 {
- maxConstraintComparisons = 250000
- }
- comparisonCount := 0
-
- var leaf *Certificate
- if certType == intermediateCertificate || certType == rootCertificate {
- if len(currentChain) == 0 {
- return errors.New("x509: internal error: empty chain when appending CA cert")
- }
- leaf = currentChain[0]
- }
-
- if (certType == intermediateCertificate || certType == rootCertificate) &&
- c.hasNameConstraints() && leaf.hasSANExtension() {
- err := forEachSAN(leaf.getSANExtension(), func(tag int, data []byte) error {
- switch tag {
- case nameTypeEmail:
- name := string(data)
- mailbox, ok := parseRFC2821Mailbox(name)
- if !ok {
-					return fmt.Errorf("x509: cannot parse rfc822Name %q", name)
- }
-
- if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox,
- func(parsedName, constraint any) (bool, error) {
- return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string))
- }, c.PermittedEmailAddresses, c.ExcludedEmailAddresses); err != nil {
- return err
- }
-
- case nameTypeDNS:
- name := string(data)
- if _, ok := domainToReverseLabels(name); !ok {
- return fmt.Errorf("x509: cannot parse dnsName %q", name)
- }
-
- if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name,
- func(parsedName, constraint any) (bool, error) {
- return matchDomainConstraint(parsedName.(string), constraint.(string))
- }, c.PermittedDNSDomains, c.ExcludedDNSDomains); err != nil {
- return err
- }
-
- case nameTypeURI:
- name := string(data)
- uri, err := url.Parse(name)
- if err != nil {
- return fmt.Errorf("x509: internal error: URI SAN %q failed to parse", name)
- }
-
- if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "URI", name, uri,
- func(parsedName, constraint any) (bool, error) {
- return matchURIConstraint(parsedName.(*url.URL), constraint.(string))
- }, c.PermittedURIDomains, c.ExcludedURIDomains); err != nil {
- return err
- }
-
- case nameTypeIP:
- ip := net.IP(data)
- if l := len(ip); l != net.IPv4len && l != net.IPv6len {
- return fmt.Errorf("x509: internal error: IP SAN %x failed to parse", data)
- }
-
- if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "IP address", ip.String(), ip,
- func(parsedName, constraint any) (bool, error) {
- return matchIPConstraint(parsedName.(net.IP), constraint.(*net.IPNet))
- }, c.PermittedIPRanges, c.ExcludedIPRanges); err != nil {
- return err
- }
-
- default:
- // Unknown SAN types are ignored.
- }
-
- return nil
- })
-
- if err != nil {
- return err
- }
- }
-
- // KeyUsage status flags are ignored. From Engineering Security, Peter
- // Gutmann: A European government CA marked its signing certificates as
- // being valid for encryption only, but no-one noticed. Another
- // European CA marked its signature keys as not being valid for
- // signatures. A different CA marked its own trusted root certificate
- // as being invalid for certificate signing. Another national CA
- // distributed a certificate to be used to encrypt data for the
- // country’s tax authority that was marked as only being usable for
- // digital signatures but not for encryption. Yet another CA reversed
- // the order of the bit flags in the keyUsage due to confusion over
- // encoding endianness, essentially setting a random keyUsage in
- // certificates that it issued. Another CA created a self-invalidating
- // certificate by adding a certificate policy statement stipulating
- // that the certificate had to be used strictly as specified in the
- // keyUsage, and a keyUsage containing a flag indicating that the RSA
- // encryption key could only be used for Diffie-Hellman key agreement.
-
- if certType == intermediateCertificate && (!c.BasicConstraintsValid || !c.IsCA) {
- return CertificateInvalidError{c, NotAuthorizedToSign, ""}
- }
-
- if c.BasicConstraintsValid && c.MaxPathLen >= 0 {
- numIntermediates := len(currentChain) - 1
- if numIntermediates > c.MaxPathLen {
- return CertificateInvalidError{c, TooManyIntermediates, ""}
- }
- }
-
- return nil
-}
-
-// Verify attempts to verify c by building one or more chains from c to a
-// certificate in opts.Roots, using certificates in opts.Intermediates if
-// needed. If successful, it returns one or more chains where the first
-// element of the chain is c and the last element is from opts.Roots.
-//
-// If opts.Roots is nil, the platform verifier might be used, and
-// verification details might differ from what is described below. If system
-// roots are unavailable the returned error will be of type SystemRootsError.
-//
-// Name constraints in the intermediates will be applied to all names claimed
-// in the chain, not just opts.DNSName. Thus it is invalid for a leaf to claim
-// example.com if an intermediate doesn't permit it, even if example.com is not
-// the name being validated. Note that DirectoryName constraints are not
-// supported.
-//
-// Name constraint validation follows the rules from RFC 5280, with the
-// addition that DNS name constraints may use the leading period format
-// defined for emails and URIs. When a constraint has a leading period
-// it indicates that at least one additional label must be prepended to
-// the constrained name to be considered valid.
-//
-// Extended Key Usage values are enforced nested down a chain, so an intermediate
-// or root that enumerates EKUs prevents a leaf from asserting an EKU not in that
-// list. (While this is not specified, it is common practice in order to limit
-// the types of certificates a CA can issue.)
-//
-// Certificates that use SHA1WithRSA and ECDSAWithSHA1 signatures are not supported,
-// and will not be used to build chains.
-//
-// WARNING: this function doesn't do any revocation checking.
-func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err error) {
-	// Platform-specific verification needs the ASN.1 contents, so
-	// requiring them up front keeps the behavior consistent across platforms.
- if len(c.Raw) == 0 {
- return nil, errNotParsed
- }
- for i := 0; i < opts.Intermediates.len(); i++ {
- c, err := opts.Intermediates.cert(i)
- if err != nil {
- return nil, fmt.Errorf("crypto/x509: error fetching intermediate: %w", err)
- }
- if len(c.Raw) == 0 {
- return nil, errNotParsed
- }
- }
-
- // Use platform verifiers, where available, if Roots is from SystemCertPool.
- if runtime.GOOS == "windows" || runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
- if opts.Roots == nil {
- return c.systemVerify(&opts)
- }
- if opts.Roots != nil && opts.Roots.systemPool {
- platformChains, err := c.systemVerify(&opts)
- // If the platform verifier succeeded, or there are no additional
- // roots, return the platform verifier result. Otherwise, continue
- // with the Go verifier.
- if err == nil || opts.Roots.len() == 0 {
- return platformChains, err
- }
- }
- }
-
- if opts.Roots == nil {
- opts.Roots = systemRootsPool()
- if opts.Roots == nil {
- return nil, SystemRootsError{systemRootsErr}
- }
- }
-
- err = c.isValid(leafCertificate, nil, &opts)
- if err != nil {
- return
- }
-
- if len(opts.DNSName) > 0 {
- err = c.VerifyHostname(opts.DNSName)
- if err != nil {
- return
- }
- }
-
- var candidateChains [][]*Certificate
- if opts.Roots.contains(c) {
- candidateChains = append(candidateChains, []*Certificate{c})
- } else {
- if candidateChains, err = c.buildChains(nil, []*Certificate{c}, nil, &opts); err != nil {
- return nil, err
- }
- }
-
- keyUsages := opts.KeyUsages
- if len(keyUsages) == 0 {
- keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth}
- }
-
- // If any key usage is acceptable then we're done.
- for _, usage := range keyUsages {
- if usage == ExtKeyUsageAny {
- return candidateChains, nil
- }
- }
-
- for _, candidate := range candidateChains {
- if checkChainForKeyUsage(candidate, keyUsages) {
- chains = append(chains, candidate)
- }
- }
-
- if len(chains) == 0 {
- return nil, CertificateInvalidError{c, IncompatibleUsage, ""}
- }
-
- return chains, nil
-}
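
A typical call site might look like the following sketch; rootPEM and leafPEM are assumed to hold PEM-encoded certificates, and the package and function names are placeholders:

package x509util // hypothetical helper package

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

func VerifyLeaf(rootPEM, leafPEM []byte, host string) error {
	roots := x509.NewCertPool()
	if !roots.AppendCertsFromPEM(rootPEM) {
		return fmt.Errorf("no roots parsed from PEM")
	}
	block, _ := pem.Decode(leafPEM)
	if block == nil {
		return fmt.Errorf("failed to decode leaf PEM")
	}
	leaf, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return err
	}
	opts := x509.VerifyOptions{
		Roots:   roots,
		DNSName: host, // also triggers the VerifyHostname check
		// KeyUsages defaults to ExtKeyUsageServerAuth when empty.
	}
	chains, err := leaf.Verify(opts)
	if err != nil {
		return err
	}
	fmt.Printf("built %d verified chain(s)\n", len(chains))
	return nil
}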
-
-func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate {
- n := make([]*Certificate, len(chain)+1)
- copy(n, chain)
- n[len(chain)] = cert
- return n
-}
-
-// maxChainSignatureChecks is the maximum number of CheckSignatureFrom calls
-// that an invocation of buildChains will (transitively) make. Most chains are
-// less than 15 certificates long, so this leaves space for multiple chains and
-// for failed checks due to different intermediates having the same Subject.
-const maxChainSignatureChecks = 100
-
-func (c *Certificate) buildChains(cache map[*Certificate][][]*Certificate, currentChain []*Certificate, sigChecks *int, opts *VerifyOptions) (chains [][]*Certificate, err error) {
- var (
- hintErr error
- hintCert *Certificate
- )
-
- considerCandidate := func(certType int, candidate *Certificate) {
- for _, cert := range currentChain {
- if cert.Equal(candidate) {
- return
- }
- }
-
- if sigChecks == nil {
- sigChecks = new(int)
- }
- *sigChecks++
- if *sigChecks > maxChainSignatureChecks {
- err = errors.New("x509: signature check attempts limit reached while verifying certificate chain")
- return
- }
-
- if err := c.CheckSignatureFrom(candidate); err != nil {
- if hintErr == nil {
- hintErr = err
- hintCert = candidate
- }
- return
- }
-
- err = candidate.isValid(certType, currentChain, opts)
- if err != nil {
- return
- }
-
- switch certType {
- case rootCertificate:
- chains = append(chains, appendToFreshChain(currentChain, candidate))
- case intermediateCertificate:
- if cache == nil {
- cache = make(map[*Certificate][][]*Certificate)
- }
- childChains, ok := cache[candidate]
- if !ok {
- childChains, err = candidate.buildChains(cache, appendToFreshChain(currentChain, candidate), sigChecks, opts)
- cache[candidate] = childChains
- }
- chains = append(chains, childChains...)
- }
- }
-
- for _, root := range opts.Roots.findPotentialParents(c) {
- considerCandidate(rootCertificate, root)
- }
- for _, intermediate := range opts.Intermediates.findPotentialParents(c) {
- considerCandidate(intermediateCertificate, intermediate)
- }
-
- if len(chains) > 0 {
- err = nil
- }
- if len(chains) == 0 && err == nil {
- err = UnknownAuthorityError{c, hintErr, hintCert}
- }
-
- return
-}
-
-func validHostnamePattern(host string) bool { return validHostname(host, true) }
-func validHostnameInput(host string) bool { return validHostname(host, false) }
-
-// validHostname reports whether host is a valid hostname that can be matched
-// (as a certificate pattern) or matched against (as presented input),
-// according to RFC 6125 2.2, with some leniency to accommodate legacy values.
-func validHostname(host string, isPattern bool) bool {
- if !isPattern {
- host = strings.TrimSuffix(host, ".")
- }
- if len(host) == 0 {
- return false
- }
-
- for i, part := range strings.Split(host, ".") {
- if part == "" {
- // Empty label.
- return false
- }
- if isPattern && i == 0 && part == "*" {
- // Only allow full left-most wildcards, as those are the only ones
- // we match, and matching literal '*' characters is probably never
- // the expected behavior.
- continue
- }
- for j, c := range part {
- if 'a' <= c && c <= 'z' {
- continue
- }
- if '0' <= c && c <= '9' {
- continue
- }
- if 'A' <= c && c <= 'Z' {
- continue
- }
- if c == '-' && j != 0 {
- continue
- }
- if c == '_' {
- // Not a valid character in hostnames, but commonly
- // found in deployments outside the WebPKI.
- continue
- }
- return false
- }
- }
-
- return true
-}
-
-func matchExactly(hostA, hostB string) bool {
- if hostA == "" || hostA == "." || hostB == "" || hostB == "." {
- return false
- }
- return toLowerCaseASCII(hostA) == toLowerCaseASCII(hostB)
-}
-
-func matchHostnames(pattern, host string) bool {
- pattern = toLowerCaseASCII(pattern)
- host = toLowerCaseASCII(strings.TrimSuffix(host, "."))
-
- if len(pattern) == 0 || len(host) == 0 {
- return false
- }
-
- patternParts := strings.Split(pattern, ".")
- hostParts := strings.Split(host, ".")
-
- if len(patternParts) != len(hostParts) {
- return false
- }
-
- for i, patternPart := range patternParts {
- if i == 0 && patternPart == "*" {
- continue
- }
- if patternPart != hostParts[i] {
- return false
- }
- }
-
- return true
-}
-
-// toLowerCaseASCII returns a lower-case version of in. See RFC 6125 6.4.1. We use
-// an explicitly ASCII function to avoid any sharp corners resulting from
-// performing Unicode operations on DNS labels.
-func toLowerCaseASCII(in string) string {
- // If the string is already lower-case then there's nothing to do.
- isAlreadyLowerCase := true
- for _, c := range in {
- if c == utf8.RuneError {
- // If we get a UTF-8 error then there might be
- // upper-case ASCII bytes in the invalid sequence.
- isAlreadyLowerCase = false
- break
- }
- if 'A' <= c && c <= 'Z' {
- isAlreadyLowerCase = false
- break
- }
- }
-
- if isAlreadyLowerCase {
- return in
- }
-
- out := []byte(in)
- for i, c := range out {
- if 'A' <= c && c <= 'Z' {
- out[i] += 'a' - 'A'
- }
- }
- return string(out)
-}
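
Why ASCII-only matters here: Unicode lowering can map non-ASCII code points into ASCII, so byte-distinct names could suddenly compare equal, as this small demonstration shows:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// The Kelvin sign U+212A lowers to ASCII 'k' under Unicode rules.
	// An ASCII-only lowering leaves it untouched, which is what the
	// hostname comparison above relies on.
	fmt.Println(strings.ToLower("\u212Aelvin")) // "kelvin"
}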
-
-// VerifyHostname returns nil if c is a valid certificate for the named host.
-// Otherwise it returns an error describing the mismatch.
-//
-// IP addresses can be optionally enclosed in square brackets and are checked
-// against the IPAddresses field. Other names are checked case insensitively
-// against the DNSNames field. If the names are valid hostnames, the certificate
-// fields can have a wildcard as the left-most label.
-//
-// Note that the legacy Common Name field is ignored.
-func (c *Certificate) VerifyHostname(h string) error {
- // IP addresses may be written in [ ].
- candidateIP := h
- if len(h) >= 3 && h[0] == '[' && h[len(h)-1] == ']' {
- candidateIP = h[1 : len(h)-1]
- }
- if ip := net.ParseIP(candidateIP); ip != nil {
- // We only match IP addresses against IP SANs.
- // See RFC 6125, Appendix B.2.
- for _, candidate := range c.IPAddresses {
- if ip.Equal(candidate) {
- return nil
- }
- }
- return HostnameError{c, candidateIP}
- }
-
- candidateName := toLowerCaseASCII(h) // Save allocations inside the loop.
- validCandidateName := validHostnameInput(candidateName)
-
- for _, match := range c.DNSNames {
- // Ideally, we'd only match valid hostnames according to RFC 6125 like
- // browsers (more or less) do, but in practice Go is used in a wider
- // array of contexts and can't even assume DNS resolution. Instead,
- // always allow perfect matches, and only apply wildcard and trailing
- // dot processing to valid hostnames.
- if validCandidateName && validHostnamePattern(match) {
- if matchHostnames(match, candidateName) {
- return nil
- }
- } else {
- if matchExactly(match, candidateName) {
- return nil
- }
- }
- }
-
- return HostnameError{c, h}
-}
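
A sketch of the different input forms, assuming cert came from x509.ParseCertificate; checkHost is a hypothetical helper:

package x509util // hypothetical helper package

import (
	"crypto/x509"
	"fmt"
)

func checkHost(cert *x509.Certificate) {
	for _, h := range []string{
		"www.example.com", // matched against DNSNames; wildcards may apply
		"[2001:db8::1]",   // brackets stripped, matched against IP SANs only
		"192.0.2.1",       // also treated as an IP, never as a DNS name
	} {
		if err := cert.VerifyHostname(h); err != nil {
			fmt.Println(h, "=>", err)
		} else {
			fmt.Println(h, "=> ok")
		}
	}
}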
-
-func checkChainForKeyUsage(chain []*Certificate, keyUsages []ExtKeyUsage) bool {
- usages := make([]ExtKeyUsage, len(keyUsages))
- copy(usages, keyUsages)
-
- if len(chain) == 0 {
- return false
- }
-
- usagesRemaining := len(usages)
-
- // We walk down the list and cross out any usages that aren't supported
- // by each certificate. If we cross out all the usages, then the chain
- // is unacceptable.
-
-NextCert:
- for i := len(chain) - 1; i >= 0; i-- {
- cert := chain[i]
- if len(cert.ExtKeyUsage) == 0 && len(cert.UnknownExtKeyUsage) == 0 {
- // The certificate doesn't have any extended key usage specified.
- continue
- }
-
- for _, usage := range cert.ExtKeyUsage {
- if usage == ExtKeyUsageAny {
- // The certificate is explicitly good for any usage.
- continue NextCert
- }
- }
-
- const invalidUsage ExtKeyUsage = -1
-
- NextRequestedUsage:
- for i, requestedUsage := range usages {
- if requestedUsage == invalidUsage {
- continue
- }
-
- for _, usage := range cert.ExtKeyUsage {
- if requestedUsage == usage {
- continue NextRequestedUsage
- }
- }
-
- usages[i] = invalidUsage
- usagesRemaining--
- if usagesRemaining == 0 {
- return false
- }
- }
- }
-
- return true
-}
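
The cross-out walk can be restated on plain slices. This hypothetical ekuAllowed checks a single requested usage under the same root-to-leaf rule (the real code crosses out a whole set of requested usages at once):

package main

import "fmt"

// ekuAllowed: every certificate that lists EKUs (here, as strings) must
// cover the requested usage, walking from the root down to the leaf;
// "any" acts as a wildcard and a missing EKU extension as unrestricted.
func ekuAllowed(chain [][]string, requested string) bool {
	for i := len(chain) - 1; i >= 0; i-- {
		ekus := chain[i]
		if len(ekus) == 0 {
			continue // no EKU extension: unrestricted
		}
		ok := false
		for _, e := range ekus {
			if e == "any" || e == requested {
				ok = true
				break
			}
		}
		if !ok {
			return false
		}
	}
	return true
}

func main() {
	chain := [][]string{
		{"serverAuth"},               // leaf
		{"serverAuth", "clientAuth"}, // intermediate
		{},                           // root with no EKU extension
	}
	fmt.Println(ekuAllowed(chain, "serverAuth")) // true
	fmt.Println(ekuAllowed(chain, "clientAuth")) // false: leaf doesn't list it
}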
diff --git a/contrib/go/_std_1.18/src/crypto/x509/x509.go b/contrib/go/_std_1.18/src/crypto/x509/x509.go
deleted file mode 100644
index 85720b3ccb..0000000000
--- a/contrib/go/_std_1.18/src/crypto/x509/x509.go
+++ /dev/null
@@ -1,2224 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package x509 parses X.509-encoded keys and certificates.
-package x509
-
-import (
- "bytes"
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/elliptic"
- "crypto/rsa"
- "crypto/sha1"
- "crypto/x509/pkix"
- "encoding/asn1"
- "encoding/pem"
- "errors"
- "fmt"
- "internal/godebug"
- "io"
- "math/big"
- "net"
- "net/url"
- "strconv"
- "time"
- "unicode"
-
- // Explicitly import these for their crypto.RegisterHash init side-effects.
- // Keep these as blank imports, even if they're imported above.
- _ "crypto/sha1"
- _ "crypto/sha256"
- _ "crypto/sha512"
-
- "golang.org/x/crypto/cryptobyte"
- cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
-)
-
-// pkixPublicKey reflects a PKIX public key structure. See SubjectPublicKeyInfo
-// in RFC 3280.
-type pkixPublicKey struct {
- Algo pkix.AlgorithmIdentifier
- BitString asn1.BitString
-}
-
-// ParsePKIXPublicKey parses a public key in PKIX, ASN.1 DER form.
-// The encoded public key is a SubjectPublicKeyInfo structure
-// (see RFC 5280, Section 4.1).
-//
-// It returns a *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, or
-// ed25519.PublicKey. More types might be supported in the future.
-//
-// This kind of key is commonly encoded in PEM blocks of type "PUBLIC KEY".
-func ParsePKIXPublicKey(derBytes []byte) (pub any, err error) {
- var pki publicKeyInfo
- if rest, err := asn1.Unmarshal(derBytes, &pki); err != nil {
- if _, err := asn1.Unmarshal(derBytes, &pkcs1PublicKey{}); err == nil {
- return nil, errors.New("x509: failed to parse public key (use ParsePKCS1PublicKey instead for this key format)")
- }
- return nil, err
- } else if len(rest) != 0 {
- return nil, errors.New("x509: trailing data after ASN.1 of public-key")
- }
- algo := getPublicKeyAlgorithmFromOID(pki.Algorithm.Algorithm)
- if algo == UnknownPublicKeyAlgorithm {
- return nil, errors.New("x509: unknown public key algorithm")
- }
- return parsePublicKey(algo, &pki)
-}
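
A typical decode path, modeled on the standard usage of this function; pubPEM is assumed to be filled with a PEM "PUBLIC KEY" block:

package main

import (
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
)

// pubPEM is assumed to hold a PEM-encoded SubjectPublicKeyInfo.
var pubPEM []byte

func main() {
	block, _ := pem.Decode(pubPEM)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	pub, err := x509.ParsePKIXPublicKey(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	switch pub := pub.(type) {
	case *rsa.PublicKey:
		fmt.Println("RSA, modulus bits:", pub.N.BitLen())
	case *ecdsa.PublicKey:
		fmt.Println("ECDSA, curve:", pub.Curve.Params().Name)
	case ed25519.PublicKey:
		fmt.Println("Ed25519, key bytes:", len(pub))
	default:
		fmt.Printf("unhandled key type %T\n", pub)
	}
}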
-
-func marshalPublicKey(pub any) (publicKeyBytes []byte, publicKeyAlgorithm pkix.AlgorithmIdentifier, err error) {
- switch pub := pub.(type) {
- case *rsa.PublicKey:
- publicKeyBytes, err = asn1.Marshal(pkcs1PublicKey{
- N: pub.N,
- E: pub.E,
- })
- if err != nil {
- return nil, pkix.AlgorithmIdentifier{}, err
- }
- publicKeyAlgorithm.Algorithm = oidPublicKeyRSA
- // This is a NULL parameters value which is required by
- // RFC 3279, Section 2.3.1.
- publicKeyAlgorithm.Parameters = asn1.NullRawValue
- case *ecdsa.PublicKey:
- publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
- oid, ok := oidFromNamedCurve(pub.Curve)
- if !ok {
- return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: unsupported elliptic curve")
- }
- publicKeyAlgorithm.Algorithm = oidPublicKeyECDSA
- var paramBytes []byte
- paramBytes, err = asn1.Marshal(oid)
- if err != nil {
- return
- }
- publicKeyAlgorithm.Parameters.FullBytes = paramBytes
- case ed25519.PublicKey:
- publicKeyBytes = pub
- publicKeyAlgorithm.Algorithm = oidPublicKeyEd25519
- default:
- return nil, pkix.AlgorithmIdentifier{}, fmt.Errorf("x509: unsupported public key type: %T", pub)
- }
-
- return publicKeyBytes, publicKeyAlgorithm, nil
-}
-
-// MarshalPKIXPublicKey converts a public key to PKIX, ASN.1 DER form.
-// The encoded public key is a SubjectPublicKeyInfo structure
-// (see RFC 5280, Section 4.1).
-//
-// The following key types are currently supported: *rsa.PublicKey, *ecdsa.PublicKey
-// and ed25519.PublicKey. Unsupported key types result in an error.
-//
-// This kind of key is commonly encoded in PEM blocks of type "PUBLIC KEY".
-func MarshalPKIXPublicKey(pub any) ([]byte, error) {
- var publicKeyBytes []byte
- var publicKeyAlgorithm pkix.AlgorithmIdentifier
- var err error
-
- if publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(pub); err != nil {
- return nil, err
- }
-
- pkix := pkixPublicKey{
- Algo: publicKeyAlgorithm,
- BitString: asn1.BitString{
- Bytes: publicKeyBytes,
- BitLength: 8 * len(publicKeyBytes),
- },
- }
-
- ret, _ := asn1.Marshal(pkix)
- return ret, nil
-}
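
A runnable round-trip sketch for a fresh P-256 key, printing the PEM form that ParsePKIXPublicKey above would accept:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"log"
	"os"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	der, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
	if err != nil {
		log.Fatal(err)
	}
	if err := pem.Encode(os.Stdout, &pem.Block{Type: "PUBLIC KEY", Bytes: der}); err != nil {
		log.Fatal(err)
	}
}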
-
-// These structures reflect the ASN.1 structure of X.509 certificates:
-
-type certificate struct {
- Raw asn1.RawContent
- TBSCertificate tbsCertificate
- SignatureAlgorithm pkix.AlgorithmIdentifier
- SignatureValue asn1.BitString
-}
-
-type tbsCertificate struct {
- Raw asn1.RawContent
- Version int `asn1:"optional,explicit,default:0,tag:0"`
- SerialNumber *big.Int
- SignatureAlgorithm pkix.AlgorithmIdentifier
- Issuer asn1.RawValue
- Validity validity
- Subject asn1.RawValue
- PublicKey publicKeyInfo
- UniqueId asn1.BitString `asn1:"optional,tag:1"`
- SubjectUniqueId asn1.BitString `asn1:"optional,tag:2"`
- Extensions []pkix.Extension `asn1:"optional,explicit,tag:3"`
-}
-
-type dsaAlgorithmParameters struct {
- P, Q, G *big.Int
-}
-
-type validity struct {
- NotBefore, NotAfter time.Time
-}
-
-type publicKeyInfo struct {
- Raw asn1.RawContent
- Algorithm pkix.AlgorithmIdentifier
- PublicKey asn1.BitString
-}
-
-// RFC 5280, 4.2.1.1
-type authKeyId struct {
- Id []byte `asn1:"optional,tag:0"`
-}
-
-type SignatureAlgorithm int
-
-const (
- UnknownSignatureAlgorithm SignatureAlgorithm = iota
-
- MD2WithRSA // Unsupported.
- MD5WithRSA // Only supported for signing, not verification.
- SHA1WithRSA // Only supported for signing, and verification of CRLs, CSRs, and OCSP responses.
- SHA256WithRSA
- SHA384WithRSA
- SHA512WithRSA
- DSAWithSHA1 // Unsupported.
- DSAWithSHA256 // Unsupported.
- ECDSAWithSHA1 // Only supported for signing, and verification of CRLs, CSRs, and OCSP responses.
- ECDSAWithSHA256
- ECDSAWithSHA384
- ECDSAWithSHA512
- SHA256WithRSAPSS
- SHA384WithRSAPSS
- SHA512WithRSAPSS
- PureEd25519
-)
-
-func (algo SignatureAlgorithm) isRSAPSS() bool {
- switch algo {
- case SHA256WithRSAPSS, SHA384WithRSAPSS, SHA512WithRSAPSS:
- return true
- default:
- return false
- }
-}
-
-func (algo SignatureAlgorithm) String() string {
- for _, details := range signatureAlgorithmDetails {
- if details.algo == algo {
- return details.name
- }
- }
- return strconv.Itoa(int(algo))
-}
-
-type PublicKeyAlgorithm int
-
-const (
- UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota
- RSA
- DSA // Unsupported.
- ECDSA
- Ed25519
-)
-
-var publicKeyAlgoName = [...]string{
- RSA: "RSA",
- DSA: "DSA",
- ECDSA: "ECDSA",
- Ed25519: "Ed25519",
-}
-
-func (algo PublicKeyAlgorithm) String() string {
- if 0 < algo && int(algo) < len(publicKeyAlgoName) {
- return publicKeyAlgoName[algo]
- }
- return strconv.Itoa(int(algo))
-}
-
-// OIDs for signature algorithms
-//
-// pkcs-1 OBJECT IDENTIFIER ::= {
-// iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 }
-//
-//
-// RFC 3279 2.2.1 RSA Signature Algorithms
-//
-// md2WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 2 }
-//
-// md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 }
-//
-// sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 }
-//
-// dsaWithSha1 OBJECT IDENTIFIER ::= {
-// iso(1) member-body(2) us(840) x9-57(10040) x9cm(4) 3 }
-//
-// RFC 3279 2.2.3 ECDSA Signature Algorithm
-//
-// ecdsa-with-SHA1 OBJECT IDENTIFIER ::= {
-// iso(1) member-body(2) us(840) ansi-x962(10045)
-// signatures(4) ecdsa-with-SHA1(1)}
-//
-//
-// RFC 4055 5 PKCS #1 Version 1.5
-//
-// sha256WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 11 }
-//
-// sha384WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 12 }
-//
-// sha512WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 13 }
-//
-//
-// RFC 5758 3.1 DSA Signature Algorithms
-//
-// dsaWithSha256 OBJECT IDENTIFIER ::= {
-// joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101)
-// csor(3) algorithms(4) id-dsa-with-sha2(3) 2}
-//
-// RFC 5758 3.2 ECDSA Signature Algorithm
-//
-// ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
-// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 2 }
-//
-// ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
-// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 3 }
-//
-// ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
-// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 4 }
-//
-//
-// RFC 8410 3 Curve25519 and Curve448 Algorithm Identifiers
-//
-// id-Ed25519 OBJECT IDENTIFIER ::= { 1 3 101 112 }
-
-var (
- oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
- oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
- oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
- oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
- oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
- oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
- oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10}
- oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
- oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2}
- oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
- oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
- oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
- oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
- oidSignatureEd25519 = asn1.ObjectIdentifier{1, 3, 101, 112}
-
- oidSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1}
- oidSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2}
- oidSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3}
-
- oidMGF1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 8}
-
- // oidISOSignatureSHA1WithRSA means the same as oidSignatureSHA1WithRSA
- // but it's specified by ISO. Microsoft's makecert.exe has been known
- // to produce certificates with this OID.
- oidISOSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 29}
-)
-
-var signatureAlgorithmDetails = []struct {
- algo SignatureAlgorithm
- name string
- oid asn1.ObjectIdentifier
- pubKeyAlgo PublicKeyAlgorithm
- hash crypto.Hash
-}{
- {MD2WithRSA, "MD2-RSA", oidSignatureMD2WithRSA, RSA, crypto.Hash(0) /* no value for MD2 */},
- {MD5WithRSA, "MD5-RSA", oidSignatureMD5WithRSA, RSA, crypto.MD5},
- {SHA1WithRSA, "SHA1-RSA", oidSignatureSHA1WithRSA, RSA, crypto.SHA1},
- {SHA1WithRSA, "SHA1-RSA", oidISOSignatureSHA1WithRSA, RSA, crypto.SHA1},
- {SHA256WithRSA, "SHA256-RSA", oidSignatureSHA256WithRSA, RSA, crypto.SHA256},
- {SHA384WithRSA, "SHA384-RSA", oidSignatureSHA384WithRSA, RSA, crypto.SHA384},
- {SHA512WithRSA, "SHA512-RSA", oidSignatureSHA512WithRSA, RSA, crypto.SHA512},
- {SHA256WithRSAPSS, "SHA256-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA256},
- {SHA384WithRSAPSS, "SHA384-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA384},
- {SHA512WithRSAPSS, "SHA512-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA512},
- {DSAWithSHA1, "DSA-SHA1", oidSignatureDSAWithSHA1, DSA, crypto.SHA1},
- {DSAWithSHA256, "DSA-SHA256", oidSignatureDSAWithSHA256, DSA, crypto.SHA256},
- {ECDSAWithSHA1, "ECDSA-SHA1", oidSignatureECDSAWithSHA1, ECDSA, crypto.SHA1},
- {ECDSAWithSHA256, "ECDSA-SHA256", oidSignatureECDSAWithSHA256, ECDSA, crypto.SHA256},
- {ECDSAWithSHA384, "ECDSA-SHA384", oidSignatureECDSAWithSHA384, ECDSA, crypto.SHA384},
- {ECDSAWithSHA512, "ECDSA-SHA512", oidSignatureECDSAWithSHA512, ECDSA, crypto.SHA512},
- {PureEd25519, "Ed25519", oidSignatureEd25519, Ed25519, crypto.Hash(0) /* no pre-hashing */},
-}
-
-// hashToPSSParameters contains the DER encoded RSA PSS parameters for the
-// SHA256, SHA384, and SHA512 hashes as defined in RFC 3447, Appendix A.2.3.
-// The parameters contain the following values:
-// * hashAlgorithm contains the associated hash identifier with NULL parameters
-// * maskGenAlgorithm always contains the default mgf1SHA1 identifier
-// * saltLength contains the length of the associated hash
-// * trailerField always contains the default trailerFieldBC value
-var hashToPSSParameters = map[crypto.Hash]asn1.RawValue{
- crypto.SHA256: asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 1, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 1, 5, 0, 162, 3, 2, 1, 32}},
- crypto.SHA384: asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 2, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 2, 5, 0, 162, 3, 2, 1, 48}},
- crypto.SHA512: asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 3, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 3, 5, 0, 162, 3, 2, 1, 64}},
-}
-
-// pssParameters reflects the parameters in an AlgorithmIdentifier that
-// specifies RSA PSS. See RFC 3447, Appendix A.2.3.
-type pssParameters struct {
- // The following three fields are not marked as
- // optional because the default values specify SHA-1,
- // which is no longer suitable for use in signatures.
- Hash pkix.AlgorithmIdentifier `asn1:"explicit,tag:0"`
- MGF pkix.AlgorithmIdentifier `asn1:"explicit,tag:1"`
- SaltLength int `asn1:"explicit,tag:2"`
- TrailerField int `asn1:"optional,explicit,tag:3,default:1"`
-}
-
-func getSignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) SignatureAlgorithm {
- if ai.Algorithm.Equal(oidSignatureEd25519) {
- // RFC 8410, Section 3
- // > For all of the OIDs, the parameters MUST be absent.
- if len(ai.Parameters.FullBytes) != 0 {
- return UnknownSignatureAlgorithm
- }
- }
-
- if !ai.Algorithm.Equal(oidSignatureRSAPSS) {
- for _, details := range signatureAlgorithmDetails {
- if ai.Algorithm.Equal(details.oid) {
- return details.algo
- }
- }
- return UnknownSignatureAlgorithm
- }
-
- // RSA PSS is special because it encodes important parameters
- // in the Parameters.
-
- var params pssParameters
- if _, err := asn1.Unmarshal(ai.Parameters.FullBytes, &params); err != nil {
- return UnknownSignatureAlgorithm
- }
-
- var mgf1HashFunc pkix.AlgorithmIdentifier
- if _, err := asn1.Unmarshal(params.MGF.Parameters.FullBytes, &mgf1HashFunc); err != nil {
- return UnknownSignatureAlgorithm
- }
-
- // PSS is greatly overburdened with options. This code forces them into
- // three buckets by requiring that the MGF1 hash function always match the
- // message hash function (as recommended in RFC 3447, Section 8.1), that the
- // salt length matches the hash length, and that the trailer field has the
- // default value.
- if (len(params.Hash.Parameters.FullBytes) != 0 && !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes)) ||
- !params.MGF.Algorithm.Equal(oidMGF1) ||
- !mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) ||
- (len(mgf1HashFunc.Parameters.FullBytes) != 0 && !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, asn1.NullBytes)) ||
- params.TrailerField != 1 {
- return UnknownSignatureAlgorithm
- }
-
- switch {
- case params.Hash.Algorithm.Equal(oidSHA256) && params.SaltLength == 32:
- return SHA256WithRSAPSS
- case params.Hash.Algorithm.Equal(oidSHA384) && params.SaltLength == 48:
- return SHA384WithRSAPSS
- case params.Hash.Algorithm.Equal(oidSHA512) && params.SaltLength == 64:
- return SHA512WithRSAPSS
- }
-
- return UnknownSignatureAlgorithm
-}
-
-// RFC 3279, 2.3 Public Key Algorithms
-//
-// pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840)
-// rsadsi(113549) pkcs(1) 1 }
-//
-// rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 }
-//
-// id-dsa OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840)
-// x9-57(10040) x9cm(4) 1 }
-//
-// RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters
-//
-// id-ecPublicKey OBJECT IDENTIFIER ::= {
-// iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 }
-var (
- oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
- oidPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1}
- oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1}
- oidPublicKeyEd25519 = oidSignatureEd25519
-)
-
-func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm {
- switch {
- case oid.Equal(oidPublicKeyRSA):
- return RSA
- case oid.Equal(oidPublicKeyDSA):
- return DSA
- case oid.Equal(oidPublicKeyECDSA):
- return ECDSA
- case oid.Equal(oidPublicKeyEd25519):
- return Ed25519
- }
- return UnknownPublicKeyAlgorithm
-}
-
-// RFC 5480, 2.1.1.1. Named Curve
-//
-// secp224r1 OBJECT IDENTIFIER ::= {
-// iso(1) identified-organization(3) certicom(132) curve(0) 33 }
-//
-// secp256r1 OBJECT IDENTIFIER ::= {
-// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3)
-// prime(1) 7 }
-//
-// secp384r1 OBJECT IDENTIFIER ::= {
-// iso(1) identified-organization(3) certicom(132) curve(0) 34 }
-//
-// secp521r1 OBJECT IDENTIFIER ::= {
-// iso(1) identified-organization(3) certicom(132) curve(0) 35 }
-//
-// NB: secp256r1 is equivalent to prime256v1
-var (
- oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33}
- oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}
- oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34}
- oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35}
-)
-
-func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve {
- switch {
- case oid.Equal(oidNamedCurveP224):
- return elliptic.P224()
- case oid.Equal(oidNamedCurveP256):
- return elliptic.P256()
- case oid.Equal(oidNamedCurveP384):
- return elliptic.P384()
- case oid.Equal(oidNamedCurveP521):
- return elliptic.P521()
- }
- return nil
-}
-
-func oidFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) {
- switch curve {
- case elliptic.P224():
- return oidNamedCurveP224, true
- case elliptic.P256():
- return oidNamedCurveP256, true
- case elliptic.P384():
- return oidNamedCurveP384, true
- case elliptic.P521():
- return oidNamedCurveP521, true
- }
-
- return nil, false
-}
-
-// KeyUsage represents the set of actions that are valid for a given key. It's
-// a bitmap of the KeyUsage* constants.
-type KeyUsage int
-
-const (
- KeyUsageDigitalSignature KeyUsage = 1 << iota
- KeyUsageContentCommitment
- KeyUsageKeyEncipherment
- KeyUsageDataEncipherment
- KeyUsageKeyAgreement
- KeyUsageCertSign
- KeyUsageCRLSign
- KeyUsageEncipherOnly
- KeyUsageDecipherOnly
-)
-
-// RFC 5280, 4.2.1.12 Extended Key Usage
-//
-// anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 }
-//
-// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 }
-//
-// id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 }
-// id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 }
-// id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 }
-// id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 }
-// id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 }
-// id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 }
-var (
- oidExtKeyUsageAny = asn1.ObjectIdentifier{2, 5, 29, 37, 0}
- oidExtKeyUsageServerAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1}
- oidExtKeyUsageClientAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 2}
- oidExtKeyUsageCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 3}
- oidExtKeyUsageEmailProtection = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 4}
- oidExtKeyUsageIPSECEndSystem = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 5}
- oidExtKeyUsageIPSECTunnel = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 6}
- oidExtKeyUsageIPSECUser = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 7}
- oidExtKeyUsageTimeStamping = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8}
- oidExtKeyUsageOCSPSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 9}
- oidExtKeyUsageMicrosoftServerGatedCrypto = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 3}
- oidExtKeyUsageNetscapeServerGatedCrypto = asn1.ObjectIdentifier{2, 16, 840, 1, 113730, 4, 1}
- oidExtKeyUsageMicrosoftCommercialCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 2, 1, 22}
- oidExtKeyUsageMicrosoftKernelCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 61, 1, 1}
-)
-
-// ExtKeyUsage represents an extended set of actions that are valid for a given key.
-// Each of the ExtKeyUsage* constants defines a unique action.
-type ExtKeyUsage int
-
-const (
- ExtKeyUsageAny ExtKeyUsage = iota
- ExtKeyUsageServerAuth
- ExtKeyUsageClientAuth
- ExtKeyUsageCodeSigning
- ExtKeyUsageEmailProtection
- ExtKeyUsageIPSECEndSystem
- ExtKeyUsageIPSECTunnel
- ExtKeyUsageIPSECUser
- ExtKeyUsageTimeStamping
- ExtKeyUsageOCSPSigning
- ExtKeyUsageMicrosoftServerGatedCrypto
- ExtKeyUsageNetscapeServerGatedCrypto
- ExtKeyUsageMicrosoftCommercialCodeSigning
- ExtKeyUsageMicrosoftKernelCodeSigning
-)
-
-// extKeyUsageOIDs contains the mapping between an ExtKeyUsage and its OID.
-var extKeyUsageOIDs = []struct {
- extKeyUsage ExtKeyUsage
- oid asn1.ObjectIdentifier
-}{
- {ExtKeyUsageAny, oidExtKeyUsageAny},
- {ExtKeyUsageServerAuth, oidExtKeyUsageServerAuth},
- {ExtKeyUsageClientAuth, oidExtKeyUsageClientAuth},
- {ExtKeyUsageCodeSigning, oidExtKeyUsageCodeSigning},
- {ExtKeyUsageEmailProtection, oidExtKeyUsageEmailProtection},
- {ExtKeyUsageIPSECEndSystem, oidExtKeyUsageIPSECEndSystem},
- {ExtKeyUsageIPSECTunnel, oidExtKeyUsageIPSECTunnel},
- {ExtKeyUsageIPSECUser, oidExtKeyUsageIPSECUser},
- {ExtKeyUsageTimeStamping, oidExtKeyUsageTimeStamping},
- {ExtKeyUsageOCSPSigning, oidExtKeyUsageOCSPSigning},
- {ExtKeyUsageMicrosoftServerGatedCrypto, oidExtKeyUsageMicrosoftServerGatedCrypto},
- {ExtKeyUsageNetscapeServerGatedCrypto, oidExtKeyUsageNetscapeServerGatedCrypto},
- {ExtKeyUsageMicrosoftCommercialCodeSigning, oidExtKeyUsageMicrosoftCommercialCodeSigning},
- {ExtKeyUsageMicrosoftKernelCodeSigning, oidExtKeyUsageMicrosoftKernelCodeSigning},
-}
-
-func extKeyUsageFromOID(oid asn1.ObjectIdentifier) (eku ExtKeyUsage, ok bool) {
- for _, pair := range extKeyUsageOIDs {
- if oid.Equal(pair.oid) {
- return pair.extKeyUsage, true
- }
- }
- return
-}
-
-func oidFromExtKeyUsage(eku ExtKeyUsage) (oid asn1.ObjectIdentifier, ok bool) {
- for _, pair := range extKeyUsageOIDs {
- if eku == pair.extKeyUsage {
- return pair.oid, true
- }
- }
- return
-}
-
-// A Certificate represents an X.509 certificate.
-type Certificate struct {
- Raw []byte // Complete ASN.1 DER content (certificate, signature algorithm and signature).
- RawTBSCertificate []byte // Certificate part of raw ASN.1 DER content.
- RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo.
- RawSubject []byte // DER encoded Subject
- RawIssuer []byte // DER encoded Issuer
-
- Signature []byte
- SignatureAlgorithm SignatureAlgorithm
-
- PublicKeyAlgorithm PublicKeyAlgorithm
- PublicKey any
-
- Version int
- SerialNumber *big.Int
- Issuer pkix.Name
- Subject pkix.Name
- NotBefore, NotAfter time.Time // Validity bounds.
- KeyUsage KeyUsage
-
- // Extensions contains raw X.509 extensions. When parsing certificates,
- // this can be used to extract non-critical extensions that are not
- // parsed by this package. When marshaling certificates, the Extensions
- // field is ignored, see ExtraExtensions.
- Extensions []pkix.Extension
-
- // ExtraExtensions contains extensions to be copied, raw, into any
- // marshaled certificates. Values override any extensions that would
- // otherwise be produced based on the other fields. The ExtraExtensions
- // field is not populated when parsing certificates, see Extensions.
- ExtraExtensions []pkix.Extension
-
- // UnhandledCriticalExtensions contains a list of extension IDs that
- // were not (fully) processed when parsing. Verify will fail if this
- // slice is non-empty, unless verification is delegated to an OS
- // library which understands all the critical extensions.
- //
- // Users can access these extensions using Extensions and can remove
- // elements from this slice if they believe that they have been
- // handled.
- UnhandledCriticalExtensions []asn1.ObjectIdentifier
-
- ExtKeyUsage []ExtKeyUsage // Sequence of extended key usages.
- UnknownExtKeyUsage []asn1.ObjectIdentifier // Encountered extended key usages unknown to this package.
-
- // BasicConstraintsValid indicates whether IsCA, MaxPathLen,
- // and MaxPathLenZero are valid.
- BasicConstraintsValid bool
- IsCA bool
-
- // MaxPathLen and MaxPathLenZero indicate the presence and
- // value of the BasicConstraints' "pathLenConstraint".
- //
-	// When parsing a certificate, a positive MaxPathLen means that the
-	// field was specified, -1 means it was unset, and MaxPathLenZero
-	// being true means that the field was explicitly set to zero. The
-	// case of MaxPathLen==0 with MaxPathLenZero==false should be
-	// treated as equivalent to -1 (unset).
- //
- // When generating a certificate, an unset pathLenConstraint
- // can be requested with either MaxPathLen == -1 or using the
- // zero value for both MaxPathLen and MaxPathLenZero.
- MaxPathLen int
- // MaxPathLenZero indicates that BasicConstraintsValid==true
- // and MaxPathLen==0 should be interpreted as an actual
- // maximum path length of zero. Otherwise, that combination is
- // interpreted as MaxPathLen not being set.
- MaxPathLenZero bool
-
- SubjectKeyId []byte
- AuthorityKeyId []byte
-
- // RFC 5280, 4.2.2.1 (Authority Information Access)
- OCSPServer []string
- IssuingCertificateURL []string
-
-	// Subject Alternative Name values. (Note that these values may not be valid
- // if invalid values were contained within a parsed certificate. For
- // example, an element of DNSNames may not be a valid DNS domain name.)
- DNSNames []string
- EmailAddresses []string
- IPAddresses []net.IP
- URIs []*url.URL
-
- // Name constraints
- PermittedDNSDomainsCritical bool // if true then the name constraints are marked critical.
- PermittedDNSDomains []string
- ExcludedDNSDomains []string
- PermittedIPRanges []*net.IPNet
- ExcludedIPRanges []*net.IPNet
- PermittedEmailAddresses []string
- ExcludedEmailAddresses []string
- PermittedURIDomains []string
- ExcludedURIDomains []string
-
- // CRL Distribution Points
- CRLDistributionPoints []string
-
- PolicyIdentifiers []asn1.ObjectIdentifier
-}
-
-// ErrUnsupportedAlgorithm results from attempting to perform an operation that
-// involves algorithms that are not currently implemented.
-var ErrUnsupportedAlgorithm = errors.New("x509: cannot verify signature: algorithm unimplemented")
-
-// debugAllowSHA1 allows SHA-1 signatures. See issue 41682.
-var debugAllowSHA1 = godebug.Get("x509sha1") == "1"
-
-// An InsecureAlgorithmError indicates that the SignatureAlgorithm used to
-// generate the signature is not secure, and the signature has been rejected.
-//
-// To temporarily restore support for SHA-1 signatures, include the value
-// "x509sha1=1" in the GODEBUG environment variable. Note that this option will
-// be removed in Go 1.19.
-type InsecureAlgorithmError SignatureAlgorithm
-
-func (e InsecureAlgorithmError) Error() string {
- var override string
- if SignatureAlgorithm(e) == SHA1WithRSA || SignatureAlgorithm(e) == ECDSAWithSHA1 {
- override = " (temporarily override with GODEBUG=x509sha1=1)"
- }
- return fmt.Sprintf("x509: cannot verify signature: insecure algorithm %v", SignatureAlgorithm(e)) + override
-}
-
-// ConstraintViolationError results when a requested usage is not permitted by
-// a certificate. For example: checking a signature when the public key isn't a
-// certificate signing key.
-type ConstraintViolationError struct{}
-
-func (ConstraintViolationError) Error() string {
- return "x509: invalid signature: parent certificate cannot sign this kind of certificate"
-}
-
-func (c *Certificate) Equal(other *Certificate) bool {
- if c == nil || other == nil {
- return c == other
- }
- return bytes.Equal(c.Raw, other.Raw)
-}
-
-func (c *Certificate) hasSANExtension() bool {
- return oidInExtensions(oidExtensionSubjectAltName, c.Extensions)
-}
-
-// CheckSignatureFrom verifies that the signature on c is a valid signature
-// from parent. SHA1WithRSA and ECDSAWithSHA1 signatures are not supported.
-func (c *Certificate) CheckSignatureFrom(parent *Certificate) error {
- // RFC 5280, 4.2.1.9:
- // "If the basic constraints extension is not present in a version 3
- // certificate, or the extension is present but the cA boolean is not
- // asserted, then the certified public key MUST NOT be used to verify
- // certificate signatures."
- if parent.Version == 3 && !parent.BasicConstraintsValid ||
- parent.BasicConstraintsValid && !parent.IsCA {
- return ConstraintViolationError{}
- }
-
- if parent.KeyUsage != 0 && parent.KeyUsage&KeyUsageCertSign == 0 {
- return ConstraintViolationError{}
- }
-
- if parent.PublicKeyAlgorithm == UnknownPublicKeyAlgorithm {
- return ErrUnsupportedAlgorithm
- }
-
- // TODO(agl): don't ignore the path length constraint.
-
- return checkSignature(c.SignatureAlgorithm, c.RawTBSCertificate, c.Signature, parent.PublicKey, debugAllowSHA1)
-}
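-
-// A minimal usage sketch for CheckSignatureFrom, assuming leafDER and caDER
-// are hypothetical DER-encoded certificates (not defined in this package):
-//
-//	leaf, err := ParseCertificate(leafDER)
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	ca, err := ParseCertificate(caDER)
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	if err := leaf.CheckSignatureFrom(ca); err != nil {
-//		log.Fatalf("leaf not signed by ca: %v", err)
-//	}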
-
-// CheckSignature verifies that signature is a valid signature over signed from
-// c's public key.
-func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature []byte) error {
- return checkSignature(algo, signed, signature, c.PublicKey, true)
-}
-
-func (c *Certificate) hasNameConstraints() bool {
- return oidInExtensions(oidExtensionNameConstraints, c.Extensions)
-}
-
-func (c *Certificate) getSANExtension() []byte {
- for _, e := range c.Extensions {
- if e.Id.Equal(oidExtensionSubjectAltName) {
- return e.Value
- }
- }
- return nil
-}
-
-func signaturePublicKeyAlgoMismatchError(expectedPubKeyAlgo PublicKeyAlgorithm, pubKey any) error {
- return fmt.Errorf("x509: signature algorithm specifies an %s public key, but have public key of type %T", expectedPubKeyAlgo.String(), pubKey)
-}
-
-// checkSignature verifies that signature is a valid signature over signed from
-// a crypto.PublicKey.
-func checkSignature(algo SignatureAlgorithm, signed, signature []byte, publicKey crypto.PublicKey, allowSHA1 bool) (err error) {
- var hashType crypto.Hash
- var pubKeyAlgo PublicKeyAlgorithm
-
- for _, details := range signatureAlgorithmDetails {
- if details.algo == algo {
- hashType = details.hash
- pubKeyAlgo = details.pubKeyAlgo
- }
- }
-
- switch hashType {
- case crypto.Hash(0):
- if pubKeyAlgo != Ed25519 {
- return ErrUnsupportedAlgorithm
- }
- case crypto.MD5:
- return InsecureAlgorithmError(algo)
- case crypto.SHA1:
- if !allowSHA1 {
- return InsecureAlgorithmError(algo)
- }
- fallthrough
- default:
- if !hashType.Available() {
- return ErrUnsupportedAlgorithm
- }
- h := hashType.New()
- h.Write(signed)
- signed = h.Sum(nil)
- }
-
- switch pub := publicKey.(type) {
- case *rsa.PublicKey:
- if pubKeyAlgo != RSA {
- return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub)
- }
- if algo.isRSAPSS() {
- return rsa.VerifyPSS(pub, hashType, signed, signature, &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash})
- } else {
- return rsa.VerifyPKCS1v15(pub, hashType, signed, signature)
- }
- case *ecdsa.PublicKey:
- if pubKeyAlgo != ECDSA {
- return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub)
- }
- if !ecdsa.VerifyASN1(pub, signed, signature) {
- return errors.New("x509: ECDSA verification failure")
- }
- return
- case ed25519.PublicKey:
- if pubKeyAlgo != Ed25519 {
- return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub)
- }
- if !ed25519.Verify(pub, signed, signature) {
- return errors.New("x509: Ed25519 verification failure")
- }
- return
- }
- return ErrUnsupportedAlgorithm
-}
-
-// CheckCRLSignature checks that the signature in crl is from c.
-func (c *Certificate) CheckCRLSignature(crl *pkix.CertificateList) error {
- algo := getSignatureAlgorithmFromAI(crl.SignatureAlgorithm)
- return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign())
-}
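-
-// A minimal sketch of checking a CRL against its issuer with ParseCRL and
-// CheckCRLSignature, assuming issuer is the issuing *Certificate and
-// crlBytes holds a raw CRL (both hypothetical):
-//
-//	crl, err := ParseCRL(crlBytes)
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	if err := issuer.CheckCRLSignature(crl); err != nil {
-//		log.Fatalf("CRL not signed by issuer: %v", err)
-//	}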
-
-type UnhandledCriticalExtension struct{}
-
-func (h UnhandledCriticalExtension) Error() string {
- return "x509: unhandled critical extension"
-}
-
-type basicConstraints struct {
- IsCA bool `asn1:"optional"`
- MaxPathLen int `asn1:"optional,default:-1"`
-}
-
-// RFC 5280 4.2.1.4
-type policyInformation struct {
- Policy asn1.ObjectIdentifier
- // policyQualifiers omitted
-}
-
-const (
- nameTypeEmail = 1
- nameTypeDNS = 2
- nameTypeURI = 6
- nameTypeIP = 7
-)
-
-// RFC 5280, 4.2.2.1
-type authorityInfoAccess struct {
- Method asn1.ObjectIdentifier
- Location asn1.RawValue
-}
-
-// RFC 5280, 4.2.1.14
-type distributionPoint struct {
- DistributionPoint distributionPointName `asn1:"optional,tag:0"`
- Reason asn1.BitString `asn1:"optional,tag:1"`
- CRLIssuer asn1.RawValue `asn1:"optional,tag:2"`
-}
-
-type distributionPointName struct {
- FullName []asn1.RawValue `asn1:"optional,tag:0"`
- RelativeName pkix.RDNSequence `asn1:"optional,tag:1"`
-}
-
-// reverseBitsInAByte reverses the order of the bits in a byte.
-func reverseBitsInAByte(in byte) byte {
-	b1 := in>>4 | in<<4           // swap the two 4-bit halves
-	b2 := b1>>2&0x33 | b1<<2&0xcc // swap adjacent bit pairs
-	b3 := b2>>1&0x55 | b2<<1&0xaa // swap adjacent bits
-	return b3
-}
-
-// asn1BitLength returns the bit-length of bitString by considering the
-// most-significant bit in a byte to be the "first" bit. This convention
-// matches ASN.1, but differs from almost everything else. For example,
-// asn1BitLength([]byte{0x80}) is 1, while asn1BitLength([]byte{0x01}) is 8.
-func asn1BitLength(bitString []byte) int {
- bitLen := len(bitString) * 8
-
- for i := range bitString {
- b := bitString[len(bitString)-i-1]
-
- for bit := uint(0); bit < 8; bit++ {
- if (b>>bit)&1 == 1 {
- return bitLen
- }
- bitLen--
- }
- }
-
- return 0
-}
-
-var (
- oidExtensionSubjectKeyId = []int{2, 5, 29, 14}
- oidExtensionKeyUsage = []int{2, 5, 29, 15}
- oidExtensionExtendedKeyUsage = []int{2, 5, 29, 37}
- oidExtensionAuthorityKeyId = []int{2, 5, 29, 35}
- oidExtensionBasicConstraints = []int{2, 5, 29, 19}
- oidExtensionSubjectAltName = []int{2, 5, 29, 17}
- oidExtensionCertificatePolicies = []int{2, 5, 29, 32}
- oidExtensionNameConstraints = []int{2, 5, 29, 30}
- oidExtensionCRLDistributionPoints = []int{2, 5, 29, 31}
- oidExtensionAuthorityInfoAccess = []int{1, 3, 6, 1, 5, 5, 7, 1, 1}
- oidExtensionCRLNumber = []int{2, 5, 29, 20}
-)
-
-var (
- oidAuthorityInfoAccessOcsp = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1}
- oidAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2}
-)
-
-// oidInExtensions reports whether an extension with the given oid exists in
-// extensions.
-func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool {
- for _, e := range extensions {
- if e.Id.Equal(oid) {
- return true
- }
- }
- return false
-}
-
-// marshalSANs marshals a list of addresses into the contents of an X.509
-// SubjectAlternativeName extension.
-func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) (derBytes []byte, err error) {
- var rawValues []asn1.RawValue
- for _, name := range dnsNames {
- if err := isIA5String(name); err != nil {
- return nil, err
- }
- rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeDNS, Class: 2, Bytes: []byte(name)})
- }
- for _, email := range emailAddresses {
- if err := isIA5String(email); err != nil {
- return nil, err
- }
- rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeEmail, Class: 2, Bytes: []byte(email)})
- }
- for _, rawIP := range ipAddresses {
- // If possible, we always want to encode IPv4 addresses in 4 bytes.
- ip := rawIP.To4()
- if ip == nil {
- ip = rawIP
- }
- rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeIP, Class: 2, Bytes: ip})
- }
- for _, uri := range uris {
- uriStr := uri.String()
- if err := isIA5String(uriStr); err != nil {
- return nil, err
- }
- rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: 2, Bytes: []byte(uriStr)})
- }
- return asn1.Marshal(rawValues)
-}
-
-func isIA5String(s string) error {
- for _, r := range s {
-		// Per RFC 5280, "IA5String is limited to the set of ASCII characters"
- if r > unicode.MaxASCII {
- return fmt.Errorf("x509: %q cannot be encoded as an IA5String", s)
- }
- }
-
- return nil
-}
-
-func buildCertExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId []byte, subjectKeyId []byte) (ret []pkix.Extension, err error) {
- ret = make([]pkix.Extension, 10 /* maximum number of elements. */)
- n := 0
-
- if template.KeyUsage != 0 &&
- !oidInExtensions(oidExtensionKeyUsage, template.ExtraExtensions) {
- ret[n], err = marshalKeyUsage(template.KeyUsage)
- if err != nil {
- return nil, err
- }
- n++
- }
-
- if (len(template.ExtKeyUsage) > 0 || len(template.UnknownExtKeyUsage) > 0) &&
- !oidInExtensions(oidExtensionExtendedKeyUsage, template.ExtraExtensions) {
- ret[n], err = marshalExtKeyUsage(template.ExtKeyUsage, template.UnknownExtKeyUsage)
- if err != nil {
- return nil, err
- }
- n++
- }
-
- if template.BasicConstraintsValid && !oidInExtensions(oidExtensionBasicConstraints, template.ExtraExtensions) {
- ret[n], err = marshalBasicConstraints(template.IsCA, template.MaxPathLen, template.MaxPathLenZero)
- if err != nil {
- return nil, err
- }
- n++
- }
-
- if len(subjectKeyId) > 0 && !oidInExtensions(oidExtensionSubjectKeyId, template.ExtraExtensions) {
- ret[n].Id = oidExtensionSubjectKeyId
- ret[n].Value, err = asn1.Marshal(subjectKeyId)
- if err != nil {
- return
- }
- n++
- }
-
- if len(authorityKeyId) > 0 && !oidInExtensions(oidExtensionAuthorityKeyId, template.ExtraExtensions) {
- ret[n].Id = oidExtensionAuthorityKeyId
- ret[n].Value, err = asn1.Marshal(authKeyId{authorityKeyId})
- if err != nil {
- return
- }
- n++
- }
-
- if (len(template.OCSPServer) > 0 || len(template.IssuingCertificateURL) > 0) &&
- !oidInExtensions(oidExtensionAuthorityInfoAccess, template.ExtraExtensions) {
- ret[n].Id = oidExtensionAuthorityInfoAccess
- var aiaValues []authorityInfoAccess
- for _, name := range template.OCSPServer {
- aiaValues = append(aiaValues, authorityInfoAccess{
- Method: oidAuthorityInfoAccessOcsp,
- Location: asn1.RawValue{Tag: 6, Class: 2, Bytes: []byte(name)},
- })
- }
- for _, name := range template.IssuingCertificateURL {
- aiaValues = append(aiaValues, authorityInfoAccess{
- Method: oidAuthorityInfoAccessIssuers,
- Location: asn1.RawValue{Tag: 6, Class: 2, Bytes: []byte(name)},
- })
- }
- ret[n].Value, err = asn1.Marshal(aiaValues)
- if err != nil {
- return
- }
- n++
- }
-
- if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) &&
- !oidInExtensions(oidExtensionSubjectAltName, template.ExtraExtensions) {
- ret[n].Id = oidExtensionSubjectAltName
- // From RFC 5280, Section 4.2.1.6:
- // “If the subject field contains an empty sequence ... then
- // subjectAltName extension ... is marked as critical”
- ret[n].Critical = subjectIsEmpty
- ret[n].Value, err = marshalSANs(template.DNSNames, template.EmailAddresses, template.IPAddresses, template.URIs)
- if err != nil {
- return
- }
- n++
- }
-
- if len(template.PolicyIdentifiers) > 0 &&
- !oidInExtensions(oidExtensionCertificatePolicies, template.ExtraExtensions) {
- ret[n], err = marshalCertificatePolicies(template.PolicyIdentifiers)
- if err != nil {
- return nil, err
- }
- n++
- }
-
- if (len(template.PermittedDNSDomains) > 0 || len(template.ExcludedDNSDomains) > 0 ||
- len(template.PermittedIPRanges) > 0 || len(template.ExcludedIPRanges) > 0 ||
- len(template.PermittedEmailAddresses) > 0 || len(template.ExcludedEmailAddresses) > 0 ||
- len(template.PermittedURIDomains) > 0 || len(template.ExcludedURIDomains) > 0) &&
- !oidInExtensions(oidExtensionNameConstraints, template.ExtraExtensions) {
- ret[n].Id = oidExtensionNameConstraints
- ret[n].Critical = template.PermittedDNSDomainsCritical
-
- ipAndMask := func(ipNet *net.IPNet) []byte {
- maskedIP := ipNet.IP.Mask(ipNet.Mask)
- ipAndMask := make([]byte, 0, len(maskedIP)+len(ipNet.Mask))
- ipAndMask = append(ipAndMask, maskedIP...)
- ipAndMask = append(ipAndMask, ipNet.Mask...)
- return ipAndMask
- }
-
- serialiseConstraints := func(dns []string, ips []*net.IPNet, emails []string, uriDomains []string) (der []byte, err error) {
- var b cryptobyte.Builder
-
- for _, name := range dns {
- if err = isIA5String(name); err != nil {
- return nil, err
- }
-
- b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) {
- b.AddASN1(cryptobyte_asn1.Tag(2).ContextSpecific(), func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(name))
- })
- })
- }
-
- for _, ipNet := range ips {
- b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) {
- b.AddASN1(cryptobyte_asn1.Tag(7).ContextSpecific(), func(b *cryptobyte.Builder) {
- b.AddBytes(ipAndMask(ipNet))
- })
- })
- }
-
- for _, email := range emails {
- if err = isIA5String(email); err != nil {
- return nil, err
- }
-
- b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) {
- b.AddASN1(cryptobyte_asn1.Tag(1).ContextSpecific(), func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(email))
- })
- })
- }
-
- for _, uriDomain := range uriDomains {
- if err = isIA5String(uriDomain); err != nil {
- return nil, err
- }
-
- b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) {
- b.AddASN1(cryptobyte_asn1.Tag(6).ContextSpecific(), func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(uriDomain))
- })
- })
- }
-
- return b.Bytes()
- }
-
- permitted, err := serialiseConstraints(template.PermittedDNSDomains, template.PermittedIPRanges, template.PermittedEmailAddresses, template.PermittedURIDomains)
- if err != nil {
- return nil, err
- }
-
- excluded, err := serialiseConstraints(template.ExcludedDNSDomains, template.ExcludedIPRanges, template.ExcludedEmailAddresses, template.ExcludedURIDomains)
- if err != nil {
- return nil, err
- }
-
- var b cryptobyte.Builder
- b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) {
- if len(permitted) > 0 {
- b.AddASN1(cryptobyte_asn1.Tag(0).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) {
- b.AddBytes(permitted)
- })
- }
-
- if len(excluded) > 0 {
- b.AddASN1(cryptobyte_asn1.Tag(1).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) {
- b.AddBytes(excluded)
- })
- }
- })
-
- ret[n].Value, err = b.Bytes()
- if err != nil {
- return nil, err
- }
- n++
- }
-
- if len(template.CRLDistributionPoints) > 0 &&
- !oidInExtensions(oidExtensionCRLDistributionPoints, template.ExtraExtensions) {
- ret[n].Id = oidExtensionCRLDistributionPoints
-
- var crlDp []distributionPoint
- for _, name := range template.CRLDistributionPoints {
- dp := distributionPoint{
- DistributionPoint: distributionPointName{
- FullName: []asn1.RawValue{
- {Tag: 6, Class: 2, Bytes: []byte(name)},
- },
- },
- }
- crlDp = append(crlDp, dp)
- }
-
- ret[n].Value, err = asn1.Marshal(crlDp)
- if err != nil {
- return
- }
- n++
- }
-
- // Adding another extension here? Remember to update the maximum number
- // of elements in the make() at the top of the function and the list of
- // template fields used in CreateCertificate documentation.
-
- return append(ret[:n], template.ExtraExtensions...), nil
-}
-
-func marshalKeyUsage(ku KeyUsage) (pkix.Extension, error) {
- ext := pkix.Extension{Id: oidExtensionKeyUsage, Critical: true}
-
- var a [2]byte
- a[0] = reverseBitsInAByte(byte(ku))
- a[1] = reverseBitsInAByte(byte(ku >> 8))
-
- l := 1
- if a[1] != 0 {
- l = 2
- }
-
- bitString := a[:l]
- var err error
- ext.Value, err = asn1.Marshal(asn1.BitString{Bytes: bitString, BitLength: asn1BitLength(bitString)})
- if err != nil {
- return ext, err
- }
- return ext, nil
-}
-
-func marshalExtKeyUsage(extUsages []ExtKeyUsage, unknownUsages []asn1.ObjectIdentifier) (pkix.Extension, error) {
- ext := pkix.Extension{Id: oidExtensionExtendedKeyUsage}
-
- oids := make([]asn1.ObjectIdentifier, len(extUsages)+len(unknownUsages))
- for i, u := range extUsages {
- if oid, ok := oidFromExtKeyUsage(u); ok {
- oids[i] = oid
- } else {
- return ext, errors.New("x509: unknown extended key usage")
- }
- }
-
- copy(oids[len(extUsages):], unknownUsages)
-
- var err error
- ext.Value, err = asn1.Marshal(oids)
- if err != nil {
- return ext, err
- }
- return ext, nil
-}
-
-func marshalBasicConstraints(isCA bool, maxPathLen int, maxPathLenZero bool) (pkix.Extension, error) {
- ext := pkix.Extension{Id: oidExtensionBasicConstraints, Critical: true}
- // Leaving MaxPathLen as zero indicates that no maximum path
- // length is desired, unless MaxPathLenZero is set. A value of
- // -1 causes encoding/asn1 to omit the value as desired.
- if maxPathLen == 0 && !maxPathLenZero {
- maxPathLen = -1
- }
- var err error
- ext.Value, err = asn1.Marshal(basicConstraints{isCA, maxPathLen})
- if err != nil {
-		return ext, err
- }
- return ext, nil
-}
-
-func marshalCertificatePolicies(policyIdentifiers []asn1.ObjectIdentifier) (pkix.Extension, error) {
- ext := pkix.Extension{Id: oidExtensionCertificatePolicies}
- policies := make([]policyInformation, len(policyIdentifiers))
- for i, policy := range policyIdentifiers {
- policies[i].Policy = policy
- }
- var err error
- ext.Value, err = asn1.Marshal(policies)
- if err != nil {
- return ext, err
- }
- return ext, nil
-}
-
-func buildCSRExtensions(template *CertificateRequest) ([]pkix.Extension, error) {
- var ret []pkix.Extension
-
- if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) &&
- !oidInExtensions(oidExtensionSubjectAltName, template.ExtraExtensions) {
- sanBytes, err := marshalSANs(template.DNSNames, template.EmailAddresses, template.IPAddresses, template.URIs)
- if err != nil {
- return nil, err
- }
-
- ret = append(ret, pkix.Extension{
- Id: oidExtensionSubjectAltName,
- Value: sanBytes,
- })
- }
-
- return append(ret, template.ExtraExtensions...), nil
-}
-
-func subjectBytes(cert *Certificate) ([]byte, error) {
- if len(cert.RawSubject) > 0 {
- return cert.RawSubject, nil
- }
-
- return asn1.Marshal(cert.Subject.ToRDNSequence())
-}
-
-// signingParamsForPublicKey returns the parameters to use for signing with
-// priv. If requestedSigAlgo is not zero then it overrides the default
-// signature algorithm.
-func signingParamsForPublicKey(pub any, requestedSigAlgo SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) {
- var pubType PublicKeyAlgorithm
-
- switch pub := pub.(type) {
- case *rsa.PublicKey:
- pubType = RSA
- hashFunc = crypto.SHA256
- sigAlgo.Algorithm = oidSignatureSHA256WithRSA
- sigAlgo.Parameters = asn1.NullRawValue
-
- case *ecdsa.PublicKey:
- pubType = ECDSA
-
- switch pub.Curve {
- case elliptic.P224(), elliptic.P256():
- hashFunc = crypto.SHA256
- sigAlgo.Algorithm = oidSignatureECDSAWithSHA256
- case elliptic.P384():
- hashFunc = crypto.SHA384
- sigAlgo.Algorithm = oidSignatureECDSAWithSHA384
- case elliptic.P521():
- hashFunc = crypto.SHA512
- sigAlgo.Algorithm = oidSignatureECDSAWithSHA512
- default:
- err = errors.New("x509: unknown elliptic curve")
- }
-
- case ed25519.PublicKey:
- pubType = Ed25519
- sigAlgo.Algorithm = oidSignatureEd25519
-
- default:
- err = errors.New("x509: only RSA, ECDSA and Ed25519 keys supported")
- }
-
- if err != nil {
- return
- }
-
- if requestedSigAlgo == 0 {
- return
- }
-
- found := false
- for _, details := range signatureAlgorithmDetails {
- if details.algo == requestedSigAlgo {
- if details.pubKeyAlgo != pubType {
- err = errors.New("x509: requested SignatureAlgorithm does not match private key type")
- return
- }
- sigAlgo.Algorithm, hashFunc = details.oid, details.hash
- if hashFunc == 0 && pubType != Ed25519 {
- err = errors.New("x509: cannot sign with hash function requested")
- return
- }
- if requestedSigAlgo.isRSAPSS() {
- sigAlgo.Parameters = hashToPSSParameters[hashFunc]
- }
- found = true
- break
- }
- }
-
- if !found {
- err = errors.New("x509: unknown SignatureAlgorithm")
- }
-
- return
-}
-
-// emptyASN1Subject is the ASN.1 DER encoding of an empty Subject, which is
-// just an empty SEQUENCE.
-var emptyASN1Subject = []byte{0x30, 0}
-
-// CreateCertificate creates a new X.509 v3 certificate based on a template.
-// The following members of template are currently used:
-//
-// - AuthorityKeyId
-// - BasicConstraintsValid
-// - CRLDistributionPoints
-// - DNSNames
-// - EmailAddresses
-// - ExcludedDNSDomains
-// - ExcludedEmailAddresses
-// - ExcludedIPRanges
-// - ExcludedURIDomains
-// - ExtKeyUsage
-// - ExtraExtensions
-// - IPAddresses
-// - IsCA
-// - IssuingCertificateURL
-// - KeyUsage
-// - MaxPathLen
-// - MaxPathLenZero
-// - NotAfter
-// - NotBefore
-// - OCSPServer
-// - PermittedDNSDomains
-// - PermittedDNSDomainsCritical
-// - PermittedEmailAddresses
-// - PermittedIPRanges
-// - PermittedURIDomains
-// - PolicyIdentifiers
-// - SerialNumber
-// - SignatureAlgorithm
-// - Subject
-// - SubjectKeyId
-// - URIs
-// - UnknownExtKeyUsage
-//
-// The certificate is signed by parent. If parent is equal to template then the
-// certificate is self-signed. The parameter pub is the public key of the
-// certificate to be generated and priv is the private key of the signer.
-//
-// The returned slice is the certificate in DER encoding.
-//
-// The currently supported key types are *rsa.PublicKey, *ecdsa.PublicKey and
-// ed25519.PublicKey. pub must be a supported key type, and priv must be a
-// crypto.Signer with a supported public key.
-//
-// The AuthorityKeyId will be taken from the SubjectKeyId of parent, if any,
-// unless the resulting certificate is self-signed. Otherwise the value from
-// template will be used.
-//
-// If SubjectKeyId from template is empty and the template is a CA, SubjectKeyId
-// will be generated from the hash of the public key.
-func CreateCertificate(rand io.Reader, template, parent *Certificate, pub, priv any) ([]byte, error) {
- key, ok := priv.(crypto.Signer)
- if !ok {
- return nil, errors.New("x509: certificate private key does not implement crypto.Signer")
- }
-
- if template.SerialNumber == nil {
- return nil, errors.New("x509: no SerialNumber given")
- }
-
- if template.BasicConstraintsValid && !template.IsCA && template.MaxPathLen != -1 && (template.MaxPathLen != 0 || template.MaxPathLenZero) {
- return nil, errors.New("x509: only CAs are allowed to specify MaxPathLen")
- }
-
- hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(key.Public(), template.SignatureAlgorithm)
- if err != nil {
- return nil, err
- }
-
- publicKeyBytes, publicKeyAlgorithm, err := marshalPublicKey(pub)
- if err != nil {
- return nil, err
- }
-
- asn1Issuer, err := subjectBytes(parent)
- if err != nil {
- return nil, err
- }
-
- asn1Subject, err := subjectBytes(template)
- if err != nil {
- return nil, err
- }
-
- authorityKeyId := template.AuthorityKeyId
- if !bytes.Equal(asn1Issuer, asn1Subject) && len(parent.SubjectKeyId) > 0 {
- authorityKeyId = parent.SubjectKeyId
- }
-
- subjectKeyId := template.SubjectKeyId
- if len(subjectKeyId) == 0 && template.IsCA {
- // SubjectKeyId generated using method 1 in RFC 5280, Section 4.2.1.2:
- // (1) The keyIdentifier is composed of the 160-bit SHA-1 hash of the
- // value of the BIT STRING subjectPublicKey (excluding the tag,
- // length, and number of unused bits).
- h := sha1.Sum(publicKeyBytes)
- subjectKeyId = h[:]
- }
-
- // Check that the signer's public key matches the private key, if available.
- type privateKey interface {
- Equal(crypto.PublicKey) bool
- }
- if privPub, ok := key.Public().(privateKey); !ok {
- return nil, errors.New("x509: internal error: supported public key does not implement Equal")
- } else if parent.PublicKey != nil && !privPub.Equal(parent.PublicKey) {
- return nil, errors.New("x509: provided PrivateKey doesn't match parent's PublicKey")
- }
-
- extensions, err := buildCertExtensions(template, bytes.Equal(asn1Subject, emptyASN1Subject), authorityKeyId, subjectKeyId)
- if err != nil {
- return nil, err
- }
-
- encodedPublicKey := asn1.BitString{BitLength: len(publicKeyBytes) * 8, Bytes: publicKeyBytes}
- c := tbsCertificate{
- Version: 2,
- SerialNumber: template.SerialNumber,
- SignatureAlgorithm: signatureAlgorithm,
- Issuer: asn1.RawValue{FullBytes: asn1Issuer},
- Validity: validity{template.NotBefore.UTC(), template.NotAfter.UTC()},
- Subject: asn1.RawValue{FullBytes: asn1Subject},
- PublicKey: publicKeyInfo{nil, publicKeyAlgorithm, encodedPublicKey},
- Extensions: extensions,
- }
-
- tbsCertContents, err := asn1.Marshal(c)
- if err != nil {
- return nil, err
- }
- c.Raw = tbsCertContents
-
- signed := tbsCertContents
- if hashFunc != 0 {
- h := hashFunc.New()
- h.Write(signed)
- signed = h.Sum(nil)
- }
-
- var signerOpts crypto.SignerOpts = hashFunc
- if template.SignatureAlgorithm != 0 && template.SignatureAlgorithm.isRSAPSS() {
- signerOpts = &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthEqualsHash,
- Hash: hashFunc,
- }
- }
-
- var signature []byte
- signature, err = key.Sign(rand, signed, signerOpts)
- if err != nil {
- return nil, err
- }
-
- signedCert, err := asn1.Marshal(certificate{
- nil,
- c,
- signatureAlgorithm,
- asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
- })
- if err != nil {
- return nil, err
- }
-
- // Check the signature to ensure the crypto.Signer behaved correctly.
- sigAlg := getSignatureAlgorithmFromAI(signatureAlgorithm)
- switch sigAlg {
- case MD5WithRSA:
- // We skip the check if the signature algorithm is only supported for
- // signing, not verification.
- default:
- if err := checkSignature(sigAlg, c.Raw, signature, key.Public(), true); err != nil {
- return nil, fmt.Errorf("x509: signature over certificate returned by signer is invalid: %w", err)
- }
- }
-
- return signedCert, nil
-}
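-
-// A minimal self-signed certificate sketch using CreateCertificate with an
-// ECDSA key; the template values are illustrative only (crypto/ecdsa,
-// crypto/elliptic, crypto/rand, math/big and time are assumed imported):
-//
-//	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	template := &Certificate{
-//		SerialNumber: big.NewInt(1),
-//		Subject:      pkix.Name{CommonName: "example"},
-//		NotBefore:    time.Now(),
-//		NotAfter:     time.Now().Add(24 * time.Hour),
-//		KeyUsage:     KeyUsageDigitalSignature,
-//	}
-//	// Passing template as both template and parent yields a
-//	// self-signed certificate.
-//	der, err := CreateCertificate(rand.Reader, template, template, &priv.PublicKey, priv)
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	_ = der // DER-encoded certificate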
-
-// pemCRLPrefix is the magic string that indicates that we have a PEM encoded
-// CRL.
-var pemCRLPrefix = []byte("-----BEGIN X509 CRL")
-
-// pemType is the type of a PEM encoded CRL.
-var pemType = "X509 CRL"
-
-// ParseCRL parses a CRL from the given bytes. It's often the case that PEM
-// encoded CRLs will appear where they should be DER encoded, so this function
-// will transparently handle PEM encoding as long as there isn't any leading
-// garbage.
-func ParseCRL(crlBytes []byte) (*pkix.CertificateList, error) {
- if bytes.HasPrefix(crlBytes, pemCRLPrefix) {
- block, _ := pem.Decode(crlBytes)
- if block != nil && block.Type == pemType {
- crlBytes = block.Bytes
- }
- }
- return ParseDERCRL(crlBytes)
-}
-
-// ParseDERCRL parses a DER encoded CRL from the given bytes.
-func ParseDERCRL(derBytes []byte) (*pkix.CertificateList, error) {
- certList := new(pkix.CertificateList)
- if rest, err := asn1.Unmarshal(derBytes, certList); err != nil {
- return nil, err
- } else if len(rest) != 0 {
- return nil, errors.New("x509: trailing data after CRL")
- }
- return certList, nil
-}
-
-// CreateCRL returns a DER encoded CRL, signed by this Certificate, that
-// contains the given list of revoked certificates.
-//
-// Note: this method does not generate an RFC 5280 conformant X.509 v2 CRL.
-// To generate a standards compliant CRL, use CreateRevocationList instead.
-func (c *Certificate) CreateCRL(rand io.Reader, priv any, revokedCerts []pkix.RevokedCertificate, now, expiry time.Time) (crlBytes []byte, err error) {
- key, ok := priv.(crypto.Signer)
- if !ok {
- return nil, errors.New("x509: certificate private key does not implement crypto.Signer")
- }
-
- hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(key.Public(), 0)
- if err != nil {
- return nil, err
- }
-
- // Force revocation times to UTC per RFC 5280.
- revokedCertsUTC := make([]pkix.RevokedCertificate, len(revokedCerts))
- for i, rc := range revokedCerts {
- rc.RevocationTime = rc.RevocationTime.UTC()
- revokedCertsUTC[i] = rc
- }
-
- tbsCertList := pkix.TBSCertificateList{
- Version: 1,
- Signature: signatureAlgorithm,
- Issuer: c.Subject.ToRDNSequence(),
- ThisUpdate: now.UTC(),
- NextUpdate: expiry.UTC(),
- RevokedCertificates: revokedCertsUTC,
- }
-
- // Authority Key Id
- if len(c.SubjectKeyId) > 0 {
- var aki pkix.Extension
- aki.Id = oidExtensionAuthorityKeyId
- aki.Value, err = asn1.Marshal(authKeyId{Id: c.SubjectKeyId})
- if err != nil {
- return
- }
- tbsCertList.Extensions = append(tbsCertList.Extensions, aki)
- }
-
- tbsCertListContents, err := asn1.Marshal(tbsCertList)
- if err != nil {
- return
- }
-
- signed := tbsCertListContents
- if hashFunc != 0 {
- h := hashFunc.New()
- h.Write(signed)
- signed = h.Sum(nil)
- }
-
- var signature []byte
- signature, err = key.Sign(rand, signed, hashFunc)
- if err != nil {
- return
- }
-
- return asn1.Marshal(pkix.CertificateList{
- TBSCertList: tbsCertList,
- SignatureAlgorithm: signatureAlgorithm,
- SignatureValue: asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
- })
-}
-
-// CertificateRequest represents a PKCS #10 certificate signature request.
-type CertificateRequest struct {
- Raw []byte // Complete ASN.1 DER content (CSR, signature algorithm and signature).
- RawTBSCertificateRequest []byte // Certificate request info part of raw ASN.1 DER content.
- RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo.
- RawSubject []byte // DER encoded Subject.
-
- Version int
- Signature []byte
- SignatureAlgorithm SignatureAlgorithm
-
- PublicKeyAlgorithm PublicKeyAlgorithm
- PublicKey any
-
- Subject pkix.Name
-
-	// Attributes contains the CSR attributes that can be parsed as
- // pkix.AttributeTypeAndValueSET.
- //
- // Deprecated: Use Extensions and ExtraExtensions instead for parsing and
- // generating the requestedExtensions attribute.
- Attributes []pkix.AttributeTypeAndValueSET
-
- // Extensions contains all requested extensions, in raw form. When parsing
- // CSRs, this can be used to extract extensions that are not parsed by this
- // package.
- Extensions []pkix.Extension
-
- // ExtraExtensions contains extensions to be copied, raw, into any CSR
- // marshaled by CreateCertificateRequest. Values override any extensions
- // that would otherwise be produced based on the other fields but are
- // overridden by any extensions specified in Attributes.
- //
- // The ExtraExtensions field is not populated by ParseCertificateRequest,
- // see Extensions instead.
- ExtraExtensions []pkix.Extension
-
-	// Subject Alternative Name values.
- DNSNames []string
- EmailAddresses []string
- IPAddresses []net.IP
- URIs []*url.URL
-}
-
-// These structures reflect the ASN.1 structure of X.509 certificate
-// signature requests (see RFC 2986):
-
-type tbsCertificateRequest struct {
- Raw asn1.RawContent
- Version int
- Subject asn1.RawValue
- PublicKey publicKeyInfo
- RawAttributes []asn1.RawValue `asn1:"tag:0"`
-}
-
-type certificateRequest struct {
- Raw asn1.RawContent
- TBSCSR tbsCertificateRequest
- SignatureAlgorithm pkix.AlgorithmIdentifier
- SignatureValue asn1.BitString
-}
-
-// oidExtensionRequest is a PKCS #9 OBJECT IDENTIFIER that indicates requested
-// extensions in a CSR.
-var oidExtensionRequest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 14}
-
-// newRawAttributes converts AttributeTypeAndValueSETs from a template
-// CertificateRequest's Attributes into tbsCertificateRequest RawAttributes.
-func newRawAttributes(attributes []pkix.AttributeTypeAndValueSET) ([]asn1.RawValue, error) {
- var rawAttributes []asn1.RawValue
- b, err := asn1.Marshal(attributes)
- if err != nil {
- return nil, err
- }
- rest, err := asn1.Unmarshal(b, &rawAttributes)
- if err != nil {
- return nil, err
- }
- if len(rest) != 0 {
- return nil, errors.New("x509: failed to unmarshal raw CSR Attributes")
- }
- return rawAttributes, nil
-}
-
-// parseRawAttributes unmarshals RawAttributes into AttributeTypeAndValueSETs.
-func parseRawAttributes(rawAttributes []asn1.RawValue) []pkix.AttributeTypeAndValueSET {
- var attributes []pkix.AttributeTypeAndValueSET
- for _, rawAttr := range rawAttributes {
- var attr pkix.AttributeTypeAndValueSET
- rest, err := asn1.Unmarshal(rawAttr.FullBytes, &attr)
- // Ignore attributes that don't parse into pkix.AttributeTypeAndValueSET
- // (i.e.: challengePassword or unstructuredName).
- if err == nil && len(rest) == 0 {
- attributes = append(attributes, attr)
- }
- }
- return attributes
-}
-
-// parseCSRExtensions parses the attributes from a CSR and extracts any
-// requested extensions.
-func parseCSRExtensions(rawAttributes []asn1.RawValue) ([]pkix.Extension, error) {
- // pkcs10Attribute reflects the Attribute structure from RFC 2986, Section 4.1.
- type pkcs10Attribute struct {
- Id asn1.ObjectIdentifier
- Values []asn1.RawValue `asn1:"set"`
- }
-
- var ret []pkix.Extension
- for _, rawAttr := range rawAttributes {
- var attr pkcs10Attribute
- if rest, err := asn1.Unmarshal(rawAttr.FullBytes, &attr); err != nil || len(rest) != 0 || len(attr.Values) == 0 {
- // Ignore attributes that don't parse.
- continue
- }
-
- if !attr.Id.Equal(oidExtensionRequest) {
- continue
- }
-
- var extensions []pkix.Extension
- if _, err := asn1.Unmarshal(attr.Values[0].FullBytes, &extensions); err != nil {
- return nil, err
- }
- ret = append(ret, extensions...)
- }
-
- return ret, nil
-}
-
-// CreateCertificateRequest creates a new certificate request based on a
-// template. The following members of template are used:
-//
-// - SignatureAlgorithm
-// - Subject
-// - DNSNames
-// - EmailAddresses
-// - IPAddresses
-// - URIs
-// - ExtraExtensions
-// - Attributes (deprecated)
-//
-// priv is the private key to sign the CSR with, and the corresponding public
-// key will be included in the CSR. It must implement crypto.Signer and its
-// Public() method must return a *rsa.PublicKey, a *ecdsa.PublicKey, or an
-// ed25519.PublicKey. (A *rsa.PrivateKey, *ecdsa.PrivateKey or
-// ed25519.PrivateKey satisfies this.)
-//
-// The returned slice is the certificate request in DER encoding.
-func CreateCertificateRequest(rand io.Reader, template *CertificateRequest, priv any) (csr []byte, err error) {
- key, ok := priv.(crypto.Signer)
- if !ok {
- return nil, errors.New("x509: certificate private key does not implement crypto.Signer")
- }
-
- var hashFunc crypto.Hash
- var sigAlgo pkix.AlgorithmIdentifier
- hashFunc, sigAlgo, err = signingParamsForPublicKey(key.Public(), template.SignatureAlgorithm)
- if err != nil {
- return nil, err
- }
-
- var publicKeyBytes []byte
- var publicKeyAlgorithm pkix.AlgorithmIdentifier
- publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(key.Public())
- if err != nil {
- return nil, err
- }
-
- extensions, err := buildCSRExtensions(template)
- if err != nil {
- return nil, err
- }
-
- // Make a copy of template.Attributes because we may alter it below.
- attributes := make([]pkix.AttributeTypeAndValueSET, 0, len(template.Attributes))
- for _, attr := range template.Attributes {
- values := make([][]pkix.AttributeTypeAndValue, len(attr.Value))
- copy(values, attr.Value)
- attributes = append(attributes, pkix.AttributeTypeAndValueSET{
- Type: attr.Type,
- Value: values,
- })
- }
-
- extensionsAppended := false
- if len(extensions) > 0 {
- // Append the extensions to an existing attribute if possible.
- for _, atvSet := range attributes {
- if !atvSet.Type.Equal(oidExtensionRequest) || len(atvSet.Value) == 0 {
- continue
- }
-
- // specifiedExtensions contains all the extensions that we
- // found specified via template.Attributes.
- specifiedExtensions := make(map[string]bool)
-
- for _, atvs := range atvSet.Value {
- for _, atv := range atvs {
- specifiedExtensions[atv.Type.String()] = true
- }
- }
-
- newValue := make([]pkix.AttributeTypeAndValue, 0, len(atvSet.Value[0])+len(extensions))
- newValue = append(newValue, atvSet.Value[0]...)
-
- for _, e := range extensions {
- if specifiedExtensions[e.Id.String()] {
- // Attributes already contained a value for
- // this extension and it takes priority.
- continue
- }
-
- newValue = append(newValue, pkix.AttributeTypeAndValue{
- // There is no place for the critical
- // flag in an AttributeTypeAndValue.
- Type: e.Id,
- Value: e.Value,
- })
- }
-
- atvSet.Value[0] = newValue
- extensionsAppended = true
- break
- }
- }
-
- rawAttributes, err := newRawAttributes(attributes)
- if err != nil {
- return
- }
-
- // If not included in attributes, add a new attribute for the
- // extensions.
- if len(extensions) > 0 && !extensionsAppended {
- attr := struct {
- Type asn1.ObjectIdentifier
- Value [][]pkix.Extension `asn1:"set"`
- }{
- Type: oidExtensionRequest,
- Value: [][]pkix.Extension{extensions},
- }
-
- b, err := asn1.Marshal(attr)
- if err != nil {
- return nil, errors.New("x509: failed to serialise extensions attribute: " + err.Error())
- }
-
- var rawValue asn1.RawValue
- if _, err := asn1.Unmarshal(b, &rawValue); err != nil {
- return nil, err
- }
-
- rawAttributes = append(rawAttributes, rawValue)
- }
-
- asn1Subject := template.RawSubject
- if len(asn1Subject) == 0 {
- asn1Subject, err = asn1.Marshal(template.Subject.ToRDNSequence())
- if err != nil {
- return nil, err
- }
- }
-
- tbsCSR := tbsCertificateRequest{
- Version: 0, // PKCS #10, RFC 2986
- Subject: asn1.RawValue{FullBytes: asn1Subject},
- PublicKey: publicKeyInfo{
- Algorithm: publicKeyAlgorithm,
- PublicKey: asn1.BitString{
- Bytes: publicKeyBytes,
- BitLength: len(publicKeyBytes) * 8,
- },
- },
- RawAttributes: rawAttributes,
- }
-
- tbsCSRContents, err := asn1.Marshal(tbsCSR)
- if err != nil {
- return
- }
- tbsCSR.Raw = tbsCSRContents
-
- signed := tbsCSRContents
- if hashFunc != 0 {
- h := hashFunc.New()
- h.Write(signed)
- signed = h.Sum(nil)
- }
-
- var signature []byte
- signature, err = key.Sign(rand, signed, hashFunc)
- if err != nil {
- return
- }
-
- return asn1.Marshal(certificateRequest{
- TBSCSR: tbsCSR,
- SignatureAlgorithm: sigAlgo,
- SignatureValue: asn1.BitString{
- Bytes: signature,
- BitLength: len(signature) * 8,
- },
- })
-}
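-
-// A minimal CSR sketch using CreateCertificateRequest; the subject and DNS
-// name are illustrative, and priv is any supported crypto.Signer (here an
-// assumed ECDSA key):
-//
-//	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	template := &CertificateRequest{
-//		Subject:  pkix.Name{CommonName: "example.com"},
-//		DNSNames: []string{"example.com"},
-//	}
-//	csrDER, err := CreateCertificateRequest(rand.Reader, template, priv)
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	_ = csrDER // DER-encoded certificate request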
-
-// ParseCertificateRequest parses a single certificate request from the
-// given ASN.1 DER data.
-func ParseCertificateRequest(asn1Data []byte) (*CertificateRequest, error) {
- var csr certificateRequest
-
- rest, err := asn1.Unmarshal(asn1Data, &csr)
- if err != nil {
- return nil, err
- } else if len(rest) != 0 {
- return nil, asn1.SyntaxError{Msg: "trailing data"}
- }
-
- return parseCertificateRequest(&csr)
-}
-
-func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error) {
- out := &CertificateRequest{
- Raw: in.Raw,
- RawTBSCertificateRequest: in.TBSCSR.Raw,
- RawSubjectPublicKeyInfo: in.TBSCSR.PublicKey.Raw,
- RawSubject: in.TBSCSR.Subject.FullBytes,
-
- Signature: in.SignatureValue.RightAlign(),
- SignatureAlgorithm: getSignatureAlgorithmFromAI(in.SignatureAlgorithm),
-
- PublicKeyAlgorithm: getPublicKeyAlgorithmFromOID(in.TBSCSR.PublicKey.Algorithm.Algorithm),
-
- Version: in.TBSCSR.Version,
- Attributes: parseRawAttributes(in.TBSCSR.RawAttributes),
- }
-
- var err error
- out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCSR.PublicKey)
- if err != nil {
- return nil, err
- }
-
- var subject pkix.RDNSequence
- if rest, err := asn1.Unmarshal(in.TBSCSR.Subject.FullBytes, &subject); err != nil {
- return nil, err
- } else if len(rest) != 0 {
- return nil, errors.New("x509: trailing data after X.509 Subject")
- }
-
- out.Subject.FillFromRDNSequence(&subject)
-
- if out.Extensions, err = parseCSRExtensions(in.TBSCSR.RawAttributes); err != nil {
- return nil, err
- }
-
- for _, extension := range out.Extensions {
- switch {
- case extension.Id.Equal(oidExtensionSubjectAltName):
- out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(extension.Value)
- if err != nil {
- return nil, err
- }
- }
- }
-
- return out, nil
-}
-
-// CheckSignature verifies that the signature on c is valid.
-func (c *CertificateRequest) CheckSignature() error {
- return checkSignature(c.SignatureAlgorithm, c.RawTBSCertificateRequest, c.Signature, c.PublicKey, true)
-}
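-
-// A minimal sketch of parsing a CSR and validating its self-signature,
-// assuming csrDER holds hypothetical DER-encoded request bytes:
-//
-//	csr, err := ParseCertificateRequest(csrDER)
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	if err := csr.CheckSignature(); err != nil {
-//		log.Fatalf("invalid CSR signature: %v", err)
-//	}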
-
-// RevocationList contains the fields used to create an X.509 v2 Certificate
-// Revocation list with CreateRevocationList.
-type RevocationList struct {
- // SignatureAlgorithm is used to determine the signature algorithm to be
- // used when signing the CRL. If 0 the default algorithm for the signing
- // key will be used.
- SignatureAlgorithm SignatureAlgorithm
-
- // RevokedCertificates is used to populate the revokedCertificates
-	// sequence in the CRL; it may be empty. RevokedCertificates may be nil,
- // in which case an empty CRL will be created.
- RevokedCertificates []pkix.RevokedCertificate
-
- // Number is used to populate the X.509 v2 cRLNumber extension in the CRL,
- // which should be a monotonically increasing sequence number for a given
- // CRL scope and CRL issuer.
- Number *big.Int
- // ThisUpdate is used to populate the thisUpdate field in the CRL, which
- // indicates the issuance date of the CRL.
- ThisUpdate time.Time
- // NextUpdate is used to populate the nextUpdate field in the CRL, which
- // indicates the date by which the next CRL will be issued. NextUpdate
- // must be greater than ThisUpdate.
- NextUpdate time.Time
- // ExtraExtensions contains any additional extensions to add directly to
- // the CRL.
- ExtraExtensions []pkix.Extension
-}
-
-// CreateRevocationList creates a new X.509 v2 Certificate Revocation List,
-// according to RFC 5280, based on template.
-//
-// The CRL is signed by priv which should be the private key associated with
-// the public key in the issuer certificate.
-//
-// The issuer may not be nil, and the crlSign bit must be set in KeyUsage in
-// order to use it as a CRL issuer.
-//
-// The issuer distinguished name CRL field and authority key identifier
-// extension are populated using the issuer certificate. issuer must have
-// SubjectKeyId set.
-func CreateRevocationList(rand io.Reader, template *RevocationList, issuer *Certificate, priv crypto.Signer) ([]byte, error) {
- if template == nil {
- return nil, errors.New("x509: template can not be nil")
- }
- if issuer == nil {
- return nil, errors.New("x509: issuer can not be nil")
- }
- if (issuer.KeyUsage & KeyUsageCRLSign) == 0 {
- return nil, errors.New("x509: issuer must have the crlSign key usage bit set")
- }
- if len(issuer.SubjectKeyId) == 0 {
- return nil, errors.New("x509: issuer certificate doesn't contain a subject key identifier")
- }
- if template.NextUpdate.Before(template.ThisUpdate) {
- return nil, errors.New("x509: template.ThisUpdate is after template.NextUpdate")
- }
- if template.Number == nil {
- return nil, errors.New("x509: template contains nil Number field")
- }
-
- hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm)
- if err != nil {
- return nil, err
- }
-
- // Force revocation times to UTC per RFC 5280.
- revokedCertsUTC := make([]pkix.RevokedCertificate, len(template.RevokedCertificates))
- for i, rc := range template.RevokedCertificates {
- rc.RevocationTime = rc.RevocationTime.UTC()
- revokedCertsUTC[i] = rc
- }
-
- aki, err := asn1.Marshal(authKeyId{Id: issuer.SubjectKeyId})
- if err != nil {
- return nil, err
- }
- crlNum, err := asn1.Marshal(template.Number)
- if err != nil {
- return nil, err
- }
-
- tbsCertList := pkix.TBSCertificateList{
- Version: 1, // v2
- Signature: signatureAlgorithm,
- Issuer: issuer.Subject.ToRDNSequence(),
- ThisUpdate: template.ThisUpdate.UTC(),
- NextUpdate: template.NextUpdate.UTC(),
- Extensions: []pkix.Extension{
- {
- Id: oidExtensionAuthorityKeyId,
- Value: aki,
- },
- {
- Id: oidExtensionCRLNumber,
- Value: crlNum,
- },
- },
- }
- if len(revokedCertsUTC) > 0 {
- tbsCertList.RevokedCertificates = revokedCertsUTC
- }
-
- if len(template.ExtraExtensions) > 0 {
- tbsCertList.Extensions = append(tbsCertList.Extensions, template.ExtraExtensions...)
- }
-
- tbsCertListContents, err := asn1.Marshal(tbsCertList)
- if err != nil {
- return nil, err
- }
-
- input := tbsCertListContents
- if hashFunc != 0 {
- h := hashFunc.New()
- h.Write(tbsCertListContents)
- input = h.Sum(nil)
- }
- var signerOpts crypto.SignerOpts = hashFunc
- if template.SignatureAlgorithm.isRSAPSS() {
- signerOpts = &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthEqualsHash,
- Hash: hashFunc,
- }
- }
-
- signature, err := priv.Sign(rand, input, signerOpts)
- if err != nil {
- return nil, err
- }
-
- return asn1.Marshal(pkix.CertificateList{
- TBSCertList: tbsCertList,
- SignatureAlgorithm: signatureAlgorithm,
- SignatureValue: asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
- })
-}
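-
-// A minimal CRL sketch using CreateRevocationList, assuming caCert is an
-// issuing *Certificate with the crlSign key usage and SubjectKeyId set, and
-// caKey is its crypto.Signer (both hypothetical):
-//
-//	template := &RevocationList{
-//		Number:     big.NewInt(1),
-//		ThisUpdate: time.Now(),
-//		NextUpdate: time.Now().Add(24 * time.Hour),
-//		RevokedCertificates: []pkix.RevokedCertificate{
-//			{SerialNumber: big.NewInt(42), RevocationTime: time.Now()},
-//		},
-//	}
-//	crlDER, err := CreateRevocationList(rand.Reader, template, caCert, caKey)
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	_ = crlDER // DER-encoded X.509 v2 CRL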
diff --git a/contrib/go/_std_1.18/src/embed/embed.go b/contrib/go/_std_1.18/src/embed/embed.go
deleted file mode 100644
index 9737ccdf6b..0000000000
--- a/contrib/go/_std_1.18/src/embed/embed.go
+++ /dev/null
@@ -1,432 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package embed provides access to files embedded in the running Go program.
-//
-// Go source files that import "embed" can use the //go:embed directive
-// to initialize a variable of type string, []byte, or FS with the contents of
-// files read from the package directory or subdirectories at compile time.
-//
-// For example, here are three ways to embed a file named hello.txt
-// and then print its contents at run time.
-//
-// Embedding one file into a string:
-//
-// import _ "embed"
-//
-// //go:embed hello.txt
-// var s string
-// print(s)
-//
-// Embedding one file into a slice of bytes:
-//
-// import _ "embed"
-//
-// //go:embed hello.txt
-// var b []byte
-// print(string(b))
-//
-// Embedding one or more files into a file system:
-//
-// import "embed"
-//
-// //go:embed hello.txt
-// var f embed.FS
-// data, _ := f.ReadFile("hello.txt")
-// print(string(data))
-//
-// Directives
-//
-// A //go:embed directive above a variable declaration specifies which files to embed,
-// using one or more path.Match patterns.
-//
-// The directive must immediately precede a line containing the declaration of a single variable.
-// Only blank lines and ‘//’ line comments are permitted between the directive and the declaration.
-//
-// The type of the variable must be a string type, or a slice of a byte type,
-// or FS (or an alias of FS).
-//
-// For example:
-//
-// package server
-//
-// import "embed"
-//
-// // content holds our static web server content.
-// //go:embed image/* template/*
-// //go:embed html/index.html
-// var content embed.FS
-//
-// The Go build system will recognize the directives and arrange for the declared variable
-// (in the example above, content) to be populated with the matching files from the file system.
-//
-// The //go:embed directive accepts multiple space-separated patterns for
-// brevity, but it can also be repeated, to avoid very long lines when there are
-// many patterns. The patterns are interpreted relative to the package directory
-// containing the source file. The path separator is a forward slash, even on
-// Windows systems. Patterns may not contain ‘.’ or ‘..’ or empty path elements,
-// nor may they begin or end with a slash. To match everything in the current
-// directory, use ‘*’ instead of ‘.’. To allow for naming files with spaces in
-// their names, patterns can be written as Go double-quoted or back-quoted
-// string literals.
-//
-// If a pattern names a directory, all files in the subtree rooted at that directory are
-// embedded (recursively), except that files with names beginning with ‘.’ or ‘_’
-// are excluded. So the variable in the above example is almost equivalent to:
-//
-// // content is our static web server content.
-// //go:embed image template html/index.html
-// var content embed.FS
-//
-// The difference is that ‘image/*’ embeds ‘image/.tempfile’ while ‘image’ does not.
-// Neither embeds ‘image/dir/.tempfile’.
-//
-// If a pattern begins with the prefix ‘all:’, then the rule for walking directories is changed
-// to include those files beginning with ‘.’ or ‘_’. For example, ‘all:image’ embeds
-// both ‘image/.tempfile’ and ‘image/dir/.tempfile’.
-//
-// The //go:embed directive can be used with both exported and unexported variables,
-// depending on whether the package wants to make the data available to other packages.
-// It can only be used with variables at package scope, not with local variables.
-//
-// Patterns must not match files outside the package's module, such as ‘.git/*’ or symbolic links.
-// Matches for empty directories are ignored. After that, each pattern in a //go:embed line
-// must match at least one file or non-empty directory.
-//
-// If any patterns are invalid or have invalid matches, the build will fail.
-//
-// Strings and Bytes
-//
-// The //go:embed line for a variable of type string or []byte can have only a single pattern,
-// and that pattern can match only a single file. The string or []byte is initialized with
-// the contents of that file.
-//
-// The //go:embed directive requires importing "embed", even when using a string or []byte.
-// In source files that don't refer to embed.FS, use a blank import (import _ "embed").
-//
-// File Systems
-//
-// For embedding a single file, a variable of type string or []byte is often best.
-// The FS type enables embedding a tree of files, such as a directory of static
-// web server content, as in the example above.
-//
-// FS implements the io/fs package's FS interface, so it can be used with any package that
-// understands file systems, including net/http, text/template, and html/template.
-//
-// For example, given the content variable in the example above, we can write:
-//
-// http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.FS(content))))
-//
-// template.ParseFS(content, "*.tmpl")
-//
-// Tools
-//
-// To support tools that analyze Go packages, the patterns found in //go:embed lines
-// are available in “go list” output. See the EmbedPatterns, TestEmbedPatterns,
-// and XTestEmbedPatterns fields in the “go help list” output.
-//
-package embed
-
-import (
- "errors"
- "io"
- "io/fs"
- "time"
-)
-
-// An FS is a read-only collection of files, usually initialized with a //go:embed directive.
-// When declared without a //go:embed directive, an FS is an empty file system.
-//
-// An FS is a read-only value, so it is safe to use from multiple goroutines
-// simultaneously and also safe to assign values of type FS to each other.
-//
-// FS implements fs.FS, so it can be used with any package that understands
-// file system interfaces, including net/http, text/template, and html/template.
-//
-// See the package documentation for more details about initializing an FS.
-type FS struct {
- // The compiler knows the layout of this struct.
- // See cmd/compile/internal/staticdata's WriteEmbed.
- //
- // The files list is sorted by name but not by simple string comparison.
- // Instead, each file's name takes the form "dir/elem" or "dir/elem/".
- // The optional trailing slash indicates that the file is itself a directory.
- // The files list is sorted first by dir (if dir is missing, it is taken to be ".")
- // and then by base, so this list of files:
- //
- // p
- // q/
- // q/r
- // q/s/
- // q/s/t
- // q/s/u
- // q/v
- // w
- //
- // is actually sorted as:
- //
- // p # dir=. elem=p
- // q/ # dir=. elem=q
-	//	w      # dir=. elem=w
- // q/r # dir=q elem=r
- // q/s/ # dir=q elem=s
- // q/v # dir=q elem=v
- // q/s/t # dir=q/s elem=t
- // q/s/u # dir=q/s elem=u
- //
- // This order brings directory contents together in contiguous sections
- // of the list, allowing a directory read to use binary search to find
- // the relevant sequence of entries.
- files *[]file
-}
-
-// split splits the name into dir and elem as described in the
-// comment in the FS struct above. isDir reports whether the
-// final trailing slash was present, indicating that name is a directory.
-// For example, split("q/s/") returns ("q", "s", true) and
-// split("p") returns (".", "p", false).
-func split(name string) (dir, elem string, isDir bool) {
- if name[len(name)-1] == '/' {
- isDir = true
- name = name[:len(name)-1]
- }
- i := len(name) - 1
- for i >= 0 && name[i] != '/' {
- i--
- }
- if i < 0 {
- return ".", name, isDir
- }
- return name[:i], name[i+1:], isDir
-}
-
-// trimSlash trims a trailing slash from name, if present,
-// returning the possibly shortened name.
-func trimSlash(name string) string {
- if len(name) > 0 && name[len(name)-1] == '/' {
- return name[:len(name)-1]
- }
- return name
-}
-
-var (
- _ fs.ReadDirFS = FS{}
- _ fs.ReadFileFS = FS{}
-)
-
-// A file is a single file in the FS.
-// It implements fs.FileInfo and fs.DirEntry.
-type file struct {
- // The compiler knows the layout of this struct.
- // See cmd/compile/internal/staticdata's WriteEmbed.
- name string
- data string
- hash [16]byte // truncated SHA256 hash
-}
-
-var (
- _ fs.FileInfo = (*file)(nil)
- _ fs.DirEntry = (*file)(nil)
-)
-
-func (f *file) Name() string { _, elem, _ := split(f.name); return elem }
-func (f *file) Size() int64 { return int64(len(f.data)) }
-func (f *file) ModTime() time.Time { return time.Time{} }
-func (f *file) IsDir() bool { _, _, isDir := split(f.name); return isDir }
-func (f *file) Sys() any { return nil }
-func (f *file) Type() fs.FileMode { return f.Mode().Type() }
-func (f *file) Info() (fs.FileInfo, error) { return f, nil }
-
-func (f *file) Mode() fs.FileMode {
- if f.IsDir() {
- return fs.ModeDir | 0555
- }
- return 0444
-}
-
-// dotFile is a file for the root directory,
-// which is omitted from the files list in a FS.
-var dotFile = &file{name: "./"}
-
-// lookup returns the named file, or nil if it is not present.
-func (f FS) lookup(name string) *file {
- if !fs.ValidPath(name) {
- // The compiler should never emit a file with an invalid name,
- // so this check is not strictly necessary (if name is invalid,
- // we shouldn't find a match below), but it's a good backstop anyway.
- return nil
- }
- if name == "." {
- return dotFile
- }
- if f.files == nil {
- return nil
- }
-
- // Binary search to find where name would be in the list,
- // and then check if name is at that position.
- dir, elem, _ := split(name)
- files := *f.files
- i := sortSearch(len(files), func(i int) bool {
- idir, ielem, _ := split(files[i].name)
- return idir > dir || idir == dir && ielem >= elem
- })
- if i < len(files) && trimSlash(files[i].name) == name {
- return &files[i]
- }
- return nil
-}
-
-// readDir returns the list of files corresponding to the directory dir.
-func (f FS) readDir(dir string) []file {
- if f.files == nil {
- return nil
- }
- // Binary search to find where dir starts and ends in the list
- // and then return that slice of the list.
- files := *f.files
- i := sortSearch(len(files), func(i int) bool {
- idir, _, _ := split(files[i].name)
- return idir >= dir
- })
- j := sortSearch(len(files), func(j int) bool {
- jdir, _, _ := split(files[j].name)
- return jdir > dir
- })
- return files[i:j]
-}
-
-// Open opens the named file for reading and returns it as an fs.File.
-//
-// The returned file implements io.Seeker when the file is not a directory.
-func (f FS) Open(name string) (fs.File, error) {
- file := f.lookup(name)
- if file == nil {
- return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
- }
- if file.IsDir() {
- return &openDir{file, f.readDir(name), 0}, nil
- }
- return &openFile{file, 0}, nil
-}
-
-// ReadDir reads and returns the entire named directory.
-func (f FS) ReadDir(name string) ([]fs.DirEntry, error) {
- file, err := f.Open(name)
- if err != nil {
- return nil, err
- }
- dir, ok := file.(*openDir)
- if !ok {
- return nil, &fs.PathError{Op: "read", Path: name, Err: errors.New("not a directory")}
- }
- list := make([]fs.DirEntry, len(dir.files))
- for i := range list {
- list[i] = &dir.files[i]
- }
- return list, nil
-}
-
-// ReadFile reads and returns the content of the named file.
-func (f FS) ReadFile(name string) ([]byte, error) {
- file, err := f.Open(name)
- if err != nil {
- return nil, err
- }
- ofile, ok := file.(*openFile)
- if !ok {
- return nil, &fs.PathError{Op: "read", Path: name, Err: errors.New("is a directory")}
- }
- return []byte(ofile.f.data), nil
-}
-
-// An openFile is a regular file open for reading.
-type openFile struct {
- f *file // the file itself
- offset int64 // current read offset
-}
-
-var (
- _ io.Seeker = (*openFile)(nil)
-)
-
-func (f *openFile) Close() error { return nil }
-func (f *openFile) Stat() (fs.FileInfo, error) { return f.f, nil }
-
-func (f *openFile) Read(b []byte) (int, error) {
- if f.offset >= int64(len(f.f.data)) {
- return 0, io.EOF
- }
- if f.offset < 0 {
- return 0, &fs.PathError{Op: "read", Path: f.f.name, Err: fs.ErrInvalid}
- }
- n := copy(b, f.f.data[f.offset:])
- f.offset += int64(n)
- return n, nil
-}
-
-func (f *openFile) Seek(offset int64, whence int) (int64, error) {
-	switch whence {
-	case 0: // io.SeekStart: relative to the start of the file
-		// offset += 0
-	case 1: // io.SeekCurrent: relative to the current offset
-		offset += f.offset
-	case 2: // io.SeekEnd: relative to the end of the file
-		offset += int64(len(f.f.data))
-	}
- if offset < 0 || offset > int64(len(f.f.data)) {
- return 0, &fs.PathError{Op: "seek", Path: f.f.name, Err: fs.ErrInvalid}
- }
- f.offset = offset
- return offset, nil
-}
-
-// An openDir is a directory open for reading.
-type openDir struct {
- f *file // the directory file itself
- files []file // the directory contents
- offset int // the read offset, an index into the files slice
-}
-
-func (d *openDir) Close() error { return nil }
-func (d *openDir) Stat() (fs.FileInfo, error) { return d.f, nil }
-
-func (d *openDir) Read([]byte) (int, error) {
- return 0, &fs.PathError{Op: "read", Path: d.f.name, Err: errors.New("is a directory")}
-}
-
-func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
- n := len(d.files) - d.offset
- if n == 0 {
- if count <= 0 {
- return nil, nil
- }
- return nil, io.EOF
- }
- if count > 0 && n > count {
- n = count
- }
- list := make([]fs.DirEntry, n)
- for i := range list {
- list[i] = &d.files[d.offset+i]
- }
- d.offset += n
- return list, nil
-}
-
-// sortSearch is like sort.Search, avoiding an import.
-func sortSearch(n int, f func(int) bool) int {
- // Define f(-1) == false and f(n) == true.
- // Invariant: f(i-1) == false, f(j) == true.
- i, j := 0, n
- for i < j {
- h := int(uint(i+j) >> 1) // avoid overflow when computing h
- // i ≤ h < j
- if !f(h) {
- i = h + 1 // preserves f(i-1) == false
- } else {
- j = h // preserves f(j) == true
- }
- }
- // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
- return i
-}
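For reference, the call is semantically identical to the standard library's version (comparison only; the helper exists so this package need not import sort):

	i := sort.Search(n, f) // same result for any n and predicate f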
diff --git a/contrib/go/_std_1.18/src/encoding/asn1/asn1.go b/contrib/go/_std_1.18/src/encoding/asn1/asn1.go
deleted file mode 100644
index cad1d7b08f..0000000000
--- a/contrib/go/_std_1.18/src/encoding/asn1/asn1.go
+++ /dev/null
@@ -1,1122 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package asn1 implements parsing of DER-encoded ASN.1 data structures,
-// as defined in ITU-T Rec X.690.
-//
-// See also “A Layman's Guide to a Subset of ASN.1, BER, and DER,”
-// http://luca.ntop.org/Teaching/Appunti/asn1.html.
-package asn1
-
-// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc
-// are different encoding formats for those objects. Here, we'll be dealing
-// with DER, the Distinguished Encoding Rules. DER is used in X.509 because
-// it's fast to parse and, unlike BER, has a unique encoding for every object.
-// When calculating hashes over objects, it's important that the resulting
-// bytes be the same at both ends and DER removes this margin of error.
-//
-// ASN.1 is very complex and this package doesn't attempt to implement
-// everything by any means.
-
-import (
- "errors"
- "fmt"
- "math"
- "math/big"
- "reflect"
- "strconv"
- "time"
- "unicode/utf16"
- "unicode/utf8"
-)
-
-// A StructuralError suggests that the ASN.1 data is valid, but the Go type
-// which is receiving it doesn't match.
-type StructuralError struct {
- Msg string
-}
-
-func (e StructuralError) Error() string { return "asn1: structure error: " + e.Msg }
-
-// A SyntaxError suggests that the ASN.1 data is invalid.
-type SyntaxError struct {
- Msg string
-}
-
-func (e SyntaxError) Error() string { return "asn1: syntax error: " + e.Msg }
-
-// We start by dealing with each of the primitive types in turn.
-
-// BOOLEAN
-
-func parseBool(bytes []byte) (ret bool, err error) {
- if len(bytes) != 1 {
- err = SyntaxError{"invalid boolean"}
- return
- }
-
- // DER demands that "If the encoding represents the boolean value TRUE,
- // its single contents octet shall have all eight bits set to one."
- // Thus only 0 and 255 are valid encoded values.
- switch bytes[0] {
- case 0:
- ret = false
- case 0xff:
- ret = true
- default:
- err = SyntaxError{"invalid boolean"}
- }
-
- return
-}
-
-// INTEGER
-
-// checkInteger returns nil if the given bytes are a valid DER-encoded
-// INTEGER and an error otherwise.
-func checkInteger(bytes []byte) error {
- if len(bytes) == 0 {
- return StructuralError{"empty integer"}
- }
- if len(bytes) == 1 {
- return nil
- }
- if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) {
- return StructuralError{"integer not minimally-encoded"}
- }
- return nil
-}
-
-// parseInt64 treats the given bytes as a big-endian, signed integer and
-// returns the result.
-func parseInt64(bytes []byte) (ret int64, err error) {
- err = checkInteger(bytes)
- if err != nil {
- return
- }
- if len(bytes) > 8 {
- // We'll overflow an int64 in this case.
- err = StructuralError{"integer too large"}
- return
- }
- for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
- ret <<= 8
- ret |= int64(bytes[bytesRead])
- }
-
- // Shift up and down in order to sign extend the result.
- ret <<= 64 - uint8(len(bytes))*8
- ret >>= 64 - uint8(len(bytes))*8
- return
-}
-
-// parseInt32 treats the given bytes as a big-endian, signed integer and returns
-// the result.
-func parseInt32(bytes []byte) (int32, error) {
- if err := checkInteger(bytes); err != nil {
- return 0, err
- }
- ret64, err := parseInt64(bytes)
- if err != nil {
- return 0, err
- }
- if ret64 != int64(int32(ret64)) {
- return 0, StructuralError{"integer too large"}
- }
- return int32(ret64), nil
-}
-
-var bigOne = big.NewInt(1)
-
-// parseBigInt treats the given bytes as a big-endian, signed integer and returns
-// the result.
-func parseBigInt(bytes []byte) (*big.Int, error) {
- if err := checkInteger(bytes); err != nil {
- return nil, err
- }
- ret := new(big.Int)
- if len(bytes) > 0 && bytes[0]&0x80 == 0x80 {
- // This is a negative number.
- notBytes := make([]byte, len(bytes))
- for i := range notBytes {
- notBytes[i] = ^bytes[i]
- }
- ret.SetBytes(notBytes)
- ret.Add(ret, bigOne)
- ret.Neg(ret)
- return ret, nil
- }
- ret.SetBytes(bytes)
- return ret, nil
-}
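A worked trace of the negative branch (illustrative input): the two's-complement bytes {0xff, 0x7f} decode to -129.

	// parseBigInt([]byte{0xff, 0x7f}):
	//	notBytes = {0x00, 0x80} // bitwise complement of the input
	//	ret      = 128 + 1      // SetBytes, then Add(ret, bigOne)
	//	returns -129            // Neg(ret)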
-
-// BIT STRING
-
-// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
-// bit string is padded up to the nearest byte in memory and the number of
-// valid bits is recorded. Padding bits will be zero.
-type BitString struct {
- Bytes []byte // bits packed into bytes.
- BitLength int // length in bits.
-}
-
-// At returns the bit at the given index. If the index is out of range it
-// returns 0.
-func (b BitString) At(i int) int {
- if i < 0 || i >= b.BitLength {
- return 0
- }
- x := i / 8
- y := 7 - uint(i%8)
- return int(b.Bytes[x]>>y) & 1
-}
-
-// RightAlign returns a slice where the padding bits are at the beginning. The
-// slice may share memory with the BitString.
-func (b BitString) RightAlign() []byte {
- shift := uint(8 - (b.BitLength % 8))
- if shift == 8 || len(b.Bytes) == 0 {
- return b.Bytes
- }
-
- a := make([]byte, len(b.Bytes))
- a[0] = b.Bytes[0] >> shift
- for i := 1; i < len(b.Bytes); i++ {
- a[i] = b.Bytes[i-1] << (8 - shift)
- a[i] |= b.Bytes[i] >> shift
- }
-
- return a
-}
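A worked example (illustrative values): a 12-bit string 0xabc is stored padded as {0xab, 0xc0}; RightAlign moves the four padding bits to the front.

	b := BitString{Bytes: []byte{0xab, 0xc0}, BitLength: 12}
	_ = b.RightAlign() // returns []byte{0x0a, 0xbc}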
-
-// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
-func parseBitString(bytes []byte) (ret BitString, err error) {
- if len(bytes) == 0 {
- err = SyntaxError{"zero length BIT STRING"}
- return
- }
- paddingBits := int(bytes[0])
- if paddingBits > 7 ||
- len(bytes) == 1 && paddingBits > 0 ||
- bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {
- err = SyntaxError{"invalid padding bits in BIT STRING"}
- return
- }
- ret.BitLength = (len(bytes)-1)*8 - paddingBits
- ret.Bytes = bytes[1:]
- return
-}
-
-// NULL
-
-// NullRawValue is a RawValue with its Tag set to the ASN.1 NULL type tag (5).
-var NullRawValue = RawValue{Tag: TagNull}
-
-// NullBytes contains bytes representing the DER-encoded ASN.1 NULL type.
-var NullBytes = []byte{TagNull, 0}
-
-// OBJECT IDENTIFIER
-
-// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
-type ObjectIdentifier []int
-
-// Equal reports whether oi and other represent the same identifier.
-func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
- if len(oi) != len(other) {
- return false
- }
- for i := 0; i < len(oi); i++ {
- if oi[i] != other[i] {
- return false
- }
- }
-
- return true
-}
-
-func (oi ObjectIdentifier) String() string {
- var s string
-
- for i, v := range oi {
- if i > 0 {
- s += "."
- }
- s += strconv.Itoa(v)
- }
-
- return s
-}
-
-// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
-// returns it. An object identifier is a sequence of variable-length integers
-// that are assigned in a hierarchy.
-func parseObjectIdentifier(bytes []byte) (s ObjectIdentifier, err error) {
- if len(bytes) == 0 {
- err = SyntaxError{"zero length OBJECT IDENTIFIER"}
- return
- }
-
- // In the worst case, we get two elements from the first byte (which is
- // encoded differently) and then every varint is a single byte long.
- s = make([]int, len(bytes)+1)
-
- // The first varint is 40*value1 + value2:
- // According to this packing, value1 can take the values 0, 1 and 2 only.
- // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
- // then there are no restrictions on value2.
- v, offset, err := parseBase128Int(bytes, 0)
- if err != nil {
- return
- }
- if v < 80 {
- s[0] = v / 40
- s[1] = v % 40
- } else {
- s[0] = 2
- s[1] = v - 80
- }
-
- i := 2
- for ; offset < len(bytes); i++ {
- v, offset, err = parseBase128Int(bytes, offset)
- if err != nil {
- return
- }
- s[i] = v
- }
- s = s[0:i]
- return
-}
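A worked trace (illustrative input): the DER body of the RSA OID 1.2.840.113549 decodes as follows.

	// parseObjectIdentifier([]byte{0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d}):
	//	0x2a = 42 = 40*1 + 2      -> s[0], s[1] = 1, 2
	//	0x86 0x48 (base 128)      -> s[2] = 840
	//	0x86 0xf7 0x0d (base 128) -> s[3] = 113549
	// result: ObjectIdentifier{1, 2, 840, 113549}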
-
-// ENUMERATED
-
-// An Enumerated is represented as a plain int.
-type Enumerated int
-
-// FLAG
-
-// A Flag accepts any data and is set to true if present.
-type Flag bool
-
-// parseBase128Int parses a base-128 encoded int from the given offset in the
-// given byte slice. It returns the value and the new offset.
-func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) {
- offset = initOffset
- var ret64 int64
- for shifted := 0; offset < len(bytes); shifted++ {
- // 5 * 7 bits per byte == 35 bits of data
- // Thus the representation is either non-minimal or too large for an int32
- if shifted == 5 {
- err = StructuralError{"base 128 integer too large"}
- return
- }
- ret64 <<= 7
- b := bytes[offset]
- // integers should be minimally encoded, so the leading octet should
- // never be 0x80
- if shifted == 0 && b == 0x80 {
- err = SyntaxError{"integer is not minimally encoded"}
- return
- }
- ret64 |= int64(b & 0x7f)
- offset++
- if b&0x80 == 0 {
- ret = int(ret64)
- // Ensure that the returned value fits in an int on all platforms
- if ret64 > math.MaxInt32 {
- err = StructuralError{"base 128 integer too large"}
- }
- return
- }
- }
- err = SyntaxError{"truncated base 128 integer"}
- return
-}
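A worked trace of one base-128 varint (illustrative input):

	// parseBase128Int([]byte{0x86, 0x48}, 0):
	//	0x86: continuation bit set, ret64 = 0x06
	//	0x48: final byte,           ret64 = 0x06<<7 | 0x48 = 840
	// returns ret = 840, offset = 2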
-
-// UTCTime
-
-func parseUTCTime(bytes []byte) (ret time.Time, err error) {
- s := string(bytes)
-
- formatStr := "0601021504Z0700"
- ret, err = time.Parse(formatStr, s)
- if err != nil {
- formatStr = "060102150405Z0700"
- ret, err = time.Parse(formatStr, s)
- }
- if err != nil {
- return
- }
-
- if serialized := ret.Format(formatStr); serialized != s {
- err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized)
- return
- }
-
- if ret.Year() >= 2050 {
- // UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
- ret = ret.AddDate(-100, 0, 0)
- }
-
- return
-}
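Worked examples of the two-digit-year window (illustrative inputs):

	// parseUTCTime([]byte("491231235959Z")) -> 2049-12-31 23:59:59 UTC
	// parseUTCTime([]byte("501231235959Z")) -> 1950-12-31 23:59:59 UTC
	//	(time.Parse yields 2050; the >= 2050 rule shifts it back a century)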
-
-// parseGeneralizedTime parses the GeneralizedTime from the given byte slice
-// and returns the resulting time.
-func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) {
- const formatStr = "20060102150405Z0700"
- s := string(bytes)
-
- if ret, err = time.Parse(formatStr, s); err != nil {
- return
- }
-
- if serialized := ret.Format(formatStr); serialized != s {
- err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized)
- }
-
- return
-}
-
-// NumericString
-
-// parseNumericString parses an ASN.1 NumericString from the given byte slice
-// and returns it.
-func parseNumericString(bytes []byte) (ret string, err error) {
- for _, b := range bytes {
- if !isNumeric(b) {
- return "", SyntaxError{"NumericString contains invalid character"}
- }
- }
- return string(bytes), nil
-}
-
-// isNumeric reports whether the given b is in the ASN.1 NumericString set.
-func isNumeric(b byte) bool {
- return '0' <= b && b <= '9' ||
- b == ' '
-}
-
-// PrintableString
-
-// parsePrintableString parses an ASN.1 PrintableString from the given byte
-// slice and returns it.
-func parsePrintableString(bytes []byte) (ret string, err error) {
- for _, b := range bytes {
- if !isPrintable(b, allowAsterisk, allowAmpersand) {
- err = SyntaxError{"PrintableString contains invalid character"}
- return
- }
- }
- ret = string(bytes)
- return
-}
-
-type asteriskFlag bool
-type ampersandFlag bool
-
-const (
- allowAsterisk asteriskFlag = true
- rejectAsterisk asteriskFlag = false
-
- allowAmpersand ampersandFlag = true
- rejectAmpersand ampersandFlag = false
-)
-
-// isPrintable reports whether the given b is in the ASN.1 PrintableString set.
-// If asterisk is allowAsterisk then '*' is also allowed, reflecting existing
-// practice. If ampersand is allowAmpersand then '&' is allowed as well.
-func isPrintable(b byte, asterisk asteriskFlag, ampersand ampersandFlag) bool {
- return 'a' <= b && b <= 'z' ||
- 'A' <= b && b <= 'Z' ||
- '0' <= b && b <= '9' ||
- '\'' <= b && b <= ')' ||
- '+' <= b && b <= '/' ||
- b == ' ' ||
- b == ':' ||
- b == '=' ||
- b == '?' ||
- // This is technically not allowed in a PrintableString.
- // However, x509 certificates with wildcard strings don't
- // always use the correct string type so we permit it.
- (bool(asterisk) && b == '*') ||
- // This is not technically allowed either. However, not
- // only is it relatively common, but there are also a
- // handful of CA certificates that contain it. At least
- // one of which will not expire until 2027.
- (bool(ampersand) && b == '&')
-}
-
-// IA5String
-
-// parseIA5String parses an ASN.1 IA5String (ASCII string) from the given
-// byte slice and returns it.
-func parseIA5String(bytes []byte) (ret string, err error) {
- for _, b := range bytes {
- if b >= utf8.RuneSelf {
- err = SyntaxError{"IA5String contains invalid character"}
- return
- }
- }
- ret = string(bytes)
- return
-}
-
-// T61String
-
-// parseT61String parses an ASN.1 T61String (8-bit clean string) from the given
-// byte slice and returns it.
-func parseT61String(bytes []byte) (ret string, err error) {
- return string(bytes), nil
-}
-
-// UTF8String
-
-// parseUTF8String parses an ASN.1 UTF8String (raw UTF-8) from the given byte
-// slice and returns it.
-func parseUTF8String(bytes []byte) (ret string, err error) {
- if !utf8.Valid(bytes) {
- return "", errors.New("asn1: invalid UTF-8 string")
- }
- return string(bytes), nil
-}
-
-// BMPString
-
-// parseBMPString parses an ASN.1 BMPString (Basic Multilingual Plane of
-// ISO/IEC/ITU 10646-1) from the given byte slice and returns it.
-func parseBMPString(bmpString []byte) (string, error) {
- if len(bmpString)%2 != 0 {
-		return "", errors.New("asn1: odd-length BMP string")
- }
-
- // Strip terminator if present.
- if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 {
- bmpString = bmpString[:l-2]
- }
-
- s := make([]uint16, 0, len(bmpString)/2)
- for len(bmpString) > 0 {
- s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1]))
- bmpString = bmpString[2:]
- }
-
- return string(utf16.Decode(s)), nil
-}
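A worked example (illustrative input): the UTF-16 BE code units for "Hi" followed by a terminator.

	// parseBMPString([]byte{0x00, 0x48, 0x00, 0x69, 0x00, 0x00})
	//	strips the trailing 0x0000 terminator, decodes {0x0048, 0x0069}
	//	-> "Hi", nil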
-
-// A RawValue represents an undecoded ASN.1 object.
-type RawValue struct {
- Class, Tag int
- IsCompound bool
- Bytes []byte
- FullBytes []byte // includes the tag and length
-}
-
-// RawContent is used to signal that the undecoded, DER data needs to be
-// preserved for a struct. To use it, the first field of the struct must have
-// this type. It's an error for any of the other fields to have this type.
-type RawContent []byte
-
-// Tagging
-
-// parseTagAndLength parses an ASN.1 tag and length pair from the given offset
-// into a byte slice. It returns the parsed data and the new offset. SET and
-// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we
-// don't distinguish between ordered and unordered objects in this code.
-func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) {
- offset = initOffset
- // parseTagAndLength should not be called without at least a single
- // byte to read. Thus this check is for robustness:
- if offset >= len(bytes) {
- err = errors.New("asn1: internal error in parseTagAndLength")
- return
- }
- b := bytes[offset]
- offset++
- ret.class = int(b >> 6)
- ret.isCompound = b&0x20 == 0x20
- ret.tag = int(b & 0x1f)
-
- // If the bottom five bits are set, then the tag number is actually base 128
- // encoded afterwards
- if ret.tag == 0x1f {
- ret.tag, offset, err = parseBase128Int(bytes, offset)
- if err != nil {
- return
- }
- // Tags should be encoded in minimal form.
- if ret.tag < 0x1f {
- err = SyntaxError{"non-minimal tag"}
- return
- }
- }
- if offset >= len(bytes) {
- err = SyntaxError{"truncated tag or length"}
- return
- }
- b = bytes[offset]
- offset++
- if b&0x80 == 0 {
- // The length is encoded in the bottom 7 bits.
- ret.length = int(b & 0x7f)
- } else {
- // Bottom 7 bits give the number of length bytes to follow.
- numBytes := int(b & 0x7f)
- if numBytes == 0 {
- err = SyntaxError{"indefinite length found (not DER)"}
- return
- }
- ret.length = 0
- for i := 0; i < numBytes; i++ {
- if offset >= len(bytes) {
- err = SyntaxError{"truncated tag or length"}
- return
- }
- b = bytes[offset]
- offset++
- if ret.length >= 1<<23 {
- // We can't shift ret.length up without
- // overflowing.
- err = StructuralError{"length too large"}
- return
- }
- ret.length <<= 8
- ret.length |= int(b)
- if ret.length == 0 {
- // DER requires that lengths be minimal.
- err = StructuralError{"superfluous leading zeros in length"}
- return
- }
- }
- // Short lengths must be encoded in short form.
- if ret.length < 0x80 {
- err = StructuralError{"non-minimal length"}
- return
- }
- }
-
- return
-}
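A worked trace (illustrative bytes): the header of a 256-byte SEQUENCE.

	// parseTagAndLength([]byte{0x30, 0x82, 0x01, 0x00 /* ...body... */}, 0):
	//	0x30:      class = 0 (universal), isCompound = true, tag = 16 (SEQUENCE)
	//	0x82:      long form, two length bytes follow
	//	0x01 0x00: length = 256
	// returns tagAndLength{class: 0, tag: 16, isCompound: true, length: 256}, offset = 4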
-
-// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse
-// a number of ASN.1 values from the given byte slice and returns them as a
-// slice of Go values of the given type.
-func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) {
- matchAny, expectedTag, compoundType, ok := getUniversalType(elemType)
- if !ok {
- err = StructuralError{"unknown Go type for slice"}
- return
- }
-
- // First we iterate over the input and count the number of elements,
- // checking that the types are correct in each case.
- numElements := 0
- for offset := 0; offset < len(bytes); {
- var t tagAndLength
- t, offset, err = parseTagAndLength(bytes, offset)
- if err != nil {
- return
- }
- switch t.tag {
- case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString, TagBMPString:
- // We pretend that various other string types are
- // PRINTABLE STRINGs so that a sequence of them can be
- // parsed into a []string.
- t.tag = TagPrintableString
- case TagGeneralizedTime, TagUTCTime:
- // Likewise, both time types are treated the same.
- t.tag = TagUTCTime
- }
-
- if !matchAny && (t.class != ClassUniversal || t.isCompound != compoundType || t.tag != expectedTag) {
- err = StructuralError{"sequence tag mismatch"}
- return
- }
- if invalidLength(offset, t.length, len(bytes)) {
- err = SyntaxError{"truncated sequence"}
- return
- }
- offset += t.length
- numElements++
- }
- ret = reflect.MakeSlice(sliceType, numElements, numElements)
- params := fieldParameters{}
- offset := 0
- for i := 0; i < numElements; i++ {
- offset, err = parseField(ret.Index(i), bytes, offset, params)
- if err != nil {
- return
- }
- }
- return
-}
-
-var (
- bitStringType = reflect.TypeOf(BitString{})
- objectIdentifierType = reflect.TypeOf(ObjectIdentifier{})
- enumeratedType = reflect.TypeOf(Enumerated(0))
- flagType = reflect.TypeOf(Flag(false))
- timeType = reflect.TypeOf(time.Time{})
- rawValueType = reflect.TypeOf(RawValue{})
- rawContentsType = reflect.TypeOf(RawContent(nil))
- bigIntType = reflect.TypeOf(new(big.Int))
-)
-
-// invalidLength reports whether offset + length > sliceLength, or if the
-// addition would overflow.
-func invalidLength(offset, length, sliceLength int) bool {
- return offset+length < offset || offset+length > sliceLength
-}
-
-// parseField is the main parsing function. Given a byte slice and an offset
-// into the array, it will try to parse a suitable ASN.1 value out and store it
-// in the given Value.
-func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) {
- offset = initOffset
- fieldType := v.Type()
-
- // If we have run out of data, it may be that there are optional elements at the end.
- if offset == len(bytes) {
- if !setDefaultValue(v, params) {
- err = SyntaxError{"sequence truncated"}
- }
- return
- }
-
- // Deal with the ANY type.
- if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 {
- var t tagAndLength
- t, offset, err = parseTagAndLength(bytes, offset)
- if err != nil {
- return
- }
- if invalidLength(offset, t.length, len(bytes)) {
- err = SyntaxError{"data truncated"}
- return
- }
- var result any
- if !t.isCompound && t.class == ClassUniversal {
- innerBytes := bytes[offset : offset+t.length]
- switch t.tag {
- case TagPrintableString:
- result, err = parsePrintableString(innerBytes)
- case TagNumericString:
- result, err = parseNumericString(innerBytes)
- case TagIA5String:
- result, err = parseIA5String(innerBytes)
- case TagT61String:
- result, err = parseT61String(innerBytes)
- case TagUTF8String:
- result, err = parseUTF8String(innerBytes)
- case TagInteger:
- result, err = parseInt64(innerBytes)
- case TagBitString:
- result, err = parseBitString(innerBytes)
- case TagOID:
- result, err = parseObjectIdentifier(innerBytes)
- case TagUTCTime:
- result, err = parseUTCTime(innerBytes)
- case TagGeneralizedTime:
- result, err = parseGeneralizedTime(innerBytes)
- case TagOctetString:
- result = innerBytes
- case TagBMPString:
- result, err = parseBMPString(innerBytes)
- default:
- // If we don't know how to handle the type, we just leave Value as nil.
- }
- }
- offset += t.length
- if err != nil {
- return
- }
- if result != nil {
- v.Set(reflect.ValueOf(result))
- }
- return
- }
-
- t, offset, err := parseTagAndLength(bytes, offset)
- if err != nil {
- return
- }
- if params.explicit {
- expectedClass := ClassContextSpecific
- if params.application {
- expectedClass = ClassApplication
- }
- if offset == len(bytes) {
- err = StructuralError{"explicit tag has no child"}
- return
- }
- if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) {
- if fieldType == rawValueType {
- // The inner element should not be parsed for RawValues.
- } else if t.length > 0 {
- t, offset, err = parseTagAndLength(bytes, offset)
- if err != nil {
- return
- }
- } else {
- if fieldType != flagType {
- err = StructuralError{"zero length explicit tag was not an asn1.Flag"}
- return
- }
- v.SetBool(true)
- return
- }
- } else {
- // The tags didn't match, it might be an optional element.
- ok := setDefaultValue(v, params)
- if ok {
- offset = initOffset
- } else {
- err = StructuralError{"explicitly tagged member didn't match"}
- }
- return
- }
- }
-
- matchAny, universalTag, compoundType, ok1 := getUniversalType(fieldType)
- if !ok1 {
- err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)}
- return
- }
-
- // Special case for strings: all the ASN.1 string types map to the Go
- // type string. getUniversalType returns the tag for PrintableString
- // when it sees a string, so if we see a different string type on the
- // wire, we change the universal type to match.
- if universalTag == TagPrintableString {
- if t.class == ClassUniversal {
- switch t.tag {
- case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString, TagBMPString:
- universalTag = t.tag
- }
- } else if params.stringType != 0 {
- universalTag = params.stringType
- }
- }
-
- // Special case for time: UTCTime and GeneralizedTime both map to the
- // Go type time.Time.
- if universalTag == TagUTCTime && t.tag == TagGeneralizedTime && t.class == ClassUniversal {
- universalTag = TagGeneralizedTime
- }
-
- if params.set {
- universalTag = TagSet
- }
-
- matchAnyClassAndTag := matchAny
- expectedClass := ClassUniversal
- expectedTag := universalTag
-
- if !params.explicit && params.tag != nil {
- expectedClass = ClassContextSpecific
- expectedTag = *params.tag
- matchAnyClassAndTag = false
- }
-
- if !params.explicit && params.application && params.tag != nil {
- expectedClass = ClassApplication
- expectedTag = *params.tag
- matchAnyClassAndTag = false
- }
-
- if !params.explicit && params.private && params.tag != nil {
- expectedClass = ClassPrivate
- expectedTag = *params.tag
- matchAnyClassAndTag = false
- }
-
- // We have unwrapped any explicit tagging at this point.
- if !matchAnyClassAndTag && (t.class != expectedClass || t.tag != expectedTag) ||
- (!matchAny && t.isCompound != compoundType) {
- // Tags don't match. Again, it could be an optional element.
- ok := setDefaultValue(v, params)
- if ok {
- offset = initOffset
- } else {
- err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)}
- }
- return
- }
- if invalidLength(offset, t.length, len(bytes)) {
- err = SyntaxError{"data truncated"}
- return
- }
- innerBytes := bytes[offset : offset+t.length]
- offset += t.length
-
- // We deal with the structures defined in this package first.
- switch v := v.Addr().Interface().(type) {
- case *RawValue:
- *v = RawValue{t.class, t.tag, t.isCompound, innerBytes, bytes[initOffset:offset]}
- return
- case *ObjectIdentifier:
- *v, err = parseObjectIdentifier(innerBytes)
- return
- case *BitString:
- *v, err = parseBitString(innerBytes)
- return
- case *time.Time:
- if universalTag == TagUTCTime {
- *v, err = parseUTCTime(innerBytes)
- return
- }
- *v, err = parseGeneralizedTime(innerBytes)
- return
- case *Enumerated:
- parsedInt, err1 := parseInt32(innerBytes)
- if err1 == nil {
- *v = Enumerated(parsedInt)
- }
- err = err1
- return
- case *Flag:
- *v = true
- return
- case **big.Int:
- parsedInt, err1 := parseBigInt(innerBytes)
- if err1 == nil {
- *v = parsedInt
- }
- err = err1
- return
- }
- switch val := v; val.Kind() {
- case reflect.Bool:
- parsedBool, err1 := parseBool(innerBytes)
- if err1 == nil {
- val.SetBool(parsedBool)
- }
- err = err1
- return
- case reflect.Int, reflect.Int32, reflect.Int64:
- if val.Type().Size() == 4 {
- parsedInt, err1 := parseInt32(innerBytes)
- if err1 == nil {
- val.SetInt(int64(parsedInt))
- }
- err = err1
- } else {
- parsedInt, err1 := parseInt64(innerBytes)
- if err1 == nil {
- val.SetInt(parsedInt)
- }
- err = err1
- }
- return
- // TODO(dfc) Add support for the remaining integer types
- case reflect.Struct:
- structType := fieldType
-
- for i := 0; i < structType.NumField(); i++ {
- if !structType.Field(i).IsExported() {
- err = StructuralError{"struct contains unexported fields"}
- return
- }
- }
-
- if structType.NumField() > 0 &&
- structType.Field(0).Type == rawContentsType {
- bytes := bytes[initOffset:offset]
- val.Field(0).Set(reflect.ValueOf(RawContent(bytes)))
- }
-
- innerOffset := 0
- for i := 0; i < structType.NumField(); i++ {
- field := structType.Field(i)
- if i == 0 && field.Type == rawContentsType {
- continue
- }
- innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, parseFieldParameters(field.Tag.Get("asn1")))
- if err != nil {
- return
- }
- }
- // We allow extra bytes at the end of the SEQUENCE because
- // adding elements to the end has been used in X.509 as the
- // version numbers have increased.
- return
- case reflect.Slice:
- sliceType := fieldType
- if sliceType.Elem().Kind() == reflect.Uint8 {
- val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes)))
- reflect.Copy(val, reflect.ValueOf(innerBytes))
- return
- }
- newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem())
- if err1 == nil {
- val.Set(newSlice)
- }
- err = err1
- return
- case reflect.String:
- var v string
- switch universalTag {
- case TagPrintableString:
- v, err = parsePrintableString(innerBytes)
- case TagNumericString:
- v, err = parseNumericString(innerBytes)
- case TagIA5String:
- v, err = parseIA5String(innerBytes)
- case TagT61String:
- v, err = parseT61String(innerBytes)
- case TagUTF8String:
- v, err = parseUTF8String(innerBytes)
- case TagGeneralString:
- // GeneralString is specified in ISO-2022/ECMA-35,
- // A brief review suggests that it includes structures
- // that allow the encoding to change midstring and
- // such. We give up and pass it as an 8-bit string.
- v, err = parseT61String(innerBytes)
- case TagBMPString:
- v, err = parseBMPString(innerBytes)
-
- default:
- err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)}
- }
- if err == nil {
- val.SetString(v)
- }
- return
- }
- err = StructuralError{"unsupported: " + v.Type().String()}
- return
-}
-
-// canHaveDefaultValue reports whether k is a Kind that we will set a default
-// value for. (A signed integer, essentially.)
-func canHaveDefaultValue(k reflect.Kind) bool {
- switch k {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return true
- }
-
- return false
-}
-
-// setDefaultValue is used to install a default value, from a tag string, into
-// a Value. It is successful if the field was optional, even if a default value
-// wasn't provided or it failed to install it into the Value.
-func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) {
- if !params.optional {
- return
- }
- ok = true
- if params.defaultValue == nil {
- return
- }
- if canHaveDefaultValue(v.Kind()) {
- v.SetInt(*params.defaultValue)
- }
- return
-}
-
-// Unmarshal parses the DER-encoded ASN.1 data structure b
-// and uses the reflect package to fill in an arbitrary value pointed at by val.
-// Because Unmarshal uses the reflect package, the structs
-// being written to must use upper case field names. If val
-// is nil or not a pointer, Unmarshal returns an error.
-//
-// After parsing b, any bytes that were leftover and not used to fill
-// val will be returned in rest. When parsing a SEQUENCE into a struct,
-// any trailing elements of the SEQUENCE that do not have matching
-// fields in val will not be included in rest, as these are considered
-// valid elements of the SEQUENCE and not trailing data.
-//
-// An ASN.1 INTEGER can be written to an int, int32, int64,
-// or *big.Int (from the math/big package).
-// If the encoded value does not fit in the Go type,
-// Unmarshal returns a parse error.
-//
-// An ASN.1 BIT STRING can be written to a BitString.
-//
-// An ASN.1 OCTET STRING can be written to a []byte.
-//
-// An ASN.1 OBJECT IDENTIFIER can be written to an
-// ObjectIdentifier.
-//
-// An ASN.1 ENUMERATED can be written to an Enumerated.
-//
-// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time.
-//
-// An ASN.1 PrintableString, IA5String, or NumericString can be written to a string.
-//
-// Any of the above ASN.1 values can be written to an interface{}.
-// The value stored in the interface has the corresponding Go type.
-// For integers, that type is int64.
-//
-// An ASN.1 SEQUENCE OF x or SET OF x can be written
-// to a slice if an x can be written to the slice's element type.
-//
-// An ASN.1 SEQUENCE or SET can be written to a struct
-// if each of the elements in the sequence can be
-// written to the corresponding element in the struct.
-//
-// The following tags on struct fields have special meaning to Unmarshal:
-//
-// application specifies that an APPLICATION tag is used
-// private specifies that a PRIVATE tag is used
-// default:x sets the default value for optional integer fields (only used if optional is also present)
-// explicit specifies that an additional, explicit tag wraps the implicit one
-// optional marks the field as ASN.1 OPTIONAL
-// set causes a SET, rather than a SEQUENCE type to be expected
-// tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC
-//
-// When decoding an ASN.1 value with an IMPLICIT tag into a string field,
-// Unmarshal will default to a PrintableString, which doesn't support
-// characters such as '@' and '&'. To force other encodings, use the following
-// tags:
-//
-// ia5 causes strings to be unmarshaled as ASN.1 IA5String values
-// numeric causes strings to be unmarshaled as ASN.1 NumericString values
-// utf8 causes strings to be unmarshaled as ASN.1 UTF8String values
-//
-// If the type of the first field of a structure is RawContent then the raw
-// ASN.1 contents of the struct will be stored in it.
-//
-// If the name of a slice type ends with "SET" then it's treated as if
-// the "set" tag was set on it. This results in interpreting the type as a
-// SET OF x rather than a SEQUENCE OF x. This can be used with nested slices
-// where a struct tag cannot be given.
-//
-// Other ASN.1 types are not supported; if it encounters them,
-// Unmarshal returns a parse error.
-func Unmarshal(b []byte, val any) (rest []byte, err error) {
- return UnmarshalWithParams(b, val, "")
-}
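A minimal, self-contained usage sketch (the struct and DER bytes are illustrative, not taken from this file):

	package main

	import (
		"encoding/asn1"
		"fmt"
	)

	type point struct {
		X, Y int // fields must be exported for Unmarshal to set them
	}

	func main() {
		// DER for SEQUENCE { INTEGER 1, INTEGER 2 }.
		der := []byte{0x30, 0x06, 0x02, 0x01, 0x01, 0x02, 0x01, 0x02}
		var p point
		rest, err := asn1.Unmarshal(der, &p)
		fmt.Println(p, len(rest), err) // {1 2} 0 <nil>
	}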
-
-// An invalidUnmarshalError describes an invalid argument passed to Unmarshal.
-// (The argument to Unmarshal must be a non-nil pointer.)
-type invalidUnmarshalError struct {
- Type reflect.Type
-}
-
-func (e *invalidUnmarshalError) Error() string {
- if e.Type == nil {
- return "asn1: Unmarshal recipient value is nil"
- }
-
- if e.Type.Kind() != reflect.Pointer {
- return "asn1: Unmarshal recipient value is non-pointer " + e.Type.String()
- }
- return "asn1: Unmarshal recipient value is nil " + e.Type.String()
-}
-
-// UnmarshalWithParams allows field parameters to be specified for the
-// top-level element. The form of the params is the same as the field tags.
-func UnmarshalWithParams(b []byte, val any, params string) (rest []byte, err error) {
- v := reflect.ValueOf(val)
- if v.Kind() != reflect.Pointer || v.IsNil() {
- return nil, &invalidUnmarshalError{reflect.TypeOf(val)}
- }
- offset, err := parseField(v.Elem(), b, 0, parseFieldParameters(params))
- if err != nil {
- return nil, err
- }
- return b[offset:], nil
-}
diff --git a/contrib/go/_std_1.18/src/encoding/binary/binary.go b/contrib/go/_std_1.18/src/encoding/binary/binary.go
deleted file mode 100644
index ee933461ee..0000000000
--- a/contrib/go/_std_1.18/src/encoding/binary/binary.go
+++ /dev/null
@@ -1,737 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package binary implements simple translation between numbers and byte
-// sequences and encoding and decoding of varints.
-//
-// Numbers are translated by reading and writing fixed-size values.
-// A fixed-size value is either a fixed-size arithmetic
-// type (bool, int8, uint8, int16, float32, complex64, ...)
-// or an array or struct containing only fixed-size values.
-//
-// The varint functions encode and decode single integer values using
-// a variable-length encoding; smaller values require fewer bytes.
-// For a specification, see
-// https://developers.google.com/protocol-buffers/docs/encoding.
-//
-// This package favors simplicity over efficiency. Clients that require
-// high-performance serialization, especially for large data structures,
-// should look at more advanced solutions such as the encoding/gob
-// package or protocol buffers.
-package binary
-
-import (
- "errors"
- "io"
- "math"
- "reflect"
- "sync"
-)
-
-// A ByteOrder specifies how to convert byte sequences into
-// 16-, 32-, or 64-bit unsigned integers.
-type ByteOrder interface {
- Uint16([]byte) uint16
- Uint32([]byte) uint32
- Uint64([]byte) uint64
- PutUint16([]byte, uint16)
- PutUint32([]byte, uint32)
- PutUint64([]byte, uint64)
- String() string
-}
-
-// LittleEndian is the little-endian implementation of ByteOrder.
-var LittleEndian littleEndian
-
-// BigEndian is the big-endian implementation of ByteOrder.
-var BigEndian bigEndian
-
-type littleEndian struct{}
-
-func (littleEndian) Uint16(b []byte) uint16 {
- _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
- return uint16(b[0]) | uint16(b[1])<<8
-}
-
-func (littleEndian) PutUint16(b []byte, v uint16) {
- _ = b[1] // early bounds check to guarantee safety of writes below
- b[0] = byte(v)
- b[1] = byte(v >> 8)
-}
-
-func (littleEndian) Uint32(b []byte) uint32 {
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-}
-
-func (littleEndian) PutUint32(b []byte, v uint32) {
- _ = b[3] // early bounds check to guarantee safety of writes below
- b[0] = byte(v)
- b[1] = byte(v >> 8)
- b[2] = byte(v >> 16)
- b[3] = byte(v >> 24)
-}
-
-func (littleEndian) Uint64(b []byte) uint64 {
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-}
-
-func (littleEndian) PutUint64(b []byte, v uint64) {
- _ = b[7] // early bounds check to guarantee safety of writes below
- b[0] = byte(v)
- b[1] = byte(v >> 8)
- b[2] = byte(v >> 16)
- b[3] = byte(v >> 24)
- b[4] = byte(v >> 32)
- b[5] = byte(v >> 40)
- b[6] = byte(v >> 48)
- b[7] = byte(v >> 56)
-}
-
-func (littleEndian) String() string { return "LittleEndian" }
-
-func (littleEndian) GoString() string { return "binary.LittleEndian" }
-
-type bigEndian struct{}
-
-func (bigEndian) Uint16(b []byte) uint16 {
- _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
- return uint16(b[1]) | uint16(b[0])<<8
-}
-
-func (bigEndian) PutUint16(b []byte, v uint16) {
- _ = b[1] // early bounds check to guarantee safety of writes below
- b[0] = byte(v >> 8)
- b[1] = byte(v)
-}
-
-func (bigEndian) Uint32(b []byte) uint32 {
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
-}
-
-func (bigEndian) PutUint32(b []byte, v uint32) {
- _ = b[3] // early bounds check to guarantee safety of writes below
- b[0] = byte(v >> 24)
- b[1] = byte(v >> 16)
- b[2] = byte(v >> 8)
- b[3] = byte(v)
-}
-
-func (bigEndian) Uint64(b []byte) uint64 {
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
- uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
-}
-
-func (bigEndian) PutUint64(b []byte, v uint64) {
- _ = b[7] // early bounds check to guarantee safety of writes below
- b[0] = byte(v >> 56)
- b[1] = byte(v >> 48)
- b[2] = byte(v >> 40)
- b[3] = byte(v >> 32)
- b[4] = byte(v >> 24)
- b[5] = byte(v >> 16)
- b[6] = byte(v >> 8)
- b[7] = byte(v)
-}
-
-func (bigEndian) String() string { return "BigEndian" }
-
-func (bigEndian) GoString() string { return "binary.BigEndian" }
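A short sketch contrasting the two orders from a client's perspective (illustrative value):

	b := make([]byte, 4)
	binary.LittleEndian.PutUint32(b, 0x01020304) // b == []byte{0x04, 0x03, 0x02, 0x01}
	binary.BigEndian.PutUint32(b, 0x01020304)    // b == []byte{0x01, 0x02, 0x03, 0x04}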
-
-// Read reads structured binary data from r into data.
-// Data must be a pointer to a fixed-size value or a slice
-// of fixed-size values.
-// Bytes read from r are decoded using the specified byte order
-// and written to successive fields of the data.
-// When decoding boolean values, a zero byte is decoded as false, and
-// any other non-zero byte is decoded as true.
-// When reading into structs, the field data for fields with
-// blank (_) field names is skipped; i.e., blank field names
-// may be used for padding.
-// When reading into a struct, all non-blank fields must be exported
-// or Read may panic.
-//
-// The error is EOF only if no bytes were read.
-// If an EOF happens after reading some but not all the bytes,
-// Read returns ErrUnexpectedEOF.
-func Read(r io.Reader, order ByteOrder, data any) error {
- // Fast path for basic types and slices.
- if n := intDataSize(data); n != 0 {
- bs := make([]byte, n)
- if _, err := io.ReadFull(r, bs); err != nil {
- return err
- }
- switch data := data.(type) {
- case *bool:
- *data = bs[0] != 0
- case *int8:
- *data = int8(bs[0])
- case *uint8:
- *data = bs[0]
- case *int16:
- *data = int16(order.Uint16(bs))
- case *uint16:
- *data = order.Uint16(bs)
- case *int32:
- *data = int32(order.Uint32(bs))
- case *uint32:
- *data = order.Uint32(bs)
- case *int64:
- *data = int64(order.Uint64(bs))
- case *uint64:
- *data = order.Uint64(bs)
- case *float32:
- *data = math.Float32frombits(order.Uint32(bs))
- case *float64:
- *data = math.Float64frombits(order.Uint64(bs))
- case []bool:
- for i, x := range bs { // Easier to loop over the input for 8-bit values.
- data[i] = x != 0
- }
- case []int8:
- for i, x := range bs {
- data[i] = int8(x)
- }
- case []uint8:
- copy(data, bs)
- case []int16:
- for i := range data {
- data[i] = int16(order.Uint16(bs[2*i:]))
- }
- case []uint16:
- for i := range data {
- data[i] = order.Uint16(bs[2*i:])
- }
- case []int32:
- for i := range data {
- data[i] = int32(order.Uint32(bs[4*i:]))
- }
- case []uint32:
- for i := range data {
- data[i] = order.Uint32(bs[4*i:])
- }
- case []int64:
- for i := range data {
- data[i] = int64(order.Uint64(bs[8*i:]))
- }
- case []uint64:
- for i := range data {
- data[i] = order.Uint64(bs[8*i:])
- }
- case []float32:
- for i := range data {
- data[i] = math.Float32frombits(order.Uint32(bs[4*i:]))
- }
- case []float64:
- for i := range data {
- data[i] = math.Float64frombits(order.Uint64(bs[8*i:]))
- }
- default:
- n = 0 // fast path doesn't apply
- }
- if n != 0 {
- return nil
- }
- }
-
- // Fallback to reflect-based decoding.
- v := reflect.ValueOf(data)
- size := -1
- switch v.Kind() {
- case reflect.Pointer:
- v = v.Elem()
- size = dataSize(v)
- case reflect.Slice:
- size = dataSize(v)
- }
- if size < 0 {
- return errors.New("binary.Read: invalid type " + reflect.TypeOf(data).String())
- }
- d := &decoder{order: order, buf: make([]byte, size)}
- if _, err := io.ReadFull(r, d.buf); err != nil {
- return err
- }
- d.value(v)
- return nil
-}
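A minimal usage sketch (assumes imports bytes, encoding/binary, fmt, and log; the bytes are the little-endian float64 encoding of pi):

	var pi float64
	buf := bytes.NewReader([]byte{0x18, 0x2d, 0x44, 0x54, 0xfb, 0x21, 0x09, 0x40})
	if err := binary.Read(buf, binary.LittleEndian, &pi); err != nil {
		log.Fatal(err)
	}
	fmt.Println(pi) // 3.141592653589793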
-
-// Write writes the binary representation of data into w.
-// Data must be a fixed-size value or a slice of fixed-size
-// values, or a pointer to such data.
-// Boolean values encode as one byte: 1 for true, and 0 for false.
-// Bytes written to w are encoded using the specified byte order
-// and read from successive fields of the data.
-// When writing structs, zero values are written for fields
-// with blank (_) field names.
-func Write(w io.Writer, order ByteOrder, data any) error {
- // Fast path for basic types and slices.
- if n := intDataSize(data); n != 0 {
- bs := make([]byte, n)
- switch v := data.(type) {
- case *bool:
- if *v {
- bs[0] = 1
- } else {
- bs[0] = 0
- }
- case bool:
- if v {
- bs[0] = 1
- } else {
- bs[0] = 0
- }
- case []bool:
- for i, x := range v {
- if x {
- bs[i] = 1
- } else {
- bs[i] = 0
- }
- }
- case *int8:
- bs[0] = byte(*v)
- case int8:
- bs[0] = byte(v)
- case []int8:
- for i, x := range v {
- bs[i] = byte(x)
- }
- case *uint8:
- bs[0] = *v
- case uint8:
- bs[0] = v
- case []uint8:
- bs = v
- case *int16:
- order.PutUint16(bs, uint16(*v))
- case int16:
- order.PutUint16(bs, uint16(v))
- case []int16:
- for i, x := range v {
- order.PutUint16(bs[2*i:], uint16(x))
- }
- case *uint16:
- order.PutUint16(bs, *v)
- case uint16:
- order.PutUint16(bs, v)
- case []uint16:
- for i, x := range v {
- order.PutUint16(bs[2*i:], x)
- }
- case *int32:
- order.PutUint32(bs, uint32(*v))
- case int32:
- order.PutUint32(bs, uint32(v))
- case []int32:
- for i, x := range v {
- order.PutUint32(bs[4*i:], uint32(x))
- }
- case *uint32:
- order.PutUint32(bs, *v)
- case uint32:
- order.PutUint32(bs, v)
- case []uint32:
- for i, x := range v {
- order.PutUint32(bs[4*i:], x)
- }
- case *int64:
- order.PutUint64(bs, uint64(*v))
- case int64:
- order.PutUint64(bs, uint64(v))
- case []int64:
- for i, x := range v {
- order.PutUint64(bs[8*i:], uint64(x))
- }
- case *uint64:
- order.PutUint64(bs, *v)
- case uint64:
- order.PutUint64(bs, v)
- case []uint64:
- for i, x := range v {
- order.PutUint64(bs[8*i:], x)
- }
- case *float32:
- order.PutUint32(bs, math.Float32bits(*v))
- case float32:
- order.PutUint32(bs, math.Float32bits(v))
- case []float32:
- for i, x := range v {
- order.PutUint32(bs[4*i:], math.Float32bits(x))
- }
- case *float64:
- order.PutUint64(bs, math.Float64bits(*v))
- case float64:
- order.PutUint64(bs, math.Float64bits(v))
- case []float64:
- for i, x := range v {
- order.PutUint64(bs[8*i:], math.Float64bits(x))
- }
- }
- _, err := w.Write(bs)
- return err
- }
-
- // Fallback to reflect-based encoding.
- v := reflect.Indirect(reflect.ValueOf(data))
- size := dataSize(v)
- if size < 0 {
- return errors.New("binary.Write: invalid type " + reflect.TypeOf(data).String())
- }
- buf := make([]byte, size)
- e := &encoder{order: order, buf: buf}
- e.value(v)
- _, err := w.Write(buf)
- return err
-}
-
-// Size returns how many bytes Write would generate to encode the value v, which
-// must be a fixed-size value or a slice of fixed-size values, or a pointer to such data.
-// If v is neither of these, Size returns -1.
-func Size(v any) int {
- return dataSize(reflect.Indirect(reflect.ValueOf(v)))
-}
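A usage sketch for Write and Size together (the struct is illustrative); note that the blank field is emitted as a zero byte:

	var buf bytes.Buffer
	data := struct {
		A uint16
		B uint8
		_ uint8 // padding; Write emits a zero byte here
	}{A: 0xefbe, B: 7}
	fmt.Println(binary.Size(data)) // 4
	if err := binary.Write(&buf, binary.BigEndian, data); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("% x\n", buf.Bytes()) // ef be 07 00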
-
-var structSize sync.Map // map[reflect.Type]int
-
-// dataSize returns the number of bytes the actual data represented by v occupies in memory.
-// For compound structures, it sums the sizes of the elements. Thus, for instance, for a slice
-// it returns the length of the slice times the element size and does not count the memory
-// occupied by the header. If the type of v is not acceptable, dataSize returns -1.
-func dataSize(v reflect.Value) int {
- switch v.Kind() {
- case reflect.Slice:
- if s := sizeof(v.Type().Elem()); s >= 0 {
- return s * v.Len()
- }
- return -1
-
- case reflect.Struct:
- t := v.Type()
- if size, ok := structSize.Load(t); ok {
- return size.(int)
- }
- size := sizeof(t)
- structSize.Store(t, size)
- return size
-
- default:
- return sizeof(v.Type())
- }
-}
-
-// sizeof returns the size >= 0 of variables for the given type or -1 if the type is not acceptable.
-func sizeof(t reflect.Type) int {
- switch t.Kind() {
- case reflect.Array:
- if s := sizeof(t.Elem()); s >= 0 {
- return s * t.Len()
- }
-
- case reflect.Struct:
- sum := 0
- for i, n := 0, t.NumField(); i < n; i++ {
- s := sizeof(t.Field(i).Type)
- if s < 0 {
- return -1
- }
- sum += s
- }
- return sum
-
- case reflect.Bool,
- reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
- reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
- return int(t.Size())
- }
-
- return -1
-}
-
-type coder struct {
- order ByteOrder
- buf []byte
- offset int
-}
-
-type decoder coder
-type encoder coder
-
-func (d *decoder) bool() bool {
- x := d.buf[d.offset]
- d.offset++
- return x != 0
-}
-
-func (e *encoder) bool(x bool) {
- if x {
- e.buf[e.offset] = 1
- } else {
- e.buf[e.offset] = 0
- }
- e.offset++
-}
-
-func (d *decoder) uint8() uint8 {
- x := d.buf[d.offset]
- d.offset++
- return x
-}
-
-func (e *encoder) uint8(x uint8) {
- e.buf[e.offset] = x
- e.offset++
-}
-
-func (d *decoder) uint16() uint16 {
- x := d.order.Uint16(d.buf[d.offset : d.offset+2])
- d.offset += 2
- return x
-}
-
-func (e *encoder) uint16(x uint16) {
- e.order.PutUint16(e.buf[e.offset:e.offset+2], x)
- e.offset += 2
-}
-
-func (d *decoder) uint32() uint32 {
- x := d.order.Uint32(d.buf[d.offset : d.offset+4])
- d.offset += 4
- return x
-}
-
-func (e *encoder) uint32(x uint32) {
- e.order.PutUint32(e.buf[e.offset:e.offset+4], x)
- e.offset += 4
-}
-
-func (d *decoder) uint64() uint64 {
- x := d.order.Uint64(d.buf[d.offset : d.offset+8])
- d.offset += 8
- return x
-}
-
-func (e *encoder) uint64(x uint64) {
- e.order.PutUint64(e.buf[e.offset:e.offset+8], x)
- e.offset += 8
-}
-
-func (d *decoder) int8() int8 { return int8(d.uint8()) }
-
-func (e *encoder) int8(x int8) { e.uint8(uint8(x)) }
-
-func (d *decoder) int16() int16 { return int16(d.uint16()) }
-
-func (e *encoder) int16(x int16) { e.uint16(uint16(x)) }
-
-func (d *decoder) int32() int32 { return int32(d.uint32()) }
-
-func (e *encoder) int32(x int32) { e.uint32(uint32(x)) }
-
-func (d *decoder) int64() int64 { return int64(d.uint64()) }
-
-func (e *encoder) int64(x int64) { e.uint64(uint64(x)) }
-
-func (d *decoder) value(v reflect.Value) {
- switch v.Kind() {
- case reflect.Array:
- l := v.Len()
- for i := 0; i < l; i++ {
- d.value(v.Index(i))
- }
-
- case reflect.Struct:
- t := v.Type()
- l := v.NumField()
- for i := 0; i < l; i++ {
- // Note: Calling v.CanSet() below is an optimization.
- // It would be sufficient to check the field name,
- // but creating the StructField info for each field is
- // costly (run "go test -bench=ReadStruct" and compare
- // results when making changes to this code).
- if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
- d.value(v)
- } else {
- d.skip(v)
- }
- }
-
- case reflect.Slice:
- l := v.Len()
- for i := 0; i < l; i++ {
- d.value(v.Index(i))
- }
-
- case reflect.Bool:
- v.SetBool(d.bool())
-
- case reflect.Int8:
- v.SetInt(int64(d.int8()))
- case reflect.Int16:
- v.SetInt(int64(d.int16()))
- case reflect.Int32:
- v.SetInt(int64(d.int32()))
- case reflect.Int64:
- v.SetInt(d.int64())
-
- case reflect.Uint8:
- v.SetUint(uint64(d.uint8()))
- case reflect.Uint16:
- v.SetUint(uint64(d.uint16()))
- case reflect.Uint32:
- v.SetUint(uint64(d.uint32()))
- case reflect.Uint64:
- v.SetUint(d.uint64())
-
- case reflect.Float32:
- v.SetFloat(float64(math.Float32frombits(d.uint32())))
- case reflect.Float64:
- v.SetFloat(math.Float64frombits(d.uint64()))
-
- case reflect.Complex64:
- v.SetComplex(complex(
- float64(math.Float32frombits(d.uint32())),
- float64(math.Float32frombits(d.uint32())),
- ))
- case reflect.Complex128:
- v.SetComplex(complex(
- math.Float64frombits(d.uint64()),
- math.Float64frombits(d.uint64()),
- ))
- }
-}
-
-func (e *encoder) value(v reflect.Value) {
- switch v.Kind() {
- case reflect.Array:
- l := v.Len()
- for i := 0; i < l; i++ {
- e.value(v.Index(i))
- }
-
- case reflect.Struct:
- t := v.Type()
- l := v.NumField()
- for i := 0; i < l; i++ {
- // see comment for corresponding code in decoder.value()
- if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
- e.value(v)
- } else {
- e.skip(v)
- }
- }
-
- case reflect.Slice:
- l := v.Len()
- for i := 0; i < l; i++ {
- e.value(v.Index(i))
- }
-
- case reflect.Bool:
- e.bool(v.Bool())
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- switch v.Type().Kind() {
- case reflect.Int8:
- e.int8(int8(v.Int()))
- case reflect.Int16:
- e.int16(int16(v.Int()))
- case reflect.Int32:
- e.int32(int32(v.Int()))
- case reflect.Int64:
- e.int64(v.Int())
- }
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- switch v.Type().Kind() {
- case reflect.Uint8:
- e.uint8(uint8(v.Uint()))
- case reflect.Uint16:
- e.uint16(uint16(v.Uint()))
- case reflect.Uint32:
- e.uint32(uint32(v.Uint()))
- case reflect.Uint64:
- e.uint64(v.Uint())
- }
-
- case reflect.Float32, reflect.Float64:
- switch v.Type().Kind() {
- case reflect.Float32:
- e.uint32(math.Float32bits(float32(v.Float())))
- case reflect.Float64:
- e.uint64(math.Float64bits(v.Float()))
- }
-
- case reflect.Complex64, reflect.Complex128:
- switch v.Type().Kind() {
- case reflect.Complex64:
- x := v.Complex()
- e.uint32(math.Float32bits(float32(real(x))))
- e.uint32(math.Float32bits(float32(imag(x))))
- case reflect.Complex128:
- x := v.Complex()
- e.uint64(math.Float64bits(real(x)))
- e.uint64(math.Float64bits(imag(x)))
- }
- }
-}
-
-func (d *decoder) skip(v reflect.Value) {
- d.offset += dataSize(v)
-}
-
-func (e *encoder) skip(v reflect.Value) {
- n := dataSize(v)
- zero := e.buf[e.offset : e.offset+n]
- for i := range zero {
- zero[i] = 0
- }
- e.offset += n
-}
-
-// intDataSize returns the number of bytes required to represent the data when encoded.
-// It returns zero if the type cannot be handled by the fast path in Read or Write.
-func intDataSize(data any) int {
- switch data := data.(type) {
- case bool, int8, uint8, *bool, *int8, *uint8:
- return 1
- case []bool:
- return len(data)
- case []int8:
- return len(data)
- case []uint8:
- return len(data)
- case int16, uint16, *int16, *uint16:
- return 2
- case []int16:
- return 2 * len(data)
- case []uint16:
- return 2 * len(data)
- case int32, uint32, *int32, *uint32:
- return 4
- case []int32:
- return 4 * len(data)
- case []uint32:
- return 4 * len(data)
- case int64, uint64, *int64, *uint64:
- return 8
- case []int64:
- return 8 * len(data)
- case []uint64:
- return 8 * len(data)
- case float32, *float32:
- return 4
- case float64, *float64:
- return 8
- case []float32:
- return 4 * len(data)
- case []float64:
- return 8 * len(data)
- }
- return 0
-}
diff --git a/contrib/go/_std_1.18/src/encoding/binary/varint.go b/contrib/go/_std_1.18/src/encoding/binary/varint.go
deleted file mode 100644
index 8fe20b5c45..0000000000
--- a/contrib/go/_std_1.18/src/encoding/binary/varint.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package binary
-
-// This file implements "varint" encoding of 64-bit integers.
-// The encoding is:
-// - unsigned integers are serialized 7 bits at a time, starting with the
-// least significant bits
-// - the most significant bit (msb) in each output byte indicates if there
-// is a continuation byte (msb = 1)
-// - signed integers are mapped to unsigned integers using "zig-zag"
-// encoding: Positive values x are written as 2*x + 0, negative values
-// are written as 2*(^x) + 1; that is, negative numbers are complemented
-// and whether to complement is encoded in bit 0.
-//
-// Design note:
-// At most 10 bytes are needed for 64-bit values. The encoding could
-// be more dense: a full 64-bit value needs an extra byte just to hold bit 63.
-// Instead, the msb of the previous byte could be used to hold bit 63 since we
-// know there can't be more than 64 bits. This is a trivial improvement and
-// would reduce the maximum encoding length to 9 bytes. However, it breaks the
-// invariant that the msb is always the "continuation bit" and thus makes the
-// format incompatible with a varint encoding for larger numbers (say 128-bit).
-
-import (
- "errors"
- "io"
-)
-
-// MaxVarintLenN is the maximum length of a varint-encoded N-bit integer.
-const (
- MaxVarintLen16 = 3
- MaxVarintLen32 = 5
- MaxVarintLen64 = 10
-)
-
-// PutUvarint encodes a uint64 into buf and returns the number of bytes written.
-// If the buffer is too small, PutUvarint will panic.
-func PutUvarint(buf []byte, x uint64) int {
- i := 0
- for x >= 0x80 {
- buf[i] = byte(x) | 0x80
- x >>= 7
- i++
- }
- buf[i] = byte(x)
- return i + 1
-}
-
-// Uvarint decodes a uint64 from buf and returns that value and the
-// number of bytes read (> 0). If an error occurred, the value is 0
-// and the number of bytes n is <= 0 meaning:
-//
-// n == 0: buf too small
-// n < 0: value larger than 64 bits (overflow)
-// and -n is the number of bytes read
-//
-func Uvarint(buf []byte) (uint64, int) {
- var x uint64
- var s uint
- for i, b := range buf {
- if i == MaxVarintLen64 {
- // Catch byte reads past MaxVarintLen64.
- // See issue https://golang.org/issues/41185
- return 0, -(i + 1) // overflow
- }
- if b < 0x80 {
- if i == MaxVarintLen64-1 && b > 1 {
- return 0, -(i + 1) // overflow
- }
- return x | uint64(b)<<s, i + 1
- }
- x |= uint64(b&0x7f) << s
- s += 7
- }
- return 0, 0
-}
-
-// PutVarint encodes an int64 into buf and returns the number of bytes written.
-// If the buffer is too small, PutVarint will panic.
-func PutVarint(buf []byte, x int64) int {
- ux := uint64(x) << 1
- if x < 0 {
- ux = ^ux
- }
- return PutUvarint(buf, ux)
-}
-
-// Varint decodes an int64 from buf and returns that value and the
-// number of bytes read (> 0). If an error occurred, the value is 0
-// and the number of bytes n is <= 0 with the following meaning:
-//
-// n == 0: buf too small
-// n < 0: value larger than 64 bits (overflow)
-// and -n is the number of bytes read
-//
-func Varint(buf []byte) (int64, int) {
- ux, n := Uvarint(buf) // ok to continue in presence of error
- x := int64(ux >> 1)
- if ux&1 != 0 {
- x = ^x
- }
- return x, n
-}
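A minimal sketch (not part of this diff) of the encoding described in the file comment: 300 needs two bytes because it exceeds 7 bits, and zig-zag maps -3 to 5 before the unsigned encoding applies.

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    buf := make([]byte, binary.MaxVarintLen64)

    // 300 = 0b100101100: low 7 bits with the msb set, then the remainder.
    n := binary.PutUvarint(buf, 300)
    fmt.Printf("% x\n", buf[:n]) // ac 02
    u, _ := binary.Uvarint(buf[:n])
    fmt.Println(u) // 300

    // Zig-zag: -3 is written as 2*(^-3) + 1 = 5, so small negatives stay short.
    n = binary.PutVarint(buf, -3)
    fmt.Printf("% x\n", buf[:n]) // 05
    v, _ := binary.Varint(buf[:n])
    fmt.Println(v) // -3
}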
-
-var overflow = errors.New("binary: varint overflows a 64-bit integer")
-
-// ReadUvarint reads an encoded unsigned integer from r and returns it as a uint64.
-func ReadUvarint(r io.ByteReader) (uint64, error) {
- var x uint64
- var s uint
- for i := 0; i < MaxVarintLen64; i++ {
- b, err := r.ReadByte()
- if err != nil {
- return x, err
- }
- if b < 0x80 {
- if i == MaxVarintLen64-1 && b > 1 {
- return x, overflow
- }
- return x | uint64(b)<<s, nil
- }
- x |= uint64(b&0x7f) << s
- s += 7
- }
- return x, overflow
-}
-
-// ReadVarint reads an encoded signed integer from r and returns it as an int64.
-func ReadVarint(r io.ByteReader) (int64, error) {
- ux, err := ReadUvarint(r) // ok to continue in presence of error
- x := int64(ux >> 1)
- if ux&1 != 0 {
- x = ^x
- }
- return x, err
-}
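The stream-based readers accept any io.ByteReader; a bytes.Buffer qualifies. A short sketch (not part of this diff), with arbitrary sample values:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

func main() {
    var buf bytes.Buffer
    tmp := make([]byte, binary.MaxVarintLen64)
    for _, x := range []int64{1, -1, 1 << 40} {
        buf.Write(tmp[:binary.PutVarint(tmp, x)])
    }
    // bytes.Buffer implements io.ByteReader, so ReadVarint can consume it.
    for i := 0; i < 3; i++ {
        x, err := binary.ReadVarint(&buf)
        fmt.Println(x, err) // 1 <nil>; -1 <nil>; 1099511627776 <nil>
    }
}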
diff --git a/contrib/go/_std_1.18/src/encoding/csv/reader.go b/contrib/go/_std_1.18/src/encoding/csv/reader.go
deleted file mode 100644
index f860f4f25f..0000000000
--- a/contrib/go/_std_1.18/src/encoding/csv/reader.go
+++ /dev/null
@@ -1,450 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package csv reads and writes comma-separated values (CSV) files.
-// There are many kinds of CSV files; this package supports the format
-// described in RFC 4180.
-//
-// A csv file contains zero or more records of one or more fields per record.
-// Each record is separated by the newline character. The final record may
-// optionally be followed by a newline character.
-//
-// field1,field2,field3
-//
-// White space is considered part of a field.
-//
-// Carriage returns before newline characters are silently removed.
-//
-// Blank lines are ignored. A line with only whitespace characters (excluding
-// the ending newline character) is not considered a blank line.
-//
-// Fields which start and stop with the quote character " are called
-// quoted-fields. The beginning and ending quote are not part of the
-// field.
-//
-// The source:
-//
-// normal string,"quoted-field"
-//
-// results in the fields
-//
-// {`normal string`, `quoted-field`}
-//
-// Within a quoted-field a quote character followed by a second quote
-// character is considered a single quote.
-//
-// "the ""word"" is true","a ""quoted-field"""
-//
-// results in
-//
-// {`the "word" is true`, `a "quoted-field"`}
-//
-// Newlines and commas may be included in a quoted-field
-//
-// "Multi-line
-// field","comma is ,"
-//
-// results in
-//
-// {`Multi-line
-// field`, `comma is ,`}
-package csv
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "unicode"
- "unicode/utf8"
-)
-
-// A ParseError is returned for parsing errors.
-// Line and column numbers are 1-indexed.
-type ParseError struct {
- StartLine int // Line where the record starts
- Line int // Line where the error occurred
- Column int // Column (1-based byte index) where the error occurred
- Err error // The actual error
-}
-
-func (e *ParseError) Error() string {
- if e.Err == ErrFieldCount {
- return fmt.Sprintf("record on line %d: %v", e.Line, e.Err)
- }
- if e.StartLine != e.Line {
- return fmt.Sprintf("record on line %d; parse error on line %d, column %d: %v", e.StartLine, e.Line, e.Column, e.Err)
- }
- return fmt.Sprintf("parse error on line %d, column %d: %v", e.Line, e.Column, e.Err)
-}
-
-func (e *ParseError) Unwrap() error { return e.Err }
-
-// These are the errors that can be returned in ParseError.Err.
-var (
- ErrTrailingComma = errors.New("extra delimiter at end of line") // Deprecated: No longer used.
- ErrBareQuote = errors.New("bare \" in non-quoted-field")
- ErrQuote = errors.New("extraneous or missing \" in quoted-field")
- ErrFieldCount = errors.New("wrong number of fields")
-)
-
-var errInvalidDelim = errors.New("csv: invalid field or comment delimiter")
-
-func validDelim(r rune) bool {
- return r != 0 && r != '"' && r != '\r' && r != '\n' && utf8.ValidRune(r) && r != utf8.RuneError
-}
-
-// A Reader reads records from a CSV-encoded file.
-//
-// As returned by NewReader, a Reader expects input conforming to RFC 4180.
-// The exported fields can be changed to customize the details before the
-// first call to Read or ReadAll.
-//
-// The Reader converts all \r\n sequences in its input to plain \n,
-// including in multiline field values, so that the returned data does
-// not depend on which line-ending convention an input file uses.
-type Reader struct {
- // Comma is the field delimiter.
- // It is set to comma (',') by NewReader.
- // Comma must be a valid rune and must not be \r, \n,
- // or the Unicode replacement character (0xFFFD).
- Comma rune
-
- // Comment, if not 0, is the comment character. Lines beginning with the
- // Comment character without preceding whitespace are ignored.
- // With leading whitespace the Comment character becomes part of the
- // field, even if TrimLeadingSpace is true.
- // Comment must be a valid rune and must not be \r, \n,
- // or the Unicode replacement character (0xFFFD).
- // It must also not be equal to Comma.
- Comment rune
-
- // FieldsPerRecord is the number of expected fields per record.
- // If FieldsPerRecord is positive, Read requires each record to
- // have the given number of fields. If FieldsPerRecord is 0, Read sets it to
- // the number of fields in the first record, so that future records must
- // have the same field count. If FieldsPerRecord is negative, no check is
- // made and records may have a variable number of fields.
- FieldsPerRecord int
-
- // If LazyQuotes is true, a quote may appear in an unquoted field and a
- // non-doubled quote may appear in a quoted field.
- LazyQuotes bool
-
- // If TrimLeadingSpace is true, leading white space in a field is ignored.
- // This is done even if the field delimiter, Comma, is white space.
- TrimLeadingSpace bool
-
- // ReuseRecord controls whether calls to Read may return a slice sharing
- // the backing array of the previous call's returned slice for performance.
- // By default, each call to Read returns newly allocated memory owned by the caller.
- ReuseRecord bool
-
- TrailingComma bool // Deprecated: No longer used.
-
- r *bufio.Reader
-
- // numLine is the current line being read in the CSV file.
- numLine int
-
- // rawBuffer is a line buffer only used by the readLine method.
- rawBuffer []byte
-
- // recordBuffer holds the unescaped fields, one after another.
- // The fields can be accessed by using the indexes in fieldIndexes.
- // E.g., For the row `a,"b","c""d",e`, recordBuffer will contain `abc"de`
- // and fieldIndexes will contain the indexes [1, 2, 5, 6].
- recordBuffer []byte
-
- // fieldIndexes is an index of fields inside recordBuffer.
- // The i'th field ends at offset fieldIndexes[i] in recordBuffer.
- fieldIndexes []int
-
- // fieldPositions is an index of field positions for the
- // last record returned by Read.
- fieldPositions []position
-
- // lastRecord is a record cache and only used when ReuseRecord == true.
- lastRecord []string
-}
-
-// NewReader returns a new Reader that reads from r.
-func NewReader(r io.Reader) *Reader {
- return &Reader{
- Comma: ',',
- r: bufio.NewReader(r),
- }
-}
-
-// Read reads one record (a slice of fields) from r.
-// If the record has an unexpected number of fields,
-// Read returns the record along with the error ErrFieldCount.
-// Except for that case, Read always returns either a non-nil
-// record or a non-nil error, but not both.
-// If there is no data left to be read, Read returns nil, io.EOF.
-// If ReuseRecord is true, the returned slice may be shared
-// between multiple calls to Read.
-func (r *Reader) Read() (record []string, err error) {
- if r.ReuseRecord {
- record, err = r.readRecord(r.lastRecord)
- r.lastRecord = record
- } else {
- record, err = r.readRecord(nil)
- }
- return record, err
-}
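A minimal read loop (not part of this diff) over the quoted-field sample from the package comment; Read reports io.EOF once the input is exhausted:

package main

import (
    "encoding/csv"
    "fmt"
    "io"
    "strings"
)

func main() {
    in := "normal string,\"quoted-field\"\n\"the \"\"word\"\" is true\",\"a \"\"quoted-field\"\"\"\n"
    r := csv.NewReader(strings.NewReader(in))
    for {
        record, err := r.Read()
        if err == io.EOF {
            break
        }
        if err != nil {
            fmt.Println("parse error:", err)
            return
        }
        fmt.Printf("%q\n", record)
    }
    // ["normal string" "quoted-field"]
    // ["the \"word\" is true" "a \"quoted-field\""]
}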
-
-// FieldPos returns the line and column corresponding to
-// the start of the field with the given index in the slice most recently
-// returned by Read. Numbering of lines and columns starts at 1;
-// columns are counted in bytes, not runes.
-//
-// If this is called with an out-of-bounds index, it panics.
-func (r *Reader) FieldPos(field int) (line, column int) {
- if field < 0 || field >= len(r.fieldPositions) {
- panic("out of range index passed to FieldPos")
- }
- p := &r.fieldPositions[field]
- return p.line, p.col
-}
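A small sketch (not part of this diff) of the 1-based positions FieldPos reports; the input "ab,cd,ef" is an arbitrary example:

package main

import (
    "encoding/csv"
    "fmt"
    "strings"
)

func main() {
    r := csv.NewReader(strings.NewReader("ab,cd,ef\n"))
    record, err := r.Read()
    if err != nil {
        panic(err)
    }
    fmt.Println(record) // [ab cd ef]
    // Field 2 ("ef") starts on line 1 at byte column 7 (both 1-based).
    line, col := r.FieldPos(2)
    fmt.Println(line, col) // 1 7
}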
-
-// pos holds the position of a field in the current line.
-type position struct {
- line, col int
-}
-
-// ReadAll reads all the remaining records from r.
-// Each record is a slice of fields.
-// A successful call returns err == nil, not err == io.EOF. Because ReadAll is
-// defined to read until EOF, it does not treat end of file as an error to be
-// reported.
-func (r *Reader) ReadAll() (records [][]string, err error) {
- for {
- record, err := r.readRecord(nil)
- if err == io.EOF {
- return records, nil
- }
- if err != nil {
- return nil, err
- }
- records = append(records, record)
- }
-}
-
-// readLine reads the next line (with the trailing endline).
-// If EOF is hit without a trailing endline, it will be omitted.
-// If some bytes were read, then the error is never io.EOF.
-// The result is only valid until the next call to readLine.
-func (r *Reader) readLine() ([]byte, error) {
- line, err := r.r.ReadSlice('\n')
- if err == bufio.ErrBufferFull {
- r.rawBuffer = append(r.rawBuffer[:0], line...)
- for err == bufio.ErrBufferFull {
- line, err = r.r.ReadSlice('\n')
- r.rawBuffer = append(r.rawBuffer, line...)
- }
- line = r.rawBuffer
- }
- if len(line) > 0 && err == io.EOF {
- err = nil
- // For backwards compatibility, drop trailing \r before EOF.
- if line[len(line)-1] == '\r' {
- line = line[:len(line)-1]
- }
- }
- r.numLine++
- // Normalize \r\n to \n on all input lines.
- if n := len(line); n >= 2 && line[n-2] == '\r' && line[n-1] == '\n' {
- line[n-2] = '\n'
- line = line[:n-1]
- }
- return line, err
-}
-
-// lengthNL reports the number of bytes for the trailing \n.
-func lengthNL(b []byte) int {
- if len(b) > 0 && b[len(b)-1] == '\n' {
- return 1
- }
- return 0
-}
-
-// nextRune returns the next rune in b or utf8.RuneError.
-func nextRune(b []byte) rune {
- r, _ := utf8.DecodeRune(b)
- return r
-}
-
-func (r *Reader) readRecord(dst []string) ([]string, error) {
- if r.Comma == r.Comment || !validDelim(r.Comma) || (r.Comment != 0 && !validDelim(r.Comment)) {
- return nil, errInvalidDelim
- }
-
- // Read line (automatically skipping past empty lines and any comments).
- var line []byte
- var errRead error
- for errRead == nil {
- line, errRead = r.readLine()
- if r.Comment != 0 && nextRune(line) == r.Comment {
- line = nil
- continue // Skip comment lines
- }
- if errRead == nil && len(line) == lengthNL(line) {
- line = nil
- continue // Skip empty lines
- }
- break
- }
- if errRead == io.EOF {
- return nil, errRead
- }
-
- // Parse each field in the record.
- var err error
- const quoteLen = len(`"`)
- commaLen := utf8.RuneLen(r.Comma)
- recLine := r.numLine // Starting line for record
- r.recordBuffer = r.recordBuffer[:0]
- r.fieldIndexes = r.fieldIndexes[:0]
- r.fieldPositions = r.fieldPositions[:0]
- pos := position{line: r.numLine, col: 1}
-parseField:
- for {
- if r.TrimLeadingSpace {
- i := bytes.IndexFunc(line, func(r rune) bool {
- return !unicode.IsSpace(r)
- })
- if i < 0 {
- i = len(line)
- pos.col -= lengthNL(line)
- }
- line = line[i:]
- pos.col += i
- }
- if len(line) == 0 || line[0] != '"' {
- // Non-quoted string field
- i := bytes.IndexRune(line, r.Comma)
- field := line
- if i >= 0 {
- field = field[:i]
- } else {
- field = field[:len(field)-lengthNL(field)]
- }
- // Check to make sure a quote does not appear in field.
- if !r.LazyQuotes {
- if j := bytes.IndexByte(field, '"'); j >= 0 {
- col := pos.col + j
- err = &ParseError{StartLine: recLine, Line: r.numLine, Column: col, Err: ErrBareQuote}
- break parseField
- }
- }
- r.recordBuffer = append(r.recordBuffer, field...)
- r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
- r.fieldPositions = append(r.fieldPositions, pos)
- if i >= 0 {
- line = line[i+commaLen:]
- pos.col += i + commaLen
- continue parseField
- }
- break parseField
- } else {
- // Quoted string field
- fieldPos := pos
- line = line[quoteLen:]
- pos.col += quoteLen
- for {
- i := bytes.IndexByte(line, '"')
- if i >= 0 {
- // Hit next quote.
- r.recordBuffer = append(r.recordBuffer, line[:i]...)
- line = line[i+quoteLen:]
- pos.col += i + quoteLen
- switch rn := nextRune(line); {
- case rn == '"':
- // `""` sequence (append quote).
- r.recordBuffer = append(r.recordBuffer, '"')
- line = line[quoteLen:]
- pos.col += quoteLen
- case rn == r.Comma:
- // `",` sequence (end of field).
- line = line[commaLen:]
- pos.col += commaLen
- r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
- r.fieldPositions = append(r.fieldPositions, fieldPos)
- continue parseField
- case lengthNL(line) == len(line):
- // `"\n` sequence (end of line).
- r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
- r.fieldPositions = append(r.fieldPositions, fieldPos)
- break parseField
- case r.LazyQuotes:
- // `"` sequence (bare quote).
- r.recordBuffer = append(r.recordBuffer, '"')
- default:
- // `"*` sequence (invalid non-escaped quote).
- err = &ParseError{StartLine: recLine, Line: r.numLine, Column: pos.col - quoteLen, Err: ErrQuote}
- break parseField
- }
- } else if len(line) > 0 {
- // Hit end of line (copy all data so far).
- r.recordBuffer = append(r.recordBuffer, line...)
- if errRead != nil {
- break parseField
- }
- pos.col += len(line)
- line, errRead = r.readLine()
- if len(line) > 0 {
- pos.line++
- pos.col = 1
- }
- if errRead == io.EOF {
- errRead = nil
- }
- } else {
- // Abrupt end of file (EOF or error).
- if !r.LazyQuotes && errRead == nil {
- err = &ParseError{StartLine: recLine, Line: pos.line, Column: pos.col, Err: ErrQuote}
- break parseField
- }
- r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
- r.fieldPositions = append(r.fieldPositions, fieldPos)
- break parseField
- }
- }
- }
- }
- if err == nil {
- err = errRead
- }
-
- // Create a single string and create slices out of it.
- // This pins the memory of the fields together, but allocates once.
- str := string(r.recordBuffer) // Convert to string once to batch allocations
- dst = dst[:0]
- if cap(dst) < len(r.fieldIndexes) {
- dst = make([]string, len(r.fieldIndexes))
- }
- dst = dst[:len(r.fieldIndexes)]
- var preIdx int
- for i, idx := range r.fieldIndexes {
- dst[i] = str[preIdx:idx]
- preIdx = idx
- }
-
- // Check or update the expected fields per record.
- if r.FieldsPerRecord > 0 {
- if len(dst) != r.FieldsPerRecord && err == nil {
- err = &ParseError{
- StartLine: recLine,
- Line: recLine,
- Column: 1,
- Err: ErrFieldCount,
- }
- }
- } else if r.FieldsPerRecord == 0 {
- r.FieldsPerRecord = len(dst)
- }
- return dst, err
-}
diff --git a/contrib/go/_std_1.18/src/encoding/hex/hex.go b/contrib/go/_std_1.18/src/encoding/hex/hex.go
deleted file mode 100644
index fbba78ffd2..0000000000
--- a/contrib/go/_std_1.18/src/encoding/hex/hex.go
+++ /dev/null
@@ -1,326 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package hex implements hexadecimal encoding and decoding.
-package hex
-
-import (
- "errors"
- "fmt"
- "io"
- "strings"
-)
-
-const hextable = "0123456789abcdef"
-
-// EncodedLen returns the length of an encoding of n source bytes.
-// Specifically, it returns n * 2.
-func EncodedLen(n int) int { return n * 2 }
-
-// Encode encodes src into EncodedLen(len(src))
-// bytes of dst. As a convenience, it returns the number
-// of bytes written to dst, but this value is always EncodedLen(len(src)).
-// Encode implements hexadecimal encoding.
-func Encode(dst, src []byte) int {
- j := 0
- for _, v := range src {
- dst[j] = hextable[v>>4]
- dst[j+1] = hextable[v&0x0f]
- j += 2
- }
- return len(src) * 2
-}
-
-// ErrLength reports an attempt to decode an odd-length input
-// using Decode or DecodeString.
-// The stream-based Decoder returns io.ErrUnexpectedEOF instead of ErrLength.
-var ErrLength = errors.New("encoding/hex: odd length hex string")
-
-// InvalidByteError values describe errors resulting from an invalid byte in a hex string.
-type InvalidByteError byte
-
-func (e InvalidByteError) Error() string {
- return fmt.Sprintf("encoding/hex: invalid byte: %#U", rune(e))
-}
-
-// DecodedLen returns the length of a decoding of x source bytes.
-// Specifically, it returns x / 2.
-func DecodedLen(x int) int { return x / 2 }
-
-// Decode decodes src into DecodedLen(len(src)) bytes,
-// returning the actual number of bytes written to dst.
-//
-// Decode expects that src contains only hexadecimal
-// characters and that src has even length.
-// If the input is malformed, Decode returns the number
-// of bytes decoded before the error.
-func Decode(dst, src []byte) (int, error) {
- i, j := 0, 1
- for ; j < len(src); j += 2 {
- a, ok := fromHexChar(src[j-1])
- if !ok {
- return i, InvalidByteError(src[j-1])
- }
- b, ok := fromHexChar(src[j])
- if !ok {
- return i, InvalidByteError(src[j])
- }
- dst[i] = (a << 4) | b
- i++
- }
- if len(src)%2 == 1 {
- // Check for invalid char before reporting bad length,
- // since the invalid char (if present) is an earlier problem.
- if _, ok := fromHexChar(src[j-1]); !ok {
- return i, InvalidByteError(src[j-1])
- }
- return i, ErrLength
- }
- return i, nil
-}
-
-// fromHexChar converts a hex character into its value and a success flag.
-func fromHexChar(c byte) (byte, bool) {
- switch {
- case '0' <= c && c <= '9':
- return c - '0', true
- case 'a' <= c && c <= 'f':
- return c - 'a' + 10, true
- case 'A' <= c && c <= 'F':
- return c - 'A' + 10, true
- }
-
- return 0, false
-}
-
-// EncodeToString returns the hexadecimal encoding of src.
-func EncodeToString(src []byte) string {
- dst := make([]byte, EncodedLen(len(src)))
- Encode(dst, src)
- return string(dst)
-}
-
-// DecodeString returns the bytes represented by the hexadecimal string s.
-//
-// DecodeString expects that s contains only hexadecimal
-// characters and that s has even length.
-// If the input is malformed, DecodeString returns
-// the bytes decoded before the error.
-func DecodeString(s string) ([]byte, error) {
- src := []byte(s)
- // We can use the source slice itself as the destination
- // because the decode loop increments by one and then the 'seen' byte is not used anymore.
- n, err := Decode(src, src)
- return src[:n], err
-}
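The convenience string forms, in a minimal sketch (not part of this diff); note the odd-length case returning ErrLength along with the bytes decoded so far:

package main

import (
    "encoding/hex"
    "fmt"
)

func main() {
    s := hex.EncodeToString([]byte("Go"))
    fmt.Println(s) // 476f

    b, err := hex.DecodeString(s)
    fmt.Println(string(b), err) // Go <nil>

    // Odd length: one full byte decodes before ErrLength is reported.
    b, err = hex.DecodeString("476")
    fmt.Printf("% x %v\n", b, err) // 47 encoding/hex: odd length hex string
}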
-
-// Dump returns a string that contains a hex dump of the given data. The format
-// of the hex dump matches the output of `hexdump -C` on the command line.
-func Dump(data []byte) string {
- if len(data) == 0 {
- return ""
- }
-
- var buf strings.Builder
- // Dumper will write 79 bytes per complete 16 byte chunk, and at least
- // 64 bytes for whatever remains. Round the allocation up, since only a
- // maximum of 15 bytes will be wasted.
- buf.Grow((1 + ((len(data) - 1) / 16)) * 79)
-
- dumper := Dumper(&buf)
- dumper.Write(data)
- dumper.Close()
- return buf.String()
-}
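A one-line sketch (not part of this diff) of Dump's hexdump -C format, using a 16-byte sample so the line is complete:

package main

import (
    "encoding/hex"
    "fmt"
)

func main() {
    fmt.Print(hex.Dump([]byte("gopher 123456789")))
    // 00000000  67 6f 70 68 65 72 20 31  32 33 34 35 36 37 38 39  |gopher 123456789|
}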
-
-// bufferSize is the number of hexadecimal characters to buffer in encoder and decoder.
-const bufferSize = 1024
-
-type encoder struct {
- w io.Writer
- err error
- out [bufferSize]byte // output buffer
-}
-
-// NewEncoder returns an io.Writer that writes lowercase hexadecimal characters to w.
-func NewEncoder(w io.Writer) io.Writer {
- return &encoder{w: w}
-}
-
-func (e *encoder) Write(p []byte) (n int, err error) {
- for len(p) > 0 && e.err == nil {
- chunkSize := bufferSize / 2
- if len(p) < chunkSize {
- chunkSize = len(p)
- }
-
- var written int
- encoded := Encode(e.out[:], p[:chunkSize])
- written, e.err = e.w.Write(e.out[:encoded])
- n += written / 2
- p = p[chunkSize:]
- }
- return n, e.err
-}
-
-type decoder struct {
- r io.Reader
- err error
- in []byte // input buffer (encoded form)
- arr [bufferSize]byte // backing array for in
-}
-
-// NewDecoder returns an io.Reader that decodes hexadecimal characters from r.
-// NewDecoder expects that r contain only an even number of hexadecimal characters.
-func NewDecoder(r io.Reader) io.Reader {
- return &decoder{r: r}
-}
-
-func (d *decoder) Read(p []byte) (n int, err error) {
- // Fill internal buffer with sufficient bytes to decode
- if len(d.in) < 2 && d.err == nil {
- var numCopy, numRead int
- numCopy = copy(d.arr[:], d.in) // Copies either 0 or 1 bytes
- numRead, d.err = d.r.Read(d.arr[numCopy:])
- d.in = d.arr[:numCopy+numRead]
- if d.err == io.EOF && len(d.in)%2 != 0 {
- if _, ok := fromHexChar(d.in[len(d.in)-1]); !ok {
- d.err = InvalidByteError(d.in[len(d.in)-1])
- } else {
- d.err = io.ErrUnexpectedEOF
- }
- }
- }
-
- // Decode internal buffer into output buffer
- if numAvail := len(d.in) / 2; len(p) > numAvail {
- p = p[:numAvail]
- }
- numDec, err := Decode(p, d.in[:len(p)*2])
- d.in = d.in[2*numDec:]
- if err != nil {
- d.in, d.err = nil, err // Decode error; discard input remainder
- }
-
- if len(d.in) < 2 {
- return numDec, d.err // Only expose errors when buffer fully consumed
- }
- return numDec, nil
-}
-
-// Dumper returns a WriteCloser that writes a hex dump of all written data to
-// w. The format of the dump matches the output of `hexdump -C` on the command
-// line.
-func Dumper(w io.Writer) io.WriteCloser {
- return &dumper{w: w}
-}
-
-type dumper struct {
- w io.Writer
- rightChars [18]byte
- buf [14]byte
- used int // number of bytes in the current line
- n uint // number of bytes, total
- closed bool
-}
-
-func toChar(b byte) byte {
- if b < 32 || b > 126 {
- return '.'
- }
- return b
-}
-
-func (h *dumper) Write(data []byte) (n int, err error) {
- if h.closed {
- return 0, errors.New("encoding/hex: dumper closed")
- }
-
- // Output lines look like:
- // 00000010 2e 2f 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d |./0123456789:;<=|
- // ^ offset ^ extra space ^ ASCII of line.
- for i := range data {
- if h.used == 0 {
- // At the beginning of a line we print the current
- // offset in hex.
- h.buf[0] = byte(h.n >> 24)
- h.buf[1] = byte(h.n >> 16)
- h.buf[2] = byte(h.n >> 8)
- h.buf[3] = byte(h.n)
- Encode(h.buf[4:], h.buf[:4])
- h.buf[12] = ' '
- h.buf[13] = ' '
- _, err = h.w.Write(h.buf[4:])
- if err != nil {
- return
- }
- }
- Encode(h.buf[:], data[i:i+1])
- h.buf[2] = ' '
- l := 3
- if h.used == 7 {
- // There's an additional space after the 8th byte.
- h.buf[3] = ' '
- l = 4
- } else if h.used == 15 {
- // At the end of the line there's an extra space and
- // the bar for the right column.
- h.buf[3] = ' '
- h.buf[4] = '|'
- l = 5
- }
- _, err = h.w.Write(h.buf[:l])
- if err != nil {
- return
- }
- n++
- h.rightChars[h.used] = toChar(data[i])
- h.used++
- h.n++
- if h.used == 16 {
- h.rightChars[16] = '|'
- h.rightChars[17] = '\n'
- _, err = h.w.Write(h.rightChars[:])
- if err != nil {
- return
- }
- h.used = 0
- }
- }
- return
-}
-
-func (h *dumper) Close() (err error) {
- // See the comments in Write() for the details of this format.
- if h.closed {
- return
- }
- h.closed = true
- if h.used == 0 {
- return
- }
- h.buf[0] = ' '
- h.buf[1] = ' '
- h.buf[2] = ' '
- h.buf[3] = ' '
- h.buf[4] = '|'
- nBytes := h.used
- for h.used < 16 {
- l := 3
- if h.used == 7 {
- l = 4
- } else if h.used == 15 {
- l = 5
- }
- _, err = h.w.Write(h.buf[:l])
- if err != nil {
- return
- }
- h.used++
- }
- h.rightChars[nBytes] = '|'
- h.rightChars[nBytes+1] = '\n'
- _, err = h.w.Write(h.rightChars[:nBytes+2])
- return
-}
diff --git a/contrib/go/_std_1.18/src/encoding/pem/pem.go b/contrib/go/_std_1.18/src/encoding/pem/pem.go
deleted file mode 100644
index 743876c906..0000000000
--- a/contrib/go/_std_1.18/src/encoding/pem/pem.go
+++ /dev/null
@@ -1,314 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package pem implements the PEM data encoding, which originated in Privacy
-// Enhanced Mail. The most common use of PEM encoding today is in TLS keys and
-// certificates. See RFC 1421.
-package pem
-
-import (
- "bytes"
- "encoding/base64"
- "errors"
- "io"
- "sort"
- "strings"
-)
-
-// A Block represents a PEM encoded structure.
-//
-// The encoded form is:
-// -----BEGIN Type-----
-// Headers
-// base64-encoded Bytes
-// -----END Type-----
-// where Headers is a possibly empty sequence of Key: Value lines.
-type Block struct {
- Type string // The type, taken from the preamble (e.g. "RSA PRIVATE KEY").
- Headers map[string]string // Optional headers.
- Bytes []byte // The decoded bytes of the contents. Typically a DER encoded ASN.1 structure.
-}
-
-// getLine returns the first \r\n or \n delimited line from the given byte
-// array. The line does not include trailing whitespace or the trailing new
-// line bytes. The remainder of the byte array (also not including the new line
-// bytes) is also returned and this will always be smaller than the original
-// argument.
-func getLine(data []byte) (line, rest []byte) {
- i := bytes.IndexByte(data, '\n')
- var j int
- if i < 0 {
- i = len(data)
- j = i
- } else {
- j = i + 1
- if i > 0 && data[i-1] == '\r' {
- i--
- }
- }
- return bytes.TrimRight(data[0:i], " \t"), data[j:]
-}
-
-// removeSpacesAndTabs returns a copy of its input with all spaces and tabs
-// removed, if there were any. Otherwise, the input is returned unchanged.
-//
-// The base64 decoder already skips newline characters, so we don't need to
-// filter them out here.
-func removeSpacesAndTabs(data []byte) []byte {
- if !bytes.ContainsAny(data, " \t") {
- // Fast path; most base64 data within PEM contains newlines, but
- // no spaces nor tabs. Skip the extra alloc and work.
- return data
- }
- result := make([]byte, len(data))
- n := 0
-
- for _, b := range data {
- if b == ' ' || b == '\t' {
- continue
- }
- result[n] = b
- n++
- }
-
- return result[0:n]
-}
-
-var pemStart = []byte("\n-----BEGIN ")
-var pemEnd = []byte("\n-----END ")
-var pemEndOfLine = []byte("-----")
-var colon = []byte(":")
-
-// Decode will find the next PEM formatted block (certificate, private key
-// etc) in the input. It returns that block and the remainder of the input. If
-// no PEM data is found, p is nil and the whole of the input is returned in
-// rest.
-func Decode(data []byte) (p *Block, rest []byte) {
- // pemStart begins with a newline. However, at the very beginning of
- // the byte array, we'll accept the start string without it.
- rest = data
- for {
- if bytes.HasPrefix(rest, pemStart[1:]) {
- rest = rest[len(pemStart)-1:]
- } else if _, after, ok := bytes.Cut(rest, pemStart); ok {
- rest = after
- } else {
- return nil, data
- }
-
- var typeLine []byte
- typeLine, rest = getLine(rest)
- if !bytes.HasSuffix(typeLine, pemEndOfLine) {
- continue
- }
- typeLine = typeLine[0 : len(typeLine)-len(pemEndOfLine)]
-
- p = &Block{
- Headers: make(map[string]string),
- Type: string(typeLine),
- }
-
- for {
- // This loop terminates because getLine's second result is
- // always smaller than its argument.
- if len(rest) == 0 {
- return nil, data
- }
- line, next := getLine(rest)
-
- key, val, ok := bytes.Cut(line, colon)
- if !ok {
- break
- }
-
- // TODO(agl): need to cope with values that spread across lines.
- key = bytes.TrimSpace(key)
- val = bytes.TrimSpace(val)
- p.Headers[string(key)] = string(val)
- rest = next
- }
-
- var endIndex, endTrailerIndex int
-
- // If there were no headers, the END line might occur
- // immediately, without a leading newline.
- if len(p.Headers) == 0 && bytes.HasPrefix(rest, pemEnd[1:]) {
- endIndex = 0
- endTrailerIndex = len(pemEnd) - 1
- } else {
- endIndex = bytes.Index(rest, pemEnd)
- endTrailerIndex = endIndex + len(pemEnd)
- }
-
- if endIndex < 0 {
- continue
- }
-
- // After the "-----" of the ending line, there should be the same type
- // and then a final five dashes.
- endTrailer := rest[endTrailerIndex:]
- endTrailerLen := len(typeLine) + len(pemEndOfLine)
- if len(endTrailer) < endTrailerLen {
- continue
- }
-
- restOfEndLine := endTrailer[endTrailerLen:]
- endTrailer = endTrailer[:endTrailerLen]
- if !bytes.HasPrefix(endTrailer, typeLine) ||
- !bytes.HasSuffix(endTrailer, pemEndOfLine) {
- continue
- }
-
- // The line must end with only whitespace.
- if s, _ := getLine(restOfEndLine); len(s) != 0 {
- continue
- }
-
- base64Data := removeSpacesAndTabs(rest[:endIndex])
- p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data)))
- n, err := base64.StdEncoding.Decode(p.Bytes, base64Data)
- if err != nil {
- continue
- }
- p.Bytes = p.Bytes[:n]
-
- // the -1 is because we might have only matched pemEnd without the
- // leading newline if the PEM block was empty.
- _, rest = getLine(rest[endIndex+len(pemEnd)-1:])
- return p, rest
- }
-}
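A minimal sketch (not part of this diff) of decoding one block; the MESSAGE type, the Proc-Type header, and the base64 payload ("hello, world") are arbitrary sample data:

package main

import (
    "encoding/pem"
    "fmt"
)

func main() {
    data := []byte(`-----BEGIN MESSAGE-----
Proc-Type: 4,ENCRYPTED

aGVsbG8sIHdvcmxk
-----END MESSAGE-----
`)
    block, rest := pem.Decode(data)
    if block == nil {
        fmt.Println("no PEM block found")
        return
    }
    fmt.Println(block.Type)                 // MESSAGE
    fmt.Println(block.Headers["Proc-Type"]) // 4,ENCRYPTED
    fmt.Printf("%s %d\n", block.Bytes, len(rest)) // hello, world 0
}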
-
-const pemLineLength = 64
-
-type lineBreaker struct {
- line [pemLineLength]byte
- used int
- out io.Writer
-}
-
-var nl = []byte{'\n'}
-
-func (l *lineBreaker) Write(b []byte) (n int, err error) {
- if l.used+len(b) < pemLineLength {
- copy(l.line[l.used:], b)
- l.used += len(b)
- return len(b), nil
- }
-
- n, err = l.out.Write(l.line[0:l.used])
- if err != nil {
- return
- }
- excess := pemLineLength - l.used
- l.used = 0
-
- n, err = l.out.Write(b[0:excess])
- if err != nil {
- return
- }
-
- n, err = l.out.Write(nl)
- if err != nil {
- return
- }
-
- return l.Write(b[excess:])
-}
-
-func (l *lineBreaker) Close() (err error) {
- if l.used > 0 {
- _, err = l.out.Write(l.line[0:l.used])
- if err != nil {
- return
- }
- _, err = l.out.Write(nl)
- }
-
- return
-}
-
-func writeHeader(out io.Writer, k, v string) error {
- _, err := out.Write([]byte(k + ": " + v + "\n"))
- return err
-}
-
-// Encode writes the PEM encoding of b to out.
-func Encode(out io.Writer, b *Block) error {
- // Check for invalid block before writing any output.
- for k := range b.Headers {
- if strings.Contains(k, ":") {
- return errors.New("pem: cannot encode a header key that contains a colon")
- }
- }
-
- // All errors below are relayed from underlying io.Writer,
- // so it is now safe to write data.
-
- if _, err := out.Write(pemStart[1:]); err != nil {
- return err
- }
- if _, err := out.Write([]byte(b.Type + "-----\n")); err != nil {
- return err
- }
-
- if len(b.Headers) > 0 {
- const procType = "Proc-Type"
- h := make([]string, 0, len(b.Headers))
- hasProcType := false
- for k := range b.Headers {
- if k == procType {
- hasProcType = true
- continue
- }
- h = append(h, k)
- }
- // The Proc-Type header must be written first.
- // See RFC 1421, section 4.6.1.1
- if hasProcType {
- if err := writeHeader(out, procType, b.Headers[procType]); err != nil {
- return err
- }
- }
- // For consistency of output, write other headers sorted by key.
- sort.Strings(h)
- for _, k := range h {
- if err := writeHeader(out, k, b.Headers[k]); err != nil {
- return err
- }
- }
- if _, err := out.Write(nl); err != nil {
- return err
- }
- }
-
- var breaker lineBreaker
- breaker.out = out
-
- b64 := base64.NewEncoder(base64.StdEncoding, &breaker)
- if _, err := b64.Write(b.Bytes); err != nil {
- return err
- }
- b64.Close()
- breaker.Close()
-
- if _, err := out.Write(pemEnd[1:]); err != nil {
- return err
- }
- _, err := out.Write([]byte(b.Type + "-----\n"))
- return err
-}
-
-// EncodeToMemory returns the PEM encoding of b.
-//
-// If b has invalid headers and cannot be encoded,
-// EncodeToMemory returns nil. If it is important to
-// report details about this error case, use Encode instead.
-func EncodeToMemory(b *Block) []byte {
- var buf bytes.Buffer
- if err := Encode(&buf, b); err != nil {
- return nil
- }
- return buf.Bytes()
-}
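And the inverse direction via EncodeToMemory, again a minimal sketch (not part of this diff):

package main

import (
    "encoding/pem"
    "fmt"
)

func main() {
    block := &pem.Block{Type: "MESSAGE", Bytes: []byte("hello, world")}
    fmt.Print(string(pem.EncodeToMemory(block)))
    // -----BEGIN MESSAGE-----
    // aGVsbG8sIHdvcmxk
    // -----END MESSAGE-----
}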
diff --git a/contrib/go/_std_1.18/src/flag/flag.go b/contrib/go/_std_1.18/src/flag/flag.go
deleted file mode 100644
index 4e2af450c5..0000000000
--- a/contrib/go/_std_1.18/src/flag/flag.go
+++ /dev/null
@@ -1,1079 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
- Package flag implements command-line flag parsing.
-
- Usage
-
- Define flags using flag.String(), Bool(), Int(), etc.
-
- This declares an integer flag, -n, stored in the pointer nFlag, with type *int:
- import "flag"
- var nFlag = flag.Int("n", 1234, "help message for flag n")
- If you like, you can bind the flag to a variable using the Var() functions.
- var flagvar int
- func init() {
- flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
- }
- Or you can create custom flags that satisfy the Value interface (with
- pointer receivers) and couple them to flag parsing by
- flag.Var(&flagVal, "name", "help message for flagname")
- For such flags, the default value is just the initial value of the variable.
-
- After all flags are defined, call
- flag.Parse()
- to parse the command line into the defined flags.
-
- Flags may then be used directly. If you're using the flags themselves,
- they are all pointers; if you bind to variables, they're values.
- fmt.Println("ip has value ", *ip)
- fmt.Println("flagvar has value ", flagvar)
-
- After parsing, the arguments following the flags are available as the
- slice flag.Args() or individually as flag.Arg(i).
- The arguments are indexed from 0 through flag.NArg()-1.
-
- Command line flag syntax
-
- The following forms are permitted:
-
- -flag
- -flag=x
- -flag x // non-boolean flags only
- One or two minus signs may be used; they are equivalent.
- The last form is not permitted for boolean flags because the
- meaning of the command
- cmd -x *
- where * is a Unix shell wildcard, will change if there is a file
- called 0, false, etc. You must use the -flag=false form to turn
- off a boolean flag.
-
- Flag parsing stops just before the first non-flag argument
- ("-" is a non-flag argument) or after the terminator "--".
-
- Integer flags accept 1234, 0664, 0x1234 and may be negative.
- Boolean flags may be:
- 1, 0, t, f, T, F, true, false, TRUE, FALSE, True, False
- Duration flags accept any input valid for time.ParseDuration.
-
- The default set of command-line flags is controlled by
- top-level functions. The FlagSet type allows one to define
- independent sets of flags, such as to implement subcommands
- in a command-line interface. The methods of FlagSet are
- analogous to the top-level functions for the command-line
- flag set.
-*/
-package flag
-
-import (
- "errors"
- "fmt"
- "io"
- "os"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-// ErrHelp is the error returned if the -help or -h flag is invoked
-// but no such flag is defined.
-var ErrHelp = errors.New("flag: help requested")
-
-// errParse is returned by Set if a flag's value fails to parse, such as with an invalid integer for Int.
-// It then gets wrapped through failf to provide more information.
-var errParse = errors.New("parse error")
-
-// errRange is returned by Set if a flag's value is out of range.
-// It then gets wrapped through failf to provide more information.
-var errRange = errors.New("value out of range")
-
-func numError(err error) error {
- ne, ok := err.(*strconv.NumError)
- if !ok {
- return err
- }
- if ne.Err == strconv.ErrSyntax {
- return errParse
- }
- if ne.Err == strconv.ErrRange {
- return errRange
- }
- return err
-}
-
-// -- bool Value
-type boolValue bool
-
-func newBoolValue(val bool, p *bool) *boolValue {
- *p = val
- return (*boolValue)(p)
-}
-
-func (b *boolValue) Set(s string) error {
- v, err := strconv.ParseBool(s)
- if err != nil {
- err = errParse
- }
- *b = boolValue(v)
- return err
-}
-
-func (b *boolValue) Get() any { return bool(*b) }
-
-func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
-
-func (b *boolValue) IsBoolFlag() bool { return true }
-
-// optional interface to indicate boolean flags that can be
-// supplied without "=value" text
-type boolFlag interface {
- Value
- IsBoolFlag() bool
-}
-
-// -- int Value
-type intValue int
-
-func newIntValue(val int, p *int) *intValue {
- *p = val
- return (*intValue)(p)
-}
-
-func (i *intValue) Set(s string) error {
- v, err := strconv.ParseInt(s, 0, strconv.IntSize)
- if err != nil {
- err = numError(err)
- }
- *i = intValue(v)
- return err
-}
-
-func (i *intValue) Get() any { return int(*i) }
-
-func (i *intValue) String() string { return strconv.Itoa(int(*i)) }
-
-// -- int64 Value
-type int64Value int64
-
-func newInt64Value(val int64, p *int64) *int64Value {
- *p = val
- return (*int64Value)(p)
-}
-
-func (i *int64Value) Set(s string) error {
- v, err := strconv.ParseInt(s, 0, 64)
- if err != nil {
- err = numError(err)
- }
- *i = int64Value(v)
- return err
-}
-
-func (i *int64Value) Get() any { return int64(*i) }
-
-func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) }
-
-// -- uint Value
-type uintValue uint
-
-func newUintValue(val uint, p *uint) *uintValue {
- *p = val
- return (*uintValue)(p)
-}
-
-func (i *uintValue) Set(s string) error {
- v, err := strconv.ParseUint(s, 0, strconv.IntSize)
- if err != nil {
- err = numError(err)
- }
- *i = uintValue(v)
- return err
-}
-
-func (i *uintValue) Get() any { return uint(*i) }
-
-func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
-
-// -- uint64 Value
-type uint64Value uint64
-
-func newUint64Value(val uint64, p *uint64) *uint64Value {
- *p = val
- return (*uint64Value)(p)
-}
-
-func (i *uint64Value) Set(s string) error {
- v, err := strconv.ParseUint(s, 0, 64)
- if err != nil {
- err = numError(err)
- }
- *i = uint64Value(v)
- return err
-}
-
-func (i *uint64Value) Get() any { return uint64(*i) }
-
-func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
-
-// -- string Value
-type stringValue string
-
-func newStringValue(val string, p *string) *stringValue {
- *p = val
- return (*stringValue)(p)
-}
-
-func (s *stringValue) Set(val string) error {
- *s = stringValue(val)
- return nil
-}
-
-func (s *stringValue) Get() any { return string(*s) }
-
-func (s *stringValue) String() string { return string(*s) }
-
-// -- float64 Value
-type float64Value float64
-
-func newFloat64Value(val float64, p *float64) *float64Value {
- *p = val
- return (*float64Value)(p)
-}
-
-func (f *float64Value) Set(s string) error {
- v, err := strconv.ParseFloat(s, 64)
- if err != nil {
- err = numError(err)
- }
- *f = float64Value(v)
- return err
-}
-
-func (f *float64Value) Get() any { return float64(*f) }
-
-func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }
-
-// -- time.Duration Value
-type durationValue time.Duration
-
-func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
- *p = val
- return (*durationValue)(p)
-}
-
-func (d *durationValue) Set(s string) error {
- v, err := time.ParseDuration(s)
- if err != nil {
- err = errParse
- }
- *d = durationValue(v)
- return err
-}
-
-func (d *durationValue) Get() any { return time.Duration(*d) }
-
-func (d *durationValue) String() string { return (*time.Duration)(d).String() }
-
-type funcValue func(string) error
-
-func (f funcValue) Set(s string) error { return f(s) }
-
-func (f funcValue) String() string { return "" }
-
-// Value is the interface to the dynamic value stored in a flag.
-// (The default value is represented as a string.)
-//
-// If a Value has an IsBoolFlag() bool method returning true,
-// the command-line parser makes -name equivalent to -name=true
-// rather than using the next command-line argument.
-//
-// Set is called once, in command line order, for each flag present.
-// The flag package may call the String method with a zero-valued receiver,
-// such as a nil pointer.
-type Value interface {
- String() string
- Set(string) error
-}
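The package comment's custom-flag path in a minimal sketch (not part of this diff); the listValue type and the -item flag name are hypothetical:

package main

import (
    "flag"
    "fmt"
    "strings"
)

// listValue implements flag.Value: repeated -item flags accumulate here.
type listValue []string

func (l *listValue) String() string { return strings.Join(*l, ",") }

func (l *listValue) Set(s string) error {
    *l = append(*l, s)
    return nil
}

func main() {
    var items listValue
    flag.Var(&items, "item", "an item to add (may be repeated)")
    flag.Parse()
    fmt.Println(items)
}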
-
-// Getter is an interface that allows the contents of a Value to be retrieved.
-// It wraps the Value interface, rather than being part of it, because it
-// appeared after Go 1 and its compatibility rules. All Value types provided
-// by this package satisfy the Getter interface, except the type used by Func.
-type Getter interface {
- Value
- Get() any
-}
-
-// ErrorHandling defines how FlagSet.Parse behaves if the parse fails.
-type ErrorHandling int
-
-// These constants cause FlagSet.Parse to behave as described if the parse fails.
-const (
- ContinueOnError ErrorHandling = iota // Return a descriptive error.
- ExitOnError // Call os.Exit(2) or for -h/-help Exit(0).
- PanicOnError // Call panic with a descriptive error.
-)
-
-// A FlagSet represents a set of defined flags. The zero value of a FlagSet
-// has no name and has ContinueOnError error handling.
-//
-// Flag names must be unique within a FlagSet. An attempt to define a flag whose
-// name is already in use will cause a panic.
-type FlagSet struct {
- // Usage is the function called when an error occurs while parsing flags.
- // The field is a function (not a method) that may be changed to point to
- // a custom error handler. What happens after Usage is called depends
- // on the ErrorHandling setting; for the command line, this defaults
- // to ExitOnError, which exits the program after calling Usage.
- Usage func()
-
- name string
- parsed bool
- actual map[string]*Flag
- formal map[string]*Flag
- args []string // arguments after flags
- errorHandling ErrorHandling
- output io.Writer // nil means stderr; use Output() accessor
-}
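A subcommand sketch (not part of this diff) using an independent FlagSet, as the package comment suggests; the "add" command and its -force flag are hypothetical:

package main

import (
    "flag"
    "fmt"
    "os"
)

func main() {
    addCmd := flag.NewFlagSet("add", flag.ExitOnError)
    force := addCmd.Bool("force", false, "overwrite existing entries")

    if len(os.Args) > 1 && os.Args[1] == "add" {
        addCmd.Parse(os.Args[2:]) // ExitOnError: exits the process on bad input
        fmt.Println("add:", *force, addCmd.Args())
    }
}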
-
-// A Flag represents the state of a flag.
-type Flag struct {
- Name string // name as it appears on command line
- Usage string // help message
- Value Value // value as set
- DefValue string // default value (as text); for usage message
-}
-
-// sortFlags returns the flags as a slice in lexicographical sorted order.
-func sortFlags(flags map[string]*Flag) []*Flag {
- result := make([]*Flag, len(flags))
- i := 0
- for _, f := range flags {
- result[i] = f
- i++
- }
- sort.Slice(result, func(i, j int) bool {
- return result[i].Name < result[j].Name
- })
- return result
-}
-
-// Output returns the destination for usage and error messages. os.Stderr is returned if
-// output was not set or was set to nil.
-func (f *FlagSet) Output() io.Writer {
- if f.output == nil {
- return os.Stderr
- }
- return f.output
-}
-
-// Name returns the name of the flag set.
-func (f *FlagSet) Name() string {
- return f.name
-}
-
-// ErrorHandling returns the error handling behavior of the flag set.
-func (f *FlagSet) ErrorHandling() ErrorHandling {
- return f.errorHandling
-}
-
-// SetOutput sets the destination for usage and error messages.
-// If output is nil, os.Stderr is used.
-func (f *FlagSet) SetOutput(output io.Writer) {
- f.output = output
-}
-
-// VisitAll visits the flags in lexicographical order, calling fn for each.
-// It visits all flags, even those not set.
-func (f *FlagSet) VisitAll(fn func(*Flag)) {
- for _, flag := range sortFlags(f.formal) {
- fn(flag)
- }
-}
-
-// VisitAll visits the command-line flags in lexicographical order, calling
-// fn for each. It visits all flags, even those not set.
-func VisitAll(fn func(*Flag)) {
- CommandLine.VisitAll(fn)
-}
-
-// Visit visits the flags in lexicographical order, calling fn for each.
-// It visits only those flags that have been set.
-func (f *FlagSet) Visit(fn func(*Flag)) {
- for _, flag := range sortFlags(f.actual) {
- fn(flag)
- }
-}
-
-// Visit visits the command-line flags in lexicographical order, calling fn
-// for each. It visits only those flags that have been set.
-func Visit(fn func(*Flag)) {
- CommandLine.Visit(fn)
-}
-
-// Lookup returns the Flag structure of the named flag, returning nil if none exists.
-func (f *FlagSet) Lookup(name string) *Flag {
- return f.formal[name]
-}
-
-// Lookup returns the Flag structure of the named command-line flag,
-// returning nil if none exists.
-func Lookup(name string) *Flag {
- return CommandLine.formal[name]
-}
-
-// Set sets the value of the named flag.
-func (f *FlagSet) Set(name, value string) error {
- flag, ok := f.formal[name]
- if !ok {
- return fmt.Errorf("no such flag -%v", name)
- }
- err := flag.Value.Set(value)
- if err != nil {
- return err
- }
- if f.actual == nil {
- f.actual = make(map[string]*Flag)
- }
- f.actual[name] = flag
- return nil
-}
-
-// Set sets the value of the named command-line flag.
-func Set(name, value string) error {
- return CommandLine.Set(name, value)
-}
-
-// isZeroValue determines whether the string represents the zero
-// value for a flag.
-func isZeroValue(flag *Flag, value string) bool {
- // Build a zero value of the flag's Value type, and see if the
- // result of calling its String method equals the value passed in.
- // This works unless the Value type is itself an interface type.
- typ := reflect.TypeOf(flag.Value)
- var z reflect.Value
- if typ.Kind() == reflect.Pointer {
- z = reflect.New(typ.Elem())
- } else {
- z = reflect.Zero(typ)
- }
- return value == z.Interface().(Value).String()
-}
-
-// UnquoteUsage extracts a back-quoted name from the usage
-// string for a flag and returns it and the un-quoted usage.
-// Given "a `name` to show" it returns ("name", "a name to show").
-// If there are no back quotes, the name is an educated guess of the
-// type of the flag's value, or the empty string if the flag is boolean.
-func UnquoteUsage(flag *Flag) (name string, usage string) {
- // Look for a back-quoted name, but avoid the strings package.
- usage = flag.Usage
- for i := 0; i < len(usage); i++ {
- if usage[i] == '`' {
- for j := i + 1; j < len(usage); j++ {
- if usage[j] == '`' {
- name = usage[i+1 : j]
- usage = usage[:i] + name + usage[j+1:]
- return name, usage
- }
- }
- break // Only one back quote; use type name.
- }
- }
- // No explicit name, so use type if we can find one.
- name = "value"
- switch flag.Value.(type) {
- case boolFlag:
- name = ""
- case *durationValue:
- name = "duration"
- case *float64Value:
- name = "float"
- case *intValue, *int64Value:
- name = "int"
- case *stringValue:
- name = "string"
- case *uintValue, *uint64Value:
- name = "uint"
- }
- return
-}
-
-// PrintDefaults prints, to standard error unless configured otherwise, the
-// default values of all defined command-line flags in the set. See the
-// documentation for the global function PrintDefaults for more information.
-func (f *FlagSet) PrintDefaults() {
- f.VisitAll(func(flag *Flag) {
- var b strings.Builder
- fmt.Fprintf(&b, " -%s", flag.Name) // Two spaces before -; see next two comments.
- name, usage := UnquoteUsage(flag)
- if len(name) > 0 {
- b.WriteString(" ")
- b.WriteString(name)
- }
- // Boolean flags of one ASCII letter are so common we
- // treat them specially, putting their usage on the same line.
- if b.Len() <= 4 { // space, space, '-', 'x'.
- b.WriteString("\t")
- } else {
- // Four spaces before the tab triggers good alignment
- // for both 4- and 8-space tab stops.
- b.WriteString("\n \t")
- }
- b.WriteString(strings.ReplaceAll(usage, "\n", "\n \t"))
-
- if !isZeroValue(flag, flag.DefValue) {
- if _, ok := flag.Value.(*stringValue); ok {
- // put quotes on the value
- fmt.Fprintf(&b, " (default %q)", flag.DefValue)
- } else {
- fmt.Fprintf(&b, " (default %v)", flag.DefValue)
- }
- }
- fmt.Fprint(f.Output(), b.String(), "\n")
- })
-}
-
-// PrintDefaults prints, to standard error unless configured otherwise,
-// a usage message showing the default settings of all defined
-// command-line flags.
-// For an integer valued flag x, the default output has the form
-// -x int
-// usage-message-for-x (default 7)
-// The usage message will appear on a separate line for anything but
-// a bool flag with a one-byte name. For bool flags, the type is
-// omitted and if the flag name is one byte the usage message appears
-// on the same line. The parenthetical default is omitted if the
-// default is the zero value for the type. The listed type, here int,
-// can be changed by placing a back-quoted name in the flag's usage
-// string; the first such item in the message is taken to be a parameter
-// name to show in the message and the back quotes are stripped from
-// the message when displayed. For instance, given
-// flag.String("I", "", "search `directory` for include files")
-// the output will be
-// -I directory
-// search directory for include files.
-//
-// To change the destination for flag messages, call CommandLine.SetOutput.
-func PrintDefaults() {
- CommandLine.PrintDefaults()
-}
-
-// defaultUsage is the default function to print a usage message.
-func (f *FlagSet) defaultUsage() {
- if f.name == "" {
- fmt.Fprintf(f.Output(), "Usage:\n")
- } else {
- fmt.Fprintf(f.Output(), "Usage of %s:\n", f.name)
- }
- f.PrintDefaults()
-}
-
-// NOTE: Usage is not just defaultUsage(CommandLine)
-// because it serves (via godoc flag Usage) as the example
-// for how to write your own usage function.
-
-// Usage prints a usage message documenting all defined command-line flags
-// to CommandLine's output, which by default is os.Stderr.
-// It is called when an error occurs while parsing flags.
-// The function is a variable that may be changed to point to a custom function.
-// By default it prints a simple header and calls PrintDefaults; for details about the
-// format of the output and how to control it, see the documentation for PrintDefaults.
-// Custom usage functions may choose to exit the program; by default exiting
-// happens anyway as the command line's error handling strategy is set to
-// ExitOnError.
-var Usage = func() {
- fmt.Fprintf(CommandLine.Output(), "Usage of %s:\n", os.Args[0])
- PrintDefaults()
-}
-
-// NFlag returns the number of flags that have been set.
-func (f *FlagSet) NFlag() int { return len(f.actual) }
-
-// NFlag returns the number of command-line flags that have been set.
-func NFlag() int { return len(CommandLine.actual) }
-
-// Arg returns the i'th argument. Arg(0) is the first remaining argument
-// after flags have been processed. Arg returns an empty string if the
-// requested element does not exist.
-func (f *FlagSet) Arg(i int) string {
- if i < 0 || i >= len(f.args) {
- return ""
- }
- return f.args[i]
-}
-
-// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
-// after flags have been processed. Arg returns an empty string if the
-// requested element does not exist.
-func Arg(i int) string {
- return CommandLine.Arg(i)
-}
-
-// NArg is the number of arguments remaining after flags have been processed.
-func (f *FlagSet) NArg() int { return len(f.args) }
-
-// NArg is the number of arguments remaining after flags have been processed.
-func NArg() int { return len(CommandLine.args) }
-
-// Args returns the non-flag arguments.
-func (f *FlagSet) Args() []string { return f.args }
-
-// Args returns the non-flag command-line arguments.
-func Args() []string { return CommandLine.args }
-
-// BoolVar defines a bool flag with specified name, default value, and usage string.
-// The argument p points to a bool variable in which to store the value of the flag.
-func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
- f.Var(newBoolValue(value, p), name, usage)
-}
-
-// BoolVar defines a bool flag with specified name, default value, and usage string.
-// The argument p points to a bool variable in which to store the value of the flag.
-func BoolVar(p *bool, name string, value bool, usage string) {
- CommandLine.Var(newBoolValue(value, p), name, usage)
-}
-
-// Bool defines a bool flag with specified name, default value, and usage string.
-// The return value is the address of a bool variable that stores the value of the flag.
-func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
- p := new(bool)
- f.BoolVar(p, name, value, usage)
- return p
-}
-
-// Bool defines a bool flag with specified name, default value, and usage string.
-// The return value is the address of a bool variable that stores the value of the flag.
-func Bool(name string, value bool, usage string) *bool {
- return CommandLine.Bool(name, value, usage)
-}
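The basic define-then-Parse flow in a minimal sketch (not part of this diff), mirroring the package comment's -n example:

package main

import (
    "flag"
    "fmt"
)

func main() {
    n := flag.Int("n", 1234, "help message for flag n")
    v := flag.Bool("v", false, "verbose output")
    flag.Parse()
    // e.g. "prog -v -n=7 extra" prints: 7 true [extra]
    fmt.Println(*n, *v, flag.Args())
}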
-
-// IntVar defines an int flag with specified name, default value, and usage string.
-// The argument p points to an int variable in which to store the value of the flag.
-func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
- f.Var(newIntValue(value, p), name, usage)
-}
-
-// IntVar defines an int flag with specified name, default value, and usage string.
-// The argument p points to an int variable in which to store the value of the flag.
-func IntVar(p *int, name string, value int, usage string) {
- CommandLine.Var(newIntValue(value, p), name, usage)
-}
-
-// Int defines an int flag with specified name, default value, and usage string.
-// The return value is the address of an int variable that stores the value of the flag.
-func (f *FlagSet) Int(name string, value int, usage string) *int {
- p := new(int)
- f.IntVar(p, name, value, usage)
- return p
-}
-
-// Int defines an int flag with specified name, default value, and usage string.
-// The return value is the address of an int variable that stores the value of the flag.
-func Int(name string, value int, usage string) *int {
- return CommandLine.Int(name, value, usage)
-}
-
-// Int64Var defines an int64 flag with specified name, default value, and usage string.
-// The argument p points to an int64 variable in which to store the value of the flag.
-func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
- f.Var(newInt64Value(value, p), name, usage)
-}
-
-// Int64Var defines an int64 flag with specified name, default value, and usage string.
-// The argument p points to an int64 variable in which to store the value of the flag.
-func Int64Var(p *int64, name string, value int64, usage string) {
- CommandLine.Var(newInt64Value(value, p), name, usage)
-}
-
-// Int64 defines an int64 flag with specified name, default value, and usage string.
-// The return value is the address of an int64 variable that stores the value of the flag.
-func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
- p := new(int64)
- f.Int64Var(p, name, value, usage)
- return p
-}
-
-// Int64 defines an int64 flag with specified name, default value, and usage string.
-// The return value is the address of an int64 variable that stores the value of the flag.
-func Int64(name string, value int64, usage string) *int64 {
- return CommandLine.Int64(name, value, usage)
-}
-
-// UintVar defines a uint flag with specified name, default value, and usage string.
-// The argument p points to a uint variable in which to store the value of the flag.
-func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
- f.Var(newUintValue(value, p), name, usage)
-}
-
-// UintVar defines a uint flag with specified name, default value, and usage string.
-// The argument p points to a uint variable in which to store the value of the flag.
-func UintVar(p *uint, name string, value uint, usage string) {
- CommandLine.Var(newUintValue(value, p), name, usage)
-}
-
-// Uint defines a uint flag with specified name, default value, and usage string.
-// The return value is the address of a uint variable that stores the value of the flag.
-func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
- p := new(uint)
- f.UintVar(p, name, value, usage)
- return p
-}
-
-// Uint defines a uint flag with specified name, default value, and usage string.
-// The return value is the address of a uint variable that stores the value of the flag.
-func Uint(name string, value uint, usage string) *uint {
- return CommandLine.Uint(name, value, usage)
-}
-
-// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
-// The argument p points to a uint64 variable in which to store the value of the flag.
-func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
- f.Var(newUint64Value(value, p), name, usage)
-}
-
-// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
-// The argument p points to a uint64 variable in which to store the value of the flag.
-func Uint64Var(p *uint64, name string, value uint64, usage string) {
- CommandLine.Var(newUint64Value(value, p), name, usage)
-}
-
-// Uint64 defines a uint64 flag with specified name, default value, and usage string.
-// The return value is the address of a uint64 variable that stores the value of the flag.
-func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
- p := new(uint64)
- f.Uint64Var(p, name, value, usage)
- return p
-}
-
-// Uint64 defines a uint64 flag with specified name, default value, and usage string.
-// The return value is the address of a uint64 variable that stores the value of the flag.
-func Uint64(name string, value uint64, usage string) *uint64 {
- return CommandLine.Uint64(name, value, usage)
-}
-
-// StringVar defines a string flag with specified name, default value, and usage string.
-// The argument p points to a string variable in which to store the value of the flag.
-func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
- f.Var(newStringValue(value, p), name, usage)
-}
-
-// StringVar defines a string flag with specified name, default value, and usage string.
-// The argument p points to a string variable in which to store the value of the flag.
-func StringVar(p *string, name string, value string, usage string) {
- CommandLine.Var(newStringValue(value, p), name, usage)
-}
-
-// String defines a string flag with specified name, default value, and usage string.
-// The return value is the address of a string variable that stores the value of the flag.
-func (f *FlagSet) String(name string, value string, usage string) *string {
- p := new(string)
- f.StringVar(p, name, value, usage)
- return p
-}
-
-// String defines a string flag with specified name, default value, and usage string.
-// The return value is the address of a string variable that stores the value of the flag.
-func String(name string, value string, usage string) *string {
- return CommandLine.String(name, value, usage)
-}
-
-// Float64Var defines a float64 flag with specified name, default value, and usage string.
-// The argument p points to a float64 variable in which to store the value of the flag.
-func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
- f.Var(newFloat64Value(value, p), name, usage)
-}
-
-// Float64Var defines a float64 flag with specified name, default value, and usage string.
-// The argument p points to a float64 variable in which to store the value of the flag.
-func Float64Var(p *float64, name string, value float64, usage string) {
- CommandLine.Var(newFloat64Value(value, p), name, usage)
-}
-
-// Float64 defines a float64 flag with specified name, default value, and usage string.
-// The return value is the address of a float64 variable that stores the value of the flag.
-func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
- p := new(float64)
- f.Float64Var(p, name, value, usage)
- return p
-}
-
-// Float64 defines a float64 flag with specified name, default value, and usage string.
-// The return value is the address of a float64 variable that stores the value of the flag.
-func Float64(name string, value float64, usage string) *float64 {
- return CommandLine.Float64(name, value, usage)
-}
-
-// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
-// The argument p points to a time.Duration variable in which to store the value of the flag.
-// The flag accepts a value acceptable to time.ParseDuration.
-func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
- f.Var(newDurationValue(value, p), name, usage)
-}
-
-// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
-// The argument p points to a time.Duration variable in which to store the value of the flag.
-// The flag accepts a value acceptable to time.ParseDuration.
-func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
- CommandLine.Var(newDurationValue(value, p), name, usage)
-}
-
-// Duration defines a time.Duration flag with specified name, default value, and usage string.
-// The return value is the address of a time.Duration variable that stores the value of the flag.
-// The flag accepts a value acceptable to time.ParseDuration.
-func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
- p := new(time.Duration)
- f.DurationVar(p, name, value, usage)
- return p
-}
-
-// Duration defines a time.Duration flag with specified name, default value, and usage string.
-// The return value is the address of a time.Duration variable that stores the value of the flag.
-// The flag accepts a value acceptable to time.ParseDuration.
-func Duration(name string, value time.Duration, usage string) *time.Duration {
- return CommandLine.Duration(name, value, usage)
-}
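
All of the typed constructors above funnel into Var with a matching Value implementation; the two styles differ only in whether the caller or the package allocates the variable. A minimal usage sketch, with illustrative flag names and defaults:

    package main

    import (
        "flag"
        "fmt"
        "time"
    )

    // Pointer style: the package allocates the bool.
    var verbose = flag.Bool("verbose", false, "enable verbose output")

    func main() {
        // Var style: the caller supplies the variable.
        var timeout time.Duration
        flag.DurationVar(&timeout, "timeout", 5*time.Second, "request timeout")
        flag.Parse()
        fmt.Println(*verbose, timeout)
    }
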
-
-// Func defines a flag with the specified name and usage string.
-// Each time the flag is seen, fn is called with the value of the flag.
-// If fn returns a non-nil error, it will be treated as a flag value parsing error.
-func (f *FlagSet) Func(name, usage string, fn func(string) error) {
- f.Var(funcValue(fn), name, usage)
-}
-
-// Func defines a flag with the specified name and usage string.
-// Each time the flag is seen, fn is called with the value of the flag.
-// If fn returns a non-nil error, it will be treated as a flag value parsing error.
-func Func(name, usage string, fn func(string) error) {
- CommandLine.Func(name, usage, fn)
-}
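
A short sketch of Func in use; the -ip flag and the net.ParseIP validation are illustrative:

    package main

    import (
        "flag"
        "fmt"
        "net"
    )

    func main() {
        var ip net.IP
        flag.Func("ip", "IP address to parse", func(s string) error {
            ip = net.ParseIP(s)
            if ip == nil {
                return fmt.Errorf("could not parse %q as an IP address", s)
            }
            return nil // a nil return accepts the value
        })
        flag.Parse()
        fmt.Println(ip)
    }
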
-
-// Var defines a flag with the specified name and usage string. The type and
-// value of the flag are represented by the first argument, of type Value, which
-// typically holds a user-defined implementation of Value. For instance, the
-// caller could create a flag that turns a comma-separated string into a slice
-// of strings by giving the slice the methods of Value; in particular, Set would
-// decompose the comma-separated string into the slice.
-func (f *FlagSet) Var(value Value, name string, usage string) {
- // Flag must not begin with "-" or contain "=".

- if strings.HasPrefix(name, "-") {
- panic(f.sprintf("flag %q begins with -", name))
- } else if strings.Contains(name, "=") {
- panic(f.sprintf("flag %q contains =", name))
- }
-
- // Remember the default value as a string; it won't change.
- flag := &Flag{name, usage, value, value.String()}
- _, alreadythere := f.formal[name]
- if alreadythere {
- var msg string
- if f.name == "" {
- msg = f.sprintf("flag redefined: %s", name)
- } else {
- msg = f.sprintf("%s flag redefined: %s", f.name, name)
- }
- panic(msg) // Happens only if flags are declared with identical names
- }
- if f.formal == nil {
- f.formal = make(map[string]*Flag)
- }
- f.formal[name] = flag
-}
-
-// Var defines a flag with the specified name and usage string. The type and
-// value of the flag are represented by the first argument, of type Value, which
-// typically holds a user-defined implementation of Value. For instance, the
-// caller could create a flag that turns a comma-separated string into a slice
-// of strings by giving the slice the methods of Value; in particular, Set would
-// decompose the comma-separated string into the slice.
-func Var(value Value, name string, usage string) {
- CommandLine.Var(value, name, usage)
-}
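
The comma-separated case the Var comment describes, written out as a minimal sketch (listValue and -hosts are illustrative names):

    package main

    import (
        "flag"
        "fmt"
        "strings"
    )

    // listValue satisfies flag.Value: Set decomposes the
    // comma-separated string into the underlying slice.
    type listValue []string

    func (l *listValue) String() string { return strings.Join(*l, ",") }

    func (l *listValue) Set(s string) error {
        *l = strings.Split(s, ",")
        return nil
    }

    func main() {
        var hosts listValue
        flag.Var(&hosts, "hosts", "comma-separated list of hosts")
        flag.Parse()
        fmt.Println([]string(hosts))
    }
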
-
-// sprintf formats the message, prints it to output, and returns it.
-func (f *FlagSet) sprintf(format string, a ...any) string {
- msg := fmt.Sprintf(format, a...)
- fmt.Fprintln(f.Output(), msg)
- return msg
-}
-
-// failf prints to standard error a formatted error and usage message and
-// returns the error.
-func (f *FlagSet) failf(format string, a ...any) error {
- msg := f.sprintf(format, a...)
- f.usage()
- return errors.New(msg)
-}
-
-// usage calls the Usage method for the flag set if one is specified,
-// or the appropriate default usage function otherwise.
-func (f *FlagSet) usage() {
- if f.Usage == nil {
- f.defaultUsage()
- } else {
- f.Usage()
- }
-}
-
-// parseOne parses one flag. It reports whether a flag was seen.
-func (f *FlagSet) parseOne() (bool, error) {
- if len(f.args) == 0 {
- return false, nil
- }
- s := f.args[0]
- if len(s) < 2 || s[0] != '-' {
- return false, nil
- }
- numMinuses := 1
- if s[1] == '-' {
- numMinuses++
- if len(s) == 2 { // "--" terminates the flags
- f.args = f.args[1:]
- return false, nil
- }
- }
- name := s[numMinuses:]
- if len(name) == 0 || name[0] == '-' || name[0] == '=' {
- return false, f.failf("bad flag syntax: %s", s)
- }
-
- // it's a flag. does it have an argument?
- f.args = f.args[1:]
- hasValue := false
- value := ""
- for i := 1; i < len(name); i++ { // equals cannot be first
- if name[i] == '=' {
- value = name[i+1:]
- hasValue = true
- name = name[0:i]
- break
- }
- }
- m := f.formal
- flag, alreadythere := m[name] // BUG
- if !alreadythere {
- if name == "help" || name == "h" { // special case for nice help message.
- f.usage()
- return false, ErrHelp
- }
- return false, f.failf("flag provided but not defined: -%s", name)
- }
-
- if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg
- if hasValue {
- if err := fv.Set(value); err != nil {
- return false, f.failf("invalid boolean value %q for -%s: %v", value, name, err)
- }
- } else {
- if err := fv.Set("true"); err != nil {
- return false, f.failf("invalid boolean flag %s: %v", name, err)
- }
- }
- } else {
- // It must have a value, which might be the next argument.
- if !hasValue && len(f.args) > 0 {
- // value is the next arg
- hasValue = true
- value, f.args = f.args[0], f.args[1:]
- }
- if !hasValue {
- return false, f.failf("flag needs an argument: -%s", name)
- }
- if err := flag.Value.Set(value); err != nil {
- return false, f.failf("invalid value %q for flag -%s: %v", value, name, err)
- }
- }
- if f.actual == nil {
- f.actual = make(map[string]*Flag)
- }
- f.actual[name] = flag
- return true, nil
-}
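
Because of the boolFlag special case above, a boolean flag takes an explicit value only in the -name=value form; a detached value is not consumed and becomes a positional argument. A small sketch:

    package main

    import (
        "flag"
        "fmt"
    )

    func main() {
        fs := flag.NewFlagSet("demo", flag.ContinueOnError)
        v := fs.Bool("v", false, "verbose")

        // "-v=false" sets the flag; a bare "false" would be
        // left over in fs.Args() instead.
        fs.Parse([]string{"-v=false", "rest"})
        fmt.Println(*v, fs.Args()) // false [rest]
    }
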
-
-// Parse parses flag definitions from the argument list, which should not
-// include the command name. Must be called after all flags in the FlagSet
-// are defined and before flags are accessed by the program.
-// The return value will be ErrHelp if -help or -h were set but not defined.
-func (f *FlagSet) Parse(arguments []string) error {
- f.parsed = true
- f.args = arguments
- for {
- seen, err := f.parseOne()
- if seen {
- continue
- }
- if err == nil {
- break
- }
- switch f.errorHandling {
- case ContinueOnError:
- return err
- case ExitOnError:
- if err == ErrHelp {
- os.Exit(0)
- }
- os.Exit(2)
- case PanicOnError:
- panic(err)
- }
- }
- return nil
-}
-
-// Parsed reports whether f.Parse has been called.
-func (f *FlagSet) Parsed() bool {
- return f.parsed
-}
-
-// Parse parses the command-line flags from os.Args[1:]. Must be called
-// after all flags are defined and before flags are accessed by the program.
-func Parse() {
- // Ignore errors; CommandLine is set for ExitOnError.
- CommandLine.Parse(os.Args[1:])
-}
-
-// Parsed reports whether the command-line flags have been parsed.
-func Parsed() bool {
- return CommandLine.Parsed()
-}
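
Under ContinueOnError the ErrHelp case above is observable by the caller rather than exiting the process. A minimal sketch (the "tool" name is illustrative; output is discarded to keep the demo quiet):

    package main

    import (
        "errors"
        "flag"
        "fmt"
        "io"
    )

    func main() {
        fs := flag.NewFlagSet("tool", flag.ContinueOnError)
        fs.SetOutput(io.Discard) // suppress the usage message

        err := fs.Parse([]string{"-h"}) // -h is not defined
        fmt.Println(errors.Is(err, flag.ErrHelp)) // true
    }
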
-
-// CommandLine is the default set of command-line flags, parsed from os.Args.
-// The top-level functions such as BoolVar, Arg, and so on are wrappers for the
-// methods of CommandLine.
-var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
-
-func init() {
- // Override generic FlagSet default Usage with call to global Usage.
- // Note: This is not CommandLine.Usage = Usage,
- // because we want any eventual call to use any updated value of Usage,
- // not the value it has when this line is run.
- CommandLine.Usage = commandLineUsage
-}
-
-func commandLineUsage() {
- Usage()
-}
-
-// NewFlagSet returns a new, empty flag set with the specified name and
-// error handling property. If the name is not empty, it will be printed
-// in the default usage message and in error messages.
-func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
- f := &FlagSet{
- name: name,
- errorHandling: errorHandling,
- }
- f.Usage = f.defaultUsage
- return f
-}
-
-// Init sets the name and error handling property for a flag set.
-// By default, the zero FlagSet uses an empty name and the
-// ContinueOnError error handling policy.
-func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
- f.name = name
- f.errorHandling = errorHandling
-}
diff --git a/contrib/go/_std_1.18/src/fmt/doc.go b/contrib/go/_std_1.18/src/fmt/doc.go
deleted file mode 100644
index c584cc9465..0000000000
--- a/contrib/go/_std_1.18/src/fmt/doc.go
+++ /dev/null
@@ -1,343 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
- Package fmt implements formatted I/O with functions analogous
- to C's printf and scanf. The format 'verbs' are derived from C's but
- are simpler.
-
-
- Printing
-
- The verbs:
-
- General:
- %v the value in a default format
- when printing structs, the plus flag (%+v) adds field names
- %#v a Go-syntax representation of the value
- %T a Go-syntax representation of the type of the value
- %% a literal percent sign; consumes no value
-
- Boolean:
- %t the word true or false
- Integer:
- %b base 2
- %c the character represented by the corresponding Unicode code point
- %d base 10
- %o base 8
- %O base 8 with 0o prefix
- %q a single-quoted character literal safely escaped with Go syntax.
- %x base 16, with lower-case letters for a-f
- %X base 16, with upper-case letters for A-F
- %U Unicode format: U+1234; same as "U+%04X"
- Floating-point and complex constituents:
- %b decimalless scientific notation with exponent a power of two,
- in the manner of strconv.FormatFloat with the 'b' format,
- e.g. -123456p-78
- %e scientific notation, e.g. -1.234456e+78
- %E scientific notation, e.g. -1.234456E+78
- %f decimal point but no exponent, e.g. 123.456
- %F synonym for %f
- %g %e for large exponents, %f otherwise. Precision is discussed below.
- %G %E for large exponents, %F otherwise
- %x hexadecimal notation (with decimal power of two exponent), e.g. -0x1.23abcp+20
- %X upper-case hexadecimal notation, e.g. -0X1.23ABCP+20
- String and slice of bytes (treated equivalently with these verbs):
- %s the uninterpreted bytes of the string or slice
- %q a double-quoted string safely escaped with Go syntax
- %x base 16, lower-case, two characters per byte
- %X base 16, upper-case, two characters per byte
- Slice:
- %p address of 0th element in base 16 notation, with leading 0x
- Pointer:
- %p base 16 notation, with leading 0x
- The %b, %d, %o, %x and %X verbs also work with pointers,
- formatting the value exactly as if it were an integer.
-
- The default format for %v is:
- bool: %t
- int, int8 etc.: %d
- uint, uint8 etc.: %d, %#x if printed with %#v
- float32, complex64, etc: %g
- string: %s
- chan: %p
- pointer: %p
- For compound objects, the elements are printed using these rules, recursively,
- laid out like this:
- struct: {field0 field1 ...}
- array, slice: [elem0 elem1 ...]
- maps: map[key1:value1 key2:value2 ...]
- pointer to above: &{}, &[], &map[]
-
- Width is specified by an optional decimal number immediately preceding the verb.
- If absent, the width is whatever is necessary to represent the value.
- Precision is specified after the (optional) width by a period followed by a
- decimal number. If no period is present, a default precision is used.
- A period with no following number specifies a precision of zero.
- Examples:
- %f default width, default precision
- %9f width 9, default precision
- %.2f default width, precision 2
- %9.2f width 9, precision 2
- %9.f width 9, precision 0
-
- Width and precision are measured in units of Unicode code points,
- that is, runes. (This differs from C's printf where the
- units are always measured in bytes.) Either or both of the flags
- may be replaced with the character '*', causing their values to be
- obtained from the next operand (preceding the one to format),
- which must be of type int.
-
- For most values, width is the minimum number of runes to output,
- padding the formatted form with spaces if necessary.
-
- For strings, byte slices and byte arrays, however, precision
- limits the length of the input to be formatted (not the size of
- the output), truncating if necessary. Normally it is measured in
- runes, but for these types when formatted with the %x or %X format
- it is measured in bytes.
-
- For floating-point values, width sets the minimum width of the field and
- precision sets the number of places after the decimal, if appropriate,
- except that for %g/%G precision sets the maximum number of significant
- digits (trailing zeros are removed). For example, given 12.345 the format
- %6.3f prints 12.345 while %.3g prints 12.3. The default precision for %e, %f
- and %#g is 6; for %g it is the smallest number of digits necessary to identify
- the value uniquely.
-
- For complex numbers, the width and precision apply to the two
- components independently and the result is parenthesized, so %f applied
- to 1.2+3.4i produces (1.200000+3.400000i).
-
- Other flags:
- + always print a sign for numeric values;
- guarantee ASCII-only output for %q (%+q)
- - pad with spaces on the right rather than the left (left-justify the field)
- # alternate format: add leading 0b for binary (%#b), 0 for octal (%#o),
- 0x or 0X for hex (%#x or %#X); suppress 0x for %p (%#p);
- for %q, print a raw (backquoted) string if strconv.CanBackquote
- returns true;
- always print a decimal point for %e, %E, %f, %F, %g and %G;
- do not remove trailing zeros for %g and %G;
- write e.g. U+0078 'x' if the character is printable for %U (%#U).
- ' ' (space) leave a space for elided sign in numbers (% d);
- put spaces between bytes printing strings or slices in hex (% x, % X)
- 0 pad with leading zeros rather than spaces;
- for numbers, this moves the padding after the sign
-
- Flags are ignored by verbs that do not expect them.
- For example there is no alternate decimal format, so %#d and %d
- behave identically.
-
- For each Printf-like function, there is also a Print function
- that takes no format and is equivalent to saying %v for every
- operand. Another variant Println inserts blanks between
- operands and appends a newline.
-
- Regardless of the verb, if an operand is an interface value,
- the internal concrete value is used, not the interface itself.
- Thus:
- var i interface{} = 23
- fmt.Printf("%v\n", i)
- will print 23.
-
- Except when printed using the verbs %T and %p, special
- formatting considerations apply for operands that implement
- certain interfaces. In order of application:
-
- 1. If the operand is a reflect.Value, the operand is replaced by the
- concrete value that it holds, and printing continues with the next rule.
-
- 2. If an operand implements the Formatter interface, it will
- be invoked. In this case the interpretation of verbs and flags is
- controlled by that implementation.
-
- 3. If the %v verb is used with the # flag (%#v) and the operand
- implements the GoStringer interface, that will be invoked.
-
- If the format (which is implicitly %v for Println etc.) is valid
- for a string (%s %q %v %x %X), the following two rules apply:
-
- 4. If an operand implements the error interface, the Error method
- will be invoked to convert the object to a string, which will then
- be formatted as required by the verb (if any).
-
- 5. If an operand implements method String() string, that method
- will be invoked to convert the object to a string, which will then
- be formatted as required by the verb (if any).
-
- For compound operands such as slices and structs, the format
- applies to the elements of each operand, recursively, not to the
- operand as a whole. Thus %q will quote each element of a slice
- of strings, and %6.2f will control formatting for each element
- of a floating-point array.
-
- However, when printing a byte slice with a string-like verb
- (%s %q %x %X), it is treated identically to a string, as a single item.
-
- To avoid recursion in cases such as
- type X string
- func (x X) String() string { return Sprintf("<%s>", x) }
- convert the value before recurring:
- func (x X) String() string { return Sprintf("<%s>", string(x)) }
- Infinite recursion can also be triggered by self-referential data
- structures, such as a slice that contains itself as an element, if
- that type has a String method. Such pathologies are rare, however,
- and the package does not protect against them.
-
- When printing a struct, fmt cannot and therefore does not invoke
- formatting methods such as Error or String on unexported fields.
-
- Explicit argument indexes
-
- In Printf, Sprintf, and Fprintf, the default behavior is for each
- formatting verb to format successive arguments passed in the call.
- However, the notation [n] immediately before the verb indicates that the
- nth one-indexed argument is to be formatted instead. The same notation
- before a '*' for a width or precision selects the argument index holding
- the value. After processing a bracketed expression [n], subsequent verbs
- will use arguments n+1, n+2, etc. unless otherwise directed.
-
- For example,
- fmt.Sprintf("%[2]d %[1]d\n", 11, 22)
- will yield "22 11", while
- fmt.Sprintf("%[3]*.[2]*[1]f", 12.0, 2, 6)
- is equivalent to
- fmt.Sprintf("%6.2f", 12.0)
- will yield " 12.00". Because an explicit index affects subsequent verbs,
- this notation can be used to print the same values multiple times
- by resetting the index for the first argument to be repeated:
- fmt.Sprintf("%d %d %#[1]x %#x", 16, 17)
- will yield "16 17 0x10 0x11".
-
- Format errors
-
- If an invalid argument is given for a verb, such as providing
- a string to %d, the generated string will contain a
- description of the problem, as in these examples:
-
- Wrong type or unknown verb: %!verb(type=value)
- Printf("%d", "hi"): %!d(string=hi)
- Too many arguments: %!(EXTRA type=value)
- Printf("hi", "guys"): hi%!(EXTRA string=guys)
- Too few arguments: %!verb(MISSING)
- Printf("hi%d"): hi%!d(MISSING)
- Non-int for width or precision: %!(BADWIDTH) or %!(BADPREC)
- Printf("%*s", 4.5, "hi"): %!(BADWIDTH)hi
- Printf("%.*s", 4.5, "hi"): %!(BADPREC)hi
- Invalid or misused argument index: %!(BADINDEX)
- Printf("%*[2]d", 7): %!d(BADINDEX)
- Printf("%.[2]d", 7): %!d(BADINDEX)
-
- All errors begin with the string "%!" followed sometimes
- by a single character (the verb) and end with a parenthesized
- description.
-
- If an Error or String method triggers a panic when called by a
- print routine, the fmt package reformats the error message
- from the panic, decorating it with an indication that it came
- through the fmt package. For example, if a String method
- calls panic("bad"), the resulting formatted message will look
- like
- %!s(PANIC=bad)
-
- The %!s just shows the print verb in use when the failure
- occurred. If the panic is caused by a nil receiver to an Error
- or String method, however, the output is the undecorated
- string, "<nil>".
-
- Scanning
-
- An analogous set of functions scans formatted text to yield
- values. Scan, Scanf and Scanln read from os.Stdin; Fscan,
- Fscanf and Fscanln read from a specified io.Reader; Sscan,
- Sscanf and Sscanln read from an argument string.
-
- Scan, Fscan, Sscan treat newlines in the input as spaces.
-
- Scanln, Fscanln and Sscanln stop scanning at a newline and
- require that the items be followed by a newline or EOF.
-
- Scanf, Fscanf, and Sscanf parse the arguments according to a
- format string, analogous to that of Printf. In the text that
- follows, 'space' means any Unicode whitespace character
- except newline.
-
- In the format string, a verb introduced by the % character
- consumes and parses input; these verbs are described in more
- detail below. A character other than %, space, or newline in
- the format consumes exactly that input character, which must
- be present. A newline with zero or more spaces before it in
- the format string consumes zero or more spaces in the input
- followed by a single newline or the end of the input. A space
- following a newline in the format string consumes zero or more
- spaces in the input. Otherwise, any run of one or more spaces
- in the format string consumes as many spaces as possible in
- the input. Unless the run of spaces in the format string
- appears adjacent to a newline, the run must consume at least
- one space from the input or find the end of the input.
-
- The handling of spaces and newlines differs from that of C's
- scanf family: in C, newlines are treated as any other space,
- and it is never an error when a run of spaces in the format
- string finds no spaces to consume in the input.
-
- The verbs behave analogously to those of Printf.
- For example, %x will scan an integer as a hexadecimal number,
- and %v will scan the default representation format for the value.
- The Printf verbs %p and %T and the flags # and + are not implemented.
- For floating-point and complex values, all valid formatting verbs
- (%b %e %E %f %F %g %G %x %X and %v) are equivalent and accept
- both decimal and hexadecimal notation (for example: "2.3e+7", "0x4.5p-8")
- and digit-separating underscores (for example: "3.14159_26535_89793").
-
- Input processed by verbs is implicitly space-delimited: the
- implementation of every verb except %c starts by discarding
- leading spaces from the remaining input, and the %s verb
- (and %v reading into a string) stops consuming input at the first
- space or newline character.
-
- The familiar base-setting prefixes 0b (binary), 0o and 0 (octal),
- and 0x (hexadecimal) are accepted when scanning integers
- without a format or with the %v verb, as are digit-separating
- underscores.
-
- Width is interpreted in the input text but there is no
- syntax for scanning with a precision (no %5.2f, just %5f).
- If width is provided, it applies after leading spaces are
- trimmed and specifies the maximum number of runes to read
- to satisfy the verb. For example,
- Sscanf(" 1234567 ", "%5s%d", &s, &i)
- will set s to "12345" and i to 67 while
- Sscanf(" 12 34 567 ", "%5s%d", &s, &i)
- will set s to "12" and i to 34.
-
- In all the scanning functions, a carriage return followed
- immediately by a newline is treated as a plain newline
- (\r\n means the same as \n).
-
- In all the scanning functions, if an operand implements method
- Scan (that is, it implements the Scanner interface) that
- method will be used to scan the text for that operand. Also,
- if the number of arguments scanned is less than the number of
- arguments provided, an error is returned.
-
- All arguments to be scanned must be either pointers to basic
- types or implementations of the Scanner interface.
-
- Like Scanf and Fscanf, Sscanf need not consume its entire input.
- There is no way to recover how much of the input string Sscanf used.
-
- Note: Fscan etc. can read one character (rune) past the input
- they return, which means that a loop calling a scan routine
- may skip some of the input. This is usually a problem only
- when there is no space between input values. If the reader
- provided to Fscan implements ReadRune, that method will be used
- to read characters. If the reader also implements UnreadRune,
- that method will be used to save the character and successive
- calls will not lose data. To attach ReadRune and UnreadRune
- methods to a reader without that capability, use
- bufio.NewReader.
-*/
-package fmt
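
A small program exercising a few of the verbs and the width/precision syntax documented above (the point type is illustrative):

    package main

    import "fmt"

    type point struct{ X, Y int }

    func main() {
        p := point{1, 2}
        fmt.Printf("%v\n", p)  // {1 2}
        fmt.Printf("%+v\n", p) // {X:1 Y:2}  (plus flag adds field names)
        fmt.Printf("%#v\n", p) // main.point{X:1, Y:2}
        fmt.Printf("%9.2f\n", 12.345) // "    12.35": width 9, precision 2
    }
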
diff --git a/contrib/go/_std_1.18/src/fmt/print.go b/contrib/go/_std_1.18/src/fmt/print.go
deleted file mode 100644
index 1c37c3cb7b..0000000000
--- a/contrib/go/_std_1.18/src/fmt/print.go
+++ /dev/null
@@ -1,1172 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fmt
-
-import (
- "internal/fmtsort"
- "io"
- "os"
- "reflect"
- "sync"
- "unicode/utf8"
-)
-
-// Strings for use with buffer.WriteString.
-// This is less overhead than using buffer.Write with byte arrays.
-const (
- commaSpaceString = ", "
- nilAngleString = "<nil>"
- nilParenString = "(nil)"
- nilString = "nil"
- mapString = "map["
- percentBangString = "%!"
- missingString = "(MISSING)"
- badIndexString = "(BADINDEX)"
- panicString = "(PANIC="
- extraString = "%!(EXTRA "
- badWidthString = "%!(BADWIDTH)"
- badPrecString = "%!(BADPREC)"
- noVerbString = "%!(NOVERB)"
- invReflectString = "<invalid reflect.Value>"
-)
-
-// State represents the printer state passed to custom formatters.
-// It provides access to the io.Writer interface plus information about
-// the flags and options for the operand's format specifier.
-type State interface {
- // Write is the function to call to emit formatted output to be printed.
- Write(b []byte) (n int, err error)
- // Width returns the value of the width option and whether it has been set.
- Width() (wid int, ok bool)
- // Precision returns the value of the precision option and whether it has been set.
- Precision() (prec int, ok bool)
-
- // Flag reports whether the flag c, a character, has been set.
- Flag(c int) bool
-}
-
-// Formatter is implemented by any value that has a Format method.
-// The implementation controls how State and rune are interpreted,
-// and may call Sprint(f) or Fprint(f) etc. to generate its output.
-type Formatter interface {
- Format(f State, verb rune)
-}
-
-// Stringer is implemented by any value that has a String method,
-// which defines the “native” format for that value.
-// The String method is used to print values passed as an operand
-// to any format that accepts a string or to an unformatted printer
-// such as Print.
-type Stringer interface {
- String() string
-}
-
-// GoStringer is implemented by any value that has a GoString method,
-// which defines the Go syntax for that value.
-// The GoString method is used to print values passed as an operand
-// to a %#v format.
-type GoStringer interface {
- GoString() string
-}
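
A minimal Stringer sketch (the temperature type is illustrative). Note the conversion to float64 inside String, which sidesteps the self-recursion pitfall the package documentation warns about:

    package main

    import "fmt"

    type temperature float64

    // String defines the type's native format; %v, %s and the
    // unformatted printers all pick it up automatically.
    func (t temperature) String() string {
        return fmt.Sprintf("%.1f°C", float64(t))
    }

    func main() {
        fmt.Println(temperature(21.53)) // 21.5°C
    }
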
-
-// Use simple []byte instead of bytes.Buffer to avoid large dependency.
-type buffer []byte
-
-func (b *buffer) write(p []byte) {
- *b = append(*b, p...)
-}
-
-func (b *buffer) writeString(s string) {
- *b = append(*b, s...)
-}
-
-func (b *buffer) writeByte(c byte) {
- *b = append(*b, c)
-}
-
-func (bp *buffer) writeRune(r rune) {
- if r < utf8.RuneSelf {
- *bp = append(*bp, byte(r))
- return
- }
-
- b := *bp
- n := len(b)
- for n+utf8.UTFMax > cap(b) {
- b = append(b, 0)
- }
- w := utf8.EncodeRune(b[n:n+utf8.UTFMax], r)
- *bp = b[:n+w]
-}
-
-// pp is used to store a printer's state and is reused with sync.Pool to avoid allocations.
-type pp struct {
- buf buffer
-
- // arg holds the current item, as an interface{}.
- arg any
-
- // value is used instead of arg for reflect values.
- value reflect.Value
-
- // fmt is used to format basic items such as integers or strings.
- fmt fmt
-
- // reordered records whether the format string used argument reordering.
- reordered bool
- // goodArgNum records whether the most recent reordering directive was valid.
- goodArgNum bool
- // panicking is set by catchPanic to avoid infinite panic, recover, panic, ... recursion.
- panicking bool
- // erroring is set when printing an error string to guard against calling handleMethods.
- erroring bool
- // wrapErrs is set when the format string may contain a %w verb.
- wrapErrs bool
- // wrappedErr records the target of the %w verb.
- wrappedErr error
-}
-
-var ppFree = sync.Pool{
- New: func() any { return new(pp) },
-}
-
-// newPrinter allocates a new pp struct or grabs a cached one.
-func newPrinter() *pp {
- p := ppFree.Get().(*pp)
- p.panicking = false
- p.erroring = false
- p.wrapErrs = false
- p.fmt.init(&p.buf)
- return p
-}
-
-// free saves used pp structs in ppFree; avoids an allocation per invocation.
-func (p *pp) free() {
- // Proper usage of a sync.Pool requires each entry to have approximately
- // the same memory cost. To obtain this property when the stored type
- // contains a variably-sized buffer, we add a hard limit on the maximum buffer
- // to place back in the pool.
- //
- // See https://golang.org/issue/23199
- if cap(p.buf) > 64<<10 {
- return
- }
-
- p.buf = p.buf[:0]
- p.arg = nil
- p.value = reflect.Value{}
- p.wrappedErr = nil
- ppFree.Put(p)
-}
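
The same size-capping pattern as a standalone sketch, assuming the same 64 KiB threshold and illustrative names:

    package main

    import "sync"

    var bufPool = sync.Pool{
        New: func() any { return make([]byte, 0, 1024) },
    }

    // putBuf mirrors pp.free: oversized buffers are dropped so
    // pooled entries keep roughly uniform memory cost.
    func putBuf(b []byte) {
        if cap(b) > 64<<10 {
            return
        }
        bufPool.Put(b[:0])
    }

    func main() {
        b := bufPool.Get().([]byte)
        b = append(b, "hello"...)
        putBuf(b)
    }
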
-
-func (p *pp) Width() (wid int, ok bool) { return p.fmt.wid, p.fmt.widPresent }
-
-func (p *pp) Precision() (prec int, ok bool) { return p.fmt.prec, p.fmt.precPresent }
-
-func (p *pp) Flag(b int) bool {
- switch b {
- case '-':
- return p.fmt.minus
- case '+':
- return p.fmt.plus || p.fmt.plusV
- case '#':
- return p.fmt.sharp || p.fmt.sharpV
- case ' ':
- return p.fmt.space
- case '0':
- return p.fmt.zero
- }
- return false
-}
-
-// Implement Write so we can call Fprintf on a pp (through State), for
-// recursive use in custom verbs.
-func (p *pp) Write(b []byte) (ret int, err error) {
- p.buf.write(b)
- return len(b), nil
-}
-
-// Implement WriteString so that we can call io.WriteString
-// on a pp (through state), for efficiency.
-func (p *pp) WriteString(s string) (ret int, err error) {
- p.buf.writeString(s)
- return len(s), nil
-}
-
-// These routines end in 'f' and take a format string.
-
-// Fprintf formats according to a format specifier and writes to w.
-// It returns the number of bytes written and any write error encountered.
-func Fprintf(w io.Writer, format string, a ...any) (n int, err error) {
- p := newPrinter()
- p.doPrintf(format, a)
- n, err = w.Write(p.buf)
- p.free()
- return
-}
-
-// Printf formats according to a format specifier and writes to standard output.
-// It returns the number of bytes written and any write error encountered.
-func Printf(format string, a ...any) (n int, err error) {
- return Fprintf(os.Stdout, format, a...)
-}
-
-// Sprintf formats according to a format specifier and returns the resulting string.
-func Sprintf(format string, a ...any) string {
- p := newPrinter()
- p.doPrintf(format, a)
- s := string(p.buf)
- p.free()
- return s
-}
-
-// These routines do not take a format string
-
-// Fprint formats using the default formats for its operands and writes to w.
-// Spaces are added between operands when neither is a string.
-// It returns the number of bytes written and any write error encountered.
-func Fprint(w io.Writer, a ...any) (n int, err error) {
- p := newPrinter()
- p.doPrint(a)
- n, err = w.Write(p.buf)
- p.free()
- return
-}
-
-// Print formats using the default formats for its operands and writes to standard output.
-// Spaces are added between operands when neither is a string.
-// It returns the number of bytes written and any write error encountered.
-func Print(a ...any) (n int, err error) {
- return Fprint(os.Stdout, a...)
-}
-
-// Sprint formats using the default formats for its operands and returns the resulting string.
-// Spaces are added between operands when neither is a string.
-func Sprint(a ...any) string {
- p := newPrinter()
- p.doPrint(a)
- s := string(p.buf)
- p.free()
- return s
-}
-
-// These routines end in 'ln', do not take a format string,
-// always add spaces between operands, and add a newline
-// after the last operand.
-
-// Fprintln formats using the default formats for its operands and writes to w.
-// Spaces are always added between operands and a newline is appended.
-// It returns the number of bytes written and any write error encountered.
-func Fprintln(w io.Writer, a ...any) (n int, err error) {
- p := newPrinter()
- p.doPrintln(a)
- n, err = w.Write(p.buf)
- p.free()
- return
-}
-
-// Println formats using the default formats for its operands and writes to standard output.
-// Spaces are always added between operands and a newline is appended.
-// It returns the number of bytes written and any write error encountered.
-func Println(a ...any) (n int, err error) {
- return Fprintln(os.Stdout, a...)
-}
-
-// Sprintln formats using the default formats for its operands and returns the resulting string.
-// Spaces are always added between operands and a newline is appended.
-func Sprintln(a ...any) string {
- p := newPrinter()
- p.doPrintln(a)
- s := string(p.buf)
- p.free()
- return s
-}
-
-// getField gets the i'th field of the struct value.
-// If the field is itself is an interface, return a value for
-// the thing inside the interface, not the interface itself.
-func getField(v reflect.Value, i int) reflect.Value {
- val := v.Field(i)
- if val.Kind() == reflect.Interface && !val.IsNil() {
- val = val.Elem()
- }
- return val
-}
-
-// tooLarge reports whether the magnitude of the integer is
-// too large to be used as a formatting width or precision.
-func tooLarge(x int) bool {
- const max int = 1e6
- return x > max || x < -max
-}
-
-// parsenum converts ASCII to integer. num is 0 (and isnum is false) if no number is present.
-func parsenum(s string, start, end int) (num int, isnum bool, newi int) {
- if start >= end {
- return 0, false, end
- }
- for newi = start; newi < end && '0' <= s[newi] && s[newi] <= '9'; newi++ {
- if tooLarge(num) {
- return 0, false, end // Overflow; crazy long number most likely.
- }
- num = num*10 + int(s[newi]-'0')
- isnum = true
- }
- return
-}
-
-func (p *pp) unknownType(v reflect.Value) {
- if !v.IsValid() {
- p.buf.writeString(nilAngleString)
- return
- }
- p.buf.writeByte('?')
- p.buf.writeString(v.Type().String())
- p.buf.writeByte('?')
-}
-
-func (p *pp) badVerb(verb rune) {
- p.erroring = true
- p.buf.writeString(percentBangString)
- p.buf.writeRune(verb)
- p.buf.writeByte('(')
- switch {
- case p.arg != nil:
- p.buf.writeString(reflect.TypeOf(p.arg).String())
- p.buf.writeByte('=')
- p.printArg(p.arg, 'v')
- case p.value.IsValid():
- p.buf.writeString(p.value.Type().String())
- p.buf.writeByte('=')
- p.printValue(p.value, 'v', 0)
- default:
- p.buf.writeString(nilAngleString)
- }
- p.buf.writeByte(')')
- p.erroring = false
-}
-
-func (p *pp) fmtBool(v bool, verb rune) {
- switch verb {
- case 't', 'v':
- p.fmt.fmtBoolean(v)
- default:
- p.badVerb(verb)
- }
-}
-
-// fmt0x64 formats a uint64 in hexadecimal and prefixes it with 0x or
-// not, as requested, by temporarily setting the sharp flag.
-func (p *pp) fmt0x64(v uint64, leading0x bool) {
- sharp := p.fmt.sharp
- p.fmt.sharp = leading0x
- p.fmt.fmtInteger(v, 16, unsigned, 'v', ldigits)
- p.fmt.sharp = sharp
-}
-
-// fmtInteger formats a signed or unsigned integer.
-func (p *pp) fmtInteger(v uint64, isSigned bool, verb rune) {
- switch verb {
- case 'v':
- if p.fmt.sharpV && !isSigned {
- p.fmt0x64(v, true)
- } else {
- p.fmt.fmtInteger(v, 10, isSigned, verb, ldigits)
- }
- case 'd':
- p.fmt.fmtInteger(v, 10, isSigned, verb, ldigits)
- case 'b':
- p.fmt.fmtInteger(v, 2, isSigned, verb, ldigits)
- case 'o', 'O':
- p.fmt.fmtInteger(v, 8, isSigned, verb, ldigits)
- case 'x':
- p.fmt.fmtInteger(v, 16, isSigned, verb, ldigits)
- case 'X':
- p.fmt.fmtInteger(v, 16, isSigned, verb, udigits)
- case 'c':
- p.fmt.fmtC(v)
- case 'q':
- p.fmt.fmtQc(v)
- case 'U':
- p.fmt.fmtUnicode(v)
- default:
- p.badVerb(verb)
- }
-}
-
-// fmtFloat formats a float. The default precision for each verb
-// is specified as last argument in the call to fmt_float.
-func (p *pp) fmtFloat(v float64, size int, verb rune) {
- switch verb {
- case 'v':
- p.fmt.fmtFloat(v, size, 'g', -1)
- case 'b', 'g', 'G', 'x', 'X':
- p.fmt.fmtFloat(v, size, verb, -1)
- case 'f', 'e', 'E':
- p.fmt.fmtFloat(v, size, verb, 6)
- case 'F':
- p.fmt.fmtFloat(v, size, 'f', 6)
- default:
- p.badVerb(verb)
- }
-}
-
-// fmtComplex formats a complex number v with
-// r = real(v) and j = imag(v) as (r+ji) using
-// fmtFloat for r and j formatting.
-func (p *pp) fmtComplex(v complex128, size int, verb rune) {
- // Make sure any unsupported verbs are found before the
- // calls to fmtFloat to not generate an incorrect error string.
- switch verb {
- case 'v', 'b', 'g', 'G', 'x', 'X', 'f', 'F', 'e', 'E':
- oldPlus := p.fmt.plus
- p.buf.writeByte('(')
- p.fmtFloat(real(v), size/2, verb)
- // Imaginary part always has a sign.
- p.fmt.plus = true
- p.fmtFloat(imag(v), size/2, verb)
- p.buf.writeString("i)")
- p.fmt.plus = oldPlus
- default:
- p.badVerb(verb)
- }
-}
-
-func (p *pp) fmtString(v string, verb rune) {
- switch verb {
- case 'v':
- if p.fmt.sharpV {
- p.fmt.fmtQ(v)
- } else {
- p.fmt.fmtS(v)
- }
- case 's':
- p.fmt.fmtS(v)
- case 'x':
- p.fmt.fmtSx(v, ldigits)
- case 'X':
- p.fmt.fmtSx(v, udigits)
- case 'q':
- p.fmt.fmtQ(v)
- default:
- p.badVerb(verb)
- }
-}
-
-func (p *pp) fmtBytes(v []byte, verb rune, typeString string) {
- switch verb {
- case 'v', 'd':
- if p.fmt.sharpV {
- p.buf.writeString(typeString)
- if v == nil {
- p.buf.writeString(nilParenString)
- return
- }
- p.buf.writeByte('{')
- for i, c := range v {
- if i > 0 {
- p.buf.writeString(commaSpaceString)
- }
- p.fmt0x64(uint64(c), true)
- }
- p.buf.writeByte('}')
- } else {
- p.buf.writeByte('[')
- for i, c := range v {
- if i > 0 {
- p.buf.writeByte(' ')
- }
- p.fmt.fmtInteger(uint64(c), 10, unsigned, verb, ldigits)
- }
- p.buf.writeByte(']')
- }
- case 's':
- p.fmt.fmtBs(v)
- case 'x':
- p.fmt.fmtBx(v, ldigits)
- case 'X':
- p.fmt.fmtBx(v, udigits)
- case 'q':
- p.fmt.fmtQ(string(v))
- default:
- p.printValue(reflect.ValueOf(v), verb, 0)
- }
-}
-
-func (p *pp) fmtPointer(value reflect.Value, verb rune) {
- var u uintptr
- switch value.Kind() {
- case reflect.Chan, reflect.Func, reflect.Map, reflect.Pointer, reflect.Slice, reflect.UnsafePointer:
- u = value.Pointer()
- default:
- p.badVerb(verb)
- return
- }
-
- switch verb {
- case 'v':
- if p.fmt.sharpV {
- p.buf.writeByte('(')
- p.buf.writeString(value.Type().String())
- p.buf.writeString(")(")
- if u == 0 {
- p.buf.writeString(nilString)
- } else {
- p.fmt0x64(uint64(u), true)
- }
- p.buf.writeByte(')')
- } else {
- if u == 0 {
- p.fmt.padString(nilAngleString)
- } else {
- p.fmt0x64(uint64(u), !p.fmt.sharp)
- }
- }
- case 'p':
- p.fmt0x64(uint64(u), !p.fmt.sharp)
- case 'b', 'o', 'd', 'x', 'X':
- p.fmtInteger(uint64(u), unsigned, verb)
- default:
- p.badVerb(verb)
- }
-}
-
-func (p *pp) catchPanic(arg any, verb rune, method string) {
- if err := recover(); err != nil {
- // If it's a nil pointer, just say "<nil>". The likeliest causes are a
- // Stringer that fails to guard against nil or a nil pointer for a
- // value receiver, and in either case, "<nil>" is a nice result.
- if v := reflect.ValueOf(arg); v.Kind() == reflect.Pointer && v.IsNil() {
- p.buf.writeString(nilAngleString)
- return
- }
- // Otherwise print a concise panic message. Most of the time the panic
- // value will print itself nicely.
- if p.panicking {
- // Nested panics; the recursion in printArg cannot succeed.
- panic(err)
- }
-
- oldFlags := p.fmt.fmtFlags
- // For this output we want default behavior.
- p.fmt.clearflags()
-
- p.buf.writeString(percentBangString)
- p.buf.writeRune(verb)
- p.buf.writeString(panicString)
- p.buf.writeString(method)
- p.buf.writeString(" method: ")
- p.panicking = true
- p.printArg(err, 'v')
- p.panicking = false
- p.buf.writeByte(')')
-
- p.fmt.fmtFlags = oldFlags
- }
-}
-
-func (p *pp) handleMethods(verb rune) (handled bool) {
- if p.erroring {
- return
- }
- if verb == 'w' {
- // It is invalid to use %w other than with Errorf, more than once,
- // or with a non-error arg.
- err, ok := p.arg.(error)
- if !ok || !p.wrapErrs || p.wrappedErr != nil {
- p.wrappedErr = nil
- p.wrapErrs = false
- p.badVerb(verb)
- return true
- }
- p.wrappedErr = err
- // If the arg is a Formatter, pass 'v' as the verb to it.
- verb = 'v'
- }
-
- // Is it a Formatter?
- if formatter, ok := p.arg.(Formatter); ok {
- handled = true
- defer p.catchPanic(p.arg, verb, "Format")
- formatter.Format(p, verb)
- return
- }
-
- // If we're doing Go syntax and the argument knows how to supply it, take care of it now.
- if p.fmt.sharpV {
- if stringer, ok := p.arg.(GoStringer); ok {
- handled = true
- defer p.catchPanic(p.arg, verb, "GoString")
- // Print the result of GoString unadorned.
- p.fmt.fmtS(stringer.GoString())
- return
- }
- } else {
- // If a string is acceptable according to the format, see if
- // the value satisfies one of the string-valued interfaces.
- // Println etc. set verb to %v, which is "stringable".
- switch verb {
- case 'v', 's', 'x', 'X', 'q':
- // Is it an error or Stringer?
- // The duplication in the bodies is necessary:
- // setting handled and deferring catchPanic
- // must happen before calling the method.
- switch v := p.arg.(type) {
- case error:
- handled = true
- defer p.catchPanic(p.arg, verb, "Error")
- p.fmtString(v.Error(), verb)
- return
-
- case Stringer:
- handled = true
- defer p.catchPanic(p.arg, verb, "String")
- p.fmtString(v.String(), verb)
- return
- }
- }
- }
- return false
-}
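
A short sketch of the %w path handled above: the wrapped operand is recorded on the printer, so the error returned by Errorf supports Unwrap and the errors helpers see through it:

    package main

    import (
        "errors"
        "fmt"
        "os"
    )

    func main() {
        _, err := os.Open("no-such-file")
        wrapped := fmt.Errorf("loading config: %w", err)
        fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true
    }
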
-
-func (p *pp) printArg(arg any, verb rune) {
- p.arg = arg
- p.value = reflect.Value{}
-
- if arg == nil {
- switch verb {
- case 'T', 'v':
- p.fmt.padString(nilAngleString)
- default:
- p.badVerb(verb)
- }
- return
- }
-
- // Special processing considerations.
- // %T (the value's type) and %p (its address) are special; we always do them first.
- switch verb {
- case 'T':
- p.fmt.fmtS(reflect.TypeOf(arg).String())
- return
- case 'p':
- p.fmtPointer(reflect.ValueOf(arg), 'p')
- return
- }
-
- // Some types can be done without reflection.
- switch f := arg.(type) {
- case bool:
- p.fmtBool(f, verb)
- case float32:
- p.fmtFloat(float64(f), 32, verb)
- case float64:
- p.fmtFloat(f, 64, verb)
- case complex64:
- p.fmtComplex(complex128(f), 64, verb)
- case complex128:
- p.fmtComplex(f, 128, verb)
- case int:
- p.fmtInteger(uint64(f), signed, verb)
- case int8:
- p.fmtInteger(uint64(f), signed, verb)
- case int16:
- p.fmtInteger(uint64(f), signed, verb)
- case int32:
- p.fmtInteger(uint64(f), signed, verb)
- case int64:
- p.fmtInteger(uint64(f), signed, verb)
- case uint:
- p.fmtInteger(uint64(f), unsigned, verb)
- case uint8:
- p.fmtInteger(uint64(f), unsigned, verb)
- case uint16:
- p.fmtInteger(uint64(f), unsigned, verb)
- case uint32:
- p.fmtInteger(uint64(f), unsigned, verb)
- case uint64:
- p.fmtInteger(f, unsigned, verb)
- case uintptr:
- p.fmtInteger(uint64(f), unsigned, verb)
- case string:
- p.fmtString(f, verb)
- case []byte:
- p.fmtBytes(f, verb, "[]byte")
- case reflect.Value:
- // Handle extractable values with special methods
- // since printValue does not handle them at depth 0.
- if f.IsValid() && f.CanInterface() {
- p.arg = f.Interface()
- if p.handleMethods(verb) {
- return
- }
- }
- p.printValue(f, verb, 0)
- default:
- // If the type is not simple, it might have methods.
- if !p.handleMethods(verb) {
- // Need to use reflection, since the type had no
- // interface methods that could be used for formatting.
- p.printValue(reflect.ValueOf(f), verb, 0)
- }
- }
-}
-
-// printValue is similar to printArg but starts with a reflect value, not an interface{} value.
-// It does not handle 'p' and 'T' verbs because these should have been already handled by printArg.
-func (p *pp) printValue(value reflect.Value, verb rune, depth int) {
- // Handle values with special methods if not already handled by printArg (depth == 0).
- if depth > 0 && value.IsValid() && value.CanInterface() {
- p.arg = value.Interface()
- if p.handleMethods(verb) {
- return
- }
- }
- p.arg = nil
- p.value = value
-
- switch f := value; value.Kind() {
- case reflect.Invalid:
- if depth == 0 {
- p.buf.writeString(invReflectString)
- } else {
- switch verb {
- case 'v':
- p.buf.writeString(nilAngleString)
- default:
- p.badVerb(verb)
- }
- }
- case reflect.Bool:
- p.fmtBool(f.Bool(), verb)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- p.fmtInteger(uint64(f.Int()), signed, verb)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- p.fmtInteger(f.Uint(), unsigned, verb)
- case reflect.Float32:
- p.fmtFloat(f.Float(), 32, verb)
- case reflect.Float64:
- p.fmtFloat(f.Float(), 64, verb)
- case reflect.Complex64:
- p.fmtComplex(f.Complex(), 64, verb)
- case reflect.Complex128:
- p.fmtComplex(f.Complex(), 128, verb)
- case reflect.String:
- p.fmtString(f.String(), verb)
- case reflect.Map:
- if p.fmt.sharpV {
- p.buf.writeString(f.Type().String())
- if f.IsNil() {
- p.buf.writeString(nilParenString)
- return
- }
- p.buf.writeByte('{')
- } else {
- p.buf.writeString(mapString)
- }
- sorted := fmtsort.Sort(f)
- for i, key := range sorted.Key {
- if i > 0 {
- if p.fmt.sharpV {
- p.buf.writeString(commaSpaceString)
- } else {
- p.buf.writeByte(' ')
- }
- }
- p.printValue(key, verb, depth+1)
- p.buf.writeByte(':')
- p.printValue(sorted.Value[i], verb, depth+1)
- }
- if p.fmt.sharpV {
- p.buf.writeByte('}')
- } else {
- p.buf.writeByte(']')
- }
- case reflect.Struct:
- if p.fmt.sharpV {
- p.buf.writeString(f.Type().String())
- }
- p.buf.writeByte('{')
- for i := 0; i < f.NumField(); i++ {
- if i > 0 {
- if p.fmt.sharpV {
- p.buf.writeString(commaSpaceString)
- } else {
- p.buf.writeByte(' ')
- }
- }
- if p.fmt.plusV || p.fmt.sharpV {
- if name := f.Type().Field(i).Name; name != "" {
- p.buf.writeString(name)
- p.buf.writeByte(':')
- }
- }
- p.printValue(getField(f, i), verb, depth+1)
- }
- p.buf.writeByte('}')
- case reflect.Interface:
- value := f.Elem()
- if !value.IsValid() {
- if p.fmt.sharpV {
- p.buf.writeString(f.Type().String())
- p.buf.writeString(nilParenString)
- } else {
- p.buf.writeString(nilAngleString)
- }
- } else {
- p.printValue(value, verb, depth+1)
- }
- case reflect.Array, reflect.Slice:
- switch verb {
- case 's', 'q', 'x', 'X':
- // Handle byte and uint8 slices and arrays special for the above verbs.
- t := f.Type()
- if t.Elem().Kind() == reflect.Uint8 {
- var bytes []byte
- if f.Kind() == reflect.Slice {
- bytes = f.Bytes()
- } else if f.CanAddr() {
- bytes = f.Slice(0, f.Len()).Bytes()
- } else {
- // We have an array, but we cannot Slice() a non-addressable array,
- // so we build a slice by hand. This is a rare case but it would be nice
- // if reflection could help a little more.
- bytes = make([]byte, f.Len())
- for i := range bytes {
- bytes[i] = byte(f.Index(i).Uint())
- }
- }
- p.fmtBytes(bytes, verb, t.String())
- return
- }
- }
- if p.fmt.sharpV {
- p.buf.writeString(f.Type().String())
- if f.Kind() == reflect.Slice && f.IsNil() {
- p.buf.writeString(nilParenString)
- return
- }
- p.buf.writeByte('{')
- for i := 0; i < f.Len(); i++ {
- if i > 0 {
- p.buf.writeString(commaSpaceString)
- }
- p.printValue(f.Index(i), verb, depth+1)
- }
- p.buf.writeByte('}')
- } else {
- p.buf.writeByte('[')
- for i := 0; i < f.Len(); i++ {
- if i > 0 {
- p.buf.writeByte(' ')
- }
- p.printValue(f.Index(i), verb, depth+1)
- }
- p.buf.writeByte(']')
- }
- case reflect.Pointer:
- // pointer to array or slice or struct? ok at top level
- // but not embedded (avoid loops)
- if depth == 0 && f.Pointer() != 0 {
- switch a := f.Elem(); a.Kind() {
- case reflect.Array, reflect.Slice, reflect.Struct, reflect.Map:
- p.buf.writeByte('&')
- p.printValue(a, verb, depth+1)
- return
- }
- }
- fallthrough
- case reflect.Chan, reflect.Func, reflect.UnsafePointer:
- p.fmtPointer(f, verb)
- default:
- p.unknownType(f)
- }
-}
-
-// intFromArg gets the argNumth element of a. On return, isInt reports whether the argument has integer type.
-func intFromArg(a []any, argNum int) (num int, isInt bool, newArgNum int) {
- newArgNum = argNum
- if argNum < len(a) {
- num, isInt = a[argNum].(int) // Almost always OK.
- if !isInt {
- // Work harder.
- switch v := reflect.ValueOf(a[argNum]); v.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- n := v.Int()
- if int64(int(n)) == n {
- num = int(n)
- isInt = true
- }
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- n := v.Uint()
- if int64(n) >= 0 && uint64(int(n)) == n {
- num = int(n)
- isInt = true
- }
- default:
- // Already 0, false.
- }
- }
- newArgNum = argNum + 1
- if tooLarge(num) {
- num = 0
- isInt = false
- }
- }
- return
-}
-
-// parseArgNumber returns the value of the bracketed number, minus 1
-// (explicit argument numbers are one-indexed but we want zero-indexed).
-// The opening bracket is known to be present at format[0].
-// The returned values are the index, the number of bytes to consume
-// up to the closing bracket, if present, and whether the number parsed
-// ok. The bytes to consume will be 1 if no closing bracket is present.
-func parseArgNumber(format string) (index int, wid int, ok bool) {
- // There must be at least 3 bytes: [n].
- if len(format) < 3 {
- return 0, 1, false
- }
-
- // Find closing bracket.
- for i := 1; i < len(format); i++ {
- if format[i] == ']' {
- width, ok, newi := parsenum(format, 1, i)
- if !ok || newi != i {
- return 0, i + 1, false
- }
- return width - 1, i + 1, true // arg numbers are one-indexed and skip the bracket.
- }
- }
- return 0, 1, false
-}
-
-// argNumber returns the next argument to evaluate, which is either the value of the passed-in
-// argNum or the value of the bracketed integer that begins format[i:]. It also returns
-// the new value of i, that is, the index of the next byte of the format to process.
-func (p *pp) argNumber(argNum int, format string, i int, numArgs int) (newArgNum, newi int, found bool) {
- if len(format) <= i || format[i] != '[' {
- return argNum, i, false
- }
- p.reordered = true
- index, wid, ok := parseArgNumber(format[i:])
- if ok && 0 <= index && index < numArgs {
- return index, i + wid, true
- }
- p.goodArgNum = false
- return argNum, i + wid, ok
-}
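
The bracketed-index behavior this parser enables, reusing the example from the package documentation:

    package main

    import "fmt"

    func main() {
        // [1] resets the argument cursor, so 16 and 17 are
        // each formatted twice.
        fmt.Printf("%d %d %#[1]x %#x\n", 16, 17) // 16 17 0x10 0x11
    }
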
-
-func (p *pp) badArgNum(verb rune) {
- p.buf.writeString(percentBangString)
- p.buf.writeRune(verb)
- p.buf.writeString(badIndexString)
-}
-
-func (p *pp) missingArg(verb rune) {
- p.buf.writeString(percentBangString)
- p.buf.writeRune(verb)
- p.buf.writeString(missingString)
-}
-
-func (p *pp) doPrintf(format string, a []any) {
- end := len(format)
- argNum := 0 // we process one argument per non-trivial format
- afterIndex := false // previous item in format was an index like [3].
- p.reordered = false
-formatLoop:
- for i := 0; i < end; {
- p.goodArgNum = true
- lasti := i
- for i < end && format[i] != '%' {
- i++
- }
- if i > lasti {
- p.buf.writeString(format[lasti:i])
- }
- if i >= end {
- // done processing format string
- break
- }
-
- // Process one verb
- i++
-
- // Do we have flags?
- p.fmt.clearflags()
- simpleFormat:
- for ; i < end; i++ {
- c := format[i]
- switch c {
- case '#':
- p.fmt.sharp = true
- case '0':
- p.fmt.zero = !p.fmt.minus // Only allow zero padding to the left.
- case '+':
- p.fmt.plus = true
- case '-':
- p.fmt.minus = true
- p.fmt.zero = false // Do not pad with zeros to the right.
- case ' ':
- p.fmt.space = true
- default:
- // Fast path for common case of ascii lower case simple verbs
- // without precision or width or argument indices.
- if 'a' <= c && c <= 'z' && argNum < len(a) {
- if c == 'v' {
- // Go syntax
- p.fmt.sharpV = p.fmt.sharp
- p.fmt.sharp = false
- // Struct-field syntax
- p.fmt.plusV = p.fmt.plus
- p.fmt.plus = false
- }
- p.printArg(a[argNum], rune(c))
- argNum++
- i++
- continue formatLoop
- }
- // Format is more complex than simple flags and a verb or is malformed.
- break simpleFormat
- }
- }
-
- // Do we have an explicit argument index?
- argNum, i, afterIndex = p.argNumber(argNum, format, i, len(a))
-
- // Do we have width?
- if i < end && format[i] == '*' {
- i++
- p.fmt.wid, p.fmt.widPresent, argNum = intFromArg(a, argNum)
-
- if !p.fmt.widPresent {
- p.buf.writeString(badWidthString)
- }
-
- // We have a negative width, so take its value and ensure
- // that the minus flag is set
- if p.fmt.wid < 0 {
- p.fmt.wid = -p.fmt.wid
- p.fmt.minus = true
- p.fmt.zero = false // Do not pad with zeros to the right.
- }
- afterIndex = false
- } else {
- p.fmt.wid, p.fmt.widPresent, i = parsenum(format, i, end)
- if afterIndex && p.fmt.widPresent { // "%[3]2d"
- p.goodArgNum = false
- }
- }
-
- // Do we have precision?
- if i+1 < end && format[i] == '.' {
- i++
- if afterIndex { // "%[3].2d"
- p.goodArgNum = false
- }
- argNum, i, afterIndex = p.argNumber(argNum, format, i, len(a))
- if i < end && format[i] == '*' {
- i++
- p.fmt.prec, p.fmt.precPresent, argNum = intFromArg(a, argNum)
- // Negative precision arguments don't make sense
- if p.fmt.prec < 0 {
- p.fmt.prec = 0
- p.fmt.precPresent = false
- }
- if !p.fmt.precPresent {
- p.buf.writeString(badPrecString)
- }
- afterIndex = false
- } else {
- p.fmt.prec, p.fmt.precPresent, i = parsenum(format, i, end)
- if !p.fmt.precPresent {
- p.fmt.prec = 0
- p.fmt.precPresent = true
- }
- }
- }
-
- if !afterIndex {
- argNum, i, afterIndex = p.argNumber(argNum, format, i, len(a))
- }
-
- if i >= end {
- p.buf.writeString(noVerbString)
- break
- }
-
- verb, size := rune(format[i]), 1
- if verb >= utf8.RuneSelf {
- verb, size = utf8.DecodeRuneInString(format[i:])
- }
- i += size
-
- switch {
- case verb == '%': // Percent does not absorb operands and ignores f.wid and f.prec.
- p.buf.writeByte('%')
- case !p.goodArgNum:
- p.badArgNum(verb)
- case argNum >= len(a): // No argument left over to print for the current verb.
- p.missingArg(verb)
- case verb == 'v':
- // Go syntax
- p.fmt.sharpV = p.fmt.sharp
- p.fmt.sharp = false
- // Struct-field syntax
- p.fmt.plusV = p.fmt.plus
- p.fmt.plus = false
- fallthrough
- default:
- p.printArg(a[argNum], verb)
- argNum++
- }
- }
-
- // Check for extra arguments unless the call accessed the arguments
- // out of order, in which case it's too expensive to detect if they've all
- // been used and arguably OK if they're not.
- if !p.reordered && argNum < len(a) {
- p.fmt.clearflags()
- p.buf.writeString(extraString)
- for i, arg := range a[argNum:] {
- if i > 0 {
- p.buf.writeString(commaSpaceString)
- }
- if arg == nil {
- p.buf.writeString(nilAngleString)
- } else {
- p.buf.writeString(reflect.TypeOf(arg).String())
- p.buf.writeByte('=')
- p.printArg(arg, 'v')
- }
- }
- p.buf.writeByte(')')
- }
-}
-
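The extra-argument diagnostic emitted at the end of doPrintf is easy to observe from user code; a quick sketch:

package main

import "fmt"

func main() {
	// One leftover operand and no reordering: doPrintf appends the
	// %!(EXTRA type=value) suffix described above.
	fmt.Println(fmt.Sprintf("%d", 1, 2)) // 1%!(EXTRA int=2)
}
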
-func (p *pp) doPrint(a []any) {
- prevString := false
- for argNum, arg := range a {
- isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String
- // Add a space between two non-string arguments.
- if argNum > 0 && !isString && !prevString {
- p.buf.writeByte(' ')
- }
- p.printArg(arg, 'v')
- prevString = isString
- }
-}
-
-// doPrintln is like doPrint but always adds a space between arguments
-// and a newline after the last argument.
-func (p *pp) doPrintln(a []any) {
- for argNum, arg := range a {
- if argNum > 0 {
- p.buf.writeByte(' ')
- }
- p.printArg(arg, 'v')
- }
- p.buf.writeByte('\n')
-}
diff --git a/contrib/go/_std_1.18/src/hash/crc32/crc32_amd64.go b/contrib/go/_std_1.18/src/hash/crc32/crc32_amd64.go
deleted file mode 100644
index 7017a89304..0000000000
--- a/contrib/go/_std_1.18/src/hash/crc32/crc32_amd64.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// AMD64-specific hardware-assisted CRC32 algorithms. See crc32.go for a
-// description of the interface that each architecture-specific file
-// implements.
-
-package crc32
-
-import (
- "internal/cpu"
- "unsafe"
-)
-
-// This file contains the code to call the SSE 4.2 version of the Castagnoli
-// and IEEE CRC.
-
-// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE 4.2 CRC32
-// instruction.
-//go:noescape
-func castagnoliSSE42(crc uint32, p []byte) uint32
-
-// castagnoliSSE42Triple is defined in crc32_amd64.s and uses the SSE 4.2 CRC32
-// instruction.
-//go:noescape
-func castagnoliSSE42Triple(
- crcA, crcB, crcC uint32,
- a, b, c []byte,
- rounds uint32,
-) (retA uint32, retB uint32, retC uint32)
-
-// ieeeCLMUL is defined in crc32_amd64.s and uses the PCLMULQDQ
-// instruction as well as SSE 4.1.
-//go:noescape
-func ieeeCLMUL(crc uint32, p []byte) uint32
-
-const castagnoliK1 = 168
-const castagnoliK2 = 1344
-
-type sse42Table [4]Table
-
-var castagnoliSSE42TableK1 *sse42Table
-var castagnoliSSE42TableK2 *sse42Table
-
-func archAvailableCastagnoli() bool {
- return cpu.X86.HasSSE42
-}
-
-func archInitCastagnoli() {
- if !cpu.X86.HasSSE42 {
- panic("arch-specific Castagnoli not available")
- }
- castagnoliSSE42TableK1 = new(sse42Table)
- castagnoliSSE42TableK2 = new(sse42Table)
- // See description in updateCastagnoli.
- // t[0][i] = CRC(i000, O)
- // t[1][i] = CRC(0i00, O)
- // t[2][i] = CRC(00i0, O)
- // t[3][i] = CRC(000i, O)
- // where O is a sequence of K zeros.
- var tmp [castagnoliK2]byte
- for b := 0; b < 4; b++ {
- for i := 0; i < 256; i++ {
- val := uint32(i) << uint32(b*8)
- castagnoliSSE42TableK1[b][i] = castagnoliSSE42(val, tmp[:castagnoliK1])
- castagnoliSSE42TableK2[b][i] = castagnoliSSE42(val, tmp[:])
- }
- }
-}
-
-// castagnoliShift computes the CRC32-C of K1 or K2 zeroes (depending on the
-// table given) with the given initial crc value. This corresponds to
-// CRC(crc, O) in the description in updateCastagnoli.
-func castagnoliShift(table *sse42Table, crc uint32) uint32 {
- return table[3][crc>>24] ^
- table[2][(crc>>16)&0xFF] ^
- table[1][(crc>>8)&0xFF] ^
- table[0][crc&0xFF]
-}
-
-func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
- if !cpu.X86.HasSSE42 {
- panic("not available")
- }
-
- // This method is inspired by the algorithm in Intel's white paper:
- // "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction"
- // The same strategy of splitting the buffer in three is used but the
- // combining calculation is different; the complete derivation is explained
- // below.
- //
- // -- The basic idea --
- //
- // The CRC32 instruction (available in SSE4.2) can process 8 bytes at a
- // time. In recent Intel architectures the instruction takes 3 cycles;
- // however the processor can pipeline up to three instructions if they
- // don't depend on each other.
- //
- // Roughly this means that we can process three buffers in about the same
- // time we can process one buffer.
- //
- // The idea is then to split the buffer in three, CRC the three pieces
- // separately and then combine the results.
- //
- // Combining the results requires precomputed tables, so we must choose a
- // fixed buffer length to optimize. The longer the length, the faster; but
- // only buffers longer than this length will use the optimization. We choose
- // two cutoffs and compute tables for both:
- // - one around 512: 168*3=504
- // - one around 4KB: 1344*3=4032
- //
- // -- The nitty gritty --
- //
- // Let CRC(I, X) be the non-inverted CRC32-C of the sequence X (with
- // initial non-inverted CRC I). This function has the following properties:
- // (a) CRC(I, AB) = CRC(CRC(I, A), B)
- // (b) CRC(I, A xor B) = CRC(I, A) xor CRC(0, B)
- //
- // Say we want to compute CRC(I, ABC) where A, B, C are three sequences of
- // K bytes each, where K is a fixed constant. Let O be the sequence of K zero
- // bytes.
- //
- // CRC(I, ABC) = CRC(I, ABO xor C)
- // = CRC(I, ABO) xor CRC(0, C)
- // = CRC(CRC(I, AB), O) xor CRC(0, C)
- // = CRC(CRC(I, AO xor B), O) xor CRC(0, C)
- // = CRC(CRC(I, AO) xor CRC(0, B), O) xor CRC(0, C)
- // = CRC(CRC(CRC(I, A), O) xor CRC(0, B), O) xor CRC(0, C)
- //
- // The castagnoliSSE42Triple function can compute CRC(I, A), CRC(0, B),
- // and CRC(0, C) efficiently. We just need to find a way to quickly compute
- // CRC(uvwx, O) given a 4-byte initial value uvwx. We can precompute these
- // values; since we can't have a 32-bit table, we break it up into four
- // 8-bit tables:
- //
- // CRC(uvwx, O) = CRC(u000, O) xor
- // CRC(0v00, O) xor
- // CRC(00w0, O) xor
- // CRC(000x, O)
- //
- // We can compute tables corresponding to the four terms for all 8-bit
- // values.
-
- crc = ^crc
-
- // If a buffer is long enough to use the optimization, process the first few
- // bytes to align the buffer to an 8 byte boundary (if necessary).
- if len(p) >= castagnoliK1*3 {
- delta := int(uintptr(unsafe.Pointer(&p[0])) & 7)
- if delta != 0 {
- delta = 8 - delta
- crc = castagnoliSSE42(crc, p[:delta])
- p = p[delta:]
- }
- }
-
- // Process 3*K2 at a time.
- for len(p) >= castagnoliK2*3 {
- // Compute CRC(I, A), CRC(0, B), and CRC(0, C).
- crcA, crcB, crcC := castagnoliSSE42Triple(
- crc, 0, 0,
- p, p[castagnoliK2:], p[castagnoliK2*2:],
- castagnoliK2/24)
-
- // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B)
- crcAB := castagnoliShift(castagnoliSSE42TableK2, crcA) ^ crcB
- // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C)
- crc = castagnoliShift(castagnoliSSE42TableK2, crcAB) ^ crcC
- p = p[castagnoliK2*3:]
- }
-
- // Process 3*K1 at a time.
- for len(p) >= castagnoliK1*3 {
- // Compute CRC(I, A), CRC(0, B), and CRC(0, C).
- crcA, crcB, crcC := castagnoliSSE42Triple(
- crc, 0, 0,
- p, p[castagnoliK1:], p[castagnoliK1*2:],
- castagnoliK1/24)
-
- // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B)
- crcAB := castagnoliShift(castagnoliSSE42TableK1, crcA) ^ crcB
- // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C)
- crc = castagnoliShift(castagnoliSSE42TableK1, crcAB) ^ crcC
- p = p[castagnoliK1*3:]
- }
-
- // Use the simple implementation for what's left.
- crc = castagnoliSSE42(crc, p)
- return ^crc
-}
-
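All of the above is reached through the public hash/crc32 API; a small sketch using the Castagnoli polynomial (the buffer size is chosen so the 3*K1/3*K2 fast paths can trigger on SSE4.2-capable amd64; the result is identical on every path):

package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	table := crc32.MakeTable(crc32.Castagnoli)
	data := make([]byte, 4096) // >= 3*K2 (4032), so the widest fast path applies
	for i := range data {
		data[i] = byte(i)
	}
	fmt.Printf("crc32c=%08x\n", crc32.Checksum(data, table))
}
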
-func archAvailableIEEE() bool {
- return cpu.X86.HasPCLMULQDQ && cpu.X86.HasSSE41
-}
-
-var archIeeeTable8 *slicing8Table
-
-func archInitIEEE() {
- if !cpu.X86.HasPCLMULQDQ || !cpu.X86.HasSSE41 {
- panic("not available")
- }
- // We still use slicing-by-8 for small buffers.
- archIeeeTable8 = slicingMakeTable(IEEE)
-}
-
-func archUpdateIEEE(crc uint32, p []byte) uint32 {
- if !cpu.X86.HasPCLMULQDQ || !cpu.X86.HasSSE41 {
- panic("not available")
- }
-
- if len(p) >= 64 {
- left := len(p) & 15
- do := len(p) - left
- crc = ^ieeeCLMUL(^crc, p[:do])
- p = p[do:]
- }
- if len(p) == 0 {
- return crc
- }
- return slicingUpdate(crc, archIeeeTable8, p)
-}
diff --git a/contrib/go/_std_1.18/src/internal/bytealg/compare_native.go b/contrib/go/_std_1.18/src/internal/bytealg/compare_native.go
deleted file mode 100644
index 21ff8fe786..0000000000
--- a/contrib/go/_std_1.18/src/internal/bytealg/compare_native.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build 386 || amd64 || s390x || arm || arm64 || ppc64 || ppc64le || mips || mipsle || wasm || mips64 || mips64le
-
-package bytealg
-
-import _ "unsafe" // For go:linkname
-
-//go:noescape
-func Compare(a, b []byte) int
-
-// The declaration below generates ABI wrappers for functions
-// implemented in assembly in this package but declared in another
-// package.
-
-//go:linkname abigen_runtime_cmpstring runtime.cmpstring
-func abigen_runtime_cmpstring(a, b string) int
diff --git a/contrib/go/_std_1.18/src/internal/bytealg/indexbyte_native.go b/contrib/go/_std_1.18/src/internal/bytealg/indexbyte_native.go
deleted file mode 100644
index 2101021e2d..0000000000
--- a/contrib/go/_std_1.18/src/internal/bytealg/indexbyte_native.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build 386 || amd64 || s390x || arm || arm64 || ppc64 || ppc64le || mips || mipsle || mips64 || mips64le || riscv64 || wasm
-
-package bytealg
-
-//go:noescape
-func IndexByte(b []byte, c byte) int
-
-//go:noescape
-func IndexByteString(s string, c byte) int
diff --git a/contrib/go/_std_1.18/src/internal/cpu/cpu.go b/contrib/go/_std_1.18/src/internal/cpu/cpu.go
deleted file mode 100644
index 30745344e1..0000000000
--- a/contrib/go/_std_1.18/src/internal/cpu/cpu.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cpu implements processor feature detection
-// used by the Go standard library.
-package cpu
-
-// DebugOptions is set to true by the runtime if the OS supports reading
-// GODEBUG early in runtime startup.
-// This should not be changed after it is initialized.
-var DebugOptions bool
-
-// CacheLinePad is used to pad structs to avoid false sharing.
-type CacheLinePad struct{ _ [CacheLinePadSize]byte }
-
-// CacheLineSize is the CPU's assumed cache line size.
-// There is currently no runtime detection of the real cache line size
-// so we use the per-GOARCH constant CacheLinePadSize as an approximation.
-var CacheLineSize uintptr = CacheLinePadSize
-
-// The booleans in X86 contain the correspondingly named cpuid feature bit.
-// HasAVX and HasAVX2 are only set if the OS does support XMM and YMM registers
-// in addition to the cpuid feature bit being set.
-// The struct is padded to avoid false sharing.
-var X86 struct {
- _ CacheLinePad
- HasAES bool
- HasADX bool
- HasAVX bool
- HasAVX2 bool
- HasBMI1 bool
- HasBMI2 bool
- HasERMS bool
- HasFMA bool
- HasOSXSAVE bool
- HasPCLMULQDQ bool
- HasPOPCNT bool
- HasRDTSCP bool
- HasSSE3 bool
- HasSSSE3 bool
- HasSSE41 bool
- HasSSE42 bool
- _ CacheLinePad
-}
-
-// The booleans in ARM contain the correspondingly named cpu feature bit.
-// The struct is padded to avoid false sharing.
-var ARM struct {
- _ CacheLinePad
- HasVFPv4 bool
- HasIDIVA bool
- _ CacheLinePad
-}
-
-// The booleans in ARM64 contain the correspondingly named cpu feature bit.
-// The struct is padded to avoid false sharing.
-var ARM64 struct {
- _ CacheLinePad
- HasAES bool
- HasPMULL bool
- HasSHA1 bool
- HasSHA2 bool
- HasCRC32 bool
- HasATOMICS bool
- HasCPUID bool
- IsNeoverseN1 bool
- IsZeus bool
- _ CacheLinePad
-}
-
-var MIPS64X struct {
- _ CacheLinePad
- HasMSA bool // MIPS SIMD architecture
- _ CacheLinePad
-}
-
-// For ppc64(le), it is safe to check only for ISA level starting on ISA v3.00,
-// since there are no optional categories. There are some exceptions that also
-// require kernel support to work (darn, scv), so there are feature bits for
-// those as well. The minimum processor requirement is POWER8 (ISA 2.07).
-// The struct is padded to avoid false sharing.
-var PPC64 struct {
- _ CacheLinePad
- HasDARN bool // Hardware random number generator (requires kernel enablement)
- HasSCV bool // Syscall vectored (requires kernel enablement)
- IsPOWER8 bool // ISA v2.07 (POWER8)
- IsPOWER9 bool // ISA v3.00 (POWER9)
- _ CacheLinePad
-}
-
-var S390X struct {
- _ CacheLinePad
- HasZARCH bool // z architecture mode is active [mandatory]
- HasSTFLE bool // store facility list extended [mandatory]
- HasLDISP bool // long (20-bit) displacements [mandatory]
- HasEIMM bool // 32-bit immediates [mandatory]
- HasDFP bool // decimal floating point
- HasETF3EH bool // ETF-3 enhanced
- HasMSA bool // message security assist (CPACF)
- HasAES bool // KM-AES{128,192,256} functions
- HasAESCBC bool // KMC-AES{128,192,256} functions
- HasAESCTR bool // KMCTR-AES{128,192,256} functions
- HasAESGCM bool // KMA-GCM-AES{128,192,256} functions
- HasGHASH bool // KIMD-GHASH function
- HasSHA1 bool // K{I,L}MD-SHA-1 functions
- HasSHA256 bool // K{I,L}MD-SHA-256 functions
- HasSHA512 bool // K{I,L}MD-SHA-512 functions
- HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions
- HasVX bool // vector facility. Note: the runtime sets this when it processes auxv records.
- HasVXE bool // vector-enhancements facility 1
- HasKDSA bool // elliptic curve functions
- HasECDSA bool // NIST curves
- HasEDDSA bool // Edwards curves
- _ CacheLinePad
-}
-
-// Initialize examines the processor and sets the relevant variables above.
-// This is called by the runtime package early in program initialization,
-// before normal init functions are run. env is set by runtime if the OS supports
-// cpu feature options in GODEBUG.
-func Initialize(env string) {
- doinit()
- processOptions(env)
-}
-
-// options contains the cpu debug options that can be used in GODEBUG.
-// Options are arch dependent and are added by the arch specific doinit functions.
-// Features that are mandatory for the specific GOARCH should not be added to options
-// (e.g. SSE2 on amd64).
-var options []option
-
-// Option names should be lower case, e.g. avx instead of AVX.
-type option struct {
- Name string
- Feature *bool
- Specified bool // whether feature value was specified in GODEBUG
- Enable bool // whether feature should be enabled
-}
-
-// processOptions enables or disables CPU feature values based on the parsed env string.
-// The env string is expected to be of the form cpu.feature1=value1,cpu.feature2=value2...
-// where each feature name is one of the entries in the architecture-specific
-// list stored in the cpu package's options variable, and each value is either 'on' or 'off'.
-// If env contains cpu.all=off then all cpu features referenced through the options
-// variable are disabled. Other feature names and values result in warning messages.
-func processOptions(env string) {
-field:
- for env != "" {
- field := ""
- i := indexByte(env, ',')
- if i < 0 {
- field, env = env, ""
- } else {
- field, env = env[:i], env[i+1:]
- }
- if len(field) < 4 || field[:4] != "cpu." {
- continue
- }
- i = indexByte(field, '=')
- if i < 0 {
- print("GODEBUG: no value specified for \"", field, "\"\n")
- continue
- }
- key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on"
-
- var enable bool
- switch value {
- case "on":
- enable = true
- case "off":
- enable = false
- default:
- print("GODEBUG: value \"", value, "\" not supported for cpu option \"", key, "\"\n")
- continue field
- }
-
- if key == "all" {
- for i := range options {
- options[i].Specified = true
- options[i].Enable = enable
- }
- continue field
- }
-
- for i := range options {
- if options[i].Name == key {
- options[i].Specified = true
- options[i].Enable = enable
- continue field
- }
- }
-
- print("GODEBUG: unknown cpu feature \"", key, "\"\n")
- }
-
- for _, o := range options {
- if !o.Specified {
- continue
- }
-
- if o.Enable && !*o.Feature {
- print("GODEBUG: can not enable \"", o.Name, "\", missing CPU support\n")
- continue
- }
-
- *o.Feature = o.Enable
- }
-}
-
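For reference, the accepted option format looks like GODEBUG=cpu.avx2=off or GODEBUG=cpu.all=off. A simplified, standalone sketch of the same field parsing (parseCPUOptions is a hypothetical helper, not part of this package; it skips the warnings and the "all" special case):

package main

import (
	"fmt"
	"strings"
)

// parseCPUOptions mirrors the cpu.<feature>=on|off format handled by
// processOptions above, simplified for illustration.
func parseCPUOptions(env string) map[string]bool {
	out := map[string]bool{}
	for _, field := range strings.Split(env, ",") {
		if !strings.HasPrefix(field, "cpu.") {
			continue
		}
		key, value, ok := strings.Cut(field[len("cpu."):], "=")
		if !ok || (value != "on" && value != "off") {
			continue
		}
		out[key] = value == "on"
	}
	return out
}

func main() {
	fmt.Println(parseCPUOptions("cpu.avx2=off,cpu.sse42=on,junk"))
	// map[avx2:false sse42:true]
}
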
-// indexByte returns the index of the first instance of c in s,
-// or -1 if c is not present in s.
-func indexByte(s string, c byte) int {
- for i := 0; i < len(s); i++ {
- if s[i] == c {
- return i
- }
- }
- return -1
-}
diff --git a/contrib/go/_std_1.18/src/internal/cpu/cpu_x86.go b/contrib/go/_std_1.18/src/internal/cpu/cpu_x86.go
deleted file mode 100644
index 81d5ceed61..0000000000
--- a/contrib/go/_std_1.18/src/internal/cpu/cpu_x86.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build 386 || amd64
-
-package cpu
-
-const CacheLinePadSize = 64
-
-// cpuid is implemented in cpu_x86.s.
-func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
-
-// xgetbv with ecx = 0 is implemented in cpu_x86.s.
-func xgetbv() (eax, edx uint32)
-
-const (
- // edx bits
- cpuid_SSE2 = 1 << 26
-
- // ecx bits
- cpuid_SSE3 = 1 << 0
- cpuid_PCLMULQDQ = 1 << 1
- cpuid_SSSE3 = 1 << 9
- cpuid_FMA = 1 << 12
- cpuid_SSE41 = 1 << 19
- cpuid_SSE42 = 1 << 20
- cpuid_POPCNT = 1 << 23
- cpuid_AES = 1 << 25
- cpuid_OSXSAVE = 1 << 27
- cpuid_AVX = 1 << 28
-
- // ebx bits
- cpuid_BMI1 = 1 << 3
- cpuid_AVX2 = 1 << 5
- cpuid_BMI2 = 1 << 8
- cpuid_ERMS = 1 << 9
- cpuid_ADX = 1 << 19
-
- // edx bits for CPUID 0x80000001
- cpuid_RDTSCP = 1 << 27
-)
-
-var maxExtendedFunctionInformation uint32
-
-func doinit() {
- options = []option{
- {Name: "adx", Feature: &X86.HasADX},
- {Name: "aes", Feature: &X86.HasAES},
- {Name: "avx", Feature: &X86.HasAVX},
- {Name: "avx2", Feature: &X86.HasAVX2},
- {Name: "bmi1", Feature: &X86.HasBMI1},
- {Name: "bmi2", Feature: &X86.HasBMI2},
- {Name: "erms", Feature: &X86.HasERMS},
- {Name: "fma", Feature: &X86.HasFMA},
- {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ},
- {Name: "popcnt", Feature: &X86.HasPOPCNT},
- {Name: "rdtscp", Feature: &X86.HasRDTSCP},
- {Name: "sse3", Feature: &X86.HasSSE3},
- {Name: "sse41", Feature: &X86.HasSSE41},
- {Name: "sse42", Feature: &X86.HasSSE42},
- {Name: "ssse3", Feature: &X86.HasSSSE3},
- }
-
- maxID, _, _, _ := cpuid(0, 0)
-
- if maxID < 1 {
- return
- }
-
- maxExtendedFunctionInformation, _, _, _ = cpuid(0x80000000, 0)
-
- _, _, ecx1, _ := cpuid(1, 0)
-
- X86.HasSSE3 = isSet(ecx1, cpuid_SSE3)
- X86.HasPCLMULQDQ = isSet(ecx1, cpuid_PCLMULQDQ)
- X86.HasSSSE3 = isSet(ecx1, cpuid_SSSE3)
- X86.HasSSE41 = isSet(ecx1, cpuid_SSE41)
- X86.HasSSE42 = isSet(ecx1, cpuid_SSE42)
- X86.HasPOPCNT = isSet(ecx1, cpuid_POPCNT)
- X86.HasAES = isSet(ecx1, cpuid_AES)
-
-// OSXSAVE can be false when using older operating systems
-// or when explicitly disabled on newer operating systems by
- // e.g. setting the xsavedisable boot option on Windows 10.
- X86.HasOSXSAVE = isSet(ecx1, cpuid_OSXSAVE)
-
- // The FMA instruction set extension only has VEX prefixed instructions.
- // VEX prefixed instructions require OSXSAVE to be enabled.
- // See Intel 64 and IA-32 Architecture Software Developer’s Manual Volume 2
- // Section 2.4 "AVX and SSE Instruction Exception Specification"
- X86.HasFMA = isSet(ecx1, cpuid_FMA) && X86.HasOSXSAVE
-
- osSupportsAVX := false
- // For XGETBV, OSXSAVE bit is required and sufficient.
- if X86.HasOSXSAVE {
- eax, _ := xgetbv()
- // Check if XMM and YMM registers have OS support.
- osSupportsAVX = isSet(eax, 1<<1) && isSet(eax, 1<<2)
- }
-
- X86.HasAVX = isSet(ecx1, cpuid_AVX) && osSupportsAVX
-
- if maxID < 7 {
- return
- }
-
- _, ebx7, _, _ := cpuid(7, 0)
- X86.HasBMI1 = isSet(ebx7, cpuid_BMI1)
- X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX
- X86.HasBMI2 = isSet(ebx7, cpuid_BMI2)
- X86.HasERMS = isSet(ebx7, cpuid_ERMS)
- X86.HasADX = isSet(ebx7, cpuid_ADX)
-
- var maxExtendedInformation uint32
- maxExtendedInformation, _, _, _ = cpuid(0x80000000, 0)
-
- if maxExtendedInformation < 0x80000001 {
- return
- }
-
- _, _, _, edxExt1 := cpuid(0x80000001, 0)
- X86.HasRDTSCP = isSet(edxExt1, cpuid_RDTSCP)
-}
-
-func isSet(hwc uint32, value uint32) bool {
- return hwc&value != 0
-}
-
-// Name returns the CPU name given by the vendor.
-// If the CPU name cannot be determined, an
-// empty string is returned.
-func Name() string {
- if maxExtendedFunctionInformation < 0x80000004 {
- return ""
- }
-
- data := make([]byte, 0, 3*4*4)
-
- var eax, ebx, ecx, edx uint32
- eax, ebx, ecx, edx = cpuid(0x80000002, 0)
- data = appendBytes(data, eax, ebx, ecx, edx)
- eax, ebx, ecx, edx = cpuid(0x80000003, 0)
- data = appendBytes(data, eax, ebx, ecx, edx)
- eax, ebx, ecx, edx = cpuid(0x80000004, 0)
- data = appendBytes(data, eax, ebx, ecx, edx)
-
- // Trim leading spaces.
- for len(data) > 0 && data[0] == ' ' {
- data = data[1:]
- }
-
- // Trim tail after and including the first null byte.
- for i, c := range data {
- if c == '\x00' {
- data = data[:i]
- break
- }
- }
-
- return string(data)
-}
-
-func appendBytes(b []byte, args ...uint32) []byte {
- for _, arg := range args {
- b = append(b,
- byte((arg >> 0)),
- byte((arg >> 8)),
- byte((arg >> 16)),
- byte((arg >> 24)))
- }
- return b
-}
diff --git a/contrib/go/_std_1.18/src/internal/cpu/cpu_x86.s b/contrib/go/_std_1.18/src/internal/cpu/cpu_x86.s
deleted file mode 100644
index edef21905c..0000000000
--- a/contrib/go/_std_1.18/src/internal/cpu/cpu_x86.s
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build 386 || amd64
-
-#include "textflag.h"
-
-// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
-TEXT ·cpuid(SB), NOSPLIT, $0-24
- MOVL eaxArg+0(FP), AX
- MOVL ecxArg+4(FP), CX
- CPUID
- MOVL AX, eax+8(FP)
- MOVL BX, ebx+12(FP)
- MOVL CX, ecx+16(FP)
- MOVL DX, edx+20(FP)
- RET
-
-// func xgetbv() (eax, edx uint32)
-TEXT ·xgetbv(SB),NOSPLIT,$0-8
- MOVL $0, CX
- XGETBV
- MOVL AX, eax+0(FP)
- MOVL DX, edx+4(FP)
- RET
diff --git a/contrib/go/_std_1.18/src/internal/fmtsort/sort.go b/contrib/go/_std_1.18/src/internal/fmtsort/sort.go
deleted file mode 100644
index 34c1f477f0..0000000000
--- a/contrib/go/_std_1.18/src/internal/fmtsort/sort.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package fmtsort provides a general stable ordering mechanism
-// for maps, on behalf of the fmt and text/template packages.
-// It is not guaranteed to be efficient and works only for types
-// that are valid map keys.
-package fmtsort
-
-import (
- "reflect"
- "sort"
-)
-
-// Note: Throughout this package we avoid calling reflect.Value.Interface as
-// it is not always legal to do so and it's easier to avoid the issue than to face it.
-
-// SortedMap represents a map's keys and values. The keys and values are
-// aligned in index order: Value[i] is the value in the map corresponding to Key[i].
-type SortedMap struct {
- Key []reflect.Value
- Value []reflect.Value
-}
-
-func (o *SortedMap) Len() int { return len(o.Key) }
-func (o *SortedMap) Less(i, j int) bool { return compare(o.Key[i], o.Key[j]) < 0 }
-func (o *SortedMap) Swap(i, j int) {
- o.Key[i], o.Key[j] = o.Key[j], o.Key[i]
- o.Value[i], o.Value[j] = o.Value[j], o.Value[i]
-}
-
-// Sort accepts a map and returns a SortedMap that has the same keys and
-// values but in a stable sorted order according to the keys, modulo issues
-// raised by unorderable key values such as NaNs.
-//
-// The ordering rules are more general than with Go's < operator:
-//
-// - when applicable, nil compares low
-// - ints, floats, and strings order by <
-// - NaN compares less than non-NaN floats
-// - bool compares false before true
-// - complex compares real, then imag
-// - pointers compare by machine address
-// - channel values compare by machine address
-// - structs compare each field in turn
-// - arrays compare each element in turn.
-// Otherwise identical arrays compare by length.
-// - interface values compare first by reflect.Type describing the concrete type
-// and then by concrete value as described in the previous rules.
-//
-func Sort(mapValue reflect.Value) *SortedMap {
- if mapValue.Type().Kind() != reflect.Map {
- return nil
- }
- // Note: this code is arranged to not panic even in the presence
- // of a concurrent map update. The runtime is responsible for
- // yelling loudly if that happens. See issue 33275.
- n := mapValue.Len()
- key := make([]reflect.Value, 0, n)
- value := make([]reflect.Value, 0, n)
- iter := mapValue.MapRange()
- for iter.Next() {
- key = append(key, iter.Key())
- value = append(value, iter.Value())
- }
- sorted := &SortedMap{
- Key: key,
- Value: value,
- }
- sort.Stable(sorted)
- return sorted
-}
-
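The effect of this package is visible whenever fmt formats a map: keys come out in the sorted order defined above, independent of Go's randomized map iteration. For example:

package main

import "fmt"

func main() {
	m := map[string]int{"b": 2, "c": 3, "a": 1}
	fmt.Println(m) // always: map[a:1 b:2 c:3]
}
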
-// compare compares two values of the same type. It returns -1, 0, or 1
-// according to whether a < b (-1), a == b (0), or a > b (1).
-// If the types differ, it returns -1.
-// See the comment on Sort for the comparison rules.
-func compare(aVal, bVal reflect.Value) int {
- aType, bType := aVal.Type(), bVal.Type()
- if aType != bType {
- return -1 // No good answer possible, but don't return 0: they're not equal.
- }
- switch aVal.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- a, b := aVal.Int(), bVal.Int()
- switch {
- case a < b:
- return -1
- case a > b:
- return 1
- default:
- return 0
- }
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- a, b := aVal.Uint(), bVal.Uint()
- switch {
- case a < b:
- return -1
- case a > b:
- return 1
- default:
- return 0
- }
- case reflect.String:
- a, b := aVal.String(), bVal.String()
- switch {
- case a < b:
- return -1
- case a > b:
- return 1
- default:
- return 0
- }
- case reflect.Float32, reflect.Float64:
- return floatCompare(aVal.Float(), bVal.Float())
- case reflect.Complex64, reflect.Complex128:
- a, b := aVal.Complex(), bVal.Complex()
- if c := floatCompare(real(a), real(b)); c != 0 {
- return c
- }
- return floatCompare(imag(a), imag(b))
- case reflect.Bool:
- a, b := aVal.Bool(), bVal.Bool()
- switch {
- case a == b:
- return 0
- case a:
- return 1
- default:
- return -1
- }
- case reflect.Pointer, reflect.UnsafePointer:
- a, b := aVal.Pointer(), bVal.Pointer()
- switch {
- case a < b:
- return -1
- case a > b:
- return 1
- default:
- return 0
- }
- case reflect.Chan:
- if c, ok := nilCompare(aVal, bVal); ok {
- return c
- }
- ap, bp := aVal.Pointer(), bVal.Pointer()
- switch {
- case ap < bp:
- return -1
- case ap > bp:
- return 1
- default:
- return 0
- }
- case reflect.Struct:
- for i := 0; i < aVal.NumField(); i++ {
- if c := compare(aVal.Field(i), bVal.Field(i)); c != 0 {
- return c
- }
- }
- return 0
- case reflect.Array:
- for i := 0; i < aVal.Len(); i++ {
- if c := compare(aVal.Index(i), bVal.Index(i)); c != 0 {
- return c
- }
- }
- return 0
- case reflect.Interface:
- if c, ok := nilCompare(aVal, bVal); ok {
- return c
- }
- c := compare(reflect.ValueOf(aVal.Elem().Type()), reflect.ValueOf(bVal.Elem().Type()))
- if c != 0 {
- return c
- }
- return compare(aVal.Elem(), bVal.Elem())
- default:
- // Certain types cannot appear as keys (maps, funcs, slices), but be explicit.
- panic("bad type in compare: " + aType.String())
- }
-}
-
-// nilCompare checks whether either value is nil. If not, the boolean is false.
-// If either value is nil, the boolean is true and the integer is the comparison
-// value. The comparison is defined to be 0 if both are nil, otherwise the one
-// nil value compares low. Both arguments must represent a chan, func,
-// interface, map, pointer, or slice.
-func nilCompare(aVal, bVal reflect.Value) (int, bool) {
- if aVal.IsNil() {
- if bVal.IsNil() {
- return 0, true
- }
- return -1, true
- }
- if bVal.IsNil() {
- return 1, true
- }
- return 0, false
-}
-
-// floatCompare compares two floating-point values. NaNs compare low.
-func floatCompare(a, b float64) int {
- switch {
- case isNaN(a):
- return -1 // No good answer if b is a NaN so don't bother checking.
- case isNaN(b):
- return 1
- case a < b:
- return -1
- case a > b:
- return 1
- }
- return 0
-}
-
-func isNaN(a float64) bool {
- return a != a
-}
diff --git a/contrib/go/_std_1.18/src/internal/goarch/goarch.go b/contrib/go/_std_1.18/src/internal/goarch/goarch.go
deleted file mode 100644
index 921f5a208f..0000000000
--- a/contrib/go/_std_1.18/src/internal/goarch/goarch.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package goarch contains GOARCH-specific constants.
-package goarch
-
-// The next line makes 'go generate' write the zgoarch*.go files with
-// per-arch information, including constants named Is$GOARCH for every
-// known GOARCH. The constant is 1 on the current system, 0 otherwise; multiplying
-// by them is useful for defining GOARCH-specific constants.
-//go:generate go run gengoarch.go
-
-type ArchFamilyType int
-
-const (
- AMD64 ArchFamilyType = iota
- ARM
- ARM64
- I386
- MIPS
- MIPS64
- PPC64
- RISCV64
- S390X
- WASM
-)
-
-// PtrSize is the size of a pointer in bytes - unsafe.Sizeof(uintptr(0)) but as an ideal constant.
-// It is also the size of the machine's native word size (that is, 4 on 32-bit systems, 8 on 64-bit).
-const PtrSize = 4 << (^uintptr(0) >> 63)
-
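A quick sanity check of the PtrSize expression above: ^uintptr(0)>>63 evaluates to 1 on a 64-bit GOARCH (so 4<<1 == 8) and to 0 on a 32-bit one (so 4<<0 == 4), which can be confirmed against unsafe.Sizeof:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	const ptrSize = 4 << (^uintptr(0) >> 63)          // same trick as above
	fmt.Println(ptrSize == unsafe.Sizeof(uintptr(0))) // true on any GOARCH
}
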
-// ArchFamily is the architecture family (AMD64, ARM, ...)
-const ArchFamily ArchFamilyType = _ArchFamily
-
-// BigEndian reports whether the architecture is big-endian.
-const BigEndian = IsArmbe|IsArm64be|IsMips|IsMips64|IsPpc|IsPpc64|IsS390|IsS390x|IsSparc|IsSparc64 == 1
-
-// DefaultPhysPageSize is the default physical page size.
-const DefaultPhysPageSize = _DefaultPhysPageSize
-
-// PCQuantum is the minimal unit for a program counter (1 on x86, 4 on most other systems).
-// The various PC tables record PC deltas pre-divided by PCQuantum.
-const PCQuantum = _PCQuantum
-
-// Int64Align is the required alignment for a 64-bit integer (4 on 32-bit systems, 8 on 64-bit).
-const Int64Align = PtrSize
-
-// MinFrameSize is the size of the system-reserved words at the bottom
-// of a frame (just above the architectural stack pointer).
-// It is zero on x86 and PtrSize on most non-x86 (LR-based) systems.
-// On PowerPC it is larger, to cover three more reserved words:
-// the compiler word, the link editor word, and the TOC save word.
-const MinFrameSize = _MinFrameSize
-
-// StackAlign is the required alignment of the SP register.
-// The stack must be at least word aligned, but some architectures require more.
-const StackAlign = _StackAlign
diff --git a/contrib/go/_std_1.18/src/internal/goexperiment/exp_pacerredesign_on.go b/contrib/go/_std_1.18/src/internal/goexperiment/exp_pacerredesign_on.go
deleted file mode 100644
index b22b031009..0000000000
--- a/contrib/go/_std_1.18/src/internal/goexperiment/exp_pacerredesign_on.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.pacerredesign
-// +build goexperiment.pacerredesign
-
-package goexperiment
-
-const PacerRedesign = true
-const PacerRedesignInt = 1
diff --git a/contrib/go/_std_1.18/src/internal/goexperiment/exp_regabireflect_on.go b/contrib/go/_std_1.18/src/internal/goexperiment/exp_regabireflect_on.go
deleted file mode 100644
index e8a3e9c06a..0000000000
--- a/contrib/go/_std_1.18/src/internal/goexperiment/exp_regabireflect_on.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.regabireflect
-// +build goexperiment.regabireflect
-
-package goexperiment
-
-const RegabiReflect = true
-const RegabiReflectInt = 1
diff --git a/contrib/go/_std_1.18/src/internal/goexperiment/flags.go b/contrib/go/_std_1.18/src/internal/goexperiment/flags.go
deleted file mode 100644
index 6d935edc2b..0000000000
--- a/contrib/go/_std_1.18/src/internal/goexperiment/flags.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package goexperiment implements support for toolchain experiments.
-//
-// Toolchain experiments are controlled by the GOEXPERIMENT
-// environment variable. GOEXPERIMENT is a comma-separated list of
-// experiment names. GOEXPERIMENT can be set at make.bash time, which
-// sets the default experiments for binaries built with the tool
-// chain; or it can be set at build time. GOEXPERIMENT can also be set
-// to "none", which disables any experiments that were enabled at
-// make.bash time.
-//
-// Experiments are exposed to the build in the following ways:
-//
-// - Build tag goexperiment.x is set if experiment x (lower case) is
-// enabled.
-//
-// - For each experiment x (in camel case), this package contains a
-// boolean constant x and an integer constant xInt.
-//
-// - In runtime assembly, the macro GOEXPERIMENT_x is defined if
-// experiment x (lower case) is enabled.
-//
-// In the toolchain, the set of experiments enabled for the current
-// build should be accessed via objabi.Experiment.
-//
-// The set of experiments is included in the output of runtime.Version()
-// and "go version <binary>" if it differs from the default experiments.
-//
-// For the set of experiments supported by the current toolchain, see
-// "go doc goexperiment.Flags".
-//
-// Note that this package defines the set of experiments (in Flags)
-// and records the experiments that were enabled when the package
-// was compiled (as boolean and integer constants).
-//
-// Note especially that this package does not itself change behavior
-// at run time based on the GOEXPERIMENT variable.
-// The code used in builds to interpret the GOEXPERIMENT variable
-// is in the separate package internal/buildcfg.
-package goexperiment
-
-//go:generate go run mkconsts.go
-
-// Flags is the set of experiments that can be enabled or disabled in
-// the current toolchain.
-//
-// When specified in the GOEXPERIMENT environment variable or as build
-// tags, experiments use the strings.ToLower of their field name.
-//
-// For the baseline experimental configuration, see
-// objabi.experimentBaseline.
-//
-// If you change this struct definition, run "go generate".
-type Flags struct {
- FieldTrack bool
- PreemptibleLoops bool
- StaticLockRanking bool
-
- // Unified enables the compiler's unified IR construction
- // experiment.
- Unified bool
-
- // Regabi is split into several sub-experiments that can be
- // enabled individually. Not all combinations work.
- // The "regabi" GOEXPERIMENT is an alias for all "working"
- // subexperiments.
-
- // RegabiWrappers enables ABI wrappers for calling between
- // ABI0 and ABIInternal functions. Without this, the ABIs are
- // assumed to be identical so cross-ABI calls are direct.
- RegabiWrappers bool
- // RegabiReflect enables the register-passing paths in
- // reflection calls. This is also gated by intArgRegs in
- // reflect and runtime (which are disabled by default) so it
- // can be used in targeted tests.
- RegabiReflect bool
- // RegabiArgs enables register arguments/results in all
- // compiled Go functions.
- //
- // Requires wrappers (to do ABI translation), and reflect (so
- // reflection calls use registers).
- RegabiArgs bool
-
- // PacerRedesign enables the new GC pacer in the runtime.
- //
- // Details regarding the new pacer may be found at
- // https://golang.org/design/44167-gc-pacer-redesign
- PacerRedesign bool
-
- // HeapMinimum512KiB reduces the minimum heap size to 512 KiB.
- //
- // This was originally reduced as part of PacerRedesign, but
- // has been broken out to its own experiment that is disabled
- // by default.
- HeapMinimum512KiB bool
-}
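As the package documentation above says, every experiment doubles as a build tag; a minimal illustration of gating a file on one (file name, package name, and constant are illustrative):

// pacer_experimental.go is compiled only when the pacerredesign
// experiment is enabled, e.g. GOEXPERIMENT=pacerredesign go build ./...

//go:build goexperiment.pacerredesign

package mypkg

const usingNewPacer = true
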
diff --git a/contrib/go/_std_1.18/src/internal/goos/goos.go b/contrib/go/_std_1.18/src/internal/goos/goos.go
deleted file mode 100644
index ebb521fec6..0000000000
--- a/contrib/go/_std_1.18/src/internal/goos/goos.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// package goos contains GOOS-specific constants.
-package goos
-
-// The next line makes 'go generate' write the zgoos*.go files with
-// per-OS information, including constants named Is$GOOS for every
-// known GOOS. The constant is 1 on the current system, 0 otherwise;
-// multiplying by them is useful for defining GOOS-specific constants.
-//go:generate go run gengoos.go
diff --git a/contrib/go/_std_1.18/src/internal/intern/intern.go b/contrib/go/_std_1.18/src/internal/intern/intern.go
deleted file mode 100644
index 75641106ab..0000000000
--- a/contrib/go/_std_1.18/src/internal/intern/intern.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package intern lets you make smaller comparable values by boxing
-// a larger comparable value (such as a 16 byte string header) down
-// into a globally unique 8 byte pointer.
-//
-// The globally unique pointers are garbage collected with weak
-// references and finalizers. This package hides that.
-package intern
-
-import (
- "internal/godebug"
- "runtime"
- "sync"
- "unsafe"
-)
-
-// A Value pointer is the handle to an underlying comparable value.
-// See func Get for how Value pointers may be used.
-type Value struct {
- _ [0]func() // prevent people from accidentally using value type as comparable
- cmpVal any
- // resurrected is guarded by mu (for all instances of Value).
- // It is set true whenever v is synthesized from a uintptr.
- resurrected bool
-}
-
-// Get returns the comparable value passed to the Get func
-// that returned v.
-func (v *Value) Get() any { return v.cmpVal }
-
-// key is a key in our global value map.
-// It contains type-specialized fields to avoid allocations
-// when converting common types to empty interfaces.
-type key struct {
- s string
- cmpVal any
- // isString reports whether key contains a string.
- // Without it, the zero value of key is ambiguous.
- isString bool
-}
-
-// keyFor returns a key to use with cmpVal.
-func keyFor(cmpVal any) key {
- if s, ok := cmpVal.(string); ok {
- return key{s: s, isString: true}
- }
- return key{cmpVal: cmpVal}
-}
-
-// Value returns a *Value built from k.
-func (k key) Value() *Value {
- if k.isString {
- return &Value{cmpVal: k.s}
- }
- return &Value{cmpVal: k.cmpVal}
-}
-
-var (
- // mu guards valMap, a weakref map of *Value by underlying value.
- // It also guards the resurrected field of all *Values.
- mu sync.Mutex
- valMap = map[key]uintptr{} // to uintptr(*Value)
- valSafe = safeMap() // non-nil in safe+leaky mode
-)
-
-// safeMap returns a non-nil map if we're in safe-but-leaky mode,
-// as controlled by GODEBUG=intern=leaky
-func safeMap() map[key]*Value {
- if godebug.Get("intern") == "leaky" {
- return map[key]*Value{}
- }
- return nil
-}
-
-// Get returns a pointer representing the comparable value cmpVal.
-//
-// The returned pointer will be the same for Get(v) and Get(v2)
-// if and only if v == v2, and can be used as a map key.
-func Get(cmpVal any) *Value {
- return get(keyFor(cmpVal))
-}
-
-// GetByString is identical to Get, except that it is specialized for strings.
-// This avoids an allocation from putting a string into an interface{}
-// to pass as an argument to Get.
-func GetByString(s string) *Value {
- return get(key{s: s, isString: true})
-}
-
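Internal packages cannot be imported, so as a sketch of the Get contract the example below uses go4.org/intern, the public package this code descends from, which exposes the same Get/GetByString/Value.Get API:

package main

import (
	"fmt"

	"go4.org/intern"
)

func main() {
	a := intern.GetByString("hello")
	b := intern.GetByString("hello")
	fmt.Println(a == b)           // true: equal values map to one handle
	fmt.Println(a.Get().(string)) // "hello"
}
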
-// We play unsafe games that violate Go's rules (and assume a non-moving
-// collector). So we quiet Go here.
-// See the comment below Get for more implementation details.
-//go:nocheckptr
-func get(k key) *Value {
- mu.Lock()
- defer mu.Unlock()
-
- var v *Value
- if valSafe != nil {
- v = valSafe[k]
- } else if addr, ok := valMap[k]; ok {
- v = (*Value)(unsafe.Pointer(addr))
- v.resurrected = true
- }
- if v != nil {
- return v
- }
- v = k.Value()
- if valSafe != nil {
- valSafe[k] = v
- } else {
- // SetFinalizer before uintptr conversion (theoretical concern;
- // see https://github.com/go4org/intern/issues/13)
- runtime.SetFinalizer(v, finalize)
- valMap[k] = uintptr(unsafe.Pointer(v))
- }
- return v
-}
-
-func finalize(v *Value) {
- mu.Lock()
- defer mu.Unlock()
- if v.resurrected {
- // We lost the race. Somebody resurrected it while we
- // were about to finalize it. Try again next round.
- v.resurrected = false
- runtime.SetFinalizer(v, finalize)
- return
- }
- delete(valMap, keyFor(v.cmpVal))
-}
-
-// Interning is simple if you don't require that unused values be
-// garbage collectable. But we do require that; we don't want to be a
-// DoS vector. We do this by using a uintptr to hide the pointer from
-// the garbage collector, and using a finalizer to eliminate the
-// pointer when no other code is using it.
-//
-// The obvious implementation of this is to use a
-// map[interface{}]uintptr-of-*interface{}, and set up a finalizer to
-// delete from the map. Unfortunately, this is racy. Because pointers
-// are being created in violation of Go's safety rules, it's
-// possible to create a pointer to a value concurrently with the GC
-// concluding that the value can be collected. There are other races
-// that break the equality invariant as well, but the use-after-free
-// will cause a runtime crash.
-//
-// To make this work, the finalizer needs to know that no references
-// have been unsafely created since the finalizer was set up. To do
-// this, values carry a "resurrected" sentinel, which gets set
-// whenever a pointer is unsafely created. If the finalizer encounters
-// the sentinel, it clears the sentinel and delays collection for one
-// additional GC cycle, by re-installing itself as finalizer. This
-// ensures that the unsafely created pointer is visible to the GC, and
-// will correctly prevent collection.
-//
-// This technique does mean that interned values that get reused take
-// at least 3 GC cycles to fully collect (1 to clear the sentinel, 1
-// to clean up the unsafe map, 1 to be actually deleted).
-//
-// @ianlancetaylor commented in
-// https://github.com/golang/go/issues/41303#issuecomment-717401656
-// that it is possible to implement weak references in terms of
-// finalizers without unsafe. Unfortunately, the approach he outlined
-// does not work here, for two reasons. First, there is no way to
-// construct a strong pointer out of a weak pointer; our map stores
-// weak pointers, but we must return strong pointers to callers.
-// Second, and more fundamentally, we must return not just _a_ strong
-// pointer to callers, but _the same_ strong pointer to callers. In
-// order to return _the same_ strong pointer to callers, we must track
-// it, which is exactly what we cannot do with strong pointers.
-//
-// See https://github.com/inetaf/netaddr/issues/53 for more
-// discussion, and https://github.com/go4org/intern/issues/2 for an
-// illustration of the subtleties at play.
diff --git a/contrib/go/_std_1.18/src/internal/nettrace/nettrace.go b/contrib/go/_std_1.18/src/internal/nettrace/nettrace.go
deleted file mode 100644
index 6e0dbe73bb..0000000000
--- a/contrib/go/_std_1.18/src/internal/nettrace/nettrace.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package nettrace contains internal hooks for tracing activity in
-// the net package. This package is purely internal for use by the
-// net/http/httptrace package and has no stable API exposed to end
-// users.
-package nettrace
-
-// TraceKey is a context.Context Value key. Its associated value should
-// be a *Trace struct.
-type TraceKey struct{}
-
-// LookupIPAltResolverKey is a context.Context Value key used by tests to
-// specify an alternate resolver func.
-// It is not exposed to outside users. (But see issue 12503)
-// The value should be the same type as lookupIP:
-// func lookupIP(ctx context.Context, host string) ([]IPAddr, error)
-type LookupIPAltResolverKey struct{}
-
-// Trace contains a set of hooks for tracing events within
-// the net package. Any specific hook may be nil.
-type Trace struct {
- // DNSStart is called with the hostname of a DNS lookup
- // before it begins.
- DNSStart func(name string)
-
- // DNSDone is called after a DNS lookup completes (or fails).
- // The coalesced parameter is whether singleflight de-duped
-// the call. The addrs are semantically of type net.IPAddr, but
-// can't be declared as such for circular dependency reasons.
- DNSDone func(netIPs []any, coalesced bool, err error)
-
- // ConnectStart is called before a Dial, excluding Dials made
- // during DNS lookups. In the case of DualStack (Happy Eyeballs)
- // dialing, this may be called multiple times, from multiple
- // goroutines.
- ConnectStart func(network, addr string)
-
-// ConnectDone is called after a Dial with the results, excluding
- // Dials made during DNS lookups. It may also be called multiple
- // times, like ConnectStart.
- ConnectDone func(network, addr string, err error)
-}
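These hooks are surfaced to users via net/http/httptrace; a short sketch wiring the DNS callbacks through that stable API (URL is illustrative):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptrace"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		panic(err)
	}
	trace := &httptrace.ClientTrace{
		DNSStart: func(i httptrace.DNSStartInfo) { fmt.Println("lookup:", i.Host) },
		DNSDone:  func(i httptrace.DNSDoneInfo) { fmt.Println("addrs:", i.Addrs, "err:", i.Err) },
	}
	// The trace rides on the request context down into the transport.
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	if resp, err := http.DefaultClient.Do(req); err == nil {
		resp.Body.Close()
	}
}
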
diff --git a/contrib/go/_std_1.18/src/internal/poll/errno_unix.go b/contrib/go/_std_1.18/src/internal/poll/errno_unix.go
deleted file mode 100644
index c177519732..0000000000
--- a/contrib/go/_std_1.18/src/internal/poll/errno_unix.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-package poll
-
-import "syscall"
-
-// Do the interface allocations only once for common
-// Errno values.
-var (
- errEAGAIN error = syscall.EAGAIN
- errEINVAL error = syscall.EINVAL
- errENOENT error = syscall.ENOENT
-)
-
-// errnoErr returns common boxed Errno values, to prevent
-// allocations at runtime.
-func errnoErr(e syscall.Errno) error {
- switch e {
- case 0:
- return nil
- case syscall.EAGAIN:
- return errEAGAIN
- case syscall.EINVAL:
- return errEINVAL
- case syscall.ENOENT:
- return errENOENT
- }
- return e
-}
diff --git a/contrib/go/_std_1.18/src/internal/poll/fcntl_libc.go b/contrib/go/_std_1.18/src/internal/poll/fcntl_libc.go
deleted file mode 100644
index f503d7a336..0000000000
--- a/contrib/go/_std_1.18/src/internal/poll/fcntl_libc.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || solaris
-
-package poll
-
-import _ "unsafe" // for go:linkname
-
-// Implemented in the syscall package.
-//go:linkname fcntl syscall.fcntl
-func fcntl(fd int, cmd int, arg int) (int, error)
diff --git a/contrib/go/_std_1.18/src/internal/poll/fd.go b/contrib/go/_std_1.18/src/internal/poll/fd.go
deleted file mode 100644
index 69a90054d3..0000000000
--- a/contrib/go/_std_1.18/src/internal/poll/fd.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package poll supports non-blocking I/O on file descriptors with polling.
-// This supports I/O operations that block only a goroutine, not a thread.
-// This is used by the net and os packages.
-// It uses a poller built into the runtime, with support from the
-// runtime scheduler.
-package poll
-
-import (
- "errors"
-)
-
-// errNetClosing is the type of the variable ErrNetClosing.
-// This is used to implement the net.Error interface.
-type errNetClosing struct{}
-
-// Error returns the error message for ErrNetClosing.
-// Keep this string consistent because of issue #4373:
-// since historically programs have not been able to detect
-// this error, they look for the string.
-func (e errNetClosing) Error() string { return "use of closed network connection" }
-
-func (e errNetClosing) Timeout() bool { return false }
-func (e errNetClosing) Temporary() bool { return false }
-
-// ErrNetClosing is returned when a network descriptor is used after
-// it has been closed.
-var ErrNetClosing = errNetClosing{}
-
-// ErrFileClosing is returned when a file descriptor is used after it
-// has been closed.
-var ErrFileClosing = errors.New("use of closed file")
-
-// ErrNoDeadline is returned when a request is made to set a deadline
-// on a file type that does not use the poller.
-var ErrNoDeadline = errors.New("file type does not support deadline")
-
-// errClosing returns the appropriate closing error based on isFile.
-func errClosing(isFile bool) error {
- if isFile {
- return ErrFileClosing
- }
- return ErrNetClosing
-}
-
-// ErrDeadlineExceeded is returned for an expired deadline.
-// This is exported by the os package as os.ErrDeadlineExceeded.
-var ErrDeadlineExceeded error = &DeadlineExceededError{}
-
-// DeadlineExceededError is returned for an expired deadline.
-type DeadlineExceededError struct{}
-
-// Implement the net.Error interface.
-// The string is "i/o timeout" because that is what was returned
-// by earlier Go versions. Changing it may break programs that
-// match on error strings.
-func (e *DeadlineExceededError) Error() string { return "i/o timeout" }
-func (e *DeadlineExceededError) Timeout() bool { return true }
-func (e *DeadlineExceededError) Temporary() bool { return true }
-
-// ErrNotPollable is returned when the file or socket is not suitable
-// for event notification.
-var ErrNotPollable = errors.New("not pollable")
-
-// consume removes data from a slice of byte slices, for writev.
-func consume(v *[][]byte, n int64) {
- for len(*v) > 0 {
- ln0 := int64(len((*v)[0]))
- if ln0 > n {
- (*v)[0] = (*v)[0][n:]
- return
- }
- n -= ln0
- *v = (*v)[1:]
- }
-}
-
-// TestHookDidWritev is a hook for testing writev.
-var TestHookDidWritev = func(wrote int) {}
diff --git a/contrib/go/_std_1.18/src/internal/poll/fd_opendir_darwin.go b/contrib/go/_std_1.18/src/internal/poll/fd_opendir_darwin.go
deleted file mode 100644
index 8eb770c358..0000000000
--- a/contrib/go/_std_1.18/src/internal/poll/fd_opendir_darwin.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package poll
-
-import (
- "syscall"
- _ "unsafe" // for go:linkname
-)
-
-// OpenDir returns a pointer to a DIR structure suitable for
-// ReadDir. In case of an error, the name of the failed
-// syscall is returned along with a syscall.Errno.
-func (fd *FD) OpenDir() (uintptr, string, error) {
- // fdopendir(3) takes control of the file descriptor,
- // so use a dup.
- fd2, call, err := fd.Dup()
- if err != nil {
- return 0, call, err
- }
- var dir uintptr
- for {
- dir, err = fdopendir(fd2)
- if err != syscall.EINTR {
- break
- }
- }
- if err != nil {
- syscall.Close(fd2)
- return 0, "fdopendir", err
- }
- return dir, "", nil
-}
-
-// Implemented in syscall/syscall_darwin.go.
-//go:linkname fdopendir syscall.fdopendir
-func fdopendir(fd int) (dir uintptr, err error)
diff --git a/contrib/go/_std_1.18/src/internal/poll/fd_poll_runtime.go b/contrib/go/_std_1.18/src/internal/poll/fd_poll_runtime.go
deleted file mode 100644
index 4a4dddfd27..0000000000
--- a/contrib/go/_std_1.18/src/internal/poll/fd_poll_runtime.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || windows || solaris
-
-package poll
-
-import (
- "errors"
- "sync"
- "syscall"
- "time"
- _ "unsafe" // for go:linkname
-)
-
-// runtimeNano returns the current value of the runtime clock in nanoseconds.
-//go:linkname runtimeNano runtime.nanotime
-func runtimeNano() int64
-
-func runtime_pollServerInit()
-func runtime_pollOpen(fd uintptr) (uintptr, int)
-func runtime_pollClose(ctx uintptr)
-func runtime_pollWait(ctx uintptr, mode int) int
-func runtime_pollWaitCanceled(ctx uintptr, mode int) int
-func runtime_pollReset(ctx uintptr, mode int) int
-func runtime_pollSetDeadline(ctx uintptr, d int64, mode int)
-func runtime_pollUnblock(ctx uintptr)
-func runtime_isPollServerDescriptor(fd uintptr) bool
-
-type pollDesc struct {
- runtimeCtx uintptr
-}
-
-var serverInit sync.Once
-
-func (pd *pollDesc) init(fd *FD) error {
- serverInit.Do(runtime_pollServerInit)
- ctx, errno := runtime_pollOpen(uintptr(fd.Sysfd))
- if errno != 0 {
- return errnoErr(syscall.Errno(errno))
- }
- pd.runtimeCtx = ctx
- return nil
-}
-
-func (pd *pollDesc) close() {
- if pd.runtimeCtx == 0 {
- return
- }
- runtime_pollClose(pd.runtimeCtx)
- pd.runtimeCtx = 0
-}
-
-// evict evicts fd from the pending list, unblocking any I/O running on fd.
-func (pd *pollDesc) evict() {
- if pd.runtimeCtx == 0 {
- return
- }
- runtime_pollUnblock(pd.runtimeCtx)
-}
-
-func (pd *pollDesc) prepare(mode int, isFile bool) error {
- if pd.runtimeCtx == 0 {
- return nil
- }
- res := runtime_pollReset(pd.runtimeCtx, mode)
- return convertErr(res, isFile)
-}
-
-func (pd *pollDesc) prepareRead(isFile bool) error {
- return pd.prepare('r', isFile)
-}
-
-func (pd *pollDesc) prepareWrite(isFile bool) error {
- return pd.prepare('w', isFile)
-}
-
-func (pd *pollDesc) wait(mode int, isFile bool) error {
- if pd.runtimeCtx == 0 {
- return errors.New("waiting for unsupported file type")
- }
- res := runtime_pollWait(pd.runtimeCtx, mode)
- return convertErr(res, isFile)
-}
-
-func (pd *pollDesc) waitRead(isFile bool) error {
- return pd.wait('r', isFile)
-}
-
-func (pd *pollDesc) waitWrite(isFile bool) error {
- return pd.wait('w', isFile)
-}
-
-func (pd *pollDesc) waitCanceled(mode int) {
- if pd.runtimeCtx == 0 {
- return
- }
- runtime_pollWaitCanceled(pd.runtimeCtx, mode)
-}
-
-func (pd *pollDesc) pollable() bool {
- return pd.runtimeCtx != 0
-}
-
-// Error values returned by runtime_pollReset and runtime_pollWait.
-// These must match the values in runtime/netpoll.go.
-const (
- pollNoError = 0
- pollErrClosing = 1
- pollErrTimeout = 2
- pollErrNotPollable = 3
-)
-
-func convertErr(res int, isFile bool) error {
- switch res {
- case pollNoError:
- return nil
- case pollErrClosing:
- return errClosing(isFile)
- case pollErrTimeout:
- return ErrDeadlineExceeded
- case pollErrNotPollable:
- return ErrNotPollable
- }
- println("unreachable: ", res)
- panic("unreachable")
-}
-
-// SetDeadline sets the read and write deadlines associated with fd.
-func (fd *FD) SetDeadline(t time.Time) error {
- return setDeadlineImpl(fd, t, 'r'+'w')
-}
-
-// SetReadDeadline sets the read deadline associated with fd.
-func (fd *FD) SetReadDeadline(t time.Time) error {
- return setDeadlineImpl(fd, t, 'r')
-}
-
-// SetWriteDeadline sets the write deadline associated with fd.
-func (fd *FD) SetWriteDeadline(t time.Time) error {
- return setDeadlineImpl(fd, t, 'w')
-}
-
-func setDeadlineImpl(fd *FD, t time.Time, mode int) error {
- var d int64
- if !t.IsZero() {
- d = int64(time.Until(t))
- if d == 0 {
- d = -1 // don't confuse deadline right now with no deadline
- }
- }
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- if fd.pd.runtimeCtx == 0 {
- return ErrNoDeadline
- }
- runtime_pollSetDeadline(fd.pd.runtimeCtx, d, mode)
- return nil
-}
-
-// IsPollDescriptor reports whether fd is the descriptor being used by the poller.
-// This is only used for testing.
-func IsPollDescriptor(fd uintptr) bool {
- return runtime_isPollServerDescriptor(fd)
-}
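
The pollErrTimeout-to-ErrDeadlineExceeded mapping in convertErr is what surfaces to callers of the net package as os.ErrDeadlineExceeded. A small sketch of observing it; the localhost:8080 address is only an assumption, anything listening will do:

    package main

    import (
    	"errors"
    	"fmt"
    	"net"
    	"os"
    	"time"
    )

    func main() {
    	// Assumes some listener on localhost:8080; adjust as needed.
    	conn, err := net.Dial("tcp", "localhost:8080")
    	if err != nil {
    		fmt.Println("dial:", err)
    		return
    	}
    	defer conn.Close()

    	// SetReadDeadline reaches setDeadlineImpl above with mode 'r'.
    	conn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
    	if _, err := conn.Read(make([]byte, 1)); errors.Is(err, os.ErrDeadlineExceeded) {
    		fmt.Println("read timed out, as expected")
    	}
    }
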
diff --git a/contrib/go/_std_1.18/src/internal/poll/fd_posix.go b/contrib/go/_std_1.18/src/internal/poll/fd_posix.go
deleted file mode 100644
index dc1e29c6b7..0000000000
--- a/contrib/go/_std_1.18/src/internal/poll/fd_posix.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris || windows
-
-package poll
-
-import (
- "io"
- "syscall"
-)
-
-// eofError converts a successful zero-byte read into io.EOF for
-// descriptors on which a zero-byte read signals end of file.
-func (fd *FD) eofError(n int, err error) error {
- if n == 0 && err == nil && fd.ZeroReadIsEOF {
- return io.EOF
- }
- return err
-}
-
-// Shutdown wraps syscall.Shutdown.
-func (fd *FD) Shutdown(how int) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return syscall.Shutdown(fd.Sysfd, how)
-}
-
-// Fchown wraps syscall.Fchown.
-func (fd *FD) Fchown(uid, gid int) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return ignoringEINTR(func() error {
- return syscall.Fchown(fd.Sysfd, uid, gid)
- })
-}
-
-// Ftruncate wraps syscall.Ftruncate.
-func (fd *FD) Ftruncate(size int64) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return ignoringEINTR(func() error {
- return syscall.Ftruncate(fd.Sysfd, size)
- })
-}
-
-// RawControl invokes the user-defined function f for a non-IO
-// operation.
-func (fd *FD) RawControl(f func(uintptr)) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- f(uintptr(fd.Sysfd))
- return nil
-}
-
-// ignoringEINTR makes a function call and repeats it if it returns
-// an EINTR error. This appears to be required even though we install all
-// signal handlers with SA_RESTART: see #22838, #38033, #38836, #40846.
-// Also #20400 and #36644 are issues in which a signal handler is
-// installed without setting SA_RESTART. None of these are the common case,
-// but there are enough of them that it seems that we can't avoid
-// an EINTR loop.
-func ignoringEINTR(fn func() error) error {
- for {
- err := fn()
- if err != syscall.EINTR {
- return err
- }
- }
-}
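
The ignoringEINTR helper is a pattern worth having outside this package too. A self-contained sketch of the same retry loop around an arbitrary syscall (Fchmod on stdout is purely an illustrative choice; Unix-only):

    package main

    import (
    	"fmt"
    	"syscall"
    )

    // retryEINTR mirrors ignoringEINTR above: repeat fn until it
    // returns anything other than syscall.EINTR.
    func retryEINTR(fn func() error) error {
    	for {
    		if err := fn(); err != syscall.EINTR {
    			return err
    		}
    	}
    }

    func main() {
    	err := retryEINTR(func() error {
    		return syscall.Fchmod(1, 0o644) // fd 1 is stdout
    	})
    	fmt.Println("fchmod:", err)
    }
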
diff --git a/contrib/go/_std_1.18/src/internal/poll/fd_unix.go b/contrib/go/_std_1.18/src/internal/poll/fd_unix.go
deleted file mode 100644
index 85971a16cd..0000000000
--- a/contrib/go/_std_1.18/src/internal/poll/fd_unix.go
+++ /dev/null
@@ -1,799 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris
-
-package poll
-
-import (
- "internal/syscall/unix"
- "io"
- "sync/atomic"
- "syscall"
-)
-
-// FD is a file descriptor. The net and os packages use this type as a
-// field of a larger type representing a network connection or OS file.
-type FD struct {
- // Lock sysfd and serialize access to Read and Write methods.
- fdmu fdMutex
-
- // System file descriptor. Immutable until Close.
- Sysfd int
-
- // I/O poller.
- pd pollDesc
-
- // Writev cache.
- iovecs *[]syscall.Iovec
-
- // Semaphore signaled when file is closed.
- csema uint32
-
- // Non-zero if this file has been set to blocking mode.
- isBlocking uint32
-
- // Whether this is a streaming descriptor, as opposed to a
- // packet-based descriptor like a UDP socket. Immutable.
- IsStream bool
-
- // Whether a zero byte read indicates EOF. This is false for a
- // message based socket connection.
- ZeroReadIsEOF bool
-
- // Whether this is a file rather than a network socket.
- isFile bool
-}
-
-// Init initializes the FD. The Sysfd field should already be set.
-// This can be called multiple times on a single FD.
-// The net argument is a network name from the net package (e.g., "tcp"),
-// or "file".
-// Set pollable to true if fd should be managed by runtime netpoll.
-func (fd *FD) Init(net string, pollable bool) error {
- // We don't actually care about the various network types.
- if net == "file" {
- fd.isFile = true
- }
- if !pollable {
- fd.isBlocking = 1
- return nil
- }
- err := fd.pd.init(fd)
- if err != nil {
- // If we could not initialize the runtime poller,
- // assume we are using blocking mode.
- fd.isBlocking = 1
- }
- return err
-}
-
-// destroy closes the file descriptor. This is called when there are
-// no remaining references.
-func (fd *FD) destroy() error {
- // Poller may want to unregister fd in readiness notification mechanism,
- // so this must be executed before CloseFunc.
- fd.pd.close()
-
- // We don't use ignoringEINTR here because POSIX does not define
- // whether the descriptor is closed if close returns EINTR.
- // If the descriptor is indeed closed, using a loop would race
- // with some other goroutine opening a new descriptor.
- // (The Linux kernel guarantees that it is closed on an EINTR error.)
- err := CloseFunc(fd.Sysfd)
-
- fd.Sysfd = -1
- runtime_Semrelease(&fd.csema)
- return err
-}
-
-// Close closes the FD. The underlying file descriptor is closed by the
-// destroy method when there are no remaining references.
-func (fd *FD) Close() error {
- if !fd.fdmu.increfAndClose() {
- return errClosing(fd.isFile)
- }
-
- // Unblock any I/O. Once it all unblocks and returns, nothing can
- // be referring to fd.sysfd anymore, and the final decref will
- // close fd.sysfd. This should happen fairly quickly, since all
- // the I/O is non-blocking, and any attempts to block in the
- // pollDesc will return errClosing(fd.isFile).
- fd.pd.evict()
-
- // The call to decref will call destroy if there are no other
- // references.
- err := fd.decref()
-
- // Wait until the descriptor is closed. If this was the only
- // reference, it is already closed. Only wait if the file has
- // not been set to blocking mode, as otherwise any current I/O
- // may be blocking, and that would block the Close.
- // No need for an atomic read of isBlocking, increfAndClose means
- // we have exclusive access to fd.
- if fd.isBlocking == 0 {
- runtime_Semacquire(&fd.csema)
- }
-
- return err
-}
-
-// SetBlocking puts the file into blocking mode.
-func (fd *FD) SetBlocking() error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- // Atomic store so that concurrent calls to SetBlocking
- // do not cause a race condition. isBlocking only ever goes
- // from 0 to 1 so there is no real race here.
- atomic.StoreUint32(&fd.isBlocking, 1)
- return syscall.SetNonblock(fd.Sysfd, false)
-}
-
-// Darwin and FreeBSD can't read or write more than 2GB at a time,
-// even on 64-bit systems.
-// The same is true of socket implementations on many systems.
-// See golang.org/issue/7812 and golang.org/issue/16266.
-// Use 1GB instead of, say, 2GB-1, to keep subsequent reads aligned.
-const maxRW = 1 << 30
-
-// Read implements io.Reader.
-func (fd *FD) Read(p []byte) (int, error) {
- if err := fd.readLock(); err != nil {
- return 0, err
- }
- defer fd.readUnlock()
- if len(p) == 0 {
- // If the caller wanted a zero byte read, return immediately
- // without trying (but after acquiring the readLock).
- // Otherwise syscall.Read returns 0, nil which looks like
- // io.EOF.
- // TODO(bradfitz): make it wait for readability? (Issue 15735)
- return 0, nil
- }
- if err := fd.pd.prepareRead(fd.isFile); err != nil {
- return 0, err
- }
- if fd.IsStream && len(p) > maxRW {
- p = p[:maxRW]
- }
- for {
- n, err := ignoringEINTRIO(syscall.Read, fd.Sysfd, p)
- if err != nil {
- n = 0
- if err == syscall.EAGAIN && fd.pd.pollable() {
- if err = fd.pd.waitRead(fd.isFile); err == nil {
- continue
- }
- }
- }
- err = fd.eofError(n, err)
- return n, err
- }
-}
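
The EAGAIN branch is the heart of the netpoll integration: a nonblocking read that would block parks the goroutine in waitRead instead of spinning. A toy demonstration of the underlying syscall behavior on a nonblocking pipe (Unix-only sketch):

    package main

    import (
    	"fmt"
    	"syscall"
    )

    func main() {
    	var p [2]int
    	if err := syscall.Pipe(p[:]); err != nil {
    		panic(err)
    	}
    	syscall.SetNonblock(p[0], true)

    	buf := make([]byte, 16)
    	// No writer yet: a nonblocking read fails with EAGAIN. The loop
    	// in FD.Read above would park in pd.waitRead here, not spin.
    	_, err := syscall.Read(p[0], buf)
    	fmt.Println("EAGAIN before data:", err == syscall.EAGAIN)

    	syscall.Write(p[1], []byte("hi"))
    	n, _ := syscall.Read(p[0], buf)
    	fmt.Printf("after data: %q\n", buf[:n])
    }
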
-
-// Pread wraps the pread system call.
-func (fd *FD) Pread(p []byte, off int64) (int, error) {
- // Call incref, not readLock, because since pread specifies the
- // offset it is independent from other reads.
- // Similarly, using the poller doesn't make sense for pread.
- if err := fd.incref(); err != nil {
- return 0, err
- }
- if fd.IsStream && len(p) > maxRW {
- p = p[:maxRW]
- }
- var (
- n int
- err error
- )
- for {
- n, err = syscall.Pread(fd.Sysfd, p, off)
- if err != syscall.EINTR {
- break
- }
- }
- if err != nil {
- n = 0
- }
- fd.decref()
- err = fd.eofError(n, err)
- return n, err
-}
-
-// ReadFrom wraps the recvfrom network call.
-func (fd *FD) ReadFrom(p []byte) (int, syscall.Sockaddr, error) {
- if err := fd.readLock(); err != nil {
- return 0, nil, err
- }
- defer fd.readUnlock()
- if err := fd.pd.prepareRead(fd.isFile); err != nil {
- return 0, nil, err
- }
- for {
- n, sa, err := syscall.Recvfrom(fd.Sysfd, p, 0)
- if err != nil {
- if err == syscall.EINTR {
- continue
- }
- n = 0
- if err == syscall.EAGAIN && fd.pd.pollable() {
- if err = fd.pd.waitRead(fd.isFile); err == nil {
- continue
- }
- }
- }
- err = fd.eofError(n, err)
- return n, sa, err
- }
-}
-
-// ReadFromInet4 wraps the recvfrom network call for IPv4.
-func (fd *FD) ReadFromInet4(p []byte, from *syscall.SockaddrInet4) (int, error) {
- if err := fd.readLock(); err != nil {
- return 0, err
- }
- defer fd.readUnlock()
- if err := fd.pd.prepareRead(fd.isFile); err != nil {
- return 0, err
- }
- for {
- n, err := unix.RecvfromInet4(fd.Sysfd, p, 0, from)
- if err != nil {
- if err == syscall.EINTR {
- continue
- }
- n = 0
- if err == syscall.EAGAIN && fd.pd.pollable() {
- if err = fd.pd.waitRead(fd.isFile); err == nil {
- continue
- }
- }
- }
- err = fd.eofError(n, err)
- return n, err
- }
-}
-
-// ReadFromInet6 wraps the recvfrom network call for IPv6.
-func (fd *FD) ReadFromInet6(p []byte, from *syscall.SockaddrInet6) (int, error) {
- if err := fd.readLock(); err != nil {
- return 0, err
- }
- defer fd.readUnlock()
- if err := fd.pd.prepareRead(fd.isFile); err != nil {
- return 0, err
- }
- for {
- n, err := unix.RecvfromInet6(fd.Sysfd, p, 0, from)
- if err != nil {
- if err == syscall.EINTR {
- continue
- }
- n = 0
- if err == syscall.EAGAIN && fd.pd.pollable() {
- if err = fd.pd.waitRead(fd.isFile); err == nil {
- continue
- }
- }
- }
- err = fd.eofError(n, err)
- return n, err
- }
-}
-
-// ReadMsg wraps the recvmsg network call.
-func (fd *FD) ReadMsg(p []byte, oob []byte, flags int) (int, int, int, syscall.Sockaddr, error) {
- if err := fd.readLock(); err != nil {
- return 0, 0, 0, nil, err
- }
- defer fd.readUnlock()
- if err := fd.pd.prepareRead(fd.isFile); err != nil {
- return 0, 0, 0, nil, err
- }
- for {
- n, oobn, sysflags, sa, err := syscall.Recvmsg(fd.Sysfd, p, oob, flags)
- if err != nil {
- if err == syscall.EINTR {
- continue
- }
- // TODO(dfc) should n and oobn be set to 0
- if err == syscall.EAGAIN && fd.pd.pollable() {
- if err = fd.pd.waitRead(fd.isFile); err == nil {
- continue
- }
- }
- }
- err = fd.eofError(n, err)
- return n, oobn, sysflags, sa, err
- }
-}
-
-// ReadMsgInet4 is ReadMsg, but specialized for syscall.SockaddrInet4.
-func (fd *FD) ReadMsgInet4(p []byte, oob []byte, flags int, sa4 *syscall.SockaddrInet4) (int, int, int, error) {
- if err := fd.readLock(); err != nil {
- return 0, 0, 0, err
- }
- defer fd.readUnlock()
- if err := fd.pd.prepareRead(fd.isFile); err != nil {
- return 0, 0, 0, err
- }
- for {
- n, oobn, sysflags, err := unix.RecvmsgInet4(fd.Sysfd, p, oob, flags, sa4)
- if err != nil {
- if err == syscall.EINTR {
- continue
- }
- // TODO(dfc) should n and oobn be set to 0
- if err == syscall.EAGAIN && fd.pd.pollable() {
- if err = fd.pd.waitRead(fd.isFile); err == nil {
- continue
- }
- }
- }
- err = fd.eofError(n, err)
- return n, oobn, sysflags, err
- }
-}
-
-// ReadMsgInet6 is ReadMsg, but specialized for syscall.SockaddrInet6.
-func (fd *FD) ReadMsgInet6(p []byte, oob []byte, flags int, sa6 *syscall.SockaddrInet6) (int, int, int, error) {
- if err := fd.readLock(); err != nil {
- return 0, 0, 0, err
- }
- defer fd.readUnlock()
- if err := fd.pd.prepareRead(fd.isFile); err != nil {
- return 0, 0, 0, err
- }
- for {
- n, oobn, sysflags, err := unix.RecvmsgInet6(fd.Sysfd, p, oob, flags, sa6)
- if err != nil {
- if err == syscall.EINTR {
- continue
- }
- // TODO(dfc) should n and oobn be set to 0
- if err == syscall.EAGAIN && fd.pd.pollable() {
- if err = fd.pd.waitRead(fd.isFile); err == nil {
- continue
- }
- }
- }
- err = fd.eofError(n, err)
- return n, oobn, sysflags, err
- }
-}
-
-// Write implements io.Writer.
-func (fd *FD) Write(p []byte) (int, error) {
- if err := fd.writeLock(); err != nil {
- return 0, err
- }
- defer fd.writeUnlock()
- if err := fd.pd.prepareWrite(fd.isFile); err != nil {
- return 0, err
- }
- var nn int
- for {
- max := len(p)
- if fd.IsStream && max-nn > maxRW {
- max = nn + maxRW
- }
- n, err := ignoringEINTRIO(syscall.Write, fd.Sysfd, p[nn:max])
- if n > 0 {
- nn += n
- }
- if nn == len(p) {
- return nn, err
- }
- if err == syscall.EAGAIN && fd.pd.pollable() {
- if err = fd.pd.waitWrite(fd.isFile); err == nil {
- continue
- }
- }
- if err != nil {
- return nn, err
- }
- if n == 0 {
- return nn, io.ErrUnexpectedEOF
- }
- }
-}
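
Unlike a read, a write must loop even on success, because the kernel may accept only part of the buffer. A stripped-down version of the accumulation loop above, with the poller and the maxRW chunking removed:

    package main

    import (
    	"io"
    	"os"
    	"syscall"
    )

    // writeAll keeps the core of FD.Write above: retry short writes,
    // advancing nn until the whole buffer has been consumed.
    func writeAll(fd int, p []byte) (int, error) {
    	nn := 0
    	for nn < len(p) {
    		n, err := syscall.Write(fd, p[nn:])
    		if n > 0 {
    			nn += n
    		}
    		if err != nil {
    			return nn, err
    		}
    		if n == 0 {
    			return nn, io.ErrUnexpectedEOF
    		}
    	}
    	return nn, nil
    }

    func main() {
    	writeAll(int(os.Stdout.Fd()), []byte("hello\n"))
    }
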
-
-// Pwrite wraps the pwrite system call.
-func (fd *FD) Pwrite(p []byte, off int64) (int, error) {
- // Call incref, not writeLock, because since pwrite specifies the
- // offset it is independent from other writes.
- // Similarly, using the poller doesn't make sense for pwrite.
- if err := fd.incref(); err != nil {
- return 0, err
- }
- defer fd.decref()
- var nn int
- for {
- max := len(p)
- if fd.IsStream && max-nn > maxRW {
- max = nn + maxRW
- }
- n, err := syscall.Pwrite(fd.Sysfd, p[nn:max], off+int64(nn))
- if err == syscall.EINTR {
- continue
- }
- if n > 0 {
- nn += n
- }
- if nn == len(p) {
- return nn, err
- }
- if err != nil {
- return nn, err
- }
- if n == 0 {
- return nn, io.ErrUnexpectedEOF
- }
- }
-}
-
-// WriteToInet4 wraps the sendto network call for IPv4 addresses.
-func (fd *FD) WriteToInet4(p []byte, sa *syscall.SockaddrInet4) (int, error) {
- if err := fd.writeLock(); err != nil {
- return 0, err
- }
- defer fd.writeUnlock()
- if err := fd.pd.prepareWrite(fd.isFile); err != nil {
- return 0, err
- }
- for {
- err := unix.SendtoInet4(fd.Sysfd, p, 0, sa)
- if err == syscall.EINTR {
- continue
- }
- if err == syscall.EAGAIN && fd.pd.pollable() {
- if err = fd.pd.waitWrite(fd.isFile); err == nil {
- continue
- }
- }
- if err != nil {
- return 0, err
- }
- return len(p), nil
- }
-}
-
-// WriteToInet6 wraps the sendto network call for IPv6 addresses.
-func (fd *FD) WriteToInet6(p []byte, sa *syscall.SockaddrInet6) (int, error) {
- if err := fd.writeLock(); err != nil {
- return 0, err
- }
- defer fd.writeUnlock()
- if err := fd.pd.prepareWrite(fd.isFile); err != nil {
- return 0, err
- }
- for {
- err := unix.SendtoInet6(fd.Sysfd, p, 0, sa)
- if err == syscall.EINTR {
- continue
- }
- if err == syscall.EAGAIN && fd.pd.pollable() {
- if err = fd.pd.waitWrite(fd.isFile); err == nil {
- continue
- }
- }
- if err != nil {
- return 0, err
- }
- return len(p), nil
- }
-}
-
-// WriteTo wraps the sendto network call.
-func (fd *FD) WriteTo(p []byte, sa syscall.Sockaddr) (int, error) {
- if err := fd.writeLock(); err != nil {
- return 0, err
- }
- defer fd.writeUnlock()
- if err := fd.pd.prepareWrite(fd.isFile); err != nil {
- return 0, err
- }
- for {
- err := syscall.Sendto(fd.Sysfd, p, 0, sa)
- if err == syscall.EINTR {
- continue
- }
- if err == syscall.EAGAIN && fd.pd.pollable() {
- if err = fd.pd.waitWrite(fd.isFile); err == nil {
- continue
- }
- }
- if err != nil {
- return 0, err
- }
- return len(p), nil
- }
-}
-
-// WriteMsg wraps the sendmsg network call.
-func (fd *FD) WriteMsg(p []byte, oob []byte, sa syscall.Sockaddr) (int, int, error) {
- if err := fd.writeLock(); err != nil {
- return 0, 0, err
- }
- defer fd.writeUnlock()
- if err := fd.pd.prepareWrite(fd.isFile); err != nil {
- return 0, 0, err
- }
- for {
- n, err := syscall.SendmsgN(fd.Sysfd, p, oob, sa, 0)
- if err == syscall.EINTR {
- continue
- }
- if err == syscall.EAGAIN && fd.pd.pollable() {
- if err = fd.pd.waitWrite(fd.isFile); err == nil {
- continue
- }
- }
- if err != nil {
- return n, 0, err
- }
- return n, len(oob), err
- }
-}
-
-// WriteMsgInet4 is WriteMsg specialized for syscall.SockaddrInet4.
-func (fd *FD) WriteMsgInet4(p []byte, oob []byte, sa *syscall.SockaddrInet4) (int, int, error) {
- if err := fd.writeLock(); err != nil {
- return 0, 0, err
- }
- defer fd.writeUnlock()
- if err := fd.pd.prepareWrite(fd.isFile); err != nil {
- return 0, 0, err
- }
- for {
- n, err := unix.SendmsgNInet4(fd.Sysfd, p, oob, sa, 0)
- if err == syscall.EINTR {
- continue
- }
- if err == syscall.EAGAIN && fd.pd.pollable() {
- if err = fd.pd.waitWrite(fd.isFile); err == nil {
- continue
- }
- }
- if err != nil {
- return n, 0, err
- }
- return n, len(oob), err
- }
-}
-
-// WriteMsgInet6 is WriteMsg specialized for syscall.SockaddrInet6.
-func (fd *FD) WriteMsgInet6(p []byte, oob []byte, sa *syscall.SockaddrInet6) (int, int, error) {
- if err := fd.writeLock(); err != nil {
- return 0, 0, err
- }
- defer fd.writeUnlock()
- if err := fd.pd.prepareWrite(fd.isFile); err != nil {
- return 0, 0, err
- }
- for {
- n, err := unix.SendmsgNInet6(fd.Sysfd, p, oob, sa, 0)
- if err == syscall.EINTR {
- continue
- }
- if err == syscall.EAGAIN && fd.pd.pollable() {
- if err = fd.pd.waitWrite(fd.isFile); err == nil {
- continue
- }
- }
- if err != nil {
- return n, 0, err
- }
- return n, len(oob), err
- }
-}
-
-// Accept wraps the accept network call.
-func (fd *FD) Accept() (int, syscall.Sockaddr, string, error) {
- if err := fd.readLock(); err != nil {
- return -1, nil, "", err
- }
- defer fd.readUnlock()
-
- if err := fd.pd.prepareRead(fd.isFile); err != nil {
- return -1, nil, "", err
- }
- for {
- s, rsa, errcall, err := accept(fd.Sysfd)
- if err == nil {
- return s, rsa, "", err
- }
- switch err {
- case syscall.EINTR:
- continue
- case syscall.EAGAIN:
- if fd.pd.pollable() {
- if err = fd.pd.waitRead(fd.isFile); err == nil {
- continue
- }
- }
- case syscall.ECONNABORTED:
- // This means that a socket on the listen
- // queue was closed before we Accept()ed it;
- // it's a silly error, so try again.
- continue
- }
- return -1, nil, errcall, err
- }
-}
-
-// Seek wraps syscall.Seek.
-func (fd *FD) Seek(offset int64, whence int) (int64, error) {
- if err := fd.incref(); err != nil {
- return 0, err
- }
- defer fd.decref()
- return syscall.Seek(fd.Sysfd, offset, whence)
-}
-
-// ReadDirent wraps syscall.ReadDirent.
-// We treat this like an ordinary system call rather than a call
-// that tries to fill the buffer.
-func (fd *FD) ReadDirent(buf []byte) (int, error) {
- if err := fd.incref(); err != nil {
- return 0, err
- }
- defer fd.decref()
- for {
- n, err := ignoringEINTRIO(syscall.ReadDirent, fd.Sysfd, buf)
- if err != nil {
- n = 0
- if err == syscall.EAGAIN && fd.pd.pollable() {
- if err = fd.pd.waitRead(fd.isFile); err == nil {
- continue
- }
- }
- }
- // Do not call eofError; caller does not expect to see io.EOF.
- return n, err
- }
-}
-
-// Fchmod wraps syscall.Fchmod.
-func (fd *FD) Fchmod(mode uint32) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return ignoringEINTR(func() error {
- return syscall.Fchmod(fd.Sysfd, mode)
- })
-}
-
-// Fchdir wraps syscall.Fchdir.
-func (fd *FD) Fchdir() error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return syscall.Fchdir(fd.Sysfd)
-}
-
-// Fstat wraps syscall.Fstat.
-func (fd *FD) Fstat(s *syscall.Stat_t) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return ignoringEINTR(func() error {
- return syscall.Fstat(fd.Sysfd, s)
- })
-}
-
-// tryDupCloexec indicates whether F_DUPFD_CLOEXEC should be used.
-// If the kernel doesn't support it, this is set to 0.
-var tryDupCloexec = int32(1)
-
-// DupCloseOnExec dups fd and marks it close-on-exec.
-func DupCloseOnExec(fd int) (int, string, error) {
- if syscall.F_DUPFD_CLOEXEC != 0 && atomic.LoadInt32(&tryDupCloexec) == 1 {
- r0, e1 := fcntl(fd, syscall.F_DUPFD_CLOEXEC, 0)
- if e1 == nil {
- return r0, "", nil
- }
- switch e1.(syscall.Errno) {
- case syscall.EINVAL, syscall.ENOSYS:
- // Old kernel, or js/wasm (which returns
- // ENOSYS). Fall back to the portable way from
- // now on.
- atomic.StoreInt32(&tryDupCloexec, 0)
- default:
- return -1, "fcntl", e1
- }
- }
- return dupCloseOnExecOld(fd)
-}
-
-// dupCloseOnExecOld is the traditional way to dup an fd and
-// set its O_CLOEXEC bit, using two system calls.
-func dupCloseOnExecOld(fd int) (int, string, error) {
- syscall.ForkLock.RLock()
- defer syscall.ForkLock.RUnlock()
- newfd, err := syscall.Dup(fd)
- if err != nil {
- return -1, "dup", err
- }
- syscall.CloseOnExec(newfd)
- return newfd, "", nil
-}
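
The fast path above is a single fcntl(F_DUPFD_CLOEXEC); the fallback needs dup plus a separate close-on-exec step, which is why only the old path worries about ForkLock. A rough sketch of the fast path by itself (the raw Syscall wrapper is deprecated but still present in this Go version; Unix-only):

    package main

    import (
    	"fmt"
    	"os"
    	"syscall"
    )

    func main() {
    	// Duplicate stdin with close-on-exec set atomically, the
    	// one-syscall path DupCloseOnExec tries first.
    	newfd, _, errno := syscall.Syscall(syscall.SYS_FCNTL,
    		os.Stdin.Fd(), syscall.F_DUPFD_CLOEXEC, 0)
    	if errno != 0 {
    		fmt.Println("fcntl:", errno)
    		return
    	}
    	fmt.Println("dup of stdin:", newfd)
    	syscall.Close(int(newfd))
    }
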
-
-// Dup duplicates the file descriptor.
-func (fd *FD) Dup() (int, string, error) {
- if err := fd.incref(); err != nil {
- return -1, "", err
- }
- defer fd.decref()
- return DupCloseOnExec(fd.Sysfd)
-}
-
-// On Unix variants only, expose the IO event for the net code.
-
-// WaitWrite waits until data can be written to fd.
-func (fd *FD) WaitWrite() error {
- return fd.pd.waitWrite(fd.isFile)
-}
-
-// WriteOnce is for testing only. It makes a single write call.
-func (fd *FD) WriteOnce(p []byte) (int, error) {
- if err := fd.writeLock(); err != nil {
- return 0, err
- }
- defer fd.writeUnlock()
- return ignoringEINTRIO(syscall.Write, fd.Sysfd, p)
-}
-
-// RawRead invokes the user-defined function f for a read operation.
-func (fd *FD) RawRead(f func(uintptr) bool) error {
- if err := fd.readLock(); err != nil {
- return err
- }
- defer fd.readUnlock()
- if err := fd.pd.prepareRead(fd.isFile); err != nil {
- return err
- }
- for {
- if f(uintptr(fd.Sysfd)) {
- return nil
- }
- if err := fd.pd.waitRead(fd.isFile); err != nil {
- return err
- }
- }
-}
-
-// RawWrite invokes the user-defined function f for a write operation.
-func (fd *FD) RawWrite(f func(uintptr) bool) error {
- if err := fd.writeLock(); err != nil {
- return err
- }
- defer fd.writeUnlock()
- if err := fd.pd.prepareWrite(fd.isFile); err != nil {
- return err
- }
- for {
- if f(uintptr(fd.Sysfd)) {
- return nil
- }
- if err := fd.pd.waitWrite(fd.isFile); err != nil {
- return err
- }
- }
-}
-
-// ignoringEINTRIO is like ignoringEINTR, but just for IO calls.
-func ignoringEINTRIO(fn func(fd int, p []byte) (int, error), fd int, p []byte) (int, error) {
- for {
- n, err := fn(fd, p)
- if err != syscall.EINTR {
- return n, err
- }
- }
-}
diff --git a/contrib/go/_std_1.18/src/internal/poll/fd_writev_darwin.go b/contrib/go/_std_1.18/src/internal/poll/fd_writev_darwin.go
deleted file mode 100644
index 8137510c8b..0000000000
--- a/contrib/go/_std_1.18/src/internal/poll/fd_writev_darwin.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build darwin
-
-package poll
-
-import (
- "syscall"
- _ "unsafe" // for go:linkname
-)
-
-// Implemented in syscall/syscall_darwin.go.
-//go:linkname writev syscall.writev
-func writev(fd int, iovecs []syscall.Iovec) (uintptr, error)
diff --git a/contrib/go/_std_1.18/src/internal/poll/hook_cloexec.go b/contrib/go/_std_1.18/src/internal/poll/hook_cloexec.go
deleted file mode 100644
index c941cb5235..0000000000
--- a/contrib/go/_std_1.18/src/internal/poll/hook_cloexec.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build dragonfly || freebsd || illumos || linux || netbsd || openbsd
-
-package poll
-
-import "syscall"
-
-// Accept4Func is used to hook the accept4 call.
-var Accept4Func func(int, int) (int, syscall.Sockaddr, error) = syscall.Accept4
diff --git a/contrib/go/_std_1.18/src/internal/poll/hook_unix.go b/contrib/go/_std_1.18/src/internal/poll/hook_unix.go
deleted file mode 100644
index c9aa4b4ca2..0000000000
--- a/contrib/go/_std_1.18/src/internal/poll/hook_unix.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris
-
-package poll
-
-import "syscall"
-
-// CloseFunc is used to hook the close call.
-var CloseFunc func(int) error = syscall.Close
-
-// AcceptFunc is used to hook the accept call.
-var AcceptFunc func(int) (int, syscall.Sockaddr, error) = syscall.Accept
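
These package-level function variables exist so tests can intercept the raw system calls. The same seam, reduced to a few lines; closeQuietly and the fake are hypothetical names for illustration:

    package main

    import "fmt"

    // closeFunc mirrors the CloseFunc hook above: production code calls
    // through the variable, and a test can swap in a fake.
    var closeFunc = func(fd int) error { return nil }

    // closeQuietly stands in for the poll code that calls the hook.
    func closeQuietly(fd int) { closeFunc(fd) }

    func main() {
    	var closed []int
    	closeFunc = func(fd int) error { // test double
    		closed = append(closed, fd)
    		return nil
    	}
    	closeQuietly(7)
    	fmt.Println(closed) // [7]
    }
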
diff --git a/contrib/go/_std_1.18/src/internal/poll/sock_cloexec.go b/contrib/go/_std_1.18/src/internal/poll/sock_cloexec.go
deleted file mode 100644
index d849fda0b0..0000000000
--- a/contrib/go/_std_1.18/src/internal/poll/sock_cloexec.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements accept for platforms that provide a fast path for
-// setting SetNonblock and CloseOnExec.
-
-//go:build dragonfly || freebsd || illumos || linux || netbsd || openbsd
-
-package poll
-
-import "syscall"
-
-// Wrapper around the accept system call that marks the returned file
-// descriptor as nonblocking and close-on-exec.
-func accept(s int) (int, syscall.Sockaddr, string, error) {
- ns, sa, err := Accept4Func(s, syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC)
- // On Linux the accept4 system call was introduced in kernel
- // 2.6.28, and on FreeBSD in release 10. If we get an ENOSYS
- // error on either system, or an EINVAL error on Linux, fall
- // back to using accept.
- switch err {
- case nil:
- return ns, sa, "", nil
- default: // errors other than the ones listed
- return -1, sa, "accept4", err
- case syscall.ENOSYS: // syscall missing
- case syscall.EINVAL: // some Linux use this instead of ENOSYS
- case syscall.EACCES: // some Linux use this instead of ENOSYS
- case syscall.EFAULT: // some Linux use this instead of ENOSYS
- }
-
- // See ../syscall/exec_unix.go for description of ForkLock.
- // It is probably okay to hold the lock across syscall.Accept
- // because we have put fd.sysfd into non-blocking mode.
- // However, a call to the File method will put it back into
- // blocking mode. We can't take that risk, so no use of ForkLock here.
- ns, sa, err = AcceptFunc(s)
- if err == nil {
- syscall.CloseOnExec(ns)
- }
- if err != nil {
- return -1, nil, "accept", err
- }
- if err = syscall.SetNonblock(ns, true); err != nil {
- CloseFunc(ns)
- return -1, nil, "setnonblock", err
- }
- return ns, sa, "", nil
-}
diff --git a/contrib/go/_std_1.18/src/internal/poll/sockopt.go b/contrib/go/_std_1.18/src/internal/poll/sockopt.go
deleted file mode 100644
index 2d354700c5..0000000000
--- a/contrib/go/_std_1.18/src/internal/poll/sockopt.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows
-
-package poll
-
-import "syscall"
-
-// SetsockoptInt wraps the setsockopt network call with an int argument.
-func (fd *FD) SetsockoptInt(level, name, arg int) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return syscall.SetsockoptInt(fd.Sysfd, level, name, arg)
-}
-
-// SetsockoptInet4Addr wraps the setsockopt network call with an IPv4 address.
-func (fd *FD) SetsockoptInet4Addr(level, name int, arg [4]byte) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return syscall.SetsockoptInet4Addr(fd.Sysfd, level, name, arg)
-}
-
-// SetsockoptLinger wraps the setsockopt network call with a Linger argument.
-func (fd *FD) SetsockoptLinger(level, name int, l *syscall.Linger) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return syscall.SetsockoptLinger(fd.Sysfd, level, name, l)
-}
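
Each wrapper here is just incref/decref bookkeeping around the corresponding syscall helper. Setting SO_REUSEADDR through the raw syscall package shows what FD.SetsockoptInt ultimately executes (Unix-only sketch):

    package main

    import (
    	"fmt"
    	"syscall"
    )

    func main() {
    	fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, 0)
    	if err != nil {
    		panic(err)
    	}
    	defer syscall.Close(fd)
    	// The same call FD.SetsockoptInt makes after taking a reference.
    	err = syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
    	fmt.Println("SO_REUSEADDR set:", err == nil)
    }
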
diff --git a/contrib/go/_std_1.18/src/internal/poll/sockopt_unix.go b/contrib/go/_std_1.18/src/internal/poll/sockopt_unix.go
deleted file mode 100644
index 54be1cc4b6..0000000000
--- a/contrib/go/_std_1.18/src/internal/poll/sockopt_unix.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-package poll
-
-import "syscall"
-
-// SetsockoptByte wraps the setsockopt network call with a byte argument.
-func (fd *FD) SetsockoptByte(level, name int, arg byte) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return syscall.SetsockoptByte(fd.Sysfd, level, name, arg)
-}
diff --git a/contrib/go/_std_1.18/src/internal/poll/sockoptip.go b/contrib/go/_std_1.18/src/internal/poll/sockoptip.go
deleted file mode 100644
index 7fc9aeefb3..0000000000
--- a/contrib/go/_std_1.18/src/internal/poll/sockoptip.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows
-
-package poll
-
-import "syscall"
-
-// SetsockoptIPMreq wraps the setsockopt network call with an IPMreq argument.
-func (fd *FD) SetsockoptIPMreq(level, name int, mreq *syscall.IPMreq) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return syscall.SetsockoptIPMreq(fd.Sysfd, level, name, mreq)
-}
-
-// SetsockoptIPv6Mreq wraps the setsockopt network call with an IPv6Mreq argument.
-func (fd *FD) SetsockoptIPv6Mreq(level, name int, mreq *syscall.IPv6Mreq) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return syscall.SetsockoptIPv6Mreq(fd.Sysfd, level, name, mreq)
-}
diff --git a/contrib/go/_std_1.18/src/internal/poll/sys_cloexec.go b/contrib/go/_std_1.18/src/internal/poll/sys_cloexec.go
deleted file mode 100644
index 312ed24a44..0000000000
--- a/contrib/go/_std_1.18/src/internal/poll/sys_cloexec.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements accept for platforms that do not provide a fast path for
-// setting SetNonblock and CloseOnExec.
-
-//go:build aix || darwin || (js && wasm) || (solaris && !illumos)
-
-package poll
-
-import (
- "syscall"
-)
-
-// Wrapper around the accept system call that marks the returned file
-// descriptor as nonblocking and close-on-exec.
-func accept(s int) (int, syscall.Sockaddr, string, error) {
- // See ../syscall/exec_unix.go for description of ForkLock.
- // It is probably okay to hold the lock across syscall.Accept
- // because we have put fd.sysfd into non-blocking mode.
- // However, a call to the File method will put it back into
- // blocking mode. We can't take that risk, so no use of ForkLock here.
- ns, sa, err := AcceptFunc(s)
- if err == nil {
- syscall.CloseOnExec(ns)
- }
- if err != nil {
- return -1, nil, "accept", err
- }
- if err = syscall.SetNonblock(ns, true); err != nil {
- CloseFunc(ns)
- return -1, nil, "setnonblock", err
- }
- return ns, sa, "", nil
-}
diff --git a/contrib/go/_std_1.18/src/internal/reflectlite/type.go b/contrib/go/_std_1.18/src/internal/reflectlite/type.go
deleted file mode 100644
index 8f649600d2..0000000000
--- a/contrib/go/_std_1.18/src/internal/reflectlite/type.go
+++ /dev/null
@@ -1,972 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package reflectlite implements a lightweight version of reflect, not using
-// any package except for "runtime" and "unsafe".
-package reflectlite
-
-import (
- "internal/unsafeheader"
- "unsafe"
-)
-
-// Type is the representation of a Go type.
-//
-// Not all methods apply to all kinds of types. Restrictions,
-// if any, are noted in the documentation for each method.
-// Use the Kind method to find out the kind of type before
-// calling kind-specific methods. Calling a method
-// inappropriate to the kind of type causes a run-time panic.
-//
-// Type values are comparable, such as with the == operator,
-// so they can be used as map keys.
-// Two Type values are equal if they represent identical types.
-type Type interface {
- // Methods applicable to all types.
-
- // Name returns the type's name within its package for a defined type.
- // For other (non-defined) types it returns the empty string.
- Name() string
-
- // PkgPath returns a defined type's package path, that is, the import path
- // that uniquely identifies the package, such as "encoding/base64".
- // If the type was predeclared (string, error) or not defined (*T, struct{},
- // []int, or A where A is an alias for a non-defined type), the package path
- // will be the empty string.
- PkgPath() string
-
- // Size returns the number of bytes needed to store
- // a value of the given type; it is analogous to unsafe.Sizeof.
- Size() uintptr
-
- // Kind returns the specific kind of this type.
- Kind() Kind
-
- // Implements reports whether the type implements the interface type u.
- Implements(u Type) bool
-
- // AssignableTo reports whether a value of the type is assignable to type u.
- AssignableTo(u Type) bool
-
- // Comparable reports whether values of this type are comparable.
- Comparable() bool
-
- // String returns a string representation of the type.
- // The string representation may use shortened package names
- // (e.g., base64 instead of "encoding/base64") and is not
- // guaranteed to be unique among types. To test for type identity,
- // compare the Types directly.
- String() string
-
- // Elem returns a type's element type.
- // It panics if the type's Kind is not Ptr.
- Elem() Type
-
- common() *rtype
- uncommon() *uncommonType
-}
-
-/*
- * These data structures are known to the compiler (../../cmd/internal/reflectdata/reflect.go).
- * A few are known to ../runtime/type.go to convey to debuggers.
- * They are also known to ../reflect/type.go.
- */
-
-// A Kind represents the specific kind of type that a Type represents.
-// The zero Kind is not a valid kind.
-type Kind uint
-
-const (
- Invalid Kind = iota
- Bool
- Int
- Int8
- Int16
- Int32
- Int64
- Uint
- Uint8
- Uint16
- Uint32
- Uint64
- Uintptr
- Float32
- Float64
- Complex64
- Complex128
- Array
- Chan
- Func
- Interface
- Map
- Pointer
- Slice
- String
- Struct
- UnsafePointer
-)
-
-const Ptr = Pointer
-
-// tflag is used by an rtype to signal what extra type information is
-// available in the memory directly following the rtype value.
-//
-// tflag values must be kept in sync with copies in:
-// cmd/compile/internal/reflectdata/reflect.go
-// cmd/link/internal/ld/decodesym.go
-// runtime/type.go
-type tflag uint8
-
-const (
- // tflagUncommon means that there is a pointer, *uncommonType,
- // just beyond the outer type structure.
- //
- // For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0,
- // then t has uncommonType data and it can be accessed as:
- //
- // type tUncommon struct {
- // structType
- // u uncommonType
- // }
- // u := &(*tUncommon)(unsafe.Pointer(t)).u
- tflagUncommon tflag = 1 << 0
-
- // tflagExtraStar means the name in the str field has an
- // extraneous '*' prefix. This is because for most types T in
- // a program, the type *T also exists and reusing the str data
- // saves binary size.
- tflagExtraStar tflag = 1 << 1
-
- // tflagNamed means the type has a name.
- tflagNamed tflag = 1 << 2
-
- // tflagRegularMemory means that equal and hash functions can treat
- // this type as a single region of t.size bytes.
- tflagRegularMemory tflag = 1 << 3
-)
-
-// rtype is the common implementation of most values.
-// It is embedded in other struct types.
-//
-// rtype must be kept in sync with ../runtime/type.go:/^type._type.
-type rtype struct {
- size uintptr
- ptrdata uintptr // number of bytes in the type that can contain pointers
- hash uint32 // hash of type; avoids computation in hash tables
- tflag tflag // extra type information flags
- align uint8 // alignment of variable with this type
- fieldAlign uint8 // alignment of struct field with this type
- kind uint8 // enumeration for C
- // function for comparing objects of this type
- // (ptr to object A, ptr to object B) -> ==?
- equal func(unsafe.Pointer, unsafe.Pointer) bool
- gcdata *byte // garbage collection data
- str nameOff // string form
- ptrToThis typeOff // type for pointer to this type, may be zero
-}
-
-// Method on non-interface type
-type method struct {
- name nameOff // name of method
- mtyp typeOff // method type (without receiver)
- ifn textOff // fn used in interface call (one-word receiver)
- tfn textOff // fn used for normal method call
-}
-
-// uncommonType is present only for defined types or types with methods
-// (if T is a defined type, the uncommonTypes for T and *T have methods).
-// Using a pointer to this struct reduces the overall size required
-// to describe a non-defined type with no methods.
-type uncommonType struct {
- pkgPath nameOff // import path; empty for built-in types like int, string
- mcount uint16 // number of methods
- xcount uint16 // number of exported methods
- moff uint32 // offset from this uncommontype to [mcount]method
- _ uint32 // unused
-}
-
-// chanDir represents a channel type's direction.
-type chanDir int
-
-const (
- recvDir chanDir = 1 << iota // <-chan
- sendDir // chan<-
- bothDir = recvDir | sendDir // chan
-)
-
-// arrayType represents a fixed array type.
-type arrayType struct {
- rtype
- elem *rtype // array element type
- slice *rtype // slice type
- len uintptr
-}
-
-// chanType represents a channel type.
-type chanType struct {
- rtype
- elem *rtype // channel element type
- dir uintptr // channel direction (chanDir)
-}
-
-// funcType represents a function type.
-//
-// A *rtype for each in and out parameter is stored in an array that
-// directly follows the funcType (and possibly its uncommonType). So
-// a function type with one method, one input, and one output is:
-//
-// struct {
-// funcType
-// uncommonType
-// [2]*rtype // [0] is in, [1] is out
-// }
-type funcType struct {
- rtype
- inCount uint16
- outCount uint16 // top bit is set if last input parameter is ...
-}
-
-// imethod represents a method on an interface type
-type imethod struct {
- name nameOff // name of method
- typ typeOff // .(*FuncType) underneath
-}
-
-// interfaceType represents an interface type.
-type interfaceType struct {
- rtype
- pkgPath name // import path
- methods []imethod // sorted by hash
-}
-
-// mapType represents a map type.
-type mapType struct {
- rtype
- key *rtype // map key type
- elem *rtype // map element (value) type
- bucket *rtype // internal bucket structure
- // function for hashing keys (ptr to key, seed) -> hash
- hasher func(unsafe.Pointer, uintptr) uintptr
- keysize uint8 // size of key slot
- valuesize uint8 // size of value slot
- bucketsize uint16 // size of bucket
- flags uint32
-}
-
-// ptrType represents a pointer type.
-type ptrType struct {
- rtype
- elem *rtype // pointer element (pointed at) type
-}
-
-// sliceType represents a slice type.
-type sliceType struct {
- rtype
- elem *rtype // slice element type
-}
-
-// Struct field
-type structField struct {
- name name // name is always non-empty
- typ *rtype // type of field
- offsetEmbed uintptr // byte offset of field<<1 | isEmbedded
-}
-
-func (f *structField) offset() uintptr {
- return f.offsetEmbed >> 1
-}
-
-func (f *structField) embedded() bool {
- return f.offsetEmbed&1 != 0
-}
-
-// structType represents a struct type.
-type structType struct {
- rtype
- pkgPath name
- fields []structField // sorted by offset
-}
-
-// name is an encoded type name with optional extra data.
-//
-// The first byte is a bit field containing:
-//
-// 1<<0 the name is exported
-// 1<<1 tag data follows the name
-// 1<<2 pkgPath nameOff follows the name and tag
-//
-// The next two bytes are the data length:
-//
-// l := uint16(data[1])<<8 | uint16(data[2])
-//
-// Bytes [3:3+l] are the string data.
-//
-// If tag data follows then bytes 3+l and 3+l+1 are the tag length,
-// with the data following.
-//
-// If the import path follows, then 4 bytes at the end of
-// the data form a nameOff. The import path is only set for concrete
-// methods that are defined in a different package than their type.
-//
-// If a name starts with "*", then the exported bit represents
-// whether the pointed to type is exported.
-type name struct {
- bytes *byte
-}
-
-func (n name) data(off int, whySafe string) *byte {
- return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off), whySafe))
-}
-
-func (n name) isExported() bool {
- return (*n.bytes)&(1<<0) != 0
-}
-
-func (n name) hasTag() bool {
- return (*n.bytes)&(1<<1) != 0
-}
-
-// readVarint parses a varint as encoded by encoding/binary.
-// It returns the number of encoded bytes and the encoded value.
-func (n name) readVarint(off int) (int, int) {
- v := 0
- for i := 0; ; i++ {
- x := *n.data(off+i, "read varint")
- v += int(x&0x7f) << (7 * i)
- if x&0x80 == 0 {
- return i + 1, v
- }
- }
-}
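
The name encoding stores lengths as base-128 varints, low seven bits per byte with the high bit as a continuation flag. A standalone decoder with a quick check that 300 encodes as two bytes:

    package main

    import "fmt"

    // decodeVarint mirrors name.readVarint above: accumulate 7 bits per
    // byte, least significant group first, until a byte with the high
    // bit clear ends the value.
    func decodeVarint(data []byte) (n, v int) {
    	for i := 0; ; i++ {
    		x := data[i]
    		v |= int(x&0x7f) << (7 * i)
    		if x&0x80 == 0 {
    			return i + 1, v
    		}
    	}
    }

    func main() {
    	// 300 = 0b1_0010_1100, encoded little-group-first as 0xAC 0x02.
    	n, v := decodeVarint([]byte{0xac, 0x02})
    	fmt.Println(n, v) // 2 300
    }
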
-
-func (n name) name() (s string) {
- if n.bytes == nil {
- return
- }
- i, l := n.readVarint(1)
- hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
- hdr.Data = unsafe.Pointer(n.data(1+i, "non-empty string"))
- hdr.Len = l
- return
-}
-
-func (n name) tag() (s string) {
- if !n.hasTag() {
- return ""
- }
- i, l := n.readVarint(1)
- i2, l2 := n.readVarint(1 + i + l)
- hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
- hdr.Data = unsafe.Pointer(n.data(1+i+l+i2, "non-empty string"))
- hdr.Len = l2
- return
-}
-
-func (n name) pkgPath() string {
- if n.bytes == nil || *n.data(0, "name flag field")&(1<<2) == 0 {
- return ""
- }
- i, l := n.readVarint(1)
- off := 1 + i + l
- if n.hasTag() {
- i2, l2 := n.readVarint(off)
- off += i2 + l2
- }
- var nameOff int32
- // Note that this field may not be aligned in memory,
- // so we cannot use a direct int32 assignment here.
- copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off, "name offset field")))[:])
- pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))}
- return pkgPathName.name()
-}
-
-/*
- * The compiler knows the exact layout of all the data structures above.
- * The compiler does not know about the data structures and methods below.
- */
-
-const (
- kindDirectIface = 1 << 5
- kindGCProg = 1 << 6 // Type.gc points to GC program
- kindMask = (1 << 5) - 1
-)
-
-// String returns the name of k.
-func (k Kind) String() string {
- if int(k) < len(kindNames) {
- return kindNames[k]
- }
- return kindNames[0]
-}
-
-var kindNames = []string{
- Invalid: "invalid",
- Bool: "bool",
- Int: "int",
- Int8: "int8",
- Int16: "int16",
- Int32: "int32",
- Int64: "int64",
- Uint: "uint",
- Uint8: "uint8",
- Uint16: "uint16",
- Uint32: "uint32",
- Uint64: "uint64",
- Uintptr: "uintptr",
- Float32: "float32",
- Float64: "float64",
- Complex64: "complex64",
- Complex128: "complex128",
- Array: "array",
- Chan: "chan",
- Func: "func",
- Interface: "interface",
- Map: "map",
- Ptr: "ptr",
- Slice: "slice",
- String: "string",
- Struct: "struct",
- UnsafePointer: "unsafe.Pointer",
-}
-
-func (t *uncommonType) methods() []method {
- if t.mcount == 0 {
- return nil
- }
- return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.mcount > 0"))[:t.mcount:t.mcount]
-}
-
-func (t *uncommonType) exportedMethods() []method {
- if t.xcount == 0 {
- return nil
- }
- return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.xcount > 0"))[:t.xcount:t.xcount]
-}
-
-// resolveNameOff resolves a name offset from a base pointer.
-// The (*rtype).nameOff method is a convenience wrapper for this function.
-// Implemented in the runtime package.
-func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
-
-// resolveTypeOff resolves an *rtype offset from a base type.
-// The (*rtype).typeOff method is a convenience wrapper for this function.
-// Implemented in the runtime package.
-func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
-
-type nameOff int32 // offset to a name
-type typeOff int32 // offset to an *rtype
-type textOff int32 // offset from top of text section
-
-func (t *rtype) nameOff(off nameOff) name {
- return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))}
-}
-
-func (t *rtype) typeOff(off typeOff) *rtype {
- return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
-}
-
-func (t *rtype) uncommon() *uncommonType {
- if t.tflag&tflagUncommon == 0 {
- return nil
- }
- switch t.Kind() {
- case Struct:
- return &(*structTypeUncommon)(unsafe.Pointer(t)).u
- case Ptr:
- type u struct {
- ptrType
- u uncommonType
- }
- return &(*u)(unsafe.Pointer(t)).u
- case Func:
- type u struct {
- funcType
- u uncommonType
- }
- return &(*u)(unsafe.Pointer(t)).u
- case Slice:
- type u struct {
- sliceType
- u uncommonType
- }
- return &(*u)(unsafe.Pointer(t)).u
- case Array:
- type u struct {
- arrayType
- u uncommonType
- }
- return &(*u)(unsafe.Pointer(t)).u
- case Chan:
- type u struct {
- chanType
- u uncommonType
- }
- return &(*u)(unsafe.Pointer(t)).u
- case Map:
- type u struct {
- mapType
- u uncommonType
- }
- return &(*u)(unsafe.Pointer(t)).u
- case Interface:
- type u struct {
- interfaceType
- u uncommonType
- }
- return &(*u)(unsafe.Pointer(t)).u
- default:
- type u struct {
- rtype
- u uncommonType
- }
- return &(*u)(unsafe.Pointer(t)).u
- }
-}
-
-func (t *rtype) String() string {
- s := t.nameOff(t.str).name()
- if t.tflag&tflagExtraStar != 0 {
- return s[1:]
- }
- return s
-}
-
-func (t *rtype) Size() uintptr { return t.size }
-
-func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
-
-func (t *rtype) pointers() bool { return t.ptrdata != 0 }
-
-func (t *rtype) common() *rtype { return t }
-
-func (t *rtype) exportedMethods() []method {
- ut := t.uncommon()
- if ut == nil {
- return nil
- }
- return ut.exportedMethods()
-}
-
-func (t *rtype) NumMethod() int {
- if t.Kind() == Interface {
- tt := (*interfaceType)(unsafe.Pointer(t))
- return tt.NumMethod()
- }
- return len(t.exportedMethods())
-}
-
-func (t *rtype) PkgPath() string {
- if t.tflag&tflagNamed == 0 {
- return ""
- }
- ut := t.uncommon()
- if ut == nil {
- return ""
- }
- return t.nameOff(ut.pkgPath).name()
-}
-
-func (t *rtype) hasName() bool {
- return t.tflag&tflagNamed != 0
-}
-
-func (t *rtype) Name() string {
- if !t.hasName() {
- return ""
- }
- s := t.String()
- i := len(s) - 1
- for i >= 0 && s[i] != '.' {
- i--
- }
- return s[i+1:]
-}
-
-func (t *rtype) chanDir() chanDir {
- if t.Kind() != Chan {
- panic("reflect: chanDir of non-chan type")
- }
- tt := (*chanType)(unsafe.Pointer(t))
- return chanDir(tt.dir)
-}
-
-func (t *rtype) Elem() Type {
- switch t.Kind() {
- case Array:
- tt := (*arrayType)(unsafe.Pointer(t))
- return toType(tt.elem)
- case Chan:
- tt := (*chanType)(unsafe.Pointer(t))
- return toType(tt.elem)
- case Map:
- tt := (*mapType)(unsafe.Pointer(t))
- return toType(tt.elem)
- case Ptr:
- tt := (*ptrType)(unsafe.Pointer(t))
- return toType(tt.elem)
- case Slice:
- tt := (*sliceType)(unsafe.Pointer(t))
- return toType(tt.elem)
- }
- panic("reflect: Elem of invalid type")
-}
-
-func (t *rtype) In(i int) Type {
- if t.Kind() != Func {
- panic("reflect: In of non-func type")
- }
- tt := (*funcType)(unsafe.Pointer(t))
- return toType(tt.in()[i])
-}
-
-func (t *rtype) Key() Type {
- if t.Kind() != Map {
- panic("reflect: Key of non-map type")
- }
- tt := (*mapType)(unsafe.Pointer(t))
- return toType(tt.key)
-}
-
-func (t *rtype) Len() int {
- if t.Kind() != Array {
- panic("reflect: Len of non-array type")
- }
- tt := (*arrayType)(unsafe.Pointer(t))
- return int(tt.len)
-}
-
-func (t *rtype) NumField() int {
- if t.Kind() != Struct {
- panic("reflect: NumField of non-struct type")
- }
- tt := (*structType)(unsafe.Pointer(t))
- return len(tt.fields)
-}
-
-func (t *rtype) NumIn() int {
- if t.Kind() != Func {
- panic("reflect: NumIn of non-func type")
- }
- tt := (*funcType)(unsafe.Pointer(t))
- return int(tt.inCount)
-}
-
-func (t *rtype) NumOut() int {
- if t.Kind() != Func {
- panic("reflect: NumOut of non-func type")
- }
- tt := (*funcType)(unsafe.Pointer(t))
- return len(tt.out())
-}
-
-func (t *rtype) Out(i int) Type {
- if t.Kind() != Func {
- panic("reflect: Out of non-func type")
- }
- tt := (*funcType)(unsafe.Pointer(t))
- return toType(tt.out()[i])
-}
-
-func (t *funcType) in() []*rtype {
- uadd := unsafe.Sizeof(*t)
- if t.tflag&tflagUncommon != 0 {
- uadd += unsafe.Sizeof(uncommonType{})
- }
- if t.inCount == 0 {
- return nil
- }
- return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.inCount:t.inCount]
-}
-
-func (t *funcType) out() []*rtype {
- uadd := unsafe.Sizeof(*t)
- if t.tflag&tflagUncommon != 0 {
- uadd += unsafe.Sizeof(uncommonType{})
- }
- outCount := t.outCount & (1<<15 - 1)
- if outCount == 0 {
- return nil
- }
- return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "outCount > 0"))[t.inCount : t.inCount+outCount : t.inCount+outCount]
-}
-
-// add returns p+x.
-//
-// The whySafe string is ignored, so that the function still inlines
-// as efficiently as p+x, but all call sites should use the string to
-// record why the addition is safe, which is to say why the addition
-// does not cause x to advance to the very end of p's allocation
-// and therefore point incorrectly at the next block in memory.
-func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
- return unsafe.Pointer(uintptr(p) + x)
-}
-
-// NumMethod returns the number of interface methods in the type's method set.
-func (t *interfaceType) NumMethod() int { return len(t.methods) }
-
-// TypeOf returns the reflection Type that represents the dynamic type of i.
-// If i is a nil interface value, TypeOf returns nil.
-func TypeOf(i any) Type {
- eface := *(*emptyInterface)(unsafe.Pointer(&i))
- return toType(eface.typ)
-}
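
reflectlite.TypeOf behaves like the exported reflect.TypeOf, including returning nil for a nil interface value, whose type word in the empty interface is nil:

    package main

    import (
    	"fmt"
    	"io"
    	"reflect"
    )

    func main() {
    	var r io.Reader // nil interface: no dynamic type
    	fmt.Println(reflect.TypeOf(r))       // <nil>
    	fmt.Println(reflect.TypeOf(42))      // int
    	fmt.Println(reflect.TypeOf("hello")) // string
    }
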
-
-func (t *rtype) Implements(u Type) bool {
- if u == nil {
- panic("reflect: nil type passed to Type.Implements")
- }
- if u.Kind() != Interface {
- panic("reflect: non-interface type passed to Type.Implements")
- }
- return implements(u.(*rtype), t)
-}
-
-func (t *rtype) AssignableTo(u Type) bool {
- if u == nil {
- panic("reflect: nil type passed to Type.AssignableTo")
- }
- uu := u.(*rtype)
- return directlyAssignable(uu, t) || implements(uu, t)
-}
-
-func (t *rtype) Comparable() bool {
- return t.equal != nil
-}
-
-// implements reports whether the type V implements the interface type T.
-func implements(T, V *rtype) bool {
- if T.Kind() != Interface {
- return false
- }
- t := (*interfaceType)(unsafe.Pointer(T))
- if len(t.methods) == 0 {
- return true
- }
-
- // The same algorithm applies in both cases, but the
- // method tables for an interface type and a concrete type
- // are different, so the code is duplicated.
- // In both cases the algorithm is a linear scan over the two
- // lists - T's methods and V's methods - simultaneously.
- // Since method tables are stored in a unique sorted order
- // (alphabetical, with no duplicate method names), the scan
- // through V's methods must hit a match for each of T's
- // methods along the way, or else V does not implement T.
- // This lets us run the scan in overall linear time instead of
- // the quadratic time a naive search would require.
- // See also ../runtime/iface.go.
- if V.Kind() == Interface {
- v := (*interfaceType)(unsafe.Pointer(V))
- i := 0
- for j := 0; j < len(v.methods); j++ {
- tm := &t.methods[i]
- tmName := t.nameOff(tm.name)
- vm := &v.methods[j]
- vmName := V.nameOff(vm.name)
- if vmName.name() == tmName.name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) {
- if !tmName.isExported() {
- tmPkgPath := tmName.pkgPath()
- if tmPkgPath == "" {
- tmPkgPath = t.pkgPath.name()
- }
- vmPkgPath := vmName.pkgPath()
- if vmPkgPath == "" {
- vmPkgPath = v.pkgPath.name()
- }
- if tmPkgPath != vmPkgPath {
- continue
- }
- }
- if i++; i >= len(t.methods) {
- return true
- }
- }
- }
- return false
- }
-
- v := V.uncommon()
- if v == nil {
- return false
- }
- i := 0
- vmethods := v.methods()
- for j := 0; j < int(v.mcount); j++ {
- tm := &t.methods[i]
- tmName := t.nameOff(tm.name)
- vm := vmethods[j]
- vmName := V.nameOff(vm.name)
- if vmName.name() == tmName.name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) {
- if !tmName.isExported() {
- tmPkgPath := tmName.pkgPath()
- if tmPkgPath == "" {
- tmPkgPath = t.pkgPath.name()
- }
- vmPkgPath := vmName.pkgPath()
- if vmPkgPath == "" {
- vmPkgPath = V.nameOff(v.pkgPath).name()
- }
- if tmPkgPath != vmPkgPath {
- continue
- }
- }
- if i++; i >= len(t.methods) {
- return true
- }
- }
- }
- return false
-}
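
Because both method tables are sorted by name, the check above is a single merged scan rather than a nested loop. The exported reflect API exposes the same test:

    package main

    import (
    	"fmt"
    	"io"
    	"reflect"
    	"strings"
    )

    func main() {
    	readerT := reflect.TypeOf((*io.Reader)(nil)).Elem()
    	fmt.Println(reflect.TypeOf(strings.NewReader("")).Implements(readerT)) // true
    	fmt.Println(reflect.TypeOf(&strings.Builder{}).Implements(readerT))    // false
    }
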
-
-// directlyAssignable reports whether a value x of type V can be directly
-// assigned (using memmove) to a value of type T.
-// https://golang.org/doc/go_spec.html#Assignability
-// Ignoring the interface rules (implemented elsewhere)
-// and the ideal constant rules (no ideal constants at run time).
-func directlyAssignable(T, V *rtype) bool {
- // x's type V is identical to T?
- if T == V {
- return true
- }
-
- // Otherwise at least one of T and V must not be defined
- // and they must have the same kind.
- if T.hasName() && V.hasName() || T.Kind() != V.Kind() {
- return false
- }
-
- // x's type T and V must have identical underlying types.
- return haveIdenticalUnderlyingType(T, V, true)
-}
-
-func haveIdenticalType(T, V Type, cmpTags bool) bool {
- if cmpTags {
- return T == V
- }
-
- if T.Name() != V.Name() || T.Kind() != V.Kind() {
- return false
- }
-
- return haveIdenticalUnderlyingType(T.common(), V.common(), false)
-}
-
-func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
- if T == V {
- return true
- }
-
- kind := T.Kind()
- if kind != V.Kind() {
- return false
- }
-
-// Non-composite types of equal kind have the same underlying type
- // (the predefined instance of the type).
- if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
- return true
- }
-
- // Composite types.
- switch kind {
- case Array:
- return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
-
- case Chan:
- // Special case:
- // x is a bidirectional channel value, T is a channel type,
- // and x's type V and T have identical element types.
- if V.chanDir() == bothDir && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) {
- return true
- }
-
- // Otherwise continue test for identical underlying type.
- return V.chanDir() == T.chanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
-
- case Func:
- t := (*funcType)(unsafe.Pointer(T))
- v := (*funcType)(unsafe.Pointer(V))
- if t.outCount != v.outCount || t.inCount != v.inCount {
- return false
- }
- for i := 0; i < t.NumIn(); i++ {
- if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
- return false
- }
- }
- for i := 0; i < t.NumOut(); i++ {
- if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
- return false
- }
- }
- return true
-
- case Interface:
- t := (*interfaceType)(unsafe.Pointer(T))
- v := (*interfaceType)(unsafe.Pointer(V))
- if len(t.methods) == 0 && len(v.methods) == 0 {
- return true
- }
- // Might have the same methods but still
- // need a run time conversion.
- return false
-
- case Map:
- return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
-
- case Ptr, Slice:
- return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
-
- case Struct:
- t := (*structType)(unsafe.Pointer(T))
- v := (*structType)(unsafe.Pointer(V))
- if len(t.fields) != len(v.fields) {
- return false
- }
- if t.pkgPath.name() != v.pkgPath.name() {
- return false
- }
- for i := range t.fields {
- tf := &t.fields[i]
- vf := &v.fields[i]
- if tf.name.name() != vf.name.name() {
- return false
- }
- if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
- return false
- }
- if cmpTags && tf.name.tag() != vf.name.tag() {
- return false
- }
- if tf.offsetEmbed != vf.offsetEmbed {
- return false
- }
- }
- return true
- }
-
- return false
-}
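-// Note the effect of cmpTags (illustrative sketch): struct types differing
-// only in field tags have identical underlying types when tags are ignored,
-// which is what makes the corresponding Go conversion legal since Go 1.8:
-//
-//	type A struct {
-//		X int `json:"x"`
-//	}
-//	type B struct{ X int }
-//	var b B = B(A{X: 1}) // conversion ignores tags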
-
-type structTypeUncommon struct {
- structType
- u uncommonType
-}
-
-// toType converts from a *rtype to a Type that can be returned
-// to the client of package reflect. In gc, the only concern is that
-// a nil *rtype must be replaced by a nil Type, but in gccgo this
-// function takes care of ensuring that multiple *rtype for the same
-// type are coalesced into a single Type.
-func toType(t *rtype) Type {
- if t == nil {
- return nil
- }
- return t
-}
-
-// ifaceIndir reports whether t is stored indirectly in an interface value.
-func ifaceIndir(t *rtype) bool {
- return t.kind&kindDirectIface == 0
-}
diff --git a/contrib/go/_std_1.18/src/internal/reflectlite/value.go b/contrib/go/_std_1.18/src/internal/reflectlite/value.go
deleted file mode 100644
index 966230f581..0000000000
--- a/contrib/go/_std_1.18/src/internal/reflectlite/value.go
+++ /dev/null
@@ -1,476 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package reflectlite
-
-import (
- "internal/goarch"
- "internal/unsafeheader"
- "runtime"
- "unsafe"
-)
-
-// Value is the reflection interface to a Go value.
-//
-// Not all methods apply to all kinds of values. Restrictions,
-// if any, are noted in the documentation for each method.
-// Use the Kind method to find out the kind of value before
-// calling kind-specific methods. Calling a method
-// inappropriate to the kind of type causes a run time panic.
-//
-// The zero Value represents no value.
-// Its IsValid method returns false, its Kind method returns Invalid,
-// its String method returns "<invalid Value>", and all other methods panic.
-// Most functions and methods never return an invalid value.
-// If one does, its documentation states the conditions explicitly.
-//
-// A Value can be used concurrently by multiple goroutines provided that
-// the underlying Go value can be used concurrently for the equivalent
-// direct operations.
-//
-// To compare two Values, compare the results of the Interface method.
-// Using == on two Values does not compare the underlying values
-// they represent.
-type Value struct {
- // typ holds the type of the value represented by a Value.
- typ *rtype
-
- // Pointer-valued data or, if flagIndir is set, pointer to data.
- // Valid when either flagIndir is set or typ.pointers() is true.
- ptr unsafe.Pointer
-
- // flag holds metadata about the value.
- // The lowest bits are flag bits:
- // - flagStickyRO: obtained via unexported not embedded field, so read-only
- // - flagEmbedRO: obtained via unexported embedded field, so read-only
- // - flagIndir: val holds a pointer to the data
- // - flagAddr: v.CanAddr is true (implies flagIndir)
- // Value cannot represent method values.
- // The next five bits give the Kind of the value.
- // This repeats typ.Kind() except for method values.
- // The remaining 23+ bits give a method number for method values.
- // If flag.kind() != Func, code can assume that flagMethod is unset.
- // If ifaceIndir(typ), code can assume that flagIndir is set.
- flag
-
- // A method value represents a curried method invocation
- // like r.Read for some receiver r. The typ+val+flag bits describe
- // the receiver r, but the flag's Kind bits say Func (methods are
- // functions), and the top bits of the flag give the method number
- // in r's type's method table.
-}
-
-type flag uintptr
-
-const (
- flagKindWidth = 5 // there are 27 kinds
- flagKindMask flag = 1<<flagKindWidth - 1
- flagStickyRO flag = 1 << 5
- flagEmbedRO flag = 1 << 6
- flagIndir flag = 1 << 7
- flagAddr flag = 1 << 8
- flagMethod flag = 1 << 9
- flagMethodShift = 10
- flagRO flag = flagStickyRO | flagEmbedRO
-)
-
-func (f flag) kind() Kind {
- return Kind(f & flagKindMask)
-}
-
-func (f flag) ro() flag {
- if f&flagRO != 0 {
- return flagStickyRO
- }
- return 0
-}
-
-// pointer returns the underlying pointer represented by v.
-// v.Kind() must be Pointer, Map, Chan, Func, or UnsafePointer
-func (v Value) pointer() unsafe.Pointer {
- if v.typ.size != goarch.PtrSize || !v.typ.pointers() {
- panic("can't call pointer on a non-pointer Value")
- }
- if v.flag&flagIndir != 0 {
- return *(*unsafe.Pointer)(v.ptr)
- }
- return v.ptr
-}
-
-// packEface converts v to the empty interface.
-func packEface(v Value) any {
- t := v.typ
- var i any
- e := (*emptyInterface)(unsafe.Pointer(&i))
- // First, fill in the data portion of the interface.
- switch {
- case ifaceIndir(t):
- if v.flag&flagIndir == 0 {
- panic("bad indir")
- }
- // Value is indirect, and so is the interface we're making.
- ptr := v.ptr
- if v.flag&flagAddr != 0 {
- // TODO: pass safe boolean from valueInterface so
- // we don't need to copy if safe==true?
- c := unsafe_New(t)
- typedmemmove(t, c, ptr)
- ptr = c
- }
- e.word = ptr
- case v.flag&flagIndir != 0:
- // Value is indirect, but interface is direct. We need
- // to load the data at v.ptr into the interface data word.
- e.word = *(*unsafe.Pointer)(v.ptr)
- default:
- // Value is direct, and so is the interface.
- e.word = v.ptr
- }
- // Now, fill in the type portion. We're very careful here not
- // to have any operation between the e.word and e.typ assignments
- // that would let the garbage collector observe the partially-built
- // interface value.
- e.typ = t
- return i
-}
-
-// unpackEface converts the empty interface i to a Value.
-func unpackEface(i any) Value {
- e := (*emptyInterface)(unsafe.Pointer(&i))
- // NOTE: don't read e.word until we know whether it is really a pointer or not.
- t := e.typ
- if t == nil {
- return Value{}
- }
- f := flag(t.Kind())
- if ifaceIndir(t) {
- f |= flagIndir
- }
- return Value{t, e.word, f}
-}
-
-// A ValueError occurs when a Value method is invoked on
-// a Value that does not support it. Such cases are documented
-// in the description of each method.
-type ValueError struct {
- Method string
- Kind Kind
-}
-
-func (e *ValueError) Error() string {
- if e.Kind == 0 {
- return "reflect: call of " + e.Method + " on zero Value"
- }
- return "reflect: call of " + e.Method + " on " + e.Kind.String() + " Value"
-}
-
-// methodName returns the name of the calling method,
-// assumed to be two stack frames above.
-func methodName() string {
- pc, _, _, _ := runtime.Caller(2)
- f := runtime.FuncForPC(pc)
- if f == nil {
- return "unknown method"
- }
- return f.Name()
-}
-
-// emptyInterface is the header for an interface{} value.
-type emptyInterface struct {
- typ *rtype
- word unsafe.Pointer
-}
-
-// mustBeExported panics if f records that the value was obtained using
-// an unexported field.
-func (f flag) mustBeExported() {
- if f == 0 {
- panic(&ValueError{methodName(), 0})
- }
- if f&flagRO != 0 {
- panic("reflect: " + methodName() + " using value obtained using unexported field")
- }
-}
-
-// mustBeAssignable panics if f records that the value is not assignable,
-// which is to say that either it was obtained using an unexported field
-// or it is not addressable.
-func (f flag) mustBeAssignable() {
- if f == 0 {
- panic(&ValueError{methodName(), Invalid})
- }
- // Assignable if addressable and not read-only.
- if f&flagRO != 0 {
- panic("reflect: " + methodName() + " using value obtained using unexported field")
- }
- if f&flagAddr == 0 {
- panic("reflect: " + methodName() + " using unaddressable value")
- }
-}
-
-// CanSet reports whether the value of v can be changed.
-// A Value can be changed only if it is addressable and was not
-// obtained by the use of unexported struct fields.
-// If CanSet returns false, calling Set or any type-specific
-// setter (e.g., SetBool, SetInt) will panic.
-func (v Value) CanSet() bool {
- return v.flag&(flagAddr|flagRO) == flagAddr
-}
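-// A sketch using the exported reflect API, which this package mirrors:
-//
-//	var x int
-//	reflect.ValueOf(x).CanSet()         // false: the Value holds a copy of x
-//	reflect.ValueOf(&x).Elem().CanSet() // true: addressable and exported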
-
-// Elem returns the value that the interface v contains
-// or that the pointer v points to.
-// It panics if v's Kind is not Interface or Pointer.
-// It returns the zero Value if v is nil.
-func (v Value) Elem() Value {
- k := v.kind()
- switch k {
- case Interface:
- var eface any
- if v.typ.NumMethod() == 0 {
- eface = *(*any)(v.ptr)
- } else {
- eface = (any)(*(*interface {
- M()
- })(v.ptr))
- }
- x := unpackEface(eface)
- if x.flag != 0 {
- x.flag |= v.flag.ro()
- }
- return x
- case Pointer:
- ptr := v.ptr
- if v.flag&flagIndir != 0 {
- ptr = *(*unsafe.Pointer)(ptr)
- }
- // The returned value's address is v's value.
- if ptr == nil {
- return Value{}
- }
- tt := (*ptrType)(unsafe.Pointer(v.typ))
- typ := tt.elem
- fl := v.flag&flagRO | flagIndir | flagAddr
- fl |= flag(typ.Kind())
- return Value{typ, ptr, fl}
- }
- panic(&ValueError{"reflectlite.Value.Elem", v.kind()})
-}
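-// For example (sketch via the exported reflect API, which this package mirrors):
-//
-//	x := 42
-//	p := reflect.ValueOf(&x)
-//	p.Kind()        // reflect.Pointer
-//	p.Elem().Kind() // reflect.Int: the value p points to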
-
-func valueInterface(v Value) any {
- if v.flag == 0 {
- panic(&ValueError{"reflectlite.Value.Interface", 0})
- }
-
- if v.kind() == Interface {
- // Special case: return the element inside the interface.
- // Empty interface has one layout, all interfaces with
- // methods have a second layout.
- if v.numMethod() == 0 {
- return *(*any)(v.ptr)
- }
- return *(*interface {
- M()
- })(v.ptr)
- }
-
- // TODO: pass safe to packEface so we don't need to copy if safe==true?
- return packEface(v)
-}
-
-// IsNil reports whether its argument v is nil. The argument must be
-// a chan, func, interface, map, pointer, or slice value; if it is
-// not, IsNil panics. Note that IsNil is not always equivalent to a
-// regular comparison with nil in Go. For example, if v was created
-// by calling ValueOf with an uninitialized interface variable i,
-// i==nil will be true but v.IsNil will panic as v will be the zero
-// Value.
-func (v Value) IsNil() bool {
- k := v.kind()
- switch k {
- case Chan, Func, Map, Pointer, UnsafePointer:
- // if v.flag&flagMethod != 0 {
- // return false
- // }
- ptr := v.ptr
- if v.flag&flagIndir != 0 {
- ptr = *(*unsafe.Pointer)(ptr)
- }
- return ptr == nil
- case Interface, Slice:
- // Both interface and slice are nil if first word is 0.
- // Both are always bigger than a word; assume flagIndir.
- return *(*unsafe.Pointer)(v.ptr) == nil
- }
- panic(&ValueError{"reflectlite.Value.IsNil", v.kind()})
-}
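-// The documented subtlety, sketched with the exported reflect API:
-//
-//	var err error             // uninitialized interface variable
-//	v := reflect.ValueOf(err) // zero Value, because err == nil
-//	v.IsValid()               // false; v.IsNil() would panic
-//
-//	var p *int
-//	reflect.ValueOf(p).IsNil() // true: a typed nil pointer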
-
-// IsValid reports whether v represents a value.
-// It returns false if v is the zero Value.
-// If IsValid returns false, all other methods except String panic.
-// Most functions and methods never return an invalid Value.
-// If one does, its documentation states the conditions explicitly.
-func (v Value) IsValid() bool {
- return v.flag != 0
-}
-
-// Kind returns v's Kind.
-// If v is the zero Value (IsValid returns false), Kind returns Invalid.
-func (v Value) Kind() Kind {
- return v.kind()
-}
-
-// implemented in runtime:
-func chanlen(unsafe.Pointer) int
-func maplen(unsafe.Pointer) int
-
-// Len returns v's length.
-// It panics if v's Kind is not Array, Chan, Map, Slice, or String.
-func (v Value) Len() int {
- k := v.kind()
- switch k {
- case Array:
- tt := (*arrayType)(unsafe.Pointer(v.typ))
- return int(tt.len)
- case Chan:
- return chanlen(v.pointer())
- case Map:
- return maplen(v.pointer())
- case Slice:
- // Slice is bigger than a word; assume flagIndir.
- return (*unsafeheader.Slice)(v.ptr).Len
- case String:
- // String is bigger than a word; assume flagIndir.
- return (*unsafeheader.String)(v.ptr).Len
- }
- panic(&ValueError{"reflect.Value.Len", v.kind()})
-}
-
-// numMethod returns the number of exported methods in the value's method set.
-func (v Value) numMethod() int {
- if v.typ == nil {
- panic(&ValueError{"reflectlite.Value.NumMethod", Invalid})
- }
- return v.typ.NumMethod()
-}
-
-// Set assigns x to the value v.
-// It panics if CanSet returns false.
-// As in Go, x's value must be assignable to v's type.
-func (v Value) Set(x Value) {
- v.mustBeAssignable()
- x.mustBeExported() // do not let unexported x leak
- var target unsafe.Pointer
- if v.kind() == Interface {
- target = v.ptr
- }
- x = x.assignTo("reflectlite.Set", v.typ, target)
- if x.flag&flagIndir != 0 {
- typedmemmove(v.typ, v.ptr, x.ptr)
- } else {
- *(*unsafe.Pointer)(v.ptr) = x.ptr
- }
-}
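-// For example (sketch via the exported reflect API, which this package mirrors):
-//
-//	var x int
-//	reflect.ValueOf(&x).Elem().Set(reflect.ValueOf(7)) // x is now 7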
-
-// Type returns v's type.
-func (v Value) Type() Type {
- f := v.flag
- if f == 0 {
- panic(&ValueError{"reflectlite.Value.Type", Invalid})
- }
- // Method values not supported.
- return v.typ
-}
-
-/*
- * constructors
- */
-
-// implemented in package runtime
-func unsafe_New(*rtype) unsafe.Pointer
-
-// ValueOf returns a new Value initialized to the concrete value
-// stored in the interface i. ValueOf(nil) returns the zero Value.
-func ValueOf(i any) Value {
- if i == nil {
- return Value{}
- }
-
- // TODO: Maybe allow contents of a Value to live on the stack.
- // For now we make the contents always escape to the heap. It
- // makes life easier in a few places (see chanrecv/mapassign
- // comment below).
- escapes(i)
-
- return unpackEface(i)
-}
-
-// assignTo returns a value v that can be assigned directly to typ.
-// It panics if v is not assignable to typ.
-// For a conversion to an interface type, target is a suggested scratch space to use.
-func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value {
- // if v.flag&flagMethod != 0 {
- // v = makeMethodValue(context, v)
- // }
-
- switch {
- case directlyAssignable(dst, v.typ):
- // Overwrite type so that they match.
- // Same memory layout, so no harm done.
- fl := v.flag&(flagAddr|flagIndir) | v.flag.ro()
- fl |= flag(dst.Kind())
- return Value{dst, v.ptr, fl}
-
- case implements(dst, v.typ):
- if target == nil {
- target = unsafe_New(dst)
- }
- if v.Kind() == Interface && v.IsNil() {
- // A nil ReadWriter passed to nil Reader is OK,
- // but using ifaceE2I below will panic.
- // Avoid the panic by returning a nil dst (e.g., Reader) explicitly.
- return Value{dst, nil, flag(Interface)}
- }
- x := valueInterface(v)
- if dst.NumMethod() == 0 {
- *(*any)(target) = x
- } else {
- ifaceE2I(dst, x, target)
- }
- return Value{dst, target, flagIndir | flag(Interface)}
- }
-
- // Failed.
- panic(context + ": value of type " + v.typ.String() + " is not assignable to type " + dst.String())
-}
-
-// arrayAt returns the i-th element of p,
-// an array whose elements are eltSize bytes wide.
-// The array pointed at by p must have at least i+1 elements:
-// it is invalid (but impossible to check here) to pass i >= len,
-// because then the result will point outside the array.
-// whySafe must explain why i < len. (Passing "i < len" is fine;
-// the benefit is to surface this assumption at the call site.)
-func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer {
- return add(p, uintptr(i)*eltSize, "i < len")
-}
-
-func ifaceE2I(t *rtype, src any, dst unsafe.Pointer)
-
-// typedmemmove copies a value of type t to dst from src.
-//go:noescape
-func typedmemmove(t *rtype, dst, src unsafe.Pointer)
-
-// Dummy annotation marking that the value x escapes,
-// for use in cases where the reflect code is so clever that
-// the compiler cannot follow.
-func escapes(x any) {
- if dummy.b {
- dummy.x = x
- }
-}
-
-var dummy struct {
- b bool
- x any
-}
diff --git a/contrib/go/_std_1.18/src/internal/syscall/unix/at.go b/contrib/go/_std_1.18/src/internal/syscall/unix/at.go
deleted file mode 100644
index 447d48e198..0000000000
--- a/contrib/go/_std_1.18/src/internal/syscall/unix/at.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux || openbsd || netbsd || dragonfly
-
-package unix
-
-import (
- "syscall"
- "unsafe"
-)
-
-func Unlinkat(dirfd int, path string, flags int) error {
- var p *byte
- p, err := syscall.BytePtrFromString(path)
- if err != nil {
- return err
- }
-
- _, _, errno := syscall.Syscall(unlinkatTrap, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(flags))
- if errno != 0 {
- return errno
- }
-
- return nil
-}
-
-func Openat(dirfd int, path string, flags int, perm uint32) (int, error) {
- var p *byte
- p, err := syscall.BytePtrFromString(path)
- if err != nil {
- return 0, err
- }
-
- fd, _, errno := syscall.Syscall6(openatTrap, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(flags), uintptr(perm), 0, 0)
- if errno != 0 {
- return 0, errno
- }
-
- return int(fd), nil
-}
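-// Usage sketch; this package is internal to the standard library, so outside
-// code would reach the same call through golang.org/x/sys/unix instead.
-// dirfd is assumed to be an already-open directory descriptor:
-//
-//	fd, err := Openat(dirfd, "data.txt", syscall.O_RDONLY, 0)
-//	if err != nil {
-//		return err
-//	}
-//	defer syscall.Close(fd)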
-
-func Fstatat(dirfd int, path string, stat *syscall.Stat_t, flags int) error {
- var p *byte
- p, err := syscall.BytePtrFromString(path)
- if err != nil {
- return err
- }
-
- _, _, errno := syscall.Syscall6(fstatatTrap, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
- if errno != 0 {
- return errno
- }
-
- return nil
-}
diff --git a/contrib/go/_std_1.18/src/internal/syscall/unix/net.go b/contrib/go/_std_1.18/src/internal/syscall/unix/net.go
deleted file mode 100644
index 85632e1c03..0000000000
--- a/contrib/go/_std_1.18/src/internal/syscall/unix/net.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-package unix
-
-import (
- "syscall"
- _ "unsafe"
-)
-
-//go:linkname RecvfromInet4 syscall.recvfromInet4
-//go:noescape
-func RecvfromInet4(fd int, p []byte, flags int, from *syscall.SockaddrInet4) (int, error)
-
-//go:linkname RecvfromInet6 syscall.recvfromInet6
-//go:noescape
-func RecvfromInet6(fd int, p []byte, flags int, from *syscall.SockaddrInet6) (n int, err error)
-
-//go:linkname SendtoInet4 syscall.sendtoInet4
-//go:noescape
-func SendtoInet4(fd int, p []byte, flags int, to *syscall.SockaddrInet4) (err error)
-
-//go:linkname SendtoInet6 syscall.sendtoInet6
-//go:noescape
-func SendtoInet6(fd int, p []byte, flags int, to *syscall.SockaddrInet6) (err error)
-
-//go:linkname SendmsgNInet4 syscall.sendmsgNInet4
-//go:noescape
-func SendmsgNInet4(fd int, p, oob []byte, to *syscall.SockaddrInet4, flags int) (n int, err error)
-
-//go:linkname SendmsgNInet6 syscall.sendmsgNInet6
-//go:noescape
-func SendmsgNInet6(fd int, p, oob []byte, to *syscall.SockaddrInet6, flags int) (n int, err error)
-
-//go:linkname RecvmsgInet4 syscall.recvmsgInet4
-//go:noescape
-func RecvmsgInet4(fd int, p, oob []byte, flags int, from *syscall.SockaddrInet4) (n, oobn int, recvflags int, err error)
-
-//go:linkname RecvmsgInet6 syscall.recvmsgInet6
-//go:noescape
-func RecvmsgInet6(fd int, p, oob []byte, flags int, from *syscall.SockaddrInet6) (n, oobn int, recvflags int, err error)
diff --git a/contrib/go/_std_1.18/src/internal/syscall/unix/nonblocking_libc.go b/contrib/go/_std_1.18/src/internal/syscall/unix/nonblocking_libc.go
deleted file mode 100644
index 75c6e92a6e..0000000000
--- a/contrib/go/_std_1.18/src/internal/syscall/unix/nonblocking_libc.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || solaris
-
-package unix
-
-import (
- "syscall"
- _ "unsafe" // for go:linkname
-)
-
-func IsNonblock(fd int) (nonblocking bool, err error) {
- flag, e1 := fcntl(fd, syscall.F_GETFL, 0)
- if e1 != nil {
- return false, e1
- }
- return flag&syscall.O_NONBLOCK != 0, nil
-}
-
-// Implemented in the syscall package.
-//go:linkname fcntl syscall.fcntl
-func fcntl(fd int, cmd int, arg int) (int, error)
diff --git a/contrib/go/_std_1.18/src/io/fs/fs.go b/contrib/go/_std_1.18/src/io/fs/fs.go
deleted file mode 100644
index 5c0d9a6664..0000000000
--- a/contrib/go/_std_1.18/src/io/fs/fs.go
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package fs defines basic interfaces to a file system.
-// A file system can be provided by the host operating system
-// but also by other packages.
-package fs
-
-import (
- "internal/oserror"
- "time"
- "unicode/utf8"
-)
-
-// An FS provides access to a hierarchical file system.
-//
-// The FS interface is the minimum implementation required of the file system.
-// A file system may implement additional interfaces,
-// such as ReadFileFS, to provide additional or optimized functionality.
-type FS interface {
- // Open opens the named file.
- //
- // When Open returns an error, it should be of type *PathError
- // with the Op field set to "open", the Path field set to name,
- // and the Err field describing the problem.
- //
- // Open should reject attempts to open names that do not satisfy
- // ValidPath(name), returning a *PathError with Err set to
- // ErrInvalid or ErrNotExist.
- Open(name string) (File, error)
-}
-
-// ValidPath reports whether the given path name
-// is valid for use in a call to Open.
-//
-// Path names passed to open are UTF-8-encoded,
-// unrooted, slash-separated sequences of path elements, like “x/y/z”.
-// Path names must not contain an element that is “.” or “..” or the empty string,
-// except for the special case that the root directory is named “.”.
-// Paths must not start or end with a slash: “/x” and “x/” are invalid.
-//
-// Note that paths are slash-separated on all systems, even Windows.
-// Paths containing other characters such as backslash and colon
-// are accepted as valid, but those characters must never be
-// interpreted by an FS implementation as path element separators.
-func ValidPath(name string) bool {
- if !utf8.ValidString(name) {
- return false
- }
-
- if name == "." {
- // special case
- return true
- }
-
- // Iterate over elements in name, checking each.
- for {
- i := 0
- for i < len(name) && name[i] != '/' {
- i++
- }
- elem := name[:i]
- if elem == "" || elem == "." || elem == ".." {
- return false
- }
- if i == len(name) {
- return true // reached clean ending
- }
- name = name[i+1:]
- }
-}
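-// For example (illustrative):
-//
-//	ValidPath(".")      // true: the root directory
-//	ValidPath("x/y/z")  // true
-//	ValidPath("/x")     // false: rooted
-//	ValidPath("x/")     // false: trailing slash
-//	ValidPath("x/../y") // false: ".." element
-//	ValidPath("")       // false: empty element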
-
-// A File provides access to a single file.
-// The File interface is the minimum implementation required of the file.
-// Directory files should also implement ReadDirFile.
-// A file may implement io.ReaderAt or io.Seeker as optimizations.
-type File interface {
- Stat() (FileInfo, error)
- Read([]byte) (int, error)
- Close() error
-}
-
-// A DirEntry is an entry read from a directory
-// (using the ReadDir function or a ReadDirFile's ReadDir method).
-type DirEntry interface {
- // Name returns the name of the file (or subdirectory) described by the entry.
- // This name is only the final element of the path (the base name), not the entire path.
- // For example, Name would return "hello.go" not "home/gopher/hello.go".
- Name() string
-
- // IsDir reports whether the entry describes a directory.
- IsDir() bool
-
- // Type returns the type bits for the entry.
- // The type bits are a subset of the usual FileMode bits, those returned by the FileMode.Type method.
- Type() FileMode
-
- // Info returns the FileInfo for the file or subdirectory described by the entry.
- // The returned FileInfo may be from the time of the original directory read
- // or from the time of the call to Info. If the file has been removed or renamed
- // since the directory read, Info may return an error satisfying errors.Is(err, ErrNotExist).
- // If the entry denotes a symbolic link, Info reports the information about the link itself,
- // not the link's target.
- Info() (FileInfo, error)
-}
-
-// A ReadDirFile is a directory file whose entries can be read with the ReadDir method.
-// Every directory file should implement this interface.
-// (It is permissible for any file to implement this interface,
-// but if so ReadDir should return an error for non-directories.)
-type ReadDirFile interface {
- File
-
- // ReadDir reads the contents of the directory and returns
- // a slice of up to n DirEntry values in directory order.
- // Subsequent calls on the same file will yield further DirEntry values.
- //
- // If n > 0, ReadDir returns at most n DirEntry structures.
- // In this case, if ReadDir returns an empty slice, it will return
- // a non-nil error explaining why.
- // At the end of a directory, the error is io.EOF.
- //
- // If n <= 0, ReadDir returns all the DirEntry values from the directory
- // in a single slice. In this case, if ReadDir succeeds (reads all the way
- // to the end of the directory), it returns the slice and a nil error.
- // If it encounters an error before the end of the directory,
- // ReadDir returns the DirEntry list read until that point and a non-nil error.
- ReadDir(n int) ([]DirEntry, error)
-}
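-// A batched read loop following the n > 0 contract (sketch; dir stands for
-// some ReadDirFile):
-//
-//	for {
-//		ents, err := dir.ReadDir(16)
-//		for _, e := range ents {
-//			fmt.Println(e.Name())
-//		}
-//		if err == io.EOF {
-//			break
-//		}
-//		if err != nil {
-//			return err
-//		}
-//	}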
-
-// Generic file system errors.
-// Errors returned by file systems can be tested against these errors
-// using errors.Is.
-var (
- ErrInvalid = errInvalid() // "invalid argument"
- ErrPermission = errPermission() // "permission denied"
- ErrExist = errExist() // "file already exists"
- ErrNotExist = errNotExist() // "file does not exist"
- ErrClosed = errClosed() // "file already closed"
-)
-
-func errInvalid() error { return oserror.ErrInvalid }
-func errPermission() error { return oserror.ErrPermission }
-func errExist() error { return oserror.ErrExist }
-func errNotExist() error { return oserror.ErrNotExist }
-func errClosed() error { return oserror.ErrClosed }
-
-// A FileInfo describes a file and is returned by Stat.
-type FileInfo interface {
- Name() string // base name of the file
- Size() int64 // length in bytes for regular files; system-dependent for others
- Mode() FileMode // file mode bits
- ModTime() time.Time // modification time
- IsDir() bool // abbreviation for Mode().IsDir()
- Sys() any // underlying data source (can return nil)
-}
-
-// A FileMode represents a file's mode and permission bits.
-// The bits have the same definition on all systems, so that
-// information about files can be moved from one system
-// to another portably. Not all bits apply to all systems.
-// The only required bit is ModeDir for directories.
-type FileMode uint32
-
-// The defined file mode bits are the most significant bits of the FileMode.
-// The nine least-significant bits are the standard Unix rwxrwxrwx permissions.
-// The values of these bits should be considered part of the public API and
-// may be used in wire protocols or disk representations: they must not be
-// changed, although new bits might be added.
-const (
- // The single letters are the abbreviations
- // used by the String method's formatting.
- ModeDir FileMode = 1 << (32 - 1 - iota) // d: is a directory
- ModeAppend // a: append-only
- ModeExclusive // l: exclusive use
- ModeTemporary // T: temporary file; Plan 9 only
- ModeSymlink // L: symbolic link
- ModeDevice // D: device file
- ModeNamedPipe // p: named pipe (FIFO)
- ModeSocket // S: Unix domain socket
- ModeSetuid // u: setuid
- ModeSetgid // g: setgid
- ModeCharDevice // c: Unix character device, when ModeDevice is set
- ModeSticky // t: sticky
- ModeIrregular // ?: non-regular file; nothing else is known about this file
-
- // Mask for the type bits. For regular files, none will be set.
- ModeType = ModeDir | ModeSymlink | ModeNamedPipe | ModeSocket | ModeDevice | ModeCharDevice | ModeIrregular
-
- ModePerm FileMode = 0777 // Unix permission bits
-)
-
-func (m FileMode) String() string {
- const str = "dalTLDpSugct?"
- var buf [32]byte // Mode is uint32.
- w := 0
- for i, c := range str {
- if m&(1<<uint(32-1-i)) != 0 {
- buf[w] = byte(c)
- w++
- }
- }
- if w == 0 {
- buf[w] = '-'
- w++
- }
- const rwx = "rwxrwxrwx"
- for i, c := range rwx {
- if m&(1<<uint(9-1-i)) != 0 {
- buf[w] = byte(c)
- } else {
- buf[w] = '-'
- }
- w++
- }
- return string(buf[:w])
-}
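-// For example (illustrative):
-//
-//	(ModeDir | 0755).String() // "drwxr-xr-x"
-//	FileMode(0644).String()   // "-rw-r--r--"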
-
-// IsDir reports whether m describes a directory.
-// That is, it tests for the ModeDir bit being set in m.
-func (m FileMode) IsDir() bool {
- return m&ModeDir != 0
-}
-
-// IsRegular reports whether m describes a regular file.
-// That is, it tests that no mode type bits are set.
-func (m FileMode) IsRegular() bool {
- return m&ModeType == 0
-}
-
-// Perm returns the Unix permission bits in m (m & ModePerm).
-func (m FileMode) Perm() FileMode {
- return m & ModePerm
-}
-
-// Type returns type bits in m (m & ModeType).
-func (m FileMode) Type() FileMode {
- return m & ModeType
-}
-
-// PathError records an error and the operation and file path that caused it.
-type PathError struct {
- Op string
- Path string
- Err error
-}
-
-func (e *PathError) Error() string { return e.Op + " " + e.Path + ": " + e.Err.Error() }
-
-func (e *PathError) Unwrap() error { return e.Err }
-
-// Timeout reports whether this error represents a timeout.
-func (e *PathError) Timeout() bool {
- t, ok := e.Err.(interface{ Timeout() bool })
- return ok && t.Timeout()
-}
diff --git a/contrib/go/_std_1.18/src/io/fs/walk.go b/contrib/go/_std_1.18/src/io/fs/walk.go
deleted file mode 100644
index 534876bad3..0000000000
--- a/contrib/go/_std_1.18/src/io/fs/walk.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fs
-
-import (
- "errors"
- "path"
-)
-
-// SkipDir is used as a return value from WalkDirFuncs to indicate that
-// the directory named in the call is to be skipped. It is not returned
-// as an error by any function.
-var SkipDir = errors.New("skip this directory")
-
-// WalkDirFunc is the type of the function called by WalkDir to visit
-// each file or directory.
-//
-// The path argument contains the argument to WalkDir as a prefix.
-// That is, if WalkDir is called with root argument "dir" and finds a file
-// named "a" in that directory, the walk function will be called with
-// argument "dir/a".
-//
-// The d argument is the fs.DirEntry for the named path.
-//
-// The error result returned by the function controls how WalkDir
-// continues. If the function returns the special value SkipDir, WalkDir
-// skips the current directory (path if d.IsDir() is true, otherwise
-// path's parent directory). Otherwise, if the function returns a non-nil
-// error, WalkDir stops entirely and returns that error.
-//
-// The err argument reports an error related to path, signaling that
-// WalkDir will not walk into that directory. The function can decide how
-// to handle that error; as described earlier, returning the error will
-// cause WalkDir to stop walking the entire tree.
-//
-// WalkDir calls the function with a non-nil err argument in two cases.
-//
-// First, if the initial fs.Stat on the root directory fails, WalkDir
-// calls the function with path set to root, d set to nil, and err set to
-// the error from fs.Stat.
-//
-// Second, if a directory's ReadDir method fails, WalkDir calls the
-// function with path set to the directory's path, d set to an
-// fs.DirEntry describing the directory, and err set to the error from
-// ReadDir. In this second case, the function is called twice with the
-// path of the directory: the first call is before the directory read is
-// attempted and has err set to nil, giving the function a chance to
-// return SkipDir and avoid the ReadDir entirely. The second call is
-// after a failed ReadDir and reports the error from ReadDir.
-// (If ReadDir succeeds, there is no second call.)
-//
-// The differences between WalkDirFunc and filepath.WalkFunc are:
-//
-// - The second argument has type fs.DirEntry instead of fs.FileInfo.
-// - The function is called before reading a directory, to allow SkipDir
-// to bypass the directory read entirely.
-// - If a directory read fails, the function is called a second time
-// for that directory to report the error.
-//
-type WalkDirFunc func(path string, d DirEntry, err error) error
-
-// walkDir recursively descends path, calling walkDirFn.
-func walkDir(fsys FS, name string, d DirEntry, walkDirFn WalkDirFunc) error {
- if err := walkDirFn(name, d, nil); err != nil || !d.IsDir() {
- if err == SkipDir && d.IsDir() {
- // Successfully skipped directory.
- err = nil
- }
- return err
- }
-
- dirs, err := ReadDir(fsys, name)
- if err != nil {
- // Second call, to report ReadDir error.
- err = walkDirFn(name, d, err)
- if err != nil {
- return err
- }
- }
-
- for _, d1 := range dirs {
- name1 := path.Join(name, d1.Name())
- if err := walkDir(fsys, name1, d1, walkDirFn); err != nil {
- if err == SkipDir {
- break
- }
- return err
- }
- }
- return nil
-}
-
-// WalkDir walks the file tree rooted at root, calling fn for each file or
-// directory in the tree, including root.
-//
-// All errors that arise visiting files and directories are filtered by fn:
-// see the fs.WalkDirFunc documentation for details.
-//
-// The files are walked in lexical order, which makes the output deterministic
-// but requires WalkDir to read an entire directory into memory before proceeding
-// to walk that directory.
-//
-// WalkDir does not follow symbolic links found in directories,
-// but if root itself is a symbolic link, its target will be walked.
-func WalkDir(fsys FS, root string, fn WalkDirFunc) error {
- info, err := Stat(fsys, root)
- if err != nil {
- err = fn(root, nil, err)
- } else {
- err = walkDir(fsys, root, &statDirEntry{info}, fn)
- }
- if err == SkipDir {
- return nil
- }
- return err
-}
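-// A typical call (sketch, assuming an os.DirFS over the current directory):
-//
-//	err := fs.WalkDir(os.DirFS("."), ".", func(p string, d fs.DirEntry, err error) error {
-//		if err != nil {
-//			return err
-//		}
-//		fmt.Println(p)
-//		return nil
-//	})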
-
-type statDirEntry struct {
- info FileInfo
-}
-
-func (d *statDirEntry) Name() string { return d.info.Name() }
-func (d *statDirEntry) IsDir() bool { return d.info.IsDir() }
-func (d *statDirEntry) Type() FileMode { return d.info.Mode().Type() }
-func (d *statDirEntry) Info() (FileInfo, error) { return d.info, nil }
diff --git a/contrib/go/_std_1.18/src/io/io.go b/contrib/go/_std_1.18/src/io/io.go
deleted file mode 100644
index 1ea01d5d63..0000000000
--- a/contrib/go/_std_1.18/src/io/io.go
+++ /dev/null
@@ -1,654 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package io provides basic interfaces to I/O primitives.
-// Its primary job is to wrap existing implementations of such primitives,
-// such as those in package os, into shared public interfaces that
-// abstract the functionality, plus some other related primitives.
-//
-// Because these interfaces and primitives wrap lower-level operations with
-// various implementations, unless otherwise informed clients should not
-// assume they are safe for parallel execution.
-package io
-
-import (
- "errors"
- "sync"
-)
-
-// Seek whence values.
-const (
- SeekStart = 0 // seek relative to the origin of the file
- SeekCurrent = 1 // seek relative to the current offset
- SeekEnd = 2 // seek relative to the end
-)
-
-// ErrShortWrite means that a write accepted fewer bytes than requested
-// but failed to return an explicit error.
-var ErrShortWrite = errors.New("short write")
-
-// errInvalidWrite means that a write returned an impossible count.
-var errInvalidWrite = errors.New("invalid write result")
-
-// ErrShortBuffer means that a read required a longer buffer than was provided.
-var ErrShortBuffer = errors.New("short buffer")
-
-// EOF is the error returned by Read when no more input is available.
-// (Read must return EOF itself, not an error wrapping EOF,
-// because callers will test for EOF using ==.)
-// Functions should return EOF only to signal a graceful end of input.
-// If the EOF occurs unexpectedly in a structured data stream,
-// the appropriate error is either ErrUnexpectedEOF or some other error
-// giving more detail.
-var EOF = errors.New("EOF")
-
-// ErrUnexpectedEOF means that EOF was encountered in the
-// middle of reading a fixed-size block or data structure.
-var ErrUnexpectedEOF = errors.New("unexpected EOF")
-
-// ErrNoProgress is returned by some clients of a Reader when
-// many calls to Read have failed to return any data or error,
-// usually the sign of a broken Reader implementation.
-var ErrNoProgress = errors.New("multiple Read calls return no data or error")
-
-// Reader is the interface that wraps the basic Read method.
-//
-// Read reads up to len(p) bytes into p. It returns the number of bytes
-// read (0 <= n <= len(p)) and any error encountered. Even if Read
-// returns n < len(p), it may use all of p as scratch space during the call.
-// If some data is available but not len(p) bytes, Read conventionally
-// returns what is available instead of waiting for more.
-//
-// When Read encounters an error or end-of-file condition after
-// successfully reading n > 0 bytes, it returns the number of
-// bytes read. It may return the (non-nil) error from the same call
-// or return the error (and n == 0) from a subsequent call.
-// An instance of this general case is that a Reader returning
-// a non-zero number of bytes at the end of the input stream may
-// return either err == EOF or err == nil. The next Read should
-// return 0, EOF.
-//
-// Callers should always process the n > 0 bytes returned before
-// considering the error err. Doing so correctly handles I/O errors
-// that happen after reading some bytes and also both of the
-// allowed EOF behaviors.
-//
-// Implementations of Read are discouraged from returning a
-// zero byte count with a nil error, except when len(p) == 0.
-// Callers should treat a return of 0 and nil as indicating that
-// nothing happened; in particular it does not indicate EOF.
-//
-// Implementations must not retain p.
-type Reader interface {
- Read(p []byte) (n int, err error)
-}
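-// The canonical consumption loop implied by this contract (sketch; process
-// is a hypothetical handler):
-//
-//	buf := make([]byte, 4096)
-//	for {
-//		n, err := r.Read(buf)
-//		if n > 0 {
-//			process(buf[:n]) // handle the data before inspecting err
-//		}
-//		if err == io.EOF {
-//			break
-//		}
-//		if err != nil {
-//			return err
-//		}
-//	}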
-
-// Writer is the interface that wraps the basic Write method.
-//
-// Write writes len(p) bytes from p to the underlying data stream.
-// It returns the number of bytes written from p (0 <= n <= len(p))
-// and any error encountered that caused the write to stop early.
-// Write must return a non-nil error if it returns n < len(p).
-// Write must not modify the slice data, even temporarily.
-//
-// Implementations must not retain p.
-type Writer interface {
- Write(p []byte) (n int, err error)
-}
-
-// Closer is the interface that wraps the basic Close method.
-//
-// The behavior of Close after the first call is undefined.
-// Specific implementations may document their own behavior.
-type Closer interface {
- Close() error
-}
-
-// Seeker is the interface that wraps the basic Seek method.
-//
-// Seek sets the offset for the next Read or Write to offset,
-// interpreted according to whence:
-// SeekStart means relative to the start of the file,
-// SeekCurrent means relative to the current offset, and
-// SeekEnd means relative to the end.
-// Seek returns the new offset relative to the start of the
-// file or an error, if any.
-//
-// Seeking to an offset before the start of the file is an error.
-// Seeking to any positive offset may be allowed, but if the new offset exceeds
-// the size of the underlying object the behavior of subsequent I/O operations
-// is implementation-dependent.
-type Seeker interface {
- Seek(offset int64, whence int) (int64, error)
-}
-
-// ReadWriter is the interface that groups the basic Read and Write methods.
-type ReadWriter interface {
- Reader
- Writer
-}
-
-// ReadCloser is the interface that groups the basic Read and Close methods.
-type ReadCloser interface {
- Reader
- Closer
-}
-
-// WriteCloser is the interface that groups the basic Write and Close methods.
-type WriteCloser interface {
- Writer
- Closer
-}
-
-// ReadWriteCloser is the interface that groups the basic Read, Write and Close methods.
-type ReadWriteCloser interface {
- Reader
- Writer
- Closer
-}
-
-// ReadSeeker is the interface that groups the basic Read and Seek methods.
-type ReadSeeker interface {
- Reader
- Seeker
-}
-
-// ReadSeekCloser is the interface that groups the basic Read, Seek and Close
-// methods.
-type ReadSeekCloser interface {
- Reader
- Seeker
- Closer
-}
-
-// WriteSeeker is the interface that groups the basic Write and Seek methods.
-type WriteSeeker interface {
- Writer
- Seeker
-}
-
-// ReadWriteSeeker is the interface that groups the basic Read, Write and Seek methods.
-type ReadWriteSeeker interface {
- Reader
- Writer
- Seeker
-}
-
-// ReaderFrom is the interface that wraps the ReadFrom method.
-//
-// ReadFrom reads data from r until EOF or error.
-// The return value n is the number of bytes read.
-// Any error except EOF encountered during the read is also returned.
-//
-// The Copy function uses ReaderFrom if available.
-type ReaderFrom interface {
- ReadFrom(r Reader) (n int64, err error)
-}
-
-// WriterTo is the interface that wraps the WriteTo method.
-//
-// WriteTo writes data to w until there's no more data to write or
-// when an error occurs. The return value n is the number of bytes
-// written. Any error encountered during the write is also returned.
-//
-// The Copy function uses WriterTo if available.
-type WriterTo interface {
- WriteTo(w Writer) (n int64, err error)
-}
-
-// ReaderAt is the interface that wraps the basic ReadAt method.
-//
-// ReadAt reads len(p) bytes into p starting at offset off in the
-// underlying input source. It returns the number of bytes
-// read (0 <= n <= len(p)) and any error encountered.
-//
-// When ReadAt returns n < len(p), it returns a non-nil error
-// explaining why more bytes were not returned. In this respect,
-// ReadAt is stricter than Read.
-//
-// Even if ReadAt returns n < len(p), it may use all of p as scratch
-// space during the call. If some data is available but not len(p) bytes,
-// ReadAt blocks until either all the data is available or an error occurs.
-// In this respect ReadAt is different from Read.
-//
-// If the n = len(p) bytes returned by ReadAt are at the end of the
-// input source, ReadAt may return either err == EOF or err == nil.
-//
-// If ReadAt is reading from an input source with a seek offset,
-// ReadAt should not affect nor be affected by the underlying
-// seek offset.
-//
-// Clients of ReadAt can execute parallel ReadAt calls on the
-// same input source.
-//
-// Implementations must not retain p.
-type ReaderAt interface {
- ReadAt(p []byte, off int64) (n int, err error)
-}
-
-// WriterAt is the interface that wraps the basic WriteAt method.
-//
-// WriteAt writes len(p) bytes from p to the underlying data stream
-// at offset off. It returns the number of bytes written from p (0 <= n <= len(p))
-// and any error encountered that caused the write to stop early.
-// WriteAt must return a non-nil error if it returns n < len(p).
-//
-// If WriteAt is writing to a destination with a seek offset,
-// WriteAt should not affect nor be affected by the underlying
-// seek offset.
-//
-// Clients of WriteAt can execute parallel WriteAt calls on the same
-// destination if the ranges do not overlap.
-//
-// Implementations must not retain p.
-type WriterAt interface {
- WriteAt(p []byte, off int64) (n int, err error)
-}
-
-// ByteReader is the interface that wraps the ReadByte method.
-//
-// ReadByte reads and returns the next byte from the input or
-// any error encountered. If ReadByte returns an error, no input
-// byte was consumed, and the returned byte value is undefined.
-//
-// ReadByte provides an efficient interface for byte-at-time
-// processing. A Reader that does not implement ByteReader
-// can be wrapped using bufio.NewReader to add this method.
-type ByteReader interface {
- ReadByte() (byte, error)
-}
-
-// ByteScanner is the interface that adds the UnreadByte method to the
-// basic ReadByte method.
-//
-// UnreadByte causes the next call to ReadByte to return the last byte read.
-// If the last operation was not a successful call to ReadByte, UnreadByte may
-// return an error, unread the last byte read (or the byte prior to the
-// last-unread byte), or (in implementations that support the Seeker interface)
-// seek to one byte before the current offset.
-type ByteScanner interface {
- ByteReader
- UnreadByte() error
-}
-
-// ByteWriter is the interface that wraps the WriteByte method.
-type ByteWriter interface {
- WriteByte(c byte) error
-}
-
-// RuneReader is the interface that wraps the ReadRune method.
-//
-// ReadRune reads a single encoded Unicode character
-// and returns the rune and its size in bytes. If no character is
-// available, err will be set.
-type RuneReader interface {
- ReadRune() (r rune, size int, err error)
-}
-
-// RuneScanner is the interface that adds the UnreadRune method to the
-// basic ReadRune method.
-//
-// UnreadRune causes the next call to ReadRune to return the last rune read.
-// If the last operation was not a successful call to ReadRune, UnreadRune may
-// return an error, unread the last rune read (or the rune prior to the
-// last-unread rune), or (in implementations that support the Seeker interface)
-// seek to the start of the rune before the current offset.
-type RuneScanner interface {
- RuneReader
- UnreadRune() error
-}
-
-// StringWriter is the interface that wraps the WriteString method.
-type StringWriter interface {
- WriteString(s string) (n int, err error)
-}
-
-// WriteString writes the contents of the string s to w, which accepts a slice of bytes.
-// If w implements StringWriter, its WriteString method is invoked directly.
-// Otherwise, w.Write is called exactly once.
-func WriteString(w Writer, s string) (n int, err error) {
- if sw, ok := w.(StringWriter); ok {
- return sw.WriteString(s)
- }
- return w.Write([]byte(s))
-}
-
-// ReadAtLeast reads from r into buf until it has read at least min bytes.
-// It returns the number of bytes copied and an error if fewer bytes were read.
-// The error is EOF only if no bytes were read.
-// If an EOF happens after reading fewer than min bytes,
-// ReadAtLeast returns ErrUnexpectedEOF.
-// If min is greater than the length of buf, ReadAtLeast returns ErrShortBuffer.
-// On return, n >= min if and only if err == nil.
-// If r returns an error having read at least min bytes, the error is dropped.
-func ReadAtLeast(r Reader, buf []byte, min int) (n int, err error) {
- if len(buf) < min {
- return 0, ErrShortBuffer
- }
- for n < min && err == nil {
- var nn int
- nn, err = r.Read(buf[n:])
- n += nn
- }
- if n >= min {
- err = nil
- } else if n > 0 && err == EOF {
- err = ErrUnexpectedEOF
- }
- return
-}
-
-// ReadFull reads exactly len(buf) bytes from r into buf.
-// It returns the number of bytes copied and an error if fewer bytes were read.
-// The error is EOF only if no bytes were read.
-// If an EOF happens after reading some but not all the bytes,
-// ReadFull returns ErrUnexpectedEOF.
-// On return, n == len(buf) if and only if err == nil.
-// If r returns an error having read at least len(buf) bytes, the error is dropped.
-func ReadFull(r Reader, buf []byte) (n int, err error) {
- return ReadAtLeast(r, buf, len(buf))
-}
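-// For example, reading a fixed-size header (sketch):
-//
-//	header := make([]byte, 8)
-//	if _, err := io.ReadFull(r, header); err != nil {
-//		return err // ErrUnexpectedEOF if only part of the header arrived
-//	}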
-
-// CopyN copies n bytes (or until an error) from src to dst.
-// It returns the number of bytes copied and the earliest
-// error encountered while copying.
-// On return, written == n if and only if err == nil.
-//
-// If dst implements the ReaderFrom interface,
-// the copy is implemented using it.
-func CopyN(dst Writer, src Reader, n int64) (written int64, err error) {
- written, err = Copy(dst, LimitReader(src, n))
- if written == n {
- return n, nil
- }
- if written < n && err == nil {
- // src stopped early; must have been EOF.
- err = EOF
- }
- return
-}
-
-// Copy copies from src to dst until either EOF is reached
-// on src or an error occurs. It returns the number of bytes
-// copied and the first error encountered while copying, if any.
-//
-// A successful Copy returns err == nil, not err == EOF.
-// Because Copy is defined to read from src until EOF, it does
-// not treat an EOF from Read as an error to be reported.
-//
-// If src implements the WriterTo interface,
-// the copy is implemented by calling src.WriteTo(dst).
-// Otherwise, if dst implements the ReaderFrom interface,
-// the copy is implemented by calling dst.ReadFrom(src).
-func Copy(dst Writer, src Reader) (written int64, err error) {
- return copyBuffer(dst, src, nil)
-}
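-// For example (illustrative):
-//
-//	n, err := io.Copy(os.Stdout, strings.NewReader("hello, world\n"))
-//	// n == 13, err == nil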
-
-// CopyBuffer is identical to Copy except that it stages through the
-// provided buffer (if one is required) rather than allocating a
-// temporary one. If buf is nil, one is allocated; otherwise if it has
-// zero length, CopyBuffer panics.
-//
-// If either src implements WriterTo or dst implements ReaderFrom,
-// buf will not be used to perform the copy.
-func CopyBuffer(dst Writer, src Reader, buf []byte) (written int64, err error) {
- if buf != nil && len(buf) == 0 {
- panic("empty buffer in CopyBuffer")
- }
- return copyBuffer(dst, src, buf)
-}
-
-// copyBuffer is the actual implementation of Copy and CopyBuffer.
-// if buf is nil, one is allocated.
-func copyBuffer(dst Writer, src Reader, buf []byte) (written int64, err error) {
- // If the reader has a WriteTo method, use it to do the copy.
- // Avoids an allocation and a copy.
- if wt, ok := src.(WriterTo); ok {
- return wt.WriteTo(dst)
- }
- // Similarly, if the writer has a ReadFrom method, use it to do the copy.
- if rt, ok := dst.(ReaderFrom); ok {
- return rt.ReadFrom(src)
- }
- if buf == nil {
- size := 32 * 1024
- if l, ok := src.(*LimitedReader); ok && int64(size) > l.N {
- if l.N < 1 {
- size = 1
- } else {
- size = int(l.N)
- }
- }
- buf = make([]byte, size)
- }
- for {
- nr, er := src.Read(buf)
- if nr > 0 {
- nw, ew := dst.Write(buf[0:nr])
- if nw < 0 || nr < nw {
- nw = 0
- if ew == nil {
- ew = errInvalidWrite
- }
- }
- written += int64(nw)
- if ew != nil {
- err = ew
- break
- }
- if nr != nw {
- err = ErrShortWrite
- break
- }
- }
- if er != nil {
- if er != EOF {
- err = er
- }
- break
- }
- }
- return written, err
-}
-
-// LimitReader returns a Reader that reads from r
-// but stops with EOF after n bytes.
-// The underlying implementation is a *LimitedReader.
-func LimitReader(r Reader, n int64) Reader { return &LimitedReader{r, n} }
-
-// A LimitedReader reads from R but limits the amount of
-// data returned to just N bytes. Each call to Read
-// updates N to reflect the new amount remaining.
-// Read returns EOF when N <= 0 or when the underlying R returns EOF.
-type LimitedReader struct {
- R Reader // underlying reader
- N int64 // max bytes remaining
-}
-
-func (l *LimitedReader) Read(p []byte) (n int, err error) {
- if l.N <= 0 {
- return 0, EOF
- }
- if int64(len(p)) > l.N {
- p = p[0:l.N]
- }
- n, err = l.R.Read(p)
- l.N -= int64(n)
- return
-}
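-// For example (illustrative):
-//
-//	lr := io.LimitReader(strings.NewReader("hello, world"), 5)
-//	b, _ := io.ReadAll(lr) // b == []byte("hello")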
-
-// NewSectionReader returns a SectionReader that reads from r
-// starting at offset off and stops with EOF after n bytes.
-func NewSectionReader(r ReaderAt, off int64, n int64) *SectionReader {
- var remaining int64
- const maxint64 = 1<<63 - 1
- if off <= maxint64-n {
- remaining = n + off
- } else {
- // Overflow, with no way to return error.
- // Assume we can read up to an offset of 1<<63 - 1.
- remaining = maxint64
- }
- return &SectionReader{r, off, off, remaining}
-}
-
-// SectionReader implements Read, Seek, and ReadAt on a section
-// of an underlying ReaderAt.
-type SectionReader struct {
- r ReaderAt
- base int64
- off int64
- limit int64
-}
-
-func (s *SectionReader) Read(p []byte) (n int, err error) {
- if s.off >= s.limit {
- return 0, EOF
- }
- if max := s.limit - s.off; int64(len(p)) > max {
- p = p[0:max]
- }
- n, err = s.r.ReadAt(p, s.off)
- s.off += int64(n)
- return
-}
-
-var errWhence = errors.New("Seek: invalid whence")
-var errOffset = errors.New("Seek: invalid offset")
-
-func (s *SectionReader) Seek(offset int64, whence int) (int64, error) {
- switch whence {
- default:
- return 0, errWhence
- case SeekStart:
- offset += s.base
- case SeekCurrent:
- offset += s.off
- case SeekEnd:
- offset += s.limit
- }
- if offset < s.base {
- return 0, errOffset
- }
- s.off = offset
- return offset - s.base, nil
-}
-
-func (s *SectionReader) ReadAt(p []byte, off int64) (n int, err error) {
- if off < 0 || off >= s.limit-s.base {
- return 0, EOF
- }
- off += s.base
- if max := s.limit - off; int64(len(p)) > max {
- p = p[0:max]
- n, err = s.r.ReadAt(p, off)
- if err == nil {
- err = EOF
- }
- return n, err
- }
- return s.r.ReadAt(p, off)
-}
-
-// Size returns the size of the section in bytes.
-func (s *SectionReader) Size() int64 { return s.limit - s.base }
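-// For example (illustrative; strings.Reader implements ReaderAt):
-//
-//	sr := io.NewSectionReader(strings.NewReader("0123456789"), 2, 4)
-//	b, _ := io.ReadAll(sr) // b == []byte("2345")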
-
-// TeeReader returns a Reader that writes to w what it reads from r.
-// All reads from r performed through it are matched with
-// corresponding writes to w. There is no internal buffering -
-// the write must complete before the read completes.
-// Any error encountered while writing is reported as a read error.
-func TeeReader(r Reader, w Writer) Reader {
- return &teeReader{r, w}
-}
-
-type teeReader struct {
- r Reader
- w Writer
-}
-
-func (t *teeReader) Read(p []byte) (n int, err error) {
- n, err = t.r.Read(p)
- if n > 0 {
- if n, err := t.w.Write(p[:n]); err != nil {
- return n, err
- }
- }
- return
-}
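-// For example (illustrative):
-//
-//	var buf bytes.Buffer
-//	tee := io.TeeReader(strings.NewReader("data"), &buf)
-//	b, _ := io.ReadAll(tee) // b and buf both now hold "data"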
-
-// Discard is a Writer on which all Write calls succeed
-// without doing anything.
-var Discard Writer = discard{}
-
-type discard struct{}
-
-// discard implements ReaderFrom as an optimization so Copy to
-// io.Discard can avoid doing unnecessary work.
-var _ ReaderFrom = discard{}
-
-func (discard) Write(p []byte) (int, error) {
- return len(p), nil
-}
-
-func (discard) WriteString(s string) (int, error) {
- return len(s), nil
-}
-
-var blackHolePool = sync.Pool{
- New: func() any {
- b := make([]byte, 8192)
- return &b
- },
-}
-
-func (discard) ReadFrom(r Reader) (n int64, err error) {
- bufp := blackHolePool.Get().(*[]byte)
- readSize := 0
- for {
- readSize, err = r.Read(*bufp)
- n += int64(readSize)
- if err != nil {
- blackHolePool.Put(bufp)
- if err == EOF {
- return n, nil
- }
- return
- }
- }
-}
-
-// NopCloser returns a ReadCloser with a no-op Close method wrapping
-// the provided Reader r.
-func NopCloser(r Reader) ReadCloser {
- return nopCloser{r}
-}
-
-type nopCloser struct {
- Reader
-}
-
-func (nopCloser) Close() error { return nil }
-
-// ReadAll reads from r until an error or EOF and returns the data it read.
-// A successful call returns err == nil, not err == EOF. Because ReadAll is
-// defined to read from r until EOF, it does not treat an EOF from Read
-// as an error to be reported.
-func ReadAll(r Reader) ([]byte, error) {
- b := make([]byte, 0, 512)
- for {
- if len(b) == cap(b) {
- // Add more capacity (let append pick how much).
- b = append(b, 0)[:len(b)]
- }
- n, err := r.Read(b[len(b):cap(b)])
- b = b[:len(b)+n]
- if err != nil {
- if err == EOF {
- err = nil
- }
- return b, err
- }
- }
-}
diff --git a/contrib/go/_std_1.18/src/io/ioutil/ioutil.go b/contrib/go/_std_1.18/src/io/ioutil/ioutil.go
deleted file mode 100644
index 45682b89c9..0000000000
--- a/contrib/go/_std_1.18/src/io/ioutil/ioutil.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ioutil implements some I/O utility functions.
-//
-// As of Go 1.16, the same functionality is now provided
-// by package io or package os, and those implementations
-// should be preferred in new code.
-// See the specific function documentation for details.
-package ioutil
-
-import (
- "io"
- "io/fs"
- "os"
- "sort"
-)
-
-// ReadAll reads from r until an error or EOF and returns the data it read.
-// A successful call returns err == nil, not err == EOF. Because ReadAll is
-// defined to read from r until EOF, it does not treat an EOF from Read
-// as an error to be reported.
-//
-// As of Go 1.16, this function simply calls io.ReadAll.
-func ReadAll(r io.Reader) ([]byte, error) {
- return io.ReadAll(r)
-}
-
-// ReadFile reads the file named by filename and returns the contents.
-// A successful call returns err == nil, not err == EOF. Because ReadFile
-// reads the whole file, it does not treat an EOF from Read as an error
-// to be reported.
-//
-// As of Go 1.16, this function simply calls os.ReadFile.
-func ReadFile(filename string) ([]byte, error) {
- return os.ReadFile(filename)
-}
-
-// WriteFile writes data to a file named by filename.
-// If the file does not exist, WriteFile creates it with permissions perm
-// (before umask); otherwise WriteFile truncates it before writing, without changing permissions.
-//
-// As of Go 1.16, this function simply calls os.WriteFile.
-func WriteFile(filename string, data []byte, perm fs.FileMode) error {
- return os.WriteFile(filename, data, perm)
-}
-
-// ReadDir reads the directory named by dirname and returns
-// a list of fs.FileInfo for the directory's contents,
-// sorted by filename. If an error occurs reading the directory,
-// ReadDir returns no directory entries along with the error.
-//
-// As of Go 1.16, os.ReadDir is a more efficient and correct choice:
-// it returns a list of fs.DirEntry instead of fs.FileInfo,
-// and it returns partial results in the case of an error
-// midway through reading a directory.
-func ReadDir(dirname string) ([]fs.FileInfo, error) {
- f, err := os.Open(dirname)
- if err != nil {
- return nil, err
- }
- list, err := f.Readdir(-1)
- f.Close()
- if err != nil {
- return nil, err
- }
- sort.Slice(list, func(i, j int) bool { return list[i].Name() < list[j].Name() })
- return list, nil
-}
-
-// NopCloser returns a ReadCloser with a no-op Close method wrapping
-// the provided Reader r.
-//
-// As of Go 1.16, this function simply calls io.NopCloser.
-func NopCloser(r io.Reader) io.ReadCloser {
- return io.NopCloser(r)
-}
-
-// Discard is an io.Writer on which all Write calls succeed
-// without doing anything.
-//
-// As of Go 1.16, this value is simply io.Discard.
-var Discard io.Writer = io.Discard
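
Note: every function in the deleted ioutil.go is a one-line forwarder, so migrating
callers is mechanical. A sketch of the post-Go 1.16 spellings (the file path and
payload are illustrative):

    package main

    import (
        "io"
        "os"
        "strings"
    )

    func main() {
        data, err := io.ReadAll(strings.NewReader("payload")) // was ioutil.ReadAll
        if err != nil {
            panic(err)
        }
        if err := os.WriteFile("/tmp/example.txt", data, 0o644); err != nil { // was ioutil.WriteFile
            panic(err)
        }
        io.Copy(io.Discard, strings.NewReader("drained")) // io.Discard replaces ioutil.Discard
    }
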
diff --git a/contrib/go/_std_1.18/src/io/ioutil/tempfile.go b/contrib/go/_std_1.18/src/io/ioutil/tempfile.go
deleted file mode 100644
index c43db2c080..0000000000
--- a/contrib/go/_std_1.18/src/io/ioutil/tempfile.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ioutil
-
-import (
- "os"
-)
-
-// TempFile creates a new temporary file in the directory dir,
-// opens the file for reading and writing, and returns the resulting *os.File.
-// The filename is generated by taking pattern and adding a random
-// string to the end. If pattern includes a "*", the random string
-// replaces the last "*".
-// If dir is the empty string, TempFile uses the default directory
-// for temporary files (see os.TempDir).
-// Multiple programs calling TempFile simultaneously
-// will not choose the same file. The caller can use f.Name()
-// to find the pathname of the file. It is the caller's responsibility
-// to remove the file when no longer needed.
-//
-// As of Go 1.17, this function simply calls os.CreateTemp.
-func TempFile(dir, pattern string) (f *os.File, err error) {
- return os.CreateTemp(dir, pattern)
-}
-
-// TempDir creates a new temporary directory in the directory dir.
-// The directory name is generated by taking pattern and applying a
-// random string to the end. If pattern includes a "*", the random string
-// replaces the last "*". TempDir returns the name of the new directory.
-// If dir is the empty string, TempDir uses the
-// default directory for temporary files (see os.TempDir).
-// Multiple programs calling TempDir simultaneously
-// will not choose the same directory. It is the caller's responsibility
-// to remove the directory when no longer needed.
-//
-// As of Go 1.17, this function simply calls os.MkdirTemp.
-func TempDir(dir, pattern string) (name string, err error) {
- return os.MkdirTemp(dir, pattern)
-}
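
Note: the replacements named in the comments above behave the same way as the
deleted wrappers; a short usage sketch with cleanup (the patterns are illustrative):

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        // os.CreateTemp replaces TempFile; "*" marks where the random string goes.
        f, err := os.CreateTemp("", "example-*.log")
        if err != nil {
            panic(err)
        }
        defer os.Remove(f.Name()) // removal is the caller's responsibility
        defer f.Close()

        // os.MkdirTemp replaces TempDir.
        dir, err := os.MkdirTemp("", "example-*")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(dir)

        fmt.Println(f.Name(), dir)
    }
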
diff --git a/contrib/go/_std_1.18/src/io/multi.go b/contrib/go/_std_1.18/src/io/multi.go
deleted file mode 100644
index 24ee71e4ca..0000000000
--- a/contrib/go/_std_1.18/src/io/multi.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package io
-
-type eofReader struct{}
-
-func (eofReader) Read([]byte) (int, error) {
- return 0, EOF
-}
-
-type multiReader struct {
- readers []Reader
-}
-
-func (mr *multiReader) Read(p []byte) (n int, err error) {
- for len(mr.readers) > 0 {
- // Optimization to flatten nested multiReaders (Issue 13558).
- if len(mr.readers) == 1 {
- if r, ok := mr.readers[0].(*multiReader); ok {
- mr.readers = r.readers
- continue
- }
- }
- n, err = mr.readers[0].Read(p)
- if err == EOF {
- // Use eofReader instead of nil to avoid nil panic
- // after performing flatten (Issue 18232).
- mr.readers[0] = eofReader{} // permit earlier GC
- mr.readers = mr.readers[1:]
- }
- if n > 0 || err != EOF {
- if err == EOF && len(mr.readers) > 0 {
- // Don't return EOF yet. More readers remain.
- err = nil
- }
- return
- }
- }
- return 0, EOF
-}
-
-// MultiReader returns a Reader that's the logical concatenation of
-// the provided input readers. They're read sequentially. Once all
-// inputs have returned EOF, Read will return EOF. If any of the readers
-// return a non-nil, non-EOF error, Read will return that error.
-func MultiReader(readers ...Reader) Reader {
- r := make([]Reader, len(readers))
- copy(r, readers)
- return &multiReader{r}
-}
-
-type multiWriter struct {
- writers []Writer
-}
-
-func (t *multiWriter) Write(p []byte) (n int, err error) {
- for _, w := range t.writers {
- n, err = w.Write(p)
- if err != nil {
- return
- }
- if n != len(p) {
- err = ErrShortWrite
- return
- }
- }
- return len(p), nil
-}
-
-var _ StringWriter = (*multiWriter)(nil)
-
-func (t *multiWriter) WriteString(s string) (n int, err error) {
- var p []byte // lazily initialized if/when needed
- for _, w := range t.writers {
- if sw, ok := w.(StringWriter); ok {
- n, err = sw.WriteString(s)
- } else {
- if p == nil {
- p = []byte(s)
- }
- n, err = w.Write(p)
- }
- if err != nil {
- return
- }
- if n != len(s) {
- err = ErrShortWrite
- return
- }
- }
- return len(s), nil
-}
-
-// MultiWriter creates a writer that duplicates its writes to all the
-// provided writers, similar to the Unix tee(1) command.
-//
-// Each write is written to each listed writer, one at a time.
-// If a listed writer returns an error, that overall write operation
-// stops and returns the error; it does not continue down the list.
-func MultiWriter(writers ...Writer) Writer {
- allWriters := make([]Writer, 0, len(writers))
- for _, w := range writers {
- if mw, ok := w.(*multiWriter); ok {
- allWriters = append(allWriters, mw.writers...)
- } else {
- allWriters = append(allWriters, w)
- }
- }
- return &multiWriter{allWriters}
-}
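
Note: MultiReader concatenates its inputs and MultiWriter duplicates its output,
so the two compose into a tee pipeline; a minimal sketch (inputs are illustrative):

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "strings"
    )

    func main() {
        r := io.MultiReader(strings.NewReader("hello "), strings.NewReader("world"))
        var a, b bytes.Buffer
        if _, err := io.Copy(io.MultiWriter(&a, &b), r); err != nil {
            panic(err)
        }
        fmt.Printf("%q %q\n", a.String(), b.String()) // "hello world" "hello world"
    }
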
diff --git a/contrib/go/_std_1.18/src/log/log.go b/contrib/go/_std_1.18/src/log/log.go
deleted file mode 100644
index 5e79b19522..0000000000
--- a/contrib/go/_std_1.18/src/log/log.go
+++ /dev/null
@@ -1,411 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package log implements a simple logging package. It defines a type, Logger,
-// with methods for formatting output. It also has a predefined 'standard'
-// Logger accessible through helper functions Print[f|ln], Fatal[f|ln], and
-// Panic[f|ln], which are easier to use than creating a Logger manually.
-// That logger writes to standard error and prints the date and time
-// of each logged message.
-// Every log message is output on a separate line: if the message being
-// printed does not end in a newline, the logger will add one.
-// The Fatal functions call os.Exit(1) after writing the log message.
-// The Panic functions call panic after writing the log message.
-package log
-
-import (
- "fmt"
- "io"
- "os"
- "runtime"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// These flags define which text to prefix to each log entry generated by the Logger.
-// Bits are or'ed together to control what's printed.
-// With the exception of the Lmsgprefix flag, there is no
-// control over the order they appear (the order listed here)
-// or the format they present (as described in the comments).
-// The prefix is followed by a colon only when Llongfile or Lshortfile
-// is specified.
-// For example, flags Ldate | Ltime (or LstdFlags) produce,
-// 2009/01/23 01:23:23 message
-// while flags Ldate | Ltime | Lmicroseconds | Llongfile produce,
-// 2009/01/23 01:23:23.123123 /a/b/c/d.go:23: message
-const (
- Ldate = 1 << iota // the date in the local time zone: 2009/01/23
- Ltime // the time in the local time zone: 01:23:23
- Lmicroseconds // microsecond resolution: 01:23:23.123123. assumes Ltime.
- Llongfile // full file name and line number: /a/b/c/d.go:23
- Lshortfile // final file name element and line number: d.go:23. overrides Llongfile
- LUTC // if Ldate or Ltime is set, use UTC rather than the local time zone
- Lmsgprefix // move the "prefix" from the beginning of the line to before the message
- LstdFlags = Ldate | Ltime // initial values for the standard logger
-)
-
-// A Logger represents an active logging object that generates lines of
-// output to an io.Writer. Each logging operation makes a single call to
-// the Writer's Write method. A Logger can be used simultaneously from
-// multiple goroutines; it guarantees to serialize access to the Writer.
-type Logger struct {
- mu sync.Mutex // ensures atomic writes; protects the following fields
- prefix string // prefix on each line to identify the logger (but see Lmsgprefix)
- flag int // properties
- out io.Writer // destination for output
- buf []byte // for accumulating text to write
- isDiscard int32 // atomic boolean: whether out == io.Discard
-}
-
-// New creates a new Logger. The out variable sets the
-// destination to which log data will be written.
-// The prefix appears at the beginning of each generated log line, or
-// after the log header if the Lmsgprefix flag is provided.
-// The flag argument defines the logging properties.
-func New(out io.Writer, prefix string, flag int) *Logger {
- l := &Logger{out: out, prefix: prefix, flag: flag}
- if out == io.Discard {
- l.isDiscard = 1
- }
- return l
-}
-
-// SetOutput sets the output destination for the logger.
-func (l *Logger) SetOutput(w io.Writer) {
- l.mu.Lock()
- defer l.mu.Unlock()
- l.out = w
- isDiscard := int32(0)
- if w == io.Discard {
- isDiscard = 1
- }
- atomic.StoreInt32(&l.isDiscard, isDiscard)
-}
-
-var std = New(os.Stderr, "", LstdFlags)
-
-// Default returns the standard logger used by the package-level output functions.
-func Default() *Logger { return std }
-
-// Cheap integer to fixed-width decimal ASCII. Give a negative width to avoid zero-padding.
-func itoa(buf *[]byte, i int, wid int) {
- // Assemble decimal in reverse order.
- var b [20]byte
- bp := len(b) - 1
- for i >= 10 || wid > 1 {
- wid--
- q := i / 10
- b[bp] = byte('0' + i - q*10)
- bp--
- i = q
- }
- // i < 10
- b[bp] = byte('0' + i)
- *buf = append(*buf, b[bp:]...)
-}
-
-// formatHeader writes log header to buf in following order:
-// * l.prefix (if it's not blank and Lmsgprefix is unset),
-// * date and/or time (if corresponding flags are provided),
-// * file and line number (if corresponding flags are provided),
-// * l.prefix (if it's not blank and Lmsgprefix is set).
-func (l *Logger) formatHeader(buf *[]byte, t time.Time, file string, line int) {
- if l.flag&Lmsgprefix == 0 {
- *buf = append(*buf, l.prefix...)
- }
- if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 {
- if l.flag&LUTC != 0 {
- t = t.UTC()
- }
- if l.flag&Ldate != 0 {
- year, month, day := t.Date()
- itoa(buf, year, 4)
- *buf = append(*buf, '/')
- itoa(buf, int(month), 2)
- *buf = append(*buf, '/')
- itoa(buf, day, 2)
- *buf = append(*buf, ' ')
- }
- if l.flag&(Ltime|Lmicroseconds) != 0 {
- hour, min, sec := t.Clock()
- itoa(buf, hour, 2)
- *buf = append(*buf, ':')
- itoa(buf, min, 2)
- *buf = append(*buf, ':')
- itoa(buf, sec, 2)
- if l.flag&Lmicroseconds != 0 {
- *buf = append(*buf, '.')
- itoa(buf, t.Nanosecond()/1e3, 6)
- }
- *buf = append(*buf, ' ')
- }
- }
- if l.flag&(Lshortfile|Llongfile) != 0 {
- if l.flag&Lshortfile != 0 {
- short := file
- for i := len(file) - 1; i > 0; i-- {
- if file[i] == '/' {
- short = file[i+1:]
- break
- }
- }
- file = short
- }
- *buf = append(*buf, file...)
- *buf = append(*buf, ':')
- itoa(buf, line, -1)
- *buf = append(*buf, ": "...)
- }
- if l.flag&Lmsgprefix != 0 {
- *buf = append(*buf, l.prefix...)
- }
-}
-
-// Output writes the output for a logging event. The string s contains
-// the text to print after the prefix specified by the flags of the
-// Logger. A newline is appended if the last character of s is not
-// already a newline. Calldepth is used to recover the PC and is
-// provided for generality, although at the moment on all pre-defined
-// paths it will be 2.
-func (l *Logger) Output(calldepth int, s string) error {
- now := time.Now() // get this early.
- var file string
- var line int
- l.mu.Lock()
- defer l.mu.Unlock()
- if l.flag&(Lshortfile|Llongfile) != 0 {
- // Release lock while getting caller info - it's expensive.
- l.mu.Unlock()
- var ok bool
- _, file, line, ok = runtime.Caller(calldepth)
- if !ok {
- file = "???"
- line = 0
- }
- l.mu.Lock()
- }
- l.buf = l.buf[:0]
- l.formatHeader(&l.buf, now, file, line)
- l.buf = append(l.buf, s...)
- if len(s) == 0 || s[len(s)-1] != '\n' {
- l.buf = append(l.buf, '\n')
- }
- _, err := l.out.Write(l.buf)
- return err
-}
-
-// Printf calls l.Output to print to the logger.
-// Arguments are handled in the manner of fmt.Printf.
-func (l *Logger) Printf(format string, v ...any) {
- if atomic.LoadInt32(&l.isDiscard) != 0 {
- return
- }
- l.Output(2, fmt.Sprintf(format, v...))
-}
-
-// Print calls l.Output to print to the logger.
-// Arguments are handled in the manner of fmt.Print.
-func (l *Logger) Print(v ...any) {
- if atomic.LoadInt32(&l.isDiscard) != 0 {
- return
- }
- l.Output(2, fmt.Sprint(v...))
-}
-
-// Println calls l.Output to print to the logger.
-// Arguments are handled in the manner of fmt.Println.
-func (l *Logger) Println(v ...any) {
- if atomic.LoadInt32(&l.isDiscard) != 0 {
- return
- }
- l.Output(2, fmt.Sprintln(v...))
-}
-
-// Fatal is equivalent to l.Print() followed by a call to os.Exit(1).
-func (l *Logger) Fatal(v ...any) {
- l.Output(2, fmt.Sprint(v...))
- os.Exit(1)
-}
-
-// Fatalf is equivalent to l.Printf() followed by a call to os.Exit(1).
-func (l *Logger) Fatalf(format string, v ...any) {
- l.Output(2, fmt.Sprintf(format, v...))
- os.Exit(1)
-}
-
-// Fatalln is equivalent to l.Println() followed by a call to os.Exit(1).
-func (l *Logger) Fatalln(v ...any) {
- l.Output(2, fmt.Sprintln(v...))
- os.Exit(1)
-}
-
-// Panic is equivalent to l.Print() followed by a call to panic().
-func (l *Logger) Panic(v ...any) {
- s := fmt.Sprint(v...)
- l.Output(2, s)
- panic(s)
-}
-
-// Panicf is equivalent to l.Printf() followed by a call to panic().
-func (l *Logger) Panicf(format string, v ...any) {
- s := fmt.Sprintf(format, v...)
- l.Output(2, s)
- panic(s)
-}
-
-// Panicln is equivalent to l.Println() followed by a call to panic().
-func (l *Logger) Panicln(v ...any) {
- s := fmt.Sprintln(v...)
- l.Output(2, s)
- panic(s)
-}
-
-// Flags returns the output flags for the logger.
-// The flag bits are Ldate, Ltime, and so on.
-func (l *Logger) Flags() int {
- l.mu.Lock()
- defer l.mu.Unlock()
- return l.flag
-}
-
-// SetFlags sets the output flags for the logger.
-// The flag bits are Ldate, Ltime, and so on.
-func (l *Logger) SetFlags(flag int) {
- l.mu.Lock()
- defer l.mu.Unlock()
- l.flag = flag
-}
-
-// Prefix returns the output prefix for the logger.
-func (l *Logger) Prefix() string {
- l.mu.Lock()
- defer l.mu.Unlock()
- return l.prefix
-}
-
-// SetPrefix sets the output prefix for the logger.
-func (l *Logger) SetPrefix(prefix string) {
- l.mu.Lock()
- defer l.mu.Unlock()
- l.prefix = prefix
-}
-
-// Writer returns the output destination for the logger.
-func (l *Logger) Writer() io.Writer {
- l.mu.Lock()
- defer l.mu.Unlock()
- return l.out
-}
-
-// SetOutput sets the output destination for the standard logger.
-func SetOutput(w io.Writer) {
- std.SetOutput(w)
-}
-
-// Flags returns the output flags for the standard logger.
-// The flag bits are Ldate, Ltime, and so on.
-func Flags() int {
- return std.Flags()
-}
-
-// SetFlags sets the output flags for the standard logger.
-// The flag bits are Ldate, Ltime, and so on.
-func SetFlags(flag int) {
- std.SetFlags(flag)
-}
-
-// Prefix returns the output prefix for the standard logger.
-func Prefix() string {
- return std.Prefix()
-}
-
-// SetPrefix sets the output prefix for the standard logger.
-func SetPrefix(prefix string) {
- std.SetPrefix(prefix)
-}
-
-// Writer returns the output destination for the standard logger.
-func Writer() io.Writer {
- return std.Writer()
-}
-
-// These functions write to the standard logger.
-
-// Print calls Output to print to the standard logger.
-// Arguments are handled in the manner of fmt.Print.
-func Print(v ...any) {
- if atomic.LoadInt32(&std.isDiscard) != 0 {
- return
- }
- std.Output(2, fmt.Sprint(v...))
-}
-
-// Printf calls Output to print to the standard logger.
-// Arguments are handled in the manner of fmt.Printf.
-func Printf(format string, v ...any) {
- if atomic.LoadInt32(&std.isDiscard) != 0 {
- return
- }
- std.Output(2, fmt.Sprintf(format, v...))
-}
-
-// Println calls Output to print to the standard logger.
-// Arguments are handled in the manner of fmt.Println.
-func Println(v ...any) {
- if atomic.LoadInt32(&std.isDiscard) != 0 {
- return
- }
- std.Output(2, fmt.Sprintln(v...))
-}
-
-// Fatal is equivalent to Print() followed by a call to os.Exit(1).
-func Fatal(v ...any) {
- std.Output(2, fmt.Sprint(v...))
- os.Exit(1)
-}
-
-// Fatalf is equivalent to Printf() followed by a call to os.Exit(1).
-func Fatalf(format string, v ...any) {
- std.Output(2, fmt.Sprintf(format, v...))
- os.Exit(1)
-}
-
-// Fatalln is equivalent to Println() followed by a call to os.Exit(1).
-func Fatalln(v ...any) {
- std.Output(2, fmt.Sprintln(v...))
- os.Exit(1)
-}
-
-// Panic is equivalent to Print() followed by a call to panic().
-func Panic(v ...any) {
- s := fmt.Sprint(v...)
- std.Output(2, s)
- panic(s)
-}
-
-// Panicf is equivalent to Printf() followed by a call to panic().
-func Panicf(format string, v ...any) {
- s := fmt.Sprintf(format, v...)
- std.Output(2, s)
- panic(s)
-}
-
-// Panicln is equivalent to Println() followed by a call to panic().
-func Panicln(v ...any) {
- s := fmt.Sprintln(v...)
- std.Output(2, s)
- panic(s)
-}
-
-// Output writes the output for a logging event. The string s contains
-// the text to print after the prefix specified by the flags of the
-// Logger. A newline is appended if the last character of s is not
-// already a newline. Calldepth is the number of frames to skip
-// when computing the file name and line number
-// if Llongfile or Lshortfile is set; a value of 1 will print the details
-// for the caller of Output.
-func Output(calldepth int, s string) error {
- return std.Output(calldepth+1, s) // +1 for this frame.
-}
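
Note: the flag bits above combine with | and only Lmsgprefix changes field order;
a short sketch of constructing a Logger with them (the prefix text is illustrative):

    package main

    import (
        "log"
        "os"
    )

    func main() {
        // Prints e.g. "2009/01/23 01:23:23 main.go:13: worker: started";
        // without Lmsgprefix the prefix would lead the line instead.
        l := log.New(os.Stderr, "worker: ", log.LstdFlags|log.Lshortfile|log.Lmsgprefix)
        l.Println("started")
    }
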
diff --git a/contrib/go/_std_1.18/src/math/abs.go b/contrib/go/_std_1.18/src/math/abs.go
deleted file mode 100644
index df83add695..0000000000
--- a/contrib/go/_std_1.18/src/math/abs.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// Abs returns the absolute value of x.
-//
-// Special cases are:
-// Abs(±Inf) = +Inf
-// Abs(NaN) = NaN
-func Abs(x float64) float64 {
- return Float64frombits(Float64bits(x) &^ (1 << 63))
-}
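
Note: Abs is branch-free because IEEE-754 float64 keeps the sign in bit 63, so
clearing that single bit yields |x|; a quick check of the bit trick (the values
are illustrative):

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        x := -3.5
        bits := math.Float64bits(x) &^ (1 << 63) // clear the sign bit
        fmt.Println(math.Float64frombits(bits))  // 3.5
        fmt.Println(math.Abs(x))                 // 3.5
        fmt.Println(math.Abs(math.Inf(-1)))      // +Inf
    }
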
diff --git a/contrib/go/_std_1.18/src/math/acosh.go b/contrib/go/_std_1.18/src/math/acosh.go
deleted file mode 100644
index f74e0b62fb..0000000000
--- a/contrib/go/_std_1.18/src/math/acosh.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// The original C code, the long comment, and the constants
-// below are from FreeBSD's /usr/src/lib/msun/src/e_acosh.c
-// and came with this notice. The go code is a simplified
-// version of the original C.
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunPro, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-//
-// __ieee754_acosh(x)
-// Method :
-// Based on
-// acosh(x) = log [ x + sqrt(x*x-1) ]
-// we have
-// acosh(x) := log(x)+ln2, if x is large; else
-// acosh(x) := log(2x-1/(sqrt(x*x-1)+x)) if x>2; else
-// acosh(x) := log1p(t+sqrt(2.0*t+t*t)); where t=x-1.
-//
-// Special cases:
-// acosh(x) is NaN with signal if x<1.
-// acosh(NaN) is NaN without signal.
-//
-
-// Acosh returns the inverse hyperbolic cosine of x.
-//
-// Special cases are:
-// Acosh(+Inf) = +Inf
-// Acosh(x) = NaN if x < 1
-// Acosh(NaN) = NaN
-func Acosh(x float64) float64 {
- if haveArchAcosh {
- return archAcosh(x)
- }
- return acosh(x)
-}
-
-func acosh(x float64) float64 {
- const Large = 1 << 28 // 2**28
- // first case is special case
- switch {
- case x < 1 || IsNaN(x):
- return NaN()
- case x == 1:
- return 0
- case x >= Large:
- return Log(x) + Ln2 // x > 2**28
- case x > 2:
- return Log(2*x - 1/(x+Sqrt(x*x-1))) // 2**28 > x > 2
- }
- t := x - 1
- return Log1p(t + Sqrt(2*t+t*t)) // 2 >= x > 1
-}
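
Note: the three branches above are accuracy refinements of the single identity
acosh(x) = log(x + sqrt(x*x-1)); a quick comparison against that naive form
(the sample points are illustrative):

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        for _, x := range []float64{1.5, 3, 1e9} {
            naive := math.Log(x + math.Sqrt(x*x-1))
            fmt.Printf("x=%-6g Acosh=%.15g naive=%.15g\n", x, math.Acosh(x), naive)
        }
    }
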
diff --git a/contrib/go/_std_1.18/src/math/asin.go b/contrib/go/_std_1.18/src/math/asin.go
deleted file mode 100644
index 989a74155b..0000000000
--- a/contrib/go/_std_1.18/src/math/asin.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-/*
- Floating-point arcsine and arccosine.
-
- They are implemented by computing the arctangent
- after appropriate range reduction.
-*/
-
-// Asin returns the arcsine, in radians, of x.
-//
-// Special cases are:
-// Asin(±0) = ±0
-// Asin(x) = NaN if x < -1 or x > 1
-func Asin(x float64) float64 {
- if haveArchAsin {
- return archAsin(x)
- }
- return asin(x)
-}
-
-func asin(x float64) float64 {
- if x == 0 {
- return x // special case
- }
- sign := false
- if x < 0 {
- x = -x
- sign = true
- }
- if x > 1 {
- return NaN() // special case
- }
-
- temp := Sqrt(1 - x*x)
- if x > 0.7 {
- temp = Pi/2 - satan(temp/x)
- } else {
- temp = satan(x / temp)
- }
-
- if sign {
- temp = -temp
- }
- return temp
-}
-
-// Acos returns the arccosine, in radians, of x.
-//
-// Special case is:
-// Acos(x) = NaN if x < -1 or x > 1
-func Acos(x float64) float64 {
- if haveArchAcos {
- return archAcos(x)
- }
- return acos(x)
-}
-
-func acos(x float64) float64 {
- return Pi/2 - Asin(x)
-}
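
Note: asin reduces to the arctangent via asin(x) = atan(x/sqrt(1-x*x)), switching
to the complementary form past 0.7 where sqrt(1-x*x) grows small; a quick check of
the identity (the sample points are illustrative):

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        for _, x := range []float64{0.3, 0.7, 0.99} {
            viaAtan := math.Atan(x / math.Sqrt(1-x*x))
            fmt.Printf("x=%g Asin=%.15g atan form=%.15g\n", x, math.Asin(x), viaAtan)
        }
        // And Acos is simply the complement.
        fmt.Println(math.Acos(0.3), math.Pi/2-math.Asin(0.3))
    }
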
diff --git a/contrib/go/_std_1.18/src/math/asinh.go b/contrib/go/_std_1.18/src/math/asinh.go
deleted file mode 100644
index 6dcb241c1f..0000000000
--- a/contrib/go/_std_1.18/src/math/asinh.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// The original C code, the long comment, and the constants
-// below are from FreeBSD's /usr/src/lib/msun/src/s_asinh.c
-// and came with this notice. The go code is a simplified
-// version of the original C.
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunPro, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-//
-// asinh(x)
-// Method :
-// Based on
-// asinh(x) = sign(x) * log [ |x| + sqrt(x*x+1) ]
-// we have
-// asinh(x) := x if 1+x*x=1,
-// := sign(x)*(log(x)+ln2) for large |x|, else
-// := sign(x)*log(2|x|+1/(|x|+sqrt(x*x+1))) if|x|>2, else
-// := sign(x)*log1p(|x| + x**2/(1 + sqrt(1+x**2)))
-//
-
-// Asinh returns the inverse hyperbolic sine of x.
-//
-// Special cases are:
-// Asinh(±0) = ±0
-// Asinh(±Inf) = ±Inf
-// Asinh(NaN) = NaN
-func Asinh(x float64) float64 {
- if haveArchAsinh {
- return archAsinh(x)
- }
- return asinh(x)
-}
-
-func asinh(x float64) float64 {
- const (
- Ln2 = 6.93147180559945286227e-01 // 0x3FE62E42FEFA39EF
- NearZero = 1.0 / (1 << 28) // 2**-28
- Large = 1 << 28 // 2**28
- )
- // special cases
- if IsNaN(x) || IsInf(x, 0) {
- return x
- }
- sign := false
- if x < 0 {
- x = -x
- sign = true
- }
- var temp float64
- switch {
- case x > Large:
- temp = Log(x) + Ln2 // |x| > 2**28
- case x > 2:
- temp = Log(2*x + 1/(Sqrt(x*x+1)+x)) // 2**28 > |x| > 2.0
- case x < NearZero:
- temp = x // |x| < 2**-28
- default:
- temp = Log1p(x + x*x/(1+Sqrt(1+x*x))) // 2.0 > |x| > 2**-28
- }
- if sign {
- temp = -temp
- }
- return temp
-}
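
Note: asinh works on |x| and restores the sign at the end (asinh is odd), and for
large |x| it collapses to log(x)+ln2 since sqrt(x*x+1) is effectively x; a quick
check of both facts (the sample points are illustrative):

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        fmt.Println(math.Asinh(-2), -math.Asinh(2)) // equal: asinh is odd
        x := 1e20
        fmt.Println(math.Asinh(x), math.Log(x)+math.Ln2) // agree for large x
    }
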
diff --git a/contrib/go/_std_1.18/src/math/atan.go b/contrib/go/_std_1.18/src/math/atan.go
deleted file mode 100644
index 69af860161..0000000000
--- a/contrib/go/_std_1.18/src/math/atan.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-/*
- Floating-point arctangent.
-*/
-
-// The original C code, the long comment, and the constants below were
-// from http://netlib.sandia.gov/cephes/cmath/atan.c, available from
-// http://www.netlib.org/cephes/cmath.tgz.
-// The go code is a version of the original C.
-//
-// atan.c
-// Inverse circular tangent (arctangent)
-//
-// SYNOPSIS:
-// double x, y, atan();
-// y = atan( x );
-//
-// DESCRIPTION:
-// Returns radian angle between -pi/2 and +pi/2 whose tangent is x.
-//
-// Range reduction is from three intervals into the interval from zero to 0.66.
-// The approximant uses a rational function of degree 4/5 of the form
-// x + x**3 P(x)/Q(x).
-//
-// ACCURACY:
-// Relative error:
-// arithmetic   domain     # trials   peak      rms
-// DEC          -10, 10    50000      2.4e-17   8.3e-18
-// IEEE         -10, 10    10^6       1.8e-16   5.0e-17
-//
-// Cephes Math Library Release 2.8: June, 2000
-// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
-//
-// The readme file at http://netlib.sandia.gov/cephes/ says:
-// Some software in this archive may be from the book _Methods and
-// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
-// International, 1989) or from the Cephes Mathematical Library, a
-// commercial product. In either event, it is copyrighted by the author.
-// What you see here may be used freely but it comes with no support or
-// guarantee.
-//
-// The two known misprints in the book are repaired here in the
-// source listings for the gamma function and the incomplete beta
-// integral.
-//
-// Stephen L. Moshier
-// moshier@na-net.ornl.gov
-
-// xatan evaluates a series valid in the range [0, 0.66].
-func xatan(x float64) float64 {
- const (
- P0 = -8.750608600031904122785e-01
- P1 = -1.615753718733365076637e+01
- P2 = -7.500855792314704667340e+01
- P3 = -1.228866684490136173410e+02
- P4 = -6.485021904942025371773e+01
- Q0 = +2.485846490142306297962e+01
- Q1 = +1.650270098316988542046e+02
- Q2 = +4.328810604912902668951e+02
- Q3 = +4.853903996359136964868e+02
- Q4 = +1.945506571482613964425e+02
- )
- z := x * x
- z = z * ((((P0*z+P1)*z+P2)*z+P3)*z + P4) / (((((z+Q0)*z+Q1)*z+Q2)*z+Q3)*z + Q4)
- z = x*z + x
- return z
-}
-
-// satan reduces its argument (known to be positive)
-// to the range [0, 0.66] and calls xatan.
-func satan(x float64) float64 {
- const (
- Morebits = 6.123233995736765886130e-17 // pi/2 = PIO2 + Morebits
- Tan3pio8 = 2.41421356237309504880 // tan(3*pi/8)
- )
- if x <= 0.66 {
- return xatan(x)
- }
- if x > Tan3pio8 {
- return Pi/2 - xatan(1/x) + Morebits
- }
- return Pi/4 + xatan((x-1)/(x+1)) + 0.5*Morebits
-}
-
-// Atan returns the arctangent, in radians, of x.
-//
-// Special cases are:
-// Atan(±0) = ±0
-// Atan(±Inf) = ±Pi/2
-func Atan(x float64) float64 {
- if haveArchAtan {
- return archAtan(x)
- }
- return atan(x)
-}
-
-func atan(x float64) float64 {
- if x == 0 {
- return x
- }
- if x > 0 {
- return satan(x)
- }
- return -satan(-x)
-}
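
Note: satan folds any positive argument into [0, 0.66] using
atan(x) = Pi/2 - atan(1/x) for large x and atan(x) = Pi/4 + atan((x-1)/(x+1)) in
between, so the rational approximation in xatan only ever sees that small
interval; a quick check of both identities (the sample points are illustrative):

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        fmt.Println(math.Atan(5), math.Pi/2-math.Atan(1.0/5))     // equal
        fmt.Println(math.Atan(1.2), math.Pi/4+math.Atan(0.2/2.2)) // equal
    }
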
diff --git a/contrib/go/_std_1.18/src/math/atan2.go b/contrib/go/_std_1.18/src/math/atan2.go
deleted file mode 100644
index 11d7e81acd..0000000000
--- a/contrib/go/_std_1.18/src/math/atan2.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// Atan2 returns the arc tangent of y/x, using
-// the signs of the two to determine the quadrant
-// of the return value.
-//
-// Special cases are (in order):
-// Atan2(y, NaN) = NaN
-// Atan2(NaN, x) = NaN
-// Atan2(+0, x>=0) = +0
-// Atan2(-0, x>=0) = -0
-// Atan2(+0, x<=-0) = +Pi
-// Atan2(-0, x<=-0) = -Pi
-// Atan2(y>0, 0) = +Pi/2
-// Atan2(y<0, 0) = -Pi/2
-// Atan2(+Inf, +Inf) = +Pi/4
-// Atan2(-Inf, +Inf) = -Pi/4
-// Atan2(+Inf, -Inf) = 3Pi/4
-// Atan2(-Inf, -Inf) = -3Pi/4
-// Atan2(y, +Inf) = 0
-// Atan2(y>0, -Inf) = +Pi
-// Atan2(y<0, -Inf) = -Pi
-// Atan2(+Inf, x) = +Pi/2
-// Atan2(-Inf, x) = -Pi/2
-func Atan2(y, x float64) float64 {
- if haveArchAtan2 {
- return archAtan2(y, x)
- }
- return atan2(y, x)
-}
-
-func atan2(y, x float64) float64 {
- // special cases
- switch {
- case IsNaN(y) || IsNaN(x):
- return NaN()
- case y == 0:
- if x >= 0 && !Signbit(x) {
- return Copysign(0, y)
- }
- return Copysign(Pi, y)
- case x == 0:
- return Copysign(Pi/2, y)
- case IsInf(x, 0):
- if IsInf(x, 1) {
- switch {
- case IsInf(y, 0):
- return Copysign(Pi/4, y)
- default:
- return Copysign(0, y)
- }
- }
- switch {
- case IsInf(y, 0):
- return Copysign(3*Pi/4, y)
- default:
- return Copysign(Pi, y)
- }
- case IsInf(y, 0):
- return Copysign(Pi/2, y)
- }
-
- // Call atan and determine the quadrant.
- q := Atan(y / x)
- if x < 0 {
- if q <= 0 {
- return q + Pi
- }
- return q - Pi
- }
- return q
-}
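
Note: Atan(y/x) alone cannot tell quadrants apart because (1,1) and (-1,-1) share
the same ratio; atan2 disambiguates with the operand signs, as in this sketch:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        fmt.Println(math.Atan2(1, 1))   //  Pi/4   (first quadrant)
        fmt.Println(math.Atan2(-1, -1)) // -3Pi/4  (third quadrant, same y/x ratio)
        fmt.Println(math.Atan2(1, -1))  //  3Pi/4  (second quadrant)
        fmt.Println(math.Atan2(0, -1))  //  Pi     (negative real axis)
    }
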
diff --git a/contrib/go/_std_1.18/src/math/atanh.go b/contrib/go/_std_1.18/src/math/atanh.go
deleted file mode 100644
index fe8bd6d8a4..0000000000
--- a/contrib/go/_std_1.18/src/math/atanh.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// The original C code, the long comment, and the constants
-// below are from FreeBSD's /usr/src/lib/msun/src/e_atanh.c
-// and came with this notice. The go code is a simplified
-// version of the original C.
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunPro, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-//
-// __ieee754_atanh(x)
-// Method :
-// 1. Reduce x to positive by atanh(-x) = -atanh(x)
-// 2. For x>=0.5
-// atanh(x) = (1/2) * log(1 + 2x/(1-x)) = 0.5 * log1p(2 * x/(1-x))
-//
-// For x<0.5
-// atanh(x) = 0.5*log1p(2x+2x*x/(1-x))
-//
-// Special cases:
-// atanh(x) is NaN if |x| > 1 with signal;
-// atanh(NaN) is that NaN with no signal;
-// atanh(+-1) is +-INF with signal.
-//
-
-// Atanh returns the inverse hyperbolic tangent of x.
-//
-// Special cases are:
-// Atanh(1) = +Inf
-// Atanh(±0) = ±0
-// Atanh(-1) = -Inf
-// Atanh(x) = NaN if x < -1 or x > 1
-// Atanh(NaN) = NaN
-func Atanh(x float64) float64 {
- if haveArchAtanh {
- return archAtanh(x)
- }
- return atanh(x)
-}
-
-func atanh(x float64) float64 {
- const NearZero = 1.0 / (1 << 28) // 2**-28
- // special cases
- switch {
- case x < -1 || x > 1 || IsNaN(x):
- return NaN()
- case x == 1:
- return Inf(1)
- case x == -1:
- return Inf(-1)
- }
- sign := false
- if x < 0 {
- x = -x
- sign = true
- }
- var temp float64
- switch {
- case x < NearZero:
- temp = x
- case x < 0.5:
- temp = x + x
- temp = 0.5 * Log1p(temp+temp*x/(1-x))
- default:
- temp = 0.5 * Log1p((x+x)/(1-x))
- }
- if sign {
- temp = -temp
- }
- return temp
-}
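
Note: both finite branches above are rearrangements of
atanh(x) = (1/2) * log((1+x)/(1-x)), expressed through log1p so that the result
stays accurate when x is tiny; a quick comparison against the direct form (the
sample points are illustrative):

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        for _, x := range []float64{1e-10, 0.25, 0.9} {
            direct := 0.5 * math.Log((1+x)/(1-x))
            // Near zero the direct form loses digits (1+x rounds),
            // which is exactly what the log1p branches avoid.
            fmt.Printf("x=%-6g Atanh=%.15g direct=%.15g\n", x, math.Atanh(x), direct)
        }
    }
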
diff --git a/contrib/go/_std_1.18/src/math/big/arith.go b/contrib/go/_std_1.18/src/math/big/arith.go
deleted file mode 100644
index 8f55c195d4..0000000000
--- a/contrib/go/_std_1.18/src/math/big/arith.go
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file provides Go implementations of elementary multi-precision
-// arithmetic operations on word vectors. These have the suffix _g.
-// These are needed for platforms without assembly implementations of these routines.
-// This file also contains elementary operations that can be implemented
-// sufficiently efficiently in Go.
-
-package big
-
-import "math/bits"
-
-// A Word represents a single digit of a multi-precision unsigned integer.
-type Word uint
-
-const (
- _S = _W / 8 // word size in bytes
-
- _W = bits.UintSize // word size in bits
- _B = 1 << _W // digit base
- _M = _B - 1 // digit mask
-)
-
-// Many of the loops in this file are of the form
-// for i := 0; i < len(z) && i < len(x) && i < len(y); i++
-// i < len(z) is the real condition.
-// However, checking i < len(x) && i < len(y) as well is faster than
-// having the compiler do a bounds check in the body of the loop;
-// remarkably it is even faster than hoisting the bounds check
-// out of the loop, by doing something like
-// _, _ = x[len(z)-1], y[len(z)-1]
-// There are other ways to hoist the bounds check out of the loop,
-// but the compiler's BCE isn't powerful enough for them (yet?).
-// See the discussion in CL 164966.
-
-// ----------------------------------------------------------------------------
-// Elementary operations on words
-//
-// These operations are used by the vector operations below.
-
-// z1<<_W + z0 = x*y
-func mulWW_g(x, y Word) (z1, z0 Word) {
- hi, lo := bits.Mul(uint(x), uint(y))
- return Word(hi), Word(lo)
-}
-
-// z1<<_W + z0 = x*y + c
-func mulAddWWW_g(x, y, c Word) (z1, z0 Word) {
- hi, lo := bits.Mul(uint(x), uint(y))
- var cc uint
- lo, cc = bits.Add(lo, uint(c), 0)
- return Word(hi + cc), Word(lo)
-}
-
-// nlz returns the number of leading zeros in x.
-// Wraps bits.LeadingZeros call for convenience.
-func nlz(x Word) uint {
- return uint(bits.LeadingZeros(uint(x)))
-}
-
-// The resulting carry c is either 0 or 1.
-func addVV_g(z, x, y []Word) (c Word) {
- // The comment near the top of this file discusses this for loop condition.
- for i := 0; i < len(z) && i < len(x) && i < len(y); i++ {
- zi, cc := bits.Add(uint(x[i]), uint(y[i]), uint(c))
- z[i] = Word(zi)
- c = Word(cc)
- }
- return
-}
-
-// The resulting carry c is either 0 or 1.
-func subVV_g(z, x, y []Word) (c Word) {
- // The comment near the top of this file discusses this for loop condition.
- for i := 0; i < len(z) && i < len(x) && i < len(y); i++ {
- zi, cc := bits.Sub(uint(x[i]), uint(y[i]), uint(c))
- z[i] = Word(zi)
- c = Word(cc)
- }
- return
-}
-
-// The resulting carry c is either 0 or 1.
-func addVW_g(z, x []Word, y Word) (c Word) {
- c = y
- // The comment near the top of this file discusses this for loop condition.
- for i := 0; i < len(z) && i < len(x); i++ {
- zi, cc := bits.Add(uint(x[i]), uint(c), 0)
- z[i] = Word(zi)
- c = Word(cc)
- }
- return
-}
-
-// addVWlarge is addVW, but intended for large z.
-// The only difference is that we check on every iteration
-// whether we are done with carries,
-// and if so, switch to a much faster copy instead.
-// This is only a good idea for large z,
-// because the overhead of the check and the function call
-// outweigh the benefits when z is small.
-func addVWlarge(z, x []Word, y Word) (c Word) {
- c = y
- // The comment near the top of this file discusses this for loop condition.
- for i := 0; i < len(z) && i < len(x); i++ {
- if c == 0 {
- copy(z[i:], x[i:])
- return
- }
- zi, cc := bits.Add(uint(x[i]), uint(c), 0)
- z[i] = Word(zi)
- c = Word(cc)
- }
- return
-}
-
-func subVW_g(z, x []Word, y Word) (c Word) {
- c = y
- // The comment near the top of this file discusses this for loop condition.
- for i := 0; i < len(z) && i < len(x); i++ {
- zi, cc := bits.Sub(uint(x[i]), uint(c), 0)
- z[i] = Word(zi)
- c = Word(cc)
- }
- return
-}
-
-// subVWlarge is to subVW as addVWlarge is to addVW.
-func subVWlarge(z, x []Word, y Word) (c Word) {
- c = y
- // The comment near the top of this file discusses this for loop condition.
- for i := 0; i < len(z) && i < len(x); i++ {
- if c == 0 {
- copy(z[i:], x[i:])
- return
- }
- zi, cc := bits.Sub(uint(x[i]), uint(c), 0)
- z[i] = Word(zi)
- c = Word(cc)
- }
- return
-}
-
-func shlVU_g(z, x []Word, s uint) (c Word) {
- if s == 0 {
- copy(z, x)
- return
- }
- if len(z) == 0 {
- return
- }
- s &= _W - 1 // hint to the compiler that shifts by s don't need guard code
- ŝ := _W - s
- ŝ &= _W - 1 // ditto
- c = x[len(z)-1] >> ŝ
- for i := len(z) - 1; i > 0; i-- {
- z[i] = x[i]<<s | x[i-1]>>ŝ
- }
- z[0] = x[0] << s
- return
-}
-
-func shrVU_g(z, x []Word, s uint) (c Word) {
- if s == 0 {
- copy(z, x)
- return
- }
- if len(z) == 0 {
- return
- }
- if len(x) != len(z) {
- // This is an invariant guaranteed by the caller.
- panic("len(x) != len(z)")
- }
- s &= _W - 1 // hint to the compiler that shifts by s don't need guard code
- ŝ := _W - s
- ŝ &= _W - 1 // ditto
- c = x[0] << ŝ
- for i := 1; i < len(z); i++ {
- z[i-1] = x[i-1]>>s | x[i]<<ŝ
- }
- z[len(z)-1] = x[len(z)-1] >> s
- return
-}
-
-func mulAddVWW_g(z, x []Word, y, r Word) (c Word) {
- c = r
- // The comment near the top of this file discusses this for loop condition.
- for i := 0; i < len(z) && i < len(x); i++ {
- c, z[i] = mulAddWWW_g(x[i], y, c)
- }
- return
-}
-
-func addMulVVW_g(z, x []Word, y Word) (c Word) {
- // The comment near the top of this file discusses this for loop condition.
- for i := 0; i < len(z) && i < len(x); i++ {
- z1, z0 := mulAddWWW_g(x[i], y, z[i])
- lo, cc := bits.Add(uint(z0), uint(c), 0)
- c, z[i] = Word(cc), Word(lo)
- c += z1
- }
- return
-}
-
-// q = (x1<<_W + x0 - r)/y, with m = floor((_B^2 - 1)/d) - _B an approximate
-// reciprocal of the normalized divisor d, as described in "Improved Division by
-// Invariant Integers" (IEEE Transactions on Computers, 11 Jun. 2010). Requires x1 < y.
-func divWW(x1, x0, y, m Word) (q, r Word) {
- s := nlz(y)
- if s != 0 {
- x1 = x1<<s | x0>>(_W-s)
- x0 <<= s
- y <<= s
- }
- d := uint(y)
- // We know that
- // m = ⎣(B^2-1)/d⎦-B
- // ⎣(B^2-1)/d⎦ = m+B
- // (B^2-1)/d = m+B+delta1 0 <= delta1 <= (d-1)/d
- // B^2/d = m+B+delta2 0 <= delta2 <= 1
- // The quotient we're trying to compute is
- // quotient = ⎣(x1*B+x0)/d⎦
- // = ⎣(x1*B*(B^2/d)+x0*(B^2/d))/B^2⎦
- // = ⎣(x1*B*(m+B+delta2)+x0*(m+B+delta2))/B^2⎦
- // = ⎣(x1*m+x1*B+x0)/B + x0*m/B^2 + delta2*(x1*B+x0)/B^2⎦
- // The latter two terms of this three-term sum are between 0 and 1.
- // So we can compute just the first term, and we will be low by at most 2.
- t1, t0 := bits.Mul(uint(m), uint(x1))
- _, c := bits.Add(t0, uint(x0), 0)
- t1, _ = bits.Add(t1, uint(x1), c)
- // The quotient is either t1, t1+1, or t1+2.
- // We'll try t1 and adjust if needed.
- qq := t1
- // compute remainder r=x-d*q.
- dq1, dq0 := bits.Mul(d, qq)
- r0, b := bits.Sub(uint(x0), dq0, 0)
- r1, _ := bits.Sub(uint(x1), dq1, b)
- // The remainder we just computed is bounded above by B+d:
- // r = x1*B + x0 - d*q.
- // = x1*B + x0 - d*⎣(x1*m+x1*B+x0)/B⎦
- // = x1*B + x0 - d*((x1*m+x1*B+x0)/B-alpha) 0 <= alpha < 1
- // = x1*B + x0 - x1*d/B*m - x1*d - x0*d/B + d*alpha
- // = x1*B + x0 - x1*d/B*⎣(B^2-1)/d-B⎦ - x1*d - x0*d/B + d*alpha
- // = x1*B + x0 - x1*d/B*((B^2-1)/d-B-beta) - x1*d - x0*d/B + d*alpha 0 <= beta < 1
- // = x1*B + x0 - x1*B + x1/B + x1*d + x1*d/B*beta - x1*d - x0*d/B + d*alpha
- // = x0 + x1/B + x1*d/B*beta - x0*d/B + d*alpha
- // = x0*(1-d/B) + x1*(1+d*beta)/B + d*alpha
- // < B*(1-d/B) + d*B/B + d because x0<B (and 1-d/B>0), x1<d, 1+d*beta<=B, alpha<1
- // = B - d + d + d
- // = B+d
- // So r1 can only be 0 or 1. If r1 is 1, then we know q was too small.
- // Add 1 to q and subtract d from r. That guarantees that r is <B, so
- // we no longer need to keep track of r1.
- if r1 != 0 {
- qq++
- r0 -= d
- }
- // If the remainder is still too large, increment q one more time.
- if r0 >= d {
- qq++
- r0 -= d
- }
- return Word(qq), Word(r0 >> s)
-}
-
-// reciprocalWord returns the reciprocal of the divisor. rec = floor((_B^2 - 1)/u) - _B. u = d1 << nlz(d1).
-func reciprocalWord(d1 Word) Word {
- u := uint(d1 << nlz(d1))
- x1 := ^u
- x0 := uint(_M)
- rec, _ := bits.Div(x1, x0, u) // (_B^2-1)/U-_B = (_B*(_M-C)+_M)/U
- return Word(rec)
-}
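
Note: all the word-vector loops above chain carries through math/bits primitives;
a minimal standalone sketch of a two-word add with the same carry threading as
addVV_g (the operand values are illustrative):

    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        // (x1<<W | x0) + (y1<<W | y0), carrying between words like addVV_g.
        x0, x1 := ^uint(0), uint(1) // low word all ones forces a carry
        y0, y1 := uint(1), uint(2)

        z0, c := bits.Add(x0, y0, 0)
        z1, c := bits.Add(x1, y1, c)
        fmt.Println(z1, z0, c) // 4 0 0
    }
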
diff --git a/contrib/go/_std_1.18/src/math/big/arith_amd64.s b/contrib/go/_std_1.18/src/math/big/arith_amd64.s
deleted file mode 100644
index 5c72a27d8d..0000000000
--- a/contrib/go/_std_1.18/src/math/big/arith_amd64.s
+++ /dev/null
@@ -1,526 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !math_big_pure_go
-// +build !math_big_pure_go
-
-#include "textflag.h"
-
-// This file provides fast assembly versions for the elementary
-// arithmetic operations on vectors implemented in arith.go.
-
-// func mulWW(x, y Word) (z1, z0 Word)
-TEXT ·mulWW(SB),NOSPLIT,$0
- MOVQ x+0(FP), AX
- MULQ y+8(FP)
- MOVQ DX, z1+16(FP)
- MOVQ AX, z0+24(FP)
- RET
-
-
-
-// The carry bit is saved with SBBQ Rx, Rx: if the carry was set, Rx is -1, otherwise it is 0.
-// It is restored with ADDQ Rx, Rx: if Rx was -1 the carry is set, otherwise it is cleared.
-// This is faster than using rotate instructions.
-
-// func addVV(z, x, y []Word) (c Word)
-TEXT ·addVV(SB),NOSPLIT,$0
- MOVQ z_len+8(FP), DI
- MOVQ x+24(FP), R8
- MOVQ y+48(FP), R9
- MOVQ z+0(FP), R10
-
- MOVQ $0, CX // c = 0
- MOVQ $0, SI // i = 0
-
- // s/JL/JMP/ below to disable the unrolled loop
- SUBQ $4, DI // n -= 4
- JL V1 // if n < 0 goto V1
-
-U1: // n >= 0
- // regular loop body unrolled 4x
- ADDQ CX, CX // restore CF
- MOVQ 0(R8)(SI*8), R11
- MOVQ 8(R8)(SI*8), R12
- MOVQ 16(R8)(SI*8), R13
- MOVQ 24(R8)(SI*8), R14
- ADCQ 0(R9)(SI*8), R11
- ADCQ 8(R9)(SI*8), R12
- ADCQ 16(R9)(SI*8), R13
- ADCQ 24(R9)(SI*8), R14
- MOVQ R11, 0(R10)(SI*8)
- MOVQ R12, 8(R10)(SI*8)
- MOVQ R13, 16(R10)(SI*8)
- MOVQ R14, 24(R10)(SI*8)
- SBBQ CX, CX // save CF
-
- ADDQ $4, SI // i += 4
- SUBQ $4, DI // n -= 4
- JGE U1 // if n >= 0 goto U1
-
-V1: ADDQ $4, DI // n += 4
- JLE E1 // if n <= 0 goto E1
-
-L1: // n > 0
- ADDQ CX, CX // restore CF
- MOVQ 0(R8)(SI*8), R11
- ADCQ 0(R9)(SI*8), R11
- MOVQ R11, 0(R10)(SI*8)
- SBBQ CX, CX // save CF
-
- ADDQ $1, SI // i++
- SUBQ $1, DI // n--
- JG L1 // if n > 0 goto L1
-
-E1: NEGQ CX
- MOVQ CX, c+72(FP) // return c
- RET
-
-
-// func subVV(z, x, y []Word) (c Word)
-// (same as addVV except for SBBQ instead of ADCQ and label names)
-TEXT ·subVV(SB),NOSPLIT,$0
- MOVQ z_len+8(FP), DI
- MOVQ x+24(FP), R8
- MOVQ y+48(FP), R9
- MOVQ z+0(FP), R10
-
- MOVQ $0, CX // c = 0
- MOVQ $0, SI // i = 0
-
- // s/JL/JMP/ below to disable the unrolled loop
- SUBQ $4, DI // n -= 4
- JL V2 // if n < 0 goto V2
-
-U2: // n >= 0
- // regular loop body unrolled 4x
- ADDQ CX, CX // restore CF
- MOVQ 0(R8)(SI*8), R11
- MOVQ 8(R8)(SI*8), R12
- MOVQ 16(R8)(SI*8), R13
- MOVQ 24(R8)(SI*8), R14
- SBBQ 0(R9)(SI*8), R11
- SBBQ 8(R9)(SI*8), R12
- SBBQ 16(R9)(SI*8), R13
- SBBQ 24(R9)(SI*8), R14
- MOVQ R11, 0(R10)(SI*8)
- MOVQ R12, 8(R10)(SI*8)
- MOVQ R13, 16(R10)(SI*8)
- MOVQ R14, 24(R10)(SI*8)
- SBBQ CX, CX // save CF
-
- ADDQ $4, SI // i += 4
- SUBQ $4, DI // n -= 4
- JGE U2 // if n >= 0 goto U2
-
-V2: ADDQ $4, DI // n += 4
- JLE E2 // if n <= 0 goto E2
-
-L2: // n > 0
- ADDQ CX, CX // restore CF
- MOVQ 0(R8)(SI*8), R11
- SBBQ 0(R9)(SI*8), R11
- MOVQ R11, 0(R10)(SI*8)
- SBBQ CX, CX // save CF
-
- ADDQ $1, SI // i++
- SUBQ $1, DI // n--
- JG L2 // if n > 0 goto L2
-
-E2: NEGQ CX
- MOVQ CX, c+72(FP) // return c
- RET
-
-
-// func addVW(z, x []Word, y Word) (c Word)
-TEXT ·addVW(SB),NOSPLIT,$0
- MOVQ z_len+8(FP), DI
- CMPQ DI, $32
- JG large
- MOVQ x+24(FP), R8
- MOVQ y+48(FP), CX // c = y
- MOVQ z+0(FP), R10
-
- MOVQ $0, SI // i = 0
-
- // s/JL/JMP/ below to disable the unrolled loop
- SUBQ $4, DI // n -= 4
- JL V3 // if n < 4 goto V3
-
-U3: // n >= 0
- // regular loop body unrolled 4x
- MOVQ 0(R8)(SI*8), R11
- MOVQ 8(R8)(SI*8), R12
- MOVQ 16(R8)(SI*8), R13
- MOVQ 24(R8)(SI*8), R14
- ADDQ CX, R11
- ADCQ $0, R12
- ADCQ $0, R13
- ADCQ $0, R14
- SBBQ CX, CX // save CF
- NEGQ CX
- MOVQ R11, 0(R10)(SI*8)
- MOVQ R12, 8(R10)(SI*8)
- MOVQ R13, 16(R10)(SI*8)
- MOVQ R14, 24(R10)(SI*8)
-
- ADDQ $4, SI // i += 4
- SUBQ $4, DI // n -= 4
- JGE U3 // if n >= 0 goto U3
-
-V3: ADDQ $4, DI // n += 4
- JLE E3 // if n <= 0 goto E3
-
-L3: // n > 0
- ADDQ 0(R8)(SI*8), CX
- MOVQ CX, 0(R10)(SI*8)
- SBBQ CX, CX // save CF
- NEGQ CX
-
- ADDQ $1, SI // i++
- SUBQ $1, DI // n--
- JG L3 // if n > 0 goto L3
-
-E3: MOVQ CX, c+56(FP) // return c
- RET
-large:
- JMP ·addVWlarge(SB)
-
-
-// func subVW(z, x []Word, y Word) (c Word)
-// (same as addVW except for SUBQ/SBBQ instead of ADDQ/ADCQ and label names)
-TEXT ·subVW(SB),NOSPLIT,$0
- MOVQ z_len+8(FP), DI
- CMPQ DI, $32
- JG large
- MOVQ x+24(FP), R8
- MOVQ y+48(FP), CX // c = y
- MOVQ z+0(FP), R10
-
- MOVQ $0, SI // i = 0
-
- // s/JL/JMP/ below to disable the unrolled loop
- SUBQ $4, DI // n -= 4
- JL V4 // if n < 4 goto V4
-
-U4: // n >= 0
- // regular loop body unrolled 4x
- MOVQ 0(R8)(SI*8), R11
- MOVQ 8(R8)(SI*8), R12
- MOVQ 16(R8)(SI*8), R13
- MOVQ 24(R8)(SI*8), R14
- SUBQ CX, R11
- SBBQ $0, R12
- SBBQ $0, R13
- SBBQ $0, R14
- SBBQ CX, CX // save CF
- NEGQ CX
- MOVQ R11, 0(R10)(SI*8)
- MOVQ R12, 8(R10)(SI*8)
- MOVQ R13, 16(R10)(SI*8)
- MOVQ R14, 24(R10)(SI*8)
-
- ADDQ $4, SI // i += 4
- SUBQ $4, DI // n -= 4
- JGE U4 // if n >= 0 goto U4
-
-V4: ADDQ $4, DI // n += 4
- JLE E4 // if n <= 0 goto E4
-
-L4: // n > 0
- MOVQ 0(R8)(SI*8), R11
- SUBQ CX, R11
- MOVQ R11, 0(R10)(SI*8)
- SBBQ CX, CX // save CF
- NEGQ CX
-
- ADDQ $1, SI // i++
- SUBQ $1, DI // n--
- JG L4 // if n > 0 goto L4
-
-E4: MOVQ CX, c+56(FP) // return c
- RET
-large:
- JMP ·subVWlarge(SB)
-
-
-// func shlVU(z, x []Word, s uint) (c Word)
-TEXT ·shlVU(SB),NOSPLIT,$0
- MOVQ z_len+8(FP), BX // i = len(z)
- SUBQ $1, BX // i--
- JL X8b // i < 0 (n <= 0)
-
- // n > 0
- MOVQ z+0(FP), R10
- MOVQ x+24(FP), R8
- MOVQ s+48(FP), CX
- MOVQ (R8)(BX*8), AX // w1 = x[n-1]
- MOVQ $0, DX
- SHLQ CX, AX, DX // w1>>ŝ
- MOVQ DX, c+56(FP)
-
- CMPQ BX, $0
- JLE X8a // i <= 0
-
- // i > 0
-L8: MOVQ AX, DX // w = w1
- MOVQ -8(R8)(BX*8), AX // w1 = x[i-1]
- SHLQ CX, AX, DX // w<<s | w1>>ŝ
- MOVQ DX, (R10)(BX*8) // z[i] = w<<s | w1>>ŝ
- SUBQ $1, BX // i--
- JG L8 // i > 0
-
- // i <= 0
-X8a: SHLQ CX, AX // w1<<s
- MOVQ AX, (R10) // z[0] = w1<<s
- RET
-
-X8b: MOVQ $0, c+56(FP)
- RET
-
-
-// func shrVU(z, x []Word, s uint) (c Word)
-TEXT ·shrVU(SB),NOSPLIT,$0
- MOVQ z_len+8(FP), R11
- SUBQ $1, R11 // n--
- JL X9b // n < 0 (n <= 0)
-
- // n > 0
- MOVQ z+0(FP), R10
- MOVQ x+24(FP), R8
- MOVQ s+48(FP), CX
- MOVQ (R8), AX // w1 = x[0]
- MOVQ $0, DX
- SHRQ CX, AX, DX // w1<<ŝ
- MOVQ DX, c+56(FP)
-
- MOVQ $0, BX // i = 0
- JMP E9
-
- // i < n-1
-L9: MOVQ AX, DX // w = w1
- MOVQ 8(R8)(BX*8), AX // w1 = x[i+1]
- SHRQ CX, AX, DX // w>>s | w1<<ŝ
- MOVQ DX, (R10)(BX*8) // z[i] = w>>s | w1<<ŝ
- ADDQ $1, BX // i++
-
-E9: CMPQ BX, R11
- JL L9 // i < n-1
-
- // i >= n-1
-X9a: SHRQ CX, AX // w1>>s
- MOVQ AX, (R10)(R11*8) // z[n-1] = w1>>s
- RET
-
-X9b: MOVQ $0, c+56(FP)
- RET
-
-
-// func mulAddVWW(z, x []Word, y, r Word) (c Word)
-TEXT ·mulAddVWW(SB),NOSPLIT,$0
- MOVQ z+0(FP), R10
- MOVQ x+24(FP), R8
- MOVQ y+48(FP), R9
- MOVQ r+56(FP), CX // c = r
- MOVQ z_len+8(FP), R11
- MOVQ $0, BX // i = 0
-
- CMPQ R11, $4
- JL E5
-
-U5: // i+4 <= n
- // regular loop body unrolled 4x
- MOVQ (0*8)(R8)(BX*8), AX
- MULQ R9
- ADDQ CX, AX
- ADCQ $0, DX
- MOVQ AX, (0*8)(R10)(BX*8)
- MOVQ DX, CX
- MOVQ (1*8)(R8)(BX*8), AX
- MULQ R9
- ADDQ CX, AX
- ADCQ $0, DX
- MOVQ AX, (1*8)(R10)(BX*8)
- MOVQ DX, CX
- MOVQ (2*8)(R8)(BX*8), AX
- MULQ R9
- ADDQ CX, AX
- ADCQ $0, DX
- MOVQ AX, (2*8)(R10)(BX*8)
- MOVQ DX, CX
- MOVQ (3*8)(R8)(BX*8), AX
- MULQ R9
- ADDQ CX, AX
- ADCQ $0, DX
- MOVQ AX, (3*8)(R10)(BX*8)
- MOVQ DX, CX
- ADDQ $4, BX // i += 4
-
- LEAQ 4(BX), DX
- CMPQ DX, R11
- JLE U5
- JMP E5
-
-L5: MOVQ (R8)(BX*8), AX
- MULQ R9
- ADDQ CX, AX
- ADCQ $0, DX
- MOVQ AX, (R10)(BX*8)
- MOVQ DX, CX
- ADDQ $1, BX // i++
-
-E5: CMPQ BX, R11 // i < n
- JL L5
-
- MOVQ CX, c+64(FP)
- RET
-
-
-// func addMulVVW(z, x []Word, y Word) (c Word)
-TEXT ·addMulVVW(SB),NOSPLIT,$0
- CMPB ·support_adx(SB), $1
- JEQ adx
- MOVQ z+0(FP), R10
- MOVQ x+24(FP), R8
- MOVQ y+48(FP), R9
- MOVQ z_len+8(FP), R11
- MOVQ $0, BX // i = 0
- MOVQ $0, CX // c = 0
- MOVQ R11, R12
- ANDQ $-2, R12
- CMPQ R11, $2
- JAE A6
- JMP E6
-
-A6:
- MOVQ (R8)(BX*8), AX
- MULQ R9
- ADDQ (R10)(BX*8), AX
- ADCQ $0, DX
- ADDQ CX, AX
- ADCQ $0, DX
- MOVQ DX, CX
- MOVQ AX, (R10)(BX*8)
-
- MOVQ (8)(R8)(BX*8), AX
- MULQ R9
- ADDQ (8)(R10)(BX*8), AX
- ADCQ $0, DX
- ADDQ CX, AX
- ADCQ $0, DX
- MOVQ DX, CX
- MOVQ AX, (8)(R10)(BX*8)
-
- ADDQ $2, BX
- CMPQ BX, R12
- JL A6
- JMP E6
-
-L6: MOVQ (R8)(BX*8), AX
- MULQ R9
- ADDQ CX, AX
- ADCQ $0, DX
- ADDQ AX, (R10)(BX*8)
- ADCQ $0, DX
- MOVQ DX, CX
- ADDQ $1, BX // i++
-
-E6: CMPQ BX, R11 // i < n
- JL L6
-
- MOVQ CX, c+56(FP)
- RET
-
-adx:
- MOVQ z_len+8(FP), R11
- MOVQ z+0(FP), R10
- MOVQ x+24(FP), R8
- MOVQ y+48(FP), DX
- MOVQ $0, BX // i = 0
- MOVQ $0, CX // carry
- CMPQ R11, $8
- JAE adx_loop_header
- CMPQ BX, R11
- JL adx_short
- MOVQ CX, c+56(FP)
- RET
-
-adx_loop_header:
- MOVQ R11, R13
- ANDQ $-8, R13
-adx_loop:
- XORQ R9, R9 // unset flags
- MULXQ (R8), SI, DI
- ADCXQ CX,SI
- ADOXQ (R10), SI
- MOVQ SI,(R10)
-
- MULXQ 8(R8), AX, CX
- ADCXQ DI, AX
- ADOXQ 8(R10), AX
- MOVQ AX, 8(R10)
-
- MULXQ 16(R8), SI, DI
- ADCXQ CX, SI
- ADOXQ 16(R10), SI
- MOVQ SI, 16(R10)
-
- MULXQ 24(R8), AX, CX
- ADCXQ DI, AX
- ADOXQ 24(R10), AX
- MOVQ AX, 24(R10)
-
- MULXQ 32(R8), SI, DI
- ADCXQ CX, SI
- ADOXQ 32(R10), SI
- MOVQ SI, 32(R10)
-
- MULXQ 40(R8), AX, CX
- ADCXQ DI, AX
- ADOXQ 40(R10), AX
- MOVQ AX, 40(R10)
-
- MULXQ 48(R8), SI, DI
- ADCXQ CX, SI
- ADOXQ 48(R10), SI
- MOVQ SI, 48(R10)
-
- MULXQ 56(R8), AX, CX
- ADCXQ DI, AX
- ADOXQ 56(R10), AX
- MOVQ AX, 56(R10)
-
- ADCXQ R9, CX
- ADOXQ R9, CX
-
- ADDQ $64, R8
- ADDQ $64, R10
- ADDQ $8, BX
-
- CMPQ BX, R13
- JL adx_loop
- MOVQ z+0(FP), R10
- MOVQ x+24(FP), R8
- CMPQ BX, R11
- JL adx_short
- MOVQ CX, c+56(FP)
- RET
-
-adx_short:
- MULXQ (R8)(BX*8), SI, DI
- ADDQ CX, SI
- ADCQ $0, DI
- ADDQ SI, (R10)(BX*8)
- ADCQ $0, DI
- MOVQ DI, CX
- ADDQ $1, BX // i++
-
- CMPQ BX, R11
- JL adx_short
-
- MOVQ CX, c+56(FP)
- RET
-
-
-
diff --git a/contrib/go/_std_1.18/src/math/big/arith_decl.go b/contrib/go/_std_1.18/src/math/big/arith_decl.go
deleted file mode 100644
index eea3d6b325..0000000000
--- a/contrib/go/_std_1.18/src/math/big/arith_decl.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !math_big_pure_go
-// +build !math_big_pure_go
-
-package big
-
-// implemented in arith_$GOARCH.s
-func mulWW(x, y Word) (z1, z0 Word)
-func addVV(z, x, y []Word) (c Word)
-func subVV(z, x, y []Word) (c Word)
-func addVW(z, x []Word, y Word) (c Word)
-func subVW(z, x []Word, y Word) (c Word)
-func shlVU(z, x []Word, s uint) (c Word)
-func shrVU(z, x []Word, s uint) (c Word)
-func mulAddVWW(z, x []Word, y, r Word) (c Word)
-func addMulVVW(z, x []Word, y Word) (c Word)
diff --git a/contrib/go/_std_1.18/src/math/big/float.go b/contrib/go/_std_1.18/src/math/big/float.go
deleted file mode 100644
index a8c91a6e54..0000000000
--- a/contrib/go/_std_1.18/src/math/big/float.go
+++ /dev/null
@@ -1,1732 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements multi-precision floating-point numbers.
-// Like in the GNU MPFR library (https://www.mpfr.org/), operands
-// can be of mixed precision. Unlike MPFR, the rounding mode is
-// not specified with each operation, but with each operand. The
-// rounding mode of the result operand determines the rounding
-// mode of an operation. This is a from-scratch implementation.
-
-package big
-
-import (
- "fmt"
- "math"
- "math/bits"
-)
-
-const debugFloat = false // enable for debugging
-
-// A nonzero finite Float represents a multi-precision floating point number
-//
-// sign × mantissa × 2**exponent
-//
-// with 0.5 <= mantissa < 1.0, and MinExp <= exponent <= MaxExp.
-// A Float may also be zero (+0, -0) or infinite (+Inf, -Inf).
-// All Floats are ordered, and the ordering of two Floats x and y
-// is defined by x.Cmp(y).
-//
-// Each Float value also has a precision, rounding mode, and accuracy.
-// The precision is the maximum number of mantissa bits available to
-// represent the value. The rounding mode specifies how a result should
-// be rounded to fit into the mantissa bits, and accuracy describes the
-// rounding error with respect to the exact result.
-//
-// Unless specified otherwise, all operations (including setters) that
-// specify a *Float variable for the result (usually via the receiver
-// with the exception of MantExp), round the numeric result according
-// to the precision and rounding mode of the result variable.
-//
-// If the provided result precision is 0 (see below), it is set to the
-// precision of the argument with the largest precision value before any
-// rounding takes place, and the rounding mode remains unchanged. Thus,
-// uninitialized Floats provided as result arguments will have their
-// precision set to a reasonable value determined by the operands, and
-// their mode is the zero value for RoundingMode (ToNearestEven).
-//
-// By setting the desired precision to 24 or 53 and using matching rounding
-// mode (typically ToNearestEven), Float operations produce the same results
-// as the corresponding float32 or float64 IEEE-754 arithmetic for operands
-// that correspond to normal (i.e., not denormal) float32 or float64 numbers.
-// Exponent underflow and overflow lead to a 0 or an Infinity for different
-// values than IEEE-754 because Float exponents have a much larger range.
-//
-// The zero (uninitialized) value for a Float is ready to use and represents
-// the number +0.0 exactly, with precision 0 and rounding mode ToNearestEven.
-//
-// Operations always take pointer arguments (*Float) rather
-// than Float values, and each unique Float value requires
-// its own unique *Float pointer. To "copy" a Float value,
-// an existing (or newly allocated) Float must be set to
-// a new value using the Float.Set method; shallow copies
-// of Floats are not supported and may lead to errors.
-type Float struct {
- prec uint32
- mode RoundingMode
- acc Accuracy
- form form
- neg bool
- mant nat
- exp int32
-}
-
-// An ErrNaN panic is raised by a Float operation that would lead to
-// a NaN under IEEE-754 rules. An ErrNaN implements the error interface.
-type ErrNaN struct {
- msg string
-}
-
-func (err ErrNaN) Error() string {
- return err.msg
-}
-
-// NewFloat allocates and returns a new Float set to x,
-// with precision 53 and rounding mode ToNearestEven.
-// NewFloat panics with ErrNaN if x is a NaN.
-func NewFloat(x float64) *Float {
- if math.IsNaN(x) {
- panic(ErrNaN{"NewFloat(NaN)"})
- }
- return new(Float).SetFloat64(x)
-}
-
-// Exponent and precision limits.
-const (
- MaxExp = math.MaxInt32 // largest supported exponent
- MinExp = math.MinInt32 // smallest supported exponent
- MaxPrec = math.MaxUint32 // largest (theoretically) supported precision; likely memory-limited
-)
-
-// Internal representation: The mantissa bits x.mant of a nonzero finite
-// Float x are stored in a nat slice long enough to hold up to x.prec bits;
-// the slice may (but doesn't have to) be shorter if the mantissa contains
-// trailing 0 bits. x.mant is normalized if the msb of x.mant == 1 (i.e.,
-// the msb is shifted all the way "to the left"). Thus, if the mantissa has
-// trailing 0 bits or x.prec is not a multiple of the Word size _W,
-// x.mant[0] has trailing zero bits. The msb of the mantissa corresponds
-// to the value 0.5; the exponent x.exp shifts the binary point as needed.
-//
-// A zero or non-finite Float x ignores x.mant and x.exp.
-//
-// x form neg mant exp
-// ----------------------------------------------------------
-// ±0 zero sign - -
-// 0 < |x| < +Inf finite sign mantissa exponent
-// ±Inf inf sign - -
-
-// A form value describes the internal representation.
-type form byte
-
-// The form value order is relevant - do not change!
-const (
- zero form = iota
- finite
- inf
-)
-
-// RoundingMode determines how a Float value is rounded to the
-// desired precision. Rounding may change the Float value; the
-// rounding error is described by the Float's Accuracy.
-type RoundingMode byte
-
-// These constants define supported rounding modes.
-const (
- ToNearestEven RoundingMode = iota // == IEEE 754-2008 roundTiesToEven
- ToNearestAway // == IEEE 754-2008 roundTiesToAway
- ToZero // == IEEE 754-2008 roundTowardZero
- AwayFromZero // no IEEE 754-2008 equivalent
- ToNegativeInf // == IEEE 754-2008 roundTowardNegative
- ToPositiveInf // == IEEE 754-2008 roundTowardPositive
-)
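-
-// To sketch the effect of the mode (the decimal values below are computed
-// by hand for 8 bits of precision and are illustrative):
-//
-//	x := new(Float).SetPrec(8).SetMode(ToZero)
-//	x.Quo(NewFloat(1), NewFloat(3)) // 0.33203125, x.Acc() == Below
-//	y := new(Float).SetPrec(8).SetMode(ToPositiveInf)
-//	y.Quo(NewFloat(1), NewFloat(3)) // 0.333984375, y.Acc() == Above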
-
-//go:generate stringer -type=RoundingMode
-
-// Accuracy describes the rounding error produced by the most recent
-// operation that generated a Float value, relative to the exact value.
-type Accuracy int8
-
-// Constants describing the Accuracy of a Float.
-const (
- Below Accuracy = -1
- Exact Accuracy = 0
- Above Accuracy = +1
-)
-
-//go:generate stringer -type=Accuracy
-
-// SetPrec sets z's precision to prec and returns the (possibly) rounded
-// value of z. Rounding occurs according to z's rounding mode if the mantissa
-// cannot be represented in prec bits without loss of precision.
-// SetPrec(0) maps all finite values to ±0; infinite values remain unchanged.
-// If prec > MaxPrec, it is set to MaxPrec.
-func (z *Float) SetPrec(prec uint) *Float {
- z.acc = Exact // optimistically assume no rounding is needed
-
- // special case
- if prec == 0 {
- z.prec = 0
- if z.form == finite {
- // truncate z to 0
- z.acc = makeAcc(z.neg)
- z.form = zero
- }
- return z
- }
-
- // general case
- if prec > MaxPrec {
- prec = MaxPrec
- }
- old := z.prec
- z.prec = uint32(prec)
- if z.prec < old {
- z.round(0)
- }
- return z
-}
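-
-// For example (sketch):
-//
-//	x := NewFloat(1.0 / 3.0) // prec 53; the float64 nearest 1/3
-//	x.SetPrec(16)            // rounds to 16 bits per x's mode; x.Acc() != Exact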
-
-func makeAcc(above bool) Accuracy {
- if above {
- return Above
- }
- return Below
-}
-
-// SetMode sets z's rounding mode to mode and returns an exact z.
-// z remains unchanged otherwise.
-// z.SetMode(z.Mode()) is a cheap way to set z's accuracy to Exact.
-func (z *Float) SetMode(mode RoundingMode) *Float {
- z.mode = mode
- z.acc = Exact
- return z
-}
-
-// Prec returns the mantissa precision of x in bits.
-// The result may be 0 for |x| == 0 and |x| == Inf.
-func (x *Float) Prec() uint {
- return uint(x.prec)
-}
-
-// MinPrec returns the minimum precision required to represent x exactly
-// (i.e., the smallest prec before x.SetPrec(prec) would start rounding x).
-// The result is 0 for |x| == 0 and |x| == Inf.
-func (x *Float) MinPrec() uint {
- if x.form != finite {
- return 0
- }
- return uint(len(x.mant))*_W - x.mant.trailingZeroBits()
-}
-
-// Mode returns the rounding mode of x.
-func (x *Float) Mode() RoundingMode {
- return x.mode
-}
-
-// Acc returns the accuracy of x produced by the most recent
-// operation, unless explicitly documented otherwise by that
-// operation.
-func (x *Float) Acc() Accuracy {
- return x.acc
-}
-
-// Sign returns:
-//
-// -1 if x < 0
-// 0 if x is ±0
-// +1 if x > 0
-//
-func (x *Float) Sign() int {
- if debugFloat {
- x.validate()
- }
- if x.form == zero {
- return 0
- }
- if x.neg {
- return -1
- }
- return 1
-}
-
-// MantExp breaks x into its mantissa and exponent components
-// and returns the exponent. If a non-nil mant argument is
-// provided, its value is set to the mantissa of x, with the
-// same precision and rounding mode as x. The components
-// satisfy x == mant × 2**exp, with 0.5 <= |mant| < 1.0.
-// Calling MantExp with a nil argument is an efficient way to
-// get the exponent of the receiver.
-//
-// Special cases are:
-//
-// ( ±0).MantExp(mant) = 0, with mant set to ±0
-// (±Inf).MantExp(mant) = 0, with mant set to ±Inf
-//
-// x and mant may be the same, in which case x is set to its
-// mantissa value.
-func (x *Float) MantExp(mant *Float) (exp int) {
- if debugFloat {
- x.validate()
- }
- if x.form == finite {
- exp = int(x.exp)
- }
- if mant != nil {
- mant.Copy(x)
- if mant.form == finite {
- mant.exp = 0
- }
- }
- return
-}
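-
-// For example (sketch):
-//
-//	x := NewFloat(6.0)    // 6 == 0.75 × 2**3
-//	exp := x.MantExp(nil) // exp == 3; a nil argument avoids copying the mantissa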
-
-func (z *Float) setExpAndRound(exp int64, sbit uint) {
- if exp < MinExp {
- // underflow
- z.acc = makeAcc(z.neg)
- z.form = zero
- return
- }
-
- if exp > MaxExp {
- // overflow
- z.acc = makeAcc(!z.neg)
- z.form = inf
- return
- }
-
- z.form = finite
- z.exp = int32(exp)
- z.round(sbit)
-}
-
-// SetMantExp sets z to mant × 2**exp and returns z.
-// The result z has the same precision and rounding mode
-// as mant. SetMantExp is an inverse of MantExp but does
-// not require 0.5 <= |mant| < 1.0. Specifically, for a
-// given x of type *Float, SetMantExp relates to MantExp
-// as follows:
-//
-// mant := new(Float)
-// new(Float).SetMantExp(mant, x.MantExp(mant)).Cmp(x) == 0
-//
-// Special cases are:
-//
-// z.SetMantExp( ±0, exp) = ±0
-// z.SetMantExp(±Inf, exp) = ±Inf
-//
-// z and mant may be the same, in which case z's exponent
-// is set to exp.
-func (z *Float) SetMantExp(mant *Float, exp int) *Float {
- if debugFloat {
- z.validate()
- mant.validate()
- }
- z.Copy(mant)
-
- if z.form == finite {
- // 0 < |mant| < +Inf
- z.setExpAndRound(int64(z.exp)+int64(exp), 0)
- }
- return z
-}
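-
-// A common use is scaling by a power of two, analogous to math.Ldexp
-// (sketch; x is some finite *Float):
-//
-//	x.SetMantExp(x, x.MantExp(nil)+10) // x *= 2**10, exact barring exponent overflow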
-
-// Signbit reports whether x is negative or negative zero.
-func (x *Float) Signbit() bool {
- return x.neg
-}
-
-// IsInf reports whether x is +Inf or -Inf.
-func (x *Float) IsInf() bool {
- return x.form == inf
-}
-
-// IsInt reports whether x is an integer.
-// ±Inf values are not integers.
-func (x *Float) IsInt() bool {
- if debugFloat {
- x.validate()
- }
- // special cases
- if x.form != finite {
- return x.form == zero
- }
- // x.form == finite
- if x.exp <= 0 {
- return false
- }
- // x.exp > 0
- return x.prec <= uint32(x.exp) || x.MinPrec() <= uint(x.exp) // not enough bits for fractional mantissa
-}
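-
-// For example (sketch):
-//
-//	NewFloat(3.0).IsInt() // true
-//	NewFloat(2.5).IsInt() // false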
-
-// debugging support
-func (x *Float) validate() {
- if !debugFloat {
- // avoid performance bugs
- panic("validate called but debugFloat is not set")
- }
- if x.form != finite {
- return
- }
- m := len(x.mant)
- if m == 0 {
- panic("nonzero finite number with empty mantissa")
- }
- const msb = 1 << (_W - 1)
- if x.mant[m-1]&msb == 0 {
- panic(fmt.Sprintf("msb not set in last word %#x of %s", x.mant[m-1], x.Text('p', 0)))
- }
- if x.prec == 0 {
- panic("zero precision finite number")
- }
-}
-
-// round rounds z according to z.mode to z.prec bits and sets z.acc accordingly.
-// sbit must be 0 or 1 and summarizes any "sticky bit" information one might
-// have before calling round. z's mantissa must be normalized (with the msb set)
-// or empty.
-//
-// CAUTION: The rounding modes ToNegativeInf, ToPositiveInf are affected by the
-// sign of z. For correct rounding, the sign of z must be set correctly before
-// calling round.
-func (z *Float) round(sbit uint) {
- if debugFloat {
- z.validate()
- }
-
- z.acc = Exact
- if z.form != finite {
- // ±0 or ±Inf => nothing left to do
- return
- }
- // z.form == finite && len(z.mant) > 0
- // m > 0 implies z.prec > 0 (checked by validate)
-
- m := uint32(len(z.mant)) // present mantissa length in words
- bits := m * _W // present mantissa bits; bits > 0
- if bits <= z.prec {
- // mantissa fits => nothing to do
- return
- }
- // bits > z.prec
-
- // Rounding is based on two bits: the rounding bit (rbit) and the
- // sticky bit (sbit). The rbit is the bit immediately before the
- // z.prec leading mantissa bits (the "0.5"). The sbit is set if any
- // of the bits before the rbit are set (the "0.25", "0.125", etc.):
- //
- // rbit sbit => "fractional part"
- //
- // 0 0 == 0
- // 0 1 > 0 , < 0.5
- // 1 0 == 0.5
- // 1 1 > 0.5, < 1.0
-
- // bits > z.prec: mantissa too large => round
- r := uint(bits - z.prec - 1) // rounding bit position; r >= 0
- rbit := z.mant.bit(r) & 1 // rounding bit; be safe and ensure it's a single bit
- // The sticky bit is only needed for rounding ToNearestEven
- // or when the rounding bit is zero. Avoid computation otherwise.
- if sbit == 0 && (rbit == 0 || z.mode == ToNearestEven) {
- sbit = z.mant.sticky(r)
- }
- sbit &= 1 // be safe and ensure it's a single bit
-
- // cut off extra words
- n := (z.prec + (_W - 1)) / _W // mantissa length in words for desired precision
- if m > n {
- copy(z.mant, z.mant[m-n:]) // move n last words to front
- z.mant = z.mant[:n]
- }
-
- // determine number of trailing zero bits (ntz) and compute lsb mask of mantissa's least-significant word
- ntz := n*_W - z.prec // 0 <= ntz < _W
- lsb := Word(1) << ntz
-
- // round if result is inexact
- if rbit|sbit != 0 {
- // Make rounding decision: The result mantissa is truncated ("rounded down")
- // by default. Decide if we need to increment, or "round up", the (unsigned)
- // mantissa.
- inc := false
- switch z.mode {
- case ToNegativeInf:
- inc = z.neg
- case ToZero:
- // nothing to do
- case ToNearestEven:
- inc = rbit != 0 && (sbit != 0 || z.mant[0]&lsb != 0)
- case ToNearestAway:
- inc = rbit != 0
- case AwayFromZero:
- inc = true
- case ToPositiveInf:
- inc = !z.neg
- default:
- panic("unreachable")
- }
-
- // A positive result (!z.neg) is Above the exact result if we increment,
- // and it's Below if we truncate (Exact results require no rounding).
- // For a negative result (z.neg) it is exactly the opposite.
- z.acc = makeAcc(inc != z.neg)
-
- if inc {
- // add 1 to mantissa
- if addVW(z.mant, z.mant, lsb) != 0 {
- // mantissa overflow => adjust exponent
- if z.exp >= MaxExp {
- // exponent overflow
- z.form = inf
- return
- }
- z.exp++
- // adjust mantissa: divide by 2 to compensate for exponent adjustment
- shrVU(z.mant, z.mant, 1)
- // set msb == carry == 1 from the mantissa overflow above
- const msb = 1 << (_W - 1)
- z.mant[n-1] |= msb
- }
- }
- }
-
- // zero out trailing bits in least-significant word
- z.mant[0] &^= lsb - 1
-
- if debugFloat {
- z.validate()
- }
-}
-
-func (z *Float) setBits64(neg bool, x uint64) *Float {
- if z.prec == 0 {
- z.prec = 64
- }
- z.acc = Exact
- z.neg = neg
- if x == 0 {
- z.form = zero
- return z
- }
- // x != 0
- z.form = finite
- s := bits.LeadingZeros64(x)
- z.mant = z.mant.setUint64(x << uint(s))
- z.exp = int32(64 - s) // always fits
- if z.prec < 64 {
- z.round(0)
- }
- return z
-}
-
-// SetUint64 sets z to the (possibly rounded) value of x and returns z.
-// If z's precision is 0, it is changed to 64 (and rounding will have
-// no effect).
-func (z *Float) SetUint64(x uint64) *Float {
- return z.setBits64(false, x)
-}
-
-// SetInt64 sets z to the (possibly rounded) value of x and returns z.
-// If z's precision is 0, it is changed to 64 (and rounding will have
-// no effect).
-func (z *Float) SetInt64(x int64) *Float {
- u := x
- if u < 0 {
- u = -u
- }
- // We cannot simply call z.SetUint64(uint64(u)) and change
- // the sign afterwards because the sign affects rounding.
- return z.setBits64(x < 0, uint64(u))
-}
-
-// SetFloat64 sets z to the (possibly rounded) value of x and returns z.
-// If z's precision is 0, it is changed to 53 (and rounding will have
-// no effect). SetFloat64 panics with ErrNaN if x is a NaN.
-func (z *Float) SetFloat64(x float64) *Float {
- if z.prec == 0 {
- z.prec = 53
- }
- if math.IsNaN(x) {
- panic(ErrNaN{"Float.SetFloat64(NaN)"})
- }
- z.acc = Exact
- z.neg = math.Signbit(x) // handle -0, -Inf correctly
- if x == 0 {
- z.form = zero
- return z
- }
- if math.IsInf(x, 0) {
- z.form = inf
- return z
- }
- // normalized x != 0
- z.form = finite
- fmant, exp := math.Frexp(x) // get normalized mantissa
- z.mant = z.mant.setUint64(1<<63 | math.Float64bits(fmant)<<11)
- z.exp = int32(exp) // always fits
- if z.prec < 53 {
- z.round(0)
- }
- return z
-}
-
-// fnorm normalizes mantissa m by shifting it to the left
-// such that the msb of the most-significant word (msw) is 1.
-// It returns the shift amount. It assumes that len(m) != 0.
-func fnorm(m nat) int64 {
- if debugFloat && (len(m) == 0 || m[len(m)-1] == 0) {
- panic("msw of mantissa is 0")
- }
- s := nlz(m[len(m)-1])
- if s > 0 {
- c := shlVU(m, m, s)
- if debugFloat && c != 0 {
- panic("nlz or shlVU incorrect")
- }
- }
- return int64(s)
-}
-
-// SetInt sets z to the (possibly rounded) value of x and returns z.
-// If z's precision is 0, it is changed to the larger of x.BitLen()
-// or 64 (and rounding will have no effect).
-func (z *Float) SetInt(x *Int) *Float {
- // TODO(gri) can be more efficient if z.prec > 0
- // but small compared to the size of x, or if there
- // are many trailing 0's.
- bits := uint32(x.BitLen())
- if z.prec == 0 {
- z.prec = umax32(bits, 64)
- }
- z.acc = Exact
- z.neg = x.neg
- if len(x.abs) == 0 {
- z.form = zero
- return z
- }
- // x != 0
- z.mant = z.mant.set(x.abs)
- fnorm(z.mant)
- z.setExpAndRound(int64(bits), 0)
- return z
-}
-
-// SetRat sets z to the (possibly rounded) value of x and returns z.
-// If z's precision is 0, it is changed to the largest of a.BitLen(),
-// b.BitLen(), or 64; with x = a/b.
-func (z *Float) SetRat(x *Rat) *Float {
- if x.IsInt() {
- return z.SetInt(x.Num())
- }
- var a, b Float
- a.SetInt(x.Num())
- b.SetInt(x.Denom())
- if z.prec == 0 {
- z.prec = umax32(a.prec, b.prec)
- }
- return z.Quo(&a, &b)
-}
-
-// SetInf sets z to the infinite Float -Inf if signbit is
-// set, or +Inf if signbit is not set, and returns z. The
-// precision of z is unchanged and the result is always
-// Exact.
-func (z *Float) SetInf(signbit bool) *Float {
- z.acc = Exact
- z.form = inf
- z.neg = signbit
- return z
-}
-
-// Set sets z to the (possibly rounded) value of x and returns z.
-// If z's precision is 0, it is changed to the precision of x
-// before setting z (and rounding will have no effect).
-// Rounding is performed according to z's precision and rounding
-// mode; and z's accuracy reports the result error relative to the
-// exact (not rounded) result.
-func (z *Float) Set(x *Float) *Float {
- if debugFloat {
- x.validate()
- }
- z.acc = Exact
- if z != x {
- z.form = x.form
- z.neg = x.neg
- if x.form == finite {
- z.exp = x.exp
- z.mant = z.mant.set(x.mant)
- }
- if z.prec == 0 {
- z.prec = x.prec
- } else if z.prec < x.prec {
- z.round(0)
- }
- }
- return z
-}
-
-// Copy sets z to x, with the same precision, rounding mode, and
-// accuracy as x, and returns z. x is not changed even if z and
-// x are the same.
-func (z *Float) Copy(x *Float) *Float {
- if debugFloat {
- x.validate()
- }
- if z != x {
- z.prec = x.prec
- z.mode = x.mode
- z.acc = x.acc
- z.form = x.form
- z.neg = x.neg
- if z.form == finite {
- z.mant = z.mant.set(x.mant)
- z.exp = x.exp
- }
- }
- return z
-}
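-
-// The difference between Set and Copy, sketched (x is some *Float):
-//
-//	z := new(Float).SetPrec(10)
-//	z.Set(x)  // rounds x to z's 10-bit precision; z keeps its own mode
-//	z.Copy(x) // replicates x exactly, adopting x's precision, mode, and accuracy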
-
-// msb32 returns the 32 most significant bits of x.
-func msb32(x nat) uint32 {
- i := len(x) - 1
- if i < 0 {
- return 0
- }
- if debugFloat && x[i]&(1<<(_W-1)) == 0 {
- panic("x not normalized")
- }
- switch _W {
- case 32:
- return uint32(x[i])
- case 64:
- return uint32(x[i] >> 32)
- }
- panic("unreachable")
-}
-
-// msb64 returns the 64 most significant bits of x.
-func msb64(x nat) uint64 {
- i := len(x) - 1
- if i < 0 {
- return 0
- }
- if debugFloat && x[i]&(1<<(_W-1)) == 0 {
- panic("x not normalized")
- }
- switch _W {
- case 32:
- v := uint64(x[i]) << 32
- if i > 0 {
- v |= uint64(x[i-1])
- }
- return v
- case 64:
- return uint64(x[i])
- }
- panic("unreachable")
-}
-
-// Uint64 returns the unsigned integer resulting from truncating x
-// towards zero. If 0 <= x <= math.MaxUint64, the result is Exact
-// if x is an integer and Below otherwise.
-// The result is (0, Above) for x < 0, and (math.MaxUint64, Below)
-// for x > math.MaxUint64.
-func (x *Float) Uint64() (uint64, Accuracy) {
- if debugFloat {
- x.validate()
- }
-
- switch x.form {
- case finite:
- if x.neg {
- return 0, Above
- }
- // 0 < x < +Inf
- if x.exp <= 0 {
- // 0 < x < 1
- return 0, Below
- }
- // 1 <= x < Inf
- if x.exp <= 64 {
- // u = trunc(x) fits into a uint64
- u := msb64(x.mant) >> (64 - uint32(x.exp))
- if x.MinPrec() <= 64 {
- return u, Exact
- }
- return u, Below // x truncated
- }
- // x too large
- return math.MaxUint64, Below
-
- case zero:
- return 0, Exact
-
- case inf:
- if x.neg {
- return 0, Above
- }
- return math.MaxUint64, Below
- }
-
- panic("unreachable")
-}
-
-// Int64 returns the integer resulting from truncating x towards zero.
-// If math.MinInt64 <= x <= math.MaxInt64, the result is Exact if x is
-// an integer, and Above (x < 0) or Below (x > 0) otherwise.
-// The result is (math.MinInt64, Above) for x < math.MinInt64,
-// and (math.MaxInt64, Below) for x > math.MaxInt64.
-func (x *Float) Int64() (int64, Accuracy) {
- if debugFloat {
- x.validate()
- }
-
- switch x.form {
- case finite:
- // 0 < |x| < +Inf
- acc := makeAcc(x.neg)
- if x.exp <= 0 {
- // 0 < |x| < 1
- return 0, acc
- }
- // x.exp > 0
-
- // 1 <= |x| < +Inf
- if x.exp <= 63 {
- // i = trunc(x) fits into an int64 (excluding math.MinInt64)
- i := int64(msb64(x.mant) >> (64 - uint32(x.exp)))
- if x.neg {
- i = -i
- }
- if x.MinPrec() <= uint(x.exp) {
- return i, Exact
- }
- return i, acc // x truncated
- }
- if x.neg {
- // check for special case x == math.MinInt64 (i.e., x == -(0.5 << 64))
- if x.exp == 64 && x.MinPrec() == 1 {
- acc = Exact
- }
- return math.MinInt64, acc
- }
- // x too large
- return math.MaxInt64, Below
-
- case zero:
- return 0, Exact
-
- case inf:
- if x.neg {
- return math.MinInt64, Above
- }
- return math.MaxInt64, Below
- }
-
- panic("unreachable")
-}
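-
-// Truncation toward zero, sketched:
-//
-//	i, acc := NewFloat(2.5).Int64()   // i == 2, acc == Below
-//	j, acc2 := NewFloat(-2.5).Int64() // j == -2, acc2 == Above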
-
-// Float32 returns the float32 value nearest to x. If x is too small to be
-// represented by a float32 (|x| < math.SmallestNonzeroFloat32), the result
-// is (0, Below) or (-0, Above), respectively, depending on the sign of x.
-// If x is too large to be represented by a float32 (|x| > math.MaxFloat32),
-// the result is (+Inf, Above) or (-Inf, Below), depending on the sign of x.
-func (x *Float) Float32() (float32, Accuracy) {
- if debugFloat {
- x.validate()
- }
-
- switch x.form {
- case finite:
- // 0 < |x| < +Inf
-
- const (
- fbits = 32 // float size
- mbits = 23 // mantissa size (excluding implicit msb)
- ebits = fbits - mbits - 1 // 8 exponent size
- bias = 1<<(ebits-1) - 1 // 127 exponent bias
- dmin = 1 - bias - mbits // -149 smallest unbiased exponent (denormal)
- emin = 1 - bias // -126 smallest unbiased exponent (normal)
- emax = bias // 127 largest unbiased exponent (normal)
- )
-
- // Float mantissa m is 0.5 <= m < 1.0; compute exponent e for float32 mantissa.
- e := x.exp - 1 // exponent for normal mantissa m with 1.0 <= m < 2.0
-
- // Compute precision p for float32 mantissa.
- // If the exponent is too small, we have a denormal number before
- // rounding and fewer than p mantissa bits of precision available
- // (the exponent remains fixed but the mantissa gets shifted right).
- p := mbits + 1 // precision of normal float
- if e < emin {
- // recompute precision
- p = mbits + 1 - emin + int(e)
- // If p == 0, the mantissa of x is shifted so much to the right
- // that its msb falls immediately to the right of the float32
- // mantissa space. In other words, if the smallest denormal is
- // considered "1.0", for p == 0, the mantissa value m is >= 0.5.
- // If m > 0.5, it is rounded up to 1.0; i.e., the smallest denormal.
- // If m == 0.5, it is rounded down to even, i.e., 0.0.
- // If p < 0, the mantissa value m is <= "0.25" which is never rounded up.
- if p < 0 /* m <= 0.25 */ || p == 0 && x.mant.sticky(uint(len(x.mant))*_W-1) == 0 /* m == 0.5 */ {
- // underflow to ±0
- if x.neg {
- var z float32
- return -z, Above
- }
- return 0.0, Below
- }
- // otherwise, round up
- // We handle p == 0 explicitly because it's easy and because
- // Float.round doesn't support rounding to 0 bits of precision.
- if p == 0 {
- if x.neg {
- return -math.SmallestNonzeroFloat32, Below
- }
- return math.SmallestNonzeroFloat32, Above
- }
- }
- // p > 0
-
- // round
- var r Float
- r.prec = uint32(p)
- r.Set(x)
- e = r.exp - 1
-
- // Rounding may have caused r to overflow to ±Inf
- // (rounding never causes underflows to 0).
- // If the exponent is too large, also overflow to ±Inf.
- if r.form == inf || e > emax {
- // overflow
- if x.neg {
- return float32(math.Inf(-1)), Below
- }
- return float32(math.Inf(+1)), Above
- }
- // e <= emax
-
- // Determine sign, biased exponent, and mantissa.
- var sign, bexp, mant uint32
- if x.neg {
- sign = 1 << (fbits - 1)
- }
-
- // Rounding may have caused a denormal number to
- // become normal. Check again.
- if e < emin {
- // denormal number: recompute precision
- // Since rounding may have at best increased precision
- // and we have eliminated p <= 0 early, we know p > 0.
- // bexp == 0 for denormals
- p = mbits + 1 - emin + int(e)
- mant = msb32(r.mant) >> uint(fbits-p)
- } else {
- // normal number: emin <= e <= emax
- bexp = uint32(e+bias) << mbits
- mant = msb32(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
- }
-
- return math.Float32frombits(sign | bexp | mant), r.acc
-
- case zero:
- if x.neg {
- var z float32
- return -z, Exact
- }
- return 0.0, Exact
-
- case inf:
- if x.neg {
- return float32(math.Inf(-1)), Exact
- }
- return float32(math.Inf(+1)), Exact
- }
-
- panic("unreachable")
-}
-
-// Float64 returns the float64 value nearest to x. If x is too small to be
-// represented by a float64 (|x| < math.SmallestNonzeroFloat64), the result
-// is (0, Below) or (-0, Above), respectively, depending on the sign of x.
-// If x is too large to be represented by a float64 (|x| > math.MaxFloat64),
-// the result is (+Inf, Above) or (-Inf, Below), depending on the sign of x.
-func (x *Float) Float64() (float64, Accuracy) {
- if debugFloat {
- x.validate()
- }
-
- switch x.form {
- case finite:
- // 0 < |x| < +Inf
-
- const (
- fbits = 64 // float size
- mbits = 52 // mantissa size (excluding implicit msb)
- ebits = fbits - mbits - 1 // 11 exponent size
- bias = 1<<(ebits-1) - 1 // 1023 exponent bias
- dmin = 1 - bias - mbits // -1074 smallest unbiased exponent (denormal)
- emin = 1 - bias // -1022 smallest unbiased exponent (normal)
- emax = bias // 1023 largest unbiased exponent (normal)
- )
-
- // Float mantissa m is 0.5 <= m < 1.0; compute exponent e for float64 mantissa.
- e := x.exp - 1 // exponent for normal mantissa m with 1.0 <= m < 2.0
-
- // Compute precision p for float64 mantissa.
- // If the exponent is too small, we have a denormal number before
- // rounding and fewer than p mantissa bits of precision available
- // (the exponent remains fixed but the mantissa gets shifted right).
- p := mbits + 1 // precision of normal float
- if e < emin {
- // recompute precision
- p = mbits + 1 - emin + int(e)
- // If p == 0, the mantissa of x is shifted so much to the right
- // that its msb falls immediately to the right of the float64
- // mantissa space. In other words, if the smallest denormal is
- // considered "1.0", for p == 0, the mantissa value m is >= 0.5.
- // If m > 0.5, it is rounded up to 1.0; i.e., the smallest denormal.
- // If m == 0.5, it is rounded down to even, i.e., 0.0.
- // If p < 0, the mantissa value m is <= "0.25" which is never rounded up.
- if p < 0 /* m <= 0.25 */ || p == 0 && x.mant.sticky(uint(len(x.mant))*_W-1) == 0 /* m == 0.5 */ {
- // underflow to ±0
- if x.neg {
- var z float64
- return -z, Above
- }
- return 0.0, Below
- }
- // otherwise, round up
- // We handle p == 0 explicitly because it's easy and because
- // Float.round doesn't support rounding to 0 bits of precision.
- if p == 0 {
- if x.neg {
- return -math.SmallestNonzeroFloat64, Below
- }
- return math.SmallestNonzeroFloat64, Above
- }
- }
- // p > 0
-
- // round
- var r Float
- r.prec = uint32(p)
- r.Set(x)
- e = r.exp - 1
-
- // Rounding may have caused r to overflow to ±Inf
- // (rounding never causes underflows to 0).
- // If the exponent is too large, also overflow to ±Inf.
- if r.form == inf || e > emax {
- // overflow
- if x.neg {
- return math.Inf(-1), Below
- }
- return math.Inf(+1), Above
- }
- // e <= emax
-
- // Determine sign, biased exponent, and mantissa.
- var sign, bexp, mant uint64
- if x.neg {
- sign = 1 << (fbits - 1)
- }
-
- // Rounding may have caused a denormal number to
- // become normal. Check again.
- if e < emin {
- // denormal number: recompute precision
- // Since rounding may have at best increased precision
- // and we have eliminated p <= 0 early, we know p > 0.
- // bexp == 0 for denormals
- p = mbits + 1 - emin + int(e)
- mant = msb64(r.mant) >> uint(fbits-p)
- } else {
- // normal number: emin <= e <= emax
- bexp = uint64(e+bias) << mbits
- mant = msb64(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
- }
-
- return math.Float64frombits(sign | bexp | mant), r.acc
-
- case zero:
- if x.neg {
- var z float64
- return -z, Exact
- }
- return 0.0, Exact
-
- case inf:
- if x.neg {
- return math.Inf(-1), Exact
- }
- return math.Inf(+1), Exact
- }
-
- panic("unreachable")
-}
-
-// Int returns the result of truncating x towards zero;
-// or nil if x is an infinity.
-// The result is Exact if x.IsInt(); otherwise it is Below
-// for x > 0, and Above for x < 0.
-// If a non-nil *Int argument z is provided, Int stores
-// the result in z instead of allocating a new Int.
-func (x *Float) Int(z *Int) (*Int, Accuracy) {
- if debugFloat {
- x.validate()
- }
-
- if z == nil && x.form <= finite {
- z = new(Int)
- }
-
- switch x.form {
- case finite:
- // 0 < |x| < +Inf
- acc := makeAcc(x.neg)
- if x.exp <= 0 {
- // 0 < |x| < 1
- return z.SetInt64(0), acc
- }
- // x.exp > 0
-
- // 1 <= |x| < +Inf
- // determine minimum required precision for x
- allBits := uint(len(x.mant)) * _W
- exp := uint(x.exp)
- if x.MinPrec() <= exp {
- acc = Exact
- }
- // shift mantissa as needed
- if z == nil {
- z = new(Int)
- }
- z.neg = x.neg
- switch {
- case exp > allBits:
- z.abs = z.abs.shl(x.mant, exp-allBits)
- default:
- z.abs = z.abs.set(x.mant)
- case exp < allBits:
- z.abs = z.abs.shr(x.mant, allBits-exp)
- }
- return z, acc
-
- case zero:
- return z.SetInt64(0), Exact
-
- case inf:
- return nil, makeAcc(x.neg)
- }
-
- panic("unreachable")
-}
-
-// Rat returns the rational number corresponding to x;
-// or nil if x is an infinity.
-// The result is Exact if x is not an Inf.
-// If a non-nil *Rat argument z is provided, Rat stores
-// the result in z instead of allocating a new Rat.
-func (x *Float) Rat(z *Rat) (*Rat, Accuracy) {
- if debugFloat {
- x.validate()
- }
-
- if z == nil && x.form <= finite {
- z = new(Rat)
- }
-
- switch x.form {
- case finite:
- // 0 < |x| < +Inf
- allBits := int32(len(x.mant)) * _W
- // build up numerator and denominator
- z.a.neg = x.neg
- switch {
- case x.exp > allBits:
- z.a.abs = z.a.abs.shl(x.mant, uint(x.exp-allBits))
- z.b.abs = z.b.abs[:0] // == 1 (see Rat)
- // z already in normal form
- default:
- z.a.abs = z.a.abs.set(x.mant)
- z.b.abs = z.b.abs[:0] // == 1 (see Rat)
- // z already in normal form
- case x.exp < allBits:
- z.a.abs = z.a.abs.set(x.mant)
- t := z.b.abs.setUint64(1)
- z.b.abs = t.shl(t, uint(allBits-x.exp))
- z.norm()
- }
- return z, Exact
-
- case zero:
- return z.SetInt64(0), Exact
-
- case inf:
- return nil, makeAcc(x.neg)
- }
-
- panic("unreachable")
-}
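-
-// For example (sketch):
-//
-//	x := NewFloat(1.25)
-//	n, acc := x.Int(nil) // n == 1, acc == Below (truncated toward zero)
-//	r, _ := x.Rat(nil)   // r == 5/4; Exact for any finite x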
-
-// Abs sets z to the (possibly rounded) value |x| (the absolute value of x)
-// and returns z.
-func (z *Float) Abs(x *Float) *Float {
- z.Set(x)
- z.neg = false
- return z
-}
-
-// Neg sets z to the (possibly rounded) value of x with its sign negated,
-// and returns z.
-func (z *Float) Neg(x *Float) *Float {
- z.Set(x)
- z.neg = !z.neg
- return z
-}
-
-func validateBinaryOperands(x, y *Float) {
- if !debugFloat {
- // avoid performance bugs
- panic("validateBinaryOperands called but debugFloat is not set")
- }
- if len(x.mant) == 0 {
- panic("empty mantissa for x")
- }
- if len(y.mant) == 0 {
- panic("empty mantissa for y")
- }
-}
-
-// z = x + y, ignoring signs of x and y for the addition
-// but using the sign of z for rounding the result.
-// x and y must have a non-empty mantissa and valid exponent.
-func (z *Float) uadd(x, y *Float) {
- // Note: This implementation requires 2 shifts most of the
- // time. It is also inefficient if exponents or precisions
- // differ by wide margins. The following article describes
- // an efficient (but much more complicated) implementation
- // compatible with the internal representation used here:
- //
- // Vincent Lefèvre: "The Generic Multiple-Precision Floating-
- // Point Addition With Exact Rounding (as in the MPFR Library)"
- // http://www.vinc17.net/research/papers/rnc6.pdf
-
- if debugFloat {
- validateBinaryOperands(x, y)
- }
-
- // compute exponents ex, ey for mantissa with "binary point"
- // on the right (mantissa.0) - use int64 to avoid overflow
- ex := int64(x.exp) - int64(len(x.mant))*_W
- ey := int64(y.exp) - int64(len(y.mant))*_W
-
- al := alias(z.mant, x.mant) || alias(z.mant, y.mant)
-
- // TODO(gri) having a combined add-and-shift primitive
- // could make this code significantly faster
- switch {
- case ex < ey:
- if al {
- t := nat(nil).shl(y.mant, uint(ey-ex))
- z.mant = z.mant.add(x.mant, t)
- } else {
- z.mant = z.mant.shl(y.mant, uint(ey-ex))
- z.mant = z.mant.add(x.mant, z.mant)
- }
- default:
- // ex == ey, no shift needed
- z.mant = z.mant.add(x.mant, y.mant)
- case ex > ey:
- if al {
- t := nat(nil).shl(x.mant, uint(ex-ey))
- z.mant = z.mant.add(t, y.mant)
- } else {
- z.mant = z.mant.shl(x.mant, uint(ex-ey))
- z.mant = z.mant.add(z.mant, y.mant)
- }
- ex = ey
- }
- // len(z.mant) > 0
-
- z.setExpAndRound(ex+int64(len(z.mant))*_W-fnorm(z.mant), 0)
-}
-
-// z = x - y for |x| > |y|, ignoring signs of x and y for the subtraction
-// but using the sign of z for rounding the result.
-// x and y must have a non-empty mantissa and valid exponent.
-func (z *Float) usub(x, y *Float) {
- // This code is symmetric to uadd.
- // We have not factored the common code out because
- // eventually uadd (and usub) should be optimized
- // by special-casing, and the code will diverge.
-
- if debugFloat {
- validateBinaryOperands(x, y)
- }
-
- ex := int64(x.exp) - int64(len(x.mant))*_W
- ey := int64(y.exp) - int64(len(y.mant))*_W
-
- al := alias(z.mant, x.mant) || alias(z.mant, y.mant)
-
- switch {
- case ex < ey:
- if al {
- t := nat(nil).shl(y.mant, uint(ey-ex))
- z.mant = t.sub(x.mant, t)
- } else {
- z.mant = z.mant.shl(y.mant, uint(ey-ex))
- z.mant = z.mant.sub(x.mant, z.mant)
- }
- default:
- // ex == ey, no shift needed
- z.mant = z.mant.sub(x.mant, y.mant)
- case ex > ey:
- if al {
- t := nat(nil).shl(x.mant, uint(ex-ey))
- z.mant = t.sub(t, y.mant)
- } else {
- z.mant = z.mant.shl(x.mant, uint(ex-ey))
- z.mant = z.mant.sub(z.mant, y.mant)
- }
- ex = ey
- }
-
- // operands may have canceled each other out
- if len(z.mant) == 0 {
- z.acc = Exact
- z.form = zero
- z.neg = false
- return
- }
- // len(z.mant) > 0
-
- z.setExpAndRound(ex+int64(len(z.mant))*_W-fnorm(z.mant), 0)
-}
-
-// z = x * y, ignoring signs of x and y for the multiplication
-// but using the sign of z for rounding the result.
-// x and y must have a non-empty mantissa and valid exponent.
-func (z *Float) umul(x, y *Float) {
- if debugFloat {
- validateBinaryOperands(x, y)
- }
-
- // Note: This is doing too much work if the precision
- // of z is less than the sum of the precisions of x
- // and y which is often the case (e.g., if all floats
- // have the same precision).
- // TODO(gri) Optimize this for the common case.
-
- e := int64(x.exp) + int64(y.exp)
- if x == y {
- z.mant = z.mant.sqr(x.mant)
- } else {
- z.mant = z.mant.mul(x.mant, y.mant)
- }
- z.setExpAndRound(e-fnorm(z.mant), 0)
-}
-
-// z = x / y, ignoring signs of x and y for the division
-// but using the sign of z for rounding the result.
-// x and y must have a non-empty mantissa and valid exponent.
-func (z *Float) uquo(x, y *Float) {
- if debugFloat {
- validateBinaryOperands(x, y)
- }
-
- // mantissa length in words for desired result precision + 1
- // (at least one extra bit so we get the rounding bit after
- // the division)
- n := int(z.prec/_W) + 1
-
- // compute adjusted x.mant such that we get enough result precision
- xadj := x.mant
- if d := n - len(x.mant) + len(y.mant); d > 0 {
- // d extra words needed => add d "0 digits" to x
- xadj = make(nat, len(x.mant)+d)
- copy(xadj[d:], x.mant)
- }
- // TODO(gri): If we have too many digits (d < 0), we should be able
- // to shorten x for faster division. But we must be extra careful
- // with rounding in that case.
-
- // Compute d before division since there may be aliasing of x.mant
- // (via xadj) or y.mant with z.mant.
- d := len(xadj) - len(y.mant)
-
- // divide
- var r nat
- z.mant, r = z.mant.div(nil, xadj, y.mant)
- e := int64(x.exp) - int64(y.exp) - int64(d-len(z.mant))*_W
-
- // The result is long enough to include (at least) the rounding bit.
- // If there's a non-zero remainder, the corresponding fractional part
- // (if it were computed), would have a non-zero sticky bit (if it were
- // zero, it couldn't have a non-zero remainder).
- var sbit uint
- if len(r) > 0 {
- sbit = 1
- }
-
- z.setExpAndRound(e-fnorm(z.mant), sbit)
-}
-
-// ucmp returns -1, 0, or +1, depending on whether
-// |x| < |y|, |x| == |y|, or |x| > |y|.
-// x and y must have a non-empty mantissa and valid exponent.
-func (x *Float) ucmp(y *Float) int {
- if debugFloat {
- validateBinaryOperands(x, y)
- }
-
- switch {
- case x.exp < y.exp:
- return -1
- case x.exp > y.exp:
- return +1
- }
- // x.exp == y.exp
-
- // compare mantissas
- i := len(x.mant)
- j := len(y.mant)
- for i > 0 || j > 0 {
- var xm, ym Word
- if i > 0 {
- i--
- xm = x.mant[i]
- }
- if j > 0 {
- j--
- ym = y.mant[j]
- }
- switch {
- case xm < ym:
- return -1
- case xm > ym:
- return +1
- }
- }
-
- return 0
-}
-
-// Handling of sign bit as defined by IEEE 754-2008, section 6.3:
-//
-// When neither the inputs nor result are NaN, the sign of a product or
-// quotient is the exclusive OR of the operands’ signs; the sign of a sum,
-// or of a difference x−y regarded as a sum x+(−y), differs from at most
-// one of the addends’ signs; and the sign of the result of conversions,
-// the quantize operation, the roundToIntegral operations, and the
-// roundToIntegralExact (see 5.3.1) is the sign of the first or only operand.
-// These rules shall apply even when operands or results are zero or infinite.
-//
-// When the sum of two operands with opposite signs (or the difference of
-// two operands with like signs) is exactly zero, the sign of that sum (or
-// difference) shall be +0 in all rounding-direction attributes except
-// roundTowardNegative; under that attribute, the sign of an exact zero
-// sum (or difference) shall be −0. However, x+x = x−(−x) retains the same
-// sign as x even when x is zero.
-//
-// See also: https://play.golang.org/p/RtH3UCt5IH
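-
-// A sketch of the exact-zero rule above:
-//
-//	z := new(Float).SetMode(ToNegativeInf)
-//	z.Add(NewFloat(2), NewFloat(-2)) // sum is exactly zero
-//	z.Signbit()                      // true: -0 under roundTowardNegative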
-
-// Add sets z to the rounded sum x+y and returns z. If z's precision is 0,
-// it is changed to the larger of x's or y's precision before the operation.
-// Rounding is performed according to z's precision and rounding mode; and
-// z's accuracy reports the result error relative to the exact (not rounded)
-// result. Add panics with ErrNaN if x and y are infinities with opposite
-// signs. The value of z is undefined in that case.
-func (z *Float) Add(x, y *Float) *Float {
- if debugFloat {
- x.validate()
- y.validate()
- }
-
- if z.prec == 0 {
- z.prec = umax32(x.prec, y.prec)
- }
-
- if x.form == finite && y.form == finite {
- // x + y (common case)
-
- // Below we set z.neg = x.neg, and when z aliases y this will
- // change the y operand's sign. This is fine, because if an
- // operand aliases the receiver it'll be overwritten, but we still
- // want the original x.neg and y.neg values when we evaluate
- // x.neg != y.neg, so we need to save y.neg before setting z.neg.
- yneg := y.neg
-
- z.neg = x.neg
- if x.neg == yneg {
- // x + y == x + y
- // (-x) + (-y) == -(x + y)
- z.uadd(x, y)
- } else {
- // x + (-y) == x - y == -(y - x)
- // (-x) + y == y - x == -(x - y)
- if x.ucmp(y) > 0 {
- z.usub(x, y)
- } else {
- z.neg = !z.neg
- z.usub(y, x)
- }
- }
- if z.form == zero && z.mode == ToNegativeInf && z.acc == Exact {
- z.neg = true
- }
- return z
- }
-
- if x.form == inf && y.form == inf && x.neg != y.neg {
- // +Inf + -Inf
- // -Inf + +Inf
- // value of z is undefined but make sure it's valid
- z.acc = Exact
- z.form = zero
- z.neg = false
- panic(ErrNaN{"addition of infinities with opposite signs"})
- }
-
- if x.form == zero && y.form == zero {
- // ±0 + ±0
- z.acc = Exact
- z.form = zero
- z.neg = x.neg && y.neg // -0 + -0 == -0
- return z
- }
-
- if x.form == inf || y.form == zero {
- // ±Inf + y
- // x + ±0
- return z.Set(x)
- }
-
- // ±0 + y
- // x + ±Inf
- return z.Set(y)
-}
-
-// Sub sets z to the rounded difference x-y and returns z.
-// Precision, rounding, and accuracy reporting are as for Add.
-// Sub panics with ErrNaN if x and y are infinities with equal
-// signs. The value of z is undefined in that case.
-func (z *Float) Sub(x, y *Float) *Float {
- if debugFloat {
- x.validate()
- y.validate()
- }
-
- if z.prec == 0 {
- z.prec = umax32(x.prec, y.prec)
- }
-
- if x.form == finite && y.form == finite {
- // x - y (common case)
- yneg := y.neg
- z.neg = x.neg
- if x.neg != yneg {
- // x - (-y) == x + y
- // (-x) - y == -(x + y)
- z.uadd(x, y)
- } else {
- // x - y == x - y == -(y - x)
- // (-x) - (-y) == y - x == -(x - y)
- if x.ucmp(y) > 0 {
- z.usub(x, y)
- } else {
- z.neg = !z.neg
- z.usub(y, x)
- }
- }
- if z.form == zero && z.mode == ToNegativeInf && z.acc == Exact {
- z.neg = true
- }
- return z
- }
-
- if x.form == inf && y.form == inf && x.neg == y.neg {
- // +Inf - +Inf
- // -Inf - -Inf
- // value of z is undefined but make sure it's valid
- z.acc = Exact
- z.form = zero
- z.neg = false
- panic(ErrNaN{"subtraction of infinities with equal signs"})
- }
-
- if x.form == zero && y.form == zero {
- // ±0 - ±0
- z.acc = Exact
- z.form = zero
- z.neg = x.neg && !y.neg // -0 - +0 == -0
- return z
- }
-
- if x.form == inf || y.form == zero {
- // ±Inf - y
- // x - ±0
- return z.Set(x)
- }
-
- // ±0 - y
- // x - ±Inf
- return z.Neg(y)
-}
-
-// Mul sets z to the rounded product x*y and returns z.
-// Precision, rounding, and accuracy reporting are as for Add.
-// Mul panics with ErrNaN if one operand is zero and the other
-// operand is an infinity. The value of z is undefined in that case.
-func (z *Float) Mul(x, y *Float) *Float {
- if debugFloat {
- x.validate()
- y.validate()
- }
-
- if z.prec == 0 {
- z.prec = umax32(x.prec, y.prec)
- }
-
- z.neg = x.neg != y.neg
-
- if x.form == finite && y.form == finite {
- // x * y (common case)
- z.umul(x, y)
- return z
- }
-
- z.acc = Exact
- if x.form == zero && y.form == inf || x.form == inf && y.form == zero {
- // ±0 * ±Inf
- // ±Inf * ±0
- // value of z is undefined but make sure it's valid
- z.form = zero
- z.neg = false
- panic(ErrNaN{"multiplication of zero with infinity"})
- }
-
- if x.form == inf || y.form == inf {
- // ±Inf * y
- // x * ±Inf
- z.form = inf
- return z
- }
-
- // ±0 * y
- // x * ±0
- z.form = zero
- return z
-}
-
-// Quo sets z to the rounded quotient x/y and returns z.
-// Precision, rounding, and accuracy reporting are as for Add.
-// Quo panics with ErrNaN if both operands are zero or infinities.
-// The value of z is undefined in that case.
-func (z *Float) Quo(x, y *Float) *Float {
- if debugFloat {
- x.validate()
- y.validate()
- }
-
- if z.prec == 0 {
- z.prec = umax32(x.prec, y.prec)
- }
-
- z.neg = x.neg != y.neg
-
- if x.form == finite && y.form == finite {
- // x / y (common case)
- z.uquo(x, y)
- return z
- }
-
- z.acc = Exact
- if x.form == zero && y.form == zero || x.form == inf && y.form == inf {
- // ±0 / ±0
- // ±Inf / ±Inf
- // value of z is undefined but make sure it's valid
- z.form = zero
- z.neg = false
- panic(ErrNaN{"division of zero by zero or infinity by infinity"})
- }
-
- if x.form == zero || y.form == inf {
- // ±0 / y
- // x / ±Inf
- z.form = zero
- return z
- }
-
- // x / ±0
- // ±Inf / y
- z.form = inf
- return z
-}
-
-// Cmp compares x and y and returns:
-//
-// -1 if x < y
-// 0 if x == y (incl. -0 == 0, -Inf == -Inf, and +Inf == +Inf)
-// +1 if x > y
-//
-func (x *Float) Cmp(y *Float) int {
- if debugFloat {
- x.validate()
- y.validate()
- }
-
- mx := x.ord()
- my := y.ord()
- switch {
- case mx < my:
- return -1
- case mx > my:
- return +1
- }
- // mx == my
-
- // only if |mx| == 1 we have to compare the mantissae
- switch mx {
- case -1:
- return y.ucmp(x)
- case +1:
- return x.ucmp(y)
- }
-
- return 0
-}
-
-// ord classifies x and returns:
-//
-// -2 if -Inf == x
-// -1 if -Inf < x < 0
-// 0 if x == 0 (signed or unsigned)
-// +1 if 0 < x < +Inf
-// +2 if x == +Inf
-//
-func (x *Float) ord() int {
- var m int
- switch x.form {
- case finite:
- m = 1
- case zero:
- return 0
- case inf:
- m = 2
- }
- if x.neg {
- m = -m
- }
- return m
-}
-
-func umax32(x, y uint32) uint32 {
- if x > y {
- return x
- }
- return y
-}
diff --git a/contrib/go/_std_1.18/src/math/big/floatconv.go b/contrib/go/_std_1.18/src/math/big/floatconv.go
deleted file mode 100644
index 57b7df3936..0000000000
--- a/contrib/go/_std_1.18/src/math/big/floatconv.go
+++ /dev/null
@@ -1,304 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements string-to-Float conversion functions.
-
-package big
-
-import (
- "fmt"
- "io"
- "strings"
-)
-
-var floatZero Float
-
-// SetString sets z to the value of s and returns z and a boolean indicating
-// success. s must be a floating-point number of the same format as accepted
-// by Parse, with base argument 0. The entire string (not just a prefix) must
-// be valid for success. If the operation failed, the value of z is undefined
-// but the returned value is nil.
-func (z *Float) SetString(s string) (*Float, bool) {
- if f, _, err := z.Parse(s, 0); err == nil {
- return f, true
- }
- return nil, false
-}
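-
-// For example (sketch):
-//
-//	f, ok := new(Float).SetString("1.5e2")    // f == 150, ok == true
-//	g, ok2 := new(Float).SetString("0x1.8p3") // g == 12 (base-2 exponent)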
-
-// scan is like Parse but reads the longest possible prefix representing a valid
-// floating point number from an io.ByteScanner rather than a string. It serves
-// as the implementation of Parse. It does not recognize ±Inf and does not expect
-// EOF at the end.
-func (z *Float) scan(r io.ByteScanner, base int) (f *Float, b int, err error) {
- prec := z.prec
- if prec == 0 {
- prec = 64
- }
-
- // A reasonable value in case of an error.
- z.form = zero
-
- // sign
- z.neg, err = scanSign(r)
- if err != nil {
- return
- }
-
- // mantissa
- var fcount int // fractional digit count; valid if <= 0
- z.mant, b, fcount, err = z.mant.scan(r, base, true)
- if err != nil {
- return
- }
-
- // exponent
- var exp int64
- var ebase int
- exp, ebase, err = scanExponent(r, true, base == 0)
- if err != nil {
- return
- }
-
- // special-case 0
- if len(z.mant) == 0 {
- z.prec = prec
- z.acc = Exact
- z.form = zero
- f = z
- return
- }
- // len(z.mant) > 0
-
- // The mantissa may have a radix point (fcount <= 0) and there
- // may be a nonzero exponent exp. The radix point amounts to a
- // division by b**(-fcount). An exponent means multiplication by
- // ebase**exp. Finally, mantissa normalization (shift left) requires
- // a correcting multiplication by 2**(-shiftcount). Multiplications
- // are commutative, so we can apply them in any order as long as there
- // is no loss of precision. We only have powers of 2 and 10, and
- // we split powers of 10 into the product of the same powers of
- // 2 and 5. This reduces the size of the multiplication factor
- // needed for base-10 exponents.
-
- // normalize mantissa and determine initial exponent contributions
- exp2 := int64(len(z.mant))*_W - fnorm(z.mant)
- exp5 := int64(0)
-
- // determine binary or decimal exponent contribution of radix point
- if fcount < 0 {
- // The mantissa has a radix point ddd.dddd; and
- // -fcount is the number of digits to the right
- // of '.'. Adjust relevant exponent accordingly.
- d := int64(fcount)
- switch b {
- case 10:
- exp5 = d
- fallthrough // 10**e == 5**e * 2**e
- case 2:
- exp2 += d
- case 8:
- exp2 += d * 3 // octal digits are 3 bits each
- case 16:
- exp2 += d * 4 // hexadecimal digits are 4 bits each
- default:
- panic("unexpected mantissa base")
- }
- // fcount consumed - not needed anymore
- }
-
- // take actual exponent into account
- switch ebase {
- case 10:
- exp5 += exp
- fallthrough // see fallthrough above
- case 2:
- exp2 += exp
- default:
- panic("unexpected exponent base")
- }
- // exp consumed - not needed anymore
-
- // apply 2**exp2
- if MinExp <= exp2 && exp2 <= MaxExp {
- z.prec = prec
- z.form = finite
- z.exp = int32(exp2)
- f = z
- } else {
- err = fmt.Errorf("exponent overflow")
- return
- }
-
- if exp5 == 0 {
- // no decimal exponent contribution
- z.round(0)
- return
- }
- // exp5 != 0
-
- // apply 5**exp5
- p := new(Float).SetPrec(z.Prec() + 64) // use more bits for p -- TODO(gri) what is the right number?
- if exp5 < 0 {
- z.Quo(z, p.pow5(uint64(-exp5)))
- } else {
- z.Mul(z, p.pow5(uint64(exp5)))
- }
-
- return
-}
-
-// These powers of 5 fit into a uint64.
-//
-// for p, q := uint64(0), uint64(1); p < q; p, q = q, q*5 {
-// fmt.Println(q)
-// }
-//
-var pow5tab = [...]uint64{
- 1,
- 5,
- 25,
- 125,
- 625,
- 3125,
- 15625,
- 78125,
- 390625,
- 1953125,
- 9765625,
- 48828125,
- 244140625,
- 1220703125,
- 6103515625,
- 30517578125,
- 152587890625,
- 762939453125,
- 3814697265625,
- 19073486328125,
- 95367431640625,
- 476837158203125,
- 2384185791015625,
- 11920928955078125,
- 59604644775390625,
- 298023223876953125,
- 1490116119384765625,
- 7450580596923828125,
-}
-
-// pow5 sets z to 5**n and returns z.
-// n must not be negative.
-func (z *Float) pow5(n uint64) *Float {
- const m = uint64(len(pow5tab) - 1)
- if n <= m {
- return z.SetUint64(pow5tab[n])
- }
- // n > m
-
- z.SetUint64(pow5tab[m])
- n -= m
-
- // use more bits for f than for z
- // TODO(gri) what is the right number?
- f := new(Float).SetPrec(z.Prec() + 64).SetUint64(5)
-
- for n > 0 {
- if n&1 != 0 {
- z.Mul(z, f)
- }
- f.Mul(f, f)
- n >>= 1
- }
-
- return z
-}
-
-// Parse parses s which must contain a text representation of a floating-
-// point number with a mantissa in the given conversion base (the exponent
-// is always a decimal number), or a string representing an infinite value.
-//
-// For base 0, an underscore character ``_'' may appear between a base
-// prefix and an adjacent digit, and between successive digits; such
-// underscores do not change the value of the number, or the returned
-// digit count. Incorrect placement of underscores is reported as an
-// error if there are no other errors. If base != 0, underscores are
-// not recognized and thus terminate scanning like any other character
-// that is not a valid radix point or digit.
-//
-// It sets z to the (possibly rounded) value of the corresponding floating-
-// point value, and returns z, the actual base b, and an error err, if any.
-// The entire string (not just a prefix) must be consumed for success.
-// If z's precision is 0, it is changed to 64 before rounding takes effect.
-// The number must be of the form:
-//
-// number = [ sign ] ( float | "inf" | "Inf" ) .
-// sign = "+" | "-" .
-// float = ( mantissa | prefix pmantissa ) [ exponent ] .
-// prefix = "0" [ "b" | "B" | "o" | "O" | "x" | "X" ] .
-// mantissa = digits "." [ digits ] | digits | "." digits .
-// pmantissa = [ "_" ] digits "." [ digits ] | [ "_" ] digits | "." digits .
-// exponent = ( "e" | "E" | "p" | "P" ) [ sign ] digits .
-// digits = digit { [ "_" ] digit } .
-// digit = "0" ... "9" | "a" ... "z" | "A" ... "Z" .
-//
-// The base argument must be 0, 2, 8, 10, or 16. Providing an invalid base
-// argument will lead to a run-time panic.
-//
-// For base 0, the number prefix determines the actual base: A prefix of
-// ``0b'' or ``0B'' selects base 2, ``0o'' or ``0O'' selects base 8, and
-// ``0x'' or ``0X'' selects base 16. Otherwise, the actual base is 10 and
-// no prefix is accepted. The octal prefix "0" is not supported (a leading
-// "0" is simply considered a "0").
-//
-// A "p" or "P" exponent indicates a base 2 (rather then base 10) exponent;
-// for instance, "0x1.fffffffffffffp1023" (using base 0) represents the
-// maximum float64 value. For hexadecimal mantissae, the exponent character
-// must be one of 'p' or 'P', if present (an "e" or "E" exponent indicator
-// cannot be distinguished from a mantissa digit).
-//
-// The returned *Float f is nil and the value of z is valid but not
-// defined if an error is reported.
-//
-func (z *Float) Parse(s string, base int) (f *Float, b int, err error) {
- // scan doesn't handle ±Inf
- if len(s) == 3 && (s == "Inf" || s == "inf") {
- f = z.SetInf(false)
- return
- }
- if len(s) == 4 && (s[0] == '+' || s[0] == '-') && (s[1:] == "Inf" || s[1:] == "inf") {
- f = z.SetInf(s[0] == '-')
- return
- }
-
- r := strings.NewReader(s)
- if f, b, err = z.scan(r, base); err != nil {
- return
- }
-
- // entire string must have been consumed
- if ch, err2 := r.ReadByte(); err2 == nil {
- err = fmt.Errorf("expected end of string, found %q", ch)
- } else if err2 != io.EOF {
- err = err2
- }
-
- return
-}
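-
-// For example (sketch):
-//
-//	f, b, err := new(Float).Parse("-0x1p-3", 0) // f == -0.125, b == 16, err == nil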
-
-// ParseFloat is like f.Parse(s, base) with f set to the given precision
-// and rounding mode.
-func ParseFloat(s string, base int, prec uint, mode RoundingMode) (f *Float, b int, err error) {
- return new(Float).SetPrec(prec).SetMode(mode).Parse(s, base)
-}
-
-var _ fmt.Scanner = (*Float)(nil) // *Float must implement fmt.Scanner
-
-// Scan is a support routine for fmt.Scanner; it sets z to the value of
-// the scanned number. It accepts formats whose verbs are supported by
-// fmt.Scan for floating point values, which are:
-// 'b' (binary), 'e', 'E', 'f', 'F', 'g' and 'G'.
-// Scan doesn't handle ±Inf.
-func (z *Float) Scan(s fmt.ScanState, ch rune) error {
- s.SkipSpace()
- _, _, err := z.scan(byteReader{s}, 0)
- return err
-}
diff --git a/contrib/go/_std_1.18/src/math/big/floatmarsh.go b/contrib/go/_std_1.18/src/math/big/floatmarsh.go
deleted file mode 100644
index d1c1dab069..0000000000
--- a/contrib/go/_std_1.18/src/math/big/floatmarsh.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements encoding/decoding of Floats.
-
-package big
-
-import (
- "encoding/binary"
- "fmt"
-)
-
-// Gob codec version. Permits backward-compatible changes to the encoding.
-const floatGobVersion byte = 1
-
-// GobEncode implements the gob.GobEncoder interface.
-// The Float value and all its attributes (precision,
-// rounding mode, accuracy) are marshaled.
-func (x *Float) GobEncode() ([]byte, error) {
- if x == nil {
- return nil, nil
- }
-
- // determine max. space (bytes) required for encoding
- sz := 1 + 1 + 4 // version + mode|acc|form|neg (3+2+2+1bit) + prec
- n := 0 // number of mantissa words
- if x.form == finite {
- // add space for mantissa and exponent
- n = int((x.prec + (_W - 1)) / _W) // required mantissa length in words for given precision
- // actual mantissa slice could be shorter (trailing 0's) or longer (unused bits):
- // - if shorter, only encode the words present
- // - if longer, cut off unused words when encoding in bytes
- // (in practice, this should never happen since rounding
- // takes care of it, but be safe and do it always)
- if len(x.mant) < n {
- n = len(x.mant)
- }
- // len(x.mant) >= n
- sz += 4 + n*_S // exp + mant
- }
- buf := make([]byte, sz)
-
- buf[0] = floatGobVersion
- b := byte(x.mode&7)<<5 | byte((x.acc+1)&3)<<3 | byte(x.form&3)<<1
- if x.neg {
- b |= 1
- }
- buf[1] = b
- binary.BigEndian.PutUint32(buf[2:], x.prec)
-
- if x.form == finite {
- binary.BigEndian.PutUint32(buf[6:], uint32(x.exp))
- x.mant[len(x.mant)-n:].bytes(buf[10:]) // cut off unused trailing words
- }
-
- return buf, nil
-}
-
-// GobDecode implements the gob.GobDecoder interface.
-// The result is rounded per the precision and rounding mode of
-// z unless z's precision is 0, in which case z is set exactly
-// to the decoded value.
-func (z *Float) GobDecode(buf []byte) error {
- if len(buf) == 0 {
- // Other side sent a nil or default value.
- *z = Float{}
- return nil
- }
-
- if buf[0] != floatGobVersion {
- return fmt.Errorf("Float.GobDecode: encoding version %d not supported", buf[0])
- }
-
- oldPrec := z.prec
- oldMode := z.mode
-
- b := buf[1]
- z.mode = RoundingMode((b >> 5) & 7)
- z.acc = Accuracy((b>>3)&3) - 1
- z.form = form((b >> 1) & 3)
- z.neg = b&1 != 0
- z.prec = binary.BigEndian.Uint32(buf[2:])
-
- if z.form == finite {
- z.exp = int32(binary.BigEndian.Uint32(buf[6:]))
- z.mant = z.mant.setBytes(buf[10:])
- }
-
- if oldPrec != 0 {
- z.mode = oldMode
- z.SetPrec(uint(oldPrec))
- }
-
- return nil
-}
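-
-// Sketch of a gob round-trip using the standard encoding/gob package:
-//
-//	var buf bytes.Buffer
-//	x := new(Float).SetPrec(100).SetFloat64(1.0 / 3.0)
-//	_ = gob.NewEncoder(&buf).Encode(x)
-//	var y Float
-//	_ = gob.NewDecoder(&buf).Decode(&y) // y.Prec() == 100, y.Cmp(x) == 0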
-
-// MarshalText implements the encoding.TextMarshaler interface.
-// Only the Float value is marshaled (in full precision), other
-// attributes such as precision or accuracy are ignored.
-func (x *Float) MarshalText() (text []byte, err error) {
- if x == nil {
- return []byte("<nil>"), nil
- }
- var buf []byte
- return x.Append(buf, 'g', -1), nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// The result is rounded per the precision and rounding mode of z.
-// If z's precision is 0, it is changed to 64 before rounding takes
-// effect.
-func (z *Float) UnmarshalText(text []byte) error {
- // TODO(gri): get rid of the []byte/string conversion
- _, _, err := z.Parse(string(text), 0)
- if err != nil {
- err = fmt.Errorf("math/big: cannot unmarshal %q into a *big.Float (%v)", text, err)
- }
- return err
-}
diff --git a/contrib/go/_std_1.18/src/math/big/int.go b/contrib/go/_std_1.18/src/math/big/int.go
deleted file mode 100644
index 7647346486..0000000000
--- a/contrib/go/_std_1.18/src/math/big/int.go
+++ /dev/null
@@ -1,1218 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements signed multi-precision integers.
-
-package big
-
-import (
- "fmt"
- "io"
- "math/rand"
- "strings"
-)
-
-// An Int represents a signed multi-precision integer.
-// The zero value for an Int represents the value 0.
-//
-// Operations always take pointer arguments (*Int) rather
-// than Int values, and each unique Int value requires
-// its own unique *Int pointer. To "copy" an Int value,
-// an existing (or newly allocated) Int must be set to
-// a new value using the Int.Set method; shallow copies
-// of Ints are not supported and may lead to errors.
-type Int struct {
- neg bool // sign
- abs nat // absolute value of the integer
-}
-
-var intOne = &Int{false, natOne}
-
-// Sign returns:
-//
-// -1 if x < 0
-// 0 if x == 0
-// +1 if x > 0
-//
-func (x *Int) Sign() int {
- if len(x.abs) == 0 {
- return 0
- }
- if x.neg {
- return -1
- }
- return 1
-}
-
-// SetInt64 sets z to x and returns z.
-func (z *Int) SetInt64(x int64) *Int {
- neg := false
- if x < 0 {
- neg = true
- x = -x
- }
- z.abs = z.abs.setUint64(uint64(x))
- z.neg = neg
- return z
-}
-
-// SetUint64 sets z to x and returns z.
-func (z *Int) SetUint64(x uint64) *Int {
- z.abs = z.abs.setUint64(x)
- z.neg = false
- return z
-}
-
-// NewInt allocates and returns a new Int set to x.
-func NewInt(x int64) *Int {
- return new(Int).SetInt64(x)
-}
-
-// Set sets z to x and returns z.
-func (z *Int) Set(x *Int) *Int {
- if z != x {
- z.abs = z.abs.set(x.abs)
- z.neg = x.neg
- }
- return z
-}
-
-// Bits provides raw (unchecked but fast) access to x by returning its
-// absolute value as a little-endian Word slice. The result and x share
-// the same underlying array.
-// Bits is intended to support implementation of missing low-level Int
-// functionality outside this package; it should be avoided otherwise.
-func (x *Int) Bits() []Word {
- return x.abs
-}
-
-// SetBits provides raw (unchecked but fast) access to z by setting its
-// value to abs, interpreted as a little-endian Word slice, and returning
-// z. The result and abs share the same underlying array.
-// SetBits is intended to support implementation of missing low-level Int
-// functionality outside this package; it should be avoided otherwise.
-func (z *Int) SetBits(abs []Word) *Int {
- z.abs = nat(abs).norm()
- z.neg = false
- return z
-}
-
-// Abs sets z to |x| (the absolute value of x) and returns z.
-func (z *Int) Abs(x *Int) *Int {
- z.Set(x)
- z.neg = false
- return z
-}
-
-// Neg sets z to -x and returns z.
-func (z *Int) Neg(x *Int) *Int {
- z.Set(x)
- z.neg = len(z.abs) > 0 && !z.neg // 0 has no sign
- return z
-}
-
-// Add sets z to the sum x+y and returns z.
-func (z *Int) Add(x, y *Int) *Int {
- neg := x.neg
- if x.neg == y.neg {
- // x + y == x + y
- // (-x) + (-y) == -(x + y)
- z.abs = z.abs.add(x.abs, y.abs)
- } else {
- // x + (-y) == x - y == -(y - x)
- // (-x) + y == y - x == -(x - y)
- if x.abs.cmp(y.abs) >= 0 {
- z.abs = z.abs.sub(x.abs, y.abs)
- } else {
- neg = !neg
- z.abs = z.abs.sub(y.abs, x.abs)
- }
- }
- z.neg = len(z.abs) > 0 && neg // 0 has no sign
- return z
-}
-
-// Sub sets z to the difference x-y and returns z.
-func (z *Int) Sub(x, y *Int) *Int {
- neg := x.neg
- if x.neg != y.neg {
- // x - (-y) == x + y
- // (-x) - y == -(x + y)
- z.abs = z.abs.add(x.abs, y.abs)
- } else {
- // x - y == x - y == -(y - x)
- // (-x) - (-y) == y - x == -(x - y)
- if x.abs.cmp(y.abs) >= 0 {
- z.abs = z.abs.sub(x.abs, y.abs)
- } else {
- neg = !neg
- z.abs = z.abs.sub(y.abs, x.abs)
- }
- }
- z.neg = len(z.abs) > 0 && neg // 0 has no sign
- return z
-}
-
-// Mul sets z to the product x*y and returns z.
-func (z *Int) Mul(x, y *Int) *Int {
- // x * y == x * y
- // x * (-y) == -(x * y)
- // (-x) * y == -(x * y)
- // (-x) * (-y) == x * y
- if x == y {
- z.abs = z.abs.sqr(x.abs)
- z.neg = false
- return z
- }
- z.abs = z.abs.mul(x.abs, y.abs)
- z.neg = len(z.abs) > 0 && x.neg != y.neg // 0 has no sign
- return z
-}
-
-// MulRange sets z to the product of all integers
-// in the range [a, b] inclusively and returns z.
-// If a > b (empty range), the result is 1.
-func (z *Int) MulRange(a, b int64) *Int {
- switch {
- case a > b:
- return z.SetInt64(1) // empty range
- case a <= 0 && b >= 0:
- return z.SetInt64(0) // range includes 0
- }
- // a <= b && (b < 0 || a > 0)
-
- neg := false
- if a < 0 {
- neg = (b-a)&1 == 0
- a, b = -b, -a
- }
-
- z.abs = z.abs.mulRange(uint64(a), uint64(b))
- z.neg = neg
- return z
-}
-
-// Binomial sets z to the binomial coefficient of (n, k) and returns z.
-func (z *Int) Binomial(n, k int64) *Int {
- // reduce the number of multiplications by reducing k
- if n/2 < k && k <= n {
- k = n - k // Binomial(n, k) == Binomial(n, n-k)
- }
- var a, b Int
- a.MulRange(n-k+1, n)
- b.MulRange(1, k)
- return z.Quo(&a, &b)
-}
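
As a quick illustration of MulRange and Binomial together, a minimal runnable sketch (the values in the comments are easy to verify by hand):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	var f, c big.Int
	f.MulRange(1, 10) // 10! = 3628800; MulRange(a, b) with a > b yields 1
	c.Binomial(5, 2)  // C(5,2) = 10, computed as MulRange(4,5)/MulRange(1,2)
	fmt.Println(&f, &c)
}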
-
-// Quo sets z to the quotient x/y for y != 0 and returns z.
-// If y == 0, a division-by-zero run-time panic occurs.
-// Quo implements truncated division (like Go); see QuoRem for more details.
-func (z *Int) Quo(x, y *Int) *Int {
- z.abs, _ = z.abs.div(nil, x.abs, y.abs)
- z.neg = len(z.abs) > 0 && x.neg != y.neg // 0 has no sign
- return z
-}
-
-// Rem sets z to the remainder x%y for y != 0 and returns z.
-// If y == 0, a division-by-zero run-time panic occurs.
-// Rem implements truncated modulus (like Go); see QuoRem for more details.
-func (z *Int) Rem(x, y *Int) *Int {
- _, z.abs = nat(nil).div(z.abs, x.abs, y.abs)
- z.neg = len(z.abs) > 0 && x.neg // 0 has no sign
- return z
-}
-
-// QuoRem sets z to the quotient x/y and r to the remainder x%y
-// and returns the pair (z, r) for y != 0.
-// If y == 0, a division-by-zero run-time panic occurs.
-//
-// QuoRem implements T-division and modulus (like Go):
-//
-// q = x/y with the result truncated to zero
-// r = x - y*q
-//
-// (See Daan Leijen, ``Division and Modulus for Computer Scientists''.)
-// See DivMod for Euclidean division and modulus (unlike Go).
-//
-func (z *Int) QuoRem(x, y, r *Int) (*Int, *Int) {
- z.abs, r.abs = z.abs.div(r.abs, x.abs, y.abs)
- z.neg, r.neg = len(z.abs) > 0 && x.neg != y.neg, len(r.abs) > 0 && x.neg // 0 has no sign
- return z, r
-}
-
-// Div sets z to the quotient x/y for y != 0 and returns z.
-// If y == 0, a division-by-zero run-time panic occurs.
-// Div implements Euclidean division (unlike Go); see DivMod for more details.
-func (z *Int) Div(x, y *Int) *Int {
- y_neg := y.neg // z may be an alias for y
- var r Int
- z.QuoRem(x, y, &r)
- if r.neg {
- if y_neg {
- z.Add(z, intOne)
- } else {
- z.Sub(z, intOne)
- }
- }
- return z
-}
-
-// Mod sets z to the modulus x%y for y != 0 and returns z.
-// If y == 0, a division-by-zero run-time panic occurs.
-// Mod implements Euclidean modulus (unlike Go); see DivMod for more details.
-func (z *Int) Mod(x, y *Int) *Int {
- y0 := y // save y
- if z == y || alias(z.abs, y.abs) {
- y0 = new(Int).Set(y)
- }
- var q Int
- q.QuoRem(x, y, z)
- if z.neg {
- if y0.neg {
- z.Sub(z, y0)
- } else {
- z.Add(z, y0)
- }
- }
- return z
-}
-
-// DivMod sets z to the quotient x div y and m to the modulus x mod y
-// and returns the pair (z, m) for y != 0.
-// If y == 0, a division-by-zero run-time panic occurs.
-//
-// DivMod implements Euclidean division and modulus (unlike Go):
-//
-// q = x div y such that
-// m = x - y*q with 0 <= m < |y|
-//
-// (See Raymond T. Boute, ``The Euclidean definition of the functions
-// div and mod''. ACM Transactions on Programming Languages and
-// Systems (TOPLAS), 14(2):127-144, New York, NY, USA, 4/1992.
-// ACM press.)
-// See QuoRem for T-division and modulus (like Go).
-//
-func (z *Int) DivMod(x, y, m *Int) (*Int, *Int) {
- y0 := y // save y
- if z == y || alias(z.abs, y.abs) {
- y0 = new(Int).Set(y)
- }
- z.QuoRem(x, y, m)
- if m.neg {
- if y0.neg {
- z.Add(z, intOne)
- m.Sub(m, y0)
- } else {
- z.Sub(z, intOne)
- m.Add(m, y0)
- }
- }
- return z, m
-}
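
The practical difference between the two division pairs shows up on negative operands; a small runnable sketch of truncated Quo/Rem versus Euclidean Div/Mod:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	x, y := big.NewInt(-7), big.NewInt(3)

	q, r := new(big.Int), new(big.Int)
	q.QuoRem(x, y, r) // T-division, like Go: q = -2, r = -1

	d, m := new(big.Int), new(big.Int)
	d.DivMod(x, y, m) // Euclidean: d = -3, m = 2, with 0 <= m < |y|

	fmt.Println(q, r, d, m) // -2 -1 -3 2
}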
-
-// Cmp compares x and y and returns:
-//
-// -1 if x < y
-// 0 if x == y
-// +1 if x > y
-//
-func (x *Int) Cmp(y *Int) (r int) {
- // x cmp y == x cmp y
- // x cmp (-y) == x
- // (-x) cmp y == y
- // (-x) cmp (-y) == -(x cmp y)
- switch {
- case x == y:
- // nothing to do
- case x.neg == y.neg:
- r = x.abs.cmp(y.abs)
- if x.neg {
- r = -r
- }
- case x.neg:
- r = -1
- default:
- r = 1
- }
- return
-}
-
-// CmpAbs compares the absolute values of x and y and returns:
-//
-// -1 if |x| < |y|
-// 0 if |x| == |y|
-// +1 if |x| > |y|
-//
-func (x *Int) CmpAbs(y *Int) int {
- return x.abs.cmp(y.abs)
-}
-
-// low32 returns the least significant 32 bits of x.
-func low32(x nat) uint32 {
- if len(x) == 0 {
- return 0
- }
- return uint32(x[0])
-}
-
-// low64 returns the least significant 64 bits of x.
-func low64(x nat) uint64 {
- if len(x) == 0 {
- return 0
- }
- v := uint64(x[0])
- if _W == 32 && len(x) > 1 {
- return uint64(x[1])<<32 | v
- }
- return v
-}
-
-// Int64 returns the int64 representation of x.
-// If x cannot be represented in an int64, the result is undefined.
-func (x *Int) Int64() int64 {
- v := int64(low64(x.abs))
- if x.neg {
- v = -v
- }
- return v
-}
-
-// Uint64 returns the uint64 representation of x.
-// If x cannot be represented in a uint64, the result is undefined.
-func (x *Int) Uint64() uint64 {
- return low64(x.abs)
-}
-
-// IsInt64 reports whether x can be represented as an int64.
-func (x *Int) IsInt64() bool {
- if len(x.abs) <= 64/_W {
- w := int64(low64(x.abs))
- return w >= 0 || x.neg && w == -w
- }
- return false
-}
-
-// IsUint64 reports whether x can be represented as a uint64.
-func (x *Int) IsUint64() bool {
- return !x.neg && len(x.abs) <= 64/_W
-}
-
-// SetString sets z to the value of s, interpreted in the given base,
-// and returns z and a boolean indicating success. The entire string
-// (not just a prefix) must be valid for success. If SetString fails,
-// the value of z is undefined but the returned value is nil.
-//
-// The base argument must be 0 or a value between 2 and MaxBase.
-// For base 0, the number prefix determines the actual base: A prefix of
-// ``0b'' or ``0B'' selects base 2, ``0'', ``0o'' or ``0O'' selects base 8,
-// and ``0x'' or ``0X'' selects base 16. Otherwise, the selected base is 10
-// and no prefix is accepted.
-//
-// For bases <= 36, lower and upper case letters are considered the same:
-// The letters 'a' to 'z' and 'A' to 'Z' represent digit values 10 to 35.
-// For bases > 36, the upper case letters 'A' to 'Z' represent the digit
-// values 36 to 61.
-//
-// For base 0, an underscore character ``_'' may appear between a base
-// prefix and an adjacent digit, and between successive digits; such
-// underscores do not change the value of the number.
-// Incorrect placement of underscores is reported as an error if there
-// are no other errors. If base != 0, underscores are not recognized
-// and act like any other character that is not a valid digit.
-//
-func (z *Int) SetString(s string, base int) (*Int, bool) {
- return z.setFromScanner(strings.NewReader(s), base)
-}
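
A short sketch of the base-0 prefix and underscore rules described above:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	n, ok := new(big.Int).SetString("0xdead_beef", 0) // base 0: prefix picks base 16
	fmt.Println(n, ok)                                // 3735928559 true

	m, ok := new(big.Int).SetString("1_0", 10) // base != 0: underscores are invalid
	fmt.Println(m, ok)                         // <nil> false
}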
-
-// setFromScanner implements SetString given an io.ByteScanner.
-// For documentation see comments of SetString.
-func (z *Int) setFromScanner(r io.ByteScanner, base int) (*Int, bool) {
- if _, _, err := z.scan(r, base); err != nil {
- return nil, false
- }
- // entire content must have been consumed
- if _, err := r.ReadByte(); err != io.EOF {
- return nil, false
- }
- return z, true // err == io.EOF => scan consumed all content of r
-}
-
-// SetBytes interprets buf as the bytes of a big-endian unsigned
-// integer, sets z to that value, and returns z.
-func (z *Int) SetBytes(buf []byte) *Int {
- z.abs = z.abs.setBytes(buf)
- z.neg = false
- return z
-}
-
-// Bytes returns the absolute value of x as a big-endian byte slice.
-//
-// To use a fixed length slice, or a preallocated one, use FillBytes.
-func (x *Int) Bytes() []byte {
- buf := make([]byte, len(x.abs)*_S)
- return buf[x.abs.bytes(buf):]
-}
-
-// FillBytes sets buf to the absolute value of x, storing it as a zero-extended
-// big-endian byte slice, and returns buf.
-//
-// If the absolute value of x doesn't fit in buf, FillBytes will panic.
-func (x *Int) FillBytes(buf []byte) []byte {
- // Clear whole buffer. (This gets optimized into a memclr.)
- for i := range buf {
- buf[i] = 0
- }
- x.abs.bytes(buf)
- return buf
-}
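
Bytes, SetBytes and FillBytes together give a big-endian wire encoding; a sketch of the round trip, including the fixed-width case FillBytes exists for:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	x := big.NewInt(0x0102)

	fmt.Printf("% x\n", x.Bytes()) // 01 02 (minimal length, no leading zeros)

	buf := make([]byte, 4) // fixed width, e.g. a 32-bit field in a wire format
	x.FillBytes(buf)
	fmt.Printf("% x\n", buf) // 00 00 01 02

	y := new(big.Int).SetBytes(buf) // leading zeros are irrelevant on decode
	fmt.Println(x.Cmp(y) == 0)      // true
}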
-
-// BitLen returns the length of the absolute value of x in bits.
-// The bit length of 0 is 0.
-func (x *Int) BitLen() int {
- return x.abs.bitLen()
-}
-
-// TrailingZeroBits returns the number of consecutive least significant zero
-// bits of |x|.
-func (x *Int) TrailingZeroBits() uint {
- return x.abs.trailingZeroBits()
-}
-
-// Exp sets z = x**y mod |m| (i.e. the sign of m is ignored), and returns z.
-// If m == nil or m == 0, z = x**y; if y <= 0, z is set to 1. If m != 0, y < 0,
-// and x and m are not relatively prime, z is unchanged and nil is returned.
-//
-// Modular exponentiation of inputs of a particular size is not a
-// cryptographically constant-time operation.
-func (z *Int) Exp(x, y, m *Int) *Int {
- // See Knuth, volume 2, section 4.6.3.
- xWords := x.abs
- if y.neg {
- if m == nil || len(m.abs) == 0 {
- return z.SetInt64(1)
- }
- // for y < 0: x**y mod m == (x**(-1))**|y| mod m
- inverse := new(Int).ModInverse(x, m)
- if inverse == nil {
- return nil
- }
- xWords = inverse.abs
- }
- yWords := y.abs
-
- var mWords nat
- if m != nil {
- mWords = m.abs // m.abs may be nil for m == 0
- }
-
- z.abs = z.abs.expNN(xWords, yWords, mWords)
- z.neg = len(z.abs) > 0 && x.neg && len(yWords) > 0 && yWords[0]&1 == 1 // 0 has no sign
- if z.neg && len(mWords) > 0 {
- // make modulus result positive
- z.abs = z.abs.sub(mWords, z.abs) // z == x**y mod |m| && 0 <= z < |m|
- z.neg = false
- }
-
- return z
-}
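
A sketch of the three Exp modes documented above: no modulus, a positive modulus, and a negative exponent that routes through ModInverse first:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	two := big.NewInt(2)

	fmt.Println(new(big.Int).Exp(two, big.NewInt(10), nil))           // 1024
	fmt.Println(new(big.Int).Exp(two, big.NewInt(10), big.NewInt(7))) // 2
	fmt.Println(new(big.Int).Exp(two, big.NewInt(-1), big.NewInt(7))) // 4, the inverse of 2 mod 7
}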
-
-// GCD sets z to the greatest common divisor of a and b and returns z.
-// If x or y are not nil, GCD sets their value such that z = a*x + b*y.
-//
-// a and b may be positive, zero or negative. (Before Go 1.14 both had
-// to be > 0.) Regardless of the signs of a and b, z is always >= 0.
-//
-// If a == b == 0, GCD sets z = x = y = 0.
-//
-// If a == 0 and b != 0, GCD sets z = |b|, x = 0, y = sign(b) * 1.
-//
-// If a != 0 and b == 0, GCD sets z = |a|, x = sign(a) * 1, y = 0.
-func (z *Int) GCD(x, y, a, b *Int) *Int {
- if len(a.abs) == 0 || len(b.abs) == 0 {
- lenA, lenB, negA, negB := len(a.abs), len(b.abs), a.neg, b.neg
- if lenA == 0 {
- z.Set(b)
- } else {
- z.Set(a)
- }
- z.neg = false
- if x != nil {
- if lenA == 0 {
- x.SetUint64(0)
- } else {
- x.SetUint64(1)
- x.neg = negA
- }
- }
- if y != nil {
- if lenB == 0 {
- y.SetUint64(0)
- } else {
- y.SetUint64(1)
- y.neg = negB
- }
- }
- return z
- }
-
- return z.lehmerGCD(x, y, a, b)
-}
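
A sketch of the extended GCD contract, verifying the Bezout identity z = a*x + b*y; the coefficient pair in the comment is the one classical extended Euclid yields, and any valid pair passes the check:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	a, b := big.NewInt(240), big.NewInt(46)
	x, y := new(big.Int), new(big.Int)

	z := new(big.Int).GCD(x, y, a, b)
	fmt.Println(z, x, y) // 2 -9 47

	bezout := new(big.Int).Mul(a, x)
	bezout.Add(bezout, new(big.Int).Mul(b, y))
	fmt.Println(bezout.Cmp(z) == 0) // true: 240*(-9) + 46*47 == 2
}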
-
-// lehmerSimulate attempts to simulate several Euclidean update steps
-// using the leading digits of A and B. It returns u0, u1, v0, v1
-// such that A and B can be updated as:
-// A = u0*A + v0*B
-// B = u1*A + v1*B
-// Requirements: A >= B and len(B.abs) >= 2
-// Since we are calculating with full words to avoid overflow,
-// we use 'even' to track the sign of the cosequences.
-// For even iterations: u0, v1 >= 0 && u1, v0 <= 0
-// For odd iterations: u0, v1 <= 0 && u1, v0 >= 0
-func lehmerSimulate(A, B *Int) (u0, u1, v0, v1 Word, even bool) {
- // initialize the digits
- var a1, a2, u2, v2 Word
-
- m := len(B.abs) // m >= 2
- n := len(A.abs) // n >= m >= 2
-
- // extract the top Word of bits from A and B
- h := nlz(A.abs[n-1])
- a1 = A.abs[n-1]<<h | A.abs[n-2]>>(_W-h)
- // B may have implicit zero words in the high bits if the lengths differ
- switch {
- case n == m:
- a2 = B.abs[n-1]<<h | B.abs[n-2]>>(_W-h)
- case n == m+1:
- a2 = B.abs[n-2] >> (_W - h)
- default:
- a2 = 0
- }
-
- // Since we are calculating with full words to avoid overflow,
- // we use 'even' to track the sign of the cosequences.
- // For even iterations: u0, v1 >= 0 && u1, v0 <= 0
- // For odd iterations: u0, v1 <= 0 && u1, v0 >= 0
- // The first iteration starts with k=1 (odd).
- even = false
- // variables to track the cosequences
- u0, u1, u2 = 0, 1, 0
- v0, v1, v2 = 0, 0, 1
-
- // Calculate the quotient and cosequences using Collins' stopping condition.
- // Note that overflow of a Word is not possible when computing the remainder
- // sequence and cosequences since the cosequence size is bounded by the input size.
- // See section 4.2 of Jebelean for details.
- for a2 >= v2 && a1-a2 >= v1+v2 {
- q, r := a1/a2, a1%a2
- a1, a2 = a2, r
- u0, u1, u2 = u1, u2, u1+q*u2
- v0, v1, v2 = v1, v2, v1+q*v2
- even = !even
- }
- return
-}
-
-// lehmerUpdate updates the inputs A and B such that:
-// A = u0*A + v0*B
-// B = u1*A + v1*B
-// where the signs of u0, u1, v0, v1 are given by even
-// For even == true: u0, v1 >= 0 && u1, v0 <= 0
-// For even == false: u0, v1 <= 0 && u1, v0 >= 0
-// q, r, s, t are temporary variables to avoid allocations in the multiplication
-func lehmerUpdate(A, B, q, r, s, t *Int, u0, u1, v0, v1 Word, even bool) {
-
- t.abs = t.abs.setWord(u0)
- s.abs = s.abs.setWord(v0)
- t.neg = !even
- s.neg = even
-
- t.Mul(A, t)
- s.Mul(B, s)
-
- r.abs = r.abs.setWord(u1)
- q.abs = q.abs.setWord(v1)
- r.neg = even
- q.neg = !even
-
- r.Mul(A, r)
- q.Mul(B, q)
-
- A.Add(t, s)
- B.Add(r, q)
-}
-
-// euclidUpdate performs a single step of the Euclidean GCD algorithm
-// if extended is true, it also updates the cosequence Ua, Ub
-func euclidUpdate(A, B, Ua, Ub, q, r, s, t *Int, extended bool) {
- q, r = q.QuoRem(A, B, r)
-
- *A, *B, *r = *B, *r, *A
-
- if extended {
- // Ua, Ub = Ub, Ua - q*Ub
- t.Set(Ub)
- s.Mul(Ub, q)
- Ub.Sub(Ua, s)
- Ua.Set(t)
- }
-}
-
-// lehmerGCD sets z to the greatest common divisor of a and b,
-// which both must be != 0, and returns z.
-// If x or y are not nil, their values are set such that z = a*x + b*y.
-// See Knuth, The Art of Computer Programming, Vol. 2, Section 4.5.2, Algorithm L.
-// This implementation uses the improved condition by Collins requiring only one
-// quotient and avoiding the possibility of single Word overflow.
-// See Jebelean, "Improving the multiprecision Euclidean algorithm",
-// Design and Implementation of Symbolic Computation Systems, pp 45-58.
-// The cosequences are updated according to Algorithm 10.45 from
-// Cohen et al. "Handbook of Elliptic and Hyperelliptic Curve Cryptography" pp 192.
-func (z *Int) lehmerGCD(x, y, a, b *Int) *Int {
- var A, B, Ua, Ub *Int
-
- A = new(Int).Abs(a)
- B = new(Int).Abs(b)
-
- extended := x != nil || y != nil
-
- if extended {
- // Ua (Ub) tracks how many times input a has been accumulated into A (B).
- Ua = new(Int).SetInt64(1)
- Ub = new(Int)
- }
-
- // temp variables for multiprecision update
- q := new(Int)
- r := new(Int)
- s := new(Int)
- t := new(Int)
-
- // ensure A >= B
- if A.abs.cmp(B.abs) < 0 {
- A, B = B, A
- Ub, Ua = Ua, Ub
- }
-
- // loop invariant A >= B
- for len(B.abs) > 1 {
- // Attempt to calculate in single-precision using leading words of A and B.
- u0, u1, v0, v1, even := lehmerSimulate(A, B)
-
- // multiprecision Step
- if v0 != 0 {
- // Simulate the effect of the single-precision steps using the cosequences.
- // A = u0*A + v0*B
- // B = u1*A + v1*B
- lehmerUpdate(A, B, q, r, s, t, u0, u1, v0, v1, even)
-
- if extended {
- // Ua = u0*Ua + v0*Ub
- // Ub = u1*Ua + v1*Ub
- lehmerUpdate(Ua, Ub, q, r, s, t, u0, u1, v0, v1, even)
- }
-
- } else {
- // Single-digit calculations failed to simulate any quotients.
- // Do a standard Euclidean step.
- euclidUpdate(A, B, Ua, Ub, q, r, s, t, extended)
- }
- }
-
- if len(B.abs) > 0 {
- // extended Euclidean algorithm base case if B is a single Word
- if len(A.abs) > 1 {
- // A is longer than a single Word, so one update is needed.
- euclidUpdate(A, B, Ua, Ub, q, r, s, t, extended)
- }
- if len(B.abs) > 0 {
- // A and B are both a single Word.
- aWord, bWord := A.abs[0], B.abs[0]
- if extended {
- var ua, ub, va, vb Word
- ua, ub = 1, 0
- va, vb = 0, 1
- even := true
- for bWord != 0 {
- q, r := aWord/bWord, aWord%bWord
- aWord, bWord = bWord, r
- ua, ub = ub, ua+q*ub
- va, vb = vb, va+q*vb
- even = !even
- }
-
- t.abs = t.abs.setWord(ua)
- s.abs = s.abs.setWord(va)
- t.neg = !even
- s.neg = even
-
- t.Mul(Ua, t)
- s.Mul(Ub, s)
-
- Ua.Add(t, s)
- } else {
- for bWord != 0 {
- aWord, bWord = bWord, aWord%bWord
- }
- }
- A.abs[0] = aWord
- }
- }
- negA := a.neg
- if y != nil {
- // avoid aliasing b needed in the division below
- if y == b {
- B.Set(b)
- } else {
- B = b
- }
- // y = (z - a*x)/b
- y.Mul(a, Ua) // y can safely alias a
- if negA {
- y.neg = !y.neg
- }
- y.Sub(A, y)
- y.Div(y, B)
- }
-
- if x != nil {
- *x = *Ua
- if negA {
- x.neg = !x.neg
- }
- }
-
- *z = *A
-
- return z
-}
-
-// Rand sets z to a pseudo-random number in [0, n) and returns z.
-//
-// As this uses the math/rand package, it must not be used for
-// security-sensitive work. Use crypto/rand.Int instead.
-func (z *Int) Rand(rnd *rand.Rand, n *Int) *Int {
- z.neg = false
- if n.neg || len(n.abs) == 0 {
- z.abs = nil
- return z
- }
- z.abs = z.abs.random(rnd, n.abs, n.abs.bitLen())
- return z
-}
-
-// ModInverse sets z to the multiplicative inverse of g in the ring ℤ/nℤ
-// and returns z. If g and n are not relatively prime, g has no multiplicative
-// inverse in the ring ℤ/nℤ. In this case, z is unchanged and the return value
-// is nil.
-func (z *Int) ModInverse(g, n *Int) *Int {
- // GCD expects parameters a and b to be > 0.
- if n.neg {
- var n2 Int
- n = n2.Neg(n)
- }
- if g.neg {
- var g2 Int
- g = g2.Mod(g, n)
- }
- var d, x Int
- d.GCD(&x, nil, g, n)
-
- // if and only if d==1, g and n are relatively prime
- if d.Cmp(intOne) != 0 {
- return nil
- }
-
- // x and y are such that g*x + n*y = 1, therefore x is the inverse element,
- // but it may be negative, so convert to the range 0 <= z < |n|
- if x.neg {
- z.Add(&x, n)
- } else {
- z.Set(&x)
- }
- return z
-}
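
ModInverse in use, including the nil return for non-coprime arguments; a sketch:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	inv := new(big.Int).ModInverse(big.NewInt(3), big.NewInt(11))
	fmt.Println(inv) // 4, since 3*4 == 12 == 1 (mod 11)

	// gcd(2, 4) != 1, so 2 has no inverse mod 4 and the result is nil.
	fmt.Println(new(big.Int).ModInverse(big.NewInt(2), big.NewInt(4))) // <nil>
}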
-
-// Jacobi returns the Jacobi symbol (x/y), either +1, -1, or 0.
-// The y argument must be an odd integer.
-func Jacobi(x, y *Int) int {
- if len(y.abs) == 0 || y.abs[0]&1 == 0 {
- panic(fmt.Sprintf("big: invalid 2nd argument to Int.Jacobi: need odd integer but got %s", y))
- }
-
- // We use the formulation described in chapter 2, section 2.4,
- // "The Yacas Book of Algorithms":
- // http://yacas.sourceforge.net/Algo.book.pdf
-
- var a, b, c Int
- a.Set(x)
- b.Set(y)
- j := 1
-
- if b.neg {
- if a.neg {
- j = -1
- }
- b.neg = false
- }
-
- for {
- if b.Cmp(intOne) == 0 {
- return j
- }
- if len(a.abs) == 0 {
- return 0
- }
- a.Mod(&a, &b)
- if len(a.abs) == 0 {
- return 0
- }
- // a > 0
-
- // handle factors of 2 in 'a'
- s := a.abs.trailingZeroBits()
- if s&1 != 0 {
- bmod8 := b.abs[0] & 7
- if bmod8 == 3 || bmod8 == 5 {
- j = -j
- }
- }
- c.Rsh(&a, s) // a = 2^s*c
-
- // swap numerator and denominator
- if b.abs[0]&3 == 3 && c.abs[0]&3 == 3 {
- j = -j
- }
- a.Set(&b)
- b.Set(&c)
- }
-}
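
The three possible Jacobi values on a small prime modulus (the squares mod 7 are {1, 2, 4}); a sketch:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	seven := big.NewInt(7)
	fmt.Println(big.Jacobi(big.NewInt(2), seven))  // +1: 2 is a residue (3*3 == 2 mod 7)
	fmt.Println(big.Jacobi(big.NewInt(5), seven))  // -1: 5 is a non-residue
	fmt.Println(big.Jacobi(big.NewInt(14), seven)) // 0: shares the factor 7
}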
-
-// modSqrt3Mod4Prime uses the identity (writing the residue as a = u^2)
-//   (a^((p+1)/4))^2 mod p
-// == u^(p+1) mod p
-// == u^2 mod p
-// to calculate the square root of any quadratic residue mod p quickly for 3
-// mod 4 primes.
-func (z *Int) modSqrt3Mod4Prime(x, p *Int) *Int {
- e := new(Int).Add(p, intOne) // e = p + 1
- e.Rsh(e, 2) // e = (p + 1) / 4
- z.Exp(x, e, p) // z = x^e mod p
- return z
-}
-
-// modSqrt5Mod8Prime uses Atkin's observation that 2 is not a square mod p:
-// alpha == (2*a)^((p-5)/8) mod p
-// beta == 2*a*alpha^2 mod p is a square root of -1
-// b == a*alpha*(beta-1) mod p is a square root of a
-// to calculate the square root of any quadratic residue mod p quickly for 5
-// mod 8 primes.
-func (z *Int) modSqrt5Mod8Prime(x, p *Int) *Int {
- // p == 5 mod 8 implies p = e*8 + 5
- // e is the quotient and 5 the remainder on division by 8
- e := new(Int).Rsh(p, 3) // e = (p - 5) / 8
- tx := new(Int).Lsh(x, 1) // tx = 2*x
- alpha := new(Int).Exp(tx, e, p)
- beta := new(Int).Mul(alpha, alpha)
- beta.Mod(beta, p)
- beta.Mul(beta, tx)
- beta.Mod(beta, p)
- beta.Sub(beta, intOne)
- beta.Mul(beta, x)
- beta.Mod(beta, p)
- beta.Mul(beta, alpha)
- z.Mod(beta, p)
- return z
-}
-
-// modSqrtTonelliShanks uses the Tonelli-Shanks algorithm to find the square
-// root of a quadratic residue modulo any prime.
-func (z *Int) modSqrtTonelliShanks(x, p *Int) *Int {
- // Break p-1 into s*2^e such that s is odd.
- var s Int
- s.Sub(p, intOne)
- e := s.abs.trailingZeroBits()
- s.Rsh(&s, e)
-
- // find some non-square n
- var n Int
- n.SetInt64(2)
- for Jacobi(&n, p) != -1 {
- n.Add(&n, intOne)
- }
-
- // Core of the Tonelli-Shanks algorithm. Follows the description in
- // section 6 of "Square roots from 1; 24, 51, 10 to Dan Shanks" by Ezra
- // Brown:
- // https://www.maa.org/sites/default/files/pdf/upload_library/22/Polya/07468342.di020786.02p0470a.pdf
- var y, b, g, t Int
- y.Add(&s, intOne)
- y.Rsh(&y, 1)
- y.Exp(x, &y, p) // y = x^((s+1)/2)
- b.Exp(x, &s, p) // b = x^s
- g.Exp(&n, &s, p) // g = n^s
- r := e
- for {
- // find the least m such that ord_p(b) = 2^m
- var m uint
- t.Set(&b)
- for t.Cmp(intOne) != 0 {
- t.Mul(&t, &t).Mod(&t, p)
- m++
- }
-
- if m == 0 {
- return z.Set(&y)
- }
-
- t.SetInt64(0).SetBit(&t, int(r-m-1), 1).Exp(&g, &t, p)
- // t = g^(2^(r-m-1)) mod p
- g.Mul(&t, &t).Mod(&g, p) // g = g^(2^(r-m)) mod p
- y.Mul(&y, &t).Mod(&y, p)
- b.Mul(&b, &g).Mod(&b, p)
- r = m
- }
-}
-
-// ModSqrt sets z to a square root of x mod p if such a square root exists, and
-// returns z. The modulus p must be an odd prime. If x is not a square mod p,
-// ModSqrt leaves z unchanged and returns nil. This function panics if p is
-// not an odd integer.
-func (z *Int) ModSqrt(x, p *Int) *Int {
- switch Jacobi(x, p) {
- case -1:
- return nil // x is not a square mod p
- case 0:
- return z.SetInt64(0) // sqrt(0) mod p = 0
- case 1:
- break
- }
- if x.neg || x.Cmp(p) >= 0 { // ensure 0 <= x < p
- x = new(Int).Mod(x, p)
- }
-
- switch {
- case p.abs[0]%4 == 3:
- // Check whether p is 3 mod 4, and if so, use the faster algorithm.
- return z.modSqrt3Mod4Prime(x, p)
- case p.abs[0]%8 == 5:
- // Check whether p is 5 mod 8, use Atkin's algorithm.
- return z.modSqrt5Mod8Prime(x, p)
- default:
- // Otherwise, use Tonelli-Shanks.
- return z.modSqrtTonelliShanks(x, p)
- }
-}
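
ModSqrt on a 3 mod 4 prime takes the fast path above; a sketch with the arithmetic spelled out (p = 7, so z = x^((7+1)/4) = x^2 mod 7):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	x, p := big.NewInt(2), big.NewInt(7)

	r := new(big.Int).ModSqrt(x, p)
	fmt.Println(r) // 4 (= 2^2 mod 7); the other root is p-r = 3

	check := new(big.Int).Mul(r, r)
	fmt.Println(check.Mod(check, p)) // 2
}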
-
-// Lsh sets z = x << n and returns z.
-func (z *Int) Lsh(x *Int, n uint) *Int {
- z.abs = z.abs.shl(x.abs, n)
- z.neg = x.neg
- return z
-}
-
-// Rsh sets z = x >> n and returns z.
-func (z *Int) Rsh(x *Int, n uint) *Int {
- if x.neg {
- // (-x) >> s == ^(x-1) >> s == ^((x-1) >> s) == -(((x-1) >> s) + 1)
- t := z.abs.sub(x.abs, natOne) // no underflow because |x| > 0
- t = t.shr(t, n)
- z.abs = t.add(t, natOne)
- z.neg = true // z cannot be zero if x is negative
- return z
- }
-
- z.abs = z.abs.shr(x.abs, n)
- z.neg = false
- return z
-}
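
Rsh floors rather than truncates on negative values, matching Go's arithmetic >>, exactly as the identity in the comment implies; a one-line check:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	fmt.Println(new(big.Int).Rsh(big.NewInt(-5), 1)) // -3, i.e. floor(-5/2), same as -5 >> 1 in Go
	fmt.Println(new(big.Int).Lsh(big.NewInt(-5), 1)) // -10
}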
-
-// Bit returns the value of the i'th bit of x. That is, it
-// returns (x>>i)&1. The bit index i must be >= 0.
-func (x *Int) Bit(i int) uint {
- if i == 0 {
- // optimization for common case: odd/even test of x
- if len(x.abs) > 0 {
- return uint(x.abs[0] & 1) // bit 0 is same for -x
- }
- return 0
- }
- if i < 0 {
- panic("negative bit index")
- }
- if x.neg {
- t := nat(nil).sub(x.abs, natOne)
- return t.bit(uint(i)) ^ 1
- }
-
- return x.abs.bit(uint(i))
-}
-
-// SetBit sets z to x, with x's i'th bit set to b (0 or 1).
-// That is, if b is 1 SetBit sets z = x | (1 << i);
-// if b is 0 SetBit sets z = x &^ (1 << i). If b is not 0 or 1,
-// SetBit will panic.
-func (z *Int) SetBit(x *Int, i int, b uint) *Int {
- if i < 0 {
- panic("negative bit index")
- }
- if x.neg {
- t := z.abs.sub(x.abs, natOne)
- t = t.setBit(t, uint(i), b^1)
- z.abs = t.add(t, natOne)
- z.neg = len(z.abs) > 0
- return z
- }
- z.abs = z.abs.setBit(x.abs, uint(i), b)
- z.neg = false
- return z
-}
-
-// And sets z = x & y and returns z.
-func (z *Int) And(x, y *Int) *Int {
- if x.neg == y.neg {
- if x.neg {
- // (-x) & (-y) == ^(x-1) & ^(y-1) == ^((x-1) | (y-1)) == -(((x-1) | (y-1)) + 1)
- x1 := nat(nil).sub(x.abs, natOne)
- y1 := nat(nil).sub(y.abs, natOne)
- z.abs = z.abs.add(z.abs.or(x1, y1), natOne)
- z.neg = true // z cannot be zero if x and y are negative
- return z
- }
-
- // x & y == x & y
- z.abs = z.abs.and(x.abs, y.abs)
- z.neg = false
- return z
- }
-
- // x.neg != y.neg
- if x.neg {
- x, y = y, x // & is symmetric
- }
-
- // x & (-y) == x & ^(y-1) == x &^ (y-1)
- y1 := nat(nil).sub(y.abs, natOne)
- z.abs = z.abs.andNot(x.abs, y1)
- z.neg = false
- return z
-}
-
-// AndNot sets z = x &^ y and returns z.
-func (z *Int) AndNot(x, y *Int) *Int {
- if x.neg == y.neg {
- if x.neg {
- // (-x) &^ (-y) == ^(x-1) &^ ^(y-1) == ^(x-1) & (y-1) == (y-1) &^ (x-1)
- x1 := nat(nil).sub(x.abs, natOne)
- y1 := nat(nil).sub(y.abs, natOne)
- z.abs = z.abs.andNot(y1, x1)
- z.neg = false
- return z
- }
-
- // x &^ y == x &^ y
- z.abs = z.abs.andNot(x.abs, y.abs)
- z.neg = false
- return z
- }
-
- if x.neg {
- // (-x) &^ y == ^(x-1) &^ y == ^(x-1) & ^y == ^((x-1) | y) == -(((x-1) | y) + 1)
- x1 := nat(nil).sub(x.abs, natOne)
- z.abs = z.abs.add(z.abs.or(x1, y.abs), natOne)
- z.neg = true // z cannot be zero if x is negative and y is positive
- return z
- }
-
- // x &^ (-y) == x &^ ^(y-1) == x & (y-1)
- y1 := nat(nil).sub(y.abs, natOne)
- z.abs = z.abs.and(x.abs, y1)
- z.neg = false
- return z
-}
-
-// Or sets z = x | y and returns z.
-func (z *Int) Or(x, y *Int) *Int {
- if x.neg == y.neg {
- if x.neg {
- // (-x) | (-y) == ^(x-1) | ^(y-1) == ^((x-1) & (y-1)) == -(((x-1) & (y-1)) + 1)
- x1 := nat(nil).sub(x.abs, natOne)
- y1 := nat(nil).sub(y.abs, natOne)
- z.abs = z.abs.add(z.abs.and(x1, y1), natOne)
- z.neg = true // z cannot be zero if x and y are negative
- return z
- }
-
- // x | y == x | y
- z.abs = z.abs.or(x.abs, y.abs)
- z.neg = false
- return z
- }
-
- // x.neg != y.neg
- if x.neg {
- x, y = y, x // | is symmetric
- }
-
- // x | (-y) == x | ^(y-1) == ^((y-1) &^ x) == -(^((y-1) &^ x) + 1)
- y1 := nat(nil).sub(y.abs, natOne)
- z.abs = z.abs.add(z.abs.andNot(y1, x.abs), natOne)
- z.neg = true // z cannot be zero if one of x or y is negative
- return z
-}
-
-// Xor sets z = x ^ y and returns z.
-func (z *Int) Xor(x, y *Int) *Int {
- if x.neg == y.neg {
- if x.neg {
- // (-x) ^ (-y) == ^(x-1) ^ ^(y-1) == (x-1) ^ (y-1)
- x1 := nat(nil).sub(x.abs, natOne)
- y1 := nat(nil).sub(y.abs, natOne)
- z.abs = z.abs.xor(x1, y1)
- z.neg = false
- return z
- }
-
- // x ^ y == x ^ y
- z.abs = z.abs.xor(x.abs, y.abs)
- z.neg = false
- return z
- }
-
- // x.neg != y.neg
- if x.neg {
- x, y = y, x // ^ is symmetric
- }
-
- // x ^ (-y) == x ^ ^(y-1) == ^(x ^ (y-1)) == -((x ^ (y-1)) + 1)
- y1 := nat(nil).sub(y.abs, natOne)
- z.abs = z.abs.add(z.abs.xor(x.abs, y1), natOne)
- z.neg = true // z cannot be zero if only one of x or y is negative
- return z
-}
-
-// Not sets z = ^x and returns z.
-func (z *Int) Not(x *Int) *Int {
- if x.neg {
- // ^(-x) == ^(^(x-1)) == x-1
- z.abs = z.abs.sub(x.abs, natOne)
- z.neg = false
- return z
- }
-
- // ^x == -x-1 == -(x+1)
- z.abs = z.abs.add(x.abs, natOne)
- z.neg = true // z cannot be zero if x is positive
- return z
-}
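
All of the bitwise operations treat negative values as infinite two's-complement bit strings, which is what the ^(x-1) rewrites above implement; a sketch:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	minusOne := big.NewInt(-1) // ...111111: every bit set
	mask := big.NewInt(0xff)

	fmt.Println(new(big.Int).And(minusOne, mask)) // 255
	fmt.Println(new(big.Int).Not(big.NewInt(0)))  // -1 (^x == -x-1)
	fmt.Println(big.NewInt(-4).Bit(2))            // 1: -4 is ...111100, so bit 2 is set
}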
-
-// Sqrt sets z to ⌊√x⌋, the largest integer such that z² ≤ x, and returns z.
-// It panics if x is negative.
-func (z *Int) Sqrt(x *Int) *Int {
- if x.neg {
- panic("square root of negative number")
- }
- z.neg = false
- z.abs = z.abs.sqrt(x.abs)
- return z
-}
diff --git a/contrib/go/_std_1.18/src/math/big/intconv.go b/contrib/go/_std_1.18/src/math/big/intconv.go
deleted file mode 100644
index 0567284105..0000000000
--- a/contrib/go/_std_1.18/src/math/big/intconv.go
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements int-to-string conversion functions.
-
-package big
-
-import (
- "errors"
- "fmt"
- "io"
-)
-
-// Text returns the string representation of x in the given base.
-// Base must be between 2 and 62, inclusive. The result uses the
-// lower-case letters 'a' to 'z' for digit values 10 to 35, and
-// the upper-case letters 'A' to 'Z' for digit values 36 to 61.
-// No prefix (such as "0x") is added to the string. If x is a nil
-// pointer it returns "<nil>".
-func (x *Int) Text(base int) string {
- if x == nil {
- return "<nil>"
- }
- return string(x.abs.itoa(x.neg, base))
-}
-
-// Append appends the string representation of x, as generated by
-// x.Text(base), to buf and returns the extended buffer.
-func (x *Int) Append(buf []byte, base int) []byte {
- if x == nil {
- return append(buf, "<nil>"...)
- }
- return append(buf, x.abs.itoa(x.neg, base)...)
-}
-
-// String returns the decimal representation of x as generated by
-// x.Text(10).
-func (x *Int) String() string {
- return x.Text(10)
-}
-
-// write count copies of text to s
-func writeMultiple(s fmt.State, text string, count int) {
- if len(text) > 0 {
- b := []byte(text)
- for ; count > 0; count-- {
- s.Write(b)
- }
- }
-}
-
-var _ fmt.Formatter = intOne // *Int must implement fmt.Formatter
-
-// Format implements fmt.Formatter. It accepts the formats
-// 'b' (binary), 'o' (octal with 0 prefix), 'O' (octal with 0o prefix),
-// 'd' (decimal), 'x' (lowercase hexadecimal), and
-// 'X' (uppercase hexadecimal).
-// Also supported are the full suite of package fmt's format
-// flags for integral types, including '+' and ' ' for sign
-// control, '#' for leading zero in octal and for hexadecimal,
-// a leading "0x" or "0X" for "%#x" and "%#X" respectively,
-// specification of minimum digits precision, output field
-// width, space or zero padding, and '-' for left or right
-// justification.
-//
-func (x *Int) Format(s fmt.State, ch rune) {
- // determine base
- var base int
- switch ch {
- case 'b':
- base = 2
- case 'o', 'O':
- base = 8
- case 'd', 's', 'v':
- base = 10
- case 'x', 'X':
- base = 16
- default:
- // unknown format
- fmt.Fprintf(s, "%%!%c(big.Int=%s)", ch, x.String())
- return
- }
-
- if x == nil {
- fmt.Fprint(s, "<nil>")
- return
- }
-
- // determine sign character
- sign := ""
- switch {
- case x.neg:
- sign = "-"
- case s.Flag('+'): // supersedes ' ' when both specified
- sign = "+"
- case s.Flag(' '):
- sign = " "
- }
-
- // determine prefix characters for indicating output base
- prefix := ""
- if s.Flag('#') {
- switch ch {
- case 'b': // binary
- prefix = "0b"
- case 'o': // octal
- prefix = "0"
- case 'x': // hexadecimal
- prefix = "0x"
- case 'X':
- prefix = "0X"
- }
- }
- if ch == 'O' {
- prefix = "0o"
- }
-
- digits := x.abs.utoa(base)
- if ch == 'X' {
- // faster than bytes.ToUpper
- for i, d := range digits {
- if 'a' <= d && d <= 'z' {
- digits[i] = 'A' + (d - 'a')
- }
- }
- }
-
- // number of characters for the three classes of number padding
- var left int // space characters to left of digits for right justification ("%8d")
- var zeros int // zero characters (actually cs[0]) as left-most digits ("%.8d")
- var right int // space characters to right of digits for left justification ("%-8d")
-
- // determine number padding from precision: the least number of digits to output
- precision, precisionSet := s.Precision()
- if precisionSet {
- switch {
- case len(digits) < precision:
- zeros = precision - len(digits) // count of zero padding
- case len(digits) == 1 && digits[0] == '0' && precision == 0:
- return // print nothing if zero value (x == 0) and zero precision ("." or ".0")
- }
- }
-
- // determine field pad from width: the least number of characters to output
- length := len(sign) + len(prefix) + zeros + len(digits)
- if width, widthSet := s.Width(); widthSet && length < width { // pad as specified
- switch d := width - length; {
- case s.Flag('-'):
- // pad on the right with spaces; supersedes '0' when both specified
- right = d
- case s.Flag('0') && !precisionSet:
- // pad with zeros unless precision also specified
- zeros = d
- default:
- // pad on the left with spaces
- left = d
- }
- }
-
- // print number as [left pad][sign][prefix][zero pad][digits][right pad]
- writeMultiple(s, " ", left)
- writeMultiple(s, sign, 1)
- writeMultiple(s, prefix, 1)
- writeMultiple(s, "0", zeros)
- s.Write(digits)
- writeMultiple(s, " ", right)
-}
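
A quick tour of the verbs and flags Format accepts, with the output shown in the comments:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	x := big.NewInt(255)
	fmt.Printf("%d %b %o %O %#x %X\n", x, x, x, x, x, x) // 255 11111111 377 0o377 0xff FF
	fmt.Printf("%08d|%-8d|%+d\n", x, x, x)               // 00000255|255     |+255
}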
-
-// scan sets z to the integer value corresponding to the longest possible prefix
-// read from r representing a signed integer number in a given conversion base.
-// It returns z, the actual conversion base used, and an error, if any. In the
-// error case, the value of z is undefined but the returned value is nil. The
-// syntax follows the syntax of integer literals in Go.
-//
-// The base argument must be 0 or a value from 2 through MaxBase. If the base
-// is 0, the string prefix determines the actual conversion base. A prefix of
-// ``0b'' or ``0B'' selects base 2; a ``0'', ``0o'', or ``0O'' prefix selects
-// base 8, and a ``0x'' or ``0X'' prefix selects base 16. Otherwise the selected
-// base is 10.
-//
-func (z *Int) scan(r io.ByteScanner, base int) (*Int, int, error) {
- // determine sign
- neg, err := scanSign(r)
- if err != nil {
- return nil, 0, err
- }
-
- // determine mantissa
- z.abs, base, _, err = z.abs.scan(r, base, false)
- if err != nil {
- return nil, base, err
- }
- z.neg = len(z.abs) > 0 && neg // 0 has no sign
-
- return z, base, nil
-}
-
-func scanSign(r io.ByteScanner) (neg bool, err error) {
- var ch byte
- if ch, err = r.ReadByte(); err != nil {
- return false, err
- }
- switch ch {
- case '-':
- neg = true
- case '+':
- // nothing to do
- default:
- r.UnreadByte()
- }
- return
-}
-
-// byteReader is a local wrapper around fmt.ScanState;
-// it implements the io.ByteScanner interface.
-type byteReader struct {
- fmt.ScanState
-}
-
-func (r byteReader) ReadByte() (byte, error) {
- ch, size, err := r.ReadRune()
- if size != 1 && err == nil {
- err = fmt.Errorf("invalid rune %#U", ch)
- }
- return byte(ch), err
-}
-
-func (r byteReader) UnreadByte() error {
- return r.UnreadRune()
-}
-
-var _ fmt.Scanner = intOne // *Int must implement fmt.Scanner
-
-// Scan is a support routine for fmt.Scanner; it sets z to the value of
-// the scanned number. It accepts the formats 'b' (binary), 'o' (octal),
-// 'd' (decimal), 'x' (lowercase hexadecimal), and 'X' (uppercase hexadecimal).
-func (z *Int) Scan(s fmt.ScanState, ch rune) error {
- s.SkipSpace() // skip leading space characters
- base := 0
- switch ch {
- case 'b':
- base = 2
- case 'o':
- base = 8
- case 'd':
- base = 10
- case 'x', 'X':
- base = 16
- case 's', 'v':
- // let scan determine the base
- default:
- return errors.New("Int.Scan: invalid verb")
- }
- _, _, err := z.scan(byteReader{s}, base)
- return err
-}
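
Scan makes *Int usable with fmt's scanning functions; a sketch for a value beyond the uint64 range:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	z := new(big.Int)
	if _, err := fmt.Sscan("18446744073709551617", z); err == nil {
		fmt.Println(z) // 2^64 + 1, which fits no built-in integer type
	}
}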
diff --git a/contrib/go/_std_1.18/src/math/big/intmarsh.go b/contrib/go/_std_1.18/src/math/big/intmarsh.go
deleted file mode 100644
index c1422e2710..0000000000
--- a/contrib/go/_std_1.18/src/math/big/intmarsh.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements encoding/decoding of Ints.
-
-package big
-
-import (
- "bytes"
- "fmt"
-)
-
-// Gob codec version. Permits backward-compatible changes to the encoding.
-const intGobVersion byte = 1
-
-// GobEncode implements the gob.GobEncoder interface.
-func (x *Int) GobEncode() ([]byte, error) {
- if x == nil {
- return nil, nil
- }
- buf := make([]byte, 1+len(x.abs)*_S) // extra byte for version and sign bit
- i := x.abs.bytes(buf) - 1 // i >= 0
- b := intGobVersion << 1 // make space for sign bit
- if x.neg {
- b |= 1
- }
- buf[i] = b
- return buf[i:], nil
-}
-
-// GobDecode implements the gob.GobDecoder interface.
-func (z *Int) GobDecode(buf []byte) error {
- if len(buf) == 0 {
- // Other side sent a nil or default value.
- *z = Int{}
- return nil
- }
- b := buf[0]
- if b>>1 != intGobVersion {
- return fmt.Errorf("Int.GobDecode: encoding version %d not supported", b>>1)
- }
- z.neg = b&1 != 0
- z.abs = z.abs.setBytes(buf[1:])
- return nil
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-func (x *Int) MarshalText() (text []byte, err error) {
- if x == nil {
- return []byte("<nil>"), nil
- }
- return x.abs.itoa(x.neg, 10), nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-func (z *Int) UnmarshalText(text []byte) error {
- if _, ok := z.setFromScanner(bytes.NewReader(text), 0); !ok {
- return fmt.Errorf("math/big: cannot unmarshal %q into a *big.Int", text)
- }
- return nil
-}
-
-// The JSON marshalers are only here for API backward compatibility
-// (programs that explicitly look for these two methods). JSON works
-// fine with the TextMarshaler only.
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *Int) MarshalJSON() ([]byte, error) {
- return x.MarshalText()
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (z *Int) UnmarshalJSON(text []byte) error {
- // Ignore null, like in the main JSON package.
- if string(text) == "null" {
- return nil
- }
- return z.UnmarshalText(text)
-}
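
The marshalers give *Int transparent gob, text and JSON support; a JSON round trip as a sketch (note that values are encoded as bare JSON numbers, not strings):

package main

import (
	"encoding/json"
	"fmt"
	"math/big"
)

type payload struct {
	N *big.Int `json:"n"`
}

func main() {
	out, _ := json.Marshal(payload{N: big.NewInt(42)})
	fmt.Println(string(out)) // {"n":42}

	var in payload
	_ = json.Unmarshal([]byte(`{"n":123456789012345678901234567890}`), &in)
	fmt.Println(in.N) // 123456789012345678901234567890, no float64 precision loss
}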
diff --git a/contrib/go/_std_1.18/src/math/big/nat.go b/contrib/go/_std_1.18/src/math/big/nat.go
deleted file mode 100644
index 140c619c8c..0000000000
--- a/contrib/go/_std_1.18/src/math/big/nat.go
+++ /dev/null
@@ -1,1244 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements unsigned multi-precision integers (natural
-// numbers). They are the building blocks for the implementation
-// of signed integers, rationals, and floating-point numbers.
-//
-// Caution: This implementation relies on the function "alias"
-// which assumes that (nat) slice capacities are never
-// changed (no 3-operand slice expressions). If that
-// changes, alias needs to be updated for correctness.
-
-package big
-
-import (
- "encoding/binary"
- "math/bits"
- "math/rand"
- "sync"
-)
-
-// An unsigned integer x of the form
-//
-// x = x[n-1]*_B^(n-1) + x[n-2]*_B^(n-2) + ... + x[1]*_B + x[0]
-//
-// with 0 <= x[i] < _B and 0 <= i < n is stored in a slice of length n,
-// with the digits x[i] as the slice elements.
-//
-// A number is normalized if the slice contains no leading 0 digits.
-// During arithmetic operations, denormalized values may occur but are
-// always normalized before returning the final result. The normalized
-// representation of 0 is the empty or nil slice (length = 0).
-//
-type nat []Word
-
-var (
- natOne = nat{1}
- natTwo = nat{2}
- natFive = nat{5}
- natTen = nat{10}
-)
-
-func (z nat) clear() {
- for i := range z {
- z[i] = 0
- }
-}
-
-func (z nat) norm() nat {
- i := len(z)
- for i > 0 && z[i-1] == 0 {
- i--
- }
- return z[0:i]
-}
-
-func (z nat) make(n int) nat {
- if n <= cap(z) {
- return z[:n] // reuse z
- }
- if n == 1 {
- // Most nats start small and stay that way; don't over-allocate.
- return make(nat, 1)
- }
- // Choosing a good value for e has significant performance impact
- // because it increases the chance that a value can be reused.
- const e = 4 // extra capacity
- return make(nat, n, n+e)
-}
-
-func (z nat) setWord(x Word) nat {
- if x == 0 {
- return z[:0]
- }
- z = z.make(1)
- z[0] = x
- return z
-}
-
-func (z nat) setUint64(x uint64) nat {
- // single-word value
- if w := Word(x); uint64(w) == x {
- return z.setWord(w)
- }
- // 2-word value
- z = z.make(2)
- z[1] = Word(x >> 32)
- z[0] = Word(x)
- return z
-}
-
-func (z nat) set(x nat) nat {
- z = z.make(len(x))
- copy(z, x)
- return z
-}
-
-func (z nat) add(x, y nat) nat {
- m := len(x)
- n := len(y)
-
- switch {
- case m < n:
- return z.add(y, x)
- case m == 0:
- // n == 0 because m >= n; result is 0
- return z[:0]
- case n == 0:
- // result is x
- return z.set(x)
- }
- // m > 0
-
- z = z.make(m + 1)
- c := addVV(z[0:n], x, y)
- if m > n {
- c = addVW(z[n:m], x[n:], c)
- }
- z[m] = c
-
- return z.norm()
-}
-
-func (z nat) sub(x, y nat) nat {
- m := len(x)
- n := len(y)
-
- switch {
- case m < n:
- panic("underflow")
- case m == 0:
- // n == 0 because m >= n; result is 0
- return z[:0]
- case n == 0:
- // result is x
- return z.set(x)
- }
- // m > 0
-
- z = z.make(m)
- c := subVV(z[0:n], x, y)
- if m > n {
- c = subVW(z[n:], x[n:], c)
- }
- if c != 0 {
- panic("underflow")
- }
-
- return z.norm()
-}
-
-func (x nat) cmp(y nat) (r int) {
- m := len(x)
- n := len(y)
- if m != n || m == 0 {
- switch {
- case m < n:
- r = -1
- case m > n:
- r = 1
- }
- return
- }
-
- i := m - 1
- for i > 0 && x[i] == y[i] {
- i--
- }
-
- switch {
- case x[i] < y[i]:
- r = -1
- case x[i] > y[i]:
- r = 1
- }
- return
-}
-
-func (z nat) mulAddWW(x nat, y, r Word) nat {
- m := len(x)
- if m == 0 || y == 0 {
- return z.setWord(r) // result is r
- }
- // m > 0
-
- z = z.make(m + 1)
- z[m] = mulAddVWW(z[0:m], x, y, r)
-
- return z.norm()
-}
-
-// basicMul multiplies x and y and leaves the result in z.
-// The (non-normalized) result is placed in z[0 : len(x) + len(y)].
-func basicMul(z, x, y nat) {
- z[0 : len(x)+len(y)].clear() // initialize z
- for i, d := range y {
- if d != 0 {
- z[len(x)+i] = addMulVVW(z[i:i+len(x)], x, d)
- }
- }
-}
-
-// montgomery computes z mod m = x*y*2**(-n*_W) mod m,
-// assuming k = -1/m mod 2**_W.
-// z is used for storing the result which is returned;
-// z must not alias x, y or m.
-// See Gueron, "Efficient Software Implementations of Modular Exponentiation".
-// https://eprint.iacr.org/2011/239.pdf
-// In the terminology of that paper, this is an "Almost Montgomery Multiplication":
-// x and y are required to satisfy 0 <= x, y < 2**(n*_W), and then the result
-// z is guaranteed to satisfy 0 <= z < 2**(n*_W), but it may not be < m.
-func (z nat) montgomery(x, y, m nat, k Word, n int) nat {
- // This code assumes x, y, m are all the same length, n.
- // (required by addMulVVW and the for loop).
- // It also assumes that x, y are already reduced mod m,
- // or else the result will not be properly reduced.
- if len(x) != n || len(y) != n || len(m) != n {
- panic("math/big: mismatched montgomery number lengths")
- }
- z = z.make(n * 2)
- z.clear()
- var c Word
- for i := 0; i < n; i++ {
- d := y[i]
- c2 := addMulVVW(z[i:n+i], x, d)
- t := z[i] * k
- c3 := addMulVVW(z[i:n+i], m, t)
- cx := c + c2
- cy := cx + c3
- z[n+i] = cy
- if cx < c2 || cy < c3 {
- c = 1
- } else {
- c = 0
- }
- }
- if c != 0 {
- subVV(z[:n], z[n:], m)
- } else {
- copy(z[:n], z[n:])
- }
- return z[:n]
-}
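
The Montgomery identity above can be checked at the value level with big.Int itself; a sketch that emulates one multiplication in Montgomery form, where R stands in for 2^(n*_W) and toMont is an illustrative closure (this mirrors the math, not the word-level loop):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	m := big.NewInt(101)                     // odd modulus, as montgomery requires
	R := new(big.Int).Lsh(big.NewInt(1), 64) // R = 2^64, coprime to any odd m
	Rinv := new(big.Int).ModInverse(R, m)

	toMont := func(v int64) *big.Int { // v*R mod m
		t := new(big.Int).Mul(big.NewInt(v), R)
		return t.Mod(t, m)
	}
	xm, ym := toMont(7), toMont(9)

	zm := new(big.Int).Mul(xm, ym) // Montgomery product: xm*ym*R^-1 mod m
	zm.Mul(zm, Rinv).Mod(zm, m)

	z := new(big.Int).Mul(zm, Rinv) // leave Montgomery form
	fmt.Println(z.Mod(z, m))        // 63 == 7*9 mod 101
}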
-
-// Fast version of z[0:n+n>>1].add(z[0:n+n>>1], x[0:n]) w/o bounds checks.
-// Factored out for readability - do not use outside karatsuba.
-func karatsubaAdd(z, x nat, n int) {
- if c := addVV(z[0:n], z, x); c != 0 {
- addVW(z[n:n+n>>1], z[n:], c)
- }
-}
-
-// Like karatsubaAdd, but does subtract.
-func karatsubaSub(z, x nat, n int) {
- if c := subVV(z[0:n], z, x); c != 0 {
- subVW(z[n:n+n>>1], z[n:], c)
- }
-}
-
-// Operands that are shorter than karatsubaThreshold are multiplied using
-// "grade school" multiplication; for longer operands the Karatsuba algorithm
-// is used.
-var karatsubaThreshold = 40 // computed by calibrate_test.go
-
-// karatsuba multiplies x and y and leaves the result in z.
-// Both x and y must have the same length n and n must be a
-// power of 2. The result vector z must have len(z) >= 6*n.
-// The (non-normalized) result is placed in z[0 : 2*n].
-func karatsuba(z, x, y nat) {
- n := len(y)
-
- // Switch to basic multiplication if numbers are odd or small.
- // (n is always even if karatsubaThreshold is even, but be
- // conservative)
- if n&1 != 0 || n < karatsubaThreshold || n < 2 {
- basicMul(z, x, y)
- return
- }
- // n&1 == 0 && n >= karatsubaThreshold && n >= 2
-
- // Karatsuba multiplication is based on the observation that
- // for two numbers x and y with:
- //
- // x = x1*b + x0
- // y = y1*b + y0
- //
- // the product x*y can be obtained with 3 products z2, z1, z0
- // instead of 4:
- //
- // x*y = x1*y1*b*b + (x1*y0 + x0*y1)*b + x0*y0
- // = z2*b*b + z1*b + z0
- //
- // with:
- //
- // xd = x1 - x0
- // yd = y0 - y1
- //
- // z1 = xd*yd + z2 + z0
- // = (x1-x0)*(y0 - y1) + z2 + z0
- // = x1*y0 - x1*y1 - x0*y0 + x0*y1 + z2 + z0
- // = x1*y0 - z2 - z0 + x0*y1 + z2 + z0
- // = x1*y0 + x0*y1
-
- // split x, y into "digits"
- n2 := n >> 1 // n2 >= 1
- x1, x0 := x[n2:], x[0:n2] // x = x1*b + x0
- y1, y0 := y[n2:], y[0:n2] // y = y1*b + y0
-
- // z is used for the result and temporary storage:
- //
- // 6*n 5*n 4*n 3*n 2*n 1*n 0*n
- // z = [z2 copy|z0 copy| xd*yd | yd:xd | x1*y1 | x0*y0 ]
- //
- // For each recursive call of karatsuba, an unused slice of
- // z is passed in that has (at least) half the length of the
- // caller's z.
-
- // compute z0 and z2 with the result "in place" in z
- karatsuba(z, x0, y0) // z0 = x0*y0
- karatsuba(z[n:], x1, y1) // z2 = x1*y1
-
- // compute xd (or the negative value if underflow occurs)
- s := 1 // sign of product xd*yd
- xd := z[2*n : 2*n+n2]
- if subVV(xd, x1, x0) != 0 { // x1-x0
- s = -s
- subVV(xd, x0, x1) // x0-x1
- }
-
- // compute yd (or the negative value if underflow occurs)
- yd := z[2*n+n2 : 3*n]
- if subVV(yd, y0, y1) != 0 { // y0-y1
- s = -s
- subVV(yd, y1, y0) // y1-y0
- }
-
- // p = (x1-x0)*(y0-y1) == x1*y0 - x1*y1 - x0*y0 + x0*y1 for s > 0
- // p = (x0-x1)*(y0-y1) == x0*y0 - x0*y1 - x1*y0 + x1*y1 for s < 0
- p := z[n*3:]
- karatsuba(p, xd, yd)
-
- // save original z2:z0
- // (ok to use upper half of z since we're done recursing)
- r := z[n*4:]
- copy(r, z[:n*2])
-
- // add up all partial products
- //
- // 2*n n 0
- // z = [ z2 | z0 ]
- // + [ z0 ]
- // + [ z2 ]
- // + [ p ]
- //
- karatsubaAdd(z[n2:], r, n)
- karatsubaAdd(z[n2:], r[n:], n)
- if s > 0 {
- karatsubaAdd(z[n2:], p, n)
- } else {
- karatsubaSub(z[n2:], p, n)
- }
-}
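
The three-product identity in the comment is easy to verify with plain integers; a sketch with base b = 100, so x = 1234 splits as x1 = 12, x0 = 34:

package main

import "fmt"

func main() {
	x1, x0 := 12, 34 // x = 1234 = x1*100 + x0
	y1, y0 := 56, 78 // y = 5678 = y1*100 + y0

	z2 := x1 * y1              // 672
	z0 := x0 * y0              // 2652
	p := (x1 - x0) * (y0 - y1) // xd*yd = (-22)*22 = -484
	z1 := p + z2 + z0          // x1*y0 + x0*y1 = 2840, from only 3 multiplications

	fmt.Println(z2*100*100+z1*100+z0 == 1234*5678) // true
}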
-
-// alias reports whether x and y share the same base array.
-// Note: alias assumes that the capacity of underlying arrays
-// is never changed for nat values; i.e. that there are
-// no 3-operand slice expressions in this code (or worse,
-// reflect-based operations to the same effect).
-func alias(x, y nat) bool {
- return cap(x) > 0 && cap(y) > 0 && &x[0:cap(x)][cap(x)-1] == &y[0:cap(y)][cap(y)-1]
-}
-
-// addAt implements z += x<<(_W*i); z must be long enough.
-// (we don't use nat.add because we need z to stay the same
-// slice, and we don't need to normalize z after each addition)
-func addAt(z, x nat, i int) {
- if n := len(x); n > 0 {
- if c := addVV(z[i:i+n], z[i:], x); c != 0 {
- j := i + n
- if j < len(z) {
- addVW(z[j:], z[j:], c)
- }
- }
- }
-}
-
-func max(x, y int) int {
- if x > y {
- return x
- }
- return y
-}
-
-// karatsubaLen computes an approximation to the maximum k <= n such that
-// k = p<<i for a number p <= threshold and an i >= 0. Thus, the
-// result is the largest number that can be divided repeatedly by 2 before
-// becoming about the value of threshold.
-func karatsubaLen(n, threshold int) int {
- i := uint(0)
- for n > threshold {
- n >>= 1
- i++
- }
- return n << i
-}
-
-func (z nat) mul(x, y nat) nat {
- m := len(x)
- n := len(y)
-
- switch {
- case m < n:
- return z.mul(y, x)
- case m == 0 || n == 0:
- return z[:0]
- case n == 1:
- return z.mulAddWW(x, y[0], 0)
- }
- // m >= n > 1
-
- // determine if z can be reused
- if alias(z, x) || alias(z, y) {
- z = nil // z is an alias for x or y - cannot reuse
- }
-
- // use basic multiplication if the numbers are small
- if n < karatsubaThreshold {
- z = z.make(m + n)
- basicMul(z, x, y)
- return z.norm()
- }
- // m >= n && n >= karatsubaThreshold && n >= 2
-
- // determine Karatsuba length k such that
- //
- // x = xh*b + x0 (0 <= x0 < b)
- // y = yh*b + y0 (0 <= y0 < b)
- // b = 1<<(_W*k) ("base" of digits xi, yi)
- //
- k := karatsubaLen(n, karatsubaThreshold)
- // k <= n
-
- // multiply x0 and y0 via Karatsuba
- x0 := x[0:k] // x0 is not normalized
- y0 := y[0:k] // y0 is not normalized
- z = z.make(max(6*k, m+n)) // enough space for karatsuba of x0*y0 and full result of x*y
- karatsuba(z, x0, y0)
- z = z[0 : m+n] // z has final length but may be incomplete
- z[2*k:].clear() // upper portion of z is garbage (and 2*k <= m+n since k <= n <= m)
-
- // If xh != 0 or yh != 0, add the missing terms to z. For
- //
- // xh = xi*b^i + ... + x2*b^2 + x1*b (0 <= xi < b)
- // yh = y1*b (0 <= y1 < b)
- //
- // the missing terms are
- //
- // x0*y1*b and xi*y0*b^i, xi*y1*b^(i+1) for i > 0
- //
- // since all the yi for i > 1 are 0 by choice of k: If any of them
- // were > 0, then yh >= b^2 and thus y >= b^2. Then k' = k*2 would
- // be a larger valid threshold contradicting the assumption about k.
- //
- if k < n || m != n {
- tp := getNat(3 * k)
- t := *tp
-
- // add x0*y1*b
- x0 := x0.norm()
- y1 := y[k:] // y1 is normalized because y is
- t = t.mul(x0, y1) // update t so we don't lose t's underlying array
- addAt(z, t, k)
-
- // add xi*y0<<i, xi*y1*b<<(i+k)
- y0 := y0.norm()
- for i := k; i < len(x); i += k {
- xi := x[i:]
- if len(xi) > k {
- xi = xi[:k]
- }
- xi = xi.norm()
- t = t.mul(xi, y0)
- addAt(z, t, i)
- t = t.mul(xi, y1)
- addAt(z, t, i+k)
- }
-
- putNat(tp)
- }
-
- return z.norm()
-}
-
-// basicSqr sets z = x*x. It is about twice as fast as basicMul for large
-// arguments, but slower for small arguments due to overhead.
-// Requirements: len(x) > 0, len(z) == 2*len(x)
-// The (non-normalized) result is placed in z.
-func basicSqr(z, x nat) {
- n := len(x)
- tp := getNat(2 * n)
- t := *tp // temporary variable to hold the products
- t.clear()
- z[1], z[0] = mulWW(x[0], x[0]) // the initial square
- for i := 1; i < n; i++ {
- d := x[i]
- // z collects the squares x[i] * x[i]
- z[2*i+1], z[2*i] = mulWW(d, d)
- // t collects the products x[i] * x[j] where j < i
- t[2*i] = addMulVVW(t[i:2*i], x[0:i], d)
- }
- t[2*n-1] = shlVU(t[1:2*n-1], t[1:2*n-1], 1) // double the j < i products
- addVV(z, z, t) // combine the result
- putNat(tp)
-}
-
-// karatsubaSqr squares x and leaves the result in z.
-// len(x) must be a power of 2 and len(z) >= 6*len(x).
-// The (non-normalized) result is placed in z[0 : 2*len(x)].
-//
-// The algorithm and the layout of z are the same as for karatsuba.
-func karatsubaSqr(z, x nat) {
- n := len(x)
-
- if n&1 != 0 || n < karatsubaSqrThreshold || n < 2 {
- basicSqr(z[:2*n], x)
- return
- }
-
- n2 := n >> 1
- x1, x0 := x[n2:], x[0:n2]
-
- karatsubaSqr(z, x0)
- karatsubaSqr(z[n:], x1)
-
- // s = sign(xd*yd) == -1 for xd != 0; s == 1 for xd == 0
- xd := z[2*n : 2*n+n2]
- if subVV(xd, x1, x0) != 0 {
- subVV(xd, x0, x1)
- }
-
- p := z[n*3:]
- karatsubaSqr(p, xd)
-
- r := z[n*4:]
- copy(r, z[:n*2])
-
- karatsubaAdd(z[n2:], r, n)
- karatsubaAdd(z[n2:], r[n:], n)
- karatsubaSub(z[n2:], p, n) // s == -1 for p != 0; s == 1 for p == 0
-}
-
-// Operands that are shorter than basicSqrThreshold are squared using
-// "grade school" multiplication; for operands longer than karatsubaSqrThreshold
-// we use the Karatsuba algorithm optimized for x == y.
-var basicSqrThreshold = 20 // computed by calibrate_test.go
-var karatsubaSqrThreshold = 260 // computed by calibrate_test.go
-
-// z = x*x
-func (z nat) sqr(x nat) nat {
- n := len(x)
- switch {
- case n == 0:
- return z[:0]
- case n == 1:
- d := x[0]
- z = z.make(2)
- z[1], z[0] = mulWW(d, d)
- return z.norm()
- }
-
- if alias(z, x) {
- z = nil // z is an alias for x - cannot reuse
- }
-
- if n < basicSqrThreshold {
- z = z.make(2 * n)
- basicMul(z, x, x)
- return z.norm()
- }
- if n < karatsubaSqrThreshold {
- z = z.make(2 * n)
- basicSqr(z, x)
- return z.norm()
- }
-
- // Use Karatsuba multiplication optimized for x == y.
- // The algorithm and layout of z are the same as for mul.
-
- // z = (x1*b + x0)^2 = x1^2*b^2 + 2*x1*x0*b + x0^2
-
- k := karatsubaLen(n, karatsubaSqrThreshold)
-
- x0 := x[0:k]
- z = z.make(max(6*k, 2*n))
- karatsubaSqr(z, x0) // z = x0^2
- z = z[0 : 2*n]
- z[2*k:].clear()
-
- if k < n {
- tp := getNat(2 * k)
- t := *tp
- x0 := x0.norm()
- x1 := x[k:]
- t = t.mul(x0, x1)
- addAt(z, t, k)
- addAt(z, t, k) // z = 2*x1*x0*b + x0^2
- t = t.sqr(x1)
- addAt(z, t, 2*k) // z = x1^2*b^2 + 2*x1*x0*b + x0^2
- putNat(tp)
- }
-
- return z.norm()
-}
-
-// mulRange computes the product of all the unsigned integers in the
-// range [a, b] inclusively. If a > b (empty range), the result is 1.
-func (z nat) mulRange(a, b uint64) nat {
- switch {
- case a == 0:
- // cut long ranges short (optimization)
- return z.setUint64(0)
- case a > b:
- return z.setUint64(1)
- case a == b:
- return z.setUint64(a)
- case a+1 == b:
- return z.mul(nat(nil).setUint64(a), nat(nil).setUint64(b))
- }
- m := (a + b) / 2
- return z.mul(nat(nil).mulRange(a, m), nat(nil).mulRange(m+1, b))
-}
-
-// getNat returns a *nat of len n. The contents are not guaranteed to be zeroed.
-// The pool holds *nat to avoid allocation when converting to interface{}.
-func getNat(n int) *nat {
- var z *nat
- if v := natPool.Get(); v != nil {
- z = v.(*nat)
- }
- if z == nil {
- z = new(nat)
- }
- *z = z.make(n)
- return z
-}
-
-func putNat(x *nat) {
- natPool.Put(x)
-}
-
-var natPool sync.Pool
-
-// Length of x in bits. x must be normalized.
-func (x nat) bitLen() int {
- if i := len(x) - 1; i >= 0 {
- return i*_W + bits.Len(uint(x[i]))
- }
- return 0
-}
-
-// trailingZeroBits returns the number of consecutive least significant zero
-// bits of x.
-func (x nat) trailingZeroBits() uint {
- if len(x) == 0 {
- return 0
- }
- var i uint
- for x[i] == 0 {
- i++
- }
- // x[i] != 0
- return i*_W + uint(bits.TrailingZeros(uint(x[i])))
-}
-
-func same(x, y nat) bool {
- return len(x) == len(y) && len(x) > 0 && &x[0] == &y[0]
-}
-
-// z = x << s
-func (z nat) shl(x nat, s uint) nat {
- if s == 0 {
- if same(z, x) {
- return z
- }
- if !alias(z, x) {
- return z.set(x)
- }
- }
-
- m := len(x)
- if m == 0 {
- return z[:0]
- }
- // m > 0
-
- n := m + int(s/_W)
- z = z.make(n + 1)
- z[n] = shlVU(z[n-m:n], x, s%_W)
- z[0 : n-m].clear()
-
- return z.norm()
-}
-
-// z = x >> s
-func (z nat) shr(x nat, s uint) nat {
- if s == 0 {
- if same(z, x) {
- return z
- }
- if !alias(z, x) {
- return z.set(x)
- }
- }
-
- m := len(x)
- n := m - int(s/_W)
- if n <= 0 {
- return z[:0]
- }
- // n > 0
-
- z = z.make(n)
- shrVU(z, x[m-n:], s%_W)
-
- return z.norm()
-}
-
-func (z nat) setBit(x nat, i uint, b uint) nat {
- j := int(i / _W)
- m := Word(1) << (i % _W)
- n := len(x)
- switch b {
- case 0:
- z = z.make(n)
- copy(z, x)
- if j >= n {
- // no need to grow
- return z
- }
- z[j] &^= m
- return z.norm()
- case 1:
- if j >= n {
- z = z.make(j + 1)
- z[n:].clear()
- } else {
- z = z.make(n)
- }
- copy(z, x)
- z[j] |= m
- // no need to normalize
- return z
- }
- panic("set bit is not 0 or 1")
-}
-
-// bit returns the value of the i'th bit, with lsb == bit 0.
-func (x nat) bit(i uint) uint {
- j := i / _W
- if j >= uint(len(x)) {
- return 0
- }
- // 0 <= j < len(x)
- return uint(x[j] >> (i % _W) & 1)
-}
-
-// sticky returns 1 if there's a 1 bit within the
-// i least significant bits, otherwise it returns 0.
-func (x nat) sticky(i uint) uint {
- j := i / _W
- if j >= uint(len(x)) {
- if len(x) == 0 {
- return 0
- }
- return 1
- }
- // 0 <= j < len(x)
- for _, x := range x[:j] {
- if x != 0 {
- return 1
- }
- }
- if x[j]<<(_W-i%_W) != 0 {
- return 1
- }
- return 0
-}
-
-func (z nat) and(x, y nat) nat {
- m := len(x)
- n := len(y)
- if m > n {
- m = n
- }
- // m <= n
-
- z = z.make(m)
- for i := 0; i < m; i++ {
- z[i] = x[i] & y[i]
- }
-
- return z.norm()
-}
-
-func (z nat) andNot(x, y nat) nat {
- m := len(x)
- n := len(y)
- if n > m {
- n = m
- }
- // m >= n
-
- z = z.make(m)
- for i := 0; i < n; i++ {
- z[i] = x[i] &^ y[i]
- }
- copy(z[n:m], x[n:m])
-
- return z.norm()
-}
-
-func (z nat) or(x, y nat) nat {
- m := len(x)
- n := len(y)
- s := x
- if m < n {
- n, m = m, n
- s = y
- }
- // m >= n
-
- z = z.make(m)
- for i := 0; i < n; i++ {
- z[i] = x[i] | y[i]
- }
- copy(z[n:m], s[n:m])
-
- return z.norm()
-}
-
-func (z nat) xor(x, y nat) nat {
- m := len(x)
- n := len(y)
- s := x
- if m < n {
- n, m = m, n
- s = y
- }
- // m >= n
-
- z = z.make(m)
- for i := 0; i < n; i++ {
- z[i] = x[i] ^ y[i]
- }
- copy(z[n:m], s[n:m])
-
- return z.norm()
-}
-
-// random creates a random integer in [0..limit), using the space in z if
-// possible. n is the bit length of limit.
-func (z nat) random(rand *rand.Rand, limit nat, n int) nat {
- if alias(z, limit) {
- z = nil // z is an alias for limit - cannot reuse
- }
- z = z.make(len(limit))
-
- bitLengthOfMSW := uint(n % _W)
- if bitLengthOfMSW == 0 {
- bitLengthOfMSW = _W
- }
- mask := Word((1 << bitLengthOfMSW) - 1)
-
- for {
- switch _W {
- case 32:
- for i := range z {
- z[i] = Word(rand.Uint32())
- }
- case 64:
- for i := range z {
- z[i] = Word(rand.Uint32()) | Word(rand.Uint32())<<32
- }
- default:
- panic("unknown word size")
- }
- z[len(limit)-1] &= mask
- if z.cmp(limit) < 0 {
- break
- }
- }
-
- return z.norm()
-}
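-
-// A note on the rejection loop above: the mask trims the candidate to
-// limit's bit length n, so each trial is uniform in [0, 2**n) with
-// limit >= 2**(n-1); a trial therefore succeeds with probability at
-// least 1/2, and the loop runs fewer than two iterations on average.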
-
-// If m != 0 (i.e., len(m) != 0), expNN sets z to x**y mod m;
-// otherwise it sets z to x**y. The result is the value of z.
-func (z nat) expNN(x, y, m nat) nat {
- if alias(z, x) || alias(z, y) {
- // We cannot allow in-place modification of x or y.
- z = nil
- }
-
- // x**y mod 1 == 0
- if len(m) == 1 && m[0] == 1 {
- return z.setWord(0)
- }
- // m == 0 || m > 1
-
- // x**0 == 1
- if len(y) == 0 {
- return z.setWord(1)
- }
- // y > 0
-
- // x**1 mod m == x mod m
- if len(y) == 1 && y[0] == 1 && len(m) != 0 {
- _, z = nat(nil).div(z, x, m)
- return z
- }
- // y > 1
-
- if len(m) != 0 {
- // We likely end up being as long as the modulus.
- z = z.make(len(m))
- }
- z = z.set(x)
-
- // If the base is non-trivial and the exponent is large, we use
- // 4-bit, windowed exponentiation. This involves precomputing 14 values
- // (x^2...x^15) but then reduces the number of multiply-reduces by a
- // third. Even for a 32-bit exponent, this reduces the number of
- // operations. Uses Montgomery method for odd moduli.
- if x.cmp(natOne) > 0 && len(y) > 1 && len(m) > 0 {
- if m[0]&1 == 1 {
- return z.expNNMontgomery(x, y, m)
- }
- return z.expNNWindowed(x, y, m)
- }
-
- v := y[len(y)-1] // v > 0 because y is normalized and y > 0
- shift := nlz(v) + 1
- v <<= shift
- var q nat
-
- const mask = 1 << (_W - 1)
-
- // We walk through the bits of the exponent one by one. Each time we
- // see a bit, we square, thus doubling the power. If the bit is a one,
- // we also multiply by x, thus adding one to the power.
-
- w := _W - int(shift)
- // zz and r are used to avoid allocating in mul and div as
- // otherwise the arguments would alias.
- var zz, r nat
- for j := 0; j < w; j++ {
- zz = zz.sqr(z)
- zz, z = z, zz
-
- if v&mask != 0 {
- zz = zz.mul(z, x)
- zz, z = z, zz
- }
-
- if len(m) != 0 {
- zz, r = zz.div(r, z, m)
- zz, r, q, z = q, z, zz, r
- }
-
- v <<= 1
- }
-
- for i := len(y) - 2; i >= 0; i-- {
- v = y[i]
-
- for j := 0; j < _W; j++ {
- zz = zz.sqr(z)
- zz, z = z, zz
-
- if v&mask != 0 {
- zz = zz.mul(z, x)
- zz, z = z, zz
- }
-
- if len(m) != 0 {
- zz, r = zz.div(r, z, m)
- zz, r, q, z = q, z, zz, r
- }
-
- v <<= 1
- }
- }
-
- return z.norm()
-}
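-
-// A small worked trace of the square-and-multiply loop above: for
-// y = 0b1011, z starts at x (the top bit) and the remaining bits 0, 1, 1
-// give z = z*z = x**2, then z = z*z*x = x**5, then z = z*z*x = x**11;
-// every squaring doubles the exponent and every multiply by x adds one.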
-
-// expNNWindowed calculates x**y mod m using a fixed, 4-bit window.
-func (z nat) expNNWindowed(x, y, m nat) nat {
- // zz and r are used to avoid allocating in mul and div as otherwise
- // the arguments would alias.
- var zz, r nat
-
- const n = 4
- // powers[i] contains x^i.
- var powers [1 << n]nat
- powers[0] = natOne
- powers[1] = x
- for i := 2; i < 1<<n; i += 2 {
- p2, p, p1 := &powers[i/2], &powers[i], &powers[i+1]
- *p = p.sqr(*p2)
- zz, r = zz.div(r, *p, m)
- *p, r = r, *p
- *p1 = p1.mul(*p, x)
- zz, r = zz.div(r, *p1, m)
- *p1, r = r, *p1
- }
-
- z = z.setWord(1)
-
- for i := len(y) - 1; i >= 0; i-- {
- yi := y[i]
- for j := 0; j < _W; j += n {
- if i != len(y)-1 || j != 0 {
- // Unrolled loop for significant performance
- // gain. Use go test -bench=".*" in crypto/rsa
- // to check performance before making changes.
- zz = zz.sqr(z)
- zz, z = z, zz
- zz, r = zz.div(r, z, m)
- z, r = r, z
-
- zz = zz.sqr(z)
- zz, z = z, zz
- zz, r = zz.div(r, z, m)
- z, r = r, z
-
- zz = zz.sqr(z)
- zz, z = z, zz
- zz, r = zz.div(r, z, m)
- z, r = r, z
-
- zz = zz.sqr(z)
- zz, z = z, zz
- zz, r = zz.div(r, z, m)
- z, r = r, z
- }
-
- zz = zz.mul(z, powers[yi>>(_W-n)])
- zz, z = z, zz
- zz, r = zz.div(r, z, m)
- z, r = r, z
-
- yi <<= n
- }
- }
-
- return z.norm()
-}
-
-// expNNMontgomery calculates x**y mod m using a fixed, 4-bit window.
-// Uses Montgomery representation.
-func (z nat) expNNMontgomery(x, y, m nat) nat {
- numWords := len(m)
-
- // We want the lengths of x and m to be equal.
- // It is OK if x >= m as long as len(x) == len(m).
- if len(x) > numWords {
- _, x = nat(nil).div(nil, x, m)
- // Note: now len(x) <= numWords, not guaranteed ==.
- }
- if len(x) < numWords {
- rr := make(nat, numWords)
- copy(rr, x)
- x = rr
- }
-
-	// Ideally the precomputations would be performed outside, and reused.
- // k0 = -m**-1 mod 2**_W. Algorithm from: Dumas, J.G. "On Newton–Raphson
- // Iteration for Multiplicative Inverses Modulo Prime Powers".
- k0 := 2 - m[0]
- t := m[0] - 1
- for i := 1; i < _W; i <<= 1 {
- t *= t
- k0 *= (t + 1)
- }
- k0 = -k0
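-	// The loop above is Newton–Raphson (Hensel) lifting: each pass doubles
-	// the number of correct low-order bits of the inverse. Illustrative
-	// check with an 8-bit word and m[0] = 3: k0 starts at -1 (255), the
-	// passes give 251, then 171 = 3**-1 mod 256 (3*171 = 513), which the
-	// last pass leaves unchanged; negating yields k0 = 85 = -3**-1 mod 256.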
-
- // RR = 2**(2*_W*len(m)) mod m
- RR := nat(nil).setWord(1)
- zz := nat(nil).shl(RR, uint(2*numWords*_W))
- _, RR = nat(nil).div(RR, zz, m)
- if len(RR) < numWords {
- zz = zz.make(numWords)
- copy(zz, RR)
- RR = zz
- }
-	// one = 1, with the same length as m
- one := make(nat, numWords)
- one[0] = 1
-
- const n = 4
- // powers[i] contains x^i
- var powers [1 << n]nat
- powers[0] = powers[0].montgomery(one, RR, m, k0, numWords)
- powers[1] = powers[1].montgomery(x, RR, m, k0, numWords)
- for i := 2; i < 1<<n; i++ {
- powers[i] = powers[i].montgomery(powers[i-1], powers[1], m, k0, numWords)
- }
-
- // initialize z = 1 (Montgomery 1)
- z = z.make(numWords)
- copy(z, powers[0])
-
- zz = zz.make(numWords)
-
- // same windowed exponent, but with Montgomery multiplications
- for i := len(y) - 1; i >= 0; i-- {
- yi := y[i]
- for j := 0; j < _W; j += n {
- if i != len(y)-1 || j != 0 {
- zz = zz.montgomery(z, z, m, k0, numWords)
- z = z.montgomery(zz, zz, m, k0, numWords)
- zz = zz.montgomery(z, z, m, k0, numWords)
- z = z.montgomery(zz, zz, m, k0, numWords)
- }
- zz = zz.montgomery(z, powers[yi>>(_W-n)], m, k0, numWords)
- z, zz = zz, z
- yi <<= n
- }
- }
- // convert to regular number
- zz = zz.montgomery(z, one, m, k0, numWords)
-
- // One last reduction, just in case.
- // See golang.org/issue/13907.
- if zz.cmp(m) >= 0 {
- // Common case is m has high bit set; in that case,
- // since zz is the same length as m, there can be just
- // one multiple of m to remove. Just subtract.
- // We think that the subtract should be sufficient in general,
- // so do that unconditionally, but double-check,
- // in case our beliefs are wrong.
- // The div is not expected to be reached.
- zz = zz.sub(zz, m)
- if zz.cmp(m) >= 0 {
- _, zz = nat(nil).div(nil, zz, m)
- }
- }
-
- return zz.norm()
-}
-
-// bytes writes the value of z into buf using big-endian encoding.
-// The value of z is encoded in the slice buf[i:]. If the value of z
-// cannot be represented in buf, bytes panics. The number i of unused
-// bytes at the beginning of buf is returned as result.
-func (z nat) bytes(buf []byte) (i int) {
- i = len(buf)
- for _, d := range z {
- for j := 0; j < _S; j++ {
- i--
- if i >= 0 {
- buf[i] = byte(d)
- } else if byte(d) != 0 {
- panic("math/big: buffer too small to fit value")
- }
- d >>= 8
- }
- }
-
- if i < 0 {
- i = 0
- }
- for i < len(buf) && buf[i] == 0 {
- i++
- }
-
- return
-}
-
-// bigEndianWord returns the contents of buf interpreted as a big-endian encoded Word value.
-func bigEndianWord(buf []byte) Word {
- if _W == 64 {
- return Word(binary.BigEndian.Uint64(buf))
- }
- return Word(binary.BigEndian.Uint32(buf))
-}
-
-// setBytes interprets buf as the bytes of a big-endian unsigned
-// integer, sets z to that value, and returns z.
-func (z nat) setBytes(buf []byte) nat {
- z = z.make((len(buf) + _S - 1) / _S)
-
- i := len(buf)
- for k := 0; i >= _S; k++ {
- z[k] = bigEndianWord(buf[i-_S : i])
- i -= _S
- }
- if i > 0 {
- var d Word
- for s := uint(0); i > 0; s += 8 {
- d |= Word(buf[i-1]) << s
- i--
- }
- z[len(z)-1] = d
- }
-
- return z.norm()
-}
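-
-// Round-trip illustration: setBytes([]byte{0x01, 0x00}) yields the value
-// 256, and bytes writes it back big-endian with leading zero bytes
-// skipped, returning the index of the 0x01 byte.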
-
-// sqrt sets z = ⌊√x⌋
-func (z nat) sqrt(x nat) nat {
- if x.cmp(natOne) <= 0 {
- return z.set(x)
- }
- if alias(z, x) {
- z = nil
- }
-
- // Start with value known to be too large and repeat "z = ⌊(z + ⌊x/z⌋)/2⌋" until it stops getting smaller.
- // See Brent and Zimmermann, Modern Computer Arithmetic, Algorithm 1.13 (SqrtInt).
- // https://members.loria.fr/PZimmermann/mca/pub226.html
- // If x is one less than a perfect square, the sequence oscillates between the correct z and z+1;
- // otherwise it converges to the correct z and stays there.
- var z1, z2 nat
- z1 = z
- z1 = z1.setUint64(1)
- z1 = z1.shl(z1, uint(x.bitLen()+1)/2) // must be ≥ √x
- for n := 0; ; n++ {
- z2, _ = z2.div(nil, x, z1)
- z2 = z2.add(z2, z1)
- z2 = z2.shr(z2, 1)
- if z2.cmp(z1) >= 0 {
- // z1 is answer.
- // Figure out whether z1 or z2 is currently aliased to z by looking at loop count.
- if n&1 == 0 {
- return z1
- }
- return z.set(z1)
- }
- z1, z2 = z2, z1
- }
-}
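-
-// A short trace for x = 10: bitLen(10) = 4, so z1 starts at 1<<2 = 4 ≥ √10.
-// The first step gives z2 = (10/4 + 4)/2 = 3; the next gives (10/3 + 3)/2 = 3,
-// which is no longer smaller, so the result is ⌊√10⌋ = 3.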
diff --git a/contrib/go/_std_1.18/src/math/big/natconv.go b/contrib/go/_std_1.18/src/math/big/natconv.go
deleted file mode 100644
index 42d1cccf6f..0000000000
--- a/contrib/go/_std_1.18/src/math/big/natconv.go
+++ /dev/null
@@ -1,512 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements nat-to-string conversion functions.
-
-package big
-
-import (
- "errors"
- "fmt"
- "io"
- "math"
- "math/bits"
- "sync"
-)
-
-const digits = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
-
-// Note: MaxBase = len(digits), but it must remain an untyped rune constant
-// for API compatibility.
-
-// MaxBase is the largest number base accepted for string conversions.
-const MaxBase = 10 + ('z' - 'a' + 1) + ('Z' - 'A' + 1)
-const maxBaseSmall = 10 + ('z' - 'a' + 1)
-
-// maxPow returns (b**n, n) such that b**n is the largest power b**n <= _M.
-// For instance maxPow(10) == (1e19, 19) for 19 decimal digits in a 64-bit Word.
-// In other words, at most n digits in base b fit into a Word.
-// TODO(gri) replace this with a table, generated at build time.
-func maxPow(b Word) (p Word, n int) {
- p, n = b, 1 // assuming b <= _M
- for max := _M / b; p <= max; {
- // p == b**n && p <= max
- p *= b
- n++
- }
- // p == b**n && p <= _M
- return
-}
-
-// pow returns x**n for n > 0, and 1 otherwise.
-func pow(x Word, n int) (p Word) {
- // n == sum of bi * 2**i, for 0 <= i < imax, and bi is 0 or 1
- // thus x**n == product of x**(2**i) for all i where bi == 1
- // (Russian Peasant Method for exponentiation)
- p = 1
- for n > 0 {
- if n&1 != 0 {
- p *= x
- }
- x *= x
- n >>= 1
- }
- return
-}
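-
-// For example, pow(3, 5) walks the bits of 5 = 0b101: p picks up the
-// factors 3 (bit 0) and 81 = 3**4 (bit 2), giving 3**5 = 243 with three
-// squarings and two multiplies.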
-
-// scan errors
-var (
- errNoDigits = errors.New("number has no digits")
- errInvalSep = errors.New("'_' must separate successive digits")
-)
-
-// scan scans the number corresponding to the longest possible prefix
-// from r representing an unsigned number in a given conversion base.
-// scan returns the corresponding natural number res, the actual base b,
-// a digit count, and a read or syntax error err, if any.
-//
-// For base 0, an underscore character ``_'' may appear between a base
-// prefix and an adjacent digit, and between successive digits; such
-// underscores do not change the value of the number, or the returned
-// digit count. Incorrect placement of underscores is reported as an
-// error if there are no other errors. If base != 0, underscores are
-// not recognized and thus terminate scanning like any other character
-// that is not a valid radix point or digit.
-//
-// number = mantissa | prefix pmantissa .
-// prefix = "0" [ "b" | "B" | "o" | "O" | "x" | "X" ] .
-// mantissa = digits "." [ digits ] | digits | "." digits .
-// pmantissa = [ "_" ] digits "." [ digits ] | [ "_" ] digits | "." digits .
-// digits = digit { [ "_" ] digit } .
-// digit = "0" ... "9" | "a" ... "z" | "A" ... "Z" .
-//
-// Unless fracOk is set, the base argument must be 0 or a value between
-// 2 and MaxBase. If fracOk is set, the base argument must be one of
-// 0, 2, 8, 10, or 16. Providing an invalid base argument leads to a run-
-// time panic.
-//
-// For base 0, the number prefix determines the actual base: A prefix of
-// ``0b'' or ``0B'' selects base 2, ``0o'' or ``0O'' selects base 8, and
-// ``0x'' or ``0X'' selects base 16. If fracOk is false, a ``0'' prefix
-// (immediately followed by digits) selects base 8 as well. Otherwise,
-// the selected base is 10 and no prefix is accepted.
-//
-// If fracOk is set, a period followed by a fractional part is permitted.
-// The result value is computed as if there were no period present; and
-// the count value is used to determine the fractional part.
-//
-// For bases <= 36, lower and upper case letters are considered the same:
-// The letters 'a' to 'z' and 'A' to 'Z' represent digit values 10 to 35.
-// For bases > 36, the upper case letters 'A' to 'Z' represent the digit
-// values 36 to 61.
-//
-// A result digit count > 0 corresponds to the number of (non-prefix) digits
-// parsed. A digit count <= 0 indicates the presence of a period (only
-// possible if fracOk is set), and -count is the number of fractional digits found.
-// In this case, the actual value of the scanned number is res * b**count.
-//
-func (z nat) scan(r io.ByteScanner, base int, fracOk bool) (res nat, b, count int, err error) {
- // reject invalid bases
- baseOk := base == 0 ||
- !fracOk && 2 <= base && base <= MaxBase ||
- fracOk && (base == 2 || base == 8 || base == 10 || base == 16)
- if !baseOk {
- panic(fmt.Sprintf("invalid number base %d", base))
- }
-
- // prev encodes the previously seen char: it is one
- // of '_', '0' (a digit), or '.' (anything else). A
- // valid separator '_' may only occur after a digit
- // and if base == 0.
- prev := '.'
- invalSep := false
-
- // one char look-ahead
- ch, err := r.ReadByte()
-
- // determine actual base
- b, prefix := base, 0
- if base == 0 {
- // actual base is 10 unless there's a base prefix
- b = 10
- if err == nil && ch == '0' {
- prev = '0'
- count = 1
- ch, err = r.ReadByte()
- if err == nil {
- // possibly one of 0b, 0B, 0o, 0O, 0x, 0X
- switch ch {
- case 'b', 'B':
- b, prefix = 2, 'b'
- case 'o', 'O':
- b, prefix = 8, 'o'
- case 'x', 'X':
- b, prefix = 16, 'x'
- default:
- if !fracOk {
- b, prefix = 8, '0'
- }
- }
- if prefix != 0 {
- count = 0 // prefix is not counted
- if prefix != '0' {
- ch, err = r.ReadByte()
- }
- }
- }
- }
- }
-
- // convert string
- // Algorithm: Collect digits in groups of at most n digits in di
- // and then use mulAddWW for every such group to add them to the
- // result.
- z = z[:0]
- b1 := Word(b)
- bn, n := maxPow(b1) // at most n digits in base b1 fit into Word
- di := Word(0) // 0 <= di < b1**i < bn
- i := 0 // 0 <= i < n
- dp := -1 // position of decimal point
- for err == nil {
- if ch == '.' && fracOk {
- fracOk = false
- if prev == '_' {
- invalSep = true
- }
- prev = '.'
- dp = count
- } else if ch == '_' && base == 0 {
- if prev != '0' {
- invalSep = true
- }
- prev = '_'
- } else {
- // convert rune into digit value d1
- var d1 Word
- switch {
- case '0' <= ch && ch <= '9':
- d1 = Word(ch - '0')
- case 'a' <= ch && ch <= 'z':
- d1 = Word(ch - 'a' + 10)
- case 'A' <= ch && ch <= 'Z':
- if b <= maxBaseSmall {
- d1 = Word(ch - 'A' + 10)
- } else {
- d1 = Word(ch - 'A' + maxBaseSmall)
- }
- default:
- d1 = MaxBase + 1
- }
- if d1 >= b1 {
- r.UnreadByte() // ch does not belong to number anymore
- break
- }
- prev = '0'
- count++
-
- // collect d1 in di
- di = di*b1 + d1
- i++
-
- // if di is "full", add it to the result
- if i == n {
- z = z.mulAddWW(z, bn, di)
- di = 0
- i = 0
- }
- }
-
- ch, err = r.ReadByte()
- }
-
- if err == io.EOF {
- err = nil
- }
-
- // other errors take precedence over invalid separators
- if err == nil && (invalSep || prev == '_') {
- err = errInvalSep
- }
-
- if count == 0 {
- // no digits found
- if prefix == '0' {
- // there was only the octal prefix 0 (possibly followed by separators and digits > 7);
- // interpret as decimal 0
- return z[:0], 10, 1, err
- }
- err = errNoDigits // fall through; result will be 0
- }
-
- // add remaining digits to result
- if i > 0 {
- z = z.mulAddWW(z, pow(b1, i), di)
- }
- res = z.norm()
-
- // adjust count for fraction, if any
- if dp >= 0 {
- // 0 <= dp <= count
- count = dp - count
- }
-
- return
-}
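-
-// Behavior sketch: scanning "0x1f" with base == 0 consumes the prefix and
-// returns res = 31, b = 16, count = 2; scanning "1.25" with base == 0 and
-// fracOk set returns res = 125, b = 10, count = -2 (two fractional
-// digits), i.e. the value 125 * 10**-2.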
-
-// utoa converts x to an ASCII representation in the given base;
-// base must be between 2 and MaxBase, inclusive.
-func (x nat) utoa(base int) []byte {
- return x.itoa(false, base)
-}
-
-// itoa is like utoa but it prepends a '-' if neg && x != 0.
-func (x nat) itoa(neg bool, base int) []byte {
- if base < 2 || base > MaxBase {
- panic("invalid base")
- }
-
- // x == 0
- if len(x) == 0 {
- return []byte("0")
- }
- // len(x) > 0
-
- // allocate buffer for conversion
- i := int(float64(x.bitLen())/math.Log2(float64(base))) + 1 // off by 1 at most
- if neg {
- i++
- }
- s := make([]byte, i)
-
- // convert power of two and non power of two bases separately
- if b := Word(base); b == b&-b {
- // shift is base b digit size in bits
- shift := uint(bits.TrailingZeros(uint(b))) // shift > 0 because b >= 2
- mask := Word(1<<shift - 1)
- w := x[0] // current word
- nbits := uint(_W) // number of unprocessed bits in w
-
- // convert less-significant words (include leading zeros)
- for k := 1; k < len(x); k++ {
- // convert full digits
- for nbits >= shift {
- i--
- s[i] = digits[w&mask]
- w >>= shift
- nbits -= shift
- }
-
- // convert any partial leading digit and advance to next word
- if nbits == 0 {
- // no partial digit remaining, just advance
- w = x[k]
- nbits = _W
- } else {
- // partial digit in current word w (== x[k-1]) and next word x[k]
- w |= x[k] << nbits
- i--
- s[i] = digits[w&mask]
-
- // advance
- w = x[k] >> (shift - nbits)
- nbits = _W - (shift - nbits)
- }
- }
-
- // convert digits of most-significant word w (omit leading zeros)
- for w != 0 {
- i--
- s[i] = digits[w&mask]
- w >>= shift
- }
-
- } else {
- bb, ndigits := maxPow(b)
-
- // construct table of successive squares of bb*leafSize to use in subdivisions
- // result (table != nil) <=> (len(x) > leafSize > 0)
- table := divisors(len(x), b, ndigits, bb)
-
- // preserve x, create local copy for use by convertWords
- q := nat(nil).set(x)
-
- // convert q to string s in base b
- q.convertWords(s, b, ndigits, bb, table)
-
- // strip leading zeros
- // (x != 0; thus s must contain at least one non-zero digit
- // and the loop will terminate)
- i = 0
- for s[i] == '0' {
- i++
- }
- }
-
- if neg {
- i--
- s[i] = '-'
- }
-
- return s[i:]
-}
-
-// Convert words of q to base b digits in s. If q is large, it is recursively "split in half"
-// by nat/nat division using tabulated divisors. Otherwise, it is converted iteratively using
-// repeated nat/Word division.
-//
-// The iterative method processes n Words by n divW() calls, each of which visits every Word in the
-// incrementally shortened q for a total of n + (n-1) + (n-2) ... + 2 + 1, or n(n+1)/2 divW()'s.
-// Recursive conversion divides q by its approximate square root, yielding two parts, each half
-// the size of q. Using the iterative method on both halves means 2 * (n/2)(n/2 + 1)/2 divW()'s
-// plus the expensive long div(). Asymptotically, the ratio is favorable at 1/2 the divW()'s, and
-// is made better by splitting the subblocks recursively. Best is to split blocks until one more
-// split would take longer (because of the nat/nat div()) than the twice as many divW()'s of the
-// iterative approach. This threshold is represented by leafSize. Benchmarking of leafSize in the
-// range 2..64 shows that values of 8 and 16 work well, with a 4x speedup at medium lengths and
-// ~30x for 20000 digits. Use nat_test.go's BenchmarkLeafSize tests to optimize leafSize for
-// specific hardware.
-//
-func (q nat) convertWords(s []byte, b Word, ndigits int, bb Word, table []divisor) {
- // split larger blocks recursively
- if table != nil {
- // len(q) > leafSize > 0
- var r nat
- index := len(table) - 1
- for len(q) > leafSize {
- // find divisor close to sqrt(q) if possible, but in any case < q
-			maxLength := q.bitLen() // ~= log2 q, or at least that of the largest possible q of this bit length
- minLength := maxLength >> 1 // ~= log2 sqrt(q)
- for index > 0 && table[index-1].nbits > minLength {
- index-- // desired
- }
- if table[index].nbits >= maxLength && table[index].bbb.cmp(q) >= 0 {
- index--
- if index < 0 {
- panic("internal inconsistency")
- }
- }
-
- // split q into the two digit number (q'*bbb + r) to form independent subblocks
- q, r = q.div(r, q, table[index].bbb)
-
- // convert subblocks and collect results in s[:h] and s[h:]
- h := len(s) - table[index].ndigits
- r.convertWords(s[h:], b, ndigits, bb, table[0:index])
- s = s[:h] // == q.convertWords(s, b, ndigits, bb, table[0:index+1])
- }
- }
-
- // having split any large blocks now process the remaining (small) block iteratively
- i := len(s)
- var r Word
- if b == 10 {
- // hard-coding for 10 here speeds this up by 1.25x (allows for / and % by constants)
- for len(q) > 0 {
- // extract least significant, base bb "digit"
- q, r = q.divW(q, bb)
- for j := 0; j < ndigits && i > 0; j++ {
- i--
- // avoid % computation since r%10 == r - int(r/10)*10;
- // this appears to be faster for BenchmarkString10000Base10
- // and smaller strings (but a bit slower for larger ones)
- t := r / 10
- s[i] = '0' + byte(r-t*10)
- r = t
- }
- }
- } else {
- for len(q) > 0 {
- // extract least significant, base bb "digit"
- q, r = q.divW(q, bb)
- for j := 0; j < ndigits && i > 0; j++ {
- i--
- s[i] = digits[r%b]
- r /= b
- }
- }
- }
-
- // prepend high-order zeros
- for i > 0 { // while need more leading zeros
- i--
- s[i] = '0'
- }
-}
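-
-// To make the trade-off above concrete: for n = 100 Words the purely
-// iterative method costs about 100*101/2 = 5050 divW calls, while a single
-// recursive split into two 50-Word halves costs about 2*(50*51/2) = 2550
-// divW calls plus one long div.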
-
-// Split blocks greater than leafSize Words (or set to 0 to disable recursive conversion)
-// Benchmark and configure leafSize using: go test -bench="Leaf"
-// 8 and 16 effective on 3.0 GHz Xeon "Clovertown" CPU (128 byte cache lines)
-// 8 and 16 effective on 2.66 GHz Core 2 Duo "Penryn" CPU
-var leafSize int = 8 // number of Word-size binary values treated as a monolithic block
-
-type divisor struct {
- bbb nat // divisor
- nbits int // bit length of divisor (discounting leading zeros) ~= log2(bbb)
- ndigits int // digit length of divisor in terms of output base digits
-}
-
-var cacheBase10 struct {
- sync.Mutex
- table [64]divisor // cached divisors for base 10
-}
-
-// expWW computes x**y
-func (z nat) expWW(x, y Word) nat {
- return z.expNN(nat(nil).setWord(x), nat(nil).setWord(y), nil)
-}
-
-// construct table of powers of bb*leafSize to use in subdivisions
-func divisors(m int, b Word, ndigits int, bb Word) []divisor {
- // only compute table when recursive conversion is enabled and x is large
- if leafSize == 0 || m <= leafSize {
- return nil
- }
-
- // determine k where (bb**leafSize)**(2**k) >= sqrt(x)
- k := 1
- for words := leafSize; words < m>>1 && k < len(cacheBase10.table); words <<= 1 {
- k++
- }
-
- // reuse and extend existing table of divisors or create new table as appropriate
- var table []divisor // for b == 10, table overlaps with cacheBase10.table
- if b == 10 {
- cacheBase10.Lock()
- table = cacheBase10.table[0:k] // reuse old table for this conversion
- } else {
- table = make([]divisor, k) // create new table for this conversion
- }
-
- // extend table
- if table[k-1].ndigits == 0 {
- // add new entries as needed
- var larger nat
- for i := 0; i < k; i++ {
- if table[i].ndigits == 0 {
- if i == 0 {
- table[0].bbb = nat(nil).expWW(bb, Word(leafSize))
- table[0].ndigits = ndigits * leafSize
- } else {
- table[i].bbb = nat(nil).sqr(table[i-1].bbb)
- table[i].ndigits = 2 * table[i-1].ndigits
- }
-
- // optimization: exploit aggregated extra bits in macro blocks
- larger = nat(nil).set(table[i].bbb)
- for mulAddVWW(larger, larger, b, 0) == 0 {
- table[i].bbb = table[i].bbb.set(larger)
- table[i].ndigits++
- }
-
- table[i].nbits = table[i].bbb.bitLen()
- }
- }
- }
-
- if b == 10 {
- cacheBase10.Unlock()
- }
-
- return table
-}
diff --git a/contrib/go/_std_1.18/src/math/big/rat.go b/contrib/go/_std_1.18/src/math/big/rat.go
deleted file mode 100644
index 731a979ff7..0000000000
--- a/contrib/go/_std_1.18/src/math/big/rat.go
+++ /dev/null
@@ -1,544 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements multi-precision rational numbers.
-
-package big
-
-import (
- "fmt"
- "math"
-)
-
-// A Rat represents a quotient a/b of arbitrary precision.
-// The zero value for a Rat represents the value 0.
-//
-// Operations always take pointer arguments (*Rat) rather
-// than Rat values, and each unique Rat value requires
-// its own unique *Rat pointer. To "copy" a Rat value,
-// an existing (or newly allocated) Rat must be set to
-// a new value using the Rat.Set method; shallow copies
-// of Rats are not supported and may lead to errors.
-type Rat struct {
- // To make zero values for Rat work w/o initialization,
- // a zero value of b (len(b) == 0) acts like b == 1. At
- // the earliest opportunity (when an assignment to the Rat
- // is made), such uninitialized denominators are set to 1.
- // a.neg determines the sign of the Rat, b.neg is ignored.
- a, b Int
-}
-
-// NewRat creates a new Rat with numerator a and denominator b.
-func NewRat(a, b int64) *Rat {
- return new(Rat).SetFrac64(a, b)
-}
-
-// SetFloat64 sets z to exactly f and returns z.
-// If f is not finite, SetFloat returns nil.
-func (z *Rat) SetFloat64(f float64) *Rat {
- const expMask = 1<<11 - 1
- bits := math.Float64bits(f)
- mantissa := bits & (1<<52 - 1)
- exp := int((bits >> 52) & expMask)
- switch exp {
- case expMask: // non-finite
- return nil
- case 0: // denormal
- exp -= 1022
- default: // normal
- mantissa |= 1 << 52
- exp -= 1023
- }
-
- shift := 52 - exp
-
-	// Optimization (?): partially pre-normalize.
- for mantissa&1 == 0 && shift > 0 {
- mantissa >>= 1
- shift--
- }
-
- z.a.SetUint64(mantissa)
- z.a.neg = f < 0
- z.b.Set(intOne)
- if shift > 0 {
- z.b.Lsh(&z.b, uint(shift))
- } else {
- z.a.Lsh(&z.a, uint(-shift))
- }
- return z.norm()
-}
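-
-// A worked illustration: f = 0.75 (= 3 * 2**-2) leaves mantissa = 3 and
-// shift = 2 after the pre-normalization loop, so z becomes exactly 3/4.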
-
-// quotToFloat32 returns the non-negative float32 value
-// nearest to the quotient a/b, using round-to-even in
-// halfway cases. It does not mutate its arguments.
-// Preconditions: b is non-zero; a and b have no common factors.
-func quotToFloat32(a, b nat) (f float32, exact bool) {
- const (
- // float size in bits
- Fsize = 32
-
- // mantissa
- Msize = 23
- Msize1 = Msize + 1 // incl. implicit 1
- Msize2 = Msize1 + 1
-
- // exponent
- Esize = Fsize - Msize1
- Ebias = 1<<(Esize-1) - 1
- Emin = 1 - Ebias
- Emax = Ebias
- )
-
- // TODO(adonovan): specialize common degenerate cases: 1.0, integers.
- alen := a.bitLen()
- if alen == 0 {
- return 0, true
- }
- blen := b.bitLen()
- if blen == 0 {
- panic("division by zero")
- }
-
-	// 1. Left-shift A or B such that quotient A/B is in [1<<Msize1, 1<<(Msize2+1))
- // (Msize2 bits if A < B when they are left-aligned, Msize2+1 bits if A >= B).
- // This is 2 or 3 more than the float32 mantissa field width of Msize:
- // - the optional extra bit is shifted away in step 3 below.
- // - the high-order 1 is omitted in "normal" representation;
- // - the low-order 1 will be used during rounding then discarded.
- exp := alen - blen
- var a2, b2 nat
- a2 = a2.set(a)
- b2 = b2.set(b)
- if shift := Msize2 - exp; shift > 0 {
- a2 = a2.shl(a2, uint(shift))
- } else if shift < 0 {
- b2 = b2.shl(b2, uint(-shift))
- }
-
- // 2. Compute quotient and remainder (q, r). NB: due to the
- // extra shift, the low-order bit of q is logically the
- // high-order bit of r.
- var q nat
- q, r := q.div(a2, a2, b2) // (recycle a2)
- mantissa := low32(q)
- haveRem := len(r) > 0 // mantissa&1 && !haveRem => remainder is exactly half
-
- // 3. If quotient didn't fit in Msize2 bits, redo division by b2<<1
- // (in effect---we accomplish this incrementally).
- if mantissa>>Msize2 == 1 {
- if mantissa&1 == 1 {
- haveRem = true
- }
- mantissa >>= 1
- exp++
- }
- if mantissa>>Msize1 != 1 {
- panic(fmt.Sprintf("expected exactly %d bits of result", Msize2))
- }
-
- // 4. Rounding.
- if Emin-Msize <= exp && exp <= Emin {
- // Denormal case; lose 'shift' bits of precision.
- shift := uint(Emin - (exp - 1)) // [1..Esize1)
- lostbits := mantissa & (1<<shift - 1)
- haveRem = haveRem || lostbits != 0
- mantissa >>= shift
- exp = 2 - Ebias // == exp + shift
- }
- // Round q using round-half-to-even.
- exact = !haveRem
- if mantissa&1 != 0 {
- exact = false
- if haveRem || mantissa&2 != 0 {
- if mantissa++; mantissa >= 1<<Msize2 {
- // Complete rollover 11...1 => 100...0, so shift is safe
- mantissa >>= 1
- exp++
- }
- }
- }
- mantissa >>= 1 // discard rounding bit. Mantissa now scaled by 1<<Msize1.
-
- f = float32(math.Ldexp(float64(mantissa), exp-Msize1))
- if math.IsInf(float64(f), 0) {
- exact = false
- }
- return
-}
-
-// quotToFloat64 returns the non-negative float64 value
-// nearest to the quotient a/b, using round-to-even in
-// halfway cases. It does not mutate its arguments.
-// Preconditions: b is non-zero; a and b have no common factors.
-func quotToFloat64(a, b nat) (f float64, exact bool) {
- const (
- // float size in bits
- Fsize = 64
-
- // mantissa
- Msize = 52
- Msize1 = Msize + 1 // incl. implicit 1
- Msize2 = Msize1 + 1
-
- // exponent
- Esize = Fsize - Msize1
- Ebias = 1<<(Esize-1) - 1
- Emin = 1 - Ebias
- Emax = Ebias
- )
-
- // TODO(adonovan): specialize common degenerate cases: 1.0, integers.
- alen := a.bitLen()
- if alen == 0 {
- return 0, true
- }
- blen := b.bitLen()
- if blen == 0 {
- panic("division by zero")
- }
-
-	// 1. Left-shift A or B such that quotient A/B is in [1<<Msize1, 1<<(Msize2+1))
- // (Msize2 bits if A < B when they are left-aligned, Msize2+1 bits if A >= B).
- // This is 2 or 3 more than the float64 mantissa field width of Msize:
- // - the optional extra bit is shifted away in step 3 below.
- // - the high-order 1 is omitted in "normal" representation;
- // - the low-order 1 will be used during rounding then discarded.
- exp := alen - blen
- var a2, b2 nat
- a2 = a2.set(a)
- b2 = b2.set(b)
- if shift := Msize2 - exp; shift > 0 {
- a2 = a2.shl(a2, uint(shift))
- } else if shift < 0 {
- b2 = b2.shl(b2, uint(-shift))
- }
-
- // 2. Compute quotient and remainder (q, r). NB: due to the
- // extra shift, the low-order bit of q is logically the
- // high-order bit of r.
- var q nat
- q, r := q.div(a2, a2, b2) // (recycle a2)
- mantissa := low64(q)
- haveRem := len(r) > 0 // mantissa&1 && !haveRem => remainder is exactly half
-
- // 3. If quotient didn't fit in Msize2 bits, redo division by b2<<1
- // (in effect---we accomplish this incrementally).
- if mantissa>>Msize2 == 1 {
- if mantissa&1 == 1 {
- haveRem = true
- }
- mantissa >>= 1
- exp++
- }
- if mantissa>>Msize1 != 1 {
- panic(fmt.Sprintf("expected exactly %d bits of result", Msize2))
- }
-
- // 4. Rounding.
- if Emin-Msize <= exp && exp <= Emin {
- // Denormal case; lose 'shift' bits of precision.
- shift := uint(Emin - (exp - 1)) // [1..Esize1)
- lostbits := mantissa & (1<<shift - 1)
- haveRem = haveRem || lostbits != 0
- mantissa >>= shift
- exp = 2 - Ebias // == exp + shift
- }
- // Round q using round-half-to-even.
- exact = !haveRem
- if mantissa&1 != 0 {
- exact = false
- if haveRem || mantissa&2 != 0 {
- if mantissa++; mantissa >= 1<<Msize2 {
- // Complete rollover 11...1 => 100...0, so shift is safe
- mantissa >>= 1
- exp++
- }
- }
- }
- mantissa >>= 1 // discard rounding bit. Mantissa now scaled by 1<<Msize1.
-
- f = math.Ldexp(float64(mantissa), exp-Msize1)
- if math.IsInf(f, 0) {
- exact = false
- }
- return
-}
-
-// Float32 returns the nearest float32 value for x and a bool indicating
-// whether f represents x exactly. If the magnitude of x is too large to
-// be represented by a float32, f is an infinity and exact is false.
-// The sign of f always matches the sign of x, even if f == 0.
-func (x *Rat) Float32() (f float32, exact bool) {
- b := x.b.abs
- if len(b) == 0 {
- b = natOne
- }
- f, exact = quotToFloat32(x.a.abs, b)
- if x.a.neg {
- f = -f
- }
- return
-}
-
-// Float64 returns the nearest float64 value for x and a bool indicating
-// whether f represents x exactly. If the magnitude of x is too large to
-// be represented by a float64, f is an infinity and exact is false.
-// The sign of f always matches the sign of x, even if f == 0.
-func (x *Rat) Float64() (f float64, exact bool) {
- b := x.b.abs
- if len(b) == 0 {
- b = natOne
- }
- f, exact = quotToFloat64(x.a.abs, b)
- if x.a.neg {
- f = -f
- }
- return
-}
-
-// SetFrac sets z to a/b and returns z.
-// If b == 0, SetFrac panics.
-func (z *Rat) SetFrac(a, b *Int) *Rat {
- z.a.neg = a.neg != b.neg
- babs := b.abs
- if len(babs) == 0 {
- panic("division by zero")
- }
- if &z.a == b || alias(z.a.abs, babs) {
- babs = nat(nil).set(babs) // make a copy
- }
- z.a.abs = z.a.abs.set(a.abs)
- z.b.abs = z.b.abs.set(babs)
- return z.norm()
-}
-
-// SetFrac64 sets z to a/b and returns z.
-// If b == 0, SetFrac64 panics.
-func (z *Rat) SetFrac64(a, b int64) *Rat {
- if b == 0 {
- panic("division by zero")
- }
- z.a.SetInt64(a)
- if b < 0 {
- b = -b
- z.a.neg = !z.a.neg
- }
- z.b.abs = z.b.abs.setUint64(uint64(b))
- return z.norm()
-}
-
-// SetInt sets z to x (by making a copy of x) and returns z.
-func (z *Rat) SetInt(x *Int) *Rat {
- z.a.Set(x)
- z.b.abs = z.b.abs.setWord(1)
- return z
-}
-
-// SetInt64 sets z to x and returns z.
-func (z *Rat) SetInt64(x int64) *Rat {
- z.a.SetInt64(x)
- z.b.abs = z.b.abs.setWord(1)
- return z
-}
-
-// SetUint64 sets z to x and returns z.
-func (z *Rat) SetUint64(x uint64) *Rat {
- z.a.SetUint64(x)
- z.b.abs = z.b.abs.setWord(1)
- return z
-}
-
-// Set sets z to x (by making a copy of x) and returns z.
-func (z *Rat) Set(x *Rat) *Rat {
- if z != x {
- z.a.Set(&x.a)
- z.b.Set(&x.b)
- }
- if len(z.b.abs) == 0 {
- z.b.abs = z.b.abs.setWord(1)
- }
- return z
-}
-
-// Abs sets z to |x| (the absolute value of x) and returns z.
-func (z *Rat) Abs(x *Rat) *Rat {
- z.Set(x)
- z.a.neg = false
- return z
-}
-
-// Neg sets z to -x and returns z.
-func (z *Rat) Neg(x *Rat) *Rat {
- z.Set(x)
- z.a.neg = len(z.a.abs) > 0 && !z.a.neg // 0 has no sign
- return z
-}
-
-// Inv sets z to 1/x and returns z.
-// If x == 0, Inv panics.
-func (z *Rat) Inv(x *Rat) *Rat {
- if len(x.a.abs) == 0 {
- panic("division by zero")
- }
- z.Set(x)
- z.a.abs, z.b.abs = z.b.abs, z.a.abs
- return z
-}
-
-// Sign returns:
-//
-// -1 if x < 0
-// 0 if x == 0
-// +1 if x > 0
-//
-func (x *Rat) Sign() int {
- return x.a.Sign()
-}
-
-// IsInt reports whether the denominator of x is 1.
-func (x *Rat) IsInt() bool {
- return len(x.b.abs) == 0 || x.b.abs.cmp(natOne) == 0
-}
-
-// Num returns the numerator of x; it may be <= 0.
-// The result is a reference to x's numerator; it
-// may change if a new value is assigned to x, and vice versa.
-// The sign of the numerator corresponds to the sign of x.
-func (x *Rat) Num() *Int {
- return &x.a
-}
-
-// Denom returns the denominator of x; it is always > 0.
-// The result is a reference to x's denominator, unless
-// x is an uninitialized (zero value) Rat, in which case
-// the result is a new Int of value 1. (To initialize x,
-// any operation that sets x will do, including x.Set(x).)
-// If the result is a reference to x's denominator it
-// may change if a new value is assigned to x, and vice versa.
-func (x *Rat) Denom() *Int {
- // Note that x.b.neg is guaranteed false.
- if len(x.b.abs) == 0 {
- // Note: If this proves problematic, we could
- // panic instead and require the Rat to
- // be explicitly initialized.
- return &Int{abs: nat{1}}
- }
- return &x.b
-}
-
-func (z *Rat) norm() *Rat {
- switch {
- case len(z.a.abs) == 0:
- // z == 0; normalize sign and denominator
- z.a.neg = false
- fallthrough
- case len(z.b.abs) == 0:
- // z is integer; normalize denominator
- z.b.abs = z.b.abs.setWord(1)
- default:
- // z is fraction; normalize numerator and denominator
- neg := z.a.neg
- z.a.neg = false
- z.b.neg = false
- if f := NewInt(0).lehmerGCD(nil, nil, &z.a, &z.b); f.Cmp(intOne) != 0 {
- z.a.abs, _ = z.a.abs.div(nil, z.a.abs, f.abs)
- z.b.abs, _ = z.b.abs.div(nil, z.b.abs, f.abs)
- }
- z.a.neg = neg
- }
- return z
-}
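-
-// For example, normalizing the unreduced fraction -6/4 divides numerator
-// and denominator by gcd(6, 4) = 2, giving -3/2; a zero numerator instead
-// clears the sign and forces the denominator to 1.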
-
-// mulDenom sets z to the denominator product x*y (by taking into
-// account that 0 values for x or y must be interpreted as 1) and
-// returns z.
-func mulDenom(z, x, y nat) nat {
- switch {
- case len(x) == 0 && len(y) == 0:
- return z.setWord(1)
- case len(x) == 0:
- return z.set(y)
- case len(y) == 0:
- return z.set(x)
- }
- return z.mul(x, y)
-}
-
-// scaleDenom sets z to the product x*f.
-// If f == 0 (zero value of denominator), z is set to (a copy of) x.
-func (z *Int) scaleDenom(x *Int, f nat) {
- if len(f) == 0 {
- z.Set(x)
- return
- }
- z.abs = z.abs.mul(x.abs, f)
- z.neg = x.neg
-}
-
-// Cmp compares x and y and returns:
-//
-// -1 if x < y
-// 0 if x == y
-// +1 if x > y
-//
-func (x *Rat) Cmp(y *Rat) int {
- var a, b Int
- a.scaleDenom(&x.a, y.b.abs)
- b.scaleDenom(&y.a, x.b.abs)
- return a.Cmp(&b)
-}
-
-// Add sets z to the sum x+y and returns z.
-func (z *Rat) Add(x, y *Rat) *Rat {
- var a1, a2 Int
- a1.scaleDenom(&x.a, y.b.abs)
- a2.scaleDenom(&y.a, x.b.abs)
- z.a.Add(&a1, &a2)
- z.b.abs = mulDenom(z.b.abs, x.b.abs, y.b.abs)
- return z.norm()
-}
-
-// Sub sets z to the difference x-y and returns z.
-func (z *Rat) Sub(x, y *Rat) *Rat {
- var a1, a2 Int
- a1.scaleDenom(&x.a, y.b.abs)
- a2.scaleDenom(&y.a, x.b.abs)
- z.a.Sub(&a1, &a2)
- z.b.abs = mulDenom(z.b.abs, x.b.abs, y.b.abs)
- return z.norm()
-}
-
-// Mul sets z to the product x*y and returns z.
-func (z *Rat) Mul(x, y *Rat) *Rat {
- if x == y {
- // a squared Rat is positive and can't be reduced (no need to call norm())
- z.a.neg = false
- z.a.abs = z.a.abs.sqr(x.a.abs)
- if len(x.b.abs) == 0 {
- z.b.abs = z.b.abs.setWord(1)
- } else {
- z.b.abs = z.b.abs.sqr(x.b.abs)
- }
- return z
- }
- z.a.Mul(&x.a, &y.a)
- z.b.abs = mulDenom(z.b.abs, x.b.abs, y.b.abs)
- return z.norm()
-}
-
-// Quo sets z to the quotient x/y and returns z.
-// If y == 0, Quo panics.
-func (z *Rat) Quo(x, y *Rat) *Rat {
- if len(y.a.abs) == 0 {
- panic("division by zero")
- }
- var a, b Int
- a.scaleDenom(&x.a, y.b.abs)
- b.scaleDenom(&y.a, x.b.abs)
- z.a.abs = a.abs
- z.b.abs = b.abs
- z.a.neg = a.neg != b.neg
- return z.norm()
-}
diff --git a/contrib/go/_std_1.18/src/math/big/ratconv.go b/contrib/go/_std_1.18/src/math/big/ratconv.go
deleted file mode 100644
index 90053a9c81..0000000000
--- a/contrib/go/_std_1.18/src/math/big/ratconv.go
+++ /dev/null
@@ -1,380 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements rat-to-string conversion functions.
-
-package big
-
-import (
- "errors"
- "fmt"
- "io"
- "strconv"
- "strings"
-)
-
-func ratTok(ch rune) bool {
- return strings.ContainsRune("+-/0123456789.eE", ch)
-}
-
-var ratZero Rat
-var _ fmt.Scanner = &ratZero // *Rat must implement fmt.Scanner
-
-// Scan is a support routine for fmt.Scanner. It accepts the formats
-// 'e', 'E', 'f', 'F', 'g', 'G', and 'v'. All formats are equivalent.
-func (z *Rat) Scan(s fmt.ScanState, ch rune) error {
- tok, err := s.Token(true, ratTok)
- if err != nil {
- return err
- }
- if !strings.ContainsRune("efgEFGv", ch) {
- return errors.New("Rat.Scan: invalid verb")
- }
- if _, ok := z.SetString(string(tok)); !ok {
- return errors.New("Rat.Scan: invalid syntax")
- }
- return nil
-}
-
-// SetString sets z to the value of s and returns z and a boolean indicating
-// success. s can be given as a (possibly signed) fraction "a/b", or as a
-// floating-point number optionally followed by an exponent.
-// If a fraction is provided, both the dividend and the divisor may be a
-// decimal integer or independently use a prefix of ``0b'', ``0'' or ``0o'',
-// or ``0x'' (or their upper-case variants) to denote a binary, octal, or
-// hexadecimal integer, respectively. The divisor may not be signed.
-// If a floating-point number is provided, it may be in decimal form or
-// use any of the same prefixes as above but for ``0'' to denote a non-decimal
-// mantissa. A leading ``0'' is considered a decimal leading 0; it does not
-// indicate octal representation in this case.
-// An optional base-10 ``e'' or base-2 ``p'' (or their upper-case variants)
-// exponent may be provided as well, except for hexadecimal floats which
-// only accept an (optional) ``p'' exponent (because an ``e'' or ``E'' cannot
-// be distinguished from a mantissa digit). If the exponent's absolute value
-// is too large, the operation may fail.
-// The entire string, not just a prefix, must be valid for success. If the
-// operation failed, the value of z is undefined but the returned value is nil.
-func (z *Rat) SetString(s string) (*Rat, bool) {
- if len(s) == 0 {
- return nil, false
- }
- // len(s) > 0
-
- // parse fraction a/b, if any
- if sep := strings.Index(s, "/"); sep >= 0 {
- if _, ok := z.a.SetString(s[:sep], 0); !ok {
- return nil, false
- }
- r := strings.NewReader(s[sep+1:])
- var err error
- if z.b.abs, _, _, err = z.b.abs.scan(r, 0, false); err != nil {
- return nil, false
- }
- // entire string must have been consumed
- if _, err = r.ReadByte(); err != io.EOF {
- return nil, false
- }
- if len(z.b.abs) == 0 {
- return nil, false
- }
- return z.norm(), true
- }
-
- // parse floating-point number
- r := strings.NewReader(s)
-
- // sign
- neg, err := scanSign(r)
- if err != nil {
- return nil, false
- }
-
- // mantissa
- var base int
- var fcount int // fractional digit count; valid if <= 0
- z.a.abs, base, fcount, err = z.a.abs.scan(r, 0, true)
- if err != nil {
- return nil, false
- }
-
- // exponent
- var exp int64
- var ebase int
- exp, ebase, err = scanExponent(r, true, true)
- if err != nil {
- return nil, false
- }
-
- // there should be no unread characters left
- if _, err = r.ReadByte(); err != io.EOF {
- return nil, false
- }
-
- // special-case 0 (see also issue #16176)
- if len(z.a.abs) == 0 {
- return z, true
- }
- // len(z.a.abs) > 0
-
- // The mantissa may have a radix point (fcount <= 0) and there
- // may be a nonzero exponent exp. The radix point amounts to a
- // division by base**(-fcount), which equals a multiplication by
- // base**fcount. An exponent means multiplication by ebase**exp.
- // Multiplications are commutative, so we can apply them in any
- // order. We only have powers of 2 and 10, and we split powers
- // of 10 into the product of the same powers of 2 and 5. This
- // may reduce the size of shift/multiplication factors or
- // divisors required to create the final fraction, depending
- // on the actual floating-point value.
-
- // determine binary or decimal exponent contribution of radix point
- var exp2, exp5 int64
- if fcount < 0 {
- // The mantissa has a radix point ddd.dddd; and
- // -fcount is the number of digits to the right
- // of '.'. Adjust relevant exponent accordingly.
- d := int64(fcount)
- switch base {
- case 10:
- exp5 = d
- fallthrough // 10**e == 5**e * 2**e
- case 2:
- exp2 = d
- case 8:
- exp2 = d * 3 // octal digits are 3 bits each
- case 16:
- exp2 = d * 4 // hexadecimal digits are 4 bits each
- default:
- panic("unexpected mantissa base")
- }
- // fcount consumed - not needed anymore
- }
-
- // take actual exponent into account
- switch ebase {
- case 10:
- exp5 += exp
- fallthrough // see fallthrough above
- case 2:
- exp2 += exp
- default:
- panic("unexpected exponent base")
- }
- // exp consumed - not needed anymore
-
- // apply exp5 contributions
- // (start with exp5 so the numbers to multiply are smaller)
- if exp5 != 0 {
- n := exp5
- if n < 0 {
- n = -n
- if n < 0 {
- // This can occur if -n overflows. -(-1 << 63) would become
- // -1 << 63, which is still negative.
- return nil, false
- }
- }
- if n > 1e6 {
- return nil, false // avoid excessively large exponents
- }
- pow5 := z.b.abs.expNN(natFive, nat(nil).setWord(Word(n)), nil) // use underlying array of z.b.abs
- if exp5 > 0 {
- z.a.abs = z.a.abs.mul(z.a.abs, pow5)
- z.b.abs = z.b.abs.setWord(1)
- } else {
- z.b.abs = pow5
- }
- } else {
- z.b.abs = z.b.abs.setWord(1)
- }
-
- // apply exp2 contributions
- if exp2 < -1e7 || exp2 > 1e7 {
- return nil, false // avoid excessively large exponents
- }
- if exp2 > 0 {
- z.a.abs = z.a.abs.shl(z.a.abs, uint(exp2))
- } else if exp2 < 0 {
- z.b.abs = z.b.abs.shl(z.b.abs, uint(-exp2))
- }
-
- z.a.neg = neg && len(z.a.abs) > 0 // 0 has no sign
-
- return z.norm(), true
-}
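-
-// Illustrative inputs, following the rules above: "3/2" parses as the
-// fraction 3/2; "0x10/0b10" as 16/2, normalized to 8/1; and "1.5e2" as
-// 15 * 10**-1 * 10**2 = 150/1.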
-
-// scanExponent scans the longest possible prefix of r representing a base 10
-// (``e'', ``E'') or a base 2 (``p'', ``P'') exponent, if any. It returns the
-// exponent, the exponent base (10 or 2), and a read or syntax error, if any.
-//
-// If sepOk is set, an underscore character ``_'' may appear between successive
-// exponent digits; such underscores do not change the value of the exponent.
-// Incorrect placement of underscores is reported as an error if there are no
-// other errors. If sepOk is not set, underscores are not recognized and thus
-// terminate scanning like any other character that is not a valid digit.
-//
-// exponent = ( "e" | "E" | "p" | "P" ) [ sign ] digits .
-// sign = "+" | "-" .
-// digits = digit { [ '_' ] digit } .
-// digit = "0" ... "9" .
-//
-// A base 2 exponent is only permitted if base2ok is set.
-func scanExponent(r io.ByteScanner, base2ok, sepOk bool) (exp int64, base int, err error) {
- // one char look-ahead
- ch, err := r.ReadByte()
- if err != nil {
- if err == io.EOF {
- err = nil
- }
- return 0, 10, err
- }
-
- // exponent char
- switch ch {
- case 'e', 'E':
- base = 10
- case 'p', 'P':
- if base2ok {
- base = 2
- break // ok
- }
- fallthrough // binary exponent not permitted
- default:
- r.UnreadByte() // ch does not belong to exponent anymore
- return 0, 10, nil
- }
-
- // sign
- var digits []byte
- ch, err = r.ReadByte()
- if err == nil && (ch == '+' || ch == '-') {
- if ch == '-' {
- digits = append(digits, '-')
- }
- ch, err = r.ReadByte()
- }
-
- // prev encodes the previously seen char: it is one
- // of '_', '0' (a digit), or '.' (anything else). A
- // valid separator '_' may only occur after a digit.
- prev := '.'
- invalSep := false
-
- // exponent value
- hasDigits := false
- for err == nil {
- if '0' <= ch && ch <= '9' {
- digits = append(digits, ch)
- prev = '0'
- hasDigits = true
- } else if ch == '_' && sepOk {
- if prev != '0' {
- invalSep = true
- }
- prev = '_'
- } else {
- r.UnreadByte() // ch does not belong to number anymore
- break
- }
- ch, err = r.ReadByte()
- }
-
- if err == io.EOF {
- err = nil
- }
- if err == nil && !hasDigits {
- err = errNoDigits
- }
- if err == nil {
- exp, err = strconv.ParseInt(string(digits), 10, 64)
- }
- // other errors take precedence over invalid separators
- if err == nil && (invalSep || prev == '_') {
- err = errInvalSep
- }
-
- return
-}
-
-// String returns a string representation of x in the form "a/b" (even if b == 1).
-func (x *Rat) String() string {
- return string(x.marshal())
-}
-
-// marshal implements String, returning a slice of bytes.
-func (x *Rat) marshal() []byte {
- var buf []byte
- buf = x.a.Append(buf, 10)
- buf = append(buf, '/')
- if len(x.b.abs) != 0 {
- buf = x.b.Append(buf, 10)
- } else {
- buf = append(buf, '1')
- }
- return buf
-}
-
-// RatString returns a string representation of x in the form "a/b" if b != 1,
-// and in the form "a" if b == 1.
-func (x *Rat) RatString() string {
- if x.IsInt() {
- return x.a.String()
- }
- return x.String()
-}
-
-// FloatString returns a string representation of x in decimal form with prec
-// digits of precision after the radix point. The last digit is rounded to
-// nearest, with halves rounded away from zero.
-func (x *Rat) FloatString(prec int) string {
- var buf []byte
-
- if x.IsInt() {
- buf = x.a.Append(buf, 10)
- if prec > 0 {
- buf = append(buf, '.')
- for i := prec; i > 0; i-- {
- buf = append(buf, '0')
- }
- }
- return string(buf)
- }
- // x.b.abs != 0
-
- q, r := nat(nil).div(nat(nil), x.a.abs, x.b.abs)
-
- p := natOne
- if prec > 0 {
- p = nat(nil).expNN(natTen, nat(nil).setUint64(uint64(prec)), nil)
- }
-
- r = r.mul(r, p)
- r, r2 := r.div(nat(nil), r, x.b.abs)
-
- // see if we need to round up
- r2 = r2.add(r2, r2)
- if x.b.abs.cmp(r2) <= 0 {
- r = r.add(r, natOne)
- if r.cmp(p) >= 0 {
- q = nat(nil).add(q, natOne)
- r = nat(nil).sub(r, p)
- }
- }
-
- if x.a.neg {
- buf = append(buf, '-')
- }
- buf = append(buf, q.utoa(10)...) // itoa ignores sign if q == 0
-
- if prec > 0 {
- buf = append(buf, '.')
- rs := r.utoa(10)
- for i := prec - len(rs); i > 0; i-- {
- buf = append(buf, '0')
- }
- buf = append(buf, rs...)
- }
-
- return string(buf)
-}
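-
-// For example, with x = 2/3, x.FloatString(3) computes q = 0 and r = 2,
-// scales r by 10**3 to get 2000/3 = 666 with remainder 2, and since
-// 2*2 >= 3 rounds up, producing "0.667".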
diff --git a/contrib/go/_std_1.18/src/math/big/ratmarsh.go b/contrib/go/_std_1.18/src/math/big/ratmarsh.go
deleted file mode 100644
index fbc7b6002d..0000000000
--- a/contrib/go/_std_1.18/src/math/big/ratmarsh.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements encoding/decoding of Rats.
-
-package big
-
-import (
- "encoding/binary"
- "errors"
- "fmt"
-)
-
-// Gob codec version. Permits backward-compatible changes to the encoding.
-const ratGobVersion byte = 1
-
-// GobEncode implements the gob.GobEncoder interface.
-func (x *Rat) GobEncode() ([]byte, error) {
- if x == nil {
- return nil, nil
- }
- buf := make([]byte, 1+4+(len(x.a.abs)+len(x.b.abs))*_S) // extra bytes for version and sign bit (1), and numerator length (4)
- i := x.b.abs.bytes(buf)
- j := x.a.abs.bytes(buf[:i])
- n := i - j
- if int(uint32(n)) != n {
- // this should never happen
- return nil, errors.New("Rat.GobEncode: numerator too large")
- }
- binary.BigEndian.PutUint32(buf[j-4:j], uint32(n))
- j -= 1 + 4
- b := ratGobVersion << 1 // make space for sign bit
- if x.a.neg {
- b |= 1
- }
- buf[j] = b
- return buf[j:], nil
-}
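-
-// The resulting layout is: one byte holding the codec version and the sign
-// bit, a 4-byte big-endian numerator length n, the n numerator bytes, and
-// finally the denominator bytes.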
-
-// GobDecode implements the gob.GobDecoder interface.
-func (z *Rat) GobDecode(buf []byte) error {
- if len(buf) == 0 {
- // Other side sent a nil or default value.
- *z = Rat{}
- return nil
- }
- b := buf[0]
- if b>>1 != ratGobVersion {
- return fmt.Errorf("Rat.GobDecode: encoding version %d not supported", b>>1)
- }
- const j = 1 + 4
- i := j + binary.BigEndian.Uint32(buf[j-4:j])
- z.a.neg = b&1 != 0
- z.a.abs = z.a.abs.setBytes(buf[j:i])
- z.b.abs = z.b.abs.setBytes(buf[i:])
- return nil
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-func (x *Rat) MarshalText() (text []byte, err error) {
- if x.IsInt() {
- return x.a.MarshalText()
- }
- return x.marshal(), nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-func (z *Rat) UnmarshalText(text []byte) error {
- // TODO(gri): get rid of the []byte/string conversion
- if _, ok := z.SetString(string(text)); !ok {
- return fmt.Errorf("math/big: cannot unmarshal %q into a *big.Rat", text)
- }
- return nil
-}
diff --git a/contrib/go/_std_1.18/src/math/big/sqrt.go b/contrib/go/_std_1.18/src/math/big/sqrt.go
deleted file mode 100644
index 0d50164557..0000000000
--- a/contrib/go/_std_1.18/src/math/big/sqrt.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
- "math"
- "sync"
-)
-
-var threeOnce struct {
- sync.Once
- v *Float
-}
-
-func three() *Float {
- threeOnce.Do(func() {
- threeOnce.v = NewFloat(3.0)
- })
- return threeOnce.v
-}
-
-// Sqrt sets z to the rounded square root of x, and returns it.
-//
-// If z's precision is 0, it is changed to x's precision before the
-// operation. Rounding is performed according to z's precision and
-// rounding mode, but z's accuracy is not computed. Specifically, the
-// result of z.Acc() is undefined.
-//
-// The function panics if x < 0. The value of z is undefined in that
-// case.
-func (z *Float) Sqrt(x *Float) *Float {
- if debugFloat {
- x.validate()
- }
-
- if z.prec == 0 {
- z.prec = x.prec
- }
-
- if x.Sign() == -1 {
- // following IEEE754-2008 (section 7.2)
- panic(ErrNaN{"square root of negative operand"})
- }
-
- // handle ±0 and +∞
- if x.form != finite {
- z.acc = Exact
- z.form = x.form
- z.neg = x.neg // IEEE754-2008 requires √±0 = ±0
- return z
- }
-
- // MantExp sets the argument's precision to the receiver's, and
- // when z.prec > x.prec this will lower z.prec. Restore it after
- // the MantExp call.
- prec := z.prec
- b := x.MantExp(z)
- z.prec = prec
-
- // Compute √(z·2**b) as
- // √( z)·2**(½b) if b is even
- // √(2z)·2**(⌊½b⌋) if b > 0 is odd
- // √(½z)·2**(⌈½b⌉) if b < 0 is odd
- switch b % 2 {
- case 0:
- // nothing to do
- case 1:
- z.exp++
- case -1:
- z.exp--
- }
- // 0.25 <= z < 2.0
-
- // Solving 1/x² - z = 0 avoids Quo calls and is faster, especially
- // for high precisions.
- z.sqrtInverse(z)
-
- // re-attach halved exponent
- return z.SetMantExp(z, b/2)
-}
-
-// Compute √x (to z.prec precision) by solving
-// 1/t² - x = 0
-// for t (using Newton's method), and then inverting.
-func (z *Float) sqrtInverse(x *Float) {
- // let
- // f(t) = 1/t² - x
- // then
- // g(t) = f(t)/f'(t) = -½t(1 - xt²)
- // and the next guess is given by
- // t2 = t - g(t) = ½t(3 - xt²)
- u := newFloat(z.prec)
- v := newFloat(z.prec)
- three := three()
- ng := func(t *Float) *Float {
- u.prec = t.prec
- v.prec = t.prec
- u.Mul(t, t) // u = t²
- u.Mul(x, u) // = xt²
- v.Sub(three, u) // v = 3 - xt²
- u.Mul(t, v) // u = t(3 - xt²)
- u.exp-- // = ½t(3 - xt²)
- return t.Set(u)
- }
-
- xf, _ := x.Float64()
- sqi := newFloat(z.prec)
- sqi.SetFloat64(1 / math.Sqrt(xf))
- for prec := z.prec + 32; sqi.prec < prec; {
- sqi.prec *= 2
- sqi = ng(sqi)
- }
- // sqi = 1/√x
-
- // x/√x = √x
- z.Mul(x, sqi)
-}
-
-// newFloat returns a new *Float with space for twice the given
-// precision.
-func newFloat(prec2 uint32) *Float {
- z := new(Float)
- // nat.make ensures the slice length is > 0
- z.mant = z.mant.make(int(prec2/_W) * 2)
- return z
-}
diff --git a/contrib/go/_std_1.18/src/math/bits.go b/contrib/go/_std_1.18/src/math/bits.go
deleted file mode 100644
index 77bcdbe1ce..0000000000
--- a/contrib/go/_std_1.18/src/math/bits.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-const (
- uvnan = 0x7FF8000000000001
- uvinf = 0x7FF0000000000000
- uvneginf = 0xFFF0000000000000
- uvone = 0x3FF0000000000000
- mask = 0x7FF
- shift = 64 - 11 - 1
- bias = 1023
- signMask = 1 << 63
- fracMask = 1<<shift - 1
-)
-
-// Inf returns positive infinity if sign >= 0, negative infinity if sign < 0.
-func Inf(sign int) float64 {
- var v uint64
- if sign >= 0 {
- v = uvinf
- } else {
- v = uvneginf
- }
- return Float64frombits(v)
-}
-
-// NaN returns an IEEE 754 ``not-a-number'' value.
-func NaN() float64 { return Float64frombits(uvnan) }
-
-// IsNaN reports whether f is an IEEE 754 ``not-a-number'' value.
-func IsNaN(f float64) (is bool) {
- // IEEE 754 says that only NaNs satisfy f != f.
- // To avoid the floating-point hardware, could use:
- // x := Float64bits(f);
- // return uint32(x>>shift)&mask == mask && x != uvinf && x != uvneginf
- return f != f
-}
-
-// IsInf reports whether f is an infinity, according to sign.
-// If sign > 0, IsInf reports whether f is positive infinity.
-// If sign < 0, IsInf reports whether f is negative infinity.
-// If sign == 0, IsInf reports whether f is either infinity.
-func IsInf(f float64, sign int) bool {
- // Test for infinity by comparing against maximum float.
- // To avoid the floating-point hardware, could use:
- // x := Float64bits(f);
- // return sign >= 0 && x == uvinf || sign <= 0 && x == uvneginf;
- return sign >= 0 && f > MaxFloat64 || sign <= 0 && f < -MaxFloat64
-}
-
-// normalize returns a normal number y and exponent exp
-// satisfying x == y × 2**exp. It assumes x is finite and non-zero.
-func normalize(x float64) (y float64, exp int) {
- const SmallestNormal = 2.2250738585072014e-308 // 2**-1022
- if Abs(x) < SmallestNormal {
- return x * (1 << 52), -52
- }
- return x, 0
-}
diff --git a/contrib/go/_std_1.18/src/math/cbrt.go b/contrib/go/_std_1.18/src/math/cbrt.go
deleted file mode 100644
index 45c8ecb3a8..0000000000
--- a/contrib/go/_std_1.18/src/math/cbrt.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// The go code is a modified version of the original C code from
-// http://www.netlib.org/fdlibm/s_cbrt.c and came with this notice.
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunSoft, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-
-// Cbrt returns the cube root of x.
-//
-// Special cases are:
-// Cbrt(±0) = ±0
-// Cbrt(±Inf) = ±Inf
-// Cbrt(NaN) = NaN
-func Cbrt(x float64) float64 {
- if haveArchCbrt {
- return archCbrt(x)
- }
- return cbrt(x)
-}
-
-func cbrt(x float64) float64 {
- const (
- B1 = 715094163 // (682-0.03306235651)*2**20
- B2 = 696219795 // (664-0.03306235651)*2**20
- C = 5.42857142857142815906e-01 // 19/35 = 0x3FE15F15F15F15F1
- D = -7.05306122448979611050e-01 // -864/1225 = 0xBFE691DE2532C834
- E = 1.41428571428571436819e+00 // 99/70 = 0x3FF6A0EA0EA0EA0F
- F = 1.60714285714285720630e+00 // 45/28 = 0x3FF9B6DB6DB6DB6E
- G = 3.57142857142857150787e-01 // 5/14 = 0x3FD6DB6DB6DB6DB7
- SmallestNormal = 2.22507385850720138309e-308 // 2**-1022 = 0x0010000000000000
- )
- // special cases
- switch {
- case x == 0 || IsNaN(x) || IsInf(x, 0):
- return x
- }
-
- sign := false
- if x < 0 {
- x = -x
- sign = true
- }
-
- // rough cbrt to 5 bits
- t := Float64frombits(Float64bits(x)/3 + B1<<32)
- if x < SmallestNormal {
- // subnormal number
- t = float64(1 << 54) // set t= 2**54
- t *= x
- t = Float64frombits(Float64bits(t)/3 + B2<<32)
- }
-
- // new cbrt to 23 bits
- r := t * t / x
- s := C + r*t
- t *= G + F/(s+E+D/s)
-
- // chop to 22 bits, make larger than cbrt(x)
- t = Float64frombits(Float64bits(t)&(0xFFFFFFFFC<<28) + 1<<30)
-
- // one step newton iteration to 53 bits with error less than 0.667ulps
- s = t * t // t*t is exact
- r = x / s
- w := t + t
-	r = (r - t) / (w + r) // r-t is exact
- t = t + t*r
-
- // restore the sign bit
- if sign {
- t = -t
- }
- return t
-}
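
Unlike Pow(x, 1.0/3.0), Cbrt handles negative arguments, because the sign is split off before the bit-level estimate and restored at the end. A quick comparison (sketch):

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	fmt.Println(math.Cbrt(-27))         // -3: sign restored after the |x| reduction
    	fmt.Println(math.Pow(-27, 1.0/3.0)) // NaN: Pow rejects a negative base here
    	fmt.Println(math.Cbrt(1e-320))      // subnormal input handled by the 2**54 rescale
    }
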
diff --git a/contrib/go/_std_1.18/src/math/copysign.go b/contrib/go/_std_1.18/src/math/copysign.go
deleted file mode 100644
index 719c64b9eb..0000000000
--- a/contrib/go/_std_1.18/src/math/copysign.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// Copysign returns a value with the magnitude
-// of x and the sign of y.
-func Copysign(x, y float64) float64 {
- const sign = 1 << 63
- return Float64frombits(Float64bits(x)&^sign | Float64bits(y)&sign)
-}
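
Copysign is two masks and an OR on the bit pattern; even -0 donates its sign. The same masking written out by hand (sketch):

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	const sign = 1 << 63
    	x, y := 3.0, math.Copysign(0, -1) // y is -0.0
    	bits := math.Float64bits(x)&^sign | math.Float64bits(y)&sign
    	fmt.Println(math.Float64frombits(bits)) // -3
    	fmt.Println(math.Copysign(x, y))        // -3, same result via the library
    }
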
diff --git a/contrib/go/_std_1.18/src/math/dim.go b/contrib/go/_std_1.18/src/math/dim.go
deleted file mode 100644
index 6a857bbe41..0000000000
--- a/contrib/go/_std_1.18/src/math/dim.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// Dim returns the maximum of x-y or 0.
-//
-// Special cases are:
-// Dim(+Inf, +Inf) = NaN
-// Dim(-Inf, -Inf) = NaN
-// Dim(x, NaN) = Dim(NaN, x) = NaN
-func Dim(x, y float64) float64 {
- // The special cases result in NaN after the subtraction:
- // +Inf - +Inf = NaN
- // -Inf - -Inf = NaN
- // NaN - y = NaN
- // x - NaN = NaN
- v := x - y
- if v <= 0 {
- // v is negative or 0
- return 0
- }
- // v is positive or NaN
- return v
-}
-
-// Max returns the larger of x or y.
-//
-// Special cases are:
-// Max(x, +Inf) = Max(+Inf, x) = +Inf
-// Max(x, NaN) = Max(NaN, x) = NaN
-// Max(+0, ±0) = Max(±0, +0) = +0
-// Max(-0, -0) = -0
-func Max(x, y float64) float64 {
- if haveArchMax {
- return archMax(x, y)
- }
- return max(x, y)
-}
-
-func max(x, y float64) float64 {
- // special cases
- switch {
- case IsInf(x, 1) || IsInf(y, 1):
- return Inf(1)
- case IsNaN(x) || IsNaN(y):
- return NaN()
- case x == 0 && x == y:
- if Signbit(x) {
- return y
- }
- return x
- }
- if x > y {
- return x
- }
- return y
-}
-
-// Min returns the smaller of x or y.
-//
-// Special cases are:
-// Min(x, -Inf) = Min(-Inf, x) = -Inf
-// Min(x, NaN) = Min(NaN, x) = NaN
-// Min(-0, ±0) = Min(±0, -0) = -0
-func Min(x, y float64) float64 {
- if haveArchMin {
- return archMin(x, y)
- }
- return min(x, y)
-}
-
-func min(x, y float64) float64 {
- // special cases
- switch {
- case IsInf(x, -1) || IsInf(y, -1):
- return Inf(-1)
- case IsNaN(x) || IsNaN(y):
- return NaN()
- case x == 0 && x == y:
- if Signbit(x) {
- return x
- }
- return y
- }
- if x < y {
- return x
- }
- return y
-}
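
Dim folds its four special cases into one subtraction that naturally yields NaN, while max and min check infinities before NaNs, so Max(NaN, +Inf) is +Inf rather than NaN. A sketch exercising the documented cases:

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	fmt.Println(math.Dim(math.Inf(1), math.Inf(1))) // NaN: +Inf - +Inf
    	fmt.Println(math.Max(math.NaN(), math.Inf(1)))  // +Inf: the Inf case is checked first
    	fmt.Println(math.Min(math.Copysign(0, -1), 0))  // -0: signed-zero ordering
    }
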
diff --git a/contrib/go/_std_1.18/src/math/erf.go b/contrib/go/_std_1.18/src/math/erf.go
deleted file mode 100644
index 4d6fe472f1..0000000000
--- a/contrib/go/_std_1.18/src/math/erf.go
+++ /dev/null
@@ -1,349 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-/*
- Floating-point error function and complementary error function.
-*/
-
-// The original C code and the long comment below are
-// from FreeBSD's /usr/src/lib/msun/src/s_erf.c and
-// came with this notice. The go code is a simplified
-// version of the original C.
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunPro, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-//
-// double erf(double x)
-// double erfc(double x)
-//	erf(x) = (2/sqrt(pi)) * integral from 0 to x of exp(-t*t) dt
-//
-// erfc(x) = 1-erf(x)
-// Note that
-// erf(-x) = -erf(x)
-// erfc(-x) = 2 - erfc(x)
-//
-// Method:
-// 1. For |x| in [0, 0.84375]
-// erf(x) = x + x*R(x**2)
-// erfc(x) = 1 - erf(x) if x in [-.84375,0.25]
-// = 0.5 + ((0.5-x)-x*R) if x in [0.25,0.84375]
-// where R = P/Q where P is an odd poly of degree 8 and
-// Q is an odd poly of degree 10.
-//	|R - (erf(x)-x)/x| <= 2**-57.90
-//
-//
-// Remark. The formula is derived by noting
-// erf(x) = (2/sqrt(pi))*(x - x**3/3 + x**5/10 - x**7/42 + ....)
-// and that
-// 2/sqrt(pi) = 1.128379167095512573896158903121545171688
-// is close to one. The interval is chosen because the fix
-// point of erf(x) is near 0.6174 (i.e., erf(x)=x when x is
-// near 0.6174), and by some experiment, 0.84375 is chosen to
-// guarantee the error is less than one ulp for erf.
-//
-// 2. For |x| in [0.84375,1.25], let s = |x| - 1, and
-// c = 0.84506291151 rounded to single (24 bits)
-// erf(x) = sign(x) * (c + P1(s)/Q1(s))
-// erfc(x) = (1-c) - P1(s)/Q1(s) if x > 0
-// 1+(c+P1(s)/Q1(s)) if x < 0
-// |P1/Q1 - (erf(|x|)-c)| <= 2**-59.06
-// Remark: here we use the Taylor series expansion at x=1.
-// erf(1+s) = erf(1) + s*Poly(s)
-// = 0.845.. + P1(s)/Q1(s)
-// That is, we use rational approximation to approximate
-// erf(1+s) - (c = (single)0.84506291151)
-// Note that |P1/Q1|< 0.078 for x in [0.84375,1.25]
-// where
-// P1(s) = degree 6 poly in s
-// Q1(s) = degree 6 poly in s
-//
-// 3. For x in [1.25,1/0.35(~2.857143)],
-// erfc(x) = (1/x)*exp(-x*x-0.5625+R1/S1)
-// erf(x) = 1 - erfc(x)
-// where
-// R1(z) = degree 7 poly in z, (z=1/x**2)
-// S1(z) = degree 8 poly in z
-//
-// 4. For x in [1/0.35,28]
-// erfc(x) = (1/x)*exp(-x*x-0.5625+R2/S2) if x > 0
-// = 2.0 - (1/x)*exp(-x*x-0.5625+R2/S2) if -6<x<0
-// = 2.0 - tiny (if x <= -6)
-// erf(x) = sign(x)*(1.0 - erfc(x)) if x < 6, else
-// erf(x) = sign(x)*(1.0 - tiny)
-// where
-// R2(z) = degree 6 poly in z, (z=1/x**2)
-// S2(z) = degree 7 poly in z
-//
-// Note1:
-// To compute exp(-x*x-0.5625+R/S), let s be a single
-// precision number and s := x; then
-// -x*x = -s*s + (s-x)*(s+x)
-//	exp(-x*x-0.5625+R/S) =
-// exp(-s*s-0.5625)*exp((s-x)*(s+x)+R/S);
-// Note2:
-// Here 4 and 5 make use of the asymptotic series
-//	erfc(x) ~ exp(-x*x)/(x*sqrt(pi)) * (1 + Poly(1/x**2))
-// We use rational approximation to approximate
-// g(s)=f(1/x**2) = log(erfc(x)*x) - x*x + 0.5625
-// Here is the error bound for R1/S1 and R2/S2
-// |R1/S1 - f(x)| < 2**(-62.57)
-// |R2/S2 - f(x)| < 2**(-61.52)
-//
-// 5. For inf > x >= 28
-// erf(x) = sign(x) *(1 - tiny) (raise inexact)
-// erfc(x) = tiny*tiny (raise underflow) if x > 0
-// = 2 - tiny if x<0
-//
-// 6. Special cases:
-// erf(0) = 0, erf(inf) = 1, erf(-inf) = -1,
-// erfc(0) = 1, erfc(inf) = 0, erfc(-inf) = 2,
-// erfc/erf(NaN) is NaN
-
-const (
- erx = 8.45062911510467529297e-01 // 0x3FEB0AC160000000
- // Coefficients for approximation to erf in [0, 0.84375]
- efx = 1.28379167095512586316e-01 // 0x3FC06EBA8214DB69
- efx8 = 1.02703333676410069053e+00 // 0x3FF06EBA8214DB69
- pp0 = 1.28379167095512558561e-01 // 0x3FC06EBA8214DB68
- pp1 = -3.25042107247001499370e-01 // 0xBFD4CD7D691CB913
- pp2 = -2.84817495755985104766e-02 // 0xBF9D2A51DBD7194F
- pp3 = -5.77027029648944159157e-03 // 0xBF77A291236668E4
- pp4 = -2.37630166566501626084e-05 // 0xBEF8EAD6120016AC
- qq1 = 3.97917223959155352819e-01 // 0x3FD97779CDDADC09
- qq2 = 6.50222499887672944485e-02 // 0x3FB0A54C5536CEBA
- qq3 = 5.08130628187576562776e-03 // 0x3F74D022C4D36B0F
- qq4 = 1.32494738004321644526e-04 // 0x3F215DC9221C1A10
- qq5 = -3.96022827877536812320e-06 // 0xBED09C4342A26120
- // Coefficients for approximation to erf in [0.84375, 1.25]
- pa0 = -2.36211856075265944077e-03 // 0xBF6359B8BEF77538
- pa1 = 4.14856118683748331666e-01 // 0x3FDA8D00AD92B34D
- pa2 = -3.72207876035701323847e-01 // 0xBFD7D240FBB8C3F1
- pa3 = 3.18346619901161753674e-01 // 0x3FD45FCA805120E4
- pa4 = -1.10894694282396677476e-01 // 0xBFBC63983D3E28EC
- pa5 = 3.54783043256182359371e-02 // 0x3FA22A36599795EB
- pa6 = -2.16637559486879084300e-03 // 0xBF61BF380A96073F
- qa1 = 1.06420880400844228286e-01 // 0x3FBB3E6618EEE323
- qa2 = 5.40397917702171048937e-01 // 0x3FE14AF092EB6F33
- qa3 = 7.18286544141962662868e-02 // 0x3FB2635CD99FE9A7
- qa4 = 1.26171219808761642112e-01 // 0x3FC02660E763351F
- qa5 = 1.36370839120290507362e-02 // 0x3F8BEDC26B51DD1C
- qa6 = 1.19844998467991074170e-02 // 0x3F888B545735151D
- // Coefficients for approximation to erfc in [1.25, 1/0.35]
- ra0 = -9.86494403484714822705e-03 // 0xBF843412600D6435
- ra1 = -6.93858572707181764372e-01 // 0xBFE63416E4BA7360
- ra2 = -1.05586262253232909814e+01 // 0xC0251E0441B0E726
- ra3 = -6.23753324503260060396e+01 // 0xC04F300AE4CBA38D
- ra4 = -1.62396669462573470355e+02 // 0xC0644CB184282266
- ra5 = -1.84605092906711035994e+02 // 0xC067135CEBCCABB2
- ra6 = -8.12874355063065934246e+01 // 0xC054526557E4D2F2
- ra7 = -9.81432934416914548592e+00 // 0xC023A0EFC69AC25C
- sa1 = 1.96512716674392571292e+01 // 0x4033A6B9BD707687
- sa2 = 1.37657754143519042600e+02 // 0x4061350C526AE721
- sa3 = 4.34565877475229228821e+02 // 0x407B290DD58A1A71
- sa4 = 6.45387271733267880336e+02 // 0x40842B1921EC2868
- sa5 = 4.29008140027567833386e+02 // 0x407AD02157700314
- sa6 = 1.08635005541779435134e+02 // 0x405B28A3EE48AE2C
- sa7 = 6.57024977031928170135e+00 // 0x401A47EF8E484A93
- sa8 = -6.04244152148580987438e-02 // 0xBFAEEFF2EE749A62
- // Coefficients for approximation to erfc in [1/.35, 28]
- rb0 = -9.86494292470009928597e-03 // 0xBF84341239E86F4A
- rb1 = -7.99283237680523006574e-01 // 0xBFE993BA70C285DE
- rb2 = -1.77579549177547519889e+01 // 0xC031C209555F995A
- rb3 = -1.60636384855821916062e+02 // 0xC064145D43C5ED98
- rb4 = -6.37566443368389627722e+02 // 0xC083EC881375F228
- rb5 = -1.02509513161107724954e+03 // 0xC09004616A2E5992
- rb6 = -4.83519191608651397019e+02 // 0xC07E384E9BDC383F
- sb1 = 3.03380607434824582924e+01 // 0x403E568B261D5190
- sb2 = 3.25792512996573918826e+02 // 0x40745CAE221B9F0A
- sb3 = 1.53672958608443695994e+03 // 0x409802EB189D5118
- sb4 = 3.19985821950859553908e+03 // 0x40A8FFB7688C246A
- sb5 = 2.55305040643316442583e+03 // 0x40A3F219CEDF3BE6
- sb6 = 4.74528541206955367215e+02 // 0x407DA874E79FE763
- sb7 = -2.24409524465858183362e+01 // 0xC03670E242712D62
-)
-
-// Erf returns the error function of x.
-//
-// Special cases are:
-// Erf(+Inf) = 1
-// Erf(-Inf) = -1
-// Erf(NaN) = NaN
-func Erf(x float64) float64 {
- if haveArchErf {
- return archErf(x)
- }
- return erf(x)
-}
-
-func erf(x float64) float64 {
- const (
- VeryTiny = 2.848094538889218e-306 // 0x0080000000000000
- Small = 1.0 / (1 << 28) // 2**-28
- )
- // special cases
- switch {
- case IsNaN(x):
- return NaN()
- case IsInf(x, 1):
- return 1
- case IsInf(x, -1):
- return -1
- }
- sign := false
- if x < 0 {
- x = -x
- sign = true
- }
- if x < 0.84375 { // |x| < 0.84375
- var temp float64
- if x < Small { // |x| < 2**-28
- if x < VeryTiny {
- temp = 0.125 * (8.0*x + efx8*x) // avoid underflow
- } else {
- temp = x + efx*x
- }
- } else {
- z := x * x
- r := pp0 + z*(pp1+z*(pp2+z*(pp3+z*pp4)))
- s := 1 + z*(qq1+z*(qq2+z*(qq3+z*(qq4+z*qq5))))
- y := r / s
- temp = x + x*y
- }
- if sign {
- return -temp
- }
- return temp
- }
- if x < 1.25 { // 0.84375 <= |x| < 1.25
- s := x - 1
- P := pa0 + s*(pa1+s*(pa2+s*(pa3+s*(pa4+s*(pa5+s*pa6)))))
- Q := 1 + s*(qa1+s*(qa2+s*(qa3+s*(qa4+s*(qa5+s*qa6)))))
- if sign {
- return -erx - P/Q
- }
- return erx + P/Q
- }
- if x >= 6 { // inf > |x| >= 6
- if sign {
- return -1
- }
- return 1
- }
- s := 1 / (x * x)
- var R, S float64
- if x < 1/0.35 { // |x| < 1 / 0.35 ~ 2.857143
- R = ra0 + s*(ra1+s*(ra2+s*(ra3+s*(ra4+s*(ra5+s*(ra6+s*ra7))))))
- S = 1 + s*(sa1+s*(sa2+s*(sa3+s*(sa4+s*(sa5+s*(sa6+s*(sa7+s*sa8)))))))
- } else { // |x| >= 1 / 0.35 ~ 2.857143
- R = rb0 + s*(rb1+s*(rb2+s*(rb3+s*(rb4+s*(rb5+s*rb6)))))
- S = 1 + s*(sb1+s*(sb2+s*(sb3+s*(sb4+s*(sb5+s*(sb6+s*sb7))))))
- }
- z := Float64frombits(Float64bits(x) & 0xffffffff00000000) // pseudo-single (20-bit) precision x
- r := Exp(-z*z-0.5625) * Exp((z-x)*(z+x)+R/S)
- if sign {
- return r/x - 1
- }
- return 1 - r/x
-}
-
-// Erfc returns the complementary error function of x.
-//
-// Special cases are:
-// Erfc(+Inf) = 0
-// Erfc(-Inf) = 2
-// Erfc(NaN) = NaN
-func Erfc(x float64) float64 {
- if haveArchErfc {
- return archErfc(x)
- }
- return erfc(x)
-}
-
-func erfc(x float64) float64 {
- const Tiny = 1.0 / (1 << 56) // 2**-56
- // special cases
- switch {
- case IsNaN(x):
- return NaN()
- case IsInf(x, 1):
- return 0
- case IsInf(x, -1):
- return 2
- }
- sign := false
- if x < 0 {
- x = -x
- sign = true
- }
- if x < 0.84375 { // |x| < 0.84375
- var temp float64
- if x < Tiny { // |x| < 2**-56
- temp = x
- } else {
- z := x * x
- r := pp0 + z*(pp1+z*(pp2+z*(pp3+z*pp4)))
- s := 1 + z*(qq1+z*(qq2+z*(qq3+z*(qq4+z*qq5))))
- y := r / s
- if x < 0.25 { // |x| < 1/4
- temp = x + x*y
- } else {
- temp = 0.5 + (x*y + (x - 0.5))
- }
- }
- if sign {
- return 1 + temp
- }
- return 1 - temp
- }
- if x < 1.25 { // 0.84375 <= |x| < 1.25
- s := x - 1
- P := pa0 + s*(pa1+s*(pa2+s*(pa3+s*(pa4+s*(pa5+s*pa6)))))
- Q := 1 + s*(qa1+s*(qa2+s*(qa3+s*(qa4+s*(qa5+s*qa6)))))
- if sign {
- return 1 + erx + P/Q
- }
- return 1 - erx - P/Q
-
- }
- if x < 28 { // |x| < 28
- s := 1 / (x * x)
- var R, S float64
- if x < 1/0.35 { // |x| < 1 / 0.35 ~ 2.857143
- R = ra0 + s*(ra1+s*(ra2+s*(ra3+s*(ra4+s*(ra5+s*(ra6+s*ra7))))))
- S = 1 + s*(sa1+s*(sa2+s*(sa3+s*(sa4+s*(sa5+s*(sa6+s*(sa7+s*sa8)))))))
- } else { // |x| >= 1 / 0.35 ~ 2.857143
- if sign && x > 6 {
- return 2 // x < -6
- }
- R = rb0 + s*(rb1+s*(rb2+s*(rb3+s*(rb4+s*(rb5+s*rb6)))))
- S = 1 + s*(sb1+s*(sb2+s*(sb3+s*(sb4+s*(sb5+s*(sb6+s*sb7))))))
- }
- z := Float64frombits(Float64bits(x) & 0xffffffff00000000) // pseudo-single (20-bit) precision x
- r := Exp(-z*z-0.5625) * Exp((z-x)*(z+x)+R/S)
- if sign {
- return 2 - r/x
- }
- return r / x
- }
- if sign {
- return 2
- }
- return 0
-}
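
The reason erf and erfc are separate entry points is numeric, not cosmetic: 1 - Erf(x) loses every significant bit once Erf(x) rounds to 1, whereas the erfc path keeps the exp(-x*x-0.5625+R/S) tail. A quick illustration (sketch):

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	x := 10.0
    	fmt.Println(1 - math.Erf(x))                // 0: catastrophic cancellation
    	fmt.Println(math.Erfc(x))                   // ~2.09e-45: tail preserved
    	fmt.Println(math.Erf(0.5) + math.Erfc(0.5)) // 1 (up to rounding): erf + erfc = 1
    }
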
diff --git a/contrib/go/_std_1.18/src/math/erfinv.go b/contrib/go/_std_1.18/src/math/erfinv.go
deleted file mode 100644
index ee423d33e4..0000000000
--- a/contrib/go/_std_1.18/src/math/erfinv.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-/*
- Inverse of the floating-point error function.
-*/
-
-// This implementation is based on the rational approximation
-// of percentage points of normal distribution available from
-// https://www.jstor.org/stable/2347330.
-
-const (
-	// Coefficients for approximation to erfinv in |x| <= 0.85
- a0 = 1.1975323115670912564578e0
- a1 = 4.7072688112383978012285e1
- a2 = 6.9706266534389598238465e2
- a3 = 4.8548868893843886794648e3
- a4 = 1.6235862515167575384252e4
- a5 = 2.3782041382114385731252e4
- a6 = 1.1819493347062294404278e4
- a7 = 8.8709406962545514830200e2
- b0 = 1.0000000000000000000e0
- b1 = 4.2313330701600911252e1
- b2 = 6.8718700749205790830e2
- b3 = 5.3941960214247511077e3
- b4 = 2.1213794301586595867e4
- b5 = 3.9307895800092710610e4
- b6 = 2.8729085735721942674e4
- b7 = 5.2264952788528545610e3
-	// Coefficients for approximation to erfinv in 0.85 < |x| <= 1-2*exp(-25)
- c0 = 1.42343711074968357734e0
- c1 = 4.63033784615654529590e0
- c2 = 5.76949722146069140550e0
- c3 = 3.64784832476320460504e0
- c4 = 1.27045825245236838258e0
- c5 = 2.41780725177450611770e-1
- c6 = 2.27238449892691845833e-2
- c7 = 7.74545014278341407640e-4
- d0 = 1.4142135623730950488016887e0
- d1 = 2.9036514445419946173133295e0
- d2 = 2.3707661626024532365971225e0
- d3 = 9.7547832001787427186894837e-1
- d4 = 2.0945065210512749128288442e-1
- d5 = 2.1494160384252876777097297e-2
- d6 = 7.7441459065157709165577218e-4
- d7 = 1.4859850019840355905497876e-9
-	// Coefficients for approximation to erfinv in 1-2*exp(-25) < |x| < 1
- e0 = 6.65790464350110377720e0
- e1 = 5.46378491116411436990e0
- e2 = 1.78482653991729133580e0
- e3 = 2.96560571828504891230e-1
- e4 = 2.65321895265761230930e-2
- e5 = 1.24266094738807843860e-3
- e6 = 2.71155556874348757815e-5
- e7 = 2.01033439929228813265e-7
- f0 = 1.414213562373095048801689e0
- f1 = 8.482908416595164588112026e-1
- f2 = 1.936480946950659106176712e-1
- f3 = 2.103693768272068968719679e-2
- f4 = 1.112800997078859844711555e-3
- f5 = 2.611088405080593625138020e-5
- f6 = 2.010321207683943062279931e-7
- f7 = 2.891024605872965461538222e-15
-)
-
-// Erfinv returns the inverse error function of x.
-//
-// Special cases are:
-// Erfinv(1) = +Inf
-// Erfinv(-1) = -Inf
-// Erfinv(x) = NaN if x < -1 or x > 1
-// Erfinv(NaN) = NaN
-func Erfinv(x float64) float64 {
- // special cases
- if IsNaN(x) || x <= -1 || x >= 1 {
- if x == -1 || x == 1 {
- return Inf(int(x))
- }
- return NaN()
- }
-
- sign := false
- if x < 0 {
- x = -x
- sign = true
- }
-
- var ans float64
- if x <= 0.85 { // |x| <= 0.85
- r := 0.180625 - 0.25*x*x
- z1 := ((((((a7*r+a6)*r+a5)*r+a4)*r+a3)*r+a2)*r+a1)*r + a0
- z2 := ((((((b7*r+b6)*r+b5)*r+b4)*r+b3)*r+b2)*r+b1)*r + b0
- ans = (x * z1) / z2
- } else {
- var z1, z2 float64
- r := Sqrt(Ln2 - Log(1.0-x))
- if r <= 5.0 {
- r -= 1.6
- z1 = ((((((c7*r+c6)*r+c5)*r+c4)*r+c3)*r+c2)*r+c1)*r + c0
- z2 = ((((((d7*r+d6)*r+d5)*r+d4)*r+d3)*r+d2)*r+d1)*r + d0
- } else {
- r -= 5.0
- z1 = ((((((e7*r+e6)*r+e5)*r+e4)*r+e3)*r+e2)*r+e1)*r + e0
- z2 = ((((((f7*r+f6)*r+f5)*r+f4)*r+f3)*r+f2)*r+f1)*r + f0
- }
- ans = z1 / z2
- }
-
- if sign {
- return -ans
- }
- return ans
-}
-
-// Erfcinv returns the inverse of Erfc(x).
-//
-// Special cases are:
-// Erfcinv(0) = +Inf
-// Erfcinv(2) = -Inf
-// Erfcinv(x) = NaN if x < 0 or x > 2
-// Erfcinv(NaN) = NaN
-func Erfcinv(x float64) float64 {
- return Erfinv(1 - x)
-}
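
The three coefficient bands of Erfinv (|x| <= 0.85, the middle band, and the r > 5 tail) can each be exercised with a round trip through Erf. A sketch, with inputs chosen to land one per band:

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	for _, x := range []float64{0.3, 0.99, 1 - 1e-12} {
    		fmt.Println(x, math.Erf(math.Erfinv(x))) // round trip is ~exact
    	}
    	fmt.Println(math.Erfcinv(0.5) == math.Erfinv(0.5)) // true: Erfcinv(x) = Erfinv(1-x)
    }
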
diff --git a/contrib/go/_std_1.18/src/math/exp.go b/contrib/go/_std_1.18/src/math/exp.go
deleted file mode 100644
index d05eb91fb0..0000000000
--- a/contrib/go/_std_1.18/src/math/exp.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// Exp returns e**x, the base-e exponential of x.
-//
-// Special cases are:
-// Exp(+Inf) = +Inf
-// Exp(NaN) = NaN
-// Very large values overflow to 0 or +Inf.
-// Very small values underflow to 1.
-func Exp(x float64) float64 {
- if haveArchExp {
- return archExp(x)
- }
- return exp(x)
-}
-
-// The original C code, the long comment, and the constants
-// below are from FreeBSD's /usr/src/lib/msun/src/e_exp.c
-// and came with this notice. The go code is a simplified
-// version of the original C.
-//
-// ====================================================
-// Copyright (C) 2004 by Sun Microsystems, Inc. All rights reserved.
-//
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-//
-// exp(x)
-// Returns the exponential of x.
-//
-// Method
-// 1. Argument reduction:
-// Reduce x to an r so that |r| <= 0.5*ln2 ~ 0.34658.
-// Given x, find r and integer k such that
-//
-// x = k*ln2 + r, |r| <= 0.5*ln2.
-//
-// Here r will be represented as r = hi-lo for better
-// accuracy.
-//
-// 2. Approximation of exp(r) by a special rational function on
-// the interval [0,0.34658]:
-// Write
-// R(r**2) = r*(exp(r)+1)/(exp(r)-1) = 2 + r*r/6 - r**4/360 + ...
-// We use a special Remez algorithm on [0,0.34658] to generate
-// a polynomial of degree 5 to approximate R. The maximum error
-// of this polynomial approximation is bounded by 2**-59. In
-// other words,
-// R(z) ~ 2.0 + P1*z + P2*z**2 + P3*z**3 + P4*z**4 + P5*z**5
-// (where z=r*r, and the values of P1 to P5 are listed below)
-// and
-//	|2.0 + P1*z + ... + P5*z**5 - R(z)| <= 2**-59
-// The computation of exp(r) thus becomes
-//	exp(r) = 1 + 2*r/(R - r)
-//	       = 1 + r + r*R1(r)/(2 - R1(r))  (for better accuracy)
-// where
-//	R1(r) = r - (P1*r**2 + P2*r**4 + ... + P5*r**10).
-//
-// 3. Scale back to obtain exp(x):
-// From step 1, we have
-// exp(x) = 2**k * exp(r)
-//
-// Special cases:
-// exp(INF) is INF, exp(NaN) is NaN;
-// exp(-INF) is 0, and
-// for finite argument, only exp(0)=1 is exact.
-//
-// Accuracy:
-// according to an error analysis, the error is always less than
-// 1 ulp (unit in the last place).
-//
-// Misc. info.
-// For IEEE double
-// if x > 7.09782712893383973096e+02 then exp(x) overflow
-// if x < -7.45133219101941108420e+02 then exp(x) underflow
-//
-// Constants:
-// The hexadecimal values are the intended ones for the following
-// constants. The decimal values may be used, provided that the
-// compiler will convert from decimal to binary accurately enough
-// to produce the hexadecimal values shown.
-
-func exp(x float64) float64 {
- const (
- Ln2Hi = 6.93147180369123816490e-01
- Ln2Lo = 1.90821492927058770002e-10
- Log2e = 1.44269504088896338700e+00
-
- Overflow = 7.09782712893383973096e+02
- Underflow = -7.45133219101941108420e+02
- NearZero = 1.0 / (1 << 28) // 2**-28
- )
-
- // special cases
- switch {
- case IsNaN(x) || IsInf(x, 1):
- return x
- case IsInf(x, -1):
- return 0
- case x > Overflow:
- return Inf(1)
- case x < Underflow:
- return 0
- case -NearZero < x && x < NearZero:
- return 1 + x
- }
-
- // reduce; computed as r = hi - lo for extra precision.
- var k int
- switch {
- case x < 0:
- k = int(Log2e*x - 0.5)
- case x > 0:
- k = int(Log2e*x + 0.5)
- }
- hi := x - float64(k)*Ln2Hi
- lo := float64(k) * Ln2Lo
-
- // compute
- return expmulti(hi, lo, k)
-}
-
-// Exp2 returns 2**x, the base-2 exponential of x.
-//
-// Special cases are the same as Exp.
-func Exp2(x float64) float64 {
- if haveArchExp2 {
- return archExp2(x)
- }
- return exp2(x)
-}
-
-func exp2(x float64) float64 {
- const (
- Ln2Hi = 6.93147180369123816490e-01
- Ln2Lo = 1.90821492927058770002e-10
-
- Overflow = 1.0239999999999999e+03
- Underflow = -1.0740e+03
- )
-
- // special cases
- switch {
- case IsNaN(x) || IsInf(x, 1):
- return x
- case IsInf(x, -1):
- return 0
- case x > Overflow:
- return Inf(1)
- case x < Underflow:
- return 0
- }
-
- // argument reduction; x = r×lg(e) + k with |r| ≤ ln(2)/2.
- // computed as r = hi - lo for extra precision.
- var k int
- switch {
- case x > 0:
- k = int(x + 0.5)
- case x < 0:
- k = int(x - 0.5)
- }
- t := x - float64(k)
- hi := t * Ln2Hi
- lo := -t * Ln2Lo
-
- // compute
- return expmulti(hi, lo, k)
-}
-
-// expmulti returns e**r × 2**k where r = hi - lo and |r| ≤ ln(2)/2.
-func expmulti(hi, lo float64, k int) float64 {
- const (
- P1 = 1.66666666666666657415e-01 /* 0x3FC55555; 0x55555555 */
- P2 = -2.77777777770155933842e-03 /* 0xBF66C16C; 0x16BEBD93 */
- P3 = 6.61375632143793436117e-05 /* 0x3F11566A; 0xAF25DE2C */
- P4 = -1.65339022054652515390e-06 /* 0xBEBBBD41; 0xC5D26BF1 */
- P5 = 4.13813679705723846039e-08 /* 0x3E663769; 0x72BEA4D0 */
- )
-
- r := hi - lo
- t := r * r
- c := r - t*(P1+t*(P2+t*(P3+t*(P4+t*P5))))
- y := 1 - ((lo - (r*c)/(2-c)) - hi)
- // TODO(rsc): make sure Ldexp can handle boundary k
- return Ldexp(y, k)
-}
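
Step 1 of the method comment is the heart of exp: choose k so the residual r = x - k*ln2 lands in [-½ln2, ½ln2], evaluate the degree-5 polynomial there, and scale by 2**k. The reduction in isolation (a sketch; reduceExp is a hypothetical name, and the hi/lo split is omitted for brevity):

    package main

    import (
    	"fmt"
    	"math"
    )

    // reduceExp splits x as x = k*ln2 + r with |r| <= 0.5*ln2,
    // mirroring step 1 of the deleted exp.
    func reduceExp(x float64) (k int, r float64) {
    	k = int(math.Floor(math.Log2E*x + 0.5))
    	r = x - float64(k)*math.Ln2
    	return
    }

    func main() {
    	k, r := reduceExp(5.0)
    	fmt.Println(k, r)                                    // k=7, |r| <= 0.3466
    	fmt.Println(math.Ldexp(math.Exp(r), k), math.Exp(5)) // 2**k * e**r reassembles e**5
    }
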
diff --git a/contrib/go/_std_1.18/src/math/expm1.go b/contrib/go/_std_1.18/src/math/expm1.go
deleted file mode 100644
index 66d3421661..0000000000
--- a/contrib/go/_std_1.18/src/math/expm1.go
+++ /dev/null
@@ -1,242 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// The original C code, the long comment, and the constants
-// below are from FreeBSD's /usr/src/lib/msun/src/s_expm1.c
-// and came with this notice. The go code is a simplified
-// version of the original C.
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunPro, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-// expm1(x)
-// Returns exp(x)-1, the exponential of x minus 1.
-//
-// Method
-// 1. Argument reduction:
-// Given x, find r and integer k such that
-//
-// x = k*ln2 + r, |r| <= 0.5*ln2 ~ 0.34658
-//
-// Here a correction term c will be computed to compensate
-// the error in r when rounded to a floating-point number.
-//
-// 2. Approximating expm1(r) by a special rational function on
-// the interval [0,0.34658]:
-// Since
-// r*(exp(r)+1)/(exp(r)-1) = 2+ r**2/6 - r**4/360 + ...
-// we define R1(r*r) by
-// r*(exp(r)+1)/(exp(r)-1) = 2+ r**2/6 * R1(r*r)
-// That is,
-// R1(r**2) = 6/r *((exp(r)+1)/(exp(r)-1) - 2/r)
-// = 6/r * ( 1 + 2.0*(1/(exp(r)-1) - 1/r))
-// = 1 - r**2/60 + r**4/2520 - r**6/100800 + ...
-// We use a special Remez algorithm on [0,0.347] to generate
-// a polynomial of degree 5 in r*r to approximate R1. The
-// maximum error of this polynomial approximation is bounded
-// by 2**-61. In other words,
-// R1(z) ~ 1.0 + Q1*z + Q2*z**2 + Q3*z**3 + Q4*z**4 + Q5*z**5
-// where Q1 = -1.6666666666666567384E-2,
-// Q2 = 3.9682539681370365873E-4,
-// Q3 = -9.9206344733435987357E-6,
-// Q4 = 2.5051361420808517002E-7,
-// Q5 = -6.2843505682382617102E-9;
-// (where z=r*r, and the values of Q1 to Q5 are listed below)
-// with error bounded by
-//	|1.0 + Q1*z + ... + Q5*z**5 - R1(z)| <= 2**-61
-//
-// expm1(r) = exp(r)-1 is then computed by the following
-// specific way which minimizes the accumulated rounding error:
-//	expm1(r) = r + r**2/2 + (r**3/2) * [3 - (R1 + R1*r/2)] / [6 - r*(3 - R1*r/2)]
-//
-// To compensate the error in the argument reduction, we use
-// expm1(r+c) = expm1(r) + c + expm1(r)*c
-// ~ expm1(r) + c + r*c
-// Thus c+r*c will be added in as the correction terms for
-// expm1(r+c). Now rearrange the terms so that optimization cannot
-// reorder the rounding-sensitive operations:
-//	expm1(r+c) ~ r - ({r*((r**2/2) * [R1 - (3 - R1*r/2)] / [6 - r*(3 - R1*r/2)] - c) - c} - r**2/2)
-//	           = r - E
-// 3. Scale back to obtain expm1(x):
-// From step 1, we have
-// expm1(x) = either 2**k*[expm1(r)+1] - 1
-// = or 2**k*[expm1(r) + (1-2**-k)]
-// 4. Implementation notes:
-// (A). To save one multiplication, we scale the coefficient Qi
-// to Qi*2**i, and replace z by (x**2)/2.
-// (B). To achieve maximum accuracy, we compute expm1(x) by
-// (i) if x < -56*ln2, return -1.0, (raise inexact if x!=inf)
-// (ii) if k=0, return r-E
-// (iii) if k=-1, return 0.5*(r-E)-0.5
-// (iv) if k=1 if r < -0.25, return 2*((r+0.5)- E)
-// else return 1.0+2.0*(r-E);
-// (v) if (k<-2||k>56) return 2**k(1-(E-r)) - 1 (or exp(x)-1)
-// (vi) if k <= 20, return 2**k((1-2**-k)-(E-r)), else
-// (vii) return 2**k(1-((E+2**-k)-r))
-//
-// Special cases:
-// expm1(INF) is INF, expm1(NaN) is NaN;
-// expm1(-INF) is -1, and
-// for finite argument, only expm1(0)=0 is exact.
-//
-// Accuracy:
-// according to an error analysis, the error is always less than
-// 1 ulp (unit in the last place).
-//
-// Misc. info.
-// For IEEE double
-// if x > 7.09782712893383973096e+02 then expm1(x) overflow
-//
-// Constants:
-// The hexadecimal values are the intended ones for the following
-// constants. The decimal values may be used, provided that the
-// compiler will convert from decimal to binary accurately enough
-// to produce the hexadecimal values shown.
-//
-
-// Expm1 returns e**x - 1, the base-e exponential of x minus 1.
-// It is more accurate than Exp(x) - 1 when x is near zero.
-//
-// Special cases are:
-// Expm1(+Inf) = +Inf
-// Expm1(-Inf) = -1
-// Expm1(NaN) = NaN
-// Very large values overflow to -1 or +Inf.
-func Expm1(x float64) float64 {
- if haveArchExpm1 {
- return archExpm1(x)
- }
- return expm1(x)
-}
-
-func expm1(x float64) float64 {
- const (
- Othreshold = 7.09782712893383973096e+02 // 0x40862E42FEFA39EF
- Ln2X56 = 3.88162421113569373274e+01 // 0x4043687a9f1af2b1
- Ln2HalfX3 = 1.03972077083991796413e+00 // 0x3ff0a2b23f3bab73
- Ln2Half = 3.46573590279972654709e-01 // 0x3fd62e42fefa39ef
- Ln2Hi = 6.93147180369123816490e-01 // 0x3fe62e42fee00000
- Ln2Lo = 1.90821492927058770002e-10 // 0x3dea39ef35793c76
- InvLn2 = 1.44269504088896338700e+00 // 0x3ff71547652b82fe
- Tiny = 1.0 / (1 << 54) // 2**-54 = 0x3c90000000000000
- // scaled coefficients related to expm1
- Q1 = -3.33333333333331316428e-02 // 0xBFA11111111110F4
- Q2 = 1.58730158725481460165e-03 // 0x3F5A01A019FE5585
- Q3 = -7.93650757867487942473e-05 // 0xBF14CE199EAADBB7
- Q4 = 4.00821782732936239552e-06 // 0x3ED0CFCA86E65239
- Q5 = -2.01099218183624371326e-07 // 0xBE8AFDB76E09C32D
- )
-
- // special cases
- switch {
- case IsInf(x, 1) || IsNaN(x):
- return x
- case IsInf(x, -1):
- return -1
- }
-
- absx := x
- sign := false
- if x < 0 {
- absx = -absx
- sign = true
- }
-
- // filter out huge argument
- if absx >= Ln2X56 { // if |x| >= 56 * ln2
- if sign {
- return -1 // x < -56*ln2, return -1
- }
- if absx >= Othreshold { // if |x| >= 709.78...
- return Inf(1)
- }
- }
-
- // argument reduction
- var c float64
- var k int
- if absx > Ln2Half { // if |x| > 0.5 * ln2
- var hi, lo float64
- if absx < Ln2HalfX3 { // and |x| < 1.5 * ln2
- if !sign {
- hi = x - Ln2Hi
- lo = Ln2Lo
- k = 1
- } else {
- hi = x + Ln2Hi
- lo = -Ln2Lo
- k = -1
- }
- } else {
- if !sign {
- k = int(InvLn2*x + 0.5)
- } else {
- k = int(InvLn2*x - 0.5)
- }
- t := float64(k)
- hi = x - t*Ln2Hi // t * Ln2Hi is exact here
- lo = t * Ln2Lo
- }
- x = hi - lo
- c = (hi - x) - lo
- } else if absx < Tiny { // when |x| < 2**-54, return x
- return x
- } else {
- k = 0
- }
-
- // x is now in primary range
- hfx := 0.5 * x
- hxs := x * hfx
- r1 := 1 + hxs*(Q1+hxs*(Q2+hxs*(Q3+hxs*(Q4+hxs*Q5))))
- t := 3 - r1*hfx
- e := hxs * ((r1 - t) / (6.0 - x*t))
- if k == 0 {
- return x - (x*e - hxs) // c is 0
- }
- e = (x*(e-c) - c)
- e -= hxs
- switch {
- case k == -1:
- return 0.5*(x-e) - 0.5
- case k == 1:
- if x < -0.25 {
- return -2 * (e - (x + 0.5))
- }
- return 1 + 2*(x-e)
- case k <= -2 || k > 56: // suffice to return exp(x)-1
- y := 1 - (e - x)
- y = Float64frombits(Float64bits(y) + uint64(k)<<52) // add k to y's exponent
- return y - 1
- }
- if k < 20 {
- t := Float64frombits(0x3ff0000000000000 - (0x20000000000000 >> uint(k))) // t=1-2**-k
- y := t - (e - x)
- y = Float64frombits(Float64bits(y) + uint64(k)<<52) // add k to y's exponent
- return y
- }
- t = Float64frombits(uint64(0x3ff-k) << 52) // 2**-k
- y := x - (e + t)
- y++
- y = Float64frombits(Float64bits(y) + uint64(k)<<52) // add k to y's exponent
- return y
-}
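
Expm1 earns its keep near zero, where Exp(x) - 1 cancels almost all significant bits. Sketch:

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	x := 1e-12
    	fmt.Println(math.Exp(x) - 1) // ~1.0000889e-12: only a few good digits survive
    	fmt.Println(math.Expm1(x))   // 1.0000000000005e-12: full precision
    }
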
diff --git a/contrib/go/_std_1.18/src/math/floor.go b/contrib/go/_std_1.18/src/math/floor.go
deleted file mode 100644
index 7913a900e3..0000000000
--- a/contrib/go/_std_1.18/src/math/floor.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// Floor returns the greatest integer value less than or equal to x.
-//
-// Special cases are:
-// Floor(±0) = ±0
-// Floor(±Inf) = ±Inf
-// Floor(NaN) = NaN
-func Floor(x float64) float64 {
- if haveArchFloor {
- return archFloor(x)
- }
- return floor(x)
-}
-
-func floor(x float64) float64 {
- if x == 0 || IsNaN(x) || IsInf(x, 0) {
- return x
- }
- if x < 0 {
- d, fract := Modf(-x)
- if fract != 0.0 {
- d = d + 1
- }
- return -d
- }
- d, _ := Modf(x)
- return d
-}
-
-// Ceil returns the least integer value greater than or equal to x.
-//
-// Special cases are:
-// Ceil(±0) = ±0
-// Ceil(±Inf) = ±Inf
-// Ceil(NaN) = NaN
-func Ceil(x float64) float64 {
- if haveArchCeil {
- return archCeil(x)
- }
- return ceil(x)
-}
-
-func ceil(x float64) float64 {
- return -Floor(-x)
-}
-
-// Trunc returns the integer value of x.
-//
-// Special cases are:
-// Trunc(±0) = ±0
-// Trunc(±Inf) = ±Inf
-// Trunc(NaN) = NaN
-func Trunc(x float64) float64 {
- if haveArchTrunc {
- return archTrunc(x)
- }
- return trunc(x)
-}
-
-func trunc(x float64) float64 {
- if x == 0 || IsNaN(x) || IsInf(x, 0) {
- return x
- }
- d, _ := Modf(x)
- return d
-}
-
-// Round returns the nearest integer, rounding half away from zero.
-//
-// Special cases are:
-// Round(±0) = ±0
-// Round(±Inf) = ±Inf
-// Round(NaN) = NaN
-func Round(x float64) float64 {
- // Round is a faster implementation of:
- //
- // func Round(x float64) float64 {
- // t := Trunc(x)
- // if Abs(x-t) >= 0.5 {
- // return t + Copysign(1, x)
- // }
- // return t
- // }
- bits := Float64bits(x)
- e := uint(bits>>shift) & mask
- if e < bias {
- // Round abs(x) < 1 including denormals.
- bits &= signMask // +-0
- if e == bias-1 {
- bits |= uvone // +-1
- }
- } else if e < bias+shift {
- // Round any abs(x) >= 1 containing a fractional component [0,1).
- //
- // Numbers with larger exponents are returned unchanged since they
- // must be either an integer, infinity, or NaN.
- const half = 1 << (shift - 1)
- e -= bias
- bits += half >> e
- bits &^= fracMask >> e
- }
- return Float64frombits(bits)
-}
-
-// RoundToEven returns the nearest integer, rounding ties to even.
-//
-// Special cases are:
-// RoundToEven(±0) = ±0
-// RoundToEven(±Inf) = ±Inf
-// RoundToEven(NaN) = NaN
-func RoundToEven(x float64) float64 {
- // RoundToEven is a faster implementation of:
- //
- // func RoundToEven(x float64) float64 {
- // t := math.Trunc(x)
- // odd := math.Remainder(t, 2) != 0
- // if d := math.Abs(x - t); d > 0.5 || (d == 0.5 && odd) {
- // return t + math.Copysign(1, x)
- // }
- // return t
- // }
- bits := Float64bits(x)
- e := uint(bits>>shift) & mask
- if e >= bias {
- // Round abs(x) >= 1.
- // - Large numbers without fractional components, infinity, and NaN are unchanged.
- // - Add 0.499.. or 0.5 before truncating depending on whether the truncated
- // number is even or odd (respectively).
- const halfMinusULP = (1 << (shift - 1)) - 1
- e -= bias
- bits += (halfMinusULP + (bits>>(shift-e))&1) >> e
- bits &^= fracMask >> e
- } else if e == bias-1 && bits&fracMask != 0 {
- // Round 0.5 < abs(x) < 1.
- bits = bits&signMask | uvone // +-1
- } else {
- // Round abs(x) <= 0.5 including denormals.
- bits &= signMask // +-0
- }
- return Float64frombits(bits)
-}
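
Both rounders work entirely in the bit pattern: add half (or half minus one ULP) at the right fraction bit and clear what remains. Their observable difference is confined to exact halves. Sketch:

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	for _, x := range []float64{0.5, 1.5, 2.5, -2.5} {
    		fmt.Println(x, math.Round(x), math.RoundToEven(x))
    	}
    	// 0.5:  1  0
    	// 1.5:  2  2
    	// 2.5:  3  2
    	// -2.5: -3 -2
    }
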
diff --git a/contrib/go/_std_1.18/src/math/frexp.go b/contrib/go/_std_1.18/src/math/frexp.go
deleted file mode 100644
index 3c8a909ed0..0000000000
--- a/contrib/go/_std_1.18/src/math/frexp.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// Frexp breaks f into a normalized fraction
-// and an integral power of two.
-// It returns frac and exp satisfying f == frac × 2**exp,
-// with the absolute value of frac in the interval [½, 1).
-//
-// Special cases are:
-// Frexp(±0) = ±0, 0
-// Frexp(±Inf) = ±Inf, 0
-// Frexp(NaN) = NaN, 0
-func Frexp(f float64) (frac float64, exp int) {
- if haveArchFrexp {
- return archFrexp(f)
- }
- return frexp(f)
-}
-
-func frexp(f float64) (frac float64, exp int) {
- // special cases
- switch {
- case f == 0:
- return f, 0 // correctly return -0
- case IsInf(f, 0) || IsNaN(f):
- return f, 0
- }
- f, exp = normalize(f)
- x := Float64bits(f)
- exp += int((x>>shift)&mask) - bias + 1
- x &^= mask << shift
- x |= (-1 + bias) << shift
- frac = Float64frombits(x)
- return
-}
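
Frexp peels the exponent out of the bit pattern and pins the fraction into [½, 1); Ldexp inverts it exactly. Sketch:

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	frac, exp := math.Frexp(40)
    	fmt.Println(frac, exp)             // 0.625 6, since 40 = 0.625 * 2**6
    	fmt.Println(math.Ldexp(frac, exp)) // 40: exact round trip
    	frac, exp = math.Frexp(5e-324)     // smallest subnormal goes through normalize
    	fmt.Println(frac, exp)             // 0.5 -1073
    }
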
diff --git a/contrib/go/_std_1.18/src/math/gamma.go b/contrib/go/_std_1.18/src/math/gamma.go
deleted file mode 100644
index cc9e869496..0000000000
--- a/contrib/go/_std_1.18/src/math/gamma.go
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// The original C code, the long comment, and the constants
-// below are from http://netlib.sandia.gov/cephes/cprob/gamma.c.
-// The go code is a simplified version of the original C.
-//
-// tgamma.c
-//
-// Gamma function
-//
-// SYNOPSIS:
-//
-// double x, y, tgamma();
-// extern int signgam;
-//
-// y = tgamma( x );
-//
-// DESCRIPTION:
-//
-// Returns gamma function of the argument. The result is
-// correctly signed, and the sign (+1 or -1) is also
-// returned in a global (extern) variable named signgam.
-// This variable is also filled in by the logarithmic gamma
-// function lgamma().
-//
-// Arguments |x| <= 34 are reduced by recurrence and the function
-// approximated by a rational function of degree 6/7 in the
-// interval (2,3). Large arguments are handled by Stirling's
-// formula. Large negative arguments are made positive using
-// a reflection formula.
-//
-// ACCURACY:
-//
-// Relative error:
-// arithmetic domain # trials peak rms
-// DEC -34, 34 10000 1.3e-16 2.5e-17
-// IEEE -170,-33 20000 2.3e-15 3.3e-16
-// IEEE -33, 33 20000 9.4e-16 2.2e-16
-// IEEE 33, 171.6 20000 2.3e-15 3.2e-16
-//
-// Error for arguments outside the test range will be larger
-// owing to error amplification by the exponential function.
-//
-// Cephes Math Library Release 2.8: June, 2000
-// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
-//
-// The readme file at http://netlib.sandia.gov/cephes/ says:
-// Some software in this archive may be from the book _Methods and
-// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
-// International, 1989) or from the Cephes Mathematical Library, a
-// commercial product. In either event, it is copyrighted by the author.
-// What you see here may be used freely but it comes with no support or
-// guarantee.
-//
-// The two known misprints in the book are repaired here in the
-// source listings for the gamma function and the incomplete beta
-// integral.
-//
-// Stephen L. Moshier
-// moshier@na-net.ornl.gov
-
-var _gamP = [...]float64{
- 1.60119522476751861407e-04,
- 1.19135147006586384913e-03,
- 1.04213797561761569935e-02,
- 4.76367800457137231464e-02,
- 2.07448227648435975150e-01,
- 4.94214826801497100753e-01,
- 9.99999999999999996796e-01,
-}
-var _gamQ = [...]float64{
- -2.31581873324120129819e-05,
- 5.39605580493303397842e-04,
- -4.45641913851797240494e-03,
- 1.18139785222060435552e-02,
- 3.58236398605498653373e-02,
- -2.34591795718243348568e-01,
- 7.14304917030273074085e-02,
- 1.00000000000000000320e+00,
-}
-var _gamS = [...]float64{
- 7.87311395793093628397e-04,
- -2.29549961613378126380e-04,
- -2.68132617805781232825e-03,
- 3.47222221605458667310e-03,
- 8.33333333333482257126e-02,
-}
-
-// Gamma function computed by Stirling's formula.
-// The pair of results must be multiplied together to get the actual answer.
-// The multiplication is left to the caller so that, if careful, the caller can avoid
-// infinity for 172 <= x <= 180.
-// The polynomial is valid for 33 <= x <= 172; larger values are only used
-// in reciprocal and produce denormalized floats. The lower precision there
-// masks any imprecision in the polynomial.
-func stirling(x float64) (float64, float64) {
- if x > 200 {
- return Inf(1), 1
- }
- const (
- SqrtTwoPi = 2.506628274631000502417
- MaxStirling = 143.01608
- )
- w := 1 / x
- w = 1 + w*((((_gamS[0]*w+_gamS[1])*w+_gamS[2])*w+_gamS[3])*w+_gamS[4])
- y1 := Exp(x)
- y2 := 1.0
- if x > MaxStirling { // avoid Pow() overflow
- v := Pow(x, 0.5*x-0.25)
- y1, y2 = v, v/y1
- } else {
- y1 = Pow(x, x-0.5) / y1
- }
- return y1, SqrtTwoPi * w * y2
-}
-
-// Gamma returns the Gamma function of x.
-//
-// Special cases are:
-// Gamma(+Inf) = +Inf
-// Gamma(+0) = +Inf
-// Gamma(-0) = -Inf
-// Gamma(x) = NaN for integer x < 0
-// Gamma(-Inf) = NaN
-// Gamma(NaN) = NaN
-func Gamma(x float64) float64 {
- const Euler = 0.57721566490153286060651209008240243104215933593992 // A001620
- // special cases
- switch {
- case isNegInt(x) || IsInf(x, -1) || IsNaN(x):
- return NaN()
- case IsInf(x, 1):
- return Inf(1)
- case x == 0:
- if Signbit(x) {
- return Inf(-1)
- }
- return Inf(1)
- }
- q := Abs(x)
- p := Floor(q)
- if q > 33 {
- if x >= 0 {
- y1, y2 := stirling(x)
- return y1 * y2
- }
- // Note: x is negative but (checked above) not a negative integer,
- // so x must be small enough to be in range for conversion to int64.
- // If |x| were >= 2⁶³ it would have to be an integer.
- signgam := 1
- if ip := int64(p); ip&1 == 0 {
- signgam = -1
- }
- z := q - p
- if z > 0.5 {
- p = p + 1
- z = q - p
- }
- z = q * Sin(Pi*z)
- if z == 0 {
- return Inf(signgam)
- }
- sq1, sq2 := stirling(q)
- absz := Abs(z)
- d := absz * sq1 * sq2
- if IsInf(d, 0) {
- z = Pi / absz / sq1 / sq2
- } else {
- z = Pi / d
- }
- return float64(signgam) * z
- }
-
- // Reduce argument
- z := 1.0
- for x >= 3 {
- x = x - 1
- z = z * x
- }
- for x < 0 {
- if x > -1e-09 {
- goto small
- }
- z = z / x
- x = x + 1
- }
- for x < 2 {
- if x < 1e-09 {
- goto small
- }
- z = z / x
- x = x + 1
- }
-
- if x == 2 {
- return z
- }
-
- x = x - 2
- p = (((((x*_gamP[0]+_gamP[1])*x+_gamP[2])*x+_gamP[3])*x+_gamP[4])*x+_gamP[5])*x + _gamP[6]
- q = ((((((x*_gamQ[0]+_gamQ[1])*x+_gamQ[2])*x+_gamQ[3])*x+_gamQ[4])*x+_gamQ[5])*x+_gamQ[6])*x + _gamQ[7]
- return z * p / q
-
-small:
- if x == 0 {
- return Inf(1)
- }
- return z / ((1 + Euler*x) * x)
-}
-
-func isNegInt(x float64) bool {
- if x < 0 {
- _, xf := Modf(x)
- return xf == 0
- }
- return false
-}
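
For positive integers the reduction loop reproduces the factorial, Gamma(n) = (n-1)!, and the reflection path covers the negative axis. Sketch:

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	fmt.Println(math.Gamma(6))              // 120 = 5!
    	fmt.Println(math.Gamma(0.5))            // 1.7724538509055159 = sqrt(pi)
    	fmt.Println(math.IsNaN(math.Gamma(-2))) // true: negative integers are poles
    }
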
diff --git a/contrib/go/_std_1.18/src/math/hypot.go b/contrib/go/_std_1.18/src/math/hypot.go
deleted file mode 100644
index 12af17766d..0000000000
--- a/contrib/go/_std_1.18/src/math/hypot.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-/*
- Hypot -- sqrt(p*p + q*q), but overflows only if the result does.
-*/
-
-// Hypot returns Sqrt(p*p + q*q), taking care to avoid
-// unnecessary overflow and underflow.
-//
-// Special cases are:
-// Hypot(±Inf, q) = +Inf
-// Hypot(p, ±Inf) = +Inf
-// Hypot(NaN, q) = NaN
-// Hypot(p, NaN) = NaN
-func Hypot(p, q float64) float64 {
- if haveArchHypot {
- return archHypot(p, q)
- }
- return hypot(p, q)
-}
-
-func hypot(p, q float64) float64 {
- // special cases
- switch {
- case IsInf(p, 0) || IsInf(q, 0):
- return Inf(1)
- case IsNaN(p) || IsNaN(q):
- return NaN()
- }
- p, q = Abs(p), Abs(q)
- if p < q {
- p, q = q, p
- }
- if p == 0 {
- return 0
- }
- q = q / p
- return p * Sqrt(1+q*q)
-}
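
Dividing by the larger magnitude before squaring is what keeps hypot alive at the edge of the exponent range. Sketch:

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	big := 1e300
    	fmt.Println(math.Sqrt(big*big + big*big)) // +Inf: the naive form overflows
    	fmt.Println(math.Hypot(big, big))         // 1.4142135623730952e+300
    }
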
diff --git a/contrib/go/_std_1.18/src/math/j0.go b/contrib/go/_std_1.18/src/math/j0.go
deleted file mode 100644
index cb5f07bca6..0000000000
--- a/contrib/go/_std_1.18/src/math/j0.go
+++ /dev/null
@@ -1,427 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-/*
- Bessel function of the first and second kinds of order zero.
-*/
-
-// The original C code and the long comment below are
-// from FreeBSD's /usr/src/lib/msun/src/e_j0.c and
-// came with this notice. The go code is a simplified
-// version of the original C.
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunPro, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-// __ieee754_j0(x), __ieee754_y0(x)
-// Bessel function of the first and second kinds of order zero.
-// Method -- j0(x):
-// 1. For tiny x, we use j0(x) = 1 - x**2/4 + x**4/64 - ...
-// 2. Reduce x to |x| since j0(x)=j0(-x), and
-// for x in (0,2)
-// j0(x) = 1-z/4+ z**2*R0/S0, where z = x*x;
-// (precision: |j0-1+z/4-z**2R0/S0 |<2**-63.67 )
-// for x in (2,inf)
-// j0(x) = sqrt(2/(pi*x))*(p0(x)*cos(x0)-q0(x)*sin(x0))
-// where x0 = x-pi/4. It is better to compute sin(x0),cos(x0)
-// as follows:
-// cos(x0) = cos(x)cos(pi/4)+sin(x)sin(pi/4)
-// = 1/sqrt(2) * (cos(x) + sin(x))
-// sin(x0) = sin(x)cos(pi/4)-cos(x)sin(pi/4)
-// = 1/sqrt(2) * (sin(x) - cos(x))
-// (To avoid cancellation, use
-// sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
-// to compute the worse one.)
-//
-// 3 Special cases
-// j0(nan)= nan
-// j0(0) = 1
-// j0(inf) = 0
-//
-// Method -- y0(x):
-// 1. For x<2.
-// Since
-// y0(x) = 2/pi*(j0(x)*(ln(x/2)+Euler) + x**2/4 - ...)
-// therefore y0(x)-2/pi*j0(x)*ln(x) is an even function.
-// We use the following function to approximate y0,
-// y0(x) = U(z)/V(z) + (2/pi)*(j0(x)*ln(x)), z= x**2
-// where
-// U(z) = u00 + u01*z + ... + u06*z**6
-// V(z) = 1 + v01*z + ... + v04*z**4
-// with absolute approximation error bounded by 2**-72.
-// Note: For tiny x, U/V = u0 and j0(x)~1, hence
-// y0(tiny) = u0 + (2/pi)*ln(tiny), (choose tiny<2**-27)
-// 2. For x>=2.
-// y0(x) = sqrt(2/(pi*x))*(p0(x)*cos(x0)+q0(x)*sin(x0))
-// where x0 = x-pi/4. It is better to compute sin(x0),cos(x0)
-// by the method mentioned above.
-// 3. Special cases: y0(0)=-inf, y0(x<0)=NaN, y0(inf)=0.
-//
-
-// J0 returns the order-zero Bessel function of the first kind.
-//
-// Special cases are:
-// J0(±Inf) = 0
-// J0(0) = 1
-// J0(NaN) = NaN
-func J0(x float64) float64 {
- const (
- Huge = 1e300
- TwoM27 = 1.0 / (1 << 27) // 2**-27 0x3e40000000000000
- TwoM13 = 1.0 / (1 << 13) // 2**-13 0x3f20000000000000
- Two129 = 1 << 129 // 2**129 0x4800000000000000
- // R0/S0 on [0, 2]
- R02 = 1.56249999999999947958e-02 // 0x3F8FFFFFFFFFFFFD
- R03 = -1.89979294238854721751e-04 // 0xBF28E6A5B61AC6E9
- R04 = 1.82954049532700665670e-06 // 0x3EBEB1D10C503919
- R05 = -4.61832688532103189199e-09 // 0xBE33D5E773D63FCE
- S01 = 1.56191029464890010492e-02 // 0x3F8FFCE882C8C2A4
- S02 = 1.16926784663337450260e-04 // 0x3F1EA6D2DD57DBF4
- S03 = 5.13546550207318111446e-07 // 0x3EA13B54CE84D5A9
- S04 = 1.16614003333790000205e-09 // 0x3E1408BCF4745D8F
- )
- // special cases
- switch {
- case IsNaN(x):
- return x
- case IsInf(x, 0):
- return 0
- case x == 0:
- return 1
- }
-
- x = Abs(x)
- if x >= 2 {
- s, c := Sincos(x)
- ss := s - c
- cc := s + c
-
- // make sure x+x does not overflow
- if x < MaxFloat64/2 {
- z := -Cos(x + x)
- if s*c < 0 {
- cc = z / ss
- } else {
- ss = z / cc
- }
- }
-
- // j0(x) = 1/sqrt(pi) * (P(0,x)*cc - Q(0,x)*ss) / sqrt(x)
- // y0(x) = 1/sqrt(pi) * (P(0,x)*ss + Q(0,x)*cc) / sqrt(x)
-
- var z float64
- if x > Two129 { // |x| > ~6.8056e+38
- z = (1 / SqrtPi) * cc / Sqrt(x)
- } else {
- u := pzero(x)
- v := qzero(x)
- z = (1 / SqrtPi) * (u*cc - v*ss) / Sqrt(x)
- }
- return z // |x| >= 2.0
- }
- if x < TwoM13 { // |x| < ~1.2207e-4
- if x < TwoM27 {
- return 1 // |x| < ~7.4506e-9
- }
- return 1 - 0.25*x*x // ~7.4506e-9 < |x| < ~1.2207e-4
- }
- z := x * x
- r := z * (R02 + z*(R03+z*(R04+z*R05)))
- s := 1 + z*(S01+z*(S02+z*(S03+z*S04)))
- if x < 1 {
- return 1 + z*(-0.25+(r/s)) // |x| < 1.00
- }
- u := 0.5 * x
- return (1+u)*(1-u) + z*(r/s) // 1.0 < |x| < 2.0
-}
-
-// Y0 returns the order-zero Bessel function of the second kind.
-//
-// Special cases are:
-// Y0(+Inf) = 0
-// Y0(0) = -Inf
-// Y0(x < 0) = NaN
-// Y0(NaN) = NaN
-func Y0(x float64) float64 {
- const (
- TwoM27 = 1.0 / (1 << 27) // 2**-27 0x3e40000000000000
- Two129 = 1 << 129 // 2**129 0x4800000000000000
- U00 = -7.38042951086872317523e-02 // 0xBFB2E4D699CBD01F
- U01 = 1.76666452509181115538e-01 // 0x3FC69D019DE9E3FC
- U02 = -1.38185671945596898896e-02 // 0xBF8C4CE8B16CFA97
- U03 = 3.47453432093683650238e-04 // 0x3F36C54D20B29B6B
- U04 = -3.81407053724364161125e-06 // 0xBECFFEA773D25CAD
- U05 = 1.95590137035022920206e-08 // 0x3E5500573B4EABD4
- U06 = -3.98205194132103398453e-11 // 0xBDC5E43D693FB3C8
- V01 = 1.27304834834123699328e-02 // 0x3F8A127091C9C71A
- V02 = 7.60068627350353253702e-05 // 0x3F13ECBBF578C6C1
- V03 = 2.59150851840457805467e-07 // 0x3E91642D7FF202FD
- V04 = 4.41110311332675467403e-10 // 0x3DFE50183BD6D9EF
- )
- // special cases
- switch {
- case x < 0 || IsNaN(x):
- return NaN()
- case IsInf(x, 1):
- return 0
- case x == 0:
- return Inf(-1)
- }
-
- if x >= 2 { // |x| >= 2.0
-
- // y0(x) = sqrt(2/(pi*x))*(p0(x)*sin(x0)+q0(x)*cos(x0))
- // where x0 = x-pi/4
- // Better formula:
- // cos(x0) = cos(x)cos(pi/4)+sin(x)sin(pi/4)
- // = 1/sqrt(2) * (sin(x) + cos(x))
- // sin(x0) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
- // = 1/sqrt(2) * (sin(x) - cos(x))
- // To avoid cancellation, use
- // sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
- // to compute the worse one.
-
- s, c := Sincos(x)
- ss := s - c
- cc := s + c
-
- // j0(x) = 1/sqrt(pi) * (P(0,x)*cc - Q(0,x)*ss) / sqrt(x)
- // y0(x) = 1/sqrt(pi) * (P(0,x)*ss + Q(0,x)*cc) / sqrt(x)
-
- // make sure x+x does not overflow
- if x < MaxFloat64/2 {
- z := -Cos(x + x)
- if s*c < 0 {
- cc = z / ss
- } else {
- ss = z / cc
- }
- }
- var z float64
- if x > Two129 { // |x| > ~6.8056e+38
- z = (1 / SqrtPi) * ss / Sqrt(x)
- } else {
- u := pzero(x)
- v := qzero(x)
- z = (1 / SqrtPi) * (u*ss + v*cc) / Sqrt(x)
- }
- return z // |x| >= 2.0
- }
- if x <= TwoM27 {
- return U00 + (2/Pi)*Log(x) // |x| < ~7.4506e-9
- }
- z := x * x
- u := U00 + z*(U01+z*(U02+z*(U03+z*(U04+z*(U05+z*U06)))))
- v := 1 + z*(V01+z*(V02+z*(V03+z*V04)))
- return u/v + (2/Pi)*J0(x)*Log(x) // ~7.4506e-9 < |x| < 2.0
-}
-
-// The asymptotic expansion of pzero is
-//	1 - 9/128 s**2 + 11025/98304 s**4 - ..., where s = 1/x.
-// For x >= 2, we approximate pzero by
-// pzero(x) = 1 + (R/S)
-// where R = pR0 + pR1*s**2 + pR2*s**4 + ... + pR5*s**10
-// S = 1 + pS0*s**2 + ... + pS4*s**10
-// and
-// | pzero(x)-1-R/S | <= 2 ** ( -60.26)
-
-// for x in [inf, 8]=1/[0,0.125]
-var p0R8 = [6]float64{
- 0.00000000000000000000e+00, // 0x0000000000000000
- -7.03124999999900357484e-02, // 0xBFB1FFFFFFFFFD32
- -8.08167041275349795626e+00, // 0xC02029D0B44FA779
- -2.57063105679704847262e+02, // 0xC07011027B19E863
- -2.48521641009428822144e+03, // 0xC0A36A6ECD4DCAFC
- -5.25304380490729545272e+03, // 0xC0B4850B36CC643D
-}
-var p0S8 = [5]float64{
- 1.16534364619668181717e+02, // 0x405D223307A96751
- 3.83374475364121826715e+03, // 0x40ADF37D50596938
- 4.05978572648472545552e+04, // 0x40E3D2BB6EB6B05F
- 1.16752972564375915681e+05, // 0x40FC810F8F9FA9BD
- 4.76277284146730962675e+04, // 0x40E741774F2C49DC
-}
-
-// for x in [8,4.5454]=1/[0.125,0.22001]
-var p0R5 = [6]float64{
- -1.14125464691894502584e-11, // 0xBDA918B147E495CC
- -7.03124940873599280078e-02, // 0xBFB1FFFFE69AFBC6
- -4.15961064470587782438e+00, // 0xC010A370F90C6BBF
- -6.76747652265167261021e+01, // 0xC050EB2F5A7D1783
- -3.31231299649172967747e+02, // 0xC074B3B36742CC63
- -3.46433388365604912451e+02, // 0xC075A6EF28A38BD7
-}
-var p0S5 = [5]float64{
- 6.07539382692300335975e+01, // 0x404E60810C98C5DE
- 1.05125230595704579173e+03, // 0x40906D025C7E2864
- 5.97897094333855784498e+03, // 0x40B75AF88FBE1D60
- 9.62544514357774460223e+03, // 0x40C2CCB8FA76FA38
- 2.40605815922939109441e+03, // 0x40A2CC1DC70BE864
-}
-
-// for x in [4.547,2.8571]=1/[0.2199,0.35001]
-var p0R3 = [6]float64{
- -2.54704601771951915620e-09, // 0xBE25E1036FE1AA86
- -7.03119616381481654654e-02, // 0xBFB1FFF6F7C0E24B
- -2.40903221549529611423e+00, // 0xC00345B2AEA48074
- -2.19659774734883086467e+01, // 0xC035F74A4CB94E14
- -5.80791704701737572236e+01, // 0xC04D0A22420A1A45
- -3.14479470594888503854e+01, // 0xC03F72ACA892D80F
-}
-var p0S3 = [5]float64{
- 3.58560338055209726349e+01, // 0x4041ED9284077DD3
- 3.61513983050303863820e+02, // 0x40769839464A7C0E
- 1.19360783792111533330e+03, // 0x4092A66E6D1061D6
- 1.12799679856907414432e+03, // 0x40919FFCB8C39B7E
- 1.73580930813335754692e+02, // 0x4065B296FC379081
-}
-
-// for x in [2.8570,2]=1/[0.3499,0.5]
-var p0R2 = [6]float64{
- -8.87534333032526411254e-08, // 0xBE77D316E927026D
- -7.03030995483624743247e-02, // 0xBFB1FF62495E1E42
- -1.45073846780952986357e+00, // 0xBFF736398A24A843
- -7.63569613823527770791e+00, // 0xC01E8AF3EDAFA7F3
- -1.11931668860356747786e+01, // 0xC02662E6C5246303
- -3.23364579351335335033e+00, // 0xC009DE81AF8FE70F
-}
-var p0S2 = [5]float64{
- 2.22202997532088808441e+01, // 0x40363865908B5959
- 1.36206794218215208048e+02, // 0x4061069E0EE8878F
- 2.70470278658083486789e+02, // 0x4070E78642EA079B
- 1.53875394208320329881e+02, // 0x40633C033AB6FAFF
- 1.46576176948256193810e+01, // 0x402D50B344391809
-}
-
-func pzero(x float64) float64 {
- var p *[6]float64
- var q *[5]float64
- if x >= 8 {
- p = &p0R8
- q = &p0S8
- } else if x >= 4.5454 {
- p = &p0R5
- q = &p0S5
- } else if x >= 2.8571 {
- p = &p0R3
- q = &p0S3
- } else if x >= 2 {
- p = &p0R2
- q = &p0S2
- }
- z := 1 / (x * x)
- r := p[0] + z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))))
- s := 1 + z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*q[4]))))
- return 1 + r/s
-}
-
-// For x >= 8, the asymptotic expansion of qzero is
-//	-1/8 s + 75/1024 s**3 - ..., where s = 1/x.
-// We approximate qzero by
-//	qzero(x) = s*(-0.125 + (R/S))
-// where R = qR0 + qR1*s**2 + qR2*s**4 + ... + qR5*s**10
-//	S = 1 + qS0*s**2 + ... + qS5*s**12
-// and
-//	|qzero(x)/s + 0.125 - R/S| <= 2**(-61.22)
-
-// for x in [inf, 8]=1/[0,0.125]
-var q0R8 = [6]float64{
- 0.00000000000000000000e+00, // 0x0000000000000000
- 7.32421874999935051953e-02, // 0x3FB2BFFFFFFFFE2C
- 1.17682064682252693899e+01, // 0x402789525BB334D6
- 5.57673380256401856059e+02, // 0x40816D6315301825
- 8.85919720756468632317e+03, // 0x40C14D993E18F46D
- 3.70146267776887834771e+04, // 0x40E212D40E901566
-}
-var q0S8 = [6]float64{
- 1.63776026895689824414e+02, // 0x406478D5365B39BC
- 8.09834494656449805916e+03, // 0x40BFA2584E6B0563
- 1.42538291419120476348e+05, // 0x4101665254D38C3F
- 8.03309257119514397345e+05, // 0x412883DA83A52B43
- 8.40501579819060512818e+05, // 0x4129A66B28DE0B3D
- -3.43899293537866615225e+05, // 0xC114FD6D2C9530C5
-}
-
-// for x in [8,4.5454]=1/[0.125,0.22001]
-var q0R5 = [6]float64{
- 1.84085963594515531381e-11, // 0x3DB43D8F29CC8CD9
- 7.32421766612684765896e-02, // 0x3FB2BFFFD172B04C
- 5.83563508962056953777e+00, // 0x401757B0B9953DD3
- 1.35111577286449829671e+02, // 0x4060E3920A8788E9
- 1.02724376596164097464e+03, // 0x40900CF99DC8C481
- 1.98997785864605384631e+03, // 0x409F17E953C6E3A6
-}
-var q0S5 = [6]float64{
- 8.27766102236537761883e+01, // 0x4054B1B3FB5E1543
- 2.07781416421392987104e+03, // 0x40A03BA0DA21C0CE
- 1.88472887785718085070e+04, // 0x40D267D27B591E6D
- 5.67511122894947329769e+04, // 0x40EBB5E397E02372
- 3.59767538425114471465e+04, // 0x40E191181F7A54A0
- -5.35434275601944773371e+03, // 0xC0B4EA57BEDBC609
-}
-
-// for x in [4.547,2.8571]=1/[0.2199,0.35001]
-var q0R3 = [6]float64{
- 4.37741014089738620906e-09, // 0x3E32CD036ADECB82
- 7.32411180042911447163e-02, // 0x3FB2BFEE0E8D0842
- 3.34423137516170720929e+00, // 0x400AC0FC61149CF5
- 4.26218440745412650017e+01, // 0x40454F98962DAEDD
- 1.70808091340565596283e+02, // 0x406559DBE25EFD1F
- 1.66733948696651168575e+02, // 0x4064D77C81FA21E0
-}
-var q0S3 = [6]float64{
- 4.87588729724587182091e+01, // 0x40486122BFE343A6
- 7.09689221056606015736e+02, // 0x40862D8386544EB3
- 3.70414822620111362994e+03, // 0x40ACF04BE44DFC63
- 6.46042516752568917582e+03, // 0x40B93C6CD7C76A28
- 2.51633368920368957333e+03, // 0x40A3A8AAD94FB1C0
- -1.49247451836156386662e+02, // 0xC062A7EB201CF40F
-}
-
-// for x in [2.8570,2]=1/[0.3499,0.5]
-var q0R2 = [6]float64{
- 1.50444444886983272379e-07, // 0x3E84313B54F76BDB
- 7.32234265963079278272e-02, // 0x3FB2BEC53E883E34
- 1.99819174093815998816e+00, // 0x3FFFF897E727779C
- 1.44956029347885735348e+01, // 0x402CFDBFAAF96FE5
- 3.16662317504781540833e+01, // 0x403FAA8E29FBDC4A
- 1.62527075710929267416e+01, // 0x403040B171814BB4
-}
-var q0S2 = [6]float64{
- 3.03655848355219184498e+01, // 0x403E5D96F7C07AED
- 2.69348118608049844624e+02, // 0x4070D591E4D14B40
- 8.44783757595320139444e+02, // 0x408A664522B3BF22
- 8.82935845112488550512e+02, // 0x408B977C9C5CC214
- 2.12666388511798828631e+02, // 0x406A95530E001365
- -5.31095493882666946917e+00, // 0xC0153E6AF8B32931
-}
-
-func qzero(x float64) float64 {
- var p, q *[6]float64
- if x >= 8 {
- p = &q0R8
- q = &q0S8
- } else if x >= 4.5454 {
- p = &q0R5
- q = &q0S5
- } else if x >= 2.8571 {
- p = &q0R3
- q = &q0S3
- } else if x >= 2 {
- p = &q0R2
- q = &q0S2
- }
- z := 1 / (x * x)
- r := p[0] + z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))))
- s := 1 + z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*(q[4]+z*q[5])))))
- return (-0.125 + r/s) / x
-}
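A minimal sketch (illustrative, not part of the deleted file) of the pattern pzero and qzero share above: a rational approximation R(z)/S(z) evaluated by Horner's rule in z = 1/x**2, with coefficient tables chosen per range of x. The helper name ratApprox is hypothetical.

    package main

    import "fmt"

    // ratApprox mirrors pzero's shape: 1 + R(z)/S(z) with z = 1/x**2.
    func ratApprox(x float64, p [6]float64, q [5]float64) float64 {
        z := 1 / (x * x)
        r := p[0] + z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))))
        s := 1 + z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*q[4]))))
        return 1 + r/s
    }

    func main() {
        // With all-zero coefficient tables the result collapses to 1,
        // pzero's leading asymptotic term for large x.
        fmt.Println(ratApprox(100, [6]float64{}, [5]float64{})) // 1
    }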
diff --git a/contrib/go/_std_1.18/src/math/j1.go b/contrib/go/_std_1.18/src/math/j1.go
deleted file mode 100644
index 7c7d279730..0000000000
--- a/contrib/go/_std_1.18/src/math/j1.go
+++ /dev/null
@@ -1,422 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-/*
- Bessel function of the first and second kinds of order one.
-*/
-
-// The original C code and the long comment below are
-// from FreeBSD's /usr/src/lib/msun/src/e_j1.c and
-// came with this notice. The Go code is a simplified
-// version of the original C.
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunPro, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-// __ieee754_j1(x), __ieee754_y1(x)
-// Bessel function of the first and second kinds of order one.
-// Method -- j1(x):
-// 1. For tiny x, we use j1(x) = x/2 - x**3/16 + x**5/384 - ...
-// 2. Reduce x to |x| since j1(x)=-j1(-x), and
-// for x in (0,2)
-// j1(x) = x/2 + x*z*R0/S0, where z = x*x;
-// (precision: |j1/x - 1/2 - R0/S0 |<2**-61.51 )
-// for x in (2,inf)
-// j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
-// y1(x) = sqrt(2/(pi*x))*(p1(x)*sin(x1)+q1(x)*cos(x1))
-// where x1 = x-3*pi/4. It is better to compute sin(x1),cos(x1)
-// as follows:
-// cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
-// = 1/sqrt(2) * (sin(x) - cos(x))
-// sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
-// = -1/sqrt(2) * (sin(x) + cos(x))
-// (To avoid cancellation, use
-// sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
-// to compute the worse one.)
-//
-// 3 Special cases
-// j1(nan)= nan
-// j1(0) = 0
-// j1(inf) = 0
-//
-// Method -- y1(x):
-// 1. screen out x<=0 cases: y1(0)=-inf, y1(x<0)=NaN
-// 2. For x<2.
-// Since
-// y1(x) = 2/pi*(j1(x)*(ln(x/2)+Euler)-1/x-x/2+5/64*x**3-...)
-// therefore y1(x)-2/pi*j1(x)*ln(x)-1/x is an odd function.
-// We use the following function to approximate y1,
-// y1(x) = x*U(z)/V(z) + (2/pi)*(j1(x)*ln(x)-1/x), z= x**2
-// where for x in [0,2] (abs err less than 2**-65.89)
-// U(z) = U0[0] + U0[1]*z + ... + U0[4]*z**4
-// V(z) = 1 + v0[0]*z + ... + v0[4]*z**5
-// Note: For tiny x, 1/x dominates y1 and hence
-// y1(tiny) = -2/pi/tiny, (choose tiny<2**-54)
-// 3. For x>=2.
-// y1(x) = sqrt(2/(pi*x))*(p1(x)*sin(x1)+q1(x)*cos(x1))
-// where x1 = x-3*pi/4. It is better to compute sin(x1),cos(x1)
-// by the method mentioned above.
-
-// J1 returns the order-one Bessel function of the first kind.
-//
-// Special cases are:
-// J1(±Inf) = 0
-// J1(NaN) = NaN
-func J1(x float64) float64 {
- const (
- TwoM27 = 1.0 / (1 << 27) // 2**-27 0x3e40000000000000
- Two129 = 1 << 129 // 2**129 0x4800000000000000
- // R0/S0 on [0, 2]
- R00 = -6.25000000000000000000e-02 // 0xBFB0000000000000
- R01 = 1.40705666955189706048e-03 // 0x3F570D9F98472C61
- R02 = -1.59955631084035597520e-05 // 0xBEF0C5C6BA169668
- R03 = 4.96727999609584448412e-08 // 0x3E6AAAFA46CA0BD9
- S01 = 1.91537599538363460805e-02 // 0x3F939D0B12637E53
- S02 = 1.85946785588630915560e-04 // 0x3F285F56B9CDF664
- S03 = 1.17718464042623683263e-06 // 0x3EB3BFF8333F8498
- S04 = 5.04636257076217042715e-09 // 0x3E35AC88C97DFF2C
- S05 = 1.23542274426137913908e-11 // 0x3DAB2ACFCFB97ED8
- )
- // special cases
- switch {
- case IsNaN(x):
- return x
- case IsInf(x, 0) || x == 0:
- return 0
- }
-
- sign := false
- if x < 0 {
- x = -x
- sign = true
- }
- if x >= 2 {
- s, c := Sincos(x)
- ss := -s - c
- cc := s - c
-
- // make sure x+x does not overflow
- if x < MaxFloat64/2 {
- z := Cos(x + x)
- if s*c > 0 {
- cc = z / ss
- } else {
- ss = z / cc
- }
- }
-
- // j1(x) = 1/sqrt(pi) * (P(1,x)*cc - Q(1,x)*ss) / sqrt(x)
- // y1(x) = 1/sqrt(pi) * (P(1,x)*ss + Q(1,x)*cc) / sqrt(x)
-
- var z float64
- if x > Two129 {
- z = (1 / SqrtPi) * cc / Sqrt(x)
- } else {
- u := pone(x)
- v := qone(x)
- z = (1 / SqrtPi) * (u*cc - v*ss) / Sqrt(x)
- }
- if sign {
- return -z
- }
- return z
- }
- if x < TwoM27 { // |x|<2**-27
- return 0.5 * x // the inexact result is necessary if x != 0
- }
- z := x * x
- r := z * (R00 + z*(R01+z*(R02+z*R03)))
- s := 1.0 + z*(S01+z*(S02+z*(S03+z*(S04+z*S05))))
- r *= x
- z = 0.5*x + r/s
- if sign {
- return -z
- }
- return z
-}
-
-// Y1 returns the order-one Bessel function of the second kind.
-//
-// Special cases are:
-// Y1(+Inf) = 0
-// Y1(0) = -Inf
-// Y1(x < 0) = NaN
-// Y1(NaN) = NaN
-func Y1(x float64) float64 {
- const (
- TwoM54 = 1.0 / (1 << 54) // 2**-54 0x3c90000000000000
- Two129 = 1 << 129 // 2**129 0x4800000000000000
- U00 = -1.96057090646238940668e-01 // 0xBFC91866143CBC8A
- U01 = 5.04438716639811282616e-02 // 0x3FA9D3C776292CD1
- U02 = -1.91256895875763547298e-03 // 0xBF5F55E54844F50F
- U03 = 2.35252600561610495928e-05 // 0x3EF8AB038FA6B88E
- U04 = -9.19099158039878874504e-08 // 0xBE78AC00569105B8
- V00 = 1.99167318236649903973e-02 // 0x3F94650D3F4DA9F0
- V01 = 2.02552581025135171496e-04 // 0x3F2A8C896C257764
- V02 = 1.35608801097516229404e-06 // 0x3EB6C05A894E8CA6
- V03 = 6.22741452364621501295e-09 // 0x3E3ABF1D5BA69A86
- V04 = 1.66559246207992079114e-11 // 0x3DB25039DACA772A
- )
- // special cases
- switch {
- case x < 0 || IsNaN(x):
- return NaN()
- case IsInf(x, 1):
- return 0
- case x == 0:
- return Inf(-1)
- }
-
- if x >= 2 {
- s, c := Sincos(x)
- ss := -s - c
- cc := s - c
-
- // make sure x+x does not overflow
- if x < MaxFloat64/2 {
- z := Cos(x + x)
- if s*c > 0 {
- cc = z / ss
- } else {
- ss = z / cc
- }
- }
- // y1(x) = sqrt(2/(pi*x))*(p1(x)*sin(x0)+q1(x)*cos(x0))
- // where x0 = x-3pi/4
- // Better formula:
- // cos(x0) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
- // = 1/sqrt(2) * (sin(x) - cos(x))
- // sin(x0) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
- // = -1/sqrt(2) * (cos(x) + sin(x))
- // To avoid cancellation, use
- // sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
- // to compute the worse one.
-
- var z float64
- if x > Two129 {
- z = (1 / SqrtPi) * ss / Sqrt(x)
- } else {
- u := pone(x)
- v := qone(x)
- z = (1 / SqrtPi) * (u*ss + v*cc) / Sqrt(x)
- }
- return z
- }
- if x <= TwoM54 { // x <= 2**-54
- return -(2 / Pi) / x
- }
- z := x * x
- u := U00 + z*(U01+z*(U02+z*(U03+z*U04)))
- v := 1 + z*(V00+z*(V01+z*(V02+z*(V03+z*V04))))
- return x*(u/v) + (2/Pi)*(J1(x)*Log(x)-1/x)
-}
-
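A hedged usage sketch (standard library only, not from the deleted file) exercising the special cases documented above for J1 and Y1:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        fmt.Println(math.J1(0))           // 0
        fmt.Println(math.J1(math.Inf(1))) // 0
        fmt.Println(math.J1(1))           // ≈ 0.4400505857449335
        fmt.Println(math.Y1(0))           // -Inf
        fmt.Println(math.Y1(-1))          // NaN
    }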
-// For x >= 8, the asymptotic expansion of pone is
-// 1 + 15/128 s**2 - 4725/2**15 s**4 - ..., where s = 1/x.
-// We approximate pone by
-// pone(x) = 1 + (R/S)
-// where R = pr0 + pr1*s**2 + pr2*s**4 + ... + pr5*s**10
-// S = 1 + ps0*s**2 + ... + ps4*s**10
-// and
-// | pone(x)-1-R/S | <= 2**(-60.06)
-
-// for x in [inf, 8]=1/[0,0.125]
-var p1R8 = [6]float64{
- 0.00000000000000000000e+00, // 0x0000000000000000
- 1.17187499999988647970e-01, // 0x3FBDFFFFFFFFFCCE
- 1.32394806593073575129e+01, // 0x402A7A9D357F7FCE
- 4.12051854307378562225e+02, // 0x4079C0D4652EA590
- 3.87474538913960532227e+03, // 0x40AE457DA3A532CC
- 7.91447954031891731574e+03, // 0x40BEEA7AC32782DD
-}
-var p1S8 = [5]float64{
- 1.14207370375678408436e+02, // 0x405C8D458E656CAC
- 3.65093083420853463394e+03, // 0x40AC85DC964D274F
- 3.69562060269033463555e+04, // 0x40E20B8697C5BB7F
- 9.76027935934950801311e+04, // 0x40F7D42CB28F17BB
- 3.08042720627888811578e+04, // 0x40DE1511697A0B2D
-}
-
-// for x in [8,4.5454] = 1/[0.125,0.22001]
-var p1R5 = [6]float64{
- 1.31990519556243522749e-11, // 0x3DAD0667DAE1CA7D
- 1.17187493190614097638e-01, // 0x3FBDFFFFE2C10043
- 6.80275127868432871736e+00, // 0x401B36046E6315E3
- 1.08308182990189109773e+02, // 0x405B13B9452602ED
- 5.17636139533199752805e+02, // 0x40802D16D052D649
- 5.28715201363337541807e+02, // 0x408085B8BB7E0CB7
-}
-var p1S5 = [5]float64{
- 5.92805987221131331921e+01, // 0x404DA3EAA8AF633D
- 9.91401418733614377743e+02, // 0x408EFB361B066701
- 5.35326695291487976647e+03, // 0x40B4E9445706B6FB
- 7.84469031749551231769e+03, // 0x40BEA4B0B8A5BB15
- 1.50404688810361062679e+03, // 0x40978030036F5E51
-}
-
-// for x in [4.5453,2.8571] = 1/[0.2199,0.35001]
-var p1R3 = [6]float64{
- 3.02503916137373618024e-09, // 0x3E29FC21A7AD9EDD
- 1.17186865567253592491e-01, // 0x3FBDFFF55B21D17B
- 3.93297750033315640650e+00, // 0x400F76BCE85EAD8A
- 3.51194035591636932736e+01, // 0x40418F489DA6D129
- 9.10550110750781271918e+01, // 0x4056C3854D2C1837
- 4.85590685197364919645e+01, // 0x4048478F8EA83EE5
-}
-var p1S3 = [5]float64{
- 3.47913095001251519989e+01, // 0x40416549A134069C
- 3.36762458747825746741e+02, // 0x40750C3307F1A75F
- 1.04687139975775130551e+03, // 0x40905B7C5037D523
- 8.90811346398256432622e+02, // 0x408BD67DA32E31E9
- 1.03787932439639277504e+02, // 0x4059F26D7C2EED53
-}
-
-// for x in [2.8570,2] = 1/[0.3499,0.5]
-var p1R2 = [6]float64{
- 1.07710830106873743082e-07, // 0x3E7CE9D4F65544F4
- 1.17176219462683348094e-01, // 0x3FBDFF42BE760D83
- 2.36851496667608785174e+00, // 0x4002F2B7F98FAEC0
- 1.22426109148261232917e+01, // 0x40287C377F71A964
- 1.76939711271687727390e+01, // 0x4031B1A8177F8EE2
- 5.07352312588818499250e+00, // 0x40144B49A574C1FE
-}
-var p1S2 = [5]float64{
- 2.14364859363821409488e+01, // 0x40356FBD8AD5ECDC
- 1.25290227168402751090e+02, // 0x405F529314F92CD5
- 2.32276469057162813669e+02, // 0x406D08D8D5A2DBD9
- 1.17679373287147100768e+02, // 0x405D6B7ADA1884A9
- 8.36463893371618283368e+00, // 0x4020BAB1F44E5192
-}
-
-func pone(x float64) float64 {
- var p *[6]float64
- var q *[5]float64
- if x >= 8 {
- p = &p1R8
- q = &p1S8
- } else if x >= 4.5454 {
- p = &p1R5
- q = &p1S5
- } else if x >= 2.8571 {
- p = &p1R3
- q = &p1S3
- } else if x >= 2 {
- p = &p1R2
- q = &p1S2
- }
- z := 1 / (x * x)
- r := p[0] + z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))))
- s := 1.0 + z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*q[4]))))
- return 1 + r/s
-}
-
-// For x >= 8, the asymptotic expansion of qone is
-// 3/8 s - 105/1024 s**3 - ..., where s = 1/x.
-// We approximate qone by
-// qone(x) = s*(0.375 + (R/S))
-// where R = qr1*s**2 + qr2*s**4 + ... + qr5*s**10
-// S = 1 + qs1*s**2 + ... + qs6*s**12
-// and
-// | qone(x)/s -0.375-R/S | <= 2**(-61.13)
-
-// for x in [inf, 8] = 1/[0,0.125]
-var q1R8 = [6]float64{
- 0.00000000000000000000e+00, // 0x0000000000000000
- -1.02539062499992714161e-01, // 0xBFBA3FFFFFFFFDF3
- -1.62717534544589987888e+01, // 0xC0304591A26779F7
- -7.59601722513950107896e+02, // 0xC087BCD053E4B576
- -1.18498066702429587167e+04, // 0xC0C724E740F87415
- -4.84385124285750353010e+04, // 0xC0E7A6D065D09C6A
-}
-var q1S8 = [6]float64{
- 1.61395369700722909556e+02, // 0x40642CA6DE5BCDE5
- 7.82538599923348465381e+03, // 0x40BE9162D0D88419
- 1.33875336287249578163e+05, // 0x4100579AB0B75E98
- 7.19657723683240939863e+05, // 0x4125F65372869C19
- 6.66601232617776375264e+05, // 0x412457D27719AD5C
- -2.94490264303834643215e+05, // 0xC111F9690EA5AA18
-}
-
-// for x in [8,4.5454] = 1/[0.125,0.22001]
-var q1R5 = [6]float64{
- -2.08979931141764104297e-11, // 0xBDB6FA431AA1A098
- -1.02539050241375426231e-01, // 0xBFBA3FFFCB597FEF
- -8.05644828123936029840e+00, // 0xC0201CE6CA03AD4B
- -1.83669607474888380239e+02, // 0xC066F56D6CA7B9B0
- -1.37319376065508163265e+03, // 0xC09574C66931734F
- -2.61244440453215656817e+03, // 0xC0A468E388FDA79D
-}
-var q1S5 = [6]float64{
- 8.12765501384335777857e+01, // 0x405451B2FF5A11B2
- 1.99179873460485964642e+03, // 0x409F1F31E77BF839
- 1.74684851924908907677e+04, // 0x40D10F1F0D64CE29
- 4.98514270910352279316e+04, // 0x40E8576DAABAD197
- 2.79480751638918118260e+04, // 0x40DB4B04CF7C364B
- -4.71918354795128470869e+03, // 0xC0B26F2EFCFFA004
-}
-
-// for x in [4.5454,2.8571] = 1/[0.2199,0.35001] ???
-var q1R3 = [6]float64{
- -5.07831226461766561369e-09, // 0xBE35CFA9D38FC84F
- -1.02537829820837089745e-01, // 0xBFBA3FEB51AEED54
- -4.61011581139473403113e+00, // 0xC01270C23302D9FF
- -5.78472216562783643212e+01, // 0xC04CEC71C25D16DA
- -2.28244540737631695038e+02, // 0xC06C87D34718D55F
- -2.19210128478909325622e+02, // 0xC06B66B95F5C1BF6
-}
-var q1S3 = [6]float64{
- 4.76651550323729509273e+01, // 0x4047D523CCD367E4
- 6.73865112676699709482e+02, // 0x40850EEBC031EE3E
- 3.38015286679526343505e+03, // 0x40AA684E448E7C9A
- 5.54772909720722782367e+03, // 0x40B5ABBAA61D54A6
- 1.90311919338810798763e+03, // 0x409DBC7A0DD4DF4B
- -1.35201191444307340817e+02, // 0xC060E670290A311F
-}
-
-// for x in [2.8570,2] = 1/[0.3499,0.5]
-var q1R2 = [6]float64{
- -1.78381727510958865572e-07, // 0xBE87F12644C626D2
- -1.02517042607985553460e-01, // 0xBFBA3E8E9148B010
- -2.75220568278187460720e+00, // 0xC006048469BB4EDA
- -1.96636162643703720221e+01, // 0xC033A9E2C168907F
- -4.23253133372830490089e+01, // 0xC04529A3DE104AAA
- -2.13719211703704061733e+01, // 0xC0355F3639CF6E52
-}
-var q1S2 = [6]float64{
- 2.95333629060523854548e+01, // 0x403D888A78AE64FF
- 2.52981549982190529136e+02, // 0x406F9F68DB821CBA
- 7.57502834868645436472e+02, // 0x4087AC05CE49A0F7
- 7.39393205320467245656e+02, // 0x40871B2548D4C029
- 1.55949003336666123687e+02, // 0x40637E5E3C3ED8D4
- -4.95949898822628210127e+00, // 0xC013D686E71BE86B
-}
-
-func qone(x float64) float64 {
- var p, q *[6]float64
- if x >= 8 {
- p = &q1R8
- q = &q1S8
- } else if x >= 4.5454 {
- p = &q1R5
- q = &q1S5
- } else if x >= 2.8571 {
- p = &q1R3
- q = &q1S3
- } else if x >= 2 {
- p = &q1R2
- q = &q1S2
- }
- z := 1 / (x * x)
- r := p[0] + z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))))
- s := 1 + z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*(q[4]+z*q[5])))))
- return (0.375 + r/s) / x
-}
diff --git a/contrib/go/_std_1.18/src/math/jn.go b/contrib/go/_std_1.18/src/math/jn.go
deleted file mode 100644
index b1aca8ff6b..0000000000
--- a/contrib/go/_std_1.18/src/math/jn.go
+++ /dev/null
@@ -1,304 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-/*
- Bessel function of the first and second kinds of order n.
-*/
-
-// The original C code and the long comment below are
-// from FreeBSD's /usr/src/lib/msun/src/e_jn.c and
-// came with this notice. The Go code is a simplified
-// version of the original C.
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunPro, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-// __ieee754_jn(n, x), __ieee754_yn(n, x)
-// floating point Bessel's function of the 1st and 2nd kind
-// of order n
-//
-// Special cases:
-// y0(0)=y1(0)=yn(n,0) = -inf with division by zero signal;
-// y0(-ve)=y1(-ve)=yn(n,-ve) are NaN with invalid signal.
-// Note about jn(n,x), yn(n,x):
-// For n=0, j0(x) is called,
-// for n=1, j1(x) is called,
-// for n<x, forward recursion is used starting
-// from values of j0(x) and j1(x).
-// for n>x, a continued fraction approximation to
-// j(n,x)/j(n-1,x) is evaluated and then backward
-// recursion is used starting from a supposed value
-// for j(n,x). The resulting value of j(0,x) is
-// compared with the actual value to correct the
-// supposed value of j(n,x).
-//
-// yn(n,x) is similar in all respects, except
-// that forward recursion is used for all
-// values of n>1.
-
-// Jn returns the order-n Bessel function of the first kind.
-//
-// Special cases are:
-// Jn(n, ±Inf) = 0
-// Jn(n, NaN) = NaN
-func Jn(n int, x float64) float64 {
- const (
- TwoM29 = 1.0 / (1 << 29) // 2**-29 0x3e10000000000000
- Two302 = 1 << 302 // 2**302 0x52D0000000000000
- )
- // special cases
- switch {
- case IsNaN(x):
- return x
- case IsInf(x, 0):
- return 0
- }
- // J(-n, x) = (-1)**n * J(n, x), J(n, -x) = (-1)**n * J(n, x)
- // Thus, J(-n, x) = J(n, -x)
-
- if n == 0 {
- return J0(x)
- }
- if x == 0 {
- return 0
- }
- if n < 0 {
- n, x = -n, -x
- }
- if n == 1 {
- return J1(x)
- }
- sign := false
- if x < 0 {
- x = -x
- if n&1 == 1 {
- sign = true // odd n and negative x
- }
- }
- var b float64
- if float64(n) <= x {
- // Safe to use J(n+1,x)=2n/x *J(n,x)-J(n-1,x)
- if x >= Two302 { // x >= 2**302
-
- // (x >> n**2)
- // Jn(x) = cos(x-(2n+1)*pi/4)*sqrt(2/x*pi)
- // Yn(x) = sin(x-(2n+1)*pi/4)*sqrt(2/x*pi)
- // Let s=sin(x), c=cos(x),
- // xn=x-(2n+1)*pi/4, sqt2 = sqrt(2),then
- //
- // n sin(xn)*sqt2 cos(xn)*sqt2
- // ----------------------------------
- // 0 s-c c+s
- // 1 -s-c -c+s
- // 2 -s+c -c-s
- // 3 s+c c-s
-
- var temp float64
- switch s, c := Sincos(x); n & 3 {
- case 0:
- temp = c + s
- case 1:
- temp = -c + s
- case 2:
- temp = -c - s
- case 3:
- temp = c - s
- }
- b = (1 / SqrtPi) * temp / Sqrt(x)
- } else {
- b = J1(x)
- for i, a := 1, J0(x); i < n; i++ {
- a, b = b, b*(float64(i+i)/x)-a // avoid underflow
- }
- }
- } else {
- if x < TwoM29 { // x < 2**-29
- // x is tiny; return the first term of the Taylor expansion of J(n,x)
- // J(n,x) = 1/n!*(x/2)**n - ...
-
- if n > 33 { // underflow
- b = 0
- } else {
- temp := x * 0.5
- b = temp
- a := 1.0
- for i := 2; i <= n; i++ {
- a *= float64(i) // a = n!
- b *= temp // b = (x/2)**n
- }
- b /= a
- }
- } else {
- // use backward recurrence
- // J(n,x)/J(n-1,x) = x/(2n - x**2/(2(n+1) - x**2/(2(n+2) - ...)))
- //
- // (for large x) = 1/(2n/x - 1/(2(n+1)/x - 1/(2(n+2)/x - ...)))
- //
- // Let w = 2n/x and h=2/x, then the above quotient
- // is equal to the continued fraction:
- // 1/(w - 1/(w+h - 1/(w+2h - ...)))
- //
- // To determine how many terms needed, let
- // Q(0) = w, Q(1) = w(w+h) - 1,
- // Q(k) = (w+k*h)*Q(k-1) - Q(k-2),
- // When Q(k) > 1e4 good for single
- // When Q(k) > 1e9 good for double
- // When Q(k) > 1e17 good for quadruple
-
- // determine k
- w := float64(n+n) / x
- h := 2 / x
- q0 := w
- z := w + h
- q1 := w*z - 1
- k := 1
- for q1 < 1e9 {
- k++
- z += h
- q0, q1 = q1, z*q1-q0
- }
- m := n + n
- t := 0.0
- for i := 2 * (n + k); i >= m; i -= 2 {
- t = 1 / (float64(i)/x - t)
- }
- a := t
- b = 1
- // estimate log((2/x)**n*n!) = n*log(2/x)+n*ln(n)
- // Hence, if n*(log(2n/x)) > ...
- // single 8.8722839355e+01
- // double 7.09782712893383973096e+02
- // long double 1.1356523406294143949491931077970765006170e+04
- // then recurrent value may overflow and the result is
- // likely underflow to zero
-
- tmp := float64(n)
- v := 2 / x
- tmp = tmp * Log(Abs(v*tmp))
- if tmp < 7.09782712893383973096e+02 {
- for i := n - 1; i > 0; i-- {
- di := float64(i + i)
- a, b = b, b*di/x-a
- }
- } else {
- for i := n - 1; i > 0; i-- {
- di := float64(i + i)
- a, b = b, b*di/x-a
- // scale b to avoid spurious overflow
- if b > 1e100 {
- a /= b
- t /= b
- b = 1
- }
- }
- }
- b = t * J0(x) / b
- }
- }
- if sign {
- return -b
- }
- return b
-}
-
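A small sanity check (illustrative; the printed value is an assumption worth verifying) that the forward recurrence J(n+1,x) = (2n/x)*J(n,x) - J(n-1,x) used above is consistent with the public API:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        x := 10.0
        // One step of the recurrence with n = 1 should reproduce Jn(2, x).
        rhs := (2*1/x)*math.J1(x) - math.J0(x)
        fmt.Println(math.Jn(2, x), rhs) // both ≈ 0.2546303
    }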
-// Yn returns the order-n Bessel function of the second kind.
-//
-// Special cases are:
-// Yn(n, +Inf) = 0
-// Yn(n ≥ 0, 0) = -Inf
-// Yn(n < 0, 0) = +Inf if n is odd, -Inf if n is even
-// Yn(n, x < 0) = NaN
-// Yn(n, NaN) = NaN
-func Yn(n int, x float64) float64 {
- const Two302 = 1 << 302 // 2**302 0x52D0000000000000
- // special cases
- switch {
- case x < 0 || IsNaN(x):
- return NaN()
- case IsInf(x, 1):
- return 0
- }
-
- if n == 0 {
- return Y0(x)
- }
- if x == 0 {
- if n < 0 && n&1 == 1 {
- return Inf(1)
- }
- return Inf(-1)
- }
- sign := false
- if n < 0 {
- n = -n
- if n&1 == 1 {
- sign = true // sign true if n < 0 && |n| odd
- }
- }
- if n == 1 {
- if sign {
- return -Y1(x)
- }
- return Y1(x)
- }
- var b float64
- if x >= Two302 { // x >= 2**302
- // (x >> n**2)
- // Jn(x) = cos(x-(2n+1)*pi/4)*sqrt(2/x*pi)
- // Yn(x) = sin(x-(2n+1)*pi/4)*sqrt(2/x*pi)
- // Let s=sin(x), c=cos(x),
- // xn=x-(2n+1)*pi/4, sqt2 = sqrt(2),then
- //
- // n sin(xn)*sqt2 cos(xn)*sqt2
- // ----------------------------------
- // 0 s-c c+s
- // 1 -s-c -c+s
- // 2 -s+c -c-s
- // 3 s+c c-s
-
- var temp float64
- switch s, c := Sincos(x); n & 3 {
- case 0:
- temp = s - c
- case 1:
- temp = -s - c
- case 2:
- temp = -s + c
- case 3:
- temp = s + c
- }
- b = (1 / SqrtPi) * temp / Sqrt(x)
- } else {
- a := Y0(x)
- b = Y1(x)
- // quit if b is -inf
- for i := 1; i < n && !IsInf(b, -1); i++ {
- a, b = b, (float64(i+i)/x)*b-a
- }
- }
- if sign {
- return -b
- }
- return b
-}
diff --git a/contrib/go/_std_1.18/src/math/ldexp.go b/contrib/go/_std_1.18/src/math/ldexp.go
deleted file mode 100644
index 55c82f1e84..0000000000
--- a/contrib/go/_std_1.18/src/math/ldexp.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// Ldexp is the inverse of Frexp.
-// It returns frac × 2**exp.
-//
-// Special cases are:
-// Ldexp(±0, exp) = ±0
-// Ldexp(±Inf, exp) = ±Inf
-// Ldexp(NaN, exp) = NaN
-func Ldexp(frac float64, exp int) float64 {
- if haveArchLdexp {
- return archLdexp(frac, exp)
- }
- return ldexp(frac, exp)
-}
-
-func ldexp(frac float64, exp int) float64 {
- // special cases
- switch {
- case frac == 0:
- return frac // correctly return -0
- case IsInf(frac, 0) || IsNaN(frac):
- return frac
- }
- frac, e := normalize(frac)
- exp += e
- x := Float64bits(frac)
- exp += int(x>>shift)&mask - bias
- if exp < -1075 {
- return Copysign(0, frac) // underflow
- }
- if exp > 1023 { // overflow
- if frac < 0 {
- return Inf(-1)
- }
- return Inf(1)
- }
- var m float64 = 1
- if exp < -1022 { // denormal
- exp += 53
- m = 1.0 / (1 << 53) // 2**-53
- }
- x &^= mask << shift
- x |= uint64(exp+bias) << shift
- return m * Float64frombits(x)
-}
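A quick round-trip sketch (standard library only): since Ldexp inverts Frexp, reassembling the fraction and exponent reproduces the input, and driving exp below -1074 underflows to a signed zero as handled above.

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        frac, exp := math.Frexp(6.0)
        fmt.Println(frac, exp)             // 0.75 3, since 6 = 0.75 * 2**3
        fmt.Println(math.Ldexp(frac, exp)) // 6
        fmt.Println(math.Ldexp(1, -1074))  // 5e-324, the smallest denormal
        fmt.Println(math.Ldexp(1, -1200))  // 0 (underflow)
    }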
diff --git a/contrib/go/_std_1.18/src/math/lgamma.go b/contrib/go/_std_1.18/src/math/lgamma.go
deleted file mode 100644
index 7af5871744..0000000000
--- a/contrib/go/_std_1.18/src/math/lgamma.go
+++ /dev/null
@@ -1,365 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-/*
- Floating-point logarithm of the Gamma function.
-*/
-
-// The original C code and the long comment below are
-// from FreeBSD's /usr/src/lib/msun/src/e_lgamma_r.c and
-// came with this notice. The Go code is a simplified
-// version of the original C.
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunPro, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-// __ieee754_lgamma_r(x, signgamp)
-// Reentrant version of the logarithm of the Gamma function
-// with user provided pointer for the sign of Gamma(x).
-//
-// Method:
-// 1. Argument Reduction for 0 < x <= 8
-// Since gamma(1+s)=s*gamma(s), for x in [0,8], we may
-// reduce x to a number in [1.5,2.5] by
-// lgamma(1+s) = log(s) + lgamma(s)
-// for example,
-// lgamma(7.3) = log(6.3) + lgamma(6.3)
-// = log(6.3*5.3) + lgamma(5.3)
-// = log(6.3*5.3*4.3*3.3*2.3) + lgamma(2.3)
-// 2. Polynomial approximation of lgamma around its
-// minimum (ymin=1.461632144968362245) to maintain monotonicity.
-// On [ymin-0.23, ymin+0.27] (i.e., [1.23164,1.73163]), use
-// Let z = x-ymin;
-// lgamma(x) = -1.214862905358496078218 + z**2*poly(z)
-// poly(z) is a degree-14 polynomial.
-// 3. Rational approximation in the primary interval [2,3]
-// We use the following approximation:
-// s = x-2.0;
-// lgamma(x) = 0.5*s + s*P(s)/Q(s)
-// with accuracy
-// |P/Q - (lgamma(x)-0.5s)| < 2**-61.71
-// Our algorithms are based on the following observation
-//
-// lgamma(2+s) = s*(1-Euler) + ((zeta(2)-1)/2)*s**2 - ((zeta(3)-1)/3)*s**3 + ...
-//
-// where Euler = 0.5772156649... is the Euler constant, which
-// is very close to 0.5.
-//
-// 4. For x>=8, we have
-// lgamma(x)~(x-0.5)log(x)-x+0.5*log(2pi)+1/(12x)-1/(360x**3)+....
-// (better formula:
-// lgamma(x)~(x-0.5)*(log(x)-1)-.5*(log(2pi)-1) + ...)
-// Let z = 1/x, then we approximate
-// f(z) = lgamma(x) - (x-0.5)(log(x)-1)
-// by
-// w = w0 + w1*z + w2*z**3 + w3*z**5 + ... + w6*z**11
-// where
-// |w - f(z)| < 2**-58.74
-//
-// 5. For negative x, since (G is the gamma function)
-// -x*G(-x)*G(x) = pi/sin(pi*x),
-// we have
-// G(x) = pi/(sin(pi*x)*(-x)*G(-x))
-// since G(-x) is positive, sign(G(x)) = sign(sin(pi*x)) for x<0
-// Hence, for x<0, signgam = sign(sin(pi*x)) and
-// lgamma(x) = log(|Gamma(x)|)
-// = log(pi/(|x*sin(pi*x)|)) - lgamma(-x);
-// Note: one should avoid computing pi*(-x) directly in the
-// computation of sin(pi*(-x)).
-//
-// 6. Special Cases
-// lgamma(2+s) ~ s*(1-Euler) for tiny s
-// lgamma(1)=lgamma(2)=0
-// lgamma(x) ~ -log(x) for tiny x
-// lgamma(0) = lgamma(inf) = inf
-// lgamma(-integer) = +-inf
-//
-//
-
-var _lgamA = [...]float64{
- 7.72156649015328655494e-02, // 0x3FB3C467E37DB0C8
- 3.22467033424113591611e-01, // 0x3FD4A34CC4A60FAD
- 6.73523010531292681824e-02, // 0x3FB13E001A5562A7
- 2.05808084325167332806e-02, // 0x3F951322AC92547B
- 7.38555086081402883957e-03, // 0x3F7E404FB68FEFE8
- 2.89051383673415629091e-03, // 0x3F67ADD8CCB7926B
- 1.19270763183362067845e-03, // 0x3F538A94116F3F5D
- 5.10069792153511336608e-04, // 0x3F40B6C689B99C00
- 2.20862790713908385557e-04, // 0x3F2CF2ECED10E54D
- 1.08011567247583939954e-04, // 0x3F1C5088987DFB07
- 2.52144565451257326939e-05, // 0x3EFA7074428CFA52
- 4.48640949618915160150e-05, // 0x3F07858E90A45837
-}
-var _lgamR = [...]float64{
- 1.0, // placeholder
- 1.39200533467621045958e+00, // 0x3FF645A762C4AB74
- 7.21935547567138069525e-01, // 0x3FE71A1893D3DCDC
- 1.71933865632803078993e-01, // 0x3FC601EDCCFBDF27
- 1.86459191715652901344e-02, // 0x3F9317EA742ED475
- 7.77942496381893596434e-04, // 0x3F497DDACA41A95B
- 7.32668430744625636189e-06, // 0x3EDEBAF7A5B38140
-}
-var _lgamS = [...]float64{
- -7.72156649015328655494e-02, // 0xBFB3C467E37DB0C8
- 2.14982415960608852501e-01, // 0x3FCB848B36E20878
- 3.25778796408930981787e-01, // 0x3FD4D98F4F139F59
- 1.46350472652464452805e-01, // 0x3FC2BB9CBEE5F2F7
- 2.66422703033638609560e-02, // 0x3F9B481C7E939961
- 1.84028451407337715652e-03, // 0x3F5E26B67368F239
- 3.19475326584100867617e-05, // 0x3F00BFECDD17E945
-}
-var _lgamT = [...]float64{
- 4.83836122723810047042e-01, // 0x3FDEF72BC8EE38A2
- -1.47587722994593911752e-01, // 0xBFC2E4278DC6C509
- 6.46249402391333854778e-02, // 0x3FB08B4294D5419B
- -3.27885410759859649565e-02, // 0xBFA0C9A8DF35B713
- 1.79706750811820387126e-02, // 0x3F9266E7970AF9EC
- -1.03142241298341437450e-02, // 0xBF851F9FBA91EC6A
- 6.10053870246291332635e-03, // 0x3F78FCE0E370E344
- -3.68452016781138256760e-03, // 0xBF6E2EFFB3E914D7
- 2.25964780900612472250e-03, // 0x3F6282D32E15C915
- -1.40346469989232843813e-03, // 0xBF56FE8EBF2D1AF1
- 8.81081882437654011382e-04, // 0x3F4CDF0CEF61A8E9
- -5.38595305356740546715e-04, // 0xBF41A6109C73E0EC
- 3.15632070903625950361e-04, // 0x3F34AF6D6C0EBBF7
- -3.12754168375120860518e-04, // 0xBF347F24ECC38C38
- 3.35529192635519073543e-04, // 0x3F35FD3EE8C2D3F4
-}
-var _lgamU = [...]float64{
- -7.72156649015328655494e-02, // 0xBFB3C467E37DB0C8
- 6.32827064025093366517e-01, // 0x3FE4401E8B005DFF
- 1.45492250137234768737e+00, // 0x3FF7475CD119BD6F
- 9.77717527963372745603e-01, // 0x3FEF497644EA8450
- 2.28963728064692451092e-01, // 0x3FCD4EAEF6010924
- 1.33810918536787660377e-02, // 0x3F8B678BBF2BAB09
-}
-var _lgamV = [...]float64{
- 1.0,
- 2.45597793713041134822e+00, // 0x4003A5D7C2BD619C
- 2.12848976379893395361e+00, // 0x40010725A42B18F5
- 7.69285150456672783825e-01, // 0x3FE89DFBE45050AF
- 1.04222645593369134254e-01, // 0x3FBAAE55D6537C88
- 3.21709242282423911810e-03, // 0x3F6A5ABB57D0CF61
-}
-var _lgamW = [...]float64{
- 4.18938533204672725052e-01, // 0x3FDACFE390C97D69
- 8.33333333333329678849e-02, // 0x3FB555555555553B
- -2.77777777728775536470e-03, // 0xBF66C16C16B02E5C
- 7.93650558643019558500e-04, // 0x3F4A019F98CF38B6
- -5.95187557450339963135e-04, // 0xBF4380CB8C0FE741
- 8.36339918996282139126e-04, // 0x3F4B67BA4CDAD5D1
- -1.63092934096575273989e-03, // 0xBF5AB89D0B9E43E4
-}
-
-// Lgamma returns the natural logarithm and sign (-1 or +1) of Gamma(x).
-//
-// Special cases are:
-// Lgamma(+Inf) = +Inf
-// Lgamma(0) = +Inf
-// Lgamma(-integer) = +Inf
-// Lgamma(-Inf) = -Inf
-// Lgamma(NaN) = NaN
-func Lgamma(x float64) (lgamma float64, sign int) {
- const (
- Ymin = 1.461632144968362245
- Two52 = 1 << 52 // 0x4330000000000000 ~4.5036e+15
- Two53 = 1 << 53 // 0x4340000000000000 ~9.0072e+15
- Two58 = 1 << 58 // 0x4390000000000000 ~2.8823e+17
- Tiny = 1.0 / (1 << 70) // 0x3b90000000000000 ~8.47033e-22
- Tc = 1.46163214496836224576e+00 // 0x3FF762D86356BE3F
- Tf = -1.21486290535849611461e-01 // 0xBFBF19B9BCC38A42
- // Tt = -(tail of Tf)
- Tt = -3.63867699703950536541e-18 // 0xBC50C7CAA48A971F
- )
- // special cases
- sign = 1
- switch {
- case IsNaN(x):
- lgamma = x
- return
- case IsInf(x, 0):
- lgamma = x
- return
- case x == 0:
- lgamma = Inf(1)
- return
- }
-
- neg := false
- if x < 0 {
- x = -x
- neg = true
- }
-
- if x < Tiny { // if |x| < 2**-70, return -log(|x|)
- if neg {
- sign = -1
- }
- lgamma = -Log(x)
- return
- }
- var nadj float64
- if neg {
- if x >= Two52 { // |x| >= 2**52, must be -integer
- lgamma = Inf(1)
- return
- }
- t := sinPi(x)
- if t == 0 {
- lgamma = Inf(1) // -integer
- return
- }
- nadj = Log(Pi / Abs(t*x))
- if t < 0 {
- sign = -1
- }
- }
-
- switch {
- case x == 1 || x == 2: // purge off 1 and 2
- lgamma = 0
- return
- case x < 2: // use lgamma(x) = lgamma(x+1) - log(x)
- var y float64
- var i int
- if x <= 0.9 {
- lgamma = -Log(x)
- switch {
- case x >= (Ymin - 1 + 0.27): // 0.7316 <= x <= 0.9
- y = 1 - x
- i = 0
- case x >= (Ymin - 1 - 0.27): // 0.2316 <= x < 0.7316
- y = x - (Tc - 1)
- i = 1
- default: // 0 < x < 0.2316
- y = x
- i = 2
- }
- } else {
- lgamma = 0
- switch {
- case x >= (Ymin + 0.27): // 1.7316 <= x < 2
- y = 2 - x
- i = 0
- case x >= (Ymin - 0.27): // 1.2316 <= x < 1.7316
- y = x - Tc
- i = 1
- default: // 0.9 < x < 1.2316
- y = x - 1
- i = 2
- }
- }
- switch i {
- case 0:
- z := y * y
- p1 := _lgamA[0] + z*(_lgamA[2]+z*(_lgamA[4]+z*(_lgamA[6]+z*(_lgamA[8]+z*_lgamA[10]))))
- p2 := z * (_lgamA[1] + z*(+_lgamA[3]+z*(_lgamA[5]+z*(_lgamA[7]+z*(_lgamA[9]+z*_lgamA[11])))))
- p := y*p1 + p2
- lgamma += (p - 0.5*y)
- case 1:
- z := y * y
- w := z * y
- p1 := _lgamT[0] + w*(_lgamT[3]+w*(_lgamT[6]+w*(_lgamT[9]+w*_lgamT[12]))) // parallel comp
- p2 := _lgamT[1] + w*(_lgamT[4]+w*(_lgamT[7]+w*(_lgamT[10]+w*_lgamT[13])))
- p3 := _lgamT[2] + w*(_lgamT[5]+w*(_lgamT[8]+w*(_lgamT[11]+w*_lgamT[14])))
- p := z*p1 - (Tt - w*(p2+y*p3))
- lgamma += (Tf + p)
- case 2:
- p1 := y * (_lgamU[0] + y*(_lgamU[1]+y*(_lgamU[2]+y*(_lgamU[3]+y*(_lgamU[4]+y*_lgamU[5])))))
- p2 := 1 + y*(_lgamV[1]+y*(_lgamV[2]+y*(_lgamV[3]+y*(_lgamV[4]+y*_lgamV[5]))))
- lgamma += (-0.5*y + p1/p2)
- }
- case x < 8: // 2 <= x < 8
- i := int(x)
- y := x - float64(i)
- p := y * (_lgamS[0] + y*(_lgamS[1]+y*(_lgamS[2]+y*(_lgamS[3]+y*(_lgamS[4]+y*(_lgamS[5]+y*_lgamS[6]))))))
- q := 1 + y*(_lgamR[1]+y*(_lgamR[2]+y*(_lgamR[3]+y*(_lgamR[4]+y*(_lgamR[5]+y*_lgamR[6])))))
- lgamma = 0.5*y + p/q
- z := 1.0 // Lgamma(1+s) = Log(s) + Lgamma(s)
- switch i {
- case 7:
- z *= (y + 6)
- fallthrough
- case 6:
- z *= (y + 5)
- fallthrough
- case 5:
- z *= (y + 4)
- fallthrough
- case 4:
- z *= (y + 3)
- fallthrough
- case 3:
- z *= (y + 2)
- lgamma += Log(z)
- }
- case x < Two58: // 8 <= x < 2**58
- t := Log(x)
- z := 1 / x
- y := z * z
- w := _lgamW[0] + z*(_lgamW[1]+y*(_lgamW[2]+y*(_lgamW[3]+y*(_lgamW[4]+y*(_lgamW[5]+y*_lgamW[6])))))
- lgamma = (x-0.5)*(t-1) + w
- default: // 2**58 <= x <= Inf
- lgamma = x * (Log(x) - 1)
- }
- if neg {
- lgamma = nadj - lgamma
- }
- return
-}
-
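An illustrative check (not part of the deleted file): Gamma(0.5) = sqrt(pi), so Lgamma(0.5) should match Log(Sqrt(Pi)), while Gamma(-0.5) = -2*sqrt(pi) is negative and flips the reported sign.

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        lg, sign := math.Lgamma(0.5)
        fmt.Println(lg, sign)                      // ≈ 0.5723649429247001 1
        fmt.Println(math.Log(math.Sqrt(math.Pi)))  // same value
        _, sign = math.Lgamma(-0.5)
        fmt.Println(sign) // -1
    }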
-// sinPi(x) is a helper function for negative x
-func sinPi(x float64) float64 {
- const (
- Two52 = 1 << 52 // 0x4330000000000000 ~4.5036e+15
- Two53 = 1 << 53 // 0x4340000000000000 ~9.0072e+15
- )
- if x < 0.25 {
- return -Sin(Pi * x)
- }
-
- // argument reduction
- z := Floor(x)
- var n int
- if z != x { // inexact
- x = Mod(x, 2)
- n = int(x * 4)
- } else {
- if x >= Two53 { // x must be even
- x = 0
- n = 0
- } else {
- if x < Two52 {
- z = x + Two52 // exact
- }
- n = int(1 & Float64bits(z))
- x = float64(n)
- n <<= 2
- }
- }
- switch n {
- case 0:
- x = Sin(Pi * x)
- case 1, 2:
- x = Cos(Pi * (0.5 - x))
- case 3, 4:
- x = Sin(Pi * (1 - x))
- case 5, 6:
- x = -Cos(Pi * (x - 1.5))
- default:
- x = Sin(Pi * (x - 2))
- }
- return -x
-}
diff --git a/contrib/go/_std_1.18/src/math/log.go b/contrib/go/_std_1.18/src/math/log.go
deleted file mode 100644
index 1b3e306adf..0000000000
--- a/contrib/go/_std_1.18/src/math/log.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-/*
- Floating-point logarithm.
-*/
-
-// The original C code, the long comment, and the constants
-// below are from FreeBSD's /usr/src/lib/msun/src/e_log.c
-// and came with this notice. The Go code is a simpler
-// version of the original C.
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunPro, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-// __ieee754_log(x)
-// Return the logarithm of x
-//
-// Method :
-// 1. Argument Reduction: find k and f such that
-// x = 2**k * (1+f),
-// where sqrt(2)/2 < 1+f < sqrt(2) .
-//
-// 2. Approximation of log(1+f).
-// Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
-// = 2s + 2/3 s**3 + 2/5 s**5 + .....,
-// = 2s + s*R
-// We use a special Remez algorithm on [0,0.1716] to generate
-// a polynomial of degree 14 to approximate R. The maximum error
-// of this polynomial approximation is bounded by 2**-58.45. In
-// other words,
-// R(z) ~ L1*s**2 + L2*s**4 + L3*s**6 + L4*s**8 + L5*s**10 + L6*s**12 + L7*s**14
-// (the values of L1 to L7 are listed in the program) and
-// | L1*s**2 + ... + L7*s**14 - R(z) | <= 2**-58.45
-// Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
-// In order to guarantee error in log below 1ulp, we compute log by
-// log(1+f) = f - s*(f - R) (if f is not too large)
-// log(1+f) = f - (hfsq - s*(hfsq+R)). (better accuracy)
-//
-// 3. Finally, log(x) = k*Ln2 + log(1+f).
-// = k*Ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*Ln2_lo)))
-// Here Ln2 is split into two floating point numbers:
-// Ln2_hi + Ln2_lo,
-// where n*Ln2_hi is always exact for |n| < 2000.
-//
-// Special cases:
-// log(x) is NaN with signal if x < 0 (including -INF) ;
-// log(+INF) is +INF; log(0) is -INF with signal;
-// log(NaN) is that NaN with no signal.
-//
-// Accuracy:
-// according to an error analysis, the error is always less than
-// 1 ulp (unit in the last place).
-//
-// Constants:
-// The hexadecimal values are the intended ones for the following
-// constants. The decimal values may be used, provided that the
-// compiler will convert from decimal to binary accurately enough
-// to produce the hexadecimal values shown.
-
-// Log returns the natural logarithm of x.
-//
-// Special cases are:
-// Log(+Inf) = +Inf
-// Log(0) = -Inf
-// Log(x < 0) = NaN
-// Log(NaN) = NaN
-func Log(x float64) float64 {
- if haveArchLog {
- return archLog(x)
- }
- return log(x)
-}
-
-func log(x float64) float64 {
- const (
- Ln2Hi = 6.93147180369123816490e-01 /* 3fe62e42 fee00000 */
- Ln2Lo = 1.90821492927058770002e-10 /* 3dea39ef 35793c76 */
- L1 = 6.666666666666735130e-01 /* 3FE55555 55555593 */
- L2 = 3.999999999940941908e-01 /* 3FD99999 9997FA04 */
- L3 = 2.857142874366239149e-01 /* 3FD24924 94229359 */
- L4 = 2.222219843214978396e-01 /* 3FCC71C5 1D8E78AF */
- L5 = 1.818357216161805012e-01 /* 3FC74664 96CB03DE */
- L6 = 1.531383769920937332e-01 /* 3FC39A09 D078C69F */
- L7 = 1.479819860511658591e-01 /* 3FC2F112 DF3E5244 */
- )
-
- // special cases
- switch {
- case IsNaN(x) || IsInf(x, 1):
- return x
- case x < 0:
- return NaN()
- case x == 0:
- return Inf(-1)
- }
-
- // reduce
- f1, ki := Frexp(x)
- if f1 < Sqrt2/2 {
- f1 *= 2
- ki--
- }
- f := f1 - 1
- k := float64(ki)
-
- // compute
- s := f / (2 + f)
- s2 := s * s
- s4 := s2 * s2
- t1 := s2 * (L1 + s4*(L3+s4*(L5+s4*L7)))
- t2 := s4 * (L2 + s4*(L4+s4*L6))
- R := t1 + t2
- hfsq := 0.5 * f * f
- return k*Ln2Hi - ((hfsq - (s*(hfsq+R) + k*Ln2Lo)) - f)
-}
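A minimal numeric check (illustrative) of the step-1 reduction: writing x = 2**k * f1 with Frexp, log(x) = k*Ln2 + log(f1) agrees with Log directly.

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        x := 123.456
        f1, k := math.Frexp(x) // x = f1 * 2**k with f1 in [0.5, 1)
        fmt.Println(float64(k)*math.Ln2 + math.Log(f1)) // ≈ 4.81589
        fmt.Println(math.Log(x))                        // same
    }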
diff --git a/contrib/go/_std_1.18/src/math/log1p.go b/contrib/go/_std_1.18/src/math/log1p.go
deleted file mode 100644
index c117f7245d..0000000000
--- a/contrib/go/_std_1.18/src/math/log1p.go
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// The original C code, the long comment, and the constants
-// below are from FreeBSD's /usr/src/lib/msun/src/s_log1p.c
-// and came with this notice. The Go code is a simplified
-// version of the original C.
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunPro, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-//
-// double log1p(double x)
-//
-// Method :
-// 1. Argument Reduction: find k and f such that
-// 1+x = 2**k * (1+f),
-// where sqrt(2)/2 < 1+f < sqrt(2) .
-//
-// Note. If k=0, then f=x is exact. However, if k!=0, then f
-// may not be representable exactly. In that case, a correction
-// term is needed. Let u=1+x rounded. Let c = (1+x)-u, then
-// log(1+x) - log(u) ~ c/u. Thus, we proceed to compute log(u),
-// and add back the correction term c/u.
-// (Note: when x > 2**53, one can simply return log(x))
-//
-// 2. Approximation of log1p(f).
-// Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
-// = 2s + 2/3 s**3 + 2/5 s**5 + .....,
-// = 2s + s*R
-// We use a special Remez algorithm on [0,0.1716] to generate
-// a polynomial of degree 14 to approximate R. The maximum error
-// of this polynomial approximation is bounded by 2**-58.45. In
-// other words,
-// R(z) ~ Lp1*s**2 + Lp2*s**4 + Lp3*s**6 + Lp4*s**8 + Lp5*s**10 + Lp6*s**12 + Lp7*s**14
-// (the values of Lp1 to Lp7 are listed in the program) and
-// | Lp1*s**2 + ... + Lp7*s**14 - R(z) | <= 2**-58.45
-// Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
-// In order to guarantee error in log below 1ulp, we compute log
-// by
-// log1p(f) = f - (hfsq - s*(hfsq+R)).
-//
-// 3. Finally, log1p(x) = k*ln2 + log1p(f).
-// = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo)))
-// Here ln2 is split into two floating point numbers:
-// ln2_hi + ln2_lo,
-// where n*ln2_hi is always exact for |n| < 2000.
-//
-// Special cases:
-// log1p(x) is NaN with signal if x < -1 (including -INF) ;
-// log1p(+INF) is +INF; log1p(-1) is -INF with signal;
-// log1p(NaN) is that NaN with no signal.
-//
-// Accuracy:
-// according to an error analysis, the error is always less than
-// 1 ulp (unit in the last place).
-//
-// Constants:
-// The hexadecimal values are the intended ones for the following
-// constants. The decimal values may be used, provided that the
-// compiler will convert from decimal to binary accurately enough
-// to produce the hexadecimal values shown.
-//
-// Note: Assuming log() returns an accurate answer, the following
-// algorithm can be used to compute log1p(x) to within a few ULP:
-//
-// u = 1+x;
-// if(u==1.0) return x ; else
-// return log(u)*(x/(u-1.0));
-//
-// See HP-15C Advanced Functions Handbook, p.193.
-
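A direct Go transcription (a sketch; the helper name log1pViaLog is hypothetical) of the HP-15C algorithm quoted at the end of the comment above:

    package main

    import (
        "fmt"
        "math"
    )

    func log1pViaLog(x float64) float64 {
        u := 1 + x
        if u == 1 {
            return x // 1+x rounded to exactly 1, so log1p(x) ≈ x
        }
        return math.Log(u) * (x / (u - 1)) // compensate for rounding in u
    }

    func main() {
        x := 1e-15
        fmt.Println(log1pViaLog(x)) // ≈ 9.999999999999995e-16
        fmt.Println(math.Log1p(x))  // reference value
    }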
-// Log1p returns the natural logarithm of 1 plus its argument x.
-// It is more accurate than Log(1 + x) when x is near zero.
-//
-// Special cases are:
-// Log1p(+Inf) = +Inf
-// Log1p(±0) = ±0
-// Log1p(-1) = -Inf
-// Log1p(x < -1) = NaN
-// Log1p(NaN) = NaN
-func Log1p(x float64) float64 {
- if haveArchLog1p {
- return archLog1p(x)
- }
- return log1p(x)
-}
-
-func log1p(x float64) float64 {
- const (
- Sqrt2M1 = 4.142135623730950488017e-01 // Sqrt(2)-1 = 0x3fda827999fcef34
- Sqrt2HalfM1 = -2.928932188134524755992e-01 // Sqrt(2)/2-1 = 0xbfd2bec333018866
- Small = 1.0 / (1 << 29) // 2**-29 = 0x3e20000000000000
- Tiny = 1.0 / (1 << 54) // 2**-54
- Two53 = 1 << 53 // 2**53
- Ln2Hi = 6.93147180369123816490e-01 // 3fe62e42fee00000
- Ln2Lo = 1.90821492927058770002e-10 // 3dea39ef35793c76
- Lp1 = 6.666666666666735130e-01 // 3FE5555555555593
- Lp2 = 3.999999999940941908e-01 // 3FD999999997FA04
- Lp3 = 2.857142874366239149e-01 // 3FD2492494229359
- Lp4 = 2.222219843214978396e-01 // 3FCC71C51D8E78AF
- Lp5 = 1.818357216161805012e-01 // 3FC7466496CB03DE
- Lp6 = 1.531383769920937332e-01 // 3FC39A09D078C69F
- Lp7 = 1.479819860511658591e-01 // 3FC2F112DF3E5244
- )
-
- // special cases
- switch {
- case x < -1 || IsNaN(x): // includes -Inf
- return NaN()
- case x == -1:
- return Inf(-1)
- case IsInf(x, 1):
- return Inf(1)
- }
-
- absx := Abs(x)
-
- var f float64
- var iu uint64
- k := 1
- if absx < Sqrt2M1 { // |x| < Sqrt(2)-1
- if absx < Small { // |x| < 2**-29
- if absx < Tiny { // |x| < 2**-54
- return x
- }
- return x - x*x*0.5
- }
- if x > Sqrt2HalfM1 { // Sqrt(2)/2-1 < x
- // (Sqrt(2)/2-1) < x < (Sqrt(2)-1)
- k = 0
- f = x
- iu = 1
- }
- }
- var c float64
- if k != 0 {
- var u float64
- if absx < Two53 { // 1<<53
- u = 1.0 + x
- iu = Float64bits(u)
- k = int((iu >> 52) - 1023)
- // correction term
- if k > 0 {
- c = 1.0 - (u - x)
- } else {
- c = x - (u - 1.0)
- }
- c /= u
- } else {
- u = x
- iu = Float64bits(u)
- k = int((iu >> 52) - 1023)
- c = 0
- }
- iu &= 0x000fffffffffffff
- if iu < 0x0006a09e667f3bcd { // mantissa of Sqrt(2)
- u = Float64frombits(iu | 0x3ff0000000000000) // normalize u
- } else {
- k++
- u = Float64frombits(iu | 0x3fe0000000000000) // normalize u/2
- iu = (0x0010000000000000 - iu) >> 2
- }
- f = u - 1.0 // Sqrt(2)/2 < u < Sqrt(2)
- }
- hfsq := 0.5 * f * f
- var s, R, z float64
- if iu == 0 { // |f| < 2**-20
- if f == 0 {
- if k == 0 {
- return 0
- }
- c += float64(k) * Ln2Lo
- return float64(k)*Ln2Hi + c
- }
- R = hfsq * (1.0 - 0.66666666666666666*f) // avoid division
- if k == 0 {
- return f - R
- }
- return float64(k)*Ln2Hi - ((R - (float64(k)*Ln2Lo + c)) - f)
- }
- s = f / (2.0 + f)
- z = s * s
- R = z * (Lp1 + z*(Lp2+z*(Lp3+z*(Lp4+z*(Lp5+z*(Lp6+z*Lp7))))))
- if k == 0 {
- return f - (hfsq - s*(hfsq+R))
- }
- return float64(k)*Ln2Hi - ((hfsq - (s*(hfsq+R) + (float64(k)*Ln2Lo + c))) - f)
-}
diff --git a/contrib/go/_std_1.18/src/math/logb.go b/contrib/go/_std_1.18/src/math/logb.go
deleted file mode 100644
index f2769d4fd7..0000000000
--- a/contrib/go/_std_1.18/src/math/logb.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// Logb returns the binary exponent of x.
-//
-// Special cases are:
-// Logb(±Inf) = +Inf
-// Logb(0) = -Inf
-// Logb(NaN) = NaN
-func Logb(x float64) float64 {
- // special cases
- switch {
- case x == 0:
- return Inf(-1)
- case IsInf(x, 0):
- return Inf(1)
- case IsNaN(x):
- return x
- }
- return float64(ilogb(x))
-}
-
-// Ilogb returns the binary exponent of x as an integer.
-//
-// Special cases are:
-// Ilogb(±Inf) = MaxInt32
-// Ilogb(0) = MinInt32
-// Ilogb(NaN) = MaxInt32
-func Ilogb(x float64) int {
- // special cases
- switch {
- case x == 0:
- return MinInt32
- case IsNaN(x):
- return MaxInt32
- case IsInf(x, 0):
- return MaxInt32
- }
- return ilogb(x)
-}
-
-// ilogb returns the binary exponent of x. It assumes x is finite and
-// non-zero.
-func ilogb(x float64) int {
- x, exp := normalize(x)
- return int((Float64bits(x)>>shift)&mask) - bias + exp
-}
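Usage sketch (standard library only) for the exported pair above:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        fmt.Println(math.Ilogb(8))   // 3, since 8 = 1.0 * 2**3
        fmt.Println(math.Ilogb(0.5)) // -1
        fmt.Println(math.Logb(10))   // 3, since 10 = 1.25 * 2**3
    }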
diff --git a/contrib/go/_std_1.18/src/math/mod.go b/contrib/go/_std_1.18/src/math/mod.go
deleted file mode 100644
index 6bc5f28832..0000000000
--- a/contrib/go/_std_1.18/src/math/mod.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2009-2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-/*
- Floating-point mod function.
-*/
-
-// Mod returns the floating-point remainder of x/y.
-// The magnitude of the result is less than y and its
-// sign agrees with that of x.
-//
-// Special cases are:
-// Mod(±Inf, y) = NaN
-// Mod(NaN, y) = NaN
-// Mod(x, 0) = NaN
-// Mod(x, ±Inf) = x
-// Mod(x, NaN) = NaN
-func Mod(x, y float64) float64 {
- if haveArchMod {
- return archMod(x, y)
- }
- return mod(x, y)
-}
-
-func mod(x, y float64) float64 {
- if y == 0 || IsInf(x, 0) || IsNaN(x) || IsNaN(y) {
- return NaN()
- }
- y = Abs(y)
-
- yfr, yexp := Frexp(y)
- r := x
- if x < 0 {
- r = -x
- }
-
- for r >= y {
- rfr, rexp := Frexp(r)
- if rfr < yfr {
- rexp = rexp - 1
- }
- r = r - Ldexp(y, rexp-yexp)
- }
- if x < 0 {
- r = -r
- }
- return r
-}
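Illustrative calls (not from the deleted file) showing that the result's sign follows x while only |y| matters:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        fmt.Println(math.Mod(7.5, 2))  // 1.5
        fmt.Println(math.Mod(-7.5, 2)) // -1.5, sign agrees with x
        fmt.Println(math.Mod(7.5, -2)) // 1.5, y's sign is ignored
    }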
diff --git a/contrib/go/_std_1.18/src/math/modf.go b/contrib/go/_std_1.18/src/math/modf.go
deleted file mode 100644
index bf08dc6556..0000000000
--- a/contrib/go/_std_1.18/src/math/modf.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// Modf returns integer and fractional floating-point numbers
-// that sum to f. Both values have the same sign as f.
-//
-// Special cases are:
-// Modf(±Inf) = ±Inf, NaN
-// Modf(NaN) = NaN, NaN
-func Modf(f float64) (int float64, frac float64) {
- if haveArchModf {
- return archModf(f)
- }
- return modf(f)
-}
-
-func modf(f float64) (int float64, frac float64) {
- if f < 1 {
- switch {
- case f < 0:
- int, frac = Modf(-f)
- return -int, -frac
- case f == 0:
- return f, f // Return -0, -0 when f == -0
- }
- return 0, f
- }
-
- x := Float64bits(f)
- e := uint(x>>shift)&mask - bias
-
- // Keep the top 12+e bits, the integer part; clear the rest.
- if e < 64-12 {
- x &^= 1<<(64-12-e) - 1
- }
- int = Float64frombits(x)
- frac = f - int
- return
-}
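Usage sketch: both returned parts carry f's sign, including for negative inputs.

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        i, frac := math.Modf(3.25)
        fmt.Println(i, frac) // 3 0.25
        i, frac = math.Modf(-3.25)
        fmt.Println(i, frac) // -3 -0.25
    }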
diff --git a/contrib/go/_std_1.18/src/math/nextafter.go b/contrib/go/_std_1.18/src/math/nextafter.go
deleted file mode 100644
index 9088e4d248..0000000000
--- a/contrib/go/_std_1.18/src/math/nextafter.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// Nextafter32 returns the next representable float32 value after x towards y.
-//
-// Special cases are:
-// Nextafter32(x, x) = x
-// Nextafter32(NaN, y) = NaN
-// Nextafter32(x, NaN) = NaN
-func Nextafter32(x, y float32) (r float32) {
- switch {
- case IsNaN(float64(x)) || IsNaN(float64(y)): // special case
- r = float32(NaN())
- case x == y:
- r = x
- case x == 0:
- r = float32(Copysign(float64(Float32frombits(1)), float64(y)))
- case (y > x) == (x > 0):
- r = Float32frombits(Float32bits(x) + 1)
- default:
- r = Float32frombits(Float32bits(x) - 1)
- }
- return
-}
-
-// Nextafter returns the next representable float64 value after x towards y.
-//
-// Special cases are:
-// Nextafter(x, x) = x
-// Nextafter(NaN, y) = NaN
-// Nextafter(x, NaN) = NaN
-func Nextafter(x, y float64) (r float64) {
- switch {
- case IsNaN(x) || IsNaN(y): // special case
- r = NaN()
- case x == y:
- r = x
- case x == 0:
- r = Copysign(Float64frombits(1), y)
- case (y > x) == (x > 0):
- r = Float64frombits(Float64bits(x) + 1)
- default:
- r = Float64frombits(Float64bits(x) - 1)
- }
- return
-}
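An illustrative check (standard library only): one step from 1.0 towards 2 is exactly one ULP, 2**-52, and one step up from zero is the smallest denormal.

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        fmt.Println(math.Nextafter(1, 2) - 1) // 2.220446049250313e-16 = 2**-52
        fmt.Println(math.Nextafter(0, 1))     // 5e-324
        fmt.Println(math.Nextafter(1, 1))     // 1, Nextafter(x, x) = x
    }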
diff --git a/contrib/go/_std_1.18/src/math/pow.go b/contrib/go/_std_1.18/src/math/pow.go
deleted file mode 100644
index e45a044ae1..0000000000
--- a/contrib/go/_std_1.18/src/math/pow.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-func isOddInt(x float64) bool {
- xi, xf := Modf(x)
- return xf == 0 && int64(xi)&1 == 1
-}
-
-// Special cases taken from FreeBSD's /usr/src/lib/msun/src/e_pow.c
-// updated by IEEE Std. 754-2008 "Section 9.2.1 Special values".
-
-// Pow returns x**y, the base-x exponential of y.
-//
-// Special cases are (in order):
-// Pow(x, ±0) = 1 for any x
-// Pow(1, y) = 1 for any y
-// Pow(x, 1) = x for any x
-// Pow(NaN, y) = NaN
-// Pow(x, NaN) = NaN
-// Pow(±0, y) = ±Inf for y an odd integer < 0
-// Pow(±0, -Inf) = +Inf
-// Pow(±0, +Inf) = +0
-// Pow(±0, y) = +Inf for finite y < 0 and not an odd integer
-// Pow(±0, y) = ±0 for y an odd integer > 0
-// Pow(±0, y) = +0 for finite y > 0 and not an odd integer
-// Pow(-1, ±Inf) = 1
-// Pow(x, +Inf) = +Inf for |x| > 1
-// Pow(x, -Inf) = +0 for |x| > 1
-// Pow(x, +Inf) = +0 for |x| < 1
-// Pow(x, -Inf) = +Inf for |x| < 1
-// Pow(+Inf, y) = +Inf for y > 0
-// Pow(+Inf, y) = +0 for y < 0
-// Pow(-Inf, y) = Pow(-0, -y)
-// Pow(x, y) = NaN for finite x < 0 and finite non-integer y
-func Pow(x, y float64) float64 {
- if haveArchPow {
- return archPow(x, y)
- }
- return pow(x, y)
-}
-
-func pow(x, y float64) float64 {
- switch {
- case y == 0 || x == 1:
- return 1
- case y == 1:
- return x
- case IsNaN(x) || IsNaN(y):
- return NaN()
- case x == 0:
- switch {
- case y < 0:
- if isOddInt(y) {
- return Copysign(Inf(1), x)
- }
- return Inf(1)
- case y > 0:
- if isOddInt(y) {
- return x
- }
- return 0
- }
- case IsInf(y, 0):
- switch {
- case x == -1:
- return 1
- case (Abs(x) < 1) == IsInf(y, 1):
- return 0
- default:
- return Inf(1)
- }
- case IsInf(x, 0):
- if IsInf(x, -1) {
- return Pow(1/x, -y) // Pow(-0, -y)
- }
- switch {
- case y < 0:
- return 0
- case y > 0:
- return Inf(1)
- }
- case y == 0.5:
- return Sqrt(x)
- case y == -0.5:
- return 1 / Sqrt(x)
- }
-
- yi, yf := Modf(Abs(y))
- if yf != 0 && x < 0 {
- return NaN()
- }
- if yi >= 1<<63 {
- // yi is a large even int that will lead to overflow (or underflow to 0)
- // for all x except -1 (x == 1 was handled earlier)
- switch {
- case x == -1:
- return 1
- case (Abs(x) < 1) == (y > 0):
- return 0
- default:
- return Inf(1)
- }
- }
-
- // ans = a1 * 2**ae (= 1 for now).
- a1 := 1.0
- ae := 0
-
- // ans *= x**yf
- if yf != 0 {
- if yf > 0.5 {
- yf--
- yi++
- }
- a1 = Exp(yf * Log(x))
- }
-
- // ans *= x**yi
- // by multiplying in successive squarings
- // of x according to bits of yi.
- // accumulate powers of two into exp.
- x1, xe := Frexp(x)
- for i := int64(yi); i != 0; i >>= 1 {
- if xe < -1<<12 || 1<<12 < xe {
- // catch xe before it overflows the left shift below
- // Since i !=0 it has at least one bit still set, so ae will accumulate xe
- // on at least one more iteration, ae += xe is a lower bound on ae
- // the lower bound on ae exceeds the size of a float64 exp
- // so the final call to Ldexp will produce under/overflow (0/Inf)
- ae += xe
- break
- }
- if i&1 == 1 {
- a1 *= x1
- ae += xe
- }
- x1 *= x1
- xe <<= 1
- if x1 < .5 {
- x1 += x1
- xe--
- }
- }
-
- // ans = a1*2**ae
- // if y < 0 { ans = 1 / ans }
- // but in the opposite order
- if y < 0 {
- a1 = 1 / a1
- ae = -ae
- }
- return Ldexp(a1, ae)
-}
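A stripped-down sketch (hypothetical helper, not the deleted code) of the "successive squarings of x according to bits of yi" idea from the loop above, for a plain non-negative integer exponent and without the Frexp/Ldexp overflow bookkeeping:

    package main

    import "fmt"

    func powInt(x float64, n uint) float64 {
        ans := 1.0
        for ; n != 0; n >>= 1 {
            if n&1 == 1 {
                ans *= x // this bit of the exponent is set
            }
            x *= x // square for the next bit
        }
        return ans
    }

    func main() {
        fmt.Println(powInt(3, 5)) // 243
    }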
diff --git a/contrib/go/_std_1.18/src/math/pow10.go b/contrib/go/_std_1.18/src/math/pow10.go
deleted file mode 100644
index 1234e20885..0000000000
--- a/contrib/go/_std_1.18/src/math/pow10.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// pow10tab stores the pre-computed values 10**i for i < 32.
-var pow10tab = [...]float64{
- 1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09,
- 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
- 1e20, 1e21, 1e22, 1e23, 1e24, 1e25, 1e26, 1e27, 1e28, 1e29,
- 1e30, 1e31,
-}
-
-// pow10postab32 stores the pre-computed value for 10**(i*32) at index i.
-var pow10postab32 = [...]float64{
- 1e00, 1e32, 1e64, 1e96, 1e128, 1e160, 1e192, 1e224, 1e256, 1e288,
-}
-
-// pow10negtab32 stores the pre-computed value for 10**(-i*32) at index i.
-var pow10negtab32 = [...]float64{
- 1e-00, 1e-32, 1e-64, 1e-96, 1e-128, 1e-160, 1e-192, 1e-224, 1e-256, 1e-288, 1e-320,
-}
-
-// Pow10 returns 10**n, the base-10 exponential of n.
-//
-// Special cases are:
-// Pow10(n) = 0 for n < -323
-// Pow10(n) = +Inf for n > 308
-func Pow10(n int) float64 {
- if 0 <= n && n <= 308 {
- return pow10postab32[uint(n)/32] * pow10tab[uint(n)%32]
- }
-
- if -323 <= n && n <= 0 {
- return pow10negtab32[uint(-n)/32] / pow10tab[uint(-n)%32]
- }
-
- // n < -323 || 308 < n
- if n > 0 {
- return Inf(1)
- }
-
- // n < -323
- return 0
-}
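A worked example (illustrative): Pow10(40) is assembled from the two tables as 10**32 * 10**8, and inputs outside [-323, 308] hit the documented limits.

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        fmt.Println(math.Pow10(40))   // 1e40 = pow10postab32[1] * pow10tab[8]
        fmt.Println(math.Pow10(-325)) // 0
        fmt.Println(math.Pow10(309))  // +Inf
    }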
diff --git a/contrib/go/_std_1.18/src/math/rand/exp.go b/contrib/go/_std_1.18/src/math/rand/exp.go
deleted file mode 100644
index 5a8d946c0c..0000000000
--- a/contrib/go/_std_1.18/src/math/rand/exp.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rand
-
-import (
- "math"
-)
-
-/*
- * Exponential distribution
- *
- * See "The Ziggurat Method for Generating Random Variables"
- * (Marsaglia & Tsang, 2000)
- * https://www.jstatsoft.org/v05/i08/paper [pdf]
- */
-
-const (
- re = 7.69711747013104972
-)
-
-// ExpFloat64 returns an exponentially distributed float64 in the range
-// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter
-// (lambda) is 1 and whose mean is 1/lambda (1).
-// To produce a distribution with a different rate parameter,
-// callers can adjust the output using:
-//
-// sample = ExpFloat64() / desiredRateParameter
-//
-func (r *Rand) ExpFloat64() float64 {
- for {
- j := r.Uint32()
- i := j & 0xFF
- x := float64(j) * float64(we[i])
- if j < ke[i] {
- return x
- }
- if i == 0 {
- return re - math.Log(r.Float64())
- }
- if fe[i]+float32(r.Float64())*(fe[i-1]-fe[i]) < float32(math.Exp(-x)) {
- return x
- }
- }
-}
-
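Usage sketch of the documented rescaling (assumes only math/rand): dividing by the desired rate gives samples with mean 1/lambda.

    package main

    import (
        "fmt"
        "math/rand"
    )

    func main() {
        r := rand.New(rand.NewSource(1))
        lambda := 2.0
        sum := 0.0
        for i := 0; i < 100000; i++ {
            sum += r.ExpFloat64() / lambda
        }
        fmt.Println(sum / 100000) // ≈ 0.5 = 1/lambda
    }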
-var ke = [256]uint32{
- 0xe290a139, 0x0, 0x9beadebc, 0xc377ac71, 0xd4ddb990,
- 0xde893fb8, 0xe4a8e87c, 0xe8dff16a, 0xebf2deab, 0xee49a6e8,
- 0xf0204efd, 0xf19bdb8e, 0xf2d458bb, 0xf3da104b, 0xf4b86d78,
- 0xf577ad8a, 0xf61de83d, 0xf6afb784, 0xf730a573, 0xf7a37651,
- 0xf80a5bb6, 0xf867189d, 0xf8bb1b4f, 0xf9079062, 0xf94d70ca,
- 0xf98d8c7d, 0xf9c8928a, 0xf9ff175b, 0xfa319996, 0xfa6085f8,
- 0xfa8c3a62, 0xfab5084e, 0xfadb36c8, 0xfaff0410, 0xfb20a6ea,
- 0xfb404fb4, 0xfb5e2951, 0xfb7a59e9, 0xfb95038c, 0xfbae44ba,
- 0xfbc638d8, 0xfbdcf892, 0xfbf29a30, 0xfc0731df, 0xfc1ad1ed,
- 0xfc2d8b02, 0xfc3f6c4d, 0xfc5083ac, 0xfc60ddd1, 0xfc708662,
- 0xfc7f8810, 0xfc8decb4, 0xfc9bbd62, 0xfca9027c, 0xfcb5c3c3,
- 0xfcc20864, 0xfccdd70a, 0xfcd935e3, 0xfce42ab0, 0xfceebace,
- 0xfcf8eb3b, 0xfd02c0a0, 0xfd0c3f59, 0xfd156b7b, 0xfd1e48d6,
- 0xfd26daff, 0xfd2f2552, 0xfd372af7, 0xfd3eeee5, 0xfd4673e7,
- 0xfd4dbc9e, 0xfd54cb85, 0xfd5ba2f2, 0xfd62451b, 0xfd68b415,
- 0xfd6ef1da, 0xfd750047, 0xfd7ae120, 0xfd809612, 0xfd8620b4,
- 0xfd8b8285, 0xfd90bcf5, 0xfd95d15e, 0xfd9ac10b, 0xfd9f8d36,
- 0xfda43708, 0xfda8bf9e, 0xfdad2806, 0xfdb17141, 0xfdb59c46,
- 0xfdb9a9fd, 0xfdbd9b46, 0xfdc170f6, 0xfdc52bd8, 0xfdc8ccac,
- 0xfdcc542d, 0xfdcfc30b, 0xfdd319ef, 0xfdd6597a, 0xfdd98245,
- 0xfddc94e5, 0xfddf91e6, 0xfde279ce, 0xfde54d1f, 0xfde80c52,
- 0xfdeab7de, 0xfded5034, 0xfdefd5be, 0xfdf248e3, 0xfdf4aa06,
- 0xfdf6f984, 0xfdf937b6, 0xfdfb64f4, 0xfdfd818d, 0xfdff8dd0,
- 0xfe018a08, 0xfe03767a, 0xfe05536c, 0xfe07211c, 0xfe08dfc9,
- 0xfe0a8fab, 0xfe0c30fb, 0xfe0dc3ec, 0xfe0f48b1, 0xfe10bf76,
- 0xfe122869, 0xfe1383b4, 0xfe14d17c, 0xfe1611e7, 0xfe174516,
- 0xfe186b2a, 0xfe19843e, 0xfe1a9070, 0xfe1b8fd6, 0xfe1c8289,
- 0xfe1d689b, 0xfe1e4220, 0xfe1f0f26, 0xfe1fcfbc, 0xfe2083ed,
- 0xfe212bc3, 0xfe21c745, 0xfe225678, 0xfe22d95f, 0xfe234ffb,
- 0xfe23ba4a, 0xfe241849, 0xfe2469f2, 0xfe24af3c, 0xfe24e81e,
- 0xfe25148b, 0xfe253474, 0xfe2547c7, 0xfe254e70, 0xfe25485a,
- 0xfe25356a, 0xfe251586, 0xfe24e88f, 0xfe24ae64, 0xfe2466e1,
- 0xfe2411df, 0xfe23af34, 0xfe233eb4, 0xfe22c02c, 0xfe22336b,
- 0xfe219838, 0xfe20ee58, 0xfe20358c, 0xfe1f6d92, 0xfe1e9621,
- 0xfe1daef0, 0xfe1cb7ac, 0xfe1bb002, 0xfe1a9798, 0xfe196e0d,
- 0xfe1832fd, 0xfe16e5fe, 0xfe15869d, 0xfe141464, 0xfe128ed3,
- 0xfe10f565, 0xfe0f478c, 0xfe0d84b1, 0xfe0bac36, 0xfe09bd73,
- 0xfe07b7b5, 0xfe059a40, 0xfe03644c, 0xfe011504, 0xfdfeab88,
- 0xfdfc26e9, 0xfdf98629, 0xfdf6c83b, 0xfdf3ec01, 0xfdf0f04a,
- 0xfdedd3d1, 0xfdea953d, 0xfde7331e, 0xfde3abe9, 0xfddffdfb,
- 0xfddc2791, 0xfdd826cd, 0xfdd3f9a8, 0xfdcf9dfc, 0xfdcb1176,
- 0xfdc65198, 0xfdc15bb3, 0xfdbc2ce2, 0xfdb6c206, 0xfdb117be,
- 0xfdab2a63, 0xfda4f5fd, 0xfd9e7640, 0xfd97a67a, 0xfd908192,
- 0xfd8901f2, 0xfd812182, 0xfd78d98e, 0xfd7022bb, 0xfd66f4ed,
- 0xfd5d4732, 0xfd530f9c, 0xfd48432b, 0xfd3cd59a, 0xfd30b936,
- 0xfd23dea4, 0xfd16349e, 0xfd07a7a3, 0xfcf8219b, 0xfce7895b,
- 0xfcd5c220, 0xfcc2aadb, 0xfcae1d5e, 0xfc97ed4e, 0xfc7fe6d4,
- 0xfc65ccf3, 0xfc495762, 0xfc2a2fc8, 0xfc07ee19, 0xfbe213c1,
- 0xfbb8051a, 0xfb890078, 0xfb5411a5, 0xfb180005, 0xfad33482,
- 0xfa839276, 0xfa263b32, 0xf9b72d1c, 0xf930a1a2, 0xf889f023,
- 0xf7b577d2, 0xf69c650c, 0xf51530f0, 0xf2cb0e3c, 0xeeefb15d,
- 0xe6da6ecf,
-}
-var we = [256]float32{
- 2.0249555e-09, 1.486674e-11, 2.4409617e-11, 3.1968806e-11,
- 3.844677e-11, 4.4228204e-11, 4.9516443e-11, 5.443359e-11,
- 5.905944e-11, 6.344942e-11, 6.7643814e-11, 7.1672945e-11,
- 7.556032e-11, 7.932458e-11, 8.298079e-11, 8.654132e-11,
- 9.0016515e-11, 9.3415074e-11, 9.674443e-11, 1.0001099e-10,
- 1.03220314e-10, 1.06377254e-10, 1.09486115e-10, 1.1255068e-10,
- 1.1557435e-10, 1.1856015e-10, 1.2151083e-10, 1.2442886e-10,
- 1.2731648e-10, 1.3017575e-10, 1.3300853e-10, 1.3581657e-10,
- 1.3860142e-10, 1.4136457e-10, 1.4410738e-10, 1.4683108e-10,
- 1.4953687e-10, 1.5222583e-10, 1.54899e-10, 1.5755733e-10,
- 1.6020171e-10, 1.6283301e-10, 1.6545203e-10, 1.6805951e-10,
- 1.7065617e-10, 1.732427e-10, 1.7581973e-10, 1.7838787e-10,
- 1.8094774e-10, 1.8349985e-10, 1.8604476e-10, 1.8858298e-10,
- 1.9111498e-10, 1.9364126e-10, 1.9616223e-10, 1.9867835e-10,
- 2.0119004e-10, 2.0369768e-10, 2.0620168e-10, 2.087024e-10,
- 2.1120022e-10, 2.136955e-10, 2.1618855e-10, 2.1867974e-10,
- 2.2116936e-10, 2.2365775e-10, 2.261452e-10, 2.2863202e-10,
- 2.311185e-10, 2.3360494e-10, 2.360916e-10, 2.3857874e-10,
- 2.4106667e-10, 2.4355562e-10, 2.4604588e-10, 2.485377e-10,
- 2.5103128e-10, 2.5352695e-10, 2.560249e-10, 2.585254e-10,
- 2.6102867e-10, 2.6353494e-10, 2.6604446e-10, 2.6855745e-10,
- 2.7107416e-10, 2.7359479e-10, 2.761196e-10, 2.7864877e-10,
- 2.8118255e-10, 2.8372119e-10, 2.8626485e-10, 2.888138e-10,
- 2.9136826e-10, 2.939284e-10, 2.9649452e-10, 2.9906677e-10,
- 3.016454e-10, 3.0423064e-10, 3.0682268e-10, 3.0942177e-10,
- 3.1202813e-10, 3.1464195e-10, 3.1726352e-10, 3.19893e-10,
- 3.2253064e-10, 3.251767e-10, 3.2783135e-10, 3.3049485e-10,
- 3.3316744e-10, 3.3584938e-10, 3.3854083e-10, 3.4124212e-10,
- 3.4395342e-10, 3.46675e-10, 3.4940711e-10, 3.5215003e-10,
- 3.5490397e-10, 3.5766917e-10, 3.6044595e-10, 3.6323455e-10,
- 3.660352e-10, 3.6884823e-10, 3.7167386e-10, 3.745124e-10,
- 3.773641e-10, 3.802293e-10, 3.8310827e-10, 3.860013e-10,
- 3.8890866e-10, 3.918307e-10, 3.9476775e-10, 3.9772008e-10,
- 4.0068804e-10, 4.0367196e-10, 4.0667217e-10, 4.09689e-10,
- 4.1272286e-10, 4.1577405e-10, 4.1884296e-10, 4.2192994e-10,
- 4.250354e-10, 4.281597e-10, 4.313033e-10, 4.3446652e-10,
- 4.3764986e-10, 4.408537e-10, 4.4407847e-10, 4.4732465e-10,
- 4.5059267e-10, 4.5388301e-10, 4.571962e-10, 4.6053267e-10,
- 4.6389292e-10, 4.6727755e-10, 4.70687e-10, 4.741219e-10,
- 4.7758275e-10, 4.810702e-10, 4.845848e-10, 4.8812715e-10,
- 4.9169796e-10, 4.9529775e-10, 4.989273e-10, 5.0258725e-10,
- 5.0627835e-10, 5.100013e-10, 5.1375687e-10, 5.1754584e-10,
- 5.21369e-10, 5.2522725e-10, 5.2912136e-10, 5.330522e-10,
- 5.370208e-10, 5.4102806e-10, 5.45075e-10, 5.491625e-10,
- 5.532918e-10, 5.5746385e-10, 5.616799e-10, 5.6594107e-10,
- 5.7024857e-10, 5.746037e-10, 5.7900773e-10, 5.834621e-10,
- 5.8796823e-10, 5.925276e-10, 5.971417e-10, 6.018122e-10,
- 6.065408e-10, 6.113292e-10, 6.1617933e-10, 6.2109295e-10,
- 6.260722e-10, 6.3111916e-10, 6.3623595e-10, 6.4142497e-10,
- 6.4668854e-10, 6.5202926e-10, 6.5744976e-10, 6.6295286e-10,
- 6.6854156e-10, 6.742188e-10, 6.79988e-10, 6.858526e-10,
- 6.9181616e-10, 6.978826e-10, 7.04056e-10, 7.103407e-10,
- 7.167412e-10, 7.2326256e-10, 7.2990985e-10, 7.366886e-10,
- 7.4360473e-10, 7.5066453e-10, 7.5787476e-10, 7.6524265e-10,
- 7.7277595e-10, 7.80483e-10, 7.883728e-10, 7.9645507e-10,
- 8.047402e-10, 8.1323964e-10, 8.219657e-10, 8.309319e-10,
- 8.401528e-10, 8.496445e-10, 8.594247e-10, 8.6951274e-10,
- 8.799301e-10, 8.9070046e-10, 9.018503e-10, 9.134092e-10,
- 9.254101e-10, 9.378904e-10, 9.508923e-10, 9.644638e-10,
- 9.786603e-10, 9.935448e-10, 1.0091913e-09, 1.025686e-09,
- 1.0431306e-09, 1.0616465e-09, 1.08138e-09, 1.1025096e-09,
- 1.1252564e-09, 1.1498986e-09, 1.1767932e-09, 1.206409e-09,
- 1.2393786e-09, 1.276585e-09, 1.3193139e-09, 1.3695435e-09,
- 1.4305498e-09, 1.508365e-09, 1.6160854e-09, 1.7921248e-09,
-}
-var fe = [256]float32{
- 1, 0.9381437, 0.90046996, 0.87170434, 0.8477855, 0.8269933,
- 0.8084217, 0.7915276, 0.77595687, 0.7614634, 0.7478686,
- 0.7350381, 0.72286767, 0.71127474, 0.70019263, 0.6895665,
- 0.67935055, 0.6695063, 0.66000086, 0.65080583, 0.6418967,
- 0.63325197, 0.6248527, 0.6166822, 0.60872537, 0.60096896,
- 0.5934009, 0.58601034, 0.5787874, 0.57172304, 0.5648092,
- 0.5580383, 0.5514034, 0.5448982, 0.5385169, 0.53225386,
- 0.5261042, 0.52006316, 0.5141264, 0.50828975, 0.5025495,
- 0.496902, 0.49134386, 0.485872, 0.48048335, 0.4751752,
- 0.46994483, 0.46478975, 0.45970762, 0.45469615, 0.44975325,
- 0.44487688, 0.44006512, 0.43531612, 0.43062815, 0.42599955,
- 0.42142874, 0.4169142, 0.41245446, 0.40804818, 0.403694,
- 0.3993907, 0.39513698, 0.39093173, 0.38677382, 0.38266218,
- 0.37859577, 0.37457356, 0.37059465, 0.3666581, 0.362763,
- 0.35890847, 0.35509375, 0.351318, 0.3475805, 0.34388044,
- 0.34021714, 0.3365899, 0.33299807, 0.32944095, 0.32591796,
- 0.3224285, 0.3189719, 0.31554767, 0.31215525, 0.30879408,
- 0.3054636, 0.3021634, 0.29889292, 0.2956517, 0.29243928,
- 0.28925523, 0.28609908, 0.28297043, 0.27986884, 0.27679393,
- 0.2737453, 0.2707226, 0.2677254, 0.26475343, 0.26180625,
- 0.25888354, 0.25598502, 0.2531103, 0.25025907, 0.24743107,
- 0.24462597, 0.24184346, 0.23908329, 0.23634516, 0.23362878,
- 0.23093392, 0.2282603, 0.22560766, 0.22297576, 0.22036438,
- 0.21777324, 0.21520215, 0.21265087, 0.21011916, 0.20760682,
- 0.20511365, 0.20263945, 0.20018397, 0.19774707, 0.19532852,
- 0.19292815, 0.19054577, 0.1881812, 0.18583426, 0.18350479,
- 0.1811926, 0.17889754, 0.17661946, 0.17435817, 0.17211354,
- 0.1698854, 0.16767362, 0.16547804, 0.16329853, 0.16113494,
- 0.15898713, 0.15685499, 0.15473837, 0.15263714, 0.15055119,
- 0.14848037, 0.14642459, 0.14438373, 0.14235765, 0.14034624,
- 0.13834943, 0.13636707, 0.13439907, 0.13244532, 0.13050574,
- 0.1285802, 0.12666863, 0.12477092, 0.12288698, 0.12101672,
- 0.119160056, 0.1173169, 0.115487166, 0.11367077, 0.11186763,
- 0.11007768, 0.10830083, 0.10653701, 0.10478614, 0.10304816,
- 0.101323, 0.09961058, 0.09791085, 0.09622374, 0.09454919,
- 0.09288713, 0.091237515, 0.08960028, 0.087975375, 0.08636274,
- 0.08476233, 0.083174095, 0.081597984, 0.08003395, 0.07848195,
- 0.076941945, 0.07541389, 0.07389775, 0.072393484, 0.07090106,
- 0.069420435, 0.06795159, 0.066494495, 0.06504912, 0.063615434,
- 0.062193416, 0.060783047, 0.059384305, 0.057997175,
- 0.05662164, 0.05525769, 0.053905312, 0.052564494, 0.051235236,
- 0.049917534, 0.048611384, 0.047316793, 0.046033762, 0.0447623,
- 0.043502413, 0.042254124, 0.041017443, 0.039792392,
- 0.038578995, 0.037377283, 0.036187284, 0.035009038,
- 0.033842582, 0.032687962, 0.031545233, 0.030414443, 0.02929566,
- 0.02818895, 0.027094385, 0.026012046, 0.024942026, 0.023884421,
- 0.022839336, 0.021806888, 0.020787204, 0.019780423, 0.0187867,
- 0.0178062, 0.016839107, 0.015885621, 0.014945968, 0.014020392,
- 0.013109165, 0.012212592, 0.011331013, 0.01046481, 0.009614414,
- 0.008780315, 0.007963077, 0.0071633533, 0.006381906,
- 0.0056196423, 0.0048776558, 0.004157295, 0.0034602648,
- 0.0027887989, 0.0021459677, 0.0015362998, 0.0009672693,
- 0.00045413437,
-}
diff --git a/contrib/go/_std_1.18/src/math/rand/normal.go b/contrib/go/_std_1.18/src/math/rand/normal.go
deleted file mode 100644
index 2c5a7aa99b..0000000000
--- a/contrib/go/_std_1.18/src/math/rand/normal.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rand
-
-import (
- "math"
-)
-
-/*
- * Normal distribution
- *
- * See "The Ziggurat Method for Generating Random Variables"
- * (Marsaglia & Tsang, 2000)
- * http://www.jstatsoft.org/v05/i08/paper [pdf]
- */
-
-const (
- rn = 3.442619855899
-)
-
-func absInt32(i int32) uint32 {
- if i < 0 {
- return uint32(-i)
- }
- return uint32(i)
-}
-
-// NormFloat64 returns a normally distributed float64 in
-// the range -math.MaxFloat64 through +math.MaxFloat64 inclusive,
-// with standard normal distribution (mean = 0, stddev = 1).
-// To produce a different normal distribution, callers can
-// adjust the output using:
-//
-// sample = NormFloat64() * desiredStdDev + desiredMean
-//
-func (r *Rand) NormFloat64() float64 {
- for {
- j := int32(r.Uint32()) // Possibly negative
- i := j & 0x7F
- x := float64(j) * float64(wn[i])
- if absInt32(j) < kn[i] {
- // This case should be hit better than 99% of the time.
- return x
- }
-
- if i == 0 {
- // This extra work is only required for the base strip.
- for {
- x = -math.Log(r.Float64()) * (1.0 / rn)
- y := -math.Log(r.Float64())
- if y+y >= x*x {
- break
- }
- }
- if j > 0 {
- return rn + x
- }
- return -rn - x
- }
- if fn[i]+float32(r.Float64())*(fn[i-1]-fn[i]) < float32(math.Exp(-.5*x*x)) {
- return x
- }
- }
-}
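
And the corresponding rescaling sketch for a non-standard normal; mean 10 and stddev 2 are example values:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	r := rand.New(rand.NewSource(1))
	const mean, stddev = 10.0, 2.0 // example parameters
	const n = 100000
	var sum, sumSq float64
	for i := 0; i < n; i++ {
		x := r.NormFloat64()*stddev + mean // documented adjustment
		sum += x
		sumSq += x * x
	}
	m := sum / n
	fmt.Printf("mean = %.2f, variance = %.2f\n", m, sumSq/n-m*m)
}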
-
-var kn = [128]uint32{
- 0x76ad2212, 0x0, 0x600f1b53, 0x6ce447a6, 0x725b46a2,
- 0x7560051d, 0x774921eb, 0x789a25bd, 0x799045c3, 0x7a4bce5d,
- 0x7adf629f, 0x7b5682a6, 0x7bb8a8c6, 0x7c0ae722, 0x7c50cce7,
- 0x7c8cec5b, 0x7cc12cd6, 0x7ceefed2, 0x7d177e0b, 0x7d3b8883,
- 0x7d5bce6c, 0x7d78dd64, 0x7d932886, 0x7dab0e57, 0x7dc0dd30,
- 0x7dd4d688, 0x7de73185, 0x7df81cea, 0x7e07c0a3, 0x7e163efa,
- 0x7e23b587, 0x7e303dfd, 0x7e3beec2, 0x7e46db77, 0x7e51155d,
- 0x7e5aabb3, 0x7e63abf7, 0x7e6c222c, 0x7e741906, 0x7e7b9a18,
- 0x7e82adfa, 0x7e895c63, 0x7e8fac4b, 0x7e95a3fb, 0x7e9b4924,
- 0x7ea0a0ef, 0x7ea5b00d, 0x7eaa7ac3, 0x7eaf04f3, 0x7eb3522a,
- 0x7eb765a5, 0x7ebb4259, 0x7ebeeafd, 0x7ec2620a, 0x7ec5a9c4,
- 0x7ec8c441, 0x7ecbb365, 0x7ece78ed, 0x7ed11671, 0x7ed38d62,
- 0x7ed5df12, 0x7ed80cb4, 0x7eda175c, 0x7edc0005, 0x7eddc78e,
- 0x7edf6ebf, 0x7ee0f647, 0x7ee25ebe, 0x7ee3a8a9, 0x7ee4d473,
- 0x7ee5e276, 0x7ee6d2f5, 0x7ee7a620, 0x7ee85c10, 0x7ee8f4cd,
- 0x7ee97047, 0x7ee9ce59, 0x7eea0eca, 0x7eea3147, 0x7eea3568,
- 0x7eea1aab, 0x7ee9e071, 0x7ee98602, 0x7ee90a88, 0x7ee86d08,
- 0x7ee7ac6a, 0x7ee6c769, 0x7ee5bc9c, 0x7ee48a67, 0x7ee32efc,
- 0x7ee1a857, 0x7edff42f, 0x7ede0ffa, 0x7edbf8d9, 0x7ed9ab94,
- 0x7ed7248d, 0x7ed45fae, 0x7ed1585c, 0x7ece095f, 0x7eca6ccb,
- 0x7ec67be2, 0x7ec22eee, 0x7ebd7d1a, 0x7eb85c35, 0x7eb2c075,
- 0x7eac9c20, 0x7ea5df27, 0x7e9e769f, 0x7e964c16, 0x7e8d44ba,
- 0x7e834033, 0x7e781728, 0x7e6b9933, 0x7e5d8a1a, 0x7e4d9ded,
- 0x7e3b737a, 0x7e268c2f, 0x7e0e3ff5, 0x7df1aa5d, 0x7dcf8c72,
- 0x7da61a1e, 0x7d72a0fb, 0x7d30e097, 0x7cd9b4ab, 0x7c600f1a,
- 0x7ba90bdc, 0x7a722176, 0x77d664e5,
-}
-var wn = [128]float32{
- 1.7290405e-09, 1.2680929e-10, 1.6897518e-10, 1.9862688e-10,
- 2.2232431e-10, 2.4244937e-10, 2.601613e-10, 2.7611988e-10,
- 2.9073963e-10, 3.042997e-10, 3.1699796e-10, 3.289802e-10,
- 3.4035738e-10, 3.5121603e-10, 3.616251e-10, 3.7164058e-10,
- 3.8130857e-10, 3.9066758e-10, 3.9975012e-10, 4.08584e-10,
- 4.1719309e-10, 4.2559822e-10, 4.338176e-10, 4.418672e-10,
- 4.497613e-10, 4.5751258e-10, 4.651324e-10, 4.7263105e-10,
- 4.8001775e-10, 4.87301e-10, 4.944885e-10, 5.015873e-10,
- 5.0860405e-10, 5.155446e-10, 5.2241467e-10, 5.2921934e-10,
- 5.359635e-10, 5.426517e-10, 5.4928817e-10, 5.5587696e-10,
- 5.624219e-10, 5.6892646e-10, 5.753941e-10, 5.818282e-10,
- 5.882317e-10, 5.946077e-10, 6.00959e-10, 6.072884e-10,
- 6.135985e-10, 6.19892e-10, 6.2617134e-10, 6.3243905e-10,
- 6.386974e-10, 6.449488e-10, 6.511956e-10, 6.5744005e-10,
- 6.6368433e-10, 6.699307e-10, 6.7618144e-10, 6.824387e-10,
- 6.8870465e-10, 6.949815e-10, 7.012715e-10, 7.075768e-10,
- 7.1389966e-10, 7.202424e-10, 7.266073e-10, 7.329966e-10,
- 7.394128e-10, 7.4585826e-10, 7.5233547e-10, 7.58847e-10,
- 7.653954e-10, 7.719835e-10, 7.7861395e-10, 7.852897e-10,
- 7.920138e-10, 7.987892e-10, 8.0561924e-10, 8.125073e-10,
- 8.194569e-10, 8.2647167e-10, 8.3355556e-10, 8.407127e-10,
- 8.479473e-10, 8.55264e-10, 8.6266755e-10, 8.7016316e-10,
- 8.777562e-10, 8.8545243e-10, 8.932582e-10, 9.0117996e-10,
- 9.09225e-10, 9.174008e-10, 9.2571584e-10, 9.341788e-10,
- 9.427997e-10, 9.515889e-10, 9.605579e-10, 9.697193e-10,
- 9.790869e-10, 9.88676e-10, 9.985036e-10, 1.0085882e-09,
- 1.0189509e-09, 1.0296151e-09, 1.0406069e-09, 1.0519566e-09,
- 1.063698e-09, 1.0758702e-09, 1.0885183e-09, 1.1016947e-09,
- 1.1154611e-09, 1.1298902e-09, 1.1450696e-09, 1.1611052e-09,
- 1.1781276e-09, 1.1962995e-09, 1.2158287e-09, 1.2369856e-09,
- 1.2601323e-09, 1.2857697e-09, 1.3146202e-09, 1.347784e-09,
- 1.3870636e-09, 1.4357403e-09, 1.5008659e-09, 1.6030948e-09,
-}
-var fn = [128]float32{
- 1, 0.9635997, 0.9362827, 0.9130436, 0.89228165, 0.87324303,
- 0.8555006, 0.8387836, 0.8229072, 0.8077383, 0.793177,
- 0.7791461, 0.7655842, 0.7524416, 0.73967725, 0.7272569,
- 0.7151515, 0.7033361, 0.69178915, 0.68049186, 0.6694277,
- 0.658582, 0.6479418, 0.63749546, 0.6272325, 0.6171434,
- 0.6072195, 0.5974532, 0.58783704, 0.5783647, 0.56903,
- 0.5598274, 0.5507518, 0.54179835, 0.5329627, 0.52424055,
- 0.5156282, 0.50712204, 0.49871865, 0.49041483, 0.48220766,
- 0.4740943, 0.46607214, 0.4581387, 0.45029163, 0.44252872,
- 0.43484783, 0.427247, 0.41972435, 0.41227803, 0.40490642,
- 0.39760786, 0.3903808, 0.3832238, 0.37613547, 0.36911446,
- 0.3621595, 0.35526937, 0.34844297, 0.34167916, 0.33497685,
- 0.3283351, 0.3217529, 0.3152294, 0.30876362, 0.30235484,
- 0.29600215, 0.28970486, 0.2834622, 0.2772735, 0.27113807,
- 0.2650553, 0.25902456, 0.2530453, 0.24711695, 0.241239,
- 0.23541094, 0.22963232, 0.2239027, 0.21822165, 0.21258877,
- 0.20700371, 0.20146611, 0.19597565, 0.19053204, 0.18513499,
- 0.17978427, 0.17447963, 0.1692209, 0.16400786, 0.15884037,
- 0.15371831, 0.14864157, 0.14361008, 0.13862377, 0.13368265,
- 0.12878671, 0.12393598, 0.119130544, 0.11437051, 0.10965602,
- 0.104987256, 0.10036444, 0.095787846, 0.0912578, 0.08677467,
- 0.0823389, 0.077950984, 0.073611505, 0.06932112, 0.06508058,
- 0.06089077, 0.056752663, 0.0526674, 0.048636295, 0.044660863,
- 0.040742867, 0.03688439, 0.033087887, 0.029356318,
- 0.025693292, 0.022103304, 0.018592102, 0.015167298,
- 0.011839478, 0.008624485, 0.005548995, 0.0026696292,
-}
diff --git a/contrib/go/_std_1.18/src/math/rand/rand.go b/contrib/go/_std_1.18/src/math/rand/rand.go
deleted file mode 100644
index 13f20ca5ef..0000000000
--- a/contrib/go/_std_1.18/src/math/rand/rand.go
+++ /dev/null
@@ -1,421 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package rand implements pseudo-random number generators unsuitable for
-// security-sensitive work.
-//
-// Random numbers are generated by a Source. Top-level functions, such as
-// Float64 and Int, use a default shared Source that produces a deterministic
-// sequence of values each time a program is run. Use the Seed function to
-// initialize the default Source if different behavior is required for each run.
-// The default Source is safe for concurrent use by multiple goroutines, but
-// Sources created by NewSource are not.
-//
-// This package's outputs might be easily predictable regardless of how it's
-// seeded. For random numbers suitable for security-sensitive work, see the
-// crypto/rand package.
-package rand
-
-import "sync"
-
-// A Source represents a source of uniformly-distributed
-// pseudo-random int64 values in the range [0, 1<<63).
-type Source interface {
- Int63() int64
- Seed(seed int64)
-}
-
-// A Source64 is a Source that can also generate
-// uniformly-distributed pseudo-random uint64 values in
-// the range [0, 1<<64) directly.
-// If a Rand r's underlying Source s implements Source64,
-// then r.Uint64 returns the result of one call to s.Uint64
-// instead of making two calls to s.Int63.
-type Source64 interface {
- Source
- Uint64() uint64
-}
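
To make the contract concrete, a toy Source (a 64-bit xorshift, chosen only for brevity, not as a generator recommendation) that rand.New will accept; since it does not implement Source64, Uint64 falls back to two Int63 calls:

package main

import (
	"fmt"
	"math/rand"
)

// xorshift64 is an illustrative Source; it deliberately does not
// implement Source64.
type xorshift64 struct{ state uint64 }

func (s *xorshift64) Seed(seed int64) {
	s.state = uint64(seed)
	if s.state == 0 {
		s.state = 1 // xorshift state must be non-zero
	}
}

func (s *xorshift64) Int63() int64 {
	s.state ^= s.state << 13
	s.state ^= s.state >> 7
	s.state ^= s.state << 17
	return int64(s.state >> 1) // drop the top bit to stay in [0, 1<<63)
}

func main() {
	r := rand.New(&xorshift64{state: 42})
	fmt.Println(r.Intn(100), r.Float64(), r.Uint64())
}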
-
-// NewSource returns a new pseudo-random Source seeded with the given value.
-// Unlike the default Source used by top-level functions, this source is not
-// safe for concurrent use by multiple goroutines.
-func NewSource(seed int64) Source {
- var rng rngSource
- rng.Seed(seed)
- return &rng
-}
-
-// A Rand is a source of random numbers.
-type Rand struct {
- src Source
- s64 Source64 // non-nil if src is source64
-
- // readVal contains remainder of 63-bit integer used for bytes
- // generation during most recent Read call.
- // It is saved so next Read call can start where the previous
- // one finished.
- readVal int64
- // readPos indicates the number of low-order bytes of readVal
- // that are still valid.
- readPos int8
-}
-
-// New returns a new Rand that uses random values from src
-// to generate other random values.
-func New(src Source) *Rand {
- s64, _ := src.(Source64)
- return &Rand{src: src, s64: s64}
-}
-
-// Seed uses the provided seed value to initialize the generator to a deterministic state.
-// Seed should not be called concurrently with any other Rand method.
-func (r *Rand) Seed(seed int64) {
- if lk, ok := r.src.(*lockedSource); ok {
- lk.seedPos(seed, &r.readPos)
- return
- }
-
- r.src.Seed(seed)
- r.readPos = 0
-}
-
-// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
-func (r *Rand) Int63() int64 { return r.src.Int63() }
-
-// Uint32 returns a pseudo-random 32-bit value as a uint32.
-func (r *Rand) Uint32() uint32 { return uint32(r.Int63() >> 31) }
-
-// Uint64 returns a pseudo-random 64-bit value as a uint64.
-func (r *Rand) Uint64() uint64 {
- if r.s64 != nil {
- return r.s64.Uint64()
- }
- return uint64(r.Int63())>>31 | uint64(r.Int63())<<32
-}
-
-// Int31 returns a non-negative pseudo-random 31-bit integer as an int32.
-func (r *Rand) Int31() int32 { return int32(r.Int63() >> 32) }
-
-// Int returns a non-negative pseudo-random int.
-func (r *Rand) Int() int {
- u := uint(r.Int63())
- return int(u << 1 >> 1) // clear sign bit if int == int32
-}
-
-// Int63n returns, as an int64, a non-negative pseudo-random number in the half-open interval [0,n).
-// It panics if n <= 0.
-func (r *Rand) Int63n(n int64) int64 {
- if n <= 0 {
- panic("invalid argument to Int63n")
- }
- if n&(n-1) == 0 { // n is power of two, can mask
- return r.Int63() & (n - 1)
- }
- max := int64((1 << 63) - 1 - (1<<63)%uint64(n))
- v := r.Int63()
- for v > max {
- v = r.Int63()
- }
- return v % n
-}
-
-// Int31n returns, as an int32, a non-negative pseudo-random number in the half-open interval [0,n).
-// It panics if n <= 0.
-func (r *Rand) Int31n(n int32) int32 {
- if n <= 0 {
- panic("invalid argument to Int31n")
- }
- if n&(n-1) == 0 { // n is power of two, can mask
- return r.Int31() & (n - 1)
- }
- max := int32((1 << 31) - 1 - (1<<31)%uint32(n))
- v := r.Int31()
- for v > max {
- v = r.Int31()
- }
- return v % n
-}
-
-// int31n returns, as an int32, a non-negative pseudo-random number in the half-open interval [0,n).
-// n must be > 0, but int31n does not check this; the caller must ensure it.
-// int31n exists because Int31n is inefficient, but Go 1 compatibility
-// requires that the stream of values produced by math/rand remain unchanged.
-// int31n can thus only be used internally, by newly introduced APIs.
-//
-// For implementation details, see:
-// https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction
-// https://lemire.me/blog/2016/06/30/fast-random-shuffling
-func (r *Rand) int31n(n int32) int32 {
- v := r.Uint32()
- prod := uint64(v) * uint64(n)
- low := uint32(prod)
- if low < uint32(n) {
- thresh := uint32(-n) % uint32(n)
- for low < thresh {
- v = r.Uint32()
- prod = uint64(v) * uint64(n)
- low = uint32(prod)
- }
- }
- return int32(prod >> 32)
-}
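
A scaled-down illustration (a sketch, not code from the source) of why the low < thresh rejection removes bias: with 8-bit words every possible generator output can be enumerated, and after rejecting the thresh smallest low values each bucket of prod>>8 ends up with exactly floor(256/n) outputs:

package main

import "fmt"

func main() {
	const n = 6
	counts := make([]int, n)
	thresh := uint8(256 % n) // plays the role of uint32(-n) % uint32(n)
	for v := 0; v < 256; v++ {
		prod := uint16(v) * n
		low := uint8(prod)
		if low < thresh {
			continue // rejected; a real generator would redraw v
		}
		counts[prod>>8]++
	}
	fmt.Println(counts) // [42 42 42 42 42 42]: perfectly uniform
}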
-
-// Intn returns, as an int, a non-negative pseudo-random number in the half-open interval [0,n).
-// It panics if n <= 0.
-func (r *Rand) Intn(n int) int {
- if n <= 0 {
- panic("invalid argument to Intn")
- }
- if n <= 1<<31-1 {
- return int(r.Int31n(int32(n)))
- }
- return int(r.Int63n(int64(n)))
-}
-
-// Float64 returns, as a float64, a pseudo-random number in the half-open interval [0.0,1.0).
-func (r *Rand) Float64() float64 {
- // A clearer, simpler implementation would be:
- // return float64(r.Int63n(1<<53)) / (1<<53)
- // However, Go 1 shipped with
- // return float64(r.Int63()) / (1 << 63)
- // and we want to preserve that value stream.
- //
- // There is one bug in the value stream: r.Int63() may be so close
- // to 1<<63 that the division rounds up to 1.0, and we've guaranteed
- // that the result is always less than 1.0.
- //
- // We tried to fix this by mapping 1.0 back to 0.0, but since float64
- // values near 0 are much denser than near 1, mapping 1 to 0 caused
- // a theoretically significant overshoot in the probability of returning 0.
- // Instead of that, if we round up to 1, just try again.
- // Getting 1 only happens 1/2⁵³ of the time, so most clients
- // will not observe it anyway.
-again:
- f := float64(r.Int63()) / (1 << 63)
- if f == 1 {
- goto again // resample; this branch is taken O(never)
- }
- return f
-}
-
-// Float32 returns, as a float32, a pseudo-random number in the half-open interval [0.0,1.0).
-func (r *Rand) Float32() float32 {
- // Same rationale as in Float64: we want to preserve the Go 1 value
- // stream except we want to fix it not to return 1.0
- // This only happens 1/2²⁴ of the time (plus the 1/2⁵³ of the time in Float64).
-again:
- f := float32(r.Float64())
- if f == 1 {
- goto again // resample; this branch is taken O(very rarely)
- }
- return f
-}
-
-// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers
-// in the half-open interval [0,n).
-func (r *Rand) Perm(n int) []int {
- m := make([]int, n)
- // In the following loop, the iteration when i=0 always swaps m[0] with m[0].
- // A change to remove this useless iteration would be to start i at 1 in the
- // init statement. But Perm also affects r, and starting i at 1 would change
- // the final state of r, so for Go 1 compatibility the iteration stays.
- for i := 0; i < n; i++ {
- j := r.Intn(i + 1)
- m[i] = m[j]
- m[j] = i
- }
- return m
-}
-
-// Shuffle pseudo-randomizes the order of elements.
-// n is the number of elements. Shuffle panics if n < 0.
-// swap swaps the elements with indexes i and j.
-func (r *Rand) Shuffle(n int, swap func(i, j int)) {
- if n < 0 {
- panic("invalid argument to Shuffle")
- }
-
- // Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
- // Shuffle really ought not be called with n that doesn't fit in 32 bits.
- // Not only will it take a very long time, but with 2³¹! possible permutations,
- // there's no way that any PRNG can have a big enough internal state to
- // generate even a minuscule percentage of the possible permutations.
- // Nevertheless, the right API signature accepts an int n, so handle it as best we can.
- i := n - 1
- for ; i > 1<<31-1-1; i-- {
- j := int(r.Int63n(int64(i + 1)))
- swap(i, j)
- }
- for ; i > 0; i-- {
- j := int(r.int31n(int32(i + 1)))
- swap(i, j)
- }
-}
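
Typical use, with the swap callback closing over the slice being permuted:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	words := []string{"alpha", "beta", "gamma", "delta", "epsilon"}
	r := rand.New(rand.NewSource(7))
	// The callback lets Shuffle permute any indexable collection
	// without knowing its element type.
	r.Shuffle(len(words), func(i, j int) {
		words[i], words[j] = words[j], words[i]
	})
	fmt.Println(words)
}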
-
-// Read generates len(p) random bytes and writes them into p. It
-// always returns len(p) and a nil error.
-// Read should not be called concurrently with any other Rand method.
-func (r *Rand) Read(p []byte) (n int, err error) {
- if lk, ok := r.src.(*lockedSource); ok {
- return lk.read(p, &r.readVal, &r.readPos)
- }
- return read(p, r.src, &r.readVal, &r.readPos)
-}
-
-func read(p []byte, src Source, readVal *int64, readPos *int8) (n int, err error) {
- pos := *readPos
- val := *readVal
- rng, _ := src.(*rngSource)
- for n = 0; n < len(p); n++ {
- if pos == 0 {
- if rng != nil {
- val = rng.Int63()
- } else {
- val = src.Int63()
- }
- pos = 7
- }
- p[n] = byte(val)
- val >>= 8
- pos--
- }
- *readPos = pos
- *readVal = val
- return
-}
-
-/*
- * Top-level convenience functions
- */
-
-var globalRand = New(&lockedSource{src: NewSource(1).(*rngSource)})
-
-// Type assert that globalRand's source is a lockedSource whose src is a *rngSource.
-var _ *rngSource = globalRand.src.(*lockedSource).src
-
-// Seed uses the provided seed value to initialize the default Source to a
-// deterministic state. If Seed is not called, the generator behaves as
-// if seeded by Seed(1). Seed values that have the same remainder when
-// divided by 2³¹-1 generate the same pseudo-random sequence.
-// Seed, unlike the Rand.Seed method, is safe for concurrent use.
-func Seed(seed int64) { globalRand.Seed(seed) }
-
-// Int63 returns a non-negative pseudo-random 63-bit integer as an int64
-// from the default Source.
-func Int63() int64 { return globalRand.Int63() }
-
-// Uint32 returns a pseudo-random 32-bit value as a uint32
-// from the default Source.
-func Uint32() uint32 { return globalRand.Uint32() }
-
-// Uint64 returns a pseudo-random 64-bit value as a uint64
-// from the default Source.
-func Uint64() uint64 { return globalRand.Uint64() }
-
-// Int31 returns a non-negative pseudo-random 31-bit integer as an int32
-// from the default Source.
-func Int31() int32 { return globalRand.Int31() }
-
-// Int returns a non-negative pseudo-random int from the default Source.
-func Int() int { return globalRand.Int() }
-
-// Int63n returns, as an int64, a non-negative pseudo-random number in the half-open interval [0,n)
-// from the default Source.
-// It panics if n <= 0.
-func Int63n(n int64) int64 { return globalRand.Int63n(n) }
-
-// Int31n returns, as an int32, a non-negative pseudo-random number in the half-open interval [0,n)
-// from the default Source.
-// It panics if n <= 0.
-func Int31n(n int32) int32 { return globalRand.Int31n(n) }
-
-// Intn returns, as an int, a non-negative pseudo-random number in the half-open interval [0,n)
-// from the default Source.
-// It panics if n <= 0.
-func Intn(n int) int { return globalRand.Intn(n) }
-
-// Float64 returns, as a float64, a pseudo-random number in the half-open interval [0.0,1.0)
-// from the default Source.
-func Float64() float64 { return globalRand.Float64() }
-
-// Float32 returns, as a float32, a pseudo-random number in the half-open interval [0.0,1.0)
-// from the default Source.
-func Float32() float32 { return globalRand.Float32() }
-
-// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers
-// in the half-open interval [0,n) from the default Source.
-func Perm(n int) []int { return globalRand.Perm(n) }
-
-// Shuffle pseudo-randomizes the order of elements using the default Source.
-// n is the number of elements. Shuffle panics if n < 0.
-// swap swaps the elements with indexes i and j.
-func Shuffle(n int, swap func(i, j int)) { globalRand.Shuffle(n, swap) }
-
-// Read generates len(p) random bytes from the default Source and
-// writes them into p. It always returns len(p) and a nil error.
-// Read, unlike the Rand.Read method, is safe for concurrent use.
-func Read(p []byte) (n int, err error) { return globalRand.Read(p) }
-
-// NormFloat64 returns a normally distributed float64 in the range
-// [-math.MaxFloat64, +math.MaxFloat64] with
-// standard normal distribution (mean = 0, stddev = 1)
-// from the default Source.
-// To produce a different normal distribution, callers can
-// adjust the output using:
-//
-// sample = NormFloat64() * desiredStdDev + desiredMean
-//
-func NormFloat64() float64 { return globalRand.NormFloat64() }
-
-// ExpFloat64 returns an exponentially distributed float64 in the range
-// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter
-// (lambda) is 1 and whose mean is 1/lambda (1) from the default Source.
-// To produce a distribution with a different rate parameter,
-// callers can adjust the output using:
-//
-// sample = ExpFloat64() / desiredRateParameter
-//
-func ExpFloat64() float64 { return globalRand.ExpFloat64() }
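
Putting the convenience functions together, a short sketch of the package-level API driven by the shared, lock-protected globalRand:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	rand.Seed(42) // omit to keep the documented Seed(1) default
	fmt.Println(rand.Intn(10), rand.Float64(), rand.Perm(5))

	buf := make([]byte, 8)
	rand.Read(buf) // always fills buf and returns a nil error
	fmt.Printf("%x\n", buf)
}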
-
-type lockedSource struct {
- lk sync.Mutex
- src *rngSource
-}
-
-func (r *lockedSource) Int63() (n int64) {
- r.lk.Lock()
- n = r.src.Int63()
- r.lk.Unlock()
- return
-}
-
-func (r *lockedSource) Uint64() (n uint64) {
- r.lk.Lock()
- n = r.src.Uint64()
- r.lk.Unlock()
- return
-}
-
-func (r *lockedSource) Seed(seed int64) {
- r.lk.Lock()
- r.src.Seed(seed)
- r.lk.Unlock()
-}
-
-// seedPos implements Seed for a lockedSource without a race condition.
-func (r *lockedSource) seedPos(seed int64, readPos *int8) {
- r.lk.Lock()
- r.src.Seed(seed)
- *readPos = 0
- r.lk.Unlock()
-}
-
-// read implements Read for a lockedSource without a race condition.
-func (r *lockedSource) read(p []byte, readVal *int64, readPos *int8) (n int, err error) {
- r.lk.Lock()
- n, err = read(p, r.src, readVal, readPos)
- r.lk.Unlock()
- return
-}
diff --git a/contrib/go/_std_1.18/src/math/remainder.go b/contrib/go/_std_1.18/src/math/remainder.go
deleted file mode 100644
index bf8bfd5553..0000000000
--- a/contrib/go/_std_1.18/src/math/remainder.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// The original C code and the comment below are from
-// FreeBSD's /usr/src/lib/msun/src/e_remainder.c and came
-// with this notice. The go code is a simplified version of
-// the original C.
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunPro, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-// __ieee754_remainder(x,y)
-// Return :
-// returns x REM y = x - [x/y]*y as if in infinite
-// precision arithmetic, where [x/y] is the (infinite bit)
-// integer nearest x/y (in half way cases, choose the even one).
-// Method :
-// Based on Mod() returning x - [x/y]chopped * y exactly.
-
-// Remainder returns the IEEE 754 floating-point remainder of x/y.
-//
-// Special cases are:
-// Remainder(±Inf, y) = NaN
-// Remainder(NaN, y) = NaN
-// Remainder(x, 0) = NaN
-// Remainder(x, ±Inf) = x
-// Remainder(x, NaN) = NaN
-func Remainder(x, y float64) float64 {
- if haveArchRemainder {
- return archRemainder(x, y)
- }
- return remainder(x, y)
-}
-
-func remainder(x, y float64) float64 {
- const (
- Tiny = 4.45014771701440276618e-308 // 0x0020000000000000
- HalfMax = MaxFloat64 / 2
- )
- // special cases
- switch {
- case IsNaN(x) || IsNaN(y) || IsInf(x, 0) || y == 0:
- return NaN()
- case IsInf(y, 0):
- return x
- }
- sign := false
- if x < 0 {
- x = -x
- sign = true
- }
- if y < 0 {
- y = -y
- }
- if x == y {
- if sign {
- zero := 0.0
- return -zero
- }
- return 0
- }
- if y <= HalfMax {
- x = Mod(x, y+y) // now x < 2y
- }
- if y < Tiny {
- if x+x > y {
- x -= y
- if x+x >= y {
- x -= y
- }
- }
- } else {
- yHalf := 0.5 * y
- if x > yHalf {
- x -= y
- if x >= yHalf {
- x -= y
- }
- }
- }
- if sign {
- x = -x
- }
- return x
-}
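
The practical difference from Mod is the rounding of the implicit quotient: Mod truncates it toward zero, while Remainder rounds it to the nearest integer (ties to even), so the result lies in [-y/2, y/2]:

package main

import (
	"fmt"
	"math"
)

func main() {
	fmt.Println(math.Mod(5, 3))        // 2:  5 - 1*3, quotient truncated to 1
	fmt.Println(math.Remainder(5, 3))  // -1: 5 - 2*3, quotient rounded to 2
	fmt.Println(math.Mod(-5, 3))       // -2: result keeps the sign of x
	fmt.Println(math.Remainder(-5, 3)) // 1: -5 + 2*3
}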
diff --git a/contrib/go/_std_1.18/src/math/sin.go b/contrib/go/_std_1.18/src/math/sin.go
deleted file mode 100644
index d95bb548e8..0000000000
--- a/contrib/go/_std_1.18/src/math/sin.go
+++ /dev/null
@@ -1,242 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-/*
- Floating-point sine and cosine.
-*/
-
-// The original C code, the long comment, and the constants
-// below were from http://netlib.sandia.gov/cephes/cmath/sin.c,
-// available from http://www.netlib.org/cephes/cmath.tgz.
-// The go code is a simplified version of the original C.
-//
-// sin.c
-//
-// Circular sine
-//
-// SYNOPSIS:
-//
-// double x, y, sin();
-// y = sin( x );
-//
-// DESCRIPTION:
-//
-// Range reduction is into intervals of pi/4. The reduction error is nearly
-// eliminated by contriving an extended precision modular arithmetic.
-//
-// Two polynomial approximating functions are employed.
-// Between 0 and pi/4 the sine is approximated by
-// x + x**3 P(x**2).
-// Between pi/4 and pi/2 the cosine is represented as
-// 1 - x**2 Q(x**2).
-//
-// ACCURACY:
-//
-// Relative error:
-//      arithmetic    domain             # trials    peak       rms
-//      DEC           0, 10              150000      3.0e-17    7.8e-18
-//      IEEE          -1.07e9,+1.07e9    130000      2.1e-16    5.4e-17
-//
-// Partial loss of accuracy begins to occur at x = 2**30 = 1.074e9. The loss
-// is not gradual, but jumps suddenly to about 1 part in 10e7. Results may
-// be meaningless for x > 2**49 = 5.6e14.
-//
-// cos.c
-//
-// Circular cosine
-//
-// SYNOPSIS:
-//
-// double x, y, cos();
-// y = cos( x );
-//
-// DESCRIPTION:
-//
-// Range reduction is into intervals of pi/4. The reduction error is nearly
-// eliminated by contriving an extended precision modular arithmetic.
-//
-// Two polynomial approximating functions are employed.
-// Between 0 and pi/4 the cosine is approximated by
-// 1 - x**2 Q(x**2).
-// Between pi/4 and pi/2 the sine is represented as
-// x + x**3 P(x**2).
-//
-// ACCURACY:
-//
-// Relative error:
-//      arithmetic    domain             # trials    peak       rms
-//      IEEE          -1.07e9,+1.07e9    130000      2.1e-16    5.4e-17
-//      DEC           0,+1.07e9          17000       3.0e-17    7.2e-18
-//
-// Cephes Math Library Release 2.8: June, 2000
-// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
-//
-// The readme file at http://netlib.sandia.gov/cephes/ says:
-// Some software in this archive may be from the book _Methods and
-// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
-// International, 1989) or from the Cephes Mathematical Library, a
-// commercial product. In either event, it is copyrighted by the author.
-// What you see here may be used freely but it comes with no support or
-// guarantee.
-//
-// The two known misprints in the book are repaired here in the
-// source listings for the gamma function and the incomplete beta
-// integral.
-//
-// Stephen L. Moshier
-// moshier@na-net.ornl.gov
-
-// sin coefficients
-var _sin = [...]float64{
- 1.58962301576546568060e-10, // 0x3de5d8fd1fd19ccd
- -2.50507477628578072866e-8, // 0xbe5ae5e5a9291f5d
- 2.75573136213857245213e-6, // 0x3ec71de3567d48a1
- -1.98412698295895385996e-4, // 0xbf2a01a019bfdf03
- 8.33333333332211858878e-3, // 0x3f8111111110f7d0
- -1.66666666666666307295e-1, // 0xbfc5555555555548
-}
-
-// cos coefficients
-var _cos = [...]float64{
- -1.13585365213876817300e-11, // 0xbda8fa49a0861a9b
- 2.08757008419747316778e-9, // 0x3e21ee9d7b4e3f05
- -2.75573141792967388112e-7, // 0xbe927e4f7eac4bc6
- 2.48015872888517045348e-5, // 0x3efa01a019c844f5
- -1.38888888888730564116e-3, // 0xbf56c16c16c14f91
- 4.16666666666665929218e-2, // 0x3fa555555555554b
-}
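
The deeply nested expressions in cos and sin below consume these tables via Horner's rule: one multiply and one add per coefficient. A generic sketch of the scheme (illustrative helper, not from the source):

package main

import "fmt"

// horner evaluates c[0]*x^(len(c)-1) + ... + c[len(c)-1].
func horner(c []float64, x float64) float64 {
	y := c[0]
	for _, ci := range c[1:] {
		y = y*x + ci // fold in the next coefficient
	}
	return y
}

func main() {
	// 2z^2 + 3z + 4 at z = 5: 2*25 + 15 + 4 = 69
	fmt.Println(horner([]float64{2, 3, 4}, 5))
}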
-
-// Cos returns the cosine of the radian argument x.
-//
-// Special cases are:
-// Cos(±Inf) = NaN
-// Cos(NaN) = NaN
-func Cos(x float64) float64 {
- if haveArchCos {
- return archCos(x)
- }
- return cos(x)
-}
-
-func cos(x float64) float64 {
- const (
- PI4A = 7.85398125648498535156e-1 // 0x3fe921fb40000000, Pi/4 split into three parts
- PI4B = 3.77489470793079817668e-8 // 0x3e64442d00000000,
- PI4C = 2.69515142907905952645e-15 // 0x3ce8469898cc5170,
- )
- // special cases
- switch {
- case IsNaN(x) || IsInf(x, 0):
- return NaN()
- }
-
- // make argument positive
- sign := false
- x = Abs(x)
-
- var j uint64
- var y, z float64
- if x >= reduceThreshold {
- j, z = trigReduce(x)
- } else {
- j = uint64(x * (4 / Pi)) // integer part of x/(Pi/4), as integer for tests on the phase angle
- y = float64(j) // integer part of x/(Pi/4), as float
-
- // map zeros to origin
- if j&1 == 1 {
- j++
- y++
- }
- j &= 7 // octant modulo 2Pi radians (360 degrees)
- z = ((x - y*PI4A) - y*PI4B) - y*PI4C // Extended precision modular arithmetic
- }
-
- if j > 3 {
- j -= 4
- sign = !sign
- }
- if j > 1 {
- sign = !sign
- }
-
- zz := z * z
- if j == 1 || j == 2 {
- y = z + z*zz*((((((_sin[0]*zz)+_sin[1])*zz+_sin[2])*zz+_sin[3])*zz+_sin[4])*zz+_sin[5])
- } else {
- y = 1.0 - 0.5*zz + zz*zz*((((((_cos[0]*zz)+_cos[1])*zz+_cos[2])*zz+_cos[3])*zz+_cos[4])*zz+_cos[5])
- }
- if sign {
- y = -y
- }
- return y
-}
-
-// Sin returns the sine of the radian argument x.
-//
-// Special cases are:
-// Sin(±0) = ±0
-// Sin(±Inf) = NaN
-// Sin(NaN) = NaN
-func Sin(x float64) float64 {
- if haveArchSin {
- return archSin(x)
- }
- return sin(x)
-}
-
-func sin(x float64) float64 {
- const (
- PI4A = 7.85398125648498535156e-1 // 0x3fe921fb40000000, Pi/4 split into three parts
- PI4B = 3.77489470793079817668e-8 // 0x3e64442d00000000,
- PI4C = 2.69515142907905952645e-15 // 0x3ce8469898cc5170,
- )
- // special cases
- switch {
- case x == 0 || IsNaN(x):
- return x // return ±0 || NaN()
- case IsInf(x, 0):
- return NaN()
- }
-
- // make argument positive but save the sign
- sign := false
- if x < 0 {
- x = -x
- sign = true
- }
-
- var j uint64
- var y, z float64
- if x >= reduceThreshold {
- j, z = trigReduce(x)
- } else {
- j = uint64(x * (4 / Pi)) // integer part of x/(Pi/4), as integer for tests on the phase angle
- y = float64(j) // integer part of x/(Pi/4), as float
-
- // map zeros to origin
- if j&1 == 1 {
- j++
- y++
- }
- j &= 7 // octant modulo 2Pi radians (360 degrees)
- z = ((x - y*PI4A) - y*PI4B) - y*PI4C // Extended precision modular arithmetic
- }
- // reflect in x axis
- if j > 3 {
- sign = !sign
- j -= 4
- }
- zz := z * z
- if j == 1 || j == 2 {
- y = 1.0 - 0.5*zz + zz*zz*((((((_cos[0]*zz)+_cos[1])*zz+_cos[2])*zz+_cos[3])*zz+_cos[4])*zz+_cos[5])
- } else {
- y = z + z*zz*((((((_sin[0]*zz)+_sin[1])*zz+_sin[2])*zz+_sin[3])*zz+_sin[4])*zz+_sin[5])
- }
- if sign {
- y = -y
- }
- return y
-}
diff --git a/contrib/go/_std_1.18/src/math/sincos.go b/contrib/go/_std_1.18/src/math/sincos.go
deleted file mode 100644
index 5c5726f689..0000000000
--- a/contrib/go/_std_1.18/src/math/sincos.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// Coefficients _sin[] and _cos[] are found in pkg/math/sin.go.
-
-// Sincos returns Sin(x), Cos(x).
-//
-// Special cases are:
-// Sincos(±0) = ±0, 1
-// Sincos(±Inf) = NaN, NaN
-// Sincos(NaN) = NaN, NaN
-func Sincos(x float64) (sin, cos float64) {
- const (
- PI4A = 7.85398125648498535156e-1 // 0x3fe921fb40000000, Pi/4 split into three parts
- PI4B = 3.77489470793079817668e-8 // 0x3e64442d00000000,
- PI4C = 2.69515142907905952645e-15 // 0x3ce8469898cc5170,
- )
- // special cases
- switch {
- case x == 0:
- return x, 1 // return ±0.0, 1.0
- case IsNaN(x) || IsInf(x, 0):
- return NaN(), NaN()
- }
-
- // make argument positive
- sinSign, cosSign := false, false
- if x < 0 {
- x = -x
- sinSign = true
- }
-
- var j uint64
- var y, z float64
- if x >= reduceThreshold {
- j, z = trigReduce(x)
- } else {
- j = uint64(x * (4 / Pi)) // integer part of x/(Pi/4), as integer for tests on the phase angle
- y = float64(j) // integer part of x/(Pi/4), as float
-
- if j&1 == 1 { // map zeros to origin
- j++
- y++
- }
- j &= 7 // octant modulo 2Pi radians (360 degrees)
- z = ((x - y*PI4A) - y*PI4B) - y*PI4C // Extended precision modular arithmetic
- }
- if j > 3 { // reflect in x axis
- j -= 4
- sinSign, cosSign = !sinSign, !cosSign
- }
- if j > 1 {
- cosSign = !cosSign
- }
-
- zz := z * z
- cos = 1.0 - 0.5*zz + zz*zz*((((((_cos[0]*zz)+_cos[1])*zz+_cos[2])*zz+_cos[3])*zz+_cos[4])*zz+_cos[5])
- sin = z + z*zz*((((((_sin[0]*zz)+_sin[1])*zz+_sin[2])*zz+_sin[3])*zz+_sin[4])*zz+_sin[5])
- if j == 1 || j == 2 {
- sin, cos = cos, sin
- }
- if cosSign {
- cos = -cos
- }
- if sinSign {
- sin = -sin
- }
- return
-}
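
Since one range reduction serves both outputs, Sincos is the natural call whenever both values are needed:

package main

import (
	"fmt"
	"math"
)

func main() {
	sin, cos := math.Sincos(math.Pi / 6)
	fmt.Printf("sin=%.4f cos=%.4f sin^2+cos^2=%.1f\n", sin, cos, sin*sin+cos*cos)
}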
diff --git a/contrib/go/_std_1.18/src/math/sinh.go b/contrib/go/_std_1.18/src/math/sinh.go
deleted file mode 100644
index 9fe9b4e17a..0000000000
--- a/contrib/go/_std_1.18/src/math/sinh.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-/*
- Floating-point hyperbolic sine and cosine.
-
- The exponential func is called for arguments
- greater in magnitude than 0.5.
-
- A series is used for arguments smaller in magnitude than 0.5.
-
- Cosh(x) is computed from the exponential func for
- all arguments.
-*/
-
-// Sinh returns the hyperbolic sine of x.
-//
-// Special cases are:
-// Sinh(±0) = ±0
-// Sinh(±Inf) = ±Inf
-// Sinh(NaN) = NaN
-func Sinh(x float64) float64 {
- if haveArchSinh {
- return archSinh(x)
- }
- return sinh(x)
-}
-
-func sinh(x float64) float64 {
- // The coefficients are #2029 from Hart & Cheney. (20.36D)
- const (
- P0 = -0.6307673640497716991184787251e+6
- P1 = -0.8991272022039509355398013511e+5
- P2 = -0.2894211355989563807284660366e+4
- P3 = -0.2630563213397497062819489e+2
- Q0 = -0.6307673640497716991212077277e+6
- Q1 = 0.1521517378790019070696485176e+5
- Q2 = -0.173678953558233699533450911e+3
- )
-
- sign := false
- if x < 0 {
- x = -x
- sign = true
- }
-
- var temp float64
- switch {
- case x > 21:
- temp = Exp(x) * 0.5
-
- case x > 0.5:
- ex := Exp(x)
- temp = (ex - 1/ex) * 0.5
-
- default:
- sq := x * x
- temp = (((P3*sq+P2)*sq+P1)*sq + P0) * x
- temp = temp / (((sq+Q2)*sq+Q1)*sq + Q0)
- }
-
- if sign {
- temp = -temp
- }
- return temp
-}
-
-// Cosh returns the hyperbolic cosine of x.
-//
-// Special cases are:
-// Cosh(±0) = 1
-// Cosh(±Inf) = +Inf
-// Cosh(NaN) = NaN
-func Cosh(x float64) float64 {
- if haveArchCosh {
- return archCosh(x)
- }
- return cosh(x)
-}
-
-func cosh(x float64) float64 {
- x = Abs(x)
- if x > 21 {
- return Exp(x) * 0.5
- }
- ex := Exp(x)
- return (ex + 1/ex) * 0.5
-}
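
A quick check that the series branch (|x| <= 0.5) and the exp branch agree with the identity cosh²x − sinh²x = 1. (The large-x branch above intentionally computes Exp(x)/2 for both functions, so the identity degrades numerically there and is not checked.)

package main

import (
	"fmt"
	"math"
)

func main() {
	for _, x := range []float64{0.25, 1, 5} {
		s, c := math.Sinh(x), math.Cosh(x)
		fmt.Printf("x=%v: cosh^2-sinh^2 = %.6f\n", x, c*c-s*s)
	}
}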
diff --git a/contrib/go/_std_1.18/src/math/sqrt.go b/contrib/go/_std_1.18/src/math/sqrt.go
deleted file mode 100644
index 903d57d5e0..0000000000
--- a/contrib/go/_std_1.18/src/math/sqrt.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// The original C code and the long comment below are
-// from FreeBSD's /usr/src/lib/msun/src/e_sqrt.c and
-// came with this notice. The go code is a simplified
-// version of the original C.
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunPro, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-// __ieee754_sqrt(x)
-// Return correctly rounded sqrt.
-// -----------------------------------------
-// | Use the hardware sqrt if you have one |
-// -----------------------------------------
-// Method:
-//   Bit by bit method using integer arithmetic. (Slow, but portable)
-//   1. Normalization
-//      Scale x to y in [1,4) with even powers of 2:
-//      find an integer k such that 1 <= (y=x*2**(2k)) < 4, then
-//          sqrt(x) = 2**k * sqrt(y)
-//   2. Bit by bit computation
-//      Let q_i = sqrt(y) truncated to i bit after binary point (q_0 = 1),
-//
-//          s_i = 2*q_i, and y_i = 2**(i+1) * (y - q_i**2).             (1)
-//
-//      To compute q_(i+1) from q_i, one checks whether
-//
-//          (q_i + 2**-(i+1))**2 <= y.                                  (2)
-//
-//      If (2) is false, then q_(i+1) = q_i; otherwise
-//      q_(i+1) = q_i + 2**-(i+1).
-//
-//      With some algebraic manipulation, it is not difficult to see
-//      that (2) is equivalent to
-//
-//          s_i + 2**-(i+1) <= y_i                                      (3)
-//
-//      The advantage of (3) is that s_i and y_i can be computed by
-//      the following recurrence formula:
-//          if (3) is false
-//
-//          s_(i+1) = s_i, y_(i+1) = y_i;                               (4)
-//
-//          otherwise,
-//
-//          s_(i+1) = s_i + 2**-i, y_(i+1) = y_i - s_i - 2**-(i+1)      (5)
-//
-//      One may easily use induction to prove (4) and (5).
-//      Note. Since the left hand side of (3) contain only i+2 bits,
-//      it is not necessary to do a full (53-bit) comparison
-//      in (3).
-//   3. Final rounding
-//      After generating the 53 bits result, we compute one more bit.
-//      Together with the remainder, we can decide whether the
-//      result is exact, bigger than 1/2ulp, or less than 1/2ulp
-//      (it will never equal to 1/2ulp).
-//      The rounding mode can be detected by checking whether
-//      huge + tiny is equal to huge, and whether huge - tiny is
-//      equal to huge for some floating point number "huge" and "tiny".
-//
-//
-// Notes: Rounding mode detection omitted. The constants "mask", "shift",
-// and "bias" are found in src/math/bits.go
-
-// Sqrt returns the square root of x.
-//
-// Special cases are:
-// Sqrt(+Inf) = +Inf
-// Sqrt(±0) = ±0
-// Sqrt(x < 0) = NaN
-// Sqrt(NaN) = NaN
-func Sqrt(x float64) float64 {
- if haveArchSqrt {
- return archSqrt(x)
- }
- return sqrt(x)
-}
-
-// Note: Sqrt is implemented in assembly on some systems.
-// Others have assembly stubs that jump to func sqrt below.
-// On systems where Sqrt is a single instruction, the compiler
-// may turn a direct call into a direct use of that instruction instead.
-
-func sqrt(x float64) float64 {
- // special cases
- switch {
- case x == 0 || IsNaN(x) || IsInf(x, 1):
- return x
- case x < 0:
- return NaN()
- }
- ix := Float64bits(x)
- // normalize x
- exp := int((ix >> shift) & mask)
- if exp == 0 { // subnormal x
- for ix&(1<<shift) == 0 {
- ix <<= 1
- exp--
- }
- exp++
- }
- exp -= bias // unbias exponent
- ix &^= mask << shift
- ix |= 1 << shift
- if exp&1 == 1 { // odd exp, double x to make it even
- ix <<= 1
- }
- exp >>= 1 // exp = exp/2, exponent of square root
- // generate sqrt(x) bit by bit
- ix <<= 1
- var q, s uint64 // q = sqrt(x)
- r := uint64(1 << (shift + 1)) // r = moving bit from MSB to LSB
- for r != 0 {
- t := s + r
- if t <= ix {
- s = t + r
- ix -= t
- q += r
- }
- ix <<= 1
- r >>= 1
- }
- // final rounding
- if ix != 0 { // remainder, result not exact
- q += q & 1 // round according to extra bit
- }
- ix = q>>1 + uint64(exp-1+bias)<<shift // significand + biased exponent
- return Float64frombits(ix)
-}
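
The same shift-and-test recurrence is easier to follow on plain integers. A minimal uint32 floor-sqrt in the classic form (an illustrative analogue, not code from the file above):

package main

import "fmt"

// isqrt returns floor(sqrt(x)) using the bit-by-bit recurrence: bit walks
// a power of four from the top down, and res accumulates the root.
func isqrt(x uint32) uint32 {
	var res uint32
	bit := uint32(1) << 30 // highest power of four in a uint32
	for bit > x {
		bit >>= 2
	}
	for bit != 0 {
		if x >= res+bit {
			x -= res + bit
			res = res>>1 + bit // this bit belongs in the root
		} else {
			res >>= 1
		}
		bit >>= 2
	}
	return res
}

func main() {
	fmt.Println(isqrt(0), isqrt(10), isqrt(16), isqrt(1<<31)) // 0 3 4 46340
}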
diff --git a/contrib/go/_std_1.18/src/math/tan.go b/contrib/go/_std_1.18/src/math/tan.go
deleted file mode 100644
index a25417f527..0000000000
--- a/contrib/go/_std_1.18/src/math/tan.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-/*
- Floating-point tangent.
-*/
-
-// The original C code, the long comment, and the constants
-// below were from http://netlib.sandia.gov/cephes/cmath/sin.c,
-// available from http://www.netlib.org/cephes/cmath.tgz.
-// The go code is a simplified version of the original C.
-//
-// tan.c
-//
-// Circular tangent
-//
-// SYNOPSIS:
-//
-// double x, y, tan();
-// y = tan( x );
-//
-// DESCRIPTION:
-//
-// Returns the circular tangent of the radian argument x.
-//
-// Range reduction is modulo pi/4. A rational function
-// x + x**3 P(x**2)/Q(x**2)
-// is employed in the basic interval [0, pi/4].
-//
-// ACCURACY:
-// Relative error:
-//      arithmetic    domain      # trials    peak       rms
-//      DEC           +-1.07e9    44000       4.1e-17    1.0e-17
-//      IEEE          +-1.07e9    30000       2.9e-16    8.1e-17
-//
-// Partial loss of accuracy begins to occur at x = 2**30 = 1.074e9. The loss
-// is not gradual, but jumps suddenly to about 1 part in 10e7. Results may
-// be meaningless for x > 2**49 = 5.6e14.
-// [Accuracy loss statement from sin.go comments.]
-//
-// Cephes Math Library Release 2.8: June, 2000
-// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
-//
-// The readme file at http://netlib.sandia.gov/cephes/ says:
-// Some software in this archive may be from the book _Methods and
-// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
-// International, 1989) or from the Cephes Mathematical Library, a
-// commercial product. In either event, it is copyrighted by the author.
-// What you see here may be used freely but it comes with no support or
-// guarantee.
-//
-// The two known misprints in the book are repaired here in the
-// source listings for the gamma function and the incomplete beta
-// integral.
-//
-// Stephen L. Moshier
-// moshier@na-net.ornl.gov
-
-// tan coefficients
-var _tanP = [...]float64{
- -1.30936939181383777646e4, // 0xc0c992d8d24f3f38
- 1.15351664838587416140e6, // 0x413199eca5fc9ddd
- -1.79565251976484877988e7, // 0xc1711fead3299176
-}
-var _tanQ = [...]float64{
- 1.00000000000000000000e0,
- 1.36812963470692954678e4, //0x40cab8a5eeb36572
- -1.32089234440210967447e6, //0xc13427bc582abc96
- 2.50083801823357915839e7, //0x4177d98fc2ead8ef
- -5.38695755929454629881e7, //0xc189afe03cbe5a31
-}
-
-// Tan returns the tangent of the radian argument x.
-//
-// Special cases are:
-// Tan(±0) = ±0
-// Tan(±Inf) = NaN
-// Tan(NaN) = NaN
-func Tan(x float64) float64 {
- if haveArchTan {
- return archTan(x)
- }
- return tan(x)
-}
-
-func tan(x float64) float64 {
- const (
- PI4A = 7.85398125648498535156e-1 // 0x3fe921fb40000000, Pi/4 split into three parts
- PI4B = 3.77489470793079817668e-8 // 0x3e64442d00000000,
- PI4C = 2.69515142907905952645e-15 // 0x3ce8469898cc5170,
- )
- // special cases
- switch {
- case x == 0 || IsNaN(x):
- return x // return ±0 || NaN()
- case IsInf(x, 0):
- return NaN()
- }
-
- // make argument positive but save the sign
- sign := false
- if x < 0 {
- x = -x
- sign = true
- }
- var j uint64
- var y, z float64
- if x >= reduceThreshold {
- j, z = trigReduce(x)
- } else {
- j = uint64(x * (4 / Pi)) // integer part of x/(Pi/4), as integer for tests on the phase angle
- y = float64(j) // integer part of x/(Pi/4), as float
-
- /* map zeros and singularities to origin */
- if j&1 == 1 {
- j++
- y++
- }
-
- z = ((x - y*PI4A) - y*PI4B) - y*PI4C
- }
- zz := z * z
-
- if zz > 1e-14 {
- y = z + z*(zz*(((_tanP[0]*zz)+_tanP[1])*zz+_tanP[2])/((((zz+_tanQ[1])*zz+_tanQ[2])*zz+_tanQ[3])*zz+_tanQ[4]))
- } else {
- y = z
- }
- if j&2 == 2 {
- y = -1 / y
- }
- if sign {
- y = -y
- }
- return y
-}
diff --git a/contrib/go/_std_1.18/src/math/tanh.go b/contrib/go/_std_1.18/src/math/tanh.go
deleted file mode 100644
index a825678424..0000000000
--- a/contrib/go/_std_1.18/src/math/tanh.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-// The original C code, the long comment, and the constants
-// below were from http://netlib.sandia.gov/cephes/cmath/sin.c,
-// available from http://www.netlib.org/cephes/cmath.tgz.
-// The go code is a simplified version of the original C.
-// tanh.c
-//
-// Hyperbolic tangent
-//
-// SYNOPSIS:
-//
-// double x, y, tanh();
-//
-// y = tanh( x );
-//
-// DESCRIPTION:
-//
-// Returns hyperbolic tangent of argument in the range MINLOG to MAXLOG.
-// MAXLOG = 8.8029691931113054295988e+01 = log(2**127)
-// MINLOG = -8.872283911167299960540e+01 = log(2**-128)
-//
-// A rational function is used for |x| < 0.625. The form
-// x + x**3 P(x)/Q(x) of Cody & Waite is employed.
-// Otherwise,
-// tanh(x) = sinh(x)/cosh(x) = 1 - 2/(exp(2x) + 1).
-//
-// ACCURACY:
-//
-// Relative error:
-//      arithmetic    domain    # trials    peak       rms
-//      IEEE          -2,2      30000       2.5e-16    5.8e-17
-//
-// Cephes Math Library Release 2.8: June, 2000
-// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
-//
-// The readme file at http://netlib.sandia.gov/cephes/ says:
-// Some software in this archive may be from the book _Methods and
-// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
-// International, 1989) or from the Cephes Mathematical Library, a
-// commercial product. In either event, it is copyrighted by the author.
-// What you see here may be used freely but it comes with no support or
-// guarantee.
-//
-// The two known misprints in the book are repaired here in the
-// source listings for the gamma function and the incomplete beta
-// integral.
-//
-// Stephen L. Moshier
-// moshier@na-net.ornl.gov
-//
-
-var tanhP = [...]float64{
- -9.64399179425052238628e-1,
- -9.92877231001918586564e1,
- -1.61468768441708447952e3,
-}
-var tanhQ = [...]float64{
- 1.12811678491632931402e2,
- 2.23548839060100448583e3,
- 4.84406305325125486048e3,
-}
-
-// Tanh returns the hyperbolic tangent of x.
-//
-// Special cases are:
-// Tanh(±0) = ±0
-// Tanh(±Inf) = ±1
-// Tanh(NaN) = NaN
-func Tanh(x float64) float64 {
- if haveArchTanh {
- return archTanh(x)
- }
- return tanh(x)
-}
-
-func tanh(x float64) float64 {
- const MAXLOG = 8.8029691931113054295988e+01 // log(2**127)
- z := Abs(x)
- switch {
- case z > 0.5*MAXLOG:
- if x < 0 {
- return -1
- }
- return 1
- case z >= 0.625:
- s := Exp(2 * z)
- z = 1 - 2/(s+1)
- if x < 0 {
- z = -z
- }
- default:
- if x == 0 {
- return x
- }
- s := x * x
- z = x + x*s*((tanhP[0]*s+tanhP[1])*s+tanhP[2])/(((s+tanhQ[0])*s+tanhQ[1])*s+tanhQ[2])
- }
- return z
-}
diff --git a/contrib/go/_std_1.18/src/math/trig_reduce.go b/contrib/go/_std_1.18/src/math/trig_reduce.go
deleted file mode 100644
index 5cdf4fa013..0000000000
--- a/contrib/go/_std_1.18/src/math/trig_reduce.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package math
-
-import (
- "math/bits"
-)
-
-// reduceThreshold is the maximum value of x where the reduction using Pi/4
-// in 3 float64 parts still gives accurate results. This threshold
-// is set by y*C being representable as a float64 without error
-// where y is given by y = floor(x * (4 / Pi)) and C is the leading partial
-// terms of Pi/4. Since the leading terms (PI4A and PI4B in sin.go) have 30
-// and 32 trailing zero bits, y should have less than 30 significant bits.
-// y < 1<<30 -> floor(x*4/Pi) < 1<<30 -> x < (1<<30 - 1) * Pi/4
-// So, conservatively we can take x < 1<<29.
-// Above this threshold Payne-Hanek range reduction must be used.
-const reduceThreshold = 1 << 29
-
-// trigReduce implements Payne-Hanek range reduction by Pi/4
-// for x > 0. It returns the integer part mod 8 (j) and
-// the fractional part (z) of x / (Pi/4).
-// The implementation is based on:
-// "ARGUMENT REDUCTION FOR HUGE ARGUMENTS: Good to the Last Bit"
-// K. C. Ng et al, March 24, 1992
-// The simulated multi-precision calculation of x*B uses 64-bit integer arithmetic.
-func trigReduce(x float64) (j uint64, z float64) {
- const PI4 = Pi / 4
- if x < PI4 {
- return 0, x
- }
- // Extract out the integer and exponent such that,
- // x = ix * 2 ** exp.
- ix := Float64bits(x)
- exp := int(ix>>shift&mask) - bias - shift
- ix &^= mask << shift
- ix |= 1 << shift
- // Use the exponent to extract the 3 appropriate uint64 digits from mPi4,
- // B ~ (z0, z1, z2), such that the product leading digit has the exponent -61.
- // Note, exp >= -53 since x >= PI4 and exp < 971 for maximum float64.
- digit, bitshift := uint(exp+61)/64, uint(exp+61)%64
- z0 := (mPi4[digit] << bitshift) | (mPi4[digit+1] >> (64 - bitshift))
- z1 := (mPi4[digit+1] << bitshift) | (mPi4[digit+2] >> (64 - bitshift))
- z2 := (mPi4[digit+2] << bitshift) | (mPi4[digit+3] >> (64 - bitshift))
- // Multiply mantissa by the digits and extract the upper two digits (hi, lo).
- z2hi, _ := bits.Mul64(z2, ix)
- z1hi, z1lo := bits.Mul64(z1, ix)
- z0lo := z0 * ix
- lo, c := bits.Add64(z1lo, z2hi, 0)
- hi, _ := bits.Add64(z0lo, z1hi, c)
- // The top 3 bits are j.
- j = hi >> 61
- // Extract the fraction and find its magnitude.
- hi = hi<<3 | lo>>61
- lz := uint(bits.LeadingZeros64(hi))
- e := uint64(bias - (lz + 1))
- // Clear implicit mantissa bit and shift into place.
- hi = (hi << (lz + 1)) | (lo >> (64 - (lz + 1)))
- hi >>= 64 - shift
- // Include the exponent and convert to a float.
- hi |= e << shift
- z = Float64frombits(hi)
- // Map zeros to origin.
- if j&1 == 1 {
- j++
- j &= 7
- z--
- }
- // Multiply the fractional part by pi/4.
- return j, z * PI4
-}
-
-// mPi4 is the binary digits of 4/pi as a uint64 array,
-// that is, 4/pi = Sum mPi4[i]*2^(-64*i)
-// 19 64-bit digits and the leading one bit give 1217 bits
-// of precision to handle the largest possible float64 exponent.
-var mPi4 = [...]uint64{
- 0x0000000000000001,
- 0x45f306dc9c882a53,
- 0xf84eafa3ea69bb81,
- 0xb6c52b3278872083,
- 0xfca2c757bd778ac3,
- 0x6e48dc74849ba5c0,
- 0x0c925dd413a32439,
- 0xfc3bd63962534e7d,
- 0xd1046bea5d768909,
- 0xd338e04d68befc82,
- 0x7323ac7306a673e9,
- 0x3908bf177bf25076,
- 0x3ff12fffbc0b301f,
- 0xde5e2316b414da3e,
- 0xda6cfd9e4f96136e,
- 0x9e8c7ecd3cbfd45a,
- 0xea4f758fd7cbe2f6,
- 0x7a0e73ef14a525d4,
- 0xd7f6bf623f1aba10,
- 0xac06608df8f6d757,
-}
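
Note: the reason trigReduce carries 4/π to 1217 bits is that naive reduction modulo
2π loses essentially all accuracy for huge arguments: 2*math.Pi is not exactly 2π,
and that representation error is amplified by the size of the quotient. A small
standalone comparison (illustrative sketch, not part of this diff):

package main

import (
	"fmt"
	"math"
)

func main() {
	x := math.Exp2(100) // far above reduceThreshold (1<<29)
	// Naive reduction: math.Mod computes an exact remainder, but against
	// an inexact 2π, so the reduced phase carries essentially no accuracy.
	naive := math.Sin(math.Mod(x, 2*math.Pi))
	// math.Sin reduces huge arguments via Payne-Hanek internally.
	accurate := math.Sin(x)
	fmt.Println(naive, accurate) // the two values typically disagree completely
}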
diff --git a/contrib/go/_std_1.18/src/mime/multipart/multipart.go b/contrib/go/_std_1.18/src/mime/multipart/multipart.go
deleted file mode 100644
index 81bf722d4e..0000000000
--- a/contrib/go/_std_1.18/src/mime/multipart/multipart.go
+++ /dev/null
@@ -1,429 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-//
-
-/*
-Package multipart implements MIME multipart parsing, as defined in RFC
-2046.
-
-The implementation is sufficient for HTTP (RFC 2388) and the multipart
-bodies generated by popular browsers.
-*/
-package multipart
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "mime"
- "mime/quotedprintable"
- "net/textproto"
- "path/filepath"
- "strings"
-)
-
-var emptyParams = make(map[string]string)
-
-// This constant needs to be at least 76 for this package to work correctly.
-// This is because \r\n--separator_of_len_70- would fill the buffer and it
-// wouldn't be safe to consume a single byte from it.
-const peekBufferSize = 4096
-
-// A Part represents a single part in a multipart body.
-type Part struct {
- // The headers of the body, if any, with the keys canonicalized
- // in the same fashion that the Go http.Request headers are.
- // For example, "foo-bar" changes case to "Foo-Bar"
- Header textproto.MIMEHeader
-
- mr *Reader
-
- disposition string
- dispositionParams map[string]string
-
- // r is either a reader directly reading from mr, or it's a
- // wrapper around such a reader, decoding the
- // Content-Transfer-Encoding
- r io.Reader
-
- n int // known data bytes waiting in mr.bufReader
- total int64 // total data bytes read already
- err error // error to return when n == 0
- readErr error // read error observed from mr.bufReader
-}
-
-// FormName returns the name parameter if p has a Content-Disposition
-// of type "form-data". Otherwise it returns the empty string.
-func (p *Part) FormName() string {
- // See https://tools.ietf.org/html/rfc2183 section 2 for EBNF
- // of Content-Disposition value format.
- if p.dispositionParams == nil {
- p.parseContentDisposition()
- }
- if p.disposition != "form-data" {
- return ""
- }
- return p.dispositionParams["name"]
-}
-
-// FileName returns the filename parameter of the Part's Content-Disposition
-// header. If not empty, the filename is passed through filepath.Base (which is
-// platform dependent) before being returned.
-func (p *Part) FileName() string {
- if p.dispositionParams == nil {
- p.parseContentDisposition()
- }
- filename := p.dispositionParams["filename"]
- if filename == "" {
- return ""
- }
- // RFC 7578, Section 4.2 requires that if a filename is provided, the
- // directory path information must not be used.
- return filepath.Base(filename)
-}
-
-func (p *Part) parseContentDisposition() {
- v := p.Header.Get("Content-Disposition")
- var err error
- p.disposition, p.dispositionParams, err = mime.ParseMediaType(v)
- if err != nil {
- p.dispositionParams = emptyParams
- }
-}
-
-// NewReader creates a new multipart Reader reading from r using the
-// given MIME boundary.
-//
-// The boundary is usually obtained from the "boundary" parameter of
-// the message's "Content-Type" header. Use mime.ParseMediaType to
-// parse such headers.
-func NewReader(r io.Reader, boundary string) *Reader {
- b := []byte("\r\n--" + boundary + "--")
- return &Reader{
- bufReader: bufio.NewReaderSize(&stickyErrorReader{r: r}, peekBufferSize),
- nl: b[:2],
- nlDashBoundary: b[:len(b)-2],
- dashBoundaryDash: b[2:],
- dashBoundary: b[2 : len(b)-2],
- }
-}
-
-// stickyErrorReader is an io.Reader which never calls Read on its
-// underlying Reader once an error has been seen. (the io.Reader
-// interface's contract promises nothing about the return values of
-// Read calls after an error, yet this package does do multiple Reads
-// after error)
-type stickyErrorReader struct {
- r io.Reader
- err error
-}
-
-func (r *stickyErrorReader) Read(p []byte) (n int, _ error) {
- if r.err != nil {
- return 0, r.err
- }
- n, r.err = r.r.Read(p)
- return n, r.err
-}
-
-func newPart(mr *Reader, rawPart bool) (*Part, error) {
- bp := &Part{
- Header: make(map[string][]string),
- mr: mr,
- }
- if err := bp.populateHeaders(); err != nil {
- return nil, err
- }
- bp.r = partReader{bp}
-
- // rawPart is used to switch between Part.NextPart and Part.NextRawPart.
- if !rawPart {
- const cte = "Content-Transfer-Encoding"
- if strings.EqualFold(bp.Header.Get(cte), "quoted-printable") {
- bp.Header.Del(cte)
- bp.r = quotedprintable.NewReader(bp.r)
- }
- }
- return bp, nil
-}
-
-func (bp *Part) populateHeaders() error {
- r := textproto.NewReader(bp.mr.bufReader)
- header, err := r.ReadMIMEHeader()
- if err == nil {
- bp.Header = header
- }
- return err
-}
-
-// Read reads the body of a part, after its headers and before the
-// next part (if any) begins.
-func (p *Part) Read(d []byte) (n int, err error) {
- return p.r.Read(d)
-}
-
-// partReader implements io.Reader by reading raw bytes directly from the
-// wrapped *Part, without doing any Transfer-Encoding decoding.
-type partReader struct {
- p *Part
-}
-
-func (pr partReader) Read(d []byte) (int, error) {
- p := pr.p
- br := p.mr.bufReader
-
- // Read into buffer until we identify some data to return,
- // or we find a reason to stop (boundary or read error).
- for p.n == 0 && p.err == nil {
- peek, _ := br.Peek(br.Buffered())
- p.n, p.err = scanUntilBoundary(peek, p.mr.dashBoundary, p.mr.nlDashBoundary, p.total, p.readErr)
- if p.n == 0 && p.err == nil {
- // Force buffered I/O to read more into buffer.
- _, p.readErr = br.Peek(len(peek) + 1)
- if p.readErr == io.EOF {
- p.readErr = io.ErrUnexpectedEOF
- }
- }
- }
-
- // Read out from "data to return" part of buffer.
- if p.n == 0 {
- return 0, p.err
- }
- n := len(d)
- if n > p.n {
- n = p.n
- }
- n, _ = br.Read(d[:n])
- p.total += int64(n)
- p.n -= n
- if p.n == 0 {
- return n, p.err
- }
- return n, nil
-}
-
-// scanUntilBoundary scans buf to identify how much of it can be safely
-// returned as part of the Part body.
-// dashBoundary is "--boundary".
-// nlDashBoundary is "\r\n--boundary" or "\n--boundary", depending on what mode we are in.
-// The comments below (and the name) assume "\n--boundary", but either is accepted.
-// total is the number of bytes read out so far. If total == 0, then a leading "--boundary" is recognized.
-// readErr is the read error, if any, that followed reading the bytes in buf.
-// scanUntilBoundary returns the number of data bytes from buf that can be
-// returned as part of the Part body and also the error to return (if any)
-// once those data bytes are done.
-func scanUntilBoundary(buf, dashBoundary, nlDashBoundary []byte, total int64, readErr error) (int, error) {
- if total == 0 {
- // At beginning of body, allow dashBoundary.
- if bytes.HasPrefix(buf, dashBoundary) {
- switch matchAfterPrefix(buf, dashBoundary, readErr) {
- case -1:
- return len(dashBoundary), nil
- case 0:
- return 0, nil
- case +1:
- return 0, io.EOF
- }
- }
- if bytes.HasPrefix(dashBoundary, buf) {
- return 0, readErr
- }
- }
-
- // Search for "\n--boundary".
- if i := bytes.Index(buf, nlDashBoundary); i >= 0 {
- switch matchAfterPrefix(buf[i:], nlDashBoundary, readErr) {
- case -1:
- return i + len(nlDashBoundary), nil
- case 0:
- return i, nil
- case +1:
- return i, io.EOF
- }
- }
- if bytes.HasPrefix(nlDashBoundary, buf) {
- return 0, readErr
- }
-
- // Otherwise, anything up to the final \n is not part of the boundary
- // and so must be part of the body.
- // Also if the section from the final \n onward is not a prefix of the boundary,
- // it too must be part of the body.
- i := bytes.LastIndexByte(buf, nlDashBoundary[0])
- if i >= 0 && bytes.HasPrefix(nlDashBoundary, buf[i:]) {
- return i, nil
- }
- return len(buf), readErr
-}
-
-// matchAfterPrefix checks whether buf should be considered to match the boundary.
-// The prefix is "--boundary" or "\r\n--boundary" or "\n--boundary",
-// and the caller has verified already that bytes.HasPrefix(buf, prefix) is true.
-//
-// matchAfterPrefix returns +1 if the buffer does match the boundary,
-// meaning the prefix is followed by a dash, space, tab, cr, nl, or end of input.
-// It returns -1 if the buffer definitely does NOT match the boundary,
-// meaning the prefix is followed by some other character.
-// For example, "--foobar" does not match "--foo".
-// It returns 0 if more input needs to be read to make the decision,
-// meaning that len(buf) == len(prefix) and readErr == nil.
-func matchAfterPrefix(buf, prefix []byte, readErr error) int {
- if len(buf) == len(prefix) {
- if readErr != nil {
- return +1
- }
- return 0
- }
- c := buf[len(prefix)]
- if c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '-' {
- return +1
- }
- return -1
-}
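
Note: the -1/0/+1 contract above is easy to misread, so here is a self-contained
replica with probe inputs (illustrative sketch; the real function is unexported and
identical in logic):

package main

import "fmt"

func matchAfterPrefix(buf, prefix []byte, readErr error) int {
	if len(buf) == len(prefix) {
		if readErr != nil {
			return +1 // input ended exactly at the prefix: treat as match
		}
		return 0 // need more input to decide
	}
	c := buf[len(prefix)]
	if c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '-' {
		return +1
	}
	return -1
}

func main() {
	prefix := []byte("--foo")
	fmt.Println(matchAfterPrefix([]byte("--foo\r\n"), prefix, nil)) // 1: boundary
	fmt.Println(matchAfterPrefix([]byte("--foobar"), prefix, nil))  // -1: not a boundary
	fmt.Println(matchAfterPrefix([]byte("--foo"), prefix, nil))     // 0: read more
}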
-
-func (p *Part) Close() error {
- io.Copy(io.Discard, p)
- return nil
-}
-
-// Reader is an iterator over parts in a MIME multipart body.
-// Reader's underlying parser consumes its input as needed. Seeking
-// isn't supported.
-type Reader struct {
- bufReader *bufio.Reader
-
- currentPart *Part
- partsRead int
-
- nl []byte // "\r\n" or "\n" (set after seeing first boundary line)
- nlDashBoundary []byte // nl + "--boundary"
- dashBoundaryDash []byte // "--boundary--"
- dashBoundary []byte // "--boundary"
-}
-
-// NextPart returns the next part in the multipart or an error.
-// When there are no more parts, the error io.EOF is returned.
-//
-// As a special case, if the "Content-Transfer-Encoding" header
-// has a value of "quoted-printable", that header is instead
-// hidden and the body is transparently decoded during Read calls.
-func (r *Reader) NextPart() (*Part, error) {
- return r.nextPart(false)
-}
-
-// NextRawPart returns the next part in the multipart or an error.
-// When there are no more parts, the error io.EOF is returned.
-//
-// Unlike NextPart, it does not have special handling for
-// "Content-Transfer-Encoding: quoted-printable".
-func (r *Reader) NextRawPart() (*Part, error) {
- return r.nextPart(true)
-}
-
-func (r *Reader) nextPart(rawPart bool) (*Part, error) {
- if r.currentPart != nil {
- r.currentPart.Close()
- }
- if string(r.dashBoundary) == "--" {
- return nil, fmt.Errorf("multipart: boundary is empty")
- }
- expectNewPart := false
- for {
- line, err := r.bufReader.ReadSlice('\n')
-
- if err == io.EOF && r.isFinalBoundary(line) {
- // If the buffer ends in "--boundary--" without the
- // trailing "\r\n", ReadSlice will return an error
- // (since it's missing the '\n'), but this is a valid
- // multipart EOF so we need to return io.EOF instead of
- // a fmt-wrapped one.
- return nil, io.EOF
- }
- if err != nil {
- return nil, fmt.Errorf("multipart: NextPart: %v", err)
- }
-
- if r.isBoundaryDelimiterLine(line) {
- r.partsRead++
- bp, err := newPart(r, rawPart)
- if err != nil {
- return nil, err
- }
- r.currentPart = bp
- return bp, nil
- }
-
- if r.isFinalBoundary(line) {
- // Expected EOF
- return nil, io.EOF
- }
-
- if expectNewPart {
- return nil, fmt.Errorf("multipart: expecting a new Part; got line %q", string(line))
- }
-
- if r.partsRead == 0 {
- // skip line
- continue
- }
-
- // Consume the "\n" or "\r\n" separator between the
- // body of the previous part and the boundary line we
- // now expect will follow. (either a new part or the
- // end boundary)
- if bytes.Equal(line, r.nl) {
- expectNewPart = true
- continue
- }
-
- return nil, fmt.Errorf("multipart: unexpected line in Next(): %q", line)
- }
-}
-
-// isFinalBoundary reports whether line is the final boundary line
-// indicating that all parts are over.
-// It matches `^--boundary--[ \t]*(\r\n)?$`
-func (mr *Reader) isFinalBoundary(line []byte) bool {
- if !bytes.HasPrefix(line, mr.dashBoundaryDash) {
- return false
- }
- rest := line[len(mr.dashBoundaryDash):]
- rest = skipLWSPChar(rest)
- return len(rest) == 0 || bytes.Equal(rest, mr.nl)
-}
-
-func (mr *Reader) isBoundaryDelimiterLine(line []byte) (ret bool) {
- // https://tools.ietf.org/html/rfc2046#section-5.1
- // The boundary delimiter line is then defined as a line
- // consisting entirely of two hyphen characters ("-",
- // decimal value 45) followed by the boundary parameter
- // value from the Content-Type header field, optional linear
- // whitespace, and a terminating CRLF.
- if !bytes.HasPrefix(line, mr.dashBoundary) {
- return false
- }
- rest := line[len(mr.dashBoundary):]
- rest = skipLWSPChar(rest)
-
-	// On the first part, check whether our lines are ending in \n instead of \r\n
- // and switch into that mode if so. This is a violation of the spec,
- // but occurs in practice.
- if mr.partsRead == 0 && len(rest) == 1 && rest[0] == '\n' {
- mr.nl = mr.nl[1:]
- mr.nlDashBoundary = mr.nlDashBoundary[1:]
- }
- return bytes.Equal(rest, mr.nl)
-}
-
-// skipLWSPChar returns b with leading spaces and tabs removed.
-// RFC 822 defines:
-// LWSP-char = SPACE / HTAB
-func skipLWSPChar(b []byte) []byte {
- for len(b) > 0 && (b[0] == ' ' || b[0] == '\t') {
- b = b[1:]
- }
- return b
-}
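
Note: for context, typical use of the Reader/Part API deleted above, against the
public mime/multipart package (a minimal sketch):

package main

import (
	"fmt"
	"io"
	"mime/multipart"
	"strings"
)

func main() {
	body := "--BOUNDARY\r\n" +
		"Content-Disposition: form-data; name=\"field1\"\r\n\r\n" +
		"value1\r\n" +
		"--BOUNDARY--\r\n"
	mr := multipart.NewReader(strings.NewReader(body), "BOUNDARY")
	for {
		p, err := mr.NextPart() // NextRawPart would skip quoted-printable decoding
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		data, _ := io.ReadAll(p)
		fmt.Printf("part %q: %s\n", p.FormName(), data)
	}
}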
diff --git a/contrib/go/_std_1.18/src/mime/type.go b/contrib/go/_std_1.18/src/mime/type.go
deleted file mode 100644
index bdb8bb319a..0000000000
--- a/contrib/go/_std_1.18/src/mime/type.go
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package mime implements parts of the MIME spec.
-package mime
-
-import (
- "fmt"
- "sort"
- "strings"
- "sync"
-)
-
-var (
- mimeTypes sync.Map // map[string]string; ".Z" => "application/x-compress"
- mimeTypesLower sync.Map // map[string]string; ".z" => "application/x-compress"
-
- // extensions maps from MIME type to list of lowercase file
- // extensions: "image/jpeg" => [".jpg", ".jpeg"]
- extensionsMu sync.Mutex // Guards stores (but not loads) on extensions.
- extensions sync.Map // map[string][]string; slice values are append-only.
-)
-
-func clearSyncMap(m *sync.Map) {
- m.Range(func(k, _ any) bool {
- m.Delete(k)
- return true
- })
-}
-
-// setMimeTypes is used by initMime's non-test path, and by tests.
-func setMimeTypes(lowerExt, mixExt map[string]string) {
- clearSyncMap(&mimeTypes)
- clearSyncMap(&mimeTypesLower)
- clearSyncMap(&extensions)
-
- for k, v := range lowerExt {
- mimeTypesLower.Store(k, v)
- }
- for k, v := range mixExt {
- mimeTypes.Store(k, v)
- }
-
- extensionsMu.Lock()
- defer extensionsMu.Unlock()
- for k, v := range lowerExt {
- justType, _, err := ParseMediaType(v)
- if err != nil {
- panic(err)
- }
- var exts []string
- if ei, ok := extensions.Load(justType); ok {
- exts = ei.([]string)
- }
- extensions.Store(justType, append(exts, k))
- }
-}
-
-var builtinTypesLower = map[string]string{
- ".avif": "image/avif",
- ".css": "text/css; charset=utf-8",
- ".gif": "image/gif",
- ".htm": "text/html; charset=utf-8",
- ".html": "text/html; charset=utf-8",
- ".jpeg": "image/jpeg",
- ".jpg": "image/jpeg",
- ".js": "text/javascript; charset=utf-8",
- ".json": "application/json",
- ".mjs": "text/javascript; charset=utf-8",
- ".pdf": "application/pdf",
- ".png": "image/png",
- ".svg": "image/svg+xml",
- ".wasm": "application/wasm",
- ".webp": "image/webp",
- ".xml": "text/xml; charset=utf-8",
-}
-
-var once sync.Once // guards initMime
-
-var testInitMime, osInitMime func()
-
-func initMime() {
- if fn := testInitMime; fn != nil {
- fn()
- } else {
- setMimeTypes(builtinTypesLower, builtinTypesLower)
- osInitMime()
- }
-}
-
-// TypeByExtension returns the MIME type associated with the file extension ext.
-// The extension ext should begin with a leading dot, as in ".html".
-// When ext has no associated type, TypeByExtension returns "".
-//
-// Extensions are looked up first case-sensitively, then case-insensitively.
-//
-// The built-in table is small but on unix it is augmented by the local
-// system's MIME-info database or mime.types file(s) if available under one or
-// more of these names:
-//
-// /usr/local/share/mime/globs2
-// /usr/share/mime/globs2
-// /etc/mime.types
-// /etc/apache2/mime.types
-// /etc/apache/mime.types
-//
-// On Windows, MIME types are extracted from the registry.
-//
-// Text types have the charset parameter set to "utf-8" by default.
-func TypeByExtension(ext string) string {
- once.Do(initMime)
-
- // Case-sensitive lookup.
- if v, ok := mimeTypes.Load(ext); ok {
- return v.(string)
- }
-
- // Case-insensitive lookup.
- // Optimistically assume a short ASCII extension and be
- // allocation-free in that case.
- var buf [10]byte
- lower := buf[:0]
- const utf8RuneSelf = 0x80 // from utf8 package, but not importing it.
- for i := 0; i < len(ext); i++ {
- c := ext[i]
- if c >= utf8RuneSelf {
- // Slow path.
- si, _ := mimeTypesLower.Load(strings.ToLower(ext))
- s, _ := si.(string)
- return s
- }
- if 'A' <= c && c <= 'Z' {
- lower = append(lower, c+('a'-'A'))
- } else {
- lower = append(lower, c)
- }
- }
- si, _ := mimeTypesLower.Load(string(lower))
- s, _ := si.(string)
- return s
-}
-
-// ExtensionsByType returns the extensions known to be associated with the MIME
-// type typ. The returned extensions will each begin with a leading dot, as in
-// ".html". When typ has no associated extensions, ExtensionsByType returns an
-// nil slice.
-func ExtensionsByType(typ string) ([]string, error) {
- justType, _, err := ParseMediaType(typ)
- if err != nil {
- return nil, err
- }
-
- once.Do(initMime)
- s, ok := extensions.Load(justType)
- if !ok {
- return nil, nil
- }
- ret := append([]string(nil), s.([]string)...)
- sort.Strings(ret)
- return ret, nil
-}
-
-// AddExtensionType sets the MIME type associated with
-// the extension ext to typ. The extension should begin with
-// a leading dot, as in ".html".
-func AddExtensionType(ext, typ string) error {
- if !strings.HasPrefix(ext, ".") {
- return fmt.Errorf("mime: extension %q missing leading dot", ext)
- }
- once.Do(initMime)
- return setExtensionType(ext, typ)
-}
-
-func setExtensionType(extension, mimeType string) error {
- justType, param, err := ParseMediaType(mimeType)
- if err != nil {
- return err
- }
- if strings.HasPrefix(mimeType, "text/") && param["charset"] == "" {
- param["charset"] = "utf-8"
- mimeType = FormatMediaType(mimeType, param)
- }
- extLower := strings.ToLower(extension)
-
- mimeTypes.Store(extension, mimeType)
- mimeTypesLower.Store(extLower, mimeType)
-
- extensionsMu.Lock()
- defer extensionsMu.Unlock()
- var exts []string
- if ei, ok := extensions.Load(justType); ok {
- exts = ei.([]string)
- }
- for _, v := range exts {
- if v == extLower {
- return nil
- }
- }
- extensions.Store(justType, append(exts, extLower))
- return nil
-}
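
Note: a short usage sketch of the public API implemented above (TypeByExtension,
ExtensionsByType, AddExtensionType):

package main

import (
	"fmt"
	"mime"
)

func main() {
	// Lookup is case-sensitive first, then case-insensitive.
	fmt.Println(mime.TypeByExtension(".HTML")) // text/html; charset=utf-8
	// Reverse mapping; results come back sorted.
	exts, _ := mime.ExtensionsByType("image/jpeg")
	fmt.Println(exts) // at least [.jpeg .jpg]; system tables may add more
	// text/* registrations get charset=utf-8 by default.
	_ = mime.AddExtensionType(".md", "text/markdown")
	fmt.Println(mime.TypeByExtension(".md")) // text/markdown; charset=utf-8
}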
diff --git a/contrib/go/_std_1.18/src/mime/type_unix.go b/contrib/go/_std_1.18/src/mime/type_unix.go
deleted file mode 100644
index 3abc1fa10e..0000000000
--- a/contrib/go/_std_1.18/src/mime/type_unix.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris
-
-package mime
-
-import (
- "bufio"
- "os"
- "strings"
-)
-
-func init() {
- osInitMime = initMimeUnix
-}
-
-// See https://specifications.freedesktop.org/shared-mime-info-spec/shared-mime-info-spec-0.21.html
-// for the FreeDesktop Shared MIME-info Database specification.
-var mimeGlobs = []string{
- "/usr/local/share/mime/globs2",
- "/usr/share/mime/globs2",
-}
-
-// Common locations for mime.types files on unix.
-var typeFiles = []string{
- "/etc/mime.types",
- "/etc/apache2/mime.types",
- "/etc/apache/mime.types",
- "/etc/httpd/conf/mime.types",
-}
-
-func loadMimeGlobsFile(filename string) error {
- f, err := os.Open(filename)
- if err != nil {
- return err
- }
- defer f.Close()
-
- scanner := bufio.NewScanner(f)
- for scanner.Scan() {
- // Each line should be of format: weight:mimetype:*.ext
- fields := strings.Split(scanner.Text(), ":")
- if len(fields) < 3 || len(fields[0]) < 1 || len(fields[2]) < 2 {
- continue
- } else if fields[0][0] == '#' || fields[2][0] != '*' {
- continue
- }
-
- extension := fields[2][1:]
- if _, ok := mimeTypes.Load(extension); ok {
- // We've already seen this extension.
- // The file is in weight order, so we keep
- // the first entry that we see.
- continue
- }
-
- setExtensionType(extension, fields[1])
- }
- if err := scanner.Err(); err != nil {
- panic(err)
- }
- return nil
-}
-
-func loadMimeFile(filename string) {
- f, err := os.Open(filename)
- if err != nil {
- return
- }
- defer f.Close()
-
- scanner := bufio.NewScanner(f)
- for scanner.Scan() {
- fields := strings.Fields(scanner.Text())
- if len(fields) <= 1 || fields[0][0] == '#' {
- continue
- }
- mimeType := fields[0]
- for _, ext := range fields[1:] {
- if ext[0] == '#' {
- break
- }
- setExtensionType("."+ext, mimeType)
- }
- }
- if err := scanner.Err(); err != nil {
- panic(err)
- }
-}
-
-func initMimeUnix() {
- for _, filename := range mimeGlobs {
- if err := loadMimeGlobsFile(filename); err == nil {
-			return // Stop checking further files once a mimetype database is found.
- }
- }
-
- // Fallback if no system-generated mimetype database exists.
- for _, filename := range typeFiles {
- loadMimeFile(filename)
- }
-}
-
-func initMimeForTests() map[string]string {
- mimeGlobs = []string{""}
- typeFiles = []string{"testdata/test.types"}
- return map[string]string{
- ".T1": "application/test",
- ".t2": "text/test; charset=utf-8",
- ".png": "image/png",
- }
-}
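
Note: the globs2 lines consumed by loadMimeGlobsFile above have the form
weight:mimetype:*.ext and arrive sorted by weight. A self-contained sketch of the
same filtering (parseGlobs2Line is a hypothetical helper name, not part of the
deleted file):

package main

import (
	"fmt"
	"strings"
)

// parseGlobs2Line accepts "weight:mimetype:*.ext" and rejects comments
// and patterns that are not simple "*.ext" globs, mirroring the loop above.
func parseGlobs2Line(line string) (ext, mimeType string, ok bool) {
	fields := strings.Split(line, ":")
	if len(fields) < 3 || len(fields[0]) < 1 || len(fields[2]) < 2 {
		return "", "", false
	}
	if fields[0][0] == '#' || fields[2][0] != '*' {
		return "", "", false
	}
	return fields[2][1:], fields[1], true
}

func main() {
	for _, l := range []string{
		"50:image/png:*.png",          // accepted
		"# a comment line",            // rejected
		"50:application/x-foo:f?.bar", // rejected: not a plain *.ext glob
	} {
		ext, typ, ok := parseGlobs2Line(l)
		fmt.Println(l, "=>", ext, typ, ok)
	}
}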
diff --git a/contrib/go/_std_1.18/src/net/addrselect.go b/contrib/go/_std_1.18/src/net/addrselect.go
deleted file mode 100644
index 29e4ed85ab..0000000000
--- a/contrib/go/_std_1.18/src/net/addrselect.go
+++ /dev/null
@@ -1,390 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-// Minimal RFC 6724 address selection.
-
-package net
-
-import "sort"
-
-func sortByRFC6724(addrs []IPAddr) {
- if len(addrs) < 2 {
- return
- }
- sortByRFC6724withSrcs(addrs, srcAddrs(addrs))
-}
-
-func sortByRFC6724withSrcs(addrs []IPAddr, srcs []IP) {
- if len(addrs) != len(srcs) {
- panic("internal error")
- }
- addrAttr := make([]ipAttr, len(addrs))
- srcAttr := make([]ipAttr, len(srcs))
- for i, v := range addrs {
- addrAttr[i] = ipAttrOf(v.IP)
- srcAttr[i] = ipAttrOf(srcs[i])
- }
- sort.Stable(&byRFC6724{
- addrs: addrs,
- addrAttr: addrAttr,
- srcs: srcs,
- srcAttr: srcAttr,
- })
-}
-
-// srcAddrs tries to UDP-connect to each address to see if it has a
-// route. (This doesn't send any packets). The destination port
-// number is irrelevant.
-func srcAddrs(addrs []IPAddr) []IP {
- srcs := make([]IP, len(addrs))
- dst := UDPAddr{Port: 9}
- for i := range addrs {
- dst.IP = addrs[i].IP
- dst.Zone = addrs[i].Zone
- c, err := DialUDP("udp", nil, &dst)
- if err == nil {
- if src, ok := c.LocalAddr().(*UDPAddr); ok {
- srcs[i] = src.IP
- }
- c.Close()
- }
- }
- return srcs
-}
-
-type ipAttr struct {
- Scope scope
- Precedence uint8
- Label uint8
-}
-
-func ipAttrOf(ip IP) ipAttr {
- if ip == nil {
- return ipAttr{}
- }
- match := rfc6724policyTable.Classify(ip)
- return ipAttr{
- Scope: classifyScope(ip),
- Precedence: match.Precedence,
- Label: match.Label,
- }
-}
-
-type byRFC6724 struct {
- addrs []IPAddr // addrs to sort
- addrAttr []ipAttr
- srcs []IP // or nil if unreachable
- srcAttr []ipAttr
-}
-
-func (s *byRFC6724) Len() int { return len(s.addrs) }
-
-func (s *byRFC6724) Swap(i, j int) {
- s.addrs[i], s.addrs[j] = s.addrs[j], s.addrs[i]
- s.srcs[i], s.srcs[j] = s.srcs[j], s.srcs[i]
- s.addrAttr[i], s.addrAttr[j] = s.addrAttr[j], s.addrAttr[i]
- s.srcAttr[i], s.srcAttr[j] = s.srcAttr[j], s.srcAttr[i]
-}
-
-// Less reports whether i is a better destination address for this
-// host than j.
-//
-// The algorithm and variable names come from RFC 6724 section 6.
-func (s *byRFC6724) Less(i, j int) bool {
- DA := s.addrs[i].IP
- DB := s.addrs[j].IP
- SourceDA := s.srcs[i]
- SourceDB := s.srcs[j]
- attrDA := &s.addrAttr[i]
- attrDB := &s.addrAttr[j]
- attrSourceDA := &s.srcAttr[i]
- attrSourceDB := &s.srcAttr[j]
-
- const preferDA = true
- const preferDB = false
-
- // Rule 1: Avoid unusable destinations.
- // If DB is known to be unreachable or if Source(DB) is undefined, then
- // prefer DA. Similarly, if DA is known to be unreachable or if
- // Source(DA) is undefined, then prefer DB.
- if SourceDA == nil && SourceDB == nil {
- return false // "equal"
- }
- if SourceDB == nil {
- return preferDA
- }
- if SourceDA == nil {
- return preferDB
- }
-
- // Rule 2: Prefer matching scope.
- // If Scope(DA) = Scope(Source(DA)) and Scope(DB) <> Scope(Source(DB)),
- // then prefer DA. Similarly, if Scope(DA) <> Scope(Source(DA)) and
- // Scope(DB) = Scope(Source(DB)), then prefer DB.
- if attrDA.Scope == attrSourceDA.Scope && attrDB.Scope != attrSourceDB.Scope {
- return preferDA
- }
- if attrDA.Scope != attrSourceDA.Scope && attrDB.Scope == attrSourceDB.Scope {
- return preferDB
- }
-
- // Rule 3: Avoid deprecated addresses.
- // If Source(DA) is deprecated and Source(DB) is not, then prefer DB.
- // Similarly, if Source(DA) is not deprecated and Source(DB) is
- // deprecated, then prefer DA.
-
- // TODO(bradfitz): implement? low priority for now.
-
- // Rule 4: Prefer home addresses.
- // If Source(DA) is simultaneously a home address and care-of address
- // and Source(DB) is not, then prefer DA. Similarly, if Source(DB) is
- // simultaneously a home address and care-of address and Source(DA) is
- // not, then prefer DB.
-
- // TODO(bradfitz): implement? low priority for now.
-
- // Rule 5: Prefer matching label.
- // If Label(Source(DA)) = Label(DA) and Label(Source(DB)) <> Label(DB),
- // then prefer DA. Similarly, if Label(Source(DA)) <> Label(DA) and
- // Label(Source(DB)) = Label(DB), then prefer DB.
- if attrSourceDA.Label == attrDA.Label &&
- attrSourceDB.Label != attrDB.Label {
- return preferDA
- }
- if attrSourceDA.Label != attrDA.Label &&
- attrSourceDB.Label == attrDB.Label {
- return preferDB
- }
-
- // Rule 6: Prefer higher precedence.
- // If Precedence(DA) > Precedence(DB), then prefer DA. Similarly, if
- // Precedence(DA) < Precedence(DB), then prefer DB.
- if attrDA.Precedence > attrDB.Precedence {
- return preferDA
- }
- if attrDA.Precedence < attrDB.Precedence {
- return preferDB
- }
-
- // Rule 7: Prefer native transport.
- // If DA is reached via an encapsulating transition mechanism (e.g.,
- // IPv6 in IPv4) and DB is not, then prefer DB. Similarly, if DB is
- // reached via encapsulation and DA is not, then prefer DA.
-
- // TODO(bradfitz): implement? low priority for now.
-
- // Rule 8: Prefer smaller scope.
- // If Scope(DA) < Scope(DB), then prefer DA. Similarly, if Scope(DA) >
- // Scope(DB), then prefer DB.
- if attrDA.Scope < attrDB.Scope {
- return preferDA
- }
- if attrDA.Scope > attrDB.Scope {
- return preferDB
- }
-
- // Rule 9: Use longest matching prefix.
- // When DA and DB belong to the same address family (both are IPv6 or
- // both are IPv4 [but see below]): If CommonPrefixLen(Source(DA), DA) >
- // CommonPrefixLen(Source(DB), DB), then prefer DA. Similarly, if
- // CommonPrefixLen(Source(DA), DA) < CommonPrefixLen(Source(DB), DB),
- // then prefer DB.
- //
- // However, applying this rule to IPv4 addresses causes
- // problems (see issues 13283 and 18518), so limit to IPv6.
- if DA.To4() == nil && DB.To4() == nil {
- commonA := commonPrefixLen(SourceDA, DA)
- commonB := commonPrefixLen(SourceDB, DB)
-
- if commonA > commonB {
- return preferDA
- }
- if commonA < commonB {
- return preferDB
- }
- }
-
- // Rule 10: Otherwise, leave the order unchanged.
- // If DA preceded DB in the original list, prefer DA.
- // Otherwise, prefer DB.
- return false // "equal"
-}
-
-type policyTableEntry struct {
- Prefix *IPNet
- Precedence uint8
- Label uint8
-}
-
-type policyTable []policyTableEntry
-
-// RFC 6724 section 2.1.
-var rfc6724policyTable = policyTable{
- {
- Prefix: mustCIDR("::1/128"),
- Precedence: 50,
- Label: 0,
- },
- {
- Prefix: mustCIDR("::/0"),
- Precedence: 40,
- Label: 1,
- },
- {
- // IPv4-compatible, etc.
- Prefix: mustCIDR("::ffff:0:0/96"),
- Precedence: 35,
- Label: 4,
- },
- {
- // 6to4
- Prefix: mustCIDR("2002::/16"),
- Precedence: 30,
- Label: 2,
- },
- {
- // Teredo
- Prefix: mustCIDR("2001::/32"),
- Precedence: 5,
- Label: 5,
- },
- {
- Prefix: mustCIDR("fc00::/7"),
- Precedence: 3,
- Label: 13,
- },
- {
- Prefix: mustCIDR("::/96"),
- Precedence: 1,
- Label: 3,
- },
- {
- Prefix: mustCIDR("fec0::/10"),
- Precedence: 1,
- Label: 11,
- },
- {
- Prefix: mustCIDR("3ffe::/16"),
- Precedence: 1,
- Label: 12,
- },
-}
-
-func init() {
- sort.Sort(sort.Reverse(byMaskLength(rfc6724policyTable)))
-}
-
-// byMaskLength sorts policyTableEntry values by the size of their
-// Prefix.Mask, from smallest mask to largest.
-type byMaskLength []policyTableEntry
-
-func (s byMaskLength) Len() int { return len(s) }
-func (s byMaskLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s byMaskLength) Less(i, j int) bool {
- isize, _ := s[i].Prefix.Mask.Size()
- jsize, _ := s[j].Prefix.Mask.Size()
- return isize < jsize
-}
-
-// mustCIDR calls ParseCIDR and panics on any error, or if the network
-// is not IPv6.
-func mustCIDR(s string) *IPNet {
- ip, ipNet, err := ParseCIDR(s)
- if err != nil {
- panic(err.Error())
- }
- if len(ip) != IPv6len {
- panic("unexpected IP length")
- }
- return ipNet
-}
-
-// Classify returns the policyTableEntry of the entry with the longest
-// matching prefix that contains ip.
-// The table t must be sorted from largest mask size to smallest.
-func (t policyTable) Classify(ip IP) policyTableEntry {
- for _, ent := range t {
- if ent.Prefix.Contains(ip) {
- return ent
- }
- }
- return policyTableEntry{}
-}
-
-// RFC 6724 section 3.1.
-type scope uint8
-
-const (
- scopeInterfaceLocal scope = 0x1
- scopeLinkLocal scope = 0x2
- scopeAdminLocal scope = 0x4
- scopeSiteLocal scope = 0x5
- scopeOrgLocal scope = 0x8
- scopeGlobal scope = 0xe
-)
-
-func classifyScope(ip IP) scope {
- if ip.IsLoopback() || ip.IsLinkLocalUnicast() {
- return scopeLinkLocal
- }
- ipv6 := len(ip) == IPv6len && ip.To4() == nil
- if ipv6 && ip.IsMulticast() {
- return scope(ip[1] & 0xf)
- }
- // Site-local addresses are defined in RFC 3513 section 2.5.6
- // (and deprecated in RFC 3879).
- if ipv6 && ip[0] == 0xfe && ip[1]&0xc0 == 0xc0 {
- return scopeSiteLocal
- }
- return scopeGlobal
-}
-
-// commonPrefixLen reports the length of the longest prefix (looking
-// at the most significant, or leftmost, bits) that the
-// two addresses have in common, up to the length of a's prefix (i.e.,
-// the portion of the address not including the interface ID).
-//
-// If a or b is an IPv4 address represented as an IPv6 address, the
-// underlying IPv4 addresses are compared (with a maximum common prefix
-// length of 32).
-// If a and b are different IP versions, 0 is returned.
-//
-// See https://tools.ietf.org/html/rfc6724#section-2.2
-func commonPrefixLen(a, b IP) (cpl int) {
- if a4 := a.To4(); a4 != nil {
- a = a4
- }
- if b4 := b.To4(); b4 != nil {
- b = b4
- }
- if len(a) != len(b) {
- return 0
- }
- // If IPv6, only up to the prefix (first 64 bits)
- if len(a) > 8 {
- a = a[:8]
- b = b[:8]
- }
- for len(a) > 0 {
- if a[0] == b[0] {
- cpl += 8
- a = a[1:]
- b = b[1:]
- continue
- }
- bits := 8
- ab, bb := a[0], b[0]
- for {
- ab >>= 1
- bb >>= 1
- bits--
- if ab == bb {
- cpl += bits
- return
- }
- }
- }
- return
-}
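
Note: a runnable replica of commonPrefixLen above (the original is unexported),
showing the IPv4 clamp and the IPv6 /64 cutoff:

package main

import (
	"fmt"
	"net"
)

func commonPrefixLen(a, b net.IP) (cpl int) {
	if a4 := a.To4(); a4 != nil {
		a = a4
	}
	if b4 := b.To4(); b4 != nil {
		b = b4
	}
	if len(a) != len(b) {
		return 0 // different IP versions share no prefix
	}
	if len(a) > 8 { // IPv6: compare only the 64-bit network prefix
		a, b = a[:8], b[:8]
	}
	for i := range a {
		if a[i] != b[i] {
			// Count the matching leading bits of the first differing byte.
			for x := a[i] ^ b[i]; x != 0; x >>= 1 {
				cpl--
			}
			return cpl + 8
		}
		cpl += 8
	}
	return cpl
}

func main() {
	fmt.Println(commonPrefixLen(net.ParseIP("192.0.2.1"), net.ParseIP("192.0.2.130")))    // 24
	fmt.Println(commonPrefixLen(net.ParseIP("2001:db8::1"), net.ParseIP("2001:db8:1::"))) // 47
}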
diff --git a/contrib/go/_std_1.18/src/net/cgo_unix.go b/contrib/go/_std_1.18/src/net/cgo_unix.go
deleted file mode 100644
index 6fc2c1930e..0000000000
--- a/contrib/go/_std_1.18/src/net/cgo_unix.go
+++ /dev/null
@@ -1,348 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build cgo && !netgo && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris)
-
-package net
-
-/*
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#include <netdb.h>
-#include <unistd.h>
-#include <string.h>
-
-// If nothing else defined EAI_OVERFLOW, make sure it has a value.
-#ifndef EAI_OVERFLOW
-#define EAI_OVERFLOW -12
-#endif
-*/
-import "C"
-
-import (
- "context"
- "syscall"
- "unsafe"
-)
-
-// An addrinfoErrno represents a getaddrinfo- or getnameinfo-specific
-// error number. It's a signed number, and a zero value is a non-error
-// by convention.
-type addrinfoErrno int
-
-func (eai addrinfoErrno) Error() string { return C.GoString(C.gai_strerror(C.int(eai))) }
-func (eai addrinfoErrno) Temporary() bool { return eai == C.EAI_AGAIN }
-func (eai addrinfoErrno) Timeout() bool { return false }
-
-type portLookupResult struct {
- port int
- err error
-}
-
-type ipLookupResult struct {
- addrs []IPAddr
- cname string
- err error
-}
-
-type reverseLookupResult struct {
- names []string
- err error
-}
-
-func cgoLookupHost(ctx context.Context, name string) (hosts []string, err error, completed bool) {
- addrs, err, completed := cgoLookupIP(ctx, "ip", name)
- for _, addr := range addrs {
- hosts = append(hosts, addr.String())
- }
- return
-}
-
-func cgoLookupPort(ctx context.Context, network, service string) (port int, err error, completed bool) {
- var hints C.struct_addrinfo
- switch network {
- case "": // no hints
- case "tcp", "tcp4", "tcp6":
- hints.ai_socktype = C.SOCK_STREAM
- hints.ai_protocol = C.IPPROTO_TCP
- case "udp", "udp4", "udp6":
- hints.ai_socktype = C.SOCK_DGRAM
- hints.ai_protocol = C.IPPROTO_UDP
- default:
- return 0, &DNSError{Err: "unknown network", Name: network + "/" + service}, true
- }
- switch ipVersion(network) {
- case '4':
- hints.ai_family = C.AF_INET
- case '6':
- hints.ai_family = C.AF_INET6
- }
- if ctx.Done() == nil {
- port, err := cgoLookupServicePort(&hints, network, service)
- return port, err, true
- }
- result := make(chan portLookupResult, 1)
- go cgoPortLookup(result, &hints, network, service)
- select {
- case r := <-result:
- return r.port, r.err, true
- case <-ctx.Done():
- // Since there isn't a portable way to cancel the lookup,
- // we just let it finish and write to the buffered channel.
- return 0, mapErr(ctx.Err()), false
- }
-}
-
-func cgoLookupServicePort(hints *C.struct_addrinfo, network, service string) (port int, err error) {
- cservice := make([]byte, len(service)+1)
- copy(cservice, service)
- // Lowercase the C service name.
- for i, b := range cservice[:len(service)] {
- cservice[i] = lowerASCII(b)
- }
- var res *C.struct_addrinfo
- gerrno, err := C.getaddrinfo(nil, (*C.char)(unsafe.Pointer(&cservice[0])), hints, &res)
- if gerrno != 0 {
- isTemporary := false
- switch gerrno {
- case C.EAI_SYSTEM:
- if err == nil { // see golang.org/issue/6232
- err = syscall.EMFILE
- }
- default:
- err = addrinfoErrno(gerrno)
- isTemporary = addrinfoErrno(gerrno).Temporary()
- }
- return 0, &DNSError{Err: err.Error(), Name: network + "/" + service, IsTemporary: isTemporary}
- }
- defer C.freeaddrinfo(res)
-
- for r := res; r != nil; r = r.ai_next {
- switch r.ai_family {
- case C.AF_INET:
- sa := (*syscall.RawSockaddrInet4)(unsafe.Pointer(r.ai_addr))
- p := (*[2]byte)(unsafe.Pointer(&sa.Port))
- return int(p[0])<<8 | int(p[1]), nil
- case C.AF_INET6:
- sa := (*syscall.RawSockaddrInet6)(unsafe.Pointer(r.ai_addr))
- p := (*[2]byte)(unsafe.Pointer(&sa.Port))
- return int(p[0])<<8 | int(p[1]), nil
- }
- }
- return 0, &DNSError{Err: "unknown port", Name: network + "/" + service}
-}
-
-func cgoPortLookup(result chan<- portLookupResult, hints *C.struct_addrinfo, network, service string) {
- port, err := cgoLookupServicePort(hints, network, service)
- result <- portLookupResult{port, err}
-}
-
-func cgoLookupIPCNAME(network, name string) (addrs []IPAddr, cname string, err error) {
- acquireThread()
- defer releaseThread()
-
- var hints C.struct_addrinfo
- hints.ai_flags = cgoAddrInfoFlags
- hints.ai_socktype = C.SOCK_STREAM
- hints.ai_family = C.AF_UNSPEC
- switch ipVersion(network) {
- case '4':
- hints.ai_family = C.AF_INET
- case '6':
- hints.ai_family = C.AF_INET6
- }
-
- h := make([]byte, len(name)+1)
- copy(h, name)
- var res *C.struct_addrinfo
- gerrno, err := C.getaddrinfo((*C.char)(unsafe.Pointer(&h[0])), nil, &hints, &res)
- if gerrno != 0 {
- isErrorNoSuchHost := false
- isTemporary := false
- switch gerrno {
- case C.EAI_SYSTEM:
- if err == nil {
- // err should not be nil, but sometimes getaddrinfo returns
- // gerrno == C.EAI_SYSTEM with err == nil on Linux.
- // The report claims that it happens when we have too many
- // open files, so use syscall.EMFILE (too many open files in system).
- // Most system calls would return ENFILE (too many open files),
- // so at the least EMFILE should be easy to recognize if this
- // comes up again. golang.org/issue/6232.
- err = syscall.EMFILE
- }
- case C.EAI_NONAME:
- err = errNoSuchHost
- isErrorNoSuchHost = true
- default:
- err = addrinfoErrno(gerrno)
- isTemporary = addrinfoErrno(gerrno).Temporary()
- }
-
- return nil, "", &DNSError{Err: err.Error(), Name: name, IsNotFound: isErrorNoSuchHost, IsTemporary: isTemporary}
- }
- defer C.freeaddrinfo(res)
-
- if res != nil {
- cname = C.GoString(res.ai_canonname)
- if cname == "" {
- cname = name
- }
- if len(cname) > 0 && cname[len(cname)-1] != '.' {
- cname += "."
- }
- }
- for r := res; r != nil; r = r.ai_next {
- // We only asked for SOCK_STREAM, but check anyhow.
- if r.ai_socktype != C.SOCK_STREAM {
- continue
- }
- switch r.ai_family {
- case C.AF_INET:
- sa := (*syscall.RawSockaddrInet4)(unsafe.Pointer(r.ai_addr))
- addr := IPAddr{IP: copyIP(sa.Addr[:])}
- addrs = append(addrs, addr)
- case C.AF_INET6:
- sa := (*syscall.RawSockaddrInet6)(unsafe.Pointer(r.ai_addr))
- addr := IPAddr{IP: copyIP(sa.Addr[:]), Zone: zoneCache.name(int(sa.Scope_id))}
- addrs = append(addrs, addr)
- }
- }
- return addrs, cname, nil
-}
-
-func cgoIPLookup(result chan<- ipLookupResult, network, name string) {
- addrs, cname, err := cgoLookupIPCNAME(network, name)
- result <- ipLookupResult{addrs, cname, err}
-}
-
-func cgoLookupIP(ctx context.Context, network, name string) (addrs []IPAddr, err error, completed bool) {
- if ctx.Done() == nil {
- addrs, _, err = cgoLookupIPCNAME(network, name)
- return addrs, err, true
- }
- result := make(chan ipLookupResult, 1)
- go cgoIPLookup(result, network, name)
- select {
- case r := <-result:
- return r.addrs, r.err, true
- case <-ctx.Done():
- return nil, mapErr(ctx.Err()), false
- }
-}
-
-func cgoLookupCNAME(ctx context.Context, name string) (cname string, err error, completed bool) {
- if ctx.Done() == nil {
- _, cname, err = cgoLookupIPCNAME("ip", name)
- return cname, err, true
- }
- result := make(chan ipLookupResult, 1)
- go cgoIPLookup(result, "ip", name)
- select {
- case r := <-result:
- return r.cname, r.err, true
- case <-ctx.Done():
- return "", mapErr(ctx.Err()), false
- }
-}
-
-// These are roughly enough for the following:
-//
-// Source          Encoding                       Maximum length of single name entry
-// Unicast DNS     ASCII or                       <=253 + a NUL terminator
-//                 Unicode in RFC 5892            252 * total number of labels + delimiters + a NUL terminator
-// Multicast DNS   UTF-8 in RFC 5198 or           <=253 + a NUL terminator
-//                 the same as unicast DNS ASCII  <=253 + a NUL terminator
-// Local database  various                        depends on implementation
-const (
- nameinfoLen = 64
- maxNameinfoLen = 4096
-)
-
-func cgoLookupPTR(ctx context.Context, addr string) (names []string, err error, completed bool) {
- var zone string
- ip := parseIPv4(addr)
- if ip == nil {
- ip, zone = parseIPv6Zone(addr)
- }
- if ip == nil {
- return nil, &DNSError{Err: "invalid address", Name: addr}, true
- }
- sa, salen := cgoSockaddr(ip, zone)
- if sa == nil {
- return nil, &DNSError{Err: "invalid address " + ip.String(), Name: addr}, true
- }
- if ctx.Done() == nil {
- names, err := cgoLookupAddrPTR(addr, sa, salen)
- return names, err, true
- }
- result := make(chan reverseLookupResult, 1)
- go cgoReverseLookup(result, addr, sa, salen)
- select {
- case r := <-result:
- return r.names, r.err, true
- case <-ctx.Done():
- return nil, mapErr(ctx.Err()), false
- }
-}
-
-func cgoLookupAddrPTR(addr string, sa *C.struct_sockaddr, salen C.socklen_t) (names []string, err error) {
- acquireThread()
- defer releaseThread()
-
- var gerrno int
- var b []byte
- for l := nameinfoLen; l <= maxNameinfoLen; l *= 2 {
- b = make([]byte, l)
- gerrno, err = cgoNameinfoPTR(b, sa, salen)
- if gerrno == 0 || gerrno != C.EAI_OVERFLOW {
- break
- }
- }
- if gerrno != 0 {
- isTemporary := false
- switch gerrno {
- case C.EAI_SYSTEM:
- if err == nil { // see golang.org/issue/6232
- err = syscall.EMFILE
- }
- default:
- err = addrinfoErrno(gerrno)
- isTemporary = addrinfoErrno(gerrno).Temporary()
- }
- return nil, &DNSError{Err: err.Error(), Name: addr, IsTemporary: isTemporary}
- }
- for i := 0; i < len(b); i++ {
- if b[i] == 0 {
- b = b[:i]
- break
- }
- }
- return []string{absDomainName(string(b))}, nil
-}
-
-func cgoReverseLookup(result chan<- reverseLookupResult, addr string, sa *C.struct_sockaddr, salen C.socklen_t) {
- names, err := cgoLookupAddrPTR(addr, sa, salen)
- result <- reverseLookupResult{names, err}
-}
-
-func cgoSockaddr(ip IP, zone string) (*C.struct_sockaddr, C.socklen_t) {
- if ip4 := ip.To4(); ip4 != nil {
- return cgoSockaddrInet4(ip4), C.socklen_t(syscall.SizeofSockaddrInet4)
- }
- if ip6 := ip.To16(); ip6 != nil {
- return cgoSockaddrInet6(ip6, zoneCache.index(zone)), C.socklen_t(syscall.SizeofSockaddrInet6)
- }
- return nil, 0
-}
-
-func copyIP(x IP) IP {
- if len(x) < 16 {
- return x.To16()
- }
- y := make(IP, len(x))
- copy(y, x)
- return y
-}
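
Note: the int(p[0])<<8 | int(p[1]) pattern in cgoLookupServicePort above decodes the
sockaddr port field, which is stored in network (big-endian) byte order regardless
of host endianness. In isolation (illustrative sketch):

package main

import "fmt"

// portFromNetworkOrder decodes a big-endian 16-bit port, as found in the
// sin_port/sin6_port fields of the C sockaddr structures above.
func portFromNetworkOrder(p [2]byte) int {
	return int(p[0])<<8 | int(p[1])
}

func main() {
	fmt.Println(portFromNetworkOrder([2]byte{0x01, 0xbb})) // 443
	fmt.Println(portFromNetworkOrder([2]byte{0x00, 0x50})) // 80
}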
diff --git a/contrib/go/_std_1.18/src/net/conf.go b/contrib/go/_std_1.18/src/net/conf.go
deleted file mode 100644
index 415caedacc..0000000000
--- a/contrib/go/_std_1.18/src/net/conf.go
+++ /dev/null
@@ -1,320 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-package net
-
-import (
- "internal/bytealg"
- "internal/godebug"
- "os"
- "runtime"
- "sync"
- "syscall"
-)
-
-// conf represents a system's network configuration.
-type conf struct {
- // forceCgoLookupHost forces CGO to always be used, if available.
- forceCgoLookupHost bool
-
- netGo bool // go DNS resolution forced
- netCgo bool // cgo DNS resolution forced
-
- // machine has an /etc/mdns.allow file
- hasMDNSAllow bool
-
- goos string // the runtime.GOOS, to ease testing
- dnsDebugLevel int
-
- nss *nssConf
- resolv *dnsConfig
-}
-
-var (
- confOnce sync.Once // guards init of confVal via initConfVal
- confVal = &conf{goos: runtime.GOOS}
-)
-
-// systemConf returns the machine's network configuration.
-func systemConf() *conf {
- confOnce.Do(initConfVal)
- return confVal
-}
-
-func initConfVal() {
- dnsMode, debugLevel := goDebugNetDNS()
- confVal.dnsDebugLevel = debugLevel
- confVal.netGo = netGo || dnsMode == "go"
- confVal.netCgo = netCgo || dnsMode == "cgo"
-
- if confVal.dnsDebugLevel > 0 {
- defer func() {
- switch {
- case confVal.netGo:
- if netGo {
- println("go package net: built with netgo build tag; using Go's DNS resolver")
- } else {
- println("go package net: GODEBUG setting forcing use of Go's resolver")
- }
- case confVal.forceCgoLookupHost:
- println("go package net: using cgo DNS resolver")
- default:
- println("go package net: dynamic selection of DNS resolver")
- }
- }()
- }
-
- // Darwin pops up annoying dialog boxes if programs try to do
- // their own DNS requests. So always use cgo instead, which
- // avoids that.
- if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
- confVal.forceCgoLookupHost = true
- return
- }
-
- // If any environment-specified resolver options are specified,
- // force cgo. Note that LOCALDOMAIN can change behavior merely
- // by being specified with the empty string.
- _, localDomainDefined := syscall.Getenv("LOCALDOMAIN")
- if os.Getenv("RES_OPTIONS") != "" ||
- os.Getenv("HOSTALIASES") != "" ||
- confVal.netCgo ||
- localDomainDefined {
- confVal.forceCgoLookupHost = true
- return
- }
-
- // OpenBSD apparently lets you override the location of resolv.conf
- // with ASR_CONFIG. If we notice that, defer to libc.
- if runtime.GOOS == "openbsd" && os.Getenv("ASR_CONFIG") != "" {
- confVal.forceCgoLookupHost = true
- return
- }
-
- if runtime.GOOS != "openbsd" {
- confVal.nss = parseNSSConfFile("/etc/nsswitch.conf")
- }
-
- confVal.resolv = dnsReadConfig("/etc/resolv.conf")
- if confVal.resolv.err != nil && !os.IsNotExist(confVal.resolv.err) &&
- !os.IsPermission(confVal.resolv.err) {
- // If we can't read the resolv.conf file, assume it
- // had something important in it and defer to cgo.
- // libc's resolver might then fail too, but at least
- // it wasn't our fault.
- confVal.forceCgoLookupHost = true
- }
-
- if _, err := os.Stat("/etc/mdns.allow"); err == nil {
- confVal.hasMDNSAllow = true
- }
-}
-
-// canUseCgo reports whether calling cgo functions is allowed
-// for non-hostname lookups.
-func (c *conf) canUseCgo() bool {
- return c.hostLookupOrder(nil, "") == hostLookupCgo
-}
-
-// hostLookupOrder determines which strategy to use to resolve hostname.
-// The provided Resolver is optional. nil means to not consider its options.
-func (c *conf) hostLookupOrder(r *Resolver, hostname string) (ret hostLookupOrder) {
- if c.dnsDebugLevel > 1 {
- defer func() {
- print("go package net: hostLookupOrder(", hostname, ") = ", ret.String(), "\n")
- }()
- }
- fallbackOrder := hostLookupCgo
- if c.netGo || r.preferGo() {
- fallbackOrder = hostLookupFilesDNS
- }
- if c.forceCgoLookupHost || c.resolv.unknownOpt || c.goos == "android" {
- return fallbackOrder
- }
- if bytealg.IndexByteString(hostname, '\\') != -1 || bytealg.IndexByteString(hostname, '%') != -1 {
- // Don't deal with special form hostnames with backslashes
- // or '%'.
- return fallbackOrder
- }
-
- // OpenBSD is unique and doesn't use nsswitch.conf.
- // It also doesn't support mDNS.
- if c.goos == "openbsd" {
- // OpenBSD's resolv.conf manpage says that a non-existent
- // resolv.conf means "lookup" defaults to only "files",
- // without DNS lookups.
- if os.IsNotExist(c.resolv.err) {
- return hostLookupFiles
- }
- lookup := c.resolv.lookup
- if len(lookup) == 0 {
- // https://www.openbsd.org/cgi-bin/man.cgi/OpenBSD-current/man5/resolv.conf.5
- // "If the lookup keyword is not used in the
- // system's resolv.conf file then the assumed
- // order is 'bind file'"
- return hostLookupDNSFiles
- }
- if len(lookup) < 1 || len(lookup) > 2 {
- return fallbackOrder
- }
- switch lookup[0] {
- case "bind":
- if len(lookup) == 2 {
- if lookup[1] == "file" {
- return hostLookupDNSFiles
- }
- return fallbackOrder
- }
- return hostLookupDNS
- case "file":
- if len(lookup) == 2 {
- if lookup[1] == "bind" {
- return hostLookupFilesDNS
- }
- return fallbackOrder
- }
- return hostLookupFiles
- default:
- return fallbackOrder
- }
- }
-
- // Canonicalize the hostname by removing any trailing dot.
- if stringsHasSuffix(hostname, ".") {
- hostname = hostname[:len(hostname)-1]
- }
- if stringsHasSuffixFold(hostname, ".local") {
- // Per RFC 6762, the ".local" TLD is special. And
- // because Go's native resolver doesn't do mDNS or
- // similar local resolution mechanisms, assume that
- // libc might (via Avahi, etc) and use cgo.
- return fallbackOrder
- }
-
- nss := c.nss
- srcs := nss.sources["hosts"]
- // If /etc/nsswitch.conf doesn't exist or doesn't specify any
- // sources for "hosts", assume Go's DNS will work fine.
- if os.IsNotExist(nss.err) || (nss.err == nil && len(srcs) == 0) {
- if c.goos == "solaris" {
- // illumos defaults to "nis [NOTFOUND=return] files"
- return fallbackOrder
- }
- return hostLookupFilesDNS
- }
- if nss.err != nil {
- // We failed to parse or open nsswitch.conf, so
- // conservatively assume we should use cgo if it's
- // available.
- return fallbackOrder
- }
-
- var mdnsSource, filesSource, dnsSource bool
- var first string
- for _, src := range srcs {
- if src.source == "myhostname" {
- if isLocalhost(hostname) || isGateway(hostname) {
- return fallbackOrder
- }
- hn, err := getHostname()
- if err != nil || stringsEqualFold(hostname, hn) {
- return fallbackOrder
- }
- continue
- }
- if src.source == "files" || src.source == "dns" {
- if !src.standardCriteria() {
- return fallbackOrder // non-standard; let libc deal with it.
- }
- if src.source == "files" {
- filesSource = true
- } else if src.source == "dns" {
- dnsSource = true
- }
- if first == "" {
- first = src.source
- }
- continue
- }
- if stringsHasPrefix(src.source, "mdns") {
- // e.g. "mdns4", "mdns4_minimal"
- // We already returned true before if it was *.local.
- // libc wouldn't have found a hit on this anyway.
- mdnsSource = true
- continue
- }
- // Some source we don't know how to deal with.
- return fallbackOrder
- }
-
- // We don't parse mdns.allow files. They're rare. If one
- // exists, it might list other TLDs (besides .local) or even
- // '*', so just let libc deal with it.
- if mdnsSource && c.hasMDNSAllow {
- return fallbackOrder
- }
-
- // Cases where Go can handle it without cgo and C thread
- // overhead.
- switch {
- case filesSource && dnsSource:
- if first == "files" {
- return hostLookupFilesDNS
- } else {
- return hostLookupDNSFiles
- }
- case filesSource:
- return hostLookupFiles
- case dnsSource:
- return hostLookupDNS
- }
-
- // Something weird. Let libc deal with it.
- return fallbackOrder
-}
-
-// goDebugNetDNS parses the value of the GODEBUG "netdns" value.
-// The netdns value can be of the form:
-//	1      // debug level 1
-//	2      // debug level 2
-//	cgo    // use cgo for DNS lookups
-//	go     // use go for DNS lookups
-//	cgo+1  // use cgo for DNS lookups + debug level 1
-//	1+cgo  // same
-//	cgo+2  // same, but debug level 2
-//	etc.
-func goDebugNetDNS() (dnsMode string, debugLevel int) {
- goDebug := godebug.Get("netdns")
- parsePart := func(s string) {
- if s == "" {
- return
- }
- if '0' <= s[0] && s[0] <= '9' {
- debugLevel, _, _ = dtoi(s)
- } else {
- dnsMode = s
- }
- }
- if i := bytealg.IndexByteString(goDebug, '+'); i != -1 {
- parsePart(goDebug[:i])
- parsePart(goDebug[i+1:])
- return
- }
- parsePart(goDebug)
- return
-}
-
-// isLocalhost reports whether h should be considered a "localhost"
-// name for the myhostname NSS module.
-func isLocalhost(h string) bool {
- return stringsEqualFold(h, "localhost") || stringsEqualFold(h, "localhost.localdomain") || stringsHasSuffixFold(h, ".localhost") || stringsHasSuffixFold(h, ".localhost.localdomain")
-}
-
-// isGateway reports whether h should be considered a "gateway"
-// name for the myhostname NSS module.
-func isGateway(h string) bool {
- return stringsEqualFold(h, "gateway")
-}
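
Note: a self-contained approximation of the goDebugNetDNS parsing above, runnable
outside the net package (strconv.Atoi stands in for the internal dtoi helper, so
numeric parts must be fully numeric here):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func parseNetDNS(v string) (mode string, level int) {
	parse := func(s string) {
		if s == "" {
			return
		}
		if n, err := strconv.Atoi(s); err == nil {
			level = n
		} else {
			mode = s
		}
	}
	if i := strings.IndexByte(v, '+'); i >= 0 {
		parse(v[:i])
		parse(v[i+1:])
		return
	}
	parse(v)
	return
}

func main() {
	fmt.Println(parseNetDNS("cgo+2")) // cgo 2
	fmt.Println(parseNetDNS("1"))     // debug level only: "" 1
	fmt.Println(parseNetDNS("go"))    // go 0
}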
diff --git a/contrib/go/_std_1.18/src/net/dial.go b/contrib/go/_std_1.18/src/net/dial.go
deleted file mode 100644
index 486ced0f2a..0000000000
--- a/contrib/go/_std_1.18/src/net/dial.go
+++ /dev/null
@@ -1,743 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package net
-
-import (
- "context"
- "internal/nettrace"
- "syscall"
- "time"
-)
-
-// defaultTCPKeepAlive is the default value for TCP keep-alive intervals.
-// See golang.org/issue/31510.
-const (
- defaultTCPKeepAlive = 15 * time.Second
-)
-
-// A Dialer contains options for connecting to an address.
-//
-// The zero value for each field is equivalent to dialing
-// without that option. Dialing with the zero value of Dialer
-// is therefore equivalent to just calling the Dial function.
-//
-// It is safe to call Dialer's methods concurrently.
-type Dialer struct {
- // Timeout is the maximum amount of time a dial will wait for
- // a connect to complete. If Deadline is also set, it may fail
- // earlier.
- //
- // The default is no timeout.
- //
- // When using TCP and dialing a host name with multiple IP
- // addresses, the timeout may be divided between them.
- //
- // With or without a timeout, the operating system may impose
- // its own earlier timeout. For instance, TCP timeouts are
- // often around 3 minutes.
- Timeout time.Duration
-
- // Deadline is the absolute point in time after which dials
- // will fail. If Timeout is set, it may fail earlier.
- // Zero means no deadline, or dependent on the operating system
- // as with the Timeout option.
- Deadline time.Time
-
- // LocalAddr is the local address to use when dialing an
- // address. The address must be of a compatible type for the
- // network being dialed.
- // If nil, a local address is automatically chosen.
- LocalAddr Addr
-
- // DualStack previously enabled RFC 6555 Fast Fallback
- // support, also known as "Happy Eyeballs", in which IPv4 is
- // tried soon if IPv6 appears to be misconfigured and
- // hanging.
- //
- // Deprecated: Fast Fallback is enabled by default. To
- // disable, set FallbackDelay to a negative value.
- DualStack bool
-
- // FallbackDelay specifies the length of time to wait before
- // spawning a RFC 6555 Fast Fallback connection. That is, this
- // is the amount of time to wait for IPv6 to succeed before
- // assuming that IPv6 is misconfigured and falling back to
- // IPv4.
- //
- // If zero, a default delay of 300ms is used.
- // A negative value disables Fast Fallback support.
- FallbackDelay time.Duration
-
- // KeepAlive specifies the interval between keep-alive
- // probes for an active network connection.
- // If zero, keep-alive probes are sent with a default value
- // (currently 15 seconds), if supported by the protocol and operating
- // system. Network protocols or operating systems that do
- // not support keep-alives ignore this field.
- // If negative, keep-alive probes are disabled.
- KeepAlive time.Duration
-
- // Resolver optionally specifies an alternate resolver to use.
- Resolver *Resolver
-
- // Cancel is an optional channel whose closure indicates that
- // the dial should be canceled. Not all types of dials support
- // cancellation.
- //
- // Deprecated: Use DialContext instead.
- Cancel <-chan struct{}
-
- // If Control is not nil, it is called after creating the network
- // connection but before actually dialing.
- //
- // Network and address parameters passed to Control method are not
- // necessarily the ones passed to Dial. For example, passing "tcp" to Dial
- // will cause the Control function to be called with "tcp4" or "tcp6".
- Control func(network, address string, c syscall.RawConn) error
-}
-
-func (d *Dialer) dualStack() bool { return d.FallbackDelay >= 0 }
-
-func minNonzeroTime(a, b time.Time) time.Time {
- if a.IsZero() {
- return b
- }
- if b.IsZero() || a.Before(b) {
- return a
- }
- return b
-}
-
-// deadline returns the earliest of:
-// - now+Timeout
-// - d.Deadline
-// - the context's deadline
-// Or zero, if none of Timeout, Deadline, or context's deadline is set.
-func (d *Dialer) deadline(ctx context.Context, now time.Time) (earliest time.Time) {
- if d.Timeout != 0 { // including negative, for historical reasons
- earliest = now.Add(d.Timeout)
- }
- if d, ok := ctx.Deadline(); ok {
- earliest = minNonzeroTime(earliest, d)
- }
- return minNonzeroTime(earliest, d.Deadline)
-}
-
-func (d *Dialer) resolver() *Resolver {
- if d.Resolver != nil {
- return d.Resolver
- }
- return DefaultResolver
-}
-
-// partialDeadline returns the deadline to use for a single address,
-// when multiple addresses are pending.
-func partialDeadline(now, deadline time.Time, addrsRemaining int) (time.Time, error) {
- if deadline.IsZero() {
- return deadline, nil
- }
- timeRemaining := deadline.Sub(now)
- if timeRemaining <= 0 {
- return time.Time{}, errTimeout
- }
- // Tentatively allocate equal time to each remaining address.
- timeout := timeRemaining / time.Duration(addrsRemaining)
- // If the time per address is too short, steal from the end of the list.
- const saneMinimum = 2 * time.Second
- if timeout < saneMinimum {
- if timeRemaining < saneMinimum {
- timeout = timeRemaining
- } else {
- timeout = saneMinimum
- }
- }
- return now.Add(timeout), nil
-}
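For intuition, a small re-statement of the arithmetic above (split is a hypothetical helper; the real function also returns an error once time has run out): each remaining address gets an equal share, with a 2-second floor that borrows time from addresses later in the list.

package main

import (
	"fmt"
	"time"
)

// split mirrors the timeout computation in partialDeadline.
func split(remaining time.Duration, addrsRemaining int) time.Duration {
	timeout := remaining / time.Duration(addrsRemaining)
	const saneMinimum = 2 * time.Second
	if timeout < saneMinimum {
		if remaining < saneMinimum {
			timeout = remaining
		} else {
			timeout = saneMinimum
		}
	}
	return timeout
}

func main() {
	fmt.Println(split(9*time.Second, 3)) // 3s: an equal share
	fmt.Println(split(3*time.Second, 3)) // 2s: the floor steals from later addresses
	fmt.Println(split(1*time.Second, 2)) // 1s: less than the floor is left, use it all
}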
-
-func (d *Dialer) fallbackDelay() time.Duration {
- if d.FallbackDelay > 0 {
- return d.FallbackDelay
- } else {
- return 300 * time.Millisecond
- }
-}
-
-func parseNetwork(ctx context.Context, network string, needsProto bool) (afnet string, proto int, err error) {
- i := last(network, ':')
- if i < 0 { // no colon
- switch network {
- case "tcp", "tcp4", "tcp6":
- case "udp", "udp4", "udp6":
- case "ip", "ip4", "ip6":
- if needsProto {
- return "", 0, UnknownNetworkError(network)
- }
- case "unix", "unixgram", "unixpacket":
- default:
- return "", 0, UnknownNetworkError(network)
- }
- return network, 0, nil
- }
- afnet = network[:i]
- switch afnet {
- case "ip", "ip4", "ip6":
- protostr := network[i+1:]
- proto, i, ok := dtoi(protostr)
- if !ok || i != len(protostr) {
- proto, err = lookupProtocol(ctx, protostr)
- if err != nil {
- return "", 0, err
- }
- }
- return afnet, proto, nil
- }
- return "", 0, UnknownNetworkError(network)
-}
-
-// resolveAddrList resolves addr using hint and returns a list of
-// addresses. The result contains at least one address when error is
-// nil.
-func (r *Resolver) resolveAddrList(ctx context.Context, op, network, addr string, hint Addr) (addrList, error) {
- afnet, _, err := parseNetwork(ctx, network, true)
- if err != nil {
- return nil, err
- }
- if op == "dial" && addr == "" {
- return nil, errMissingAddress
- }
- switch afnet {
- case "unix", "unixgram", "unixpacket":
- addr, err := ResolveUnixAddr(afnet, addr)
- if err != nil {
- return nil, err
- }
- if op == "dial" && hint != nil && addr.Network() != hint.Network() {
- return nil, &AddrError{Err: "mismatched local address type", Addr: hint.String()}
- }
- return addrList{addr}, nil
- }
- addrs, err := r.internetAddrList(ctx, afnet, addr)
- if err != nil || op != "dial" || hint == nil {
- return addrs, err
- }
- var (
- tcp *TCPAddr
- udp *UDPAddr
- ip *IPAddr
- wildcard bool
- )
- switch hint := hint.(type) {
- case *TCPAddr:
- tcp = hint
- wildcard = tcp.isWildcard()
- case *UDPAddr:
- udp = hint
- wildcard = udp.isWildcard()
- case *IPAddr:
- ip = hint
- wildcard = ip.isWildcard()
- }
- naddrs := addrs[:0]
- for _, addr := range addrs {
- if addr.Network() != hint.Network() {
- return nil, &AddrError{Err: "mismatched local address type", Addr: hint.String()}
- }
- switch addr := addr.(type) {
- case *TCPAddr:
- if !wildcard && !addr.isWildcard() && !addr.IP.matchAddrFamily(tcp.IP) {
- continue
- }
- naddrs = append(naddrs, addr)
- case *UDPAddr:
- if !wildcard && !addr.isWildcard() && !addr.IP.matchAddrFamily(udp.IP) {
- continue
- }
- naddrs = append(naddrs, addr)
- case *IPAddr:
- if !wildcard && !addr.isWildcard() && !addr.IP.matchAddrFamily(ip.IP) {
- continue
- }
- naddrs = append(naddrs, addr)
- }
- }
- if len(naddrs) == 0 {
- return nil, &AddrError{Err: errNoSuitableAddress.Error(), Addr: hint.String()}
- }
- return naddrs, nil
-}
-
-// Dial connects to the address on the named network.
-//
-// Known networks are "tcp", "tcp4" (IPv4-only), "tcp6" (IPv6-only),
-// "udp", "udp4" (IPv4-only), "udp6" (IPv6-only), "ip", "ip4"
-// (IPv4-only), "ip6" (IPv6-only), "unix", "unixgram" and
-// "unixpacket".
-//
-// For TCP and UDP networks, the address has the form "host:port".
-// The host must be a literal IP address, or a host name that can be
-// resolved to IP addresses.
-// The port must be a literal port number or a service name.
-// If the host is a literal IPv6 address it must be enclosed in square
-// brackets, as in "[2001:db8::1]:80" or "[fe80::1%zone]:80".
-// The zone specifies the scope of the literal IPv6 address as defined
-// in RFC 4007.
-// The functions JoinHostPort and SplitHostPort manipulate a pair of
-// host and port in this form.
-// When using TCP, and the host resolves to multiple IP addresses,
-// Dial will try each IP address in order until one succeeds.
-//
-// Examples:
-// Dial("tcp", "golang.org:http")
-// Dial("tcp", "192.0.2.1:http")
-// Dial("tcp", "198.51.100.1:80")
-// Dial("udp", "[2001:db8::1]:domain")
-// Dial("udp", "[fe80::1%lo0]:53")
-// Dial("tcp", ":80")
-//
-// For IP networks, the network must be "ip", "ip4" or "ip6" followed
-// by a colon and a literal protocol number or a protocol name, and
-// the address has the form "host". The host must be a literal IP
-// address or a literal IPv6 address with zone.
- // Behavior with a non-well-known protocol number such as "0" or
- // "255" depends on the operating system.
-//
-// Examples:
-// Dial("ip4:1", "192.0.2.1")
-// Dial("ip6:ipv6-icmp", "2001:db8::1")
-// Dial("ip6:58", "fe80::1%lo0")
-//
-// For TCP, UDP and IP networks, if the host is empty or a literal
-// unspecified IP address, as in ":80", "0.0.0.0:80" or "[::]:80" for
-// TCP and UDP, "", "0.0.0.0" or "::" for IP, the local system is
-// assumed.
-//
-// For Unix networks, the address must be a file system path.
-func Dial(network, address string) (Conn, error) {
- var d Dialer
- return d.Dial(network, address)
-}
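A hedged end-to-end sketch of the documentation above, assuming outbound connectivity; golang.org:http is one of the documented example targets and typically resolves to multiple addresses, exercising the try-each-in-order behavior.

package main

import (
	"bufio"
	"fmt"
	"log"
	"net"
)

func main() {
	conn, err := net.Dial("tcp", "golang.org:http")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	fmt.Fprintf(conn, "HEAD / HTTP/1.0\r\nHost: golang.org\r\n\r\n")
	status, err := bufio.NewReader(conn).ReadString('\n')
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(status) // e.g. an HTTP/1.0 status line
}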
-
-// DialTimeout acts like Dial but takes a timeout.
-//
-// The timeout includes name resolution, if required.
-// When using TCP, and the host in the address parameter resolves to
-// multiple IP addresses, the timeout is spread over each consecutive
-// dial, such that each is given an appropriate fraction of the time
-// to connect.
-//
-// See func Dial for a description of the network and address
-// parameters.
-func DialTimeout(network, address string, timeout time.Duration) (Conn, error) {
- d := Dialer{Timeout: timeout}
- return d.Dial(network, address)
-}
-
-// sysDialer contains a Dial's parameters and configuration.
-type sysDialer struct {
- Dialer
- network, address string
-}
-
-// Dial connects to the address on the named network.
-//
-// See func Dial for a description of the network and address
-// parameters.
-//
-// Dial uses context.Background internally; to specify the context, use
-// DialContext.
-func (d *Dialer) Dial(network, address string) (Conn, error) {
- return d.DialContext(context.Background(), network, address)
-}
-
-// DialContext connects to the address on the named network using
-// the provided context.
-//
-// The provided Context must be non-nil. If the context expires before
-// the connection is complete, an error is returned. Once successfully
-// connected, any expiration of the context will not affect the
-// connection.
-//
-// When using TCP, and the host in the address parameter resolves to multiple
-// network addresses, any dial timeout (from d.Timeout or ctx) is spread
-// over each consecutive dial, such that each is given an appropriate
-// fraction of the time to connect.
-// For example, if a host has 4 IP addresses and the timeout is 1 minute,
-// the connect to each single address will be given 15 seconds to complete
-// before trying the next one.
-//
-// See func Dial for a description of the network and address
-// parameters.
-func (d *Dialer) DialContext(ctx context.Context, network, address string) (Conn, error) {
- if ctx == nil {
- panic("nil context")
- }
- deadline := d.deadline(ctx, time.Now())
- if !deadline.IsZero() {
- if d, ok := ctx.Deadline(); !ok || deadline.Before(d) {
- subCtx, cancel := context.WithDeadline(ctx, deadline)
- defer cancel()
- ctx = subCtx
- }
- }
- if oldCancel := d.Cancel; oldCancel != nil {
- subCtx, cancel := context.WithCancel(ctx)
- defer cancel()
- go func() {
- select {
- case <-oldCancel:
- cancel()
- case <-subCtx.Done():
- }
- }()
- ctx = subCtx
- }
-
- // Shadow the nettrace (if any) during resolve so Connect events don't fire for DNS lookups.
- resolveCtx := ctx
- if trace, _ := ctx.Value(nettrace.TraceKey{}).(*nettrace.Trace); trace != nil {
- shadow := *trace
- shadow.ConnectStart = nil
- shadow.ConnectDone = nil
- resolveCtx = context.WithValue(resolveCtx, nettrace.TraceKey{}, &shadow)
- }
-
- addrs, err := d.resolver().resolveAddrList(resolveCtx, "dial", network, address, d.LocalAddr)
- if err != nil {
- return nil, &OpError{Op: "dial", Net: network, Source: nil, Addr: nil, Err: err}
- }
-
- sd := &sysDialer{
- Dialer: *d,
- network: network,
- address: address,
- }
-
- var primaries, fallbacks addrList
- if d.dualStack() && network == "tcp" {
- primaries, fallbacks = addrs.partition(isIPv4)
- } else {
- primaries = addrs
- }
-
- var c Conn
- if len(fallbacks) > 0 {
- c, err = sd.dialParallel(ctx, primaries, fallbacks)
- } else {
- c, err = sd.dialSerial(ctx, primaries)
- }
- if err != nil {
- return nil, err
- }
-
- if tc, ok := c.(*TCPConn); ok && d.KeepAlive >= 0 {
- setKeepAlive(tc.fd, true)
- ka := d.KeepAlive
- if d.KeepAlive == 0 {
- ka = defaultTCPKeepAlive
- }
- setKeepAlivePeriod(tc.fd, ka)
- testHookSetKeepAlive(ka)
- }
- return c, nil
-}
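A short usage sketch of the context-aware variant; the 5-second budget and target are arbitrary. Per the documentation above, the context only governs dialing: once the connection is established, its expiry no longer matters.

package main

import (
	"context"
	"log"
	"net"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	var d net.Dialer
	conn, err := d.DialContext(ctx, "tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close() // ctx expiring after this point does not close conn
}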
-
-// dialParallel races two copies of dialSerial, giving the first a
-// head start. It returns the first established connection and
-// closes the others. Otherwise it returns an error from the first
-// primary address.
-func (sd *sysDialer) dialParallel(ctx context.Context, primaries, fallbacks addrList) (Conn, error) {
- if len(fallbacks) == 0 {
- return sd.dialSerial(ctx, primaries)
- }
-
- returned := make(chan struct{})
- defer close(returned)
-
- type dialResult struct {
- Conn
- error
- primary bool
- done bool
- }
- results := make(chan dialResult) // unbuffered
-
- startRacer := func(ctx context.Context, primary bool) {
- ras := primaries
- if !primary {
- ras = fallbacks
- }
- c, err := sd.dialSerial(ctx, ras)
- select {
- case results <- dialResult{Conn: c, error: err, primary: primary, done: true}:
- case <-returned:
- if c != nil {
- c.Close()
- }
- }
- }
-
- var primary, fallback dialResult
-
- // Start the main racer.
- primaryCtx, primaryCancel := context.WithCancel(ctx)
- defer primaryCancel()
- go startRacer(primaryCtx, true)
-
- // Start the timer for the fallback racer.
- fallbackTimer := time.NewTimer(sd.fallbackDelay())
- defer fallbackTimer.Stop()
-
- for {
- select {
- case <-fallbackTimer.C:
- fallbackCtx, fallbackCancel := context.WithCancel(ctx)
- defer fallbackCancel()
- go startRacer(fallbackCtx, false)
-
- case res := <-results:
- if res.error == nil {
- return res.Conn, nil
- }
- if res.primary {
- primary = res
- } else {
- fallback = res
- }
- if primary.done && fallback.done {
- return nil, primary.error
- }
- if res.primary && fallbackTimer.Stop() {
- // If we were able to stop the timer, that means it
- // was running (hadn't yet started the fallback), but
- // we just got an error on the primary path, so start
- // the fallback immediately (in 0 nanoseconds).
- fallbackTimer.Reset(0)
- }
- }
- }
-}
-
-// dialSerial connects to a list of addresses in sequence, returning
-// either the first successful connection, or the first error.
-func (sd *sysDialer) dialSerial(ctx context.Context, ras addrList) (Conn, error) {
- var firstErr error // The error from the first address is most relevant.
-
- for i, ra := range ras {
- select {
- case <-ctx.Done():
- return nil, &OpError{Op: "dial", Net: sd.network, Source: sd.LocalAddr, Addr: ra, Err: mapErr(ctx.Err())}
- default:
- }
-
- dialCtx := ctx
- if deadline, hasDeadline := ctx.Deadline(); hasDeadline {
- partialDeadline, err := partialDeadline(time.Now(), deadline, len(ras)-i)
- if err != nil {
- // Ran out of time.
- if firstErr == nil {
- firstErr = &OpError{Op: "dial", Net: sd.network, Source: sd.LocalAddr, Addr: ra, Err: err}
- }
- break
- }
- if partialDeadline.Before(deadline) {
- var cancel context.CancelFunc
- dialCtx, cancel = context.WithDeadline(ctx, partialDeadline)
- defer cancel()
- }
- }
-
- c, err := sd.dialSingle(dialCtx, ra)
- if err == nil {
- return c, nil
- }
- if firstErr == nil {
- firstErr = err
- }
- }
-
- if firstErr == nil {
- firstErr = &OpError{Op: "dial", Net: sd.network, Source: nil, Addr: nil, Err: errMissingAddress}
- }
- return nil, firstErr
-}
-
-// dialSingle attempts to establish and returns a single connection to
-// the destination address.
-func (sd *sysDialer) dialSingle(ctx context.Context, ra Addr) (c Conn, err error) {
- trace, _ := ctx.Value(nettrace.TraceKey{}).(*nettrace.Trace)
- if trace != nil {
- raStr := ra.String()
- if trace.ConnectStart != nil {
- trace.ConnectStart(sd.network, raStr)
- }
- if trace.ConnectDone != nil {
- defer func() { trace.ConnectDone(sd.network, raStr, err) }()
- }
- }
- la := sd.LocalAddr
- switch ra := ra.(type) {
- case *TCPAddr:
- la, _ := la.(*TCPAddr)
- c, err = sd.dialTCP(ctx, la, ra)
- case *UDPAddr:
- la, _ := la.(*UDPAddr)
- c, err = sd.dialUDP(ctx, la, ra)
- case *IPAddr:
- la, _ := la.(*IPAddr)
- c, err = sd.dialIP(ctx, la, ra)
- case *UnixAddr:
- la, _ := la.(*UnixAddr)
- c, err = sd.dialUnix(ctx, la, ra)
- default:
- return nil, &OpError{Op: "dial", Net: sd.network, Source: la, Addr: ra, Err: &AddrError{Err: "unexpected address type", Addr: sd.address}}
- }
- if err != nil {
- return nil, &OpError{Op: "dial", Net: sd.network, Source: la, Addr: ra, Err: err} // c is non-nil interface containing nil pointer
- }
- return c, nil
-}
-
-// ListenConfig contains options for listening to an address.
-type ListenConfig struct {
- // If Control is not nil, it is called after creating the network
- // connection but before binding it to the operating system.
- //
- // Network and address parameters passed to the Control method are not
- // necessarily the ones passed to Listen. For example, passing "tcp" to
- // Listen will cause the Control function to be called with "tcp4" or "tcp6".
- Control func(network, address string, c syscall.RawConn) error
-
- // KeepAlive specifies the keep-alive period for network
- // connections accepted by this listener.
- // If zero, keep-alives are enabled if supported by the protocol
- // and operating system. Network protocols or operating systems
- // that do not support keep-alives ignore this field.
- // If negative, keep-alives are disabled.
- KeepAlive time.Duration
-}
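A usage sketch of ListenConfig (Unix-oriented; SO_REUSEADDR is an arbitrary illustration of the Control hook, which runs after socket creation but before bind):

package main

import (
	"context"
	"log"
	"net"
	"syscall"
)

func main() {
	lc := net.ListenConfig{
		Control: func(network, address string, c syscall.RawConn) error {
			var serr error
			if err := c.Control(func(fd uintptr) {
				serr = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
			}); err != nil {
				return err
			}
			return serr
		},
	}
	ln, err := lc.Listen(context.Background(), "tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	log.Println("listening on", ln.Addr())
}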
-
-// Listen announces on the local network address.
-//
-// See func Listen for a description of the network and address
-// parameters.
-func (lc *ListenConfig) Listen(ctx context.Context, network, address string) (Listener, error) {
- addrs, err := DefaultResolver.resolveAddrList(ctx, "listen", network, address, nil)
- if err != nil {
- return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: nil, Err: err}
- }
- sl := &sysListener{
- ListenConfig: *lc,
- network: network,
- address: address,
- }
- var l Listener
- la := addrs.first(isIPv4)
- switch la := la.(type) {
- case *TCPAddr:
- l, err = sl.listenTCP(ctx, la)
- case *UnixAddr:
- l, err = sl.listenUnix(ctx, la)
- default:
- return nil, &OpError{Op: "listen", Net: sl.network, Source: nil, Addr: la, Err: &AddrError{Err: "unexpected address type", Addr: address}}
- }
- if err != nil {
- return nil, &OpError{Op: "listen", Net: sl.network, Source: nil, Addr: la, Err: err} // l is non-nil interface containing nil pointer
- }
- return l, nil
-}
-
-// ListenPacket announces on the local network address.
-//
-// See func ListenPacket for a description of the network and address
-// parameters.
-func (lc *ListenConfig) ListenPacket(ctx context.Context, network, address string) (PacketConn, error) {
- addrs, err := DefaultResolver.resolveAddrList(ctx, "listen", network, address, nil)
- if err != nil {
- return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: nil, Err: err}
- }
- sl := &sysListener{
- ListenConfig: *lc,
- network: network,
- address: address,
- }
- var c PacketConn
- la := addrs.first(isIPv4)
- switch la := la.(type) {
- case *UDPAddr:
- c, err = sl.listenUDP(ctx, la)
- case *IPAddr:
- c, err = sl.listenIP(ctx, la)
- case *UnixAddr:
- c, err = sl.listenUnixgram(ctx, la)
- default:
- return nil, &OpError{Op: "listen", Net: sl.network, Source: nil, Addr: la, Err: &AddrError{Err: "unexpected address type", Addr: address}}
- }
- if err != nil {
- return nil, &OpError{Op: "listen", Net: sl.network, Source: nil, Addr: la, Err: err} // c is non-nil interface containing nil pointer
- }
- return c, nil
-}
-
-// sysListener contains a Listen's parameters and configuration.
-type sysListener struct {
- ListenConfig
- network, address string
-}
-
-// Listen announces on the local network address.
-//
-// The network must be "tcp", "tcp4", "tcp6", "unix" or "unixpacket".
-//
-// For TCP networks, if the host in the address parameter is empty or
-// a literal unspecified IP address, Listen listens on all available
-// unicast and anycast IP addresses of the local system.
-// To only use IPv4, use network "tcp4".
-// The address can use a host name, but this is not recommended,
-// because it will create a listener for at most one of the host's IP
-// addresses.
-// If the port in the address parameter is empty or "0", as in
-// "127.0.0.1:" or "[::1]:0", a port number is automatically chosen.
-// The Addr method of Listener can be used to discover the chosen
-// port.
-//
-// See func Dial for a description of the network and address
-// parameters.
-//
-// Listen uses context.Background internally; to specify the context, use
-// ListenConfig.Listen.
-func Listen(network, address string) (Listener, error) {
- var lc ListenConfig
- return lc.Listen(context.Background(), network, address)
-}
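The canonical accept-loop shape implied by the documentation above, as a sketch; port ":0" demonstrates the automatically chosen port, and the echo handler is arbitrary.

package main

import (
	"io"
	"log"
	"net"
)

func main() {
	ln, err := net.Listen("tcp", ":0")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("listening on", ln.Addr()) // reveals the chosen port

	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func(c net.Conn) {
			defer c.Close()
			io.Copy(c, c) // trivial echo
		}(conn)
	}
}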
-
-// ListenPacket announces on the local network address.
-//
-// The network must be "udp", "udp4", "udp6", "unixgram", or an IP
-// transport. The IP transports are "ip", "ip4", or "ip6" followed by
-// a colon and a literal protocol number or a protocol name, as in
-// "ip:1" or "ip:icmp".
-//
-// For UDP and IP networks, if the host in the address parameter is
-// empty or a literal unspecified IP address, ListenPacket listens on
-// all available IP addresses of the local system except multicast IP
-// addresses.
-// To only use IPv4, use network "udp4" or "ip4:proto".
-// The address can use a host name, but this is not recommended,
-// because it will create a listener for at most one of the host's IP
-// addresses.
-// If the port in the address parameter is empty or "0", as in
-// "127.0.0.1:" or "[::1]:0", a port number is automatically chosen.
-// The LocalAddr method of PacketConn can be used to discover the
-// chosen port.
-//
-// See func Dial for a description of the network and address
-// parameters.
-//
-// ListenPacket uses context.Background internally; to specify the context, use
-// ListenConfig.ListenPacket.
-func ListenPacket(network, address string) (PacketConn, error) {
- var lc ListenConfig
- return lc.ListenPacket(context.Background(), network, address)
-}
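A matching PacketConn sketch: a UDP echo responder on an automatically chosen local port (address and buffer size are arbitrary).

package main

import (
	"log"
	"net"
)

func main() {
	pc, err := net.ListenPacket("udp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()
	log.Println("bound to", pc.LocalAddr()) // reveals the chosen port

	buf := make([]byte, 1024)
	for {
		n, addr, err := pc.ReadFrom(buf)
		if err != nil {
			log.Fatal(err)
		}
		if _, err := pc.WriteTo(buf[:n], addr); err != nil {
			log.Fatal(err)
		}
	}
}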
diff --git a/contrib/go/_std_1.18/src/net/dnsclient.go b/contrib/go/_std_1.18/src/net/dnsclient.go
deleted file mode 100644
index a779c37e53..0000000000
--- a/contrib/go/_std_1.18/src/net/dnsclient.go
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package net
-
-import (
- "internal/bytealg"
- "internal/itoa"
- "sort"
-
- "golang.org/x/net/dns/dnsmessage"
-)
-
-// provided by runtime
-func fastrand() uint32
-
-func randInt() int {
- x, y := fastrand(), fastrand() // 32-bit halves
- u := uint(x)<<31 ^ uint(int32(y)) // full uint, even on 64-bit systems; avoid 32-bit shift on 32-bit systems
- i := int(u >> 1) // clear sign bit, even on 32-bit systems
- return i
-}
-
-func randIntn(n int) int {
- return randInt() % n
-}
-
-// reverseaddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
-// address addr suitable for rDNS (PTR) record lookup or an error if it fails
-// to parse the IP address.
-func reverseaddr(addr string) (arpa string, err error) {
- ip := ParseIP(addr)
- if ip == nil {
- return "", &DNSError{Err: "unrecognized address", Name: addr}
- }
- if ip.To4() != nil {
- return itoa.Uitoa(uint(ip[15])) + "." + itoa.Uitoa(uint(ip[14])) + "." + itoa.Uitoa(uint(ip[13])) + "." + itoa.Uitoa(uint(ip[12])) + ".in-addr.arpa.", nil
- }
- // Must be IPv6
- buf := make([]byte, 0, len(ip)*4+len("ip6.arpa."))
- // Add it, in reverse, to the buffer
- for i := len(ip) - 1; i >= 0; i-- {
- v := ip[i]
- buf = append(buf, hexDigit[v&0xF],
- '.',
- hexDigit[v>>4],
- '.')
- }
- // Append "ip6.arpa." and return (buf already has the final .)
- buf = append(buf, "ip6.arpa."...)
- return string(buf), nil
-}
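For intuition, the IPv4 branch above produces names like the following; v4arpa is a hypothetical re-statement for illustration, not the unexported helper itself.

package main

import (
	"fmt"
	"net"
)

// v4arpa mirrors the IPv4 branch of reverseaddr: octets reversed under in-addr.arpa.
func v4arpa(s string) string {
	ip := net.ParseIP(s).To4()
	if ip == nil {
		return ""
	}
	return fmt.Sprintf("%d.%d.%d.%d.in-addr.arpa.", ip[3], ip[2], ip[1], ip[0])
}

func main() {
	fmt.Println(v4arpa("192.0.2.1")) // 1.2.0.192.in-addr.arpa.
	// The IPv6 branch expands to 32 reversed nibbles under ip6.arpa., e.g.
	// "2001:db8::1" ends with "...8.b.d.0.1.0.0.2.ip6.arpa."
}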
-
-func equalASCIIName(x, y dnsmessage.Name) bool {
- if x.Length != y.Length {
- return false
- }
- for i := 0; i < int(x.Length); i++ {
- a := x.Data[i]
- b := y.Data[i]
- if 'A' <= a && a <= 'Z' {
- a += 0x20
- }
- if 'A' <= b && b <= 'Z' {
- b += 0x20
- }
- if a != b {
- return false
- }
- }
- return true
-}
-
-// isDomainName checks if a string is a presentation-format domain name
-// (currently restricted to hostname-compatible "preferred name" LDH labels and
-// SRV-like "underscore labels"; see golang.org/issue/12421).
-func isDomainName(s string) bool {
- // The root domain name is valid. See golang.org/issue/45715.
- if s == "." {
- return true
- }
-
- // See RFC 1035, RFC 3696.
- // Presentation format has dots before every label except the first, and the
- // terminal empty label is optional here because we assume fully-qualified
- // (absolute) input. We must therefore reserve space for the first and last
- // labels' length octets in wire format, where they are necessary and the
- // maximum total length is 255.
- // So our _effective_ maximum is 253, but 254 is not rejected if the last
- // character is a dot.
- l := len(s)
- if l == 0 || l > 254 || l == 254 && s[l-1] != '.' {
- return false
- }
-
- last := byte('.')
- nonNumeric := false // true once we've seen a letter or hyphen
- partlen := 0
- for i := 0; i < len(s); i++ {
- c := s[i]
- switch {
- default:
- return false
- case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_':
- nonNumeric = true
- partlen++
- case '0' <= c && c <= '9':
- // fine
- partlen++
- case c == '-':
- // Byte before dash cannot be dot.
- if last == '.' {
- return false
- }
- partlen++
- nonNumeric = true
- case c == '.':
- // Byte before dot cannot be dot, dash.
- if last == '.' || last == '-' {
- return false
- }
- if partlen > 63 || partlen == 0 {
- return false
- }
- partlen = 0
- }
- last = c
- }
- if last == '-' || partlen > 63 {
- return false
- }
-
- return nonNumeric
-}
-
-// absDomainName returns an absolute domain name which ends with a
-// trailing dot to match pure Go reverse resolver and all other lookup
-// routines.
-// See golang.org/issue/12189.
-// But we don't want to add dots for local names from /etc/hosts.
-// It's hard to tell so we settle on the heuristic that names without dots
-// (like "localhost" or "myhost") do not get trailing dots, but any other
-// names do.
-func absDomainName(s string) string {
- if bytealg.IndexByteString(s, '.') != -1 && s[len(s)-1] != '.' {
- s += "."
- }
- return s
-}
-
-// An SRV represents a single DNS SRV record.
-type SRV struct {
- Target string
- Port uint16
- Priority uint16
- Weight uint16
-}
-
-// byPriorityWeight sorts SRV records by ascending priority and weight.
-type byPriorityWeight []*SRV
-
-func (s byPriorityWeight) Len() int { return len(s) }
-func (s byPriorityWeight) Less(i, j int) bool {
- return s[i].Priority < s[j].Priority || (s[i].Priority == s[j].Priority && s[i].Weight < s[j].Weight)
-}
-func (s byPriorityWeight) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-// shuffleByWeight shuffles SRV records by weight using the algorithm
-// described in RFC 2782.
-func (addrs byPriorityWeight) shuffleByWeight() {
- sum := 0
- for _, addr := range addrs {
- sum += int(addr.Weight)
- }
- for sum > 0 && len(addrs) > 1 {
- s := 0
- n := randIntn(sum)
- for i := range addrs {
- s += int(addrs[i].Weight)
- if s > n {
- if i > 0 {
- addrs[0], addrs[i] = addrs[i], addrs[0]
- }
- break
- }
- }
- sum -= int(addrs[0].Weight)
- addrs = addrs[1:]
- }
-}
-
-// sort reorders SRV records as specified in RFC 2782.
-func (addrs byPriorityWeight) sort() {
- sort.Sort(addrs)
- i := 0
- for j := 1; j < len(addrs); j++ {
- if addrs[i].Priority != addrs[j].Priority {
- addrs[i:j].shuffleByWeight()
- i = j
- }
- }
- addrs[i:].shuffleByWeight()
-}
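These types and the ordering rules above back the public lookup API; for example, net.LookupSRV returns records already sorted and weight-shuffled per RFC 2782 (the service, protocol, and domain here are hypothetical):

package main

import (
	"fmt"
	"log"
	"net"
)

func main() {
	cname, srvs, err := net.LookupSRV("xmpp-server", "tcp", "example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("canonical name:", cname)
	for _, srv := range srvs {
		fmt.Printf("%s:%d (priority %d, weight %d)\n", srv.Target, srv.Port, srv.Priority, srv.Weight)
	}
}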
-
-// An MX represents a single DNS MX record.
-type MX struct {
- Host string
- Pref uint16
-}
-
-// byPref implements sort.Interface to sort MX records by preference
-type byPref []*MX
-
-func (s byPref) Len() int { return len(s) }
-func (s byPref) Less(i, j int) bool { return s[i].Pref < s[j].Pref }
-func (s byPref) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-// sort reorders MX records as specified in RFC 5321.
-func (s byPref) sort() {
- for i := range s {
- j := randIntn(i + 1)
- s[i], s[j] = s[j], s[i]
- }
- sort.Sort(s)
-}
-
-// An NS represents a single DNS NS record.
-type NS struct {
- Host string
-}
diff --git a/contrib/go/_std_1.18/src/net/dnsclient_unix.go b/contrib/go/_std_1.18/src/net/dnsclient_unix.go
deleted file mode 100644
index 9a4a6ee68c..0000000000
--- a/contrib/go/_std_1.18/src/net/dnsclient_unix.go
+++ /dev/null
@@ -1,800 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-// DNS client: see RFC 1035.
-// Has to be linked into package net for Dial.
-
-// TODO(rsc):
-// Could potentially handle many outstanding lookups faster.
-// Random UDP source port (net.Dial should do that for us).
-// Random request IDs.
-
-package net
-
-import (
- "context"
- "errors"
- "internal/itoa"
- "io"
- "os"
- "sync"
- "time"
-
- "golang.org/x/net/dns/dnsmessage"
-)
-
-const (
- // to be used as a useTCP parameter to exchange
- useTCPOnly = true
- useUDPOrTCP = false
-
- // Maximum DNS packet size.
- // Value taken from https://dnsflagday.net/2020/.
- maxDNSPacketSize = 1232
-)
-
-var (
- errLameReferral = errors.New("lame referral")
- errCannotUnmarshalDNSMessage = errors.New("cannot unmarshal DNS message")
- errCannotMarshalDNSMessage = errors.New("cannot marshal DNS message")
- errServerMisbehaving = errors.New("server misbehaving")
- errInvalidDNSResponse = errors.New("invalid DNS response")
- errNoAnswerFromDNSServer = errors.New("no answer from DNS server")
-
- // errServerTemporarilyMisbehaving is like errServerMisbehaving, except
- // that when it gets translated to a DNSError, the IsTemporary field
- // gets set to true.
- errServerTemporarilyMisbehaving = errors.New("server misbehaving")
-)
-
-func newRequest(q dnsmessage.Question) (id uint16, udpReq, tcpReq []byte, err error) {
- id = uint16(randInt())
- b := dnsmessage.NewBuilder(make([]byte, 2, 514), dnsmessage.Header{ID: id, RecursionDesired: true})
- b.EnableCompression()
- if err := b.StartQuestions(); err != nil {
- return 0, nil, nil, err
- }
- if err := b.Question(q); err != nil {
- return 0, nil, nil, err
- }
- tcpReq, err = b.Finish()
- udpReq = tcpReq[2:]
- l := len(tcpReq) - 2
- tcpReq[0] = byte(l >> 8)
- tcpReq[1] = byte(l)
- return id, udpReq, tcpReq, err
-}
-
-func checkResponse(reqID uint16, reqQues dnsmessage.Question, respHdr dnsmessage.Header, respQues dnsmessage.Question) bool {
- if !respHdr.Response {
- return false
- }
- if reqID != respHdr.ID {
- return false
- }
- if reqQues.Type != respQues.Type || reqQues.Class != respQues.Class || !equalASCIIName(reqQues.Name, respQues.Name) {
- return false
- }
- return true
-}
-
-func dnsPacketRoundTrip(c Conn, id uint16, query dnsmessage.Question, b []byte) (dnsmessage.Parser, dnsmessage.Header, error) {
- if _, err := c.Write(b); err != nil {
- return dnsmessage.Parser{}, dnsmessage.Header{}, err
- }
-
- b = make([]byte, maxDNSPacketSize)
- for {
- n, err := c.Read(b)
- if err != nil {
- return dnsmessage.Parser{}, dnsmessage.Header{}, err
- }
- var p dnsmessage.Parser
- // Ignore invalid responses as they may be malicious
- // forgery attempts. Instead continue waiting until
- // timeout. See golang.org/issue/13281.
- h, err := p.Start(b[:n])
- if err != nil {
- continue
- }
- q, err := p.Question()
- if err != nil || !checkResponse(id, query, h, q) {
- continue
- }
- return p, h, nil
- }
-}
-
-func dnsStreamRoundTrip(c Conn, id uint16, query dnsmessage.Question, b []byte) (dnsmessage.Parser, dnsmessage.Header, error) {
- if _, err := c.Write(b); err != nil {
- return dnsmessage.Parser{}, dnsmessage.Header{}, err
- }
-
- b = make([]byte, 1280) // 1280 is a reasonable initial size for IP over Ethernet, see RFC 4035
- if _, err := io.ReadFull(c, b[:2]); err != nil {
- return dnsmessage.Parser{}, dnsmessage.Header{}, err
- }
- l := int(b[0])<<8 | int(b[1])
- if l > len(b) {
- b = make([]byte, l)
- }
- n, err := io.ReadFull(c, b[:l])
- if err != nil {
- return dnsmessage.Parser{}, dnsmessage.Header{}, err
- }
- var p dnsmessage.Parser
- h, err := p.Start(b[:n])
- if err != nil {
- return dnsmessage.Parser{}, dnsmessage.Header{}, errCannotUnmarshalDNSMessage
- }
- q, err := p.Question()
- if err != nil {
- return dnsmessage.Parser{}, dnsmessage.Header{}, errCannotUnmarshalDNSMessage
- }
- if !checkResponse(id, query, h, q) {
- return dnsmessage.Parser{}, dnsmessage.Header{}, errInvalidDNSResponse
- }
- return p, h, nil
-}
-
-// exchange sends a query on the connection and hopes for a response.
-func (r *Resolver) exchange(ctx context.Context, server string, q dnsmessage.Question, timeout time.Duration, useTCP bool) (dnsmessage.Parser, dnsmessage.Header, error) {
- q.Class = dnsmessage.ClassINET
- id, udpReq, tcpReq, err := newRequest(q)
- if err != nil {
- return dnsmessage.Parser{}, dnsmessage.Header{}, errCannotMarshalDNSMessage
- }
- var networks []string
- if useTCP {
- networks = []string{"tcp"}
- } else {
- networks = []string{"udp", "tcp"}
- }
- for _, network := range networks {
- ctx, cancel := context.WithDeadline(ctx, time.Now().Add(timeout))
- defer cancel()
-
- c, err := r.dial(ctx, network, server)
- if err != nil {
- return dnsmessage.Parser{}, dnsmessage.Header{}, err
- }
- if d, ok := ctx.Deadline(); ok && !d.IsZero() {
- c.SetDeadline(d)
- }
- var p dnsmessage.Parser
- var h dnsmessage.Header
- if _, ok := c.(PacketConn); ok {
- p, h, err = dnsPacketRoundTrip(c, id, q, udpReq)
- } else {
- p, h, err = dnsStreamRoundTrip(c, id, q, tcpReq)
- }
- c.Close()
- if err != nil {
- return dnsmessage.Parser{}, dnsmessage.Header{}, mapErr(err)
- }
- if err := p.SkipQuestion(); err != dnsmessage.ErrSectionDone {
- return dnsmessage.Parser{}, dnsmessage.Header{}, errInvalidDNSResponse
- }
- if h.Truncated { // see RFC 5966
- continue
- }
- return p, h, nil
- }
- return dnsmessage.Parser{}, dnsmessage.Header{}, errNoAnswerFromDNSServer
-}
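The exchange above is reached through the public net.Resolver hooks; a hedged sketch that forces the pure-Go path and redirects every query to one server (192.0.2.53 is a documentation-range placeholder; substitute a real resolver address):

package main

import (
	"context"
	"log"
	"net"
	"time"
)

func main() {
	r := &net.Resolver{
		PreferGo: true, // use the Go resolver implemented in this file
		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, network, "192.0.2.53:53") // ignore the configured server
		},
	}
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	addrs, err := r.LookupHost(ctx, "example.com")
	if err != nil {
		log.Fatal(err)
	}
	log.Println(addrs)
}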
-
-// checkHeader performs basic sanity checks on the header.
-func checkHeader(p *dnsmessage.Parser, h dnsmessage.Header) error {
- if h.RCode == dnsmessage.RCodeNameError {
- return errNoSuchHost
- }
-
- _, err := p.AnswerHeader()
- if err != nil && err != dnsmessage.ErrSectionDone {
- return errCannotUnmarshalDNSMessage
- }
-
- // libresolv continues to the next server when it receives
- // an invalid referral response. See golang.org/issue/15434.
- if h.RCode == dnsmessage.RCodeSuccess && !h.Authoritative && !h.RecursionAvailable && err == dnsmessage.ErrSectionDone {
- return errLameReferral
- }
-
- if h.RCode != dnsmessage.RCodeSuccess && h.RCode != dnsmessage.RCodeNameError {
- // None of the error codes make sense
- // for the query we sent. If we didn't get
- // a name error and we didn't get success,
- // the server is behaving incorrectly or
- // having temporary trouble.
- if h.RCode == dnsmessage.RCodeServerFailure {
- return errServerTemporarilyMisbehaving
- }
- return errServerMisbehaving
- }
-
- return nil
-}
-
-func skipToAnswer(p *dnsmessage.Parser, qtype dnsmessage.Type) error {
- for {
- h, err := p.AnswerHeader()
- if err == dnsmessage.ErrSectionDone {
- return errNoSuchHost
- }
- if err != nil {
- return errCannotUnmarshalDNSMessage
- }
- if h.Type == qtype {
- return nil
- }
- if err := p.SkipAnswer(); err != nil {
- return errCannotUnmarshalDNSMessage
- }
- }
-}
-
-// Do a lookup for a single name, which must be rooted
- // (otherwise the answers will not be found).
-func (r *Resolver) tryOneName(ctx context.Context, cfg *dnsConfig, name string, qtype dnsmessage.Type) (dnsmessage.Parser, string, error) {
- var lastErr error
- serverOffset := cfg.serverOffset()
- sLen := uint32(len(cfg.servers))
-
- n, err := dnsmessage.NewName(name)
- if err != nil {
- return dnsmessage.Parser{}, "", errCannotMarshalDNSMessage
- }
- q := dnsmessage.Question{
- Name: n,
- Type: qtype,
- Class: dnsmessage.ClassINET,
- }
-
- for i := 0; i < cfg.attempts; i++ {
- for j := uint32(0); j < sLen; j++ {
- server := cfg.servers[(serverOffset+j)%sLen]
-
- p, h, err := r.exchange(ctx, server, q, cfg.timeout, cfg.useTCP)
- if err != nil {
- dnsErr := &DNSError{
- Err: err.Error(),
- Name: name,
- Server: server,
- }
- if nerr, ok := err.(Error); ok && nerr.Timeout() {
- dnsErr.IsTimeout = true
- }
- // Set IsTemporary for socket-level errors. Note that this flag
- // may also be used to indicate a SERVFAIL response.
- if _, ok := err.(*OpError); ok {
- dnsErr.IsTemporary = true
- }
- lastErr = dnsErr
- continue
- }
-
- if err := checkHeader(&p, h); err != nil {
- dnsErr := &DNSError{
- Err: err.Error(),
- Name: name,
- Server: server,
- }
- if err == errServerTemporarilyMisbehaving {
- dnsErr.IsTemporary = true
- }
- if err == errNoSuchHost {
- // The name does not exist, so trying
- // another server won't help.
-
- dnsErr.IsNotFound = true
- return p, server, dnsErr
- }
- lastErr = dnsErr
- continue
- }
-
- err = skipToAnswer(&p, qtype)
- if err == nil {
- return p, server, nil
- }
- lastErr = &DNSError{
- Err: err.Error(),
- Name: name,
- Server: server,
- }
- if err == errNoSuchHost {
- // The name does not exist, so trying another
- // server won't help.
-
- lastErr.(*DNSError).IsNotFound = true
- return p, server, lastErr
- }
- }
- }
- return dnsmessage.Parser{}, "", lastErr
-}
-
-// A resolverConfig represents a DNS stub resolver configuration.
-type resolverConfig struct {
- initOnce sync.Once // guards init of resolverConfig
-
- // ch is used as a semaphore that only allows one lookup at a
- // time to recheck resolv.conf.
- ch chan struct{} // guards lastChecked and modTime
- lastChecked time.Time // last time resolv.conf was checked
-
- mu sync.RWMutex // protects dnsConfig
- dnsConfig *dnsConfig // parsed resolv.conf structure used in lookups
-}
-
-var resolvConf resolverConfig
-
-// init initializes conf and is only called via conf.initOnce.
-func (conf *resolverConfig) init() {
- // Set dnsConfig and lastChecked so we don't parse
- // resolv.conf twice the first time.
- conf.dnsConfig = systemConf().resolv
- if conf.dnsConfig == nil {
- conf.dnsConfig = dnsReadConfig("/etc/resolv.conf")
- }
- conf.lastChecked = time.Now()
-
- // Prepare ch so that only one update of resolverConfig may
- // run at once.
- conf.ch = make(chan struct{}, 1)
-}
-
-// tryUpdate tries to update conf with the named resolv.conf file.
-// The name variable only exists for testing. It is otherwise always
-// "/etc/resolv.conf".
-func (conf *resolverConfig) tryUpdate(name string) {
- conf.initOnce.Do(conf.init)
-
- // Ensure only one update at a time checks resolv.conf.
- if !conf.tryAcquireSema() {
- return
- }
- defer conf.releaseSema()
-
- now := time.Now()
- if conf.lastChecked.After(now.Add(-5 * time.Second)) {
- return
- }
- conf.lastChecked = now
-
- var mtime time.Time
- if fi, err := os.Stat(name); err == nil {
- mtime = fi.ModTime()
- }
- if mtime.Equal(conf.dnsConfig.mtime) {
- return
- }
-
- dnsConf := dnsReadConfig(name)
- conf.mu.Lock()
- conf.dnsConfig = dnsConf
- conf.mu.Unlock()
-}
-
-func (conf *resolverConfig) tryAcquireSema() bool {
- select {
- case conf.ch <- struct{}{}:
- return true
- default:
- return false
- }
-}
-
-func (conf *resolverConfig) releaseSema() {
- <-conf.ch
-}
-
-func (r *Resolver) lookup(ctx context.Context, name string, qtype dnsmessage.Type) (dnsmessage.Parser, string, error) {
- if !isDomainName(name) {
- // We used to use "invalid domain name" as the error,
- // but that is a detail of the specific lookup mechanism.
- // Other lookups might allow broader name syntax
- // (for example Multicast DNS allows UTF-8; see RFC 6762).
- // For consistency with libc resolvers, report no such host.
- return dnsmessage.Parser{}, "", &DNSError{Err: errNoSuchHost.Error(), Name: name, IsNotFound: true}
- }
- resolvConf.tryUpdate("/etc/resolv.conf")
- resolvConf.mu.RLock()
- conf := resolvConf.dnsConfig
- resolvConf.mu.RUnlock()
- var (
- p dnsmessage.Parser
- server string
- err error
- )
- for _, fqdn := range conf.nameList(name) {
- p, server, err = r.tryOneName(ctx, conf, fqdn, qtype)
- if err == nil {
- break
- }
- if nerr, ok := err.(Error); ok && nerr.Temporary() && r.strictErrors() {
- // If we hit a temporary error with StrictErrors enabled,
- // stop immediately instead of trying more names.
- break
- }
- }
- if err == nil {
- return p, server, nil
- }
- if err, ok := err.(*DNSError); ok {
- // Show original name passed to lookup, not suffixed one.
- // In general we might have tried many suffixes; showing
- // just one is misleading. See also golang.org/issue/6324.
- err.Name = name
- }
- return dnsmessage.Parser{}, "", err
-}
-
-// avoidDNS reports whether this is a hostname for which we should not
-// use DNS. Currently this includes only .onion, per RFC 7686. See
-// golang.org/issue/13705. Does not cover .local names (RFC 6762),
-// see golang.org/issue/16739.
-func avoidDNS(name string) bool {
- if name == "" {
- return true
- }
- if name[len(name)-1] == '.' {
- name = name[:len(name)-1]
- }
- return stringsHasSuffixFold(name, ".onion")
-}
-
-// nameList returns a list of names for sequential DNS queries.
-func (conf *dnsConfig) nameList(name string) []string {
- if avoidDNS(name) {
- return nil
- }
-
- // Check name length (see isDomainName).
- l := len(name)
- rooted := l > 0 && name[l-1] == '.'
- if l > 254 || l == 254 && rooted {
- return nil
- }
-
- // If name is rooted (trailing dot), try only that name.
- if rooted {
- return []string{name}
- }
-
- hasNdots := count(name, '.') >= conf.ndots
- name += "."
- l++
-
- // Build list of search choices.
- names := make([]string, 0, 1+len(conf.search))
- // If name has enough dots, try unsuffixed first.
- if hasNdots {
- names = append(names, name)
- }
- // Try suffixes that are not too long (see isDomainName).
- for _, suffix := range conf.search {
- if l+len(suffix) <= 254 {
- names = append(names, name+suffix)
- }
- }
- // Try unsuffixed, if not tried first above.
- if !hasNdots {
- names = append(names, name)
- }
- return names
-}
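A re-statement of the ordering rule above (nameOrder is hypothetical and omits the 254-byte length guard): a name with at least ndots dots is tried unsuffixed first, otherwise last.

package main

import (
	"fmt"
	"strings"
)

// nameOrder mirrors nameList: the rooted name first when it has enough dots,
// then each search suffix, then the rooted name last otherwise.
func nameOrder(name string, ndots int, search []string) []string {
	hasNdots := strings.Count(name, ".") >= ndots
	fq := name + "."
	var out []string
	if hasNdots {
		out = append(out, fq)
	}
	for _, suffix := range search {
		out = append(out, fq+suffix)
	}
	if !hasNdots {
		out = append(out, fq)
	}
	return out
}

func main() {
	fmt.Println(nameOrder("host", 1, []string{"example.com."}))
	// [host.example.com. host.]
	fmt.Println(nameOrder("db.svc", 1, []string{"example.com."}))
	// [db.svc. db.svc.example.com.]
}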
-
-// hostLookupOrder specifies the order of LookupHost lookup strategies.
-// It is basically a simplified representation of nsswitch.conf.
-// "files" means /etc/hosts.
-type hostLookupOrder int
-
-const (
- // hostLookupCgo means defer to cgo.
- hostLookupCgo hostLookupOrder = iota
- hostLookupFilesDNS // files first
- hostLookupDNSFiles // dns first
- hostLookupFiles // only files
- hostLookupDNS // only DNS
-)
-
-var lookupOrderName = map[hostLookupOrder]string{
- hostLookupCgo: "cgo",
- hostLookupFilesDNS: "files,dns",
- hostLookupDNSFiles: "dns,files",
- hostLookupFiles: "files",
- hostLookupDNS: "dns",
-}
-
-func (o hostLookupOrder) String() string {
- if s, ok := lookupOrderName[o]; ok {
- return s
- }
- return "hostLookupOrder=" + itoa.Itoa(int(o)) + "??"
-}
-
-// goLookupHost is the native Go implementation of LookupHost.
-// Used only if cgoLookupHost refuses to handle the request
-// (that is, only if cgoLookupHost is the stub in cgo_stub.go).
-// Normally we let cgo use the C library resolver instead of
-// depending on our lookup code, so that Go and C get the same
-// answers.
-func (r *Resolver) goLookupHost(ctx context.Context, name string) (addrs []string, err error) {
- return r.goLookupHostOrder(ctx, name, hostLookupFilesDNS)
-}
-
-func (r *Resolver) goLookupHostOrder(ctx context.Context, name string, order hostLookupOrder) (addrs []string, err error) {
- if order == hostLookupFilesDNS || order == hostLookupFiles {
- // Use entries from /etc/hosts if they match.
- addrs = lookupStaticHost(name)
- if len(addrs) > 0 || order == hostLookupFiles {
- return
- }
- }
- ips, _, err := r.goLookupIPCNAMEOrder(ctx, "ip", name, order)
- if err != nil {
- return
- }
- addrs = make([]string, 0, len(ips))
- for _, ip := range ips {
- addrs = append(addrs, ip.String())
- }
- return
-}
-
-// lookup entries from /etc/hosts
-func goLookupIPFiles(name string) (addrs []IPAddr) {
- for _, haddr := range lookupStaticHost(name) {
- haddr, zone := splitHostZone(haddr)
- if ip := ParseIP(haddr); ip != nil {
- addr := IPAddr{IP: ip, Zone: zone}
- addrs = append(addrs, addr)
- }
- }
- sortByRFC6724(addrs)
- return
-}
-
-// goLookupIP is the native Go implementation of LookupIP.
-// The libc versions are in cgo_*.go.
-func (r *Resolver) goLookupIP(ctx context.Context, network, host string) (addrs []IPAddr, err error) {
- order := systemConf().hostLookupOrder(r, host)
- addrs, _, err = r.goLookupIPCNAMEOrder(ctx, network, host, order)
- return
-}
-
-func (r *Resolver) goLookupIPCNAMEOrder(ctx context.Context, network, name string, order hostLookupOrder) (addrs []IPAddr, cname dnsmessage.Name, err error) {
- if order == hostLookupFilesDNS || order == hostLookupFiles {
- addrs = goLookupIPFiles(name)
- if len(addrs) > 0 || order == hostLookupFiles {
- return addrs, dnsmessage.Name{}, nil
- }
- }
- if !isDomainName(name) {
- // See comment in func lookup above about use of errNoSuchHost.
- return nil, dnsmessage.Name{}, &DNSError{Err: errNoSuchHost.Error(), Name: name, IsNotFound: true}
- }
- resolvConf.tryUpdate("/etc/resolv.conf")
- resolvConf.mu.RLock()
- conf := resolvConf.dnsConfig
- resolvConf.mu.RUnlock()
- type result struct {
- p dnsmessage.Parser
- server string
- error
- }
- lane := make(chan result, 1)
- qtypes := []dnsmessage.Type{dnsmessage.TypeA, dnsmessage.TypeAAAA}
- switch ipVersion(network) {
- case '4':
- qtypes = []dnsmessage.Type{dnsmessage.TypeA}
- case '6':
- qtypes = []dnsmessage.Type{dnsmessage.TypeAAAA}
- }
- var queryFn func(fqdn string, qtype dnsmessage.Type)
- var responseFn func(fqdn string, qtype dnsmessage.Type) result
- if conf.singleRequest {
- queryFn = func(fqdn string, qtype dnsmessage.Type) {}
- responseFn = func(fqdn string, qtype dnsmessage.Type) result {
- dnsWaitGroup.Add(1)
- defer dnsWaitGroup.Done()
- p, server, err := r.tryOneName(ctx, conf, fqdn, qtype)
- return result{p, server, err}
- }
- } else {
- queryFn = func(fqdn string, qtype dnsmessage.Type) {
- dnsWaitGroup.Add(1)
- go func(qtype dnsmessage.Type) {
- p, server, err := r.tryOneName(ctx, conf, fqdn, qtype)
- lane <- result{p, server, err}
- dnsWaitGroup.Done()
- }(qtype)
- }
- responseFn = func(fqdn string, qtype dnsmessage.Type) result {
- return <-lane
- }
- }
- var lastErr error
- for _, fqdn := range conf.nameList(name) {
- for _, qtype := range qtypes {
- queryFn(fqdn, qtype)
- }
- hitStrictError := false
- for _, qtype := range qtypes {
- result := responseFn(fqdn, qtype)
- if result.error != nil {
- if nerr, ok := result.error.(Error); ok && nerr.Temporary() && r.strictErrors() {
- // This error will abort the nameList loop.
- hitStrictError = true
- lastErr = result.error
- } else if lastErr == nil || fqdn == name+"." {
- // Prefer error for original name.
- lastErr = result.error
- }
- continue
- }
-
- // Presotto says it's okay to assume that servers listed in
- // /etc/resolv.conf are recursive resolvers.
- //
- // We asked for recursion, so it should have included all the
- // answers we need in this one packet.
- //
- // Further, RFC 1035 section 4.3.1 says that "the recursive
- // response to a query will be... The answer to the query,
- // possibly prefaced by one or more CNAME RRs that specify
- // aliases encountered on the way to an answer."
- //
- // Therefore, we should be able to assume that we can ignore
- // CNAMEs and that the A and AAAA records we requested are
- // for the canonical name.
-
- loop:
- for {
- h, err := result.p.AnswerHeader()
- if err != nil && err != dnsmessage.ErrSectionDone {
- lastErr = &DNSError{
- Err: "cannot marshal DNS message",
- Name: name,
- Server: result.server,
- }
- }
- if err != nil {
- break
- }
- switch h.Type {
- case dnsmessage.TypeA:
- a, err := result.p.AResource()
- if err != nil {
- lastErr = &DNSError{
- Err: "cannot marshal DNS message",
- Name: name,
- Server: result.server,
- }
- break loop
- }
- addrs = append(addrs, IPAddr{IP: IP(a.A[:])})
-
- case dnsmessage.TypeAAAA:
- aaaa, err := result.p.AAAAResource()
- if err != nil {
- lastErr = &DNSError{
- Err: "cannot marshal DNS message",
- Name: name,
- Server: result.server,
- }
- break loop
- }
- addrs = append(addrs, IPAddr{IP: IP(aaaa.AAAA[:])})
-
- default:
- if err := result.p.SkipAnswer(); err != nil {
- lastErr = &DNSError{
- Err: "cannot marshal DNS message",
- Name: name,
- Server: result.server,
- }
- break loop
- }
- continue
- }
- if cname.Length == 0 && h.Name.Length != 0 {
- cname = h.Name
- }
- }
- }
- if hitStrictError {
- // If either family hit an error with StrictErrors enabled,
- // discard all addresses. This ensures that network flakiness
- // cannot turn a dualstack hostname IPv4/IPv6-only.
- addrs = nil
- break
- }
- if len(addrs) > 0 {
- break
- }
- }
- if lastErr, ok := lastErr.(*DNSError); ok {
- // Show original name passed to lookup, not suffixed one.
- // In general we might have tried many suffixes; showing
- // just one is misleading. See also golang.org/issue/6324.
- lastErr.Name = name
- }
- sortByRFC6724(addrs)
- if len(addrs) == 0 {
- if order == hostLookupDNSFiles {
- addrs = goLookupIPFiles(name)
- }
- if len(addrs) == 0 && lastErr != nil {
- return nil, dnsmessage.Name{}, lastErr
- }
- }
- return addrs, cname, nil
-}
-
-// goLookupCNAME is the native Go (non-cgo) implementation of LookupCNAME.
-func (r *Resolver) goLookupCNAME(ctx context.Context, host string) (string, error) {
- order := systemConf().hostLookupOrder(r, host)
- _, cname, err := r.goLookupIPCNAMEOrder(ctx, "ip", host, order)
- return cname.String(), err
-}
-
-// goLookupPTR is the native Go implementation of LookupAddr.
-// Used only if cgoLookupPTR refuses to handle the request (that is,
-// only if cgoLookupPTR is the stub in cgo_stub.go).
-// Normally we let cgo use the C library resolver instead of depending
-// on our lookup code, so that Go and C get the same answers.
-func (r *Resolver) goLookupPTR(ctx context.Context, addr string) ([]string, error) {
- names := lookupStaticAddr(addr)
- if len(names) > 0 {
- return names, nil
- }
- arpa, err := reverseaddr(addr)
- if err != nil {
- return nil, err
- }
- p, server, err := r.lookup(ctx, arpa, dnsmessage.TypePTR)
- if err != nil {
- return nil, err
- }
- var ptrs []string
- for {
- h, err := p.AnswerHeader()
- if err == dnsmessage.ErrSectionDone {
- break
- }
- if err != nil {
- return nil, &DNSError{
- Err: "cannot marshal DNS message",
- Name: addr,
- Server: server,
- }
- }
- if h.Type != dnsmessage.TypePTR {
- err := p.SkipAnswer()
- if err != nil {
- return nil, &DNSError{
- Err: "cannot marshal DNS message",
- Name: addr,
- Server: server,
- }
- }
- continue
- }
- ptr, err := p.PTRResource()
- if err != nil {
- return nil, &DNSError{
- Err: "cannot marshal DNS message",
- Name: addr,
- Server: server,
- }
- }
- ptrs = append(ptrs, ptr.PTR.String())
- }
- return ptrs, nil
-}
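The function above is what a plain reverse lookup ultimately calls on Unix when the Go resolver is in use; a usage sketch (8.8.8.8 is an arbitrary public address):

package main

import (
	"fmt"
	"log"
	"net"
)

func main() {
	names, err := net.LookupAddr("8.8.8.8")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(names) // e.g. [dns.google.]
}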
diff --git a/contrib/go/_std_1.18/src/net/dnsconfig_unix.go b/contrib/go/_std_1.18/src/net/dnsconfig_unix.go
deleted file mode 100644
index 5ad254cd7c..0000000000
--- a/contrib/go/_std_1.18/src/net/dnsconfig_unix.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-// Read system DNS config from /etc/resolv.conf
-
-package net
-
-import (
- "internal/bytealg"
- "os"
- "sync/atomic"
- "time"
-)
-
-var (
- defaultNS = []string{"127.0.0.1:53", "[::1]:53"}
- getHostname = os.Hostname // variable for testing
-)
-
-type dnsConfig struct {
- servers []string // server addresses (in host:port form) to use
- search []string // rooted suffixes to append to local name
- ndots int // number of dots in name to trigger absolute lookup
- timeout time.Duration // wait before giving up on a query, including retries
- attempts int // lost packets before giving up on server
- rotate bool // round robin among servers
- unknownOpt bool // anything unknown was encountered
- lookup []string // OpenBSD top-level database "lookup" order
- err error // any error that occurs during open of resolv.conf
- mtime time.Time // time of resolv.conf modification
- soffset uint32 // used by serverOffset
- singleRequest bool // use sequential A and AAAA queries instead of parallel queries
- useTCP bool // force usage of TCP for DNS resolutions
-}
-
-// See resolv.conf(5) on a Linux machine.
-func dnsReadConfig(filename string) *dnsConfig {
- conf := &dnsConfig{
- ndots: 1,
- timeout: 5 * time.Second,
- attempts: 2,
- }
- file, err := open(filename)
- if err != nil {
- conf.servers = defaultNS
- conf.search = dnsDefaultSearch()
- conf.err = err
- return conf
- }
- defer file.close()
- if fi, err := file.file.Stat(); err == nil {
- conf.mtime = fi.ModTime()
- } else {
- conf.servers = defaultNS
- conf.search = dnsDefaultSearch()
- conf.err = err
- return conf
- }
- for line, ok := file.readLine(); ok; line, ok = file.readLine() {
- if len(line) > 0 && (line[0] == ';' || line[0] == '#') {
- // comment.
- continue
- }
- f := getFields(line)
- if len(f) < 1 {
- continue
- }
- switch f[0] {
- case "nameserver": // add one name server
- if len(f) > 1 && len(conf.servers) < 3 { // small, but the standard limit
- // One more check: make sure server name is
- // just an IP address. Otherwise we need DNS
- // to look it up.
- if parseIPv4(f[1]) != nil {
- conf.servers = append(conf.servers, JoinHostPort(f[1], "53"))
- } else if ip, _ := parseIPv6Zone(f[1]); ip != nil {
- conf.servers = append(conf.servers, JoinHostPort(f[1], "53"))
- }
- }
-
- case "domain": // set search path to just this domain
- if len(f) > 1 {
- conf.search = []string{ensureRooted(f[1])}
- }
-
- case "search": // set search path to given servers
- conf.search = make([]string, len(f)-1)
- for i := 0; i < len(conf.search); i++ {
- conf.search[i] = ensureRooted(f[i+1])
- }
-
- case "options": // magic options
- for _, s := range f[1:] {
- switch {
- case hasPrefix(s, "ndots:"):
- n, _, _ := dtoi(s[6:])
- if n < 0 {
- n = 0
- } else if n > 15 {
- n = 15
- }
- conf.ndots = n
- case hasPrefix(s, "timeout:"):
- n, _, _ := dtoi(s[8:])
- if n < 1 {
- n = 1
- }
- conf.timeout = time.Duration(n) * time.Second
- case hasPrefix(s, "attempts:"):
- n, _, _ := dtoi(s[9:])
- if n < 1 {
- n = 1
- }
- conf.attempts = n
- case s == "rotate":
- conf.rotate = true
- case s == "single-request" || s == "single-request-reopen":
- // Linux option:
- // http://man7.org/linux/man-pages/man5/resolv.conf.5.html
- // "By default, glibc performs IPv4 and IPv6 lookups in parallel [...]
- // This option disables the behavior and makes glibc
- // perform the IPv6 and IPv4 requests sequentially."
- conf.singleRequest = true
- case s == "use-vc" || s == "usevc" || s == "tcp":
- // Linux (use-vc), FreeBSD (usevc) and OpenBSD (tcp) option:
- // http://man7.org/linux/man-pages/man5/resolv.conf.5.html
- // "Sets RES_USEVC in _res.options.
- // This option forces the use of TCP for DNS resolutions."
- // https://www.freebsd.org/cgi/man.cgi?query=resolv.conf&sektion=5&manpath=freebsd-release-ports
- // https://man.openbsd.org/resolv.conf.5
- conf.useTCP = true
- default:
- conf.unknownOpt = true
- }
- }
-
- case "lookup":
- // OpenBSD option:
- // https://www.openbsd.org/cgi-bin/man.cgi/OpenBSD-current/man5/resolv.conf.5
- // "the legal space-separated values are: bind, file, yp"
- conf.lookup = f[1:]
-
- default:
- conf.unknownOpt = true
- }
- }
- if len(conf.servers) == 0 {
- conf.servers = defaultNS
- }
- if len(conf.search) == 0 {
- conf.search = dnsDefaultSearch()
- }
- return conf
-}
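As a concrete illustration (hypothetical values), a resolv.conf like the following yields servers [10.0.0.1:53 10.0.0.2:53], search [corp.example.], ndots 2, a 3-second timeout, and server rotation:

nameserver 10.0.0.1
nameserver 10.0.0.2
search corp.example
options ndots:2 timeout:3 rotate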
-
-// serverOffset returns an offset that can be used to determine
-// indices of servers in c.servers when making queries.
-// When the rotate option is enabled, this offset increases.
-// Otherwise it is always 0.
-func (c *dnsConfig) serverOffset() uint32 {
- if c.rotate {
- return atomic.AddUint32(&c.soffset, 1) - 1 // return 0 to start
- }
- return 0
-}
-
-func dnsDefaultSearch() []string {
- hn, err := getHostname()
- if err != nil {
- // best effort
- return nil
- }
- if i := bytealg.IndexByteString(hn, '.'); i >= 0 && i < len(hn)-1 {
- return []string{ensureRooted(hn[i+1:])}
- }
- return nil
-}
-
-func hasPrefix(s, prefix string) bool {
- return len(s) >= len(prefix) && s[:len(prefix)] == prefix
-}
-
-func ensureRooted(s string) string {
- if len(s) > 0 && s[len(s)-1] == '.' {
- return s
- }
- return s + "."
-}
diff --git a/contrib/go/_std_1.18/src/net/error_posix.go b/contrib/go/_std_1.18/src/net/error_posix.go
deleted file mode 100644
index 10e3caa67b..0000000000
--- a/contrib/go/_std_1.18/src/net/error_posix.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris || windows
-
-package net
-
-import (
- "os"
- "syscall"
-)
-
-// wrapSyscallError takes an error and a syscall name. If the error is
- // a syscall.Errno, it wraps it in an os.SyscallError using the syscall name.
-func wrapSyscallError(name string, err error) error {
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError(name, err)
- }
- return err
-}
diff --git a/contrib/go/_std_1.18/src/net/error_unix.go b/contrib/go/_std_1.18/src/net/error_unix.go
deleted file mode 100644
index 0e64b40ea1..0000000000
--- a/contrib/go/_std_1.18/src/net/error_unix.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || js || linux || netbsd || openbsd || solaris
-
-package net
-
-import "syscall"
-
-func isConnError(err error) bool {
- if se, ok := err.(syscall.Errno); ok {
- return se == syscall.ECONNRESET || se == syscall.ECONNABORTED
- }
- return false
-}
diff --git a/contrib/go/_std_1.18/src/net/fd_posix.go b/contrib/go/_std_1.18/src/net/fd_posix.go
deleted file mode 100644
index 1845c173bb..0000000000
--- a/contrib/go/_std_1.18/src/net/fd_posix.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows
-
-package net
-
-import (
- "internal/poll"
- "runtime"
- "syscall"
- "time"
-)
-
-// Network file descriptor.
-type netFD struct {
- pfd poll.FD
-
- // immutable until Close
- family int
- sotype int
- isConnected bool // handshake completed or use of association with peer
- net string
- laddr Addr
- raddr Addr
-}
-
-func (fd *netFD) setAddr(laddr, raddr Addr) {
- fd.laddr = laddr
- fd.raddr = raddr
- runtime.SetFinalizer(fd, (*netFD).Close)
-}
-
-func (fd *netFD) Close() error {
- runtime.SetFinalizer(fd, nil)
- return fd.pfd.Close()
-}
-
-func (fd *netFD) shutdown(how int) error {
- err := fd.pfd.Shutdown(how)
- runtime.KeepAlive(fd)
- return wrapSyscallError("shutdown", err)
-}
-
-func (fd *netFD) closeRead() error {
- return fd.shutdown(syscall.SHUT_RD)
-}
-
-func (fd *netFD) closeWrite() error {
- return fd.shutdown(syscall.SHUT_WR)
-}
-
-func (fd *netFD) Read(p []byte) (n int, err error) {
- n, err = fd.pfd.Read(p)
- runtime.KeepAlive(fd)
- return n, wrapSyscallError(readSyscallName, err)
-}
-
-func (fd *netFD) readFrom(p []byte) (n int, sa syscall.Sockaddr, err error) {
- n, sa, err = fd.pfd.ReadFrom(p)
- runtime.KeepAlive(fd)
- return n, sa, wrapSyscallError(readFromSyscallName, err)
-}
-func (fd *netFD) readFromInet4(p []byte, from *syscall.SockaddrInet4) (n int, err error) {
- n, err = fd.pfd.ReadFromInet4(p, from)
- runtime.KeepAlive(fd)
- return n, wrapSyscallError(readFromSyscallName, err)
-}
-
-func (fd *netFD) readFromInet6(p []byte, from *syscall.SockaddrInet6) (n int, err error) {
- n, err = fd.pfd.ReadFromInet6(p, from)
- runtime.KeepAlive(fd)
- return n, wrapSyscallError(readFromSyscallName, err)
-}
-
-func (fd *netFD) readMsg(p []byte, oob []byte, flags int) (n, oobn, retflags int, sa syscall.Sockaddr, err error) {
- n, oobn, retflags, sa, err = fd.pfd.ReadMsg(p, oob, flags)
- runtime.KeepAlive(fd)
- return n, oobn, retflags, sa, wrapSyscallError(readMsgSyscallName, err)
-}
-
-func (fd *netFD) readMsgInet4(p []byte, oob []byte, flags int, sa *syscall.SockaddrInet4) (n, oobn, retflags int, err error) {
- n, oobn, retflags, err = fd.pfd.ReadMsgInet4(p, oob, flags, sa)
- runtime.KeepAlive(fd)
- return n, oobn, retflags, wrapSyscallError(readMsgSyscallName, err)
-}
-
-func (fd *netFD) readMsgInet6(p []byte, oob []byte, flags int, sa *syscall.SockaddrInet6) (n, oobn, retflags int, err error) {
- n, oobn, retflags, err = fd.pfd.ReadMsgInet6(p, oob, flags, sa)
- runtime.KeepAlive(fd)
- return n, oobn, retflags, wrapSyscallError(readMsgSyscallName, err)
-}
-
-func (fd *netFD) Write(p []byte) (nn int, err error) {
- nn, err = fd.pfd.Write(p)
- runtime.KeepAlive(fd)
- return nn, wrapSyscallError(writeSyscallName, err)
-}
-
-func (fd *netFD) writeTo(p []byte, sa syscall.Sockaddr) (n int, err error) {
- n, err = fd.pfd.WriteTo(p, sa)
- runtime.KeepAlive(fd)
- return n, wrapSyscallError(writeToSyscallName, err)
-}
-
-func (fd *netFD) writeToInet4(p []byte, sa *syscall.SockaddrInet4) (n int, err error) {
- n, err = fd.pfd.WriteToInet4(p, sa)
- runtime.KeepAlive(fd)
- return n, wrapSyscallError(writeToSyscallName, err)
-}
-
-func (fd *netFD) writeToInet6(p []byte, sa *syscall.SockaddrInet6) (n int, err error) {
- n, err = fd.pfd.WriteToInet6(p, sa)
- runtime.KeepAlive(fd)
- return n, wrapSyscallError(writeToSyscallName, err)
-}
-
-func (fd *netFD) writeMsg(p []byte, oob []byte, sa syscall.Sockaddr) (n int, oobn int, err error) {
- n, oobn, err = fd.pfd.WriteMsg(p, oob, sa)
- runtime.KeepAlive(fd)
- return n, oobn, wrapSyscallError(writeMsgSyscallName, err)
-}
-
-func (fd *netFD) writeMsgInet4(p []byte, oob []byte, sa *syscall.SockaddrInet4) (n int, oobn int, err error) {
- n, oobn, err = fd.pfd.WriteMsgInet4(p, oob, sa)
- runtime.KeepAlive(fd)
- return n, oobn, wrapSyscallError(writeMsgSyscallName, err)
-}
-
-func (fd *netFD) writeMsgInet6(p []byte, oob []byte, sa *syscall.SockaddrInet6) (n int, oobn int, err error) {
- n, oobn, err = fd.pfd.WriteMsgInet6(p, oob, sa)
- runtime.KeepAlive(fd)
- return n, oobn, wrapSyscallError(writeMsgSyscallName, err)
-}
-
-func (fd *netFD) SetDeadline(t time.Time) error {
- return fd.pfd.SetDeadline(t)
-}
-
-func (fd *netFD) SetReadDeadline(t time.Time) error {
- return fd.pfd.SetReadDeadline(t)
-}
-
-func (fd *netFD) SetWriteDeadline(t time.Time) error {
- return fd.pfd.SetWriteDeadline(t)
-}
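
Every wrapper above pairs the poll.FD call with runtime.KeepAlive(fd). setAddr installs a finalizer that closes the descriptor, so without KeepAlive the collector could finalize fd, and close Sysfd, while a syscall on it is still in flight. A minimal illustration of the same pattern, with hypothetical names:

    import (
        "runtime"
        "syscall"
    )

    type handle struct{ fd int }

    func newHandle(fd int) *handle {
        h := &handle{fd: fd}
        runtime.SetFinalizer(h, func(h *handle) { syscall.Close(h.fd) })
        return h
    }

    func (h *handle) read(p []byte) (int, error) {
        n, err := syscall.Read(h.fd, p)
        runtime.KeepAlive(h) // h must stay live until the read returns
        return n, err
    }
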
diff --git a/contrib/go/_std_1.18/src/net/fd_unix.go b/contrib/go/_std_1.18/src/net/fd_unix.go
deleted file mode 100644
index aaa7a1c185..0000000000
--- a/contrib/go/_std_1.18/src/net/fd_unix.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-package net
-
-import (
- "context"
- "internal/poll"
- "os"
- "runtime"
- "syscall"
-)
-
-const (
- readSyscallName = "read"
- readFromSyscallName = "recvfrom"
- readMsgSyscallName = "recvmsg"
- writeSyscallName = "write"
- writeToSyscallName = "sendto"
- writeMsgSyscallName = "sendmsg"
-)
-
-func newFD(sysfd, family, sotype int, net string) (*netFD, error) {
- ret := &netFD{
- pfd: poll.FD{
- Sysfd: sysfd,
- IsStream: sotype == syscall.SOCK_STREAM,
- ZeroReadIsEOF: sotype != syscall.SOCK_DGRAM && sotype != syscall.SOCK_RAW,
- },
- family: family,
- sotype: sotype,
- net: net,
- }
- return ret, nil
-}
-
-func (fd *netFD) init() error {
- return fd.pfd.Init(fd.net, true)
-}
-
-func (fd *netFD) name() string {
- var ls, rs string
- if fd.laddr != nil {
- ls = fd.laddr.String()
- }
- if fd.raddr != nil {
- rs = fd.raddr.String()
- }
- return fd.net + ":" + ls + "->" + rs
-}
-
-func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) (rsa syscall.Sockaddr, ret error) {
- // Do not need to call fd.writeLock here,
- // because fd is not yet accessible to user,
- // so no concurrent operations are possible.
- switch err := connectFunc(fd.pfd.Sysfd, ra); err {
- case syscall.EINPROGRESS, syscall.EALREADY, syscall.EINTR:
- case nil, syscall.EISCONN:
- select {
- case <-ctx.Done():
- return nil, mapErr(ctx.Err())
- default:
- }
- if err := fd.pfd.Init(fd.net, true); err != nil {
- return nil, err
- }
- runtime.KeepAlive(fd)
- return nil, nil
- case syscall.EINVAL:
- // On Solaris and illumos we can see EINVAL if the socket has
- // already been accepted and closed by the server. Treat this
- // as a successful connection--writes to the socket will see
- // EOF. For details and a test case in C see
- // https://golang.org/issue/6828.
- if runtime.GOOS == "solaris" || runtime.GOOS == "illumos" {
- return nil, nil
- }
- fallthrough
- default:
- return nil, os.NewSyscallError("connect", err)
- }
- if err := fd.pfd.Init(fd.net, true); err != nil {
- return nil, err
- }
- if deadline, hasDeadline := ctx.Deadline(); hasDeadline {
- fd.pfd.SetWriteDeadline(deadline)
- defer fd.pfd.SetWriteDeadline(noDeadline)
- }
-
- // Start the "interrupter" goroutine, if this context might be canceled.
- //
- // The interrupter goroutine waits for the context to be done and
- // interrupts the dial (by altering the fd's write deadline, which
- // wakes up waitWrite).
- ctxDone := ctx.Done()
- if ctxDone != nil {
- // Wait for the interrupter goroutine to exit before returning
- // from connect.
- done := make(chan struct{})
- interruptRes := make(chan error)
- defer func() {
- close(done)
- if ctxErr := <-interruptRes; ctxErr != nil && ret == nil {
- // The interrupter goroutine called SetWriteDeadline,
- // but the connect code below had returned from
- // waitWrite already and did a successful connect (ret
- // == nil). Because we've now poisoned the connection
- // by making it unwritable, don't return a successful
- // dial. This was issue 16523.
- ret = mapErr(ctxErr)
- fd.Close() // prevent a leak
- }
- }()
- go func() {
- select {
- case <-ctxDone:
- // Force the runtime's poller to immediately give up
- // waiting for writability, unblocking waitWrite
- // below.
- fd.pfd.SetWriteDeadline(aLongTimeAgo)
- testHookCanceledDial()
- interruptRes <- ctx.Err()
- case <-done:
- interruptRes <- nil
- }
- }()
- }
-
- for {
- // Performing multiple connect system calls on a
- // non-blocking socket under Unix variants does not
- // necessarily result in earlier errors being
- // returned. Instead, once runtime-integrated network
- // poller tells us that the socket is ready, get the
- // SO_ERROR socket option to see if the connection
- // succeeded or failed. See issue 7474 for further
- // details.
- if err := fd.pfd.WaitWrite(); err != nil {
- select {
- case <-ctxDone:
- return nil, mapErr(ctx.Err())
- default:
- }
- return nil, err
- }
- nerr, err := getsockoptIntFunc(fd.pfd.Sysfd, syscall.SOL_SOCKET, syscall.SO_ERROR)
- if err != nil {
- return nil, os.NewSyscallError("getsockopt", err)
- }
- switch err := syscall.Errno(nerr); err {
- case syscall.EINPROGRESS, syscall.EALREADY, syscall.EINTR:
- case syscall.EISCONN:
- return nil, nil
- case syscall.Errno(0):
- // The runtime poller can wake us up spuriously;
- // see issues 14548 and 19289. Check that we are
- // really connected; if not, wait again.
- if rsa, err := syscall.Getpeername(fd.pfd.Sysfd); err == nil {
- return rsa, nil
- }
- default:
- return nil, os.NewSyscallError("connect", err)
- }
- runtime.KeepAlive(fd)
- }
-}
-
-func (fd *netFD) accept() (netfd *netFD, err error) {
- d, rsa, errcall, err := fd.pfd.Accept()
- if err != nil {
- if errcall != "" {
- err = wrapSyscallError(errcall, err)
- }
- return nil, err
- }
-
- if netfd, err = newFD(d, fd.family, fd.sotype, fd.net); err != nil {
- poll.CloseFunc(d)
- return nil, err
- }
- if err = netfd.init(); err != nil {
- netfd.Close()
- return nil, err
- }
- lsa, _ := syscall.Getsockname(netfd.pfd.Sysfd)
- netfd.setAddr(netfd.addrFunc()(lsa), netfd.addrFunc()(rsa))
- return netfd, nil
-}
-
-func (fd *netFD) dup() (f *os.File, err error) {
- ns, call, err := fd.pfd.Dup()
- if err != nil {
- if call != "" {
- err = os.NewSyscallError(call, err)
- }
- return nil, err
- }
-
- return os.NewFile(uintptr(ns), fd.name()), nil
-}
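
The connect path above interrupts a blocking wait by poisoning the write deadline once the context is done. The same deadline-poisoning trick works for any net.Conn; a hedged sketch (readWithContext is a hypothetical helper, not part of this package):

    func readWithContext(ctx context.Context, c net.Conn, p []byte) (int, error) {
        done := make(chan struct{})
        defer close(done)
        go func() {
            select {
            case <-ctx.Done():
                // Wake the blocked Read immediately; note the past
                // deadline poisons the connection for further reads.
                c.SetReadDeadline(time.Now())
            case <-done:
            }
        }()
        n, err := c.Read(p)
        if ctx.Err() != nil {
            return n, ctx.Err()
        }
        return n, err
    }
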
diff --git a/contrib/go/_std_1.18/src/net/file_unix.go b/contrib/go/_std_1.18/src/net/file_unix.go
deleted file mode 100644
index 68d7eb9ca0..0000000000
--- a/contrib/go/_std_1.18/src/net/file_unix.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-package net
-
-import (
- "internal/poll"
- "os"
- "syscall"
-)
-
-func dupSocket(f *os.File) (int, error) {
- s, call, err := poll.DupCloseOnExec(int(f.Fd()))
- if err != nil {
- if call != "" {
- err = os.NewSyscallError(call, err)
- }
- return -1, err
- }
- if err := syscall.SetNonblock(s, true); err != nil {
- poll.CloseFunc(s)
- return -1, os.NewSyscallError("setnonblock", err)
- }
- return s, nil
-}
-
-func newFileFD(f *os.File) (*netFD, error) {
- s, err := dupSocket(f)
- if err != nil {
- return nil, err
- }
- family := syscall.AF_UNSPEC
- sotype, err := syscall.GetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_TYPE)
- if err != nil {
- poll.CloseFunc(s)
- return nil, os.NewSyscallError("getsockopt", err)
- }
- lsa, _ := syscall.Getsockname(s)
- rsa, _ := syscall.Getpeername(s)
- switch lsa.(type) {
- case *syscall.SockaddrInet4:
- family = syscall.AF_INET
- case *syscall.SockaddrInet6:
- family = syscall.AF_INET6
- case *syscall.SockaddrUnix:
- family = syscall.AF_UNIX
- default:
- poll.CloseFunc(s)
- return nil, syscall.EPROTONOSUPPORT
- }
- fd, err := newFD(s, family, sotype, "")
- if err != nil {
- poll.CloseFunc(s)
- return nil, err
- }
- laddr := fd.addrFunc()(lsa)
- raddr := fd.addrFunc()(rsa)
- fd.net = laddr.Network()
- if err := fd.init(); err != nil {
- fd.Close()
- return nil, err
- }
- fd.setAddr(laddr, raddr)
- return fd, nil
-}
-
-func fileConn(f *os.File) (Conn, error) {
- fd, err := newFileFD(f)
- if err != nil {
- return nil, err
- }
- switch fd.laddr.(type) {
- case *TCPAddr:
- return newTCPConn(fd), nil
- case *UDPAddr:
- return newUDPConn(fd), nil
- case *IPAddr:
- return newIPConn(fd), nil
- case *UnixAddr:
- return newUnixConn(fd), nil
- }
- fd.Close()
- return nil, syscall.EINVAL
-}
-
-func fileListener(f *os.File) (Listener, error) {
- fd, err := newFileFD(f)
- if err != nil {
- return nil, err
- }
- switch laddr := fd.laddr.(type) {
- case *TCPAddr:
- return &TCPListener{fd: fd}, nil
- case *UnixAddr:
- return &UnixListener{fd: fd, path: laddr.Name, unlink: false}, nil
- }
- fd.Close()
- return nil, syscall.EINVAL
-}
-
-func filePacketConn(f *os.File) (PacketConn, error) {
- fd, err := newFileFD(f)
- if err != nil {
- return nil, err
- }
- switch fd.laddr.(type) {
- case *UDPAddr:
- return newUDPConn(fd), nil
- case *IPAddr:
- return newIPConn(fd), nil
- case *UnixAddr:
- return newUnixConn(fd), nil
- }
- fd.Close()
- return nil, syscall.EINVAL
-}
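
These helpers back the exported net.FileConn, net.FileListener, and net.FilePacketConn. For example, a listening socket inherited as file descriptor 3 (the fd number here is just the common socket-activation convention, not anything this file mandates) can be adopted like this:

    f := os.NewFile(3, "inherited-listener")
    ln, err := net.FileListener(f)
    if err != nil {
        log.Fatal(err)
    }
    f.Close() // safe: the listener owns a dup'd descriptor
    log.Fatal(http.Serve(ln, nil))
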
diff --git a/contrib/go/_std_1.18/src/net/hook_unix.go b/contrib/go/_std_1.18/src/net/hook_unix.go
deleted file mode 100644
index 7c36b0d6e3..0000000000
--- a/contrib/go/_std_1.18/src/net/hook_unix.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris
-
-package net
-
-import "syscall"
-
-var (
- testHookDialChannel = func() {} // for golang.org/issue/5349
- testHookCanceledDial = func() {} // for golang.org/issue/16523
-
- // Placeholders for socket system calls.
- socketFunc func(int, int, int) (int, error) = syscall.Socket
- connectFunc func(int, syscall.Sockaddr) error = syscall.Connect
- listenFunc func(int, int) error = syscall.Listen
- getsockoptIntFunc func(int, int, int) (int, error) = syscall.GetsockoptInt
-)
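
These package-level function variables exist so tests can substitute failure modes for the real system calls. An illustrative sketch of how such a hook might be used (the actual tests live elsewhere in the package):

    func TestDialRefused(t *testing.T) {
        orig := connectFunc
        connectFunc = func(fd int, sa syscall.Sockaddr) error {
            return syscall.ECONNREFUSED // simulate a refused connection
        }
        defer func() { connectFunc = orig }()
        // ... dial and assert the refused-connection handling ...
    }
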
diff --git a/contrib/go/_std_1.18/src/net/http/client.go b/contrib/go/_std_1.18/src/net/http/client.go
deleted file mode 100644
index 22db96b267..0000000000
--- a/contrib/go/_std_1.18/src/net/http/client.go
+++ /dev/null
@@ -1,1033 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP client. See RFC 7230 through 7235.
-//
-// This is the high-level Client interface.
-// The low-level implementation is in transport.go.
-
-package http
-
-import (
- "context"
- "crypto/tls"
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "log"
- "net/http/internal/ascii"
- "net/url"
- "reflect"
- "sort"
- "strings"
- "sync"
- "time"
-)
-
-// A Client is an HTTP client. Its zero value (DefaultClient) is a
-// usable client that uses DefaultTransport.
-//
-// The Client's Transport typically has internal state (cached TCP
-// connections), so Clients should be reused instead of created as
-// needed. Clients are safe for concurrent use by multiple goroutines.
-//
-// A Client is higher-level than a RoundTripper (such as Transport)
-// and additionally handles HTTP details such as cookies and
-// redirects.
-//
-// When following redirects, the Client will forward all headers set on the
-// initial Request except:
-//
-// • when forwarding sensitive headers like "Authorization",
-// "WWW-Authenticate", and "Cookie" to untrusted targets.
-// These headers will be ignored when following a redirect to a domain
-// that is not a subdomain match or exact match of the initial domain.
-// For example, a redirect from "foo.com" to either "foo.com" or "sub.foo.com"
-// will forward the sensitive headers, but a redirect to "bar.com" will not.
-//
-// • when forwarding the "Cookie" header with a non-nil cookie Jar.
-// Since each redirect may mutate the state of the cookie jar,
-// a redirect may possibly alter a cookie set in the initial request.
-// When forwarding the "Cookie" header, any mutated cookies will be omitted,
-// with the expectation that the Jar will insert those mutated cookies
-// with the updated values (assuming the origin matches).
-// If Jar is nil, the initial cookies are forwarded without change.
-//
-type Client struct {
- // Transport specifies the mechanism by which individual
- // HTTP requests are made.
- // If nil, DefaultTransport is used.
- Transport RoundTripper
-
- // CheckRedirect specifies the policy for handling redirects.
- // If CheckRedirect is not nil, the client calls it before
- // following an HTTP redirect. The arguments req and via are
- // the upcoming request and the requests made already, oldest
- // first. If CheckRedirect returns an error, the Client's Get
- // method returns both the previous Response (with its Body
- // closed) and CheckRedirect's error (wrapped in a url.Error)
- // instead of issuing the Request req.
- // As a special case, if CheckRedirect returns ErrUseLastResponse,
- // then the most recent response is returned with its body
- // unclosed, along with a nil error.
- //
- // If CheckRedirect is nil, the Client uses its default policy,
- // which is to stop after 10 consecutive requests.
- CheckRedirect func(req *Request, via []*Request) error
-
- // Jar specifies the cookie jar.
- //
- // The Jar is used to insert relevant cookies into every
- // outbound Request and is updated with the cookie values
- // of every inbound Response. The Jar is consulted for every
- // redirect that the Client follows.
- //
- // If Jar is nil, cookies are only sent if they are explicitly
- // set on the Request.
- Jar CookieJar
-
- // Timeout specifies a time limit for requests made by this
- // Client. The timeout includes connection time, any
- // redirects, and reading the response body. The timer remains
- // running after Get, Head, Post, or Do return and will
- // interrupt reading of the Response.Body.
- //
- // A Timeout of zero means no timeout.
- //
- // The Client cancels requests to the underlying Transport
- // as if the Request's Context ended.
- //
- // For compatibility, the Client will also use the deprecated
- // CancelRequest method on Transport if found. New
- // RoundTripper implementations should use the Request's Context
- // for cancellation instead of implementing CancelRequest.
- Timeout time.Duration
-}
-
-// DefaultClient is the default Client and is used by Get, Head, and Post.
-var DefaultClient = &Client{}
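
Per the doc comment above, a Client caches connections and is safe for concurrent use, so the usual pattern is one long-lived client shared across goroutines rather than a fresh client per request; the timeout value below is only an example:

    var apiClient = &http.Client{
        Timeout: 10 * time.Second,
    }

    func fetch(url string) ([]byte, error) {
        resp, err := apiClient.Get(url)
        if err != nil {
            return nil, err
        }
        defer resp.Body.Close()
        return io.ReadAll(resp.Body)
    }
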
-
-// RoundTripper is an interface representing the ability to execute a
-// single HTTP transaction, obtaining the Response for a given Request.
-//
-// A RoundTripper must be safe for concurrent use by multiple
-// goroutines.
-type RoundTripper interface {
- // RoundTrip executes a single HTTP transaction, returning
- // a Response for the provided Request.
- //
- // RoundTrip should not attempt to interpret the response. In
- // particular, RoundTrip must return err == nil if it obtained
- // a response, regardless of the response's HTTP status code.
- // A non-nil err should be reserved for failure to obtain a
- // response. Similarly, RoundTrip should not attempt to
- // handle higher-level protocol details such as redirects,
- // authentication, or cookies.
- //
- // RoundTrip should not modify the request, except for
- // consuming and closing the Request's Body. RoundTrip may
- // read fields of the request in a separate goroutine. Callers
- // should not mutate or reuse the request until the Response's
- // Body has been closed.
- //
- // RoundTrip must always close the body, including on errors,
- // but depending on the implementation may do so in a separate
- // goroutine even after RoundTrip returns. This means that
- // callers wanting to reuse the body for subsequent requests
- // must arrange to wait for the Close call before doing so.
- //
- // The Request's URL and Header fields must be initialized.
- RoundTrip(*Request) (*Response, error)
-}
-
-// refererForURL returns a referer without any authentication info,
-// or an empty string if lastReq scheme is https and newReq scheme is http.
-func refererForURL(lastReq, newReq *url.URL) string {
- // https://tools.ietf.org/html/rfc7231#section-5.5.2
- // "Clients SHOULD NOT include a Referer header field in a
- // (non-secure) HTTP request if the referring page was
- // transferred with a secure protocol."
- if lastReq.Scheme == "https" && newReq.Scheme == "http" {
- return ""
- }
- referer := lastReq.String()
- if lastReq.User != nil {
- // This is not very efficient, but is the best we can
- // do without:
- // - introducing a new method on URL
- // - creating a race condition
- // - copying the URL struct manually, which would cause
- // maintenance problems down the line
- auth := lastReq.User.String() + "@"
- referer = strings.Replace(referer, auth, "", 1)
- }
- return referer
-}
-
-// didTimeout is non-nil only if err != nil.
-func (c *Client) send(req *Request, deadline time.Time) (resp *Response, didTimeout func() bool, err error) {
- if c.Jar != nil {
- for _, cookie := range c.Jar.Cookies(req.URL) {
- req.AddCookie(cookie)
- }
- }
- resp, didTimeout, err = send(req, c.transport(), deadline)
- if err != nil {
- return nil, didTimeout, err
- }
- if c.Jar != nil {
- if rc := resp.Cookies(); len(rc) > 0 {
- c.Jar.SetCookies(req.URL, rc)
- }
- }
- return resp, nil, nil
-}
-
-func (c *Client) deadline() time.Time {
- if c.Timeout > 0 {
- return time.Now().Add(c.Timeout)
- }
- return time.Time{}
-}
-
-func (c *Client) transport() RoundTripper {
- if c.Transport != nil {
- return c.Transport
- }
- return DefaultTransport
-}
-
-// send issues an HTTP request.
-// Caller should close resp.Body when done reading from it.
-func send(ireq *Request, rt RoundTripper, deadline time.Time) (resp *Response, didTimeout func() bool, err error) {
- req := ireq // req is either the original request, or a modified fork
-
- if rt == nil {
- req.closeBody()
- return nil, alwaysFalse, errors.New("http: no Client.Transport or DefaultTransport")
- }
-
- if req.URL == nil {
- req.closeBody()
- return nil, alwaysFalse, errors.New("http: nil Request.URL")
- }
-
- if req.RequestURI != "" {
- req.closeBody()
- return nil, alwaysFalse, errors.New("http: Request.RequestURI can't be set in client requests")
- }
-
- // forkReq forks req into a shallow clone of ireq the first
- // time it's called.
- forkReq := func() {
- if ireq == req {
- req = new(Request)
- *req = *ireq // shallow clone
- }
- }
-
-	// Most of the callers of send (Get, Post, et al) don't need
- // Headers, leaving it uninitialized. We guarantee to the
- // Transport that this has been initialized, though.
- if req.Header == nil {
- forkReq()
- req.Header = make(Header)
- }
-
- if u := req.URL.User; u != nil && req.Header.Get("Authorization") == "" {
- username := u.Username()
- password, _ := u.Password()
- forkReq()
- req.Header = cloneOrMakeHeader(ireq.Header)
- req.Header.Set("Authorization", "Basic "+basicAuth(username, password))
- }
-
- if !deadline.IsZero() {
- forkReq()
- }
- stopTimer, didTimeout := setRequestCancel(req, rt, deadline)
-
- resp, err = rt.RoundTrip(req)
- if err != nil {
- stopTimer()
- if resp != nil {
- log.Printf("RoundTripper returned a response & error; ignoring response")
- }
- if tlsErr, ok := err.(tls.RecordHeaderError); ok {
- // If we get a bad TLS record header, check to see if the
- // response looks like HTTP and give a more helpful error.
- // See golang.org/issue/11111.
- if string(tlsErr.RecordHeader[:]) == "HTTP/" {
- err = errors.New("http: server gave HTTP response to HTTPS client")
- }
- }
- return nil, didTimeout, err
- }
- if resp == nil {
- return nil, didTimeout, fmt.Errorf("http: RoundTripper implementation (%T) returned a nil *Response with a nil error", rt)
- }
- if resp.Body == nil {
- // The documentation on the Body field says “The http Client and Transport
- // guarantee that Body is always non-nil, even on responses without a body
- // or responses with a zero-length body.” Unfortunately, we didn't document
- // that same constraint for arbitrary RoundTripper implementations, and
- // RoundTripper implementations in the wild (mostly in tests) assume that
- // they can use a nil Body to mean an empty one (similar to Request.Body).
- // (See https://golang.org/issue/38095.)
- //
- // If the ContentLength allows the Body to be empty, fill in an empty one
- // here to ensure that it is non-nil.
- if resp.ContentLength > 0 && req.Method != "HEAD" {
- return nil, didTimeout, fmt.Errorf("http: RoundTripper implementation (%T) returned a *Response with content length %d but a nil Body", rt, resp.ContentLength)
- }
- resp.Body = io.NopCloser(strings.NewReader(""))
- }
- if !deadline.IsZero() {
- resp.Body = &cancelTimerBody{
- stop: stopTimer,
- rc: resp.Body,
- reqDidTimeout: didTimeout,
- }
- }
- return resp, nil, nil
-}
-
-// timeBeforeContextDeadline reports whether the non-zero Time t is
-// before ctx's deadline, if any. If ctx does not have a deadline, it
-// always reports true (the deadline is considered infinite).
-func timeBeforeContextDeadline(t time.Time, ctx context.Context) bool {
- d, ok := ctx.Deadline()
- if !ok {
- return true
- }
- return t.Before(d)
-}
-
-// knownRoundTripperImpl reports whether rt is a RoundTripper that's
-// maintained by the Go team and known to implement the latest
-// optional semantics (notably contexts). The Request is used
-// to check whether this particular request is using an alternate protocol,
-// in which case we need to check the RoundTripper for that protocol.
-func knownRoundTripperImpl(rt RoundTripper, req *Request) bool {
- switch t := rt.(type) {
- case *Transport:
- if altRT := t.alternateRoundTripper(req); altRT != nil {
- return knownRoundTripperImpl(altRT, req)
- }
- return true
- case *http2Transport, http2noDialH2RoundTripper:
- return true
- }
- // There's a very minor chance of a false positive with this.
- // Instead of detecting our golang.org/x/net/http2.Transport,
- // it might detect a Transport type in a different http2
- // package. But I know of none, and the only problem would be
- // some temporarily leaked goroutines if the transport didn't
- // support contexts. So this is a good enough heuristic:
- if reflect.TypeOf(rt).String() == "*http2.Transport" {
- return true
- }
- return false
-}
-
-// setRequestCancel sets req.Cancel and adds a deadline context to req
-// if deadline is non-zero. The RoundTripper's type is used to
-// determine whether the legacy CancelRequest behavior should be used.
-//
-// As background, there are three ways to cancel a request:
-// First was Transport.CancelRequest. (deprecated)
-// Second was Request.Cancel.
-// Third was Request.Context.
-// This function populates the second and third, and uses the first if it really needs to.
-func setRequestCancel(req *Request, rt RoundTripper, deadline time.Time) (stopTimer func(), didTimeout func() bool) {
- if deadline.IsZero() {
- return nop, alwaysFalse
- }
- knownTransport := knownRoundTripperImpl(rt, req)
- oldCtx := req.Context()
-
- if req.Cancel == nil && knownTransport {
- // If they already had a Request.Context that's
- // expiring sooner, do nothing:
- if !timeBeforeContextDeadline(deadline, oldCtx) {
- return nop, alwaysFalse
- }
-
- var cancelCtx func()
- req.ctx, cancelCtx = context.WithDeadline(oldCtx, deadline)
- return cancelCtx, func() bool { return time.Now().After(deadline) }
- }
- initialReqCancel := req.Cancel // the user's original Request.Cancel, if any
-
- var cancelCtx func()
- if oldCtx := req.Context(); timeBeforeContextDeadline(deadline, oldCtx) {
- req.ctx, cancelCtx = context.WithDeadline(oldCtx, deadline)
- }
-
- cancel := make(chan struct{})
- req.Cancel = cancel
-
- doCancel := func() {
- // The second way in the func comment above:
- close(cancel)
- // The first way, used only for RoundTripper
- // implementations written before Go 1.5 or Go 1.6.
- type canceler interface{ CancelRequest(*Request) }
- if v, ok := rt.(canceler); ok {
- v.CancelRequest(req)
- }
- }
-
- stopTimerCh := make(chan struct{})
- var once sync.Once
- stopTimer = func() {
- once.Do(func() {
- close(stopTimerCh)
- if cancelCtx != nil {
- cancelCtx()
- }
- })
- }
-
- timer := time.NewTimer(time.Until(deadline))
- var timedOut atomicBool
-
- go func() {
- select {
- case <-initialReqCancel:
- doCancel()
- timer.Stop()
- case <-timer.C:
- timedOut.setTrue()
- doCancel()
- case <-stopTimerCh:
- timer.Stop()
- }
- }()
-
- return stopTimer, timedOut.isSet
-}
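
The timer wired up here is what makes Client.Timeout cover the whole exchange: it keeps running after Do returns and can fire mid-body. A small demonstration of that semantics (the URL and duration are placeholders):

    client := &http.Client{Timeout: 2 * time.Second}
    resp, err := client.Get("https://example.com/slow")
    if err != nil {
        log.Fatal(err) // may already be a timeout awaiting headers
    }
    defer resp.Body.Close()
    // Reading can still fail with "(Client.Timeout or context
    // cancellation while reading body)" if the 2s budget runs out here.
    body, err := io.ReadAll(resp.Body)
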
-
-// See 2 (end of page 4) https://www.ietf.org/rfc/rfc2617.txt
-// "To receive authorization, the client sends the userid and password,
-// separated by a single colon (":") character, within a base64
-// encoded string in the credentials."
-// It is not meant to be urlencoded.
-func basicAuth(username, password string) string {
- auth := username + ":" + password
- return base64.StdEncoding.EncodeToString([]byte(auth))
-}
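
For instance, the RFC 2617 example credentials encode as follows:

    basicAuth("Aladdin", "open sesame")
    // == "QWxhZGRpbjpvcGVuIHNlc2FtZQ=="
    // sent on the wire as:
    // Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
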
-
-// Get issues a GET to the specified URL. If the response is one of
-// the following redirect codes, Get follows the redirect, up to a
-// maximum of 10 redirects:
-//
-// 301 (Moved Permanently)
-// 302 (Found)
-// 303 (See Other)
-// 307 (Temporary Redirect)
-// 308 (Permanent Redirect)
-//
-// An error is returned if there were too many redirects or if there
-// was an HTTP protocol error. A non-2xx response doesn't cause an
-// error. Any returned error will be of type *url.Error. The url.Error
-// value's Timeout method will report true if the request timed out.
-//
-// When err is nil, resp always contains a non-nil resp.Body.
-// Caller should close resp.Body when done reading from it.
-//
-// Get is a wrapper around DefaultClient.Get.
-//
-// To make a request with custom headers, use NewRequest and
-// DefaultClient.Do.
-//
-// To make a request with a specified context.Context, use NewRequestWithContext
-// and DefaultClient.Do.
-func Get(url string) (resp *Response, err error) {
- return DefaultClient.Get(url)
-}
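
Typical use of the package-level Get, including the body-closing obligation noted above:

    resp, err := http.Get("http://example.com/")
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()
    b, err := io.ReadAll(resp.Body)
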
-
-// Get issues a GET to the specified URL. If the response is one of the
-// following redirect codes, Get follows the redirect after calling the
-// Client's CheckRedirect function:
-//
-// 301 (Moved Permanently)
-// 302 (Found)
-// 303 (See Other)
-// 307 (Temporary Redirect)
-// 308 (Permanent Redirect)
-//
-// An error is returned if the Client's CheckRedirect function fails
-// or if there was an HTTP protocol error. A non-2xx response doesn't
-// cause an error. Any returned error will be of type *url.Error. The
-// url.Error value's Timeout method will report true if the request
-// timed out.
-//
-// When err is nil, resp always contains a non-nil resp.Body.
-// Caller should close resp.Body when done reading from it.
-//
-// To make a request with custom headers, use NewRequest and Client.Do.
-//
-// To make a request with a specified context.Context, use NewRequestWithContext
-// and Client.Do.
-func (c *Client) Get(url string) (resp *Response, err error) {
- req, err := NewRequest("GET", url, nil)
- if err != nil {
- return nil, err
- }
- return c.Do(req)
-}
-
-func alwaysFalse() bool { return false }
-
-// ErrUseLastResponse can be returned by Client.CheckRedirect hooks to
-// control how redirects are processed. If returned, the next request
-// is not sent and the most recent response is returned with its body
-// unclosed.
-var ErrUseLastResponse = errors.New("net/http: use last response")
-
-// checkRedirect calls either the user's configured CheckRedirect
-// function, or the default.
-func (c *Client) checkRedirect(req *Request, via []*Request) error {
- fn := c.CheckRedirect
- if fn == nil {
- fn = defaultCheckRedirect
- }
- return fn(req, via)
-}
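
A common CheckRedirect policy is to disable redirect following entirely and hand the 3xx response back to the caller, via the ErrUseLastResponse sentinel defined above; the URL is a placeholder:

    client := &http.Client{
        CheckRedirect: func(req *http.Request, via []*http.Request) error {
            return http.ErrUseLastResponse
        },
    }
    resp, err := client.Get("http://example.com/redirecting")
    // On a redirect, resp is the 3xx response itself,
    // and its Body is still open for the caller to read.
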
-
-// redirectBehavior describes what should happen when the
-// client encounters a 3xx status code from the server.
-func redirectBehavior(reqMethod string, resp *Response, ireq *Request) (redirectMethod string, shouldRedirect, includeBody bool) {
- switch resp.StatusCode {
- case 301, 302, 303:
- redirectMethod = reqMethod
- shouldRedirect = true
- includeBody = false
-
- // RFC 2616 allowed automatic redirection only with GET and
- // HEAD requests. RFC 7231 lifts this restriction, but we still
- // restrict other methods to GET to maintain compatibility.
- // See Issue 18570.
- if reqMethod != "GET" && reqMethod != "HEAD" {
- redirectMethod = "GET"
- }
- case 307, 308:
- redirectMethod = reqMethod
- shouldRedirect = true
- includeBody = true
-
- // Treat 307 and 308 specially, since they're new in
- // Go 1.8, and they also require re-sending the request body.
- if resp.Header.Get("Location") == "" {
- // 308s have been observed in the wild being served
- // without Location headers. Since Go 1.7 and earlier
- // didn't follow these codes, just stop here instead
- // of returning an error.
- // See Issue 17773.
- shouldRedirect = false
- break
- }
- if ireq.GetBody == nil && ireq.outgoingLength() != 0 {
- // We had a request body, and 307/308 require
- // re-sending it, but GetBody is not defined. So just
- // return this response to the user instead of an
- // error, like we did in Go 1.7 and earlier.
- shouldRedirect = false
- }
- }
- return redirectMethod, shouldRedirect, includeBody
-}
-
-// urlErrorOp returns the (*url.Error).Op value to use for the
-// provided (*Request).Method value.
-func urlErrorOp(method string) string {
- if method == "" {
- return "Get"
- }
- if lowerMethod, ok := ascii.ToLower(method); ok {
- return method[:1] + lowerMethod[1:]
- }
- return method
-}
-
-// Do sends an HTTP request and returns an HTTP response, following
-// policy (such as redirects, cookies, auth) as configured on the
-// client.
-//
-// An error is returned if caused by client policy (such as
-// CheckRedirect), or failure to speak HTTP (such as a network
-// connectivity problem). A non-2xx status code doesn't cause an
-// error.
-//
-// If the returned error is nil, the Response will contain a non-nil
-// Body which the user is expected to close. If the Body is not both
-// read to EOF and closed, the Client's underlying RoundTripper
-// (typically Transport) may not be able to re-use a persistent TCP
-// connection to the server for a subsequent "keep-alive" request.
-//
-// The request Body, if non-nil, will be closed by the underlying
-// Transport, even on errors.
-//
-// On error, any Response can be ignored. A non-nil Response with a
-// non-nil error only occurs when CheckRedirect fails, and even then
-// the returned Response.Body is already closed.
-//
-// Generally Get, Post, or PostForm will be used instead of Do.
-//
-// If the server replies with a redirect, the Client first uses the
-// CheckRedirect function to determine whether the redirect should be
-// followed. If permitted, a 301, 302, or 303 redirect causes
-// subsequent requests to use HTTP method GET
-// (or HEAD if the original request was HEAD), with no body.
-// A 307 or 308 redirect preserves the original HTTP method and body,
-// provided that the Request.GetBody function is defined.
-// The NewRequest function automatically sets GetBody for common
-// standard library body types.
-//
-// Any returned error will be of type *url.Error. The url.Error
-// value's Timeout method will report true if the request timed out.
-func (c *Client) Do(req *Request) (*Response, error) {
- return c.do(req)
-}
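
A sketch of canonical Do usage following the doc comment above; the URL, header, and timeout are placeholders:

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    req, err := http.NewRequestWithContext(ctx, "GET", "http://example.com/", nil)
    if err != nil {
        log.Fatal(err)
    }
    req.Header.Set("Accept", "application/json")
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close() // read to EOF and close so the connection can be reused
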
-
-var testHookClientDoResult func(retres *Response, reterr error)
-
-func (c *Client) do(req *Request) (retres *Response, reterr error) {
- if testHookClientDoResult != nil {
- defer func() { testHookClientDoResult(retres, reterr) }()
- }
- if req.URL == nil {
- req.closeBody()
- return nil, &url.Error{
- Op: urlErrorOp(req.Method),
- Err: errors.New("http: nil Request.URL"),
- }
- }
-
- var (
- deadline = c.deadline()
- reqs []*Request
- resp *Response
- copyHeaders = c.makeHeadersCopier(req)
- reqBodyClosed = false // have we closed the current req.Body?
-
- // Redirect behavior:
- redirectMethod string
- includeBody bool
- )
- uerr := func(err error) error {
- // the body may have been closed already by c.send()
- if !reqBodyClosed {
- req.closeBody()
- }
- var urlStr string
- if resp != nil && resp.Request != nil {
- urlStr = stripPassword(resp.Request.URL)
- } else {
- urlStr = stripPassword(req.URL)
- }
- return &url.Error{
- Op: urlErrorOp(reqs[0].Method),
- URL: urlStr,
- Err: err,
- }
- }
- for {
- // For all but the first request, create the next
- // request hop and replace req.
- if len(reqs) > 0 {
- loc := resp.Header.Get("Location")
- if loc == "" {
- resp.closeBody()
- return nil, uerr(fmt.Errorf("%d response missing Location header", resp.StatusCode))
- }
- u, err := req.URL.Parse(loc)
- if err != nil {
- resp.closeBody()
- return nil, uerr(fmt.Errorf("failed to parse Location header %q: %v", loc, err))
- }
- host := ""
- if req.Host != "" && req.Host != req.URL.Host {
- // If the caller specified a custom Host header and the
- // redirect location is relative, preserve the Host header
- // through the redirect. See issue #22233.
- if u, _ := url.Parse(loc); u != nil && !u.IsAbs() {
- host = req.Host
- }
- }
- ireq := reqs[0]
- req = &Request{
- Method: redirectMethod,
- Response: resp,
- URL: u,
- Header: make(Header),
- Host: host,
- Cancel: ireq.Cancel,
- ctx: ireq.ctx,
- }
- if includeBody && ireq.GetBody != nil {
- req.Body, err = ireq.GetBody()
- if err != nil {
- resp.closeBody()
- return nil, uerr(err)
- }
- req.ContentLength = ireq.ContentLength
- }
-
- // Copy original headers before setting the Referer,
- // in case the user set Referer on their first request.
- // If they really want to override, they can do it in
- // their CheckRedirect func.
- copyHeaders(req)
-
- // Add the Referer header from the most recent
- // request URL to the new one, if it's not https->http:
- if ref := refererForURL(reqs[len(reqs)-1].URL, req.URL); ref != "" {
- req.Header.Set("Referer", ref)
- }
- err = c.checkRedirect(req, reqs)
-
- // Sentinel error to let users select the
- // previous response, without closing its
- // body. See Issue 10069.
- if err == ErrUseLastResponse {
- return resp, nil
- }
-
- // Close the previous response's body. But
- // read at least some of the body so if it's
- // small the underlying TCP connection will be
- // re-used. No need to check for errors: if it
- // fails, the Transport won't reuse it anyway.
- const maxBodySlurpSize = 2 << 10
- if resp.ContentLength == -1 || resp.ContentLength <= maxBodySlurpSize {
- io.CopyN(io.Discard, resp.Body, maxBodySlurpSize)
- }
- resp.Body.Close()
-
- if err != nil {
- // Special case for Go 1 compatibility: return both the response
- // and an error if the CheckRedirect function failed.
- // See https://golang.org/issue/3795
- // The resp.Body has already been closed.
- ue := uerr(err)
- ue.(*url.Error).URL = loc
- return resp, ue
- }
- }
-
- reqs = append(reqs, req)
- var err error
- var didTimeout func() bool
- if resp, didTimeout, err = c.send(req, deadline); err != nil {
- // c.send() always closes req.Body
- reqBodyClosed = true
- if !deadline.IsZero() && didTimeout() {
- err = &httpError{
- err: err.Error() + " (Client.Timeout exceeded while awaiting headers)",
- timeout: true,
- }
- }
- return nil, uerr(err)
- }
-
- var shouldRedirect bool
- redirectMethod, shouldRedirect, includeBody = redirectBehavior(req.Method, resp, reqs[0])
- if !shouldRedirect {
- return resp, nil
- }
-
- req.closeBody()
- }
-}
-
-// makeHeadersCopier makes a function that copies headers from the
-// initial Request, ireq. For every redirect, this function must be called
-// so that it can copy headers into the upcoming Request.
-func (c *Client) makeHeadersCopier(ireq *Request) func(*Request) {
- // The headers to copy are from the very initial request.
- // We use a closured callback to keep a reference to these original headers.
- var (
- ireqhdr = cloneOrMakeHeader(ireq.Header)
- icookies map[string][]*Cookie
- )
- if c.Jar != nil && ireq.Header.Get("Cookie") != "" {
- icookies = make(map[string][]*Cookie)
- for _, c := range ireq.Cookies() {
- icookies[c.Name] = append(icookies[c.Name], c)
- }
- }
-
- preq := ireq // The previous request
- return func(req *Request) {
- // If Jar is present and there was some initial cookies provided
- // via the request header, then we may need to alter the initial
- // cookies as we follow redirects since each redirect may end up
- // modifying a pre-existing cookie.
- //
- // Since cookies already set in the request header do not contain
- // information about the original domain and path, the logic below
- // assumes any new set cookies override the original cookie
- // regardless of domain or path.
- //
- // See https://golang.org/issue/17494
- if c.Jar != nil && icookies != nil {
- var changed bool
- resp := req.Response // The response that caused the upcoming redirect
- for _, c := range resp.Cookies() {
- if _, ok := icookies[c.Name]; ok {
- delete(icookies, c.Name)
- changed = true
- }
- }
- if changed {
- ireqhdr.Del("Cookie")
- var ss []string
- for _, cs := range icookies {
- for _, c := range cs {
- ss = append(ss, c.Name+"="+c.Value)
- }
- }
- sort.Strings(ss) // Ensure deterministic headers
- ireqhdr.Set("Cookie", strings.Join(ss, "; "))
- }
- }
-
- // Copy the initial request's Header values
- // (at least the safe ones).
- for k, vv := range ireqhdr {
- if shouldCopyHeaderOnRedirect(k, preq.URL, req.URL) {
- req.Header[k] = vv
- }
- }
-
- preq = req // Update previous Request with the current request
- }
-}
-
-func defaultCheckRedirect(req *Request, via []*Request) error {
- if len(via) >= 10 {
- return errors.New("stopped after 10 redirects")
- }
- return nil
-}
-
-// Post issues a POST to the specified URL.
-//
-// Caller should close resp.Body when done reading from it.
-//
-// If the provided body is an io.Closer, it is closed after the
-// request.
-//
-// Post is a wrapper around DefaultClient.Post.
-//
-// To set custom headers, use NewRequest and DefaultClient.Do.
-//
-// See the Client.Do method documentation for details on how redirects
-// are handled.
-//
-// To make a request with a specified context.Context, use NewRequestWithContext
-// and DefaultClient.Do.
-func Post(url, contentType string, body io.Reader) (resp *Response, err error) {
- return DefaultClient.Post(url, contentType, body)
-}
-
-// Post issues a POST to the specified URL.
-//
-// Caller should close resp.Body when done reading from it.
-//
-// If the provided body is an io.Closer, it is closed after the
-// request.
-//
-// To set custom headers, use NewRequest and Client.Do.
-//
-// To make a request with a specified context.Context, use NewRequestWithContext
-// and Client.Do.
-//
-// See the Client.Do method documentation for details on how redirects
-// are handled.
-func (c *Client) Post(url, contentType string, body io.Reader) (resp *Response, err error) {
- req, err := NewRequest("POST", url, body)
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", contentType)
- return c.Do(req)
-}
-
-// PostForm issues a POST to the specified URL, with data's keys and
-// values URL-encoded as the request body.
-//
-// The Content-Type header is set to application/x-www-form-urlencoded.
-// To set other headers, use NewRequest and DefaultClient.Do.
-//
-// When err is nil, resp always contains a non-nil resp.Body.
-// Caller should close resp.Body when done reading from it.
-//
-// PostForm is a wrapper around DefaultClient.PostForm.
-//
-// See the Client.Do method documentation for details on how redirects
-// are handled.
-//
-// To make a request with a specified context.Context, use NewRequestWithContext
-// and DefaultClient.Do.
-func PostForm(url string, data url.Values) (resp *Response, err error) {
- return DefaultClient.PostForm(url, data)
-}
-
-// PostForm issues a POST to the specified URL,
-// with data's keys and values URL-encoded as the request body.
-//
-// The Content-Type header is set to application/x-www-form-urlencoded.
-// To set other headers, use NewRequest and Client.Do.
-//
-// When err is nil, resp always contains a non-nil resp.Body.
-// Caller should close resp.Body when done reading from it.
-//
-// See the Client.Do method documentation for details on how redirects
-// are handled.
-//
-// To make a request with a specified context.Context, use NewRequestWithContext
-// and Client.Do.
-func (c *Client) PostForm(url string, data url.Values) (resp *Response, err error) {
- return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
-}
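
For example, submitting a simple form (the URL and field names are placeholders):

    resp, err := http.PostForm("http://example.com/form",
        url.Values{"key": {"value"}, "id": {"123"}})
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()
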
-
-// Head issues a HEAD to the specified URL. If the response is one of
-// the following redirect codes, Head follows the redirect, up to a
-// maximum of 10 redirects:
-//
-// 301 (Moved Permanently)
-// 302 (Found)
-// 303 (See Other)
-// 307 (Temporary Redirect)
-// 308 (Permanent Redirect)
-//
-// Head is a wrapper around DefaultClient.Head
-//
-// To make a request with a specified context.Context, use NewRequestWithContext
-// and DefaultClient.Do.
-func Head(url string) (resp *Response, err error) {
- return DefaultClient.Head(url)
-}
-
-// Head issues a HEAD to the specified URL. If the response is one of the
-// following redirect codes, Head follows the redirect after calling the
-// Client's CheckRedirect function:
-//
-// 301 (Moved Permanently)
-// 302 (Found)
-// 303 (See Other)
-// 307 (Temporary Redirect)
-// 308 (Permanent Redirect)
-//
-// To make a request with a specified context.Context, use NewRequestWithContext
-// and Client.Do.
-func (c *Client) Head(url string) (resp *Response, err error) {
- req, err := NewRequest("HEAD", url, nil)
- if err != nil {
- return nil, err
- }
- return c.Do(req)
-}
-
-// CloseIdleConnections closes any connections on its Transport which
-// were previously connected from previous requests but are now
-// sitting idle in a "keep-alive" state. It does not interrupt any
-// connections currently in use.
-//
-// If the Client's Transport does not have a CloseIdleConnections method
-// then this method does nothing.
-func (c *Client) CloseIdleConnections() {
- type closeIdler interface {
- CloseIdleConnections()
- }
- if tr, ok := c.transport().(closeIdler); ok {
- tr.CloseIdleConnections()
- }
-}
-
-// cancelTimerBody is an io.ReadCloser that wraps rc with two features:
-// 1) On Read error or close, the stop func is called.
-// 2) On Read failure, if reqDidTimeout is true, the error is wrapped and
-// marked as a net.Error that hit its timeout.
-type cancelTimerBody struct {
- stop func() // stops the time.Timer waiting to cancel the request
- rc io.ReadCloser
- reqDidTimeout func() bool
-}
-
-func (b *cancelTimerBody) Read(p []byte) (n int, err error) {
- n, err = b.rc.Read(p)
- if err == nil {
- return n, nil
- }
- if err == io.EOF {
- return n, err
- }
- if b.reqDidTimeout() {
- err = &httpError{
- err: err.Error() + " (Client.Timeout or context cancellation while reading body)",
- timeout: true,
- }
- }
- return n, err
-}
-
-func (b *cancelTimerBody) Close() error {
- err := b.rc.Close()
- b.stop()
- return err
-}
-
-func shouldCopyHeaderOnRedirect(headerKey string, initial, dest *url.URL) bool {
- switch CanonicalHeaderKey(headerKey) {
- case "Authorization", "Www-Authenticate", "Cookie", "Cookie2":
- // Permit sending auth/cookie headers from "foo.com"
- // to "sub.foo.com".
-
- // Note that we don't send all cookies to subdomains
- // automatically. This function is only used for
- // Cookies set explicitly on the initial outgoing
- // client request. Cookies automatically added via the
- // CookieJar mechanism continue to follow each
- // cookie's scope as set by Set-Cookie. But for
- // outgoing requests with the Cookie header set
- // directly, we don't know their scope, so we assume
- // it's for *.domain.com.
-
- ihost := canonicalAddr(initial)
- dhost := canonicalAddr(dest)
- return isDomainOrSubdomain(dhost, ihost)
- }
- // All other headers are copied:
- return true
-}
-
-// isDomainOrSubdomain reports whether sub is a subdomain (or exact
-// match) of the parent domain.
-//
-// Both domains must already be in canonical form.
-func isDomainOrSubdomain(sub, parent string) bool {
- if sub == parent {
- return true
- }
- // If sub is "foo.example.com" and parent is "example.com",
- // that means sub must end in "."+parent.
- // Do it without allocating.
- if !strings.HasSuffix(sub, parent) {
- return false
- }
- return sub[len(sub)-len(parent)-1] == '.'
-}
-
-func stripPassword(u *url.URL) string {
- _, passSet := u.User.Password()
- if passSet {
- return strings.Replace(u.String(), u.User.String()+"@", u.User.Username()+":***@", 1)
- }
- return u.String()
-}
diff --git a/contrib/go/_std_1.18/src/net/http/cookie.go b/contrib/go/_std_1.18/src/net/http/cookie.go
deleted file mode 100644
index cb37f2351f..0000000000
--- a/contrib/go/_std_1.18/src/net/http/cookie.go
+++ /dev/null
@@ -1,464 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "errors"
- "fmt"
- "log"
- "net"
- "net/http/internal/ascii"
- "net/textproto"
- "strconv"
- "strings"
- "time"
-)
-
-// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an
-// HTTP response or the Cookie header of an HTTP request.
-//
-// See https://tools.ietf.org/html/rfc6265 for details.
-type Cookie struct {
- Name string
- Value string
-
- Path string // optional
- Domain string // optional
- Expires time.Time // optional
- RawExpires string // for reading cookies only
-
- // MaxAge=0 means no 'Max-Age' attribute specified.
- // MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0'
- // MaxAge>0 means Max-Age attribute present and given in seconds
- MaxAge int
- Secure bool
- HttpOnly bool
- SameSite SameSite
- Raw string
- Unparsed []string // Raw text of unparsed attribute-value pairs
-}
-
-// SameSite allows a server to define a cookie attribute making it impossible for
-// the browser to send this cookie along with cross-site requests. The main
-// goal is to mitigate the risk of cross-origin information leakage, and provide
-// some protection against cross-site request forgery attacks.
-//
-// See https://tools.ietf.org/html/draft-ietf-httpbis-cookie-same-site-00 for details.
-type SameSite int
-
-const (
- SameSiteDefaultMode SameSite = iota + 1
- SameSiteLaxMode
- SameSiteStrictMode
- SameSiteNoneMode
-)
-
-// readSetCookies parses all "Set-Cookie" values from
-// the header h and returns the successfully parsed Cookies.
-func readSetCookies(h Header) []*Cookie {
- cookieCount := len(h["Set-Cookie"])
- if cookieCount == 0 {
- return []*Cookie{}
- }
- cookies := make([]*Cookie, 0, cookieCount)
- for _, line := range h["Set-Cookie"] {
- parts := strings.Split(textproto.TrimString(line), ";")
- if len(parts) == 1 && parts[0] == "" {
- continue
- }
- parts[0] = textproto.TrimString(parts[0])
- name, value, ok := strings.Cut(parts[0], "=")
- if !ok {
- continue
- }
- if !isCookieNameValid(name) {
- continue
- }
- value, ok = parseCookieValue(value, true)
- if !ok {
- continue
- }
- c := &Cookie{
- Name: name,
- Value: value,
- Raw: line,
- }
- for i := 1; i < len(parts); i++ {
- parts[i] = textproto.TrimString(parts[i])
- if len(parts[i]) == 0 {
- continue
- }
-
- attr, val, _ := strings.Cut(parts[i], "=")
- lowerAttr, isASCII := ascii.ToLower(attr)
- if !isASCII {
- continue
- }
- val, ok = parseCookieValue(val, false)
- if !ok {
- c.Unparsed = append(c.Unparsed, parts[i])
- continue
- }
-
- switch lowerAttr {
- case "samesite":
- lowerVal, ascii := ascii.ToLower(val)
- if !ascii {
- c.SameSite = SameSiteDefaultMode
- continue
- }
- switch lowerVal {
- case "lax":
- c.SameSite = SameSiteLaxMode
- case "strict":
- c.SameSite = SameSiteStrictMode
- case "none":
- c.SameSite = SameSiteNoneMode
- default:
- c.SameSite = SameSiteDefaultMode
- }
- continue
- case "secure":
- c.Secure = true
- continue
- case "httponly":
- c.HttpOnly = true
- continue
- case "domain":
- c.Domain = val
- continue
- case "max-age":
- secs, err := strconv.Atoi(val)
- if err != nil || secs != 0 && val[0] == '0' {
- break
- }
- if secs <= 0 {
- secs = -1
- }
- c.MaxAge = secs
- continue
- case "expires":
- c.RawExpires = val
- exptime, err := time.Parse(time.RFC1123, val)
- if err != nil {
- exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", val)
- if err != nil {
- c.Expires = time.Time{}
- break
- }
- }
- c.Expires = exptime.UTC()
- continue
- case "path":
- c.Path = val
- continue
- }
- c.Unparsed = append(c.Unparsed, parts[i])
- }
- cookies = append(cookies, c)
- }
- return cookies
-}
-
-// SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers.
-// The provided cookie must have a valid Name. Invalid cookies may be
-// silently dropped.
-func SetCookie(w ResponseWriter, cookie *Cookie) {
- if v := cookie.String(); v != "" {
- w.Header().Add("Set-Cookie", v)
- }
-}
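
Typical server-side use inside a handler; the serialized form follows Cookie.String below (Name=Value first, then attributes):

    func handler(w http.ResponseWriter, r *http.Request) {
        http.SetCookie(w, &http.Cookie{
            Name:     "session",
            Value:    "abc123",
            Path:     "/",
            MaxAge:   3600,
            HttpOnly: true,
        })
        // Emits: Set-Cookie: session=abc123; Path=/; Max-Age=3600; HttpOnly
    }
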
-
-// String returns the serialization of the cookie for use in a Cookie
-// header (if only Name and Value are set) or a Set-Cookie response
-// header (if other fields are set).
-// If c is nil or c.Name is invalid, the empty string is returned.
-func (c *Cookie) String() string {
- if c == nil || !isCookieNameValid(c.Name) {
- return ""
- }
- // extraCookieLength derived from typical length of cookie attributes
- // see RFC 6265 Sec 4.1.
- const extraCookieLength = 110
- var b strings.Builder
- b.Grow(len(c.Name) + len(c.Value) + len(c.Domain) + len(c.Path) + extraCookieLength)
- b.WriteString(c.Name)
- b.WriteRune('=')
- b.WriteString(sanitizeCookieValue(c.Value))
-
- if len(c.Path) > 0 {
- b.WriteString("; Path=")
- b.WriteString(sanitizeCookiePath(c.Path))
- }
- if len(c.Domain) > 0 {
- if validCookieDomain(c.Domain) {
- // A c.Domain containing illegal characters is not
- // sanitized but simply dropped which turns the cookie
- // into a host-only cookie. A leading dot is okay
- // but won't be sent.
- d := c.Domain
- if d[0] == '.' {
- d = d[1:]
- }
- b.WriteString("; Domain=")
- b.WriteString(d)
- } else {
- log.Printf("net/http: invalid Cookie.Domain %q; dropping domain attribute", c.Domain)
- }
- }
- var buf [len(TimeFormat)]byte
- if validCookieExpires(c.Expires) {
- b.WriteString("; Expires=")
- b.Write(c.Expires.UTC().AppendFormat(buf[:0], TimeFormat))
- }
- if c.MaxAge > 0 {
- b.WriteString("; Max-Age=")
- b.Write(strconv.AppendInt(buf[:0], int64(c.MaxAge), 10))
- } else if c.MaxAge < 0 {
- b.WriteString("; Max-Age=0")
- }
- if c.HttpOnly {
- b.WriteString("; HttpOnly")
- }
- if c.Secure {
- b.WriteString("; Secure")
- }
- switch c.SameSite {
- case SameSiteDefaultMode:
- // Skip, default mode is obtained by not emitting the attribute.
- case SameSiteNoneMode:
- b.WriteString("; SameSite=None")
- case SameSiteLaxMode:
- b.WriteString("; SameSite=Lax")
- case SameSiteStrictMode:
- b.WriteString("; SameSite=Strict")
- }
- return b.String()
-}
-
-// Valid reports whether the cookie is valid.
-func (c *Cookie) Valid() error {
- if c == nil {
- return errors.New("http: nil Cookie")
- }
- if !isCookieNameValid(c.Name) {
- return errors.New("http: invalid Cookie.Name")
- }
- if !validCookieExpires(c.Expires) {
- return errors.New("http: invalid Cookie.Expires")
- }
- for i := 0; i < len(c.Value); i++ {
- if !validCookieValueByte(c.Value[i]) {
- return fmt.Errorf("http: invalid byte %q in Cookie.Value", c.Value[i])
- }
- }
- if len(c.Path) > 0 {
- for i := 0; i < len(c.Path); i++ {
- if !validCookiePathByte(c.Path[i]) {
- return fmt.Errorf("http: invalid byte %q in Cookie.Path", c.Path[i])
- }
- }
- }
- if len(c.Domain) > 0 {
- if !validCookieDomain(c.Domain) {
- return errors.New("http: invalid Cookie.Domain")
- }
- }
- return nil
-}
-
-// readCookies parses all "Cookie" values from the header h and
-// returns the successfully parsed Cookies.
-//
-// If filter isn't empty, only cookies of that name are returned.
-func readCookies(h Header, filter string) []*Cookie {
- lines := h["Cookie"]
- if len(lines) == 0 {
- return []*Cookie{}
- }
-
- cookies := make([]*Cookie, 0, len(lines)+strings.Count(lines[0], ";"))
- for _, line := range lines {
- line = textproto.TrimString(line)
-
- var part string
- for len(line) > 0 { // continue since we have rest
- part, line, _ = strings.Cut(line, ";")
- part = textproto.TrimString(part)
- if part == "" {
- continue
- }
- name, val, _ := strings.Cut(part, "=")
- if !isCookieNameValid(name) {
- continue
- }
- if filter != "" && filter != name {
- continue
- }
- val, ok := parseCookieValue(val, true)
- if !ok {
- continue
- }
- cookies = append(cookies, &Cookie{Name: name, Value: val})
- }
- }
- return cookies
-}
-
-// validCookieDomain reports whether v is a valid cookie domain-value.
-func validCookieDomain(v string) bool {
- if isCookieDomainName(v) {
- return true
- }
- if net.ParseIP(v) != nil && !strings.Contains(v, ":") {
- return true
- }
- return false
-}
-
-// validCookieExpires reports whether v is a valid cookie expires-value.
-func validCookieExpires(t time.Time) bool {
- // IETF RFC 6265 Section 5.1.1.5, the year must not be less than 1601
- return t.Year() >= 1601
-}
-
-// isCookieDomainName reports whether s is a valid domain name or a valid
-// domain name with a leading dot '.'. It is almost a direct copy of
-// package net's isDomainName.
-func isCookieDomainName(s string) bool {
- if len(s) == 0 {
- return false
- }
- if len(s) > 255 {
- return false
- }
-
- if s[0] == '.' {
-	// A cookie domain attribute may start with a leading dot.
- s = s[1:]
- }
- last := byte('.')
- ok := false // Ok once we've seen a letter.
- partlen := 0
- for i := 0; i < len(s); i++ {
- c := s[i]
- switch {
- default:
- return false
- case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
- // No '_' allowed here (in contrast to package net).
- ok = true
- partlen++
- case '0' <= c && c <= '9':
- // fine
- partlen++
- case c == '-':
- // Byte before dash cannot be dot.
- if last == '.' {
- return false
- }
- partlen++
- case c == '.':
- // Byte before dot cannot be dot, dash.
- if last == '.' || last == '-' {
- return false
- }
- if partlen > 63 || partlen == 0 {
- return false
- }
- partlen = 0
- }
- last = c
- }
- if last == '-' || partlen > 63 {
- return false
- }
-
- return ok
-}
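-
-// Editorial sketch (not part of the original file): representative inputs for
-// isCookieDomainName.
-//
-//	isCookieDomainName("example.com")  // true
-//	isCookieDomainName(".example.com") // true ("." is stripped first)
-//	isCookieDomainName("exa_mple.com") // false ('_' is not allowed here)
-//	isCookieDomainName("-example.com") // false (label cannot start with '-')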
-
-var cookieNameSanitizer = strings.NewReplacer("\n", "-", "\r", "-")
-
-func sanitizeCookieName(n string) string {
- return cookieNameSanitizer.Replace(n)
-}
-
-// sanitizeCookieValue produces a suitable cookie-value from v.
-// https://tools.ietf.org/html/rfc6265#section-4.1.1
-// cookie-value = *cookie-octet / ( DQUOTE *cookie-octet DQUOTE )
-// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
-// ; US-ASCII characters excluding CTLs,
-//           ; whitespace, DQUOTE, comma, semicolon,
-// ; and backslash
-// We loosen this as spaces and commas are common in cookie values
-// but we produce a quoted cookie-value if and only if v contains
-// commas or spaces.
-// See https://golang.org/issue/7243 for the discussion.
-func sanitizeCookieValue(v string) string {
- v = sanitizeOrWarn("Cookie.Value", validCookieValueByte, v)
- if len(v) == 0 {
- return v
- }
- if strings.ContainsAny(v, " ,") {
- return `"` + v + `"`
- }
- return v
-}
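-
-// Editorial sketch (not part of the original file): the quoting rule above in
-// action; the inputs are illustrative.
-//
-//	sanitizeCookieValue("abc")   // `abc`
-//	sanitizeCookieValue("a b,c") // `"a b,c"` (quoted: contains a space and a comma)
-//	sanitizeCookieValue("a;b")   // `ab` (';' is dropped, with a logged warning)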
-
-func validCookieValueByte(b byte) bool {
- return 0x20 <= b && b < 0x7f && b != '"' && b != ';' && b != '\\'
-}
-
-// path-av = "Path=" path-value
-// path-value = <any CHAR except CTLs or ";">
-func sanitizeCookiePath(v string) string {
- return sanitizeOrWarn("Cookie.Path", validCookiePathByte, v)
-}
-
-func validCookiePathByte(b byte) bool {
- return 0x20 <= b && b < 0x7f && b != ';'
-}
-
-func sanitizeOrWarn(fieldName string, valid func(byte) bool, v string) string {
- ok := true
- for i := 0; i < len(v); i++ {
- if valid(v[i]) {
- continue
- }
- log.Printf("net/http: invalid byte %q in %s; dropping invalid bytes", v[i], fieldName)
- ok = false
- break
- }
- if ok {
- return v
- }
- buf := make([]byte, 0, len(v))
- for i := 0; i < len(v); i++ {
- if b := v[i]; valid(b) {
- buf = append(buf, b)
- }
- }
- return string(buf)
-}
-
-func parseCookieValue(raw string, allowDoubleQuote bool) (string, bool) {
- // Strip the quotes, if present.
- if allowDoubleQuote && len(raw) > 1 && raw[0] == '"' && raw[len(raw)-1] == '"' {
- raw = raw[1 : len(raw)-1]
- }
- for i := 0; i < len(raw); i++ {
- if !validCookieValueByte(raw[i]) {
- return "", false
- }
- }
- return raw, true
-}
-
-func isCookieNameValid(raw string) bool {
- if raw == "" {
- return false
- }
- return strings.IndexFunc(raw, isNotToken) < 0
-}
diff --git a/contrib/go/_std_1.18/src/net/http/doc.go b/contrib/go/_std_1.18/src/net/http/doc.go
deleted file mode 100644
index ae9b708c69..0000000000
--- a/contrib/go/_std_1.18/src/net/http/doc.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package http provides HTTP client and server implementations.
-
-Get, Head, Post, and PostForm make HTTP (or HTTPS) requests:
-
- resp, err := http.Get("http://example.com/")
- ...
- resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf)
- ...
- resp, err := http.PostForm("http://example.com/form",
- url.Values{"key": {"Value"}, "id": {"123"}})
-
-The client must close the response body when finished with it:
-
- resp, err := http.Get("http://example.com/")
- if err != nil {
- // handle error
- }
- defer resp.Body.Close()
- body, err := io.ReadAll(resp.Body)
- // ...
-
-For control over HTTP client headers, redirect policy, and other
-settings, create a Client:
-
- client := &http.Client{
- CheckRedirect: redirectPolicyFunc,
- }
-
- resp, err := client.Get("http://example.com")
- // ...
-
- req, err := http.NewRequest("GET", "http://example.com", nil)
- // ...
- req.Header.Add("If-None-Match", `W/"wyzzy"`)
- resp, err := client.Do(req)
- // ...
-
-For control over proxies, TLS configuration, keep-alives,
-compression, and other settings, create a Transport:
-
- tr := &http.Transport{
- MaxIdleConns: 10,
- IdleConnTimeout: 30 * time.Second,
- DisableCompression: true,
- }
- client := &http.Client{Transport: tr}
- resp, err := client.Get("https://example.com")
-
-Clients and Transports are safe for concurrent use by multiple
-goroutines and for efficiency should only be created once and re-used.
-
-ListenAndServe starts an HTTP server with a given address and handler.
-The handler is usually nil, which means to use DefaultServeMux.
-Handle and HandleFunc add handlers to DefaultServeMux:
-
- http.Handle("/foo", fooHandler)
-
- http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) {
- fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path))
- })
-
- log.Fatal(http.ListenAndServe(":8080", nil))
-
-More control over the server's behavior is available by creating a
-custom Server:
-
- s := &http.Server{
- Addr: ":8080",
- Handler: myHandler,
- ReadTimeout: 10 * time.Second,
- WriteTimeout: 10 * time.Second,
- MaxHeaderBytes: 1 << 20,
- }
- log.Fatal(s.ListenAndServe())
-
-Starting with Go 1.6, the http package has transparent support for the
-HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2
-can do so by setting Transport.TLSNextProto (for clients) or
-Server.TLSNextProto (for servers) to a non-nil, empty
-map. Alternatively, the following GODEBUG environment variables are
-currently supported:
-
- GODEBUG=http2client=0 # disable HTTP/2 client support
- GODEBUG=http2server=0 # disable HTTP/2 server support
- GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs
- GODEBUG=http2debug=2 # ... even more verbose, with frame dumps
-
-The GODEBUG variables are not covered by Go's API compatibility
-promise. Please report any issues before disabling HTTP/2
-support: https://golang.org/s/http2bug
-
-The http package's Transport and Server both automatically enable
-HTTP/2 support for simple configurations. To enable HTTP/2 for more
-complex configurations, to use lower-level HTTP/2 features, or to use
-a newer version of Go's http2 package, import "golang.org/x/net/http2"
-directly and use its ConfigureTransport and/or ConfigureServer
-functions. Manually configuring HTTP/2 via the golang.org/x/net/http2
-package takes precedence over the net/http package's built-in HTTP/2
-support.
-
-*/
-package http
diff --git a/contrib/go/_std_1.18/src/net/http/filetransport.go b/contrib/go/_std_1.18/src/net/http/filetransport.go
deleted file mode 100644
index 32126d7ec0..0000000000
--- a/contrib/go/_std_1.18/src/net/http/filetransport.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "fmt"
- "io"
-)
-
-// fileTransport implements RoundTripper for the 'file' protocol.
-type fileTransport struct {
- fh fileHandler
-}
-
-// NewFileTransport returns a new RoundTripper, serving the provided
-// FileSystem. The returned RoundTripper ignores the URL host in its
-// incoming requests, as well as most other properties of the
-// request.
-//
-// The typical use case for NewFileTransport is to register the "file"
-// protocol with a Transport, as in:
-//
-// t := &http.Transport{}
-// t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
-// c := &http.Client{Transport: t}
-// res, err := c.Get("file:///etc/passwd")
-// ...
-func NewFileTransport(fs FileSystem) RoundTripper {
- return fileTransport{fileHandler{fs}}
-}
-
-func (t fileTransport) RoundTrip(req *Request) (resp *Response, err error) {
-	// We start ServeHTTP in a goroutine, which may take a long
-	// time if the file is large. The newPopulateResponseWriter
-	// call returns a channel on which either ServeHTTP or finish()
-	// sends our *Response, once the *Response itself has been
-	// populated (even if its body is still being written to
-	// res.Body, a pipe).
- rw, resc := newPopulateResponseWriter()
- go func() {
- t.fh.ServeHTTP(rw, req)
- rw.finish()
- }()
- return <-resc, nil
-}
-
-func newPopulateResponseWriter() (*populateResponse, <-chan *Response) {
- pr, pw := io.Pipe()
- rw := &populateResponse{
- ch: make(chan *Response),
- pw: pw,
- res: &Response{
- Proto: "HTTP/1.0",
- ProtoMajor: 1,
- Header: make(Header),
- Close: true,
- Body: pr,
- },
- }
- return rw, rw.ch
-}
-
-// populateResponse is a ResponseWriter that populates the *Response
-// in res, and writes its body to a pipe connected to the response
-// body. Once writes begin or finish() is called, the response is sent
-// on ch.
-type populateResponse struct {
- res *Response
- ch chan *Response
- wroteHeader bool
- hasContent bool
- sentResponse bool
- pw *io.PipeWriter
-}
-
-func (pr *populateResponse) finish() {
- if !pr.wroteHeader {
- pr.WriteHeader(500)
- }
- if !pr.sentResponse {
- pr.sendResponse()
- }
- pr.pw.Close()
-}
-
-func (pr *populateResponse) sendResponse() {
- if pr.sentResponse {
- return
- }
- pr.sentResponse = true
-
- if pr.hasContent {
- pr.res.ContentLength = -1
- }
- pr.ch <- pr.res
-}
-
-func (pr *populateResponse) Header() Header {
- return pr.res.Header
-}
-
-func (pr *populateResponse) WriteHeader(code int) {
- if pr.wroteHeader {
- return
- }
- pr.wroteHeader = true
-
- pr.res.StatusCode = code
- pr.res.Status = fmt.Sprintf("%d %s", code, StatusText(code))
-}
-
-func (pr *populateResponse) Write(p []byte) (n int, err error) {
- if !pr.wroteHeader {
- pr.WriteHeader(StatusOK)
- }
- pr.hasContent = true
- if !pr.sentResponse {
- pr.sendResponse()
- }
- return pr.pw.Write(p)
-}
diff --git a/contrib/go/_std_1.18/src/net/http/fs.go b/contrib/go/_std_1.18/src/net/http/fs.go
deleted file mode 100644
index 6caee9ed93..0000000000
--- a/contrib/go/_std_1.18/src/net/http/fs.go
+++ /dev/null
@@ -1,972 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP file system request handler
-
-package http
-
-import (
- "errors"
- "fmt"
- "io"
- "io/fs"
- "mime"
- "mime/multipart"
- "net/textproto"
- "net/url"
- "os"
- "path"
- "path/filepath"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-// A Dir implements FileSystem using the native file system restricted to a
-// specific directory tree.
-//
-// While the FileSystem.Open method takes '/'-separated paths, a Dir's string
-// value is a filename on the native file system, not a URL, so it is separated
-// by filepath.Separator, which isn't necessarily '/'.
-//
-// Note that Dir could expose sensitive files and directories. Dir will follow
-// symlinks pointing out of the directory tree, which can be especially dangerous
-// if serving from a directory in which users are able to create arbitrary symlinks.
-// Dir will also allow access to files and directories starting with a period,
-// which could expose sensitive directories like .git or sensitive files like
-// .htpasswd. To exclude files with a leading period, remove the files/directories
-// from the server or create a custom FileSystem implementation.
-//
-// An empty Dir is treated as ".".
-type Dir string
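-
-// Editorial sketch (not part of the original file): the usual pairing of Dir
-// with FileServer; the directory and address are illustrative. The symlink and
-// dot-file caveats above apply.
-//
-//	http.Handle("/", http.FileServer(http.Dir("/var/www")))
-//	log.Fatal(http.ListenAndServe(":8080", nil))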
-
-// mapOpenError maps the provided non-nil error from opening name
-// to a possibly better non-nil error. In particular, it turns OS-specific errors
-// about opening files in non-directories into fs.ErrNotExist. See Issues 18984 and 49552.
-func mapOpenError(originalErr error, name string, sep rune, stat func(string) (fs.FileInfo, error)) error {
- if errors.Is(originalErr, fs.ErrNotExist) || errors.Is(originalErr, fs.ErrPermission) {
- return originalErr
- }
-
- parts := strings.Split(name, string(sep))
- for i := range parts {
- if parts[i] == "" {
- continue
- }
- fi, err := stat(strings.Join(parts[:i+1], string(sep)))
- if err != nil {
- return originalErr
- }
- if !fi.IsDir() {
- return fs.ErrNotExist
- }
- }
- return originalErr
-}
-
-// Open implements FileSystem using os.Open, opening files for reading rooted
-// and relative to the directory d.
-func (d Dir) Open(name string) (File, error) {
- if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) {
- return nil, errors.New("http: invalid character in file path")
- }
- dir := string(d)
- if dir == "" {
- dir = "."
- }
- fullName := filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))
- f, err := os.Open(fullName)
- if err != nil {
- return nil, mapOpenError(err, fullName, filepath.Separator, os.Stat)
- }
- return f, nil
-}
-
-// A FileSystem implements access to a collection of named files.
-// The elements in a file path are separated by slash ('/', U+002F)
-// characters, regardless of host operating system convention.
-// See the FileServer function to convert a FileSystem to a Handler.
-//
-// This interface predates the fs.FS interface, which can be used instead:
-// the FS adapter function converts an fs.FS to a FileSystem.
-type FileSystem interface {
- Open(name string) (File, error)
-}
-
-// A File is returned by a FileSystem's Open method and can be
-// served by the FileServer implementation.
-//
-// The methods should behave the same as those on an *os.File.
-type File interface {
- io.Closer
- io.Reader
- io.Seeker
- Readdir(count int) ([]fs.FileInfo, error)
- Stat() (fs.FileInfo, error)
-}
-
-type anyDirs interface {
- len() int
- name(i int) string
- isDir(i int) bool
-}
-
-type fileInfoDirs []fs.FileInfo
-
-func (d fileInfoDirs) len() int { return len(d) }
-func (d fileInfoDirs) isDir(i int) bool { return d[i].IsDir() }
-func (d fileInfoDirs) name(i int) string { return d[i].Name() }
-
-type dirEntryDirs []fs.DirEntry
-
-func (d dirEntryDirs) len() int { return len(d) }
-func (d dirEntryDirs) isDir(i int) bool { return d[i].IsDir() }
-func (d dirEntryDirs) name(i int) string { return d[i].Name() }
-
-func dirList(w ResponseWriter, r *Request, f File) {
- // Prefer to use ReadDir instead of Readdir,
- // because the former doesn't require calling
- // Stat on every entry of a directory on Unix.
- var dirs anyDirs
- var err error
- if d, ok := f.(fs.ReadDirFile); ok {
- var list dirEntryDirs
- list, err = d.ReadDir(-1)
- dirs = list
- } else {
- var list fileInfoDirs
- list, err = f.Readdir(-1)
- dirs = list
- }
-
- if err != nil {
- logf(r, "http: error reading directory: %v", err)
- Error(w, "Error reading directory", StatusInternalServerError)
- return
- }
- sort.Slice(dirs, func(i, j int) bool { return dirs.name(i) < dirs.name(j) })
-
- w.Header().Set("Content-Type", "text/html; charset=utf-8")
- fmt.Fprintf(w, "<pre>\n")
- for i, n := 0, dirs.len(); i < n; i++ {
- name := dirs.name(i)
- if dirs.isDir(i) {
- name += "/"
- }
- // name may contain '?' or '#', which must be escaped to remain
- // part of the URL path, and not indicate the start of a query
- // string or fragment.
- url := url.URL{Path: name}
- fmt.Fprintf(w, "<a href=\"%s\">%s</a>\n", url.String(), htmlReplacer.Replace(name))
- }
- fmt.Fprintf(w, "</pre>\n")
-}
-
-// ServeContent replies to the request using the content in the
-// provided ReadSeeker. The main benefit of ServeContent over io.Copy
-// is that it handles Range requests properly, sets the MIME type, and
-// handles If-Match, If-Unmodified-Since, If-None-Match, If-Modified-Since,
-// and If-Range requests.
-//
-// If the response's Content-Type header is not set, ServeContent
-// first tries to deduce the type from name's file extension and,
-// if that fails, falls back to reading the first block of the content
-// and passing it to DetectContentType.
-// The name is otherwise unused; in particular it can be empty and is
-// never sent in the response.
-//
-// If modtime is not the zero time or Unix epoch, ServeContent
-// includes it in a Last-Modified header in the response. If the
-// request includes an If-Modified-Since header, ServeContent uses
-// modtime to decide whether the content needs to be sent at all.
-//
-// The content's Seek method must work: ServeContent uses
-// a seek to the end of the content to determine its size.
-//
-// If the caller has set w's ETag header formatted per RFC 7232, section 2.3,
-// ServeContent uses it to handle requests using If-Match, If-None-Match, or If-Range.
-//
-// Note that *os.File implements the io.ReadSeeker interface.
-func ServeContent(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker) {
- sizeFunc := func() (int64, error) {
- size, err := content.Seek(0, io.SeekEnd)
- if err != nil {
- return 0, errSeeker
- }
- _, err = content.Seek(0, io.SeekStart)
- if err != nil {
- return 0, errSeeker
- }
- return size, nil
- }
- serveContent(w, req, name, modtime, sizeFunc, content)
-}
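-
-// Editorial sketch (not part of the original file): a handler that serves an
-// in-memory payload with Range and conditional-request support; the names and
-// payload are illustrative. bytes.NewReader satisfies io.ReadSeeker.
-//
-//	func greet(w http.ResponseWriter, r *http.Request) {
-//		data := bytes.NewReader([]byte("hello, range requests"))
-//		// The ".txt" extension selects the Content-Type; the modtime
-//		// enables If-Modified-Since handling.
-//		http.ServeContent(w, r, "greeting.txt", time.Now(), data)
-//	}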
-
-// errSeeker is returned by ServeContent's sizeFunc when the content
-// doesn't seek properly. The underlying Seeker's error text isn't
-// included in the sizeFunc reply so it's not sent over HTTP to end
-// users.
-var errSeeker = errors.New("seeker can't seek")
-
-// errNoOverlap is returned by serveContent's parseRange if first-byte-pos of
-// all of the byte-range-spec values is greater than the content size.
-var errNoOverlap = errors.New("invalid range: failed to overlap")
-
-// If name is empty, the filename is unknown (it is used for the MIME type, before sniffing).
-// If modtime.IsZero(), the modification time is unknown.
-// The content must be positioned at the beginning of the file.
-// The sizeFunc is called at most once. Its error, if any, is sent in the HTTP response.
-func serveContent(w ResponseWriter, r *Request, name string, modtime time.Time, sizeFunc func() (int64, error), content io.ReadSeeker) {
- setLastModified(w, modtime)
- done, rangeReq := checkPreconditions(w, r, modtime)
- if done {
- return
- }
-
- code := StatusOK
-
- // If Content-Type isn't set, use the file's extension to find it, but
- // if the Content-Type is unset explicitly, do not sniff the type.
- ctypes, haveType := w.Header()["Content-Type"]
- var ctype string
- if !haveType {
- ctype = mime.TypeByExtension(filepath.Ext(name))
- if ctype == "" {
- // read a chunk to decide between utf-8 text and binary
- var buf [sniffLen]byte
- n, _ := io.ReadFull(content, buf[:])
- ctype = DetectContentType(buf[:n])
- _, err := content.Seek(0, io.SeekStart) // rewind to output whole file
- if err != nil {
- Error(w, "seeker can't seek", StatusInternalServerError)
- return
- }
- }
- w.Header().Set("Content-Type", ctype)
- } else if len(ctypes) > 0 {
- ctype = ctypes[0]
- }
-
- size, err := sizeFunc()
- if err != nil {
- Error(w, err.Error(), StatusInternalServerError)
- return
- }
-
- // handle Content-Range header.
- sendSize := size
- var sendContent io.Reader = content
- if size >= 0 {
- ranges, err := parseRange(rangeReq, size)
- if err != nil {
- if err == errNoOverlap {
- w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", size))
- }
- Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
- return
- }
- if sumRangesSize(ranges) > size {
- // The total number of bytes in all the ranges
- // is larger than the size of the file by
- // itself, so this is probably an attack, or a
- // dumb client. Ignore the range request.
- ranges = nil
- }
- switch {
- case len(ranges) == 1:
- // RFC 7233, Section 4.1:
- // "If a single part is being transferred, the server
- // generating the 206 response MUST generate a
- // Content-Range header field, describing what range
- // of the selected representation is enclosed, and a
- // payload consisting of the range.
- // ...
- // A server MUST NOT generate a multipart response to
- // a request for a single range, since a client that
- // does not request multiple parts might not support
- // multipart responses."
- ra := ranges[0]
- if _, err := content.Seek(ra.start, io.SeekStart); err != nil {
- Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
- return
- }
- sendSize = ra.length
- code = StatusPartialContent
- w.Header().Set("Content-Range", ra.contentRange(size))
- case len(ranges) > 1:
- sendSize = rangesMIMESize(ranges, ctype, size)
- code = StatusPartialContent
-
- pr, pw := io.Pipe()
- mw := multipart.NewWriter(pw)
- w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
- sendContent = pr
- defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
- go func() {
- for _, ra := range ranges {
- part, err := mw.CreatePart(ra.mimeHeader(ctype, size))
- if err != nil {
- pw.CloseWithError(err)
- return
- }
- if _, err := content.Seek(ra.start, io.SeekStart); err != nil {
- pw.CloseWithError(err)
- return
- }
- if _, err := io.CopyN(part, content, ra.length); err != nil {
- pw.CloseWithError(err)
- return
- }
- }
- mw.Close()
- pw.Close()
- }()
- }
-
- w.Header().Set("Accept-Ranges", "bytes")
- if w.Header().Get("Content-Encoding") == "" {
- w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
- }
- }
-
- w.WriteHeader(code)
-
- if r.Method != "HEAD" {
- io.CopyN(w, sendContent, sendSize)
- }
-}
-
-// scanETag determines if a syntactically valid ETag is present at the start
-// of s. If so, it returns the ETag and the text remaining after it.
-// Otherwise, it returns "", "".
-func scanETag(s string) (etag string, remain string) {
- s = textproto.TrimString(s)
- start := 0
- if strings.HasPrefix(s, "W/") {
- start = 2
- }
- if len(s[start:]) < 2 || s[start] != '"' {
- return "", ""
- }
- // ETag is either W/"text" or "text".
- // See RFC 7232 2.3.
- for i := start + 1; i < len(s); i++ {
- c := s[i]
- switch {
- // Character values allowed in ETags.
- case c == 0x21 || c >= 0x23 && c <= 0x7E || c >= 0x80:
- case c == '"':
- return s[:i+1], s[i+1:]
- default:
- return "", ""
- }
- }
- return "", ""
-}
-
-// etagStrongMatch reports whether a and b match using strong ETag comparison.
-// Assumes a and b are valid ETags.
-func etagStrongMatch(a, b string) bool {
- return a == b && a != "" && a[0] == '"'
-}
-
-// etagWeakMatch reports whether a and b match using weak ETag comparison.
-// Assumes a and b are valid ETags.
-func etagWeakMatch(a, b string) bool {
- return strings.TrimPrefix(a, "W/") == strings.TrimPrefix(b, "W/")
-}
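-
-// Editorial sketch (not part of the original file): strong vs. weak comparison
-// on illustrative tags.
-//
-//	etagStrongMatch(`"v1"`, `"v1"`)   // true
-//	etagStrongMatch(`W/"v1"`, `"v1"`) // false: a weak tag never matches strongly
-//	etagWeakMatch(`W/"v1"`, `"v1"`)   // true: the W/ prefix is ignored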
-
-// condResult is the result of an HTTP request precondition check.
-// See https://tools.ietf.org/html/rfc7232 section 3.
-type condResult int
-
-const (
- condNone condResult = iota
- condTrue
- condFalse
-)
-
-func checkIfMatch(w ResponseWriter, r *Request) condResult {
- im := r.Header.Get("If-Match")
- if im == "" {
- return condNone
- }
- for {
- im = textproto.TrimString(im)
- if len(im) == 0 {
- break
- }
- if im[0] == ',' {
- im = im[1:]
- continue
- }
- if im[0] == '*' {
- return condTrue
- }
- etag, remain := scanETag(im)
- if etag == "" {
- break
- }
- if etagStrongMatch(etag, w.Header().get("Etag")) {
- return condTrue
- }
- im = remain
- }
-
- return condFalse
-}
-
-func checkIfUnmodifiedSince(r *Request, modtime time.Time) condResult {
- ius := r.Header.Get("If-Unmodified-Since")
- if ius == "" || isZeroTime(modtime) {
- return condNone
- }
- t, err := ParseTime(ius)
- if err != nil {
- return condNone
- }
-
- // The Last-Modified header truncates sub-second precision so
- // the modtime needs to be truncated too.
- modtime = modtime.Truncate(time.Second)
- if modtime.Before(t) || modtime.Equal(t) {
- return condTrue
- }
- return condFalse
-}
-
-func checkIfNoneMatch(w ResponseWriter, r *Request) condResult {
- inm := r.Header.get("If-None-Match")
- if inm == "" {
- return condNone
- }
- buf := inm
- for {
- buf = textproto.TrimString(buf)
- if len(buf) == 0 {
- break
- }
- if buf[0] == ',' {
- buf = buf[1:]
- continue
- }
- if buf[0] == '*' {
- return condFalse
- }
- etag, remain := scanETag(buf)
- if etag == "" {
- break
- }
- if etagWeakMatch(etag, w.Header().get("Etag")) {
- return condFalse
- }
- buf = remain
- }
- return condTrue
-}
-
-func checkIfModifiedSince(r *Request, modtime time.Time) condResult {
- if r.Method != "GET" && r.Method != "HEAD" {
- return condNone
- }
- ims := r.Header.Get("If-Modified-Since")
- if ims == "" || isZeroTime(modtime) {
- return condNone
- }
- t, err := ParseTime(ims)
- if err != nil {
- return condNone
- }
- // The Last-Modified header truncates sub-second precision so
- // the modtime needs to be truncated too.
- modtime = modtime.Truncate(time.Second)
- if modtime.Before(t) || modtime.Equal(t) {
- return condFalse
- }
- return condTrue
-}
-
-func checkIfRange(w ResponseWriter, r *Request, modtime time.Time) condResult {
- if r.Method != "GET" && r.Method != "HEAD" {
- return condNone
- }
- ir := r.Header.get("If-Range")
- if ir == "" {
- return condNone
- }
- etag, _ := scanETag(ir)
- if etag != "" {
- if etagStrongMatch(etag, w.Header().Get("Etag")) {
- return condTrue
- } else {
- return condFalse
- }
- }
- // The If-Range value is typically the ETag value, but it may also be
- // the modtime date. See golang.org/issue/8367.
- if modtime.IsZero() {
- return condFalse
- }
- t, err := ParseTime(ir)
- if err != nil {
- return condFalse
- }
- if t.Unix() == modtime.Unix() {
- return condTrue
- }
- return condFalse
-}
-
-var unixEpochTime = time.Unix(0, 0)
-
-// isZeroTime reports whether t is obviously unspecified (either zero or Unix()=0).
-func isZeroTime(t time.Time) bool {
- return t.IsZero() || t.Equal(unixEpochTime)
-}
-
-func setLastModified(w ResponseWriter, modtime time.Time) {
- if !isZeroTime(modtime) {
- w.Header().Set("Last-Modified", modtime.UTC().Format(TimeFormat))
- }
-}
-
-func writeNotModified(w ResponseWriter) {
- // RFC 7232 section 4.1:
- // a sender SHOULD NOT generate representation metadata other than the
- // above listed fields unless said metadata exists for the purpose of
- // guiding cache updates (e.g., Last-Modified might be useful if the
- // response does not have an ETag field).
- h := w.Header()
- delete(h, "Content-Type")
- delete(h, "Content-Length")
- if h.Get("Etag") != "" {
- delete(h, "Last-Modified")
- }
- w.WriteHeader(StatusNotModified)
-}
-
-// checkPreconditions evaluates request preconditions and reports whether a precondition
-// resulted in sending StatusNotModified or StatusPreconditionFailed.
-func checkPreconditions(w ResponseWriter, r *Request, modtime time.Time) (done bool, rangeHeader string) {
- // This function carefully follows RFC 7232 section 6.
- ch := checkIfMatch(w, r)
- if ch == condNone {
- ch = checkIfUnmodifiedSince(r, modtime)
- }
- if ch == condFalse {
- w.WriteHeader(StatusPreconditionFailed)
- return true, ""
- }
- switch checkIfNoneMatch(w, r) {
- case condFalse:
- if r.Method == "GET" || r.Method == "HEAD" {
- writeNotModified(w)
- return true, ""
- } else {
- w.WriteHeader(StatusPreconditionFailed)
- return true, ""
- }
- case condNone:
- if checkIfModifiedSince(r, modtime) == condFalse {
- writeNotModified(w)
- return true, ""
- }
- }
-
- rangeHeader = r.Header.get("Range")
- if rangeHeader != "" && checkIfRange(w, r, modtime) == condFalse {
- rangeHeader = ""
- }
- return false, rangeHeader
-}
-
-// name is '/'-separated, not filepath.Separator.
-func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirect bool) {
- const indexPage = "/index.html"
-
- // redirect .../index.html to .../
- // can't use Redirect() because that would make the path absolute,
- // which would be a problem running under StripPrefix
- if strings.HasSuffix(r.URL.Path, indexPage) {
- localRedirect(w, r, "./")
- return
- }
-
- f, err := fs.Open(name)
- if err != nil {
- msg, code := toHTTPError(err)
- Error(w, msg, code)
- return
- }
- defer f.Close()
-
- d, err := f.Stat()
- if err != nil {
- msg, code := toHTTPError(err)
- Error(w, msg, code)
- return
- }
-
- if redirect {
- // redirect to canonical path: / at end of directory url
- // r.URL.Path always begins with /
- url := r.URL.Path
- if d.IsDir() {
- if url[len(url)-1] != '/' {
- localRedirect(w, r, path.Base(url)+"/")
- return
- }
- } else {
- if url[len(url)-1] == '/' {
- localRedirect(w, r, "../"+path.Base(url))
- return
- }
- }
- }
-
- if d.IsDir() {
- url := r.URL.Path
- // redirect if the directory name doesn't end in a slash
- if url == "" || url[len(url)-1] != '/' {
- localRedirect(w, r, path.Base(url)+"/")
- return
- }
-
- // use contents of index.html for directory, if present
- index := strings.TrimSuffix(name, "/") + indexPage
- ff, err := fs.Open(index)
- if err == nil {
- defer ff.Close()
- dd, err := ff.Stat()
- if err == nil {
- name = index
- d = dd
- f = ff
- }
- }
- }
-
- // Still a directory? (we didn't find an index.html file)
- if d.IsDir() {
- if checkIfModifiedSince(r, d.ModTime()) == condFalse {
- writeNotModified(w)
- return
- }
- setLastModified(w, d.ModTime())
- dirList(w, r, f)
- return
- }
-
- // serveContent will check modification time
- sizeFunc := func() (int64, error) { return d.Size(), nil }
- serveContent(w, r, d.Name(), d.ModTime(), sizeFunc, f)
-}
-
-// toHTTPError returns a non-specific HTTP error message and status code
-// for a given non-nil error value. It's important that toHTTPError does not
-// actually return err.Error(), since msg and httpStatus are returned to users,
-// and historically Go's ServeContent always returned just "404 Not Found" for
-// all errors. We don't want to start leaking information in error messages.
-func toHTTPError(err error) (msg string, httpStatus int) {
- if errors.Is(err, fs.ErrNotExist) {
- return "404 page not found", StatusNotFound
- }
- if errors.Is(err, fs.ErrPermission) {
- return "403 Forbidden", StatusForbidden
- }
- // Default:
- return "500 Internal Server Error", StatusInternalServerError
-}
-
-// localRedirect gives a Moved Permanently response.
-// It does not convert relative paths to absolute paths like Redirect does.
-func localRedirect(w ResponseWriter, r *Request, newPath string) {
- if q := r.URL.RawQuery; q != "" {
- newPath += "?" + q
- }
- w.Header().Set("Location", newPath)
- w.WriteHeader(StatusMovedPermanently)
-}
-
-// ServeFile replies to the request with the contents of the named
-// file or directory.
-//
-// If the provided file or directory name is a relative path, it is
-// interpreted relative to the current directory and may ascend to
-// parent directories. If the provided name is constructed from user
-// input, it should be sanitized before calling ServeFile.
-//
-// As a precaution, ServeFile will reject requests where r.URL.Path
-// contains a ".." path element; this protects against callers who
-// might unsafely use filepath.Join on r.URL.Path without sanitizing
-// it and then use that filepath.Join result as the name argument.
-//
-// As another special case, ServeFile redirects any request where r.URL.Path
-// ends in "/index.html" to the same path, without the final
-// "index.html". To avoid such redirects either modify the path or
-// use ServeContent.
-//
-// Outside of those two special cases, ServeFile does not use
-// r.URL.Path for selecting the file or directory to serve; only the
-// file or directory provided in the name argument is used.
-func ServeFile(w ResponseWriter, r *Request, name string) {
- if containsDotDot(r.URL.Path) {
- // Too many programs use r.URL.Path to construct the argument to
-		// serveFile. Reject the request under the assumption that this
-		// happened here, and that ".." may not be wanted.
- // Note that name might not contain "..", for example if code (still
- // incorrectly) used filepath.Join(myDir, r.URL.Path).
- Error(w, "invalid URL path", StatusBadRequest)
- return
- }
- dir, file := filepath.Split(name)
- serveFile(w, r, Dir(dir), file, false)
-}
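-
-// Editorial sketch (not part of the original file): serving one fixed file
-// regardless of the request path; the route and path are illustrative.
-//
-//	http.HandleFunc("/report", func(w http.ResponseWriter, r *http.Request) {
-//		http.ServeFile(w, r, "/var/data/report.pdf")
-//	})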
-
-func containsDotDot(v string) bool {
- if !strings.Contains(v, "..") {
- return false
- }
- for _, ent := range strings.FieldsFunc(v, isSlashRune) {
- if ent == ".." {
- return true
- }
- }
- return false
-}
-
-func isSlashRune(r rune) bool { return r == '/' || r == '\\' }
-
-type fileHandler struct {
- root FileSystem
-}
-
-type ioFS struct {
- fsys fs.FS
-}
-
-type ioFile struct {
- file fs.File
-}
-
-func (f ioFS) Open(name string) (File, error) {
- if name == "/" {
- name = "."
- } else {
- name = strings.TrimPrefix(name, "/")
- }
- file, err := f.fsys.Open(name)
- if err != nil {
- return nil, mapOpenError(err, name, '/', func(path string) (fs.FileInfo, error) {
- return fs.Stat(f.fsys, path)
- })
- }
- return ioFile{file}, nil
-}
-
-func (f ioFile) Close() error { return f.file.Close() }
-func (f ioFile) Read(b []byte) (int, error) { return f.file.Read(b) }
-func (f ioFile) Stat() (fs.FileInfo, error) { return f.file.Stat() }
-
-var errMissingSeek = errors.New("io.File missing Seek method")
-var errMissingReadDir = errors.New("io.File directory missing ReadDir method")
-
-func (f ioFile) Seek(offset int64, whence int) (int64, error) {
- s, ok := f.file.(io.Seeker)
- if !ok {
- return 0, errMissingSeek
- }
- return s.Seek(offset, whence)
-}
-
-func (f ioFile) ReadDir(count int) ([]fs.DirEntry, error) {
- d, ok := f.file.(fs.ReadDirFile)
- if !ok {
- return nil, errMissingReadDir
- }
- return d.ReadDir(count)
-}
-
-func (f ioFile) Readdir(count int) ([]fs.FileInfo, error) {
- d, ok := f.file.(fs.ReadDirFile)
- if !ok {
- return nil, errMissingReadDir
- }
- var list []fs.FileInfo
- for {
- dirs, err := d.ReadDir(count - len(list))
- for _, dir := range dirs {
- info, err := dir.Info()
- if err != nil {
- // Pretend it doesn't exist, like (*os.File).Readdir does.
- continue
- }
- list = append(list, info)
- }
- if err != nil {
- return list, err
- }
- if count < 0 || len(list) >= count {
- break
- }
- }
- return list, nil
-}
-
-// FS converts fsys to a FileSystem implementation,
-// for use with FileServer and NewFileTransport.
-func FS(fsys fs.FS) FileSystem {
- return ioFS{fsys}
-}
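-
-// Editorial sketch (not part of the original file): FS lets an fs.FS, such as
-// an embedded tree, back the file server; the //go:embed directive and path
-// are illustrative.
-//
-//	//go:embed static
-//	var staticFS embed.FS
-//
-//	http.Handle("/", http.FileServer(http.FS(staticFS)))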
-
-// FileServer returns a handler that serves HTTP requests
-// with the contents of the file system rooted at root.
-//
-// As a special case, the returned file server redirects any request
-// ending in "/index.html" to the same path, without the final
-// "index.html".
-//
-// To use the operating system's file system implementation,
-// use http.Dir:
-//
-// http.Handle("/", http.FileServer(http.Dir("/tmp")))
-//
-// To use an fs.FS implementation, use http.FS to convert it:
-//
-// http.Handle("/", http.FileServer(http.FS(fsys)))
-//
-func FileServer(root FileSystem) Handler {
- return &fileHandler{root}
-}
-
-func (f *fileHandler) ServeHTTP(w ResponseWriter, r *Request) {
- upath := r.URL.Path
- if !strings.HasPrefix(upath, "/") {
- upath = "/" + upath
- r.URL.Path = upath
- }
- serveFile(w, r, f.root, path.Clean(upath), true)
-}
-
-// httpRange specifies the byte range to be sent to the client.
-type httpRange struct {
- start, length int64
-}
-
-func (r httpRange) contentRange(size int64) string {
- return fmt.Sprintf("bytes %d-%d/%d", r.start, r.start+r.length-1, size)
-}
-
-func (r httpRange) mimeHeader(contentType string, size int64) textproto.MIMEHeader {
- return textproto.MIMEHeader{
- "Content-Range": {r.contentRange(size)},
- "Content-Type": {contentType},
- }
-}
-
-// parseRange parses a Range header string as per RFC 7233.
-// errNoOverlap is returned if none of the ranges overlap.
-func parseRange(s string, size int64) ([]httpRange, error) {
- if s == "" {
- return nil, nil // header not present
- }
- const b = "bytes="
- if !strings.HasPrefix(s, b) {
- return nil, errors.New("invalid range")
- }
- var ranges []httpRange
- noOverlap := false
- for _, ra := range strings.Split(s[len(b):], ",") {
- ra = textproto.TrimString(ra)
- if ra == "" {
- continue
- }
- start, end, ok := strings.Cut(ra, "-")
- if !ok {
- return nil, errors.New("invalid range")
- }
- start, end = textproto.TrimString(start), textproto.TrimString(end)
- var r httpRange
- if start == "" {
- // If no start is specified, end specifies the
- // range start relative to the end of the file,
- // and we are dealing with <suffix-length>
- // which has to be a non-negative integer as per
- // RFC 7233 Section 2.1 "Byte-Ranges".
- if end == "" || end[0] == '-' {
- return nil, errors.New("invalid range")
- }
- i, err := strconv.ParseInt(end, 10, 64)
- if i < 0 || err != nil {
- return nil, errors.New("invalid range")
- }
- if i > size {
- i = size
- }
- r.start = size - i
- r.length = size - r.start
- } else {
- i, err := strconv.ParseInt(start, 10, 64)
- if err != nil || i < 0 {
- return nil, errors.New("invalid range")
- }
- if i >= size {
- // If the range begins after the size of the content,
- // then it does not overlap.
- noOverlap = true
- continue
- }
- r.start = i
- if end == "" {
- // If no end is specified, range extends to end of the file.
- r.length = size - r.start
- } else {
- i, err := strconv.ParseInt(end, 10, 64)
- if err != nil || r.start > i {
- return nil, errors.New("invalid range")
- }
- if i >= size {
- i = size - 1
- }
- r.length = i - r.start + 1
- }
- }
- ranges = append(ranges, r)
- }
- if noOverlap && len(ranges) == 0 {
- // The specified ranges did not overlap with the content.
- return nil, errNoOverlap
- }
- return ranges, nil
-}
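-
-// Editorial sketch (not part of the original file): how parseRange resolves
-// the RFC 7233 forms against a 1000-byte representation.
-//
-//	parseRange("bytes=0-499", 1000) // [{start: 0, length: 500}]
-//	parseRange("bytes=500-", 1000)  // [{start: 500, length: 500}]
-//	parseRange("bytes=-200", 1000)  // [{start: 800, length: 200}] (suffix form)
-//	parseRange("bytes=2000-", 1000) // nil, errNoOverlap (starts past the end)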
-
-// countingWriter counts how many bytes have been written to it.
-type countingWriter int64
-
-func (w *countingWriter) Write(p []byte) (n int, err error) {
- *w += countingWriter(len(p))
- return len(p), nil
-}
-
-// rangesMIMESize returns the number of bytes it takes to encode the
-// provided ranges as a multipart response.
-func rangesMIMESize(ranges []httpRange, contentType string, contentSize int64) (encSize int64) {
- var w countingWriter
- mw := multipart.NewWriter(&w)
- for _, ra := range ranges {
- mw.CreatePart(ra.mimeHeader(contentType, contentSize))
- encSize += ra.length
- }
- mw.Close()
- encSize += int64(w)
- return
-}
-
-func sumRangesSize(ranges []httpRange) (size int64) {
- for _, ra := range ranges {
- size += ra.length
- }
- return
-}
diff --git a/contrib/go/_std_1.18/src/net/http/h2_bundle.go b/contrib/go/_std_1.18/src/net/http/h2_bundle.go
deleted file mode 100644
index bb82f24585..0000000000
--- a/contrib/go/_std_1.18/src/net/http/h2_bundle.go
+++ /dev/null
@@ -1,10858 +0,0 @@
-//go:build !nethttpomithttp2
-// +build !nethttpomithttp2
-
-// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
-// $ bundle -o=h2_bundle.go -prefix=http2 -tags=!nethttpomithttp2 golang.org/x/net/http2
-
-// Package http2 implements the HTTP/2 protocol.
-//
-// This package is low-level and intended to be used directly by very
-// few people. Most users will use it indirectly through the automatic
-// use by the net/http package (from Go 1.6 and later).
-// For use in earlier Go versions see ConfigureServer. (Transport support
-// requires Go 1.6 or later)
-//
-// See https://http2.github.io/ for more information on HTTP/2.
-//
-// See https://http2.golang.org/ for a test server running this code.
-//
-
-package http
-
-import (
- "bufio"
- "bytes"
- "compress/gzip"
- "context"
- "crypto/rand"
- "crypto/tls"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "math"
- mathrand "math/rand"
- "net"
- "net/http/httptrace"
- "net/textproto"
- "net/url"
- "os"
- "reflect"
- "runtime"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/net/http/httpguts"
- "golang.org/x/net/http2/hpack"
- "golang.org/x/net/idna"
-)
-
-// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
-// contains helpers that wrap Unicode-aware functions which would otherwise be
-// unsafe and could introduce vulnerabilities if used improperly.
-
-// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
-// are equal, ASCII-case-insensitively.
-func http2asciiEqualFold(s, t string) bool {
- if len(s) != len(t) {
- return false
- }
- for i := 0; i < len(s); i++ {
- if http2lower(s[i]) != http2lower(t[i]) {
- return false
- }
- }
- return true
-}
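-
-// Editorial sketch (not part of the original file): ASCII-only folding differs
-// from strings.EqualFold on non-ASCII input; the Kelvin sign below is the
-// classic example.
-//
-//	http2asciiEqualFold("Content-Type", "content-type") // true
-//	http2asciiEqualFold("K", "\u212a")                  // false: 'K' folds to the
-//	                                                    // Kelvin sign under Unicode
-//	                                                    // rules, but not ASCII ones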
-
-// lower returns the ASCII lowercase version of b.
-func http2lower(b byte) byte {
- if 'A' <= b && b <= 'Z' {
- return b + ('a' - 'A')
- }
- return b
-}
-
-// isASCIIPrint returns whether s is ASCII and printable according to
-// https://tools.ietf.org/html/rfc20#section-4.2.
-func http2isASCIIPrint(s string) bool {
- for i := 0; i < len(s); i++ {
- if s[i] < ' ' || s[i] > '~' {
- return false
- }
- }
- return true
-}
-
-// asciiToLower returns the lowercase version of s if s is ASCII and printable,
-// and whether or not it was.
-func http2asciiToLower(s string) (lower string, ok bool) {
- if !http2isASCIIPrint(s) {
- return "", false
- }
- return strings.ToLower(s), true
-}
-
-// A list of the possible cipher suite ids. Taken from
-// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt
-
-const (
- http2cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
- http2cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
- http2cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
- http2cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
- http2cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
- http2cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
- http2cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
- http2cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
- http2cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
- http2cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
- http2cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
- http2cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
- http2cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
- http2cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
- http2cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
- http2cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
- http2cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
- http2cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
- http2cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
- http2cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
- http2cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
- http2cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
- http2cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
- http2cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
- http2cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
- http2cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
- http2cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
- http2cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
- // Reserved uint16 = 0x001C-1D
- http2cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
- http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
- http2cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
- http2cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
- http2cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
- http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
- http2cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
- http2cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
- http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
- http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
- http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
- http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
- http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
- http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
- http2cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
- http2cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
- http2cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
- http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
- http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
- http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
- http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
- http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
- http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
- http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
- http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
- http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
- http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
- http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
- http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
- http2cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
- http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
- http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
- http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
- http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
- http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
- http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
- http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
- http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
- http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
- http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
- http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
- // Reserved uint16 = 0x0047-4F
- // Reserved uint16 = 0x0050-58
- // Reserved uint16 = 0x0059-5C
- // Unassigned uint16 = 0x005D-5F
- // Reserved uint16 = 0x0060-66
- http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
- http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
- http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
- http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
- http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
- http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
- http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
- // Unassigned uint16 = 0x006E-83
- http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
- http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
- http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
- http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
- http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
- http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
- http2cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
- http2cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
- http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
- http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
- http2cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
- http2cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
- http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
- http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
- http2cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
- http2cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
- http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
- http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
- http2cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
- http2cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
- http2cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
- http2cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
- http2cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
- http2cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
- http2cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
- http2cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
- http2cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
- http2cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
- http2cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
- http2cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
- http2cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
- http2cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
- http2cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
- http2cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
- http2cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
- http2cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
- http2cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
- http2cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
- http2cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
- http2cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
- http2cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
- http2cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
- http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
- http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
- http2cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
- http2cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
- http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
- http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
- http2cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
- http2cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
- http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
- http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
- http2cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
- http2cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
- http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
- http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
- http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
- http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
- http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
- http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
- http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
- http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
- http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
- http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
- http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
- http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
- // Unassigned uint16 = 0x00C6-FE
- http2cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
- // Unassigned uint16 = 0x01-55,*
- http2cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
- // Unassigned uint16 = 0x5601 - 0xC000
- http2cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
- http2cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
- http2cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
- http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
- http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
- http2cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
- http2cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
- http2cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
- http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
- http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
- http2cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
- http2cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
- http2cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
- http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
- http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
- http2cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
- http2cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
- http2cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
- http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
- http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
- http2cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
- http2cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
- http2cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
- http2cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
- http2cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
- http2cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
- http2cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
- http2cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
- http2cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
- http2cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
- http2cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
- http2cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
- http2cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
- http2cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
- http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
- http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
- http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
- http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
- http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
- http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
- http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
- http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
- http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
- http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
- http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
- http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
- http2cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
- http2cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
- http2cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
- http2cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
- http2cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
- http2cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
- http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
- http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
- http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
- http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
- http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
- http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
- http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
- http2cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
- http2cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
- http2cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
- http2cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
- http2cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
- http2cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
- http2cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
- http2cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
- http2cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
- http2cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
- http2cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
- http2cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
- http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
- http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
- http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
- http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
- http2cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
- http2cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
- http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
- http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
- http2cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
- http2cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
- http2cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
- http2cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
- http2cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
- http2cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
- http2cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
- http2cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
- http2cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
- http2cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
- http2cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
- http2cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
- http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
- http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
- http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
- http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
- http2cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
- http2cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
- http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
- http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
- http2cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
- http2cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
- http2cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
- http2cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
- http2cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
- http2cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
- http2cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
- http2cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
- http2cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
- http2cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
- http2cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
- http2cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
- http2cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
- http2cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
- http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
- http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
- http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
- http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
- http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
- http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
- http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
- http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
- http2cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
- http2cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
- http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
- http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
- http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
- http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
- http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
- http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
- http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
- http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
- http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
- http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
- http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
- http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
- http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
- http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
- http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
- http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
- http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
- http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
- http2cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
- http2cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
- http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
- http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
- http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
- http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
- http2cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
- http2cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
- http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
- http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
- http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
- http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
- http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
- http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
- http2cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
- http2cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
- http2cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
- http2cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
- http2cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
- http2cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
- http2cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
- http2cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
- http2cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
- http2cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
- http2cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
- http2cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
- http2cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
- http2cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
- http2cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
- http2cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
- http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
- http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
- http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
- http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
- // Unassigned uint16 = 0xC0B0-FF
- // Unassigned uint16 = 0xC1-CB,*
- // Unassigned uint16 = 0xCC00-A7
- http2cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
- http2cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
- http2cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
- http2cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
- http2cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
- http2cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
- http2cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
-)
-
-// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
-// References:
-// https://tools.ietf.org/html/rfc7540#appendix-A
-// Reject cipher suites from Appendix A.
-// "This list includes those cipher suites that do not
-// offer an ephemeral key exchange and those that are
-// based on the TLS null, stream or block cipher type"
-func http2isBadCipher(cipher uint16) bool {
- switch cipher {
- case http2cipher_TLS_NULL_WITH_NULL_NULL,
- http2cipher_TLS_RSA_WITH_NULL_MD5,
- http2cipher_TLS_RSA_WITH_NULL_SHA,
- http2cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
- http2cipher_TLS_RSA_WITH_RC4_128_MD5,
- http2cipher_TLS_RSA_WITH_RC4_128_SHA,
- http2cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
- http2cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
- http2cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
- http2cipher_TLS_RSA_WITH_DES_CBC_SHA,
- http2cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
- http2cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
- http2cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
- http2cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
- http2cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
- http2cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
- http2cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
- http2cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
- http2cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
- http2cipher_TLS_DH_anon_WITH_RC4_128_MD5,
- http2cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
- http2cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
- http2cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_KRB5_WITH_DES_CBC_SHA,
- http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_KRB5_WITH_RC4_128_SHA,
- http2cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
- http2cipher_TLS_KRB5_WITH_DES_CBC_MD5,
- http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
- http2cipher_TLS_KRB5_WITH_RC4_128_MD5,
- http2cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
- http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
- http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
- http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
- http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
- http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
- http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
- http2cipher_TLS_PSK_WITH_NULL_SHA,
- http2cipher_TLS_DHE_PSK_WITH_NULL_SHA,
- http2cipher_TLS_RSA_PSK_WITH_NULL_SHA,
- http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_RSA_WITH_NULL_SHA256,
- http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
- http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
- http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
- http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
- http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
- http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
- http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
- http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
- http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
- http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
- http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
- http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
- http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
- http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
- http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
- http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
- http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
- http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
- http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
- http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
- http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
- http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
- http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
- http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
- http2cipher_TLS_PSK_WITH_RC4_128_SHA,
- http2cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
- http2cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
- http2cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_RSA_WITH_SEED_CBC_SHA,
- http2cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
- http2cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
- http2cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
- http2cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
- http2cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
- http2cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
- http2cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
- http2cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
- http2cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
- http2cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
- http2cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
- http2cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
- http2cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
- http2cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
- http2cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
- http2cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
- http2cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
- http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
- http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
- http2cipher_TLS_PSK_WITH_NULL_SHA256,
- http2cipher_TLS_PSK_WITH_NULL_SHA384,
- http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
- http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
- http2cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
- http2cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
- http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
- http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
- http2cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
- http2cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
- http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
- http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
- http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
- http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
- http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
- http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
- http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
- http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
- http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
- http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
- http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
- http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
- http2cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
- http2cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
- http2cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
- http2cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
- http2cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
- http2cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
- http2cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
- http2cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
- http2cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- http2cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_ECDH_anon_WITH_NULL_SHA,
- http2cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
- http2cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
- http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
- http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
- http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
- http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
- http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
- http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
- http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
- http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
- http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
- http2cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
- http2cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
- http2cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
- http2cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
- http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
- http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
- http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
- http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
- http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
- http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
- http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
- http2cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
- http2cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
- http2cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
- http2cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
- http2cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
- http2cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
- http2cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
- http2cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
- http2cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
- http2cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
- http2cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
- http2cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
- http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
- http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
- http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
- http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
- http2cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
- http2cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
- http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
- http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
- http2cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
- http2cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
- http2cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
- http2cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
- http2cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
- http2cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
- http2cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
- http2cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
- http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
- http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
- http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
- http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
- http2cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
- http2cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
- http2cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
- http2cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
- http2cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
- http2cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
- http2cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
- http2cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
- http2cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
- http2cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
- http2cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
- http2cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
- http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
- http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
- http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
- http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
- http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
- http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
- http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
- http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
- http2cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
- http2cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
- http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
- http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
- http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
- http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
- http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
- http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
- http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
- http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
- http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
- http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
- http2cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
- http2cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
- http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
- http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
- http2cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
- http2cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
- http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
- http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
- http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
- http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
- http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
- http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
- http2cipher_TLS_RSA_WITH_AES_128_CCM,
- http2cipher_TLS_RSA_WITH_AES_256_CCM,
- http2cipher_TLS_RSA_WITH_AES_128_CCM_8,
- http2cipher_TLS_RSA_WITH_AES_256_CCM_8,
- http2cipher_TLS_PSK_WITH_AES_128_CCM,
- http2cipher_TLS_PSK_WITH_AES_256_CCM,
- http2cipher_TLS_PSK_WITH_AES_128_CCM_8,
- http2cipher_TLS_PSK_WITH_AES_256_CCM_8:
- return true
- default:
- return false
- }
-}
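-
-// http2filterBadCiphers is an illustrative sketch added for exposition; it
-// is not part of the upstream Go sources. It shows the intended use of
-// isBadCipher: pruning a candidate list (e.g. a tls.Config.CipherSuites
-// slice) down to suites the HTTP/2 spec permits.
-func http2filterBadCiphers(candidates []uint16) []uint16 {
- good := make([]uint16, 0, len(candidates))
- for _, c := range candidates {
- if !http2isBadCipher(c) {
- good = append(good, c) // keep only spec-permitted suites
- }
- }
- return good
-}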
-
-// ClientConnPool manages a pool of HTTP/2 client connections.
-type http2ClientConnPool interface {
- // GetClientConn returns a specific HTTP/2 connection (usually
- // a TLS-TCP connection) to an HTTP/2 server. On success, the
- // returned ClientConn accounts for the upcoming RoundTrip
- // call, so the caller must follow through with that call. If the
- // caller needs to abandon it instead, ClientConn.RoundTrip can be
- // called with a bogus new(http.Request) to release the stream
- // reservation.
- GetClientConn(req *Request, addr string) (*http2ClientConn, error)
- MarkDead(*http2ClientConn)
-}
-
-// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
-// implementations which can close their idle connections.
-type http2clientConnPoolIdleCloser interface {
- http2ClientConnPool
- closeIdleConnections()
-}
-
-var (
- _ http2clientConnPoolIdleCloser = (*http2clientConnPool)(nil)
- _ http2clientConnPoolIdleCloser = http2noDialClientConnPool{}
-)
-
-// TODO: use singleflight for dialing and addConnCalls?
-type http2clientConnPool struct {
- t *http2Transport
-
- mu sync.Mutex // TODO: maybe switch to RWMutex
- // TODO: add support for sharing conns based on cert names
- // (e.g. share conn for googleapis.com and appspot.com)
- conns map[string][]*http2ClientConn // key is host:port
- dialing map[string]*http2dialCall // currently in-flight dials
- keys map[*http2ClientConn][]string
- addConnCalls map[string]*http2addConnCall // in-flight addConnIfNeeded calls
-}
-
-func (p *http2clientConnPool) GetClientConn(req *Request, addr string) (*http2ClientConn, error) {
- return p.getClientConn(req, addr, http2dialOnMiss)
-}
-
-const (
- http2dialOnMiss = true
- http2noDialOnMiss = false
-)
-
-func (p *http2clientConnPool) getClientConn(req *Request, addr string, dialOnMiss bool) (*http2ClientConn, error) {
- // TODO(dneil): Dial a new connection when t.DisableKeepAlives is set?
- if http2isConnectionCloseRequest(req) && dialOnMiss {
- // It gets its own connection.
- http2traceGetConn(req, addr)
- const singleUse = true
- cc, err := p.t.dialClientConn(req.Context(), addr, singleUse)
- if err != nil {
- return nil, err
- }
- return cc, nil
- }
- for {
- p.mu.Lock()
- for _, cc := range p.conns[addr] {
- if cc.ReserveNewRequest() {
- // When a connection is presented to us by the net/http package,
- // the GetConn hook has already been called.
- // Don't call it a second time here.
- if !cc.getConnCalled {
- http2traceGetConn(req, addr)
- }
- cc.getConnCalled = false
- p.mu.Unlock()
- return cc, nil
- }
- }
- if !dialOnMiss {
- p.mu.Unlock()
- return nil, http2ErrNoCachedConn
- }
- http2traceGetConn(req, addr)
- call := p.getStartDialLocked(req.Context(), addr)
- p.mu.Unlock()
- <-call.done
- if http2shouldRetryDial(call, req) {
- continue
- }
- cc, err := call.res, call.err
- if err != nil {
- return nil, err
- }
- if cc.ReserveNewRequest() {
- return cc, nil
- }
- }
-}
-
-// dialCall is an in-flight Transport dial call to a host.
-type http2dialCall struct {
- _ http2incomparable
- p *http2clientConnPool
- // the context associated with the request
- // that created this dialCall
- ctx context.Context
- done chan struct{} // closed when done
- res *http2ClientConn // valid after done is closed
- err error // valid after done is closed
-}
-
-// requires p.mu is held.
-func (p *http2clientConnPool) getStartDialLocked(ctx context.Context, addr string) *http2dialCall {
- if call, ok := p.dialing[addr]; ok {
- // A dial is already in-flight. Don't start another.
- return call
- }
- call := &http2dialCall{p: p, done: make(chan struct{}), ctx: ctx}
- if p.dialing == nil {
- p.dialing = make(map[string]*http2dialCall)
- }
- p.dialing[addr] = call
- go call.dial(call.ctx, addr)
- return call
-}
-
-// run in its own goroutine.
-func (c *http2dialCall) dial(ctx context.Context, addr string) {
- const singleUse = false // shared conn
- c.res, c.err = c.p.t.dialClientConn(ctx, addr, singleUse)
- close(c.done)
-
- c.p.mu.Lock()
- delete(c.p.dialing, addr)
- if c.err == nil {
- c.p.addConnLocked(addr, c.res)
- }
- c.p.mu.Unlock()
-}
-
-// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
-// already exist. It coalesces concurrent calls with the same key.
-// This is used by the http1 Transport code when it creates a new connection. Because
- // the http1 Transport doesn't de-dup TCP dials to outbound hosts (since it doesn't know
-// the protocol), it can get into a situation where it has multiple TLS connections.
-// This code decides which ones live or die.
- // The returned used result reports whether c was used.
-// c is never closed.
-func (p *http2clientConnPool) addConnIfNeeded(key string, t *http2Transport, c *tls.Conn) (used bool, err error) {
- p.mu.Lock()
- for _, cc := range p.conns[key] {
- if cc.CanTakeNewRequest() {
- p.mu.Unlock()
- return false, nil
- }
- }
- call, dup := p.addConnCalls[key]
- if !dup {
- if p.addConnCalls == nil {
- p.addConnCalls = make(map[string]*http2addConnCall)
- }
- call = &http2addConnCall{
- p: p,
- done: make(chan struct{}),
- }
- p.addConnCalls[key] = call
- go call.run(t, key, c)
- }
- p.mu.Unlock()
-
- <-call.done
- if call.err != nil {
- return false, call.err
- }
- return !dup, nil
-}
-
-type http2addConnCall struct {
- _ http2incomparable
- p *http2clientConnPool
- done chan struct{} // closed when done
- err error
-}
-
-func (c *http2addConnCall) run(t *http2Transport, key string, tc *tls.Conn) {
- cc, err := t.NewClientConn(tc)
-
- p := c.p
- p.mu.Lock()
- if err != nil {
- c.err = err
- } else {
- cc.getConnCalled = true // already called by the net/http package
- p.addConnLocked(key, cc)
- }
- delete(p.addConnCalls, key)
- p.mu.Unlock()
- close(c.done)
-}
-
-// p.mu must be held
-func (p *http2clientConnPool) addConnLocked(key string, cc *http2ClientConn) {
- for _, v := range p.conns[key] {
- if v == cc {
- return
- }
- }
- if p.conns == nil {
- p.conns = make(map[string][]*http2ClientConn)
- }
- if p.keys == nil {
- p.keys = make(map[*http2ClientConn][]string)
- }
- p.conns[key] = append(p.conns[key], cc)
- p.keys[cc] = append(p.keys[cc], key)
-}
-
-func (p *http2clientConnPool) MarkDead(cc *http2ClientConn) {
- p.mu.Lock()
- defer p.mu.Unlock()
- for _, key := range p.keys[cc] {
- vv, ok := p.conns[key]
- if !ok {
- continue
- }
- newList := http2filterOutClientConn(vv, cc)
- if len(newList) > 0 {
- p.conns[key] = newList
- } else {
- delete(p.conns, key)
- }
- }
- delete(p.keys, cc)
-}
-
-func (p *http2clientConnPool) closeIdleConnections() {
- p.mu.Lock()
- defer p.mu.Unlock()
- // TODO: don't close a cc if it was just added to the pool
- // milliseconds ago and has never been used. There's currently
- // a small race window with the HTTP/1 Transport's integration
- // where it can add an idle conn just before using it, and
- // somebody else can concurrently call CloseIdleConns and
- // break some caller's RoundTrip.
- for _, vv := range p.conns {
- for _, cc := range vv {
- cc.closeIfIdle()
- }
- }
-}
-
-func http2filterOutClientConn(in []*http2ClientConn, exclude *http2ClientConn) []*http2ClientConn {
- out := in[:0]
- for _, v := range in {
- if v != exclude {
- out = append(out, v)
- }
- }
- // If we filtered anything out, zero the now-vacated last slot so
- // the GC doesn't keep the excluded connection alive.
- if len(in) != len(out) {
- in[len(in)-1] = nil
- }
- return out
-}
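-
-// http2filterExample is an illustrative sketch added for exposition; it is
-// not part of the upstream Go sources. It shows that filterOutClientConn
-// reuses the input's backing array and zeroes the vacated tail slot, so the
-// caller must not reuse in afterwards.
-func http2filterExample() {
- a, b := new(http2ClientConn), new(http2ClientConn)
- in := []*http2ClientConn{a, b}
- out := http2filterOutClientConn(in, b)
- _ = out // [a]
- _ = in[1] // nil: the vacated slot was zeroed for the GC
-}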
-
-// noDialClientConnPool is an implementation of http2.ClientConnPool
-// which never dials. We let the HTTP/1.1 client dial and use its TLS
-// connection instead.
-type http2noDialClientConnPool struct{ *http2clientConnPool }
-
-func (p http2noDialClientConnPool) GetClientConn(req *Request, addr string) (*http2ClientConn, error) {
- return p.getClientConn(req, addr, http2noDialOnMiss)
-}
-
-// shouldRetryDial reports whether the current request should
-// retry dialing after the call finished unsuccessfully, for example
-// if the dial was canceled because of a context cancellation or
-// deadline expiry.
-func http2shouldRetryDial(call *http2dialCall, req *Request) bool {
- if call.err == nil {
- // No error, no need to retry
- return false
- }
- if call.ctx == req.Context() {
- // If the call has the same context as the request, the dial
- // should not be retried, since any cancellation will have come
- // from this request.
- return false
- }
- if !errors.Is(call.err, context.Canceled) && !errors.Is(call.err, context.DeadlineExceeded) {
- // If the call error is not because of a context cancellation or a deadline expiry,
- // the dial should not be retried.
- return false
- }
- // Only retry if the error is a context cancellation error or deadline expiry
- // and the context associated with the call was canceled or expired.
- return call.ctx.Err() != nil
-}
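-
-// http2shouldRetryDialExample is an illustrative sketch added for
-// exposition; it is not part of the upstream Go sources. A dial canceled by
-// an unrelated request's context is retried, because that cancellation says
-// nothing about the current request.
-func http2shouldRetryDialExample() bool {
- otherCtx, cancel := context.WithCancel(context.Background())
- cancel() // the earlier request that started the shared dial went away
- call := &http2dialCall{ctx: otherCtx, err: context.Canceled}
- req := &Request{} // Context() defaults to the live background context
- return http2shouldRetryDial(call, req) // true: retry under req's context
-}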
-
-// Buffer chunks are allocated from a pool to reduce pressure on GC.
-// The maximum wasted space per dataBuffer is 2x the largest size class,
-// which happens when the dataBuffer has multiple chunks and there is
-// one unread byte in both the first and last chunks. We use a few size
-// classes to minimize overheads for servers that typically receive very
-// small request bodies.
-//
-// TODO: Benchmark to determine if the pools are necessary. The GC may have
-// improved enough that we can instead allocate chunks like this:
-// make([]byte, max(16<<10, expectedBytesRemaining))
-var (
- http2dataChunkSizeClasses = []int{
- 1 << 10,
- 2 << 10,
- 4 << 10,
- 8 << 10,
- 16 << 10,
- }
- http2dataChunkPools = [...]sync.Pool{
- {New: func() interface{} { return make([]byte, 1<<10) }},
- {New: func() interface{} { return make([]byte, 2<<10) }},
- {New: func() interface{} { return make([]byte, 4<<10) }},
- {New: func() interface{} { return make([]byte, 8<<10) }},
- {New: func() interface{} { return make([]byte, 16<<10) }},
- }
-)
-
-func http2getDataBufferChunk(size int64) []byte {
- i := 0
- for ; i < len(http2dataChunkSizeClasses)-1; i++ {
- if size <= int64(http2dataChunkSizeClasses[i]) {
- break
- }
- }
- return http2dataChunkPools[i].Get().([]byte)
-}
-
-func http2putDataBufferChunk(p []byte) {
- for i, n := range http2dataChunkSizeClasses {
- if len(p) == n {
- http2dataChunkPools[i].Put(p)
- return
- }
- }
- panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
-}
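-
-// http2dataChunkExample is an illustrative sketch added for exposition; it
-// is not part of the upstream Go sources. A request is rounded up to the
-// smallest size class that fits, and the chunk must come back to the pool
-// at exactly its class length.
-func http2dataChunkExample() {
- chunk := http2getDataBufferChunk(3000) // rounds up to the 4 KB class
- _ = len(chunk) // 4096
- http2putDataBufferChunk(chunk) // returned to the matching pool
-}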
-
-// dataBuffer is an io.ReadWriter backed by a list of data chunks.
-// Each dataBuffer is used to read DATA frames on a single stream.
-// The buffer is divided into chunks so the server can limit the
-// total memory used by a single connection without limiting the
-// request body size on any single stream.
-type http2dataBuffer struct {
- chunks [][]byte
- r int // next byte to read is chunks[0][r]
- w int // next byte to write is chunks[len(chunks)-1][w]
- size int // total buffered bytes
- expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
-}
-
-var http2errReadEmpty = errors.New("read from empty dataBuffer")
-
-// Read copies bytes from the buffer into p.
-// It is an error to read when no data is available.
-func (b *http2dataBuffer) Read(p []byte) (int, error) {
- if b.size == 0 {
- return 0, http2errReadEmpty
- }
- var ntotal int
- for len(p) > 0 && b.size > 0 {
- readFrom := b.bytesFromFirstChunk()
- n := copy(p, readFrom)
- p = p[n:]
- ntotal += n
- b.r += n
- b.size -= n
- // If the first chunk has been consumed, advance to the next chunk.
- if b.r == len(b.chunks[0]) {
- http2putDataBufferChunk(b.chunks[0])
- end := len(b.chunks) - 1
- copy(b.chunks[:end], b.chunks[1:])
- b.chunks[end] = nil
- b.chunks = b.chunks[:end]
- b.r = 0
- }
- }
- return ntotal, nil
-}
-
-func (b *http2dataBuffer) bytesFromFirstChunk() []byte {
- if len(b.chunks) == 1 {
- return b.chunks[0][b.r:b.w]
- }
- return b.chunks[0][b.r:]
-}
-
-// Len returns the number of bytes of the unread portion of the buffer.
-func (b *http2dataBuffer) Len() int {
- return b.size
-}
-
-// Write appends p to the buffer.
-func (b *http2dataBuffer) Write(p []byte) (int, error) {
- ntotal := len(p)
- for len(p) > 0 {
- // If the last chunk is empty, allocate a new chunk. Try to allocate
- // enough to fully copy p plus any additional bytes we expect to
- // receive. However, this may allocate less than len(p).
- want := int64(len(p))
- if b.expected > want {
- want = b.expected
- }
- chunk := b.lastChunkOrAlloc(want)
- n := copy(chunk[b.w:], p)
- p = p[n:]
- b.w += n
- b.size += n
- b.expected -= int64(n)
- }
- return ntotal, nil
-}
-
-func (b *http2dataBuffer) lastChunkOrAlloc(want int64) []byte {
- if len(b.chunks) != 0 {
- last := b.chunks[len(b.chunks)-1]
- if b.w < len(last) {
- return last
- }
- }
- chunk := http2getDataBufferChunk(want)
- b.chunks = append(b.chunks, chunk)
- b.w = 0
- return chunk
-}
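-
-// http2dataBufferExample is an illustrative sketch added for exposition; it
-// is not part of the upstream Go sources. dataBuffer behaves like a chunked
-// bytes.Buffer: Read drains exactly what Write appended.
-func http2dataBufferExample() string {
- var b http2dataBuffer
- b.Write([]byte("hello "))
- b.Write([]byte("world"))
- out := make([]byte, b.Len())
- b.Read(out) // copies all 11 buffered bytes; never errors while size > 0
- return string(out) // "hello world"
-}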
-
-// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
-type http2ErrCode uint32
-
-const (
- http2ErrCodeNo http2ErrCode = 0x0
- http2ErrCodeProtocol http2ErrCode = 0x1
- http2ErrCodeInternal http2ErrCode = 0x2
- http2ErrCodeFlowControl http2ErrCode = 0x3
- http2ErrCodeSettingsTimeout http2ErrCode = 0x4
- http2ErrCodeStreamClosed http2ErrCode = 0x5
- http2ErrCodeFrameSize http2ErrCode = 0x6
- http2ErrCodeRefusedStream http2ErrCode = 0x7
- http2ErrCodeCancel http2ErrCode = 0x8
- http2ErrCodeCompression http2ErrCode = 0x9
- http2ErrCodeConnect http2ErrCode = 0xa
- http2ErrCodeEnhanceYourCalm http2ErrCode = 0xb
- http2ErrCodeInadequateSecurity http2ErrCode = 0xc
- http2ErrCodeHTTP11Required http2ErrCode = 0xd
-)
-
-var http2errCodeName = map[http2ErrCode]string{
- http2ErrCodeNo: "NO_ERROR",
- http2ErrCodeProtocol: "PROTOCOL_ERROR",
- http2ErrCodeInternal: "INTERNAL_ERROR",
- http2ErrCodeFlowControl: "FLOW_CONTROL_ERROR",
- http2ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT",
- http2ErrCodeStreamClosed: "STREAM_CLOSED",
- http2ErrCodeFrameSize: "FRAME_SIZE_ERROR",
- http2ErrCodeRefusedStream: "REFUSED_STREAM",
- http2ErrCodeCancel: "CANCEL",
- http2ErrCodeCompression: "COMPRESSION_ERROR",
- http2ErrCodeConnect: "CONNECT_ERROR",
- http2ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM",
- http2ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
- http2ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED",
-}
-
-func (e http2ErrCode) String() string {
- if s, ok := http2errCodeName[e]; ok {
- return s
- }
- return fmt.Sprintf("unknown error code 0x%x", uint32(e))
-}
-
-func (e http2ErrCode) stringToken() string {
- if s, ok := http2errCodeName[e]; ok {
- return s
- }
- return fmt.Sprintf("ERR_UNKNOWN_%d", uint32(e))
-}
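-
-// http2errCodeStringExample is an illustrative sketch added for exposition;
-// it is not part of the upstream Go sources. Known codes render as their
-// RFC 7540 names; unknown codes fall back to a hex form.
-func http2errCodeStringExample() (string, string) {
- known := http2ErrCodeFlowControl.String() // "FLOW_CONTROL_ERROR"
- unknown := http2ErrCode(0xff).String() // "unknown error code 0xff"
- return known, unknown
-}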
-
-// ConnectionError is an error that results in the termination of the
-// entire connection.
-type http2ConnectionError http2ErrCode
-
-func (e http2ConnectionError) Error() string {
- return fmt.Sprintf("connection error: %s", http2ErrCode(e))
-}
-
-// StreamError is an error that only affects one stream within an
-// HTTP/2 connection.
-type http2StreamError struct {
- StreamID uint32
- Code http2ErrCode
- Cause error // optional additional detail
-}
-
-// errFromPeer is a sentinel error value for StreamError.Cause to
-// indicate that the StreamError was sent from the peer over the wire
-// and wasn't locally generated in the Transport.
-var http2errFromPeer = errors.New("received from peer")
-
-func http2streamError(id uint32, code http2ErrCode) http2StreamError {
- return http2StreamError{StreamID: id, Code: code}
-}
-
-func (e http2StreamError) Error() string {
- if e.Cause != nil {
- return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
- }
- return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
-}
-
-// 6.9.1 The Flow Control Window
-// "If a sender receives a WINDOW_UPDATE that causes a flow control
-// window to exceed this maximum it MUST terminate either the stream
-// or the connection, as appropriate. For streams, [...]; for the
-// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
-type http2goAwayFlowError struct{}
-
-func (http2goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
-
-// connError represents an HTTP/2 ConnectionError error code, along
-// with a string (for debugging) explaining why.
-//
-// Errors of this type are only returned by the frame parser functions
-// and converted into ConnectionError(Code), after stashing away
-// the Reason into the Framer's errDetail field, accessible via
-// the (*Framer).ErrorDetail method.
-type http2connError struct {
- Code http2ErrCode // the ConnectionError error code
- Reason string // additional reason
-}
-
-func (e http2connError) Error() string {
- return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
-}
-
-type http2pseudoHeaderError string
-
-func (e http2pseudoHeaderError) Error() string {
- return fmt.Sprintf("invalid pseudo-header %q", string(e))
-}
-
-type http2duplicatePseudoHeaderError string
-
-func (e http2duplicatePseudoHeaderError) Error() string {
- return fmt.Sprintf("duplicate pseudo-header %q", string(e))
-}
-
-type http2headerFieldNameError string
-
-func (e http2headerFieldNameError) Error() string {
- return fmt.Sprintf("invalid header field name %q", string(e))
-}
-
-type http2headerFieldValueError string
-
-func (e http2headerFieldValueError) Error() string {
- return fmt.Sprintf("invalid header field value %q", string(e))
-}
-
-var (
- http2errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
- http2errPseudoAfterRegular = errors.New("pseudo header field after regular")
-)
-
-// flow is the flow control window's size.
-type http2flow struct {
- _ http2incomparable
-
- // n is the number of DATA bytes we're allowed to send.
- // A flow is kept both on a conn and per stream.
- n int32
-
- // conn points to the connection-level flow that is shared by
- // all streams on that conn. It is nil for the flow
- // that's on the conn directly.
- conn *http2flow
-}
-
-func (f *http2flow) setConnFlow(cf *http2flow) { f.conn = cf }
-
-func (f *http2flow) available() int32 {
- n := f.n
- if f.conn != nil && f.conn.n < n {
- n = f.conn.n
- }
- return n
-}
-
-func (f *http2flow) take(n int32) {
- if n > f.available() {
- panic("internal error: took too much")
- }
- f.n -= n
- if f.conn != nil {
- f.conn.n -= n
- }
-}
-
-// add adds n bytes (positive or negative) to the flow control window.
-// It returns false if the sum would exceed 2^31-1.
-func (f *http2flow) add(n int32) bool {
- sum := f.n + n
- if (sum > n) == (f.n > 0) {
- f.n = sum
- return true
- }
- return false
-}
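-
-// http2flowExample is an illustrative sketch added for exposition; it is
-// not part of the upstream Go sources. A stream's sendable bytes are capped
-// by the connection-level window, take debits both windows, and add refuses
-// to overflow 2^31-1.
-func http2flowExample() {
- var connFlow, streamFlow http2flow
- connFlow.add(100)
- streamFlow.add(1000)
- streamFlow.setConnFlow(&connFlow)
- _ = streamFlow.available() // 100: the conn window is the bottleneck
- streamFlow.take(40) // debits both the stream and conn windows
- _ = connFlow.available() // 60
- _ = streamFlow.add(1<<31 - 1) // false: sum would exceed 2^31-1
-}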
-
-const http2frameHeaderLen = 9
-
-var http2padZeros = make([]byte, 255) // zeros for padding
-
-// A FrameType is a registered frame type as defined in
-// http://http2.github.io/http2-spec/#rfc.section.11.2
-type http2FrameType uint8
-
-const (
- http2FrameData http2FrameType = 0x0
- http2FrameHeaders http2FrameType = 0x1
- http2FramePriority http2FrameType = 0x2
- http2FrameRSTStream http2FrameType = 0x3
- http2FrameSettings http2FrameType = 0x4
- http2FramePushPromise http2FrameType = 0x5
- http2FramePing http2FrameType = 0x6
- http2FrameGoAway http2FrameType = 0x7
- http2FrameWindowUpdate http2FrameType = 0x8
- http2FrameContinuation http2FrameType = 0x9
-)
-
-var http2frameName = map[http2FrameType]string{
- http2FrameData: "DATA",
- http2FrameHeaders: "HEADERS",
- http2FramePriority: "PRIORITY",
- http2FrameRSTStream: "RST_STREAM",
- http2FrameSettings: "SETTINGS",
- http2FramePushPromise: "PUSH_PROMISE",
- http2FramePing: "PING",
- http2FrameGoAway: "GOAWAY",
- http2FrameWindowUpdate: "WINDOW_UPDATE",
- http2FrameContinuation: "CONTINUATION",
-}
-
-func (t http2FrameType) String() string {
- if s, ok := http2frameName[t]; ok {
- return s
- }
- return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
-}
-
-// Flags is a bitmask of HTTP/2 flags.
-// The meaning of flags varies depending on the frame type.
-type http2Flags uint8
-
-// Has reports whether f contains all (0 or more) flags in v.
-func (f http2Flags) Has(v http2Flags) bool {
- return (f & v) == v
-}
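-
-// http2flagsHasExample is an illustrative sketch added for exposition; it
-// is not part of the upstream Go sources. Has is an all-bits test, so a
-// multi-flag query succeeds only if every queried bit is set.
-func http2flagsHasExample() {
- f := http2Flags(0x1 | 0x4) // END_STREAM | END_HEADERS on a HEADERS frame
- _ = f.Has(0x4) // true: END_HEADERS is set
- _ = f.Has(0x4 | 0x20) // false: PRIORITY (0x20) is not set
-}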
-
-// Frame-specific FrameHeader flag bits.
-const (
- // Data Frame
- http2FlagDataEndStream http2Flags = 0x1
- http2FlagDataPadded http2Flags = 0x8
-
- // Headers Frame
- http2FlagHeadersEndStream http2Flags = 0x1
- http2FlagHeadersEndHeaders http2Flags = 0x4
- http2FlagHeadersPadded http2Flags = 0x8
- http2FlagHeadersPriority http2Flags = 0x20
-
- // Settings Frame
- http2FlagSettingsAck http2Flags = 0x1
-
- // Ping Frame
- http2FlagPingAck http2Flags = 0x1
-
- // Continuation Frame
- http2FlagContinuationEndHeaders http2Flags = 0x4
-
- http2FlagPushPromiseEndHeaders http2Flags = 0x4
- http2FlagPushPromisePadded http2Flags = 0x8
-)
-
-var http2flagName = map[http2FrameType]map[http2Flags]string{
- http2FrameData: {
- http2FlagDataEndStream: "END_STREAM",
- http2FlagDataPadded: "PADDED",
- },
- http2FrameHeaders: {
- http2FlagHeadersEndStream: "END_STREAM",
- http2FlagHeadersEndHeaders: "END_HEADERS",
- http2FlagHeadersPadded: "PADDED",
- http2FlagHeadersPriority: "PRIORITY",
- },
- http2FrameSettings: {
- http2FlagSettingsAck: "ACK",
- },
- http2FramePing: {
- http2FlagPingAck: "ACK",
- },
- http2FrameContinuation: {
- http2FlagContinuationEndHeaders: "END_HEADERS",
- },
- http2FramePushPromise: {
- http2FlagPushPromiseEndHeaders: "END_HEADERS",
- http2FlagPushPromisePadded: "PADDED",
- },
-}
-
-// a frameParser parses a frame given its FrameHeader and payload
-// bytes. The length of payload will always equal fh.Length (which
-// might be 0).
-type http2frameParser func(fc *http2frameCache, fh http2FrameHeader, countError func(string), payload []byte) (http2Frame, error)
-
-var http2frameParsers = map[http2FrameType]http2frameParser{
- http2FrameData: http2parseDataFrame,
- http2FrameHeaders: http2parseHeadersFrame,
- http2FramePriority: http2parsePriorityFrame,
- http2FrameRSTStream: http2parseRSTStreamFrame,
- http2FrameSettings: http2parseSettingsFrame,
- http2FramePushPromise: http2parsePushPromise,
- http2FramePing: http2parsePingFrame,
- http2FrameGoAway: http2parseGoAwayFrame,
- http2FrameWindowUpdate: http2parseWindowUpdateFrame,
- http2FrameContinuation: http2parseContinuationFrame,
-}
-
-func http2typeFrameParser(t http2FrameType) http2frameParser {
- if f := http2frameParsers[t]; f != nil {
- return f
- }
- return http2parseUnknownFrame
-}
-
-// A FrameHeader is the 9 byte header of all HTTP/2 frames.
-//
-// See http://http2.github.io/http2-spec/#FrameHeader
-type http2FrameHeader struct {
- valid bool // caller can access []byte fields in the Frame
-
- // Type is the 1 byte frame type. There are ten standard frame
- // types, but extension frame types may be written by WriteRawFrame
- // and will be returned by ReadFrame (as UnknownFrame).
- Type http2FrameType
-
- // Flags are the 1 byte of 8 potential bit flags per frame.
- // They are specific to the frame type.
- Flags http2Flags
-
- // Length is the length of the frame, not including the 9 byte header.
- // The maximum size is one byte less than 16MB (uint24), but only
- // frames up to 16KB are allowed without peer agreement.
- Length uint32
-
- // StreamID is which stream this frame is for. Certain frames
- // are not stream-specific, in which case this field is 0.
- StreamID uint32
-}
-
-// Header returns h. It exists so FrameHeaders can be embedded in other
-// specific frame types and implement the Frame interface.
-func (h http2FrameHeader) Header() http2FrameHeader { return h }
-
-func (h http2FrameHeader) String() string {
- var buf bytes.Buffer
- buf.WriteString("[FrameHeader ")
- h.writeDebug(&buf)
- buf.WriteByte(']')
- return buf.String()
-}
-
-func (h http2FrameHeader) writeDebug(buf *bytes.Buffer) {
- buf.WriteString(h.Type.String())
- if h.Flags != 0 {
- buf.WriteString(" flags=")
- set := 0
- for i := uint8(0); i < 8; i++ {
- if h.Flags&(1<<i) == 0 {
- continue
- }
- set++
- if set > 1 {
- buf.WriteByte('|')
- }
- name := http2flagName[h.Type][http2Flags(1<<i)]
- if name != "" {
- buf.WriteString(name)
- } else {
- fmt.Fprintf(buf, "0x%x", 1<<i)
- }
- }
- }
- if h.StreamID != 0 {
- fmt.Fprintf(buf, " stream=%d", h.StreamID)
- }
- fmt.Fprintf(buf, " len=%d", h.Length)
-}
-
-func (h *http2FrameHeader) checkValid() {
- if !h.valid {
- panic("Frame accessor called on non-owned Frame")
- }
-}
-
-func (h *http2FrameHeader) invalidate() { h.valid = false }
-
-// frame header bytes.
-// Used only by ReadFrameHeader.
-var http2fhBytes = sync.Pool{
- New: func() interface{} {
- buf := make([]byte, http2frameHeaderLen)
- return &buf
- },
-}
-
-// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
-// Most users should use Framer.ReadFrame instead.
-func http2ReadFrameHeader(r io.Reader) (http2FrameHeader, error) {
- bufp := http2fhBytes.Get().(*[]byte)
- defer http2fhBytes.Put(bufp)
- return http2readFrameHeader(*bufp, r)
-}
-
-func http2readFrameHeader(buf []byte, r io.Reader) (http2FrameHeader, error) {
- _, err := io.ReadFull(r, buf[:http2frameHeaderLen])
- if err != nil {
- return http2FrameHeader{}, err
- }
- return http2FrameHeader{
- Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])),
- Type: http2FrameType(buf[3]),
- Flags: http2Flags(buf[4]),
- StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),
- valid: true,
- }, nil
-}
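-
-// http2readFrameHeaderExample is an illustrative sketch added for
-// exposition; it is not part of the upstream Go sources. The nine raw bytes
-// below describe an 8-byte PING frame (type 0x6) with the ACK flag (0x1) on
-// stream 0.
-func http2readFrameHeaderExample() (http2FrameHeader, error) {
- raw := []byte{0x00, 0x00, 0x08, 0x06, 0x01, 0x00, 0x00, 0x00, 0x00}
- return http2ReadFrameHeader(bytes.NewReader(raw))
-}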
-
-// A Frame is the base interface implemented by all frame types.
-// Callers will generally type-assert the specific frame type:
-// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.
-//
-// Frames are only valid until the next call to Framer.ReadFrame.
-type http2Frame interface {
- Header() http2FrameHeader
-
- // invalidate is called by Framer.ReadFrame to mark this
- // frame's buffers as invalid, since the subsequent
- // frame will reuse them.
- invalidate()
-}
-
-// A Framer reads and writes Frames.
-type http2Framer struct {
- r io.Reader
- lastFrame http2Frame
- errDetail error
-
- // countError is a non-nil func that's called on a frame parse
- // error with some unique error path token. It's initialized
- // from Transport.CountError or Server.CountError.
- countError func(errToken string)
-
- // lastHeaderStream is non-zero if the last frame was an
- // unfinished HEADERS/CONTINUATION.
- lastHeaderStream uint32
-
- maxReadSize uint32
- headerBuf [http2frameHeaderLen]byte
-
- // TODO: let getReadBuf be configurable, and use a less memory-pinning
- // allocator in server.go to minimize memory pinned for many idle conns.
- // Will probably also need to make frame invalidation have a hook too.
- getReadBuf func(size uint32) []byte
- readBuf []byte // cache for default getReadBuf
-
- maxWriteSize uint32 // zero means unlimited; TODO: implement
-
- w io.Writer
- wbuf []byte
-
- // AllowIllegalWrites permits the Framer's Write methods to
- // write frames that do not conform to the HTTP/2 spec. This
- // permits using the Framer to test other HTTP/2
- // implementations' conformance to the spec.
- // If false, the Write methods will prefer to return an error
- // rather than comply.
- AllowIllegalWrites bool
-
- // AllowIllegalReads permits the Framer's ReadFrame method
- // to return non-compliant frames or frame orders.
- // This is for testing and permits using the Framer to test
- // other HTTP/2 implementations' conformance to the spec.
- // It is not compatible with ReadMetaHeaders.
- AllowIllegalReads bool
-
- // ReadMetaHeaders if non-nil causes ReadFrame to merge
- // HEADERS and CONTINUATION frames together and return
- // MetaHeadersFrame instead.
- ReadMetaHeaders *hpack.Decoder
-
- // MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE.
- // It's used only if ReadMetaHeaders is set; 0 means a sane default
- // (currently 16MB).
- // If the limit is hit, MetaHeadersFrame.Truncated is set true.
- MaxHeaderListSize uint32
-
- // TODO: track which type of frame & with which flags was sent
- // last. Then return an error (unless AllowIllegalWrites) if
- // we're in the middle of a header block and a
- // non-Continuation or Continuation on a different stream is
- // attempted to be written.
-
- logReads, logWrites bool
-
- debugFramer *http2Framer // only use for logging written writes
- debugFramerBuf *bytes.Buffer
- debugReadLoggerf func(string, ...interface{})
- debugWriteLoggerf func(string, ...interface{})
-
- frameCache *http2frameCache // nil if frames aren't reused (default)
-}
-
-func (fr *http2Framer) maxHeaderListSize() uint32 {
- if fr.MaxHeaderListSize == 0 {
- return 16 << 20 // sane default, per docs
- }
- return fr.MaxHeaderListSize
-}
-
-func (f *http2Framer) startWrite(ftype http2FrameType, flags http2Flags, streamID uint32) {
- // Write the FrameHeader.
- f.wbuf = append(f.wbuf[:0],
- 0, // 3 bytes of length, filled in by endWrite
- 0,
- 0,
- byte(ftype),
- byte(flags),
- byte(streamID>>24),
- byte(streamID>>16),
- byte(streamID>>8),
- byte(streamID))
-}
-
-func (f *http2Framer) endWrite() error {
- // Now that we know the final size, fill in the FrameHeader in
- // the space previously reserved for it. Abuse append.
- length := len(f.wbuf) - http2frameHeaderLen
- if length >= (1 << 24) {
- return http2ErrFrameTooLarge
- }
- _ = append(f.wbuf[:0],
- byte(length>>16),
- byte(length>>8),
- byte(length))
- if f.logWrites {
- f.logWrite()
- }
-
- n, err := f.w.Write(f.wbuf)
- if err == nil && n != len(f.wbuf) {
- err = io.ErrShortWrite
- }
- return err
-}
-
-func (f *http2Framer) logWrite() {
- if f.debugFramer == nil {
- f.debugFramerBuf = new(bytes.Buffer)
- f.debugFramer = http2NewFramer(nil, f.debugFramerBuf)
- f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
- // Let us read anything, even if we accidentally wrote it
- // in the wrong order:
- f.debugFramer.AllowIllegalReads = true
- }
- f.debugFramerBuf.Write(f.wbuf)
- fr, err := f.debugFramer.ReadFrame()
- if err != nil {
- f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f)
- return
- }
- f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, http2summarizeFrame(fr))
-}
-
-func (f *http2Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
-
-func (f *http2Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) }
-
-func (f *http2Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
-
-func (f *http2Framer) writeUint32(v uint32) {
- f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
-}
-
-const (
- http2minMaxFrameSize = 1 << 14
- http2maxFrameSize = 1<<24 - 1
-)
-
-// SetReuseFrames allows the Framer to reuse Frames.
-// If called on a Framer, Frames returned by calls to ReadFrame are only
-// valid until the next call to ReadFrame.
-func (fr *http2Framer) SetReuseFrames() {
- if fr.frameCache != nil {
- return
- }
- fr.frameCache = &http2frameCache{}
-}
-
-type http2frameCache struct {
- dataFrame http2DataFrame
-}
-
-func (fc *http2frameCache) getDataFrame() *http2DataFrame {
- if fc == nil {
- return &http2DataFrame{}
- }
- return &fc.dataFrame
-}
-
-// NewFramer returns a Framer that writes frames to w and reads them from r.
-func http2NewFramer(w io.Writer, r io.Reader) *http2Framer {
- fr := &http2Framer{
- w: w,
- r: r,
- countError: func(string) {},
- logReads: http2logFrameReads,
- logWrites: http2logFrameWrites,
- debugReadLoggerf: log.Printf,
- debugWriteLoggerf: log.Printf,
- }
- fr.getReadBuf = func(size uint32) []byte {
- if cap(fr.readBuf) >= int(size) {
- return fr.readBuf[:size]
- }
- fr.readBuf = make([]byte, size)
- return fr.readBuf
- }
- fr.SetMaxReadFrameSize(http2maxFrameSize)
- return fr
-}
-
-// SetMaxReadFrameSize sets the maximum size of a frame
-// that will be read by a subsequent call to ReadFrame.
-// It is the caller's responsibility to advertise this
-// limit with a SETTINGS frame.
-func (fr *http2Framer) SetMaxReadFrameSize(v uint32) {
- if v > http2maxFrameSize {
- v = http2maxFrameSize
- }
- fr.maxReadSize = v
-}
-
-// ErrorDetail returns a more detailed error of the last error
-// returned by Framer.ReadFrame. For instance, if ReadFrame
-// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail
-// will say exactly what was invalid. ErrorDetail is not guaranteed
- // to return a non-nil value, and like the rest of the http2 package,
-// its return value is not protected by an API compatibility promise.
-// ErrorDetail is reset after the next call to ReadFrame.
-func (fr *http2Framer) ErrorDetail() error {
- return fr.errDetail
-}
-
-// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
-// sends a frame that is larger than declared with SetMaxReadFrameSize.
-var http2ErrFrameTooLarge = errors.New("http2: frame too large")
-
-// terminalReadFrameError reports whether err is an unrecoverable
-// error from ReadFrame and no other frames should be read.
-func http2terminalReadFrameError(err error) bool {
- if _, ok := err.(http2StreamError); ok {
- return false
- }
- return err != nil
-}
-
-// ReadFrame reads a single frame. The returned Frame is only valid
-// until the next call to ReadFrame.
-//
-// If the frame is larger than previously set with SetMaxReadFrameSize, the
-// returned error is ErrFrameTooLarge. Other errors may be of type
-// ConnectionError, StreamError, or anything else from the underlying
-// reader.
-func (fr *http2Framer) ReadFrame() (http2Frame, error) {
- fr.errDetail = nil
- if fr.lastFrame != nil {
- fr.lastFrame.invalidate()
- }
- fh, err := http2readFrameHeader(fr.headerBuf[:], fr.r)
- if err != nil {
- return nil, err
- }
- if fh.Length > fr.maxReadSize {
- return nil, http2ErrFrameTooLarge
- }
- payload := fr.getReadBuf(fh.Length)
- if _, err := io.ReadFull(fr.r, payload); err != nil {
- return nil, err
- }
- f, err := http2typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload)
- if err != nil {
- if ce, ok := err.(http2connError); ok {
- return nil, fr.connError(ce.Code, ce.Reason)
- }
- return nil, err
- }
- if err := fr.checkFrameOrder(f); err != nil {
- return nil, err
- }
- if fr.logReads {
- fr.debugReadLoggerf("http2: Framer %p: read %v", fr, http2summarizeFrame(f))
- }
- if fh.Type == http2FrameHeaders && fr.ReadMetaHeaders != nil {
- return fr.readMetaFrame(f.(*http2HeadersFrame))
- }
- return f, nil
-}
-
-// connError returns ConnectionError(code) but first
- // stashes away a public reason so the caller can optionally relay it
-// to the peer before hanging up on them. This might help others debug
-// their implementations.
-func (fr *http2Framer) connError(code http2ErrCode, reason string) error {
- fr.errDetail = errors.New(reason)
- return http2ConnectionError(code)
-}
-
-// checkFrameOrder reports an error if f is an invalid frame to return
-// next from ReadFrame. Mostly it checks whether HEADERS and
-// CONTINUATION frames are contiguous.
-func (fr *http2Framer) checkFrameOrder(f http2Frame) error {
- last := fr.lastFrame
- fr.lastFrame = f
- if fr.AllowIllegalReads {
- return nil
- }
-
- fh := f.Header()
- if fr.lastHeaderStream != 0 {
- if fh.Type != http2FrameContinuation {
- return fr.connError(http2ErrCodeProtocol,
- fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
- fh.Type, fh.StreamID,
- last.Header().Type, fr.lastHeaderStream))
- }
- if fh.StreamID != fr.lastHeaderStream {
- return fr.connError(http2ErrCodeProtocol,
- fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d",
- fh.StreamID, fr.lastHeaderStream))
- }
- } else if fh.Type == http2FrameContinuation {
- return fr.connError(http2ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID))
- }
-
- switch fh.Type {
- case http2FrameHeaders, http2FrameContinuation:
- if fh.Flags.Has(http2FlagHeadersEndHeaders) {
- fr.lastHeaderStream = 0
- } else {
- fr.lastHeaderStream = fh.StreamID
- }
- }
-
- return nil
-}
-
-// A DataFrame conveys arbitrary, variable-length sequences of octets
-// associated with a stream.
-// See http://http2.github.io/http2-spec/#rfc.section.6.1
-type http2DataFrame struct {
- http2FrameHeader
- data []byte
-}
-
-func (f *http2DataFrame) StreamEnded() bool {
- return f.http2FrameHeader.Flags.Has(http2FlagDataEndStream)
-}
-
-// Data returns the frame's data octets, not including any padding
-// size byte or padding suffix bytes.
-// The caller must not retain the returned memory past the next
-// call to ReadFrame.
-func (f *http2DataFrame) Data() []byte {
- f.checkValid()
- return f.data
-}
-
-func http2parseDataFrame(fc *http2frameCache, fh http2FrameHeader, countError func(string), payload []byte) (http2Frame, error) {
- if fh.StreamID == 0 {
- // DATA frames MUST be associated with a stream. If a
- // DATA frame is received whose stream identifier
- // field is 0x0, the recipient MUST respond with a
- // connection error (Section 5.4.1) of type
- // PROTOCOL_ERROR.
- countError("frame_data_stream_0")
- return nil, http2connError{http2ErrCodeProtocol, "DATA frame with stream ID 0"}
- }
- f := fc.getDataFrame()
- f.http2FrameHeader = fh
-
- var padSize byte
- if fh.Flags.Has(http2FlagDataPadded) {
- var err error
- payload, padSize, err = http2readByte(payload)
- if err != nil {
- countError("frame_data_pad_byte_short")
- return nil, err
- }
- }
- if int(padSize) > len(payload) {
- // If the length of the padding is greater than the
- // length of the frame payload, the recipient MUST
- // treat this as a connection error.
- // Filed: https://github.com/http2/http2-spec/issues/610
- countError("frame_data_pad_too_big")
- return nil, http2connError{http2ErrCodeProtocol, "pad size larger than data payload"}
- }
- f.data = payload[:len(payload)-int(padSize)]
- return f, nil
-}
-
-var (
- http2errStreamID = errors.New("invalid stream ID")
- http2errDepStreamID = errors.New("invalid dependent stream ID")
- http2errPadLength = errors.New("pad length too large")
- http2errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled")
-)
-
-func http2validStreamIDOrZero(streamID uint32) bool {
- return streamID&(1<<31) == 0
-}
-
-func http2validStreamID(streamID uint32) bool {
- return streamID != 0 && streamID&(1<<31) == 0
-}
-
-// WriteData writes a DATA frame.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility not to violate the maximum frame size
-// and to not call other Write methods concurrently.
-func (f *http2Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
- return f.WriteDataPadded(streamID, endStream, data, nil)
-}
-
-// WriteDataPadded writes a DATA frame with optional padding.
-//
-// If pad is nil, the padding bit is not sent.
-// The length of pad must not exceed 255 bytes.
-// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility not to violate the maximum frame size
-// and to not call other Write methods concurrently.
-func (f *http2Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
- if !http2validStreamID(streamID) && !f.AllowIllegalWrites {
- return http2errStreamID
- }
- if len(pad) > 0 {
- if len(pad) > 255 {
- return http2errPadLength
- }
- if !f.AllowIllegalWrites {
- for _, b := range pad {
- if b != 0 {
- // "Padding octets MUST be set to zero when sending."
- return http2errPadBytes
- }
- }
- }
- }
- var flags http2Flags
- if endStream {
- flags |= http2FlagDataEndStream
- }
- if pad != nil {
- flags |= http2FlagDataPadded
- }
- f.startWrite(http2FrameData, flags, streamID)
- if pad != nil {
- f.wbuf = append(f.wbuf, byte(len(pad)))
- }
- f.wbuf = append(f.wbuf, data...)
- f.wbuf = append(f.wbuf, pad...)
- return f.endWrite()
-}
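-
-// Editor's sketch, not part of the original source: how a caller might emit
-// a padded DATA frame under the rules above. The helper name is hypothetical
-// and the bytes.Buffer stands in for a real connection; pad must be at most
-// 255 zero-valued octets.
-func http2exampleWriteDataPadded() error {
-	var buf bytes.Buffer
-	fr := http2NewFramer(&buf, &buf)
-	pad := make([]byte, 8) // all-zero padding octets, as required when sending
-	return fr.WriteDataPadded(1, true, []byte("hello"), pad)
-}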
-
-// A SettingsFrame conveys configuration parameters that affect how
-// endpoints communicate, such as preferences and constraints on peer
-// behavior.
-//
-// See http://http2.github.io/http2-spec/#SETTINGS
-type http2SettingsFrame struct {
- http2FrameHeader
- p []byte
-}
-
-func http2parseSettingsFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
- if fh.Flags.Has(http2FlagSettingsAck) && fh.Length > 0 {
- // When this (ACK 0x1) bit is set, the payload of the
- // SETTINGS frame MUST be empty. Receipt of a
- // SETTINGS frame with the ACK flag set and a length
- // field value other than 0 MUST be treated as a
- // connection error (Section 5.4.1) of type
- // FRAME_SIZE_ERROR.
- countError("frame_settings_ack_with_length")
- return nil, http2ConnectionError(http2ErrCodeFrameSize)
- }
- if fh.StreamID != 0 {
- // SETTINGS frames always apply to a connection,
- // never a single stream. The stream identifier for a
- // SETTINGS frame MUST be zero (0x0). If an endpoint
- // receives a SETTINGS frame whose stream identifier
- // field is anything other than 0x0, the endpoint MUST
- // respond with a connection error (Section 5.4.1) of
- // type PROTOCOL_ERROR.
- countError("frame_settings_has_stream")
- return nil, http2ConnectionError(http2ErrCodeProtocol)
- }
- if len(p)%6 != 0 {
- countError("frame_settings_mod_6")
-		// Expecting a whole number of 6-byte settings.
- return nil, http2ConnectionError(http2ErrCodeFrameSize)
- }
- f := &http2SettingsFrame{http2FrameHeader: fh, p: p}
- if v, ok := f.Value(http2SettingInitialWindowSize); ok && v > (1<<31)-1 {
- countError("frame_settings_window_size_too_big")
- // Values above the maximum flow control window size of 2^31 - 1 MUST
- // be treated as a connection error (Section 5.4.1) of type
- // FLOW_CONTROL_ERROR.
- return nil, http2ConnectionError(http2ErrCodeFlowControl)
- }
- return f, nil
-}
-
-func (f *http2SettingsFrame) IsAck() bool {
- return f.http2FrameHeader.Flags.Has(http2FlagSettingsAck)
-}
-
-func (f *http2SettingsFrame) Value(id http2SettingID) (v uint32, ok bool) {
- f.checkValid()
- for i := 0; i < f.NumSettings(); i++ {
- if s := f.Setting(i); s.ID == id {
- return s.Val, true
- }
- }
- return 0, false
-}
-
-// Setting returns the setting from the frame at the given 0-based index.
-// The index must be >= 0 and less than f.NumSettings().
-func (f *http2SettingsFrame) Setting(i int) http2Setting {
- buf := f.p
- return http2Setting{
- ID: http2SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])),
- Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]),
- }
-}
-
-func (f *http2SettingsFrame) NumSettings() int { return len(f.p) / 6 }
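-
-// Editor's sketch, not part of the original source: each setting occupies
-// exactly 6 bytes on the wire (a big-endian uint16 ID, then a uint32 value),
-// which is the layout Setting(i) indexes into above. The helper name is
-// hypothetical.
-func http2exampleDecodeSettings(p []byte) []http2Setting {
-	out := make([]http2Setting, 0, len(p)/6)
-	for i := 0; i+6 <= len(p); i += 6 {
-		out = append(out, http2Setting{
-			ID:  http2SettingID(binary.BigEndian.Uint16(p[i : i+2])),
-			Val: binary.BigEndian.Uint32(p[i+2 : i+6]),
-		})
-	}
-	return out
-}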
-
-// HasDuplicates reports whether f contains any duplicate setting IDs.
-func (f *http2SettingsFrame) HasDuplicates() bool {
- num := f.NumSettings()
- if num == 0 {
- return false
- }
- // If it's small enough (the common case), just do the n^2
- // thing and avoid a map allocation.
- if num < 10 {
- for i := 0; i < num; i++ {
- idi := f.Setting(i).ID
- for j := i + 1; j < num; j++ {
- idj := f.Setting(j).ID
- if idi == idj {
- return true
- }
- }
- }
- return false
- }
- seen := map[http2SettingID]bool{}
- for i := 0; i < num; i++ {
- id := f.Setting(i).ID
- if seen[id] {
- return true
- }
- seen[id] = true
- }
- return false
-}
-
-// ForeachSetting runs fn for each setting.
-// It stops and returns the first error.
-func (f *http2SettingsFrame) ForeachSetting(fn func(http2Setting) error) error {
- f.checkValid()
- for i := 0; i < f.NumSettings(); i++ {
- if err := fn(f.Setting(i)); err != nil {
- return err
- }
- }
- return nil
-}
-
-// WriteSettings writes a SETTINGS frame with zero or more settings
-// specified and the ACK bit not set.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
-func (f *http2Framer) WriteSettings(settings ...http2Setting) error {
- f.startWrite(http2FrameSettings, 0, 0)
- for _, s := range settings {
- f.writeUint16(uint16(s.ID))
- f.writeUint32(s.Val)
- }
- return f.endWrite()
-}
-
-// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
-func (f *http2Framer) WriteSettingsAck() error {
- f.startWrite(http2FrameSettings, http2FlagSettingsAck, 0)
- return f.endWrite()
-}
-
-// A PingFrame is a mechanism for measuring a minimal round trip time
-// from the sender, as well as determining whether an idle connection
-// is still functional.
-// See http://http2.github.io/http2-spec/#rfc.section.6.7
-type http2PingFrame struct {
- http2FrameHeader
- Data [8]byte
-}
-
-func (f *http2PingFrame) IsAck() bool { return f.Flags.Has(http2FlagPingAck) }
-
-func http2parsePingFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), payload []byte) (http2Frame, error) {
- if len(payload) != 8 {
- countError("frame_ping_length")
- return nil, http2ConnectionError(http2ErrCodeFrameSize)
- }
- if fh.StreamID != 0 {
- countError("frame_ping_has_stream")
- return nil, http2ConnectionError(http2ErrCodeProtocol)
- }
- f := &http2PingFrame{http2FrameHeader: fh}
- copy(f.Data[:], payload)
- return f, nil
-}
-
-func (f *http2Framer) WritePing(ack bool, data [8]byte) error {
- var flags http2Flags
- if ack {
- flags = http2FlagPingAck
- }
- f.startWrite(http2FramePing, flags, 0)
- f.writeBytes(data[:])
- return f.endWrite()
-}
-
-// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
-// See http://http2.github.io/http2-spec/#rfc.section.6.8
-type http2GoAwayFrame struct {
- http2FrameHeader
- LastStreamID uint32
- ErrCode http2ErrCode
- debugData []byte
-}
-
-// DebugData returns any debug data in the GOAWAY frame. Its contents
-// are not defined.
-// The caller must not retain the returned memory past the next
-// call to ReadFrame.
-func (f *http2GoAwayFrame) DebugData() []byte {
- f.checkValid()
- return f.debugData
-}
-
-func http2parseGoAwayFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
- if fh.StreamID != 0 {
- countError("frame_goaway_has_stream")
- return nil, http2ConnectionError(http2ErrCodeProtocol)
- }
- if len(p) < 8 {
- countError("frame_goaway_short")
- return nil, http2ConnectionError(http2ErrCodeFrameSize)
- }
- return &http2GoAwayFrame{
- http2FrameHeader: fh,
- LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1),
- ErrCode: http2ErrCode(binary.BigEndian.Uint32(p[4:8])),
- debugData: p[8:],
- }, nil
-}
-
-func (f *http2Framer) WriteGoAway(maxStreamID uint32, code http2ErrCode, debugData []byte) error {
- f.startWrite(http2FrameGoAway, 0, 0)
- f.writeUint32(maxStreamID & (1<<31 - 1))
- f.writeUint32(uint32(code))
- f.writeBytes(debugData)
- return f.endWrite()
-}
-
-// An UnknownFrame is the frame type returned when the frame type is unknown
-// or no specific frame type parser exists.
-type http2UnknownFrame struct {
- http2FrameHeader
- p []byte
-}
-
-// Payload returns the frame's payload (after the header). It is not
-// valid to call this method after a subsequent call to
-// Framer.ReadFrame, nor is it valid to retain the returned slice.
-// The memory is owned by the Framer and is invalidated when the next
-// frame is read.
-func (f *http2UnknownFrame) Payload() []byte {
- f.checkValid()
- return f.p
-}
-
-func http2parseUnknownFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
- return &http2UnknownFrame{fh, p}, nil
-}
-
-// A WindowUpdateFrame is used to implement flow control.
-// See http://http2.github.io/http2-spec/#rfc.section.6.9
-type http2WindowUpdateFrame struct {
- http2FrameHeader
- Increment uint32 // never read with high bit set
-}
-
-func http2parseWindowUpdateFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
- if len(p) != 4 {
- countError("frame_windowupdate_bad_len")
- return nil, http2ConnectionError(http2ErrCodeFrameSize)
- }
- inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
- if inc == 0 {
- // A receiver MUST treat the receipt of a
-		// WINDOW_UPDATE frame with a flow control window
- // increment of 0 as a stream error (Section 5.4.2) of
- // type PROTOCOL_ERROR; errors on the connection flow
- // control window MUST be treated as a connection
- // error (Section 5.4.1).
- if fh.StreamID == 0 {
- countError("frame_windowupdate_zero_inc_conn")
- return nil, http2ConnectionError(http2ErrCodeProtocol)
- }
- countError("frame_windowupdate_zero_inc_stream")
- return nil, http2streamError(fh.StreamID, http2ErrCodeProtocol)
- }
- return &http2WindowUpdateFrame{
- http2FrameHeader: fh,
- Increment: inc,
- }, nil
-}
-
-// WriteWindowUpdate writes a WINDOW_UPDATE frame.
-// The increment value must be between 1 and 2,147,483,647, inclusive.
-// If the Stream ID is zero, the window update applies to the
-// connection as a whole.
-func (f *http2Framer) WriteWindowUpdate(streamID, incr uint32) error {
- // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
- if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
- return errors.New("illegal window increment value")
- }
- f.startWrite(http2FrameWindowUpdate, 0, streamID)
- f.writeUint32(incr)
- return f.endWrite()
-}
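-
-// Editor's sketch, not part of the original source: a receiver that has
-// consumed n bytes of DATA typically replenishes both the connection-level
-// window (stream ID 0) and the stream's own window. The helper name is
-// hypothetical; n must be at least 1.
-func http2exampleReplenishWindows(fr *http2Framer, streamID, n uint32) error {
-	if err := fr.WriteWindowUpdate(0, n); err != nil { // connection window
-		return err
-	}
-	return fr.WriteWindowUpdate(streamID, n) // stream window
-}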
-
-// A HeadersFrame is used to open a stream and additionally carries a
-// header block fragment.
-type http2HeadersFrame struct {
- http2FrameHeader
-
- // Priority is set if FlagHeadersPriority is set in the FrameHeader.
- Priority http2PriorityParam
-
- headerFragBuf []byte // not owned
-}
-
-func (f *http2HeadersFrame) HeaderBlockFragment() []byte {
- f.checkValid()
- return f.headerFragBuf
-}
-
-func (f *http2HeadersFrame) HeadersEnded() bool {
- return f.http2FrameHeader.Flags.Has(http2FlagHeadersEndHeaders)
-}
-
-func (f *http2HeadersFrame) StreamEnded() bool {
- return f.http2FrameHeader.Flags.Has(http2FlagHeadersEndStream)
-}
-
-func (f *http2HeadersFrame) HasPriority() bool {
- return f.http2FrameHeader.Flags.Has(http2FlagHeadersPriority)
-}
-
-func http2parseHeadersFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (_ http2Frame, err error) {
- hf := &http2HeadersFrame{
- http2FrameHeader: fh,
- }
- if fh.StreamID == 0 {
- // HEADERS frames MUST be associated with a stream. If a HEADERS frame
- // is received whose stream identifier field is 0x0, the recipient MUST
- // respond with a connection error (Section 5.4.1) of type
- // PROTOCOL_ERROR.
- countError("frame_headers_zero_stream")
- return nil, http2connError{http2ErrCodeProtocol, "HEADERS frame with stream ID 0"}
- }
- var padLength uint8
- if fh.Flags.Has(http2FlagHeadersPadded) {
- if p, padLength, err = http2readByte(p); err != nil {
- countError("frame_headers_pad_short")
- return
- }
- }
- if fh.Flags.Has(http2FlagHeadersPriority) {
- var v uint32
- p, v, err = http2readUint32(p)
- if err != nil {
- countError("frame_headers_prio_short")
- return nil, err
- }
- hf.Priority.StreamDep = v & 0x7fffffff
- hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
- p, hf.Priority.Weight, err = http2readByte(p)
- if err != nil {
- countError("frame_headers_prio_weight_short")
- return nil, err
- }
- }
- if len(p)-int(padLength) < 0 {
- countError("frame_headers_pad_too_big")
- return nil, http2streamError(fh.StreamID, http2ErrCodeProtocol)
- }
- hf.headerFragBuf = p[:len(p)-int(padLength)]
- return hf, nil
-}
-
-// HeadersFrameParam are the parameters for writing a HEADERS frame.
-type http2HeadersFrameParam struct {
- // StreamID is the required Stream ID to initiate.
- StreamID uint32
- // BlockFragment is part (or all) of a Header Block.
- BlockFragment []byte
-
- // EndStream indicates that the header block is the last that
- // the endpoint will send for the identified stream. Setting
- // this flag causes the stream to enter one of "half closed"
- // states.
- EndStream bool
-
- // EndHeaders indicates that this frame contains an entire
- // header block and is not followed by any
- // CONTINUATION frames.
- EndHeaders bool
-
- // PadLength is the optional number of bytes of zeros to add
- // to this frame.
- PadLength uint8
-
- // Priority, if non-zero, includes stream priority information
- // in the HEADER frame.
-	// in the HEADERS frame.
-}
-
-// WriteHeaders writes a single HEADERS frame.
-//
-// This is a low-level header writing method. Encoding headers and
-// splitting them into any necessary CONTINUATION frames is handled
-// elsewhere.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
-func (f *http2Framer) WriteHeaders(p http2HeadersFrameParam) error {
- if !http2validStreamID(p.StreamID) && !f.AllowIllegalWrites {
- return http2errStreamID
- }
- var flags http2Flags
- if p.PadLength != 0 {
- flags |= http2FlagHeadersPadded
- }
- if p.EndStream {
- flags |= http2FlagHeadersEndStream
- }
- if p.EndHeaders {
- flags |= http2FlagHeadersEndHeaders
- }
- if !p.Priority.IsZero() {
- flags |= http2FlagHeadersPriority
- }
- f.startWrite(http2FrameHeaders, flags, p.StreamID)
- if p.PadLength != 0 {
- f.writeByte(p.PadLength)
- }
- if !p.Priority.IsZero() {
- v := p.Priority.StreamDep
- if !http2validStreamIDOrZero(v) && !f.AllowIllegalWrites {
- return http2errDepStreamID
- }
- if p.Priority.Exclusive {
- v |= 1 << 31
- }
- f.writeUint32(v)
- f.writeByte(p.Priority.Weight)
- }
- f.wbuf = append(f.wbuf, p.BlockFragment...)
- f.wbuf = append(f.wbuf, http2padZeros[:p.PadLength]...)
- return f.endWrite()
-}
-
-// A PriorityFrame specifies the sender-advised priority of a stream.
-// See http://http2.github.io/http2-spec/#rfc.section.6.3
-type http2PriorityFrame struct {
- http2FrameHeader
- http2PriorityParam
-}
-
-// PriorityParam holds the stream prioritization parameters.
-type http2PriorityParam struct {
- // StreamDep is a 31-bit stream identifier for the
- // stream that this stream depends on. Zero means no
- // dependency.
- StreamDep uint32
-
- // Exclusive is whether the dependency is exclusive.
- Exclusive bool
-
- // Weight is the stream's zero-indexed weight. It should be
- // set together with StreamDep, or neither should be set. Per
- // the spec, "Add one to the value to obtain a weight between
- // 1 and 256."
- Weight uint8
-}
-
-func (p http2PriorityParam) IsZero() bool {
- return p == http2PriorityParam{}
-}
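-
-// Editor's note, not part of the original source: because Weight is
-// zero-indexed on the wire, the effective priority weight is Weight+1,
-// yielding the 1..256 range cited above. Hypothetical helper:
-func http2exampleEffectiveWeight(p http2PriorityParam) int {
-	return int(p.Weight) + 1
-}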
-
-func http2parsePriorityFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), payload []byte) (http2Frame, error) {
- if fh.StreamID == 0 {
- countError("frame_priority_zero_stream")
- return nil, http2connError{http2ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
- }
- if len(payload) != 5 {
- countError("frame_priority_bad_length")
- return nil, http2connError{http2ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
- }
- v := binary.BigEndian.Uint32(payload[:4])
- streamID := v & 0x7fffffff // mask off high bit
- return &http2PriorityFrame{
- http2FrameHeader: fh,
- http2PriorityParam: http2PriorityParam{
- Weight: payload[4],
- StreamDep: streamID,
- Exclusive: streamID != v, // was high bit set?
- },
- }, nil
-}
-
-// WritePriority writes a PRIORITY frame.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
-func (f *http2Framer) WritePriority(streamID uint32, p http2PriorityParam) error {
- if !http2validStreamID(streamID) && !f.AllowIllegalWrites {
- return http2errStreamID
- }
- if !http2validStreamIDOrZero(p.StreamDep) {
- return http2errDepStreamID
- }
- f.startWrite(http2FramePriority, 0, streamID)
- v := p.StreamDep
- if p.Exclusive {
- v |= 1 << 31
- }
- f.writeUint32(v)
- f.writeByte(p.Weight)
- return f.endWrite()
-}
-
-// A RSTStreamFrame allows for abnormal termination of a stream.
-// See http://http2.github.io/http2-spec/#rfc.section.6.4
-type http2RSTStreamFrame struct {
- http2FrameHeader
- ErrCode http2ErrCode
-}
-
-func http2parseRSTStreamFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
- if len(p) != 4 {
- countError("frame_rststream_bad_len")
- return nil, http2ConnectionError(http2ErrCodeFrameSize)
- }
- if fh.StreamID == 0 {
- countError("frame_rststream_zero_stream")
- return nil, http2ConnectionError(http2ErrCodeProtocol)
- }
- return &http2RSTStreamFrame{fh, http2ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
-}
-
-// WriteRSTStream writes a RST_STREAM frame.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
-func (f *http2Framer) WriteRSTStream(streamID uint32, code http2ErrCode) error {
- if !http2validStreamID(streamID) && !f.AllowIllegalWrites {
- return http2errStreamID
- }
- f.startWrite(http2FrameRSTStream, 0, streamID)
- f.writeUint32(uint32(code))
- return f.endWrite()
-}
-
-// A ContinuationFrame is used to continue a sequence of header block fragments.
-// See http://http2.github.io/http2-spec/#rfc.section.6.10
-type http2ContinuationFrame struct {
- http2FrameHeader
- headerFragBuf []byte
-}
-
-func http2parseContinuationFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
- if fh.StreamID == 0 {
- countError("frame_continuation_zero_stream")
- return nil, http2connError{http2ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
- }
- return &http2ContinuationFrame{fh, p}, nil
-}
-
-func (f *http2ContinuationFrame) HeaderBlockFragment() []byte {
- f.checkValid()
- return f.headerFragBuf
-}
-
-func (f *http2ContinuationFrame) HeadersEnded() bool {
- return f.http2FrameHeader.Flags.Has(http2FlagContinuationEndHeaders)
-}
-
-// WriteContinuation writes a CONTINUATION frame.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
-func (f *http2Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
- if !http2validStreamID(streamID) && !f.AllowIllegalWrites {
- return http2errStreamID
- }
- var flags http2Flags
- if endHeaders {
- flags |= http2FlagContinuationEndHeaders
- }
- f.startWrite(http2FrameContinuation, flags, streamID)
- f.wbuf = append(f.wbuf, headerBlockFragment...)
- return f.endWrite()
-}
-
-// A PushPromiseFrame is used to initiate a server stream.
-// See http://http2.github.io/http2-spec/#rfc.section.6.6
-type http2PushPromiseFrame struct {
- http2FrameHeader
- PromiseID uint32
- headerFragBuf []byte // not owned
-}
-
-func (f *http2PushPromiseFrame) HeaderBlockFragment() []byte {
- f.checkValid()
- return f.headerFragBuf
-}
-
-func (f *http2PushPromiseFrame) HeadersEnded() bool {
- return f.http2FrameHeader.Flags.Has(http2FlagPushPromiseEndHeaders)
-}
-
-func http2parsePushPromise(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (_ http2Frame, err error) {
- pp := &http2PushPromiseFrame{
- http2FrameHeader: fh,
- }
- if pp.StreamID == 0 {
- // PUSH_PROMISE frames MUST be associated with an existing,
- // peer-initiated stream. The stream identifier of a
- // PUSH_PROMISE frame indicates the stream it is associated
- // with. If the stream identifier field specifies the value
- // 0x0, a recipient MUST respond with a connection error
- // (Section 5.4.1) of type PROTOCOL_ERROR.
- countError("frame_pushpromise_zero_stream")
- return nil, http2ConnectionError(http2ErrCodeProtocol)
- }
- // The PUSH_PROMISE frame includes optional padding.
- // Padding fields and flags are identical to those defined for DATA frames
- var padLength uint8
- if fh.Flags.Has(http2FlagPushPromisePadded) {
- if p, padLength, err = http2readByte(p); err != nil {
- countError("frame_pushpromise_pad_short")
- return
- }
- }
-
- p, pp.PromiseID, err = http2readUint32(p)
- if err != nil {
- countError("frame_pushpromise_promiseid_short")
- return
- }
- pp.PromiseID = pp.PromiseID & (1<<31 - 1)
-
- if int(padLength) > len(p) {
- // like the DATA frame, error out if padding is longer than the body.
- countError("frame_pushpromise_pad_too_big")
- return nil, http2ConnectionError(http2ErrCodeProtocol)
- }
- pp.headerFragBuf = p[:len(p)-int(padLength)]
- return pp, nil
-}
-
-// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
-type http2PushPromiseParam struct {
- // StreamID is the required Stream ID to initiate.
- StreamID uint32
-
-	// PromiseID is the required Stream ID that this
-	// PUSH_PROMISE frame announces.
- PromiseID uint32
-
- // BlockFragment is part (or all) of a Header Block.
- BlockFragment []byte
-
- // EndHeaders indicates that this frame contains an entire
- // header block and is not followed by any
- // CONTINUATION frames.
- EndHeaders bool
-
- // PadLength is the optional number of bytes of zeros to add
- // to this frame.
- PadLength uint8
-}
-
-// WritePushPromise writes a single PushPromise Frame.
-//
-// As with HEADERS frames, this is the low-level call for writing
-// individual frames. CONTINUATION frames are handled elsewhere.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
-func (f *http2Framer) WritePushPromise(p http2PushPromiseParam) error {
- if !http2validStreamID(p.StreamID) && !f.AllowIllegalWrites {
- return http2errStreamID
- }
- var flags http2Flags
- if p.PadLength != 0 {
- flags |= http2FlagPushPromisePadded
- }
- if p.EndHeaders {
- flags |= http2FlagPushPromiseEndHeaders
- }
- f.startWrite(http2FramePushPromise, flags, p.StreamID)
- if p.PadLength != 0 {
- f.writeByte(p.PadLength)
- }
- if !http2validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
- return http2errStreamID
- }
- f.writeUint32(p.PromiseID)
- f.wbuf = append(f.wbuf, p.BlockFragment...)
- f.wbuf = append(f.wbuf, http2padZeros[:p.PadLength]...)
- return f.endWrite()
-}
-
-// WriteRawFrame writes a raw frame. This can be used to write
-// extension frames unknown to this package.
-func (f *http2Framer) WriteRawFrame(t http2FrameType, flags http2Flags, streamID uint32, payload []byte) error {
- f.startWrite(t, flags, streamID)
- f.writeBytes(payload)
- return f.endWrite()
-}
-
-func http2readByte(p []byte) (remain []byte, b byte, err error) {
- if len(p) == 0 {
- return nil, 0, io.ErrUnexpectedEOF
- }
- return p[1:], p[0], nil
-}
-
-func http2readUint32(p []byte) (remain []byte, v uint32, err error) {
- if len(p) < 4 {
- return nil, 0, io.ErrUnexpectedEOF
- }
- return p[4:], binary.BigEndian.Uint32(p[:4]), nil
-}
-
-type http2streamEnder interface {
- StreamEnded() bool
-}
-
-type http2headersEnder interface {
- HeadersEnded() bool
-}
-
-type http2headersOrContinuation interface {
- http2headersEnder
- HeaderBlockFragment() []byte
-}
-
-// A MetaHeadersFrame is the representation of one HEADERS frame and
-// zero or more contiguous CONTINUATION frames and the decoding of
-// their HPACK-encoded contents.
-//
-// This type of frame does not appear on the wire and is only returned
-// by the Framer when Framer.ReadMetaHeaders is set.
-type http2MetaHeadersFrame struct {
- *http2HeadersFrame
-
- // Fields are the fields contained in the HEADERS and
- // CONTINUATION frames. The underlying slice is owned by the
- // Framer and must not be retained after the next call to
- // ReadFrame.
- //
- // Fields are guaranteed to be in the correct http2 order and
- // not have unknown pseudo header fields or invalid header
- // field names or values. Required pseudo header fields may be
-	// missing, however. Use the MetaHeadersFrame.PseudoValue accessor
-	// method to access pseudo headers.
- Fields []hpack.HeaderField
-
- // Truncated is whether the max header list size limit was hit
- // and Fields is incomplete. The hpack decoder state is still
- // valid, however.
- Truncated bool
-}
-
-// PseudoValue returns the given pseudo header field's value.
-// The provided pseudo field should not contain the leading colon.
-func (mh *http2MetaHeadersFrame) PseudoValue(pseudo string) string {
- for _, hf := range mh.Fields {
- if !hf.IsPseudo() {
- return ""
- }
- if hf.Name[1:] == pseudo {
- return hf.Value
- }
- }
- return ""
-}
-
-// RegularFields returns the regular (non-pseudo) header fields of mh.
-// The caller does not own the returned slice.
-func (mh *http2MetaHeadersFrame) RegularFields() []hpack.HeaderField {
- for i, hf := range mh.Fields {
- if !hf.IsPseudo() {
- return mh.Fields[i:]
- }
- }
- return nil
-}
-
-// PseudoFields returns the pseudo header fields of mh.
-// The caller does not own the returned slice.
-func (mh *http2MetaHeadersFrame) PseudoFields() []hpack.HeaderField {
- for i, hf := range mh.Fields {
- if !hf.IsPseudo() {
- return mh.Fields[:i]
- }
- }
- return mh.Fields
-}
-
-func (mh *http2MetaHeadersFrame) checkPseudos() error {
- var isRequest, isResponse bool
- pf := mh.PseudoFields()
- for i, hf := range pf {
- switch hf.Name {
- case ":method", ":path", ":scheme", ":authority":
- isRequest = true
- case ":status":
- isResponse = true
- default:
- return http2pseudoHeaderError(hf.Name)
- }
- // Check for duplicates.
- // This would be a bad algorithm, but N is 4.
- // And this doesn't allocate.
- for _, hf2 := range pf[:i] {
- if hf.Name == hf2.Name {
- return http2duplicatePseudoHeaderError(hf.Name)
- }
- }
- }
- if isRequest && isResponse {
- return http2errMixPseudoHeaderTypes
- }
- return nil
-}
-
-func (fr *http2Framer) maxHeaderStringLen() int {
- v := fr.maxHeaderListSize()
- if uint32(int(v)) == v {
- return int(v)
- }
- // They had a crazy big number for MaxHeaderBytes anyway,
- // so give them unlimited header lengths:
- return 0
-}
-
-// readMetaFrame reads 0 or more CONTINUATION frames from fr, merges
-// them into the provided hf, and returns a MetaHeadersFrame
-// with the decoded hpack values.
-func (fr *http2Framer) readMetaFrame(hf *http2HeadersFrame) (*http2MetaHeadersFrame, error) {
- if fr.AllowIllegalReads {
- return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
- }
- mh := &http2MetaHeadersFrame{
- http2HeadersFrame: hf,
- }
- var remainSize = fr.maxHeaderListSize()
- var sawRegular bool
-
- var invalid error // pseudo header field errors
- hdec := fr.ReadMetaHeaders
- hdec.SetEmitEnabled(true)
- hdec.SetMaxStringLength(fr.maxHeaderStringLen())
- hdec.SetEmitFunc(func(hf hpack.HeaderField) {
- if http2VerboseLogs && fr.logReads {
- fr.debugReadLoggerf("http2: decoded hpack field %+v", hf)
- }
- if !httpguts.ValidHeaderFieldValue(hf.Value) {
- invalid = http2headerFieldValueError(hf.Value)
- }
- isPseudo := strings.HasPrefix(hf.Name, ":")
- if isPseudo {
- if sawRegular {
- invalid = http2errPseudoAfterRegular
- }
- } else {
- sawRegular = true
- if !http2validWireHeaderFieldName(hf.Name) {
- invalid = http2headerFieldNameError(hf.Name)
- }
- }
-
- if invalid != nil {
- hdec.SetEmitEnabled(false)
- return
- }
-
- size := hf.Size()
- if size > remainSize {
- hdec.SetEmitEnabled(false)
- mh.Truncated = true
- return
- }
- remainSize -= size
-
- mh.Fields = append(mh.Fields, hf)
- })
- // Lose reference to MetaHeadersFrame:
- defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {})
-
- var hc http2headersOrContinuation = hf
- for {
- frag := hc.HeaderBlockFragment()
- if _, err := hdec.Write(frag); err != nil {
- return nil, http2ConnectionError(http2ErrCodeCompression)
- }
-
- if hc.HeadersEnded() {
- break
- }
- if f, err := fr.ReadFrame(); err != nil {
- return nil, err
- } else {
- hc = f.(*http2ContinuationFrame) // guaranteed by checkFrameOrder
- }
- }
-
- mh.http2HeadersFrame.headerFragBuf = nil
- mh.http2HeadersFrame.invalidate()
-
- if err := hdec.Close(); err != nil {
- return nil, http2ConnectionError(http2ErrCodeCompression)
- }
- if invalid != nil {
- fr.errDetail = invalid
- if http2VerboseLogs {
- log.Printf("http2: invalid header: %v", invalid)
- }
- return nil, http2StreamError{mh.StreamID, http2ErrCodeProtocol, invalid}
- }
- if err := mh.checkPseudos(); err != nil {
- fr.errDetail = err
- if http2VerboseLogs {
- log.Printf("http2: invalid pseudo headers: %v", err)
- }
- return nil, http2StreamError{mh.StreamID, http2ErrCodeProtocol, err}
- }
- return mh, nil
-}
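-
-// Editor's sketch, not part of the original source: callers opt in to meta
-// frames by attaching an hpack decoder before reading; ReadFrame then
-// returns *http2MetaHeadersFrame values in place of raw HEADERS frames:
-//
-//	fr := http2NewFramer(w, r)
-//	fr.ReadMetaHeaders = hpack.NewDecoder(http2initialHeaderTableSize, nil)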
-
-func http2summarizeFrame(f http2Frame) string {
- var buf bytes.Buffer
- f.Header().writeDebug(&buf)
- switch f := f.(type) {
- case *http2SettingsFrame:
- n := 0
- f.ForeachSetting(func(s http2Setting) error {
- n++
- if n == 1 {
- buf.WriteString(", settings:")
- }
- fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val)
- return nil
- })
- if n > 0 {
- buf.Truncate(buf.Len() - 1) // remove trailing comma
- }
- case *http2DataFrame:
- data := f.Data()
- const max = 256
- if len(data) > max {
- data = data[:max]
- }
- fmt.Fprintf(&buf, " data=%q", data)
- if len(f.Data()) > max {
- fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max)
- }
- case *http2WindowUpdateFrame:
- if f.StreamID == 0 {
- buf.WriteString(" (conn)")
- }
- fmt.Fprintf(&buf, " incr=%v", f.Increment)
- case *http2PingFrame:
- fmt.Fprintf(&buf, " ping=%q", f.Data[:])
- case *http2GoAwayFrame:
- fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q",
- f.LastStreamID, f.ErrCode, f.debugData)
- case *http2RSTStreamFrame:
- fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode)
- }
- return buf.String()
-}
-
-func http2traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
- return trace != nil && trace.WroteHeaderField != nil
-}
-
-func http2traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
- if trace != nil && trace.WroteHeaderField != nil {
- trace.WroteHeaderField(k, []string{v})
- }
-}
-
-func http2traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
- if trace != nil {
- return trace.Got1xxResponse
- }
- return nil
-}
-
-// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
-// connection.
-func (t *http2Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
- dialer := &tls.Dialer{
- Config: cfg,
- }
- cn, err := dialer.DialContext(ctx, network, addr)
- if err != nil {
- return nil, err
- }
- tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
- return tlsCn, nil
-}
-
-var http2DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
-
-type http2goroutineLock uint64
-
-func http2newGoroutineLock() http2goroutineLock {
- if !http2DebugGoroutines {
- return 0
- }
- return http2goroutineLock(http2curGoroutineID())
-}
-
-func (g http2goroutineLock) check() {
- if !http2DebugGoroutines {
- return
- }
- if http2curGoroutineID() != uint64(g) {
- panic("running on the wrong goroutine")
- }
-}
-
-func (g http2goroutineLock) checkNotOn() {
- if !http2DebugGoroutines {
- return
- }
- if http2curGoroutineID() == uint64(g) {
- panic("running on the wrong goroutine")
- }
-}
-
-var http2goroutineSpace = []byte("goroutine ")
-
-func http2curGoroutineID() uint64 {
- bp := http2littleBuf.Get().(*[]byte)
- defer http2littleBuf.Put(bp)
- b := *bp
- b = b[:runtime.Stack(b, false)]
- // Parse the 4707 out of "goroutine 4707 ["
- b = bytes.TrimPrefix(b, http2goroutineSpace)
- i := bytes.IndexByte(b, ' ')
- if i < 0 {
- panic(fmt.Sprintf("No space found in %q", b))
- }
- b = b[:i]
- n, err := http2parseUintBytes(b, 10, 64)
- if err != nil {
- panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
- }
- return n
-}
-
-var http2littleBuf = sync.Pool{
- New: func() interface{} {
- buf := make([]byte, 64)
- return &buf
- },
-}
-
-// parseUintBytes is like strconv.ParseUint, but using a []byte.
-func http2parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
- var cutoff, maxVal uint64
-
- if bitSize == 0 {
- bitSize = int(strconv.IntSize)
- }
-
- s0 := s
- switch {
- case len(s) < 1:
- err = strconv.ErrSyntax
- goto Error
-
- case 2 <= base && base <= 36:
- // valid base; nothing to do
-
- case base == 0:
- // Look for octal, hex prefix.
- switch {
- case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
- base = 16
- s = s[2:]
- if len(s) < 1 {
- err = strconv.ErrSyntax
- goto Error
- }
- case s[0] == '0':
- base = 8
- default:
- base = 10
- }
-
- default:
- err = errors.New("invalid base " + strconv.Itoa(base))
- goto Error
- }
-
- n = 0
- cutoff = http2cutoff64(base)
- maxVal = 1<<uint(bitSize) - 1
-
- for i := 0; i < len(s); i++ {
- var v byte
- d := s[i]
- switch {
- case '0' <= d && d <= '9':
- v = d - '0'
- case 'a' <= d && d <= 'z':
- v = d - 'a' + 10
- case 'A' <= d && d <= 'Z':
- v = d - 'A' + 10
- default:
- n = 0
- err = strconv.ErrSyntax
- goto Error
- }
- if int(v) >= base {
- n = 0
- err = strconv.ErrSyntax
- goto Error
- }
-
- if n >= cutoff {
- // n*base overflows
- n = 1<<64 - 1
- err = strconv.ErrRange
- goto Error
- }
- n *= uint64(base)
-
- n1 := n + uint64(v)
- if n1 < n || n1 > maxVal {
- // n+v overflows
- n = 1<<64 - 1
- err = strconv.ErrRange
- goto Error
- }
- n = n1
- }
-
- return n, nil
-
-Error:
- return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
-}
-
-// Return the first number n such that n*base >= 1<<64.
-func http2cutoff64(base int) uint64 {
- if base < 2 {
- return 0
- }
- return (1<<64-1)/uint64(base) + 1
-}
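-
-// Editor's note, not part of the original source: for example,
-// http2cutoff64(10) is 1844674407370955162, the smallest n for which
-// n*10 no longer fits in 64 bits, which is the overflow guard used above.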
-
-var (
- http2commonBuildOnce sync.Once
- http2commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case
- http2commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case
-)
-
-func http2buildCommonHeaderMapsOnce() {
- http2commonBuildOnce.Do(http2buildCommonHeaderMaps)
-}
-
-func http2buildCommonHeaderMaps() {
- common := []string{
- "accept",
- "accept-charset",
- "accept-encoding",
- "accept-language",
- "accept-ranges",
- "age",
- "access-control-allow-origin",
- "allow",
- "authorization",
- "cache-control",
- "content-disposition",
- "content-encoding",
- "content-language",
- "content-length",
- "content-location",
- "content-range",
- "content-type",
- "cookie",
- "date",
- "etag",
- "expect",
- "expires",
- "from",
- "host",
- "if-match",
- "if-modified-since",
- "if-none-match",
- "if-unmodified-since",
- "last-modified",
- "link",
- "location",
- "max-forwards",
- "proxy-authenticate",
- "proxy-authorization",
- "range",
- "referer",
- "refresh",
- "retry-after",
- "server",
- "set-cookie",
- "strict-transport-security",
- "trailer",
- "transfer-encoding",
- "user-agent",
- "vary",
- "via",
- "www-authenticate",
- }
- http2commonLowerHeader = make(map[string]string, len(common))
- http2commonCanonHeader = make(map[string]string, len(common))
- for _, v := range common {
- chk := CanonicalHeaderKey(v)
- http2commonLowerHeader[chk] = v
- http2commonCanonHeader[v] = chk
- }
-}
-
-func http2lowerHeader(v string) (lower string, ascii bool) {
- http2buildCommonHeaderMapsOnce()
- if s, ok := http2commonLowerHeader[v]; ok {
- return s, true
- }
- return http2asciiToLower(v)
-}
-
-var (
- http2VerboseLogs bool
- http2logFrameWrites bool
- http2logFrameReads bool
- http2inTests bool
-)
-
-func init() {
- e := os.Getenv("GODEBUG")
- if strings.Contains(e, "http2debug=1") {
- http2VerboseLogs = true
- }
- if strings.Contains(e, "http2debug=2") {
- http2VerboseLogs = true
- http2logFrameWrites = true
- http2logFrameReads = true
- }
-}
-
-const (
- // ClientPreface is the string that must be sent by new
- // connections from clients.
- http2ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
-
- // SETTINGS_MAX_FRAME_SIZE default
- // http://http2.github.io/http2-spec/#rfc.section.6.5.2
- http2initialMaxFrameSize = 16384
-
- // NextProtoTLS is the NPN/ALPN protocol negotiated during
- // HTTP/2's TLS setup.
- http2NextProtoTLS = "h2"
-
- // http://http2.github.io/http2-spec/#SettingValues
- http2initialHeaderTableSize = 4096
-
- http2initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
-
- http2defaultMaxReadFrameSize = 1 << 20
-)
-
-var (
- http2clientPreface = []byte(http2ClientPreface)
-)
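-
-// Editor's sketch, not part of the original source: a client begins every
-// connection by writing the preface bytes above and then an initial
-// (possibly empty) SETTINGS frame. The helper name is hypothetical.
-func http2exampleSendPreface(conn io.Writer) error {
-	if _, err := conn.Write(http2clientPreface); err != nil {
-		return err
-	}
-	return http2NewFramer(conn, nil).WriteSettings()
-}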
-
-type http2streamState int
-
-// HTTP/2 stream states.
-//
-// See http://tools.ietf.org/html/rfc7540#section-5.1.
-//
-// For simplicity, the server code merges "reserved (local)" into
-// "half-closed (remote)". This is one less state transition to track.
-// The only downside is that we send PUSH_PROMISEs slightly less
-// liberally than allowable. More discussion here:
-// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
-//
-// "reserved (remote)" is omitted since the client code does not
-// support server push.
-const (
- http2stateIdle http2streamState = iota
- http2stateOpen
- http2stateHalfClosedLocal
- http2stateHalfClosedRemote
- http2stateClosed
-)
-
-var http2stateName = [...]string{
- http2stateIdle: "Idle",
- http2stateOpen: "Open",
- http2stateHalfClosedLocal: "HalfClosedLocal",
- http2stateHalfClosedRemote: "HalfClosedRemote",
- http2stateClosed: "Closed",
-}
-
-func (st http2streamState) String() string {
- return http2stateName[st]
-}
-
-// Setting is a setting parameter: which setting it is, and its value.
-type http2Setting struct {
- // ID is which setting is being set.
- // See http://http2.github.io/http2-spec/#SettingValues
- ID http2SettingID
-
- // Val is the value.
- Val uint32
-}
-
-func (s http2Setting) String() string {
- return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
-}
-
-// Valid reports whether the setting is valid.
-func (s http2Setting) Valid() error {
- // Limits and error codes from 6.5.2 Defined SETTINGS Parameters
- switch s.ID {
- case http2SettingEnablePush:
- if s.Val != 1 && s.Val != 0 {
- return http2ConnectionError(http2ErrCodeProtocol)
- }
- case http2SettingInitialWindowSize:
- if s.Val > 1<<31-1 {
- return http2ConnectionError(http2ErrCodeFlowControl)
- }
- case http2SettingMaxFrameSize:
- if s.Val < 16384 || s.Val > 1<<24-1 {
- return http2ConnectionError(http2ErrCodeProtocol)
- }
- }
- return nil
-}
-
-// A SettingID is an HTTP/2 setting as defined in
-// http://http2.github.io/http2-spec/#iana-settings
-type http2SettingID uint16
-
-const (
- http2SettingHeaderTableSize http2SettingID = 0x1
- http2SettingEnablePush http2SettingID = 0x2
- http2SettingMaxConcurrentStreams http2SettingID = 0x3
- http2SettingInitialWindowSize http2SettingID = 0x4
- http2SettingMaxFrameSize http2SettingID = 0x5
- http2SettingMaxHeaderListSize http2SettingID = 0x6
-)
-
-var http2settingName = map[http2SettingID]string{
- http2SettingHeaderTableSize: "HEADER_TABLE_SIZE",
- http2SettingEnablePush: "ENABLE_PUSH",
- http2SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
- http2SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
- http2SettingMaxFrameSize: "MAX_FRAME_SIZE",
- http2SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
-}
-
-func (s http2SettingID) String() string {
- if v, ok := http2settingName[s]; ok {
- return v
- }
- return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
-}
-
-// validWireHeaderFieldName reports whether v is a valid header field
-// name (key). See httpguts.ValidHeaderName for the base rules.
-//
-// Further, http2 says:
-// "Just as in HTTP/1.x, header field names are strings of ASCII
-// characters that are compared in a case-insensitive
-// fashion. However, header field names MUST be converted to
-// lowercase prior to their encoding in HTTP/2. "
-func http2validWireHeaderFieldName(v string) bool {
- if len(v) == 0 {
- return false
- }
- for _, r := range v {
- if !httpguts.IsTokenRune(r) {
- return false
- }
- if 'A' <= r && r <= 'Z' {
- return false
- }
- }
- return true
-}
-
-func http2httpCodeString(code int) string {
- switch code {
- case 200:
- return "200"
- case 404:
- return "404"
- }
- return strconv.Itoa(code)
-}
-
-// from pkg io
-type http2stringWriter interface {
- WriteString(s string) (n int, err error)
-}
-
-// A gate lets two goroutines coordinate their activities.
-type http2gate chan struct{}
-
-func (g http2gate) Done() { g <- struct{}{} }
-
-func (g http2gate) Wait() { <-g }
-
-// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
-type http2closeWaiter chan struct{}
-
-// Init makes a closeWaiter usable.
-// It exists so that a closeWaiter value can be placed inside a
-// larger struct and have the Mutex and Cond's memory in the same
-// allocation.
-func (cw *http2closeWaiter) Init() {
- *cw = make(chan struct{})
-}
-
-// Close marks the closeWaiter as closed and unblocks any waiters.
-func (cw http2closeWaiter) Close() {
- close(cw)
-}
-
-// Wait waits for the closeWaiter to become closed.
-func (cw http2closeWaiter) Wait() {
- <-cw
-}
-
-// bufferedWriter is a buffered writer that writes to w.
-// Its buffered writer is lazily allocated as needed, to minimize
-// idle memory usage with many connections.
-type http2bufferedWriter struct {
- _ http2incomparable
- w io.Writer // immutable
- bw *bufio.Writer // non-nil when data is buffered
-}
-
-func http2newBufferedWriter(w io.Writer) *http2bufferedWriter {
- return &http2bufferedWriter{w: w}
-}
-
-// bufWriterPoolBufferSize is the size of bufio.Writer's
-// buffers created using bufWriterPool.
-//
-// TODO: pick a less arbitrary value? this is a bit under
-// (3 x typical 1500 byte MTU) at least. Other than that,
-// not much thought went into it.
-const http2bufWriterPoolBufferSize = 4 << 10
-
-var http2bufWriterPool = sync.Pool{
- New: func() interface{} {
- return bufio.NewWriterSize(nil, http2bufWriterPoolBufferSize)
- },
-}
-
-func (w *http2bufferedWriter) Available() int {
- if w.bw == nil {
- return http2bufWriterPoolBufferSize
- }
- return w.bw.Available()
-}
-
-func (w *http2bufferedWriter) Write(p []byte) (n int, err error) {
- if w.bw == nil {
- bw := http2bufWriterPool.Get().(*bufio.Writer)
- bw.Reset(w.w)
- w.bw = bw
- }
- return w.bw.Write(p)
-}
-
-func (w *http2bufferedWriter) Flush() error {
- bw := w.bw
- if bw == nil {
- return nil
- }
- err := bw.Flush()
- bw.Reset(nil)
- http2bufWriterPool.Put(bw)
- w.bw = nil
- return err
-}
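-
-// Editor's sketch, not part of the original source: the lazy-allocation
-// contract above means a bufferedWriter costs only two words until the
-// first Write; Flush returns the bufio.Writer to the pool so idle
-// connections hold no buffer. The helper name is hypothetical.
-func http2exampleBufferedWrite(w io.Writer, p []byte) error {
-	bw := http2newBufferedWriter(w)
-	if _, err := bw.Write(p); err != nil { // takes a buffer from the pool
-		return err
-	}
-	return bw.Flush() // flushes and returns the buffer to the pool
-}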
-
-func http2mustUint31(v int32) uint32 {
- if v < 0 || v > 2147483647 {
- panic("out of range")
- }
- return uint32(v)
-}
-
-// bodyAllowedForStatus reports whether a given response status code
-// permits a body. See RFC 7230, section 3.3.
-func http2bodyAllowedForStatus(status int) bool {
- switch {
- case status >= 100 && status <= 199:
- return false
- case status == 204:
- return false
- case status == 304:
- return false
- }
- return true
-}
-
-type http2httpError struct {
- _ http2incomparable
- msg string
- timeout bool
-}
-
-func (e *http2httpError) Error() string { return e.msg }
-
-func (e *http2httpError) Timeout() bool { return e.timeout }
-
-func (e *http2httpError) Temporary() bool { return true }
-
-var http2errTimeout error = &http2httpError{msg: "http2: timeout awaiting response headers", timeout: true}
-
-type http2connectionStater interface {
- ConnectionState() tls.ConnectionState
-}
-
-var http2sorterPool = sync.Pool{New: func() interface{} { return new(http2sorter) }}
-
-type http2sorter struct {
- v []string // owned by sorter
-}
-
-func (s *http2sorter) Len() int { return len(s.v) }
-
-func (s *http2sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
-
-func (s *http2sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
-
-// Keys returns the sorted keys of h.
-//
-// The returned slice is only valid until s is used again or returned to
-// its pool.
-func (s *http2sorter) Keys(h Header) []string {
- keys := s.v[:0]
- for k := range h {
- keys = append(keys, k)
- }
- s.v = keys
- sort.Sort(s)
- return keys
-}
-
-func (s *http2sorter) SortStrings(ss []string) {
-	// Our sorter works on s.v, which the sorter owns, so
- // stash it away while we sort the user's buffer.
- save := s.v
- s.v = ss
- sort.Sort(s)
- s.v = save
-}
-
-// validPseudoPath reports whether v is a valid :path pseudo-header
-// value. It must be either:
-//
-// *) a non-empty string starting with '/'
-// *) the string '*', for OPTIONS requests.
-//
-// For now this is only used as a quick check for deciding when to clean
-// up Opaque URLs before sending requests from the Transport.
-// See golang.org/issue/16847
-//
-// We used to enforce that the path also didn't start with "//", but
-// Google's GFE accepts such paths and Chrome sends them, so ignore
-// that part of the spec. See golang.org/issue/19103.
-func http2validPseudoPath(v string) bool {
- return (len(v) > 0 && v[0] == '/') || v == "*"
-}
-
-// incomparable is a zero-width, non-comparable type. Adding it to a struct
-// makes that struct also non-comparable, and generally doesn't add
-// any size (as long as it's first).
-type http2incomparable [0]func()
-
-// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
-// io.Pipe except there are no PipeReader/PipeWriter halves, and the
-// underlying buffer is an interface. (io.Pipe is always unbuffered)
-type http2pipe struct {
- mu sync.Mutex
- c sync.Cond // c.L lazily initialized to &p.mu
- b http2pipeBuffer // nil when done reading
- unread int // bytes unread when done
- err error // read error once empty. non-nil means closed.
- breakErr error // immediate read error (caller doesn't see rest of b)
- donec chan struct{} // closed on error
- readFn func() // optional code to run in Read before error
-}
-
-type http2pipeBuffer interface {
- Len() int
- io.Writer
- io.Reader
-}
-
-// setBuffer initializes the pipe buffer.
-// It has no effect if the pipe is already closed.
-func (p *http2pipe) setBuffer(b http2pipeBuffer) {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.err != nil || p.breakErr != nil {
- return
- }
- p.b = b
-}
-
-func (p *http2pipe) Len() int {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.b == nil {
- return p.unread
- }
- return p.b.Len()
-}
-
-// Read waits until data is available and copies bytes
-// from the buffer into p.
-func (p *http2pipe) Read(d []byte) (n int, err error) {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.c.L == nil {
- p.c.L = &p.mu
- }
- for {
- if p.breakErr != nil {
- return 0, p.breakErr
- }
- if p.b != nil && p.b.Len() > 0 {
- return p.b.Read(d)
- }
- if p.err != nil {
- if p.readFn != nil {
- p.readFn() // e.g. copy trailers
- p.readFn = nil // not sticky like p.err
- }
- p.b = nil
- return 0, p.err
- }
- p.c.Wait()
- }
-}
-
-var http2errClosedPipeWrite = errors.New("write on closed buffer")
-
-// Write copies bytes from p into the buffer and wakes a reader.
-// It is an error to write more data than the buffer can hold.
-func (p *http2pipe) Write(d []byte) (n int, err error) {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.c.L == nil {
- p.c.L = &p.mu
- }
- defer p.c.Signal()
- if p.err != nil {
- return 0, http2errClosedPipeWrite
- }
- if p.breakErr != nil {
- p.unread += len(d)
- return len(d), nil // discard when there is no reader
- }
- return p.b.Write(d)
-}
-
-// CloseWithError causes the next Read (waking up a current blocked
-// Read if needed) to return the provided err after all data has been
-// read.
-//
-// The error must be non-nil.
-func (p *http2pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
-
-// BreakWithError causes the next Read (waking up a current blocked
-// Read if needed) to return the provided err immediately, without
-// waiting for unread data.
-func (p *http2pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
-
-// closeWithErrorAndCode is like CloseWithError but also sets some code to run
-// in the caller's goroutine before returning the error.
-func (p *http2pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
-
-func (p *http2pipe) closeWithError(dst *error, err error, fn func()) {
- if err == nil {
- panic("err must be non-nil")
- }
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.c.L == nil {
- p.c.L = &p.mu
- }
- defer p.c.Signal()
- if *dst != nil {
- // Already been done.
- return
- }
- p.readFn = fn
- if dst == &p.breakErr {
- if p.b != nil {
- p.unread += p.b.Len()
- }
- p.b = nil
- }
- *dst = err
- p.closeDoneLocked()
-}
-
-// requires p.mu be held.
-func (p *http2pipe) closeDoneLocked() {
- if p.donec == nil {
- return
- }
- // Close if unclosed. This isn't racy since we always
- // hold p.mu while closing.
- select {
- case <-p.donec:
- default:
- close(p.donec)
- }
-}
-
-// Err returns the error (if any) first set by BreakWithError or CloseWithError.
-func (p *http2pipe) Err() error {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.breakErr != nil {
- return p.breakErr
- }
- return p.err
-}
-
-// Done returns a channel which is closed if and when this pipe is closed
-// with CloseWithError.
-func (p *http2pipe) Done() <-chan struct{} {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.donec == nil {
- p.donec = make(chan struct{})
- if p.err != nil || p.breakErr != nil {
- // Already hit an error.
- p.closeDoneLocked()
- }
- }
- return p.donec
-}
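-
-// Editor's sketch, not part of the original source: a minimal
-// producer/consumer use of the pipe above, with a bytes.Buffer as the
-// backing pipeBuffer. The helper name is hypothetical.
-func http2examplePipe() ([]byte, error) {
-	var p http2pipe
-	p.setBuffer(new(bytes.Buffer))
-	if _, err := p.Write([]byte("body bytes")); err != nil {
-		return nil, err
-	}
-	p.CloseWithError(io.EOF) // later Reads drain the buffer, then see io.EOF
-	buf := make([]byte, 32)
-	n, _ := p.Read(buf) // buffered data is returned before the error
-	return buf[:n], nil
-}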
-
-const (
- http2prefaceTimeout = 10 * time.Second
- http2firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
- http2handlerChunkWriteSize = 4 << 10
- http2defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
- http2maxQueuedControlFrames = 10000
-)
-
-var (
- http2errClientDisconnected = errors.New("client disconnected")
- http2errClosedBody = errors.New("body closed by handler")
- http2errHandlerComplete = errors.New("http2: request body closed due to handler exiting")
- http2errStreamClosed = errors.New("http2: stream closed")
-)
-
-var http2responseWriterStatePool = sync.Pool{
- New: func() interface{} {
- rws := &http2responseWriterState{}
- rws.bw = bufio.NewWriterSize(http2chunkWriter{rws}, http2handlerChunkWriteSize)
- return rws
- },
-}
-
-// Test hooks.
-var (
- http2testHookOnConn func()
- http2testHookGetServerConn func(*http2serverConn)
- http2testHookOnPanicMu *sync.Mutex // nil except in tests
- http2testHookOnPanic func(sc *http2serverConn, panicVal interface{}) (rePanic bool)
-)
-
-// Server is an HTTP/2 server.
-type http2Server struct {
- // MaxHandlers limits the number of http.Handler ServeHTTP goroutines
- // which may run at a time over all connections.
-	// Negative or zero means no limit.
- // TODO: implement
- MaxHandlers int
-
- // MaxConcurrentStreams optionally specifies the number of
- // concurrent streams that each client may have open at a
- // time. This is unrelated to the number of http.Handler goroutines
- // which may be active globally, which is MaxHandlers.
- // If zero, MaxConcurrentStreams defaults to at least 100, per
- // the HTTP/2 spec's recommendations.
- MaxConcurrentStreams uint32
-
- // MaxReadFrameSize optionally specifies the largest frame
- // this server is willing to read. A valid value is between
- // 16k and 16M, inclusive. If zero or otherwise invalid, a
- // default value is used.
- MaxReadFrameSize uint32
-
- // PermitProhibitedCipherSuites, if true, permits the use of
- // cipher suites prohibited by the HTTP/2 spec.
- PermitProhibitedCipherSuites bool
-
- // IdleTimeout specifies how long until idle clients should be
- // closed with a GOAWAY frame. PING frames are not considered
- // activity for the purposes of IdleTimeout.
- IdleTimeout time.Duration
-
- // MaxUploadBufferPerConnection is the size of the initial flow
-	// control window for each connection. The HTTP/2 spec does not
- // allow this to be smaller than 65535 or larger than 2^32-1.
- // If the value is outside this range, a default value will be
- // used instead.
- MaxUploadBufferPerConnection int32
-
- // MaxUploadBufferPerStream is the size of the initial flow control
- // window for each stream. The HTTP/2 spec does not allow this to
- // be larger than 2^32-1. If the value is zero or larger than the
- // maximum, a default value will be used instead.
- MaxUploadBufferPerStream int32
-
- // NewWriteScheduler constructs a write scheduler for a connection.
- // If nil, a default scheduler is chosen.
- NewWriteScheduler func() http2WriteScheduler
-
- // CountError, if non-nil, is called on HTTP/2 server errors.
- // It's intended to increment a metric for monitoring, such
- // as an expvar or Prometheus metric.
- // The errType consists of only ASCII word characters.
- CountError func(errType string)
-
- // Internal state. This is a pointer (rather than embedded directly)
- // so that we don't embed a Mutex in this struct, which will make the
- // struct non-copyable, which might break some callers.
- state *http2serverInternalState
-}
-
-func (s *http2Server) initialConnRecvWindowSize() int32 {
- if s.MaxUploadBufferPerConnection > http2initialWindowSize {
- return s.MaxUploadBufferPerConnection
- }
- return 1 << 20
-}
-
-func (s *http2Server) initialStreamRecvWindowSize() int32 {
- if s.MaxUploadBufferPerStream > 0 {
- return s.MaxUploadBufferPerStream
- }
- return 1 << 20
-}
-
-func (s *http2Server) maxReadFrameSize() uint32 {
- if v := s.MaxReadFrameSize; v >= http2minMaxFrameSize && v <= http2maxFrameSize {
- return v
- }
- return http2defaultMaxReadFrameSize
-}
-
-func (s *http2Server) maxConcurrentStreams() uint32 {
- if v := s.MaxConcurrentStreams; v > 0 {
- return v
- }
- return http2defaultMaxStreams
-}
-
-// maxQueuedControlFrames is the maximum number of control frames like
-// SETTINGS, PING and RST_STREAM that will be queued for writing before
-// the connection is closed to prevent memory exhaustion attacks.
-func (s *http2Server) maxQueuedControlFrames() int {
- // TODO: if anybody asks, add a Server field, and remember to define the
- // behavior of negative values.
- return http2maxQueuedControlFrames
-}
-
-type http2serverInternalState struct {
- mu sync.Mutex
- activeConns map[*http2serverConn]struct{}
-}
-
-func (s *http2serverInternalState) registerConn(sc *http2serverConn) {
- if s == nil {
- return // if the Server was used without calling ConfigureServer
- }
- s.mu.Lock()
- s.activeConns[sc] = struct{}{}
- s.mu.Unlock()
-}
-
-func (s *http2serverInternalState) unregisterConn(sc *http2serverConn) {
- if s == nil {
- return // if the Server was used without calling ConfigureServer
- }
- s.mu.Lock()
- delete(s.activeConns, sc)
- s.mu.Unlock()
-}
-
-func (s *http2serverInternalState) startGracefulShutdown() {
- if s == nil {
- return // if the Server was used without calling ConfigureServer
- }
- s.mu.Lock()
- for sc := range s.activeConns {
- sc.startGracefulShutdown()
- }
- s.mu.Unlock()
-}
-
-// ConfigureServer adds HTTP/2 support to a net/http Server.
-//
-// The configuration conf may be nil.
-//
-// ConfigureServer must be called before s begins serving.
-func http2ConfigureServer(s *Server, conf *http2Server) error {
- if s == nil {
- panic("nil *http.Server")
- }
- if conf == nil {
- conf = new(http2Server)
- }
- conf.state = &http2serverInternalState{activeConns: make(map[*http2serverConn]struct{})}
- if h1, h2 := s, conf; h2.IdleTimeout == 0 {
- if h1.IdleTimeout != 0 {
- h2.IdleTimeout = h1.IdleTimeout
- } else {
- h2.IdleTimeout = h1.ReadTimeout
- }
- }
- s.RegisterOnShutdown(conf.state.startGracefulShutdown)
-
- if s.TLSConfig == nil {
- s.TLSConfig = new(tls.Config)
- } else if s.TLSConfig.CipherSuites != nil && s.TLSConfig.MinVersion < tls.VersionTLS13 {
- // If they already provided a TLS 1.0–1.2 CipherSuite list, return an
- // error if it is missing ECDHE_RSA_WITH_AES_128_GCM_SHA256 or
- // ECDHE_ECDSA_WITH_AES_128_GCM_SHA256.
- haveRequired := false
- for _, cs := range s.TLSConfig.CipherSuites {
- switch cs {
- case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- // Alternative MTI cipher to not discourage ECDSA-only servers.
- // See http://golang.org/cl/30721 for further information.
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
- haveRequired = true
- }
- }
- if !haveRequired {
- return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256)")
- }
- }
-
- // Note: not setting MinVersion to tls.VersionTLS12,
- // as we don't want to interfere with HTTP/1.1 traffic
- // on the user's server. We enforce TLS 1.2 later once
- // we accept a connection. Ideally this should be done
- // during next-proto selection, but using TLS <1.2 with
- // HTTP/2 is still the client's bug.
-
- s.TLSConfig.PreferServerCipherSuites = true
-
- if !http2strSliceContains(s.TLSConfig.NextProtos, http2NextProtoTLS) {
- s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, http2NextProtoTLS)
- }
- if !http2strSliceContains(s.TLSConfig.NextProtos, "http/1.1") {
- s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "http/1.1")
- }
-
- if s.TLSNextProto == nil {
- s.TLSNextProto = map[string]func(*Server, *tls.Conn, Handler){}
- }
- protoHandler := func(hs *Server, c *tls.Conn, h Handler) {
- if http2testHookOnConn != nil {
- http2testHookOnConn()
- }
- // The TLSNextProto interface predates contexts, so
- // the net/http package passes down its per-connection
- // base context via an exported but unadvertised
- // method on the Handler. This is for internal
- // net/http<=>http2 use only.
- var ctx context.Context
- type baseContexter interface {
- BaseContext() context.Context
- }
- if bc, ok := h.(baseContexter); ok {
- ctx = bc.BaseContext()
- }
- conf.ServeConn(c, &http2ServeConnOpts{
- Context: ctx,
- Handler: h,
- BaseConfig: hs,
- })
- }
- s.TLSNextProto[http2NextProtoTLS] = protoHandler
- return nil
-}
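-
-// Illustrative usage sketch, assuming the exported golang.org/x/net/http2
-// API from which this bundle is generated; srv, mux, and the certificate
-// paths below are placeholders:
-//
-//	srv := &http.Server{Addr: ":8443", Handler: mux}
-//	h2 := &http2.Server{MaxConcurrentStreams: 250}
-//	if err := http2.ConfigureServer(srv, h2); err != nil {
-//		log.Fatal(err)
-//	}
-//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))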
-
-// ServeConnOpts are options for the Server.ServeConn method.
-type http2ServeConnOpts struct {
- // Context is the base context to use.
- // If nil, context.Background is used.
- Context context.Context
-
- // BaseConfig optionally sets the base configuration
- // for values. If nil, defaults are used.
- BaseConfig *Server
-
- // Handler specifies which handler to use for processing
- // requests. If nil, BaseConfig.Handler is used. If BaseConfig
- // or BaseConfig.Handler is nil, http.DefaultServeMux is used.
- Handler Handler
-}
-
-func (o *http2ServeConnOpts) context() context.Context {
- if o != nil && o.Context != nil {
- return o.Context
- }
- return context.Background()
-}
-
-func (o *http2ServeConnOpts) baseConfig() *Server {
- if o != nil && o.BaseConfig != nil {
- return o.BaseConfig
- }
- return new(Server)
-}
-
-func (o *http2ServeConnOpts) handler() Handler {
- if o != nil {
- if o.Handler != nil {
- return o.Handler
- }
- if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
- return o.BaseConfig.Handler
- }
- }
- return DefaultServeMux
-}
-
-// ServeConn serves HTTP/2 requests on the provided connection and
-// blocks until the connection is no longer readable.
-//
-// ServeConn starts speaking HTTP/2 assuming that c has not had any
-// reads or writes. It writes its initial settings frame and expects
-// to be able to read the preface and settings frame from the
-// client. If c has a ConnectionState method like a *tls.Conn, the
-// ConnectionState is used to verify the TLS ciphersuite and to set
-// the Request.TLS field in Handlers.
-//
-// ServeConn does not support h2c by itself. Any h2c support must be
-// implemented in terms of providing a suitably-behaving net.Conn.
-//
-// The opts parameter is optional. If nil, default values are used.
-func (s *http2Server) ServeConn(c net.Conn, opts *http2ServeConnOpts) {
- baseCtx, cancel := http2serverConnBaseContext(c, opts)
- defer cancel()
-
- sc := &http2serverConn{
- srv: s,
- hs: opts.baseConfig(),
- conn: c,
- baseCtx: baseCtx,
- remoteAddrStr: c.RemoteAddr().String(),
- bw: http2newBufferedWriter(c),
- handler: opts.handler(),
- streams: make(map[uint32]*http2stream),
- readFrameCh: make(chan http2readFrameResult),
- wantWriteFrameCh: make(chan http2FrameWriteRequest, 8),
- serveMsgCh: make(chan interface{}, 8),
- wroteFrameCh: make(chan http2frameWriteResult, 1), // buffered; one send in writeFrameAsync
- bodyReadCh: make(chan http2bodyReadMsg), // buffering doesn't matter either way
- doneServing: make(chan struct{}),
- clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
- advMaxStreams: s.maxConcurrentStreams(),
- initialStreamSendWindowSize: http2initialWindowSize,
- maxFrameSize: http2initialMaxFrameSize,
- headerTableSize: http2initialHeaderTableSize,
- serveG: http2newGoroutineLock(),
- pushEnabled: true,
- }
-
- s.state.registerConn(sc)
- defer s.state.unregisterConn(sc)
-
- // The net/http package sets the write deadline from the
- // http.Server.WriteTimeout during the TLS handshake, but then
- // passes the connection off to us with the deadline already set.
- // Write deadlines are set per stream in serverConn.newStream.
- // Disarm the net.Conn write deadline here.
- if sc.hs.WriteTimeout != 0 {
- sc.conn.SetWriteDeadline(time.Time{})
- }
-
- if s.NewWriteScheduler != nil {
- sc.writeSched = s.NewWriteScheduler()
- } else {
- sc.writeSched = http2NewRandomWriteScheduler()
- }
-
- // These start at the RFC-specified defaults. If there is a higher
- // configured value for inflow, that will be updated when we send a
- // WINDOW_UPDATE shortly after sending SETTINGS.
- sc.flow.add(http2initialWindowSize)
- sc.inflow.add(http2initialWindowSize)
- sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
-
- fr := http2NewFramer(sc.bw, c)
- if s.CountError != nil {
- fr.countError = s.CountError
- }
- fr.ReadMetaHeaders = hpack.NewDecoder(http2initialHeaderTableSize, nil)
- fr.MaxHeaderListSize = sc.maxHeaderListSize()
- fr.SetMaxReadFrameSize(s.maxReadFrameSize())
- sc.framer = fr
-
- if tc, ok := c.(http2connectionStater); ok {
- sc.tlsState = new(tls.ConnectionState)
- *sc.tlsState = tc.ConnectionState()
- // 9.2 Use of TLS Features
- // An implementation of HTTP/2 over TLS MUST use TLS
- // 1.2 or higher with the restrictions on feature set
- // and cipher suite described in this section. Due to
- // implementation limitations, it might not be
- // possible to fail TLS negotiation. An endpoint MUST
- // immediately terminate an HTTP/2 connection that
- // does not meet the TLS requirements described in
- // this section with a connection error (Section
- // 5.4.1) of type INADEQUATE_SECURITY.
- if sc.tlsState.Version < tls.VersionTLS12 {
- sc.rejectConn(http2ErrCodeInadequateSecurity, "TLS version too low")
- return
- }
-
- if sc.tlsState.ServerName == "" {
- // Client must use SNI, but we don't enforce that anymore,
- // since it was causing problems when connecting to bare IP
- // addresses during development.
- //
- // TODO: optionally enforce? Or enforce at the time we receive
- // a new request, and verify the ServerName matches the :authority?
- // But that precludes proxy situations, perhaps.
- //
-			// So for now, do nothing here.
- }
-
- if !s.PermitProhibitedCipherSuites && http2isBadCipher(sc.tlsState.CipherSuite) {
- // "Endpoints MAY choose to generate a connection error
- // (Section 5.4.1) of type INADEQUATE_SECURITY if one of
- // the prohibited cipher suites are negotiated."
- //
- // We choose that. In my opinion, the spec is weak
- // here. It also says both parties must support at least
- // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
-			// excuse here. If we really must, we could allow an
- // "AllowInsecureWeakCiphers" option on the server later.
- // Let's see how it plays out first.
- sc.rejectConn(http2ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
- return
- }
- }
-
- if hook := http2testHookGetServerConn; hook != nil {
- hook(sc)
- }
- sc.serve()
-}
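-
-// A minimal sketch of driving ServeConn directly, again assuming the
-// exported x/net/http2 API (tlsConn and mux are placeholders):
-//
-//	h2srv := &http2.Server{}
-//	h2srv.ServeConn(tlsConn, &http2.ServeConnOpts{
-//		BaseConfig: &http.Server{Handler: mux},
-//	})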
-
-func http2serverConnBaseContext(c net.Conn, opts *http2ServeConnOpts) (ctx context.Context, cancel func()) {
- ctx, cancel = context.WithCancel(opts.context())
- ctx = context.WithValue(ctx, LocalAddrContextKey, c.LocalAddr())
- if hs := opts.baseConfig(); hs != nil {
- ctx = context.WithValue(ctx, ServerContextKey, hs)
- }
- return
-}
-
-func (sc *http2serverConn) rejectConn(err http2ErrCode, debug string) {
- sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
- // ignoring errors. hanging up anyway.
- sc.framer.WriteGoAway(0, err, []byte(debug))
- sc.bw.Flush()
- sc.conn.Close()
-}
-
-type http2serverConn struct {
- // Immutable:
- srv *http2Server
- hs *Server
- conn net.Conn
- bw *http2bufferedWriter // writing to conn
- handler Handler
- baseCtx context.Context
- framer *http2Framer
- doneServing chan struct{} // closed when serverConn.serve ends
- readFrameCh chan http2readFrameResult // written by serverConn.readFrames
- wantWriteFrameCh chan http2FrameWriteRequest // from handlers -> serve
- wroteFrameCh chan http2frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
- bodyReadCh chan http2bodyReadMsg // from handlers -> serve
- serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop
- flow http2flow // conn-wide (not stream-specific) outbound flow control
- inflow http2flow // conn-wide inbound flow control
- tlsState *tls.ConnectionState // shared by all handlers, like net/http
- remoteAddrStr string
- writeSched http2WriteScheduler
-
- // Everything following is owned by the serve loop; use serveG.check():
- serveG http2goroutineLock // used to verify funcs are on serve()
- pushEnabled bool
- sawFirstSettings bool // got the initial SETTINGS frame after the preface
- needToSendSettingsAck bool
- unackedSettings int // how many SETTINGS have we sent without ACKs?
- queuedControlFrames int // control frames in the writeSched queue
- clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
-	advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
- curClientStreams uint32 // number of open streams initiated by the client
- curPushedStreams uint32 // number of open streams initiated by server push
- maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
- maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
- streams map[uint32]*http2stream
- initialStreamSendWindowSize int32
- maxFrameSize int32
- headerTableSize uint32
- peerMaxHeaderListSize uint32 // zero means unknown (default)
- canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
- writingFrame bool // started writing a frame (on serve goroutine or separate)
- writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
- needsFrameFlush bool // last frame write wasn't a flush
- inGoAway bool // we've started to or sent GOAWAY
- inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
- needToSendGoAway bool // we need to schedule a GOAWAY frame write
- goAwayCode http2ErrCode
- shutdownTimer *time.Timer // nil until used
- idleTimer *time.Timer // nil if unused
-
- // Owned by the writeFrameAsync goroutine:
- headerWriteBuf bytes.Buffer
- hpackEncoder *hpack.Encoder
-
- // Used by startGracefulShutdown.
- shutdownOnce sync.Once
-}
-
-func (sc *http2serverConn) maxHeaderListSize() uint32 {
- n := sc.hs.MaxHeaderBytes
- if n <= 0 {
- n = DefaultMaxHeaderBytes
- }
- // http2's count is in a slightly different unit and includes 32 bytes per pair.
- // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
- const perFieldOverhead = 32 // per http2 spec
- const typicalHeaders = 10 // conservative
- return uint32(n + typicalHeaders*perFieldOverhead)
-}
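-
-// For example: with the DefaultMaxHeaderBytes of 1 << 20, the value
-// advertised in SETTINGS_MAX_HEADER_LIST_SIZE is 1048576 + 10*32 = 1048896.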
-
-func (sc *http2serverConn) curOpenStreams() uint32 {
- sc.serveG.check()
- return sc.curClientStreams + sc.curPushedStreams
-}
-
-// stream represents a stream. This is the minimal metadata needed by
-// the serve goroutine. Most of the actual stream state is owned by
-// the http.Handler's goroutine in the responseWriter. Because the
-// responseWriter's responseWriterState is recycled at the end of a
-// handler, this struct intentionally has no pointer to the
-// *responseWriter{,State} itself, as the Handler ending nils out the
-// responseWriter's state field.
-type http2stream struct {
- // immutable:
- sc *http2serverConn
- id uint32
- body *http2pipe // non-nil if expecting DATA frames
-	cw http2closeWaiter // closed when the stream transitions to the closed state
- ctx context.Context
- cancelCtx func()
-
- // owned by serverConn's serve loop:
- bodyBytes int64 // body bytes seen so far
- declBodyBytes int64 // or -1 if undeclared
- flow http2flow // limits writing from Handler to client
- inflow http2flow // what the client is allowed to POST/etc to us
- state http2streamState
- resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
- gotTrailerHeader bool // HEADER frame for trailers was seen
- wroteHeaders bool // whether we wrote headers (not status 100)
- writeDeadline *time.Timer // nil if unused
-
- trailer Header // accumulated trailers
- reqTrailer Header // handler's Request.Trailer
-}
-
-func (sc *http2serverConn) Framer() *http2Framer { return sc.framer }
-
-func (sc *http2serverConn) CloseConn() error { return sc.conn.Close() }
-
-func (sc *http2serverConn) Flush() error { return sc.bw.Flush() }
-
-func (sc *http2serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
- return sc.hpackEncoder, &sc.headerWriteBuf
-}
-
-func (sc *http2serverConn) state(streamID uint32) (http2streamState, *http2stream) {
- sc.serveG.check()
- // http://tools.ietf.org/html/rfc7540#section-5.1
- if st, ok := sc.streams[streamID]; ok {
- return st.state, st
- }
- // "The first use of a new stream identifier implicitly closes all
- // streams in the "idle" state that might have been initiated by
- // that peer with a lower-valued stream identifier. For example, if
- // a client sends a HEADERS frame on stream 7 without ever sending a
- // frame on stream 5, then stream 5 transitions to the "closed"
- // state when the first frame for stream 7 is sent or received."
- if streamID%2 == 1 {
- if streamID <= sc.maxClientStreamID {
- return http2stateClosed, nil
- }
- } else {
- if streamID <= sc.maxPushPromiseID {
- return http2stateClosed, nil
- }
- }
- return http2stateIdle, nil
-}
-
-// setConnState calls the net/http ConnState hook for this connection, if configured.
-// Note that the net/http package does StateNew and StateClosed for us.
-// There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
-func (sc *http2serverConn) setConnState(state ConnState) {
- if sc.hs.ConnState != nil {
- sc.hs.ConnState(sc.conn, state)
- }
-}
-
-func (sc *http2serverConn) vlogf(format string, args ...interface{}) {
- if http2VerboseLogs {
- sc.logf(format, args...)
- }
-}
-
-func (sc *http2serverConn) logf(format string, args ...interface{}) {
- if lg := sc.hs.ErrorLog; lg != nil {
- lg.Printf(format, args...)
- } else {
- log.Printf(format, args...)
- }
-}
-
-// errno returns v's underlying uintptr, else 0.
-//
-// TODO: remove this helper function once http2 can use build
-// tags. See comment in isClosedConnError.
-func http2errno(v error) uintptr {
- if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
- return uintptr(rv.Uint())
- }
- return 0
-}
-
-// isClosedConnError reports whether err is an error from use of a closed
-// network connection.
-func http2isClosedConnError(err error) bool {
- if err == nil {
- return false
- }
-
- // TODO: remove this string search and be more like the Windows
- // case below. That might involve modifying the standard library
- // to return better error types.
- str := err.Error()
- if strings.Contains(str, "use of closed network connection") {
- return true
- }
-
- // TODO(bradfitz): x/tools/cmd/bundle doesn't really support
- // build tags, so I can't make an http2_windows.go file with
- // Windows-specific stuff. Fix that and move this, once we
- // have a way to bundle this into std's net/http somehow.
- if runtime.GOOS == "windows" {
- if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
- if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
- const WSAECONNABORTED = 10053
- const WSAECONNRESET = 10054
- if n := http2errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
- return true
- }
- }
- }
- }
- return false
-}
-
-func (sc *http2serverConn) condlogf(err error, format string, args ...interface{}) {
- if err == nil {
- return
- }
- if err == io.EOF || err == io.ErrUnexpectedEOF || http2isClosedConnError(err) || err == http2errPrefaceTimeout {
- // Boring, expected errors.
- sc.vlogf(format, args...)
- } else {
- sc.logf(format, args...)
- }
-}
-
-func (sc *http2serverConn) canonicalHeader(v string) string {
- sc.serveG.check()
- http2buildCommonHeaderMapsOnce()
- cv, ok := http2commonCanonHeader[v]
- if ok {
- return cv
- }
- cv, ok = sc.canonHeader[v]
- if ok {
- return cv
- }
- if sc.canonHeader == nil {
- sc.canonHeader = make(map[string]string)
- }
- cv = CanonicalHeaderKey(v)
- // maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of
- // entries in the canonHeader cache. This should be larger than the number
- // of unique, uncommon header keys likely to be sent by the peer, while not
-	// so high as to permit unreasonable memory usage if the peer sends an unbounded
- // number of unique header keys.
- const maxCachedCanonicalHeaders = 32
- if len(sc.canonHeader) < maxCachedCanonicalHeaders {
- sc.canonHeader[v] = cv
- }
- return cv
-}
-
-type http2readFrameResult struct {
- f http2Frame // valid until readMore is called
- err error
-
- // readMore should be called once the consumer no longer needs or
- // retains f. After readMore, f is invalid and more frames can be
- // read.
- readMore func()
-}
-
-// readFrames is the loop that reads incoming frames.
-// It takes care to only read one frame at a time, blocking until the
-// consumer is done with the frame.
-// It's run on its own goroutine.
-func (sc *http2serverConn) readFrames() {
- gate := make(http2gate)
- gateDone := gate.Done
- for {
- f, err := sc.framer.ReadFrame()
- select {
- case sc.readFrameCh <- http2readFrameResult{f, err, gateDone}:
- case <-sc.doneServing:
- return
- }
- select {
- case <-gate:
- case <-sc.doneServing:
- return
- }
- if http2terminalReadFrameError(err) {
- return
- }
- }
-}
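-
-// A minimal sketch of the gate used above, assuming the bundled
-// http2gate matches the x/net/http2 original:
-//
-//	type gate chan struct{}
-//
-//	func (g gate) Done() { g <- struct{}{} }
-//
-// The frame result carries gateDone as readMore; readFrames then blocks
-// on <-gate until the consumer calls it, so at most one frame is
-// outstanding at a time.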
-
-// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
-type http2frameWriteResult struct {
- _ http2incomparable
- wr http2FrameWriteRequest // what was written (or attempted)
- err error // result of the writeFrame call
-}
-
-// writeFrameAsync runs in its own goroutine and writes a single frame
-// and then reports when it's done.
-// At most one goroutine can be running writeFrameAsync at a time per
-// serverConn.
-func (sc *http2serverConn) writeFrameAsync(wr http2FrameWriteRequest) {
- err := wr.write.writeFrame(sc)
- sc.wroteFrameCh <- http2frameWriteResult{wr: wr, err: err}
-}
-
-func (sc *http2serverConn) closeAllStreamsOnConnClose() {
- sc.serveG.check()
- for _, st := range sc.streams {
- sc.closeStream(st, http2errClientDisconnected)
- }
-}
-
-func (sc *http2serverConn) stopShutdownTimer() {
- sc.serveG.check()
- if t := sc.shutdownTimer; t != nil {
- t.Stop()
- }
-}
-
-func (sc *http2serverConn) notePanic() {
- // Note: this is for serverConn.serve panicking, not http.Handler code.
- if http2testHookOnPanicMu != nil {
- http2testHookOnPanicMu.Lock()
- defer http2testHookOnPanicMu.Unlock()
- }
- if http2testHookOnPanic != nil {
- if e := recover(); e != nil {
- if http2testHookOnPanic(sc, e) {
- panic(e)
- }
- }
- }
-}
-
-func (sc *http2serverConn) serve() {
- sc.serveG.check()
- defer sc.notePanic()
- defer sc.conn.Close()
- defer sc.closeAllStreamsOnConnClose()
- defer sc.stopShutdownTimer()
- defer close(sc.doneServing) // unblocks handlers trying to send
-
- if http2VerboseLogs {
- sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
- }
-
- sc.writeFrame(http2FrameWriteRequest{
- write: http2writeSettings{
- {http2SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
- {http2SettingMaxConcurrentStreams, sc.advMaxStreams},
- {http2SettingMaxHeaderListSize, sc.maxHeaderListSize()},
- {http2SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
- },
- })
- sc.unackedSettings++
-
- // Each connection starts with initialWindowSize inflow tokens.
- // If a higher value is configured, we add more tokens.
- if diff := sc.srv.initialConnRecvWindowSize() - http2initialWindowSize; diff > 0 {
- sc.sendWindowUpdate(nil, int(diff))
- }
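-
-	// For example: with the default initialConnRecvWindowSize of
-	// 1 << 20, diff = 1048576 - 65535 = 983041, so the client gains
-	// the extra ~960 KiB via a single WINDOW_UPDATE on stream 0.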
-
- if err := sc.readPreface(); err != nil {
- sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
- return
- }
- // Now that we've got the preface, get us out of the
- // "StateNew" state. We can't go directly to idle, though.
- // Active means we read some data and anticipate a request. We'll
- // do another Active when we get a HEADERS frame.
- sc.setConnState(StateActive)
- sc.setConnState(StateIdle)
-
- if sc.srv.IdleTimeout != 0 {
- sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
- defer sc.idleTimer.Stop()
- }
-
- go sc.readFrames() // closed by defer sc.conn.Close above
-
- settingsTimer := time.AfterFunc(http2firstSettingsTimeout, sc.onSettingsTimer)
- defer settingsTimer.Stop()
-
- loopNum := 0
- for {
- loopNum++
- select {
- case wr := <-sc.wantWriteFrameCh:
- if se, ok := wr.write.(http2StreamError); ok {
- sc.resetStream(se)
- break
- }
- sc.writeFrame(wr)
- case res := <-sc.wroteFrameCh:
- sc.wroteFrame(res)
- case res := <-sc.readFrameCh:
- // Process any written frames before reading new frames from the client since a
- // written frame could have triggered a new stream to be started.
- if sc.writingFrameAsync {
- select {
- case wroteRes := <-sc.wroteFrameCh:
- sc.wroteFrame(wroteRes)
- default:
- }
- }
- if !sc.processFrameFromReader(res) {
- return
- }
- res.readMore()
- if settingsTimer != nil {
- settingsTimer.Stop()
- settingsTimer = nil
- }
- case m := <-sc.bodyReadCh:
- sc.noteBodyRead(m.st, m.n)
- case msg := <-sc.serveMsgCh:
- switch v := msg.(type) {
- case func(int):
- v(loopNum) // for testing
- case *http2serverMessage:
- switch v {
- case http2settingsTimerMsg:
- sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
- return
- case http2idleTimerMsg:
- sc.vlogf("connection is idle")
- sc.goAway(http2ErrCodeNo)
- case http2shutdownTimerMsg:
- sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
- return
- case http2gracefulShutdownMsg:
- sc.startGracefulShutdownInternal()
- default:
- panic("unknown timer")
- }
- case *http2startPushRequest:
- sc.startPush(v)
- default:
- panic(fmt.Sprintf("unexpected type %T", v))
- }
- }
-
- // If the peer is causing us to generate a lot of control frames,
- // but not reading them from us, assume they are trying to make us
- // run out of memory.
- if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
- sc.vlogf("http2: too many control frames in send queue, closing connection")
- return
- }
-
- // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
- // with no error code (graceful shutdown), don't start the timer until
- // all open streams have been completed.
- sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame
- gracefulShutdownComplete := sc.goAwayCode == http2ErrCodeNo && sc.curOpenStreams() == 0
- if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != http2ErrCodeNo || gracefulShutdownComplete) {
- sc.shutDownIn(http2goAwayTimeout)
- }
- }
-}
-
-func (sc *http2serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
- select {
- case <-sc.doneServing:
- case <-sharedCh:
- close(privateCh)
- }
-}
-
-type http2serverMessage int
-
-// Message values sent to serveMsgCh.
-var (
- http2settingsTimerMsg = new(http2serverMessage)
- http2idleTimerMsg = new(http2serverMessage)
- http2shutdownTimerMsg = new(http2serverMessage)
- http2gracefulShutdownMsg = new(http2serverMessage)
-)
-
-func (sc *http2serverConn) onSettingsTimer() { sc.sendServeMsg(http2settingsTimerMsg) }
-
-func (sc *http2serverConn) onIdleTimer() { sc.sendServeMsg(http2idleTimerMsg) }
-
-func (sc *http2serverConn) onShutdownTimer() { sc.sendServeMsg(http2shutdownTimerMsg) }
-
-func (sc *http2serverConn) sendServeMsg(msg interface{}) {
- sc.serveG.checkNotOn() // NOT
- select {
- case sc.serveMsgCh <- msg:
- case <-sc.doneServing:
- }
-}
-
-var http2errPrefaceTimeout = errors.New("timeout waiting for client preface")
-
-// readPreface reads the ClientPreface greeting from the peer.
-// It returns errPrefaceTimeout on timeout, or an error if the
-// greeting is invalid.
-func (sc *http2serverConn) readPreface() error {
- errc := make(chan error, 1)
- go func() {
- // Read the client preface
- buf := make([]byte, len(http2ClientPreface))
- if _, err := io.ReadFull(sc.conn, buf); err != nil {
- errc <- err
- } else if !bytes.Equal(buf, http2clientPreface) {
- errc <- fmt.Errorf("bogus greeting %q", buf)
- } else {
- errc <- nil
- }
- }()
- timer := time.NewTimer(http2prefaceTimeout) // TODO: configurable on *Server?
- defer timer.Stop()
- select {
- case <-timer.C:
- return http2errPrefaceTimeout
- case err := <-errc:
- if err == nil {
- if http2VerboseLogs {
- sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
- }
- }
- return err
- }
-}
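-
-// For reference, the greeting checked above is the fixed 24-byte
-// connection preface from RFC 7540, Section 3.5:
-//
-//	const ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"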
-
-var http2errChanPool = sync.Pool{
- New: func() interface{} { return make(chan error, 1) },
-}
-
-var http2writeDataPool = sync.Pool{
- New: func() interface{} { return new(http2writeData) },
-}
-
-// writeDataFromHandler writes DATA response frames from a handler on
-// the given stream.
-func (sc *http2serverConn) writeDataFromHandler(stream *http2stream, data []byte, endStream bool) error {
- ch := http2errChanPool.Get().(chan error)
- writeArg := http2writeDataPool.Get().(*http2writeData)
- *writeArg = http2writeData{stream.id, data, endStream}
- err := sc.writeFrameFromHandler(http2FrameWriteRequest{
- write: writeArg,
- stream: stream,
- done: ch,
- })
- if err != nil {
- return err
- }
- var frameWriteDone bool // the frame write is done (successfully or not)
- select {
- case err = <-ch:
- frameWriteDone = true
- case <-sc.doneServing:
- return http2errClientDisconnected
- case <-stream.cw:
- // If both ch and stream.cw were ready (as might
- // happen on the final Write after an http.Handler
- // ends), prefer the write result. Otherwise this
- // might just be us successfully closing the stream.
- // The writeFrameAsync and serve goroutines guarantee
- // that the ch send will happen before the stream.cw
- // close.
- select {
- case err = <-ch:
- frameWriteDone = true
- default:
- return http2errStreamClosed
- }
- }
- http2errChanPool.Put(ch)
- if frameWriteDone {
- http2writeDataPool.Put(writeArg)
- }
- return err
-}
-
-// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
-// if the connection has gone away.
-//
-// This must not be run from the serve goroutine itself, else it might
-// deadlock writing to sc.wantWriteFrameCh (which is only mildly
-// buffered and is read by serve itself). If you're on the serve
-// goroutine, call writeFrame instead.
-func (sc *http2serverConn) writeFrameFromHandler(wr http2FrameWriteRequest) error {
- sc.serveG.checkNotOn() // NOT
- select {
- case sc.wantWriteFrameCh <- wr:
- return nil
- case <-sc.doneServing:
- // Serve loop is gone.
- // Client has closed their connection to the server.
- return http2errClientDisconnected
- }
-}
-
-// writeFrame schedules a frame to write and sends it if there's nothing
-// already being written.
-//
-// There is no pushback here (the serve goroutine never blocks). It's
-// the http.Handlers that block, waiting for their previous frames to
-// make it onto the wire.
-//
-// If you're not on the serve goroutine, use writeFrameFromHandler instead.
-func (sc *http2serverConn) writeFrame(wr http2FrameWriteRequest) {
- sc.serveG.check()
-
- // If true, wr will not be written and wr.done will not be signaled.
- var ignoreWrite bool
-
- // We are not allowed to write frames on closed streams. RFC 7540 Section
- // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on
- // a closed stream." Our server never sends PRIORITY, so that exception
- // does not apply.
- //
- // The serverConn might close an open stream while the stream's handler
- // is still running. For example, the server might close a stream when it
- // receives bad data from the client. If this happens, the handler might
- // attempt to write a frame after the stream has been closed (since the
- // handler hasn't yet been notified of the close). In this case, we simply
- // ignore the frame. The handler will notice that the stream is closed when
- // it waits for the frame to be written.
- //
- // As an exception to this rule, we allow sending RST_STREAM after close.
- // This allows us to immediately reject new streams without tracking any
- // state for those streams (except for the queued RST_STREAM frame). This
- // may result in duplicate RST_STREAMs in some cases, but the client should
- // ignore those.
- if wr.StreamID() != 0 {
- _, isReset := wr.write.(http2StreamError)
- if state, _ := sc.state(wr.StreamID()); state == http2stateClosed && !isReset {
- ignoreWrite = true
- }
- }
-
- // Don't send a 100-continue response if we've already sent headers.
- // See golang.org/issue/14030.
- switch wr.write.(type) {
- case *http2writeResHeaders:
- wr.stream.wroteHeaders = true
- case http2write100ContinueHeadersFrame:
- if wr.stream.wroteHeaders {
- // We do not need to notify wr.done because this frame is
- // never written with wr.done != nil.
- if wr.done != nil {
- panic("wr.done != nil for write100ContinueHeadersFrame")
- }
- ignoreWrite = true
- }
- }
-
- if !ignoreWrite {
- if wr.isControl() {
- sc.queuedControlFrames++
- // For extra safety, detect wraparounds, which should not happen,
- // and pull the plug.
- if sc.queuedControlFrames < 0 {
- sc.conn.Close()
- }
- }
- sc.writeSched.Push(wr)
- }
- sc.scheduleFrameWrite()
-}
-
-// startFrameWrite starts writing wr (on its own goroutine when the
-// write might block on the network) and updates the serve goroutine's
-// state of the world from the info in wr.
-func (sc *http2serverConn) startFrameWrite(wr http2FrameWriteRequest) {
- sc.serveG.check()
- if sc.writingFrame {
- panic("internal error: can only be writing one frame at a time")
- }
-
- st := wr.stream
- if st != nil {
- switch st.state {
- case http2stateHalfClosedLocal:
- switch wr.write.(type) {
- case http2StreamError, http2handlerPanicRST, http2writeWindowUpdate:
- // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE
- // in this state. (We never send PRIORITY from the server, so that is not checked.)
- default:
- panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
- }
- case http2stateClosed:
- panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
- }
- }
- if wpp, ok := wr.write.(*http2writePushPromise); ok {
- var err error
- wpp.promisedID, err = wpp.allocatePromisedID()
- if err != nil {
- sc.writingFrameAsync = false
- wr.replyToWriter(err)
- return
- }
- }
-
- sc.writingFrame = true
- sc.needsFrameFlush = true
- if wr.write.staysWithinBuffer(sc.bw.Available()) {
- sc.writingFrameAsync = false
- err := wr.write.writeFrame(sc)
- sc.wroteFrame(http2frameWriteResult{wr: wr, err: err})
- } else {
- sc.writingFrameAsync = true
- go sc.writeFrameAsync(wr)
- }
-}
-
-// errHandlerPanicked is the error given to any callers blocked in a read from
-// Request.Body when the main goroutine panics. Since most handlers read in the
-// main ServeHTTP goroutine, this will show up rarely.
-var http2errHandlerPanicked = errors.New("http2: handler panicked")
-
-// wroteFrame is called on the serve goroutine with the result of
-// whatever happened on writeFrameAsync.
-func (sc *http2serverConn) wroteFrame(res http2frameWriteResult) {
- sc.serveG.check()
- if !sc.writingFrame {
- panic("internal error: expected to be already writing a frame")
- }
- sc.writingFrame = false
- sc.writingFrameAsync = false
-
- wr := res.wr
-
- if http2writeEndsStream(wr.write) {
- st := wr.stream
- if st == nil {
- panic("internal error: expecting non-nil stream")
- }
- switch st.state {
- case http2stateOpen:
- // Here we would go to stateHalfClosedLocal in
- // theory, but since our handler is done and
- // the net/http package provides no mechanism
- // for closing a ResponseWriter while still
- // reading data (see possible TODO at top of
- // this file), we go into closed state here
- // anyway, after telling the peer we're
- // hanging up on them. We'll transition to
- // stateClosed after the RST_STREAM frame is
- // written.
- st.state = http2stateHalfClosedLocal
- // Section 8.1: a server MAY request that the client abort
- // transmission of a request without error by sending a
- // RST_STREAM with an error code of NO_ERROR after sending
- // a complete response.
- sc.resetStream(http2streamError(st.id, http2ErrCodeNo))
- case http2stateHalfClosedRemote:
- sc.closeStream(st, http2errHandlerComplete)
- }
- } else {
- switch v := wr.write.(type) {
- case http2StreamError:
- // st may be unknown if the RST_STREAM was generated to reject bad input.
- if st, ok := sc.streams[v.StreamID]; ok {
- sc.closeStream(st, v)
- }
- case http2handlerPanicRST:
- sc.closeStream(wr.stream, http2errHandlerPanicked)
- }
- }
-
- // Reply (if requested) to unblock the ServeHTTP goroutine.
- wr.replyToWriter(res.err)
-
- sc.scheduleFrameWrite()
-}
-
-// scheduleFrameWrite tickles the frame writing scheduler.
-//
-// If a frame is already being written, nothing happens. This will be called again
-// when the frame is done being written.
-//
-// If a frame isn't being written and we need to send one, the best frame
-// to send is selected by writeSched.
-//
-// If a frame isn't being written and there's nothing else to send, we
-// flush the write buffer.
-func (sc *http2serverConn) scheduleFrameWrite() {
- sc.serveG.check()
- if sc.writingFrame || sc.inFrameScheduleLoop {
- return
- }
- sc.inFrameScheduleLoop = true
- for !sc.writingFrameAsync {
- if sc.needToSendGoAway {
- sc.needToSendGoAway = false
- sc.startFrameWrite(http2FrameWriteRequest{
- write: &http2writeGoAway{
- maxStreamID: sc.maxClientStreamID,
- code: sc.goAwayCode,
- },
- })
- continue
- }
- if sc.needToSendSettingsAck {
- sc.needToSendSettingsAck = false
- sc.startFrameWrite(http2FrameWriteRequest{write: http2writeSettingsAck{}})
- continue
- }
- if !sc.inGoAway || sc.goAwayCode == http2ErrCodeNo {
- if wr, ok := sc.writeSched.Pop(); ok {
- if wr.isControl() {
- sc.queuedControlFrames--
- }
- sc.startFrameWrite(wr)
- continue
- }
- }
- if sc.needsFrameFlush {
- sc.startFrameWrite(http2FrameWriteRequest{write: http2flushFrameWriter{}})
- sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
- continue
- }
- break
- }
- sc.inFrameScheduleLoop = false
-}
-
-// startGracefulShutdown gracefully shuts down a connection. This
-// sends GOAWAY with ErrCodeNo to tell the client we're gracefully
-// shutting down. The connection isn't closed until all current
-// streams are done.
-//
-// startGracefulShutdown returns immediately; it does not wait until
-// the connection has shut down.
-func (sc *http2serverConn) startGracefulShutdown() {
- sc.serveG.checkNotOn() // NOT
- sc.shutdownOnce.Do(func() { sc.sendServeMsg(http2gracefulShutdownMsg) })
-}
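-
-// Usage sketch: ConfigureServer hooks startGracefulShutdown into
-// Server.RegisterOnShutdown, so a plain net/http Shutdown also drains
-// HTTP/2 connections (srv is the placeholder *http.Server from the
-// ConfigureServer sketch above):
-//
-//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-//	defer cancel()
-//	_ = srv.Shutdown(ctx) // sends GOAWAY(NO_ERROR), waits for open streams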
-
-// After sending GOAWAY with an error code (non-graceful shutdown), the
-// connection will close after goAwayTimeout.
-//
-// If we close the connection immediately after sending GOAWAY, there may
-// be unsent data in our kernel receive buffer, which will cause the kernel
-// to send a TCP RST on close() instead of a FIN. This RST will abort the
-// connection immediately, whether or not the client had received the GOAWAY.
-//
-// Ideally we should delay for at least 1 RTT + epsilon so the client has
-// a chance to read the GOAWAY and stop sending messages. Measuring RTT
-// is hard, so we approximate with 1 second. See golang.org/issue/18701.
-//
-// This is a var so it can be shorter in tests, where all requests use
-// the loopback interface, making the expected RTT very small.
-//
-// TODO: configurable?
-var http2goAwayTimeout = 1 * time.Second
-
-func (sc *http2serverConn) startGracefulShutdownInternal() {
- sc.goAway(http2ErrCodeNo)
-}
-
-func (sc *http2serverConn) goAway(code http2ErrCode) {
- sc.serveG.check()
- if sc.inGoAway {
- return
- }
- sc.inGoAway = true
- sc.needToSendGoAway = true
- sc.goAwayCode = code
- sc.scheduleFrameWrite()
-}
-
-func (sc *http2serverConn) shutDownIn(d time.Duration) {
- sc.serveG.check()
- sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
-}
-
-func (sc *http2serverConn) resetStream(se http2StreamError) {
- sc.serveG.check()
- sc.writeFrame(http2FrameWriteRequest{write: se})
- if st, ok := sc.streams[se.StreamID]; ok {
- st.resetQueued = true
- }
-}
-
-// processFrameFromReader processes a frame that the serve loop read
-// from readFrameCh, which is fed by the frame-reading goroutine.
-// processFrameFromReader returns whether the connection should be kept open.
-func (sc *http2serverConn) processFrameFromReader(res http2readFrameResult) bool {
- sc.serveG.check()
- err := res.err
- if err != nil {
- if err == http2ErrFrameTooLarge {
- sc.goAway(http2ErrCodeFrameSize)
- return true // goAway will close the loop
- }
- clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || http2isClosedConnError(err)
- if clientGone {
- // TODO: could we also get into this state if
- // the peer does a half close
- // (e.g. CloseWrite) because they're done
- // sending frames but they're still wanting
- // our open replies? Investigate.
- // TODO: add CloseWrite to crypto/tls.Conn first
- // so we have a way to test this? I suppose
- // just for testing we could have a non-TLS mode.
- return false
- }
- } else {
- f := res.f
- if http2VerboseLogs {
- sc.vlogf("http2: server read frame %v", http2summarizeFrame(f))
- }
- err = sc.processFrame(f)
- if err == nil {
- return true
- }
- }
-
- switch ev := err.(type) {
- case http2StreamError:
- sc.resetStream(ev)
- return true
- case http2goAwayFlowError:
- sc.goAway(http2ErrCodeFlowControl)
- return true
- case http2ConnectionError:
- sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
- sc.goAway(http2ErrCode(ev))
- return true // goAway will handle shutdown
- default:
- if res.err != nil {
- sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
- } else {
- sc.logf("http2: server closing client connection: %v", err)
- }
- return false
- }
-}
-
-func (sc *http2serverConn) processFrame(f http2Frame) error {
- sc.serveG.check()
-
- // First frame received must be SETTINGS.
- if !sc.sawFirstSettings {
- if _, ok := f.(*http2SettingsFrame); !ok {
- return sc.countError("first_settings", http2ConnectionError(http2ErrCodeProtocol))
- }
- sc.sawFirstSettings = true
- }
-
- switch f := f.(type) {
- case *http2SettingsFrame:
- return sc.processSettings(f)
- case *http2MetaHeadersFrame:
- return sc.processHeaders(f)
- case *http2WindowUpdateFrame:
- return sc.processWindowUpdate(f)
- case *http2PingFrame:
- return sc.processPing(f)
- case *http2DataFrame:
- return sc.processData(f)
- case *http2RSTStreamFrame:
- return sc.processResetStream(f)
- case *http2PriorityFrame:
- return sc.processPriority(f)
- case *http2GoAwayFrame:
- return sc.processGoAway(f)
- case *http2PushPromiseFrame:
- // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
- // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
- return sc.countError("push_promise", http2ConnectionError(http2ErrCodeProtocol))
- default:
- sc.vlogf("http2: server ignoring frame: %v", f.Header())
- return nil
- }
-}
-
-func (sc *http2serverConn) processPing(f *http2PingFrame) error {
- sc.serveG.check()
- if f.IsAck() {
-		// 6.7 PING: "An endpoint MUST NOT respond to PING frames
-		// containing this flag."
- return nil
- }
- if f.StreamID != 0 {
- // "PING frames are not associated with any individual
- // stream. If a PING frame is received with a stream
- // identifier field value other than 0x0, the recipient MUST
- // respond with a connection error (Section 5.4.1) of type
- // PROTOCOL_ERROR."
- return sc.countError("ping_on_stream", http2ConnectionError(http2ErrCodeProtocol))
- }
- if sc.inGoAway && sc.goAwayCode != http2ErrCodeNo {
- return nil
- }
- sc.writeFrame(http2FrameWriteRequest{write: http2writePingAck{f}})
- return nil
-}
-
-func (sc *http2serverConn) processWindowUpdate(f *http2WindowUpdateFrame) error {
- sc.serveG.check()
- switch {
- case f.StreamID != 0: // stream-level flow control
- state, st := sc.state(f.StreamID)
- if state == http2stateIdle {
- // Section 5.1: "Receiving any frame other than HEADERS
- // or PRIORITY on a stream in this state MUST be
- // treated as a connection error (Section 5.4.1) of
- // type PROTOCOL_ERROR."
- return sc.countError("stream_idle", http2ConnectionError(http2ErrCodeProtocol))
- }
- if st == nil {
- // "WINDOW_UPDATE can be sent by a peer that has sent a
- // frame bearing the END_STREAM flag. This means that a
- // receiver could receive a WINDOW_UPDATE frame on a "half
- // closed (remote)" or "closed" stream. A receiver MUST
- // NOT treat this as an error, see Section 5.1."
- return nil
- }
- if !st.flow.add(int32(f.Increment)) {
- return sc.countError("bad_flow", http2streamError(f.StreamID, http2ErrCodeFlowControl))
- }
- default: // connection-level flow control
- if !sc.flow.add(int32(f.Increment)) {
- return http2goAwayFlowError{}
- }
- }
- sc.scheduleFrameWrite()
- return nil
-}
-
-func (sc *http2serverConn) processResetStream(f *http2RSTStreamFrame) error {
- sc.serveG.check()
-
- state, st := sc.state(f.StreamID)
- if state == http2stateIdle {
- // 6.4 "RST_STREAM frames MUST NOT be sent for a
- // stream in the "idle" state. If a RST_STREAM frame
- // identifying an idle stream is received, the
- // recipient MUST treat this as a connection error
-		// (Section 5.4.1) of type PROTOCOL_ERROR."
- return sc.countError("reset_idle_stream", http2ConnectionError(http2ErrCodeProtocol))
- }
- if st != nil {
- st.cancelCtx()
- sc.closeStream(st, http2streamError(f.StreamID, f.ErrCode))
- }
- return nil
-}
-
-func (sc *http2serverConn) closeStream(st *http2stream, err error) {
- sc.serveG.check()
- if st.state == http2stateIdle || st.state == http2stateClosed {
- panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
- }
- st.state = http2stateClosed
- if st.writeDeadline != nil {
- st.writeDeadline.Stop()
- }
- if st.isPushed() {
- sc.curPushedStreams--
- } else {
- sc.curClientStreams--
- }
- delete(sc.streams, st.id)
- if len(sc.streams) == 0 {
- sc.setConnState(StateIdle)
- if sc.srv.IdleTimeout != 0 {
- sc.idleTimer.Reset(sc.srv.IdleTimeout)
- }
- if http2h1ServerKeepAlivesDisabled(sc.hs) {
- sc.startGracefulShutdownInternal()
- }
- }
- if p := st.body; p != nil {
- // Return any buffered unread bytes worth of conn-level flow control.
- // See golang.org/issue/16481
- sc.sendWindowUpdate(nil, p.Len())
-
- p.CloseWithError(err)
- }
- st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
- sc.writeSched.CloseStream(st.id)
-}
-
-func (sc *http2serverConn) processSettings(f *http2SettingsFrame) error {
- sc.serveG.check()
- if f.IsAck() {
- sc.unackedSettings--
- if sc.unackedSettings < 0 {
- // Why is the peer ACKing settings we never sent?
- // The spec doesn't mention this case, but
- // hang up on them anyway.
- return sc.countError("ack_mystery", http2ConnectionError(http2ErrCodeProtocol))
- }
- return nil
- }
- if f.NumSettings() > 100 || f.HasDuplicates() {
- // This isn't actually in the spec, but hang up on
- // suspiciously large settings frames or those with
- // duplicate entries.
- return sc.countError("settings_big_or_dups", http2ConnectionError(http2ErrCodeProtocol))
- }
- if err := f.ForeachSetting(sc.processSetting); err != nil {
- return err
- }
- // TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be
- // acknowledged individually, even if multiple are received before the ACK.
- sc.needToSendSettingsAck = true
- sc.scheduleFrameWrite()
- return nil
-}
-
-func (sc *http2serverConn) processSetting(s http2Setting) error {
- sc.serveG.check()
- if err := s.Valid(); err != nil {
- return err
- }
- if http2VerboseLogs {
- sc.vlogf("http2: server processing setting %v", s)
- }
- switch s.ID {
- case http2SettingHeaderTableSize:
- sc.headerTableSize = s.Val
- sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
- case http2SettingEnablePush:
- sc.pushEnabled = s.Val != 0
- case http2SettingMaxConcurrentStreams:
- sc.clientMaxStreams = s.Val
- case http2SettingInitialWindowSize:
- return sc.processSettingInitialWindowSize(s.Val)
- case http2SettingMaxFrameSize:
- sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
- case http2SettingMaxHeaderListSize:
- sc.peerMaxHeaderListSize = s.Val
- default:
- // Unknown setting: "An endpoint that receives a SETTINGS
- // frame with any unknown or unsupported identifier MUST
- // ignore that setting."
- if http2VerboseLogs {
- sc.vlogf("http2: server ignoring unknown setting %v", s)
- }
- }
- return nil
-}
-
-func (sc *http2serverConn) processSettingInitialWindowSize(val uint32) error {
- sc.serveG.check()
- // Note: val already validated to be within range by
- // processSetting's Valid call.
-
- // "A SETTINGS frame can alter the initial flow control window
- // size for all current streams. When the value of
- // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
- // adjust the size of all stream flow control windows that it
- // maintains by the difference between the new value and the
- // old value."
- old := sc.initialStreamSendWindowSize
- sc.initialStreamSendWindowSize = int32(val)
- growth := int32(val) - old // may be negative
- for _, st := range sc.streams {
- if !st.flow.add(growth) {
- // 6.9.2 Initial Flow Control Window Size
- // "An endpoint MUST treat a change to
- // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
- // control window to exceed the maximum size as a
- // connection error (Section 5.4.1) of type
- // FLOW_CONTROL_ERROR."
- return sc.countError("setting_win_size", http2ConnectionError(http2ErrCodeFlowControl))
- }
- }
- return nil
-}
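-
-// Worked example of the adjustment above: if the peer first sent
-// SETTINGS_INITIAL_WINDOW_SIZE = 65535 and later sends 131070, then
-// growth = 131070 - 65535 = +65535 and every open stream's send window
-// grows by that much. A change back down yields a negative growth and
-// can drive windows negative; RFC 7540, Section 6.9.2 explicitly
-// permits that.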
-
-func (sc *http2serverConn) processData(f *http2DataFrame) error {
- sc.serveG.check()
- id := f.Header().StreamID
- if sc.inGoAway && (sc.goAwayCode != http2ErrCodeNo || id > sc.maxClientStreamID) {
- // Discard all DATA frames if the GOAWAY is due to an
- // error, or:
- //
- // Section 6.8: After sending a GOAWAY frame, the sender
- // can discard frames for streams initiated by the
- // receiver with identifiers higher than the identified
- // last stream.
- return nil
- }
-
- data := f.Data()
- state, st := sc.state(id)
- if id == 0 || state == http2stateIdle {
- // Section 6.1: "DATA frames MUST be associated with a
- // stream. If a DATA frame is received whose stream
- // identifier field is 0x0, the recipient MUST respond
- // with a connection error (Section 5.4.1) of type
- // PROTOCOL_ERROR."
- //
- // Section 5.1: "Receiving any frame other than HEADERS
- // or PRIORITY on a stream in this state MUST be
- // treated as a connection error (Section 5.4.1) of
- // type PROTOCOL_ERROR."
- return sc.countError("data_on_idle", http2ConnectionError(http2ErrCodeProtocol))
- }
-
- // "If a DATA frame is received whose stream is not in "open"
- // or "half closed (local)" state, the recipient MUST respond
- // with a stream error (Section 5.4.2) of type STREAM_CLOSED."
- if st == nil || state != http2stateOpen || st.gotTrailerHeader || st.resetQueued {
- // This includes sending a RST_STREAM if the stream is
- // in stateHalfClosedLocal (which currently means that
- // the http.Handler returned, so it's done reading &
- // done writing). Try to stop the client from sending
- // more DATA.
-
- // But still enforce their connection-level flow control,
- // and return any flow control bytes since we're not going
- // to consume them.
- if sc.inflow.available() < int32(f.Length) {
- return sc.countError("data_flow", http2streamError(id, http2ErrCodeFlowControl))
- }
- // Deduct the flow control from inflow, since we're
- // going to immediately add it back in
- // sendWindowUpdate, which also schedules sending the
- // frames.
- sc.inflow.take(int32(f.Length))
- sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
-
- if st != nil && st.resetQueued {
- // Already have a stream error in flight. Don't send another.
- return nil
- }
- return sc.countError("closed", http2streamError(id, http2ErrCodeStreamClosed))
- }
- if st.body == nil {
- panic("internal error: should have a body in this state")
- }
-
- // Sender sending more than they'd declared?
- if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
- st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
- // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
- // value of a content-length header field does not equal the sum of the
- // DATA frame payload lengths that form the body.
- return sc.countError("send_too_much", http2streamError(id, http2ErrCodeProtocol))
- }
- if f.Length > 0 {
- // Check whether the client has flow control quota.
- if st.inflow.available() < int32(f.Length) {
- return sc.countError("flow_on_data_length", http2streamError(id, http2ErrCodeFlowControl))
- }
- st.inflow.take(int32(f.Length))
-
- if len(data) > 0 {
- wrote, err := st.body.Write(data)
- if err != nil {
- sc.sendWindowUpdate(nil, int(f.Length)-wrote)
- return sc.countError("body_write_err", http2streamError(id, http2ErrCodeStreamClosed))
- }
- if wrote != len(data) {
- panic("internal error: bad Writer")
- }
- st.bodyBytes += int64(len(data))
- }
-
- // Return any padded flow control now, since we won't
- // refund it later on body reads.
- if pad := int32(f.Length) - int32(len(data)); pad > 0 {
- sc.sendWindowUpdate32(nil, pad)
- sc.sendWindowUpdate32(st, pad)
- }
- }
- if f.StreamEnded() {
- st.endStream()
- }
- return nil
-}
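-
-// Note on the padding refund above: for a padded DATA frame, f.Length
-// counts the Pad Length byte and the padding octets, while len(data)
-// is only the payload, so pad = f.Length - len(data) is returned to
-// both the connection-level and stream-level windows immediately.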
-
-func (sc *http2serverConn) processGoAway(f *http2GoAwayFrame) error {
- sc.serveG.check()
- if f.ErrCode != http2ErrCodeNo {
- sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f)
- } else {
- sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
- }
- sc.startGracefulShutdownInternal()
- // http://tools.ietf.org/html/rfc7540#section-6.8
- // We should not create any new streams, which means we should disable push.
- sc.pushEnabled = false
- return nil
-}
-
-// isPushed reports whether the stream is server-initiated.
-func (st *http2stream) isPushed() bool {
- return st.id%2 == 0
-}
-
-// endStream closes a Request.Body's pipe. It is called when a DATA
-// frame says a request body is over (or after trailers).
-func (st *http2stream) endStream() {
- sc := st.sc
- sc.serveG.check()
-
- if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
- st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
- st.declBodyBytes, st.bodyBytes))
- } else {
- st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
- st.body.CloseWithError(io.EOF)
- }
- st.state = http2stateHalfClosedRemote
-}
-
-// copyTrailersToHandlerRequest is run in the Handler's goroutine in
-// its Request.Body.Read just before it gets io.EOF.
-func (st *http2stream) copyTrailersToHandlerRequest() {
- for k, vv := range st.trailer {
- if _, ok := st.reqTrailer[k]; ok {
-			// Only copy it over if it was pre-declared.
- st.reqTrailer[k] = vv
- }
- }
-}
-
-// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
-// when the stream's WriteTimeout has fired.
-func (st *http2stream) onWriteTimeout() {
- st.sc.writeFrameFromHandler(http2FrameWriteRequest{write: http2streamError(st.id, http2ErrCodeInternal)})
-}
-
-func (sc *http2serverConn) processHeaders(f *http2MetaHeadersFrame) error {
- sc.serveG.check()
- id := f.StreamID
- if sc.inGoAway {
- // Ignore.
- return nil
- }
- // http://tools.ietf.org/html/rfc7540#section-5.1.1
- // Streams initiated by a client MUST use odd-numbered stream
- // identifiers. [...] An endpoint that receives an unexpected
- // stream identifier MUST respond with a connection error
- // (Section 5.4.1) of type PROTOCOL_ERROR.
- if id%2 != 1 {
- return sc.countError("headers_even", http2ConnectionError(http2ErrCodeProtocol))
- }
- // A HEADERS frame can be used to create a new stream or
- // send a trailer for an open one. If we already have a stream
- // open, let it process its own HEADERS frame (trailers at this
- // point, if it's valid).
- if st := sc.streams[f.StreamID]; st != nil {
- if st.resetQueued {
- // We're sending RST_STREAM to close the stream, so don't bother
- // processing this frame.
- return nil
- }
- // RFC 7540, sec 5.1: If an endpoint receives additional frames, other than
- // WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in
- // this state, it MUST respond with a stream error (Section 5.4.2) of
- // type STREAM_CLOSED.
- if st.state == http2stateHalfClosedRemote {
- return sc.countError("headers_half_closed", http2streamError(id, http2ErrCodeStreamClosed))
- }
- return st.processTrailerHeaders(f)
- }
-
- // [...] The identifier of a newly established stream MUST be
- // numerically greater than all streams that the initiating
- // endpoint has opened or reserved. [...] An endpoint that
- // receives an unexpected stream identifier MUST respond with
- // a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
- if id <= sc.maxClientStreamID {
- return sc.countError("stream_went_down", http2ConnectionError(http2ErrCodeProtocol))
- }
- sc.maxClientStreamID = id
-
- if sc.idleTimer != nil {
- sc.idleTimer.Stop()
- }
-
- // http://tools.ietf.org/html/rfc7540#section-5.1.2
- // [...] Endpoints MUST NOT exceed the limit set by their peer. An
- // endpoint that receives a HEADERS frame that causes their
- // advertised concurrent stream limit to be exceeded MUST treat
- // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
- // or REFUSED_STREAM.
- if sc.curClientStreams+1 > sc.advMaxStreams {
- if sc.unackedSettings == 0 {
- // They should know better.
- return sc.countError("over_max_streams", http2streamError(id, http2ErrCodeProtocol))
- }
- // Assume it's a network race, where they just haven't
- // received our last SETTINGS update. But actually
- // this can't happen yet, because we don't yet provide
- // a way for users to adjust server parameters at
- // runtime.
- return sc.countError("over_max_streams_race", http2streamError(id, http2ErrCodeRefusedStream))
- }
-
- initialState := http2stateOpen
- if f.StreamEnded() {
- initialState = http2stateHalfClosedRemote
- }
- st := sc.newStream(id, 0, initialState)
-
- if f.HasPriority() {
- if err := sc.checkPriority(f.StreamID, f.Priority); err != nil {
- return err
- }
- sc.writeSched.AdjustStream(st.id, f.Priority)
- }
-
- rw, req, err := sc.newWriterAndRequest(st, f)
- if err != nil {
- return err
- }
- st.reqTrailer = req.Trailer
- if st.reqTrailer != nil {
- st.trailer = make(Header)
- }
- st.body = req.Body.(*http2requestBody).pipe // may be nil
- st.declBodyBytes = req.ContentLength
-
- handler := sc.handler.ServeHTTP
- if f.Truncated {
- // Their header list was too long. Send a 431 error.
- handler = http2handleHeaderListTooLong
- } else if err := http2checkValidHTTP2RequestHeaders(req.Header); err != nil {
- handler = http2new400Handler(err)
- }
-
- // The net/http package sets the read deadline from the
- // http.Server.ReadTimeout during the TLS handshake, but then
- // passes the connection off to us with the deadline already
- // set. Disarm it here after the request headers are read,
- // similar to how the http1 server works. Here it's
- // technically more like the http1 Server's ReadHeaderTimeout
- // (in Go 1.8), though. That's a more sane option anyway.
- if sc.hs.ReadTimeout != 0 {
- sc.conn.SetReadDeadline(time.Time{})
- }
-
- go sc.runHandler(rw, req, handler)
- return nil
-}
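-
-// Illustrative recap of the stream-ID rules enforced above (an editorial
-// sketch, not RFC text): client-initiated stream IDs must be odd and
-// strictly increasing. If streams 1, 3 and 5 are open, a HEADERS frame for
-// stream 4 (even) or stream 3 (not greater than 5) is a connection error of
-// type PROTOCOL_ERROR, while stream 7 opens a new stream.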
-
-func (st *http2stream) processTrailerHeaders(f *http2MetaHeadersFrame) error {
- sc := st.sc
- sc.serveG.check()
- if st.gotTrailerHeader {
- return sc.countError("dup_trailers", http2ConnectionError(http2ErrCodeProtocol))
- }
- st.gotTrailerHeader = true
- if !f.StreamEnded() {
- return sc.countError("trailers_not_ended", http2streamError(st.id, http2ErrCodeProtocol))
- }
-
- if len(f.PseudoFields()) > 0 {
- return sc.countError("trailers_pseudo", http2streamError(st.id, http2ErrCodeProtocol))
- }
- if st.trailer != nil {
- for _, hf := range f.RegularFields() {
- key := sc.canonicalHeader(hf.Name)
- if !httpguts.ValidTrailerHeader(key) {
- // TODO: send more details to the peer somehow. But http2 has
- // no way to send debug data at a stream level. Discuss with
- // HTTP folk.
- return sc.countError("trailers_bogus", http2streamError(st.id, http2ErrCodeProtocol))
- }
- st.trailer[key] = append(st.trailer[key], hf.Value)
- }
- }
- st.endStream()
- return nil
-}
-
-func (sc *http2serverConn) checkPriority(streamID uint32, p http2PriorityParam) error {
- if streamID == p.StreamDep {
- // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
- // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
- // Section 5.3.3 says that a stream can depend on one of its dependencies,
- // so it's only self-dependencies that are forbidden.
- return sc.countError("priority", http2streamError(streamID, http2ErrCodeProtocol))
- }
- return nil
-}
-
-func (sc *http2serverConn) processPriority(f *http2PriorityFrame) error {
- if sc.inGoAway {
- return nil
- }
- if err := sc.checkPriority(f.StreamID, f.http2PriorityParam); err != nil {
- return err
- }
- sc.writeSched.AdjustStream(f.StreamID, f.http2PriorityParam)
- return nil
-}
-
-func (sc *http2serverConn) newStream(id, pusherID uint32, state http2streamState) *http2stream {
- sc.serveG.check()
- if id == 0 {
- panic("internal error: cannot create stream with id 0")
- }
-
- ctx, cancelCtx := context.WithCancel(sc.baseCtx)
- st := &http2stream{
- sc: sc,
- id: id,
- state: state,
- ctx: ctx,
- cancelCtx: cancelCtx,
- }
- st.cw.Init()
- st.flow.conn = &sc.flow // link to conn-level counter
- st.flow.add(sc.initialStreamSendWindowSize)
- st.inflow.conn = &sc.inflow // link to conn-level counter
- st.inflow.add(sc.srv.initialStreamRecvWindowSize())
- if sc.hs.WriteTimeout != 0 {
- st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
- }
-
- sc.streams[id] = st
- sc.writeSched.OpenStream(st.id, http2OpenStreamOptions{PusherID: pusherID})
- if st.isPushed() {
- sc.curPushedStreams++
- } else {
- sc.curClientStreams++
- }
- if sc.curOpenStreams() == 1 {
- sc.setConnState(StateActive)
- }
-
- return st
-}
-
-func (sc *http2serverConn) newWriterAndRequest(st *http2stream, f *http2MetaHeadersFrame) (*http2responseWriter, *Request, error) {
- sc.serveG.check()
-
- rp := http2requestParam{
- method: f.PseudoValue("method"),
- scheme: f.PseudoValue("scheme"),
- authority: f.PseudoValue("authority"),
- path: f.PseudoValue("path"),
- }
-
- isConnect := rp.method == "CONNECT"
- if isConnect {
- if rp.path != "" || rp.scheme != "" || rp.authority == "" {
- return nil, nil, sc.countError("bad_connect", http2streamError(f.StreamID, http2ErrCodeProtocol))
- }
- } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
- // See 8.1.2.6 Malformed Requests and Responses:
- //
- // Malformed requests or responses that are detected
- // MUST be treated as a stream error (Section 5.4.2)
- // of type PROTOCOL_ERROR."
- //
- // 8.1.2.3 Request Pseudo-Header Fields
- // "All HTTP/2 requests MUST include exactly one valid
- // value for the :method, :scheme, and :path
- // pseudo-header fields"
- return nil, nil, sc.countError("bad_path_method", http2streamError(f.StreamID, http2ErrCodeProtocol))
- }
-
- bodyOpen := !f.StreamEnded()
- if rp.method == "HEAD" && bodyOpen {
- // HEAD requests can't have bodies
- return nil, nil, sc.countError("head_body", http2streamError(f.StreamID, http2ErrCodeProtocol))
- }
-
- rp.header = make(Header)
- for _, hf := range f.RegularFields() {
- rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
- }
- if rp.authority == "" {
- rp.authority = rp.header.Get("Host")
- }
-
- rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
- if err != nil {
- return nil, nil, err
- }
- if bodyOpen {
- if vv, ok := rp.header["Content-Length"]; ok {
- if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
- req.ContentLength = int64(cl)
- } else {
- req.ContentLength = 0
- }
- } else {
- req.ContentLength = -1
- }
- req.Body.(*http2requestBody).pipe = &http2pipe{
- b: &http2dataBuffer{expected: req.ContentLength},
- }
- }
- return rw, req, nil
-}
-
-type http2requestParam struct {
- method string
- scheme, authority, path string
- header Header
-}
-
-func (sc *http2serverConn) newWriterAndRequestNoBody(st *http2stream, rp http2requestParam) (*http2responseWriter, *Request, error) {
- sc.serveG.check()
-
- var tlsState *tls.ConnectionState // nil if not scheme https
- if rp.scheme == "https" {
- tlsState = sc.tlsState
- }
-
- needsContinue := rp.header.Get("Expect") == "100-continue"
- if needsContinue {
- rp.header.Del("Expect")
- }
- // Merge Cookie headers into one "; "-delimited value.
- if cookies := rp.header["Cookie"]; len(cookies) > 1 {
- rp.header.Set("Cookie", strings.Join(cookies, "; "))
- }
-
- // Setup Trailers
- var trailer Header
- for _, v := range rp.header["Trailer"] {
- for _, key := range strings.Split(v, ",") {
- key = CanonicalHeaderKey(textproto.TrimString(key))
- switch key {
- case "Transfer-Encoding", "Trailer", "Content-Length":
- // Bogus. (copy of http1 rules)
- // Ignore.
- default:
- if trailer == nil {
- trailer = make(Header)
- }
- trailer[key] = nil
- }
- }
- }
- delete(rp.header, "Trailer")
-
- var url_ *url.URL
- var requestURI string
- if rp.method == "CONNECT" {
- url_ = &url.URL{Host: rp.authority}
- requestURI = rp.authority // mimic HTTP/1 server behavior
- } else {
- var err error
- url_, err = url.ParseRequestURI(rp.path)
- if err != nil {
- return nil, nil, sc.countError("bad_path", http2streamError(st.id, http2ErrCodeProtocol))
- }
- requestURI = rp.path
- }
-
- body := &http2requestBody{
- conn: sc,
- stream: st,
- needsContinue: needsContinue,
- }
- req := &Request{
- Method: rp.method,
- URL: url_,
- RemoteAddr: sc.remoteAddrStr,
- Header: rp.header,
- RequestURI: requestURI,
- Proto: "HTTP/2.0",
- ProtoMajor: 2,
- ProtoMinor: 0,
- TLS: tlsState,
- Host: rp.authority,
- Body: body,
- Trailer: trailer,
- }
- req = req.WithContext(st.ctx)
-
- rws := http2responseWriterStatePool.Get().(*http2responseWriterState)
- bwSave := rws.bw
- *rws = http2responseWriterState{} // zero all the fields
- rws.conn = sc
- rws.bw = bwSave
- rws.bw.Reset(http2chunkWriter{rws})
- rws.stream = st
- rws.req = req
- rws.body = body
-
- rw := &http2responseWriter{rws: rws}
- return rw, req, nil
-}
-
-// Run on its own goroutine.
-func (sc *http2serverConn) runHandler(rw *http2responseWriter, req *Request, handler func(ResponseWriter, *Request)) {
- didPanic := true
- defer func() {
- rw.rws.stream.cancelCtx()
- if didPanic {
- e := recover()
- sc.writeFrameFromHandler(http2FrameWriteRequest{
- write: http2handlerPanicRST{rw.rws.stream.id},
- stream: rw.rws.stream,
- })
- // Same as net/http:
- if e != nil && e != ErrAbortHandler {
- const size = 64 << 10
- buf := make([]byte, size)
- buf = buf[:runtime.Stack(buf, false)]
- sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
- }
- return
- }
- rw.handlerDone()
- }()
- handler(rw, req)
- didPanic = false
-}
-
-func http2handleHeaderListTooLong(w ResponseWriter, r *Request) {
- // 10.5.1 Limits on Header Block Size:
- // .. "A server that receives a larger header block than it is
- // willing to handle can send an HTTP 431 (Request Header Fields Too
- // Large) status code"
- const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
- w.WriteHeader(statusRequestHeaderFieldsTooLarge)
- io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
-}
-
-// called from handler goroutines.
-// h may be nil.
-func (sc *http2serverConn) writeHeaders(st *http2stream, headerData *http2writeResHeaders) error {
- sc.serveG.checkNotOn() // NOT on
- var errc chan error
- if headerData.h != nil {
- // If there's a header map (which we don't own), we have to block until
- // this frame is written, so that an http.Flush mid-handler writes out
- // the correct value of the keys before a handler later potentially
- // mutates it.
- errc = http2errChanPool.Get().(chan error)
- }
- if err := sc.writeFrameFromHandler(http2FrameWriteRequest{
- write: headerData,
- stream: st,
- done: errc,
- }); err != nil {
- return err
- }
- if errc != nil {
- select {
- case err := <-errc:
- http2errChanPool.Put(errc)
- return err
- case <-sc.doneServing:
- return http2errClientDisconnected
- case <-st.cw:
- return http2errStreamClosed
- }
- }
- return nil
-}
-
-// called from handler goroutines.
-func (sc *http2serverConn) write100ContinueHeaders(st *http2stream) {
- sc.writeFrameFromHandler(http2FrameWriteRequest{
- write: http2write100ContinueHeadersFrame{st.id},
- stream: st,
- })
-}
-
-// A bodyReadMsg tells the server loop that the http.Handler read n
-// bytes of DATA from the client on the given stream.
-type http2bodyReadMsg struct {
- st *http2stream
- n int
-}
-
-// called from handler goroutines.
-// Notes that the handler for the given stream ID read n bytes of its body
-// and schedules flow control tokens to be sent.
-func (sc *http2serverConn) noteBodyReadFromHandler(st *http2stream, n int, err error) {
- sc.serveG.checkNotOn() // NOT on
- if n > 0 {
- select {
- case sc.bodyReadCh <- http2bodyReadMsg{st, n}:
- case <-sc.doneServing:
- }
- }
-}
-
-func (sc *http2serverConn) noteBodyRead(st *http2stream, n int) {
- sc.serveG.check()
- sc.sendWindowUpdate(nil, n) // conn-level
- if st.state != http2stateHalfClosedRemote && st.state != http2stateClosed {
- // Don't send this WINDOW_UPDATE if the stream is closed
- // remotely.
- sc.sendWindowUpdate(st, n)
- }
-}
-
-// st may be nil for conn-level
-func (sc *http2serverConn) sendWindowUpdate(st *http2stream, n int) {
- sc.serveG.check()
- // "The legal range for the increment to the flow control
- // window is 1 to 2^31-1 (2,147,483,647) octets."
- // A single Go Read call on 64-bit machines could in theory read
- // more than this. Very unlikely, but we handle it here
- // rather than elsewhere for now.
- const maxUint31 = 1<<31 - 1
- for n >= maxUint31 {
- sc.sendWindowUpdate32(st, maxUint31)
- n -= maxUint31
- }
- sc.sendWindowUpdate32(st, int32(n))
-}
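-
-// Worked example for the chunking above: after a single 5,000,000,000-byte
-// read, sendWindowUpdate emits WINDOW_UPDATE(2147483647) twice followed by
-// WINDOW_UPDATE(705032706), since 5,000,000,000 = 2*2147483647 + 705032706.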
-
-// st may be nil for conn-level
-func (sc *http2serverConn) sendWindowUpdate32(st *http2stream, n int32) {
- sc.serveG.check()
- if n == 0 {
- return
- }
- if n < 0 {
- panic("negative update")
- }
- var streamID uint32
- if st != nil {
- streamID = st.id
- }
- sc.writeFrame(http2FrameWriteRequest{
- write: http2writeWindowUpdate{streamID: streamID, n: uint32(n)},
- stream: st,
- })
- var ok bool
- if st == nil {
- ok = sc.inflow.add(n)
- } else {
- ok = st.inflow.add(n)
- }
- if !ok {
- panic("internal error; sent too many window updates without decrements?")
- }
-}
-
-// requestBody is the Handler's Request.Body type.
-// Read and Close may be called concurrently.
-type http2requestBody struct {
- _ http2incomparable
- stream *http2stream
- conn *http2serverConn
- closed bool // for use by Close only
- sawEOF bool // for use by Read only
- pipe *http2pipe // non-nil if we have a HTTP entity message body
- needsContinue bool // need to send a 100-continue
-}
-
-func (b *http2requestBody) Close() error {
- if b.pipe != nil && !b.closed {
- b.pipe.BreakWithError(http2errClosedBody)
- }
- b.closed = true
- return nil
-}
-
-func (b *http2requestBody) Read(p []byte) (n int, err error) {
- if b.needsContinue {
- b.needsContinue = false
- b.conn.write100ContinueHeaders(b.stream)
- }
- if b.pipe == nil || b.sawEOF {
- return 0, io.EOF
- }
- n, err = b.pipe.Read(p)
- if err == io.EOF {
- b.sawEOF = true
- }
- if b.conn == nil && http2inTests {
- return
- }
- b.conn.noteBodyReadFromHandler(b.stream, n, err)
- return
-}
-
-// responseWriter is the http.ResponseWriter implementation. It's
-// intentionally small (1 pointer wide) to minimize garbage. The
-// responseWriterState pointer inside is zeroed at the end of a
-// request (in handlerDone) and calls on the responseWriter thereafter
-// simply crash (caller's mistake), but the much larger responseWriterState
-// and buffers are reused between multiple requests.
-type http2responseWriter struct {
- rws *http2responseWriterState
-}
-
-// Optional http.ResponseWriter interfaces implemented.
-var (
- _ CloseNotifier = (*http2responseWriter)(nil)
- _ Flusher = (*http2responseWriter)(nil)
- _ http2stringWriter = (*http2responseWriter)(nil)
-)
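-
-// Handlers reach these optional interfaces by type-asserting the
-// ResponseWriter they are given; a minimal, hypothetical example:
-//
-//  func handler(w ResponseWriter, r *Request) {
-//      io.WriteString(w, "partial response")
-//      if f, ok := w.(Flusher); ok {
-//          f.Flush() // force the buffered DATA frame out now
-//      }
-//  }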
-
-type http2responseWriterState struct {
- // immutable within a request:
- stream *http2stream
- req *Request
- body *http2requestBody // to close at end of request, if DATA frames didn't
- conn *http2serverConn
-
- // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
- bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
-
- // mutated by http.Handler goroutine:
- handlerHeader Header // nil until called
- snapHeader Header // snapshot of handlerHeader at WriteHeader time
- trailers []string // set in writeChunk
- status int // status code passed to WriteHeader
- wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
- sentHeader bool // have we sent the header frame?
- handlerDone bool // handler has finished
- dirty bool // a Write failed; don't reuse this responseWriterState
-
- sentContentLen int64 // non-zero if handler set a Content-Length header
- wroteBytes int64
-
- closeNotifierMu sync.Mutex // guards closeNotifierCh
- closeNotifierCh chan bool // nil until first used
-}
-
-type http2chunkWriter struct{ rws *http2responseWriterState }
-
-func (cw http2chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
-
-func (rws *http2responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 }
-
-func (rws *http2responseWriterState) hasNonemptyTrailers() bool {
- for _, trailer := range rws.trailers {
- if _, ok := rws.handlerHeader[trailer]; ok {
- return true
- }
- }
- return false
-}
-
-// declareTrailer is called for each Trailer header when the
-// response header is written. It notes that a header will need to be
-// written in the trailers at the end of the response.
-func (rws *http2responseWriterState) declareTrailer(k string) {
- k = CanonicalHeaderKey(k)
- if !httpguts.ValidTrailerHeader(k) {
- // Forbidden by RFC 7230, section 4.1.2.
- rws.conn.logf("ignoring invalid trailer %q", k)
- return
- }
- if !http2strSliceContains(rws.trailers, k) {
- rws.trailers = append(rws.trailers, k)
- }
-}
-
-// writeChunk writes chunks from the bufio.Writer. But because
-// bufio.Writer may bypass its chunking, sometimes p may be
-// arbitrarily large.
-//
-// writeChunk is also responsible (on the first chunk) for sending the
-// HEADER response.
-func (rws *http2responseWriterState) writeChunk(p []byte) (n int, err error) {
- if !rws.wroteHeader {
- rws.writeHeader(200)
- }
-
- isHeadResp := rws.req.Method == "HEAD"
- if !rws.sentHeader {
- rws.sentHeader = true
- var ctype, clen string
- if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
- rws.snapHeader.Del("Content-Length")
- if cl, err := strconv.ParseUint(clen, 10, 63); err == nil {
- rws.sentContentLen = int64(cl)
- } else {
- clen = ""
- }
- }
- if clen == "" && rws.handlerDone && http2bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
- clen = strconv.Itoa(len(p))
- }
- _, hasContentType := rws.snapHeader["Content-Type"]
- // If the Content-Encoding is non-blank, we shouldn't
- // sniff the body. See Issue golang.org/issue/31753.
- ce := rws.snapHeader.Get("Content-Encoding")
- hasCE := len(ce) > 0
- if !hasCE && !hasContentType && http2bodyAllowedForStatus(rws.status) && len(p) > 0 {
- ctype = DetectContentType(p)
- }
- var date string
- if _, ok := rws.snapHeader["Date"]; !ok {
- // TODO(bradfitz): be faster here, like net/http? measure.
- date = time.Now().UTC().Format(TimeFormat)
- }
-
- for _, v := range rws.snapHeader["Trailer"] {
- http2foreachHeaderElement(v, rws.declareTrailer)
- }
-
- // "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2),
- // but respect "Connection" == "close" to mean sending a GOAWAY and tearing
- // down the TCP connection when idle, like we do for HTTP/1.
- // TODO: remove more Connection-specific header fields here, in addition
- // to "Connection".
- if _, ok := rws.snapHeader["Connection"]; ok {
- v := rws.snapHeader.Get("Connection")
- delete(rws.snapHeader, "Connection")
- if v == "close" {
- rws.conn.startGracefulShutdown()
- }
- }
-
- endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
- err = rws.conn.writeHeaders(rws.stream, &http2writeResHeaders{
- streamID: rws.stream.id,
- httpResCode: rws.status,
- h: rws.snapHeader,
- endStream: endStream,
- contentType: ctype,
- contentLength: clen,
- date: date,
- })
- if err != nil {
- rws.dirty = true
- return 0, err
- }
- if endStream {
- return 0, nil
- }
- }
- if isHeadResp {
- return len(p), nil
- }
- if len(p) == 0 && !rws.handlerDone {
- return 0, nil
- }
-
- if rws.handlerDone {
- rws.promoteUndeclaredTrailers()
- }
-
- // only send trailers if they have actually been defined by the
- // server handler.
- hasNonemptyTrailers := rws.hasNonemptyTrailers()
- endStream := rws.handlerDone && !hasNonemptyTrailers
- if len(p) > 0 || endStream {
- // only send a 0 byte DATA frame if we're ending the stream.
- if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
- rws.dirty = true
- return 0, err
- }
- }
-
- if rws.handlerDone && hasNonemptyTrailers {
- err = rws.conn.writeHeaders(rws.stream, &http2writeResHeaders{
- streamID: rws.stream.id,
- h: rws.handlerHeader,
- trailers: rws.trailers,
- endStream: true,
- })
- if err != nil {
- rws.dirty = true
- }
- return len(p), err
- }
- return len(p), nil
-}
-
-// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
-// that, if present, signals that the map entry is actually for
-// the response trailers, and not the response headers. The prefix
-// is stripped after the ServeHTTP call finishes and the values are
-// sent in the trailers.
-//
-// This mechanism is intended only for trailers that are not known
-// prior to the headers being written. If the set of trailers is fixed
-// or known before the header is written, the normal Go trailers mechanism
-// is preferred:
-// https://golang.org/pkg/net/http/#ResponseWriter
-// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
-const http2TrailerPrefix = "Trailer:"
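-
-// A minimal, hypothetical handler showing the prefix mechanism (the header
-// name and value here are examples only):
-//
-//  func handler(w ResponseWriter, r *Request) {
-//      w.Write([]byte("body"))                    // headers flushed here
-//      w.Header().Set("Trailer:Grpc-Status", "0") // still becomes a trailer
-//  }
-//
-// promoteUndeclaredTrailers below strips the prefix and promotes the field.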
-
-// promoteUndeclaredTrailers permits http.Handlers to set trailers
-// after the header has already been flushed. Because the Go
-// ResponseWriter interface has no way to set Trailers (only the
-// Header), and because we didn't want to expand the ResponseWriter
-// interface, and because nobody used trailers, and because RFC 7230
-// says you SHOULD (but not must) predeclare any trailers in the
-// header, the official ResponseWriter rules said trailers in Go must
-// be predeclared, and then we reuse the same ResponseWriter.Header()
-// map to mean both Headers and Trailers. When it's time to write the
-// Trailers, we pick out the fields of Headers that were declared as
-// trailers. That worked for a while, until we found the first major
-// user of Trailers in the wild: gRPC (using them only over http2),
-// and gRPC libraries permit setting trailers mid-stream without
-// predeclaring them. So: change of plans. We still permit the old
-// way, but we also permit this hack: if a Header() key begins with
-// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
- // invalid token byte anyway, there is no ambiguity. (And it's already
- // filtered out.) It's mildly hacky, but not terrible.
-//
-// This method runs after the Handler is done and promotes any Header
-// fields to be trailers.
-func (rws *http2responseWriterState) promoteUndeclaredTrailers() {
- for k, vv := range rws.handlerHeader {
- if !strings.HasPrefix(k, http2TrailerPrefix) {
- continue
- }
- trailerKey := strings.TrimPrefix(k, http2TrailerPrefix)
- rws.declareTrailer(trailerKey)
- rws.handlerHeader[CanonicalHeaderKey(trailerKey)] = vv
- }
-
- if len(rws.trailers) > 1 {
- sorter := http2sorterPool.Get().(*http2sorter)
- sorter.SortStrings(rws.trailers)
- http2sorterPool.Put(sorter)
- }
-}
-
-func (w *http2responseWriter) Flush() {
- rws := w.rws
- if rws == nil {
- panic("Header called after Handler finished")
- }
- if rws.bw.Buffered() > 0 {
- if err := rws.bw.Flush(); err != nil {
- // Ignore the error. The frame writer already knows.
- return
- }
- } else {
- // The bufio.Writer won't call chunkWriter.Write
- // (writeChunk) with zero bytes, so we have to do it
- // ourselves to force the HTTP response header and/or
- // final DATA frame (with END_STREAM) to be sent.
- rws.writeChunk(nil)
- }
-}
-
-func (w *http2responseWriter) CloseNotify() <-chan bool {
- rws := w.rws
- if rws == nil {
- panic("CloseNotify called after Handler finished")
- }
- rws.closeNotifierMu.Lock()
- ch := rws.closeNotifierCh
- if ch == nil {
- ch = make(chan bool, 1)
- rws.closeNotifierCh = ch
- cw := rws.stream.cw
- go func() {
- cw.Wait() // wait for close
- ch <- true
- }()
- }
- rws.closeNotifierMu.Unlock()
- return ch
-}
-
-func (w *http2responseWriter) Header() Header {
- rws := w.rws
- if rws == nil {
- panic("Header called after Handler finished")
- }
- if rws.handlerHeader == nil {
- rws.handlerHeader = make(Header)
- }
- return rws.handlerHeader
-}
-
-// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode.
-func http2checkWriteHeaderCode(code int) {
- // Issue 22880: require valid WriteHeader status codes.
- // For now we only enforce that it's three digits.
- // In the future we might block things over 599 (600 and above aren't defined
- // at http://httpwg.org/specs/rfc7231.html#status.codes)
- // and we might block under 200 (once we have more mature 1xx support).
- // But for now any three digits.
- //
- // We used to send "HTTP/1.1 000 0" on the wire in responses but there's
- // no equivalent bogus thing we can realistically send in HTTP/2,
- // so we'll consistently panic instead and help people find their bugs
- // early. (We can't return an error from WriteHeader even if we wanted to.)
- if code < 100 || code > 999 {
- panic(fmt.Sprintf("invalid WriteHeader code %v", code))
- }
-}
-
-func (w *http2responseWriter) WriteHeader(code int) {
- rws := w.rws
- if rws == nil {
- panic("WriteHeader called after Handler finished")
- }
- rws.writeHeader(code)
-}
-
-func (rws *http2responseWriterState) writeHeader(code int) {
- if !rws.wroteHeader {
- http2checkWriteHeaderCode(code)
- rws.wroteHeader = true
- rws.status = code
- if len(rws.handlerHeader) > 0 {
- rws.snapHeader = http2cloneHeader(rws.handlerHeader)
- }
- }
-}
-
-func http2cloneHeader(h Header) Header {
- h2 := make(Header, len(h))
- for k, vv := range h {
- vv2 := make([]string, len(vv))
- copy(vv2, vv)
- h2[k] = vv2
- }
- return h2
-}
-
-// The Life Of A Write is like this:
-//
-// * Handler calls w.Write or w.WriteString ->
-// * -> rws.bw (*bufio.Writer) ->
-// * (Handler might call Flush)
-// * -> chunkWriter{rws}
-// * -> responseWriterState.writeChunk(p []byte)
-// * -> responseWriterState.writeChunk (most of the magic; see comment there)
-func (w *http2responseWriter) Write(p []byte) (n int, err error) {
- return w.write(len(p), p, "")
-}
-
-func (w *http2responseWriter) WriteString(s string) (n int, err error) {
- return w.write(len(s), nil, s)
-}
-
-// either dataB or dataS is non-zero.
-func (w *http2responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
- rws := w.rws
- if rws == nil {
- panic("Write called after Handler finished")
- }
- if !rws.wroteHeader {
- w.WriteHeader(200)
- }
- if !http2bodyAllowedForStatus(rws.status) {
- return 0, ErrBodyNotAllowed
- }
- rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
- if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
- // TODO: send a RST_STREAM
- return 0, errors.New("http2: handler wrote more than declared Content-Length")
- }
-
- if dataB != nil {
- return rws.bw.Write(dataB)
- } else {
- return rws.bw.WriteString(dataS)
- }
-}
-
-func (w *http2responseWriter) handlerDone() {
- rws := w.rws
- dirty := rws.dirty
- rws.handlerDone = true
- w.Flush()
- w.rws = nil
- if !dirty {
- // Only recycle the pool if all prior Write calls to
- // the serverConn goroutine completed successfully. If
- // they returned earlier due to resets from the peer
- // there might still be write goroutines outstanding
- // from the serverConn referencing the rws memory. See
- // issue 20704.
- http2responseWriterStatePool.Put(rws)
- }
-}
-
-// Push errors.
-var (
- http2ErrRecursivePush = errors.New("http2: recursive push not allowed")
- http2ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
-)
-
-var _ Pusher = (*http2responseWriter)(nil)
-
-func (w *http2responseWriter) Push(target string, opts *PushOptions) error {
- st := w.rws.stream
- sc := st.sc
- sc.serveG.checkNotOn()
-
- // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
- // http://tools.ietf.org/html/rfc7540#section-6.6
- if st.isPushed() {
- return http2ErrRecursivePush
- }
-
- if opts == nil {
- opts = new(PushOptions)
- }
-
- // Default options.
- if opts.Method == "" {
- opts.Method = "GET"
- }
- if opts.Header == nil {
- opts.Header = Header{}
- }
- wantScheme := "http"
- if w.rws.req.TLS != nil {
- wantScheme = "https"
- }
-
- // Validate the request.
- u, err := url.Parse(target)
- if err != nil {
- return err
- }
- if u.Scheme == "" {
- if !strings.HasPrefix(target, "/") {
- return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
- }
- u.Scheme = wantScheme
- u.Host = w.rws.req.Host
- } else {
- if u.Scheme != wantScheme {
- return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
- }
- if u.Host == "" {
- return errors.New("URL must have a host")
- }
- }
- for k := range opts.Header {
- if strings.HasPrefix(k, ":") {
- return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
- }
- // These headers are meaningful only if the request has a body,
- // but PUSH_PROMISE requests cannot have a body.
- // http://tools.ietf.org/html/rfc7540#section-8.2
- // Also disallow Host, since the promised URL must be absolute.
- if http2asciiEqualFold(k, "content-length") ||
- http2asciiEqualFold(k, "content-encoding") ||
- http2asciiEqualFold(k, "trailer") ||
- http2asciiEqualFold(k, "te") ||
- http2asciiEqualFold(k, "expect") ||
- http2asciiEqualFold(k, "host") {
- return fmt.Errorf("promised request headers cannot include %q", k)
- }
- }
- if err := http2checkValidHTTP2RequestHeaders(opts.Header); err != nil {
- return err
- }
-
- // The RFC effectively limits promised requests to GET and HEAD:
- // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
- // http://tools.ietf.org/html/rfc7540#section-8.2
- if opts.Method != "GET" && opts.Method != "HEAD" {
- return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
- }
-
- msg := &http2startPushRequest{
- parent: st,
- method: opts.Method,
- url: u,
- header: http2cloneHeader(opts.Header),
- done: http2errChanPool.Get().(chan error),
- }
-
- select {
- case <-sc.doneServing:
- return http2errClientDisconnected
- case <-st.cw:
- return http2errStreamClosed
- case sc.serveMsgCh <- msg:
- }
-
- select {
- case <-sc.doneServing:
- return http2errClientDisconnected
- case <-st.cw:
- return http2errStreamClosed
- case err := <-msg.done:
- http2errChanPool.Put(msg.done)
- return err
- }
-}
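-
-// A typical, hypothetical use from a handler, via the Pusher interface this
-// method implements:
-//
-//  func handler(w ResponseWriter, r *Request) {
-//      if pusher, ok := w.(Pusher); ok {
-//          // Best effort: errors such as ErrNotSupported are non-fatal.
-//          pusher.Push("/static/app.css", nil)
-//      }
-//      // ... then write the response that references /static/app.css ...
-//  }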
-
-type http2startPushRequest struct {
- parent *http2stream
- method string
- url *url.URL
- header Header
- done chan error
-}
-
-func (sc *http2serverConn) startPush(msg *http2startPushRequest) {
- sc.serveG.check()
-
- // http://tools.ietf.org/html/rfc7540#section-6.6.
- // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
- // is in either the "open" or "half-closed (remote)" state.
- if msg.parent.state != http2stateOpen && msg.parent.state != http2stateHalfClosedRemote {
- // responseWriter.Push checks that the stream is peer-initiated.
- msg.done <- http2errStreamClosed
- return
- }
-
- // http://tools.ietf.org/html/rfc7540#section-6.6.
- if !sc.pushEnabled {
- msg.done <- ErrNotSupported
- return
- }
-
- // PUSH_PROMISE frames must be sent in increasing order by stream ID, so
- // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
- // is written. Once the ID is allocated, we start the request handler.
- allocatePromisedID := func() (uint32, error) {
- sc.serveG.check()
-
- // Check this again, just in case. Technically, we might have received
- // an updated SETTINGS by the time we got around to writing this frame.
- if !sc.pushEnabled {
- return 0, ErrNotSupported
- }
- // http://tools.ietf.org/html/rfc7540#section-6.5.2.
- if sc.curPushedStreams+1 > sc.clientMaxStreams {
- return 0, http2ErrPushLimitReached
- }
-
- // http://tools.ietf.org/html/rfc7540#section-5.1.1.
- // Streams initiated by the server MUST use even-numbered identifiers.
- // A server that is unable to establish a new stream identifier can send a GOAWAY
- // frame so that the client is forced to open a new connection for new streams.
- if sc.maxPushPromiseID+2 >= 1<<31 {
- sc.startGracefulShutdownInternal()
- return 0, http2ErrPushLimitReached
- }
- sc.maxPushPromiseID += 2
- promisedID := sc.maxPushPromiseID
-
- // http://tools.ietf.org/html/rfc7540#section-8.2.
- // Strictly speaking, the new stream should start in "reserved (local)", then
- // transition to "half closed (remote)" after sending the initial HEADERS, but
- // we start in "half closed (remote)" for simplicity.
- // See further comments at the definition of stateHalfClosedRemote.
- promised := sc.newStream(promisedID, msg.parent.id, http2stateHalfClosedRemote)
- rw, req, err := sc.newWriterAndRequestNoBody(promised, http2requestParam{
- method: msg.method,
- scheme: msg.url.Scheme,
- authority: msg.url.Host,
- path: msg.url.RequestURI(),
- header: http2cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
- })
- if err != nil {
- // Should not happen, since we've already validated msg.url.
- panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
- }
-
- go sc.runHandler(rw, req, sc.handler.ServeHTTP)
- return promisedID, nil
- }
-
- sc.writeFrame(http2FrameWriteRequest{
- write: &http2writePushPromise{
- streamID: msg.parent.id,
- method: msg.method,
- url: msg.url,
- h: msg.header,
- allocatePromisedID: allocatePromisedID,
- },
- stream: msg.parent,
- done: msg.done,
- })
-}
-
-// foreachHeaderElement splits v according to the "#rule" construction
-// in RFC 7230 section 7 and calls fn for each non-empty element.
-func http2foreachHeaderElement(v string, fn func(string)) {
- v = textproto.TrimString(v)
- if v == "" {
- return
- }
- if !strings.Contains(v, ",") {
- fn(v)
- return
- }
- for _, f := range strings.Split(v, ",") {
- if f = textproto.TrimString(f); f != "" {
- fn(f)
- }
- }
-}
-
-// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
-var http2connHeaders = []string{
- "Connection",
- "Keep-Alive",
- "Proxy-Connection",
- "Transfer-Encoding",
- "Upgrade",
-}
-
-// checkValidHTTP2RequestHeaders checks whether h is a valid set of HTTP/2
-// request headers, per RFC 7540 Section 8.1.2.2.
-// The returned error is reported to users.
-func http2checkValidHTTP2RequestHeaders(h Header) error {
- for _, k := range http2connHeaders {
- if _, ok := h[k]; ok {
- return fmt.Errorf("request header %q is not valid in HTTP/2", k)
- }
- }
- te := h["Te"]
- if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
- return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
- }
- return nil
-}
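-
-// For illustration, under the rules above:
-//
-//  Connection: keep-alive  -> rejected (connection-specific header)
-//  Transfer-Encoding: gzip -> rejected (connection-specific header)
-//  TE: gzip                -> rejected (only "trailers" is allowed)
-//  TE: trailers            -> accepted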
-
-func http2new400Handler(err error) HandlerFunc {
- return func(w ResponseWriter, r *Request) {
- Error(w, err.Error(), StatusBadRequest)
- }
-}
-
-// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
-// disabled. See comments on h1ServerShutdownChan above for why
-// the code is written this way.
-func http2h1ServerKeepAlivesDisabled(hs *Server) bool {
- var x interface{} = hs
- type I interface {
- doKeepAlives() bool
- }
- if hs, ok := x.(I); ok {
- return !hs.doKeepAlives()
- }
- return false
-}
-
-func (sc *http2serverConn) countError(name string, err error) error {
- if sc == nil || sc.srv == nil {
- return err
- }
- f := sc.srv.CountError
- if f == nil {
- return err
- }
- var typ string
- var code http2ErrCode
- switch e := err.(type) {
- case http2ConnectionError:
- typ = "conn"
- code = http2ErrCode(e)
- case http2StreamError:
- typ = "stream"
- code = http2ErrCode(e.Code)
- default:
- return err
- }
- codeStr := http2errCodeName[code]
- if codeStr == "" {
- codeStr = strconv.Itoa(int(code))
- }
- f(fmt.Sprintf("%s_%s_%s", typ, codeStr, name))
- return err
-}
-
-const (
- // transportDefaultConnFlow is how many connection-level flow control
- // tokens we give the server at start-up, past the default 64k.
- http2transportDefaultConnFlow = 1 << 30
-
- // transportDefaultStreamFlow is how many stream-level flow
- // control tokens we announce to the peer, and how many bytes
- // we buffer per stream.
- http2transportDefaultStreamFlow = 4 << 20
-
- // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
- // a stream-level WINDOW_UPDATE for at a time.
- http2transportDefaultStreamMinRefresh = 4 << 10
-
- http2defaultUserAgent = "Go-http-client/2.0"
-
- // initialMaxConcurrentStreams is a connection's maxConcurrentStreams until
- // it has received the server's initial SETTINGS frame, which corresponds
- // with the spec's minimum recommended value.
- http2initialMaxConcurrentStreams = 100
-
- // defaultMaxConcurrentStreams is a connection's default maxConcurrentStreams
- // if the server doesn't include one in its initial SETTINGS frame.
- http2defaultMaxConcurrentStreams = 1000
-)
-
-// Transport is an HTTP/2 Transport.
-//
-// A Transport internally caches connections to servers. It is safe
-// for concurrent use by multiple goroutines.
-type http2Transport struct {
- // DialTLS specifies an optional dial function for creating
- // TLS connections for requests.
- //
- // If DialTLS is nil, tls.Dial is used.
- //
- // If the returned net.Conn has a ConnectionState method like tls.Conn,
- // it will be used to set http.Response.TLS.
- DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
-
- // TLSClientConfig specifies the TLS configuration to use with
- // tls.Client. If nil, the default configuration is used.
- TLSClientConfig *tls.Config
-
- // ConnPool optionally specifies an alternate connection pool to use.
- // If nil, the default is used.
- ConnPool http2ClientConnPool
-
- // DisableCompression, if true, prevents the Transport from
- // requesting compression with an "Accept-Encoding: gzip"
- // request header when the Request contains no existing
- // Accept-Encoding value. If the Transport requests gzip on
- // its own and gets a gzipped response, it's transparently
- // decoded in the Response.Body. However, if the user
- // explicitly requested gzip it is not automatically
- // uncompressed.
- DisableCompression bool
-
- // AllowHTTP, if true, permits HTTP/2 requests using the insecure,
- // plain-text "http" scheme. Note that this does not enable h2c support.
- AllowHTTP bool
-
- // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
- // send in the initial settings frame. It is how many bytes
- // of response headers are allowed. Unlike the http2 spec, zero here
- // means to use a default limit (currently 10MB). If you actually
- // want to advertise an unlimited value to the peer, Transport
- // interprets the highest possible value here (0xffffffff or 1<<32-1)
- // to mean no limit.
- MaxHeaderListSize uint32
-
- // StrictMaxConcurrentStreams controls whether the server's
- // SETTINGS_MAX_CONCURRENT_STREAMS should be respected
- // globally. If false, new TCP connections are created to the
- // server as needed to keep each under the per-connection
- // SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the
- // server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as
- // a global limit and callers of RoundTrip block when needed,
- // waiting for their turn.
- StrictMaxConcurrentStreams bool
-
- // ReadIdleTimeout is the timeout after which a health check using a ping
- // frame will be carried out if no frame is received on the connection.
- // Note that a ping response is considered a received frame, so if
- // there is no other traffic on the connection, the health check will
- // be performed every ReadIdleTimeout interval.
- // If zero, no health check is performed.
- ReadIdleTimeout time.Duration
-
- // PingTimeout is the timeout after which the connection will be closed
- // if a response to Ping is not received.
- // Defaults to 15s.
- PingTimeout time.Duration
-
- // WriteByteTimeout is the timeout after which the connection will be
- // closed if no data can be written to it. The timeout begins when data is
- // available to write, and is extended whenever any bytes are written.
- WriteByteTimeout time.Duration
-
- // CountError, if non-nil, is called on HTTP/2 transport errors.
- // It's intended to increment a metric for monitoring, such
- // as an expvar or Prometheus metric.
- // The errType consists of only ASCII word characters.
- CountError func(errType string)
-
- // t1, if non-nil, is the standard library Transport using
- // this transport. Its settings are used (but not its
- // RoundTrip method, etc).
- t1 *Transport
-
- connPoolOnce sync.Once
- connPoolOrDef http2ClientConnPool // non-nil version of ConnPool
-}
-
-func (t *http2Transport) maxHeaderListSize() uint32 {
- if t.MaxHeaderListSize == 0 {
- return 10 << 20
- }
- if t.MaxHeaderListSize == 0xffffffff {
- return 0
- }
- return t.MaxHeaderListSize
-}
-
-func (t *http2Transport) disableCompression() bool {
- return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
-}
-
-func (t *http2Transport) pingTimeout() time.Duration {
- if t.PingTimeout == 0 {
- return 15 * time.Second
- }
- return t.PingTimeout
-}
-
-// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
-// It returns an error if t1 has already been HTTP/2-enabled.
-//
-// Use ConfigureTransports instead to configure the HTTP/2 Transport.
-func http2ConfigureTransport(t1 *Transport) error {
- _, err := http2ConfigureTransports(t1)
- return err
-}
-
-// ConfigureTransports configures a net/http HTTP/1 Transport to use HTTP/2.
-// It returns a new HTTP/2 Transport for further configuration.
-// It returns an error if t1 has already been HTTP/2-enabled.
-func http2ConfigureTransports(t1 *Transport) (*http2Transport, error) {
- return http2configureTransports(t1)
-}
-
-func http2configureTransports(t1 *Transport) (*http2Transport, error) {
- connPool := new(http2clientConnPool)
- t2 := &http2Transport{
- ConnPool: http2noDialClientConnPool{connPool},
- t1: t1,
- }
- connPool.t = t2
- if err := http2registerHTTPSProtocol(t1, http2noDialH2RoundTripper{t2}); err != nil {
- return nil, err
- }
- if t1.TLSClientConfig == nil {
- t1.TLSClientConfig = new(tls.Config)
- }
- if !http2strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
- t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
- }
- if !http2strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
- t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
- }
- upgradeFn := func(authority string, c *tls.Conn) RoundTripper {
- addr := http2authorityAddr("https", authority)
- if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
- go c.Close()
- return http2erringRoundTripper{err}
- } else if !used {
- // Turns out we don't need this c.
- // For example, two goroutines may have made requests to the same host
- // at the same time, both kicking off TCP dials (since the protocol
- // was unknown).
- go c.Close()
- }
- return t2
- }
- if m := t1.TLSNextProto; len(m) == 0 {
- t1.TLSNextProto = map[string]func(string, *tls.Conn) RoundTripper{
- "h2": upgradeFn,
- }
- } else {
- m["h2"] = upgradeFn
- }
- return t2, nil
-}
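-
-// A minimal sketch of the resulting wiring (names as bundled here; in the
-// standalone golang.org/x/net/http2 package this is ConfigureTransports):
-//
-//  t1 := &Transport{} // HTTP/1 transport to upgrade
-//  t2, err := http2ConfigureTransports(t1)
-//  if err != nil {
-//      // t1 was already HTTP/2-enabled
-//  }
-//  t2.ReadIdleTimeout = 30 * time.Second // optional HTTP/2-only tuning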
-
-func (t *http2Transport) connPool() http2ClientConnPool {
- t.connPoolOnce.Do(t.initConnPool)
- return t.connPoolOrDef
-}
-
-func (t *http2Transport) initConnPool() {
- if t.ConnPool != nil {
- t.connPoolOrDef = t.ConnPool
- } else {
- t.connPoolOrDef = &http2clientConnPool{t: t}
- }
-}
-
-// ClientConn is the state of a single HTTP/2 client connection to an
-// HTTP/2 server.
-type http2ClientConn struct {
- t *http2Transport
- tconn net.Conn // usually *tls.Conn, except specialized impls
- tlsState *tls.ConnectionState // nil only for specialized impls
- reused uint32 // whether conn is being reused; atomic
- singleUse bool // whether being used for a single http.Request
- getConnCalled bool // used by clientConnPool
-
- // readLoop goroutine fields:
- readerDone chan struct{} // closed on error
- readerErr error // set before readerDone is closed
-
- idleTimeout time.Duration // or 0 for never
- idleTimer *time.Timer
-
- mu sync.Mutex // guards following
- cond *sync.Cond // hold mu; broadcast on flow/closed changes
- flow http2flow // our conn-level flow control quota (cs.flow is per stream)
- inflow http2flow // peer's conn-level flow control
- doNotReuse bool // whether conn is marked to not be reused for any future requests
- closing bool
- closed bool
- seenSettings bool // true if we've seen a settings frame, false otherwise
- wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
- goAway *http2GoAwayFrame // if non-nil, the GoAwayFrame we received
- goAwayDebug string // goAway frame's debug data, retained as a string
- streams map[uint32]*http2clientStream // client-initiated
- streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip
- nextStreamID uint32
- pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
- pings map[[8]byte]chan struct{} // in flight ping data to notification channel
- br *bufio.Reader
- lastActive time.Time
- lastIdle time.Time // time last idle
- // Settings from peer: (also guarded by wmu)
- maxFrameSize uint32
- maxConcurrentStreams uint32
- peerMaxHeaderListSize uint64
- initialWindowSize uint32
-
- // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
- // Write to reqHeaderMu to lock it, read from it to unlock.
- // Lock reqHeaderMu BEFORE mu or wmu.
- reqHeaderMu chan struct{}
-
- // wmu is held while writing.
- // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes.
- // Only acquire both at the same time when changing peer settings.
- wmu sync.Mutex
- bw *bufio.Writer
- fr *http2Framer
- werr error // first write error that has occurred
- hbuf bytes.Buffer // HPACK encoder writes into this
- henc *hpack.Encoder
-}
-
-// clientStream is the state for a single HTTP/2 stream. One of these
-// is created for each Transport.RoundTrip call.
-type http2clientStream struct {
- cc *http2ClientConn
-
- // Fields of Request that we may access even after the response body is closed.
- ctx context.Context
- reqCancel <-chan struct{}
-
- trace *httptrace.ClientTrace // or nil
- ID uint32
- bufPipe http2pipe // buffered pipe with the flow-controlled response payload
- requestedGzip bool
- isHead bool
-
- abortOnce sync.Once
- abort chan struct{} // closed to signal stream should end immediately
- abortErr error // set if abort is closed
-
- peerClosed chan struct{} // closed when the peer sends an END_STREAM flag
- donec chan struct{} // closed after the stream is in the closed state
- on100 chan struct{} // buffered; written to if a 100 is received
-
- respHeaderRecv chan struct{} // closed when headers are received
- res *Response // set if respHeaderRecv is closed
-
- flow http2flow // guarded by cc.mu
- inflow http2flow // guarded by cc.mu
- bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
- readErr error // sticky read error; owned by transportResponseBody.Read
-
- reqBody io.ReadCloser
- reqBodyContentLength int64 // -1 means unknown
- reqBodyClosed bool // body has been closed; guarded by cc.mu
-
- // owned by writeRequest:
- sentEndStream bool // sent an END_STREAM flag to the peer
- sentHeaders bool
-
- // owned by clientConnReadLoop:
- firstByte bool // got the first response byte
- pastHeaders bool // got first MetaHeadersFrame (actual headers)
- pastTrailers bool // got optional second MetaHeadersFrame (trailers)
- num1xx uint8 // number of 1xx responses seen
- readClosed bool // peer sent an END_STREAM flag
- readAborted bool // read loop reset the stream
-
- trailer Header // accumulated trailers
- resTrailer *Header // client's Response.Trailer
-}
-
-var http2got1xxFuncForTests func(int, textproto.MIMEHeader) error
-
-// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func,
-// if any. It returns nil if not set or if the Go version is too old.
-func (cs *http2clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error {
- if fn := http2got1xxFuncForTests; fn != nil {
- return fn
- }
- return http2traceGot1xxResponseFunc(cs.trace)
-}
-
-func (cs *http2clientStream) abortStream(err error) {
- cs.cc.mu.Lock()
- defer cs.cc.mu.Unlock()
- cs.abortStreamLocked(err)
-}
-
-func (cs *http2clientStream) abortStreamLocked(err error) {
- cs.abortOnce.Do(func() {
- cs.abortErr = err
- close(cs.abort)
- })
- if cs.reqBody != nil && !cs.reqBodyClosed {
- cs.reqBody.Close()
- cs.reqBodyClosed = true
- }
- // TODO(dneil): Clean up tests where cs.cc.cond is nil.
- if cs.cc.cond != nil {
- // Wake up writeRequestBody if it is waiting on flow control.
- cs.cc.cond.Broadcast()
- }
-}
-
-func (cs *http2clientStream) abortRequestBodyWrite() {
- cc := cs.cc
- cc.mu.Lock()
- defer cc.mu.Unlock()
- if cs.reqBody != nil && !cs.reqBodyClosed {
- cs.reqBody.Close()
- cs.reqBodyClosed = true
- cc.cond.Broadcast()
- }
-}
-
-type http2stickyErrWriter struct {
- conn net.Conn
- timeout time.Duration
- err *error
-}
-
-func (sew http2stickyErrWriter) Write(p []byte) (n int, err error) {
- if *sew.err != nil {
- return 0, *sew.err
- }
- for {
- if sew.timeout != 0 {
- sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout))
- }
- nn, err := sew.conn.Write(p[n:])
- n += nn
- if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) {
- // Keep extending the deadline so long as we're making progress.
- continue
- }
- if sew.timeout != 0 {
- sew.conn.SetWriteDeadline(time.Time{})
- }
- *sew.err = err
- return n, err
- }
-}
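-
-// For example: with timeout = 1s, a large write that keeps trickling bytes
-// never times out, because every partial write that makes progress re-arms
-// the deadline; a write that stalls completely fails with
-// os.ErrDeadlineExceeded after 1s, and every subsequent Write returns that
-// same sticky error.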
-
-// noCachedConnError is the concrete type of ErrNoCachedConn, which
-// needs to be detected by net/http regardless of whether it's its
-// bundled version (in h2_bundle.go with a rewritten type name) or
-// from a user's x/net/http2. As such, it has a unique method name
-// (IsHTTP2NoCachedConnError) that net/http sniffs for via func
-// isNoCachedConnError.
-type http2noCachedConnError struct{}
-
-func (http2noCachedConnError) IsHTTP2NoCachedConnError() {}
-
-func (http2noCachedConnError) Error() string { return "http2: no cached connection was available" }
-
-// isNoCachedConnError reports whether err is of type noCachedConnError
-// or its equivalent renamed type in net/http2's h2_bundle.go. Both types
-// may coexist in the same running program.
-func http2isNoCachedConnError(err error) bool {
- _, ok := err.(interface{ IsHTTP2NoCachedConnError() })
- return ok
-}
-
-var http2ErrNoCachedConn error = http2noCachedConnError{}
-
-// RoundTripOpt are options for the Transport.RoundTripOpt method.
-type http2RoundTripOpt struct {
- // OnlyCachedConn controls whether RoundTripOpt may
- // create a new TCP connection. If set true and
- // no cached connection is available, RoundTripOpt
- // will return ErrNoCachedConn.
- OnlyCachedConn bool
-}
-
-func (t *http2Transport) RoundTrip(req *Request) (*Response, error) {
- return t.RoundTripOpt(req, http2RoundTripOpt{})
-}
-
-// authorityAddr, given an authority (a host/IP, or host:port / ip:port),
-// returns a host:port. The port 443 is added if needed.
-func http2authorityAddr(scheme string, authority string) (addr string) {
- host, port, err := net.SplitHostPort(authority)
- if err != nil { // authority didn't have a port
- port = "443"
- if scheme == "http" {
- port = "80"
- }
- host = authority
- }
- if a, err := idna.ToASCII(host); err == nil {
- host = a
- }
- // IPv6 address literal, without a port:
- if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
- return host + ":" + port
- }
- return net.JoinHostPort(host, port)
-}
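-
-// Examples of the mapping above:
-//
-//  ("https", "example.com")      -> "example.com:443"
-//  ("http", "example.com")       -> "example.com:80"
-//  ("https", "example.com:8443") -> "example.com:8443"
-//  ("https", "[::1]")            -> "[::1]:443"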
-
-// RoundTripOpt is like RoundTrip, but takes options.
-func (t *http2Transport) RoundTripOpt(req *Request, opt http2RoundTripOpt) (*Response, error) {
- if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
- return nil, errors.New("http2: unsupported scheme")
- }
-
- addr := http2authorityAddr(req.URL.Scheme, req.URL.Host)
- for retry := 0; ; retry++ {
- cc, err := t.connPool().GetClientConn(req, addr)
- if err != nil {
- t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
- return nil, err
- }
- reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1)
- http2traceGotConn(req, cc, reused)
- res, err := cc.RoundTrip(req)
- if err != nil && retry <= 6 {
- if req, err = http2shouldRetryRequest(req, err); err == nil {
- // After the first retry, do exponential backoff with 10% jitter.
- if retry == 0 {
- continue
- }
- backoff := float64(uint(1) << (uint(retry) - 1))
- backoff += backoff * (0.1 * mathrand.Float64())
- select {
- case <-time.After(time.Second * time.Duration(backoff)):
- continue
- case <-req.Context().Done():
- err = req.Context().Err()
- }
- }
- }
- if err != nil {
- t.vlogf("RoundTrip failure: %v", err)
- return nil, err
- }
- return res, nil
- }
-}
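-
-// Worked example of the retry schedule above: the first retry happens
-// immediately (retry == 0); the next six wait 1s, 2s, 4s, 8s, 16s and 32s
-// respectively, each inflated by up to 10% jitter; after that the error is
-// returned to the caller.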
-
-// CloseIdleConnections closes any connections which were previously
-// connected from previous requests but are now sitting idle.
-// It does not interrupt any connections currently in use.
-func (t *http2Transport) CloseIdleConnections() {
- if cp, ok := t.connPool().(http2clientConnPoolIdleCloser); ok {
- cp.closeIdleConnections()
- }
-}
-
-var (
- http2errClientConnClosed = errors.New("http2: client conn is closed")
- http2errClientConnUnusable = errors.New("http2: client conn not usable")
- http2errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
-)
-
-// shouldRetryRequest is called by RoundTrip when a request fails to get
-// response headers. It is always called with a non-nil error.
-// It returns either a request to retry (either the same request, or a
-// modified clone), or an error if the request can't be replayed.
-func http2shouldRetryRequest(req *Request, err error) (*Request, error) {
- if !http2canRetryError(err) {
- return nil, err
- }
- // If the Body is nil (or http.NoBody), it's safe to reuse
- // this request and its Body.
- if req.Body == nil || req.Body == NoBody {
- return req, nil
- }
-
- // If the request body can be reset back to its original
- // state via the optional req.GetBody, do that.
- if req.GetBody != nil {
- body, err := req.GetBody()
- if err != nil {
- return nil, err
- }
- newReq := *req
- newReq.Body = body
- return &newReq, nil
- }
-
- // The Request.Body can't reset back to the beginning, but we
- // don't seem to have started to read from it yet, so reuse
- // the request directly.
- if err == http2errClientConnUnusable {
- return req, nil
- }
-
- return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err)
-}
-
-func http2canRetryError(err error) bool {
- if err == http2errClientConnUnusable || err == http2errClientConnGotGoAway {
- return true
- }
- if se, ok := err.(http2StreamError); ok {
- if se.Code == http2ErrCodeProtocol && se.Cause == http2errFromPeer {
- // See golang/go#47635, golang/go#42777
- return true
- }
- return se.Code == http2ErrCodeRefusedStream
- }
- return false
-}
-
-func (t *http2Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*http2ClientConn, error) {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return nil, err
- }
- tconn, err := t.dialTLS(ctx)("tcp", addr, t.newTLSConfig(host))
- if err != nil {
- return nil, err
- }
- return t.newClientConn(tconn, singleUse)
-}
-
-func (t *http2Transport) newTLSConfig(host string) *tls.Config {
- cfg := new(tls.Config)
- if t.TLSClientConfig != nil {
- *cfg = *t.TLSClientConfig.Clone()
- }
- if !http2strSliceContains(cfg.NextProtos, http2NextProtoTLS) {
- cfg.NextProtos = append([]string{http2NextProtoTLS}, cfg.NextProtos...)
- }
- if cfg.ServerName == "" {
- cfg.ServerName = host
- }
- return cfg
-}
-
-func (t *http2Transport) dialTLS(ctx context.Context) func(string, string, *tls.Config) (net.Conn, error) {
- if t.DialTLS != nil {
- return t.DialTLS
- }
- return func(network, addr string, cfg *tls.Config) (net.Conn, error) {
- tlsCn, err := t.dialTLSWithContext(ctx, network, addr, cfg)
- if err != nil {
- return nil, err
- }
- state := tlsCn.ConnectionState()
- if p := state.NegotiatedProtocol; p != http2NextProtoTLS {
- return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, http2NextProtoTLS)
- }
- if !state.NegotiatedProtocolIsMutual {
- return nil, errors.New("http2: could not negotiate protocol mutually")
- }
- return tlsCn, nil
- }
-}
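-
-// The ALPN check above means the server must advertise "h2"; a minimal,
-// hypothetical server-side tls.Config for that (cert is a placeholder):
-//
-//  cfg := &tls.Config{
-//      NextProtos:   []string{"h2", "http/1.1"},
-//      Certificates: []tls.Certificate{cert},
-//  }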
-
-// disableKeepAlives reports whether connections should be closed as
-// soon as possible after handling the first request.
-func (t *http2Transport) disableKeepAlives() bool {
- return t.t1 != nil && t.t1.DisableKeepAlives
-}
-
-func (t *http2Transport) expectContinueTimeout() time.Duration {
- if t.t1 == nil {
- return 0
- }
- return t.t1.ExpectContinueTimeout
-}
-
-func (t *http2Transport) NewClientConn(c net.Conn) (*http2ClientConn, error) {
- return t.newClientConn(c, t.disableKeepAlives())
-}
-
-func (t *http2Transport) newClientConn(c net.Conn, singleUse bool) (*http2ClientConn, error) {
- cc := &http2ClientConn{
- t: t,
- tconn: c,
- readerDone: make(chan struct{}),
- nextStreamID: 1,
- maxFrameSize: 16 << 10, // spec default
- initialWindowSize: 65535, // spec default
- maxConcurrentStreams: http2initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
- peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
- streams: make(map[uint32]*http2clientStream),
- singleUse: singleUse,
- wantSettingsAck: true,
- pings: make(map[[8]byte]chan struct{}),
- reqHeaderMu: make(chan struct{}, 1),
- }
- if d := t.idleConnTimeout(); d != 0 {
- cc.idleTimeout = d
- cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
- }
- if http2VerboseLogs {
- t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
- }
-
- cc.cond = sync.NewCond(&cc.mu)
- cc.flow.add(int32(http2initialWindowSize))
-
- // TODO: adjust this writer size to account for frame size +
- // MTU + crypto/tls record padding.
- cc.bw = bufio.NewWriter(http2stickyErrWriter{
- conn: c,
- timeout: t.WriteByteTimeout,
- err: &cc.werr,
- })
- cc.br = bufio.NewReader(c)
- cc.fr = http2NewFramer(cc.bw, cc.br)
- if t.CountError != nil {
- cc.fr.countError = t.CountError
- }
- cc.fr.ReadMetaHeaders = hpack.NewDecoder(http2initialHeaderTableSize, nil)
- cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
-
- // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
- // henc in response to SETTINGS frames?
- cc.henc = hpack.NewEncoder(&cc.hbuf)
-
- if t.AllowHTTP {
- cc.nextStreamID = 3
- }
-
- if cs, ok := c.(http2connectionStater); ok {
- state := cs.ConnectionState()
- cc.tlsState = &state
- }
-
- initialSettings := []http2Setting{
- {ID: http2SettingEnablePush, Val: 0},
- {ID: http2SettingInitialWindowSize, Val: http2transportDefaultStreamFlow},
- }
- if max := t.maxHeaderListSize(); max != 0 {
- initialSettings = append(initialSettings, http2Setting{ID: http2SettingMaxHeaderListSize, Val: max})
- }
-
- cc.bw.Write(http2clientPreface)
- cc.fr.WriteSettings(initialSettings...)
- cc.fr.WriteWindowUpdate(0, http2transportDefaultConnFlow)
- cc.inflow.add(http2transportDefaultConnFlow + http2initialWindowSize)
- cc.bw.Flush()
- if cc.werr != nil {
- cc.Close()
- return nil, cc.werr
- }
-
- go cc.readLoop()
- return cc, nil
-}
-
-func (cc *http2ClientConn) healthCheck() {
- pingTimeout := cc.t.pingTimeout()
- // There's no need to ping periodically from within the health check:
- // the ClientConn's readLoop will trigger healthCheck again if no frame
- // arrives within the read idle timeout.
- ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
- defer cancel()
- err := cc.Ping(ctx)
- if err != nil {
- cc.closeForLostPing()
- cc.t.connPool().MarkDead(cc)
- return
- }
-}
-
-// SetDoNotReuse marks cc as not reusable for future HTTP requests.
-func (cc *http2ClientConn) SetDoNotReuse() {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- cc.doNotReuse = true
-}
-
-func (cc *http2ClientConn) setGoAway(f *http2GoAwayFrame) {
- cc.mu.Lock()
- defer cc.mu.Unlock()
-
- old := cc.goAway
- cc.goAway = f
-
- // Merge the previous and current GoAway error frames.
- if cc.goAwayDebug == "" {
- cc.goAwayDebug = string(f.DebugData())
- }
- if old != nil && old.ErrCode != http2ErrCodeNo {
- cc.goAway.ErrCode = old.ErrCode
- }
- last := f.LastStreamID
- for streamID, cs := range cc.streams {
- if streamID > last {
- cs.abortStreamLocked(http2errClientConnGotGoAway)
- }
- }
-}
-
-// CanTakeNewRequest reports whether the connection can take a new request,
-// meaning it has not been closed and has neither sent nor received a GOAWAY.
-//
-// If the caller is going to immediately make a new request on this
-// connection, use ReserveNewRequest instead.
-func (cc *http2ClientConn) CanTakeNewRequest() bool {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- return cc.canTakeNewRequestLocked()
-}
-
-// ReserveNewRequest is like CanTakeNewRequest but also reserves a
-// concurrent stream in cc. The reservation is decremented on the
-// next call to RoundTrip.
-func (cc *http2ClientConn) ReserveNewRequest() bool {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- if st := cc.idleStateLocked(); !st.canTakeNewRequest {
- return false
- }
- cc.streamsReserved++
- return true
-}
-
-// ClientConnState describes the state of a ClientConn.
-type http2ClientConnState struct {
- // Closed is whether the connection is closed.
- Closed bool
-
- // Closing is whether the connection is in the process of
- // closing. It may be closing due to shutdown, being a
- // single-use connection, being marked as DoNotReuse, or
- // having received a GOAWAY frame.
- Closing bool
-
- // StreamsActive is how many streams are active.
- StreamsActive int
-
- // StreamsReserved is how many streams have been reserved via
- // ClientConn.ReserveNewRequest.
- StreamsReserved int
-
- // StreamsPending is how many requests have been sent in excess
- // of the peer's advertised MaxConcurrentStreams setting and
- // are waiting for other streams to complete.
- StreamsPending int
-
- // MaxConcurrentStreams is how many concurrent streams the
- // peer advertised as acceptable. Zero means no SETTINGS
- // frame has been received yet.
- MaxConcurrentStreams uint32
-
- // LastIdle, if non-zero, is when the connection last
- // transitioned to idle state.
- LastIdle time.Time
-}
-
-// State returns a snapshot of cc's state.
-func (cc *http2ClientConn) State() http2ClientConnState {
- cc.wmu.Lock()
- maxConcurrent := cc.maxConcurrentStreams
- if !cc.seenSettings {
- maxConcurrent = 0
- }
- cc.wmu.Unlock()
-
- cc.mu.Lock()
- defer cc.mu.Unlock()
- return http2ClientConnState{
- Closed: cc.closed,
- Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil,
- StreamsActive: len(cc.streams),
- StreamsReserved: cc.streamsReserved,
- StreamsPending: cc.pendingRequests,
- LastIdle: cc.lastIdle,
- MaxConcurrentStreams: maxConcurrent,
- }
-}
-
-// clientConnIdleState describes the suitability of a client
-// connection to initiate a new RoundTrip request.
-type http2clientConnIdleState struct {
- canTakeNewRequest bool
-}
-
-func (cc *http2ClientConn) idleState() http2clientConnIdleState {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- return cc.idleStateLocked()
-}
-
-func (cc *http2ClientConn) idleStateLocked() (st http2clientConnIdleState) {
- if cc.singleUse && cc.nextStreamID > 1 {
- return
- }
- var maxConcurrentOkay bool
- if cc.t.StrictMaxConcurrentStreams {
- // We'll tell the caller we can take a new request to
- // prevent the caller from dialing a new TCP
- // connection, but then we'll block later before
- // writing it.
- maxConcurrentOkay = true
- } else {
- maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams)
- }
-
- st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
- !cc.doNotReuse &&
- int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
- !cc.tooIdleLocked()
- return
-}
-
-func (cc *http2ClientConn) canTakeNewRequestLocked() bool {
- st := cc.idleStateLocked()
- return st.canTakeNewRequest
-}
-
-// tooIdleLocked reports whether this connection has been sitting idle
-// for too much wall time.
-func (cc *http2ClientConn) tooIdleLocked() bool {
- // The Round(0) strips the monotonic clock reading so the
- // times are compared based on their wall time. We don't want
- // to reuse a connection that's been sitting idle during
- // VM/laptop suspend if monotonic time was also frozen.
- return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
-}
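-
-// Illustrative sketch, not part of the original file: Round(0) strips a
-// Time's monotonic clock reading, so time.Since then measures elapsed
-// wall time. A monotonic reading frozen across a VM/laptop suspend would
-// otherwise make a long-idle connection look fresh. exampleWallClockIdle
-// is a hypothetical standalone version of the check above.
-func exampleWallClockIdle(lastIdle time.Time, idleTimeout time.Duration) bool {
- return idleTimeout != 0 && !lastIdle.IsZero() &&
- time.Since(lastIdle.Round(0)) > idleTimeout
-}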
-
-// onIdleTimeout is called from a time.AfterFunc goroutine. It will
-// only be called when we're idle, but because we're coming from a new
-// goroutine, there could be a new request coming in at the same time,
-// so this simply calls the synchronized closeIfIdle to shut down this
-// connection. The timer could just call closeIfIdle, but this is more
-// clear.
-func (cc *http2ClientConn) onIdleTimeout() {
- cc.closeIfIdle()
-}
-
-func (cc *http2ClientConn) closeIfIdle() {
- cc.mu.Lock()
- if len(cc.streams) > 0 || cc.streamsReserved > 0 {
- cc.mu.Unlock()
- return
- }
- cc.closed = true
- nextID := cc.nextStreamID
- // TODO: do clients send GOAWAY too? maybe? Just Close:
- cc.mu.Unlock()
-
- if http2VerboseLogs {
- cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
- }
- cc.tconn.Close()
-}
-
-func (cc *http2ClientConn) isDoNotReuseAndIdle() bool {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- return cc.doNotReuse && len(cc.streams) == 0
-}
-
-var http2shutdownEnterWaitStateHook = func() {}
-
-// Shutdown gracefully closes the client connection, waiting for running streams to complete.
-func (cc *http2ClientConn) Shutdown(ctx context.Context) error {
- if err := cc.sendGoAway(); err != nil {
- return err
- }
- // Wait for all in-flight streams to complete or the connection to close.
- done := make(chan error, 1)
- cancelled := false // guarded by cc.mu
- go func() {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- for {
- if len(cc.streams) == 0 || cc.closed {
- cc.closed = true
- done <- cc.tconn.Close()
- break
- }
- if cancelled {
- break
- }
- cc.cond.Wait()
- }
- }()
- http2shutdownEnterWaitStateHook()
- select {
- case err := <-done:
- return err
- case <-ctx.Done():
- cc.mu.Lock()
- // Free the goroutine above
- cancelled = true
- cc.cond.Broadcast()
- cc.mu.Unlock()
- return ctx.Err()
- }
-}
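-
-// Illustrative usage sketch, not part of the original file: drain a
-// connection with a bounded grace period, then force-close whatever is
-// left. The five-second budget is an assumption, not a library default.
-func exampleGracefulClose(cc *http2ClientConn) error {
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
- if err := cc.Shutdown(ctx); err != nil {
- // Grace period expired (or Shutdown failed); interrupt the rest.
- return cc.Close()
- }
- return nil
-}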
-
-func (cc *http2ClientConn) sendGoAway() error {
- cc.mu.Lock()
- closing := cc.closing
- cc.closing = true
- maxStreamID := cc.nextStreamID
- cc.mu.Unlock()
- if closing {
- // GOAWAY sent already
- return nil
- }
-
- cc.wmu.Lock()
- defer cc.wmu.Unlock()
- // Send a graceful shutdown frame to server
- if err := cc.fr.WriteGoAway(maxStreamID, http2ErrCodeNo, nil); err != nil {
- return err
- }
- if err := cc.bw.Flush(); err != nil {
- return err
- }
- // Prevent new requests
- return nil
-}
-
-// closeForError closes the client connection immediately. In-flight
-// requests are interrupted and err is delivered to their streams.
-func (cc *http2ClientConn) closeForError(err error) error {
- cc.mu.Lock()
- cc.closed = true
- for _, cs := range cc.streams {
- cs.abortStreamLocked(err)
- }
- defer cc.cond.Broadcast()
- defer cc.mu.Unlock()
- return cc.tconn.Close()
-}
-
-// Close closes the client connection immediately.
-//
-// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
-func (cc *http2ClientConn) Close() error {
- err := errors.New("http2: client connection force closed via ClientConn.Close")
- return cc.closeForError(err)
-}
-
-// closes the client connection immediately. In-flight requests are interrupted.
-func (cc *http2ClientConn) closeForLostPing() error {
- err := errors.New("http2: client connection lost")
- if f := cc.t.CountError; f != nil {
- f("conn_close_lost_ping")
- }
- return cc.closeForError(err)
-}
-
-// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
-// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
-var http2errRequestCanceled = errors.New("net/http: request canceled")
-
-func http2commaSeparatedTrailers(req *Request) (string, error) {
- keys := make([]string, 0, len(req.Trailer))
- for k := range req.Trailer {
- k = CanonicalHeaderKey(k)
- switch k {
- case "Transfer-Encoding", "Trailer", "Content-Length":
- return "", fmt.Errorf("invalid Trailer key %q", k)
- }
- keys = append(keys, k)
- }
- if len(keys) > 0 {
- sort.Strings(keys)
- return strings.Join(keys, ","), nil
- }
- return "", nil
-}
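-
-// Illustrative sketch, not part of the original file: Trailer keys are
-// canonicalized, sorted, and comma-joined, so a request declaring
-// "x-status" and "X-Checksum" advertises "trailer: X-Checksum,X-Status".
-// exampleTrailerValue is a hypothetical helper showing that round trip.
-func exampleTrailerValue() (string, error) {
- req := &Request{Trailer: Header{
- "x-status": nil,
- "X-Checksum": nil,
- }}
- return http2commaSeparatedTrailers(req) // "X-Checksum,X-Status", nil
-}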
-
-func (cc *http2ClientConn) responseHeaderTimeout() time.Duration {
- if cc.t.t1 != nil {
- return cc.t.t1.ResponseHeaderTimeout
- }
- // No way to do this (yet?) with just an http2.Transport. Probably
- // no need: Request.Cancel is the new way. We only need to support
- // this for compatibility with the old http.Transport fields when
- // we're doing transparent http2.
- return 0
-}
-
-// checkConnHeaders checks whether req has any invalid connection-level headers,
-// per RFC 7540 section 8.1.2.2 (Connection-Specific Header Fields).
-// Certain headers are special-cased as okay but not transmitted later.
-func http2checkConnHeaders(req *Request) error {
- if v := req.Header.Get("Upgrade"); v != "" {
- return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
- }
- if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
- return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
- }
- if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !http2asciiEqualFold(vv[0], "close") && !http2asciiEqualFold(vv[0], "keep-alive")) {
- return fmt.Errorf("http2: invalid Connection request header: %q", vv)
- }
- return nil
-}
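-
-// Illustrative sketch, not part of the original file: HTTP/2 has no use
-// for connection-specific fields, so only the tolerated values pass.
-// "Connection: keep-alive" is accepted here (and later dropped rather
-// than sent), while any Upgrade header fails the request up front.
-func exampleConnHeaderCheck() error {
- req := &Request{Header: Header{
- "Upgrade": {"websocket"}, // always invalid over HTTP/2
- "Connection": {"keep-alive"}, // tolerated, never transmitted
- }}
- return http2checkConnHeaders(req) // non-nil: reports the Upgrade header
-}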
-
-// actualContentLength returns a sanitized version of
-// req.ContentLength, where 0 actually means zero (not unknown) and -1
-// means unknown.
-func http2actualContentLength(req *Request) int64 {
- if req.Body == nil || req.Body == NoBody {
- return 0
- }
- if req.ContentLength != 0 {
- return req.ContentLength
- }
- return -1
-}
-
-func (cc *http2ClientConn) decrStreamReservations() {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- cc.decrStreamReservationsLocked()
-}
-
-func (cc *http2ClientConn) decrStreamReservationsLocked() {
- if cc.streamsReserved > 0 {
- cc.streamsReserved--
- }
-}
-
-func (cc *http2ClientConn) RoundTrip(req *Request) (*Response, error) {
- ctx := req.Context()
- cs := &http2clientStream{
- cc: cc,
- ctx: ctx,
- reqCancel: req.Cancel,
- isHead: req.Method == "HEAD",
- reqBody: req.Body,
- reqBodyContentLength: http2actualContentLength(req),
- trace: httptrace.ContextClientTrace(ctx),
- peerClosed: make(chan struct{}),
- abort: make(chan struct{}),
- respHeaderRecv: make(chan struct{}),
- donec: make(chan struct{}),
- }
- go cs.doRequest(req)
-
- waitDone := func() error {
- select {
- case <-cs.donec:
- return nil
- case <-ctx.Done():
- return ctx.Err()
- case <-cs.reqCancel:
- return http2errRequestCanceled
- }
- }
-
- handleResponseHeaders := func() (*Response, error) {
- res := cs.res
- if res.StatusCode > 299 {
- // On error or status code 3xx, 4xx, 5xx, etc abort any
- // ongoing write, assuming that the server doesn't care
- // about our request body. If the server replied with 1xx or
- // 2xx, however, then assume the server DOES potentially
- // want our body (e.g. full-duplex streaming:
- // golang.org/issue/13444). If it turns out the server
- // doesn't, they'll RST_STREAM us soon enough. This is a
- // heuristic to avoid adding knobs to Transport. Hopefully
- // we can keep it.
- cs.abortRequestBodyWrite()
- }
- res.Request = req
- res.TLS = cc.tlsState
- if res.Body == http2noBody && http2actualContentLength(req) == 0 {
- // If there isn't a request or response body still being
- // written, then wait for the stream to be closed before
- // RoundTrip returns.
- if err := waitDone(); err != nil {
- return nil, err
- }
- }
- return res, nil
- }
-
- for {
- select {
- case <-cs.respHeaderRecv:
- return handleResponseHeaders()
- case <-cs.abort:
- select {
- case <-cs.respHeaderRecv:
- // If both cs.respHeaderRecv and cs.abort are signaling,
- // pick respHeaderRecv. The server probably wrote the
- // response and immediately reset the stream.
- // golang.org/issue/49645
- return handleResponseHeaders()
- default:
- waitDone()
- return nil, cs.abortErr
- }
- case <-ctx.Done():
- err := ctx.Err()
- cs.abortStream(err)
- return nil, err
- case <-cs.reqCancel:
- cs.abortStream(http2errRequestCanceled)
- return nil, http2errRequestCanceled
- }
- }
-}
-
-// doRequest runs for the duration of the request lifetime.
-//
-// It sends the request and performs post-request cleanup (closing Request.Body, etc.).
-func (cs *http2clientStream) doRequest(req *Request) {
- err := cs.writeRequest(req)
- cs.cleanupWriteRequest(err)
-}
-
-// writeRequest sends a request.
-//
-// It returns nil after the request is written, the response read,
-// and the request stream is half-closed by the peer.
-//
-// It returns a non-nil error if the request ends for any other reason.
-// If the returned error is StreamError, the error Code may be used in resetting the stream.
-func (cs *http2clientStream) writeRequest(req *Request) (err error) {
- cc := cs.cc
- ctx := cs.ctx
-
- if err := http2checkConnHeaders(req); err != nil {
- return err
- }
-
- // Acquire the new-request lock by writing to reqHeaderMu.
- // This lock guards the critical section covering allocating a new stream ID
- // (requires mu) and creating the stream (requires wmu).
- if cc.reqHeaderMu == nil {
- panic("RoundTrip on uninitialized ClientConn") // for tests
- }
- select {
- case cc.reqHeaderMu <- struct{}{}:
- case <-cs.reqCancel:
- return http2errRequestCanceled
- case <-ctx.Done():
- return ctx.Err()
- }
-
- cc.mu.Lock()
- if cc.idleTimer != nil {
- cc.idleTimer.Stop()
- }
- cc.decrStreamReservationsLocked()
- if err := cc.awaitOpenSlotForStreamLocked(cs); err != nil {
- cc.mu.Unlock()
- <-cc.reqHeaderMu
- return err
- }
- cc.addStreamLocked(cs) // assigns stream ID
- if http2isConnectionCloseRequest(req) {
- cc.doNotReuse = true
- }
- cc.mu.Unlock()
-
- // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
- if !cc.t.disableCompression() &&
- req.Header.Get("Accept-Encoding") == "" &&
- req.Header.Get("Range") == "" &&
- !cs.isHead {
- // Request gzip only, not deflate. Deflate is ambiguous and
- // not as universally supported anyway.
- // See: https://zlib.net/zlib_faq.html#faq39
- //
- // Note that we don't request this for HEAD requests,
- // due to a bug in nginx:
- // http://trac.nginx.org/nginx/ticket/358
- // https://golang.org/issue/5522
- //
- // We don't request gzip if the request is for a range, since
- // auto-decoding a portion of a gzipped document will just fail
- // anyway. See https://golang.org/issue/8923
- cs.requestedGzip = true
- }
-
- continueTimeout := cc.t.expectContinueTimeout()
- if continueTimeout != 0 {
- if !httpguts.HeaderValuesContainsToken(req.Header["Expect"], "100-continue") {
- continueTimeout = 0
- } else {
- cs.on100 = make(chan struct{}, 1)
- }
- }
-
- // Past this point (where we send request headers), it is possible for
- // RoundTrip to return successfully. Since the RoundTrip contract permits
- // the caller to "mutate or reuse" the Request after closing the Response's Body,
- // we must take care when referencing the Request from here on.
- err = cs.encodeAndWriteHeaders(req)
- <-cc.reqHeaderMu
- if err != nil {
- return err
- }
-
- hasBody := cs.reqBodyContentLength != 0
- if !hasBody {
- cs.sentEndStream = true
- } else {
- if continueTimeout != 0 {
- http2traceWait100Continue(cs.trace)
- timer := time.NewTimer(continueTimeout)
- select {
- case <-timer.C:
- err = nil
- case <-cs.on100:
- err = nil
- case <-cs.abort:
- err = cs.abortErr
- case <-ctx.Done():
- err = ctx.Err()
- case <-cs.reqCancel:
- err = http2errRequestCanceled
- }
- timer.Stop()
- if err != nil {
- http2traceWroteRequest(cs.trace, err)
- return err
- }
- }
-
- if err = cs.writeRequestBody(req); err != nil {
- if err != http2errStopReqBodyWrite {
- http2traceWroteRequest(cs.trace, err)
- return err
- }
- } else {
- cs.sentEndStream = true
- }
- }
-
- http2traceWroteRequest(cs.trace, err)
-
- var respHeaderTimer <-chan time.Time
- var respHeaderRecv chan struct{}
- if d := cc.responseHeaderTimeout(); d != 0 {
- timer := time.NewTimer(d)
- defer timer.Stop()
- respHeaderTimer = timer.C
- respHeaderRecv = cs.respHeaderRecv
- }
- // Wait until the peer half-closes its end of the stream,
- // or until the request is aborted (via context, error, or otherwise),
- // whichever comes first.
- for {
- select {
- case <-cs.peerClosed:
- return nil
- case <-respHeaderTimer:
- return http2errTimeout
- case <-respHeaderRecv:
- respHeaderRecv = nil
- respHeaderTimer = nil // keep waiting for END_STREAM
- case <-cs.abort:
- return cs.abortErr
- case <-ctx.Done():
- return ctx.Err()
- case <-cs.reqCancel:
- return http2errRequestCanceled
- }
- }
-}
-
-func (cs *http2clientStream) encodeAndWriteHeaders(req *Request) error {
- cc := cs.cc
- ctx := cs.ctx
-
- cc.wmu.Lock()
- defer cc.wmu.Unlock()
-
- // If the request was canceled while waiting for cc.wmu, just quit.
- select {
- case <-cs.abort:
- return cs.abortErr
- case <-ctx.Done():
- return ctx.Err()
- case <-cs.reqCancel:
- return http2errRequestCanceled
- default:
- }
-
- // Encode headers.
- //
- // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
- // sent by writeRequestBody below, along with any Trailers,
- // again in form HEADERS{1}, CONTINUATION{0,})
- trailers, err := http2commaSeparatedTrailers(req)
- if err != nil {
- return err
- }
- hasTrailers := trailers != ""
- contentLen := http2actualContentLength(req)
- hasBody := contentLen != 0
- hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen)
- if err != nil {
- return err
- }
-
- // Write the request.
- endStream := !hasBody && !hasTrailers
- cs.sentHeaders = true
- err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
- http2traceWroteHeaders(cs.trace)
- return err
-}
-
-// cleanupWriteRequest performs post-request tasks.
-//
-// If err (the result of writeRequest) is non-nil and the stream is not closed,
-// cleanupWriteRequest will send a reset to the peer.
-func (cs *http2clientStream) cleanupWriteRequest(err error) {
- cc := cs.cc
-
- if cs.ID == 0 {
- // We were canceled before creating the stream, so return our reservation.
- cc.decrStreamReservations()
- }
-
- // TODO: write h12Compare test showing whether
- // Request.Body is closed by the Transport,
- // and in multiple cases: server replies <=299 and >299
- // while still writing request body
- cc.mu.Lock()
- bodyClosed := cs.reqBodyClosed
- cs.reqBodyClosed = true
- cc.mu.Unlock()
- if !bodyClosed && cs.reqBody != nil {
- cs.reqBody.Close()
- }
-
- if err != nil && cs.sentEndStream {
- // If the connection is closed immediately after the response is read,
- // we may be aborted before finishing up here. If the stream was closed
- // cleanly on both sides, there is no error.
- select {
- case <-cs.peerClosed:
- err = nil
- default:
- }
- }
- if err != nil {
- cs.abortStream(err) // possibly redundant, but harmless
- if cs.sentHeaders {
- if se, ok := err.(http2StreamError); ok {
- if se.Cause != http2errFromPeer {
- cc.writeStreamReset(cs.ID, se.Code, err)
- }
- } else {
- cc.writeStreamReset(cs.ID, http2ErrCodeCancel, err)
- }
- }
- cs.bufPipe.CloseWithError(err) // no-op if already closed
- } else {
- if cs.sentHeaders && !cs.sentEndStream {
- cc.writeStreamReset(cs.ID, http2ErrCodeNo, nil)
- }
- cs.bufPipe.CloseWithError(http2errRequestCanceled)
- }
- if cs.ID != 0 {
- cc.forgetStreamID(cs.ID)
- }
-
- cc.wmu.Lock()
- werr := cc.werr
- cc.wmu.Unlock()
- if werr != nil {
- cc.Close()
- }
-
- close(cs.donec)
-}
-
-// awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams.
-// cc.mu must be held.
-func (cc *http2ClientConn) awaitOpenSlotForStreamLocked(cs *http2clientStream) error {
- for {
- cc.lastActive = time.Now()
- if cc.closed || !cc.canTakeNewRequestLocked() {
- return http2errClientConnUnusable
- }
- cc.lastIdle = time.Time{}
- if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) {
- return nil
- }
- cc.pendingRequests++
- cc.cond.Wait()
- cc.pendingRequests--
- select {
- case <-cs.abort:
- return cs.abortErr
- default:
- }
- }
-}
-
-// requires cc.wmu be held
-func (cc *http2ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error {
- first := true // first frame written (HEADERS is first, then CONTINUATION)
- for len(hdrs) > 0 && cc.werr == nil {
- chunk := hdrs
- if len(chunk) > maxFrameSize {
- chunk = chunk[:maxFrameSize]
- }
- hdrs = hdrs[len(chunk):]
- endHeaders := len(hdrs) == 0
- if first {
- cc.fr.WriteHeaders(http2HeadersFrameParam{
- StreamID: streamID,
- BlockFragment: chunk,
- EndStream: endStream,
- EndHeaders: endHeaders,
- })
- first = false
- } else {
- cc.fr.WriteContinuation(streamID, endHeaders, chunk)
- }
- }
- cc.bw.Flush()
- return cc.werr
-}
-
-// internal error values; they don't escape to callers
-var (
- // abort request body write; don't send cancel
- http2errStopReqBodyWrite = errors.New("http2: aborting request body write")
-
- // abort request body write, but send a stream reset of CANCEL.
- http2errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
-
- http2errReqBodyTooLong = errors.New("http2: request body larger than specified content length")
-)
-
-// frameScratchBufferLen returns the length of the scratch buffer used for
-// reading the outgoing request body and writing DATA frames.
-//
-// It returns max(1, min(peer's advertised max frame size,
-// Request.ContentLength+1, 512KB)).
-func (cs *http2clientStream) frameScratchBufferLen(maxFrameSize int) int {
- const max = 512 << 10
- n := int64(maxFrameSize)
- if n > max {
- n = max
- }
- if cl := cs.reqBodyContentLength; cl != -1 && cl+1 < n {
- // Add an extra byte past the declared content-length to
- // give the caller's Request.Body io.Reader a chance to
- // give us more bytes than they declared, so we can catch it
- // early.
- n = cl + 1
- }
- if n < 1 {
- return 1
- }
- return int(n) // doesn't truncate; max is 512K
-}
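-
-// Illustrative sketch, not part of the original file: worked values for
-// the max(1, min(maxFrameSize, ContentLength+1, 512KB)) formula above.
-// exampleScratchLens is a hypothetical helper; only reqBodyContentLength
-// matters to frameScratchBufferLen.
-func exampleScratchLens() []int {
- mk := func(cl int64, maxFrame int) int {
- cs := &http2clientStream{reqBodyContentLength: cl}
- return cs.frameScratchBufferLen(maxFrame)
- }
- return []int{
- mk(10, 16<<10), // 11: ContentLength+1 wins, catching over-long bodies early
- mk(-1, 16<<10), // 16384: unknown length, the peer's frame size wins
- mk(-1, 1<<30), // 524288: capped at 512KB
- mk(0, 16<<10), // 1: floor of one byte for empty bodies
- }
-}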
-
-var http2bufPool sync.Pool // of *[]byte
-
-func (cs *http2clientStream) writeRequestBody(req *Request) (err error) {
- cc := cs.cc
- body := cs.reqBody
- sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
-
- hasTrailers := req.Trailer != nil
- remainLen := cs.reqBodyContentLength
- hasContentLen := remainLen != -1
-
- cc.mu.Lock()
- maxFrameSize := int(cc.maxFrameSize)
- cc.mu.Unlock()
-
- // Scratch buffer for reading into & writing from.
- scratchLen := cs.frameScratchBufferLen(maxFrameSize)
- var buf []byte
- if bp, ok := http2bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen {
- defer http2bufPool.Put(bp)
- buf = *bp
- } else {
- buf = make([]byte, scratchLen)
- defer http2bufPool.Put(&buf)
- }
-
- var sawEOF bool
- for !sawEOF {
- n, err := body.Read(buf)
- if hasContentLen {
- remainLen -= int64(n)
- if remainLen == 0 && err == nil {
- // The request body's Content-Length was predeclared and
- // we just finished reading it all, but the underlying io.Reader
- // returned the final chunk with a nil error (which is one of
- // the two valid things a Reader can do at EOF). Because we'd prefer
- // to send the END_STREAM bit early, double-check that we're actually
- // at EOF. Subsequent reads should return (0, EOF) at this point.
- // If either value is different, we return an error in one of two ways below.
- var scratch [1]byte
- var n1 int
- n1, err = body.Read(scratch[:])
- remainLen -= int64(n1)
- }
- if remainLen < 0 {
- err = http2errReqBodyTooLong
- return err
- }
- }
- if err != nil {
- cc.mu.Lock()
- bodyClosed := cs.reqBodyClosed
- cc.mu.Unlock()
- switch {
- case bodyClosed:
- return http2errStopReqBodyWrite
- case err == io.EOF:
- sawEOF = true
- err = nil
- default:
- return err
- }
- }
-
- remain := buf[:n]
- for len(remain) > 0 && err == nil {
- var allowed int32
- allowed, err = cs.awaitFlowControl(len(remain))
- if err != nil {
- return err
- }
- cc.wmu.Lock()
- data := remain[:allowed]
- remain = remain[allowed:]
- sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
- err = cc.fr.WriteData(cs.ID, sentEnd, data)
- if err == nil {
- // TODO(bradfitz): this flush is for latency, not bandwidth.
- // Most requests won't need this. Make this opt-in or
- // opt-out? Use some heuristic on the body type? Nagle-like
- // timers? Based on 'n'? Only last chunk of this for loop,
- // unless flow control tokens are low? For now, always.
- // If we change this, see comment below.
- err = cc.bw.Flush()
- }
- cc.wmu.Unlock()
- }
- if err != nil {
- return err
- }
- }
-
- if sentEnd {
- // Already sent END_STREAM (which implies we have no
- // trailers) and flushed, because currently all
- // WriteData frames above get a flush. So we're done.
- return nil
- }
-
- // Since the RoundTrip contract permits the caller to "mutate or reuse"
- // a request after the Response's Body is closed, verify that this hasn't
- // happened before accessing the trailers.
- cc.mu.Lock()
- trailer := req.Trailer
- err = cs.abortErr
- cc.mu.Unlock()
- if err != nil {
- return err
- }
-
- cc.wmu.Lock()
- defer cc.wmu.Unlock()
- var trls []byte
- if len(trailer) > 0 {
- trls, err = cc.encodeTrailers(trailer)
- if err != nil {
- return err
- }
- }
-
- // Two ways to send END_STREAM: either with trailers, or
- // with an empty DATA frame.
- if len(trls) > 0 {
- err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls)
- } else {
- err = cc.fr.WriteData(cs.ID, true, nil)
- }
- if ferr := cc.bw.Flush(); ferr != nil && err == nil {
- err = ferr
- }
- return err
-}
-
-// awaitFlowControl waits for [1, min(maxBytes, cc.maxFrameSize)] flow
-// control tokens from the server.
-// It returns either the non-zero number of tokens taken or an error
-// if the stream is dead.
-func (cs *http2clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
- cc := cs.cc
- ctx := cs.ctx
- cc.mu.Lock()
- defer cc.mu.Unlock()
- for {
- if cc.closed {
- return 0, http2errClientConnClosed
- }
- if cs.reqBodyClosed {
- return 0, http2errStopReqBodyWrite
- }
- select {
- case <-cs.abort:
- return 0, cs.abortErr
- case <-ctx.Done():
- return 0, ctx.Err()
- case <-cs.reqCancel:
- return 0, http2errRequestCanceled
- default:
- }
- if a := cs.flow.available(); a > 0 {
- take := a
- if int(take) > maxBytes {
- take = int32(maxBytes) // can't truncate int; take is int32
- }
- if take > int32(cc.maxFrameSize) {
- take = int32(cc.maxFrameSize)
- }
- cs.flow.take(take)
- return take, nil
- }
- cc.cond.Wait()
- }
-}
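-
-// Illustrative sketch, not part of the original file: the clamp applied
-// above, written out. Each DATA frame carries min(stream window, pending
-// bytes, peer's SETTINGS_MAX_FRAME_SIZE) bytes, and awaitFlowControl only
-// returns once that value is positive.
-func exampleFlowClamp(available int32, maxBytes int, maxFrameSize uint32) int32 {
- take := available
- if int(take) > maxBytes {
- take = int32(maxBytes)
- }
- if take > int32(maxFrameSize) {
- take = int32(maxFrameSize)
- }
- return take
-}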
-
-var http2errNilRequestURL = errors.New("http2: Request.URL is nil")
-
-// requires cc.wmu be held.
-func (cc *http2ClientConn) encodeHeaders(req *Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
- cc.hbuf.Reset()
- if req.URL == nil {
- return nil, http2errNilRequestURL
- }
-
- host := req.Host
- if host == "" {
- host = req.URL.Host
- }
- host, err := httpguts.PunycodeHostPort(host)
- if err != nil {
- return nil, err
- }
-
- var path string
- if req.Method != "CONNECT" {
- path = req.URL.RequestURI()
- if !http2validPseudoPath(path) {
- orig := path
- path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
- if !http2validPseudoPath(path) {
- if req.URL.Opaque != "" {
- return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
- } else {
- return nil, fmt.Errorf("invalid request :path %q", orig)
- }
- }
- }
- }
-
- // Check for any invalid headers and return an error before we
- // potentially pollute our hpack state. (We want to be able to
- // continue to reuse the hpack encoder for future requests)
- for k, vv := range req.Header {
- if !httpguts.ValidHeaderFieldName(k) {
- return nil, fmt.Errorf("invalid HTTP header name %q", k)
- }
- for _, v := range vv {
- if !httpguts.ValidHeaderFieldValue(v) {
- return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
- }
- }
- }
-
- enumerateHeaders := func(f func(name, value string)) {
- // 8.1.2.3 Request Pseudo-Header Fields
- // The :path pseudo-header field includes the path and query parts of the
- // target URI (the path-absolute production and optionally a '?' character
- // followed by the query production (see Sections 3.3 and 3.4 of
- // [RFC3986]).
- f(":authority", host)
- m := req.Method
- if m == "" {
- m = MethodGet
- }
- f(":method", m)
- if req.Method != "CONNECT" {
- f(":path", path)
- f(":scheme", req.URL.Scheme)
- }
- if trailers != "" {
- f("trailer", trailers)
- }
-
- var didUA bool
- for k, vv := range req.Header {
- if http2asciiEqualFold(k, "host") || http2asciiEqualFold(k, "content-length") {
- // Host is :authority, already sent.
- // Content-Length is automatic, set below.
- continue
- } else if http2asciiEqualFold(k, "connection") ||
- http2asciiEqualFold(k, "proxy-connection") ||
- http2asciiEqualFold(k, "transfer-encoding") ||
- http2asciiEqualFold(k, "upgrade") ||
- http2asciiEqualFold(k, "keep-alive") {
- // Per 8.1.2.2 Connection-Specific Header
- // Fields, don't send connection-specific
- // fields. We have already checked if any
- // are error-worthy so just ignore the rest.
- continue
- } else if http2asciiEqualFold(k, "user-agent") {
- // Match Go's http1 behavior: at most one
- // User-Agent. If set to nil or empty string,
- // then omit it. Otherwise if not mentioned,
- // include the default (below).
- didUA = true
- if len(vv) < 1 {
- continue
- }
- vv = vv[:1]
- if vv[0] == "" {
- continue
- }
- } else if http2asciiEqualFold(k, "cookie") {
- // Per 8.1.2.5 To allow for better compression efficiency, the
- // Cookie header field MAY be split into separate header fields,
- // each with one or more cookie-pairs.
- for _, v := range vv {
- for {
- p := strings.IndexByte(v, ';')
- if p < 0 {
- break
- }
- f("cookie", v[:p])
- p++
- // strip space after semicolon if any.
- for p+1 <= len(v) && v[p] == ' ' {
- p++
- }
- v = v[p:]
- }
- if len(v) > 0 {
- f("cookie", v)
- }
- }
- continue
- }
-
- for _, v := range vv {
- f(k, v)
- }
- }
- if http2shouldSendReqContentLength(req.Method, contentLength) {
- f("content-length", strconv.FormatInt(contentLength, 10))
- }
- if addGzipHeader {
- f("accept-encoding", "gzip")
- }
- if !didUA {
- f("user-agent", http2defaultUserAgent)
- }
- }
-
- // Do a first pass over the headers counting bytes to ensure
- // we don't exceed cc.peerMaxHeaderListSize. This is done as a
- // separate pass before encoding the headers to prevent
- // modifying the hpack state.
- hlSize := uint64(0)
- enumerateHeaders(func(name, value string) {
- hf := hpack.HeaderField{Name: name, Value: value}
- hlSize += uint64(hf.Size())
- })
-
- if hlSize > cc.peerMaxHeaderListSize {
- return nil, http2errRequestHeaderListSize
- }
-
- trace := httptrace.ContextClientTrace(req.Context())
- traceHeaders := http2traceHasWroteHeaderField(trace)
-
- // Header list size is ok. Write the headers.
- enumerateHeaders(func(name, value string) {
- name, ascii := http2asciiToLower(name)
- if !ascii {
- // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
- // field names have to be ASCII characters (just as in HTTP/1.x).
- return
- }
- cc.writeHeader(name, value)
- if traceHeaders {
- http2traceWroteHeaderField(trace, name, value)
- }
- })
-
- return cc.hbuf.Bytes(), nil
-}
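-
-// Illustrative sketch, not part of the original file: a standalone version
-// of the cookie-splitting loop inside enumerateHeaders above. Per RFC 7540
-// section 8.1.2.5, "a=1; b=2; c=3" becomes three separate "cookie" fields,
-// which HPACK can then index individually.
-func exampleSplitCookie(v string) []string {
- var pairs []string
- for {
- p := strings.IndexByte(v, ';')
- if p < 0 {
- break
- }
- pairs = append(pairs, v[:p])
- p++
- // Strip the space after the semicolon, if any.
- for p+1 <= len(v) && v[p] == ' ' {
- p++
- }
- v = v[p:]
- }
- if len(v) > 0 {
- pairs = append(pairs, v)
- }
- return pairs
-}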
-
-// shouldSendReqContentLength reports whether the http2.Transport should send
-// a "content-length" request header. This logic is basically a copy of the net/http
-// transferWriter.shouldSendContentLength.
-// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
-// -1 means unknown.
-func http2shouldSendReqContentLength(method string, contentLength int64) bool {
- if contentLength > 0 {
- return true
- }
- if contentLength < 0 {
- return false
- }
- // For zero bodies, whether we send a content-length depends on the method.
- // It also kinda doesn't matter for http2 either way, with END_STREAM.
- switch method {
- case "POST", "PUT", "PATCH":
- return true
- default:
- return false
- }
-}
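-
-// Illustrative sketch, not part of the original file: the resulting
-// behavior for a few method/length combinations.
-func exampleContentLengthHeader() []bool {
- return []bool{
- http2shouldSendReqContentLength("GET", 42), // true: positive length
- http2shouldSendReqContentLength("GET", -1), // false: unknown length
- http2shouldSendReqContentLength("GET", 0), // false: zero body on a read-style method
- http2shouldSendReqContentLength("POST", 0), // true: zero body on a write-style method
- }
-}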
-
-// requires cc.wmu be held.
-func (cc *http2ClientConn) encodeTrailers(trailer Header) ([]byte, error) {
- cc.hbuf.Reset()
-
- hlSize := uint64(0)
- for k, vv := range trailer {
- for _, v := range vv {
- hf := hpack.HeaderField{Name: k, Value: v}
- hlSize += uint64(hf.Size())
- }
- }
- if hlSize > cc.peerMaxHeaderListSize {
- return nil, http2errRequestHeaderListSize
- }
-
- for k, vv := range trailer {
- lowKey, ascii := http2asciiToLower(k)
- if !ascii {
- // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
- // field names have to be ASCII characters (just as in HTTP/1.x).
- continue
- }
- // Transfer-Encoding, etc.. have already been filtered at the
- // start of RoundTrip
- for _, v := range vv {
- cc.writeHeader(lowKey, v)
- }
- }
- return cc.hbuf.Bytes(), nil
-}
-
-func (cc *http2ClientConn) writeHeader(name, value string) {
- if http2VerboseLogs {
- log.Printf("http2: Transport encoding header %q = %q", name, value)
- }
- cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
-}
-
-type http2resAndError struct {
- _ http2incomparable
- res *Response
- err error
-}
-
-// requires cc.mu be held.
-func (cc *http2ClientConn) addStreamLocked(cs *http2clientStream) {
- cs.flow.add(int32(cc.initialWindowSize))
- cs.flow.setConnFlow(&cc.flow)
- cs.inflow.add(http2transportDefaultStreamFlow)
- cs.inflow.setConnFlow(&cc.inflow)
- cs.ID = cc.nextStreamID
- cc.nextStreamID += 2
- cc.streams[cs.ID] = cs
- if cs.ID == 0 {
- panic("assigned stream ID 0")
- }
-}
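-
-// Illustrative sketch, not part of the original file: client-initiated
-// stream IDs are odd and strictly increasing, so addStreamLocked hands out
-// nextStreamID and advances it by 2. Connections start at 1, or at 3 when
-// AllowHTTP is set, where stream 1 is taken by the h2c upgrade response.
-func exampleStreamIDs(start uint32, n int) []uint32 {
- ids := make([]uint32, 0, n)
- for id := start; len(ids) < n; id += 2 {
- ids = append(ids, id) // 1, 3, 5, ... for start == 1
- }
- return ids
-}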
-
-func (cc *http2ClientConn) forgetStreamID(id uint32) {
- cc.mu.Lock()
- slen := len(cc.streams)
- delete(cc.streams, id)
- if len(cc.streams) != slen-1 {
- panic("forgetting unknown stream id")
- }
- cc.lastActive = time.Now()
- if len(cc.streams) == 0 && cc.idleTimer != nil {
- cc.idleTimer.Reset(cc.idleTimeout)
- cc.lastIdle = time.Now()
- }
- // Wake up writeRequestBody via clientStream.awaitFlowControl and
- // wake up RoundTrip if there is a pending request.
- cc.cond.Broadcast()
-
- closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives()
- if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
- if http2VerboseLogs {
- cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2)
- }
- cc.closed = true
- defer cc.tconn.Close()
- }
-
- cc.mu.Unlock()
-}
-
-// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
-type http2clientConnReadLoop struct {
- _ http2incomparable
- cc *http2ClientConn
-}
-
-// readLoop runs in its own goroutine and reads and dispatches frames.
-func (cc *http2ClientConn) readLoop() {
- rl := &http2clientConnReadLoop{cc: cc}
- defer rl.cleanup()
- cc.readerErr = rl.run()
- if ce, ok := cc.readerErr.(http2ConnectionError); ok {
- cc.wmu.Lock()
- cc.fr.WriteGoAway(0, http2ErrCode(ce), nil)
- cc.wmu.Unlock()
- }
-}
-
-// GoAwayError is returned by the Transport when the server closes the
-// TCP connection after sending a GOAWAY frame.
-type http2GoAwayError struct {
- LastStreamID uint32
- ErrCode http2ErrCode
- DebugData string
-}
-
-func (e http2GoAwayError) Error() string {
- return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q",
- e.LastStreamID, e.ErrCode, e.DebugData)
-}
-
-func http2isEOFOrNetReadError(err error) bool {
- if err == io.EOF {
- return true
- }
- ne, ok := err.(*net.OpError)
- return ok && ne.Op == "read"
-}
-
-func (rl *http2clientConnReadLoop) cleanup() {
- cc := rl.cc
- defer cc.tconn.Close()
- defer cc.t.connPool().MarkDead(cc)
- defer close(cc.readerDone)
-
- if cc.idleTimer != nil {
- cc.idleTimer.Stop()
- }
-
- // Close any response bodies if the server closes prematurely.
- // TODO: also do this if we've written the headers but not
- // gotten a response yet.
- err := cc.readerErr
- cc.mu.Lock()
- if cc.goAway != nil && http2isEOFOrNetReadError(err) {
- err = http2GoAwayError{
- LastStreamID: cc.goAway.LastStreamID,
- ErrCode: cc.goAway.ErrCode,
- DebugData: cc.goAwayDebug,
- }
- } else if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- cc.closed = true
- for _, cs := range cc.streams {
- select {
- case <-cs.peerClosed:
- // The server closed the stream before closing the conn,
- // so no need to interrupt it.
- default:
- cs.abortStreamLocked(err)
- }
- }
- cc.cond.Broadcast()
- cc.mu.Unlock()
-}
-
-// countReadFrameError calls Transport.CountError with a string
-// representing err.
-func (cc *http2ClientConn) countReadFrameError(err error) {
- f := cc.t.CountError
- if f == nil || err == nil {
- return
- }
- if ce, ok := err.(http2ConnectionError); ok {
- errCode := http2ErrCode(ce)
- f(fmt.Sprintf("read_frame_conn_error_%s", errCode.stringToken()))
- return
- }
- if errors.Is(err, io.EOF) {
- f("read_frame_eof")
- return
- }
- if errors.Is(err, io.ErrUnexpectedEOF) {
- f("read_frame_unexpected_eof")
- return
- }
- if errors.Is(err, http2ErrFrameTooLarge) {
- f("read_frame_too_large")
- return
- }
- f("read_frame_other")
-}
-
-func (rl *http2clientConnReadLoop) run() error {
- cc := rl.cc
- gotSettings := false
- readIdleTimeout := cc.t.ReadIdleTimeout
- var t *time.Timer
- if readIdleTimeout != 0 {
- t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
- defer t.Stop()
- }
- for {
- f, err := cc.fr.ReadFrame()
- if t != nil {
- t.Reset(readIdleTimeout)
- }
- if err != nil {
- cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
- }
- if se, ok := err.(http2StreamError); ok {
- if cs := rl.streamByID(se.StreamID); cs != nil {
- if se.Cause == nil {
- se.Cause = cc.fr.errDetail
- }
- rl.endStreamError(cs, se)
- }
- continue
- } else if err != nil {
- cc.countReadFrameError(err)
- return err
- }
- if http2VerboseLogs {
- cc.vlogf("http2: Transport received %s", http2summarizeFrame(f))
- }
- if !gotSettings {
- if _, ok := f.(*http2SettingsFrame); !ok {
- cc.logf("protocol error: received %T before a SETTINGS frame", f)
- return http2ConnectionError(http2ErrCodeProtocol)
- }
- gotSettings = true
- }
-
- switch f := f.(type) {
- case *http2MetaHeadersFrame:
- err = rl.processHeaders(f)
- case *http2DataFrame:
- err = rl.processData(f)
- case *http2GoAwayFrame:
- err = rl.processGoAway(f)
- case *http2RSTStreamFrame:
- err = rl.processResetStream(f)
- case *http2SettingsFrame:
- err = rl.processSettings(f)
- case *http2PushPromiseFrame:
- err = rl.processPushPromise(f)
- case *http2WindowUpdateFrame:
- err = rl.processWindowUpdate(f)
- case *http2PingFrame:
- err = rl.processPing(f)
- default:
- cc.logf("Transport: unhandled response frame type %T", f)
- }
- if err != nil {
- if http2VerboseLogs {
- cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, http2summarizeFrame(f), err)
- }
- return err
- }
- }
-}
-
-func (rl *http2clientConnReadLoop) processHeaders(f *http2MetaHeadersFrame) error {
- cs := rl.streamByID(f.StreamID)
- if cs == nil {
- // We'd get here if we canceled a request while the
- // server had its response still in flight. So if this
- // was just something we canceled, ignore it.
- return nil
- }
- if cs.readClosed {
- rl.endStreamError(cs, http2StreamError{
- StreamID: f.StreamID,
- Code: http2ErrCodeProtocol,
- Cause: errors.New("protocol error: headers after END_STREAM"),
- })
- return nil
- }
- if !cs.firstByte {
- if cs.trace != nil {
- // TODO(bradfitz): move first response byte earlier,
- // when we first read the 9 byte header, not waiting
- // until all the HEADERS+CONTINUATION frames have been
- // merged. This works for now.
- http2traceFirstResponseByte(cs.trace)
- }
- cs.firstByte = true
- }
- if !cs.pastHeaders {
- cs.pastHeaders = true
- } else {
- return rl.processTrailers(cs, f)
- }
-
- res, err := rl.handleResponse(cs, f)
- if err != nil {
- if _, ok := err.(http2ConnectionError); ok {
- return err
- }
- // Any other error type is a stream error.
- rl.endStreamError(cs, http2StreamError{
- StreamID: f.StreamID,
- Code: http2ErrCodeProtocol,
- Cause: err,
- })
- return nil // return nil from process* funcs to keep conn alive
- }
- if res == nil {
- // (nil, nil) special case. See handleResponse docs.
- return nil
- }
- cs.resTrailer = &res.Trailer
- cs.res = res
- close(cs.respHeaderRecv)
- if f.StreamEnded() {
- rl.endStream(cs)
- }
- return nil
-}
-
-// handleResponse may return a nil error or a ConnectionError. Any other
-// error value is converted into a StreamError of type ErrCodeProtocol,
-// with the returned error as its detail.
-//
-// As a special case, handleResponse may return (nil, nil) to skip the
-// frame (currently only used for 1xx responses).
-func (rl *http2clientConnReadLoop) handleResponse(cs *http2clientStream, f *http2MetaHeadersFrame) (*Response, error) {
- if f.Truncated {
- return nil, http2errResponseHeaderListSize
- }
-
- status := f.PseudoValue("status")
- if status == "" {
- return nil, errors.New("malformed response from server: missing status pseudo header")
- }
- statusCode, err := strconv.Atoi(status)
- if err != nil {
- return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header")
- }
-
- regularFields := f.RegularFields()
- strs := make([]string, len(regularFields))
- header := make(Header, len(regularFields))
- res := &Response{
- Proto: "HTTP/2.0",
- ProtoMajor: 2,
- Header: header,
- StatusCode: statusCode,
- Status: status + " " + StatusText(statusCode),
- }
- for _, hf := range regularFields {
- key := CanonicalHeaderKey(hf.Name)
- if key == "Trailer" {
- t := res.Trailer
- if t == nil {
- t = make(Header)
- res.Trailer = t
- }
- http2foreachHeaderElement(hf.Value, func(v string) {
- t[CanonicalHeaderKey(v)] = nil
- })
- } else {
- vv := header[key]
- if vv == nil && len(strs) > 0 {
- // More than likely this will be a single-element key.
- // Most headers aren't multi-valued.
- // Set the capacity on strs[0] to 1, so any future append
- // won't extend the slice into the other strings.
- vv, strs = strs[:1:1], strs[1:]
- vv[0] = hf.Value
- header[key] = vv
- } else {
- header[key] = append(vv, hf.Value)
- }
- }
- }
-
- if statusCode >= 100 && statusCode <= 199 {
- if f.StreamEnded() {
- return nil, errors.New("1xx informational response with END_STREAM flag")
- }
- cs.num1xx++
- const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http
- if cs.num1xx > max1xxResponses {
- return nil, errors.New("http2: too many 1xx informational responses")
- }
- if fn := cs.get1xxTraceFunc(); fn != nil {
- if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil {
- return nil, err
- }
- }
- if statusCode == 100 {
- http2traceGot100Continue(cs.trace)
- select {
- case cs.on100 <- struct{}{}:
- default:
- }
- }
- cs.pastHeaders = false // do it all again
- return nil, nil
- }
-
- res.ContentLength = -1
- if clens := res.Header["Content-Length"]; len(clens) == 1 {
- if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil {
- res.ContentLength = int64(cl)
- } else {
- // TODO: care? unlike http/1, it won't mess up our framing, so it's
- // safer, smuggling-wise, to ignore.
- }
- } else if len(clens) > 1 {
- // TODO: care? unlike http/1, it won't mess up our framing, so it's
- // safer, smuggling-wise, to ignore.
- } else if f.StreamEnded() && !cs.isHead {
- res.ContentLength = 0
- }
-
- if cs.isHead {
- res.Body = http2noBody
- return res, nil
- }
-
- if f.StreamEnded() {
- if res.ContentLength > 0 {
- res.Body = http2missingBody{}
- } else {
- res.Body = http2noBody
- }
- return res, nil
- }
-
- cs.bufPipe.setBuffer(&http2dataBuffer{expected: res.ContentLength})
- cs.bytesRemain = res.ContentLength
- res.Body = http2transportResponseBody{cs}
-
- if cs.requestedGzip && http2asciiEqualFold(res.Header.Get("Content-Encoding"), "gzip") {
- res.Header.Del("Content-Encoding")
- res.Header.Del("Content-Length")
- res.ContentLength = -1
- res.Body = &http2gzipReader{body: res.Body}
- res.Uncompressed = true
- }
- return res, nil
-}
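-
-// Illustrative sketch, not part of the original file: the strs trick in
-// handleResponse makes one []string allocation serve every single-valued
-// header. Each key takes a len-1/cap-1 sub-slice, so appending a second
-// value later forces a copy instead of growing into a neighbor's slot.
-func exampleSharedValueSlices(fields []hpack.HeaderField) Header {
- strs := make([]string, len(fields))
- h := make(Header, len(fields))
- for _, hf := range fields {
- key := CanonicalHeaderKey(hf.Name)
- vv := h[key]
- if vv == nil && len(strs) > 0 {
- vv, strs = strs[:1:1], strs[1:] // cap 1: future appends must reallocate
- vv[0] = hf.Value
- h[key] = vv
- } else {
- h[key] = append(vv, hf.Value)
- }
- }
- return h
-}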
-
-func (rl *http2clientConnReadLoop) processTrailers(cs *http2clientStream, f *http2MetaHeadersFrame) error {
- if cs.pastTrailers {
- // Too many HEADERS frames for this stream.
- return http2ConnectionError(http2ErrCodeProtocol)
- }
- cs.pastTrailers = true
- if !f.StreamEnded() {
- // We expect that any HEADERS frame carrying trailers
- // also has END_STREAM set.
- return http2ConnectionError(http2ErrCodeProtocol)
- }
- if len(f.PseudoFields()) > 0 {
- // No pseudo header fields are defined for trailers.
- // TODO: ConnectionError might be overly harsh? Check.
- return http2ConnectionError(http2ErrCodeProtocol)
- }
-
- trailer := make(Header)
- for _, hf := range f.RegularFields() {
- key := CanonicalHeaderKey(hf.Name)
- trailer[key] = append(trailer[key], hf.Value)
- }
- cs.trailer = trailer
-
- rl.endStream(cs)
- return nil
-}
-
-// transportResponseBody is the concrete type of Transport.RoundTrip's
-// Response.Body. It is an io.ReadCloser.
-type http2transportResponseBody struct {
- cs *http2clientStream
-}
-
-func (b http2transportResponseBody) Read(p []byte) (n int, err error) {
- cs := b.cs
- cc := cs.cc
-
- if cs.readErr != nil {
- return 0, cs.readErr
- }
- n, err = b.cs.bufPipe.Read(p)
- if cs.bytesRemain != -1 {
- if int64(n) > cs.bytesRemain {
- n = int(cs.bytesRemain)
- if err == nil {
- err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
- cs.abortStream(err)
- }
- cs.readErr = err
- return int(cs.bytesRemain), err
- }
- cs.bytesRemain -= int64(n)
- if err == io.EOF && cs.bytesRemain > 0 {
- err = io.ErrUnexpectedEOF
- cs.readErr = err
- return n, err
- }
- }
- if n == 0 {
- // No flow control tokens to send back.
- return
- }
-
- cc.mu.Lock()
- var connAdd, streamAdd int32
- // Check the conn-level first, before the stream-level.
- if v := cc.inflow.available(); v < http2transportDefaultConnFlow/2 {
- connAdd = http2transportDefaultConnFlow - v
- cc.inflow.add(connAdd)
- }
- if err == nil { // No need to refresh if the stream is over or failed.
- // Consider any buffered body data (read from the conn but not
- // consumed by the client) when computing flow control for this
- // stream.
- v := int(cs.inflow.available()) + cs.bufPipe.Len()
- if v < http2transportDefaultStreamFlow-http2transportDefaultStreamMinRefresh {
- streamAdd = int32(http2transportDefaultStreamFlow - v)
- cs.inflow.add(streamAdd)
- }
- }
- cc.mu.Unlock()
-
- if connAdd != 0 || streamAdd != 0 {
- cc.wmu.Lock()
- defer cc.wmu.Unlock()
- if connAdd != 0 {
- cc.fr.WriteWindowUpdate(0, http2mustUint31(connAdd))
- }
- if streamAdd != 0 {
- cc.fr.WriteWindowUpdate(cs.ID, http2mustUint31(streamAdd))
- }
- cc.bw.Flush()
- }
- return
-}
-
-var http2errClosedResponseBody = errors.New("http2: response body closed")
-
-func (b http2transportResponseBody) Close() error {
- cs := b.cs
- cc := cs.cc
-
- unread := cs.bufPipe.Len()
- if unread > 0 {
- cc.mu.Lock()
- // Return connection-level flow control.
- cc.inflow.add(int32(unread))
- cc.mu.Unlock()
-
- // TODO(dneil): Acquiring this mutex can block indefinitely.
- // Move flow control return to a goroutine?
- cc.wmu.Lock()
- // Return connection-level flow control.
- cc.fr.WriteWindowUpdate(0, uint32(unread))
- cc.bw.Flush()
- cc.wmu.Unlock()
- }
-
- cs.bufPipe.BreakWithError(http2errClosedResponseBody)
- cs.abortStream(http2errClosedResponseBody)
-
- select {
- case <-cs.donec:
- case <-cs.ctx.Done():
- // See golang/go#49366: The net/http package can cancel the
- // request context after the response body is fully read.
- // Don't treat this as an error.
- return nil
- case <-cs.reqCancel:
- return http2errRequestCanceled
- }
- return nil
-}
-
-func (rl *http2clientConnReadLoop) processData(f *http2DataFrame) error {
- cc := rl.cc
- cs := rl.streamByID(f.StreamID)
- data := f.Data()
- if cs == nil {
- cc.mu.Lock()
- neverSent := cc.nextStreamID
- cc.mu.Unlock()
- if f.StreamID >= neverSent {
- // We never asked for this.
- cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
- return http2ConnectionError(http2ErrCodeProtocol)
- }
- // We probably did ask for this, but canceled. Just ignore it.
- // TODO: be stricter here? only silently ignore things which
- // we canceled, but not things which were closed normally
- // by the peer? Tough without accumulating too much state.
-
- // But at least return their flow control:
- if f.Length > 0 {
- cc.mu.Lock()
- cc.inflow.add(int32(f.Length))
- cc.mu.Unlock()
-
- cc.wmu.Lock()
- cc.fr.WriteWindowUpdate(0, uint32(f.Length))
- cc.bw.Flush()
- cc.wmu.Unlock()
- }
- return nil
- }
- if cs.readClosed {
- cc.logf("protocol error: received DATA after END_STREAM")
- rl.endStreamError(cs, http2StreamError{
- StreamID: f.StreamID,
- Code: http2ErrCodeProtocol,
- })
- return nil
- }
- if !cs.firstByte {
- cc.logf("protocol error: received DATA before a HEADERS frame")
- rl.endStreamError(cs, http2StreamError{
- StreamID: f.StreamID,
- Code: http2ErrCodeProtocol,
- })
- return nil
- }
- if f.Length > 0 {
- if cs.isHead && len(data) > 0 {
- cc.logf("protocol error: received DATA on a HEAD request")
- rl.endStreamError(cs, http2StreamError{
- StreamID: f.StreamID,
- Code: http2ErrCodeProtocol,
- })
- return nil
- }
- // Check stream- and connection-level flow control.
- cc.mu.Lock()
- if cs.inflow.available() >= int32(f.Length) {
- cs.inflow.take(int32(f.Length))
- } else {
- cc.mu.Unlock()
- return http2ConnectionError(http2ErrCodeFlowControl)
- }
- // Return any padded flow control now, since we won't
- // refund it later on body reads.
- var refund int
- if pad := int(f.Length) - len(data); pad > 0 {
- refund += pad
- }
-
- didReset := false
- var err error
- if len(data) > 0 {
- if _, err = cs.bufPipe.Write(data); err != nil {
- // Return len(data) now if the stream is already closed,
- // since data will never be read.
- didReset = true
- refund += len(data)
- }
- }
-
- if refund > 0 {
- cc.inflow.add(int32(refund))
- if !didReset {
- cs.inflow.add(int32(refund))
- }
- }
- cc.mu.Unlock()
-
- if refund > 0 {
- cc.wmu.Lock()
- cc.fr.WriteWindowUpdate(0, uint32(refund))
- if !didReset {
- cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
- }
- cc.bw.Flush()
- cc.wmu.Unlock()
- }
-
- if err != nil {
- rl.endStreamError(cs, err)
- return nil
- }
- }
-
- if f.StreamEnded() {
- rl.endStream(cs)
- }
- return nil
-}
-
-func (rl *http2clientConnReadLoop) endStream(cs *http2clientStream) {
- // TODO: check that any declared content-length matches, like
- // server.go's (*stream).endStream method.
- if !cs.readClosed {
- cs.readClosed = true
- // Close cs.bufPipe and cs.peerClosed with cc.mu held to avoid a
- // race condition: The caller can read io.EOF from Response.Body
- // and close the body before we close cs.peerClosed, causing
- // cleanupWriteRequest to send a RST_STREAM.
- rl.cc.mu.Lock()
- defer rl.cc.mu.Unlock()
- cs.bufPipe.closeWithErrorAndCode(io.EOF, cs.copyTrailers)
- close(cs.peerClosed)
- }
-}
-
-func (rl *http2clientConnReadLoop) endStreamError(cs *http2clientStream, err error) {
- cs.readAborted = true
- cs.abortStream(err)
-}
-
-func (rl *http2clientConnReadLoop) streamByID(id uint32) *http2clientStream {
- rl.cc.mu.Lock()
- defer rl.cc.mu.Unlock()
- cs := rl.cc.streams[id]
- if cs != nil && !cs.readAborted {
- return cs
- }
- return nil
-}
-
-func (cs *http2clientStream) copyTrailers() {
- for k, vv := range cs.trailer {
- t := cs.resTrailer
- if *t == nil {
- *t = make(Header)
- }
- (*t)[k] = vv
- }
-}
-
-func (rl *http2clientConnReadLoop) processGoAway(f *http2GoAwayFrame) error {
- cc := rl.cc
- cc.t.connPool().MarkDead(cc)
- if f.ErrCode != 0 {
- // TODO: deal with GOAWAY more thoroughly; particularly the error code.
- cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
- if fn := cc.t.CountError; fn != nil {
- fn("recv_goaway_" + f.ErrCode.stringToken())
- }
-
- }
- cc.setGoAway(f)
- return nil
-}
-
-func (rl *http2clientConnReadLoop) processSettings(f *http2SettingsFrame) error {
- cc := rl.cc
- // Locking both mu and wmu here allows frame encoding to read settings with only wmu held.
- // Acquiring wmu when f.IsAck() is unnecessary, but convenient and mostly harmless.
- cc.wmu.Lock()
- defer cc.wmu.Unlock()
-
- if err := rl.processSettingsNoWrite(f); err != nil {
- return err
- }
- if !f.IsAck() {
- cc.fr.WriteSettingsAck()
- cc.bw.Flush()
- }
- return nil
-}
-
-func (rl *http2clientConnReadLoop) processSettingsNoWrite(f *http2SettingsFrame) error {
- cc := rl.cc
- cc.mu.Lock()
- defer cc.mu.Unlock()
-
- if f.IsAck() {
- if cc.wantSettingsAck {
- cc.wantSettingsAck = false
- return nil
- }
- return http2ConnectionError(http2ErrCodeProtocol)
- }
-
- var seenMaxConcurrentStreams bool
- err := f.ForeachSetting(func(s http2Setting) error {
- switch s.ID {
- case http2SettingMaxFrameSize:
- cc.maxFrameSize = s.Val
- case http2SettingMaxConcurrentStreams:
- cc.maxConcurrentStreams = s.Val
- seenMaxConcurrentStreams = true
- case http2SettingMaxHeaderListSize:
- cc.peerMaxHeaderListSize = uint64(s.Val)
- case http2SettingInitialWindowSize:
- // Values above the maximum flow-control
- // window size of 2^31-1 MUST be treated as a
- // connection error (Section 5.4.1) of type
- // FLOW_CONTROL_ERROR.
- if s.Val > math.MaxInt32 {
- return http2ConnectionError(http2ErrCodeFlowControl)
- }
-
- // Adjust flow control of currently-open
- // streams by the difference between the old
- // initial window size and this one.
- delta := int32(s.Val) - int32(cc.initialWindowSize)
- for _, cs := range cc.streams {
- cs.flow.add(delta)
- }
- cc.cond.Broadcast()
-
- cc.initialWindowSize = s.Val
- default:
- // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
- cc.vlogf("Unhandled Setting: %v", s)
- }
- return nil
- })
- if err != nil {
- return err
- }
-
- if !cc.seenSettings {
- if !seenMaxConcurrentStreams {
- // This was the server's initial SETTINGS frame and it
- // didn't contain a MAX_CONCURRENT_STREAMS field, so
- // increase the number of concurrent streams this
- // connection can establish to our default.
- cc.maxConcurrentStreams = http2defaultMaxConcurrentStreams
- }
- cc.seenSettings = true
- }
-
- return nil
-}
-
-func (rl *http2clientConnReadLoop) processWindowUpdate(f *http2WindowUpdateFrame) error {
- cc := rl.cc
- cs := rl.streamByID(f.StreamID)
- if f.StreamID != 0 && cs == nil {
- return nil
- }
-
- cc.mu.Lock()
- defer cc.mu.Unlock()
-
- fl := &cc.flow
- if cs != nil {
- fl = &cs.flow
- }
- if !fl.add(int32(f.Increment)) {
- return http2ConnectionError(http2ErrCodeFlowControl)
- }
- cc.cond.Broadcast()
- return nil
-}
-
-func (rl *http2clientConnReadLoop) processResetStream(f *http2RSTStreamFrame) error {
- cs := rl.streamByID(f.StreamID)
- if cs == nil {
- // TODO: return error if server tries to RST_STREAM an idle stream
- return nil
- }
- serr := http2streamError(cs.ID, f.ErrCode)
- serr.Cause = http2errFromPeer
- if f.ErrCode == http2ErrCodeProtocol {
- rl.cc.SetDoNotReuse()
- }
- if fn := cs.cc.t.CountError; fn != nil {
- fn("recv_rststream_" + f.ErrCode.stringToken())
- }
- cs.abortStream(serr)
-
- cs.bufPipe.CloseWithError(serr)
- return nil
-}
-
-// Ping sends a PING frame to the server and waits for the ack.
-func (cc *http2ClientConn) Ping(ctx context.Context) error {
- c := make(chan struct{})
- // Generate a random payload
- var p [8]byte
- for {
- if _, err := rand.Read(p[:]); err != nil {
- return err
- }
- cc.mu.Lock()
- // check for dup before insert
- if _, found := cc.pings[p]; !found {
- cc.pings[p] = c
- cc.mu.Unlock()
- break
- }
- cc.mu.Unlock()
- }
- errc := make(chan error, 1)
- go func() {
- cc.wmu.Lock()
- defer cc.wmu.Unlock()
- if err := cc.fr.WritePing(false, p); err != nil {
- errc <- err
- return
- }
- if err := cc.bw.Flush(); err != nil {
- errc <- err
- return
- }
- }()
- select {
- case <-c:
- return nil
- case err := <-errc:
- return err
- case <-ctx.Done():
- return ctx.Err()
- case <-cc.readerDone:
- // connection closed
- return cc.readerErr
- }
-}
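
The Ping implementation above matches acks to waiters by keying a channel on a random 8-byte payload, retrying the draw on the unlikely collision with a PING already in flight. A minimal standalone sketch of that registration loop, using hypothetical names rather than the bundle's API:

    package main

    import (
        "crypto/rand"
        "fmt"
    )

    // newPingPayload draws random 8-byte payloads until one is not already
    // pending, registers a completion channel under it, and returns both.
    // This mirrors the dedup loop at the top of Ping above.
    func newPingPayload(pending map[[8]byte]chan struct{}) ([8]byte, chan struct{}, error) {
        c := make(chan struct{})
        var p [8]byte
        for {
            if _, err := rand.Read(p[:]); err != nil {
                return p, nil, err
            }
            if _, dup := pending[p]; !dup {
                pending[p] = c
                return p, c, nil
            }
        }
    }

    func main() {
        pending := make(map[[8]byte]chan struct{})
        p, _, err := newPingPayload(pending)
        if err != nil {
            panic(err)
        }
        fmt.Printf("registered PING payload %x\n", p)
    }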
-
-func (rl *http2clientConnReadLoop) processPing(f *http2PingFrame) error {
- if f.IsAck() {
- cc := rl.cc
- cc.mu.Lock()
- defer cc.mu.Unlock()
- // If ack, notify listener if any
- if c, ok := cc.pings[f.Data]; ok {
- close(c)
- delete(cc.pings, f.Data)
- }
- return nil
- }
- cc := rl.cc
- cc.wmu.Lock()
- defer cc.wmu.Unlock()
- if err := cc.fr.WritePing(true, f.Data); err != nil {
- return err
- }
- return cc.bw.Flush()
-}
-
-func (rl *http2clientConnReadLoop) processPushPromise(f *http2PushPromiseFrame) error {
- // We told the peer we don't want them.
- // Spec says:
- // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
- // setting of the peer endpoint is set to 0. An endpoint that
- // has set this setting and has received acknowledgement MUST
- // treat the receipt of a PUSH_PROMISE frame as a connection
- // error (Section 5.4.1) of type PROTOCOL_ERROR."
- return http2ConnectionError(http2ErrCodeProtocol)
-}
-
-func (cc *http2ClientConn) writeStreamReset(streamID uint32, code http2ErrCode, err error) {
- // TODO: map err to more interesting error codes, once the
- // HTTP community comes up with some. But currently for
- // RST_STREAM there's no equivalent to GOAWAY frame's debug
- // data, and the error codes are all pretty vague ("cancel").
- cc.wmu.Lock()
- cc.fr.WriteRSTStream(streamID, code)
- cc.bw.Flush()
- cc.wmu.Unlock()
-}
-
-var (
- http2errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
- http2errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit")
-)
-
-func (cc *http2ClientConn) logf(format string, args ...interface{}) {
- cc.t.logf(format, args...)
-}
-
-func (cc *http2ClientConn) vlogf(format string, args ...interface{}) {
- cc.t.vlogf(format, args...)
-}
-
-func (t *http2Transport) vlogf(format string, args ...interface{}) {
- if http2VerboseLogs {
- t.logf(format, args...)
- }
-}
-
-func (t *http2Transport) logf(format string, args ...interface{}) {
- log.Printf(format, args...)
-}
-
-var http2noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
-
-type http2missingBody struct{}
-
-func (http2missingBody) Close() error { return nil }
-
-func (http2missingBody) Read([]byte) (int, error) { return 0, io.ErrUnexpectedEOF }
-
-func http2strSliceContains(ss []string, s string) bool {
- for _, v := range ss {
- if v == s {
- return true
- }
- }
- return false
-}
-
-type http2erringRoundTripper struct{ err error }
-
-func (rt http2erringRoundTripper) RoundTripErr() error { return rt.err }
-
-func (rt http2erringRoundTripper) RoundTrip(*Request) (*Response, error) { return nil, rt.err }
-
-// gzipReader wraps a response body so it can lazily
-// call gzip.NewReader on the first call to Read.
-type http2gzipReader struct {
- _ http2incomparable
- body io.ReadCloser // underlying Response.Body
- zr *gzip.Reader // lazily-initialized gzip reader
- zerr error // sticky error
-}
-
-func (gz *http2gzipReader) Read(p []byte) (n int, err error) {
- if gz.zerr != nil {
- return 0, gz.zerr
- }
- if gz.zr == nil {
- gz.zr, err = gzip.NewReader(gz.body)
- if err != nil {
- gz.zerr = err
- return 0, err
- }
- }
- return gz.zr.Read(p)
-}
-
-func (gz *http2gzipReader) Close() error {
- return gz.body.Close()
-}
-
-type http2errorReader struct{ err error }
-
-func (r http2errorReader) Read(p []byte) (int, error) { return 0, r.err }
-
-// isConnectionCloseRequest reports whether req should use its own
-// connection for a single request and then close the connection.
-func http2isConnectionCloseRequest(req *Request) bool {
- return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close")
-}
-
-// registerHTTPSProtocol calls Transport.RegisterProtocol,
-// converting panics into errors.
-func http2registerHTTPSProtocol(t *Transport, rt http2noDialH2RoundTripper) (err error) {
- defer func() {
- if e := recover(); e != nil {
- err = fmt.Errorf("%v", e)
- }
- }()
- t.RegisterProtocol("https", rt)
- return nil
-}
-
-// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
-// if there is already a cached connection to the host.
-// (The field is exported so it can be accessed via reflect from net/http; tested
-// by TestNoDialH2RoundTripperType)
-type http2noDialH2RoundTripper struct{ *http2Transport }
-
-func (rt http2noDialH2RoundTripper) RoundTrip(req *Request) (*Response, error) {
- res, err := rt.http2Transport.RoundTrip(req)
- if http2isNoCachedConnError(err) {
- return nil, ErrSkipAltProtocol
- }
- return res, err
-}
-
-func (t *http2Transport) idleConnTimeout() time.Duration {
- if t.t1 != nil {
- return t.t1.IdleConnTimeout
- }
- return 0
-}
-
-func http2traceGetConn(req *Request, hostPort string) {
- trace := httptrace.ContextClientTrace(req.Context())
- if trace == nil || trace.GetConn == nil {
- return
- }
- trace.GetConn(hostPort)
-}
-
-func http2traceGotConn(req *Request, cc *http2ClientConn, reused bool) {
- trace := httptrace.ContextClientTrace(req.Context())
- if trace == nil || trace.GotConn == nil {
- return
- }
- ci := httptrace.GotConnInfo{Conn: cc.tconn}
- ci.Reused = reused
- cc.mu.Lock()
- ci.WasIdle = len(cc.streams) == 0 && reused
- if ci.WasIdle && !cc.lastActive.IsZero() {
- ci.IdleTime = time.Since(cc.lastActive)
- }
- cc.mu.Unlock()
-
- trace.GotConn(ci)
-}
-
-func http2traceWroteHeaders(trace *httptrace.ClientTrace) {
- if trace != nil && trace.WroteHeaders != nil {
- trace.WroteHeaders()
- }
-}
-
-func http2traceGot100Continue(trace *httptrace.ClientTrace) {
- if trace != nil && trace.Got100Continue != nil {
- trace.Got100Continue()
- }
-}
-
-func http2traceWait100Continue(trace *httptrace.ClientTrace) {
- if trace != nil && trace.Wait100Continue != nil {
- trace.Wait100Continue()
- }
-}
-
-func http2traceWroteRequest(trace *httptrace.ClientTrace, err error) {
- if trace != nil && trace.WroteRequest != nil {
- trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
- }
-}
-
-func http2traceFirstResponseByte(trace *httptrace.ClientTrace) {
- if trace != nil && trace.GotFirstResponseByte != nil {
- trace.GotFirstResponseByte()
- }
-}
-
-// writeFramer is implemented by any type that is used to write frames.
-type http2writeFramer interface {
- writeFrame(http2writeContext) error
-
- // staysWithinBuffer reports whether this writer promises to
- // write at most size bytes, and that it won't Flush the write
- // context.
- staysWithinBuffer(size int) bool
-}
-
-// writeContext is the interface needed by the various frame writer
-// types below. All the writeFrame methods below are scheduled via the
-// frame writing scheduler (see writeScheduler in writesched.go).
-//
-// This interface is implemented by *serverConn.
-//
-// TODO: decide whether to a) use this in the client code (which ended
-// up not using it, since the client has a simpler design and does not
-// currently implement priorities), or b) delete this and make the
-// server code a bit more concrete.
-type http2writeContext interface {
- Framer() *http2Framer
- Flush() error
- CloseConn() error
- // HeaderEncoder returns an HPACK encoder that writes to the
- // returned buffer.
- HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
-}
-
-// writeEndsStream reports whether w writes a frame that will transition
-// the stream to a half-closed local state. This returns false for RST_STREAM,
-// which closes the entire stream (not just the local half).
-func http2writeEndsStream(w http2writeFramer) bool {
- switch v := w.(type) {
- case *http2writeData:
- return v.endStream
- case *http2writeResHeaders:
- return v.endStream
- case nil:
- // This can only happen if the caller reuses w after it's
- // been intentionally nil'ed out to prevent use. Keep this
- // here to catch future refactoring breaking it.
- panic("writeEndsStream called on nil writeFramer")
- }
- return false
-}
-
-type http2flushFrameWriter struct{}
-
-func (http2flushFrameWriter) writeFrame(ctx http2writeContext) error {
- return ctx.Flush()
-}
-
-func (http2flushFrameWriter) staysWithinBuffer(max int) bool { return false }
-
-type http2writeSettings []http2Setting
-
-func (s http2writeSettings) staysWithinBuffer(max int) bool {
- const settingSize = 6 // uint16 + uint32
- return http2frameHeaderLen+settingSize*len(s) <= max
-}
-
-func (s http2writeSettings) writeFrame(ctx http2writeContext) error {
- return ctx.Framer().WriteSettings([]http2Setting(s)...)
-}
-
-type http2writeGoAway struct {
- maxStreamID uint32
- code http2ErrCode
-}
-
-func (p *http2writeGoAway) writeFrame(ctx http2writeContext) error {
- err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
- ctx.Flush() // ignore error: we're hanging up on them anyway
- return err
-}
-
-func (*http2writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
-
-type http2writeData struct {
- streamID uint32
- p []byte
- endStream bool
-}
-
-func (w *http2writeData) String() string {
- return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
-}
-
-func (w *http2writeData) writeFrame(ctx http2writeContext) error {
- return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
-}
-
-func (w *http2writeData) staysWithinBuffer(max int) bool {
- return http2frameHeaderLen+len(w.p) <= max
-}
-
-// handlerPanicRST is the message sent from handler goroutines when
-// the handler panics.
-type http2handlerPanicRST struct {
- StreamID uint32
-}
-
-func (hp http2handlerPanicRST) writeFrame(ctx http2writeContext) error {
- return ctx.Framer().WriteRSTStream(hp.StreamID, http2ErrCodeInternal)
-}
-
-func (hp http2handlerPanicRST) staysWithinBuffer(max int) bool { return http2frameHeaderLen+4 <= max }
-
-func (se http2StreamError) writeFrame(ctx http2writeContext) error {
- return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
-}
-
-func (se http2StreamError) staysWithinBuffer(max int) bool { return http2frameHeaderLen+4 <= max }
-
-type http2writePingAck struct{ pf *http2PingFrame }
-
-func (w http2writePingAck) writeFrame(ctx http2writeContext) error {
- return ctx.Framer().WritePing(true, w.pf.Data)
-}
-
-func (w http2writePingAck) staysWithinBuffer(max int) bool {
- return http2frameHeaderLen+len(w.pf.Data) <= max
-}
-
-type http2writeSettingsAck struct{}
-
-func (http2writeSettingsAck) writeFrame(ctx http2writeContext) error {
- return ctx.Framer().WriteSettingsAck()
-}
-
-func (http2writeSettingsAck) staysWithinBuffer(max int) bool { return http2frameHeaderLen <= max }
-
-// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
-// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
-// for the first/last fragment, respectively.
-func http2splitHeaderBlock(ctx http2writeContext, headerBlock []byte, fn func(ctx http2writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
- // For now we're lazy and just pick the minimum MAX_FRAME_SIZE
- // that all peers must support (16KB). Later we could care
- // more and send larger frames if the peer advertised it, but
- // there's little point. Most headers are small anyway (so we
- // generally won't have CONTINUATION frames), and extra frames
- // waste only 9 bytes each.
- const maxFrameSize = 16384
-
- first := true
- for len(headerBlock) > 0 {
- frag := headerBlock
- if len(frag) > maxFrameSize {
- frag = frag[:maxFrameSize]
- }
- headerBlock = headerBlock[len(frag):]
- if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
- return err
- }
- first = false
- }
- return nil
-}
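
Because splitHeaderBlock always cuts at the 16 KB minimum MAX_FRAME_SIZE, an encoded block of n bytes yields ceil(n/16384) frames: one HEADERS followed by CONTINUATIONs. A standalone sketch of just the fragmentation arithmetic (nothing here comes from the bundle):

    package main

    import "fmt"

    // fragmentSizes reproduces the loop in splitHeaderBlock above for a
    // block of n bytes and the fixed 16 KB limit, returning each
    // fragment's length in order.
    func fragmentSizes(n int) []int {
        const maxFrameSize = 16384
        var sizes []int
        for n > 0 {
            f := n
            if f > maxFrameSize {
                f = maxFrameSize
            }
            sizes = append(sizes, f)
            n -= f
        }
        return sizes
    }

    func main() {
        // A 40,000-byte block becomes HEADERS plus two CONTINUATIONs.
        fmt.Println(fragmentSizes(40000)) // [16384 16384 7232]
    }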
-
-// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
-// for HTTP response headers or trailers from a server handler.
-type http2writeResHeaders struct {
- streamID uint32
- httpResCode int // 0 means no ":status" line
- h Header // may be nil
- trailers []string // if non-nil, which keys of h to write. nil means all.
- endStream bool
-
- date string
- contentType string
- contentLength string
-}
-
-func http2encKV(enc *hpack.Encoder, k, v string) {
- if http2VerboseLogs {
- log.Printf("http2: server encoding header %q = %q", k, v)
- }
- enc.WriteField(hpack.HeaderField{Name: k, Value: v})
-}
-
-func (w *http2writeResHeaders) staysWithinBuffer(max int) bool {
- // TODO: this is a common one. It'd be nice to return true
- // here and get into the fast path if we could be clever and
- // calculate the size fast enough, or at least a conservative
- // upper bound that usually fires. (Maybe if w.h and
- // w.trailers are nil, so we don't need to enumerate it.)
- // Otherwise I'm afraid that just calculating the length to
- // answer this question would be slower than the ~2µs benefit.
- return false
-}
-
-func (w *http2writeResHeaders) writeFrame(ctx http2writeContext) error {
- enc, buf := ctx.HeaderEncoder()
- buf.Reset()
-
- if w.httpResCode != 0 {
- http2encKV(enc, ":status", http2httpCodeString(w.httpResCode))
- }
-
- http2encodeHeaders(enc, w.h, w.trailers)
-
- if w.contentType != "" {
- http2encKV(enc, "content-type", w.contentType)
- }
- if w.contentLength != "" {
- http2encKV(enc, "content-length", w.contentLength)
- }
- if w.date != "" {
- http2encKV(enc, "date", w.date)
- }
-
- headerBlock := buf.Bytes()
- if len(headerBlock) == 0 && w.trailers == nil {
- panic("unexpected empty hpack")
- }
-
- return http2splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
-}
-
-func (w *http2writeResHeaders) writeHeaderBlock(ctx http2writeContext, frag []byte, firstFrag, lastFrag bool) error {
- if firstFrag {
- return ctx.Framer().WriteHeaders(http2HeadersFrameParam{
- StreamID: w.streamID,
- BlockFragment: frag,
- EndStream: w.endStream,
- EndHeaders: lastFrag,
- })
- } else {
- return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
- }
-}
-
-// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
-type http2writePushPromise struct {
- streamID uint32 // pusher stream
- method string // for :method
- url *url.URL // for :scheme, :authority, :path
- h Header
-
- // Creates an ID for a pushed stream. This runs on serveG just before
- // the frame is written. The returned ID is copied to promisedID.
- allocatePromisedID func() (uint32, error)
- promisedID uint32
-}
-
-func (w *http2writePushPromise) staysWithinBuffer(max int) bool {
- // TODO: see writeResHeaders.staysWithinBuffer
- return false
-}
-
-func (w *http2writePushPromise) writeFrame(ctx http2writeContext) error {
- enc, buf := ctx.HeaderEncoder()
- buf.Reset()
-
- http2encKV(enc, ":method", w.method)
- http2encKV(enc, ":scheme", w.url.Scheme)
- http2encKV(enc, ":authority", w.url.Host)
- http2encKV(enc, ":path", w.url.RequestURI())
- http2encodeHeaders(enc, w.h, nil)
-
- headerBlock := buf.Bytes()
- if len(headerBlock) == 0 {
- panic("unexpected empty hpack")
- }
-
- return http2splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
-}
-
-func (w *http2writePushPromise) writeHeaderBlock(ctx http2writeContext, frag []byte, firstFrag, lastFrag bool) error {
- if firstFrag {
- return ctx.Framer().WritePushPromise(http2PushPromiseParam{
- StreamID: w.streamID,
- PromiseID: w.promisedID,
- BlockFragment: frag,
- EndHeaders: lastFrag,
- })
- } else {
- return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
- }
-}
-
-type http2write100ContinueHeadersFrame struct {
- streamID uint32
-}
-
-func (w http2write100ContinueHeadersFrame) writeFrame(ctx http2writeContext) error {
- enc, buf := ctx.HeaderEncoder()
- buf.Reset()
- http2encKV(enc, ":status", "100")
- return ctx.Framer().WriteHeaders(http2HeadersFrameParam{
- StreamID: w.streamID,
- BlockFragment: buf.Bytes(),
- EndStream: false,
- EndHeaders: true,
- })
-}
-
-func (w http2write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
- // Sloppy but conservative:
- return 9+2*(len(":status")+len("100")) <= max
-}
-
-type http2writeWindowUpdate struct {
- streamID uint32 // or 0 for conn-level
- n uint32
-}
-
-func (wu http2writeWindowUpdate) staysWithinBuffer(max int) bool { return http2frameHeaderLen+4 <= max }
-
-func (wu http2writeWindowUpdate) writeFrame(ctx http2writeContext) error {
- return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
-}
-
-// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
-// is encoded only if k is in keys.
-func http2encodeHeaders(enc *hpack.Encoder, h Header, keys []string) {
- if keys == nil {
- sorter := http2sorterPool.Get().(*http2sorter)
- // Using defer here, since the keys returned by the
- // sorter.Keys method are only valid until the sorter
- // is returned:
- defer http2sorterPool.Put(sorter)
- keys = sorter.Keys(h)
- }
- for _, k := range keys {
- vv := h[k]
- k, ascii := http2lowerHeader(k)
- if !ascii {
- // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
- // field names have to be ASCII characters (just as in HTTP/1.x).
- continue
- }
- if !http2validWireHeaderFieldName(k) {
- // Skip it as backup paranoia. Per
- // golang.org/issue/14048, these should
- // already be rejected at a higher level.
- continue
- }
- isTE := k == "transfer-encoding"
- for _, v := range vv {
- if !httpguts.ValidHeaderFieldValue(v) {
- // TODO: return an error? golang.org/issue/14048
- // For now just omit it.
- continue
- }
- // TODO: more of "8.1.2.2 Connection-Specific Header Fields"
- if isTE && v != "trailers" {
- continue
- }
- http2encKV(enc, k, v)
- }
- }
-}
-
-// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
-// Methods are never called concurrently.
-type http2WriteScheduler interface {
- // OpenStream opens a new stream in the write scheduler.
- // It is illegal to call this with streamID=0 or with a streamID that is
- // already open -- the call may panic.
- OpenStream(streamID uint32, options http2OpenStreamOptions)
-
- // CloseStream closes a stream in the write scheduler. Any frames queued on
- // this stream should be discarded. It is illegal to call this on a stream
- // that is not open -- the call may panic.
- CloseStream(streamID uint32)
-
- // AdjustStream adjusts the priority of the given stream. This may be called
- // on a stream that has not yet been opened or has been closed. Note that
- // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
- // https://tools.ietf.org/html/rfc7540#section-5.1
- AdjustStream(streamID uint32, priority http2PriorityParam)
-
- // Push queues a frame in the scheduler. In most cases, this will not be
- // called with wr.StreamID()!=0 unless that stream is currently open. The one
- // exception is RST_STREAM frames, which may be sent on idle or closed streams.
- Push(wr http2FrameWriteRequest)
-
- // Pop dequeues the next frame to write. Returns false if no frames can
- // be written. Frames with a given wr.StreamID() are Pop'd in the same
- // order they are Push'd, except RST_STREAM frames. No frames should be
- // discarded except by CloseStream.
- Pop() (wr http2FrameWriteRequest, ok bool)
-}
-
-// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
-type http2OpenStreamOptions struct {
- // PusherID is zero if the stream was initiated by the client. Otherwise,
- // PusherID names the stream that pushed the newly opened stream.
- PusherID uint32
-}
-
-// FrameWriteRequest is a request to write a frame.
-type http2FrameWriteRequest struct {
- // write is the interface value that does the writing, once the
- // WriteScheduler has selected this frame to write. The write
- // functions are all defined in write.go.
- write http2writeFramer
-
- // stream is the stream on which this frame will be written.
- // nil for non-stream frames like PING and SETTINGS.
- // nil for RST_STREAM streams, which use the StreamError.StreamID field instead.
- stream *http2stream
-
- // done, if non-nil, must be a buffered channel with space for
- // 1 message and is sent the return value from write (or an
- // earlier error) when the frame has been written.
- done chan error
-}
-
-// StreamID returns the id of the stream this frame will be written to.
-// 0 is used for non-stream frames such as PING and SETTINGS.
-func (wr http2FrameWriteRequest) StreamID() uint32 {
- if wr.stream == nil {
- if se, ok := wr.write.(http2StreamError); ok {
- // (*serverConn).resetStream doesn't set
- // stream because it doesn't necessarily have
- // one. So special case this type of write
- // message.
- return se.StreamID
- }
- return 0
- }
- return wr.stream.id
-}
-
-// isControl reports whether wr is a control frame for MaxQueuedControlFrames
-// purposes. That includes non-stream frames and RST_STREAM frames.
-func (wr http2FrameWriteRequest) isControl() bool {
- return wr.stream == nil
-}
-
-// DataSize returns the number of flow control bytes that must be consumed
-// to write this entire frame. This is 0 for non-DATA frames.
-func (wr http2FrameWriteRequest) DataSize() int {
- if wd, ok := wr.write.(*http2writeData); ok {
- return len(wd.p)
- }
- return 0
-}
-
-// Consume consumes min(n, available) bytes from this frame, where available
-// is the number of flow control bytes available on the stream. Consume returns
-// 0, 1, or 2 frames, where the integer return value gives the number of frames
-// returned.
-//
-// If flow control prevents consuming any bytes, this returns (_, _, 0). If
-// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
-// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
-// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
-// underlying stream's flow control budget.
-func (wr http2FrameWriteRequest) Consume(n int32) (http2FrameWriteRequest, http2FrameWriteRequest, int) {
- var empty http2FrameWriteRequest
-
- // Non-DATA frames are always consumed whole.
- wd, ok := wr.write.(*http2writeData)
- if !ok || len(wd.p) == 0 {
- return wr, empty, 1
- }
-
- // Might need to split after applying limits.
- allowed := wr.stream.flow.available()
- if n < allowed {
- allowed = n
- }
- if wr.stream.sc.maxFrameSize < allowed {
- allowed = wr.stream.sc.maxFrameSize
- }
- if allowed <= 0 {
- return empty, empty, 0
- }
- if len(wd.p) > int(allowed) {
- wr.stream.flow.take(allowed)
- consumed := http2FrameWriteRequest{
- stream: wr.stream,
- write: &http2writeData{
- streamID: wd.streamID,
- p: wd.p[:allowed],
- // Even if the original had endStream set, there
- // are bytes remaining because len(wd.p) > allowed,
- // so we know endStream is false.
- endStream: false,
- },
- // Our caller is blocking on the final DATA frame, not
- // this intermediate frame, so no need to wait.
- done: nil,
- }
- rest := http2FrameWriteRequest{
- stream: wr.stream,
- write: &http2writeData{
- streamID: wd.streamID,
- p: wd.p[allowed:],
- endStream: wd.endStream,
- },
- done: wr.done,
- }
- return consumed, rest, 2
- }
-
- // The frame is consumed whole.
- // NB: This cast cannot overflow because allowed is <= math.MaxInt32.
- wr.stream.flow.take(int32(len(wd.p)))
- return wr, empty, 1
-}
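
The heart of Consume is a clamp to the minimum of three limits: the scheduler's budget n, the stream's flow-control window, and the peer's maximum frame size. Only when the DATA payload exceeds that clamp does the request split in two. A toy version of the clamp, with assumed parameter names (not the bundle's types):

    package main

    import "fmt"

    // allowedBytes mirrors the limit computation at the top of Consume:
    // the smallest of the scheduler budget, the stream window, and the
    // peer's advertised max frame size.
    func allowedBytes(n, streamWindow, maxFrameSize int32) int32 {
        allowed := streamWindow
        if n < allowed {
            allowed = n
        }
        if maxFrameSize < allowed {
            allowed = maxFrameSize
        }
        return allowed
    }

    func main() {
        // Budget 1 MiB, window 70,000 bytes, 16 KiB frames: 16384 bytes
        // are sent now and the rest stays queued.
        fmt.Println(allowedBytes(1<<20, 70000, 16384))
    }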
-
-// String is for debugging only.
-func (wr http2FrameWriteRequest) String() string {
- var des string
- if s, ok := wr.write.(fmt.Stringer); ok {
- des = s.String()
- } else {
- des = fmt.Sprintf("%T", wr.write)
- }
- return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des)
-}
-
-// replyToWriter sends err to wr.done and panics if the send must block.
-// This does nothing if wr.done is nil.
-func (wr *http2FrameWriteRequest) replyToWriter(err error) {
- if wr.done == nil {
- return
- }
- select {
- case wr.done <- err:
- default:
- panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
- }
- wr.write = nil // prevent use (assume it's tainted after wr.done send)
-}
-
-// writeQueue is used by implementations of WriteScheduler.
-type http2writeQueue struct {
- s []http2FrameWriteRequest
-}
-
-func (q *http2writeQueue) empty() bool { return len(q.s) == 0 }
-
-func (q *http2writeQueue) push(wr http2FrameWriteRequest) {
- q.s = append(q.s, wr)
-}
-
-func (q *http2writeQueue) shift() http2FrameWriteRequest {
- if len(q.s) == 0 {
- panic("invalid use of queue")
- }
- wr := q.s[0]
- // TODO: less copy-happy queue.
- copy(q.s, q.s[1:])
- q.s[len(q.s)-1] = http2FrameWriteRequest{}
- q.s = q.s[:len(q.s)-1]
- return wr
-}
-
-// consume consumes up to n bytes from q.s[0]. If the frame is
-// entirely consumed, it is removed from the queue. If the frame
-// is partially consumed, the frame is kept with the consumed
-// bytes removed. Returns true iff any bytes were consumed.
-func (q *http2writeQueue) consume(n int32) (http2FrameWriteRequest, bool) {
- if len(q.s) == 0 {
- return http2FrameWriteRequest{}, false
- }
- consumed, rest, numresult := q.s[0].Consume(n)
- switch numresult {
- case 0:
- return http2FrameWriteRequest{}, false
- case 1:
- q.shift()
- case 2:
- q.s[0] = rest
- }
- return consumed, true
-}
-
-type http2writeQueuePool []*http2writeQueue
-
-// put inserts an unused writeQueue into the pool.
-func (p *http2writeQueuePool) put(q *http2writeQueue) {
- for i := range q.s {
- q.s[i] = http2FrameWriteRequest{}
- }
- q.s = q.s[:0]
- *p = append(*p, q)
-}
-
-// get returns an empty writeQueue.
-func (p *http2writeQueuePool) get() *http2writeQueue {
- ln := len(*p)
- if ln == 0 {
- return new(http2writeQueue)
- }
- x := ln - 1
- q := (*p)[x]
- (*p)[x] = nil
- *p = (*p)[:x]
- return q
-}
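
put and get treat the slice as a LIFO free list: put zeroes the queue's entries so pooled memory stops pinning old FrameWriteRequests, then stacks the queue; get pops the most recently returned one or allocates fresh. The same pattern in a generic, runnable form (stand-in types, a sketch rather than the bundle's code):

    package main

    import "fmt"

    type queue struct{ s []int }

    type queuePool []*queue

    // put truncates the queue and stacks it for reuse. The bundle's
    // version also zeroes each entry first, since its elements hold
    // pointers that should not outlive their frame.
    func (p *queuePool) put(q *queue) {
        q.s = q.s[:0]
        *p = append(*p, q)
    }

    // get pops the most recently returned queue, or allocates a new one.
    func (p *queuePool) get() *queue {
        if len(*p) == 0 {
            return new(queue)
        }
        x := len(*p) - 1
        q := (*p)[x]
        (*p)[x] = nil
        *p = (*p)[:x]
        return q
    }

    func main() {
        var p queuePool
        q := p.get()
        q.s = append(q.s, 1, 2, 3)
        p.put(q)
        fmt.Println(len(p), len(q.s)) // 1 0
    }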
-
-// RFC 7540, Section 5.3.5: the default weight is 16.
-const http2priorityDefaultWeight = 15 // 16 = 15 + 1
-
-// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
-type http2PriorityWriteSchedulerConfig struct {
- // MaxClosedNodesInTree controls the maximum number of closed streams to
- // retain in the priority tree. Setting this to zero saves a small amount
- // of memory at the cost of performance.
- //
- // See RFC 7540, Section 5.3.4:
- // "It is possible for a stream to become closed while prioritization
- // information ... is in transit. ... This potentially creates suboptimal
- // prioritization, since the stream could be given a priority that is
- // different from what is intended. To avoid these problems, an endpoint
- // SHOULD retain stream prioritization state for a period after streams
- // become closed. The longer state is retained, the lower the chance that
- // streams are assigned incorrect or default priority values."
- MaxClosedNodesInTree int
-
- // MaxIdleNodesInTree controls the maximum number of idle streams to
- // retain in the priority tree. Setting this to zero saves a small amount
- // of memory at the cost of performance.
- //
- // See RFC 7540, Section 5.3.4:
- // Similarly, streams that are in the "idle" state can be assigned
- // priority or become a parent of other streams. This allows for the
- // creation of a grouping node in the dependency tree, which enables
- // more flexible expressions of priority. Idle streams begin with a
- // default priority (Section 5.3.5).
- MaxIdleNodesInTree int
-
- // ThrottleOutOfOrderWrites enables write throttling to help ensure that
- // data is delivered in priority order. This works around a race where
- // stream B depends on stream A and both streams are about to call Write
- // to queue DATA frames. If B wins the race, a naive scheduler would eagerly
- // write as much data from B as possible, but this is suboptimal because A
- // is a higher-priority stream. With throttling enabled, we write a small
- // amount of data from B to minimize the amount of bandwidth that B can
- // steal from A.
- ThrottleOutOfOrderWrites bool
-}
-
-// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
-// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
-// If cfg is nil, default options are used.
-func http2NewPriorityWriteScheduler(cfg *http2PriorityWriteSchedulerConfig) http2WriteScheduler {
- if cfg == nil {
- // For justification of these defaults, see:
- // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
- cfg = &http2PriorityWriteSchedulerConfig{
- MaxClosedNodesInTree: 10,
- MaxIdleNodesInTree: 10,
- ThrottleOutOfOrderWrites: false,
- }
- }
-
- ws := &http2priorityWriteScheduler{
- nodes: make(map[uint32]*http2priorityNode),
- maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
- maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
- enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
- }
- ws.nodes[0] = &ws.root
- if cfg.ThrottleOutOfOrderWrites {
- ws.writeThrottleLimit = 1024
- } else {
- ws.writeThrottleLimit = math.MaxInt32
- }
- return ws
-}
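
These are the bundled (unexported) copies of golang.org/x/net/http2's exported scheduler API. With the standalone package, a server opts into priority scheduling roughly as below; cert.pem and key.pem are placeholder paths, and a nil config picks the defaults shown above:

    package main

    import (
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        srv := &http.Server{Addr: ":8443"}
        // NewWriteScheduler chooses the scheduler for outgoing frames;
        // passing nil to NewPriorityWriteScheduler uses the defaults
        // (10 closed nodes, 10 idle nodes, no write throttling).
        err := http2.ConfigureServer(srv, &http2.Server{
            NewWriteScheduler: func() http2.WriteScheduler {
                return http2.NewPriorityWriteScheduler(nil)
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
    }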
-
-type http2priorityNodeState int
-
-const (
- http2priorityNodeOpen http2priorityNodeState = iota
- http2priorityNodeClosed
- http2priorityNodeIdle
-)
-
-// priorityNode is a node in an HTTP/2 priority tree.
-// Each node is associated with a single stream ID.
-// See RFC 7540, Section 5.3.
-type http2priorityNode struct {
- q http2writeQueue // queue of pending frames to write
- id uint32 // id of the stream, or 0 for the root of the tree
- weight uint8 // the actual weight is weight+1, so the value is in [1,256]
- state http2priorityNodeState // open | closed | idle
- bytes int64 // number of bytes written by this node, or 0 if closed
- subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
-
- // These links form the priority tree.
- parent *http2priorityNode
- kids *http2priorityNode // start of the kids list
- prev, next *http2priorityNode // doubly-linked list of siblings
-}
-
-func (n *http2priorityNode) setParent(parent *http2priorityNode) {
- if n == parent {
- panic("setParent to self")
- }
- if n.parent == parent {
- return
- }
- // Unlink from current parent.
- if parent := n.parent; parent != nil {
- if n.prev == nil {
- parent.kids = n.next
- } else {
- n.prev.next = n.next
- }
- if n.next != nil {
- n.next.prev = n.prev
- }
- }
- // Link to new parent.
- // If parent=nil, remove n from the tree.
- // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
- n.parent = parent
- if parent == nil {
- n.next = nil
- n.prev = nil
- } else {
- n.next = parent.kids
- n.prev = nil
- if n.next != nil {
- n.next.prev = n
- }
- parent.kids = n
- }
-}
-
-func (n *http2priorityNode) addBytes(b int64) {
- n.bytes += b
- for ; n != nil; n = n.parent {
- n.subtreeBytes += b
- }
-}
-
-// walkReadyInOrder iterates over the tree in priority order, calling f for each node
-// with a non-empty write queue. When f returns true, this function returns true and the
-// walk halts. tmp is used as scratch space for sorting.
-//
-// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
-// if any ancestor p of n is still open (ignoring the root node).
-func (n *http2priorityNode) walkReadyInOrder(openParent bool, tmp *[]*http2priorityNode, f func(*http2priorityNode, bool) bool) bool {
- if !n.q.empty() && f(n, openParent) {
- return true
- }
- if n.kids == nil {
- return false
- }
-
- // Don't consider the root "open" when updating openParent since
- // we can't send data frames on the root stream (only control frames).
- if n.id != 0 {
- openParent = openParent || (n.state == http2priorityNodeOpen)
- }
-
- // Common case: only one kid or all kids have the same weight.
- // Some clients don't use weights; other clients (like web browsers)
- // use mostly-linear priority trees.
- w := n.kids.weight
- needSort := false
- for k := n.kids.next; k != nil; k = k.next {
- if k.weight != w {
- needSort = true
- break
- }
- }
- if !needSort {
- for k := n.kids; k != nil; k = k.next {
- if k.walkReadyInOrder(openParent, tmp, f) {
- return true
- }
- }
- return false
- }
-
- // Uncommon case: sort the child nodes. We remove the kids from the parent,
- // then re-insert after sorting so we can reuse tmp for future sort calls.
- *tmp = (*tmp)[:0]
- for n.kids != nil {
- *tmp = append(*tmp, n.kids)
- n.kids.setParent(nil)
- }
- sort.Sort(http2sortPriorityNodeSiblings(*tmp))
- for i := len(*tmp) - 1; i >= 0; i-- {
- (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
- }
- for k := n.kids; k != nil; k = k.next {
- if k.walkReadyInOrder(openParent, tmp, f) {
- return true
- }
- }
- return false
-}
-
-type http2sortPriorityNodeSiblings []*http2priorityNode
-
-func (z http2sortPriorityNodeSiblings) Len() int { return len(z) }
-
-func (z http2sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
-
-func (z http2sortPriorityNodeSiblings) Less(i, k int) bool {
- // Prefer the subtree that has sent fewer bytes relative to its weight.
- // See sections 5.3.2 and 5.3.4.
- wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
- wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
- if bi == 0 && bk == 0 {
- return wi >= wk
- }
- if bk == 0 {
- return false
- }
- return bi/bk <= wi/wk
-}
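
Less encodes RFC 7540's proportional sharing: sibling i is preferred when its bytes-sent-to-weight ratio does not exceed k's, i.e. bi/bk <= wi/wk. A standalone check of that rule for two siblings with effective weights 16 and 8:

    package main

    import "fmt"

    // prefer reports whether a sibling that has sent bi bytes at weight
    // wi should be served before one that has sent bk bytes at weight
    // wk, following the ratio test in Less above.
    func prefer(wi, bi, wk, bk float64) bool {
        if bi == 0 && bk == 0 {
            return wi >= wk
        }
        if bk == 0 {
            return false
        }
        return bi/bk <= wi/wk
    }

    func main() {
        // After 300 vs 100 bytes, the weight-16 stream has used 3x the
        // bandwidth but is entitled to only 2x, so its weight-8 sibling
        // goes first.
        fmt.Println(prefer(16, 300, 8, 100)) // false
        fmt.Println(prefer(8, 100, 16, 300)) // true
    }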
-
-type http2priorityWriteScheduler struct {
- // root is the root of the priority tree, where root.id = 0.
- // The root queues control frames that are not associated with any stream.
- root http2priorityNode
-
- // nodes maps stream ids to priority tree nodes.
- nodes map[uint32]*http2priorityNode
-
- // maxID is the maximum stream id in nodes.
- maxID uint32
-
- // lists of nodes that have been closed or are idle, but are kept in
- // the tree for improved prioritization. When the lengths exceed either
- // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
- closedNodes, idleNodes []*http2priorityNode
-
- // From the config.
- maxClosedNodesInTree int
- maxIdleNodesInTree int
- writeThrottleLimit int32
- enableWriteThrottle bool
-
- // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
- tmp []*http2priorityNode
-
- // pool of empty queues for reuse.
- queuePool http2writeQueuePool
-}
-
-func (ws *http2priorityWriteScheduler) OpenStream(streamID uint32, options http2OpenStreamOptions) {
- // The stream may currently be idle, but must not already be open or closed.
- if curr := ws.nodes[streamID]; curr != nil {
- if curr.state != http2priorityNodeIdle {
- panic(fmt.Sprintf("stream %d already opened", streamID))
- }
- curr.state = http2priorityNodeOpen
- return
- }
-
- // RFC 7540, Section 5.3.5:
- // "All streams are initially assigned a non-exclusive dependency on stream 0x0.
- // Pushed streams initially depend on their associated stream. In both cases,
- // streams are assigned a default weight of 16."
- parent := ws.nodes[options.PusherID]
- if parent == nil {
- parent = &ws.root
- }
- n := &http2priorityNode{
- q: *ws.queuePool.get(),
- id: streamID,
- weight: http2priorityDefaultWeight,
- state: http2priorityNodeOpen,
- }
- n.setParent(parent)
- ws.nodes[streamID] = n
- if streamID > ws.maxID {
- ws.maxID = streamID
- }
-}
-
-func (ws *http2priorityWriteScheduler) CloseStream(streamID uint32) {
- if streamID == 0 {
- panic("violation of WriteScheduler interface: cannot close stream 0")
- }
- if ws.nodes[streamID] == nil {
- panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
- }
- if ws.nodes[streamID].state != http2priorityNodeOpen {
- panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
- }
-
- n := ws.nodes[streamID]
- n.state = http2priorityNodeClosed
- n.addBytes(-n.bytes)
-
- q := n.q
- ws.queuePool.put(&q)
- n.q.s = nil
- if ws.maxClosedNodesInTree > 0 {
- ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
- } else {
- ws.removeNode(n)
- }
-}
-
-func (ws *http2priorityWriteScheduler) AdjustStream(streamID uint32, priority http2PriorityParam) {
- if streamID == 0 {
- panic("adjustPriority on root")
- }
-
- // If streamID does not exist, there are two cases:
- // - A closed stream that has been removed (this will have ID <= maxID)
- // - An idle stream that is being used for "grouping" (this will have ID > maxID)
- n := ws.nodes[streamID]
- if n == nil {
- if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
- return
- }
- ws.maxID = streamID
- n = &http2priorityNode{
- q: *ws.queuePool.get(),
- id: streamID,
- weight: http2priorityDefaultWeight,
- state: http2priorityNodeIdle,
- }
- n.setParent(&ws.root)
- ws.nodes[streamID] = n
- ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
- }
-
- // Section 5.3.1: A dependency on a stream that is not currently in the tree
- // results in that stream being given a default priority (Section 5.3.5).
- parent := ws.nodes[priority.StreamDep]
- if parent == nil {
- n.setParent(&ws.root)
- n.weight = http2priorityDefaultWeight
- return
- }
-
- // Ignore if the client tries to make a node its own parent.
- if n == parent {
- return
- }
-
- // Section 5.3.3:
- // "If a stream is made dependent on one of its own dependencies, the
- // formerly dependent stream is first moved to be dependent on the
- // reprioritized stream's previous parent. The moved dependency retains
- // its weight."
- //
- // That is: if parent depends on n, move parent to depend on n.parent.
- for x := parent.parent; x != nil; x = x.parent {
- if x == n {
- parent.setParent(n.parent)
- break
- }
- }
-
- // Section 5.3.3: The exclusive flag causes the stream to become the sole
- // dependency of its parent stream, causing other dependencies to become
- // dependent on the exclusive stream.
- if priority.Exclusive {
- k := parent.kids
- for k != nil {
- next := k.next
- if k != n {
- k.setParent(n)
- }
- k = next
- }
- }
-
- n.setParent(parent)
- n.weight = priority.Weight
-}
-
-func (ws *http2priorityWriteScheduler) Push(wr http2FrameWriteRequest) {
- var n *http2priorityNode
- if id := wr.StreamID(); id == 0 {
- n = &ws.root
- } else {
- n = ws.nodes[id]
- if n == nil {
- // id is an idle or closed stream. wr should not be a HEADERS or
- // DATA frame. However, wr can be a RST_STREAM. In this case, we
- // push wr onto the root, rather than creating a new priorityNode,
- // since RST_STREAM is tiny and the stream's priority is unknown
- // anyway. See issue #17919.
- if wr.DataSize() > 0 {
- panic("add DATA on non-open stream")
- }
- n = &ws.root
- }
- }
- n.q.push(wr)
-}
-
-func (ws *http2priorityWriteScheduler) Pop() (wr http2FrameWriteRequest, ok bool) {
- ws.root.walkReadyInOrder(false, &ws.tmp, func(n *http2priorityNode, openParent bool) bool {
- limit := int32(math.MaxInt32)
- if openParent {
- limit = ws.writeThrottleLimit
- }
- wr, ok = n.q.consume(limit)
- if !ok {
- return false
- }
- n.addBytes(int64(wr.DataSize()))
- // If B depends on A and B continuously has data available but A
- // does not, gradually increase the throttling limit to allow B to
- // steal more and more bandwidth from A.
- if openParent {
- ws.writeThrottleLimit += 1024
- if ws.writeThrottleLimit < 0 {
- ws.writeThrottleLimit = math.MaxInt32
- }
- } else if ws.enableWriteThrottle {
- ws.writeThrottleLimit = 1024
- }
- return true
- })
- return wr, ok
-}
-
-func (ws *http2priorityWriteScheduler) addClosedOrIdleNode(list *[]*http2priorityNode, maxSize int, n *http2priorityNode) {
- if maxSize == 0 {
- return
- }
- if len(*list) == maxSize {
- // Remove the oldest node, then shift left.
- ws.removeNode((*list)[0])
- x := (*list)[1:]
- copy(*list, x)
- *list = (*list)[:len(x)]
- }
- *list = append(*list, n)
-}
-
-func (ws *http2priorityWriteScheduler) removeNode(n *http2priorityNode) {
- for k := n.kids; k != nil; k = k.next {
- k.setParent(n.parent)
- }
- n.setParent(nil)
- delete(ws.nodes, n.id)
-}
-
-// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
-// priorities. Control frames like SETTINGS and PING are written before DATA
-// frames, but if no control frames are queued and multiple streams have queued
-// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
-func http2NewRandomWriteScheduler() http2WriteScheduler {
- return &http2randomWriteScheduler{sq: make(map[uint32]*http2writeQueue)}
-}
-
-type http2randomWriteScheduler struct {
- // zero queues frames not associated with a specific stream.
- zero http2writeQueue
-
- // sq contains the stream-specific queues, keyed by stream ID.
- // When a stream is idle, closed, or emptied, it's deleted
- // from the map.
- sq map[uint32]*http2writeQueue
-
- // pool of empty queues for reuse.
- queuePool http2writeQueuePool
-}
-
-func (ws *http2randomWriteScheduler) OpenStream(streamID uint32, options http2OpenStreamOptions) {
- // no-op: idle streams are not tracked
-}
-
-func (ws *http2randomWriteScheduler) CloseStream(streamID uint32) {
- q, ok := ws.sq[streamID]
- if !ok {
- return
- }
- delete(ws.sq, streamID)
- ws.queuePool.put(q)
-}
-
-func (ws *http2randomWriteScheduler) AdjustStream(streamID uint32, priority http2PriorityParam) {
- // no-op: priorities are ignored
-}
-
-func (ws *http2randomWriteScheduler) Push(wr http2FrameWriteRequest) {
- if wr.isControl() {
- ws.zero.push(wr)
- return
- }
- id := wr.StreamID()
- q, ok := ws.sq[id]
- if !ok {
- q = ws.queuePool.get()
- ws.sq[id] = q
- }
- q.push(wr)
-}
-
-func (ws *http2randomWriteScheduler) Pop() (http2FrameWriteRequest, bool) {
- // Control and RST_STREAM frames first.
- if !ws.zero.empty() {
- return ws.zero.shift(), true
- }
- // Iterate over all non-idle streams until finding one that can be consumed.
- for streamID, q := range ws.sq {
- if wr, ok := q.consume(math.MaxInt32); ok {
- if q.empty() {
- delete(ws.sq, streamID)
- ws.queuePool.put(q)
- }
- return wr, true
- }
- }
- return http2FrameWriteRequest{}, false
-}
diff --git a/contrib/go/_std_1.18/src/net/http/header.go b/contrib/go/_std_1.18/src/net/http/header.go
deleted file mode 100644
index 6437f2d2c0..0000000000
--- a/contrib/go/_std_1.18/src/net/http/header.go
+++ /dev/null
@@ -1,279 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "io"
- "net/http/httptrace"
- "net/http/internal/ascii"
- "net/textproto"
- "sort"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/http/httpguts"
-)
-
-// A Header represents the key-value pairs in an HTTP header.
-//
-// The keys should be in canonical form, as returned by
-// CanonicalHeaderKey.
-type Header map[string][]string
-
-// Add adds the key, value pair to the header.
-// It appends to any existing values associated with key.
-// The key is case insensitive; it is canonicalized by
-// CanonicalHeaderKey.
-func (h Header) Add(key, value string) {
- textproto.MIMEHeader(h).Add(key, value)
-}
-
-// Set sets the header entries associated with key to the
-// single element value. It replaces any existing values
-// associated with key. The key is case insensitive; it is
-// canonicalized by textproto.CanonicalMIMEHeaderKey.
-// To use non-canonical keys, assign to the map directly.
-func (h Header) Set(key, value string) {
- textproto.MIMEHeader(h).Set(key, value)
-}
-
-// Get gets the first value associated with the given key. If
-// there are no values associated with the key, Get returns "".
-// It is case insensitive; textproto.CanonicalMIMEHeaderKey is
-// used to canonicalize the provided key. To use non-canonical keys,
-// access the map directly.
-func (h Header) Get(key string) string {
- return textproto.MIMEHeader(h).Get(key)
-}
-
-// Values returns all values associated with the given key.
-// It is case insensitive; textproto.CanonicalMIMEHeaderKey is
-// used to canonicalize the provided key. To use non-canonical
-// keys, access the map directly.
-// The returned slice is not a copy.
-func (h Header) Values(key string) []string {
- return textproto.MIMEHeader(h).Values(key)
-}
-
-// get is like Get, but key must already be in CanonicalHeaderKey form.
-func (h Header) get(key string) string {
- if v := h[key]; len(v) > 0 {
- return v[0]
- }
- return ""
-}
-
-// has reports whether h has the provided key defined, even if it's
-// set to a 0-length slice.
-func (h Header) has(key string) bool {
- _, ok := h[key]
- return ok
-}
-
-// Del deletes the values associated with key.
-// The key is case insensitive; it is canonicalized by
-// CanonicalHeaderKey.
-func (h Header) Del(key string) {
- textproto.MIMEHeader(h).Del(key)
-}
-
-// Write writes a header in wire format.
-func (h Header) Write(w io.Writer) error {
- return h.write(w, nil)
-}
-
-func (h Header) write(w io.Writer, trace *httptrace.ClientTrace) error {
- return h.writeSubset(w, nil, trace)
-}
-
-// Clone returns a copy of h or nil if h is nil.
-func (h Header) Clone() Header {
- if h == nil {
- return nil
- }
-
- // Find total number of values.
- nv := 0
- for _, vv := range h {
- nv += len(vv)
- }
- sv := make([]string, nv) // shared backing array for headers' values
- h2 := make(Header, len(h))
- for k, vv := range h {
- if vv == nil {
- // Preserve nil values. ReverseProxy distinguishes
- // between nil and zero-length header values.
- h2[k] = nil
- continue
- }
- n := copy(sv, vv)
- h2[k] = sv[:n:n]
- sv = sv[n:]
- }
- return h2
-}
-
-var timeFormats = []string{
- TimeFormat,
- time.RFC850,
- time.ANSIC,
-}
-
-// ParseTime parses a time header (such as the Date: header),
-// trying each of the three formats allowed by HTTP/1.1:
-// TimeFormat, time.RFC850, and time.ANSIC.
-func ParseTime(text string) (t time.Time, err error) {
- for _, layout := range timeFormats {
- t, err = time.Parse(layout, text)
- if err == nil {
- return
- }
- }
- return
-}
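
A short usage sketch: all three accepted layouts decode the same instant (the classic RFC example date):

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        for _, s := range []string{
            "Sun, 06 Nov 1994 08:49:37 GMT",  // TimeFormat
            "Sunday, 06-Nov-94 08:49:37 GMT", // time.RFC850
            "Sun Nov  6 08:49:37 1994",       // time.ANSIC
        } {
            t, err := http.ParseTime(s)
            fmt.Println(t.UTC(), err)
        }
    }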
-
-var headerNewlineToSpace = strings.NewReplacer("\n", " ", "\r", " ")
-
-// stringWriter implements WriteString on a Writer.
-type stringWriter struct {
- w io.Writer
-}
-
-func (w stringWriter) WriteString(s string) (n int, err error) {
- return w.w.Write([]byte(s))
-}
-
-type keyValues struct {
- key string
- values []string
-}
-
-// A headerSorter implements sort.Interface by sorting a []keyValues
-// by key. It's used as a pointer, so it can fit in a sort.Interface
-// interface value without allocation.
-type headerSorter struct {
- kvs []keyValues
-}
-
-func (s *headerSorter) Len() int { return len(s.kvs) }
-func (s *headerSorter) Swap(i, j int) { s.kvs[i], s.kvs[j] = s.kvs[j], s.kvs[i] }
-func (s *headerSorter) Less(i, j int) bool { return s.kvs[i].key < s.kvs[j].key }
-
-var headerSorterPool = sync.Pool{
- New: func() any { return new(headerSorter) },
-}
-
-// sortedKeyValues returns h's keys sorted in the returned kvs
-// slice. The headerSorter used to sort is also returned, for possible
-// return to headerSorterPool.
-func (h Header) sortedKeyValues(exclude map[string]bool) (kvs []keyValues, hs *headerSorter) {
- hs = headerSorterPool.Get().(*headerSorter)
- if cap(hs.kvs) < len(h) {
- hs.kvs = make([]keyValues, 0, len(h))
- }
- kvs = hs.kvs[:0]
- for k, vv := range h {
- if !exclude[k] {
- kvs = append(kvs, keyValues{k, vv})
- }
- }
- hs.kvs = kvs
- sort.Sort(hs)
- return kvs, hs
-}
-
-// WriteSubset writes a header in wire format.
-// If exclude is not nil, keys where exclude[key] == true are not written.
-// Keys are not canonicalized before checking the exclude map.
-func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error {
- return h.writeSubset(w, exclude, nil)
-}
-
-func (h Header) writeSubset(w io.Writer, exclude map[string]bool, trace *httptrace.ClientTrace) error {
- ws, ok := w.(io.StringWriter)
- if !ok {
- ws = stringWriter{w}
- }
- kvs, sorter := h.sortedKeyValues(exclude)
- var formattedVals []string
- for _, kv := range kvs {
- if !httpguts.ValidHeaderFieldName(kv.key) {
- // This could be an error. In the common case of
- // writing response headers, however, we have no good
- // way to provide the error back to the server
- // handler, so just drop invalid headers instead.
- continue
- }
- for _, v := range kv.values {
- v = headerNewlineToSpace.Replace(v)
- v = textproto.TrimString(v)
- for _, s := range []string{kv.key, ": ", v, "\r\n"} {
- if _, err := ws.WriteString(s); err != nil {
- headerSorterPool.Put(sorter)
- return err
- }
- }
- if trace != nil && trace.WroteHeaderField != nil {
- formattedVals = append(formattedVals, v)
- }
- }
- if trace != nil && trace.WroteHeaderField != nil {
- trace.WroteHeaderField(kv.key, formattedVals)
- formattedVals = nil
- }
- }
- headerSorterPool.Put(sorter)
- return nil
-}
-
-// CanonicalHeaderKey returns the canonical format of the
-// header key s. The canonicalization converts the first
-// letter and any letter following a hyphen to upper case;
-// the rest are converted to lowercase. For example, the
-// canonical key for "accept-encoding" is "Accept-Encoding".
-// If s contains a space or invalid header field bytes, it is
-// returned without modifications.
-func CanonicalHeaderKey(s string) string { return textproto.CanonicalMIMEHeaderKey(s) }
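
A quick usage sketch of the canonicalization rule, including the pass-through for invalid keys:

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        fmt.Println(http.CanonicalHeaderKey("accept-encoding")) // Accept-Encoding
        fmt.Println(http.CanonicalHeaderKey("x-forwarded-for")) // X-Forwarded-For
        // Keys with spaces or other invalid bytes come back unchanged.
        fmt.Println(http.CanonicalHeaderKey("bad key")) // bad key
    }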
-
-// hasToken reports whether token appears within v, ASCII
-// case-insensitive, with space or comma boundaries.
-// token must be all lowercase.
-// v may contain mixed case.
-func hasToken(v, token string) bool {
- if len(token) > len(v) || token == "" {
- return false
- }
- if v == token {
- return true
- }
- for sp := 0; sp <= len(v)-len(token); sp++ {
- // Check that first character is good.
- // The token is ASCII, so checking only a single byte
- // is sufficient. We skip this potential starting
- // position if both the first byte and its potential
- // ASCII uppercase equivalent (b|0x20) don't match.
- // False positives ('^' => '~') are caught by EqualFold.
- if b := v[sp]; b != token[0] && b|0x20 != token[0] {
- continue
- }
- // Check that start pos is on a valid token boundary.
- if sp > 0 && !isTokenBoundary(v[sp-1]) {
- continue
- }
- // Check that end pos is on a valid token boundary.
- if endPos := sp + len(token); endPos != len(v) && !isTokenBoundary(v[endPos]) {
- continue
- }
- if ascii.EqualFold(v[sp:sp+len(token)], token) {
- return true
- }
- }
- return false
-}
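
hasToken itself is unexported, but the same boundary-aware, ASCII case-insensitive matching is available through golang.org/x/net/http/httpguts, which this file already uses elsewhere:

    package main

    import (
        "fmt"

        "golang.org/x/net/http/httpguts"
    )

    func main() {
        vals := []string{"Keep-Alive, Close"}
        // "close" matches on comma/space boundaries, case-insensitively;
        // a mere substring like "clo" does not.
        fmt.Println(httpguts.HeaderValuesContainsToken(vals, "close")) // true
        fmt.Println(httpguts.HeaderValuesContainsToken(vals, "clo"))   // false
    }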
-
-func isTokenBoundary(b byte) bool {
- return b == ' ' || b == ',' || b == '\t'
-}
diff --git a/contrib/go/_std_1.18/src/net/http/internal/chunked.go b/contrib/go/_std_1.18/src/net/http/internal/chunked.go
deleted file mode 100644
index 37a72e9031..0000000000
--- a/contrib/go/_std_1.18/src/net/http/internal/chunked.go
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// The wire protocol for HTTP's "chunked" Transfer-Encoding.
-
-// Package internal contains HTTP internals shared by net/http and
-// net/http/httputil.
-package internal
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
-)
-
-const maxLineLength = 4096 // assumed <= bufio.defaultBufSize
-
-var ErrLineTooLong = errors.New("header line too long")
-
-// NewChunkedReader returns a new chunkedReader that translates the data read from r
-// out of HTTP "chunked" format before returning it.
-// The chunkedReader returns io.EOF when the final 0-length chunk is read.
-//
-// NewChunkedReader is not needed by normal applications. The http package
-// automatically decodes chunking when reading response bodies.
-func NewChunkedReader(r io.Reader) io.Reader {
- br, ok := r.(*bufio.Reader)
- if !ok {
- br = bufio.NewReader(r)
- }
- return &chunkedReader{r: br}
-}
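
This internal package cannot be imported directly, but net/http/httputil exposes the same chunked reader and writer. A round-trip sketch of the wire format:

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "net/http/httputil"
    )

    func main() {
        var buf bytes.Buffer
        w := httputil.NewChunkedWriter(&buf)
        io.WriteString(w, "hello, ")
        io.WriteString(w, "world")
        w.Close() // writes the terminating 0-length chunk

        fmt.Printf("%q\n", buf.String())
        // "7\r\nhello, \r\n5\r\nworld\r\n0\r\n"

        body, err := io.ReadAll(httputil.NewChunkedReader(&buf))
        fmt.Printf("%q %v\n", body, err) // "hello, world" <nil>
    }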
-
-type chunkedReader struct {
- r *bufio.Reader
- n uint64 // unread bytes in chunk
- err error
- buf [2]byte
- checkEnd bool // whether need to check for \r\n chunk footer
-}
-
-func (cr *chunkedReader) beginChunk() {
- // chunk-size CRLF
- var line []byte
- line, cr.err = readChunkLine(cr.r)
- if cr.err != nil {
- return
- }
- cr.n, cr.err = parseHexUint(line)
- if cr.err != nil {
- return
- }
- if cr.n == 0 {
- cr.err = io.EOF
- }
-}
-
-func (cr *chunkedReader) chunkHeaderAvailable() bool {
- n := cr.r.Buffered()
- if n > 0 {
- peek, _ := cr.r.Peek(n)
- return bytes.IndexByte(peek, '\n') >= 0
- }
- return false
-}
-
-func (cr *chunkedReader) Read(b []byte) (n int, err error) {
- for cr.err == nil {
- if cr.checkEnd {
- if n > 0 && cr.r.Buffered() < 2 {
- // We have some data. Return early (per the io.Reader
- // contract) instead of potentially blocking while
- // reading more.
- break
- }
- if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil {
- if string(cr.buf[:]) != "\r\n" {
- cr.err = errors.New("malformed chunked encoding")
- break
- }
- } else {
- if cr.err == io.EOF {
- cr.err = io.ErrUnexpectedEOF
- }
- break
- }
- cr.checkEnd = false
- }
- if cr.n == 0 {
- if n > 0 && !cr.chunkHeaderAvailable() {
- // We've read enough. Don't potentially block
- // reading a new chunk header.
- break
- }
- cr.beginChunk()
- continue
- }
- if len(b) == 0 {
- break
- }
- rbuf := b
- if uint64(len(rbuf)) > cr.n {
- rbuf = rbuf[:cr.n]
- }
- var n0 int
- n0, cr.err = cr.r.Read(rbuf)
- n += n0
- b = b[n0:]
- cr.n -= uint64(n0)
- // If we're at the end of a chunk, read the next two
- // bytes to verify they are "\r\n".
- if cr.n == 0 && cr.err == nil {
- cr.checkEnd = true
- } else if cr.err == io.EOF {
- cr.err = io.ErrUnexpectedEOF
- }
- }
- return n, cr.err
-}
-
-// Read a line of bytes (up to \n) from b.
-// Give up if the line exceeds maxLineLength.
-// The returned bytes are owned by the bufio.Reader
-// so they are only valid until the next bufio read.
-func readChunkLine(b *bufio.Reader) ([]byte, error) {
- p, err := b.ReadSlice('\n')
- if err != nil {
- // We always know when EOF is coming.
- // If the caller asked for a line, there should be a line.
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- } else if err == bufio.ErrBufferFull {
- err = ErrLineTooLong
- }
- return nil, err
- }
- if len(p) >= maxLineLength {
- return nil, ErrLineTooLong
- }
- p = trimTrailingWhitespace(p)
- p, err = removeChunkExtension(p)
- if err != nil {
- return nil, err
- }
- return p, nil
-}
-
-func trimTrailingWhitespace(b []byte) []byte {
- for len(b) > 0 && isASCIISpace(b[len(b)-1]) {
- b = b[:len(b)-1]
- }
- return b
-}
-
-func isASCIISpace(b byte) bool {
- return b == ' ' || b == '\t' || b == '\n' || b == '\r'
-}
-
-var semi = []byte(";")
-
-// removeChunkExtension removes any chunk-extension from p.
-// For example,
-// "0" => "0"
-// "0;token" => "0"
-// "0;token=val" => "0"
-// `0;token="quoted string"` => "0"
-func removeChunkExtension(p []byte) ([]byte, error) {
- p, _, _ = bytes.Cut(p, semi)
- // TODO: care about exact syntax of chunk extensions? We're
- // ignoring and stripping them anyway. For now just never
- // return an error.
- return p, nil
-}
-
-// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP
-// "chunked" format before writing them to w. Closing the returned chunkedWriter
-// sends the final 0-length chunk that marks the end of the stream but does
-// not send the final CRLF that appears after trailers; trailers and the last
-// CRLF must be written separately.
-//
-// NewChunkedWriter is not needed by normal applications. The http
-// package adds chunking automatically if handlers don't set a
-// Content-Length header. Using newChunkedWriter inside a handler
-// would result in double chunking or chunking with a Content-Length
-// length, both of which are wrong.
-func NewChunkedWriter(w io.Writer) io.WriteCloser {
- return &chunkedWriter{w}
-}
-
-// Writing to chunkedWriter translates to writing in HTTP chunked Transfer
-// Encoding wire format to the underlying Wire writer.
-type chunkedWriter struct {
- Wire io.Writer
-}
-
-// Write the contents of data as one chunk to Wire.
-// NOTE: the corresponding chunk-writing procedure in Conn.Write has a bug:
-// it does not check the success of io.WriteString.
-func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
- // Don't send 0-length data. It looks like EOF for chunked encoding.
- if len(data) == 0 {
- return 0, nil
- }
-
- if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil {
- return 0, err
- }
- if n, err = cw.Wire.Write(data); err != nil {
- return
- }
- if n != len(data) {
- err = io.ErrShortWrite
- return
- }
- if _, err = io.WriteString(cw.Wire, "\r\n"); err != nil {
- return
- }
- if bw, ok := cw.Wire.(*FlushAfterChunkWriter); ok {
- err = bw.Flush()
- }
- return
-}
-
-func (cw *chunkedWriter) Close() error {
- _, err := io.WriteString(cw.Wire, "0\r\n")
- return err
-}
-
-// FlushAfterChunkWriter signals from the caller of NewChunkedWriter
-// that each chunk should be followed by a flush. It is used by the
-// http.Transport code to keep the buffering behavior for headers and
-// trailers, but flush out chunks aggressively in the middle for
-// request bodies which may be generated slowly. See Issue 6574.
-type FlushAfterChunkWriter struct {
- *bufio.Writer
-}
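
The writer above is exported as httputil.NewChunkedWriter. A minimal sketch of the wire format it produces, including the final CRLF that, per the doc comment, the caller must write after Close:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http/httputil"
)

func main() {
	var buf bytes.Buffer
	w := httputil.NewChunkedWriter(&buf)
	w.Write([]byte("hello")) // emits "5\r\nhello\r\n"
	w.Close()                // emits the terminating "0\r\n"
	buf.WriteString("\r\n")  // final CRLF after (empty) trailers

	fmt.Printf("%q\n", buf.String()) // "5\r\nhello\r\n0\r\n\r\n"
}
```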
-
-func parseHexUint(v []byte) (n uint64, err error) {
- for i, b := range v {
- switch {
- case '0' <= b && b <= '9':
- b = b - '0'
- case 'a' <= b && b <= 'f':
- b = b - 'a' + 10
- case 'A' <= b && b <= 'F':
- b = b - 'A' + 10
- default:
- return 0, errors.New("invalid byte in chunk length")
- }
- if i == 16 {
- return 0, errors.New("http chunk length too large")
- }
- n <<= 4
- n |= uint64(b)
- }
- return
-}
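
parseHexUint caps chunk-size lines at 16 hex digits, the most that fit in a uint64; for inputs it accepts, it agrees with strconv.ParseUint in base 16, as this illustrative check shows:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// "1a2b" stands in for a chunk-size line a peer might send.
	n, err := strconv.ParseUint("1a2b", 16, 64)
	fmt.Println(n, err) // 6699 <nil>

	// 17 hex digits overflow a uint64 and are rejected, matching
	// the i == 16 guard in parseHexUint above.
	_, err = strconv.ParseUint("10000000000000000", 16, 64)
	fmt.Println(err != nil) // true
}
```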
diff --git a/contrib/go/_std_1.18/src/net/http/request.go b/contrib/go/_std_1.18/src/net/http/request.go
deleted file mode 100644
index 76c2317d28..0000000000
--- a/contrib/go/_std_1.18/src/net/http/request.go
+++ /dev/null
@@ -1,1463 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP Request reading and parsing.
-
-package http
-
-import (
- "bufio"
- "bytes"
- "context"
- "crypto/tls"
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "mime"
- "mime/multipart"
- "net"
- "net/http/httptrace"
- "net/http/internal/ascii"
- "net/textproto"
- "net/url"
- urlpkg "net/url"
- "strconv"
- "strings"
- "sync"
-
- "golang.org/x/net/idna"
-)
-
-const (
- defaultMaxMemory = 32 << 20 // 32 MB
-)
-
-// ErrMissingFile is returned by FormFile when the provided file field name
-// is either not present in the request or not a file field.
-var ErrMissingFile = errors.New("http: no such file")
-
-// ProtocolError represents an HTTP protocol error.
-//
-// Deprecated: Not all errors in the http package related to protocol errors
-// are of type ProtocolError.
-type ProtocolError struct {
- ErrorString string
-}
-
-func (pe *ProtocolError) Error() string { return pe.ErrorString }
-
-var (
- // ErrNotSupported is returned by the Push method of Pusher
- // implementations to indicate that HTTP/2 Push support is not
- // available.
- ErrNotSupported = &ProtocolError{"feature not supported"}
-
- // Deprecated: ErrUnexpectedTrailer is no longer returned by
- // anything in the net/http package. Callers should not
- // compare errors against this variable.
- ErrUnexpectedTrailer = &ProtocolError{"trailer header without chunked transfer encoding"}
-
- // ErrMissingBoundary is returned by Request.MultipartReader when the
- // request's Content-Type does not include a "boundary" parameter.
- ErrMissingBoundary = &ProtocolError{"no multipart boundary param in Content-Type"}
-
- // ErrNotMultipart is returned by Request.MultipartReader when the
- // request's Content-Type is not multipart/form-data.
- ErrNotMultipart = &ProtocolError{"request Content-Type isn't multipart/form-data"}
-
- // Deprecated: ErrHeaderTooLong is no longer returned by
- // anything in the net/http package. Callers should not
- // compare errors against this variable.
- ErrHeaderTooLong = &ProtocolError{"header too long"}
-
- // Deprecated: ErrShortBody is no longer returned by
- // anything in the net/http package. Callers should not
- // compare errors against this variable.
- ErrShortBody = &ProtocolError{"entity body too short"}
-
- // Deprecated: ErrMissingContentLength is no longer returned by
- // anything in the net/http package. Callers should not
- // compare errors against this variable.
- ErrMissingContentLength = &ProtocolError{"missing ContentLength in HEAD response"}
-)
-
-func badStringError(what, val string) error { return fmt.Errorf("%s %q", what, val) }
-
-// Headers that Request.Write handles itself and should be skipped.
-var reqWriteExcludeHeader = map[string]bool{
- "Host": true, // not in Header map anyway
- "User-Agent": true,
- "Content-Length": true,
- "Transfer-Encoding": true,
- "Trailer": true,
-}
-
-// A Request represents an HTTP request received by a server
-// or to be sent by a client.
-//
-// The field semantics differ slightly between client and server
-// usage. In addition to the notes on the fields below, see the
-// documentation for Request.Write and RoundTripper.
-type Request struct {
- // Method specifies the HTTP method (GET, POST, PUT, etc.).
- // For client requests, an empty string means GET.
- //
- // Go's HTTP client does not support sending a request with
- // the CONNECT method. See the documentation on Transport for
- // details.
- Method string
-
- // URL specifies either the URI being requested (for server
- // requests) or the URL to access (for client requests).
- //
- // For server requests, the URL is parsed from the URI
- // supplied on the Request-Line as stored in RequestURI. For
- // most requests, fields other than Path and RawQuery will be
- // empty. (See RFC 7230, Section 5.3)
- //
- // For client requests, the URL's Host specifies the server to
- // connect to, while the Request's Host field optionally
- // specifies the Host header value to send in the HTTP
- // request.
- URL *url.URL
-
- // The protocol version for incoming server requests.
- //
- // For client requests, these fields are ignored. The HTTP
- // client code always uses either HTTP/1.1 or HTTP/2.
- // See the docs on Transport for details.
- Proto string // "HTTP/1.0"
- ProtoMajor int // 1
- ProtoMinor int // 0
-
- // Header contains the request header fields either received
- // by the server or to be sent by the client.
- //
- // If a server received a request with header lines,
- //
- // Host: example.com
- // accept-encoding: gzip, deflate
- // Accept-Language: en-us
- // fOO: Bar
- // foo: two
- //
- // then
- //
- // Header = map[string][]string{
- // "Accept-Encoding": {"gzip, deflate"},
- // "Accept-Language": {"en-us"},
- // "Foo": {"Bar", "two"},
- // }
- //
- // For incoming requests, the Host header is promoted to the
- // Request.Host field and removed from the Header map.
- //
- // HTTP defines that header names are case-insensitive. The
- // request parser implements this by using CanonicalHeaderKey,
- // making the first character and any characters following a
- // hyphen uppercase and the rest lowercase.
- //
- // For client requests, certain headers such as Content-Length
- // and Connection are automatically written when needed and
- // values in Header may be ignored. See the documentation
- // for the Request.Write method.
- Header Header
-
- // Body is the request's body.
- //
- // For client requests, a nil body means the request has no
- // body, such as a GET request. The HTTP Client's Transport
- // is responsible for calling the Close method.
- //
- // For server requests, the Request Body is always non-nil
- // but will return EOF immediately when no body is present.
- // The Server will close the request body. The ServeHTTP
- // Handler does not need to.
- //
- // Body must allow Read to be called concurrently with Close.
- // In particular, calling Close should unblock a Read waiting
- // for input.
- Body io.ReadCloser
-
- // GetBody defines an optional func to return a new copy of
- // Body. It is used for client requests when a redirect requires
- // reading the body more than once. Use of GetBody still
- // requires setting Body.
- //
- // For server requests, it is unused.
- GetBody func() (io.ReadCloser, error)
-
- // ContentLength records the length of the associated content.
- // The value -1 indicates that the length is unknown.
- // Values >= 0 indicate that the given number of bytes may
- // be read from Body.
- //
- // For client requests, a value of 0 with a non-nil Body is
- // also treated as unknown.
- ContentLength int64
-
- // TransferEncoding lists the transfer encodings from outermost to
- // innermost. An empty list denotes the "identity" encoding.
- // TransferEncoding can usually be ignored; chunked encoding is
- // automatically added and removed as necessary when sending and
- // receiving requests.
- TransferEncoding []string
-
- // Close indicates whether to close the connection after
- // replying to this request (for servers) or after sending this
- // request and reading its response (for clients).
- //
- // For server requests, the HTTP server handles this automatically
- // and this field is not needed by Handlers.
- //
- // For client requests, setting this field prevents re-use of
- // TCP connections between requests to the same hosts, as if
- // Transport.DisableKeepAlives were set.
- Close bool
-
- // For server requests, Host specifies the host on which the
- // URL is sought. For HTTP/1 (per RFC 7230, section 5.4), this
- // is either the value of the "Host" header or the host name
- // given in the URL itself. For HTTP/2, it is the value of the
- // ":authority" pseudo-header field.
- // It may be of the form "host:port". For international domain
- // names, Host may be in Punycode or Unicode form. Use
- // golang.org/x/net/idna to convert it to either format if
- // needed.
- // To prevent DNS rebinding attacks, server Handlers should
- // validate that the Host header has a value for which the
- // Handler considers itself authoritative. The included
- // ServeMux supports patterns registered to particular host
- // names and thus protects its registered Handlers.
- //
- // For client requests, Host optionally overrides the Host
- // header to send. If empty, the Request.Write method uses
- // the value of URL.Host. Host may contain an international
- // domain name.
- Host string
-
- // Form contains the parsed form data, including both the URL
- // field's query parameters and the PATCH, POST, or PUT form data.
- // This field is only available after ParseForm is called.
- // The HTTP client ignores Form and uses Body instead.
- Form url.Values
-
- // PostForm contains the parsed form data from PATCH, POST
- // or PUT body parameters.
- //
- // This field is only available after ParseForm is called.
- // The HTTP client ignores PostForm and uses Body instead.
- PostForm url.Values
-
- // MultipartForm is the parsed multipart form, including file uploads.
- // This field is only available after ParseMultipartForm is called.
- // The HTTP client ignores MultipartForm and uses Body instead.
- MultipartForm *multipart.Form
-
- // Trailer specifies additional headers that are sent after the request
- // body.
- //
- // For server requests, the Trailer map initially contains only the
- // trailer keys, with nil values. (The client declares which trailers it
- // will later send.) While the handler is reading from Body, it must
- // not reference Trailer. After reading from Body returns EOF, Trailer
- // can be read again and will contain non-nil values, if they were sent
- // by the client.
- //
- // For client requests, Trailer must be initialized to a map containing
- // the trailer keys to later send. The values may be nil or their final
- // values. The ContentLength must be 0 or -1, to send a chunked request.
- // After the HTTP request is sent the map values can be updated while
- // the request body is read. Once the body returns EOF, the caller must
- // not mutate Trailer.
- //
- // Few HTTP clients, servers, or proxies support HTTP trailers.
- Trailer Header
-
- // RemoteAddr allows HTTP servers and other software to record
- // the network address that sent the request, usually for
- // logging. This field is not filled in by ReadRequest and
- // has no defined format. The HTTP server in this package
- // sets RemoteAddr to an "IP:port" address before invoking a
- // handler.
- // This field is ignored by the HTTP client.
- RemoteAddr string
-
- // RequestURI is the unmodified request-target of the
- // Request-Line (RFC 7230, Section 3.1.1) as sent by the client
- // to a server. Usually the URL field should be used instead.
- // It is an error to set this field in an HTTP client request.
- RequestURI string
-
- // TLS allows HTTP servers and other software to record
- // information about the TLS connection on which the request
- // was received. This field is not filled in by ReadRequest.
- // The HTTP server in this package sets the field for
- // TLS-enabled connections before invoking a handler;
- // otherwise it leaves the field nil.
- // This field is ignored by the HTTP client.
- TLS *tls.ConnectionState
-
- // Cancel is an optional channel whose closure indicates that the client
- // request should be regarded as canceled. Not all implementations of
- // RoundTripper may support Cancel.
- //
- // For server requests, this field is not applicable.
- //
- // Deprecated: Set the Request's context with NewRequestWithContext
- // instead. If a Request's Cancel field and context are both
- // set, it is undefined whether Cancel is respected.
- Cancel <-chan struct{}
-
- // Response is the redirect response which caused this request
- // to be created. This field is only populated during client
- // redirects.
- Response *Response
-
- // ctx is either the client or server context. It should only
- // be modified via copying the whole Request using WithContext.
- // It is unexported to prevent people from using Context wrong
- // and mutating the contexts held by callers of the same request.
- ctx context.Context
-}
-
-// Context returns the request's context. To change the context, use
-// WithContext.
-//
-// The returned context is always non-nil; it defaults to the
-// background context.
-//
-// For outgoing client requests, the context controls cancellation.
-//
-// For incoming server requests, the context is canceled when the
-// client's connection closes, the request is canceled (with HTTP/2),
-// or when the ServeHTTP method returns.
-func (r *Request) Context() context.Context {
- if r.ctx != nil {
- return r.ctx
- }
- return context.Background()
-}
-
-// WithContext returns a shallow copy of r with its context changed
-// to ctx. The provided ctx must be non-nil.
-//
-// For outgoing client request, the context controls the entire
-// lifetime of a request and its response: obtaining a connection,
-// sending the request, and reading the response headers and body.
-//
-// To create a new request with a context, use NewRequestWithContext.
-// To change the context of a request, such as an incoming request you
-// want to modify before sending back out, use Request.Clone. Between
-// those two uses, it's rare to need WithContext.
-func (r *Request) WithContext(ctx context.Context) *Request {
- if ctx == nil {
- panic("nil context")
- }
- r2 := new(Request)
- *r2 = *r
- r2.ctx = ctx
- r2.URL = cloneURL(r.URL) // legacy behavior; TODO: try to remove. Issue 23544
- return r2
-}
-
-// Clone returns a deep copy of r with its context changed to ctx.
-// The provided ctx must be non-nil.
-//
-// For an outgoing client request, the context controls the entire
-// lifetime of a request and its response: obtaining a connection,
-// sending the request, and reading the response headers and body.
-func (r *Request) Clone(ctx context.Context) *Request {
- if ctx == nil {
- panic("nil context")
- }
- r2 := new(Request)
- *r2 = *r
- r2.ctx = ctx
- r2.URL = cloneURL(r.URL)
- if r.Header != nil {
- r2.Header = r.Header.Clone()
- }
- if r.Trailer != nil {
- r2.Trailer = r.Trailer.Clone()
- }
- if s := r.TransferEncoding; s != nil {
- s2 := make([]string, len(s))
- copy(s2, s)
- r2.TransferEncoding = s2
- }
- r2.Form = cloneURLValues(r.Form)
- r2.PostForm = cloneURLValues(r.PostForm)
- r2.MultipartForm = cloneMultipartForm(r.MultipartForm)
- return r2
-}
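
A short illustrative sketch of Clone in practice: because Header is deep-copied, edits on the clone do not leak into the original (the URL and header name are made up):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func main() {
	req, _ := http.NewRequest("GET", "https://example.com", nil)
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	req2 := req.Clone(ctx) // deep copy: URL, Header, Trailer, form fields
	req2.Header.Set("X-Debug", "1")

	fmt.Println(req2.Header.Get("X-Debug")) // 1
	fmt.Println(req.Header.Get("X-Debug"))  // "" (original untouched)
}
```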
-
-// ProtoAtLeast reports whether the HTTP protocol used
-// in the request is at least major.minor.
-func (r *Request) ProtoAtLeast(major, minor int) bool {
- return r.ProtoMajor > major ||
- r.ProtoMajor == major && r.ProtoMinor >= minor
-}
-
-// UserAgent returns the client's User-Agent, if sent in the request.
-func (r *Request) UserAgent() string {
- return r.Header.Get("User-Agent")
-}
-
-// Cookies parses and returns the HTTP cookies sent with the request.
-func (r *Request) Cookies() []*Cookie {
- return readCookies(r.Header, "")
-}
-
-// ErrNoCookie is returned by Request's Cookie method when a cookie is not found.
-var ErrNoCookie = errors.New("http: named cookie not present")
-
-// Cookie returns the named cookie provided in the request or
-// ErrNoCookie if not found.
-// If multiple cookies match the given name, only one cookie will
-// be returned.
-func (r *Request) Cookie(name string) (*Cookie, error) {
- for _, c := range readCookies(r.Header, name) {
- return c, nil
- }
- return nil, ErrNoCookie
-}
-
-// AddCookie adds a cookie to the request. Per RFC 6265 section 5.4,
-// AddCookie does not attach more than one Cookie header field. That
-// means all cookies, if any, are written into the same line,
-// separated by semicolon.
-// AddCookie only sanitizes c's name and value, and does not sanitize
-// a Cookie header already present in the request.
-func (r *Request) AddCookie(c *Cookie) {
- s := fmt.Sprintf("%s=%s", sanitizeCookieName(c.Name), sanitizeCookieValue(c.Value))
- if c := r.Header.Get("Cookie"); c != "" {
- r.Header.Set("Cookie", c+"; "+s)
- } else {
- r.Header.Set("Cookie", s)
- }
-}
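
An illustrative sketch of the single-header-line behavior (cookie names and values are made up):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, _ := http.NewRequest("GET", "https://example.com", nil)
	req.AddCookie(&http.Cookie{Name: "session", Value: "abc123"})
	req.AddCookie(&http.Cookie{Name: "theme", Value: "dark"})

	// Both cookies end up on the same Cookie header line.
	fmt.Println(req.Header.Get("Cookie")) // session=abc123; theme=dark
}
```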
-
-// Referer returns the referring URL, if sent in the request.
-//
-// Referer is misspelled as in the request itself, a mistake from the
-// earliest days of HTTP. This value can also be fetched from the
-// Header map as Header["Referer"]; the benefit of making it available
-// as a method is that the compiler can diagnose programs that use the
-// alternate (correct English) spelling req.Referrer() but cannot
-// diagnose programs that use Header["Referrer"].
-func (r *Request) Referer() string {
- return r.Header.Get("Referer")
-}
-
-// multipartByReader is a sentinel value.
-// Its presence in Request.MultipartForm indicates that parsing of the request
-// body has been handed off to a MultipartReader instead of ParseMultipartForm.
-var multipartByReader = &multipart.Form{
- Value: make(map[string][]string),
- File: make(map[string][]*multipart.FileHeader),
-}
-
-// MultipartReader returns a MIME multipart reader if this is a
-// multipart/form-data or a multipart/mixed POST request, else returns nil and an error.
-// Use this function instead of ParseMultipartForm to
-// process the request body as a stream.
-func (r *Request) MultipartReader() (*multipart.Reader, error) {
- if r.MultipartForm == multipartByReader {
- return nil, errors.New("http: MultipartReader called twice")
- }
- if r.MultipartForm != nil {
- return nil, errors.New("http: multipart handled by ParseMultipartForm")
- }
- r.MultipartForm = multipartByReader
- return r.multipartReader(true)
-}
-
-func (r *Request) multipartReader(allowMixed bool) (*multipart.Reader, error) {
- v := r.Header.Get("Content-Type")
- if v == "" {
- return nil, ErrNotMultipart
- }
- d, params, err := mime.ParseMediaType(v)
- if err != nil || !(d == "multipart/form-data" || allowMixed && d == "multipart/mixed") {
- return nil, ErrNotMultipart
- }
- boundary, ok := params["boundary"]
- if !ok {
- return nil, ErrMissingBoundary
- }
- return multipart.NewReader(r.Body, boundary), nil
-}
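
A sketch of the streaming path MultipartReader enables, assuming a hypothetical upload handler; each part is consumed as it arrives instead of being buffered by ParseMultipartForm:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/upload", func(w http.ResponseWriter, r *http.Request) {
		mr, err := r.MultipartReader()
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		for {
			part, err := mr.NextPart()
			if err == io.EOF {
				break
			}
			if err != nil {
				http.Error(w, err.Error(), http.StatusBadRequest)
				return
			}
			n, _ := io.Copy(io.Discard, part) // drain this part's data
			fmt.Fprintf(w, "part %q: %d bytes\n", part.FormName(), n)
		}
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```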
-
-// isH2Upgrade reports whether r represents the http2 "client preface"
-// magic string.
-func (r *Request) isH2Upgrade() bool {
- return r.Method == "PRI" && len(r.Header) == 0 && r.URL.Path == "*" && r.Proto == "HTTP/2.0"
-}
-
-// Return value if nonempty, def otherwise.
-func valueOrDefault(value, def string) string {
- if value != "" {
- return value
- }
- return def
-}
-
-// NOTE: This is not intended to reflect the actual Go version being used.
-// It was changed at the time of Go 1.1 release because the former User-Agent
-// had ended up blocked by some intrusion detection systems.
-// See https://codereview.appspot.com/7532043.
-const defaultUserAgent = "Go-http-client/1.1"
-
-// Write writes an HTTP/1.1 request, which is the header and body, in wire format.
-// This method consults the following fields of the request:
-// Host
-// URL
-// Method (defaults to "GET")
-// Header
-// ContentLength
-// TransferEncoding
-// Body
-//
-// If Body is present, Content-Length is <= 0 and TransferEncoding
-// hasn't been set to "identity", Write adds "Transfer-Encoding:
-// chunked" to the header. Body is closed after it is sent.
-func (r *Request) Write(w io.Writer) error {
- return r.write(w, false, nil, nil)
-}
-
-// WriteProxy is like Write but writes the request in the form
-// expected by an HTTP proxy. In particular, WriteProxy writes the
-// initial Request-URI line of the request with an absolute URI, per
-// section 5.3 of RFC 7230, including the scheme and host.
-// In either case, WriteProxy also writes a Host header, using
-// either r.Host or r.URL.Host.
-func (r *Request) WriteProxy(w io.Writer) error {
- return r.write(w, true, nil, nil)
-}
-
-// errMissingHost is returned by Write when there is no Host or URL present in
-// the Request.
-var errMissingHost = errors.New("http: Request.Write on Request with no Host or URL set")
-
-// extraHeaders may be nil
-// waitForContinue may be nil
-// always closes body
-func (r *Request) write(w io.Writer, usingProxy bool, extraHeaders Header, waitForContinue func() bool) (err error) {
- trace := httptrace.ContextClientTrace(r.Context())
- if trace != nil && trace.WroteRequest != nil {
- defer func() {
- trace.WroteRequest(httptrace.WroteRequestInfo{
- Err: err,
- })
- }()
- }
- closed := false
- defer func() {
- if closed {
- return
- }
- if closeErr := r.closeBody(); closeErr != nil && err == nil {
- err = closeErr
- }
- }()
-
- // Find the target host. Prefer the Host: header, but if that
- // is not given, use the host from the request URL.
- //
- // Clean the host, in case it arrives with unexpected stuff in it.
- host := cleanHost(r.Host)
- if host == "" {
- if r.URL == nil {
- return errMissingHost
- }
- host = cleanHost(r.URL.Host)
- }
-
- // According to RFC 6874, an HTTP client, proxy, or other
- // intermediary must remove any IPv6 zone identifier attached
- // to an outgoing URI.
- host = removeZone(host)
-
- ruri := r.URL.RequestURI()
- if usingProxy && r.URL.Scheme != "" && r.URL.Opaque == "" {
- ruri = r.URL.Scheme + "://" + host + ruri
- } else if r.Method == "CONNECT" && r.URL.Path == "" {
- // CONNECT requests normally give just the host and port, not a full URL.
- ruri = host
- if r.URL.Opaque != "" {
- ruri = r.URL.Opaque
- }
- }
- if stringContainsCTLByte(ruri) {
- return errors.New("net/http: can't write control character in Request.URL")
- }
- // TODO: validate r.Method too? At least it's less likely to
- // come from an attacker (more likely to be a constant in
- // code).
-
- // Wrap the writer in a bufio Writer if it's not already buffered.
- // Don't always call NewWriter, as that forces a bytes.Buffer
- // and other small bufio Writers to have a minimum 4k buffer
- // size.
- var bw *bufio.Writer
- if _, ok := w.(io.ByteWriter); !ok {
- bw = bufio.NewWriter(w)
- w = bw
- }
-
- _, err = fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(r.Method, "GET"), ruri)
- if err != nil {
- return err
- }
-
- // Header lines
- _, err = fmt.Fprintf(w, "Host: %s\r\n", host)
- if err != nil {
- return err
- }
- if trace != nil && trace.WroteHeaderField != nil {
- trace.WroteHeaderField("Host", []string{host})
- }
-
- // Use the defaultUserAgent unless the Header contains one, which
- // may be blank to not send the header.
- userAgent := defaultUserAgent
- if r.Header.has("User-Agent") {
- userAgent = r.Header.Get("User-Agent")
- }
- if userAgent != "" {
- _, err = fmt.Fprintf(w, "User-Agent: %s\r\n", userAgent)
- if err != nil {
- return err
- }
- if trace != nil && trace.WroteHeaderField != nil {
- trace.WroteHeaderField("User-Agent", []string{userAgent})
- }
- }
-
- // Process Body, ContentLength, Close, Trailer
- tw, err := newTransferWriter(r)
- if err != nil {
- return err
- }
- err = tw.writeHeader(w, trace)
- if err != nil {
- return err
- }
-
- err = r.Header.writeSubset(w, reqWriteExcludeHeader, trace)
- if err != nil {
- return err
- }
-
- if extraHeaders != nil {
- err = extraHeaders.write(w, trace)
- if err != nil {
- return err
- }
- }
-
- _, err = io.WriteString(w, "\r\n")
- if err != nil {
- return err
- }
-
- if trace != nil && trace.WroteHeaders != nil {
- trace.WroteHeaders()
- }
-
- // Flush and wait for 100-continue if expected.
- if waitForContinue != nil {
- if bw, ok := w.(*bufio.Writer); ok {
- err = bw.Flush()
- if err != nil {
- return err
- }
- }
- if trace != nil && trace.Wait100Continue != nil {
- trace.Wait100Continue()
- }
- if !waitForContinue() {
- closed = true
- r.closeBody()
- return nil
- }
- }
-
- if bw, ok := w.(*bufio.Writer); ok && tw.FlushHeaders {
- if err := bw.Flush(); err != nil {
- return err
- }
- }
-
- // Write body and trailer
- closed = true
- err = tw.writeBody(w)
- if err != nil {
- if tw.bodyReadError == err {
- err = requestBodyReadError{err}
- }
- return err
- }
-
- if bw != nil {
- return bw.Flush()
- }
- return nil
-}
-
-// requestBodyReadError wraps an error from (*Request).write to indicate
-// that the error came from a Read call on the Request.Body.
-// This error type should not escape the net/http package to users.
-type requestBodyReadError struct{ error }
-
-func idnaASCII(v string) (string, error) {
- // TODO: Consider removing this check after verifying performance is okay.
- // Right now punycode verification, length checks, context checks, and the
- // permissible character tests are all omitted. It also prevents the ToASCII
- // call from salvaging an invalid IDN, when possible. As a result it may be
- // possible to have two IDNs that appear identical to the user where the
- // ASCII-only version causes an error downstream whereas the non-ASCII
- // version does not.
- // Note that for correct ASCII IDNs, ToASCII merely does considerably
- // more work, but it will not cause an allocation.
- if ascii.Is(v) {
- return v, nil
- }
- return idna.Lookup.ToASCII(v)
-}
-
-// cleanHost cleans up the host sent in request's Host header.
-//
-// It both strips anything after '/' or ' ', and puts the value
-// into Punycode form, if necessary.
-//
-// Ideally we'd clean the Host header according to the spec:
- // https://tools.ietf.org/html/rfc7230#section-5.4 (Host = uri-host [ ":" port ])
-// https://tools.ietf.org/html/rfc7230#section-2.7 (uri-host -> rfc3986's host)
-// https://tools.ietf.org/html/rfc3986#section-3.2.2 (definition of host)
-// But practically, what we are trying to avoid is the situation in
-// issue 11206, where a malformed Host header used in the proxy context
-// would create a bad request. So it is enough to just truncate at the
-// first offending character.
-func cleanHost(in string) string {
- if i := strings.IndexAny(in, " /"); i != -1 {
- in = in[:i]
- }
- host, port, err := net.SplitHostPort(in)
- if err != nil { // input was just a host
- a, err := idnaASCII(in)
- if err != nil {
- return in // garbage in, garbage out
- }
- return a
- }
- a, err := idnaASCII(host)
- if err != nil {
- return in // garbage in, garbage out
- }
- return net.JoinHostPort(a, port)
-}
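
cleanHost leans on net.SplitHostPort failing for a bare host to distinguish the two cases; an illustrative check:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	host, port, err := net.SplitHostPort("example.com:8443")
	fmt.Println(host, port, err) // example.com 8443 <nil>

	// A bare host (no port) makes SplitHostPort fail, which is how
	// cleanHost above tells the two inputs apart.
	_, _, err = net.SplitHostPort("example.com")
	fmt.Println(err != nil) // true
}
```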
-
-// removeZone removes IPv6 zone identifier from host.
-// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
-func removeZone(host string) string {
- if !strings.HasPrefix(host, "[") {
- return host
- }
- i := strings.LastIndex(host, "]")
- if i < 0 {
- return host
- }
- j := strings.LastIndex(host[:i], "%")
- if j < 0 {
- return host
- }
- return host[:j] + host[i:]
-}
-
-// ParseHTTPVersion parses an HTTP version string according to RFC 7230, section 2.6.
-// "HTTP/1.0" returns (1, 0, true). Note that strings without
-// a minor version, such as "HTTP/2", are not valid.
-func ParseHTTPVersion(vers string) (major, minor int, ok bool) {
- switch vers {
- case "HTTP/1.1":
- return 1, 1, true
- case "HTTP/1.0":
- return 1, 0, true
- }
- if !strings.HasPrefix(vers, "HTTP/") {
- return 0, 0, false
- }
- if len(vers) != len("HTTP/X.Y") {
- return 0, 0, false
- }
- if vers[6] != '.' {
- return 0, 0, false
- }
- maj, err := strconv.ParseUint(vers[5:6], 10, 0)
- if err != nil {
- return 0, 0, false
- }
- min, err := strconv.ParseUint(vers[7:8], 10, 0)
- if err != nil {
- return 0, 0, false
- }
- return int(maj), int(min), true
-}
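
An illustrative check of the parser's behavior, including the documented rejection of versions without a minor number:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	major, minor, ok := http.ParseHTTPVersion("HTTP/1.1")
	fmt.Println(major, minor, ok) // 1 1 true

	_, _, ok = http.ParseHTTPVersion("HTTP/2") // no minor version
	fmt.Println(ok)                            // false
}
```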
-
-func validMethod(method string) bool {
- /*
- Method = "OPTIONS" ; Section 9.2
- | "GET" ; Section 9.3
- | "HEAD" ; Section 9.4
- | "POST" ; Section 9.5
- | "PUT" ; Section 9.6
- | "DELETE" ; Section 9.7
- | "TRACE" ; Section 9.8
- | "CONNECT" ; Section 9.9
- | extension-method
- extension-method = token
- token = 1*<any CHAR except CTLs or separators>
- */
- return len(method) > 0 && strings.IndexFunc(method, isNotToken) == -1
-}
-
-// NewRequest wraps NewRequestWithContext using context.Background.
-func NewRequest(method, url string, body io.Reader) (*Request, error) {
- return NewRequestWithContext(context.Background(), method, url, body)
-}
-
-// NewRequestWithContext returns a new Request given a method, URL, and
-// optional body.
-//
-// If the provided body is also an io.Closer, the returned
-// Request.Body is set to body and will be closed by the Client
-// methods Do, Post, and PostForm, and Transport.RoundTrip.
-//
-// NewRequestWithContext returns a Request suitable for use with
-// Client.Do or Transport.RoundTrip. To create a request for use with
-// testing a Server Handler, either use the NewRequest function in the
-// net/http/httptest package, use ReadRequest, or manually update the
-// Request fields. For an outgoing client request, the context
-// controls the entire lifetime of a request and its response:
-// obtaining a connection, sending the request, and reading the
-// response headers and body. See the Request type's documentation for
-// the difference between inbound and outbound request fields.
-//
-// If body is of type *bytes.Buffer, *bytes.Reader, or
-// *strings.Reader, the returned request's ContentLength is set to its
-// exact value (instead of -1), GetBody is populated (so 307 and 308
-// redirects can replay the body), and Body is set to NoBody if the
-// ContentLength is 0.
-func NewRequestWithContext(ctx context.Context, method, url string, body io.Reader) (*Request, error) {
- if method == "" {
- // We document that "" means "GET" for Request.Method, and people have
- // relied on that from NewRequest, so keep that working.
- // We still enforce validMethod for non-empty methods.
- method = "GET"
- }
- if !validMethod(method) {
- return nil, fmt.Errorf("net/http: invalid method %q", method)
- }
- if ctx == nil {
- return nil, errors.New("net/http: nil Context")
- }
- u, err := urlpkg.Parse(url)
- if err != nil {
- return nil, err
- }
- rc, ok := body.(io.ReadCloser)
- if !ok && body != nil {
- rc = io.NopCloser(body)
- }
- // The host's colon:port should be normalized. See Issue 14836.
- u.Host = removeEmptyPort(u.Host)
- req := &Request{
- ctx: ctx,
- Method: method,
- URL: u,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: make(Header),
- Body: rc,
- Host: u.Host,
- }
- if body != nil {
- switch v := body.(type) {
- case *bytes.Buffer:
- req.ContentLength = int64(v.Len())
- buf := v.Bytes()
- req.GetBody = func() (io.ReadCloser, error) {
- r := bytes.NewReader(buf)
- return io.NopCloser(r), nil
- }
- case *bytes.Reader:
- req.ContentLength = int64(v.Len())
- snapshot := *v
- req.GetBody = func() (io.ReadCloser, error) {
- r := snapshot
- return io.NopCloser(&r), nil
- }
- case *strings.Reader:
- req.ContentLength = int64(v.Len())
- snapshot := *v
- req.GetBody = func() (io.ReadCloser, error) {
- r := snapshot
- return io.NopCloser(&r), nil
- }
- default:
- // This is where we'd set it to -1 (at least
- // if body != NoBody) to mean unknown, but
- // that broke people during the Go 1.8 testing
- // period. People depend on it being 0 I
- // guess. Maybe retry later. See Issue 18117.
- }
- // For client requests, Request.ContentLength of 0
- // means either actually 0, or unknown. The only way
- // to explicitly say that the ContentLength is zero is
- // to set the Body to nil. But it turns out too much code
- // depends on NewRequest returning a non-nil Body,
- // so we use a well-known ReadCloser variable instead
- // and have the http package also treat that sentinel
- // variable to mean explicitly zero.
- if req.GetBody != nil && req.ContentLength == 0 {
- req.Body = NoBody
- req.GetBody = func() (io.ReadCloser, error) { return NoBody, nil }
- }
- }
-
- return req, nil
-}
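
A minimal sketch of the *bytes.Buffer special case described above (the URL and payload are made up): ContentLength is exact and GetBody can replay the body for redirects:

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
)

func main() {
	body := bytes.NewBufferString(`{"ok":true}`)
	req, err := http.NewRequestWithContext(context.Background(),
		"POST", "https://example.com/api", body)
	if err != nil {
		panic(err)
	}

	fmt.Println(req.ContentLength) // 11 (exact, not -1)

	// GetBody hands back a fresh copy of the body.
	rc, _ := req.GetBody()
	b, _ := io.ReadAll(rc)
	fmt.Println(string(b)) // {"ok":true}
}
```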
-
-// BasicAuth returns the username and password provided in the request's
-// Authorization header, if the request uses HTTP Basic Authentication.
-// See RFC 2617, Section 2.
-func (r *Request) BasicAuth() (username, password string, ok bool) {
- auth := r.Header.Get("Authorization")
- if auth == "" {
- return "", "", false
- }
- return parseBasicAuth(auth)
-}
-
-// parseBasicAuth parses an HTTP Basic Authentication string.
-// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true).
-func parseBasicAuth(auth string) (username, password string, ok bool) {
- const prefix = "Basic "
- // Case insensitive prefix match. See Issue 22736.
- if len(auth) < len(prefix) || !ascii.EqualFold(auth[:len(prefix)], prefix) {
- return "", "", false
- }
- c, err := base64.StdEncoding.DecodeString(auth[len(prefix):])
- if err != nil {
- return "", "", false
- }
- cs := string(c)
- username, password, ok = strings.Cut(cs, ":")
- if !ok {
- return "", "", false
- }
- return username, password, true
-}
-
-// SetBasicAuth sets the request's Authorization header to use HTTP
-// Basic Authentication with the provided username and password.
-//
-// With HTTP Basic Authentication the provided username and password
-// are not encrypted.
-//
-// Some protocols may impose additional requirements on pre-escaping the
-// username and password. For instance, when used with OAuth2, both arguments
-// must be URL encoded first with url.QueryEscape.
-func (r *Request) SetBasicAuth(username, password string) {
- r.Header.Set("Authorization", "Basic "+basicAuth(username, password))
-}
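
A round-trip sketch using the RFC 2617 example credentials quoted in the parseBasicAuth comment above:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, _ := http.NewRequest("GET", "https://example.com", nil)
	req.SetBasicAuth("Aladdin", "open sesame")

	fmt.Println(req.Header.Get("Authorization"))
	// Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==

	user, pass, ok := req.BasicAuth()
	fmt.Println(user, pass, ok) // Aladdin open sesame true
}
```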
-
-// parseRequestLine parses "GET /foo HTTP/1.1" into its three parts.
-func parseRequestLine(line string) (method, requestURI, proto string, ok bool) {
- method, rest, ok1 := strings.Cut(line, " ")
- requestURI, proto, ok2 := strings.Cut(rest, " ")
- if !ok1 || !ok2 {
- return "", "", "", false
- }
- return method, requestURI, proto, true
-}
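
The same two-Cut split, shown standalone on an illustrative request line:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	method, rest, _ := strings.Cut("GET /foo HTTP/1.1", " ")
	requestURI, proto, _ := strings.Cut(rest, " ")
	fmt.Println(method, requestURI, proto) // GET /foo HTTP/1.1
}
```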
-
-var textprotoReaderPool sync.Pool
-
-func newTextprotoReader(br *bufio.Reader) *textproto.Reader {
- if v := textprotoReaderPool.Get(); v != nil {
- tr := v.(*textproto.Reader)
- tr.R = br
- return tr
- }
- return textproto.NewReader(br)
-}
-
-func putTextprotoReader(r *textproto.Reader) {
- r.R = nil
- textprotoReaderPool.Put(r)
-}
-
-// ReadRequest reads and parses an incoming request from b.
-//
-// ReadRequest is a low-level function and should only be used for
-// specialized applications; most code should use the Server to read
-// requests and handle them via the Handler interface. ReadRequest
-// only supports HTTP/1.x requests. For HTTP/2, use golang.org/x/net/http2.
-func ReadRequest(b *bufio.Reader) (*Request, error) {
- req, err := readRequest(b)
- if err != nil {
- return nil, err
- }
-
- delete(req.Header, "Host")
- return req, err
-}
-
-func readRequest(b *bufio.Reader) (req *Request, err error) {
- tp := newTextprotoReader(b)
- req = new(Request)
-
- // First line: GET /index.html HTTP/1.0
- var s string
- if s, err = tp.ReadLine(); err != nil {
- return nil, err
- }
- defer func() {
- putTextprotoReader(tp)
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- }()
-
- var ok bool
- req.Method, req.RequestURI, req.Proto, ok = parseRequestLine(s)
- if !ok {
- return nil, badStringError("malformed HTTP request", s)
- }
- if !validMethod(req.Method) {
- return nil, badStringError("invalid method", req.Method)
- }
- rawurl := req.RequestURI
- if req.ProtoMajor, req.ProtoMinor, ok = ParseHTTPVersion(req.Proto); !ok {
- return nil, badStringError("malformed HTTP version", req.Proto)
- }
-
- // CONNECT requests are used two different ways, and neither uses a full URL:
- // The standard use is to tunnel HTTPS through an HTTP proxy.
- // It looks like "CONNECT www.google.com:443 HTTP/1.1", and the parameter is
- // just the authority section of a URL. This information should go in req.URL.Host.
- //
- // The net/rpc package also uses CONNECT, but there the parameter is a path
- // that starts with a slash. It can be parsed with the regular URL parser,
- // and the path will end up in req.URL.Path, where it needs to be in order for
- // RPC to work.
- justAuthority := req.Method == "CONNECT" && !strings.HasPrefix(rawurl, "/")
- if justAuthority {
- rawurl = "http://" + rawurl
- }
-
- if req.URL, err = url.ParseRequestURI(rawurl); err != nil {
- return nil, err
- }
-
- if justAuthority {
- // Strip the bogus "http://" back off.
- req.URL.Scheme = ""
- }
-
- // Subsequent lines: Key: value.
- mimeHeader, err := tp.ReadMIMEHeader()
- if err != nil {
- return nil, err
- }
- req.Header = Header(mimeHeader)
- if len(req.Header["Host"]) > 1 {
- return nil, fmt.Errorf("too many Host headers")
- }
-
- // RFC 7230, section 5.3: Must treat
- // GET /index.html HTTP/1.1
- // Host: www.google.com
- // and
- // GET http://www.google.com/index.html HTTP/1.1
- // Host: doesntmatter
- // the same. In the second case, any Host line is ignored.
- req.Host = req.URL.Host
- if req.Host == "" {
- req.Host = req.Header.get("Host")
- }
-
- fixPragmaCacheControl(req.Header)
-
- req.Close = shouldClose(req.ProtoMajor, req.ProtoMinor, req.Header, false)
-
- err = readTransfer(req, b)
- if err != nil {
- return nil, err
- }
-
- if req.isH2Upgrade() {
- // Because it's neither chunked, nor declared:
- req.ContentLength = -1
-
- // We want to give handlers a chance to hijack the
- // connection, but we need to prevent the Server from
- // dealing with the connection further if it's not
- // hijacked. Set Close to ensure that:
- req.Close = true
- }
- return req, nil
-}
-
-// MaxBytesReader is similar to io.LimitReader but is intended for
-// limiting the size of incoming request bodies. In contrast to
-// io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a
-// non-EOF error for a Read beyond the limit, and closes the
-// underlying reader when its Close method is called.
-//
-// MaxBytesReader prevents clients from accidentally or maliciously
-// sending a large request and wasting server resources.
-func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser {
- if n < 0 { // Treat negative limits as equivalent to 0.
- n = 0
- }
- return &maxBytesReader{w: w, r: r, n: n}
-}
-
-type maxBytesReader struct {
- w ResponseWriter
- r io.ReadCloser // underlying reader
- n int64 // max bytes remaining
- err error // sticky error
-}
-
-func (l *maxBytesReader) Read(p []byte) (n int, err error) {
- if l.err != nil {
- return 0, l.err
- }
- if len(p) == 0 {
- return 0, nil
- }
- // If they asked for a 32KB read but only 5 bytes are
- // remaining, no need to read 32KB. 6 bytes will answer the
- // question of whether we hit the limit or go past it.
- if int64(len(p)) > l.n+1 {
- p = p[:l.n+1]
- }
- n, err = l.r.Read(p)
-
- if int64(n) <= l.n {
- l.n -= int64(n)
- l.err = err
- return n, err
- }
-
- n = int(l.n)
- l.n = 0
-
- // The server code and client code both use
- // maxBytesReader. This "requestTooLarge" check is
- // only used by the server code. To prevent binaries
- // that only use the HTTP Client code (such as
- // cmd/go) from also linking in the HTTP server, don't
- // use a static type assertion to the server
- // "*response" type. Check this interface instead:
- type requestTooLarger interface {
- requestTooLarge()
- }
- if res, ok := l.w.(requestTooLarger); ok {
- res.requestTooLarge()
- }
- l.err = errors.New("http: request body too large")
- return n, l.err
-}
-
-func (l *maxBytesReader) Close() error {
- return l.r.Close()
-}
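
A sketch of the intended server-side use, assuming a hypothetical echo handler and a 1 MB cap; a read past the limit surfaces the "http: request body too large" error:

```go
package main

import (
	"io"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Cap the body at 1 MB; reads past the limit return an error
		// and flag the connection via the requestTooLarger interface.
		r.Body = http.MaxBytesReader(w, r.Body, 1<<20)
		body, err := io.ReadAll(r.Body)
		if err != nil {
			http.Error(w, "request body too large", http.StatusRequestEntityTooLarge)
			return
		}
		w.Write(body)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```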
-
-func copyValues(dst, src url.Values) {
- for k, vs := range src {
- dst[k] = append(dst[k], vs...)
- }
-}
-
-func parsePostForm(r *Request) (vs url.Values, err error) {
- if r.Body == nil {
- err = errors.New("missing form body")
- return
- }
- ct := r.Header.Get("Content-Type")
- // RFC 7231, section 3.1.1.5 - empty type
- // MAY be treated as application/octet-stream
- if ct == "" {
- ct = "application/octet-stream"
- }
- ct, _, err = mime.ParseMediaType(ct)
- switch {
- case ct == "application/x-www-form-urlencoded":
- var reader io.Reader = r.Body
- maxFormSize := int64(1<<63 - 1)
- if _, ok := r.Body.(*maxBytesReader); !ok {
- maxFormSize = int64(10 << 20) // 10 MB is a lot of text.
- reader = io.LimitReader(r.Body, maxFormSize+1)
- }
- b, e := io.ReadAll(reader)
- if e != nil {
- if err == nil {
- err = e
- }
- break
- }
- if int64(len(b)) > maxFormSize {
- err = errors.New("http: POST too large")
- return
- }
- vs, e = url.ParseQuery(string(b))
- if err == nil {
- err = e
- }
- case ct == "multipart/form-data":
- // handled by ParseMultipartForm (which is calling us, or should be)
- // TODO(bradfitz): there are too many possible
- // orders to call too many functions here.
- // Clean this up and write more tests.
- // request_test.go contains the start of this,
- // in TestParseMultipartFormOrder and others.
- }
- return
-}
-
-// ParseForm populates r.Form and r.PostForm.
-//
-// For all requests, ParseForm parses the raw query from the URL and updates
-// r.Form.
-//
-// For POST, PUT, and PATCH requests, it also reads the request body, parses it
-// as a form and puts the results into both r.PostForm and r.Form. Request body
-// parameters take precedence over URL query string values in r.Form.
-//
-// If the request Body's size has not already been limited by MaxBytesReader,
-// the size is capped at 10MB.
-//
-// For other HTTP methods, or when the Content-Type is not
-// application/x-www-form-urlencoded, the request Body is not read, and
-// r.PostForm is initialized to a non-nil, empty value.
-//
-// ParseMultipartForm calls ParseForm automatically.
-// ParseForm is idempotent.
-func (r *Request) ParseForm() error {
- var err error
- if r.PostForm == nil {
- if r.Method == "POST" || r.Method == "PUT" || r.Method == "PATCH" {
- r.PostForm, err = parsePostForm(r)
- }
- if r.PostForm == nil {
- r.PostForm = make(url.Values)
- }
- }
- if r.Form == nil {
- if len(r.PostForm) > 0 {
- r.Form = make(url.Values)
- copyValues(r.Form, r.PostForm)
- }
- var newValues url.Values
- if r.URL != nil {
- var e error
- newValues, e = url.ParseQuery(r.URL.RawQuery)
- if err == nil {
- err = e
- }
- }
- if newValues == nil {
- newValues = make(url.Values)
- }
- if r.Form == nil {
- r.Form = newValues
- } else {
- copyValues(r.Form, newValues)
- }
- }
- return err
-}
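
An illustrative sketch of the precedence rule: a body parameter and a query parameter share the name q (all values made up), and the body value wins in r.Form:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	body := strings.NewReader("q=from-body")
	req, _ := http.NewRequest("POST", "https://example.com/search?q=from-url", body)
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	req.ParseForm()
	fmt.Println(req.PostForm.Get("q")) // from-body
	fmt.Println(req.Form.Get("q"))     // from-body (body values take precedence)
	fmt.Println(req.Form["q"])         // [from-body from-url]
}
```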
-
-// ParseMultipartForm parses a request body as multipart/form-data.
-// The whole request body is parsed and up to a total of maxMemory bytes of
-// its file parts are stored in memory, with the remainder stored on
-// disk in temporary files.
-// ParseMultipartForm calls ParseForm if necessary.
-// If ParseForm returns an error, ParseMultipartForm returns it but also
-// continues parsing the request body.
-// After one call to ParseMultipartForm, subsequent calls have no effect.
-func (r *Request) ParseMultipartForm(maxMemory int64) error {
- if r.MultipartForm == multipartByReader {
- return errors.New("http: multipart handled by MultipartReader")
- }
- var parseFormErr error
- if r.Form == nil {
- // Let any error from ParseForm fall through, and just
- // return it at the end.
- parseFormErr = r.ParseForm()
- }
- if r.MultipartForm != nil {
- return nil
- }
-
- mr, err := r.multipartReader(false)
- if err != nil {
- return err
- }
-
- f, err := mr.ReadForm(maxMemory)
- if err != nil {
- return err
- }
-
- if r.PostForm == nil {
- r.PostForm = make(url.Values)
- }
- for k, v := range f.Value {
- r.Form[k] = append(r.Form[k], v...)
- // r.PostForm should also be populated. See Issue 9305.
- r.PostForm[k] = append(r.PostForm[k], v...)
- }
-
- r.MultipartForm = f
-
- return parseFormErr
-}
-
-// FormValue returns the first value for the named component of the query.
-// POST and PUT body parameters take precedence over URL query string values.
-// FormValue calls ParseMultipartForm and ParseForm if necessary and ignores
-// any errors returned by these functions.
-// If key is not present, FormValue returns the empty string.
-// To access multiple values of the same key, call ParseForm and
-// then inspect Request.Form directly.
-func (r *Request) FormValue(key string) string {
- if r.Form == nil {
- r.ParseMultipartForm(defaultMaxMemory)
- }
- if vs := r.Form[key]; len(vs) > 0 {
- return vs[0]
- }
- return ""
-}
-
-// PostFormValue returns the first value for the named component of the POST,
-// PATCH, or PUT request body. URL query parameters are ignored.
-// PostFormValue calls ParseMultipartForm and ParseForm if necessary and ignores
-// any errors returned by these functions.
-// If key is not present, PostFormValue returns the empty string.
-func (r *Request) PostFormValue(key string) string {
- if r.PostForm == nil {
- r.ParseMultipartForm(defaultMaxMemory)
- }
- if vs := r.PostForm[key]; len(vs) > 0 {
- return vs[0]
- }
- return ""
-}
-
-// FormFile returns the first file for the provided form key.
-// FormFile calls ParseMultipartForm and ParseForm if necessary.
-func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, error) {
- if r.MultipartForm == multipartByReader {
- return nil, nil, errors.New("http: multipart handled by MultipartReader")
- }
- if r.MultipartForm == nil {
- err := r.ParseMultipartForm(defaultMaxMemory)
- if err != nil {
- return nil, nil, err
- }
- }
- if r.MultipartForm != nil && r.MultipartForm.File != nil {
- if fhs := r.MultipartForm.File[key]; len(fhs) > 0 {
- f, err := fhs[0].Open()
- return f, fhs[0], err
- }
- }
- return nil, nil, ErrMissingFile
-}
-
-func (r *Request) expectsContinue() bool {
- return hasToken(r.Header.get("Expect"), "100-continue")
-}
-
-func (r *Request) wantsHttp10KeepAlive() bool {
- if r.ProtoMajor != 1 || r.ProtoMinor != 0 {
- return false
- }
- return hasToken(r.Header.get("Connection"), "keep-alive")
-}
-
-func (r *Request) wantsClose() bool {
- if r.Close {
- return true
- }
- return hasToken(r.Header.get("Connection"), "close")
-}
-
-func (r *Request) closeBody() error {
- if r.Body == nil {
- return nil
- }
- return r.Body.Close()
-}
-
-func (r *Request) isReplayable() bool {
- if r.Body == nil || r.Body == NoBody || r.GetBody != nil {
- switch valueOrDefault(r.Method, "GET") {
- case "GET", "HEAD", "OPTIONS", "TRACE":
- return true
- }
- // The Idempotency-Key, while non-standard, is widely used to
- // mean a POST or other request is idempotent. See
- // https://golang.org/issue/19943#issuecomment-421092421
- if r.Header.has("Idempotency-Key") || r.Header.has("X-Idempotency-Key") {
- return true
- }
- }
- return false
-}
-
-// outgoingLength reports the Content-Length of this outgoing (Client) request.
-// It maps 0 into -1 (unknown) when the Body is non-nil.
-func (r *Request) outgoingLength() int64 {
- if r.Body == nil || r.Body == NoBody {
- return 0
- }
- if r.ContentLength != 0 {
- return r.ContentLength
- }
- return -1
-}
-
-// requestMethodUsuallyLacksBody reports whether the given request
-// method is one that typically does not involve a request body.
-// This is used by the Transport (via
-// transferWriter.shouldSendChunkedRequestBody) to determine whether
-// we try to test-read a byte from a non-nil Request.Body when
-// Request.outgoingLength() returns -1. See the comments in
-// shouldSendChunkedRequestBody.
-func requestMethodUsuallyLacksBody(method string) bool {
- switch method {
- case "GET", "HEAD", "DELETE", "OPTIONS", "PROPFIND", "SEARCH":
- return true
- }
- return false
-}
-
-// requiresHTTP1 reports whether this request requires being sent on
-// an HTTP/1 connection.
-func (r *Request) requiresHTTP1() bool {
- return hasToken(r.Header.Get("Connection"), "upgrade") &&
- ascii.EqualFold(r.Header.Get("Upgrade"), "websocket")
-}
diff --git a/contrib/go/_std_1.18/src/net/http/response.go b/contrib/go/_std_1.18/src/net/http/response.go
deleted file mode 100644
index 297394eabe..0000000000
--- a/contrib/go/_std_1.18/src/net/http/response.go
+++ /dev/null
@@ -1,369 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP Response reading and parsing.
-
-package http
-
-import (
- "bufio"
- "bytes"
- "crypto/tls"
- "errors"
- "fmt"
- "io"
- "net/textproto"
- "net/url"
- "strconv"
- "strings"
-
- "golang.org/x/net/http/httpguts"
-)
-
-var respExcludeHeader = map[string]bool{
- "Content-Length": true,
- "Transfer-Encoding": true,
- "Trailer": true,
-}
-
-// Response represents the response from an HTTP request.
-//
-// The Client and Transport return Responses from servers once
-// the response headers have been received. The response body
-// is streamed on demand as the Body field is read.
-type Response struct {
- Status string // e.g. "200 OK"
- StatusCode int // e.g. 200
- Proto string // e.g. "HTTP/1.0"
- ProtoMajor int // e.g. 1
- ProtoMinor int // e.g. 0
-
- // Header maps header keys to values. If the response had multiple
- // headers with the same key, they may be concatenated, with comma
- // delimiters. (RFC 7230, section 3.2.2 requires that multiple headers
- // be semantically equivalent to a comma-delimited sequence.) When
- // Header values are duplicated by other fields in this struct (e.g.,
- // ContentLength, TransferEncoding, Trailer), the field values are
- // authoritative.
- //
- // Keys in the map are canonicalized (see CanonicalHeaderKey).
- Header Header
-
- // Body represents the response body.
- //
- // The response body is streamed on demand as the Body field
- // is read. If the network connection fails or the server
- // terminates the response, Body.Read calls return an error.
- //
- // The http Client and Transport guarantee that Body is always
- // non-nil, even on responses without a body or responses with
- // a zero-length body. It is the caller's responsibility to
- // close Body. The default HTTP client's Transport may not
- // reuse HTTP/1.x "keep-alive" TCP connections if the Body is
- // not read to completion and closed.
- //
- // The Body is automatically dechunked if the server replied
- // with a "chunked" Transfer-Encoding.
- //
- // As of Go 1.12, the Body will also implement io.Writer
- // on a successful "101 Switching Protocols" response,
- // as used by WebSockets and HTTP/2's "h2c" mode.
- Body io.ReadCloser
-
- // ContentLength records the length of the associated content. The
- // value -1 indicates that the length is unknown. Unless Request.Method
- // is "HEAD", values >= 0 indicate that the given number of bytes may
- // be read from Body.
- ContentLength int64
-
- // Contains transfer encodings from outer-most to inner-most. A nil
- // value means that the "identity" encoding is used.
- TransferEncoding []string
-
- // Close records whether the header directed that the connection be
- // closed after reading Body. The value is advice for clients: neither
- // ReadResponse nor Response.Write ever closes a connection.
- Close bool
-
- // Uncompressed reports whether the response was sent compressed but
- // was decompressed by the http package. When true, reading from
- // Body yields the uncompressed content instead of the compressed
- // content actually sent by the server, ContentLength is set to -1,
- // and the "Content-Length" and "Content-Encoding" fields are deleted
- // from the responseHeader. To get the original response from
- // the server, set Transport.DisableCompression to true.
- Uncompressed bool
-
- // Trailer maps trailer keys to values in the same
- // format as Header.
- //
- // The Trailer initially contains only nil values, one for
- // each key specified in the server's "Trailer" header
- // value. Those values are not added to Header.
- //
- // Trailer must not be accessed concurrently with Read calls
- // on the Body.
- //
- // After Body.Read has returned io.EOF, Trailer will contain
- // any trailer values sent by the server.
- Trailer Header
-
- // Request is the request that was sent to obtain this Response.
- // Request's Body is nil (having already been consumed).
- // This is only populated for Client requests.
- Request *Request
-
- // TLS contains information about the TLS connection on which the
- // response was received. It is nil for unencrypted responses.
- // The pointer is shared between responses and should not be
- // modified.
- TLS *tls.ConnectionState
-}
-
-// Cookies parses and returns the cookies set in the Set-Cookie headers.
-func (r *Response) Cookies() []*Cookie {
- return readSetCookies(r.Header)
-}
-
-// ErrNoLocation is returned by Response's Location method
-// when no Location header is present.
-var ErrNoLocation = errors.New("http: no Location header in response")
-
-// Location returns the URL of the response's "Location" header,
-// if present. Relative redirects are resolved relative to
-// the Response's Request. ErrNoLocation is returned if no
-// Location header is present.
-func (r *Response) Location() (*url.URL, error) {
- lv := r.Header.Get("Location")
- if lv == "" {
- return nil, ErrNoLocation
- }
- if r.Request != nil && r.Request.URL != nil {
- return r.Request.URL.Parse(lv)
- }
- return url.Parse(lv)
-}
-
-// ReadResponse reads and returns an HTTP response from r.
-// The req parameter optionally specifies the Request that corresponds
-// to this Response. If nil, a GET request is assumed.
-// Clients must call resp.Body.Close when finished reading resp.Body.
-// After that call, clients can inspect resp.Trailer to find key/value
-// pairs included in the response trailer.
-func ReadResponse(r *bufio.Reader, req *Request) (*Response, error) {
- tp := textproto.NewReader(r)
- resp := &Response{
- Request: req,
- }
-
- // Parse the first line of the response.
- line, err := tp.ReadLine()
- if err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return nil, err
- }
- proto, status, ok := strings.Cut(line, " ")
- if !ok {
- return nil, badStringError("malformed HTTP response", line)
- }
- resp.Proto = proto
- resp.Status = strings.TrimLeft(status, " ")
-
- statusCode, _, _ := strings.Cut(resp.Status, " ")
- if len(statusCode) != 3 {
- return nil, badStringError("malformed HTTP status code", statusCode)
- }
- resp.StatusCode, err = strconv.Atoi(statusCode)
- if err != nil || resp.StatusCode < 0 {
- return nil, badStringError("malformed HTTP status code", statusCode)
- }
- if resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok {
- return nil, badStringError("malformed HTTP version", resp.Proto)
- }
-
- // Parse the response headers.
- mimeHeader, err := tp.ReadMIMEHeader()
- if err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return nil, err
- }
- resp.Header = Header(mimeHeader)
-
- fixPragmaCacheControl(resp.Header)
-
- err = readTransfer(resp, r)
- if err != nil {
- return nil, err
- }
-
- return resp, nil
-}
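
A minimal sketch of parsing a canned response (the raw bytes are illustrative); passing nil for req assumes a GET, per the doc comment:

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	raw := "HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nhello"
	resp, err := http.ReadResponse(bufio.NewReader(strings.NewReader(raw)), nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // 200 hello
}
```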
-
-// RFC 7234, section 5.4: Should treat
-// Pragma: no-cache
-// like
-// Cache-Control: no-cache
-func fixPragmaCacheControl(header Header) {
- if hp, ok := header["Pragma"]; ok && len(hp) > 0 && hp[0] == "no-cache" {
- if _, presentcc := header["Cache-Control"]; !presentcc {
- header["Cache-Control"] = []string{"no-cache"}
- }
- }
-}
-
-// ProtoAtLeast reports whether the HTTP protocol used
-// in the response is at least major.minor.
-func (r *Response) ProtoAtLeast(major, minor int) bool {
- return r.ProtoMajor > major ||
- r.ProtoMajor == major && r.ProtoMinor >= minor
-}
-
-// Write writes r to w in the HTTP/1.x server response format,
-// including the status line, headers, body, and optional trailer.
-//
-// This method consults the following fields of the response r:
-//
-// StatusCode
-// ProtoMajor
-// ProtoMinor
-// Request.Method
-// TransferEncoding
-// Trailer
-// Body
-// ContentLength
-// Header, values for non-canonical keys will have unpredictable behavior
-//
-// The Response Body is closed after it is sent.
-func (r *Response) Write(w io.Writer) error {
- // Status line
- text := r.Status
- if text == "" {
- var ok bool
- text, ok = statusText[r.StatusCode]
- if !ok {
- text = "status code " + strconv.Itoa(r.StatusCode)
- }
- } else {
- // Just to reduce stutter if the user set r.Status to "200 OK"
- // and StatusCode to 200. Not important.
- text = strings.TrimPrefix(text, strconv.Itoa(r.StatusCode)+" ")
- }
-
- if _, err := fmt.Fprintf(w, "HTTP/%d.%d %03d %s\r\n", r.ProtoMajor, r.ProtoMinor, r.StatusCode, text); err != nil {
- return err
- }
-
- // Clone it, so we can modify r1 as needed.
- r1 := new(Response)
- *r1 = *r
- if r1.ContentLength == 0 && r1.Body != nil {
- // Is it actually 0 length? Or just unknown?
- var buf [1]byte
- n, err := r1.Body.Read(buf[:])
- if err != nil && err != io.EOF {
- return err
- }
- if n == 0 {
- // Reset it to a known zero reader, in case underlying one
- // is unhappy being read repeatedly.
- r1.Body = NoBody
- } else {
- r1.ContentLength = -1
- r1.Body = struct {
- io.Reader
- io.Closer
- }{
- io.MultiReader(bytes.NewReader(buf[:1]), r.Body),
- r.Body,
- }
- }
- }
- // If we're sending a non-chunked HTTP/1.1 response without a
- // content-length, the only way to do that is the old HTTP/1.0
- // way, by noting the EOF with a connection close, so we need
- // to set Close.
- if r1.ContentLength == -1 && !r1.Close && r1.ProtoAtLeast(1, 1) && !chunked(r1.TransferEncoding) && !r1.Uncompressed {
- r1.Close = true
- }
-
- // Process Body,ContentLength,Close,Trailer
- tw, err := newTransferWriter(r1)
- if err != nil {
- return err
- }
- err = tw.writeHeader(w, nil)
- if err != nil {
- return err
- }
-
- // Rest of header
- err = r.Header.WriteSubset(w, respExcludeHeader)
- if err != nil {
- return err
- }
-
- // The Content-Length may already have been sent for
- // POST/PUT requests, even if zero length. See Issue 8180.
- contentLengthAlreadySent := tw.shouldSendContentLength()
- if r1.ContentLength == 0 && !chunked(r1.TransferEncoding) && !contentLengthAlreadySent && bodyAllowedForStatus(r.StatusCode) {
- if _, err := io.WriteString(w, "Content-Length: 0\r\n"); err != nil {
- return err
- }
- }
-
- // End-of-header
- if _, err := io.WriteString(w, "\r\n"); err != nil {
- return err
- }
-
- // Write body and trailer
- err = tw.writeBody(w)
- if err != nil {
- return err
- }
-
- // Success
- return nil
-}
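-
-// Editor's note: an illustrative sketch, not from the original file, that
-// serializes a hand-built Response in the HTTP/1.x wire format via Write.
-// Assumes imports "io", "net/http", "os", and "strings".
-//
-//	resp := &http.Response{
-//		StatusCode:    200,
-//		ProtoMajor:    1,
-//		ProtoMinor:    1,
-//		Header:        http.Header{"Content-Type": {"text/plain"}},
-//		Body:          io.NopCloser(strings.NewReader("hello")),
-//		ContentLength: 5,
-//	}
-//	resp.Write(os.Stdout) // status line, headers, body; Body is closed after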
-
-func (r *Response) closeBody() {
- if r.Body != nil {
- r.Body.Close()
- }
-}
-
-// bodyIsWritable reports whether the Body supports writing. The
-// Transport returns Writable bodies for 101 Switching Protocols
-// responses.
-// The Transport uses this method to determine whether a persistent
-// connection is done being managed from its perspective. Once we
-// return a writable response body to a user, the net/http package is
-// done managing that connection.
-func (r *Response) bodyIsWritable() bool {
- _, ok := r.Body.(io.Writer)
- return ok
-}
-
-// isProtocolSwitch reports whether the response code and header
-// indicate a successful protocol upgrade response.
-func (r *Response) isProtocolSwitch() bool {
- return isProtocolSwitchResponse(r.StatusCode, r.Header)
-}
-
-// isProtocolSwitchResponse reports whether the response code and
-// response header indicate a successful protocol upgrade response.
-func isProtocolSwitchResponse(code int, h Header) bool {
- return code == StatusSwitchingProtocols && isProtocolSwitchHeader(h)
-}
-
-// isProtocolSwitchHeader reports whether the request or response header
-// is for a protocol switch.
-func isProtocolSwitchHeader(h Header) bool {
- return h.Get("Upgrade") != "" &&
- httpguts.HeaderValuesContainsToken(h["Connection"], "Upgrade")
-}
diff --git a/contrib/go/_std_1.18/src/net/http/server.go b/contrib/go/_std_1.18/src/net/http/server.go
deleted file mode 100644
index ffb742ba4a..0000000000
--- a/contrib/go/_std_1.18/src/net/http/server.go
+++ /dev/null
@@ -1,3622 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP server. See RFC 7230 through 7235.
-
-package http
-
-import (
- "bufio"
- "bytes"
- "context"
- "crypto/tls"
- "errors"
- "fmt"
- "internal/godebug"
- "io"
- "log"
- "math/rand"
- "net"
- "net/textproto"
- "net/url"
- urlpkg "net/url"
- "path"
- "runtime"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/net/http/httpguts"
-)
-
-// Errors used by the HTTP server.
-var (
- // ErrBodyNotAllowed is returned by ResponseWriter.Write calls
- // when the HTTP method or response code does not permit a
- // body.
- ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")
-
- // ErrHijacked is returned by ResponseWriter.Write calls when
- // the underlying connection has been hijacked using the
- // Hijacker interface. A zero-byte write on a hijacked
- // connection will return ErrHijacked without any other side
- // effects.
- ErrHijacked = errors.New("http: connection has been hijacked")
-
- // ErrContentLength is returned by ResponseWriter.Write calls
- // when a Handler set a Content-Length response header with a
- // declared size and then attempted to write more bytes than
- // declared.
- ErrContentLength = errors.New("http: wrote more than the declared Content-Length")
-
- // Deprecated: ErrWriteAfterFlush is no longer returned by
- // anything in the net/http package. Callers should not
- // compare errors against this variable.
- ErrWriteAfterFlush = errors.New("unused")
-)
-
-// A Handler responds to an HTTP request.
-//
-// ServeHTTP should write reply headers and data to the ResponseWriter
-// and then return. Returning signals that the request is finished; it
-// is not valid to use the ResponseWriter or read from the
-// Request.Body after or concurrently with the completion of the
-// ServeHTTP call.
-//
-// Depending on the HTTP client software, HTTP protocol version, and
-// any intermediaries between the client and the Go server, it may not
-// be possible to read from the Request.Body after writing to the
-// ResponseWriter. Cautious handlers should read the Request.Body
-// first, and then reply.
-//
-// Except for reading the body, handlers should not modify the
-// provided Request.
-//
-// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
-// that the effect of the panic was isolated to the active request.
-// It recovers the panic, logs a stack trace to the server error log,
-// and either closes the network connection or sends an HTTP/2
-// RST_STREAM, depending on the HTTP protocol. To abort a handler so
-// the client sees an interrupted response but the server doesn't log
-// an error, panic with the value ErrAbortHandler.
-type Handler interface {
- ServeHTTP(ResponseWriter, *Request)
-}
-
-// A ResponseWriter interface is used by an HTTP handler to
-// construct an HTTP response.
-//
-// A ResponseWriter may not be used after the Handler.ServeHTTP method
-// has returned.
-type ResponseWriter interface {
- // Header returns the header map that will be sent by
- // WriteHeader. The Header map also is the mechanism with which
- // Handlers can set HTTP trailers.
- //
- // Changing the header map after a call to WriteHeader (or
- // Write) has no effect unless the modified headers are
- // trailers.
- //
- // There are two ways to set Trailers. The preferred way is to
- // predeclare in the headers which trailers you will later
- // send by setting the "Trailer" header to the names of the
- // trailer keys which will come later. In this case, those
- // keys of the Header map are treated as if they were
- // trailers. See the example. The second way, for trailer
- // keys not known to the Handler until after the first Write,
- // is to prefix the Header map keys with the TrailerPrefix
- // constant value. See TrailerPrefix.
- //
- // To suppress automatic response headers (such as "Date"), set
- // their value to nil.
- Header() Header
-
- // Write writes the data to the connection as part of an HTTP reply.
- //
- // If WriteHeader has not yet been called, Write calls
- // WriteHeader(http.StatusOK) before writing the data. If the Header
- // does not contain a Content-Type line, Write adds a Content-Type set
- // to the result of passing the initial 512 bytes of written data to
- // DetectContentType. Additionally, if the total size of all written
- // data is under a few KB and there are no Flush calls, the
- // Content-Length header is added automatically.
- //
- // Depending on the HTTP protocol version and the client, calling
- // Write or WriteHeader may prevent future reads on the
- // Request.Body. For HTTP/1.x requests, handlers should read any
- // needed request body data before writing the response. Once the
- // headers have been flushed (due to either an explicit Flusher.Flush
- // call or writing enough data to trigger a flush), the request body
- // may be unavailable. For HTTP/2 requests, the Go HTTP server permits
- // handlers to continue to read the request body while concurrently
- // writing the response. However, such behavior may not be supported
- // by all HTTP/2 clients. Handlers should read before writing if
- // possible to maximize compatibility.
- Write([]byte) (int, error)
-
- // WriteHeader sends an HTTP response header with the provided
- // status code.
- //
- // If WriteHeader is not called explicitly, the first call to Write
- // will trigger an implicit WriteHeader(http.StatusOK).
- // Thus explicit calls to WriteHeader are mainly used to
- // send error codes.
- //
- // The provided code must be a valid HTTP 1xx-5xx status code.
- // Only one header may be written. Go does not currently
- // support sending user-defined 1xx informational headers,
- // with the exception of the 100-continue response header that the
- // Server sends automatically when the Request.Body is read.
- WriteHeader(statusCode int)
-}
-
-// The Flusher interface is implemented by ResponseWriters that allow
-// an HTTP handler to flush buffered data to the client.
-//
-// The default HTTP/1.x and HTTP/2 ResponseWriter implementations
-// support Flusher, but ResponseWriter wrappers may not. Handlers
-// should always test for this ability at runtime.
-//
-// Note that even for ResponseWriters that support Flush,
-// if the client is connected through an HTTP proxy,
-// the buffered data may not reach the client until the response
-// completes.
-type Flusher interface {
- // Flush sends any buffered data to the client.
- Flush()
-}
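-
-// Editor's note: a minimal streaming handler sketch, not from the original
-// file, performing the runtime type assertion that the Flusher docs above
-// recommend. Assumes imports "fmt", "net/http", and "time".
-//
-//	func stream(w http.ResponseWriter, r *http.Request) {
-//		flusher, ok := w.(http.Flusher)
-//		if !ok {
-//			http.Error(w, "streaming unsupported", http.StatusInternalServerError)
-//			return
-//		}
-//		for i := 0; i < 3; i++ {
-//			fmt.Fprintf(w, "tick %d\n", i)
-//			flusher.Flush() // push buffered bytes to the client now
-//			time.Sleep(time.Second)
-//		}
-//	}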
-
-// The Hijacker interface is implemented by ResponseWriters that allow
-// an HTTP handler to take over the connection.
-//
-// The default ResponseWriter for HTTP/1.x connections supports
-// Hijacker, but HTTP/2 connections intentionally do not.
-// ResponseWriter wrappers may also not support Hijacker. Handlers
-// should always test for this ability at runtime.
-type Hijacker interface {
- // Hijack lets the caller take over the connection.
- // After a call to Hijack the HTTP server library
- // will not do anything else with the connection.
- //
- // It becomes the caller's responsibility to manage
- // and close the connection.
- //
- // The returned net.Conn may have read or write deadlines
- // already set, depending on the configuration of the
- // Server. It is the caller's responsibility to set
- // or clear those deadlines as needed.
- //
- // The returned bufio.Reader may contain unprocessed buffered
- // data from the client.
- //
- // After a call to Hijack, the original Request.Body must not
- // be used. The original Request's Context remains valid and
- // is not canceled until the Request's ServeHTTP method
- // returns.
- Hijack() (net.Conn, *bufio.ReadWriter, error)
-}
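-
-// Editor's note: an illustrative handler sketch, not from the original file,
-// that takes over the raw connection via Hijacker as described above. Assumes
-// import "net/http".
-//
-//	func takeover(w http.ResponseWriter, r *http.Request) {
-//		hj, ok := w.(http.Hijacker)
-//		if !ok {
-//			http.Error(w, "hijacking unsupported", http.StatusInternalServerError)
-//			return
-//		}
-//		conn, bufrw, err := hj.Hijack()
-//		if err != nil {
-//			http.Error(w, err.Error(), http.StatusInternalServerError)
-//			return
-//		}
-//		defer conn.Close() // now entirely the caller's responsibility
-//		bufrw.WriteString("HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nhi")
-//		bufrw.Flush()
-//	}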
-
-// The CloseNotifier interface is implemented by ResponseWriters which
-// allow detecting when the underlying connection has gone away.
-//
-// This mechanism can be used to cancel long operations on the server
-// if the client has disconnected before the response is ready.
-//
-// Deprecated: the CloseNotifier interface predates Go's context package.
-// New code should use Request.Context instead.
-type CloseNotifier interface {
- // CloseNotify returns a channel that receives at most a
- // single value (true) when the client connection has gone
- // away.
- //
- // CloseNotify may wait to notify until Request.Body has been
- // fully read.
- //
- // After the Handler has returned, there is no guarantee
- // that the channel receives a value.
- //
- // If the protocol is HTTP/1.1 and CloseNotify is called while
- // processing an idempotent request (such as a GET) while
- // HTTP/1.1 pipelining is in use, the arrival of a subsequent
- // pipelined request may cause a value to be sent on the
- // returned channel. In practice HTTP/1.1 pipelining is not
- // enabled in browsers and not seen often in the wild. If this
- // is a problem, use HTTP/2 or only use CloseNotify on methods
- // such as POST.
- CloseNotify() <-chan bool
-}
-
-var (
- // ServerContextKey is a context key. It can be used in HTTP
- // handlers with Context.Value to access the server that
- // started the handler. The associated value will be of
- // type *Server.
- ServerContextKey = &contextKey{"http-server"}
-
- // LocalAddrContextKey is a context key. It can be used in
- // HTTP handlers with Context.Value to access the local
- // address the connection arrived on.
- // The associated value will be of type net.Addr.
- LocalAddrContextKey = &contextKey{"local-addr"}
-)
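-
-// Editor's note: a short sketch, not from the original file, reading both
-// context keys from inside a handler. Assumes imports "fmt", "net", and
-// "net/http".
-//
-//	func whoami(w http.ResponseWriter, r *http.Request) {
-//		srv, _ := r.Context().Value(http.ServerContextKey).(*http.Server)
-//		addr, _ := r.Context().Value(http.LocalAddrContextKey).(net.Addr)
-//		fmt.Fprintf(w, "server=%p local=%v\n", srv, addr)
-//	}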
-
-// A conn represents the server side of an HTTP connection.
-type conn struct {
- // server is the server on which the connection arrived.
- // Immutable; never nil.
- server *Server
-
- // cancelCtx cancels the connection-level context.
- cancelCtx context.CancelFunc
-
- // rwc is the underlying network connection.
- // This is never wrapped by other types and is the value given out
- // to CloseNotifier callers. It is usually of type *net.TCPConn or
- // *tls.Conn.
- rwc net.Conn
-
- // remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously
- // inside the Listener's Accept goroutine, as some implementations block.
- // It is populated immediately inside the (*conn).serve goroutine.
- // This is the value of a Handler's (*Request).RemoteAddr.
- remoteAddr string
-
- // tlsState is the TLS connection state when using TLS.
- // nil means not TLS.
- tlsState *tls.ConnectionState
-
- // werr is set to the first write error to rwc.
- // It is set via checkConnErrorWriter{w}, where bufw writes.
- werr error
-
- // r is bufr's read source. It's a wrapper around rwc that provides
- // io.LimitedReader-style limiting (while reading request headers)
- // and functionality to support CloseNotifier. See *connReader docs.
- r *connReader
-
- // bufr reads from r.
- bufr *bufio.Reader
-
- // bufw writes to checkConnErrorWriter{c}, which populates werr on error.
- bufw *bufio.Writer
-
- // lastMethod is the method of the most recent request
- // on this connection, if any.
- lastMethod string
-
- curReq atomic.Value // of *response (which has a Request in it)
-
- curState struct{ atomic uint64 } // packed (unixtime<<8|uint8(ConnState))
-
- // mu guards hijackedv
- mu sync.Mutex
-
- // hijackedv is whether this connection has been hijacked
- // by a Handler with the Hijacker interface.
- // It is guarded by mu.
- hijackedv bool
-}
-
-func (c *conn) hijacked() bool {
- c.mu.Lock()
- defer c.mu.Unlock()
- return c.hijackedv
-}
-
-// c.mu must be held.
-func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
- if c.hijackedv {
- return nil, nil, ErrHijacked
- }
- c.r.abortPendingRead()
-
- c.hijackedv = true
- rwc = c.rwc
- rwc.SetDeadline(time.Time{})
-
- buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc))
- if c.r.hasByte {
- if _, err := c.bufr.Peek(c.bufr.Buffered() + 1); err != nil {
- return nil, nil, fmt.Errorf("unexpected Peek failure reading buffered byte: %v", err)
- }
- }
- c.setState(rwc, StateHijacked, runHooks)
- return
-}
-
-// This should be >= 512 bytes for DetectContentType,
-// but otherwise it's somewhat arbitrary.
-const bufferBeforeChunkingSize = 2048
-
-// chunkWriter writes to a response's conn buffer, and is the writer
-// wrapped by the response.w buffered writer.
-//
-// chunkWriter also is responsible for finalizing the Header, including
-// conditionally setting the Content-Type and setting a Content-Length
-// in cases where the handler's final output is smaller than the buffer
-// size. It also conditionally adds chunk headers, when in chunking mode.
-//
-// See the comment above (*response).Write for the entire write flow.
-type chunkWriter struct {
- res *response
-
- // header is either nil or a deep clone of res.handlerHeader
- // at the time of res.writeHeader, if res.writeHeader is
- // called and extra buffering is being done to calculate
- // Content-Type and/or Content-Length.
- header Header
-
- // wroteHeader tells whether the header's been written to "the
- // wire" (or rather: w.conn.buf). this is unlike
- // (*response).wroteHeader, which tells only whether it was
- // logically written.
- wroteHeader bool
-
- // set by the writeHeader method:
- chunking bool // using chunked transfer encoding for reply body
-}
-
-var (
- crlf = []byte("\r\n")
- colonSpace = []byte(": ")
-)
-
-func (cw *chunkWriter) Write(p []byte) (n int, err error) {
- if !cw.wroteHeader {
- cw.writeHeader(p)
- }
- if cw.res.req.Method == "HEAD" {
- // Eat writes.
- return len(p), nil
- }
- if cw.chunking {
- _, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p))
- if err != nil {
- cw.res.conn.rwc.Close()
- return
- }
- }
- n, err = cw.res.conn.bufw.Write(p)
- if cw.chunking && err == nil {
- _, err = cw.res.conn.bufw.Write(crlf)
- }
- if err != nil {
- cw.res.conn.rwc.Close()
- }
- return
-}
-
-func (cw *chunkWriter) flush() {
- if !cw.wroteHeader {
- cw.writeHeader(nil)
- }
- cw.res.conn.bufw.Flush()
-}
-
-func (cw *chunkWriter) close() {
- if !cw.wroteHeader {
- cw.writeHeader(nil)
- }
- if cw.chunking {
- bw := cw.res.conn.bufw // conn's bufio writer
- // zero chunk to mark EOF
- bw.WriteString("0\r\n")
- if trailers := cw.res.finalTrailers(); trailers != nil {
- trailers.Write(bw) // the writer handles noting errors
- }
- // final blank line after the trailers (whether
- // present or not)
- bw.WriteString("\r\n")
- }
-}
-
-// A response represents the server side of an HTTP response.
-type response struct {
- conn *conn
- req *Request // request for this response
- reqBody io.ReadCloser
- cancelCtx context.CancelFunc // when ServeHTTP exits
- wroteHeader bool // reply header has been (logically) written
- wroteContinue bool // 100 Continue response was written
- wants10KeepAlive bool // HTTP/1.0 w/ Connection "keep-alive"
- wantsClose bool // HTTP request has Connection "close"
-
- // canWriteContinue is a boolean value accessed as an atomic int32
- // that says whether or not a 100 Continue header can be written
- // to the connection.
- // writeContinueMu must be held while writing the header.
- // These two fields together synchronize the body reader
- // (the expectContinueReader, which wants to write 100 Continue)
- // against the main writer.
- canWriteContinue atomicBool
- writeContinueMu sync.Mutex
-
- w *bufio.Writer // buffers output in chunks to chunkWriter
- cw chunkWriter
-
- // handlerHeader is the Header that Handlers get access to,
- // which may be retained and mutated even after WriteHeader.
- // handlerHeader is copied into cw.header at WriteHeader
- // time, and privately mutated thereafter.
- handlerHeader Header
- calledHeader bool // handler accessed handlerHeader via Header
-
- written int64 // number of bytes written in body
- contentLength int64 // explicitly-declared Content-Length; or -1
- status int // status code passed to WriteHeader
-
- // close connection after this reply. set on request and
- // updated after response from handler if there's a
- // "Connection: keep-alive" response header and a
- // Content-Length.
- closeAfterReply bool
-
- // requestBodyLimitHit is set by requestTooLarge when
- // maxBytesReader hits its max size. It is checked in
- // WriteHeader, to make sure we don't consume the
- // remaining request body to try to advance to the next HTTP
- // request. Instead, when this is set, we stop reading
- // subsequent requests on this connection and stop reading
- // input from it.
- requestBodyLimitHit bool
-
- // trailers are the headers to be sent after the handler
- // finishes writing the body. This field is initialized from
- // the Trailer response header when the response header is
- // written.
- trailers []string
-
- handlerDone atomicBool // set true when the handler exits
-
- // Buffers for Date, Content-Length, and status code
- dateBuf [len(TimeFormat)]byte
- clenBuf [10]byte
- statusBuf [3]byte
-
- // closeNotifyCh is the channel returned by CloseNotify.
- // TODO(bradfitz): this is currently (for Go 1.8) always
- // non-nil. Make this lazily-created again as it used to be?
- closeNotifyCh chan bool
- didCloseNotify int32 // atomic (only 0->1 winner should send)
-}
-
-// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
-// that, if present, signals that the map entry is actually for
-// the response trailers, and not the response headers. The prefix
-// is stripped after the ServeHTTP call finishes and the values are
-// sent in the trailers.
-//
-// This mechanism is intended only for trailers that are not known
-// prior to the headers being written. If the set of trailers is fixed
-// or known before the header is written, the normal Go trailers mechanism
-// is preferred:
-// https://pkg.go.dev/net/http#ResponseWriter
-// https://pkg.go.dev/net/http#example-ResponseWriter-Trailers
-const TrailerPrefix = "Trailer:"
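-
-// Editor's note: an illustrative handler, not from the original file, that
-// sends a trailer whose name is only known after the first Write, using the
-// TrailerPrefix mechanism described above. The header name is hypothetical.
-// Assumes import "net/http".
-//
-//	func withTrailer(w http.ResponseWriter, r *http.Request) {
-//		w.WriteHeader(http.StatusOK)
-//		w.Write([]byte("body\n"))
-//		// Decided after the first Write: the prefix marks it as a
-//		// trailer, sent once the handler returns.
-//		w.Header().Set(http.TrailerPrefix+"X-Checksum", "abc123")
-//	}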
-
-// finalTrailers is called after the Handler exits and returns a non-nil
-// value if the Handler set any trailers.
-func (w *response) finalTrailers() Header {
- var t Header
- for k, vv := range w.handlerHeader {
- if strings.HasPrefix(k, TrailerPrefix) {
- if t == nil {
- t = make(Header)
- }
- t[strings.TrimPrefix(k, TrailerPrefix)] = vv
- }
- }
- for _, k := range w.trailers {
- if t == nil {
- t = make(Header)
- }
- for _, v := range w.handlerHeader[k] {
- t.Add(k, v)
- }
- }
- return t
-}
-
-type atomicBool int32
-
-func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
-func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
-func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
-
-// declareTrailer is called for each Trailer header when the
-// response header is written. It notes that a header will need to be
-// written in the trailers at the end of the response.
-func (w *response) declareTrailer(k string) {
- k = CanonicalHeaderKey(k)
- if !httpguts.ValidTrailerHeader(k) {
- // Forbidden by RFC 7230, section 4.1.2
- return
- }
- w.trailers = append(w.trailers, k)
-}
-
-// requestTooLarge is called by maxBytesReader when too much input has
-// been read from the client.
-func (w *response) requestTooLarge() {
- w.closeAfterReply = true
- w.requestBodyLimitHit = true
- if !w.wroteHeader {
- w.Header().Set("Connection", "close")
- }
-}
-
-// needsSniff reports whether a Content-Type still needs to be sniffed.
-func (w *response) needsSniff() bool {
- _, haveType := w.handlerHeader["Content-Type"]
- return !w.cw.wroteHeader && !haveType && w.written < sniffLen
-}
-
-// writerOnly hides an io.Writer value's optional ReadFrom method
-// from io.Copy.
-type writerOnly struct {
- io.Writer
-}
-
-// ReadFrom is here to optimize copying from an *os.File regular file
-// to a *net.TCPConn with sendfile, or from a supported src type such
-// as a *net.TCPConn on Linux with splice.
-func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
- bufp := copyBufPool.Get().(*[]byte)
- buf := *bufp
- defer copyBufPool.Put(bufp)
-
- // Our underlying w.conn.rwc is usually a *TCPConn (with its
- // own ReadFrom method). If not, just fall back to the normal
- // copy method.
- rf, ok := w.conn.rwc.(io.ReaderFrom)
- if !ok {
- return io.CopyBuffer(writerOnly{w}, src, buf)
- }
-
- // Copy the first sniffLen bytes before switching to ReadFrom.
- // This ensures we don't start writing the response before the
- // source is available (see golang.org/issue/5660) and provides
- // enough bytes to perform Content-Type sniffing when required.
- if !w.cw.wroteHeader {
- n0, err := io.CopyBuffer(writerOnly{w}, io.LimitReader(src, sniffLen), buf)
- n += n0
- if err != nil || n0 < sniffLen {
- return n, err
- }
- }
-
- w.w.Flush() // get rid of any previous writes
- w.cw.flush() // make sure Header is written; flush data to rwc
-
- // Now that cw has been flushed, its chunking field is guaranteed initialized.
- if !w.cw.chunking && w.bodyAllowed() {
- n0, err := rf.ReadFrom(src)
- n += n0
- w.written += n0
- return n, err
- }
-
- n0, err := io.CopyBuffer(writerOnly{w}, src, buf)
- n += n0
- return n, err
-}
-
-// debugServerConnections controls whether all server connections are wrapped
-// with a verbose logging wrapper.
-const debugServerConnections = false
-
-// Create new connection from rwc.
-func (srv *Server) newConn(rwc net.Conn) *conn {
- c := &conn{
- server: srv,
- rwc: rwc,
- }
- if debugServerConnections {
- c.rwc = newLoggingConn("server", c.rwc)
- }
- return c
-}
-
-type readResult struct {
- _ incomparable
- n int
- err error
- b byte // byte read, if n == 1
-}
-
-// connReader is the io.Reader wrapper used by *conn. It combines a
-// selectively-activated io.LimitedReader (to bound request header
-// read sizes) with support for selectively keeping an io.Reader.Read
-// call blocked in a background goroutine to wait for activity and
-// trigger a CloseNotifier channel.
-type connReader struct {
- conn *conn
-
- mu sync.Mutex // guards following
- hasByte bool
- byteBuf [1]byte
- cond *sync.Cond
- inRead bool
- aborted bool // set true before conn.rwc deadline is set to past
- remain int64 // bytes remaining
-}
-
-func (cr *connReader) lock() {
- cr.mu.Lock()
- if cr.cond == nil {
- cr.cond = sync.NewCond(&cr.mu)
- }
-}
-
-func (cr *connReader) unlock() { cr.mu.Unlock() }
-
-func (cr *connReader) startBackgroundRead() {
- cr.lock()
- defer cr.unlock()
- if cr.inRead {
- panic("invalid concurrent Body.Read call")
- }
- if cr.hasByte {
- return
- }
- cr.inRead = true
- cr.conn.rwc.SetReadDeadline(time.Time{})
- go cr.backgroundRead()
-}
-
-func (cr *connReader) backgroundRead() {
- n, err := cr.conn.rwc.Read(cr.byteBuf[:])
- cr.lock()
- if n == 1 {
- cr.hasByte = true
- // We were past the end of the previous request's body already
- // (since we wouldn't be in a background read otherwise), so
- // this is a pipelined HTTP request. Prior to Go 1.11 we used to
- // send on the CloseNotify channel and cancel the context here,
- // but the behavior was documented as only "may", and we only
- // did that because that's how CloseNotify accidentally behaved
- // in very early Go releases prior to context support. Once we
- // added context support, people used a Handler's
- // Request.Context() and passed it along. Having that context
- // cancel on pipelined HTTP requests caused problems.
- // Fortunately, almost nothing uses HTTP/1.x pipelining.
- // Unfortunately, apt-get does, or sometimes does.
- // New Go 1.11 behavior: don't fire CloseNotify or cancel
- // contexts on pipelined requests. Shouldn't affect people, but
- // fixes cases like Issue 23921. This does mean that a client
- // closing their TCP connection after sending a pipelined
- // request won't cancel the context, but we'll catch that on any
- // write failure (in checkConnErrorWriter.Write).
- // If the server never writes, yes, there are still contrived
- // server & client behaviors where this fails to ever cancel the
- // context, but that's kinda why HTTP/1.x pipelining died
- // anyway.
- }
- if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() {
- // Ignore this error. It's the expected error from
- // another goroutine calling abortPendingRead.
- } else if err != nil {
- cr.handleReadError(err)
- }
- cr.aborted = false
- cr.inRead = false
- cr.unlock()
- cr.cond.Broadcast()
-}
-
-func (cr *connReader) abortPendingRead() {
- cr.lock()
- defer cr.unlock()
- if !cr.inRead {
- return
- }
- cr.aborted = true
- cr.conn.rwc.SetReadDeadline(aLongTimeAgo)
- for cr.inRead {
- cr.cond.Wait()
- }
- cr.conn.rwc.SetReadDeadline(time.Time{})
-}
-
-func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain }
-func (cr *connReader) setInfiniteReadLimit() { cr.remain = maxInt64 }
-func (cr *connReader) hitReadLimit() bool { return cr.remain <= 0 }
-
-// handleReadError is called whenever a Read from the client returns a
-// non-nil error.
-//
-// The provided non-nil err is almost always io.EOF or a "use of
-// closed network connection". In any case, the error is not
-// particularly interesting, except perhaps for debugging during
-// development. Any error means the connection is dead and we should
-// down its context.
-//
-// It may be called from multiple goroutines.
-func (cr *connReader) handleReadError(_ error) {
- cr.conn.cancelCtx()
- cr.closeNotify()
-}
-
-// may be called from multiple goroutines.
-func (cr *connReader) closeNotify() {
- res, _ := cr.conn.curReq.Load().(*response)
- if res != nil && atomic.CompareAndSwapInt32(&res.didCloseNotify, 0, 1) {
- res.closeNotifyCh <- true
- }
-}
-
-func (cr *connReader) Read(p []byte) (n int, err error) {
- cr.lock()
- if cr.inRead {
- cr.unlock()
- if cr.conn.hijacked() {
- panic("invalid Body.Read call. After hijacked, the original Request must not be used")
- }
- panic("invalid concurrent Body.Read call")
- }
- if cr.hitReadLimit() {
- cr.unlock()
- return 0, io.EOF
- }
- if len(p) == 0 {
- cr.unlock()
- return 0, nil
- }
- if int64(len(p)) > cr.remain {
- p = p[:cr.remain]
- }
- if cr.hasByte {
- p[0] = cr.byteBuf[0]
- cr.hasByte = false
- cr.unlock()
- return 1, nil
- }
- cr.inRead = true
- cr.unlock()
- n, err = cr.conn.rwc.Read(p)
-
- cr.lock()
- cr.inRead = false
- if err != nil {
- cr.handleReadError(err)
- }
- cr.remain -= int64(n)
- cr.unlock()
-
- cr.cond.Broadcast()
- return n, err
-}
-
-var (
- bufioReaderPool sync.Pool
- bufioWriter2kPool sync.Pool
- bufioWriter4kPool sync.Pool
-)
-
-var copyBufPool = sync.Pool{
- New: func() any {
- b := make([]byte, 32*1024)
- return &b
- },
-}
-
-func bufioWriterPool(size int) *sync.Pool {
- switch size {
- case 2 << 10:
- return &bufioWriter2kPool
- case 4 << 10:
- return &bufioWriter4kPool
- }
- return nil
-}
-
-func newBufioReader(r io.Reader) *bufio.Reader {
- if v := bufioReaderPool.Get(); v != nil {
- br := v.(*bufio.Reader)
- br.Reset(r)
- return br
- }
- // Note: if this reader size is ever changed, update
- // TestHandlerBodyClose's assumptions.
- return bufio.NewReader(r)
-}
-
-func putBufioReader(br *bufio.Reader) {
- br.Reset(nil)
- bufioReaderPool.Put(br)
-}
-
-func newBufioWriterSize(w io.Writer, size int) *bufio.Writer {
- pool := bufioWriterPool(size)
- if pool != nil {
- if v := pool.Get(); v != nil {
- bw := v.(*bufio.Writer)
- bw.Reset(w)
- return bw
- }
- }
- return bufio.NewWriterSize(w, size)
-}
-
-func putBufioWriter(bw *bufio.Writer) {
- bw.Reset(nil)
- if pool := bufioWriterPool(bw.Available()); pool != nil {
- pool.Put(bw)
- }
-}
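-
-// Editor's note: a self-contained sketch, not from the original file, of the
-// same size-bucketed sync.Pool technique that newBufioWriterSize and
-// putBufioWriter use above, applied to plain byte slices. Assumes import
-// "sync".
-//
-//	var bufPools = map[int]*sync.Pool{
-//		2 << 10: {New: func() any { b := make([]byte, 2<<10); return &b }},
-//		4 << 10: {New: func() any { b := make([]byte, 4<<10); return &b }},
-//	}
-//
-//	func getBuf(size int) *[]byte {
-//		if p, ok := bufPools[size]; ok {
-//			return p.Get().(*[]byte)
-//		}
-//		b := make([]byte, size)
-//		return &b
-//	}
-//
-//	func putBuf(b *[]byte) {
-//		if p, ok := bufPools[len(*b)]; ok {
-//			p.Put(b)
-//		}
-//	}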
-
-// DefaultMaxHeaderBytes is the maximum permitted size of the headers
-// in an HTTP request.
-// This can be overridden by setting Server.MaxHeaderBytes.
-const DefaultMaxHeaderBytes = 1 << 20 // 1 MB
-
-func (srv *Server) maxHeaderBytes() int {
- if srv.MaxHeaderBytes > 0 {
- return srv.MaxHeaderBytes
- }
- return DefaultMaxHeaderBytes
-}
-
-func (srv *Server) initialReadLimitSize() int64 {
- return int64(srv.maxHeaderBytes()) + 4096 // bufio slop
-}
-
-// tlsHandshakeTimeout returns the time limit permitted for the TLS
-// handshake, or zero for unlimited.
-//
-// It returns the minimum of any positive ReadHeaderTimeout,
-// ReadTimeout, or WriteTimeout.
-func (srv *Server) tlsHandshakeTimeout() time.Duration {
- var ret time.Duration
- for _, v := range [...]time.Duration{
- srv.ReadHeaderTimeout,
- srv.ReadTimeout,
- srv.WriteTimeout,
- } {
- if v <= 0 {
- continue
- }
- if ret == 0 || v < ret {
- ret = v
- }
- }
- return ret
-}
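-
-// Editor's note: an illustrative configuration, not from the original file.
-// With these settings, tlsHandshakeTimeout above returns 2s, the smallest
-// positive value among ReadHeaderTimeout, ReadTimeout, and WriteTimeout; the
-// address and certificate paths are hypothetical. Assumes imports "net/http"
-// and "time".
-//
-//	srv := &http.Server{
-//		Addr:              ":8443",
-//		ReadHeaderTimeout: 2 * time.Second,
-//		ReadTimeout:       5 * time.Second,
-//		WriteTimeout:      10 * time.Second,
-//	}
-//	srv.ListenAndServeTLS("cert.pem", "key.pem")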
-
-// expectContinueReader is a wrapper around io.ReadCloser which, on first
-// read, sends an HTTP/1.1 100 Continue header.
-type expectContinueReader struct {
- resp *response
- readCloser io.ReadCloser
- closed atomicBool
- sawEOF atomicBool
-}
-
-func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
- if ecr.closed.isSet() {
- return 0, ErrBodyReadAfterClose
- }
- w := ecr.resp
- if !w.wroteContinue && w.canWriteContinue.isSet() && !w.conn.hijacked() {
- w.wroteContinue = true
- w.writeContinueMu.Lock()
- if w.canWriteContinue.isSet() {
- w.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
- w.conn.bufw.Flush()
- w.canWriteContinue.setFalse()
- }
- w.writeContinueMu.Unlock()
- }
- n, err = ecr.readCloser.Read(p)
- if err == io.EOF {
- ecr.sawEOF.setTrue()
- }
- return
-}
-
-func (ecr *expectContinueReader) Close() error {
- ecr.closed.setTrue()
- return ecr.readCloser.Close()
-}
-
-// TimeFormat is the time format to use when generating times in HTTP
-// headers. It is like time.RFC1123 but hard-codes GMT as the time
-// zone. The time being formatted must be in UTC for Format to
-// generate the correct format.
-//
-// For parsing this time format, see ParseTime.
-const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
-
-// appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat))
-func appendTime(b []byte, t time.Time) []byte {
- const days = "SunMonTueWedThuFriSat"
- const months = "JanFebMarAprMayJunJulAugSepOctNovDec"
-
- t = t.UTC()
- yy, mm, dd := t.Date()
- hh, mn, ss := t.Clock()
- day := days[3*t.Weekday():]
- mon := months[3*(mm-1):]
-
- return append(b,
- day[0], day[1], day[2], ',', ' ',
- byte('0'+dd/10), byte('0'+dd%10), ' ',
- mon[0], mon[1], mon[2], ' ',
- byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ',
- byte('0'+hh/10), byte('0'+hh%10), ':',
- byte('0'+mn/10), byte('0'+mn%10), ':',
- byte('0'+ss/10), byte('0'+ss%10), ' ',
- 'G', 'M', 'T')
-}
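-
-// Editor's note: a small sketch, not from the original file, exercising the
-// TimeFormat constant that appendTime above renders without allocating.
-// Assumes imports "fmt", "net/http", and "time".
-//
-//	t := time.Now().UTC()
-//	s := t.Format(http.TimeFormat)
-//	fmt.Println(s) // e.g. "Fri, 30 Sep 2022 08:15:45 GMT"
-//	parsed, err := http.ParseTime(s)
-//	fmt.Println(parsed.Equal(t.Truncate(time.Second)), err) // true <nil>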
-
-var errTooLarge = errors.New("http: request too large")
-
-// Read next request from connection.
-func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
- if c.hijacked() {
- return nil, ErrHijacked
- }
-
- var (
- wholeReqDeadline time.Time // or zero if none
- hdrDeadline time.Time // or zero if none
- )
- t0 := time.Now()
- if d := c.server.readHeaderTimeout(); d > 0 {
- hdrDeadline = t0.Add(d)
- }
- if d := c.server.ReadTimeout; d > 0 {
- wholeReqDeadline = t0.Add(d)
- }
- c.rwc.SetReadDeadline(hdrDeadline)
- if d := c.server.WriteTimeout; d > 0 {
- defer func() {
- c.rwc.SetWriteDeadline(time.Now().Add(d))
- }()
- }
-
- c.r.setReadLimit(c.server.initialReadLimitSize())
- if c.lastMethod == "POST" {
- // RFC 7230 section 3 tolerance for old buggy clients.
- peek, _ := c.bufr.Peek(4) // ReadRequest will get err below
- c.bufr.Discard(numLeadingCRorLF(peek))
- }
- req, err := readRequest(c.bufr)
- if err != nil {
- if c.r.hitReadLimit() {
- return nil, errTooLarge
- }
- return nil, err
- }
-
- if !http1ServerSupportsRequest(req) {
- return nil, statusError{StatusHTTPVersionNotSupported, "unsupported protocol version"}
- }
-
- c.lastMethod = req.Method
- c.r.setInfiniteReadLimit()
-
- hosts, haveHost := req.Header["Host"]
- isH2Upgrade := req.isH2Upgrade()
- if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" {
- return nil, badRequestError("missing required Host header")
- }
- if len(hosts) == 1 && !httpguts.ValidHostHeader(hosts[0]) {
- return nil, badRequestError("malformed Host header")
- }
- for k, vv := range req.Header {
- if !httpguts.ValidHeaderFieldName(k) {
- return nil, badRequestError("invalid header name")
- }
- for _, v := range vv {
- if !httpguts.ValidHeaderFieldValue(v) {
- return nil, badRequestError("invalid header value")
- }
- }
- }
- delete(req.Header, "Host")
-
- ctx, cancelCtx := context.WithCancel(ctx)
- req.ctx = ctx
- req.RemoteAddr = c.remoteAddr
- req.TLS = c.tlsState
- if body, ok := req.Body.(*body); ok {
- body.doEarlyClose = true
- }
-
- // Adjust the read deadline if necessary.
- if !hdrDeadline.Equal(wholeReqDeadline) {
- c.rwc.SetReadDeadline(wholeReqDeadline)
- }
-
- w = &response{
- conn: c,
- cancelCtx: cancelCtx,
- req: req,
- reqBody: req.Body,
- handlerHeader: make(Header),
- contentLength: -1,
- closeNotifyCh: make(chan bool, 1),
-
- // We populate these ahead of time so we're not
- // reading from req.Header after the Handler starts
- // and maybe mutates it (Issue 14940)
- wants10KeepAlive: req.wantsHttp10KeepAlive(),
- wantsClose: req.wantsClose(),
- }
- if isH2Upgrade {
- w.closeAfterReply = true
- }
- w.cw.res = w
- w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize)
- return w, nil
-}
-
-// http1ServerSupportsRequest reports whether Go's HTTP/1.x server
-// supports the given request.
-func http1ServerSupportsRequest(req *Request) bool {
- if req.ProtoMajor == 1 {
- return true
- }
- // Accept "PRI * HTTP/2.0" upgrade requests, so Handlers can
- // wire up their own HTTP/2 upgrades.
- if req.ProtoMajor == 2 && req.ProtoMinor == 0 &&
- req.Method == "PRI" && req.RequestURI == "*" {
- return true
- }
- // Reject HTTP/0.x, and all other HTTP/2+ requests (which
- // aren't encoded in ASCII anyway).
- return false
-}
-
-func (w *response) Header() Header {
- if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader {
- // Accessing the header between logically writing it
- // and physically writing it means we need to allocate
- // a clone to snapshot the logically written state.
- w.cw.header = w.handlerHeader.Clone()
- }
- w.calledHeader = true
- return w.handlerHeader
-}
-
-// maxPostHandlerReadBytes is the max number of Request.Body bytes not
-// consumed by a handler that the server will read from the client
-// in order to keep a connection alive. If there are more bytes than
-// this, the server, to be paranoid, instead sends a "Connection:
-// close" response.
-//
-// This number is approximately what a typical machine's TCP buffer
-// size is anyway. (if we have the bytes on the machine, we might as
-// well read them)
-const maxPostHandlerReadBytes = 256 << 10
-
-func checkWriteHeaderCode(code int) {
- // Issue 22880: require valid WriteHeader status codes.
- // For now we only enforce that it's three digits.
- // In the future we might block things over 599 (600 and above aren't defined
- // at https://httpwg.org/specs/rfc7231.html#status.codes)
- // and we might block under 200 (once we have more mature 1xx support).
- // But for now any three digits.
- //
- // We used to send "HTTP/1.1 000 0" on the wire in responses but there's
- // no equivalent bogus thing we can realistically send in HTTP/2,
- // so we'll consistently panic instead and help people find their bugs
- // early. (We can't return an error from WriteHeader even if we wanted to.)
- if code < 100 || code > 999 {
- panic(fmt.Sprintf("invalid WriteHeader code %v", code))
- }
-}
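-
-// Editor's note: an illustrative consequence of the check above, not from the
-// original file: WriteHeader panics on codes outside 100..999, so handlers
-// that compute status codes dynamically may want to guard first. Assumes
-// import "net/http".
-//
-//	func respond(w http.ResponseWriter, code int) {
-//		if code < 100 || code > 999 {
-//			code = http.StatusInternalServerError // avoid the panic
-//		}
-//		w.WriteHeader(code)
-//	}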
-
-// relevantCaller searches the call stack for the first function outside of net/http.
-// The purpose of this function is to provide more helpful error messages.
-func relevantCaller() runtime.Frame {
- pc := make([]uintptr, 16)
- n := runtime.Callers(1, pc)
- frames := runtime.CallersFrames(pc[:n])
- var frame runtime.Frame
- for {
- frame, more := frames.Next()
- if !strings.HasPrefix(frame.Function, "net/http.") {
- return frame
- }
- if !more {
- break
- }
- }
- return frame
-}
-
-func (w *response) WriteHeader(code int) {
- if w.conn.hijacked() {
- caller := relevantCaller()
- w.conn.server.logf("http: response.WriteHeader on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
- return
- }
- if w.wroteHeader {
- caller := relevantCaller()
- w.conn.server.logf("http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
- return
- }
- checkWriteHeaderCode(code)
- w.wroteHeader = true
- w.status = code
-
- if w.calledHeader && w.cw.header == nil {
- w.cw.header = w.handlerHeader.Clone()
- }
-
- if cl := w.handlerHeader.get("Content-Length"); cl != "" {
- v, err := strconv.ParseInt(cl, 10, 64)
- if err == nil && v >= 0 {
- w.contentLength = v
- } else {
- w.conn.server.logf("http: invalid Content-Length of %q", cl)
- w.handlerHeader.Del("Content-Length")
- }
- }
-}
-
-// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
-// This type is used to avoid extra allocations from cloning and/or populating
-// the response Header map and all its 1-element slices.
-type extraHeader struct {
- contentType string
- connection string
- transferEncoding string
- date []byte // written if not nil
- contentLength []byte // written if not nil
-}
-
-// Sorted the same as extraHeader.Write's loop.
-var extraHeaderKeys = [][]byte{
- []byte("Content-Type"),
- []byte("Connection"),
- []byte("Transfer-Encoding"),
-}
-
-var (
- headerContentLength = []byte("Content-Length: ")
- headerDate = []byte("Date: ")
-)
-
-// Write writes the headers described in h to w.
-//
-// This method has a value receiver, despite the somewhat large size
-// of h, because it prevents an allocation. The escape analysis isn't
-// smart enough to realize this function doesn't mutate h.
-func (h extraHeader) Write(w *bufio.Writer) {
- if h.date != nil {
- w.Write(headerDate)
- w.Write(h.date)
- w.Write(crlf)
- }
- if h.contentLength != nil {
- w.Write(headerContentLength)
- w.Write(h.contentLength)
- w.Write(crlf)
- }
- for i, v := range []string{h.contentType, h.connection, h.transferEncoding} {
- if v != "" {
- w.Write(extraHeaderKeys[i])
- w.Write(colonSpace)
- w.WriteString(v)
- w.Write(crlf)
- }
- }
-}
-
-// writeHeader finalizes the header sent to the client and writes it
-// to cw.res.conn.bufw.
-//
-// p is not written by writeHeader, but is the first chunk of the body
-// that will be written. It is sniffed for a Content-Type if none is
-// set explicitly. It's also used to set the Content-Length, if the
-// total body size was small and the handler has already finished
-// running.
-func (cw *chunkWriter) writeHeader(p []byte) {
- if cw.wroteHeader {
- return
- }
- cw.wroteHeader = true
-
- w := cw.res
- keepAlivesEnabled := w.conn.server.doKeepAlives()
- isHEAD := w.req.Method == "HEAD"
-
- // header is written out to w.conn.buf below. Depending on the
- // state of the handler, we either own the map or not. If we
- // don't own it, the exclude map is created lazily for
- // WriteSubset to remove headers. The setHeader struct holds
- // headers we need to add.
- header := cw.header
- owned := header != nil
- if !owned {
- header = w.handlerHeader
- }
- var excludeHeader map[string]bool
- delHeader := func(key string) {
- if owned {
- header.Del(key)
- return
- }
- if _, ok := header[key]; !ok {
- return
- }
- if excludeHeader == nil {
- excludeHeader = make(map[string]bool)
- }
- excludeHeader[key] = true
- }
- var setHeader extraHeader
-
- // Don't write out the fake "Trailer:foo" keys. See TrailerPrefix.
- trailers := false
- for k := range cw.header {
- if strings.HasPrefix(k, TrailerPrefix) {
- if excludeHeader == nil {
- excludeHeader = make(map[string]bool)
- }
- excludeHeader[k] = true
- trailers = true
- }
- }
- for _, v := range cw.header["Trailer"] {
- trailers = true
- foreachHeaderElement(v, cw.res.declareTrailer)
- }
-
- te := header.get("Transfer-Encoding")
- hasTE := te != ""
-
- // If the handler is done but never sent a Content-Length
- // response header and this is our first (and last) write, set
- // it, even to zero. This helps HTTP/1.0 clients keep their
- // "keep-alive" connections alive.
- // Exceptions: 304/204/1xx responses never get Content-Length, and if
- // it was a HEAD request, we don't know the difference between
- // 0 actual bytes and 0 bytes because the handler noticed it
- // was a HEAD request and chose not to write anything. So for
- // HEAD, the handler should either write the Content-Length or
- // write non-zero bytes. If it's actually 0 bytes and the
- // handler never looked at the Request.Method, we just don't
- // send a Content-Length header.
- // Further, we don't send an automatic Content-Length if they
- // set a Transfer-Encoding, because they're generally incompatible.
- if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
- w.contentLength = int64(len(p))
- setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
- }
-
- // If this was an HTTP/1.0 request with keep-alive and we sent a
- // Content-Length back, we can make this a keep-alive response ...
- if w.wants10KeepAlive && keepAlivesEnabled {
- sentLength := header.get("Content-Length") != ""
- if sentLength && header.get("Connection") == "keep-alive" {
- w.closeAfterReply = false
- }
- }
-
- // Check for an explicit (and valid) Content-Length header.
- hasCL := w.contentLength != -1
-
- if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) {
- _, connectionHeaderSet := header["Connection"]
- if !connectionHeaderSet {
- setHeader.connection = "keep-alive"
- }
- } else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose {
- w.closeAfterReply = true
- }
-
- if header.get("Connection") == "close" || !keepAlivesEnabled {
- w.closeAfterReply = true
- }
-
- // If the client wanted a 100-continue but we never sent it to
- // them (or, more strictly: we never finished reading their
- // request body), don't reuse this connection because it's now
- // in an unknown state: we might be sending this response at
- // the same time the client is now sending its request body
- // after a timeout. (Some HTTP clients send Expect:
- // 100-continue but knowing that some servers don't support
- // it, the clients set a timer and send the body later anyway)
- // If we haven't seen EOF, we can't skip over the unread body
- // because we don't know if the next bytes on the wire will be
- // the body-following-the-timer or the subsequent request.
- // See Issue 11549.
- if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF.isSet() {
- w.closeAfterReply = true
- }
-
- // Per RFC 2616, we should consume the request body before
- // replying, if the handler hasn't already done so. But we
- // don't want to do an unbounded amount of reading here for
- // DoS reasons, so we only try up to a threshold.
- // TODO(bradfitz): where does RFC 2616 say that? See Issue 15527
- // about HTTP/1.x Handlers concurrently reading and writing, like
- // HTTP/2 handlers can do. Maybe this code should be relaxed?
- if w.req.ContentLength != 0 && !w.closeAfterReply {
- var discard, tooBig bool
-
- switch bdy := w.req.Body.(type) {
- case *expectContinueReader:
- if bdy.resp.wroteContinue {
- discard = true
- }
- case *body:
- bdy.mu.Lock()
- switch {
- case bdy.closed:
- if !bdy.sawEOF {
- // Body was closed in handler with non-EOF error.
- w.closeAfterReply = true
- }
- case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
- tooBig = true
- default:
- discard = true
- }
- bdy.mu.Unlock()
- default:
- discard = true
- }
-
- if discard {
- _, err := io.CopyN(io.Discard, w.reqBody, maxPostHandlerReadBytes+1)
- switch err {
- case nil:
- // There must be even more data left over.
- tooBig = true
- case ErrBodyReadAfterClose:
- // Body was already consumed and closed.
- case io.EOF:
- // The remaining body was just consumed, close it.
- err = w.reqBody.Close()
- if err != nil {
- w.closeAfterReply = true
- }
- default:
- // Some other kind of error occurred, like a read timeout, or
- // corrupt chunked encoding. In any case, whatever remains
- // on the wire must not be parsed as another HTTP request.
- w.closeAfterReply = true
- }
- }
-
- if tooBig {
- w.requestTooLarge()
- delHeader("Connection")
- setHeader.connection = "close"
- }
- }
-
- code := w.status
- if bodyAllowedForStatus(code) {
- // If no content type, apply sniffing algorithm to body.
- _, haveType := header["Content-Type"]
-
- // If the Content-Encoding was set and is non-blank,
- // we shouldn't sniff the body. See Issue 31753.
- ce := header.Get("Content-Encoding")
- hasCE := len(ce) > 0
- if !hasCE && !haveType && !hasTE && len(p) > 0 {
- setHeader.contentType = DetectContentType(p)
- }
- } else {
- for _, k := range suppressedHeaders(code) {
- delHeader(k)
- }
- }
-
- if !header.has("Date") {
- setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
- }
-
- if hasCL && hasTE && te != "identity" {
- // TODO: return an error if WriteHeader gets a return parameter
- // For now just ignore the Content-Length.
- w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
- te, w.contentLength)
- delHeader("Content-Length")
- hasCL = false
- }
-
- if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) || code == StatusNoContent {
- // Response has no body.
- delHeader("Transfer-Encoding")
- } else if hasCL {
- // Content-Length has been provided, so no chunking is to be done.
- delHeader("Transfer-Encoding")
- } else if w.req.ProtoAtLeast(1, 1) {
- // HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no
- // content-length has been provided. The connection must be closed after the
- // reply is written, and no chunking is to be done. This is the setup
- // recommended in the Server-Sent Events candidate recommendation 11,
- // section 8.
- if hasTE && te == "identity" {
- cw.chunking = false
- w.closeAfterReply = true
- delHeader("Transfer-Encoding")
- } else {
- // HTTP/1.1 or greater: use chunked transfer encoding
- // to avoid closing the connection at EOF.
- cw.chunking = true
- setHeader.transferEncoding = "chunked"
- if hasTE && te == "chunked" {
- // We will send the chunked Transfer-Encoding header later.
- delHeader("Transfer-Encoding")
- }
- }
- } else {
- // HTTP version < 1.1: cannot do chunked transfer
- // encoding and we don't know the Content-Length so
- // signal EOF by closing connection.
- w.closeAfterReply = true
- delHeader("Transfer-Encoding") // in case already set
- }
-
- // Cannot use Content-Length with non-identity Transfer-Encoding.
- if cw.chunking {
- delHeader("Content-Length")
- }
- if !w.req.ProtoAtLeast(1, 0) {
- return
- }
-
- // Only override the Connection header if it is not a successful
- // protocol switch response and if KeepAlives are not enabled.
- // See https://golang.org/issue/36381.
- delConnectionHeader := w.closeAfterReply &&
- (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) &&
- !isProtocolSwitchResponse(w.status, header)
- if delConnectionHeader {
- delHeader("Connection")
- if w.req.ProtoAtLeast(1, 1) {
- setHeader.connection = "close"
- }
- }
-
- writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
- cw.header.WriteSubset(w.conn.bufw, excludeHeader)
- setHeader.Write(w.conn.bufw)
- w.conn.bufw.Write(crlf)
-}
-
-// foreachHeaderElement splits v according to the "#rule" construction
-// in RFC 7230 section 7 and calls fn for each non-empty element.
-func foreachHeaderElement(v string, fn func(string)) {
- v = textproto.TrimString(v)
- if v == "" {
- return
- }
- if !strings.Contains(v, ",") {
- fn(v)
- return
- }
- for _, f := range strings.Split(v, ",") {
- if f = textproto.TrimString(f); f != "" {
- fn(f)
- }
- }
-}
-
-// writeStatusLine writes an HTTP/1.x Status-Line (RFC 7230 Section 3.1.2)
-// to bw. is11 is whether the HTTP request is HTTP/1.1. false means HTTP/1.0.
-// code is the response status code.
-// scratch is an optional scratch buffer. If it has at least capacity 3, it's used.
-func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) {
- if is11 {
- bw.WriteString("HTTP/1.1 ")
- } else {
- bw.WriteString("HTTP/1.0 ")
- }
- if text, ok := statusText[code]; ok {
- bw.Write(strconv.AppendInt(scratch[:0], int64(code), 10))
- bw.WriteByte(' ')
- bw.WriteString(text)
- bw.WriteString("\r\n")
- } else {
- // don't worry about performance
- fmt.Fprintf(bw, "%03d status code %d\r\n", code, code)
- }
-}
-
-// bodyAllowed reports whether a Write is allowed for this response type.
-// It's illegal to call this before the header has been flushed.
-func (w *response) bodyAllowed() bool {
- if !w.wroteHeader {
- panic("")
- }
- return bodyAllowedForStatus(w.status)
-}
-
-// The Life Of A Write is like this:
-//
-// Handler starts. No header has been sent. The handler can either
-// write a header, or just start writing. Writing before sending a header
-// sends an implicitly empty 200 OK header.
-//
-// If the handler didn't declare a Content-Length up front, we either
-// go into chunking mode or, if the handler finishes running before
-// the chunking buffer size, we compute a Content-Length and send that
-// in the header instead.
-//
-// Likewise, if the handler didn't set a Content-Type, we sniff that
-// from the initial chunk of output.
-//
-// The Writers are wired together like:
-//
-// 1. *response (the ResponseWriter) ->
-// 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes ->
-// 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type)
-// and which writes the chunk headers, if needed ->
-// 4. conn.bufw, a *bufio.Writer of default (4kB) bytes, writing to ->
-// 5. checkConnErrorWriter{c}, which notes any non-nil error on Write
-// and populates c.werr with it if so, but otherwise writes to ->
-// 6. the rwc, the net.Conn.
-//
-// TODO(bradfitz): short-circuit some of the buffering when the
-// initial header contains both a Content-Type and Content-Length.
-// Also short-circuit in (1) when the header's been sent and not in
-// chunking mode, writing directly to (4) instead, if (2) has no
-// buffered data. More generally, we could short-circuit from (1) to
-// (3) even in chunking mode if the write size from (1) is over some
-// threshold and nothing is in (2). The answer might be mostly making
-// bufferBeforeChunkingSize smaller and having bufio's fast-paths deal
-// with this instead.
-func (w *response) Write(data []byte) (n int, err error) {
- return w.write(len(data), data, "")
-}
-
-func (w *response) WriteString(data string) (n int, err error) {
- return w.write(len(data), nil, data)
-}
-
-// At most one of dataB and dataS is non-empty.
-func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) {
- if w.conn.hijacked() {
- if lenData > 0 {
- caller := relevantCaller()
- w.conn.server.logf("http: response.Write on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
- }
- return 0, ErrHijacked
- }
-
- if w.canWriteContinue.isSet() {
- // Body reader wants to write 100 Continue but hasn't yet.
- // Tell it not to. The store must be done while holding the lock
- // because the lock makes sure that there is not an active write
- // this very moment.
- w.writeContinueMu.Lock()
- w.canWriteContinue.setFalse()
- w.writeContinueMu.Unlock()
- }
-
- if !w.wroteHeader {
- w.WriteHeader(StatusOK)
- }
- if lenData == 0 {
- return 0, nil
- }
- if !w.bodyAllowed() {
- return 0, ErrBodyNotAllowed
- }
-
- w.written += int64(lenData) // ignoring errors, for errorKludge
- if w.contentLength != -1 && w.written > w.contentLength {
- return 0, ErrContentLength
- }
- if dataB != nil {
- return w.w.Write(dataB)
- } else {
- return w.w.WriteString(dataS)
- }
-}
-
-func (w *response) finishRequest() {
- w.handlerDone.setTrue()
-
- if !w.wroteHeader {
- w.WriteHeader(StatusOK)
- }
-
- w.w.Flush()
- putBufioWriter(w.w)
- w.cw.close()
- w.conn.bufw.Flush()
-
- w.conn.r.abortPendingRead()
-
- // Close the body (regardless of w.closeAfterReply) so we can
- // re-use its bufio.Reader later safely.
- w.reqBody.Close()
-
- if w.req.MultipartForm != nil {
- w.req.MultipartForm.RemoveAll()
- }
-}
-
-// shouldReuseConnection reports whether the underlying TCP connection can be reused.
-// It must only be called after the handler is done executing.
-func (w *response) shouldReuseConnection() bool {
- if w.closeAfterReply {
- // The request or something set while executing the
- // handler indicated we shouldn't reuse this
- // connection.
- return false
- }
-
- if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
- // Did not write enough. Avoid getting out of sync.
- return false
- }
-
- // There was some error writing to the underlying connection
- // during the request, so don't re-use this conn.
- if w.conn.werr != nil {
- return false
- }
-
- if w.closedRequestBodyEarly() {
- return false
- }
-
- return true
-}
-
-func (w *response) closedRequestBodyEarly() bool {
- body, ok := w.req.Body.(*body)
- return ok && body.didEarlyClose()
-}
-
-func (w *response) Flush() {
- if !w.wroteHeader {
- w.WriteHeader(StatusOK)
- }
- w.w.Flush()
- w.cw.flush()
-}
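-
-// Minimal usage sketch: a handler reaches the Flush above through the
-// http.Flusher interface to stream partial output. The ":8080" address
-// and "/stream" route are illustrative assumptions.
-//
-//	package main
-//
-//	import (
-//		"fmt"
-//		"net/http"
-//		"time"
-//	)
-//
-//	func main() {
-//		http.HandleFunc("/stream", func(w http.ResponseWriter, r *http.Request) {
-//			f, ok := w.(http.Flusher)
-//			if !ok {
-//				http.Error(w, "streaming unsupported", http.StatusInternalServerError)
-//				return
-//			}
-//			for i := 0; i < 3; i++ {
-//				fmt.Fprintf(w, "tick %d\n", i)
-//				f.Flush() // push buffered bytes to the client now
-//				time.Sleep(time.Second)
-//			}
-//		})
-//		http.ListenAndServe(":8080", nil)
-//	}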
-
-func (c *conn) finalFlush() {
- if c.bufr != nil {
-		// Return the bufio.Reader (~4KB worth of memory) to its pool
-		// so a future connection can reuse it.
- putBufioReader(c.bufr)
- c.bufr = nil
- }
-
- if c.bufw != nil {
- c.bufw.Flush()
-		// Return the bufio.Writer (~4KB worth of memory) to its pool
-		// so a future connection can reuse it.
- putBufioWriter(c.bufw)
- c.bufw = nil
- }
-}
-
-// Close the connection.
-func (c *conn) close() {
- c.finalFlush()
- c.rwc.Close()
-}
-
-// rstAvoidanceDelay is the amount of time we sleep after closing the
-// write side of a TCP connection before closing the entire socket.
-// By sleeping, we increase the chances that the client sees our FIN
-// and processes its final data before they process the subsequent RST
-// from closing a connection with known unread data.
-// This RST seems to occur mostly on BSD systems. (And Windows?)
-// This timeout is somewhat arbitrary (~latency around the planet).
-const rstAvoidanceDelay = 500 * time.Millisecond
-
-type closeWriter interface {
- CloseWrite() error
-}
-
-var _ closeWriter = (*net.TCPConn)(nil)
-
-// closeWrite flushes any outstanding data and sends a FIN packet (if
-// client is connected via TCP), signalling that we're done. We then
-// pause for a bit, hoping the client processes it before any
-// subsequent RST.
-//
-// See https://golang.org/issue/3595
-func (c *conn) closeWriteAndWait() {
- c.finalFlush()
- if tcp, ok := c.rwc.(closeWriter); ok {
- tcp.CloseWrite()
- }
- time.Sleep(rstAvoidanceDelay)
-}
-
-// validNextProto reports whether the proto is a valid ALPN protocol name.
-// Everything is valid except the empty string and built-in protocol types,
-// so that those can't be overridden with alternate implementations.
-func validNextProto(proto string) bool {
- switch proto {
- case "", "http/1.1", "http/1.0":
- return false
- }
- return true
-}
-
-const (
- runHooks = true
- skipHooks = false
-)
-
-func (c *conn) setState(nc net.Conn, state ConnState, runHook bool) {
- srv := c.server
- switch state {
- case StateNew:
- srv.trackConn(c, true)
- case StateHijacked, StateClosed:
- srv.trackConn(c, false)
- }
- if state > 0xff || state < 0 {
- panic("internal error")
- }
- packedState := uint64(time.Now().Unix()<<8) | uint64(state)
- atomic.StoreUint64(&c.curState.atomic, packedState)
- if !runHook {
- return
- }
- if hook := srv.ConnState; hook != nil {
- hook(nc, state)
- }
-}
-
-func (c *conn) getState() (state ConnState, unixSec int64) {
- packedState := atomic.LoadUint64(&c.curState.atomic)
- return ConnState(packedState & 0xff), int64(packedState >> 8)
-}
-
-// badRequestError is a literal string (used by the server in HTML,
-// unescaped) to tell the user why their request was bad. It should
-// be plain text without user info or other embedded errors.
-func badRequestError(e string) error { return statusError{StatusBadRequest, e} }
-
-// statusError is an error used to respond to a request with an HTTP status.
-// The text should be plain text without user info or other embedded errors.
-type statusError struct {
- code int
- text string
-}
-
-func (e statusError) Error() string { return StatusText(e.code) + ": " + e.text }
-
-// ErrAbortHandler is a sentinel panic value to abort a handler.
-// While any panic from ServeHTTP aborts the response to the client,
-// panicking with ErrAbortHandler also suppresses logging of a stack
-// trace to the server's error log.
-var ErrAbortHandler = errors.New("net/http: abort Handler")
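-
-// Minimal usage sketch: panicking with ErrAbortHandler aborts the
-// response without a logged stack trace. The ":8080" address and
-// "/abort" route are illustrative assumptions.
-//
-//	package main
-//
-//	import "net/http"
-//
-//	func main() {
-//		http.HandleFunc("/abort", func(w http.ResponseWriter, r *http.Request) {
-//			panic(http.ErrAbortHandler) // response aborted; nothing is logged
-//		})
-//		http.ListenAndServe(":8080", nil)
-//	}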
-
-// isCommonNetReadError reports whether err is a common error
-// encountered during reading a request off the network when the
-// client has gone away or had its read fail somehow. This is used to
-// determine which errors are interesting enough to log.
-func isCommonNetReadError(err error) bool {
- if err == io.EOF {
- return true
- }
- if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
- return true
- }
- if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
- return true
- }
- return false
-}
-
-// Serve a new connection.
-func (c *conn) serve(ctx context.Context) {
- c.remoteAddr = c.rwc.RemoteAddr().String()
- ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr())
- var inFlightResponse *response
- defer func() {
- if err := recover(); err != nil && err != ErrAbortHandler {
- const size = 64 << 10
- buf := make([]byte, size)
- buf = buf[:runtime.Stack(buf, false)]
- c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
- }
- if inFlightResponse != nil {
- inFlightResponse.cancelCtx()
- }
- if !c.hijacked() {
- if inFlightResponse != nil {
- inFlightResponse.conn.r.abortPendingRead()
- inFlightResponse.reqBody.Close()
- }
- c.close()
- c.setState(c.rwc, StateClosed, runHooks)
- }
- }()
-
- if tlsConn, ok := c.rwc.(*tls.Conn); ok {
- tlsTO := c.server.tlsHandshakeTimeout()
- if tlsTO > 0 {
- dl := time.Now().Add(tlsTO)
- c.rwc.SetReadDeadline(dl)
- c.rwc.SetWriteDeadline(dl)
- }
- if err := tlsConn.HandshakeContext(ctx); err != nil {
- // If the handshake failed due to the client not speaking
- // TLS, assume they're speaking plaintext HTTP and write a
- // 400 response on the TLS conn's underlying net.Conn.
- if re, ok := err.(tls.RecordHeaderError); ok && re.Conn != nil && tlsRecordHeaderLooksLikeHTTP(re.RecordHeader) {
- io.WriteString(re.Conn, "HTTP/1.0 400 Bad Request\r\n\r\nClient sent an HTTP request to an HTTPS server.\n")
- re.Conn.Close()
- return
- }
- c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err)
- return
- }
- // Restore Conn-level deadlines.
- if tlsTO > 0 {
- c.rwc.SetReadDeadline(time.Time{})
- c.rwc.SetWriteDeadline(time.Time{})
- }
- c.tlsState = new(tls.ConnectionState)
- *c.tlsState = tlsConn.ConnectionState()
- if proto := c.tlsState.NegotiatedProtocol; validNextProto(proto) {
- if fn := c.server.TLSNextProto[proto]; fn != nil {
- h := initALPNRequest{ctx, tlsConn, serverHandler{c.server}}
- // Mark freshly created HTTP/2 as active and prevent any server state hooks
- // from being run on these connections. This prevents closeIdleConns from
- // closing such connections. See issue https://golang.org/issue/39776.
- c.setState(c.rwc, StateActive, skipHooks)
- fn(c.server, tlsConn, h)
- }
- return
- }
- }
-
- // HTTP/1.x from here on.
-
- ctx, cancelCtx := context.WithCancel(ctx)
- c.cancelCtx = cancelCtx
- defer cancelCtx()
-
- c.r = &connReader{conn: c}
- c.bufr = newBufioReader(c.r)
- c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)
-
- for {
- w, err := c.readRequest(ctx)
- if c.r.remain != c.server.initialReadLimitSize() {
- // If we read any bytes off the wire, we're active.
- c.setState(c.rwc, StateActive, runHooks)
- }
- if err != nil {
- const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n"
-
- switch {
- case err == errTooLarge:
- // Their HTTP client may or may not be
- // able to read this if we're
- // responding to them and hanging up
- // while they're still writing their
- // request. Undefined behavior.
- const publicErr = "431 Request Header Fields Too Large"
- fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
- c.closeWriteAndWait()
- return
-
- case isUnsupportedTEError(err):
- // Respond as per RFC 7230 Section 3.3.1 which says,
- // A server that receives a request message with a
- // transfer coding it does not understand SHOULD
-				// respond with 501 (Not Implemented).
- code := StatusNotImplemented
-
- // We purposefully aren't echoing back the transfer-encoding's value,
-				// so as to mitigate the risk of cross-site scripting by an attacker.
- fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s%sUnsupported transfer encoding", code, StatusText(code), errorHeaders)
- return
-
- case isCommonNetReadError(err):
- return // don't reply
-
- default:
- if v, ok := err.(statusError); ok {
- fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s: %s%s%d %s: %s", v.code, StatusText(v.code), v.text, errorHeaders, v.code, StatusText(v.code), v.text)
- return
- }
- publicErr := "400 Bad Request"
- fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
- return
- }
- }
-
- // Expect 100 Continue support
- req := w.req
- if req.expectsContinue() {
- if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
- // Wrap the Body reader with one that replies on the connection
- req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
- w.canWriteContinue.setTrue()
- }
- } else if req.Header.get("Expect") != "" {
- w.sendExpectationFailed()
- return
- }
-
- c.curReq.Store(w)
-
- if requestBodyRemains(req.Body) {
- registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead)
- } else {
- w.conn.r.startBackgroundRead()
- }
-
- // HTTP cannot have multiple simultaneous active requests.[*]
- // Until the server replies to this request, it can't read another,
- // so we might as well run the handler in this goroutine.
- // [*] Not strictly true: HTTP pipelining. We could let them all process
- // in parallel even if their responses need to be serialized.
- // But we're not going to implement HTTP pipelining because it
- // was never deployed in the wild and the answer is HTTP/2.
- inFlightResponse = w
- serverHandler{c.server}.ServeHTTP(w, w.req)
- inFlightResponse = nil
- w.cancelCtx()
- if c.hijacked() {
- return
- }
- w.finishRequest()
- if !w.shouldReuseConnection() {
- if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
- c.closeWriteAndWait()
- }
- return
- }
- c.setState(c.rwc, StateIdle, runHooks)
- c.curReq.Store((*response)(nil))
-
- if !w.conn.server.doKeepAlives() {
- // We're in shutdown mode. We might've replied
- // to the user without "Connection: close" and
- // they might think they can send another
- // request, but such is life with HTTP/1.1.
- return
- }
-
- if d := c.server.idleTimeout(); d != 0 {
- c.rwc.SetReadDeadline(time.Now().Add(d))
- if _, err := c.bufr.Peek(4); err != nil {
- return
- }
- }
- c.rwc.SetReadDeadline(time.Time{})
- }
-}
-
-func (w *response) sendExpectationFailed() {
- // TODO(bradfitz): let ServeHTTP handlers handle
- // requests with non-standard expectation[s]? Seems
- // theoretical at best, and doesn't fit into the
- // current ServeHTTP model anyway. We'd need to
- // make the ResponseWriter an optional
- // "ExpectReplier" interface or something.
- //
- // For now we'll just obey RFC 7231 5.1.1 which says
- // "A server that receives an Expect field-value other
- // than 100-continue MAY respond with a 417 (Expectation
- // Failed) status code to indicate that the unexpected
- // expectation cannot be met."
- w.Header().Set("Connection", "close")
- w.WriteHeader(StatusExpectationFailed)
- w.finishRequest()
-}
-
-// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter
-// and a Hijacker.
-func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
- if w.handlerDone.isSet() {
- panic("net/http: Hijack called after ServeHTTP finished")
- }
- if w.wroteHeader {
- w.cw.flush()
- }
-
- c := w.conn
- c.mu.Lock()
- defer c.mu.Unlock()
-
-	// Release the bufioWriter that writes to the chunk writer; it is not
-	// used after a connection has been hijacked.
- rwc, buf, err = c.hijackLocked()
- if err == nil {
- putBufioWriter(w.w)
- w.w = nil
- }
- return rwc, buf, err
-}
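-
-// Minimal usage sketch: a handler takes over the raw connection via the
-// http.Hijacker interface and speaks bytes on it directly. The address,
-// route, and response bytes are illustrative assumptions.
-//
-//	package main
-//
-//	import "net/http"
-//
-//	func main() {
-//		http.HandleFunc("/raw", func(w http.ResponseWriter, r *http.Request) {
-//			hj, ok := w.(http.Hijacker)
-//			if !ok {
-//				http.Error(w, "hijacking unsupported", http.StatusInternalServerError)
-//				return
-//			}
-//			conn, buf, err := hj.Hijack()
-//			if err != nil {
-//				http.Error(w, err.Error(), http.StatusInternalServerError)
-//				return
-//			}
-//			defer conn.Close()
-//			// The library no longer manages conn; write a raw response.
-//			buf.WriteString("HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok")
-//			buf.Flush()
-//		})
-//		http.ListenAndServe(":8080", nil)
-//	}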
-
-func (w *response) CloseNotify() <-chan bool {
- if w.handlerDone.isSet() {
- panic("net/http: CloseNotify called after ServeHTTP finished")
- }
- return w.closeNotifyCh
-}
-
-func registerOnHitEOF(rc io.ReadCloser, fn func()) {
- switch v := rc.(type) {
- case *expectContinueReader:
- registerOnHitEOF(v.readCloser, fn)
- case *body:
- v.registerOnHitEOF(fn)
- default:
- panic("unexpected type " + fmt.Sprintf("%T", rc))
- }
-}
-
-// requestBodyRemains reports whether future calls to Read
-// on rc might yield more data.
-func requestBodyRemains(rc io.ReadCloser) bool {
- if rc == NoBody {
- return false
- }
- switch v := rc.(type) {
- case *expectContinueReader:
- return requestBodyRemains(v.readCloser)
- case *body:
- return v.bodyRemains()
- default:
- panic("unexpected type " + fmt.Sprintf("%T", rc))
- }
-}
-
-// The HandlerFunc type is an adapter to allow the use of
-// ordinary functions as HTTP handlers. If f is a function
-// with the appropriate signature, HandlerFunc(f) is a
-// Handler that calls f.
-type HandlerFunc func(ResponseWriter, *Request)
-
-// ServeHTTP calls f(w, r).
-func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) {
- f(w, r)
-}
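-
-// Minimal usage sketch: an ordinary function adapted into a Handler via
-// HandlerFunc. The ":8080" address is an illustrative assumption.
-//
-//	package main
-//
-//	import (
-//		"fmt"
-//		"net/http"
-//	)
-//
-//	func hello(w http.ResponseWriter, r *http.Request) {
-//		fmt.Fprintln(w, "hello")
-//	}
-//
-//	func main() {
-//		var h http.Handler = http.HandlerFunc(hello) // hello now satisfies Handler
-//		http.ListenAndServe(":8080", h)
-//	}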
-
-// Helper handlers
-
-// Error replies to the request with the specified error message and HTTP code.
-// It does not otherwise end the request; the caller should ensure no further
-// writes are done to w.
-// The error message should be plain text.
-func Error(w ResponseWriter, error string, code int) {
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- w.Header().Set("X-Content-Type-Options", "nosniff")
- w.WriteHeader(code)
- fmt.Fprintln(w, error)
-}
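-
-// Minimal usage sketch, assuming the usual fmt and net/http imports:
-// Error writes the message and status but does not end the request, so
-// the handler must return. The "id" parameter is an illustrative
-// assumption.
-//
-//	func lookup(w http.ResponseWriter, r *http.Request) {
-//		id := r.URL.Query().Get("id")
-//		if id == "" {
-//			http.Error(w, "missing id parameter", http.StatusBadRequest)
-//			return // nothing more should be written to w
-//		}
-//		fmt.Fprintln(w, "id =", id)
-//	}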
-
-// NotFound replies to the request with an HTTP 404 not found error.
-func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) }
-
-// NotFoundHandler returns a simple request handler
-// that replies to each request with a ``404 page not found'' reply.
-func NotFoundHandler() Handler { return HandlerFunc(NotFound) }
-
-// StripPrefix returns a handler that serves HTTP requests by removing the
-// given prefix from the request URL's Path (and RawPath if set) and invoking
-// the handler h. StripPrefix handles a request for a path that doesn't begin
-// with prefix by replying with an HTTP 404 not found error. The prefix must
-// match exactly: if the prefix in the request contains escaped characters
-// the reply is also an HTTP 404 not found error.
-func StripPrefix(prefix string, h Handler) Handler {
- if prefix == "" {
- return h
- }
- return HandlerFunc(func(w ResponseWriter, r *Request) {
- p := strings.TrimPrefix(r.URL.Path, prefix)
- rp := strings.TrimPrefix(r.URL.RawPath, prefix)
- if len(p) < len(r.URL.Path) && (r.URL.RawPath == "" || len(rp) < len(r.URL.RawPath)) {
- r2 := new(Request)
- *r2 = *r
- r2.URL = new(url.URL)
- *r2.URL = *r.URL
- r2.URL.Path = p
- r2.URL.RawPath = rp
- h.ServeHTTP(w, r2)
- } else {
- NotFound(w, r)
- }
- })
-}
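-
-// Minimal usage sketch: serving files from a local directory under a URL
-// prefix. The "assets" directory, "/static/" prefix, and address are
-// illustrative assumptions.
-//
-//	package main
-//
-//	import "net/http"
-//
-//	func main() {
-//		fs := http.FileServer(http.Dir("assets"))
-//		// A request for /static/css/site.css is served from assets/css/site.css.
-//		http.Handle("/static/", http.StripPrefix("/static/", fs))
-//		http.ListenAndServe(":8080", nil)
-//	}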
-
-// Redirect replies to the request with a redirect to url,
-// which may be a path relative to the request path.
-//
-// The provided code should be in the 3xx range and is usually
-// StatusMovedPermanently, StatusFound or StatusSeeOther.
-//
-// If the Content-Type header has not been set, Redirect sets it
-// to "text/html; charset=utf-8" and writes a small HTML body.
-// Setting the Content-Type header to any value, including nil,
-// disables that behavior.
-func Redirect(w ResponseWriter, r *Request, url string, code int) {
- if u, err := urlpkg.Parse(url); err == nil {
- // If url was relative, make its path absolute by
- // combining with request path.
- // The client would probably do this for us,
- // but doing it ourselves is more reliable.
- // See RFC 7231, section 7.1.2
- if u.Scheme == "" && u.Host == "" {
- oldpath := r.URL.Path
- if oldpath == "" { // should not happen, but avoid a crash if it does
- oldpath = "/"
- }
-
- // no leading http://server
- if url == "" || url[0] != '/' {
- // make relative path absolute
- olddir, _ := path.Split(oldpath)
- url = olddir + url
- }
-
- var query string
- if i := strings.Index(url, "?"); i != -1 {
- url, query = url[:i], url[i:]
- }
-
- // clean up but preserve trailing slash
- trailing := strings.HasSuffix(url, "/")
- url = path.Clean(url)
- if trailing && !strings.HasSuffix(url, "/") {
- url += "/"
- }
- url += query
- }
- }
-
- h := w.Header()
-
- // RFC 7231 notes that a short HTML body is usually included in
- // the response because older user agents may not understand 301/307.
- // Do it only if the request didn't already have a Content-Type header.
- _, hadCT := h["Content-Type"]
-
- h.Set("Location", hexEscapeNonASCII(url))
- if !hadCT && (r.Method == "GET" || r.Method == "HEAD") {
- h.Set("Content-Type", "text/html; charset=utf-8")
- }
- w.WriteHeader(code)
-
- // Shouldn't send the body for POST or HEAD; that leaves GET.
- if !hadCT && r.Method == "GET" {
- body := "<a href=\"" + htmlEscape(url) + "\">" + statusText[code] + "</a>.\n"
- fmt.Fprintln(w, body)
- }
-}
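-
-// Minimal usage sketch: a relative url is resolved against the request
-// path, so "new" below redirects /old to /new. The routes and address
-// are illustrative assumptions.
-//
-//	package main
-//
-//	import "net/http"
-//
-//	func main() {
-//		http.HandleFunc("/old", func(w http.ResponseWriter, r *http.Request) {
-//			http.Redirect(w, r, "new", http.StatusMovedPermanently)
-//		})
-//		http.ListenAndServe(":8080", nil)
-//	}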
-
-var htmlReplacer = strings.NewReplacer(
- "&", "&amp;",
- "<", "&lt;",
- ">", "&gt;",
- // "&#34;" is shorter than "&quot;".
- `"`, "&#34;",
- // "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
- "'", "&#39;",
-)
-
-func htmlEscape(s string) string {
- return htmlReplacer.Replace(s)
-}
-
-// Redirect to a fixed URL
-type redirectHandler struct {
- url string
- code int
-}
-
-func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
- Redirect(w, r, rh.url, rh.code)
-}
-
-// RedirectHandler returns a request handler that redirects
-// each request it receives to the given url using the given
-// status code.
-//
-// The provided code should be in the 3xx range and is usually
-// StatusMovedPermanently, StatusFound or StatusSeeOther.
-func RedirectHandler(url string, code int) Handler {
- return &redirectHandler{url, code}
-}
-
-// ServeMux is an HTTP request multiplexer.
-// It matches the URL of each incoming request against a list of registered
-// patterns and calls the handler for the pattern that
-// most closely matches the URL.
-//
-// Patterns name fixed, rooted paths, like "/favicon.ico",
-// or rooted subtrees, like "/images/" (note the trailing slash).
-// Longer patterns take precedence over shorter ones, so that
-// if there are handlers registered for both "/images/"
-// and "/images/thumbnails/", the latter handler will be
-// called for paths beginning "/images/thumbnails/" and the
-// former will receive requests for any other paths in the
-// "/images/" subtree.
-//
-// Note that since a pattern ending in a slash names a rooted subtree,
-// the pattern "/" matches all paths not matched by other registered
-// patterns, not just the URL with Path == "/".
-//
-// If a subtree has been registered and a request is received naming the
-// subtree root without its trailing slash, ServeMux redirects that
-// request to the subtree root (adding the trailing slash). This behavior can
-// be overridden with a separate registration for the path without
-// the trailing slash. For example, registering "/images/" causes ServeMux
-// to redirect a request for "/images" to "/images/", unless "/images" has
-// been registered separately.
-//
-// Patterns may optionally begin with a host name, restricting matches to
-// URLs on that host only. Host-specific patterns take precedence over
-// general patterns, so that a handler might register for the two patterns
-// "/codesearch" and "codesearch.google.com/" without also taking over
-// requests for "http://www.google.com/".
-//
-// ServeMux also takes care of sanitizing the URL request path and the Host
-// header, stripping the port number and redirecting any request containing . or
-// .. elements or repeated slashes to an equivalent, cleaner URL.
-type ServeMux struct {
- mu sync.RWMutex
- m map[string]muxEntry
- es []muxEntry // slice of entries sorted from longest to shortest.
- hosts bool // whether any patterns contain hostnames
-}
-
-type muxEntry struct {
- h Handler
- pattern string
-}
-
-// NewServeMux allocates and returns a new ServeMux.
-func NewServeMux() *ServeMux { return new(ServeMux) }
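-
-// Minimal usage sketch: longest-pattern-wins dispatch across two rooted
-// subtrees. The patterns and address are illustrative assumptions.
-//
-//	package main
-//
-//	import (
-//		"fmt"
-//		"net/http"
-//	)
-//
-//	func main() {
-//		mux := http.NewServeMux()
-//		mux.HandleFunc("/images/", func(w http.ResponseWriter, r *http.Request) {
-//			fmt.Fprintln(w, "image:", r.URL.Path)
-//		})
-//		// The longer pattern wins for paths under /images/thumbnails/.
-//		mux.HandleFunc("/images/thumbnails/", func(w http.ResponseWriter, r *http.Request) {
-//			fmt.Fprintln(w, "thumbnail:", r.URL.Path)
-//		})
-//		http.ListenAndServe(":8080", mux)
-//	}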
-
-// DefaultServeMux is the default ServeMux used by Serve.
-var DefaultServeMux = &defaultServeMux
-
-var defaultServeMux ServeMux
-
-// cleanPath returns the canonical path for p, eliminating . and .. elements.
-func cleanPath(p string) string {
- if p == "" {
- return "/"
- }
- if p[0] != '/' {
- p = "/" + p
- }
- np := path.Clean(p)
- // path.Clean removes trailing slash except for root;
- // put the trailing slash back if necessary.
- if p[len(p)-1] == '/' && np != "/" {
- // Fast path for common case of p being the string we want:
- if len(p) == len(np)+1 && strings.HasPrefix(p, np) {
- np = p
- } else {
- np += "/"
- }
- }
- return np
-}
-
-// stripHostPort returns h without any trailing ":<port>".
-func stripHostPort(h string) string {
- // If no port on host, return unchanged
- if !strings.Contains(h, ":") {
- return h
- }
- host, _, err := net.SplitHostPort(h)
- if err != nil {
- return h // on error, return unchanged
- }
- return host
-}
-
-// Find a handler on a handler map given a path string.
-// Most-specific (longest) pattern wins.
-func (mux *ServeMux) match(path string) (h Handler, pattern string) {
- // Check for exact match first.
- v, ok := mux.m[path]
- if ok {
- return v.h, v.pattern
- }
-
- // Check for longest valid match. mux.es contains all patterns
- // that end in / sorted from longest to shortest.
- for _, e := range mux.es {
- if strings.HasPrefix(path, e.pattern) {
- return e.h, e.pattern
- }
- }
- return nil, ""
-}
-
-// redirectToPathSlash reports whether the given path needs a trailing
-// "/" appended. That is the case when a handler for path + "/" was
-// already registered, but not for path itself. If a redirect is needed,
-// it returns a new URL with the path set to path + "/" and true.
-func (mux *ServeMux) redirectToPathSlash(host, path string, u *url.URL) (*url.URL, bool) {
- mux.mu.RLock()
- shouldRedirect := mux.shouldRedirectRLocked(host, path)
- mux.mu.RUnlock()
- if !shouldRedirect {
- return u, false
- }
- path = path + "/"
- u = &url.URL{Path: path, RawQuery: u.RawQuery}
- return u, true
-}
-
-// shouldRedirectRLocked reports whether the given path and host should be redirected to
-// path+"/". This should happen if a handler is registered for path+"/" but
-// not path -- see comments at ServeMux.
-func (mux *ServeMux) shouldRedirectRLocked(host, path string) bool {
- p := []string{path, host + path}
-
- for _, c := range p {
- if _, exist := mux.m[c]; exist {
- return false
- }
- }
-
- n := len(path)
- if n == 0 {
- return false
- }
- for _, c := range p {
- if _, exist := mux.m[c+"/"]; exist {
- return path[n-1] != '/'
- }
- }
-
- return false
-}
-
-// Handler returns the handler to use for the given request,
-// consulting r.Method, r.Host, and r.URL.Path. It always returns
-// a non-nil handler. If the path is not in its canonical form, the
-// handler will be an internally-generated handler that redirects
-// to the canonical path. If the host contains a port, it is ignored
-// when matching handlers.
-//
-// The path and host are used unchanged for CONNECT requests.
-//
-// Handler also returns the registered pattern that matches the
-// request or, in the case of internally-generated redirects,
-// the pattern that will match after following the redirect.
-//
-// If there is no registered handler that applies to the request,
-// Handler returns a ``page not found'' handler and an empty pattern.
-func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) {
-
- // CONNECT requests are not canonicalized.
- if r.Method == "CONNECT" {
- // If r.URL.Path is /tree and its handler is not registered,
- // the /tree -> /tree/ redirect applies to CONNECT requests
- // but the path canonicalization does not.
- if u, ok := mux.redirectToPathSlash(r.URL.Host, r.URL.Path, r.URL); ok {
- return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
- }
-
- return mux.handler(r.Host, r.URL.Path)
- }
-
- // All other requests have any port stripped and path cleaned
- // before passing to mux.handler.
- host := stripHostPort(r.Host)
- path := cleanPath(r.URL.Path)
-
- // If the given path is /tree and its handler is not registered,
- // redirect for /tree/.
- if u, ok := mux.redirectToPathSlash(host, path, r.URL); ok {
- return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
- }
-
- if path != r.URL.Path {
- _, pattern = mux.handler(host, path)
- u := &url.URL{Path: path, RawQuery: r.URL.RawQuery}
- return RedirectHandler(u.String(), StatusMovedPermanently), pattern
- }
-
- return mux.handler(host, r.URL.Path)
-}
-
-// handler is the main implementation of Handler.
-// The path is known to be in canonical form, except for CONNECT methods.
-func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) {
- mux.mu.RLock()
- defer mux.mu.RUnlock()
-
- // Host-specific pattern takes precedence over generic ones
- if mux.hosts {
- h, pattern = mux.match(host + path)
- }
- if h == nil {
- h, pattern = mux.match(path)
- }
- if h == nil {
- h, pattern = NotFoundHandler(), ""
- }
- return
-}
-
-// ServeHTTP dispatches the request to the handler whose
-// pattern most closely matches the request URL.
-func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) {
- if r.RequestURI == "*" {
- if r.ProtoAtLeast(1, 1) {
- w.Header().Set("Connection", "close")
- }
- w.WriteHeader(StatusBadRequest)
- return
- }
- h, _ := mux.Handler(r)
- h.ServeHTTP(w, r)
-}
-
-// Handle registers the handler for the given pattern.
-// If a handler already exists for pattern, Handle panics.
-func (mux *ServeMux) Handle(pattern string, handler Handler) {
- mux.mu.Lock()
- defer mux.mu.Unlock()
-
- if pattern == "" {
- panic("http: invalid pattern")
- }
- if handler == nil {
- panic("http: nil handler")
- }
- if _, exist := mux.m[pattern]; exist {
- panic("http: multiple registrations for " + pattern)
- }
-
- if mux.m == nil {
- mux.m = make(map[string]muxEntry)
- }
- e := muxEntry{h: handler, pattern: pattern}
- mux.m[pattern] = e
- if pattern[len(pattern)-1] == '/' {
- mux.es = appendSorted(mux.es, e)
- }
-
- if pattern[0] != '/' {
- mux.hosts = true
- }
-}
-
-func appendSorted(es []muxEntry, e muxEntry) []muxEntry {
- n := len(es)
- i := sort.Search(n, func(i int) bool {
- return len(es[i].pattern) < len(e.pattern)
- })
- if i == n {
- return append(es, e)
- }
- // we now know that i points at where we want to insert
- es = append(es, muxEntry{}) // try to grow the slice in place, any entry works.
- copy(es[i+1:], es[i:]) // Move shorter entries down
- es[i] = e
- return es
-}
-
-// HandleFunc registers the handler function for the given pattern.
-func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
- if handler == nil {
- panic("http: nil handler")
- }
- mux.Handle(pattern, HandlerFunc(handler))
-}
-
-// Handle registers the handler for the given pattern
-// in the DefaultServeMux.
-// The documentation for ServeMux explains how patterns are matched.
-func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
-
-// HandleFunc registers the handler function for the given pattern
-// in the DefaultServeMux.
-// The documentation for ServeMux explains how patterns are matched.
-func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
- DefaultServeMux.HandleFunc(pattern, handler)
-}
-
-// Serve accepts incoming HTTP connections on the listener l,
-// creating a new service goroutine for each. The service goroutines
-// read requests and then call handler to reply to them.
-//
-// The handler is typically nil, in which case the DefaultServeMux is used.
-//
-// HTTP/2 support is only enabled if the Listener returns *tls.Conn
-// connections and they were configured with "h2" in the TLS
-// Config.NextProtos.
-//
-// Serve always returns a non-nil error.
-func Serve(l net.Listener, handler Handler) error {
- srv := &Server{Handler: handler}
- return srv.Serve(l)
-}
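-
-// Minimal usage sketch: Serve with an explicit listener, which is how a
-// caller controls the bind address or uses a pre-opened socket. The
-// loopback address is an illustrative assumption.
-//
-//	package main
-//
-//	import (
-//		"log"
-//		"net"
-//		"net/http"
-//	)
-//
-//	func main() {
-//		ln, err := net.Listen("tcp", "127.0.0.1:0") // OS-assigned port
-//		if err != nil {
-//			log.Fatal(err)
-//		}
-//		log.Println("listening on", ln.Addr())
-//		log.Fatal(http.Serve(ln, nil)) // nil handler selects DefaultServeMux
-//	}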
-
-// ServeTLS accepts incoming HTTPS connections on the listener l,
-// creating a new service goroutine for each. The service goroutines
-// read requests and then call handler to reply to them.
-//
-// The handler is typically nil, in which case the DefaultServeMux is used.
-//
-// Additionally, files containing a certificate and matching private key
-// for the server must be provided. If the certificate is signed by a
-// certificate authority, the certFile should be the concatenation
-// of the server's certificate, any intermediates, and the CA's certificate.
-//
-// ServeTLS always returns a non-nil error.
-func ServeTLS(l net.Listener, handler Handler, certFile, keyFile string) error {
- srv := &Server{Handler: handler}
- return srv.ServeTLS(l, certFile, keyFile)
-}
-
-// A Server defines parameters for running an HTTP server.
-// The zero value for Server is a valid configuration.
-type Server struct {
- // Addr optionally specifies the TCP address for the server to listen on,
- // in the form "host:port". If empty, ":http" (port 80) is used.
- // The service names are defined in RFC 6335 and assigned by IANA.
- // See net.Dial for details of the address format.
- Addr string
-
- Handler Handler // handler to invoke, http.DefaultServeMux if nil
-
- // TLSConfig optionally provides a TLS configuration for use
- // by ServeTLS and ListenAndServeTLS. Note that this value is
- // cloned by ServeTLS and ListenAndServeTLS, so it's not
- // possible to modify the configuration with methods like
- // tls.Config.SetSessionTicketKeys. To use
- // SetSessionTicketKeys, use Server.Serve with a TLS Listener
- // instead.
- TLSConfig *tls.Config
-
- // ReadTimeout is the maximum duration for reading the entire
- // request, including the body. A zero or negative value means
- // there will be no timeout.
- //
- // Because ReadTimeout does not let Handlers make per-request
- // decisions on each request body's acceptable deadline or
- // upload rate, most users will prefer to use
- // ReadHeaderTimeout. It is valid to use them both.
- ReadTimeout time.Duration
-
- // ReadHeaderTimeout is the amount of time allowed to read
- // request headers. The connection's read deadline is reset
- // after reading the headers and the Handler can decide what
- // is considered too slow for the body. If ReadHeaderTimeout
- // is zero, the value of ReadTimeout is used. If both are
- // zero, there is no timeout.
- ReadHeaderTimeout time.Duration
-
- // WriteTimeout is the maximum duration before timing out
- // writes of the response. It is reset whenever a new
- // request's header is read. Like ReadTimeout, it does not
- // let Handlers make decisions on a per-request basis.
- // A zero or negative value means there will be no timeout.
- WriteTimeout time.Duration
-
- // IdleTimeout is the maximum amount of time to wait for the
- // next request when keep-alives are enabled. If IdleTimeout
- // is zero, the value of ReadTimeout is used. If both are
- // zero, there is no timeout.
- IdleTimeout time.Duration
-
- // MaxHeaderBytes controls the maximum number of bytes the
- // server will read parsing the request header's keys and
- // values, including the request line. It does not limit the
- // size of the request body.
- // If zero, DefaultMaxHeaderBytes is used.
- MaxHeaderBytes int
-
- // TLSNextProto optionally specifies a function to take over
- // ownership of the provided TLS connection when an ALPN
- // protocol upgrade has occurred. The map key is the protocol
- // name negotiated. The Handler argument should be used to
- // handle HTTP requests and will initialize the Request's TLS
- // and RemoteAddr if not already set. The connection is
- // automatically closed when the function returns.
- // If TLSNextProto is not nil, HTTP/2 support is not enabled
- // automatically.
- TLSNextProto map[string]func(*Server, *tls.Conn, Handler)
-
- // ConnState specifies an optional callback function that is
- // called when a client connection changes state. See the
- // ConnState type and associated constants for details.
- ConnState func(net.Conn, ConnState)
-
- // ErrorLog specifies an optional logger for errors accepting
- // connections, unexpected behavior from handlers, and
- // underlying FileSystem errors.
- // If nil, logging is done via the log package's standard logger.
- ErrorLog *log.Logger
-
- // BaseContext optionally specifies a function that returns
- // the base context for incoming requests on this server.
- // The provided Listener is the specific Listener that's
- // about to start accepting requests.
- // If BaseContext is nil, the default is context.Background().
- // If non-nil, it must return a non-nil context.
- BaseContext func(net.Listener) context.Context
-
- // ConnContext optionally specifies a function that modifies
- // the context used for a new connection c. The provided ctx
- // is derived from the base context and has a ServerContextKey
- // value.
- ConnContext func(ctx context.Context, c net.Conn) context.Context
-
- inShutdown atomicBool // true when server is in shutdown
-
- disableKeepAlives int32 // accessed atomically.
- nextProtoOnce sync.Once // guards setupHTTP2_* init
- nextProtoErr error // result of http2.ConfigureServer if used
-
- mu sync.Mutex
- listeners map[*net.Listener]struct{}
- activeConn map[*conn]struct{}
- doneChan chan struct{}
- onShutdown []func()
-}
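-
-// Minimal configuration sketch: a Server with explicit timeouts, which
-// the zero value does not set. The address and durations are
-// illustrative assumptions.
-//
-//	package main
-//
-//	import (
-//		"log"
-//		"net/http"
-//		"time"
-//	)
-//
-//	func main() {
-//		srv := &http.Server{
-//			Addr:              ":8080",
-//			Handler:           http.DefaultServeMux,
-//			ReadHeaderTimeout: 5 * time.Second,
-//			WriteTimeout:      10 * time.Second,
-//			IdleTimeout:       120 * time.Second,
-//		}
-//		log.Fatal(srv.ListenAndServe())
-//	}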
-
-func (s *Server) getDoneChan() <-chan struct{} {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.getDoneChanLocked()
-}
-
-func (s *Server) getDoneChanLocked() chan struct{} {
- if s.doneChan == nil {
- s.doneChan = make(chan struct{})
- }
- return s.doneChan
-}
-
-func (s *Server) closeDoneChanLocked() {
- ch := s.getDoneChanLocked()
- select {
- case <-ch:
- // Already closed. Don't close again.
- default:
- // Safe to close here. We're the only closer, guarded
- // by s.mu.
- close(ch)
- }
-}
-
-// Close immediately closes all active net.Listeners and any
-// connections in state StateNew, StateActive, or StateIdle. For a
-// graceful shutdown, use Shutdown.
-//
-// Close does not attempt to close (and does not even know about)
-// any hijacked connections, such as WebSockets.
-//
-// Close returns any error returned from closing the Server's
-// underlying Listener(s).
-func (srv *Server) Close() error {
- srv.inShutdown.setTrue()
- srv.mu.Lock()
- defer srv.mu.Unlock()
- srv.closeDoneChanLocked()
- err := srv.closeListenersLocked()
- for c := range srv.activeConn {
- c.rwc.Close()
- delete(srv.activeConn, c)
- }
- return err
-}
-
-// shutdownPollIntervalMax is the max polling interval when checking
-// quiescence during Server.Shutdown. Polling starts with a small
-// interval and backs off to the max.
-// Ideally we could find a solution that doesn't involve polling and
-// doesn't have a high runtime cost (or contentious mutexes), but
-// that is left as an exercise for the reader.
-const shutdownPollIntervalMax = 500 * time.Millisecond
-
-// Shutdown gracefully shuts down the server without interrupting any
-// active connections. Shutdown works by first closing all open
-// listeners, then closing all idle connections, and then waiting
-// indefinitely for connections to return to idle and then shut down.
-// If the provided context expires before the shutdown is complete,
-// Shutdown returns the context's error, otherwise it returns any
-// error returned from closing the Server's underlying Listener(s).
-//
-// When Shutdown is called, Serve, ListenAndServe, and
-// ListenAndServeTLS immediately return ErrServerClosed. Make sure the
-// program doesn't exit and waits instead for Shutdown to return.
-//
-// Shutdown does not attempt to close nor wait for hijacked
-// connections such as WebSockets. The caller of Shutdown should
-// separately notify such long-lived connections of shutdown and wait
-// for them to close, if desired. See RegisterOnShutdown for a way to
-// register shutdown notification functions.
-//
-// Once Shutdown has been called on a server, it may not be reused;
-// future calls to methods such as Serve will return ErrServerClosed.
-func (srv *Server) Shutdown(ctx context.Context) error {
- srv.inShutdown.setTrue()
-
- srv.mu.Lock()
- lnerr := srv.closeListenersLocked()
- srv.closeDoneChanLocked()
- for _, f := range srv.onShutdown {
- go f()
- }
- srv.mu.Unlock()
-
- pollIntervalBase := time.Millisecond
- nextPollInterval := func() time.Duration {
- // Add 10% jitter.
- interval := pollIntervalBase + time.Duration(rand.Intn(int(pollIntervalBase/10)))
- // Double and clamp for next time.
- pollIntervalBase *= 2
- if pollIntervalBase > shutdownPollIntervalMax {
- pollIntervalBase = shutdownPollIntervalMax
- }
- return interval
- }
-
- timer := time.NewTimer(nextPollInterval())
- defer timer.Stop()
- for {
- if srv.closeIdleConns() && srv.numListeners() == 0 {
- return lnerr
- }
- select {
- case <-ctx.Done():
- return ctx.Err()
- case <-timer.C:
- timer.Reset(nextPollInterval())
- }
- }
-}
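-
-// Minimal graceful-shutdown sketch: serve in a goroutine, then call
-// Shutdown with a deadline when an interrupt arrives. The address and
-// the five-second grace period are illustrative assumptions.
-//
-//	package main
-//
-//	import (
-//		"context"
-//		"log"
-//		"net/http"
-//		"os"
-//		"os/signal"
-//		"time"
-//	)
-//
-//	func main() {
-//		srv := &http.Server{Addr: ":8080"}
-//		go func() {
-//			if err := srv.ListenAndServe(); err != http.ErrServerClosed {
-//				log.Fatal(err)
-//			}
-//		}()
-//		stop := make(chan os.Signal, 1)
-//		signal.Notify(stop, os.Interrupt)
-//		<-stop
-//		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-//		defer cancel()
-//		if err := srv.Shutdown(ctx); err != nil {
-//			log.Fatal(err) // deadline expired before connections drained
-//		}
-//	}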
-
-// RegisterOnShutdown registers a function to call on Shutdown.
-// This can be used to gracefully shutdown connections that have
-// undergone ALPN protocol upgrade or that have been hijacked.
-// This function should start protocol-specific graceful shutdown,
-// but should not wait for shutdown to complete.
-func (srv *Server) RegisterOnShutdown(f func()) {
- srv.mu.Lock()
- srv.onShutdown = append(srv.onShutdown, f)
- srv.mu.Unlock()
-}
-
-func (s *Server) numListeners() int {
- s.mu.Lock()
- defer s.mu.Unlock()
- return len(s.listeners)
-}
-
-// closeIdleConns closes all idle connections and reports whether the
-// server is quiescent.
-func (s *Server) closeIdleConns() bool {
- s.mu.Lock()
- defer s.mu.Unlock()
- quiescent := true
- for c := range s.activeConn {
- st, unixSec := c.getState()
- // Issue 22682: treat StateNew connections as if
- // they're idle if we haven't read the first request's
- // header in over 5 seconds.
- if st == StateNew && unixSec < time.Now().Unix()-5 {
- st = StateIdle
- }
- if st != StateIdle || unixSec == 0 {
- // Assume unixSec == 0 means it's a very new
- // connection, without state set yet.
- quiescent = false
- continue
- }
- c.rwc.Close()
- delete(s.activeConn, c)
- }
- return quiescent
-}
-
-func (s *Server) closeListenersLocked() error {
- var err error
- for ln := range s.listeners {
- if cerr := (*ln).Close(); cerr != nil && err == nil {
- err = cerr
- }
- }
- return err
-}
-
-// A ConnState represents the state of a client connection to a server.
-// It's used by the optional Server.ConnState hook.
-type ConnState int
-
-const (
- // StateNew represents a new connection that is expected to
- // send a request immediately. Connections begin at this
- // state and then transition to either StateActive or
- // StateClosed.
- StateNew ConnState = iota
-
- // StateActive represents a connection that has read 1 or more
- // bytes of a request. The Server.ConnState hook for
- // StateActive fires before the request has entered a handler
- // and doesn't fire again until the request has been
- // handled. After the request is handled, the state
- // transitions to StateClosed, StateHijacked, or StateIdle.
- // For HTTP/2, StateActive fires on the transition from zero
- // to one active request, and only transitions away once all
- // active requests are complete. That means that ConnState
- // cannot be used to do per-request work; ConnState only notes
- // the overall state of the connection.
- StateActive
-
- // StateIdle represents a connection that has finished
- // handling a request and is in the keep-alive state, waiting
- // for a new request. Connections transition from StateIdle
- // to either StateActive or StateClosed.
- StateIdle
-
- // StateHijacked represents a hijacked connection.
- // This is a terminal state. It does not transition to StateClosed.
- StateHijacked
-
- // StateClosed represents a closed connection.
- // This is a terminal state. Hijacked connections do not
- // transition to StateClosed.
- StateClosed
-)
-
-var stateName = map[ConnState]string{
- StateNew: "new",
- StateActive: "active",
- StateIdle: "idle",
- StateHijacked: "hijacked",
- StateClosed: "closed",
-}
-
-func (c ConnState) String() string {
- return stateName[c]
-}
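-
-// Minimal usage sketch: a ConnState hook observing connection lifecycle
-// transitions. The ":8080" address is an illustrative assumption.
-//
-//	package main
-//
-//	import (
-//		"log"
-//		"net"
-//		"net/http"
-//	)
-//
-//	func main() {
-//		srv := &http.Server{
-//			Addr: ":8080",
-//			ConnState: func(c net.Conn, state http.ConnState) {
-//				log.Printf("%s: %s", c.RemoteAddr(), state) // e.g. "new", "active", "idle"
-//			},
-//		}
-//		log.Fatal(srv.ListenAndServe())
-//	}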
-
-// serverHandler delegates to either the server's Handler or
-// DefaultServeMux and also handles "OPTIONS *" requests.
-type serverHandler struct {
- srv *Server
-}
-
-func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) {
- handler := sh.srv.Handler
- if handler == nil {
- handler = DefaultServeMux
- }
- if req.RequestURI == "*" && req.Method == "OPTIONS" {
- handler = globalOptionsHandler{}
- }
-
- if req.URL != nil && strings.Contains(req.URL.RawQuery, ";") {
- var allowQuerySemicolonsInUse int32
- req = req.WithContext(context.WithValue(req.Context(), silenceSemWarnContextKey, func() {
- atomic.StoreInt32(&allowQuerySemicolonsInUse, 1)
- }))
- defer func() {
- if atomic.LoadInt32(&allowQuerySemicolonsInUse) == 0 {
- sh.srv.logf("http: URL query contains semicolon, which is no longer a supported separator; parts of the query may be stripped when parsed; see golang.org/issue/25192")
- }
- }()
- }
-
- handler.ServeHTTP(rw, req)
-}
-
-var silenceSemWarnContextKey = &contextKey{"silence-semicolons"}
-
-// AllowQuerySemicolons returns a handler that serves requests by converting any
-// unescaped semicolons in the URL query to ampersands, and invoking the handler h.
-//
-// This restores the pre-Go 1.17 behavior of splitting query parameters on both
-// semicolons and ampersands. (See golang.org/issue/25192). Note that this
-// behavior doesn't match that of many proxies, and the mismatch can lead to
-// security issues.
-//
-// AllowQuerySemicolons should be invoked before Request.ParseForm is called.
-func AllowQuerySemicolons(h Handler) Handler {
- return HandlerFunc(func(w ResponseWriter, r *Request) {
- if silenceSemicolonsWarning, ok := r.Context().Value(silenceSemWarnContextKey).(func()); ok {
- silenceSemicolonsWarning()
- }
- if strings.Contains(r.URL.RawQuery, ";") {
- r2 := new(Request)
- *r2 = *r
- r2.URL = new(url.URL)
- *r2.URL = *r.URL
- r2.URL.RawQuery = strings.ReplaceAll(r.URL.RawQuery, ";", "&")
- h.ServeHTTP(w, r2)
- } else {
- h.ServeHTTP(w, r)
- }
- })
-}
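-
-// Minimal usage sketch: with the wrapper installed, ?a=1;b=2 parses the
-// same as ?a=1&b=2. The ":8080" address is an illustrative assumption.
-//
-//	package main
-//
-//	import (
-//		"fmt"
-//		"net/http"
-//	)
-//
-//	func main() {
-//		mux := http.NewServeMux()
-//		mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
-//			fmt.Fprintln(w, r.URL.Query())
-//		})
-//		http.ListenAndServe(":8080", http.AllowQuerySemicolons(mux))
-//	}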
-
-// ListenAndServe listens on the TCP network address srv.Addr and then
-// calls Serve to handle requests on incoming connections.
-// Accepted connections are configured to enable TCP keep-alives.
-//
-// If srv.Addr is blank, ":http" is used.
-//
-// ListenAndServe always returns a non-nil error. After Shutdown or Close,
-// the returned error is ErrServerClosed.
-func (srv *Server) ListenAndServe() error {
- if srv.shuttingDown() {
- return ErrServerClosed
- }
- addr := srv.Addr
- if addr == "" {
- addr = ":http"
- }
- ln, err := net.Listen("tcp", addr)
- if err != nil {
- return err
- }
- return srv.Serve(ln)
-}
-
-var testHookServerServe func(*Server, net.Listener) // used if non-nil
-
-// shouldConfigureHTTP2ForServe reports whether Server.Serve should
-// configure automatic HTTP/2 (which sets up the srv.TLSNextProto map).
-func (srv *Server) shouldConfigureHTTP2ForServe() bool {
- if srv.TLSConfig == nil {
- // Compatibility with Go 1.6:
- // If there's no TLSConfig, it's possible that the user just
- // didn't set it on the http.Server, but did pass it to
- // tls.NewListener and passed that listener to Serve.
- // So we should configure HTTP/2 (to set up srv.TLSNextProto)
- // in case the listener returns an "h2" *tls.Conn.
- return true
- }
- // The user specified a TLSConfig on their http.Server.
-	// In this case, only configure HTTP/2 if their tls.Config
- // explicitly mentions "h2". Otherwise http2.ConfigureServer
- // would modify the tls.Config to add it, but they probably already
- // passed this tls.Config to tls.NewListener. And if they did,
- // it's too late anyway to fix it. It would only be potentially racy.
- // See Issue 15908.
- return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS)
-}
-
-// ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe,
-// and ListenAndServeTLS methods after a call to Shutdown or Close.
-var ErrServerClosed = errors.New("http: Server closed")
-
-// Serve accepts incoming connections on the Listener l, creating a
-// new service goroutine for each. The service goroutines read requests and
-// then call srv.Handler to reply to them.
-//
-// HTTP/2 support is only enabled if the Listener returns *tls.Conn
-// connections and they were configured with "h2" in the TLS
-// Config.NextProtos.
-//
-// Serve always returns a non-nil error and closes l.
-// After Shutdown or Close, the returned error is ErrServerClosed.
-func (srv *Server) Serve(l net.Listener) error {
- if fn := testHookServerServe; fn != nil {
- fn(srv, l) // call hook with unwrapped listener
- }
-
- origListener := l
- l = &onceCloseListener{Listener: l}
- defer l.Close()
-
- if err := srv.setupHTTP2_Serve(); err != nil {
- return err
- }
-
- if !srv.trackListener(&l, true) {
- return ErrServerClosed
- }
- defer srv.trackListener(&l, false)
-
- baseCtx := context.Background()
- if srv.BaseContext != nil {
- baseCtx = srv.BaseContext(origListener)
- if baseCtx == nil {
- panic("BaseContext returned a nil context")
- }
- }
-
- var tempDelay time.Duration // how long to sleep on accept failure
-
- ctx := context.WithValue(baseCtx, ServerContextKey, srv)
- for {
- rw, err := l.Accept()
- if err != nil {
- select {
- case <-srv.getDoneChan():
- return ErrServerClosed
- default:
- }
- if ne, ok := err.(net.Error); ok && ne.Temporary() {
- if tempDelay == 0 {
- tempDelay = 5 * time.Millisecond
- } else {
- tempDelay *= 2
- }
- if max := 1 * time.Second; tempDelay > max {
- tempDelay = max
- }
- srv.logf("http: Accept error: %v; retrying in %v", err, tempDelay)
- time.Sleep(tempDelay)
- continue
- }
- return err
- }
- connCtx := ctx
- if cc := srv.ConnContext; cc != nil {
- connCtx = cc(connCtx, rw)
- if connCtx == nil {
- panic("ConnContext returned nil")
- }
- }
- tempDelay = 0
- c := srv.newConn(rw)
- c.setState(c.rwc, StateNew, runHooks) // before Serve can return
- go c.serve(connCtx)
- }
-}
-
-// ServeTLS accepts incoming connections on the Listener l, creating a
-// new service goroutine for each. The service goroutines perform TLS
-// setup and then read requests, calling srv.Handler to reply to them.
-//
-// Files containing a certificate and matching private key for the
-// server must be provided if neither the Server's
-// TLSConfig.Certificates nor TLSConfig.GetCertificate are populated.
-// If the certificate is signed by a certificate authority, the
-// certFile should be the concatenation of the server's certificate,
-// any intermediates, and the CA's certificate.
-//
-// ServeTLS always returns a non-nil error. After Shutdown or Close, the
-// returned error is ErrServerClosed.
-func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error {
- // Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig
- // before we clone it and create the TLS Listener.
- if err := srv.setupHTTP2_ServeTLS(); err != nil {
- return err
- }
-
- config := cloneTLSConfig(srv.TLSConfig)
- if !strSliceContains(config.NextProtos, "http/1.1") {
- config.NextProtos = append(config.NextProtos, "http/1.1")
- }
-
- configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil
- if !configHasCert || certFile != "" || keyFile != "" {
- var err error
- config.Certificates = make([]tls.Certificate, 1)
- config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
- if err != nil {
- return err
- }
- }
-
- tlsListener := tls.NewListener(l, config)
- return srv.Serve(tlsListener)
-}
-
-// trackListener adds or removes a net.Listener to the set of tracked
-// listeners.
-//
-// We store a pointer to the interface value in the map set, in case the
-// net.Listener is not comparable. This is safe because we only call
-// trackListener via Serve and can track+defer untrack the same
-// pointer to local variable there. We never need to compare a
-// Listener from another caller.
-//
-// It reports whether the server is still up (not Shutdown or Closed).
-func (s *Server) trackListener(ln *net.Listener, add bool) bool {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.listeners == nil {
- s.listeners = make(map[*net.Listener]struct{})
- }
- if add {
- if s.shuttingDown() {
- return false
- }
- s.listeners[ln] = struct{}{}
- } else {
- delete(s.listeners, ln)
- }
- return true
-}
-
-func (s *Server) trackConn(c *conn, add bool) {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.activeConn == nil {
- s.activeConn = make(map[*conn]struct{})
- }
- if add {
- s.activeConn[c] = struct{}{}
- } else {
- delete(s.activeConn, c)
- }
-}
-
-func (s *Server) idleTimeout() time.Duration {
- if s.IdleTimeout != 0 {
- return s.IdleTimeout
- }
- return s.ReadTimeout
-}
-
-func (s *Server) readHeaderTimeout() time.Duration {
- if s.ReadHeaderTimeout != 0 {
- return s.ReadHeaderTimeout
- }
- return s.ReadTimeout
-}
-
-func (s *Server) doKeepAlives() bool {
- return atomic.LoadInt32(&s.disableKeepAlives) == 0 && !s.shuttingDown()
-}
-
-func (s *Server) shuttingDown() bool {
- return s.inShutdown.isSet()
-}
-
-// SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled.
-// By default, keep-alives are always enabled. Only very
-// resource-constrained environments or servers in the process of
-// shutting down should disable them.
-func (srv *Server) SetKeepAlivesEnabled(v bool) {
- if v {
- atomic.StoreInt32(&srv.disableKeepAlives, 0)
- return
- }
- atomic.StoreInt32(&srv.disableKeepAlives, 1)
-
- // Close idle HTTP/1 conns:
- srv.closeIdleConns()
-
- // TODO: Issue 26303: close HTTP/2 conns as soon as they become idle.
-}
-
-func (s *Server) logf(format string, args ...any) {
- if s.ErrorLog != nil {
- s.ErrorLog.Printf(format, args...)
- } else {
- log.Printf(format, args...)
- }
-}
-
-// logf prints to the ErrorLog of the *Server associated with request r
-// via ServerContextKey. If there's no associated server, or if ErrorLog
-// is nil, logging is done via the log package's standard logger.
-func logf(r *Request, format string, args ...any) {
- s, _ := r.Context().Value(ServerContextKey).(*Server)
- if s != nil && s.ErrorLog != nil {
- s.ErrorLog.Printf(format, args...)
- } else {
- log.Printf(format, args...)
- }
-}
-
-// ListenAndServe listens on the TCP network address addr and then calls
-// Serve with handler to handle requests on incoming connections.
-// Accepted connections are configured to enable TCP keep-alives.
-//
-// The handler is typically nil, in which case the DefaultServeMux is used.
-//
-// ListenAndServe always returns a non-nil error.
-func ListenAndServe(addr string, handler Handler) error {
- server := &Server{Addr: addr, Handler: handler}
- return server.ListenAndServe()
-}
-
-// ListenAndServeTLS acts identically to ListenAndServe, except that it
-// expects HTTPS connections. Additionally, files containing a certificate and
-// matching private key for the server must be provided. If the certificate
-// is signed by a certificate authority, the certFile should be the concatenation
-// of the server's certificate, any intermediates, and the CA's certificate.
-func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
- server := &Server{Addr: addr, Handler: handler}
- return server.ListenAndServeTLS(certFile, keyFile)
-}
-
-// ListenAndServeTLS listens on the TCP network address srv.Addr and
-// then calls ServeTLS to handle requests on incoming TLS connections.
-// Accepted connections are configured to enable TCP keep-alives.
-//
-// Filenames containing a certificate and matching private key for the
-// server must be provided if neither the Server's TLSConfig.Certificates
-// nor TLSConfig.GetCertificate are populated. If the certificate is
-// signed by a certificate authority, the certFile should be the
-// concatenation of the server's certificate, any intermediates, and
-// the CA's certificate.
-//
-// If srv.Addr is blank, ":https" is used.
-//
-// ListenAndServeTLS always returns a non-nil error. After Shutdown or
-// Close, the returned error is ErrServerClosed.
-func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
- if srv.shuttingDown() {
- return ErrServerClosed
- }
- addr := srv.Addr
- if addr == "" {
- addr = ":https"
- }
-
- ln, err := net.Listen("tcp", addr)
- if err != nil {
- return err
- }
-
- defer ln.Close()
-
- return srv.ServeTLS(ln, certFile, keyFile)
-}
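-
-// Minimal usage sketch: an HTTPS server from certificate and key files.
-// The "cert.pem" and "key.pem" file names and the ":443" address are
-// illustrative assumptions.
-//
-//	package main
-//
-//	import (
-//		"log"
-//		"net/http"
-//	)
-//
-//	func main() {
-//		srv := &http.Server{Addr: ":443"}
-//		log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
-//	}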
-
-// setupHTTP2_ServeTLS conditionally configures HTTP/2 on
-// srv and reports whether there was an error setting it up. If it is
-// not configured for policy reasons, nil is returned.
-func (srv *Server) setupHTTP2_ServeTLS() error {
- srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults)
- return srv.nextProtoErr
-}
-
-// setupHTTP2_Serve is called from (*Server).Serve and conditionally
-// configures HTTP/2 on srv using a more conservative policy than
-// setupHTTP2_ServeTLS because Serve is called after tls.Listen,
-// and may be called concurrently. See shouldConfigureHTTP2ForServe.
-//
-// The tests named TestTransportAutomaticHTTP2* and
-// TestConcurrentServerServe in server_test.go demonstrate some
-// of the supported use cases and motivations.
-func (srv *Server) setupHTTP2_Serve() error {
- srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve)
- return srv.nextProtoErr
-}
-
-func (srv *Server) onceSetNextProtoDefaults_Serve() {
- if srv.shouldConfigureHTTP2ForServe() {
- srv.onceSetNextProtoDefaults()
- }
-}
-
-// onceSetNextProtoDefaults configures HTTP/2, if the user hasn't
-// configured otherwise (by setting srv.TLSNextProto non-nil).
-// It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*).
-func (srv *Server) onceSetNextProtoDefaults() {
- if omitBundledHTTP2 || godebug.Get("http2server") == "0" {
- return
- }
- // Enable HTTP/2 by default if the user hasn't otherwise
- // configured their TLSNextProto map.
- if srv.TLSNextProto == nil {
- conf := &http2Server{
- NewWriteScheduler: func() http2WriteScheduler { return http2NewPriorityWriteScheduler(nil) },
- }
- srv.nextProtoErr = http2ConfigureServer(srv, conf)
- }
-}
-
-// TimeoutHandler returns a Handler that runs h with the given time limit.
-//
-// The new Handler calls h.ServeHTTP to handle each request, but if a
-// call runs for longer than its time limit, the handler responds with
-// a 503 Service Unavailable error and the given message in its body.
-// (If msg is empty, a suitable default message will be sent.)
-// After such a timeout, writes by h to its ResponseWriter will return
-// ErrHandlerTimeout.
-//
-// TimeoutHandler supports the Pusher interface but does not support
-// the Hijacker or Flusher interfaces.
-func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
- return &timeoutHandler{
- handler: h,
- body: msg,
- dt: dt,
- }
-}
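-
-// Minimal usage sketch: a slow handler wrapped so that requests taking
-// over one second get a 503 with the given message. The durations,
-// message, and address are illustrative assumptions.
-//
-//	package main
-//
-//	import (
-//		"fmt"
-//		"net/http"
-//		"time"
-//	)
-//
-//	func main() {
-//		slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-//			time.Sleep(2 * time.Second)
-//			fmt.Fprintln(w, "done")
-//		})
-//		h := http.TimeoutHandler(slow, time.Second, "request timed out")
-//		http.ListenAndServe(":8080", h)
-//	}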
-
-// ErrHandlerTimeout is returned on ResponseWriter Write calls
-// in handlers which have timed out.
-var ErrHandlerTimeout = errors.New("http: Handler timeout")
-
-type timeoutHandler struct {
- handler Handler
- body string
- dt time.Duration
-
- // When set, no context will be created and this context will
- // be used instead.
- testContext context.Context
-}
-
-func (h *timeoutHandler) errorBody() string {
- if h.body != "" {
- return h.body
- }
- return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>"
-}
-
-func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) {
- ctx := h.testContext
- if ctx == nil {
- var cancelCtx context.CancelFunc
- ctx, cancelCtx = context.WithTimeout(r.Context(), h.dt)
- defer cancelCtx()
- }
- r = r.WithContext(ctx)
- done := make(chan struct{})
- tw := &timeoutWriter{
- w: w,
- h: make(Header),
- req: r,
- }
- panicChan := make(chan any, 1)
- go func() {
- defer func() {
- if p := recover(); p != nil {
- panicChan <- p
- }
- }()
- h.handler.ServeHTTP(tw, r)
- close(done)
- }()
- select {
- case p := <-panicChan:
- panic(p)
- case <-done:
- tw.mu.Lock()
- defer tw.mu.Unlock()
- dst := w.Header()
- for k, vv := range tw.h {
- dst[k] = vv
- }
- if !tw.wroteHeader {
- tw.code = StatusOK
- }
- w.WriteHeader(tw.code)
- w.Write(tw.wbuf.Bytes())
- case <-ctx.Done():
- tw.mu.Lock()
- defer tw.mu.Unlock()
- switch err := ctx.Err(); err {
- case context.DeadlineExceeded:
- w.WriteHeader(StatusServiceUnavailable)
- io.WriteString(w, h.errorBody())
- tw.err = ErrHandlerTimeout
- default:
- w.WriteHeader(StatusServiceUnavailable)
- tw.err = err
- }
- }
-}
-
-type timeoutWriter struct {
- w ResponseWriter
- h Header
- wbuf bytes.Buffer
- req *Request
-
- mu sync.Mutex
- err error
- wroteHeader bool
- code int
-}
-
-var _ Pusher = (*timeoutWriter)(nil)
-
-// Push implements the Pusher interface.
-func (tw *timeoutWriter) Push(target string, opts *PushOptions) error {
- if pusher, ok := tw.w.(Pusher); ok {
- return pusher.Push(target, opts)
- }
- return ErrNotSupported
-}
-
-func (tw *timeoutWriter) Header() Header { return tw.h }
-
-func (tw *timeoutWriter) Write(p []byte) (int, error) {
- tw.mu.Lock()
- defer tw.mu.Unlock()
- if tw.err != nil {
- return 0, tw.err
- }
- if !tw.wroteHeader {
- tw.writeHeaderLocked(StatusOK)
- }
- return tw.wbuf.Write(p)
-}
-
-func (tw *timeoutWriter) writeHeaderLocked(code int) {
- checkWriteHeaderCode(code)
-
- switch {
- case tw.err != nil:
- return
- case tw.wroteHeader:
- if tw.req != nil {
- caller := relevantCaller()
- logf(tw.req, "http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
- }
- default:
- tw.wroteHeader = true
- tw.code = code
- }
-}
-
-func (tw *timeoutWriter) WriteHeader(code int) {
- tw.mu.Lock()
- defer tw.mu.Unlock()
- tw.writeHeaderLocked(code)
-}
-
-// onceCloseListener wraps a net.Listener, protecting it from
-// multiple Close calls.
-type onceCloseListener struct {
- net.Listener
- once sync.Once
- closeErr error
-}
-
-func (oc *onceCloseListener) Close() error {
- oc.once.Do(oc.close)
- return oc.closeErr
-}
-
-func (oc *onceCloseListener) close() { oc.closeErr = oc.Listener.Close() }
-
-// globalOptionsHandler responds to "OPTIONS *" requests.
-type globalOptionsHandler struct{}
-
-func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) {
- w.Header().Set("Content-Length", "0")
- if r.ContentLength != 0 {
-		// Read up to 4KB of OPTIONS body (which the spec reserves
-		// for future use); anything over that is considered a waste
-		// of server resources (or an attack), so we abort and close
-		// the connection, courtesy of MaxBytesReader's EOF behavior.
- mb := MaxBytesReader(w, r.Body, 4<<10)
- io.Copy(io.Discard, mb)
- }
-}
-
-// initALPNRequest is an HTTP handler that initializes certain
-// uninitialized fields in its *Request. Such partially-initialized
-// Requests come from ALPN protocol handlers.
-type initALPNRequest struct {
- ctx context.Context
- c *tls.Conn
- h serverHandler
-}
-
-// BaseContext is an exported but unadvertised http.Handler method
-// recognized by x/net/http2 to pass down a context; the TLSNextProto
-// API predates context support so we shoehorn through the only
-// interface we have available.
-func (h initALPNRequest) BaseContext() context.Context { return h.ctx }
-
-func (h initALPNRequest) ServeHTTP(rw ResponseWriter, req *Request) {
- if req.TLS == nil {
- req.TLS = &tls.ConnectionState{}
- *req.TLS = h.c.ConnectionState()
- }
- if req.Body == nil {
- req.Body = NoBody
- }
- if req.RemoteAddr == "" {
- req.RemoteAddr = h.c.RemoteAddr().String()
- }
- h.h.ServeHTTP(rw, req)
-}
-
-// loggingConn is used for debugging.
-type loggingConn struct {
- name string
- net.Conn
-}
-
-var (
- uniqNameMu sync.Mutex
- uniqNameNext = make(map[string]int)
-)
-
-func newLoggingConn(baseName string, c net.Conn) net.Conn {
- uniqNameMu.Lock()
- defer uniqNameMu.Unlock()
- uniqNameNext[baseName]++
- return &loggingConn{
- name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]),
- Conn: c,
- }
-}
-
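-// Illustrative usage (not in the original source): wrap a freshly
-// dialed connection so every Read, Write, and Close is logged while
-// debugging; the network address is hypothetical.
-//
-//	c, err := net.Dial("tcp", "127.0.0.1:8080")
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	c = newLoggingConn("debug", c)
-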
-func (c *loggingConn) Write(p []byte) (n int, err error) {
- log.Printf("%s.Write(%d) = ....", c.name, len(p))
- n, err = c.Conn.Write(p)
- log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err)
- return
-}
-
-func (c *loggingConn) Read(p []byte) (n int, err error) {
- log.Printf("%s.Read(%d) = ....", c.name, len(p))
- n, err = c.Conn.Read(p)
- log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err)
- return
-}
-
-func (c *loggingConn) Close() (err error) {
- log.Printf("%s.Close() = ...", c.name)
- err = c.Conn.Close()
- log.Printf("%s.Close() = %v", c.name, err)
- return
-}
-
-// checkConnErrorWriter writes to c.rwc and records any write errors to c.werr.
-// It only contains one field (and a pointer field at that), so it
-// fits in an interface value without an extra allocation.
-type checkConnErrorWriter struct {
- c *conn
-}
-
-func (w checkConnErrorWriter) Write(p []byte) (n int, err error) {
- n, err = w.c.rwc.Write(p)
- if err != nil && w.c.werr == nil {
- w.c.werr = err
- w.c.cancelCtx()
- }
- return
-}
-
-func numLeadingCRorLF(v []byte) (n int) {
- for _, b := range v {
- if b == '\r' || b == '\n' {
- n++
- continue
- }
- break
- }
-	return
-}
-
-func strSliceContains(ss []string, s string) bool {
- for _, v := range ss {
- if v == s {
- return true
- }
- }
- return false
-}
-
-// tlsRecordHeaderLooksLikeHTTP reports whether a TLS record header
-// looks like it might've been a misdirected plaintext HTTP request.
-func tlsRecordHeaderLooksLikeHTTP(hdr [5]byte) bool {
- switch string(hdr[:]) {
- case "GET /", "HEAD ", "POST ", "PUT /", "OPTIO":
- return true
- }
- return false
-}
-
-// MaxBytesHandler returns a Handler that runs h with its ResponseWriter and Request.Body wrapped by a MaxBytesReader.
-func MaxBytesHandler(h Handler, n int64) Handler {
- return HandlerFunc(func(w ResponseWriter, r *Request) {
- r2 := *r
- r2.Body = MaxBytesReader(w, r.Body, n)
- h.ServeHTTP(w, &r2)
- })
-}
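-
-// Illustrative usage (not in the original source): cap request bodies
-// at 1 MiB for a hypothetical upload handler.
-//
-//	mux := NewServeMux()
-//	mux.Handle("/upload", MaxBytesHandler(uploadHandler, 1<<20))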
diff --git a/contrib/go/_std_1.18/src/net/http/sniff.go b/contrib/go/_std_1.18/src/net/http/sniff.go
deleted file mode 100644
index 67a7151b0c..0000000000
--- a/contrib/go/_std_1.18/src/net/http/sniff.go
+++ /dev/null
@@ -1,309 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bytes"
- "encoding/binary"
-)
-
-// The algorithm uses at most sniffLen bytes to make its decision.
-const sniffLen = 512
-
-// DetectContentType implements the algorithm described
-// at https://mimesniff.spec.whatwg.org/ to determine the
-// Content-Type of the given data. It considers at most the
-// first 512 bytes of data. DetectContentType always returns
-// a valid MIME type: if it cannot determine a more specific one, it
-// returns "application/octet-stream".
-func DetectContentType(data []byte) string {
- if len(data) > sniffLen {
- data = data[:sniffLen]
- }
-
- // Index of the first non-whitespace byte in data.
- firstNonWS := 0
- for ; firstNonWS < len(data) && isWS(data[firstNonWS]); firstNonWS++ {
- }
-
- for _, sig := range sniffSignatures {
- if ct := sig.match(data, firstNonWS); ct != "" {
- return ct
- }
- }
-
- return "application/octet-stream" // fallback
-}
-
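-// Illustrative usage (not in the original source); f (an opened file)
-// and w (a ResponseWriter) are hypothetical:
-//
-//	buf := make([]byte, 512) // DetectContentType uses at most 512 bytes
-//	n, _ := f.Read(buf)
-//	w.Header().Set("Content-Type", DetectContentType(buf[:n]))
-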
-// isWS reports whether the provided byte is a whitespace byte (0xWS)
-// as defined in https://mimesniff.spec.whatwg.org/#terminology.
-func isWS(b byte) bool {
- switch b {
- case '\t', '\n', '\x0c', '\r', ' ':
- return true
- }
- return false
-}
-
-// isTT reports whether the provided byte is a tag-terminating byte (0xTT)
-// as defined in https://mimesniff.spec.whatwg.org/#terminology.
-func isTT(b byte) bool {
- switch b {
- case ' ', '>':
- return true
- }
- return false
-}
-
-type sniffSig interface {
- // match returns the MIME type of the data, or "" if unknown.
- match(data []byte, firstNonWS int) string
-}
-
-// Data matching the table in section 6.
-var sniffSignatures = []sniffSig{
- htmlSig("<!DOCTYPE HTML"),
- htmlSig("<HTML"),
- htmlSig("<HEAD"),
- htmlSig("<SCRIPT"),
- htmlSig("<IFRAME"),
- htmlSig("<H1"),
- htmlSig("<DIV"),
- htmlSig("<FONT"),
- htmlSig("<TABLE"),
- htmlSig("<A"),
- htmlSig("<STYLE"),
- htmlSig("<TITLE"),
- htmlSig("<B"),
- htmlSig("<BODY"),
- htmlSig("<BR"),
- htmlSig("<P"),
- htmlSig("<!--"),
- &maskedSig{
- mask: []byte("\xFF\xFF\xFF\xFF\xFF"),
- pat: []byte("<?xml"),
- skipWS: true,
- ct: "text/xml; charset=utf-8"},
- &exactSig{[]byte("%PDF-"), "application/pdf"},
- &exactSig{[]byte("%!PS-Adobe-"), "application/postscript"},
-
- // UTF BOMs.
- &maskedSig{
- mask: []byte("\xFF\xFF\x00\x00"),
- pat: []byte("\xFE\xFF\x00\x00"),
- ct: "text/plain; charset=utf-16be",
- },
- &maskedSig{
- mask: []byte("\xFF\xFF\x00\x00"),
- pat: []byte("\xFF\xFE\x00\x00"),
- ct: "text/plain; charset=utf-16le",
- },
- &maskedSig{
- mask: []byte("\xFF\xFF\xFF\x00"),
- pat: []byte("\xEF\xBB\xBF\x00"),
- ct: "text/plain; charset=utf-8",
- },
-
- // Image types
- // For posterity, we originally returned "image/vnd.microsoft.icon" from
- // https://tools.ietf.org/html/draft-ietf-websec-mime-sniff-03#section-7
- // https://codereview.appspot.com/4746042
- // but that has since been replaced with "image/x-icon" in Section 6.2
- // of https://mimesniff.spec.whatwg.org/#matching-an-image-type-pattern
- &exactSig{[]byte("\x00\x00\x01\x00"), "image/x-icon"},
- &exactSig{[]byte("\x00\x00\x02\x00"), "image/x-icon"},
- &exactSig{[]byte("BM"), "image/bmp"},
- &exactSig{[]byte("GIF87a"), "image/gif"},
- &exactSig{[]byte("GIF89a"), "image/gif"},
- &maskedSig{
- mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF"),
- pat: []byte("RIFF\x00\x00\x00\x00WEBPVP"),
- ct: "image/webp",
- },
- &exactSig{[]byte("\x89PNG\x0D\x0A\x1A\x0A"), "image/png"},
- &exactSig{[]byte("\xFF\xD8\xFF"), "image/jpeg"},
-
- // Audio and Video types
- // Enforce the pattern match ordering as prescribed in
- // https://mimesniff.spec.whatwg.org/#matching-an-audio-or-video-type-pattern
- &maskedSig{
- mask: []byte("\xFF\xFF\xFF\xFF"),
- pat: []byte(".snd"),
- ct: "audio/basic",
- },
- &maskedSig{
- mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF"),
- pat: []byte("FORM\x00\x00\x00\x00AIFF"),
- ct: "audio/aiff",
- },
- &maskedSig{
- mask: []byte("\xFF\xFF\xFF"),
- pat: []byte("ID3"),
- ct: "audio/mpeg",
- },
- &maskedSig{
- mask: []byte("\xFF\xFF\xFF\xFF\xFF"),
- pat: []byte("OggS\x00"),
- ct: "application/ogg",
- },
- &maskedSig{
- mask: []byte("\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"),
- pat: []byte("MThd\x00\x00\x00\x06"),
- ct: "audio/midi",
- },
- &maskedSig{
- mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF"),
- pat: []byte("RIFF\x00\x00\x00\x00AVI "),
- ct: "video/avi",
- },
- &maskedSig{
- mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF"),
- pat: []byte("RIFF\x00\x00\x00\x00WAVE"),
- ct: "audio/wave",
- },
- // 6.2.0.2. video/mp4
- mp4Sig{},
- // 6.2.0.3. video/webm
- &exactSig{[]byte("\x1A\x45\xDF\xA3"), "video/webm"},
-
- // Font types
- &maskedSig{
- // 34 NULL bytes followed by the string "LP"
- pat: []byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00LP"),
-		// 34 NULL bytes followed by \xFF\xFF
- mask: []byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF"),
- ct: "application/vnd.ms-fontobject",
- },
- &exactSig{[]byte("\x00\x01\x00\x00"), "font/ttf"},
- &exactSig{[]byte("OTTO"), "font/otf"},
- &exactSig{[]byte("ttcf"), "font/collection"},
- &exactSig{[]byte("wOFF"), "font/woff"},
- &exactSig{[]byte("wOF2"), "font/woff2"},
-
- // Archive types
- &exactSig{[]byte("\x1F\x8B\x08"), "application/x-gzip"},
- &exactSig{[]byte("PK\x03\x04"), "application/zip"},
- // RAR's signatures are incorrectly defined by the MIME spec as per
- // https://github.com/whatwg/mimesniff/issues/63
- // However, RAR Labs correctly defines it at:
- // https://www.rarlab.com/technote.htm#rarsign
- // so we use the definition from RAR Labs.
- // TODO: do whatever the spec ends up doing.
- &exactSig{[]byte("Rar!\x1A\x07\x00"), "application/x-rar-compressed"}, // RAR v1.5-v4.0
- &exactSig{[]byte("Rar!\x1A\x07\x01\x00"), "application/x-rar-compressed"}, // RAR v5+
-
- &exactSig{[]byte("\x00\x61\x73\x6D"), "application/wasm"},
-
- textSig{}, // should be last
-}
-
-type exactSig struct {
- sig []byte
- ct string
-}
-
-func (e *exactSig) match(data []byte, firstNonWS int) string {
- if bytes.HasPrefix(data, e.sig) {
- return e.ct
- }
- return ""
-}
-
-type maskedSig struct {
- mask, pat []byte
- skipWS bool
- ct string
-}
-
-func (m *maskedSig) match(data []byte, firstNonWS int) string {
-	// Pattern-matching algorithm from section 6:
-	// https://mimesniff.spec.whatwg.org/#pattern-matching-algorithm
-
- if m.skipWS {
- data = data[firstNonWS:]
- }
- if len(m.pat) != len(m.mask) {
- return ""
- }
- if len(data) < len(m.pat) {
- return ""
- }
- for i, pb := range m.pat {
- maskedData := data[i] & m.mask[i]
- if maskedData != pb {
- return ""
- }
- }
- return m.ct
-}
-
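-// A worked example (illustrative, not in the original source): in the
-// WEBP entry of sniffSignatures, mask bytes 4-7 are \x00, so the
-// little-endian RIFF chunk size in the data is ANDed to zero and
-// matches the zero bytes of the pattern; only "RIFF" and "WEBPVP"
-// must appear literally.
-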
-type htmlSig []byte
-
-func (h htmlSig) match(data []byte, firstNonWS int) string {
- data = data[firstNonWS:]
- if len(data) < len(h)+1 {
- return ""
- }
- for i, b := range h {
- db := data[i]
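-		// When the signature byte is an ASCII uppercase letter, clear
-		// bit 0x20 in the data byte (e.g. 'h' 0x68 -> 'H' 0x48) so
-		// letters compare case-insensitively.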
- if 'A' <= b && b <= 'Z' {
- db &= 0xDF
- }
- if b != db {
- return ""
- }
- }
-	// Next byte must be a tag-terminating byte (0xTT).
- if !isTT(data[len(h)]) {
- return ""
- }
- return "text/html; charset=utf-8"
-}
-
-var mp4ftype = []byte("ftyp")
-var mp4 = []byte("mp4")
-
-type mp4Sig struct{}
-
-func (mp4Sig) match(data []byte, firstNonWS int) string {
- // https://mimesniff.spec.whatwg.org/#signature-for-mp4
- // c.f. section 6.2.1
- if len(data) < 12 {
- return ""
- }
- boxSize := int(binary.BigEndian.Uint32(data[:4]))
- if len(data) < boxSize || boxSize%4 != 0 {
- return ""
- }
- if !bytes.Equal(data[4:8], mp4ftype) {
- return ""
- }
- for st := 8; st < boxSize; st += 4 {
- if st == 12 {
- // Ignores the four bytes that correspond to the version number of the "major brand".
- continue
- }
- if bytes.Equal(data[st:st+3], mp4) {
- return "video/mp4"
- }
- }
- return ""
-}
-
-type textSig struct{}
-
-func (textSig) match(data []byte, firstNonWS int) string {
- // c.f. section 5, step 4.
- for _, b := range data[firstNonWS:] {
- switch {
- case b <= 0x08,
- b == 0x0B,
- 0x0E <= b && b <= 0x1A,
- 0x1C <= b && b <= 0x1F:
- return ""
- }
- }
- return "text/plain; charset=utf-8"
-}
diff --git a/contrib/go/_std_1.18/src/net/http/status.go b/contrib/go/_std_1.18/src/net/http/status.go
deleted file mode 100644
index 286315f639..0000000000
--- a/contrib/go/_std_1.18/src/net/http/status.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-// HTTP status codes as registered with IANA.
-// See: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
-const (
- StatusContinue = 100 // RFC 7231, 6.2.1
- StatusSwitchingProtocols = 101 // RFC 7231, 6.2.2
- StatusProcessing = 102 // RFC 2518, 10.1
- StatusEarlyHints = 103 // RFC 8297
-
- StatusOK = 200 // RFC 7231, 6.3.1
- StatusCreated = 201 // RFC 7231, 6.3.2
- StatusAccepted = 202 // RFC 7231, 6.3.3
- StatusNonAuthoritativeInfo = 203 // RFC 7231, 6.3.4
- StatusNoContent = 204 // RFC 7231, 6.3.5
- StatusResetContent = 205 // RFC 7231, 6.3.6
- StatusPartialContent = 206 // RFC 7233, 4.1
- StatusMultiStatus = 207 // RFC 4918, 11.1
- StatusAlreadyReported = 208 // RFC 5842, 7.1
- StatusIMUsed = 226 // RFC 3229, 10.4.1
-
- StatusMultipleChoices = 300 // RFC 7231, 6.4.1
- StatusMovedPermanently = 301 // RFC 7231, 6.4.2
- StatusFound = 302 // RFC 7231, 6.4.3
- StatusSeeOther = 303 // RFC 7231, 6.4.4
- StatusNotModified = 304 // RFC 7232, 4.1
- StatusUseProxy = 305 // RFC 7231, 6.4.5
- _ = 306 // RFC 7231, 6.4.6 (Unused)
- StatusTemporaryRedirect = 307 // RFC 7231, 6.4.7
- StatusPermanentRedirect = 308 // RFC 7538, 3
-
- StatusBadRequest = 400 // RFC 7231, 6.5.1
- StatusUnauthorized = 401 // RFC 7235, 3.1
- StatusPaymentRequired = 402 // RFC 7231, 6.5.2
- StatusForbidden = 403 // RFC 7231, 6.5.3
- StatusNotFound = 404 // RFC 7231, 6.5.4
- StatusMethodNotAllowed = 405 // RFC 7231, 6.5.5
- StatusNotAcceptable = 406 // RFC 7231, 6.5.6
- StatusProxyAuthRequired = 407 // RFC 7235, 3.2
- StatusRequestTimeout = 408 // RFC 7231, 6.5.7
- StatusConflict = 409 // RFC 7231, 6.5.8
- StatusGone = 410 // RFC 7231, 6.5.9
- StatusLengthRequired = 411 // RFC 7231, 6.5.10
- StatusPreconditionFailed = 412 // RFC 7232, 4.2
- StatusRequestEntityTooLarge = 413 // RFC 7231, 6.5.11
- StatusRequestURITooLong = 414 // RFC 7231, 6.5.12
- StatusUnsupportedMediaType = 415 // RFC 7231, 6.5.13
- StatusRequestedRangeNotSatisfiable = 416 // RFC 7233, 4.4
- StatusExpectationFailed = 417 // RFC 7231, 6.5.14
- StatusTeapot = 418 // RFC 7168, 2.3.3
- StatusMisdirectedRequest = 421 // RFC 7540, 9.1.2
- StatusUnprocessableEntity = 422 // RFC 4918, 11.2
- StatusLocked = 423 // RFC 4918, 11.3
- StatusFailedDependency = 424 // RFC 4918, 11.4
-	StatusTooEarly                     = 425 // RFC 8470, 5.2
- StatusUpgradeRequired = 426 // RFC 7231, 6.5.15
- StatusPreconditionRequired = 428 // RFC 6585, 3
- StatusTooManyRequests = 429 // RFC 6585, 4
- StatusRequestHeaderFieldsTooLarge = 431 // RFC 6585, 5
- StatusUnavailableForLegalReasons = 451 // RFC 7725, 3
-
- StatusInternalServerError = 500 // RFC 7231, 6.6.1
- StatusNotImplemented = 501 // RFC 7231, 6.6.2
- StatusBadGateway = 502 // RFC 7231, 6.6.3
- StatusServiceUnavailable = 503 // RFC 7231, 6.6.4
- StatusGatewayTimeout = 504 // RFC 7231, 6.6.5
- StatusHTTPVersionNotSupported = 505 // RFC 7231, 6.6.6
- StatusVariantAlsoNegotiates = 506 // RFC 2295, 8.1
- StatusInsufficientStorage = 507 // RFC 4918, 11.5
- StatusLoopDetected = 508 // RFC 5842, 7.2
- StatusNotExtended = 510 // RFC 2774, 7
- StatusNetworkAuthenticationRequired = 511 // RFC 6585, 6
-)
-
-var statusText = map[int]string{
- StatusContinue: "Continue",
- StatusSwitchingProtocols: "Switching Protocols",
- StatusProcessing: "Processing",
- StatusEarlyHints: "Early Hints",
-
- StatusOK: "OK",
- StatusCreated: "Created",
- StatusAccepted: "Accepted",
- StatusNonAuthoritativeInfo: "Non-Authoritative Information",
- StatusNoContent: "No Content",
- StatusResetContent: "Reset Content",
- StatusPartialContent: "Partial Content",
- StatusMultiStatus: "Multi-Status",
- StatusAlreadyReported: "Already Reported",
- StatusIMUsed: "IM Used",
-
- StatusMultipleChoices: "Multiple Choices",
- StatusMovedPermanently: "Moved Permanently",
- StatusFound: "Found",
- StatusSeeOther: "See Other",
- StatusNotModified: "Not Modified",
- StatusUseProxy: "Use Proxy",
- StatusTemporaryRedirect: "Temporary Redirect",
- StatusPermanentRedirect: "Permanent Redirect",
-
- StatusBadRequest: "Bad Request",
- StatusUnauthorized: "Unauthorized",
- StatusPaymentRequired: "Payment Required",
- StatusForbidden: "Forbidden",
- StatusNotFound: "Not Found",
- StatusMethodNotAllowed: "Method Not Allowed",
- StatusNotAcceptable: "Not Acceptable",
- StatusProxyAuthRequired: "Proxy Authentication Required",
- StatusRequestTimeout: "Request Timeout",
- StatusConflict: "Conflict",
- StatusGone: "Gone",
- StatusLengthRequired: "Length Required",
- StatusPreconditionFailed: "Precondition Failed",
- StatusRequestEntityTooLarge: "Request Entity Too Large",
- StatusRequestURITooLong: "Request URI Too Long",
- StatusUnsupportedMediaType: "Unsupported Media Type",
- StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable",
- StatusExpectationFailed: "Expectation Failed",
- StatusTeapot: "I'm a teapot",
- StatusMisdirectedRequest: "Misdirected Request",
- StatusUnprocessableEntity: "Unprocessable Entity",
- StatusLocked: "Locked",
- StatusFailedDependency: "Failed Dependency",
- StatusTooEarly: "Too Early",
- StatusUpgradeRequired: "Upgrade Required",
- StatusPreconditionRequired: "Precondition Required",
- StatusTooManyRequests: "Too Many Requests",
- StatusRequestHeaderFieldsTooLarge: "Request Header Fields Too Large",
- StatusUnavailableForLegalReasons: "Unavailable For Legal Reasons",
-
- StatusInternalServerError: "Internal Server Error",
- StatusNotImplemented: "Not Implemented",
- StatusBadGateway: "Bad Gateway",
- StatusServiceUnavailable: "Service Unavailable",
- StatusGatewayTimeout: "Gateway Timeout",
- StatusHTTPVersionNotSupported: "HTTP Version Not Supported",
- StatusVariantAlsoNegotiates: "Variant Also Negotiates",
- StatusInsufficientStorage: "Insufficient Storage",
- StatusLoopDetected: "Loop Detected",
- StatusNotExtended: "Not Extended",
- StatusNetworkAuthenticationRequired: "Network Authentication Required",
-}
-
-// StatusText returns a text for the HTTP status code. It returns the empty
-// string if the code is unknown.
-func StatusText(code int) string {
- return statusText[code]
-}
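-
-// Illustrative usage (not in the original source):
-//
-//	StatusText(StatusNotFound) // "Not Found"
-//	StatusText(999)            // "" (unknown code)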
diff --git a/contrib/go/_std_1.18/src/net/http/transfer.go b/contrib/go/_std_1.18/src/net/http/transfer.go
deleted file mode 100644
index de69d835ac..0000000000
--- a/contrib/go/_std_1.18/src/net/http/transfer.go
+++ /dev/null
@@ -1,1114 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "net/http/httptrace"
- "net/http/internal"
- "net/http/internal/ascii"
- "net/textproto"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/http/httpguts"
-)
-
-// ErrLineTooLong is returned when reading request or response bodies
-// with malformed chunked encoding.
-var ErrLineTooLong = internal.ErrLineTooLong
-
-type errorReader struct {
- err error
-}
-
-func (r errorReader) Read(p []byte) (n int, err error) {
- return 0, r.err
-}
-
-type byteReader struct {
- b byte
- done bool
-}
-
-func (br *byteReader) Read(p []byte) (n int, err error) {
- if br.done {
- return 0, io.EOF
- }
- if len(p) == 0 {
- return 0, nil
- }
- br.done = true
- p[0] = br.b
- return 1, io.EOF
-}
-
-// transferWriter inspects the fields of a user-supplied Request or Response,
-// sanitizes them without changing the user object and provides methods for
-// writing the respective header, body and trailer in wire format.
-type transferWriter struct {
- Method string
- Body io.Reader
- BodyCloser io.Closer
- ResponseToHEAD bool
- ContentLength int64 // -1 means unknown, 0 means exactly none
- Close bool
- TransferEncoding []string
- Header Header
- Trailer Header
- IsResponse bool
- bodyReadError error // any non-EOF error from reading Body
-
- FlushHeaders bool // flush headers to network before body
- ByteReadCh chan readResult // non-nil if probeRequestBody called
-}
-
-func newTransferWriter(r any) (t *transferWriter, err error) {
- t = &transferWriter{}
-
- // Extract relevant fields
- atLeastHTTP11 := false
- switch rr := r.(type) {
- case *Request:
- if rr.ContentLength != 0 && rr.Body == nil {
- return nil, fmt.Errorf("http: Request.ContentLength=%d with nil Body", rr.ContentLength)
- }
- t.Method = valueOrDefault(rr.Method, "GET")
- t.Close = rr.Close
- t.TransferEncoding = rr.TransferEncoding
- t.Header = rr.Header
- t.Trailer = rr.Trailer
- t.Body = rr.Body
- t.BodyCloser = rr.Body
- t.ContentLength = rr.outgoingLength()
- if t.ContentLength < 0 && len(t.TransferEncoding) == 0 && t.shouldSendChunkedRequestBody() {
- t.TransferEncoding = []string{"chunked"}
- }
- // If there's a body, conservatively flush the headers
- // to any bufio.Writer we're writing to, just in case
- // the server needs the headers early, before we copy
- // the body and possibly block. We make an exception
- // for the common standard library in-memory types,
- // though, to avoid unnecessary TCP packets on the
- // wire. (Issue 22088.)
- if t.ContentLength != 0 && !isKnownInMemoryReader(t.Body) {
- t.FlushHeaders = true
- }
-
- atLeastHTTP11 = true // Transport requests are always 1.1 or 2.0
- case *Response:
- t.IsResponse = true
- if rr.Request != nil {
- t.Method = rr.Request.Method
- }
- t.Body = rr.Body
- t.BodyCloser = rr.Body
- t.ContentLength = rr.ContentLength
- t.Close = rr.Close
- t.TransferEncoding = rr.TransferEncoding
- t.Header = rr.Header
- t.Trailer = rr.Trailer
- atLeastHTTP11 = rr.ProtoAtLeast(1, 1)
- t.ResponseToHEAD = noResponseBodyExpected(t.Method)
- }
-
- // Sanitize Body,ContentLength,TransferEncoding
- if t.ResponseToHEAD {
- t.Body = nil
- if chunked(t.TransferEncoding) {
- t.ContentLength = -1
- }
- } else {
- if !atLeastHTTP11 || t.Body == nil {
- t.TransferEncoding = nil
- }
- if chunked(t.TransferEncoding) {
- t.ContentLength = -1
- } else if t.Body == nil { // no chunking, no body
- t.ContentLength = 0
- }
- }
-
- // Sanitize Trailer
- if !chunked(t.TransferEncoding) {
- t.Trailer = nil
- }
-
- return t, nil
-}
-
-// shouldSendChunkedRequestBody reports whether we should try to send a
-// chunked request body to the server. In particular, the case we really
-// want to prevent is sending a GET or other typically-bodyless request to a
-// server with a chunked body when the body has zero bytes, since GETs with
-// bodies (while acceptable according to specs), even zero-byte chunked
-// bodies, are approximately never seen in the wild and confuse most
-// servers. See Issue 18257, as one example.
-//
-// The only reason we'd send such a request is if the user set the Body to a
-// non-nil value (say, io.NopCloser(bytes.NewReader(nil))) and didn't
-// set ContentLength, or NewRequest set it to -1 (unknown), so then we assume
-// there's bytes to send.
-//
-// This code tries to read a byte from the Request.Body in such cases to see
-// whether the body actually has content (super rare) or is actually just
-// a non-nil content-less ReadCloser (the more common case). In that more
-// common case, we act as if their Body were nil instead, and don't send
-// a body.
-func (t *transferWriter) shouldSendChunkedRequestBody() bool {
- // Note that t.ContentLength is the corrected content length
- // from rr.outgoingLength, so 0 actually means zero, not unknown.
- if t.ContentLength >= 0 || t.Body == nil { // redundant checks; caller did them
- return false
- }
- if t.Method == "CONNECT" {
- return false
- }
- if requestMethodUsuallyLacksBody(t.Method) {
- // Only probe the Request.Body for GET/HEAD/DELETE/etc
- // requests, because it's only those types of requests
- // that confuse servers.
- t.probeRequestBody() // adjusts t.Body, t.ContentLength
- return t.Body != nil
- }
- // For all other request types (PUT, POST, PATCH, or anything
- // made-up we've never heard of), assume it's normal and the server
- // can deal with a chunked request body. Maybe we'll adjust this
- // later.
- return true
-}
-
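-// An illustrative case (not in the original source): NewRequest cannot
-// see through a NopCloser, so the request below carries an unknown
-// length and reaches the probe; the URL is hypothetical.
-//
-//	body := io.NopCloser(strings.NewReader(""))
-//	req, _ := NewRequest("GET", "https://example.com/", body)
-//	// req.ContentLength == 0 with a non-nil Body means "unknown":
-//	// probeRequestBody reads one byte, sees io.EOF, and the request
-//	// is sent with neither a body nor chunked encoding.
-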
-// probeRequestBody reads a byte from t.Body to see whether it's empty
-// (returns io.EOF right away).
-//
-// But because we've had problems with this blocking users in the past
-// (issue 17480) when the body is a pipe (perhaps waiting on the response
-// headers before the pipe is fed data), we need to be careful and bound how
-// long we wait for it. This delay will only affect users if all the following
-// are true:
-// * the request body blocks
-// * the content length is not set (or set to -1)
-// * the method doesn't usually have a body (GET, HEAD, DELETE, ...)
-// * there is no transfer-encoding=chunked already set.
-// In other words, this delay will not normally affect anybody, and there
-// are workarounds if it does.
-func (t *transferWriter) probeRequestBody() {
- t.ByteReadCh = make(chan readResult, 1)
- go func(body io.Reader) {
- var buf [1]byte
- var rres readResult
- rres.n, rres.err = body.Read(buf[:])
- if rres.n == 1 {
- rres.b = buf[0]
- }
- t.ByteReadCh <- rres
- close(t.ByteReadCh)
- }(t.Body)
- timer := time.NewTimer(200 * time.Millisecond)
- select {
- case rres := <-t.ByteReadCh:
- timer.Stop()
- if rres.n == 0 && rres.err == io.EOF {
- // It was empty.
- t.Body = nil
- t.ContentLength = 0
- } else if rres.n == 1 {
- if rres.err != nil {
- t.Body = io.MultiReader(&byteReader{b: rres.b}, errorReader{rres.err})
- } else {
- t.Body = io.MultiReader(&byteReader{b: rres.b}, t.Body)
- }
- } else if rres.err != nil {
- t.Body = errorReader{rres.err}
- }
- case <-timer.C:
- // Too slow. Don't wait. Read it later, and keep
- // assuming that this is ContentLength == -1
- // (unknown), which means we'll send a
- // "Transfer-Encoding: chunked" header.
- t.Body = io.MultiReader(finishAsyncByteRead{t}, t.Body)
- // Request that Request.Write flush the headers to the
- // network before writing the body, since our body may not
- // become readable until it's seen the response headers.
- t.FlushHeaders = true
- }
-}
-
-func noResponseBodyExpected(requestMethod string) bool {
- return requestMethod == "HEAD"
-}
-
-func (t *transferWriter) shouldSendContentLength() bool {
- if chunked(t.TransferEncoding) {
- return false
- }
- if t.ContentLength > 0 {
- return true
- }
- if t.ContentLength < 0 {
- return false
- }
- // Many servers expect a Content-Length for these methods
- if t.Method == "POST" || t.Method == "PUT" || t.Method == "PATCH" {
- return true
- }
- if t.ContentLength == 0 && isIdentity(t.TransferEncoding) {
- if t.Method == "GET" || t.Method == "HEAD" {
- return false
- }
- return true
- }
-
- return false
-}
-
-func (t *transferWriter) writeHeader(w io.Writer, trace *httptrace.ClientTrace) error {
- if t.Close && !hasToken(t.Header.get("Connection"), "close") {
- if _, err := io.WriteString(w, "Connection: close\r\n"); err != nil {
- return err
- }
- if trace != nil && trace.WroteHeaderField != nil {
- trace.WroteHeaderField("Connection", []string{"close"})
- }
- }
-
- // Write Content-Length and/or Transfer-Encoding whose values are a
- // function of the sanitized field triple (Body, ContentLength,
- // TransferEncoding)
- if t.shouldSendContentLength() {
- if _, err := io.WriteString(w, "Content-Length: "); err != nil {
- return err
- }
- if _, err := io.WriteString(w, strconv.FormatInt(t.ContentLength, 10)+"\r\n"); err != nil {
- return err
- }
- if trace != nil && trace.WroteHeaderField != nil {
- trace.WroteHeaderField("Content-Length", []string{strconv.FormatInt(t.ContentLength, 10)})
- }
- } else if chunked(t.TransferEncoding) {
- if _, err := io.WriteString(w, "Transfer-Encoding: chunked\r\n"); err != nil {
- return err
- }
- if trace != nil && trace.WroteHeaderField != nil {
- trace.WroteHeaderField("Transfer-Encoding", []string{"chunked"})
- }
- }
-
- // Write Trailer header
- if t.Trailer != nil {
- keys := make([]string, 0, len(t.Trailer))
- for k := range t.Trailer {
- k = CanonicalHeaderKey(k)
- switch k {
- case "Transfer-Encoding", "Trailer", "Content-Length":
- return badStringError("invalid Trailer key", k)
- }
- keys = append(keys, k)
- }
- if len(keys) > 0 {
- sort.Strings(keys)
- // TODO: could do better allocation-wise here, but trailers are rare,
- // so being lazy for now.
- if _, err := io.WriteString(w, "Trailer: "+strings.Join(keys, ",")+"\r\n"); err != nil {
- return err
- }
- if trace != nil && trace.WroteHeaderField != nil {
- trace.WroteHeaderField("Trailer", keys)
- }
- }
- }
-
- return nil
-}
-
-// writeBody writes the body; it always closes t.BodyCloser.
-func (t *transferWriter) writeBody(w io.Writer) (err error) {
- var ncopy int64
- closed := false
- defer func() {
- if closed || t.BodyCloser == nil {
- return
- }
- if closeErr := t.BodyCloser.Close(); closeErr != nil && err == nil {
- err = closeErr
- }
- }()
-
- // Write body. We "unwrap" the body first if it was wrapped in a
- // nopCloser or readTrackingBody. This is to ensure that we can take advantage of
- // OS-level optimizations in the event that the body is an
- // *os.File.
- if t.Body != nil {
- var body = t.unwrapBody()
- if chunked(t.TransferEncoding) {
- if bw, ok := w.(*bufio.Writer); ok && !t.IsResponse {
- w = &internal.FlushAfterChunkWriter{Writer: bw}
- }
- cw := internal.NewChunkedWriter(w)
- _, err = t.doBodyCopy(cw, body)
- if err == nil {
- err = cw.Close()
- }
- } else if t.ContentLength == -1 {
- dst := w
- if t.Method == "CONNECT" {
- dst = bufioFlushWriter{dst}
- }
- ncopy, err = t.doBodyCopy(dst, body)
- } else {
- ncopy, err = t.doBodyCopy(w, io.LimitReader(body, t.ContentLength))
- if err != nil {
- return err
- }
- var nextra int64
- nextra, err = t.doBodyCopy(io.Discard, body)
- ncopy += nextra
- }
- if err != nil {
- return err
- }
- }
- if t.BodyCloser != nil {
- closed = true
- if err := t.BodyCloser.Close(); err != nil {
- return err
- }
- }
-
- if !t.ResponseToHEAD && t.ContentLength != -1 && t.ContentLength != ncopy {
- return fmt.Errorf("http: ContentLength=%d with Body length %d",
- t.ContentLength, ncopy)
- }
-
- if chunked(t.TransferEncoding) {
- // Write Trailer header
- if t.Trailer != nil {
- if err := t.Trailer.Write(w); err != nil {
- return err
- }
- }
- // Last chunk, empty trailer
- _, err = io.WriteString(w, "\r\n")
- }
- return err
-}
-
-// doBodyCopy wraps a copy operation, with any resulting error also
-// being saved in bodyReadError.
-//
-// This function is only intended for use in writeBody.
-func (t *transferWriter) doBodyCopy(dst io.Writer, src io.Reader) (n int64, err error) {
- n, err = io.Copy(dst, src)
- if err != nil && err != io.EOF {
- t.bodyReadError = err
- }
- return
-}
-
-// unwrapBody unwraps the body's inner reader if it's a
-// nopCloser. This is to ensure that body writes sourced from local
-// files (*os.File types) are properly optimized.
-//
-// This function is only intended for use in writeBody.
-func (t *transferWriter) unwrapBody() io.Reader {
- if reflect.TypeOf(t.Body) == nopCloserType {
- return reflect.ValueOf(t.Body).Field(0).Interface().(io.Reader)
- }
- if r, ok := t.Body.(*readTrackingBody); ok {
- r.didRead = true
- return r.ReadCloser
- }
- return t.Body
-}
-
-type transferReader struct {
- // Input
- Header Header
- StatusCode int
- RequestMethod string
- ProtoMajor int
- ProtoMinor int
- // Output
- Body io.ReadCloser
- ContentLength int64
- Chunked bool
- Close bool
- Trailer Header
-}
-
-func (t *transferReader) protoAtLeast(m, n int) bool {
- return t.ProtoMajor > m || (t.ProtoMajor == m && t.ProtoMinor >= n)
-}
-
-// bodyAllowedForStatus reports whether a given response status code
-// permits a body. See RFC 7230, section 3.3.
-func bodyAllowedForStatus(status int) bool {
- switch {
- case status >= 100 && status <= 199:
- return false
- case status == 204:
- return false
- case status == 304:
- return false
- }
- return true
-}
-
-var (
- suppressedHeaders304 = []string{"Content-Type", "Content-Length", "Transfer-Encoding"}
- suppressedHeadersNoBody = []string{"Content-Length", "Transfer-Encoding"}
-)
-
-func suppressedHeaders(status int) []string {
- switch {
- case status == 304:
- // RFC 7232 section 4.1
- return suppressedHeaders304
- case !bodyAllowedForStatus(status):
- return suppressedHeadersNoBody
- }
- return nil
-}
-
-// msg is *Request or *Response.
-func readTransfer(msg any, r *bufio.Reader) (err error) {
- t := &transferReader{RequestMethod: "GET"}
-
- // Unify input
- isResponse := false
- switch rr := msg.(type) {
- case *Response:
- t.Header = rr.Header
- t.StatusCode = rr.StatusCode
- t.ProtoMajor = rr.ProtoMajor
- t.ProtoMinor = rr.ProtoMinor
- t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header, true)
- isResponse = true
- if rr.Request != nil {
- t.RequestMethod = rr.Request.Method
- }
- case *Request:
- t.Header = rr.Header
- t.RequestMethod = rr.Method
- t.ProtoMajor = rr.ProtoMajor
- t.ProtoMinor = rr.ProtoMinor
- // Transfer semantics for Requests are exactly like those for
- // Responses with status code 200, responding to a GET method
- t.StatusCode = 200
- t.Close = rr.Close
- default:
- panic("unexpected type")
- }
-
- // Default to HTTP/1.1
- if t.ProtoMajor == 0 && t.ProtoMinor == 0 {
- t.ProtoMajor, t.ProtoMinor = 1, 1
- }
-
-	// Parse Transfer-Encoding: chunked, which overrides Content-Length.
- if err := t.parseTransferEncoding(); err != nil {
- return err
- }
-
- realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.Chunked)
- if err != nil {
- return err
- }
- if isResponse && t.RequestMethod == "HEAD" {
- if n, err := parseContentLength(t.Header.get("Content-Length")); err != nil {
- return err
- } else {
- t.ContentLength = n
- }
- } else {
- t.ContentLength = realLength
- }
-
- // Trailer
- t.Trailer, err = fixTrailer(t.Header, t.Chunked)
- if err != nil {
- return err
- }
-
- // If there is no Content-Length or chunked Transfer-Encoding on a *Response
- // and the status is not 1xx, 204 or 304, then the body is unbounded.
- // See RFC 7230, section 3.3.
- switch msg.(type) {
- case *Response:
- if realLength == -1 && !t.Chunked && bodyAllowedForStatus(t.StatusCode) {
- // Unbounded body.
- t.Close = true
- }
- }
-
- // Prepare body reader. ContentLength < 0 means chunked encoding
- // or close connection when finished, since multipart is not supported yet
- switch {
- case t.Chunked:
- if noResponseBodyExpected(t.RequestMethod) || !bodyAllowedForStatus(t.StatusCode) {
- t.Body = NoBody
- } else {
- t.Body = &body{src: internal.NewChunkedReader(r), hdr: msg, r: r, closing: t.Close}
- }
- case realLength == 0:
- t.Body = NoBody
- case realLength > 0:
- t.Body = &body{src: io.LimitReader(r, realLength), closing: t.Close}
- default:
- // realLength < 0, i.e. "Content-Length" not mentioned in header
- if t.Close {
- // Close semantics (i.e. HTTP/1.0)
- t.Body = &body{src: r, closing: t.Close}
- } else {
- // Persistent connection (i.e. HTTP/1.1)
- t.Body = NoBody
- }
- }
-
- // Unify output
- switch rr := msg.(type) {
- case *Request:
- rr.Body = t.Body
- rr.ContentLength = t.ContentLength
- if t.Chunked {
- rr.TransferEncoding = []string{"chunked"}
- }
- rr.Close = t.Close
- rr.Trailer = t.Trailer
- case *Response:
- rr.Body = t.Body
- rr.ContentLength = t.ContentLength
- if t.Chunked {
- rr.TransferEncoding = []string{"chunked"}
- }
- rr.Close = t.Close
- rr.Trailer = t.Trailer
- }
-
- return nil
-}
-
-// chunked reports whether "chunked" is part of the encodings stack.
-func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" }
-
-// isIdentity reports whether the encoding is explicitly "identity".
-func isIdentity(te []string) bool { return len(te) == 1 && te[0] == "identity" }
-
-// unsupportedTEError reports unsupported transfer-encodings.
-type unsupportedTEError struct {
- err string
-}
-
-func (uste *unsupportedTEError) Error() string {
- return uste.err
-}
-
-// isUnsupportedTEError checks if the error is of type
-// unsupportedTEError. It is usually invoked with a non-nil err.
-func isUnsupportedTEError(err error) bool {
- _, ok := err.(*unsupportedTEError)
- return ok
-}
-
-// parseTransferEncoding sets t.Chunked based on the Transfer-Encoding header.
-func (t *transferReader) parseTransferEncoding() error {
- raw, present := t.Header["Transfer-Encoding"]
- if !present {
- return nil
- }
- delete(t.Header, "Transfer-Encoding")
-
- // Issue 12785; ignore Transfer-Encoding on HTTP/1.0 requests.
- if !t.protoAtLeast(1, 1) {
- return nil
- }
-
- // Like nginx, we only support a single Transfer-Encoding header field, and
- // only if set to "chunked". This is one of the most security sensitive
- // surfaces in HTTP/1.1 due to the risk of request smuggling, so we keep it
- // strict and simple.
- if len(raw) != 1 {
- return &unsupportedTEError{fmt.Sprintf("too many transfer encodings: %q", raw)}
- }
- if !ascii.EqualFold(raw[0], "chunked") {
- return &unsupportedTEError{fmt.Sprintf("unsupported transfer encoding: %q", raw[0])}
- }
-
- // RFC 7230 3.3.2 says "A sender MUST NOT send a Content-Length header field
- // in any message that contains a Transfer-Encoding header field."
- //
- // but also: "If a message is received with both a Transfer-Encoding and a
- // Content-Length header field, the Transfer-Encoding overrides the
- // Content-Length. Such a message might indicate an attempt to perform
- // request smuggling (Section 9.5) or response splitting (Section 9.4) and
- // ought to be handled as an error. A sender MUST remove the received
- // Content-Length field prior to forwarding such a message downstream."
- //
- // Reportedly, these appear in the wild.
- delete(t.Header, "Content-Length")
-
- t.Chunked = true
- return nil
-}
-
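-// An illustrative rejection (not in the original source): a message
-// carrying both
-//
-//	Transfer-Encoding: gzip
-//	Transfer-Encoding: chunked
-//
-// reaches parseTransferEncoding as raw == []string{"gzip", "chunked"},
-// so len(raw) != 1 and the message is refused with an
-// unsupportedTEError rather than reinterpreted, closing off one
-// request-smuggling vector.
-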
-// fixLength determines the expected body length, using RFC 7230
-// Section 3.3. This function is not a method, because ultimately it
-// should be shared by ReadResponse and ReadRequest.
-func fixLength(isResponse bool, status int, requestMethod string, header Header, chunked bool) (int64, error) {
- isRequest := !isResponse
- contentLens := header["Content-Length"]
-
- // Hardening against HTTP request smuggling
- if len(contentLens) > 1 {
- // Per RFC 7230 Section 3.3.2, prevent multiple
- // Content-Length headers if they differ in value.
- // If there are dups of the value, remove the dups.
- // See Issue 16490.
- first := textproto.TrimString(contentLens[0])
- for _, ct := range contentLens[1:] {
- if first != textproto.TrimString(ct) {
- return 0, fmt.Errorf("http: message cannot contain multiple Content-Length headers; got %q", contentLens)
- }
- }
-
- // deduplicate Content-Length
- header.Del("Content-Length")
- header.Add("Content-Length", first)
-
- contentLens = header["Content-Length"]
- }
-
- // Logic based on response type or status
- if noResponseBodyExpected(requestMethod) {
- // For HTTP requests, as part of hardening against request
- // smuggling (RFC 7230), don't allow a Content-Length header for
- // methods which don't permit bodies. As an exception, allow
- // exactly one Content-Length header if its value is "0".
- if isRequest && len(contentLens) > 0 && !(len(contentLens) == 1 && contentLens[0] == "0") {
- return 0, fmt.Errorf("http: method cannot contain a Content-Length; got %q", contentLens)
- }
- return 0, nil
- }
- if status/100 == 1 {
- return 0, nil
- }
- switch status {
- case 204, 304:
- return 0, nil
- }
-
- // Logic based on Transfer-Encoding
- if chunked {
- return -1, nil
- }
-
- // Logic based on Content-Length
- var cl string
- if len(contentLens) == 1 {
- cl = textproto.TrimString(contentLens[0])
- }
- if cl != "" {
- n, err := parseContentLength(cl)
- if err != nil {
- return -1, err
- }
- return n, nil
- }
- header.Del("Content-Length")
-
- if isRequest {
- // RFC 7230 neither explicitly permits nor forbids an
- // entity-body on a GET request so we permit one if
- // declared, but we default to 0 here (not -1 below)
- // if there's no mention of a body.
- // Likewise, all other request methods are assumed to have
- // no body if neither Transfer-Encoding chunked nor a
- // Content-Length are set.
- return 0, nil
- }
-
- // Body-EOF logic based on other methods (like closing, or chunked coding)
- return -1, nil
-}
-
-// shouldClose reports whether to hang up after sending a request and
-// body, or after receiving a response and body.
-// 'header' is the request headers.
-func shouldClose(major, minor int, header Header, removeCloseHeader bool) bool {
- if major < 1 {
- return true
- }
-
- conv := header["Connection"]
- hasClose := httpguts.HeaderValuesContainsToken(conv, "close")
- if major == 1 && minor == 0 {
- return hasClose || !httpguts.HeaderValuesContainsToken(conv, "keep-alive")
- }
-
- if hasClose && removeCloseHeader {
- header.Del("Connection")
- }
-
- return hasClose
-}
-
-// fixTrailer parses the Trailer header.
-func fixTrailer(header Header, chunked bool) (Header, error) {
- vv, ok := header["Trailer"]
- if !ok {
- return nil, nil
- }
- if !chunked {
- // Trailer and no chunking:
- // this is an invalid use case for trailer header.
- // Nevertheless, no error will be returned and we
- // let users decide if this is a valid HTTP message.
- // The Trailer header will be kept in Response.Header
- // but not populate Response.Trailer.
- // See issue #27197.
- return nil, nil
- }
- header.Del("Trailer")
-
- trailer := make(Header)
- var err error
- for _, v := range vv {
- foreachHeaderElement(v, func(key string) {
- key = CanonicalHeaderKey(key)
- switch key {
- case "Transfer-Encoding", "Trailer", "Content-Length":
- if err == nil {
- err = badStringError("bad trailer key", key)
- return
- }
- }
- trailer[key] = nil
- })
- }
- if err != nil {
- return nil, err
- }
- if len(trailer) == 0 {
- return nil, nil
- }
- return trailer, nil
-}
-
-// body turns a Reader into a ReadCloser.
-// Close ensures that the body has been fully read
-// and then reads the trailer if necessary.
-type body struct {
- src io.Reader
- hdr any // non-nil (Response or Request) value means read trailer
- r *bufio.Reader // underlying wire-format reader for the trailer
- closing bool // is the connection to be closed after reading body?
- doEarlyClose bool // whether Close should stop early
-
- mu sync.Mutex // guards following, and calls to Read and Close
- sawEOF bool
- closed bool
- earlyClose bool // Close called and we didn't read to the end of src
- onHitEOF func() // if non-nil, func to call when EOF is Read
-}
-
-// ErrBodyReadAfterClose is returned when reading a Request or Response
-// Body after the body has been closed. This typically happens when the body is
-// read after an HTTP Handler calls WriteHeader or Write on its
-// ResponseWriter.
-var ErrBodyReadAfterClose = errors.New("http: invalid Read on closed Body")
-
-func (b *body) Read(p []byte) (n int, err error) {
- b.mu.Lock()
- defer b.mu.Unlock()
- if b.closed {
- return 0, ErrBodyReadAfterClose
- }
- return b.readLocked(p)
-}
-
-// Must hold b.mu.
-func (b *body) readLocked(p []byte) (n int, err error) {
- if b.sawEOF {
- return 0, io.EOF
- }
- n, err = b.src.Read(p)
-
- if err == io.EOF {
- b.sawEOF = true
- // Chunked case. Read the trailer.
- if b.hdr != nil {
- if e := b.readTrailer(); e != nil {
- err = e
- // Something went wrong in the trailer, we must not allow any
- // further reads of any kind to succeed from body, nor any
- // subsequent requests on the server connection. See
- // golang.org/issue/12027
- b.sawEOF = false
- b.closed = true
- }
- b.hdr = nil
- } else {
- // If the server declared the Content-Length, our body is a LimitedReader
- // and we need to check whether this EOF arrived early.
- if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > 0 {
- err = io.ErrUnexpectedEOF
- }
- }
- }
-
- // If we can return an EOF here along with the read data, do
- // so. This is optional per the io.Reader contract, but doing
- // so helps the HTTP transport code recycle its connection
- // earlier (since it will see this EOF itself), even if the
- // client doesn't do future reads or Close.
- if err == nil && n > 0 {
- if lr, ok := b.src.(*io.LimitedReader); ok && lr.N == 0 {
- err = io.EOF
- b.sawEOF = true
- }
- }
-
- if b.sawEOF && b.onHitEOF != nil {
- b.onHitEOF()
- }
-
- return n, err
-}
-
-var (
- singleCRLF = []byte("\r\n")
- doubleCRLF = []byte("\r\n\r\n")
-)
-
-func seeUpcomingDoubleCRLF(r *bufio.Reader) bool {
- for peekSize := 4; ; peekSize++ {
- // This loop stops when Peek returns an error,
- // which it does when r's buffer has been filled.
- buf, err := r.Peek(peekSize)
- if bytes.HasSuffix(buf, doubleCRLF) {
- return true
- }
- if err != nil {
- break
- }
- }
- return false
-}
-
-var errTrailerEOF = errors.New("http: unexpected EOF reading trailer")
-
-func (b *body) readTrailer() error {
- // The common case, since nobody uses trailers.
- buf, err := b.r.Peek(2)
- if bytes.Equal(buf, singleCRLF) {
- b.r.Discard(2)
- return nil
- }
- if len(buf) < 2 {
- return errTrailerEOF
- }
- if err != nil {
- return err
- }
-
- // Make sure there's a header terminator coming up, to prevent
- // a DoS with an unbounded size Trailer. It's not easy to
- // slip in a LimitReader here, as textproto.NewReader requires
- // a concrete *bufio.Reader. Also, we can't get all the way
- // back up to our conn's LimitedReader that *might* be backing
- // this bufio.Reader. Instead, a hack: we iteratively Peek up
- // to the bufio.Reader's max size, looking for a double CRLF.
- // This limits the trailer to the underlying buffer size, typically 4kB.
- if !seeUpcomingDoubleCRLF(b.r) {
- return errors.New("http: suspiciously long trailer after chunked body")
- }
-
- hdr, err := textproto.NewReader(b.r).ReadMIMEHeader()
- if err != nil {
- if err == io.EOF {
- return errTrailerEOF
- }
- return err
- }
- switch rr := b.hdr.(type) {
- case *Request:
- mergeSetHeader(&rr.Trailer, Header(hdr))
- case *Response:
- mergeSetHeader(&rr.Trailer, Header(hdr))
- }
- return nil
-}
-
-func mergeSetHeader(dst *Header, src Header) {
- if *dst == nil {
- *dst = src
- return
- }
- for k, vv := range src {
- (*dst)[k] = vv
- }
-}
-
-// unreadDataSizeLocked returns the number of bytes of unread input.
-// It returns -1 if unknown.
-// b.mu must be held.
-func (b *body) unreadDataSizeLocked() int64 {
- if lr, ok := b.src.(*io.LimitedReader); ok {
- return lr.N
- }
- return -1
-}
-
-func (b *body) Close() error {
- b.mu.Lock()
- defer b.mu.Unlock()
- if b.closed {
- return nil
- }
- var err error
- switch {
- case b.sawEOF:
-		// Already saw EOF, so no need to go looking for it.
- case b.hdr == nil && b.closing:
- // no trailer and closing the connection next.
- // no point in reading to EOF.
- case b.doEarlyClose:
- // Read up to maxPostHandlerReadBytes bytes of the body, looking
- // for EOF (and trailers), so we can re-use this connection.
- if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > maxPostHandlerReadBytes {
- // There was a declared Content-Length, and we have more bytes remaining
- // than our maxPostHandlerReadBytes tolerance. So, give up.
- b.earlyClose = true
- } else {
- var n int64
-			// Consume the body, which will also lead to us reading
-			// the trailer headers after the body, if present.
- n, err = io.CopyN(io.Discard, bodyLocked{b}, maxPostHandlerReadBytes)
- if err == io.EOF {
- err = nil
- }
- if n == maxPostHandlerReadBytes {
- b.earlyClose = true
- }
- }
- default:
- // Fully consume the body, which will also lead to us reading
- // the trailer headers after the body, if present.
- _, err = io.Copy(io.Discard, bodyLocked{b})
- }
- b.closed = true
- return err
-}
-
-func (b *body) didEarlyClose() bool {
- b.mu.Lock()
- defer b.mu.Unlock()
- return b.earlyClose
-}
-
-// bodyRemains reports whether future Read calls might
-// yield data.
-func (b *body) bodyRemains() bool {
- b.mu.Lock()
- defer b.mu.Unlock()
- return !b.sawEOF
-}
-
-func (b *body) registerOnHitEOF(fn func()) {
- b.mu.Lock()
- defer b.mu.Unlock()
- b.onHitEOF = fn
-}
-
-// bodyLocked is an io.Reader reading from a *body when its mutex is
-// already held.
-type bodyLocked struct {
- b *body
-}
-
-func (bl bodyLocked) Read(p []byte) (n int, err error) {
- if bl.b.closed {
- return 0, ErrBodyReadAfterClose
- }
- return bl.b.readLocked(p)
-}
-
-// parseContentLength trims whitespace from cl and returns -1 if no
-// value is set, or the value if it's >= 0.
-func parseContentLength(cl string) (int64, error) {
- cl = textproto.TrimString(cl)
- if cl == "" {
- return -1, nil
- }
- n, err := strconv.ParseUint(cl, 10, 63)
- if err != nil {
- return 0, badStringError("bad Content-Length", cl)
- }
-	return int64(n), nil
-}
-
-// finishAsyncByteRead finishes reading the 1-byte sniff
-// from the ContentLength==0, Body!=nil case.
-type finishAsyncByteRead struct {
- tw *transferWriter
-}
-
-func (fr finishAsyncByteRead) Read(p []byte) (n int, err error) {
- if len(p) == 0 {
- return
- }
- rres := <-fr.tw.ByteReadCh
- n, err = rres.n, rres.err
- if n == 1 {
- p[0] = rres.b
- }
- if err == nil {
- err = io.EOF
- }
- return
-}
-
-var nopCloserType = reflect.TypeOf(io.NopCloser(nil))
-
-// isKnownInMemoryReader reports whether r is a type known to not
-// block on Read. Its caller uses this as an optional optimization to
-// send fewer TCP packets.
-func isKnownInMemoryReader(r io.Reader) bool {
- switch r.(type) {
- case *bytes.Reader, *bytes.Buffer, *strings.Reader:
- return true
- }
- if reflect.TypeOf(r) == nopCloserType {
- return isKnownInMemoryReader(reflect.ValueOf(r).Field(0).Interface().(io.Reader))
- }
- if r, ok := r.(*readTrackingBody); ok {
- return isKnownInMemoryReader(r.ReadCloser)
- }
- return false
-}
-
-// bufioFlushWriter is an io.Writer wrapper that flushes all writes
-// on its wrapped writer if it's a *bufio.Writer.
-type bufioFlushWriter struct{ w io.Writer }
-
-func (fw bufioFlushWriter) Write(p []byte) (n int, err error) {
- n, err = fw.w.Write(p)
- if bw, ok := fw.w.(*bufio.Writer); n > 0 && ok {
- ferr := bw.Flush()
- if ferr != nil && err == nil {
- err = ferr
- }
- }
- return
-}
diff --git a/contrib/go/_std_1.18/src/net/http/transport.go b/contrib/go/_std_1.18/src/net/http/transport.go
deleted file mode 100644
index e41b20a15b..0000000000
--- a/contrib/go/_std_1.18/src/net/http/transport.go
+++ /dev/null
@@ -1,2906 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP client implementation. See RFC 7230 through 7235.
-//
-// This is the low-level Transport implementation of RoundTripper.
-// The high-level interface is in client.go.
-
-package http
-
-import (
- "bufio"
- "compress/gzip"
- "container/list"
- "context"
- "crypto/tls"
- "errors"
- "fmt"
- "internal/godebug"
- "io"
- "log"
- "net"
- "net/http/httptrace"
- "net/http/internal/ascii"
- "net/textproto"
- "net/url"
- "reflect"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/net/http/httpguts"
- "golang.org/x/net/http/httpproxy"
-)
-
-// DefaultTransport is the default implementation of Transport and is
-// used by DefaultClient. It establishes network connections as needed
-// and caches them for reuse by subsequent calls. It uses HTTP proxies
-// as directed by the $HTTP_PROXY and $NO_PROXY (or $http_proxy and
-// $no_proxy) environment variables.
-var DefaultTransport RoundTripper = &Transport{
- Proxy: ProxyFromEnvironment,
- DialContext: defaultTransportDialContext(&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }),
- ForceAttemptHTTP2: true,
- MaxIdleConns: 100,
- IdleConnTimeout: 90 * time.Second,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
-}
-
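-// Illustrative usage (not in the original source): clients that need
-// different limits should build their own Transport rather than
-// mutating DefaultTransport; the values below are hypothetical.
-//
-//	tr := &Transport{
-//		Proxy:               ProxyFromEnvironment,
-//		MaxIdleConns:        50,
-//		IdleConnTimeout:     30 * time.Second,
-//		TLSHandshakeTimeout: 5 * time.Second,
-//	}
-//	client := &Client{Transport: tr, Timeout: 10 * time.Second}
-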
-// DefaultMaxIdleConnsPerHost is the default value of Transport's
-// MaxIdleConnsPerHost.
-const DefaultMaxIdleConnsPerHost = 2
-
-// Transport is an implementation of RoundTripper that supports HTTP,
-// HTTPS, and HTTP proxies (for either HTTP or HTTPS with CONNECT).
-//
-// By default, Transport caches connections for future re-use.
-// This may leave many open connections when accessing many hosts.
-// This behavior can be managed using Transport's CloseIdleConnections method
-// and the MaxIdleConnsPerHost and DisableKeepAlives fields.
-//
-// Transports should be reused instead of created as needed.
-// Transports are safe for concurrent use by multiple goroutines.
-//
-// A Transport is a low-level primitive for making HTTP and HTTPS requests.
-// For high-level functionality, such as cookies and redirects, see Client.
-//
-// Transport uses HTTP/1.1 for HTTP URLs and either HTTP/1.1 or HTTP/2
-// for HTTPS URLs, depending on whether the server supports HTTP/2,
-// and how the Transport is configured. The DefaultTransport supports HTTP/2.
-// To explicitly enable HTTP/2 on a transport, use golang.org/x/net/http2
-// and call ConfigureTransport. See the package docs for more about HTTP/2.
-//
-// Responses with status codes in the 1xx range are either handled
-// automatically (100 expect-continue) or ignored. The one
-// exception is HTTP status code 101 (Switching Protocols), which is
-// considered a terminal status and returned by RoundTrip. To see the
-// ignored 1xx responses, use the httptrace trace package's
-// ClientTrace.Got1xxResponse.
-//
-// Transport only retries a request upon encountering a network error
-// if the request is idempotent and either has no body or has its
-// Request.GetBody defined. HTTP requests are considered idempotent if
-// they have HTTP methods GET, HEAD, OPTIONS, or TRACE; or if their
-// Header map contains an "Idempotency-Key" or "X-Idempotency-Key"
-// entry. If the idempotency key value is a zero-length slice, the
-// request is treated as idempotent but the header is not sent on the
-// wire.
-type Transport struct {
- idleMu sync.Mutex
- closeIdle bool // user has requested to close all idle conns
- idleConn map[connectMethodKey][]*persistConn // most recently used at end
- idleConnWait map[connectMethodKey]wantConnQueue // waiting getConns
- idleLRU connLRU
-
- reqMu sync.Mutex
- reqCanceler map[cancelKey]func(error)
-
- altMu sync.Mutex // guards changing altProto only
- altProto atomic.Value // of nil or map[string]RoundTripper, key is URI scheme
-
- connsPerHostMu sync.Mutex
- connsPerHost map[connectMethodKey]int
- connsPerHostWait map[connectMethodKey]wantConnQueue // waiting getConns
-
- // Proxy specifies a function to return a proxy for a given
- // Request. If the function returns a non-nil error, the
- // request is aborted with the provided error.
- //
- // The proxy type is determined by the URL scheme. "http",
- // "https", and "socks5" are supported. If the scheme is empty,
- // "http" is assumed.
- //
- // If Proxy is nil or returns a nil *URL, no proxy is used.
- Proxy func(*Request) (*url.URL, error)
-
- // DialContext specifies the dial function for creating unencrypted TCP connections.
- // If DialContext is nil (and the deprecated Dial below is also nil),
- // then the transport dials using package net.
- //
- // DialContext runs concurrently with calls to RoundTrip.
- // A RoundTrip call that initiates a dial may end up using
- // a connection dialed previously when the earlier connection
- // becomes idle before the later DialContext completes.
- DialContext func(ctx context.Context, network, addr string) (net.Conn, error)
-
- // Dial specifies the dial function for creating unencrypted TCP connections.
- //
- // Dial runs concurrently with calls to RoundTrip.
- // A RoundTrip call that initiates a dial may end up using
- // a connection dialed previously when the earlier connection
- // becomes idle before the later Dial completes.
- //
- // Deprecated: Use DialContext instead, which allows the transport
- // to cancel dials as soon as they are no longer needed.
- // If both are set, DialContext takes priority.
- Dial func(network, addr string) (net.Conn, error)
-
- // DialTLSContext specifies an optional dial function for creating
- // TLS connections for non-proxied HTTPS requests.
- //
- // If DialTLSContext is nil (and the deprecated DialTLS below is also nil),
- // DialContext and TLSClientConfig are used.
- //
- // If DialTLSContext is set, the Dial and DialContext hooks are not used for HTTPS
- // requests and the TLSClientConfig and TLSHandshakeTimeout
- // are ignored. The returned net.Conn is assumed to already be
- // past the TLS handshake.
- DialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error)
-
- // DialTLS specifies an optional dial function for creating
- // TLS connections for non-proxied HTTPS requests.
- //
- // Deprecated: Use DialTLSContext instead, which allows the transport
- // to cancel dials as soon as they are no longer needed.
- // If both are set, DialTLSContext takes priority.
- DialTLS func(network, addr string) (net.Conn, error)
-
- // TLSClientConfig specifies the TLS configuration to use with
- // tls.Client.
- // If nil, the default configuration is used.
- // If non-nil, HTTP/2 support may not be enabled by default.
- TLSClientConfig *tls.Config
-
-	// TLSHandshakeTimeout specifies the maximum amount of time to
- // wait for a TLS handshake. Zero means no timeout.
- TLSHandshakeTimeout time.Duration
-
- // DisableKeepAlives, if true, disables HTTP keep-alives and
- // will only use the connection to the server for a single
- // HTTP request.
- //
- // This is unrelated to the similarly named TCP keep-alives.
- DisableKeepAlives bool
-
- // DisableCompression, if true, prevents the Transport from
- // requesting compression with an "Accept-Encoding: gzip"
- // request header when the Request contains no existing
- // Accept-Encoding value. If the Transport requests gzip on
- // its own and gets a gzipped response, it's transparently
- // decoded in the Response.Body. However, if the user
- // explicitly requested gzip it is not automatically
- // uncompressed.
- DisableCompression bool
-
- // MaxIdleConns controls the maximum number of idle (keep-alive)
- // connections across all hosts. Zero means no limit.
- MaxIdleConns int
-
- // MaxIdleConnsPerHost, if non-zero, controls the maximum idle
- // (keep-alive) connections to keep per-host. If zero,
- // DefaultMaxIdleConnsPerHost is used.
- MaxIdleConnsPerHost int
-
- // MaxConnsPerHost optionally limits the total number of
- // connections per host, including connections in the dialing,
- // active, and idle states. On limit violation, dials will block.
- //
- // Zero means no limit.
- MaxConnsPerHost int
-
- // IdleConnTimeout is the maximum amount of time an idle
- // (keep-alive) connection will remain idle before closing
- // itself.
- // Zero means no limit.
- IdleConnTimeout time.Duration
-
- // ResponseHeaderTimeout, if non-zero, specifies the amount of
- // time to wait for a server's response headers after fully
- // writing the request (including its body, if any). This
- // time does not include the time to read the response body.
- ResponseHeaderTimeout time.Duration
-
- // ExpectContinueTimeout, if non-zero, specifies the amount of
- // time to wait for a server's first response headers after fully
- // writing the request headers if the request has an
- // "Expect: 100-continue" header. Zero means no timeout and
- // causes the body to be sent immediately, without
- // waiting for the server to approve.
- // This time does not include the time to send the request header.
- ExpectContinueTimeout time.Duration
-
- // TLSNextProto specifies how the Transport switches to an
- // alternate protocol (such as HTTP/2) after a TLS ALPN
-	// protocol negotiation. If Transport dials a TLS connection
- // with a non-empty protocol name and TLSNextProto contains a
- // map entry for that key (such as "h2"), then the func is
- // called with the request's authority (such as "example.com"
- // or "example.com:1234") and the TLS connection. The function
- // must return a RoundTripper that then handles the request.
- // If TLSNextProto is not nil, HTTP/2 support is not enabled
- // automatically.
- TLSNextProto map[string]func(authority string, c *tls.Conn) RoundTripper
-
- // ProxyConnectHeader optionally specifies headers to send to
- // proxies during CONNECT requests.
- // To set the header dynamically, see GetProxyConnectHeader.
- ProxyConnectHeader Header
-
- // GetProxyConnectHeader optionally specifies a func to return
- // headers to send to proxyURL during a CONNECT request to the
- // ip:port target.
- // If it returns an error, the Transport's RoundTrip fails with
- // that error. It can return (nil, nil) to not add headers.
- // If GetProxyConnectHeader is non-nil, ProxyConnectHeader is
- // ignored.
- GetProxyConnectHeader func(ctx context.Context, proxyURL *url.URL, target string) (Header, error)
-
- // MaxResponseHeaderBytes specifies a limit on how many
- // response bytes are allowed in the server's response
- // header.
- //
- // Zero means to use a default limit.
- MaxResponseHeaderBytes int64
-
- // WriteBufferSize specifies the size of the write buffer used
- // when writing to the transport.
- // If zero, a default (currently 4KB) is used.
- WriteBufferSize int
-
- // ReadBufferSize specifies the size of the read buffer used
- // when reading from the transport.
- // If zero, a default (currently 4KB) is used.
- ReadBufferSize int
-
- // nextProtoOnce guards initialization of TLSNextProto and
- // h2transport (via onceSetNextProtoDefaults)
- nextProtoOnce sync.Once
- h2transport h2Transport // non-nil if http2 wired up
- tlsNextProtoWasNil bool // whether TLSNextProto was nil when the Once fired
-
- // ForceAttemptHTTP2 controls whether HTTP/2 is enabled when a non-zero
- // Dial, DialTLS, or DialContext func or TLSClientConfig is provided.
-	// By default, use of any of those fields conservatively disables HTTP/2.
- // To use a custom dialer or TLS config and still attempt HTTP/2
- // upgrades, set this to true.
- ForceAttemptHTTP2 bool
-}
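-
-// Editor's note: the following usage sketch is not part of the original
-// source. It shows one plausible way to wire up the exported fields above;
-// the specific values are illustrative assumptions, not recommendations.
-//
-//	t := &Transport{
-//		Proxy:                 ProxyFromEnvironment,
-//		MaxIdleConns:          100,
-//		MaxIdleConnsPerHost:   10,
-//		IdleConnTimeout:       90 * time.Second,
-//		TLSHandshakeTimeout:   10 * time.Second,
-//		ExpectContinueTimeout: 1 * time.Second,
-//		ForceAttemptHTTP2:     true, // keep HTTP/2 despite the custom dialer below
-//		DialContext: (&net.Dialer{
-//			Timeout:   30 * time.Second,
-//			KeepAlive: 30 * time.Second,
-//		}).DialContext,
-//	}
-//	client := &Client{Transport: t} // reuse this client rather than recreating it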
-
-// A cancelKey is the key of the reqCanceler map.
-// We wrap the *Request in this type since we want to use the original request,
-// not any transient one created by roundTrip.
-type cancelKey struct {
- req *Request
-}
-
-func (t *Transport) writeBufferSize() int {
- if t.WriteBufferSize > 0 {
- return t.WriteBufferSize
- }
- return 4 << 10
-}
-
-func (t *Transport) readBufferSize() int {
- if t.ReadBufferSize > 0 {
- return t.ReadBufferSize
- }
- return 4 << 10
-}
-
-// Clone returns a deep copy of t's exported fields.
-func (t *Transport) Clone() *Transport {
- t.nextProtoOnce.Do(t.onceSetNextProtoDefaults)
- t2 := &Transport{
- Proxy: t.Proxy,
- DialContext: t.DialContext,
- Dial: t.Dial,
- DialTLS: t.DialTLS,
- DialTLSContext: t.DialTLSContext,
- TLSHandshakeTimeout: t.TLSHandshakeTimeout,
- DisableKeepAlives: t.DisableKeepAlives,
- DisableCompression: t.DisableCompression,
- MaxIdleConns: t.MaxIdleConns,
- MaxIdleConnsPerHost: t.MaxIdleConnsPerHost,
- MaxConnsPerHost: t.MaxConnsPerHost,
- IdleConnTimeout: t.IdleConnTimeout,
- ResponseHeaderTimeout: t.ResponseHeaderTimeout,
- ExpectContinueTimeout: t.ExpectContinueTimeout,
- ProxyConnectHeader: t.ProxyConnectHeader.Clone(),
- GetProxyConnectHeader: t.GetProxyConnectHeader,
- MaxResponseHeaderBytes: t.MaxResponseHeaderBytes,
- ForceAttemptHTTP2: t.ForceAttemptHTTP2,
- WriteBufferSize: t.WriteBufferSize,
- ReadBufferSize: t.ReadBufferSize,
- }
- if t.TLSClientConfig != nil {
- t2.TLSClientConfig = t.TLSClientConfig.Clone()
- }
- if !t.tlsNextProtoWasNil {
- npm := map[string]func(authority string, c *tls.Conn) RoundTripper{}
- for k, v := range t.TLSNextProto {
- npm[k] = v
- }
- t2.TLSNextProto = npm
- }
- return t2
-}
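-
-// Editor's note: an illustrative sketch, not in the original source. Clone
-// is the safe way to derive a variant Transport, since mutating a Transport
-// after first use is not safe for concurrent callers:
-//
-//	base := DefaultTransport.(*Transport)
-//	insecure := base.Clone()
-//	insecure.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} // test-only assumption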
-
-// h2Transport is the interface we expect to be able to call from
-// net/http against an *http2.Transport that's either bundled into
-// h2_bundle.go or supplied by the user via x/net/http2.
-//
-// We name it with the "h2" prefix to stay out of the "http2" prefix
-// namespace used by x/tools/cmd/bundle for h2_bundle.go.
-type h2Transport interface {
- CloseIdleConnections()
-}
-
-func (t *Transport) hasCustomTLSDialer() bool {
- return t.DialTLS != nil || t.DialTLSContext != nil
-}
-
-// onceSetNextProtoDefaults initializes TLSNextProto.
-// It must be called via t.nextProtoOnce.Do.
-func (t *Transport) onceSetNextProtoDefaults() {
- t.tlsNextProtoWasNil = (t.TLSNextProto == nil)
- if godebug.Get("http2client") == "0" {
- return
- }
-
- // If they've already configured http2 with
- // golang.org/x/net/http2 instead of the bundled copy, try to
- // get at its http2.Transport value (via the "https"
- // altproto map) so we can call CloseIdleConnections on it if
- // requested. (Issue 22891)
- altProto, _ := t.altProto.Load().(map[string]RoundTripper)
- if rv := reflect.ValueOf(altProto["https"]); rv.IsValid() && rv.Type().Kind() == reflect.Struct && rv.Type().NumField() == 1 {
- if v := rv.Field(0); v.CanInterface() {
- if h2i, ok := v.Interface().(h2Transport); ok {
- t.h2transport = h2i
- return
- }
- }
- }
-
- if t.TLSNextProto != nil {
- // This is the documented way to disable http2 on a
- // Transport.
- return
- }
- if !t.ForceAttemptHTTP2 && (t.TLSClientConfig != nil || t.Dial != nil || t.DialContext != nil || t.hasCustomTLSDialer()) {
- // Be conservative and don't automatically enable
- // http2 if they've specified a custom TLS config or
- // custom dialers. Let them opt-in themselves via
- // http2.ConfigureTransport so we don't surprise them
- // by modifying their tls.Config. Issue 14275.
- // However, if ForceAttemptHTTP2 is true, it overrides the above checks.
- return
- }
- if omitBundledHTTP2 {
- return
- }
- t2, err := http2configureTransports(t)
- if err != nil {
- log.Printf("Error enabling Transport HTTP/2 support: %v", err)
- return
- }
- t.h2transport = t2
-
- // Auto-configure the http2.Transport's MaxHeaderListSize from
- // the http.Transport's MaxResponseHeaderBytes. They don't
- // exactly mean the same thing, but they're close.
- //
-	// TODO: also add this to x/net/http2.ConfigureTransports, behind
- // a +build go1.7 build tag:
- if limit1 := t.MaxResponseHeaderBytes; limit1 != 0 && t2.MaxHeaderListSize == 0 {
- const h2max = 1<<32 - 1
- if limit1 >= h2max {
- t2.MaxHeaderListSize = h2max
- } else {
- t2.MaxHeaderListSize = uint32(limit1)
- }
- }
-}
-
-// ProxyFromEnvironment returns the URL of the proxy to use for a
-// given request, as indicated by the environment variables
-// HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions
-// thereof). HTTPS_PROXY takes precedence over HTTP_PROXY for https
-// requests.
-//
-// The environment values may be either a complete URL or a
-// "host[:port]", in which case the "http" scheme is assumed.
-// The schemes "http", "https", and "socks5" are supported.
-// An error is returned if the value is a different form.
-//
-// A nil URL and nil error are returned if no proxy is defined in the
-// environment, or a proxy should not be used for the given request,
-// as defined by NO_PROXY.
-//
-// As a special case, if req.URL.Host is "localhost" (with or without
-// a port number), then a nil URL and nil error will be returned.
-func ProxyFromEnvironment(req *Request) (*url.URL, error) {
- return envProxyFunc()(req.URL)
-}
-
-// ProxyURL returns a proxy function (for use in a Transport)
-// that always returns the same URL.
-func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) {
- return func(*Request) (*url.URL, error) {
- return fixedURL, nil
- }
-}
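-
-// Editor's note: a minimal sketch, not in the original source, pinning all
-// requests to one fixed proxy; the proxy address is a made-up placeholder.
-//
-//	proxyURL, err := url.Parse("http://proxy.example.com:3128")
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	t := &Transport{Proxy: ProxyURL(proxyURL)}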
-
-// transportRequest is a wrapper around a *Request that adds
-// optional extra headers to write and stores any error to return
-// from roundTrip.
-type transportRequest struct {
- *Request // original request, not to be mutated
- extra Header // extra headers to write, or nil
- trace *httptrace.ClientTrace // optional
- cancelKey cancelKey
-
- mu sync.Mutex // guards err
- err error // first setError value for mapRoundTripError to consider
-}
-
-func (tr *transportRequest) extraHeaders() Header {
- if tr.extra == nil {
- tr.extra = make(Header)
- }
- return tr.extra
-}
-
-func (tr *transportRequest) setError(err error) {
- tr.mu.Lock()
- if tr.err == nil {
- tr.err = err
- }
- tr.mu.Unlock()
-}
-
-// useRegisteredProtocol reports whether an alternate protocol (as registered
-// with Transport.RegisterProtocol) should be respected for this request.
-func (t *Transport) useRegisteredProtocol(req *Request) bool {
- if req.URL.Scheme == "https" && req.requiresHTTP1() {
- // If this request requires HTTP/1, don't use the
- // "https" alternate protocol, which is used by the
- // HTTP/2 code to take over requests if there's an
- // existing cached HTTP/2 connection.
- return false
- }
- return true
-}
-
-// alternateRoundTripper returns the alternate RoundTripper to use
-// for this request if the Request's URL scheme requires one,
-// or nil for the normal case of using the Transport.
-func (t *Transport) alternateRoundTripper(req *Request) RoundTripper {
- if !t.useRegisteredProtocol(req) {
- return nil
- }
- altProto, _ := t.altProto.Load().(map[string]RoundTripper)
- return altProto[req.URL.Scheme]
-}
-
-// roundTrip implements a RoundTripper over HTTP.
-func (t *Transport) roundTrip(req *Request) (*Response, error) {
- t.nextProtoOnce.Do(t.onceSetNextProtoDefaults)
- ctx := req.Context()
- trace := httptrace.ContextClientTrace(ctx)
-
- if req.URL == nil {
- req.closeBody()
- return nil, errors.New("http: nil Request.URL")
- }
- if req.Header == nil {
- req.closeBody()
- return nil, errors.New("http: nil Request.Header")
- }
- scheme := req.URL.Scheme
- isHTTP := scheme == "http" || scheme == "https"
- if isHTTP {
- for k, vv := range req.Header {
- if !httpguts.ValidHeaderFieldName(k) {
- req.closeBody()
- return nil, fmt.Errorf("net/http: invalid header field name %q", k)
- }
- for _, v := range vv {
- if !httpguts.ValidHeaderFieldValue(v) {
- req.closeBody()
- return nil, fmt.Errorf("net/http: invalid header field value %q for key %v", v, k)
- }
- }
- }
- }
-
- origReq := req
- cancelKey := cancelKey{origReq}
- req = setupRewindBody(req)
-
- if altRT := t.alternateRoundTripper(req); altRT != nil {
- if resp, err := altRT.RoundTrip(req); err != ErrSkipAltProtocol {
- return resp, err
- }
- var err error
- req, err = rewindBody(req)
- if err != nil {
- return nil, err
- }
- }
- if !isHTTP {
- req.closeBody()
- return nil, badStringError("unsupported protocol scheme", scheme)
- }
- if req.Method != "" && !validMethod(req.Method) {
- req.closeBody()
- return nil, fmt.Errorf("net/http: invalid method %q", req.Method)
- }
- if req.URL.Host == "" {
- req.closeBody()
- return nil, errors.New("http: no Host in request URL")
- }
-
- for {
- select {
- case <-ctx.Done():
- req.closeBody()
- return nil, ctx.Err()
- default:
- }
-
- // treq gets modified by roundTrip, so we need to recreate for each retry.
- treq := &transportRequest{Request: req, trace: trace, cancelKey: cancelKey}
- cm, err := t.connectMethodForRequest(treq)
- if err != nil {
- req.closeBody()
- return nil, err
- }
-
- // Get the cached or newly-created connection to either the
- // host (for http or https), the http proxy, or the http proxy
- // pre-CONNECTed to https server. In any case, we'll be ready
- // to send it requests.
- pconn, err := t.getConn(treq, cm)
- if err != nil {
- t.setReqCanceler(cancelKey, nil)
- req.closeBody()
- return nil, err
- }
-
- var resp *Response
- if pconn.alt != nil {
- // HTTP/2 path.
- t.setReqCanceler(cancelKey, nil) // not cancelable with CancelRequest
- resp, err = pconn.alt.RoundTrip(req)
- } else {
- resp, err = pconn.roundTrip(treq)
- }
- if err == nil {
- resp.Request = origReq
- return resp, nil
- }
-
- // Failed. Clean up and determine whether to retry.
- if http2isNoCachedConnError(err) {
- if t.removeIdleConn(pconn) {
- t.decConnsPerHost(pconn.cacheKey)
- }
- } else if !pconn.shouldRetryRequest(req, err) {
- // Issue 16465: return underlying net.Conn.Read error from peek,
- // as we've historically done.
- if e, ok := err.(nothingWrittenError); ok {
- err = e.error
- }
- if e, ok := err.(transportReadFromServerError); ok {
- err = e.err
- }
- return nil, err
- }
- testHookRoundTripRetried()
-
- // Rewind the body if we're able to.
- req, err = rewindBody(req)
- if err != nil {
- return nil, err
- }
- }
-}
-
-var errCannotRewind = errors.New("net/http: cannot rewind body after connection loss")
-
-type readTrackingBody struct {
- io.ReadCloser
- didRead bool
- didClose bool
-}
-
-func (r *readTrackingBody) Read(data []byte) (int, error) {
- r.didRead = true
- return r.ReadCloser.Read(data)
-}
-
-func (r *readTrackingBody) Close() error {
- r.didClose = true
- return r.ReadCloser.Close()
-}
-
-// setupRewindBody returns a new request with a custom body wrapper
-// that can report whether the body needs rewinding.
-// This lets rewindBody avoid an error result when the request
-// does not have GetBody but the body hasn't been read at all yet.
-func setupRewindBody(req *Request) *Request {
- if req.Body == nil || req.Body == NoBody {
- return req
- }
- newReq := *req
- newReq.Body = &readTrackingBody{ReadCloser: req.Body}
- return &newReq
-}
-
-// rewindBody returns a new request with the body rewound.
-// It returns req unmodified if the body does not need rewinding.
-// rewindBody takes care of closing req.Body when appropriate
-// (in all cases except when rewindBody returns req unmodified).
-func rewindBody(req *Request) (rewound *Request, err error) {
- if req.Body == nil || req.Body == NoBody || (!req.Body.(*readTrackingBody).didRead && !req.Body.(*readTrackingBody).didClose) {
- return req, nil // nothing to rewind
- }
- if !req.Body.(*readTrackingBody).didClose {
- req.closeBody()
- }
- if req.GetBody == nil {
- return nil, errCannotRewind
- }
- body, err := req.GetBody()
- if err != nil {
- return nil, err
- }
- newReq := *req
- newReq.Body = &readTrackingBody{ReadCloser: body}
- return &newReq, nil
-}
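-
-// Editor's note: a sketch, not in the original source, of making a request
-// body replayable so the retry path can rewind it. NewRequest sets GetBody
-// automatically for *bytes.Buffer, *bytes.Reader, and *strings.Reader
-// bodies; for any other body type the caller can supply GetBody itself.
-// newPayload below is a hypothetical function returning a fresh io.Reader.
-//
-//	req, _ := NewRequest("POST", "http://example.com/upload", nil)
-//	req.Body = io.NopCloser(newPayload())
-//	req.GetBody = func() (io.ReadCloser, error) {
-//		return io.NopCloser(newPayload()), nil // fresh copy for each retry
-//	}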
-
-// shouldRetryRequest reports whether we should retry sending a failed
-// HTTP request on a new connection. The non-nil input error is the
-// error from roundTrip.
-func (pc *persistConn) shouldRetryRequest(req *Request, err error) bool {
- if http2isNoCachedConnError(err) {
- // Issue 16582: if the user started a bunch of
- // requests at once, they can all pick the same conn
- // and violate the server's max concurrent streams.
- // Instead, match the HTTP/1 behavior for now and dial
- // again to get a new TCP connection, rather than failing
- // this request.
- return true
- }
- if err == errMissingHost {
- // User error.
- return false
- }
- if !pc.isReused() {
- // This was a fresh connection. There's no reason the server
- // should've hung up on us.
- //
- // Also, if we retried now, we could loop forever
- // creating new connections and retrying if the server
- // is just hanging up on us because it doesn't like
- // our request (as opposed to sending an error).
- return false
- }
- if _, ok := err.(nothingWrittenError); ok {
- // We never wrote anything, so it's safe to retry, if there's no body or we
- // can "rewind" the body with GetBody.
- return req.outgoingLength() == 0 || req.GetBody != nil
- }
- if !req.isReplayable() {
- // Don't retry non-idempotent requests.
- return false
- }
- if _, ok := err.(transportReadFromServerError); ok {
- // We got some non-EOF net.Conn.Read failure reading
- // the 1st response byte from the server.
- return true
- }
- if err == errServerClosedIdle {
- // The server replied with io.EOF while we were trying to
-		// read the response. Probably an unfortunate keep-alive
- // timeout, just as the client was writing a request.
- return true
- }
- return false // conservatively
-}
-
-// ErrSkipAltProtocol is a sentinel error value defined by Transport.RegisterProtocol.
-var ErrSkipAltProtocol = errors.New("net/http: skip alternate protocol")
-
-// RegisterProtocol registers a new protocol with scheme.
-// The Transport will pass requests using the given scheme to rt.
-// It is rt's responsibility to simulate HTTP request semantics.
-//
-// RegisterProtocol can be used by other packages to provide
-// implementations of protocol schemes like "ftp" or "file".
-//
-// If rt.RoundTrip returns ErrSkipAltProtocol, the Transport will
-// handle the RoundTrip itself for that one request, as if the
-// protocol were not registered.
-func (t *Transport) RegisterProtocol(scheme string, rt RoundTripper) {
- t.altMu.Lock()
- defer t.altMu.Unlock()
- oldMap, _ := t.altProto.Load().(map[string]RoundTripper)
- if _, exists := oldMap[scheme]; exists {
- panic("protocol " + scheme + " already registered")
- }
- newMap := make(map[string]RoundTripper)
- for k, v := range oldMap {
- newMap[k] = v
- }
- newMap[scheme] = rt
- t.altProto.Store(newMap)
-}
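-
-// Editor's note: a usage sketch, not in the original source, registering
-// net/http's own file-serving RoundTripper (NewFileTransport) for the
-// "file" scheme:
-//
-//	t := &Transport{}
-//	t.RegisterProtocol("file", NewFileTransport(Dir("/")))
-//	c := &Client{Transport: t}
-//	res, err := c.Get("file:///etc/passwd")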
-
-// CloseIdleConnections closes any connections which were previously
-// connected from previous requests but are now sitting idle in
-// a "keep-alive" state. It does not interrupt any connections currently
-// in use.
-func (t *Transport) CloseIdleConnections() {
- t.nextProtoOnce.Do(t.onceSetNextProtoDefaults)
- t.idleMu.Lock()
- m := t.idleConn
- t.idleConn = nil
- t.closeIdle = true // close newly idle connections
- t.idleLRU = connLRU{}
- t.idleMu.Unlock()
- for _, conns := range m {
- for _, pconn := range conns {
- pconn.close(errCloseIdleConns)
- }
- }
- if t2 := t.h2transport; t2 != nil {
- t2.CloseIdleConnections()
- }
-}
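-
-// Editor's note: a usage sketch, not in the original source. A long-lived
-// client can release pooled sockets once it knows it is going quiet:
-//
-//	c := &Client{Transport: &Transport{}}
-//	resp, err := c.Get("https://example.com/")
-//	if err == nil {
-//		resp.Body.Close()
-//	}
-//	c.CloseIdleConnections() // drops keep-alive conns; in-flight requests are unaffected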
-
-// CancelRequest cancels an in-flight request by closing its connection.
-// CancelRequest should only be called after RoundTrip has returned.
-//
-// Deprecated: Use Request.WithContext to create a request with a
-// cancelable context instead. CancelRequest cannot cancel HTTP/2
-// requests.
-func (t *Transport) CancelRequest(req *Request) {
- t.cancelRequest(cancelKey{req}, errRequestCanceled)
-}
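-
-// Editor's note: a sketch, not in the original source, of the replacement
-// the deprecation notice points to; context cancellation covers both
-// HTTP/1 and HTTP/2 requests.
-//
-//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-//	defer cancel()
-//	req, err := NewRequestWithContext(ctx, "GET", "https://example.com/", nil)
-//	if err != nil {
-//		// handle err
-//	}
-//	resp, err := DefaultClient.Do(req) // fails with ctx.Err() after 5s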
-
-// cancelRequest cancels an in-flight request, recording the error value.
-// It returns whether the request was canceled.
-func (t *Transport) cancelRequest(key cancelKey, err error) bool {
- // This function must not return until the cancel func has completed.
- // See: https://golang.org/issue/34658
- t.reqMu.Lock()
- defer t.reqMu.Unlock()
- cancel := t.reqCanceler[key]
- delete(t.reqCanceler, key)
- if cancel != nil {
- cancel(err)
- }
-
- return cancel != nil
-}
-
-//
-// Private implementation past this point.
-//
-
-var (
-	// envProxyOnce guards envProxyFuncValue
- envProxyOnce sync.Once
- envProxyFuncValue func(*url.URL) (*url.URL, error)
-)
-
-// envProxyFunc returns the proxy-selection function derived from the
-// environment, computed only once. Caching the result mitigates
-// expensive lookups on some platforms (e.g. Windows).
-func envProxyFunc() func(*url.URL) (*url.URL, error) {
- envProxyOnce.Do(func() {
- envProxyFuncValue = httpproxy.FromEnvironment().ProxyFunc()
- })
- return envProxyFuncValue
-}
-
-// resetProxyConfig is used by tests.
-func resetProxyConfig() {
- envProxyOnce = sync.Once{}
- envProxyFuncValue = nil
-}
-
-func (t *Transport) connectMethodForRequest(treq *transportRequest) (cm connectMethod, err error) {
- cm.targetScheme = treq.URL.Scheme
- cm.targetAddr = canonicalAddr(treq.URL)
- if t.Proxy != nil {
- cm.proxyURL, err = t.Proxy(treq.Request)
- }
- cm.onlyH1 = treq.requiresHTTP1()
- return cm, err
-}
-
-// proxyAuth returns the Proxy-Authorization header to set
-// on requests, if applicable.
-func (cm *connectMethod) proxyAuth() string {
- if cm.proxyURL == nil {
- return ""
- }
- if u := cm.proxyURL.User; u != nil {
- username := u.Username()
- password, _ := u.Password()
- return "Basic " + basicAuth(username, password)
- }
- return ""
-}
-
-// error values for debugging and testing, not seen by users.
-var (
- errKeepAlivesDisabled = errors.New("http: putIdleConn: keep alives disabled")
- errConnBroken = errors.New("http: putIdleConn: connection is in bad state")
- errCloseIdle = errors.New("http: putIdleConn: CloseIdleConnections was called")
- errTooManyIdle = errors.New("http: putIdleConn: too many idle connections")
- errTooManyIdleHost = errors.New("http: putIdleConn: too many idle connections for host")
- errCloseIdleConns = errors.New("http: CloseIdleConnections called")
- errReadLoopExiting = errors.New("http: persistConn.readLoop exiting")
- errIdleConnTimeout = errors.New("http: idle connection timeout")
-
- // errServerClosedIdle is not seen by users for idempotent requests, but may be
- // seen by a user if the server shuts down an idle connection and sends its FIN
- // in flight with already-written POST body bytes from the client.
- // See https://github.com/golang/go/issues/19943#issuecomment-355607646
- errServerClosedIdle = errors.New("http: server closed idle connection")
-)
-
-// transportReadFromServerError is used by Transport.readLoop when the
-// 1 byte peek read fails and we're actually anticipating a response.
-// Usually this is just due to the inherent keep-alive shut down race,
-// where the server closed the connection at the same time the client
-// wrote. The underlying err field is usually io.EOF or some
-// ECONNRESET sort of thing which varies by platform. But it might be
-// the user's custom net.Conn.Read error too, so we carry it along for
-// them to return from Transport.RoundTrip.
-type transportReadFromServerError struct {
- err error
-}
-
-func (e transportReadFromServerError) Unwrap() error { return e.err }
-
-func (e transportReadFromServerError) Error() string {
- return fmt.Sprintf("net/http: Transport failed to read from server: %v", e.err)
-}
-
-func (t *Transport) putOrCloseIdleConn(pconn *persistConn) {
- if err := t.tryPutIdleConn(pconn); err != nil {
- pconn.close(err)
- }
-}
-
-func (t *Transport) maxIdleConnsPerHost() int {
- if v := t.MaxIdleConnsPerHost; v != 0 {
- return v
- }
- return DefaultMaxIdleConnsPerHost
-}
-
-// tryPutIdleConn adds pconn to the list of idle persistent connections awaiting
-// a new request.
-// If pconn is no longer needed or not in a good state, tryPutIdleConn returns
-// an error explaining why it wasn't registered.
-// tryPutIdleConn does not close pconn. Use putOrCloseIdleConn instead for that.
-func (t *Transport) tryPutIdleConn(pconn *persistConn) error {
- if t.DisableKeepAlives || t.MaxIdleConnsPerHost < 0 {
- return errKeepAlivesDisabled
- }
- if pconn.isBroken() {
- return errConnBroken
- }
- pconn.markReused()
-
- t.idleMu.Lock()
- defer t.idleMu.Unlock()
-
- // HTTP/2 (pconn.alt != nil) connections do not come out of the idle list,
- // because multiple goroutines can use them simultaneously.
- // If this is an HTTP/2 connection being “returned,” we're done.
- if pconn.alt != nil && t.idleLRU.m[pconn] != nil {
- return nil
- }
-
- // Deliver pconn to goroutine waiting for idle connection, if any.
- // (They may be actively dialing, but this conn is ready first.
- // Chrome calls this socket late binding.
- // See https://www.chromium.org/developers/design-documents/network-stack#TOC-Connection-Management.)
- key := pconn.cacheKey
- if q, ok := t.idleConnWait[key]; ok {
- done := false
- if pconn.alt == nil {
- // HTTP/1.
- // Loop over the waiting list until we find a w that isn't done already, and hand it pconn.
- for q.len() > 0 {
- w := q.popFront()
- if w.tryDeliver(pconn, nil) {
- done = true
- break
- }
- }
- } else {
- // HTTP/2.
- // Can hand the same pconn to everyone in the waiting list,
- // and we still won't be done: we want to put it in the idle
- // list unconditionally, for any future clients too.
- for q.len() > 0 {
- w := q.popFront()
- w.tryDeliver(pconn, nil)
- }
- }
- if q.len() == 0 {
- delete(t.idleConnWait, key)
- } else {
- t.idleConnWait[key] = q
- }
- if done {
- return nil
- }
- }
-
- if t.closeIdle {
- return errCloseIdle
- }
- if t.idleConn == nil {
- t.idleConn = make(map[connectMethodKey][]*persistConn)
- }
- idles := t.idleConn[key]
- if len(idles) >= t.maxIdleConnsPerHost() {
- return errTooManyIdleHost
- }
- for _, exist := range idles {
- if exist == pconn {
- log.Fatalf("dup idle pconn %p in freelist", pconn)
- }
- }
- t.idleConn[key] = append(idles, pconn)
- t.idleLRU.add(pconn)
- if t.MaxIdleConns != 0 && t.idleLRU.len() > t.MaxIdleConns {
- oldest := t.idleLRU.removeOldest()
- oldest.close(errTooManyIdle)
- t.removeIdleConnLocked(oldest)
- }
-
- // Set idle timer, but only for HTTP/1 (pconn.alt == nil).
- // The HTTP/2 implementation manages the idle timer itself
- // (see idleConnTimeout in h2_bundle.go).
- if t.IdleConnTimeout > 0 && pconn.alt == nil {
- if pconn.idleTimer != nil {
- pconn.idleTimer.Reset(t.IdleConnTimeout)
- } else {
- pconn.idleTimer = time.AfterFunc(t.IdleConnTimeout, pconn.closeConnIfStillIdle)
- }
- }
- pconn.idleAt = time.Now()
- return nil
-}
-
-// queueForIdleConn queues w to receive the next idle connection for w.cm.
-// As an optimization hint to the caller, queueForIdleConn reports whether
-// it successfully delivered an already-idle connection.
-func (t *Transport) queueForIdleConn(w *wantConn) (delivered bool) {
- if t.DisableKeepAlives {
- return false
- }
-
- t.idleMu.Lock()
- defer t.idleMu.Unlock()
-
- // Stop closing connections that become idle - we might want one.
- // (That is, undo the effect of t.CloseIdleConnections.)
- t.closeIdle = false
-
- if w == nil {
- // Happens in test hook.
- return false
- }
-
- // If IdleConnTimeout is set, calculate the oldest
- // persistConn.idleAt time we're willing to use a cached idle
- // conn.
- var oldTime time.Time
- if t.IdleConnTimeout > 0 {
- oldTime = time.Now().Add(-t.IdleConnTimeout)
- }
-
- // Look for most recently-used idle connection.
- if list, ok := t.idleConn[w.key]; ok {
- stop := false
- delivered := false
- for len(list) > 0 && !stop {
- pconn := list[len(list)-1]
-
- // See whether this connection has been idle too long, considering
- // only the wall time (the Round(0)), in case this is a laptop or VM
- // coming out of suspend with previously cached idle connections.
- tooOld := !oldTime.IsZero() && pconn.idleAt.Round(0).Before(oldTime)
- if tooOld {
- // Async cleanup. Launch in its own goroutine (as if a
- // time.AfterFunc called it); it acquires idleMu, which we're
- // holding, and does a synchronous net.Conn.Close.
- go pconn.closeConnIfStillIdle()
- }
- if pconn.isBroken() || tooOld {
- // If either persistConn.readLoop has marked the connection
- // broken, but Transport.removeIdleConn has not yet removed it
- // from the idle list, or if this persistConn is too old (it was
- // idle too long), then ignore it and look for another. In both
- // cases it's already in the process of being closed.
- list = list[:len(list)-1]
- continue
- }
- delivered = w.tryDeliver(pconn, nil)
- if delivered {
- if pconn.alt != nil {
- // HTTP/2: multiple clients can share pconn.
- // Leave it in the list.
- } else {
- // HTTP/1: only one client can use pconn.
- // Remove it from the list.
- t.idleLRU.remove(pconn)
- list = list[:len(list)-1]
- }
- }
- stop = true
- }
- if len(list) > 0 {
- t.idleConn[w.key] = list
- } else {
- delete(t.idleConn, w.key)
- }
- if stop {
- return delivered
- }
- }
-
- // Register to receive next connection that becomes idle.
- if t.idleConnWait == nil {
- t.idleConnWait = make(map[connectMethodKey]wantConnQueue)
- }
- q := t.idleConnWait[w.key]
- q.cleanFront()
- q.pushBack(w)
- t.idleConnWait[w.key] = q
- return false
-}
-
-// removeIdleConn marks pconn as dead.
-func (t *Transport) removeIdleConn(pconn *persistConn) bool {
- t.idleMu.Lock()
- defer t.idleMu.Unlock()
- return t.removeIdleConnLocked(pconn)
-}
-
-// t.idleMu must be held.
-func (t *Transport) removeIdleConnLocked(pconn *persistConn) bool {
- if pconn.idleTimer != nil {
- pconn.idleTimer.Stop()
- }
- t.idleLRU.remove(pconn)
- key := pconn.cacheKey
- pconns := t.idleConn[key]
- var removed bool
- switch len(pconns) {
- case 0:
- // Nothing
- case 1:
- if pconns[0] == pconn {
- delete(t.idleConn, key)
- removed = true
- }
- default:
- for i, v := range pconns {
- if v != pconn {
- continue
- }
- // Slide down, keeping most recently-used
- // conns at the end.
- copy(pconns[i:], pconns[i+1:])
- t.idleConn[key] = pconns[:len(pconns)-1]
- removed = true
- break
- }
- }
- return removed
-}
-
-func (t *Transport) setReqCanceler(key cancelKey, fn func(error)) {
- t.reqMu.Lock()
- defer t.reqMu.Unlock()
- if t.reqCanceler == nil {
- t.reqCanceler = make(map[cancelKey]func(error))
- }
- if fn != nil {
- t.reqCanceler[key] = fn
- } else {
- delete(t.reqCanceler, key)
- }
-}
-
-// replaceReqCanceler replaces an existing cancel function. If there is no cancel function
-// for the request, we don't set the function and return false.
-// Since CancelRequest will clear the canceler, we can use the return value to detect if
-// the request was canceled since the last setReqCanceler call.
-func (t *Transport) replaceReqCanceler(key cancelKey, fn func(error)) bool {
- t.reqMu.Lock()
- defer t.reqMu.Unlock()
- _, ok := t.reqCanceler[key]
- if !ok {
- return false
- }
- if fn != nil {
- t.reqCanceler[key] = fn
- } else {
- delete(t.reqCanceler, key)
- }
- return true
-}
-
-var zeroDialer net.Dialer
-
-func (t *Transport) dial(ctx context.Context, network, addr string) (net.Conn, error) {
- if t.DialContext != nil {
- return t.DialContext(ctx, network, addr)
- }
- if t.Dial != nil {
- c, err := t.Dial(network, addr)
- if c == nil && err == nil {
- err = errors.New("net/http: Transport.Dial hook returned (nil, nil)")
- }
- return c, err
- }
- return zeroDialer.DialContext(ctx, network, addr)
-}
-
-// A wantConn records state about a wanted connection
-// (that is, an active call to getConn).
-// The conn may be gotten by dialing or by finding an idle connection,
-// or a cancellation may make the conn no longer wanted.
-// These three options are racing against each other and use
-// wantConn to coordinate and agree about the winning outcome.
-type wantConn struct {
- cm connectMethod
- key connectMethodKey // cm.key()
- ctx context.Context // context for dial
- ready chan struct{} // closed when pc, err pair is delivered
-
- // hooks for testing to know when dials are done
- // beforeDial is called in the getConn goroutine when the dial is queued.
- // afterDial is called when the dial is completed or canceled.
- beforeDial func()
- afterDial func()
-
- mu sync.Mutex // protects pc, err, close(ready)
- pc *persistConn
- err error
-}
-
-// waiting reports whether w is still waiting for an answer (connection or error).
-func (w *wantConn) waiting() bool {
- select {
- case <-w.ready:
- return false
- default:
- return true
- }
-}
-
-// tryDeliver attempts to deliver pc, err to w and reports whether it succeeded.
-func (w *wantConn) tryDeliver(pc *persistConn, err error) bool {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- if w.pc != nil || w.err != nil {
- return false
- }
-
- w.pc = pc
- w.err = err
- if w.pc == nil && w.err == nil {
- panic("net/http: internal error: misuse of tryDeliver")
- }
- close(w.ready)
- return true
-}
-
-// cancel marks w as no longer wanting a result (for example, due to cancellation).
-// If a connection has been delivered already, cancel returns it with t.putOrCloseIdleConn.
-func (w *wantConn) cancel(t *Transport, err error) {
- w.mu.Lock()
- if w.pc == nil && w.err == nil {
- close(w.ready) // catch misbehavior in future delivery
- }
- pc := w.pc
- w.pc = nil
- w.err = err
- w.mu.Unlock()
-
- if pc != nil {
- t.putOrCloseIdleConn(pc)
- }
-}
-
-// A wantConnQueue is a queue of wantConns.
-type wantConnQueue struct {
- // This is a queue, not a deque.
- // It is split into two stages - head[headPos:] and tail.
- // popFront is trivial (headPos++) on the first stage, and
- // pushBack is trivial (append) on the second stage.
- // If the first stage is empty, popFront can swap the
- // first and second stages to remedy the situation.
- //
- // This two-stage split is analogous to the use of two lists
- // in Okasaki's purely functional queue but without the
- // overhead of reversing the list when swapping stages.
- head []*wantConn
- headPos int
- tail []*wantConn
-}
-
-// len returns the number of items in the queue.
-func (q *wantConnQueue) len() int {
- return len(q.head) - q.headPos + len(q.tail)
-}
-
-// pushBack adds w to the back of the queue.
-func (q *wantConnQueue) pushBack(w *wantConn) {
- q.tail = append(q.tail, w)
-}
-
-// popFront removes and returns the wantConn at the front of the queue.
-func (q *wantConnQueue) popFront() *wantConn {
- if q.headPos >= len(q.head) {
- if len(q.tail) == 0 {
- return nil
- }
- // Pick up tail as new head, clear tail.
- q.head, q.headPos, q.tail = q.tail, 0, q.head[:0]
- }
- w := q.head[q.headPos]
- q.head[q.headPos] = nil
- q.headPos++
- return w
-}
-
-// peekFront returns the wantConn at the front of the queue without removing it.
-func (q *wantConnQueue) peekFront() *wantConn {
- if q.headPos < len(q.head) {
- return q.head[q.headPos]
- }
- if len(q.tail) > 0 {
- return q.tail[0]
- }
- return nil
-}
-
-// cleanFront pops any wantConns that are no longer waiting from the head of the
-// queue, reporting whether any were popped.
-func (q *wantConnQueue) cleanFront() (cleaned bool) {
- for {
- w := q.peekFront()
- if w == nil || w.waiting() {
- return cleaned
- }
- q.popFront()
- cleaned = true
- }
-}
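-
-// Editor's note: an illustrative trace, not in the original source, of the
-// two-stage layout. pushBack appends to tail; popFront walks head via
-// headPos and, once head is exhausted, swaps tail in as the new head
-// without copying any elements:
-//
-//	var q wantConnQueue
-//	q.pushBack(w1) // head=[], tail=[w1]
-//	q.pushBack(w2) // head=[], tail=[w1 w2]
-//	q.popFront()   // swap stages: head=[w1 w2], headPos=1, tail=[]; returns w1
-//	q.pushBack(w3) // head=[w1 w2], headPos=1, tail=[w3]
-//	q.popFront()   // returns w2; headPos=2
-//	q.popFront()   // swap stages again; returns w3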
-
-func (t *Transport) customDialTLS(ctx context.Context, network, addr string) (conn net.Conn, err error) {
- if t.DialTLSContext != nil {
- conn, err = t.DialTLSContext(ctx, network, addr)
- } else {
- conn, err = t.DialTLS(network, addr)
- }
- if conn == nil && err == nil {
- err = errors.New("net/http: Transport.DialTLS or DialTLSContext returned (nil, nil)")
- }
- return
-}
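-
-// Editor's note: a sketch, not in the original source, of supplying the
-// custom TLS dialer consulted above. The dialer must complete the TLS
-// handshake itself, since the Transport assumes the returned conn is
-// already past it.
-//
-//	t := &Transport{
-//		DialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
-//			d := tls.Dialer{Config: &tls.Config{MinVersion: tls.VersionTLS12}}
-//			return d.DialContext(ctx, network, addr) // handshakes before returning
-//		},
-//	}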
-
-// getConn dials and creates a new persistConn to the target as
-// specified in the connectMethod. This includes doing a proxy CONNECT
-// and/or setting up TLS. If this doesn't return an error, the persistConn
-// is ready to write requests to.
-func (t *Transport) getConn(treq *transportRequest, cm connectMethod) (pc *persistConn, err error) {
- req := treq.Request
- trace := treq.trace
- ctx := req.Context()
- if trace != nil && trace.GetConn != nil {
- trace.GetConn(cm.addr())
- }
-
- w := &wantConn{
- cm: cm,
- key: cm.key(),
- ctx: ctx,
- ready: make(chan struct{}, 1),
- beforeDial: testHookPrePendingDial,
- afterDial: testHookPostPendingDial,
- }
- defer func() {
- if err != nil {
- w.cancel(t, err)
- }
- }()
-
- // Queue for idle connection.
- if delivered := t.queueForIdleConn(w); delivered {
- pc := w.pc
- // Trace only for HTTP/1.
- // HTTP/2 calls trace.GotConn itself.
- if pc.alt == nil && trace != nil && trace.GotConn != nil {
- trace.GotConn(pc.gotIdleConnTrace(pc.idleAt))
- }
- // set request canceler to some non-nil function so we
- // can detect whether it was cleared between now and when
- // we enter roundTrip
- t.setReqCanceler(treq.cancelKey, func(error) {})
- return pc, nil
- }
-
- cancelc := make(chan error, 1)
- t.setReqCanceler(treq.cancelKey, func(err error) { cancelc <- err })
-
- // Queue for permission to dial.
- t.queueForDial(w)
-
- // Wait for completion or cancellation.
- select {
- case <-w.ready:
- // Trace success but only for HTTP/1.
- // HTTP/2 calls trace.GotConn itself.
- if w.pc != nil && w.pc.alt == nil && trace != nil && trace.GotConn != nil {
- trace.GotConn(httptrace.GotConnInfo{Conn: w.pc.conn, Reused: w.pc.isReused()})
- }
- if w.err != nil {
- // If the request has been canceled, that's probably
- // what caused w.err; if so, prefer to return the
- // cancellation error (see golang.org/issue/16049).
- select {
- case <-req.Cancel:
- return nil, errRequestCanceledConn
- case <-req.Context().Done():
- return nil, req.Context().Err()
- case err := <-cancelc:
- if err == errRequestCanceled {
- err = errRequestCanceledConn
- }
- return nil, err
- default:
- // return below
- }
- }
- return w.pc, w.err
- case <-req.Cancel:
- return nil, errRequestCanceledConn
- case <-req.Context().Done():
- return nil, req.Context().Err()
- case err := <-cancelc:
- if err == errRequestCanceled {
- err = errRequestCanceledConn
- }
- return nil, err
- }
-}
-
-// queueForDial queues w to wait for permission to begin dialing.
-// Once w receives permission to dial, it will do so in a separate goroutine.
-func (t *Transport) queueForDial(w *wantConn) {
- w.beforeDial()
- if t.MaxConnsPerHost <= 0 {
- go t.dialConnFor(w)
- return
- }
-
- t.connsPerHostMu.Lock()
- defer t.connsPerHostMu.Unlock()
-
- if n := t.connsPerHost[w.key]; n < t.MaxConnsPerHost {
- if t.connsPerHost == nil {
- t.connsPerHost = make(map[connectMethodKey]int)
- }
- t.connsPerHost[w.key] = n + 1
- go t.dialConnFor(w)
- return
- }
-
- if t.connsPerHostWait == nil {
- t.connsPerHostWait = make(map[connectMethodKey]wantConnQueue)
- }
- q := t.connsPerHostWait[w.key]
- q.cleanFront()
- q.pushBack(w)
- t.connsPerHostWait[w.key] = q
-}
-
-// dialConnFor dials on behalf of w and delivers the result to w.
-// dialConnFor has received permission to dial w.cm and is counted in t.connsPerHost[w.cm.key()].
-// If the dial is canceled or unsuccessful, dialConnFor decrements t.connsPerHost[w.cm.key()].
-func (t *Transport) dialConnFor(w *wantConn) {
- defer w.afterDial()
-
- pc, err := t.dialConn(w.ctx, w.cm)
- delivered := w.tryDeliver(pc, err)
- if err == nil && (!delivered || pc.alt != nil) {
- // pconn was not passed to w,
- // or it is HTTP/2 and can be shared.
- // Add to the idle connection pool.
- t.putOrCloseIdleConn(pc)
- }
- if err != nil {
- t.decConnsPerHost(w.key)
- }
-}
-
-// decConnsPerHost decrements the per-host connection count for key,
-// which may in turn give a different waiting goroutine permission to dial.
-func (t *Transport) decConnsPerHost(key connectMethodKey) {
- if t.MaxConnsPerHost <= 0 {
- return
- }
-
- t.connsPerHostMu.Lock()
- defer t.connsPerHostMu.Unlock()
- n := t.connsPerHost[key]
- if n == 0 {
- // Shouldn't happen, but if it does, the counting is buggy and could
- // easily lead to a silent deadlock, so report the problem loudly.
- panic("net/http: internal error: connCount underflow")
- }
-
- // Can we hand this count to a goroutine still waiting to dial?
- // (Some goroutines on the wait list may have timed out or
- // gotten a connection another way. If they're all gone,
- // we don't want to kick off any spurious dial operations.)
- if q := t.connsPerHostWait[key]; q.len() > 0 {
- done := false
- for q.len() > 0 {
- w := q.popFront()
- if w.waiting() {
- go t.dialConnFor(w)
- done = true
- break
- }
- }
- if q.len() == 0 {
- delete(t.connsPerHostWait, key)
- } else {
- // q is a value (like a slice), so we have to store
- // the updated q back into the map.
- t.connsPerHostWait[key] = q
- }
- if done {
- return
- }
- }
-
- // Otherwise, decrement the recorded count.
- if n--; n == 0 {
- delete(t.connsPerHost, key)
- } else {
- t.connsPerHost[key] = n
- }
-}
-
-// addTLS adds TLS to a persistent connection, i.e. negotiates a TLS session.
-// If pconn is already a TLS tunnel, this function establishes a nested TLS
-// session inside the encrypted channel.
-// The remote endpoint's name may be overridden by TLSClientConfig.ServerName.
-func (pconn *persistConn) addTLS(ctx context.Context, name string, trace *httptrace.ClientTrace) error {
- // Initiate TLS and check remote host name against certificate.
- cfg := cloneTLSConfig(pconn.t.TLSClientConfig)
- if cfg.ServerName == "" {
- cfg.ServerName = name
- }
- if pconn.cacheKey.onlyH1 {
- cfg.NextProtos = nil
- }
- plainConn := pconn.conn
- tlsConn := tls.Client(plainConn, cfg)
- errc := make(chan error, 2)
- var timer *time.Timer // for canceling TLS handshake
- if d := pconn.t.TLSHandshakeTimeout; d != 0 {
- timer = time.AfterFunc(d, func() {
- errc <- tlsHandshakeTimeoutError{}
- })
- }
- go func() {
- if trace != nil && trace.TLSHandshakeStart != nil {
- trace.TLSHandshakeStart()
- }
- err := tlsConn.HandshakeContext(ctx)
- if timer != nil {
- timer.Stop()
- }
- errc <- err
- }()
- if err := <-errc; err != nil {
- plainConn.Close()
- if trace != nil && trace.TLSHandshakeDone != nil {
- trace.TLSHandshakeDone(tls.ConnectionState{}, err)
- }
- return err
- }
- cs := tlsConn.ConnectionState()
- if trace != nil && trace.TLSHandshakeDone != nil {
- trace.TLSHandshakeDone(cs, nil)
- }
- pconn.tlsState = &cs
- pconn.conn = tlsConn
- return nil
-}
-
-type erringRoundTripper interface {
- RoundTripErr() error
-}
-
-func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (pconn *persistConn, err error) {
- pconn = &persistConn{
- t: t,
- cacheKey: cm.key(),
- reqch: make(chan requestAndChan, 1),
- writech: make(chan writeRequest, 1),
- closech: make(chan struct{}),
- writeErrCh: make(chan error, 1),
- writeLoopDone: make(chan struct{}),
- }
- trace := httptrace.ContextClientTrace(ctx)
- wrapErr := func(err error) error {
- if cm.proxyURL != nil {
- // Return a typed error, per Issue 16997
- return &net.OpError{Op: "proxyconnect", Net: "tcp", Err: err}
- }
- return err
- }
- if cm.scheme() == "https" && t.hasCustomTLSDialer() {
- var err error
- pconn.conn, err = t.customDialTLS(ctx, "tcp", cm.addr())
- if err != nil {
- return nil, wrapErr(err)
- }
- if tc, ok := pconn.conn.(*tls.Conn); ok {
- // Handshake here, in case DialTLS didn't. TLSNextProto below
- // depends on it for knowing the connection state.
- if trace != nil && trace.TLSHandshakeStart != nil {
- trace.TLSHandshakeStart()
- }
- if err := tc.HandshakeContext(ctx); err != nil {
- go pconn.conn.Close()
- if trace != nil && trace.TLSHandshakeDone != nil {
- trace.TLSHandshakeDone(tls.ConnectionState{}, err)
- }
- return nil, err
- }
- cs := tc.ConnectionState()
- if trace != nil && trace.TLSHandshakeDone != nil {
- trace.TLSHandshakeDone(cs, nil)
- }
- pconn.tlsState = &cs
- }
- } else {
- conn, err := t.dial(ctx, "tcp", cm.addr())
- if err != nil {
- return nil, wrapErr(err)
- }
- pconn.conn = conn
- if cm.scheme() == "https" {
- var firstTLSHost string
- if firstTLSHost, _, err = net.SplitHostPort(cm.addr()); err != nil {
- return nil, wrapErr(err)
- }
- if err = pconn.addTLS(ctx, firstTLSHost, trace); err != nil {
- return nil, wrapErr(err)
- }
- }
- }
-
- // Proxy setup.
- switch {
- case cm.proxyURL == nil:
- // Do nothing. Not using a proxy.
- case cm.proxyURL.Scheme == "socks5":
- conn := pconn.conn
- d := socksNewDialer("tcp", conn.RemoteAddr().String())
- if u := cm.proxyURL.User; u != nil {
- auth := &socksUsernamePassword{
- Username: u.Username(),
- }
- auth.Password, _ = u.Password()
- d.AuthMethods = []socksAuthMethod{
- socksAuthMethodNotRequired,
- socksAuthMethodUsernamePassword,
- }
- d.Authenticate = auth.Authenticate
- }
- if _, err := d.DialWithConn(ctx, conn, "tcp", cm.targetAddr); err != nil {
- conn.Close()
- return nil, err
- }
- case cm.targetScheme == "http":
- pconn.isProxy = true
- if pa := cm.proxyAuth(); pa != "" {
- pconn.mutateHeaderFunc = func(h Header) {
- h.Set("Proxy-Authorization", pa)
- }
- }
- case cm.targetScheme == "https":
- conn := pconn.conn
- var hdr Header
- if t.GetProxyConnectHeader != nil {
- var err error
- hdr, err = t.GetProxyConnectHeader(ctx, cm.proxyURL, cm.targetAddr)
- if err != nil {
- conn.Close()
- return nil, err
- }
- } else {
- hdr = t.ProxyConnectHeader
- }
- if hdr == nil {
- hdr = make(Header)
- }
- if pa := cm.proxyAuth(); pa != "" {
- hdr = hdr.Clone()
- hdr.Set("Proxy-Authorization", pa)
- }
- connectReq := &Request{
- Method: "CONNECT",
- URL: &url.URL{Opaque: cm.targetAddr},
- Host: cm.targetAddr,
- Header: hdr,
- }
-
- // If there's no done channel (no deadline or cancellation
- // from the caller possible), at least set some (long)
- // timeout here. This will make sure we don't block forever
- // and leak a goroutine if the connection stops replying
- // after the TCP connect.
- connectCtx := ctx
- if ctx.Done() == nil {
- newCtx, cancel := context.WithTimeout(ctx, 1*time.Minute)
- defer cancel()
- connectCtx = newCtx
- }
-
- didReadResponse := make(chan struct{}) // closed after CONNECT write+read is done or fails
- var (
- resp *Response
- err error // write or read error
- )
- // Write the CONNECT request & read the response.
- go func() {
- defer close(didReadResponse)
- err = connectReq.Write(conn)
- if err != nil {
- return
- }
- // Okay to use and discard buffered reader here, because
- // TLS server will not speak until spoken to.
- br := bufio.NewReader(conn)
- resp, err = ReadResponse(br, connectReq)
- }()
- select {
- case <-connectCtx.Done():
- conn.Close()
- <-didReadResponse
- return nil, connectCtx.Err()
- case <-didReadResponse:
- // resp or err now set
- }
- if err != nil {
- conn.Close()
- return nil, err
- }
- if resp.StatusCode != 200 {
- _, text, ok := strings.Cut(resp.Status, " ")
- conn.Close()
- if !ok {
- return nil, errors.New("unknown status code")
- }
- return nil, errors.New(text)
- }
- }
-
- if cm.proxyURL != nil && cm.targetScheme == "https" {
- if err := pconn.addTLS(ctx, cm.tlsHost(), trace); err != nil {
- return nil, err
- }
- }
-
- if s := pconn.tlsState; s != nil && s.NegotiatedProtocolIsMutual && s.NegotiatedProtocol != "" {
- if next, ok := t.TLSNextProto[s.NegotiatedProtocol]; ok {
- alt := next(cm.targetAddr, pconn.conn.(*tls.Conn))
- if e, ok := alt.(erringRoundTripper); ok {
- // pconn.conn was closed by next (http2configureTransports.upgradeFn).
- return nil, e.RoundTripErr()
- }
- return &persistConn{t: t, cacheKey: pconn.cacheKey, alt: alt}, nil
- }
- }
-
- pconn.br = bufio.NewReaderSize(pconn, t.readBufferSize())
- pconn.bw = bufio.NewWriterSize(persistConnWriter{pconn}, t.writeBufferSize())
-
- go pconn.readLoop()
- go pconn.writeLoop()
- return pconn, nil
-}
-
-// persistConnWriter is the io.Writer written to by pc.bw.
-// It accumulates the number of bytes written to the underlying conn,
-// so the retry logic can determine whether any bytes made it across
-// the wire.
-// This is exactly 1 pointer field wide so it can go into an interface
-// without allocation.
-type persistConnWriter struct {
- pc *persistConn
-}
-
-func (w persistConnWriter) Write(p []byte) (n int, err error) {
- n, err = w.pc.conn.Write(p)
- w.pc.nwrite += int64(n)
- return
-}
-
-// ReadFrom exposes persistConnWriter's underlying Conn to io.Copy; if the
-// Conn implements io.ReaderFrom, io.Copy can take advantage of optimizations
-// such as sendfile.
-func (w persistConnWriter) ReadFrom(r io.Reader) (n int64, err error) {
- n, err = io.Copy(w.pc.conn, r)
- w.pc.nwrite += n
- return
-}
-
-var _ io.ReaderFrom = (*persistConnWriter)(nil)
-
-// connectMethod is the map key (in its String form) for keeping persistent
-// TCP connections alive for subsequent HTTP requests.
-//
-// A connect method may be of the following types:
-//
-// connectMethod.key().String() Description
-// ------------------------------ -------------------------
-// |http|foo.com http directly to server, no proxy
-// |https|foo.com https directly to server, no proxy
-// |https,h1|foo.com https directly to server w/o HTTP/2, no proxy
-// http://proxy.com|https|foo.com http to proxy, then CONNECT to foo.com
-// http://proxy.com|http http to proxy, http to anywhere after that
-// socks5://proxy.com|http|foo.com socks5 to proxy, then http to foo.com
-// socks5://proxy.com|https|foo.com socks5 to proxy, then https to foo.com
-// https://proxy.com|https|foo.com https to proxy, then CONNECT to foo.com
-// https://proxy.com|http https to proxy, http to anywhere after that
-//
-type connectMethod struct {
- _ incomparable
- proxyURL *url.URL // nil for no proxy, else full proxy URL
- targetScheme string // "http" or "https"
- // If proxyURL specifies an http or https proxy, and targetScheme is http (not https),
- // then targetAddr is not included in the connect method key, because the socket can
- // be reused for different targetAddr values.
- targetAddr string
- onlyH1 bool // whether to disable HTTP/2 and force HTTP/1
-}
-
-func (cm *connectMethod) key() connectMethodKey {
- proxyStr := ""
- targetAddr := cm.targetAddr
- if cm.proxyURL != nil {
- proxyStr = cm.proxyURL.String()
- if (cm.proxyURL.Scheme == "http" || cm.proxyURL.Scheme == "https") && cm.targetScheme == "http" {
- targetAddr = ""
- }
- }
- return connectMethodKey{
- proxy: proxyStr,
- scheme: cm.targetScheme,
- addr: targetAddr,
- onlyH1: cm.onlyH1,
- }
-}
-
-// scheme returns the first hop scheme: http, https, or socks5
-func (cm *connectMethod) scheme() string {
- if cm.proxyURL != nil {
- return cm.proxyURL.Scheme
- }
- return cm.targetScheme
-}
-
-// addr returns the first hop "host:port" to which we need to TCP connect.
-func (cm *connectMethod) addr() string {
- if cm.proxyURL != nil {
- return canonicalAddr(cm.proxyURL)
- }
- return cm.targetAddr
-}
-
-// tlsHost returns the host name to match against the peer's
-// TLS certificate.
-func (cm *connectMethod) tlsHost() string {
- h := cm.targetAddr
- if hasPort(h) {
- h = h[:strings.LastIndex(h, ":")]
- }
- return h
-}
-
-// connectMethodKey is the map key version of connectMethod, with a
-// stringified proxy URL (or the empty string) instead of a pointer to
-// a URL.
-type connectMethodKey struct {
- proxy, scheme, addr string
- onlyH1 bool
-}
-
-func (k connectMethodKey) String() string {
- // Only used by tests.
- var h1 string
- if k.onlyH1 {
- h1 = ",h1"
- }
- return fmt.Sprintf("%s|%s%s|%s", k.proxy, k.scheme, h1, k.addr)
-}
-
-// persistConn wraps a connection, usually a persistent one
-// (but may be used for non-keep-alive requests as well)
-type persistConn struct {
- // alt optionally specifies the TLS NextProto RoundTripper.
- // This is used for HTTP/2 today and future protocols later.
- // If it's non-nil, the rest of the fields are unused.
- alt RoundTripper
-
- t *Transport
- cacheKey connectMethodKey
- conn net.Conn
- tlsState *tls.ConnectionState
- br *bufio.Reader // from conn
- bw *bufio.Writer // to conn
- nwrite int64 // bytes written
- reqch chan requestAndChan // written by roundTrip; read by readLoop
- writech chan writeRequest // written by roundTrip; read by writeLoop
- closech chan struct{} // closed when conn closed
- isProxy bool
- sawEOF bool // whether we've seen EOF from conn; owned by readLoop
- readLimit int64 // bytes allowed to be read; owned by readLoop
- // writeErrCh passes the request write error (usually nil)
- // from the writeLoop goroutine to the readLoop which passes
- // it off to the res.Body reader, which then uses it to decide
- // whether or not a connection can be reused. Issue 7569.
- writeErrCh chan error
-
- writeLoopDone chan struct{} // closed when write loop ends
-
- // Both guarded by Transport.idleMu:
- idleAt time.Time // time it last become idle
- idleTimer *time.Timer // holding an AfterFunc to close it
-
- mu sync.Mutex // guards following fields
- numExpectedResponses int
- closed error // set non-nil when conn is closed, before closech is closed
- canceledErr error // set non-nil if conn is canceled
- broken bool // an error has happened on this connection; marked broken so it's not reused.
- reused bool // whether conn has had successful request/response and is being reused.
- // mutateHeaderFunc is an optional func to modify extra
- // headers on each outbound request before it's written. (the
- // original Request given to RoundTrip is not modified)
- mutateHeaderFunc func(Header)
-}
-
-func (pc *persistConn) maxHeaderResponseSize() int64 {
- if v := pc.t.MaxResponseHeaderBytes; v != 0 {
- return v
- }
- return 10 << 20 // conservative default; same as http2
-}
-
-func (pc *persistConn) Read(p []byte) (n int, err error) {
- if pc.readLimit <= 0 {
- return 0, fmt.Errorf("read limit of %d bytes exhausted", pc.maxHeaderResponseSize())
- }
- if int64(len(p)) > pc.readLimit {
- p = p[:pc.readLimit]
- }
- n, err = pc.conn.Read(p)
- if err == io.EOF {
- pc.sawEOF = true
- }
- pc.readLimit -= int64(n)
- return
-}
-
-// isBroken reports whether this connection is in a known broken state.
-func (pc *persistConn) isBroken() bool {
- pc.mu.Lock()
- b := pc.closed != nil
- pc.mu.Unlock()
- return b
-}
-
-// canceled returns non-nil if the connection was closed due to
-// CancelRequest or due to context cancellation.
-func (pc *persistConn) canceled() error {
- pc.mu.Lock()
- defer pc.mu.Unlock()
- return pc.canceledErr
-}
-
-// isReused reports whether this connection has been used before.
-func (pc *persistConn) isReused() bool {
- pc.mu.Lock()
- r := pc.reused
- pc.mu.Unlock()
- return r
-}
-
-func (pc *persistConn) gotIdleConnTrace(idleAt time.Time) (t httptrace.GotConnInfo) {
- pc.mu.Lock()
- defer pc.mu.Unlock()
- t.Reused = pc.reused
- t.Conn = pc.conn
- t.WasIdle = true
- if !idleAt.IsZero() {
- t.IdleTime = time.Since(idleAt)
- }
- return
-}
-
-func (pc *persistConn) cancelRequest(err error) {
- pc.mu.Lock()
- defer pc.mu.Unlock()
- pc.canceledErr = err
- pc.closeLocked(errRequestCanceled)
-}
-
-// closeConnIfStillIdle closes the connection if it's still sitting idle.
-// This is what's called by the persistConn's idleTimer, and is run in its
-// own goroutine.
-func (pc *persistConn) closeConnIfStillIdle() {
- t := pc.t
- t.idleMu.Lock()
- defer t.idleMu.Unlock()
- if _, ok := t.idleLRU.m[pc]; !ok {
- // Not idle.
- return
- }
- t.removeIdleConnLocked(pc)
- pc.close(errIdleConnTimeout)
-}
-
-// mapRoundTripError returns the appropriate error value for
-// persistConn.roundTrip.
-//
-// The provided err is the first error that (*persistConn).roundTrip
-// happened to receive from its select statement.
-//
-// The startBytesWritten value should be the value of pc.nwrite before the roundTrip
-// started writing the request.
-func (pc *persistConn) mapRoundTripError(req *transportRequest, startBytesWritten int64, err error) error {
- if err == nil {
- return nil
- }
-
- // Wait for the writeLoop goroutine to terminate to avoid data
- // races on callers who mutate the request on failure.
- //
- // When resc in pc.roundTrip and hence rc.ch receives a responseAndError
- // with a non-nil error it implies that the persistConn is either closed
- // or closing. Waiting on pc.writeLoopDone is hence safe as all callers
- // close closech which in turn ensures writeLoop returns.
- <-pc.writeLoopDone
-
- // If the request was canceled, that's better than network
- // failures that were likely the result of tearing down the
- // connection.
- if cerr := pc.canceled(); cerr != nil {
- return cerr
- }
-
- // See if an error was set explicitly.
- req.mu.Lock()
- reqErr := req.err
- req.mu.Unlock()
- if reqErr != nil {
- return reqErr
- }
-
- if err == errServerClosedIdle {
- // Don't decorate
- return err
- }
-
- if _, ok := err.(transportReadFromServerError); ok {
- if pc.nwrite == startBytesWritten {
- return nothingWrittenError{err}
- }
- // Don't decorate
- return err
- }
- if pc.isBroken() {
- if pc.nwrite == startBytesWritten {
- return nothingWrittenError{err}
- }
- return fmt.Errorf("net/http: HTTP/1.x transport connection broken: %v", err)
- }
- return err
-}
-
-// errCallerOwnsConn is an internal sentinel error used when we hand
-// off a writable response.Body to the caller. We use this to prevent
-// closing a net.Conn that is now owned by the caller.
-var errCallerOwnsConn = errors.New("read loop ending; caller owns writable underlying conn")
-
-func (pc *persistConn) readLoop() {
- closeErr := errReadLoopExiting // default value, if not changed below
- defer func() {
- pc.close(closeErr)
- pc.t.removeIdleConn(pc)
- }()
-
- tryPutIdleConn := func(trace *httptrace.ClientTrace) bool {
- if err := pc.t.tryPutIdleConn(pc); err != nil {
- closeErr = err
- if trace != nil && trace.PutIdleConn != nil && err != errKeepAlivesDisabled {
- trace.PutIdleConn(err)
- }
- return false
- }
- if trace != nil && trace.PutIdleConn != nil {
- trace.PutIdleConn(nil)
- }
- return true
- }
-
- // eofc is used to block caller goroutines reading from Response.Body
- // at EOF until this goroutine has (potentially) added the connection
- // back to the idle pool.
- eofc := make(chan struct{})
- defer close(eofc) // unblock reader on errors
-
- // Read this once, before loop starts. (to avoid races in tests)
- testHookMu.Lock()
- testHookReadLoopBeforeNextRead := testHookReadLoopBeforeNextRead
- testHookMu.Unlock()
-
- alive := true
- for alive {
- pc.readLimit = pc.maxHeaderResponseSize()
- _, err := pc.br.Peek(1)
-
- pc.mu.Lock()
- if pc.numExpectedResponses == 0 {
- pc.readLoopPeekFailLocked(err)
- pc.mu.Unlock()
- return
- }
- pc.mu.Unlock()
-
- rc := <-pc.reqch
- trace := httptrace.ContextClientTrace(rc.req.Context())
-
- var resp *Response
- if err == nil {
- resp, err = pc.readResponse(rc, trace)
- } else {
- err = transportReadFromServerError{err}
- closeErr = err
- }
-
- if err != nil {
- if pc.readLimit <= 0 {
- err = fmt.Errorf("net/http: server response headers exceeded %d bytes; aborted", pc.maxHeaderResponseSize())
- }
-
- select {
- case rc.ch <- responseAndError{err: err}:
- case <-rc.callerGone:
- return
- }
- return
- }
- pc.readLimit = maxInt64 // effectively no limit for response bodies
-
- pc.mu.Lock()
- pc.numExpectedResponses--
- pc.mu.Unlock()
-
- bodyWritable := resp.bodyIsWritable()
- hasBody := rc.req.Method != "HEAD" && resp.ContentLength != 0
-
- if resp.Close || rc.req.Close || resp.StatusCode <= 199 || bodyWritable {
- // Don't do keep-alive on error if either party requested a close
- // or we get an unexpected informational (1xx) response.
- // StatusCode 100 is already handled above.
- alive = false
- }
-
- if !hasBody || bodyWritable {
- replaced := pc.t.replaceReqCanceler(rc.cancelKey, nil)
-
- // Put the idle conn back into the pool before we send the response
- // so if they process it quickly and make another request, they'll
- // get this same conn. But we use the unbuffered channel 'rc'
- // to guarantee that persistConn.roundTrip got out of its select
- // potentially waiting for this persistConn to close.
- alive = alive &&
- !pc.sawEOF &&
- pc.wroteRequest() &&
- replaced && tryPutIdleConn(trace)
-
- if bodyWritable {
- closeErr = errCallerOwnsConn
- }
-
- select {
- case rc.ch <- responseAndError{res: resp}:
- case <-rc.callerGone:
- return
- }
-
- // Now that they've read from the unbuffered channel, they're safely
- // out of the select that also waits on this goroutine to die, so
- // we're allowed to exit now if needed (if alive is false)
- testHookReadLoopBeforeNextRead()
- continue
- }
-
- waitForBodyRead := make(chan bool, 2)
- body := &bodyEOFSignal{
- body: resp.Body,
- earlyCloseFn: func() error {
- waitForBodyRead <- false
- <-eofc // will be closed by deferred call at the end of the function
- return nil
-
- },
- fn: func(err error) error {
- isEOF := err == io.EOF
- waitForBodyRead <- isEOF
- if isEOF {
- <-eofc // see comment above eofc declaration
- } else if err != nil {
- if cerr := pc.canceled(); cerr != nil {
- return cerr
- }
- }
- return err
- },
- }
-
- resp.Body = body
- if rc.addedGzip && ascii.EqualFold(resp.Header.Get("Content-Encoding"), "gzip") {
- resp.Body = &gzipReader{body: body}
- resp.Header.Del("Content-Encoding")
- resp.Header.Del("Content-Length")
- resp.ContentLength = -1
- resp.Uncompressed = true
- }
-
- select {
- case rc.ch <- responseAndError{res: resp}:
- case <-rc.callerGone:
- return
- }
-
- // Before looping back to the top of this function and peeking on
- // the bufio.Reader, wait for the caller goroutine to finish
- // reading the response body. (or for cancellation or death)
- select {
- case bodyEOF := <-waitForBodyRead:
- replaced := pc.t.replaceReqCanceler(rc.cancelKey, nil) // before pc might return to idle pool
- alive = alive &&
- bodyEOF &&
- !pc.sawEOF &&
- pc.wroteRequest() &&
- replaced && tryPutIdleConn(trace)
- if bodyEOF {
- eofc <- struct{}{}
- }
- case <-rc.req.Cancel:
- alive = false
- pc.t.CancelRequest(rc.req)
- case <-rc.req.Context().Done():
- alive = false
- pc.t.cancelRequest(rc.cancelKey, rc.req.Context().Err())
- case <-pc.closech:
- alive = false
- }
-
- testHookReadLoopBeforeNextRead()
- }
-}
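
The reuse condition above (bodyEOF && !pc.sawEOF && pc.wroteRequest() && ...)
is why a connection returns to the idle pool only after the caller reads the
body to EOF. The usual client-side idiom, sketched with a placeholder URL:

    resp, err := http.Get("https://example.com/")
    if err != nil {
        log.Fatal(err)
    }
    io.Copy(io.Discard, resp.Body) // reach EOF so readLoop can recycle the conn
    resp.Body.Close()
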
-
-func (pc *persistConn) readLoopPeekFailLocked(peekErr error) {
- if pc.closed != nil {
- return
- }
- if n := pc.br.Buffered(); n > 0 {
- buf, _ := pc.br.Peek(n)
- if is408Message(buf) {
- pc.closeLocked(errServerClosedIdle)
- return
- } else {
- log.Printf("Unsolicited response received on idle HTTP channel starting with %q; err=%v", buf, peekErr)
- }
- }
- if peekErr == io.EOF {
- // common case.
- pc.closeLocked(errServerClosedIdle)
- } else {
- pc.closeLocked(fmt.Errorf("readLoopPeekFailLocked: %v", peekErr))
- }
-}
-
-// is408Message reports whether buf has the prefix of an
-// HTTP 408 Request Timeout response.
-// See golang.org/issue/32310.
-func is408Message(buf []byte) bool {
- if len(buf) < len("HTTP/1.x 408") {
- return false
- }
- if string(buf[:7]) != "HTTP/1." {
- return false
- }
- return string(buf[8:12]) == " 408"
-}
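
Only the fixed 12-byte prefix is inspected, so even a truncated status line
matches. Illustrative calls (inputs are hypothetical):

    is408Message([]byte("HTTP/1.1 408 Request Timeout")) // true
    is408Message([]byte("HTTP/1.0 408"))                 // true
    is408Message([]byte("HTTP/1.1 200 OK"))              // false
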
-
-// readResponse reads an HTTP response (or two, in the case of "Expect:
-// 100-continue") from the server. It returns the final non-100 one.
-// trace is optional.
-func (pc *persistConn) readResponse(rc requestAndChan, trace *httptrace.ClientTrace) (resp *Response, err error) {
- if trace != nil && trace.GotFirstResponseByte != nil {
- if peek, err := pc.br.Peek(1); err == nil && len(peek) == 1 {
- trace.GotFirstResponseByte()
- }
- }
- num1xx := 0 // number of informational 1xx headers received
- const max1xxResponses = 5 // arbitrary bound on number of informational responses
-
- continueCh := rc.continueCh
- for {
- resp, err = ReadResponse(pc.br, rc.req)
- if err != nil {
- return
- }
- resCode := resp.StatusCode
- if continueCh != nil {
- if resCode == 100 {
- if trace != nil && trace.Got100Continue != nil {
- trace.Got100Continue()
- }
- continueCh <- struct{}{}
- continueCh = nil
- } else if resCode >= 200 {
- close(continueCh)
- continueCh = nil
- }
- }
- is1xx := 100 <= resCode && resCode <= 199
- // treat 101 as a terminal status, see issue 26161
- is1xxNonTerminal := is1xx && resCode != StatusSwitchingProtocols
- if is1xxNonTerminal {
- num1xx++
- if num1xx > max1xxResponses {
- return nil, errors.New("net/http: too many 1xx informational responses")
- }
- pc.readLimit = pc.maxHeaderResponseSize() // reset the limit
- if trace != nil && trace.Got1xxResponse != nil {
- if err := trace.Got1xxResponse(resCode, textproto.MIMEHeader(resp.Header)); err != nil {
- return nil, err
- }
- }
- continue
- }
- break
- }
- if resp.isProtocolSwitch() {
- resp.Body = newReadWriteCloserBody(pc.br, pc.conn)
- }
-
- resp.TLS = pc.tlsState
- return
-}
-
- // waitForContinue returns a function that blocks until any response,
- // timeout, or connection close occurs. The returned bool reports
- // whether the request body should be sent.
-func (pc *persistConn) waitForContinue(continueCh <-chan struct{}) func() bool {
- if continueCh == nil {
- return nil
- }
- return func() bool {
- timer := time.NewTimer(pc.t.ExpectContinueTimeout)
- defer timer.Stop()
-
- select {
- case _, ok := <-continueCh:
- return ok
- case <-timer.C:
- return true
- case <-pc.closech:
- return false
- }
- }
-}
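
On the public API side this timeout is Transport.ExpectContinueTimeout. A
minimal sketch of a client opting into the 100-continue handshake (the URL
and payload are placeholders):

    req, err := http.NewRequest("PUT", "https://example.com/upload",
        strings.NewReader("payload"))
    if err != nil {
        log.Fatal(err)
    }
    req.Header.Set("Expect", "100-continue")
    client := &http.Client{
        Transport: &http.Transport{ExpectContinueTimeout: time.Second},
    }
    resp, err := client.Do(req) // body sent after 100 Continue or the timeout
    if err != nil {
        log.Fatal(err)
    }
    resp.Body.Close()
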
-
-func newReadWriteCloserBody(br *bufio.Reader, rwc io.ReadWriteCloser) io.ReadWriteCloser {
- body := &readWriteCloserBody{ReadWriteCloser: rwc}
- if br.Buffered() != 0 {
- body.br = br
- }
- return body
-}
-
-// readWriteCloserBody is the Response.Body type used when we want to
-// give users write access to the Body through the underlying
-// connection (TCP, unless using custom dialers). This is then
-// the concrete type for a Response.Body on the 101 Switching
-// Protocols response, as used by WebSockets, h2c, etc.
-type readWriteCloserBody struct {
- _ incomparable
- br *bufio.Reader // used until empty
- io.ReadWriteCloser
-}
-
-func (b *readWriteCloserBody) Read(p []byte) (n int, err error) {
- if b.br != nil {
- if n := b.br.Buffered(); len(p) > n {
- p = p[:n]
- }
- n, err = b.br.Read(p)
- if b.br.Buffered() == 0 {
- b.br = nil
- }
- return n, err
- }
- return b.ReadWriteCloser.Read(p)
-}
-
- // nothingWrittenError wraps a write error that ended up writing zero bytes.
-type nothingWrittenError struct {
- error
-}
-
-func (pc *persistConn) writeLoop() {
- defer close(pc.writeLoopDone)
- for {
- select {
- case wr := <-pc.writech:
- startBytesWritten := pc.nwrite
- err := wr.req.Request.write(pc.bw, pc.isProxy, wr.req.extra, pc.waitForContinue(wr.continueCh))
- if bre, ok := err.(requestBodyReadError); ok {
- err = bre.error
- // Errors reading from the user's
- // Request.Body are high priority.
- // Set it here before sending on the
- // channels below or calling
- // pc.close() which tears down
- // connections and causes other
- // errors.
- wr.req.setError(err)
- }
- if err == nil {
- err = pc.bw.Flush()
- }
- if err != nil {
- if pc.nwrite == startBytesWritten {
- err = nothingWrittenError{err}
- }
- }
- pc.writeErrCh <- err // to the body reader, which might recycle us
- wr.ch <- err // to the roundTrip function
- if err != nil {
- pc.close(err)
- return
- }
- case <-pc.closech:
- return
- }
- }
-}
-
- // maxWriteWaitBeforeConnReuse is how long a Transport RoundTrip
-// will wait to see the Request's Body.Write result after getting a
-// response from the server. See comments in (*persistConn).wroteRequest.
-const maxWriteWaitBeforeConnReuse = 50 * time.Millisecond
-
- // wroteRequest is a check, made before recycling a connection, that the
- // previous write (from writeLoop above) happened and was successful.
-func (pc *persistConn) wroteRequest() bool {
- select {
- case err := <-pc.writeErrCh:
- // Common case: the write happened well before the response, so
- // avoid creating a timer.
- return err == nil
- default:
- // Rare case: the request was written in writeLoop above but
- // before it could send to pc.writeErrCh, the reader read it
- // all, processed it, and called us here. In this case, give the
- // write goroutine a bit of time to finish its send.
- //
- // Less rare case: We also get here in the legitimate case of
- // Issue 7569, where the writer is still writing (or stalled),
- // but the server has already replied. In this case, we don't
- // want to wait too long, and we want to return false so this
- // connection isn't re-used.
- t := time.NewTimer(maxWriteWaitBeforeConnReuse)
- defer t.Stop()
- select {
- case err := <-pc.writeErrCh:
- return err == nil
- case <-t.C:
- return false
- }
- }
-}
-
-// responseAndError is how the goroutine reading from an HTTP/1 server
-// communicates with the goroutine doing the RoundTrip.
-type responseAndError struct {
- _ incomparable
- res *Response // else use this response (see res method)
- err error
-}
-
-type requestAndChan struct {
- _ incomparable
- req *Request
- cancelKey cancelKey
- ch chan responseAndError // unbuffered; always send in select on callerGone
-
- // whether the Transport (as opposed to the user client code)
- // added the Accept-Encoding gzip header. If the Transport
- // set it, only then do we transparently decode the gzip.
- addedGzip bool
-
- // Optional blocking chan for Expect: 100-continue (for send).
- // If the request has an "Expect: 100-continue" header and
- // the server responds 100 Continue, readLoop sends a value
- // to writeLoop via this chan.
- continueCh chan<- struct{}
-
- callerGone <-chan struct{} // closed when roundTrip caller has returned
-}
-
-// A writeRequest is sent by the caller's goroutine to the
-// writeLoop's goroutine to write a request while the read loop
-// concurrently waits on both the write response and the server's
-// reply.
-type writeRequest struct {
- req *transportRequest
- ch chan<- error
-
- // Optional blocking chan for Expect: 100-continue (for receive).
- // If not nil, writeLoop blocks sending request body until
- // it receives from this chan.
- continueCh <-chan struct{}
-}
-
-type httpError struct {
- err string
- timeout bool
-}
-
-func (e *httpError) Error() string { return e.err }
-func (e *httpError) Timeout() bool { return e.timeout }
-func (e *httpError) Temporary() bool { return true }
-
-var errTimeout error = &httpError{err: "net/http: timeout awaiting response headers", timeout: true}
-
-// errRequestCanceled is set to be identical to the one from h2 to facilitate
-// testing.
-var errRequestCanceled = http2errRequestCanceled
-var errRequestCanceledConn = errors.New("net/http: request canceled while waiting for connection") // TODO: unify?
-
-func nop() {}
-
-// testHooks. Always non-nil.
-var (
- testHookEnterRoundTrip = nop
- testHookWaitResLoop = nop
- testHookRoundTripRetried = nop
- testHookPrePendingDial = nop
- testHookPostPendingDial = nop
-
- testHookMu sync.Locker = fakeLocker{} // guards following
- testHookReadLoopBeforeNextRead = nop
-)
-
-func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err error) {
- testHookEnterRoundTrip()
- if !pc.t.replaceReqCanceler(req.cancelKey, pc.cancelRequest) {
- pc.t.putOrCloseIdleConn(pc)
- return nil, errRequestCanceled
- }
- pc.mu.Lock()
- pc.numExpectedResponses++
- headerFn := pc.mutateHeaderFunc
- pc.mu.Unlock()
-
- if headerFn != nil {
- headerFn(req.extraHeaders())
- }
-
- // Ask for a compressed version if the caller didn't set their
- // own value for Accept-Encoding. We only attempt to
- // uncompress the gzip stream if we were the layer that
- // requested it.
- requestedGzip := false
- if !pc.t.DisableCompression &&
- req.Header.Get("Accept-Encoding") == "" &&
- req.Header.Get("Range") == "" &&
- req.Method != "HEAD" {
- // Request gzip only, not deflate. Deflate is ambiguous and
- // not as universally supported anyway.
- // See: https://zlib.net/zlib_faq.html#faq39
- //
- // Note that we don't request this for HEAD requests,
- // due to a bug in nginx:
- // https://trac.nginx.org/nginx/ticket/358
- // https://golang.org/issue/5522
- //
- // We don't request gzip if the request is for a range, since
- // auto-decoding a portion of a gzipped document will just fail
- // anyway. See https://golang.org/issue/8923
- requestedGzip = true
- req.extraHeaders().Set("Accept-Encoding", "gzip")
- }
-
- var continueCh chan struct{}
- if req.ProtoAtLeast(1, 1) && req.Body != nil && req.expectsContinue() {
- continueCh = make(chan struct{}, 1)
- }
-
- if pc.t.DisableKeepAlives &&
- !req.wantsClose() &&
- !isProtocolSwitchHeader(req.Header) {
- req.extraHeaders().Set("Connection", "close")
- }
-
- gone := make(chan struct{})
- defer close(gone)
-
- defer func() {
- if err != nil {
- pc.t.setReqCanceler(req.cancelKey, nil)
- }
- }()
-
- const debugRoundTrip = false
-
- // Write the request concurrently with waiting for a response,
- // in case the server decides to reply before reading our full
- // request body.
- startBytesWritten := pc.nwrite
- writeErrCh := make(chan error, 1)
- pc.writech <- writeRequest{req, writeErrCh, continueCh}
-
- resc := make(chan responseAndError)
- pc.reqch <- requestAndChan{
- req: req.Request,
- cancelKey: req.cancelKey,
- ch: resc,
- addedGzip: requestedGzip,
- continueCh: continueCh,
- callerGone: gone,
- }
-
- var respHeaderTimer <-chan time.Time
- cancelChan := req.Request.Cancel
- ctxDoneChan := req.Context().Done()
- pcClosed := pc.closech
- canceled := false
- for {
- testHookWaitResLoop()
- select {
- case err := <-writeErrCh:
- if debugRoundTrip {
- req.logf("writeErrCh resv: %T/%#v", err, err)
- }
- if err != nil {
- pc.close(fmt.Errorf("write error: %v", err))
- return nil, pc.mapRoundTripError(req, startBytesWritten, err)
- }
- if d := pc.t.ResponseHeaderTimeout; d > 0 {
- if debugRoundTrip {
- req.logf("starting timer for %v", d)
- }
- timer := time.NewTimer(d)
- defer timer.Stop() // prevent leaks
- respHeaderTimer = timer.C
- }
- case <-pcClosed:
- pcClosed = nil
- if canceled || pc.t.replaceReqCanceler(req.cancelKey, nil) {
- if debugRoundTrip {
- req.logf("closech recv: %T %#v", pc.closed, pc.closed)
- }
- return nil, pc.mapRoundTripError(req, startBytesWritten, pc.closed)
- }
- case <-respHeaderTimer:
- if debugRoundTrip {
- req.logf("timeout waiting for response headers.")
- }
- pc.close(errTimeout)
- return nil, errTimeout
- case re := <-resc:
- if (re.res == nil) == (re.err == nil) {
- panic(fmt.Sprintf("internal error: exactly one of res or err should be set; nil=%v", re.res == nil))
- }
- if debugRoundTrip {
- req.logf("resc recv: %p, %T/%#v", re.res, re.err, re.err)
- }
- if re.err != nil {
- return nil, pc.mapRoundTripError(req, startBytesWritten, re.err)
- }
- return re.res, nil
- case <-cancelChan:
- canceled = pc.t.cancelRequest(req.cancelKey, errRequestCanceled)
- cancelChan = nil
- case <-ctxDoneChan:
- canceled = pc.t.cancelRequest(req.cancelKey, req.Context().Err())
- cancelChan = nil
- ctxDoneChan = nil
- }
- }
-}
-
-// tLogKey is a context WithValue key for test debugging contexts containing
-// a t.Logf func. See export_test.go's Request.WithT method.
-type tLogKey struct{}
-
-func (tr *transportRequest) logf(format string, args ...any) {
- if logf, ok := tr.Request.Context().Value(tLogKey{}).(func(string, ...any)); ok {
- logf(time.Now().Format(time.RFC3339Nano)+": "+format, args...)
- }
-}
-
-// markReused marks this connection as having been successfully used for a
-// request and response.
-func (pc *persistConn) markReused() {
- pc.mu.Lock()
- pc.reused = true
- pc.mu.Unlock()
-}
-
-// close closes the underlying TCP connection and closes
-// the pc.closech channel.
-//
-// The provided err is only for testing and debugging; in normal
-// circumstances it should never be seen by users.
-func (pc *persistConn) close(err error) {
- pc.mu.Lock()
- defer pc.mu.Unlock()
- pc.closeLocked(err)
-}
-
-func (pc *persistConn) closeLocked(err error) {
- if err == nil {
- panic("nil error")
- }
- pc.broken = true
- if pc.closed == nil {
- pc.closed = err
- pc.t.decConnsPerHost(pc.cacheKey)
- // Close HTTP/1 (pc.alt == nil) connection.
- // HTTP/2 closes its connection itself.
- if pc.alt == nil {
- if err != errCallerOwnsConn {
- pc.conn.Close()
- }
- close(pc.closech)
- }
- }
- pc.mutateHeaderFunc = nil
-}
-
-var portMap = map[string]string{
- "http": "80",
- "https": "443",
- "socks5": "1080",
-}
-
- // canonicalAddr returns url.Host but always with a ":port" suffix.
-func canonicalAddr(url *url.URL) string {
- addr := url.Hostname()
- if v, err := idnaASCII(addr); err == nil {
- addr = v
- }
- port := url.Port()
- if port == "" {
- port = portMap[url.Scheme]
- }
- return net.JoinHostPort(addr, port)
-}
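
Worked examples of the mapping (host names are placeholders):

    https://example.com      -> "example.com:443"    (port taken from portMap)
    http://example.com:8080  -> "example.com:8080"   (explicit port wins)
    socks5://proxy.internal  -> "proxy.internal:1080"
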
-
-// bodyEOFSignal is used by the HTTP/1 transport when reading response
-// bodies to make sure we see the end of a response body before
-// proceeding and reading on the connection again.
-//
-// It wraps a ReadCloser but runs fn (if non-nil) at most
-// once, right before its final (error-producing) Read or Close call
-// returns. fn should return the new error to return from Read or Close.
-//
-// If earlyCloseFn is non-nil and Close is called before io.EOF is
-// seen, earlyCloseFn is called instead of fn, and its return value is
-// the return value from Close.
-type bodyEOFSignal struct {
- body io.ReadCloser
- mu sync.Mutex // guards following 4 fields
- closed bool // whether Close has been called
- rerr error // sticky Read error
- fn func(error) error // err will be nil on Read io.EOF
- earlyCloseFn func() error // optional alt Close func used if io.EOF not seen
-}
-
-var errReadOnClosedResBody = errors.New("http: read on closed response body")
-
-func (es *bodyEOFSignal) Read(p []byte) (n int, err error) {
- es.mu.Lock()
- closed, rerr := es.closed, es.rerr
- es.mu.Unlock()
- if closed {
- return 0, errReadOnClosedResBody
- }
- if rerr != nil {
- return 0, rerr
- }
-
- n, err = es.body.Read(p)
- if err != nil {
- es.mu.Lock()
- defer es.mu.Unlock()
- if es.rerr == nil {
- es.rerr = err
- }
- err = es.condfn(err)
- }
- return
-}
-
-func (es *bodyEOFSignal) Close() error {
- es.mu.Lock()
- defer es.mu.Unlock()
- if es.closed {
- return nil
- }
- es.closed = true
- if es.earlyCloseFn != nil && es.rerr != io.EOF {
- return es.earlyCloseFn()
- }
- err := es.body.Close()
- return es.condfn(err)
-}
-
-// caller must hold es.mu.
-func (es *bodyEOFSignal) condfn(err error) error {
- if es.fn == nil {
- return err
- }
- err = es.fn(err)
- es.fn = nil
- return err
-}
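
The one-shot fn is the heart of the mechanism: whichever call observes the
final Read error (or Close) fires the callback exactly once. A reduced
sketch of that pattern, with illustrative names:

    type eofNotifier struct {
        io.ReadCloser
        once sync.Once
        fn   func() // runs once, at the first observed io.EOF
    }

    func (e *eofNotifier) Read(p []byte) (int, error) {
        n, err := e.ReadCloser.Read(p)
        if err == io.EOF {
            e.once.Do(e.fn)
        }
        return n, err
    }
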
-
-// gzipReader wraps a response body so it can lazily
- // call gzip.NewReader on the first call to Read.
-type gzipReader struct {
- _ incomparable
- body *bodyEOFSignal // underlying HTTP/1 response body framing
- zr *gzip.Reader // lazily-initialized gzip reader
- zerr error // any error from gzip.NewReader; sticky
-}
-
-func (gz *gzipReader) Read(p []byte) (n int, err error) {
- if gz.zr == nil {
- if gz.zerr == nil {
- gz.zr, gz.zerr = gzip.NewReader(gz.body)
- }
- if gz.zerr != nil {
- return 0, gz.zerr
- }
- }
-
- gz.body.mu.Lock()
- if gz.body.closed {
- err = errReadOnClosedResBody
- }
- gz.body.mu.Unlock()
-
- if err != nil {
- return 0, err
- }
- return gz.zr.Read(p)
-}
-
-func (gz *gzipReader) Close() error {
- return gz.body.Close()
-}
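
From the caller's side, the transparent decompression wired up in readLoop
is visible only through Response fields. For a transport-requested gzip
response (URL is a placeholder):

    resp, err := http.Get("https://example.com/")
    // If the Transport added Accept-Encoding: gzip and the server complied:
    //   resp.Uncompressed  == true
    //   resp.ContentLength == -1
    //   Content-Encoding and Content-Length are deleted from resp.Header
    //   resp.Body yields the already-decoded bytes
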
-
-type tlsHandshakeTimeoutError struct{}
-
-func (tlsHandshakeTimeoutError) Timeout() bool { return true }
-func (tlsHandshakeTimeoutError) Temporary() bool { return true }
-func (tlsHandshakeTimeoutError) Error() string { return "net/http: TLS handshake timeout" }
-
-// fakeLocker is a sync.Locker which does nothing. It's used to guard
-// test-only fields when not under test, to avoid runtime atomic
-// overhead.
-type fakeLocker struct{}
-
-func (fakeLocker) Lock() {}
-func (fakeLocker) Unlock() {}
-
-// cloneTLSConfig returns a shallow clone of cfg, or a new zero tls.Config if
-// cfg is nil. This is safe to call even if cfg is in active use by a TLS
-// client or server.
-func cloneTLSConfig(cfg *tls.Config) *tls.Config {
- if cfg == nil {
- return &tls.Config{}
- }
- return cfg.Clone()
-}
-
-type connLRU struct {
- ll *list.List // list.Element.Value is of type *persistConn
- m map[*persistConn]*list.Element
-}
-
-// add adds pc to the head of the linked list.
-func (cl *connLRU) add(pc *persistConn) {
- if cl.ll == nil {
- cl.ll = list.New()
- cl.m = make(map[*persistConn]*list.Element)
- }
- ele := cl.ll.PushFront(pc)
- if _, ok := cl.m[pc]; ok {
- panic("persistConn was already in LRU")
- }
- cl.m[pc] = ele
-}
-
-func (cl *connLRU) removeOldest() *persistConn {
- ele := cl.ll.Back()
- pc := ele.Value.(*persistConn)
- cl.ll.Remove(ele)
- delete(cl.m, pc)
- return pc
-}
-
-// remove removes pc from cl.
-func (cl *connLRU) remove(pc *persistConn) {
- if ele, ok := cl.m[pc]; ok {
- cl.ll.Remove(ele)
- delete(cl.m, pc)
- }
-}
-
-// len returns the number of items in the cache.
-func (cl *connLRU) len() int {
- return len(cl.m)
-}
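
A short usage sketch of the LRU's contract (pc1 and pc2 stand in for
*persistConn values):

    var cl connLRU
    cl.add(pc1)                 // pc1 at the front
    cl.add(pc2)                 // pc2 is now most recently used
    oldest := cl.removeOldest() // returns pc1
    cl.remove(pc2)              // cl.len() == 0
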
diff --git a/contrib/go/_std_1.18/src/net/iprawsock_posix.go b/contrib/go/_std_1.18/src/net/iprawsock_posix.go
deleted file mode 100644
index 74f977e1ef..0000000000
--- a/contrib/go/_std_1.18/src/net/iprawsock_posix.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris || windows
-
-package net
-
-import (
- "context"
- "syscall"
-)
-
-func sockaddrToIP(sa syscall.Sockaddr) Addr {
- switch sa := sa.(type) {
- case *syscall.SockaddrInet4:
- return &IPAddr{IP: sa.Addr[0:]}
- case *syscall.SockaddrInet6:
- return &IPAddr{IP: sa.Addr[0:], Zone: zoneCache.name(int(sa.ZoneId))}
- }
- return nil
-}
-
-func (a *IPAddr) family() int {
- if a == nil || len(a.IP) <= IPv4len {
- return syscall.AF_INET
- }
- if a.IP.To4() != nil {
- return syscall.AF_INET
- }
- return syscall.AF_INET6
-}
-
-func (a *IPAddr) sockaddr(family int) (syscall.Sockaddr, error) {
- if a == nil {
- return nil, nil
- }
- return ipToSockaddr(family, a.IP, 0, a.Zone)
-}
-
-func (a *IPAddr) toLocal(net string) sockaddr {
- return &IPAddr{loopbackIP(net), a.Zone}
-}
-
-func (c *IPConn) readFrom(b []byte) (int, *IPAddr, error) {
- // TODO(cw,rsc): consider using readv if we know the family
- // type to avoid the header trim/copy
- var addr *IPAddr
- n, sa, err := c.fd.readFrom(b)
- switch sa := sa.(type) {
- case *syscall.SockaddrInet4:
- addr = &IPAddr{IP: sa.Addr[0:]}
- n = stripIPv4Header(n, b)
- case *syscall.SockaddrInet6:
- addr = &IPAddr{IP: sa.Addr[0:], Zone: zoneCache.name(int(sa.ZoneId))}
- }
- return n, addr, err
-}
-
-func stripIPv4Header(n int, b []byte) int {
- if len(b) < 20 {
- return n
- }
- l := int(b[0]&0x0f) << 2
- if 20 > l || l > len(b) {
- return n
- }
- if b[0]>>4 != 4 {
- return n
- }
- copy(b, b[l:])
- return n - l
-}
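
A worked example of the header-length arithmetic for a typical first byte
0x45:

    b[0]>>4   == 4        // IPv4 version check passes
    b[0]&0x0f == 5        // IHL, in 32-bit words
    l = 5 << 2 == 20      // copy(b, b[20:]) strips the 20-byte header
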
-
-func (c *IPConn) readMsg(b, oob []byte) (n, oobn, flags int, addr *IPAddr, err error) {
- var sa syscall.Sockaddr
- n, oobn, flags, sa, err = c.fd.readMsg(b, oob, 0)
- switch sa := sa.(type) {
- case *syscall.SockaddrInet4:
- addr = &IPAddr{IP: sa.Addr[0:]}
- case *syscall.SockaddrInet6:
- addr = &IPAddr{IP: sa.Addr[0:], Zone: zoneCache.name(int(sa.ZoneId))}
- }
- return
-}
-
-func (c *IPConn) writeTo(b []byte, addr *IPAddr) (int, error) {
- if c.fd.isConnected {
- return 0, ErrWriteToConnected
- }
- if addr == nil {
- return 0, errMissingAddress
- }
- sa, err := addr.sockaddr(c.fd.family)
- if err != nil {
- return 0, err
- }
- return c.fd.writeTo(b, sa)
-}
-
-func (c *IPConn) writeMsg(b, oob []byte, addr *IPAddr) (n, oobn int, err error) {
- if c.fd.isConnected {
- return 0, 0, ErrWriteToConnected
- }
- if addr == nil {
- return 0, 0, errMissingAddress
- }
- sa, err := addr.sockaddr(c.fd.family)
- if err != nil {
- return 0, 0, err
- }
- return c.fd.writeMsg(b, oob, sa)
-}
-
-func (sd *sysDialer) dialIP(ctx context.Context, laddr, raddr *IPAddr) (*IPConn, error) {
- network, proto, err := parseNetwork(ctx, sd.network, true)
- if err != nil {
- return nil, err
- }
- switch network {
- case "ip", "ip4", "ip6":
- default:
- return nil, UnknownNetworkError(sd.network)
- }
- fd, err := internetSocket(ctx, network, laddr, raddr, syscall.SOCK_RAW, proto, "dial", sd.Dialer.Control)
- if err != nil {
- return nil, err
- }
- return newIPConn(fd), nil
-}
-
-func (sl *sysListener) listenIP(ctx context.Context, laddr *IPAddr) (*IPConn, error) {
- network, proto, err := parseNetwork(ctx, sl.network, true)
- if err != nil {
- return nil, err
- }
- switch network {
- case "ip", "ip4", "ip6":
- default:
- return nil, UnknownNetworkError(sl.network)
- }
- fd, err := internetSocket(ctx, network, laddr, nil, syscall.SOCK_RAW, proto, "listen", sl.ListenConfig.Control)
- if err != nil {
- return nil, err
- }
- return newIPConn(fd), nil
-}
diff --git a/contrib/go/_std_1.18/src/net/ipsock_posix.go b/contrib/go/_std_1.18/src/net/ipsock_posix.go
deleted file mode 100644
index e433e8a91c..0000000000
--- a/contrib/go/_std_1.18/src/net/ipsock_posix.go
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris || windows
-
-package net
-
-import (
- "context"
- "internal/poll"
- "net/netip"
- "runtime"
- "syscall"
-)
-
-// probe probes IPv4, IPv6 and IPv4-mapped IPv6 communication
-// capabilities which are controlled by the IPV6_V6ONLY socket option
-// and kernel configuration.
-//
-// Should we try to use the IPv4 socket interface if we're only
-// dealing with IPv4 sockets? As long as the host system understands
-// IPv4-mapped IPv6, it's okay to pass IPv4-mapped IPv6 addresses to
-// the IPv6 interface. That simplifies our code and is most
-// general. Unfortunately, we need to run on kernels built without
-// IPv6 support too. So probe the kernel to figure it out.
-func (p *ipStackCapabilities) probe() {
- s, err := sysSocket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
- switch err {
- case syscall.EAFNOSUPPORT, syscall.EPROTONOSUPPORT:
- case nil:
- poll.CloseFunc(s)
- p.ipv4Enabled = true
- }
- var probes = []struct {
- laddr TCPAddr
- value int
- }{
- // IPv6 communication capability
- {laddr: TCPAddr{IP: ParseIP("::1")}, value: 1},
- // IPv4-mapped IPv6 address communication capability
- {laddr: TCPAddr{IP: IPv4(127, 0, 0, 1)}, value: 0},
- }
- switch runtime.GOOS {
- case "dragonfly", "openbsd":
- // The latest DragonFly BSD and OpenBSD kernels don't
- // support IPV6_V6ONLY=0. They always return an error
- // and we don't need to probe the capability.
- probes = probes[:1]
- }
- for i := range probes {
- s, err := sysSocket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
- if err != nil {
- continue
- }
- defer poll.CloseFunc(s)
- syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, probes[i].value)
- sa, err := probes[i].laddr.sockaddr(syscall.AF_INET6)
- if err != nil {
- continue
- }
- if err := syscall.Bind(s, sa); err != nil {
- continue
- }
- if i == 0 {
- p.ipv6Enabled = true
- } else {
- p.ipv4MappedIPv6Enabled = true
- }
- }
-}
-
-// favoriteAddrFamily returns the appropriate address family for the
-// given network, laddr, raddr and mode.
-//
-// If mode indicates "listen" and laddr is a wildcard, we assume that
-// the user wants to make a passive-open connection with a wildcard
-// address family, both AF_INET and AF_INET6, and a wildcard address
-// like the following:
-//
-// - A listen for a wildcard communication domain, "tcp" or
-// "udp", with a wildcard address: If the platform supports
-// both IPv6 and IPv4-mapped IPv6 communication capabilities,
-// or does not support IPv4, we use a dual stack, AF_INET6 and
-// IPV6_V6ONLY=0, wildcard address listen. The dual stack
-// wildcard address listen may fall back to an IPv6-only,
-// AF_INET6 and IPV6_V6ONLY=1, wildcard address listen.
-// Otherwise we prefer an IPv4-only, AF_INET, wildcard address
-// listen.
-//
-// - A listen for a wildcard communication domain, "tcp" or
-// "udp", with an IPv4 wildcard address: same as above.
-//
-// - A listen for a wildcard communication domain, "tcp" or
-// "udp", with an IPv6 wildcard address: same as above.
-//
-// - A listen for an IPv4 communication domain, "tcp4" or "udp4",
-// with an IPv4 wildcard address: We use an IPv4-only, AF_INET,
-// wildcard address listen.
-//
-// - A listen for an IPv6 communication domain, "tcp6" or "udp6",
-// with an IPv6 wildcard address: We use an IPv6-only, AF_INET6
-// and IPV6_V6ONLY=1, wildcard address listen.
-//
- // Otherwise guess: if the addresses are IPv4 then it returns AF_INET,
- // otherwise AF_INET6. It also returns a boolean value that reports
- // whether the IPV6_V6ONLY option should be set.
-//
-// Note that the latest DragonFly BSD and OpenBSD kernels allow
-// neither "net.inet6.ip6.v6only=1" change nor IPPROTO_IPV6 level
-// IPV6_V6ONLY socket option setting.
-func favoriteAddrFamily(network string, laddr, raddr sockaddr, mode string) (family int, ipv6only bool) {
- switch network[len(network)-1] {
- case '4':
- return syscall.AF_INET, false
- case '6':
- return syscall.AF_INET6, true
- }
-
- if mode == "listen" && (laddr == nil || laddr.isWildcard()) {
- if supportsIPv4map() || !supportsIPv4() {
- return syscall.AF_INET6, false
- }
- if laddr == nil {
- return syscall.AF_INET, false
- }
- return laddr.family(), false
- }
-
- if (laddr == nil || laddr.family() == syscall.AF_INET) &&
- (raddr == nil || raddr.family() == syscall.AF_INET) {
- return syscall.AF_INET, false
- }
- return syscall.AF_INET6, false
-}
-
-func internetSocket(ctx context.Context, net string, laddr, raddr sockaddr, sotype, proto int, mode string, ctrlFn func(string, string, syscall.RawConn) error) (fd *netFD, err error) {
- if (runtime.GOOS == "aix" || runtime.GOOS == "windows" || runtime.GOOS == "openbsd") && mode == "dial" && raddr.isWildcard() {
- raddr = raddr.toLocal(net)
- }
- family, ipv6only := favoriteAddrFamily(net, laddr, raddr, mode)
- return socket(ctx, net, family, sotype, proto, ipv6only, laddr, raddr, ctrlFn)
-}
-
-func ipToSockaddrInet4(ip IP, port int) (syscall.SockaddrInet4, error) {
- if len(ip) == 0 {
- ip = IPv4zero
- }
- ip4 := ip.To4()
- if ip4 == nil {
- return syscall.SockaddrInet4{}, &AddrError{Err: "non-IPv4 address", Addr: ip.String()}
- }
- sa := syscall.SockaddrInet4{Port: port}
- copy(sa.Addr[:], ip4)
- return sa, nil
-}
-
-func ipToSockaddrInet6(ip IP, port int, zone string) (syscall.SockaddrInet6, error) {
- // In general, an IP wildcard address, which is either
- // "0.0.0.0" or "::", means the entire IP addressing
- // space. For historical reasons, it is also used to
- // specify "any available address" in some operations
- // on an IP node.
- //
- // When the IP node supports IPv4-mapped IPv6 address,
- // we allow a listener to listen to the wildcard
- // address of both IP addressing spaces by specifying
- // IPv6 wildcard address.
- if len(ip) == 0 || ip.Equal(IPv4zero) {
- ip = IPv6zero
- }
- // We accept any IPv6 address including IPv4-mapped
- // IPv6 address.
- ip6 := ip.To16()
- if ip6 == nil {
- return syscall.SockaddrInet6{}, &AddrError{Err: "non-IPv6 address", Addr: ip.String()}
- }
- sa := syscall.SockaddrInet6{Port: port, ZoneId: uint32(zoneCache.index(zone))}
- copy(sa.Addr[:], ip6)
- return sa, nil
-}
-
-func ipToSockaddr(family int, ip IP, port int, zone string) (syscall.Sockaddr, error) {
- switch family {
- case syscall.AF_INET:
- sa, err := ipToSockaddrInet4(ip, port)
- if err != nil {
- return nil, err
- }
- return &sa, nil
- case syscall.AF_INET6:
- sa, err := ipToSockaddrInet6(ip, port, zone)
- if err != nil {
- return nil, err
- }
- return &sa, nil
- }
- return nil, &AddrError{Err: "invalid address family", Addr: ip.String()}
-}
-
-func addrPortToSockaddrInet4(ap netip.AddrPort) (syscall.SockaddrInet4, error) {
- // ipToSockaddrInet4 has special handling for zero-length slices.
- // We do not, because netip has no concept of a generic zero IP address.
- addr := ap.Addr()
- if !addr.Is4() {
- return syscall.SockaddrInet4{}, &AddrError{Err: "non-IPv4 address", Addr: addr.String()}
- }
- sa := syscall.SockaddrInet4{
- Addr: addr.As4(),
- Port: int(ap.Port()),
- }
- return sa, nil
-}
-
-func addrPortToSockaddrInet6(ap netip.AddrPort) (syscall.SockaddrInet6, error) {
- // ipToSockaddrInet6 has special handling for zero-length slices.
- // We do not, because netip has no concept of a generic zero IP address.
- addr := ap.Addr()
- if !addr.Is6() {
- return syscall.SockaddrInet6{}, &AddrError{Err: "non-IPv6 address", Addr: addr.String()}
- }
- sa := syscall.SockaddrInet6{
- Addr: addr.As16(),
- Port: int(ap.Port()),
- ZoneId: uint32(zoneCache.index(addr.Zone())),
- }
- return sa, nil
-}
diff --git a/contrib/go/_std_1.18/src/net/lookup.go b/contrib/go/_std_1.18/src/net/lookup.go
deleted file mode 100644
index c7b8dc6905..0000000000
--- a/contrib/go/_std_1.18/src/net/lookup.go
+++ /dev/null
@@ -1,667 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package net
-
-import (
- "context"
- "internal/nettrace"
- "internal/singleflight"
- "net/netip"
- "sync"
-)
-
-// protocols contains minimal mappings between internet protocol
-// names and numbers for platforms that don't have a complete list of
-// protocol numbers.
-//
-// See https://www.iana.org/assignments/protocol-numbers
-//
-// On Unix, this map is augmented by readProtocols via lookupProtocol.
-var protocols = map[string]int{
- "icmp": 1,
- "igmp": 2,
- "tcp": 6,
- "udp": 17,
- "ipv6-icmp": 58,
-}
-
-// services contains minimal mappings between services names and port
-// numbers for platforms that don't have a complete list of port numbers.
-//
-// See https://www.iana.org/assignments/service-names-port-numbers
-//
-// On Unix, this map is augmented by readServices via goLookupPort.
-var services = map[string]map[string]int{
- "udp": {
- "domain": 53,
- },
- "tcp": {
- "ftp": 21,
- "ftps": 990,
- "gopher": 70, // ʕ◔ϖ◔ʔ
- "http": 80,
- "https": 443,
- "imap2": 143,
- "imap3": 220,
- "imaps": 993,
- "pop3": 110,
- "pop3s": 995,
- "smtp": 25,
- "ssh": 22,
- "telnet": 23,
- },
-}
-
-// dnsWaitGroup can be used by tests to wait for all DNS goroutines to
-// complete. This avoids races on the test hooks.
-var dnsWaitGroup sync.WaitGroup
-
-const maxProtoLength = len("RSVP-E2E-IGNORE") + 10 // with room to grow
-
-func lookupProtocolMap(name string) (int, error) {
- var lowerProtocol [maxProtoLength]byte
- n := copy(lowerProtocol[:], name)
- lowerASCIIBytes(lowerProtocol[:n])
- proto, found := protocols[string(lowerProtocol[:n])]
- if !found || n != len(name) {
- return 0, &AddrError{Err: "unknown IP protocol specified", Addr: name}
- }
- return proto, nil
-}
-
-// maxPortBufSize is the longest reasonable name of a service
-// (non-numeric port).
-// Currently the longest known IANA-unregistered name is
-// "mobility-header", so we use that length, plus some slop in case
-// something longer is added in the future.
-const maxPortBufSize = len("mobility-header") + 10
-
-func lookupPortMap(network, service string) (port int, error error) {
- switch network {
- case "tcp4", "tcp6":
- network = "tcp"
- case "udp4", "udp6":
- network = "udp"
- }
-
- if m, ok := services[network]; ok {
- var lowerService [maxPortBufSize]byte
- n := copy(lowerService[:], service)
- lowerASCIIBytes(lowerService[:n])
- if port, ok := m[string(lowerService[:n])]; ok && n == len(service) {
- return port, nil
- }
- }
- return 0, &AddrError{Err: "unknown port", Addr: network + "/" + service}
-}
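
Illustrative lookups against the table above (matching is ASCII
case-insensitive, and tcp4/tcp6 collapse to tcp):

    lookupPortMap("tcp4", "HTTPS")  // 443, nil
    lookupPortMap("udp", "domain")  // 53, nil
    lookupPortMap("tcp", "nosuch")  // 0, &AddrError{Err: "unknown port", ...}
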
-
-// ipVersion returns the provided network's IP version: '4', '6' or 0
-// if network does not end in a '4' or '6' byte.
-func ipVersion(network string) byte {
- if network == "" {
- return 0
- }
- n := network[len(network)-1]
- if n != '4' && n != '6' {
- n = 0
- }
- return n
-}
-
-// DefaultResolver is the resolver used by the package-level Lookup
-// functions and by Dialers without a specified Resolver.
-var DefaultResolver = &Resolver{}
-
-// A Resolver looks up names and numbers.
-//
-// A nil *Resolver is equivalent to a zero Resolver.
-type Resolver struct {
- // PreferGo controls whether Go's built-in DNS resolver is preferred
- // on platforms where it's available. It is equivalent to setting
- // GODEBUG=netdns=go, but scoped to just this resolver.
- PreferGo bool
-
- // StrictErrors controls the behavior of temporary errors
- // (including timeout, socket errors, and SERVFAIL) when using
- // Go's built-in resolver. For a query composed of multiple
- // sub-queries (such as an A+AAAA address lookup, or walking the
- // DNS search list), this option causes such errors to abort the
- // whole query instead of returning a partial result. This is
- // not enabled by default because it may affect compatibility
- // with resolvers that process AAAA queries incorrectly.
- StrictErrors bool
-
- // Dial optionally specifies an alternate dialer for use by
- // Go's built-in DNS resolver to make TCP and UDP connections
- // to DNS services. The host in the address parameter will
- // always be a literal IP address and not a host name, and the
- // port in the address parameter will be a literal port number
- // and not a service name.
- // If the Conn returned is also a PacketConn, sent and received DNS
- // messages must adhere to RFC 1035 section 4.2.1, "UDP usage".
- // Otherwise, DNS messages transmitted over Conn must adhere
- // to RFC 7766 section 5, "Transport Protocol Selection".
- // If nil, the default dialer is used.
- Dial func(ctx context.Context, network, address string) (Conn, error)
-
- // lookupGroup merges LookupIPAddr calls together for lookups for the same
- // host. The lookupGroup key is the LookupIPAddr.host argument.
- // The return values are ([]IPAddr, error).
- lookupGroup singleflight.Group
-
- // TODO(bradfitz): optional interface impl override hook
- // TODO(bradfitz): Timeout time.Duration?
-}
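
A common use of the Dial hook is pinning all built-in-resolver traffic to a
single DNS server; a minimal sketch (the 8.8.8.8:53 address is only an
example):

    r := &net.Resolver{
        PreferGo: true,
        Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
            d := net.Dialer{Timeout: 5 * time.Second}
            // Ignore the default server address; always ask one resolver.
            return d.DialContext(ctx, network, "8.8.8.8:53")
        },
    }
    addrs, err := r.LookupHost(context.Background(), "example.com")
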
-
-func (r *Resolver) preferGo() bool { return r != nil && r.PreferGo }
-func (r *Resolver) strictErrors() bool { return r != nil && r.StrictErrors }
-
-func (r *Resolver) getLookupGroup() *singleflight.Group {
- if r == nil {
- return &DefaultResolver.lookupGroup
- }
- return &r.lookupGroup
-}
-
-// LookupHost looks up the given host using the local resolver.
-// It returns a slice of that host's addresses.
-//
-// LookupHost uses context.Background internally; to specify the context, use
-// Resolver.LookupHost.
-func LookupHost(host string) (addrs []string, err error) {
- return DefaultResolver.LookupHost(context.Background(), host)
-}
-
-// LookupHost looks up the given host using the local resolver.
-// It returns a slice of that host's addresses.
-func (r *Resolver) LookupHost(ctx context.Context, host string) (addrs []string, err error) {
- // Make sure that no matter what we do later, host=="" is rejected.
- // parseIP, for example, does accept empty strings.
- if host == "" {
- return nil, &DNSError{Err: errNoSuchHost.Error(), Name: host, IsNotFound: true}
- }
- if ip, _ := parseIPZone(host); ip != nil {
- return []string{host}, nil
- }
- return r.lookupHost(ctx, host)
-}
-
-// LookupIP looks up host using the local resolver.
-// It returns a slice of that host's IPv4 and IPv6 addresses.
-func LookupIP(host string) ([]IP, error) {
- addrs, err := DefaultResolver.LookupIPAddr(context.Background(), host)
- if err != nil {
- return nil, err
- }
- ips := make([]IP, len(addrs))
- for i, ia := range addrs {
- ips[i] = ia.IP
- }
- return ips, nil
-}
-
-// LookupIPAddr looks up host using the local resolver.
-// It returns a slice of that host's IPv4 and IPv6 addresses.
-func (r *Resolver) LookupIPAddr(ctx context.Context, host string) ([]IPAddr, error) {
- return r.lookupIPAddr(ctx, "ip", host)
-}
-
-// LookupIP looks up host for the given network using the local resolver.
-// It returns a slice of that host's IP addresses of the type specified by
-// network.
-// network must be one of "ip", "ip4" or "ip6".
-func (r *Resolver) LookupIP(ctx context.Context, network, host string) ([]IP, error) {
- afnet, _, err := parseNetwork(ctx, network, false)
- if err != nil {
- return nil, err
- }
- switch afnet {
- case "ip", "ip4", "ip6":
- default:
- return nil, UnknownNetworkError(network)
- }
- addrs, err := r.internetAddrList(ctx, afnet, host)
- if err != nil {
- return nil, err
- }
- ips := make([]IP, 0, len(addrs))
- for _, addr := range addrs {
- ips = append(ips, addr.(*IPAddr).IP)
- }
- return ips, nil
-}
-
-// LookupNetIP looks up host using the local resolver.
-// It returns a slice of that host's IP addresses of the type specified by
-// network.
-// The network must be one of "ip", "ip4" or "ip6".
-func (r *Resolver) LookupNetIP(ctx context.Context, network, host string) ([]netip.Addr, error) {
- // TODO(bradfitz): make this efficient, making the internal net package
- // type throughout be netip.Addr and only converting to the net.IP slice
- // version at the edge. But for now (2021-10-20), this is a wrapper around
- // the old way.
- ips, err := r.LookupIP(ctx, network, host)
- if err != nil {
- return nil, err
- }
- ret := make([]netip.Addr, 0, len(ips))
- for _, ip := range ips {
- if a, ok := netip.AddrFromSlice(ip); ok {
- ret = append(ret, a)
- }
- }
- return ret, nil
-}
-
-// onlyValuesCtx is a context that uses an underlying context
-// for value lookup if the underlying context hasn't yet expired.
-type onlyValuesCtx struct {
- context.Context
- lookupValues context.Context
-}
-
-var _ context.Context = (*onlyValuesCtx)(nil)
-
-// Value performs a lookup if the original context hasn't expired.
-func (ovc *onlyValuesCtx) Value(key any) any {
- select {
- case <-ovc.lookupValues.Done():
- return nil
- default:
- return ovc.lookupValues.Value(key)
- }
-}
-
-// withUnexpiredValuesPreserved returns a context.Context that only uses lookupCtx
-// for its values, otherwise it is never canceled and has no deadline.
-// If the lookup context expires, any looked up values will return nil.
-// See Issue 28600.
-func withUnexpiredValuesPreserved(lookupCtx context.Context) context.Context {
- return &onlyValuesCtx{Context: context.Background(), lookupValues: lookupCtx}
-}
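
A sketch of the resulting behavior (the key type is hypothetical):

    type ctxKey struct{}
    parent, cancel := context.WithCancel(
        context.WithValue(context.Background(), ctxKey{}, "v"))
    vctx := withUnexpiredValuesPreserved(parent)
    vctx.Value(ctxKey{}) // "v": parent still live, value visible
    cancel()
    vctx.Value(ctxKey{}) // nil: parent done, lookups stop
    // vctx itself is never canceled and has no deadline.
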
-
-// lookupIPAddr looks up host using the local resolver and particular network.
-// It returns a slice of that host's IPv4 and IPv6 addresses.
-func (r *Resolver) lookupIPAddr(ctx context.Context, network, host string) ([]IPAddr, error) {
- // Make sure that no matter what we do later, host=="" is rejected.
- // parseIP, for example, does accept empty strings.
- if host == "" {
- return nil, &DNSError{Err: errNoSuchHost.Error(), Name: host, IsNotFound: true}
- }
- if ip, zone := parseIPZone(host); ip != nil {
- return []IPAddr{{IP: ip, Zone: zone}}, nil
- }
- trace, _ := ctx.Value(nettrace.TraceKey{}).(*nettrace.Trace)
- if trace != nil && trace.DNSStart != nil {
- trace.DNSStart(host)
- }
- // The underlying resolver func is lookupIP by default but it
- // can be overridden by tests. This is needed by net/http, so it
- // uses a context key instead of unexported variables.
- resolverFunc := r.lookupIP
- if alt, _ := ctx.Value(nettrace.LookupIPAltResolverKey{}).(func(context.Context, string, string) ([]IPAddr, error)); alt != nil {
- resolverFunc = alt
- }
-
- // We don't want a cancellation of ctx to affect the
- // lookupGroup operation. Otherwise if our context gets
- // canceled it might cause an error to be returned to a lookup
- // using a completely different context. However we need to preserve
- // only the values in context. See Issue 28600.
- lookupGroupCtx, lookupGroupCancel := context.WithCancel(withUnexpiredValuesPreserved(ctx))
-
- lookupKey := network + "\000" + host
- dnsWaitGroup.Add(1)
- ch, called := r.getLookupGroup().DoChan(lookupKey, func() (any, error) {
- defer dnsWaitGroup.Done()
- return testHookLookupIP(lookupGroupCtx, resolverFunc, network, host)
- })
- if !called {
- dnsWaitGroup.Done()
- }
-
- select {
- case <-ctx.Done():
- // Our context was canceled. If we are the only
- // goroutine looking up this key, then drop the key
- // from the lookupGroup and cancel the lookup.
- // If there are other goroutines looking up this key,
- // let the lookup continue uncanceled, and let later
- // lookups with the same key share the result.
- // See issues 8602, 20703, 22724.
- if r.getLookupGroup().ForgetUnshared(lookupKey) {
- lookupGroupCancel()
- } else {
- go func() {
- <-ch
- lookupGroupCancel()
- }()
- }
- ctxErr := ctx.Err()
- err := &DNSError{
- Err: mapErr(ctxErr).Error(),
- Name: host,
- IsTimeout: ctxErr == context.DeadlineExceeded,
- }
- if trace != nil && trace.DNSDone != nil {
- trace.DNSDone(nil, false, err)
- }
- return nil, err
- case r := <-ch:
- lookupGroupCancel()
- err := r.Err
- if err != nil {
- if _, ok := err.(*DNSError); !ok {
- isTimeout := false
- if err == context.DeadlineExceeded {
- isTimeout = true
- } else if terr, ok := err.(timeout); ok {
- isTimeout = terr.Timeout()
- }
- err = &DNSError{
- Err: err.Error(),
- Name: host,
- IsTimeout: isTimeout,
- }
- }
- }
- if trace != nil && trace.DNSDone != nil {
- addrs, _ := r.Val.([]IPAddr)
- trace.DNSDone(ipAddrsEface(addrs), r.Shared, err)
- }
- return lookupIPReturn(r.Val, err, r.Shared)
- }
-}
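
The deduplication above is the singleflight pattern; its exported
counterpart lives in golang.org/x/sync/singleflight. A reduced sketch of
merging concurrent lookups for one host:

    var g singleflight.Group
    v, err, shared := g.Do("ip\x00example.com", func() (interface{}, error) {
        return net.LookupIP("example.com") // the real lookup runs once
    })
    // Concurrent callers using the same key all receive this v and err;
    // shared reports whether the result was handed to multiple callers.
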
-
-// lookupIPReturn turns the return values from singleflight.Do into
-// the return values from LookupIP.
-func lookupIPReturn(addrsi any, err error, shared bool) ([]IPAddr, error) {
- if err != nil {
- return nil, err
- }
- addrs := addrsi.([]IPAddr)
- if shared {
- clone := make([]IPAddr, len(addrs))
- copy(clone, addrs)
- addrs = clone
- }
- return addrs, nil
-}
-
-// ipAddrsEface returns an empty interface slice of addrs.
-func ipAddrsEface(addrs []IPAddr) []any {
- s := make([]any, len(addrs))
- for i, v := range addrs {
- s[i] = v
- }
- return s
-}
-
-// LookupPort looks up the port for the given network and service.
-//
-// LookupPort uses context.Background internally; to specify the context, use
-// Resolver.LookupPort.
-func LookupPort(network, service string) (port int, err error) {
- return DefaultResolver.LookupPort(context.Background(), network, service)
-}
-
-// LookupPort looks up the port for the given network and service.
-func (r *Resolver) LookupPort(ctx context.Context, network, service string) (port int, err error) {
- port, needsLookup := parsePort(service)
- if needsLookup {
- switch network {
- case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6":
- case "": // a hint wildcard for Go 1.0 undocumented behavior
- network = "ip"
- default:
- return 0, &AddrError{Err: "unknown network", Addr: network}
- }
- port, err = r.lookupPort(ctx, network, service)
- if err != nil {
- return 0, err
- }
- }
- if 0 > port || port > 65535 {
- return 0, &AddrError{Err: "invalid port", Addr: service}
- }
- return port, nil
-}
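
Typical calls (numeric services take the parsePort fast path and never
reach lookupPort):

    port, err := net.LookupPort("tcp", "https") // 443
    port, err = net.LookupPort("udp", "domain") // 53
    port, err = net.LookupPort("tcp", "8080")   // 8080, parsed directly
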
-
-// LookupCNAME returns the canonical name for the given host.
-// Callers that do not care about the canonical name can call
-// LookupHost or LookupIP directly; both take care of resolving
-// the canonical name as part of the lookup.
-//
-// A canonical name is the final name after following zero
-// or more CNAME records.
-// LookupCNAME does not return an error if host does not
-// contain DNS "CNAME" records, as long as host resolves to
-// address records.
-//
-// The returned canonical name is validated to be a properly
-// formatted presentation-format domain name.
-//
-// LookupCNAME uses context.Background internally; to specify the context, use
-// Resolver.LookupCNAME.
-func LookupCNAME(host string) (cname string, err error) {
- return DefaultResolver.LookupCNAME(context.Background(), host)
-}
-
-// LookupCNAME returns the canonical name for the given host.
-// Callers that do not care about the canonical name can call
-// LookupHost or LookupIP directly; both take care of resolving
-// the canonical name as part of the lookup.
-//
-// A canonical name is the final name after following zero
-// or more CNAME records.
-// LookupCNAME does not return an error if host does not
-// contain DNS "CNAME" records, as long as host resolves to
-// address records.
-//
-// The returned canonical name is validated to be a properly
-// formatted presentation-format domain name.
-func (r *Resolver) LookupCNAME(ctx context.Context, host string) (string, error) {
- cname, err := r.lookupCNAME(ctx, host)
- if err != nil {
- return "", err
- }
- if !isDomainName(cname) {
- return "", &DNSError{Err: errMalformedDNSRecordsDetail, Name: host}
- }
- return cname, nil
-}
-
-// LookupSRV tries to resolve an SRV query of the given service,
-// protocol, and domain name. The proto is "tcp" or "udp".
-// The returned records are sorted by priority and randomized
-// by weight within a priority.
-//
-// LookupSRV constructs the DNS name to look up following RFC 2782.
-// That is, it looks up _service._proto.name. To accommodate services
-// publishing SRV records under non-standard names, if both service
-// and proto are empty strings, LookupSRV looks up name directly.
-//
-// The returned service names are validated to be properly
-// formatted presentation-format domain names. If the response contains
-// invalid names, those records are filtered out and an error
-// will be returned alongside the remaining results, if any.
-func LookupSRV(service, proto, name string) (cname string, addrs []*SRV, err error) {
- return DefaultResolver.LookupSRV(context.Background(), service, proto, name)
-}
-
-// LookupSRV tries to resolve an SRV query of the given service,
-// protocol, and domain name. The proto is "tcp" or "udp".
-// The returned records are sorted by priority and randomized
-// by weight within a priority.
-//
-// LookupSRV constructs the DNS name to look up following RFC 2782.
-// That is, it looks up _service._proto.name. To accommodate services
-// publishing SRV records under non-standard names, if both service
-// and proto are empty strings, LookupSRV looks up name directly.
-//
-// The returned service names are validated to be properly
-// formatted presentation-format domain names. If the response contains
-// invalid names, those records are filtered out and an error
-// will be returned alongside the remaining results, if any.
-func (r *Resolver) LookupSRV(ctx context.Context, service, proto, name string) (string, []*SRV, error) {
- cname, addrs, err := r.lookupSRV(ctx, service, proto, name)
- if err != nil {
- return "", nil, err
- }
- if cname != "" && !isDomainName(cname) {
- return "", nil, &DNSError{Err: "SRV header name is invalid", Name: name}
- }
- filteredAddrs := make([]*SRV, 0, len(addrs))
- for _, addr := range addrs {
- if addr == nil {
- continue
- }
- if !isDomainName(addr.Target) {
- continue
- }
- filteredAddrs = append(filteredAddrs, addr)
- }
- if len(addrs) != len(filteredAddrs) {
- return cname, filteredAddrs, &DNSError{Err: errMalformedDNSRecordsDetail, Name: name}
- }
- return cname, filteredAddrs, nil
-}
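
A sketch of the RFC 2782 name construction (example.com is a placeholder):

    cname, srvs, err := net.LookupSRV("xmpp-server", "tcp", "example.com")
    // Queries _xmpp-server._tcp.example.com; srvs are sorted by Priority
    // and shuffled by Weight within each priority.
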
-
-// LookupMX returns the DNS MX records for the given domain name sorted by preference.
-//
-// The returned mail server names are validated to be properly
-// formatted presentation-format domain names. If the response contains
-// invalid names, those records are filtered out and an error
-// will be returned alongside the remaining results, if any.
-//
-// LookupMX uses context.Background internally; to specify the context, use
-// Resolver.LookupMX.
-func LookupMX(name string) ([]*MX, error) {
- return DefaultResolver.LookupMX(context.Background(), name)
-}
-
-// LookupMX returns the DNS MX records for the given domain name sorted by preference.
-//
-// The returned mail server names are validated to be properly
-// formatted presentation-format domain names. If the response contains
-// invalid names, those records are filtered out and an error
-// will be returned alongside the remaining results, if any.
-func (r *Resolver) LookupMX(ctx context.Context, name string) ([]*MX, error) {
- records, err := r.lookupMX(ctx, name)
- if err != nil {
- return nil, err
- }
- filteredMX := make([]*MX, 0, len(records))
- for _, mx := range records {
- if mx == nil {
- continue
- }
- if !isDomainName(mx.Host) {
- continue
- }
- filteredMX = append(filteredMX, mx)
- }
- if len(records) != len(filteredMX) {
- return filteredMX, &DNSError{Err: errMalformedDNSRecordsDetail, Name: name}
- }
- return filteredMX, nil
-}
-
-// LookupNS returns the DNS NS records for the given domain name.
-//
-// The returned name server names are validated to be properly
-// formatted presentation-format domain names. If the response contains
-// invalid names, those records are filtered out and an error
-// will be returned alongside the remaining results, if any.
-//
-// LookupNS uses context.Background internally; to specify the context, use
-// Resolver.LookupNS.
-func LookupNS(name string) ([]*NS, error) {
- return DefaultResolver.LookupNS(context.Background(), name)
-}
-
-// LookupNS returns the DNS NS records for the given domain name.
-//
-// The returned name server names are validated to be properly
-// formatted presentation-format domain names. If the response contains
-// invalid names, those records are filtered out and an error
-// will be returned alongside the remaining results, if any.
-func (r *Resolver) LookupNS(ctx context.Context, name string) ([]*NS, error) {
- records, err := r.lookupNS(ctx, name)
- if err != nil {
- return nil, err
- }
- filteredNS := make([]*NS, 0, len(records))
- for _, ns := range records {
- if ns == nil {
- continue
- }
- if !isDomainName(ns.Host) {
- continue
- }
- filteredNS = append(filteredNS, ns)
- }
- if len(records) != len(filteredNS) {
- return filteredNS, &DNSError{Err: errMalformedDNSRecordsDetail, Name: name}
- }
- return filteredNS, nil
-}
-
-// LookupTXT returns the DNS TXT records for the given domain name.
-//
-// LookupTXT uses context.Background internally; to specify the context, use
-// Resolver.LookupTXT.
-func LookupTXT(name string) ([]string, error) {
- return DefaultResolver.lookupTXT(context.Background(), name)
-}
-
-// LookupTXT returns the DNS TXT records for the given domain name.
-func (r *Resolver) LookupTXT(ctx context.Context, name string) ([]string, error) {
- return r.lookupTXT(ctx, name)
-}
-
-// LookupAddr performs a reverse lookup for the given address, returning a list
-// of names mapping to that address.
-//
-// The returned names are validated to be properly formatted presentation-format
-// domain names. If the response contains invalid names, those records are filtered
-// out and an error will be returned alongside the remaining results, if any.
-//
-// When using the host C library resolver, at most one result will be
-// returned. To bypass the host resolver, use a custom Resolver.
-//
-// LookupAddr uses context.Background internally; to specify the context, use
-// Resolver.LookupAddr.
-func LookupAddr(addr string) (names []string, err error) {
- return DefaultResolver.LookupAddr(context.Background(), addr)
-}
-
-// LookupAddr performs a reverse lookup for the given address, returning a list
-// of names mapping to that address.
-//
-// The returned names are validated to be properly formatted presentation-format
-// domain names. If the response contains invalid names, those records are filtered
-// out and an error will be returned alongside the remaining results, if any.
-func (r *Resolver) LookupAddr(ctx context.Context, addr string) ([]string, error) {
- names, err := r.lookupAddr(ctx, addr)
- if err != nil {
- return nil, err
- }
- filteredNames := make([]string, 0, len(names))
- for _, name := range names {
- if isDomainName(name) {
- filteredNames = append(filteredNames, name)
- }
- }
- if len(names) != len(filteredNames) {
- return filteredNames, &DNSError{Err: errMalformedDNSRecordsDetail, Name: addr}
- }
- return filteredNames, nil
-}
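
A short usage sketch for the reverse-lookup path above; the timeout and the address 8.8.8.8 are illustrative, and the expected PTR result is shown only as an example:

	package main

	import (
		"context"
		"fmt"
		"log"
		"net"
		"time"
	)

	func main() {
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()
		// DefaultResolver is the package-level resolver used by LookupAddr.
		names, err := net.DefaultResolver.LookupAddr(ctx, "8.8.8.8")
		if err != nil {
			log.Fatal(err)
		}
		for _, name := range names {
			fmt.Println(name) // e.g. "dns.google." (already validated as a domain name)
		}
	}
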
-
-// errMalformedDNSRecordsDetail is the DNSError detail which is returned when a Resolver.Lookup...
-// method receives DNS records which contain invalid DNS names. This may be returned alongside
-// results which have had the malformed records filtered out.
-var errMalformedDNSRecordsDetail = "DNS response contained records which contain invalid names"
diff --git a/contrib/go/_std_1.18/src/net/lookup_unix.go b/contrib/go/_std_1.18/src/net/lookup_unix.go
deleted file mode 100644
index 255a19dfdb..0000000000
--- a/contrib/go/_std_1.18/src/net/lookup_unix.go
+++ /dev/null
@@ -1,353 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-package net
-
-import (
- "context"
- "internal/bytealg"
- "sync"
- "syscall"
-
- "golang.org/x/net/dns/dnsmessage"
-)
-
-var onceReadProtocols sync.Once
-
-// readProtocols loads contents of /etc/protocols into protocols map
-// for quick access.
-func readProtocols() {
- file, err := open("/etc/protocols")
- if err != nil {
- return
- }
- defer file.close()
-
- for line, ok := file.readLine(); ok; line, ok = file.readLine() {
- // tcp 6 TCP # transmission control protocol
- if i := bytealg.IndexByteString(line, '#'); i >= 0 {
- line = line[0:i]
- }
- f := getFields(line)
- if len(f) < 2 {
- continue
- }
- if proto, _, ok := dtoi(f[1]); ok {
- if _, ok := protocols[f[0]]; !ok {
- protocols[f[0]] = proto
- }
- for _, alias := range f[2:] {
- if _, ok := protocols[alias]; !ok {
- protocols[alias] = proto
- }
- }
- }
- }
-}
-
-// lookupProtocol looks up IP protocol name in /etc/protocols and
-// returns correspondent protocol number.
-func lookupProtocol(_ context.Context, name string) (int, error) {
- onceReadProtocols.Do(readProtocols)
- return lookupProtocolMap(name)
-}
-
-func (r *Resolver) dial(ctx context.Context, network, server string) (Conn, error) {
- // Calling Dial here is scary -- we have to be sure not to
- // dial a name that will require a DNS lookup, or Dial will
- // call back here to translate it. The DNS config parser has
- // already checked that all the cfg.servers are IP
- // addresses, which Dial will use without a DNS lookup.
- var c Conn
- var err error
- if r != nil && r.Dial != nil {
- c, err = r.Dial(ctx, network, server)
- } else {
- var d Dialer
- c, err = d.DialContext(ctx, network, server)
- }
- if err != nil {
- return nil, mapErr(err)
- }
- return c, nil
-}
-
-func (r *Resolver) lookupHost(ctx context.Context, host string) (addrs []string, err error) {
- order := systemConf().hostLookupOrder(r, host)
- if !r.preferGo() && order == hostLookupCgo {
- if addrs, err, ok := cgoLookupHost(ctx, host); ok {
- return addrs, err
- }
- // cgo not available (or netgo); fall back to Go's DNS resolver
- order = hostLookupFilesDNS
- }
- return r.goLookupHostOrder(ctx, host, order)
-}
-
-func (r *Resolver) lookupIP(ctx context.Context, network, host string) (addrs []IPAddr, err error) {
- if r.preferGo() {
- return r.goLookupIP(ctx, network, host)
- }
- order := systemConf().hostLookupOrder(r, host)
- if order == hostLookupCgo {
- if addrs, err, ok := cgoLookupIP(ctx, network, host); ok {
- return addrs, err
- }
- // cgo not available (or netgo); fall back to Go's DNS resolver
- order = hostLookupFilesDNS
- }
- ips, _, err := r.goLookupIPCNAMEOrder(ctx, network, host, order)
- return ips, err
-}
-
-func (r *Resolver) lookupPort(ctx context.Context, network, service string) (int, error) {
- if !r.preferGo() && systemConf().canUseCgo() {
- if port, err, ok := cgoLookupPort(ctx, network, service); ok {
- if err != nil {
- // Issue 18213: if cgo fails, first check to see whether we
- // have the answer baked-in to the net package.
- if port, err := goLookupPort(network, service); err == nil {
- return port, nil
- }
- }
- return port, err
- }
- }
- return goLookupPort(network, service)
-}
-
-func (r *Resolver) lookupCNAME(ctx context.Context, name string) (string, error) {
- if !r.preferGo() && systemConf().canUseCgo() {
- if cname, err, ok := cgoLookupCNAME(ctx, name); ok {
- return cname, err
- }
- }
- return r.goLookupCNAME(ctx, name)
-}
-
-func (r *Resolver) lookupSRV(ctx context.Context, service, proto, name string) (string, []*SRV, error) {
- var target string
- if service == "" && proto == "" {
- target = name
- } else {
- target = "_" + service + "._" + proto + "." + name
- }
- p, server, err := r.lookup(ctx, target, dnsmessage.TypeSRV)
- if err != nil {
- return "", nil, err
- }
- var srvs []*SRV
- var cname dnsmessage.Name
- for {
- h, err := p.AnswerHeader()
- if err == dnsmessage.ErrSectionDone {
- break
- }
- if err != nil {
- return "", nil, &DNSError{
- Err: "cannot unmarshal DNS message",
- Name: name,
- Server: server,
- }
- }
- if h.Type != dnsmessage.TypeSRV {
- if err := p.SkipAnswer(); err != nil {
- return "", nil, &DNSError{
- Err: "cannot unmarshal DNS message",
- Name: name,
- Server: server,
- }
- }
- continue
- }
- if cname.Length == 0 && h.Name.Length != 0 {
- cname = h.Name
- }
- srv, err := p.SRVResource()
- if err != nil {
- return "", nil, &DNSError{
- Err: "cannot unmarshal DNS message",
- Name: name,
- Server: server,
- }
- }
- srvs = append(srvs, &SRV{Target: srv.Target.String(), Port: srv.Port, Priority: srv.Priority, Weight: srv.Weight})
- }
- byPriorityWeight(srvs).sort()
- return cname.String(), srvs, nil
-}
-
-func (r *Resolver) lookupMX(ctx context.Context, name string) ([]*MX, error) {
- p, server, err := r.lookup(ctx, name, dnsmessage.TypeMX)
- if err != nil {
- return nil, err
- }
- var mxs []*MX
- for {
- h, err := p.AnswerHeader()
- if err == dnsmessage.ErrSectionDone {
- break
- }
- if err != nil {
- return nil, &DNSError{
- Err: "cannot unmarshal DNS message",
- Name: name,
- Server: server,
- }
- }
- if h.Type != dnsmessage.TypeMX {
- if err := p.SkipAnswer(); err != nil {
- return nil, &DNSError{
- Err: "cannot unmarshal DNS message",
- Name: name,
- Server: server,
- }
- }
- continue
- }
- mx, err := p.MXResource()
- if err != nil {
- return nil, &DNSError{
- Err: "cannot unmarshal DNS message",
- Name: name,
- Server: server,
- }
- }
- mxs = append(mxs, &MX{Host: mx.MX.String(), Pref: mx.Pref})
- }
- byPref(mxs).sort()
- return mxs, nil
-}
-
-func (r *Resolver) lookupNS(ctx context.Context, name string) ([]*NS, error) {
- p, server, err := r.lookup(ctx, name, dnsmessage.TypeNS)
- if err != nil {
- return nil, err
- }
- var nss []*NS
- for {
- h, err := p.AnswerHeader()
- if err == dnsmessage.ErrSectionDone {
- break
- }
- if err != nil {
- return nil, &DNSError{
- Err: "cannot unmarshal DNS message",
- Name: name,
- Server: server,
- }
- }
- if h.Type != dnsmessage.TypeNS {
- if err := p.SkipAnswer(); err != nil {
- return nil, &DNSError{
- Err: "cannot unmarshal DNS message",
- Name: name,
- Server: server,
- }
- }
- continue
- }
- ns, err := p.NSResource()
- if err != nil {
- return nil, &DNSError{
- Err: "cannot unmarshal DNS message",
- Name: name,
- Server: server,
- }
- }
- nss = append(nss, &NS{Host: ns.NS.String()})
- }
- return nss, nil
-}
-
-func (r *Resolver) lookupTXT(ctx context.Context, name string) ([]string, error) {
- p, server, err := r.lookup(ctx, name, dnsmessage.TypeTXT)
- if err != nil {
- return nil, err
- }
- var txts []string
- for {
- h, err := p.AnswerHeader()
- if err == dnsmessage.ErrSectionDone {
- break
- }
- if err != nil {
- return nil, &DNSError{
- Err: "cannot unmarshal DNS message",
- Name: name,
- Server: server,
- }
- }
- if h.Type != dnsmessage.TypeTXT {
- if err := p.SkipAnswer(); err != nil {
- return nil, &DNSError{
- Err: "cannot unmarshal DNS message",
- Name: name,
- Server: server,
- }
- }
- continue
- }
- txt, err := p.TXTResource()
- if err != nil {
- return nil, &DNSError{
- Err: "cannot unmarshal DNS message",
- Name: name,
- Server: server,
- }
- }
- // Multiple strings in one TXT record need to be
- // concatenated without separator to be consistent
- // with previous Go resolver.
- n := 0
- for _, s := range txt.TXT {
- n += len(s)
- }
- txtJoin := make([]byte, 0, n)
- for _, s := range txt.TXT {
- txtJoin = append(txtJoin, s...)
- }
- if len(txts) == 0 {
- txts = make([]string, 0, 1)
- }
- txts = append(txts, string(txtJoin))
- }
- return txts, nil
-}
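
The joining loop above means each returned string is one TXT record with its character-strings concatenated without separators. A small sketch of the caller-visible behavior (the domain and record contents are placeholders):

	package main

	import (
		"fmt"
		"log"
		"net"
	)

	func main() {
		txts, err := net.LookupTXT("example.com")
		if err != nil {
			log.Fatal(err)
		}
		// One element per TXT record: a record published as the two
		// strings "v=spf1 " and "include:a.example" comes back joined.
		for _, txt := range txts {
			fmt.Println(txt)
		}
	}
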
-
-func (r *Resolver) lookupAddr(ctx context.Context, addr string) ([]string, error) {
- if !r.preferGo() && systemConf().canUseCgo() {
- if ptrs, err, ok := cgoLookupPTR(ctx, addr); ok {
- return ptrs, err
- }
- }
- return r.goLookupPTR(ctx, addr)
-}
-
-// concurrentThreadsLimit returns the number of threads we permit to
-// run concurrently doing DNS lookups via cgo. A DNS lookup may use a
-// file descriptor so we limit this to less than the number of
-// permitted open files. On some systems, notably Darwin, if
-// getaddrinfo is unable to open a file descriptor it simply returns
-// EAI_NONAME rather than a useful error. Limiting the number of
-// concurrent getaddrinfo calls to less than the permitted number of
-// file descriptors makes that error less likely. We don't bother to
-// apply the same limit to DNS lookups run directly from Go, because
-// there we will return a meaningful "too many open files" error.
-func concurrentThreadsLimit() int {
- var rlim syscall.Rlimit
- if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil {
- return 500
- }
- r := int(rlim.Cur)
- if r > 500 {
- r = 500
- } else if r > 30 {
- r -= 30
- }
- return r
-}
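
To make the clamping concrete, a hypothetical mirror of the arithmetic above, minus the Getrlimit call: a soft limit of 256 yields 226, 10000 caps at 500, and 20 passes through unchanged.

	package netexample

	// limit mirrors concurrentThreadsLimit's clamping for a given
	// RLIMIT_NOFILE soft limit (illustrative helper, not in the package).
	func limit(rlim int) int {
		if rlim > 500 {
			return 500
		}
		if rlim > 30 {
			return rlim - 30 // leave headroom for other file descriptors
		}
		return rlim
	}
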
diff --git a/contrib/go/_std_1.18/src/net/mac.go b/contrib/go/_std_1.18/src/net/mac.go
deleted file mode 100644
index 373ac3d7e2..0000000000
--- a/contrib/go/_std_1.18/src/net/mac.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package net
-
-const hexDigit = "0123456789abcdef"
-
-// A HardwareAddr represents a physical hardware address.
-type HardwareAddr []byte
-
-func (a HardwareAddr) String() string {
- if len(a) == 0 {
- return ""
- }
- buf := make([]byte, 0, len(a)*3-1)
- for i, b := range a {
- if i > 0 {
- buf = append(buf, ':')
- }
- buf = append(buf, hexDigit[b>>4])
- buf = append(buf, hexDigit[b&0xF])
- }
- return string(buf)
-}
-
-// ParseMAC parses s as an IEEE 802 MAC-48, EUI-48, EUI-64, or a 20-octet
-// IP over InfiniBand link-layer address using one of the following formats:
-// 00:00:5e:00:53:01
-// 02:00:5e:10:00:00:00:01
-// 00:00:00:00:fe:80:00:00:00:00:00:00:02:00:5e:10:00:00:00:01
-// 00-00-5e-00-53-01
-// 02-00-5e-10-00-00-00-01
-// 00-00-00-00-fe-80-00-00-00-00-00-00-02-00-5e-10-00-00-00-01
-// 0000.5e00.5301
-// 0200.5e10.0000.0001
-// 0000.0000.fe80.0000.0000.0000.0200.5e10.0000.0001
-func ParseMAC(s string) (hw HardwareAddr, err error) {
- if len(s) < 14 {
- goto error
- }
-
- if s[2] == ':' || s[2] == '-' {
- if (len(s)+1)%3 != 0 {
- goto error
- }
- n := (len(s) + 1) / 3
- if n != 6 && n != 8 && n != 20 {
- goto error
- }
- hw = make(HardwareAddr, n)
- for x, i := 0, 0; i < n; i++ {
- var ok bool
- if hw[i], ok = xtoi2(s[x:], s[2]); !ok {
- goto error
- }
- x += 3
- }
- } else if s[4] == '.' {
- if (len(s)+1)%5 != 0 {
- goto error
- }
- n := 2 * (len(s) + 1) / 5
- if n != 6 && n != 8 && n != 20 {
- goto error
- }
- hw = make(HardwareAddr, n)
- for x, i := 0, 0; i < n; i += 2 {
- var ok bool
- if hw[i], ok = xtoi2(s[x:x+2], 0); !ok {
- goto error
- }
- if hw[i+1], ok = xtoi2(s[x+2:], s[4]); !ok {
- goto error
- }
- x += 5
- }
- } else {
- goto error
- }
- return hw, nil
-
-error:
- return nil, &AddrError{Err: "invalid MAC address", Addr: s}
-}
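
A sketch of parsing the formats listed above; note that all three separator styles decode to the same bytes, and HardwareAddr.String always prints colons:

	package main

	import (
		"fmt"
		"log"
		"net"
	)

	func main() {
		for _, s := range []string{"00:00:5e:00:53:01", "00-00-5e-00-53-01", "0000.5e00.5301"} {
			hw, err := net.ParseMAC(s)
			if err != nil {
				log.Fatal(err)
			}
			fmt.Println(hw, len(hw)) // 00:00:5e:00:53:01 6
		}
	}
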
diff --git a/contrib/go/_std_1.18/src/net/net.go b/contrib/go/_std_1.18/src/net/net.go
deleted file mode 100644
index d91e743a01..0000000000
--- a/contrib/go/_std_1.18/src/net/net.go
+++ /dev/null
@@ -1,758 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package net provides a portable interface for network I/O, including
-TCP/IP, UDP, domain name resolution, and Unix domain sockets.
-
-Although the package provides access to low-level networking
-primitives, most clients will need only the basic interface provided
-by the Dial, Listen, and Accept functions and the associated
-Conn and Listener interfaces. The crypto/tls package uses
-the same interfaces and similar Dial and Listen functions.
-
-The Dial function connects to a server:
-
- conn, err := net.Dial("tcp", "golang.org:80")
- if err != nil {
- // handle error
- }
- fmt.Fprintf(conn, "GET / HTTP/1.0\r\n\r\n")
- status, err := bufio.NewReader(conn).ReadString('\n')
- // ...
-
-The Listen function creates servers:
-
- ln, err := net.Listen("tcp", ":8080")
- if err != nil {
- // handle error
- }
- for {
- conn, err := ln.Accept()
- if err != nil {
- // handle error
- }
- go handleConnection(conn)
- }
-
-Name Resolution
-
-The method for resolving domain names, whether indirectly with functions like Dial
-or directly with functions like LookupHost and LookupAddr, varies by operating system.
-
-On Unix systems, the resolver has two options for resolving names.
-It can use a pure Go resolver that sends DNS requests directly to the servers
-listed in /etc/resolv.conf, or it can use a cgo-based resolver that calls C
-library routines such as getaddrinfo and getnameinfo.
-
-By default the pure Go resolver is used, because a blocked DNS request consumes
-only a goroutine, while a blocked C call consumes an operating system thread.
-When cgo is available, the cgo-based resolver is used instead under a variety of
-conditions: on systems that do not let programs make direct DNS requests (OS X),
-when the LOCALDOMAIN environment variable is present (even if empty),
-when the RES_OPTIONS or HOSTALIASES environment variable is non-empty,
-when the ASR_CONFIG environment variable is non-empty (OpenBSD only),
-when /etc/resolv.conf or /etc/nsswitch.conf specify the use of features that the
-Go resolver does not implement, and when the name being looked up ends in .local
-or is an mDNS name.
-
-The resolver decision can be overridden by setting the netdns value of the
-GODEBUG environment variable (see package runtime) to go or cgo, as in:
-
- export GODEBUG=netdns=go # force pure Go resolver
- export GODEBUG=netdns=cgo # force cgo resolver
-
-The decision can also be forced while building the Go source tree
-by setting the netgo or netcgo build tag.
-
-A numeric netdns setting, as in GODEBUG=netdns=1, causes the resolver
-to print debugging information about its decisions.
-To force a particular resolver while also printing debugging information,
-join the two settings by a plus sign, as in GODEBUG=netdns=go+1.
-
-On Plan 9, the resolver always accesses /net/cs and /net/dns.
-
-On Windows, the resolver always uses C library functions, such as GetAddrInfo and DnsQuery.
-
-*/
-package net
-
-import (
- "context"
- "errors"
- "internal/poll"
- "io"
- "os"
- "sync"
- "syscall"
- "time"
-)
-
-// netGo and netCgo contain the state of the build tags used
-// to build this binary, and whether cgo is available.
-// conf.go mirrors these into conf for easier testing.
-var (
- netGo bool // set true in cgo_stub.go for build tag "netgo" (or no cgo)
- netCgo bool // set true in conf_netcgo.go for build tag "netcgo"
-)
-
-// Addr represents a network end point address.
-//
-// The two methods Network and String conventionally return strings
-// that can be passed as the arguments to Dial, but the exact form
-// and meaning of the strings is up to the implementation.
-type Addr interface {
- Network() string // name of the network (for example, "tcp", "udp")
- String() string // string form of address (for example, "192.0.2.1:25", "[2001:db8::1]:80")
-}
-
-// Conn is a generic stream-oriented network connection.
-//
-// Multiple goroutines may invoke methods on a Conn simultaneously.
-type Conn interface {
- // Read reads data from the connection.
- // Read can be made to time out and return an error after a fixed
- // time limit; see SetDeadline and SetReadDeadline.
- Read(b []byte) (n int, err error)
-
- // Write writes data to the connection.
- // Write can be made to time out and return an error after a fixed
- // time limit; see SetDeadline and SetWriteDeadline.
- Write(b []byte) (n int, err error)
-
- // Close closes the connection.
- // Any blocked Read or Write operations will be unblocked and return errors.
- Close() error
-
- // LocalAddr returns the local network address, if known.
- LocalAddr() Addr
-
- // RemoteAddr returns the remote network address, if known.
- RemoteAddr() Addr
-
- // SetDeadline sets the read and write deadlines associated
- // with the connection. It is equivalent to calling both
- // SetReadDeadline and SetWriteDeadline.
- //
- // A deadline is an absolute time after which I/O operations
- // fail instead of blocking. The deadline applies to all future
- // and pending I/O, not just the immediately following call to
- // Read or Write. After a deadline has been exceeded, the
- // connection can be refreshed by setting a deadline in the future.
- //
- // If the deadline is exceeded a call to Read or Write or to other
- // I/O methods will return an error that wraps os.ErrDeadlineExceeded.
- // This can be tested using errors.Is(err, os.ErrDeadlineExceeded).
- // The error's Timeout method will return true, but note that there
- // are other possible errors for which the Timeout method will
- // return true even if the deadline has not been exceeded.
- //
- // An idle timeout can be implemented by repeatedly extending
- // the deadline after successful Read or Write calls.
- //
- // A zero value for t means I/O operations will not time out.
- SetDeadline(t time.Time) error
-
- // SetReadDeadline sets the deadline for future Read calls
- // and any currently-blocked Read call.
- // A zero value for t means Read will not time out.
- SetReadDeadline(t time.Time) error
-
- // SetWriteDeadline sets the deadline for future Write calls
- // and any currently-blocked Write call.
- // Even if write times out, it may return n > 0, indicating that
- // some of the data was successfully written.
- // A zero value for t means Write will not time out.
- SetWriteDeadline(t time.Time) error
-}
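
The SetDeadline doc above suggests implementing idle timeouts by repeatedly extending the deadline; a sketch under that pattern (the function name and buffer size are illustrative):

	package netexample

	import (
		"errors"
		"io"
		"net"
		"os"
		"time"
	)

	// drainUntilIdle reads from conn until no data arrives for idle,
	// extending the read deadline after every successful Read.
	func drainUntilIdle(conn net.Conn, idle time.Duration) error {
		buf := make([]byte, 4096)
		for {
			if err := conn.SetReadDeadline(time.Now().Add(idle)); err != nil {
				return err
			}
			_, err := conn.Read(buf)
			if errors.Is(err, os.ErrDeadlineExceeded) {
				return nil // connection went idle for the full window
			}
			if err == io.EOF {
				return nil // peer closed cleanly
			}
			if err != nil {
				return err
			}
		}
	}
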
-
-type conn struct {
- fd *netFD
-}
-
-func (c *conn) ok() bool { return c != nil && c.fd != nil }
-
-// Implementation of the Conn interface.
-
-// Read implements the Conn Read method.
-func (c *conn) Read(b []byte) (int, error) {
- if !c.ok() {
- return 0, syscall.EINVAL
- }
- n, err := c.fd.Read(b)
- if err != nil && err != io.EOF {
- err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return n, err
-}
-
-// Write implements the Conn Write method.
-func (c *conn) Write(b []byte) (int, error) {
- if !c.ok() {
- return 0, syscall.EINVAL
- }
- n, err := c.fd.Write(b)
- if err != nil {
- err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return n, err
-}
-
-// Close closes the connection.
-func (c *conn) Close() error {
- if !c.ok() {
- return syscall.EINVAL
- }
- err := c.fd.Close()
- if err != nil {
- err = &OpError{Op: "close", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return err
-}
-
-// LocalAddr returns the local network address.
-// The Addr returned is shared by all invocations of LocalAddr, so
-// do not modify it.
-func (c *conn) LocalAddr() Addr {
- if !c.ok() {
- return nil
- }
- return c.fd.laddr
-}
-
-// RemoteAddr returns the remote network address.
-// The Addr returned is shared by all invocations of RemoteAddr, so
-// do not modify it.
-func (c *conn) RemoteAddr() Addr {
- if !c.ok() {
- return nil
- }
- return c.fd.raddr
-}
-
-// SetDeadline implements the Conn SetDeadline method.
-func (c *conn) SetDeadline(t time.Time) error {
- if !c.ok() {
- return syscall.EINVAL
- }
- if err := c.fd.SetDeadline(t); err != nil {
- return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err}
- }
- return nil
-}
-
-// SetReadDeadline implements the Conn SetReadDeadline method.
-func (c *conn) SetReadDeadline(t time.Time) error {
- if !c.ok() {
- return syscall.EINVAL
- }
- if err := c.fd.SetReadDeadline(t); err != nil {
- return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err}
- }
- return nil
-}
-
-// SetWriteDeadline implements the Conn SetWriteDeadline method.
-func (c *conn) SetWriteDeadline(t time.Time) error {
- if !c.ok() {
- return syscall.EINVAL
- }
- if err := c.fd.SetWriteDeadline(t); err != nil {
- return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err}
- }
- return nil
-}
-
-// SetReadBuffer sets the size of the operating system's
-// receive buffer associated with the connection.
-func (c *conn) SetReadBuffer(bytes int) error {
- if !c.ok() {
- return syscall.EINVAL
- }
- if err := setReadBuffer(c.fd, bytes); err != nil {
- return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err}
- }
- return nil
-}
-
-// SetWriteBuffer sets the size of the operating system's
-// transmit buffer associated with the connection.
-func (c *conn) SetWriteBuffer(bytes int) error {
- if !c.ok() {
- return syscall.EINVAL
- }
- if err := setWriteBuffer(c.fd, bytes); err != nil {
- return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err}
- }
- return nil
-}
-
-// File returns a copy of the underlying os.File.
-// It is the caller's responsibility to close f when finished.
-// Closing c does not affect f, and closing f does not affect c.
-//
-// The returned os.File's file descriptor is different from the connection's.
-// Attempting to change properties of the original using this duplicate
-// may or may not have the desired effect.
-func (c *conn) File() (f *os.File, err error) {
- f, err = c.fd.dup()
- if err != nil {
- err = &OpError{Op: "file", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return
-}
-
-// PacketConn is a generic packet-oriented network connection.
-//
-// Multiple goroutines may invoke methods on a PacketConn simultaneously.
-type PacketConn interface {
- // ReadFrom reads a packet from the connection,
- // copying the payload into p. It returns the number of
- // bytes copied into p and the return address that
- // was on the packet.
- // It returns the number of bytes read (0 <= n <= len(p))
- // and any error encountered. Callers should always process
- // the n > 0 bytes returned before considering the error err.
- // ReadFrom can be made to time out and return an error after a
- // fixed time limit; see SetDeadline and SetReadDeadline.
- ReadFrom(p []byte) (n int, addr Addr, err error)
-
- // WriteTo writes a packet with payload p to addr.
- // WriteTo can be made to time out and return an Error after a
- // fixed time limit; see SetDeadline and SetWriteDeadline.
- // On packet-oriented connections, write timeouts are rare.
- WriteTo(p []byte, addr Addr) (n int, err error)
-
- // Close closes the connection.
- // Any blocked ReadFrom or WriteTo operations will be unblocked and return errors.
- Close() error
-
- // LocalAddr returns the local network address, if known.
- LocalAddr() Addr
-
- // SetDeadline sets the read and write deadlines associated
- // with the connection. It is equivalent to calling both
- // SetReadDeadline and SetWriteDeadline.
- //
- // A deadline is an absolute time after which I/O operations
- // fail instead of blocking. The deadline applies to all future
- // and pending I/O, not just the immediately following call to
- // Read or Write. After a deadline has been exceeded, the
- // connection can be refreshed by setting a deadline in the future.
- //
- // If the deadline is exceeded a call to Read or Write or to other
- // I/O methods will return an error that wraps os.ErrDeadlineExceeded.
- // This can be tested using errors.Is(err, os.ErrDeadlineExceeded).
- // The error's Timeout method will return true, but note that there
- // are other possible errors for which the Timeout method will
- // return true even if the deadline has not been exceeded.
- //
- // An idle timeout can be implemented by repeatedly extending
- // the deadline after successful ReadFrom or WriteTo calls.
- //
- // A zero value for t means I/O operations will not time out.
- SetDeadline(t time.Time) error
-
- // SetReadDeadline sets the deadline for future ReadFrom calls
- // and any currently-blocked ReadFrom call.
- // A zero value for t means ReadFrom will not time out.
- SetReadDeadline(t time.Time) error
-
- // SetWriteDeadline sets the deadline for future WriteTo calls
- // and any currently-blocked WriteTo call.
- // Even if write times out, it may return n > 0, indicating that
- // some of the data was successfully written.
- // A zero value for t means WriteTo will not time out.
- SetWriteDeadline(t time.Time) error
-}
-
-var listenerBacklogCache struct {
- sync.Once
- val int
-}
-
-// listenerBacklog is a caching wrapper around maxListenerBacklog.
-func listenerBacklog() int {
- listenerBacklogCache.Do(func() { listenerBacklogCache.val = maxListenerBacklog() })
- return listenerBacklogCache.val
-}
-
-// A Listener is a generic network listener for stream-oriented protocols.
-//
-// Multiple goroutines may invoke methods on a Listener simultaneously.
-type Listener interface {
- // Accept waits for and returns the next connection to the listener.
- Accept() (Conn, error)
-
- // Close closes the listener.
- // Any blocked Accept operations will be unblocked and return errors.
- Close() error
-
- // Addr returns the listener's network address.
- Addr() Addr
-}
-
-// An Error represents a network error.
-type Error interface {
- error
- Timeout() bool // Is the error a timeout?
-
- // Deprecated: Temporary errors are not well-defined.
- // Most "temporary" errors are timeouts, and the few exceptions are surprising.
- // Do not use this method.
- Temporary() bool
-}
-
-// Various errors contained in OpError.
-var (
- // For connection setup operations.
- errNoSuitableAddress = errors.New("no suitable address found")
-
- // For connection setup and write operations.
- errMissingAddress = errors.New("missing address")
-
- // For both read and write operations.
- errCanceled = errors.New("operation was canceled")
- ErrWriteToConnected = errors.New("use of WriteTo with pre-connected connection")
-)
-
-// mapErr maps from the context errors to the historical internal net
-// error values.
-//
-// TODO(bradfitz): get rid of this after adjusting tests and making
-// context.DeadlineExceeded implement net.Error?
-func mapErr(err error) error {
- switch err {
- case context.Canceled:
- return errCanceled
- case context.DeadlineExceeded:
- return errTimeout
- default:
- return err
- }
-}
-
-// OpError is the error type usually returned by functions in the net
-// package. It describes the operation, network type, and address of
-// an error.
-type OpError struct {
- // Op is the operation which caused the error, such as
- // "read" or "write".
- Op string
-
- // Net is the network type on which this error occurred,
- // such as "tcp" or "udp6".
- Net string
-
- // For operations involving a remote network connection, like
- // Dial, Read, or Write, Source is the corresponding local
- // network address.
- Source Addr
-
- // Addr is the network address for which this error occurred.
- // For local operations, like Listen or SetDeadline, Addr is
- // the address of the local endpoint being manipulated.
- // For operations involving a remote network connection, like
- // Dial, Read, or Write, Addr is the remote address of that
- // connection.
- Addr Addr
-
- // Err is the error that occurred during the operation.
- // The Error method panics if the error is nil.
- Err error
-}
-
-func (e *OpError) Unwrap() error { return e.Err }
-
-func (e *OpError) Error() string {
- if e == nil {
- return "<nil>"
- }
- s := e.Op
- if e.Net != "" {
- s += " " + e.Net
- }
- if e.Source != nil {
- s += " " + e.Source.String()
- }
- if e.Addr != nil {
- if e.Source != nil {
- s += "->"
- } else {
- s += " "
- }
- s += e.Addr.String()
- }
- s += ": " + e.Err.Error()
- return s
-}
-
-var (
- // aLongTimeAgo is a non-zero time, far in the past, used for
- // immediate cancellation of dials.
- aLongTimeAgo = time.Unix(1, 0)
-
-	// noDeadline and noCancel are just zero values for
- // readability with functions taking too many parameters.
- noDeadline = time.Time{}
- noCancel = (chan struct{})(nil)
-)
-
-type timeout interface {
- Timeout() bool
-}
-
-func (e *OpError) Timeout() bool {
- if ne, ok := e.Err.(*os.SyscallError); ok {
- t, ok := ne.Err.(timeout)
- return ok && t.Timeout()
- }
- t, ok := e.Err.(timeout)
- return ok && t.Timeout()
-}
-
-type temporary interface {
- Temporary() bool
-}
-
-func (e *OpError) Temporary() bool {
- // Treat ECONNRESET and ECONNABORTED as temporary errors when
- // they come from calling accept. See issue 6163.
- if e.Op == "accept" && isConnError(e.Err) {
- return true
- }
-
- if ne, ok := e.Err.(*os.SyscallError); ok {
- t, ok := ne.Err.(temporary)
- return ok && t.Temporary()
- }
- t, ok := e.Err.(temporary)
- return ok && t.Temporary()
-}
-
-// A ParseError is the error type of literal network address parsers.
-type ParseError struct {
- // Type is the type of string that was expected, such as
- // "IP address", "CIDR address".
- Type string
-
- // Text is the malformed text string.
- Text string
-}
-
-func (e *ParseError) Error() string { return "invalid " + e.Type + ": " + e.Text }
-
-func (e *ParseError) Timeout() bool { return false }
-func (e *ParseError) Temporary() bool { return false }
-
-type AddrError struct {
- Err string
- Addr string
-}
-
-func (e *AddrError) Error() string {
- if e == nil {
- return "<nil>"
- }
- s := e.Err
- if e.Addr != "" {
- s = "address " + e.Addr + ": " + s
- }
- return s
-}
-
-func (e *AddrError) Timeout() bool { return false }
-func (e *AddrError) Temporary() bool { return false }
-
-type UnknownNetworkError string
-
-func (e UnknownNetworkError) Error() string { return "unknown network " + string(e) }
-func (e UnknownNetworkError) Timeout() bool { return false }
-func (e UnknownNetworkError) Temporary() bool { return false }
-
-type InvalidAddrError string
-
-func (e InvalidAddrError) Error() string { return string(e) }
-func (e InvalidAddrError) Timeout() bool { return false }
-func (e InvalidAddrError) Temporary() bool { return false }
-
-// errTimeout exists to return the historical "i/o timeout" string
-// for context.DeadlineExceeded. See mapErr.
-// It is also used when Dialer.Deadline is exceeded.
-//
-// TODO(iant): We could consider changing this to os.ErrDeadlineExceeded
-// in the future, but note that that would conflict with the TODO
-// at mapErr that suggests changing it to context.DeadlineExceeded.
-var errTimeout error = &timeoutError{}
-
-type timeoutError struct{}
-
-func (e *timeoutError) Error() string { return "i/o timeout" }
-func (e *timeoutError) Timeout() bool { return true }
-func (e *timeoutError) Temporary() bool { return true }
-
-// DNSConfigError represents an error reading the machine's DNS configuration.
-// (No longer used; kept for compatibility.)
-type DNSConfigError struct {
- Err error
-}
-
-func (e *DNSConfigError) Unwrap() error { return e.Err }
-func (e *DNSConfigError) Error() string { return "error reading DNS config: " + e.Err.Error() }
-func (e *DNSConfigError) Timeout() bool { return false }
-func (e *DNSConfigError) Temporary() bool { return false }
-
-// Various errors contained in DNSError.
-var (
- errNoSuchHost = errors.New("no such host")
-)
-
-// DNSError represents a DNS lookup error.
-type DNSError struct {
- Err string // description of the error
- Name string // name looked for
- Server string // server used
- IsTimeout bool // if true, timed out; not all timeouts set this
- IsTemporary bool // if true, error is temporary; not all errors set this
- IsNotFound bool // if true, host could not be found
-}
-
-func (e *DNSError) Error() string {
- if e == nil {
- return "<nil>"
- }
- s := "lookup " + e.Name
- if e.Server != "" {
- s += " on " + e.Server
- }
- s += ": " + e.Err
- return s
-}
-
-// Timeout reports whether the DNS lookup is known to have timed out.
-// This is not always known; a DNS lookup may fail due to a timeout
-// and return a DNSError for which Timeout returns false.
-func (e *DNSError) Timeout() bool { return e.IsTimeout }
-
-// Temporary reports whether the DNS error is known to be temporary.
-// This is not always known; a DNS lookup may fail due to a temporary
-// error and return a DNSError for which Temporary returns false.
-func (e *DNSError) Temporary() bool { return e.IsTimeout || e.IsTemporary }
-
-// errClosed exists just so that the docs for ErrClosed don't mention
-// the internal package poll.
-var errClosed = poll.ErrNetClosing
-
-// ErrClosed is the error returned by an I/O call on a network
-// connection that has already been closed, or that is closed by
-// another goroutine before the I/O is completed. This may be wrapped
-// in another error, and should normally be tested using
-// errors.Is(err, net.ErrClosed).
-var ErrClosed error = errClosed
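
A sketch of the errors.Is test recommended above; readLoop is a hypothetical helper, not part of the package:

	package netexample

	import (
		"errors"
		"log"
		"net"
	)

	func readLoop(conn net.Conn) {
		buf := make([]byte, 1024)
		for {
			if _, err := conn.Read(buf); err != nil {
				if errors.Is(err, net.ErrClosed) {
					return // closed by another goroutine: clean shutdown
				}
				log.Print(err)
				return
			}
		}
	}
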
-
-type writerOnly struct {
- io.Writer
-}
-
-// Fallback implementation of io.ReaderFrom's ReadFrom, when sendfile isn't
-// applicable.
-func genericReadFrom(w io.Writer, r io.Reader) (n int64, err error) {
- // Use wrapper to hide existing r.ReadFrom from io.Copy.
- return io.Copy(writerOnly{w}, r)
-}
-
-// Limit the number of concurrent cgo-using goroutines, because
-// each will block an entire operating system thread. The usual culprit
-// is resolving many DNS names in separate goroutines but the DNS
-// server is not responding. Then the many lookups each use a different
-// thread, and the system or the program runs out of threads.
-
-var threadLimit chan struct{}
-
-var threadOnce sync.Once
-
-func acquireThread() {
- threadOnce.Do(func() {
- threadLimit = make(chan struct{}, concurrentThreadsLimit())
- })
- threadLimit <- struct{}{}
-}
-
-func releaseThread() {
- <-threadLimit
-}
-
-// buffersWriter is the interface implemented by Conns that support a
-// "writev"-like batch write optimization.
-// writeBuffers should fully consume and write all chunks from the
-// provided Buffers, else it should report a non-nil error.
-type buffersWriter interface {
- writeBuffers(*Buffers) (int64, error)
-}
-
-// Buffers contains zero or more runs of bytes to write.
-//
-// On certain machines, for certain types of connections, this is
-// optimized into an OS-specific batch write operation (such as
-// "writev").
-type Buffers [][]byte
-
-var (
- _ io.WriterTo = (*Buffers)(nil)
- _ io.Reader = (*Buffers)(nil)
-)
-
-// WriteTo writes contents of the buffers to w.
-//
-// WriteTo implements io.WriterTo for Buffers.
-//
-// WriteTo modifies the slice v as well as v[i] for 0 <= i < len(v),
-// but does not modify v[i][j] for any i, j.
-func (v *Buffers) WriteTo(w io.Writer) (n int64, err error) {
- if wv, ok := w.(buffersWriter); ok {
- return wv.writeBuffers(v)
- }
- for _, b := range *v {
- nb, err := w.Write(b)
- n += int64(nb)
- if err != nil {
- v.consume(n)
- return n, err
- }
- }
- v.consume(n)
- return n, nil
-}
-
-// Read from the buffers.
-//
-// Read implements io.Reader for Buffers.
-//
-// Read modifies the slice v as well as v[i] for 0 <= i < len(v),
-// but does not modify v[i][j] for any i, j.
-func (v *Buffers) Read(p []byte) (n int, err error) {
- for len(p) > 0 && len(*v) > 0 {
- n0 := copy(p, (*v)[0])
- v.consume(int64(n0))
- p = p[n0:]
- n += n0
- }
- if len(*v) == 0 {
- err = io.EOF
- }
- return
-}
-
-func (v *Buffers) consume(n int64) {
- for len(*v) > 0 {
- ln0 := int64(len((*v)[0]))
- if ln0 > n {
- (*v)[0] = (*v)[0][n:]
- return
- }
- n -= ln0
- (*v)[0] = nil
- *v = (*v)[1:]
- }
-}
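
A sketch of batch-writing with the Buffers type above; on connections whose Conn implements the writev optimization, the three chunks can go out in one system call (the response bytes are illustrative):

	package netexample

	import "net"

	func writeResponse(conn net.Conn) error {
		bufs := net.Buffers{
			[]byte("HTTP/1.1 200 OK\r\n"),
			[]byte("Content-Length: 2\r\n\r\n"),
			[]byte("ok"),
		}
		// WriteTo consumes bufs as it writes; on success len(bufs) == 0.
		_, err := bufs.WriteTo(conn)
		return err
	}
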
diff --git a/contrib/go/_std_1.18/src/net/netip/netip.go b/contrib/go/_std_1.18/src/net/netip/netip.go
deleted file mode 100644
index f27984ab57..0000000000
--- a/contrib/go/_std_1.18/src/net/netip/netip.go
+++ /dev/null
@@ -1,1498 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package netip defines an IP address type that's a small value type.
-// Building on that Addr type, the package also defines AddrPort (an
-// IP address and a port), and Prefix (an IP address and a bit length
-// prefix).
-//
-// Compared to the net.IP type, this package's Addr type takes less
-// memory, is immutable, and is comparable (supports == and being a
-// map key).
-package netip
-
-import (
- "errors"
- "math"
- "strconv"
-
- "internal/bytealg"
- "internal/intern"
- "internal/itoa"
-)
-
-// Sizes: (64-bit)
-// net.IP: 24 byte slice header + {4, 16} = 28 to 40 bytes
-// net.IPAddr: 40 byte slice header + {4, 16} = 44 to 56 bytes + zone length
-// netip.Addr: 24 bytes (zone is per-name singleton, shared across all users)
-
-// Addr represents an IPv4 or IPv6 address (with or without a scoped
-// addressing zone), similar to net.IP or net.IPAddr.
-//
-// Unlike net.IP or net.IPAddr, Addr is a comparable value
-// type (it supports == and can be a map key) and is immutable.
-//
-// The zero Addr is not a valid IP address.
-// Addr{} is distinct from both 0.0.0.0 and ::.
-type Addr struct {
- // addr is the hi and lo bits of an IPv6 address. If z==z4,
- // hi and lo contain the IPv4-mapped IPv6 address.
- //
- // hi and lo are constructed by interpreting a 16-byte IPv6
- // address as a big-endian 128-bit number. The most significant
- // bits of that number go into hi, the rest into lo.
- //
- // For example, 0011:2233:4455:6677:8899:aabb:ccdd:eeff is stored as:
- // addr.hi = 0x0011223344556677
- // addr.lo = 0x8899aabbccddeeff
- //
- // We store IPs like this, rather than as [16]byte, because it
- // turns most operations on IPs into arithmetic and bit-twiddling
- // operations on 64-bit registers, which is much faster than
- // bytewise processing.
- addr uint128
-
- // z is a combination of the address family and the IPv6 zone.
- //
- // nil means invalid IP address (for a zero Addr).
- // z4 means an IPv4 address.
- // z6noz means an IPv6 address without a zone.
- //
- // Otherwise it's the interned zone name string.
- z *intern.Value
-}
-
-// z0, z4, and z6noz are sentinel IP.z values.
-// See the IP type's field docs.
-var (
- z0 = (*intern.Value)(nil)
- z4 = new(intern.Value)
- z6noz = new(intern.Value)
-)
-
-// IPv6LinkLocalAllNodes returns the IPv6 link-local all nodes multicast
-// address ff02::1.
-func IPv6LinkLocalAllNodes() Addr { return AddrFrom16([16]byte{0: 0xff, 1: 0x02, 15: 0x01}) }
-
-// IPv6Unspecified returns the IPv6 unspecified address "::".
-func IPv6Unspecified() Addr { return Addr{z: z6noz} }
-
-// IPv4Unspecified returns the IPv4 unspecified address "0.0.0.0".
-func IPv4Unspecified() Addr { return AddrFrom4([4]byte{}) }
-
-// AddrFrom4 returns the address of the IPv4 address given by the bytes in addr.
-func AddrFrom4(addr [4]byte) Addr {
- return Addr{
- addr: uint128{0, 0xffff00000000 | uint64(addr[0])<<24 | uint64(addr[1])<<16 | uint64(addr[2])<<8 | uint64(addr[3])},
- z: z4,
- }
-}
-
-// AddrFrom16 returns the IPv6 address given by the bytes in addr.
-// An IPv6-mapped IPv4 address is left as an IPv6 address.
-// (Use Unmap to convert them if needed.)
-func AddrFrom16(addr [16]byte) Addr {
- return Addr{
- addr: uint128{
- beUint64(addr[:8]),
- beUint64(addr[8:]),
- },
- z: z6noz,
- }
-}
-
-// ipv6Slice is like AddrFrom16, but operates on a 16-byte slice.
-// The caller must ensure the slice is exactly 16 bytes.
-func ipv6Slice(addr []byte) Addr {
- return Addr{
- addr: uint128{
- beUint64(addr[:8]),
- beUint64(addr[8:]),
- },
- z: z6noz,
- }
-}
-
-// ParseAddr parses s as an IP address, returning the result. The string
-// s can be in dotted decimal ("192.0.2.1"), IPv6 ("2001:db8::68"),
-// or IPv6 with a scoped addressing zone ("fe80::1cc0:3e8c:119f:c2e1%ens18").
-func ParseAddr(s string) (Addr, error) {
- for i := 0; i < len(s); i++ {
- switch s[i] {
- case '.':
- return parseIPv4(s)
- case ':':
- return parseIPv6(s)
- case '%':
- // Assume that this was trying to be an IPv6 address with
- // a zone specifier, but the address is missing.
- return Addr{}, parseAddrError{in: s, msg: "missing IPv6 address"}
- }
- }
- return Addr{}, parseAddrError{in: s, msg: "unable to parse IP"}
-}
-
-// MustParseAddr calls ParseAddr(s) and panics on error.
-// It is intended for use in tests with hard-coded strings.
-func MustParseAddr(s string) Addr {
- ip, err := ParseAddr(s)
- if err != nil {
- panic(err)
- }
- return ip
-}
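
A sketch of the parsing entry points above; the zoned IPv6 literal is the one from the ParseAddr doc comment:

	package main

	import (
		"fmt"
		"log"
		"net/netip"
	)

	func main() {
		addr, err := netip.ParseAddr("fe80::1cc0:3e8c:119f:c2e1%ens18")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(addr.Is6(), addr.Zone()) // true ens18

		// MustParseAddr panics on error; intended for fixed test inputs.
		v4 := netip.MustParseAddr("192.0.2.1")
		fmt.Println(v4.Is4()) // true
	}
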
-
-type parseAddrError struct {
- in string // the string given to ParseAddr
- msg string // an explanation of the parse failure
- at string // optionally, the unparsed portion of in at which the error occurred.
-}
-
-func (err parseAddrError) Error() string {
- q := strconv.Quote
- if err.at != "" {
- return "ParseAddr(" + q(err.in) + "): " + err.msg + " (at " + q(err.at) + ")"
- }
- return "ParseAddr(" + q(err.in) + "): " + err.msg
-}
-
-// parseIPv4 parses s as an IPv4 address (in form "192.168.0.1").
-func parseIPv4(s string) (ip Addr, err error) {
- var fields [4]uint8
- var val, pos int
- var digLen int // number of digits in current octet
- for i := 0; i < len(s); i++ {
- if s[i] >= '0' && s[i] <= '9' {
- if digLen == 1 && val == 0 {
- return Addr{}, parseAddrError{in: s, msg: "IPv4 field has octet with leading zero"}
- }
- val = val*10 + int(s[i]) - '0'
- digLen++
- if val > 255 {
- return Addr{}, parseAddrError{in: s, msg: "IPv4 field has value >255"}
- }
- } else if s[i] == '.' {
- // .1.2.3
- // 1.2.3.
- // 1..2.3
- if i == 0 || i == len(s)-1 || s[i-1] == '.' {
- return Addr{}, parseAddrError{in: s, msg: "IPv4 field must have at least one digit", at: s[i:]}
- }
- // 1.2.3.4.5
- if pos == 3 {
- return Addr{}, parseAddrError{in: s, msg: "IPv4 address too long"}
- }
- fields[pos] = uint8(val)
- pos++
- val = 0
- digLen = 0
- } else {
- return Addr{}, parseAddrError{in: s, msg: "unexpected character", at: s[i:]}
- }
- }
- if pos < 3 {
- return Addr{}, parseAddrError{in: s, msg: "IPv4 address too short"}
- }
- fields[3] = uint8(val)
- return AddrFrom4(fields), nil
-}
-
-// parseIPv6 parses s as an IPv6 address (in form "2001:db8::68").
-func parseIPv6(in string) (Addr, error) {
- s := in
-
- // Split off the zone right from the start. Yes it's a second scan
- // of the string, but trying to handle it inline makes a bunch of
- // other inner loop conditionals more expensive, and it ends up
- // being slower.
- zone := ""
- i := bytealg.IndexByteString(s, '%')
- if i != -1 {
- s, zone = s[:i], s[i+1:]
- if zone == "" {
- // Not allowed to have an empty zone if explicitly specified.
- return Addr{}, parseAddrError{in: in, msg: "zone must be a non-empty string"}
- }
- }
-
- var ip [16]byte
- ellipsis := -1 // position of ellipsis in ip
-
- // Might have leading ellipsis
- if len(s) >= 2 && s[0] == ':' && s[1] == ':' {
- ellipsis = 0
- s = s[2:]
- // Might be only ellipsis
- if len(s) == 0 {
- return IPv6Unspecified().WithZone(zone), nil
- }
- }
-
- // Loop, parsing hex numbers followed by colon.
- i = 0
- for i < 16 {
- // Hex number. Similar to parseIPv4, inlining the hex number
- // parsing yields a significant performance increase.
- off := 0
- acc := uint32(0)
- for ; off < len(s); off++ {
- c := s[off]
- if c >= '0' && c <= '9' {
- acc = (acc << 4) + uint32(c-'0')
- } else if c >= 'a' && c <= 'f' {
- acc = (acc << 4) + uint32(c-'a'+10)
- } else if c >= 'A' && c <= 'F' {
- acc = (acc << 4) + uint32(c-'A'+10)
- } else {
- break
- }
- if acc > math.MaxUint16 {
- // Overflow, fail.
- return Addr{}, parseAddrError{in: in, msg: "IPv6 field has value >=2^16", at: s}
- }
- }
- if off == 0 {
- // No digits found, fail.
- return Addr{}, parseAddrError{in: in, msg: "each colon-separated field must have at least one digit", at: s}
- }
-
- // If followed by dot, might be in trailing IPv4.
- if off < len(s) && s[off] == '.' {
- if ellipsis < 0 && i != 12 {
- // Not the right place.
- return Addr{}, parseAddrError{in: in, msg: "embedded IPv4 address must replace the final 2 fields of the address", at: s}
- }
- if i+4 > 16 {
- // Not enough room.
- return Addr{}, parseAddrError{in: in, msg: "too many hex fields to fit an embedded IPv4 at the end of the address", at: s}
- }
- // TODO: could make this a bit faster by having a helper
- // that parses to a [4]byte, and have both parseIPv4 and
- // parseIPv6 use it.
- ip4, err := parseIPv4(s)
- if err != nil {
- return Addr{}, parseAddrError{in: in, msg: err.Error(), at: s}
- }
- ip[i] = ip4.v4(0)
- ip[i+1] = ip4.v4(1)
- ip[i+2] = ip4.v4(2)
- ip[i+3] = ip4.v4(3)
- s = ""
- i += 4
- break
- }
-
- // Save this 16-bit chunk.
- ip[i] = byte(acc >> 8)
- ip[i+1] = byte(acc)
- i += 2
-
- // Stop at end of string.
- s = s[off:]
- if len(s) == 0 {
- break
- }
-
- // Otherwise must be followed by colon and more.
- if s[0] != ':' {
- return Addr{}, parseAddrError{in: in, msg: "unexpected character, want colon", at: s}
- } else if len(s) == 1 {
- return Addr{}, parseAddrError{in: in, msg: "colon must be followed by more characters", at: s}
- }
- s = s[1:]
-
- // Look for ellipsis.
- if s[0] == ':' {
- if ellipsis >= 0 { // already have one
- return Addr{}, parseAddrError{in: in, msg: "multiple :: in address", at: s}
- }
- ellipsis = i
- s = s[1:]
- if len(s) == 0 { // can be at end
- break
- }
- }
- }
-
- // Must have used entire string.
- if len(s) != 0 {
- return Addr{}, parseAddrError{in: in, msg: "trailing garbage after address", at: s}
- }
-
- // If didn't parse enough, expand ellipsis.
- if i < 16 {
- if ellipsis < 0 {
- return Addr{}, parseAddrError{in: in, msg: "address string too short"}
- }
- n := 16 - i
- for j := i - 1; j >= ellipsis; j-- {
- ip[j+n] = ip[j]
- }
- for j := ellipsis + n - 1; j >= ellipsis; j-- {
- ip[j] = 0
- }
- } else if ellipsis >= 0 {
- // Ellipsis must represent at least one 0 group.
- return Addr{}, parseAddrError{in: in, msg: "the :: must expand to at least one field of zeros"}
- }
- return AddrFrom16(ip).WithZone(zone), nil
-}
-
-// AddrFromSlice parses the 4- or 16-byte byte slice as an IPv4 or IPv6 address.
-// Note that a net.IP can be passed directly as the []byte argument.
-// If slice's length is not 4 or 16, AddrFromSlice returns Addr{}, false.
-func AddrFromSlice(slice []byte) (ip Addr, ok bool) {
- switch len(slice) {
- case 4:
- return AddrFrom4(*(*[4]byte)(slice)), true
- case 16:
- return ipv6Slice(slice), true
- }
- return Addr{}, false
-}
-
-// v4 returns the i'th byte of ip. If ip is not an IPv4 address,
-// v4 returns unspecified garbage.
-func (ip Addr) v4(i uint8) uint8 {
- return uint8(ip.addr.lo >> ((3 - i) * 8))
-}
-
-// v6 returns the i'th byte of ip. If ip is an IPv4 address, this
-// accesses the IPv4-mapped IPv6 address form of the IP.
-func (ip Addr) v6(i uint8) uint8 {
- return uint8(*(ip.addr.halves()[(i/8)%2]) >> ((7 - i%8) * 8))
-}
-
-// v6u16 returns the i'th 16-bit word of ip. If ip is an IPv4 address,
-// this accesses the IPv4-mapped IPv6 address form of the IP.
-func (ip Addr) v6u16(i uint8) uint16 {
- return uint16(*(ip.addr.halves()[(i/4)%2]) >> ((3 - i%4) * 16))
-}
-
-// isZero reports whether ip is the zero value of the IP type.
-// The zero value is not a valid IP address of any type.
-//
-// Note that "0.0.0.0" and "::" are not the zero value. Use IsUnspecified to
-// check for these values instead.
-func (ip Addr) isZero() bool {
- // Faster than comparing ip == Addr{}, but effectively equivalent,
- // as there's no way to make an IP with a nil z from this package.
- return ip.z == z0
-}
-
-// IsValid reports whether the Addr is an initialized address (not the zero Addr).
-//
-// Note that "0.0.0.0" and "::" are both valid values.
-func (ip Addr) IsValid() bool { return ip.z != z0 }
-
-// BitLen returns the number of bits in the IP address:
-// 128 for IPv6, 32 for IPv4, and 0 for the zero Addr.
-//
-// Note that IPv4-mapped IPv6 addresses are considered IPv6 addresses
-// and therefore have bit length 128.
-func (ip Addr) BitLen() int {
- switch ip.z {
- case z0:
- return 0
- case z4:
- return 32
- }
- return 128
-}
-
-// Zone returns ip's IPv6 scoped addressing zone, if any.
-func (ip Addr) Zone() string {
- if ip.z == nil {
- return ""
- }
- zone, _ := ip.z.Get().(string)
- return zone
-}
-
-// Compare returns an integer comparing two IPs.
-// The result will be 0 if ip == ip2, -1 if ip < ip2, and +1 if ip > ip2.
-// The definition of "less than" is the same as the Less method.
-func (ip Addr) Compare(ip2 Addr) int {
- f1, f2 := ip.BitLen(), ip2.BitLen()
- if f1 < f2 {
- return -1
- }
- if f1 > f2 {
- return 1
- }
- hi1, hi2 := ip.addr.hi, ip2.addr.hi
- if hi1 < hi2 {
- return -1
- }
- if hi1 > hi2 {
- return 1
- }
- lo1, lo2 := ip.addr.lo, ip2.addr.lo
- if lo1 < lo2 {
- return -1
- }
- if lo1 > lo2 {
- return 1
- }
- if ip.Is6() {
- za, zb := ip.Zone(), ip2.Zone()
- if za < zb {
- return -1
- }
- if za > zb {
- return 1
- }
- }
- return 0
-}
-
-// Less reports whether ip sorts before ip2.
-// IP addresses sort first by length, then their address.
-// IPv6 addresses with zones sort just after the same address without a zone.
-func (ip Addr) Less(ip2 Addr) bool { return ip.Compare(ip2) == -1 }
-
-func (ip Addr) lessOrEq(ip2 Addr) bool { return ip.Compare(ip2) <= 0 }
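
A sketch of sorting with the comparison methods above; per the Less doc, addresses sort first by length, so IPv4 comes before IPv6:

	package main

	import (
		"fmt"
		"net/netip"
		"sort"
	)

	func main() {
		addrs := []netip.Addr{
			netip.MustParseAddr("2001:db8::1"),
			netip.MustParseAddr("192.0.2.1"),
			netip.MustParseAddr("10.0.0.1"),
		}
		sort.Slice(addrs, func(i, j int) bool { return addrs[i].Less(addrs[j]) })
		fmt.Println(addrs) // [10.0.0.1 192.0.2.1 2001:db8::1]
	}
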
-
-// Is4 reports whether ip is an IPv4 address.
-//
-// It returns false for IPv4-mapped IPv6 addresses. See Addr.Unmap.
-func (ip Addr) Is4() bool {
- return ip.z == z4
-}
-
-// Is4In6 reports whether ip is an IPv4-mapped IPv6 address.
-func (ip Addr) Is4In6() bool {
- return ip.Is6() && ip.addr.hi == 0 && ip.addr.lo>>32 == 0xffff
-}
-
-// Is6 reports whether ip is an IPv6 address, including IPv4-mapped
-// IPv6 addresses.
-func (ip Addr) Is6() bool {
- return ip.z != z0 && ip.z != z4
-}
-
-// Unmap returns ip with any IPv4-mapped IPv6 address prefix removed.
-//
-// That is, if ip is an IPv6 address wrapping an IPv4 address, it
-// returns the wrapped IPv4 address. Otherwise it returns ip unmodified.
-func (ip Addr) Unmap() Addr {
- if ip.Is4In6() {
- ip.z = z4
- }
- return ip
-}
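
A sketch of Is4In6 and Unmap together: an IPv4-mapped literal stays an IPv6 address until explicitly unmapped.

	package main

	import (
		"fmt"
		"net/netip"
	)

	func main() {
		mapped := netip.MustParseAddr("::ffff:192.0.2.1")
		fmt.Println(mapped.Is4(), mapped.Is4In6()) // false true
		fmt.Println(mapped.Unmap())                // 192.0.2.1
		fmt.Println(mapped.Unmap().Is4())          // true
	}
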
-
-// WithZone returns an IP that's the same as ip but with the provided
-// zone. If zone is empty, the zone is removed. If ip is an IPv4
-// address, WithZone is a no-op and returns ip unchanged.
-func (ip Addr) WithZone(zone string) Addr {
- if !ip.Is6() {
- return ip
- }
- if zone == "" {
- ip.z = z6noz
- return ip
- }
- ip.z = intern.GetByString(zone)
- return ip
-}
-
-// withoutZone unconditionally strips the zone from IP.
-// It's similar to WithZone, but small enough to be inlinable.
-func (ip Addr) withoutZone() Addr {
- if !ip.Is6() {
- return ip
- }
- ip.z = z6noz
- return ip
-}
-
-// hasZone reports whether IP has an IPv6 zone.
-func (ip Addr) hasZone() bool {
- return ip.z != z0 && ip.z != z4 && ip.z != z6noz
-}
-
-// IsLinkLocalUnicast reports whether ip is a link-local unicast address.
-func (ip Addr) IsLinkLocalUnicast() bool {
- // Dynamic Configuration of IPv4 Link-Local Addresses
- // https://datatracker.ietf.org/doc/html/rfc3927#section-2.1
- if ip.Is4() {
- return ip.v4(0) == 169 && ip.v4(1) == 254
- }
- // IP Version 6 Addressing Architecture (2.4 Address Type Identification)
- // https://datatracker.ietf.org/doc/html/rfc4291#section-2.4
- if ip.Is6() {
- return ip.v6u16(0)&0xffc0 == 0xfe80
- }
- return false // zero value
-}
-
-// IsLoopback reports whether ip is a loopback address.
-func (ip Addr) IsLoopback() bool {
- // Requirements for Internet Hosts -- Communication Layers (3.2.1.3 Addressing)
- // https://datatracker.ietf.org/doc/html/rfc1122#section-3.2.1.3
- if ip.Is4() {
- return ip.v4(0) == 127
- }
- // IP Version 6 Addressing Architecture (2.4 Address Type Identification)
- // https://datatracker.ietf.org/doc/html/rfc4291#section-2.4
- if ip.Is6() {
- return ip.addr.hi == 0 && ip.addr.lo == 1
- }
- return false // zero value
-}
-
-// IsMulticast reports whether ip is a multicast address.
-func (ip Addr) IsMulticast() bool {
- // Host Extensions for IP Multicasting (4. HOST GROUP ADDRESSES)
- // https://datatracker.ietf.org/doc/html/rfc1112#section-4
- if ip.Is4() {
- return ip.v4(0)&0xf0 == 0xe0
- }
- // IP Version 6 Addressing Architecture (2.4 Address Type Identification)
- // https://datatracker.ietf.org/doc/html/rfc4291#section-2.4
- if ip.Is6() {
- return ip.addr.hi>>(64-8) == 0xff // ip.v6(0) == 0xff
- }
- return false // zero value
-}
-
-// IsInterfaceLocalMulticast reports whether ip is an IPv6 interface-local
-// multicast address.
-func (ip Addr) IsInterfaceLocalMulticast() bool {
- // IPv6 Addressing Architecture (2.7.1. Pre-Defined Multicast Addresses)
- // https://datatracker.ietf.org/doc/html/rfc4291#section-2.7.1
- if ip.Is6() {
- return ip.v6u16(0)&0xff0f == 0xff01
- }
- return false // zero value
-}
-
-// IsLinkLocalMulticast reports whether ip is a link-local multicast address.
-func (ip Addr) IsLinkLocalMulticast() bool {
- // IPv4 Multicast Guidelines (4. Local Network Control Block (224.0.0/24))
- // https://datatracker.ietf.org/doc/html/rfc5771#section-4
- if ip.Is4() {
- return ip.v4(0) == 224 && ip.v4(1) == 0 && ip.v4(2) == 0
- }
- // IPv6 Addressing Architecture (2.7.1. Pre-Defined Multicast Addresses)
- // https://datatracker.ietf.org/doc/html/rfc4291#section-2.7.1
- if ip.Is6() {
- return ip.v6u16(0)&0xff0f == 0xff02
- }
- return false // zero value
-}
-
-// IsGlobalUnicast reports whether ip is a global unicast address.
-//
-// It returns true for IPv6 addresses which fall outside of the current
-// IANA-allocated 2000::/3 global unicast space, with the exception of the
-// link-local address space. It also returns true even if ip is in the IPv4
-// private address space or IPv6 unique local address space.
-// It returns false for the zero Addr.
-//
-// For reference, see RFC 1122, RFC 4291, and RFC 4632.
-func (ip Addr) IsGlobalUnicast() bool {
- if ip.z == z0 {
- // Invalid or zero-value.
- return false
- }
-
- // Match package net's IsGlobalUnicast logic. Notably private IPv4 addresses
- // and ULA IPv6 addresses are still considered "global unicast".
- if ip.Is4() && (ip == IPv4Unspecified() || ip == AddrFrom4([4]byte{255, 255, 255, 255})) {
- return false
- }
-
- return ip != IPv6Unspecified() &&
- !ip.IsLoopback() &&
- !ip.IsMulticast() &&
- !ip.IsLinkLocalUnicast()
-}
-
-// IsPrivate reports whether ip is a private address, according to RFC 1918
-// (IPv4 addresses) and RFC 4193 (IPv6 addresses). That is, it reports whether
-// ip is in 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16, or fc00::/7. This is the
-// same as net.IP.IsPrivate.
-func (ip Addr) IsPrivate() bool {
- // Match the stdlib's IsPrivate logic.
- if ip.Is4() {
- // RFC 1918 allocates 10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16 as
- // private IPv4 address subnets.
- return ip.v4(0) == 10 ||
- (ip.v4(0) == 172 && ip.v4(1)&0xf0 == 16) ||
- (ip.v4(0) == 192 && ip.v4(1) == 168)
- }
-
- if ip.Is6() {
- // RFC 4193 allocates fc00::/7 as the unique local unicast IPv6 address
- // subnet.
- return ip.v6(0)&0xfe == 0xfc
- }
-
- return false // zero value
-}
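-
-// Illustrative sketch (editor's addition):
-//
-//	netip.MustParseAddr("192.168.1.1").IsPrivate() // true (RFC 1918)
-//	netip.MustParseAddr("fd12::1").IsPrivate()     // true (fc00::/7, RFC 4193)
-//	netip.MustParseAddr("8.8.8.8").IsPrivate()     // false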
-
-// IsUnspecified reports whether ip is an unspecified address, either the IPv4
-// address "0.0.0.0" or the IPv6 address "::".
-//
-// Note that the zero Addr is not an unspecified address.
-func (ip Addr) IsUnspecified() bool {
- return ip == IPv4Unspecified() || ip == IPv6Unspecified()
-}
-
-// Prefix keeps only the top b bits of ip, producing a Prefix
-// of the specified length.
-// If ip is a zero Addr, Prefix always returns a zero Prefix and a nil error.
-// Otherwise, if bits is less than zero or greater than ip.BitLen(),
-// Prefix returns an error.
-func (ip Addr) Prefix(b int) (Prefix, error) {
- if b < 0 {
- return Prefix{}, errors.New("negative Prefix bits")
- }
- effectiveBits := b
- switch ip.z {
- case z0:
- return Prefix{}, nil
- case z4:
- if b > 32 {
- return Prefix{}, errors.New("prefix length " + itoa.Itoa(b) + " too large for IPv4")
- }
- effectiveBits += 96
- default:
- if b > 128 {
- return Prefix{}, errors.New("prefix length " + itoa.Itoa(b) + " too large for IPv6")
- }
- }
- ip.addr = ip.addr.and(mask6(effectiveBits))
- return PrefixFrom(ip, b), nil
-}
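-
-// Illustrative sketch (editor's addition): Prefix masks off the host bits.
-//
-//	p, _ := netip.MustParseAddr("192.0.2.77").Prefix(24)
-//	p.String() // "192.0.2.0/24"
-//	_, err := netip.MustParseAddr("192.0.2.77").Prefix(33) // error: too large for IPv4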
-
-const (
- netIPv4len = 4
- netIPv6len = 16
-)
-
-// As16 returns the IP address in its 16-byte representation.
-// IPv4 addresses are returned in their v6-mapped form.
-// IPv6 addresses with zones are returned without their zone (use the
-// Zone method to get it).
-// The ip zero value returns all zeroes.
-func (ip Addr) As16() (a16 [16]byte) {
- bePutUint64(a16[:8], ip.addr.hi)
- bePutUint64(a16[8:], ip.addr.lo)
- return a16
-}
-
-// As4 returns an IPv4 or IPv4-in-IPv6 address in its 4-byte representation.
-// If ip is the zero Addr or an IPv6 address, As4 panics.
-// Note that 0.0.0.0 is not the zero Addr.
-func (ip Addr) As4() (a4 [4]byte) {
- if ip.z == z4 || ip.Is4In6() {
- bePutUint32(a4[:], uint32(ip.addr.lo))
- return a4
- }
- if ip.z == z0 {
- panic("As4 called on IP zero value")
- }
- panic("As4 called on IPv6 address")
-}
-
-// AsSlice returns an IPv4 or IPv6 address in its respective 4-byte or 16-byte representation.
-func (ip Addr) AsSlice() []byte {
- switch ip.z {
- case z0:
- return nil
- case z4:
- var ret [4]byte
- bePutUint32(ret[:], uint32(ip.addr.lo))
- return ret[:]
- default:
- var ret [16]byte
- bePutUint64(ret[:8], ip.addr.hi)
- bePutUint64(ret[8:], ip.addr.lo)
- return ret[:]
- }
-}
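-
-// Illustrative sketch (editor's addition):
-//
-//	a := netip.MustParseAddr("192.0.2.1")
-//	a.As4()     // [4]byte{192, 0, 2, 1}
-//	a.As16()    // the 16-byte v4-mapped form, ::ffff:192.0.2.1
-//	a.AsSlice() // []byte{192, 0, 2, 1}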
-
-// Next returns the address following ip.
-// If there is none, it returns the zero Addr.
-func (ip Addr) Next() Addr {
- ip.addr = ip.addr.addOne()
- if ip.Is4() {
- if uint32(ip.addr.lo) == 0 {
- // Overflowed.
- return Addr{}
- }
- } else {
- if ip.addr.isZero() {
- // Overflowed
- return Addr{}
- }
- }
- return ip
-}
-
-// Prev returns the address before ip.
-// If there is none, it returns the zero Addr.
-func (ip Addr) Prev() Addr {
- if ip.Is4() {
- if uint32(ip.addr.lo) == 0 {
- return Addr{}
- }
- } else if ip.addr.isZero() {
- return Addr{}
- }
- ip.addr = ip.addr.subOne()
- return ip
-}
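-
-// Illustrative sketch (editor's addition):
-//
-//	netip.MustParseAddr("192.0.2.255").Next().String() // "192.0.3.0"
-//	netip.MustParseAddr("255.255.255.255").Next()      // zero Addr (overflow)
-//	netip.MustParseAddr("0.0.0.0").Prev()              // zero Addr (underflow)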
-
-// String returns the string form of the IP address ip.
-// It returns one of 5 forms:
-//
-// - "invalid IP", if ip is the zero Addr
-// - IPv4 dotted decimal ("192.0.2.1")
-// - IPv6 ("2001:db8::1")
-// - "::ffff:1.2.3.4" (if Is4In6)
-// - IPv6 with zone ("fe80:db8::1%eth0")
-//
-// Note that unlike package net's IP.String method,
-// IPv4-mapped IPv6 addresses format with a "::ffff:"
-// prefix before the dotted quad.
-func (ip Addr) String() string {
- switch ip.z {
- case z0:
- return "invalid IP"
- case z4:
- return ip.string4()
- default:
- if ip.Is4In6() {
- // TODO(bradfitz): this could alloc less.
- if z := ip.Zone(); z != "" {
- return "::ffff:" + ip.Unmap().String() + "%" + z
- } else {
- return "::ffff:" + ip.Unmap().String()
- }
- }
- return ip.string6()
- }
-}
-
-// AppendTo appends a text encoding of ip,
-// as generated by MarshalText,
-// to b and returns the extended buffer.
-func (ip Addr) AppendTo(b []byte) []byte {
- switch ip.z {
- case z0:
- return b
- case z4:
- return ip.appendTo4(b)
- default:
- if ip.Is4In6() {
- b = append(b, "::ffff:"...)
- b = ip.Unmap().appendTo4(b)
- if z := ip.Zone(); z != "" {
- b = append(b, '%')
- b = append(b, z...)
- }
- return b
- }
- return ip.appendTo6(b)
- }
-}
-
-// digits is a string of the hex digits from 0 to f. It's used in
-// appendDecimal and appendHex to format IP addresses.
-const digits = "0123456789abcdef"
-
-// appendDecimal appends the decimal string representation of x to b.
-func appendDecimal(b []byte, x uint8) []byte {
- // Using this function rather than strconv.AppendUint makes IPv4
- // string building 2x faster.
-
- if x >= 100 {
- b = append(b, digits[x/100])
- }
- if x >= 10 {
- b = append(b, digits[x/10%10])
- }
- return append(b, digits[x%10])
-}
-
-// appendHex appends the hex string representation of x to b.
-func appendHex(b []byte, x uint16) []byte {
- // Using this function rather than strconv.AppendUint makes IPv6
- // string building 2x faster.
-
- if x >= 0x1000 {
- b = append(b, digits[x>>12])
- }
- if x >= 0x100 {
- b = append(b, digits[x>>8&0xf])
- }
- if x >= 0x10 {
- b = append(b, digits[x>>4&0xf])
- }
- return append(b, digits[x&0xf])
-}
-
-// appendHexPad appends the fully padded hex string representation of x to b.
-func appendHexPad(b []byte, x uint16) []byte {
- return append(b, digits[x>>12], digits[x>>8&0xf], digits[x>>4&0xf], digits[x&0xf])
-}
-
-func (ip Addr) string4() string {
- const max = len("255.255.255.255")
- ret := make([]byte, 0, max)
- ret = ip.appendTo4(ret)
- return string(ret)
-}
-
-func (ip Addr) appendTo4(ret []byte) []byte {
- ret = appendDecimal(ret, ip.v4(0))
- ret = append(ret, '.')
- ret = appendDecimal(ret, ip.v4(1))
- ret = append(ret, '.')
- ret = appendDecimal(ret, ip.v4(2))
- ret = append(ret, '.')
- ret = appendDecimal(ret, ip.v4(3))
- return ret
-}
-
-// string6 formats ip in IPv6 textual representation. It follows the
-// guidelines in section 4 of RFC 5952
-// (https://tools.ietf.org/html/rfc5952#section-4): no unnecessary
-// zeros, use :: to elide the longest run of zeros, and don't use ::
-// to compact a single zero field.
-func (ip Addr) string6() string {
- // Use a zone with a "plausibly long" name, so that most zone-ful
- // IP addresses won't require additional allocation.
- //
- // The compiler does a cool optimization here, where ret ends up
- // stack-allocated and so the only allocation this function does
- // is to construct the returned string. As such, it's okay to be a
- // bit greedy here, size-wise.
- const max = len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%enp5s0")
- ret := make([]byte, 0, max)
- ret = ip.appendTo6(ret)
- return string(ret)
-}
-
-func (ip Addr) appendTo6(ret []byte) []byte {
- zeroStart, zeroEnd := uint8(255), uint8(255)
- for i := uint8(0); i < 8; i++ {
- j := i
- for j < 8 && ip.v6u16(j) == 0 {
- j++
- }
- if l := j - i; l >= 2 && l > zeroEnd-zeroStart {
- zeroStart, zeroEnd = i, j
- }
- }
-
- for i := uint8(0); i < 8; i++ {
- if i == zeroStart {
- ret = append(ret, ':', ':')
- i = zeroEnd
- if i >= 8 {
- break
- }
- } else if i > 0 {
- ret = append(ret, ':')
- }
-
- ret = appendHex(ret, ip.v6u16(i))
- }
-
- if ip.z != z6noz {
- ret = append(ret, '%')
- ret = append(ret, ip.Zone()...)
- }
- return ret
-}
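-
-// Illustrative sketch (editor's addition) of the RFC 5952 rules above:
-//
-//	netip.MustParseAddr("2001:db8:0:0:0:0:2:1").String() // "2001:db8::2:1"
-//	netip.MustParseAddr("2001:db8:0:1:1:1:1:1").String() // "2001:db8:0:1:1:1:1:1"
-//	// (a single zero field is never compressed to "::")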
-
-// StringExpanded is like String but IPv6 addresses are expanded with leading
-// zeroes and no "::" compression. For example, "2001:db8::1" becomes
-// "2001:0db8:0000:0000:0000:0000:0000:0001".
-func (ip Addr) StringExpanded() string {
- switch ip.z {
- case z0, z4:
- return ip.String()
- }
-
- const size = len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")
- ret := make([]byte, 0, size)
- for i := uint8(0); i < 8; i++ {
- if i > 0 {
- ret = append(ret, ':')
- }
-
- ret = appendHexPad(ret, ip.v6u16(i))
- }
-
- if ip.z != z6noz {
- // The addition of a zone will cause a second allocation, but when there
- // is no zone the ret slice will be stack allocated.
- ret = append(ret, '%')
- ret = append(ret, ip.Zone()...)
- }
- return string(ret)
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-// The encoding is the same as returned by String, with one exception:
-// If ip is the zero Addr, the encoding is the empty string.
-func (ip Addr) MarshalText() ([]byte, error) {
- switch ip.z {
- case z0:
- return []byte(""), nil
- case z4:
- max := len("255.255.255.255")
- b := make([]byte, 0, max)
- return ip.appendTo4(b), nil
- default:
- max := len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%enp5s0")
- b := make([]byte, 0, max)
- if ip.Is4In6() {
- b = append(b, "::ffff:"...)
- b = ip.Unmap().appendTo4(b)
- if z := ip.Zone(); z != "" {
- b = append(b, '%')
- b = append(b, z...)
- }
- return b, nil
- }
- return ip.appendTo6(b), nil
- }
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// The IP address is expected in a form accepted by ParseAddr.
-//
-// If text is empty, UnmarshalText sets *ip to the zero Addr and
-// returns no error.
-func (ip *Addr) UnmarshalText(text []byte) error {
- if len(text) == 0 {
- *ip = Addr{}
- return nil
- }
- var err error
- *ip, err = ParseAddr(string(text))
- return err
-}
-
-func (ip Addr) marshalBinaryWithTrailingBytes(trailingBytes int) []byte {
- var b []byte
- switch ip.z {
- case z0:
- b = make([]byte, trailingBytes)
- case z4:
- b = make([]byte, 4+trailingBytes)
- bePutUint32(b, uint32(ip.addr.lo))
- default:
- z := ip.Zone()
- b = make([]byte, 16+len(z)+trailingBytes)
- bePutUint64(b[:8], ip.addr.hi)
- bePutUint64(b[8:], ip.addr.lo)
- copy(b[16:], z)
- }
- return b
-}
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-// It returns a zero-length slice for the zero Addr,
-// the 4-byte form for an IPv4 address,
-// and the 16-byte form with zone appended for an IPv6 address.
-func (ip Addr) MarshalBinary() ([]byte, error) {
- return ip.marshalBinaryWithTrailingBytes(0), nil
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-// It expects data in the form generated by MarshalBinary.
-func (ip *Addr) UnmarshalBinary(b []byte) error {
- n := len(b)
- switch {
- case n == 0:
- *ip = Addr{}
- return nil
- case n == 4:
- *ip = AddrFrom4(*(*[4]byte)(b))
- return nil
- case n == 16:
- *ip = ipv6Slice(b)
- return nil
- case n > 16:
- *ip = ipv6Slice(b[:16]).WithZone(string(b[16:]))
- return nil
- }
- return errors.New("unexpected slice size")
-}
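-
-// Illustrative round trip (editor's addition):
-//
-//	b, _ := netip.MustParseAddr("fe80::1%eth0").MarshalBinary() // 16 bytes + "eth0"
-//	var a netip.Addr
-//	_ = a.UnmarshalBinary(b) // a == fe80::1%eth0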
-
-// AddrPort is an IP and a port number.
-type AddrPort struct {
- ip Addr
- port uint16
-}
-
-// AddrPortFrom returns an AddrPort with the provided IP and port.
-// It does not allocate.
-func AddrPortFrom(ip Addr, port uint16) AddrPort { return AddrPort{ip: ip, port: port} }
-
-// Addr returns p's IP address.
-func (p AddrPort) Addr() Addr { return p.ip }
-
-// Port returns p's port.
-func (p AddrPort) Port() uint16 { return p.port }
-
-// splitAddrPort splits s into an IP address string and a port
-// string. It splits strings shaped like "foo:bar" or "[foo]:bar",
-// without further validating the substrings. v6 indicates whether the
-// ip string should parse as an IPv6 address or an IPv4 address, in
-// order for s to be a valid ip:port string.
-func splitAddrPort(s string) (ip, port string, v6 bool, err error) {
- i := stringsLastIndexByte(s, ':')
- if i == -1 {
- return "", "", false, errors.New("not an ip:port")
- }
-
- ip, port = s[:i], s[i+1:]
- if len(ip) == 0 {
- return "", "", false, errors.New("no IP")
- }
- if len(port) == 0 {
- return "", "", false, errors.New("no port")
- }
- if ip[0] == '[' {
- if len(ip) < 2 || ip[len(ip)-1] != ']' {
- return "", "", false, errors.New("missing ]")
- }
- ip = ip[1 : len(ip)-1]
- v6 = true
- }
-
- return ip, port, v6, nil
-}
-
-// ParseAddrPort parses s as an AddrPort.
-//
-// It doesn't do any name resolution: both the address and the port
-// must be numeric.
-func ParseAddrPort(s string) (AddrPort, error) {
- var ipp AddrPort
- ip, port, v6, err := splitAddrPort(s)
- if err != nil {
- return ipp, err
- }
- port16, err := strconv.ParseUint(port, 10, 16)
- if err != nil {
- return ipp, errors.New("invalid port " + strconv.Quote(port) + " parsing " + strconv.Quote(s))
- }
- ipp.port = uint16(port16)
- ipp.ip, err = ParseAddr(ip)
- if err != nil {
- return AddrPort{}, err
- }
- if v6 && ipp.ip.Is4() {
- return AddrPort{}, errors.New("invalid ip:port " + strconv.Quote(s) + ", square brackets can only be used with IPv6 addresses")
- } else if !v6 && ipp.ip.Is6() {
- return AddrPort{}, errors.New("invalid ip:port " + strconv.Quote(s) + ", IPv6 addresses must be surrounded by square brackets")
- }
- return ipp, nil
-}
-
-// MustParseAddrPort calls ParseAddrPort(s) and panics on error.
-// It is intended for use in tests with hard-coded strings.
-func MustParseAddrPort(s string) AddrPort {
- ip, err := ParseAddrPort(s)
- if err != nil {
- panic(err)
- }
- return ip
-}
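-
-// Illustrative sketch (editor's addition):
-//
-//	netip.MustParseAddrPort("192.0.2.1:8080")        // ok
-//	netip.MustParseAddrPort("[2001:db8::1]:443")     // ok; IPv6 requires brackets
-//	_, err := netip.ParseAddrPort("2001:db8::1:443") // error: missing brackets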
-
-// isZero reports whether p is the zero AddrPort.
-func (p AddrPort) isZero() bool { return p == AddrPort{} }
-
-// IsValid reports whether p.Addr() is valid.
-// All ports are valid, including zero.
-func (p AddrPort) IsValid() bool { return p.ip.IsValid() }
-
-func (p AddrPort) String() string {
- switch p.ip.z {
- case z0:
- return "invalid AddrPort"
- case z4:
- a := p.ip.As4()
- buf := make([]byte, 0, 21)
- for i := range a {
- buf = strconv.AppendUint(buf, uint64(a[i]), 10)
- buf = append(buf, "...:"[i])
- }
- buf = strconv.AppendUint(buf, uint64(p.port), 10)
- return string(buf)
- default:
- // TODO: this could be more efficient allocation-wise:
- return joinHostPort(p.ip.String(), itoa.Itoa(int(p.port)))
- }
-}
-
-func joinHostPort(host, port string) string {
- // We assume that host is a literal IPv6 address if host has
- // colons.
- if bytealg.IndexByteString(host, ':') >= 0 {
- return "[" + host + "]:" + port
- }
- return host + ":" + port
-}
-
-// AppendTo appends a text encoding of p,
-// as generated by MarshalText,
-// to b and returns the extended buffer.
-func (p AddrPort) AppendTo(b []byte) []byte {
- switch p.ip.z {
- case z0:
- return b
- case z4:
- b = p.ip.appendTo4(b)
- default:
- if p.ip.Is4In6() {
- b = append(b, "[::ffff:"...)
- b = p.ip.Unmap().appendTo4(b)
- if z := p.ip.Zone(); z != "" {
- b = append(b, '%')
- b = append(b, z...)
- }
- } else {
- b = append(b, '[')
- b = p.ip.appendTo6(b)
- }
- b = append(b, ']')
- }
- b = append(b, ':')
- b = strconv.AppendInt(b, int64(p.port), 10)
- return b
-}
-
-// MarshalText implements the encoding.TextMarshaler interface. The
-// encoding is the same as returned by String, with one exception: if
-// p.Addr() is the zero Addr, the encoding is the empty string.
-func (p AddrPort) MarshalText() ([]byte, error) {
- var max int
- switch p.ip.z {
- case z0:
- case z4:
- max = len("255.255.255.255:65535")
- default:
- max = len("[ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%enp5s0]:65535")
- }
- b := make([]byte, 0, max)
- b = p.AppendTo(b)
- return b, nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler
-// interface. The AddrPort is expected in a form
-// generated by MarshalText or accepted by ParseAddrPort.
-func (p *AddrPort) UnmarshalText(text []byte) error {
- if len(text) == 0 {
- *p = AddrPort{}
- return nil
- }
- var err error
- *p, err = ParseAddrPort(string(text))
- return err
-}
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-// It returns Addr.MarshalBinary with an additional two bytes appended
-// containing the port in little-endian.
-func (p AddrPort) MarshalBinary() ([]byte, error) {
- b := p.Addr().marshalBinaryWithTrailingBytes(2)
- lePutUint16(b[len(b)-2:], p.Port())
- return b, nil
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-// It expects data in the form generated by MarshalBinary.
-func (p *AddrPort) UnmarshalBinary(b []byte) error {
- if len(b) < 2 {
- return errors.New("unexpected slice size")
- }
- var addr Addr
- err := addr.UnmarshalBinary(b[:len(b)-2])
- if err != nil {
- return err
- }
- *p = AddrPortFrom(addr, leUint16(b[len(b)-2:]))
- return nil
-}
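-
-// Illustrative sketch (editor's addition): the port occupies the final
-// two bytes, little-endian.
-//
-//	b, _ := netip.MustParseAddrPort("1.2.3.4:258").MarshalBinary()
-//	// b == []byte{1, 2, 3, 4, 0x02, 0x01} (258 == 0x0102)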
-
-// Prefix is an IP address prefix (CIDR) representing an IP network.
-//
-// The first Bits() of Addr() are specified. The remaining bits match any address.
-// The range of Bits() is [0,32] for IPv4 or [0,128] for IPv6.
-type Prefix struct {
- ip Addr
-
- // bits is logically a uint8 (storing [0,128]) but also
- // encodes an "invalid" bit, currently represented by the
- // invalidPrefixBits sentinel value. It could be packed into
- // the uint8 more with more complicated expressions in the
- // accessors, but the extra byte (in padding anyway) doesn't
- // hurt and simplifies code below.
- bits int16
-}
-
-// invalidPrefixBits is the Prefix.bits value used when the bits passed
-// to PrefixFrom are out of range. It's returned as the int -1 in the
-// public API.
-const invalidPrefixBits = -1
-
-// PrefixFrom returns a Prefix with the provided IP address and bit
-// prefix length.
-//
-// It does not allocate. Unlike Addr.Prefix, PrefixFrom does not mask
-// off the host bits of ip.
-//
-// If bits is less than zero or greater than ip.BitLen(), Prefix.Bits
-// will return the invalid value -1.
-func PrefixFrom(ip Addr, bits int) Prefix {
- if bits < 0 || bits > ip.BitLen() {
- bits = invalidPrefixBits
- }
- b16 := int16(bits)
- return Prefix{
- ip: ip.withoutZone(),
- bits: b16,
- }
-}
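-
-// Illustrative sketch (editor's addition) of the masking difference:
-//
-//	netip.PrefixFrom(netip.MustParseAddr("192.0.2.77"), 24).String() // "192.0.2.77/24"
-//	p, _ := netip.MustParseAddr("192.0.2.77").Prefix(24)
-//	p.String() // "192.0.2.0/24"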
-
-// Addr returns p's IP address.
-func (p Prefix) Addr() Addr { return p.ip }
-
-// Bits returns p's prefix length.
-//
-// It reports -1 if invalid.
-func (p Prefix) Bits() int { return int(p.bits) }
-
-// IsValid reports whether p.Bits() has a valid range for p.Addr().
-// If p.Addr() is the zero Addr, IsValid returns false.
-// Note that if p is the zero Prefix, then p.IsValid() == false.
-func (p Prefix) IsValid() bool { return !p.ip.isZero() && p.bits >= 0 && int(p.bits) <= p.ip.BitLen() }
-
-func (p Prefix) isZero() bool { return p == Prefix{} }
-
-// IsSingleIP reports whether p contains exactly one IP.
-func (p Prefix) IsSingleIP() bool { return p.bits != 0 && int(p.bits) == p.ip.BitLen() }
-
-// ParsePrefix parses s as an IP address prefix.
-// The string can be in the form "192.168.1.0/24" or "2001:db8::/32",
-// the CIDR notation defined in RFC 4632 and RFC 4291.
-//
-// Note that masked address bits are not zeroed. Use Masked for that.
-func ParsePrefix(s string) (Prefix, error) {
- i := stringsLastIndexByte(s, '/')
- if i < 0 {
- return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): no '/'")
- }
- ip, err := ParseAddr(s[:i])
- if err != nil {
- return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): " + err.Error())
- }
- bitsStr := s[i+1:]
- bits, err := strconv.Atoi(bitsStr)
- if err != nil {
-		return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): bad bits after slash: " + strconv.Quote(bitsStr))
- }
- maxBits := 32
- if ip.Is6() {
- maxBits = 128
- }
- if bits < 0 || bits > maxBits {
-		return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): prefix length out of range")
- }
- return PrefixFrom(ip, bits), nil
-}
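-
-// Illustrative sketch (editor's addition): host bits survive parsing.
-//
-//	p := netip.MustParsePrefix("192.168.1.7/24")
-//	p.Addr().String() // "192.168.1.7" (not masked; see Masked)
-//	p.Bits()          // 24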
-
-// MustParsePrefix calls ParsePrefix(s) and panics on error.
-// It is intended for use in tests with hard-coded strings.
-func MustParsePrefix(s string) Prefix {
- ip, err := ParsePrefix(s)
- if err != nil {
- panic(err)
- }
- return ip
-}
-
-// Masked returns p in its canonical form, with all but the high
-// p.Bits() bits of p.Addr() masked off.
-//
-// If p is zero or otherwise invalid, Masked returns the zero Prefix.
-func (p Prefix) Masked() Prefix {
- if m, err := p.ip.Prefix(int(p.bits)); err == nil {
- return m
- }
- return Prefix{}
-}
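-
-// Illustrative sketch (editor's addition):
-//
-//	netip.MustParsePrefix("192.168.1.7/24").Masked().String() // "192.168.1.0/24"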
-
-// Contains reports whether the network p includes ip.
-//
-// An IPv4 address will not match an IPv6 prefix.
-// A v6-mapped IPv6 address will not match an IPv4 prefix.
-// A zero-value IP will not match any prefix.
-// If ip has an IPv6 zone, Contains returns false,
-// because Prefixes strip zones.
-func (p Prefix) Contains(ip Addr) bool {
- if !p.IsValid() || ip.hasZone() {
- return false
- }
- if f1, f2 := p.ip.BitLen(), ip.BitLen(); f1 == 0 || f2 == 0 || f1 != f2 {
- return false
- }
- if ip.Is4() {
- // xor the IP addresses together; mismatched bits are now ones.
- // Shift away the number of bits we don't care about.
- // Shifts in Go are more efficient if the compiler can prove
- // that the shift amount is smaller than the width of the shifted type (64 here).
- // We know that p.bits is in the range 0..32 because p is Valid;
- // the compiler doesn't know that, so mask with 63 to help it.
- // Now truncate to 32 bits, because this is IPv4.
- // If all the bits we care about are equal, the result will be zero.
- return uint32((ip.addr.lo^p.ip.addr.lo)>>((32-p.bits)&63)) == 0
- } else {
- // xor the IP addresses together.
- // Mask away the bits we don't care about.
- // If all the bits we care about are equal, the result will be zero.
- return ip.addr.xor(p.ip.addr).and(mask6(int(p.bits))).isZero()
- }
-}
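-
-// Illustrative sketch (editor's addition):
-//
-//	p := netip.MustParsePrefix("192.168.0.0/16")
-//	p.Contains(netip.MustParseAddr("192.168.5.5"))        // true
-//	p.Contains(netip.MustParseAddr("::ffff:192.168.5.5")) // false (v6-mapped)
-//	p.Contains(netip.MustParseAddr("10.0.0.1"))           // false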
-
-// Overlaps reports whether p and o contain any IP addresses in common.
-//
-// If p and o are of different address families or either have a zero
-// IP, it reports false. Like the Contains method, a prefix with a
-// v6-mapped IPv4 IP is still treated as an IPv6 mask.
-func (p Prefix) Overlaps(o Prefix) bool {
- if !p.IsValid() || !o.IsValid() {
- return false
- }
- if p == o {
- return true
- }
- if p.ip.Is4() != o.ip.Is4() {
- return false
- }
- var minBits int16
- if p.bits < o.bits {
- minBits = p.bits
- } else {
- minBits = o.bits
- }
- if minBits == 0 {
- return true
- }
- // One of these Prefix calls might look redundant, but we don't require
- // that p and o values are normalized (via Prefix.Masked) first,
- // so the Prefix call on the one that's already minBits serves to zero
- // out any remaining bits in IP.
- var err error
- if p, err = p.ip.Prefix(int(minBits)); err != nil {
- return false
- }
- if o, err = o.ip.Prefix(int(minBits)); err != nil {
- return false
- }
- return p.ip == o.ip
-}
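-
-// Illustrative sketch (editor's addition):
-//
-//	a := netip.MustParsePrefix("10.0.0.0/8")
-//	a.Overlaps(netip.MustParsePrefix("10.1.0.0/16"))    // true
-//	a.Overlaps(netip.MustParsePrefix("192.168.0.0/16")) // false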
-
-// AppendTo appends a text encoding of p,
-// as generated by MarshalText,
-// to b and returns the extended buffer.
-func (p Prefix) AppendTo(b []byte) []byte {
- if p.isZero() {
- return b
- }
- if !p.IsValid() {
- return append(b, "invalid Prefix"...)
- }
-
- // p.ip is non-nil, because p is valid.
- if p.ip.z == z4 {
- b = p.ip.appendTo4(b)
- } else {
- if p.ip.Is4In6() {
- b = append(b, "::ffff:"...)
- b = p.ip.Unmap().appendTo4(b)
- } else {
- b = p.ip.appendTo6(b)
- }
- }
-
- b = append(b, '/')
- b = appendDecimal(b, uint8(p.bits))
- return b
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-// The encoding is the same as returned by String, with one exception:
-// If p is the zero value, the encoding is the empty string.
-func (p Prefix) MarshalText() ([]byte, error) {
- var max int
- switch p.ip.z {
- case z0:
- case z4:
- max = len("255.255.255.255/32")
- default:
- max = len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%enp5s0/128")
- }
- b := make([]byte, 0, max)
- b = p.AppendTo(b)
- return b, nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// The IP address is expected in a form accepted by ParsePrefix
-// or generated by MarshalText.
-func (p *Prefix) UnmarshalText(text []byte) error {
- if len(text) == 0 {
- *p = Prefix{}
- return nil
- }
- var err error
- *p, err = ParsePrefix(string(text))
- return err
-}
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-// It returns Addr.MarshalBinary with an additional byte appended
-// containing the prefix bits.
-func (p Prefix) MarshalBinary() ([]byte, error) {
- b := p.Addr().withoutZone().marshalBinaryWithTrailingBytes(1)
- b[len(b)-1] = uint8(p.Bits())
- return b, nil
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-// It expects data in the form generated by MarshalBinary.
-func (p *Prefix) UnmarshalBinary(b []byte) error {
- if len(b) < 1 {
- return errors.New("unexpected slice size")
- }
- var addr Addr
- err := addr.UnmarshalBinary(b[:len(b)-1])
- if err != nil {
- return err
- }
- *p = PrefixFrom(addr, int(b[len(b)-1]))
- return nil
-}
-
-// String returns the CIDR notation of p: "<ip>/<bits>".
-func (p Prefix) String() string {
- if !p.IsValid() {
- return "invalid Prefix"
- }
- return p.ip.String() + "/" + itoa.Itoa(int(p.bits))
-}
diff --git a/contrib/go/_std_1.18/src/net/nss.go b/contrib/go/_std_1.18/src/net/nss.go
deleted file mode 100644
index ee5568883f..0000000000
--- a/contrib/go/_std_1.18/src/net/nss.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-package net
-
-import (
- "errors"
- "internal/bytealg"
- "io"
- "os"
-)
-
-// nssConf represents the state of the machine's /etc/nsswitch.conf file.
-type nssConf struct {
- err error // any error encountered opening or parsing the file
- sources map[string][]nssSource // keyed by database (e.g. "hosts")
-}
-
-type nssSource struct {
- source string // e.g. "compat", "files", "mdns4_minimal"
- criteria []nssCriterion
-}
-
-// standardCriteria reports whether all specified criteria have the
-// default status actions.
-func (s nssSource) standardCriteria() bool {
- for i, crit := range s.criteria {
- if !crit.standardStatusAction(i == len(s.criteria)-1) {
- return false
- }
- }
- return true
-}
-
-// nssCriterion is the parsed structure of one of the criteria in brackets
-// after an NSS source name.
-type nssCriterion struct {
- negate bool // if "!" was present
- status string // e.g. "success", "unavail" (lowercase)
- action string // e.g. "return", "continue" (lowercase)
-}
-
-// standardStatusAction reports whether c is equivalent to not
-// specifying the criterion at all. last is whether this criterion is
-// the last in the list.
-func (c nssCriterion) standardStatusAction(last bool) bool {
- if c.negate {
- return false
- }
- var def string
- switch c.status {
- case "success":
- def = "return"
- case "notfound", "unavail", "tryagain":
- def = "continue"
- default:
- // Unknown status
- return false
- }
- if last && c.action == "return" {
- return true
- }
- return c.action == def
-}
-
-func parseNSSConfFile(file string) *nssConf {
- f, err := os.Open(file)
- if err != nil {
- return &nssConf{err: err}
- }
- defer f.Close()
- return parseNSSConf(f)
-}
-
-func parseNSSConf(r io.Reader) *nssConf {
- slurp, err := readFull(r)
- if err != nil {
- return &nssConf{err: err}
- }
- conf := new(nssConf)
- conf.err = foreachLine(slurp, func(line []byte) error {
- line = trimSpace(removeComment(line))
- if len(line) == 0 {
- return nil
- }
- colon := bytealg.IndexByte(line, ':')
- if colon == -1 {
- return errors.New("no colon on line")
- }
- db := string(trimSpace(line[:colon]))
- srcs := line[colon+1:]
- for {
- srcs = trimSpace(srcs)
- if len(srcs) == 0 {
- break
- }
- sp := bytealg.IndexByte(srcs, ' ')
- var src string
- if sp == -1 {
- src = string(srcs)
- srcs = nil // done
- } else {
- src = string(srcs[:sp])
- srcs = trimSpace(srcs[sp+1:])
- }
- var criteria []nssCriterion
- // See if there's a criteria block in brackets.
- if len(srcs) > 0 && srcs[0] == '[' {
- bclose := bytealg.IndexByte(srcs, ']')
- if bclose == -1 {
- return errors.New("unclosed criterion bracket")
- }
- var err error
- criteria, err = parseCriteria(srcs[1:bclose])
- if err != nil {
- return errors.New("invalid criteria: " + string(srcs[1:bclose]))
- }
- srcs = srcs[bclose+1:]
- }
- if conf.sources == nil {
- conf.sources = make(map[string][]nssSource)
- }
- conf.sources[db] = append(conf.sources[db], nssSource{
- source: src,
- criteria: criteria,
- })
- }
- return nil
- })
- return conf
-}
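-
-// Illustrative sketch (editor's addition): given the nsswitch.conf line
-//
-//	hosts: files mdns4_minimal [NOTFOUND=return] dns
-//
-// parseNSSConf records sources["hosts"] as three nssSource values:
-// "files" (no criteria), "mdns4_minimal" (one criterion: status
-// "notfound", action "return"), and "dns" (no criteria).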
-
-// parseCriteria parses a criteria list like "foo=bar !foo=bar".
-func parseCriteria(x []byte) (c []nssCriterion, err error) {
- err = foreachField(x, func(f []byte) error {
- not := false
- if len(f) > 0 && f[0] == '!' {
- not = true
- f = f[1:]
- }
- if len(f) < 3 {
- return errors.New("criterion too short")
- }
- eq := bytealg.IndexByte(f, '=')
- if eq == -1 {
- return errors.New("criterion lacks equal sign")
- }
- lowerASCIIBytes(f)
- c = append(c, nssCriterion{
- negate: not,
- status: string(f[:eq]),
- action: string(f[eq+1:]),
- })
- return nil
- })
- return
-}
diff --git a/contrib/go/_std_1.18/src/net/port_unix.go b/contrib/go/_std_1.18/src/net/port_unix.go
deleted file mode 100644
index 102722b2ca..0000000000
--- a/contrib/go/_std_1.18/src/net/port_unix.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris
-
-// Read system port mappings from /etc/services
-
-package net
-
-import (
- "internal/bytealg"
- "sync"
-)
-
-var onceReadServices sync.Once
-
-func readServices() {
- file, err := open("/etc/services")
- if err != nil {
- return
- }
- defer file.close()
-
- for line, ok := file.readLine(); ok; line, ok = file.readLine() {
- // "http 80/tcp www www-http # World Wide Web HTTP"
- if i := bytealg.IndexByteString(line, '#'); i >= 0 {
- line = line[:i]
- }
- f := getFields(line)
- if len(f) < 2 {
- continue
- }
- portnet := f[1] // "80/tcp"
- port, j, ok := dtoi(portnet)
- if !ok || port <= 0 || j >= len(portnet) || portnet[j] != '/' {
- continue
- }
- netw := portnet[j+1:] // "tcp"
- m, ok1 := services[netw]
- if !ok1 {
- m = make(map[string]int)
- services[netw] = m
- }
- for i := 0; i < len(f); i++ {
- if i != 1 { // f[1] was port/net
- m[f[i]] = port
- }
- }
- }
-}
-
-// goLookupPort is the native Go implementation of LookupPort.
-func goLookupPort(network, service string) (port int, err error) {
- onceReadServices.Do(readServices)
- return lookupPortMap(network, service)
-}
diff --git a/contrib/go/_std_1.18/src/net/sock_cloexec.go b/contrib/go/_std_1.18/src/net/sock_cloexec.go
deleted file mode 100644
index 56dab31b14..0000000000
--- a/contrib/go/_std_1.18/src/net/sock_cloexec.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements sysSocket for platforms that provide a fast path for
-// setting SetNonblock and CloseOnExec.
-
-//go:build dragonfly || freebsd || illumos || linux || netbsd || openbsd
-
-package net
-
-import (
- "internal/poll"
- "os"
- "syscall"
-)
-
-// Wrapper around the socket system call that marks the returned file
-// descriptor as nonblocking and close-on-exec.
-func sysSocket(family, sotype, proto int) (int, error) {
- s, err := socketFunc(family, sotype|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, proto)
- // On Linux the SOCK_NONBLOCK and SOCK_CLOEXEC flags were
- // introduced in 2.6.27 kernel and on FreeBSD both flags were
- // introduced in 10 kernel. If we get an EINVAL error on Linux
- // or EPROTONOSUPPORT error on FreeBSD, fall back to using
- // socket without them.
- switch err {
- case nil:
- return s, nil
- default:
- return -1, os.NewSyscallError("socket", err)
- case syscall.EPROTONOSUPPORT, syscall.EINVAL:
- }
-
- // See ../syscall/exec_unix.go for description of ForkLock.
- syscall.ForkLock.RLock()
- s, err = socketFunc(family, sotype, proto)
- if err == nil {
- syscall.CloseOnExec(s)
- }
- syscall.ForkLock.RUnlock()
- if err != nil {
- return -1, os.NewSyscallError("socket", err)
- }
- if err = syscall.SetNonblock(s, true); err != nil {
- poll.CloseFunc(s)
- return -1, os.NewSyscallError("setnonblock", err)
- }
- return s, nil
-}
diff --git a/contrib/go/_std_1.18/src/net/sock_linux.go b/contrib/go/_std_1.18/src/net/sock_linux.go
deleted file mode 100644
index 9f62ed3dee..0000000000
--- a/contrib/go/_std_1.18/src/net/sock_linux.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package net
-
-import "syscall"
-
-func kernelVersion() (major int, minor int) {
- var uname syscall.Utsname
- if err := syscall.Uname(&uname); err != nil {
- return
- }
-
- rl := uname.Release
- var values [2]int
- vi := 0
- value := 0
- for _, c := range rl {
- if c >= '0' && c <= '9' {
- value = (value * 10) + int(c-'0')
- } else {
- // Note that we're assuming N.N.N here. If we see anything else we are likely to
- // mis-parse it.
- values[vi] = value
- vi++
- if vi >= len(values) {
- break
- }
- value = 0
- }
- }
- switch vi {
- case 0:
- return 0, 0
- case 1:
- return values[0], 0
- case 2:
- return values[0], values[1]
- }
- return
-}
-
-// Linux stores the backlog as:
-//
-// - uint16 in kernel version < 4.1,
-// - uint32 in kernel version >= 4.1
-//
-// Truncate number to avoid wrapping.
-//
-// See issue 5030 and 41470.
-func maxAckBacklog(n int) int {
- major, minor := kernelVersion()
- size := 16
- if major > 4 || (major == 4 && minor >= 1) {
- size = 32
- }
-
- var max uint = 1<<size - 1
- if uint(n) > max {
- n = int(max)
- }
- return n
-}
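-
-// Illustrative sketch (editor's addition):
-//
-//	maxAckBacklog(100000) // 100000 on kernels >= 4.1 (uint32 backlog),
-//	                      // 65535 on older kernels (uint16 backlog)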
-
-func maxListenerBacklog() int {
- fd, err := open("/proc/sys/net/core/somaxconn")
- if err != nil {
- return syscall.SOMAXCONN
- }
- defer fd.close()
- l, ok := fd.readLine()
- if !ok {
- return syscall.SOMAXCONN
- }
- f := getFields(l)
- n, _, ok := dtoi(f[0])
- if n == 0 || !ok {
- return syscall.SOMAXCONN
- }
-
- if n > 1<<16-1 {
- return maxAckBacklog(n)
- }
- return n
-}
diff --git a/contrib/go/_std_1.18/src/net/sock_posix.go b/contrib/go/_std_1.18/src/net/sock_posix.go
deleted file mode 100644
index 98a48229c7..0000000000
--- a/contrib/go/_std_1.18/src/net/sock_posix.go
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows
-
-package net
-
-import (
- "context"
- "internal/poll"
- "os"
- "syscall"
-)
-
-// socket returns a network file descriptor that is ready for
-// asynchronous I/O using the network poller.
-func socket(ctx context.Context, net string, family, sotype, proto int, ipv6only bool, laddr, raddr sockaddr, ctrlFn func(string, string, syscall.RawConn) error) (fd *netFD, err error) {
- s, err := sysSocket(family, sotype, proto)
- if err != nil {
- return nil, err
- }
- if err = setDefaultSockopts(s, family, sotype, ipv6only); err != nil {
- poll.CloseFunc(s)
- return nil, err
- }
- if fd, err = newFD(s, family, sotype, net); err != nil {
- poll.CloseFunc(s)
- return nil, err
- }
-
- // This function makes a network file descriptor for the
- // following applications:
- //
- // - An endpoint holder that opens a passive stream
- // connection, known as a stream listener
- //
- // - An endpoint holder that opens a destination-unspecific
- // datagram connection, known as a datagram listener
- //
- // - An endpoint holder that opens an active stream or a
- // destination-specific datagram connection, known as a
- // dialer
- //
- // - An endpoint holder that opens the other connection, such
- // as talking to the protocol stack inside the kernel
- //
- // For stream and datagram listeners, they will only require
- // named sockets, so we can assume that it's just a request
- // from stream or datagram listeners when laddr is not nil but
- // raddr is nil. Otherwise we assume it's just for dialers or
- // the other connection holders.
-
- if laddr != nil && raddr == nil {
- switch sotype {
- case syscall.SOCK_STREAM, syscall.SOCK_SEQPACKET:
- if err := fd.listenStream(laddr, listenerBacklog(), ctrlFn); err != nil {
- fd.Close()
- return nil, err
- }
- return fd, nil
- case syscall.SOCK_DGRAM:
- if err := fd.listenDatagram(laddr, ctrlFn); err != nil {
- fd.Close()
- return nil, err
- }
- return fd, nil
- }
- }
- if err := fd.dial(ctx, laddr, raddr, ctrlFn); err != nil {
- fd.Close()
- return nil, err
- }
- return fd, nil
-}
-
-func (fd *netFD) ctrlNetwork() string {
- switch fd.net {
- case "unix", "unixgram", "unixpacket":
- return fd.net
- }
- switch fd.net[len(fd.net)-1] {
- case '4', '6':
- return fd.net
- }
- if fd.family == syscall.AF_INET {
- return fd.net + "4"
- }
- return fd.net + "6"
-}
-
-func (fd *netFD) addrFunc() func(syscall.Sockaddr) Addr {
- switch fd.family {
- case syscall.AF_INET, syscall.AF_INET6:
- switch fd.sotype {
- case syscall.SOCK_STREAM:
- return sockaddrToTCP
- case syscall.SOCK_DGRAM:
- return sockaddrToUDP
- case syscall.SOCK_RAW:
- return sockaddrToIP
- }
- case syscall.AF_UNIX:
- switch fd.sotype {
- case syscall.SOCK_STREAM:
- return sockaddrToUnix
- case syscall.SOCK_DGRAM:
- return sockaddrToUnixgram
- case syscall.SOCK_SEQPACKET:
- return sockaddrToUnixpacket
- }
- }
- return func(syscall.Sockaddr) Addr { return nil }
-}
-
-func (fd *netFD) dial(ctx context.Context, laddr, raddr sockaddr, ctrlFn func(string, string, syscall.RawConn) error) error {
- if ctrlFn != nil {
- c, err := newRawConn(fd)
- if err != nil {
- return err
- }
- var ctrlAddr string
- if raddr != nil {
- ctrlAddr = raddr.String()
- } else if laddr != nil {
- ctrlAddr = laddr.String()
- }
- if err := ctrlFn(fd.ctrlNetwork(), ctrlAddr, c); err != nil {
- return err
- }
- }
- var err error
- var lsa syscall.Sockaddr
- if laddr != nil {
- if lsa, err = laddr.sockaddr(fd.family); err != nil {
- return err
- } else if lsa != nil {
- if err = syscall.Bind(fd.pfd.Sysfd, lsa); err != nil {
- return os.NewSyscallError("bind", err)
- }
- }
- }
- var rsa syscall.Sockaddr // remote address from the user
- var crsa syscall.Sockaddr // remote address we actually connected to
- if raddr != nil {
- if rsa, err = raddr.sockaddr(fd.family); err != nil {
- return err
- }
- if crsa, err = fd.connect(ctx, lsa, rsa); err != nil {
- return err
- }
- fd.isConnected = true
- } else {
- if err := fd.init(); err != nil {
- return err
- }
- }
- // Record the local and remote addresses from the actual socket.
- // Get the local address by calling Getsockname.
- // For the remote address, use
- // 1) the one returned by the connect method, if any; or
- // 2) the one from Getpeername, if it succeeds; or
- // 3) the one passed to us as the raddr parameter.
- lsa, _ = syscall.Getsockname(fd.pfd.Sysfd)
- if crsa != nil {
- fd.setAddr(fd.addrFunc()(lsa), fd.addrFunc()(crsa))
- } else if rsa, _ = syscall.Getpeername(fd.pfd.Sysfd); rsa != nil {
- fd.setAddr(fd.addrFunc()(lsa), fd.addrFunc()(rsa))
- } else {
- fd.setAddr(fd.addrFunc()(lsa), raddr)
- }
- return nil
-}
-
-func (fd *netFD) listenStream(laddr sockaddr, backlog int, ctrlFn func(string, string, syscall.RawConn) error) error {
- var err error
- if err = setDefaultListenerSockopts(fd.pfd.Sysfd); err != nil {
- return err
- }
- var lsa syscall.Sockaddr
- if lsa, err = laddr.sockaddr(fd.family); err != nil {
- return err
- }
- if ctrlFn != nil {
- c, err := newRawConn(fd)
- if err != nil {
- return err
- }
- if err := ctrlFn(fd.ctrlNetwork(), laddr.String(), c); err != nil {
- return err
- }
- }
- if err = syscall.Bind(fd.pfd.Sysfd, lsa); err != nil {
- return os.NewSyscallError("bind", err)
- }
- if err = listenFunc(fd.pfd.Sysfd, backlog); err != nil {
- return os.NewSyscallError("listen", err)
- }
- if err = fd.init(); err != nil {
- return err
- }
- lsa, _ = syscall.Getsockname(fd.pfd.Sysfd)
- fd.setAddr(fd.addrFunc()(lsa), nil)
- return nil
-}
-
-func (fd *netFD) listenDatagram(laddr sockaddr, ctrlFn func(string, string, syscall.RawConn) error) error {
- switch addr := laddr.(type) {
- case *UDPAddr:
- // We provide a socket that listens to a wildcard
- // address with reusable UDP port when the given laddr
- // is an appropriate UDP multicast address prefix.
- // This makes it possible for a single UDP listener to
-		// join multiple different group addresses, and for
- // multiple UDP listeners that listen on the same UDP
- // port to join the same group address.
- if addr.IP != nil && addr.IP.IsMulticast() {
- if err := setDefaultMulticastSockopts(fd.pfd.Sysfd); err != nil {
- return err
- }
- addr := *addr
- switch fd.family {
- case syscall.AF_INET:
- addr.IP = IPv4zero
- case syscall.AF_INET6:
- addr.IP = IPv6unspecified
- }
- laddr = &addr
- }
- }
- var err error
- var lsa syscall.Sockaddr
- if lsa, err = laddr.sockaddr(fd.family); err != nil {
- return err
- }
- if ctrlFn != nil {
- c, err := newRawConn(fd)
- if err != nil {
- return err
- }
- if err := ctrlFn(fd.ctrlNetwork(), laddr.String(), c); err != nil {
- return err
- }
- }
- if err = syscall.Bind(fd.pfd.Sysfd, lsa); err != nil {
- return os.NewSyscallError("bind", err)
- }
- if err = fd.init(); err != nil {
- return err
- }
- lsa, _ = syscall.Getsockname(fd.pfd.Sysfd)
- fd.setAddr(fd.addrFunc()(lsa), nil)
- return nil
-}
diff --git a/contrib/go/_std_1.18/src/net/sockaddr_posix.go b/contrib/go/_std_1.18/src/net/sockaddr_posix.go
deleted file mode 100644
index c8e91936ad..0000000000
--- a/contrib/go/_std_1.18/src/net/sockaddr_posix.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris || windows
-
-package net
-
-import (
- "syscall"
-)
-
-// A sockaddr represents a TCP, UDP, IP or Unix network endpoint
-// address that can be converted into a syscall.Sockaddr.
-type sockaddr interface {
- Addr
-
- // family returns the platform-dependent address family
- // identifier.
- family() int
-
- // isWildcard reports whether the address is a wildcard
- // address.
- isWildcard() bool
-
- // sockaddr returns the address converted into a syscall
- // sockaddr type that implements syscall.Sockaddr
- // interface. It returns a nil interface when the address is
- // nil.
- sockaddr(family int) (syscall.Sockaddr, error)
-
- // toLocal maps the zero address to a local system address (127.0.0.1 or ::1)
- toLocal(net string) sockaddr
-}
diff --git a/contrib/go/_std_1.18/src/net/sockopt_posix.go b/contrib/go/_std_1.18/src/net/sockopt_posix.go
deleted file mode 100644
index 645080f988..0000000000
--- a/contrib/go/_std_1.18/src/net/sockopt_posix.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows
-
-package net
-
-import (
- "internal/bytealg"
- "runtime"
- "syscall"
-)
-
-// Boolean to int.
-func boolint(b bool) int {
- if b {
- return 1
- }
- return 0
-}
-
-func ipv4AddrToInterface(ip IP) (*Interface, error) {
- ift, err := Interfaces()
- if err != nil {
- return nil, err
- }
- for _, ifi := range ift {
- ifat, err := ifi.Addrs()
- if err != nil {
- return nil, err
- }
- for _, ifa := range ifat {
- switch v := ifa.(type) {
- case *IPAddr:
- if ip.Equal(v.IP) {
- return &ifi, nil
- }
- case *IPNet:
- if ip.Equal(v.IP) {
- return &ifi, nil
- }
- }
- }
- }
- if ip.Equal(IPv4zero) {
- return nil, nil
- }
- return nil, errNoSuchInterface
-}
-
-func interfaceToIPv4Addr(ifi *Interface) (IP, error) {
- if ifi == nil {
- return IPv4zero, nil
- }
- ifat, err := ifi.Addrs()
- if err != nil {
- return nil, err
- }
- for _, ifa := range ifat {
- switch v := ifa.(type) {
- case *IPAddr:
- if v.IP.To4() != nil {
- return v.IP, nil
- }
- case *IPNet:
- if v.IP.To4() != nil {
- return v.IP, nil
- }
- }
- }
- return nil, errNoSuchInterface
-}
-
-func setIPv4MreqToInterface(mreq *syscall.IPMreq, ifi *Interface) error {
- if ifi == nil {
- return nil
- }
- ifat, err := ifi.Addrs()
- if err != nil {
- return err
- }
- for _, ifa := range ifat {
- switch v := ifa.(type) {
- case *IPAddr:
- if a := v.IP.To4(); a != nil {
- copy(mreq.Interface[:], a)
- goto done
- }
- case *IPNet:
- if a := v.IP.To4(); a != nil {
- copy(mreq.Interface[:], a)
- goto done
- }
- }
- }
-done:
- if bytealg.Equal(mreq.Multiaddr[:], IPv4zero.To4()) {
- return errNoSuchMulticastInterface
- }
- return nil
-}
-
-func setReadBuffer(fd *netFD, bytes int) error {
- err := fd.pfd.SetsockoptInt(syscall.SOL_SOCKET, syscall.SO_RCVBUF, bytes)
- runtime.KeepAlive(fd)
- return wrapSyscallError("setsockopt", err)
-}
-
-func setWriteBuffer(fd *netFD, bytes int) error {
- err := fd.pfd.SetsockoptInt(syscall.SOL_SOCKET, syscall.SO_SNDBUF, bytes)
- runtime.KeepAlive(fd)
- return wrapSyscallError("setsockopt", err)
-}
-
-func setKeepAlive(fd *netFD, keepalive bool) error {
- err := fd.pfd.SetsockoptInt(syscall.SOL_SOCKET, syscall.SO_KEEPALIVE, boolint(keepalive))
- runtime.KeepAlive(fd)
- return wrapSyscallError("setsockopt", err)
-}
-
-func setLinger(fd *netFD, sec int) error {
- var l syscall.Linger
- if sec >= 0 {
- l.Onoff = 1
- l.Linger = int32(sec)
- } else {
- l.Onoff = 0
- l.Linger = 0
- }
- err := fd.pfd.SetsockoptLinger(syscall.SOL_SOCKET, syscall.SO_LINGER, &l)
- runtime.KeepAlive(fd)
- return wrapSyscallError("setsockopt", err)
-}
diff --git a/contrib/go/_std_1.18/src/net/sockoptip_posix.go b/contrib/go/_std_1.18/src/net/sockoptip_posix.go
deleted file mode 100644
index 22031df22c..0000000000
--- a/contrib/go/_std_1.18/src/net/sockoptip_posix.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows
-
-package net
-
-import (
- "runtime"
- "syscall"
-)
-
-func joinIPv4Group(fd *netFD, ifi *Interface, ip IP) error {
- mreq := &syscall.IPMreq{Multiaddr: [4]byte{ip[0], ip[1], ip[2], ip[3]}}
- if err := setIPv4MreqToInterface(mreq, ifi); err != nil {
- return err
- }
- err := fd.pfd.SetsockoptIPMreq(syscall.IPPROTO_IP, syscall.IP_ADD_MEMBERSHIP, mreq)
- runtime.KeepAlive(fd)
- return wrapSyscallError("setsockopt", err)
-}
-
-func setIPv6MulticastInterface(fd *netFD, ifi *Interface) error {
- var v int
- if ifi != nil {
- v = ifi.Index
- }
- err := fd.pfd.SetsockoptInt(syscall.IPPROTO_IPV6, syscall.IPV6_MULTICAST_IF, v)
- runtime.KeepAlive(fd)
- return wrapSyscallError("setsockopt", err)
-}
-
-func setIPv6MulticastLoopback(fd *netFD, v bool) error {
- err := fd.pfd.SetsockoptInt(syscall.IPPROTO_IPV6, syscall.IPV6_MULTICAST_LOOP, boolint(v))
- runtime.KeepAlive(fd)
- return wrapSyscallError("setsockopt", err)
-}
-
-func joinIPv6Group(fd *netFD, ifi *Interface, ip IP) error {
- mreq := &syscall.IPv6Mreq{}
- copy(mreq.Multiaddr[:], ip)
- if ifi != nil {
- mreq.Interface = uint32(ifi.Index)
- }
- err := fd.pfd.SetsockoptIPv6Mreq(syscall.IPPROTO_IPV6, syscall.IPV6_JOIN_GROUP, mreq)
- runtime.KeepAlive(fd)
- return wrapSyscallError("setsockopt", err)
-}
diff --git a/contrib/go/_std_1.18/src/net/sys_cloexec.go b/contrib/go/_std_1.18/src/net/sys_cloexec.go
deleted file mode 100644
index 26eac5585a..0000000000
--- a/contrib/go/_std_1.18/src/net/sys_cloexec.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements sysSocket for platforms that do not provide a fast path
-// for setting SetNonblock and CloseOnExec.
-
-//go:build aix || darwin || (solaris && !illumos)
-
-package net
-
-import (
- "internal/poll"
- "os"
- "syscall"
-)
-
-// Wrapper around the socket system call that marks the returned file
-// descriptor as nonblocking and close-on-exec.
-func sysSocket(family, sotype, proto int) (int, error) {
- // See ../syscall/exec_unix.go for description of ForkLock.
- syscall.ForkLock.RLock()
- s, err := socketFunc(family, sotype, proto)
- if err == nil {
- syscall.CloseOnExec(s)
- }
- syscall.ForkLock.RUnlock()
- if err != nil {
- return -1, os.NewSyscallError("socket", err)
- }
- if err = syscall.SetNonblock(s, true); err != nil {
- poll.CloseFunc(s)
- return -1, os.NewSyscallError("setnonblock", err)
- }
- return s, nil
-}
diff --git a/contrib/go/_std_1.18/src/net/tcpsock_posix.go b/contrib/go/_std_1.18/src/net/tcpsock_posix.go
deleted file mode 100644
index ed6b18b551..0000000000
--- a/contrib/go/_std_1.18/src/net/tcpsock_posix.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris || windows
-
-package net
-
-import (
- "context"
- "io"
- "os"
- "syscall"
-)
-
-func sockaddrToTCP(sa syscall.Sockaddr) Addr {
- switch sa := sa.(type) {
- case *syscall.SockaddrInet4:
- return &TCPAddr{IP: sa.Addr[0:], Port: sa.Port}
- case *syscall.SockaddrInet6:
- return &TCPAddr{IP: sa.Addr[0:], Port: sa.Port, Zone: zoneCache.name(int(sa.ZoneId))}
- }
- return nil
-}
-
-func (a *TCPAddr) family() int {
- if a == nil || len(a.IP) <= IPv4len {
- return syscall.AF_INET
- }
- if a.IP.To4() != nil {
- return syscall.AF_INET
- }
- return syscall.AF_INET6
-}
-
-func (a *TCPAddr) sockaddr(family int) (syscall.Sockaddr, error) {
- if a == nil {
- return nil, nil
- }
- return ipToSockaddr(family, a.IP, a.Port, a.Zone)
-}
-
-func (a *TCPAddr) toLocal(net string) sockaddr {
- return &TCPAddr{loopbackIP(net), a.Port, a.Zone}
-}
-
-func (c *TCPConn) readFrom(r io.Reader) (int64, error) {
- if n, err, handled := splice(c.fd, r); handled {
- return n, err
- }
- if n, err, handled := sendFile(c.fd, r); handled {
- return n, err
- }
- return genericReadFrom(c, r)
-}
-
-func (sd *sysDialer) dialTCP(ctx context.Context, laddr, raddr *TCPAddr) (*TCPConn, error) {
- if testHookDialTCP != nil {
- return testHookDialTCP(ctx, sd.network, laddr, raddr)
- }
- return sd.doDialTCP(ctx, laddr, raddr)
-}
-
-func (sd *sysDialer) doDialTCP(ctx context.Context, laddr, raddr *TCPAddr) (*TCPConn, error) {
- fd, err := internetSocket(ctx, sd.network, laddr, raddr, syscall.SOCK_STREAM, 0, "dial", sd.Dialer.Control)
-
- // TCP has a rarely used mechanism called a 'simultaneous connection' in
- // which Dial("tcp", addr1, addr2) run on the machine at addr1 can
- // connect to a simultaneous Dial("tcp", addr2, addr1) run on the machine
- // at addr2, without either machine executing Listen. If laddr == nil,
- // it means we want the kernel to pick an appropriate originating local
- // address. Some Linux kernels cycle blindly through a fixed range of
- // local ports, regardless of destination port. If a kernel happens to
- // pick local port 50001 as the source for a Dial("tcp", "", "localhost:50001"),
- // then the Dial will succeed, having simultaneously connected to itself.
- // This can only happen when we are letting the kernel pick a port (laddr == nil)
- // and when there is no listener for the destination address.
- // It's hard to argue this is anything other than a kernel bug. If we
- // see this happen, rather than expose the buggy effect to users, we
- // close the fd and try again. If it happens twice more, we relent and
- // use the result. See also:
- // https://golang.org/issue/2690
- // https://stackoverflow.com/questions/4949858/
- //
- // The opposite can also happen: if we ask the kernel to pick an appropriate
- // originating local address, sometimes it picks one that is already in use.
- // So if the error is EADDRNOTAVAIL, we have to try again too, just for
- // a different reason.
- //
- // The kernel socket code is no doubt enjoying watching us squirm.
- for i := 0; i < 2 && (laddr == nil || laddr.Port == 0) && (selfConnect(fd, err) || spuriousENOTAVAIL(err)); i++ {
- if err == nil {
- fd.Close()
- }
- fd, err = internetSocket(ctx, sd.network, laddr, raddr, syscall.SOCK_STREAM, 0, "dial", sd.Dialer.Control)
- }
-
- if err != nil {
- return nil, err
- }
- return newTCPConn(fd), nil
-}
-
-func selfConnect(fd *netFD, err error) bool {
- // If the connect failed, we clearly didn't connect to ourselves.
- if err != nil {
- return false
- }
-
- // The socket constructor can return an fd with raddr nil under certain
- // unknown conditions. The errors in the calls there to Getpeername
- // are discarded, but we can't catch the problem there because those
- // calls are sometimes legally erroneous with a "socket not connected".
- // Since this code (selfConnect) is already trying to work around
- // a problem, we make sure if this happens we recognize trouble and
- // ask the DialTCP routine to try again.
- // TODO: try to understand what's really going on.
- if fd.laddr == nil || fd.raddr == nil {
- return true
- }
- l := fd.laddr.(*TCPAddr)
- r := fd.raddr.(*TCPAddr)
- return l.Port == r.Port && l.IP.Equal(r.IP)
-}
-
-func spuriousENOTAVAIL(err error) bool {
- if op, ok := err.(*OpError); ok {
- err = op.Err
- }
- if sys, ok := err.(*os.SyscallError); ok {
- err = sys.Err
- }
- return err == syscall.EADDRNOTAVAIL
-}
-
-func (ln *TCPListener) ok() bool { return ln != nil && ln.fd != nil }
-
-func (ln *TCPListener) accept() (*TCPConn, error) {
- fd, err := ln.fd.accept()
- if err != nil {
- return nil, err
- }
- tc := newTCPConn(fd)
- if ln.lc.KeepAlive >= 0 {
- setKeepAlive(fd, true)
- ka := ln.lc.KeepAlive
- if ln.lc.KeepAlive == 0 {
- ka = defaultTCPKeepAlive
- }
- setKeepAlivePeriod(fd, ka)
- }
- return tc, nil
-}
-
-func (ln *TCPListener) close() error {
- return ln.fd.Close()
-}
-
-func (ln *TCPListener) file() (*os.File, error) {
- f, err := ln.fd.dup()
- if err != nil {
- return nil, err
- }
- return f, nil
-}
-
-func (sl *sysListener) listenTCP(ctx context.Context, laddr *TCPAddr) (*TCPListener, error) {
- fd, err := internetSocket(ctx, sl.network, laddr, nil, syscall.SOCK_STREAM, 0, "listen", sl.ListenConfig.Control)
- if err != nil {
- return nil, err
- }
- return &TCPListener{fd: fd, lc: sl.ListenConfig}, nil
-}
diff --git a/contrib/go/_std_1.18/src/net/tcpsockopt_posix.go b/contrib/go/_std_1.18/src/net/tcpsockopt_posix.go
deleted file mode 100644
index 73754b1a0f..0000000000
--- a/contrib/go/_std_1.18/src/net/tcpsockopt_posix.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows
-
-package net
-
-import (
- "runtime"
- "syscall"
-)
-
-func setNoDelay(fd *netFD, noDelay bool) error {
- err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_NODELAY, boolint(noDelay))
- runtime.KeepAlive(fd)
- return wrapSyscallError("setsockopt", err)
-}
diff --git a/contrib/go/_std_1.18/src/net/textproto/reader.go b/contrib/go/_std_1.18/src/net/textproto/reader.go
deleted file mode 100644
index 157c59b17a..0000000000
--- a/contrib/go/_std_1.18/src/net/textproto/reader.go
+++ /dev/null
@@ -1,790 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package textproto
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
- "sync"
-)
-
-// A Reader implements convenience methods for reading requests
-// or responses from a text protocol network connection.
-type Reader struct {
- R *bufio.Reader
- dot *dotReader
- buf []byte // a re-usable buffer for readContinuedLineSlice
-}
-
-// NewReader returns a new Reader reading from r.
-//
-// To avoid denial of service attacks, the provided bufio.Reader
-// should be reading from an io.LimitReader or similar Reader to bound
-// the size of responses.
-func NewReader(r *bufio.Reader) *Reader {
- commonHeaderOnce.Do(initCommonHeader)
- return &Reader{R: r}
-}
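
A minimal sketch of the bounded setup this doc comment recommends; the dialed address and the 1 MiB cap are arbitrary illustrative choices, not part of the package:

	package main

	import (
		"bufio"
		"io"
		"log"
		"net"
		"net/textproto"
	)

	func main() {
		conn, err := net.Dial("tcp", "127.0.0.1:25")
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()
		// Cap how much a misbehaving server can make us buffer.
		r := textproto.NewReader(bufio.NewReader(io.LimitReader(conn, 1<<20)))
		line, err := r.ReadLine()
		if err != nil {
			log.Fatal(err)
		}
		log.Println(line)
	}
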
-
-// ReadLine reads a single line from r,
-// eliding the final \n or \r\n from the returned string.
-func (r *Reader) ReadLine() (string, error) {
- line, err := r.readLineSlice()
- return string(line), err
-}
-
-// ReadLineBytes is like ReadLine but returns a []byte instead of a string.
-func (r *Reader) ReadLineBytes() ([]byte, error) {
- line, err := r.readLineSlice()
- if line != nil {
- buf := make([]byte, len(line))
- copy(buf, line)
- line = buf
- }
- return line, err
-}
-
-func (r *Reader) readLineSlice() ([]byte, error) {
- r.closeDot()
- var line []byte
- for {
- l, more, err := r.R.ReadLine()
- if err != nil {
- return nil, err
- }
- // Avoid the copy if the first call produced a full line.
- if line == nil && !more {
- return l, nil
- }
- line = append(line, l...)
- if !more {
- break
- }
- }
- return line, nil
-}
-
-// ReadContinuedLine reads a possibly continued line from r,
-// eliding the final trailing ASCII white space.
-// Lines after the first are considered continuations if they
-// begin with a space or tab character. In the returned data,
-// continuation lines are separated from the previous line
-// only by a single space: the newline and leading white space
-// are removed.
-//
-// For example, consider this input:
-//
-// Line 1
-// continued...
-// Line 2
-//
-// The first call to ReadContinuedLine will return "Line 1 continued..."
-// and the second will return "Line 2".
-//
-// Empty lines are never continued.
-//
-func (r *Reader) ReadContinuedLine() (string, error) {
- line, err := r.readContinuedLineSlice(noValidation)
- return string(line), err
-}
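
For example, feeding the doc comment's sample input through an in-memory reader:

	package main

	import (
		"bufio"
		"fmt"
		"net/textproto"
		"strings"
	)

	func main() {
		input := "Line 1\n  continued...\nLine 2\n"
		r := textproto.NewReader(bufio.NewReader(strings.NewReader(input)))
		for {
			line, err := r.ReadContinuedLine()
			if err != nil {
				break // io.EOF once the input is exhausted
			}
			fmt.Printf("%q\n", line)
		}
		// Prints:
		// "Line 1 continued..."
		// "Line 2"
	}
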
-
-// trim returns s with leading and trailing spaces and tabs removed.
-// It does not assume Unicode or UTF-8.
-func trim(s []byte) []byte {
- i := 0
- for i < len(s) && (s[i] == ' ' || s[i] == '\t') {
- i++
- }
- n := len(s)
- for n > i && (s[n-1] == ' ' || s[n-1] == '\t') {
- n--
- }
- return s[i:n]
-}
-
-// ReadContinuedLineBytes is like ReadContinuedLine but
-// returns a []byte instead of a string.
-func (r *Reader) ReadContinuedLineBytes() ([]byte, error) {
- line, err := r.readContinuedLineSlice(noValidation)
- if line != nil {
- buf := make([]byte, len(line))
- copy(buf, line)
- line = buf
- }
- return line, err
-}
-
-// readContinuedLineSlice reads continued lines from the reader buffer,
-// returning a byte slice with all lines. The validateFirstLine function
-// is run on the first read line, and if it returns an error then this
-// error is returned from readContinuedLineSlice.
-func (r *Reader) readContinuedLineSlice(validateFirstLine func([]byte) error) ([]byte, error) {
- if validateFirstLine == nil {
- return nil, fmt.Errorf("missing validateFirstLine func")
- }
-
- // Read the first line.
- line, err := r.readLineSlice()
- if err != nil {
- return nil, err
- }
- if len(line) == 0 { // blank line - no continuation
- return line, nil
- }
-
- if err := validateFirstLine(line); err != nil {
- return nil, err
- }
-
- // Optimistically assume that we have started to buffer the next line
- // and it starts with an ASCII letter (the next header key), or a blank
- // line, so we can avoid copying that buffered data around in memory
- // and skipping over non-existent whitespace.
- if r.R.Buffered() > 1 {
- peek, _ := r.R.Peek(2)
- if len(peek) > 0 && (isASCIILetter(peek[0]) || peek[0] == '\n') ||
- len(peek) == 2 && peek[0] == '\r' && peek[1] == '\n' {
- return trim(line), nil
- }
- }
-
- // ReadByte or the next readLineSlice will flush the read buffer;
- // copy the slice into buf.
- r.buf = append(r.buf[:0], trim(line)...)
-
- // Read continuation lines.
- for r.skipSpace() > 0 {
- line, err := r.readLineSlice()
- if err != nil {
- break
- }
- r.buf = append(r.buf, ' ')
- r.buf = append(r.buf, trim(line)...)
- }
- return r.buf, nil
-}
-
-// skipSpace skips R over all spaces and returns the number of bytes skipped.
-func (r *Reader) skipSpace() int {
- n := 0
- for {
- c, err := r.R.ReadByte()
- if err != nil {
- // Bufio will keep err until next read.
- break
- }
- if c != ' ' && c != '\t' {
- r.R.UnreadByte()
- break
- }
- n++
- }
- return n
-}
-
-func (r *Reader) readCodeLine(expectCode int) (code int, continued bool, message string, err error) {
- line, err := r.ReadLine()
- if err != nil {
- return
- }
- return parseCodeLine(line, expectCode)
-}
-
-func parseCodeLine(line string, expectCode int) (code int, continued bool, message string, err error) {
- if len(line) < 4 || line[3] != ' ' && line[3] != '-' {
- err = ProtocolError("short response: " + line)
- return
- }
- continued = line[3] == '-'
- code, err = strconv.Atoi(line[0:3])
- if err != nil || code < 100 {
- err = ProtocolError("invalid response code: " + line)
- return
- }
- message = line[4:]
- if 1 <= expectCode && expectCode < 10 && code/100 != expectCode ||
- 10 <= expectCode && expectCode < 100 && code/10 != expectCode ||
- 100 <= expectCode && expectCode < 1000 && code != expectCode {
- err = &Error{code, message}
- }
- return
-}
-
-// ReadCodeLine reads a response code line of the form
-// code message
-// where code is a three-digit status code and the message
-// extends to the rest of the line. An example of such a line is:
-// 220 plan9.bell-labs.com ESMTP
-//
-// If the prefix of the status does not match the digits in expectCode,
-// ReadCodeLine returns with err set to &Error{code, message}.
-// For example, if expectCode is 31, an error will be returned if
-// the status is not in the range [310,319].
-//
-// If the response is multi-line, ReadCodeLine returns an error.
-//
-// An expectCode <= 0 disables the check of the status code.
-//
-func (r *Reader) ReadCodeLine(expectCode int) (code int, message string, err error) {
- code, continued, message, err := r.readCodeLine(expectCode)
- if err == nil && continued {
- err = ProtocolError("unexpected multi-line response: " + message)
- }
- return
-}
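
For example, validating an SMTP-style greeting against the 2xx class (expectCode 2 accepts 200-299, 22 would accept 220-229, and 220 only 220):

	package main

	import (
		"bufio"
		"fmt"
		"net/textproto"
		"strings"
	)

	func main() {
		greeting := "220 plan9.bell-labs.com ESMTP\r\n"
		r := textproto.NewReader(bufio.NewReader(strings.NewReader(greeting)))
		code, msg, err := r.ReadCodeLine(2)
		fmt.Println(code, msg, err) // 220 plan9.bell-labs.com ESMTP <nil>
	}
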
-
-// ReadResponse reads a multi-line response of the form:
-//
-// code-message line 1
-// code-message line 2
-// ...
-// code message line n
-//
-// where code is a three-digit status code. The first line starts with the
-// code and a hyphen. The response is terminated by a line that starts
-// with the same code followed by a space. Each line in message is
-// separated by a newline (\n).
-//
-// See page 36 of RFC 959 (https://www.ietf.org/rfc/rfc959.txt) for
-// details of another form of response accepted:
-//
-// code-message line 1
-// message line 2
-// ...
-// code message line n
-//
-// If the prefix of the status does not match the digits in expectCode,
-// ReadResponse returns with err set to &Error{code, message}.
-// For example, if expectCode is 31, an error will be returned if
-// the status is not in the range [310,319].
-//
-// An expectCode <= 0 disables the check of the status code.
-//
-func (r *Reader) ReadResponse(expectCode int) (code int, message string, err error) {
- code, continued, message, err := r.readCodeLine(expectCode)
- multi := continued
- for continued {
- line, err := r.ReadLine()
- if err != nil {
- return 0, "", err
- }
-
- var code2 int
- var moreMessage string
- code2, continued, moreMessage, err = parseCodeLine(line, 0)
- if err != nil || code2 != code {
- message += "\n" + strings.TrimRight(line, "\r\n")
- continued = true
- continue
- }
- message += "\n" + moreMessage
- }
- if err != nil && multi && message != "" {
- // replace one line error message with all lines (full message)
- err = &Error{code, message}
- }
- return
-}
-
-// DotReader returns a new Reader that satisfies Reads using the
-// decoded text of a dot-encoded block read from r.
-// The returned Reader is only valid until the next call
-// to a method on r.
-//
-// Dot encoding is a common framing used for data blocks
-// in text protocols such as SMTP. The data consists of a sequence
-// of lines, each of which ends in "\r\n". The sequence itself
-// ends at a line containing just a dot: ".\r\n". Lines beginning
-// with a dot are escaped with an additional dot to avoid
-// looking like the end of the sequence.
-//
-// The decoded form returned by the Reader's Read method
-// rewrites the "\r\n" line endings into the simpler "\n",
-// removes leading dot escapes if present, and stops with error io.EOF
-// after consuming (and discarding) the end-of-sequence line.
-func (r *Reader) DotReader() io.Reader {
- r.closeDot()
- r.dot = &dotReader{r: r}
- return r.dot
-}
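
A small end-to-end sketch of the decoding described above, using an in-memory dot-encoded block:

	package main

	import (
		"bufio"
		"fmt"
		"io"
		"net/textproto"
		"strings"
	)

	func main() {
		encoded := "Hello\r\n..leading dot unescaped\r\n.\r\n"
		r := textproto.NewReader(bufio.NewReader(strings.NewReader(encoded)))
		body, err := io.ReadAll(r.DotReader()) // stops cleanly at the ".\r\n" line
		if err != nil {
			panic(err)
		}
		fmt.Printf("%q\n", body) // "Hello\n.leading dot unescaped\n"
	}
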
-
-type dotReader struct {
- r *Reader
- state int
-}
-
-// Read satisfies reads by decoding dot-encoded data read from d.r.
-func (d *dotReader) Read(b []byte) (n int, err error) {
- // Run data through a simple state machine to
- // elide leading dots, rewrite trailing \r\n into \n,
- // and detect ending .\r\n line.
- const (
- stateBeginLine = iota // beginning of line; initial state; must be zero
- stateDot // read . at beginning of line
- stateDotCR // read .\r at beginning of line
- stateCR // read \r (possibly at end of line)
- stateData // reading data in middle of line
- stateEOF // reached .\r\n end marker line
- )
- br := d.r.R
- for n < len(b) && d.state != stateEOF {
- var c byte
- c, err = br.ReadByte()
- if err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- break
- }
- switch d.state {
- case stateBeginLine:
- if c == '.' {
- d.state = stateDot
- continue
- }
- if c == '\r' {
- d.state = stateCR
- continue
- }
- d.state = stateData
-
- case stateDot:
- if c == '\r' {
- d.state = stateDotCR
- continue
- }
- if c == '\n' {
- d.state = stateEOF
- continue
- }
- d.state = stateData
-
- case stateDotCR:
- if c == '\n' {
- d.state = stateEOF
- continue
- }
- // Not part of .\r\n.
- // Consume leading dot and emit saved \r.
- br.UnreadByte()
- c = '\r'
- d.state = stateData
-
- case stateCR:
- if c == '\n' {
- d.state = stateBeginLine
- break
- }
- // Not part of \r\n. Emit saved \r
- br.UnreadByte()
- c = '\r'
- d.state = stateData
-
- case stateData:
- if c == '\r' {
- d.state = stateCR
- continue
- }
- if c == '\n' {
- d.state = stateBeginLine
- }
- }
- b[n] = c
- n++
- }
- if err == nil && d.state == stateEOF {
- err = io.EOF
- }
- if err != nil && d.r.dot == d {
- d.r.dot = nil
- }
- return
-}
-
-// closeDot drains the current DotReader if any,
-// making sure that it reads until the ending dot line.
-func (r *Reader) closeDot() {
- if r.dot == nil {
- return
- }
- buf := make([]byte, 128)
- for r.dot != nil {
- // When Read reaches EOF or an error,
- // it will set r.dot == nil.
- r.dot.Read(buf)
- }
-}
-
-// ReadDotBytes reads a dot-encoding and returns the decoded data.
-//
-// See the documentation for the DotReader method for details about dot-encoding.
-func (r *Reader) ReadDotBytes() ([]byte, error) {
- return io.ReadAll(r.DotReader())
-}
-
-// ReadDotLines reads a dot-encoding and returns a slice
-// containing the decoded lines, with the final \r\n or \n elided from each.
-//
-// See the documentation for the DotReader method for details about dot-encoding.
-func (r *Reader) ReadDotLines() ([]string, error) {
- // We could use ReadDotBytes and then Split it,
- // but reading a line at a time avoids needing a
- // large contiguous block of memory and is simpler.
- var v []string
- var err error
- for {
- var line string
- line, err = r.ReadLine()
- if err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- break
- }
-
- // Dot by itself marks end; otherwise cut one dot.
- if len(line) > 0 && line[0] == '.' {
- if len(line) == 1 {
- break
- }
- line = line[1:]
- }
- v = append(v, line)
- }
- return v, err
-}
-
-var colon = []byte(":")
-
-// ReadMIMEHeader reads a MIME-style header from r.
-// The header is a sequence of possibly continued Key: Value lines
-// ending in a blank line.
-// The returned map m maps CanonicalMIMEHeaderKey(key) to a
-// sequence of values in the same order encountered in the input.
-//
-// For example, consider this input:
-//
-// My-Key: Value 1
-// Long-Key: Even
-// Longer Value
-// My-Key: Value 2
-//
-// Given that input, ReadMIMEHeader returns the map:
-//
-// map[string][]string{
-// "My-Key": {"Value 1", "Value 2"},
-// "Long-Key": {"Even Longer Value"},
-// }
-//
-func (r *Reader) ReadMIMEHeader() (MIMEHeader, error) {
- // Avoid lots of small slice allocations later by allocating one
- // large one ahead of time which we'll cut up into smaller
- // slices. If this isn't big enough later, we allocate small ones.
- var strs []string
- hint := r.upcomingHeaderNewlines()
- if hint > 0 {
- strs = make([]string, hint)
- }
-
- m := make(MIMEHeader, hint)
-
- // The first line cannot start with a leading space.
- if buf, err := r.R.Peek(1); err == nil && (buf[0] == ' ' || buf[0] == '\t') {
- line, err := r.readLineSlice()
- if err != nil {
- return m, err
- }
- return m, ProtocolError("malformed MIME header initial line: " + string(line))
- }
-
- for {
- kv, err := r.readContinuedLineSlice(mustHaveFieldNameColon)
- if len(kv) == 0 {
- return m, err
- }
-
- // Key ends at first colon.
- k, v, ok := bytes.Cut(kv, colon)
- if !ok {
- return m, ProtocolError("malformed MIME header line: " + string(kv))
- }
- key := canonicalMIMEHeaderKey(k)
-
-		// Per RFC 7230, a field-name is a token, and tokens consist of one or more characters.
- // We could return a ProtocolError here, but better to be liberal in what we
- // accept, so if we get an empty key, skip it.
- if key == "" {
- continue
- }
-
- // Skip initial spaces in value.
- value := strings.TrimLeft(string(v), " \t")
-
- vv := m[key]
- if vv == nil && len(strs) > 0 {
- // More than likely this will be a single-element key.
- // Most headers aren't multi-valued.
- // Set the capacity on strs[0] to 1, so any future append
- // won't extend the slice into the other strings.
- vv, strs = strs[:1:1], strs[1:]
- vv[0] = value
- m[key] = vv
- } else {
- m[key] = append(vv, value)
- }
-
- if err != nil {
- return m, err
- }
- }
-}
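
A runnable sketch using the doc comment's sample header:

	package main

	import (
		"bufio"
		"fmt"
		"net/textproto"
		"strings"
	)

	func main() {
		raw := "My-Key: Value 1\r\nLong-Key: Even\r\n  Longer Value\r\nMy-Key: Value 2\r\n\r\n"
		r := textproto.NewReader(bufio.NewReader(strings.NewReader(raw)))
		h, err := r.ReadMIMEHeader()
		if err != nil {
			panic(err)
		}
		fmt.Println(h["My-Key"])   // [Value 1 Value 2]
		fmt.Println(h["Long-Key"]) // [Even Longer Value]
	}
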
-
-// noValidation is a no-op validation func for readContinuedLineSlice
-// that permits any lines.
-func noValidation(_ []byte) error { return nil }
-
-// mustHaveFieldNameColon ensures that, per RFC 7230, the
-// field-name is on a single line, so the first line must
-// contain a colon.
-func mustHaveFieldNameColon(line []byte) error {
- if bytes.IndexByte(line, ':') < 0 {
- return ProtocolError(fmt.Sprintf("malformed MIME header: missing colon: %q", line))
- }
- return nil
-}
-
-var nl = []byte("\n")
-
-// upcomingHeaderNewlines returns an approximation of the number of newlines
-// that will be in this header. If it gets confused, it returns 0.
-func (r *Reader) upcomingHeaderNewlines() (n int) {
- // Try to determine the 'hint' size.
- r.R.Peek(1) // force a buffer load if empty
- s := r.R.Buffered()
- if s == 0 {
- return
- }
- peek, _ := r.R.Peek(s)
- return bytes.Count(peek, nl)
-}
-
-// CanonicalMIMEHeaderKey returns the canonical format of the
-// MIME header key s. The canonicalization converts the first
-// letter and any letter following a hyphen to upper case;
-// the rest are converted to lowercase. For example, the
-// canonical key for "accept-encoding" is "Accept-Encoding".
-// MIME header keys are assumed to be ASCII only.
-// If s contains a space or invalid header field bytes, it is
-// returned without modifications.
-func CanonicalMIMEHeaderKey(s string) string {
- commonHeaderOnce.Do(initCommonHeader)
-
- // Quick check for canonical encoding.
- upper := true
- for i := 0; i < len(s); i++ {
- c := s[i]
- if !validHeaderFieldByte(c) {
- return s
- }
- if upper && 'a' <= c && c <= 'z' {
- return canonicalMIMEHeaderKey([]byte(s))
- }
- if !upper && 'A' <= c && c <= 'Z' {
- return canonicalMIMEHeaderKey([]byte(s))
- }
- upper = c == '-'
- }
- return s
-}
-
-const toLower = 'a' - 'A'
-
-// validHeaderFieldByte reports whether b is a valid byte in a header
-// field name. RFC 7230 says:
-// header-field = field-name ":" OWS field-value OWS
-// field-name = token
-// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
-// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
-// token = 1*tchar
-func validHeaderFieldByte(b byte) bool {
- return int(b) < len(isTokenTable) && isTokenTable[b]
-}
-
-// canonicalMIMEHeaderKey is like CanonicalMIMEHeaderKey but is
-// allowed to mutate the provided byte slice before returning the
-// string.
-//
-// For invalid inputs (if a contains spaces or non-token bytes), a
-// is unchanged and a string copy is returned.
-func canonicalMIMEHeaderKey(a []byte) string {
- // See if a looks like a header key. If not, return it unchanged.
- for _, c := range a {
- if validHeaderFieldByte(c) {
- continue
- }
- // Don't canonicalize.
- return string(a)
- }
-
- upper := true
- for i, c := range a {
- // Canonicalize: first letter upper case
- // and upper case after each dash.
- // (Host, User-Agent, If-Modified-Since).
- // MIME headers are ASCII only, so no Unicode issues.
- if upper && 'a' <= c && c <= 'z' {
- c -= toLower
- } else if !upper && 'A' <= c && c <= 'Z' {
- c += toLower
- }
- a[i] = c
- upper = c == '-' // for next time
- }
- // The compiler recognizes m[string(byteSlice)] as a special
- // case, so a copy of a's bytes into a new string does not
- // happen in this map lookup:
- if v := commonHeader[string(a)]; v != "" {
- return v
- }
- return string(a)
-}
-
-// commonHeader interns common header strings.
-var commonHeader map[string]string
-
-var commonHeaderOnce sync.Once
-
-func initCommonHeader() {
- commonHeader = make(map[string]string)
- for _, v := range []string{
- "Accept",
- "Accept-Charset",
- "Accept-Encoding",
- "Accept-Language",
- "Accept-Ranges",
- "Cache-Control",
- "Cc",
- "Connection",
- "Content-Id",
- "Content-Language",
- "Content-Length",
- "Content-Transfer-Encoding",
- "Content-Type",
- "Cookie",
- "Date",
- "Dkim-Signature",
- "Etag",
- "Expires",
- "From",
- "Host",
- "If-Modified-Since",
- "If-None-Match",
- "In-Reply-To",
- "Last-Modified",
- "Location",
- "Message-Id",
- "Mime-Version",
- "Pragma",
- "Received",
- "Return-Path",
- "Server",
- "Set-Cookie",
- "Subject",
- "To",
- "User-Agent",
- "Via",
- "X-Forwarded-For",
- "X-Imforwards",
- "X-Powered-By",
- } {
- commonHeader[v] = v
- }
-}
-
-// isTokenTable is a copy of net/http/lex.go's isTokenTable.
-// See https://httpwg.github.io/specs/rfc7230.html#rule.token.separators
-var isTokenTable = [127]bool{
- '!': true,
- '#': true,
- '$': true,
- '%': true,
- '&': true,
- '\'': true,
- '*': true,
- '+': true,
- '-': true,
- '.': true,
- '0': true,
- '1': true,
- '2': true,
- '3': true,
- '4': true,
- '5': true,
- '6': true,
- '7': true,
- '8': true,
- '9': true,
- 'A': true,
- 'B': true,
- 'C': true,
- 'D': true,
- 'E': true,
- 'F': true,
- 'G': true,
- 'H': true,
- 'I': true,
- 'J': true,
- 'K': true,
- 'L': true,
- 'M': true,
- 'N': true,
- 'O': true,
- 'P': true,
- 'Q': true,
- 'R': true,
- 'S': true,
- 'T': true,
- 'U': true,
-	'V': true,
-	'W': true,
- 'X': true,
- 'Y': true,
- 'Z': true,
- '^': true,
- '_': true,
- '`': true,
- 'a': true,
- 'b': true,
- 'c': true,
- 'd': true,
- 'e': true,
- 'f': true,
- 'g': true,
- 'h': true,
- 'i': true,
- 'j': true,
- 'k': true,
- 'l': true,
- 'm': true,
- 'n': true,
- 'o': true,
- 'p': true,
- 'q': true,
- 'r': true,
- 's': true,
- 't': true,
- 'u': true,
- 'v': true,
- 'w': true,
- 'x': true,
- 'y': true,
- 'z': true,
- '|': true,
- '~': true,
-}
diff --git a/contrib/go/_std_1.18/src/net/textproto/textproto.go b/contrib/go/_std_1.18/src/net/textproto/textproto.go
deleted file mode 100644
index cc1a847e4e..0000000000
--- a/contrib/go/_std_1.18/src/net/textproto/textproto.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package textproto implements generic support for text-based request/response
-// protocols in the style of HTTP, NNTP, and SMTP.
-//
-// The package provides:
-//
-// Error, which represents a numeric error response from
-// a server.
-//
-// Pipeline, to manage pipelined requests and responses
-// in a client.
-//
-// Reader, to read numeric response code lines,
-// key: value headers, lines wrapped with leading spaces
-// on continuation lines, and whole text blocks ending
-// with a dot on a line by itself.
-//
-// Writer, to write dot-encoded text blocks.
-//
-// Conn, a convenient packaging of Reader, Writer, and Pipeline for use
-// with a single network connection.
-//
-package textproto
-
-import (
- "bufio"
- "fmt"
- "io"
- "net"
-)
-
-// An Error represents a numeric error response from a server.
-type Error struct {
- Code int
- Msg string
-}
-
-func (e *Error) Error() string {
- return fmt.Sprintf("%03d %s", e.Code, e.Msg)
-}
-
-// A ProtocolError describes a protocol violation such
-// as an invalid response or a hung-up connection.
-type ProtocolError string
-
-func (p ProtocolError) Error() string {
- return string(p)
-}
-
-// A Conn represents a textual network protocol connection.
-// It consists of a Reader and Writer to manage I/O
-// and a Pipeline to sequence concurrent requests on the connection.
-// These embedded types carry methods with them;
-// see the documentation of those types for details.
-type Conn struct {
- Reader
- Writer
- Pipeline
- conn io.ReadWriteCloser
-}
-
-// NewConn returns a new Conn using conn for I/O.
-func NewConn(conn io.ReadWriteCloser) *Conn {
- return &Conn{
- Reader: Reader{R: bufio.NewReader(conn)},
- Writer: Writer{W: bufio.NewWriter(conn)},
- conn: conn,
- }
-}
-
-// Close closes the connection.
-func (c *Conn) Close() error {
- return c.conn.Close()
-}
-
-// Dial connects to the given address on the given network using net.Dial
-// and then returns a new Conn for the connection.
-func Dial(network, addr string) (*Conn, error) {
- c, err := net.Dial(network, addr)
- if err != nil {
- return nil, err
- }
- return NewConn(c), nil
-}
-
-// Cmd is a convenience method that sends a command after
-// waiting its turn in the pipeline. The command text is the
-// result of formatting format with args and appending \r\n.
-// Cmd returns the id of the command, for use with StartResponse and EndResponse.
-//
-// For example, a client might run a HELP command that returns a dot-body
-// by using:
-//
-// id, err := c.Cmd("HELP")
-// if err != nil {
-// return nil, err
-// }
-//
-// c.StartResponse(id)
-// defer c.EndResponse(id)
-//
-// if _, _, err = c.ReadCodeLine(110); err != nil {
-// return nil, err
-// }
-// text, err := c.ReadDotBytes()
-// if err != nil {
-// return nil, err
-// }
-// return c.ReadCodeLine(250)
-//
-func (c *Conn) Cmd(format string, args ...any) (id uint, err error) {
- id = c.Next()
- c.StartRequest(id)
- err = c.PrintfLine(format, args...)
- c.EndRequest(id)
- if err != nil {
- return 0, err
- }
- return id, nil
-}
-
-// TrimString returns s without leading and trailing ASCII space.
-func TrimString(s string) string {
- for len(s) > 0 && isASCIISpace(s[0]) {
- s = s[1:]
- }
- for len(s) > 0 && isASCIISpace(s[len(s)-1]) {
- s = s[:len(s)-1]
- }
- return s
-}
-
-// TrimBytes returns b without leading and trailing ASCII space.
-func TrimBytes(b []byte) []byte {
- for len(b) > 0 && isASCIISpace(b[0]) {
- b = b[1:]
- }
- for len(b) > 0 && isASCIISpace(b[len(b)-1]) {
- b = b[:len(b)-1]
- }
- return b
-}
-
-func isASCIISpace(b byte) bool {
- return b == ' ' || b == '\t' || b == '\n' || b == '\r'
-}
-
-func isASCIILetter(b byte) bool {
- b |= 0x20 // make lower case
- return 'a' <= b && b <= 'z'
-}
diff --git a/contrib/go/_std_1.18/src/net/udpsock.go b/contrib/go/_std_1.18/src/net/udpsock.go
deleted file mode 100644
index 6d29a39edf..0000000000
--- a/contrib/go/_std_1.18/src/net/udpsock.go
+++ /dev/null
@@ -1,364 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package net
-
-import (
- "context"
- "internal/itoa"
- "net/netip"
- "syscall"
-)
-
-// BUG(mikio): On Plan 9, the ReadMsgUDP and
-// WriteMsgUDP methods of UDPConn are not implemented.
-
-// BUG(mikio): On Windows, the File method of UDPConn is not
-// implemented.
-
-// BUG(mikio): On JS, methods and functions related to UDPConn are not
-// implemented.
-
-// UDPAddr represents the address of a UDP end point.
-type UDPAddr struct {
- IP IP
- Port int
- Zone string // IPv6 scoped addressing zone
-}
-
-// AddrPort returns the UDPAddr a as a netip.AddrPort.
-//
-// If a.Port does not fit in a uint16, it's silently truncated.
-//
-// If a is nil, a zero value is returned.
-func (a *UDPAddr) AddrPort() netip.AddrPort {
- if a == nil {
- return netip.AddrPort{}
- }
- na, _ := netip.AddrFromSlice(a.IP)
- na = na.WithZone(a.Zone)
- return netip.AddrPortFrom(na, uint16(a.Port))
-}
-
-// Network returns the address's network name, "udp".
-func (a *UDPAddr) Network() string { return "udp" }
-
-func (a *UDPAddr) String() string {
- if a == nil {
- return "<nil>"
- }
- ip := ipEmptyString(a.IP)
- if a.Zone != "" {
- return JoinHostPort(ip+"%"+a.Zone, itoa.Itoa(a.Port))
- }
- return JoinHostPort(ip, itoa.Itoa(a.Port))
-}
-
-func (a *UDPAddr) isWildcard() bool {
- if a == nil || a.IP == nil {
- return true
- }
- return a.IP.IsUnspecified()
-}
-
-func (a *UDPAddr) opAddr() Addr {
- if a == nil {
- return nil
- }
- return a
-}
-
-// ResolveUDPAddr returns an address of a UDP end point.
-//
-// The network must be a UDP network name.
-//
-// If the host in the address parameter is not a literal IP address or
-// the port is not a literal port number, ResolveUDPAddr resolves the
-// address to an address of UDP end point.
-// Otherwise, it parses the address as a pair of literal IP address
-// and port number.
-// The address parameter can use a host name, but this is not
-// recommended, because it will return at most one of the host name's
-// IP addresses.
-//
-// See func Dial for a description of the network and address
-// parameters.
-func ResolveUDPAddr(network, address string) (*UDPAddr, error) {
- switch network {
- case "udp", "udp4", "udp6":
- case "": // a hint wildcard for Go 1.0 undocumented behavior
- network = "udp"
- default:
- return nil, UnknownNetworkError(network)
- }
- addrs, err := DefaultResolver.internetAddrList(context.Background(), network, address)
- if err != nil {
- return nil, err
- }
- return addrs.forResolve(network, address).(*UDPAddr), nil
-}
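
For example, with a literal IP and port, where no resolver round trip is needed:

	package main

	import (
		"fmt"
		"net"
	)

	func main() {
		addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:53")
		if err != nil {
			panic(err)
		}
		fmt.Println(addr.IP, addr.Port) // 127.0.0.1 53
	}
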
-
-// UDPAddrFromAddrPort returns addr as a UDPAddr. If addr.IsValid() is false,
-// then the returned UDPAddr will contain a nil IP field, indicating an
-// address family-agnostic unspecified address.
-func UDPAddrFromAddrPort(addr netip.AddrPort) *UDPAddr {
- return &UDPAddr{
- IP: addr.Addr().AsSlice(),
- Zone: addr.Addr().Zone(),
- Port: int(addr.Port()),
- }
-}
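
A round-trip sketch between the two address representations:

	package main

	import (
		"fmt"
		"net"
		"net/netip"
	)

	func main() {
		ap := netip.MustParseAddrPort("192.0.2.1:4242")
		ua := net.UDPAddrFromAddrPort(ap)
		fmt.Println(ua.String())         // 192.0.2.1:4242
		fmt.Println(ua.AddrPort() == ap) // true
	}
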
-
-// An addrPortUDPAddr is a netip.AddrPort-based UDP address that satisfies the Addr interface.
-type addrPortUDPAddr struct {
- netip.AddrPort
-}
-
-func (addrPortUDPAddr) Network() string { return "udp" }
-
-// UDPConn is the implementation of the Conn and PacketConn interfaces
-// for UDP network connections.
-type UDPConn struct {
- conn
-}
-
-// SyscallConn returns a raw network connection.
-// This implements the syscall.Conn interface.
-func (c *UDPConn) SyscallConn() (syscall.RawConn, error) {
- if !c.ok() {
- return nil, syscall.EINVAL
- }
- return newRawConn(c.fd)
-}
-
-// ReadFromUDP acts like ReadFrom but returns a UDPAddr.
-func (c *UDPConn) ReadFromUDP(b []byte) (n int, addr *UDPAddr, err error) {
- // This function is designed to allow the caller to control the lifetime
- // of the returned *UDPAddr and thereby prevent an allocation.
- // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/.
- // The real work is done by readFromUDP, below.
- return c.readFromUDP(b, &UDPAddr{})
-}
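
A receive-loop sketch; the value-returning ReadFromUDPAddrPort sidesteps the escape question entirely, while ReadFromUDP stays allocation-free only as long as the returned *UDPAddr does not escape:

	package main

	import (
		"log"
		"net"
	)

	func main() {
		conn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)})
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		buf := make([]byte, 1500)
		for {
			// netip.AddrPort is a value type, so no per-read allocation
			// is needed for the source address.
			n, src, err := conn.ReadFromUDPAddrPort(buf)
			if err != nil {
				log.Fatal(err)
			}
			log.Printf("%d bytes from %v", n, src)
		}
	}
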
-
-// readFromUDP implements ReadFromUDP.
-func (c *UDPConn) readFromUDP(b []byte, addr *UDPAddr) (int, *UDPAddr, error) {
- if !c.ok() {
- return 0, nil, syscall.EINVAL
- }
- n, addr, err := c.readFrom(b, addr)
- if err != nil {
- err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return n, addr, err
-}
-
-// ReadFrom implements the PacketConn ReadFrom method.
-func (c *UDPConn) ReadFrom(b []byte) (int, Addr, error) {
- n, addr, err := c.readFromUDP(b, &UDPAddr{})
- if addr == nil {
-		// Return Addr(nil), not Addr(*UDPAddr(nil)).
- return n, nil, err
- }
- return n, addr, err
-}
-
-// ReadFromUDPAddrPort acts like ReadFrom but returns a netip.AddrPort.
-func (c *UDPConn) ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPort, err error) {
- if !c.ok() {
- return 0, netip.AddrPort{}, syscall.EINVAL
- }
- n, addr, err = c.readFromAddrPort(b)
- if err != nil {
- err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return n, addr, err
-}
-
-// ReadMsgUDP reads a message from c, copying the payload into b and
-// the associated out-of-band data into oob. It returns the number of
-// bytes copied into b, the number of bytes copied into oob, the flags
-// that were set on the message and the source address of the message.
-//
-// The packages golang.org/x/net/ipv4 and golang.org/x/net/ipv6 can be
-// used to manipulate IP-level socket options in oob.
-func (c *UDPConn) ReadMsgUDP(b, oob []byte) (n, oobn, flags int, addr *UDPAddr, err error) {
- var ap netip.AddrPort
- n, oobn, flags, ap, err = c.ReadMsgUDPAddrPort(b, oob)
- if ap.IsValid() {
- addr = UDPAddrFromAddrPort(ap)
- }
- return
-}
-
-// ReadMsgUDPAddrPort is like ReadMsgUDP but returns a netip.AddrPort instead of a UDPAddr.
-func (c *UDPConn) ReadMsgUDPAddrPort(b, oob []byte) (n, oobn, flags int, addr netip.AddrPort, err error) {
- if !c.ok() {
- return 0, 0, 0, netip.AddrPort{}, syscall.EINVAL
- }
- n, oobn, flags, addr, err = c.readMsg(b, oob)
- if err != nil {
- err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return
-}
-
-// WriteToUDP acts like WriteTo but takes a UDPAddr.
-func (c *UDPConn) WriteToUDP(b []byte, addr *UDPAddr) (int, error) {
- if !c.ok() {
- return 0, syscall.EINVAL
- }
- n, err := c.writeTo(b, addr)
- if err != nil {
- err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addr.opAddr(), Err: err}
- }
- return n, err
-}
-
-// WriteToUDPAddrPort acts like WriteTo but takes a netip.AddrPort.
-func (c *UDPConn) WriteToUDPAddrPort(b []byte, addr netip.AddrPort) (int, error) {
- if !c.ok() {
- return 0, syscall.EINVAL
- }
- n, err := c.writeToAddrPort(b, addr)
- if err != nil {
- err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addrPortUDPAddr{addr}, Err: err}
- }
- return n, err
-}
-
-// WriteTo implements the PacketConn WriteTo method.
-func (c *UDPConn) WriteTo(b []byte, addr Addr) (int, error) {
- if !c.ok() {
- return 0, syscall.EINVAL
- }
- a, ok := addr.(*UDPAddr)
- if !ok {
- return 0, &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addr, Err: syscall.EINVAL}
- }
- n, err := c.writeTo(b, a)
- if err != nil {
- err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: a.opAddr(), Err: err}
- }
- return n, err
-}
-
-// WriteMsgUDP writes a message to addr via c if c isn't connected, or
-// to c's remote address if c is connected (in which case addr must be
-// nil). The payload is copied from b and the associated out-of-band
-// data is copied from oob. It returns the number of payload and
-// out-of-band bytes written.
-//
-// The packages golang.org/x/net/ipv4 and golang.org/x/net/ipv6 can be
-// used to manipulate IP-level socket options in oob.
-func (c *UDPConn) WriteMsgUDP(b, oob []byte, addr *UDPAddr) (n, oobn int, err error) {
- if !c.ok() {
- return 0, 0, syscall.EINVAL
- }
- n, oobn, err = c.writeMsg(b, oob, addr)
- if err != nil {
- err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addr.opAddr(), Err: err}
- }
- return
-}
-
-// WriteMsgUDPAddrPort is like WriteMsgUDP but takes a netip.AddrPort instead of a UDPAddr.
-func (c *UDPConn) WriteMsgUDPAddrPort(b, oob []byte, addr netip.AddrPort) (n, oobn int, err error) {
- if !c.ok() {
- return 0, 0, syscall.EINVAL
- }
- n, oobn, err = c.writeMsgAddrPort(b, oob, addr)
- if err != nil {
- err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addrPortUDPAddr{addr}, Err: err}
- }
- return
-}
-
-func newUDPConn(fd *netFD) *UDPConn { return &UDPConn{conn{fd}} }
-
-// DialUDP acts like Dial for UDP networks.
-//
-// The network must be a UDP network name; see func Dial for details.
-//
-// If laddr is nil, a local address is automatically chosen.
-// If the IP field of raddr is nil or an unspecified IP address, the
-// local system is assumed.
-func DialUDP(network string, laddr, raddr *UDPAddr) (*UDPConn, error) {
- switch network {
- case "udp", "udp4", "udp6":
- default:
- return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(network)}
- }
- if raddr == nil {
- return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress}
- }
- sd := &sysDialer{network: network, address: raddr.String()}
- c, err := sd.dialUDP(context.Background(), laddr, raddr)
- if err != nil {
- return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
- }
- return c, nil
-}
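
A minimal dial sketch; 127.0.0.1:9999 is an arbitrary stand-in, and a UDP write succeeds whether or not anything is listening:

	package main

	import (
		"log"
		"net"
	)

	func main() {
		raddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 9999}
		conn, err := net.DialUDP("udp", nil, raddr) // nil laddr: the kernel picks the source
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()
		if _, err := conn.Write([]byte("ping")); err != nil {
			log.Fatal(err)
		}
	}
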
-
-// ListenUDP acts like ListenPacket for UDP networks.
-//
-// The network must be a UDP network name; see func Dial for details.
-//
-// If the IP field of laddr is nil or an unspecified IP address,
-// ListenUDP listens on all available IP addresses of the local system
-// except multicast IP addresses.
-// If the Port field of laddr is 0, a port number is automatically
-// chosen.
-func ListenUDP(network string, laddr *UDPAddr) (*UDPConn, error) {
- switch network {
- case "udp", "udp4", "udp6":
- default:
- return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: UnknownNetworkError(network)}
- }
- if laddr == nil {
- laddr = &UDPAddr{}
- }
- sl := &sysListener{network: network, address: laddr.String()}
- c, err := sl.listenUDP(context.Background(), laddr)
- if err != nil {
- return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: err}
- }
- return c, nil
-}
-
-// ListenMulticastUDP acts like ListenPacket for UDP networks but
-// takes a group address on a specific network interface.
-//
-// The network must be a UDP network name; see func Dial for details.
-//
-// ListenMulticastUDP listens on all available IP addresses of the
-// local system including the group, multicast IP address.
-// If ifi is nil, ListenMulticastUDP uses the system-assigned
-// multicast interface, although this is not recommended because the
-// assignment depends on platforms and sometimes it might require
-// routing configuration.
-// If the Port field of gaddr is 0, a port number is automatically
-// chosen.
-//
-// ListenMulticastUDP is intended as a convenience for simple, small
-// applications. The golang.org/x/net/ipv4 and golang.org/x/net/ipv6
-// packages provide general-purpose support.
-//
-// Note that ListenMulticastUDP will set the IP_MULTICAST_LOOP socket option
-// to 0 under IPPROTO_IP, to disable loopback of multicast packets.
-func ListenMulticastUDP(network string, ifi *Interface, gaddr *UDPAddr) (*UDPConn, error) {
- switch network {
- case "udp", "udp4", "udp6":
- default:
- return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: gaddr.opAddr(), Err: UnknownNetworkError(network)}
- }
- if gaddr == nil || gaddr.IP == nil {
- return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: gaddr.opAddr(), Err: errMissingAddress}
- }
- sl := &sysListener{network: network, address: gaddr.String()}
- c, err := sl.listenMulticastUDP(context.Background(), ifi, gaddr)
- if err != nil {
- return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: gaddr.opAddr(), Err: err}
- }
- return c, nil
-}
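
A sketch joining a well-known group; 224.0.0.251:5353 (the mDNS group) is just an example, and the nil interface leaves the choice to the system, with the caveat noted above:

	package main

	import (
		"log"
		"net"
	)

	func main() {
		group := &net.UDPAddr{IP: net.IPv4(224, 0, 0, 251), Port: 5353}
		conn, err := net.ListenMulticastUDP("udp4", nil, group)
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		buf := make([]byte, 1500)
		n, src, err := conn.ReadFromUDP(buf)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("%d bytes from %v", n, src)
	}
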
diff --git a/contrib/go/_std_1.18/src/net/udpsock_posix.go b/contrib/go/_std_1.18/src/net/udpsock_posix.go
deleted file mode 100644
index 6544397673..0000000000
--- a/contrib/go/_std_1.18/src/net/udpsock_posix.go
+++ /dev/null
@@ -1,269 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris || windows
-
-package net
-
-import (
- "context"
- "net/netip"
- "syscall"
-)
-
-func sockaddrToUDP(sa syscall.Sockaddr) Addr {
- switch sa := sa.(type) {
- case *syscall.SockaddrInet4:
- return &UDPAddr{IP: sa.Addr[0:], Port: sa.Port}
- case *syscall.SockaddrInet6:
- return &UDPAddr{IP: sa.Addr[0:], Port: sa.Port, Zone: zoneCache.name(int(sa.ZoneId))}
- }
- return nil
-}
-
-func (a *UDPAddr) family() int {
- if a == nil || len(a.IP) <= IPv4len {
- return syscall.AF_INET
- }
- if a.IP.To4() != nil {
- return syscall.AF_INET
- }
- return syscall.AF_INET6
-}
-
-func (a *UDPAddr) sockaddr(family int) (syscall.Sockaddr, error) {
- if a == nil {
- return nil, nil
- }
- return ipToSockaddr(family, a.IP, a.Port, a.Zone)
-}
-
-func (a *UDPAddr) toLocal(net string) sockaddr {
- return &UDPAddr{loopbackIP(net), a.Port, a.Zone}
-}
-
-func (c *UDPConn) readFrom(b []byte, addr *UDPAddr) (int, *UDPAddr, error) {
- var n int
- var err error
- switch c.fd.family {
- case syscall.AF_INET:
- var from syscall.SockaddrInet4
- n, err = c.fd.readFromInet4(b, &from)
- if err == nil {
- ip := from.Addr // copy from.Addr; ip escapes, so this line allocates 4 bytes
- *addr = UDPAddr{IP: ip[:], Port: from.Port}
- }
- case syscall.AF_INET6:
- var from syscall.SockaddrInet6
- n, err = c.fd.readFromInet6(b, &from)
- if err == nil {
- ip := from.Addr // copy from.Addr; ip escapes, so this line allocates 16 bytes
- *addr = UDPAddr{IP: ip[:], Port: from.Port, Zone: zoneCache.name(int(from.ZoneId))}
- }
- }
- if err != nil {
- // No sockaddr, so don't return UDPAddr.
- addr = nil
- }
- return n, addr, err
-}
-
-func (c *UDPConn) readFromAddrPort(b []byte) (n int, addr netip.AddrPort, err error) {
- var ip netip.Addr
- var port int
- switch c.fd.family {
- case syscall.AF_INET:
- var from syscall.SockaddrInet4
- n, err = c.fd.readFromInet4(b, &from)
- if err == nil {
- ip = netip.AddrFrom4(from.Addr)
- port = from.Port
- }
- case syscall.AF_INET6:
- var from syscall.SockaddrInet6
- n, err = c.fd.readFromInet6(b, &from)
- if err == nil {
- ip = netip.AddrFrom16(from.Addr).WithZone(zoneCache.name(int(from.ZoneId)))
- port = from.Port
- }
- }
- if err == nil {
- addr = netip.AddrPortFrom(ip, uint16(port))
- }
- return n, addr, err
-}
-
-func (c *UDPConn) readMsg(b, oob []byte) (n, oobn, flags int, addr netip.AddrPort, err error) {
- switch c.fd.family {
- case syscall.AF_INET:
- var sa syscall.SockaddrInet4
- n, oobn, flags, err = c.fd.readMsgInet4(b, oob, 0, &sa)
- ip := netip.AddrFrom4(sa.Addr)
- addr = netip.AddrPortFrom(ip, uint16(sa.Port))
- case syscall.AF_INET6:
- var sa syscall.SockaddrInet6
- n, oobn, flags, err = c.fd.readMsgInet6(b, oob, 0, &sa)
- ip := netip.AddrFrom16(sa.Addr).WithZone(zoneCache.name(int(sa.ZoneId)))
- addr = netip.AddrPortFrom(ip, uint16(sa.Port))
- }
- return
-}
-
-func (c *UDPConn) writeTo(b []byte, addr *UDPAddr) (int, error) {
- if c.fd.isConnected {
- return 0, ErrWriteToConnected
- }
- if addr == nil {
- return 0, errMissingAddress
- }
-
- switch c.fd.family {
- case syscall.AF_INET:
- sa, err := ipToSockaddrInet4(addr.IP, addr.Port)
- if err != nil {
- return 0, err
- }
- return c.fd.writeToInet4(b, &sa)
- case syscall.AF_INET6:
- sa, err := ipToSockaddrInet6(addr.IP, addr.Port, addr.Zone)
- if err != nil {
- return 0, err
- }
- return c.fd.writeToInet6(b, &sa)
- default:
- return 0, &AddrError{Err: "invalid address family", Addr: addr.IP.String()}
- }
-}
-
-func (c *UDPConn) writeToAddrPort(b []byte, addr netip.AddrPort) (int, error) {
- if c.fd.isConnected {
- return 0, ErrWriteToConnected
- }
- if !addr.IsValid() {
- return 0, errMissingAddress
- }
-
- switch c.fd.family {
- case syscall.AF_INET:
- sa, err := addrPortToSockaddrInet4(addr)
- if err != nil {
- return 0, err
- }
- return c.fd.writeToInet4(b, &sa)
- case syscall.AF_INET6:
- sa, err := addrPortToSockaddrInet6(addr)
- if err != nil {
- return 0, err
- }
- return c.fd.writeToInet6(b, &sa)
- default:
- return 0, &AddrError{Err: "invalid address family", Addr: addr.Addr().String()}
- }
-}
-
-func (c *UDPConn) writeMsg(b, oob []byte, addr *UDPAddr) (n, oobn int, err error) {
- if c.fd.isConnected && addr != nil {
- return 0, 0, ErrWriteToConnected
- }
- if !c.fd.isConnected && addr == nil {
- return 0, 0, errMissingAddress
- }
- sa, err := addr.sockaddr(c.fd.family)
- if err != nil {
- return 0, 0, err
- }
- return c.fd.writeMsg(b, oob, sa)
-}
-
-func (c *UDPConn) writeMsgAddrPort(b, oob []byte, addr netip.AddrPort) (n, oobn int, err error) {
- if c.fd.isConnected && addr.IsValid() {
- return 0, 0, ErrWriteToConnected
- }
- if !c.fd.isConnected && !addr.IsValid() {
- return 0, 0, errMissingAddress
- }
-
- switch c.fd.family {
- case syscall.AF_INET:
- sa, err := addrPortToSockaddrInet4(addr)
- if err != nil {
- return 0, 0, err
- }
- return c.fd.writeMsgInet4(b, oob, &sa)
- case syscall.AF_INET6:
- sa, err := addrPortToSockaddrInet6(addr)
- if err != nil {
- return 0, 0, err
- }
- return c.fd.writeMsgInet6(b, oob, &sa)
- default:
- return 0, 0, &AddrError{Err: "invalid address family", Addr: addr.Addr().String()}
- }
-}
-
-func (sd *sysDialer) dialUDP(ctx context.Context, laddr, raddr *UDPAddr) (*UDPConn, error) {
- fd, err := internetSocket(ctx, sd.network, laddr, raddr, syscall.SOCK_DGRAM, 0, "dial", sd.Dialer.Control)
- if err != nil {
- return nil, err
- }
- return newUDPConn(fd), nil
-}
-
-func (sl *sysListener) listenUDP(ctx context.Context, laddr *UDPAddr) (*UDPConn, error) {
- fd, err := internetSocket(ctx, sl.network, laddr, nil, syscall.SOCK_DGRAM, 0, "listen", sl.ListenConfig.Control)
- if err != nil {
- return nil, err
- }
- return newUDPConn(fd), nil
-}
-
-func (sl *sysListener) listenMulticastUDP(ctx context.Context, ifi *Interface, gaddr *UDPAddr) (*UDPConn, error) {
- fd, err := internetSocket(ctx, sl.network, gaddr, nil, syscall.SOCK_DGRAM, 0, "listen", sl.ListenConfig.Control)
- if err != nil {
- return nil, err
- }
- c := newUDPConn(fd)
- if ip4 := gaddr.IP.To4(); ip4 != nil {
- if err := listenIPv4MulticastUDP(c, ifi, ip4); err != nil {
- c.Close()
- return nil, err
- }
- } else {
- if err := listenIPv6MulticastUDP(c, ifi, gaddr.IP); err != nil {
- c.Close()
- return nil, err
- }
- }
- return c, nil
-}
-
-func listenIPv4MulticastUDP(c *UDPConn, ifi *Interface, ip IP) error {
- if ifi != nil {
- if err := setIPv4MulticastInterface(c.fd, ifi); err != nil {
- return err
- }
- }
- if err := setIPv4MulticastLoopback(c.fd, false); err != nil {
- return err
- }
- if err := joinIPv4Group(c.fd, ifi, ip); err != nil {
- return err
- }
- return nil
-}
-
-func listenIPv6MulticastUDP(c *UDPConn, ifi *Interface, ip IP) error {
- if ifi != nil {
- if err := setIPv6MulticastInterface(c.fd, ifi); err != nil {
- return err
- }
- }
- if err := setIPv6MulticastLoopback(c.fd, false); err != nil {
- return err
- }
- if err := joinIPv6Group(c.fd, ifi, ip); err != nil {
- return err
- }
- return nil
-}
diff --git a/contrib/go/_std_1.18/src/net/unixsock_posix.go b/contrib/go/_std_1.18/src/net/unixsock_posix.go
deleted file mode 100644
index 1b69df53bf..0000000000
--- a/contrib/go/_std_1.18/src/net/unixsock_posix.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris || windows
-
-package net
-
-import (
- "context"
- "errors"
- "os"
- "syscall"
-)
-
-func unixSocket(ctx context.Context, net string, laddr, raddr sockaddr, mode string, ctrlFn func(string, string, syscall.RawConn) error) (*netFD, error) {
- var sotype int
- switch net {
- case "unix":
- sotype = syscall.SOCK_STREAM
- case "unixgram":
- sotype = syscall.SOCK_DGRAM
- case "unixpacket":
- sotype = syscall.SOCK_SEQPACKET
- default:
- return nil, UnknownNetworkError(net)
- }
-
- switch mode {
- case "dial":
- if laddr != nil && laddr.isWildcard() {
- laddr = nil
- }
- if raddr != nil && raddr.isWildcard() {
- raddr = nil
- }
- if raddr == nil && (sotype != syscall.SOCK_DGRAM || laddr == nil) {
- return nil, errMissingAddress
- }
- case "listen":
- default:
- return nil, errors.New("unknown mode: " + mode)
- }
-
- fd, err := socket(ctx, net, syscall.AF_UNIX, sotype, 0, false, laddr, raddr, ctrlFn)
- if err != nil {
- return nil, err
- }
- return fd, nil
-}
-
-func sockaddrToUnix(sa syscall.Sockaddr) Addr {
- if s, ok := sa.(*syscall.SockaddrUnix); ok {
- return &UnixAddr{Name: s.Name, Net: "unix"}
- }
- return nil
-}
-
-func sockaddrToUnixgram(sa syscall.Sockaddr) Addr {
- if s, ok := sa.(*syscall.SockaddrUnix); ok {
- return &UnixAddr{Name: s.Name, Net: "unixgram"}
- }
- return nil
-}
-
-func sockaddrToUnixpacket(sa syscall.Sockaddr) Addr {
- if s, ok := sa.(*syscall.SockaddrUnix); ok {
- return &UnixAddr{Name: s.Name, Net: "unixpacket"}
- }
- return nil
-}
-
-func sotypeToNet(sotype int) string {
- switch sotype {
- case syscall.SOCK_STREAM:
- return "unix"
- case syscall.SOCK_DGRAM:
- return "unixgram"
- case syscall.SOCK_SEQPACKET:
- return "unixpacket"
- default:
- panic("sotypeToNet unknown socket type")
- }
-}
-
-func (a *UnixAddr) family() int {
- return syscall.AF_UNIX
-}
-
-func (a *UnixAddr) sockaddr(family int) (syscall.Sockaddr, error) {
- if a == nil {
- return nil, nil
- }
- return &syscall.SockaddrUnix{Name: a.Name}, nil
-}
-
-func (a *UnixAddr) toLocal(net string) sockaddr {
- return a
-}
-
-func (c *UnixConn) readFrom(b []byte) (int, *UnixAddr, error) {
- var addr *UnixAddr
- n, sa, err := c.fd.readFrom(b)
- switch sa := sa.(type) {
- case *syscall.SockaddrUnix:
- if sa.Name != "" {
- addr = &UnixAddr{Name: sa.Name, Net: sotypeToNet(c.fd.sotype)}
- }
- }
- return n, addr, err
-}
-
-func (c *UnixConn) readMsg(b, oob []byte) (n, oobn, flags int, addr *UnixAddr, err error) {
- var sa syscall.Sockaddr
- n, oobn, flags, sa, err = c.fd.readMsg(b, oob, readMsgFlags)
- if readMsgFlags == 0 && err == nil && oobn > 0 {
- setReadMsgCloseOnExec(oob[:oobn])
- }
-
- switch sa := sa.(type) {
- case *syscall.SockaddrUnix:
- if sa.Name != "" {
- addr = &UnixAddr{Name: sa.Name, Net: sotypeToNet(c.fd.sotype)}
- }
- }
- return
-}
-
-func (c *UnixConn) writeTo(b []byte, addr *UnixAddr) (int, error) {
- if c.fd.isConnected {
- return 0, ErrWriteToConnected
- }
- if addr == nil {
- return 0, errMissingAddress
- }
- if addr.Net != sotypeToNet(c.fd.sotype) {
- return 0, syscall.EAFNOSUPPORT
- }
- sa := &syscall.SockaddrUnix{Name: addr.Name}
- return c.fd.writeTo(b, sa)
-}
-
-func (c *UnixConn) writeMsg(b, oob []byte, addr *UnixAddr) (n, oobn int, err error) {
- if c.fd.sotype == syscall.SOCK_DGRAM && c.fd.isConnected {
- return 0, 0, ErrWriteToConnected
- }
- var sa syscall.Sockaddr
- if addr != nil {
- if addr.Net != sotypeToNet(c.fd.sotype) {
- return 0, 0, syscall.EAFNOSUPPORT
- }
- sa = &syscall.SockaddrUnix{Name: addr.Name}
- }
- return c.fd.writeMsg(b, oob, sa)
-}
-
-func (sd *sysDialer) dialUnix(ctx context.Context, laddr, raddr *UnixAddr) (*UnixConn, error) {
- fd, err := unixSocket(ctx, sd.network, laddr, raddr, "dial", sd.Dialer.Control)
- if err != nil {
- return nil, err
- }
- return newUnixConn(fd), nil
-}
-
-func (ln *UnixListener) accept() (*UnixConn, error) {
- fd, err := ln.fd.accept()
- if err != nil {
- return nil, err
- }
- return newUnixConn(fd), nil
-}
-
-func (ln *UnixListener) close() error {
-	// The operating system doesn't clean up
-	// the file created when the socket was
-	// announced, so we have to clean it up ourselves.
- // There's a race here--we can't know for
- // sure whether someone else has come along
- // and replaced our socket name already--
- // but this sequence (remove then close)
- // is at least compatible with the auto-remove
- // sequence in ListenUnix. It's only non-Go
- // programs that can mess us up.
- // Even if there are racy calls to Close, we want to unlink only for the first one.
- ln.unlinkOnce.Do(func() {
- if ln.path[0] != '@' && ln.unlink {
- syscall.Unlink(ln.path)
- }
- })
- return ln.fd.Close()
-}
-
-func (ln *UnixListener) file() (*os.File, error) {
- f, err := ln.fd.dup()
- if err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// SetUnlinkOnClose sets whether the underlying socket file should be removed
-// from the file system when the listener is closed.
-//
-// The default behavior is to unlink the socket file only when package net created it.
-// That is, when the listener and the underlying socket file were created by a call to
-// Listen or ListenUnix, then by default closing the listener will remove the socket file,
-// but if the listener was created by a call to FileListener to use an already existing
-// socket file, then by default closing the listener will not remove the socket file.
-func (l *UnixListener) SetUnlinkOnClose(unlink bool) {
- l.unlink = unlink
-}
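
For example, keeping the socket file on disk for a supervisor that re-passes it to a restarted process; the path is an arbitrary illustration:

	package main

	import (
		"log"
		"net"
	)

	func main() {
		ln, err := net.ListenUnix("unix", &net.UnixAddr{Name: "/tmp/demo.sock", Net: "unix"})
		if err != nil {
			log.Fatal(err)
		}
		// Leave /tmp/demo.sock in place after Close.
		ln.SetUnlinkOnClose(false)
		defer ln.Close()
		// ... accept loop elided ...
	}
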
-
-func (sl *sysListener) listenUnix(ctx context.Context, laddr *UnixAddr) (*UnixListener, error) {
- fd, err := unixSocket(ctx, sl.network, laddr, nil, "listen", sl.ListenConfig.Control)
- if err != nil {
- return nil, err
- }
- return &UnixListener{fd: fd, path: fd.laddr.String(), unlink: true}, nil
-}
-
-func (sl *sysListener) listenUnixgram(ctx context.Context, laddr *UnixAddr) (*UnixConn, error) {
- fd, err := unixSocket(ctx, sl.network, laddr, nil, "listen", sl.ListenConfig.Control)
- if err != nil {
- return nil, err
- }
- return newUnixConn(fd), nil
-}
diff --git a/contrib/go/_std_1.18/src/net/url/url.go b/contrib/go/_std_1.18/src/net/url/url.go
deleted file mode 100644
index f31aa08b59..0000000000
--- a/contrib/go/_std_1.18/src/net/url/url.go
+++ /dev/null
@@ -1,1218 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package url parses URLs and implements query escaping.
-package url
-
-// See RFC 3986. This package generally follows RFC 3986, except where
-// it deviates for compatibility reasons. When sending changes, first
-// search old issues for history on decisions. Unit tests should also
-// contain references to issue numbers with details.
-
-import (
- "errors"
- "fmt"
- "sort"
- "strconv"
- "strings"
-)
-
-// Error reports an error and the operation and URL that caused it.
-type Error struct {
- Op string
- URL string
- Err error
-}
-
-func (e *Error) Unwrap() error { return e.Err }
-func (e *Error) Error() string { return fmt.Sprintf("%s %q: %s", e.Op, e.URL, e.Err) }
-
-func (e *Error) Timeout() bool {
- t, ok := e.Err.(interface {
- Timeout() bool
- })
- return ok && t.Timeout()
-}
-
-func (e *Error) Temporary() bool {
- t, ok := e.Err.(interface {
- Temporary() bool
- })
- return ok && t.Temporary()
-}
-
-const upperhex = "0123456789ABCDEF"
-
-func ishex(c byte) bool {
- switch {
- case '0' <= c && c <= '9':
- return true
- case 'a' <= c && c <= 'f':
- return true
- case 'A' <= c && c <= 'F':
- return true
- }
- return false
-}
-
-func unhex(c byte) byte {
- switch {
- case '0' <= c && c <= '9':
- return c - '0'
- case 'a' <= c && c <= 'f':
- return c - 'a' + 10
- case 'A' <= c && c <= 'F':
- return c - 'A' + 10
- }
- return 0
-}
-
-type encoding int
-
-const (
- encodePath encoding = 1 + iota
- encodePathSegment
- encodeHost
- encodeZone
- encodeUserPassword
- encodeQueryComponent
- encodeFragment
-)
-
-type EscapeError string
-
-func (e EscapeError) Error() string {
- return "invalid URL escape " + strconv.Quote(string(e))
-}
-
-type InvalidHostError string
-
-func (e InvalidHostError) Error() string {
- return "invalid character " + strconv.Quote(string(e)) + " in host name"
-}
-
-// Return true if the specified character should be escaped when
-// appearing in a URL string, according to RFC 3986.
-//
-// Note that, for now, shouldEscape does not check all
-// reserved characters correctly. See golang.org/issue/5684.
-func shouldEscape(c byte, mode encoding) bool {
- // §2.3 Unreserved characters (alphanum)
- if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
- return false
- }
-
- if mode == encodeHost || mode == encodeZone {
- // §3.2.2 Host allows
- // sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "="
- // as part of reg-name.
- // We add : because we include :port as part of host.
- // We add [ ] because we include [ipv6]:port as part of host.
- // We add < > because they're the only characters left that
- // we could possibly allow, and Parse will reject them if we
- // escape them (because hosts can't use %-encoding for
- // ASCII bytes).
- switch c {
- case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=', ':', '[', ']', '<', '>', '"':
- return false
- }
- }
-
- switch c {
- case '-', '_', '.', '~': // §2.3 Unreserved characters (mark)
- return false
-
- case '$', '&', '+', ',', '/', ':', ';', '=', '?', '@': // §2.2 Reserved characters (reserved)
- // Different sections of the URL allow a few of
- // the reserved characters to appear unescaped.
- switch mode {
- case encodePath: // §3.3
- // The RFC allows : @ & = + $ but saves / ; , for assigning
- // meaning to individual path segments. This package
- // only manipulates the path as a whole, so we allow those
- // last three as well. That leaves only ? to escape.
- return c == '?'
-
- case encodePathSegment: // §3.3
- // The RFC allows : @ & = + $ but saves / ; , for assigning
- // meaning to individual path segments.
- return c == '/' || c == ';' || c == ',' || c == '?'
-
- case encodeUserPassword: // §3.2.1
- // The RFC allows ';', ':', '&', '=', '+', '$', and ',' in
- // userinfo, so we must escape only '@', '/', and '?'.
- // The parsing of userinfo treats ':' as special so we must escape
- // that too.
- return c == '@' || c == '/' || c == '?' || c == ':'
-
- case encodeQueryComponent: // §3.4
- // The RFC reserves (so we must escape) everything.
- return true
-
- case encodeFragment: // §4.1
- // The RFC text is silent but the grammar allows
- // everything, so escape nothing.
- return false
- }
- }
-
- if mode == encodeFragment {
- // RFC 3986 §2.2 allows not escaping sub-delims. A subset of sub-delims are
- // included in reserved from RFC 2396 §2.2. The remaining sub-delims do not
- // need to be escaped. To minimize potential breakage, we apply two restrictions:
- // (1) we always escape sub-delims outside of the fragment, and (2) we always
- // escape single quote to avoid breaking callers that had previously assumed that
- // single quotes would be escaped. See issue #19917.
- switch c {
- case '!', '(', ')', '*':
- return false
- }
- }
-
- // Everything else must be escaped.
- return true
-}
-
-// QueryUnescape does the inverse transformation of QueryEscape,
-// converting each 3-byte encoded substring of the form "%AB" into the
-// hex-decoded byte 0xAB.
-// It returns an error if any % is not followed by two hexadecimal
-// digits.
-func QueryUnescape(s string) (string, error) {
- return unescape(s, encodeQueryComponent)
-}
-
-// PathUnescape does the inverse transformation of PathEscape,
-// converting each 3-byte encoded substring of the form "%AB" into the
-// hex-decoded byte 0xAB. It returns an error if any % is not followed
-// by two hexadecimal digits.
-//
-// PathUnescape is identical to QueryUnescape except that it does not
-// unescape '+' to ' ' (space).
-func PathUnescape(s string) (string, error) {
- return unescape(s, encodePathSegment)
-}
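-
-// Editor's sketch (not part of the original file): the two unescape
-// helpers differ only in how '+' is treated:
-//
-//    q, _ := url.QueryUnescape("a%2Fb+c") // "a/b c" ('+' decodes to a space)
-//    p, _ := url.PathUnescape("a%2Fb+c")  // "a/b+c" ('+' is left as-is)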
-
-// unescape unescapes a string; the mode specifies
-// which section of the URL string is being unescaped.
-func unescape(s string, mode encoding) (string, error) {
- // Count %, check that they're well-formed.
- n := 0
- hasPlus := false
- for i := 0; i < len(s); {
- switch s[i] {
- case '%':
- n++
- if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
- s = s[i:]
- if len(s) > 3 {
- s = s[:3]
- }
- return "", EscapeError(s)
- }
- // Per https://tools.ietf.org/html/rfc3986#page-21
- // in the host component %-encoding can only be used
- // for non-ASCII bytes.
- // But https://tools.ietf.org/html/rfc6874#section-2
- // introduces %25 being allowed to escape a percent sign
- // in IPv6 scoped-address literals. Yay.
- if mode == encodeHost && unhex(s[i+1]) < 8 && s[i:i+3] != "%25" {
- return "", EscapeError(s[i : i+3])
- }
- if mode == encodeZone {
- // RFC 6874 says basically "anything goes" for zone identifiers
- // and that even non-ASCII can be redundantly escaped,
- // but it seems prudent to restrict %-escaped bytes here to those
- // that are valid host name bytes in their unescaped form.
- // That is, you can use escaping in the zone identifier but not
- // to introduce bytes you couldn't just write directly.
- // But Windows puts spaces here! Yay.
- v := unhex(s[i+1])<<4 | unhex(s[i+2])
- if s[i:i+3] != "%25" && v != ' ' && shouldEscape(v, encodeHost) {
- return "", EscapeError(s[i : i+3])
- }
- }
- i += 3
- case '+':
- hasPlus = mode == encodeQueryComponent
- i++
- default:
- if (mode == encodeHost || mode == encodeZone) && s[i] < 0x80 && shouldEscape(s[i], mode) {
- return "", InvalidHostError(s[i : i+1])
- }
- i++
- }
- }
-
- if n == 0 && !hasPlus {
- return s, nil
- }
-
- var t strings.Builder
- t.Grow(len(s) - 2*n)
- for i := 0; i < len(s); i++ {
- switch s[i] {
- case '%':
- t.WriteByte(unhex(s[i+1])<<4 | unhex(s[i+2]))
- i += 2
- case '+':
- if mode == encodeQueryComponent {
- t.WriteByte(' ')
- } else {
- t.WriteByte('+')
- }
- default:
- t.WriteByte(s[i])
- }
- }
- return t.String(), nil
-}
-
-// QueryEscape escapes the string so it can be safely placed
-// inside a URL query.
-func QueryEscape(s string) string {
- return escape(s, encodeQueryComponent)
-}
-
-// PathEscape escapes the string so it can be safely placed inside a URL path segment,
-// replacing special characters (including /) with %XX sequences as needed.
-func PathEscape(s string) string {
- return escape(s, encodePathSegment)
-}
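-
-// Editor's sketch (not part of the original file): how the two escape
-// helpers treat spaces and slashes:
-//
-//    url.QueryEscape("a b&c") // "a+b%26c" (space becomes '+', '&' is %-escaped)
-//    url.PathEscape("a b/c")  // "a%20b%2Fc" (space and '/' are %-escaped)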
-
-func escape(s string, mode encoding) string {
- spaceCount, hexCount := 0, 0
- for i := 0; i < len(s); i++ {
- c := s[i]
- if shouldEscape(c, mode) {
- if c == ' ' && mode == encodeQueryComponent {
- spaceCount++
- } else {
- hexCount++
- }
- }
- }
-
- if spaceCount == 0 && hexCount == 0 {
- return s
- }
-
- var buf [64]byte
- var t []byte
-
- required := len(s) + 2*hexCount
- if required <= len(buf) {
- t = buf[:required]
- } else {
- t = make([]byte, required)
- }
-
- if hexCount == 0 {
- copy(t, s)
- for i := 0; i < len(s); i++ {
- if s[i] == ' ' {
- t[i] = '+'
- }
- }
- return string(t)
- }
-
- j := 0
- for i := 0; i < len(s); i++ {
- switch c := s[i]; {
- case c == ' ' && mode == encodeQueryComponent:
- t[j] = '+'
- j++
- case shouldEscape(c, mode):
- t[j] = '%'
- t[j+1] = upperhex[c>>4]
- t[j+2] = upperhex[c&15]
- j += 3
- default:
- t[j] = s[i]
- j++
- }
- }
- return string(t)
-}
-
-// A URL represents a parsed URL (technically, a URI reference).
-//
-// The general form represented is:
-//
-// [scheme:][//[userinfo@]host][/]path[?query][#fragment]
-//
-// URLs that do not start with a slash after the scheme are interpreted as:
-//
-// scheme:opaque[?query][#fragment]
-//
-// Note that the Path field is stored in decoded form: /%47%6f%2f becomes /Go/.
-// A consequence is that it is impossible to tell which slashes in the Path were
-// slashes in the raw URL and which were %2f. This distinction is rarely important,
-// but when it is, the code should use RawPath, an optional field which only gets
-// set if the default encoding is different from Path.
-//
-// URL's String method uses the EscapedPath method to obtain the path. See the
-// EscapedPath method for more details.
-type URL struct {
- Scheme string
- Opaque string // encoded opaque data
- User *Userinfo // username and password information
- Host string // host or host:port
- Path string // path (relative paths may omit leading slash)
- RawPath string // encoded path hint (see EscapedPath method)
- ForceQuery bool // append a query ('?') even if RawQuery is empty
- RawQuery string // encoded query values, without '?'
- Fragment string // fragment for references, without '#'
- RawFragment string // encoded fragment hint (see EscapedFragment method)
-}
-
-// User returns a Userinfo containing the provided username
-// and no password set.
-func User(username string) *Userinfo {
- return &Userinfo{username, "", false}
-}
-
-// UserPassword returns a Userinfo containing the provided username
-// and password.
-//
-// This functionality should only be used with legacy web sites.
-// RFC 2396 warns that interpreting Userinfo this way
-// "is NOT RECOMMENDED, because the passing of authentication
-// information in clear text (such as URI) has proven to be a
-// security risk in almost every case where it has been used."
-func UserPassword(username, password string) *Userinfo {
- return &Userinfo{username, password, true}
-}
-
-// The Userinfo type is an immutable encapsulation of username and
-// password details for a URL. An existing Userinfo value is guaranteed
-// to have a username set (potentially empty, as allowed by RFC 2396),
-// and optionally a password.
-type Userinfo struct {
- username string
- password string
- passwordSet bool
-}
-
-// Username returns the username.
-func (u *Userinfo) Username() string {
- if u == nil {
- return ""
- }
- return u.username
-}
-
-// Password returns the password in case it is set, and whether it is set.
-func (u *Userinfo) Password() (string, bool) {
- if u == nil {
- return "", false
- }
- return u.password, u.passwordSet
-}
-
-// String returns the encoded userinfo information in the standard form
-// of "username[:password]".
-func (u *Userinfo) String() string {
- if u == nil {
- return ""
- }
- s := escape(u.username, encodeUserPassword)
- if u.passwordSet {
- s += ":" + escape(u.password, encodeUserPassword)
- }
- return s
-}
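-
-// Editor's sketch (not part of the original file): String escapes reserved
-// characters in the username and password:
-//
-//    url.UserPassword("user", "p@ss").String() // "user:p%40ss"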
-
-// getScheme checks whether rawURL is of the form scheme:path.
-// (Scheme must be [a-zA-Z][a-zA-Z0-9+.-]*.)
-// If so, it returns scheme, path; otherwise it returns "", rawURL.
-func getScheme(rawURL string) (scheme, path string, err error) {
- for i := 0; i < len(rawURL); i++ {
- c := rawURL[i]
- switch {
- case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
- // do nothing
- case '0' <= c && c <= '9' || c == '+' || c == '-' || c == '.':
- if i == 0 {
- return "", rawURL, nil
- }
- case c == ':':
- if i == 0 {
- return "", "", errors.New("missing protocol scheme")
- }
- return rawURL[:i], rawURL[i+1:], nil
- default:
- // we have encountered an invalid character,
- // so there is no valid scheme
- return "", rawURL, nil
- }
- }
- return "", rawURL, nil
-}
-
-// Parse parses a raw url into a URL structure.
-//
-// The url may be relative (a path, without a host) or absolute
-// (starting with a scheme). Trying to parse a hostname and path
-// without a scheme is invalid but may not necessarily return an
-// error, due to parsing ambiguities.
-func Parse(rawURL string) (*URL, error) {
- // Cut off #frag
- u, frag, _ := strings.Cut(rawURL, "#")
- url, err := parse(u, false)
- if err != nil {
- return nil, &Error{"parse", u, err}
- }
- if frag == "" {
- return url, nil
- }
- if err = url.setFragment(frag); err != nil {
- return nil, &Error{"parse", rawURL, err}
- }
- return url, nil
-}
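-
-// Editor's sketch (not part of the original file): Parse splits an absolute
-// URL into the fields described on the URL type above:
-//
-//    u, err := url.Parse("https://user:pass@example.com:8080/a/b?q=1#frag")
-//    if err != nil { /* handle */ }
-//    // u.Scheme == "https", u.Host == "example.com:8080",
-//    // u.Path == "/a/b", u.RawQuery == "q=1", u.Fragment == "frag"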
-
-// ParseRequestURI parses a raw url into a URL structure. It assumes that
-// url was received in an HTTP request, so the url is interpreted
-// only as an absolute URI or an absolute path.
-// The string url is assumed not to have a #fragment suffix.
-// (Web browsers strip #fragment before sending the URL to a web server.)
-func ParseRequestURI(rawURL string) (*URL, error) {
- url, err := parse(rawURL, true)
- if err != nil {
- return nil, &Error{"parse", rawURL, err}
- }
- return url, nil
-}
-
-// parse parses a URL from a string in one of two contexts. If
-// viaRequest is true, the URL is assumed to have arrived via an HTTP request,
-// in which case only absolute URLs or path-absolute relative URLs are allowed.
-// If viaRequest is false, all forms of relative URLs are allowed.
-func parse(rawURL string, viaRequest bool) (*URL, error) {
- var rest string
- var err error
-
- if stringContainsCTLByte(rawURL) {
- return nil, errors.New("net/url: invalid control character in URL")
- }
-
- if rawURL == "" && viaRequest {
- return nil, errors.New("empty url")
- }
- url := new(URL)
-
- if rawURL == "*" {
- url.Path = "*"
- return url, nil
- }
-
- // Split off possible leading "http:", "mailto:", etc.
- // Cannot contain escaped characters.
- if url.Scheme, rest, err = getScheme(rawURL); err != nil {
- return nil, err
- }
- url.Scheme = strings.ToLower(url.Scheme)
-
- if strings.HasSuffix(rest, "?") && strings.Count(rest, "?") == 1 {
- url.ForceQuery = true
- rest = rest[:len(rest)-1]
- } else {
- rest, url.RawQuery, _ = strings.Cut(rest, "?")
- }
-
- if !strings.HasPrefix(rest, "/") {
- if url.Scheme != "" {
- // We consider rootless paths per RFC 3986 as opaque.
- url.Opaque = rest
- return url, nil
- }
- if viaRequest {
- return nil, errors.New("invalid URI for request")
- }
-
- // Avoid confusion with malformed schemes, like cache_object:foo/bar.
- // See golang.org/issue/16822.
- //
- // RFC 3986, §3.3:
- // In addition, a URI reference (Section 4.1) may be a relative-path reference,
- // in which case the first path segment cannot contain a colon (":") character.
- if segment, _, _ := strings.Cut(rest, "/"); strings.Contains(segment, ":") {
- // First path segment has colon. Not allowed in relative URL.
- return nil, errors.New("first path segment in URL cannot contain colon")
- }
- }
-
- if (url.Scheme != "" || !viaRequest && !strings.HasPrefix(rest, "///")) && strings.HasPrefix(rest, "//") {
- var authority string
- authority, rest = rest[2:], ""
- if i := strings.Index(authority, "/"); i >= 0 {
- authority, rest = authority[:i], authority[i:]
- }
- url.User, url.Host, err = parseAuthority(authority)
- if err != nil {
- return nil, err
- }
- }
- // Set Path and, optionally, RawPath.
- // RawPath is a hint of the encoding of Path. We don't want to set it if
- // the default escaping of Path is equivalent, to help make sure that people
- // don't rely on it in general.
- if err := url.setPath(rest); err != nil {
- return nil, err
- }
- return url, nil
-}
-
-func parseAuthority(authority string) (user *Userinfo, host string, err error) {
- i := strings.LastIndex(authority, "@")
- if i < 0 {
- host, err = parseHost(authority)
- } else {
- host, err = parseHost(authority[i+1:])
- }
- if err != nil {
- return nil, "", err
- }
- if i < 0 {
- return nil, host, nil
- }
- userinfo := authority[:i]
- if !validUserinfo(userinfo) {
- return nil, "", errors.New("net/url: invalid userinfo")
- }
- if !strings.Contains(userinfo, ":") {
- if userinfo, err = unescape(userinfo, encodeUserPassword); err != nil {
- return nil, "", err
- }
- user = User(userinfo)
- } else {
- username, password, _ := strings.Cut(userinfo, ":")
- if username, err = unescape(username, encodeUserPassword); err != nil {
- return nil, "", err
- }
- if password, err = unescape(password, encodeUserPassword); err != nil {
- return nil, "", err
- }
- user = UserPassword(username, password)
- }
- return user, host, nil
-}
-
-// parseHost parses host as an authority without user
-// information. That is, as host[:port].
-func parseHost(host string) (string, error) {
- if strings.HasPrefix(host, "[") {
- // Parse an IP-Literal in RFC 3986 and RFC 6874.
- // E.g., "[fe80::1]", "[fe80::1%25en0]", "[fe80::1]:80".
- i := strings.LastIndex(host, "]")
- if i < 0 {
- return "", errors.New("missing ']' in host")
- }
- colonPort := host[i+1:]
- if !validOptionalPort(colonPort) {
- return "", fmt.Errorf("invalid port %q after host", colonPort)
- }
-
- // RFC 6874 defines that %25 (%-encoded percent) introduces
- // the zone identifier, and the zone identifier can use basically
- // any %-encoding it likes. That's different from the host, which
- // can only %-encode non-ASCII bytes.
- // We do impose some restrictions on the zone, to avoid stupidity
- // like newlines.
- zone := strings.Index(host[:i], "%25")
- if zone >= 0 {
- host1, err := unescape(host[:zone], encodeHost)
- if err != nil {
- return "", err
- }
- host2, err := unescape(host[zone:i], encodeZone)
- if err != nil {
- return "", err
- }
- host3, err := unescape(host[i:], encodeHost)
- if err != nil {
- return "", err
- }
- return host1 + host2 + host3, nil
- }
- } else if i := strings.LastIndex(host, ":"); i != -1 {
- colonPort := host[i:]
- if !validOptionalPort(colonPort) {
- return "", fmt.Errorf("invalid port %q after host", colonPort)
- }
- }
-
- var err error
- if host, err = unescape(host, encodeHost); err != nil {
- return "", err
- }
- return host, nil
-}
-
-// setPath sets the Path and RawPath fields of the URL based on the provided
-// escaped path p. It maintains the invariant that RawPath is only specified
-// when it differs from the default encoding of the path.
-// For example:
-// - setPath("/foo/bar") will set Path="/foo/bar" and RawPath=""
-// - setPath("/foo%2fbar") will set Path="/foo/bar" and RawPath="/foo%2fbar"
-// setPath will return an error only if the provided path contains an invalid
-// escaping.
-func (u *URL) setPath(p string) error {
- path, err := unescape(p, encodePath)
- if err != nil {
- return err
- }
- u.Path = path
- if escp := escape(path, encodePath); p == escp {
- // Default encoding is fine.
- u.RawPath = ""
- } else {
- u.RawPath = p
- }
- return nil
-}
-
-// EscapedPath returns the escaped form of u.Path.
-// In general there are multiple possible escaped forms of any path.
-// EscapedPath returns u.RawPath when it is a valid escaping of u.Path.
-// Otherwise EscapedPath ignores u.RawPath and computes an escaped
-// form on its own.
-// The String and RequestURI methods use EscapedPath to construct
-// their results.
-// In general, code should call EscapedPath instead of
-// reading u.RawPath directly.
-func (u *URL) EscapedPath() string {
- if u.RawPath != "" && validEncoded(u.RawPath, encodePath) {
- p, err := unescape(u.RawPath, encodePath)
- if err == nil && p == u.Path {
- return u.RawPath
- }
- }
- if u.Path == "*" {
- return "*" // don't escape (Issue 11202)
- }
- return escape(u.Path, encodePath)
-}
-
-// validEncoded reports whether s is a valid encoded path or fragment,
-// according to mode.
-// That is, s must not contain any bytes that require escaping during encoding.
-func validEncoded(s string, mode encoding) bool {
- for i := 0; i < len(s); i++ {
- // RFC 3986, Appendix A.
- // pchar = unreserved / pct-encoded / sub-delims / ":" / "@".
- // shouldEscape is not quite compliant with the RFC,
- // so we check the sub-delims ourselves and let
- // shouldEscape handle the others.
- switch s[i] {
- case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=', ':', '@':
- // ok
- case '[', ']':
- // ok - not specified in RFC 3986 but left alone by modern browsers
- case '%':
- // ok - percent encoded, will decode
- default:
- if shouldEscape(s[i], mode) {
- return false
- }
- }
- }
- return true
-}
-
-// setFragment is like setPath but for Fragment/RawFragment.
-func (u *URL) setFragment(f string) error {
- frag, err := unescape(f, encodeFragment)
- if err != nil {
- return err
- }
- u.Fragment = frag
- if escf := escape(frag, encodeFragment); f == escf {
- // Default encoding is fine.
- u.RawFragment = ""
- } else {
- u.RawFragment = f
- }
- return nil
-}
-
-// EscapedFragment returns the escaped form of u.Fragment.
-// In general there are multiple possible escaped forms of any fragment.
-// EscapedFragment returns u.RawFragment when it is a valid escaping of u.Fragment.
-// Otherwise EscapedFragment ignores u.RawFragment and computes an escaped
-// form on its own.
-// The String method uses EscapedFragment to construct its result.
-// In general, code should call EscapedFragment instead of
-// reading u.RawFragment directly.
-func (u *URL) EscapedFragment() string {
- if u.RawFragment != "" && validEncoded(u.RawFragment, encodeFragment) {
- f, err := unescape(u.RawFragment, encodeFragment)
- if err == nil && f == u.Fragment {
- return u.RawFragment
- }
- }
- return escape(u.Fragment, encodeFragment)
-}
-
-// validOptionalPort reports whether port is either an empty string
-// or matches /^:\d*$/
-func validOptionalPort(port string) bool {
- if port == "" {
- return true
- }
- if port[0] != ':' {
- return false
- }
- for _, b := range port[1:] {
- if b < '0' || b > '9' {
- return false
- }
- }
- return true
-}
-
-// String reassembles the URL into a valid URL string.
-// The general form of the result is one of:
-//
-// scheme:opaque?query#fragment
-// scheme://userinfo@host/path?query#fragment
-//
-// If u.Opaque is non-empty, String uses the first form;
-// otherwise it uses the second form.
-// Any non-ASCII characters in host are escaped.
-// To obtain the path, String uses u.EscapedPath().
-//
-// In the second form, the following rules apply:
-// - if u.Scheme is empty, scheme: is omitted.
-// - if u.User is nil, userinfo@ is omitted.
-// - if u.Host is empty, host/ is omitted.
-// - if u.Scheme and u.Host are empty and u.User is nil,
-// the entire scheme://userinfo@host/ is omitted.
-// - if u.Host is non-empty and u.Path begins with a /,
-// the form host/path does not add its own /.
-// - if u.RawQuery is empty, ?query is omitted.
-// - if u.Fragment is empty, #fragment is omitted.
-func (u *URL) String() string {
- var buf strings.Builder
- if u.Scheme != "" {
- buf.WriteString(u.Scheme)
- buf.WriteByte(':')
- }
- if u.Opaque != "" {
- buf.WriteString(u.Opaque)
- } else {
- if u.Scheme != "" || u.Host != "" || u.User != nil {
- if u.Host != "" || u.Path != "" || u.User != nil {
- buf.WriteString("//")
- }
- if ui := u.User; ui != nil {
- buf.WriteString(ui.String())
- buf.WriteByte('@')
- }
- if h := u.Host; h != "" {
- buf.WriteString(escape(h, encodeHost))
- }
- }
- path := u.EscapedPath()
- if path != "" && path[0] != '/' && u.Host != "" {
- buf.WriteByte('/')
- }
- if buf.Len() == 0 {
- // RFC 3986 §4.2
- // A path segment that contains a colon character (e.g., "this:that")
- // cannot be used as the first segment of a relative-path reference, as
- // it would be mistaken for a scheme name. Such a segment must be
- // preceded by a dot-segment (e.g., "./this:that") to make a relative-
- // path reference.
- if segment, _, _ := strings.Cut(path, "/"); strings.Contains(segment, ":") {
- buf.WriteString("./")
- }
- }
- buf.WriteString(path)
- }
- if u.ForceQuery || u.RawQuery != "" {
- buf.WriteByte('?')
- buf.WriteString(u.RawQuery)
- }
- if u.Fragment != "" {
- buf.WriteByte('#')
- buf.WriteString(u.EscapedFragment())
- }
- return buf.String()
-}
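-
-// Editor's sketch (not part of the original file): a URL built from fields
-// is reassembled by String, which escapes the path as needed:
-//
-//    u := url.URL{Scheme: "https", Host: "example.com", Path: "/a b", RawQuery: "q=1"}
-//    u.String() // "https://example.com/a%20b?q=1"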
-
-// Redacted is like String but replaces any password with "xxxxx".
-// Only the password in u.User is redacted.
-func (u *URL) Redacted() string {
- if u == nil {
- return ""
- }
-
- ru := *u
- if _, has := ru.User.Password(); has {
- ru.User = UserPassword(ru.User.Username(), "xxxxx")
- }
- return ru.String()
-}
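-
-// Editor's sketch (not part of the original file):
-//
-//    u, _ := url.Parse("https://user:secret@example.com/")
-//    u.Redacted() // "https://user:xxxxx@example.com/"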
-
-// Values maps a string key to a list of values.
-// It is typically used for query parameters and form values.
-// Unlike in the http.Header map, the keys in a Values map
-// are case-sensitive.
-type Values map[string][]string
-
-// Get gets the first value associated with the given key.
-// If there are no values associated with the key, Get returns
-// the empty string. To access multiple values, use the map
-// directly.
-func (v Values) Get(key string) string {
- if v == nil {
- return ""
- }
- vs := v[key]
- if len(vs) == 0 {
- return ""
- }
- return vs[0]
-}
-
-// Set sets the key to value. It replaces any existing
-// values.
-func (v Values) Set(key, value string) {
- v[key] = []string{value}
-}
-
-// Add adds the value to key. It appends to any existing
-// values associated with key.
-func (v Values) Add(key, value string) {
- v[key] = append(v[key], value)
-}
-
-// Del deletes the values associated with key.
-func (v Values) Del(key string) {
- delete(v, key)
-}
-
-// Has reports whether a given key is set.
-func (v Values) Has(key string) bool {
- _, ok := v[key]
- return ok
-}
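-
-// Editor's sketch (not part of the original file): typical Values usage,
-// including Encode, which is defined below:
-//
-//    v := url.Values{}
-//    v.Set("k", "1")
-//    v.Add("k", "2")
-//    v.Get("k")  // "1" (first value only)
-//    v["k"]      // []string{"1", "2"}
-//    v.Encode()  // "k=1&k=2"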
-
-// ParseQuery parses the URL-encoded query string and returns
-// a map listing the values specified for each key.
-// ParseQuery always returns a non-nil map containing all the
-// valid query parameters found; err describes the first decoding error
-// encountered, if any.
-//
-// Query is expected to be a list of key=value settings separated by ampersands.
-// A setting without an equals sign is interpreted as a key set to an empty
-// value.
-// Settings containing a non-URL-encoded semicolon are considered invalid.
-func ParseQuery(query string) (Values, error) {
- m := make(Values)
- err := parseQuery(m, query)
- return m, err
-}
-
-func parseQuery(m Values, query string) (err error) {
- for query != "" {
- var key string
- key, query, _ = strings.Cut(query, "&")
- if strings.Contains(key, ";") {
- err = fmt.Errorf("invalid semicolon separator in query")
- continue
- }
- if key == "" {
- continue
- }
- key, value, _ := strings.Cut(key, "=")
- key, err1 := QueryUnescape(key)
- if err1 != nil {
- if err == nil {
- err = err1
- }
- continue
- }
- value, err1 = QueryUnescape(value)
- if err1 != nil {
- if err == nil {
- err = err1
- }
- continue
- }
- m[key] = append(m[key], value)
- }
- return err
-}
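-
-// Editor's sketch (not part of the original file): repeated keys accumulate,
-// and a bare semicolon separator is reported as an error:
-//
-//    m, err := url.ParseQuery("a=1&b=2&a=3")
-//    // m == url.Values{"a": {"1", "3"}, "b": {"2"}}, err == nil
-//    _, err = url.ParseQuery("a=1;b=2") // err: invalid semicolon separator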
-
-// Encode encodes the values into "URL encoded" form
-// ("bar=baz&foo=quux") sorted by key.
-func (v Values) Encode() string {
- if v == nil {
- return ""
- }
- var buf strings.Builder
- keys := make([]string, 0, len(v))
- for k := range v {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- for _, k := range keys {
- vs := v[k]
- keyEscaped := QueryEscape(k)
- for _, v := range vs {
- if buf.Len() > 0 {
- buf.WriteByte('&')
- }
- buf.WriteString(keyEscaped)
- buf.WriteByte('=')
- buf.WriteString(QueryEscape(v))
- }
- }
- return buf.String()
-}
-
-// resolvePath applies the special path segments from ref
-// to base, per RFC 3986.
-func resolvePath(base, ref string) string {
- var full string
- if ref == "" {
- full = base
- } else if ref[0] != '/' {
- i := strings.LastIndex(base, "/")
- full = base[:i+1] + ref
- } else {
- full = ref
- }
- if full == "" {
- return ""
- }
-
- var (
- elem string
- dst strings.Builder
- )
- first := true
- remaining := full
- // We want to return a leading '/', so write it now.
- dst.WriteByte('/')
- found := true
- for found {
- elem, remaining, found = strings.Cut(remaining, "/")
- if elem == "." {
- first = false
- // drop
- continue
- }
-
- if elem == ".." {
- // Ignore the leading '/' we already wrote.
- str := dst.String()[1:]
- index := strings.LastIndexByte(str, '/')
-
- dst.Reset()
- dst.WriteByte('/')
- if index == -1 {
- first = true
- } else {
- dst.WriteString(str[:index])
- }
- } else {
- if !first {
- dst.WriteByte('/')
- }
- dst.WriteString(elem)
- first = false
- }
- }
-
- if elem == "." || elem == ".." {
- dst.WriteByte('/')
- }
-
- // We wrote an initial '/', but we don't want two.
- r := dst.String()
- if len(r) > 1 && r[1] == '/' {
- r = r[1:]
- }
- return r
-}
-
-// IsAbs reports whether the URL is absolute.
-// Absolute means that it has a non-empty scheme.
-func (u *URL) IsAbs() bool {
- return u.Scheme != ""
-}
-
-// Parse parses a URL in the context of the receiver. The provided URL
-// may be relative or absolute. Parse returns nil, err on parse
-// failure, otherwise its return value is the same as ResolveReference.
-func (u *URL) Parse(ref string) (*URL, error) {
- refURL, err := Parse(ref)
- if err != nil {
- return nil, err
- }
- return u.ResolveReference(refURL), nil
-}
-
-// ResolveReference resolves a URI reference to an absolute URI from
-// an absolute base URI u, per RFC 3986 Section 5.2. The URI reference
-// may be relative or absolute. ResolveReference always returns a new
-// URL instance, even if the returned URL is identical to either the
-// base or reference. If ref is an absolute URL, then ResolveReference
-// ignores base and returns a copy of ref.
-func (u *URL) ResolveReference(ref *URL) *URL {
- url := *ref
- if ref.Scheme == "" {
- url.Scheme = u.Scheme
- }
- if ref.Scheme != "" || ref.Host != "" || ref.User != nil {
- // The "absoluteURI" or "net_path" cases.
- // We can ignore the error from setPath since we know we provided a
- // validly-escaped path.
- url.setPath(resolvePath(ref.EscapedPath(), ""))
- return &url
- }
- if ref.Opaque != "" {
- url.User = nil
- url.Host = ""
- url.Path = ""
- return &url
- }
- if ref.Path == "" && !ref.ForceQuery && ref.RawQuery == "" {
- url.RawQuery = u.RawQuery
- if ref.Fragment == "" {
- url.Fragment = u.Fragment
- url.RawFragment = u.RawFragment
- }
- }
- // The "abs_path" or "rel_path" cases.
- url.Host = u.Host
- url.User = u.User
- url.setPath(resolvePath(u.EscapedPath(), ref.EscapedPath()))
- return &url
-}
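-
-// Editor's sketch (not part of the original file): dot segments in the
-// reference are resolved against the base path:
-//
-//    base, _ := url.Parse("https://example.com/a/b/c")
-//    ref, _ := url.Parse("../d")
-//    base.ResolveReference(ref).String() // "https://example.com/a/d"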
-
-// Query parses RawQuery and returns the corresponding values.
-// It silently discards malformed value pairs.
-// To check errors use ParseQuery.
-func (u *URL) Query() Values {
- v, _ := ParseQuery(u.RawQuery)
- return v
-}
-
-// RequestURI returns the encoded path?query or opaque?query
-// string that would be used in an HTTP request for u.
-func (u *URL) RequestURI() string {
- result := u.Opaque
- if result == "" {
- result = u.EscapedPath()
- if result == "" {
- result = "/"
- }
- } else {
- if strings.HasPrefix(result, "//") {
- result = u.Scheme + ":" + result
- }
- }
- if u.ForceQuery || u.RawQuery != "" {
- result += "?" + u.RawQuery
- }
- return result
-}
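-
-// Editor's sketch (not part of the original file):
-//
-//    u, _ := url.Parse("https://example.com/a/b?q=1")
-//    u.RequestURI() // "/a/b?q=1"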
-
-// Hostname returns u.Host, stripping any valid port number if present.
-//
-// If the result is enclosed in square brackets, as literal IPv6 addresses are,
-// the square brackets are removed from the result.
-func (u *URL) Hostname() string {
- host, _ := splitHostPort(u.Host)
- return host
-}
-
-// Port returns the port part of u.Host, without the leading colon.
-//
-// If u.Host doesn't contain a valid numeric port, Port returns an empty string.
-func (u *URL) Port() string {
- _, port := splitHostPort(u.Host)
- return port
-}
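-
-// Editor's sketch (not part of the original file): brackets around an IPv6
-// literal are stripped and the port is split off:
-//
-//    u, _ := url.Parse("https://[::1]:8443/")
-//    u.Hostname() // "::1"
-//    u.Port()     // "8443"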
-
-// splitHostPort separates host and port. If the port is not valid, it returns
-// the entire input as host, without checking the validity of the host.
-// Unlike net.SplitHostPort, it requires ports to be numeric, per RFC 3986.
-func splitHostPort(hostPort string) (host, port string) {
- host = hostPort
-
- colon := strings.LastIndexByte(host, ':')
- if colon != -1 && validOptionalPort(host[colon:]) {
- host, port = host[:colon], host[colon+1:]
- }
-
- if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
- host = host[1 : len(host)-1]
- }
-
- return
-}
-
-// Marshaling interface implementations.
-// We would like to implement MarshalText/UnmarshalText, but that would change the JSON representation of URLs.
-
-func (u *URL) MarshalBinary() (text []byte, err error) {
- return []byte(u.String()), nil
-}
-
-func (u *URL) UnmarshalBinary(text []byte) error {
- u1, err := Parse(string(text))
- if err != nil {
- return err
- }
- *u = *u1
- return nil
-}
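-
-// Editor's sketch (not part of the original file): the binary round trip
-// goes through the string form:
-//
-//    u, _ := url.Parse("https://example.com/a")
-//    b, _ := u.MarshalBinary() // []byte("https://example.com/a")
-//    var u2 url.URL
-//    _ = u2.UnmarshalBinary(b) // u2 now equals *u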
-
-// validUserinfo reports whether s is a valid userinfo string per RFC 3986
-// Section 3.2.1:
-// userinfo = *( unreserved / pct-encoded / sub-delims / ":" )
-// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
-// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
-// / "*" / "+" / "," / ";" / "="
-//
-// It doesn't validate pct-encoded. The caller does that via func unescape.
-func validUserinfo(s string) bool {
- for _, r := range s {
- if 'A' <= r && r <= 'Z' {
- continue
- }
- if 'a' <= r && r <= 'z' {
- continue
- }
- if '0' <= r && r <= '9' {
- continue
- }
- switch r {
- case '-', '.', '_', ':', '~', '!', '$', '&', '\'',
- '(', ')', '*', '+', ',', ';', '=', '%', '@':
- continue
- default:
- return false
- }
- }
- return true
-}
-
-// stringContainsCTLByte reports whether s contains any ASCII control character.
-func stringContainsCTLByte(s string) bool {
- for i := 0; i < len(s); i++ {
- b := s[i]
- if b < ' ' || b == 0x7f {
- return true
- }
- }
- return false
-}
diff --git a/contrib/go/_std_1.18/src/os/endian_little.go b/contrib/go/_std_1.18/src/os/endian_little.go
deleted file mode 100644
index 10643a804e..0000000000
--- a/contrib/go/_std_1.18/src/os/endian_little.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-//
-//go:build 386 || amd64 || arm || arm64 || ppc64le || mips64le || mipsle || riscv64 || wasm
-
-package os
-
-const isBigEndian = false
diff --git a/contrib/go/_std_1.18/src/os/error_posix.go b/contrib/go/_std_1.18/src/os/error_posix.go
deleted file mode 100644
index 234f4eb692..0000000000
--- a/contrib/go/_std_1.18/src/os/error_posix.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris || windows
-
-package os
-
-import "syscall"
-
-// wrapSyscallError takes an error and a syscall name. If the error is
-// a syscall.Errno, it wraps it in an os.SyscallError using the syscall name.
-func wrapSyscallError(name string, err error) error {
- if _, ok := err.(syscall.Errno); ok {
- err = NewSyscallError(name, err)
- }
- return err
-}
diff --git a/contrib/go/_std_1.18/src/os/exec_posix.go b/contrib/go/_std_1.18/src/os/exec_posix.go
deleted file mode 100644
index d619984693..0000000000
--- a/contrib/go/_std_1.18/src/os/exec_posix.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris || windows
-
-package os
-
-import (
- "internal/itoa"
- "internal/syscall/execenv"
- "runtime"
- "syscall"
-)
-
-// The only signal values guaranteed to be present in the os package on all
-// systems are os.Interrupt (send the process an interrupt) and os.Kill (force
-// the process to exit). On Windows, sending os.Interrupt to a process with
-// os.Process.Signal is not implemented; it will return an error instead of
-// sending a signal.
-var (
- Interrupt Signal = syscall.SIGINT
- Kill Signal = syscall.SIGKILL
-)
-
-func startProcess(name string, argv []string, attr *ProcAttr) (p *Process, err error) {
- // If there is no SysProcAttr (i.e., no Chroot or changed
- // UID/GID), double-check existence of the directory we want
- // to chdir into. We can make the error clearer this way.
- if attr != nil && attr.Sys == nil && attr.Dir != "" {
- if _, err := Stat(attr.Dir); err != nil {
- pe := err.(*PathError)
- pe.Op = "chdir"
- return nil, pe
- }
- }
-
- sysattr := &syscall.ProcAttr{
- Dir: attr.Dir,
- Env: attr.Env,
- Sys: attr.Sys,
- }
- if sysattr.Env == nil {
- sysattr.Env, err = execenv.Default(sysattr.Sys)
- if err != nil {
- return nil, err
- }
- }
- sysattr.Files = make([]uintptr, 0, len(attr.Files))
- for _, f := range attr.Files {
- sysattr.Files = append(sysattr.Files, f.Fd())
- }
-
- pid, h, e := syscall.StartProcess(name, argv, sysattr)
-
- // Make sure we don't run the finalizers of attr.Files.
- runtime.KeepAlive(attr)
-
- if e != nil {
- return nil, &PathError{Op: "fork/exec", Path: name, Err: e}
- }
-
- return newProcess(pid, h), nil
-}
-
-func (p *Process) kill() error {
- return p.Signal(Kill)
-}
-
-// ProcessState stores information about a process, as reported by Wait.
-type ProcessState struct {
- pid int // The process's id.
- status syscall.WaitStatus // System-dependent status info.
- rusage *syscall.Rusage
-}
-
-// Pid returns the process id of the exited process.
-func (p *ProcessState) Pid() int {
- return p.pid
-}
-
-func (p *ProcessState) exited() bool {
- return p.status.Exited()
-}
-
-func (p *ProcessState) success() bool {
- return p.status.ExitStatus() == 0
-}
-
-func (p *ProcessState) sys() any {
- return p.status
-}
-
-func (p *ProcessState) sysUsage() any {
- return p.rusage
-}
-
-func (p *ProcessState) String() string {
- if p == nil {
- return "<nil>"
- }
- status := p.Sys().(syscall.WaitStatus)
- res := ""
- switch {
- case status.Exited():
- code := status.ExitStatus()
- if runtime.GOOS == "windows" && uint(code) >= 1<<16 { // windows uses large hex numbers
- res = "exit status " + uitox(uint(code))
- } else { // unix systems use small decimal integers
- res = "exit status " + itoa.Itoa(code) // unix
- }
- case status.Signaled():
- res = "signal: " + status.Signal().String()
- case status.Stopped():
- res = "stop signal: " + status.StopSignal().String()
- if status.StopSignal() == syscall.SIGTRAP && status.TrapCause() != 0 {
- res += " (trap " + itoa.Itoa(status.TrapCause()) + ")"
- }
- case status.Continued():
- res = "continued"
- }
- if status.CoreDump() {
- res += " (core dumped)"
- }
- return res
-}
-
-// ExitCode returns the exit code of the exited process, or -1
-// if the process hasn't exited or was terminated by a signal.
-func (p *ProcessState) ExitCode() int {
- // return -1 if the process hasn't started.
- if p == nil {
- return -1
- }
- return p.status.ExitStatus()
-}
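-
-// Editor's sketch (not part of the original file): the low-level Process API;
-// most code uses os/exec instead. "/bin/true" is a placeholder path:
-//
-//    p, err := os.StartProcess("/bin/true", []string{"true"},
-//        &os.ProcAttr{Files: []*os.File{os.Stdin, os.Stdout, os.Stderr}})
-//    if err != nil { /* handle */ }
-//    state, _ := p.Wait()
-//    state.ExitCode() // 0 on success, -1 if the process was signaled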
diff --git a/contrib/go/_std_1.18/src/os/exec_unix.go b/contrib/go/_std_1.18/src/os/exec_unix.go
deleted file mode 100644
index 250c5c6402..0000000000
--- a/contrib/go/_std_1.18/src/os/exec_unix.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris
-
-package os
-
-import (
- "errors"
- "runtime"
- "syscall"
- "time"
-)
-
-func (p *Process) wait() (ps *ProcessState, err error) {
- if p.Pid == -1 {
- return nil, syscall.EINVAL
- }
-
- // If we can block until Wait4 will succeed immediately, do so.
- ready, err := p.blockUntilWaitable()
- if err != nil {
- return nil, err
- }
- if ready {
- // Mark the process done now, before the call to Wait4,
- // so that Process.signal will not send a signal.
- p.setDone()
- // Acquire a write lock on sigMu to wait for any
- // active call to the signal method to complete.
- p.sigMu.Lock()
- p.sigMu.Unlock()
- }
-
- var (
- status syscall.WaitStatus
- rusage syscall.Rusage
- pid1 int
- e error
- )
- for {
- pid1, e = syscall.Wait4(p.Pid, &status, 0, &rusage)
- if e != syscall.EINTR {
- break
- }
- }
- if e != nil {
- return nil, NewSyscallError("wait", e)
- }
- if pid1 != 0 {
- p.setDone()
- }
- ps = &ProcessState{
- pid: pid1,
- status: status,
- rusage: &rusage,
- }
- return ps, nil
-}
-
-func (p *Process) signal(sig Signal) error {
- if p.Pid == -1 {
- return errors.New("os: process already released")
- }
- if p.Pid == 0 {
- return errors.New("os: process not initialized")
- }
- p.sigMu.RLock()
- defer p.sigMu.RUnlock()
- if p.done() {
- return ErrProcessDone
- }
- s, ok := sig.(syscall.Signal)
- if !ok {
- return errors.New("os: unsupported signal type")
- }
- if e := syscall.Kill(p.Pid, s); e != nil {
- if e == syscall.ESRCH {
- return ErrProcessDone
- }
- return e
- }
- return nil
-}
-
-func (p *Process) release() error {
- // NOOP for unix.
- p.Pid = -1
- // no need for a finalizer anymore
- runtime.SetFinalizer(p, nil)
- return nil
-}
-
-func findProcess(pid int) (p *Process, err error) {
- // NOOP for unix.
- return newProcess(pid, 0), nil
-}
-
-func (p *ProcessState) userTime() time.Duration {
- return time.Duration(p.rusage.Utime.Nano()) * time.Nanosecond
-}
-
-func (p *ProcessState) systemTime() time.Duration {
- return time.Duration(p.rusage.Stime.Nano()) * time.Nanosecond
-}
diff --git a/contrib/go/_std_1.18/src/os/file.go b/contrib/go/_std_1.18/src/os/file.go
deleted file mode 100644
index 2823128554..0000000000
--- a/contrib/go/_std_1.18/src/os/file.go
+++ /dev/null
@@ -1,723 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package os provides a platform-independent interface to operating system
-// functionality. The design is Unix-like, although the error handling is
-// Go-like; failing calls return values of type error rather than error numbers.
-// Often, more information is available within the error. For example,
-// if a call that takes a file name fails, such as Open or Stat, the error
-// will include the failing file name when printed and will be of type
-// *PathError, which may be unpacked for more information.
-//
-// The os interface is intended to be uniform across all operating systems.
-// Features not generally available appear in the system-specific package syscall.
-//
-// Here is a simple example, opening a file and reading some of it.
-//
-// file, err := os.Open("file.go") // For read access.
-// if err != nil {
-// log.Fatal(err)
-// }
-//
-// If the open fails, the error string will be self-explanatory, like
-//
-// open file.go: no such file or directory
-//
-// The file's data can then be read into a slice of bytes. Read and
-// Write take their byte counts from the length of the argument slice.
-//
-// data := make([]byte, 100)
-// count, err := file.Read(data)
-// if err != nil {
-// log.Fatal(err)
-// }
-// fmt.Printf("read %d bytes: %q\n", count, data[:count])
-//
-// Note: The maximum number of concurrent operations on a File may be limited by
-// the OS or the system. The number should be high, but exceeding it may degrade
-// performance or cause other issues.
-//
-package os
-
-import (
- "errors"
- "internal/poll"
- "internal/testlog"
- "internal/unsafeheader"
- "io"
- "io/fs"
- "runtime"
- "syscall"
- "time"
- "unsafe"
-)
-
-// Name returns the name of the file as presented to Open.
-func (f *File) Name() string { return f.name }
-
-// Stdin, Stdout, and Stderr are open Files pointing to the standard input,
-// standard output, and standard error file descriptors.
-//
-// Note that the Go runtime writes to standard error for panics and crashes;
-// closing Stderr may cause those messages to go elsewhere, perhaps
-// to a file opened later.
-var (
- Stdin = NewFile(uintptr(syscall.Stdin), "/dev/stdin")
- Stdout = NewFile(uintptr(syscall.Stdout), "/dev/stdout")
- Stderr = NewFile(uintptr(syscall.Stderr), "/dev/stderr")
-)
-
-// Flags to OpenFile wrapping those of the underlying system. Not all
-// flags may be implemented on a given system.
-const (
- // Exactly one of O_RDONLY, O_WRONLY, or O_RDWR must be specified.
- O_RDONLY int = syscall.O_RDONLY // open the file read-only.
- O_WRONLY int = syscall.O_WRONLY // open the file write-only.
- O_RDWR int = syscall.O_RDWR // open the file read-write.
- // The remaining values may be or'ed in to control behavior.
- O_APPEND int = syscall.O_APPEND // append data to the file when writing.
- O_CREATE int = syscall.O_CREAT // create a new file if none exists.
- O_EXCL int = syscall.O_EXCL // used with O_CREATE, file must not exist.
- O_SYNC int = syscall.O_SYNC // open for synchronous I/O.
- O_TRUNC int = syscall.O_TRUNC // truncate regular writable file when opened.
-)
-
-// Seek whence values.
-//
-// Deprecated: Use io.SeekStart, io.SeekCurrent, and io.SeekEnd.
-const (
- SEEK_SET int = 0 // seek relative to the origin of the file
- SEEK_CUR int = 1 // seek relative to the current offset
- SEEK_END int = 2 // seek relative to the end
-)
-
-// LinkError records an error during a link or symlink or rename
-// system call and the paths that caused it.
-type LinkError struct {
- Op string
- Old string
- New string
- Err error
-}
-
-func (e *LinkError) Error() string {
- return e.Op + " " + e.Old + " " + e.New + ": " + e.Err.Error()
-}
-
-func (e *LinkError) Unwrap() error {
- return e.Err
-}
-
-// Read reads up to len(b) bytes from the File and stores them in b.
-// It returns the number of bytes read and any error encountered.
-// At end of file, Read returns 0, io.EOF.
-func (f *File) Read(b []byte) (n int, err error) {
- if err := f.checkValid("read"); err != nil {
- return 0, err
- }
- n, e := f.read(b)
- return n, f.wrapErr("read", e)
-}
-
-// ReadAt reads len(b) bytes from the File starting at byte offset off.
-// It returns the number of bytes read and the error, if any.
-// ReadAt always returns a non-nil error when n < len(b).
-// At end of file, that error is io.EOF.
-func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
- if err := f.checkValid("read"); err != nil {
- return 0, err
- }
-
- if off < 0 {
- return 0, &PathError{Op: "readat", Path: f.name, Err: errors.New("negative offset")}
- }
-
- for len(b) > 0 {
- m, e := f.pread(b, off)
- if e != nil {
- err = f.wrapErr("read", e)
- break
- }
- n += m
- b = b[m:]
- off += int64(m)
- }
- return
-}
-
-// ReadFrom implements io.ReaderFrom.
-func (f *File) ReadFrom(r io.Reader) (n int64, err error) {
- if err := f.checkValid("write"); err != nil {
- return 0, err
- }
- n, handled, e := f.readFrom(r)
- if !handled {
- return genericReadFrom(f, r) // without wrapping
- }
- return n, f.wrapErr("write", e)
-}
-
-func genericReadFrom(f *File, r io.Reader) (int64, error) {
- return io.Copy(onlyWriter{f}, r)
-}
-
-type onlyWriter struct {
- io.Writer
-}
-
-// Write writes len(b) bytes from b to the File.
-// It returns the number of bytes written and an error, if any.
-// Write returns a non-nil error when n != len(b).
-func (f *File) Write(b []byte) (n int, err error) {
- if err := f.checkValid("write"); err != nil {
- return 0, err
- }
- n, e := f.write(b)
- if n < 0 {
- n = 0
- }
- if n != len(b) {
- err = io.ErrShortWrite
- }
-
- epipecheck(f, e)
-
- if e != nil {
- err = f.wrapErr("write", e)
- }
-
- return n, err
-}
-
-var errWriteAtInAppendMode = errors.New("os: invalid use of WriteAt on file opened with O_APPEND")
-
-// WriteAt writes len(b) bytes to the File starting at byte offset off.
-// It returns the number of bytes written and an error, if any.
-// WriteAt returns a non-nil error when n != len(b).
-//
-// If file was opened with the O_APPEND flag, WriteAt returns an error.
-func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
- if err := f.checkValid("write"); err != nil {
- return 0, err
- }
- if f.appendMode {
- return 0, errWriteAtInAppendMode
- }
-
- if off < 0 {
- return 0, &PathError{Op: "writeat", Path: f.name, Err: errors.New("negative offset")}
- }
-
- for len(b) > 0 {
- m, e := f.pwrite(b, off)
- if e != nil {
- err = f.wrapErr("write", e)
- break
- }
- n += m
- b = b[m:]
- off += int64(m)
- }
- return
-}
-
-// Seek sets the offset for the next Read or Write on file to offset, interpreted
-// according to whence: 0 means relative to the origin of the file, 1 means
-// relative to the current offset, and 2 means relative to the end.
-// It returns the new offset and an error, if any.
-// The behavior of Seek on a file opened with O_APPEND is not specified.
-//
-// If f is a directory, the behavior of Seek varies by operating
-// system; you can seek to the beginning of the directory on Unix-like
-// operating systems, but not on Windows.
-func (f *File) Seek(offset int64, whence int) (ret int64, err error) {
- if err := f.checkValid("seek"); err != nil {
- return 0, err
- }
- r, e := f.seek(offset, whence)
- if e == nil && f.dirinfo != nil && r != 0 {
- e = syscall.EISDIR
- }
- if e != nil {
- return 0, f.wrapErr("seek", e)
- }
- return r, nil
-}
-
-// WriteString is like Write, but writes the contents of string s rather than
-// a slice of bytes.
-func (f *File) WriteString(s string) (n int, err error) {
- var b []byte
- hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b))
- hdr.Data = (*unsafeheader.String)(unsafe.Pointer(&s)).Data
- hdr.Cap = len(s)
- hdr.Len = len(s)
- return f.Write(b)
-}
-
-// Mkdir creates a new directory with the specified name and permission
-// bits (before umask).
-// If there is an error, it will be of type *PathError.
-func Mkdir(name string, perm FileMode) error {
- if runtime.GOOS == "windows" && isWindowsNulName(name) {
- return &PathError{Op: "mkdir", Path: name, Err: syscall.ENOTDIR}
- }
- longName := fixLongPath(name)
- e := ignoringEINTR(func() error {
- return syscall.Mkdir(longName, syscallMode(perm))
- })
-
- if e != nil {
- return &PathError{Op: "mkdir", Path: name, Err: e}
- }
-
- // mkdir(2) itself won't handle the sticky bit on *BSD and Solaris
- if !supportsCreateWithStickyBit && perm&ModeSticky != 0 {
- e = setStickyBit(name)
-
- if e != nil {
- Remove(name)
- return e
- }
- }
-
- return nil
-}
-
-// setStickyBit adds ModeSticky to the permission bits of path, non-atomically.
-func setStickyBit(name string) error {
- fi, err := Stat(name)
- if err != nil {
- return err
- }
- return Chmod(name, fi.Mode()|ModeSticky)
-}
-
-// Chdir changes the current working directory to the named directory.
-// If there is an error, it will be of type *PathError.
-func Chdir(dir string) error {
- if e := syscall.Chdir(dir); e != nil {
- testlog.Open(dir) // observe likely non-existent directory
- return &PathError{Op: "chdir", Path: dir, Err: e}
- }
- if log := testlog.Logger(); log != nil {
- wd, err := Getwd()
- if err == nil {
- log.Chdir(wd)
- }
- }
- return nil
-}
-
-// Open opens the named file for reading. If successful, methods on
-// the returned file can be used for reading; the associated file
-// descriptor has mode O_RDONLY.
-// If there is an error, it will be of type *PathError.
-func Open(name string) (*File, error) {
- return OpenFile(name, O_RDONLY, 0)
-}
-
-// Create creates or truncates the named file. If the file already exists,
-// it is truncated. If the file does not exist, it is created with mode 0666
-// (before umask). If successful, methods on the returned File can
-// be used for I/O; the associated file descriptor has mode O_RDWR.
-// If there is an error, it will be of type *PathError.
-func Create(name string) (*File, error) {
- return OpenFile(name, O_RDWR|O_CREATE|O_TRUNC, 0666)
-}
-
-// OpenFile is the generalized open call; most users will use Open
-// or Create instead. It opens the named file with specified flag
-// (O_RDONLY etc.). If the file does not exist, and the O_CREATE flag
-// is passed, it is created with mode perm (before umask). If successful,
-// methods on the returned File can be used for I/O.
-// If there is an error, it will be of type *PathError.
-func OpenFile(name string, flag int, perm FileMode) (*File, error) {
- testlog.Open(name)
- f, err := openFileNolog(name, flag, perm)
- if err != nil {
- return nil, err
- }
- f.appendMode = flag&O_APPEND != 0
-
- return f, nil
-}
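-
-// Editor's sketch (not part of the original file): appending to a log file,
-// creating it on first use; "app.log" is a placeholder name:
-//
-//    f, err := os.OpenFile("app.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
-//    if err != nil { /* handle */ }
-//    defer f.Close()
-//    f.WriteString("hello\n")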
-
-// lstat is overridden in tests.
-var lstat = Lstat
-
-// Rename renames (moves) oldpath to newpath.
-// If newpath already exists and is not a directory, Rename replaces it.
-// OS-specific restrictions may apply when oldpath and newpath are in different directories.
-// If there is an error, it will be of type *LinkError.
-func Rename(oldpath, newpath string) error {
- return rename(oldpath, newpath)
-}
-
-// Many functions in package syscall return a count of -1 instead of 0.
-// Using fixCount(call()) instead of call() corrects the count.
-func fixCount(n int, err error) (int, error) {
- if n < 0 {
- n = 0
- }
- return n, err
-}
-
-// wrapErr wraps an error that occurred during an operation on an open file.
-// It passes io.EOF through unchanged, otherwise converts
-// poll.ErrFileClosing to ErrClosed and wraps the error in a PathError.
-func (f *File) wrapErr(op string, err error) error {
- if err == nil || err == io.EOF {
- return err
- }
- if err == poll.ErrFileClosing {
- err = ErrClosed
- }
- return &PathError{Op: op, Path: f.name, Err: err}
-}
-
-// TempDir returns the default directory to use for temporary files.
-//
-// On Unix systems, it returns $TMPDIR if non-empty, else /tmp.
-// On Windows, it uses GetTempPath, returning the first non-empty
-// value from %TMP%, %TEMP%, %USERPROFILE%, or the Windows directory.
-// On Plan 9, it returns /tmp.
-//
-// The directory is neither guaranteed to exist nor have accessible
-// permissions.
-func TempDir() string {
- return tempDir()
-}
-
-// UserCacheDir returns the default root directory to use for user-specific
-// cached data. Users should create their own application-specific subdirectory
-// within this one and use that.
-//
-// On Unix systems, it returns $XDG_CACHE_HOME as specified by
-// https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html if
-// non-empty, else $HOME/.cache.
-// On Darwin, it returns $HOME/Library/Caches.
-// On Windows, it returns %LocalAppData%.
-// On Plan 9, it returns $home/lib/cache.
-//
-// If the location cannot be determined (for example, $HOME is not defined),
-// then it will return an error.
-func UserCacheDir() (string, error) {
- var dir string
-
- switch runtime.GOOS {
- case "windows":
- dir = Getenv("LocalAppData")
- if dir == "" {
- return "", errors.New("%LocalAppData% is not defined")
- }
-
- case "darwin", "ios":
- dir = Getenv("HOME")
- if dir == "" {
- return "", errors.New("$HOME is not defined")
- }
- dir += "/Library/Caches"
-
- case "plan9":
- dir = Getenv("home")
- if dir == "" {
- return "", errors.New("$home is not defined")
- }
- dir += "/lib/cache"
-
- default: // Unix
- dir = Getenv("XDG_CACHE_HOME")
- if dir == "" {
- dir = Getenv("HOME")
- if dir == "" {
- return "", errors.New("neither $XDG_CACHE_HOME nor $HOME are defined")
- }
- dir += "/.cache"
- }
- }
-
- return dir, nil
-}
-
-// UserConfigDir returns the default root directory to use for user-specific
-// configuration data. Users should create their own application-specific
-// subdirectory within this one and use that.
-//
-// On Unix systems, it returns $XDG_CONFIG_HOME as specified by
-// https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html if
-// non-empty, else $HOME/.config.
-// On Darwin, it returns $HOME/Library/Application Support.
-// On Windows, it returns %AppData%.
-// On Plan 9, it returns $home/lib.
-//
-// If the location cannot be determined (for example, $HOME is not defined),
-// then it will return an error.
-func UserConfigDir() (string, error) {
- var dir string
-
- switch runtime.GOOS {
- case "windows":
- dir = Getenv("AppData")
- if dir == "" {
- return "", errors.New("%AppData% is not defined")
- }
-
- case "darwin", "ios":
- dir = Getenv("HOME")
- if dir == "" {
- return "", errors.New("$HOME is not defined")
- }
- dir += "/Library/Application Support"
-
- case "plan9":
- dir = Getenv("home")
- if dir == "" {
- return "", errors.New("$home is not defined")
- }
- dir += "/lib"
-
- default: // Unix
- dir = Getenv("XDG_CONFIG_HOME")
- if dir == "" {
- dir = Getenv("HOME")
- if dir == "" {
- return "", errors.New("neither $XDG_CONFIG_HOME nor $HOME are defined")
- }
- dir += "/.config"
- }
- }
-
- return dir, nil
-}
-
-// UserHomeDir returns the current user's home directory.
-//
-// On Unix, including macOS, it returns the $HOME environment variable.
-// On Windows, it returns %USERPROFILE%.
-// On Plan 9, it returns the $home environment variable.
-func UserHomeDir() (string, error) {
- env, enverr := "HOME", "$HOME"
- switch runtime.GOOS {
- case "windows":
- env, enverr = "USERPROFILE", "%userprofile%"
- case "plan9":
- env, enverr = "home", "$home"
- }
- if v := Getenv(env); v != "" {
- return v, nil
- }
- // On some operating systems the home directory is not always defined.
- switch runtime.GOOS {
- case "android":
- return "/sdcard", nil
- case "ios":
- return "/", nil
- }
- return "", errors.New(enverr + " is not defined")
-}
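-
-// Editor's sketch (not part of the original file): each helper returns an
-// error when the relevant environment variables are unset:
-//
-//    cache, _ := os.UserCacheDir()   // e.g. "/home/user/.cache" on Linux
-//    config, _ := os.UserConfigDir() // e.g. "/home/user/.config" on Linux
-//    home, _ := os.UserHomeDir()     // e.g. "/home/user"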
-
-// Chmod changes the mode of the named file to mode.
-// If the file is a symbolic link, it changes the mode of the link's target.
-// If there is an error, it will be of type *PathError.
-//
-// A different subset of the mode bits is used, depending on the
-// operating system.
-//
-// On Unix, the mode's permission bits, ModeSetuid, ModeSetgid, and
-// ModeSticky are used.
-//
-// On Windows, only the 0200 bit (owner writable) of mode is used; it
-// controls whether the file's read-only attribute is set or cleared.
-// The other bits are currently unused. For compatibility with Go 1.12
-// and earlier, use a non-zero mode. Use mode 0400 for a read-only
-// file and 0600 for a readable+writable file.
-//
-// On Plan 9, the mode's permission bits, ModeAppend, ModeExclusive,
-// and ModeTemporary are used.
-func Chmod(name string, mode FileMode) error { return chmod(name, mode) }
-
-// Chmod changes the mode of the file to mode.
-// If there is an error, it will be of type *PathError.
-func (f *File) Chmod(mode FileMode) error { return f.chmod(mode) }
-
-// SetDeadline sets the read and write deadlines for a File.
-// It is equivalent to calling both SetReadDeadline and SetWriteDeadline.
-//
-// Only some kinds of files support setting a deadline. Calls to SetDeadline
-// for files that do not support deadlines will return ErrNoDeadline.
-// On most systems ordinary files do not support deadlines, but pipes do.
-//
-// A deadline is an absolute time after which I/O operations fail with an
-// error instead of blocking. The deadline applies to all future and pending
-// I/O, not just the immediately following call to Read or Write.
-// After a deadline has been exceeded, the connection can be refreshed
-// by setting a deadline in the future.
-//
-// If the deadline is exceeded a call to Read or Write or to other I/O
-// methods will return an error that wraps ErrDeadlineExceeded.
-// This can be tested using errors.Is(err, os.ErrDeadlineExceeded).
-// That error implements the Timeout method, and calling the Timeout
-// method will return true, but there are other possible errors for which
-// the Timeout will return true even if the deadline has not been exceeded.
-//
-// An idle timeout can be implemented by repeatedly extending
-// the deadline after successful Read or Write calls.
-//
-// A zero value for t means I/O operations will not time out.
-func (f *File) SetDeadline(t time.Time) error {
- return f.setDeadline(t)
-}
-
-// SetReadDeadline sets the deadline for future Read calls and any
-// currently-blocked Read call.
-// A zero value for t means Read will not time out.
-// Not all files support setting deadlines; see SetDeadline.
-func (f *File) SetReadDeadline(t time.Time) error {
- return f.setReadDeadline(t)
-}
-
-// SetWriteDeadline sets the deadline for any future Write calls and any
-// currently-blocked Write call.
-// Even if Write times out, it may return n > 0, indicating that
-// some of the data was successfully written.
-// A zero value for t means Write will not time out.
-// Not all files support setting deadlines; see SetDeadline.
-func (f *File) SetWriteDeadline(t time.Time) error {
- return f.setWriteDeadline(t)
-}
-
-// SyscallConn returns a raw file.
-// This implements the syscall.Conn interface.
-func (f *File) SyscallConn() (syscall.RawConn, error) {
- if err := f.checkValid("SyscallConn"); err != nil {
- return nil, err
- }
- return newRawConn(f)
-}
-
-// isWindowsNulName reports whether name is os.DevNull ('NUL') on Windows.
-// It returns true if name is 'NUL', regardless of case.
-func isWindowsNulName(name string) bool {
- if len(name) != 3 {
- return false
- }
- if name[0] != 'n' && name[0] != 'N' {
- return false
- }
- if name[1] != 'u' && name[1] != 'U' {
- return false
- }
- if name[2] != 'l' && name[2] != 'L' {
- return false
- }
- return true
-}
-
-// DirFS returns a file system (an fs.FS) for the tree of files rooted at the directory dir.
-//
-// Note that DirFS("/prefix") only guarantees that the Open calls it makes to the
-// operating system will begin with "/prefix": DirFS("/prefix").Open("file") is the
-// same as os.Open("/prefix/file"). So if /prefix/file is a symbolic link pointing outside
-// the /prefix tree, then using DirFS does not stop the access any more than using
-// os.Open does. DirFS is therefore not a general substitute for a chroot-style security
-// mechanism when the directory tree contains arbitrary content.
-func DirFS(dir string) fs.FS {
- return dirFS(dir)
-}
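
A usage sketch for DirFS; the directory and file names are hypothetical:

    package main

    import (
        "fmt"
        "io/fs"
        "log"
        "os"
    )

    func main() {
        fsys := os.DirFS("/srv/data") // hypothetical directory
        // Equivalent to os.ReadFile("/srv/data/config/app.json").
        data, err := fs.ReadFile(fsys, "config/app.json")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("read %d bytes\n", len(data))
    }
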
-
-func containsAny(s, chars string) bool {
- for i := 0; i < len(s); i++ {
- for j := 0; j < len(chars); j++ {
- if s[i] == chars[j] {
- return true
- }
- }
- }
- return false
-}
-
-type dirFS string
-
-func (dir dirFS) Open(name string) (fs.File, error) {
- if !fs.ValidPath(name) || runtime.GOOS == "windows" && containsAny(name, `\:`) {
- return nil, &PathError{Op: "open", Path: name, Err: ErrInvalid}
- }
- f, err := Open(string(dir) + "/" + name)
- if err != nil {
- return nil, err // nil fs.File
- }
- return f, nil
-}
-
-func (dir dirFS) Stat(name string) (fs.FileInfo, error) {
- if !fs.ValidPath(name) || runtime.GOOS == "windows" && containsAny(name, `\:`) {
- return nil, &PathError{Op: "stat", Path: name, Err: ErrInvalid}
- }
- f, err := Stat(string(dir) + "/" + name)
- if err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// ReadFile reads the named file and returns the contents.
-// A successful call returns err == nil, not err == EOF.
-// Because ReadFile reads the whole file, it does not treat an EOF from Read
-// as an error to be reported.
-func ReadFile(name string) ([]byte, error) {
- f, err := Open(name)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- var size int
- if info, err := f.Stat(); err == nil {
- size64 := info.Size()
- if int64(int(size64)) == size64 {
- size = int(size64)
- }
- }
- size++ // one byte for final read at EOF
-
- // If a file claims a small size, read at least 512 bytes.
- // In particular, files in Linux's /proc claim size 0 but
- // then do not work right if read in small pieces,
- // so an initial read of 1 byte would not work correctly.
- if size < 512 {
- size = 512
- }
-
- data := make([]byte, 0, size)
- for {
- if len(data) >= cap(data) {
- d := append(data[:cap(data)], 0)
- data = d[:len(data)]
- }
- n, err := f.Read(data[len(data):cap(data)])
- data = data[:len(data)+n]
- if err != nil {
- if err == io.EOF {
- err = nil
- }
- return data, err
- }
- }
-}
-
-// WriteFile writes data to the named file, creating it if necessary.
-// If the file does not exist, WriteFile creates it with permissions perm (before umask);
-// otherwise WriteFile truncates it before writing, without changing permissions.
-func WriteFile(name string, data []byte, perm FileMode) error {
- f, err := OpenFile(name, O_WRONLY|O_CREATE|O_TRUNC, perm)
- if err != nil {
- return err
- }
- _, err = f.Write(data)
- if err1 := f.Close(); err1 != nil && err == nil {
- err = err1
- }
- return err
-}
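
A round-trip sketch of ReadFile and WriteFile; the path, contents, and permissions are illustrative:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        path := "/tmp/example.txt" // illustrative path
        if err := os.WriteFile(path, []byte("hello\n"), 0644); err != nil {
            panic(err)
        }
        // A successful ReadFile returns err == nil, never io.EOF.
        data, err := os.ReadFile(path)
        if err != nil {
            panic(err)
        }
        fmt.Print(string(data)) // hello
    }
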
diff --git a/contrib/go/_std_1.18/src/os/file_posix.go b/contrib/go/_std_1.18/src/os/file_posix.go
deleted file mode 100644
index f34571d68d..0000000000
--- a/contrib/go/_std_1.18/src/os/file_posix.go
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris || windows
-
-package os
-
-import (
- "runtime"
- "syscall"
- "time"
-)
-
-func sigpipe() // implemented in package runtime
-
-// Close closes the File, rendering it unusable for I/O.
-// On files that support SetDeadline, any pending I/O operations will
-// be canceled and return immediately with an ErrClosed error.
-// Close will return an error if it has already been called.
-func (f *File) Close() error {
- if f == nil {
- return ErrInvalid
- }
- return f.file.close()
-}
-
-// read reads up to len(b) bytes from the File.
-// It returns the number of bytes read and an error, if any.
-func (f *File) read(b []byte) (n int, err error) {
- n, err = f.pfd.Read(b)
- runtime.KeepAlive(f)
- return n, err
-}
-
-// pread reads len(b) bytes from the File starting at byte offset off.
-// It returns the number of bytes read and the error, if any.
-// EOF is signaled by a zero count with err set to nil.
-func (f *File) pread(b []byte, off int64) (n int, err error) {
- n, err = f.pfd.Pread(b, off)
- runtime.KeepAlive(f)
- return n, err
-}
-
-// write writes len(b) bytes to the File.
-// It returns the number of bytes written and an error, if any.
-func (f *File) write(b []byte) (n int, err error) {
- n, err = f.pfd.Write(b)
- runtime.KeepAlive(f)
- return n, err
-}
-
-// pwrite writes len(b) bytes to the File starting at byte offset off.
-// It returns the number of bytes written and an error, if any.
-func (f *File) pwrite(b []byte, off int64) (n int, err error) {
- n, err = f.pfd.Pwrite(b, off)
- runtime.KeepAlive(f)
- return n, err
-}
-
-// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
-func syscallMode(i FileMode) (o uint32) {
- o |= uint32(i.Perm())
- if i&ModeSetuid != 0 {
- o |= syscall.S_ISUID
- }
- if i&ModeSetgid != 0 {
- o |= syscall.S_ISGID
- }
- if i&ModeSticky != 0 {
- o |= syscall.S_ISVTX
- }
- // No mapping for Go's ModeTemporary (plan9 only).
- return
-}
-
-// See docs in file.go:Chmod.
-func chmod(name string, mode FileMode) error {
- longName := fixLongPath(name)
- e := ignoringEINTR(func() error {
- return syscall.Chmod(longName, syscallMode(mode))
- })
- if e != nil {
- return &PathError{Op: "chmod", Path: name, Err: e}
- }
- return nil
-}
-
-// See docs in file.go:(*File).Chmod.
-func (f *File) chmod(mode FileMode) error {
- if err := f.checkValid("chmod"); err != nil {
- return err
- }
- if e := f.pfd.Fchmod(syscallMode(mode)); e != nil {
- return f.wrapErr("chmod", e)
- }
- return nil
-}
-
-// Chown changes the numeric uid and gid of the named file.
-// If the file is a symbolic link, it changes the uid and gid of the link's target.
-// A uid or gid of -1 means to not change that value.
-// If there is an error, it will be of type *PathError.
-//
-// On Windows or Plan 9, Chown always returns the syscall.EWINDOWS or
-// EPLAN9 error, wrapped in *PathError.
-func Chown(name string, uid, gid int) error {
- e := ignoringEINTR(func() error {
- return syscall.Chown(name, uid, gid)
- })
- if e != nil {
- return &PathError{Op: "chown", Path: name, Err: e}
- }
- return nil
-}
-
-// Lchown changes the numeric uid and gid of the named file.
-// If the file is a symbolic link, it changes the uid and gid of the link itself.
-// If there is an error, it will be of type *PathError.
-//
-// On Windows, it always returns the syscall.EWINDOWS error, wrapped
-// in *PathError.
-func Lchown(name string, uid, gid int) error {
- e := ignoringEINTR(func() error {
- return syscall.Lchown(name, uid, gid)
- })
- if e != nil {
- return &PathError{Op: "lchown", Path: name, Err: e}
- }
- return nil
-}
-
-// Chown changes the numeric uid and gid of the named file.
-// If there is an error, it will be of type *PathError.
-//
-// On Windows, it always returns the syscall.EWINDOWS error, wrapped
-// in *PathError.
-func (f *File) Chown(uid, gid int) error {
- if err := f.checkValid("chown"); err != nil {
- return err
- }
- if e := f.pfd.Fchown(uid, gid); e != nil {
- return f.wrapErr("chown", e)
- }
- return nil
-}
-
-// Truncate changes the size of the file.
-// It does not change the I/O offset.
-// If there is an error, it will be of type *PathError.
-func (f *File) Truncate(size int64) error {
- if err := f.checkValid("truncate"); err != nil {
- return err
- }
- if e := f.pfd.Ftruncate(size); e != nil {
- return f.wrapErr("truncate", e)
- }
- return nil
-}
-
-// Sync commits the current contents of the file to stable storage.
-// Typically, this means flushing the file system's in-memory copy
-// of recently written data to disk.
-func (f *File) Sync() error {
- if err := f.checkValid("sync"); err != nil {
- return err
- }
- if e := f.pfd.Fsync(); e != nil {
- return f.wrapErr("sync", e)
- }
- return nil
-}
-
-// Chtimes changes the access and modification times of the named
-// file, similar to the Unix utime() or utimes() functions.
-//
-// The underlying filesystem may truncate or round the values to a
-// less precise time unit.
-// If there is an error, it will be of type *PathError.
-func Chtimes(name string, atime time.Time, mtime time.Time) error {
- var utimes [2]syscall.Timespec
- utimes[0] = syscall.NsecToTimespec(atime.UnixNano())
- utimes[1] = syscall.NsecToTimespec(mtime.UnixNano())
- if e := syscall.UtimesNano(fixLongPath(name), utimes[0:]); e != nil {
- return &PathError{Op: "chtimes", Path: name, Err: e}
- }
- return nil
-}
-
-// Chdir changes the current working directory to the file,
-// which must be a directory.
-// If there is an error, it will be of type *PathError.
-func (f *File) Chdir() error {
- if err := f.checkValid("chdir"); err != nil {
- return err
- }
- if e := f.pfd.Fchdir(); e != nil {
- return f.wrapErr("chdir", e)
- }
- return nil
-}
-
-// setDeadline sets the read and write deadline.
-func (f *File) setDeadline(t time.Time) error {
- if err := f.checkValid("SetDeadline"); err != nil {
- return err
- }
- return f.pfd.SetDeadline(t)
-}
-
-// setReadDeadline sets the read deadline.
-func (f *File) setReadDeadline(t time.Time) error {
- if err := f.checkValid("SetReadDeadline"); err != nil {
- return err
- }
- return f.pfd.SetReadDeadline(t)
-}
-
-// setWriteDeadline sets the write deadline.
-func (f *File) setWriteDeadline(t time.Time) error {
- if err := f.checkValid("SetWriteDeadline"); err != nil {
- return err
- }
- return f.pfd.SetWriteDeadline(t)
-}
-
-// checkValid checks whether f is valid for use.
-// If not, it returns an appropriate error, perhaps incorporating the operation name op.
-func (f *File) checkValid(op string) error {
- if f == nil {
- return ErrInvalid
- }
- return nil
-}
-
-// ignoringEINTR makes a function call and repeats it if it returns an
-// EINTR error. This appears to be required even though we install all
-// signal handlers with SA_RESTART: see #22838, #38033, #38836, #40846.
-// Also #20400 and #36644 are issues in which a signal handler is
-// installed without setting SA_RESTART. None of these are the common case,
-// but there are enough of them that it seems that we can't avoid
-// an EINTR loop.
-func ignoringEINTR(fn func() error) error {
- for {
- err := fn()
- if err != syscall.EINTR {
- return err
- }
- }
-}
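
ignoringEINTR is unexported, but the retry pattern it documents is easy to reproduce outside the package; a minimal Unix-only sketch with a hypothetical helper name:

    package main

    import "syscall"

    // retryEINTR mirrors os's unexported ignoringEINTR helper:
    // repeat fn until it stops failing with EINTR.
    // (Hypothetical helper, not part of the os package.)
    func retryEINTR(fn func() error) error {
        for {
            if err := fn(); err != syscall.EINTR {
                return err
            }
        }
    }

    func main() {
        // Illustrative use: a chmod that survives signal interruption.
        _ = retryEINTR(func() error {
            return syscall.Chmod("/tmp/example.txt", 0600)
        })
    }
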
diff --git a/contrib/go/_std_1.18/src/os/file_unix.go b/contrib/go/_std_1.18/src/os/file_unix.go
deleted file mode 100644
index a38db18954..0000000000
--- a/contrib/go/_std_1.18/src/os/file_unix.go
+++ /dev/null
@@ -1,430 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris
-
-package os
-
-import (
- "internal/poll"
- "internal/syscall/unix"
- "runtime"
- "syscall"
-)
-
-// fixLongPath is a noop on non-Windows platforms.
-func fixLongPath(path string) string {
- return path
-}
-
-func rename(oldname, newname string) error {
- fi, err := Lstat(newname)
- if err == nil && fi.IsDir() {
- // There are two independent errors this function can return:
- // one for a bad oldname, and one for a bad newname.
- // At this point we've determined the newname is bad.
- // But just in case oldname is also bad, prioritize returning
- // the oldname error because that's what we did historically.
- // However, if the old name and new name are not the same, yet
- // they refer to the same file, it implies a case-only
- // rename on a case-insensitive filesystem, which is ok.
- if ofi, err := Lstat(oldname); err != nil {
- if pe, ok := err.(*PathError); ok {
- err = pe.Err
- }
- return &LinkError{"rename", oldname, newname, err}
- } else if newname == oldname || !SameFile(fi, ofi) {
- return &LinkError{"rename", oldname, newname, syscall.EEXIST}
- }
- }
- err = ignoringEINTR(func() error {
- return syscall.Rename(oldname, newname)
- })
- if err != nil {
- return &LinkError{"rename", oldname, newname, err}
- }
- return nil
-}
-
-// file is the real representation of *File.
-// The extra level of indirection ensures that no clients of os
-// can overwrite this data, which could cause the finalizer
-// to close the wrong file descriptor.
-type file struct {
- pfd poll.FD
- name string
- dirinfo *dirInfo // nil unless directory being read
- nonblock bool // whether we set nonblocking mode
- stdoutOrErr bool // whether this is stdout or stderr
- appendMode bool // whether file is opened for appending
-}
-
-// Fd returns the integer Unix file descriptor referencing the open file.
-// If f is closed, the file descriptor becomes invalid.
-// If f is garbage collected, a finalizer may close the file descriptor,
-// making it invalid; see runtime.SetFinalizer for more information on when
-// a finalizer might be run. On Unix systems this will cause the SetDeadline
-// methods to stop working.
-// Because file descriptors can be reused, the returned file descriptor may
-// only be closed through the Close method of f, or by its finalizer during
-// garbage collection. Otherwise, during garbage collection the finalizer
-// may close an unrelated file descriptor with the same (reused) number.
-//
-// As an alternative, see the f.SyscallConn method.
-func (f *File) Fd() uintptr {
- if f == nil {
- return ^(uintptr(0))
- }
-
- // If we put the file descriptor into nonblocking mode,
- // then set it to blocking mode before we return it,
- // because historically we have always returned a descriptor
- // opened in blocking mode. The File will continue to work,
- // but any blocking operation will tie up a thread.
- if f.nonblock {
- f.pfd.SetBlocking()
- }
-
- return uintptr(f.pfd.Sysfd)
-}
-
-// NewFile returns a new File with the given file descriptor and
-// name. The returned value will be nil if fd is not a valid file
-// descriptor. On Unix systems, if the file descriptor is in
-// non-blocking mode, NewFile will attempt to return a pollable File
-// (one for which the SetDeadline methods work).
-//
-// After passing it to NewFile, fd may become invalid under the same
-// conditions described in the comments of the Fd method, and the same
-// constraints apply.
-func NewFile(fd uintptr, name string) *File {
- kind := kindNewFile
- if nb, err := unix.IsNonblock(int(fd)); err == nil && nb {
- kind = kindNonBlock
- }
- return newFile(fd, name, kind)
-}
-
-// newFileKind describes the kind of file to newFile.
-type newFileKind int
-
-const (
- kindNewFile newFileKind = iota
- kindOpenFile
- kindPipe
- kindNonBlock
-)
-
-// newFile is like NewFile, but if called from OpenFile or Pipe
-// (as passed in the kind parameter) it tries to add the file to
-// the runtime poller.
-func newFile(fd uintptr, name string, kind newFileKind) *File {
- fdi := int(fd)
- if fdi < 0 {
- return nil
- }
- f := &File{&file{
- pfd: poll.FD{
- Sysfd: fdi,
- IsStream: true,
- ZeroReadIsEOF: true,
- },
- name: name,
- stdoutOrErr: fdi == 1 || fdi == 2,
- }}
-
- pollable := kind == kindOpenFile || kind == kindPipe || kind == kindNonBlock
-
- // If the caller passed a non-blocking filedes (kindNonBlock),
- // we assume they know what they are doing so we allow it to be
- // used with kqueue.
- if kind == kindOpenFile {
- switch runtime.GOOS {
- case "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd":
- var st syscall.Stat_t
- err := ignoringEINTR(func() error {
- return syscall.Fstat(fdi, &st)
- })
- typ := st.Mode & syscall.S_IFMT
- // Don't try to use kqueue with regular files on *BSDs.
- // On FreeBSD a regular file is always
- // reported as ready for writing.
- // On Dragonfly, NetBSD and OpenBSD the fd is signaled
- // only once as ready (both read and write).
- // Issue 19093.
- // Also don't add directories to the netpoller.
- if err == nil && (typ == syscall.S_IFREG || typ == syscall.S_IFDIR) {
- pollable = false
- }
-
- // In addition to the behavior described above for regular files,
- // on Darwin, kqueue does not work properly with fifos:
- // closing the last writer does not cause a kqueue event
- // for any readers. See issue #24164.
- if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && typ == syscall.S_IFIFO {
- pollable = false
- }
- }
- }
-
- if err := f.pfd.Init("file", pollable); err != nil {
- // An error here indicates a failure to register
- // with the netpoll system. That can happen for
- // a file descriptor that is not supported by
- // epoll/kqueue; for example, disk files on
- // Linux systems. We assume that any real error
- // will show up in later I/O.
- } else if pollable {
- // We successfully registered with netpoll, so put
- // the file into nonblocking mode.
- if err := syscall.SetNonblock(fdi, true); err == nil {
- f.nonblock = true
- }
- }
-
- runtime.SetFinalizer(f.file, (*file).close)
- return f
-}
-
-// epipecheck raises SIGPIPE if we get an EPIPE error on standard
-// output or standard error. See the SIGPIPE docs in os/signal, and
-// issue 11845.
-func epipecheck(file *File, e error) {
- if e == syscall.EPIPE && file.stdoutOrErr {
- sigpipe()
- }
-}
-
-// DevNull is the name of the operating system's ``null device.''
-// On Unix-like systems, it is "/dev/null"; on Windows, "NUL".
-const DevNull = "/dev/null"
-
-// openFileNolog is the Unix implementation of OpenFile.
-// Changes here should be reflected in openFdAt, if relevant.
-func openFileNolog(name string, flag int, perm FileMode) (*File, error) {
- setSticky := false
- if !supportsCreateWithStickyBit && flag&O_CREATE != 0 && perm&ModeSticky != 0 {
- if _, err := Stat(name); IsNotExist(err) {
- setSticky = true
- }
- }
-
- var r int
- for {
- var e error
- r, e = syscall.Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
- if e == nil {
- break
- }
-
- // We have to check EINTR here, per issues 11180 and 39237.
- if e == syscall.EINTR {
- continue
- }
-
- return nil, &PathError{Op: "open", Path: name, Err: e}
- }
-
- // open(2) itself won't handle the sticky bit on *BSD and Solaris
- if setSticky {
- setStickyBit(name)
- }
-
- // There's a race here with fork/exec, which we are
- // content to live with. See ../syscall/exec_unix.go.
- if !supportsCloseOnExec {
- syscall.CloseOnExec(r)
- }
-
- return newFile(uintptr(r), name, kindOpenFile), nil
-}
-
-func (file *file) close() error {
- if file == nil {
- return syscall.EINVAL
- }
- if file.dirinfo != nil {
- file.dirinfo.close()
- file.dirinfo = nil
- }
- var err error
- if e := file.pfd.Close(); e != nil {
- if e == poll.ErrFileClosing {
- e = ErrClosed
- }
- err = &PathError{Op: "close", Path: file.name, Err: e}
- }
-
- // no need for a finalizer anymore
- runtime.SetFinalizer(file, nil)
- return err
-}
-
-// seek sets the offset for the next Read or Write on file to offset, interpreted
-// according to whence: 0 means relative to the origin of the file, 1 means
-// relative to the current offset, and 2 means relative to the end.
-// It returns the new offset and an error, if any.
-func (f *File) seek(offset int64, whence int) (ret int64, err error) {
- if f.dirinfo != nil {
- // Free cached dirinfo, so we allocate a new one if we
- // access this file as a directory again. See #35767 and #37161.
- f.dirinfo.close()
- f.dirinfo = nil
- }
- ret, err = f.pfd.Seek(offset, whence)
- runtime.KeepAlive(f)
- return ret, err
-}
-
-// Truncate changes the size of the named file.
-// If the file is a symbolic link, it changes the size of the link's target.
-// If there is an error, it will be of type *PathError.
-func Truncate(name string, size int64) error {
- e := ignoringEINTR(func() error {
- return syscall.Truncate(name, size)
- })
- if e != nil {
- return &PathError{Op: "truncate", Path: name, Err: e}
- }
- return nil
-}
-
-// Remove removes the named file or (empty) directory.
-// If there is an error, it will be of type *PathError.
-func Remove(name string) error {
- // System call interface forces us to know
- // whether name is a file or directory.
- // Try both: it is cheaper on average than
- // doing a Stat plus the right one.
- e := ignoringEINTR(func() error {
- return syscall.Unlink(name)
- })
- if e == nil {
- return nil
- }
- e1 := ignoringEINTR(func() error {
- return syscall.Rmdir(name)
- })
- if e1 == nil {
- return nil
- }
-
- // Both failed: figure out which error to return.
- // OS X and Linux differ on whether unlink(dir)
- // returns EISDIR, so can't use that. However,
- // both agree that rmdir(file) returns ENOTDIR,
- // so we can use that to decide which error is real.
- // Rmdir might also return ENOTDIR if given a bad
- // file path, like /etc/passwd/foo, but in that case,
- // both errors will be ENOTDIR, so it's okay to
- // use the error from unlink.
- if e1 != syscall.ENOTDIR {
- e = e1
- }
- return &PathError{Op: "remove", Path: name, Err: e}
-}
-
-func tempDir() string {
- dir := Getenv("TMPDIR")
- if dir == "" {
- if runtime.GOOS == "android" {
- dir = "/data/local/tmp"
- } else {
- dir = "/tmp"
- }
- }
- return dir
-}
-
-// Link creates newname as a hard link to the oldname file.
-// If there is an error, it will be of type *LinkError.
-func Link(oldname, newname string) error {
- e := ignoringEINTR(func() error {
- return syscall.Link(oldname, newname)
- })
- if e != nil {
- return &LinkError{"link", oldname, newname, e}
- }
- return nil
-}
-
-// Symlink creates newname as a symbolic link to oldname.
-// On Windows, a symlink to a non-existent oldname creates a file symlink;
-// if oldname is later created as a directory, the symlink will not work.
-// If there is an error, it will be of type *LinkError.
-func Symlink(oldname, newname string) error {
- e := ignoringEINTR(func() error {
- return syscall.Symlink(oldname, newname)
- })
- if e != nil {
- return &LinkError{"symlink", oldname, newname, e}
- }
- return nil
-}
-
-// Readlink returns the destination of the named symbolic link.
-// If there is an error, it will be of type *PathError.
-func Readlink(name string) (string, error) {
- for len := 128; ; len *= 2 {
- b := make([]byte, len)
- var (
- n int
- e error
- )
- for {
- n, e = fixCount(syscall.Readlink(name, b))
- if e != syscall.EINTR {
- break
- }
- }
- // buffer too small
- if runtime.GOOS == "aix" && e == syscall.ERANGE {
- continue
- }
- if e != nil {
- return "", &PathError{Op: "readlink", Path: name, Err: e}
- }
- if n < len {
- return string(b[0:n]), nil
- }
- }
-}
-
-type unixDirent struct {
- parent string
- name string
- typ FileMode
- info FileInfo
-}
-
-func (d *unixDirent) Name() string { return d.name }
-func (d *unixDirent) IsDir() bool { return d.typ.IsDir() }
-func (d *unixDirent) Type() FileMode { return d.typ }
-
-func (d *unixDirent) Info() (FileInfo, error) {
- if d.info != nil {
- return d.info, nil
- }
- return lstat(d.parent + "/" + d.name)
-}
-
-func newUnixDirent(parent, name string, typ FileMode) (DirEntry, error) {
- ude := &unixDirent{
- parent: parent,
- name: name,
- typ: typ,
- }
- if typ != ^FileMode(0) && !testingForceReadDirLstat {
- return ude, nil
- }
-
- info, err := lstat(parent + "/" + name)
- if err != nil {
- return nil, err
- }
-
- ude.typ = info.Mode().Type()
- ude.info = info
- return ude, nil
-}
diff --git a/contrib/go/_std_1.18/src/os/path_unix.go b/contrib/go/_std_1.18/src/os/path_unix.go
deleted file mode 100644
index d1ffe2c187..0000000000
--- a/contrib/go/_std_1.18/src/os/path_unix.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris
-
-package os
-
-const (
- PathSeparator = '/' // OS-specific path separator
- PathListSeparator = ':' // OS-specific path list separator
-)
-
-// IsPathSeparator reports whether c is a directory separator character.
-func IsPathSeparator(c uint8) bool {
- return PathSeparator == c
-}
-
-// basename removes trailing slashes and the leading directory name from path name.
-func basename(name string) string {
- i := len(name) - 1
- // Remove trailing slashes
- for ; i > 0 && name[i] == '/'; i-- {
- name = name[:i]
- }
- // Remove leading directory name
- for i--; i >= 0; i-- {
- if name[i] == '/' {
- name = name[i+1:]
- break
- }
- }
-
- return name
-}
-
-// splitPath returns the base name and parent directory.
-func splitPath(path string) (string, string) {
- // if no better parent is found, the path is relative from "here"
- dirname := "."
-
- // Remove all but one leading slash.
- for len(path) > 1 && path[0] == '/' && path[1] == '/' {
- path = path[1:]
- }
-
- i := len(path) - 1
-
- // Remove trailing slashes.
- for ; i > 0 && path[i] == '/'; i-- {
- path = path[:i]
- }
-
- // if no slashes in path, base is path
- basename := path
-
- // Remove leading directory path
- for i--; i >= 0; i-- {
- if path[i] == '/' {
- if i == 0 {
- dirname = path[:1]
- } else {
- dirname = path[:i]
- }
- basename = path[i+1:]
- break
- }
- }
-
- return dirname, basename
-}
-
-func fixRootDirectory(p string) string {
- return p
-}
diff --git a/contrib/go/_std_1.18/src/os/pipe_bsd.go b/contrib/go/_std_1.18/src/os/pipe_bsd.go
deleted file mode 100644
index 554d62111a..0000000000
--- a/contrib/go/_std_1.18/src/os/pipe_bsd.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || (js && wasm) || (solaris && !illumos)
-
-package os
-
-import "syscall"
-
-// Pipe returns a connected pair of Files; reads from r return bytes written to w.
-// It returns the files and an error, if any.
-func Pipe() (r *File, w *File, err error) {
- var p [2]int
-
- // See ../syscall/exec.go for description of lock.
- syscall.ForkLock.RLock()
- e := syscall.Pipe(p[0:])
- if e != nil {
- syscall.ForkLock.RUnlock()
- return nil, nil, NewSyscallError("pipe", e)
- }
- syscall.CloseOnExec(p[0])
- syscall.CloseOnExec(p[1])
- syscall.ForkLock.RUnlock()
-
- return newFile(uintptr(p[0]), "|0", kindPipe), newFile(uintptr(p[1]), "|1", kindPipe), nil
-}
diff --git a/contrib/go/_std_1.18/src/os/pipe_linux.go b/contrib/go/_std_1.18/src/os/pipe_linux.go
deleted file mode 100644
index 52f4e21e7c..0000000000
--- a/contrib/go/_std_1.18/src/os/pipe_linux.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package os
-
-import "syscall"
-
-// Pipe returns a connected pair of Files; reads from r return bytes written to w.
-// It returns the files and an error, if any.
-func Pipe() (r *File, w *File, err error) {
- var p [2]int
-
- e := syscall.Pipe2(p[0:], syscall.O_CLOEXEC)
- if e != nil {
- return nil, nil, NewSyscallError("pipe2", e)
- }
-
- return newFile(uintptr(p[0]), "|0", kindPipe), newFile(uintptr(p[1]), "|1", kindPipe), nil
-}
diff --git a/contrib/go/_std_1.18/src/os/removeall_at.go b/contrib/go/_std_1.18/src/os/removeall_at.go
deleted file mode 100644
index da804c436f..0000000000
--- a/contrib/go/_std_1.18/src/os/removeall_at.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-package os
-
-import (
- "internal/syscall/unix"
- "io"
- "syscall"
-)
-
-func removeAll(path string) error {
- if path == "" {
- // fail silently to retain compatibility with previous behavior
- // of RemoveAll. See issue 28830.
- return nil
- }
-
- // The rmdir system call does not permit removing ".",
- // so we don't permit it either.
- if endsWithDot(path) {
- return &PathError{Op: "RemoveAll", Path: path, Err: syscall.EINVAL}
- }
-
- // Simple case: if Remove works, we're done.
- err := Remove(path)
- if err == nil || IsNotExist(err) {
- return nil
- }
-
- // RemoveAll recurses by deleting the path base from
- // its parent directory
- parentDir, base := splitPath(path)
-
- parent, err := Open(parentDir)
- if IsNotExist(err) {
- // If parent does not exist, base cannot exist. Fail silently
- return nil
- }
- if err != nil {
- return err
- }
- defer parent.Close()
-
- if err := removeAllFrom(parent, base); err != nil {
- if pathErr, ok := err.(*PathError); ok {
- pathErr.Path = parentDir + string(PathSeparator) + pathErr.Path
- err = pathErr
- }
- return err
- }
- return nil
-}
-
-func removeAllFrom(parent *File, base string) error {
- parentFd := int(parent.Fd())
- // Simple case: if Unlink (aka remove) works, we're done.
- err := unix.Unlinkat(parentFd, base, 0)
- if err == nil || IsNotExist(err) {
- return nil
- }
-
- // EISDIR means that we have a directory, and we need to
- // remove its contents.
- // EPERM or EACCES means that we don't have write permission on
- // the parent directory, but this entry might still be a directory
- // whose contents need to be removed.
- // Otherwise just return the error.
- if err != syscall.EISDIR && err != syscall.EPERM && err != syscall.EACCES {
- return &PathError{Op: "unlinkat", Path: base, Err: err}
- }
-
- // Is this a directory we need to recurse into?
- var statInfo syscall.Stat_t
- statErr := unix.Fstatat(parentFd, base, &statInfo, unix.AT_SYMLINK_NOFOLLOW)
- if statErr != nil {
- if IsNotExist(statErr) {
- return nil
- }
- return &PathError{Op: "fstatat", Path: base, Err: statErr}
- }
- if statInfo.Mode&syscall.S_IFMT != syscall.S_IFDIR {
- // Not a directory; return the error from the unix.Unlinkat.
- return &PathError{Op: "unlinkat", Path: base, Err: err}
- }
-
- // Remove the directory's entries.
- var recurseErr error
- for {
- const reqSize = 1024
- var respSize int
-
- // Open the directory to recurse into
- file, err := openFdAt(parentFd, base)
- if err != nil {
- if IsNotExist(err) {
- return nil
- }
- recurseErr = &PathError{Op: "openfdat", Path: base, Err: err}
- break
- }
-
- for {
- numErr := 0
-
- names, readErr := file.Readdirnames(reqSize)
- // Errors other than EOF should stop us from continuing.
- if readErr != nil && readErr != io.EOF {
- file.Close()
- if IsNotExist(readErr) {
- return nil
- }
- return &PathError{Op: "readdirnames", Path: base, Err: readErr}
- }
-
- respSize = len(names)
- for _, name := range names {
- err := removeAllFrom(file, name)
- if err != nil {
- if pathErr, ok := err.(*PathError); ok {
- pathErr.Path = base + string(PathSeparator) + pathErr.Path
- }
- numErr++
- if recurseErr == nil {
- recurseErr = err
- }
- }
- }
-
- // If we can delete any entry, break to start new iteration.
- // Otherwise, we discard current names, get next entries and try deleting them.
- if numErr != reqSize {
- break
- }
- }
-
- // Removing files from the directory may have caused
- // the OS to reshuffle it. Simply calling Readdirnames
- // again may skip some entries. The only reliable way
- // to avoid this is to close and re-open the
- // directory. See issue 20841.
- file.Close()
-
- // Finish when the end of the directory is reached
- if respSize < reqSize {
- break
- }
- }
-
- // Remove the directory itself.
- unlinkError := unix.Unlinkat(parentFd, base, unix.AT_REMOVEDIR)
- if unlinkError == nil || IsNotExist(unlinkError) {
- return nil
- }
-
- if recurseErr != nil {
- return recurseErr
- }
- return &PathError{Op: "unlinkat", Path: base, Err: unlinkError}
-}
-
-// openFdAt opens path relative to the directory in fd.
-// Other than that this should act like openFileNolog.
-// This acts like openFileNolog rather than OpenFile because
-// we are going to (try to) remove the file.
-// The contents of this file are not relevant for test caching.
-func openFdAt(dirfd int, name string) (*File, error) {
- var r int
- for {
- var e error
- r, e = unix.Openat(dirfd, name, O_RDONLY|syscall.O_CLOEXEC, 0)
- if e == nil {
- break
- }
-
- // See comment in openFileNolog.
- if e == syscall.EINTR {
- continue
- }
-
- return nil, e
- }
-
- if !supportsCloseOnExec {
- syscall.CloseOnExec(r)
- }
-
- return newFile(uintptr(r), name, kindOpenFile), nil
-}
diff --git a/contrib/go/_std_1.18/src/os/stat_darwin.go b/contrib/go/_std_1.18/src/os/stat_darwin.go
deleted file mode 100644
index 74214cefa4..0000000000
--- a/contrib/go/_std_1.18/src/os/stat_darwin.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package os
-
-import (
- "syscall"
- "time"
-)
-
-func fillFileStatFromSys(fs *fileStat, name string) {
- fs.name = basename(name)
- fs.size = fs.sys.Size
- fs.modTime = timespecToTime(fs.sys.Mtimespec)
- fs.mode = FileMode(fs.sys.Mode & 0777)
- switch fs.sys.Mode & syscall.S_IFMT {
- case syscall.S_IFBLK, syscall.S_IFWHT:
- fs.mode |= ModeDevice
- case syscall.S_IFCHR:
- fs.mode |= ModeDevice | ModeCharDevice
- case syscall.S_IFDIR:
- fs.mode |= ModeDir
- case syscall.S_IFIFO:
- fs.mode |= ModeNamedPipe
- case syscall.S_IFLNK:
- fs.mode |= ModeSymlink
- case syscall.S_IFREG:
- // nothing to do
- case syscall.S_IFSOCK:
- fs.mode |= ModeSocket
- }
- if fs.sys.Mode&syscall.S_ISGID != 0 {
- fs.mode |= ModeSetgid
- }
- if fs.sys.Mode&syscall.S_ISUID != 0 {
- fs.mode |= ModeSetuid
- }
- if fs.sys.Mode&syscall.S_ISVTX != 0 {
- fs.mode |= ModeSticky
- }
-}
-
-func timespecToTime(ts syscall.Timespec) time.Time {
- return time.Unix(int64(ts.Sec), int64(ts.Nsec))
-}
-
-// For testing.
-func atime(fi FileInfo) time.Time {
- return timespecToTime(fi.Sys().(*syscall.Stat_t).Atimespec)
-}
diff --git a/contrib/go/_std_1.18/src/os/stat_linux.go b/contrib/go/_std_1.18/src/os/stat_linux.go
deleted file mode 100644
index d36afa9ffd..0000000000
--- a/contrib/go/_std_1.18/src/os/stat_linux.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package os
-
-import (
- "syscall"
- "time"
-)
-
-func fillFileStatFromSys(fs *fileStat, name string) {
- fs.name = basename(name)
- fs.size = fs.sys.Size
- fs.modTime = timespecToTime(fs.sys.Mtim)
- fs.mode = FileMode(fs.sys.Mode & 0777)
- switch fs.sys.Mode & syscall.S_IFMT {
- case syscall.S_IFBLK:
- fs.mode |= ModeDevice
- case syscall.S_IFCHR:
- fs.mode |= ModeDevice | ModeCharDevice
- case syscall.S_IFDIR:
- fs.mode |= ModeDir
- case syscall.S_IFIFO:
- fs.mode |= ModeNamedPipe
- case syscall.S_IFLNK:
- fs.mode |= ModeSymlink
- case syscall.S_IFREG:
- // nothing to do
- case syscall.S_IFSOCK:
- fs.mode |= ModeSocket
- }
- if fs.sys.Mode&syscall.S_ISGID != 0 {
- fs.mode |= ModeSetgid
- }
- if fs.sys.Mode&syscall.S_ISUID != 0 {
- fs.mode |= ModeSetuid
- }
- if fs.sys.Mode&syscall.S_ISVTX != 0 {
- fs.mode |= ModeSticky
- }
-}
-
-func timespecToTime(ts syscall.Timespec) time.Time {
- return time.Unix(int64(ts.Sec), int64(ts.Nsec))
-}
-
-// For testing.
-func atime(fi FileInfo) time.Time {
- return timespecToTime(fi.Sys().(*syscall.Stat_t).Atim)
-}
diff --git a/contrib/go/_std_1.18/src/os/stat_unix.go b/contrib/go/_std_1.18/src/os/stat_unix.go
deleted file mode 100644
index eb15db5453..0000000000
--- a/contrib/go/_std_1.18/src/os/stat_unix.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris
-
-package os
-
-import (
- "syscall"
-)
-
-// Stat returns the FileInfo structure describing file.
-// If there is an error, it will be of type *PathError.
-func (f *File) Stat() (FileInfo, error) {
- if f == nil {
- return nil, ErrInvalid
- }
- var fs fileStat
- err := f.pfd.Fstat(&fs.sys)
- if err != nil {
- return nil, &PathError{Op: "stat", Path: f.name, Err: err}
- }
- fillFileStatFromSys(&fs, f.name)
- return &fs, nil
-}
-
-// statNolog stats a file with no test logging.
-func statNolog(name string) (FileInfo, error) {
- var fs fileStat
- err := ignoringEINTR(func() error {
- return syscall.Stat(name, &fs.sys)
- })
- if err != nil {
- return nil, &PathError{Op: "stat", Path: name, Err: err}
- }
- fillFileStatFromSys(&fs, name)
- return &fs, nil
-}
-
-// lstatNolog lstats a file with no test logging.
-func lstatNolog(name string) (FileInfo, error) {
- var fs fileStat
- err := ignoringEINTR(func() error {
- return syscall.Lstat(name, &fs.sys)
- })
- if err != nil {
- return nil, &PathError{Op: "lstat", Path: name, Err: err}
- }
- fillFileStatFromSys(&fs, name)
- return &fs, nil
-}
diff --git a/contrib/go/_std_1.18/src/os/sys_unix.go b/contrib/go/_std_1.18/src/os/sys_unix.go
deleted file mode 100644
index 5ff39780e5..0000000000
--- a/contrib/go/_std_1.18/src/os/sys_unix.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-package os
-
-// supportsCloseOnExec reports whether the platform supports the
-// O_CLOEXEC flag.
-// On Darwin, the O_CLOEXEC flag was introduced in OS X 10.7 (Darwin 11.0.0).
-// See https://support.apple.com/kb/HT1633.
-// On FreeBSD, the O_CLOEXEC flag was introduced in version 8.3.
-const supportsCloseOnExec = true
diff --git a/contrib/go/_std_1.18/src/path/filepath/match.go b/contrib/go/_std_1.18/src/path/filepath/match.go
deleted file mode 100644
index 55ed1d75ae..0000000000
--- a/contrib/go/_std_1.18/src/path/filepath/match.go
+++ /dev/null
@@ -1,370 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package filepath
-
-import (
- "errors"
- "os"
- "runtime"
- "sort"
- "strings"
- "unicode/utf8"
-)
-
-// ErrBadPattern indicates a pattern was malformed.
-var ErrBadPattern = errors.New("syntax error in pattern")
-
-// Match reports whether name matches the shell file name pattern.
-// The pattern syntax is:
-//
-// pattern:
-// { term }
-// term:
-// '*' matches any sequence of non-Separator characters
-// '?' matches any single non-Separator character
-// '[' [ '^' ] { character-range } ']'
-// character class (must be non-empty)
-// c matches character c (c != '*', '?', '\\', '[')
-// '\\' c matches character c
-//
-// character-range:
-// c matches character c (c != '\\', '-', ']')
-// '\\' c matches character c
-// lo '-' hi matches character c for lo <= c <= hi
-//
-// Match requires pattern to match all of name, not just a substring.
-// The only possible returned error is ErrBadPattern, when pattern
-// is malformed.
-//
-// On Windows, escaping is disabled. Instead, '\\' is treated as
-// a path separator.
-//
-func Match(pattern, name string) (matched bool, err error) {
-Pattern:
- for len(pattern) > 0 {
- var star bool
- var chunk string
- star, chunk, pattern = scanChunk(pattern)
- if star && chunk == "" {
- // Trailing * matches the rest of the string unless it has a /.
- return !strings.Contains(name, string(Separator)), nil
- }
- // Look for match at current position.
- t, ok, err := matchChunk(chunk, name)
- // if we're the last chunk, make sure we've exhausted the name
- // otherwise we'll give a false result even if we could still match
- // using the star
- if ok && (len(t) == 0 || len(pattern) > 0) {
- name = t
- continue
- }
- if err != nil {
- return false, err
- }
- if star {
- // Look for match skipping i+1 bytes.
- // Cannot skip /.
- for i := 0; i < len(name) && name[i] != Separator; i++ {
- t, ok, err := matchChunk(chunk, name[i+1:])
- if ok {
- // if we're the last chunk, make sure we exhausted the name
- if len(pattern) == 0 && len(t) > 0 {
- continue
- }
- name = t
- continue Pattern
- }
- if err != nil {
- return false, err
- }
- }
- }
- return false, nil
- }
- return len(name) == 0, nil
-}
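
A few illustrative calls against the grammar above (Unix, where Separator is '/'):

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        // '*' never crosses a Separator, so it matches exactly one path element here.
        fmt.Println(filepath.Match("/usr/*/bin/ed", "/usr/local/bin/ed")) // true <nil>
        fmt.Println(filepath.Match("a/*", "a/b/c"))                       // false <nil>
        fmt.Println(filepath.Match("[a-c]?", "bz"))                       // true <nil>
    }
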
-
-// scanChunk gets the next segment of pattern, which is a non-star string
-// possibly preceded by a star.
-func scanChunk(pattern string) (star bool, chunk, rest string) {
- for len(pattern) > 0 && pattern[0] == '*' {
- pattern = pattern[1:]
- star = true
- }
- inrange := false
- var i int
-Scan:
- for i = 0; i < len(pattern); i++ {
- switch pattern[i] {
- case '\\':
- if runtime.GOOS != "windows" {
- // error check handled in matchChunk: bad pattern.
- if i+1 < len(pattern) {
- i++
- }
- }
- case '[':
- inrange = true
- case ']':
- inrange = false
- case '*':
- if !inrange {
- break Scan
- }
- }
- }
- return star, pattern[0:i], pattern[i:]
-}
-
-// matchChunk checks whether chunk matches the beginning of s.
-// If so, it returns the remainder of s (after the match).
-// Chunk is all single-character operators: literals, char classes, and ?.
-func matchChunk(chunk, s string) (rest string, ok bool, err error) {
- // failed records whether the match has failed.
- // After the match fails, the loop continues on processing chunk,
- // checking that the pattern is well-formed but no longer reading s.
- failed := false
- for len(chunk) > 0 {
- if !failed && len(s) == 0 {
- failed = true
- }
- switch chunk[0] {
- case '[':
- // character class
- var r rune
- if !failed {
- var n int
- r, n = utf8.DecodeRuneInString(s)
- s = s[n:]
- }
- chunk = chunk[1:]
- // possibly negated
- negated := false
- if len(chunk) > 0 && chunk[0] == '^' {
- negated = true
- chunk = chunk[1:]
- }
- // parse all ranges
- match := false
- nrange := 0
- for {
- if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
- chunk = chunk[1:]
- break
- }
- var lo, hi rune
- if lo, chunk, err = getEsc(chunk); err != nil {
- return "", false, err
- }
- hi = lo
- if chunk[0] == '-' {
- if hi, chunk, err = getEsc(chunk[1:]); err != nil {
- return "", false, err
- }
- }
- if lo <= r && r <= hi {
- match = true
- }
- nrange++
- }
- if match == negated {
- failed = true
- }
-
- case '?':
- if !failed {
- if s[0] == Separator {
- failed = true
- }
- _, n := utf8.DecodeRuneInString(s)
- s = s[n:]
- }
- chunk = chunk[1:]
-
- case '\\':
- if runtime.GOOS != "windows" {
- chunk = chunk[1:]
- if len(chunk) == 0 {
- return "", false, ErrBadPattern
- }
- }
- fallthrough
-
- default:
- if !failed {
- if chunk[0] != s[0] {
- failed = true
- }
- s = s[1:]
- }
- chunk = chunk[1:]
- }
- }
- if failed {
- return "", false, nil
- }
- return s, true, nil
-}
-
-// getEsc gets a possibly-escaped character from chunk, for a character class.
-func getEsc(chunk string) (r rune, nchunk string, err error) {
- if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
- err = ErrBadPattern
- return
- }
- if chunk[0] == '\\' && runtime.GOOS != "windows" {
- chunk = chunk[1:]
- if len(chunk) == 0 {
- err = ErrBadPattern
- return
- }
- }
- r, n := utf8.DecodeRuneInString(chunk)
- if r == utf8.RuneError && n == 1 {
- err = ErrBadPattern
- }
- nchunk = chunk[n:]
- if len(nchunk) == 0 {
- err = ErrBadPattern
- }
- return
-}
-
-// Glob returns the names of all files matching pattern or nil
-// if there is no matching file. The syntax of patterns is the same
-// as in Match. The pattern may describe hierarchical names such as
-// /usr/*/bin/ed (assuming the Separator is '/').
-//
-// Glob ignores file system errors such as I/O errors reading directories.
-// The only possible returned error is ErrBadPattern, when pattern
-// is malformed.
-func Glob(pattern string) (matches []string, err error) {
- return globWithLimit(pattern, 0)
-}
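
A usage sketch for Glob; the pattern is hypothetical and the results depend on the filesystem:

    package main

    import (
        "fmt"
        "log"
        "path/filepath"
    )

    func main() {
        matches, err := filepath.Glob("/etc/*.conf") // hypothetical pattern
        if err != nil {
            log.Fatal(err) // only ErrBadPattern is possible; I/O errors are ignored
        }
        for _, m := range matches {
            fmt.Println(m) // names come back in lexicographical order
        }
    }
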
-
-func globWithLimit(pattern string, depth int) (matches []string, err error) {
- // This limit is used to prevent stack exhaustion issues. See CVE-2022-30632.
- const pathSeparatorsLimit = 10000
- if depth == pathSeparatorsLimit {
- return nil, ErrBadPattern
- }
-
- // Check pattern is well-formed.
- if _, err := Match(pattern, ""); err != nil {
- return nil, err
- }
- if !hasMeta(pattern) {
- if _, err = os.Lstat(pattern); err != nil {
- return nil, nil
- }
- return []string{pattern}, nil
- }
-
- dir, file := Split(pattern)
- volumeLen := 0
- if runtime.GOOS == "windows" {
- volumeLen, dir = cleanGlobPathWindows(dir)
- } else {
- dir = cleanGlobPath(dir)
- }
-
- if !hasMeta(dir[volumeLen:]) {
- return glob(dir, file, nil)
- }
-
- // Prevent infinite recursion. See issue 15879.
- if dir == pattern {
- return nil, ErrBadPattern
- }
-
- var m []string
- m, err = globWithLimit(dir, depth+1)
- if err != nil {
- return
- }
- for _, d := range m {
- matches, err = glob(d, file, matches)
- if err != nil {
- return
- }
- }
- return
-}
-
-// cleanGlobPath prepares path for glob matching.
-func cleanGlobPath(path string) string {
- switch path {
- case "":
- return "."
- case string(Separator):
- // do nothing to the path
- return path
- default:
- return path[0 : len(path)-1] // chop off trailing separator
- }
-}
-
-// cleanGlobPathWindows is the Windows version of cleanGlobPath.
-func cleanGlobPathWindows(path string) (prefixLen int, cleaned string) {
- vollen := volumeNameLen(path)
- switch {
- case path == "":
- return 0, "."
- case vollen+1 == len(path) && os.IsPathSeparator(path[len(path)-1]): // /, \, C:\ and C:/
- // do nothing to the path
- return vollen + 1, path
- case vollen == len(path) && len(path) == 2: // C:
- return vollen, path + "." // convert C: into C:.
- default:
- if vollen >= len(path) {
- vollen = len(path) - 1
- }
- return vollen, path[0 : len(path)-1] // chop off trailing separator
- }
-}
-
-// glob searches for files matching pattern in the directory dir
-// and appends them to matches. If the directory cannot be
-// opened, it returns the existing matches. New matches are
-// added in lexicographical order.
-func glob(dir, pattern string, matches []string) (m []string, e error) {
- m = matches
- fi, err := os.Stat(dir)
- if err != nil {
- return // ignore I/O error
- }
- if !fi.IsDir() {
- return // ignore I/O error
- }
- d, err := os.Open(dir)
- if err != nil {
- return // ignore I/O error
- }
- defer d.Close()
-
- names, _ := d.Readdirnames(-1)
- sort.Strings(names)
-
- for _, n := range names {
- matched, err := Match(pattern, n)
- if err != nil {
- return m, err
- }
- if matched {
- m = append(m, Join(dir, n))
- }
- }
- return
-}
-
-// hasMeta reports whether path contains any of the magic characters
-// recognized by Match.
-func hasMeta(path string) bool {
- magicChars := `*?[`
- if runtime.GOOS != "windows" {
- magicChars = `*?[\`
- }
- return strings.ContainsAny(path, magicChars)
-}
diff --git a/contrib/go/_std_1.18/src/path/filepath/path.go b/contrib/go/_std_1.18/src/path/filepath/path.go
deleted file mode 100644
index 8300a32cb1..0000000000
--- a/contrib/go/_std_1.18/src/path/filepath/path.go
+++ /dev/null
@@ -1,612 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package filepath implements utility routines for manipulating filename paths
-// in a way compatible with the target operating system-defined file paths.
-//
-// The filepath package uses either forward slashes or backslashes,
-// depending on the operating system. To process paths such as URLs
-// that always use forward slashes regardless of the operating
-// system, see the path package.
-package filepath
-
-import (
- "errors"
- "io/fs"
- "os"
- "sort"
- "strings"
-)
-
-// A lazybuf is a lazily constructed path buffer.
-// It supports append, reading previously appended bytes,
-// and retrieving the final string. It does not allocate a buffer
-// to hold the output until that output diverges from s.
-type lazybuf struct {
- path string
- buf []byte
- w int
- volAndPath string
- volLen int
-}
-
-func (b *lazybuf) index(i int) byte {
- if b.buf != nil {
- return b.buf[i]
- }
- return b.path[i]
-}
-
-func (b *lazybuf) append(c byte) {
- if b.buf == nil {
- if b.w < len(b.path) && b.path[b.w] == c {
- b.w++
- return
- }
- b.buf = make([]byte, len(b.path))
- copy(b.buf, b.path[:b.w])
- }
- b.buf[b.w] = c
- b.w++
-}
-
-func (b *lazybuf) string() string {
- if b.buf == nil {
- return b.volAndPath[:b.volLen+b.w]
- }
- return b.volAndPath[:b.volLen] + string(b.buf[:b.w])
-}
-
-const (
- Separator = os.PathSeparator
- ListSeparator = os.PathListSeparator
-)
-
-// Clean returns the shortest path name equivalent to path
-// by purely lexical processing. It applies the following rules
-// iteratively until no further processing can be done:
-//
-// 1. Replace multiple Separator elements with a single one.
-// 2. Eliminate each . path name element (the current directory).
-// 3. Eliminate each inner .. path name element (the parent directory)
-// along with the non-.. element that precedes it.
-// 4. Eliminate .. elements that begin a rooted path:
-// that is, replace "/.." by "/" at the beginning of a path,
-// assuming Separator is '/'.
-//
-// The returned path ends in a slash only if it represents a root directory,
-// such as "/" on Unix or `C:\` on Windows.
-//
-// Finally, any occurrences of slash are replaced by Separator.
-//
-// If the result of this process is an empty string, Clean
-// returns the string ".".
-//
-// See also Rob Pike, ``Lexical File Names in Plan 9 or
-// Getting Dot-Dot Right,''
-// https://9p.io/sys/doc/lexnames.html
-func Clean(path string) string {
- originalPath := path
- volLen := volumeNameLen(path)
- path = path[volLen:]
- if path == "" {
- if volLen > 1 && originalPath[1] != ':' {
- // should be UNC
- return FromSlash(originalPath)
- }
- return originalPath + "."
- }
- rooted := os.IsPathSeparator(path[0])
-
- // Invariants:
- // reading from path; r is index of next byte to process.
- // writing to buf; w is index of next byte to write.
- // dotdot is index in buf where .. must stop, either because
- // it is the leading slash or it is a leading ../../.. prefix.
- n := len(path)
- out := lazybuf{path: path, volAndPath: originalPath, volLen: volLen}
- r, dotdot := 0, 0
- if rooted {
- out.append(Separator)
- r, dotdot = 1, 1
- }
-
- for r < n {
- switch {
- case os.IsPathSeparator(path[r]):
- // empty path element
- r++
- case path[r] == '.' && r+1 == n:
- // . element
- r++
- case path[r] == '.' && os.IsPathSeparator(path[r+1]):
- // ./ element
- r++
-
- for r < len(path) && os.IsPathSeparator(path[r]) {
- r++
- }
- if out.w == 0 && volumeNameLen(path[r:]) > 0 {
- // When joining prefix "." and an absolute path on Windows,
- // the prefix should not be removed.
- out.append('.')
- }
- case path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
- // .. element: remove to last separator
- r += 2
- switch {
- case out.w > dotdot:
- // can backtrack
- out.w--
- for out.w > dotdot && !os.IsPathSeparator(out.index(out.w)) {
- out.w--
- }
- case !rooted:
- // cannot backtrack, but not rooted, so append .. element.
- if out.w > 0 {
- out.append(Separator)
- }
- out.append('.')
- out.append('.')
- dotdot = out.w
- }
- default:
- // real path element.
- // add slash if needed
- if rooted && out.w != 1 || !rooted && out.w != 0 {
- out.append(Separator)
- }
- // copy element
- for ; r < n && !os.IsPathSeparator(path[r]); r++ {
- out.append(path[r])
- }
- }
- }
-
- // Turn empty string into "."
- if out.w == 0 {
- out.append('.')
- }
-
- return FromSlash(out.string())
-}
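
Worked examples of the rules above, with '/' as Separator:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        fmt.Println(filepath.Clean("a//b/./c")) // "a/b/c" (rules 1 and 2)
        fmt.Println(filepath.Clean("a/b/../c")) // "a/c"   (rule 3)
        fmt.Println(filepath.Clean("/../x"))    // "/x"    (rule 4)
        fmt.Println(filepath.Clean(""))         // "."
    }
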
-
-// ToSlash returns the result of replacing each separator character
-// in path with a slash ('/') character. Multiple separators are
-// replaced by multiple slashes.
-func ToSlash(path string) string {
- if Separator == '/' {
- return path
- }
- return strings.ReplaceAll(path, string(Separator), "/")
-}
-
-// FromSlash returns the result of replacing each slash ('/') character
-// in path with a separator character. Multiple slashes are replaced
-// by multiple separators.
-func FromSlash(path string) string {
- if Separator == '/' {
- return path
- }
- return strings.ReplaceAll(path, "/", string(Separator))
-}
-
-// SplitList splits a list of paths joined by the OS-specific ListSeparator,
-// usually found in PATH or GOPATH environment variables.
-// Unlike strings.Split, SplitList returns an empty slice when passed an empty
-// string.
-func SplitList(path string) []string {
- return splitList(path)
-}
-
-// Split splits path immediately following the final Separator,
-// separating it into a directory and file name component.
-// If there is no Separator in path, Split returns an empty dir
-// and file set to path.
-// The returned values have the property that path = dir+file.
-func Split(path string) (dir, file string) {
- vol := VolumeName(path)
- i := len(path) - 1
- for i >= len(vol) && !os.IsPathSeparator(path[i]) {
- i--
- }
- return path[:i+1], path[i+1:]
-}
-
-// Join joins any number of path elements into a single path,
-// separating them with an OS specific Separator. Empty elements
-// are ignored. The result is Cleaned. However, if the argument
-// list is empty or all its elements are empty, Join returns
-// an empty string.
-// On Windows, the result will only be a UNC path if the first
-// non-empty element is a UNC path.
-func Join(elem ...string) string {
- return join(elem)
-}
-
-// Ext returns the file name extension used by path.
-// The extension is the suffix beginning at the final dot
-// in the final element of path; it is empty if there is
-// no dot.
-func Ext(path string) string {
- for i := len(path) - 1; i >= 0 && !os.IsPathSeparator(path[i]); i-- {
- if path[i] == '.' {
- return path[i:]
- }
- }
- return ""
-}
-
-// EvalSymlinks returns the path name after the evaluation of any symbolic
-// links.
-// If path is relative the result will be relative to the current directory,
-// unless one of the components is an absolute symbolic link.
-// EvalSymlinks calls Clean on the result.
-func EvalSymlinks(path string) (string, error) {
- return evalSymlinks(path)
-}
-
-// Abs returns an absolute representation of path.
-// If the path is not absolute it will be joined with the current
-// working directory to turn it into an absolute path. The absolute
-// path name for a given file is not guaranteed to be unique.
-// Abs calls Clean on the result.
-func Abs(path string) (string, error) {
- return abs(path)
-}
-
-func unixAbs(path string) (string, error) {
- if IsAbs(path) {
- return Clean(path), nil
- }
- wd, err := os.Getwd()
- if err != nil {
- return "", err
- }
- return Join(wd, path), nil
-}
-
-// Rel returns a relative path that is lexically equivalent to targpath when
-// joined to basepath with an intervening separator. That is,
-// Join(basepath, Rel(basepath, targpath)) is equivalent to targpath itself.
-// On success, the returned path will always be relative to basepath,
-// even if basepath and targpath share no elements.
-// An error is returned if targpath can't be made relative to basepath or if
-// knowing the current working directory would be necessary to compute it.
-// Rel calls Clean on the result.
-func Rel(basepath, targpath string) (string, error) {
- baseVol := VolumeName(basepath)
- targVol := VolumeName(targpath)
- base := Clean(basepath)
- targ := Clean(targpath)
- if sameWord(targ, base) {
- return ".", nil
- }
- base = base[len(baseVol):]
- targ = targ[len(targVol):]
- if base == "." {
- base = ""
- } else if base == "" && volumeNameLen(baseVol) > 2 /* isUNC */ {
- // Treat any targetpath matching a `\\host\share` basepath as an absolute path.
- base = string(Separator)
- }
-
- // Can't use IsAbs - `\a` and `a` are both relative in Windows.
- baseSlashed := len(base) > 0 && base[0] == Separator
- targSlashed := len(targ) > 0 && targ[0] == Separator
- if baseSlashed != targSlashed || !sameWord(baseVol, targVol) {
- return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
- }
- // Position base[b0:bi] and targ[t0:ti] at the first differing elements.
- bl := len(base)
- tl := len(targ)
- var b0, bi, t0, ti int
- for {
- for bi < bl && base[bi] != Separator {
- bi++
- }
- for ti < tl && targ[ti] != Separator {
- ti++
- }
- if !sameWord(targ[t0:ti], base[b0:bi]) {
- break
- }
- if bi < bl {
- bi++
- }
- if ti < tl {
- ti++
- }
- b0 = bi
- t0 = ti
- }
- if base[b0:bi] == ".." {
- return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
- }
- if b0 != bl {
- // Base elements left. Must go up before going down.
- seps := strings.Count(base[b0:bl], string(Separator))
- size := 2 + seps*3
- if tl != t0 {
- size += 1 + tl - t0
- }
- buf := make([]byte, size)
- n := copy(buf, "..")
- for i := 0; i < seps; i++ {
- buf[n] = Separator
- copy(buf[n+1:], "..")
- n += 3
- }
- if t0 != tl {
- buf[n] = Separator
- copy(buf[n+1:], targ[t0:])
- }
- return string(buf), nil
- }
- return targ[t0:], nil
-}
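
A minimal sketch of Rel's contract with Unix separators (expected output in comments):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Join(base, Rel(base, targ)) is lexically equivalent to targ.
	r, _ := filepath.Rel("/a/b", "/a/b/c/d")
	fmt.Println(r) // "c/d"

	r, _ = filepath.Rel("/a/b", "/a/x")
	fmt.Println(r) // "../x": go up past "b", then down into "x"

	// Mixing absolute and relative paths would require the working
	// directory, so Rel refuses.
	if _, err := filepath.Rel("/a", "b"); err != nil {
		fmt.Println(err)
	}
}
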
-
-// SkipDir is used as a return value from WalkFuncs to indicate that
-// the directory named in the call is to be skipped. It is not returned
-// as an error by any function.
-var SkipDir error = fs.SkipDir
-
-// WalkFunc is the type of the function called by Walk to visit each
-// file or directory.
-//
-// The path argument contains the argument to Walk as a prefix.
-// That is, if Walk is called with root argument "dir" and finds a file
-// named "a" in that directory, the walk function will be called with
-// argument "dir/a".
-//
-// The directory and file are joined with Join, which may clean the
-// directory name: if Walk is called with the root argument "x/../dir"
-// and finds a file named "a" in that directory, the walk function will
-// be called with argument "dir/a", not "x/../dir/a".
-//
-// The info argument is the fs.FileInfo for the named path.
-//
-// The error result returned by the function controls how Walk continues.
-// If the function returns the special value SkipDir, Walk skips the
-// current directory (path if info.IsDir() is true, otherwise path's
-// parent directory). Otherwise, if the function returns a non-nil error,
-// Walk stops entirely and returns that error.
-//
-// The err argument reports an error related to path, signaling that Walk
-// will not walk into that directory. The function can decide how to
-// handle that error; as described earlier, returning the error will
-// cause Walk to stop walking the entire tree.
-//
-// Walk calls the function with a non-nil err argument in two cases.
-//
-// First, if an os.Lstat on the root directory or any directory or file
-// in the tree fails, Walk calls the function with path set to that
-// directory or file's path, info set to nil, and err set to the error
-// from os.Lstat.
-//
-// Second, if a directory's Readdirnames method fails, Walk calls the
-// function with path set to the directory's path, info set to an
-// fs.FileInfo describing the directory, and err set to the error from
-// Readdirnames.
-type WalkFunc func(path string, info fs.FileInfo, err error) error
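
A small usage sketch of the WalkFunc contract just described: tolerate stat errors and prune a hypothetical "vendor" subtree via SkipDir (the directory name is only an example):

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	err := filepath.Walk(".", func(path string, info fs.FileInfo, err error) error {
		if err != nil {
			return nil // ignore the stat error and keep walking
		}
		if info.IsDir() && info.Name() == "vendor" {
			return filepath.SkipDir // prune this subtree
		}
		fmt.Println(path)
		return nil
	})
	if err != nil {
		fmt.Println("walk failed:", err)
	}
}
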
-
-var lstat = os.Lstat // for testing
-
-// walkDir recursively descends path, calling walkDirFn.
-func walkDir(path string, d fs.DirEntry, walkDirFn fs.WalkDirFunc) error {
- if err := walkDirFn(path, d, nil); err != nil || !d.IsDir() {
- if err == SkipDir && d.IsDir() {
- // Successfully skipped directory.
- err = nil
- }
- return err
- }
-
- dirs, err := readDir(path)
- if err != nil {
- // Second call, to report ReadDir error.
- err = walkDirFn(path, d, err)
- if err != nil {
- return err
- }
- }
-
- for _, d1 := range dirs {
- path1 := Join(path, d1.Name())
- if err := walkDir(path1, d1, walkDirFn); err != nil {
- if err == SkipDir {
- break
- }
- return err
- }
- }
- return nil
-}
-
-// walk recursively descends path, calling walkFn.
-func walk(path string, info fs.FileInfo, walkFn WalkFunc) error {
- if !info.IsDir() {
- return walkFn(path, info, nil)
- }
-
- names, err := readDirNames(path)
- err1 := walkFn(path, info, err)
- // If err != nil, walk can't walk into this directory.
- // err1 != nil means walkFn wants walk to skip this directory or stop walking.
- // Therefore, if one of err and err1 isn't nil, walk will return.
- if err != nil || err1 != nil {
- // The caller's behavior is controlled by the return value, which is decided
- // by walkFn. walkFn may ignore err and return nil.
- // If walkFn returns SkipDir, it will be handled by the caller.
- // So walk should return whatever walkFn returns.
- return err1
- }
-
- for _, name := range names {
- filename := Join(path, name)
- fileInfo, err := lstat(filename)
- if err != nil {
- if err := walkFn(filename, fileInfo, err); err != nil && err != SkipDir {
- return err
- }
- } else {
- err = walk(filename, fileInfo, walkFn)
- if err != nil {
- if !fileInfo.IsDir() || err != SkipDir {
- return err
- }
- }
- }
- }
- return nil
-}
-
-// WalkDir walks the file tree rooted at root, calling fn for each file or
-// directory in the tree, including root.
-//
-// All errors that arise visiting files and directories are filtered by fn:
-// see the fs.WalkDirFunc documentation for details.
-//
-// The files are walked in lexical order, which makes the output deterministic
-// but requires WalkDir to read an entire directory into memory before proceeding
-// to walk that directory.
-//
-// WalkDir does not follow symbolic links.
-func WalkDir(root string, fn fs.WalkDirFunc) error {
- info, err := os.Lstat(root)
- if err != nil {
- err = fn(root, nil, err)
- } else {
- err = walkDir(root, &statDirEntry{info}, fn)
- }
- if err == SkipDir {
- return nil
- }
- return err
-}
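
A matching sketch for WalkDir; the callback receives a cheap fs.DirEntry instead of a full fs.FileInfo:

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			fmt.Println("dir: ", path)
		} else {
			fmt.Println("file:", path)
		}
		return nil
	})
	if err != nil {
		fmt.Println(err)
	}
}
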
-
-type statDirEntry struct {
- info fs.FileInfo
-}
-
-func (d *statDirEntry) Name() string { return d.info.Name() }
-func (d *statDirEntry) IsDir() bool { return d.info.IsDir() }
-func (d *statDirEntry) Type() fs.FileMode { return d.info.Mode().Type() }
-func (d *statDirEntry) Info() (fs.FileInfo, error) { return d.info, nil }
-
-// Walk walks the file tree rooted at root, calling fn for each file or
-// directory in the tree, including root.
-//
-// All errors that arise visiting files and directories are filtered by fn:
-// see the WalkFunc documentation for details.
-//
-// The files are walked in lexical order, which makes the output deterministic
-// but requires Walk to read an entire directory into memory before proceeding
-// to walk that directory.
-//
-// Walk does not follow symbolic links.
-//
-// Walk is less efficient than WalkDir, introduced in Go 1.16,
-// which avoids calling os.Lstat on every visited file or directory.
-func Walk(root string, fn WalkFunc) error {
- info, err := os.Lstat(root)
- if err != nil {
- err = fn(root, nil, err)
- } else {
- err = walk(root, info, fn)
- }
- if err == SkipDir {
- return nil
- }
- return err
-}
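
One case where Walk's per-file Lstat pays for itself: the fs.FileInfo it hands over already carries sizes, where WalkDir would need an extra d.Info() call. A sketch:

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	var files int
	var total int64
	err := filepath.Walk(".", func(path string, info fs.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() {
			files++
			total += info.Size() // WalkDir would need d.Info() here
		}
		return nil
	})
	if err != nil {
		fmt.Println(err)
	}
	fmt.Printf("%d files, %d bytes\n", files, total)
}
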
-
-// readDir reads the directory named by dirname and returns
-// a sorted list of directory entries.
-func readDir(dirname string) ([]fs.DirEntry, error) {
- f, err := os.Open(dirname)
- if err != nil {
- return nil, err
- }
- dirs, err := f.ReadDir(-1)
- f.Close()
- if err != nil {
- return nil, err
- }
- sort.Slice(dirs, func(i, j int) bool { return dirs[i].Name() < dirs[j].Name() })
- return dirs, nil
-}
-
-// readDirNames reads the directory named by dirname and returns
-// a sorted list of directory entry names.
-func readDirNames(dirname string) ([]string, error) {
- f, err := os.Open(dirname)
- if err != nil {
- return nil, err
- }
- names, err := f.Readdirnames(-1)
- f.Close()
- if err != nil {
- return nil, err
- }
- sort.Strings(names)
- return names, nil
-}
-
-// Base returns the last element of path.
-// Trailing path separators are removed before extracting the last element.
-// If the path is empty, Base returns ".".
-// If the path consists entirely of separators, Base returns a single separator.
-func Base(path string) string {
- if path == "" {
- return "."
- }
- // Strip trailing slashes.
- for len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) {
- path = path[0 : len(path)-1]
- }
- // Throw away volume name
- path = path[len(VolumeName(path)):]
- // Find the last element
- i := len(path) - 1
- for i >= 0 && !os.IsPathSeparator(path[i]) {
- i--
- }
- if i >= 0 {
- path = path[i+1:]
- }
- // If empty now, it had only slashes.
- if path == "" {
- return string(Separator)
- }
- return path
-}
-
-// Dir returns all but the last element of path, typically the path's directory.
-// After dropping the final element, Dir calls Clean on the path and trailing
-// slashes are removed.
-// If the path is empty, Dir returns ".".
-// If the path consists entirely of separators, Dir returns a single separator.
-// The returned path does not end in a separator unless it is the root directory.
-func Dir(path string) string {
- vol := VolumeName(path)
- i := len(path) - 1
- for i >= len(vol) && !os.IsPathSeparator(path[i]) {
- i--
- }
- dir := Clean(path[len(vol) : i+1])
- if dir == "." && len(vol) > 2 {
- // must be UNC
- return vol
- }
- return vol + dir
-}
-
-// VolumeName returns leading volume name.
-// Given "C:\foo\bar" it returns "C:" on Windows.
-// Given "\\host\share\foo" it returns "\\host\share".
-// On other platforms it returns "".
-func VolumeName(path string) string {
- return path[:volumeNameLen(path)]
-}
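
A short sketch of Base, Dir, and VolumeName on a Unix build (expected output in comments; the Windows volume behavior is noted inline):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	fmt.Println(filepath.Base("/usr/local/go/")) // "go"
	fmt.Println(filepath.Base("///"))            // "/"
	fmt.Println(filepath.Dir("/usr/local/go"))   // "/usr/local"
	fmt.Println(filepath.Dir("go"))              // "."

	// VolumeName is always "" on Unix; on Windows,
	// filepath.VolumeName(`C:\foo\bar`) would be "C:".
	fmt.Println(filepath.VolumeName("/usr/local")) // ""
}
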
diff --git a/contrib/go/_std_1.18/src/path/filepath/path_unix.go b/contrib/go/_std_1.18/src/path/filepath/path_unix.go
deleted file mode 100644
index dcf1d187e7..0000000000
--- a/contrib/go/_std_1.18/src/path/filepath/path_unix.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris
-
-package filepath
-
-import "strings"
-
-// IsAbs reports whether the path is absolute.
-func IsAbs(path string) bool {
- return strings.HasPrefix(path, "/")
-}
-
-// volumeNameLen returns the length of the leading volume name on Windows.
-// It returns 0 elsewhere.
-func volumeNameLen(path string) int {
- return 0
-}
-
-// HasPrefix exists for historical compatibility and should not be used.
-//
-// Deprecated: HasPrefix does not respect path boundaries and
-// does not ignore case when required.
-func HasPrefix(p, prefix string) bool {
- return strings.HasPrefix(p, prefix)
-}
-
-func splitList(path string) []string {
- if path == "" {
- return []string{}
- }
- return strings.Split(path, string(ListSeparator))
-}
-
-func abs(path string) (string, error) {
- return unixAbs(path)
-}
-
-func join(elem []string) string {
- // If there's a bug here, fix the logic in ./path_plan9.go too.
- for i, e := range elem {
- if e != "" {
- return Clean(strings.Join(elem[i:], string(Separator)))
- }
- }
- return ""
-}
-
-func sameWord(a, b string) bool {
- return a == b
-}
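
These Unix helpers back the exported IsAbs and SplitList; a quick sketch of the observable behavior (expected output in comments):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// IsAbs only checks for a leading '/' on Unix.
	fmt.Println(filepath.IsAbs("/tmp/x")) // true
	fmt.Println(filepath.IsAbs("tmp/x"))  // false

	// SplitList wraps splitList; ListSeparator is ':' on Unix,
	// and a trailing separator yields a trailing empty element.
	fmt.Println(filepath.SplitList("/bin:/usr/bin:")) // [/bin /usr/bin ]
	fmt.Println(filepath.SplitList(""))               // []
}
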
diff --git a/contrib/go/_std_1.18/src/path/filepath/symlink_unix.go b/contrib/go/_std_1.18/src/path/filepath/symlink_unix.go
deleted file mode 100644
index 7bfe17e2fd..0000000000
--- a/contrib/go/_std_1.18/src/path/filepath/symlink_unix.go
+++ /dev/null
@@ -1,7 +0,0 @@
-//go:build !windows
-
-package filepath
-
-func evalSymlinks(path string) (string, error) {
- return walkSymlinks(path)
-}
diff --git a/contrib/go/_std_1.18/src/path/match.go b/contrib/go/_std_1.18/src/path/match.go
deleted file mode 100644
index 918624c60e..0000000000
--- a/contrib/go/_std_1.18/src/path/match.go
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package path
-
-import (
- "errors"
- "internal/bytealg"
- "unicode/utf8"
-)
-
-// ErrBadPattern indicates a pattern was malformed.
-var ErrBadPattern = errors.New("syntax error in pattern")
-
-// Match reports whether name matches the shell pattern.
-// The pattern syntax is:
-//
-// pattern:
-// { term }
-// term:
-// '*' matches any sequence of non-/ characters
-// '?' matches any single non-/ character
-// '[' [ '^' ] { character-range } ']'
-// character class (must be non-empty)
-// c matches character c (c != '*', '?', '\\', '[')
-// '\\' c matches character c
-//
-// character-range:
-// c matches character c (c != '\\', '-', ']')
-// '\\' c matches character c
-// lo '-' hi matches character c for lo <= c <= hi
-//
-// Match requires pattern to match all of name, not just a substring.
-// The only possible returned error is ErrBadPattern, when pattern
-// is malformed.
-//
-func Match(pattern, name string) (matched bool, err error) {
-Pattern:
- for len(pattern) > 0 {
- var star bool
- var chunk string
- star, chunk, pattern = scanChunk(pattern)
- if star && chunk == "" {
- // Trailing * matches rest of string unless it has a /.
- return bytealg.IndexByteString(name, '/') < 0, nil
- }
- // Look for match at current position.
- t, ok, err := matchChunk(chunk, name)
- // if we're the last chunk, make sure we've exhausted the name
- // otherwise we'll give a false result even if we could still match
- // using the star
- if ok && (len(t) == 0 || len(pattern) > 0) {
- name = t
- continue
- }
- if err != nil {
- return false, err
- }
- if star {
- // Look for match skipping i+1 bytes.
- // Cannot skip /.
- for i := 0; i < len(name) && name[i] != '/'; i++ {
- t, ok, err := matchChunk(chunk, name[i+1:])
- if ok {
- // if we're the last chunk, make sure we exhausted the name
- if len(pattern) == 0 && len(t) > 0 {
- continue
- }
- name = t
- continue Pattern
- }
- if err != nil {
- return false, err
- }
- }
- }
- // Before returning false with no error,
- // check that the remainder of the pattern is syntactically valid.
- for len(pattern) > 0 {
- _, chunk, pattern = scanChunk(pattern)
- if _, _, err := matchChunk(chunk, ""); err != nil {
- return false, err
- }
- }
- return false, nil
- }
- return len(name) == 0, nil
-}
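
A few illustrative calls against the pattern grammar above (expected output in comments):

package main

import (
	"fmt"
	"path"
)

func main() {
	// '*' never crosses a '/'.
	fmt.Println(path.Match("a/*", "a/b"))   // true <nil>
	fmt.Println(path.Match("a/*", "a/b/c")) // false <nil>
	fmt.Println(path.Match("a/?", "a/b"))   // true <nil>
	fmt.Println(path.Match("[a-c]", "b"))   // true <nil>

	// A malformed pattern reports ErrBadPattern.
	_, err := path.Match("[a-", "a")
	fmt.Println(err) // syntax error in pattern
}
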
-
-// scanChunk gets the next segment of pattern, which is a non-star string
-// possibly preceded by a star.
-func scanChunk(pattern string) (star bool, chunk, rest string) {
- for len(pattern) > 0 && pattern[0] == '*' {
- pattern = pattern[1:]
- star = true
- }
- inrange := false
- var i int
-Scan:
- for i = 0; i < len(pattern); i++ {
- switch pattern[i] {
- case '\\':
- // error check handled in matchChunk: bad pattern.
- if i+1 < len(pattern) {
- i++
- }
- case '[':
- inrange = true
- case ']':
- inrange = false
- case '*':
- if !inrange {
- break Scan
- }
- }
- }
- return star, pattern[0:i], pattern[i:]
-}
-
-// matchChunk checks whether chunk matches the beginning of s.
-// If so, it returns the remainder of s (after the match).
-// Chunk is all single-character operators: literals, char classes, and ?.
-func matchChunk(chunk, s string) (rest string, ok bool, err error) {
- // failed records whether the match has failed.
- // After the match fails, the loop continues on processing chunk,
- // checking that the pattern is well-formed but no longer reading s.
- failed := false
- for len(chunk) > 0 {
- if !failed && len(s) == 0 {
- failed = true
- }
- switch chunk[0] {
- case '[':
- // character class
- var r rune
- if !failed {
- var n int
- r, n = utf8.DecodeRuneInString(s)
- s = s[n:]
- }
- chunk = chunk[1:]
- // possibly negated
- negated := false
- if len(chunk) > 0 && chunk[0] == '^' {
- negated = true
- chunk = chunk[1:]
- }
- // parse all ranges
- match := false
- nrange := 0
- for {
- if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
- chunk = chunk[1:]
- break
- }
- var lo, hi rune
- if lo, chunk, err = getEsc(chunk); err != nil {
- return "", false, err
- }
- hi = lo
- if chunk[0] == '-' {
- if hi, chunk, err = getEsc(chunk[1:]); err != nil {
- return "", false, err
- }
- }
- if lo <= r && r <= hi {
- match = true
- }
- nrange++
- }
- if match == negated {
- failed = true
- }
-
- case '?':
- if !failed {
- if s[0] == '/' {
- failed = true
- }
- _, n := utf8.DecodeRuneInString(s)
- s = s[n:]
- }
- chunk = chunk[1:]
-
- case '\\':
- chunk = chunk[1:]
- if len(chunk) == 0 {
- return "", false, ErrBadPattern
- }
- fallthrough
-
- default:
- if !failed {
- if chunk[0] != s[0] {
- failed = true
- }
- s = s[1:]
- }
- chunk = chunk[1:]
- }
- }
- if failed {
- return "", false, nil
- }
- return s, true, nil
-}
-
-// getEsc gets a possibly-escaped character from chunk, for a character class.
-func getEsc(chunk string) (r rune, nchunk string, err error) {
- if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
- err = ErrBadPattern
- return
- }
- if chunk[0] == '\\' {
- chunk = chunk[1:]
- if len(chunk) == 0 {
- err = ErrBadPattern
- return
- }
- }
- r, n := utf8.DecodeRuneInString(chunk)
- if r == utf8.RuneError && n == 1 {
- err = ErrBadPattern
- }
- nchunk = chunk[n:]
- if len(nchunk) == 0 {
- err = ErrBadPattern
- }
- return
-}
diff --git a/contrib/go/_std_1.18/src/path/path.go b/contrib/go/_std_1.18/src/path/path.go
deleted file mode 100644
index f1f3499f63..0000000000
--- a/contrib/go/_std_1.18/src/path/path.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package path implements utility routines for manipulating slash-separated
-// paths.
-//
-// The path package should only be used for paths separated by forward
-// slashes, such as the paths in URLs. This package does not deal with
-// Windows paths with drive letters or backslashes; to manipulate
-// operating system paths, use the path/filepath package.
-package path
-
-// A lazybuf is a lazily constructed path buffer.
-// It supports append, reading previously appended bytes,
-// and retrieving the final string. It does not allocate a buffer
-// to hold the output until that output diverges from s.
-type lazybuf struct {
- s string
- buf []byte
- w int
-}
-
-func (b *lazybuf) index(i int) byte {
- if b.buf != nil {
- return b.buf[i]
- }
- return b.s[i]
-}
-
-func (b *lazybuf) append(c byte) {
- if b.buf == nil {
- if b.w < len(b.s) && b.s[b.w] == c {
- b.w++
- return
- }
- b.buf = make([]byte, len(b.s))
- copy(b.buf, b.s[:b.w])
- }
- b.buf[b.w] = c
- b.w++
-}
-
-func (b *lazybuf) string() string {
- if b.buf == nil {
- return b.s[:b.w]
- }
- return string(b.buf[:b.w])
-}
-
-// Clean returns the shortest path name equivalent to path
-// by purely lexical processing. It applies the following rules
-// iteratively until no further processing can be done:
-//
-// 1. Replace multiple slashes with a single slash.
-// 2. Eliminate each . path name element (the current directory).
-// 3. Eliminate each inner .. path name element (the parent directory)
-// along with the non-.. element that precedes it.
-// 4. Eliminate .. elements that begin a rooted path:
-// that is, replace "/.." by "/" at the beginning of a path.
-//
-// The returned path ends in a slash only if it is the root "/".
-//
-// If the result of this process is an empty string, Clean
-// returns the string ".".
-//
-// See also Rob Pike, "Lexical File Names in Plan 9 or
-// Getting Dot-Dot Right,"
-// https://9p.io/sys/doc/lexnames.html
-func Clean(path string) string {
- if path == "" {
- return "."
- }
-
- rooted := path[0] == '/'
- n := len(path)
-
- // Invariants:
- // reading from path; r is index of next byte to process.
- // writing to buf; w is index of next byte to write.
- // dotdot is index in buf where .. must stop, either because
- // it is the leading slash or it is a leading ../../.. prefix.
- out := lazybuf{s: path}
- r, dotdot := 0, 0
- if rooted {
- out.append('/')
- r, dotdot = 1, 1
- }
-
- for r < n {
- switch {
- case path[r] == '/':
- // empty path element
- r++
- case path[r] == '.' && (r+1 == n || path[r+1] == '/'):
- // . element
- r++
- case path[r] == '.' && path[r+1] == '.' && (r+2 == n || path[r+2] == '/'):
- // .. element: remove to last /
- r += 2
- switch {
- case out.w > dotdot:
- // can backtrack
- out.w--
- for out.w > dotdot && out.index(out.w) != '/' {
- out.w--
- }
- case !rooted:
- // cannot backtrack, but not rooted, so append .. element.
- if out.w > 0 {
- out.append('/')
- }
- out.append('.')
- out.append('.')
- dotdot = out.w
- }
- default:
- // real path element.
- // add slash if needed
- if rooted && out.w != 1 || !rooted && out.w != 0 {
- out.append('/')
- }
- // copy element
- for ; r < n && path[r] != '/'; r++ {
- out.append(path[r])
- }
- }
- }
-
- // Turn empty string into "."
- if out.w == 0 {
- return "."
- }
-
- return out.string()
-}
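
The four Clean rules, one call each (expected output in comments):

package main

import (
	"fmt"
	"path"
)

func main() {
	fmt.Println(path.Clean("a//b"))     // "a/b"  rule 1: collapse slashes
	fmt.Println(path.Clean("a/./b"))    // "a/b"  rule 2: drop . elements
	fmt.Println(path.Clean("a/c/../b")) // "a/b"  rule 3: fold inner ..
	fmt.Println(path.Clean("/../a"))    // "/a"   rule 4: rooted .. vanishes
	fmt.Println(path.Clean(""))         // "."
}
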
-
-// lastSlash(s) is strings.LastIndex(s, "/") but we can't import strings.
-func lastSlash(s string) int {
- i := len(s) - 1
- for i >= 0 && s[i] != '/' {
- i--
- }
- return i
-}
-
-// Split splits path immediately following the final slash,
-// separating it into a directory and file name component.
-// If there is no slash in path, Split returns an empty dir and
-// file set to path.
-// The returned values have the property that path = dir+file.
-func Split(path string) (dir, file string) {
- i := lastSlash(path)
- return path[:i+1], path[i+1:]
-}
-
-// Join joins any number of path elements into a single path,
-// separating them with slashes. Empty elements are ignored.
-// The result is Cleaned. However, if the argument list is
-// empty or all its elements are empty, Join returns
-// an empty string.
-func Join(elem ...string) string {
- size := 0
- for _, e := range elem {
- size += len(e)
- }
- if size == 0 {
- return ""
- }
- buf := make([]byte, 0, size+len(elem)-1)
- for _, e := range elem {
- if len(buf) > 0 || e != "" {
- if len(buf) > 0 {
- buf = append(buf, '/')
- }
- buf = append(buf, e...)
- }
- }
- return Clean(string(buf))
-}
-
-// Ext returns the file name extension used by path.
-// The extension is the suffix beginning at the final dot
-// in the final slash-separated element of path;
-// it is empty if there is no dot.
-func Ext(path string) string {
- for i := len(path) - 1; i >= 0 && path[i] != '/'; i-- {
- if path[i] == '.' {
- return path[i:]
- }
- }
- return ""
-}
-
-// Base returns the last element of path.
-// Trailing slashes are removed before extracting the last element.
-// If the path is empty, Base returns ".".
-// If the path consists entirely of slashes, Base returns "/".
-func Base(path string) string {
- if path == "" {
- return "."
- }
- // Strip trailing slashes.
- for len(path) > 0 && path[len(path)-1] == '/' {
- path = path[0 : len(path)-1]
- }
- // Find the last element
- if i := lastSlash(path); i >= 0 {
- path = path[i+1:]
- }
- // If empty now, it had only slashes.
- if path == "" {
- return "/"
- }
- return path
-}
-
-// IsAbs reports whether the path is absolute.
-func IsAbs(path string) bool {
- return len(path) > 0 && path[0] == '/'
-}
-
-// Dir returns all but the last element of path, typically the path's directory.
-// After dropping the final element using Split, the path is Cleaned and trailing
-// slashes are removed.
-// If the path is empty, Dir returns ".".
-// If the path consists entirely of slashes followed by non-slash bytes, Dir
-// returns a single slash. In any other case, the returned path does not end in a
-// slash.
-func Dir(path string) string {
- dir, _ := Split(path)
- return Clean(dir)
-}
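
A quick tour of the slash-only helpers above (expected output in comments):

package main

import (
	"fmt"
	"path"
)

func main() {
	dir, file := path.Split("static/css/site.css")
	fmt.Println(dir, file) // "static/css/" "site.css"

	fmt.Println(path.Join("a", "", "b//c")) // "a/b/c": empties dropped, result Cleaned
	fmt.Println(path.Join())                // ""

	fmt.Println(path.Ext("site.css")) // ".css"
	fmt.Println(path.Base("a/b/"))    // "b"
	fmt.Println(path.Dir("a/b/c"))    // "a/b"
}
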
diff --git a/contrib/go/_std_1.18/src/reflect/abi.go b/contrib/go/_std_1.18/src/reflect/abi.go
deleted file mode 100644
index 28204b8193..0000000000
--- a/contrib/go/_std_1.18/src/reflect/abi.go
+++ /dev/null
@@ -1,511 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package reflect
-
-import (
- "internal/abi"
- "internal/goarch"
- "internal/goexperiment"
- "unsafe"
-)
-
-// These variables are used by the register assignment
-// algorithm in this file.
-//
-// They should be modified with care (no other reflect code
-// may be executing) and are generally only modified
-// when testing this package.
-//
-// They should never be set higher than their internal/abi
-// constant counterparts, because the system relies on a
-// structure that is at least large enough to hold the
-// registers the system supports.
-//
-// Currently they're set to zero because using the actual
-// constants will break every part of the toolchain that
-// uses reflect to call functions (e.g. go test, or anything
-// that uses text/template). The values that are currently
-// commented out there should be the actual values once
-// we're ready to use the register ABI everywhere.
-var (
- intArgRegs = abi.IntArgRegs * goexperiment.RegabiArgsInt
- floatArgRegs = abi.FloatArgRegs * goexperiment.RegabiArgsInt
- floatRegSize = uintptr(abi.EffectiveFloatRegSize * goexperiment.RegabiArgsInt)
-)
-
-// abiStep represents an ABI "instruction." Each instruction
-// describes one part of how to translate between a Go value
-// in memory and a call frame.
-type abiStep struct {
- kind abiStepKind
-
- // offset and size together describe a part of a Go value
- // in memory.
- offset uintptr
- size uintptr // size in bytes of the part
-
- // These fields describe the ABI side of the translation.
- stkOff uintptr // stack offset, used if kind == abiStepStack
- ireg int // integer register index, used if kind == abiStepIntReg or kind == abiStepPointer
- freg int // FP register index, used if kind == abiStepFloatReg
-}
-
-// abiStepKind is the "op-code" for an abiStep instruction.
-type abiStepKind int
-
-const (
- abiStepBad abiStepKind = iota
- abiStepStack // copy to/from stack
- abiStepIntReg // copy to/from integer register
- abiStepPointer // copy pointer to/from integer register
- abiStepFloatReg // copy to/from FP register
-)
-
-// abiSeq represents a sequence of ABI instructions for copying
-// from a series of reflect.Values to a call frame (for call arguments)
-// or vice-versa (for call results).
-//
-// An abiSeq should be populated by calling its addArg method.
-type abiSeq struct {
- // steps is the set of instructions.
- //
- // The instructions are grouped together by whole arguments,
- // with the starting index for the instructions
- // of the i'th Go value available in valueStart.
- //
- // For instance, if this abiSeq represents 3 arguments
- // passed to a function, then the 2nd argument's steps
- // begin at steps[valueStart[1]].
- //
- // Because reflect accepts Go arguments in distinct
- // Values and each Value is stored separately, each abiStep
- // that begins a new argument will have its offset
- // field == 0.
- steps []abiStep
- valueStart []int
-
- stackBytes uintptr // stack space used
- iregs, fregs int // registers used
-}
-
-func (a *abiSeq) dump() {
- for i, p := range a.steps {
- println("part", i, p.kind, p.offset, p.size, p.stkOff, p.ireg, p.freg)
- }
- print("values ")
- for _, i := range a.valueStart {
- print(i, " ")
- }
- println()
- println("stack", a.stackBytes)
- println("iregs", a.iregs)
- println("fregs", a.fregs)
-}
-
-// stepsForValue returns the ABI instructions for translating
-// the i'th Go argument or return value represented by this
-// abiSeq to the Go ABI.
-func (a *abiSeq) stepsForValue(i int) []abiStep {
- s := a.valueStart[i]
- var e int
- if i == len(a.valueStart)-1 {
- e = len(a.steps)
- } else {
- e = a.valueStart[i+1]
- }
- return a.steps[s:e]
-}
-
-// addArg extends the abiSeq with a new Go value of type t.
-//
-// If the value was stack-assigned, returns the single
-// abiStep describing that translation, and nil otherwise.
-func (a *abiSeq) addArg(t *rtype) *abiStep {
- // We'll always be adding a new value, so do that first.
- pStart := len(a.steps)
- a.valueStart = append(a.valueStart, pStart)
- if t.size == 0 {
- // If the size of the argument type is zero, then
- // in order to degrade gracefully into ABI0, we need
- // to stack-assign this type. The reason is that
- // although zero-sized types take up no space on the
- // stack, they do cause the next argument to be aligned.
- // So just do that here, but don't bother actually
- // generating a new ABI step for it (there's nothing to
- // actually copy).
- //
- // We cannot handle this in the recursive case of
- // regAssign because zero-sized *fields* of a
- // non-zero-sized struct do not cause it to be
- // stack-assigned. So we need a special case here
- // at the top.
- a.stackBytes = align(a.stackBytes, uintptr(t.align))
- return nil
- }
- // Hold a copy of "a" so that we can roll back if
- // register assignment fails.
- aOld := *a
- if !a.regAssign(t, 0) {
- // Register assignment failed. Roll back any changes
- // and stack-assign.
- *a = aOld
- a.stackAssign(t.size, uintptr(t.align))
- return &a.steps[len(a.steps)-1]
- }
- return nil
-}
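
The hold-a-copy-then-roll-back pattern in addArg can be shown with a self-contained toy; toySeq and its fields are invented stand-ins that mimic only the rollback discipline, not the real ABI rules:

package main

import "fmt"

// toySeq mimics abiSeq: values go to registers when they fit whole,
// otherwise the whole value falls back to the stack.
type toySeq struct {
	regs     []string // register-assigned parts
	regLimit int
	stack    []string // whole values that fell back to the stack
}

// regAssign greedily takes registers and may fail partway through,
// leaving changes behind, just like abiSeq.regAssign.
func (s *toySeq) regAssign(parts []string) bool {
	for _, p := range parts {
		if len(s.regs) == s.regLimit {
			return false
		}
		s.regs = append(s.regs, p)
	}
	return true
}

// addArg holds a copy of the whole state and restores it when
// register assignment fails, then stack-assigns the value instead.
func (s *toySeq) addArg(name string, parts []string) {
	old := *s
	if !s.regAssign(parts) {
		*s = old // roll back the partial register use
		s.stack = append(s.stack, name)
	}
}

func main() {
	s := &toySeq{regLimit: 3}
	s.addArg("x", []string{"x0"})             // fits: 1 of 3 regs used
	s.addArg("p", []string{"p0", "p1", "p2"}) // fails partway, stack-assigned
	s.addArg("y", []string{"y0", "y1"})       // fits after the rollback
	fmt.Println("regs:", s.regs, "stack:", s.stack)
	// regs: [x0 y0 y1] stack: [p]
}
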
-
-// addRcvr extends the abiSeq with a new method call
-// receiver according to the interface calling convention.
-//
-// If the receiver was stack-assigned, returns the single
-// abiStep describing that translation, and nil otherwise.
-// Returns true if the receiver is a pointer.
-func (a *abiSeq) addRcvr(rcvr *rtype) (*abiStep, bool) {
- // The receiver is always one word.
- a.valueStart = append(a.valueStart, len(a.steps))
- var ok, ptr bool
- if ifaceIndir(rcvr) || rcvr.pointers() {
- ok = a.assignIntN(0, goarch.PtrSize, 1, 0b1)
- ptr = true
- } else {
- // TODO(mknyszek): Is this case even possible?
- // The interface data word never contains a non-pointer
- // value. This case was copied over from older code
- // in the reflect package which only conditionally added
- // a pointer bit to the reflect.(Value).Call stack frame's
- // GC bitmap.
- ok = a.assignIntN(0, goarch.PtrSize, 1, 0b0)
- ptr = false
- }
- if !ok {
- a.stackAssign(goarch.PtrSize, goarch.PtrSize)
- return &a.steps[len(a.steps)-1], ptr
- }
- return nil, ptr
-}
-
-// regAssign attempts to reserve argument registers for a value of
-// type t, stored at some offset.
-//
-// It returns whether or not the assignment succeeded, but
-// leaves any changes it made to a.steps behind, so the caller
-// must undo that work by adjusting a.steps if it fails.
-//
-// This method along with the assign* methods represent the
-// complete register-assignment algorithm for the Go ABI.
-func (a *abiSeq) regAssign(t *rtype, offset uintptr) bool {
- switch t.Kind() {
- case UnsafePointer, Pointer, Chan, Map, Func:
- return a.assignIntN(offset, t.size, 1, 0b1)
- case Bool, Int, Uint, Int8, Uint8, Int16, Uint16, Int32, Uint32, Uintptr:
- return a.assignIntN(offset, t.size, 1, 0b0)
- case Int64, Uint64:
- switch goarch.PtrSize {
- case 4:
- return a.assignIntN(offset, 4, 2, 0b0)
- case 8:
- return a.assignIntN(offset, 8, 1, 0b0)
- }
- case Float32, Float64:
- return a.assignFloatN(offset, t.size, 1)
- case Complex64:
- return a.assignFloatN(offset, 4, 2)
- case Complex128:
- return a.assignFloatN(offset, 8, 2)
- case String:
- return a.assignIntN(offset, goarch.PtrSize, 2, 0b01)
- case Interface:
- return a.assignIntN(offset, goarch.PtrSize, 2, 0b10)
- case Slice:
- return a.assignIntN(offset, goarch.PtrSize, 3, 0b001)
- case Array:
- tt := (*arrayType)(unsafe.Pointer(t))
- switch tt.len {
- case 0:
- // There's nothing to assign, so don't modify
- // a.steps but succeed so the caller doesn't
- // try to stack-assign this value.
- return true
- case 1:
- return a.regAssign(tt.elem, offset)
- default:
- return false
- }
- case Struct:
- st := (*structType)(unsafe.Pointer(t))
- for i := range st.fields {
- f := &st.fields[i]
- if !a.regAssign(f.typ, offset+f.offset()) {
- return false
- }
- }
- return true
- default:
- print("t.Kind == ", t.Kind(), "\n")
- panic("unknown type kind")
- }
- panic("unhandled register assignment path")
-}
-
-// assignIntN assigns n values to registers, each "size" bytes large,
-// from the data at [offset, offset+n*size) in memory. Each value at
-// [offset+i*size, offset+(i+1)*size) for i < n is assigned to the
-// next n integer registers.
-//
-// Bit i in ptrMap indicates whether the i'th value is a pointer.
-// n must be <= 8.
-//
-// Returns whether assignment succeeded.
-func (a *abiSeq) assignIntN(offset, size uintptr, n int, ptrMap uint8) bool {
- if n > 8 || n < 0 {
- panic("invalid n")
- }
- if ptrMap != 0 && size != goarch.PtrSize {
- panic("non-empty pointer map passed for non-pointer-size values")
- }
- if a.iregs+n > intArgRegs {
- return false
- }
- for i := 0; i < n; i++ {
- kind := abiStepIntReg
- if ptrMap&(uint8(1)<<i) != 0 {
- kind = abiStepPointer
- }
- a.steps = append(a.steps, abiStep{
- kind: kind,
- offset: offset + uintptr(i)*size,
- size: size,
- ireg: a.iregs,
- })
- a.iregs++
- }
- return true
-}
-
-// assignFloatN assigns n values to registers, each "size" bytes large,
-// from the data at [offset, offset+n*size) in memory. Each value at
-// [offset+i*size, offset+(i+1)*size) for i < n is assigned to the
-// next n floating-point registers.
-//
-// Returns whether assignment succeeded.
-func (a *abiSeq) assignFloatN(offset, size uintptr, n int) bool {
- if n < 0 {
- panic("invalid n")
- }
- if a.fregs+n > floatArgRegs || floatRegSize < size {
- return false
- }
- for i := 0; i < n; i++ {
- a.steps = append(a.steps, abiStep{
- kind: abiStepFloatReg,
- offset: offset + uintptr(i)*size,
- size: size,
- freg: a.fregs,
- })
- a.fregs++
- }
- return true
-}
-
-// stackAssign reserves stack space for one value that is "size" bytes
-// large with alignment "alignment".
-//
-// Should not be called directly; use addArg instead.
-func (a *abiSeq) stackAssign(size, alignment uintptr) {
- a.stackBytes = align(a.stackBytes, alignment)
- a.steps = append(a.steps, abiStep{
- kind: abiStepStack,
- offset: 0, // Only used for whole arguments, so the memory offset is 0.
- size: size,
- stkOff: a.stackBytes,
- })
- a.stackBytes += size
-}
-
-// abiDesc describes the ABI for a function or method.
-type abiDesc struct {
- // call and ret represent the translation steps for
- // the call and return paths of a Go function.
- call, ret abiSeq
-
- // These fields describe the stack space allocated
- // for the call. stackCallArgsSize is the amount of space
- // reserved for arguments but not return values. retOffset
- // is the offset at which return values begin, and
- // spill is the size in bytes of additional space reserved
- // to spill argument registers into in case of preemption in
- // reflectcall's stack frame.
- stackCallArgsSize, retOffset, spill uintptr
-
- // stackPtrs is a bitmap that indicates whether
- // each word in the ABI stack space (stack-assigned
- // args + return values) is a pointer. Used
- // as the heap pointer bitmap for stack space
- // passed to reflectcall.
- stackPtrs *bitVector
-
- // inRegPtrs is a bitmap whose i'th bit indicates
- // whether the i'th integer argument register contains
- // a pointer. Used by makeFuncStub and methodValueCall
- // to make result pointers visible to the GC.
- //
- // outRegPtrs is the same, but for result values.
- // Used by reflectcall to make result pointers visible
- // to the GC.
- inRegPtrs, outRegPtrs abi.IntArgRegBitmap
-}
-
-func (a *abiDesc) dump() {
- println("ABI")
- println("call")
- a.call.dump()
- println("ret")
- a.ret.dump()
- println("stackCallArgsSize", a.stackCallArgsSize)
- println("retOffset", a.retOffset)
- println("spill", a.spill)
- print("inRegPtrs:")
- dumpPtrBitMap(a.inRegPtrs)
- println()
- print("outRegPtrs:")
- dumpPtrBitMap(a.outRegPtrs)
- println()
-}
-
-func dumpPtrBitMap(b abi.IntArgRegBitmap) {
- for i := 0; i < intArgRegs; i++ {
- x := 0
- if b.Get(i) {
- x = 1
- }
- print(" ", x)
- }
-}
-
-func newAbiDesc(t *funcType, rcvr *rtype) abiDesc {
- // We need to add space for this argument to
- // the frame so that it can spill args into it.
- //
- // The size of this space is just the sum of the sizes
- // of each register-allocated type.
- //
- // TODO(mknyszek): Remove this when we no longer have
- // caller reserved spill space.
- spill := uintptr(0)
-
- // Compute gc program & stack bitmap for stack arguments
- stackPtrs := new(bitVector)
-
- // Compute the stack frame pointer bitmap and register
- // pointer bitmap for arguments.
- inRegPtrs := abi.IntArgRegBitmap{}
-
- // Compute abiSeq for input parameters.
- var in abiSeq
- if rcvr != nil {
- stkStep, isPtr := in.addRcvr(rcvr)
- if stkStep != nil {
- if isPtr {
- stackPtrs.append(1)
- } else {
- stackPtrs.append(0)
- }
- } else {
- spill += goarch.PtrSize
- }
- }
- for i, arg := range t.in() {
- stkStep := in.addArg(arg)
- if stkStep != nil {
- addTypeBits(stackPtrs, stkStep.stkOff, arg)
- } else {
- spill = align(spill, uintptr(arg.align))
- spill += arg.size
- for _, st := range in.stepsForValue(i) {
- if st.kind == abiStepPointer {
- inRegPtrs.Set(st.ireg)
- }
- }
- }
- }
- spill = align(spill, goarch.PtrSize)
-
- // From the input parameters alone, we now know
- // the stackCallArgsSize and retOffset.
- stackCallArgsSize := in.stackBytes
- retOffset := align(in.stackBytes, goarch.PtrSize)
-
- // Compute the stack frame pointer bitmap and register
- // pointer bitmap for return values.
- outRegPtrs := abi.IntArgRegBitmap{}
-
- // Compute abiSeq for output parameters.
- var out abiSeq
- // Stack-assigned return values do not share
- // space with arguments like they do with registers,
- // so we need to inject a stack offset here.
- // Fake it by artificially extending stackBytes by
- // the return offset.
- out.stackBytes = retOffset
- for i, res := range t.out() {
- stkStep := out.addArg(res)
- if stkStep != nil {
- addTypeBits(stackPtrs, stkStep.stkOff, res)
- } else {
- for _, st := range out.stepsForValue(i) {
- if st.kind == abiStepPointer {
- outRegPtrs.Set(st.ireg)
- }
- }
- }
- }
- // Undo the faking from earlier so that stackBytes
- // is accurate.
- out.stackBytes -= retOffset
- return abiDesc{in, out, stackCallArgsSize, retOffset, spill, stackPtrs, inRegPtrs, outRegPtrs}
-}
-
-// intFromReg loads an argSize-sized integer from reg and places it at to.
-//
-// argSize must be non-zero, fit in a register, and be a power of two.
-func intFromReg(r *abi.RegArgs, reg int, argSize uintptr, to unsafe.Pointer) {
- memmove(to, r.IntRegArgAddr(reg, argSize), argSize)
-}
-
-// intToReg loads an argSize-sized integer and stores it into reg.
-//
-// argSize must be non-zero, fit in a register, and be a power of two.
-func intToReg(r *abi.RegArgs, reg int, argSize uintptr, from unsafe.Pointer) {
- memmove(r.IntRegArgAddr(reg, argSize), from, argSize)
-}
-
-// floatFromReg loads a float value from its register representation in r.
-//
-// argSize must be 4 or 8.
-func floatFromReg(r *abi.RegArgs, reg int, argSize uintptr, to unsafe.Pointer) {
- switch argSize {
- case 4:
- *(*float32)(to) = archFloat32FromReg(r.Floats[reg])
- case 8:
- *(*float64)(to) = *(*float64)(unsafe.Pointer(&r.Floats[reg]))
- default:
- panic("bad argSize")
- }
-}
-
-// floatToReg stores a float value in its register representation in r.
-//
-// argSize must be either 4 or 8.
-func floatToReg(r *abi.RegArgs, reg int, argSize uintptr, from unsafe.Pointer) {
- switch argSize {
- case 4:
- r.Floats[reg] = archFloat32ToReg(*(*float32)(from))
- case 8:
- r.Floats[reg] = *(*uint64)(from)
- default:
- panic("bad argSize")
- }
-}
diff --git a/contrib/go/_std_1.18/src/reflect/deepequal.go b/contrib/go/_std_1.18/src/reflect/deepequal.go
deleted file mode 100644
index eaab101221..0000000000
--- a/contrib/go/_std_1.18/src/reflect/deepequal.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Deep equality test via reflection
-
-package reflect
-
-import (
- "internal/bytealg"
- "unsafe"
-)
-
-// During deepValueEqual, must keep track of checks that are
-// in progress. The comparison algorithm assumes that all
-// checks in progress are true when it reencounters them.
-// Visited comparisons are stored in a map indexed by visit.
-type visit struct {
- a1 unsafe.Pointer
- a2 unsafe.Pointer
- typ Type
-}
-
-// Tests for deep equality using reflected types. The map argument tracks
-// comparisons that have already been seen, which allows short circuiting on
-// recursive types.
-func deepValueEqual(v1, v2 Value, visited map[visit]bool) bool {
- if !v1.IsValid() || !v2.IsValid() {
- return v1.IsValid() == v2.IsValid()
- }
- if v1.Type() != v2.Type() {
- return false
- }
-
- // We want to avoid putting more in the visited map than we need to.
- // For any possible reference cycle that might be encountered,
- // hard(v1, v2) needs to return true for at least one of the types in the cycle,
- // and it's safe and valid to get Value's internal pointer.
- hard := func(v1, v2 Value) bool {
- switch v1.Kind() {
- case Pointer:
- if v1.typ.ptrdata == 0 {
- // go:notinheap pointers can't be cyclic.
- // At least, all of our current uses of go:notinheap have
- // that property. The runtime ones aren't cyclic (and we don't use
- // DeepEqual on them anyway), and the cgo-generated ones are
- // all empty structs.
- return false
- }
- fallthrough
- case Map, Slice, Interface:
- // Nil pointers cannot be cyclic. Avoid putting them in the visited map.
- return !v1.IsNil() && !v2.IsNil()
- }
- return false
- }
-
- if hard(v1, v2) {
- // For a Pointer or Map value, we need to check flagIndir,
- // which we do by calling the pointer method.
- // For Slice or Interface, flagIndir is always set,
- // and using v.ptr suffices.
- ptrval := func(v Value) unsafe.Pointer {
- switch v.Kind() {
- case Pointer, Map:
- return v.pointer()
- default:
- return v.ptr
- }
- }
- addr1 := ptrval(v1)
- addr2 := ptrval(v2)
- if uintptr(addr1) > uintptr(addr2) {
- // Canonicalize order to reduce number of entries in visited.
- // Assumes non-moving garbage collector.
- addr1, addr2 = addr2, addr1
- }
-
- // Short circuit if references are already seen.
- typ := v1.Type()
- v := visit{addr1, addr2, typ}
- if visited[v] {
- return true
- }
-
- // Remember for later.
- visited[v] = true
- }
-
- switch v1.Kind() {
- case Array:
- for i := 0; i < v1.Len(); i++ {
- if !deepValueEqual(v1.Index(i), v2.Index(i), visited) {
- return false
- }
- }
- return true
- case Slice:
- if v1.IsNil() != v2.IsNil() {
- return false
- }
- if v1.Len() != v2.Len() {
- return false
- }
- if v1.UnsafePointer() == v2.UnsafePointer() {
- return true
- }
- // Special case for []byte, which is common.
- if v1.Type().Elem().Kind() == Uint8 {
- return bytealg.Equal(v1.Bytes(), v2.Bytes())
- }
- for i := 0; i < v1.Len(); i++ {
- if !deepValueEqual(v1.Index(i), v2.Index(i), visited) {
- return false
- }
- }
- return true
- case Interface:
- if v1.IsNil() || v2.IsNil() {
- return v1.IsNil() == v2.IsNil()
- }
- return deepValueEqual(v1.Elem(), v2.Elem(), visited)
- case Pointer:
- if v1.UnsafePointer() == v2.UnsafePointer() {
- return true
- }
- return deepValueEqual(v1.Elem(), v2.Elem(), visited)
- case Struct:
- for i, n := 0, v1.NumField(); i < n; i++ {
- if !deepValueEqual(v1.Field(i), v2.Field(i), visited) {
- return false
- }
- }
- return true
- case Map:
- if v1.IsNil() != v2.IsNil() {
- return false
- }
- if v1.Len() != v2.Len() {
- return false
- }
- if v1.UnsafePointer() == v2.UnsafePointer() {
- return true
- }
- for _, k := range v1.MapKeys() {
- val1 := v1.MapIndex(k)
- val2 := v2.MapIndex(k)
- if !val1.IsValid() || !val2.IsValid() || !deepValueEqual(val1, val2, visited) {
- return false
- }
- }
- return true
- case Func:
- if v1.IsNil() && v2.IsNil() {
- return true
- }
- // Can't do better than this:
- return false
- case Int, Int8, Int16, Int32, Int64:
- return v1.Int() == v2.Int()
- case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
- return v1.Uint() == v2.Uint()
- case String:
- return v1.String() == v2.String()
- case Bool:
- return v1.Bool() == v2.Bool()
- case Float32, Float64:
- return v1.Float() == v2.Float()
- case Complex64, Complex128:
- return v1.Complex() == v2.Complex()
- default:
- // Normal equality suffices
- return valueInterface(v1, false) == valueInterface(v2, false)
- }
-}
-
-// DeepEqual reports whether x and y are "deeply equal," defined as follows.
-// Two values of identical type are deeply equal if one of the following cases applies.
-// Values of distinct types are never deeply equal.
-//
-// Array values are deeply equal when their corresponding elements are deeply equal.
-//
-// Struct values are deeply equal if their corresponding fields,
-// both exported and unexported, are deeply equal.
-//
-// Func values are deeply equal if both are nil; otherwise they are not deeply equal.
-//
-// Interface values are deeply equal if they hold deeply equal concrete values.
-//
-// Map values are deeply equal when all of the following are true:
-// they are both nil or both non-nil, they have the same length,
-// and either they are the same map object or their corresponding keys
-// (matched using Go equality) map to deeply equal values.
-//
-// Pointer values are deeply equal if they are equal using Go's == operator
-// or if they point to deeply equal values.
-//
-// Slice values are deeply equal when all of the following are true:
-// they are both nil or both non-nil, they have the same length,
-// and either they point to the same initial entry of the same underlying array
-// (that is, &x[0] == &y[0]) or their corresponding elements (up to length) are deeply equal.
-// Note that a non-nil empty slice and a nil slice (for example, []byte{} and []byte(nil))
-// are not deeply equal.
-//
-// Other values - numbers, bools, strings, and channels - are deeply equal
-// if they are equal using Go's == operator.
-//
-// In general DeepEqual is a recursive relaxation of Go's == operator.
-// However, this idea is impossible to implement without some inconsistency.
-// Specifically, it is possible for a value to be unequal to itself,
-// either because it is of func type (uncomparable in general)
-// or because it is a floating-point NaN value (not equal to itself in floating-point comparison),
-// or because it is an array, struct, or interface containing
-// such a value.
-// On the other hand, pointer values are always equal to themselves,
-// even if they point at or contain such problematic values,
-// because they compare equal using Go's == operator, and that
-// is a sufficient condition to be deeply equal, regardless of content.
-// DeepEqual has been defined so that the same short-cut applies
-// to slices and maps: if x and y are the same slice or the same map,
-// they are deeply equal regardless of content.
-//
-// As DeepEqual traverses the data values it may find a cycle. The
-// second and subsequent times that DeepEqual compares two pointer
-// values that have been compared before, it treats the values as
-// equal rather than examining the values to which they point.
-// This ensures that DeepEqual terminates.
-func DeepEqual(x, y any) bool {
- if x == nil || y == nil {
- return x == y
- }
- v1 := ValueOf(x)
- v2 := ValueOf(y)
- if v1.Type() != v2.Type() {
- return false
- }
- return deepValueEqual(v1, v2, make(map[visit]bool))
-}
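
A few calls that exercise the corner cases spelled out in the doc comment (expected output in comments):

package main

import (
	"fmt"
	"math"
	"reflect"
)

func main() {
	// Slices compare element-wise, but nil and empty differ.
	fmt.Println(reflect.DeepEqual([]int{1, 2}, []int{1, 2})) // true
	fmt.Println(reflect.DeepEqual([]byte{}, []byte(nil)))    // false

	// A NaN makes a value deeply unequal to itself...
	nan := math.NaN()
	fmt.Println(reflect.DeepEqual(nan, nan)) // false
	// ...unless both sides are the same pointer, which short-circuits.
	fmt.Println(reflect.DeepEqual(&nan, &nan)) // true

	// Funcs are deeply equal only when both are nil.
	fmt.Println(reflect.DeepEqual(main, main)) // false
}
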
diff --git a/contrib/go/_std_1.18/src/reflect/float32reg_generic.go b/contrib/go/_std_1.18/src/reflect/float32reg_generic.go
deleted file mode 100644
index 307c0bb33c..0000000000
--- a/contrib/go/_std_1.18/src/reflect/float32reg_generic.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !ppc64 && !ppc64le
-
-package reflect
-
-import "unsafe"
-
-// This file implements a straightforward conversion of a float32
-// value into its representation in a register. This conversion
-// applies for amd64 and arm64. It is also chosen for the case of
-// zero argument registers, but is not used.
-
-func archFloat32FromReg(reg uint64) float32 {
- i := uint32(reg)
- return *(*float32)(unsafe.Pointer(&i))
-}
-
-func archFloat32ToReg(val float32) uint64 {
- return uint64(*(*uint32)(unsafe.Pointer(&val)))
-}
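
The same bit-level round trip, sketched with the public math helpers instead of unsafe (expected output in comments):

package main

import (
	"fmt"
	"math"
)

func main() {
	// Widen the float32 bit pattern into the low half of a uint64
	// register slot, then recover it, mirroring the two helpers above.
	val := float32(1.5)
	reg := uint64(math.Float32bits(val))      // archFloat32ToReg equivalent
	back := math.Float32frombits(uint32(reg)) // archFloat32FromReg equivalent
	fmt.Printf("%#x %v\n", reg, back == val)  // 0x3fc00000 true
}
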
diff --git a/contrib/go/_std_1.18/src/reflect/makefunc.go b/contrib/go/_std_1.18/src/reflect/makefunc.go
deleted file mode 100644
index d0b0935cb8..0000000000
--- a/contrib/go/_std_1.18/src/reflect/makefunc.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// MakeFunc implementation.
-
-package reflect
-
-import (
- "internal/abi"
- "unsafe"
-)
-
-// makeFuncImpl is the closure value implementing the function
-// returned by MakeFunc.
-// The first three words of this type must be kept in sync with
-// methodValue and runtime.reflectMethodValue.
-// Any changes should be reflected in all three.
-type makeFuncImpl struct {
- makeFuncCtxt
- ftyp *funcType
- fn func([]Value) []Value
-}
-
-// MakeFunc returns a new function of the given Type
-// that wraps the function fn. When called, that new function
-// does the following:
-//
-// - converts its arguments to a slice of Values.
-// - runs results := fn(args).
-// - returns the results as a slice of Values, one per formal result.
-//
-// The implementation fn can assume that the argument Value slice
-// has the number and type of arguments given by typ.
-// If typ describes a variadic function, the final Value is itself
-// a slice representing the variadic arguments, as in the
-// body of a variadic function. The result Value slice returned by fn
-// must have the number and type of results given by typ.
-//
-// The Value.Call method allows the caller to invoke a typed function
-// in terms of Values; in contrast, MakeFunc allows the caller to implement
-// a typed function in terms of Values.
-//
-// The Examples section of the documentation includes an illustration
-// of how to use MakeFunc to build a swap function for different types.
-//
-func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value {
- if typ.Kind() != Func {
- panic("reflect: call of MakeFunc with non-Func type")
- }
-
- t := typ.common()
- ftyp := (*funcType)(unsafe.Pointer(t))
-
- code := abi.FuncPCABI0(makeFuncStub)
-
- // makeFuncImpl contains a stack map for use by the runtime
- _, _, abi := funcLayout(ftyp, nil)
-
- impl := &makeFuncImpl{
- makeFuncCtxt: makeFuncCtxt{
- fn: code,
- stack: abi.stackPtrs,
- argLen: abi.stackCallArgsSize,
- regPtrs: abi.inRegPtrs,
- },
- ftyp: ftyp,
- fn: fn,
- }
-
- return Value{t, unsafe.Pointer(impl), flag(Func)}
-}
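
The swap function the doc comment alludes to, in minimal form:

package main

import (
	"fmt"
	"reflect"
)

// makeSwap fills fptr, a pointer to a func(T, T) (T, T) variable, with
// a MakeFunc-built implementation returning its arguments in reverse.
func makeSwap(fptr any) {
	fn := reflect.ValueOf(fptr).Elem()
	swap := func(args []reflect.Value) []reflect.Value {
		return []reflect.Value{args[1], args[0]}
	}
	fn.Set(reflect.MakeFunc(fn.Type(), swap))
}

func main() {
	var intSwap func(int, int) (int, int)
	makeSwap(&intSwap)
	fmt.Println(intSwap(1, 2)) // 2 1

	var strSwap func(string, string) (string, string)
	makeSwap(&strSwap)
	fmt.Println(strSwap("hello", "world")) // world hello
}
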
-
-// makeFuncStub is an assembly function that is the code half of
-// the function returned from MakeFunc. It expects a *callReflectFunc
-// as its context register, and its job is to invoke callReflect(ctxt, frame)
-// where ctxt is the context register and frame is a pointer to the first
-// word in the passed-in argument frame.
-func makeFuncStub()
-
-// The first 3 words of this type must be kept in sync with
-// makeFuncImpl and runtime.reflectMethodValue.
-// Any changes should be reflected in all three.
-type methodValue struct {
- makeFuncCtxt
- method int
- rcvr Value
-}
-
-// makeMethodValue converts v from the rcvr+method index representation
-// of a method value (basically the receiver value with a special bit
-// set) into a true func value - a value holding an actual func. The
-// output is semantically equivalent to the input as far as the user of
-// package reflect can tell, but the true func representation can be
-// handled by code like Convert and Interface and Assign.
-func makeMethodValue(op string, v Value) Value {
- if v.flag&flagMethod == 0 {
- panic("reflect: internal error: invalid use of makeMethodValue")
- }
-
- // Ignoring the flagMethod bit, v describes the receiver, not the method type.
- fl := v.flag & (flagRO | flagAddr | flagIndir)
- fl |= flag(v.typ.Kind())
- rcvr := Value{v.typ, v.ptr, fl}
-
- // v.Type returns the actual type of the method value.
- ftyp := (*funcType)(unsafe.Pointer(v.Type().(*rtype)))
-
- code := methodValueCallCodePtr()
-
- // methodValue contains a stack map for use by the runtime
- _, _, abi := funcLayout(ftyp, nil)
- fv := &methodValue{
- makeFuncCtxt: makeFuncCtxt{
- fn: code,
- stack: abi.stackPtrs,
- argLen: abi.stackCallArgsSize,
- regPtrs: abi.inRegPtrs,
- },
- method: int(v.flag) >> flagMethodShift,
- rcvr: rcvr,
- }
-
- // Cause panic if method is not appropriate.
- // The panic would still happen during the call if we omit this,
- // but we want Interface() and other operations to fail early.
- methodReceiver(op, fv.rcvr, fv.method)
-
- return Value{&ftyp.rtype, unsafe.Pointer(fv), v.flag&flagRO | flag(Func)}
-}
-
-func methodValueCallCodePtr() uintptr {
- return abi.FuncPCABI0(methodValueCall)
-}
-
-// methodValueCall is an assembly function that is the code half of
-// the function returned from makeMethodValue. It expects a *methodValue
-// as its context register, and its job is to invoke callMethod(ctxt, frame)
-// where ctxt is the context register and frame is a pointer to the first
-// word in the passed-in argument frame.
-func methodValueCall()
-
-// This structure must be kept in sync with runtime.reflectMethodValue.
-// Any changes should be reflected in both.
-type makeFuncCtxt struct {
- fn uintptr
- stack *bitVector // ptrmap for both stack args and results
- argLen uintptr // just args
- regPtrs abi.IntArgRegBitmap
-}
-
-// moveMakeFuncArgPtrs uses ctxt.regPtrs to copy integer pointer arguments
-// in args.Ints to args.Ptrs where the GC can see them.
-//
-// This is similar to what reflectcallmove does in the runtime, except
-// that happens on the return path, whereas this happens on the call path.
-//
-// nosplit because pointers are being held in uintptr slots in args, so
-// having our stack scanned now could lead to accidentally freeing
-// memory.
-//go:nosplit
-func moveMakeFuncArgPtrs(ctxt *makeFuncCtxt, args *abi.RegArgs) {
- for i, arg := range args.Ints {
- // Avoid write barriers! Because our write barrier enqueues what
- // was there before, we might enqueue garbage.
- if ctxt.regPtrs.Get(i) {
- *(*uintptr)(unsafe.Pointer(&args.Ptrs[i])) = arg
- } else {
- // We *must* zero this space ourselves because it's defined in
- // assembly code and the GC will scan these pointers. Otherwise,
- // there will be garbage here.
- *(*uintptr)(unsafe.Pointer(&args.Ptrs[i])) = 0
- }
- }
-}
diff --git a/contrib/go/_std_1.18/src/reflect/type.go b/contrib/go/_std_1.18/src/reflect/type.go
deleted file mode 100644
index 8ba63bcad0..0000000000
--- a/contrib/go/_std_1.18/src/reflect/type.go
+++ /dev/null
@@ -1,3173 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package reflect implements run-time reflection, allowing a program to
-// manipulate objects with arbitrary types. The typical use is to take a value
-// with static type interface{} and extract its dynamic type information by
-// calling TypeOf, which returns a Type.
-//
-// A call to ValueOf returns a Value representing the run-time data.
-// Zero takes a Type and returns a Value representing a zero value
-// for that type.
-//
-// See "The Laws of Reflection" for an introduction to reflection in Go:
-// https://golang.org/doc/articles/laws_of_reflection.html
-package reflect
-
-import (
- "internal/goarch"
- "internal/unsafeheader"
- "strconv"
- "sync"
- "unicode"
- "unicode/utf8"
- "unsafe"
-)
-
-// Type is the representation of a Go type.
-//
-// Not all methods apply to all kinds of types. Restrictions,
-// if any, are noted in the documentation for each method.
-// Use the Kind method to find out the kind of type before
-// calling kind-specific methods. Calling a method
-// inappropriate to the kind of type causes a run-time panic.
-//
-// Type values are comparable, such as with the == operator,
-// so they can be used as map keys.
-// Two Type values are equal if they represent identical types.
-type Type interface {
- // Methods applicable to all types.
-
- // Align returns the alignment in bytes of a value of
- // this type when allocated in memory.
- Align() int
-
- // FieldAlign returns the alignment in bytes of a value of
- // this type when used as a field in a struct.
- FieldAlign() int
-
- // Method returns the i'th method in the type's method set.
- // It panics if i is not in the range [0, NumMethod()).
- //
- // For a non-interface type T or *T, the returned Method's Type and Func
- // fields describe a function whose first argument is the receiver,
- // and only exported methods are accessible.
- //
- // For an interface type, the returned Method's Type field gives the
- // method signature, without a receiver, and the Func field is nil.
- //
- // Methods are sorted in lexicographic order.
- Method(int) Method
-
- // MethodByName returns the method with that name in the type's
- // method set and a boolean indicating if the method was found.
- //
- // For a non-interface type T or *T, the returned Method's Type and Func
- // fields describe a function whose first argument is the receiver.
- //
- // For an interface type, the returned Method's Type field gives the
- // method signature, without a receiver, and the Func field is nil.
- MethodByName(string) (Method, bool)
-
- // NumMethod returns the number of methods accessible using Method.
- //
- // Note that NumMethod counts unexported methods only for interface types.
- NumMethod() int
-
- // Name returns the type's name within its package for a defined type.
- // For other (non-defined) types it returns the empty string.
- Name() string
-
- // PkgPath returns a defined type's package path, that is, the import path
- // that uniquely identifies the package, such as "encoding/base64".
- // If the type was predeclared (string, error) or not defined (*T, struct{},
- // []int, or A where A is an alias for a non-defined type), the package path
- // will be the empty string.
- PkgPath() string
-
- // Size returns the number of bytes needed to store
- // a value of the given type; it is analogous to unsafe.Sizeof.
- Size() uintptr
-
- // String returns a string representation of the type.
- // The string representation may use shortened package names
- // (e.g., base64 instead of "encoding/base64") and is not
- // guaranteed to be unique among types. To test for type identity,
- // compare the Types directly.
- String() string
-
- // Kind returns the specific kind of this type.
- Kind() Kind
-
- // Implements reports whether the type implements the interface type u.
- Implements(u Type) bool
-
- // AssignableTo reports whether a value of the type is assignable to type u.
- AssignableTo(u Type) bool
-
- // ConvertibleTo reports whether a value of the type is convertible to type u.
- // Even if ConvertibleTo returns true, the conversion may still panic.
- // For example, a slice of type []T is convertible to *[N]T,
- // but the conversion will panic if its length is less than N.
- ConvertibleTo(u Type) bool
-
- // Comparable reports whether values of this type are comparable.
- // Even if Comparable returns true, the comparison may still panic.
- // For example, values of interface type are comparable,
- // but the comparison will panic if their dynamic type is not comparable.
- Comparable() bool
-
- // Methods applicable only to some types, depending on Kind.
- // The methods allowed for each kind are:
- //
- // Int*, Uint*, Float*, Complex*: Bits
- // Array: Elem, Len
- // Chan: ChanDir, Elem
-	//	Func: In, NumIn, Out, NumOut, IsVariadic
- // Map: Key, Elem
- // Pointer: Elem
- // Slice: Elem
- // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField
-
- // Bits returns the size of the type in bits.
- // It panics if the type's Kind is not one of the
- // sized or unsized Int, Uint, Float, or Complex kinds.
- Bits() int
-
- // ChanDir returns a channel type's direction.
- // It panics if the type's Kind is not Chan.
- ChanDir() ChanDir
-
- // IsVariadic reports whether a function type's final input parameter
- // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
- // implicit actual type []T.
- //
- // For concreteness, if t represents func(x int, y ... float64), then
- //
- // t.NumIn() == 2
- // t.In(0) is the reflect.Type for "int"
- // t.In(1) is the reflect.Type for "[]float64"
- // t.IsVariadic() == true
- //
- // IsVariadic panics if the type's Kind is not Func.
- IsVariadic() bool
-
- // Elem returns a type's element type.
- // It panics if the type's Kind is not Array, Chan, Map, Pointer, or Slice.
- Elem() Type
-
- // Field returns a struct type's i'th field.
- // It panics if the type's Kind is not Struct.
- // It panics if i is not in the range [0, NumField()).
- Field(i int) StructField
-
- // FieldByIndex returns the nested field corresponding
- // to the index sequence. It is equivalent to calling Field
- // successively for each index i.
- // It panics if the type's Kind is not Struct.
- FieldByIndex(index []int) StructField
-
- // FieldByName returns the struct field with the given name
- // and a boolean indicating if the field was found.
- FieldByName(name string) (StructField, bool)
-
- // FieldByNameFunc returns the struct field with a name
- // that satisfies the match function and a boolean indicating if
- // the field was found.
- //
- // FieldByNameFunc considers the fields in the struct itself
- // and then the fields in any embedded structs, in breadth first order,
- // stopping at the shallowest nesting depth containing one or more
- // fields satisfying the match function. If multiple fields at that depth
- // satisfy the match function, they cancel each other
- // and FieldByNameFunc returns no match.
- // This behavior mirrors Go's handling of name lookup in
- // structs containing embedded fields.
- FieldByNameFunc(match func(string) bool) (StructField, bool)
-
- // In returns the type of a function type's i'th input parameter.
- // It panics if the type's Kind is not Func.
- // It panics if i is not in the range [0, NumIn()).
- In(i int) Type
-
- // Key returns a map type's key type.
- // It panics if the type's Kind is not Map.
- Key() Type
-
- // Len returns an array type's length.
- // It panics if the type's Kind is not Array.
- Len() int
-
- // NumField returns a struct type's field count.
- // It panics if the type's Kind is not Struct.
- NumField() int
-
- // NumIn returns a function type's input parameter count.
- // It panics if the type's Kind is not Func.
- NumIn() int
-
- // NumOut returns a function type's output parameter count.
- // It panics if the type's Kind is not Func.
- NumOut() int
-
- // Out returns the type of a function type's i'th output parameter.
- // It panics if the type's Kind is not Func.
- // It panics if i is not in the range [0, NumOut()).
- Out(i int) Type
-
- common() *rtype
- uncommon() *uncommonType
-}
-
-// BUG(rsc): FieldByName and related functions consider struct field names to be equal
-// if the names are equal, even if they are unexported names originating
-// in different packages. The practical effect of this is that the result of
-// t.FieldByName("x") is not well defined if the struct type t contains
-// multiple fields named x (embedded from different packages).
-// FieldByName may return one of the fields named x or may report that there are none.
-// See https://golang.org/issue/4876 for more details.
-
-/*
- * These data structures are known to the compiler (../cmd/compile/internal/reflectdata/reflect.go).
- * A few are known to ../runtime/type.go, which conveys them to debuggers.
- */
-
-// A Kind represents the specific kind of type that a Type represents.
-// The zero Kind is not a valid kind.
-type Kind uint
-
-const (
- Invalid Kind = iota
- Bool
- Int
- Int8
- Int16
- Int32
- Int64
- Uint
- Uint8
- Uint16
- Uint32
- Uint64
- Uintptr
- Float32
- Float64
- Complex64
- Complex128
- Array
- Chan
- Func
- Interface
- Map
- Pointer
- Slice
- String
- Struct
- UnsafePointer
-)
-
-// Ptr is the old name for the Pointer kind.
-const Ptr = Pointer
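-
-// For concreteness, a few Kind values as observed from outside the
-// package (an illustrative sketch of the constants above):
-//
-//	reflect.TypeOf(42).Kind()         // Int
-//	reflect.TypeOf("hi").Kind()       // String
-//	reflect.TypeOf(new(int)).Kind()   // Pointer (equivalently, Ptr)
-//	reflect.TypeOf([]int(nil)).Kind() // Slice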
-
-// tflag is used by an rtype to signal what extra type information is
-// available in the memory directly following the rtype value.
-//
-// tflag values must be kept in sync with copies in:
-// cmd/compile/internal/reflectdata/reflect.go
-// cmd/link/internal/ld/decodesym.go
-// runtime/type.go
-type tflag uint8
-
-const (
- // tflagUncommon means that there is a pointer, *uncommonType,
- // just beyond the outer type structure.
- //
- // For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0,
- // then t has uncommonType data and it can be accessed as:
- //
- // type tUncommon struct {
- // structType
- // u uncommonType
- // }
- // u := &(*tUncommon)(unsafe.Pointer(t)).u
- tflagUncommon tflag = 1 << 0
-
- // tflagExtraStar means the name in the str field has an
- // extraneous '*' prefix. This is because for most types T in
- // a program, the type *T also exists and reusing the str data
- // saves binary size.
- tflagExtraStar tflag = 1 << 1
-
- // tflagNamed means the type has a name.
- tflagNamed tflag = 1 << 2
-
- // tflagRegularMemory means that equal and hash functions can treat
- // this type as a single region of t.size bytes.
- tflagRegularMemory tflag = 1 << 3
-)
-
-// rtype is the common implementation of most values.
-// It is embedded in other struct types.
-//
-// rtype must be kept in sync with ../runtime/type.go:/^type._type.
-type rtype struct {
- size uintptr
- ptrdata uintptr // number of bytes in the type that can contain pointers
- hash uint32 // hash of type; avoids computation in hash tables
- tflag tflag // extra type information flags
- align uint8 // alignment of variable with this type
- fieldAlign uint8 // alignment of struct field with this type
- kind uint8 // enumeration for C
- // function for comparing objects of this type
- // (ptr to object A, ptr to object B) -> ==?
- equal func(unsafe.Pointer, unsafe.Pointer) bool
- gcdata *byte // garbage collection data
- str nameOff // string form
- ptrToThis typeOff // type for pointer to this type, may be zero
-}
-
-// Method on non-interface type
-type method struct {
- name nameOff // name of method
- mtyp typeOff // method type (without receiver)
- ifn textOff // fn used in interface call (one-word receiver)
- tfn textOff // fn used for normal method call
-}
-
-// uncommonType is present only for defined types or types with methods
-// (if T is a defined type, the uncommonTypes for T and *T have methods).
-// Using a pointer to this struct reduces the overall size required
-// to describe a non-defined type with no methods.
-type uncommonType struct {
- pkgPath nameOff // import path; empty for built-in types like int, string
- mcount uint16 // number of methods
- xcount uint16 // number of exported methods
- moff uint32 // offset from this uncommontype to [mcount]method
- _ uint32 // unused
-}
-
-// ChanDir represents a channel type's direction.
-type ChanDir int
-
-const (
- RecvDir ChanDir = 1 << iota // <-chan
- SendDir // chan<-
- BothDir = RecvDir | SendDir // chan
-)
-
-// arrayType represents a fixed array type.
-type arrayType struct {
- rtype
- elem *rtype // array element type
- slice *rtype // slice type
- len uintptr
-}
-
-// chanType represents a channel type.
-type chanType struct {
- rtype
- elem *rtype // channel element type
- dir uintptr // channel direction (ChanDir)
-}
-
-// funcType represents a function type.
-//
-// A *rtype for each in and out parameter is stored in an array that
-// directly follows the funcType (and possibly its uncommonType). So
-// a function type with one method, one input, and one output is:
-//
-// struct {
-// funcType
-// uncommonType
-// [2]*rtype // [0] is in, [1] is out
-// }
-type funcType struct {
- rtype
- inCount uint16
- outCount uint16 // top bit is set if last input parameter is ...
-}
-
-// imethod represents a method on an interface type
-type imethod struct {
- name nameOff // name of method
- typ typeOff // .(*FuncType) underneath
-}
-
-// interfaceType represents an interface type.
-type interfaceType struct {
- rtype
- pkgPath name // import path
- methods []imethod // sorted by hash
-}
-
-// mapType represents a map type.
-type mapType struct {
- rtype
- key *rtype // map key type
- elem *rtype // map element (value) type
- bucket *rtype // internal bucket structure
- // function for hashing keys (ptr to key, seed) -> hash
- hasher func(unsafe.Pointer, uintptr) uintptr
- keysize uint8 // size of key slot
- valuesize uint8 // size of value slot
- bucketsize uint16 // size of bucket
- flags uint32
-}
-
-// ptrType represents a pointer type.
-type ptrType struct {
- rtype
- elem *rtype // pointer element (pointed at) type
-}
-
-// sliceType represents a slice type.
-type sliceType struct {
- rtype
- elem *rtype // slice element type
-}
-
-// Struct field
-type structField struct {
- name name // name is always non-empty
- typ *rtype // type of field
- offsetEmbed uintptr // byte offset of field<<1 | isEmbedded
-}
-
-func (f *structField) offset() uintptr {
- return f.offsetEmbed >> 1
-}
-
-func (f *structField) embedded() bool {
- return f.offsetEmbed&1 != 0
-}
-
-// structType represents a struct type.
-type structType struct {
- rtype
- pkgPath name
- fields []structField // sorted by offset
-}
-
-// name is an encoded type name with optional extra data.
-//
-// The first byte is a bit field containing:
-//
-// 1<<0 the name is exported
-// 1<<1 tag data follows the name
-// 1<<2 pkgPath nameOff follows the name and tag
-//
-// Following that, there is a varint-encoded length of the name,
-// followed by the name itself.
-//
-// If tag data is present, it also has a varint-encoded length
-// followed by the tag itself.
-//
-// If the import path follows, then 4 bytes at the end of
-// the data form a nameOff. The import path is only set for concrete
-// methods that are defined in a different package than their type.
-//
-// If a name starts with "*", then the exported bit represents
-// whether the pointed to type is exported.
-//
-// Note: this encoding must match here and in:
-// cmd/compile/internal/reflectdata/reflect.go
-// runtime/type.go
-// internal/reflectlite/type.go
-// cmd/link/internal/ld/decodesym.go
-
-type name struct {
- bytes *byte
-}
-
-func (n name) data(off int, whySafe string) *byte {
- return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off), whySafe))
-}
-
-func (n name) isExported() bool {
- return (*n.bytes)&(1<<0) != 0
-}
-
-func (n name) hasTag() bool {
- return (*n.bytes)&(1<<1) != 0
-}
-
-// readVarint parses a varint as encoded by encoding/binary.
-// It returns the number of encoded bytes and the encoded value.
-func (n name) readVarint(off int) (int, int) {
- v := 0
- for i := 0; ; i++ {
- x := *n.data(off+i, "read varint")
- v += int(x&0x7f) << (7 * i)
- if x&0x80 == 0 {
- return i + 1, v
- }
- }
-}
-
-// writeVarint writes n to buf in varint form. Returns the
-// number of bytes written. n must be nonnegative.
-// Writes at most 10 bytes.
-func writeVarint(buf []byte, n int) int {
- for i := 0; ; i++ {
- b := byte(n & 0x7f)
- n >>= 7
- if n == 0 {
- buf[i] = b
- return i + 1
- }
- buf[i] = b | 0x80
- }
-}
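-
-// As a worked example of the varint form shared by readVarint and
-// writeVarint: 300 (0b1_0010_1100) is emitted lowest 7-bit group first,
-// with the high bit of each byte marking continuation:
-//
-//	var buf [10]byte
-//	writeVarint(buf[:], 300) // buf[0] == 0xAC, buf[1] == 0x02; returns 2
-//	// readVarint over those two bytes yields (2, 300).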
-
-func (n name) name() (s string) {
- if n.bytes == nil {
- return
- }
- i, l := n.readVarint(1)
- hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
- hdr.Data = unsafe.Pointer(n.data(1+i, "non-empty string"))
- hdr.Len = l
- return
-}
-
-func (n name) tag() (s string) {
- if !n.hasTag() {
- return ""
- }
- i, l := n.readVarint(1)
- i2, l2 := n.readVarint(1 + i + l)
- hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
- hdr.Data = unsafe.Pointer(n.data(1+i+l+i2, "non-empty string"))
- hdr.Len = l2
- return
-}
-
-func (n name) pkgPath() string {
- if n.bytes == nil || *n.data(0, "name flag field")&(1<<2) == 0 {
- return ""
- }
- i, l := n.readVarint(1)
- off := 1 + i + l
- if n.hasTag() {
- i2, l2 := n.readVarint(off)
- off += i2 + l2
- }
- var nameOff int32
- // Note that this field may not be aligned in memory,
- // so we cannot use a direct int32 assignment here.
- copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off, "name offset field")))[:])
- pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))}
- return pkgPathName.name()
-}
-
-func newName(n, tag string, exported bool) name {
- if len(n) >= 1<<29 {
- panic("reflect.nameFrom: name too long: " + n[:1024] + "...")
- }
- if len(tag) >= 1<<29 {
- panic("reflect.nameFrom: tag too long: " + tag[:1024] + "...")
- }
- var nameLen [10]byte
- var tagLen [10]byte
- nameLenLen := writeVarint(nameLen[:], len(n))
- tagLenLen := writeVarint(tagLen[:], len(tag))
-
- var bits byte
- l := 1 + nameLenLen + len(n)
- if exported {
- bits |= 1 << 0
- }
- if len(tag) > 0 {
- l += tagLenLen + len(tag)
- bits |= 1 << 1
- }
-
- b := make([]byte, l)
- b[0] = bits
- copy(b[1:], nameLen[:nameLenLen])
- copy(b[1+nameLenLen:], n)
- if len(tag) > 0 {
- tb := b[1+nameLenLen+len(n):]
- copy(tb, tagLen[:tagLenLen])
- copy(tb[tagLenLen:], tag)
- }
-
- return name{bytes: &b[0]}
-}
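-
-// For concreteness, newName("Foo", `json:"foo"`, true) lays out its
-// bytes roughly as:
-//
-//	0x03                  // flags: exported (1<<0) | tag data follows (1<<1)
-//	0x03 'F' 'o' 'o'      // varint name length, then the name
-//	0x0A 'j' 's' ... '"'  // varint tag length (10), then the tag
-//
-// Note that newName never sets the pkgPath bit (1<<2); per the encoding
-// comment above, that bit appears only in toolchain-generated names.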
-
-/*
- * The compiler knows the exact layout of all the data structures above.
- * The compiler does not know about the data structures and methods below.
- */
-
-// Method represents a single method.
-type Method struct {
- // Name is the method name.
- Name string
-
- // PkgPath is the package path that qualifies a lower case (unexported)
- // method name. It is empty for upper case (exported) method names.
- // The combination of PkgPath and Name uniquely identifies a method
- // in a method set.
- // See https://golang.org/ref/spec#Uniqueness_of_identifiers
- PkgPath string
-
- Type Type // method type
- Func Value // func with receiver as first argument
- Index int // index for Type.Method
-}
-
-// IsExported reports whether the method is exported.
-func (m Method) IsExported() bool {
- return m.PkgPath == ""
-}
-
-const (
- kindDirectIface = 1 << 5
- kindGCProg = 1 << 6 // Type.gc points to GC program
- kindMask = (1 << 5) - 1
-)
-
-// String returns the name of k.
-func (k Kind) String() string {
- if int(k) < len(kindNames) {
- return kindNames[k]
- }
- return "kind" + strconv.Itoa(int(k))
-}
-
-var kindNames = []string{
- Invalid: "invalid",
- Bool: "bool",
- Int: "int",
- Int8: "int8",
- Int16: "int16",
- Int32: "int32",
- Int64: "int64",
- Uint: "uint",
- Uint8: "uint8",
- Uint16: "uint16",
- Uint32: "uint32",
- Uint64: "uint64",
- Uintptr: "uintptr",
- Float32: "float32",
- Float64: "float64",
- Complex64: "complex64",
- Complex128: "complex128",
- Array: "array",
- Chan: "chan",
- Func: "func",
- Interface: "interface",
- Map: "map",
- Pointer: "ptr",
- Slice: "slice",
- String: "string",
- Struct: "struct",
- UnsafePointer: "unsafe.Pointer",
-}
-
-func (t *uncommonType) methods() []method {
- if t.mcount == 0 {
- return nil
- }
- return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.mcount > 0"))[:t.mcount:t.mcount]
-}
-
-func (t *uncommonType) exportedMethods() []method {
- if t.xcount == 0 {
- return nil
- }
- return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.xcount > 0"))[:t.xcount:t.xcount]
-}
-
-// resolveNameOff resolves a name offset from a base pointer.
-// The (*rtype).nameOff method is a convenience wrapper for this function.
-// Implemented in the runtime package.
-func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
-
-// resolveTypeOff resolves an *rtype offset from a base type.
-// The (*rtype).typeOff method is a convenience wrapper for this function.
-// Implemented in the runtime package.
-func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
-
-// resolveTextOff resolves a function pointer offset from a base type.
-// The (*rtype).textOff method is a convenience wrapper for this function.
-// Implemented in the runtime package.
-func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
-
-// addReflectOff adds a pointer to the reflection lookup map in the runtime.
-// It returns a new ID that can be used as a typeOff or textOff, and will
-// be resolved correctly. Implemented in the runtime package.
-func addReflectOff(ptr unsafe.Pointer) int32
-
-// resolveReflectName adds a name to the reflection lookup map in the runtime.
-// It returns a new nameOff that can be used to refer to the pointer.
-func resolveReflectName(n name) nameOff {
- return nameOff(addReflectOff(unsafe.Pointer(n.bytes)))
-}
-
-// resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
-// It returns a new typeOff that can be used to refer to the pointer.
-func resolveReflectType(t *rtype) typeOff {
- return typeOff(addReflectOff(unsafe.Pointer(t)))
-}
-
-// resolveReflectText adds a function pointer to the reflection lookup map in
-// the runtime. It returns a new textOff that can be used to refer to the
-// pointer.
-func resolveReflectText(ptr unsafe.Pointer) textOff {
- return textOff(addReflectOff(ptr))
-}
-
-type nameOff int32 // offset to a name
-type typeOff int32 // offset to an *rtype
-type textOff int32 // offset from top of text section
-
-func (t *rtype) nameOff(off nameOff) name {
- return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))}
-}
-
-func (t *rtype) typeOff(off typeOff) *rtype {
- return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
-}
-
-func (t *rtype) textOff(off textOff) unsafe.Pointer {
- return resolveTextOff(unsafe.Pointer(t), int32(off))
-}
-
-func (t *rtype) uncommon() *uncommonType {
- if t.tflag&tflagUncommon == 0 {
- return nil
- }
- switch t.Kind() {
- case Struct:
- return &(*structTypeUncommon)(unsafe.Pointer(t)).u
- case Pointer:
- type u struct {
- ptrType
- u uncommonType
- }
- return &(*u)(unsafe.Pointer(t)).u
- case Func:
- type u struct {
- funcType
- u uncommonType
- }
- return &(*u)(unsafe.Pointer(t)).u
- case Slice:
- type u struct {
- sliceType
- u uncommonType
- }
- return &(*u)(unsafe.Pointer(t)).u
- case Array:
- type u struct {
- arrayType
- u uncommonType
- }
- return &(*u)(unsafe.Pointer(t)).u
- case Chan:
- type u struct {
- chanType
- u uncommonType
- }
- return &(*u)(unsafe.Pointer(t)).u
- case Map:
- type u struct {
- mapType
- u uncommonType
- }
- return &(*u)(unsafe.Pointer(t)).u
- case Interface:
- type u struct {
- interfaceType
- u uncommonType
- }
- return &(*u)(unsafe.Pointer(t)).u
- default:
- type u struct {
- rtype
- u uncommonType
- }
- return &(*u)(unsafe.Pointer(t)).u
- }
-}
-
-func (t *rtype) String() string {
- s := t.nameOff(t.str).name()
- if t.tflag&tflagExtraStar != 0 {
- return s[1:]
- }
- return s
-}
-
-func (t *rtype) Size() uintptr { return t.size }
-
-func (t *rtype) Bits() int {
- if t == nil {
- panic("reflect: Bits of nil Type")
- }
- k := t.Kind()
- if k < Int || k > Complex128 {
- panic("reflect: Bits of non-arithmetic Type " + t.String())
- }
- return int(t.size) * 8
-}
-
-func (t *rtype) Align() int { return int(t.align) }
-
-func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
-
-func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
-
-func (t *rtype) pointers() bool { return t.ptrdata != 0 }
-
-func (t *rtype) common() *rtype { return t }
-
-func (t *rtype) exportedMethods() []method {
- ut := t.uncommon()
- if ut == nil {
- return nil
- }
- return ut.exportedMethods()
-}
-
-func (t *rtype) NumMethod() int {
- if t.Kind() == Interface {
- tt := (*interfaceType)(unsafe.Pointer(t))
- return tt.NumMethod()
- }
- return len(t.exportedMethods())
-}
-
-func (t *rtype) Method(i int) (m Method) {
- if t.Kind() == Interface {
- tt := (*interfaceType)(unsafe.Pointer(t))
- return tt.Method(i)
- }
- methods := t.exportedMethods()
- if i < 0 || i >= len(methods) {
- panic("reflect: Method index out of range")
- }
- p := methods[i]
- pname := t.nameOff(p.name)
- m.Name = pname.name()
- fl := flag(Func)
- mtyp := t.typeOff(p.mtyp)
- ft := (*funcType)(unsafe.Pointer(mtyp))
- in := make([]Type, 0, 1+len(ft.in()))
- in = append(in, t)
- for _, arg := range ft.in() {
- in = append(in, arg)
- }
- out := make([]Type, 0, len(ft.out()))
- for _, ret := range ft.out() {
- out = append(out, ret)
- }
- mt := FuncOf(in, out, ft.IsVariadic())
- m.Type = mt
- tfn := t.textOff(p.tfn)
- fn := unsafe.Pointer(&tfn)
- m.Func = Value{mt.(*rtype), fn, fl}
-
- m.Index = i
- return m
-}
-
-func (t *rtype) MethodByName(name string) (m Method, ok bool) {
- if t.Kind() == Interface {
- tt := (*interfaceType)(unsafe.Pointer(t))
- return tt.MethodByName(name)
- }
- ut := t.uncommon()
- if ut == nil {
- return Method{}, false
- }
- // TODO(mdempsky): Binary search.
- for i, p := range ut.exportedMethods() {
- if t.nameOff(p.name).name() == name {
- return t.Method(i), true
- }
- }
- return Method{}, false
-}
-
-func (t *rtype) PkgPath() string {
- if t.tflag&tflagNamed == 0 {
- return ""
- }
- ut := t.uncommon()
- if ut == nil {
- return ""
- }
- return t.nameOff(ut.pkgPath).name()
-}
-
-func (t *rtype) hasName() bool {
- return t.tflag&tflagNamed != 0
-}
-
-func (t *rtype) Name() string {
- if !t.hasName() {
- return ""
- }
- s := t.String()
- i := len(s) - 1
- sqBrackets := 0
- for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
- switch s[i] {
- case ']':
- sqBrackets++
- case '[':
- sqBrackets--
- }
- i--
- }
- return s[i+1:]
-}
-
-func (t *rtype) ChanDir() ChanDir {
- if t.Kind() != Chan {
- panic("reflect: ChanDir of non-chan type " + t.String())
- }
- tt := (*chanType)(unsafe.Pointer(t))
- return ChanDir(tt.dir)
-}
-
-func (t *rtype) IsVariadic() bool {
- if t.Kind() != Func {
- panic("reflect: IsVariadic of non-func type " + t.String())
- }
- tt := (*funcType)(unsafe.Pointer(t))
- return tt.outCount&(1<<15) != 0
-}
-
-func (t *rtype) Elem() Type {
- switch t.Kind() {
- case Array:
- tt := (*arrayType)(unsafe.Pointer(t))
- return toType(tt.elem)
- case Chan:
- tt := (*chanType)(unsafe.Pointer(t))
- return toType(tt.elem)
- case Map:
- tt := (*mapType)(unsafe.Pointer(t))
- return toType(tt.elem)
- case Pointer:
- tt := (*ptrType)(unsafe.Pointer(t))
- return toType(tt.elem)
- case Slice:
- tt := (*sliceType)(unsafe.Pointer(t))
- return toType(tt.elem)
- }
- panic("reflect: Elem of invalid type " + t.String())
-}
-
-func (t *rtype) Field(i int) StructField {
- if t.Kind() != Struct {
- panic("reflect: Field of non-struct type " + t.String())
- }
- tt := (*structType)(unsafe.Pointer(t))
- return tt.Field(i)
-}
-
-func (t *rtype) FieldByIndex(index []int) StructField {
- if t.Kind() != Struct {
- panic("reflect: FieldByIndex of non-struct type " + t.String())
- }
- tt := (*structType)(unsafe.Pointer(t))
- return tt.FieldByIndex(index)
-}
-
-func (t *rtype) FieldByName(name string) (StructField, bool) {
- if t.Kind() != Struct {
- panic("reflect: FieldByName of non-struct type " + t.String())
- }
- tt := (*structType)(unsafe.Pointer(t))
- return tt.FieldByName(name)
-}
-
-func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
- if t.Kind() != Struct {
- panic("reflect: FieldByNameFunc of non-struct type " + t.String())
- }
- tt := (*structType)(unsafe.Pointer(t))
- return tt.FieldByNameFunc(match)
-}
-
-func (t *rtype) In(i int) Type {
- if t.Kind() != Func {
- panic("reflect: In of non-func type " + t.String())
- }
- tt := (*funcType)(unsafe.Pointer(t))
- return toType(tt.in()[i])
-}
-
-func (t *rtype) Key() Type {
- if t.Kind() != Map {
- panic("reflect: Key of non-map type " + t.String())
- }
- tt := (*mapType)(unsafe.Pointer(t))
- return toType(tt.key)
-}
-
-func (t *rtype) Len() int {
- if t.Kind() != Array {
- panic("reflect: Len of non-array type " + t.String())
- }
- tt := (*arrayType)(unsafe.Pointer(t))
- return int(tt.len)
-}
-
-func (t *rtype) NumField() int {
- if t.Kind() != Struct {
- panic("reflect: NumField of non-struct type " + t.String())
- }
- tt := (*structType)(unsafe.Pointer(t))
- return len(tt.fields)
-}
-
-func (t *rtype) NumIn() int {
- if t.Kind() != Func {
- panic("reflect: NumIn of non-func type " + t.String())
- }
- tt := (*funcType)(unsafe.Pointer(t))
- return int(tt.inCount)
-}
-
-func (t *rtype) NumOut() int {
- if t.Kind() != Func {
- panic("reflect: NumOut of non-func type " + t.String())
- }
- tt := (*funcType)(unsafe.Pointer(t))
- return len(tt.out())
-}
-
-func (t *rtype) Out(i int) Type {
- if t.Kind() != Func {
- panic("reflect: Out of non-func type " + t.String())
- }
- tt := (*funcType)(unsafe.Pointer(t))
- return toType(tt.out()[i])
-}
-
-func (t *funcType) in() []*rtype {
- uadd := unsafe.Sizeof(*t)
- if t.tflag&tflagUncommon != 0 {
- uadd += unsafe.Sizeof(uncommonType{})
- }
- if t.inCount == 0 {
- return nil
- }
- return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.inCount:t.inCount]
-}
-
-func (t *funcType) out() []*rtype {
- uadd := unsafe.Sizeof(*t)
- if t.tflag&tflagUncommon != 0 {
- uadd += unsafe.Sizeof(uncommonType{})
- }
- outCount := t.outCount & (1<<15 - 1)
- if outCount == 0 {
- return nil
- }
- return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "outCount > 0"))[t.inCount : t.inCount+outCount : t.inCount+outCount]
-}
-
-// add returns p+x.
-//
-// The whySafe string is ignored, so that the function still inlines
-// as efficiently as p+x, but all call sites should use the string to
-// record why the addition is safe, which is to say why the addition
-// does not cause x to advance to the very end of p's allocation
-// and therefore point incorrectly at the next block in memory.
-func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
- return unsafe.Pointer(uintptr(p) + x)
-}
-
-func (d ChanDir) String() string {
- switch d {
- case SendDir:
- return "chan<-"
- case RecvDir:
- return "<-chan"
- case BothDir:
- return "chan"
- }
- return "ChanDir" + strconv.Itoa(int(d))
-}
-
-// Method returns the i'th method in the type's method set.
-func (t *interfaceType) Method(i int) (m Method) {
- if i < 0 || i >= len(t.methods) {
- return
- }
- p := &t.methods[i]
- pname := t.nameOff(p.name)
- m.Name = pname.name()
- if !pname.isExported() {
- m.PkgPath = pname.pkgPath()
- if m.PkgPath == "" {
- m.PkgPath = t.pkgPath.name()
- }
- }
- m.Type = toType(t.typeOff(p.typ))
- m.Index = i
- return
-}
-
-// NumMethod returns the number of interface methods in the type's method set.
-func (t *interfaceType) NumMethod() int { return len(t.methods) }
-
-// MethodByName returns the method with the given name in the type's method set.
-func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
- if t == nil {
- return
- }
- var p *imethod
- for i := range t.methods {
- p = &t.methods[i]
- if t.nameOff(p.name).name() == name {
- return t.Method(i), true
- }
- }
- return
-}
-
-// A StructField describes a single field in a struct.
-type StructField struct {
- // Name is the field name.
- Name string
-
- // PkgPath is the package path that qualifies a lower case (unexported)
- // field name. It is empty for upper case (exported) field names.
- // See https://golang.org/ref/spec#Uniqueness_of_identifiers
- PkgPath string
-
- Type Type // field type
- Tag StructTag // field tag string
- Offset uintptr // offset within struct, in bytes
- Index []int // index sequence for Type.FieldByIndex
- Anonymous bool // is an embedded field
-}
-
-// IsExported reports whether the field is exported.
-func (f StructField) IsExported() bool {
- return f.PkgPath == ""
-}
-
-// A StructTag is the tag string in a struct field.
-//
-// By convention, tag strings are a concatenation of
-// optionally space-separated key:"value" pairs.
-// Each key is a non-empty string consisting of non-control
-// characters other than space (U+0020 ' '), quote (U+0022 '"'),
-// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
-// characters and Go string literal syntax.
-type StructTag string
-
-// Get returns the value associated with key in the tag string.
-// If there is no such key in the tag, Get returns the empty string.
-// If the tag does not have the conventional format, the value
-// returned by Get is unspecified. To determine whether a tag is
-// explicitly set to the empty string, use Lookup.
-func (tag StructTag) Get(key string) string {
- v, _ := tag.Lookup(key)
- return v
-}
-
-// Lookup returns the value associated with key in the tag string.
-// If the key is present in the tag the value (which may be empty)
-// is returned. Otherwise the returned value will be the empty string.
-// The ok return value reports whether the value was explicitly set in
-// the tag string. If the tag does not have the conventional format,
-// the value returned by Lookup is unspecified.
-func (tag StructTag) Lookup(key string) (value string, ok bool) {
- // When modifying this code, also update the validateStructTag code
- // in cmd/vet/structtag.go.
-
- for tag != "" {
- // Skip leading space.
- i := 0
- for i < len(tag) && tag[i] == ' ' {
- i++
- }
- tag = tag[i:]
- if tag == "" {
- break
- }
-
- // Scan to colon. A space, a quote or a control character is a syntax error.
- // Strictly speaking, control chars include the range [0x7f, 0x9f], not just
- // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
- // as it is simpler to inspect the tag's bytes than the tag's runes.
- i = 0
- for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
- i++
- }
- if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
- break
- }
- name := string(tag[:i])
- tag = tag[i+1:]
-
- // Scan quoted string to find value.
- i = 1
- for i < len(tag) && tag[i] != '"' {
- if tag[i] == '\\' {
- i++
- }
- i++
- }
- if i >= len(tag) {
- break
- }
- qvalue := string(tag[:i+1])
- tag = tag[i+1:]
-
- if key == name {
- value, err := strconv.Unquote(qvalue)
- if err != nil {
- break
- }
- return value, true
- }
- }
- return "", false
-}
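-
-// A minimal sketch of Get and Lookup over a conventionally formatted tag:
-//
-//	tag := StructTag(`json:"name,omitempty" xml:"name"`)
-//	tag.Get("json")            // "name,omitempty"
-//	tag.Get("yaml")            // ""
-//	v, ok := tag.Lookup("xml") // v == "name", ok == true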
-
-// Field returns the i'th struct field.
-func (t *structType) Field(i int) (f StructField) {
- if i < 0 || i >= len(t.fields) {
- panic("reflect: Field index out of bounds")
- }
- p := &t.fields[i]
- f.Type = toType(p.typ)
- f.Name = p.name.name()
- f.Anonymous = p.embedded()
- if !p.name.isExported() {
- f.PkgPath = t.pkgPath.name()
- }
- if tag := p.name.tag(); tag != "" {
- f.Tag = StructTag(tag)
- }
- f.Offset = p.offset()
-
- // NOTE(rsc): This is the only allocation in the interface
- // presented by a reflect.Type. It would be nice to avoid,
- // at least in the common cases, but we need to make sure
- // that misbehaving clients of reflect cannot affect other
- // uses of reflect. One possibility is CL 5371098, but we
- // postponed that ugliness until there is a demonstrated
- // need for the performance. This is issue 2320.
- f.Index = []int{i}
- return
-}
-
-// TODO(gri): Should there be an error/bool indicator if the index
-// is wrong for FieldByIndex?
-
-// FieldByIndex returns the nested field corresponding to index.
-func (t *structType) FieldByIndex(index []int) (f StructField) {
- f.Type = toType(&t.rtype)
- for i, x := range index {
- if i > 0 {
- ft := f.Type
- if ft.Kind() == Pointer && ft.Elem().Kind() == Struct {
- ft = ft.Elem()
- }
- f.Type = ft
- }
- f = f.Type.Field(x)
- }
- return
-}
-
-// A fieldScan represents an item on the fieldByNameFunc scan work list.
-type fieldScan struct {
- typ *structType
- index []int
-}
-
-// FieldByNameFunc returns the struct field with a name that satisfies the
-// match function and a boolean to indicate if the field was found.
-func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
- // This uses the same condition that the Go language does: there must be a unique instance
- // of the match at a given depth level. If there are multiple instances of a match at the
- // same depth, they annihilate each other and inhibit any possible match at a lower level.
- // The algorithm is breadth first search, one depth level at a time.
-
- // The current and next slices are work queues:
- // current lists the fields to visit on this depth level,
- // and next lists the fields on the next lower level.
- current := []fieldScan{}
- next := []fieldScan{{typ: t}}
-
- // nextCount records the number of times an embedded type has been
- // encountered and considered for queueing in the 'next' slice.
- // We only queue the first one, but we increment the count on each.
- // If a struct type T can be reached more than once at a given depth level,
- // then it annihilates itself and need not be considered at all when we
- // process that next depth level.
- var nextCount map[*structType]int
-
- // visited records the structs that have been considered already.
- // Embedded pointer fields can create cycles in the graph of
- // reachable embedded types; visited avoids following those cycles.
- // It also avoids duplicated effort: if we didn't find the field in an
- // embedded type T at level 2, we won't find it in one at level 4 either.
- visited := map[*structType]bool{}
-
- for len(next) > 0 {
- current, next = next, current[:0]
- count := nextCount
- nextCount = nil
-
- // Process all the fields at this depth, now listed in 'current'.
- // The loop queues embedded fields found in 'next', for processing during the next
- // iteration. The multiplicity of the 'current' field counts is recorded
- // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
- for _, scan := range current {
- t := scan.typ
- if visited[t] {
- // We've looked through this type before, at a higher level.
- // That higher level would shadow the lower level we're now at,
- // so this one can't be useful to us. Ignore it.
- continue
- }
- visited[t] = true
- for i := range t.fields {
- f := &t.fields[i]
- // Find name and (for embedded field) type for field f.
- fname := f.name.name()
- var ntyp *rtype
- if f.embedded() {
- // Embedded field of type T or *T.
- ntyp = f.typ
- if ntyp.Kind() == Pointer {
- ntyp = ntyp.Elem().common()
- }
- }
-
- // Does it match?
- if match(fname) {
- // Potential match
- if count[t] > 1 || ok {
- // Name appeared multiple times at this level: annihilate.
- return StructField{}, false
- }
- result = t.Field(i)
- result.Index = nil
- result.Index = append(result.Index, scan.index...)
- result.Index = append(result.Index, i)
- ok = true
- continue
- }
-
- // Queue embedded struct fields for processing with next level,
- // but only if we haven't seen a match yet at this level and only
- // if the embedded types haven't already been queued.
- if ok || ntyp == nil || ntyp.Kind() != Struct {
- continue
- }
- styp := (*structType)(unsafe.Pointer(ntyp))
- if nextCount[styp] > 0 {
- nextCount[styp] = 2 // exact multiple doesn't matter
- continue
- }
- if nextCount == nil {
- nextCount = map[*structType]int{}
- }
- nextCount[styp] = 1
- if count[t] > 1 {
- nextCount[styp] = 2 // exact multiple doesn't matter
- }
- var index []int
- index = append(index, scan.index...)
- index = append(index, i)
- next = append(next, fieldScan{styp, index})
- }
- }
- if ok {
- break
- }
- }
- return
-}
-
-// FieldByName returns the struct field with the given name
-// and a boolean to indicate if the field was found.
-func (t *structType) FieldByName(name string) (f StructField, present bool) {
- // Quick check for top-level name, or struct without embedded fields.
- hasEmbeds := false
- if name != "" {
- for i := range t.fields {
- tf := &t.fields[i]
- if tf.name.name() == name {
- return t.Field(i), true
- }
- if tf.embedded() {
- hasEmbeds = true
- }
- }
- }
- if !hasEmbeds {
- return
- }
- return t.FieldByNameFunc(func(s string) bool { return s == name })
-}
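-
-// For concreteness, the annihilation rule as seen through FieldByName,
-// using hypothetical types A, B, and S:
-//
-//	type A struct{ X int }
-//	type B struct{ X int }
-//	type S struct {
-//		A
-//		B
-//	}
-//
-//	_, ok := reflect.TypeOf(S{}).FieldByName("X") // ok == false: the two X's at depth 1 cancel
-//	_, ok = reflect.TypeOf(S{}).FieldByName("A")  // ok == true: the embedded field itself matches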
-
-// TypeOf returns the reflection Type that represents the dynamic type of i.
-// If i is a nil interface value, TypeOf returns nil.
-func TypeOf(i any) Type {
- eface := *(*emptyInterface)(unsafe.Pointer(&i))
- return toType(eface.typ)
-}
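-
-// A small usage sketch:
-//
-//	var w io.Writer = os.Stdout
-//	reflect.TypeOf(w).String() // "*os.File": the dynamic type, not io.Writer
-//	reflect.TypeOf(nil)        // nil: a nil interface carries no dynamic type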
-
-// ptrMap is the cache for PointerTo.
-var ptrMap sync.Map // map[*rtype]*ptrType
-
-// PtrTo returns the pointer type with element t.
-// For example, if t represents type Foo, PtrTo(t) represents *Foo.
-//
-// PtrTo is the old spelling of PointerTo.
-// The two functions behave identically.
-func PtrTo(t Type) Type { return PointerTo(t) }
-
-// PointerTo returns the pointer type with element t.
-// For example, if t represents type Foo, PointerTo(t) represents *Foo.
-func PointerTo(t Type) Type {
- return t.(*rtype).ptrTo()
-}
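-
-// For example:
-//
-//	t := reflect.TypeOf(0)     // int
-//	pt := reflect.PointerTo(t) // *int
-//	pt.Elem() == t             // true: Elem inverts PointerTo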
-
-func (t *rtype) ptrTo() *rtype {
- if t.ptrToThis != 0 {
- return t.typeOff(t.ptrToThis)
- }
-
- // Check the cache.
- if pi, ok := ptrMap.Load(t); ok {
- return &pi.(*ptrType).rtype
- }
-
- // Look in known types.
- s := "*" + t.String()
- for _, tt := range typesByString(s) {
- p := (*ptrType)(unsafe.Pointer(tt))
- if p.elem != t {
- continue
- }
- pi, _ := ptrMap.LoadOrStore(t, p)
- return &pi.(*ptrType).rtype
- }
-
- // Create a new ptrType starting with the description
- // of an *unsafe.Pointer.
- var iptr any = (*unsafe.Pointer)(nil)
- prototype := *(**ptrType)(unsafe.Pointer(&iptr))
- pp := *prototype
-
- pp.str = resolveReflectName(newName(s, "", false))
- pp.ptrToThis = 0
-
- // For the type structures linked into the binary, the
- // compiler provides a good hash of the string.
- // Create a good hash for the new string by using
- // the FNV-1 hash's mixing function to combine the
- // old hash and the new "*".
- pp.hash = fnv1(t.hash, '*')
-
- pp.elem = t
-
- pi, _ := ptrMap.LoadOrStore(t, &pp)
- return &pi.(*ptrType).rtype
-}
-
-// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
-func fnv1(x uint32, list ...byte) uint32 {
- for _, b := range list {
- x = x*16777619 ^ uint32(b)
- }
- return x
-}
-
-func (t *rtype) Implements(u Type) bool {
- if u == nil {
- panic("reflect: nil type passed to Type.Implements")
- }
- if u.Kind() != Interface {
- panic("reflect: non-interface type passed to Type.Implements")
- }
- return implements(u.(*rtype), t)
-}
-
-func (t *rtype) AssignableTo(u Type) bool {
- if u == nil {
- panic("reflect: nil type passed to Type.AssignableTo")
- }
- uu := u.(*rtype)
- return directlyAssignable(uu, t) || implements(uu, t)
-}
-
-func (t *rtype) ConvertibleTo(u Type) bool {
- if u == nil {
- panic("reflect: nil type passed to Type.ConvertibleTo")
- }
- uu := u.(*rtype)
- return convertOp(uu, t) != nil
-}
-
-func (t *rtype) Comparable() bool {
- return t.equal != nil
-}
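-
-// For concreteness:
-//
-//	reflect.TypeOf([2]int{}).Comparable()   // true: array of comparable elements
-//	reflect.TypeOf([]int(nil)).Comparable() // false: slices have no ==
-//	reflect.TypeOf(func() {}).Comparable()  // false: funcs have no ==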
-
-// implements reports whether the type V implements the interface type T.
-func implements(T, V *rtype) bool {
- if T.Kind() != Interface {
- return false
- }
- t := (*interfaceType)(unsafe.Pointer(T))
- if len(t.methods) == 0 {
- return true
- }
-
- // The same algorithm applies in both cases, but the
- // method tables for an interface type and a concrete type
- // are different, so the code is duplicated.
- // In both cases the algorithm is a linear scan over the two
- // lists - T's methods and V's methods - simultaneously.
- // Since method tables are stored in a unique sorted order
- // (alphabetical, with no duplicate method names), the scan
- // through V's methods must hit a match for each of T's
- // methods along the way, or else V does not implement T.
- // This lets us run the scan in overall linear time instead of
- // the quadratic time a naive search would require.
- // See also ../runtime/iface.go.
- if V.Kind() == Interface {
- v := (*interfaceType)(unsafe.Pointer(V))
- i := 0
- for j := 0; j < len(v.methods); j++ {
- tm := &t.methods[i]
- tmName := t.nameOff(tm.name)
- vm := &v.methods[j]
- vmName := V.nameOff(vm.name)
- if vmName.name() == tmName.name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) {
- if !tmName.isExported() {
- tmPkgPath := tmName.pkgPath()
- if tmPkgPath == "" {
- tmPkgPath = t.pkgPath.name()
- }
- vmPkgPath := vmName.pkgPath()
- if vmPkgPath == "" {
- vmPkgPath = v.pkgPath.name()
- }
- if tmPkgPath != vmPkgPath {
- continue
- }
- }
- if i++; i >= len(t.methods) {
- return true
- }
- }
- }
- return false
- }
-
- v := V.uncommon()
- if v == nil {
- return false
- }
- i := 0
- vmethods := v.methods()
- for j := 0; j < int(v.mcount); j++ {
- tm := &t.methods[i]
- tmName := t.nameOff(tm.name)
- vm := vmethods[j]
- vmName := V.nameOff(vm.name)
- if vmName.name() == tmName.name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) {
- if !tmName.isExported() {
- tmPkgPath := tmName.pkgPath()
- if tmPkgPath == "" {
- tmPkgPath = t.pkgPath.name()
- }
- vmPkgPath := vmName.pkgPath()
- if vmPkgPath == "" {
- vmPkgPath = V.nameOff(v.pkgPath).name()
- }
- if tmPkgPath != vmPkgPath {
- continue
- }
- }
- if i++; i >= len(t.methods) {
- return true
- }
- }
- }
- return false
-}
-
-// specialChannelAssignability reports whether a value x of channel type V
-// can be directly assigned (using memmove) to another channel type T.
-// https://golang.org/doc/go_spec.html#Assignability
-// T and V must both be of Chan kind.
-func specialChannelAssignability(T, V *rtype) bool {
- // Special case:
- // x is a bidirectional channel value, T is a channel type,
- // x's type V and T have identical element types,
- // and at least one of V or T is not a defined type.
- return V.ChanDir() == BothDir && (T.Name() == "" || V.Name() == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
-}
-
-// directlyAssignable reports whether a value x of type V can be directly
-// assigned (using memmove) to a value of type T.
-// https://golang.org/doc/go_spec.html#Assignability
-// Ignoring the interface rules (implemented elsewhere)
-// and the ideal constant rules (no ideal constants at run time).
-func directlyAssignable(T, V *rtype) bool {
- // x's type V is identical to T?
- if T == V {
- return true
- }
-
- // Otherwise at least one of T and V must not be defined
- // and they must have the same kind.
- if T.hasName() && V.hasName() || T.Kind() != V.Kind() {
- return false
- }
-
- if T.Kind() == Chan && specialChannelAssignability(T, V) {
- return true
- }
-
- // x's type T and V must have identical underlying types.
- return haveIdenticalUnderlyingType(T, V, true)
-}
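-
-// A sketch of the "at most one side defined" rule above, with
-// hypothetical struct types:
-//
-//	type Named struct{ A int }
-//	type Other struct{ A int }
-//	var unnamed struct{ A int }
-//
-//	reflect.TypeOf(Named{}).AssignableTo(reflect.TypeOf(unnamed)) // true: one side is not defined
-//	reflect.TypeOf(Named{}).AssignableTo(reflect.TypeOf(Other{})) // false: both sides are defined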
-
-func haveIdenticalType(T, V Type, cmpTags bool) bool {
- if cmpTags {
- return T == V
- }
-
- if T.Name() != V.Name() || T.Kind() != V.Kind() || T.PkgPath() != V.PkgPath() {
- return false
- }
-
- return haveIdenticalUnderlyingType(T.common(), V.common(), false)
-}
-
-func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
- if T == V {
- return true
- }
-
- kind := T.Kind()
- if kind != V.Kind() {
- return false
- }
-
- // Non-composite types of equal kind have same underlying type
- // (the predefined instance of the type).
- if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
- return true
- }
-
- // Composite types.
- switch kind {
- case Array:
- return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
-
- case Chan:
- return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
-
- case Func:
- t := (*funcType)(unsafe.Pointer(T))
- v := (*funcType)(unsafe.Pointer(V))
- if t.outCount != v.outCount || t.inCount != v.inCount {
- return false
- }
- for i := 0; i < t.NumIn(); i++ {
- if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
- return false
- }
- }
- for i := 0; i < t.NumOut(); i++ {
- if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
- return false
- }
- }
- return true
-
- case Interface:
- t := (*interfaceType)(unsafe.Pointer(T))
- v := (*interfaceType)(unsafe.Pointer(V))
- if len(t.methods) == 0 && len(v.methods) == 0 {
- return true
- }
- // Might have the same methods but still
- // need a run time conversion.
- return false
-
- case Map:
- return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
-
- case Pointer, Slice:
- return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
-
- case Struct:
- t := (*structType)(unsafe.Pointer(T))
- v := (*structType)(unsafe.Pointer(V))
- if len(t.fields) != len(v.fields) {
- return false
- }
- if t.pkgPath.name() != v.pkgPath.name() {
- return false
- }
- for i := range t.fields {
- tf := &t.fields[i]
- vf := &v.fields[i]
- if tf.name.name() != vf.name.name() {
- return false
- }
- if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
- return false
- }
- if cmpTags && tf.name.tag() != vf.name.tag() {
- return false
- }
- if tf.offsetEmbed != vf.offsetEmbed {
- return false
- }
- }
- return true
- }
-
- return false
-}
-
-// typelinks is implemented in package runtime.
-// It returns a slice of the sections in each module,
-// and a slice of *rtype offsets in each module.
-//
-// The types in each module are sorted by string. That is, the first
-// two linked types of the first module are:
-//
-// d0 := sections[0]
-// t1 := (*rtype)(add(d0, offset[0][0]))
-// t2 := (*rtype)(add(d0, offset[0][1]))
-//
-// and
-//
-// t1.String() < t2.String()
-//
-// Note that strings are not unique identifiers for types:
-// there can be more than one with a given string.
-// Only types we might want to look up are included:
-// pointers, channels, maps, slices, and arrays.
-func typelinks() (sections []unsafe.Pointer, offset [][]int32)
-
-func rtypeOff(section unsafe.Pointer, off int32) *rtype {
- return (*rtype)(add(section, uintptr(off), "sizeof(rtype) > 0"))
-}
-
-// typesByString returns the subslice of typelinks() whose elements have
-// the given string representation.
-// It may be empty (no known types with that string) or may have
-// multiple elements (multiple types with that string).
-func typesByString(s string) []*rtype {
- sections, offset := typelinks()
- var ret []*rtype
-
- for offsI, offs := range offset {
- section := sections[offsI]
-
- // We are looking for the first index i where the string becomes >= s.
- // This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s).
- i, j := 0, len(offs)
- for i < j {
- h := i + (j-i)>>1 // avoid overflow when computing h
- // i ≤ h < j
- if !(rtypeOff(section, offs[h]).String() >= s) {
- i = h + 1 // preserves f(i-1) == false
- } else {
- j = h // preserves f(j) == true
- }
- }
- // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
-
- // Having found the first, linear scan forward to find the last.
- // We could do a second binary search, but the caller is going
- // to do a linear scan anyway.
- for j := i; j < len(offs); j++ {
- typ := rtypeOff(section, offs[j])
- if typ.String() != s {
- break
- }
- ret = append(ret, typ)
- }
- }
- return ret
-}
-
-// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
-var lookupCache sync.Map // map[cacheKey]*rtype
-
-// A cacheKey is the key for use in the lookupCache.
-// Four values describe any of the types we are looking for:
-// type kind, one or two subtypes, and an extra integer.
-type cacheKey struct {
- kind Kind
- t1 *rtype
- t2 *rtype
- extra uintptr
-}
-
-// The funcLookupCache caches FuncOf lookups.
-// FuncOf does not share the common lookupCache since cacheKey is not
-// sufficient to represent functions unambiguously.
-var funcLookupCache struct {
- sync.Mutex // Guards stores (but not loads) on m.
-
- // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
- // Elements of m are append-only and thus safe for concurrent reading.
- m sync.Map
-}
-
-// ChanOf returns the channel type with the given direction and element type.
-// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
-//
-// The gc runtime imposes a limit of 64 kB on channel element types.
-// If t's size is equal to or exceeds this limit, ChanOf panics.
-func ChanOf(dir ChanDir, t Type) Type {
- typ := t.(*rtype)
-
- // Look in cache.
- ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
- if ch, ok := lookupCache.Load(ckey); ok {
- return ch.(*rtype)
- }
-
- // This restriction is imposed by the gc compiler and the runtime.
- if typ.size >= 1<<16 {
- panic("reflect.ChanOf: element size too large")
- }
-
- // Look in known types.
- var s string
- switch dir {
- default:
- panic("reflect.ChanOf: invalid dir")
- case SendDir:
- s = "chan<- " + typ.String()
- case RecvDir:
- s = "<-chan " + typ.String()
- case BothDir:
- typeStr := typ.String()
- if typeStr[0] == '<' {
- // typ is recv chan, need parentheses as "<-" associates with leftmost
- // chan possible, see:
- // * https://golang.org/ref/spec#Channel_types
- // * https://github.com/golang/go/issues/39897
- s = "chan (" + typeStr + ")"
- } else {
- s = "chan " + typeStr
- }
- }
- for _, tt := range typesByString(s) {
- ch := (*chanType)(unsafe.Pointer(tt))
- if ch.elem == typ && ch.dir == uintptr(dir) {
- ti, _ := lookupCache.LoadOrStore(ckey, tt)
- return ti.(Type)
- }
- }
-
- // Make a channel type.
- var ichan any = (chan unsafe.Pointer)(nil)
- prototype := *(**chanType)(unsafe.Pointer(&ichan))
- ch := *prototype
- ch.tflag = tflagRegularMemory
- ch.dir = uintptr(dir)
- ch.str = resolveReflectName(newName(s, "", false))
- ch.hash = fnv1(typ.hash, 'c', byte(dir))
- ch.elem = typ
-
- ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
- return ti.(Type)
-}
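-
-// A usage sketch; note that MakeChan additionally requires a BothDir
-// channel type:
-//
-//	ct := reflect.ChanOf(reflect.BothDir, reflect.TypeOf(0)) // chan int
-//	ch := reflect.MakeChan(ct, 1)
-//	ch.Send(reflect.ValueOf(42))
-//	v, ok := ch.Recv() // v.Int() == 42, ok == true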
-
-// MapOf returns the map type with the given key and element types.
-// For example, if k represents int and e represents string,
-// MapOf(k, e) represents map[int]string.
-//
-// If the key type is not a valid map key type (that is, if it does
-// not implement Go's == operator), MapOf panics.
-func MapOf(key, elem Type) Type {
- ktyp := key.(*rtype)
- etyp := elem.(*rtype)
-
- if ktyp.equal == nil {
- panic("reflect.MapOf: invalid key type " + ktyp.String())
- }
-
- // Look in cache.
- ckey := cacheKey{Map, ktyp, etyp, 0}
- if mt, ok := lookupCache.Load(ckey); ok {
- return mt.(Type)
- }
-
- // Look in known types.
- s := "map[" + ktyp.String() + "]" + etyp.String()
- for _, tt := range typesByString(s) {
- mt := (*mapType)(unsafe.Pointer(tt))
- if mt.key == ktyp && mt.elem == etyp {
- ti, _ := lookupCache.LoadOrStore(ckey, tt)
- return ti.(Type)
- }
- }
-
- // Make a map type.
- // Note: flag values must match those used in the TMAP case
- // in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
- var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
- mt := **(**mapType)(unsafe.Pointer(&imap))
- mt.str = resolveReflectName(newName(s, "", false))
- mt.tflag = 0
- mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
- mt.key = ktyp
- mt.elem = etyp
- mt.bucket = bucketOf(ktyp, etyp)
- mt.hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
- return typehash(ktyp, p, seed)
- }
- mt.flags = 0
- if ktyp.size > maxKeySize {
- mt.keysize = uint8(goarch.PtrSize)
- mt.flags |= 1 // indirect key
- } else {
- mt.keysize = uint8(ktyp.size)
- }
- if etyp.size > maxValSize {
- mt.valuesize = uint8(goarch.PtrSize)
- mt.flags |= 2 // indirect value
- } else {
- mt.valuesize = uint8(etyp.size)
- }
- mt.bucketsize = uint16(mt.bucket.size)
- if isReflexive(ktyp) {
- mt.flags |= 4
- }
- if needKeyUpdate(ktyp) {
- mt.flags |= 8
- }
- if hashMightPanic(ktyp) {
- mt.flags |= 16
- }
- mt.ptrToThis = 0
-
- ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
- return ti.(Type)
-}
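-
-// A usage sketch:
-//
-//	mt := reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0)) // map[string]int
-//	m := reflect.MakeMap(mt)
-//	m.SetMapIndex(reflect.ValueOf("k"), reflect.ValueOf(7))
-//	m.MapIndex(reflect.ValueOf("k")).Int() // 7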
-
-// TODO(crawshaw): as these funcTypeFixedN structs have no methods,
-// they could be defined at runtime using the StructOf function.
-type funcTypeFixed4 struct {
- funcType
- args [4]*rtype
-}
-type funcTypeFixed8 struct {
- funcType
- args [8]*rtype
-}
-type funcTypeFixed16 struct {
- funcType
- args [16]*rtype
-}
-type funcTypeFixed32 struct {
- funcType
- args [32]*rtype
-}
-type funcTypeFixed64 struct {
- funcType
- args [64]*rtype
-}
-type funcTypeFixed128 struct {
- funcType
- args [128]*rtype
-}
-
-// FuncOf returns the function type with the given argument and result types.
-// For example if k represents int and e represents string,
-// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
-//
-// The variadic argument controls whether the function is variadic. FuncOf
-// panics if variadic is true and in[len(in)-1] does not represent a slice.
-func FuncOf(in, out []Type, variadic bool) Type {
- if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
- panic("reflect.FuncOf: last arg of variadic func must be slice")
- }
-
- // Make a func type.
- var ifunc any = (func())(nil)
- prototype := *(**funcType)(unsafe.Pointer(&ifunc))
- n := len(in) + len(out)
-
- var ft *funcType
- var args []*rtype
- switch {
- case n <= 4:
- fixed := new(funcTypeFixed4)
- args = fixed.args[:0:len(fixed.args)]
- ft = &fixed.funcType
- case n <= 8:
- fixed := new(funcTypeFixed8)
- args = fixed.args[:0:len(fixed.args)]
- ft = &fixed.funcType
- case n <= 16:
- fixed := new(funcTypeFixed16)
- args = fixed.args[:0:len(fixed.args)]
- ft = &fixed.funcType
- case n <= 32:
- fixed := new(funcTypeFixed32)
- args = fixed.args[:0:len(fixed.args)]
- ft = &fixed.funcType
- case n <= 64:
- fixed := new(funcTypeFixed64)
- args = fixed.args[:0:len(fixed.args)]
- ft = &fixed.funcType
- case n <= 128:
- fixed := new(funcTypeFixed128)
- args = fixed.args[:0:len(fixed.args)]
- ft = &fixed.funcType
- default:
- panic("reflect.FuncOf: too many arguments")
- }
- *ft = *prototype
-
- // Build a hash and minimally populate ft.
- var hash uint32
- for _, in := range in {
- t := in.(*rtype)
- args = append(args, t)
- hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
- }
- if variadic {
- hash = fnv1(hash, 'v')
- }
- hash = fnv1(hash, '.')
- for _, out := range out {
- t := out.(*rtype)
- args = append(args, t)
- hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
- }
- if len(args) > 50 {
- panic("reflect.FuncOf does not support more than 50 arguments")
- }
- ft.tflag = 0
- ft.hash = hash
- ft.inCount = uint16(len(in))
- ft.outCount = uint16(len(out))
- if variadic {
- ft.outCount |= 1 << 15
- }
-
- // Look in cache.
- if ts, ok := funcLookupCache.m.Load(hash); ok {
- for _, t := range ts.([]*rtype) {
- if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
- return t
- }
- }
- }
-
- // Not in cache, lock and retry.
- funcLookupCache.Lock()
- defer funcLookupCache.Unlock()
- if ts, ok := funcLookupCache.m.Load(hash); ok {
- for _, t := range ts.([]*rtype) {
- if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
- return t
- }
- }
- }
-
- addToCache := func(tt *rtype) Type {
- var rts []*rtype
- if rti, ok := funcLookupCache.m.Load(hash); ok {
- rts = rti.([]*rtype)
- }
- funcLookupCache.m.Store(hash, append(rts, tt))
- return tt
- }
-
- // Look in known types for the same string representation.
- str := funcStr(ft)
- for _, tt := range typesByString(str) {
- if haveIdenticalUnderlyingType(&ft.rtype, tt, true) {
- return addToCache(tt)
- }
- }
-
- // Populate the remaining fields of ft and store in cache.
- ft.str = resolveReflectName(newName(str, "", false))
- ft.ptrToThis = 0
- return addToCache(&ft.rtype)
-}
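-
-// A usage sketch pairing FuncOf with MakeFunc:
-//
-//	ft := reflect.FuncOf(
-//		[]reflect.Type{reflect.TypeOf(0)},
-//		[]reflect.Type{reflect.TypeOf("")},
-//		false,
-//	) // func(int) string
-//	fn := reflect.MakeFunc(ft, func(args []reflect.Value) []reflect.Value {
-//		return []reflect.Value{reflect.ValueOf(strconv.Itoa(int(args[0].Int())))}
-//	})
-//	fn.Call([]reflect.Value{reflect.ValueOf(42)})[0].String() // "42"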
-
-// funcStr builds a string representation of a funcType.
-func funcStr(ft *funcType) string {
- repr := make([]byte, 0, 64)
- repr = append(repr, "func("...)
- for i, t := range ft.in() {
- if i > 0 {
- repr = append(repr, ", "...)
- }
- if ft.IsVariadic() && i == int(ft.inCount)-1 {
- repr = append(repr, "..."...)
- repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.String()...)
- } else {
- repr = append(repr, t.String()...)
- }
- }
- repr = append(repr, ')')
- out := ft.out()
- if len(out) == 1 {
- repr = append(repr, ' ')
- } else if len(out) > 1 {
- repr = append(repr, " ("...)
- }
- for i, t := range out {
- if i > 0 {
- repr = append(repr, ", "...)
- }
- repr = append(repr, t.String()...)
- }
- if len(out) > 1 {
- repr = append(repr, ')')
- }
- return string(repr)
-}
-
-// isReflexive reports whether the == operation on the type is reflexive.
-// That is, x == x for all values x of type t.
-func isReflexive(t *rtype) bool {
- switch t.Kind() {
- case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, String, UnsafePointer:
- return true
- case Float32, Float64, Complex64, Complex128, Interface:
- return false
- case Array:
- tt := (*arrayType)(unsafe.Pointer(t))
- return isReflexive(tt.elem)
- case Struct:
- tt := (*structType)(unsafe.Pointer(t))
- for _, f := range tt.fields {
- if !isReflexive(f.typ) {
- return false
- }
- }
- return true
- default:
- // Func, Map, Slice, Invalid
- panic("isReflexive called on non-key type " + t.String())
- }
-}
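-
-// Editor's note (illustrative, not from the original file): the canonical
-// non-reflexive key is a float NaN, which compares unequal to itself, so the
-// map implementation cannot re-find such keys by equality:
-//
-//	n := math.NaN()
-//	fmt.Println(n == n) // false
-//	m := map[float64]int{n: 1}
-//	m[math.NaN()] = 2
-//	fmt.Println(len(m)) // 2: every NaN key is a distinct entry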
-
-// needKeyUpdate reports whether map overwrites require the key to be copied.
-func needKeyUpdate(t *rtype) bool {
- switch t.Kind() {
- case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, UnsafePointer:
- return false
- case Float32, Float64, Complex64, Complex128, Interface, String:
- // Float keys can be updated from +0 to -0.
- // String keys can be updated to use a smaller backing store.
- // Interfaces might have floats or strings in them.
- return true
- case Array:
- tt := (*arrayType)(unsafe.Pointer(t))
- return needKeyUpdate(tt.elem)
- case Struct:
- tt := (*structType)(unsafe.Pointer(t))
- for _, f := range tt.fields {
- if needKeyUpdate(f.typ) {
- return true
- }
- }
- return false
- default:
- // Func, Map, Slice, Invalid
- panic("needKeyUpdate called on non-key type " + t.String())
- }
-}
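-
-// Editor's note (illustrative, not from the original file): the "+0 to -0"
-// case above can be observed directly; on overwrite the stored key is
-// replaced by the new, equal key:
-//
-//	m := map[float64]string{}
-//	m[0.0] = "a"
-//	m[math.Copysign(0, -1)] = "b" // -0.0 == +0.0: same entry, key updated
-//	for k := range m {
-//		fmt.Println(math.Signbit(k)) // true: the stored key is now -0
-//	}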
-
-// hashMightPanic reports whether the hash of a map key of type t might panic.
-func hashMightPanic(t *rtype) bool {
- switch t.Kind() {
- case Interface:
- return true
- case Array:
- tt := (*arrayType)(unsafe.Pointer(t))
- return hashMightPanic(tt.elem)
- case Struct:
- tt := (*structType)(unsafe.Pointer(t))
- for _, f := range tt.fields {
- if hashMightPanic(f.typ) {
- return true
- }
- }
- return false
- default:
- return false
- }
-}
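-
-// Editor's note (illustrative, not from the original file): hashing panics
-// only when the dynamic type of the key is unhashable, which is why the
-// Interface case must conservatively report true:
-//
-//	m := map[any]int{}
-//	m["ok"] = 1      // fine: string is hashable
-//	m[func() {}] = 2 // panics: runtime error: hash of unhashable type func()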
-
-// Make sure these routines stay in sync with ../../runtime/map.go!
-// These types exist only for GC, so we only fill out GC relevant info.
-// Currently, that's just size and the GC program. We also fill in string
-// for possible debugging use.
-const (
- bucketSize uintptr = 8
- maxKeySize uintptr = 128
- maxValSize uintptr = 128
-)
-
-func bucketOf(ktyp, etyp *rtype) *rtype {
- if ktyp.size > maxKeySize {
- ktyp = PointerTo(ktyp).(*rtype)
- }
- if etyp.size > maxValSize {
- etyp = PointerTo(etyp).(*rtype)
- }
-
- // Prepare GC data if any.
- // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
- // or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
- // Note that since the key and value are known to be <= 128 bytes,
- // they're guaranteed to have bitmaps instead of GC programs.
- var gcdata *byte
- var ptrdata uintptr
- var overflowPad uintptr
-
- size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + goarch.PtrSize
- if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
- panic("reflect: bad size computation in MapOf")
- }
-
- if ktyp.ptrdata != 0 || etyp.ptrdata != 0 {
- nptr := (bucketSize*(1+ktyp.size+etyp.size) + goarch.PtrSize) / goarch.PtrSize
- mask := make([]byte, (nptr+7)/8)
- base := bucketSize / goarch.PtrSize
-
- if ktyp.ptrdata != 0 {
- emitGCMask(mask, base, ktyp, bucketSize)
- }
- base += bucketSize * ktyp.size / goarch.PtrSize
-
- if etyp.ptrdata != 0 {
- emitGCMask(mask, base, etyp, bucketSize)
- }
- base += bucketSize * etyp.size / goarch.PtrSize
- base += overflowPad / goarch.PtrSize
-
- word := base
- mask[word/8] |= 1 << (word % 8)
- gcdata = &mask[0]
- ptrdata = (word + 1) * goarch.PtrSize
-
- // overflow word must be last
- if ptrdata != size {
- panic("reflect: bad layout computation in MapOf")
- }
- }
-
- b := &rtype{
- align: goarch.PtrSize,
- size: size,
- kind: uint8(Struct),
- ptrdata: ptrdata,
- gcdata: gcdata,
- }
- if overflowPad > 0 {
- b.align = 8
- }
- s := "bucket(" + ktyp.String() + "," + etyp.String() + ")"
- b.str = resolveReflectName(newName(s, "", false))
- return b
-}
-
-func (t *rtype) gcSlice(begin, end uintptr) []byte {
- return (*[1 << 30]byte)(unsafe.Pointer(t.gcdata))[begin:end:end]
-}
-
-// emitGCMask writes the GC mask for [n]typ into out, starting at bit
-// offset base.
-func emitGCMask(out []byte, base uintptr, typ *rtype, n uintptr) {
- if typ.kind&kindGCProg != 0 {
- panic("reflect: unexpected GC program")
- }
- ptrs := typ.ptrdata / goarch.PtrSize
- words := typ.size / goarch.PtrSize
- mask := typ.gcSlice(0, (ptrs+7)/8)
- for j := uintptr(0); j < ptrs; j++ {
- if (mask[j/8]>>(j%8))&1 != 0 {
- for i := uintptr(0); i < n; i++ {
- k := base + i*words + j
- out[k/8] |= 1 << (k % 8)
- }
- }
- }
-}
-
-// appendGCProg appends the GC program for the first ptrdata bytes of
-// typ to dst and returns the extended slice.
-func appendGCProg(dst []byte, typ *rtype) []byte {
- if typ.kind&kindGCProg != 0 {
- // Element has GC program; emit one element.
- n := uintptr(*(*uint32)(unsafe.Pointer(typ.gcdata)))
- prog := typ.gcSlice(4, 4+n-1)
- return append(dst, prog...)
- }
-
- // Element is small with pointer mask; use as literal bits.
- ptrs := typ.ptrdata / goarch.PtrSize
- mask := typ.gcSlice(0, (ptrs+7)/8)
-
- // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
- for ; ptrs > 120; ptrs -= 120 {
- dst = append(dst, 120)
- dst = append(dst, mask[:15]...)
- mask = mask[15:]
- }
-
- dst = append(dst, byte(ptrs))
- dst = append(dst, mask...)
- return dst
-}
-
-// SliceOf returns the slice type with element type t.
-// For example, if t represents int, SliceOf(t) represents []int.
-func SliceOf(t Type) Type {
- typ := t.(*rtype)
-
- // Look in cache.
- ckey := cacheKey{Slice, typ, nil, 0}
- if slice, ok := lookupCache.Load(ckey); ok {
- return slice.(Type)
- }
-
- // Look in known types.
- s := "[]" + typ.String()
- for _, tt := range typesByString(s) {
- slice := (*sliceType)(unsafe.Pointer(tt))
- if slice.elem == typ {
- ti, _ := lookupCache.LoadOrStore(ckey, tt)
- return ti.(Type)
- }
- }
-
- // Make a slice type.
- var islice any = ([]unsafe.Pointer)(nil)
- prototype := *(**sliceType)(unsafe.Pointer(&islice))
- slice := *prototype
- slice.tflag = 0
- slice.str = resolveReflectName(newName(s, "", false))
- slice.hash = fnv1(typ.hash, '[')
- slice.elem = typ
- slice.ptrToThis = 0
-
- ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
- return ti.(Type)
-}
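-
-// Editor's note (illustrative, not from the original file): SliceOf yields
-// the same canonical type descriptor the compiler would emit, so the results
-// compare equal as Types:
-//
-//	t := reflect.SliceOf(reflect.TypeOf(0))
-//	fmt.Println(t)                               // []int
-//	fmt.Println(t == reflect.TypeOf([]int(nil))) // true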
-
-// The structLookupCache caches StructOf lookups.
-// StructOf does not share the common lookupCache since we need to pin
-// the memory associated with *structTypeFixedN.
-var structLookupCache struct {
- sync.Mutex // Guards stores (but not loads) on m.
-
- // m is a map[uint32][]Type keyed by the hash calculated in StructOf.
- // Elements in m are append-only and thus safe for concurrent reading.
- m sync.Map
-}
-
-type structTypeUncommon struct {
- structType
- u uncommonType
-}
-
-// isLetter reports whether a given 'rune' is classified as a Letter.
-func isLetter(ch rune) bool {
- return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
-}
-
-// isValidFieldName checks if a string is a valid (struct) field name or not.
-//
-// According to the language spec, a field name should be an identifier.
-//
-// identifier = letter { letter | unicode_digit } .
-// letter = unicode_letter | "_" .
-func isValidFieldName(fieldName string) bool {
- for i, c := range fieldName {
- if i == 0 && !isLetter(c) {
- return false
- }
-
- if !(isLetter(c) || unicode.IsDigit(c)) {
- return false
- }
- }
-
- return len(fieldName) > 0
-}
-
-// StructOf returns the struct type containing fields.
-// The Offset and Index fields are ignored and computed as they would be
-// by the compiler.
-//
-// StructOf currently does not generate wrapper methods for embedded
-// fields and panics if passed unexported StructFields.
-// These limitations may be lifted in a future version.
-func StructOf(fields []StructField) Type {
- var (
- hash = fnv1(0, []byte("struct {")...)
- size uintptr
- typalign uint8
- comparable = true
- methods []method
-
- fs = make([]structField, len(fields))
- repr = make([]byte, 0, 64)
- fset = map[string]struct{}{} // fields' names
-
- hasGCProg = false // records whether a struct-field type has a GCProg
- )
-
- lastzero := uintptr(0)
- repr = append(repr, "struct {"...)
- pkgpath := ""
- for i, field := range fields {
- if field.Name == "" {
- panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
- }
- if !isValidFieldName(field.Name) {
- panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
- }
- if field.Type == nil {
- panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
- }
- f, fpkgpath := runtimeStructField(field)
- ft := f.typ
- if ft.kind&kindGCProg != 0 {
- hasGCProg = true
- }
- if fpkgpath != "" {
- if pkgpath == "" {
- pkgpath = fpkgpath
- } else if pkgpath != fpkgpath {
- panic("reflect.Struct: fields with different PkgPath " + pkgpath + " and " + fpkgpath)
- }
- }
-
- // Update string and hash
- name := f.name.name()
- hash = fnv1(hash, []byte(name)...)
- repr = append(repr, (" " + name)...)
- if f.embedded() {
- // Embedded field
- if f.typ.Kind() == Pointer {
- // Embedded ** and *interface{} are illegal
- elem := ft.Elem()
- if k := elem.Kind(); k == Pointer || k == Interface {
- panic("reflect.StructOf: illegal embedded field type " + ft.String())
- }
- }
-
- switch f.typ.Kind() {
- case Interface:
- ift := (*interfaceType)(unsafe.Pointer(ft))
- for im, m := range ift.methods {
- if ift.nameOff(m.name).pkgPath() != "" {
- // TODO(sbinet). Issue 15924.
- panic("reflect: embedded interface with unexported method(s) not implemented")
- }
-
- var (
- mtyp = ift.typeOff(m.typ)
- ifield = i
- imethod = im
- ifn Value
- tfn Value
- )
-
- if ft.kind&kindDirectIface != 0 {
- tfn = MakeFunc(mtyp, func(in []Value) []Value {
- var args []Value
- var recv = in[0]
- if len(in) > 1 {
- args = in[1:]
- }
- return recv.Field(ifield).Method(imethod).Call(args)
- })
- ifn = MakeFunc(mtyp, func(in []Value) []Value {
- var args []Value
- var recv = in[0]
- if len(in) > 1 {
- args = in[1:]
- }
- return recv.Field(ifield).Method(imethod).Call(args)
- })
- } else {
- tfn = MakeFunc(mtyp, func(in []Value) []Value {
- var args []Value
- var recv = in[0]
- if len(in) > 1 {
- args = in[1:]
- }
- return recv.Field(ifield).Method(imethod).Call(args)
- })
- ifn = MakeFunc(mtyp, func(in []Value) []Value {
- var args []Value
- var recv = Indirect(in[0])
- if len(in) > 1 {
- args = in[1:]
- }
- return recv.Field(ifield).Method(imethod).Call(args)
- })
- }
-
- methods = append(methods, method{
- name: resolveReflectName(ift.nameOff(m.name)),
- mtyp: resolveReflectType(mtyp),
- ifn: resolveReflectText(unsafe.Pointer(&ifn)),
- tfn: resolveReflectText(unsafe.Pointer(&tfn)),
- })
- }
- case Pointer:
- ptr := (*ptrType)(unsafe.Pointer(ft))
- if unt := ptr.uncommon(); unt != nil {
- if i > 0 && unt.mcount > 0 {
- // Issue 15924.
- panic("reflect: embedded type with methods not implemented if type is not first field")
- }
- if len(fields) > 1 {
- panic("reflect: embedded type with methods not implemented if there is more than one field")
- }
- for _, m := range unt.methods() {
- mname := ptr.nameOff(m.name)
- if mname.pkgPath() != "" {
- // TODO(sbinet).
- // Issue 15924.
- panic("reflect: embedded interface with unexported method(s) not implemented")
- }
- methods = append(methods, method{
- name: resolveReflectName(mname),
- mtyp: resolveReflectType(ptr.typeOff(m.mtyp)),
- ifn: resolveReflectText(ptr.textOff(m.ifn)),
- tfn: resolveReflectText(ptr.textOff(m.tfn)),
- })
- }
- }
- if unt := ptr.elem.uncommon(); unt != nil {
- for _, m := range unt.methods() {
- mname := ptr.nameOff(m.name)
- if mname.pkgPath() != "" {
- // TODO(sbinet)
- // Issue 15924.
- panic("reflect: embedded interface with unexported method(s) not implemented")
- }
- methods = append(methods, method{
- name: resolveReflectName(mname),
- mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)),
- ifn: resolveReflectText(ptr.elem.textOff(m.ifn)),
- tfn: resolveReflectText(ptr.elem.textOff(m.tfn)),
- })
- }
- }
- default:
- if unt := ft.uncommon(); unt != nil {
- if i > 0 && unt.mcount > 0 {
- // Issue 15924.
- panic("reflect: embedded type with methods not implemented if type is not first field")
- }
- if len(fields) > 1 && ft.kind&kindDirectIface != 0 {
- panic("reflect: embedded type with methods not implemented for non-pointer type")
- }
- for _, m := range unt.methods() {
- mname := ft.nameOff(m.name)
- if mname.pkgPath() != "" {
- // TODO(sbinet)
- // Issue 15924.
- panic("reflect: embedded interface with unexported method(s) not implemented")
- }
- methods = append(methods, method{
- name: resolveReflectName(mname),
- mtyp: resolveReflectType(ft.typeOff(m.mtyp)),
- ifn: resolveReflectText(ft.textOff(m.ifn)),
- tfn: resolveReflectText(ft.textOff(m.tfn)),
- })
- }
- }
- }
- }
- if _, dup := fset[name]; dup && name != "_" {
- panic("reflect.StructOf: duplicate field " + name)
- }
- fset[name] = struct{}{}
-
- hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash))
-
- repr = append(repr, (" " + ft.String())...)
- if f.name.hasTag() {
- hash = fnv1(hash, []byte(f.name.tag())...)
- repr = append(repr, (" " + strconv.Quote(f.name.tag()))...)
- }
- if i < len(fields)-1 {
- repr = append(repr, ';')
- }
-
- comparable = comparable && (ft.equal != nil)
-
- offset := align(size, uintptr(ft.align))
- if ft.align > typalign {
- typalign = ft.align
- }
- size = offset + ft.size
- f.offsetEmbed |= offset << 1
-
- if ft.size == 0 {
- lastzero = size
- }
-
- fs[i] = f
- }
-
- if size > 0 && lastzero == size {
- // This is a non-zero sized struct that ends in a
- // zero-sized field. We add an extra byte of padding,
- // to ensure that taking the address of the final
- // zero-sized field can't manufacture a pointer to the
- // next object in the heap. See issue 9401.
- size++
- }
-
- var typ *structType
- var ut *uncommonType
-
- if len(methods) == 0 {
- t := new(structTypeUncommon)
- typ = &t.structType
- ut = &t.u
- } else {
- // A *rtype representing a struct is followed directly in memory by an
- // array of method objects representing the methods attached to the
- // struct. To get the same layout for a run time generated type, we
- // need an array directly following the uncommonType memory.
- // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN.
- tt := New(StructOf([]StructField{
- {Name: "S", Type: TypeOf(structType{})},
- {Name: "U", Type: TypeOf(uncommonType{})},
- {Name: "M", Type: ArrayOf(len(methods), TypeOf(methods[0]))},
- }))
-
- typ = (*structType)(tt.Elem().Field(0).Addr().UnsafePointer())
- ut = (*uncommonType)(tt.Elem().Field(1).Addr().UnsafePointer())
-
- copy(tt.Elem().Field(2).Slice(0, len(methods)).Interface().([]method), methods)
- }
- // TODO(sbinet): Once we allow embedding multiple types,
- // methods will need to be sorted like the compiler does.
- // TODO(sbinet): Once we allow non-exported methods, we will
- // need to compute xcount as the number of exported methods.
- ut.mcount = uint16(len(methods))
- ut.xcount = ut.mcount
- ut.moff = uint32(unsafe.Sizeof(uncommonType{}))
-
- if len(fs) > 0 {
- repr = append(repr, ' ')
- }
- repr = append(repr, '}')
- hash = fnv1(hash, '}')
- str := string(repr)
-
- // Round the size up to be a multiple of the alignment.
- size = align(size, uintptr(typalign))
-
- // Make the struct type.
- var istruct any = struct{}{}
- prototype := *(**structType)(unsafe.Pointer(&istruct))
- *typ = *prototype
- typ.fields = fs
- if pkgpath != "" {
- typ.pkgPath = newName(pkgpath, "", false)
- }
-
- // Look in cache.
- if ts, ok := structLookupCache.m.Load(hash); ok {
- for _, st := range ts.([]Type) {
- t := st.common()
- if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
- return t
- }
- }
- }
-
- // Not in cache, lock and retry.
- structLookupCache.Lock()
- defer structLookupCache.Unlock()
- if ts, ok := structLookupCache.m.Load(hash); ok {
- for _, st := range ts.([]Type) {
- t := st.common()
- if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
- return t
- }
- }
- }
-
- addToCache := func(t Type) Type {
- var ts []Type
- if ti, ok := structLookupCache.m.Load(hash); ok {
- ts = ti.([]Type)
- }
- structLookupCache.m.Store(hash, append(ts, t))
- return t
- }
-
- // Look in known types.
- for _, t := range typesByString(str) {
- if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
- // even if 't' wasn't a structType with methods, we should be ok
- // as the 'u uncommonType' field won't be accessed except when
- // tflag&tflagUncommon is set.
- return addToCache(t)
- }
- }
-
- typ.str = resolveReflectName(newName(str, "", false))
- typ.tflag = 0 // TODO: set tflagRegularMemory
- typ.hash = hash
- typ.size = size
- typ.ptrdata = typeptrdata(typ.common())
- typ.align = typalign
- typ.fieldAlign = typalign
- typ.ptrToThis = 0
- if len(methods) > 0 {
- typ.tflag |= tflagUncommon
- }
-
- if hasGCProg {
- lastPtrField := 0
- for i, ft := range fs {
- if ft.typ.pointers() {
- lastPtrField = i
- }
- }
- prog := []byte{0, 0, 0, 0} // will be length of prog
- var off uintptr
- for i, ft := range fs {
- if i > lastPtrField {
- // gcprog should not include anything for any field after
- // the last field that contains pointer data
- break
- }
- if !ft.typ.pointers() {
- // Ignore pointerless fields.
- continue
- }
- // Pad to start of this field with zeros.
- if ft.offset() > off {
- n := (ft.offset() - off) / goarch.PtrSize
- prog = append(prog, 0x01, 0x00) // emit a 0 bit
- if n > 1 {
- prog = append(prog, 0x81) // repeat previous bit
- prog = appendVarint(prog, n-1) // n-1 times
- }
- off = ft.offset()
- }
-
- prog = appendGCProg(prog, ft.typ)
- off += ft.typ.ptrdata
- }
- prog = append(prog, 0)
- *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
- typ.kind |= kindGCProg
- typ.gcdata = &prog[0]
- } else {
- typ.kind &^= kindGCProg
- bv := new(bitVector)
- addTypeBits(bv, 0, typ.common())
- if len(bv.data) > 0 {
- typ.gcdata = &bv.data[0]
- }
- }
- typ.equal = nil
- if comparable {
- typ.equal = func(p, q unsafe.Pointer) bool {
- for _, ft := range typ.fields {
- pi := add(p, ft.offset(), "&x.field safe")
- qi := add(q, ft.offset(), "&x.field safe")
- if !ft.typ.equal(pi, qi) {
- return false
- }
- }
- return true
- }
- }
-
- switch {
- case len(fs) == 1 && !ifaceIndir(fs[0].typ):
- // structs of 1 direct iface type can be direct
- typ.kind |= kindDirectIface
- default:
- typ.kind &^= kindDirectIface
- }
-
- return addToCache(&typ.rtype)
-}
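-
-// Editor's note (illustrative sketch, not part of the original file):
-// StructOf building and populating a struct type at run time:
-//
-//	t := reflect.StructOf([]reflect.StructField{
-//		{Name: "Name", Type: reflect.TypeOf(""), Tag: `json:"name"`},
-//		{Name: "Age", Type: reflect.TypeOf(0)},
-//	})
-//	v := reflect.New(t).Elem()
-//	v.Field(0).SetString("gopher")
-//	v.Field(1).SetInt(11)
-//	fmt.Printf("%+v\n", v.Interface()) // {Name:gopher Age:11}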
-
-// runtimeStructField takes a StructField value passed to StructOf and
-// returns both the corresponding internal representation, of type
-// structField, and the pkgpath value to use for this field.
-func runtimeStructField(field StructField) (structField, string) {
- if field.Anonymous && field.PkgPath != "" {
- panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set")
- }
-
- if field.IsExported() {
- // Best-effort check for misuse.
- // Since this field will be treated as exported, not much harm done if Unicode lowercase slips through.
- c := field.Name[0]
- if 'a' <= c && c <= 'z' || c == '_' {
- panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
- }
- }
-
- offsetEmbed := uintptr(0)
- if field.Anonymous {
- offsetEmbed |= 1
- }
-
- resolveReflectType(field.Type.common()) // install in runtime
- f := structField{
- name: newName(field.Name, string(field.Tag), field.IsExported()),
- typ: field.Type.common(),
- offsetEmbed: offsetEmbed,
- }
- return f, field.PkgPath
-}
-
-// typeptrdata returns the length in bytes of the prefix of t
-// containing pointer data. Anything after this offset is scalar data.
-// keep in sync with ../cmd/compile/internal/reflectdata/reflect.go
-func typeptrdata(t *rtype) uintptr {
- switch t.Kind() {
- case Struct:
- st := (*structType)(unsafe.Pointer(t))
- // find the last field that has pointers.
- field := -1
- for i := range st.fields {
- ft := st.fields[i].typ
- if ft.pointers() {
- field = i
- }
- }
- if field == -1 {
- return 0
- }
- f := st.fields[field]
- return f.offset() + f.typ.ptrdata
-
- default:
- panic("reflect.typeptrdata: unexpected type, " + t.String())
- }
-}
-
-// See cmd/compile/internal/reflectdata/reflect.go for derivation of constant.
-const maxPtrmaskBytes = 2048
-
-// ArrayOf returns the array type with the given length and element type.
-// For example, if t represents int, ArrayOf(5, t) represents [5]int.
-//
-// If the resulting type would be larger than the available address space,
-// ArrayOf panics.
-func ArrayOf(length int, elem Type) Type {
- if length < 0 {
- panic("reflect: negative length passed to ArrayOf")
- }
-
- typ := elem.(*rtype)
-
- // Look in cache.
- ckey := cacheKey{Array, typ, nil, uintptr(length)}
- if array, ok := lookupCache.Load(ckey); ok {
- return array.(Type)
- }
-
- // Look in known types.
- s := "[" + strconv.Itoa(length) + "]" + typ.String()
- for _, tt := range typesByString(s) {
- array := (*arrayType)(unsafe.Pointer(tt))
- if array.elem == typ {
- ti, _ := lookupCache.LoadOrStore(ckey, tt)
- return ti.(Type)
- }
- }
-
- // Make an array type.
- var iarray any = [1]unsafe.Pointer{}
- prototype := *(**arrayType)(unsafe.Pointer(&iarray))
- array := *prototype
- array.tflag = typ.tflag & tflagRegularMemory
- array.str = resolveReflectName(newName(s, "", false))
- array.hash = fnv1(typ.hash, '[')
- for n := uint32(length); n > 0; n >>= 8 {
- array.hash = fnv1(array.hash, byte(n))
- }
- array.hash = fnv1(array.hash, ']')
- array.elem = typ
- array.ptrToThis = 0
- if typ.size > 0 {
- max := ^uintptr(0) / typ.size
- if uintptr(length) > max {
- panic("reflect.ArrayOf: array size would exceed virtual address space")
- }
- }
- array.size = typ.size * uintptr(length)
- if length > 0 && typ.ptrdata != 0 {
- array.ptrdata = typ.size*uintptr(length-1) + typ.ptrdata
- }
- array.align = typ.align
- array.fieldAlign = typ.fieldAlign
- array.len = uintptr(length)
- array.slice = SliceOf(elem).(*rtype)
-
- switch {
- case typ.ptrdata == 0 || array.size == 0:
- // No pointers.
- array.gcdata = nil
- array.ptrdata = 0
-
- case length == 1:
- // In memory, 1-element array looks just like the element.
- array.kind |= typ.kind & kindGCProg
- array.gcdata = typ.gcdata
- array.ptrdata = typ.ptrdata
-
- case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*goarch.PtrSize:
- // Element is small with pointer mask; array is still small.
- // Create direct pointer mask by turning each 1 bit in elem
- // into length 1 bits in larger mask.
- mask := make([]byte, (array.ptrdata/goarch.PtrSize+7)/8)
- emitGCMask(mask, 0, typ, array.len)
- array.gcdata = &mask[0]
-
- default:
- // Create program that emits one element
- // and then repeats to make the array.
- prog := []byte{0, 0, 0, 0} // will be length of prog
- prog = appendGCProg(prog, typ)
- // Pad from ptrdata to size.
- elemPtrs := typ.ptrdata / goarch.PtrSize
- elemWords := typ.size / goarch.PtrSize
- if elemPtrs < elemWords {
- // Emit literal 0 bit, then repeat as needed.
- prog = append(prog, 0x01, 0x00)
- if elemPtrs+1 < elemWords {
- prog = append(prog, 0x81)
- prog = appendVarint(prog, elemWords-elemPtrs-1)
- }
- }
- // Repeat length-1 times.
- if elemWords < 0x80 {
- prog = append(prog, byte(elemWords|0x80))
- } else {
- prog = append(prog, 0x80)
- prog = appendVarint(prog, elemWords)
- }
- prog = appendVarint(prog, uintptr(length)-1)
- prog = append(prog, 0)
- *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
- array.kind |= kindGCProg
- array.gcdata = &prog[0]
- array.ptrdata = array.size // overestimate but ok; must match program
- }
-
- etyp := typ.common()
- esize := etyp.Size()
-
- array.equal = nil
- if eequal := etyp.equal; eequal != nil {
- array.equal = func(p, q unsafe.Pointer) bool {
- for i := 0; i < length; i++ {
- pi := arrayAt(p, i, esize, "i < length")
- qi := arrayAt(q, i, esize, "i < length")
- if !eequal(pi, qi) {
- return false
- }
-
- }
- return true
- }
- }
-
- switch {
- case length == 1 && !ifaceIndir(typ):
- // array of 1 direct iface type can be direct
- array.kind |= kindDirectIface
- default:
- array.kind &^= kindDirectIface
- }
-
- ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
- return ti.(Type)
-}
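-
-// Editor's note (illustrative, not from the original file):
-//
-//	t := reflect.ArrayOf(5, reflect.TypeOf(byte(0)))
-//	fmt.Println(t)                              // [5]uint8
-//	fmt.Println(t == reflect.TypeOf([5]byte{})) // true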
-
-func appendVarint(x []byte, v uintptr) []byte {
- for ; v >= 0x80; v >>= 7 {
- x = append(x, byte(v|0x80))
- }
- x = append(x, byte(v))
- return x
-}
-
-// toType converts from a *rtype to a Type that can be returned
-// to the client of package reflect. In gc, the only concern is that
-// a nil *rtype must be replaced by a nil Type, but in gccgo this
-// function takes care of ensuring that multiple *rtype for the same
-// type are coalesced into a single Type.
-func toType(t *rtype) Type {
- if t == nil {
- return nil
- }
- return t
-}
-
-type layoutKey struct {
- ftyp *funcType // function signature
- rcvr *rtype // receiver type, or nil if none
-}
-
-type layoutType struct {
- t *rtype
- framePool *sync.Pool
- abi abiDesc
-}
-
-var layoutCache sync.Map // map[layoutKey]layoutType
-
-// funcLayout computes a struct type representing the layout of the
-// stack-assigned function arguments and return values for the function
-// type t.
-// If rcvr != nil, rcvr specifies the type of the receiver.
-// The returned type exists only for GC, so we only fill out GC relevant info.
-// Currently, that's just size and the GC program. We also fill in
-// the name for possible debugging use.
-func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, framePool *sync.Pool, abi abiDesc) {
- if t.Kind() != Func {
- panic("reflect: funcLayout of non-func type " + t.String())
- }
- if rcvr != nil && rcvr.Kind() == Interface {
- panic("reflect: funcLayout with interface receiver " + rcvr.String())
- }
- k := layoutKey{t, rcvr}
- if lti, ok := layoutCache.Load(k); ok {
- lt := lti.(layoutType)
- return lt.t, lt.framePool, lt.abi
- }
-
- // Compute the ABI layout.
- abi = newAbiDesc(t, rcvr)
-
- // build dummy rtype holding gc program
- x := &rtype{
- align: goarch.PtrSize,
- // Don't add spill space here; it's only necessary in
- // reflectcall's frame, not in the allocated frame.
- // TODO(mknyszek): Remove this comment when register
- // spill space in the frame is no longer required.
- size: align(abi.retOffset+abi.ret.stackBytes, goarch.PtrSize),
- ptrdata: uintptr(abi.stackPtrs.n) * goarch.PtrSize,
- }
- if abi.stackPtrs.n > 0 {
- x.gcdata = &abi.stackPtrs.data[0]
- }
-
- var s string
- if rcvr != nil {
- s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")"
- } else {
- s = "funcargs(" + t.String() + ")"
- }
- x.str = resolveReflectName(newName(s, "", false))
-
- // cache result for future callers
- framePool = &sync.Pool{New: func() any {
- return unsafe_New(x)
- }}
- lti, _ := layoutCache.LoadOrStore(k, layoutType{
- t: x,
- framePool: framePool,
- abi: abi,
- })
- lt := lti.(layoutType)
- return lt.t, lt.framePool, lt.abi
-}
-
-// ifaceIndir reports whether t is stored indirectly in an interface value.
-func ifaceIndir(t *rtype) bool {
- return t.kind&kindDirectIface == 0
-}
-
-// Note: this type must agree with runtime.bitvector.
-type bitVector struct {
- n uint32 // number of bits
- data []byte
-}
-
-// append a bit to the bitmap.
-func (bv *bitVector) append(bit uint8) {
- if bv.n%8 == 0 {
- bv.data = append(bv.data, 0)
- }
- bv.data[bv.n/8] |= bit << (bv.n % 8)
- bv.n++
-}
-
-func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
- if t.ptrdata == 0 {
- return
- }
-
- switch Kind(t.kind & kindMask) {
- case Chan, Func, Map, Pointer, Slice, String, UnsafePointer:
- // 1 pointer at start of representation
- for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
- bv.append(0)
- }
- bv.append(1)
-
- case Interface:
- // 2 pointers
- for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
- bv.append(0)
- }
- bv.append(1)
- bv.append(1)
-
- case Array:
- // repeat inner type
- tt := (*arrayType)(unsafe.Pointer(t))
- for i := 0; i < int(tt.len); i++ {
- addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
- }
-
- case Struct:
- // apply fields
- tt := (*structType)(unsafe.Pointer(t))
- for i := range tt.fields {
- f := &tt.fields[i]
- addTypeBits(bv, offset+f.offset(), f.typ)
- }
- }
-}
diff --git a/contrib/go/_std_1.18/src/reflect/value.go b/contrib/go/_std_1.18/src/reflect/value.go
deleted file mode 100644
index 147b402b2a..0000000000
--- a/contrib/go/_std_1.18/src/reflect/value.go
+++ /dev/null
@@ -1,3532 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package reflect
-
-import (
- "errors"
- "internal/abi"
- "internal/goarch"
- "internal/itoa"
- "internal/unsafeheader"
- "math"
- "runtime"
- "unsafe"
-)
-
-// Value is the reflection interface to a Go value.
-//
-// Not all methods apply to all kinds of values. Restrictions,
-// if any, are noted in the documentation for each method.
-// Use the Kind method to find out the kind of value before
-// calling kind-specific methods. Calling a method
-// inappropriate to the kind of value causes a run time panic.
-//
-// The zero Value represents no value.
-// Its IsValid method returns false, its Kind method returns Invalid,
-// its String method returns "<invalid Value>", and all other methods panic.
-// Most functions and methods never return an invalid value.
-// If one does, its documentation states the conditions explicitly.
-//
-// A Value can be used concurrently by multiple goroutines provided that
-// the underlying Go value can be used concurrently for the equivalent
-// direct operations.
-//
-// To compare two Values, compare the results of the Interface method.
-// Using == on two Values does not compare the underlying values
-// they represent.
-type Value struct {
- // typ holds the type of the value represented by a Value.
- typ *rtype
-
- // Pointer-valued data or, if flagIndir is set, pointer to data.
- // Valid when either flagIndir is set or typ.pointers() is true.
- ptr unsafe.Pointer
-
- // flag holds metadata about the value.
- // The lowest bits are flag bits:
- // - flagStickyRO: obtained via an unexported, non-embedded field, so read-only
- // - flagEmbedRO: obtained via an unexported embedded field, so read-only
- // - flagIndir: val holds a pointer to the data
- // - flagAddr: v.CanAddr is true (implies flagIndir)
- // - flagMethod: v is a method value.
- // The next five bits give the Kind of the value.
- // This repeats typ.Kind() except for method values.
- // The remaining 23+ bits give a method number for method values.
- // If flag.kind() != Func, code can assume that flagMethod is unset.
- // If ifaceIndir(typ), code can assume that flagIndir is set.
- flag
-
- // A method value represents a curried method invocation
- // like r.Read for some receiver r. The typ+val+flag bits describe
- // the receiver r, but the flag's Kind bits say Func (methods are
- // functions), and the top bits of the flag give the method number
- // in r's type's method table.
-}
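-
-// Editor's note (illustrative, not from the original file): the comparison
-// caveat above in action; == on Values compares the headers, not the
-// represented values:
-//
-//	a, b := reflect.ValueOf(42), reflect.ValueOf(42)
-//	fmt.Println(a.Interface() == b.Interface()) // true: compares the ints
-//	fmt.Println(a == b)                         // typically false: header compare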
-
-type flag uintptr
-
-const (
- flagKindWidth = 5 // there are 27 kinds
- flagKindMask flag = 1<<flagKindWidth - 1
- flagStickyRO flag = 1 << 5
- flagEmbedRO flag = 1 << 6
- flagIndir flag = 1 << 7
- flagAddr flag = 1 << 8
- flagMethod flag = 1 << 9
- flagMethodShift = 10
- flagRO flag = flagStickyRO | flagEmbedRO
-)
-
-func (f flag) kind() Kind {
- return Kind(f & flagKindMask)
-}
-
-func (f flag) ro() flag {
- if f&flagRO != 0 {
- return flagStickyRO
- }
- return 0
-}
-
-// pointer returns the underlying pointer represented by v.
-// v.Kind() must be Pointer, Map, Chan, Func, or UnsafePointer;
-// if v.Kind() == Pointer, the base type must not be go:notinheap.
-func (v Value) pointer() unsafe.Pointer {
- if v.typ.size != goarch.PtrSize || !v.typ.pointers() {
- panic("can't call pointer on a non-pointer Value")
- }
- if v.flag&flagIndir != 0 {
- return *(*unsafe.Pointer)(v.ptr)
- }
- return v.ptr
-}
-
-// packEface converts v to the empty interface.
-func packEface(v Value) any {
- t := v.typ
- var i any
- e := (*emptyInterface)(unsafe.Pointer(&i))
- // First, fill in the data portion of the interface.
- switch {
- case ifaceIndir(t):
- if v.flag&flagIndir == 0 {
- panic("bad indir")
- }
- // Value is indirect, and so is the interface we're making.
- ptr := v.ptr
- if v.flag&flagAddr != 0 {
- // TODO: pass safe boolean from valueInterface so
- // we don't need to copy if safe==true?
- c := unsafe_New(t)
- typedmemmove(t, c, ptr)
- ptr = c
- }
- e.word = ptr
- case v.flag&flagIndir != 0:
- // Value is indirect, but interface is direct. We need
- // to load the data at v.ptr into the interface data word.
- e.word = *(*unsafe.Pointer)(v.ptr)
- default:
- // Value is direct, and so is the interface.
- e.word = v.ptr
- }
- // Now, fill in the type portion. We're very careful here not
- // to have any operation between the e.word and e.typ assignments
- // that would let the garbage collector observe the partially-built
- // interface value.
- e.typ = t
- return i
-}
-
-// unpackEface converts the empty interface i to a Value.
-func unpackEface(i any) Value {
- e := (*emptyInterface)(unsafe.Pointer(&i))
- // NOTE: don't read e.word until we know whether it is really a pointer or not.
- t := e.typ
- if t == nil {
- return Value{}
- }
- f := flag(t.Kind())
- if ifaceIndir(t) {
- f |= flagIndir
- }
- return Value{t, e.word, f}
-}
-
-// A ValueError occurs when a Value method is invoked on
-// a Value that does not support it. Such cases are documented
-// in the description of each method.
-type ValueError struct {
- Method string
- Kind Kind
-}
-
-func (e *ValueError) Error() string {
- if e.Kind == 0 {
- return "reflect: call of " + e.Method + " on zero Value"
- }
- return "reflect: call of " + e.Method + " on " + e.Kind.String() + " Value"
-}
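-
-// Editor's note (illustrative, not from the original file): kind-specific
-// accessors report misuse through *ValueError:
-//
-//	defer func() {
-//		if e, ok := recover().(*reflect.ValueError); ok {
-//			fmt.Println(e) // reflect: call of reflect.Value.Int on string Value
-//		}
-//	}()
-//	_ = reflect.ValueOf("hi").Int()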
-
-// methodName returns the name of the calling method,
-// assumed to be two stack frames above.
-func methodName() string {
- pc, _, _, _ := runtime.Caller(2)
- f := runtime.FuncForPC(pc)
- if f == nil {
- return "unknown method"
- }
- return f.Name()
-}
-
-// methodNameSkip is like methodName, but skips another stack frame.
-// This is a separate function so that reflect.flag.mustBe will be inlined.
-func methodNameSkip() string {
- pc, _, _, _ := runtime.Caller(3)
- f := runtime.FuncForPC(pc)
- if f == nil {
- return "unknown method"
- }
- return f.Name()
-}
-
-// emptyInterface is the header for an interface{} value.
-type emptyInterface struct {
- typ *rtype
- word unsafe.Pointer
-}
-
-// nonEmptyInterface is the header for an interface value with methods.
-type nonEmptyInterface struct {
- // see ../runtime/iface.go:/Itab
- itab *struct {
- ityp *rtype // static interface type
- typ *rtype // dynamic concrete type
- hash uint32 // copy of typ.hash
- _ [4]byte
- fun [100000]unsafe.Pointer // method table
- }
- word unsafe.Pointer
-}
-
-// mustBe panics if f's kind is not expected.
-// Making this a method on flag instead of on Value
-// (and embedding flag in Value) means that we can write
-// the very clear v.mustBe(Bool) and have it compile into
-// v.flag.mustBe(Bool), which will only bother to copy the
-// single important word for the receiver.
-func (f flag) mustBe(expected Kind) {
- // TODO(mvdan): use f.kind() again once mid-stack inlining gets better
- if Kind(f&flagKindMask) != expected {
- panic(&ValueError{methodName(), f.kind()})
- }
-}
-
-// mustBeExported panics if f records that the value was obtained using
-// an unexported field.
-func (f flag) mustBeExported() {
- if f == 0 || f&flagRO != 0 {
- f.mustBeExportedSlow()
- }
-}
-
-func (f flag) mustBeExportedSlow() {
- if f == 0 {
- panic(&ValueError{methodNameSkip(), Invalid})
- }
- if f&flagRO != 0 {
- panic("reflect: " + methodNameSkip() + " using value obtained using unexported field")
- }
-}
-
-// mustBeAssignable panics if f records that the value is not assignable,
-// which is to say that either it was obtained using an unexported field
-// or it is not addressable.
-func (f flag) mustBeAssignable() {
- if f&flagRO != 0 || f&flagAddr == 0 {
- f.mustBeAssignableSlow()
- }
-}
-
-func (f flag) mustBeAssignableSlow() {
- if f == 0 {
- panic(&ValueError{methodNameSkip(), Invalid})
- }
- // Assignable if addressable and not read-only.
- if f&flagRO != 0 {
- panic("reflect: " + methodNameSkip() + " using value obtained using unexported field")
- }
- if f&flagAddr == 0 {
- panic("reflect: " + methodNameSkip() + " using unaddressable value")
- }
-}
-
-// Addr returns a pointer value representing the address of v.
-// It panics if CanAddr() returns false.
-// Addr is typically used to obtain a pointer to a struct field
-// or slice element in order to call a method that requires a
-// pointer receiver.
-func (v Value) Addr() Value {
- if v.flag&flagAddr == 0 {
- panic("reflect.Value.Addr of unaddressable value")
- }
- // Preserve flagRO instead of using v.flag.ro() so that
- // v.Addr().Elem() is equivalent to v (#32772)
- fl := v.flag & flagRO
- return Value{v.typ.ptrTo(), v.ptr, fl | flag(Pointer)}
-}
-
-// Bool returns v's underlying value.
-// It panics if v's kind is not Bool.
-func (v Value) Bool() bool {
- v.mustBe(Bool)
- return *(*bool)(v.ptr)
-}
-
-// Bytes returns v's underlying value.
-// It panics if v's underlying value is not a slice of bytes.
-func (v Value) Bytes() []byte {
- v.mustBe(Slice)
- if v.typ.Elem().Kind() != Uint8 {
- panic("reflect.Value.Bytes of non-byte slice")
- }
- // Slice is always bigger than a word; assume flagIndir.
- return *(*[]byte)(v.ptr)
-}
-
-// runes returns v's underlying value.
-// It panics if v's underlying value is not a slice of runes (int32s).
-func (v Value) runes() []rune {
- v.mustBe(Slice)
- if v.typ.Elem().Kind() != Int32 {
- panic("reflect.Value.Bytes of non-rune slice")
- }
- // Slice is always bigger than a word; assume flagIndir.
- return *(*[]rune)(v.ptr)
-}
-
-// CanAddr reports whether the value's address can be obtained with Addr.
-// Such values are called addressable. A value is addressable if it is
-// an element of a slice, an element of an addressable array,
-// a field of an addressable struct, or the result of dereferencing a pointer.
-// If CanAddr returns false, calling Addr will panic.
-func (v Value) CanAddr() bool {
- return v.flag&flagAddr != 0
-}
-
-// CanSet reports whether the value of v can be changed.
-// A Value can be changed only if it is addressable and was not
-// obtained by the use of unexported struct fields.
-// If CanSet returns false, calling Set or any type-specific
-// setter (e.g., SetBool, SetInt) will panic.
-func (v Value) CanSet() bool {
- return v.flag&(flagAddr|flagRO) == flagAddr
-}
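-
-// Editor's note (illustrative, not from the original file): the
-// addressability and settability rules above in a single chain:
-//
-//	x := 1
-//	fmt.Println(reflect.ValueOf(x).CanSet()) // false: operates on a copy
-//	v := reflect.ValueOf(&x).Elem()          // addressable int
-//	fmt.Println(v.CanAddr(), v.CanSet())     // true true
-//	v.SetInt(2)
-//	fmt.Println(x) // 2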
-
-// Call calls the function v with the input arguments in.
-// For example, if len(in) == 3, v.Call(in) represents the Go call v(in[0], in[1], in[2]).
-// Call panics if v's Kind is not Func.
-// It returns the output results as Values.
-// As in Go, each input argument must be assignable to the
-// type of the function's corresponding input parameter.
-// If v is a variadic function, Call creates the variadic slice parameter
-// itself, copying in the corresponding values.
-func (v Value) Call(in []Value) []Value {
- v.mustBe(Func)
- v.mustBeExported()
- return v.call("Call", in)
-}
-
-// CallSlice calls the variadic function v with the input arguments in,
-// assigning the slice in[len(in)-1] to v's final variadic argument.
-// For example, if len(in) == 3, v.CallSlice(in) represents the Go call v(in[0], in[1], in[2]...).
-// CallSlice panics if v's Kind is not Func or if v is not variadic.
-// It returns the output results as Values.
-// As in Go, each input argument must be assignable to the
-// type of the function's corresponding input parameter.
-func (v Value) CallSlice(in []Value) []Value {
- v.mustBe(Func)
- v.mustBeExported()
- return v.call("CallSlice", in)
-}
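-
-// Editor's note (illustrative, not from the original file): Call builds the
-// variadic slice itself, while CallSlice expects it pre-built:
-//
-//	v := reflect.ValueOf(fmt.Sprint) // func(...any) string
-//	out := v.Call([]reflect.Value{reflect.ValueOf(1), reflect.ValueOf(2)})
-//	fmt.Println(out[0]) // 1 2
-//	out = v.CallSlice([]reflect.Value{reflect.ValueOf([]any{1, 2})})
-//	fmt.Println(out[0]) // 1 2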
-
-var callGC bool // for testing; see TestCallMethodJump and TestCallArgLive
-
-const debugReflectCall = false
-
-func (v Value) call(op string, in []Value) []Value {
- // Get function pointer, type.
- t := (*funcType)(unsafe.Pointer(v.typ))
- var (
- fn unsafe.Pointer
- rcvr Value
- rcvrtype *rtype
- )
- if v.flag&flagMethod != 0 {
- rcvr = v
- rcvrtype, t, fn = methodReceiver(op, v, int(v.flag)>>flagMethodShift)
- } else if v.flag&flagIndir != 0 {
- fn = *(*unsafe.Pointer)(v.ptr)
- } else {
- fn = v.ptr
- }
-
- if fn == nil {
- panic("reflect.Value.Call: call of nil function")
- }
-
- isSlice := op == "CallSlice"
- n := t.NumIn()
- isVariadic := t.IsVariadic()
- if isSlice {
- if !isVariadic {
- panic("reflect: CallSlice of non-variadic function")
- }
- if len(in) < n {
- panic("reflect: CallSlice with too few input arguments")
- }
- if len(in) > n {
- panic("reflect: CallSlice with too many input arguments")
- }
- } else {
- if isVariadic {
- n--
- }
- if len(in) < n {
- panic("reflect: Call with too few input arguments")
- }
- if !isVariadic && len(in) > n {
- panic("reflect: Call with too many input arguments")
- }
- }
- for _, x := range in {
- if x.Kind() == Invalid {
- panic("reflect: " + op + " using zero Value argument")
- }
- }
- for i := 0; i < n; i++ {
- if xt, targ := in[i].Type(), t.In(i); !xt.AssignableTo(targ) {
- panic("reflect: " + op + " using " + xt.String() + " as type " + targ.String())
- }
- }
- if !isSlice && isVariadic {
- // prepare slice for remaining values
- m := len(in) - n
- slice := MakeSlice(t.In(n), m, m)
- elem := t.In(n).Elem()
- for i := 0; i < m; i++ {
- x := in[n+i]
- if xt := x.Type(); !xt.AssignableTo(elem) {
- panic("reflect: cannot use " + xt.String() + " as type " + elem.String() + " in " + op)
- }
- slice.Index(i).Set(x)
- }
- origIn := in
- in = make([]Value, n+1)
- copy(in[:n], origIn)
- in[n] = slice
- }
-
- nin := len(in)
- if nin != t.NumIn() {
- panic("reflect.Value.Call: wrong argument count")
- }
- nout := t.NumOut()
-
- // Register argument space.
- var regArgs abi.RegArgs
-
- // Compute frame type.
- frametype, framePool, abi := funcLayout(t, rcvrtype)
-
- // Allocate a chunk of memory for frame if needed.
- var stackArgs unsafe.Pointer
- if frametype.size != 0 {
- if nout == 0 {
- stackArgs = framePool.Get().(unsafe.Pointer)
- } else {
- // Can't use pool if the function has return values.
- // We will leak a pointer to args in ret, so its lifetime is not scoped.
- stackArgs = unsafe_New(frametype)
- }
- }
- frameSize := frametype.size
-
- if debugReflectCall {
- println("reflect.call", t.String())
- abi.dump()
- }
-
- // Copy inputs into args.
-
- // Handle receiver.
- inStart := 0
- if rcvrtype != nil {
- // Guaranteed to only be one word in size,
- // so it will only take up exactly 1 abiStep (either
- // in a register or on the stack).
- switch st := abi.call.steps[0]; st.kind {
- case abiStepStack:
- storeRcvr(rcvr, stackArgs)
- case abiStepIntReg, abiStepPointer:
- // Even pointers can go into the uintptr slot because
- // they'll be kept alive by the Values referenced by
- // this frame. Reflection forces these to be heap-allocated,
- // so we don't need to worry about stack copying.
- storeRcvr(rcvr, unsafe.Pointer(&regArgs.Ints[st.ireg]))
- case abiStepFloatReg:
- storeRcvr(rcvr, unsafe.Pointer(&regArgs.Floats[st.freg]))
- default:
- panic("unknown ABI parameter kind")
- }
- inStart = 1
- }
-
- // Handle arguments.
- for i, v := range in {
- v.mustBeExported()
- targ := t.In(i).(*rtype)
- // TODO(mknyszek): Figure out if it's possible to get some
- // scratch space for this assignment check. Previously, it
- // was possible to use space in the argument frame.
- v = v.assignTo("reflect.Value.Call", targ, nil)
- stepsLoop:
- for _, st := range abi.call.stepsForValue(i + inStart) {
- switch st.kind {
- case abiStepStack:
- // Copy values to the "stack."
- addr := add(stackArgs, st.stkOff, "precomputed stack arg offset")
- if v.flag&flagIndir != 0 {
- typedmemmove(targ, addr, v.ptr)
- } else {
- *(*unsafe.Pointer)(addr) = v.ptr
- }
- // There's only one step for a stack-allocated value.
- break stepsLoop
- case abiStepIntReg, abiStepPointer:
- // Copy values to "integer registers."
- if v.flag&flagIndir != 0 {
- offset := add(v.ptr, st.offset, "precomputed value offset")
- if st.kind == abiStepPointer {
- // Duplicate this pointer in the pointer area of the
- // register space. Otherwise, there's the potential for
- // this to be the last reference to v.ptr.
- regArgs.Ptrs[st.ireg] = *(*unsafe.Pointer)(offset)
- }
- intToReg(&regArgs, st.ireg, st.size, offset)
- } else {
- if st.kind == abiStepPointer {
- // See the comment in abiStepPointer case above.
- regArgs.Ptrs[st.ireg] = v.ptr
- }
- regArgs.Ints[st.ireg] = uintptr(v.ptr)
- }
- case abiStepFloatReg:
- // Copy values to "float registers."
- if v.flag&flagIndir == 0 {
- panic("attempted to copy pointer to FP register")
- }
- offset := add(v.ptr, st.offset, "precomputed value offset")
- floatToReg(&regArgs, st.freg, st.size, offset)
- default:
- panic("unknown ABI part kind")
- }
- }
- }
- // TODO(mknyszek): Remove this when we no longer have
- // caller reserved spill space.
- frameSize = align(frameSize, goarch.PtrSize)
- frameSize += abi.spill
-
- // Mark pointers in registers for the return path.
- regArgs.ReturnIsPtr = abi.outRegPtrs
-
- if debugReflectCall {
- regArgs.Dump()
- }
-
- // For testing; see TestCallArgLive.
- if callGC {
- runtime.GC()
- }
-
- // Call.
- call(frametype, fn, stackArgs, uint32(frametype.size), uint32(abi.retOffset), uint32(frameSize), &regArgs)
-
- // For testing; see TestCallMethodJump.
- if callGC {
- runtime.GC()
- }
-
- var ret []Value
- if nout == 0 {
- if stackArgs != nil {
- typedmemclr(frametype, stackArgs)
- framePool.Put(stackArgs)
- }
- } else {
- if stackArgs != nil {
- // Zero the now unused input area of args,
- // because the Values returned by this function contain pointers to the args object,
- // and will thus keep the args object alive indefinitely.
- typedmemclrpartial(frametype, stackArgs, 0, abi.retOffset)
- }
-
- // Wrap Values around return values in args.
- ret = make([]Value, nout)
- for i := 0; i < nout; i++ {
- tv := t.Out(i)
- if tv.Size() == 0 {
- // For zero-sized return value, args+off may point to the next object.
- // In this case, return the zero value instead.
- ret[i] = Zero(tv)
- continue
- }
- steps := abi.ret.stepsForValue(i)
- if st := steps[0]; st.kind == abiStepStack {
- // This value is on the stack. If part of a value is stack
- // allocated, the entire value is according to the ABI. So
- // just make an indirection into the allocated frame.
- fl := flagIndir | flag(tv.Kind())
- ret[i] = Value{tv.common(), add(stackArgs, st.stkOff, "tv.Size() != 0"), fl}
- // Note: this does introduce false sharing between results -
- // if any result is live, they are all live.
- // (And the space for the args is live as well, but as we've
- // cleared that space it isn't as big a deal.)
- continue
- }
-
- // Handle pointers passed in registers.
- if !ifaceIndir(tv.common()) {
- // Pointer-valued data gets put directly
- // into v.ptr.
- if steps[0].kind != abiStepPointer {
- print("kind=", steps[0].kind, ", type=", tv.String(), "\n")
- panic("mismatch between ABI description and types")
- }
- ret[i] = Value{tv.common(), regArgs.Ptrs[steps[0].ireg], flag(tv.Kind())}
- continue
- }
-
- // All that's left is values passed in registers that we need to
- // create space for and copy values back into.
- //
- // TODO(mknyszek): We make a new allocation for each register-allocated
- // value, but previously we could always point into the heap-allocated
- // stack frame. This is a regression that could be fixed by adding
- // additional space to the allocated stack frame and storing the
- // register-allocated return values into the allocated stack frame and
- // referring there in the resulting Value.
- s := unsafe_New(tv.common())
- for _, st := range steps {
- switch st.kind {
- case abiStepIntReg:
- offset := add(s, st.offset, "precomputed value offset")
- intFromReg(&regArgs, st.ireg, st.size, offset)
- case abiStepPointer:
- s := add(s, st.offset, "precomputed value offset")
- *((*unsafe.Pointer)(s)) = regArgs.Ptrs[st.ireg]
- case abiStepFloatReg:
- offset := add(s, st.offset, "precomputed value offset")
- floatFromReg(&regArgs, st.freg, st.size, offset)
- case abiStepStack:
- panic("register-based return value has stack component")
- default:
- panic("unknown ABI part kind")
- }
- }
- ret[i] = Value{tv.common(), s, flagIndir | flag(tv.Kind())}
- }
- }
-
- return ret
-}
-
-// callReflect is the call implementation used by a function
-// returned by MakeFunc. In many ways it is the opposite of the
-// method Value.call above. The method above converts a call using Values
-// into a call of a function with a concrete argument frame, while
-// callReflect converts a call of a function with a concrete argument
-// frame into a call using Values.
-// It is in this file so that it can be next to the call method above.
-// The remainder of the MakeFunc implementation is in makefunc.go.
-//
-// NOTE: This function must be marked as a "wrapper" in the generated code,
-// so that the linker can make it work correctly for panic and recover.
-// The gc compilers know to do that for the name "reflect.callReflect".
-//
-// ctxt is the "closure" generated by MakeFunc.
-// frame is a pointer to the arguments to that closure on the stack.
-// retValid points to a boolean which should be set when the results
-// section of frame is set.
-//
-// regs contains the argument values passed in registers and will contain
-// the values returned from ctxt.fn in registers.
-func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool, regs *abi.RegArgs) {
- if callGC {
- // Call GC upon entry during testing.
- // Getting our stack scanned here is the biggest hazard, because
- // our caller (makeFuncStub) could have failed to place the last
- // pointer to a value in regs' pointer space, in which case it
- // won't be visible to the GC.
- runtime.GC()
- }
- ftyp := ctxt.ftyp
- f := ctxt.fn
-
- _, _, abi := funcLayout(ftyp, nil)
-
- // Copy arguments into Values.
- ptr := frame
- in := make([]Value, 0, int(ftyp.inCount))
- for i, typ := range ftyp.in() {
- if typ.Size() == 0 {
- in = append(in, Zero(typ))
- continue
- }
- v := Value{typ, nil, flag(typ.Kind())}
- steps := abi.call.stepsForValue(i)
- if st := steps[0]; st.kind == abiStepStack {
- if ifaceIndir(typ) {
- // value cannot be inlined in interface data.
- // Must make a copy, because f might keep a reference to it,
- // and we cannot let f keep a reference to the stack frame
- // after this function returns, not even a read-only reference.
- v.ptr = unsafe_New(typ)
- if typ.size > 0 {
- typedmemmove(typ, v.ptr, add(ptr, st.stkOff, "typ.size > 0"))
- }
- v.flag |= flagIndir
- } else {
- v.ptr = *(*unsafe.Pointer)(add(ptr, st.stkOff, "1-ptr"))
- }
- } else {
- if ifaceIndir(typ) {
- // All that's left is values passed in registers; we need to
- // create space for them.
- v.flag |= flagIndir
- v.ptr = unsafe_New(typ)
- for _, st := range steps {
- switch st.kind {
- case abiStepIntReg:
- offset := add(v.ptr, st.offset, "precomputed value offset")
- intFromReg(regs, st.ireg, st.size, offset)
- case abiStepPointer:
- s := add(v.ptr, st.offset, "precomputed value offset")
- *((*unsafe.Pointer)(s)) = regs.Ptrs[st.ireg]
- case abiStepFloatReg:
- offset := add(v.ptr, st.offset, "precomputed value offset")
- floatFromReg(regs, st.freg, st.size, offset)
- case abiStepStack:
- panic("register-based return value has stack component")
- default:
- panic("unknown ABI part kind")
- }
- }
- } else {
- // Pointer-valued data gets put directly
- // into v.ptr.
- if steps[0].kind != abiStepPointer {
- print("kind=", steps[0].kind, ", type=", typ.String(), "\n")
- panic("mismatch between ABI description and types")
- }
- v.ptr = regs.Ptrs[steps[0].ireg]
- }
- }
- in = append(in, v)
- }
-
- // Call underlying function.
- out := f(in)
- numOut := ftyp.NumOut()
- if len(out) != numOut {
- panic("reflect: wrong return count from function created by MakeFunc")
- }
-
- // Copy results back into argument frame and register space.
- if numOut > 0 {
- for i, typ := range ftyp.out() {
- v := out[i]
- if v.typ == nil {
- panic("reflect: function created by MakeFunc using " + funcName(f) +
- " returned zero Value")
- }
- if v.flag&flagRO != 0 {
- panic("reflect: function created by MakeFunc using " + funcName(f) +
- " returned value obtained from unexported field")
- }
- if typ.size == 0 {
- continue
- }
-
- // Convert v to type typ if v is assignable to a variable
- // of type typ in the language spec.
- // See issue 28761.
- //
- // TODO(mknyszek): In the switch to the register ABI we lost
- // the scratch space here for the register cases (and
- // temporarily for all the cases).
- //
- // If/when this happens, take note of the following:
- //
- // We must clear the destination before calling assignTo,
- // in case assignTo writes (with memory barriers) to the
- // target location used as scratch space. See issue 39541.
- v = v.assignTo("reflect.MakeFunc", typ, nil)
- stepsLoop:
- for _, st := range abi.ret.stepsForValue(i) {
- switch st.kind {
- case abiStepStack:
- // Copy values to the "stack."
- addr := add(ptr, st.stkOff, "precomputed stack arg offset")
- // Do not use write barriers. The stack space used
- // for this call is not adequately zeroed, and we
- // are careful to keep the arguments alive until we
- // return to makeFuncStub's caller.
- if v.flag&flagIndir != 0 {
- memmove(addr, v.ptr, st.size)
- } else {
- // This case must be a pointer type.
- *(*uintptr)(addr) = uintptr(v.ptr)
- }
- // There's only one step for a stack-allocated value.
- break stepsLoop
- case abiStepIntReg, abiStepPointer:
- // Copy values to "integer registers."
- if v.flag&flagIndir != 0 {
- offset := add(v.ptr, st.offset, "precomputed value offset")
- intToReg(regs, st.ireg, st.size, offset)
- } else {
- // Only populate the Ints space on the return path.
- // This is safe because out is kept alive until the
- // end of this function, and the return path through
- // makeFuncStub has no preemption, so these pointers
- // are always visible to the GC.
- regs.Ints[st.ireg] = uintptr(v.ptr)
- }
- case abiStepFloatReg:
- // Copy values to "float registers."
- if v.flag&flagIndir == 0 {
- panic("attempted to copy pointer to FP register")
- }
- offset := add(v.ptr, st.offset, "precomputed value offset")
- floatToReg(regs, st.freg, st.size, offset)
- default:
- panic("unknown ABI part kind")
- }
- }
- }
- }
-
- // Announce that the return values are valid.
- // After this point the runtime can depend on the return values being valid.
- *retValid = true
-
- // We have to make sure that the out slice lives at least until
- // the runtime knows the return values are valid. Otherwise, the
- // return values might not be scanned by anyone during a GC.
- // (out would be dead, and the return slots not yet alive.)
- runtime.KeepAlive(out)
-
- // runtime.getArgInfo expects to be able to find ctxt on the
- // stack when it finds our caller, makeFuncStub. Make sure it
- // doesn't get garbage collected.
- runtime.KeepAlive(ctxt)
-}
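-
-// Editor's note: a minimal sketch (not part of the original file) of the
-// MakeFunc path that lands in callReflect above, adapted from the package's
-// documented swap example:
-//
-//	swap := func(in []reflect.Value) []reflect.Value {
-//		return []reflect.Value{in[1], in[0]}
-//	}
-//	var intSwap func(int, int) (int, int)
-//	fn := reflect.ValueOf(&intSwap).Elem()
-//	fn.Set(reflect.MakeFunc(fn.Type(), swap))
-//	fmt.Println(intSwap(1, 2)) // 2 1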
-
-// methodReceiver returns information about the receiver
-// described by v. The Value v may or may not have the
-// flagMethod bit set, so the kind cached in v.flag should
-// not be used.
-// The return value rcvrtype gives the method's actual receiver type.
-// The return value t gives the method type signature (without the receiver).
-// The return value fn is a pointer to the method code.
-func methodReceiver(op string, v Value, methodIndex int) (rcvrtype *rtype, t *funcType, fn unsafe.Pointer) {
- i := methodIndex
- if v.typ.Kind() == Interface {
- tt := (*interfaceType)(unsafe.Pointer(v.typ))
- if uint(i) >= uint(len(tt.methods)) {
- panic("reflect: internal error: invalid method index")
- }
- m := &tt.methods[i]
- if !tt.nameOff(m.name).isExported() {
- panic("reflect: " + op + " of unexported method")
- }
- iface := (*nonEmptyInterface)(v.ptr)
- if iface.itab == nil {
- panic("reflect: " + op + " of method on nil interface value")
- }
- rcvrtype = iface.itab.typ
- fn = unsafe.Pointer(&iface.itab.fun[i])
- t = (*funcType)(unsafe.Pointer(tt.typeOff(m.typ)))
- } else {
- rcvrtype = v.typ
- ms := v.typ.exportedMethods()
- if uint(i) >= uint(len(ms)) {
- panic("reflect: internal error: invalid method index")
- }
- m := ms[i]
- if !v.typ.nameOff(m.name).isExported() {
- panic("reflect: " + op + " of unexported method")
- }
- ifn := v.typ.textOff(m.ifn)
- fn = unsafe.Pointer(&ifn)
- t = (*funcType)(unsafe.Pointer(v.typ.typeOff(m.mtyp)))
- }
- return
-}
-
-// v is a method receiver. Store at p the word which is used to
-// encode that receiver at the start of the argument list.
-// Reflect uses the "interface" calling convention for
-// methods, which always uses one word to record the receiver.
-func storeRcvr(v Value, p unsafe.Pointer) {
- t := v.typ
- if t.Kind() == Interface {
- // the interface data word becomes the receiver word
- iface := (*nonEmptyInterface)(v.ptr)
- *(*unsafe.Pointer)(p) = iface.word
- } else if v.flag&flagIndir != 0 && !ifaceIndir(t) {
- *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(v.ptr)
- } else {
- *(*unsafe.Pointer)(p) = v.ptr
- }
-}
-
-// align returns the result of rounding x up to a multiple of n.
-// n must be a power of two.
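- //
- // For example, align(13, 8) == 16 and align(16, 8) == 16.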
-func align(x, n uintptr) uintptr {
- return (x + n - 1) &^ (n - 1)
-}
-
-// callMethod is the call implementation used by a function returned
-// by makeMethodValue (used by v.Method(i).Interface()).
-// It is a streamlined version of the usual reflect call: the caller has
-// already laid out the argument frame for us, so we don't have
-// to deal with individual Values for each argument.
-// It is in this file so that it can be next to the two similar functions above.
-// The remainder of the makeMethodValue implementation is in makefunc.go.
-//
-// NOTE: This function must be marked as a "wrapper" in the generated code,
-// so that the linker can make it work correctly for panic and recover.
-// The gc compilers know to do that for the name "reflect.callMethod".
-//
-// ctxt is the "closure" generated by makeVethodValue.
-// frame is a pointer to the arguments to that closure on the stack.
-// retValid points to a boolean which should be set when the results
-// section of frame is set.
-//
-// regs contains the argument values passed in registers and will contain
-// the values returned from ctxt.fn in registers.
-func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool, regs *abi.RegArgs) {
- rcvr := ctxt.rcvr
- rcvrType, valueFuncType, methodFn := methodReceiver("call", rcvr, ctxt.method)
-
- // There are two ABIs at play here.
- //
- // methodValueCall was invoked with the ABI assuming there was no
- // receiver ("value ABI") and that's what frame and regs are holding.
- //
- // Meanwhile, we need to actually call the method with a receiver, which
- // has its own ABI ("method ABI"). Everything that follows is a translation
- // between the two.
- _, _, valueABI := funcLayout(valueFuncType, nil)
- valueFrame, valueRegs := frame, regs
- methodFrameType, methodFramePool, methodABI := funcLayout(valueFuncType, rcvrType)
-
- // Make a new frame that is one word bigger so we can store the receiver.
- // This space is used for both arguments and return values.
- methodFrame := methodFramePool.Get().(unsafe.Pointer)
- var methodRegs abi.RegArgs
-
- // Deal with the receiver. It's guaranteed to only be one word in size.
- if st := methodABI.call.steps[0]; st.kind == abiStepStack {
- // Only copy the receiver to the stack if the ABI says so.
- // Otherwise, it'll be in a register already.
- storeRcvr(rcvr, methodFrame)
- } else {
- // Put the receiver in a register.
- storeRcvr(rcvr, unsafe.Pointer(&methodRegs.Ints))
- }
-
- // Translate the rest of the arguments.
- for i, t := range valueFuncType.in() {
- valueSteps := valueABI.call.stepsForValue(i)
- methodSteps := methodABI.call.stepsForValue(i + 1)
-
- // Zero-sized types are trivial: nothing to do.
- if len(valueSteps) == 0 {
- if len(methodSteps) != 0 {
- panic("method ABI and value ABI do not align")
- }
- continue
- }
-
- // There are four cases to handle in translating each
- // argument:
- // 1. Stack -> stack translation.
- // 2. Stack -> registers translation.
- // 3. Registers -> stack translation.
- // 4. Registers -> registers translation.
-
- // If the value ABI passes the value on the stack,
- // then the method ABI does too, because it has strictly
- // fewer arguments. Simply copy between the two.
- if vStep := valueSteps[0]; vStep.kind == abiStepStack {
- mStep := methodSteps[0]
- // Handle stack -> stack translation.
- if mStep.kind == abiStepStack {
- if vStep.size != mStep.size {
- panic("method ABI and value ABI do not align")
- }
- typedmemmove(t,
- add(methodFrame, mStep.stkOff, "precomputed stack offset"),
- add(valueFrame, vStep.stkOff, "precomputed stack offset"))
- continue
- }
- // Handle stack -> register translation.
- for _, mStep := range methodSteps {
- from := add(valueFrame, vStep.stkOff+mStep.offset, "precomputed stack offset")
- switch mStep.kind {
- case abiStepPointer:
- // Do the pointer copy directly so we get a write barrier.
- methodRegs.Ptrs[mStep.ireg] = *(*unsafe.Pointer)(from)
- fallthrough // We need to make sure this ends up in Ints, too.
- case abiStepIntReg:
- intToReg(&methodRegs, mStep.ireg, mStep.size, from)
- case abiStepFloatReg:
- floatToReg(&methodRegs, mStep.freg, mStep.size, from)
- default:
- panic("unexpected method step")
- }
- }
- continue
- }
- // Handle register -> stack translation.
- if mStep := methodSteps[0]; mStep.kind == abiStepStack {
- for _, vStep := range valueSteps {
- to := add(methodFrame, mStep.stkOff+vStep.offset, "precomputed stack offset")
- switch vStep.kind {
- case abiStepPointer:
- // Do the pointer copy directly so we get a write barrier.
- *(*unsafe.Pointer)(to) = valueRegs.Ptrs[vStep.ireg]
- case abiStepIntReg:
- intFromReg(valueRegs, vStep.ireg, vStep.size, to)
- case abiStepFloatReg:
- floatFromReg(valueRegs, vStep.freg, vStep.size, to)
- default:
- panic("unexpected value step")
- }
- }
- continue
- }
- // Handle register -> register translation.
- if len(valueSteps) != len(methodSteps) {
- // Because it's the same type for the value, and it's assigned
- // to registers both times, it should always take up the same
- // number of registers for each ABI.
- panic("method ABI and value ABI don't align")
- }
- for i, vStep := range valueSteps {
- mStep := methodSteps[i]
- if mStep.kind != vStep.kind {
- panic("method ABI and value ABI don't align")
- }
- switch vStep.kind {
- case abiStepPointer:
- // Copy this too, so we get a write barrier.
- methodRegs.Ptrs[mStep.ireg] = valueRegs.Ptrs[vStep.ireg]
- fallthrough
- case abiStepIntReg:
- methodRegs.Ints[mStep.ireg] = valueRegs.Ints[vStep.ireg]
- case abiStepFloatReg:
- methodRegs.Floats[mStep.freg] = valueRegs.Floats[vStep.freg]
- default:
- panic("unexpected value step")
- }
- }
- }
-
- methodFrameSize := methodFrameType.size
- // TODO(mknyszek): Remove this when we no longer have
- // caller reserved spill space.
- methodFrameSize = align(methodFrameSize, goarch.PtrSize)
- methodFrameSize += methodABI.spill
-
- // Mark pointers in registers for the return path.
- methodRegs.ReturnIsPtr = methodABI.outRegPtrs
-
- // Call.
- // Call copies the arguments from scratch to the stack, calls fn,
- // and then copies the results back into scratch.
- call(methodFrameType, methodFn, methodFrame, uint32(methodFrameType.size), uint32(methodABI.retOffset), uint32(methodFrameSize), &methodRegs)
-
- // Copy return values.
- //
- // This is somewhat simpler because both ABIs have an identical
- // return value ABI (the types are identical). As a result, register
- // results can simply be copied over. Stack-allocated values are laid
- // out the same, but are at different offsets from the start of the frame
- // because the arguments may be laid out differently.
- // Ignore any changes to args.
- // Avoid constructing out-of-bounds pointers if there are no return values.
- if valueRegs != nil {
- *valueRegs = methodRegs
- }
- if retSize := methodFrameType.size - methodABI.retOffset; retSize > 0 {
- valueRet := add(valueFrame, valueABI.retOffset, "valueFrame's size > retOffset")
- methodRet := add(methodFrame, methodABI.retOffset, "methodFrame's size > retOffset")
- // This copies to the stack. Write barriers are not needed.
- memmove(valueRet, methodRet, retSize)
- }
-
- // Tell the runtime it can now depend on the return values
- // being properly initialized.
- *retValid = true
-
- // Clear the scratch space and put it back in the pool.
- // This must happen after the statement above, so that the return
- // values will always be scanned by someone.
- typedmemclr(methodFrameType, methodFrame)
- methodFramePool.Put(methodFrame)
-
- // See the comment in callReflect.
- runtime.KeepAlive(ctxt)
-
- // Keep valueRegs alive because it may hold live pointer results.
- // The caller (methodValueCall) has it as a stack object, which is only
- // scanned when there is a reference to it.
- runtime.KeepAlive(valueRegs)
-}
-
-// funcName returns the name of f, for use in error messages.
-func funcName(f func([]Value) []Value) string {
- pc := *(*uintptr)(unsafe.Pointer(&f))
- rf := runtime.FuncForPC(pc)
- if rf != nil {
- return rf.Name()
- }
- return "closure"
-}
-
-// Cap returns v's capacity.
-// It panics if v's Kind is not Array, Chan, or Slice.
-func (v Value) Cap() int {
- k := v.kind()
- switch k {
- case Array:
- return v.typ.Len()
- case Chan:
- return chancap(v.pointer())
- case Slice:
- // Slice is always bigger than a word; assume flagIndir.
- return (*unsafeheader.Slice)(v.ptr).Cap
- }
- panic(&ValueError{"reflect.Value.Cap", v.kind()})
-}
-
-// Close closes the channel v.
-// It panics if v's Kind is not Chan.
-func (v Value) Close() {
- v.mustBe(Chan)
- v.mustBeExported()
- chanclose(v.pointer())
-}
-
-// CanComplex reports whether Complex can be used without panicking.
-func (v Value) CanComplex() bool {
- switch v.kind() {
- case Complex64, Complex128:
- return true
- default:
- return false
- }
-}
-
-// Complex returns v's underlying value, as a complex128.
- // It panics if v's Kind is not Complex64 or Complex128.
-func (v Value) Complex() complex128 {
- k := v.kind()
- switch k {
- case Complex64:
- return complex128(*(*complex64)(v.ptr))
- case Complex128:
- return *(*complex128)(v.ptr)
- }
- panic(&ValueError{"reflect.Value.Complex", v.kind()})
-}
-
-// Elem returns the value that the interface v contains
-// or that the pointer v points to.
-// It panics if v's Kind is not Interface or Pointer.
-// It returns the zero Value if v is nil.
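- //
- // Example (illustrative): modifying a value through a pointer.
- //
- //	x := 7
- //	v := reflect.ValueOf(&x).Elem() // refers to x
- //	v.SetInt(42)                    // x is now 42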
-func (v Value) Elem() Value {
- k := v.kind()
- switch k {
- case Interface:
- var eface any
- if v.typ.NumMethod() == 0 {
- eface = *(*any)(v.ptr)
- } else {
- eface = (any)(*(*interface {
- M()
- })(v.ptr))
- }
- x := unpackEface(eface)
- if x.flag != 0 {
- x.flag |= v.flag.ro()
- }
- return x
- case Pointer:
- ptr := v.ptr
- if v.flag&flagIndir != 0 {
- if ifaceIndir(v.typ) {
- // This is a pointer to a not-in-heap object. ptr points to a uintptr
- // in the heap. That uintptr is the address of a not-in-heap object.
- // In general, pointers to not-in-heap objects can be total junk.
- // But Elem() is asking to dereference it, so the user has asserted
- // that at least it is a valid pointer (not just an integer stored in
- // a pointer slot). So let's check, to make sure that it isn't a pointer
- // that the runtime will crash on if it sees it during GC or write barriers.
- // Since it is a not-in-heap pointer, all pointers to the heap are
- // forbidden! That makes the test pretty easy.
- // See issue 48399.
- if !verifyNotInHeapPtr(*(*uintptr)(ptr)) {
- panic("reflect: reflect.Value.Elem on an invalid notinheap pointer")
- }
- }
- ptr = *(*unsafe.Pointer)(ptr)
- }
- // The returned value's address is v's value.
- if ptr == nil {
- return Value{}
- }
- tt := (*ptrType)(unsafe.Pointer(v.typ))
- typ := tt.elem
- fl := v.flag&flagRO | flagIndir | flagAddr
- fl |= flag(typ.Kind())
- return Value{typ, ptr, fl}
- }
- panic(&ValueError{"reflect.Value.Elem", v.kind()})
-}
-
-// Field returns the i'th field of the struct v.
-// It panics if v's Kind is not Struct or i is out of range.
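- //
- // Example (illustrative):
- //
- //	type T struct{ A, B int }
- //	v := reflect.ValueOf(T{A: 1, B: 2})
- //	b := v.Field(1).Int() // 2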
-func (v Value) Field(i int) Value {
- if v.kind() != Struct {
- panic(&ValueError{"reflect.Value.Field", v.kind()})
- }
- tt := (*structType)(unsafe.Pointer(v.typ))
- if uint(i) >= uint(len(tt.fields)) {
- panic("reflect: Field index out of range")
- }
- field := &tt.fields[i]
- typ := field.typ
-
- // Inherit permission bits from v, but clear flagEmbedRO.
- fl := v.flag&(flagStickyRO|flagIndir|flagAddr) | flag(typ.Kind())
- // Using an unexported field forces flagRO.
- if !field.name.isExported() {
- if field.embedded() {
- fl |= flagEmbedRO
- } else {
- fl |= flagStickyRO
- }
- }
- // Either flagIndir is set and v.ptr points at struct,
- // or flagIndir is not set and v.ptr is the actual struct data.
- // In the former case, we want v.ptr + offset.
- // In the latter case, we must have field.offset = 0,
- // so v.ptr + field.offset is still the correct address.
- ptr := add(v.ptr, field.offset(), "same as non-reflect &v.field")
- return Value{typ, ptr, fl}
-}
-
-// FieldByIndex returns the nested field corresponding to index.
-// It panics if evaluation requires stepping through a nil
-// pointer or a field that is not a struct.
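- //
- // Example (illustrative): index {0, 0} selects the first field of an
- // embedded struct.
- //
- //	type Inner struct{ N int }
- //	type Outer struct{ Inner }
- //	n := reflect.ValueOf(Outer{Inner{N: 3}}).FieldByIndex([]int{0, 0}).Int() // 3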
-func (v Value) FieldByIndex(index []int) Value {
- if len(index) == 1 {
- return v.Field(index[0])
- }
- v.mustBe(Struct)
- for i, x := range index {
- if i > 0 {
- if v.Kind() == Pointer && v.typ.Elem().Kind() == Struct {
- if v.IsNil() {
- panic("reflect: indirection through nil pointer to embedded struct")
- }
- v = v.Elem()
- }
- }
- v = v.Field(x)
- }
- return v
-}
-
-// FieldByIndexErr returns the nested field corresponding to index.
-// It returns an error if evaluation requires stepping through a nil
-// pointer, but panics if it must step through a field that
-// is not a struct.
-func (v Value) FieldByIndexErr(index []int) (Value, error) {
- if len(index) == 1 {
- return v.Field(index[0]), nil
- }
- v.mustBe(Struct)
- for i, x := range index {
- if i > 0 {
- if v.Kind() == Ptr && v.typ.Elem().Kind() == Struct {
- if v.IsNil() {
- return Value{}, errors.New("reflect: indirection through nil pointer to embedded struct field " + v.typ.Elem().Name())
- }
- v = v.Elem()
- }
- }
- v = v.Field(x)
- }
- return v, nil
-}
-
-// FieldByName returns the struct field with the given name.
-// It returns the zero Value if no field was found.
-// It panics if v's Kind is not struct.
-func (v Value) FieldByName(name string) Value {
- v.mustBe(Struct)
- if f, ok := v.typ.FieldByName(name); ok {
- return v.FieldByIndex(f.Index)
- }
- return Value{}
-}
-
-// FieldByNameFunc returns the struct field with a name
-// that satisfies the match function.
-// It panics if v's Kind is not struct.
-// It returns the zero Value if no field was found.
-func (v Value) FieldByNameFunc(match func(string) bool) Value {
- if f, ok := v.typ.FieldByNameFunc(match); ok {
- return v.FieldByIndex(f.Index)
- }
- return Value{}
-}
-
-// CanFloat reports whether Float can be used without panicking.
-func (v Value) CanFloat() bool {
- switch v.kind() {
- case Float32, Float64:
- return true
- default:
- return false
- }
-}
-
-// Float returns v's underlying value, as a float64.
- // It panics if v's Kind is not Float32 or Float64.
-func (v Value) Float() float64 {
- k := v.kind()
- switch k {
- case Float32:
- return float64(*(*float32)(v.ptr))
- case Float64:
- return *(*float64)(v.ptr)
- }
- panic(&ValueError{"reflect.Value.Float", v.kind()})
-}
-
-var uint8Type = TypeOf(uint8(0)).(*rtype)
-
-// Index returns v's i'th element.
-// It panics if v's Kind is not Array, Slice, or String or i is out of range.
-func (v Value) Index(i int) Value {
- switch v.kind() {
- case Array:
- tt := (*arrayType)(unsafe.Pointer(v.typ))
- if uint(i) >= uint(tt.len) {
- panic("reflect: array index out of range")
- }
- typ := tt.elem
- offset := uintptr(i) * typ.size
-
- // Either flagIndir is set and v.ptr points at array,
- // or flagIndir is not set and v.ptr is the actual array data.
- // In the former case, we want v.ptr + offset.
- // In the latter case, we must be doing Index(0), so offset = 0,
- // so v.ptr + offset is still the correct address.
- val := add(v.ptr, offset, "same as &v[i], i < tt.len")
- fl := v.flag&(flagIndir|flagAddr) | v.flag.ro() | flag(typ.Kind()) // bits same as overall array
- return Value{typ, val, fl}
-
- case Slice:
- // Element flag same as Elem of Pointer.
- // Addressable, indirect, possibly read-only.
- s := (*unsafeheader.Slice)(v.ptr)
- if uint(i) >= uint(s.Len) {
- panic("reflect: slice index out of range")
- }
- tt := (*sliceType)(unsafe.Pointer(v.typ))
- typ := tt.elem
- val := arrayAt(s.Data, i, typ.size, "i < s.Len")
- fl := flagAddr | flagIndir | v.flag.ro() | flag(typ.Kind())
- return Value{typ, val, fl}
-
- case String:
- s := (*unsafeheader.String)(v.ptr)
- if uint(i) >= uint(s.Len) {
- panic("reflect: string index out of range")
- }
- p := arrayAt(s.Data, i, 1, "i < s.Len")
- fl := v.flag.ro() | flag(Uint8) | flagIndir
- return Value{uint8Type, p, fl}
- }
- panic(&ValueError{"reflect.Value.Index", v.kind()})
-}
-
-// CanInt reports whether Int can be used without panicking.
-func (v Value) CanInt() bool {
- switch v.kind() {
- case Int, Int8, Int16, Int32, Int64:
- return true
- default:
- return false
- }
-}
-
-// Int returns v's underlying value, as an int64.
-// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64.
-func (v Value) Int() int64 {
- k := v.kind()
- p := v.ptr
- switch k {
- case Int:
- return int64(*(*int)(p))
- case Int8:
- return int64(*(*int8)(p))
- case Int16:
- return int64(*(*int16)(p))
- case Int32:
- return int64(*(*int32)(p))
- case Int64:
- return *(*int64)(p)
- }
- panic(&ValueError{"reflect.Value.Int", v.kind()})
-}
-
-// CanInterface reports whether Interface can be used without panicking.
-func (v Value) CanInterface() bool {
- if v.flag == 0 {
- panic(&ValueError{"reflect.Value.CanInterface", Invalid})
- }
- return v.flag&flagRO == 0
-}
-
-// Interface returns v's current value as an interface{}.
-// It is equivalent to:
-// var i interface{} = (v's underlying value)
-// It panics if the Value was obtained by accessing
-// unexported struct fields.
-func (v Value) Interface() (i any) {
- return valueInterface(v, true)
-}
-
-func valueInterface(v Value, safe bool) any {
- if v.flag == 0 {
- panic(&ValueError{"reflect.Value.Interface", Invalid})
- }
- if safe && v.flag&flagRO != 0 {
- // Do not allow access to unexported values via Interface,
- // because they might be pointers that should not be
- // writable or methods or functions that should not be callable.
- panic("reflect.Value.Interface: cannot return value obtained from unexported field or method")
- }
- if v.flag&flagMethod != 0 {
- v = makeMethodValue("Interface", v)
- }
-
- if v.kind() == Interface {
- // Special case: return the element inside the interface.
- // Empty interface has one layout, all interfaces with
- // methods have a second layout.
- if v.NumMethod() == 0 {
- return *(*any)(v.ptr)
- }
- return *(*interface {
- M()
- })(v.ptr)
- }
-
- // TODO: pass safe to packEface so we don't need to copy if safe==true?
- return packEface(v)
-}
-
-// InterfaceData returns a pair of unspecified uintptr values.
-// It panics if v's Kind is not Interface.
-//
-// In earlier versions of Go, this function returned the interface's
-// value as a uintptr pair. As of Go 1.4, the implementation of
-// interface values precludes any defined use of InterfaceData.
-//
-// Deprecated: The memory representation of interface values is not
-// compatible with InterfaceData.
-func (v Value) InterfaceData() [2]uintptr {
- v.mustBe(Interface)
- // We treat this as a read operation, so we allow
- // it even for unexported data, because the caller
- // has to import "unsafe" to turn it into something
- // that can be abused.
- // Interface value is always bigger than a word; assume flagIndir.
- return *(*[2]uintptr)(v.ptr)
-}
-
-// IsNil reports whether its argument v is nil. The argument must be
-// a chan, func, interface, map, pointer, or slice value; if it is
-// not, IsNil panics. Note that IsNil is not always equivalent to a
-// regular comparison with nil in Go. For example, if v was created
-// by calling ValueOf with an uninitialized interface variable i,
-// i==nil will be true but v.IsNil will panic as v will be the zero
-// Value.
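- //
- // Example (illustrative):
- //
- //	var p *int
- //	ok := reflect.ValueOf(p).IsNil() // true: a typed nil pointer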
-func (v Value) IsNil() bool {
- k := v.kind()
- switch k {
- case Chan, Func, Map, Pointer, UnsafePointer:
- if v.flag&flagMethod != 0 {
- return false
- }
- ptr := v.ptr
- if v.flag&flagIndir != 0 {
- ptr = *(*unsafe.Pointer)(ptr)
- }
- return ptr == nil
- case Interface, Slice:
- // Both interface and slice are nil if first word is 0.
- // Both are always bigger than a word; assume flagIndir.
- return *(*unsafe.Pointer)(v.ptr) == nil
- }
- panic(&ValueError{"reflect.Value.IsNil", v.kind()})
-}
-
-// IsValid reports whether v represents a value.
-// It returns false if v is the zero Value.
-// If IsValid returns false, all other methods except String panic.
-// Most functions and methods never return an invalid Value.
-// If one does, its documentation states the conditions explicitly.
-func (v Value) IsValid() bool {
- return v.flag != 0
-}
-
-// IsZero reports whether v is the zero value for its type.
-// It panics if the argument is invalid.
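- //
- // Example (illustrative):
- //
- //	reflect.ValueOf(0).IsZero()          // true
- //	reflect.ValueOf("").IsZero()         // true
- //	reflect.ValueOf([]int(nil)).IsZero() // true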
-func (v Value) IsZero() bool {
- switch v.kind() {
- case Bool:
- return !v.Bool()
- case Int, Int8, Int16, Int32, Int64:
- return v.Int() == 0
- case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
- return v.Uint() == 0
- case Float32, Float64:
- return math.Float64bits(v.Float()) == 0
- case Complex64, Complex128:
- c := v.Complex()
- return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
- case Array:
- for i := 0; i < v.Len(); i++ {
- if !v.Index(i).IsZero() {
- return false
- }
- }
- return true
- case Chan, Func, Interface, Map, Pointer, Slice, UnsafePointer:
- return v.IsNil()
- case String:
- return v.Len() == 0
- case Struct:
- for i := 0; i < v.NumField(); i++ {
- if !v.Field(i).IsZero() {
- return false
- }
- }
- return true
- default:
- // This should never happen, but will act as a safeguard for
- // later, as a default value doesn't make sense here.
- panic(&ValueError{"reflect.Value.IsZero", v.Kind()})
- }
-}
-
-// Kind returns v's Kind.
-// If v is the zero Value (IsValid returns false), Kind returns Invalid.
-func (v Value) Kind() Kind {
- return v.kind()
-}
-
-// Len returns v's length.
-// It panics if v's Kind is not Array, Chan, Map, Slice, or String.
-func (v Value) Len() int {
- k := v.kind()
- switch k {
- case Array:
- tt := (*arrayType)(unsafe.Pointer(v.typ))
- return int(tt.len)
- case Chan:
- return chanlen(v.pointer())
- case Map:
- return maplen(v.pointer())
- case Slice:
- // Slice is bigger than a word; assume flagIndir.
- return (*unsafeheader.Slice)(v.ptr).Len
- case String:
- // String is bigger than a word; assume flagIndir.
- return (*unsafeheader.String)(v.ptr).Len
- }
- panic(&ValueError{"reflect.Value.Len", v.kind()})
-}
-
-var stringType = TypeOf("").(*rtype)
-
-// MapIndex returns the value associated with key in the map v.
-// It panics if v's Kind is not Map.
-// It returns the zero Value if key is not found in the map or if v represents a nil map.
-// As in Go, the key's value must be assignable to the map's key type.
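- //
- // Example (illustrative):
- //
- //	m := map[string]int{"a": 1}
- //	v := reflect.ValueOf(m).MapIndex(reflect.ValueOf("a"))       // holds 1
- //	missing := reflect.ValueOf(m).MapIndex(reflect.ValueOf("b")) // zero Value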
-func (v Value) MapIndex(key Value) Value {
- v.mustBe(Map)
- tt := (*mapType)(unsafe.Pointer(v.typ))
-
- // Do not require key to be exported, so that DeepEqual
- // and other programs can use all the keys returned by
- // MapKeys as arguments to MapIndex. If either the map
- // or the key is unexported, though, the result will be
- // considered unexported. This is consistent with the
- // behavior for structs, which allow read but not write
- // of unexported fields.
-
- var e unsafe.Pointer
- if (tt.key == stringType || key.kind() == String) && tt.key == key.typ && tt.elem.size <= maxValSize {
- k := *(*string)(key.ptr)
- e = mapaccess_faststr(v.typ, v.pointer(), k)
- } else {
- key = key.assignTo("reflect.Value.MapIndex", tt.key, nil)
- var k unsafe.Pointer
- if key.flag&flagIndir != 0 {
- k = key.ptr
- } else {
- k = unsafe.Pointer(&key.ptr)
- }
- e = mapaccess(v.typ, v.pointer(), k)
- }
- if e == nil {
- return Value{}
- }
- typ := tt.elem
- fl := (v.flag | key.flag).ro()
- fl |= flag(typ.Kind())
- return copyVal(typ, fl, e)
-}
-
-// MapKeys returns a slice containing all the keys present in the map,
-// in unspecified order.
-// It panics if v's Kind is not Map.
-// It returns an empty slice if v represents a nil map.
-func (v Value) MapKeys() []Value {
- v.mustBe(Map)
- tt := (*mapType)(unsafe.Pointer(v.typ))
- keyType := tt.key
-
- fl := v.flag.ro() | flag(keyType.Kind())
-
- m := v.pointer()
- mlen := int(0)
- if m != nil {
- mlen = maplen(m)
- }
- var it hiter
- mapiterinit(v.typ, m, &it)
- a := make([]Value, mlen)
- var i int
- for i = 0; i < len(a); i++ {
- key := mapiterkey(&it)
- if key == nil {
- // Someone deleted an entry from the map since we
- // called maplen above. It's a data race, but nothing
- // we can do about it.
- break
- }
- a[i] = copyVal(keyType, fl, key)
- mapiternext(&it)
- }
- return a[:i]
-}
-
-// hiter's structure matches runtime.hiter's structure.
-// Having a clone here allows us to embed a map iterator
-// inside type MapIter so that MapIters can be re-used
-// without doing any allocations.
-type hiter struct {
- key unsafe.Pointer
- elem unsafe.Pointer
- t unsafe.Pointer
- h unsafe.Pointer
- buckets unsafe.Pointer
- bptr unsafe.Pointer
- overflow *[]unsafe.Pointer
- oldoverflow *[]unsafe.Pointer
- startBucket uintptr
- offset uint8
- wrapped bool
- B uint8
- i uint8
- bucket uintptr
- checkBucket uintptr
-}
-
-func (h *hiter) initialized() bool {
- return h.t != nil
-}
-
-// A MapIter is an iterator for ranging over a map.
-// See Value.MapRange.
-type MapIter struct {
- m Value
- hiter hiter
-}
-
-// Key returns the key of iter's current map entry.
-func (iter *MapIter) Key() Value {
- if !iter.hiter.initialized() {
- panic("MapIter.Key called before Next")
- }
- iterkey := mapiterkey(&iter.hiter)
- if iterkey == nil {
- panic("MapIter.Key called on exhausted iterator")
- }
-
- t := (*mapType)(unsafe.Pointer(iter.m.typ))
- ktype := t.key
- return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey)
-}
-
-// SetIterKey assigns to v the key of iter's current map entry.
-// It is equivalent to v.Set(iter.Key()), but it avoids allocating a new Value.
-// As in Go, the key must be assignable to v's type.
-func (v Value) SetIterKey(iter *MapIter) {
- if !iter.hiter.initialized() {
- panic("reflect: Value.SetIterKey called before Next")
- }
- iterkey := mapiterkey(&iter.hiter)
- if iterkey == nil {
- panic("reflect: Value.SetIterKey called on exhausted iterator")
- }
-
- v.mustBeAssignable()
- var target unsafe.Pointer
- if v.kind() == Interface {
- target = v.ptr
- }
-
- t := (*mapType)(unsafe.Pointer(iter.m.typ))
- ktype := t.key
-
- key := Value{ktype, iterkey, iter.m.flag | flag(ktype.Kind()) | flagIndir}
- key = key.assignTo("reflect.MapIter.SetKey", v.typ, target)
- typedmemmove(v.typ, v.ptr, key.ptr)
-}
-
-// Value returns the value of iter's current map entry.
-func (iter *MapIter) Value() Value {
- if !iter.hiter.initialized() {
- panic("MapIter.Value called before Next")
- }
- iterelem := mapiterelem(&iter.hiter)
- if iterelem == nil {
- panic("MapIter.Value called on exhausted iterator")
- }
-
- t := (*mapType)(unsafe.Pointer(iter.m.typ))
- vtype := t.elem
- return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem)
-}
-
-// SetIterValue assigns to v the value of iter's current map entry.
-// It is equivalent to v.Set(iter.Value()), but it avoids allocating a new Value.
-// As in Go, the value must be assignable to v's type.
-func (v Value) SetIterValue(iter *MapIter) {
- if !iter.hiter.initialized() {
- panic("reflect: Value.SetIterValue called before Next")
- }
- iterelem := mapiterelem(&iter.hiter)
- if iterelem == nil {
- panic("reflect: Value.SetIterValue called on exhausted iterator")
- }
-
- v.mustBeAssignable()
- var target unsafe.Pointer
- if v.kind() == Interface {
- target = v.ptr
- }
-
- t := (*mapType)(unsafe.Pointer(iter.m.typ))
- vtype := t.elem
-
- elem := Value{vtype, iterelem, iter.m.flag | flag(vtype.Kind()) | flagIndir}
- elem = elem.assignTo("reflect.MapIter.SetValue", v.typ, target)
- typedmemmove(v.typ, v.ptr, elem.ptr)
-}
-
-// Next advances the map iterator and reports whether there is another
-// entry. It returns false when iter is exhausted; subsequent
-// calls to Key, Value, or Next will panic.
-func (iter *MapIter) Next() bool {
- if !iter.m.IsValid() {
- panic("MapIter.Next called on an iterator that does not have an associated map Value")
- }
- if !iter.hiter.initialized() {
- mapiterinit(iter.m.typ, iter.m.pointer(), &iter.hiter)
- } else {
- if mapiterkey(&iter.hiter) == nil {
- panic("MapIter.Next called on exhausted iterator")
- }
- mapiternext(&iter.hiter)
- }
- return mapiterkey(&iter.hiter) != nil
-}
-
-// Reset modifies iter to iterate over v.
-// It panics if v's Kind is not Map and v is not the zero Value.
- // Reset(Value{}) causes iter to not refer to any map,
-// which may allow the previously iterated-over map to be garbage collected.
-func (iter *MapIter) Reset(v Value) {
- if v.IsValid() {
- v.mustBe(Map)
- }
- iter.m = v
- iter.hiter = hiter{}
-}
-
-// MapRange returns a range iterator for a map.
-// It panics if v's Kind is not Map.
-//
-// Call Next to advance the iterator, and Key/Value to access each entry.
-// Next returns false when the iterator is exhausted.
-// MapRange follows the same iteration semantics as a range statement.
-//
-// Example:
-//
- //	iter := reflect.ValueOf(m).MapRange()
- //	for iter.Next() {
- //		k := iter.Key()
- //		v := iter.Value()
- //		...
- //	}
-//
-func (v Value) MapRange() *MapIter {
- v.mustBe(Map)
- return &MapIter{m: v}
-}
-
-// copyVal returns a Value containing the map key or value at ptr,
-// allocating a new variable as needed.
-func copyVal(typ *rtype, fl flag, ptr unsafe.Pointer) Value {
- if ifaceIndir(typ) {
- // Copy result so future changes to the map
- // won't change the underlying value.
- c := unsafe_New(typ)
- typedmemmove(typ, c, ptr)
- return Value{typ, c, fl | flagIndir}
- }
- return Value{typ, *(*unsafe.Pointer)(ptr), fl}
-}
-
-// Method returns a function value corresponding to v's i'th method.
-// The arguments to a Call on the returned function should not include
-// a receiver; the returned function will always use v as the receiver.
-// Method panics if i is out of range or if v is a nil interface value.
-func (v Value) Method(i int) Value {
- if v.typ == nil {
- panic(&ValueError{"reflect.Value.Method", Invalid})
- }
- if v.flag&flagMethod != 0 || uint(i) >= uint(v.typ.NumMethod()) {
- panic("reflect: Method index out of range")
- }
- if v.typ.Kind() == Interface && v.IsNil() {
- panic("reflect: Method on nil interface value")
- }
- fl := v.flag.ro() | (v.flag & flagIndir)
- fl |= flag(Func)
- fl |= flag(i)<<flagMethodShift | flagMethod
- return Value{v.typ, v.ptr, fl}
-}
-
-// NumMethod returns the number of exported methods in the value's method set.
-func (v Value) NumMethod() int {
- if v.typ == nil {
- panic(&ValueError{"reflect.Value.NumMethod", Invalid})
- }
- if v.flag&flagMethod != 0 {
- return 0
- }
- return v.typ.NumMethod()
-}
-
-// MethodByName returns a function value corresponding to the method
-// of v with the given name.
-// The arguments to a Call on the returned function should not include
-// a receiver; the returned function will always use v as the receiver.
-// It returns the zero Value if no method was found.
-func (v Value) MethodByName(name string) Value {
- if v.typ == nil {
- panic(&ValueError{"reflect.Value.MethodByName", Invalid})
- }
- if v.flag&flagMethod != 0 {
- return Value{}
- }
- m, ok := v.typ.MethodByName(name)
- if !ok {
- return Value{}
- }
- return v.Method(m.Index)
-}
-
-// NumField returns the number of fields in the struct v.
-// It panics if v's Kind is not Struct.
-func (v Value) NumField() int {
- v.mustBe(Struct)
- tt := (*structType)(unsafe.Pointer(v.typ))
- return len(tt.fields)
-}
-
-// OverflowComplex reports whether the complex128 x cannot be represented by v's type.
-// It panics if v's Kind is not Complex64 or Complex128.
-func (v Value) OverflowComplex(x complex128) bool {
- k := v.kind()
- switch k {
- case Complex64:
- return overflowFloat32(real(x)) || overflowFloat32(imag(x))
- case Complex128:
- return false
- }
- panic(&ValueError{"reflect.Value.OverflowComplex", v.kind()})
-}
-
-// OverflowFloat reports whether the float64 x cannot be represented by v's type.
-// It panics if v's Kind is not Float32 or Float64.
-func (v Value) OverflowFloat(x float64) bool {
- k := v.kind()
- switch k {
- case Float32:
- return overflowFloat32(x)
- case Float64:
- return false
- }
- panic(&ValueError{"reflect.Value.OverflowFloat", v.kind()})
-}
-
-func overflowFloat32(x float64) bool {
- if x < 0 {
- x = -x
- }
- return math.MaxFloat32 < x && x <= math.MaxFloat64
-}
-
-// OverflowInt reports whether the int64 x cannot be represented by v's type.
-// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64.
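- //
- // For example, an int8 holds -128..127, so
- //
- //	reflect.ValueOf(int8(0)).OverflowInt(200) // true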
-func (v Value) OverflowInt(x int64) bool {
- k := v.kind()
- switch k {
- case Int, Int8, Int16, Int32, Int64:
- bitSize := v.typ.size * 8
- trunc := (x << (64 - bitSize)) >> (64 - bitSize)
- return x != trunc
- }
- panic(&ValueError{"reflect.Value.OverflowInt", v.kind()})
-}
-
-// OverflowUint reports whether the uint64 x cannot be represented by v's type.
-// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
-func (v Value) OverflowUint(x uint64) bool {
- k := v.kind()
- switch k {
- case Uint, Uintptr, Uint8, Uint16, Uint32, Uint64:
- bitSize := v.typ.size * 8
- trunc := (x << (64 - bitSize)) >> (64 - bitSize)
- return x != trunc
- }
- panic(&ValueError{"reflect.Value.OverflowUint", v.kind()})
-}
-
-//go:nocheckptr
-// This prevents inlining Value.Pointer when -d=checkptr is enabled,
-// which ensures cmd/compile can recognize unsafe.Pointer(v.Pointer())
-// and make an exception.
-
-// Pointer returns v's value as a uintptr.
-// It returns uintptr instead of unsafe.Pointer so that
-// code using reflect cannot obtain unsafe.Pointers
-// without importing the unsafe package explicitly.
-// It panics if v's Kind is not Chan, Func, Map, Pointer, Slice, or UnsafePointer.
-//
-// If v's Kind is Func, the returned pointer is an underlying
-// code pointer, but not necessarily enough to identify a
-// single function uniquely. The only guarantee is that the
-// result is zero if and only if v is a nil func Value.
-//
-// If v's Kind is Slice, the returned pointer is to the first
-// element of the slice. If the slice is nil the returned value
-// is 0. If the slice is empty but non-nil the return value is non-zero.
-//
-// It's preferred to use uintptr(Value.UnsafePointer()) to get the equivalent result.
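- //
- // Example (illustrative):
- //
- //	x := 1
- //	p := reflect.ValueOf(&x).Pointer() // numerically equal to uintptr(unsafe.Pointer(&x))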
-func (v Value) Pointer() uintptr {
- k := v.kind()
- switch k {
- case Pointer:
- if v.typ.ptrdata == 0 {
- val := *(*uintptr)(v.ptr)
- // Since it is a not-in-heap pointer, all pointers to the heap are
- // forbidden! See comment in Value.Elem and issue #48399.
- if !verifyNotInHeapPtr(val) {
- panic("reflect: reflect.Value.Pointer on an invalid notinheap pointer")
- }
- return val
- }
- fallthrough
- case Chan, Map, UnsafePointer:
- return uintptr(v.pointer())
- case Func:
- if v.flag&flagMethod != 0 {
- // As the doc comment says, the returned pointer is an
- // underlying code pointer but not necessarily enough to
- // identify a single function uniquely. All method expressions
- // created via reflect have the same underlying code pointer,
- // so their Pointers are equal. The function used here must
- // match the one used in makeMethodValue.
- return methodValueCallCodePtr()
- }
- p := v.pointer()
- // Non-nil func value points at data block.
- // First word of data block is actual code.
- if p != nil {
- p = *(*unsafe.Pointer)(p)
- }
- return uintptr(p)
-
- case Slice:
- return (*SliceHeader)(v.ptr).Data
- }
- panic(&ValueError{"reflect.Value.Pointer", v.kind()})
-}
-
-// Recv receives and returns a value from the channel v.
-// It panics if v's Kind is not Chan.
-// The receive blocks until a value is ready.
-// The boolean value ok is true if the value x corresponds to a send
-// on the channel, false if it is a zero value received because the channel is closed.
-func (v Value) Recv() (x Value, ok bool) {
- v.mustBe(Chan)
- v.mustBeExported()
- return v.recv(false)
-}
-
-// internal recv, possibly non-blocking (nb).
-// v is known to be a channel.
-func (v Value) recv(nb bool) (val Value, ok bool) {
- tt := (*chanType)(unsafe.Pointer(v.typ))
- if ChanDir(tt.dir)&RecvDir == 0 {
- panic("reflect: recv on send-only channel")
- }
- t := tt.elem
- val = Value{t, nil, flag(t.Kind())}
- var p unsafe.Pointer
- if ifaceIndir(t) {
- p = unsafe_New(t)
- val.ptr = p
- val.flag |= flagIndir
- } else {
- p = unsafe.Pointer(&val.ptr)
- }
- selected, ok := chanrecv(v.pointer(), nb, p)
- if !selected {
- val = Value{}
- }
- return
-}
-
-// Send sends x on the channel v.
-// It panics if v's kind is not Chan or if x's type is not the same type as v's element type.
-// As in Go, x's value must be assignable to the channel's element type.
-func (v Value) Send(x Value) {
- v.mustBe(Chan)
- v.mustBeExported()
- v.send(x, false)
-}
-
-// internal send, possibly non-blocking.
-// v is known to be a channel.
-func (v Value) send(x Value, nb bool) (selected bool) {
- tt := (*chanType)(unsafe.Pointer(v.typ))
- if ChanDir(tt.dir)&SendDir == 0 {
- panic("reflect: send on recv-only channel")
- }
- x.mustBeExported()
- x = x.assignTo("reflect.Value.Send", tt.elem, nil)
- var p unsafe.Pointer
- if x.flag&flagIndir != 0 {
- p = x.ptr
- } else {
- p = unsafe.Pointer(&x.ptr)
- }
- return chansend(v.pointer(), p, nb)
-}
-
-// Set assigns x to the value v.
-// It panics if CanSet returns false.
-// As in Go, x's value must be assignable to v's type.
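- //
- // Example (illustrative):
- //
- //	var x int
- //	v := reflect.ValueOf(&x).Elem()
- //	v.Set(reflect.ValueOf(10)) // x is now 10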
-func (v Value) Set(x Value) {
- v.mustBeAssignable()
- x.mustBeExported() // do not let unexported x leak
- var target unsafe.Pointer
- if v.kind() == Interface {
- target = v.ptr
- }
- x = x.assignTo("reflect.Set", v.typ, target)
- if x.flag&flagIndir != 0 {
- if x.ptr == unsafe.Pointer(&zeroVal[0]) {
- typedmemclr(v.typ, v.ptr)
- } else {
- typedmemmove(v.typ, v.ptr, x.ptr)
- }
- } else {
- *(*unsafe.Pointer)(v.ptr) = x.ptr
- }
-}
-
-// SetBool sets v's underlying value.
-// It panics if v's Kind is not Bool or if CanSet() is false.
-func (v Value) SetBool(x bool) {
- v.mustBeAssignable()
- v.mustBe(Bool)
- *(*bool)(v.ptr) = x
-}
-
-// SetBytes sets v's underlying value.
-// It panics if v's underlying value is not a slice of bytes.
-func (v Value) SetBytes(x []byte) {
- v.mustBeAssignable()
- v.mustBe(Slice)
- if v.typ.Elem().Kind() != Uint8 {
- panic("reflect.Value.SetBytes of non-byte slice")
- }
- *(*[]byte)(v.ptr) = x
-}
-
-// setRunes sets v's underlying value.
-// It panics if v's underlying value is not a slice of runes (int32s).
-func (v Value) setRunes(x []rune) {
- v.mustBeAssignable()
- v.mustBe(Slice)
- if v.typ.Elem().Kind() != Int32 {
- panic("reflect.Value.setRunes of non-rune slice")
- }
- *(*[]rune)(v.ptr) = x
-}
-
-// SetComplex sets v's underlying value to x.
-// It panics if v's Kind is not Complex64 or Complex128, or if CanSet() is false.
-func (v Value) SetComplex(x complex128) {
- v.mustBeAssignable()
- switch k := v.kind(); k {
- default:
- panic(&ValueError{"reflect.Value.SetComplex", v.kind()})
- case Complex64:
- *(*complex64)(v.ptr) = complex64(x)
- case Complex128:
- *(*complex128)(v.ptr) = x
- }
-}
-
-// SetFloat sets v's underlying value to x.
-// It panics if v's Kind is not Float32 or Float64, or if CanSet() is false.
-func (v Value) SetFloat(x float64) {
- v.mustBeAssignable()
- switch k := v.kind(); k {
- default:
- panic(&ValueError{"reflect.Value.SetFloat", v.kind()})
- case Float32:
- *(*float32)(v.ptr) = float32(x)
- case Float64:
- *(*float64)(v.ptr) = x
- }
-}
-
-// SetInt sets v's underlying value to x.
-// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64, or if CanSet() is false.
-func (v Value) SetInt(x int64) {
- v.mustBeAssignable()
- switch k := v.kind(); k {
- default:
- panic(&ValueError{"reflect.Value.SetInt", v.kind()})
- case Int:
- *(*int)(v.ptr) = int(x)
- case Int8:
- *(*int8)(v.ptr) = int8(x)
- case Int16:
- *(*int16)(v.ptr) = int16(x)
- case Int32:
- *(*int32)(v.ptr) = int32(x)
- case Int64:
- *(*int64)(v.ptr) = x
- }
-}
-
-// SetLen sets v's length to n.
-// It panics if v's Kind is not Slice or if n is negative or
-// greater than the capacity of the slice.
-func (v Value) SetLen(n int) {
- v.mustBeAssignable()
- v.mustBe(Slice)
- s := (*unsafeheader.Slice)(v.ptr)
- if uint(n) > uint(s.Cap) {
- panic("reflect: slice length out of range in SetLen")
- }
- s.Len = n
-}
-
-// SetCap sets v's capacity to n.
-// It panics if v's Kind is not Slice or if n is smaller than the length or
-// greater than the capacity of the slice.
-func (v Value) SetCap(n int) {
- v.mustBeAssignable()
- v.mustBe(Slice)
- s := (*unsafeheader.Slice)(v.ptr)
- if n < s.Len || n > s.Cap {
- panic("reflect: slice capacity out of range in SetCap")
- }
- s.Cap = n
-}
-
-// SetMapIndex sets the element associated with key in the map v to elem.
-// It panics if v's Kind is not Map.
-// If elem is the zero Value, SetMapIndex deletes the key from the map.
-// Otherwise if v holds a nil map, SetMapIndex will panic.
-// As in Go, key's elem must be assignable to the map's key type,
-// and elem's value must be assignable to the map's elem type.
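- //
- // Example (illustrative):
- //
- //	m := map[string]int{}
- //	v := reflect.ValueOf(m)
- //	v.SetMapIndex(reflect.ValueOf("k"), reflect.ValueOf(1)) // m["k"] = 1
- //	v.SetMapIndex(reflect.ValueOf("k"), reflect.Value{})    // delete(m, "k")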
-func (v Value) SetMapIndex(key, elem Value) {
- v.mustBe(Map)
- v.mustBeExported()
- key.mustBeExported()
- tt := (*mapType)(unsafe.Pointer(v.typ))
-
- if (tt.key == stringType || key.kind() == String) && tt.key == key.typ && tt.elem.size <= maxValSize {
- k := *(*string)(key.ptr)
- if elem.typ == nil {
- mapdelete_faststr(v.typ, v.pointer(), k)
- return
- }
- elem.mustBeExported()
- elem = elem.assignTo("reflect.Value.SetMapIndex", tt.elem, nil)
- var e unsafe.Pointer
- if elem.flag&flagIndir != 0 {
- e = elem.ptr
- } else {
- e = unsafe.Pointer(&elem.ptr)
- }
- mapassign_faststr(v.typ, v.pointer(), k, e)
- return
- }
-
- key = key.assignTo("reflect.Value.SetMapIndex", tt.key, nil)
- var k unsafe.Pointer
- if key.flag&flagIndir != 0 {
- k = key.ptr
- } else {
- k = unsafe.Pointer(&key.ptr)
- }
- if elem.typ == nil {
- mapdelete(v.typ, v.pointer(), k)
- return
- }
- elem.mustBeExported()
- elem = elem.assignTo("reflect.Value.SetMapIndex", tt.elem, nil)
- var e unsafe.Pointer
- if elem.flag&flagIndir != 0 {
- e = elem.ptr
- } else {
- e = unsafe.Pointer(&elem.ptr)
- }
- mapassign(v.typ, v.pointer(), k, e)
-}
-
-// SetUint sets v's underlying value to x.
-// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64, or if CanSet() is false.
-func (v Value) SetUint(x uint64) {
- v.mustBeAssignable()
- switch k := v.kind(); k {
- default:
- panic(&ValueError{"reflect.Value.SetUint", v.kind()})
- case Uint:
- *(*uint)(v.ptr) = uint(x)
- case Uint8:
- *(*uint8)(v.ptr) = uint8(x)
- case Uint16:
- *(*uint16)(v.ptr) = uint16(x)
- case Uint32:
- *(*uint32)(v.ptr) = uint32(x)
- case Uint64:
- *(*uint64)(v.ptr) = x
- case Uintptr:
- *(*uintptr)(v.ptr) = uintptr(x)
- }
-}
-
-// SetPointer sets the unsafe.Pointer value v to x.
-// It panics if v's Kind is not UnsafePointer.
-func (v Value) SetPointer(x unsafe.Pointer) {
- v.mustBeAssignable()
- v.mustBe(UnsafePointer)
- *(*unsafe.Pointer)(v.ptr) = x
-}
-
-// SetString sets v's underlying value to x.
-// It panics if v's Kind is not String or if CanSet() is false.
-func (v Value) SetString(x string) {
- v.mustBeAssignable()
- v.mustBe(String)
- *(*string)(v.ptr) = x
-}
-
-// Slice returns v[i:j].
-// It panics if v's Kind is not Array, Slice or String, or if v is an unaddressable array,
-// or if the indexes are out of bounds.
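- //
- // Example (illustrative):
- //
- //	s := []int{1, 2, 3, 4}
- //	v := reflect.ValueOf(s).Slice(1, 3) // reflects []int{2, 3}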
-func (v Value) Slice(i, j int) Value {
- var (
- cap int
- typ *sliceType
- base unsafe.Pointer
- )
- switch kind := v.kind(); kind {
- default:
- panic(&ValueError{"reflect.Value.Slice", v.kind()})
-
- case Array:
- if v.flag&flagAddr == 0 {
- panic("reflect.Value.Slice: slice of unaddressable array")
- }
- tt := (*arrayType)(unsafe.Pointer(v.typ))
- cap = int(tt.len)
- typ = (*sliceType)(unsafe.Pointer(tt.slice))
- base = v.ptr
-
- case Slice:
- typ = (*sliceType)(unsafe.Pointer(v.typ))
- s := (*unsafeheader.Slice)(v.ptr)
- base = s.Data
- cap = s.Cap
-
- case String:
- s := (*unsafeheader.String)(v.ptr)
- if i < 0 || j < i || j > s.Len {
- panic("reflect.Value.Slice: string slice index out of bounds")
- }
- var t unsafeheader.String
- if i < s.Len {
- t = unsafeheader.String{Data: arrayAt(s.Data, i, 1, "i < s.Len"), Len: j - i}
- }
- return Value{v.typ, unsafe.Pointer(&t), v.flag}
- }
-
- if i < 0 || j < i || j > cap {
- panic("reflect.Value.Slice: slice index out of bounds")
- }
-
- // Declare slice so that gc can see the base pointer in it.
- var x []unsafe.Pointer
-
- // Reinterpret as *unsafeheader.Slice to edit.
- s := (*unsafeheader.Slice)(unsafe.Pointer(&x))
- s.Len = j - i
- s.Cap = cap - i
- if cap-i > 0 {
- s.Data = arrayAt(base, i, typ.elem.Size(), "i < cap")
- } else {
- // do not advance pointer, to avoid pointing beyond end of slice
- s.Data = base
- }
-
- fl := v.flag.ro() | flagIndir | flag(Slice)
- return Value{typ.common(), unsafe.Pointer(&x), fl}
-}
-
-// Slice3 is the 3-index form of the slice operation: it returns v[i:j:k].
-// It panics if v's Kind is not Array or Slice, or if v is an unaddressable array,
-// or if the indexes are out of bounds.
-func (v Value) Slice3(i, j, k int) Value {
- var (
- cap int
- typ *sliceType
- base unsafe.Pointer
- )
- switch kind := v.kind(); kind {
- default:
- panic(&ValueError{"reflect.Value.Slice3", v.kind()})
-
- case Array:
- if v.flag&flagAddr == 0 {
- panic("reflect.Value.Slice3: slice of unaddressable array")
- }
- tt := (*arrayType)(unsafe.Pointer(v.typ))
- cap = int(tt.len)
- typ = (*sliceType)(unsafe.Pointer(tt.slice))
- base = v.ptr
-
- case Slice:
- typ = (*sliceType)(unsafe.Pointer(v.typ))
- s := (*unsafeheader.Slice)(v.ptr)
- base = s.Data
- cap = s.Cap
- }
-
- if i < 0 || j < i || k < j || k > cap {
- panic("reflect.Value.Slice3: slice index out of bounds")
- }
-
- // Declare slice so that the garbage collector
- // can see the base pointer in it.
- var x []unsafe.Pointer
-
- // Reinterpret as *unsafeheader.Slice to edit.
- s := (*unsafeheader.Slice)(unsafe.Pointer(&x))
- s.Len = j - i
- s.Cap = k - i
- if k-i > 0 {
- s.Data = arrayAt(base, i, typ.elem.Size(), "i < k <= cap")
- } else {
- // do not advance pointer, to avoid pointing beyond end of slice
- s.Data = base
- }
-
- fl := v.flag.ro() | flagIndir | flag(Slice)
- return Value{typ.common(), unsafe.Pointer(&x), fl}
-}
-
-// String returns the string v's underlying value, as a string.
-// String is a special case because of Go's String method convention.
-// Unlike the other getters, it does not panic if v's Kind is not String.
-// Instead, it returns a string of the form "<T value>" where T is v's type.
-// The fmt package treats Values specially. It does not call their String
-// method implicitly but instead prints the concrete values they hold.
-func (v Value) String() string {
- switch k := v.kind(); k {
- case Invalid:
- return "<invalid Value>"
- case String:
- return *(*string)(v.ptr)
- }
- // If you call String on a reflect.Value of other type, it's better to
- // print something than to panic. Useful in debugging.
- return "<" + v.Type().String() + " Value>"
-}
-
-// TryRecv attempts to receive a value from the channel v but will not block.
-// It panics if v's Kind is not Chan.
-// If the receive delivers a value, x is the transferred value and ok is true.
-// If the receive cannot finish without blocking, x is the zero Value and ok is false.
-// If the channel is closed, x is the zero value for the channel's element type and ok is false.
-func (v Value) TryRecv() (x Value, ok bool) {
- v.mustBe(Chan)
- v.mustBeExported()
- return v.recv(true)
-}
-
-// TrySend attempts to send x on the channel v but will not block.
-// It panics if v's Kind is not Chan.
-// It reports whether the value was sent.
-// As in Go, x's value must be assignable to the channel's element type.
-func (v Value) TrySend(x Value) bool {
- v.mustBe(Chan)
- v.mustBeExported()
- return v.send(x, true)
-}
-
-// Type returns v's type.
-func (v Value) Type() Type {
- f := v.flag
- if f == 0 {
- panic(&ValueError{"reflect.Value.Type", Invalid})
- }
- if f&flagMethod == 0 {
- // Easy case
- return v.typ
- }
-
- // Method value.
- // v.typ describes the receiver, not the method type.
- i := int(v.flag) >> flagMethodShift
- if v.typ.Kind() == Interface {
- // Method on interface.
- tt := (*interfaceType)(unsafe.Pointer(v.typ))
- if uint(i) >= uint(len(tt.methods)) {
- panic("reflect: internal error: invalid method index")
- }
- m := &tt.methods[i]
- return v.typ.typeOff(m.typ)
- }
- // Method on concrete type.
- ms := v.typ.exportedMethods()
- if uint(i) >= uint(len(ms)) {
- panic("reflect: internal error: invalid method index")
- }
- m := ms[i]
- return v.typ.typeOff(m.mtyp)
-}
-
-// CanUint reports whether Uint can be used without panicking.
-func (v Value) CanUint() bool {
- switch v.kind() {
- case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
- return true
- default:
- return false
- }
-}
-
-// Uint returns v's underlying value, as a uint64.
-// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
-func (v Value) Uint() uint64 {
- k := v.kind()
- p := v.ptr
- switch k {
- case Uint:
- return uint64(*(*uint)(p))
- case Uint8:
- return uint64(*(*uint8)(p))
- case Uint16:
- return uint64(*(*uint16)(p))
- case Uint32:
- return uint64(*(*uint32)(p))
- case Uint64:
- return *(*uint64)(p)
- case Uintptr:
- return uint64(*(*uintptr)(p))
- }
- panic(&ValueError{"reflect.Value.Uint", v.kind()})
-}
-
-//go:nocheckptr
-// This prevents inlining Value.UnsafeAddr when -d=checkptr is enabled,
-// which ensures cmd/compile can recognize unsafe.Pointer(v.UnsafeAddr())
-// and make an exception.
-
-// UnsafeAddr returns a pointer to v's data, as a uintptr.
-// It is for advanced clients that also import the "unsafe" package.
-// It panics if v is not addressable.
-//
-// It's preferred to use uintptr(Value.Addr().UnsafePointer()) to get the equivalent result.
-func (v Value) UnsafeAddr() uintptr {
- if v.typ == nil {
- panic(&ValueError{"reflect.Value.UnsafeAddr", Invalid})
- }
- if v.flag&flagAddr == 0 {
- panic("reflect.Value.UnsafeAddr of unaddressable value")
- }
- return uintptr(v.ptr)
-}
-
-// UnsafePointer returns v's value as a unsafe.Pointer.
-// It panics if v's Kind is not Chan, Func, Map, Pointer, Slice, or UnsafePointer.
-//
-// If v's Kind is Func, the returned pointer is an underlying
-// code pointer, but not necessarily enough to identify a
-// single function uniquely. The only guarantee is that the
-// result is zero if and only if v is a nil func Value.
-//
-// If v's Kind is Slice, the returned pointer is to the first
-// element of the slice. If the slice is nil the returned value
-// is nil. If the slice is empty but non-nil the return value is non-nil.
-func (v Value) UnsafePointer() unsafe.Pointer {
- k := v.kind()
- switch k {
- case Pointer:
- if v.typ.ptrdata == 0 {
- // Since it is a not-in-heap pointer, all pointers to the heap are
- // forbidden! See comment in Value.Elem and issue #48399.
- if !verifyNotInHeapPtr(*(*uintptr)(v.ptr)) {
- panic("reflect: reflect.Value.UnsafePointer on an invalid notinheap pointer")
- }
- return *(*unsafe.Pointer)(v.ptr)
- }
- fallthrough
- case Chan, Map, UnsafePointer:
- return v.pointer()
- case Func:
- if v.flag&flagMethod != 0 {
- // As the doc comment says, the returned pointer is an
- // underlying code pointer but not necessarily enough to
- // identify a single function uniquely. All method expressions
- // created via reflect have the same underlying code pointer,
- // so their Pointers are equal. The function used here must
- // match the one used in makeMethodValue.
- code := methodValueCallCodePtr()
- return *(*unsafe.Pointer)(unsafe.Pointer(&code))
- }
- p := v.pointer()
- // Non-nil func value points at data block.
- // First word of data block is actual code.
- if p != nil {
- p = *(*unsafe.Pointer)(p)
- }
- return p
-
- case Slice:
- return (*unsafeheader.Slice)(v.ptr).Data
- }
- panic(&ValueError{"reflect.Value.UnsafePointer", v.kind()})
-}
-
-// StringHeader is the runtime representation of a string.
-// It cannot be used safely or portably and its representation may
-// change in a later release.
-// Moreover, the Data field is not sufficient to guarantee the data
-// it references will not be garbage collected, so programs must keep
-// a separate, correctly typed pointer to the underlying data.
-type StringHeader struct {
- Data uintptr
- Len int
-}
-
-// SliceHeader is the runtime representation of a slice.
-// It cannot be used safely or portably and its representation may
-// change in a later release.
-// Moreover, the Data field is not sufficient to guarantee the data
-// it references will not be garbage collected, so programs must keep
-// a separate, correctly typed pointer to the underlying data.
-type SliceHeader struct {
- Data uintptr
- Len int
- Cap int
-}
-
-func typesMustMatch(what string, t1, t2 Type) {
- if t1 != t2 {
- panic(what + ": " + t1.String() + " != " + t2.String())
- }
-}
-
-// arrayAt returns the i-th element of p,
-// an array whose elements are eltSize bytes wide.
-// The array pointed at by p must have at least i+1 elements:
-// it is invalid (but impossible to check here) to pass i >= len,
-// because then the result will point outside the array.
-// whySafe must explain why i < len. (Passing "i < len" is fine;
-// the benefit is to surface this assumption at the call site.)
-func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer {
- return add(p, uintptr(i)*eltSize, "i < len")
-}
-
-// grow grows the slice s so that it can hold extra more values, allocating
-// more capacity if needed. It also returns the old and new slice lengths.
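- //
- // Slices whose old length is below 256 double in capacity; longer ones
- // grow by about a quarter per step, e.g. cap 512 grows to
- // 512 + (512+3*256)/4 = 832.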
-func grow(s Value, extra int) (Value, int, int) {
- i0 := s.Len()
- i1 := i0 + extra
- if i1 < i0 {
- panic("reflect.Append: slice overflow")
- }
- m := s.Cap()
- if i1 <= m {
- return s.Slice(0, i1), i0, i1
- }
- if m == 0 {
- m = extra
- } else {
- const threshold = 256
- for m < i1 {
- if i0 < threshold {
- m += m
- } else {
- m += (m + 3*threshold) / 4
- }
- }
- }
- t := MakeSlice(s.Type(), i1, m)
- Copy(t, s)
- return t, i0, i1
-}
-
-// Append appends the values x to a slice s and returns the resulting slice.
-// As in Go, each x's value must be assignable to the slice's element type.
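- //
- // Example (illustrative):
- //
- //	s := reflect.ValueOf([]int{1})
- //	s = reflect.Append(s, reflect.ValueOf(2)) // reflects []int{1, 2}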
-func Append(s Value, x ...Value) Value {
- s.mustBe(Slice)
- s, i0, i1 := grow(s, len(x))
- for i, j := i0, 0; i < i1; i, j = i+1, j+1 {
- s.Index(i).Set(x[j])
- }
- return s
-}
-
-// AppendSlice appends a slice t to a slice s and returns the resulting slice.
-// The slices s and t must have the same element type.
-func AppendSlice(s, t Value) Value {
- s.mustBe(Slice)
- t.mustBe(Slice)
- typesMustMatch("reflect.AppendSlice", s.Type().Elem(), t.Type().Elem())
- s, i0, i1 := grow(s, t.Len())
- Copy(s.Slice(i0, i1), t)
- return s
-}
-
-// Copy copies the contents of src into dst until either
-// dst has been filled or src has been exhausted.
-// It returns the number of elements copied.
-// Dst and src each must have kind Slice or Array, and
-// dst and src must have the same element type.
-//
-// As a special case, src can have kind String if the element type of dst is kind Uint8.
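- //
- // Example (illustrative): copying a string into a byte slice.
- //
- //	dst := make([]byte, 3)
- //	n := reflect.Copy(reflect.ValueOf(dst), reflect.ValueOf("abc")) // n == 3, dst holds "abc"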
-func Copy(dst, src Value) int {
- dk := dst.kind()
- if dk != Array && dk != Slice {
- panic(&ValueError{"reflect.Copy", dk})
- }
- if dk == Array {
- dst.mustBeAssignable()
- }
- dst.mustBeExported()
-
- sk := src.kind()
- var stringCopy bool
- if sk != Array && sk != Slice {
- stringCopy = sk == String && dst.typ.Elem().Kind() == Uint8
- if !stringCopy {
- panic(&ValueError{"reflect.Copy", sk})
- }
- }
- src.mustBeExported()
-
- de := dst.typ.Elem()
- if !stringCopy {
- se := src.typ.Elem()
- typesMustMatch("reflect.Copy", de, se)
- }
-
- var ds, ss unsafeheader.Slice
- if dk == Array {
- ds.Data = dst.ptr
- ds.Len = dst.Len()
- ds.Cap = ds.Len
- } else {
- ds = *(*unsafeheader.Slice)(dst.ptr)
- }
- if sk == Array {
- ss.Data = src.ptr
- ss.Len = src.Len()
- ss.Cap = ss.Len
- } else if sk == Slice {
- ss = *(*unsafeheader.Slice)(src.ptr)
- } else {
- sh := *(*unsafeheader.String)(src.ptr)
- ss.Data = sh.Data
- ss.Len = sh.Len
- ss.Cap = sh.Len
- }
-
- return typedslicecopy(de.common(), ds, ss)
-}
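
A short usage sketch, including the string-into-[]uint8 special case documented above (values are illustrative):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	dst := reflect.ValueOf(make([]byte, 3))
	n := reflect.Copy(dst, reflect.ValueOf("hello")) // special case: string source, []uint8 destination
	fmt.Println(n, string(dst.Bytes()))              // 3 hel
}
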
-
-// A runtimeSelect is a single case passed to rselect.
-// This must match ../runtime/select.go:/runtimeSelect
-type runtimeSelect struct {
- dir SelectDir // SelectSend, SelectRecv or SelectDefault
- typ *rtype // channel type
- ch unsafe.Pointer // channel
- val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
-}
-
-// rselect runs a select. It returns the index of the chosen case.
-// If the case was a receive, val is filled in with the received value.
-// The conventional OK bool indicates whether the receive corresponds
-// to a sent value.
-//go:noescape
-func rselect([]runtimeSelect) (chosen int, recvOK bool)
-
-// A SelectDir describes the communication direction of a select case.
-type SelectDir int
-
-// NOTE: These values must match ../runtime/select.go:/selectDir.
-
-const (
- _ SelectDir = iota
- SelectSend // case Chan <- Send
- SelectRecv // case <-Chan:
- SelectDefault // default
-)
-
-// A SelectCase describes a single case in a select operation.
-// The kind of case depends on Dir, the communication direction.
-//
-// If Dir is SelectDefault, the case represents a default case.
-// Chan and Send must be zero Values.
-//
-// If Dir is SelectSend, the case represents a send operation.
-// Normally Chan's underlying value must be a channel, and Send's underlying value must be
-// assignable to the channel's element type. As a special case, if Chan is a zero Value,
-// then the case is ignored, and the field Send will also be ignored and may be either zero
-// or non-zero.
-//
-// If Dir is SelectRecv, the case represents a receive operation.
-// Normally Chan's underlying value must be a channel and Send must be a zero Value.
-// If Chan is a zero Value, then the case is ignored, but Send must still be a zero Value.
-// When a receive operation is selected, the received Value is returned by Select.
-//
-type SelectCase struct {
- Dir SelectDir // direction of case
- Chan Value // channel to use (for send or receive)
- Send Value // value to send (for send)
-}
-
-// Select executes a select operation described by the list of cases.
-// Like the Go select statement, it blocks until at least one of the cases
-// can proceed, makes a uniform pseudo-random choice,
-// and then executes that case. It returns the index of the chosen case
-// and, if that case was a receive operation, the value received and a
-// boolean indicating whether the value corresponds to a send on the channel
-// (as opposed to a zero value received because the channel is closed).
-// Select supports a maximum of 65536 cases.
-func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) {
- if len(cases) > 65536 {
- panic("reflect.Select: too many cases (max 65536)")
- }
- // NOTE: Do not trust that caller is not modifying cases data underfoot.
- // The range is safe because the caller cannot modify our copy of the len
- // and each iteration makes its own copy of the value c.
- var runcases []runtimeSelect
- if len(cases) > 4 {
- // Slice is heap allocated due to runtime dependent capacity.
- runcases = make([]runtimeSelect, len(cases))
- } else {
- // Slice can be stack allocated due to constant capacity.
- runcases = make([]runtimeSelect, len(cases), 4)
- }
-
- haveDefault := false
- for i, c := range cases {
- rc := &runcases[i]
- rc.dir = c.Dir
- switch c.Dir {
- default:
- panic("reflect.Select: invalid Dir")
-
- case SelectDefault: // default
- if haveDefault {
- panic("reflect.Select: multiple default cases")
- }
- haveDefault = true
- if c.Chan.IsValid() {
- panic("reflect.Select: default case has Chan value")
- }
- if c.Send.IsValid() {
- panic("reflect.Select: default case has Send value")
- }
-
- case SelectSend:
- ch := c.Chan
- if !ch.IsValid() {
- break
- }
- ch.mustBe(Chan)
- ch.mustBeExported()
- tt := (*chanType)(unsafe.Pointer(ch.typ))
- if ChanDir(tt.dir)&SendDir == 0 {
- panic("reflect.Select: SendDir case using recv-only channel")
- }
- rc.ch = ch.pointer()
- rc.typ = &tt.rtype
- v := c.Send
- if !v.IsValid() {
- panic("reflect.Select: SendDir case missing Send value")
- }
- v.mustBeExported()
- v = v.assignTo("reflect.Select", tt.elem, nil)
- if v.flag&flagIndir != 0 {
- rc.val = v.ptr
- } else {
- rc.val = unsafe.Pointer(&v.ptr)
- }
-
- case SelectRecv:
- if c.Send.IsValid() {
- panic("reflect.Select: RecvDir case has Send value")
- }
- ch := c.Chan
- if !ch.IsValid() {
- break
- }
- ch.mustBe(Chan)
- ch.mustBeExported()
- tt := (*chanType)(unsafe.Pointer(ch.typ))
- if ChanDir(tt.dir)&RecvDir == 0 {
- panic("reflect.Select: RecvDir case using send-only channel")
- }
- rc.ch = ch.pointer()
- rc.typ = &tt.rtype
- rc.val = unsafe_New(tt.elem)
- }
- }
-
- chosen, recvOK = rselect(runcases)
- if runcases[chosen].dir == SelectRecv {
- tt := (*chanType)(unsafe.Pointer(runcases[chosen].typ))
- t := tt.elem
- p := runcases[chosen].val
- fl := flag(t.Kind())
- if ifaceIndir(t) {
- recv = Value{t, p, fl | flagIndir}
- } else {
- recv = Value{t, *(*unsafe.Pointer)(p), fl}
- }
- }
- return chosen, recv, recvOK
-}
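
A minimal caller-side sketch of Select with one receive case and a default (values are illustrative):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	ch := make(chan int, 1)
	ch <- 42
	cases := []reflect.SelectCase{
		{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)},
		{Dir: reflect.SelectDefault},
	}
	chosen, recv, recvOK := reflect.Select(cases)
	fmt.Println(chosen, recv.Interface(), recvOK) // 0 42 true
}
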
-
-/*
- * constructors
- */
-
-// implemented in package runtime
-func unsafe_New(*rtype) unsafe.Pointer
-func unsafe_NewArray(*rtype, int) unsafe.Pointer
-
-// MakeSlice creates a new zero-initialized slice value
-// for the specified slice type, length, and capacity.
-func MakeSlice(typ Type, len, cap int) Value {
- if typ.Kind() != Slice {
- panic("reflect.MakeSlice of non-slice type")
- }
- if len < 0 {
- panic("reflect.MakeSlice: negative len")
- }
- if cap < 0 {
- panic("reflect.MakeSlice: negative cap")
- }
- if len > cap {
- panic("reflect.MakeSlice: len > cap")
- }
-
- s := unsafeheader.Slice{Data: unsafe_NewArray(typ.Elem().(*rtype), cap), Len: len, Cap: cap}
- return Value{typ.(*rtype), unsafe.Pointer(&s), flagIndir | flag(Slice)}
-}
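
Caller-side usage (the slice type is passed as a reflect.Type; values are illustrative):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	s := reflect.MakeSlice(reflect.TypeOf([]int(nil)), 2, 8)
	fmt.Println(s.Len(), s.Cap(), s.Interface()) // 2 8 [0 0]
}
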
-
-// MakeChan creates a new channel with the specified type and buffer size.
-func MakeChan(typ Type, buffer int) Value {
- if typ.Kind() != Chan {
- panic("reflect.MakeChan of non-chan type")
- }
- if buffer < 0 {
- panic("reflect.MakeChan: negative buffer size")
- }
- if typ.ChanDir() != BothDir {
- panic("reflect.MakeChan: unidirectional channel type")
- }
- t := typ.(*rtype)
- ch := makechan(t, buffer)
- return Value{t, ch, flag(Chan)}
-}
-
-// MakeMap creates a new map with the specified type.
-func MakeMap(typ Type) Value {
- return MakeMapWithSize(typ, 0)
-}
-
-// MakeMapWithSize creates a new map with the specified type
-// and initial space for approximately n elements.
-func MakeMapWithSize(typ Type, n int) Value {
- if typ.Kind() != Map {
- panic("reflect.MakeMapWithSize of non-map type")
- }
- t := typ.(*rtype)
- m := makemap(t, n)
- return Value{t, m, flag(Map)}
-}
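
Caller-side usage of MakeMapWithSize (the size hint only preallocates space; values are illustrative):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := reflect.MakeMapWithSize(reflect.TypeOf(map[string]int(nil)), 10)
	m.SetMapIndex(reflect.ValueOf("a"), reflect.ValueOf(1))
	fmt.Println(m.Len(), m.Interface()) // 1 map[a:1]
}
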
-
-// Indirect returns the value that v points to.
-// If v is a nil pointer, Indirect returns a zero Value.
-// If v is not a pointer, Indirect returns v.
-func Indirect(v Value) Value {
- if v.Kind() != Pointer {
- return v
- }
- return v.Elem()
-}
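
The three documented behaviors of Indirect in one sketch (values are illustrative):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	x := 7
	fmt.Println(reflect.Indirect(reflect.ValueOf(&x)).Int()) // 7: follows the pointer
	fmt.Println(reflect.Indirect(reflect.ValueOf(x)).Int())  // 7: non-pointer returned as is
	var np *int
	fmt.Println(reflect.Indirect(reflect.ValueOf(np)).IsValid()) // false: nil pointer yields the zero Value
}
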
-
-// ValueOf returns a new Value initialized to the concrete value
-// stored in the interface i. ValueOf(nil) returns the zero Value.
-func ValueOf(i any) Value {
- if i == nil {
- return Value{}
- }
-
- // TODO: Maybe allow contents of a Value to live on the stack.
- // For now we make the contents always escape to the heap. It
- // makes life easier in a few places (see chanrecv/mapassign
- // comment below).
- escapes(i)
-
- return unpackEface(i)
-}
-
-// Zero returns a Value representing the zero value for the specified type.
-// The result is different from the zero value of the Value struct,
-// which represents no value at all.
-// For example, Zero(TypeOf(42)) returns a Value with Kind Int and value 0.
-// The returned value is neither addressable nor settable.
-func Zero(typ Type) Value {
- if typ == nil {
- panic("reflect: Zero(nil)")
- }
- t := typ.(*rtype)
- fl := flag(t.Kind())
- if ifaceIndir(t) {
- var p unsafe.Pointer
- if t.size <= maxZero {
- p = unsafe.Pointer(&zeroVal[0])
- } else {
- p = unsafe_New(t)
- }
- return Value{t, p, fl | flagIndir}
- }
- return Value{t, nil, fl}
-}
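
A sketch of the distinction drawn in the comment above between Zero(typ) and the zero Value struct:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	z := reflect.Zero(reflect.TypeOf(42))
	fmt.Println(z.Kind(), z.Int(), z.CanSet()) // int 0 false: neither addressable nor settable
	var none reflect.Value                     // represents no value at all
	fmt.Println(none.IsValid(), z.IsValid())   // false true
}
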
-
-// must match declarations in runtime/map.go.
-const maxZero = 1024
-
-//go:linkname zeroVal runtime.zeroVal
-var zeroVal [maxZero]byte
-
-// New returns a Value representing a pointer to a new zero value
-// for the specified type. That is, the returned Value's Type is PointerTo(typ).
-func New(typ Type) Value {
- if typ == nil {
- panic("reflect: New(nil)")
- }
- t := typ.(*rtype)
- pt := t.ptrTo()
- if ifaceIndir(pt) {
- // This is a pointer to a go:notinheap type.
- panic("reflect: New of type that may not be allocated in heap (possibly undefined cgo C type)")
- }
- ptr := unsafe_New(t)
- fl := flag(Pointer)
- return Value{pt, ptr, fl}
-}
-
-// NewAt returns a Value representing a pointer to a value of the
-// specified type, using p as that pointer.
-func NewAt(typ Type, p unsafe.Pointer) Value {
- fl := flag(Pointer)
- t := typ.(*rtype)
- return Value{t.ptrTo(), p, fl}
-}
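
Caller-side usage of New and NewAt (NewAt is shown aliasing a local variable through unsafe.Pointer, purely for illustration):

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

func main() {
	p := reflect.New(reflect.TypeOf(0)) // type *int, pointing at a fresh zero int
	p.Elem().SetInt(5)
	fmt.Println(p.Elem().Int()) // 5

	var x int64
	q := reflect.NewAt(reflect.TypeOf(int64(0)), unsafe.Pointer(&x)) // *int64 aliasing x
	q.Elem().SetInt(9)
	fmt.Println(x) // 9
}
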
-
-// assignTo returns a value v that can be assigned directly to typ.
-// It panics if v is not assignable to typ.
-// For a conversion to an interface type, target is a suggested scratch space to use.
-// target must be initialized memory (or nil).
-func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value {
- if v.flag&flagMethod != 0 {
- v = makeMethodValue(context, v)
- }
-
- switch {
- case directlyAssignable(dst, v.typ):
- // Overwrite type so that they match.
- // Same memory layout, so no harm done.
- fl := v.flag&(flagAddr|flagIndir) | v.flag.ro()
- fl |= flag(dst.Kind())
- return Value{dst, v.ptr, fl}
-
- case implements(dst, v.typ):
- if target == nil {
- target = unsafe_New(dst)
- }
- if v.Kind() == Interface && v.IsNil() {
- // A nil ReadWriter passed to nil Reader is OK,
- // but using ifaceE2I below will panic.
- // Avoid the panic by returning a nil dst (e.g., Reader) explicitly.
- return Value{dst, nil, flag(Interface)}
- }
- x := valueInterface(v, false)
- if dst.NumMethod() == 0 {
- *(*any)(target) = x
- } else {
- ifaceE2I(dst, x, target)
- }
- return Value{dst, target, flagIndir | flag(Interface)}
- }
-
- // Failed.
- panic(context + ": value of type " + v.typ.String() + " is not assignable to type " + dst.String())
-}
-
-// Convert returns the value v converted to type t.
-// If the usual Go conversion rules do not allow conversion
-// of the value v to type t, or if converting v to type t panics, Convert panics.
-func (v Value) Convert(t Type) Value {
- if v.flag&flagMethod != 0 {
- v = makeMethodValue("Convert", v)
- }
- op := convertOp(t.common(), v.typ)
- if op == nil {
- panic("reflect.Value.Convert: value of type " + v.typ.String() + " cannot be converted to type " + t.String())
- }
- return op(v, t)
-}
-
-// CanConvert reports whether the value v can be converted to type t.
-// If v.CanConvert(t) returns true then v.Convert(t) will not panic.
-func (v Value) CanConvert(t Type) bool {
- vt := v.Type()
- if !vt.ConvertibleTo(t) {
- return false
- }
- // Currently the only conversion that is OK in terms of type
- // but that can panic depending on the value is converting
- // from slice to pointer-to-array.
- if vt.Kind() == Slice && t.Kind() == Pointer && t.Elem().Kind() == Array {
- n := t.Elem().Len()
- if n > v.Len() {
- return false
- }
- }
- return true
-}
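
The slice-to-pointer-to-array case called out above is the one conversion where CanConvert depends on the value, not just the types (values are illustrative):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	v := reflect.ValueOf([]byte{1, 2, 3, 4})
	pt := reflect.TypeOf((*[4]byte)(nil))
	if v.CanConvert(pt) {
		p := v.Convert(pt).Interface().(*[4]byte)
		fmt.Println(p[3]) // 4
	}
	fmt.Println(v.CanConvert(reflect.TypeOf((*[8]byte)(nil)))) // false: the slice is too short
}
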
-
-// convertOp returns the function to convert a value of type src
-// to a value of type dst. If the conversion is illegal, convertOp returns nil.
-func convertOp(dst, src *rtype) func(Value, Type) Value {
- switch src.Kind() {
- case Int, Int8, Int16, Int32, Int64:
- switch dst.Kind() {
- case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
- return cvtInt
- case Float32, Float64:
- return cvtIntFloat
- case String:
- return cvtIntString
- }
-
- case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
- switch dst.Kind() {
- case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
- return cvtUint
- case Float32, Float64:
- return cvtUintFloat
- case String:
- return cvtUintString
- }
-
- case Float32, Float64:
- switch dst.Kind() {
- case Int, Int8, Int16, Int32, Int64:
- return cvtFloatInt
- case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
- return cvtFloatUint
- case Float32, Float64:
- return cvtFloat
- }
-
- case Complex64, Complex128:
- switch dst.Kind() {
- case Complex64, Complex128:
- return cvtComplex
- }
-
- case String:
- if dst.Kind() == Slice && dst.Elem().PkgPath() == "" {
- switch dst.Elem().Kind() {
- case Uint8:
- return cvtStringBytes
- case Int32:
- return cvtStringRunes
- }
- }
-
- case Slice:
- if dst.Kind() == String && src.Elem().PkgPath() == "" {
- switch src.Elem().Kind() {
- case Uint8:
- return cvtBytesString
- case Int32:
- return cvtRunesString
- }
- }
- // "x is a slice, T is a pointer-to-array type,
- // and the slice and array types have identical element types."
- if dst.Kind() == Pointer && dst.Elem().Kind() == Array && src.Elem() == dst.Elem().Elem() {
- return cvtSliceArrayPtr
- }
-
- case Chan:
- if dst.Kind() == Chan && specialChannelAssignability(dst, src) {
- return cvtDirect
- }
- }
-
- // dst and src have same underlying type.
- if haveIdenticalUnderlyingType(dst, src, false) {
- return cvtDirect
- }
-
- // dst and src are non-defined pointer types with same underlying base type.
- if dst.Kind() == Pointer && dst.Name() == "" &&
- src.Kind() == Pointer && src.Name() == "" &&
- haveIdenticalUnderlyingType(dst.Elem().common(), src.Elem().common(), false) {
- return cvtDirect
- }
-
- if implements(dst, src) {
- if src.Kind() == Interface {
- return cvtI2I
- }
- return cvtT2I
- }
-
- return nil
-}
-
-// makeInt returns a Value of type t equal to bits (possibly truncated),
-// where t is a signed or unsigned int type.
-func makeInt(f flag, bits uint64, t Type) Value {
- typ := t.common()
- ptr := unsafe_New(typ)
- switch typ.size {
- case 1:
- *(*uint8)(ptr) = uint8(bits)
- case 2:
- *(*uint16)(ptr) = uint16(bits)
- case 4:
- *(*uint32)(ptr) = uint32(bits)
- case 8:
- *(*uint64)(ptr) = bits
- }
- return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
-}
-
-// makeFloat returns a Value of type t equal to v (possibly truncated to float32),
-// where t is a float32 or float64 type.
-func makeFloat(f flag, v float64, t Type) Value {
- typ := t.common()
- ptr := unsafe_New(typ)
- switch typ.size {
- case 4:
- *(*float32)(ptr) = float32(v)
- case 8:
- *(*float64)(ptr) = v
- }
- return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
-}
-
-// makeFloat32 returns a Value of type t equal to v, where t is a float32 type.
-func makeFloat32(f flag, v float32, t Type) Value {
- typ := t.common()
- ptr := unsafe_New(typ)
- *(*float32)(ptr) = v
- return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
-}
-
-// makeComplex returns a Value of type t equal to v (possibly truncated to complex64),
-// where t is a complex64 or complex128 type.
-func makeComplex(f flag, v complex128, t Type) Value {
- typ := t.common()
- ptr := unsafe_New(typ)
- switch typ.size {
- case 8:
- *(*complex64)(ptr) = complex64(v)
- case 16:
- *(*complex128)(ptr) = v
- }
- return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
-}
-
-func makeString(f flag, v string, t Type) Value {
- ret := New(t).Elem()
- ret.SetString(v)
- ret.flag = ret.flag&^flagAddr | f
- return ret
-}
-
-func makeBytes(f flag, v []byte, t Type) Value {
- ret := New(t).Elem()
- ret.SetBytes(v)
- ret.flag = ret.flag&^flagAddr | f
- return ret
-}
-
-func makeRunes(f flag, v []rune, t Type) Value {
- ret := New(t).Elem()
- ret.setRunes(v)
- ret.flag = ret.flag&^flagAddr | f
- return ret
-}
-
-// These conversion functions are returned by convertOp
-// for classes of conversions. For example, the first function, cvtInt,
-// takes any value v of signed int type and returns the value converted
-// to type t, where t is any signed or unsigned int type.
-
-// convertOp: intXX -> [u]intXX
-func cvtInt(v Value, t Type) Value {
- return makeInt(v.flag.ro(), uint64(v.Int()), t)
-}
-
-// convertOp: uintXX -> [u]intXX
-func cvtUint(v Value, t Type) Value {
- return makeInt(v.flag.ro(), v.Uint(), t)
-}
-
-// convertOp: floatXX -> intXX
-func cvtFloatInt(v Value, t Type) Value {
- return makeInt(v.flag.ro(), uint64(int64(v.Float())), t)
-}
-
-// convertOp: floatXX -> uintXX
-func cvtFloatUint(v Value, t Type) Value {
- return makeInt(v.flag.ro(), uint64(v.Float()), t)
-}
-
-// convertOp: intXX -> floatXX
-func cvtIntFloat(v Value, t Type) Value {
- return makeFloat(v.flag.ro(), float64(v.Int()), t)
-}
-
-// convertOp: uintXX -> floatXX
-func cvtUintFloat(v Value, t Type) Value {
- return makeFloat(v.flag.ro(), float64(v.Uint()), t)
-}
-
-// convertOp: floatXX -> floatXX
-func cvtFloat(v Value, t Type) Value {
- if v.Type().Kind() == Float32 && t.Kind() == Float32 {
- // Don't do any conversion if both types have underlying type float32.
- // This avoids converting to float64 and back, which will
- // convert a signaling NaN to a quiet NaN. See issue 36400.
- return makeFloat32(v.flag.ro(), *(*float32)(v.ptr), t)
- }
- return makeFloat(v.flag.ro(), v.Float(), t)
-}
-
-// convertOp: complexXX -> complexXX
-func cvtComplex(v Value, t Type) Value {
- return makeComplex(v.flag.ro(), v.Complex(), t)
-}
-
-// convertOp: intXX -> string
-func cvtIntString(v Value, t Type) Value {
- s := "\uFFFD"
- if x := v.Int(); int64(rune(x)) == x {
- s = string(rune(x))
- }
- return makeString(v.flag.ro(), s, t)
-}
-
-// convertOp: uintXX -> string
-func cvtUintString(v Value, t Type) Value {
- s := "\uFFFD"
- if x := v.Uint(); uint64(rune(x)) == x {
- s = string(rune(x))
- }
- return makeString(v.flag.ro(), s, t)
-}
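
Both conversions above substitute "\uFFFD" when the integer does not round-trip through a rune, observable through the public Convert API (values are illustrative):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	st := reflect.TypeOf("")
	fmt.Println(reflect.ValueOf(int64(65)).Convert(st).Interface())    // A
	fmt.Println(reflect.ValueOf(int64(1)<<40).Convert(st).Interface()) // \uFFFD: does not fit in a rune
}
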
-
-// convertOp: []byte -> string
-func cvtBytesString(v Value, t Type) Value {
- return makeString(v.flag.ro(), string(v.Bytes()), t)
-}
-
-// convertOp: string -> []byte
-func cvtStringBytes(v Value, t Type) Value {
- return makeBytes(v.flag.ro(), []byte(v.String()), t)
-}
-
-// convertOp: []rune -> string
-func cvtRunesString(v Value, t Type) Value {
- return makeString(v.flag.ro(), string(v.runes()), t)
-}
-
-// convertOp: string -> []rune
-func cvtStringRunes(v Value, t Type) Value {
- return makeRunes(v.flag.ro(), []rune(v.String()), t)
-}
-
-// convertOp: []T -> *[N]T
-func cvtSliceArrayPtr(v Value, t Type) Value {
- n := t.Elem().Len()
- if n > v.Len() {
- panic("reflect: cannot convert slice with length " + itoa.Itoa(v.Len()) + " to pointer to array with length " + itoa.Itoa(n))
- }
- h := (*unsafeheader.Slice)(v.ptr)
- return Value{t.common(), h.Data, v.flag&^(flagIndir|flagAddr|flagKindMask) | flag(Pointer)}
-}
-
-// convertOp: direct copy
-func cvtDirect(v Value, typ Type) Value {
- f := v.flag
- t := typ.common()
- ptr := v.ptr
- if f&flagAddr != 0 {
- // indirect, mutable word - make a copy
- c := unsafe_New(t)
- typedmemmove(t, c, ptr)
- ptr = c
- f &^= flagAddr
- }
- return Value{t, ptr, v.flag.ro() | f} // v.flag.ro()|f == f?
-}
-
-// convertOp: concrete -> interface
-func cvtT2I(v Value, typ Type) Value {
- target := unsafe_New(typ.common())
- x := valueInterface(v, false)
- if typ.NumMethod() == 0 {
- *(*any)(target) = x
- } else {
- ifaceE2I(typ.(*rtype), x, target)
- }
- return Value{typ.common(), target, v.flag.ro() | flagIndir | flag(Interface)}
-}
-
-// convertOp: interface -> interface
-func cvtI2I(v Value, typ Type) Value {
- if v.IsNil() {
- ret := Zero(typ)
- ret.flag |= v.flag.ro()
- return ret
- }
- return cvtT2I(v.Elem(), typ)
-}
-
-// implemented in ../runtime
-func chancap(ch unsafe.Pointer) int
-func chanclose(ch unsafe.Pointer)
-func chanlen(ch unsafe.Pointer) int
-
-// Note: some of the noescape annotations below are technically a lie,
-// but safe in the context of this package. Functions like chansend
-// and mapassign don't escape the referent, but may escape anything
-// the referent points to (they do shallow copies of the referent).
-// It is safe in this package because the referent may only point
-// to something a Value may point to, and that is always in the heap
-// (due to the escapes() call in ValueOf).
-
-//go:noescape
-func chanrecv(ch unsafe.Pointer, nb bool, val unsafe.Pointer) (selected, received bool)
-
-//go:noescape
-func chansend(ch unsafe.Pointer, val unsafe.Pointer, nb bool) bool
-
-func makechan(typ *rtype, size int) (ch unsafe.Pointer)
-func makemap(t *rtype, cap int) (m unsafe.Pointer)
-
-//go:noescape
-func mapaccess(t *rtype, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
-
-//go:noescape
-func mapaccess_faststr(t *rtype, m unsafe.Pointer, key string) (val unsafe.Pointer)
-
-//go:noescape
-func mapassign(t *rtype, m unsafe.Pointer, key, val unsafe.Pointer)
-
-//go:noescape
-func mapassign_faststr(t *rtype, m unsafe.Pointer, key string, val unsafe.Pointer)
-
-//go:noescape
-func mapdelete(t *rtype, m unsafe.Pointer, key unsafe.Pointer)
-
-//go:noescape
-func mapdelete_faststr(t *rtype, m unsafe.Pointer, key string)
-
-//go:noescape
-func mapiterinit(t *rtype, m unsafe.Pointer, it *hiter)
-
-//go:noescape
-func mapiterkey(it *hiter) (key unsafe.Pointer)
-
-//go:noescape
-func mapiterelem(it *hiter) (elem unsafe.Pointer)
-
-//go:noescape
-func mapiternext(it *hiter)
-
-//go:noescape
-func maplen(m unsafe.Pointer) int
-
-// call calls fn with "stackArgsSize" bytes of stack arguments laid out
-// at stackArgs and register arguments laid out in regArgs. frameSize is
-// the total amount of stack space that will be reserved by call, so this
-// should include enough space to spill register arguments to the stack in
-// case of preemption.
-//
-// After fn returns, call copies stackArgsSize-stackRetOffset result bytes
-// back into stackArgs+stackRetOffset before returning, for any return
-// values passed on the stack. Register-based return values will be found
-// in the same regArgs structure.
-//
-// regArgs must also be prepared with an appropriate ReturnIsPtr bitmap
-// indicating which registers will contain pointer-valued return values. The
-// purpose of this bitmap is to keep pointers visible to the GC between
-// returning from reflectcall and actually using them.
-//
-// If copying result bytes back from the stack, the caller must pass the
-// argument frame type as stackArgsType, so that call can execute appropriate
-// write barriers during the copy.
-//
-// Arguments passed through to call do not escape. The type is used only in a
-// very limited callee of call, the stackArgs are copied, and regArgs is only
-// used in the call frame.
-//go:noescape
-//go:linkname call runtime.reflectcall
-func call(stackArgsType *rtype, f, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-
-func ifaceE2I(t *rtype, src any, dst unsafe.Pointer)
-
-// memmove copies size bytes to dst from src. No write barriers are used.
-//go:noescape
-func memmove(dst, src unsafe.Pointer, size uintptr)
-
-// typedmemmove copies a value of type t to dst from src.
-//go:noescape
-func typedmemmove(t *rtype, dst, src unsafe.Pointer)
-
-// typedmemmovepartial is like typedmemmove but assumes that
-// dst and src point off bytes into the value and only copies size bytes.
-//go:noescape
-func typedmemmovepartial(t *rtype, dst, src unsafe.Pointer, off, size uintptr)
-
-// typedmemclr zeros the value at ptr of type t.
-//go:noescape
-func typedmemclr(t *rtype, ptr unsafe.Pointer)
-
-// typedmemclrpartial is like typedmemclr but assumes that
-// ptr points off bytes into the value and only clears size bytes.
-//go:noescape
-func typedmemclrpartial(t *rtype, ptr unsafe.Pointer, off, size uintptr)
-
-// typedslicecopy copies a slice of elemType values from src to dst,
-// returning the number of elements copied.
-//go:noescape
-func typedslicecopy(elemType *rtype, dst, src unsafeheader.Slice) int
-
-//go:noescape
-func typehash(t *rtype, p unsafe.Pointer, h uintptr) uintptr
-
-func verifyNotInHeapPtr(p uintptr) bool
-
-// Dummy annotation marking that the value x escapes,
-// for use in cases where the reflect code is so clever that
-// the compiler cannot follow.
-func escapes(x any) {
- if dummy.b {
- dummy.x = x
- }
-}
-
-var dummy struct {
- b bool
- x any
-}
diff --git a/contrib/go/_std_1.18/src/runtime/alg.go b/contrib/go/_std_1.18/src/runtime/alg.go
deleted file mode 100644
index 5d7d1c77f4..0000000000
--- a/contrib/go/_std_1.18/src/runtime/alg.go
+++ /dev/null
@@ -1,353 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/cpu"
- "internal/goarch"
- "unsafe"
-)
-
-const (
- c0 = uintptr((8-goarch.PtrSize)/4*2860486313 + (goarch.PtrSize-4)/4*33054211828000289)
- c1 = uintptr((8-goarch.PtrSize)/4*3267000013 + (goarch.PtrSize-4)/4*23344194077549503)
-)
-
-func memhash0(p unsafe.Pointer, h uintptr) uintptr {
- return h
-}
-
-func memhash8(p unsafe.Pointer, h uintptr) uintptr {
- return memhash(p, h, 1)
-}
-
-func memhash16(p unsafe.Pointer, h uintptr) uintptr {
- return memhash(p, h, 2)
-}
-
-func memhash128(p unsafe.Pointer, h uintptr) uintptr {
- return memhash(p, h, 16)
-}
-
-//go:nosplit
-func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr {
- ptr := getclosureptr()
- size := *(*uintptr)(unsafe.Pointer(ptr + unsafe.Sizeof(h)))
- return memhash(p, h, size)
-}
-
-// runtime variable to check if the processor we're running on
-// actually supports the instructions used by the AES-based
-// hash implementation.
-var useAeshash bool
-
-// in asm_*.s
-func memhash(p unsafe.Pointer, h, s uintptr) uintptr
-func memhash32(p unsafe.Pointer, h uintptr) uintptr
-func memhash64(p unsafe.Pointer, h uintptr) uintptr
-func strhash(p unsafe.Pointer, h uintptr) uintptr
-
-func strhashFallback(a unsafe.Pointer, h uintptr) uintptr {
- x := (*stringStruct)(a)
- return memhashFallback(x.str, h, uintptr(x.len))
-}
-
-// NOTE: Because NaN != NaN, a map can contain any
-// number of (mostly useless) entries keyed with NaNs.
-// To avoid long hash chains, we assign a random number
-// as the hash value for a NaN.
-
-func f32hash(p unsafe.Pointer, h uintptr) uintptr {
- f := *(*float32)(p)
- switch {
- case f == 0:
- return c1 * (c0 ^ h) // +0, -0
- case f != f:
- return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN
- default:
- return memhash(p, h, 4)
- }
-}
-
-func f64hash(p unsafe.Pointer, h uintptr) uintptr {
- f := *(*float64)(p)
- switch {
- case f == 0:
- return c1 * (c0 ^ h) // +0, -0
- case f != f:
- return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN
- default:
- return memhash(p, h, 8)
- }
-}
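
The effect of the randomized NaN hash described in the NOTE above is visible from ordinary Go code (a sketch; the map contents are illustrative):

package main

import (
	"fmt"
	"math"
)

func main() {
	m := map[float64]int{}
	nan := math.NaN()
	m[nan] = 1
	m[nan] = 2 // NaN != NaN, so each assignment inserts a new entry
	fmt.Println(len(m)) // 2
	_, ok := m[nan]     // and lookups can never find a NaN key
	fmt.Println(ok)     // false
}
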
-
-func c64hash(p unsafe.Pointer, h uintptr) uintptr {
- x := (*[2]float32)(p)
- return f32hash(unsafe.Pointer(&x[1]), f32hash(unsafe.Pointer(&x[0]), h))
-}
-
-func c128hash(p unsafe.Pointer, h uintptr) uintptr {
- x := (*[2]float64)(p)
- return f64hash(unsafe.Pointer(&x[1]), f64hash(unsafe.Pointer(&x[0]), h))
-}
-
-func interhash(p unsafe.Pointer, h uintptr) uintptr {
- a := (*iface)(p)
- tab := a.tab
- if tab == nil {
- return h
- }
- t := tab._type
- if t.equal == nil {
- // Check hashability here. We could do this check inside
- // typehash, but we want to report the topmost type in
- // the error text (e.g. in a struct with a field of slice type
- // we want to report the struct, not the slice).
- panic(errorString("hash of unhashable type " + t.string()))
- }
- if isDirectIface(t) {
- return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
- } else {
- return c1 * typehash(t, a.data, h^c0)
- }
-}
-
-func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
- a := (*eface)(p)
- t := a._type
- if t == nil {
- return h
- }
- if t.equal == nil {
- // See comment in interhash above.
- panic(errorString("hash of unhashable type " + t.string()))
- }
- if isDirectIface(t) {
- return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
- } else {
- return c1 * typehash(t, a.data, h^c0)
- }
-}
-
-// typehash computes the hash of the object of type t at address p.
-// h is the seed.
-// This function is seldom used. Most maps use for hashing either
-// fixed functions (e.g. f32hash) or compiler-generated functions
-// (e.g. for a type like struct { x, y string }). This implementation
-// is slower but more general and is used for hashing interface types
-// (called from interhash or nilinterhash, above) or for hashing in
-// maps generated by reflect.MapOf (reflect_typehash, below).
-// Note: this function must match the compiler generated
-// functions exactly. See issue 37716.
-func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
- if t.tflag&tflagRegularMemory != 0 {
- // Handle ptr sizes specially, see issue 37086.
- switch t.size {
- case 4:
- return memhash32(p, h)
- case 8:
- return memhash64(p, h)
- default:
- return memhash(p, h, t.size)
- }
- }
- switch t.kind & kindMask {
- case kindFloat32:
- return f32hash(p, h)
- case kindFloat64:
- return f64hash(p, h)
- case kindComplex64:
- return c64hash(p, h)
- case kindComplex128:
- return c128hash(p, h)
- case kindString:
- return strhash(p, h)
- case kindInterface:
- i := (*interfacetype)(unsafe.Pointer(t))
- if len(i.mhdr) == 0 {
- return nilinterhash(p, h)
- }
- return interhash(p, h)
- case kindArray:
- a := (*arraytype)(unsafe.Pointer(t))
- for i := uintptr(0); i < a.len; i++ {
- h = typehash(a.elem, add(p, i*a.elem.size), h)
- }
- return h
- case kindStruct:
- s := (*structtype)(unsafe.Pointer(t))
- for _, f := range s.fields {
- if f.name.isBlank() {
- continue
- }
- h = typehash(f.typ, add(p, f.offset()), h)
- }
- return h
- default:
- // Should never happen, as typehash should only be called
- // with comparable types.
- panic(errorString("hash of unhashable type " + t.string()))
- }
-}
-
-//go:linkname reflect_typehash reflect.typehash
-func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
- return typehash(t, p, h)
-}
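
Per the typehash comment above, maps whose type is constructed at run time via reflect.MapOf reach this hook for key hashing. A sketch that exercises that path (key and value types are illustrative):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	mt := reflect.MapOf(reflect.TypeOf([2]string{}), reflect.TypeOf(0))
	m := reflect.MakeMap(mt)
	k := reflect.ValueOf([2]string{"a", "b"})
	m.SetMapIndex(k, reflect.ValueOf(1))
	fmt.Println(m.MapIndex(k).Interface()) // 1
}
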
-
-func memequal0(p, q unsafe.Pointer) bool {
- return true
-}
-func memequal8(p, q unsafe.Pointer) bool {
- return *(*int8)(p) == *(*int8)(q)
-}
-func memequal16(p, q unsafe.Pointer) bool {
- return *(*int16)(p) == *(*int16)(q)
-}
-func memequal32(p, q unsafe.Pointer) bool {
- return *(*int32)(p) == *(*int32)(q)
-}
-func memequal64(p, q unsafe.Pointer) bool {
- return *(*int64)(p) == *(*int64)(q)
-}
-func memequal128(p, q unsafe.Pointer) bool {
- return *(*[2]int64)(p) == *(*[2]int64)(q)
-}
-func f32equal(p, q unsafe.Pointer) bool {
- return *(*float32)(p) == *(*float32)(q)
-}
-func f64equal(p, q unsafe.Pointer) bool {
- return *(*float64)(p) == *(*float64)(q)
-}
-func c64equal(p, q unsafe.Pointer) bool {
- return *(*complex64)(p) == *(*complex64)(q)
-}
-func c128equal(p, q unsafe.Pointer) bool {
- return *(*complex128)(p) == *(*complex128)(q)
-}
-func strequal(p, q unsafe.Pointer) bool {
- return *(*string)(p) == *(*string)(q)
-}
-func interequal(p, q unsafe.Pointer) bool {
- x := *(*iface)(p)
- y := *(*iface)(q)
- return x.tab == y.tab && ifaceeq(x.tab, x.data, y.data)
-}
-func nilinterequal(p, q unsafe.Pointer) bool {
- x := *(*eface)(p)
- y := *(*eface)(q)
- return x._type == y._type && efaceeq(x._type, x.data, y.data)
-}
-func efaceeq(t *_type, x, y unsafe.Pointer) bool {
- if t == nil {
- return true
- }
- eq := t.equal
- if eq == nil {
- panic(errorString("comparing uncomparable type " + t.string()))
- }
- if isDirectIface(t) {
- // Direct interface types are ptr, chan, map, func, and single-element structs/arrays thereof.
- // Maps and funcs are not comparable, so they can't reach here.
- // Ptrs, chans, and single-element items can be compared directly using ==.
- return x == y
- }
- return eq(x, y)
-}
-func ifaceeq(tab *itab, x, y unsafe.Pointer) bool {
- if tab == nil {
- return true
- }
- t := tab._type
- eq := t.equal
- if eq == nil {
- panic(errorString("comparing uncomparable type " + t.string()))
- }
- if isDirectIface(t) {
- // See comment in efaceeq.
- return x == y
- }
- return eq(x, y)
-}
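
The "comparing uncomparable type" panic in efaceeq above is reachable from plain Go whenever an interface holding a slice, map, or func is compared (a sketch):

package main

import "fmt"

func main() {
	defer func() { fmt.Println("recovered:", recover()) }()
	var x, y any = []int{1}, []int{1}
	fmt.Println(x == y) // the dynamic type []int has no equal function, so this panics:
	// runtime error: comparing uncomparable type []int
}
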
-
-// Testing adapters for hash quality tests (see hash_test.go)
-func stringHash(s string, seed uintptr) uintptr {
- return strhash(noescape(unsafe.Pointer(&s)), seed)
-}
-
-func bytesHash(b []byte, seed uintptr) uintptr {
- s := (*slice)(unsafe.Pointer(&b))
- return memhash(s.array, seed, uintptr(s.len))
-}
-
-func int32Hash(i uint32, seed uintptr) uintptr {
- return memhash32(noescape(unsafe.Pointer(&i)), seed)
-}
-
-func int64Hash(i uint64, seed uintptr) uintptr {
- return memhash64(noescape(unsafe.Pointer(&i)), seed)
-}
-
-func efaceHash(i any, seed uintptr) uintptr {
- return nilinterhash(noescape(unsafe.Pointer(&i)), seed)
-}
-
-func ifaceHash(i interface {
- F()
-}, seed uintptr) uintptr {
- return interhash(noescape(unsafe.Pointer(&i)), seed)
-}
-
-const hashRandomBytes = goarch.PtrSize / 4 * 64
-
-// used in asm_{386,amd64,arm64}.s to seed the hash function
-var aeskeysched [hashRandomBytes]byte
-
-// used in hash{32,64}.go to seed the hash function
-var hashkey [4]uintptr
-
-func alginit() {
- // Install AES hash algorithms if the instructions needed are present.
- if (GOARCH == "386" || GOARCH == "amd64") &&
- cpu.X86.HasAES && // AESENC
- cpu.X86.HasSSSE3 && // PSHUFB
- cpu.X86.HasSSE41 { // PINSR{D,Q}
- initAlgAES()
- return
- }
- if GOARCH == "arm64" && cpu.ARM64.HasAES {
- initAlgAES()
- return
- }
- getRandomData((*[len(hashkey) * goarch.PtrSize]byte)(unsafe.Pointer(&hashkey))[:])
- hashkey[0] |= 1 // make sure these numbers are odd
- hashkey[1] |= 1
- hashkey[2] |= 1
- hashkey[3] |= 1
-}
-
-func initAlgAES() {
- useAeshash = true
- // Initialize with random data so hash collisions will be hard to engineer.
- getRandomData(aeskeysched[:])
-}
-
-// Note: These routines perform the read with native endianness.
-func readUnaligned32(p unsafe.Pointer) uint32 {
- q := (*[4]byte)(p)
- if goarch.BigEndian {
- return uint32(q[3]) | uint32(q[2])<<8 | uint32(q[1])<<16 | uint32(q[0])<<24
- }
- return uint32(q[0]) | uint32(q[1])<<8 | uint32(q[2])<<16 | uint32(q[3])<<24
-}
-
-func readUnaligned64(p unsafe.Pointer) uint64 {
- q := (*[8]byte)(p)
- if goarch.BigEndian {
- return uint64(q[7]) | uint64(q[6])<<8 | uint64(q[5])<<16 | uint64(q[4])<<24 |
- uint64(q[3])<<32 | uint64(q[2])<<40 | uint64(q[1])<<48 | uint64(q[0])<<56
- }
- return uint64(q[0]) | uint64(q[1])<<8 | uint64(q[2])<<16 | uint64(q[3])<<24 | uint64(q[4])<<32 | uint64(q[5])<<40 | uint64(q[6])<<48 | uint64(q[7])<<56
-}
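
On a little-endian host such as amd64, the branch readUnaligned32 takes above assembles bytes the same way encoding/binary does; a caller-side sketch of the equivalence (assumes a little-endian machine):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x78, 0x56, 0x34, 0x12}
	fmt.Printf("%#x\n", binary.LittleEndian.Uint32(b)) // 0x12345678, i.e. q[0] | q[1]<<8 | q[2]<<16 | q[3]<<24
}
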
diff --git a/contrib/go/_std_1.18/src/runtime/asan0.go b/contrib/go/_std_1.18/src/runtime/asan0.go
deleted file mode 100644
index d5478d6bee..0000000000
--- a/contrib/go/_std_1.18/src/runtime/asan0.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !asan
-
-// Dummy ASan support API, used when not built with -asan.
-
-package runtime
-
-import (
- "unsafe"
-)
-
-const asanenabled = false
-
-// Because asanenabled is false, none of these functions should be called.
-
-func asanread(addr unsafe.Pointer, sz uintptr) { throw("asan") }
-func asanwrite(addr unsafe.Pointer, sz uintptr) { throw("asan") }
-func asanunpoison(addr unsafe.Pointer, sz uintptr) { throw("asan") }
-func asanpoison(addr unsafe.Pointer, sz uintptr) { throw("asan") }
diff --git a/contrib/go/_std_1.18/src/runtime/asm_amd64.s b/contrib/go/_std_1.18/src/runtime/asm_amd64.s
deleted file mode 100644
index c08ae610fb..0000000000
--- a/contrib/go/_std_1.18/src/runtime/asm_amd64.s
+++ /dev/null
@@ -1,2036 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "go_asm.h"
-#include "go_tls.h"
-#include "funcdata.h"
-#include "textflag.h"
-#include "cgo/abi_amd64.h"
-
-// _rt0_amd64 is common startup code for most amd64 systems when using
-// internal linking. This is the entry point for the program from the
-// kernel for an ordinary -buildmode=exe program. The stack holds the
-// number of arguments and the C-style argv.
-TEXT _rt0_amd64(SB),NOSPLIT,$-8
- MOVQ 0(SP), DI // argc
- LEAQ 8(SP), SI // argv
- JMP runtime·rt0_go(SB)
-
-// main is common startup code for most amd64 systems when using
-// external linking. The C startup code will call the symbol "main"
-// passing argc and argv in the usual C ABI registers DI and SI.
-TEXT main(SB),NOSPLIT,$-8
- JMP runtime·rt0_go(SB)
-
-// _rt0_amd64_lib is common startup code for most amd64 systems when
-// using -buildmode=c-archive or -buildmode=c-shared. The linker will
-// arrange to invoke this function as a global constructor (for
-// c-archive) or when the shared library is loaded (for c-shared).
-// We expect argc and argv to be passed in the usual C ABI registers
-// DI and SI.
-TEXT _rt0_amd64_lib(SB),NOSPLIT,$0
- // Transition from C ABI to Go ABI.
- PUSH_REGS_HOST_TO_ABI0()
-
- MOVQ DI, _rt0_amd64_lib_argc<>(SB)
- MOVQ SI, _rt0_amd64_lib_argv<>(SB)
-
- // Synchronous initialization.
- CALL runtime·libpreinit(SB)
-
- // Create a new thread to finish Go runtime initialization.
- MOVQ _cgo_sys_thread_create(SB), AX
- TESTQ AX, AX
- JZ nocgo
-
- // We're calling back to C.
- // Align stack per ELF ABI requirements.
- MOVQ SP, BX // Callee-save in C ABI
- ANDQ $~15, SP
- MOVQ $_rt0_amd64_lib_go(SB), DI
- MOVQ $0, SI
- CALL AX
- MOVQ BX, SP
- JMP restore
-
-nocgo:
- ADJSP $16
- MOVQ $0x800000, 0(SP) // stacksize
- MOVQ $_rt0_amd64_lib_go(SB), AX
- MOVQ AX, 8(SP) // fn
- CALL runtime·newosproc0(SB)
- ADJSP $-16
-
-restore:
- POP_REGS_HOST_TO_ABI0()
- RET
-
-// _rt0_amd64_lib_go initializes the Go runtime.
-// This is started in a separate thread by _rt0_amd64_lib.
-TEXT _rt0_amd64_lib_go(SB),NOSPLIT,$0
- MOVQ _rt0_amd64_lib_argc<>(SB), DI
- MOVQ _rt0_amd64_lib_argv<>(SB), SI
- JMP runtime·rt0_go(SB)
-
-DATA _rt0_amd64_lib_argc<>(SB)/8, $0
-GLOBL _rt0_amd64_lib_argc<>(SB),NOPTR, $8
-DATA _rt0_amd64_lib_argv<>(SB)/8, $0
-GLOBL _rt0_amd64_lib_argv<>(SB),NOPTR, $8
-
-#ifdef GOAMD64_v2
-DATA bad_cpu_msg<>+0x00(SB)/84, $"This program can only be run on AMD64 processors with v2 microarchitecture support.\n"
-#endif
-
-#ifdef GOAMD64_v3
-DATA bad_cpu_msg<>+0x00(SB)/84, $"This program can only be run on AMD64 processors with v3 microarchitecture support.\n"
-#endif
-
-#ifdef GOAMD64_v4
-DATA bad_cpu_msg<>+0x00(SB)/84, $"This program can only be run on AMD64 processors with v4 microarchitecture support.\n"
-#endif
-
-GLOBL bad_cpu_msg<>(SB), RODATA, $84
-
-// Define a list of AMD64 microarchitecture level features
-// https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
-
- // SSE3 SSSE3 CMPXCHG16B SSE4.1 SSE4.2 POPCNT
-#define V2_FEATURES_CX (1 << 0 | 1 << 9 | 1 << 13 | 1 << 19 | 1 << 20 | 1 << 23)
- // LAHF/SAHF
-#define V2_EXT_FEATURES_CX (1 << 0)
- // FMA MOVBE OSXSAVE AVX F16C
-#define V3_FEATURES_CX (V2_FEATURES_CX | 1 << 12 | 1 << 22 | 1 << 27 | 1 << 28 | 1 << 29)
- // ABM (FOR LZCNT)
-#define V3_EXT_FEATURES_CX (V2_EXT_FEATURES_CX | 1 << 5)
- // BMI1 AVX2 BMI2
-#define V3_EXT_FEATURES_BX (1 << 3 | 1 << 5 | 1 << 8)
- // XMM YMM
-#define V3_OS_SUPPORT_AX (1 << 1 | 1 << 2)
-
-#define V4_FEATURES_CX V3_FEATURES_CX
-
-#define V4_EXT_FEATURES_CX V3_EXT_FEATURES_CX
- // AVX512F AVX512DQ AVX512CD AVX512BW AVX512VL
-#define V4_EXT_FEATURES_BX (V3_EXT_FEATURES_BX | 1 << 16 | 1 << 17 | 1 << 28 | 1 << 30 | 1 << 31)
- // OPMASK ZMM
-#define V4_OS_SUPPORT_AX (V3_OS_SUPPORT_AX | 1 << 5 | (1 << 6 | 1 << 7))
-
-#ifdef GOAMD64_v2
-#define NEED_MAX_CPUID 0x80000001
-#define NEED_FEATURES_CX V2_FEATURES_CX
-#define NEED_EXT_FEATURES_CX V2_EXT_FEATURES_CX
-#endif
-
-#ifdef GOAMD64_v3
-#define NEED_MAX_CPUID 0x80000001
-#define NEED_FEATURES_CX V3_FEATURES_CX
-#define NEED_EXT_FEATURES_CX V3_EXT_FEATURES_CX
-#define NEED_EXT_FEATURES_BX V3_EXT_FEATURES_BX
-#define NEED_OS_SUPPORT_AX V3_OS_SUPPORT_AX
-#endif
-
-#ifdef GOAMD64_v4
-#define NEED_MAX_CPUID 0x80000001
-#define NEED_FEATURES_CX V4_FEATURES_CX
-#define NEED_EXT_FEATURES_CX V4_EXT_FEATURES_CX
-#define NEED_EXT_FEATURES_BX V4_EXT_FEATURES_BX
-
-// Downgrading v4 OS checks on Darwin for now, see CL 285572.
-#ifdef GOOS_darwin
-#define NEED_OS_SUPPORT_AX V3_OS_SUPPORT_AX
-#else
-#define NEED_OS_SUPPORT_AX V4_OS_SUPPORT_AX
-#endif
-
-#endif
-
-TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0
- // copy arguments forward on an even stack
- MOVQ DI, AX // argc
- MOVQ SI, BX // argv
- SUBQ $(5*8), SP // 3args 2auto
- ANDQ $~15, SP
- MOVQ AX, 24(SP)
- MOVQ BX, 32(SP)
-
- // create istack out of the given (operating system) stack.
- // _cgo_init may update stackguard.
- MOVQ $runtime·g0(SB), DI
- LEAQ (-64*1024+104)(SP), BX
- MOVQ BX, g_stackguard0(DI)
- MOVQ BX, g_stackguard1(DI)
- MOVQ BX, (g_stack+stack_lo)(DI)
- MOVQ SP, (g_stack+stack_hi)(DI)
-
- // find out information about the processor we're on
- MOVL $0, AX
- CPUID
- CMPL AX, $0
- JE nocpuinfo
-
- CMPL BX, $0x756E6547 // "Genu"
- JNE notintel
- CMPL DX, $0x49656E69 // "ineI"
- JNE notintel
- CMPL CX, $0x6C65746E // "ntel"
- JNE notintel
- MOVB $1, runtime·isIntel(SB)
-
-notintel:
- // Load EAX=1 cpuid flags
- MOVL $1, AX
- CPUID
- MOVL AX, runtime·processorVersionInfo(SB)
-
-nocpuinfo:
- // if there is an _cgo_init, call it.
- MOVQ _cgo_init(SB), AX
- TESTQ AX, AX
- JZ needtls
- // arg 1: g0, already in DI
- MOVQ $setg_gcc<>(SB), SI // arg 2: setg_gcc
-#ifdef GOOS_android
- MOVQ $runtime·tls_g(SB), DX // arg 3: &tls_g
- // arg 4: TLS base, stored in slot 0 (Android's TLS_SLOT_SELF).
- // Compensate for tls_g (+16).
- MOVQ -16(TLS), CX
-#else
- MOVQ $0, DX // arg 3, 4: not used when using platform's TLS
- MOVQ $0, CX
-#endif
-#ifdef GOOS_windows
- // Adjust for the Win64 calling convention.
- MOVQ CX, R9 // arg 4
- MOVQ DX, R8 // arg 3
- MOVQ SI, DX // arg 2
- MOVQ DI, CX // arg 1
-#endif
- CALL AX
-
- // update stackguard after _cgo_init
- MOVQ $runtime·g0(SB), CX
- MOVQ (g_stack+stack_lo)(CX), AX
- ADDQ $const__StackGuard, AX
- MOVQ AX, g_stackguard0(CX)
- MOVQ AX, g_stackguard1(CX)
-
-#ifndef GOOS_windows
- JMP ok
-#endif
-needtls:
-#ifdef GOOS_plan9
- // skip TLS setup on Plan 9
- JMP ok
-#endif
-#ifdef GOOS_solaris
- // skip TLS setup on Solaris
- JMP ok
-#endif
-#ifdef GOOS_illumos
- // skip TLS setup on illumos
- JMP ok
-#endif
-#ifdef GOOS_darwin
- // skip TLS setup on Darwin
- JMP ok
-#endif
-#ifdef GOOS_openbsd
- // skip TLS setup on OpenBSD
- JMP ok
-#endif
-
- LEAQ runtime·m0+m_tls(SB), DI
- CALL runtime·settls(SB)
-
- // store through it, to make sure it works
- get_tls(BX)
- MOVQ $0x123, g(BX)
- MOVQ runtime·m0+m_tls(SB), AX
- CMPQ AX, $0x123
- JEQ 2(PC)
- CALL runtime·abort(SB)
-ok:
- // set the per-goroutine and per-mach "registers"
- get_tls(BX)
- LEAQ runtime·g0(SB), CX
- MOVQ CX, g(BX)
- LEAQ runtime·m0(SB), AX
-
- // save m->g0 = g0
- MOVQ CX, m_g0(AX)
- // save m0 to g0->m
- MOVQ AX, g_m(CX)
-
- CLD // convention is D is always left cleared
-
- // Check GOAMD64 requirements
- // We need to do this after setting up TLS, so that
- // we can report an error if there is a failure. See issue 49586.
-#ifdef NEED_FEATURES_CX
- MOVL $0, AX
- CPUID
- CMPL AX, $0
- JE bad_cpu
- MOVL $1, AX
- CPUID
- ANDL $NEED_FEATURES_CX, CX
- CMPL CX, $NEED_FEATURES_CX
- JNE bad_cpu
-#endif
-
-#ifdef NEED_MAX_CPUID
- MOVL $0x80000000, AX
- CPUID
- CMPL AX, $NEED_MAX_CPUID
- JL bad_cpu
-#endif
-
-#ifdef NEED_EXT_FEATURES_BX
- MOVL $7, AX
- MOVL $0, CX
- CPUID
- ANDL $NEED_EXT_FEATURES_BX, BX
- CMPL BX, $NEED_EXT_FEATURES_BX
- JNE bad_cpu
-#endif
-
-#ifdef NEED_EXT_FEATURES_CX
- MOVL $0x80000001, AX
- CPUID
- ANDL $NEED_EXT_FEATURES_CX, CX
- CMPL CX, $NEED_EXT_FEATURES_CX
- JNE bad_cpu
-#endif
-
-#ifdef NEED_OS_SUPPORT_AX
- XORL CX, CX
- XGETBV
- ANDL $NEED_OS_SUPPORT_AX, AX
- CMPL AX, $NEED_OS_SUPPORT_AX
- JNE bad_cpu
-#endif
-
- CALL runtime·check(SB)
-
- MOVL 24(SP), AX // copy argc
- MOVL AX, 0(SP)
- MOVQ 32(SP), AX // copy argv
- MOVQ AX, 8(SP)
- CALL runtime·args(SB)
- CALL runtime·osinit(SB)
- CALL runtime·schedinit(SB)
-
- // create a new goroutine to start program
- MOVQ $runtime·mainPC(SB), AX // entry
- PUSHQ AX
- CALL runtime·newproc(SB)
- POPQ AX
-
- // start this M
- CALL runtime·mstart(SB)
-
- CALL runtime·abort(SB) // mstart should never return
- RET
-
-bad_cpu: // show that the program requires a certain microarchitecture level.
- MOVQ $2, 0(SP)
- MOVQ $bad_cpu_msg<>(SB), AX
- MOVQ AX, 8(SP)
- MOVQ $84, 16(SP)
- CALL runtime·write(SB)
- MOVQ $1, 0(SP)
- CALL runtime·exit(SB)
- CALL runtime·abort(SB)
- RET
-
- // Prevent dead-code elimination of debugCallV2, which is
- // intended to be called by debuggers.
- MOVQ $runtime·debugCallV2<ABIInternal>(SB), AX
- RET
-
-// mainPC is a function value for runtime.main, to be passed to newproc.
-// The reference to runtime.main is made via ABIInternal, since the
-// actual function (not the ABI0 wrapper) is needed by newproc.
-DATA runtime·mainPC+0(SB)/8,$runtime·main<ABIInternal>(SB)
-GLOBL runtime·mainPC(SB),RODATA,$8
-
-TEXT runtime·breakpoint(SB),NOSPLIT,$0-0
- BYTE $0xcc
- RET
-
-TEXT runtime·asminit(SB),NOSPLIT,$0-0
- // No per-thread init.
- RET
-
-TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0
- CALL runtime·mstart0(SB)
- RET // not reached
-
-/*
- * go-routine
- */
-
-// func gogo(buf *gobuf)
-// restore state from Gobuf; longjmp
-TEXT runtime·gogo(SB), NOSPLIT, $0-8
- MOVQ buf+0(FP), BX // gobuf
- MOVQ gobuf_g(BX), DX
- MOVQ 0(DX), CX // make sure g != nil
- JMP gogo<>(SB)
-
-TEXT gogo<>(SB), NOSPLIT, $0
- get_tls(CX)
- MOVQ DX, g(CX)
- MOVQ DX, R14 // set the g register
- MOVQ gobuf_sp(BX), SP // restore SP
- MOVQ gobuf_ret(BX), AX
- MOVQ gobuf_ctxt(BX), DX
- MOVQ gobuf_bp(BX), BP
- MOVQ $0, gobuf_sp(BX) // clear to help garbage collector
- MOVQ $0, gobuf_ret(BX)
- MOVQ $0, gobuf_ctxt(BX)
- MOVQ $0, gobuf_bp(BX)
- MOVQ gobuf_pc(BX), BX
- JMP BX
-
-// func mcall(fn func(*g))
-// Switch to m->g0's stack, call fn(g).
-// Fn must never return. It should gogo(&g->sched)
-// to keep running g.
-TEXT runtime·mcall<ABIInternal>(SB), NOSPLIT, $0-8
- MOVQ AX, DX // DX = fn
-
- // save state in g->sched
- MOVQ 0(SP), BX // caller's PC
- MOVQ BX, (g_sched+gobuf_pc)(R14)
- LEAQ fn+0(FP), BX // caller's SP
- MOVQ BX, (g_sched+gobuf_sp)(R14)
- MOVQ BP, (g_sched+gobuf_bp)(R14)
-
- // switch to m->g0 & its stack, call fn
- MOVQ g_m(R14), BX
- MOVQ m_g0(BX), SI // SI = g.m.g0
- CMPQ SI, R14 // if g == m->g0 call badmcall
- JNE goodm
- JMP runtime·badmcall(SB)
-goodm:
- MOVQ R14, AX // AX (and arg 0) = g
- MOVQ SI, R14 // g = g.m.g0
- get_tls(CX) // Set G in TLS
- MOVQ R14, g(CX)
- MOVQ (g_sched+gobuf_sp)(R14), SP // sp = g0.sched.sp
- PUSHQ AX // open up space for fn's arg spill slot
- MOVQ 0(DX), R12
- CALL R12 // fn(g)
- POPQ AX
- JMP runtime·badmcall2(SB)
- RET
-
-// systemstack_switch is a dummy routine that systemstack leaves at the bottom
-// of the G stack. We need to distinguish the routine that
-// lives at the bottom of the G stack from the one that lives
-// at the top of the system stack because the one at the top of
-// the system stack terminates the stack walk (see topofstack()).
-TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
- RET
-
-// func systemstack(fn func())
-TEXT runtime·systemstack(SB), NOSPLIT, $0-8
- MOVQ fn+0(FP), DI // DI = fn
- get_tls(CX)
- MOVQ g(CX), AX // AX = g
- MOVQ g_m(AX), BX // BX = m
-
- CMPQ AX, m_gsignal(BX)
- JEQ noswitch
-
- MOVQ m_g0(BX), DX // DX = g0
- CMPQ AX, DX
- JEQ noswitch
-
- CMPQ AX, m_curg(BX)
- JNE bad
-
- // switch stacks
- // save our state in g->sched. Pretend to
- // be systemstack_switch if the G stack is scanned.
- CALL gosave_systemstack_switch<>(SB)
-
- // switch to g0
- MOVQ DX, g(CX)
- MOVQ DX, R14 // set the g register
- MOVQ (g_sched+gobuf_sp)(DX), BX
- MOVQ BX, SP
-
- // call target function
- MOVQ DI, DX
- MOVQ 0(DI), DI
- CALL DI
-
- // switch back to g
- get_tls(CX)
- MOVQ g(CX), AX
- MOVQ g_m(AX), BX
- MOVQ m_curg(BX), AX
- MOVQ AX, g(CX)
- MOVQ (g_sched+gobuf_sp)(AX), SP
- MOVQ $0, (g_sched+gobuf_sp)(AX)
- RET
-
-noswitch:
- // already on m stack; tail call the function
- // Using a tail call here cleans up tracebacks since we won't stop
- // at an intermediate systemstack.
- MOVQ DI, DX
- MOVQ 0(DI), DI
- JMP DI
-
-bad:
- // Bad: g is not gsignal, not g0, not curg. What is it?
- MOVQ $runtime·badsystemstack(SB), AX
- CALL AX
- INT $3
-
-
-/*
- * support for morestack
- */
-
-// Called during function prolog when more stack is needed.
-//
-// The traceback routines see morestack on a g0 as being
-// the top of a stack (for example, morestack calling newstack
-// calling the scheduler calling newm calling gc), so we must
-// record an argument size. For that purpose, it has no arguments.
-TEXT runtime·morestack(SB),NOSPLIT,$0-0
- // Cannot grow scheduler stack (m->g0).
- get_tls(CX)
- MOVQ g(CX), BX
- MOVQ g_m(BX), BX
- MOVQ m_g0(BX), SI
- CMPQ g(CX), SI
- JNE 3(PC)
- CALL runtime·badmorestackg0(SB)
- CALL runtime·abort(SB)
-
- // Cannot grow signal stack (m->gsignal).
- MOVQ m_gsignal(BX), SI
- CMPQ g(CX), SI
- JNE 3(PC)
- CALL runtime·badmorestackgsignal(SB)
- CALL runtime·abort(SB)
-
- // Called from f.
- // Set m->morebuf to f's caller.
- NOP SP // tell vet SP changed - stop checking offsets
- MOVQ 8(SP), AX // f's caller's PC
- MOVQ AX, (m_morebuf+gobuf_pc)(BX)
- LEAQ 16(SP), AX // f's caller's SP
- MOVQ AX, (m_morebuf+gobuf_sp)(BX)
- get_tls(CX)
- MOVQ g(CX), SI
- MOVQ SI, (m_morebuf+gobuf_g)(BX)
-
- // Set g->sched to context in f.
- MOVQ 0(SP), AX // f's PC
- MOVQ AX, (g_sched+gobuf_pc)(SI)
- LEAQ 8(SP), AX // f's SP
- MOVQ AX, (g_sched+gobuf_sp)(SI)
- MOVQ BP, (g_sched+gobuf_bp)(SI)
- MOVQ DX, (g_sched+gobuf_ctxt)(SI)
-
- // Call newstack on m->g0's stack.
- MOVQ m_g0(BX), BX
- MOVQ BX, g(CX)
- MOVQ (g_sched+gobuf_sp)(BX), SP
- CALL runtime·newstack(SB)
- CALL runtime·abort(SB) // crash if newstack returns
- RET
-
-// morestack but not preserving ctxt.
-TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0
- MOVL $0, DX
- JMP runtime·morestack(SB)
-
-// spillArgs stores return values from registers to a *internal/abi.RegArgs in R12.
-TEXT ·spillArgs(SB),NOSPLIT,$0-0
- MOVQ AX, 0(R12)
- MOVQ BX, 8(R12)
- MOVQ CX, 16(R12)
- MOVQ DI, 24(R12)
- MOVQ SI, 32(R12)
- MOVQ R8, 40(R12)
- MOVQ R9, 48(R12)
- MOVQ R10, 56(R12)
- MOVQ R11, 64(R12)
- MOVQ X0, 72(R12)
- MOVQ X1, 80(R12)
- MOVQ X2, 88(R12)
- MOVQ X3, 96(R12)
- MOVQ X4, 104(R12)
- MOVQ X5, 112(R12)
- MOVQ X6, 120(R12)
- MOVQ X7, 128(R12)
- MOVQ X8, 136(R12)
- MOVQ X9, 144(R12)
- MOVQ X10, 152(R12)
- MOVQ X11, 160(R12)
- MOVQ X12, 168(R12)
- MOVQ X13, 176(R12)
- MOVQ X14, 184(R12)
- RET
-
-// unspillArgs loads args into registers from a *internal/abi.RegArgs in R12.
-TEXT ·unspillArgs(SB),NOSPLIT,$0-0
- MOVQ 0(R12), AX
- MOVQ 8(R12), BX
- MOVQ 16(R12), CX
- MOVQ 24(R12), DI
- MOVQ 32(R12), SI
- MOVQ 40(R12), R8
- MOVQ 48(R12), R9
- MOVQ 56(R12), R10
- MOVQ 64(R12), R11
- MOVQ 72(R12), X0
- MOVQ 80(R12), X1
- MOVQ 88(R12), X2
- MOVQ 96(R12), X3
- MOVQ 104(R12), X4
- MOVQ 112(R12), X5
- MOVQ 120(R12), X6
- MOVQ 128(R12), X7
- MOVQ 136(R12), X8
- MOVQ 144(R12), X9
- MOVQ 152(R12), X10
- MOVQ 160(R12), X11
- MOVQ 168(R12), X12
- MOVQ 176(R12), X13
- MOVQ 184(R12), X14
- RET
-
-// reflectcall: call a function with the given argument list
-// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
-// we don't have variable-sized frames, so we use a small number
-// of constant-sized-frame functions to encode a few bits of size in the pc.
-// Caution: ugly multiline assembly macros in your future!
-
-#define DISPATCH(NAME,MAXSIZE) \
- CMPQ CX, $MAXSIZE; \
- JA 3(PC); \
- MOVQ $NAME(SB), AX; \
- JMP AX
-// Note: can't just "JMP NAME(SB)" - bad inlining results.
-
-TEXT ·reflectcall(SB), NOSPLIT, $0-48
- MOVLQZX frameSize+32(FP), CX
- DISPATCH(runtime·call16, 16)
- DISPATCH(runtime·call32, 32)
- DISPATCH(runtime·call64, 64)
- DISPATCH(runtime·call128, 128)
- DISPATCH(runtime·call256, 256)
- DISPATCH(runtime·call512, 512)
- DISPATCH(runtime·call1024, 1024)
- DISPATCH(runtime·call2048, 2048)
- DISPATCH(runtime·call4096, 4096)
- DISPATCH(runtime·call8192, 8192)
- DISPATCH(runtime·call16384, 16384)
- DISPATCH(runtime·call32768, 32768)
- DISPATCH(runtime·call65536, 65536)
- DISPATCH(runtime·call131072, 131072)
- DISPATCH(runtime·call262144, 262144)
- DISPATCH(runtime·call524288, 524288)
- DISPATCH(runtime·call1048576, 1048576)
- DISPATCH(runtime·call2097152, 2097152)
- DISPATCH(runtime·call4194304, 4194304)
- DISPATCH(runtime·call8388608, 8388608)
- DISPATCH(runtime·call16777216, 16777216)
- DISPATCH(runtime·call33554432, 33554432)
- DISPATCH(runtime·call67108864, 67108864)
- DISPATCH(runtime·call134217728, 134217728)
- DISPATCH(runtime·call268435456, 268435456)
- DISPATCH(runtime·call536870912, 536870912)
- DISPATCH(runtime·call1073741824, 1073741824)
- MOVQ $runtime·badreflectcall(SB), AX
- JMP AX
-
-#define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
- NO_LOCAL_POINTERS; \
- /* copy arguments to stack */ \
- MOVQ stackArgs+16(FP), SI; \
- MOVLQZX stackArgsSize+24(FP), CX; \
- MOVQ SP, DI; \
- REP;MOVSB; \
- /* set up argument registers */ \
- MOVQ regArgs+40(FP), R12; \
- CALL ·unspillArgs(SB); \
- /* call function */ \
- MOVQ f+8(FP), DX; \
- PCDATA $PCDATA_StackMapIndex, $0; \
- MOVQ (DX), R12; \
- CALL R12; \
- /* copy register return values back */ \
- MOVQ regArgs+40(FP), R12; \
- CALL ·spillArgs(SB); \
- MOVLQZX stackArgsSize+24(FP), CX; \
- MOVLQZX stackRetOffset+28(FP), BX; \
- MOVQ stackArgs+16(FP), DI; \
- MOVQ stackArgsType+0(FP), DX; \
- MOVQ SP, SI; \
- ADDQ BX, DI; \
- ADDQ BX, SI; \
- SUBQ BX, CX; \
- CALL callRet<>(SB); \
- RET
-
-// callRet copies return values back at the end of call*. This is a
-// separate function so it can allocate stack space for the arguments
-// to reflectcallmove. It does not follow the Go ABI; it expects its
-// arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $40-0
- NO_LOCAL_POINTERS
- MOVQ DX, 0(SP)
- MOVQ DI, 8(SP)
- MOVQ SI, 16(SP)
- MOVQ CX, 24(SP)
- MOVQ R12, 32(SP)
- CALL runtime·reflectcallmove(SB)
- RET
-
-CALLFN(·call16, 16)
-CALLFN(·call32, 32)
-CALLFN(·call64, 64)
-CALLFN(·call128, 128)
-CALLFN(·call256, 256)
-CALLFN(·call512, 512)
-CALLFN(·call1024, 1024)
-CALLFN(·call2048, 2048)
-CALLFN(·call4096, 4096)
-CALLFN(·call8192, 8192)
-CALLFN(·call16384, 16384)
-CALLFN(·call32768, 32768)
-CALLFN(·call65536, 65536)
-CALLFN(·call131072, 131072)
-CALLFN(·call262144, 262144)
-CALLFN(·call524288, 524288)
-CALLFN(·call1048576, 1048576)
-CALLFN(·call2097152, 2097152)
-CALLFN(·call4194304, 4194304)
-CALLFN(·call8388608, 8388608)
-CALLFN(·call16777216, 16777216)
-CALLFN(·call33554432, 33554432)
-CALLFN(·call67108864, 67108864)
-CALLFN(·call134217728, 134217728)
-CALLFN(·call268435456, 268435456)
-CALLFN(·call536870912, 536870912)
-CALLFN(·call1073741824, 1073741824)
-
-TEXT runtime·procyield(SB),NOSPLIT,$0-0
- MOVL cycles+0(FP), AX
-again:
- PAUSE
- SUBL $1, AX
- JNZ again
- RET
-
-
-TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
- // Stores are already ordered on x86, so this is just a
- // compile barrier.
- RET
-
-// Save state of caller into g->sched,
-// but using fake PC from systemstack_switch.
-// Must only be called from functions with no locals ($0)
-// or else unwinding from systemstack_switch is incorrect.
-// Smashes R9.
-TEXT gosave_systemstack_switch<>(SB),NOSPLIT,$0
- MOVQ $runtime·systemstack_switch(SB), R9
- MOVQ R9, (g_sched+gobuf_pc)(R14)
- LEAQ 8(SP), R9
- MOVQ R9, (g_sched+gobuf_sp)(R14)
- MOVQ $0, (g_sched+gobuf_ret)(R14)
- MOVQ BP, (g_sched+gobuf_bp)(R14)
- // Assert ctxt is zero. See func save.
- MOVQ (g_sched+gobuf_ctxt)(R14), R9
- TESTQ R9, R9
- JZ 2(PC)
- CALL runtime·abort(SB)
- RET
-
-// func asmcgocall_no_g(fn, arg unsafe.Pointer)
-// Call fn(arg) aligned appropriately for the gcc ABI.
-// Called on a system stack, and there may be no g yet (during needm).
-TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16
- MOVQ fn+0(FP), AX
- MOVQ arg+8(FP), BX
- MOVQ SP, DX
- SUBQ $32, SP
- ANDQ $~15, SP // alignment
- MOVQ DX, 8(SP)
- MOVQ BX, DI // DI = first argument in AMD64 ABI
- MOVQ BX, CX // CX = first argument in Win64
- CALL AX
- MOVQ 8(SP), DX
- MOVQ DX, SP
- RET
-
-// func asmcgocall(fn, arg unsafe.Pointer) int32
-// Call fn(arg) on the scheduler stack,
-// aligned appropriately for the gcc ABI.
-// See cgocall.go for more details.
-TEXT ·asmcgocall(SB),NOSPLIT,$0-20
- MOVQ fn+0(FP), AX
- MOVQ arg+8(FP), BX
-
- MOVQ SP, DX
-
- // Figure out if we need to switch to m->g0 stack.
- // We get called to create new OS threads too, and those
- // come in on the m->g0 stack already. Or we might already
- // be on the m->gsignal stack.
- get_tls(CX)
- MOVQ g(CX), DI
- CMPQ DI, $0
- JEQ nosave
- MOVQ g_m(DI), R8
- MOVQ m_gsignal(R8), SI
- CMPQ DI, SI
- JEQ nosave
- MOVQ m_g0(R8), SI
- CMPQ DI, SI
- JEQ nosave
-
- // Switch to system stack.
- CALL gosave_systemstack_switch<>(SB)
- MOVQ SI, g(CX)
- MOVQ (g_sched+gobuf_sp)(SI), SP
-
- // Now on a scheduling stack (a pthread-created stack).
- // Make sure we have enough room for 4 stack-backed fast-call
-	// registers as per the Windows amd64 calling convention.
- SUBQ $64, SP
- ANDQ $~15, SP // alignment for gcc ABI
- MOVQ DI, 48(SP) // save g
- MOVQ (g_stack+stack_hi)(DI), DI
- SUBQ DX, DI
- MOVQ DI, 40(SP) // save depth in stack (can't just save SP, as stack might be copied during a callback)
- MOVQ BX, DI // DI = first argument in AMD64 ABI
- MOVQ BX, CX // CX = first argument in Win64
- CALL AX
-
- // Restore registers, g, stack pointer.
- get_tls(CX)
- MOVQ 48(SP), DI
- MOVQ (g_stack+stack_hi)(DI), SI
- SUBQ 40(SP), SI
- MOVQ DI, g(CX)
- MOVQ SI, SP
-
- MOVL AX, ret+16(FP)
- RET
-
-nosave:
- // Running on a system stack, perhaps even without a g.
- // Having no g can happen during thread creation or thread teardown
- // (see needm/dropm on Solaris, for example).
- // This code is like the above sequence but without saving/restoring g
- // and without worrying about the stack moving out from under us
- // (because we're on a system stack, not a goroutine stack).
- // The above code could be used directly if already on a system stack,
- // but then the only path through this code would be a rare case on Solaris.
- // Using this code for all "already on system stack" calls exercises it more,
- // which should help keep it correct.
- SUBQ $64, SP
- ANDQ $~15, SP
- MOVQ $0, 48(SP) // where above code stores g, in case someone looks during debugging
- MOVQ DX, 40(SP) // save original stack pointer
- MOVQ BX, DI // DI = first argument in AMD64 ABI
- MOVQ BX, CX // CX = first argument in Win64
- CALL AX
- MOVQ 40(SP), SI // restore original stack pointer
- MOVQ SI, SP
- MOVL AX, ret+16(FP)
- RET
-
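-// Go-level sketch (an assumption for illustration, not part of this
-// file) of the stack choice asmcgocall makes above:
-//
-//	gp := getg()
-//	if gp == nil || gp == gp.m.gsignal || gp == gp.m.g0 {
-//		// nosave: already on a system stack, call fn(arg) in place
-//	} else {
-//		// save state, switch to m.g0's stack, then call fn(arg)
-//	}
-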
-#ifdef GOOS_windows
-// Dummy TLS that's used on Windows so that we don't crash trying
-// to restore the G register in needm. needm and its callees are
-// very careful never to actually use the G; the TLS just can't be
-// unset since we're in Go code.
-GLOBL zeroTLS<>(SB),RODATA,$const_tlsSize
-#endif
-
-// func cgocallback(fn, frame unsafe.Pointer, ctxt uintptr)
-// See cgocall.go for more details.
-TEXT ·cgocallback(SB),NOSPLIT,$24-24
- NO_LOCAL_POINTERS
-
- // If g is nil, Go did not create the current thread.
- // Call needm to obtain one m for temporary use.
- // In this case, we're running on the thread stack, so there's
- // lots of space, but the linker doesn't know. Hide the call from
- // the linker analysis by using an indirect call through AX.
- get_tls(CX)
-#ifdef GOOS_windows
- MOVL $0, BX
- CMPQ CX, $0
- JEQ 2(PC)
-#endif
- MOVQ g(CX), BX
- CMPQ BX, $0
- JEQ needm
- MOVQ g_m(BX), BX
- MOVQ BX, savedm-8(SP) // saved copy of oldm
- JMP havem
-needm:
-#ifdef GOOS_windows
- // Set up a dummy TLS value. needm is careful not to use it,
- // but it needs to be there to prevent autogenerated code from
- // crashing when it loads from it.
- // We don't need to clear it or anything later because needm
- // will set up TLS properly.
- MOVQ $zeroTLS<>(SB), DI
- CALL runtime·settls(SB)
-#endif
- // On some platforms (Windows) we cannot call needm through
- // an ABI wrapper because there's no TLS set up, and the ABI
- // wrapper will try to restore the G register (R14) from TLS.
- // Clear X15 because Go expects it and we're not calling
- // through a wrapper, but otherwise avoid setting the G
- // register in the wrapper and call needm directly. It
-	// takes no arguments and doesn't return any values, so
- // there's no need to handle that. Clear R14 so that there's
- // a bad value in there, in case needm tries to use it.
- XORPS X15, X15
- XORQ R14, R14
- MOVQ $runtime·needm<ABIInternal>(SB), AX
- CALL AX
- MOVQ $0, savedm-8(SP) // dropm on return
- get_tls(CX)
- MOVQ g(CX), BX
- MOVQ g_m(BX), BX
-
- // Set m->sched.sp = SP, so that if a panic happens
- // during the function we are about to execute, it will
- // have a valid SP to run on the g0 stack.
- // The next few lines (after the havem label)
- // will save this SP onto the stack and then write
- // the same SP back to m->sched.sp. That seems redundant,
- // but if an unrecovered panic happens, unwindm will
- // restore the g->sched.sp from the stack location
- // and then systemstack will try to use it. If we don't set it here,
- // that restored SP will be uninitialized (typically 0) and
- // will not be usable.
- MOVQ m_g0(BX), SI
- MOVQ SP, (g_sched+gobuf_sp)(SI)
-
-havem:
- // Now there's a valid m, and we're running on its m->g0.
- // Save current m->g0->sched.sp on stack and then set it to SP.
- // Save current sp in m->g0->sched.sp in preparation for
- // switch back to m->curg stack.
- // NOTE: unwindm knows that the saved g->sched.sp is at 0(SP).
- MOVQ m_g0(BX), SI
- MOVQ (g_sched+gobuf_sp)(SI), AX
- MOVQ AX, 0(SP)
- MOVQ SP, (g_sched+gobuf_sp)(SI)
-
- // Switch to m->curg stack and call runtime.cgocallbackg.
- // Because we are taking over the execution of m->curg
- // but *not* resuming what had been running, we need to
- // save that information (m->curg->sched) so we can restore it.
- // We can restore m->curg->sched.sp easily, because calling
- // runtime.cgocallbackg leaves SP unchanged upon return.
- // To save m->curg->sched.pc, we push it onto the curg stack and
- // open a frame the same size as cgocallback's g0 frame.
- // Once we switch to the curg stack, the pushed PC will appear
- // to be the return PC of cgocallback, so that the traceback
- // will seamlessly trace back into the earlier calls.
- MOVQ m_curg(BX), SI
- MOVQ SI, g(CX)
- MOVQ (g_sched+gobuf_sp)(SI), DI // prepare stack as DI
- MOVQ (g_sched+gobuf_pc)(SI), BX
- MOVQ BX, -8(DI) // "push" return PC on the g stack
- // Gather our arguments into registers.
- MOVQ fn+0(FP), BX
- MOVQ frame+8(FP), CX
- MOVQ ctxt+16(FP), DX
- // Compute the size of the frame, including return PC and, if
-	// GOEXPERIMENT=framepointer, the saved base pointer.
- LEAQ fn+0(FP), AX
- SUBQ SP, AX // AX is our actual frame size
- SUBQ AX, DI // Allocate the same frame size on the g stack
- MOVQ DI, SP
-
- MOVQ BX, 0(SP)
- MOVQ CX, 8(SP)
- MOVQ DX, 16(SP)
- MOVQ $runtime·cgocallbackg(SB), AX
- CALL AX // indirect call to bypass nosplit check. We're on a different stack now.
-
- // Compute the size of the frame again. FP and SP have
- // completely different values here than they did above,
- // but only their difference matters.
- LEAQ fn+0(FP), AX
- SUBQ SP, AX
-
- // Restore g->sched (== m->curg->sched) from saved values.
- get_tls(CX)
- MOVQ g(CX), SI
- MOVQ SP, DI
- ADDQ AX, DI
- MOVQ -8(DI), BX
- MOVQ BX, (g_sched+gobuf_pc)(SI)
- MOVQ DI, (g_sched+gobuf_sp)(SI)
-
- // Switch back to m->g0's stack and restore m->g0->sched.sp.
- // (Unlike m->curg, the g0 goroutine never uses sched.pc,
- // so we do not have to restore it.)
- MOVQ g(CX), BX
- MOVQ g_m(BX), BX
- MOVQ m_g0(BX), SI
- MOVQ SI, g(CX)
- MOVQ (g_sched+gobuf_sp)(SI), SP
- MOVQ 0(SP), AX
- MOVQ AX, (g_sched+gobuf_sp)(SI)
-
- // If the m on entry was nil, we called needm above to borrow an m
- // for the duration of the call. Since the call is over, return it with dropm.
- MOVQ savedm-8(SP), BX
- CMPQ BX, $0
- JNE done
- MOVQ $runtime·dropm(SB), AX
- CALL AX
-#ifdef GOOS_windows
- // We need to clear the TLS pointer in case the next
- // thread that comes into Go tries to reuse that space
- // but uses the same M.
- XORQ DI, DI
- CALL runtime·settls(SB)
-#endif
-done:
-
- // Done!
- RET
-
-// func setg(gg *g)
-// Set g; for use by needm.
-TEXT runtime·setg(SB), NOSPLIT, $0-8
- MOVQ gg+0(FP), BX
- get_tls(CX)
- MOVQ BX, g(CX)
- RET
-
-// void setg_gcc(G*); set g called from gcc.
-TEXT setg_gcc<>(SB),NOSPLIT,$0
- get_tls(AX)
- MOVQ DI, g(AX)
- MOVQ DI, R14 // set the g register
- RET
-
-TEXT runtime·abort(SB),NOSPLIT,$0-0
- INT $3
-loop:
- JMP loop
-
-// check that SP is in range [g->stack.lo, g->stack.hi)
-TEXT runtime·stackcheck(SB), NOSPLIT, $0-0
- get_tls(CX)
- MOVQ g(CX), AX
- CMPQ (g_stack+stack_hi)(AX), SP
- JHI 2(PC)
- CALL runtime·abort(SB)
- CMPQ SP, (g_stack+stack_lo)(AX)
- JHI 2(PC)
- CALL runtime·abort(SB)
- RET
-
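-// The same check as a hedged Go sketch:
-//
-//	if !(gp.stack.lo <= sp && sp < gp.stack.hi) {
-//		abort() // SP left the goroutine's stack bounds
-//	}
-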
-// func cputicks() int64
-TEXT runtime·cputicks(SB),NOSPLIT,$0-0
- CMPB internal∕cpu·X86+const_offsetX86HasRDTSCP(SB), $1
- JNE fences
-	// RDTSCP, which serializes the instruction stream, is supported.
- // RDTSCP is supported by Intel Nehalem (2008) and
- // AMD K8 Rev. F (2006) and newer.
- RDTSCP
-done:
- SHLQ $32, DX
- ADDQ DX, AX
- MOVQ AX, ret+0(FP)
- RET
-fences:
- // MFENCE is instruction stream serializing and flushes the
- // store buffers on AMD. The serialization semantics of LFENCE on AMD
- // are dependent on MSR C001_1029 and CPU generation.
- // LFENCE on Intel does wait for all previous instructions to have executed.
- // Intel recommends MFENCE;LFENCE in its manuals before RDTSC to have all
-	// previous instructions executed and all previous loads and stores to be globally visible.
- // Using MFENCE;LFENCE here aligns the serializing properties without
- // runtime detection of CPU manufacturer.
- MFENCE
- LFENCE
- RDTSC
- JMP done
-
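-// Hedged Go-level sketch of the dispatch above (rdtscp, mfence, lfence,
-// and rdtsc are hypothetical helpers standing in for the instructions):
-//
-//	func cputicksSketch() int64 {
-//		if cpu.X86.HasRDTSCP {
-//			return rdtscp() // serializing timestamp read
-//		}
-//		mfence() // flush store buffers, serialize on AMD
-//		lfence() // wait for prior instructions on Intel
-//		return rdtsc()
-//	}
-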
-// func memhash(p unsafe.Pointer, h, s uintptr) uintptr
-// hash function using AES hardware instructions
-TEXT runtime·memhash<ABIInternal>(SB),NOSPLIT,$0-32
- // AX = ptr to data
- // BX = seed
- // CX = size
- CMPB runtime·useAeshash(SB), $0
- JEQ noaes
- JMP aeshashbody<>(SB)
-noaes:
- JMP runtime·memhashFallback<ABIInternal>(SB)
-
-// func strhash(p unsafe.Pointer, h uintptr) uintptr
-TEXT runtime·strhash<ABIInternal>(SB),NOSPLIT,$0-24
- // AX = ptr to string struct
- // BX = seed
- CMPB runtime·useAeshash(SB), $0
- JEQ noaes
- MOVQ 8(AX), CX // length of string
- MOVQ (AX), AX // string data
- JMP aeshashbody<>(SB)
-noaes:
- JMP runtime·strhashFallback<ABIInternal>(SB)
-
-// AX: data
-// BX: hash seed
-// CX: length
-// At return: AX = return value
-TEXT aeshashbody<>(SB),NOSPLIT,$0-0
- // Fill an SSE register with our seeds.
- MOVQ BX, X0 // 64 bits of per-table hash seed
- PINSRW $4, CX, X0 // 16 bits of length
- PSHUFHW $0, X0, X0 // repeat length 4 times total
- MOVO X0, X1 // save unscrambled seed
- PXOR runtime·aeskeysched(SB), X0 // xor in per-process seed
- AESENC X0, X0 // scramble seed
-
- CMPQ CX, $16
- JB aes0to15
- JE aes16
- CMPQ CX, $32
- JBE aes17to32
- CMPQ CX, $64
- JBE aes33to64
- CMPQ CX, $128
- JBE aes65to128
- JMP aes129plus
-
-aes0to15:
- TESTQ CX, CX
- JE aes0
-
- ADDQ $16, AX
- TESTW $0xff0, AX
- JE endofpage
-
- // 16 bytes loaded at this address won't cross
- // a page boundary, so we can load it directly.
- MOVOU -16(AX), X1
- ADDQ CX, CX
- MOVQ $masks<>(SB), AX
- PAND (AX)(CX*8), X1
-final1:
- PXOR X0, X1 // xor data with seed
- AESENC X1, X1 // scramble combo 3 times
- AESENC X1, X1
- AESENC X1, X1
- MOVQ X1, AX // return X1
- RET
-
-endofpage:
- // address ends in 1111xxxx. Might be up against
- // a page boundary, so load ending at last byte.
- // Then shift bytes down using pshufb.
- MOVOU -32(AX)(CX*1), X1
- ADDQ CX, CX
- MOVQ $shifts<>(SB), AX
- PSHUFB (AX)(CX*8), X1
- JMP final1
-
-aes0:
- // Return scrambled input seed
- AESENC X0, X0
- MOVQ X0, AX // return X0
- RET
-
-aes16:
- MOVOU (AX), X1
- JMP final1
-
-aes17to32:
- // make second starting seed
- PXOR runtime·aeskeysched+16(SB), X1
- AESENC X1, X1
-
- // load data to be hashed
- MOVOU (AX), X2
- MOVOU -16(AX)(CX*1), X3
-
- // xor with seed
- PXOR X0, X2
- PXOR X1, X3
-
- // scramble 3 times
- AESENC X2, X2
- AESENC X3, X3
- AESENC X2, X2
- AESENC X3, X3
- AESENC X2, X2
- AESENC X3, X3
-
- // combine results
- PXOR X3, X2
- MOVQ X2, AX // return X2
- RET
-
-aes33to64:
- // make 3 more starting seeds
- MOVO X1, X2
- MOVO X1, X3
- PXOR runtime·aeskeysched+16(SB), X1
- PXOR runtime·aeskeysched+32(SB), X2
- PXOR runtime·aeskeysched+48(SB), X3
- AESENC X1, X1
- AESENC X2, X2
- AESENC X3, X3
-
- MOVOU (AX), X4
- MOVOU 16(AX), X5
- MOVOU -32(AX)(CX*1), X6
- MOVOU -16(AX)(CX*1), X7
-
- PXOR X0, X4
- PXOR X1, X5
- PXOR X2, X6
- PXOR X3, X7
-
- AESENC X4, X4
- AESENC X5, X5
- AESENC X6, X6
- AESENC X7, X7
-
- AESENC X4, X4
- AESENC X5, X5
- AESENC X6, X6
- AESENC X7, X7
-
- AESENC X4, X4
- AESENC X5, X5
- AESENC X6, X6
- AESENC X7, X7
-
- PXOR X6, X4
- PXOR X7, X5
- PXOR X5, X4
- MOVQ X4, AX // return X4
- RET
-
-aes65to128:
- // make 7 more starting seeds
- MOVO X1, X2
- MOVO X1, X3
- MOVO X1, X4
- MOVO X1, X5
- MOVO X1, X6
- MOVO X1, X7
- PXOR runtime·aeskeysched+16(SB), X1
- PXOR runtime·aeskeysched+32(SB), X2
- PXOR runtime·aeskeysched+48(SB), X3
- PXOR runtime·aeskeysched+64(SB), X4
- PXOR runtime·aeskeysched+80(SB), X5
- PXOR runtime·aeskeysched+96(SB), X6
- PXOR runtime·aeskeysched+112(SB), X7
- AESENC X1, X1
- AESENC X2, X2
- AESENC X3, X3
- AESENC X4, X4
- AESENC X5, X5
- AESENC X6, X6
- AESENC X7, X7
-
- // load data
- MOVOU (AX), X8
- MOVOU 16(AX), X9
- MOVOU 32(AX), X10
- MOVOU 48(AX), X11
- MOVOU -64(AX)(CX*1), X12
- MOVOU -48(AX)(CX*1), X13
- MOVOU -32(AX)(CX*1), X14
- MOVOU -16(AX)(CX*1), X15
-
- // xor with seed
- PXOR X0, X8
- PXOR X1, X9
- PXOR X2, X10
- PXOR X3, X11
- PXOR X4, X12
- PXOR X5, X13
- PXOR X6, X14
- PXOR X7, X15
-
- // scramble 3 times
- AESENC X8, X8
- AESENC X9, X9
- AESENC X10, X10
- AESENC X11, X11
- AESENC X12, X12
- AESENC X13, X13
- AESENC X14, X14
- AESENC X15, X15
-
- AESENC X8, X8
- AESENC X9, X9
- AESENC X10, X10
- AESENC X11, X11
- AESENC X12, X12
- AESENC X13, X13
- AESENC X14, X14
- AESENC X15, X15
-
- AESENC X8, X8
- AESENC X9, X9
- AESENC X10, X10
- AESENC X11, X11
- AESENC X12, X12
- AESENC X13, X13
- AESENC X14, X14
- AESENC X15, X15
-
- // combine results
- PXOR X12, X8
- PXOR X13, X9
- PXOR X14, X10
- PXOR X15, X11
- PXOR X10, X8
- PXOR X11, X9
- PXOR X9, X8
- // X15 must be zero on return
- PXOR X15, X15
- MOVQ X8, AX // return X8
- RET
-
-aes129plus:
- // make 7 more starting seeds
- MOVO X1, X2
- MOVO X1, X3
- MOVO X1, X4
- MOVO X1, X5
- MOVO X1, X6
- MOVO X1, X7
- PXOR runtime·aeskeysched+16(SB), X1
- PXOR runtime·aeskeysched+32(SB), X2
- PXOR runtime·aeskeysched+48(SB), X3
- PXOR runtime·aeskeysched+64(SB), X4
- PXOR runtime·aeskeysched+80(SB), X5
- PXOR runtime·aeskeysched+96(SB), X6
- PXOR runtime·aeskeysched+112(SB), X7
- AESENC X1, X1
- AESENC X2, X2
- AESENC X3, X3
- AESENC X4, X4
- AESENC X5, X5
- AESENC X6, X6
- AESENC X7, X7
-
- // start with last (possibly overlapping) block
- MOVOU -128(AX)(CX*1), X8
- MOVOU -112(AX)(CX*1), X9
- MOVOU -96(AX)(CX*1), X10
- MOVOU -80(AX)(CX*1), X11
- MOVOU -64(AX)(CX*1), X12
- MOVOU -48(AX)(CX*1), X13
- MOVOU -32(AX)(CX*1), X14
- MOVOU -16(AX)(CX*1), X15
-
- // xor in seed
- PXOR X0, X8
- PXOR X1, X9
- PXOR X2, X10
- PXOR X3, X11
- PXOR X4, X12
- PXOR X5, X13
- PXOR X6, X14
- PXOR X7, X15
-
- // compute number of remaining 128-byte blocks
- DECQ CX
- SHRQ $7, CX
-
-aesloop:
- // scramble state
- AESENC X8, X8
- AESENC X9, X9
- AESENC X10, X10
- AESENC X11, X11
- AESENC X12, X12
- AESENC X13, X13
- AESENC X14, X14
- AESENC X15, X15
-
- // scramble state, xor in a block
- MOVOU (AX), X0
- MOVOU 16(AX), X1
- MOVOU 32(AX), X2
- MOVOU 48(AX), X3
- AESENC X0, X8
- AESENC X1, X9
- AESENC X2, X10
- AESENC X3, X11
- MOVOU 64(AX), X4
- MOVOU 80(AX), X5
- MOVOU 96(AX), X6
- MOVOU 112(AX), X7
- AESENC X4, X12
- AESENC X5, X13
- AESENC X6, X14
- AESENC X7, X15
-
- ADDQ $128, AX
- DECQ CX
- JNE aesloop
-
- // 3 more scrambles to finish
- AESENC X8, X8
- AESENC X9, X9
- AESENC X10, X10
- AESENC X11, X11
- AESENC X12, X12
- AESENC X13, X13
- AESENC X14, X14
- AESENC X15, X15
- AESENC X8, X8
- AESENC X9, X9
- AESENC X10, X10
- AESENC X11, X11
- AESENC X12, X12
- AESENC X13, X13
- AESENC X14, X14
- AESENC X15, X15
- AESENC X8, X8
- AESENC X9, X9
- AESENC X10, X10
- AESENC X11, X11
- AESENC X12, X12
- AESENC X13, X13
- AESENC X14, X14
- AESENC X15, X15
-
- PXOR X12, X8
- PXOR X13, X9
- PXOR X14, X10
- PXOR X15, X11
- PXOR X10, X8
- PXOR X11, X9
- PXOR X9, X8
- // X15 must be zero on return
- PXOR X15, X15
- MOVQ X8, AX // return X8
- RET
-
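-// Hedged sketch of aeshashbody's size dispatch (Go-style pseudocode;
-// the case comments name the assembly labels above):
-//
-//	switch {
-//	case n == 0:   // aes0: return the scrambled seed
-//	case n < 16:   // aes0to15: one masked (or shifted) 16-byte load
-//	case n == 16:  // aes16: a single block
-//	case n <= 32:  // aes17to32: two overlapping blocks, two seeds
-//	case n <= 64:  // aes33to64: four blocks, four seeds
-//	case n <= 128: // aes65to128: eight blocks, eight seeds
-//	default:       // aes129plus: 128-byte main loop, then finalize
-//	}
-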
-// func memhash32(p unsafe.Pointer, h uintptr) uintptr
-// ABIInternal for performance.
-TEXT runtime·memhash32<ABIInternal>(SB),NOSPLIT,$0-24
- // AX = ptr to data
- // BX = seed
- CMPB runtime·useAeshash(SB), $0
- JEQ noaes
- MOVQ BX, X0 // X0 = seed
- PINSRD $2, (AX), X0 // data
- AESENC runtime·aeskeysched+0(SB), X0
- AESENC runtime·aeskeysched+16(SB), X0
- AESENC runtime·aeskeysched+32(SB), X0
- MOVQ X0, AX // return X0
- RET
-noaes:
- JMP runtime·memhash32Fallback<ABIInternal>(SB)
-
-// func memhash64(p unsafe.Pointer, h uintptr) uintptr
-// ABIInternal for performance.
-TEXT runtime·memhash64<ABIInternal>(SB),NOSPLIT,$0-24
- // AX = ptr to data
- // BX = seed
- CMPB runtime·useAeshash(SB), $0
- JEQ noaes
- MOVQ BX, X0 // X0 = seed
- PINSRQ $1, (AX), X0 // data
- AESENC runtime·aeskeysched+0(SB), X0
- AESENC runtime·aeskeysched+16(SB), X0
- AESENC runtime·aeskeysched+32(SB), X0
- MOVQ X0, AX // return X0
- RET
-noaes:
- JMP runtime·memhash64Fallback<ABIInternal>(SB)
-
-// Simple masks to get rid of data in the high part of the register.
-DATA masks<>+0x00(SB)/8, $0x0000000000000000
-DATA masks<>+0x08(SB)/8, $0x0000000000000000
-DATA masks<>+0x10(SB)/8, $0x00000000000000ff
-DATA masks<>+0x18(SB)/8, $0x0000000000000000
-DATA masks<>+0x20(SB)/8, $0x000000000000ffff
-DATA masks<>+0x28(SB)/8, $0x0000000000000000
-DATA masks<>+0x30(SB)/8, $0x0000000000ffffff
-DATA masks<>+0x38(SB)/8, $0x0000000000000000
-DATA masks<>+0x40(SB)/8, $0x00000000ffffffff
-DATA masks<>+0x48(SB)/8, $0x0000000000000000
-DATA masks<>+0x50(SB)/8, $0x000000ffffffffff
-DATA masks<>+0x58(SB)/8, $0x0000000000000000
-DATA masks<>+0x60(SB)/8, $0x0000ffffffffffff
-DATA masks<>+0x68(SB)/8, $0x0000000000000000
-DATA masks<>+0x70(SB)/8, $0x00ffffffffffffff
-DATA masks<>+0x78(SB)/8, $0x0000000000000000
-DATA masks<>+0x80(SB)/8, $0xffffffffffffffff
-DATA masks<>+0x88(SB)/8, $0x0000000000000000
-DATA masks<>+0x90(SB)/8, $0xffffffffffffffff
-DATA masks<>+0x98(SB)/8, $0x00000000000000ff
-DATA masks<>+0xa0(SB)/8, $0xffffffffffffffff
-DATA masks<>+0xa8(SB)/8, $0x000000000000ffff
-DATA masks<>+0xb0(SB)/8, $0xffffffffffffffff
-DATA masks<>+0xb8(SB)/8, $0x0000000000ffffff
-DATA masks<>+0xc0(SB)/8, $0xffffffffffffffff
-DATA masks<>+0xc8(SB)/8, $0x00000000ffffffff
-DATA masks<>+0xd0(SB)/8, $0xffffffffffffffff
-DATA masks<>+0xd8(SB)/8, $0x000000ffffffffff
-DATA masks<>+0xe0(SB)/8, $0xffffffffffffffff
-DATA masks<>+0xe8(SB)/8, $0x0000ffffffffffff
-DATA masks<>+0xf0(SB)/8, $0xffffffffffffffff
-DATA masks<>+0xf8(SB)/8, $0x00ffffffffffffff
-GLOBL masks<>(SB),RODATA,$256
-
-// func checkASM() bool
-TEXT ·checkASM(SB),NOSPLIT,$0-1
-	// Check that masks<>(SB) and shifts<>(SB) are 16-byte aligned.
- MOVQ $masks<>(SB), AX
- MOVQ $shifts<>(SB), BX
- ORQ BX, AX
- TESTQ $15, AX
- SETEQ ret+0(FP)
- RET
-
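-// The alignment test above, as a hedged Go sketch (masksAddr and
-// shiftsAddr are hypothetical stand-ins for the table addresses):
-//
-//	ok := (masksAddr()|shiftsAddr())&15 == 0 // both must be 16-byte aligned
-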
-// These are arguments to pshufb. They move data down from
-// the high bytes of the register to the low bytes of the register.
-// The index is how many bytes to move.
-DATA shifts<>+0x00(SB)/8, $0x0000000000000000
-DATA shifts<>+0x08(SB)/8, $0x0000000000000000
-DATA shifts<>+0x10(SB)/8, $0xffffffffffffff0f
-DATA shifts<>+0x18(SB)/8, $0xffffffffffffffff
-DATA shifts<>+0x20(SB)/8, $0xffffffffffff0f0e
-DATA shifts<>+0x28(SB)/8, $0xffffffffffffffff
-DATA shifts<>+0x30(SB)/8, $0xffffffffff0f0e0d
-DATA shifts<>+0x38(SB)/8, $0xffffffffffffffff
-DATA shifts<>+0x40(SB)/8, $0xffffffff0f0e0d0c
-DATA shifts<>+0x48(SB)/8, $0xffffffffffffffff
-DATA shifts<>+0x50(SB)/8, $0xffffff0f0e0d0c0b
-DATA shifts<>+0x58(SB)/8, $0xffffffffffffffff
-DATA shifts<>+0x60(SB)/8, $0xffff0f0e0d0c0b0a
-DATA shifts<>+0x68(SB)/8, $0xffffffffffffffff
-DATA shifts<>+0x70(SB)/8, $0xff0f0e0d0c0b0a09
-DATA shifts<>+0x78(SB)/8, $0xffffffffffffffff
-DATA shifts<>+0x80(SB)/8, $0x0f0e0d0c0b0a0908
-DATA shifts<>+0x88(SB)/8, $0xffffffffffffffff
-DATA shifts<>+0x90(SB)/8, $0x0e0d0c0b0a090807
-DATA shifts<>+0x98(SB)/8, $0xffffffffffffff0f
-DATA shifts<>+0xa0(SB)/8, $0x0d0c0b0a09080706
-DATA shifts<>+0xa8(SB)/8, $0xffffffffffff0f0e
-DATA shifts<>+0xb0(SB)/8, $0x0c0b0a0908070605
-DATA shifts<>+0xb8(SB)/8, $0xffffffffff0f0e0d
-DATA shifts<>+0xc0(SB)/8, $0x0b0a090807060504
-DATA shifts<>+0xc8(SB)/8, $0xffffffff0f0e0d0c
-DATA shifts<>+0xd0(SB)/8, $0x0a09080706050403
-DATA shifts<>+0xd8(SB)/8, $0xffffff0f0e0d0c0b
-DATA shifts<>+0xe0(SB)/8, $0x0908070605040302
-DATA shifts<>+0xe8(SB)/8, $0xffff0f0e0d0c0b0a
-DATA shifts<>+0xf0(SB)/8, $0x0807060504030201
-DATA shifts<>+0xf8(SB)/8, $0xff0f0e0d0c0b0a09
-GLOBL shifts<>(SB),RODATA,$256
-
-TEXT runtime·return0(SB), NOSPLIT, $0
- MOVL $0, AX
- RET
-
-
-// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
-// Must obey the gcc calling convention.
-TEXT _cgo_topofstack(SB),NOSPLIT,$0
- get_tls(CX)
- MOVQ g(CX), AX
- MOVQ g_m(AX), AX
- MOVQ m_curg(AX), AX
- MOVQ (g_stack+stack_hi)(AX), AX
- RET
-
-// The top-most function running on a goroutine
-// returns to goexit+PCQuantum.
-TEXT runtime·goexit(SB),NOSPLIT|TOPFRAME,$0-0
- BYTE $0x90 // NOP
- CALL runtime·goexit1(SB) // does not return
- // traceback from goexit1 must hit code range of goexit
- BYTE $0x90 // NOP
-
-// This is called from .init_array and follows the platform, not Go, ABI.
-TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0
- PUSHQ R15 // The access to global variables below implicitly uses R15, which is callee-save
- MOVQ runtime·lastmoduledatap(SB), AX
- MOVQ DI, moduledata_next(AX)
- MOVQ DI, runtime·lastmoduledatap(SB)
- POPQ R15
- RET
-
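-// Hedged Go sketch of the list append performed above (DI carries the
-// new moduledata; newmod is an illustrative name):
-//
-//	lastmoduledatap.next = newmod
-//	lastmoduledatap = newmod
-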
-// Initialize special registers then jump to sigpanic.
-// This function is injected from the signal handler for panicking
-// signals. It is quite painful to set X15 in the signal context,
-// so we do it here.
-TEXT ·sigpanic0(SB),NOSPLIT,$0-0
- get_tls(R14)
- MOVQ g(R14), R14
-#ifndef GOOS_plan9
- XORPS X15, X15
-#endif
- JMP ·sigpanic<ABIInternal>(SB)
-
-// gcWriteBarrier performs a heap pointer write and informs the GC.
-//
-// gcWriteBarrier does NOT follow the Go ABI. It takes two arguments:
-// - DI is the destination of the write
-// - AX is the value being written at DI
-// It clobbers FLAGS. It does not clobber any general-purpose registers,
-// but may clobber others (e.g., SSE registers).
-// Defined as ABIInternal since it does not use the stack-based Go ABI.
-TEXT runtime·gcWriteBarrier<ABIInternal>(SB),NOSPLIT,$112
- // Save the registers clobbered by the fast path. This is slightly
- // faster than having the caller spill these.
- MOVQ R12, 96(SP)
- MOVQ R13, 104(SP)
- // TODO: Consider passing g.m.p in as an argument so they can be shared
- // across a sequence of write barriers.
- MOVQ g_m(R14), R13
- MOVQ m_p(R13), R13
- MOVQ (p_wbBuf+wbBuf_next)(R13), R12
- // Increment wbBuf.next position.
- LEAQ 16(R12), R12
- MOVQ R12, (p_wbBuf+wbBuf_next)(R13)
- CMPQ R12, (p_wbBuf+wbBuf_end)(R13)
- // Record the write.
- MOVQ AX, -16(R12) // Record value
- // Note: This turns bad pointer writes into bad
- // pointer reads, which could be confusing. We could avoid
- // reading from obviously bad pointers, which would
- // take care of the vast majority of these. We could
- // patch this up in the signal handler, or use XCHG to
- // combine the read and the write.
- MOVQ (DI), R13
- MOVQ R13, -8(R12) // Record *slot
- // Is the buffer full? (flags set in CMPQ above)
- JEQ flush
-ret:
- MOVQ 96(SP), R12
- MOVQ 104(SP), R13
- // Do the write.
- MOVQ AX, (DI)
- RET
-
-flush:
- // Save all general purpose registers since these could be
- // clobbered by wbBufFlush and were not saved by the caller.
- // It is possible for wbBufFlush to clobber other registers
- // (e.g., SSE registers), but the compiler takes care of saving
- // those in the caller if necessary. This strikes a balance
- // with registers that are likely to be used.
- //
- // We don't have type information for these, but all code under
- // here is NOSPLIT, so nothing will observe these.
- //
- // TODO: We could strike a different balance; e.g., saving X0
- // and not saving GP registers that are less likely to be used.
- MOVQ DI, 0(SP) // Also first argument to wbBufFlush
- MOVQ AX, 8(SP) // Also second argument to wbBufFlush
- MOVQ BX, 16(SP)
- MOVQ CX, 24(SP)
- MOVQ DX, 32(SP)
- // DI already saved
- MOVQ SI, 40(SP)
- MOVQ BP, 48(SP)
- MOVQ R8, 56(SP)
- MOVQ R9, 64(SP)
- MOVQ R10, 72(SP)
- MOVQ R11, 80(SP)
- // R12 already saved
- // R13 already saved
- // R14 is g
- MOVQ R15, 88(SP)
-
- // This takes arguments DI and AX
- CALL runtime·wbBufFlush(SB)
-
- MOVQ 0(SP), DI
- MOVQ 8(SP), AX
- MOVQ 16(SP), BX
- MOVQ 24(SP), CX
- MOVQ 32(SP), DX
- MOVQ 40(SP), SI
- MOVQ 48(SP), BP
- MOVQ 56(SP), R8
- MOVQ 64(SP), R9
- MOVQ 72(SP), R10
- MOVQ 80(SP), R11
- MOVQ 88(SP), R15
- JMP ret
-
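-// Hedged Go sketch of the buffered fast path implemented above (field
-// names follow the runtime; record is an illustrative helper):
-//
-//	buf := &getg().m.p.ptr().wbBuf
-//	buf.next += 2 * 8        // reserve two 8-byte slots
-//	record(buf, val, *slot)  // store the new value and the old *slot
-//	if buf.next == buf.end {
-//		wbBufFlush(slot, val) // hand the full buffer to the GC
-//	}
-//	*slot = val // finally perform the write
-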
-// gcWriteBarrierCX is gcWriteBarrier, but with args in DI and CX.
-// Defined as ABIInternal since it does not use the stable Go ABI.
-TEXT runtime·gcWriteBarrierCX<ABIInternal>(SB),NOSPLIT,$0
- XCHGQ CX, AX
- CALL runtime·gcWriteBarrier<ABIInternal>(SB)
- XCHGQ CX, AX
- RET
-
-// gcWriteBarrierDX is gcWriteBarrier, but with args in DI and DX.
-// Defined as ABIInternal since it does not use the stable Go ABI.
-TEXT runtime·gcWriteBarrierDX<ABIInternal>(SB),NOSPLIT,$0
- XCHGQ DX, AX
- CALL runtime·gcWriteBarrier<ABIInternal>(SB)
- XCHGQ DX, AX
- RET
-
-// gcWriteBarrierBX is gcWriteBarrier, but with args in DI and BX.
-// Defined as ABIInternal since it does not use the stable Go ABI.
-TEXT runtime·gcWriteBarrierBX<ABIInternal>(SB),NOSPLIT,$0
- XCHGQ BX, AX
- CALL runtime·gcWriteBarrier<ABIInternal>(SB)
- XCHGQ BX, AX
- RET
-
-// gcWriteBarrierBP is gcWriteBarrier, but with args in DI and BP.
-// Defined as ABIInternal since it does not use the stable Go ABI.
-TEXT runtime·gcWriteBarrierBP<ABIInternal>(SB),NOSPLIT,$0
- XCHGQ BP, AX
- CALL runtime·gcWriteBarrier<ABIInternal>(SB)
- XCHGQ BP, AX
- RET
-
-// gcWriteBarrierSI is gcWriteBarrier, but with args in DI and SI.
-// Defined as ABIInternal since it does not use the stable Go ABI.
-TEXT runtime·gcWriteBarrierSI<ABIInternal>(SB),NOSPLIT,$0
- XCHGQ SI, AX
- CALL runtime·gcWriteBarrier<ABIInternal>(SB)
- XCHGQ SI, AX
- RET
-
-// gcWriteBarrierR8 is gcWriteBarrier, but with args in DI and R8.
-// Defined as ABIInternal since it does not use the stable Go ABI.
-TEXT runtime·gcWriteBarrierR8<ABIInternal>(SB),NOSPLIT,$0
- XCHGQ R8, AX
- CALL runtime·gcWriteBarrier<ABIInternal>(SB)
- XCHGQ R8, AX
- RET
-
-// gcWriteBarrierR9 is gcWriteBarrier, but with args in DI and R9.
-// Defined as ABIInternal since it does not use the stable Go ABI.
-TEXT runtime·gcWriteBarrierR9<ABIInternal>(SB),NOSPLIT,$0
- XCHGQ R9, AX
- CALL runtime·gcWriteBarrier<ABIInternal>(SB)
- XCHGQ R9, AX
- RET
-
-DATA debugCallFrameTooLarge<>+0x00(SB)/20, $"call frame too large"
-GLOBL debugCallFrameTooLarge<>(SB), RODATA, $20 // Size duplicated below
-
-// debugCallV2 is the entry point for debugger-injected function
-// calls on running goroutines. It informs the runtime that a
-// debug call has been injected and creates a call frame for the
-// debugger to fill in.
-//
-// To inject a function call, a debugger should:
-// 1. Check that the goroutine is in state _Grunning and that
-// there are at least 256 bytes free on the stack.
-// 2. Push the current PC on the stack (updating SP).
-// 3. Write the desired argument frame size at SP-16 (using the SP
-// after step 2).
-// 4. Save all machine registers (including flags and XMM registers)
-// so they can be restored later by the debugger.
-// 5. Set the PC to debugCallV2 and resume execution.
-//
-// If the goroutine is in state _Grunnable, then it's not generally
-// safe to inject a call because it may return out via other runtime
-// operations. Instead, the debugger should unwind the stack to find
-// the return to non-runtime code, add a temporary breakpoint there,
-// and inject the call once that breakpoint is hit.
-//
-// If the goroutine is in any other state, it's not safe to inject a call.
-//
-// This function communicates back to the debugger by setting R12 and
-// invoking INT3 to raise a breakpoint signal. See the comments in the
-// implementation for the protocol the debugger is expected to
-// follow. InjectDebugCall in the runtime tests demonstrates this protocol.
-//
-// The debugger must ensure that any pointers passed to the function
-// obey escape analysis requirements. Specifically, it must not pass
-// a stack pointer to an escaping argument. debugCallV2 cannot check
-// this invariant.
-//
-// This is ABIInternal because Go code injects its PC directly into new
-// goroutine stacks.
-TEXT runtime·debugCallV2<ABIInternal>(SB),NOSPLIT,$152-0
- // Save all registers that may contain pointers so they can be
- // conservatively scanned.
- //
- // We can't do anything that might clobber any of these
- // registers before this.
- MOVQ R15, r15-(14*8+8)(SP)
- MOVQ R14, r14-(13*8+8)(SP)
- MOVQ R13, r13-(12*8+8)(SP)
- MOVQ R12, r12-(11*8+8)(SP)
- MOVQ R11, r11-(10*8+8)(SP)
- MOVQ R10, r10-(9*8+8)(SP)
- MOVQ R9, r9-(8*8+8)(SP)
- MOVQ R8, r8-(7*8+8)(SP)
- MOVQ DI, di-(6*8+8)(SP)
- MOVQ SI, si-(5*8+8)(SP)
- MOVQ BP, bp-(4*8+8)(SP)
- MOVQ BX, bx-(3*8+8)(SP)
- MOVQ DX, dx-(2*8+8)(SP)
- // Save the frame size before we clobber it. Either of the last
- // saves could clobber this depending on whether there's a saved BP.
- MOVQ frameSize-24(FP), DX // aka -16(RSP) before prologue
- MOVQ CX, cx-(1*8+8)(SP)
- MOVQ AX, ax-(0*8+8)(SP)
-
- // Save the argument frame size.
- MOVQ DX, frameSize-128(SP)
-
- // Perform a safe-point check.
- MOVQ retpc-8(FP), AX // Caller's PC
- MOVQ AX, 0(SP)
- CALL runtime·debugCallCheck(SB)
- MOVQ 8(SP), AX
- TESTQ AX, AX
- JZ good
- // The safety check failed. Put the reason string at the top
- // of the stack.
- MOVQ AX, 0(SP)
- MOVQ 16(SP), AX
- MOVQ AX, 8(SP)
- // Set R12 to 8 and invoke INT3. The debugger should get the
- // reason a call can't be injected from the top of the stack
- // and resume execution.
- MOVQ $8, R12
- BYTE $0xcc
- JMP restore
-
-good:
- // Registers are saved and it's safe to make a call.
- // Open up a call frame, moving the stack if necessary.
- //
- // Once the frame is allocated, this will set R12 to 0 and
- // invoke INT3. The debugger should write the argument
- // frame for the call at SP, set up argument registers, push
- // the trapping PC on the stack, set the PC to the function to
- // call, set RDX to point to the closure (if a closure call),
- // and resume execution.
- //
- // If the function returns, this will set R12 to 1 and invoke
- // INT3. The debugger can then inspect any return value saved
- // on the stack at SP and in registers and resume execution again.
- //
- // If the function panics, this will set R12 to 2 and invoke INT3.
- // The interface{} value of the panic will be at SP. The debugger
- // can inspect the panic value and resume execution again.
-#define DEBUG_CALL_DISPATCH(NAME,MAXSIZE) \
- CMPQ AX, $MAXSIZE; \
- JA 5(PC); \
- MOVQ $NAME(SB), AX; \
- MOVQ AX, 0(SP); \
- CALL runtime·debugCallWrap(SB); \
- JMP restore
-
- MOVQ frameSize-128(SP), AX
- DEBUG_CALL_DISPATCH(debugCall32<>, 32)
- DEBUG_CALL_DISPATCH(debugCall64<>, 64)
- DEBUG_CALL_DISPATCH(debugCall128<>, 128)
- DEBUG_CALL_DISPATCH(debugCall256<>, 256)
- DEBUG_CALL_DISPATCH(debugCall512<>, 512)
- DEBUG_CALL_DISPATCH(debugCall1024<>, 1024)
- DEBUG_CALL_DISPATCH(debugCall2048<>, 2048)
- DEBUG_CALL_DISPATCH(debugCall4096<>, 4096)
- DEBUG_CALL_DISPATCH(debugCall8192<>, 8192)
- DEBUG_CALL_DISPATCH(debugCall16384<>, 16384)
- DEBUG_CALL_DISPATCH(debugCall32768<>, 32768)
- DEBUG_CALL_DISPATCH(debugCall65536<>, 65536)
- // The frame size is too large. Report the error.
- MOVQ $debugCallFrameTooLarge<>(SB), AX
- MOVQ AX, 0(SP)
- MOVQ $20, 8(SP) // length of debugCallFrameTooLarge string
- MOVQ $8, R12
- BYTE $0xcc
- JMP restore
-
-restore:
- // Calls and failures resume here.
- //
- // Set R12 to 16 and invoke INT3. The debugger should restore
- // all registers except RIP and RSP and resume execution.
- MOVQ $16, R12
- BYTE $0xcc
- // We must not modify flags after this point.
-
- // Restore pointer-containing registers, which may have been
- // modified from the debugger's copy by stack copying.
- MOVQ ax-(0*8+8)(SP), AX
- MOVQ cx-(1*8+8)(SP), CX
- MOVQ dx-(2*8+8)(SP), DX
- MOVQ bx-(3*8+8)(SP), BX
- MOVQ bp-(4*8+8)(SP), BP
- MOVQ si-(5*8+8)(SP), SI
- MOVQ di-(6*8+8)(SP), DI
- MOVQ r8-(7*8+8)(SP), R8
- MOVQ r9-(8*8+8)(SP), R9
- MOVQ r10-(9*8+8)(SP), R10
- MOVQ r11-(10*8+8)(SP), R11
- MOVQ r12-(11*8+8)(SP), R12
- MOVQ r13-(12*8+8)(SP), R13
- MOVQ r14-(13*8+8)(SP), R14
- MOVQ r15-(14*8+8)(SP), R15
-
- RET
-
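-// Hedged summary of the R12/INT3 protocol used above (values are taken
-// from the comments in this function; the constant names are invented):
-//
-//	const (
-//		debugCallRunFrame = 0  // frame ready: write args, push PC, resume
-//		debugCallReturned = 1  // call returned: results at SP and in registers
-//		debugCallPanicked = 2  // panic value (interface{}) at SP
-//		debugCallUnsafe   = 8  // reason string (pointer, length) at SP
-//		debugCallRestore  = 16 // restore all registers except RIP/RSP, resume
-//	)
-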
-// runtime.debugCallCheck assumes that functions defined with the
-// DEBUG_CALL_FN macro are safe points to inject calls.
-#define DEBUG_CALL_FN(NAME,MAXSIZE) \
-TEXT NAME(SB),WRAPPER,$MAXSIZE-0; \
- NO_LOCAL_POINTERS; \
- MOVQ $0, R12; \
- BYTE $0xcc; \
- MOVQ $1, R12; \
- BYTE $0xcc; \
- RET
-DEBUG_CALL_FN(debugCall32<>, 32)
-DEBUG_CALL_FN(debugCall64<>, 64)
-DEBUG_CALL_FN(debugCall128<>, 128)
-DEBUG_CALL_FN(debugCall256<>, 256)
-DEBUG_CALL_FN(debugCall512<>, 512)
-DEBUG_CALL_FN(debugCall1024<>, 1024)
-DEBUG_CALL_FN(debugCall2048<>, 2048)
-DEBUG_CALL_FN(debugCall4096<>, 4096)
-DEBUG_CALL_FN(debugCall8192<>, 8192)
-DEBUG_CALL_FN(debugCall16384<>, 16384)
-DEBUG_CALL_FN(debugCall32768<>, 32768)
-DEBUG_CALL_FN(debugCall65536<>, 65536)
-
-// func debugCallPanicked(val interface{})
-TEXT runtime·debugCallPanicked(SB),NOSPLIT,$16-16
- // Copy the panic value to the top of stack.
- MOVQ val_type+0(FP), AX
- MOVQ AX, 0(SP)
- MOVQ val_data+8(FP), AX
- MOVQ AX, 8(SP)
- MOVQ $2, R12
- BYTE $0xcc
- RET
-
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments is allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-// Defined as ABIInternal since they do not use the stack-based Go ABI.
-TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, BX
- JMP runtime·goPanicIndex<ABIInternal>(SB)
-TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, BX
- JMP runtime·goPanicIndexU<ABIInternal>(SB)
-TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, AX
- MOVQ DX, BX
- JMP runtime·goPanicSliceAlen<ABIInternal>(SB)
-TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, AX
- MOVQ DX, BX
- JMP runtime·goPanicSliceAlenU<ABIInternal>(SB)
-TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, AX
- MOVQ DX, BX
- JMP runtime·goPanicSliceAcap<ABIInternal>(SB)
-TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, AX
- MOVQ DX, BX
- JMP runtime·goPanicSliceAcapU<ABIInternal>(SB)
-TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, BX
- JMP runtime·goPanicSliceB<ABIInternal>(SB)
-TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, BX
- JMP runtime·goPanicSliceBU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ DX, AX
- JMP runtime·goPanicSlice3Alen<ABIInternal>(SB)
-TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ DX, AX
- JMP runtime·goPanicSlice3AlenU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ DX, AX
- JMP runtime·goPanicSlice3Acap<ABIInternal>(SB)
-TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ DX, AX
- JMP runtime·goPanicSlice3AcapU<ABIInternal>(SB)
-TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, AX
- MOVQ DX, BX
- JMP runtime·goPanicSlice3B<ABIInternal>(SB)
-TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, AX
- MOVQ DX, BX
- JMP runtime·goPanicSlice3BU<ABIInternal>(SB)
-TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, BX
- JMP runtime·goPanicSlice3C<ABIInternal>(SB)
-TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, BX
- JMP runtime·goPanicSlice3CU<ABIInternal>(SB)
-TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ DX, AX
- JMP runtime·goPanicSliceConvert<ABIInternal>(SB)
-
-#ifdef GOOS_android
-// Use the free TLS_SLOT_APP slot #2 on Android Q.
-// Earlier Android versions are set up in gcc_android.c.
-DATA runtime·tls_g+0(SB)/8, $16
-GLOBL runtime·tls_g+0(SB), NOPTR, $8
-#endif
-
-// The compiler and assembler's -spectre=ret mode rewrites
-// all indirect CALL AX / JMP AX instructions to be
-// CALL retpolineAX / JMP retpolineAX.
-// See https://support.google.com/faqs/answer/7625886.
-#define RETPOLINE(reg) \
- /* CALL setup */ BYTE $0xE8; BYTE $(2+2); BYTE $0; BYTE $0; BYTE $0; \
- /* nospec: */ \
- /* PAUSE */ BYTE $0xF3; BYTE $0x90; \
- /* JMP nospec */ BYTE $0xEB; BYTE $-(2+2); \
- /* setup: */ \
- /* MOVQ AX, 0(SP) */ BYTE $0x48|((reg&8)>>1); BYTE $0x89; \
- BYTE $0x04|((reg&7)<<3); BYTE $0x24; \
- /* RET */ BYTE $0xC3
-
-TEXT runtime·retpolineAX(SB),NOSPLIT,$0; RETPOLINE(0)
-TEXT runtime·retpolineCX(SB),NOSPLIT,$0; RETPOLINE(1)
-TEXT runtime·retpolineDX(SB),NOSPLIT,$0; RETPOLINE(2)
-TEXT runtime·retpolineBX(SB),NOSPLIT,$0; RETPOLINE(3)
-/* SP is 4, can't happen / magic encodings */
-TEXT runtime·retpolineBP(SB),NOSPLIT,$0; RETPOLINE(5)
-TEXT runtime·retpolineSI(SB),NOSPLIT,$0; RETPOLINE(6)
-TEXT runtime·retpolineDI(SB),NOSPLIT,$0; RETPOLINE(7)
-TEXT runtime·retpolineR8(SB),NOSPLIT,$0; RETPOLINE(8)
-TEXT runtime·retpolineR9(SB),NOSPLIT,$0; RETPOLINE(9)
-TEXT runtime·retpolineR10(SB),NOSPLIT,$0; RETPOLINE(10)
-TEXT runtime·retpolineR11(SB),NOSPLIT,$0; RETPOLINE(11)
-TEXT runtime·retpolineR12(SB),NOSPLIT,$0; RETPOLINE(12)
-TEXT runtime·retpolineR13(SB),NOSPLIT,$0; RETPOLINE(13)
-TEXT runtime·retpolineR14(SB),NOSPLIT,$0; RETPOLINE(14)
-TEXT runtime·retpolineR15(SB),NOSPLIT,$0; RETPOLINE(15)
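-
-// Effective sequence of each retpoline above, as a sketch reconstructed
-// from the byte encodings and their inline comments (shown for AX):
-//
-//	CALL setup       // push return address; speculation falls into the trap
-//	nospec:
-//	PAUSE            // speculative execution spins harmlessly here
-//	JMP nospec
-//	setup:
-//	MOVQ AX, 0(SP)   // overwrite the pushed return address with the target
-//	RET              // "return" to *AX without an indirect branch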
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/callbacks.go b/contrib/go/_std_1.18/src/runtime/cgo/callbacks.go
deleted file mode 100644
index cd8b795387..0000000000
--- a/contrib/go/_std_1.18/src/runtime/cgo/callbacks.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cgo
-
-import "unsafe"
-
-// These utility functions are available to be called from code
-// compiled with gcc via crosscall2.
-
-// The declaration of crosscall2 is:
-// void crosscall2(void (*fn)(void *), void *, int);
-//
-// We need to export the symbol crosscall2 in order to support
-// callbacks from shared libraries. This applies regardless of
-// linking mode.
-//
-// Compatibility note: SWIG uses crosscall2 in exactly one situation:
-// to call _cgo_panic using the pattern shown below. We need to keep
-// that pattern working. In particular, crosscall2 actually takes four
-// arguments, but it works to call it with three arguments when
-// calling _cgo_panic.
-//go:cgo_export_static crosscall2
-//go:cgo_export_dynamic crosscall2
-
-// Panic. The argument is converted into a Go string.
-
-// Call like this in code compiled with gcc:
-// struct { const char *p; } a;
-// a.p = /* string to pass to panic */;
-// crosscall2(_cgo_panic, &a, sizeof a);
-// /* The function call will not return. */
-
-// TODO: We should export a regular C function to panic, change SWIG
-// to use that instead of the above pattern, and then we can drop
-// backwards-compatibility from crosscall2 and stop exporting it.
-
-//go:linkname _runtime_cgo_panic_internal runtime._cgo_panic_internal
-func _runtime_cgo_panic_internal(p *byte)
-
-//go:linkname _cgo_panic _cgo_panic
-//go:cgo_export_static _cgo_panic
-//go:cgo_export_dynamic _cgo_panic
-func _cgo_panic(a *struct{ cstr *byte }) {
- _runtime_cgo_panic_internal(a.cstr)
-}
-
-//go:cgo_import_static x_cgo_init
-//go:linkname x_cgo_init x_cgo_init
-//go:linkname _cgo_init _cgo_init
-var x_cgo_init byte
-var _cgo_init = &x_cgo_init
-
-//go:cgo_import_static x_cgo_thread_start
-//go:linkname x_cgo_thread_start x_cgo_thread_start
-//go:linkname _cgo_thread_start _cgo_thread_start
-var x_cgo_thread_start byte
-var _cgo_thread_start = &x_cgo_thread_start
-
-// Creates a new system thread without updating any Go state.
-//
-// This method is invoked during shared library loading to create a new OS
-// thread to perform the runtime initialization. This method is similar to
-// _cgo_sys_thread_start except that it doesn't update any Go state.
-
-//go:cgo_import_static x_cgo_sys_thread_create
-//go:linkname x_cgo_sys_thread_create x_cgo_sys_thread_create
-//go:linkname _cgo_sys_thread_create _cgo_sys_thread_create
-var x_cgo_sys_thread_create byte
-var _cgo_sys_thread_create = &x_cgo_sys_thread_create
-
-// Notifies that the runtime has been initialized.
-//
-// We currently block at every CGO entry point (via _cgo_wait_runtime_init_done)
-// to ensure that the runtime has been initialized before the CGO call is
-// executed. This is necessary for shared libraries where we kick off runtime
-// initialization in a separate thread and return without waiting for this
-// thread to complete the init.
-
-//go:cgo_import_static x_cgo_notify_runtime_init_done
-//go:linkname x_cgo_notify_runtime_init_done x_cgo_notify_runtime_init_done
-//go:linkname _cgo_notify_runtime_init_done _cgo_notify_runtime_init_done
-var x_cgo_notify_runtime_init_done byte
-var _cgo_notify_runtime_init_done = &x_cgo_notify_runtime_init_done
-
-// Sets the traceback context function. See runtime.SetCgoTraceback.
-
-//go:cgo_import_static x_cgo_set_context_function
-//go:linkname x_cgo_set_context_function x_cgo_set_context_function
-//go:linkname _cgo_set_context_function _cgo_set_context_function
-var x_cgo_set_context_function byte
-var _cgo_set_context_function = &x_cgo_set_context_function
-
-// Calls a libc function to execute background work injected via libc
-// interceptors, such as processing pending signals under the thread
-// sanitizer.
-//
-// Left as a nil pointer if no libc interceptors are expected.
-
-//go:cgo_import_static _cgo_yield
-//go:linkname _cgo_yield _cgo_yield
-var _cgo_yield unsafe.Pointer
-
-//go:cgo_export_static _cgo_topofstack
-//go:cgo_export_dynamic _cgo_topofstack
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/cgo.go b/contrib/go/_std_1.18/src/runtime/cgo/cgo.go
deleted file mode 100644
index 4d2caf6c4f..0000000000
--- a/contrib/go/_std_1.18/src/runtime/cgo/cgo.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package cgo contains runtime support for code generated
-by the cgo tool. See the documentation for the cgo command
-for details on using cgo.
-*/
-package cgo
-
-/*
-
-#cgo darwin,!arm64 LDFLAGS: -lpthread
-#cgo darwin,arm64 LDFLAGS: -framework CoreFoundation
-#cgo dragonfly LDFLAGS: -lpthread
-#cgo freebsd LDFLAGS: -lpthread
-#cgo android LDFLAGS: -llog
-#cgo !android,linux LDFLAGS: -lpthread
-#cgo netbsd LDFLAGS: -lpthread
-#cgo openbsd LDFLAGS: -lpthread
-#cgo aix LDFLAGS: -Wl,-berok
-#cgo solaris LDFLAGS: -lxnet
-#cgo illumos LDFLAGS: -lsocket
-
-// Issue 35247.
-#cgo darwin CFLAGS: -Wno-nullability-completeness
-
-#cgo CFLAGS: -Wall -Werror
-
-#cgo solaris CPPFLAGS: -D_POSIX_PTHREAD_SEMANTICS
-
-*/
-import "C"
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/gcc_linux_amd64.c b/contrib/go/_std_1.18/src/runtime/cgo/gcc_linux_amd64.c
deleted file mode 100644
index c25e7e769b..0000000000
--- a/contrib/go/_std_1.18/src/runtime/cgo/gcc_linux_amd64.c
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include <pthread.h>
-#include <errno.h>
-#include <string.h> // strerror
-#include <signal.h>
-#include <stdlib.h>
-#include "libcgo.h"
-#include "libcgo_unix.h"
-
-static void* threadentry(void*);
-static void (*setg_gcc)(void*);
-
-// This will be set in gcc_android.c for android-specific customization.
-void (*x_cgo_inittls)(void **tlsg, void **tlsbase) __attribute__((common));
-
-void
-x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
-{
- pthread_attr_t *attr;
- size_t size;
-
- /* The memory sanitizer distributed with versions of clang
- before 3.8 has a bug: if you call mmap before malloc, mmap
- may return an address that is later overwritten by the msan
- library. Avoid this problem by forcing a call to malloc
- here, before we ever call malloc.
-
- This is only required for the memory sanitizer, so it's
- unfortunate that we always run it. It should be possible
- to remove this when we no longer care about versions of
- clang before 3.8. The test for this is
- misc/cgo/testsanitizers.
-
- GCC works hard to eliminate a seemingly unnecessary call to
- malloc, so we actually use the memory we allocate. */
-
- setg_gcc = setg;
- attr = (pthread_attr_t*)malloc(sizeof *attr);
- if (attr == NULL) {
- fatalf("malloc failed: %s", strerror(errno));
- }
- pthread_attr_init(attr);
- pthread_attr_getstacksize(attr, &size);
- g->stacklo = (uintptr)&size - size + 4096;
- pthread_attr_destroy(attr);
- free(attr);
-
- if (x_cgo_inittls) {
- x_cgo_inittls(tlsg, tlsbase);
- }
-}
-
-
-void
-_cgo_sys_thread_start(ThreadStart *ts)
-{
- pthread_attr_t attr;
- sigset_t ign, oset;
- pthread_t p;
- size_t size;
- int err;
-
- sigfillset(&ign);
- pthread_sigmask(SIG_SETMASK, &ign, &oset);
-
- pthread_attr_init(&attr);
- pthread_attr_getstacksize(&attr, &size);
- // Leave stacklo=0 and set stackhi=size; mstart will do the rest.
- ts->g->stackhi = size;
- err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
-
- pthread_sigmask(SIG_SETMASK, &oset, nil);
-
- if (err != 0) {
- fatalf("pthread_create failed: %s", strerror(err));
- }
-}
-
-static void*
-threadentry(void *v)
-{
- ThreadStart ts;
-
- ts = *(ThreadStart*)v;
- _cgo_tsan_acquire();
- free(v);
- _cgo_tsan_release();
-
- crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
- return nil;
-}
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/setenv.go b/contrib/go/_std_1.18/src/runtime/cgo/setenv.go
deleted file mode 100644
index 0f4780a945..0000000000
--- a/contrib/go/_std_1.18/src/runtime/cgo/setenv.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-package cgo
-
-import _ "unsafe" // for go:linkname
-
-//go:cgo_import_static x_cgo_setenv
-//go:linkname x_cgo_setenv x_cgo_setenv
-//go:linkname _cgo_setenv runtime._cgo_setenv
-var x_cgo_setenv byte
-var _cgo_setenv = &x_cgo_setenv
-
-//go:cgo_import_static x_cgo_unsetenv
-//go:linkname x_cgo_unsetenv x_cgo_unsetenv
-//go:linkname _cgo_unsetenv runtime._cgo_unsetenv
-var x_cgo_unsetenv byte
-var _cgo_unsetenv = &x_cgo_unsetenv
diff --git a/contrib/go/_std_1.18/src/runtime/cgo_mmap.go b/contrib/go/_std_1.18/src/runtime/cgo_mmap.go
deleted file mode 100644
index 0cb25bdcda..0000000000
--- a/contrib/go/_std_1.18/src/runtime/cgo_mmap.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Support for memory sanitizer. See runtime/cgo/mmap.go.
-
-//go:build (linux && amd64) || (linux && arm64)
-
-package runtime
-
-import "unsafe"
-
-// _cgo_mmap is filled in by runtime/cgo when it is linked into the
-// program, so it is only non-nil when using cgo.
-//go:linkname _cgo_mmap _cgo_mmap
-var _cgo_mmap unsafe.Pointer
-
-// _cgo_munmap is filled in by runtime/cgo when it is linked into the
-// program, so it is only non-nil when using cgo.
-//go:linkname _cgo_munmap _cgo_munmap
-var _cgo_munmap unsafe.Pointer
-
-// mmap is used to route the mmap system call through C code when using cgo, to
-// support sanitizer interceptors. Don't allow stack splits, since this function
-// (used by sysAlloc) is called in a lot of low-level parts of the runtime and
-// callers often assume it won't acquire any locks.
-//go:nosplit
-func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int) {
- if _cgo_mmap != nil {
- // Make ret a uintptr so that writing to it in the
- // function literal does not trigger a write barrier.
- // A write barrier here could break because of the way
- // that mmap uses the same value both as a pointer and
- // an errno value.
- var ret uintptr
- systemstack(func() {
- ret = callCgoMmap(addr, n, prot, flags, fd, off)
- })
- if ret < 4096 {
- return nil, int(ret)
- }
- return unsafe.Pointer(ret), 0
- }
- return sysMmap(addr, n, prot, flags, fd, off)
-}
-
-func munmap(addr unsafe.Pointer, n uintptr) {
- if _cgo_munmap != nil {
- systemstack(func() { callCgoMunmap(addr, n) })
- return
- }
- sysMunmap(addr, n)
-}
-
-// sysMmap calls the mmap system call. It is implemented in assembly.
-func sysMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (p unsafe.Pointer, err int)
-
-// callCgoMmap calls the mmap function in the runtime/cgo package
-// using the GCC calling convention. It is implemented in assembly.
-func callCgoMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) uintptr
-
-// sysMunmap calls the munmap system call. It is implemented in assembly.
-func sysMunmap(addr unsafe.Pointer, n uintptr)
-
-// callCgoMunmap calls the munmap function in the runtime/cgo package
-// using the GCC calling convention. It is implemented in assembly.
-func callCgoMunmap(addr unsafe.Pointer, n uintptr)
diff --git a/contrib/go/_std_1.18/src/runtime/cgo_sigaction.go b/contrib/go/_std_1.18/src/runtime/cgo_sigaction.go
deleted file mode 100644
index a2e12f0f0e..0000000000
--- a/contrib/go/_std_1.18/src/runtime/cgo_sigaction.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Support for sanitizers. See runtime/cgo/sigaction.go.
-
-//go:build (linux && amd64) || (freebsd && amd64) || (linux && arm64) || (linux && ppc64le)
-
-package runtime
-
-import "unsafe"
-
-// _cgo_sigaction is filled in by runtime/cgo when it is linked into the
-// program, so it is only non-nil when using cgo.
-//go:linkname _cgo_sigaction _cgo_sigaction
-var _cgo_sigaction unsafe.Pointer
-
-//go:nosplit
-//go:nowritebarrierrec
-func sigaction(sig uint32, new, old *sigactiont) {
- // racewalk.go avoids adding sanitizing instrumentation to package runtime,
- // but we might be calling into instrumented C functions here,
- // so we need the pointer parameters to be properly marked.
- //
- // Mark the input as having been written before the call
- // and the output as read after.
- if msanenabled && new != nil {
- msanwrite(unsafe.Pointer(new), unsafe.Sizeof(*new))
- }
- if asanenabled && new != nil {
- asanwrite(unsafe.Pointer(new), unsafe.Sizeof(*new))
- }
- if _cgo_sigaction == nil || inForkedChild {
- sysSigaction(sig, new, old)
- } else {
- // We need to call _cgo_sigaction, which means we need a big enough stack
- // for C. To complicate matters, we may be in libpreinit (before the
- // runtime has been initialized) or in an asynchronous signal handler (with
- // the current thread in transition between goroutines, or with the g0
- // system stack already in use).
-
- var ret int32
-
- var g *g
- if mainStarted {
- g = getg()
- }
- sp := uintptr(unsafe.Pointer(&sig))
- switch {
- case g == nil:
- // No g: we're on a C stack or a signal stack.
- ret = callCgoSigaction(uintptr(sig), new, old)
- case sp < g.stack.lo || sp >= g.stack.hi:
- // We're no longer on g's stack, so we must be handling a signal. It's
- // possible that we interrupted the thread during a transition between g
- // and g0, so we should stay on the current stack to avoid corrupting g0.
- ret = callCgoSigaction(uintptr(sig), new, old)
- default:
- // We're running on g's stack, so either we're not in a signal handler or
- // the signal handler has set the correct g. If we're on gsignal or g0,
- // systemstack will make the call directly; otherwise, it will switch to
- // g0 to ensure we have enough room to call a libc function.
- //
- // The function literal that we pass to systemstack is not nosplit, but
- // that's ok: we'll be running on a fresh, clean system stack so the stack
- // check will always succeed anyway.
- systemstack(func() {
- ret = callCgoSigaction(uintptr(sig), new, old)
- })
- }
-
- const EINVAL = 22
- if ret == EINVAL {
- // libc reserves certain signals — normally 32-33 — for pthreads, and
- // returns EINVAL for sigaction calls on those signals. If we get EINVAL,
- // fall back to making the syscall directly.
- sysSigaction(sig, new, old)
- }
- }
-
- if msanenabled && old != nil {
- msanread(unsafe.Pointer(old), unsafe.Sizeof(*old))
- }
- if asanenabled && old != nil {
- asanread(unsafe.Pointer(old), unsafe.Sizeof(*old))
- }
-}
-
-// callCgoSigaction calls the sigaction function in the runtime/cgo package
-// using the GCC calling convention. It is implemented in assembly.
-//go:noescape
-func callCgoSigaction(sig uintptr, new, old *sigactiont) int32
diff --git a/contrib/go/_std_1.18/src/runtime/cgocall.go b/contrib/go/_std_1.18/src/runtime/cgocall.go
deleted file mode 100644
index a0c9560fd0..0000000000
--- a/contrib/go/_std_1.18/src/runtime/cgocall.go
+++ /dev/null
@@ -1,639 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Cgo call and callback support.
-//
-// To call into the C function f from Go, the cgo-generated code calls
-// runtime.cgocall(_cgo_Cfunc_f, frame), where _cgo_Cfunc_f is a
-// gcc-compiled function written by cgo.
-//
-// runtime.cgocall (below) calls entersyscall so as not to block
-// other goroutines or the garbage collector, and then calls
-// runtime.asmcgocall(_cgo_Cfunc_f, frame).
-//
-// runtime.asmcgocall (in asm_$GOARCH.s) switches to the m->g0 stack
-// (assumed to be an operating system-allocated stack, so safe to run
-// gcc-compiled code on) and calls _cgo_Cfunc_f(frame).
-//
-// _cgo_Cfunc_f invokes the actual C function f with arguments
-// taken from the frame structure, records the results in the frame,
-// and returns to runtime.asmcgocall.
-//
-// After it regains control, runtime.asmcgocall switches back to the
-// original g (m->curg)'s stack and returns to runtime.cgocall.
-//
-// After it regains control, runtime.cgocall calls exitsyscall, which blocks
-// until this m can run Go code without violating the $GOMAXPROCS limit,
-// and then unlocks g from m.
-//
-// The above description skipped over the possibility of the gcc-compiled
-// function f calling back into Go. If that happens, we continue down
-// the rabbit hole during the execution of f.
-//
-// To make it possible for gcc-compiled C code to call a Go function p.GoF,
-// cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't
-// know about packages). The gcc-compiled C function f calls GoF.
-//
-// GoF initializes "frame", a structure containing all of its
-// arguments and slots for p.GoF's results. It calls
-// crosscall2(_cgoexp_GoF, frame, framesize, ctxt) using the gcc ABI.
-//
-// crosscall2 (in cgo/asm_$GOARCH.s) is a four-argument adapter from
-// the gcc function call ABI to the gc function call ABI. At this
-// point we're in the Go runtime, but we're still running on m.g0's
-// stack and outside the $GOMAXPROCS limit. crosscall2 calls
-// runtime.cgocallback(_cgoexp_GoF, frame, ctxt) using the gc ABI.
-// (crosscall2's framesize argument is no longer used, but there's one
-// case where SWIG calls crosscall2 directly and expects to pass this
-// argument. See _cgo_panic.)
-//
-// runtime.cgocallback (in asm_$GOARCH.s) switches from m.g0's stack
-// to the original g (m.curg)'s stack, on which it calls
-// runtime.cgocallbackg(_cgoexp_GoF, frame, ctxt). As part of the
-// stack switch, runtime.cgocallback saves the current SP as
-// m.g0.sched.sp, so that any use of m.g0's stack during the execution
-// of the callback will be done below the existing stack frames.
-// Before overwriting m.g0.sched.sp, it pushes the old value on the
-// m.g0 stack, so that it can be restored later.
-//
-// runtime.cgocallbackg (below) is now running on a real goroutine
-// stack (not an m.g0 stack). First it calls runtime.exitsyscall, which will
-// block until the $GOMAXPROCS limit allows running this goroutine.
-// Once exitsyscall has returned, it is safe to do things like call the memory
-// allocator or invoke the Go callback function. runtime.cgocallbackg
-// first defers a function to unwind m.g0.sched.sp, so that if p.GoF
-// panics, m.g0.sched.sp will be restored to its old value: the m.g0 stack
-// and the m.curg stack will be unwound in lock step.
-// Then it calls _cgoexp_GoF(frame).
-//
-// _cgoexp_GoF, which was generated by cmd/cgo, unpacks the arguments
-// from frame, calls p.GoF, writes the results back to frame, and
-// returns. Now we start unwinding this whole process.
-//
-// runtime.cgocallbackg pops but does not execute the deferred
-// function to unwind m.g0.sched.sp, calls runtime.entersyscall, and
-// returns to runtime.cgocallback.
-//
-// After it regains control, runtime.cgocallback switches back to
-// m.g0's stack (the pointer is still in m.g0.sched.sp), restores the old
-// m.g0.sched.sp value from the stack, and returns to crosscall2.
-//
-// crosscall2 restores the callee-save registers for gcc and returns
-// to GoF, which unpacks any result values and returns to f.
-
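// A minimal two-file cgo sketch of the round trip described above: Go calls
// the gcc-compiled C function f, and f calls back into the exported Go
// function GoF. File names and values are illustrative, not from the runtime.

// callback.go
package main

/*
extern void f(int); // defined in cfuncs.c, see below
*/
import "C"
import "fmt"

//export GoF
func GoF(x C.int) { fmt.Println("callback with", x) }

func main() {
	C.f(41) // cgocall -> asmcgocall -> f -> crosscall2 -> cgocallbackg -> GoF
}

// cfuncs.c:
//	#include "_cgo_export.h"
//	void f(int x) { GoF(x + 1); }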
-package runtime
-
-import (
- "internal/goarch"
- "runtime/internal/atomic"
- "runtime/internal/sys"
- "unsafe"
-)
-
-// Addresses collected in a cgo backtrace when crashing.
-// Length must match arg.Max in x_cgo_callers in runtime/cgo/gcc_traceback.c.
-type cgoCallers [32]uintptr
-
-// argset matches runtime/cgo/linux_syscall.c:argset_t
-type argset struct {
- args unsafe.Pointer
- retval uintptr
-}
-
-// syscall_cgocaller is a wrapper for the syscall package to call cgocall for libc (cgo) calls.
-//go:linkname syscall_cgocaller syscall.cgocaller
-//go:nosplit
-//go:uintptrescapes
-func syscall_cgocaller(fn unsafe.Pointer, args ...uintptr) uintptr {
- as := argset{args: unsafe.Pointer(&args[0])}
- cgocall(fn, unsafe.Pointer(&as))
- return as.retval
-}
-
-var ncgocall uint64 // number of cgo calls in total for dead m
-
-// Call from Go to C.
-//
-// This must be nosplit because it's used for syscalls on some
-// platforms. Syscalls may have untyped arguments on the stack, so
-// it's not safe to grow or scan the stack.
-//
-//go:nosplit
-func cgocall(fn, arg unsafe.Pointer) int32 {
- if !iscgo && GOOS != "solaris" && GOOS != "illumos" && GOOS != "windows" {
- throw("cgocall unavailable")
- }
-
- if fn == nil {
- throw("cgocall nil")
- }
-
- if raceenabled {
- racereleasemerge(unsafe.Pointer(&racecgosync))
- }
-
- mp := getg().m
- mp.ncgocall++
- mp.ncgo++
-
- // Reset traceback.
- mp.cgoCallers[0] = 0
-
- // Announce we are entering a system call
- // so that the scheduler knows to create another
- // M to run goroutines while we are in the
- // foreign code.
- //
- // The call to asmcgocall is guaranteed not to
- // grow the stack and does not allocate memory,
- // so it is safe to call while "in a system call", outside
- // the $GOMAXPROCS accounting.
- //
- // fn may call back into Go code, in which case we'll exit the
- // "system call", run the Go code (which may grow the stack),
- // and then re-enter the "system call" reusing the PC and SP
- // saved by entersyscall here.
- entersyscall()
-
- // Tell asynchronous preemption that we're entering external
- // code. We do this after entersyscall because this may block
- // and cause an async preemption to fail, but at this point a
- // sync preemption will succeed (though this is not a matter
- // of correctness).
- osPreemptExtEnter(mp)
-
- mp.incgo = true
- errno := asmcgocall(fn, arg)
-
- // Update accounting before exitsyscall because exitsyscall may
- // reschedule us on to a different M.
- mp.incgo = false
- mp.ncgo--
-
- osPreemptExtExit(mp)
-
- exitsyscall()
-
- // Note that raceacquire must be called only after exitsyscall has
- // wired this M to a P.
- if raceenabled {
- raceacquire(unsafe.Pointer(&racecgosync))
- }
-
- // From the garbage collector's perspective, time can move
- // backwards in the sequence above. If there's a callback into
- // Go code, GC will see this function at the call to
- // asmcgocall. When the Go call later returns to C, the
- // syscall PC/SP is rolled back and the GC sees this function
- // back at the call to entersyscall. Normally, fn and arg
- // would be live at entersyscall and dead at asmcgocall, so if
- // time moved backwards, GC would see these arguments as dead
- // and then live. Prevent these undead arguments from crashing
- // GC by forcing them to stay live across this time warp.
- KeepAlive(fn)
- KeepAlive(arg)
- KeepAlive(mp)
-
- return errno
-}
-
-// Call from C back to Go. fn must point to an ABIInternal Go entry-point.
-//go:nosplit
-func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
- gp := getg()
- if gp != gp.m.curg {
- println("runtime: bad g in cgocallback")
- exit(2)
- }
-
- // The call from C is on gp.m's g0 stack, so we must ensure
- // that we stay on that M. We have to do this before calling
- // exitsyscall, since it would otherwise be free to move us to
- // a different M. The call to unlockOSThread is in unwindm.
- lockOSThread()
-
- checkm := gp.m
-
- // Save the current syscall parameters, so m.syscall can be
- // used again if the callback decides to make a syscall.
- syscall := gp.m.syscall
-
- // entersyscall saves the caller's SP to allow the GC to trace the Go
- // stack. However, since we're returning to an earlier stack frame and
- // need to pair with the entersyscall() call made by cgocall, we must
- // save syscall* and let reentersyscall restore them.
- savedsp := unsafe.Pointer(gp.syscallsp)
- savedpc := gp.syscallpc
- exitsyscall() // coming out of cgo call
- gp.m.incgo = false
-
- osPreemptExtExit(gp.m)
-
- cgocallbackg1(fn, frame, ctxt) // will call unlockOSThread
-
- // At this point unlockOSThread has been called.
- // The following code must not change to a different m.
- // This is enforced by checking incgo in the schedule function.
-
- gp.m.incgo = true
-
- if gp.m != checkm {
- throw("m changed unexpectedly in cgocallbackg")
- }
-
- osPreemptExtEnter(gp.m)
-
- // going back to cgo call
- reentersyscall(savedpc, uintptr(savedsp))
-
- gp.m.syscall = syscall
-}
-
-func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) {
- gp := getg()
-
- // When we return, undo the call to lockOSThread in cgocallbackg.
- // We must still stay on the same m.
- defer unlockOSThread()
-
- if gp.m.needextram || atomic.Load(&extraMWaiters) > 0 {
- gp.m.needextram = false
- systemstack(newextram)
- }
-
- if ctxt != 0 {
- s := append(gp.cgoCtxt, ctxt)
-
- // Now we need to set gp.cgoCtxt = s, but we could get
- // a SIGPROF signal while manipulating the slice, and
- // the SIGPROF handler could pick up gp.cgoCtxt while
- // tracing up the stack. We need to ensure that the
- // handler always sees a valid slice, so set the
- // values in an order such that it always does.
- p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
- atomicstorep(unsafe.Pointer(&p.array), unsafe.Pointer(&s[0]))
- p.cap = cap(s)
- p.len = len(s)
-
- defer func(gp *g) {
- // Decrease the length of the slice by one, safely.
- p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
- p.len--
- }(gp)
- }
-
- if gp.m.ncgo == 0 {
- // The C call to Go came from a thread not currently running
- // any Go. In the case of -buildmode=c-archive or c-shared,
- // this call may be coming in before package initialization
- // is complete. Wait until it is.
- <-main_init_done
- }
-
- // Check whether the profiler needs to be turned on or off; this route to
- // run Go code does not use runtime.execute, so it bypasses the check there.
- hz := sched.profilehz
- if gp.m.profilehz != hz {
- setThreadCPUProfiler(hz)
- }
-
- // Add entry to defer stack in case of panic.
- restore := true
- defer unwindm(&restore)
-
- if raceenabled {
- raceacquire(unsafe.Pointer(&racecgosync))
- }
-
- // Invoke callback. This function is generated by cmd/cgo and
- // will unpack the argument frame and call the Go function.
- var cb func(frame unsafe.Pointer)
- cbFV := funcval{uintptr(fn)}
- *(*unsafe.Pointer)(unsafe.Pointer(&cb)) = noescape(unsafe.Pointer(&cbFV))
- cb(frame)
-
- if raceenabled {
- racereleasemerge(unsafe.Pointer(&racecgosync))
- }
-
- // Do not unwind m->g0->sched.sp.
- // Our caller, cgocallback, will do that.
- restore = false
-}
-
-func unwindm(restore *bool) {
- if *restore {
- // Restore sp saved by cgocallback during
- // unwind of g's stack (see comment at top of file).
- mp := acquirem()
- sched := &mp.g0.sched
- sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + alignUp(sys.MinFrameSize, sys.StackAlign)))
-
- // Do the accounting that cgocall will not have a chance to do
- // during an unwind.
- //
- // In the case where a Go call originates from C, ncgo is 0
- // and there is no matching cgocall to end.
- if mp.ncgo > 0 {
- mp.incgo = false
- mp.ncgo--
- osPreemptExtExit(mp)
- }
-
- releasem(mp)
- }
-}
-
-// called from assembly
-func badcgocallback() {
- throw("misaligned stack in cgocallback")
-}
-
-// called from (incomplete) assembly
-func cgounimpl() {
- throw("cgo not implemented")
-}
-
-var racecgosync uint64 // represents possible synchronization in C code
-
-// Pointer checking for cgo code.
-
-// We want to detect all cases where a program that does not use
-// unsafe makes a cgo call passing a Go pointer to memory that
-// contains a Go pointer. Here a Go pointer is defined as a pointer
-// to memory allocated by the Go runtime. Programs that use unsafe
-// can evade this restriction easily, so we don't try to catch them.
-// The cgo program will rewrite all possibly bad pointer arguments to
-// call cgoCheckPointer, where we can catch cases of a Go pointer
-// pointing to a Go pointer.
-
-// Complicating matters, taking the address of a slice or array
-// element permits the C program to access all elements of the slice
-// or array. In that case we will see a pointer to a single element,
-// but we need to check the entire data structure.
-
-// The cgoCheckPointer call takes additional arguments indicating that
-// it was called on an address expression. An additional argument of
-// true means that it only needs to check a single element. An
-// additional argument of a slice or array means that it needs to
-// check the entire slice/array, but nothing else. Otherwise, the
-// pointer could be anything, and we check the entire heap object,
-// which is conservative but safe.
-
-// When and if we implement a moving garbage collector,
-// cgoCheckPointer will pin the pointer for the duration of the cgo
-// call. (This is necessary but not sufficient; the cgo program will
-// also have to change to pin Go pointers that cannot point to Go
-// pointers.)
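// A sketch of the rule enforced below, assuming the default GODEBUG=cgocheck=1
// (the holder type and the C helper are hypothetical):

package main

/*
static void use(void *p) {}
*/
import "C"
import "unsafe"

type holder struct{ p *int }

func main() {
	n := 42
	h := &holder{p: &n}      // Go memory that contains a Go pointer
	C.use(unsafe.Pointer(h)) // panics: cgo argument has Go pointer to Go pointer
}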
-
-// cgoCheckPointer checks if the argument contains a Go pointer that
-// points to a Go pointer, and panics if it does.
-func cgoCheckPointer(ptr any, arg any) {
- if debug.cgocheck == 0 {
- return
- }
-
- ep := efaceOf(&ptr)
- t := ep._type
-
- top := true
- if arg != nil && (t.kind&kindMask == kindPtr || t.kind&kindMask == kindUnsafePointer) {
- p := ep.data
- if t.kind&kindDirectIface == 0 {
- p = *(*unsafe.Pointer)(p)
- }
- if p == nil || !cgoIsGoPointer(p) {
- return
- }
- aep := efaceOf(&arg)
- switch aep._type.kind & kindMask {
- case kindBool:
- if t.kind&kindMask == kindUnsafePointer {
- // We don't know the type of the element.
- break
- }
- pt := (*ptrtype)(unsafe.Pointer(t))
- cgoCheckArg(pt.elem, p, true, false, cgoCheckPointerFail)
- return
- case kindSlice:
- // Check the slice rather than the pointer.
- ep = aep
- t = ep._type
- case kindArray:
- // Check the array rather than the pointer.
- // Pass top as false since we have a pointer
- // to the array.
- ep = aep
- t = ep._type
- top = false
- default:
- throw("can't happen")
- }
- }
-
- cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, top, cgoCheckPointerFail)
-}
-
-const cgoCheckPointerFail = "cgo argument has Go pointer to Go pointer"
-const cgoResultFail = "cgo result has Go pointer"
-
-// cgoCheckArg is the real work of cgoCheckPointer. The argument p
-// is either a pointer to the value (of type t), or the value itself,
-// depending on indir. The top parameter is whether we are at the top
-// level, where Go pointers are allowed.
-func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
- if t.ptrdata == 0 || p == nil {
- // If the type has no pointers there is nothing to do.
- return
- }
-
- switch t.kind & kindMask {
- default:
- throw("can't happen")
- case kindArray:
- at := (*arraytype)(unsafe.Pointer(t))
- if !indir {
- if at.len != 1 {
- throw("can't happen")
- }
- cgoCheckArg(at.elem, p, at.elem.kind&kindDirectIface == 0, top, msg)
- return
- }
- for i := uintptr(0); i < at.len; i++ {
- cgoCheckArg(at.elem, p, true, top, msg)
- p = add(p, at.elem.size)
- }
- case kindChan, kindMap:
- // These types contain internal pointers that will
- // always be allocated in the Go heap. It's never OK
- // to pass them to C.
- panic(errorString(msg))
- case kindFunc:
- if indir {
- p = *(*unsafe.Pointer)(p)
- }
- if !cgoIsGoPointer(p) {
- return
- }
- panic(errorString(msg))
- case kindInterface:
- it := *(**_type)(p)
- if it == nil {
- return
- }
- // A type known at compile time is OK since it's
- // constant. A type not known at compile time will be
- // in the heap and will not be OK.
- if inheap(uintptr(unsafe.Pointer(it))) {
- panic(errorString(msg))
- }
- p = *(*unsafe.Pointer)(add(p, goarch.PtrSize))
- if !cgoIsGoPointer(p) {
- return
- }
- if !top {
- panic(errorString(msg))
- }
- cgoCheckArg(it, p, it.kind&kindDirectIface == 0, false, msg)
- case kindSlice:
- st := (*slicetype)(unsafe.Pointer(t))
- s := (*slice)(p)
- p = s.array
- if p == nil || !cgoIsGoPointer(p) {
- return
- }
- if !top {
- panic(errorString(msg))
- }
- if st.elem.ptrdata == 0 {
- return
- }
- for i := 0; i < s.cap; i++ {
- cgoCheckArg(st.elem, p, true, false, msg)
- p = add(p, st.elem.size)
- }
- case kindString:
- ss := (*stringStruct)(p)
- if !cgoIsGoPointer(ss.str) {
- return
- }
- if !top {
- panic(errorString(msg))
- }
- case kindStruct:
- st := (*structtype)(unsafe.Pointer(t))
- if !indir {
- if len(st.fields) != 1 {
- throw("can't happen")
- }
- cgoCheckArg(st.fields[0].typ, p, st.fields[0].typ.kind&kindDirectIface == 0, top, msg)
- return
- }
- for _, f := range st.fields {
- if f.typ.ptrdata == 0 {
- continue
- }
- cgoCheckArg(f.typ, add(p, f.offset()), true, top, msg)
- }
- case kindPtr, kindUnsafePointer:
- if indir {
- p = *(*unsafe.Pointer)(p)
- if p == nil {
- return
- }
- }
-
- if !cgoIsGoPointer(p) {
- return
- }
- if !top {
- panic(errorString(msg))
- }
-
- cgoCheckUnknownPointer(p, msg)
- }
-}
-
-// cgoCheckUnknownPointer is called for an arbitrary pointer into Go
-// memory. It checks whether that Go memory contains any other
-// pointer into Go memory. If it does, we panic.
-// The return values are unused but useful to see in panic tracebacks.
-func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) {
- if inheap(uintptr(p)) {
- b, span, _ := findObject(uintptr(p), 0, 0)
- base = b
- if base == 0 {
- return
- }
- hbits := heapBitsForAddr(base)
- n := span.elemsize
- for i = uintptr(0); i < n; i += goarch.PtrSize {
- if !hbits.morePointers() {
- // No more possible pointers.
- break
- }
- if hbits.isPointer() && cgoIsGoPointer(*(*unsafe.Pointer)(unsafe.Pointer(base + i))) {
- panic(errorString(msg))
- }
- hbits = hbits.next()
- }
-
- return
- }
-
- for _, datap := range activeModules() {
- if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
- // We have no way to know the size of the object.
- // We have to assume that it might contain a pointer.
- panic(errorString(msg))
- }
- // In the text or noptr sections, we know that the
- // pointer does not point to a Go pointer.
- }
-
- return
-}
-
-// cgoIsGoPointer reports whether the pointer is a Go pointer--a
-// pointer to Go memory. We only care about Go memory that might
-// contain pointers.
-//go:nosplit
-//go:nowritebarrierrec
-func cgoIsGoPointer(p unsafe.Pointer) bool {
- if p == nil {
- return false
- }
-
- if inHeapOrStack(uintptr(p)) {
- return true
- }
-
- for _, datap := range activeModules() {
- if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
- return true
- }
- }
-
- return false
-}
-
-// cgoInRange reports whether p is between start and end.
-//go:nosplit
-//go:nowritebarrierrec
-func cgoInRange(p unsafe.Pointer, start, end uintptr) bool {
- return start <= uintptr(p) && uintptr(p) < end
-}
-
-// cgoCheckResult is called to check the result parameter of an
-// exported Go function. It panics if the result is or contains a Go
-// pointer.
-func cgoCheckResult(val any) {
- if debug.cgocheck == 0 {
- return
- }
-
- ep := efaceOf(&val)
- t := ep._type
- cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, false, cgoResultFail)
-}
diff --git a/contrib/go/_std_1.18/src/runtime/cgocheck.go b/contrib/go/_std_1.18/src/runtime/cgocheck.go
deleted file mode 100644
index 3acbadf803..0000000000
--- a/contrib/go/_std_1.18/src/runtime/cgocheck.go
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code to check that pointer writes follow the cgo rules.
-// These functions are invoked via the write barrier when debug.cgocheck > 1.
-
-package runtime
-
-import (
- "internal/goarch"
- "unsafe"
-)
-
-const cgoWriteBarrierFail = "Go pointer stored into non-Go memory"
-
-// cgoCheckWriteBarrier is called whenever a pointer is stored into memory.
-// It throws if the program is storing a Go pointer into non-Go memory.
-//
-// This is called from the write barrier, so its entire call tree must
-// be nosplit.
-//
-//go:nosplit
-//go:nowritebarrier
-func cgoCheckWriteBarrier(dst *uintptr, src uintptr) {
- if !cgoIsGoPointer(unsafe.Pointer(src)) {
- return
- }
- if cgoIsGoPointer(unsafe.Pointer(dst)) {
- return
- }
-
- // If we are running on the system stack then dst might be an
- // address on the stack, which is OK.
- g := getg()
- if g == g.m.g0 || g == g.m.gsignal {
- return
- }
-
- // Allocating memory can write to various mfixalloc structs
- // that look like they are non-Go memory.
- if g.m.mallocing != 0 {
- return
- }
-
- // It's OK to write to memory allocated by persistentalloc.
- // Do this check last because it is more expensive and rarely true.
- // If it is false the expense doesn't matter since we are crashing.
- if inPersistentAlloc(uintptr(unsafe.Pointer(dst))) {
- return
- }
-
- systemstack(func() {
- println("write of Go pointer", hex(src), "to non-Go memory", hex(uintptr(unsafe.Pointer(dst))))
- throw(cgoWriteBarrierFail)
- })
-}
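// A sketch of what this barrier catches, assuming GODEBUG=cgocheck=2 so that
// pointer writes are routed through here (the C allocation is illustrative):

package main

// #include <stdlib.h>
import "C"
import "unsafe"

func main() {
	// One pointer-sized slot of non-Go memory.
	p := (**int)(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0)))))
	n := 1
	*p = &n // fatal error: Go pointer stored into non-Go memory
	C.free(unsafe.Pointer(p))
}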
-
-// cgoCheckMemmove is called when moving a block of memory.
-// dst and src point off bytes into the value to copy.
-// size is the number of bytes to copy.
-// It throws if the program is copying a block that contains a Go pointer
-// into non-Go memory.
-//go:nosplit
-//go:nowritebarrier
-func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
- if typ.ptrdata == 0 {
- return
- }
- if !cgoIsGoPointer(src) {
- return
- }
- if cgoIsGoPointer(dst) {
- return
- }
- cgoCheckTypedBlock(typ, src, off, size)
-}
-
-// cgoCheckSliceCopy is called when copying n elements of a slice.
-// src and dst are pointers to the first element of the slice.
-// typ is the element type of the slice.
-// It throws if the program is copying slice elements that contain Go pointers
-// into non-Go memory.
-//go:nosplit
-//go:nowritebarrier
-func cgoCheckSliceCopy(typ *_type, dst, src unsafe.Pointer, n int) {
- if typ.ptrdata == 0 {
- return
- }
- if !cgoIsGoPointer(src) {
- return
- }
- if cgoIsGoPointer(dst) {
- return
- }
- p := src
- for i := 0; i < n; i++ {
- cgoCheckTypedBlock(typ, p, 0, typ.size)
- p = add(p, typ.size)
- }
-}
-
-// cgoCheckTypedBlock checks the block of memory at src, for up to size bytes,
-// and throws if it finds a Go pointer. The type of the memory is typ,
-// and src is off bytes into that type.
-//go:nosplit
-//go:nowritebarrier
-func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
- // Anything past typ.ptrdata is not a pointer.
- if typ.ptrdata <= off {
- return
- }
- if ptrdataSize := typ.ptrdata - off; size > ptrdataSize {
- size = ptrdataSize
- }
-
- if typ.kind&kindGCProg == 0 {
- cgoCheckBits(src, typ.gcdata, off, size)
- return
- }
-
- // The type has a GC program. Try to find GC bits somewhere else.
- for _, datap := range activeModules() {
- if cgoInRange(src, datap.data, datap.edata) {
- doff := uintptr(src) - datap.data
- cgoCheckBits(add(src, -doff), datap.gcdatamask.bytedata, off+doff, size)
- return
- }
- if cgoInRange(src, datap.bss, datap.ebss) {
- boff := uintptr(src) - datap.bss
- cgoCheckBits(add(src, -boff), datap.gcbssmask.bytedata, off+boff, size)
- return
- }
- }
-
- s := spanOfUnchecked(uintptr(src))
- if s.state.get() == mSpanManual {
- // There are no heap bits for values stored on the stack.
- // For a channel receive src might be on the stack of some
- // other goroutine, so we can't unwind the stack even if
- // we wanted to.
- // We can't expand the GC program without extra storage
- // space we can't easily get.
- // Fortunately we have the type information.
- systemstack(func() {
- cgoCheckUsingType(typ, src, off, size)
- })
- return
- }
-
- // src must be in the regular heap.
-
- hbits := heapBitsForAddr(uintptr(src))
- for i := uintptr(0); i < off+size; i += goarch.PtrSize {
- bits := hbits.bits()
- if i >= off && bits&bitPointer != 0 {
- v := *(*unsafe.Pointer)(add(src, i))
- if cgoIsGoPointer(v) {
- throw(cgoWriteBarrierFail)
- }
- }
- hbits = hbits.next()
- }
-}
-
-// cgoCheckBits checks the block of memory at src, for up to size
-// bytes, and throws if it finds a Go pointer. The gcbits mark each
-// pointer value. The src pointer is off bytes into the gcbits.
-//go:nosplit
-//go:nowritebarrier
-func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) {
- skipMask := off / goarch.PtrSize / 8
- skipBytes := skipMask * goarch.PtrSize * 8
- ptrmask := addb(gcbits, skipMask)
- src = add(src, skipBytes)
- off -= skipBytes
- size += off
- var bits uint32
- for i := uintptr(0); i < size; i += goarch.PtrSize {
- if i&(goarch.PtrSize*8-1) == 0 {
- bits = uint32(*ptrmask)
- ptrmask = addb(ptrmask, 1)
- } else {
- bits >>= 1
- }
- if off > 0 {
- off -= goarch.PtrSize
- } else {
- if bits&1 != 0 {
- v := *(*unsafe.Pointer)(add(src, i))
- if cgoIsGoPointer(v) {
- throw(cgoWriteBarrierFail)
- }
- }
- }
- }
-}
-
-// cgoCheckUsingType is like cgoCheckTypedBlock, but is a last ditch
-// fall back to look for pointers in src using the type information.
-// We only use this when looking at a value on the stack when the type
-// uses a GC program, because otherwise it's more efficient to use the
-// GC bits. This is called on the system stack.
-//go:nowritebarrier
-//go:systemstack
-func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr) {
- if typ.ptrdata == 0 {
- return
- }
-
- // Anything past typ.ptrdata is not a pointer.
- if typ.ptrdata <= off {
- return
- }
- if ptrdataSize := typ.ptrdata - off; size > ptrdataSize {
- size = ptrdataSize
- }
-
- if typ.kind&kindGCProg == 0 {
- cgoCheckBits(src, typ.gcdata, off, size)
- return
- }
- switch typ.kind & kindMask {
- default:
- throw("can't happen")
- case kindArray:
- at := (*arraytype)(unsafe.Pointer(typ))
- for i := uintptr(0); i < at.len; i++ {
- if off < at.elem.size {
- cgoCheckUsingType(at.elem, src, off, size)
- }
- src = add(src, at.elem.size)
- skipped := off
- if skipped > at.elem.size {
- skipped = at.elem.size
- }
- checked := at.elem.size - skipped
- off -= skipped
- if size <= checked {
- return
- }
- size -= checked
- }
- case kindStruct:
- st := (*structtype)(unsafe.Pointer(typ))
- for _, f := range st.fields {
- if off < f.typ.size {
- cgoCheckUsingType(f.typ, src, off, size)
- }
- src = add(src, f.typ.size)
- skipped := off
- if skipped > f.typ.size {
- skipped = f.typ.size
- }
- checked := f.typ.size - skipped
- off -= skipped
- if size <= checked {
- return
- }
- size -= checked
- }
- }
-}
diff --git a/contrib/go/_std_1.18/src/runtime/chan.go b/contrib/go/_std_1.18/src/runtime/chan.go
deleted file mode 100644
index 3cdb5dce11..0000000000
--- a/contrib/go/_std_1.18/src/runtime/chan.go
+++ /dev/null
@@ -1,846 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-// This file contains the implementation of Go channels.
-
-// Invariants:
-// At least one of c.sendq and c.recvq is empty,
-// except for the case of an unbuffered channel with a single goroutine
-// blocked on it for both sending and receiving using a select statement,
-// in which case the length of c.sendq and c.recvq is limited only by the
-// size of the select statement.
-//
-// For buffered channels, also:
-// c.qcount > 0 implies that c.recvq is empty.
-// c.qcount < c.dataqsiz implies that c.sendq is empty.
-
-import (
- "internal/abi"
- "runtime/internal/atomic"
- "runtime/internal/math"
- "unsafe"
-)
-
-const (
- maxAlign = 8
- hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
- debugChan = false
-)
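// Worked example of the hchanSize rounding above, assuming a hypothetical
// unsafe.Sizeof(hchan{}) of 90: -90 & (maxAlign-1) == 6, so
// hchanSize == 90 + 6 == 96, the next multiple of 8.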
-
-type hchan struct {
- qcount uint // total data in the queue
- dataqsiz uint // size of the circular queue
- buf unsafe.Pointer // points to an array of dataqsiz elements
- elemsize uint16
- closed uint32
- elemtype *_type // element type
- sendx uint // send index
- recvx uint // receive index
- recvq waitq // list of recv waiters
- sendq waitq // list of send waiters
-
- // lock protects all fields in hchan, as well as several
- // fields in sudogs blocked on this channel.
- //
- // Do not change another G's status while holding this lock
- // (in particular, do not ready a G), as this can deadlock
- // with stack shrinking.
- lock mutex
-}
-
-type waitq struct {
- first *sudog
- last *sudog
-}
-
-//go:linkname reflect_makechan reflect.makechan
-func reflect_makechan(t *chantype, size int) *hchan {
- return makechan(t, size)
-}
-
-func makechan64(t *chantype, size int64) *hchan {
- if int64(int(size)) != size {
- panic(plainError("makechan: size out of range"))
- }
-
- return makechan(t, int(size))
-}
-
-func makechan(t *chantype, size int) *hchan {
- elem := t.elem
-
- // The compiler checks this, but be safe anyway.
- if elem.size >= 1<<16 {
- throw("makechan: invalid channel element type")
- }
- if hchanSize%maxAlign != 0 || elem.align > maxAlign {
- throw("makechan: bad alignment")
- }
-
- mem, overflow := math.MulUintptr(elem.size, uintptr(size))
- if overflow || mem > maxAlloc-hchanSize || size < 0 {
- panic(plainError("makechan: size out of range"))
- }
-
- // Hchan does not contain pointers interesting for GC when elements stored in buf do not contain pointers.
- // buf points into the same allocation, elemtype is persistent.
- // SudoG's are referenced from their owning thread so they can't be collected.
- // TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
- var c *hchan
- switch {
- case mem == 0:
- // Queue or element size is zero.
- c = (*hchan)(mallocgc(hchanSize, nil, true))
- // Race detector uses this location for synchronization.
- c.buf = c.raceaddr()
- case elem.ptrdata == 0:
- // Elements do not contain pointers.
- // Allocate hchan and buf in one call.
- c = (*hchan)(mallocgc(hchanSize+mem, nil, true))
- c.buf = add(unsafe.Pointer(c), hchanSize)
- default:
- // Elements contain pointers.
- c = new(hchan)
- c.buf = mallocgc(mem, elem, true)
- }
-
- c.elemsize = uint16(elem.size)
- c.elemtype = elem
- c.dataqsiz = uint(size)
- lockInit(&c.lock, lockRankHchan)
-
- if debugChan {
- print("makechan: chan=", c, "; elemsize=", elem.size, "; dataqsiz=", size, "\n")
- }
- return c
-}
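// Sketch of how ordinary make calls map to the three allocation cases above
// (variable names are illustrative):
//
//	ch0 := make(chan struct{})  // mem == 0: single hchan allocation
//	chI := make(chan int, 8)    // elem.ptrdata == 0: hchan and buffer in one allocation
//	chP := make(chan *int, 8)   // elements contain pointers: separate, GC-typed buffer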
-
-// chanbuf(c, i) is pointer to the i'th slot in the buffer.
-func chanbuf(c *hchan, i uint) unsafe.Pointer {
- return add(c.buf, uintptr(i)*uintptr(c.elemsize))
-}
-
-// full reports whether a send on c would block (that is, the channel is full).
-// It uses a single word-sized read of mutable state, so although
-// the answer is instantaneously true, the correct answer may have changed
-// by the time the calling function receives the return value.
-func full(c *hchan) bool {
- // c.dataqsiz is immutable (never written after the channel is created)
- // so it is safe to read at any time during channel operation.
- if c.dataqsiz == 0 {
- // Assumes that a pointer read is relaxed-atomic.
- return c.recvq.first == nil
- }
- // Assumes that a uint read is relaxed-atomic.
- return c.qcount == c.dataqsiz
-}
-
-// entry point for c <- x from compiled code
-//go:nosplit
-func chansend1(c *hchan, elem unsafe.Pointer) {
- chansend(c, elem, true, getcallerpc())
-}
-
-/*
- * generic single channel send/recv
- * If block is false,
- * then the operation will not
- * sleep but will return if it could
- * not complete.
- *
- * sleep can wake up with g.param == nil
- * when a channel involved in the sleep has
- * been closed. It is easiest to loop and re-run
- * the operation; we'll see that it's now closed.
- */
-func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
- if c == nil {
- if !block {
- return false
- }
- gopark(nil, nil, waitReasonChanSendNilChan, traceEvGoStop, 2)
- throw("unreachable")
- }
-
- if debugChan {
- print("chansend: chan=", c, "\n")
- }
-
- if raceenabled {
- racereadpc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(chansend))
- }
-
- // Fast path: check for failed non-blocking operation without acquiring the lock.
- //
- // After observing that the channel is not closed, we observe that the channel is
- // not ready for sending. Each of these observations is a single word-sized read
- // (first c.closed and second full()).
- // Because a closed channel cannot transition from 'ready for sending' to
- // 'not ready for sending', even if the channel is closed between the two observations,
- // they imply a moment between the two when the channel was both not yet closed
- // and not ready for sending. We behave as if we observed the channel at that moment,
- // and report that the send cannot proceed.
- //
- // It is okay if the reads are reordered here: if we observe that the channel is not
- // ready for sending and then observe that it is not closed, that implies that the
- // channel wasn't closed during the first observation. However, nothing here
- // guarantees forward progress. We rely on the side effects of lock release in
- // chanrecv() and closechan() to update this thread's view of c.closed and full().
- if !block && c.closed == 0 && full(c) {
- return false
- }
-
- var t0 int64
- if blockprofilerate > 0 {
- t0 = cputicks()
- }
-
- lock(&c.lock)
-
- if c.closed != 0 {
- unlock(&c.lock)
- panic(plainError("send on closed channel"))
- }
-
- if sg := c.recvq.dequeue(); sg != nil {
- // Found a waiting receiver. We pass the value we want to send
- // directly to the receiver, bypassing the channel buffer (if any).
- send(c, sg, ep, func() { unlock(&c.lock) }, 3)
- return true
- }
-
- if c.qcount < c.dataqsiz {
- // Space is available in the channel buffer. Enqueue the element to send.
- qp := chanbuf(c, c.sendx)
- if raceenabled {
- racenotify(c, c.sendx, nil)
- }
- typedmemmove(c.elemtype, qp, ep)
- c.sendx++
- if c.sendx == c.dataqsiz {
- c.sendx = 0
- }
- c.qcount++
- unlock(&c.lock)
- return true
- }
-
- if !block {
- unlock(&c.lock)
- return false
- }
-
- // Block on the channel. Some receiver will complete our operation for us.
- gp := getg()
- mysg := acquireSudog()
- mysg.releasetime = 0
- if t0 != 0 {
- mysg.releasetime = -1
- }
- // No stack splits between assigning elem and enqueuing mysg
- // on gp.waiting where copystack can find it.
- mysg.elem = ep
- mysg.waitlink = nil
- mysg.g = gp
- mysg.isSelect = false
- mysg.c = c
- gp.waiting = mysg
- gp.param = nil
- c.sendq.enqueue(mysg)
- // Signal to anyone trying to shrink our stack that we're about
- // to park on a channel. The window between when this G's status
- // changes and when we set gp.activeStackChans is not safe for
- // stack shrinking.
- atomic.Store8(&gp.parkingOnChan, 1)
- gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceEvGoBlockSend, 2)
- // Ensure the value being sent is kept alive until the
- // receiver copies it out. The sudog has a pointer to the
- // stack object, but sudogs aren't considered as roots of the
- // stack tracer.
- KeepAlive(ep)
-
- // someone woke us up.
- if mysg != gp.waiting {
- throw("G waiting list is corrupted")
- }
- gp.waiting = nil
- gp.activeStackChans = false
- closed := !mysg.success
- gp.param = nil
- if mysg.releasetime > 0 {
- blockevent(mysg.releasetime-t0, 2)
- }
- mysg.c = nil
- releaseSudog(mysg)
- if closed {
- if c.closed == 0 {
- throw("chansend: spurious wakeup")
- }
- panic(plainError("send on closed channel"))
- }
- return true
-}
-
-// send processes a send operation on an empty channel c.
-// The value ep sent by the sender is copied to the receiver sg.
-// The receiver is then woken up to go on its merry way.
-// Channel c must be empty and locked. send unlocks c with unlockf.
-// sg must already be dequeued from c.
-// ep must be non-nil and point to the heap or the caller's stack.
-func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
- if raceenabled {
- if c.dataqsiz == 0 {
- racesync(c, sg)
- } else {
- // Pretend we go through the buffer, even though
- // we copy directly. Note that we need to increment
- // the head/tail locations only when raceenabled.
- racenotify(c, c.recvx, nil)
- racenotify(c, c.recvx, sg)
- c.recvx++
- if c.recvx == c.dataqsiz {
- c.recvx = 0
- }
- c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
- }
- }
- if sg.elem != nil {
- sendDirect(c.elemtype, sg, ep)
- sg.elem = nil
- }
- gp := sg.g
- unlockf()
- gp.param = unsafe.Pointer(sg)
- sg.success = true
- if sg.releasetime != 0 {
- sg.releasetime = cputicks()
- }
- goready(gp, skip+1)
-}
-
-// Sends and receives on unbuffered or empty-buffered channels are the
-// only operations where one running goroutine writes to the stack of
-// another running goroutine. The GC assumes that stack writes only
-// happen when the goroutine is running and are only done by that
-// goroutine. Using a write barrier is sufficient to make up for
-// violating that assumption, but the write barrier has to work.
-// typedmemmove will call bulkBarrierPreWrite, but the target bytes
-// are not in the heap, so that will not help. We arrange to call
-// memmove and typeBitsBulkBarrier instead.
-
-func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
- // src is on our stack, dst is a slot on another stack.
-
- // Once we read sg.elem out of sg, it will no longer
- // be updated if the destination's stack gets copied (shrunk).
- // So make sure that no preemption points can happen between read & use.
- dst := sg.elem
- typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
- // No need for cgo write barrier checks because dst is always
- // Go memory.
- memmove(dst, src, t.size)
-}
-
-func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) {
- // dst is on our stack or the heap, src is on another stack.
- // The channel is locked, so src will not move during this
- // operation.
- src := sg.elem
- typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
- memmove(dst, src, t.size)
-}
-
-func closechan(c *hchan) {
- if c == nil {
- panic(plainError("close of nil channel"))
- }
-
- lock(&c.lock)
- if c.closed != 0 {
- unlock(&c.lock)
- panic(plainError("close of closed channel"))
- }
-
- if raceenabled {
- callerpc := getcallerpc()
- racewritepc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(closechan))
- racerelease(c.raceaddr())
- }
-
- c.closed = 1
-
- var glist gList
-
- // release all readers
- for {
- sg := c.recvq.dequeue()
- if sg == nil {
- break
- }
- if sg.elem != nil {
- typedmemclr(c.elemtype, sg.elem)
- sg.elem = nil
- }
- if sg.releasetime != 0 {
- sg.releasetime = cputicks()
- }
- gp := sg.g
- gp.param = unsafe.Pointer(sg)
- sg.success = false
- if raceenabled {
- raceacquireg(gp, c.raceaddr())
- }
- glist.push(gp)
- }
-
- // release all writers (they will panic)
- for {
- sg := c.sendq.dequeue()
- if sg == nil {
- break
- }
- sg.elem = nil
- if sg.releasetime != 0 {
- sg.releasetime = cputicks()
- }
- gp := sg.g
- gp.param = unsafe.Pointer(sg)
- sg.success = false
- if raceenabled {
- raceacquireg(gp, c.raceaddr())
- }
- glist.push(gp)
- }
- unlock(&c.lock)
-
- // Ready all Gs now that we've dropped the channel lock.
- for !glist.empty() {
- gp := glist.pop()
- gp.schedlink = 0
- goready(gp, 3)
- }
-}
-
-// empty reports whether a read from c would block (that is, the channel is
-// empty). It uses a single atomic read of mutable state.
-func empty(c *hchan) bool {
- // c.dataqsiz is immutable.
- if c.dataqsiz == 0 {
- return atomic.Loadp(unsafe.Pointer(&c.sendq.first)) == nil
- }
- return atomic.Loaduint(&c.qcount) == 0
-}
-
-// entry points for <- c from compiled code
-//go:nosplit
-func chanrecv1(c *hchan, elem unsafe.Pointer) {
- chanrecv(c, elem, true)
-}
-
-//go:nosplit
-func chanrecv2(c *hchan, elem unsafe.Pointer) (received bool) {
- _, received = chanrecv(c, elem, true)
- return
-}
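// Sketch of how the compiler lowers receive expressions to these entry
// points (illustrative, not actual generated code):
//
//	v := <-c     // chanrecv1(c, &v)
//	v, ok := <-c // ok = chanrecv2(c, &v)
//	<-c          // chanrecv1(c, nil): result discarded, ep is nil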
-
-// chanrecv receives on channel c and writes the received data to ep.
-// ep may be nil, in which case received data is ignored.
-// If block == false and no elements are available, returns (false, false).
-// Otherwise, if c is closed, zeros *ep and returns (true, false).
-// Otherwise, fills in *ep with an element and returns (true, true).
-// A non-nil ep must point to the heap or the caller's stack.
-func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
- // raceenabled: don't need to check ep, as it is always on the stack
- // or is new memory allocated by reflect.
-
- if debugChan {
- print("chanrecv: chan=", c, "\n")
- }
-
- if c == nil {
- if !block {
- return
- }
- gopark(nil, nil, waitReasonChanReceiveNilChan, traceEvGoStop, 2)
- throw("unreachable")
- }
-
- // Fast path: check for failed non-blocking operation without acquiring the lock.
- if !block && empty(c) {
- // After observing that the channel is not ready for receiving, we observe whether the
- // channel is closed.
- //
- // Reordering of these checks could lead to incorrect behavior when racing with a close.
- // For example, if the channel was open and not empty, was closed, and then drained,
- // reordered reads could incorrectly indicate "open and empty". To prevent reordering,
- // we use atomic loads for both checks, and rely on emptying and closing to happen in
- // separate critical sections under the same lock. This assumption fails when closing
- // an unbuffered channel with a blocked send, but that is an error condition anyway.
- if atomic.Load(&c.closed) == 0 {
- // Because a channel cannot be reopened, the later observation of the channel
- // being not closed implies that it was also not closed at the moment of the
- // first observation. We behave as if we observed the channel at that moment
- // and report that the receive cannot proceed.
- return
- }
- // The channel is irreversibly closed. Re-check whether the channel has any pending data
- // to receive, which could have arrived between the empty and closed checks above.
- // Sequential consistency is also required here, when racing with such a send.
- if empty(c) {
- // The channel is irreversibly closed and empty.
- if raceenabled {
- raceacquire(c.raceaddr())
- }
- if ep != nil {
- typedmemclr(c.elemtype, ep)
- }
- return true, false
- }
- }
-
- var t0 int64
- if blockprofilerate > 0 {
- t0 = cputicks()
- }
-
- lock(&c.lock)
-
- if c.closed != 0 && c.qcount == 0 {
- if raceenabled {
- raceacquire(c.raceaddr())
- }
- unlock(&c.lock)
- if ep != nil {
- typedmemclr(c.elemtype, ep)
- }
- return true, false
- }
-
- if sg := c.sendq.dequeue(); sg != nil {
- // Found a waiting sender. If buffer is size 0, receive value
- // directly from sender. Otherwise, receive from head of queue
- // and add sender's value to the tail of the queue (both map to
- // the same buffer slot because the queue is full).
- recv(c, sg, ep, func() { unlock(&c.lock) }, 3)
- return true, true
- }
-
- if c.qcount > 0 {
- // Receive directly from queue
- qp := chanbuf(c, c.recvx)
- if raceenabled {
- racenotify(c, c.recvx, nil)
- }
- if ep != nil {
- typedmemmove(c.elemtype, ep, qp)
- }
- typedmemclr(c.elemtype, qp)
- c.recvx++
- if c.recvx == c.dataqsiz {
- c.recvx = 0
- }
- c.qcount--
- unlock(&c.lock)
- return true, true
- }
-
- if !block {
- unlock(&c.lock)
- return false, false
- }
-
- // no sender available: block on this channel.
- gp := getg()
- mysg := acquireSudog()
- mysg.releasetime = 0
- if t0 != 0 {
- mysg.releasetime = -1
- }
- // No stack splits between assigning elem and enqueuing mysg
- // on gp.waiting where copystack can find it.
- mysg.elem = ep
- mysg.waitlink = nil
- gp.waiting = mysg
- mysg.g = gp
- mysg.isSelect = false
- mysg.c = c
- gp.param = nil
- c.recvq.enqueue(mysg)
- // Signal to anyone trying to shrink our stack that we're about
- // to park on a channel. The window between when this G's status
- // changes and when we set gp.activeStackChans is not safe for
- // stack shrinking.
- atomic.Store8(&gp.parkingOnChan, 1)
- gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceEvGoBlockRecv, 2)
-
- // someone woke us up
- if mysg != gp.waiting {
- throw("G waiting list is corrupted")
- }
- gp.waiting = nil
- gp.activeStackChans = false
- if mysg.releasetime > 0 {
- blockevent(mysg.releasetime-t0, 2)
- }
- success := mysg.success
- gp.param = nil
- mysg.c = nil
- releaseSudog(mysg)
- return true, success
-}
-
-// recv processes a receive operation on a full channel c.
-// There are 2 parts:
-// 1) The value sent by the sender sg is put into the channel
-// and the sender is woken up to go on its merry way.
-// 2) The value received by the receiver (the current G) is
-// written to ep.
-// For synchronous channels, both values are the same.
-// For asynchronous channels, the receiver gets its data from
-// the channel buffer and the sender's data is put in the
-// channel buffer.
-// Channel c must be full and locked. recv unlocks c with unlockf.
-// sg must already be dequeued from c.
-// A non-nil ep must point to the heap or the caller's stack.
-func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
- if c.dataqsiz == 0 {
- if raceenabled {
- racesync(c, sg)
- }
- if ep != nil {
- // copy data from sender
- recvDirect(c.elemtype, sg, ep)
- }
- } else {
- // Queue is full. Take the item at the
- // head of the queue. Make the sender enqueue
- // its item at the tail of the queue. Since the
- // queue is full, those are both the same slot.
- qp := chanbuf(c, c.recvx)
- if raceenabled {
- racenotify(c, c.recvx, nil)
- racenotify(c, c.recvx, sg)
- }
- // copy data from queue to receiver
- if ep != nil {
- typedmemmove(c.elemtype, ep, qp)
- }
- // copy data from sender to queue
- typedmemmove(c.elemtype, qp, sg.elem)
- c.recvx++
- if c.recvx == c.dataqsiz {
- c.recvx = 0
- }
- c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
- }
- sg.elem = nil
- gp := sg.g
- unlockf()
- gp.param = unsafe.Pointer(sg)
- sg.success = true
- if sg.releasetime != 0 {
- sg.releasetime = cputicks()
- }
- goready(gp, skip+1)
-}
-
-func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool {
- // There are unlocked sudogs that point into gp's stack. Stack
- // copying must lock the channels of those sudogs.
- // Set activeStackChans here instead of before we try parking
- // because we could self-deadlock in stack growth on the
- // channel lock.
- gp.activeStackChans = true
- // Mark that it's safe for stack shrinking to occur now,
- // because any thread acquiring this G's stack for shrinking
- // is guaranteed to observe activeStackChans after this store.
- atomic.Store8(&gp.parkingOnChan, 0)
- // Make sure we unlock after setting activeStackChans and
- // unsetting parkingOnChan. The moment we unlock chanLock
- // we risk gp getting readied by a channel operation and
- // so gp could continue running before everything before
- // the unlock is visible (even to gp itself).
- unlock((*mutex)(chanLock))
- return true
-}
-
-// compiler implements
-//
-// select {
-// case c <- v:
-// ... foo
-// default:
-// ... bar
-// }
-//
-// as
-//
-// if selectnbsend(c, v) {
-// ... foo
-// } else {
-// ... bar
-// }
-//
-func selectnbsend(c *hchan, elem unsafe.Pointer) (selected bool) {
- return chansend(c, elem, false, getcallerpc())
-}
-
-// compiler implements
-//
-// select {
-// case v, ok = <-c:
-// ... foo
-// default:
-// ... bar
-// }
-//
-// as
-//
-// if selected, ok = selectnbrecv(&v, c); selected {
-// ... foo
-// } else {
-// ... bar
-// }
-//
-func selectnbrecv(elem unsafe.Pointer, c *hchan) (selected, received bool) {
- return chanrecv(c, elem, false)
-}
-
-//go:linkname reflect_chansend reflect.chansend
-func reflect_chansend(c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
- return chansend(c, elem, !nb, getcallerpc())
-}
-
-//go:linkname reflect_chanrecv reflect.chanrecv
-func reflect_chanrecv(c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) {
- return chanrecv(c, elem, !nb)
-}
-
-//go:linkname reflect_chanlen reflect.chanlen
-func reflect_chanlen(c *hchan) int {
- if c == nil {
- return 0
- }
- return int(c.qcount)
-}
-
-//go:linkname reflectlite_chanlen internal/reflectlite.chanlen
-func reflectlite_chanlen(c *hchan) int {
- if c == nil {
- return 0
- }
- return int(c.qcount)
-}
-
-//go:linkname reflect_chancap reflect.chancap
-func reflect_chancap(c *hchan) int {
- if c == nil {
- return 0
- }
- return int(c.dataqsiz)
-}
-
-//go:linkname reflect_chanclose reflect.chanclose
-func reflect_chanclose(c *hchan) {
- closechan(c)
-}
-
-func (q *waitq) enqueue(sgp *sudog) {
- sgp.next = nil
- x := q.last
- if x == nil {
- sgp.prev = nil
- q.first = sgp
- q.last = sgp
- return
- }
- sgp.prev = x
- x.next = sgp
- q.last = sgp
-}
-
-func (q *waitq) dequeue() *sudog {
- for {
- sgp := q.first
- if sgp == nil {
- return nil
- }
- y := sgp.next
- if y == nil {
- q.first = nil
- q.last = nil
- } else {
- y.prev = nil
- q.first = y
- sgp.next = nil // mark as removed (see dequeueSudog)
- }
-
- // if a goroutine was put on this queue because of a
- // select, there is a small window between the goroutine
- // being woken up by a different case and it grabbing the
- // channel locks. Once it has the lock
- // it removes itself from the queue, so we won't see it after that.
- // We use a flag in the G struct to tell us when someone
- // else has won the race to signal this goroutine but the goroutine
- // hasn't removed itself from the queue yet.
- if sgp.isSelect && !atomic.Cas(&sgp.g.selectDone, 0, 1) {
- continue
- }
-
- return sgp
- }
-}
-
-func (c *hchan) raceaddr() unsafe.Pointer {
- // Treat read-like and write-like operations on the channel to
- // happen at this address. Avoid using the address of qcount
- // or dataqsiz, because the len() and cap() builtins read
- // those addresses, and we don't want them racing with
- // operations like close().
- return unsafe.Pointer(&c.buf)
-}
-
-func racesync(c *hchan, sg *sudog) {
- racerelease(chanbuf(c, 0))
- raceacquireg(sg.g, chanbuf(c, 0))
- racereleaseg(sg.g, chanbuf(c, 0))
- raceacquire(chanbuf(c, 0))
-}
-
-// Notify the race detector of a send or receive involving buffer entry idx
-// and a channel c or its communicating partner sg.
-// This function handles the special case of c.elemsize==0.
-func racenotify(c *hchan, idx uint, sg *sudog) {
- // We could have passed the unsafe.Pointer corresponding to entry idx
- // instead of idx itself. However, in a future version of this function,
- // we can use idx to better handle the case of elemsize==0.
- // A future improvement to the detector is to call TSan with c and idx:
- // this way, Go can continue not allocating buffer entries for channels
- // of elemsize==0, yet the race detector can be made to handle multiple
- // sync objects under the hood (one sync object per idx).
- qp := chanbuf(c, idx)
- // When elemsize==0, we don't allocate a full buffer for the channel.
- // Instead of individual buffer entries, the race detector uses the
- // c.buf as the only buffer entry. This simplification prevents us from
- // following the memory model's happens-before rules (rules that are
- // implemented in racereleaseacquire). Instead, we accumulate happens-before
- // information in the synchronization object associated with c.buf.
- if c.elemsize == 0 {
- if sg == nil {
- raceacquire(qp)
- racerelease(qp)
- } else {
- raceacquireg(sg.g, qp)
- racereleaseg(sg.g, qp)
- }
- } else {
- if sg == nil {
- racereleaseacquire(qp)
- } else {
- racereleaseacquireg(sg.g, qp)
- }
- }
-}
diff --git a/contrib/go/_std_1.18/src/runtime/compiler.go b/contrib/go/_std_1.18/src/runtime/compiler.go
deleted file mode 100644
index 1ebc62dea1..0000000000
--- a/contrib/go/_std_1.18/src/runtime/compiler.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-// Compiler is the name of the compiler toolchain that built the
-// running binary. Known toolchains are:
-//
-// gc Also known as cmd/compile.
-// gccgo The gccgo front end, part of the GCC compiler suite.
-//
-const Compiler = "gc"
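// Usage sketch: gating a code path on the toolchain name.
//
//	if runtime.Compiler == "gc" {
//		// cmd/compile-specific behavior
//	}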
diff --git a/contrib/go/_std_1.18/src/runtime/cpuprof.go b/contrib/go/_std_1.18/src/runtime/cpuprof.go
deleted file mode 100644
index 48cef46fe9..0000000000
--- a/contrib/go/_std_1.18/src/runtime/cpuprof.go
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// CPU profiling.
-//
-// The signal handler for the profiling clock tick adds a new stack trace
-// to a log of recent traces. The log is read by a user goroutine that
-// turns it into formatted profile data. If the reader does not keep up
-// with the log, those writes will be recorded as a count of lost records.
-// The actual profile buffer is in profbuf.go.
-
-package runtime
-
-import (
- "internal/abi"
- "runtime/internal/atomic"
- "runtime/internal/sys"
- "unsafe"
-)
-
-const maxCPUProfStack = 64
-
-type cpuProfile struct {
- lock mutex
- on bool // profiling is on
- log *profBuf // profile events written here
-
- // extra holds extra stacks accumulated in addNonGo
- // corresponding to profiling signals arriving on
- // non-Go-created threads. Those stacks are written
- // to log the next time a normal Go thread gets the
- // signal handler.
- // Assuming the stacks are 2 words each (we don't get
- // a full traceback from those threads), plus one word
- // of framing, 100 Hz profiling would generate
- // 300 words per second.
- // Hopefully a normal Go thread will get the profiling
- // signal at least once every few seconds.
- extra [1000]uintptr
- numExtra int
- lostExtra uint64 // count of frames lost because extra is full
- lostAtomic uint64 // count of frames lost because of being in atomic64 on mips/arm; updated racily
-}
-
-var cpuprof cpuProfile
-
-// SetCPUProfileRate sets the CPU profiling rate to hz samples per second.
-// If hz <= 0, SetCPUProfileRate turns off profiling.
-// If the profiler is on, the rate cannot be changed without first turning it off.
-//
-// Most clients should use the runtime/pprof package or
-// the testing package's -test.cpuprofile flag instead of calling
-// SetCPUProfileRate directly.
-func SetCPUProfileRate(hz int) {
- // Clamp hz to something reasonable.
- if hz < 0 {
- hz = 0
- }
- if hz > 1000000 {
- hz = 1000000
- }
-
- lock(&cpuprof.lock)
- if hz > 0 {
- if cpuprof.on || cpuprof.log != nil {
- print("runtime: cannot set cpu profile rate until previous profile has finished.\n")
- unlock(&cpuprof.lock)
- return
- }
-
- cpuprof.on = true
- cpuprof.log = newProfBuf(1, 1<<17, 1<<14)
- hdr := [1]uint64{uint64(hz)}
- cpuprof.log.write(nil, nanotime(), hdr[:], nil)
- setcpuprofilerate(int32(hz))
- } else if cpuprof.on {
- setcpuprofilerate(0)
- cpuprof.on = false
- cpuprof.addExtra()
- cpuprof.log.close()
- }
- unlock(&cpuprof.lock)
-}
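// Sketch of the recommended route above, using runtime/pprof rather than
// calling SetCPUProfileRate directly (the output file name is illustrative):

package main

import (
	"log"
	"os"
	"runtime/pprof"
)

func main() {
	f, err := os.Create("cpu.prof")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := pprof.StartCPUProfile(f); err != nil { // picks the sampling rate itself
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()
	// ... workload to profile ...
}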
-
-// add adds the stack trace to the profile.
-// It is called from signal handlers and other limited environments
-// and cannot allocate memory or acquire locks that might be
-// held at the time of the signal, nor can it use substantial amounts
-// of stack.
-//go:nowritebarrierrec
-func (p *cpuProfile) add(tagPtr *unsafe.Pointer, stk []uintptr) {
- // Simple cas-lock to coordinate with setcpuprofilerate.
- for !atomic.Cas(&prof.signalLock, 0, 1) {
- osyield()
- }
-
- if prof.hz != 0 { // implies cpuprof.log != nil
- if p.numExtra > 0 || p.lostExtra > 0 || p.lostAtomic > 0 {
- p.addExtra()
- }
- hdr := [1]uint64{1}
- // Note: write "knows" that the argument is &gp.labels,
- // because otherwise its write barrier behavior may not
- // be correct. See the long comment there before
- // changing the argument here.
- cpuprof.log.write(tagPtr, nanotime(), hdr[:], stk)
- }
-
- atomic.Store(&prof.signalLock, 0)
-}
-
-// addNonGo adds the non-Go stack trace to the profile.
-// It is called from a non-Go thread, so we cannot use much stack at all,
-// nor do anything that needs a g or an m.
-// In particular, we can't call cpuprof.log.write.
-// Instead, we copy the stack into cpuprof.extra,
-// which will be drained the next time a Go thread
-// gets the signal handling event.
-//go:nosplit
-//go:nowritebarrierrec
-func (p *cpuProfile) addNonGo(stk []uintptr) {
- // Simple cas-lock to coordinate with SetCPUProfileRate.
- // (Other calls to add or addNonGo should be blocked out
- // by the fact that only one SIGPROF can be handled by the
- // process at a time. If not, this lock will serialize those too.)
- for !atomic.Cas(&prof.signalLock, 0, 1) {
- osyield()
- }
-
- if cpuprof.numExtra+1+len(stk) < len(cpuprof.extra) {
- i := cpuprof.numExtra
- cpuprof.extra[i] = uintptr(1 + len(stk))
- copy(cpuprof.extra[i+1:], stk)
- cpuprof.numExtra += 1 + len(stk)
- } else {
- cpuprof.lostExtra++
- }
-
- atomic.Store(&prof.signalLock, 0)
-}
-
-// addExtra adds the "extra" profiling events,
-// queued by addNonGo, to the profile log.
-// addExtra is called either from a signal handler on a Go thread
-// or from an ordinary goroutine; either way it can use stack
-// and has a g. The world may be stopped, though.
-func (p *cpuProfile) addExtra() {
- // Copy accumulated non-Go profile events.
- hdr := [1]uint64{1}
- for i := 0; i < p.numExtra; {
- p.log.write(nil, 0, hdr[:], p.extra[i+1:i+int(p.extra[i])])
- i += int(p.extra[i])
- }
- p.numExtra = 0
-
- // Report any lost events.
- if p.lostExtra > 0 {
- hdr := [1]uint64{p.lostExtra}
- lostStk := [2]uintptr{
- abi.FuncPCABIInternal(_LostExternalCode) + sys.PCQuantum,
- abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum,
- }
- p.log.write(nil, 0, hdr[:], lostStk[:])
- p.lostExtra = 0
- }
-
- if p.lostAtomic > 0 {
- hdr := [1]uint64{p.lostAtomic}
- lostStk := [2]uintptr{
- abi.FuncPCABIInternal(_LostSIGPROFDuringAtomic64) + sys.PCQuantum,
- abi.FuncPCABIInternal(_System) + sys.PCQuantum,
- }
- p.log.write(nil, 0, hdr[:], lostStk[:])
- p.lostAtomic = 0
- }
-
-}
-
-// CPUProfile panics.
-// It formerly provided raw access to chunks of
-// a pprof-format profile generated by the runtime.
-// The details of generating that format have changed,
-// so this functionality has been removed.
-//
-// Deprecated: Use the runtime/pprof package,
-// or the handlers in the net/http/pprof package,
-// or the testing package's -test.cpuprofile flag instead.
-func CPUProfile() []byte {
- panic("CPUProfile no longer available")
-}
-
-//go:linkname runtime_pprof_runtime_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond
-func runtime_pprof_runtime_cyclesPerSecond() int64 {
- return tickspersecond()
-}
-
-// readProfile, provided to runtime/pprof, returns the next chunk of
-// binary CPU profiling stack trace data, blocking until data is available.
-// If profiling is turned off and all the profile data accumulated while it was
-// on has been returned, readProfile returns eof=true.
-// The caller must save the returned data and tags before calling readProfile again.
-// The returned data contains a whole number of records, and tags contains
-// exactly one entry per record.
-//
-//go:linkname runtime_pprof_readProfile runtime/pprof.readProfile
-func runtime_pprof_readProfile() ([]uint64, []unsafe.Pointer, bool) {
- lock(&cpuprof.lock)
- log := cpuprof.log
- unlock(&cpuprof.lock)
- data, tags, eof := log.read(profBufBlocking)
- if len(data) == 0 && eof {
- lock(&cpuprof.lock)
- cpuprof.log = nil
- unlock(&cpuprof.lock)
- }
- return data, tags, eof
-}
diff --git a/contrib/go/_std_1.18/src/runtime/cputicks.go b/contrib/go/_std_1.18/src/runtime/cputicks.go
deleted file mode 100644
index 2cf3240333..0000000000
--- a/contrib/go/_std_1.18/src/runtime/cputicks.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !arm && !arm64 && !mips64 && !mips64le && !mips && !mipsle && !wasm
-
-package runtime
-
-// careful: cputicks is not guaranteed to be monotonic! In particular, we have
-// noticed drift between cpus on certain os/arch combinations. See issue 8976.
-func cputicks() int64
diff --git a/contrib/go/_std_1.18/src/runtime/debug.go b/contrib/go/_std_1.18/src/runtime/debug.go
deleted file mode 100644
index 2703a0ce01..0000000000
--- a/contrib/go/_std_1.18/src/runtime/debug.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "runtime/internal/atomic"
- "unsafe"
-)
-
-// GOMAXPROCS sets the maximum number of CPUs that can be executing
-// simultaneously and returns the previous setting. It defaults to
-// the value of runtime.NumCPU. If n < 1, it does not change the current setting.
-// This call will go away when the scheduler improves.
-func GOMAXPROCS(n int) int {
- if GOARCH == "wasm" && n > 1 {
- n = 1 // WebAssembly has no threads yet, so only one CPU is possible.
- }
-
- lock(&sched.lock)
- ret := int(gomaxprocs)
- unlock(&sched.lock)
- if n <= 0 || n == ret {
- return ret
- }
-
- stopTheWorldGC("GOMAXPROCS")
-
- // newprocs will be processed by startTheWorld
- newprocs = int32(n)
-
- startTheWorldGC()
- return ret
-}
-
-// NumCPU returns the number of logical CPUs usable by the current process.
-//
-// The set of available CPUs is checked by querying the operating system
-// at process startup. Changes to operating system CPU allocation after
-// process startup are not reflected.
-func NumCPU() int {
- return int(ncpu)
-}
-
-// NumCgoCall returns the number of cgo calls made by the current process.
-func NumCgoCall() int64 {
- var n = int64(atomic.Load64(&ncgocall))
- for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
- n += int64(mp.ncgocall)
- }
- return n
-}
-
-// NumGoroutine returns the number of goroutines that currently exist.
-func NumGoroutine() int {
- return int(gcount())
-}
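
GOMAXPROCS, NumCPU, and NumGoroutine are public API, so a quick usage example is safe to give; GOMAXPROCS(0) only queries the setting, since n < 1 leaves it unchanged:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0)) // n < 1: query only
	fmt.Println("NumCPU:    ", runtime.NumCPU())
	fmt.Println("goroutines:", runtime.NumGoroutine())
}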
-
-//go:linkname debug_modinfo runtime/debug.modinfo
-func debug_modinfo() string {
- return modinfo
-}
-
-// mayMoreStackPreempt is a maymorestack hook that forces a preemption
-// at every possible cooperative preemption point.
-//
-// This is valuable to apply to the runtime, which can be sensitive to
-// preemption points. To apply this to all preemption points in the
-// runtime and runtime-like code, use the following in bash or zsh:
-//
-// X=(-{gc,asm}flags={runtime/...,reflect,sync}=-d=maymorestack=runtime.mayMoreStackPreempt) GOFLAGS=${X[@]}
-//
-// This must be deeply nosplit because it is called from a function
-// prologue before the stack is set up and because the compiler will
-// call it from any splittable prologue (leading to infinite
-// recursion).
-//
-// Ideally it should also use very little stack because the linker
-// doesn't currently account for this in nosplit stack depth checking.
-//
-//go:nosplit
-//
-// Ensure mayMoreStackPreempt can be called for all ABIs.
-//
-//go:linkname mayMoreStackPreempt
-func mayMoreStackPreempt() {
- // Don't do anything on the g0 or gsignal stack.
- g := getg()
- if g == g.m.g0 || g == g.m.gsignal {
- return
- }
- // Force a preemption, unless the stack is already poisoned.
- if g.stackguard0 < stackPoisonMin {
- g.stackguard0 = stackPreempt
- }
-}
-
-// mayMoreStackMove is a maymorestack hook that forces stack movement
-// at every possible point.
-//
-// See mayMoreStackPreempt.
-//
-//go:nosplit
-//go:linkname mayMoreStackMove
-func mayMoreStackMove() {
- // Don't do anything on the g0 or gsignal stack.
- g := getg()
- if g == g.m.g0 || g == g.m.gsignal {
- return
- }
- // Force stack movement, unless the stack is already poisoned.
- if g.stackguard0 < stackPoisonMin {
- g.stackguard0 = stackForceMove
- }
-}
diff --git a/contrib/go/_std_1.18/src/runtime/debuglog.go b/contrib/go/_std_1.18/src/runtime/debuglog.go
deleted file mode 100644
index 75b91c4216..0000000000
--- a/contrib/go/_std_1.18/src/runtime/debuglog.go
+++ /dev/null
@@ -1,820 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file provides an internal debug logging facility. The debug
-// log is a lightweight, in-memory, per-M ring buffer. By default, the
-// runtime prints the debug log on panic.
-//
-// To print something to the debug log, call dlog to obtain a dlogger
-// and use the methods on that to add values. The values will be
-// space-separated in the output (much like println).
-//
-// This facility can be enabled by passing -tags debuglog when
-// building. Without this tag, dlog calls compile to nothing.
-
-package runtime
-
-import (
- "runtime/internal/atomic"
- "unsafe"
-)
-
-// debugLogBytes is the size of each per-M ring buffer. This is
-// allocated off-heap to avoid blowing up the M and hence the GC'd
-// heap size.
-const debugLogBytes = 16 << 10
-
-// debugLogStringLimit is the maximum number of bytes in a string.
-// Above this, the string will be truncated with "..(n more bytes).."
-const debugLogStringLimit = debugLogBytes / 8
-
-// dlog returns a debug logger. The caller can use methods on the
-// returned logger to add values, which will be space-separated in the
-// final output, much like println. The caller must call end() to
-// finish the message.
-//
-// dlog can be used from highly-constrained corners of the runtime: it
-// is safe to use in the signal handler, from within the write
-// barrier, from within the stack implementation, and in places that
-// must be recursively nosplit.
-//
-// This will be compiled away if built without the debuglog build tag.
-// However, argument construction may not be. If any of the arguments
-// are not literals or trivial expressions, consider protecting the
-// call with "if dlogEnabled".
-//
-//go:nosplit
-//go:nowritebarrierrec
-func dlog() *dlogger {
- if !dlogEnabled {
- return nil
- }
-
- // Get the time.
- tick, nano := uint64(cputicks()), uint64(nanotime())
-
- // Try to get a cached logger.
- l := getCachedDlogger()
-
- // If we couldn't get a cached logger, try to get one from the
- // global pool.
- if l == nil {
- allp := (*uintptr)(unsafe.Pointer(&allDloggers))
- all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp)))
- for l1 := all; l1 != nil; l1 = l1.allLink {
- if atomic.Load(&l1.owned) == 0 && atomic.Cas(&l1.owned, 0, 1) {
- l = l1
- break
- }
- }
- }
-
- // If that failed, allocate a new logger.
- if l == nil {
- l = (*dlogger)(sysAlloc(unsafe.Sizeof(dlogger{}), nil))
- if l == nil {
- throw("failed to allocate debug log")
- }
- l.w.r.data = &l.w.data
- l.owned = 1
-
- // Prepend to allDloggers list.
- headp := (*uintptr)(unsafe.Pointer(&allDloggers))
- for {
- head := atomic.Loaduintptr(headp)
- l.allLink = (*dlogger)(unsafe.Pointer(head))
- if atomic.Casuintptr(headp, head, uintptr(unsafe.Pointer(l))) {
- break
- }
- }
- }
-
- // If the time delta is getting too high, write a new sync
- // packet. We set the limit so we don't write more than 6
- // bytes of delta in the record header.
- const deltaLimit = 1<<(3*7) - 1 // ~2ms between sync packets
- if tick-l.w.tick > deltaLimit || nano-l.w.nano > deltaLimit {
- l.w.writeSync(tick, nano)
- }
-
- // Reserve space for framing header.
- l.w.ensure(debugLogHeaderSize)
- l.w.write += debugLogHeaderSize
-
- // Write record header.
- l.w.uvarint(tick - l.w.tick)
- l.w.uvarint(nano - l.w.nano)
- gp := getg()
- if gp != nil && gp.m != nil && gp.m.p != 0 {
- l.w.varint(int64(gp.m.p.ptr().id))
- } else {
- l.w.varint(-1)
- }
-
- return l
-}
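
Per the doc comment above, a call site chains value methods and finishes with end(). Illustrative only: dlog is internal to package runtime, the value names here are invented, and the whole call compiles to nothing without -tags debuglog:

if dlogEnabled {
	dlog().s("mark phase").i(workers).b(idle).end()
}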
-
-// A dlogger writes to the debug log.
-//
-// To obtain a dlogger, call dlog(). When done with the dlogger, call
-// end().
-//
-//go:notinheap
-type dlogger struct {
- w debugLogWriter
-
- // allLink is the next dlogger in the allDloggers list.
- allLink *dlogger
-
- // owned indicates that this dlogger is owned by an M. This is
- // accessed atomically.
- owned uint32
-}
-
-// allDloggers is a list of all dloggers, linked through
-// dlogger.allLink. This is accessed atomically. This is prepend only,
-// so it doesn't need to protect against ABA races.
-var allDloggers *dlogger
-
-//go:nosplit
-func (l *dlogger) end() {
- if !dlogEnabled {
- return
- }
-
- // Fill in framing header.
- size := l.w.write - l.w.r.end
- if !l.w.writeFrameAt(l.w.r.end, size) {
- throw("record too large")
- }
-
- // Commit the record.
- l.w.r.end = l.w.write
-
- // Attempt to return this logger to the cache.
- if putCachedDlogger(l) {
- return
- }
-
- // Return the logger to the global pool.
- atomic.Store(&l.owned, 0)
-}
-
-const (
- debugLogUnknown = 1 + iota
- debugLogBoolTrue
- debugLogBoolFalse
- debugLogInt
- debugLogUint
- debugLogHex
- debugLogPtr
- debugLogString
- debugLogConstString
- debugLogStringOverflow
-
- debugLogPC
- debugLogTraceback
-)
-
-//go:nosplit
-func (l *dlogger) b(x bool) *dlogger {
- if !dlogEnabled {
- return l
- }
- if x {
- l.w.byte(debugLogBoolTrue)
- } else {
- l.w.byte(debugLogBoolFalse)
- }
- return l
-}
-
-//go:nosplit
-func (l *dlogger) i(x int) *dlogger {
- return l.i64(int64(x))
-}
-
-//go:nosplit
-func (l *dlogger) i8(x int8) *dlogger {
- return l.i64(int64(x))
-}
-
-//go:nosplit
-func (l *dlogger) i16(x int16) *dlogger {
- return l.i64(int64(x))
-}
-
-//go:nosplit
-func (l *dlogger) i32(x int32) *dlogger {
- return l.i64(int64(x))
-}
-
-//go:nosplit
-func (l *dlogger) i64(x int64) *dlogger {
- if !dlogEnabled {
- return l
- }
- l.w.byte(debugLogInt)
- l.w.varint(x)
- return l
-}
-
-//go:nosplit
-func (l *dlogger) u(x uint) *dlogger {
- return l.u64(uint64(x))
-}
-
-//go:nosplit
-func (l *dlogger) uptr(x uintptr) *dlogger {
- return l.u64(uint64(x))
-}
-
-//go:nosplit
-func (l *dlogger) u8(x uint8) *dlogger {
- return l.u64(uint64(x))
-}
-
-//go:nosplit
-func (l *dlogger) u16(x uint16) *dlogger {
- return l.u64(uint64(x))
-}
-
-//go:nosplit
-func (l *dlogger) u32(x uint32) *dlogger {
- return l.u64(uint64(x))
-}
-
-//go:nosplit
-func (l *dlogger) u64(x uint64) *dlogger {
- if !dlogEnabled {
- return l
- }
- l.w.byte(debugLogUint)
- l.w.uvarint(x)
- return l
-}
-
-//go:nosplit
-func (l *dlogger) hex(x uint64) *dlogger {
- if !dlogEnabled {
- return l
- }
- l.w.byte(debugLogHex)
- l.w.uvarint(x)
- return l
-}
-
-//go:nosplit
-func (l *dlogger) p(x any) *dlogger {
- if !dlogEnabled {
- return l
- }
- l.w.byte(debugLogPtr)
- if x == nil {
- l.w.uvarint(0)
- } else {
- v := efaceOf(&x)
- switch v._type.kind & kindMask {
- case kindChan, kindFunc, kindMap, kindPtr, kindUnsafePointer:
- l.w.uvarint(uint64(uintptr(v.data)))
- default:
- throw("not a pointer type")
- }
- }
- return l
-}
-
-//go:nosplit
-func (l *dlogger) s(x string) *dlogger {
- if !dlogEnabled {
- return l
- }
- str := stringStructOf(&x)
- datap := &firstmoduledata
- if len(x) > 4 && datap.etext <= uintptr(str.str) && uintptr(str.str) < datap.end {
- // String constants are in the rodata section, which
- // isn't recorded in moduledata. But it has to be
- // somewhere between etext and end.
- l.w.byte(debugLogConstString)
- l.w.uvarint(uint64(str.len))
- l.w.uvarint(uint64(uintptr(str.str) - datap.etext))
- } else {
- l.w.byte(debugLogString)
- var b []byte
- bb := (*slice)(unsafe.Pointer(&b))
- bb.array = str.str
- bb.len, bb.cap = str.len, str.len
- if len(b) > debugLogStringLimit {
- b = b[:debugLogStringLimit]
- }
- l.w.uvarint(uint64(len(b)))
- l.w.bytes(b)
- if len(b) != len(x) {
- l.w.byte(debugLogStringOverflow)
- l.w.uvarint(uint64(len(x) - len(b)))
- }
- }
- return l
-}
-
-//go:nosplit
-func (l *dlogger) pc(x uintptr) *dlogger {
- if !dlogEnabled {
- return l
- }
- l.w.byte(debugLogPC)
- l.w.uvarint(uint64(x))
- return l
-}
-
-//go:nosplit
-func (l *dlogger) traceback(x []uintptr) *dlogger {
- if !dlogEnabled {
- return l
- }
- l.w.byte(debugLogTraceback)
- l.w.uvarint(uint64(len(x)))
- for _, pc := range x {
- l.w.uvarint(uint64(pc))
- }
- return l
-}
-
-// A debugLogWriter is a ring buffer of binary debug log records.
-//
-// A log record consists of a 2-byte framing header and a sequence of
-// fields. The framing header gives the size of the record as a little
-// endian 16-bit value. Each field starts with a byte indicating its
-// type, followed by type-specific data. If the size in the framing
-// header is 0, it's a sync record consisting of two little endian
-// 64-bit values giving a new time base.
-//
-// Because this is a ring buffer, new records will eventually
-// overwrite old records. Hence, it maintains a reader that consumes
-// the log as it gets overwritten. That reader state is where an
-// actual log reader would start.
-//
-//go:notinheap
-type debugLogWriter struct {
- write uint64
- data debugLogBuf
-
- // tick and nano are the time bases from the most recently
- // written sync record.
- tick, nano uint64
-
- // r is a reader that consumes records as they get overwritten
- // by the writer. It also acts as the initial reader state
- // when printing the log.
- r debugLogReader
-
- // buf is a scratch buffer for encoding. This is here to
- // reduce stack usage.
- buf [10]byte
-}
-
-//go:notinheap
-type debugLogBuf [debugLogBytes]byte
-
-const (
- // debugLogHeaderSize is the number of bytes in the framing
- // header of every dlog record.
- debugLogHeaderSize = 2
-
- // debugLogSyncSize is the number of bytes in a sync record.
- debugLogSyncSize = debugLogHeaderSize + 2*8
-)
-
-//go:nosplit
-func (l *debugLogWriter) ensure(n uint64) {
- for l.write+n >= l.r.begin+uint64(len(l.data)) {
- // Consume record at begin.
- if l.r.skip() == ^uint64(0) {
- // Wrapped around within a record.
- //
- // TODO(austin): It would be better to just
- // eat the whole buffer at this point, but we
- // have to communicate that to the reader
- // somehow.
- throw("record wrapped around")
- }
- }
-}
-
-//go:nosplit
-func (l *debugLogWriter) writeFrameAt(pos, size uint64) bool {
- l.data[pos%uint64(len(l.data))] = uint8(size)
- l.data[(pos+1)%uint64(len(l.data))] = uint8(size >> 8)
- return size <= 0xFFFF
-}
-
-//go:nosplit
-func (l *debugLogWriter) writeSync(tick, nano uint64) {
- l.tick, l.nano = tick, nano
- l.ensure(debugLogHeaderSize)
- l.writeFrameAt(l.write, 0)
- l.write += debugLogHeaderSize
- l.writeUint64LE(tick)
- l.writeUint64LE(nano)
- l.r.end = l.write
-}
-
-//go:nosplit
-func (l *debugLogWriter) writeUint64LE(x uint64) {
- var b [8]byte
- b[0] = byte(x)
- b[1] = byte(x >> 8)
- b[2] = byte(x >> 16)
- b[3] = byte(x >> 24)
- b[4] = byte(x >> 32)
- b[5] = byte(x >> 40)
- b[6] = byte(x >> 48)
- b[7] = byte(x >> 56)
- l.bytes(b[:])
-}
-
-//go:nosplit
-func (l *debugLogWriter) byte(x byte) {
- l.ensure(1)
- pos := l.write
- l.write++
- l.data[pos%uint64(len(l.data))] = x
-}
-
-//go:nosplit
-func (l *debugLogWriter) bytes(x []byte) {
- l.ensure(uint64(len(x)))
- pos := l.write
- l.write += uint64(len(x))
- for len(x) > 0 {
- n := copy(l.data[pos%uint64(len(l.data)):], x)
- pos += uint64(n)
- x = x[n:]
- }
-}
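
The modulo-wraparound copy in bytes is the core ring-buffer move. Extracted as a standalone sketch (ringWrite is an invented name):

// ringWrite copies x into a fixed ring at pos, wrapping with modulo
// arithmetic, and returns the advanced position.
func ringWrite(ring []byte, pos uint64, x []byte) uint64 {
	for len(x) > 0 {
		n := copy(ring[pos%uint64(len(ring)):], x)
		pos += uint64(n)
		x = x[n:]
	}
	return pos
}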
-
-//go:nosplit
-func (l *debugLogWriter) varint(x int64) {
- var u uint64
- if x < 0 {
- u = (^uint64(x) << 1) | 1 // complement x, bit 0 is 1
- } else {
- u = (uint64(x) << 1) // do not complement x, bit 0 is 0
- }
- l.uvarint(u)
-}
-
-//go:nosplit
-func (l *debugLogWriter) uvarint(u uint64) {
- i := 0
- for u >= 0x80 {
- l.buf[i] = byte(u) | 0x80
- u >>= 7
- i++
- }
- l.buf[i] = byte(u)
- i++
- l.bytes(l.buf[:i])
-}
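
varint uses zigzag encoding so small negative numbers stay short, and uvarint is the standard 7-bits-per-byte continuation scheme. The mapping can be cross-checked against encoding/binary, whose PutVarint applies the same zigzag transform:

package main

import (
	"encoding/binary"
	"fmt"
)

// zigzag mirrors debugLogWriter.varint's signed-to-unsigned mapping.
func zigzag(x int64) uint64 {
	if x < 0 {
		return (^uint64(x) << 1) | 1
	}
	return uint64(x) << 1
}

func main() {
	for _, x := range []int64{0, -1, 1, -2, 63, -64} {
		var buf [binary.MaxVarintLen64]byte
		n := binary.PutUvarint(buf[:], zigzag(x))
		fmt.Printf("%4d -> %x\n", x, buf[:n]) // e.g. -1 -> 01, matching PutVarint
	}
}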
-
-type debugLogReader struct {
- data *debugLogBuf
-
- // begin and end are the positions in the log of the beginning
- // and end of the log data, modulo len(data).
- begin, end uint64
-
- // tick and nano are the current time base at begin.
- tick, nano uint64
-}
-
-//go:nosplit
-func (r *debugLogReader) skip() uint64 {
- // Read size at pos.
- if r.begin+debugLogHeaderSize > r.end {
- return ^uint64(0)
- }
- size := uint64(r.readUint16LEAt(r.begin))
- if size == 0 {
- // Sync packet.
- r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
- r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
- size = debugLogSyncSize
- }
- if r.begin+size > r.end {
- return ^uint64(0)
- }
- r.begin += size
- return size
-}
-
-//go:nosplit
-func (r *debugLogReader) readUint16LEAt(pos uint64) uint16 {
- return uint16(r.data[pos%uint64(len(r.data))]) |
- uint16(r.data[(pos+1)%uint64(len(r.data))])<<8
-}
-
-//go:nosplit
-func (r *debugLogReader) readUint64LEAt(pos uint64) uint64 {
- var b [8]byte
- for i := range b {
- b[i] = r.data[pos%uint64(len(r.data))]
- pos++
- }
- return uint64(b[0]) | uint64(b[1])<<8 |
- uint64(b[2])<<16 | uint64(b[3])<<24 |
- uint64(b[4])<<32 | uint64(b[5])<<40 |
- uint64(b[6])<<48 | uint64(b[7])<<56
-}
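
Outside a ring buffer these manual little-endian reads are equivalent to encoding/binary. A sketch decoding one framing header plus a 64-bit value from a flat byte slice:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// 2-byte framing header (size=16, little endian) followed by a
	// 64-bit little-endian value.
	b := []byte{0x10, 0x00,
		0xef, 0xbe, 0xad, 0xde, 0x00, 0x00, 0x00, 0x00}
	size := binary.LittleEndian.Uint16(b[0:2])
	val := binary.LittleEndian.Uint64(b[2:10])
	fmt.Println(size, val) // 16 3735928559 (0xdeadbeef)
}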
-
-func (r *debugLogReader) peek() (tick uint64) {
- // Consume any sync records.
- size := uint64(0)
- for size == 0 {
- if r.begin+debugLogHeaderSize > r.end {
- return ^uint64(0)
- }
- size = uint64(r.readUint16LEAt(r.begin))
- if size != 0 {
- break
- }
- if r.begin+debugLogSyncSize > r.end {
- return ^uint64(0)
- }
- // Sync packet.
- r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
- r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
- r.begin += debugLogSyncSize
- }
-
- // Peek tick delta.
- if r.begin+size > r.end {
- return ^uint64(0)
- }
- pos := r.begin + debugLogHeaderSize
- var u uint64
- for i := uint(0); ; i += 7 {
- b := r.data[pos%uint64(len(r.data))]
- pos++
- u |= uint64(b&^0x80) << i
- if b&0x80 == 0 {
- break
- }
- }
- if pos > r.begin+size {
- return ^uint64(0)
- }
- return r.tick + u
-}
-
-func (r *debugLogReader) header() (end, tick, nano uint64, p int) {
- // Read size. We've already skipped sync packets and checked
- // bounds in peek.
- size := uint64(r.readUint16LEAt(r.begin))
- end = r.begin + size
- r.begin += debugLogHeaderSize
-
- // Read tick, nano, and p.
- tick = r.uvarint() + r.tick
- nano = r.uvarint() + r.nano
- p = int(r.varint())
-
- return
-}
-
-func (r *debugLogReader) uvarint() uint64 {
- var u uint64
- for i := uint(0); ; i += 7 {
- b := r.data[r.begin%uint64(len(r.data))]
- r.begin++
- u |= uint64(b&^0x80) << i
- if b&0x80 == 0 {
- break
- }
- }
- return u
-}
-
-func (r *debugLogReader) varint() int64 {
- u := r.uvarint()
- var v int64
- if u&1 == 0 {
- v = int64(u >> 1)
- } else {
- v = ^int64(u >> 1)
- }
- return v
-}
-
-func (r *debugLogReader) printVal() bool {
- typ := r.data[r.begin%uint64(len(r.data))]
- r.begin++
-
- switch typ {
- default:
- print("<unknown field type ", hex(typ), " pos ", r.begin-1, " end ", r.end, ">\n")
- return false
-
- case debugLogUnknown:
- print("<unknown kind>")
-
- case debugLogBoolTrue:
- print(true)
-
- case debugLogBoolFalse:
- print(false)
-
- case debugLogInt:
- print(r.varint())
-
- case debugLogUint:
- print(r.uvarint())
-
- case debugLogHex, debugLogPtr:
- print(hex(r.uvarint()))
-
- case debugLogString:
- sl := r.uvarint()
- if r.begin+sl > r.end {
- r.begin = r.end
- print("<string length corrupted>")
- break
- }
- for sl > 0 {
- b := r.data[r.begin%uint64(len(r.data)):]
- if uint64(len(b)) > sl {
- b = b[:sl]
- }
- r.begin += uint64(len(b))
- sl -= uint64(len(b))
- gwrite(b)
- }
-
- case debugLogConstString:
- len, ptr := int(r.uvarint()), uintptr(r.uvarint())
- ptr += firstmoduledata.etext
- str := stringStruct{
- str: unsafe.Pointer(ptr),
- len: len,
- }
- s := *(*string)(unsafe.Pointer(&str))
- print(s)
-
- case debugLogStringOverflow:
- print("..(", r.uvarint(), " more bytes)..")
-
- case debugLogPC:
- printDebugLogPC(uintptr(r.uvarint()), false)
-
- case debugLogTraceback:
- n := int(r.uvarint())
- for i := 0; i < n; i++ {
- print("\n\t")
- // gentraceback PCs are always return PCs.
- // Convert them to call PCs.
- //
- // TODO(austin): Expand inlined frames.
- printDebugLogPC(uintptr(r.uvarint()), true)
- }
- }
-
- return true
-}
-
-// printDebugLog prints the debug log.
-func printDebugLog() {
- if !dlogEnabled {
- return
- }
-
- // This function should not panic or throw since it is used in
- // the fatal panic path and this may deadlock.
-
- printlock()
-
- // Get the list of all debug logs.
- allp := (*uintptr)(unsafe.Pointer(&allDloggers))
- all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp)))
-
- // Count the logs.
- n := 0
- for l := all; l != nil; l = l.allLink {
- n++
- }
- if n == 0 {
- printunlock()
- return
- }
-
- // Prepare read state for all logs.
- type readState struct {
- debugLogReader
- first bool
- lost uint64
- nextTick uint64
- }
- state1 := sysAlloc(unsafe.Sizeof(readState{})*uintptr(n), nil)
- if state1 == nil {
- println("failed to allocate read state for", n, "logs")
- printunlock()
- return
- }
- state := (*[1 << 20]readState)(state1)[:n]
- {
- l := all
- for i := range state {
- s := &state[i]
- s.debugLogReader = l.w.r
- s.first = true
- s.lost = l.w.r.begin
- s.nextTick = s.peek()
- l = l.allLink
- }
- }
-
- // Print records.
- for {
- // Find the next record.
- var best struct {
- tick uint64
- i int
- }
- best.tick = ^uint64(0)
- for i := range state {
- if state[i].nextTick < best.tick {
- best.tick = state[i].nextTick
- best.i = i
- }
- }
- if best.tick == ^uint64(0) {
- break
- }
-
- // Print record.
- s := &state[best.i]
- if s.first {
- print(">> begin log ", best.i)
- if s.lost != 0 {
- print("; lost first ", s.lost>>10, "KB")
- }
- print(" <<\n")
- s.first = false
- }
-
- end, _, nano, p := s.header()
- oldEnd := s.end
- s.end = end
-
- print("[")
- var tmpbuf [21]byte
- pnano := int64(nano) - runtimeInitTime
- if pnano < 0 {
- // Logged before runtimeInitTime was set.
- pnano = 0
- }
- print(string(itoaDiv(tmpbuf[:], uint64(pnano), 9)))
- print(" P ", p, "] ")
-
- for i := 0; s.begin < s.end; i++ {
- if i > 0 {
- print(" ")
- }
- if !s.printVal() {
- // Abort this P log.
- print("<aborting P log>")
- end = oldEnd
- break
- }
- }
- println()
-
- // Move on to the next record.
- s.begin = end
- s.end = oldEnd
- s.nextTick = s.peek()
- }
-
- printunlock()
-}
-
-// printDebugLogPC prints a single symbolized PC. If returnPC is true,
-// pc is a return PC that must first be converted to a call PC.
-func printDebugLogPC(pc uintptr, returnPC bool) {
- fn := findfunc(pc)
- if returnPC && (!fn.valid() || pc > fn.entry()) {
- // TODO(austin): Don't back up if the previous frame
- // was a sigpanic.
- pc--
- }
-
- print(hex(pc))
- if !fn.valid() {
- print(" [unknown PC]")
- } else {
- name := funcname(fn)
- file, line := funcline(fn, pc)
- print(" [", name, "+", hex(pc-fn.entry()),
- " ", file, ":", line, "]")
- }
-}
diff --git a/contrib/go/_std_1.18/src/runtime/defs_linux_amd64.go b/contrib/go/_std_1.18/src/runtime/defs_linux_amd64.go
deleted file mode 100644
index 47fb468621..0000000000
--- a/contrib/go/_std_1.18/src/runtime/defs_linux_amd64.go
+++ /dev/null
@@ -1,302 +0,0 @@
-// created by cgo -cdefs and then converted to Go
-// cgo -cdefs defs_linux.go defs1_linux.go
-
-package runtime
-
-import "unsafe"
-
-const (
- _EINTR = 0x4
- _EAGAIN = 0xb
- _ENOMEM = 0xc
- _ENOSYS = 0x26
-
- _PROT_NONE = 0x0
- _PROT_READ = 0x1
- _PROT_WRITE = 0x2
- _PROT_EXEC = 0x4
-
- _MAP_ANON = 0x20
- _MAP_PRIVATE = 0x2
- _MAP_FIXED = 0x10
-
- _MADV_DONTNEED = 0x4
- _MADV_FREE = 0x8
- _MADV_HUGEPAGE = 0xe
- _MADV_NOHUGEPAGE = 0xf
-
- _SA_RESTART = 0x10000000
- _SA_ONSTACK = 0x8000000
- _SA_RESTORER = 0x4000000
- _SA_SIGINFO = 0x4
-
- _SI_KERNEL = 0x80
- _SI_TIMER = -0x2
-
- _SIGHUP = 0x1
- _SIGINT = 0x2
- _SIGQUIT = 0x3
- _SIGILL = 0x4
- _SIGTRAP = 0x5
- _SIGABRT = 0x6
- _SIGBUS = 0x7
- _SIGFPE = 0x8
- _SIGKILL = 0x9
- _SIGUSR1 = 0xa
- _SIGSEGV = 0xb
- _SIGUSR2 = 0xc
- _SIGPIPE = 0xd
- _SIGALRM = 0xe
- _SIGSTKFLT = 0x10
- _SIGCHLD = 0x11
- _SIGCONT = 0x12
- _SIGSTOP = 0x13
- _SIGTSTP = 0x14
- _SIGTTIN = 0x15
- _SIGTTOU = 0x16
- _SIGURG = 0x17
- _SIGXCPU = 0x18
- _SIGXFSZ = 0x19
- _SIGVTALRM = 0x1a
- _SIGPROF = 0x1b
- _SIGWINCH = 0x1c
- _SIGIO = 0x1d
- _SIGPWR = 0x1e
- _SIGSYS = 0x1f
-
- _SIGRTMIN = 0x20
-
- _FPE_INTDIV = 0x1
- _FPE_INTOVF = 0x2
- _FPE_FLTDIV = 0x3
- _FPE_FLTOVF = 0x4
- _FPE_FLTUND = 0x5
- _FPE_FLTRES = 0x6
- _FPE_FLTINV = 0x7
- _FPE_FLTSUB = 0x8
-
- _BUS_ADRALN = 0x1
- _BUS_ADRERR = 0x2
- _BUS_OBJERR = 0x3
-
- _SEGV_MAPERR = 0x1
- _SEGV_ACCERR = 0x2
-
- _ITIMER_REAL = 0x0
- _ITIMER_VIRTUAL = 0x1
- _ITIMER_PROF = 0x2
-
- _CLOCK_THREAD_CPUTIME_ID = 0x3
-
- _SIGEV_THREAD_ID = 0x4
-
- _EPOLLIN = 0x1
- _EPOLLOUT = 0x4
- _EPOLLERR = 0x8
- _EPOLLHUP = 0x10
- _EPOLLRDHUP = 0x2000
- _EPOLLET = 0x80000000
- _EPOLL_CLOEXEC = 0x80000
- _EPOLL_CTL_ADD = 0x1
- _EPOLL_CTL_DEL = 0x2
- _EPOLL_CTL_MOD = 0x3
-
- _AF_UNIX = 0x1
- _SOCK_DGRAM = 0x2
-)
-
-type timespec struct {
- tv_sec int64
- tv_nsec int64
-}
-
-//go:nosplit
-func (ts *timespec) setNsec(ns int64) {
- ts.tv_sec = ns / 1e9
- ts.tv_nsec = ns % 1e9
-}
-
-type timeval struct {
- tv_sec int64
- tv_usec int64
-}
-
-func (tv *timeval) set_usec(x int32) {
- tv.tv_usec = int64(x)
-}
-
-type sigactiont struct {
- sa_handler uintptr
- sa_flags uint64
- sa_restorer uintptr
- sa_mask uint64
-}
-
-type siginfoFields struct {
- si_signo int32
- si_errno int32
- si_code int32
- // below here is a union; si_addr is the only field we use
- si_addr uint64
-}
-
-type siginfo struct {
- siginfoFields
-
- // Pad struct to the max size in the kernel.
- _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte
-}
-
-type itimerspec struct {
- it_interval timespec
- it_value timespec
-}
-
-type itimerval struct {
- it_interval timeval
- it_value timeval
-}
-
-type sigeventFields struct {
- value uintptr
- signo int32
- notify int32
- // below here is a union; sigev_notify_thread_id is the only field we use
- sigev_notify_thread_id int32
-}
-
-type sigevent struct {
- sigeventFields
-
- // Pad struct to the max size in the kernel.
- _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte
-}
-
-type epollevent struct {
- events uint32
- data [8]byte // unaligned uintptr
-}
-
-// created by cgo -cdefs and then converted to Go
-// cgo -cdefs defs_linux.go defs1_linux.go
-
-const (
- _O_RDONLY = 0x0
- _O_NONBLOCK = 0x800
- _O_CLOEXEC = 0x80000
-)
-
-type usigset struct {
- __val [16]uint64
-}
-
-type fpxreg struct {
- significand [4]uint16
- exponent uint16
- padding [3]uint16
-}
-
-type xmmreg struct {
- element [4]uint32
-}
-
-type fpstate struct {
- cwd uint16
- swd uint16
- ftw uint16
- fop uint16
- rip uint64
- rdp uint64
- mxcsr uint32
- mxcr_mask uint32
- _st [8]fpxreg
- _xmm [16]xmmreg
- padding [24]uint32
-}
-
-type fpxreg1 struct {
- significand [4]uint16
- exponent uint16
- padding [3]uint16
-}
-
-type xmmreg1 struct {
- element [4]uint32
-}
-
-type fpstate1 struct {
- cwd uint16
- swd uint16
- ftw uint16
- fop uint16
- rip uint64
- rdp uint64
- mxcsr uint32
- mxcr_mask uint32
- _st [8]fpxreg1
- _xmm [16]xmmreg1
- padding [24]uint32
-}
-
-type fpreg1 struct {
- significand [4]uint16
- exponent uint16
-}
-
-type stackt struct {
- ss_sp *byte
- ss_flags int32
- pad_cgo_0 [4]byte
- ss_size uintptr
-}
-
-type mcontext struct {
- gregs [23]uint64
- fpregs *fpstate
- __reserved1 [8]uint64
-}
-
-type ucontext struct {
- uc_flags uint64
- uc_link *ucontext
- uc_stack stackt
- uc_mcontext mcontext
- uc_sigmask usigset
- __fpregs_mem fpstate
-}
-
-type sigcontext struct {
- r8 uint64
- r9 uint64
- r10 uint64
- r11 uint64
- r12 uint64
- r13 uint64
- r14 uint64
- r15 uint64
- rdi uint64
- rsi uint64
- rbp uint64
- rbx uint64
- rdx uint64
- rax uint64
- rcx uint64
- rsp uint64
- rip uint64
- eflags uint64
- cs uint16
- gs uint16
- fs uint16
- __pad0 uint16
- err uint64
- trapno uint64
- oldmask uint64
- cr2 uint64
- fpstate *fpstate1
- __reserved1 [8]uint64
-}
-
-type sockaddr_un struct {
- family uint16
- path [108]byte
-}
diff --git a/contrib/go/_std_1.18/src/runtime/env_posix.go b/contrib/go/_std_1.18/src/runtime/env_posix.go
deleted file mode 100644
index 44086c1d63..0000000000
--- a/contrib/go/_std_1.18/src/runtime/env_posix.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris || windows || plan9
-
-package runtime
-
-import "unsafe"
-
-func gogetenv(key string) string {
- env := environ()
- if env == nil {
- throw("getenv before env init")
- }
- for _, s := range env {
- if len(s) > len(key) && s[len(key)] == '=' && envKeyEqual(s[:len(key)], key) {
- return s[len(key)+1:]
- }
- }
- return ""
-}
-
-// envKeyEqual reports whether a == b, with ASCII-only case insensitivity
-// on Windows. The two strings must have the same length.
-func envKeyEqual(a, b string) bool {
- if GOOS == "windows" { // case insensitive
- for i := 0; i < len(a); i++ {
- ca, cb := a[i], b[i]
- if ca == cb || lowerASCII(ca) == lowerASCII(cb) {
- continue
- }
- return false
- }
- return true
- }
- return a == b
-}
-
-func lowerASCII(c byte) byte {
- if 'A' <= c && c <= 'Z' {
- return c + ('a' - 'A')
- }
- return c
-}
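
A standalone sketch of this ASCII-only case folding (equalASCIIFold is an invented name; note strings.EqualFold is not a drop-in substitute, since it applies Unicode folding):

package main

import "fmt"

func lowerASCII(c byte) byte {
	if 'A' <= c && c <= 'Z' {
		return c + ('a' - 'A')
	}
	return c
}

// equalASCIIFold compares byte-by-byte, folding only ASCII letters.
func equalASCIIFold(a, b string) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if lowerASCII(a[i]) != lowerASCII(b[i]) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(equalASCIIFold("Path", "PATH")) // true
}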
-
-var _cgo_setenv unsafe.Pointer // pointer to C function
-var _cgo_unsetenv unsafe.Pointer // pointer to C function
-
-// Update the C environment if cgo is loaded.
-// Called from syscall.Setenv.
-//go:linkname syscall_setenv_c syscall.setenv_c
-func syscall_setenv_c(k string, v string) {
- if _cgo_setenv == nil {
- return
- }
- arg := [2]unsafe.Pointer{cstring(k), cstring(v)}
- asmcgocall(_cgo_setenv, unsafe.Pointer(&arg))
-}
-
-// Update the C environment if cgo is loaded.
-// Called from syscall.unsetenv.
-//go:linkname syscall_unsetenv_c syscall.unsetenv_c
-func syscall_unsetenv_c(k string) {
- if _cgo_unsetenv == nil {
- return
- }
- arg := [1]unsafe.Pointer{cstring(k)}
- asmcgocall(_cgo_unsetenv, unsafe.Pointer(&arg))
-}
-
-func cstring(s string) unsafe.Pointer {
- p := make([]byte, len(s)+1)
- copy(p, s)
- return unsafe.Pointer(&p[0])
-}
diff --git a/contrib/go/_std_1.18/src/runtime/error.go b/contrib/go/_std_1.18/src/runtime/error.go
deleted file mode 100644
index 43114f092e..0000000000
--- a/contrib/go/_std_1.18/src/runtime/error.go
+++ /dev/null
@@ -1,329 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "internal/bytealg"
-
-// The Error interface identifies a run time error.
-type Error interface {
- error
-
- // RuntimeError is a no-op function but
- // serves to distinguish types that are run time
- // errors from ordinary errors: a type is a
- // run time error if it has a RuntimeError method.
- RuntimeError()
-}
-
-// A TypeAssertionError explains a failed type assertion.
-type TypeAssertionError struct {
- _interface *_type
- concrete *_type
- asserted *_type
- missingMethod string // one method needed by Interface, missing from Concrete
-}
-
-func (*TypeAssertionError) RuntimeError() {}
-
-func (e *TypeAssertionError) Error() string {
- inter := "interface"
- if e._interface != nil {
- inter = e._interface.string()
- }
- as := e.asserted.string()
- if e.concrete == nil {
- return "interface conversion: " + inter + " is nil, not " + as
- }
- cs := e.concrete.string()
- if e.missingMethod == "" {
- msg := "interface conversion: " + inter + " is " + cs + ", not " + as
- if cs == as {
- // provide slightly clearer error message
- if e.concrete.pkgpath() != e.asserted.pkgpath() {
- msg += " (types from different packages)"
- } else {
- msg += " (types from different scopes)"
- }
- }
- return msg
- }
- return "interface conversion: " + cs + " is not " + as +
- ": missing method " + e.missingMethod
-}
-
-// itoa converts val to a decimal representation. The result is
-// written somewhere within buf and the location of the result is returned.
-// buf must be at least 20 bytes.
-//go:nosplit
-func itoa(buf []byte, val uint64) []byte {
- i := len(buf) - 1
- for val >= 10 {
- buf[i] = byte(val%10 + '0')
- i--
- val /= 10
- }
- buf[i] = byte(val + '0')
- return buf[i:]
-}
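
itoa fills the buffer from the right, so a 20-byte buffer covers the longest uint64 (20 digits). A standalone check against strconv:

package main

import (
	"fmt"
	"strconv"
)

// itoa copied verbatim from above, for a standalone comparison.
func itoa(buf []byte, val uint64) []byte {
	i := len(buf) - 1
	for val >= 10 {
		buf[i] = byte(val%10 + '0')
		i--
		val /= 10
	}
	buf[i] = byte(val + '0')
	return buf[i:]
}

func main() {
	var buf [20]byte
	fmt.Println(string(itoa(buf[:], 18446744073709551615))) // max uint64, 20 digits
	fmt.Println(strconv.FormatUint(18446744073709551615, 10)) // same output
}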
-
-// An errorString represents a runtime error described by a single string.
-type errorString string
-
-func (e errorString) RuntimeError() {}
-
-func (e errorString) Error() string {
- return "runtime error: " + string(e)
-}
-
-type errorAddressString struct {
- msg string // error message
- addr uintptr // memory address where the error occurred
-}
-
-func (e errorAddressString) RuntimeError() {}
-
-func (e errorAddressString) Error() string {
- return "runtime error: " + e.msg
-}
-
-// Addr returns the memory address where a fault occurred.
-// The address provided is best-effort.
-// The veracity of the result may depend on the platform.
-// Errors providing this method will only be returned as
-// a result of using runtime/debug.SetPanicOnFault.
-func (e errorAddressString) Addr() uintptr {
- return e.addr
-}
-
-// plainError represents a runtime error described by a string without
-// the prefix "runtime error: " after invoking errorString.Error().
-// See Issue #14965.
-type plainError string
-
-func (e plainError) RuntimeError() {}
-
-func (e plainError) Error() string {
- return string(e)
-}
-
-// A boundsError represents an indexing or slicing operation gone wrong.
-type boundsError struct {
- x int64
- y int
- // Values in an index or slice expression can be signed or unsigned.
- // That means we'd need 65 bits to encode all possible indexes, from -2^63 to 2^64-1.
- // Instead, we keep track of whether x should be interpreted as signed or unsigned.
- // y is known to be nonnegative and to fit in an int.
- signed bool
- code boundsErrorCode
-}
-
-type boundsErrorCode uint8
-
-const (
- boundsIndex boundsErrorCode = iota // s[x], 0 <= x < len(s) failed
-
- boundsSliceAlen // s[?:x], 0 <= x <= len(s) failed
- boundsSliceAcap // s[?:x], 0 <= x <= cap(s) failed
- boundsSliceB // s[x:y], 0 <= x <= y failed (but boundsSliceA didn't happen)
-
- boundsSlice3Alen // s[?:?:x], 0 <= x <= len(s) failed
- boundsSlice3Acap // s[?:?:x], 0 <= x <= cap(s) failed
- boundsSlice3B // s[?:x:y], 0 <= x <= y failed (but boundsSlice3A didn't happen)
- boundsSlice3C // s[x:y:?], 0 <= x <= y failed (but boundsSlice3A/B didn't happen)
-
- boundsConvert // (*[x]T)(s), 0 <= x <= len(s) failed
- // Note: in the above, len(s) and cap(s) are stored in y
-)
-
-// boundsErrorFmts provide error text for various out-of-bounds panics.
-// Note: if you change these strings, you should adjust the size of the buffer
-// in boundsError.Error below as well.
-var boundsErrorFmts = [...]string{
- boundsIndex: "index out of range [%x] with length %y",
- boundsSliceAlen: "slice bounds out of range [:%x] with length %y",
- boundsSliceAcap: "slice bounds out of range [:%x] with capacity %y",
- boundsSliceB: "slice bounds out of range [%x:%y]",
- boundsSlice3Alen: "slice bounds out of range [::%x] with length %y",
- boundsSlice3Acap: "slice bounds out of range [::%x] with capacity %y",
- boundsSlice3B: "slice bounds out of range [:%x:%y]",
- boundsSlice3C: "slice bounds out of range [%x:%y:]",
- boundsConvert: "cannot convert slice with length %y to pointer to array with length %x",
-}
-
-// boundsNegErrorFmts are overriding formats if x is negative. In this case there's no need to report y.
-var boundsNegErrorFmts = [...]string{
- boundsIndex: "index out of range [%x]",
- boundsSliceAlen: "slice bounds out of range [:%x]",
- boundsSliceAcap: "slice bounds out of range [:%x]",
- boundsSliceB: "slice bounds out of range [%x:]",
- boundsSlice3Alen: "slice bounds out of range [::%x]",
- boundsSlice3Acap: "slice bounds out of range [::%x]",
- boundsSlice3B: "slice bounds out of range [:%x:]",
- boundsSlice3C: "slice bounds out of range [%x::]",
-}
-
-func (e boundsError) RuntimeError() {}
-
-func appendIntStr(b []byte, v int64, signed bool) []byte {
- if signed && v < 0 {
- b = append(b, '-')
- v = -v
- }
- var buf [20]byte
- b = append(b, itoa(buf[:], uint64(v))...)
- return b
-}
-
-func (e boundsError) Error() string {
- fmt := boundsErrorFmts[e.code]
- if e.signed && e.x < 0 {
- fmt = boundsNegErrorFmts[e.code]
- }
- // max message length is 99: "runtime error: slice bounds out of range [::%x] with capacity %y"
- // x can be at most 20 characters. y can be at most 19.
- b := make([]byte, 0, 100)
- b = append(b, "runtime error: "...)
- for i := 0; i < len(fmt); i++ {
- c := fmt[i]
- if c != '%' {
- b = append(b, c)
- continue
- }
- i++
- switch fmt[i] {
- case 'x':
- b = appendIntStr(b, e.x, e.signed)
- case 'y':
- b = appendIntStr(b, int64(e.y), true)
- }
- }
- return string(b)
-}
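
Error walks the format string and expands only the two placeholders %x and %y. The same loop with strconv, as a sketch (expandBounds is an invented name):

package main

import (
	"fmt"
	"strconv"
)

// expandBounds redoes the %x/%y expansion from boundsError.Error
// with plain int64 arguments.
func expandBounds(format string, x, y int64) string {
	b := make([]byte, 0, len(format)+40)
	for i := 0; i < len(format); i++ {
		c := format[i]
		if c != '%' {
			b = append(b, c)
			continue
		}
		i++
		switch format[i] {
		case 'x':
			b = strconv.AppendInt(b, x, 10)
		case 'y':
			b = strconv.AppendInt(b, y, 10)
		}
	}
	return string(b)
}

func main() {
	fmt.Println("runtime error: " +
		expandBounds("index out of range [%x] with length %y", 5, 3))
}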
-
-type stringer interface {
- String() string
-}
-
-// printany prints an argument passed to panic.
-// If panic is called with a value that has a String or Error method,
-// it has already been converted into a string by preprintpanics.
-func printany(i any) {
- switch v := i.(type) {
- case nil:
- print("nil")
- case bool:
- print(v)
- case int:
- print(v)
- case int8:
- print(v)
- case int16:
- print(v)
- case int32:
- print(v)
- case int64:
- print(v)
- case uint:
- print(v)
- case uint8:
- print(v)
- case uint16:
- print(v)
- case uint32:
- print(v)
- case uint64:
- print(v)
- case uintptr:
- print(v)
- case float32:
- print(v)
- case float64:
- print(v)
- case complex64:
- print(v)
- case complex128:
- print(v)
- case string:
- print(v)
- default:
- printanycustomtype(i)
- }
-}
-
-func printanycustomtype(i any) {
- eface := efaceOf(&i)
- typestring := eface._type.string()
-
- switch eface._type.kind {
- case kindString:
- print(typestring, `("`, *(*string)(eface.data), `")`)
- case kindBool:
- print(typestring, "(", *(*bool)(eface.data), ")")
- case kindInt:
- print(typestring, "(", *(*int)(eface.data), ")")
- case kindInt8:
- print(typestring, "(", *(*int8)(eface.data), ")")
- case kindInt16:
- print(typestring, "(", *(*int16)(eface.data), ")")
- case kindInt32:
- print(typestring, "(", *(*int32)(eface.data), ")")
- case kindInt64:
- print(typestring, "(", *(*int64)(eface.data), ")")
- case kindUint:
- print(typestring, "(", *(*uint)(eface.data), ")")
- case kindUint8:
- print(typestring, "(", *(*uint8)(eface.data), ")")
- case kindUint16:
- print(typestring, "(", *(*uint16)(eface.data), ")")
- case kindUint32:
- print(typestring, "(", *(*uint32)(eface.data), ")")
- case kindUint64:
- print(typestring, "(", *(*uint64)(eface.data), ")")
- case kindUintptr:
- print(typestring, "(", *(*uintptr)(eface.data), ")")
- case kindFloat32:
- print(typestring, "(", *(*float32)(eface.data), ")")
- case kindFloat64:
- print(typestring, "(", *(*float64)(eface.data), ")")
- case kindComplex64:
- print(typestring, *(*complex64)(eface.data))
- case kindComplex128:
- print(typestring, *(*complex128)(eface.data))
- default:
- print("(", typestring, ") ", eface.data)
- }
-}
-
-// panicwrap generates a panic for a call to a wrapped value method
-// with a nil pointer receiver.
-//
-// It is called from the generated wrapper code.
-func panicwrap() {
- pc := getcallerpc()
- name := funcname(findfunc(pc))
- // name is something like "main.(*T).F".
- // We want to extract pkg ("main"), typ ("T"), and meth ("F").
- // Do it by finding the parens.
- i := bytealg.IndexByteString(name, '(')
- if i < 0 {
- throw("panicwrap: no ( in " + name)
- }
- pkg := name[:i-1]
- if i+2 >= len(name) || name[i-1:i+2] != ".(*" {
- throw("panicwrap: unexpected string after package name: " + name)
- }
- name = name[i+2:]
- i = bytealg.IndexByteString(name, ')')
- if i < 0 {
- throw("panicwrap: no ) in " + name)
- }
- if i+2 >= len(name) || name[i:i+2] != ")." {
- throw("panicwrap: unexpected string after type name: " + name)
- }
- typ := name[:i]
- meth := name[i+2:]
- panic(plainError("value method " + pkg + "." + typ + "." + meth + " called using nil *" + typ + " pointer"))
-}
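
The paren-based splitting can be mirrored with package strings. A sketch for the same "pkg.(*T).meth" shape (splitWrappedName is an invented name; it returns ok instead of throwing):

package main

import (
	"fmt"
	"strings"
)

// splitWrappedName parses "main.(*T).F" into pkg, typ, meth by
// locating the parentheses, like panicwrap does.
func splitWrappedName(name string) (pkg, typ, meth string, ok bool) {
	i := strings.IndexByte(name, '(')
	if i < 1 || !strings.HasPrefix(name[i-1:], ".(*") {
		return "", "", "", false
	}
	pkg = name[:i-1]
	rest := name[i+2:]
	j := strings.IndexByte(rest, ')')
	if j < 0 || !strings.HasPrefix(rest[j:], ").") {
		return "", "", "", false
	}
	return pkg, rest[:j], rest[j+2:], true
}

func main() {
	fmt.Println(splitWrappedName("main.(*T).F")) // main T F true
}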
diff --git a/contrib/go/_std_1.18/src/runtime/extern.go b/contrib/go/_std_1.18/src/runtime/extern.go
deleted file mode 100644
index f1f6ea5123..0000000000
--- a/contrib/go/_std_1.18/src/runtime/extern.go
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package runtime contains operations that interact with Go's runtime system,
-such as functions to control goroutines. It also includes the low-level type information
-used by the reflect package; see reflect's documentation for the programmable
-interface to the run-time type system.
-
-Environment Variables
-
-The following environment variables ($name or %name%, depending on the host
-operating system) control the run-time behavior of Go programs. The meanings
-and use may change from release to release.
-
-The GOGC variable sets the initial garbage collection target percentage.
-A collection is triggered when the ratio of freshly allocated data to live data
-remaining after the previous collection reaches this percentage. The default
-is GOGC=100. Setting GOGC=off disables the garbage collector entirely.
-The runtime/debug package's SetGCPercent function allows changing this
-percentage at run time. See https://golang.org/pkg/runtime/debug/#SetGCPercent.
-
-The GODEBUG variable controls debugging variables within the runtime.
-It is a comma-separated list of name=val pairs setting these named variables:
-
- allocfreetrace: setting allocfreetrace=1 causes every allocation to be
- profiled and a stack trace printed on each object's allocation and free.
-
- clobberfree: setting clobberfree=1 causes the garbage collector to
- clobber the memory content of an object with bad content when it frees
- the object.
-
- cgocheck: setting cgocheck=0 disables all checks for packages
- using cgo to incorrectly pass Go pointers to non-Go code.
- Setting cgocheck=1 (the default) enables relatively cheap
- checks that may miss some errors. Setting cgocheck=2 enables
- expensive checks that should not miss any errors, but will
- cause your program to run slower.
-
- efence: setting efence=1 causes the allocator to run in a mode
- where each object is allocated on a unique page and addresses are
- never recycled.
-
- gccheckmark: setting gccheckmark=1 enables verification of the
- garbage collector's concurrent mark phase by performing a
- second mark pass while the world is stopped. If the second
- pass finds a reachable object that was not found by concurrent
- mark, the garbage collector will panic.
-
- gcpacertrace: setting gcpacertrace=1 causes the garbage collector to
- print information about the internal state of the concurrent pacer.
-
- gcshrinkstackoff: setting gcshrinkstackoff=1 disables moving goroutines
- onto smaller stacks. In this mode, a goroutine's stack can only grow.
-
- gcstoptheworld: setting gcstoptheworld=1 disables concurrent garbage collection,
- making every garbage collection a stop-the-world event. Setting gcstoptheworld=2
- also disables concurrent sweeping after the garbage collection finishes.
-
- gctrace: setting gctrace=1 causes the garbage collector to emit a single line to standard
- error at each collection, summarizing the amount of memory collected and the
- length of the pause. The format of this line is subject to change.
- Currently, it is:
- gc # @#s #%: #+#+# ms clock, #+#/#/#+# ms cpu, #->#-># MB, # MB goal, # P
- where the fields are as follows:
- gc # the GC number, incremented at each GC
- @#s time in seconds since program start
- #% percentage of time spent in GC since program start
- #+...+# wall-clock/CPU times for the phases of the GC
- #->#-># MB heap size at GC start, at GC end, and live heap
- # MB goal goal heap size
- # P number of processors used
- The phases are stop-the-world (STW) sweep termination, concurrent
- mark and scan, and STW mark termination. The CPU times
- for mark/scan are broken down into assist time (GC performed in
- line with allocation), background GC time, and idle GC time.
- If the line ends with "(forced)", this GC was forced by a
- runtime.GC() call.
-
- harddecommit: setting harddecommit=1 causes memory that is returned to the OS to
- also have protections removed on it. This is the only mode of operation on Windows,
- but is helpful in debugging scavenger-related issues on other platforms. Currently,
- only supported on Linux.
-
- inittrace: setting inittrace=1 causes the runtime to emit a single line to standard
- error for each package with init work, summarizing the execution time and memory
- allocation. No information is printed for inits executed as part of plugin loading
- and for packages without both user defined and compiler generated init work.
- The format of this line is subject to change. Currently, it is:
- init # @#ms, # ms clock, # bytes, # allocs
- where the fields are as follows:
- init # the package name
- @# ms time in milliseconds when the init started since program start
- # clock wall-clock time for package initialization work
- # bytes memory allocated on the heap
- # allocs number of heap allocations
-
- madvdontneed: setting madvdontneed=0 will use MADV_FREE
- instead of MADV_DONTNEED on Linux when returning memory to the
- kernel. This is more efficient, but means RSS numbers will
- drop only when the OS is under memory pressure.
-
- memprofilerate: setting memprofilerate=X will update the value of runtime.MemProfileRate.
- When set to 0 memory profiling is disabled. Refer to the description of
- MemProfileRate for the default value.
-
- invalidptr: invalidptr=1 (the default) causes the garbage collector and stack
- copier to crash the program if an invalid pointer value (for example, 1)
- is found in a pointer-typed location. Setting invalidptr=0 disables this check.
- This should only be used as a temporary workaround to diagnose buggy code.
- The real fix is to not store integers in pointer-typed locations.
-
- sbrk: setting sbrk=1 replaces the memory allocator and garbage collector
- with a trivial allocator that obtains memory from the operating system and
- never reclaims any memory.
-
- scavtrace: setting scavtrace=1 causes the runtime to emit a single line to standard
- error, roughly once per GC cycle, summarizing the amount of work done by the
- scavenger as well as the total amount of memory returned to the operating system
- and an estimate of physical memory utilization. The format of this line is subject
- to change, but currently it is:
- scav # # KiB work, # KiB total, #% util
- where the fields are as follows:
- scav # the scavenge cycle number
- # KiB work the amount of memory returned to the OS since the last line
- # KiB total the total amount of memory returned to the OS
- #% util the fraction of all unscavenged memory which is in-use
- If the line ends with "(forced)", then scavenging was forced by a
- debug.FreeOSMemory() call.
-
- scheddetail: setting schedtrace=X and scheddetail=1 causes the scheduler to emit
- detailed multiline info every X milliseconds, describing state of the scheduler,
- processors, threads and goroutines.
-
- schedtrace: setting schedtrace=X causes the scheduler to emit a single line to standard
- error every X milliseconds, summarizing the scheduler state.
-
- tracebackancestors: setting tracebackancestors=N extends tracebacks with the stacks at
- which goroutines were created, where N limits the number of ancestor goroutines to
- report. This also extends the information returned by runtime.Stack. Ancestors' goroutine
- IDs will refer to the ID of the goroutine at the time of creation; it's possible for this
- ID to be reused for another goroutine. Setting N to 0 will report no ancestry information.
-
- asyncpreemptoff: asyncpreemptoff=1 disables signal-based
- asynchronous goroutine preemption. This makes some loops
- non-preemptible for long periods, which may delay GC and
- goroutine scheduling. This is useful for debugging GC issues
- because it also disables the conservative stack scanning used
- for asynchronously preempted goroutines.
-
-The net and net/http packages also refer to debugging variables in GODEBUG.
-See the documentation for those packages for details.
-
-The GOMAXPROCS variable limits the number of operating system threads that
-can execute user-level Go code simultaneously. There is no limit to the number of threads
-that can be blocked in system calls on behalf of Go code; those do not count against
-the GOMAXPROCS limit. This package's GOMAXPROCS function queries and changes
-the limit.
-
-The GORACE variable configures the race detector, for programs built using -race.
-See https://golang.org/doc/articles/race_detector.html for details.
-
-The GOTRACEBACK variable controls the amount of output generated when a Go
-program fails due to an unrecovered panic or an unexpected runtime condition.
-By default, a failure prints a stack trace for the current goroutine,
-eliding functions internal to the run-time system, and then exits with exit code 2.
-The failure prints stack traces for all goroutines if there is no current goroutine
-or the failure is internal to the run-time.
-GOTRACEBACK=none omits the goroutine stack traces entirely.
-GOTRACEBACK=single (the default) behaves as described above.
-GOTRACEBACK=all adds stack traces for all user-created goroutines.
-GOTRACEBACK=system is like ``all'' but adds stack frames for run-time functions
-and shows goroutines created internally by the run-time.
-GOTRACEBACK=crash is like ``system'' but crashes in an operating system-specific
-manner instead of exiting. For example, on Unix systems, the crash raises
-SIGABRT to trigger a core dump.
-For historical reasons, the GOTRACEBACK settings 0, 1, and 2 are synonyms for
-none, all, and system, respectively.
-The runtime/debug package's SetTraceback function allows increasing the
-amount of output at run time, but it cannot reduce the amount below that
-specified by the environment variable.
-See https://golang.org/pkg/runtime/debug/#SetTraceback.
-
-The GOARCH, GOOS, GOPATH, and GOROOT environment variables complete
-the set of Go environment variables. They influence the building of Go programs
-(see https://golang.org/cmd/go and https://golang.org/pkg/go/build).
-GOARCH, GOOS, and GOROOT are recorded at compile time and made available by
-constants or functions in this package, but they do not influence the execution
-of the run-time system.
-*/
-package runtime
-
-import (
- "internal/goarch"
- "internal/goos"
-)
-
-// Caller reports file and line number information about function invocations on
-// the calling goroutine's stack. The argument skip is the number of stack frames
-// to ascend, with 0 identifying the caller of Caller. (For historical reasons the
-// meaning of skip differs between Caller and Callers.) The return values report the
-// program counter, file name, and line number within the file of the corresponding
-// call. The boolean ok is false if it was not possible to recover the information.
-func Caller(skip int) (pc uintptr, file string, line int, ok bool) {
- rpc := make([]uintptr, 1)
- n := callers(skip+1, rpc[:])
- if n < 1 {
- return
- }
- frame, _ := CallersFrames(rpc).Next()
- return frame.PC, frame.File, frame.Line, frame.PC != 0
-}
-
-// Callers fills the slice pc with the return program counters of function invocations
-// on the calling goroutine's stack. The argument skip is the number of stack frames
-// to skip before recording in pc, with 0 identifying the frame for Callers itself and
-// 1 identifying the caller of Callers.
-// It returns the number of entries written to pc.
-//
-// To translate these PCs into symbolic information such as function
-// names and line numbers, use CallersFrames. CallersFrames accounts
-// for inlined functions and adjusts the return program counters into
-// call program counters. Iterating over the returned slice of PCs
-// directly is discouraged, as is using FuncForPC on any of the
-// returned PCs, since these cannot account for inlining or return
-// program counter adjustment.
-func Callers(skip int, pc []uintptr) int {
- // runtime.callers uses pc.array==nil as a signal
- // to print a stack trace. Pick off 0-length pc here
- // so that we don't let a nil pc slice get to it.
- if len(pc) == 0 {
- return 0
- }
- return callers(skip, pc)
-}
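
Both functions are public API. A short example that prints one Caller frame and then the full stack via Callers and CallersFrames, as the comment above recommends:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	_, file, line, ok := runtime.Caller(0) // this call site
	fmt.Println(file, line, ok)

	pc := make([]uintptr, 16)
	n := runtime.Callers(1, pc) // skip the Callers frame itself
	frames := runtime.CallersFrames(pc[:n])
	for {
		f, more := frames.Next()
		fmt.Printf("%s\n\t%s:%d\n", f.Function, f.File, f.Line)
		if !more {
			break
		}
	}
}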
-
-var defaultGOROOT string // set by cmd/link
-
-// GOROOT returns the root of the Go tree. It uses the
-// GOROOT environment variable, if set at process start,
-// or else the root used during the Go build.
-func GOROOT() string {
- s := gogetenv("GOROOT")
- if s != "" {
- return s
- }
- return defaultGOROOT
-}
-
-// buildVersion is the Go tree's version string at build time.
-//
-// If any GOEXPERIMENTs are set to non-default values, it will include
-// "X:<GOEXPERIMENT>".
-//
-// This is set by the linker.
-//
-// This is accessed by "go version <binary>".
-var buildVersion string
-
-// Version returns the Go tree's version string.
-// It is either the commit hash and date at the time of the build or,
-// when possible, a release tag like "go1.3".
-func Version() string {
- return buildVersion
-}
-
-// GOOS is the running program's operating system target:
-// one of darwin, freebsd, linux, and so on.
-// To view possible combinations of GOOS and GOARCH, run "go tool dist list".
-const GOOS string = goos.GOOS
-
-// GOARCH is the running program's architecture target:
-// one of 386, amd64, arm, s390x, and so on.
-const GOARCH string = goarch.GOARCH
diff --git a/contrib/go/_std_1.18/src/runtime/float.go b/contrib/go/_std_1.18/src/runtime/float.go
deleted file mode 100644
index 459e58dd7e..0000000000
--- a/contrib/go/_std_1.18/src/runtime/float.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-var inf = float64frombits(0x7FF0000000000000)
-
-// isNaN reports whether f is an IEEE 754 ``not-a-number'' value.
-func isNaN(f float64) (is bool) {
- // IEEE 754 says that only NaNs satisfy f != f.
- return f != f
-}
-
-// isFinite reports whether f is neither NaN nor an infinity.
-func isFinite(f float64) bool {
- return !isNaN(f - f)
-}
-
-// isInf reports whether f is an infinity.
-func isInf(f float64) bool {
- return !isNaN(f) && !isFinite(f)
-}
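
These three classifications rest on IEEE 754 arithmetic alone: NaN is the only value unequal to itself, and f - f is NaN exactly when f is NaN or an infinity. Checkable with package math:

package main

import (
	"fmt"
	"math"
)

func main() {
	nan, inf := math.NaN(), math.Inf(1)
	fmt.Println(nan != nan)            // true: only NaN is unequal to itself
	fmt.Println(math.IsNaN(inf - inf)) // true: so isFinite(inf) is false
	fmt.Println(math.IsNaN(1.5 - 1.5)) // false: finite f keeps f-f finite
}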
-
-// abs returns the absolute value of x.
-//
-// Special cases are:
-// abs(±Inf) = +Inf
-// abs(NaN) = NaN
-func abs(x float64) float64 {
- const sign = 1 << 63
- return float64frombits(float64bits(x) &^ sign)
-}
-
-// copysign returns a value with the magnitude
-// of x and the sign of y.
-func copysign(x, y float64) float64 {
- const sign = 1 << 63
- return float64frombits(float64bits(x)&^sign | float64bits(y)&sign)
-}
-
-// float64bits returns the IEEE 754 binary representation of f.
-func float64bits(f float64) uint64 {
- return *(*uint64)(unsafe.Pointer(&f))
-}
-
-// float64frombits returns the floating-point number corresponding
-// to the IEEE 754 binary representation b.
-func float64frombits(b uint64) float64 {
- return *(*float64)(unsafe.Pointer(&b))
-}
diff --git a/contrib/go/_std_1.18/src/runtime/hash64.go b/contrib/go/_std_1.18/src/runtime/hash64.go
deleted file mode 100644
index f773eb929c..0000000000
--- a/contrib/go/_std_1.18/src/runtime/hash64.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Hashing algorithm inspired by
-// wyhash: https://github.com/wangyi-fudan/wyhash
-
-//go:build amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm
-
-package runtime
-
-import (
- "runtime/internal/math"
- "unsafe"
-)
-
-const (
- m1 = 0xa0761d6478bd642f
- m2 = 0xe7037ed1a0b428db
- m3 = 0x8ebc6af09c88c6e3
- m4 = 0x589965cc75374cc3
- m5 = 0x1d8e4e27c47d124f
-)
-
-func memhashFallback(p unsafe.Pointer, seed, s uintptr) uintptr {
- var a, b uintptr
- seed ^= hashkey[0] ^ m1
- switch {
- case s == 0:
- return seed
- case s < 4:
- a = uintptr(*(*byte)(p))
- a |= uintptr(*(*byte)(add(p, s>>1))) << 8
- a |= uintptr(*(*byte)(add(p, s-1))) << 16
- case s == 4:
- a = r4(p)
- b = a
- case s < 8:
- a = r4(p)
- b = r4(add(p, s-4))
- case s == 8:
- a = r8(p)
- b = a
- case s <= 16:
- a = r8(p)
- b = r8(add(p, s-8))
- default:
- l := s
- if l > 48 {
- seed1 := seed
- seed2 := seed
- for ; l > 48; l -= 48 {
- seed = mix(r8(p)^m2, r8(add(p, 8))^seed)
- seed1 = mix(r8(add(p, 16))^m3, r8(add(p, 24))^seed1)
- seed2 = mix(r8(add(p, 32))^m4, r8(add(p, 40))^seed2)
- p = add(p, 48)
- }
- seed ^= seed1 ^ seed2
- }
- for ; l > 16; l -= 16 {
- seed = mix(r8(p)^m2, r8(add(p, 8))^seed)
- p = add(p, 16)
- }
- a = r8(add(p, l-16))
- b = r8(add(p, l-8))
- }
-
- return mix(m5^s, mix(a^m2, b^seed))
-}
-
-func memhash32Fallback(p unsafe.Pointer, seed uintptr) uintptr {
- a := r4(p)
- return mix(m5^4, mix(a^m2, a^seed^hashkey[0]^m1))
-}
-
-func memhash64Fallback(p unsafe.Pointer, seed uintptr) uintptr {
- a := r8(p)
- return mix(m5^8, mix(a^m2, a^seed^hashkey[0]^m1))
-}
-
-func mix(a, b uintptr) uintptr {
- hi, lo := math.Mul64(uint64(a), uint64(b))
- return uintptr(hi ^ lo)
-}
-
-func r4(p unsafe.Pointer) uintptr {
- return uintptr(readUnaligned32(p))
-}
-
-func r8(p unsafe.Pointer) uintptr {
- return uintptr(readUnaligned64(p))
-}
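
The core of this fallback is the mix compression step: a full 64×64→128-bit multiply folded together with XOR. A standalone sketch with the public math/bits package (deliberately simplified: it hardcodes a seed and omits the per-process hashkey that the runtime XORs in):

package main

import (
	"fmt"
	"math/bits"
)

// mix is the wyhash-style compression step: multiply to 128 bits, then
// fold the two halves together.
func mix(a, b uint64) uint64 {
	hi, lo := bits.Mul64(a, b)
	return hi ^ lo
}

func main() {
	const (
		m2 = 0xe7037ed1a0b428db
		m5 = 0x1d8e4e27c47d124f
	)
	seed := uint64(42)
	word := uint64(0x1122334455667788)
	// Shape of memhash64Fallback, minus the hashkey mixing.
	fmt.Printf("%#x\n", mix(m5^8, mix(word^m2, word^seed)))
}
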
diff --git a/contrib/go/_std_1.18/src/runtime/heapdump.go b/contrib/go/_std_1.18/src/runtime/heapdump.go
deleted file mode 100644
index 871637a09e..0000000000
--- a/contrib/go/_std_1.18/src/runtime/heapdump.go
+++ /dev/null
@@ -1,757 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Implementation of runtime/debug.WriteHeapDump. Writes all
-// objects in the heap plus additional info (roots, threads,
-// finalizers, etc.) to a file.
-
-// The format of the dumped file is described at
-// https://golang.org/s/go15heapdump.
-
-package runtime
-
-import (
- "internal/goarch"
- "unsafe"
-)
-
-//go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
-func runtime_debug_WriteHeapDump(fd uintptr) {
- stopTheWorld("write heap dump")
-
- // Keep m on this G's stack instead of the system stack.
- // Both readmemstats_m and writeheapdump_m have pretty large
- // peak stack depths and we risk blowing the system stack.
- // This is safe because the world is stopped, so we don't
- // need to worry about anyone shrinking and therefore moving
- // our stack.
- var m MemStats
- systemstack(func() {
- // Call readmemstats_m here instead of deeper in
- // writeheapdump_m because we might blow the system stack
- // otherwise.
- readmemstats_m(&m)
- writeheapdump_m(fd, &m)
- })
-
- startTheWorld()
-}
-
-const (
- fieldKindEol = 0
- fieldKindPtr = 1
- fieldKindIface = 2
- fieldKindEface = 3
- tagEOF = 0
- tagObject = 1
- tagOtherRoot = 2
- tagType = 3
- tagGoroutine = 4
- tagStackFrame = 5
- tagParams = 6
- tagFinalizer = 7
- tagItab = 8
- tagOSThread = 9
- tagMemStats = 10
- tagQueuedFinalizer = 11
- tagData = 12
- tagBSS = 13
- tagDefer = 14
- tagPanic = 15
- tagMemProf = 16
- tagAllocSample = 17
-)
-
-var dumpfd uintptr // fd to write the dump to.
-var tmpbuf []byte
-
-// buffer of pending write data
-const (
- bufSize = 4096
-)
-
-var buf [bufSize]byte
-var nbuf uintptr
-
-func dwrite(data unsafe.Pointer, len uintptr) {
- if len == 0 {
- return
- }
- if nbuf+len <= bufSize {
- copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
- nbuf += len
- return
- }
-
- write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
- if len >= bufSize {
- write(dumpfd, data, int32(len))
- nbuf = 0
- } else {
- copy(buf[:], (*[bufSize]byte)(data)[:len])
- nbuf = len
- }
-}
-
-func dwritebyte(b byte) {
- dwrite(unsafe.Pointer(&b), 1)
-}
-
-func flush() {
- write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
- nbuf = 0
-}
-
-// Cache of types that have been serialized already.
-// We use a type's hash field to pick a bucket.
-// Inside a bucket, we keep a list of types that
-// have been serialized so far, most recently used first.
-// Note: when a bucket overflows we may end up
-// serializing a type more than once. That's ok.
-const (
- typeCacheBuckets = 256
- typeCacheAssoc = 4
-)
-
-type typeCacheBucket struct {
- t [typeCacheAssoc]*_type
-}
-
-var typecache [typeCacheBuckets]typeCacheBucket
-
-// dump a uint64 in a varint format parseable by encoding/binary
-func dumpint(v uint64) {
- var buf [10]byte
- var n int
- for v >= 0x80 {
- buf[n] = byte(v | 0x80)
- n++
- v >>= 7
- }
- buf[n] = byte(v)
- n++
- dwrite(unsafe.Pointer(&buf), uintptr(n))
-}
-
-func dumpbool(b bool) {
- if b {
- dumpint(1)
- } else {
- dumpint(0)
- }
-}
-
-// dump varint uint64 length followed by memory contents
-func dumpmemrange(data unsafe.Pointer, len uintptr) {
- dumpint(uint64(len))
- dwrite(data, len)
-}
-
-func dumpslice(b []byte) {
- dumpint(uint64(len(b)))
- if len(b) > 0 {
- dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
- }
-}
-
-func dumpstr(s string) {
- sp := stringStructOf(&s)
- dumpmemrange(sp.str, uintptr(sp.len))
-}
-
-// dump information for a type
-func dumptype(t *_type) {
- if t == nil {
- return
- }
-
- // If we've definitely serialized the type before,
- // no need to do it again.
- b := &typecache[t.hash&(typeCacheBuckets-1)]
- if t == b.t[0] {
- return
- }
- for i := 1; i < typeCacheAssoc; i++ {
- if t == b.t[i] {
- // Move-to-front
- for j := i; j > 0; j-- {
- b.t[j] = b.t[j-1]
- }
- b.t[0] = t
- return
- }
- }
-
- // Might not have been dumped yet. Dump it and
- // remember we did so.
- for j := typeCacheAssoc - 1; j > 0; j-- {
- b.t[j] = b.t[j-1]
- }
- b.t[0] = t
-
- // dump the type
- dumpint(tagType)
- dumpint(uint64(uintptr(unsafe.Pointer(t))))
- dumpint(uint64(t.size))
- if x := t.uncommon(); x == nil || t.nameOff(x.pkgpath).name() == "" {
- dumpstr(t.string())
- } else {
- pkgpathstr := t.nameOff(x.pkgpath).name()
- pkgpath := stringStructOf(&pkgpathstr)
- namestr := t.name()
- name := stringStructOf(&namestr)
- dumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len)))
- dwrite(pkgpath.str, uintptr(pkgpath.len))
- dwritebyte('.')
- dwrite(name.str, uintptr(name.len))
- }
- dumpbool(t.kind&kindDirectIface == 0 || t.ptrdata != 0)
-}
-
-// dump an object
-func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
- dumpint(tagObject)
- dumpint(uint64(uintptr(obj)))
- dumpmemrange(obj, size)
- dumpfields(bv)
-}
-
-func dumpotherroot(description string, to unsafe.Pointer) {
- dumpint(tagOtherRoot)
- dumpstr(description)
- dumpint(uint64(uintptr(to)))
-}
-
-func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
- dumpint(tagFinalizer)
- dumpint(uint64(uintptr(obj)))
- dumpint(uint64(uintptr(unsafe.Pointer(fn))))
- dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
- dumpint(uint64(uintptr(unsafe.Pointer(fint))))
- dumpint(uint64(uintptr(unsafe.Pointer(ot))))
-}
-
-type childInfo struct {
- // Information passed up from the callee frame about
- // the layout of the outargs region.
- argoff uintptr // where the arguments start in the frame
- arglen uintptr // size of args region
- args bitvector // if args.n >= 0, pointer map of args region
- sp *uint8 // callee sp
- depth uintptr // depth in call stack (0 == most recent)
-}
-
-// dump kinds & offsets of interesting fields in bv
-func dumpbv(cbv *bitvector, offset uintptr) {
- for i := uintptr(0); i < uintptr(cbv.n); i++ {
- if cbv.ptrbit(i) == 1 {
- dumpint(fieldKindPtr)
- dumpint(uint64(offset + i*goarch.PtrSize))
- }
- }
-}
-
-func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
- child := (*childInfo)(arg)
- f := s.fn
-
- // Figure out what we can about our stack map
- pc := s.pc
- pcdata := int32(-1) // Use the entry map at function entry
- if pc != f.entry() {
- pc--
- pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, pc, nil)
- }
- if pcdata == -1 {
- // We do not have a valid pcdata value, but there might be a
- // stackmap for this function. It is likely that we are looking
- // at the function prologue; assume so and hope for the best.
- pcdata = 0
- }
- stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
-
- var bv bitvector
- if stkmap != nil && stkmap.n > 0 {
- bv = stackmapdata(stkmap, pcdata)
- } else {
- bv.n = -1
- }
-
- // Dump main body of stack frame.
- dumpint(tagStackFrame)
- dumpint(uint64(s.sp)) // lowest address in frame
- dumpint(uint64(child.depth)) // # of frames deep on the stack
- dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
- dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp) // frame contents
- dumpint(uint64(f.entry()))
- dumpint(uint64(s.pc))
- dumpint(uint64(s.continpc))
- name := funcname(f)
- if name == "" {
- name = "unknown function"
- }
- dumpstr(name)
-
- // Dump fields in the outargs section
- if child.args.n >= 0 {
- dumpbv(&child.args, child.argoff)
- } else {
- // conservative - everything might be a pointer
- for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize {
- dumpint(fieldKindPtr)
- dumpint(uint64(off))
- }
- }
-
- // Dump fields in the local vars section
- if stkmap == nil {
- // No locals information, dump everything.
- for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize {
- dumpint(fieldKindPtr)
- dumpint(uint64(off))
- }
- } else if stkmap.n < 0 {
- // Locals size information, dump just the locals.
- size := uintptr(-stkmap.n)
- for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize {
- dumpint(fieldKindPtr)
- dumpint(uint64(off))
- }
- } else if stkmap.n > 0 {
- // Locals bitmap information, scan just the pointers in
- // locals.
- dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp)
- }
- dumpint(fieldKindEol)
-
- // Record arg info for parent.
- child.argoff = s.argp - s.fp
- child.arglen = s.arglen
- child.sp = (*uint8)(unsafe.Pointer(s.sp))
- child.depth++
- stkmap = (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
- if stkmap != nil {
- child.args = stackmapdata(stkmap, pcdata)
- } else {
- child.args.n = -1
- }
- return true
-}
-
-func dumpgoroutine(gp *g) {
- var sp, pc, lr uintptr
- if gp.syscallsp != 0 {
- sp = gp.syscallsp
- pc = gp.syscallpc
- lr = 0
- } else {
- sp = gp.sched.sp
- pc = gp.sched.pc
- lr = gp.sched.lr
- }
-
- dumpint(tagGoroutine)
- dumpint(uint64(uintptr(unsafe.Pointer(gp))))
- dumpint(uint64(sp))
- dumpint(uint64(gp.goid))
- dumpint(uint64(gp.gopc))
- dumpint(uint64(readgstatus(gp)))
- dumpbool(isSystemGoroutine(gp, false))
- dumpbool(false) // isbackground
- dumpint(uint64(gp.waitsince))
- dumpstr(gp.waitreason.String())
- dumpint(uint64(uintptr(gp.sched.ctxt)))
- dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
- dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
- dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))
-
- // dump stack
- var child childInfo
- child.args.n = -1
- child.arglen = 0
- child.sp = nil
- child.depth = 0
- gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, noescape(unsafe.Pointer(&child)), 0)
-
- // dump defer & panic records
- for d := gp._defer; d != nil; d = d.link {
- dumpint(tagDefer)
- dumpint(uint64(uintptr(unsafe.Pointer(d))))
- dumpint(uint64(uintptr(unsafe.Pointer(gp))))
- dumpint(uint64(d.sp))
- dumpint(uint64(d.pc))
- fn := *(**funcval)(unsafe.Pointer(&d.fn))
- dumpint(uint64(uintptr(unsafe.Pointer(fn))))
- if d.fn == nil {
- // d.fn can be nil for open-coded defers
- dumpint(uint64(0))
- } else {
- dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
- }
- dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
- }
- for p := gp._panic; p != nil; p = p.link {
- dumpint(tagPanic)
- dumpint(uint64(uintptr(unsafe.Pointer(p))))
- dumpint(uint64(uintptr(unsafe.Pointer(gp))))
- eface := efaceOf(&p.arg)
- dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
- dumpint(uint64(uintptr(unsafe.Pointer(eface.data))))
- dumpint(0) // was p->defer, no longer recorded
- dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
- }
-}
-
-func dumpgs() {
- assertWorldStopped()
-
- // goroutines & stacks
- forEachG(func(gp *g) {
- status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
- switch status {
- default:
- print("runtime: unexpected G.status ", hex(status), "\n")
- throw("dumpgs in STW - bad status")
- case _Gdead:
- // ok
- case _Grunnable,
- _Gsyscall,
- _Gwaiting:
- dumpgoroutine(gp)
- }
- })
-}
-
-func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
- dumpint(tagQueuedFinalizer)
- dumpint(uint64(uintptr(obj)))
- dumpint(uint64(uintptr(unsafe.Pointer(fn))))
- dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
- dumpint(uint64(uintptr(unsafe.Pointer(fint))))
- dumpint(uint64(uintptr(unsafe.Pointer(ot))))
-}
-
-func dumproots() {
- // To protect mheap_.allspans.
- assertWorldStopped()
-
- // TODO(mwhudson): dump datamask etc from all objects
- // data segment
- dumpint(tagData)
- dumpint(uint64(firstmoduledata.data))
- dumpmemrange(unsafe.Pointer(firstmoduledata.data), firstmoduledata.edata-firstmoduledata.data)
- dumpfields(firstmoduledata.gcdatamask)
-
- // bss segment
- dumpint(tagBSS)
- dumpint(uint64(firstmoduledata.bss))
- dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss)
- dumpfields(firstmoduledata.gcbssmask)
-
- // mspan.types
- for _, s := range mheap_.allspans {
- if s.state.get() == mSpanInUse {
- // Finalizers
- for sp := s.specials; sp != nil; sp = sp.next {
- if sp.kind != _KindSpecialFinalizer {
- continue
- }
- spf := (*specialfinalizer)(unsafe.Pointer(sp))
- p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
- dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
- }
- }
- }
-
- // Finalizer queue
- iterate_finq(finq_callback)
-}
-
-// Bit vector of free marks.
-// Needs to be as big as the largest number of objects per span.
-var freemark [_PageSize / 8]bool
-
-func dumpobjs() {
- // To protect mheap_.allspans.
- assertWorldStopped()
-
- for _, s := range mheap_.allspans {
- if s.state.get() != mSpanInUse {
- continue
- }
- p := s.base()
- size := s.elemsize
- n := (s.npages << _PageShift) / size
- if n > uintptr(len(freemark)) {
- throw("freemark array doesn't have enough entries")
- }
-
- for freeIndex := uintptr(0); freeIndex < s.nelems; freeIndex++ {
- if s.isFree(freeIndex) {
- freemark[freeIndex] = true
- }
- }
-
- for j := uintptr(0); j < n; j, p = j+1, p+size {
- if freemark[j] {
- freemark[j] = false
- continue
- }
- dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
- }
- }
-}
-
-func dumpparams() {
- dumpint(tagParams)
- x := uintptr(1)
- if *(*byte)(unsafe.Pointer(&x)) == 1 {
- dumpbool(false) // little-endian ptrs
- } else {
- dumpbool(true) // big-endian ptrs
- }
- dumpint(goarch.PtrSize)
- var arenaStart, arenaEnd uintptr
- for i1 := range mheap_.arenas {
- if mheap_.arenas[i1] == nil {
- continue
- }
- for i, ha := range mheap_.arenas[i1] {
- if ha == nil {
- continue
- }
- base := arenaBase(arenaIdx(i1)<<arenaL1Shift | arenaIdx(i))
- if arenaStart == 0 || base < arenaStart {
- arenaStart = base
- }
- if base+heapArenaBytes > arenaEnd {
- arenaEnd = base + heapArenaBytes
- }
- }
- }
- dumpint(uint64(arenaStart))
- dumpint(uint64(arenaEnd))
- dumpstr(goarch.GOARCH)
- dumpstr(buildVersion)
- dumpint(uint64(ncpu))
-}
-
-func itab_callback(tab *itab) {
- t := tab._type
- dumptype(t)
- dumpint(tagItab)
- dumpint(uint64(uintptr(unsafe.Pointer(tab))))
- dumpint(uint64(uintptr(unsafe.Pointer(t))))
-}
-
-func dumpitabs() {
- iterate_itabs(itab_callback)
-}
-
-func dumpms() {
- for mp := allm; mp != nil; mp = mp.alllink {
- dumpint(tagOSThread)
- dumpint(uint64(uintptr(unsafe.Pointer(mp))))
- dumpint(uint64(mp.id))
- dumpint(mp.procid)
- }
-}
-
-//go:systemstack
-func dumpmemstats(m *MemStats) {
- assertWorldStopped()
-
- // These ints should mirror the fields of the exported
- // MemStats structure, in the same order.
- dumpint(tagMemStats)
- dumpint(m.Alloc)
- dumpint(m.TotalAlloc)
- dumpint(m.Sys)
- dumpint(m.Lookups)
- dumpint(m.Mallocs)
- dumpint(m.Frees)
- dumpint(m.HeapAlloc)
- dumpint(m.HeapSys)
- dumpint(m.HeapIdle)
- dumpint(m.HeapInuse)
- dumpint(m.HeapReleased)
- dumpint(m.HeapObjects)
- dumpint(m.StackInuse)
- dumpint(m.StackSys)
- dumpint(m.MSpanInuse)
- dumpint(m.MSpanSys)
- dumpint(m.MCacheInuse)
- dumpint(m.MCacheSys)
- dumpint(m.BuckHashSys)
- dumpint(m.GCSys)
- dumpint(m.OtherSys)
- dumpint(m.NextGC)
- dumpint(m.LastGC)
- dumpint(m.PauseTotalNs)
- for i := 0; i < 256; i++ {
- dumpint(m.PauseNs[i])
- }
- dumpint(uint64(m.NumGC))
-}
-
-func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
- stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
- dumpint(tagMemProf)
- dumpint(uint64(uintptr(unsafe.Pointer(b))))
- dumpint(uint64(size))
- dumpint(uint64(nstk))
- for i := uintptr(0); i < nstk; i++ {
- pc := stk[i]
- f := findfunc(pc)
- if !f.valid() {
- var buf [64]byte
- n := len(buf)
- n--
- buf[n] = ')'
- if pc == 0 {
- n--
- buf[n] = '0'
- } else {
- for pc > 0 {
- n--
- buf[n] = "0123456789abcdef"[pc&15]
- pc >>= 4
- }
- }
- n--
- buf[n] = 'x'
- n--
- buf[n] = '0'
- n--
- buf[n] = '('
- dumpslice(buf[n:])
- dumpstr("?")
- dumpint(0)
- } else {
- dumpstr(funcname(f))
- if i > 0 && pc > f.entry() {
- pc--
- }
- file, line := funcline(f, pc)
- dumpstr(file)
- dumpint(uint64(line))
- }
- }
- dumpint(uint64(allocs))
- dumpint(uint64(frees))
-}
-
-func dumpmemprof() {
- // To protect mheap_.allspans.
- assertWorldStopped()
-
- iterate_memprof(dumpmemprof_callback)
- for _, s := range mheap_.allspans {
- if s.state.get() != mSpanInUse {
- continue
- }
- for sp := s.specials; sp != nil; sp = sp.next {
- if sp.kind != _KindSpecialProfile {
- continue
- }
- spp := (*specialprofile)(unsafe.Pointer(sp))
- p := s.base() + uintptr(spp.special.offset)
- dumpint(tagAllocSample)
- dumpint(uint64(p))
- dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
- }
- }
-}
-
-var dumphdr = []byte("go1.7 heap dump\n")
-
-func mdump(m *MemStats) {
- assertWorldStopped()
-
- // make sure we're done sweeping
- for _, s := range mheap_.allspans {
- if s.state.get() == mSpanInUse {
- s.ensureSwept()
- }
- }
- memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
- dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
- dumpparams()
- dumpitabs()
- dumpobjs()
- dumpgs()
- dumpms()
- dumproots()
- dumpmemstats(m)
- dumpmemprof()
- dumpint(tagEOF)
- flush()
-}
-
-func writeheapdump_m(fd uintptr, m *MemStats) {
- assertWorldStopped()
-
- _g_ := getg()
- casgstatus(_g_.m.curg, _Grunning, _Gwaiting)
- _g_.waitreason = waitReasonDumpingHeap
-
- // Update stats so we can dump them.
- // As a side effect, flushes all the mcaches so the mspan.freelist
- // lists contain all the free objects.
- updatememstats()
-
- // Set dump file.
- dumpfd = fd
-
- // Call dump routine.
- mdump(m)
-
- // Reset dump file.
- dumpfd = 0
- if tmpbuf != nil {
- sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
- tmpbuf = nil
- }
-
- casgstatus(_g_.m.curg, _Gwaiting, _Grunning)
-}
-
-// dumpint() the kind & offset of each field in an object.
-func dumpfields(bv bitvector) {
- dumpbv(&bv, 0)
- dumpint(fieldKindEol)
-}
-
-func makeheapobjbv(p uintptr, size uintptr) bitvector {
- // Extend the temp buffer if necessary.
- nptr := size / goarch.PtrSize
- if uintptr(len(tmpbuf)) < nptr/8+1 {
- if tmpbuf != nil {
- sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
- }
- n := nptr/8 + 1
- p := sysAlloc(n, &memstats.other_sys)
- if p == nil {
- throw("heapdump: out of memory")
- }
- tmpbuf = (*[1 << 30]byte)(p)[:n]
- }
- // Convert heap bitmap to pointer bitmap.
- for i := uintptr(0); i < nptr/8+1; i++ {
- tmpbuf[i] = 0
- }
- i := uintptr(0)
- hbits := heapBitsForAddr(p)
- for ; i < nptr; i++ {
- if !hbits.morePointers() {
- break // end of object
- }
- if hbits.isPointer() {
- tmpbuf[i/8] |= 1 << (i % 8)
- }
- hbits = hbits.next()
- }
- return bitvector{int32(i), &tmpbuf[0]}
-}
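
As the dumpint comment promises, the dump stream is made of standard unsigned varints, so a heap-dump reader can lean on encoding/binary. A small round-trip sketch (hypothetical reader code, not part of the runtime):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Encode 300 the way dumpint does: 7 bits at a time, low-order first,
	// with the high bit as a continuation flag.
	v := uint64(300)
	var out []byte
	for v >= 0x80 {
		out = append(out, byte(v|0x80))
		v >>= 7
	}
	out = append(out, byte(v))

	// A dump consumer can parse it back with the standard decoder.
	n, err := binary.ReadUvarint(bytes.NewReader(out))
	fmt.Println(n, err) // 300 <nil>
}
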
diff --git a/contrib/go/_std_1.18/src/runtime/histogram.go b/contrib/go/_std_1.18/src/runtime/histogram.go
deleted file mode 100644
index cd7e29a8c8..0000000000
--- a/contrib/go/_std_1.18/src/runtime/histogram.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "runtime/internal/atomic"
- "runtime/internal/sys"
- "unsafe"
-)
-
-const (
- // For the time histogram type, we use an HDR histogram.
- // Values are placed in super-buckets based solely on the most
- // significant set bit. Thus, super-buckets are power-of-2 sized.
- // Values are then placed into sub-buckets based on the value of
- // the next timeHistSubBucketBits most significant bits. Thus,
- // sub-buckets are linear within a super-bucket.
- //
- // Therefore, the number of sub-buckets (timeHistNumSubBuckets)
- // defines the error. This error may be computed as
- // 1/timeHistNumSubBuckets*100%. For example, for 16 sub-buckets
- // per super-bucket the error is approximately 6%.
- //
- // The number of super-buckets (timeHistNumSuperBuckets), on the
- // other hand, defines the range. To reserve room for sub-buckets,
- // bit timeHistSubBucketBits is the first bit considered for
- // super-buckets, so super-bucket indices are adjusted accordingly.
- //
- // As an example, consider 45 super-buckets with 16 sub-buckets.
- //
- // 00110
- // ^----
- // │ ^
- // │ └---- Lowest 4 bits -> sub-bucket 6
- // └------- Bit 4 unset -> super-bucket 0
- //
- // 10110
- // ^----
- // │ ^
- // │ └---- Next 4 bits -> sub-bucket 6
- // └------- Bit 4 set -> super-bucket 1
- //
- // 100010
- // ^----^
- // │ ^ └-- Lower bits ignored
- // │ └---- Next 4 bits -> sub-bucket 1
- // └------- Bit 5 set -> super-bucket 2
- //
- // Following this pattern, super-bucket 44 will have the bit 47 set. We don't
- // have any buckets for higher values, so the highest sub-bucket will
- // contain values of 2^48-1 nanoseconds or approx. 3 days. This range is
- // more than enough to handle durations produced by the runtime.
- timeHistSubBucketBits = 4
- timeHistNumSubBuckets = 1 << timeHistSubBucketBits
- timeHistNumSuperBuckets = 45
- timeHistTotalBuckets = timeHistNumSuperBuckets*timeHistNumSubBuckets + 1
-)
-
-// timeHistogram represents a distribution of durations in
-// nanoseconds.
-//
-// The accuracy and range of the histogram are defined by the
-// timeHistSubBucketBits and timeHistNumSuperBuckets constants.
-//
-// It is an HDR histogram with exponentially-distributed
-// buckets and linearly distributed sub-buckets.
-//
-// Counts in the histogram are updated atomically, so it is safe
-// for concurrent use. It is also safe to read all the values
-// atomically.
-type timeHistogram struct {
- counts [timeHistNumSuperBuckets * timeHistNumSubBuckets]uint64
-
- // underflow counts all the times we got a negative duration
- // sample. Because of how time works on some platforms, it's
- // possible to measure negative durations. We could ignore them,
- // but we record them anyway because it's better to have some
- // signal that it's happening than just missing samples.
- underflow uint64
-}
-
-// record adds the given duration to the distribution.
-//
-// Disallow preemptions and stack growths because this function
-// may run in sensitive locations.
-//go:nosplit
-func (h *timeHistogram) record(duration int64) {
- if duration < 0 {
- atomic.Xadd64(&h.underflow, 1)
- return
- }
- // The index of the exponential bucket is just the index
- // of the highest set bit adjusted for how many bits we
- // use for the subbucket. Note that it's timeHistSubBucketBits-1
- // because we use the 0th bucket to hold values < timeHistNumSubBuckets.
- var superBucket, subBucket uint
- if duration >= timeHistNumSubBuckets {
- // At this point, we know the duration value will always be
- // at least timeHistSubBucketBits long.
- superBucket = uint(sys.Len64(uint64(duration))) - timeHistSubBucketBits
- if superBucket*timeHistNumSubBuckets >= uint(len(h.counts)) {
- // The bucket index we got is larger than what we support, so
- // include this count in the highest bucket, which extends to
- // infinity.
- superBucket = timeHistNumSuperBuckets - 1
- subBucket = timeHistNumSubBuckets - 1
- } else {
- // The linear subbucket index is just the timeHistSubBucketBits
- // bits after the top bit. To extract that value, shift down
- // the duration such that we leave the top bit and the next bits
- // intact, then extract the index.
- subBucket = uint((duration >> (superBucket - 1)) % timeHistNumSubBuckets)
- }
- } else {
- subBucket = uint(duration)
- }
- atomic.Xadd64(&h.counts[superBucket*timeHistNumSubBuckets+subBucket], 1)
-}
-
-const (
- fInf = 0x7FF0000000000000
- fNegInf = 0xFFF0000000000000
-)
-
-func float64Inf() float64 {
- inf := uint64(fInf)
- return *(*float64)(unsafe.Pointer(&inf))
-}
-
-func float64NegInf() float64 {
- inf := uint64(fNegInf)
- return *(*float64)(unsafe.Pointer(&inf))
-}
-
-// timeHistogramMetricsBuckets generates a slice of boundaries for
-// the timeHistogram. These boundaries are represented in seconds,
-// not nanoseconds like the timeHistogram represents durations.
-func timeHistogramMetricsBuckets() []float64 {
- b := make([]float64, timeHistTotalBuckets+1)
- b[0] = float64NegInf()
- // Super-bucket 0 has no bits above timeHistSubBucketBits
- // set, so just iterate over each bucket and assign the
- // incrementing bucket.
- for i := 0; i < timeHistNumSubBuckets; i++ {
- bucketNanos := uint64(i)
- b[i+1] = float64(bucketNanos) / 1e9
- }
- // Generate the rest of the super-buckets. It's easier to reason
- // about if we cut out the 0'th bucket, so subtract one since
- // we just handled that bucket.
- for i := 0; i < timeHistNumSuperBuckets-1; i++ {
- for j := 0; j < timeHistNumSubBuckets; j++ {
- // Set the super-bucket bit.
- bucketNanos := uint64(1) << (i + timeHistSubBucketBits)
- // Set the sub-bucket bits.
- bucketNanos |= uint64(j) << i
- // The index for this bucket is going to be the (i+1)'th super bucket
- // (note that we're starting from zero, but handled the first super-bucket
- // earlier, so we need to compensate), and the j'th sub bucket.
- // Add 1 because we left space for -Inf.
- bucketIndex := (i+1)*timeHistNumSubBuckets + j + 1
- // Convert nanoseconds to seconds via a division.
- // These values will all be exactly representable by a float64.
- b[bucketIndex] = float64(bucketNanos) / 1e9
- }
- }
- b[len(b)-1] = float64Inf()
- return b
-}
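
The bucket arithmetic in record is compact; the sketch below replays it on the three worked examples from the doc comment at the top of the file, using the public math/bits in place of runtime/internal/sys (constants renamed for brevity):

package main

import (
	"fmt"
	"math/bits"
)

const (
	subBucketBits = 4 // timeHistSubBucketBits
	numSubBuckets = 1 << subBucketBits
)

// bucket mirrors timeHistogram.record's index math for in-range durations.
func bucket(d int64) (super, sub uint) {
	if d < numSubBuckets {
		return 0, uint(d) // super-bucket 0 holds small values directly
	}
	super = uint(bits.Len64(uint64(d))) - subBucketBits
	sub = uint((d >> (super - 1)) % numSubBuckets)
	return super, sub
}

func main() {
	// The three examples from the doc comment above.
	for _, d := range []int64{0b00110, 0b10110, 0b100010} {
		s, b := bucket(d)
		fmt.Printf("%#b -> super-bucket %d, sub-bucket %d\n", d, s, b)
	}
	// Output matches the comment: (0,6), (1,6), (2,1).
}
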
diff --git a/contrib/go/_std_1.18/src/runtime/internal/atomic/atomic_amd64.go b/contrib/go/_std_1.18/src/runtime/internal/atomic/atomic_amd64.go
deleted file mode 100644
index e36eb83a11..0000000000
--- a/contrib/go/_std_1.18/src/runtime/internal/atomic/atomic_amd64.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package atomic
-
-import "unsafe"
-
-// Export some functions via linkname to assembly in sync/atomic.
-//go:linkname Load
-//go:linkname Loadp
-//go:linkname Load64
-
-//go:nosplit
-//go:noinline
-func Load(ptr *uint32) uint32 {
- return *ptr
-}
-
-//go:nosplit
-//go:noinline
-func Loadp(ptr unsafe.Pointer) unsafe.Pointer {
- return *(*unsafe.Pointer)(ptr)
-}
-
-//go:nosplit
-//go:noinline
-func Load64(ptr *uint64) uint64 {
- return *ptr
-}
-
-//go:nosplit
-//go:noinline
-func LoadAcq(ptr *uint32) uint32 {
- return *ptr
-}
-
-//go:nosplit
-//go:noinline
-func LoadAcq64(ptr *uint64) uint64 {
- return *ptr
-}
-
-//go:nosplit
-//go:noinline
-func LoadAcquintptr(ptr *uintptr) uintptr {
- return *ptr
-}
-
-//go:noescape
-func Xadd(ptr *uint32, delta int32) uint32
-
-//go:noescape
-func Xadd64(ptr *uint64, delta int64) uint64
-
-//go:noescape
-func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
-
-//go:noescape
-func Xchg(ptr *uint32, new uint32) uint32
-
-//go:noescape
-func Xchg64(ptr *uint64, new uint64) uint64
-
-//go:noescape
-func Xchguintptr(ptr *uintptr, new uintptr) uintptr
-
-//go:nosplit
-//go:noinline
-func Load8(ptr *uint8) uint8 {
- return *ptr
-}
-
-//go:noescape
-func And8(ptr *uint8, val uint8)
-
-//go:noescape
-func Or8(ptr *uint8, val uint8)
-
-//go:noescape
-func And(ptr *uint32, val uint32)
-
-//go:noescape
-func Or(ptr *uint32, val uint32)
-
-// NOTE: Do not add atomicxor8 (XOR is not idempotent).
-
-//go:noescape
-func Cas64(ptr *uint64, old, new uint64) bool
-
-//go:noescape
-func CasRel(ptr *uint32, old, new uint32) bool
-
-//go:noescape
-func Store(ptr *uint32, val uint32)
-
-//go:noescape
-func Store8(ptr *uint8, val uint8)
-
-//go:noescape
-func Store64(ptr *uint64, val uint64)
-
-//go:noescape
-func StoreRel(ptr *uint32, val uint32)
-
-//go:noescape
-func StoreRel64(ptr *uint64, val uint64)
-
-//go:noescape
-func StoreReluintptr(ptr *uintptr, val uintptr)
-
-// StorepNoWB performs *ptr = val atomically and without a write
-// barrier.
-//
-// NO go:noescape annotation; see atomic_pointer.go.
-func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
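
Outside the runtime, the corresponding operations live in sync/atomic (at this Go version there is no public counterpart for And/Or or the Acq/Rel variants). A quick mapping, assuming nothing beyond the standard library:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var n uint64
	atomic.StoreUint64(&n, 1)                            // Store64
	fmt.Println(atomic.AddUint64(&n, 2))                 // Xadd64 -> 3
	fmt.Println(atomic.SwapUint64(&n, 10))               // Xchg64 -> 3 (old value)
	fmt.Println(atomic.CompareAndSwapUint64(&n, 10, 11)) // Cas64  -> true
	fmt.Println(atomic.LoadUint64(&n))                   // Load64 -> 11
}
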
diff --git a/contrib/go/_std_1.18/src/runtime/internal/atomic/types.go b/contrib/go/_std_1.18/src/runtime/internal/atomic/types.go
deleted file mode 100644
index 1a240d7c91..0000000000
--- a/contrib/go/_std_1.18/src/runtime/internal/atomic/types.go
+++ /dev/null
@@ -1,395 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package atomic
-
-import "unsafe"
-
-// Int32 is an atomically accessed int32 value.
-//
-// An Int32 must not be copied.
-type Int32 struct {
- noCopy noCopy
- value int32
-}
-
-// Load accesses and returns the value atomically.
-func (i *Int32) Load() int32 {
- return Loadint32(&i.value)
-}
-
-// Store updates the value atomically.
-func (i *Int32) Store(value int32) {
- Storeint32(&i.value, value)
-}
-
-// CompareAndSwap atomically compares i's value with old,
-// and if they're equal, swaps i's value with new.
-//
-// Returns true if the operation succeeded.
-func (i *Int32) CompareAndSwap(old, new int32) bool {
- return Casint32(&i.value, old, new)
-}
-
-// Swap replaces i's value with new, returning
-// i's value before the replacement.
-func (i *Int32) Swap(new int32) int32 {
- return Xchgint32(&i.value, new)
-}
-
-// Add adds delta to i atomically, returning
-// the new updated value.
-//
-// This operation wraps around in the usual
-// two's-complement way.
-func (i *Int32) Add(delta int32) int32 {
- return Xaddint32(&i.value, delta)
-}
-
-// Int64 is an atomically accessed int64 value.
-//
-// An Int64 must not be copied.
-type Int64 struct {
- noCopy noCopy
- value int64
-}
-
-// Load accesses and returns the value atomically.
-func (i *Int64) Load() int64 {
- return Loadint64(&i.value)
-}
-
-// Store updates the value atomically.
-func (i *Int64) Store(value int64) {
- Storeint64(&i.value, value)
-}
-
-// CompareAndSwap atomically compares i's value with old,
-// and if they're equal, swaps i's value with new.
-//
-// Returns true if the operation succeeded.
-func (i *Int64) CompareAndSwap(old, new int64) bool {
- return Casint64(&i.value, old, new)
-}
-
-// Swap replaces i's value with new, returning
-// i's value before the replacement.
-func (i *Int64) Swap(new int64) int64 {
- return Xchgint64(&i.value, new)
-}
-
-// Add adds delta to i atomically, returning
-// the new updated value.
-//
-// This operation wraps around in the usual
-// two's-complement way.
-func (i *Int64) Add(delta int64) int64 {
- return Xaddint64(&i.value, delta)
-}
-
-// Uint8 is an atomically accessed uint8 value.
-//
-// A Uint8 must not be copied.
-type Uint8 struct {
- noCopy noCopy
- value uint8
-}
-
-// Load accesses and returns the value atomically.
-func (u *Uint8) Load() uint8 {
- return Load8(&u.value)
-}
-
-// Store updates the value atomically.
-func (u *Uint8) Store(value uint8) {
- Store8(&u.value, value)
-}
-
-// And takes value and performs a bit-wise
-// "and" operation with the value of u, storing
-// the result into u.
-//
-// The full process is performed atomically.
-func (u *Uint8) And(value uint8) {
- And8(&u.value, value)
-}
-
-// Or takes value and performs a bit-wise
-// "or" operation with the value of u, storing
-// the result into u.
-//
-// The full process is performed atomically.
-func (u *Uint8) Or(value uint8) {
- Or8(&u.value, value)
-}
-
-// Uint32 is an atomically accessed uint32 value.
-//
-// A Uint32 must not be copied.
-type Uint32 struct {
- noCopy noCopy
- value uint32
-}
-
-// Load accesses and returns the value atomically.
-func (u *Uint32) Load() uint32 {
- return Load(&u.value)
-}
-
-// LoadAcquire is a partially unsynchronized version
-// of Load that relaxes ordering constraints. Other threads
-// may observe operations that precede this operation to
-// occur after it, but no operation that occurs after it
-// on this thread can be observed to occur before it.
-//
-// WARNING: Use sparingly and with great care.
-func (u *Uint32) LoadAcquire() uint32 {
- return LoadAcq(&u.value)
-}
-
-// Store updates the value atomically.
-func (u *Uint32) Store(value uint32) {
- Store(&u.value, value)
-}
-
-// StoreRelease is a partially unsynchronized version
-// of Store that relaxes ordering constraints. Other threads
-// may observe operations that occur after this operation to
-// precede it, but no operation that precedes it
-// on this thread can be observed to occur after it.
-//
-// WARNING: Use sparingly and with great care.
-func (u *Uint32) StoreRelease(value uint32) {
- StoreRel(&u.value, value)
-}
-
-// CompareAndSwap atomically compares u's value with old,
-// and if they're equal, swaps u's value with new.
-//
-// Returns true if the operation succeeded.
-func (u *Uint32) CompareAndSwap(old, new uint32) bool {
- return Cas(&u.value, old, new)
-}
-
-// CompareAndSwapRelease is a partially unsynchronized version
-// of Cas that relaxes ordering constraints. Other threads
-// may observe operations that occur after this operation to
-// precede it, but no operation that precedes it
-// on this thread can be observed to occur after it.
-//
-// Returns true if the operation succeeded.
-//
-// WARNING: Use sparingly and with great care.
-func (u *Uint32) CompareAndSwapRelease(old, new uint32) bool {
- return CasRel(&u.value, old, new)
-}
-
-// Swap replaces u's value with new, returning
-// u's value before the replacement.
-func (u *Uint32) Swap(value uint32) uint32 {
- return Xchg(&u.value, value)
-}
-
-// And takes value and performs a bit-wise
-// "and" operation with the value of u, storing
-// the result into u.
-//
-// The full process is performed atomically.
-func (u *Uint32) And(value uint32) {
- And(&u.value, value)
-}
-
-// Or takes value and performs a bit-wise
-// "or" operation with the value of u, storing
-// the result into u.
-//
-// The full process is performed atomically.
-func (u *Uint32) Or(value uint32) {
- Or(&u.value, value)
-}
-
-// Add adds delta to u atomically, returning
-// the new updated value.
-//
-// This operation wraps around in the usual
-// two's-complement way.
-func (u *Uint32) Add(delta int32) uint32 {
- return Xadd(&u.value, delta)
-}
-
-// Uint64 is an atomically accessed uint64 value.
-//
-// A Uint64 must not be copied.
-type Uint64 struct {
- noCopy noCopy
- value uint64
-}
-
-// Load accesses and returns the value atomically.
-func (u *Uint64) Load() uint64 {
- return Load64(&u.value)
-}
-
-// Store updates the value atomically.
-func (u *Uint64) Store(value uint64) {
- Store64(&u.value, value)
-}
-
-// CompareAndSwap atomically compares u's value with old,
-// and if they're equal, swaps u's value with new.
-//
-// Returns true if the operation succeeded.
-func (u *Uint64) CompareAndSwap(old, new uint64) bool {
- return Cas64(&u.value, old, new)
-}
-
-// Swap replaces u's value with new, returning
-// u's value before the replacement.
-func (u *Uint64) Swap(value uint64) uint64 {
- return Xchg64(&u.value, value)
-}
-
-// Add adds delta to u atomically, returning
-// the new updated value.
-//
-// This operation wraps around in the usual
-// two's-complement way.
-func (u *Uint64) Add(delta int64) uint64 {
- return Xadd64(&u.value, delta)
-}
-
-// Uintptr is an atomically accessed uintptr value.
-//
-// A Uintptr must not be copied.
-type Uintptr struct {
- noCopy noCopy
- value uintptr
-}
-
-// Load accesses and returns the value atomically.
-func (u *Uintptr) Load() uintptr {
- return Loaduintptr(&u.value)
-}
-
-// LoadAcquire is a partially unsynchronized version
-// of Load that relaxes ordering constraints. Other threads
-// may observe operations that precede this operation to
-// occur after it, but no operation that occurs after it
-// on this thread can be observed to occur before it.
-//
-// WARNING: Use sparingly and with great care.
-func (u *Uintptr) LoadAcquire() uintptr {
- return LoadAcquintptr(&u.value)
-}
-
-// Store updates the value atomically.
-func (u *Uintptr) Store(value uintptr) {
- Storeuintptr(&u.value, value)
-}
-
-// StoreRelease is a partially unsynchronized version
-// of Store that relaxes ordering constraints. Other threads
-// may observe operations that occur after this operation to
-// precede it, but no operation that precedes it
-// on this thread can be observed to occur after it.
-//
-// WARNING: Use sparingly and with great care.
-func (u *Uintptr) StoreRelease(value uintptr) {
- StoreReluintptr(&u.value, value)
-}
-
-// CompareAndSwap atomically compares u's value with old,
-// and if they're equal, swaps u's value with new.
-//
-// Returns true if the operation succeeded.
-func (u *Uintptr) CompareAndSwap(old, new uintptr) bool {
- return Casuintptr(&u.value, old, new)
-}
-
-// Swap replaces u's value with new, returning
-// u's value before the replacement.
-func (u *Uintptr) Swap(value uintptr) uintptr {
- return Xchguintptr(&u.value, value)
-}
-
-// Add adds delta to u atomically, returning
-// the new updated value.
-//
-// This operation wraps around in the usual
-// two's-complement way.
-func (u *Uintptr) Add(delta uintptr) uintptr {
- return Xadduintptr(&u.value, delta)
-}
-
-// Float64 is an atomically accessed float64 value.
-//
-// A Float64 must not be copied.
-type Float64 struct {
- u Uint64
-}
-
-// Load accesses and returns the value atomically.
-func (f *Float64) Load() float64 {
- r := f.u.Load()
- return *(*float64)(unsafe.Pointer(&r))
-}
-
-// Store updates the value atomically.
-func (f *Float64) Store(value float64) {
- f.u.Store(*(*uint64)(unsafe.Pointer(&value)))
-}
-
-// UnsafePointer is an atomically accessed unsafe.Pointer value.
-//
-// Note that because of the atomicity guarantees, stores to values
-// of this type never trigger a write barrier, and the relevant
-// methods are suffixed with "NoWB" to indicate that explicitly.
-// As a result, this type should be used carefully, and sparingly,
-// mostly with values that do not live in the Go heap anyway.
-//
-// An UnsafePointer must not be copied.
-type UnsafePointer struct {
- noCopy noCopy
- value unsafe.Pointer
-}
-
-// Load accesses and returns the value atomically.
-func (u *UnsafePointer) Load() unsafe.Pointer {
- return Loadp(unsafe.Pointer(&u.value))
-}
-
-// StoreNoWB updates the value atomically.
-//
-// WARNING: As the name implies this operation does *not*
-// perform a write barrier on value, and so this operation may
-// hide pointers from the GC. Use with care and sparingly.
-// It is safe to use with values not found in the Go heap.
-func (u *UnsafePointer) StoreNoWB(value unsafe.Pointer) {
- StorepNoWB(unsafe.Pointer(&u.value), value)
-}
-
-// CompareAndSwapNoWB atomically (with respect to other methods)
-// compares u's value with old, and if they're equal,
-// swaps u's value with new.
-//
-// Returns true if the operation succeeded.
-//
-// WARNING: As the name implies this operation does *not*
-// perform a write barrier on value, and so this operation may
-// hide pointers from the GC. Use with care and sparingly.
-// It is safe to use with values not found in the Go heap.
-func (u *UnsafePointer) CompareAndSwapNoWB(old, new unsafe.Pointer) bool {
- return Casp1(&u.value, old, new)
-}
-
-// noCopy may be embedded into structs which must not be copied
-// after the first use.
-//
-// See https://golang.org/issues/8005#issuecomment-190753527
-// for details.
-type noCopy struct{}
-
-// Lock is a no-op used by -copylocks checker from `go vet`.
-func (*noCopy) Lock() {}
-func (*noCopy) Unlock() {}
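
Go 1.19, the release this commit moves the vendored stdlib to, exports typed atomics in sync/atomic that closely mirror these internal wrappers (without the Acquire/Release and NoWB variants). A brief sketch:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var u atomic.Uint64 // public analogue of the internal Uint64 above
	u.Store(1)
	u.Add(2)
	fmt.Println(u.CompareAndSwap(3, 7), u.Load()) // true 7
}

Like the internal types, the public ones embed a noCopy-style guard so `go vet`'s copylocks check flags accidental copies.
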
diff --git a/contrib/go/_std_1.18/src/runtime/internal/syscall/asm_linux_amd64.s b/contrib/go/_std_1.18/src/runtime/internal/syscall/asm_linux_amd64.s
deleted file mode 100644
index 961d9bd640..0000000000
--- a/contrib/go/_std_1.18/src/runtime/internal/syscall/asm_linux_amd64.s
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "textflag.h"
-
-// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
-//
-// Syscall # in AX, args in DI SI DX R10 R8 R9, return in AX DX.
-//
-// Note that this differs from "standard" ABI convention, which would pass 4th
-// arg in CX, not R10.
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- MOVQ num+0(FP), AX // syscall entry
- MOVQ a1+8(FP), DI
- MOVQ a2+16(FP), SI
- MOVQ a3+24(FP), DX
- MOVQ a4+32(FP), R10
- MOVQ a5+40(FP), R8
- MOVQ a6+48(FP), R9
- SYSCALL
- CMPQ AX, $0xfffffffffffff001
- JLS ok
- MOVQ $-1, r1+56(FP)
- MOVQ $0, r2+64(FP)
- NEGQ AX
- MOVQ AX, errno+72(FP)
- RET
-ok:
- MOVQ AX, r1+56(FP)
- MOVQ DX, r2+64(FP)
- MOVQ $0, errno+72(FP)
- RET
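
The public syscall package follows the same register convention on linux/amd64. A userspace sketch of a raw syscall (Linux-only; getpid never fails, so the errno check is just for shape):

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Number in AX, args in DI/SI/DX (R10/R8/R9 for Syscall6's a4-a6),
	// results in AX/DX, negated errno on failure -- as in the assembly above.
	pid, _, errno := syscall.Syscall(syscall.SYS_GETPID, 0, 0, 0)
	if errno != 0 {
		fmt.Println("errno:", errno)
		return
	}
	fmt.Println("pid:", pid)
}
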
diff --git a/contrib/go/_std_1.18/src/runtime/internal/syscall/syscall_linux.go b/contrib/go/_std_1.18/src/runtime/internal/syscall/syscall_linux.go
deleted file mode 100644
index 06d5f21e7c..0000000000
--- a/contrib/go/_std_1.18/src/runtime/internal/syscall/syscall_linux.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package syscall provides the syscall primitives required for the runtime.
-package syscall
-
-// TODO(https://go.dev/issue/51087): This package is incomplete and currently
-// only contains very minimal support for Linux.
-
-// Syscall6 calls system call number 'num' with arguments a1-6.
-func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
diff --git a/contrib/go/_std_1.18/src/runtime/lfstack_64bit.go b/contrib/go/_std_1.18/src/runtime/lfstack_64bit.go
deleted file mode 100644
index 3f0e480897..0000000000
--- a/contrib/go/_std_1.18/src/runtime/lfstack_64bit.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm
-
-package runtime
-
-import "unsafe"
-
-const (
- // addrBits is the number of bits needed to represent a virtual address.
- //
- // See heapAddrBits for a table of address space sizes on
- // various architectures. 48 bits is enough for all
- // architectures except s390x.
- //
- // On AMD64, virtual addresses are 48-bit (or 57-bit) numbers sign extended to 64.
- // We shift the address left 16 to eliminate the sign extended part and make
- // room in the bottom for the count.
- //
- // On s390x, virtual addresses are 64-bit. There's not much we
- // can do about this, so we just hope that the kernel doesn't
- // get to really high addresses and panic if it does.
- addrBits = 48
-
- // In addition to the 16 bits taken from the top, we can take 3 from the
- // bottom, because node must be pointer-aligned, giving a total of 19 bits
- // of count.
- cntBits = 64 - addrBits + 3
-
- // On AIX, 64-bit addresses are split into 36-bit segment number and 28-bit
- // offset in segment. Segment numbers in the range 0x0A0000000-0x0AFFFFFFF (LSA)
- // are available for mmap.
- // We assume all lfnode addresses are from memory allocated with mmap.
- // We use one bit to distinguish between the two ranges.
- aixAddrBits = 57
- aixCntBits = 64 - aixAddrBits + 3
-)
-
-func lfstackPack(node *lfnode, cnt uintptr) uint64 {
- if GOARCH == "ppc64" && GOOS == "aix" {
- return uint64(uintptr(unsafe.Pointer(node)))<<(64-aixAddrBits) | uint64(cnt&(1<<aixCntBits-1))
- }
- return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1))
-}
-
-func lfstackUnpack(val uint64) *lfnode {
- if GOARCH == "amd64" {
- // amd64 systems can place the stack above the VA hole, so we need to sign extend
- // val before unpacking.
- return (*lfnode)(unsafe.Pointer(uintptr(int64(val) >> cntBits << 3)))
- }
- if GOARCH == "ppc64" && GOOS == "aix" {
- return (*lfnode)(unsafe.Pointer(uintptr((val >> aixCntBits << 3) | 0xa<<56)))
- }
- return (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3)))
-}
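
A pure-arithmetic sketch of the default (non-AIX) pack/unpack path, leaving out the amd64 sign-extension case; addr must be 8-byte aligned and below 2^48, as the code above assumes:

package main

import "fmt"

const (
	addrBits = 48
	cntBits  = 64 - addrBits + 3 // 16 bits from the top + 3 alignment bits = 19-bit counter
)

func pack(addr, cnt uint64) uint64 {
	return addr<<(64-addrBits) | cnt&(1<<cntBits-1)
}

func unpack(val uint64) (addr, cnt uint64) {
	// The low 3 address bits come back as zero -- fine, since nodes are
	// pointer-aligned.
	return val >> cntBits << 3, val & (1<<cntBits - 1)
}

func main() {
	a := uint64(0x7f0012345678) &^ 7 // a plausible aligned 48-bit address
	addr, cnt := unpack(pack(a, 5))
	fmt.Printf("%#x %d\n", addr, cnt) // round-trips: 0x7f0012345678 5
}
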
diff --git a/contrib/go/_std_1.18/src/runtime/lock_futex.go b/contrib/go/_std_1.18/src/runtime/lock_futex.go
deleted file mode 100644
index 575df7a1d5..0000000000
--- a/contrib/go/_std_1.18/src/runtime/lock_futex.go
+++ /dev/null
@@ -1,245 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build dragonfly || freebsd || linux
-
-package runtime
-
-import (
- "runtime/internal/atomic"
- "unsafe"
-)
-
-// This implementation depends on OS-specific implementations of
-//
-// futexsleep(addr *uint32, val uint32, ns int64)
-// Atomically,
-// if *addr == val { sleep }
-// Might be woken up spuriously; that's allowed.
-// Don't sleep longer than ns; ns < 0 means forever.
-//
-// futexwakeup(addr *uint32, cnt uint32)
-// If any procs are sleeping on addr, wake up at most cnt.
-
-const (
- mutex_unlocked = 0
- mutex_locked = 1
- mutex_sleeping = 2
-
- active_spin = 4
- active_spin_cnt = 30
- passive_spin = 1
-)
-
-// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
-// mutex_sleeping means that there is presumably at least one sleeping thread.
-// Note that there can be spinning threads during all states - they do not
-// affect mutex's state.
-
-// We use the uintptr mutex.key and note.key as a uint32.
-//go:nosplit
-func key32(p *uintptr) *uint32 {
- return (*uint32)(unsafe.Pointer(p))
-}
-
-func lock(l *mutex) {
- lockWithRank(l, getLockRank(l))
-}
-
-func lock2(l *mutex) {
- gp := getg()
-
- if gp.m.locks < 0 {
- throw("runtime·lock: lock count")
- }
- gp.m.locks++
-
- // Speculative grab for lock.
- v := atomic.Xchg(key32(&l.key), mutex_locked)
- if v == mutex_unlocked {
- return
- }
-
- // wait is either MUTEX_LOCKED or MUTEX_SLEEPING
- // depending on whether there is a thread sleeping
- // on this mutex. If we ever change l->key from
- // MUTEX_SLEEPING to some other value, we must be
- // careful to change it back to MUTEX_SLEEPING before
- // returning, to ensure that the sleeping thread gets
- // its wakeup call.
- wait := v
-
- // On uniprocessors, no point spinning.
- // On multiprocessors, spin for ACTIVE_SPIN attempts.
- spin := 0
- if ncpu > 1 {
- spin = active_spin
- }
- for {
- // Try for lock, spinning.
- for i := 0; i < spin; i++ {
- for l.key == mutex_unlocked {
- if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
- return
- }
- }
- procyield(active_spin_cnt)
- }
-
- // Try for lock, rescheduling.
- for i := 0; i < passive_spin; i++ {
- for l.key == mutex_unlocked {
- if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
- return
- }
- }
- osyield()
- }
-
- // Sleep.
- v = atomic.Xchg(key32(&l.key), mutex_sleeping)
- if v == mutex_unlocked {
- return
- }
- wait = mutex_sleeping
- futexsleep(key32(&l.key), mutex_sleeping, -1)
- }
-}
-
-func unlock(l *mutex) {
- unlockWithRank(l)
-}
-
-func unlock2(l *mutex) {
- v := atomic.Xchg(key32(&l.key), mutex_unlocked)
- if v == mutex_unlocked {
- throw("unlock of unlocked lock")
- }
- if v == mutex_sleeping {
- futexwakeup(key32(&l.key), 1)
- }
-
- gp := getg()
- gp.m.locks--
- if gp.m.locks < 0 {
- throw("runtime·unlock: lock count")
- }
- if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
- gp.stackguard0 = stackPreempt
- }
-}
-
-// One-time notifications.
-func noteclear(n *note) {
- n.key = 0
-}
-
-func notewakeup(n *note) {
- old := atomic.Xchg(key32(&n.key), 1)
- if old != 0 {
- print("notewakeup - double wakeup (", old, ")\n")
- throw("notewakeup - double wakeup")
- }
- futexwakeup(key32(&n.key), 1)
-}
-
-func notesleep(n *note) {
- gp := getg()
- if gp != gp.m.g0 {
- throw("notesleep not on g0")
- }
- ns := int64(-1)
- if *cgo_yield != nil {
- // Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
- ns = 10e6
- }
- for atomic.Load(key32(&n.key)) == 0 {
- gp.m.blocked = true
- futexsleep(key32(&n.key), 0, ns)
- if *cgo_yield != nil {
- asmcgocall(*cgo_yield, nil)
- }
- gp.m.blocked = false
- }
-}
-
-// May run with m.p==nil if called from notetsleep, so write barriers
-// are not allowed.
-//
-//go:nosplit
-//go:nowritebarrier
-func notetsleep_internal(n *note, ns int64) bool {
- gp := getg()
-
- if ns < 0 {
- if *cgo_yield != nil {
- // Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
- ns = 10e6
- }
- for atomic.Load(key32(&n.key)) == 0 {
- gp.m.blocked = true
- futexsleep(key32(&n.key), 0, ns)
- if *cgo_yield != nil {
- asmcgocall(*cgo_yield, nil)
- }
- gp.m.blocked = false
- }
- return true
- }
-
- if atomic.Load(key32(&n.key)) != 0 {
- return true
- }
-
- deadline := nanotime() + ns
- for {
- if *cgo_yield != nil && ns > 10e6 {
- ns = 10e6
- }
- gp.m.blocked = true
- futexsleep(key32(&n.key), 0, ns)
- if *cgo_yield != nil {
- asmcgocall(*cgo_yield, nil)
- }
- gp.m.blocked = false
- if atomic.Load(key32(&n.key)) != 0 {
- break
- }
- now := nanotime()
- if now >= deadline {
- break
- }
- ns = deadline - now
- }
- return atomic.Load(key32(&n.key)) != 0
-}
-
-func notetsleep(n *note, ns int64) bool {
- gp := getg()
- if gp != gp.m.g0 && gp.m.preemptoff != "" {
- throw("notetsleep not on g0")
- }
-
- return notetsleep_internal(n, ns)
-}
-
-// same as runtime·notetsleep, but called on user g (not g0)
-// calls only nosplit functions between entersyscallblock/exitsyscall
-func notetsleepg(n *note, ns int64) bool {
- gp := getg()
- if gp == gp.m.g0 {
- throw("notetsleepg on g0")
- }
-
- entersyscallblock()
- ok := notetsleep_internal(n, ns)
- exitsyscall()
- return ok
-}
-
-func beforeIdle(int64, int64) (*g, bool) {
- return nil, false
-}
-
-func checkTimeouts() {}
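
A user-space sketch of the same three-state protocol, with a one-token channel standing in for futexsleep/futexwakeup and the spinning phases omitted; it preserves the key invariant noted above (a waiter always re-marks the lock mutex_sleeping before sleeping):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

const (
	unlocked = 0
	locked   = 1
	sleeping = 2
)

type futexMutex struct {
	key  uint32
	wake chan struct{} // stands in for the futex wait queue
}

func newFutexMutex() *futexMutex {
	return &futexMutex{wake: make(chan struct{}, 1)}
}

func (l *futexMutex) lock() {
	if atomic.SwapUint32(&l.key, locked) == unlocked {
		return // speculative grab, as in lock2
	}
	// Contended: mark the lock sleeping before each sleep so that
	// unlock knows a wakeup is owed.
	for atomic.SwapUint32(&l.key, sleeping) != unlocked {
		<-l.wake // "futexsleep"; spurious wakeups just loop again
	}
}

func (l *futexMutex) unlock() {
	if atomic.SwapUint32(&l.key, unlocked) == sleeping {
		select { // "futexwakeup(addr, 1)"
		case l.wake <- struct{}{}:
		default: // a wakeup token is already pending
		}
	}
}

func main() {
	m := newFutexMutex()
	var wg sync.WaitGroup
	n := 0
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			m.lock()
			n++
			m.unlock()
		}()
	}
	wg.Wait()
	fmt.Println(n) // always 100
}
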
diff --git a/contrib/go/_std_1.18/src/runtime/lock_sema.go b/contrib/go/_std_1.18/src/runtime/lock_sema.go
deleted file mode 100644
index db36df1f37..0000000000
--- a/contrib/go/_std_1.18/src/runtime/lock_sema.go
+++ /dev/null
@@ -1,304 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || netbsd || openbsd || plan9 || solaris || windows
-
-package runtime
-
-import (
- "runtime/internal/atomic"
- "unsafe"
-)
-
-// This implementation depends on OS-specific implementations of
-//
-// func semacreate(mp *m)
-// Create a semaphore for mp, if it does not already have one.
-//
-// func semasleep(ns int64) int32
-// If ns < 0, acquire m's semaphore and return 0.
-// If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
-// Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
-//
-// func semawakeup(mp *m)
-// Wake up mp, which is or will soon be sleeping on its semaphore.
-//
-const (
- locked uintptr = 1
-
- active_spin = 4
- active_spin_cnt = 30
- passive_spin = 1
-)
-
-func lock(l *mutex) {
- lockWithRank(l, getLockRank(l))
-}
-
-func lock2(l *mutex) {
- gp := getg()
- if gp.m.locks < 0 {
- throw("runtime·lock: lock count")
- }
- gp.m.locks++
-
- // Speculative grab for lock.
- if atomic.Casuintptr(&l.key, 0, locked) {
- return
- }
- semacreate(gp.m)
-
- // On uniprocessors, no point spinning.
- // On multiprocessors, spin for ACTIVE_SPIN attempts.
- spin := 0
- if ncpu > 1 {
- spin = active_spin
- }
-Loop:
- for i := 0; ; i++ {
- v := atomic.Loaduintptr(&l.key)
- if v&locked == 0 {
- // Unlocked. Try to lock.
- if atomic.Casuintptr(&l.key, v, v|locked) {
- return
- }
- i = 0
- }
- if i < spin {
- procyield(active_spin_cnt)
- } else if i < spin+passive_spin {
- osyield()
- } else {
- // Someone else has it.
- // l->waitm points to a linked list of M's waiting
- // for this lock, chained through m->nextwaitm.
- // Queue this M.
- for {
- gp.m.nextwaitm = muintptr(v &^ locked)
- if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
- break
- }
- v = atomic.Loaduintptr(&l.key)
- if v&locked == 0 {
- continue Loop
- }
- }
- if v&locked != 0 {
- // Queued. Wait.
- semasleep(-1)
- i = 0
- }
- }
- }
-}
-
-func unlock(l *mutex) {
- unlockWithRank(l)
-}
-
-//go:nowritebarrier
-// We might not be holding a p in this code.
-func unlock2(l *mutex) {
- gp := getg()
- var mp *m
- for {
- v := atomic.Loaduintptr(&l.key)
- if v == locked {
- if atomic.Casuintptr(&l.key, locked, 0) {
- break
- }
- } else {
- // Other M's are waiting for the lock.
- // Dequeue an M.
- mp = muintptr(v &^ locked).ptr()
- if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
- // Dequeued an M. Wake it.
- semawakeup(mp)
- break
- }
- }
- }
- gp.m.locks--
- if gp.m.locks < 0 {
- throw("runtime·unlock: lock count")
- }
- if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
- gp.stackguard0 = stackPreempt
- }
-}
-
-// One-time notifications.
-func noteclear(n *note) {
- if GOOS == "aix" {
- // On AIX, semaphores might not synchronize the memory in some
- // rare cases. See issue #30189.
- atomic.Storeuintptr(&n.key, 0)
- } else {
- n.key = 0
- }
-}
-
-func notewakeup(n *note) {
- var v uintptr
- for {
- v = atomic.Loaduintptr(&n.key)
- if atomic.Casuintptr(&n.key, v, locked) {
- break
- }
- }
-
- // Successfully set waitm to locked.
- // What was it before?
- switch {
- case v == 0:
- // Nothing was waiting. Done.
- case v == locked:
- // Two notewakeups! Not allowed.
- throw("notewakeup - double wakeup")
- default:
- // Must be the waiting m. Wake it up.
- semawakeup((*m)(unsafe.Pointer(v)))
- }
-}
-
-func notesleep(n *note) {
- gp := getg()
- if gp != gp.m.g0 {
- throw("notesleep not on g0")
- }
- semacreate(gp.m)
- if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
- // Must be locked (got wakeup).
- if n.key != locked {
- throw("notesleep - waitm out of sync")
- }
- return
- }
- // Queued. Sleep.
- gp.m.blocked = true
- if *cgo_yield == nil {
- semasleep(-1)
- } else {
- // Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
- const ns = 10e6
- for atomic.Loaduintptr(&n.key) == 0 {
- semasleep(ns)
- asmcgocall(*cgo_yield, nil)
- }
- }
- gp.m.blocked = false
-}
-
-//go:nosplit
-func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
- // gp and deadline are logically local variables, but they are written
- // as parameters so that the stack space they require is charged
- // to the caller.
- // This reduces the nosplit footprint of notetsleep_internal.
- gp = getg()
-
- // Register for wakeup on n->waitm.
- if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
- // Must be locked (got wakeup).
- if n.key != locked {
- throw("notetsleep - waitm out of sync")
- }
- return true
- }
- if ns < 0 {
- // Queued. Sleep.
- gp.m.blocked = true
- if *cgo_yield == nil {
- semasleep(-1)
- } else {
- // Sleep in arbitrary-but-moderate intervals to poll libc interceptors.
- const ns = 10e6
- for semasleep(ns) < 0 {
- asmcgocall(*cgo_yield, nil)
- }
- }
- gp.m.blocked = false
- return true
- }
-
- deadline = nanotime() + ns
- for {
- // Registered. Sleep.
- gp.m.blocked = true
- if *cgo_yield != nil && ns > 10e6 {
- ns = 10e6
- }
- if semasleep(ns) >= 0 {
- gp.m.blocked = false
- // Acquired semaphore, semawakeup unregistered us.
- // Done.
- return true
- }
- if *cgo_yield != nil {
- asmcgocall(*cgo_yield, nil)
- }
- gp.m.blocked = false
- // Interrupted or timed out. Still registered. Semaphore not acquired.
- ns = deadline - nanotime()
- if ns <= 0 {
- break
- }
- // Deadline hasn't arrived. Keep sleeping.
- }
-
- // Deadline arrived. Still registered. Semaphore not acquired.
- // Want to give up and return, but have to unregister first,
- // so that any notewakeup racing with the return does not
- // try to grant us the semaphore when we don't expect it.
- for {
- v := atomic.Loaduintptr(&n.key)
- switch v {
- case uintptr(unsafe.Pointer(gp.m)):
- // No wakeup yet; unregister if possible.
- if atomic.Casuintptr(&n.key, v, 0) {
- return false
- }
- case locked:
- // Wakeup happened so semaphore is available.
- // Grab it to avoid getting out of sync.
- gp.m.blocked = true
- if semasleep(-1) < 0 {
- throw("runtime: unable to acquire - semaphore out of sync")
- }
- gp.m.blocked = false
- return true
- default:
- throw("runtime: unexpected waitm - semaphore out of sync")
- }
- }
-}
-
-func notetsleep(n *note, ns int64) bool {
- gp := getg()
- if gp != gp.m.g0 {
- throw("notetsleep not on g0")
- }
- semacreate(gp.m)
- return notetsleep_internal(n, ns, nil, 0)
-}
-
-// same as runtime·notetsleep, but called on user g (not g0)
-// calls only nosplit functions between entersyscallblock/exitsyscall
-func notetsleepg(n *note, ns int64) bool {
- gp := getg()
- if gp == gp.m.g0 {
- throw("notetsleepg on g0")
- }
- semacreate(gp.m)
- entersyscallblock()
- ok := notetsleep_internal(n, ns, nil, 0)
- exitsyscall()
- return ok
-}
-
-func beforeIdle(int64, int64) (*g, bool) {
- return nil, false
-}
-
-func checkTimeouts() {}
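
Editor's note: the note protocol above packs three states into one word: 0 (idle), the waiting m's pointer, and the sentinel locked. Below is a minimal user-space sketch of the same one-shot handshake, not part of the diff; it assumes Go 1.19's sync/atomic wrapper types (the version this commit switches to) and uses a buffered channel to stand in for the per-m semaphore, which the real runtime of course cannot do.

    // oneshotnote.go — user-space analog of the runtime note (illustrative only).
    package main

    import (
        "fmt"
        "sync/atomic"
        "time"
    )

    const locked = 1 // sentinel meaning "wakeup already delivered"

    type note struct {
        key  atomic.Uintptr // 0 = idle, locked = woken, anything else = sleeper token
        sema chan struct{}  // stands in for the per-m semaphore
    }

    func newNote() *note { return &note{sema: make(chan struct{}, 1)} }

    // wakeup mirrors notewakeup: publish locked, then wake a registered sleeper.
    func (n *note) wakeup() {
        switch v := n.key.Swap(locked); v {
        case 0:
            // Nothing was waiting. Done.
        case locked:
            panic("double wakeup")
        default:
            n.sema <- struct{}{} // release the sleeper, like semawakeup
        }
    }

    // sleep mirrors notesleep: register, then block unless already woken.
    func (n *note) sleep() {
        if !n.key.CompareAndSwap(0, 2) {
            return // wakeup raced ahead of us; key must already be locked
        }
        <-n.sema
    }

    func main() {
        n := newNote()
        go func() {
            time.Sleep(10 * time.Millisecond)
            n.wakeup()
        }()
        n.sleep()
        fmt.Println("woken")
    }
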
diff --git a/contrib/go/_std_1.18/src/runtime/lockrank.go b/contrib/go/_std_1.18/src/runtime/lockrank.go
deleted file mode 100644
index 4a16bc0ddb..0000000000
--- a/contrib/go/_std_1.18/src/runtime/lockrank.go
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file records the static ranks of the locks in the runtime. If a lock
-// is not given a rank, then it is assumed to be a leaf lock, which means no other
-// lock can be acquired while it is held. Therefore, leaf locks do not need to be
-// given an explicit rank. We list all of the architecture-independent leaf locks
-// for documentation purposes, but don't list any of the architecture-dependent
-// locks (which are all leaf locks). debugLock is ignored for ranking, since it is used
-// when printing out lock ranking errors.
-//
-// lockInit(l *mutex, rank lockRank) is used to set the rank of a lock before it is used.
-// If there is no clear place to initialize a lock, then the rank of a lock can be
-// specified during the lock call itself via lockWithRank(l *mutex, rank lockRank).
-//
-// Besides the static lock ranking (which is a total ordering of the locks), we
-// also represent and enforce the actual partial order among the locks in the
-// arcs[] array below. That is, if it is possible that lock B can be acquired when
-// lock A is the previous acquired lock that is still held, then there should be
-// an entry for A in arcs[B][]. We will currently fail not only if the total order
-// (the lock ranking) is violated, but also if there is a missing entry in the
-// partial order.
-
-package runtime
-
-type lockRank int
-
-// Constants representing the lock rank of the architecture-independent locks in
-// the runtime. Locks with lower rank must be taken before locks with higher
-// rank.
-const (
- lockRankDummy lockRank = iota
-
- // Locks held above sched
- lockRankSysmon
- lockRankScavenge
- lockRankForcegc
- lockRankSweepWaiters
- lockRankAssistQueue
- lockRankCpuprof
- lockRankSweep
-
- lockRankPollDesc
- lockRankSched
- lockRankDeadlock
- lockRankAllg
- lockRankAllp
-
- lockRankTimers // Multiple timers locked simultaneously in destroy()
- lockRankItab
- lockRankReflectOffs
- lockRankHchan // Multiple hchans acquired in lock order in syncadjustsudogs()
- lockRankTraceBuf
- lockRankFin
- lockRankNotifyList
- lockRankTraceStrings
- lockRankMspanSpecial
- lockRankProf
- lockRankGcBitsArenas
- lockRankRoot
- lockRankTrace
- lockRankTraceStackTab
- lockRankNetpollInit
-
- lockRankRwmutexW
- lockRankRwmutexR
-
- lockRankSpanSetSpine
- lockRankGscan
- lockRankStackpool
- lockRankStackLarge
- lockRankDefer
- lockRankSudog
-
- // Memory-related non-leaf locks
- lockRankWbufSpans
- lockRankMheap
- lockRankMheapSpecial
-
- // Memory-related leaf locks
- lockRankGlobalAlloc
- lockRankPageAllocScav
-
- // Other leaf locks
- lockRankGFree
- // Generally, hchan must be acquired before gscan. But in one specific
- // case (in syncadjustsudogs from markroot after the g has been suspended
- // by suspendG), we allow gscan to be acquired, and then an hchan lock. To
-	// allow this case, we use this lockRankHchanLeaf rank in
- // syncadjustsudogs(), rather than lockRankHchan. By using this special
- // rank, we don't allow any further locks to be acquired other than more
- // hchan locks.
- lockRankHchanLeaf
- lockRankPanic
-
- // Leaf locks with no dependencies, so these constants are not actually used anywhere.
- // There are other architecture-dependent leaf locks as well.
- lockRankNewmHandoff
- lockRankDebugPtrmask
- lockRankFaketimeState
- lockRankTicks
- lockRankRaceFini
- lockRankPollCache
- lockRankDebug
-)
-
-// lockRankLeafRank is the rank of a lock that does not have a declared rank,
-// and hence is a leaf lock.
-const lockRankLeafRank lockRank = 1000
-
-// lockNames gives the names associated with each of the above ranks
-var lockNames = []string{
- lockRankDummy: "",
-
- lockRankSysmon: "sysmon",
- lockRankScavenge: "scavenge",
- lockRankForcegc: "forcegc",
- lockRankSweepWaiters: "sweepWaiters",
- lockRankAssistQueue: "assistQueue",
- lockRankCpuprof: "cpuprof",
- lockRankSweep: "sweep",
-
- lockRankPollDesc: "pollDesc",
- lockRankSched: "sched",
- lockRankDeadlock: "deadlock",
- lockRankAllg: "allg",
- lockRankAllp: "allp",
-
- lockRankTimers: "timers",
- lockRankItab: "itab",
- lockRankReflectOffs: "reflectOffs",
-
- lockRankHchan: "hchan",
- lockRankTraceBuf: "traceBuf",
- lockRankFin: "fin",
- lockRankNotifyList: "notifyList",
- lockRankTraceStrings: "traceStrings",
- lockRankMspanSpecial: "mspanSpecial",
- lockRankProf: "prof",
- lockRankGcBitsArenas: "gcBitsArenas",
- lockRankRoot: "root",
- lockRankTrace: "trace",
- lockRankTraceStackTab: "traceStackTab",
- lockRankNetpollInit: "netpollInit",
-
- lockRankRwmutexW: "rwmutexW",
- lockRankRwmutexR: "rwmutexR",
-
- lockRankSpanSetSpine: "spanSetSpine",
- lockRankGscan: "gscan",
- lockRankStackpool: "stackpool",
- lockRankStackLarge: "stackLarge",
- lockRankDefer: "defer",
- lockRankSudog: "sudog",
-
- lockRankWbufSpans: "wbufSpans",
- lockRankMheap: "mheap",
- lockRankMheapSpecial: "mheapSpecial",
-
- lockRankGlobalAlloc: "globalAlloc.mutex",
- lockRankPageAllocScav: "pageAlloc.scav.lock",
-
- lockRankGFree: "gFree",
- lockRankHchanLeaf: "hchanLeaf",
- lockRankPanic: "panic",
-
- lockRankNewmHandoff: "newmHandoff.lock",
- lockRankDebugPtrmask: "debugPtrmask.lock",
- lockRankFaketimeState: "faketimeState.lock",
- lockRankTicks: "ticks.lock",
- lockRankRaceFini: "raceFiniLock",
- lockRankPollCache: "pollCache.lock",
- lockRankDebug: "debugLock",
-}
-
-func (rank lockRank) String() string {
- if rank == 0 {
- return "UNKNOWN"
- }
- if rank == lockRankLeafRank {
- return "LEAF"
- }
- return lockNames[rank]
-}
-
-// lockPartialOrder is a partial order among the various lock types, listing the
-// immediate ordering that has actually been observed in the runtime. Each entry
-// (which corresponds to a particular lock rank) specifies the list of locks
-// that can already be held immediately "above" it.
-//
-// So, for example, the lockRankSched entry shows that all the locks preceding
-// it in rank can actually be held. The allp lock shows that only the sysmon or
-// sched lock can be held immediately above it when it is acquired.
-var lockPartialOrder [][]lockRank = [][]lockRank{
- lockRankDummy: {},
- lockRankSysmon: {},
- lockRankScavenge: {lockRankSysmon},
- lockRankForcegc: {lockRankSysmon},
- lockRankSweepWaiters: {},
- lockRankAssistQueue: {},
- lockRankCpuprof: {},
- lockRankSweep: {},
- lockRankPollDesc: {},
- lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc},
- lockRankDeadlock: {lockRankDeadlock},
- lockRankAllg: {lockRankSysmon, lockRankSched},
- lockRankAllp: {lockRankSysmon, lockRankSched},
- lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankPollDesc, lockRankSched, lockRankAllp, lockRankTimers},
- lockRankItab: {},
- lockRankReflectOffs: {lockRankItab},
- lockRankHchan: {lockRankScavenge, lockRankSweep, lockRankHchan},
- lockRankTraceBuf: {lockRankSysmon, lockRankScavenge},
- lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankSched, lockRankAllg, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf},
- lockRankNotifyList: {},
- lockRankTraceStrings: {lockRankTraceBuf},
- lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
- lockRankProf: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
- lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
- lockRankRoot: {},
- lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankAssistQueue, lockRankSweep, lockRankSched, lockRankHchan, lockRankTraceBuf, lockRankTraceStrings, lockRankRoot},
- lockRankTraceStackTab: {lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankSched, lockRankAllg, lockRankTimers, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankRoot, lockRankTrace},
- lockRankNetpollInit: {lockRankTimers},
-
- lockRankRwmutexW: {},
- lockRankRwmutexR: {lockRankSysmon, lockRankRwmutexW},
-
- lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
- lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankProf, lockRankGcBitsArenas, lockRankRoot, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankSpanSetSpine},
- lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankProf, lockRankGcBitsArenas, lockRankRoot, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankRwmutexR, lockRankSpanSetSpine, lockRankGscan},
- lockRankStackLarge: {lockRankSysmon, lockRankAssistQueue, lockRankSched, lockRankItab, lockRankHchan, lockRankProf, lockRankGcBitsArenas, lockRankRoot, lockRankSpanSetSpine, lockRankGscan},
- lockRankDefer: {},
- lockRankSudog: {lockRankHchan, lockRankNotifyList},
- lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankAllg, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankMspanSpecial, lockRankProf, lockRankRoot, lockRankGscan, lockRankDefer, lockRankSudog},
- lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankMspanSpecial, lockRankProf, lockRankGcBitsArenas, lockRankRoot, lockRankSpanSetSpine, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankDefer, lockRankSudog, lockRankWbufSpans},
- lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
- lockRankGlobalAlloc: {lockRankProf, lockRankSpanSetSpine, lockRankMheap, lockRankMheapSpecial},
- lockRankPageAllocScav: {lockRankMheap},
-
- lockRankGFree: {lockRankSched},
- lockRankHchanLeaf: {lockRankGscan, lockRankHchanLeaf},
- lockRankPanic: {lockRankDeadlock}, // plus any other lock held on throw.
-
- lockRankNewmHandoff: {},
- lockRankDebugPtrmask: {},
- lockRankFaketimeState: {},
- lockRankTicks: {},
- lockRankRaceFini: {},
- lockRankPollCache: {},
- lockRankDebug: {},
-}
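
Editor's note: as a rough illustration of the check that lockWithRank performs when static lock ranking is enabled, here is a toy checker that enforces only the total order described above. The real implementation also validates the explicit arcs in lockPartialOrder and tracks held locks per m; the names below are invented for the sketch.

    // rankcheck.go — toy static lock ranking (total order only; the real
    // checker also validates lockPartialOrder arcs and is per-m, not global).
    package main

    import "fmt"

    type rankedMutex struct {
        name string
        rank int
    }

    // held is the stack of locks held by this single-threaded demo.
    var held []*rankedMutex

    func lockWithRank(l *rankedMutex) {
        if n := len(held); n > 0 && held[n-1].rank >= l.rank {
            fmt.Printf("bad lock order: %s (rank %d) while holding %s (rank %d)\n",
                l.name, l.rank, held[n-1].name, held[n-1].rank)
        }
        held = append(held, l)
    }

    func unlock() { held = held[:len(held)-1] } // assume LIFO release

    func main() {
        sched := &rankedMutex{name: "sched", rank: 10}
        allg := &rankedMutex{name: "allg", rank: 20}

        lockWithRank(sched)
        lockWithRank(allg) // fine: rank strictly increases
        unlock()
        unlock()

        lockWithRank(allg)
        lockWithRank(sched) // flagged: rank decreases
    }
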
diff --git a/contrib/go/_std_1.18/src/runtime/lockrank_off.go b/contrib/go/_std_1.18/src/runtime/lockrank_off.go
deleted file mode 100644
index daa45b542d..0000000000
--- a/contrib/go/_std_1.18/src/runtime/lockrank_off.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.staticlockranking
-
-package runtime
-
-// lockRankStruct is embedded in mutex, but is empty when staticlockranking is
-// disabled (the default).
-type lockRankStruct struct {
-}
-
-func lockInit(l *mutex, rank lockRank) {
-}
-
-func getLockRank(l *mutex) lockRank {
- return 0
-}
-
-func lockWithRank(l *mutex, rank lockRank) {
- lock2(l)
-}
-
-// This function may be called in nosplit context and thus must be nosplit.
-//go:nosplit
-func acquireLockRank(rank lockRank) {
-}
-
-func unlockWithRank(l *mutex) {
- unlock2(l)
-}
-
-// This function may be called in nosplit context and thus must be nosplit.
-//go:nosplit
-func releaseLockRank(rank lockRank) {
-}
-
-func lockWithRankMayAcquire(l *mutex, rank lockRank) {
-}
-
-//go:nosplit
-func assertLockHeld(l *mutex) {
-}
-
-//go:nosplit
-func assertRankHeld(r lockRank) {
-}
-
-//go:nosplit
-func worldStopped() {
-}
-
-//go:nosplit
-func worldStarted() {
-}
-
-//go:nosplit
-func assertWorldStopped() {
-}
-
-//go:nosplit
-func assertWorldStoppedOrLockHeld(l *mutex) {
-}
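
Editor's note: this file is the standard build-tag stub pattern: the same API exists in a ranked and an unranked variant selected by //go:build, and the default build compiles every entry point down to an empty, inlinable body. A minimal sketch of the same pattern, with a hypothetical debugtrace tag and trace package:

    // trace_on.go (built with: go build -tags debugtrace)
    //go:build debugtrace

    package trace

    import "fmt"

    // Event reports msg when the debugtrace tag is present.
    func Event(msg string) { fmt.Println("trace:", msg) }

    // trace_off.go (the default build)
    //go:build !debugtrace

    package trace

    // Event is a no-op that the compiler inlines away, so call sites
    // cost nothing when tracing is compiled out.
    func Event(msg string) {}
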
diff --git a/contrib/go/_std_1.18/src/runtime/malloc.go b/contrib/go/_std_1.18/src/runtime/malloc.go
deleted file mode 100644
index 6ed6ceade2..0000000000
--- a/contrib/go/_std_1.18/src/runtime/malloc.go
+++ /dev/null
@@ -1,1564 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Memory allocator.
-//
-// This was originally based on tcmalloc, but has diverged quite a bit.
-// http://goog-perftools.sourceforge.net/doc/tcmalloc.html
-
-// The main allocator works in runs of pages.
-// Small allocation sizes (up to and including 32 kB) are
-// rounded to one of about 70 size classes, each of which
-// has its own free set of objects of exactly that size.
-// Any free page of memory can be split into a set of objects
-// of one size class, which are then managed using a free bitmap.
-//
-// The allocator's data structures are:
-//
-// fixalloc: a free-list allocator for fixed-size off-heap objects,
-// used to manage storage used by the allocator.
-// mheap: the malloc heap, managed at page (8192-byte) granularity.
-// mspan: a run of in-use pages managed by the mheap.
-// mcentral: collects all spans of a given size class.
-// mcache: a per-P cache of mspans with free space.
-// mstats: allocation statistics.
-//
-// Allocating a small object proceeds up a hierarchy of caches:
-//
-// 1. Round the size up to one of the small size classes
-// and look in the corresponding mspan in this P's mcache.
-// Scan the mspan's free bitmap to find a free slot.
-// If there is a free slot, allocate it.
-// This can all be done without acquiring a lock.
-//
-// 2. If the mspan has no free slots, obtain a new mspan
-// from the mcentral's list of mspans of the required size
-// class that have free space.
-// Obtaining a whole span amortizes the cost of locking
-// the mcentral.
-//
-// 3. If the mcentral's mspan list is empty, obtain a run
-// of pages from the mheap to use for the mspan.
-//
-// 4. If the mheap is empty or has no page runs large enough,
-// allocate a new group of pages (at least 1MB) from the
-// operating system. Allocating a large run of pages
-// amortizes the cost of talking to the operating system.
-//
-// Sweeping an mspan and freeing objects on it proceeds up a similar
-// hierarchy:
-//
-// 1. If the mspan is being swept in response to allocation, it
-// is returned to the mcache to satisfy the allocation.
-//
-// 2. Otherwise, if the mspan still has allocated objects in it,
-// it is placed on the mcentral free list for the mspan's size
-// class.
-//
-// 3. Otherwise, if all objects in the mspan are free, the mspan's
-// pages are returned to the mheap and the mspan is now dead.
-//
-// Allocating and freeing a large object uses the mheap
-// directly, bypassing the mcache and mcentral.
-//
-// If mspan.needzero is false, then free object slots in the mspan are
-// already zeroed. Otherwise if needzero is true, objects are zeroed as
-// they are allocated. There are various benefits to delaying zeroing
-// this way:
-//
-// 1. Stack frame allocation can avoid zeroing altogether.
-//
-// 2. It exhibits better temporal locality, since the program is
-// probably about to write to the memory.
-//
-// 3. We don't zero pages that never get reused.
-
-// Virtual memory layout
-//
-// The heap consists of a set of arenas, which are 64MB on 64-bit and
-// 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
-// aligned to the arena size.
-//
-// Each arena has an associated heapArena object that stores the
-// metadata for that arena: the heap bitmap for all words in the arena
-// and the span map for all pages in the arena. heapArena objects are
-// themselves allocated off-heap.
-//
-// Since arenas are aligned, the address space can be viewed as a
-// series of arena frames. The arena map (mheap_.arenas) maps from
-// arena frame number to *heapArena, or nil for parts of the address
-// space not backed by the Go heap. The arena map is structured as a
-// two-level array consisting of a "L1" arena map and many "L2" arena
-// maps; however, since arenas are large, on many architectures, the
-// arena map consists of a single, large L2 map.
-//
-// The arena map covers the entire possible address space, allowing
-// the Go heap to use any part of the address space. The allocator
-// attempts to keep arenas contiguous so that large spans (and hence
-// large objects) can cross arenas.
-
-package runtime
-
-import (
- "internal/goarch"
- "internal/goos"
- "runtime/internal/atomic"
- "runtime/internal/math"
- "runtime/internal/sys"
- "unsafe"
-)
-
-const (
- debugMalloc = false
-
- maxTinySize = _TinySize
- tinySizeClass = _TinySizeClass
- maxSmallSize = _MaxSmallSize
-
- pageShift = _PageShift
- pageSize = _PageSize
- pageMask = _PageMask
- // By construction, single page spans of the smallest object class
- // have the most objects per span.
- maxObjsPerSpan = pageSize / 8
-
- concurrentSweep = _ConcurrentSweep
-
- _PageSize = 1 << _PageShift
- _PageMask = _PageSize - 1
-
- // _64bit = 1 on 64-bit systems, 0 on 32-bit systems
- _64bit = 1 << (^uintptr(0) >> 63) / 2
-
- // Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
- _TinySize = 16
- _TinySizeClass = int8(2)
-
- _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc
-
- // Per-P, per order stack segment cache size.
- _StackCacheSize = 32 * 1024
-
- // Number of orders that get caching. Order 0 is FixedStack
- // and each successive order is twice as large.
- // We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
- // will be allocated directly.
- // Since FixedStack is different on different systems, we
- // must vary NumStackOrders to keep the same maximum cached size.
- // OS | FixedStack | NumStackOrders
- // -----------------+------------+---------------
- // linux/darwin/bsd | 2KB | 4
- // windows/32 | 4KB | 3
- // windows/64 | 8KB | 2
- // plan9 | 4KB | 3
- _NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
-
- // heapAddrBits is the number of bits in a heap address. On
- // amd64, addresses are sign-extended beyond heapAddrBits. On
- // other arches, they are zero-extended.
- //
- // On most 64-bit platforms, we limit this to 48 bits based on a
- // combination of hardware and OS limitations.
- //
- // amd64 hardware limits addresses to 48 bits, sign-extended
- // to 64 bits. Addresses where the top 16 bits are not either
- // all 0 or all 1 are "non-canonical" and invalid. Because of
- // these "negative" addresses, we offset addresses by 1<<47
- // (arenaBaseOffset) on amd64 before computing indexes into
- // the heap arenas index. In 2017, amd64 hardware added
- // support for 57 bit addresses; however, currently only Linux
- // supports this extension and the kernel will never choose an
- // address above 1<<47 unless mmap is called with a hint
- // address above 1<<47 (which we never do).
- //
- // arm64 hardware (as of ARMv8) limits user addresses to 48
- // bits, in the range [0, 1<<48).
- //
- // ppc64, mips64, and s390x support arbitrary 64 bit addresses
- // in hardware. On Linux, Go leans on stricter OS limits. Based
- // on Linux's processor.h, the user address space is limited as
- // follows on 64-bit architectures:
- //
- // Architecture Name Maximum Value (exclusive)
- // ---------------------------------------------------------------------
- // amd64 TASK_SIZE_MAX 0x007ffffffff000 (47 bit addresses)
- // arm64 TASK_SIZE_64 0x01000000000000 (48 bit addresses)
- // ppc64{,le} TASK_SIZE_USER64 0x00400000000000 (46 bit addresses)
- // mips64{,le} TASK_SIZE64 0x00010000000000 (40 bit addresses)
- // s390x TASK_SIZE 1<<64 (64 bit addresses)
- //
- // These limits may increase over time, but are currently at
- // most 48 bits except on s390x. On all architectures, Linux
- // starts placing mmap'd regions at addresses that are
- // significantly below 48 bits, so even if it's possible to
- // exceed Go's 48 bit limit, it's extremely unlikely in
- // practice.
- //
- // On 32-bit platforms, we accept the full 32-bit address
- // space because doing so is cheap.
- // mips32 only has access to the low 2GB of virtual memory, so
- // we further limit it to 31 bits.
- //
- // On ios/arm64, although 64-bit pointers are presumably
- // available, pointers are truncated to 33 bits in iOS <14.
- // Furthermore, only the top 4 GiB of the address space are
- // actually available to the application. In iOS >=14, more
- // of the address space is available, and the OS can now
- // provide addresses outside of those 33 bits. Pick 40 bits
- // as a reasonable balance between address space usage by the
- // page allocator, and flexibility for what mmap'd regions
- // we'll accept for the heap. We can't just move to the full
- // 48 bits because this uses too much address space for older
- // iOS versions.
- // TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64
- // to a 48-bit address space like every other arm64 platform.
- //
- // WebAssembly currently has a limit of 4GB linear memory.
- heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 40*goos.IsIos*goarch.IsArm64
-
- // maxAlloc is the maximum size of an allocation. On 64-bit,
- // it's theoretically possible to allocate 1<<heapAddrBits bytes. On
- // 32-bit, however, this is one less than 1<<32 because the
- // number of bytes in the address space doesn't actually fit
- // in a uintptr.
- maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1
-
- // The number of bits in a heap address, the size of heap
- // arenas, and the L1 and L2 arena map sizes are related by
- //
- // (1 << addr bits) = arena size * L1 entries * L2 entries
- //
- // Currently, we balance these as follows:
- //
- // Platform Addr bits Arena size L1 entries L2 entries
- // -------------- --------- ---------- ---------- -----------
- // */64-bit 48 64MB 1 4M (32MB)
- // windows/64-bit 48 4MB 64 1M (8MB)
- // ios/arm64 33 4MB 1 2048 (8KB)
- // */32-bit 32 4MB 1 1024 (4KB)
- // */mips(le) 31 4MB 1 512 (2KB)
-
- // heapArenaBytes is the size of a heap arena. The heap
- // consists of mappings of size heapArenaBytes, aligned to
- // heapArenaBytes. The initial heap mapping is one arena.
- //
- // This is currently 64MB on 64-bit non-Windows and 4MB on
- // 32-bit and on Windows. We use smaller arenas on Windows
- // because all committed memory is charged to the process,
- // even if it's not touched. Hence, for processes with small
- // heaps, the mapped arena space needs to be commensurate.
- // This is particularly important with the race detector,
- // since it significantly amplifies the cost of committed
- // memory.
- heapArenaBytes = 1 << logHeapArenaBytes
-
- // logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
- // prefer using heapArenaBytes where possible (we need the
- // constant to compute some other constants).
- logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64
-
- // heapArenaBitmapBytes is the size of each heap arena's bitmap.
- heapArenaBitmapBytes = heapArenaBytes / (goarch.PtrSize * 8 / 2)
-
- pagesPerArena = heapArenaBytes / pageSize
-
- // arenaL1Bits is the number of bits of the arena number
- // covered by the first level arena map.
- //
- // This number should be small, since the first level arena
- // map requires PtrSize*(1<<arenaL1Bits) of space in the
- // binary's BSS. It can be zero, in which case the first level
- // index is effectively unused. There is a performance benefit
- // to this, since the generated code can be more efficient,
- // but comes at the cost of having a large L2 mapping.
- //
- // We use the L1 map on 64-bit Windows because the arena size
- // is small, but the address space is still 48 bits, and
- // there's a high cost to having a large L2.
- arenaL1Bits = 6 * (_64bit * goos.IsWindows)
-
- // arenaL2Bits is the number of bits of the arena number
- // covered by the second level arena index.
- //
- // The size of each arena map allocation is proportional to
- // 1<<arenaL2Bits, so it's important that this not be too
- // large. 48 bits leads to 32MB arena index allocations, which
- // is about the practical threshold.
- arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits
-
- // arenaL1Shift is the number of bits to shift an arena frame
- // number by to compute an index into the first level arena map.
- arenaL1Shift = arenaL2Bits
-
- // arenaBits is the total bits in a combined arena map index.
- // This is split between the index into the L1 arena map and
- // the L2 arena map.
- arenaBits = arenaL1Bits + arenaL2Bits
-
- // arenaBaseOffset is the pointer value that corresponds to
- // index 0 in the heap arena map.
- //
- // On amd64, the address space is 48 bits, sign extended to 64
- // bits. This offset lets us handle "negative" addresses (or
- // high addresses if viewed as unsigned).
- //
-	// On aix/ppc64, this offset lets us keep heapAddrBits at
-	// 48. Otherwise, it would be 60 in order to handle mmap addresses
-	// (in range 0x0a00000000000000 - 0x0afffffffffffff). But in this
-	// case, the memory reserved in (s *pageAlloc).init for chunks
-	// would cause significant slowdowns.
- //
- // On other platforms, the user address space is contiguous
- // and starts at 0, so no offset is necessary.
- arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
- // A typed version of this constant that will make it into DWARF (for viewcore).
- arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)
-
- // Max number of threads to run garbage collection.
- // 2, 3, and 4 are all plausible maximums depending
- // on the hardware details of the machine. The garbage
- // collector scales well to 32 cpus.
- _MaxGcproc = 32
-
- // minLegalPointer is the smallest possible legal pointer.
- // This is the smallest possible architectural page size,
- // since we assume that the first page is never mapped.
- //
- // This should agree with minZeroPage in the compiler.
- minLegalPointer uintptr = 4096
-)
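
Editor's note: to make the arena-map arithmetic above concrete, this sketch hard-codes the linux/amd64 values (48 address bits, 64MB arenas, no L1 level) and assumes, as arenaIndex in mheap.go does, that arenaBaseOffset is subtracted before dividing by the arena size.

    // arenaindex.go — arena-map index arithmetic with linux/amd64 constants.
    package main

    import "fmt"

    const (
        logHeapArenaBytes = 6 + 20 // 64MB arenas
        heapArenaBytes    = 1 << logHeapArenaBytes
        heapAddrBits      = 48
        arenaL1Bits       = 0 // single large L2 map on linux/amd64
        arenaL2Bits       = heapAddrBits - logHeapArenaBytes - arenaL1Bits
        arenaL1Shift      = arenaL2Bits

        arenaBaseOffset uintptr = 0xffff800000000000 // the amd64 value above
    )

    // arenaIndex computes the arena frame number for p; the real version
    // lives in mheap.go and subtracts arenaBaseOffset the same way.
    func arenaIndex(p uintptr) uintptr {
        return (p - arenaBaseOffset) / heapArenaBytes
    }

    func main() {
        p := uintptr(0x00c000000000) // a typical linux/amd64 Go heap address
        ri := arenaIndex(p)
        l1 := ri >> arenaL1Shift        // always 0 here since arenaL1Bits == 0
        l2 := ri & (1<<arenaL2Bits - 1) // index into the L2 arena map
        fmt.Printf("arena frame %d -> L1 %d, L2 %d (of %d)\n", ri, l1, l2, 1<<arenaL2Bits)
    }
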
-
-// physPageSize is the size in bytes of the OS's physical pages.
-// Mapping and unmapping operations must be done at multiples of
-// physPageSize.
-//
-// This must be set by the OS init code (typically in osinit) before
-// mallocinit.
-var physPageSize uintptr
-
-// physHugePageSize is the size in bytes of the OS's default physical huge
-// page size whose allocation is opaque to the application. It is assumed
-// and verified to be a power of two.
-//
-// If set, this must be set by the OS init code (typically in osinit) before
-// mallocinit. However, setting it at all is optional, and leaving the default
-// value is always safe (though potentially less efficient).
-//
-// Since physHugePageSize is always assumed to be a power of two,
-// physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
-// The purpose of physHugePageShift is to avoid doing divisions in
-// performance critical functions.
-var (
- physHugePageSize uintptr
- physHugePageShift uint
-)
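
Editor's note: the power-of-two shift that mallocinit later derives with a loop can equivalently be computed with math/bits. A small sketch, taking the common 2MB x86-64 huge page size as an assumed input:

    // hugepageshift.go — physHugePageShift without the loop in mallocinit:
    // for a power of two, the shift is the count of trailing zero bits.
    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        physHugePageSize := uint(2 << 20) // 2MB, the common x86-64 huge page
        if physHugePageSize&(physHugePageSize-1) != 0 {
            panic("not a power of two")
        }
        physHugePageShift := bits.TrailingZeros(physHugePageSize)
        fmt.Println("shift:", physHugePageShift) // prints 21
    }
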
-
-// OS memory management abstraction layer
-//
-// Regions of the address space managed by the runtime may be in one of four
-// states at any given time:
-// 1) None - Unreserved and unmapped, the default state of any region.
-// 2) Reserved - Owned by the runtime, but accessing it would cause a fault.
-// Does not count against the process' memory footprint.
-// 3) Prepared - Reserved, intended not to be backed by physical memory (though
-// an OS may implement this lazily). Can transition efficiently to
-// Ready. Accessing memory in such a region is undefined (may
-// fault, may give back unexpected zeroes, etc.).
-// 4) Ready - may be accessed safely.
-//
-// This set of states is more than is strictly necessary to support all the
-// currently supported platforms. One could get by with just None, Reserved, and
-// Ready. However, the Prepared state gives us flexibility for performance
-// purposes. For example, on POSIX-y operating systems, Reserved is usually a
-// private anonymous mmap'd region with PROT_NONE set, and to transition
-// to Ready would require setting PROT_READ|PROT_WRITE. However the
-// underspecification of Prepared lets us use just MADV_FREE to transition from
-// Ready to Prepared. Thus, with the Prepared state, we can set the permission
-// bits just once early on, and we can efficiently tell the OS that it's free to
-// take pages away from us when we don't strictly need them.
-//
-// For each OS there is a common set of helpers defined that transition
-// memory regions between these states. The helpers are as follows:
-//
-// sysAlloc transitions an OS-chosen region of memory from None to Ready.
-// More specifically, it obtains a large chunk of zeroed memory from the
-// operating system, typically on the order of a hundred kilobytes
-// or a megabyte. This memory is always immediately available for use.
-//
-// sysFree transitions a memory region from any state to None. Therefore, it
-// returns memory unconditionally. It is used if an out-of-memory error has been
-// detected midway through an allocation or to carve out an aligned section of
-// the address space. It is okay if sysFree is a no-op only if sysReserve always
-// returns a memory region aligned to the heap allocator's alignment
-// restrictions.
-//
-// sysReserve transitions a memory region from None to Reserved. It reserves
-// address space in such a way that it would cause a fatal fault upon access
-// (either via permissions or not committing the memory). Such a reservation is
-// thus never backed by physical memory.
-// If the pointer passed to it is non-nil, the caller wants the
-// reservation there, but sysReserve can still choose another
-// location if that one is unavailable.
-// NOTE: sysReserve returns OS-aligned memory, but the heap allocator
-// may use larger alignment, so the caller must be careful to realign the
-// memory obtained by sysReserve.
-//
-// sysMap transitions a memory region from Reserved to Prepared. It ensures the
-// memory region can be efficiently transitioned to Ready.
-//
-// sysUsed transitions a memory region from Prepared to Ready. It notifies the
-// operating system that the memory region is needed and ensures that the region
-// may be safely accessed. This is typically a no-op on systems that don't have
-// an explicit commit step and hard over-commit limits, but is critical on
-// Windows, for example.
-//
-// sysUnused transitions a memory region from Ready to Prepared. It notifies the
-// operating system that the physical pages backing this memory region are no
-// longer needed and can be reused for other purposes. The contents of a
-// sysUnused memory region are considered forfeit and the region must not be
-// accessed again until sysUsed is called.
-//
-// sysFault transitions a memory region from Ready or Prepared to Reserved. It
-// marks a region such that it will always fault if accessed. Used only for
-// debugging the runtime.
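
Editor's note: a Linux-only sketch of this state machine in terms of the raw system calls the comment alludes to: a PROT_NONE mmap for Reserved, mprotect for the transition to Ready, and madvise for the return to Prepared. MADV_DONTNEED is used here for portability across older kernels (the runtime prefers MADV_FREE where available), and error handling is kept minimal.

    //go:build linux

    // memstates.go — walking the None/Reserved/Prepared/Ready states by hand.
    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        const n = 1 << 20 // 1MB region

        // None -> Reserved: address space only; any access faults.
        mem, err := syscall.Mmap(-1, 0, n,
            syscall.PROT_NONE, syscall.MAP_ANON|syscall.MAP_PRIVATE)
        if err != nil {
            panic(err)
        }

        // Reserved -> Prepared -> Ready: make the region accessible.
        if err := syscall.Mprotect(mem, syscall.PROT_READ|syscall.PROT_WRITE); err != nil {
            panic(err)
        }
        mem[0] = 42 // safe now

        // Ready -> Prepared: tell the kernel the pages are reclaimable.
        if err := syscall.Madvise(mem, syscall.MADV_DONTNEED); err != nil {
            panic(err)
        }

        // Any state -> None.
        if err := syscall.Munmap(mem); err != nil {
            panic(err)
        }
        fmt.Println("walked None -> Reserved -> Ready -> Prepared -> None")
    }
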
-
-func mallocinit() {
- if class_to_size[_TinySizeClass] != _TinySize {
- throw("bad TinySizeClass")
- }
-
- if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
- // heapBits expects modular arithmetic on bitmap
- // addresses to work.
- throw("heapArenaBitmapBytes not a power of 2")
- }
-
- // Copy class sizes out for statistics table.
- for i := range class_to_size {
- memstats.by_size[i].size = uint32(class_to_size[i])
- }
-
- // Check physPageSize.
- if physPageSize == 0 {
- // The OS init code failed to fetch the physical page size.
- throw("failed to get system page size")
- }
- if physPageSize > maxPhysPageSize {
- print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
- throw("bad system page size")
- }
- if physPageSize < minPhysPageSize {
- print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
- throw("bad system page size")
- }
- if physPageSize&(physPageSize-1) != 0 {
- print("system page size (", physPageSize, ") must be a power of 2\n")
- throw("bad system page size")
- }
- if physHugePageSize&(physHugePageSize-1) != 0 {
- print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
- throw("bad system huge page size")
- }
- if physHugePageSize > maxPhysHugePageSize {
- // physHugePageSize is greater than the maximum supported huge page size.
- // Don't throw here, like in the other cases, since a system configured
- // in this way isn't wrong, we just don't have the code to support them.
- // Instead, silently set the huge page size to zero.
- physHugePageSize = 0
- }
- if physHugePageSize != 0 {
- // Since physHugePageSize is a power of 2, it suffices to increase
- // physHugePageShift until 1<<physHugePageShift == physHugePageSize.
- for 1<<physHugePageShift != physHugePageSize {
- physHugePageShift++
- }
- }
- if pagesPerArena%pagesPerSpanRoot != 0 {
- print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n")
- throw("bad pagesPerSpanRoot")
- }
- if pagesPerArena%pagesPerReclaimerChunk != 0 {
- print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
- throw("bad pagesPerReclaimerChunk")
- }
-
- // Initialize the heap.
- mheap_.init()
- mcache0 = allocmcache()
- lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas)
- lockInit(&proflock, lockRankProf)
- lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
-
- // Create initial arena growth hints.
- if goarch.PtrSize == 8 {
- // On a 64-bit machine, we pick the following hints
- // because:
- //
- // 1. Starting from the middle of the address space
- // makes it easier to grow out a contiguous range
- // without running in to some other mapping.
- //
- // 2. This makes Go heap addresses more easily
- // recognizable when debugging.
- //
- // 3. Stack scanning in gccgo is still conservative,
- // so it's important that addresses be distinguishable
- // from other data.
- //
-		// Starting at 0x00c0 means that the valid memory addresses
-		// will begin with 0x00c0, 0x00c1, ...
- // In little-endian, that's c0 00, c1 00, ... None of those are valid
- // UTF-8 sequences, and they are otherwise as far away from
- // ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
- // addresses. An earlier attempt to use 0x11f8 caused out of memory errors
- // on OS X during thread allocations. 0x00c0 causes conflicts with
- // AddressSanitizer which reserves all memory up to 0x0100.
- // These choices reduce the odds of a conservative garbage collector
- // not collecting memory because some non-pointer block of memory
- // had a bit pattern that matched a memory address.
- //
- // However, on arm64, we ignore all this advice above and slam the
- // allocation at 0x40 << 32 because when using 4k pages with 3-level
-		// translation buffers, the user address space is limited to 39 bits.
-		// On ios/arm64, the address space is even smaller.
-		//
-		// On AIX, mmap starts at 0x0A00000000000000 for 64-bit
-		// processes.
- for i := 0x7f; i >= 0; i-- {
- var p uintptr
- switch {
- case raceenabled:
- // The TSAN runtime requires the heap
- // to be in the range [0x00c000000000,
- // 0x00e000000000).
- p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
- if p >= uintptrMask&0x00e000000000 {
- continue
- }
- case GOARCH == "arm64" && GOOS == "ios":
- p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
- case GOARCH == "arm64":
- p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
- case GOOS == "aix":
- if i == 0 {
- // We don't use addresses directly after 0x0A00000000000000
-				// to avoid collisions with other mmaps done by non-Go programs.
- continue
- }
- p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
- default:
- p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
- }
- hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
- hint.addr = p
- hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
- }
- } else {
- // On a 32-bit machine, we're much more concerned
- // about keeping the usable heap contiguous.
- // Hence:
- //
- // 1. We reserve space for all heapArenas up front so
- // they don't get interleaved with the heap. They're
- // ~258MB, so this isn't too bad. (We could reserve a
- // smaller amount of space up front if this is a
- // problem.)
- //
- // 2. We hint the heap to start right above the end of
- // the binary so we have the best chance of keeping it
- // contiguous.
- //
- // 3. We try to stake out a reasonably large initial
- // heap reservation.
-
- const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
- meta := uintptr(sysReserve(nil, arenaMetaSize))
- if meta != 0 {
- mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
- }
-
- // We want to start the arena low, but if we're linked
- // against C code, it's possible global constructors
- // have called malloc and adjusted the process' brk.
- // Query the brk so we can avoid trying to map the
- // region over it (which will cause the kernel to put
- // the region somewhere else, likely at a high
- // address).
- procBrk := sbrk0()
-
- // If we ask for the end of the data segment but the
- // operating system requires a little more space
- // before we can start allocating, it will give out a
- // slightly higher pointer. Except QEMU, which is
- // buggy, as usual: it won't adjust the pointer
- // upward. So adjust it upward a little bit ourselves:
- // 1/4 MB to get away from the running binary image.
- p := firstmoduledata.end
- if p < procBrk {
- p = procBrk
- }
- if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
- p = mheap_.heapArenaAlloc.end
- }
- p = alignUp(p+(256<<10), heapArenaBytes)
- // Because we're worried about fragmentation on
- // 32-bit, we try to make a large initial reservation.
- arenaSizes := []uintptr{
- 512 << 20,
- 256 << 20,
- 128 << 20,
- }
- for _, arenaSize := range arenaSizes {
- a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
- if a != nil {
- mheap_.arena.init(uintptr(a), size, false)
- p = mheap_.arena.end // For hint below
- break
- }
- }
- hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
- hint.addr = p
- hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
- }
-}
-
-// sysAlloc allocates heap arena space for at least n bytes. The
-// returned pointer is always heapArenaBytes-aligned and backed by
-// h.arenas metadata. The returned size is always a multiple of
-// heapArenaBytes. sysAlloc returns nil on failure.
-// There is no corresponding free function.
-//
-// sysAlloc returns a memory region in the Reserved state. This region must
-// be transitioned to Prepared and then Ready before use.
-//
-// h must be locked.
-func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
- assertLockHeld(&h.lock)
-
- n = alignUp(n, heapArenaBytes)
-
- // First, try the arena pre-reservation.
- v = h.arena.alloc(n, heapArenaBytes, &memstats.heap_sys)
- if v != nil {
- size = n
- goto mapped
- }
-
- // Try to grow the heap at a hint address.
- for h.arenaHints != nil {
- hint := h.arenaHints
- p := hint.addr
- if hint.down {
- p -= n
- }
- if p+n < p {
- // We can't use this, so don't ask.
- v = nil
- } else if arenaIndex(p+n-1) >= 1<<arenaBits {
- // Outside addressable heap. Can't use.
- v = nil
- } else {
- v = sysReserve(unsafe.Pointer(p), n)
- }
- if p == uintptr(v) {
- // Success. Update the hint.
- if !hint.down {
- p += n
- }
- hint.addr = p
- size = n
- break
- }
- // Failed. Discard this hint and try the next.
- //
- // TODO: This would be cleaner if sysReserve could be
- // told to only return the requested address. In
- // particular, this is already how Windows behaves, so
- // it would simplify things there.
- if v != nil {
- sysFree(v, n, nil)
- }
- h.arenaHints = hint.next
- h.arenaHintAlloc.free(unsafe.Pointer(hint))
- }
-
- if size == 0 {
- if raceenabled {
- // The race detector assumes the heap lives in
- // [0x00c000000000, 0x00e000000000), but we
- // just ran out of hints in this region. Give
- // a nice failure.
- throw("too many address space collisions for -race mode")
- }
-
- // All of the hints failed, so we'll take any
- // (sufficiently aligned) address the kernel will give
- // us.
- v, size = sysReserveAligned(nil, n, heapArenaBytes)
- if v == nil {
- return nil, 0
- }
-
- // Create new hints for extending this region.
- hint := (*arenaHint)(h.arenaHintAlloc.alloc())
- hint.addr, hint.down = uintptr(v), true
- hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
- hint = (*arenaHint)(h.arenaHintAlloc.alloc())
- hint.addr = uintptr(v) + size
- hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
- }
-
- // Check for bad pointers or pointers we can't use.
- {
- var bad string
- p := uintptr(v)
- if p+size < p {
- bad = "region exceeds uintptr range"
- } else if arenaIndex(p) >= 1<<arenaBits {
- bad = "base outside usable address space"
- } else if arenaIndex(p+size-1) >= 1<<arenaBits {
- bad = "end outside usable address space"
- }
- if bad != "" {
- // This should be impossible on most architectures,
- // but it would be really confusing to debug.
- print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
- throw("memory reservation exceeds address space limit")
- }
- }
-
- if uintptr(v)&(heapArenaBytes-1) != 0 {
- throw("misrounded allocation in sysAlloc")
- }
-
-mapped:
- // Create arena metadata.
- for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
- l2 := h.arenas[ri.l1()]
- if l2 == nil {
- // Allocate an L2 arena map.
- l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), goarch.PtrSize, nil))
- if l2 == nil {
- throw("out of memory allocating heap arena map")
- }
- atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
- }
-
- if l2[ri.l2()] != nil {
- throw("arena already initialized")
- }
- var r *heapArena
- r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
- if r == nil {
- r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
- if r == nil {
- throw("out of memory allocating heap arena metadata")
- }
- }
-
- // Add the arena to the arenas list.
- if len(h.allArenas) == cap(h.allArenas) {
- size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
- if size == 0 {
- size = physPageSize
- }
- newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
- if newArray == nil {
- throw("out of memory allocating allArenas")
- }
- oldSlice := h.allArenas
- *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
- copy(h.allArenas, oldSlice)
- // Do not free the old backing array because
- // there may be concurrent readers. Since we
- // double the array each time, this can lead
- // to at most 2x waste.
- }
- h.allArenas = h.allArenas[:len(h.allArenas)+1]
- h.allArenas[len(h.allArenas)-1] = ri
-
- // Store atomically just in case an object from the
- // new heap arena becomes visible before the heap lock
- // is released (which shouldn't happen, but there's
- // little downside to this).
- atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
- }
-
- // Tell the race detector about the new heap memory.
- if raceenabled {
- racemapshadow(v, size)
- }
-
- return
-}
-
-// sysReserveAligned is like sysReserve, but the returned pointer is
-// aligned to align bytes. It may reserve either n or n+align bytes,
-// so it returns the size that was reserved.
-func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
- // Since the alignment is rather large in uses of this
- // function, we're not likely to get it by chance, so we ask
- // for a larger region and remove the parts we don't need.
- retries := 0
-retry:
- p := uintptr(sysReserve(v, size+align))
- switch {
- case p == 0:
- return nil, 0
- case p&(align-1) == 0:
- // We got lucky and got an aligned region, so we can
- // use the whole thing.
- return unsafe.Pointer(p), size + align
- case GOOS == "windows":
- // On Windows we can't release pieces of a
- // reservation, so we release the whole thing and
- // re-reserve the aligned sub-region. This may race,
- // so we may have to try again.
- sysFree(unsafe.Pointer(p), size+align, nil)
- p = alignUp(p, align)
- p2 := sysReserve(unsafe.Pointer(p), size)
- if p != uintptr(p2) {
- // Must have raced. Try again.
- sysFree(p2, size, nil)
- if retries++; retries == 100 {
- throw("failed to allocate aligned heap memory; too many retries")
- }
- goto retry
- }
- // Success.
- return p2, size
- default:
- // Trim off the unaligned parts.
- pAligned := alignUp(p, align)
- sysFree(unsafe.Pointer(p), pAligned-p, nil)
- end := pAligned + size
- endLen := (p + size + align) - end
- if endLen > 0 {
- sysFree(unsafe.Pointer(end), endLen, nil)
- }
- return unsafe.Pointer(pAligned), size
- }
-}
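
Editor's note: the default case above is the classic over-allocate-and-trim trick. Ordinary Go code cannot hand the trimmed edges back to the OS the way sysFree does, but the same alignment arithmetic applies; a small sketch using a byte slice and the usual (p + a - 1) &^ (a - 1) rounding that alignUp performs:

    // alignedbuf.go — user-space analog of sysReserveAligned's default case:
    // over-allocate by the alignment, then take the aligned sub-slice. The
    // slack simply stays reachable via the original allocation.
    package main

    import (
        "fmt"
        "unsafe"
    )

    func alignUp(p, align uintptr) uintptr { return (p + align - 1) &^ (align - 1) }

    // alignedBytes returns a size-byte slice whose base address is a
    // multiple of align (align must be a power of two).
    func alignedBytes(size, align uintptr) []byte {
        raw := make([]byte, size+align)
        base := uintptr(unsafe.Pointer(&raw[0]))
        off := alignUp(base, align) - base
        return raw[off : off+size : off+size]
    }

    func main() {
        b := alignedBytes(4096, 4096)
        base := uintptr(unsafe.Pointer(&b[0]))
        fmt.Printf("base %#x aligned: %v\n", base, base%4096 == 0)
    }
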
-
-// base address for all 0-byte allocations
-var zerobase uintptr
-
-// nextFreeFast returns the next free object if one is quickly available.
-// Otherwise it returns 0.
-func nextFreeFast(s *mspan) gclinkptr {
- theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
- if theBit < 64 {
- result := s.freeindex + uintptr(theBit)
- if result < s.nelems {
- freeidx := result + 1
- if freeidx%64 == 0 && freeidx != s.nelems {
- return 0
- }
- s.allocCache >>= uint(theBit + 1)
- s.freeindex = freeidx
- s.allocCount++
- return gclinkptr(result*s.elemsize + s.base())
- }
- }
- return 0
-}
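
Editor's note: the core of nextFreeFast is a count-trailing-zeros scan over allocCache, a 64-bit window of the span's free bitmap in which a 1 bit means a free slot. The same scan in portable Go, with math/bits.TrailingZeros64 standing in for sys.Ctz64:

    // alloccache.go — the free-slot scan behind nextFreeFast, in miniature.
    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        var allocCache uint64 = 0b1011000 // slots 3, 4, and 6 are free
        freeindex := 0

        for allocCache != 0 {
            theBit := bits.TrailingZeros64(allocCache) // lowest free slot
            slot := freeindex + theBit
            // Consume the slot: shift the window past it, as nextFreeFast does.
            allocCache >>= uint(theBit + 1)
            freeindex = slot + 1
            fmt.Println("allocate slot", slot) // prints 3, 4, 6
        }
    }
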
-
-// nextFree returns the next free object from the cached span if one is available.
-// Otherwise it refills the cache with a span with an available object and
-// returns that object along with a flag indicating that this was a heavy
-// weight allocation. If it is a heavy weight allocation, the caller must
-// determine whether a new GC cycle needs to be started or, if the GC is
-// active, whether this goroutine needs to assist the GC.
-//
-// Must run in a non-preemptible context since otherwise the owner of
-// c could change.
-func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
- s = c.alloc[spc]
- shouldhelpgc = false
- freeIndex := s.nextFreeIndex()
- if freeIndex == s.nelems {
- // The span is full.
- if uintptr(s.allocCount) != s.nelems {
- println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
- throw("s.allocCount != s.nelems && freeIndex == s.nelems")
- }
- c.refill(spc)
- shouldhelpgc = true
- s = c.alloc[spc]
-
- freeIndex = s.nextFreeIndex()
- }
-
- if freeIndex >= s.nelems {
- throw("freeIndex is not valid")
- }
-
- v = gclinkptr(freeIndex*s.elemsize + s.base())
- s.allocCount++
- if uintptr(s.allocCount) > s.nelems {
- println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
- throw("s.allocCount > s.nelems")
- }
- return
-}
-
-// Allocate an object of size bytes.
-// Small objects are allocated from the per-P cache's free lists.
-// Large objects (> 32 kB) are allocated straight from the heap.
-func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
- if gcphase == _GCmarktermination {
- throw("mallocgc called with gcphase == _GCmarktermination")
- }
-
- if size == 0 {
- return unsafe.Pointer(&zerobase)
- }
- userSize := size
- if asanenabled {
-		// In the ASAN runtime library, malloc() allocates extra memory, the
-		// redzone, around the user-requested memory region and marks the
-		// redzone as unaddressable. We perform the same operation in Go to
-		// detect overflows or underflows.
- size += computeRZlog(size)
- }
-
- if debug.malloc {
- if debug.sbrk != 0 {
- align := uintptr(16)
- if typ != nil {
- // TODO(austin): This should be just
- // align = uintptr(typ.align)
- // but that's only 4 on 32-bit platforms,
- // even if there's a uint64 field in typ (see #599).
- // This causes 64-bit atomic accesses to panic.
- // Hence, we use stricter alignment that matches
- // the normal allocator better.
- if size&7 == 0 {
- align = 8
- } else if size&3 == 0 {
- align = 4
- } else if size&1 == 0 {
- align = 2
- } else {
- align = 1
- }
- }
- return persistentalloc(size, align, &memstats.other_sys)
- }
-
- if inittrace.active && inittrace.id == getg().goid {
- // Init functions are executed sequentially in a single goroutine.
- inittrace.allocs += 1
- }
- }
-
- // assistG is the G to charge for this allocation, or nil if
- // GC is not currently active.
- var assistG *g
- if gcBlackenEnabled != 0 {
- // Charge the current user G for this allocation.
- assistG = getg()
- if assistG.m.curg != nil {
- assistG = assistG.m.curg
- }
- // Charge the allocation against the G. We'll account
- // for internal fragmentation at the end of mallocgc.
- assistG.gcAssistBytes -= int64(size)
-
- if assistG.gcAssistBytes < 0 {
- // This G is in debt. Assist the GC to correct
- // this before allocating. This must happen
- // before disabling preemption.
- gcAssistAlloc(assistG)
- }
- }
-
- // Set mp.mallocing to keep from being preempted by GC.
- mp := acquirem()
- if mp.mallocing != 0 {
- throw("malloc deadlock")
- }
- if mp.gsignal == getg() {
- throw("malloc during signal")
- }
- mp.mallocing = 1
-
- shouldhelpgc := false
- dataSize := userSize
- c := getMCache(mp)
- if c == nil {
- throw("mallocgc called without a P or outside bootstrapping")
- }
- var span *mspan
- var x unsafe.Pointer
- noscan := typ == nil || typ.ptrdata == 0
- // In some cases block zeroing can profitably (for latency reduction purposes)
- // be delayed till preemption is possible; delayedZeroing tracks that state.
- delayedZeroing := false
- if size <= maxSmallSize {
- if noscan && size < maxTinySize {
- // Tiny allocator.
- //
- // Tiny allocator combines several tiny allocation requests
- // into a single memory block. The resulting memory block
- // is freed when all subobjects are unreachable. The subobjects
- // must be noscan (don't have pointers), this ensures that
- // the amount of potentially wasted memory is bounded.
- //
- // Size of the memory block used for combining (maxTinySize) is tunable.
-			// The current setting is 16 bytes, which gives at most 2x worst case
-			// memory wastage (when all but one of the subobjects are unreachable).
-			// 8 bytes would result in no wastage at all, but provides fewer
-			// opportunities for combining.
-			// 32 bytes provides more opportunities for combining,
-			// but can lead to 4x worst case wastage.
-			// The best-case saving is 8x regardless of block size.
-			//
-			// Objects obtained from the tiny allocator must not be freed explicitly.
-			// So when an object will be freed explicitly, we ensure that
-			// its size >= maxTinySize.
- //
-			// SetFinalizer has a special case for objects potentially coming
-			// from the tiny allocator; in that case it allows setting finalizers
-			// for an inner byte of a memory block.
- //
- // The main targets of tiny allocator are small strings and
- // standalone escaping variables. On a json benchmark
- // the allocator reduces number of allocations by ~12% and
- // reduces heap size by ~20%.
- off := c.tinyoffset
- // Align tiny pointer for required (conservative) alignment.
- if size&7 == 0 {
- off = alignUp(off, 8)
- } else if goarch.PtrSize == 4 && size == 12 {
- // Conservatively align 12-byte objects to 8 bytes on 32-bit
- // systems so that objects whose first field is a 64-bit
- // value is aligned to 8 bytes and does not cause a fault on
- // atomic access. See issue 37262.
- // TODO(mknyszek): Remove this workaround if/when issue 36606
- // is resolved.
- off = alignUp(off, 8)
- } else if size&3 == 0 {
- off = alignUp(off, 4)
- } else if size&1 == 0 {
- off = alignUp(off, 2)
- }
- if off+size <= maxTinySize && c.tiny != 0 {
- // The object fits into existing tiny block.
- x = unsafe.Pointer(c.tiny + off)
- c.tinyoffset = off + size
- c.tinyAllocs++
- mp.mallocing = 0
- releasem(mp)
- return x
- }
- // Allocate a new maxTinySize block.
- span = c.alloc[tinySpanClass]
- v := nextFreeFast(span)
- if v == 0 {
- v, span, shouldhelpgc = c.nextFree(tinySpanClass)
- }
- x = unsafe.Pointer(v)
- (*[2]uint64)(x)[0] = 0
- (*[2]uint64)(x)[1] = 0
- // See if we need to replace the existing tiny block with the new one
- // based on amount of remaining free space.
- if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
- // Note: disabled when race detector is on, see comment near end of this function.
- c.tiny = uintptr(x)
- c.tinyoffset = size
- }
- size = maxTinySize
- } else {
- var sizeclass uint8
- if size <= smallSizeMax-8 {
- sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
- } else {
- sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
- }
- size = uintptr(class_to_size[sizeclass])
- spc := makeSpanClass(sizeclass, noscan)
- span = c.alloc[spc]
- v := nextFreeFast(span)
- if v == 0 {
- v, span, shouldhelpgc = c.nextFree(spc)
- }
- x = unsafe.Pointer(v)
- if needzero && span.needzero != 0 {
- memclrNoHeapPointers(unsafe.Pointer(v), size)
- }
- }
- } else {
- shouldhelpgc = true
-		// For large allocations, keep track of zeroed state so that
-		// bulk zeroing can happen later in a preemptible context.
- span = c.allocLarge(size, noscan)
- span.freeindex = 1
- span.allocCount = 1
- size = span.elemsize
- x = unsafe.Pointer(span.base())
- if needzero && span.needzero != 0 {
- if noscan {
- delayedZeroing = true
- } else {
- memclrNoHeapPointers(x, size)
- // We've in theory cleared almost the whole span here,
- // and could take the extra step of actually clearing
- // the whole thing. However, don't. Any GC bits for the
- // uncleared parts will be zero, and it's just going to
- // be needzero = 1 once freed anyway.
- }
- }
- }
-
- var scanSize uintptr
- if !noscan {
- heapBitsSetType(uintptr(x), size, dataSize, typ)
- if dataSize > typ.size {
- // Array allocation. If there are any
- // pointers, GC has to scan to the last
- // element.
- if typ.ptrdata != 0 {
- scanSize = dataSize - typ.size + typ.ptrdata
- }
- } else {
- scanSize = typ.ptrdata
- }
- c.scanAlloc += scanSize
- }
-
- // Ensure that the stores above that initialize x to
- // type-safe memory and set the heap bits occur before
- // the caller can make x observable to the garbage
- // collector. Otherwise, on weakly ordered machines,
- // the garbage collector could follow a pointer to x,
- // but see uninitialized memory or stale heap bits.
- publicationBarrier()
-
- // Allocate black during GC.
- // All slots hold nil so no scanning is needed.
- // This may be racing with GC so do it atomically if there can be
- // a race marking the bit.
- if gcphase != _GCoff {
- gcmarknewobject(span, uintptr(x), size, scanSize)
- }
-
- if raceenabled {
- racemalloc(x, size)
- }
-
- if msanenabled {
- msanmalloc(x, size)
- }
-
- if asanenabled {
-		// We should only read/write the memory with the size asked for by the user.
-		// The rest of the allocated memory should be poisoned, so that we can report
-		// errors when accessing poisoned memory.
-		// The allocated memory is larger than the requested userSize; it also includes
-		// the redzone and some other padding bytes.
- rzBeg := unsafe.Add(x, userSize)
- asanpoison(rzBeg, size-userSize)
- asanunpoison(x, userSize)
- }
-
- if rate := MemProfileRate; rate > 0 {
-		// Note: cache c is only valid while m is acquired; see #47302
- if rate != 1 && size < c.nextSample {
- c.nextSample -= size
- } else {
- profilealloc(mp, x, size)
- }
- }
- mp.mallocing = 0
- releasem(mp)
-
-	// Pointer-free data can be zeroed late in a context where preemption can occur.
- // x will keep the memory alive.
- if delayedZeroing {
- if !noscan {
- throw("delayed zeroing on data that may contain pointers")
- }
- memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
- }
-
- if debug.malloc {
- if debug.allocfreetrace != 0 {
- tracealloc(x, size, typ)
- }
-
- if inittrace.active && inittrace.id == getg().goid {
- // Init functions are executed sequentially in a single goroutine.
- inittrace.bytes += uint64(size)
- }
- }
-
- if assistG != nil {
- // Account for internal fragmentation in the assist
- // debt now that we know it.
- assistG.gcAssistBytes -= int64(size - dataSize)
- }
-
- if shouldhelpgc {
- if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
- gcStart(t)
- }
- }
-
- if raceenabled && noscan && dataSize < maxTinySize {
- // Pad tinysize allocations so they are aligned with the end
- // of the tinyalloc region. This ensures that any arithmetic
- // that goes off the top end of the object will be detectable
- // by checkptr (issue 38872).
- // Note that we disable tinyalloc when raceenabled for this to work.
- // TODO: This padding is only performed when the race detector
- // is enabled. It would be nice to enable it if any package
- // was compiled with checkptr, but there's no easy way to
- // detect that (especially at compile time).
- // TODO: enable this padding for all allocations, not just
- // tinyalloc ones. It's tricky because of pointer maps.
- // Maybe just all noscan objects?
- x = add(x, size-dataSize)
- }
-
- return x
-}
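The tiny allocator's alignment ladder above can be exercised outside the runtime. The sketch below is illustrative only: alignUp is reimplemented locally, the names are not the runtime's, and the 32-bit 12-byte special case (issue 37262) is omitted.

package main

import "fmt"

// alignUp rounds n up to a multiple of a, where a must be a power of two.
func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

// tinyAlign mirrors the branch ladder in mallocgc: 8-byte alignment for
// sizes that are multiples of 8, then 4, then 2.
func tinyAlign(off, size uintptr) uintptr {
	switch {
	case size&7 == 0:
		return alignUp(off, 8)
	case size&3 == 0:
		return alignUp(off, 4)
	case size&1 == 0:
		return alignUp(off, 2)
	}
	return off
}

func main() {
	for _, size := range []uintptr{1, 2, 4, 8, 12} {
		fmt.Printf("off=5 size=%-2d -> aligned off=%d\n", size, tinyAlign(5, size))
	}
}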
-
-// memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
-// on chunks of the buffer to be zeroed, with opportunities for preemption
-// along the way. memclrNoHeapPointers contains no safepoints and also
-// cannot be preemptively scheduled, so this provides a still-efficient
-// block clear that can also be preempted at a reasonable granularity.
-//
-// Use this with care; if the data being cleared is tagged to contain
-// pointers, this allows the GC to run before it is all cleared.
-func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
- v := uintptr(x)
-	// The chunk size comes from benchmarking: 128k is too small, 512k is too large.
- const chunkBytes = 256 * 1024
- vsize := v + size
- for voff := v; voff < vsize; voff = voff + chunkBytes {
- if getg().preempt {
- // may hold locks, e.g., profiling
- goschedguarded()
- }
-		// clear min(avail, chunkBytes) bytes
- n := vsize - voff
- if n > chunkBytes {
- n = chunkBytes
- }
- memclrNoHeapPointers(unsafe.Pointer(voff), n)
- }
-}
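A user-level analogue of the chunked clearing above, as a sketch: runtime.Gosched stands in for the runtime's guarded reschedule, and a plain byte slice stands in for the raw memory.

package main

import "runtime"

const chunkBytes = 256 * 1024 // same granularity as the runtime's choice

// clearChunked zeroes buf in chunks, yielding between chunks so other
// goroutines can run; within a chunk the clear is uninterrupted.
func clearChunked(buf []byte) {
	for off := 0; off < len(buf); off += chunkBytes {
		runtime.Gosched() // preemption opportunity between chunks
		end := off + chunkBytes
		if end > len(buf) {
			end = len(buf)
		}
		for i := off; i < end; i++ {
			buf[i] = 0
		}
	}
}

func main() {
	clearChunked(make([]byte, 1<<20))
}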
-
-// newobject is the implementation of the new builtin.
-// The compiler (both frontend and SSA backend) knows the signature
-// of this function.
-func newobject(typ *_type) unsafe.Pointer {
- return mallocgc(typ.size, typ, true)
-}
-
-//go:linkname reflect_unsafe_New reflect.unsafe_New
-func reflect_unsafe_New(typ *_type) unsafe.Pointer {
- return mallocgc(typ.size, typ, true)
-}
-
-//go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
-func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
- return mallocgc(typ.size, typ, true)
-}
-
-// newarray allocates an array of n elements of type typ.
-func newarray(typ *_type, n int) unsafe.Pointer {
- if n == 1 {
- return mallocgc(typ.size, typ, true)
- }
- mem, overflow := math.MulUintptr(typ.size, uintptr(n))
- if overflow || mem > maxAlloc || n < 0 {
- panic(plainError("runtime: allocation size out of range"))
- }
- return mallocgc(mem, typ, true)
-}
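newarray's guard against overflowing the size computation can be reproduced with math/bits, as in this sketch. It assumes a 64-bit platform, and maxAlloc here is an illustrative placeholder, not the runtime's exact limit.

package main

import (
	"fmt"
	"math/bits"
)

const maxAlloc = 1 << 47 // illustrative cap, not the runtime's value

// checkedMul multiplies a*b and reports whether the product overflowed,
// mirroring math.MulUintptr.
func checkedMul(a, b uintptr) (uintptr, bool) {
	hi, lo := bits.Mul(uint(a), uint(b))
	return uintptr(lo), hi != 0
}

func main() {
	mem, overflow := checkedMul(24, 1<<40) // 2^40 elements of size 24
	fmt.Println(mem, overflow, mem > maxAlloc) // 26388279066624 false false

	_, overflow = checkedMul(1<<40, 1<<40)
	fmt.Println(overflow) // true: the product needs more than 64 bits
}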
-
-//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
-func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
- return newarray(typ, n)
-}
-
-func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
- c := getMCache(mp)
- if c == nil {
- throw("profilealloc called without a P or outside bootstrapping")
- }
- c.nextSample = nextSample()
- mProf_Malloc(x, size)
-}
-
-// nextSample returns the next sampling point for heap profiling. The goal is
-// to sample allocations on average every MemProfileRate bytes, but with a
-// completely random distribution over the allocation timeline; this
-// corresponds to a Poisson process with parameter MemProfileRate. In Poisson
-// processes, the distance between two samples follows an exponential
-// distribution with mean MemProfileRate, so the best return value is a random
-// number taken from an exponential distribution whose mean is MemProfileRate.
-func nextSample() uintptr {
- if MemProfileRate == 1 {
-		// Callers assign our return value to
-		// mcache.nextSample, but nextSample is not used
-		// when the rate is 1. So avoid the math below and
-		// just return something.
- return 0
- }
- if GOOS == "plan9" {
- // Plan 9 doesn't support floating point in note handler.
- if g := getg(); g == g.m.gsignal {
- return nextSampleNoFP()
- }
- }
-
- return uintptr(fastexprand(MemProfileRate))
-}
-
-// fastexprand returns a random number from an exponential distribution with
-// the specified mean.
-func fastexprand(mean int) int32 {
- // Avoid overflow. Maximum possible step is
- // -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
- switch {
- case mean > 0x7000000:
- mean = 0x7000000
- case mean == 0:
- return 0
- }
-
-	// Take a random sample of the exponential distribution exp(-x/mean).
-	// The probability density function is (1/mean)*exp(-x/mean), so the CDF is
-	// p = 1 - exp(-x/mean), so
-	// q = 1 - p == exp(-x/mean)
-	// log_e(q) = -x/mean
-	// x = -log_e(q) * mean
-	// x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency
- const randomBitCount = 26
- q := fastrandn(1<<randomBitCount) + 1
- qlog := fastlog2(float64(q)) - randomBitCount
- if qlog > 0 {
- qlog = 0
- }
- const minusLog2 = -0.6931471805599453 // -ln(2)
- return int32(qlog*(minusLog2*float64(mean))) + 1
-}
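The same derivation, restated as a sketch with the standard library: math.Log2 replaces the runtime's fastlog2 approximation, and the trailing +1 is dropped. q/2^26 is uniform in (0, 1], so -ln(q/2^26) * mean is exponential with the given mean.

package main

import (
	"fmt"
	"math"
	"math/rand"
)

func expRand(mean float64) float64 {
	const randomBitCount = 26
	q := float64(rand.Intn(1<<randomBitCount) + 1)
	qlog := math.Log2(q) - randomBitCount // log2 of a value in (0, 1]
	if qlog > 0 {
		qlog = 0
	}
	return qlog * -math.Ln2 * mean // -ln(q/2^26) * mean
}

func main() {
	sum := 0.0
	const n = 100000
	for i := 0; i < n; i++ {
		sum += expRand(512 * 1024) // the default MemProfileRate
	}
	fmt.Printf("empirical mean ~ %.0f (target 524288)\n", sum/n)
}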
-
-// nextSampleNoFP is similar to nextSample, but uses older,
-// simpler code to avoid floating point.
-func nextSampleNoFP() uintptr {
- // Set first allocation sample size.
- rate := MemProfileRate
- if rate > 0x3fffffff { // make 2*rate not overflow
- rate = 0x3fffffff
- }
- if rate != 0 {
- return uintptr(fastrandn(uint32(2 * rate)))
- }
- return 0
-}
-
-type persistentAlloc struct {
- base *notInHeap
- off uintptr
-}
-
-var globalAlloc struct {
- mutex
- persistentAlloc
-}
-
-// persistentChunkSize is the number of bytes we allocate when we grow
-// a persistentAlloc.
-const persistentChunkSize = 256 << 10
-
-// persistentChunks is a list of all the persistent chunks we have
-// allocated. The list is maintained through the first word in the
-// persistent chunk. This is updated atomically.
-var persistentChunks *notInHeap
-
-// Wrapper around sysAlloc that can allocate small chunks.
-// There is no associated free operation.
-// Intended for things like function/type/debug-related persistent data.
-// If align is 0, the default alignment (currently 8) is used.
-// The returned memory will be zeroed.
-//
-// Consider marking persistentalloc'd types go:notinheap.
-func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
- var p *notInHeap
- systemstack(func() {
- p = persistentalloc1(size, align, sysStat)
- })
- return unsafe.Pointer(p)
-}
-
-// Must run on system stack because stack growth can (re)invoke it.
-// See issue 9174.
-//go:systemstack
-func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
- const (
-		maxBlock = 64 << 10 // VM reservation granularity is 64K on Windows
- )
-
- if size == 0 {
- throw("persistentalloc: size == 0")
- }
- if align != 0 {
- if align&(align-1) != 0 {
- throw("persistentalloc: align is not a power of 2")
- }
- if align > _PageSize {
- throw("persistentalloc: align is too large")
- }
- } else {
- align = 8
- }
-
- if size >= maxBlock {
- return (*notInHeap)(sysAlloc(size, sysStat))
- }
-
- mp := acquirem()
- var persistent *persistentAlloc
- if mp != nil && mp.p != 0 {
- persistent = &mp.p.ptr().palloc
- } else {
- lock(&globalAlloc.mutex)
- persistent = &globalAlloc.persistentAlloc
- }
- persistent.off = alignUp(persistent.off, align)
- if persistent.off+size > persistentChunkSize || persistent.base == nil {
- persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
- if persistent.base == nil {
- if persistent == &globalAlloc.persistentAlloc {
- unlock(&globalAlloc.mutex)
- }
- throw("runtime: cannot allocate memory")
- }
-
- // Add the new chunk to the persistentChunks list.
- for {
- chunks := uintptr(unsafe.Pointer(persistentChunks))
- *(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
- if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
- break
- }
- }
- persistent.off = alignUp(goarch.PtrSize, align)
- }
- p := persistent.base.add(persistent.off)
- persistent.off += size
- releasem(mp)
- if persistent == &globalAlloc.persistentAlloc {
- unlock(&globalAlloc.mutex)
- }
-
- if sysStat != &memstats.other_sys {
- sysStat.add(int64(size))
- memstats.other_sys.add(-int64(size))
- }
- return p
-}
-
-// inPersistentAlloc reports whether p points to memory allocated by
-// persistentalloc. This must be nosplit because it is called by the
-// cgo checker code, which is called by the write barrier code.
-//go:nosplit
-func inPersistentAlloc(p uintptr) bool {
- chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
- for chunk != 0 {
- if p >= chunk && p < chunk+persistentChunkSize {
- return true
- }
- chunk = *(*uintptr)(unsafe.Pointer(chunk))
- }
- return false
-}
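The persistentChunks bookkeeping, restated without the unsafe pointer threading: the runtime stores the previous-chunk link in the first word of each chunk, while this sketch uses an explicit struct with illustrative addresses. The range check matches inPersistentAlloc.

package main

import "fmt"

const chunkSize = 256 << 10 // persistentChunkSize

type chunk struct {
	base uintptr // start address of the chunk (illustrative values below)
	prev *chunk  // link to the previously allocated chunk
}

var chunks *chunk

func addChunk(base uintptr) { chunks = &chunk{base: base, prev: chunks} }

// inChunks reports whether p falls inside any recorded chunk.
func inChunks(p uintptr) bool {
	for c := chunks; c != nil; c = c.prev {
		if p >= c.base && p < c.base+chunkSize {
			return true
		}
	}
	return false
}

func main() {
	addChunk(0x100000)
	addChunk(0x900000)
	fmt.Println(inChunks(0x100010), inChunks(0x500000)) // true false
}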
-
-// linearAlloc is a simple linear allocator that pre-reserves a region
-// of memory and then optionally maps that region into the Ready state
-// as needed.
-//
-// The caller is responsible for locking.
-type linearAlloc struct {
- next uintptr // next free byte
- mapped uintptr // one byte past end of mapped space
- end uintptr // end of reserved space
-
- mapMemory bool // transition memory from Reserved to Ready if true
-}
-
-func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
- if base+size < base {
- // Chop off the last byte. The runtime isn't prepared
- // to deal with situations where the bounds could overflow.
- // Leave that memory reserved, though, so we don't map it
- // later.
- size -= 1
- }
- l.next, l.mapped = base, base
- l.end = base + size
- l.mapMemory = mapMemory
-}
-
-func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
- p := alignUp(l.next, align)
- if p+size > l.end {
- return nil
- }
- l.next = p + size
- if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
- if l.mapMemory {
- // Transition from Reserved to Prepared to Ready.
- sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
- sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
- }
- l.mapped = pEnd
- }
- return unsafe.Pointer(p)
-}
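linearAlloc's align-then-bump arithmetic, as a heap-backed sketch: the Reserved/Prepared/Ready mapping transitions are omitted and a byte slice stands in for the reserved region.

package main

import "fmt"

type bumpAlloc struct {
	buf  []byte
	next int // index of the next free byte
}

func alignUpInt(n, a int) int { return (n + a - 1) &^ (a - 1) }

// alloc returns a size-byte slice aligned to align, or nil when the
// reserved space is exhausted.
func (b *bumpAlloc) alloc(size, align int) []byte {
	p := alignUpInt(b.next, align)
	if p+size > len(b.buf) {
		return nil
	}
	b.next = p + size
	return b.buf[p : p+size]
}

func main() {
	b := &bumpAlloc{buf: make([]byte, 64)}
	p1 := b.alloc(10, 8) // placed at 0, next becomes 10
	p2 := b.alloc(4, 8)  // aligned up to 16, next becomes 20
	fmt.Println(len(p1), len(p2), b.next) // 10 4 20
}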
-
-// notInHeap is off-heap memory allocated by a lower-level allocator
-// like sysAlloc or persistentAlloc.
-//
-// In general, it's better to use real types marked as go:notinheap,
-// but this serves as a generic type for situations where that isn't
-// possible (like in the allocators).
-//
-// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
-//
-//go:notinheap
-type notInHeap struct{}
-
-func (p *notInHeap) add(bytes uintptr) *notInHeap {
- return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
-}
-
-// computeRZlog computes the size of the redzone.
-// It mirrors the corresponding implementation in compiler-rt.
-func computeRZlog(userSize uintptr) uintptr {
- switch {
- case userSize <= (64 - 16):
- return 16 << 0
- case userSize <= (128 - 32):
- return 16 << 1
- case userSize <= (512 - 64):
- return 16 << 2
- case userSize <= (4096 - 128):
- return 16 << 3
- case userSize <= (1<<14)-256:
- return 16 << 4
- case userSize <= (1<<15)-512:
- return 16 << 5
- case userSize <= (1<<16)-1024:
- return 16 << 6
- default:
- return 16 << 7
- }
-}
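A quick demonstration of the redzone ladder: rerunning the same switch over a few sample sizes shows how the redzone grows with the allocation (sizes in bytes).

package main

import "fmt"

func rz(userSize uintptr) uintptr {
	switch {
	case userSize <= 64-16:
		return 16 << 0
	case userSize <= 128-32:
		return 16 << 1
	case userSize <= 512-64:
		return 16 << 2
	case userSize <= 4096-128:
		return 16 << 3
	case userSize <= (1<<14)-256:
		return 16 << 4
	case userSize <= (1<<15)-512:
		return 16 << 5
	case userSize <= (1<<16)-1024:
		return 16 << 6
	default:
		return 16 << 7
	}
}

func main() {
	for _, s := range []uintptr{8, 100, 1000, 20000, 1 << 20} {
		fmt.Printf("userSize=%-8d redzone=%d\n", s, rz(s))
	}
}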
diff --git a/contrib/go/_std_1.18/src/runtime/map.go b/contrib/go/_std_1.18/src/runtime/map.go
deleted file mode 100644
index e91b25eaec..0000000000
--- a/contrib/go/_std_1.18/src/runtime/map.go
+++ /dev/null
@@ -1,1416 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-// This file contains the implementation of Go's map type.
-//
-// A map is just a hash table. The data is arranged
-// into an array of buckets. Each bucket contains up to
-// 8 key/elem pairs. The low-order bits of the hash are
-// used to select a bucket. Each bucket contains a few
-// high-order bits of each hash to distinguish the entries
-// within a single bucket.
-//
-// If more than 8 keys hash to a bucket, we chain on
-// extra buckets.
-//
-// When the hashtable grows, we allocate a new array
-// of buckets twice as big. Buckets are incrementally
-// copied from the old bucket array to the new bucket array.
-//
-// Map iterators walk through the array of buckets and
-// return the keys in walk order (bucket #, then overflow
-// chain order, then bucket index). To maintain iteration
-// semantics, we never move keys within their bucket (if
-// we did, keys might be returned 0 or 2 times). When
-// growing the table, iterators remain iterating through the
-// old table and must check the new table if the bucket
-// they are iterating through has been moved ("evacuated")
-// to the new table.
-
-// Picking loadFactor: too large and we have lots of overflow
-// buckets, too small and we waste a lot of space. I wrote
-// a simple program to check some stats for different loads:
-// (64-bit, 8 byte keys and elems)
-// loadFactor %overflow bytes/entry hitprobe missprobe
-// 4.00 2.13 20.77 3.00 4.00
-// 4.50 4.05 17.30 3.25 4.50
-// 5.00 6.85 14.77 3.50 5.00
-// 5.50 10.55 12.94 3.75 5.50
-// 6.00 15.27 11.67 4.00 6.00
-// 6.50 20.90 10.79 4.25 6.50
-// 7.00 27.14 10.15 4.50 7.00
-// 7.50 34.03 9.73 4.75 7.50
-// 8.00 41.10 9.40 5.00 8.00
-//
-// %overflow = percentage of buckets which have an overflow bucket
-// bytes/entry = overhead bytes used per key/elem pair
-// hitprobe = # of entries to check when looking up a present key
-// missprobe = # of entries to check when looking up an absent key
-//
-// Keep in mind this data is for maximally loaded tables, i.e. just
-// before the table grows. Typical tables will be somewhat less loaded.
-
-import (
- "internal/abi"
- "internal/goarch"
- "runtime/internal/atomic"
- "runtime/internal/math"
- "unsafe"
-)
-
-const (
- // Maximum number of key/elem pairs a bucket can hold.
- bucketCntBits = 3
- bucketCnt = 1 << bucketCntBits
-
- // Maximum average load of a bucket that triggers growth is 6.5.
- // Represent as loadFactorNum/loadFactorDen, to allow integer math.
- loadFactorNum = 13
- loadFactorDen = 2
-
- // Maximum key or elem size to keep inline (instead of mallocing per element).
- // Must fit in a uint8.
-	// Fast versions cannot handle big elems - the cutoff size for
-	// fast versions in cmd/compile/internal/gc/walk.go must be at most this elem size.
- maxKeySize = 128
- maxElemSize = 128
-
- // data offset should be the size of the bmap struct, but needs to be
- // aligned correctly. For amd64p32 this means 64-bit alignment
-	// even though pointers are 32 bits wide.
- dataOffset = unsafe.Offsetof(struct {
- b bmap
- v int64
- }{}.v)
-
- // Possible tophash values. We reserve a few possibilities for special marks.
- // Each bucket (including its overflow buckets, if any) will have either all or none of its
- // entries in the evacuated* states (except during the evacuate() method, which only happens
- // during map writes and thus no one else can observe the map during that time).
- emptyRest = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
- emptyOne = 1 // this cell is empty
- evacuatedX = 2 // key/elem is valid. Entry has been evacuated to first half of larger table.
- evacuatedY = 3 // same as above, but evacuated to second half of larger table.
- evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
- minTopHash = 5 // minimum tophash for a normal filled cell.
-
- // flags
- iterator = 1 // there may be an iterator using buckets
- oldIterator = 2 // there may be an iterator using oldbuckets
- hashWriting = 4 // a goroutine is writing to the map
- sameSizeGrow = 8 // the current map growth is to a new map of the same size
-
- // sentinel bucket ID for iterator checks
- noCheck = 1<<(8*goarch.PtrSize) - 1
-)
-
-// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
-func isEmpty(x uint8) bool {
- return x <= emptyOne
-}
-
-// A header for a Go map.
-type hmap struct {
- // Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
- // Make sure this stays in sync with the compiler's definition.
- count int // # live cells == size of map. Must be first (used by len() builtin)
- flags uint8
- B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
- noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
- hash0 uint32 // hash seed
-
- buckets unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
- oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
- nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated)
-
- extra *mapextra // optional fields
-}
-
-// mapextra holds fields that are not present on all maps.
-type mapextra struct {
- // If both key and elem do not contain pointers and are inline, then we mark bucket
- // type as containing no pointers. This avoids scanning such maps.
- // However, bmap.overflow is a pointer. In order to keep overflow buckets
- // alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
- // overflow and oldoverflow are only used if key and elem do not contain pointers.
- // overflow contains overflow buckets for hmap.buckets.
- // oldoverflow contains overflow buckets for hmap.oldbuckets.
-	// The indirection allows storing a pointer to the slice in hiter.
- overflow *[]*bmap
- oldoverflow *[]*bmap
-
- // nextOverflow holds a pointer to a free overflow bucket.
- nextOverflow *bmap
-}
-
-// A bucket for a Go map.
-type bmap struct {
- // tophash generally contains the top byte of the hash value
- // for each key in this bucket. If tophash[0] < minTopHash,
- // tophash[0] is a bucket evacuation state instead.
- tophash [bucketCnt]uint8
- // Followed by bucketCnt keys and then bucketCnt elems.
- // NOTE: packing all the keys together and then all the elems together makes the
- // code a bit more complicated than alternating key/elem/key/elem/... but it allows
- // us to eliminate padding which would be needed for, e.g., map[int64]int8.
- // Followed by an overflow pointer.
-}
-
-// A hash iteration structure.
-// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
-// and reflect/value.go to match the layout of this structure.
-type hiter struct {
- key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
- elem unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
- t *maptype
- h *hmap
- buckets unsafe.Pointer // bucket ptr at hash_iter initialization time
- bptr *bmap // current bucket
- overflow *[]*bmap // keeps overflow buckets of hmap.buckets alive
- oldoverflow *[]*bmap // keeps overflow buckets of hmap.oldbuckets alive
- startBucket uintptr // bucket iteration started at
- offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
- wrapped bool // already wrapped around from end of bucket array to beginning
- B uint8
- i uint8
- bucket uintptr
- checkBucket uintptr
-}
-
-// bucketShift returns 1<<b, optimized for code generation.
-func bucketShift(b uint8) uintptr {
- // Masking the shift amount allows overflow checks to be elided.
- return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
-}
-
-// bucketMask returns 1<<b - 1, optimized for code generation.
-func bucketMask(b uint8) uintptr {
- return bucketShift(b) - 1
-}
-
-// tophash calculates the tophash value for hash.
-func tophash(hash uintptr) uint8 {
- top := uint8(hash >> (goarch.PtrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
- return top
-}
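The three helpers above, in a self-contained sketch that assumes a 64-bit platform (goarch.PtrSize == 8):

package main

import "fmt"

const (
	ptrSize    = 8 // stand-in for goarch.PtrSize
	minTopHash = 5
)

func bucketShift(b uint8) uintptr {
	// Masking the shift amount lets the compiler elide its overflow check.
	return uintptr(1) << (b & (ptrSize*8 - 1))
}

func bucketMask(b uint8) uintptr { return bucketShift(b) - 1 }

func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (ptrSize*8 - 8)) // top byte of the hash
	if top < minTopHash {
		top += minTopHash // keep clear of the reserved sentinel values
	}
	return top
}

func main() {
	fmt.Println(bucketShift(5), bucketMask(5)) // 32 31
	fmt.Println(tophash(0xab00000000000000))   // 171: top byte used as-is
	fmt.Println(tophash(0x0200000000000000))   // 2 < 5, so bumped to 7
}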
-
-func evacuated(b *bmap) bool {
- h := b.tophash[0]
- return h > emptyOne && h < minTopHash
-}
-
-func (b *bmap) overflow(t *maptype) *bmap {
- return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-goarch.PtrSize))
-}
-
-func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
- *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-goarch.PtrSize)) = ovf
-}
-
-func (b *bmap) keys() unsafe.Pointer {
- return add(unsafe.Pointer(b), dataOffset)
-}
-
-// incrnoverflow increments h.noverflow.
-// noverflow counts the number of overflow buckets.
-// This is used to trigger same-size map growth.
-// See also tooManyOverflowBuckets.
-// To keep hmap small, noverflow is a uint16.
-// When there are few buckets, noverflow is an exact count.
-// When there are many buckets, noverflow is an approximate count.
-func (h *hmap) incrnoverflow() {
- // We trigger same-size map growth if there are
- // as many overflow buckets as buckets.
- // We need to be able to count to 1<<h.B.
- if h.B < 16 {
- h.noverflow++
- return
- }
- // Increment with probability 1/(1<<(h.B-15)).
- // When we reach 1<<15 - 1, we will have approximately
- // as many overflow buckets as buckets.
- mask := uint32(1)<<(h.B-15) - 1
- // Example: if h.B == 18, then mask == 7,
- // and fastrand & 7 == 0 with probability 1/8.
- if fastrand()&mask == 0 {
- h.noverflow++
- }
-}
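The probabilistic increment can be tested in isolation: with B == 18 the mask is 7, so each call increments with probability 1/8 and the counter tracks roughly one eighth of the true count.

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const B = 18
	mask := uint32(1)<<(B-15) - 1 // 7 for B == 18
	var noverflow uint16
	const calls = 80000
	for i := 0; i < calls; i++ {
		if rand.Uint32()&mask == 0 {
			noverflow++
		}
	}
	fmt.Println(noverflow) // on the order of 10000 = calls/8
}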
-
-func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
- var ovf *bmap
- if h.extra != nil && h.extra.nextOverflow != nil {
- // We have preallocated overflow buckets available.
- // See makeBucketArray for more details.
- ovf = h.extra.nextOverflow
- if ovf.overflow(t) == nil {
- // We're not at the end of the preallocated overflow buckets. Bump the pointer.
- h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
- } else {
- // This is the last preallocated overflow bucket.
- // Reset the overflow pointer on this bucket,
- // which was set to a non-nil sentinel value.
- ovf.setoverflow(t, nil)
- h.extra.nextOverflow = nil
- }
- } else {
- ovf = (*bmap)(newobject(t.bucket))
- }
- h.incrnoverflow()
- if t.bucket.ptrdata == 0 {
- h.createOverflow()
- *h.extra.overflow = append(*h.extra.overflow, ovf)
- }
- b.setoverflow(t, ovf)
- return ovf
-}
-
-func (h *hmap) createOverflow() {
- if h.extra == nil {
- h.extra = new(mapextra)
- }
- if h.extra.overflow == nil {
- h.extra.overflow = new([]*bmap)
- }
-}
-
-func makemap64(t *maptype, hint int64, h *hmap) *hmap {
- if int64(int(hint)) != hint {
- hint = 0
- }
- return makemap(t, int(hint), h)
-}
-
-// makemap_small implements Go map creation for make(map[k]v) and
-// make(map[k]v, hint) when hint is known to be at most bucketCnt
-// at compile time and the map needs to be allocated on the heap.
-func makemap_small() *hmap {
- h := new(hmap)
- h.hash0 = fastrand()
- return h
-}
-
-// makemap implements Go map creation for make(map[k]v, hint).
-// If the compiler has determined that the map or the first bucket
-// can be created on the stack, h and/or bucket may be non-nil.
-// If h != nil, the map can be created directly in h.
-// If h.buckets != nil, bucket pointed to can be used as the first bucket.
-func makemap(t *maptype, hint int, h *hmap) *hmap {
- mem, overflow := math.MulUintptr(uintptr(hint), t.bucket.size)
- if overflow || mem > maxAlloc {
- hint = 0
- }
-
- // initialize Hmap
- if h == nil {
- h = new(hmap)
- }
- h.hash0 = fastrand()
-
- // Find the size parameter B which will hold the requested # of elements.
- // For hint < 0 overLoadFactor returns false since hint < bucketCnt.
- B := uint8(0)
- for overLoadFactor(hint, B) {
- B++
- }
- h.B = B
-
- // allocate initial hash table
- // if B == 0, the buckets field is allocated lazily later (in mapassign)
- // If hint is large zeroing this memory could take a while.
- if h.B != 0 {
- var nextOverflow *bmap
- h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
- if nextOverflow != nil {
- h.extra = new(mapextra)
- h.extra.nextOverflow = nextOverflow
- }
- }
-
- return h
-}
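The B-selection loop, isolated: B is the smallest exponent for which hint entries do not overload 1<<B buckets at the 13/2 (that is, 6.5) load factor.

package main

import "fmt"

const (
	bucketCnt     = 8
	loadFactorNum = 13
	loadFactorDen = 2
)

func overLoadFactor(count int, B uint8) bool {
	return count > bucketCnt && uintptr(count) > loadFactorNum*((uintptr(1)<<B)/loadFactorDen)
}

func pickB(hint int) uint8 {
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	return B
}

func main() {
	for _, hint := range []int{0, 8, 9, 100, 1000} {
		B := pickB(hint)
		fmt.Printf("hint=%-5d B=%d buckets=%d\n", hint, B, 1<<B)
	}
}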
-
-// makeBucketArray initializes a backing array for map buckets.
-// 1<<b is the minimum number of buckets to allocate.
-// dirtyalloc should either be nil or a bucket array previously
-// allocated by makeBucketArray with the same t and b parameters.
-// If dirtyalloc is nil a new backing array will be allocated;
-// otherwise dirtyalloc will be cleared and reused as the backing array.
-func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
- base := bucketShift(b)
- nbuckets := base
- // For small b, overflow buckets are unlikely.
- // Avoid the overhead of the calculation.
- if b >= 4 {
- // Add on the estimated number of overflow buckets
- // required to insert the median number of elements
- // used with this value of b.
- nbuckets += bucketShift(b - 4)
- sz := t.bucket.size * nbuckets
- up := roundupsize(sz)
- if up != sz {
- nbuckets = up / t.bucket.size
- }
- }
-
- if dirtyalloc == nil {
- buckets = newarray(t.bucket, int(nbuckets))
- } else {
- // dirtyalloc was previously generated by
- // the above newarray(t.bucket, int(nbuckets))
- // but may not be empty.
- buckets = dirtyalloc
- size := t.bucket.size * nbuckets
- if t.bucket.ptrdata != 0 {
- memclrHasPointers(buckets, size)
- } else {
- memclrNoHeapPointers(buckets, size)
- }
- }
-
- if base != nbuckets {
- // We preallocated some overflow buckets.
- // To keep the overhead of tracking these overflow buckets to a minimum,
- // we use the convention that if a preallocated overflow bucket's overflow
- // pointer is nil, then there are more available by bumping the pointer.
- // We need a safe non-nil pointer for the last overflow bucket; just use buckets.
- nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
- last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
- last.setoverflow(t, (*bmap)(buckets))
- }
- return buckets, nextOverflow
-}
-
-// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
-// it will return a reference to the zero object for the elem type if
-// the key is not in the map.
-// NOTE: The returned pointer may keep the whole map live, so don't
-// hold onto it for very long.
-func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- pc := abi.FuncPCABIInternal(mapaccess1)
- racereadpc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.key, key, callerpc, pc)
- }
- if msanenabled && h != nil {
- msanread(key, t.key.size)
- }
- if asanenabled && h != nil {
- asanread(key, t.key.size)
- }
- if h == nil || h.count == 0 {
- if t.hashMightPanic() {
- t.hasher(key, 0) // see issue 23734
- }
- return unsafe.Pointer(&zeroVal[0])
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
- }
- hash := t.hasher(key, uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := tophash(hash)
-bucketloop:
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey() {
- k = *((*unsafe.Pointer)(k))
- }
- if t.key.equal(key, k) {
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
- if t.indirectelem() {
- e = *((*unsafe.Pointer)(e))
- }
- return e
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0])
-}
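The mid-grow fallback above ("mask down one more power of two") in numbers: with B == 3 there are 8 new buckets over 4 old ones, and a hash whose low bits are 110 selects new bucket 6 but old bucket 2.

package main

import "fmt"

func main() {
	var hash uintptr = 0b110 // only the low bits matter for bucket choice
	m := uintptr(1)<<3 - 1   // mask for the grown table, B = 3
	fmt.Println(hash & m)        // 6: bucket in the grown table
	fmt.Println(hash & (m >> 1)) // 2: bucket in the old, half-size table
}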
-
-func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- pc := abi.FuncPCABIInternal(mapaccess2)
- racereadpc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.key, key, callerpc, pc)
- }
- if msanenabled && h != nil {
- msanread(key, t.key.size)
- }
- if asanenabled && h != nil {
- asanread(key, t.key.size)
- }
- if h == nil || h.count == 0 {
- if t.hashMightPanic() {
- t.hasher(key, 0) // see issue 23734
- }
- return unsafe.Pointer(&zeroVal[0]), false
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
- }
- hash := t.hasher(key, uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := tophash(hash)
-bucketloop:
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey() {
- k = *((*unsafe.Pointer)(k))
- }
- if t.key.equal(key, k) {
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
- if t.indirectelem() {
- e = *((*unsafe.Pointer)(e))
- }
- return e, true
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0]), false
-}
-
-// mapaccessK returns both key and elem. Used by the map iterator.
-func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
- if h == nil || h.count == 0 {
- return nil, nil
- }
- hash := t.hasher(key, uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := tophash(hash)
-bucketloop:
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey() {
- k = *((*unsafe.Pointer)(k))
- }
- if t.key.equal(key, k) {
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
- if t.indirectelem() {
- e = *((*unsafe.Pointer)(e))
- }
- return k, e
- }
- }
- }
- return nil, nil
-}
-
-func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
- e := mapaccess1(t, h, key)
- if e == unsafe.Pointer(&zeroVal[0]) {
- return zero
- }
- return e
-}
-
-func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
- e := mapaccess1(t, h, key)
- if e == unsafe.Pointer(&zeroVal[0]) {
- return zero, false
- }
- return e, true
-}
-
-// Like mapaccess, but allocates a slot for the key if it is not present in the map.
-func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- if h == nil {
- panic(plainError("assignment to entry in nil map"))
- }
- if raceenabled {
- callerpc := getcallerpc()
- pc := abi.FuncPCABIInternal(mapassign)
- racewritepc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.key, key, callerpc, pc)
- }
- if msanenabled {
- msanread(key, t.key.size)
- }
- if asanenabled {
- asanread(key, t.key.size)
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
- }
- hash := t.hasher(key, uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher, since t.hasher may panic,
- // in which case we have not actually done a write.
- h.flags ^= hashWriting
-
- if h.buckets == nil {
- h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
- }
-
-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
- top := tophash(hash)
-
- var inserti *uint8
- var insertk unsafe.Pointer
- var elem unsafe.Pointer
-bucketloop:
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- if isEmpty(b.tophash[i]) && inserti == nil {
- inserti = &b.tophash[i]
- insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey() {
- k = *((*unsafe.Pointer)(k))
- }
- if !t.key.equal(key, k) {
- continue
- }
- // already have a mapping for key. Update it.
- if t.needkeyupdate() {
- typedmemmove(t.key, k, key)
- }
- elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
- goto done
- }
- ovf := b.overflow(t)
- if ovf == nil {
- break
- }
- b = ovf
- }
-
- // Did not find mapping for key. Allocate new cell & add entry.
-
- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
- }
-
- if inserti == nil {
-		// The current bucket and all the overflow buckets connected to it are full; allocate a new one.
- newb := h.newoverflow(t, b)
- inserti = &newb.tophash[0]
- insertk = add(unsafe.Pointer(newb), dataOffset)
- elem = add(insertk, bucketCnt*uintptr(t.keysize))
- }
-
- // store new key/elem at insert position
- if t.indirectkey() {
- kmem := newobject(t.key)
- *(*unsafe.Pointer)(insertk) = kmem
- insertk = kmem
- }
- if t.indirectelem() {
- vmem := newobject(t.elem)
- *(*unsafe.Pointer)(elem) = vmem
- }
- typedmemmove(t.key, insertk, key)
- *inserti = top
- h.count++
-
-done:
- if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
- }
- h.flags &^= hashWriting
- if t.indirectelem() {
- elem = *((*unsafe.Pointer)(elem))
- }
- return elem
-}
-
-func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- pc := abi.FuncPCABIInternal(mapdelete)
- racewritepc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.key, key, callerpc, pc)
- }
- if msanenabled && h != nil {
- msanread(key, t.key.size)
- }
- if asanenabled && h != nil {
- asanread(key, t.key.size)
- }
- if h == nil || h.count == 0 {
- if t.hashMightPanic() {
- t.hasher(key, 0) // see issue 23734
- }
- return
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
- }
-
- hash := t.hasher(key, uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher, since t.hasher may panic,
- // in which case we have not actually done a write (delete).
- h.flags ^= hashWriting
-
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
- bOrig := b
- top := tophash(hash)
-search:
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == emptyRest {
- break search
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- k2 := k
- if t.indirectkey() {
- k2 = *((*unsafe.Pointer)(k2))
- }
- if !t.key.equal(key, k2) {
- continue
- }
- // Only clear key if there are pointers in it.
- if t.indirectkey() {
- *(*unsafe.Pointer)(k) = nil
- } else if t.key.ptrdata != 0 {
- memclrHasPointers(k, t.key.size)
- }
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
- if t.indirectelem() {
- *(*unsafe.Pointer)(e) = nil
- } else if t.elem.ptrdata != 0 {
- memclrHasPointers(e, t.elem.size)
- } else {
- memclrNoHeapPointers(e, t.elem.size)
- }
- b.tophash[i] = emptyOne
- // If the bucket now ends in a bunch of emptyOne states,
- // change those to emptyRest states.
- // It would be nice to make this a separate function, but
- // for loops are not currently inlineable.
- if i == bucketCnt-1 {
- if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
- goto notLast
- }
- } else {
- if b.tophash[i+1] != emptyRest {
- goto notLast
- }
- }
- for {
- b.tophash[i] = emptyRest
- if i == 0 {
- if b == bOrig {
- break // beginning of initial bucket, we're done.
- }
- // Find previous bucket, continue at its last entry.
- c := b
- for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
- }
- i = bucketCnt - 1
- } else {
- i--
- }
- if b.tophash[i] != emptyOne {
- break
- }
- }
- notLast:
- h.count--
- // Reset the hash seed to make it more difficult for attackers to
- // repeatedly trigger hash collisions. See issue 25237.
- if h.count == 0 {
- h.hash0 = fastrand()
- }
- break search
- }
- }
-
- if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
- }
- h.flags &^= hashWriting
-}
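The emptyRest back-propagation at the end of mapdelete, modeled on a single bucket's tophash array with no overflow chain (the overflow-aware walk in the real code is omitted):

package main

import "fmt"

const (
	emptyRest = 0
	emptyOne  = 1
)

func main() {
	tophash := [8]uint8{9, emptyOne, 7, emptyRest, emptyRest, emptyRest, emptyRest, emptyRest}
	i := 2 // delete the entry at index 2
	tophash[i] = emptyOne
	if tophash[i+1] == emptyRest { // safe here since i < 7
		// Walk backwards, turning the trailing run of emptyOne into emptyRest.
		for {
			tophash[i] = emptyRest
			if i == 0 {
				break
			}
			i--
			if tophash[i] != emptyOne {
				break
			}
		}
	}
	fmt.Println(tophash) // [9 0 0 0 0 0 0 0]
}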
-
-// mapiterinit initializes the hiter struct used for ranging over maps.
-// The hiter struct pointed to by 'it' is allocated on the stack
-// by the compiler's order pass or on the heap by reflect_mapiterinit.
-// Both need to have a zeroed hiter since the struct contains pointers.
-func mapiterinit(t *maptype, h *hmap, it *hiter) {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
- }
-
- it.t = t
- if h == nil || h.count == 0 {
- return
- }
-
- if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
- throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
- }
- it.h = h
-
- // grab snapshot of bucket state
- it.B = h.B
- it.buckets = h.buckets
- if t.bucket.ptrdata == 0 {
- // Allocate the current slice and remember pointers to both current and old.
- // This preserves all relevant overflow buckets alive even if
- // the table grows and/or overflow buckets are added to the table
- // while we are iterating.
- h.createOverflow()
- it.overflow = h.extra.overflow
- it.oldoverflow = h.extra.oldoverflow
- }
-
- // decide where to start
- r := uintptr(fastrand())
- if h.B > 31-bucketCntBits {
- r += uintptr(fastrand()) << 31
- }
- it.startBucket = r & bucketMask(h.B)
- it.offset = uint8(r >> h.B & (bucketCnt - 1))
-
- // iterator state
- it.bucket = it.startBucket
-
- // Remember we have an iterator.
- // Can run concurrently with another mapiterinit().
- if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
- atomic.Or8(&h.flags, iterator|oldIterator)
- }
-
- mapiternext(it)
-}
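The iteration-order randomization, isolated: one random word yields both the start bucket and the intra-bucket offset. The extra random word the runtime mixes in for very large B is omitted from this sketch.

package main

import (
	"fmt"
	"math/rand"
)

const bucketCnt = 8

func main() {
	B := uint8(4) // 16 buckets
	r := uintptr(rand.Uint32())
	startBucket := r & (uintptr(1)<<B - 1) // low B bits pick the bucket
	offset := uint8(r >> B & (bucketCnt - 1)) // next 3 bits pick the slot offset
	fmt.Println(startBucket, offset)
}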
-
-func mapiternext(it *hiter) {
- h := it.h
- if raceenabled {
- callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map iteration and map write")
- }
- t := it.t
- bucket := it.bucket
- b := it.bptr
- i := it.i
- checkBucket := it.checkBucket
-
-next:
- if b == nil {
- if bucket == it.startBucket && it.wrapped {
- // end of iteration
- it.key = nil
- it.elem = nil
- return
- }
- if h.growing() && it.B == h.B {
- // Iterator was started in the middle of a grow, and the grow isn't done yet.
- // If the bucket we're looking at hasn't been filled in yet (i.e. the old
- // bucket hasn't been evacuated) then we need to iterate through the old
- // bucket and only return the ones that will be migrated to this bucket.
- oldbucket := bucket & it.h.oldbucketmask()
- b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- if !evacuated(b) {
- checkBucket = bucket
- } else {
- b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
- checkBucket = noCheck
- }
- } else {
- b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
- checkBucket = noCheck
- }
- bucket++
- if bucket == bucketShift(it.B) {
- bucket = 0
- it.wrapped = true
- }
- i = 0
- }
- for ; i < bucketCnt; i++ {
- offi := (i + it.offset) & (bucketCnt - 1)
- if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
- // TODO: emptyRest is hard to use here, as we start iterating
- // in the middle of a bucket. It's feasible, just tricky.
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
- if t.indirectkey() {
- k = *((*unsafe.Pointer)(k))
- }
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.elemsize))
- if checkBucket != noCheck && !h.sameSizeGrow() {
- // Special case: iterator was started during a grow to a larger size
- // and the grow is not done yet. We're working on a bucket whose
- // oldbucket has not been evacuated yet. Or at least, it wasn't
- // evacuated when we started the bucket. So we're iterating
- // through the oldbucket, skipping any keys that will go
- // to the other new bucket (each oldbucket expands to two
- // buckets during a grow).
- if t.reflexivekey() || t.key.equal(k, k) {
- // If the item in the oldbucket is not destined for
- // the current new bucket in the iteration, skip it.
- hash := t.hasher(k, uintptr(h.hash0))
- if hash&bucketMask(it.B) != checkBucket {
- continue
- }
- } else {
- // Hash isn't repeatable if k != k (NaNs). We need a
- // repeatable and randomish choice of which direction
- // to send NaNs during evacuation. We'll use the low
- // bit of tophash to decide which way NaNs go.
- // NOTE: this case is why we need two evacuate tophash
- // values, evacuatedX and evacuatedY, that differ in
- // their low bit.
- if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
- continue
- }
- }
- }
- if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
- !(t.reflexivekey() || t.key.equal(k, k)) {
-			// This is the golden data; we can return it.
-			// OR
-			// key != key, so the entry can't be deleted or updated, so we can just return it.
-			// That's lucky for us because when key != key we can't look it up successfully.
- it.key = k
- if t.indirectelem() {
- e = *((*unsafe.Pointer)(e))
- }
- it.elem = e
- } else {
- // The hash table has grown since the iterator was started.
- // The golden data for this key is now somewhere else.
- // Check the current hash table for the data.
- // This code handles the case where the key
- // has been deleted, updated, or deleted and reinserted.
- // NOTE: we need to regrab the key as it has potentially been
- // updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
- rk, re := mapaccessK(t, h, k)
- if rk == nil {
- continue // key has been deleted
- }
- it.key = rk
- it.elem = re
- }
- it.bucket = bucket
- if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
- it.bptr = b
- }
- it.i = i + 1
- it.checkBucket = checkBucket
- return
- }
- b = b.overflow(t)
- i = 0
- goto next
-}
-
-// mapclear deletes all keys from a map.
-func mapclear(t *maptype, h *hmap) {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- pc := abi.FuncPCABIInternal(mapclear)
- racewritepc(unsafe.Pointer(h), callerpc, pc)
- }
-
- if h == nil || h.count == 0 {
- return
- }
-
- if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
- }
-
- h.flags ^= hashWriting
-
- h.flags &^= sameSizeGrow
- h.oldbuckets = nil
- h.nevacuate = 0
- h.noverflow = 0
- h.count = 0
-
- // Reset the hash seed to make it more difficult for attackers to
- // repeatedly trigger hash collisions. See issue 25237.
- h.hash0 = fastrand()
-
- // Keep the mapextra allocation but clear any extra information.
- if h.extra != nil {
- *h.extra = mapextra{}
- }
-
- // makeBucketArray clears the memory pointed to by h.buckets
- // and recovers any overflow buckets by generating them
-	// as if h.buckets was newly allocated.
- _, nextOverflow := makeBucketArray(t, h.B, h.buckets)
- if nextOverflow != nil {
- // If overflow buckets are created then h.extra
- // will have been allocated during initial bucket creation.
- h.extra.nextOverflow = nextOverflow
- }
-
- if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
- }
- h.flags &^= hashWriting
-}
-
-func hashGrow(t *maptype, h *hmap) {
- // If we've hit the load factor, get bigger.
- // Otherwise, there are too many overflow buckets,
- // so keep the same number of buckets and "grow" laterally.
- bigger := uint8(1)
- if !overLoadFactor(h.count+1, h.B) {
- bigger = 0
- h.flags |= sameSizeGrow
- }
- oldbuckets := h.buckets
- newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)
-
- flags := h.flags &^ (iterator | oldIterator)
- if h.flags&iterator != 0 {
- flags |= oldIterator
- }
- // commit the grow (atomic wrt gc)
- h.B += bigger
- h.flags = flags
- h.oldbuckets = oldbuckets
- h.buckets = newbuckets
- h.nevacuate = 0
- h.noverflow = 0
-
- if h.extra != nil && h.extra.overflow != nil {
- // Promote current overflow buckets to the old generation.
- if h.extra.oldoverflow != nil {
- throw("oldoverflow is not nil")
- }
- h.extra.oldoverflow = h.extra.overflow
- h.extra.overflow = nil
- }
- if nextOverflow != nil {
- if h.extra == nil {
- h.extra = new(mapextra)
- }
- h.extra.nextOverflow = nextOverflow
- }
-
- // the actual copying of the hash table data is done incrementally
- // by growWork() and evacuate().
-}
-
-// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
-func overLoadFactor(count int, B uint8) bool {
- return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
-}
-
-// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
-// Note that most of these overflow buckets must be in sparse use;
-// if use was dense, then we'd have already triggered regular map growth.
-func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
- // If the threshold is too low, we do extraneous work.
- // If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
- // "too many" means (approximately) as many overflow buckets as regular buckets.
- // See incrnoverflow for more details.
- if B > 15 {
- B = 15
- }
- // The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
- return noverflow >= uint16(1)<<(B&15)
-}
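tooManyOverflowBuckets in isolation, showing the threshold and its saturation at B == 15:

package main

import "fmt"

func tooMany(noverflow uint16, B uint8) bool {
	if B > 15 {
		B = 15
	}
	return noverflow >= uint16(1)<<(B&15)
}

func main() {
	fmt.Println(tooMany(15, 4), tooMany(16, 4))           // false true: threshold is 16, the bucket count
	fmt.Println(tooMany(1<<15-1, 20), tooMany(1<<15, 20)) // false true: capped at 2^15
}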
-
-// growing reports whether h is growing. The growth may be to the same size or bigger.
-func (h *hmap) growing() bool {
- return h.oldbuckets != nil
-}
-
-// sameSizeGrow reports whether the current growth is to a map of the same size.
-func (h *hmap) sameSizeGrow() bool {
- return h.flags&sameSizeGrow != 0
-}
-
-// noldbuckets calculates the number of buckets prior to the current map growth.
-func (h *hmap) noldbuckets() uintptr {
- oldB := h.B
- if !h.sameSizeGrow() {
- oldB--
- }
- return bucketShift(oldB)
-}
-
-// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
-func (h *hmap) oldbucketmask() uintptr {
- return h.noldbuckets() - 1
-}
-
-func growWork(t *maptype, h *hmap, bucket uintptr) {
- // make sure we evacuate the oldbucket corresponding
- // to the bucket we're about to use
- evacuate(t, h, bucket&h.oldbucketmask())
-
- // evacuate one more oldbucket to make progress on growing
- if h.growing() {
- evacuate(t, h, h.nevacuate)
- }
-}
-
-func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
- b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.bucketsize)))
- return evacuated(b)
-}
-
-// evacDst is an evacuation destination.
-type evacDst struct {
- b *bmap // current destination bucket
- i int // key/elem index into b
- k unsafe.Pointer // pointer to current key storage
- e unsafe.Pointer // pointer to current elem storage
-}
-
-func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
- b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- newbit := h.noldbuckets()
- if !evacuated(b) {
- // TODO: reuse overflow buckets instead of using new ones, if there
- // is no iterator using the old buckets. (If !oldIterator.)
-
- // xy contains the x and y (low and high) evacuation destinations.
- var xy [2]evacDst
- x := &xy[0]
- x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
- x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, bucketCnt*uintptr(t.keysize))
-
- if !h.sameSizeGrow() {
- // Only calculate y pointers if we're growing bigger.
- // Otherwise GC can see bad pointers.
- y := &xy[1]
- y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
- y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, bucketCnt*uintptr(t.keysize))
- }
-
- for ; b != nil; b = b.overflow(t) {
- k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, bucketCnt*uintptr(t.keysize))
- for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.keysize)), add(e, uintptr(t.elemsize)) {
- top := b.tophash[i]
- if isEmpty(top) {
- b.tophash[i] = evacuatedEmpty
- continue
- }
- if top < minTopHash {
- throw("bad map state")
- }
- k2 := k
- if t.indirectkey() {
- k2 = *((*unsafe.Pointer)(k2))
- }
- var useY uint8
- if !h.sameSizeGrow() {
- // Compute hash to make our evacuation decision (whether we need
- // to send this key/elem to bucket x or bucket y).
- hash := t.hasher(k2, uintptr(h.hash0))
- if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) {
- // If key != key (NaNs), then the hash could be (and probably
- // will be) entirely different from the old hash. Moreover,
- // it isn't reproducible. Reproducibility is required in the
- // presence of iterators, as our evacuation decision must
- // match whatever decision the iterator made.
- // Fortunately, we have the freedom to send these keys either
- // way. Also, tophash is meaningless for these kinds of keys.
- // We let the low bit of tophash drive the evacuation decision.
- // We recompute a new random tophash for the next level so
- // these keys will get evenly distributed across all buckets
- // after multiple grows.
- useY = top & 1
- top = tophash(hash)
- } else {
- if hash&newbit != 0 {
- useY = 1
- }
- }
- }
-
- if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
- throw("bad evacuatedN")
- }
-
- b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
- dst := &xy[useY] // evacuation destination
-
- if dst.i == bucketCnt {
- dst.b = h.newoverflow(t, dst.b)
- dst.i = 0
- dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, bucketCnt*uintptr(t.keysize))
- }
- dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
- if t.indirectkey() {
- *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
- } else {
-					typedmemmove(t.key, dst.k, k) // copy key
- }
- if t.indirectelem() {
- *(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
- } else {
- typedmemmove(t.elem, dst.e, e)
- }
- dst.i++
- // These updates might push these pointers past the end of the
- // key or elem arrays. That's ok, as we have the overflow pointer
- // at the end of the bucket to protect against pointing past the
- // end of the bucket.
- dst.k = add(dst.k, uintptr(t.keysize))
- dst.e = add(dst.e, uintptr(t.elemsize))
- }
- }
- // Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
- b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
- // Preserve b.tophash because the evacuation
- // state is maintained there.
- ptr := add(b, dataOffset)
- n := uintptr(t.bucketsize) - dataOffset
- memclrHasPointers(ptr, n)
- }
- }
-
- if oldbucket == h.nevacuate {
- advanceEvacuationMark(h, t, newbit)
- }
-}
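The x/y split in numbers: when the table doubles, the hash bit newbit decides whether a key stays at its old index (x) or moves up by newbit (y). A sketch with 4 old buckets:

package main

import "fmt"

func main() {
	oldB := uint8(2)             // 4 old buckets before the doubling
	newbit := uintptr(1) << oldB // the hash bit that picks x or y
	for _, hash := range []uintptr{0b0010, 0b0110} {
		oldbucket := hash & (newbit - 1)
		useY := (hash & newbit) >> oldB // 0 for x, 1 for y
		fmt.Printf("hash=%04b old=%d new=%d\n", hash, oldbucket, oldbucket+useY*newbit)
	}
}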
-
-func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
- h.nevacuate++
- // Experiments suggest that 1024 is overkill by at least an order of magnitude.
- // Put it in there as a safeguard anyway, to ensure O(1) behavior.
- stop := h.nevacuate + 1024
- if stop > newbit {
- stop = newbit
- }
- for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
- h.nevacuate++
- }
- if h.nevacuate == newbit { // newbit == # of oldbuckets
- // Growing is all done. Free old main bucket array.
- h.oldbuckets = nil
- // Can discard old overflow buckets as well.
- // If they are still referenced by an iterator,
-		// then the iterator holds a pointer to the slice.
- if h.extra != nil {
- h.extra.oldoverflow = nil
- }
- h.flags &^= sameSizeGrow
- }
-}
-
-// Reflect stubs. Called from ../reflect/asm_*.s
-
-//go:linkname reflect_makemap reflect.makemap
-func reflect_makemap(t *maptype, cap int) *hmap {
-	// Check invariants and reflect's math.
- if t.key.equal == nil {
- throw("runtime.reflect_makemap: unsupported map key type")
- }
- if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(goarch.PtrSize)) ||
- t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) {
- throw("key size wrong")
- }
- if t.elem.size > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(goarch.PtrSize)) ||
- t.elem.size <= maxElemSize && (t.indirectelem() || t.elemsize != uint8(t.elem.size)) {
- throw("elem size wrong")
- }
- if t.key.align > bucketCnt {
- throw("key align too big")
- }
- if t.elem.align > bucketCnt {
- throw("elem align too big")
- }
- if t.key.size%uintptr(t.key.align) != 0 {
- throw("key size not a multiple of key align")
- }
- if t.elem.size%uintptr(t.elem.align) != 0 {
- throw("elem size not a multiple of elem align")
- }
- if bucketCnt < 8 {
- throw("bucketsize too small for proper alignment")
- }
- if dataOffset%uintptr(t.key.align) != 0 {
- throw("need padding in bucket (key)")
- }
- if dataOffset%uintptr(t.elem.align) != 0 {
- throw("need padding in bucket (elem)")
- }
-
- return makemap(t, cap, nil)
-}
-
-//go:linkname reflect_mapaccess reflect.mapaccess
-func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- elem, ok := mapaccess2(t, h, key)
- if !ok {
- // reflect wants nil for a missing element
- elem = nil
- }
- return elem
-}
-
-//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
-func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
- elem, ok := mapaccess2_faststr(t, h, key)
- if !ok {
- // reflect wants nil for a missing element
- elem = nil
- }
- return elem
-}
-
-//go:linkname reflect_mapassign reflect.mapassign
-func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
- p := mapassign(t, h, key)
- typedmemmove(t.elem, p, elem)
-}
-
-//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr
-func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
- p := mapassign_faststr(t, h, key)
- typedmemmove(t.elem, p, elem)
-}
-
-//go:linkname reflect_mapdelete reflect.mapdelete
-func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
- mapdelete(t, h, key)
-}
-
-//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
-func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
- mapdelete_faststr(t, h, key)
-}
-
-//go:linkname reflect_mapiterinit reflect.mapiterinit
-func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
- mapiterinit(t, h, it)
-}
-
-//go:linkname reflect_mapiternext reflect.mapiternext
-func reflect_mapiternext(it *hiter) {
- mapiternext(it)
-}
-
-//go:linkname reflect_mapiterkey reflect.mapiterkey
-func reflect_mapiterkey(it *hiter) unsafe.Pointer {
- return it.key
-}
-
-//go:linkname reflect_mapiterelem reflect.mapiterelem
-func reflect_mapiterelem(it *hiter) unsafe.Pointer {
- return it.elem
-}
-
-//go:linkname reflect_maplen reflect.maplen
-func reflect_maplen(h *hmap) int {
- if h == nil {
- return 0
- }
- if raceenabled {
- callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
- }
- return h.count
-}
-
-//go:linkname reflectlite_maplen internal/reflectlite.maplen
-func reflectlite_maplen(h *hmap) int {
- if h == nil {
- return 0
- }
- if raceenabled {
- callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
- }
- return h.count
-}
-
-const maxZero = 1024 // must match value in reflect/value.go:maxZero and cmd/compile/internal/gc/walk.go:zeroValSize
-var zeroVal [maxZero]byte
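The stubs above are reached from ordinary reflect code. A minimal sketch of the user-level path (the faststr routing for string keys is an internal detail of this tree, noted here as an assumption):

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	// reflect.MakeMapWithSize lands in reflect_makemap above, which
	// validates the maptype layout before calling makemap.
	m := reflect.MakeMapWithSize(reflect.TypeOf(map[string]int(nil)), 8)

	k, v := reflect.ValueOf("answer"), reflect.ValueOf(42)
	m.SetMapIndex(k, v)        // assign: reflect_mapassign_faststr for string keys
	fmt.Println(m.MapIndex(k)) // lookup: reflect_mapaccess_faststr; prints 42

	m.SetMapIndex(k, reflect.Value{}) // a zero Value deletes: reflect_mapdelete_faststr
	fmt.Println(m.Len())              // length: reflect_maplen; prints 0
}
```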
diff --git a/contrib/go/_std_1.18/src/runtime/map_fast32.go b/contrib/go/_std_1.18/src/runtime/map_fast32.go
deleted file mode 100644
index e80caeef55..0000000000
--- a/contrib/go/_std_1.18/src/runtime/map_fast32.go
+++ /dev/null
@@ -1,462 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/abi"
- "internal/goarch"
- "unsafe"
-)
-
-func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(&zeroVal[0])
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
- }
- var b *bmap
- if h.B == 0 {
- // One-bucket table. No need to hash.
- b = (*bmap)(h.buckets)
- } else {
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- }
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
- if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0])
-}
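A standalone sketch (toy helpers, not the runtime's own) of the bucket-selection math used in the lookup above: bucketMask(B) is 2^B-1, and while a doubling is in progress the key's old bucket is found by masking with one fewer bit:

```go
package main

import "fmt"

// bucketMask mirrors the runtime helper: with 2^B buckets, hash&mask
// selects a bucket index.
func bucketMask(B uint8) uintptr { return (uintptr(1) << B) - 1 }

func main() {
	const B = 3 // 8 buckets after a doubling from 4
	m := bucketMask(B)
	hash := uintptr(0xdeadbeef)
	fmt.Println("new bucket:", hash&m)      // index in the grown table
	fmt.Println("old bucket:", hash&(m>>1)) // same key's bucket in the old, half-size table
}
```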
-
-func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(&zeroVal[0]), false
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
- }
- var b *bmap
- if h.B == 0 {
- // One-bucket table. No need to hash.
- b = (*bmap)(h.buckets)
- } else {
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- }
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
- if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize)), true
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0]), false
-}
-
-func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
- if h == nil {
- panic(plainError("assignment to entry in nil map"))
- }
- if raceenabled {
- callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
- }
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher for consistency with mapassign.
- h.flags ^= hashWriting
-
- if h.buckets == nil {
- h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
- }
-
-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast32(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
-
- var insertb *bmap
- var inserti uintptr
- var insertk unsafe.Pointer
-
-bucketloop:
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if isEmpty(b.tophash[i]) {
- if insertb == nil {
- inserti = i
- insertb = b
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
- if k != key {
- continue
- }
- inserti = i
- insertb = b
- goto done
- }
- ovf := b.overflow(t)
- if ovf == nil {
- break
- }
- b = ovf
- }
-
- // Did not find mapping for key. Allocate new cell & add entry.
-
- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
- }
-
- if insertb == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- insertb = h.newoverflow(t, b)
- inserti = 0 // not necessary, but avoids needlessly spilling inserti
- }
- insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
- // store new key at insert position
- *(*uint32)(insertk) = key
-
- h.count++
-
-done:
- elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
- if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
- }
- h.flags &^= hashWriting
- return elem
-}
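The insert path above relies on the tophash byte cached per slot. A self-contained sketch of that scheme, assuming the minTopHash = 5 reservation from map.go in the same tree:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Values below minTopHash are reserved for slot states (emptyRest,
// emptyOne, the evacuated* markers), so real hashes are bumped past them.
const minTopHash = 5

func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (unsafe.Sizeof(hash)*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}

func main() {
	fmt.Println(tophash(0x02))        // top byte 0 collides with a state value: bumped to 5
	fmt.Println(tophash(^uintptr(0))) // top byte 0xff: stays 255
}
```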
-
-func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- if h == nil {
- panic(plainError("assignment to entry in nil map"))
- }
- if raceenabled {
- callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
- }
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher for consistency with mapassign.
- h.flags ^= hashWriting
-
- if h.buckets == nil {
- h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
- }
-
-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast32(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
-
- var insertb *bmap
- var inserti uintptr
- var insertk unsafe.Pointer
-
-bucketloop:
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if isEmpty(b.tophash[i]) {
- if insertb == nil {
- inserti = i
- insertb = b
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
- if k != key {
- continue
- }
- inserti = i
- insertb = b
- goto done
- }
- ovf := b.overflow(t)
- if ovf == nil {
- break
- }
- b = ovf
- }
-
- // Did not find mapping for key. Allocate new cell & add entry.
-
- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
- }
-
- if insertb == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- insertb = h.newoverflow(t, b)
- inserti = 0 // not necessary, but avoids needlessly spilling inserti
- }
- insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
- // store new key at insert position
- *(*unsafe.Pointer)(insertk) = key
-
- h.count++
-
-done:
- elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
- if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
- }
- h.flags &^= hashWriting
- return elem
-}
-
-func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
- }
- if h == nil || h.count == 0 {
- return
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
- }
-
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher for consistency with mapdelete
- h.flags ^= hashWriting
-
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast32(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
- bOrig := b
-search:
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
- if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
- continue
- }
- // Only clear key if there are pointers in it.
- // This can only happen if pointers are 32 bit
-		// wide, as 64 bit pointers do not fit into a 32 bit key.
- if goarch.PtrSize == 4 && t.key.ptrdata != 0 {
- // The key must be a pointer as we checked pointers are
- // 32 bits wide and the key is 32 bits wide also.
- *(*unsafe.Pointer)(k) = nil
- }
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
- if t.elem.ptrdata != 0 {
- memclrHasPointers(e, t.elem.size)
- } else {
- memclrNoHeapPointers(e, t.elem.size)
- }
- b.tophash[i] = emptyOne
- // If the bucket now ends in a bunch of emptyOne states,
- // change those to emptyRest states.
- if i == bucketCnt-1 {
- if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
- goto notLast
- }
- } else {
- if b.tophash[i+1] != emptyRest {
- goto notLast
- }
- }
- for {
- b.tophash[i] = emptyRest
- if i == 0 {
- if b == bOrig {
- break // beginning of initial bucket, we're done.
- }
- // Find previous bucket, continue at its last entry.
- c := b
- for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
- }
- i = bucketCnt - 1
- } else {
- i--
- }
- if b.tophash[i] != emptyOne {
- break
- }
- }
- notLast:
- h.count--
- // Reset the hash seed to make it more difficult for attackers to
- // repeatedly trigger hash collisions. See issue 25237.
- if h.count == 0 {
- h.hash0 = fastrand()
- }
- break search
- }
- }
-
- if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
- }
- h.flags &^= hashWriting
-}
-
-func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
- // make sure we evacuate the oldbucket corresponding
- // to the bucket we're about to use
- evacuate_fast32(t, h, bucket&h.oldbucketmask())
-
- // evacuate one more oldbucket to make progress on growing
- if h.growing() {
- evacuate_fast32(t, h, h.nevacuate)
- }
-}
-
-func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
- b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- newbit := h.noldbuckets()
- if !evacuated(b) {
- // TODO: reuse overflow buckets instead of using new ones, if there
- // is no iterator using the old buckets. (If !oldIterator.)
-
- // xy contains the x and y (low and high) evacuation destinations.
- var xy [2]evacDst
- x := &xy[0]
- x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
- x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, bucketCnt*4)
-
- if !h.sameSizeGrow() {
- // Only calculate y pointers if we're growing bigger.
- // Otherwise GC can see bad pointers.
- y := &xy[1]
- y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
- y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, bucketCnt*4)
- }
-
- for ; b != nil; b = b.overflow(t) {
- k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, bucketCnt*4)
- for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.elemsize)) {
- top := b.tophash[i]
- if isEmpty(top) {
- b.tophash[i] = evacuatedEmpty
- continue
- }
- if top < minTopHash {
- throw("bad map state")
- }
- var useY uint8
- if !h.sameSizeGrow() {
- // Compute hash to make our evacuation decision (whether we need
- // to send this key/elem to bucket x or bucket y).
- hash := t.hasher(k, uintptr(h.hash0))
- if hash&newbit != 0 {
- useY = 1
- }
- }
-
- b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
- dst := &xy[useY] // evacuation destination
-
- if dst.i == bucketCnt {
- dst.b = h.newoverflow(t, dst.b)
- dst.i = 0
- dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, bucketCnt*4)
- }
- dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
-
- // Copy key.
- if goarch.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
- // Write with a write barrier.
- *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
- } else {
- *(*uint32)(dst.k) = *(*uint32)(k)
- }
-
- typedmemmove(t.elem, dst.e, e)
- dst.i++
- // These updates might push these pointers past the end of the
- // key or elem arrays. That's ok, as we have the overflow pointer
- // at the end of the bucket to protect against pointing past the
- // end of the bucket.
- dst.k = add(dst.k, 4)
- dst.e = add(dst.e, uintptr(t.elemsize))
- }
- }
- // Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
- b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
- // Preserve b.tophash because the evacuation
- // state is maintained there.
- ptr := add(b, dataOffset)
- n := uintptr(t.bucketsize) - dataOffset
- memclrHasPointers(ptr, n)
- }
- }
-
- if oldbucket == h.nevacuate {
- advanceEvacuationMark(h, t, newbit)
- }
-}
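The delete path above converts a trailing run of emptyOne slots to emptyRest so later lookups can stop early. A single-bucket toy of that back-propagation (the real code also walks overflow chains and uses the runtime's bucket layout):

```go
package main

import "fmt"

const (
	emptyRest = 0 // this slot and all later ones in the chain are empty
	emptyOne  = 1 // this slot is empty; later ones may not be
)

// markDeleted empties slot i and, if everything after it is already
// emptyRest, extends the emptyRest run backwards over emptyOne slots.
func markDeleted(tophash []uint8, i int) {
	tophash[i] = emptyOne
	if i != len(tophash)-1 && tophash[i+1] != emptyRest {
		return
	}
	for i >= 0 && tophash[i] == emptyOne {
		tophash[i] = emptyRest
		i--
	}
}

func main() {
	b := []uint8{9, emptyOne, 7, emptyRest, emptyRest, emptyRest, emptyRest, emptyRest}
	markDeleted(b, 2)
	fmt.Println(b) // [9 0 0 0 0 0 0 0]: slots 1 and 2 collapsed into the emptyRest run
}
```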
diff --git a/contrib/go/_std_1.18/src/runtime/map_fast64.go b/contrib/go/_std_1.18/src/runtime/map_fast64.go
deleted file mode 100644
index 69d8872885..0000000000
--- a/contrib/go/_std_1.18/src/runtime/map_fast64.go
+++ /dev/null
@@ -1,470 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/abi"
- "internal/goarch"
- "unsafe"
-)
-
-func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(&zeroVal[0])
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
- }
- var b *bmap
- if h.B == 0 {
- // One-bucket table. No need to hash.
- b = (*bmap)(h.buckets)
- } else {
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- }
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
- if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0])
-}
-
-func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(&zeroVal[0]), false
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
- }
- var b *bmap
- if h.B == 0 {
- // One-bucket table. No need to hash.
- b = (*bmap)(h.buckets)
- } else {
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- }
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
- if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0]), false
-}
-
-func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
- if h == nil {
- panic(plainError("assignment to entry in nil map"))
- }
- if raceenabled {
- callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
- }
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher for consistency with mapassign.
- h.flags ^= hashWriting
-
- if h.buckets == nil {
- h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
- }
-
-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast64(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
-
- var insertb *bmap
- var inserti uintptr
- var insertk unsafe.Pointer
-
-bucketloop:
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if isEmpty(b.tophash[i]) {
- if insertb == nil {
- insertb = b
- inserti = i
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
- if k != key {
- continue
- }
- insertb = b
- inserti = i
- goto done
- }
- ovf := b.overflow(t)
- if ovf == nil {
- break
- }
- b = ovf
- }
-
- // Did not find mapping for key. Allocate new cell & add entry.
-
- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
- }
-
- if insertb == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- insertb = h.newoverflow(t, b)
- inserti = 0 // not necessary, but avoids needlessly spilling inserti
- }
- insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
- // store new key at insert position
- *(*uint64)(insertk) = key
-
- h.count++
-
-done:
- elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
- if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
- }
- h.flags &^= hashWriting
- return elem
-}
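The growth trigger above fires when the average load passes 6.5 entries per bucket; a sketch of that check, assuming the loadFactorNum/loadFactorDen = 13/2 constants from map.go in the same tree:

```go
package main

import "fmt"

const bucketCnt = 8

// overLoadFactor reports whether count entries across 2^B buckets
// exceed the 6.5-per-bucket threshold (computed as 13 * (2^B / 2)).
func overLoadFactor(count int, B uint8) bool {
	return count > bucketCnt && uintptr(count) > 13*((uintptr(1)<<B)/2)
}

func main() {
	for _, n := range []int{8, 52, 53} {
		fmt.Printf("count=%d B=3 grow=%v\n", n, overLoadFactor(n, 3)) // threshold is 52
	}
}
```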
-
-func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- if h == nil {
- panic(plainError("assignment to entry in nil map"))
- }
- if raceenabled {
- callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
- }
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher for consistency with mapassign.
- h.flags ^= hashWriting
-
- if h.buckets == nil {
- h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
- }
-
-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast64(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
-
- var insertb *bmap
- var inserti uintptr
- var insertk unsafe.Pointer
-
-bucketloop:
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if isEmpty(b.tophash[i]) {
- if insertb == nil {
- insertb = b
- inserti = i
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
- if k != key {
- continue
- }
- insertb = b
- inserti = i
- goto done
- }
- ovf := b.overflow(t)
- if ovf == nil {
- break
- }
- b = ovf
- }
-
- // Did not find mapping for key. Allocate new cell & add entry.
-
- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
- }
-
- if insertb == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- insertb = h.newoverflow(t, b)
- inserti = 0 // not necessary, but avoids needlessly spilling inserti
- }
- insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
- // store new key at insert position
- *(*unsafe.Pointer)(insertk) = key
-
- h.count++
-
-done:
- elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
- if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
- }
- h.flags &^= hashWriting
- return elem
-}
-
-func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
- }
- if h == nil || h.count == 0 {
- return
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
- }
-
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher for consistency with mapdelete
- h.flags ^= hashWriting
-
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast64(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
- bOrig := b
-search:
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
- if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
- continue
- }
- // Only clear key if there are pointers in it.
- if t.key.ptrdata != 0 {
- if goarch.PtrSize == 8 {
- *(*unsafe.Pointer)(k) = nil
- } else {
-				// There are three ways to squeeze one or more 32 bit pointers into 64 bits.
- // Just call memclrHasPointers instead of trying to handle all cases here.
- memclrHasPointers(k, 8)
- }
- }
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
- if t.elem.ptrdata != 0 {
- memclrHasPointers(e, t.elem.size)
- } else {
- memclrNoHeapPointers(e, t.elem.size)
- }
- b.tophash[i] = emptyOne
- // If the bucket now ends in a bunch of emptyOne states,
- // change those to emptyRest states.
- if i == bucketCnt-1 {
- if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
- goto notLast
- }
- } else {
- if b.tophash[i+1] != emptyRest {
- goto notLast
- }
- }
- for {
- b.tophash[i] = emptyRest
- if i == 0 {
- if b == bOrig {
- break // beginning of initial bucket, we're done.
- }
- // Find previous bucket, continue at its last entry.
- c := b
- for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
- }
- i = bucketCnt - 1
- } else {
- i--
- }
- if b.tophash[i] != emptyOne {
- break
- }
- }
- notLast:
- h.count--
- // Reset the hash seed to make it more difficult for attackers to
- // repeatedly trigger hash collisions. See issue 25237.
- if h.count == 0 {
- h.hash0 = fastrand()
- }
- break search
- }
- }
-
- if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
- }
- h.flags &^= hashWriting
-}
-
-func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
- // make sure we evacuate the oldbucket corresponding
- // to the bucket we're about to use
- evacuate_fast64(t, h, bucket&h.oldbucketmask())
-
- // evacuate one more oldbucket to make progress on growing
- if h.growing() {
- evacuate_fast64(t, h, h.nevacuate)
- }
-}
-
-func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
- b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- newbit := h.noldbuckets()
- if !evacuated(b) {
- // TODO: reuse overflow buckets instead of using new ones, if there
- // is no iterator using the old buckets. (If !oldIterator.)
-
- // xy contains the x and y (low and high) evacuation destinations.
- var xy [2]evacDst
- x := &xy[0]
- x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
- x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, bucketCnt*8)
-
- if !h.sameSizeGrow() {
- // Only calculate y pointers if we're growing bigger.
- // Otherwise GC can see bad pointers.
- y := &xy[1]
- y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
- y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, bucketCnt*8)
- }
-
- for ; b != nil; b = b.overflow(t) {
- k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, bucketCnt*8)
- for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
- top := b.tophash[i]
- if isEmpty(top) {
- b.tophash[i] = evacuatedEmpty
- continue
- }
- if top < minTopHash {
- throw("bad map state")
- }
- var useY uint8
- if !h.sameSizeGrow() {
- // Compute hash to make our evacuation decision (whether we need
- // to send this key/elem to bucket x or bucket y).
- hash := t.hasher(k, uintptr(h.hash0))
- if hash&newbit != 0 {
- useY = 1
- }
- }
-
- b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
- dst := &xy[useY] // evacuation destination
-
- if dst.i == bucketCnt {
- dst.b = h.newoverflow(t, dst.b)
- dst.i = 0
- dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, bucketCnt*8)
- }
- dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
-
- // Copy key.
- if t.key.ptrdata != 0 && writeBarrier.enabled {
- if goarch.PtrSize == 8 {
- // Write with a write barrier.
- *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
- } else {
- // There are three ways to squeeze at least one 32 bit pointer into 64 bits.
- // Give up and call typedmemmove.
- typedmemmove(t.key, dst.k, k)
- }
- } else {
- *(*uint64)(dst.k) = *(*uint64)(k)
- }
-
- typedmemmove(t.elem, dst.e, e)
- dst.i++
- // These updates might push these pointers past the end of the
- // key or elem arrays. That's ok, as we have the overflow pointer
- // at the end of the bucket to protect against pointing past the
- // end of the bucket.
- dst.k = add(dst.k, 8)
- dst.e = add(dst.e, uintptr(t.elemsize))
- }
- }
- // Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
- b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
- // Preserve b.tophash because the evacuation
- // state is maintained there.
- ptr := add(b, dataOffset)
- n := uintptr(t.bucketsize) - dataOffset
- memclrHasPointers(ptr, n)
- }
- }
-
- if oldbucket == h.nevacuate {
- advanceEvacuationMark(h, t, newbit)
- }
-}
diff --git a/contrib/go/_std_1.18/src/runtime/map_faststr.go b/contrib/go/_std_1.18/src/runtime/map_faststr.go
deleted file mode 100644
index 4dca882c63..0000000000
--- a/contrib/go/_std_1.18/src/runtime/map_faststr.go
+++ /dev/null
@@ -1,485 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/abi"
- "internal/goarch"
- "unsafe"
-)
-
-func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(&zeroVal[0])
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
- }
- key := stringStructOf(&ky)
- if h.B == 0 {
- // One-bucket table.
- b := (*bmap)(h.buckets)
- if key.len < 32 {
- // short key, doing lots of comparisons is ok
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
- k := (*stringStruct)(kptr)
- if k.len != key.len || isEmpty(b.tophash[i]) {
- if b.tophash[i] == emptyRest {
- break
- }
- continue
- }
- if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
- }
- }
- return unsafe.Pointer(&zeroVal[0])
- }
- // long key, try not to do more comparisons than necessary
- keymaybe := uintptr(bucketCnt)
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
- k := (*stringStruct)(kptr)
- if k.len != key.len || isEmpty(b.tophash[i]) {
- if b.tophash[i] == emptyRest {
- break
- }
- continue
- }
- if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
- }
- // check first 4 bytes
- if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
- continue
- }
- // check last 4 bytes
- if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
- continue
- }
- if keymaybe != bucketCnt {
- // Two keys are potential matches. Use hash to distinguish them.
- goto dohash
- }
- keymaybe = i
- }
- if keymaybe != bucketCnt {
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
- if memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize))
- }
- }
- return unsafe.Pointer(&zeroVal[0])
- }
-dohash:
- hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := tophash(hash)
- for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
- k := (*stringStruct)(kptr)
- if k.len != key.len || b.tophash[i] != top {
- continue
- }
- if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0])
-}
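The long-key branch above avoids full comparisons by first checking length, then the first and last 4 bytes, and only hashing when two candidates survive. A standalone sketch of that filter (toy code, not the runtime's stringStruct handling):

```go
package main

import "fmt"

// likelyEqual is the cheap pre-filter: a false result is definitive,
// a true result still needs a full comparison (memequal in the runtime).
func likelyEqual(a, b string) bool {
	if len(a) != len(b) {
		return false
	}
	if len(a) >= 4 && (a[:4] != b[:4] || a[len(a)-4:] != b[len(b)-4:]) {
		return false
	}
	return true
}

func main() {
	fmt.Println(likelyEqual("averylongkey-01", "averylongkey-02")) // false: last 4 bytes differ
	fmt.Println(likelyEqual("averylongkey-01", "averylongkey-01")) // true: full compare still required
}
```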
-
-func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(&zeroVal[0]), false
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
- }
- key := stringStructOf(&ky)
- if h.B == 0 {
- // One-bucket table.
- b := (*bmap)(h.buckets)
- if key.len < 32 {
- // short key, doing lots of comparisons is ok
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
- k := (*stringStruct)(kptr)
- if k.len != key.len || isEmpty(b.tophash[i]) {
- if b.tophash[i] == emptyRest {
- break
- }
- continue
- }
- if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
- }
- }
- return unsafe.Pointer(&zeroVal[0]), false
- }
- // long key, try not to do more comparisons than necessary
- keymaybe := uintptr(bucketCnt)
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
- k := (*stringStruct)(kptr)
- if k.len != key.len || isEmpty(b.tophash[i]) {
- if b.tophash[i] == emptyRest {
- break
- }
- continue
- }
- if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
- }
- // check first 4 bytes
- if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
- continue
- }
- // check last 4 bytes
- if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
- continue
- }
- if keymaybe != bucketCnt {
- // Two keys are potential matches. Use hash to distinguish them.
- goto dohash
- }
- keymaybe = i
- }
- if keymaybe != bucketCnt {
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
- if memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize)), true
- }
- }
- return unsafe.Pointer(&zeroVal[0]), false
- }
-dohash:
- hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := tophash(hash)
- for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
- k := (*stringStruct)(kptr)
- if k.len != key.len || b.tophash[i] != top {
- continue
- }
- if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0]), false
-}
-
-func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
- if h == nil {
- panic(plainError("assignment to entry in nil map"))
- }
- if raceenabled {
- callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
- }
- key := stringStructOf(&s)
- hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher for consistency with mapassign.
- h.flags ^= hashWriting
-
- if h.buckets == nil {
- h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
- }
-
-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_faststr(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
- top := tophash(hash)
-
- var insertb *bmap
- var inserti uintptr
- var insertk unsafe.Pointer
-
-bucketloop:
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- if isEmpty(b.tophash[i]) && insertb == nil {
- insertb = b
- inserti = i
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
- if k.len != key.len {
- continue
- }
- if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
- continue
- }
- // already have a mapping for key. Update it.
- inserti = i
- insertb = b
- // Overwrite existing key, so it can be garbage collected.
- // The size is already guaranteed to be set correctly.
- k.str = key.str
- goto done
- }
- ovf := b.overflow(t)
- if ovf == nil {
- break
- }
- b = ovf
- }
-
- // Did not find mapping for key. Allocate new cell & add entry.
-
- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
- }
-
- if insertb == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- insertb = h.newoverflow(t, b)
- inserti = 0 // not necessary, but avoids needlessly spilling inserti
- }
- insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks
-
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
- // store new key at insert position
- *((*stringStruct)(insertk)) = *key
- h.count++
-
-done:
- elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.elemsize))
- if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
- }
- h.flags &^= hashWriting
- return elem
-}
-
-func mapdelete_faststr(t *maptype, h *hmap, ky string) {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
- }
- if h == nil || h.count == 0 {
- return
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
- }
-
- key := stringStructOf(&ky)
- hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher for consistency with mapdelete
- h.flags ^= hashWriting
-
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_faststr(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
- bOrig := b
- top := tophash(hash)
-search:
- for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
- k := (*stringStruct)(kptr)
- if k.len != key.len || b.tophash[i] != top {
- continue
- }
- if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
- continue
- }
- // Clear key's pointer.
- k.str = nil
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
- if t.elem.ptrdata != 0 {
- memclrHasPointers(e, t.elem.size)
- } else {
- memclrNoHeapPointers(e, t.elem.size)
- }
- b.tophash[i] = emptyOne
- // If the bucket now ends in a bunch of emptyOne states,
- // change those to emptyRest states.
- if i == bucketCnt-1 {
- if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
- goto notLast
- }
- } else {
- if b.tophash[i+1] != emptyRest {
- goto notLast
- }
- }
- for {
- b.tophash[i] = emptyRest
- if i == 0 {
- if b == bOrig {
- break // beginning of initial bucket, we're done.
- }
- // Find previous bucket, continue at its last entry.
- c := b
- for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
- }
- i = bucketCnt - 1
- } else {
- i--
- }
- if b.tophash[i] != emptyOne {
- break
- }
- }
- notLast:
- h.count--
- // Reset the hash seed to make it more difficult for attackers to
- // repeatedly trigger hash collisions. See issue 25237.
- if h.count == 0 {
- h.hash0 = fastrand()
- }
- break search
- }
- }
-
- if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
- }
- h.flags &^= hashWriting
-}
-
-func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
- // make sure we evacuate the oldbucket corresponding
- // to the bucket we're about to use
- evacuate_faststr(t, h, bucket&h.oldbucketmask())
-
- // evacuate one more oldbucket to make progress on growing
- if h.growing() {
- evacuate_faststr(t, h, h.nevacuate)
- }
-}
-
-func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
- b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- newbit := h.noldbuckets()
- if !evacuated(b) {
- // TODO: reuse overflow buckets instead of using new ones, if there
- // is no iterator using the old buckets. (If !oldIterator.)
-
- // xy contains the x and y (low and high) evacuation destinations.
- var xy [2]evacDst
- x := &xy[0]
- x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
- x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, bucketCnt*2*goarch.PtrSize)
-
- if !h.sameSizeGrow() {
- // Only calculate y pointers if we're growing bigger.
- // Otherwise GC can see bad pointers.
- y := &xy[1]
- y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
- y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, bucketCnt*2*goarch.PtrSize)
- }
-
- for ; b != nil; b = b.overflow(t) {
- k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, bucketCnt*2*goarch.PtrSize)
- for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.elemsize)) {
- top := b.tophash[i]
- if isEmpty(top) {
- b.tophash[i] = evacuatedEmpty
- continue
- }
- if top < minTopHash {
- throw("bad map state")
- }
- var useY uint8
- if !h.sameSizeGrow() {
- // Compute hash to make our evacuation decision (whether we need
- // to send this key/elem to bucket x or bucket y).
- hash := t.hasher(k, uintptr(h.hash0))
- if hash&newbit != 0 {
- useY = 1
- }
- }
-
- b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
- dst := &xy[useY] // evacuation destination
-
- if dst.i == bucketCnt {
- dst.b = h.newoverflow(t, dst.b)
- dst.i = 0
- dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize)
- }
- dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
-
- // Copy key.
- *(*string)(dst.k) = *(*string)(k)
-
- typedmemmove(t.elem, dst.e, e)
- dst.i++
- // These updates might push these pointers past the end of the
- // key or elem arrays. That's ok, as we have the overflow pointer
- // at the end of the bucket to protect against pointing past the
- // end of the bucket.
- dst.k = add(dst.k, 2*goarch.PtrSize)
- dst.e = add(dst.e, uintptr(t.elemsize))
- }
- }
- // Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
- b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
- // Preserve b.tophash because the evacuation
- // state is maintained there.
- ptr := add(b, dataOffset)
- n := uintptr(t.bucketsize) - dataOffset
- memclrHasPointers(ptr, n)
- }
- }
-
- if oldbucket == h.nevacuate {
- advanceEvacuationMark(h, t, newbit)
- }
-}
diff --git a/contrib/go/_std_1.18/src/runtime/mbarrier.go b/contrib/go/_std_1.18/src/runtime/mbarrier.go
deleted file mode 100644
index 465c21f83f..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mbarrier.go
+++ /dev/null
@@ -1,343 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Garbage collector: write barriers.
-//
-// For the concurrent garbage collector, the Go compiler implements
-// updates to pointer-valued fields that may be in heap objects by
-// emitting calls to write barriers. The main write barrier for
-// individual pointer writes is gcWriteBarrier and is implemented in
-// assembly. This file contains write barrier entry points for bulk
-// operations. See also mwbbuf.go.
-
-package runtime
-
-import (
- "internal/abi"
- "internal/goarch"
- "unsafe"
-)
-
-// Go uses a hybrid barrier that combines a Yuasa-style deletion
-// barrier—which shades the object whose reference is being
-// overwritten—with a Dijkstra-style insertion barrier—which shades the object
-// whose reference is being written. The insertion part of the barrier
-// is necessary while the calling goroutine's stack is grey. In
-// pseudocode, the barrier is:
-//
-// writePointer(slot, ptr):
-// shade(*slot)
-// if current stack is grey:
-// shade(ptr)
-// *slot = ptr
-//
-// slot is the destination in Go code.
-// ptr is the value that goes into the slot in Go code.
-//
-// Shade indicates that it has seen a white pointer by adding the referent
-// to wbuf as well as marking it.
-//
-// The two shades and the condition work together to prevent a mutator
-// from hiding an object from the garbage collector:
-//
-// 1. shade(*slot) prevents a mutator from hiding an object by moving
-// the sole pointer to it from the heap to its stack. If it attempts
-// to unlink an object from the heap, this will shade it.
-//
-// 2. shade(ptr) prevents a mutator from hiding an object by moving
-// the sole pointer to it from its stack into a black object in the
-// heap. If it attempts to install the pointer into a black object,
-// this will shade it.
-//
-// 3. Once a goroutine's stack is black, the shade(ptr) becomes
-// unnecessary. shade(ptr) prevents hiding an object by moving it from
-// the stack to the heap, but this requires first having a pointer
-// hidden on the stack. Immediately after a stack is scanned, it only
-// points to shaded objects, so it's not hiding anything, and the
-// shade(*slot) prevents it from hiding any other pointers on its
-// stack.
-//
-// For a detailed description of this barrier and proof of
-// correctness, see https://github.com/golang/proposal/blob/master/design/17503-eliminate-rescan.md
-//
-//
-//
-// Dealing with memory ordering:
-//
-// Both the Yuasa and Dijkstra barriers can be made conditional on the
-// color of the object containing the slot. We chose not to make these
-// conditional because the cost of ensuring that the object holding
-// the slot doesn't concurrently change color without the mutator
-// noticing seems prohibitive.
-//
-// Consider the following example where the mutator writes into
-// a slot and then loads the slot's mark bit while the GC thread
-// writes to the slot's mark bit and then as part of scanning reads
-// the slot.
-//
-// Initially both [slot] and [slotmark] are 0 (nil)
-// Mutator thread GC thread
-// st [slot], ptr st [slotmark], 1
-//
-// ld r1, [slotmark] ld r2, [slot]
-//
-// Without an expensive memory barrier between the st and the ld, the final
-// result on most HW (including 386/amd64) can be r1==r2==0. This is a classic
-// example of what can happen when loads are allowed to be reordered with older
-// stores (avoiding such reorderings lies at the heart of the classic
-// Peterson/Dekker algorithms for mutual exclusion). Rather than require memory
-// barriers, which will slow down both the mutator and the GC, we always grey
-// the ptr object regardless of the slot's color.
-//
-// Another place where we intentionally omit memory barriers is when
-// accessing mheap_.arena_used to check if a pointer points into the
-// heap. On relaxed memory machines, it's possible for a mutator to
-// extend the size of the heap by updating arena_used, allocate an
-// object from this new region, and publish a pointer to that object,
-// but for tracing running on another processor to observe the pointer
-// but use the old value of arena_used. In this case, tracing will not
-// mark the object, even though it's reachable. However, the mutator
-// is guaranteed to execute a write barrier when it publishes the
-// pointer, so it will take care of marking the object. A general
-// consequence of this is that the garbage collector may cache the
-// value of mheap_.arena_used. (See issue #9984.)
-//
-//
-// Stack writes:
-//
-// The compiler omits write barriers for writes to the current frame,
-// but if a stack pointer has been passed down the call stack, the
-// compiler will generate a write barrier for writes through that
-// pointer (because it doesn't know it's not a heap pointer).
-//
-// One might be tempted to ignore the write barrier if slot points
-// into the stack. Don't do it! Mark termination only re-scans
-// frames that have potentially been active since the concurrent scan,
-// so it depends on write barriers to track changes to pointers in
-// stack frames that have not been active.
-//
-//
-// Global writes:
-//
-// The Go garbage collector requires write barriers when heap pointers
-// are stored in globals. Many garbage collectors ignore writes to
-// globals and instead pick up global -> heap pointers during
-// termination. This increases pause time, so we instead rely on write
-// barriers for writes to globals so that we don't have to rescan
-// globals during mark termination.
-//
-//
-// Publication ordering:
-//
-// The write barrier is *pre-publication*, meaning that the write
-// barrier happens prior to the *slot = ptr write that may make ptr
-// reachable by some goroutine that currently cannot reach it.
-//
-//
-// Signal handler pointer writes:
-//
-// In general, the signal handler cannot safely invoke the write
-// barrier because it may run without a P or even during the write
-// barrier.
-//
-// There is exactly one exception: profbuf.go omits a barrier during
-// signal handler profile logging. That's safe only because of the
-// deletion barrier. See profbuf.go for a detailed argument. If we
-// remove the deletion barrier, we'll have to work out a new way to
-// handle the profile logging.
-
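An executable paraphrase of the writePointer pseudocode above, with shade() reduced to a print; this is a toy model only, since the real barrier is emitted by the compiler and implemented in assembly:

```go
package main

import "fmt"

type obj struct{ name string }

func shade(o *obj) {
	if o != nil {
		fmt.Println("shade", o.name) // real code: mark o and queue it on a work buffer
	}
}

func writePointer(slot **obj, ptr *obj, stackIsGrey bool) {
	shade(*slot) // Yuasa-style deletion barrier: protect the overwritten target
	if stackIsGrey {
		shade(ptr) // Dijkstra-style insertion barrier: protect the newly written target
	}
	*slot = ptr
}

func main() {
	oldObj, newObj := &obj{"old"}, &obj{"new"}
	slot := &oldObj
	writePointer(slot, newObj, true) // prints "shade old" then "shade new"
}
```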
-// typedmemmove copies a value of type t to dst from src.
-// Must be nosplit, see #16026.
-//
-// TODO: Perfect for go:nosplitrec since we can't have a safe point
-// anywhere in the bulk barrier or memmove.
-//
-//go:nosplit
-func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
- if dst == src {
- return
- }
- if writeBarrier.needed && typ.ptrdata != 0 {
- bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
- }
- // There's a race here: if some other goroutine can write to
- // src, it may change some pointer in src after we've
- // performed the write barrier but before we perform the
-	// memory copy. This is safe because the write performed by that
- // other goroutine must also be accompanied by a write
- // barrier, so at worst we've unnecessarily greyed the old
- // pointer that was in src.
- memmove(dst, src, typ.size)
- if writeBarrier.cgo {
- cgoCheckMemmove(typ, dst, src, 0, typ.size)
- }
-}
-
-//go:linkname reflect_typedmemmove reflect.typedmemmove
-func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
- if raceenabled {
- raceWriteObjectPC(typ, dst, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
- raceReadObjectPC(typ, src, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
- }
- if msanenabled {
- msanwrite(dst, typ.size)
- msanread(src, typ.size)
- }
- if asanenabled {
- asanwrite(dst, typ.size)
- asanread(src, typ.size)
- }
- typedmemmove(typ, dst, src)
-}
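One user-level way to reach reflect_typedmemmove above is reflect.Value.Set on a pointer-containing value; a minimal sketch:

```go
package main

import (
	"fmt"
	"reflect"
)

type node struct {
	next *node
	val  int
}

func main() {
	dst := node{}
	src := node{val: 2, next: &node{val: 1}}
	// Set copies the whole struct with a typed memmove, so the embedded
	// pointer is covered by the bulk write barrier.
	reflect.ValueOf(&dst).Elem().Set(reflect.ValueOf(src))
	fmt.Println(dst.val, dst.next.val) // 2 1
}
```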
-
-//go:linkname reflectlite_typedmemmove internal/reflectlite.typedmemmove
-func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
- reflect_typedmemmove(typ, dst, src)
-}
-
-// typedmemmovepartial is like typedmemmove but assumes that
-// dst and src point off bytes into the value and only copies size bytes.
-// off must be a multiple of goarch.PtrSize.
-//go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
-func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
- if writeBarrier.needed && typ.ptrdata > off && size >= goarch.PtrSize {
- if off&(goarch.PtrSize-1) != 0 {
- panic("reflect: internal error: misaligned offset")
- }
- pwsize := alignDown(size, goarch.PtrSize)
- if poff := typ.ptrdata - off; pwsize > poff {
- pwsize = poff
- }
- bulkBarrierPreWrite(uintptr(dst), uintptr(src), pwsize)
- }
-
- memmove(dst, src, size)
- if writeBarrier.cgo {
- cgoCheckMemmove(typ, dst, src, off, size)
- }
-}
-
-// reflectcallmove is invoked by reflectcall to copy the return values
-// out of the stack and into the heap, invoking the necessary write
-// barriers. dst, src, and size describe the return value area to
-// copy. typ describes the entire frame (not just the return values).
-// typ may be nil, which indicates write barriers are not needed.
-//
-// It must be nosplit and must only call nosplit functions because the
-// stack map of reflectcall is wrong.
-//
-//go:nosplit
-func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
- if writeBarrier.needed && typ != nil && typ.ptrdata != 0 && size >= goarch.PtrSize {
- bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
- }
- memmove(dst, src, size)
-
- // Move pointers returned in registers to a place where the GC can see them.
- for i := range regs.Ints {
- if regs.ReturnIsPtr.Get(i) {
- regs.Ptrs[i] = unsafe.Pointer(regs.Ints[i])
- }
- }
-}
-
-//go:nosplit
-func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int {
- n := dstLen
- if n > srcLen {
- n = srcLen
- }
- if n == 0 {
- return 0
- }
-
- // The compiler emits calls to typedslicecopy before
- // instrumentation runs, so unlike the other copying and
- // assignment operations, it's not instrumented in the calling
- // code and needs its own instrumentation.
- if raceenabled {
- callerpc := getcallerpc()
- pc := abi.FuncPCABIInternal(slicecopy)
- racewriterangepc(dstPtr, uintptr(n)*typ.size, callerpc, pc)
- racereadrangepc(srcPtr, uintptr(n)*typ.size, callerpc, pc)
- }
- if msanenabled {
- msanwrite(dstPtr, uintptr(n)*typ.size)
- msanread(srcPtr, uintptr(n)*typ.size)
- }
- if asanenabled {
- asanwrite(dstPtr, uintptr(n)*typ.size)
- asanread(srcPtr, uintptr(n)*typ.size)
- }
-
- if writeBarrier.cgo {
- cgoCheckSliceCopy(typ, dstPtr, srcPtr, n)
- }
-
- if dstPtr == srcPtr {
- return n
- }
-
- // Note: No point in checking typ.ptrdata here:
- // compiler only emits calls to typedslicecopy for types with pointers,
- // and growslice and reflect_typedslicecopy check for pointers
- // before calling typedslicecopy.
- size := uintptr(n) * typ.size
- if writeBarrier.needed {
- pwsize := size - typ.size + typ.ptrdata
- bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize)
- }
- // See typedmemmove for a discussion of the race between the
- // barrier and memmove.
- memmove(dstPtr, srcPtr, size)
- return n
-}
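typedslicecopy above is what a plain copy() between pointer-containing slices compiles to; a minimal illustration:

```go
package main

import "fmt"

func main() {
	src := []*int{new(int), new(int), new(int)}
	dst := make([]*int, 2)
	// The compiler lowers this to typedslicecopy because *int contains a
	// pointer; each copied element is covered by the bulk write barrier.
	n := copy(dst, src)
	fmt.Println(n, *dst[0], *dst[1]) // 2 0 0: min(len(dst), len(src)) elements copied
}
```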
-
-//go:linkname reflect_typedslicecopy reflect.typedslicecopy
-func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
- if elemType.ptrdata == 0 {
- return slicecopy(dst.array, dst.len, src.array, src.len, elemType.size)
- }
- return typedslicecopy(elemType, dst.array, dst.len, src.array, src.len)
-}
-
-// typedmemclr clears the typed memory at ptr with type typ. The
-// memory at ptr must already be initialized (and hence in type-safe
-// state). If the memory is being initialized for the first time, see
-// memclrNoHeapPointers.
-//
-// If the caller knows that typ has pointers, it can alternatively
-// call memclrHasPointers.
-//
-//go:nosplit
-func typedmemclr(typ *_type, ptr unsafe.Pointer) {
- if writeBarrier.needed && typ.ptrdata != 0 {
- bulkBarrierPreWrite(uintptr(ptr), 0, typ.ptrdata)
- }
- memclrNoHeapPointers(ptr, typ.size)
-}
-
-//go:linkname reflect_typedmemclr reflect.typedmemclr
-func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
- typedmemclr(typ, ptr)
-}
-
-//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
-func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
- if writeBarrier.needed && typ.ptrdata != 0 {
- bulkBarrierPreWrite(uintptr(ptr), 0, size)
- }
- memclrNoHeapPointers(ptr, size)
-}
-
-// memclrHasPointers clears n bytes of typed memory starting at ptr.
-// The caller must ensure that the type of the object at ptr has
-// pointers, usually by checking typ.ptrdata. However, ptr
-// does not have to point to the start of the allocation.
-//
-//go:nosplit
-func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
- bulkBarrierPreWrite(uintptr(ptr), 0, n)
- memclrNoHeapPointers(ptr, n)
-}
diff --git a/contrib/go/_std_1.18/src/runtime/mbitmap.go b/contrib/go/_std_1.18/src/runtime/mbitmap.go
deleted file mode 100644
index 937968807b..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mbitmap.go
+++ /dev/null
@@ -1,2043 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Garbage collector: type and heap bitmaps.
-//
-// Stack, data, and bss bitmaps
-//
-// Stack frames and global variables in the data and bss sections are
-// described by bitmaps with 1 bit per pointer-sized word. A "1" bit
-// means the word is a live pointer to be visited by the GC (referred to
-// as "pointer"). A "0" bit means the word should be ignored by GC
-// (referred to as "scalar", though it could be a dead pointer value).
-//
-// Heap bitmap
-//
-// The heap bitmap comprises 2 bits for each pointer-sized word in the heap,
-// stored in the heapArena metadata backing each heap arena.
-// That is, if ha is the heapArena for the arena starting at start,
-// then ha.bitmap[0] holds the 2-bit entries for the four words start
-// through start+3*ptrSize, ha.bitmap[1] holds the entries for
-// start+4*ptrSize through start+7*ptrSize, and so on.
-//
-// In each 2-bit entry, the lower bit is a pointer/scalar bit, just
-// like in the stack/data bitmaps described above. The upper bit
-// indicates scan/dead: a "1" value ("scan") indicates that there may
-// be pointers in later words of the allocation, and a "0" value
-// ("dead") indicates there are no more pointers in the allocation. If
-// the upper bit is 0, the lower bit must also be 0, and this
-// indicates scanning can ignore the rest of the allocation.
-//
-// The 2-bit entries are split when written into the byte, so that the top half
-// of the byte contains 4 high (scan) bits and the bottom half contains 4 low
-// (pointer) bits. This form allows a copy from the 1-bit to the 4-bit form to
-// keep the pointer bits contiguous, instead of having to space them out.
-//
-// The code makes use of the fact that the zero value for a heap
-// bitmap means scalar/dead. This property must be preserved when
-// modifying the encoding.
-//
-// The bitmap for noscan spans is not maintained. Code must ensure
-// that an object is scannable before consulting its bitmap, either by
-// checking the noscan bit in the span or by consulting the object's
-// type information.
-
-package runtime
-
-import (
- "internal/goarch"
- "runtime/internal/atomic"
- "runtime/internal/sys"
- "unsafe"
-)
-
-const (
- bitPointer = 1 << 0
- bitScan = 1 << 4
-
- heapBitsShift = 1 // shift offset between successive bitPointer or bitScan entries
- wordsPerBitmapByte = 8 / 2 // heap words described by one bitmap byte
-
- // all scan/pointer bits in a byte
- bitScanAll = bitScan | bitScan<<heapBitsShift | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
- bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift)
-)
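-
-// For illustration only (not part of the original source): under the
-// split layout above, one bitmap byte describes four heap words, with
-// the four pointer bits in the low nibble and the four scan bits in
-// the high nibble. A minimal sketch for a hypothetical object whose
-// first and third words are pointers, with all four scan bits set:
-//
-//	b := uint8(bitPointer<<(0*heapBitsShift)) | // word 0 is a pointer
-//		uint8(bitPointer<<(2*heapBitsShift)) | // word 2 is a pointer
-//		uint8(bitScanAll) // scan bits for all four words
-//	// b == 0xf5: low nibble 0101 (pointer bits), high nibble 1111 (scan bits).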
-
-// addb returns the byte pointer p+n.
-//go:nowritebarrier
-//go:nosplit
-func addb(p *byte, n uintptr) *byte {
- // Note: wrote out full expression instead of calling add(p, n)
- // to reduce the number of temporaries generated by the
- // compiler for this trivial expression during inlining.
- return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
-}
-
-// subtractb returns the byte pointer p-n.
-//go:nowritebarrier
-//go:nosplit
-func subtractb(p *byte, n uintptr) *byte {
- // Note: wrote out full expression instead of calling add(p, -n)
- // to reduce the number of temporaries generated by the
- // compiler for this trivial expression during inlining.
- return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
-}
-
-// add1 returns the byte pointer p+1.
-//go:nowritebarrier
-//go:nosplit
-func add1(p *byte) *byte {
- // Note: wrote out full expression instead of calling addb(p, 1)
- // to reduce the number of temporaries generated by the
- // compiler for this trivial expression during inlining.
- return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
-}
-
-// subtract1 returns the byte pointer p-1.
-//go:nowritebarrier
-//
-// nosplit because it is used during write barriers and must not be preempted.
-//go:nosplit
-func subtract1(p *byte) *byte {
- // Note: wrote out full expression instead of calling subtractb(p, 1)
- // to reduce the number of temporaries generated by the
- // compiler for this trivial expression during inlining.
- return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
-}
-
-// heapBits provides access to the bitmap bits for a single heap word.
-// The methods on heapBits take value receivers so that the compiler
-// can more easily inline calls to those methods and registerize the
-// struct fields independently.
-type heapBits struct {
- bitp *uint8
- shift uint32
- arena uint32 // Index of heap arena containing bitp
- last *uint8 // Last byte of the arena's bitmap
-}
-
-// Make the compiler check that heapBits.arena is large enough to hold
-// the maximum arena frame number.
-var _ = heapBits{arena: (1<<heapAddrBits)/heapArenaBytes - 1}
-
-// markBits provides access to the mark bit for an object in the heap.
-// bytep points to the byte holding the mark bit.
-// mask is a byte with a single bit set that can be &ed with *bytep
-// to see if the bit has been set.
-// *m.bytep&m.mask != 0 indicates the mark bit is set.
-// index can be used along with span information to generate
-// the address of the object in the heap.
-// We maintain one set of mark bits for allocation and one for
-// marking purposes.
-type markBits struct {
- bytep *uint8
- mask uint8
- index uintptr
-}
-
-//go:nosplit
-func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
- bytep, mask := s.allocBits.bitp(allocBitIndex)
- return markBits{bytep, mask, allocBitIndex}
-}
-
-// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
-// and negates them so that ctz (count trailing zeros) instructions
-// can be used. It then places these 8 bytes into the cached 64 bit
-// s.allocCache.
-func (s *mspan) refillAllocCache(whichByte uintptr) {
- bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
- aCache := uint64(0)
- aCache |= uint64(bytes[0])
- aCache |= uint64(bytes[1]) << (1 * 8)
- aCache |= uint64(bytes[2]) << (2 * 8)
- aCache |= uint64(bytes[3]) << (3 * 8)
- aCache |= uint64(bytes[4]) << (4 * 8)
- aCache |= uint64(bytes[5]) << (5 * 8)
- aCache |= uint64(bytes[6]) << (6 * 8)
- aCache |= uint64(bytes[7]) << (7 * 8)
- s.allocCache = ^aCache
-}
-
-// nextFreeIndex returns the index of the next free object in s at
-// or after s.freeindex.
-// There are hardware instructions that can be used to make this
-// faster if profiling warrants it.
-func (s *mspan) nextFreeIndex() uintptr {
- sfreeindex := s.freeindex
- snelems := s.nelems
- if sfreeindex == snelems {
- return sfreeindex
- }
- if sfreeindex > snelems {
- throw("s.freeindex > s.nelems")
- }
-
- aCache := s.allocCache
-
- bitIndex := sys.Ctz64(aCache)
- for bitIndex == 64 {
- // Move index to start of next cached bits.
- sfreeindex = (sfreeindex + 64) &^ (64 - 1)
- if sfreeindex >= snelems {
- s.freeindex = snelems
- return snelems
- }
- whichByte := sfreeindex / 8
- // Refill s.allocCache with the next 64 alloc bits.
- s.refillAllocCache(whichByte)
- aCache = s.allocCache
- bitIndex = sys.Ctz64(aCache)
- // nothing available in cached bits
- // grab the next 8 bytes and try again.
- }
- result := sfreeindex + uintptr(bitIndex)
- if result >= snelems {
- s.freeindex = snelems
- return snelems
- }
-
- s.allocCache >>= uint(bitIndex + 1)
- sfreeindex = result + 1
-
- if sfreeindex%64 == 0 && sfreeindex != snelems {
- // We just incremented s.freeindex so it isn't 0.
- // As each 1 in s.allocCache was encountered and used for allocation
- // it was shifted away. At this point s.allocCache contains all 0s.
- // Refill s.allocCache so that it corresponds
- // to the bits at s.allocBits starting at s.freeindex.
- whichByte := sfreeindex / 8
- s.refillAllocCache(whichByte)
- }
- s.freeindex = sfreeindex
- return result
-}
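-
-// For illustration only (not part of the original source): the
-// allocCache scan above in miniature. refillAllocCache negates the
-// alloc bits so that a set bit means "free", and a single
-// count-trailing-zeros then finds the next free slot. A user-space
-// sketch using math/bits in place of the runtime's sys.Ctz64:
-//
-//	allocBits := uint64(0b00010111)     // objects 0, 1, 2, and 4 allocated
-//	cache := ^allocBits                 // now a set bit means "free"
-//	next := bits.TrailingZeros64(cache) // == 3, the first free index
-//	cache >>= uint(next + 1)            // consume it, as nextFreeIndex does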
-
-// isFree reports whether the index'th object in s is unallocated.
-//
-// The caller must ensure s.state is mSpanInUse, and there must have
-// been no preemption points since ensuring this (which could allow a
-// GC transition, which would allow the state to change).
-func (s *mspan) isFree(index uintptr) bool {
- if index < s.freeindex {
- return false
- }
- bytep, mask := s.allocBits.bitp(index)
- return *bytep&mask == 0
-}
-
-// divideByElemSize returns n/s.elemsize.
-// n must be within [0, s.npages*_PageSize),
-// or may be exactly s.npages*_PageSize
-// if s.elemsize is from sizeclasses.go.
-func (s *mspan) divideByElemSize(n uintptr) uintptr {
- const doubleCheck = false
-
- // See explanation in mksizeclasses.go's computeDivMagic.
- q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)
-
- if doubleCheck && q != n/s.elemsize {
- println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
- throw("bad magic division")
- }
- return q
-}
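-
-// For illustration only (not part of the original source): the
-// magic-number division above in miniature, assuming divMul is the
-// rounded-up reciprocal ceil(2^32/elemsize) (see computeDivMagic in
-// mksizeclasses.go for the real derivation and its validity bounds):
-//
-//	elemsize := uint64(48)
-//	divMul := (1<<32 + elemsize - 1) / elemsize // 89478486
-//	n := uint64(480)
-//	q := (n * divMul) >> 32 // == 10 == n/elemsize, with no division at runtime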
-
-func (s *mspan) objIndex(p uintptr) uintptr {
- return s.divideByElemSize(p - s.base())
-}
-
-func markBitsForAddr(p uintptr) markBits {
- s := spanOf(p)
- objIndex := s.objIndex(p)
- return s.markBitsForIndex(objIndex)
-}
-
-func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
- bytep, mask := s.gcmarkBits.bitp(objIndex)
- return markBits{bytep, mask, objIndex}
-}
-
-func (s *mspan) markBitsForBase() markBits {
- return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
-}
-
-// isMarked reports whether mark bit m is set.
-func (m markBits) isMarked() bool {
- return *m.bytep&m.mask != 0
-}
-
-// setMarked sets the marked bit in the markbits, atomically.
-func (m markBits) setMarked() {
- // Might be racing with other updates, so use atomic update always.
- // We used to be clever here and use a non-atomic update in certain
- // cases, but it's not worth the risk.
- atomic.Or8(m.bytep, m.mask)
-}
-
-// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
-func (m markBits) setMarkedNonAtomic() {
- *m.bytep |= m.mask
-}
-
-// clearMarked clears the marked bit in the markbits, atomically.
-func (m markBits) clearMarked() {
- // Might be racing with other updates, so use atomic update always.
- // We used to be clever here and use a non-atomic update in certain
- // cases, but it's not worth the risk.
- atomic.And8(m.bytep, ^m.mask)
-}
-
-// markBitsForSpan returns the markBits for the span base address base.
-func markBitsForSpan(base uintptr) (mbits markBits) {
- mbits = markBitsForAddr(base)
- if mbits.mask != 1 {
- throw("markBitsForSpan: unaligned start")
- }
- return mbits
-}
-
-// advance advances the markBits to the next object in the span.
-func (m *markBits) advance() {
- if m.mask == 1<<7 {
- m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
- m.mask = 1
- } else {
- m.mask = m.mask << 1
- }
- m.index++
-}
-
-// heapBitsForAddr returns the heapBits for the address addr.
-// The caller must ensure addr is in an allocated span.
-// In particular, be careful not to point past the end of an object.
-//
-// nosplit because it is used during write barriers and must not be preempted.
-//go:nosplit
-func heapBitsForAddr(addr uintptr) (h heapBits) {
- // 2 bits per word, 4 pairs per byte, and a mask is hard coded.
- arena := arenaIndex(addr)
- ha := mheap_.arenas[arena.l1()][arena.l2()]
- // The compiler uses a load for nil checking ha, but in this
- // case we'll almost never hit that cache line again, so it
- // makes more sense to do a value check.
- if ha == nil {
- // addr is not in the heap. Return nil heapBits, which
- // we expect to crash in the caller.
- return
- }
- h.bitp = &ha.bitmap[(addr/(goarch.PtrSize*4))%heapArenaBitmapBytes]
- h.shift = uint32((addr / goarch.PtrSize) & 3)
- h.arena = uint32(arena)
- h.last = &ha.bitmap[len(ha.bitmap)-1]
- return
-}
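-
-// For illustration only (not part of the original source): the index
-// arithmetic above, worked for one hypothetical 64-bit address. Each
-// bitmap byte covers goarch.PtrSize*4 = 32 bytes of heap, so for an
-// address 96 bytes past its arena start:
-//
-//	byteIdx := (96 / (8 * 4)) % heapArenaBitmapBytes // byte 3 of the arena's bitmap
-//	shift := (96 / 8) & 3                            // word 12 -> pair 0 of that byte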
-
-// clobberdeadPtr is a special value that is used by the compiler to
-// clobber dead stack slots, when -clobberdead flag is set.
-const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))
-
-// badPointer throws a "found bad pointer in Go heap" panic.
-func badPointer(s *mspan, p, refBase, refOff uintptr) {
- // Typically this indicates an incorrect use
- // of unsafe or cgo to store a bad pointer in
- // the Go heap. It may also indicate a runtime
- // bug.
- //
- // TODO(austin): We could be more aggressive
- // and detect pointers to unallocated objects
- // in allocated spans.
- printlock()
- print("runtime: pointer ", hex(p))
- if s != nil {
- state := s.state.get()
- if state != mSpanInUse {
- print(" to unallocated span")
- } else {
- print(" to unused region of span")
- }
- print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
- }
- print("\n")
- if refBase != 0 {
- print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
- gcDumpObject("object", refBase, refOff)
- }
- getg().m.traceback = 2
- throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
-}
-
-// findObject returns the base address for the heap object containing
-// the address p, the object's span, and the index of the object in s.
-// If p does not point into a heap object, it returns base == 0.
-//
-// If p is an invalid heap pointer and debug.invalidptr != 0,
-// findObject panics.
-//
-// refBase and refOff optionally give the base address of the object
-// in which the pointer p was found and the byte offset at which it
-// was found. These are used for error reporting.
-//
-// It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
-// Since p is a uintptr, it would not be adjusted if the stack were to move.
-//go:nosplit
-func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
- s = spanOf(p)
- // If s is nil, the virtual address has never been part of the heap.
- // This pointer may be to some mmap'd region, so we allow it.
- if s == nil {
- if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
- // Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
- // as they are the only platforms where the compiler's clobberdead mode
- // is implemented. On these platforms clobberdeadPtr cannot be a valid address.
- badPointer(s, p, refBase, refOff)
- }
- return
- }
- // If p is a bad pointer, it may not be in s's bounds.
- //
- // Check s.state to synchronize with span initialization
- // before checking other fields. See also spanOfHeap.
- if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
- // Pointers into stacks are also ok, the runtime manages these explicitly.
- if state == mSpanManual {
- return
- }
- // The following ensures that we are rigorous about what data
- // structures hold valid pointers.
- if debug.invalidptr != 0 {
- badPointer(s, p, refBase, refOff)
- }
- return
- }
-
- objIndex = s.objIndex(p)
- base = s.base() + objIndex*s.elemsize
- return
-}
-
-// verifyNotInHeapPtr reports whether converting the not-in-heap pointer into an unsafe.Pointer is ok.
-//go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
-func reflect_verifyNotInHeapPtr(p uintptr) bool {
- // Conversion to a pointer is ok as long as findObject above does not call badPointer.
- // Since we're already promised that p doesn't point into the heap, just disallow heap
- // pointers and the special clobbered pointer.
- return spanOf(p) == nil && p != clobberdeadPtr
-}
-
-// next returns the heapBits describing the next pointer-sized word in memory.
-// That is, if h describes address p, h.next() describes p+ptrSize.
-// Note that next does not modify h. The caller must record the result.
-//
-// nosplit because it is used during write barriers and must not be preempted.
-//go:nosplit
-func (h heapBits) next() heapBits {
- if h.shift < 3*heapBitsShift {
- h.shift += heapBitsShift
- } else if h.bitp != h.last {
- h.bitp, h.shift = add1(h.bitp), 0
- } else {
- // Move to the next arena.
- return h.nextArena()
- }
- return h
-}
-
-// nextArena advances h to the beginning of the next heap arena.
-//
-// This is a slow-path helper to next. gc's inliner knows that
-// heapBits.next can be inlined even though it calls this. This is
-// marked noinline so it doesn't get inlined into next and cause next
-// to be too big to inline.
-//
-//go:nosplit
-//go:noinline
-func (h heapBits) nextArena() heapBits {
- h.arena++
- ai := arenaIdx(h.arena)
- l2 := mheap_.arenas[ai.l1()]
- if l2 == nil {
- // We just passed the end of the object, which
- // was also the end of the heap. Poison h. It
- // should never be dereferenced at this point.
- return heapBits{}
- }
- ha := l2[ai.l2()]
- if ha == nil {
- return heapBits{}
- }
- h.bitp, h.shift = &ha.bitmap[0], 0
- h.last = &ha.bitmap[len(ha.bitmap)-1]
- return h
-}
-
-// forward returns the heapBits describing n pointer-sized words ahead of h in memory.
-// That is, if h describes address p, h.forward(n) describes p+n*ptrSize.
-// h.forward(1) is equivalent to h.next(), just slower.
-// Note that forward does not modify h. The caller must record the result.
-//go:nosplit
-func (h heapBits) forward(n uintptr) heapBits {
- n += uintptr(h.shift) / heapBitsShift
- nbitp := uintptr(unsafe.Pointer(h.bitp)) + n/4
- h.shift = uint32(n%4) * heapBitsShift
- if nbitp <= uintptr(unsafe.Pointer(h.last)) {
- h.bitp = (*uint8)(unsafe.Pointer(nbitp))
- return h
- }
-
- // We're in a new heap arena.
- past := nbitp - (uintptr(unsafe.Pointer(h.last)) + 1)
- h.arena += 1 + uint32(past/heapArenaBitmapBytes)
- ai := arenaIdx(h.arena)
- if l2 := mheap_.arenas[ai.l1()]; l2 != nil && l2[ai.l2()] != nil {
- a := l2[ai.l2()]
- h.bitp = &a.bitmap[past%heapArenaBitmapBytes]
- h.last = &a.bitmap[len(a.bitmap)-1]
- } else {
- h.bitp, h.last = nil, nil
- }
- return h
-}
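-
-// For illustration only (not part of the original source): forward's
-// arithmetic, worked for one case. Starting from h.shift == 2 (pair 2
-// of the current byte), h.forward(5) computes:
-//
-//	n := uintptr(5) + 2/heapBitsShift // 7 pairs, counted from the byte start
-//	// bitp advances n/4 == 1 byte; the new shift is (n%4)*heapBitsShift == 3,
-//	// i.e. pair 3 of the next bitmap byte: exactly 5 words ahead of where h was.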
-
-// forwardOrBoundary is like forward, but stops at boundaries between
-// contiguous sections of the bitmap. It returns the number of words
-// advanced over, which will be <= n.
-func (h heapBits) forwardOrBoundary(n uintptr) (heapBits, uintptr) {
- maxn := 4 * ((uintptr(unsafe.Pointer(h.last)) + 1) - uintptr(unsafe.Pointer(h.bitp)))
- if n > maxn {
- n = maxn
- }
- return h.forward(n), n
-}
-
-// bits returns the heap bits for the current word.
-// The caller can test morePointers and isPointer by &-ing with bitScan and bitPointer.
-// The result includes in its higher bits the bits for subsequent words
-// described by the same bitmap byte.
-//
-// nosplit because it is used during write barriers and must not be preempted.
-//go:nosplit
-func (h heapBits) bits() uint32 {
- // The (shift & 31) eliminates a test and conditional branch
- // from the generated code.
- return uint32(*h.bitp) >> (h.shift & 31)
-}
-
-// morePointers reports whether this word or any remaining word in this
-// object may contain a pointer (that is, whether the scan bit is set).
-// h must not describe the second word of the object.
-func (h heapBits) morePointers() bool {
- return h.bits()&bitScan != 0
-}
-
-// isPointer reports whether the heap bits describe a pointer word.
-//
-// nosplit because it is used during write barriers and must not be preempted.
-//go:nosplit
-func (h heapBits) isPointer() bool {
- return h.bits()&bitPointer != 0
-}
-
-// bulkBarrierPreWrite executes a write barrier
-// for every pointer slot in the memory range [src, src+size),
-// using pointer/scalar information from [dst, dst+size).
-// This executes the write barriers necessary before a memmove.
-// src, dst, and size must be pointer-aligned.
-// The range [dst, dst+size) must lie within a single object.
-// It does not perform the actual writes.
-//
-// As a special case, src == 0 indicates that this is being used for a
-// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
-// barrier.
-//
-// Callers should call bulkBarrierPreWrite immediately before
-// calling memmove(dst, src, size). This function is marked nosplit
-// to avoid being preempted; the GC must not stop the goroutine
-// between the memmove and the execution of the barriers.
-// The caller is also responsible for cgo pointer checks if this
-// may be writing Go pointers into non-Go memory.
-//
-// The pointer bitmap is not maintained for allocations containing
-// no pointers at all; any caller of bulkBarrierPreWrite must first
-// make sure the underlying allocation contains pointers, usually
-// by checking typ.ptrdata.
-//
-// Callers must perform cgo checks if writeBarrier.cgo.
-//
-//go:nosplit
-func bulkBarrierPreWrite(dst, src, size uintptr) {
- if (dst|src|size)&(goarch.PtrSize-1) != 0 {
- throw("bulkBarrierPreWrite: unaligned arguments")
- }
- if !writeBarrier.needed {
- return
- }
- if s := spanOf(dst); s == nil {
- // If dst is a global, use the data or BSS bitmaps to
- // execute write barriers.
- for _, datap := range activeModules() {
- if datap.data <= dst && dst < datap.edata {
- bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
- return
- }
- }
- for _, datap := range activeModules() {
- if datap.bss <= dst && dst < datap.ebss {
- bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
- return
- }
- }
- return
- } else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
- // dst was heap memory at some point, but isn't now.
- // It can't be a global. It must be either our stack,
- // or in the case of direct channel sends, it could be
- // another stack. Either way, no need for barriers.
- // This will also catch if dst is in a freed span,
- // though that should never happen.
- return
- }
-
- buf := &getg().m.p.ptr().wbBuf
- h := heapBitsForAddr(dst)
- if src == 0 {
- for i := uintptr(0); i < size; i += goarch.PtrSize {
- if h.isPointer() {
- dstx := (*uintptr)(unsafe.Pointer(dst + i))
- if !buf.putFast(*dstx, 0) {
- wbBufFlush(nil, 0)
- }
- }
- h = h.next()
- }
- } else {
- for i := uintptr(0); i < size; i += goarch.PtrSize {
- if h.isPointer() {
- dstx := (*uintptr)(unsafe.Pointer(dst + i))
- srcx := (*uintptr)(unsafe.Pointer(src + i))
- if !buf.putFast(*dstx, *srcx) {
- wbBufFlush(nil, 0)
- }
- }
- h = h.next()
- }
- }
-}
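-
-// For illustration only (not part of the original source): how a
-// caller is expected to pair bulkBarrierPreWrite with memmove. A
-// sketch in the shape of typedmemmove, eliding its cgo checks and
-// stack-growth handling:
-//
-//	if writeBarrier.needed && typ.ptrdata != 0 {
-//		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
-//	}
-//	memmove(dst, src, typ.size) // no preemption between the barriers and the copy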
-
-// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
-// does not execute write barriers for [dst, dst+size).
-//
-// In addition to the requirements of bulkBarrierPreWrite
-// callers need to ensure [dst, dst+size) is zeroed.
-//
-// This is used for special cases where e.g. dst was just
-// created and zeroed with malloc.
-//go:nosplit
-func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
- if (dst|src|size)&(goarch.PtrSize-1) != 0 {
- throw("bulkBarrierPreWrite: unaligned arguments")
- }
- if !writeBarrier.needed {
- return
- }
- buf := &getg().m.p.ptr().wbBuf
- h := heapBitsForAddr(dst)
- for i := uintptr(0); i < size; i += goarch.PtrSize {
- if h.isPointer() {
- srcx := (*uintptr)(unsafe.Pointer(src + i))
- if !buf.putFast(0, *srcx) {
- wbBufFlush(nil, 0)
- }
- }
- h = h.next()
- }
-}
-
-// bulkBarrierBitmap executes write barriers for copying from [src,
-// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. dst is
-// assumed to start maskOffset bytes into the data covered by the
-// bitmap in bits (which may not be a multiple of 8).
-//
-// This is used by bulkBarrierPreWrite for writes to data and BSS.
-//
-//go:nosplit
-func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
- word := maskOffset / goarch.PtrSize
- bits = addb(bits, word/8)
- mask := uint8(1) << (word % 8)
-
- buf := &getg().m.p.ptr().wbBuf
- for i := uintptr(0); i < size; i += goarch.PtrSize {
- if mask == 0 {
- bits = addb(bits, 1)
- if *bits == 0 {
- // Skip 8 words.
- i += 7 * goarch.PtrSize
- continue
- }
- mask = 1
- }
- if *bits&mask != 0 {
- dstx := (*uintptr)(unsafe.Pointer(dst + i))
- if src == 0 {
- if !buf.putFast(*dstx, 0) {
- wbBufFlush(nil, 0)
- }
- } else {
- srcx := (*uintptr)(unsafe.Pointer(src + i))
- if !buf.putFast(*dstx, *srcx) {
- wbBufFlush(nil, 0)
- }
- }
- }
- mask <<= 1
- }
-}
-
-// typeBitsBulkBarrier executes a write barrier for every
-// pointer that would be copied from [src, src+size) to [dst,
-// dst+size) by a memmove using the type bitmap to locate those
-// pointer slots.
-//
-// The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
-// dst, src, and size must be pointer-aligned.
-// The type typ must have a plain bitmap, not a GC program.
-// The only use of this function is in channel sends, and the
-// 64 kB channel element limit takes care of this for us.
-//
-// Must not be preempted because it typically runs right before memmove,
-// and the GC must observe the barriers and the memmove as a single atomic action.
-//
-// Callers must perform cgo checks if writeBarrier.cgo.
-//
-//go:nosplit
-func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
- if typ == nil {
- throw("runtime: typeBitsBulkBarrier without type")
- }
- if typ.size != size {
- println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size)
- throw("runtime: invalid typeBitsBulkBarrier")
- }
- if typ.kind&kindGCProg != 0 {
- println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog")
- throw("runtime: invalid typeBitsBulkBarrier")
- }
- if !writeBarrier.needed {
- return
- }
- ptrmask := typ.gcdata
- buf := &getg().m.p.ptr().wbBuf
- var bits uint32
- for i := uintptr(0); i < typ.ptrdata; i += goarch.PtrSize {
- if i&(goarch.PtrSize*8-1) == 0 {
- bits = uint32(*ptrmask)
- ptrmask = addb(ptrmask, 1)
- } else {
- bits = bits >> 1
- }
- if bits&1 != 0 {
- dstx := (*uintptr)(unsafe.Pointer(dst + i))
- srcx := (*uintptr)(unsafe.Pointer(src + i))
- if !buf.putFast(*dstx, *srcx) {
- wbBufFlush(nil, 0)
- }
- }
- }
-}
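-
-// For illustration only (not part of the original source): the one
-// caller mentioned above is the direct-send path in chan.go, which
-// looks roughly like:
-//
-//	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
-//	memmove(dst, src, t.size)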
-
-// The methods operating on spans all require that h has been returned
-// by heapBitsForSpan and that size, n, total are the span layout description
-// returned by the mspan's layout method.
-// If total > size*n, it means that there is extra leftover memory in the span,
-// usually due to rounding.
-//
-// TODO(rsc): Perhaps introduce a different heapBitsSpan type.
-
-// initSpan initializes the heap bitmap for a span.
-// If this is a span of pointer-sized objects, it initializes all
-// words to pointer/scan.
-// Otherwise, it initializes all words to scalar/dead.
-func (h heapBits) initSpan(s *mspan) {
- // Clear bits corresponding to objects.
- nw := (s.npages << _PageShift) / goarch.PtrSize
- if nw%wordsPerBitmapByte != 0 {
- throw("initSpan: unaligned length")
- }
- if h.shift != 0 {
- throw("initSpan: unaligned base")
- }
- isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
- for nw > 0 {
- hNext, anw := h.forwardOrBoundary(nw)
- nbyte := anw / wordsPerBitmapByte
- if isPtrs {
- bitp := h.bitp
- for i := uintptr(0); i < nbyte; i++ {
- *bitp = bitPointerAll | bitScanAll
- bitp = add1(bitp)
- }
- } else {
- memclrNoHeapPointers(unsafe.Pointer(h.bitp), nbyte)
- }
- h = hNext
- nw -= anw
- }
-}
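-
-// For illustration only (not part of the original source): the sizes
-// involved above for a one-page span of 8-byte objects on 64-bit,
-// assuming 8 KiB pages (_PageShift == 13):
-//
-//	nw := (uintptr(1) << 13) / 8     // 1024 heap words on the page
-//	nbyte := nw / wordsPerBitmapByte // 256 bitmap bytes, each set to
-//	                                 // bitPointerAll|bitScanAll == 0xff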
-
-// countAlloc returns the number of objects allocated in span s by
-// scanning the allocation bitmap.
-func (s *mspan) countAlloc() int {
- count := 0
- bytes := divRoundUp(s.nelems, 8)
- // Iterate over each 8-byte chunk and count allocations
- // with an intrinsic. Note that newMarkBits guarantees that
- // gcmarkBits will be 8-byte aligned, so we don't have to
- // worry about edge cases; irrelevant bits will simply be zero.
- for i := uintptr(0); i < bytes; i += 8 {
- // Extract 64 bits from the byte pointer and get a OnesCount.
- // Note that the unsafe cast here doesn't preserve endianness,
- // but that's OK. We only care about how many bits are 1, not
- // about the order we discover them in.
- mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
- count += sys.OnesCount64(mrkBits)
- }
- return count
-}
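-
-// For illustration only (not part of the original source): the
-// popcount counting above in miniature, with math/bits and
-// encoding/binary standing in for the runtime's internal helpers:
-//
-//	marks := []byte{0xff, 0x0f, 0, 0, 0, 0, 0, 0} // 12 objects marked
-//	word := binary.LittleEndian.Uint64(marks)     // endianness does not matter here
-//	count := bits.OnesCount64(word)               // == 12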
-
-// heapBitsSetType records that the new allocation [x, x+size)
-// holds in [x, x+dataSize) one or more values of type typ.
-// (The number of values is given by dataSize / typ.size.)
-// If dataSize < size, the fragment [x+dataSize, x+size) is
-// recorded as non-pointer data.
-// It is known that the type has pointers somewhere;
-// malloc does not call heapBitsSetType when there are no pointers,
-// because all free objects are marked as noscan during
-// heapBitsSweepSpan.
-//
-// There can only be one allocation from a given span active at a time,
-// and the bitmap for a span always falls on byte boundaries,
-// so there are no write-write races for access to the heap bitmap.
-// Hence, heapBitsSetType can access the bitmap without atomics.
-//
-// There can be read-write races between heapBitsSetType and things
-// that read the heap bitmap like scanobject. However, since
-// heapBitsSetType is only used for objects that have not yet been
-// made reachable, readers will ignore bits being modified by this
-// function. This does mean this function cannot transiently modify
-// bits that belong to neighboring objects. Also, on weakly-ordered
-// machines, callers must execute a store/store (publication) barrier
-// between calling this function and making the object reachable.
-func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
- const doubleCheck = false // slow but helpful; enable to test modifications to this code
-
- const (
- mask1 = bitPointer | bitScan // 00010001
- mask2 = bitPointer | bitScan | mask1<<heapBitsShift // 00110011
- mask3 = bitPointer | bitScan | mask2<<heapBitsShift // 01110111
- )
-
- // dataSize is always size rounded up to the next malloc size class,
- // except in the case of allocating a defer block, in which case
- // size is sizeof(_defer{}) (at least 6 words) and dataSize may be
- // arbitrarily larger.
- //
- // The checks for size == goarch.PtrSize and size == 2*goarch.PtrSize can therefore
- // assume that dataSize == size without checking it explicitly.
-
- if goarch.PtrSize == 8 && size == goarch.PtrSize {
- // It's one word and it has pointers, it must be a pointer.
- // Since all allocated one-word objects are pointers
- // (non-pointers are aggregated into tinySize allocations),
- // initSpan sets the pointer bits for us. Nothing to do here.
- if doubleCheck {
- h := heapBitsForAddr(x)
- if !h.isPointer() {
- throw("heapBitsSetType: pointer bit missing")
- }
- if !h.morePointers() {
- throw("heapBitsSetType: scan bit missing")
- }
- }
- return
- }
-
- h := heapBitsForAddr(x)
- ptrmask := typ.gcdata // start of 1-bit pointer mask (or GC program, handled below)
-
- // 2-word objects only have 4 bitmap bits and 3-word objects only have 6 bitmap bits.
- // Therefore, these objects share a heap bitmap byte with the objects next to them.
- // These are called out as a special case primarily so the code below can assume all
- // objects are at least 4 words long and that their bitmaps start either at the beginning
- // of a bitmap byte, or half-way in (h.shift of 0 and 2 respectively).
-
- if size == 2*goarch.PtrSize {
- if typ.size == goarch.PtrSize {
- // We're allocating a block big enough to hold two pointers.
- // On 64-bit, that means the actual object must be two pointers,
- // or else we'd have used the one-pointer-sized block.
- // On 32-bit, however, this is the 8-byte block, the smallest one.
- // So it could be that we're allocating one pointer and this was
- // just the smallest block available. Distinguish by checking dataSize.
- // (In general the number of instances of typ being allocated is
- // dataSize/typ.size.)
- if goarch.PtrSize == 4 && dataSize == goarch.PtrSize {
- // 1 pointer object. On 32-bit machines clear the bit for the
- // unused second word.
- *h.bitp &^= (bitPointer | bitScan | (bitPointer|bitScan)<<heapBitsShift) << h.shift
- *h.bitp |= (bitPointer | bitScan) << h.shift
- } else {
- // 2-element array of pointer.
- *h.bitp |= (bitPointer | bitScan | (bitPointer|bitScan)<<heapBitsShift) << h.shift
- }
- return
- }
- // Otherwise typ.size must be 2*goarch.PtrSize,
- // and typ.kind&kindGCProg == 0.
- if doubleCheck {
- if typ.size != 2*goarch.PtrSize || typ.kind&kindGCProg != 0 {
- print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
- throw("heapBitsSetType")
- }
- }
- b := uint32(*ptrmask)
- hb := b & 3
- hb |= bitScanAll & ((bitScan << (typ.ptrdata / goarch.PtrSize)) - 1)
- // Clear the bits for this object so we can set the
- // appropriate ones.
- *h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
- *h.bitp |= uint8(hb << h.shift)
- return
- } else if size == 3*goarch.PtrSize {
- b := uint8(*ptrmask)
- if doubleCheck {
- if b == 0 {
- println("runtime: invalid type ", typ.string())
- throw("heapBitsSetType: called with non-pointer type")
- }
- if goarch.PtrSize != 8 {
- throw("heapBitsSetType: unexpected 3 pointer wide size class on 32 bit")
- }
- if typ.kind&kindGCProg != 0 {
- throw("heapBitsSetType: unexpected GC prog for 3 pointer wide size class")
- }
- if typ.size == 2*goarch.PtrSize {
- print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, "\n")
- throw("heapBitsSetType: inconsistent object sizes")
- }
- }
- if typ.size == goarch.PtrSize {
- // The type contains a pointer otherwise heapBitsSetType wouldn't have been called.
- // Since the type is only 1 pointer wide and contains a pointer, its gcdata must be exactly 1.
- if doubleCheck && *typ.gcdata != 1 {
- print("runtime: heapBitsSetType size=", size, " typ.size=", typ.size, "but *typ.gcdata", *typ.gcdata, "\n")
- throw("heapBitsSetType: unexpected gcdata for 1 pointer wide type size in 3 pointer wide size class")
- }
- // 3-element array of pointers. Unrolling ptrmask 3 times into b yields 00000111.
- b = 7
- }
-
- hb := b & 7
- // Set bitScan bits for all pointers.
- hb |= hb << wordsPerBitmapByte
- // First bitScan bit is always set since the type contains pointers.
- hb |= bitScan
- // Second bitScan bit needs to also be set if the third bitScan bit is set.
- hb |= hb & (bitScan << (2 * heapBitsShift)) >> 1
-
- // For h.shift > 1 heap bits cross a byte boundary and need to be written part
- // to h.bitp and part to the next h.bitp.
- switch h.shift {
- case 0:
- *h.bitp &^= mask3 << 0
- *h.bitp |= hb << 0
- case 1:
- *h.bitp &^= mask3 << 1
- *h.bitp |= hb << 1
- case 2:
- *h.bitp &^= mask2 << 2
- *h.bitp |= (hb & mask2) << 2
- // Two words written to the first byte.
- // Advance two words to get to the next byte.
- h = h.next().next()
- *h.bitp &^= mask1
- *h.bitp |= (hb >> 2) & mask1
- case 3:
- *h.bitp &^= mask1 << 3
- *h.bitp |= (hb & mask1) << 3
- // One word written to the first byte.
- // Advance one word to get to the next byte.
- h = h.next()
- *h.bitp &^= mask2
- *h.bitp |= (hb >> 1) & mask2
- }
- return
- }
-
- // Copy from 1-bit ptrmask into 2-bit bitmap.
- // The basic approach is to use a single uintptr as a bit buffer,
- // alternating between reloading the buffer and writing bitmap bytes.
- // In general, one load can supply two bitmap byte writes.
- // This is a lot of lines of code, but it compiles into relatively few
- // machine instructions.
-
- outOfPlace := false
- if arenaIndex(x+size-1) != arenaIdx(h.arena) || (doubleCheck && fastrandn(2) == 0) {
- // This object spans heap arenas, so the bitmap may be
- // discontiguous. Unroll it into the object instead
- // and then copy it out.
- //
- // In doubleCheck mode, we randomly do this anyway to
- // stress test the bitmap copying path.
- outOfPlace = true
- h.bitp = (*uint8)(unsafe.Pointer(x))
- h.last = nil
- }
-
- var (
- // Ptrmask input.
- p *byte // last ptrmask byte read
- b uintptr // ptrmask bits already loaded
- nb uintptr // number of bits in b at next read
- endp *byte // final ptrmask byte to read (then repeat)
- endnb uintptr // number of valid bits in *endp
- pbits uintptr // alternate source of bits
-
- // Heap bitmap output.
- w uintptr // words processed
- nw uintptr // number of words to process
- hbitp *byte // next heap bitmap byte to write
- hb uintptr // bits being prepared for *hbitp
- )
-
- hbitp = h.bitp
-
- // Handle GC program. Delayed until this part of the code
- // so that we can use the same double-checking mechanism
- // as the 1-bit case. Nothing above could have encountered
- // GC programs: the cases were all too small.
- if typ.kind&kindGCProg != 0 {
- heapBitsSetTypeGCProg(h, typ.ptrdata, typ.size, dataSize, size, addb(typ.gcdata, 4))
- if doubleCheck {
- // Double-check the heap bits written by GC program
- // by running the GC program to create a 1-bit pointer mask
- // and then jumping to the double-check code below.
- // This doesn't catch bugs shared between the 1-bit and 4-bit
- // GC program execution, but it does catch mistakes specific
- // to just one of those and bugs in heapBitsSetTypeGCProg's
- // implementation of arrays.
- lock(&debugPtrmask.lock)
- if debugPtrmask.data == nil {
- debugPtrmask.data = (*byte)(persistentalloc(1<<20, 1, &memstats.other_sys))
- }
- ptrmask = debugPtrmask.data
- runGCProg(addb(typ.gcdata, 4), nil, ptrmask, 1)
- }
- goto Phase4
- }
-
- // Note about sizes:
- //
- // typ.size is the number of words in the object,
- // and typ.ptrdata is the number of words in the prefix
- // of the object that contains pointers. That is, the final
- // typ.size - typ.ptrdata words contain no pointers.
- // This allows optimization of a common pattern where
- // an object has a small header followed by a large scalar
- // buffer. If we know the pointers are over, we don't have
- // to scan the buffer's heap bitmap at all.
- // The 1-bit ptrmasks are sized to contain only bits for
- // the typ.ptrdata prefix, zero padded out to a full byte
- // of bitmap. This code sets nw (below) so that heap bitmap
- // bits are only written for the typ.ptrdata prefix; if there is
- // more room in the allocated object, the next heap bitmap
- // entry is a 00, indicating that there are no more pointers
- // to scan. So only the ptrmask for the ptrdata bytes is needed.
- //
- // Replicated copies are not as nice: if there is an array of
-// objects with scalar tails, all but the last tail do have to
- // be initialized, because there is no way to say "skip forward".
- // However, because of the possibility of a repeated type with
- // size not a multiple of 4 pointers (one heap bitmap byte),
- // the code already must handle the last ptrmask byte specially
- // by treating it as containing only the bits for endnb pointers,
- // where endnb <= 4. We represent large scalar tails that must
- // be expanded in the replication by setting endnb larger than 4.
- // This will have the effect of reading many bits out of b,
- // but once the real bits are shifted out, b will supply as many
- // zero bits as we try to read, which is exactly what we need.
-
- p = ptrmask
- if typ.size < dataSize {
- // Filling in bits for an array of typ.
- // Set up for repetition of ptrmask during main loop.
- // Note that ptrmask describes only a prefix of the object: its typ.ptrdata bytes.
- const maxBits = goarch.PtrSize*8 - 7
- if typ.ptrdata/goarch.PtrSize <= maxBits {
- // Entire ptrmask fits in uintptr with room for a byte fragment.
- // Load into pbits and never read from ptrmask again.
- // This is especially important when the ptrmask has
- // fewer than 8 bits in it; otherwise the reload in the middle
- // of the Phase 2 loop would itself need to loop to gather
- // at least 8 bits.
-
- // Accumulate ptrmask into b.
- // ptrmask is sized to describe only typ.ptrdata, but we record
- // it as describing typ.size bytes, since all the high bits are zero.
- nb = typ.ptrdata / goarch.PtrSize
- for i := uintptr(0); i < nb; i += 8 {
- b |= uintptr(*p) << i
- p = add1(p)
- }
- nb = typ.size / goarch.PtrSize
-
- // Replicate ptrmask to fill entire pbits uintptr.
- // Doubling and truncating is fewer steps than
- // iterating by nb each time. (nb could be 1.)
- // Since we loaded typ.ptrdata/goarch.PtrSize bits
- // but are pretending to have typ.size/goarch.PtrSize,
- // there might be no replication necessary/possible.
- pbits = b
- endnb = nb
- if nb+nb <= maxBits {
- for endnb <= goarch.PtrSize*8 {
- pbits |= pbits << endnb
- endnb += endnb
- }
- // Truncate to a multiple of original ptrmask.
- // Because nb+nb <= maxBits, nb fits in a byte.
- // Byte division is cheaper than uintptr division.
- endnb = uintptr(maxBits/byte(nb)) * nb
- pbits &= 1<<endnb - 1
- b = pbits
- nb = endnb
- }
-
- // Clear p and endp as sentinel for using pbits.
- // Checked during Phase 2 loop.
- p = nil
- endp = nil
- } else {
- // Ptrmask is larger. Read it multiple times.
- n := (typ.ptrdata/goarch.PtrSize+7)/8 - 1
- endp = addb(ptrmask, n)
- endnb = typ.size/goarch.PtrSize - n*8
- }
- }
- if p != nil {
- b = uintptr(*p)
- p = add1(p)
- nb = 8
- }
-
- if typ.size == dataSize {
- // Single entry: can stop once we reach the non-pointer data.
- nw = typ.ptrdata / goarch.PtrSize
- } else {
- // Repeated instances of typ in an array.
- // Have to process first N-1 entries in full, but can stop
- // once we reach the non-pointer data in the final entry.
- nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / goarch.PtrSize
- }
- if nw == 0 {
- // No pointers! Caller was supposed to check.
- println("runtime: invalid type ", typ.string())
- throw("heapBitsSetType: called with non-pointer type")
- return
- }
-
- // Phase 1: Special case for leading byte (shift==0) or half-byte (shift==2).
- // The leading byte is special because it contains the bits for word 1,
- // which does not have the scan bit set.
- // The leading half-byte is special because it's half a byte,
- // so we have to be careful with the bits already there.
- switch {
- default:
- throw("heapBitsSetType: unexpected shift")
-
- case h.shift == 0:
- // Ptrmask and heap bitmap are aligned.
- //
- // This is a fast path for small objects.
- //
- // The first byte we write out covers the first four
- // words of the object. The scan/dead bit on the first
- // word must be set to scan since there are pointers
- // somewhere in the object.
- // In all following words, we set the scan/dead
- // appropriately to indicate that the object continues
- // to the next 2-bit entry in the bitmap.
- //
- // We set four bits at a time here, but if the object
- // is fewer than four words, phase 3 will clear
- // unnecessary bits.
- hb = b & bitPointerAll
- hb |= bitScanAll
- if w += 4; w >= nw {
- goto Phase3
- }
- *hbitp = uint8(hb)
- hbitp = add1(hbitp)
- b >>= 4
- nb -= 4
-
- case h.shift == 2:
- // Ptrmask and heap bitmap are misaligned.
- //
- // On 32-bit architectures only the 6-word object that corresponds
- // to the 24-byte size class can start with an h.shift of 2 here, since
- // all other size classes that are not 16-byte aligned have been handled
- // by special code paths at the beginning of heapBitsSetType on 32 bit.
- //
- // Many size classes are only 16-byte aligned. On 64-bit architectures
- // this results in a heap bitmap position starting with an h.shift of 2.
- //
- // The bits for the first two words are in a byte shared
- // with another object, so we must be careful with the bits
- // already there.
- //
- // We took care of 1-word, 2-word, and 3-word objects above,
- // so this is at least a 6-word object.
- hb = (b & (bitPointer | bitPointer<<heapBitsShift)) << (2 * heapBitsShift)
- hb |= bitScan << (2 * heapBitsShift)
- if nw > 1 {
- hb |= bitScan << (3 * heapBitsShift)
- }
- b >>= 2
- nb -= 2
- *hbitp &^= uint8((bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << (2 * heapBitsShift))
- *hbitp |= uint8(hb)
- hbitp = add1(hbitp)
- if w += 2; w >= nw {
- // We know that there is more data, because we handled 2-word and 3-word objects above.
- // This must be at least a 6-word object. If we're out of pointer words,
- // mark no scan in next bitmap byte and finish.
- hb = 0
- w += 4
- goto Phase3
- }
- }
-
- // Phase 2: Full bytes in bitmap, up to but not including write to last byte (full or partial) in bitmap.
- // The loop computes the bits for that last write but does not execute the write;
- // it leaves the bits in hb for processing by phase 3.
- // To avoid repeated adjustment of nb, we subtract out the 4 bits we're going to
- // use in the first half of the loop right now, and then we only adjust nb explicitly
- // if the 8 bits used by each iteration isn't balanced by 8 bits loaded mid-loop.
- nb -= 4
- for {
- // Emit bitmap byte.
- // b has at least nb+4 bits, with one exception:
- // if w+4 >= nw, then b has only nw-w bits,
- // but we'll stop at the break and then truncate
- // appropriately in Phase 3.
- hb = b & bitPointerAll
- hb |= bitScanAll
- if w += 4; w >= nw {
- break
- }
- *hbitp = uint8(hb)
- hbitp = add1(hbitp)
- b >>= 4
-
- // Load more bits. b has nb right now.
- if p != endp {
- // Fast path: keep reading from ptrmask.
- // nb unmodified: we just loaded 8 bits,
- // and the next iteration will consume 8 bits,
- // leaving us with the same nb the next time we're here.
- if nb < 8 {
- b |= uintptr(*p) << nb
- p = add1(p)
- } else {
- // Reduce the number of bits in b.
- // This is important if we skipped
- // over a scalar tail, since nb could
- // be larger than the bit width of b.
- nb -= 8
- }
- } else if p == nil {
- // Almost as fast path: track bit count and refill from pbits.
- // For short repetitions.
- if nb < 8 {
- b |= pbits << nb
- nb += endnb
- }
- nb -= 8 // for next iteration
- } else {
- // Slow path: reached end of ptrmask.
- // Process final partial byte and rewind to start.
- b |= uintptr(*p) << nb
- nb += endnb
- if nb < 8 {
- b |= uintptr(*ptrmask) << nb
- p = add1(ptrmask)
- } else {
- nb -= 8
- p = ptrmask
- }
- }
-
- // Emit bitmap byte.
- hb = b & bitPointerAll
- hb |= bitScanAll
- if w += 4; w >= nw {
- break
- }
- *hbitp = uint8(hb)
- hbitp = add1(hbitp)
- b >>= 4
- }
-
-Phase3:
- // Phase 3: Write last byte or partial byte and zero the rest of the bitmap entries.
- if w > nw {
- // Counting the 4 entries in hb not yet written to memory,
- // there are more entries than possible pointer slots.
- // Discard the excess entries (can't be more than 3).
- mask := uintptr(1)<<(4-(w-nw)) - 1
- hb &= mask | mask<<4 // apply mask to both pointer bits and scan bits
- }
-
- // Change nw from counting possibly-pointer words to total words in allocation.
- nw = size / goarch.PtrSize
-
- // Write whole bitmap bytes.
- // The first is hb, the rest are zero.
- if w <= nw {
- *hbitp = uint8(hb)
- hbitp = add1(hbitp)
- hb = 0 // for possible final half-byte below
- for w += 4; w <= nw; w += 4 {
- *hbitp = 0
- hbitp = add1(hbitp)
- }
- }
-
- // Write final partial bitmap byte if any.
- // We know w > nw, or else we'd still be in the loop above.
- // It can be bigger only due to the 4 entries in hb that it counts.
- // If w == nw+4 then there's nothing left to do: we wrote all nw entries
- // and can discard the 4 sitting in hb.
- // But if w == nw+2, we need to write the first two entries in hb.
- // The byte is shared with the next object, so be careful with
- // existing bits.
- if w == nw+2 {
- *hbitp = *hbitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | uint8(hb)
- }
-
-Phase4:
- // Phase 4: Copy unrolled bitmap to per-arena bitmaps, if necessary.
- if outOfPlace {
- // TODO: We could probably make this faster by
- // handling [x+dataSize, x+size) specially.
- h := heapBitsForAddr(x)
- // cnw is the number of heap words, or bit pairs
- // remaining (like nw above).
- cnw := size / goarch.PtrSize
- src := (*uint8)(unsafe.Pointer(x))
- // We know the first and last byte of the bitmap are
- // not the same, but it's still possible for small
- // objects to span arenas, so the object may share bitmap
- // bytes with neighboring objects.
- //
- // Handle the first byte specially if it's shared. See
- // Phase 1 for why this is the only special case we need.
- if doubleCheck {
- if !(h.shift == 0 || h.shift == 2) {
- print("x=", x, " size=", size, " cnw=", h.shift, "\n")
- throw("bad start shift")
- }
- }
- if h.shift == 2 {
- *h.bitp = *h.bitp&^((bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift)<<(2*heapBitsShift)) | *src
- h = h.next().next()
- cnw -= 2
- src = addb(src, 1)
- }
- // We're now byte aligned. Copy out to per-arena
- // bitmaps until the last byte (which may again be
- // partial).
- for cnw >= 4 {
- // This loop processes four words at a time,
- // so round cnw down accordingly.
- hNext, words := h.forwardOrBoundary(cnw / 4 * 4)
-
- // n is the number of bitmap bytes to copy.
- n := words / 4
- memmove(unsafe.Pointer(h.bitp), unsafe.Pointer(src), n)
- cnw -= words
- h = hNext
- src = addb(src, n)
- }
- if doubleCheck && h.shift != 0 {
- print("cnw=", cnw, " h.shift=", h.shift, "\n")
- throw("bad shift after block copy")
- }
- // Handle the last byte if it's shared.
- if cnw == 2 {
- *h.bitp = *h.bitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | *src
- src = addb(src, 1)
- h = h.next().next()
- }
- if doubleCheck {
- if uintptr(unsafe.Pointer(src)) > x+size {
- throw("copy exceeded object size")
- }
- if !(cnw == 0 || cnw == 2) {
- print("x=", x, " size=", size, " cnw=", cnw, "\n")
- throw("bad number of remaining words")
- }
- // Set up hbitp so doubleCheck code below can check it.
- hbitp = h.bitp
- }
- // Zero the object where we wrote the bitmap.
- memclrNoHeapPointers(unsafe.Pointer(x), uintptr(unsafe.Pointer(src))-x)
- }
-
- // Double check the whole bitmap.
- if doubleCheck {
- // x+size may not point to the heap, so back up one
- // word and then advance it the way we do above.
- end := heapBitsForAddr(x + size - goarch.PtrSize)
- if outOfPlace {
- // In out-of-place copying, we just advance
- // using next.
- end = end.next()
- } else {
- // Don't use next because that may advance to
- // the next arena and the in-place logic
- // doesn't do that.
- end.shift += heapBitsShift
- if end.shift == 4*heapBitsShift {
- end.bitp, end.shift = add1(end.bitp), 0
- }
- }
- if typ.kind&kindGCProg == 0 && (hbitp != end.bitp || (w == nw+2) != (end.shift == 2)) {
- println("ended at wrong bitmap byte for", typ.string(), "x", dataSize/typ.size)
- print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
- print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
- h0 := heapBitsForAddr(x)
- print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
- print("ended at hbitp=", hbitp, " but next starts at bitp=", end.bitp, " shift=", end.shift, "\n")
- throw("bad heapBitsSetType")
- }
-
- // Double-check that bits to be written were written correctly.
- // Does not check that other bits were not written, unfortunately.
- h := heapBitsForAddr(x)
- nptr := typ.ptrdata / goarch.PtrSize
- ndata := typ.size / goarch.PtrSize
- count := dataSize / typ.size
- totalptr := ((count-1)*typ.size + typ.ptrdata) / goarch.PtrSize
- for i := uintptr(0); i < size/goarch.PtrSize; i++ {
- j := i % ndata
- var have, want uint8
- have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
- if i >= totalptr {
- if typ.kind&kindGCProg != 0 && i < (totalptr+3)/4*4 {
- // heapBitsSetTypeGCProg always fills
- // in full nibbles of bitScan.
- want = bitScan
- }
- } else {
- if j < nptr && (*addb(ptrmask, j/8)>>(j%8))&1 != 0 {
- want |= bitPointer
- }
- want |= bitScan
- }
- if have != want {
- println("mismatch writing bits for", typ.string(), "x", dataSize/typ.size)
- print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
- print("kindGCProg=", typ.kind&kindGCProg != 0, " outOfPlace=", outOfPlace, "\n")
- print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
- h0 := heapBitsForAddr(x)
- print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
- print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
- print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
- println("at word", i, "offset", i*goarch.PtrSize, "have", hex(have), "want", hex(want))
- if typ.kind&kindGCProg != 0 {
- println("GC program:")
- dumpGCProg(addb(typ.gcdata, 4))
- }
- throw("bad heapBitsSetType")
- }
- h = h.next()
- }
- if ptrmask == debugPtrmask.data {
- unlock(&debugPtrmask.lock)
- }
- }
-}
-
-var debugPtrmask struct {
- lock mutex
- data *byte
-}
-
-// heapBitsSetTypeGCProg implements heapBitsSetType using a GC program.
-// progSize is the size of the memory described by the program.
-// elemSize is the size of the element that the GC program describes a prefix of.
-// dataSize is the total size of the intended data, a multiple of elemSize.
-// allocSize is the total size of the allocated memory.
-//
-// GC programs are only used for large allocations.
-// heapBitsSetType requires that allocSize is a multiple of 4 words,
-// so that the relevant bitmap bytes are not shared with surrounding
-// objects.
-func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
- if goarch.PtrSize == 8 && allocSize%(4*goarch.PtrSize) != 0 {
- // Alignment will be wrong.
- throw("heapBitsSetTypeGCProg: small allocation")
- }
- var totalBits uintptr
- if elemSize == dataSize {
- totalBits = runGCProg(prog, nil, h.bitp, 2)
- if totalBits*goarch.PtrSize != progSize {
- println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
- throw("heapBitsSetTypeGCProg: unexpected bit count")
- }
- } else {
- count := dataSize / elemSize
-
- // Piece together program trailer to run after prog that does:
- // literal(0)
- // repeat(1, elemSize-progSize-1) // zeros to fill element size
- // repeat(elemSize, count-1) // repeat that element for count
- // This zero-pads the data remaining in the first element and then
- // repeats that first element to fill the array.
- var trailer [40]byte // 3 varints (max 10 each) + some bytes
- i := 0
- if n := elemSize/goarch.PtrSize - progSize/goarch.PtrSize; n > 0 {
- // literal(0)
- trailer[i] = 0x01
- i++
- trailer[i] = 0
- i++
- if n > 1 {
- // repeat(1, n-1)
- trailer[i] = 0x81
- i++
- n--
- for ; n >= 0x80; n >>= 7 {
- trailer[i] = byte(n | 0x80)
- i++
- }
- trailer[i] = byte(n)
- i++
- }
- }
- // repeat(elemSize/ptrSize, count-1)
- trailer[i] = 0x80
- i++
- n := elemSize / goarch.PtrSize
- for ; n >= 0x80; n >>= 7 {
- trailer[i] = byte(n | 0x80)
- i++
- }
- trailer[i] = byte(n)
- i++
- n = count - 1
- for ; n >= 0x80; n >>= 7 {
- trailer[i] = byte(n | 0x80)
- i++
- }
- trailer[i] = byte(n)
- i++
- trailer[i] = 0
- i++
-
- runGCProg(prog, &trailer[0], h.bitp, 2)
-
- // Even though we filled in the full array just now,
- // record that we only filled in up to the ptrdata of the
- // last element. This will cause the code below to
- // memclr the dead section of the final array element,
- // so that scanobject can stop early in the final element.
- totalBits = (elemSize*(count-1) + progSize) / goarch.PtrSize
- }
- endProg := unsafe.Pointer(addb(h.bitp, (totalBits+3)/4))
- endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/goarch.PtrSize/wordsPerBitmapByte))
- memclrNoHeapPointers(endProg, uintptr(endAlloc)-uintptr(endProg))
-}
-
-// progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
-// size is the size of the region described by prog, in bytes.
-// The resulting bitvector will have no more than size/goarch.PtrSize bits.
-func progToPointerMask(prog *byte, size uintptr) bitvector {
- n := (size/goarch.PtrSize + 7) / 8
- x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
- x[len(x)-1] = 0xa1 // overflow check sentinel
- n = runGCProg(prog, nil, &x[0], 1)
- if x[len(x)-1] != 0xa1 {
- throw("progToPointerMask: overflow")
- }
- return bitvector{int32(n), &x[0]}
-}
-
-// Packed GC pointer bitmaps, aka GC programs.
-//
-// For large types containing arrays, the type information has a
-// natural repetition that can be encoded to save space in the
-// binary and in the memory representation of the type information.
-//
-// The encoding is a simple Lempel-Ziv style bytecode machine
-// with the following instructions:
-//
-// 00000000: stop
-// 0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
-// 10000000 n c: repeat the previous n bits c times; n, c are varints
-// 1nnnnnnn c: repeat the previous n bits c times; c is a varint
-
-// runGCProg executes the GC program prog, and then trailer if non-nil,
-// writing to dst with entries of the given size.
-// If size == 1, dst is a 1-bit pointer mask laid out moving forward from dst.
-// If size == 2, dst is the 2-bit heap bitmap, and writes move forward
-// starting at dst, as the code below does with add1. In this case, the caller
-// guarantees that only whole bytes in dst need to be written.
-//
-// runGCProg returns the number of 1- or 2-bit entries written to memory.
-func runGCProg(prog, trailer, dst *byte, size int) uintptr {
- dstStart := dst
-
- // Bits waiting to be written to memory.
- var bits uintptr
- var nbits uintptr
-
- p := prog
-Run:
- for {
- // Flush accumulated full bytes.
- // The rest of the loop assumes that nbits <= 7.
- for ; nbits >= 8; nbits -= 8 {
- if size == 1 {
- *dst = uint8(bits)
- dst = add1(dst)
- bits >>= 8
- } else {
- v := bits&bitPointerAll | bitScanAll
- *dst = uint8(v)
- dst = add1(dst)
- bits >>= 4
- v = bits&bitPointerAll | bitScanAll
- *dst = uint8(v)
- dst = add1(dst)
- bits >>= 4
- }
- }
-
- // Process one instruction.
- inst := uintptr(*p)
- p = add1(p)
- n := inst & 0x7F
- if inst&0x80 == 0 {
- // Literal bits; n == 0 means end of program.
- if n == 0 {
- // Program is over; continue in trailer if present.
- if trailer != nil {
- p = trailer
- trailer = nil
- continue
- }
- break Run
- }
- nbyte := n / 8
- for i := uintptr(0); i < nbyte; i++ {
- bits |= uintptr(*p) << nbits
- p = add1(p)
- if size == 1 {
- *dst = uint8(bits)
- dst = add1(dst)
- bits >>= 8
- } else {
- v := bits&0xf | bitScanAll
- *dst = uint8(v)
- dst = add1(dst)
- bits >>= 4
- v = bits&0xf | bitScanAll
- *dst = uint8(v)
- dst = add1(dst)
- bits >>= 4
- }
- }
- if n %= 8; n > 0 {
- bits |= uintptr(*p) << nbits
- p = add1(p)
- nbits += n
- }
- continue Run
- }
-
- // Repeat. If n == 0, it is encoded in a varint in the next bytes.
- if n == 0 {
- for off := uint(0); ; off += 7 {
- x := uintptr(*p)
- p = add1(p)
- n |= (x & 0x7F) << off
- if x&0x80 == 0 {
- break
- }
- }
- }
-
- // Count is encoded in a varint in the next bytes.
- c := uintptr(0)
- for off := uint(0); ; off += 7 {
- x := uintptr(*p)
- p = add1(p)
- c |= (x & 0x7F) << off
- if x&0x80 == 0 {
- break
- }
- }
- c *= n // now total number of bits to copy
-
- // If the number of bits being repeated is small, load them
- // into a register and use that register for the entire loop
- // instead of repeatedly reading from memory.
- // Handling fewer than 8 bits here makes the general loop simpler.
- // The cutoff is goarch.PtrSize*8 - 7 to guarantee that when we add
- // the pattern to a bit buffer holding at most 7 bits (a partial byte)
- // it will not overflow.
- src := dst
- const maxBits = goarch.PtrSize*8 - 7
- if n <= maxBits {
- // Start with bits in output buffer.
- pattern := bits
- npattern := nbits
-
- // If we need more bits, fetch them from memory.
- if size == 1 {
- src = subtract1(src)
- for npattern < n {
- pattern <<= 8
- pattern |= uintptr(*src)
- src = subtract1(src)
- npattern += 8
- }
- } else {
- src = subtract1(src)
- for npattern < n {
- pattern <<= 4
- pattern |= uintptr(*src) & 0xf
- src = subtract1(src)
- npattern += 4
- }
- }
-
- // We started with the whole bit output buffer,
- // and then we loaded bits from whole bytes.
- // Either way, we might now have too many instead of too few.
- // Discard the extra.
- if npattern > n {
- pattern >>= npattern - n
- npattern = n
- }
-
- // Replicate pattern to at most maxBits.
- if npattern == 1 {
- // One bit being repeated.
- // If the bit is 1, make the pattern all 1s.
- // If the bit is 0, the pattern is already all 0s,
- // but we can claim that the number of bits
- // in the word is equal to the number we need (c),
- // because right shift of bits will zero fill.
- if pattern == 1 {
- pattern = 1<<maxBits - 1
- npattern = maxBits
- } else {
- npattern = c
- }
- } else {
- b := pattern
- nb := npattern
- if nb+nb <= maxBits {
- // Double pattern until the whole uintptr is filled.
- for nb <= goarch.PtrSize*8 {
- b |= b << nb
- nb += nb
- }
- // Trim away incomplete copy of original pattern in high bits.
- // TODO(rsc): Replace with table lookup or loop on systems without divide?
- nb = maxBits / npattern * npattern
- b &= 1<<nb - 1
- pattern = b
- npattern = nb
- }
- }
-
- // Add pattern to bit buffer and flush bit buffer, c/npattern times.
- // Since pattern contains >8 bits, there will be full bytes to flush
- // on each iteration.
- for ; c >= npattern; c -= npattern {
- bits |= pattern << nbits
- nbits += npattern
- if size == 1 {
- for nbits >= 8 {
- *dst = uint8(bits)
- dst = add1(dst)
- bits >>= 8
- nbits -= 8
- }
- } else {
- for nbits >= 4 {
- *dst = uint8(bits&0xf | bitScanAll)
- dst = add1(dst)
- bits >>= 4
- nbits -= 4
- }
- }
- }
-
- // Add final fragment to bit buffer.
- if c > 0 {
- pattern &= 1<<c - 1
- bits |= pattern << nbits
- nbits += c
- }
- continue Run
- }
-
- // Repeat; n too large to fit in a register.
- // Since nbits <= 7, we know the first few bytes of repeated data
- // are already written to memory.
- off := n - nbits // n > nbits because n > maxBits and nbits <= 7
- if size == 1 {
- // Leading src fragment.
- src = subtractb(src, (off+7)/8)
- if frag := off & 7; frag != 0 {
- bits |= uintptr(*src) >> (8 - frag) << nbits
- src = add1(src)
- nbits += frag
- c -= frag
- }
- // Main loop: load one byte, write another.
- // The bits are rotating through the bit buffer.
- for i := c / 8; i > 0; i-- {
- bits |= uintptr(*src) << nbits
- src = add1(src)
- *dst = uint8(bits)
- dst = add1(dst)
- bits >>= 8
- }
- // Final src fragment.
- if c %= 8; c > 0 {
- bits |= (uintptr(*src) & (1<<c - 1)) << nbits
- nbits += c
- }
- } else {
- // Leading src fragment.
- src = subtractb(src, (off+3)/4)
- if frag := off & 3; frag != 0 {
- bits |= (uintptr(*src) & 0xf) >> (4 - frag) << nbits
- src = add1(src)
- nbits += frag
- c -= frag
- }
- // Main loop: load one byte, write another.
- // The bits are rotating through the bit buffer.
- for i := c / 4; i > 0; i-- {
- bits |= (uintptr(*src) & 0xf) << nbits
- src = add1(src)
- *dst = uint8(bits&0xf | bitScanAll)
- dst = add1(dst)
- bits >>= 4
- }
- // Final src fragment.
- if c %= 4; c > 0 {
- bits |= (uintptr(*src) & (1<<c - 1)) << nbits
- nbits += c
- }
- }
- }
-
- // Write any final bits out, using full-byte writes, even for the final byte.
- var totalBits uintptr
- if size == 1 {
- totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
- nbits += -nbits & 7
- for ; nbits > 0; nbits -= 8 {
- *dst = uint8(bits)
- dst = add1(dst)
- bits >>= 8
- }
- } else {
- totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*4 + nbits
- nbits += -nbits & 3
- for ; nbits > 0; nbits -= 4 {
- v := bits&0xf | bitScanAll
- *dst = uint8(v)
- dst = add1(dst)
- bits >>= 4
- }
- }
- return totalBits
-}
-
-// materializeGCProg allocates space for the (1-bit) pointer bitmask
-// for an object of size ptrdata. Then it fills that space with the
-// pointer bitmask specified by the program prog.
-// The bitmask starts at s.startAddr.
-// The result must be deallocated with dematerializeGCProg.
-func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
- // Each word of ptrdata needs one bit in the bitmap.
- bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
- // Compute the number of pages needed for bitmapBytes.
- pages := divRoundUp(bitmapBytes, pageSize)
- s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
- runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1)
- return s
-}
-func dematerializeGCProg(s *mspan) {
- mheap_.freeManual(s, spanAllocPtrScalarBits)
-}
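The sizing arithmetic in materializeGCProg above is easy to check in isolation. A minimal sketch, assuming a 64-bit word size and the runtime's 8 KiB page (the constant names here are illustrative):

package main

import "fmt"

const (
	ptrSize  = 8       // goarch.PtrSize on 64-bit targets
	pageSize = 8 << 10 // the runtime's page size
)

// divRoundUp returns ceil(n/a), like the runtime helper of the same name.
func divRoundUp(n, a uintptr) uintptr { return (n + a - 1) / a }

func main() {
	ptrdata := uintptr(1 << 20) // bytes of the type's prefix that may hold pointers
	// One mask bit per pointer-sized word, packed 8 bits per byte.
	bitmapBytes := divRoundUp(ptrdata, 8*ptrSize)
	// allocManual works in whole pages, so round up again.
	pages := divRoundUp(bitmapBytes, pageSize)
	fmt.Println(bitmapBytes, pages) // 16384 2
}
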
-
-func dumpGCProg(p *byte) {
- nptr := 0
- for {
- x := *p
- p = add1(p)
- if x == 0 {
- print("\t", nptr, " end\n")
- break
- }
- if x&0x80 == 0 {
- print("\t", nptr, " lit ", x, ":")
- n := int(x+7) / 8
- for i := 0; i < n; i++ {
- print(" ", hex(*p))
- p = add1(p)
- }
- print("\n")
- nptr += int(x)
- } else {
- nbit := int(x &^ 0x80)
- if nbit == 0 {
- for nb := uint(0); ; nb += 7 {
- x := *p
- p = add1(p)
- nbit |= int(x&0x7f) << nb
- if x&0x80 == 0 {
- break
- }
- }
- }
- count := 0
- for nb := uint(0); ; nb += 7 {
- x := *p
- p = add1(p)
- count |= int(x&0x7f) << nb
- if x&0x80 == 0 {
- break
- }
- }
- print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
- nptr += nbit * count
- }
- }
-}
-
-// Testing.
-
-func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
- target := (*stkframe)(ctxt)
- if frame.sp <= target.sp && target.sp < frame.varp {
- *target = *frame
- return false
- }
- return true
-}
-
-// gcbits returns the GC type info for x, for testing.
-// The result is the bitmap entries (0 or 1), one entry per byte.
-//go:linkname reflect_gcbits reflect.gcbits
-func reflect_gcbits(x any) []byte {
- ret := getgcmask(x)
- typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
- nptr := typ.ptrdata / goarch.PtrSize
- for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
- ret = ret[:len(ret)-1]
- }
- return ret
-}
-
-// Returns GC type info for the pointer stored in ep for testing.
-// If ep points to the stack, only static live information will be returned
-// (i.e. not for objects which are only dynamically live stack objects).
-func getgcmask(ep any) (mask []byte) {
- e := *efaceOf(&ep)
- p := e.data
- t := e._type
- // data or bss
- for _, datap := range activeModules() {
- // data
- if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
- bitmap := datap.gcdatamask.bytedata
- n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/goarch.PtrSize)
- for i := uintptr(0); i < n; i += goarch.PtrSize {
- off := (uintptr(p) + i - datap.data) / goarch.PtrSize
- mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
- }
- return
- }
-
- // bss
- if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
- bitmap := datap.gcbssmask.bytedata
- n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/goarch.PtrSize)
- for i := uintptr(0); i < n; i += goarch.PtrSize {
- off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
- mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
- }
- return
- }
- }
-
- // heap
- if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
- hbits := heapBitsForAddr(base)
- n := s.elemsize
- mask = make([]byte, n/goarch.PtrSize)
- for i := uintptr(0); i < n; i += goarch.PtrSize {
- if hbits.isPointer() {
- mask[i/goarch.PtrSize] = 1
- }
- if !hbits.morePointers() {
- mask = mask[:i/goarch.PtrSize]
- break
- }
- hbits = hbits.next()
- }
- return
- }
-
- // stack
- if _g_ := getg(); _g_.m.curg.stack.lo <= uintptr(p) && uintptr(p) < _g_.m.curg.stack.hi {
- var frame stkframe
- frame.sp = uintptr(p)
- _g_ := getg()
- gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
- if frame.fn.valid() {
- locals, _, _ := getStackMap(&frame, nil, false)
- if locals.n == 0 {
- return
- }
- size := uintptr(locals.n) * goarch.PtrSize
- n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/goarch.PtrSize)
- for i := uintptr(0); i < n; i += goarch.PtrSize {
- off := (uintptr(p) + i - frame.varp + size) / goarch.PtrSize
- mask[i/goarch.PtrSize] = locals.ptrbit(off)
- }
- }
- return
- }
-
- // otherwise, not something the GC knows about.
- // possibly read-only data, like malloc(0).
- // must not have pointers
- return
-}
diff --git a/contrib/go/_std_1.18/src/runtime/mcache.go b/contrib/go/_std_1.18/src/runtime/mcache.go
deleted file mode 100644
index c58e05dd29..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mcache.go
+++ /dev/null
@@ -1,298 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "runtime/internal/atomic"
- "unsafe"
-)
-
-// Per-thread (in Go, per-P) cache for small objects.
-// This includes a small object cache and local allocation stats.
-// No locking needed because it is per-thread (per-P).
-//
-// mcaches are allocated from non-GC'd memory, so any heap pointers
-// must be specially handled.
-//
-//go:notinheap
-type mcache struct {
- // The following members are accessed on every malloc,
- // so they are grouped here for better caching.
- nextSample uintptr // trigger heap sample after allocating this many bytes
- scanAlloc uintptr // bytes of scannable heap allocated
-
- // Allocator cache for tiny objects w/o pointers.
- // See "Tiny allocator" comment in malloc.go.
-
- // tiny points to the beginning of the current tiny block, or
- // nil if there is no current tiny block.
- //
- // tiny is a heap pointer. Since mcache is in non-GC'd memory,
- // we handle it by clearing it in releaseAll during mark
- // termination.
- //
- // tinyAllocs is the number of tiny allocations performed
- // by the P that owns this mcache.
- tiny uintptr
- tinyoffset uintptr
- tinyAllocs uintptr
-
- // The rest is not accessed on every malloc.
-
- alloc [numSpanClasses]*mspan // spans to allocate from, indexed by spanClass
-
- stackcache [_NumStackOrders]stackfreelist
-
- // flushGen indicates the sweepgen during which this mcache
- // was last flushed. If flushGen != mheap_.sweepgen, the spans
-	// in this mcache are stale and need to be flushed so they
- // can be swept. This is done in acquirep.
- flushGen uint32
-}
-
-// A gclink is a node in a linked list of blocks, like mlink,
-// but it is opaque to the garbage collector.
-// The GC does not trace the pointers during collection,
-// and the compiler does not emit write barriers for assignments
-// of gclinkptr values. Code should store references to gclinks
-// as gclinkptr, not as *gclink.
-type gclink struct {
- next gclinkptr
-}
-
-// A gclinkptr is a pointer to a gclink, but it is opaque
-// to the garbage collector.
-type gclinkptr uintptr
-
-// ptr returns the *gclink form of p.
-// The result should be used for accessing fields, not stored
-// in other data structures.
-func (p gclinkptr) ptr() *gclink {
- return (*gclink)(unsafe.Pointer(p))
-}
-
-type stackfreelist struct {
- list gclinkptr // linked list of free stacks
- size uintptr // total size of stacks in list
-}
-
-// dummy mspan that contains no free objects.
-var emptymspan mspan
-
-func allocmcache() *mcache {
- var c *mcache
- systemstack(func() {
- lock(&mheap_.lock)
- c = (*mcache)(mheap_.cachealloc.alloc())
- c.flushGen = mheap_.sweepgen
- unlock(&mheap_.lock)
- })
- for i := range c.alloc {
- c.alloc[i] = &emptymspan
- }
- c.nextSample = nextSample()
- return c
-}
-
-// freemcache releases resources associated with this
-// mcache and puts the object onto a free list.
-//
-// In some cases there is no way to simply release
-// resources, such as statistics, so donate them to
-// a different mcache (the recipient).
-func freemcache(c *mcache) {
- systemstack(func() {
- c.releaseAll()
- stackcache_clear(c)
-
- // NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
- // with the stealing of gcworkbufs during garbage collection to avoid
- // a race where the workbuf is double-freed.
- // gcworkbuffree(c.gcworkbuf)
-
- lock(&mheap_.lock)
- mheap_.cachealloc.free(unsafe.Pointer(c))
- unlock(&mheap_.lock)
- })
-}
-
-// getMCache is a convenience function which tries to obtain an mcache.
-//
-// Returns nil if we're not bootstrapping and we don't have a P. The caller's
-// P must not change, so we must be in a non-preemptible state.
-func getMCache(mp *m) *mcache {
- // Grab the mcache, since that's where stats live.
- pp := mp.p.ptr()
- var c *mcache
- if pp == nil {
- // We will be called without a P while bootstrapping,
- // in which case we use mcache0, which is set in mallocinit.
- // mcache0 is cleared when bootstrapping is complete,
- // by procresize.
- c = mcache0
- } else {
- c = pp.mcache
- }
- return c
-}
-
-// refill acquires a new span of span class spc for c. This span will
-// have at least one free object. The current span in c must be full.
-//
-// Must run in a non-preemptible context since otherwise the owner of
-// c could change.
-func (c *mcache) refill(spc spanClass) {
- // Return the current cached span to the central lists.
- s := c.alloc[spc]
-
- if uintptr(s.allocCount) != s.nelems {
- throw("refill of span with free space remaining")
- }
- if s != &emptymspan {
- // Mark this span as no longer cached.
- if s.sweepgen != mheap_.sweepgen+3 {
- throw("bad sweepgen in refill")
- }
- mheap_.central[spc].mcentral.uncacheSpan(s)
- }
-
- // Get a new cached span from the central lists.
- s = mheap_.central[spc].mcentral.cacheSpan()
- if s == nil {
- throw("out of memory")
- }
-
- if uintptr(s.allocCount) == s.nelems {
- throw("span has no free space")
- }
-
- // Indicate that this span is cached and prevent asynchronous
- // sweeping in the next sweep phase.
- s.sweepgen = mheap_.sweepgen + 3
-
- // Assume all objects from this span will be allocated in the
- // mcache. If it gets uncached, we'll adjust this.
- stats := memstats.heapStats.acquire()
- atomic.Xadd64(&stats.smallAllocCount[spc.sizeclass()], int64(s.nelems)-int64(s.allocCount))
-
- // Flush tinyAllocs.
- if spc == tinySpanClass {
- atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
- c.tinyAllocs = 0
- }
- memstats.heapStats.release()
-
- // Update heapLive with the same assumption.
- // While we're here, flush scanAlloc, since we have to call
- // revise anyway.
- usedBytes := uintptr(s.allocCount) * s.elemsize
- gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
- c.scanAlloc = 0
-
- c.alloc[spc] = s
-}
-
-// allocLarge allocates a span for a large object.
-func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan {
- if size+_PageSize < size {
- throw("out of memory")
- }
- npages := size >> _PageShift
- if size&_PageMask != 0 {
- npages++
- }
-
- // Deduct credit for this span allocation and sweep if
- // necessary. mHeap_Alloc will also sweep npages, so this only
- // pays the debt down to npage pages.
- deductSweepCredit(npages*_PageSize, npages)
-
- spc := makeSpanClass(0, noscan)
- s := mheap_.alloc(npages, spc)
- if s == nil {
- throw("out of memory")
- }
- stats := memstats.heapStats.acquire()
- atomic.Xadd64(&stats.largeAlloc, int64(npages*pageSize))
- atomic.Xadd64(&stats.largeAllocCount, 1)
- memstats.heapStats.release()
-
- // Update heapLive.
- gcController.update(int64(s.npages*pageSize), 0)
-
- // Put the large span in the mcentral swept list so that it's
- // visible to the background sweeper.
- mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
- s.limit = s.base() + size
- heapBitsForAddr(s.base()).initSpan(s)
- return s
-}
-
-func (c *mcache) releaseAll() {
- // Take this opportunity to flush scanAlloc.
- scanAlloc := int64(c.scanAlloc)
- c.scanAlloc = 0
-
- sg := mheap_.sweepgen
- dHeapLive := int64(0)
- for i := range c.alloc {
- s := c.alloc[i]
- if s != &emptymspan {
-			// Adjust smallAllocCount in case the span wasn't fully allocated.
- n := int64(s.nelems) - int64(s.allocCount)
- stats := memstats.heapStats.acquire()
- atomic.Xadd64(&stats.smallAllocCount[spanClass(i).sizeclass()], -n)
- memstats.heapStats.release()
- if s.sweepgen != sg+1 {
- // refill conservatively counted unallocated slots in gcController.heapLive.
- // Undo this.
- //
- // If this span was cached before sweep, then
- // gcController.heapLive was totally recomputed since
- // caching this span, so we don't do this for
- // stale spans.
- dHeapLive -= n * int64(s.elemsize)
- }
- // Release the span to the mcentral.
- mheap_.central[i].mcentral.uncacheSpan(s)
- c.alloc[i] = &emptymspan
- }
- }
- // Clear tinyalloc pool.
- c.tiny = 0
- c.tinyoffset = 0
-
- // Flush tinyAllocs.
- stats := memstats.heapStats.acquire()
- atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
- c.tinyAllocs = 0
- memstats.heapStats.release()
-
-	// Update heapScan and heapLive.
- gcController.update(dHeapLive, scanAlloc)
-}
-
-// prepareForSweep flushes c if the system has entered a new sweep phase
-// since c was populated. This must happen between the sweep phase
-// starting and the first allocation from c.
-func (c *mcache) prepareForSweep() {
- // Alternatively, instead of making sure we do this on every P
- // between starting the world and allocating on that P, we
- // could leave allocate-black on, allow allocation to continue
- // as usual, use a ragged barrier at the beginning of sweep to
- // ensure all cached spans are swept, and then disable
- // allocate-black. However, with this approach it's difficult
- // to avoid spilling mark bits into the *next* GC cycle.
- sg := mheap_.sweepgen
- if c.flushGen == sg {
- return
- } else if c.flushGen != sg-2 {
- println("bad flushGen", c.flushGen, "in prepareForSweep; sweepgen", sg)
- throw("bad flushGen")
- }
- c.releaseAll()
- stackcache_clear(c)
- atomic.Store(&c.flushGen, mheap_.sweepgen) // Synchronizes with gcStart
-}
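As a side note on allocLarge above: the conversion from a byte size to whole pages, including the overflow guard, is simple enough to lift out. A sketch assuming the runtime's 8 KiB pages (pagesFor is an illustrative name, not a runtime function):

package main

import "fmt"

const (
	pageShift = 13 // _PageShift for 8 KiB pages
	pageSize  = 1 << pageShift
	pageMask  = pageSize - 1
)

// pagesFor mirrors allocLarge's size-to-pages computation: reject
// sizes so large that rounding up would wrap around, then round
// the byte count up to whole pages.
func pagesFor(size uintptr) (uintptr, bool) {
	if size+pageSize < size {
		return 0, false // size+_PageSize overflowed: out of memory
	}
	npages := size >> pageShift
	if size&pageMask != 0 {
		npages++
	}
	return npages, true
}

func main() {
	fmt.Println(pagesFor(8 << 10))   // 1 true
	fmt.Println(pagesFor(8<<10 + 1)) // 2 true
}
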
diff --git a/contrib/go/_std_1.18/src/runtime/mem_darwin.go b/contrib/go/_std_1.18/src/runtime/mem_darwin.go
deleted file mode 100644
index 9f836c0818..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mem_darwin.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "unsafe"
-)
-
-// Don't split the stack as this function may be invoked without a valid G,
-// which prevents us from allocating more stack.
-//go:nosplit
-func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
- v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
- if err != 0 {
- return nil
- }
- sysStat.add(int64(n))
- return v
-}
-
-func sysUnused(v unsafe.Pointer, n uintptr) {
- // MADV_FREE_REUSABLE is like MADV_FREE except it also propagates
- // accounting information about the process to task_info.
- madvise(v, n, _MADV_FREE_REUSABLE)
-}
-
-func sysUsed(v unsafe.Pointer, n uintptr) {
- // MADV_FREE_REUSE is necessary to keep the kernel's accounting
- // accurate. If called on any memory region that hasn't been
- // MADV_FREE_REUSABLE'd, it's a no-op.
- madvise(v, n, _MADV_FREE_REUSE)
-}
-
-func sysHugePage(v unsafe.Pointer, n uintptr) {
-}
-
-// Don't split the stack as this function may be invoked without a valid G,
-// which prevents us from allocating more stack.
-//go:nosplit
-func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
- sysStat.add(-int64(n))
- munmap(v, n)
-}
-
-func sysFault(v unsafe.Pointer, n uintptr) {
- mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
-}
-
-func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
- p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
- if err != 0 {
- return nil
- }
- return p
-}
-
-const _ENOMEM = 12
-
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
- sysStat.add(int64(n))
-
- p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
- if err == _ENOMEM {
- throw("runtime: out of memory")
- }
- if p != v || err != 0 {
- print("runtime: mmap(", v, ", ", n, ") returned ", p, ", ", err, "\n")
- throw("runtime: cannot map pages in arena address space")
- }
-}
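The reserve/commit split that sysReserve and sysMap implement above can be reproduced from user space. A sketch using the portable golang.org/x/sys/unix wrappers; note the runtime remaps with MAP_FIXED, whereas here it is simpler to reserve PROT_NONE and commit with mprotect:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	const n = 1 << 20

	// Reserve address space with no access rights (cf. sysReserve).
	mem, err := unix.Mmap(-1, 0, n, unix.PROT_NONE, unix.MAP_ANON|unix.MAP_PRIVATE)
	if err != nil {
		panic(err)
	}
	defer unix.Munmap(mem) // cf. sysFree

	// Commit it read/write (the effect of sysMap).
	if err := unix.Mprotect(mem, unix.PROT_READ|unix.PROT_WRITE); err != nil {
		panic(err)
	}
	mem[0] = 1
	fmt.Println("committed", len(mem), "bytes")
}
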
diff --git a/contrib/go/_std_1.18/src/runtime/mem_linux.go b/contrib/go/_std_1.18/src/runtime/mem_linux.go
deleted file mode 100644
index f8333014c2..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mem_linux.go
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "runtime/internal/atomic"
- "unsafe"
-)
-
-const (
- _EACCES = 13
- _EINVAL = 22
-)
-
-// Don't split the stack as this method may be invoked without a valid G, which
-// prevents us from allocating more stack.
-//go:nosplit
-func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
- p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
- if err != 0 {
- if err == _EACCES {
- print("runtime: mmap: access denied\n")
- exit(2)
- }
- if err == _EAGAIN {
- print("runtime: mmap: too much locked memory (check 'ulimit -l').\n")
- exit(2)
- }
- return nil
- }
- sysStat.add(int64(n))
- return p
-}
-
-var adviseUnused = uint32(_MADV_FREE)
-
-func sysUnused(v unsafe.Pointer, n uintptr) {
- // By default, Linux's "transparent huge page" support will
- // merge pages into a huge page if there's even a single
- // present regular page, undoing the effects of madvise(adviseUnused)
- // below. On amd64, that means khugepaged can turn a single
- // 4KB page to 2MB, bloating the process's RSS by as much as
- // 512X. (See issue #8832 and Linux kernel bug
- // https://bugzilla.kernel.org/show_bug.cgi?id=93111)
- //
- // To work around this, we explicitly disable transparent huge
- // pages when we release pages of the heap. However, we have
- // to do this carefully because changing this flag tends to
-	// split the VMA (memory mapping) containing v into three
- // VMAs in order to track the different values of the
- // MADV_NOHUGEPAGE flag in the different regions. There's a
- // default limit of 65530 VMAs per address space (sysctl
- // vm.max_map_count), so we must be careful not to create too
- // many VMAs (see issue #12233).
- //
- // Since huge pages are huge, there's little use in adjusting
- // the MADV_NOHUGEPAGE flag on a fine granularity, so we avoid
- // exploding the number of VMAs by only adjusting the
- // MADV_NOHUGEPAGE flag on a large granularity. This still
- // gets most of the benefit of huge pages while keeping the
- // number of VMAs under control. With hugePageSize = 2MB, even
- // a pessimal heap can reach 128GB before running out of VMAs.
- if physHugePageSize != 0 {
- // If it's a large allocation, we want to leave huge
- // pages enabled. Hence, we only adjust the huge page
- // flag on the huge pages containing v and v+n-1, and
- // only if those aren't aligned.
- var head, tail uintptr
- if uintptr(v)&(physHugePageSize-1) != 0 {
- // Compute huge page containing v.
- head = alignDown(uintptr(v), physHugePageSize)
- }
- if (uintptr(v)+n)&(physHugePageSize-1) != 0 {
- // Compute huge page containing v+n-1.
- tail = alignDown(uintptr(v)+n-1, physHugePageSize)
- }
-
- // Note that madvise will return EINVAL if the flag is
- // already set, which is quite likely. We ignore
- // errors.
- if head != 0 && head+physHugePageSize == tail {
- // head and tail are different but adjacent,
- // so do this in one call.
- madvise(unsafe.Pointer(head), 2*physHugePageSize, _MADV_NOHUGEPAGE)
- } else {
- // Advise the huge pages containing v and v+n-1.
- if head != 0 {
- madvise(unsafe.Pointer(head), physHugePageSize, _MADV_NOHUGEPAGE)
- }
- if tail != 0 && tail != head {
- madvise(unsafe.Pointer(tail), physHugePageSize, _MADV_NOHUGEPAGE)
- }
- }
- }
-
- if uintptr(v)&(physPageSize-1) != 0 || n&(physPageSize-1) != 0 {
- // madvise will round this to any physical page
- // *covered* by this range, so an unaligned madvise
- // will release more memory than intended.
- throw("unaligned sysUnused")
- }
-
- var advise uint32
- if debug.madvdontneed != 0 {
- advise = _MADV_DONTNEED
- } else {
- advise = atomic.Load(&adviseUnused)
- }
- if errno := madvise(v, n, int32(advise)); advise == _MADV_FREE && errno != 0 {
- // MADV_FREE was added in Linux 4.5. Fall back to MADV_DONTNEED if it is
- // not supported.
- atomic.Store(&adviseUnused, _MADV_DONTNEED)
- madvise(v, n, _MADV_DONTNEED)
- }
-
- if debug.harddecommit > 0 {
- p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
- if p != v || err != 0 {
- throw("runtime: cannot disable permissions in address space")
- }
- }
-}
-
-func sysUsed(v unsafe.Pointer, n uintptr) {
- if debug.harddecommit > 0 {
- p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
- if err == _ENOMEM {
- throw("runtime: out of memory")
- }
- if p != v || err != 0 {
- throw("runtime: cannot remap pages in address space")
- }
- return
-
- // Don't do the sysHugePage optimization in hard decommit mode.
- // We're breaking up pages everywhere, there's no point.
- }
- // Partially undo the NOHUGEPAGE marks from sysUnused
- // for whole huge pages between v and v+n. This may
- // leave huge pages off at the end points v and v+n
- // even though allocations may cover these entire huge
- // pages. We could detect this and undo NOHUGEPAGE on
- // the end points as well, but it's probably not worth
- // the cost because when neighboring allocations are
- // freed sysUnused will just set NOHUGEPAGE again.
- sysHugePage(v, n)
-}
-
-func sysHugePage(v unsafe.Pointer, n uintptr) {
- if physHugePageSize != 0 {
- // Round v up to a huge page boundary.
- beg := alignUp(uintptr(v), physHugePageSize)
- // Round v+n down to a huge page boundary.
- end := alignDown(uintptr(v)+n, physHugePageSize)
-
- if beg < end {
- madvise(unsafe.Pointer(beg), end-beg, _MADV_HUGEPAGE)
- }
- }
-}
-
-// Don't split the stack as this function may be invoked without a valid G,
-// which prevents us from allocating more stack.
-//go:nosplit
-func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
- sysStat.add(-int64(n))
- munmap(v, n)
-}
-
-func sysFault(v unsafe.Pointer, n uintptr) {
- mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
-}
-
-func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
- p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
- if err != 0 {
- return nil
- }
- return p
-}
-
-func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
- sysStat.add(int64(n))
-
- p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
- if err == _ENOMEM {
- throw("runtime: out of memory")
- }
- if p != v || err != 0 {
- print("runtime: mmap(", v, ", ", n, ") returned ", p, ", ", err, "\n")
- throw("runtime: cannot map pages in arena address space")
- }
-}
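The head/tail huge-page selection in sysUnused and the interior rounding in sysHugePage are both plain alignment arithmetic. A sketch, assuming the common 2 MiB x86-64 huge page:

package main

import "fmt"

const hugePageSize = 2 << 20 // a typical physHugePageSize on amd64

func alignDown(p, a uintptr) uintptr { return p &^ (a - 1) }
func alignUp(p, a uintptr) uintptr   { return (p + a - 1) &^ (a - 1) }

func main() {
	v, n := uintptr(0x401000), uintptr(6<<20)

	// sysUnused: mark only the huge pages containing v and v+n-1,
	// and only when those boundaries are unaligned.
	var head, tail uintptr
	if v&(hugePageSize-1) != 0 {
		head = alignDown(v, hugePageSize)
	}
	if (v+n)&(hugePageSize-1) != 0 {
		tail = alignDown(v+n-1, hugePageSize)
	}
	fmt.Printf("head %#x tail %#x\n", head, tail) // head 0x400000 tail 0xa00000

	// sysHugePage: re-enable THP only for whole huge pages inside [v, v+n).
	beg, end := alignUp(v, hugePageSize), alignDown(v+n, hugePageSize)
	fmt.Printf("interior [%#x, %#x)\n", beg, end) // [0x600000, 0xa00000)
}
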
diff --git a/contrib/go/_std_1.18/src/runtime/memclr_amd64.s b/contrib/go/_std_1.18/src/runtime/memclr_amd64.s
deleted file mode 100644
index 700bbd7b9b..0000000000
--- a/contrib/go/_std_1.18/src/runtime/memclr_amd64.s
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !plan9
-
-#include "go_asm.h"
-#include "textflag.h"
-
-// See memclrNoHeapPointers Go doc for important implementation constraints.
-
-// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
-// ABIInternal for performance.
-TEXT runtime·memclrNoHeapPointers<ABIInternal>(SB), NOSPLIT, $0-16
- // AX = ptr
- // BX = n
- MOVQ AX, DI // DI = ptr
- XORQ AX, AX
-
-	// MOVOU seems to always be faster than REP STOSQ.
-tail:
- // BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing.
- TESTQ BX, BX
- JEQ _0
- CMPQ BX, $2
- JBE _1or2
- CMPQ BX, $4
- JBE _3or4
- CMPQ BX, $8
- JB _5through7
- JE _8
- CMPQ BX, $16
- JBE _9through16
- CMPQ BX, $32
- JBE _17through32
- CMPQ BX, $64
- JBE _33through64
- CMPQ BX, $128
- JBE _65through128
- CMPQ BX, $256
- JBE _129through256
- CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1
- JE loop_preheader_avx2
- // TODO: for really big clears, use MOVNTDQ, even without AVX2.
-
-loop:
- MOVOU X15, 0(DI)
- MOVOU X15, 16(DI)
- MOVOU X15, 32(DI)
- MOVOU X15, 48(DI)
- MOVOU X15, 64(DI)
- MOVOU X15, 80(DI)
- MOVOU X15, 96(DI)
- MOVOU X15, 112(DI)
- MOVOU X15, 128(DI)
- MOVOU X15, 144(DI)
- MOVOU X15, 160(DI)
- MOVOU X15, 176(DI)
- MOVOU X15, 192(DI)
- MOVOU X15, 208(DI)
- MOVOU X15, 224(DI)
- MOVOU X15, 240(DI)
- SUBQ $256, BX
- ADDQ $256, DI
- CMPQ BX, $256
- JAE loop
- JMP tail
-
-loop_preheader_avx2:
- VPXOR Y0, Y0, Y0
- // For smaller sizes MOVNTDQ may be faster or slower depending on hardware.
- // For larger sizes it is always faster, even on dual Xeons with 30M cache.
-	// TODO: take into account the actual LLC size. E.g. glibc uses LLC size/2.
- CMPQ BX, $0x2000000
- JAE loop_preheader_avx2_huge
-loop_avx2:
- VMOVDQU Y0, 0(DI)
- VMOVDQU Y0, 32(DI)
- VMOVDQU Y0, 64(DI)
- VMOVDQU Y0, 96(DI)
- SUBQ $128, BX
- ADDQ $128, DI
- CMPQ BX, $128
- JAE loop_avx2
- VMOVDQU Y0, -32(DI)(BX*1)
- VMOVDQU Y0, -64(DI)(BX*1)
- VMOVDQU Y0, -96(DI)(BX*1)
- VMOVDQU Y0, -128(DI)(BX*1)
- VZEROUPPER
- RET
-loop_preheader_avx2_huge:
- // Align to 32 byte boundary
- VMOVDQU Y0, 0(DI)
- MOVQ DI, SI
- ADDQ $32, DI
- ANDQ $~31, DI
- SUBQ DI, SI
- ADDQ SI, BX
-loop_avx2_huge:
- VMOVNTDQ Y0, 0(DI)
- VMOVNTDQ Y0, 32(DI)
- VMOVNTDQ Y0, 64(DI)
- VMOVNTDQ Y0, 96(DI)
- SUBQ $128, BX
- ADDQ $128, DI
- CMPQ BX, $128
- JAE loop_avx2_huge
- // In the description of MOVNTDQ in [1]
- // "... fencing operation implemented with the SFENCE or MFENCE instruction
- // should be used in conjunction with MOVNTDQ instructions..."
- // [1] 64-ia-32-architectures-software-developer-manual-325462.pdf
- SFENCE
- VMOVDQU Y0, -32(DI)(BX*1)
- VMOVDQU Y0, -64(DI)(BX*1)
- VMOVDQU Y0, -96(DI)(BX*1)
- VMOVDQU Y0, -128(DI)(BX*1)
- VZEROUPPER
- RET
-
-_1or2:
- MOVB AX, (DI)
- MOVB AX, -1(DI)(BX*1)
- RET
-_0:
- RET
-_3or4:
- MOVW AX, (DI)
- MOVW AX, -2(DI)(BX*1)
- RET
-_5through7:
- MOVL AX, (DI)
- MOVL AX, -4(DI)(BX*1)
- RET
-_8:
- // We need a separate case for 8 to make sure we clear pointers atomically.
- MOVQ AX, (DI)
- RET
-_9through16:
- MOVQ AX, (DI)
- MOVQ AX, -8(DI)(BX*1)
- RET
-_17through32:
- MOVOU X15, (DI)
- MOVOU X15, -16(DI)(BX*1)
- RET
-_33through64:
- MOVOU X15, (DI)
- MOVOU X15, 16(DI)
- MOVOU X15, -32(DI)(BX*1)
- MOVOU X15, -16(DI)(BX*1)
- RET
-_65through128:
- MOVOU X15, (DI)
- MOVOU X15, 16(DI)
- MOVOU X15, 32(DI)
- MOVOU X15, 48(DI)
- MOVOU X15, -64(DI)(BX*1)
- MOVOU X15, -48(DI)(BX*1)
- MOVOU X15, -32(DI)(BX*1)
- MOVOU X15, -16(DI)(BX*1)
- RET
-_129through256:
- MOVOU X15, (DI)
- MOVOU X15, 16(DI)
- MOVOU X15, 32(DI)
- MOVOU X15, 48(DI)
- MOVOU X15, 64(DI)
- MOVOU X15, 80(DI)
- MOVOU X15, 96(DI)
- MOVOU X15, 112(DI)
- MOVOU X15, -128(DI)(BX*1)
- MOVOU X15, -112(DI)(BX*1)
- MOVOU X15, -96(DI)(BX*1)
- MOVOU X15, -80(DI)(BX*1)
- MOVOU X15, -64(DI)(BX*1)
- MOVOU X15, -48(DI)(BX*1)
- MOVOU X15, -32(DI)(BX*1)
- MOVOU X15, -16(DI)(BX*1)
- RET
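From Go source the usual way to reach memclrNoHeapPointers is not a call but a loop shape the compiler recognizes: a range loop that assigns the zero value to every element is lowered to a single runtime clear, which on amd64 is the routine above. A small demonstration:

package main

import "fmt"

// clearBytes zeroes b. The compiler pattern-matches this loop shape
// and replaces it with one memclr call (memclrNoHeapPointers for
// pointer-free element types).
func clearBytes(b []byte) {
	for i := range b {
		b[i] = 0
	}
}

func main() {
	b := []byte{1, 2, 3, 4}
	clearBytes(b)
	fmt.Println(b) // [0 0 0 0]
}
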
diff --git a/contrib/go/_std_1.18/src/runtime/memmove_amd64.s b/contrib/go/_std_1.18/src/runtime/memmove_amd64.s
deleted file mode 100644
index eeb5033fd9..0000000000
--- a/contrib/go/_std_1.18/src/runtime/memmove_amd64.s
+++ /dev/null
@@ -1,532 +0,0 @@
-// Derived from Inferno's libkern/memmove-386.s (adapted for amd64)
-// https://bitbucket.org/inferno-os/inferno-os/src/master/libkern/memmove-386.s
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
-// Portions Copyright 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-//go:build !plan9
-
-#include "go_asm.h"
-#include "textflag.h"
-
-// See memmove Go doc for important implementation constraints.
-
-// func memmove(to, from unsafe.Pointer, n uintptr)
-// ABIInternal for performance.
-TEXT runtime·memmove<ABIInternal>(SB), NOSPLIT, $0-24
- // AX = to
- // BX = from
- // CX = n
- MOVQ AX, DI
- MOVQ BX, SI
- MOVQ CX, BX
-
- // REP instructions have a high startup cost, so we handle small sizes
- // with some straightline code. The REP MOVSQ instruction is really fast
- // for large sizes. The cutover is approximately 2K.
-tail:
- // move_129through256 or smaller work whether or not the source and the
- // destination memory regions overlap because they load all data into
- // registers before writing it back. move_256through2048 on the other
- // hand can be used only when the memory regions don't overlap or the copy
- // direction is forward.
- //
- // BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing.
- TESTQ BX, BX
- JEQ move_0
- CMPQ BX, $2
- JBE move_1or2
- CMPQ BX, $4
- JB move_3
- JBE move_4
- CMPQ BX, $8
- JB move_5through7
- JE move_8
- CMPQ BX, $16
- JBE move_9through16
- CMPQ BX, $32
- JBE move_17through32
- CMPQ BX, $64
- JBE move_33through64
- CMPQ BX, $128
- JBE move_65through128
- CMPQ BX, $256
- JBE move_129through256
-
- TESTB $1, runtime·useAVXmemmove(SB)
- JNZ avxUnaligned
-
-/*
- * check and set for backwards
- */
- CMPQ SI, DI
- JLS back
-
-/*
- * forward copy loop
- */
-forward:
- CMPQ BX, $2048
- JLS move_256through2048
-
- // If REP MOVSB isn't fast, don't use it
- CMPB internal∕cpu·X86+const_offsetX86HasERMS(SB), $1 // enhanced REP MOVSB/STOSB
- JNE fwdBy8
-
- // Check alignment
- MOVL SI, AX
- ORL DI, AX
- TESTL $7, AX
- JEQ fwdBy8
-
- // Do 1 byte at a time
- MOVQ BX, CX
- REP; MOVSB
- RET
-
-fwdBy8:
- // Do 8 bytes at a time
- MOVQ BX, CX
- SHRQ $3, CX
- ANDQ $7, BX
- REP; MOVSQ
- JMP tail
-
-back:
-/*
- * check overlap
- */
- MOVQ SI, CX
- ADDQ BX, CX
- CMPQ CX, DI
- JLS forward
-/*
- * whole thing backwards has
- * adjusted addresses
- */
- ADDQ BX, DI
- ADDQ BX, SI
- STD
-
-/*
- * copy
- */
- MOVQ BX, CX
- SHRQ $3, CX
- ANDQ $7, BX
-
- SUBQ $8, DI
- SUBQ $8, SI
- REP; MOVSQ
-
- CLD
- ADDQ $8, DI
- ADDQ $8, SI
- SUBQ BX, DI
- SUBQ BX, SI
- JMP tail
-
-move_1or2:
- MOVB (SI), AX
- MOVB -1(SI)(BX*1), CX
- MOVB AX, (DI)
- MOVB CX, -1(DI)(BX*1)
- RET
-move_0:
- RET
-move_4:
- MOVL (SI), AX
- MOVL AX, (DI)
- RET
-move_3:
- MOVW (SI), AX
- MOVB 2(SI), CX
- MOVW AX, (DI)
- MOVB CX, 2(DI)
- RET
-move_5through7:
- MOVL (SI), AX
- MOVL -4(SI)(BX*1), CX
- MOVL AX, (DI)
- MOVL CX, -4(DI)(BX*1)
- RET
-move_8:
- // We need a separate case for 8 to make sure we write pointers atomically.
- MOVQ (SI), AX
- MOVQ AX, (DI)
- RET
-move_9through16:
- MOVQ (SI), AX
- MOVQ -8(SI)(BX*1), CX
- MOVQ AX, (DI)
- MOVQ CX, -8(DI)(BX*1)
- RET
-move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(BX*1), X1
- MOVOU X0, (DI)
- MOVOU X1, -16(DI)(BX*1)
- RET
-move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(BX*1), X2
- MOVOU -16(SI)(BX*1), X3
- MOVOU X0, (DI)
- MOVOU X1, 16(DI)
- MOVOU X2, -32(DI)(BX*1)
- MOVOU X3, -16(DI)(BX*1)
- RET
-move_65through128:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU 32(SI), X2
- MOVOU 48(SI), X3
- MOVOU -64(SI)(BX*1), X4
- MOVOU -48(SI)(BX*1), X5
- MOVOU -32(SI)(BX*1), X6
- MOVOU -16(SI)(BX*1), X7
- MOVOU X0, (DI)
- MOVOU X1, 16(DI)
- MOVOU X2, 32(DI)
- MOVOU X3, 48(DI)
- MOVOU X4, -64(DI)(BX*1)
- MOVOU X5, -48(DI)(BX*1)
- MOVOU X6, -32(DI)(BX*1)
- MOVOU X7, -16(DI)(BX*1)
- RET
-move_129through256:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU 32(SI), X2
- MOVOU 48(SI), X3
- MOVOU 64(SI), X4
- MOVOU 80(SI), X5
- MOVOU 96(SI), X6
- MOVOU 112(SI), X7
- MOVOU -128(SI)(BX*1), X8
- MOVOU -112(SI)(BX*1), X9
- MOVOU -96(SI)(BX*1), X10
- MOVOU -80(SI)(BX*1), X11
- MOVOU -64(SI)(BX*1), X12
- MOVOU -48(SI)(BX*1), X13
- MOVOU -32(SI)(BX*1), X14
- MOVOU -16(SI)(BX*1), X15
- MOVOU X0, (DI)
- MOVOU X1, 16(DI)
- MOVOU X2, 32(DI)
- MOVOU X3, 48(DI)
- MOVOU X4, 64(DI)
- MOVOU X5, 80(DI)
- MOVOU X6, 96(DI)
- MOVOU X7, 112(DI)
- MOVOU X8, -128(DI)(BX*1)
- MOVOU X9, -112(DI)(BX*1)
- MOVOU X10, -96(DI)(BX*1)
- MOVOU X11, -80(DI)(BX*1)
- MOVOU X12, -64(DI)(BX*1)
- MOVOU X13, -48(DI)(BX*1)
- MOVOU X14, -32(DI)(BX*1)
- MOVOU X15, -16(DI)(BX*1)
- // X15 must be zero on return
- PXOR X15, X15
- RET
-move_256through2048:
- SUBQ $256, BX
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU 32(SI), X2
- MOVOU 48(SI), X3
- MOVOU 64(SI), X4
- MOVOU 80(SI), X5
- MOVOU 96(SI), X6
- MOVOU 112(SI), X7
- MOVOU 128(SI), X8
- MOVOU 144(SI), X9
- MOVOU 160(SI), X10
- MOVOU 176(SI), X11
- MOVOU 192(SI), X12
- MOVOU 208(SI), X13
- MOVOU 224(SI), X14
- MOVOU 240(SI), X15
- MOVOU X0, (DI)
- MOVOU X1, 16(DI)
- MOVOU X2, 32(DI)
- MOVOU X3, 48(DI)
- MOVOU X4, 64(DI)
- MOVOU X5, 80(DI)
- MOVOU X6, 96(DI)
- MOVOU X7, 112(DI)
- MOVOU X8, 128(DI)
- MOVOU X9, 144(DI)
- MOVOU X10, 160(DI)
- MOVOU X11, 176(DI)
- MOVOU X12, 192(DI)
- MOVOU X13, 208(DI)
- MOVOU X14, 224(DI)
- MOVOU X15, 240(DI)
- CMPQ BX, $256
- LEAQ 256(SI), SI
- LEAQ 256(DI), DI
- JGE move_256through2048
- // X15 must be zero on return
- PXOR X15, X15
- JMP tail
-
-avxUnaligned:
-	// There are two implementations of the move algorithm.
-	// The first one, for non-overlapping memory regions, uses forward copying.
-	// The second one, for overlapping regions, uses backward copying.
- MOVQ DI, CX
- SUBQ SI, CX
-	// Now CX contains the distance between SRC and DEST
- CMPQ CX, BX
-	// If the distance is less than the region length, the regions overlap
- JC copy_backward
-
- // Non-temporal copy would be better for big sizes.
- CMPQ BX, $0x100000
- JAE gobble_big_data_fwd
-
- // Memory layout on the source side
- // SI CX
- // |<---------BX before correction--------->|
- // | |<--BX corrected-->| |
- // | | |<--- AX --->|
- // |<-R11->| |<-128 bytes->|
- // +----------------------------------------+
- // | Head | Body | Tail |
- // +-------+------------------+-------------+
- // ^ ^ ^
- // | | |
- // Save head into Y4 Save tail into X5..X12
- // |
- // SI+R11, where R11 = ((DI & -32) + 32) - DI
- // Algorithm:
- // 1. Unaligned save of the tail's 128 bytes
- // 2. Unaligned save of the head's 32 bytes
- // 3. Destination-aligned copying of body (128 bytes per iteration)
- // 4. Put head on the new place
- // 5. Put the tail on the new place
-	// It can be important to satisfy the processor's pipeline requirements for
-	// small sizes, as the cost of copying the unaligned memory regions is
-	// comparable with the cost of the main loop, so the code is slightly messy there.
-	// There is a cleaner implementation of this algorithm for bigger sizes,
-	// where the cost of copying the unaligned parts is negligible.
-	// You can see it after the gobble_big_data_fwd label.
- LEAQ (SI)(BX*1), CX
- MOVQ DI, R10
-	// CX points to the end of the buffer, so we need to go back slightly. We will use negative offsets there.
- MOVOU -0x80(CX), X5
- MOVOU -0x70(CX), X6
- MOVQ $0x80, AX
- // Align destination address
- ANDQ $-32, DI
- ADDQ $32, DI
- // Continue tail saving.
- MOVOU -0x60(CX), X7
- MOVOU -0x50(CX), X8
- // Make R11 delta between aligned and unaligned destination addresses.
- MOVQ DI, R11
- SUBQ R10, R11
- // Continue tail saving.
- MOVOU -0x40(CX), X9
- MOVOU -0x30(CX), X10
-	// Adjust the bytes-to-copy value, since we've already prepared the unaligned part for copying.
- SUBQ R11, BX
- // Continue tail saving.
- MOVOU -0x20(CX), X11
- MOVOU -0x10(CX), X12
- // The tail will be put on its place after main body copying.
- // It's time for the unaligned heading part.
- VMOVDQU (SI), Y4
- // Adjust source address to point past head.
- ADDQ R11, SI
- SUBQ AX, BX
- // Aligned memory copying there
-gobble_128_loop:
- VMOVDQU (SI), Y0
- VMOVDQU 0x20(SI), Y1
- VMOVDQU 0x40(SI), Y2
- VMOVDQU 0x60(SI), Y3
- ADDQ AX, SI
- VMOVDQA Y0, (DI)
- VMOVDQA Y1, 0x20(DI)
- VMOVDQA Y2, 0x40(DI)
- VMOVDQA Y3, 0x60(DI)
- ADDQ AX, DI
- SUBQ AX, BX
- JA gobble_128_loop
- // Now we can store unaligned parts.
- ADDQ AX, BX
- ADDQ DI, BX
- VMOVDQU Y4, (R10)
- VZEROUPPER
- MOVOU X5, -0x80(BX)
- MOVOU X6, -0x70(BX)
- MOVOU X7, -0x60(BX)
- MOVOU X8, -0x50(BX)
- MOVOU X9, -0x40(BX)
- MOVOU X10, -0x30(BX)
- MOVOU X11, -0x20(BX)
- MOVOU X12, -0x10(BX)
- RET
-
-gobble_big_data_fwd:
-	// This is the forward-copying path for big regions.
-	// It uses non-temporal mov instructions.
-	// Details of this algorithm are commented above for the small-size path.
- LEAQ (SI)(BX*1), CX
- MOVOU -0x80(SI)(BX*1), X5
- MOVOU -0x70(CX), X6
- MOVOU -0x60(CX), X7
- MOVOU -0x50(CX), X8
- MOVOU -0x40(CX), X9
- MOVOU -0x30(CX), X10
- MOVOU -0x20(CX), X11
- MOVOU -0x10(CX), X12
- VMOVDQU (SI), Y4
- MOVQ DI, R8
- ANDQ $-32, DI
- ADDQ $32, DI
- MOVQ DI, R10
- SUBQ R8, R10
- SUBQ R10, BX
- ADDQ R10, SI
- LEAQ (DI)(BX*1), CX
- SUBQ $0x80, BX
-gobble_mem_fwd_loop:
- PREFETCHNTA 0x1C0(SI)
- PREFETCHNTA 0x280(SI)
- // Prefetch values were chosen empirically.
-	// The approach to prefetch usage follows 7.6.6 of [1].
- // [1] 64-ia-32-architectures-optimization-manual.pdf
- // https://www.intel.ru/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
- VMOVDQU (SI), Y0
- VMOVDQU 0x20(SI), Y1
- VMOVDQU 0x40(SI), Y2
- VMOVDQU 0x60(SI), Y3
- ADDQ $0x80, SI
- VMOVNTDQ Y0, (DI)
- VMOVNTDQ Y1, 0x20(DI)
- VMOVNTDQ Y2, 0x40(DI)
- VMOVNTDQ Y3, 0x60(DI)
- ADDQ $0x80, DI
- SUBQ $0x80, BX
- JA gobble_mem_fwd_loop
- // NT instructions don't follow the normal cache-coherency rules.
-	// We need SFENCE here to make the copied data visible in a timely manner.
- SFENCE
- VMOVDQU Y4, (R8)
- VZEROUPPER
- MOVOU X5, -0x80(CX)
- MOVOU X6, -0x70(CX)
- MOVOU X7, -0x60(CX)
- MOVOU X8, -0x50(CX)
- MOVOU X9, -0x40(CX)
- MOVOU X10, -0x30(CX)
- MOVOU X11, -0x20(CX)
- MOVOU X12, -0x10(CX)
- RET
-
-copy_backward:
- MOVQ DI, AX
- // Backward copying is about the same as the forward one.
-	// First we load the unaligned tail from the beginning of the region.
- MOVOU (SI), X5
- MOVOU 0x10(SI), X6
- ADDQ BX, DI
- MOVOU 0x20(SI), X7
- MOVOU 0x30(SI), X8
- LEAQ -0x20(DI), R10
- MOVQ DI, R11
- MOVOU 0x40(SI), X9
- MOVOU 0x50(SI), X10
- ANDQ $0x1F, R11
- MOVOU 0x60(SI), X11
- MOVOU 0x70(SI), X12
- XORQ R11, DI
-	// Let's point SI to the end of the region
- ADDQ BX, SI
- // and load unaligned head into X4.
- VMOVDQU -0x20(SI), Y4
- SUBQ R11, SI
- SUBQ R11, BX
-	// If there is enough data for non-temporal moves, go to the special loop
- CMPQ BX, $0x100000
- JA gobble_big_data_bwd
- SUBQ $0x80, BX
-gobble_mem_bwd_loop:
- VMOVDQU -0x20(SI), Y0
- VMOVDQU -0x40(SI), Y1
- VMOVDQU -0x60(SI), Y2
- VMOVDQU -0x80(SI), Y3
- SUBQ $0x80, SI
- VMOVDQA Y0, -0x20(DI)
- VMOVDQA Y1, -0x40(DI)
- VMOVDQA Y2, -0x60(DI)
- VMOVDQA Y3, -0x80(DI)
- SUBQ $0x80, DI
- SUBQ $0x80, BX
- JA gobble_mem_bwd_loop
- // Let's store unaligned data
- VMOVDQU Y4, (R10)
- VZEROUPPER
- MOVOU X5, (AX)
- MOVOU X6, 0x10(AX)
- MOVOU X7, 0x20(AX)
- MOVOU X8, 0x30(AX)
- MOVOU X9, 0x40(AX)
- MOVOU X10, 0x50(AX)
- MOVOU X11, 0x60(AX)
- MOVOU X12, 0x70(AX)
- RET
-
-gobble_big_data_bwd:
- SUBQ $0x80, BX
-gobble_big_mem_bwd_loop:
- PREFETCHNTA -0x1C0(SI)
- PREFETCHNTA -0x280(SI)
- VMOVDQU -0x20(SI), Y0
- VMOVDQU -0x40(SI), Y1
- VMOVDQU -0x60(SI), Y2
- VMOVDQU -0x80(SI), Y3
- SUBQ $0x80, SI
- VMOVNTDQ Y0, -0x20(DI)
- VMOVNTDQ Y1, -0x40(DI)
- VMOVNTDQ Y2, -0x60(DI)
- VMOVNTDQ Y3, -0x80(DI)
- SUBQ $0x80, DI
- SUBQ $0x80, BX
- JA gobble_big_mem_bwd_loop
- SFENCE
- VMOVDQU Y4, (R10)
- VZEROUPPER
- MOVOU X5, (AX)
- MOVOU X6, 0x10(AX)
- MOVOU X7, 0x20(AX)
- MOVOU X8, 0x30(AX)
- MOVOU X9, 0x40(AX)
- MOVOU X10, 0x50(AX)
- MOVOU X11, 0x60(AX)
- MOVOU X12, 0x70(AX)
- RET
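This routine is what backs the builtin copy, which the language spec defines to work even when source and destination overlap — exactly the case the backward-copy paths above handle. A quick demonstration:

package main

import "fmt"

func main() {
	// copy is backed by runtime·memmove, so an overlapping move
	// behaves as if the source were buffered first.
	b := []byte("abcdef")
	copy(b[2:], b[:4])
	fmt.Println(string(b)) // ababcd
}
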
diff --git a/contrib/go/_std_1.18/src/runtime/metrics.go b/contrib/go/_std_1.18/src/runtime/metrics.go
deleted file mode 100644
index 922dd2f814..0000000000
--- a/contrib/go/_std_1.18/src/runtime/metrics.go
+++ /dev/null
@@ -1,594 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-// Metrics implementation exported to runtime/metrics.
-
-import (
- "runtime/internal/atomic"
- "unsafe"
-)
-
-var (
- // metrics is a map of runtime/metrics keys to data used by the runtime
- // to sample each metric's value. metricsInit indicates it has been
- // initialized.
- //
- // These fields are protected by metricsSema which should be
- // locked/unlocked with metricsLock() / metricsUnlock().
- metricsSema uint32 = 1
- metricsInit bool
- metrics map[string]metricData
-
- sizeClassBuckets []float64
- timeHistBuckets []float64
-)
-
-type metricData struct {
- // deps is the set of runtime statistics that this metric
- // depends on. Before compute is called, the statAggregate
- // which will be passed must ensure() these dependencies.
- deps statDepSet
-
- // compute is a function that populates a metricValue
- // given a populated statAggregate structure.
- compute func(in *statAggregate, out *metricValue)
-}
-
-func metricsLock() {
- // Acquire the metricsSema but with handoff. Operations are typically
- // expensive enough that queueing up goroutines and handing off between
- // them will be noticeably better-behaved.
- semacquire1(&metricsSema, true, 0, 0)
- if raceenabled {
- raceacquire(unsafe.Pointer(&metricsSema))
- }
-}
-
-func metricsUnlock() {
- if raceenabled {
- racerelease(unsafe.Pointer(&metricsSema))
- }
- semrelease(&metricsSema)
-}
-
-// initMetrics initializes the metrics map if it hasn't been yet.
-//
-// metricsSema must be held.
-func initMetrics() {
- if metricsInit {
- return
- }
-
- sizeClassBuckets = make([]float64, _NumSizeClasses, _NumSizeClasses+1)
- // Skip size class 0 which is a stand-in for large objects, but large
- // objects are tracked separately (and they actually get placed in
- // the last bucket, not the first).
- sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
- for i := 1; i < _NumSizeClasses; i++ {
- // Size classes have an inclusive upper-bound
- // and exclusive lower bound (e.g. 48-byte size class is
-		// (32, 48]) whereas we want an inclusive lower-bound
- // and exclusive upper-bound (e.g. 48-byte size class is
- // [33, 49). We can achieve this by shifting all bucket
- // boundaries up by 1.
- //
- // Also, a float64 can precisely represent integers with
- // value up to 2^53 and size classes are relatively small
- // (nowhere near 2^48 even) so this will give us exact
- // boundaries.
- sizeClassBuckets[i] = float64(class_to_size[i] + 1)
- }
- sizeClassBuckets = append(sizeClassBuckets, float64Inf())
-
- timeHistBuckets = timeHistogramMetricsBuckets()
- metrics = map[string]metricData{
- "/gc/cycles/automatic:gc-cycles": {
- deps: makeStatDepSet(sysStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = in.sysStats.gcCyclesDone - in.sysStats.gcCyclesForced
- },
- },
- "/gc/cycles/forced:gc-cycles": {
- deps: makeStatDepSet(sysStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = in.sysStats.gcCyclesForced
- },
- },
- "/gc/cycles/total:gc-cycles": {
- deps: makeStatDepSet(sysStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = in.sysStats.gcCyclesDone
- },
- },
- "/gc/heap/allocs-by-size:bytes": {
- deps: makeStatDepSet(heapStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- hist := out.float64HistOrInit(sizeClassBuckets)
- hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeAllocCount)
- // Cut off the first index which is ostensibly for size class 0,
- // but large objects are tracked separately so it's actually unused.
- for i, count := range in.heapStats.smallAllocCount[1:] {
- hist.counts[i] = uint64(count)
- }
- },
- },
- "/gc/heap/allocs:bytes": {
- deps: makeStatDepSet(heapStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = in.heapStats.totalAllocated
- },
- },
- "/gc/heap/allocs:objects": {
- deps: makeStatDepSet(heapStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = in.heapStats.totalAllocs
- },
- },
- "/gc/heap/frees-by-size:bytes": {
- deps: makeStatDepSet(heapStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- hist := out.float64HistOrInit(sizeClassBuckets)
- hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeFreeCount)
- // Cut off the first index which is ostensibly for size class 0,
- // but large objects are tracked separately so it's actually unused.
- for i, count := range in.heapStats.smallFreeCount[1:] {
- hist.counts[i] = uint64(count)
- }
- },
- },
- "/gc/heap/frees:bytes": {
- deps: makeStatDepSet(heapStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = in.heapStats.totalFreed
- },
- },
- "/gc/heap/frees:objects": {
- deps: makeStatDepSet(heapStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = in.heapStats.totalFrees
- },
- },
- "/gc/heap/goal:bytes": {
- deps: makeStatDepSet(sysStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = in.sysStats.heapGoal
- },
- },
- "/gc/heap/objects:objects": {
- deps: makeStatDepSet(heapStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = in.heapStats.numObjects
- },
- },
- "/gc/heap/tiny/allocs:objects": {
- deps: makeStatDepSet(heapStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = uint64(in.heapStats.tinyAllocCount)
- },
- },
- "/gc/pauses:seconds": {
- compute: func(_ *statAggregate, out *metricValue) {
- hist := out.float64HistOrInit(timeHistBuckets)
- // The bottom-most bucket, containing negative values, is tracked
-				// separately as underflow, so fill that in manually and then
- // iterate over the rest.
- hist.counts[0] = atomic.Load64(&memstats.gcPauseDist.underflow)
- for i := range memstats.gcPauseDist.counts {
- hist.counts[i+1] = atomic.Load64(&memstats.gcPauseDist.counts[i])
- }
- },
- },
- "/memory/classes/heap/free:bytes": {
- deps: makeStatDepSet(heapStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = uint64(in.heapStats.committed - in.heapStats.inHeap -
- in.heapStats.inStacks - in.heapStats.inWorkBufs -
- in.heapStats.inPtrScalarBits)
- },
- },
- "/memory/classes/heap/objects:bytes": {
- deps: makeStatDepSet(heapStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = in.heapStats.inObjects
- },
- },
- "/memory/classes/heap/released:bytes": {
- deps: makeStatDepSet(heapStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = uint64(in.heapStats.released)
- },
- },
- "/memory/classes/heap/stacks:bytes": {
- deps: makeStatDepSet(heapStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = uint64(in.heapStats.inStacks)
- },
- },
- "/memory/classes/heap/unused:bytes": {
- deps: makeStatDepSet(heapStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = uint64(in.heapStats.inHeap) - in.heapStats.inObjects
- },
- },
- "/memory/classes/metadata/mcache/free:bytes": {
- deps: makeStatDepSet(sysStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = in.sysStats.mCacheSys - in.sysStats.mCacheInUse
- },
- },
- "/memory/classes/metadata/mcache/inuse:bytes": {
- deps: makeStatDepSet(sysStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = in.sysStats.mCacheInUse
- },
- },
- "/memory/classes/metadata/mspan/free:bytes": {
- deps: makeStatDepSet(sysStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = in.sysStats.mSpanSys - in.sysStats.mSpanInUse
- },
- },
- "/memory/classes/metadata/mspan/inuse:bytes": {
- deps: makeStatDepSet(sysStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = in.sysStats.mSpanInUse
- },
- },
- "/memory/classes/metadata/other:bytes": {
- deps: makeStatDepSet(heapStatsDep, sysStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = uint64(in.heapStats.inWorkBufs+in.heapStats.inPtrScalarBits) + in.sysStats.gcMiscSys
- },
- },
- "/memory/classes/os-stacks:bytes": {
- deps: makeStatDepSet(sysStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = in.sysStats.stacksSys
- },
- },
- "/memory/classes/other:bytes": {
- deps: makeStatDepSet(sysStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = in.sysStats.otherSys
- },
- },
- "/memory/classes/profiling/buckets:bytes": {
- deps: makeStatDepSet(sysStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = in.sysStats.buckHashSys
- },
- },
- "/memory/classes/total:bytes": {
- deps: makeStatDepSet(heapStatsDep, sysStatsDep),
- compute: func(in *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = uint64(in.heapStats.committed+in.heapStats.released) +
- in.sysStats.stacksSys + in.sysStats.mSpanSys +
- in.sysStats.mCacheSys + in.sysStats.buckHashSys +
- in.sysStats.gcMiscSys + in.sysStats.otherSys
- },
- },
- "/sched/goroutines:goroutines": {
- compute: func(_ *statAggregate, out *metricValue) {
- out.kind = metricKindUint64
- out.scalar = uint64(gcount())
- },
- },
- "/sched/latencies:seconds": {
- compute: func(_ *statAggregate, out *metricValue) {
- hist := out.float64HistOrInit(timeHistBuckets)
- hist.counts[0] = atomic.Load64(&sched.timeToRun.underflow)
- for i := range sched.timeToRun.counts {
- hist.counts[i+1] = atomic.Load64(&sched.timeToRun.counts[i])
- }
- },
- },
- }
- metricsInit = true
-}
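This map is the runtime half of the public runtime/metrics package; a consumer samples the same keys through the stable API, for example:

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	// Sample two of the keys registered in the map above.
	samples := []metrics.Sample{
		{Name: "/gc/heap/goal:bytes"},
		{Name: "/sched/goroutines:goroutines"},
	}
	metrics.Read(samples)
	for _, s := range samples {
		if s.Value.Kind() == metrics.KindUint64 {
			fmt.Printf("%s = %d\n", s.Name, s.Value.Uint64())
		}
	}
}
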
-
-// statDep is a dependency on a group of statistics
-// that a metric might have.
-type statDep uint
-
-const (
- heapStatsDep statDep = iota // corresponds to heapStatsAggregate
- sysStatsDep // corresponds to sysStatsAggregate
- numStatsDeps
-)
-
-// statDepSet represents a set of statDeps.
-//
-// Under the hood, it's a bitmap.
-type statDepSet [1]uint64
-
-// makeStatDepSet creates a new statDepSet from a list of statDeps.
-func makeStatDepSet(deps ...statDep) statDepSet {
- var s statDepSet
- for _, d := range deps {
- s[d/64] |= 1 << (d % 64)
- }
- return s
-}
-
-// difference returns the set difference of s and b (s &^ b) as a new set.
-func (s statDepSet) difference(b statDepSet) statDepSet {
- var c statDepSet
- for i := range s {
- c[i] = s[i] &^ b[i]
- }
- return c
-}
-
-// union returns the union of the two sets as a new set.
-func (s statDepSet) union(b statDepSet) statDepSet {
- var c statDepSet
- for i := range s {
- c[i] = s[i] | b[i]
- }
- return c
-}
-
-// empty returns true if there are no dependencies in the set.
-func (s *statDepSet) empty() bool {
- for _, c := range s {
- if c != 0 {
- return false
- }
- }
- return true
-}
-
-// has returns true if the set contains a given statDep.
-func (s *statDepSet) has(d statDep) bool {
- return s[d/64]&(1<<(d%64)) != 0
-}
-
-// heapStatsAggregate represents memory stats obtained from the
-// runtime. This set of stats is grouped together because they
-// depend on each other in some way to make sense of the runtime's
-// current heap memory use. They're also sharded across Ps, so it
-// makes sense to grab them all at once.
-type heapStatsAggregate struct {
- heapStatsDelta
-
- // Derived from values in heapStatsDelta.
-
-	// inObjects is the bytes of memory occupied by objects.
- inObjects uint64
-
- // numObjects is the number of live objects in the heap.
- numObjects uint64
-
- // totalAllocated is the total bytes of heap objects allocated
- // over the lifetime of the program.
- totalAllocated uint64
-
- // totalFreed is the total bytes of heap objects freed
- // over the lifetime of the program.
- totalFreed uint64
-
- // totalAllocs is the number of heap objects allocated over
- // the lifetime of the program.
- totalAllocs uint64
-
- // totalFrees is the number of heap objects freed over
- // the lifetime of the program.
- totalFrees uint64
-}
-
-// compute populates the heapStatsAggregate with values from the runtime.
-func (a *heapStatsAggregate) compute() {
- memstats.heapStats.read(&a.heapStatsDelta)
-
- // Calculate derived stats.
- a.totalAllocs = a.largeAllocCount
- a.totalFrees = a.largeFreeCount
- a.totalAllocated = a.largeAlloc
- a.totalFreed = a.largeFree
- for i := range a.smallAllocCount {
- na := a.smallAllocCount[i]
- nf := a.smallFreeCount[i]
- a.totalAllocs += na
- a.totalFrees += nf
- a.totalAllocated += na * uint64(class_to_size[i])
- a.totalFreed += nf * uint64(class_to_size[i])
- }
- a.inObjects = a.totalAllocated - a.totalFreed
- a.numObjects = a.totalAllocs - a.totalFrees
-}
-
-// sysStatsAggregate represents system memory stats obtained
-// from the runtime. These stats are grouped together because
-// they're all relatively cheap to acquire and generally independent
-// of one another and other runtime memory stats. The fact that they
-// may be acquired at different times, especially with respect to
-// heapStatsAggregate, means there could be some skew, but because
-// these stats are independent, there's no real consistency issue here.
-type sysStatsAggregate struct {
- stacksSys uint64
- mSpanSys uint64
- mSpanInUse uint64
- mCacheSys uint64
- mCacheInUse uint64
- buckHashSys uint64
- gcMiscSys uint64
- otherSys uint64
- heapGoal uint64
- gcCyclesDone uint64
- gcCyclesForced uint64
-}
-
-// compute populates the sysStatsAggregate with values from the runtime.
-func (a *sysStatsAggregate) compute() {
- a.stacksSys = memstats.stacks_sys.load()
- a.buckHashSys = memstats.buckhash_sys.load()
- a.gcMiscSys = memstats.gcMiscSys.load()
- a.otherSys = memstats.other_sys.load()
- a.heapGoal = atomic.Load64(&gcController.heapGoal)
- a.gcCyclesDone = uint64(memstats.numgc)
- a.gcCyclesForced = uint64(memstats.numforcedgc)
-
- systemstack(func() {
- lock(&mheap_.lock)
- a.mSpanSys = memstats.mspan_sys.load()
- a.mSpanInUse = uint64(mheap_.spanalloc.inuse)
- a.mCacheSys = memstats.mcache_sys.load()
- a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
- unlock(&mheap_.lock)
- })
-}
-
-// statAggregate is the main driver of the metrics implementation.
-//
-// It contains multiple aggregates of runtime statistics, as well
-// as a set of these aggregates that it has populated. The aggregates
-// are populated lazily by its ensure method.
-type statAggregate struct {
- ensured statDepSet
- heapStats heapStatsAggregate
- sysStats sysStatsAggregate
-}
-
-// ensure populates statistics aggregates determined by deps if they
-// haven't yet been populated.
-func (a *statAggregate) ensure(deps *statDepSet) {
- missing := deps.difference(a.ensured)
- if missing.empty() {
- return
- }
- for i := statDep(0); i < numStatsDeps; i++ {
- if !missing.has(i) {
- continue
- }
- switch i {
- case heapStatsDep:
- a.heapStats.compute()
- case sysStatsDep:
- a.sysStats.compute()
- }
- }
- a.ensured = a.ensured.union(missing)
-}
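-
-// An illustrative sketch of the lazy-population contract: repeated
-// ensure calls with overlapping deps compute each aggregate at most
-// once per statAggregate value.
-//
-//	var a statAggregate
-//	deps := makeStatDepSet(heapStatsDep)
-//	a.ensure(&deps) // computes heapStats
-//	a.ensure(&deps) // no-op: heapStatsDep is already in a.ensured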
-
-// metricKind is a runtime copy of runtime/metrics.ValueKind and
-// must be kept structurally identical to that type.
-type metricKind int
-
-const (
- // These values must be kept identical to their corresponding Kind* values
- // in the runtime/metrics package.
- metricKindBad metricKind = iota
- metricKindUint64
- metricKindFloat64
- metricKindFloat64Histogram
-)
-
-// metricSample is a runtime copy of runtime/metrics.Sample and
-// must be kept structurally identical to that type.
-type metricSample struct {
- name string
- value metricValue
-}
-
-// metricValue is a runtime copy of runtime/metrics.Value and
-// must be kept structurally identical to that type.
-type metricValue struct {
- kind metricKind
- scalar uint64 // contains scalar values for scalar Kinds.
- pointer unsafe.Pointer // contains non-scalar values.
-}
-
-// float64HistOrInit tries to pull out an existing float64Histogram
-// from the value, but if none exists, then it allocates one with
-// the given buckets.
-func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogram {
- var hist *metricFloat64Histogram
- if v.kind == metricKindFloat64Histogram && v.pointer != nil {
- hist = (*metricFloat64Histogram)(v.pointer)
- } else {
- v.kind = metricKindFloat64Histogram
- hist = new(metricFloat64Histogram)
- v.pointer = unsafe.Pointer(hist)
- }
- hist.buckets = buckets
- if len(hist.counts) != len(hist.buckets)-1 {
- hist.counts = make([]uint64, len(buckets)-1)
- }
- return hist
-}
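-
-// Illustrative usage, mirroring the /sched/latencies:seconds metric
-// above and assuming some out *metricValue: the histogram is allocated
-// on first use and reused on later reads, with len(counts) always equal
-// to len(buckets)-1.
-//
-//	hist := out.float64HistOrInit(timeHistBuckets)
-//	hist.counts[0] = 0 // index 0 holds the underflow count in that metric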
-
-// metricFloat64Histogram is a runtime copy of runtime/metrics.Float64Histogram
-// and must be kept structurally identical to that type.
-type metricFloat64Histogram struct {
- counts []uint64
- buckets []float64
-}
-
-// agg is used by readMetrics, and is protected by metricsSema.
-//
-// Managed as a global variable because its pointer will be
-// an argument to a dynamically-defined function, and we'd
-// like to avoid it escaping to the heap.
-var agg statAggregate
-
-// readMetrics is the implementation of runtime/metrics.Read.
-//
-//go:linkname readMetrics runtime/metrics.runtime_readMetrics
-func readMetrics(samplesp unsafe.Pointer, len int, cap int) {
- // Construct a slice from the args.
- sl := slice{samplesp, len, cap}
- samples := *(*[]metricSample)(unsafe.Pointer(&sl))
-
- metricsLock()
-
- // Ensure the map is initialized.
- initMetrics()
-
- // Clear agg defensively.
- agg = statAggregate{}
-
- // Sample.
- for i := range samples {
- sample := &samples[i]
- data, ok := metrics[sample.name]
- if !ok {
- sample.value.kind = metricKindBad
- continue
- }
- // Ensure we have all the stats we need.
- // agg is populated lazily.
- agg.ensure(&data.deps)
-
- // Compute the value based on the stats we have.
- data.compute(&agg, &sample.value)
- }
-
- metricsUnlock()
-}
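-
-// For reference, a minimal user-side sketch (not part of this file) of
-// the public API this function backs, runtime/metrics.Read:
-//
-//	import "runtime/metrics"
-//
-//	s := []metrics.Sample{{Name: "/memory/classes/total:bytes"}}
-//	metrics.Read(s)
-//	if s[0].Value.Kind() == metrics.KindUint64 {
-//		println(s[0].Value.Uint64())
-//	}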
diff --git a/contrib/go/_std_1.18/src/runtime/mfinal.go b/contrib/go/_std_1.18/src/runtime/mfinal.go
deleted file mode 100644
index 10623e4d67..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mfinal.go
+++ /dev/null
@@ -1,474 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Garbage collector: finalizers and block profiling.
-
-package runtime
-
-import (
- "internal/abi"
- "internal/goarch"
- "runtime/internal/atomic"
- "unsafe"
-)
-
-// finblock is an array of finalizers to be executed. finblocks are
-// arranged in a linked list for the finalizer queue.
-//
-// finblock is allocated from non-GC'd memory, so any heap pointers
-// must be specially handled. GC currently assumes that the finalizer
-// queue does not grow during marking (but it can shrink).
-//
-//go:notinheap
-type finblock struct {
- alllink *finblock
- next *finblock
- cnt uint32
- _ int32
- fin [(_FinBlockSize - 2*goarch.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
-}
-
-var finlock mutex // protects the following variables
-var fing *g // goroutine that runs finalizers
-var finq *finblock // list of finalizers that are to be executed
-var finc *finblock // cache of free blocks
-var finptrmask [_FinBlockSize / goarch.PtrSize / 8]byte
-var fingwait bool
-var fingwake bool
-var allfin *finblock // list of all blocks
-
-// NOTE: Layout known to queuefinalizer.
-type finalizer struct {
- fn *funcval // function to call (may be a heap pointer)
- arg unsafe.Pointer // ptr to object (may be a heap pointer)
- nret uintptr // bytes of return values from fn
- fint *_type // type of first argument of fn
- ot *ptrtype // type of ptr to object (may be a heap pointer)
-}
-
-var finalizer1 = [...]byte{
- // Each Finalizer is 5 words, ptr ptr INT ptr ptr (INT = uintptr here)
- // Each byte describes 8 words.
- // Need 8 Finalizers described by 5 bytes before pattern repeats:
- // ptr ptr INT ptr ptr
- // ptr ptr INT ptr ptr
- // ptr ptr INT ptr ptr
- // ptr ptr INT ptr ptr
- // ptr ptr INT ptr ptr
- // ptr ptr INT ptr ptr
- // ptr ptr INT ptr ptr
- // ptr ptr INT ptr ptr
- // aka
- //
- // ptr ptr INT ptr ptr ptr ptr INT
- // ptr ptr ptr ptr INT ptr ptr ptr
- // ptr INT ptr ptr ptr ptr INT ptr
- // ptr ptr ptr INT ptr ptr ptr ptr
- // INT ptr ptr ptr ptr INT ptr ptr
- //
- // Assumptions about Finalizer layout checked below.
- 1<<0 | 1<<1 | 0<<2 | 1<<3 | 1<<4 | 1<<5 | 1<<6 | 0<<7,
- 1<<0 | 1<<1 | 1<<2 | 1<<3 | 0<<4 | 1<<5 | 1<<6 | 1<<7,
- 1<<0 | 0<<1 | 1<<2 | 1<<3 | 1<<4 | 1<<5 | 0<<6 | 1<<7,
- 1<<0 | 1<<1 | 1<<2 | 0<<3 | 1<<4 | 1<<5 | 1<<6 | 1<<7,
- 0<<0 | 1<<1 | 1<<2 | 1<<3 | 1<<4 | 0<<5 | 1<<6 | 1<<7,
-}
-
-func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
- if gcphase != _GCoff {
- // Currently we assume that the finalizer queue won't
- // grow during marking so we don't have to rescan it
- // during mark termination. If we ever need to lift
- // this assumption, we can do it by adding the
- // necessary barriers to queuefinalizer (which it may
- // have automatically).
- throw("queuefinalizer during GC")
- }
-
- lock(&finlock)
- if finq == nil || finq.cnt == uint32(len(finq.fin)) {
- if finc == nil {
- finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gcMiscSys))
- finc.alllink = allfin
- allfin = finc
- if finptrmask[0] == 0 {
- // Build pointer mask for Finalizer array in block.
- // Check assumptions made in finalizer1 array above.
- if (unsafe.Sizeof(finalizer{}) != 5*goarch.PtrSize ||
- unsafe.Offsetof(finalizer{}.fn) != 0 ||
- unsafe.Offsetof(finalizer{}.arg) != goarch.PtrSize ||
- unsafe.Offsetof(finalizer{}.nret) != 2*goarch.PtrSize ||
- unsafe.Offsetof(finalizer{}.fint) != 3*goarch.PtrSize ||
- unsafe.Offsetof(finalizer{}.ot) != 4*goarch.PtrSize) {
- throw("finalizer out of sync")
- }
- for i := range finptrmask {
- finptrmask[i] = finalizer1[i%len(finalizer1)]
- }
- }
- }
- block := finc
- finc = block.next
- block.next = finq
- finq = block
- }
- f := &finq.fin[finq.cnt]
- atomic.Xadd(&finq.cnt, +1) // Sync with markroots
- f.fn = fn
- f.nret = nret
- f.fint = fint
- f.ot = ot
- f.arg = p
- fingwake = true
- unlock(&finlock)
-}
-
-//go:nowritebarrier
-func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) {
- for fb := allfin; fb != nil; fb = fb.alllink {
- for i := uint32(0); i < fb.cnt; i++ {
- f := &fb.fin[i]
- callback(f.fn, f.arg, f.nret, f.fint, f.ot)
- }
- }
-}
-
-func wakefing() *g {
- var res *g
- lock(&finlock)
- if fingwait && fingwake {
- fingwait = false
- fingwake = false
- res = fing
- }
- unlock(&finlock)
- return res
-}
-
-var (
- fingCreate uint32
- fingRunning bool
-)
-
-func createfing() {
- // start the finalizer goroutine exactly once
- if fingCreate == 0 && atomic.Cas(&fingCreate, 0, 1) {
- go runfinq()
- }
-}
-
-// This is the goroutine that runs all of the finalizers.
-func runfinq() {
- var (
- frame unsafe.Pointer
- framecap uintptr
- argRegs int
- )
-
- for {
- lock(&finlock)
- fb := finq
- finq = nil
- if fb == nil {
- gp := getg()
- fing = gp
- fingwait = true
- goparkunlock(&finlock, waitReasonFinalizerWait, traceEvGoBlock, 1)
- continue
- }
- argRegs = intArgRegs
- unlock(&finlock)
- if raceenabled {
- racefingo()
- }
- for fb != nil {
- for i := fb.cnt; i > 0; i-- {
- f := &fb.fin[i-1]
-
- var regs abi.RegArgs
- // The args may be passed in registers or on stack. Even for
- // the register case, we still need the spill slots.
- // TODO: revisit if we remove spill slots.
- //
-			// Unfortunately, because we can have an arbitrary
-			// number of return values and it would be complex to
-			// figure out how many of those could be passed in
-			// registers, we conservatively assume none of them are.
- framesz := unsafe.Sizeof((any)(nil)) + f.nret
- if framecap < framesz {
-				// The frame does not contain pointers interesting for GC;
-				// all not-yet-finalized objects are stored in finq.
- // If we do not mark it as FlagNoScan,
- // the last finalized object is not collected.
- frame = mallocgc(framesz, nil, true)
- framecap = framesz
- }
-
- if f.fint == nil {
- throw("missing type in runfinq")
- }
- r := frame
- if argRegs > 0 {
- r = unsafe.Pointer(&regs.Ints)
- } else {
- // frame is effectively uninitialized
- // memory. That means we have to clear
- // it before writing to it to avoid
- // confusing the write barrier.
- *(*[2]uintptr)(frame) = [2]uintptr{}
- }
- switch f.fint.kind & kindMask {
- case kindPtr:
- // direct use of pointer
- *(*unsafe.Pointer)(r) = f.arg
- case kindInterface:
- ityp := (*interfacetype)(unsafe.Pointer(f.fint))
- // set up with empty interface
- (*eface)(r)._type = &f.ot.typ
- (*eface)(r).data = f.arg
- if len(ityp.mhdr) != 0 {
- // convert to interface with methods
- // this conversion is guaranteed to succeed - we checked in SetFinalizer
- (*iface)(r).tab = assertE2I(ityp, (*eface)(r)._type)
- }
- default:
- throw("bad kind in runfinq")
- }
- fingRunning = true
- reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz), uint32(framesz), &regs)
- fingRunning = false
-
- // Drop finalizer queue heap references
- // before hiding them from markroot.
- // This also ensures these will be
- // clear if we reuse the finalizer.
- f.fn = nil
- f.arg = nil
- f.ot = nil
- atomic.Store(&fb.cnt, i-1)
- }
- next := fb.next
- lock(&finlock)
- fb.next = finc
- finc = fb
- unlock(&finlock)
- fb = next
- }
- }
-}
-
-// SetFinalizer sets the finalizer associated with obj to the provided
-// finalizer function. When the garbage collector finds an unreachable block
-// with an associated finalizer, it clears the association and runs
-// finalizer(obj) in a separate goroutine. This makes obj reachable again,
-// but now without an associated finalizer. Assuming that SetFinalizer
-// is not called again, the next time the garbage collector sees
-// that obj is unreachable, it will free obj.
-//
-// SetFinalizer(obj, nil) clears any finalizer associated with obj.
-//
-// The argument obj must be a pointer to an object allocated by calling
-// new, by taking the address of a composite literal, or by taking the
-// address of a local variable.
-// The argument finalizer must be a function that takes a single argument
-// to which obj's type can be assigned, and can have arbitrary ignored return
-// values. If either of these is not true, SetFinalizer may abort the
-// program.
-//
-// Finalizers are run in dependency order: if A points at B, both have
-// finalizers, and they are otherwise unreachable, only the finalizer
-// for A runs; once A is freed, the finalizer for B can run.
-// If a cyclic structure includes a block with a finalizer, that
-// cycle is not guaranteed to be garbage collected and the finalizer
-// is not guaranteed to run, because there is no ordering that
-// respects the dependencies.
-//
-// The finalizer is scheduled to run at some arbitrary time after the
-// program can no longer reach the object to which obj points.
-// There is no guarantee that finalizers will run before a program exits,
-// so typically they are useful only for releasing non-memory resources
-// associated with an object during a long-running program.
-// For example, an os.File object could use a finalizer to close the
-// associated operating system file descriptor when a program discards
-// an os.File without calling Close, but it would be a mistake
-// to depend on a finalizer to flush an in-memory I/O buffer such as a
-// bufio.Writer, because the buffer would not be flushed at program exit.
-//
-// It is not guaranteed that a finalizer will run if the size of *obj is
-// zero bytes.
-//
-// It is not guaranteed that a finalizer will run for objects allocated
-// in initializers for package-level variables. Such objects may be
-// linker-allocated, not heap-allocated.
-//
-// A finalizer may run as soon as an object becomes unreachable.
-// In order to use finalizers correctly, the program must ensure that
-// the object is reachable until it is no longer required.
-// Objects stored in global variables, or that can be found by tracing
-// pointers from a global variable, are reachable. For other objects,
-// pass the object to a call of the KeepAlive function to mark the
-// last point in the function where the object must be reachable.
-//
-// For example, if p points to a struct, such as os.File, that contains
-// a file descriptor d, and p has a finalizer that closes that file
-// descriptor, and if the last use of p in a function is a call to
-// syscall.Write(p.d, buf, size), then p may be unreachable as soon as
-// the program enters syscall.Write. The finalizer may run at that moment,
-// closing p.d, causing syscall.Write to fail because it is writing to
-// a closed file descriptor (or, worse, to an entirely different
-// file descriptor opened by a different goroutine). To avoid this problem,
-// call runtime.KeepAlive(p) after the call to syscall.Write.
-//
-// A single goroutine runs all finalizers for a program, sequentially.
-// If a finalizer must run for a long time, it should do so by starting
-// a new goroutine.
-func SetFinalizer(obj any, finalizer any) {
- if debug.sbrk != 0 {
- // debug.sbrk never frees memory, so no finalizers run
- // (and we don't have the data structures to record them).
- return
- }
- e := efaceOf(&obj)
- etyp := e._type
- if etyp == nil {
- throw("runtime.SetFinalizer: first argument is nil")
- }
- if etyp.kind&kindMask != kindPtr {
- throw("runtime.SetFinalizer: first argument is " + etyp.string() + ", not pointer")
- }
- ot := (*ptrtype)(unsafe.Pointer(etyp))
- if ot.elem == nil {
- throw("nil elem type!")
- }
-
- // find the containing object
- base, _, _ := findObject(uintptr(e.data), 0, 0)
-
- if base == 0 {
- // 0-length objects are okay.
- if e.data == unsafe.Pointer(&zerobase) {
- return
- }
-
- // Global initializers might be linker-allocated.
- // var Foo = &Object{}
- // func main() {
- // runtime.SetFinalizer(Foo, nil)
- // }
- // The relevant segments are: noptrdata, data, bss, noptrbss.
- // We cannot assume they are in any order or even contiguous,
- // due to external linking.
- for datap := &firstmoduledata; datap != nil; datap = datap.next {
- if datap.noptrdata <= uintptr(e.data) && uintptr(e.data) < datap.enoptrdata ||
- datap.data <= uintptr(e.data) && uintptr(e.data) < datap.edata ||
- datap.bss <= uintptr(e.data) && uintptr(e.data) < datap.ebss ||
- datap.noptrbss <= uintptr(e.data) && uintptr(e.data) < datap.enoptrbss {
- return
- }
- }
- throw("runtime.SetFinalizer: pointer not in allocated block")
- }
-
- if uintptr(e.data) != base {
-		// As an implementation detail we allow setting finalizers for an inner byte
-		// of an object if it could come from a tiny alloc (see mallocgc for details).
- if ot.elem == nil || ot.elem.ptrdata != 0 || ot.elem.size >= maxTinySize {
- throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
- }
- }
-
- f := efaceOf(&finalizer)
- ftyp := f._type
- if ftyp == nil {
- // switch to system stack and remove finalizer
- systemstack(func() {
- removefinalizer(e.data)
- })
- return
- }
-
- if ftyp.kind&kindMask != kindFunc {
- throw("runtime.SetFinalizer: second argument is " + ftyp.string() + ", not a function")
- }
- ft := (*functype)(unsafe.Pointer(ftyp))
- if ft.dotdotdot() {
- throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string() + " because dotdotdot")
- }
- if ft.inCount != 1 {
- throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string())
- }
- fint := ft.in()[0]
- switch {
- case fint == etyp:
- // ok - same type
- goto okarg
- case fint.kind&kindMask == kindPtr:
- if (fint.uncommon() == nil || etyp.uncommon() == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem {
- // ok - not same type, but both pointers,
- // one or the other is unnamed, and same element type, so assignable.
- goto okarg
- }
- case fint.kind&kindMask == kindInterface:
- ityp := (*interfacetype)(unsafe.Pointer(fint))
- if len(ityp.mhdr) == 0 {
- // ok - satisfies empty interface
- goto okarg
- }
- if iface := assertE2I2(ityp, *efaceOf(&obj)); iface.tab != nil {
- goto okarg
- }
- }
- throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string())
-okarg:
- // compute size needed for return parameters
- nret := uintptr(0)
- for _, t := range ft.out() {
- nret = alignUp(nret, uintptr(t.align)) + uintptr(t.size)
- }
- nret = alignUp(nret, goarch.PtrSize)
-
- // make sure we have a finalizer goroutine
- createfing()
-
- systemstack(func() {
- if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
- throw("runtime.SetFinalizer: finalizer already set")
- }
- })
-}
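-
-// A compact usage sketch tying the rules above together (illustrative
-// only; openResource and closeResource are hypothetical helpers):
-//
-//	type res struct{ fd int }
-//	r := &res{fd: openResource()}
-//	runtime.SetFinalizer(r, func(r *res) { closeResource(r.fd) })
-//	// ... use r.fd ...
-//	runtime.KeepAlive(r) // keep r reachable through the last use of r.fd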
-
-// Mark KeepAlive as noinline so that it is easily detectable as an intrinsic.
-//go:noinline
-
-// KeepAlive marks its argument as currently reachable.
-// This ensures that the object is not freed, and its finalizer is not run,
-// before the point in the program where KeepAlive is called.
-//
-// A very simplified example showing where KeepAlive is required:
-// type File struct { d int }
-// d, err := syscall.Open("/file/path", syscall.O_RDONLY, 0)
-// // ... do something if err != nil ...
-// p := &File{d}
-// runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) })
-// var buf [10]byte
-// n, err := syscall.Read(p.d, buf[:])
-// // Ensure p is not finalized until Read returns.
-// runtime.KeepAlive(p)
-// // No more uses of p after this point.
-//
-// Without the KeepAlive call, the finalizer could run at the start of
-// syscall.Read, closing the file descriptor before syscall.Read makes
-// the actual system call.
-//
-// Note: KeepAlive should only be used to prevent finalizers from
-// running prematurely. In particular, when used with unsafe.Pointer,
-// the rules for valid uses of unsafe.Pointer still apply.
-func KeepAlive(x any) {
- // Introduce a use of x that the compiler can't eliminate.
- // This makes sure x is alive on entry. We need x to be alive
- // on entry for "defer runtime.KeepAlive(x)"; see issue 21402.
- if cgoAlwaysFalse {
- println(x)
- }
-}
diff --git a/contrib/go/_std_1.18/src/runtime/mgc.go b/contrib/go/_std_1.18/src/runtime/mgc.go
deleted file mode 100644
index 44b96154e7..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mgc.go
+++ /dev/null
@@ -1,1714 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Garbage collector (GC).
-//
-// The GC runs concurrently with mutator threads, is type accurate (aka precise), and allows multiple
-// GC threads to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
-// non-generational and non-compacting. Allocation is done using size segregated per P allocation
-// areas to minimize fragmentation while eliminating locks in the common case.
-//
-// The algorithm decomposes into several steps.
-// This is a high level description of the algorithm being used. For an overview of GC a good
-// place to start is Richard Jones' gchandbook.org.
-//
-// The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
-// Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
-// On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978),
-// 966-975.
-// For journal quality proofs that these steps are complete, correct, and terminate see
-// Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
-// Concurrency and Computation: Practice and Experience 15(3-5), 2003.
-//
-// 1. GC performs sweep termination.
-//
-// a. Stop the world. This causes all Ps to reach a GC safe-point.
-//
-// b. Sweep any unswept spans. There will only be unswept spans if
-// this GC cycle was forced before the expected time.
-//
-// 2. GC performs the mark phase.
-//
-// a. Prepare for the mark phase by setting gcphase to _GCmark
-// (from _GCoff), enabling the write barrier, enabling mutator
-// assists, and enqueueing root mark jobs. No objects may be
-// scanned until all Ps have enabled the write barrier, which is
-// accomplished using STW.
-//
-// b. Start the world. From this point, GC work is done by mark
-// workers started by the scheduler and by assists performed as
-// part of allocation. The write barrier shades both the
-// overwritten pointer and the new pointer value for any pointer
-// writes (see mbarrier.go for details). Newly allocated objects
-// are immediately marked black.
-//
-// c. GC performs root marking jobs. This includes scanning all
-// stacks, shading all globals, and shading any heap pointers in
-// off-heap runtime data structures. Scanning a stack stops a
-// goroutine, shades any pointers found on its stack, and then
-// resumes the goroutine.
-//
-// d. GC drains the work queue of grey objects, scanning each grey
-// object to black and shading all pointers found in the object
-// (which in turn may add those pointers to the work queue).
-//
-// e. Because GC work is spread across local caches, GC uses a
-// distributed termination algorithm to detect when there are no
-// more root marking jobs or grey objects (see gcMarkDone). At this
-// point, GC transitions to mark termination.
-//
-// 3. GC performs mark termination.
-//
-// a. Stop the world.
-//
-// b. Set gcphase to _GCmarktermination, and disable workers and
-// assists.
-//
-// c. Perform housekeeping like flushing mcaches.
-//
-// 4. GC performs the sweep phase.
-//
-// a. Prepare for the sweep phase by setting gcphase to _GCoff,
-// setting up sweep state and disabling the write barrier.
-//
-// b. Start the world. From this point on, newly allocated objects
-// are white, and allocating sweeps spans before use if necessary.
-//
-// c. GC does concurrent sweeping in the background and in response
-// to allocation. See description below.
-//
-// 5. When sufficient allocation has taken place, replay the sequence
-// starting with 1 above. See discussion of GC rate below.
-
-// Concurrent sweep.
-//
-// The sweep phase proceeds concurrently with normal program execution.
-// The heap is swept span-by-span both lazily (when a goroutine needs another span)
-// and concurrently in a background goroutine (this helps programs that are not CPU bound).
-// At the end of STW mark termination all spans are marked as "needs sweeping".
-//
-// The background sweeper goroutine simply sweeps spans one-by-one.
-//
-// To avoid requesting more OS memory while there are unswept spans, when a
-// goroutine needs another span, it first attempts to reclaim that much memory
-// by sweeping. When a goroutine needs to allocate a new small-object span, it
-// sweeps small-object spans for the same object size until it frees at least
-// one object. When a goroutine needs to allocate a large-object span from the heap,
-// it sweeps spans until it frees at least that many pages into the heap. There is
-// one case where this may not suffice: if a goroutine sweeps and frees two
-// nonadjacent one-page spans to the heap, it will allocate a new two-page
-// span, but there can still be other one-page unswept spans which could be
-// combined into a two-page span.
-//
-// It's critical to ensure that no operations proceed on unswept spans (that would corrupt
-// mark bits in GC bitmap). During GC all mcaches are flushed into the central cache,
-// so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
-// When a goroutine explicitly frees an object or sets a finalizer, it ensures that
-// the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
-// The finalizer goroutine is kicked off only when all spans are swept.
-// When the next GC starts, it sweeps all not-yet-swept spans (if any).
-
-// GC rate.
-// Next GC is after we've allocated an extra amount of memory proportional to
-// the amount already in use. The proportion is controlled by GOGC environment variable
-// (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
-// (this mark is tracked in gcController.heapGoal variable). This keeps the GC cost in
-// linear proportion to the allocation cost. Adjusting GOGC just changes the linear constant
-// (and also the amount of extra memory used).
-
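-// As a worked example of this rate (illustrative, with the default
-// GOGC=100): with 4M of live heap after the last cycle, the next cycle
-// triggers near
-//
-//	goal = live * (1 + GOGC/100) = 4M * (1 + 100/100) = 8M
-//
-// while GOGC=200 would give 4M * 3 = 12M, trading memory for less
-// frequent collection.
-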
-// Oblets
-//
-// In order to prevent long pauses while scanning large objects and to
-// improve parallelism, the garbage collector breaks up scan jobs for
-// objects larger than maxObletBytes into "oblets" of at most
-// maxObletBytes. When scanning encounters the beginning of a large
-// object, it scans only the first oblet and enqueues the remaining
-// oblets as new scan jobs.
-
-package runtime
-
-import (
- "internal/cpu"
- "runtime/internal/atomic"
- "unsafe"
-)
-
-const (
- _DebugGC = 0
- _ConcurrentSweep = true
- _FinBlockSize = 4 * 1024
-
- // debugScanConservative enables debug logging for stack
- // frames that are scanned conservatively.
- debugScanConservative = false
-
- // sweepMinHeapDistance is a lower bound on the heap distance
- // (in bytes) reserved for concurrent sweeping between GC
- // cycles.
- sweepMinHeapDistance = 1024 * 1024
-)
-
-func gcinit() {
- if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
- throw("size of Workbuf is suboptimal")
- }
- // No sweep on the first cycle.
- sweep.active.state.Store(sweepDrainedMask)
-
- // Initialize GC pacer state.
- // Use the environment variable GOGC for the initial gcPercent value.
- gcController.init(readGOGC())
-
- work.startSema = 1
- work.markDoneSema = 1
- lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters)
- lockInit(&work.assistQueue.lock, lockRankAssistQueue)
- lockInit(&work.wbufSpans.lock, lockRankWbufSpans)
-}
-
-// gcenable is called after the bulk of the runtime initialization,
-// just before we're about to start letting user code run.
-// It kicks off the background sweeper goroutine, the background
-// scavenger goroutine, and enables GC.
-func gcenable() {
- // Kick off sweeping and scavenging.
- c := make(chan int, 2)
- go bgsweep(c)
- go bgscavenge(c)
- <-c
- <-c
- memstats.enablegc = true // now that runtime is initialized, GC is okay
-}
-
-// Garbage collector phase.
-// Indicates the write barrier and synchronization task to perform.
-var gcphase uint32
-
-// The compiler knows about this variable.
-// If you change it, you must change builtin/runtime.go, too.
-// If you change the first four bytes, you must also change the write
-// barrier insertion code.
-var writeBarrier struct {
- enabled bool // compiler emits a check of this before calling write barrier
- pad [3]byte // compiler uses 32-bit load for "enabled" field
- needed bool // whether we need a write barrier for current GC phase
- cgo bool // whether we need a write barrier for a cgo check
- alignme uint64 // guarantee alignment so that compiler can use a 32 or 64-bit load
-}
-
-// gcBlackenEnabled is 1 if mutator assists and background mark
-// workers are allowed to blacken objects. This must only be set when
-// gcphase == _GCmark.
-var gcBlackenEnabled uint32
-
-const (
- _GCoff = iota // GC not running; sweeping in background, write barrier disabled
- _GCmark // GC marking roots and workbufs: allocate black, write barrier ENABLED
- _GCmarktermination // GC mark termination: allocate black, P's help GC, write barrier ENABLED
-)
-
-//go:nosplit
-func setGCPhase(x uint32) {
- atomic.Store(&gcphase, x)
- writeBarrier.needed = gcphase == _GCmark || gcphase == _GCmarktermination
- writeBarrier.enabled = writeBarrier.needed || writeBarrier.cgo
-}
-
-// gcMarkWorkerMode represents the mode that a concurrent mark worker
-// should operate in.
-//
-// Concurrent marking happens through four different mechanisms. One
-// is mutator assists, which happen in response to allocations and are
-// not scheduled. The other three are variations in the per-P mark
-// workers and are distinguished by gcMarkWorkerMode.
-type gcMarkWorkerMode int
-
-const (
- // gcMarkWorkerNotWorker indicates that the next scheduled G is not
- // starting work and the mode should be ignored.
- gcMarkWorkerNotWorker gcMarkWorkerMode = iota
-
- // gcMarkWorkerDedicatedMode indicates that the P of a mark
- // worker is dedicated to running that mark worker. The mark
- // worker should run without preemption.
- gcMarkWorkerDedicatedMode
-
- // gcMarkWorkerFractionalMode indicates that a P is currently
- // running the "fractional" mark worker. The fractional worker
- // is necessary when GOMAXPROCS*gcBackgroundUtilization is not
- // an integer and using only dedicated workers would result in
- // utilization too far from the target of gcBackgroundUtilization.
- // The fractional worker should run until it is preempted and
- // will be scheduled to pick up the fractional part of
- // GOMAXPROCS*gcBackgroundUtilization.
- gcMarkWorkerFractionalMode
-
- // gcMarkWorkerIdleMode indicates that a P is running the mark
- // worker because it has nothing else to do. The idle worker
- // should run until it is preempted and account its time
- // against gcController.idleMarkTime.
- gcMarkWorkerIdleMode
-)
-
-// gcMarkWorkerModeStrings are the string labels of gcMarkWorkerModes
-// to use in execution traces.
-var gcMarkWorkerModeStrings = [...]string{
- "Not worker",
- "GC (dedicated)",
- "GC (fractional)",
- "GC (idle)",
-}
-
-// pollFractionalWorkerExit reports whether a fractional mark worker
-// should self-preempt. It assumes it is called from the fractional
-// worker.
-func pollFractionalWorkerExit() bool {
- // This should be kept in sync with the fractional worker
- // scheduler logic in findRunnableGCWorker.
- now := nanotime()
- delta := now - gcController.markStartTime
- if delta <= 0 {
- return true
- }
- p := getg().m.p.ptr()
- selfTime := p.gcFractionalMarkTime + (now - p.gcMarkWorkerStartTime)
- // Add some slack to the utilization goal so that the
- // fractional worker isn't behind again the instant it exits.
- return float64(selfTime)/float64(delta) > 1.2*gcController.fractionalUtilizationGoal
-}
-
-var work struct {
- full lfstack // lock-free list of full blocks workbuf
- empty lfstack // lock-free list of empty blocks workbuf
- pad0 cpu.CacheLinePad // prevents false-sharing between full/empty and nproc/nwait
-
- wbufSpans struct {
- lock mutex
- // free is a list of spans dedicated to workbufs, but
- // that don't currently contain any workbufs.
- free mSpanList
- // busy is a list of all spans containing workbufs on
- // one of the workbuf lists.
- busy mSpanList
- }
-
- // Restore 64-bit alignment on 32-bit.
- _ uint32
-
- // bytesMarked is the number of bytes marked this cycle. This
- // includes bytes blackened in scanned objects, noscan objects
- // that go straight to black, and permagrey objects scanned by
- // markroot during the concurrent scan phase. This is updated
- // atomically during the cycle. Updates may be batched
- // arbitrarily, since the value is only read at the end of the
- // cycle.
- //
- // Because of benign races during marking, this number may not
- // be the exact number of marked bytes, but it should be very
- // close.
- //
- // Put this field here because it needs 64-bit atomic access
- // (and thus 8-byte alignment even on 32-bit architectures).
- bytesMarked uint64
-
- markrootNext uint32 // next markroot job
- markrootJobs uint32 // number of markroot jobs
-
- nproc uint32
- tstart int64
- nwait uint32
-
- // Number of roots of various root types. Set by gcMarkRootPrepare.
- //
- // nStackRoots == len(stackRoots), but we have nStackRoots for
- // consistency.
- nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int
-
- // Base indexes of each root type. Set by gcMarkRootPrepare.
- baseData, baseBSS, baseSpans, baseStacks, baseEnd uint32
-
- // stackRoots is a snapshot of all of the Gs that existed
- // before the beginning of concurrent marking. The backing
- // store of this must not be modified because it might be
- // shared with allgs.
- stackRoots []*g
-
- // Each type of GC state transition is protected by a lock.
- // Since multiple threads can simultaneously detect the state
- // transition condition, any thread that detects a transition
- // condition must acquire the appropriate transition lock,
- // re-check the transition condition and return if it no
- // longer holds or perform the transition if it does.
- // Likewise, any transition must invalidate the transition
- // condition before releasing the lock. This ensures that each
- // transition is performed by exactly one thread and threads
- // that need the transition to happen block until it has
- // happened.
- //
- // startSema protects the transition from "off" to mark or
- // mark termination.
- startSema uint32
- // markDoneSema protects transitions from mark to mark termination.
- markDoneSema uint32
-
-	// Background mark completion signaling.
-	bgMarkReady note   // signal background mark worker has started
-	bgMarkDone  uint32 // cas to 1 when at a background mark completion point
-
- // mode is the concurrency mode of the current GC cycle.
- mode gcMode
-
- // userForced indicates the current GC cycle was forced by an
- // explicit user call.
- userForced bool
-
- // totaltime is the CPU nanoseconds spent in GC since the
- // program started if debug.gctrace > 0.
- totaltime int64
-
- // initialHeapLive is the value of gcController.heapLive at the
- // beginning of this GC cycle.
- initialHeapLive uint64
-
- // assistQueue is a queue of assists that are blocked because
-	// there was neither enough credit to steal nor enough work to
- // do.
- assistQueue struct {
- lock mutex
- q gQueue
- }
-
- // sweepWaiters is a list of blocked goroutines to wake when
- // we transition from mark termination to sweep.
- sweepWaiters struct {
- lock mutex
- list gList
- }
-
- // cycles is the number of completed GC cycles, where a GC
- // cycle is sweep termination, mark, mark termination, and
- // sweep. This differs from memstats.numgc, which is
- // incremented at mark termination.
- cycles uint32
-
- // Timing/utilization stats for this cycle.
- stwprocs, maxprocs int32
- tSweepTerm, tMark, tMarkTerm, tEnd int64 // nanotime() of phase start
-
- pauseNS int64 // total STW time this cycle
- pauseStart int64 // nanotime() of last STW
-
- // debug.gctrace heap sizes for this cycle.
- heap0, heap1, heap2, heapGoal uint64
-}
-
-// GC runs a garbage collection and blocks the caller until the
-// garbage collection is complete. It may also block the entire
-// program.
-func GC() {
- // We consider a cycle to be: sweep termination, mark, mark
- // termination, and sweep. This function shouldn't return
- // until a full cycle has been completed, from beginning to
- // end. Hence, we always want to finish up the current cycle
- // and start a new one. That means:
- //
- // 1. In sweep termination, mark, or mark termination of cycle
- // N, wait until mark termination N completes and transitions
- // to sweep N.
- //
- // 2. In sweep N, help with sweep N.
- //
- // At this point we can begin a full cycle N+1.
- //
- // 3. Trigger cycle N+1 by starting sweep termination N+1.
- //
- // 4. Wait for mark termination N+1 to complete.
- //
- // 5. Help with sweep N+1 until it's done.
- //
- // This all has to be written to deal with the fact that the
- // GC may move ahead on its own. For example, when we block
- // until mark termination N, we may wake up in cycle N+2.
-
- // Wait until the current sweep termination, mark, and mark
- // termination complete.
- n := atomic.Load(&work.cycles)
- gcWaitOnMark(n)
-
- // We're now in sweep N or later. Trigger GC cycle N+1, which
- // will first finish sweep N if necessary and then enter sweep
- // termination N+1.
- gcStart(gcTrigger{kind: gcTriggerCycle, n: n + 1})
-
- // Wait for mark termination N+1 to complete.
- gcWaitOnMark(n + 1)
-
- // Finish sweep N+1 before returning. We do this both to
- // complete the cycle and because runtime.GC() is often used
- // as part of tests and benchmarks to get the system into a
- // relatively stable and isolated state.
- for atomic.Load(&work.cycles) == n+1 && sweepone() != ^uintptr(0) {
- sweep.nbgsweep++
- Gosched()
- }
-
- // Callers may assume that the heap profile reflects the
- // just-completed cycle when this returns (historically this
- // happened because this was a STW GC), but right now the
- // profile still reflects mark termination N, not N+1.
- //
- // As soon as all of the sweep frees from cycle N+1 are done,
- // we can go ahead and publish the heap profile.
- //
- // First, wait for sweeping to finish. (We know there are no
- // more spans on the sweep queue, but we may be concurrently
- // sweeping spans, so we have to wait.)
- for atomic.Load(&work.cycles) == n+1 && !isSweepDone() {
- Gosched()
- }
-
- // Now we're really done with sweeping, so we can publish the
- // stable heap profile. Only do this if we haven't already hit
- // another mark termination.
- mp := acquirem()
- cycle := atomic.Load(&work.cycles)
- if cycle == n+1 || (gcphase == _GCmark && cycle == n+2) {
- mProf_PostSweep()
- }
- releasem(mp)
-}
-
-// gcWaitOnMark blocks until GC finishes the Nth mark phase. If GC has
-// already completed this mark phase, it returns immediately.
-func gcWaitOnMark(n uint32) {
- for {
- // Disable phase transitions.
- lock(&work.sweepWaiters.lock)
- nMarks := atomic.Load(&work.cycles)
- if gcphase != _GCmark {
- // We've already completed this cycle's mark.
- nMarks++
- }
- if nMarks > n {
- // We're done.
- unlock(&work.sweepWaiters.lock)
- return
- }
-
- // Wait until sweep termination, mark, and mark
- // termination of cycle N complete.
- work.sweepWaiters.list.push(getg())
- goparkunlock(&work.sweepWaiters.lock, waitReasonWaitForGCCycle, traceEvGoBlock, 1)
- }
-}
-
-// gcMode indicates how concurrent a GC cycle should be.
-type gcMode int
-
-const (
- gcBackgroundMode gcMode = iota // concurrent GC and sweep
- gcForceMode // stop-the-world GC now, concurrent sweep
- gcForceBlockMode // stop-the-world GC now and STW sweep (forced by user)
-)
-
-// A gcTrigger is a predicate for starting a GC cycle. Specifically,
-// it is an exit condition for the _GCoff phase.
-type gcTrigger struct {
- kind gcTriggerKind
- now int64 // gcTriggerTime: current time
- n uint32 // gcTriggerCycle: cycle number to start
-}
-
-type gcTriggerKind int
-
-const (
- // gcTriggerHeap indicates that a cycle should be started when
- // the heap size reaches the trigger heap size computed by the
- // controller.
- gcTriggerHeap gcTriggerKind = iota
-
- // gcTriggerTime indicates that a cycle should be started when
- // it's been more than forcegcperiod nanoseconds since the
- // previous GC cycle.
- gcTriggerTime
-
- // gcTriggerCycle indicates that a cycle should be started if
- // we have not yet started cycle number gcTrigger.n (relative
- // to work.cycles).
- gcTriggerCycle
-)
-
-// test reports whether the trigger condition is satisfied, meaning
-// that the exit condition for the _GCoff phase has been met. The exit
-// condition should be tested when allocating.
-func (t gcTrigger) test() bool {
- if !memstats.enablegc || panicking != 0 || gcphase != _GCoff {
- return false
- }
- switch t.kind {
- case gcTriggerHeap:
- // Non-atomic access to gcController.heapLive for performance. If
- // we are going to trigger on this, this thread just
- // atomically wrote gcController.heapLive anyway and we'll see our
- // own write.
- return gcController.heapLive >= gcController.trigger
- case gcTriggerTime:
- if gcController.gcPercent.Load() < 0 {
- return false
- }
- lastgc := int64(atomic.Load64(&memstats.last_gc_nanotime))
- return lastgc != 0 && t.now-lastgc > forcegcperiod
- case gcTriggerCycle:
- // t.n > work.cycles, but accounting for wraparound.
- return int32(t.n-work.cycles) > 0
- }
- return true
-}
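-
-// Illustrative constructions of the three kinds (mirroring how the
-// runtime builds them elsewhere, e.g. GC above for gcTriggerCycle):
-//
-//	gcTrigger{kind: gcTriggerHeap}                  // malloc-time pacing check
-//	gcTrigger{kind: gcTriggerTime, now: nanotime()} // periodic forced GC
-//	gcTrigger{kind: gcTriggerCycle, n: n + 1}       // runtime.GC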
-
-// gcStart starts the GC. It transitions from _GCoff to _GCmark (if
-// debug.gcstoptheworld == 0) or performs all of GC (if
-// debug.gcstoptheworld != 0).
-//
-// This may return without performing this transition in some cases,
-// such as when called on a system stack or with locks held.
-func gcStart(trigger gcTrigger) {
- // Since this is called from malloc and malloc is called in
- // the guts of a number of libraries that might be holding
- // locks, don't attempt to start GC in non-preemptible or
- // potentially unstable situations.
- mp := acquirem()
- if gp := getg(); gp == mp.g0 || mp.locks > 1 || mp.preemptoff != "" {
- releasem(mp)
- return
- }
- releasem(mp)
- mp = nil
-
- // Pick up the remaining unswept/not being swept spans concurrently
- //
- // This shouldn't happen if we're being invoked in background
- // mode since proportional sweep should have just finished
- // sweeping everything, but rounding errors, etc, may leave a
- // few spans unswept. In forced mode, this is necessary since
- // GC can be forced at any point in the sweeping cycle.
- //
- // We check the transition condition continuously here in case
-	// this G gets delayed into the next GC cycle.
- for trigger.test() && sweepone() != ^uintptr(0) {
- sweep.nbgsweep++
- }
-
- // Perform GC initialization and the sweep termination
- // transition.
- semacquire(&work.startSema)
- // Re-check transition condition under transition lock.
- if !trigger.test() {
- semrelease(&work.startSema)
- return
- }
-
- // For stats, check if this GC was forced by the user.
- work.userForced = trigger.kind == gcTriggerCycle
-
- // In gcstoptheworld debug mode, upgrade the mode accordingly.
- // We do this after re-checking the transition condition so
- // that multiple goroutines that detect the heap trigger don't
- // start multiple STW GCs.
- mode := gcBackgroundMode
- if debug.gcstoptheworld == 1 {
- mode = gcForceMode
- } else if debug.gcstoptheworld == 2 {
- mode = gcForceBlockMode
- }
-
- // Ok, we're doing it! Stop everybody else
- semacquire(&gcsema)
- semacquire(&worldsema)
-
- if trace.enabled {
- traceGCStart()
- }
-
- // Check that all Ps have finished deferred mcache flushes.
- for _, p := range allp {
- if fg := atomic.Load(&p.mcache.flushGen); fg != mheap_.sweepgen {
- println("runtime: p", p.id, "flushGen", fg, "!= sweepgen", mheap_.sweepgen)
- throw("p mcache not flushed")
- }
- }
-
- gcBgMarkStartWorkers()
-
- systemstack(gcResetMarkState)
-
- work.stwprocs, work.maxprocs = gomaxprocs, gomaxprocs
- if work.stwprocs > ncpu {
- // This is used to compute CPU time of the STW phases,
- // so it can't be more than ncpu, even if GOMAXPROCS is.
- work.stwprocs = ncpu
- }
- work.heap0 = atomic.Load64(&gcController.heapLive)
- work.pauseNS = 0
- work.mode = mode
-
- now := nanotime()
- work.tSweepTerm = now
- work.pauseStart = now
- if trace.enabled {
- traceGCSTWStart(1)
- }
- systemstack(stopTheWorldWithSema)
- // Finish sweep before we start concurrent scan.
- systemstack(func() {
- finishsweep_m()
- })
-
-	// clearpools before we start the GC. If we wait, the memory will not be
- // reclaimed until the next GC cycle.
- clearpools()
-
- work.cycles++
-
- // Assists and workers can start the moment we start
- // the world.
- gcController.startCycle(now, int(gomaxprocs))
- work.heapGoal = gcController.heapGoal
-
- // In STW mode, disable scheduling of user Gs. This may also
- // disable scheduling of this goroutine, so it may block as
- // soon as we start the world again.
- if mode != gcBackgroundMode {
- schedEnableUser(false)
- }
-
- // Enter concurrent mark phase and enable
- // write barriers.
- //
- // Because the world is stopped, all Ps will
- // observe that write barriers are enabled by
- // the time we start the world and begin
- // scanning.
- //
- // Write barriers must be enabled before assists are
- // enabled because they must be enabled before
- // any non-leaf heap objects are marked. Since
- // allocations are blocked until assists can
-	// happen, we want to enable assists as early as
- // possible.
- setGCPhase(_GCmark)
-
- gcBgMarkPrepare() // Must happen before assist enable.
- gcMarkRootPrepare()
-
- // Mark all active tinyalloc blocks. Since we're
- // allocating from these, they need to be black like
- // other allocations. The alternative is to blacken
- // the tiny block on every allocation from it, which
- // would slow down the tiny allocator.
- gcMarkTinyAllocs()
-
- // At this point all Ps have enabled the write
- // barrier, thus maintaining the no white to
- // black invariant. Enable mutator assists to
- // put back-pressure on fast allocating
- // mutators.
- atomic.Store(&gcBlackenEnabled, 1)
-
- // In STW mode, we could block the instant systemstack
- // returns, so make sure we're not preemptible.
- mp = acquirem()
-
- // Concurrent mark.
- systemstack(func() {
- now = startTheWorldWithSema(trace.enabled)
- work.pauseNS += now - work.pauseStart
- work.tMark = now
- memstats.gcPauseDist.record(now - work.pauseStart)
- })
-
- // Release the world sema before Gosched() in STW mode
- // because we will need to reacquire it later but before
- // this goroutine becomes runnable again, and we could
- // self-deadlock otherwise.
- semrelease(&worldsema)
- releasem(mp)
-
- // Make sure we block instead of returning to user code
- // in STW mode.
- if mode != gcBackgroundMode {
- Gosched()
- }
-
- semrelease(&work.startSema)
-}
-
-// gcMarkDoneFlushed counts the number of P's with flushed work.
-//
-// Ideally this would be a captured local in gcMarkDone, but forEachP
-// escapes its callback closure, so it can't capture anything.
-//
-// This is protected by markDoneSema.
-var gcMarkDoneFlushed uint32
-
-// gcMarkDone transitions the GC from mark to mark termination if all
-// reachable objects have been marked (that is, there are no grey
-// objects and there can be no more in the future). Otherwise, it flushes
-// all local work to the global queues where it can be discovered by
-// other workers.
-//
-// This should be called when all local mark work has been drained and
-// there are no remaining workers. Specifically, when
-//
-// work.nwait == work.nproc && !gcMarkWorkAvailable(p)
-//
-// The calling context must be preemptible.
-//
-// Flushing local work is important because idle Ps may have local
-// work queued. This is the only way to make that work visible and
-// drive GC to completion.
-//
-// It is explicitly okay to have write barriers in this function. If
-// it does transition to mark termination, then all reachable objects
-// have been marked, so the write barrier cannot shade any more
-// objects.
-func gcMarkDone() {
- // Ensure only one thread is running the ragged barrier at a
- // time.
- semacquire(&work.markDoneSema)
-
-top:
- // Re-check transition condition under transition lock.
- //
- // It's critical that this checks the global work queues are
- // empty before performing the ragged barrier. Otherwise,
- // there could be global work that a P could take after the P
- // has passed the ragged barrier.
- if !(gcphase == _GCmark && work.nwait == work.nproc && !gcMarkWorkAvailable(nil)) {
- semrelease(&work.markDoneSema)
- return
- }
-
- // forEachP needs worldsema to execute, and we'll need it to
- // stop the world later, so acquire worldsema now.
- semacquire(&worldsema)
-
- // Flush all local buffers and collect flushedWork flags.
- gcMarkDoneFlushed = 0
- systemstack(func() {
- gp := getg().m.curg
- // Mark the user stack as preemptible so that it may be scanned.
- // Otherwise, our attempt to force all P's to a safepoint could
- // result in a deadlock as we attempt to preempt a worker that's
- // trying to preempt us (e.g. for a stack scan).
- casgstatus(gp, _Grunning, _Gwaiting)
- forEachP(func(_p_ *p) {
- // Flush the write barrier buffer, since this may add
- // work to the gcWork.
- wbBufFlush1(_p_)
-
- // Flush the gcWork, since this may create global work
- // and set the flushedWork flag.
- //
- // TODO(austin): Break up these workbufs to
- // better distribute work.
- _p_.gcw.dispose()
- // Collect the flushedWork flag.
- if _p_.gcw.flushedWork {
- atomic.Xadd(&gcMarkDoneFlushed, 1)
- _p_.gcw.flushedWork = false
- }
- })
- casgstatus(gp, _Gwaiting, _Grunning)
- })
-
- if gcMarkDoneFlushed != 0 {
- // More grey objects were discovered since the
- // previous termination check, so there may be more
- // work to do. Keep going. It's possible the
- // transition condition became true again during the
- // ragged barrier, so re-check it.
- semrelease(&worldsema)
- goto top
- }
-
- // There was no global work, no local work, and no Ps
- // communicated work since we took markDoneSema. Therefore
- // there are no grey objects and no more objects can be
- // shaded. Transition to mark termination.
- now := nanotime()
- work.tMarkTerm = now
- work.pauseStart = now
- getg().m.preemptoff = "gcing"
- if trace.enabled {
- traceGCSTWStart(0)
- }
- systemstack(stopTheWorldWithSema)
-	// The gcphase is _GCmark; it will transition to _GCmarktermination
- // below. The important thing is that the wb remains active until
- // all marking is complete. This includes writes made by the GC.
-
- // There is sometimes work left over when we enter mark termination due
- // to write barriers performed after the completion barrier above.
- // Detect this and resume concurrent mark. This is obviously
- // unfortunate.
- //
- // See issue #27993 for details.
- //
- // Switch to the system stack to call wbBufFlush1, though in this case
- // it doesn't matter because we're non-preemptible anyway.
- restart := false
- systemstack(func() {
- for _, p := range allp {
- wbBufFlush1(p)
- if !p.gcw.empty() {
- restart = true
- break
- }
- }
- })
- if restart {
- getg().m.preemptoff = ""
- systemstack(func() {
- now := startTheWorldWithSema(true)
- work.pauseNS += now - work.pauseStart
- memstats.gcPauseDist.record(now - work.pauseStart)
- })
- semrelease(&worldsema)
- goto top
- }
-
- // Disable assists and background workers. We must do
- // this before waking blocked assists.
- atomic.Store(&gcBlackenEnabled, 0)
-
- // Wake all blocked assists. These will run when we
- // start the world again.
- gcWakeAllAssists()
-
- // Likewise, release the transition lock. Blocked
- // workers and assists will run when we start the
- // world again.
- semrelease(&work.markDoneSema)
-
- // In STW mode, re-enable user goroutines. These will be
- // queued to run after we start the world.
- schedEnableUser(true)
-
- // endCycle depends on all gcWork cache stats being flushed.
-	// The termination algorithm above ensured that they are, up to
-	// allocations made since the ragged barrier.
- nextTriggerRatio := gcController.endCycle(now, int(gomaxprocs), work.userForced)
-
- // Perform mark termination. This will restart the world.
- gcMarkTermination(nextTriggerRatio)
-}
-
-// World must be stopped and mark assists and background workers must be
-// disabled.
-func gcMarkTermination(nextTriggerRatio float64) {
- // Start marktermination (write barrier remains enabled for now).
- setGCPhase(_GCmarktermination)
-
- work.heap1 = gcController.heapLive
- startTime := nanotime()
-
- mp := acquirem()
- mp.preemptoff = "gcing"
- _g_ := getg()
- _g_.m.traceback = 2
- gp := _g_.m.curg
- casgstatus(gp, _Grunning, _Gwaiting)
- gp.waitreason = waitReasonGarbageCollection
-
- // Run gc on the g0 stack. We do this so that the g stack
- // we're currently running on will no longer change. Cuts
- // the root set down a bit (g0 stacks are not scanned, and
- // we don't need to scan gc's internal state). We also
- // need to switch to g0 so we can shrink the stack.
- systemstack(func() {
- gcMark(startTime)
- // Must return immediately.
- // The outer function's stack may have moved
- // during gcMark (it shrinks stacks, including the
- // outer function's stack), so we must not refer
- // to any of its variables. Return back to the
- // non-system stack to pick up the new addresses
- // before continuing.
- })
-
- systemstack(func() {
- work.heap2 = work.bytesMarked
- if debug.gccheckmark > 0 {
- // Run a full non-parallel, stop-the-world
- // mark using checkmark bits, to check that we
- // didn't forget to mark anything during the
- // concurrent mark process.
- startCheckmarks()
- gcResetMarkState()
- gcw := &getg().m.p.ptr().gcw
- gcDrain(gcw, 0)
- wbBufFlush1(getg().m.p.ptr())
- gcw.dispose()
- endCheckmarks()
- }
-
- // marking is complete so we can turn the write barrier off
- setGCPhase(_GCoff)
- gcSweep(work.mode)
- })
-
- _g_.m.traceback = 0
- casgstatus(gp, _Gwaiting, _Grunning)
-
- if trace.enabled {
- traceGCDone()
- }
-
- // all done
- mp.preemptoff = ""
-
- if gcphase != _GCoff {
- throw("gc done but gcphase != _GCoff")
- }
-
- // Record heap_inuse for scavenger.
- memstats.last_heap_inuse = memstats.heap_inuse
-
- // Update GC trigger and pacing for the next cycle.
- gcController.commit(nextTriggerRatio)
- gcPaceSweeper(gcController.trigger)
- gcPaceScavenger(gcController.heapGoal, gcController.lastHeapGoal)
-
- // Update timing memstats
- now := nanotime()
- sec, nsec, _ := time_now()
- unixNow := sec*1e9 + int64(nsec)
- work.pauseNS += now - work.pauseStart
- work.tEnd = now
- memstats.gcPauseDist.record(now - work.pauseStart)
- atomic.Store64(&memstats.last_gc_unix, uint64(unixNow)) // must be Unix time to make sense to user
- atomic.Store64(&memstats.last_gc_nanotime, uint64(now)) // monotonic time for us
- memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
- memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(unixNow)
- memstats.pause_total_ns += uint64(work.pauseNS)
-
- // Update work.totaltime.
- sweepTermCpu := int64(work.stwprocs) * (work.tMark - work.tSweepTerm)
- // We report idle marking time below, but omit it from the
- // overall utilization here since it's "free".
- markCpu := gcController.assistTime + gcController.dedicatedMarkTime + gcController.fractionalMarkTime
- markTermCpu := int64(work.stwprocs) * (work.tEnd - work.tMarkTerm)
- cycleCpu := sweepTermCpu + markCpu + markTermCpu
- work.totaltime += cycleCpu
-
- // Compute overall GC CPU utilization.
- totalCpu := sched.totaltime + (now-sched.procresizetime)*int64(gomaxprocs)
- memstats.gc_cpu_fraction = float64(work.totaltime) / float64(totalCpu)
-
- // Reset sweep state.
- sweep.nbgsweep = 0
- sweep.npausesweep = 0
-
- if work.userForced {
- memstats.numforcedgc++
- }
-
- // Bump GC cycle count and wake goroutines waiting on sweep.
- lock(&work.sweepWaiters.lock)
- memstats.numgc++
- injectglist(&work.sweepWaiters.list)
- unlock(&work.sweepWaiters.lock)
-
- // Finish the current heap profiling cycle and start a new
- // heap profiling cycle. We do this before starting the world
- // so events don't leak into the wrong cycle.
- mProf_NextCycle()
-
- // There may be stale spans in mcaches that need to be swept.
- // Those aren't tracked in any sweep lists, so we need to
- // count them against sweep completion until we ensure all
- // those spans have been forced out.
- sl := sweep.active.begin()
- if !sl.valid {
- throw("failed to set sweep barrier")
- }
-
- systemstack(func() { startTheWorldWithSema(true) })
-
- // Flush the heap profile so we can start a new cycle next GC.
- // This is relatively expensive, so we don't do it with the
- // world stopped.
- mProf_Flush()
-
- // Prepare workbufs for freeing by the sweeper. We do this
- // asynchronously because it can take non-trivial time.
- prepareFreeWorkbufs()
-
- // Free stack spans. This must be done between GC cycles.
- systemstack(freeStackSpans)
-
- // Ensure all mcaches are flushed. Each P will flush its own
- // mcache before allocating, but idle Ps may not. Since this
- // is necessary to sweep all spans, we need to ensure all
- // mcaches are flushed before we start the next GC cycle.
- systemstack(func() {
- forEachP(func(_p_ *p) {
- _p_.mcache.prepareForSweep()
- })
- })
- // Now that we've swept stale spans in mcaches, they don't
- // count against unswept spans.
- sweep.active.end(sl)
-
- // Print gctrace before dropping worldsema. As soon as we drop
- // worldsema another cycle could start and smash the stats
- // we're trying to print.
- if debug.gctrace > 0 {
- util := int(memstats.gc_cpu_fraction * 100)
-
- var sbuf [24]byte
- printlock()
- print("gc ", memstats.numgc,
- " @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
- util, "%: ")
- prev := work.tSweepTerm
- for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} {
- if i != 0 {
- print("+")
- }
- print(string(fmtNSAsMS(sbuf[:], uint64(ns-prev))))
- prev = ns
- }
- print(" ms clock, ")
- for i, ns := range []int64{sweepTermCpu, gcController.assistTime, gcController.dedicatedMarkTime + gcController.fractionalMarkTime, gcController.idleMarkTime, markTermCpu} {
- if i == 2 || i == 3 {
- // Separate mark time components with /.
- print("/")
- } else if i != 0 {
- print("+")
- }
- print(string(fmtNSAsMS(sbuf[:], uint64(ns))))
- }
- print(" ms cpu, ",
- work.heap0>>20, "->", work.heap1>>20, "->", work.heap2>>20, " MB, ",
- work.heapGoal>>20, " MB goal, ",
- gcController.stackScan>>20, " MB stacks, ",
- gcController.globalsScan>>20, " MB globals, ",
- work.maxprocs, " P")
- if work.userForced {
- print(" (forced)")
- }
- print("\n")
- printunlock()
- }
-
- semrelease(&worldsema)
- semrelease(&gcsema)
- // Careful: another GC cycle may start now.
-
- releasem(mp)
- mp = nil
-
- // now that gc is done, kick off finalizer thread if needed
- if !concurrentSweep {
- // give the queued finalizers, if any, a chance to run
- Gosched()
- }
-}
-
-// gcBgMarkStartWorkers prepares background mark worker goroutines. These
-// goroutines will not run until the mark phase, but they must be started while
- // the world is not stopped and from a regular G stack. The caller must hold
-// worldsema.
-func gcBgMarkStartWorkers() {
- // Background marking is performed by per-P G's. Ensure that each P has
- // a background GC G.
- //
- // Worker Gs don't exit if gomaxprocs is reduced. If it is raised
- // again, we can reuse the old workers; no need to create new workers.
- for gcBgMarkWorkerCount < gomaxprocs {
- go gcBgMarkWorker()
-
- notetsleepg(&work.bgMarkReady, -1)
- noteclear(&work.bgMarkReady)
- // The worker is now guaranteed to be added to the pool before
- // its P's next findRunnableGCWorker.
-
- gcBgMarkWorkerCount++
- }
-}
-
-// gcBgMarkPrepare sets up state for background marking.
-// Mutator assists must not yet be enabled.
-func gcBgMarkPrepare() {
- // Background marking will stop when the work queues are empty
- // and there are no more workers (note that, since this is
- // concurrent, this may be a transient state, but mark
- // termination will clean it up). Between background workers
- // and assists, we don't really know how many workers there
- // will be, so we pretend to have an arbitrarily large number
- // of workers, almost all of which are "waiting". While a
- // worker is working it decrements nwait. If nproc == nwait,
- // there are no workers.
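- // For example, a worker starting work decrements nwait via
- // atomic.Xadd; when the last running worker finishes, increments
- // nwait back to nproc, and finds no work remaining, it signals
- // completion (see gcBgMarkWorker).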
- work.nproc = ^uint32(0)
- work.nwait = ^uint32(0)
-}
-
-// gcBgMarkWorker is an entry in the gcBgMarkWorkerPool. It points to a single
-// gcBgMarkWorker goroutine.
-type gcBgMarkWorkerNode struct {
- // Unused workers are managed in a lock-free stack. This field must be first.
- node lfnode
-
- // The g of this worker.
- gp guintptr
-
- // Release this m on park. This is used to communicate with the unlock
- // function, which cannot access the G's stack. It is unused outside of
- // gcBgMarkWorker().
- m muintptr
-}
-
-func gcBgMarkWorker() {
- gp := getg()
-
- // We pass node to a gopark unlock function, so it can't be on
- // the stack (see gopark). Prevent deadlock from recursively
- // starting GC by disabling preemption.
- gp.m.preemptoff = "GC worker init"
- node := new(gcBgMarkWorkerNode)
- gp.m.preemptoff = ""
-
- node.gp.set(gp)
-
- node.m.set(acquirem())
- notewakeup(&work.bgMarkReady)
- // After this point, the background mark worker is generally scheduled
- // cooperatively by gcController.findRunnableGCWorker. While performing
- // work on the P, preemption is disabled because we are working on
- // P-local work buffers. When the preempt flag is set, this puts itself
- // into _Gwaiting to be woken up by gcController.findRunnableGCWorker
- // at the appropriate time.
- //
- // When preemption is enabled (e.g., while in gcMarkDone), this worker
- // may be preempted and schedule as a _Grunnable G from a runq. That is
- // fine; it will eventually gopark again for further scheduling via
- // findRunnableGCWorker.
- //
- // Since we disable preemption before notifying bgMarkReady, we
- // guarantee that this G will be in the worker pool for the next
- // findRunnableGCWorker. This isn't strictly necessary, but it reduces
- // latency between _GCmark starting and the workers starting.
-
- for {
- // Go to sleep until woken by
- // gcController.findRunnableGCWorker.
- gopark(func(g *g, nodep unsafe.Pointer) bool {
- node := (*gcBgMarkWorkerNode)(nodep)
-
- if mp := node.m.ptr(); mp != nil {
- // The worker G is no longer running; release
- // the M.
- //
- // N.B. it is _safe_ to release the M as soon
- // as we are no longer performing P-local mark
- // work.
- //
- // However, since we cooperatively stop work
- // when gp.preempt is set, if we releasem in
- // the loop then the following call to gopark
- // would immediately preempt the G. This is
- // also safe, but inefficient: the G must
- // schedule again only to enter gopark and park
- // again. Thus, we defer the release until
- // after parking the G.
- releasem(mp)
- }
-
- // Release this G to the pool.
- gcBgMarkWorkerPool.push(&node.node)
- // Note that at this point, the G may immediately be
- // rescheduled and may be running.
- return true
- }, unsafe.Pointer(node), waitReasonGCWorkerIdle, traceEvGoBlock, 0)
-
- // Preemption must not occur here, or another G running on this P
- // might observe p.gcMarkWorkerMode and wrongly be attributed it.
-
- // Disable preemption so we can use the gcw. If the
- // scheduler wants to preempt us, we'll stop draining,
- // dispose the gcw, and then preempt.
- node.m.set(acquirem())
- pp := gp.m.p.ptr() // P can't change with preemption disabled.
-
- if gcBlackenEnabled == 0 {
- println("worker mode", pp.gcMarkWorkerMode)
- throw("gcBgMarkWorker: blackening not enabled")
- }
-
- if pp.gcMarkWorkerMode == gcMarkWorkerNotWorker {
- throw("gcBgMarkWorker: mode not set")
- }
-
- startTime := nanotime()
- pp.gcMarkWorkerStartTime = startTime
-
- decnwait := atomic.Xadd(&work.nwait, -1)
- if decnwait == work.nproc {
- println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
- throw("work.nwait was > work.nproc")
- }
-
- systemstack(func() {
- // Mark our goroutine preemptible so its stack
- // can be scanned. This lets two mark workers
- // scan each other (otherwise, they would
- // deadlock). We must not modify anything on
- // the G stack. However, stack shrinking is
- // disabled for mark workers, so it is safe to
- // read from the G stack.
- casgstatus(gp, _Grunning, _Gwaiting)
- switch pp.gcMarkWorkerMode {
- default:
- throw("gcBgMarkWorker: unexpected gcMarkWorkerMode")
- case gcMarkWorkerDedicatedMode:
- gcDrain(&pp.gcw, gcDrainUntilPreempt|gcDrainFlushBgCredit)
- if gp.preempt {
- // We were preempted. This is
- // a useful signal to kick
- // everything out of the run
- // queue so it can run
- // somewhere else.
- if drainQ, n := runqdrain(pp); n > 0 {
- lock(&sched.lock)
- globrunqputbatch(&drainQ, int32(n))
- unlock(&sched.lock)
- }
- }
- // Go back to draining, this time
- // without preemption.
- gcDrain(&pp.gcw, gcDrainFlushBgCredit)
- case gcMarkWorkerFractionalMode:
- gcDrain(&pp.gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit)
- case gcMarkWorkerIdleMode:
- gcDrain(&pp.gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit)
- }
- casgstatus(gp, _Gwaiting, _Grunning)
- })
-
- // Account for time.
- duration := nanotime() - startTime
- gcController.logWorkTime(pp.gcMarkWorkerMode, duration)
- if pp.gcMarkWorkerMode == gcMarkWorkerFractionalMode {
- atomic.Xaddint64(&pp.gcFractionalMarkTime, duration)
- }
-
- // Was this the last worker and did we run out
- // of work?
- incnwait := atomic.Xadd(&work.nwait, +1)
- if incnwait > work.nproc {
- println("runtime: p.gcMarkWorkerMode=", pp.gcMarkWorkerMode,
- "work.nwait=", incnwait, "work.nproc=", work.nproc)
- throw("work.nwait > work.nproc")
- }
-
- // We'll releasem after this point and thus this P may run
- // something else. We must clear the worker mode to avoid
- // attributing the mode to a different (non-worker) G in
- // traceGoStart.
- pp.gcMarkWorkerMode = gcMarkWorkerNotWorker
-
- // If this worker reached a background mark completion
- // point, signal the main GC goroutine.
- if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
- // We don't need the P-local buffers here, allow
- // preemption because we may schedule like a regular
- // goroutine in gcMarkDone (block on locks, etc).
- releasem(node.m.ptr())
- node.m.set(nil)
-
- gcMarkDone()
- }
- }
-}
-
-// gcMarkWorkAvailable reports whether executing a mark worker
-// on p is potentially useful. p may be nil, in which case it only
-// checks the global sources of work.
-func gcMarkWorkAvailable(p *p) bool {
- if p != nil && !p.gcw.empty() {
- return true
- }
- if !work.full.empty() {
- return true // global work available
- }
- if work.markrootNext < work.markrootJobs {
- return true // root scan work available
- }
- return false
-}
-
- // gcMark runs the mark phase (or, for concurrent GC, the mark
- // termination phase). All gcWork caches must be empty.
- // STW is in effect at this point.
-func gcMark(startTime int64) {
- if debug.allocfreetrace > 0 {
- tracegc()
- }
-
- if gcphase != _GCmarktermination {
- throw("in gcMark expecting to see gcphase as _GCmarktermination")
- }
- work.tstart = startTime
-
- // Check that there's no marking work remaining.
- if work.full != 0 || work.markrootNext < work.markrootJobs {
- print("runtime: full=", hex(work.full), " next=", work.markrootNext, " jobs=", work.markrootJobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n")
- panic("non-empty mark queue after concurrent mark")
- }
-
- if debug.gccheckmark > 0 {
- // This is expensive when there's a large number of
- // Gs, so only do it if checkmark is also enabled.
- gcMarkRootCheck()
- }
- if work.full != 0 {
- throw("work.full != 0")
- }
-
- // Drop allg snapshot. allgs may have grown, in which case
- // this is the only reference to the old backing store and
- // there's no need to keep it around.
- work.stackRoots = nil
-
- // Clear out buffers and double-check that all gcWork caches
- // are empty. This should be ensured by gcMarkDone before we
- // enter mark termination.
- //
- // TODO: We could clear out buffers just before mark if this
- // has a non-negligible impact on STW time.
- for _, p := range allp {
- // The write barrier may have buffered pointers since
- // the gcMarkDone barrier. However, since the barrier
- // ensured all reachable objects were marked, all of
- // these must be pointers to black objects. Hence we
- // can just discard the write barrier buffer.
- if debug.gccheckmark > 0 {
- // For debugging, flush the buffer and make
- // sure it really was all marked.
- wbBufFlush1(p)
- } else {
- p.wbBuf.reset()
- }
-
- gcw := &p.gcw
- if !gcw.empty() {
- printlock()
- print("runtime: P ", p.id, " flushedWork ", gcw.flushedWork)
- if gcw.wbuf1 == nil {
- print(" wbuf1=<nil>")
- } else {
- print(" wbuf1.n=", gcw.wbuf1.nobj)
- }
- if gcw.wbuf2 == nil {
- print(" wbuf2=<nil>")
- } else {
- print(" wbuf2.n=", gcw.wbuf2.nobj)
- }
- print("\n")
- throw("P has cached GC work at end of mark termination")
- }
- // There may still be cached empty buffers, which we
- // need to flush since we're going to free them. Also,
- // there may be non-zero stats because we allocated
- // black after the gcMarkDone barrier.
- gcw.dispose()
- }
-
- // Flush scanAlloc from each mcache since we're about to modify
- // heapScan directly. If we were to flush this later, then scanAlloc
- // might have incorrect information.
- //
- // Note that it's not important to retain this information; we know
- // exactly what heapScan is at this point via scanWork.
- for _, p := range allp {
- c := p.mcache
- if c == nil {
- continue
- }
- c.scanAlloc = 0
- }
-
- // Reset controller state.
- gcController.resetLive(work.bytesMarked)
-}
-
-// gcSweep must be called on the system stack because it acquires the heap
-// lock. See mheap for details.
-//
-// The world must be stopped.
-//
-//go:systemstack
-func gcSweep(mode gcMode) {
- assertWorldStopped()
-
- if gcphase != _GCoff {
- throw("gcSweep being done but phase is not GCoff")
- }
-
- lock(&mheap_.lock)
- mheap_.sweepgen += 2
- sweep.active.reset()
- mheap_.pagesSwept.Store(0)
- mheap_.sweepArenas = mheap_.allArenas
- mheap_.reclaimIndex.Store(0)
- mheap_.reclaimCredit.Store(0)
- unlock(&mheap_.lock)
-
- sweep.centralIndex.clear()
-
- if !_ConcurrentSweep || mode == gcForceBlockMode {
- // Special case synchronous sweep.
- // Record that no proportional sweeping has to happen.
- lock(&mheap_.lock)
- mheap_.sweepPagesPerByte = 0
- unlock(&mheap_.lock)
- // Sweep all spans eagerly.
- for sweepone() != ^uintptr(0) {
- sweep.npausesweep++
- }
- // Free workbufs eagerly.
- prepareFreeWorkbufs()
- for freeSomeWbufs(false) {
- }
- // All "free" events for this mark/sweep cycle have
- // now happened, so we can make this profile cycle
- // available immediately.
- mProf_NextCycle()
- mProf_Flush()
- return
- }
-
- // Background sweep.
- lock(&sweep.lock)
- if sweep.parked {
- sweep.parked = false
- ready(sweep.g, 0, true)
- }
- unlock(&sweep.lock)
-}
-
-// gcResetMarkState resets global state prior to marking (concurrent
-// or STW) and resets the stack scan state of all Gs.
-//
-// This is safe to do without the world stopped because any Gs created
-// during or after this will start out in the reset state.
-//
-// gcResetMarkState must be called on the system stack because it acquires
-// the heap lock. See mheap for details.
-//
-//go:systemstack
-func gcResetMarkState() {
- // This may be called during a concurrent phase, so lock to make sure
- // allgs doesn't change.
- forEachG(func(gp *g) {
- gp.gcscandone = false // set to true in gcphasework
- gp.gcAssistBytes = 0
- })
-
- // Clear page marks. This is just 1MB per 64GB of heap, so the
- // time here is pretty trivial.
- lock(&mheap_.lock)
- arenas := mheap_.allArenas
- unlock(&mheap_.lock)
- for _, ai := range arenas {
- ha := mheap_.arenas[ai.l1()][ai.l2()]
- for i := range ha.pageMarks {
- ha.pageMarks[i] = 0
- }
- }
-
- work.bytesMarked = 0
- work.initialHeapLive = atomic.Load64(&gcController.heapLive)
-}
-
-// Hooks for other packages
-
-var poolcleanup func()
-
-//go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
-func sync_runtime_registerPoolCleanup(f func()) {
- poolcleanup = f
-}
-
-func clearpools() {
- // clear sync.Pools
- if poolcleanup != nil {
- poolcleanup()
- }
-
- // Clear central sudog cache.
- // Leave per-P caches alone, they have strictly bounded size.
- // Disconnect cached list before dropping it on the floor,
- // so that a dangling ref to one entry does not pin all of them.
- lock(&sched.sudoglock)
- var sg, sgnext *sudog
- for sg = sched.sudogcache; sg != nil; sg = sgnext {
- sgnext = sg.next
- sg.next = nil
- }
- sched.sudogcache = nil
- unlock(&sched.sudoglock)
-
- // Clear central defer pool.
- // Leave per-P pools alone, they have strictly bounded size.
- lock(&sched.deferlock)
- // Disconnect cached list before dropping it on the floor,
- // so that a dangling ref to one entry does not pin all of them.
- var d, dlink *_defer
- for d = sched.deferpool; d != nil; d = dlink {
- dlink = d.link
- d.link = nil
- }
- sched.deferpool = nil
- unlock(&sched.deferlock)
-}
-
-// Timing
-
-// itoaDiv formats val/(10**dec) into buf.
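- // For example, itoaDiv(buf[:], 12345, 3) renders "12.345" and
- // itoaDiv(buf[:], 12345, 0) renders "12345".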
-func itoaDiv(buf []byte, val uint64, dec int) []byte {
- i := len(buf) - 1
- idec := i - dec
- for val >= 10 || i >= idec {
- buf[i] = byte(val%10 + '0')
- i--
- if i == idec {
- buf[i] = '.'
- i--
- }
- val /= 10
- }
- buf[i] = byte(val + '0')
- return buf[i:]
-}
-
-// fmtNSAsMS nicely formats ns nanoseconds as milliseconds.
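- // For example, 1500000 ns formats as "1.5" and 25000000 ns as "25".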
-func fmtNSAsMS(buf []byte, ns uint64) []byte {
- if ns >= 10e6 {
- // Format as whole milliseconds.
- return itoaDiv(buf, ns/1e6, 0)
- }
- // Format two digits of precision, with at most three decimal places.
- x := ns / 1e3
- if x == 0 {
- buf[0] = '0'
- return buf[:1]
- }
- dec := 3
- for x >= 100 {
- x /= 10
- dec--
- }
- return itoaDiv(buf, x, dec)
-}
-
-// Helpers for testing GC.
-
-// gcTestMoveStackOnNextCall causes the stack to be moved on a call
-// immediately following the call to this. It may not work correctly
-// if any other work appears after this call (such as returning).
-// Typically the following call should be marked go:noinline so it
-// performs a stack check.
-//
-// In rare cases this may not cause the stack to move, specifically if
-// there's a preemption between this call and the next.
-func gcTestMoveStackOnNextCall() {
- gp := getg()
- gp.stackguard0 = stackForceMove
-}
-
-// gcTestIsReachable performs a GC and returns a bit set where bit i
-// is set if ptrs[i] is reachable.
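- // For example, a mask of 0b01 from gcTestIsReachable(p0, p1) means
- // p0 is still reachable and p1 is not.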
-func gcTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
- // This takes the pointers as unsafe.Pointers in order to keep
- // them live long enough for us to attach specials. After
- // that, we drop our references to them.
-
- if len(ptrs) > 64 {
- panic("too many pointers for uint64 mask")
- }
-
- // Block GC while we attach specials and drop our references
- // to ptrs. Otherwise, if a GC is in progress, it could mark
- // them reachable via this function before we have a chance to
- // drop them.
- semacquire(&gcsema)
-
- // Create reachability specials for ptrs.
- specials := make([]*specialReachable, len(ptrs))
- for i, p := range ptrs {
- lock(&mheap_.speciallock)
- s := (*specialReachable)(mheap_.specialReachableAlloc.alloc())
- unlock(&mheap_.speciallock)
- s.special.kind = _KindSpecialReachable
- if !addspecial(p, &s.special) {
- throw("already have a reachable special (duplicate pointer?)")
- }
- specials[i] = s
- // Make sure we don't retain ptrs.
- ptrs[i] = nil
- }
-
- semrelease(&gcsema)
-
- // Force a full GC and sweep.
- GC()
-
- // Process specials.
- for i, s := range specials {
- if !s.done {
- printlock()
- println("runtime: object", i, "was not swept")
- throw("IsReachable failed")
- }
- if s.reachable {
- mask |= 1 << i
- }
- lock(&mheap_.speciallock)
- mheap_.specialReachableAlloc.free(unsafe.Pointer(s))
- unlock(&mheap_.speciallock)
- }
-
- return mask
-}
-
-// gcTestPointerClass returns the category of what p points to, one of:
-// "heap", "stack", "data", "bss", "other". This is useful for checking
-// that a test is doing what it's intended to do.
-//
-// This is nosplit simply to avoid extra pointer shuffling that may
-// complicate a test.
-//
-//go:nosplit
-func gcTestPointerClass(p unsafe.Pointer) string {
- p2 := uintptr(noescape(p))
- gp := getg()
- if gp.stack.lo <= p2 && p2 < gp.stack.hi {
- return "stack"
- }
- if base, _, _ := findObject(p2, 0, 0); base != 0 {
- return "heap"
- }
- for _, datap := range activeModules() {
- if datap.data <= p2 && p2 < datap.edata || datap.noptrdata <= p2 && p2 < datap.enoptrdata {
- return "data"
- }
- if datap.bss <= p2 && p2 < datap.ebss || datap.noptrbss <= p2 && p2 < datap.enoptrbss {
- return "bss"
- }
- }
- KeepAlive(p)
- return "other"
-}
diff --git a/contrib/go/_std_1.18/src/runtime/mgcmark.go b/contrib/go/_std_1.18/src/runtime/mgcmark.go
deleted file mode 100644
index 0bf044e314..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mgcmark.go
+++ /dev/null
@@ -1,1591 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Garbage collector: marking and scanning
-
-package runtime
-
-import (
- "internal/goarch"
- "internal/goexperiment"
- "runtime/internal/atomic"
- "runtime/internal/sys"
- "unsafe"
-)
-
-const (
- fixedRootFinalizers = iota
- fixedRootFreeGStacks
- fixedRootCount
-
- // rootBlockBytes is the number of bytes to scan per data or
- // BSS root.
- rootBlockBytes = 256 << 10
-
- // maxObletBytes is the maximum bytes of an object to scan at
- // once. Larger objects will be split up into "oblets" of at
- // most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
- // scan preemption at ~100 µs.
- //
- // This must be > _MaxSmallSize so that the object base is the
- // span base.
- maxObletBytes = 128 << 10
-
- // drainCheckThreshold specifies how many units of work to do
- // between self-preemption checks in gcDrain. Assuming a scan
- // rate of 1 MB/ms, this is ~100 µs. Lower values have higher
- // overhead in the scan loop (the scheduler check may perform
- // a syscall, so its overhead is nontrivial). Higher values
- // make the system less responsive to incoming work.
- drainCheckThreshold = 100000
-
- // pagesPerSpanRoot indicates how many pages to scan from a span root
- // at a time. Used by special root marking.
- //
- // Higher values improve throughput by increasing locality, but
- // increase the minimum latency of a marking operation.
- //
- // Must be a multiple of the pageInUse bitmap element size and
- // must also evenly divide pagesPerArena.
- pagesPerSpanRoot = 512
-)
-
-// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
-// some miscellany) and initializes scanning-related state.
-//
-// The world must be stopped.
-func gcMarkRootPrepare() {
- assertWorldStopped()
-
- // Compute how many data and BSS root blocks there are.
- nBlocks := func(bytes uintptr) int {
- return int(divRoundUp(bytes, rootBlockBytes))
- }
-
- work.nDataRoots = 0
- work.nBSSRoots = 0
-
- // Scan globals.
- for _, datap := range activeModules() {
- nDataRoots := nBlocks(datap.edata - datap.data)
- if nDataRoots > work.nDataRoots {
- work.nDataRoots = nDataRoots
- }
- }
-
- for _, datap := range activeModules() {
- nBSSRoots := nBlocks(datap.ebss - datap.bss)
- if nBSSRoots > work.nBSSRoots {
- work.nBSSRoots = nBSSRoots
- }
- }
-
- // Scan span roots for finalizer specials.
- //
- // We depend on addfinalizer to mark objects that get
- // finalizers after root marking.
- //
- // We're going to scan the whole heap (that was available at the time the
- // mark phase started, i.e. markArenas) for in-use spans which have specials.
- //
- // Break up the work into arenas, and further into chunks.
- //
- // Snapshot allArenas as markArenas. This snapshot is safe because allArenas
- // is append-only.
- mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
- work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)
-
- // Scan stacks.
- //
- // Gs may be created after this point, but it's okay that we
- // ignore them because they begin life without any roots, so
- // there's nothing to scan, and any roots they create during
- // the concurrent phase will be caught by the write barrier.
- work.stackRoots = allGsSnapshot()
- work.nStackRoots = len(work.stackRoots)
-
- work.markrootNext = 0
- work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
-
- // Calculate base indexes of each root type
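- // (For example, with 3 data roots and 2 BSS roots: baseData = 2
- // (fixedRootCount), baseBSS = 5, baseSpans = 7, and the stack and
- // end bases follow from the span and stack counts in the same way.)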
- work.baseData = uint32(fixedRootCount)
- work.baseBSS = work.baseData + uint32(work.nDataRoots)
- work.baseSpans = work.baseBSS + uint32(work.nBSSRoots)
- work.baseStacks = work.baseSpans + uint32(work.nSpanRoots)
- work.baseEnd = work.baseStacks + uint32(work.nStackRoots)
-}
-
-// gcMarkRootCheck checks that all roots have been scanned. It is
-// purely for debugging.
-func gcMarkRootCheck() {
- if work.markrootNext < work.markrootJobs {
- print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
- throw("left over markroot jobs")
- }
-
- // Check that stacks have been scanned.
- //
- // We only check the first nStackRoots Gs that we should have scanned.
- // Since we don't care about newer Gs (see comment in
- // gcMarkRootPrepare), no locking is required.
- i := 0
- forEachGRace(func(gp *g) {
- if i >= work.nStackRoots {
- return
- }
-
- if !gp.gcscandone {
- println("gp", gp, "goid", gp.goid,
- "status", readgstatus(gp),
- "gcscandone", gp.gcscandone)
- throw("scan missed a g")
- }
-
- i++
- })
-}
-
-// ptrmask for an allocation containing a single pointer.
-var oneptrmask = [...]uint8{1}
-
-// markroot scans the i'th root.
-//
-// Preemption must be disabled (because this uses a gcWork).
-//
-// Returns the amount of GC work credit produced by the operation.
-// If flushBgCredit is true, then that credit is also flushed
-// to the background credit pool.
-//
-// nowritebarrier is only advisory here.
-//
-//go:nowritebarrier
-func markroot(gcw *gcWork, i uint32, flushBgCredit bool) int64 {
- // Note: if you add a case here, please also update heapdump.go:dumproots.
- var workDone int64
- var workCounter *atomic.Int64
- switch {
- case work.baseData <= i && i < work.baseBSS:
- workCounter = &gcController.globalsScanWork
- for _, datap := range activeModules() {
- workDone += markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-work.baseData))
- }
-
- case work.baseBSS <= i && i < work.baseSpans:
- workCounter = &gcController.globalsScanWork
- for _, datap := range activeModules() {
- workDone += markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-work.baseBSS))
- }
-
- case i == fixedRootFinalizers:
- for fb := allfin; fb != nil; fb = fb.alllink {
- cnt := uintptr(atomic.Load(&fb.cnt))
- scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
- }
-
- case i == fixedRootFreeGStacks:
- // Switch to the system stack so we can call
- // stackfree.
- systemstack(markrootFreeGStacks)
-
- case work.baseSpans <= i && i < work.baseStacks:
- // mark mspan.specials
- markrootSpans(gcw, int(i-work.baseSpans))
-
- default:
- // the rest is scanning goroutine stacks
- workCounter = &gcController.stackScanWork
- if i < work.baseStacks || work.baseEnd <= i {
- printlock()
- print("runtime: markroot index ", i, " not in stack roots range [", work.baseStacks, ", ", work.baseEnd, ")\n")
- throw("markroot: bad index")
- }
- gp := work.stackRoots[i-work.baseStacks]
-
- // Remember when we first observed the G blocked; this is
- // needed only for traceback output.
- status := readgstatus(gp) // We are not in a scan state
- if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
- gp.waitsince = work.tstart
- }
-
- // scanstack must be done on the system stack in case
- // we're trying to scan our own stack.
- systemstack(func() {
- // If this is a self-scan, put the user G in
- // _Gwaiting to prevent self-deadlock. It may
- // already be in _Gwaiting if this is a mark
- // worker or we're in mark termination.
- userG := getg().m.curg
- selfScan := gp == userG && readgstatus(userG) == _Grunning
- if selfScan {
- casgstatus(userG, _Grunning, _Gwaiting)
- userG.waitreason = waitReasonGarbageCollectionScan
- }
-
- // TODO: suspendG blocks (and spins) until gp
- // stops, which may take a while for
- // running goroutines. Consider doing this in
- // two phases where the first is non-blocking:
- // we scan the stacks we can and ask running
- // goroutines to scan themselves; and the
- // second blocks.
- stopped := suspendG(gp)
- if stopped.dead {
- gp.gcscandone = true
- return
- }
- if gp.gcscandone {
- throw("g already scanned")
- }
- workDone += scanstack(gp, gcw)
- gp.gcscandone = true
- resumeG(stopped)
-
- if selfScan {
- casgstatus(userG, _Gwaiting, _Grunning)
- }
- })
- }
- if goexperiment.PacerRedesign {
- if workCounter != nil && workDone != 0 {
- workCounter.Add(workDone)
- if flushBgCredit {
- gcFlushBgCredit(workDone)
- }
- }
- }
- return workDone
-}
-
-// markrootBlock scans the shard'th shard of the block of memory [b0,
-// b0+n0), with the given pointer mask.
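- //
- // For example, with rootBlockBytes = 256 KiB, shard 2 of a 1 MiB
- // block scans bytes [512 KiB, 768 KiB) of [b0, b0+n0).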
-//
-// Returns the amount of work done.
-//
-//go:nowritebarrier
-func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) int64 {
- if rootBlockBytes%(8*goarch.PtrSize) != 0 {
- // This is necessary to pick byte offsets in ptrmask0.
- throw("rootBlockBytes must be a multiple of 8*ptrSize")
- }
-
- // Note that if b0 is toward the end of the address space,
- // then b0 + rootBlockBytes might wrap around.
- // These tests are written to avoid any possible overflow.
- off := uintptr(shard) * rootBlockBytes
- if off >= n0 {
- return 0
- }
- b := b0 + off
- ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*goarch.PtrSize))))
- n := uintptr(rootBlockBytes)
- if off+n > n0 {
- n = n0 - off
- }
-
- // Scan this shard.
- scanblock(b, n, ptrmask, gcw, nil)
- return int64(n)
-}
-
-// markrootFreeGStacks frees stacks of dead Gs.
-//
-// This does not free stacks of dead Gs cached on Ps, but having a few
-// cached stacks around isn't a problem.
-func markrootFreeGStacks() {
- // Take list of dead Gs with stacks.
- lock(&sched.gFree.lock)
- list := sched.gFree.stack
- sched.gFree.stack = gList{}
- unlock(&sched.gFree.lock)
- if list.empty() {
- return
- }
-
- // Free stacks.
- q := gQueue{list.head, list.head}
- for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
- stackfree(gp.stack)
- gp.stack.lo = 0
- gp.stack.hi = 0
- // Manipulate the queue directly since the Gs are
- // already all linked the right way.
- q.tail.set(gp)
- }
-
- // Put Gs back on the free list.
- lock(&sched.gFree.lock)
- sched.gFree.noStack.pushAll(q)
- unlock(&sched.gFree.lock)
-}
-
-// markrootSpans marks roots for one shard of markArenas.
-//
-//go:nowritebarrier
-func markrootSpans(gcw *gcWork, shard int) {
- // Objects with finalizers have two GC-related invariants:
- //
- // 1) Everything reachable from the object must be marked.
- // This ensures that when we pass the object to its finalizer,
- // everything the finalizer can reach will be retained.
- //
- // 2) Finalizer specials (which are not in the garbage
- // collected heap) are roots. In practice, this means the fn
- // field must be scanned.
- sg := mheap_.sweepgen
-
- // Find the arena and page index into that arena for this shard.
- ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)]
- ha := mheap_.arenas[ai.l1()][ai.l2()]
- arenaPage := uint(uintptr(shard) * pagesPerSpanRoot % pagesPerArena)
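- // For example, on linux/amd64 (64 MiB arenas, 8 KiB pages)
- // pagesPerArena is 8192, so each arena contributes 16 shards and
- // shard 18 maps to arena index 1, pages [1024, 1536).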
-
- // Construct slice of bitmap which we'll iterate over.
- specialsbits := ha.pageSpecials[arenaPage/8:]
- specialsbits = specialsbits[:pagesPerSpanRoot/8]
- for i := range specialsbits {
- // Find set bits, which correspond to spans with specials.
- specials := atomic.Load8(&specialsbits[i])
- if specials == 0 {
- continue
- }
- for j := uint(0); j < 8; j++ {
- if specials&(1<<j) == 0 {
- continue
- }
- // Find the span for this bit.
- //
- // This value is guaranteed to be non-nil because having
- // specials implies that the span is in-use, and since we're
- // currently marking we can be sure that we don't have to worry
- // about the span being freed and re-used.
- s := ha.spans[arenaPage+uint(i)*8+j]
-
- // The state must be mSpanInUse if the specials bit is set, so
- // sanity check that.
- if state := s.state.get(); state != mSpanInUse {
- print("s.state = ", state, "\n")
- throw("non in-use span found with specials bit set")
- }
- // Check that this span was swept (it may be cached or uncached).
- if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
- // sweepgen was updated (+2) during non-checkmark GC pass
- print("sweep ", s.sweepgen, " ", sg, "\n")
- throw("gc: unswept span")
- }
-
- // Lock the specials to prevent a special from being
- // removed from the list while we're traversing it.
- lock(&s.speciallock)
- for sp := s.specials; sp != nil; sp = sp.next {
- if sp.kind != _KindSpecialFinalizer {
- continue
- }
- // don't mark finalized object, but scan it so we
- // retain everything it points to.
- spf := (*specialfinalizer)(unsafe.Pointer(sp))
- // A finalizer can be set for an inner byte of an object; find the object's beginning.
- p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
-
- // Mark everything that can be reached from
- // the object (but *not* the object itself or
- // we'll never collect it).
- scanobject(p, gcw)
-
- // The special itself is a root.
- scanblock(uintptr(unsafe.Pointer(&spf.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
- }
- unlock(&s.speciallock)
- }
- }
-}
-
-// gcAssistAlloc performs GC work to make gp's assist debt positive.
-// gp must be the calling user goroutine.
-//
-// This must be called with preemption enabled.
-func gcAssistAlloc(gp *g) {
- // Don't assist in non-preemptible contexts. These are
- // generally fragile and won't allow the assist to block.
- if getg() == gp.m.g0 {
- return
- }
- if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
- return
- }
-
- traced := false
-retry:
- // Compute the amount of scan work we need to do to make the
- // balance positive. When the required amount of work is low,
- // we over-assist to build up credit for future allocations
- // and amortize the cost of assisting.
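- // For example, with assistWorkPerByte = 0.5 and a debt of 1024
- // bytes, scanWork = 512 units; if that is below gcOverAssistWork,
- // scanWork is raised to gcOverAssistWork and debtBytes recomputed.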
- assistWorkPerByte := gcController.assistWorkPerByte.Load()
- assistBytesPerWork := gcController.assistBytesPerWork.Load()
- debtBytes := -gp.gcAssistBytes
- scanWork := int64(assistWorkPerByte * float64(debtBytes))
- if scanWork < gcOverAssistWork {
- scanWork = gcOverAssistWork
- debtBytes = int64(assistBytesPerWork * float64(scanWork))
- }
-
- // Steal as much credit as we can from the background GC's
- // scan credit. This is racy and may drop the background
- // credit below 0 if two mutators steal at the same time. This
- // will just cause steals to fail until credit is accumulated
- // again, so in the long run it doesn't really matter, but we
- // do have to handle the negative credit case.
- bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
- stolen := int64(0)
- if bgScanCredit > 0 {
- if bgScanCredit < scanWork {
- stolen = bgScanCredit
- gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(stolen))
- } else {
- stolen = scanWork
- gp.gcAssistBytes += debtBytes
- }
- atomic.Xaddint64(&gcController.bgScanCredit, -stolen)
-
- scanWork -= stolen
-
- if scanWork == 0 {
- // We were able to steal all of the credit we
- // needed.
- if traced {
- traceGCMarkAssistDone()
- }
- return
- }
- }
-
- if trace.enabled && !traced {
- traced = true
- traceGCMarkAssistStart()
- }
-
- // Perform assist work
- systemstack(func() {
- gcAssistAlloc1(gp, scanWork)
- // The user stack may have moved, so this can't touch
- // anything on it until it returns from systemstack.
- })
-
- completed := gp.param != nil
- gp.param = nil
- if completed {
- gcMarkDone()
- }
-
- if gp.gcAssistBytes < 0 {
- // We were unable to steal enough credit or perform
- // enough work to pay off the assist debt. We need to
- // do one of these before letting the mutator allocate
- // more to prevent over-allocation.
- //
- // If this is because we were preempted, reschedule
- // and try some more.
- if gp.preempt {
- Gosched()
- goto retry
- }
-
- // Add this G to an assist queue and park. When the GC
- // has more background credit, it will satisfy queued
- // assists before flushing to the global credit pool.
- //
- // Note that this does *not* get woken up when more
- // work is added to the work list. The theory is that
- // there wasn't enough work to do anyway, so we might
- // as well let background marking take care of the
- // work that is available.
- if !gcParkAssist() {
- goto retry
- }
-
- // At this point either background GC has satisfied
- // this G's assist debt, or the GC cycle is over.
- }
- if traced {
- traceGCMarkAssistDone()
- }
-}
-
-// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
-// stack. This is a separate function to make it easier to see that
-// we're not capturing anything from the user stack, since the user
-// stack may move while we're in this function.
-//
-// gcAssistAlloc1 indicates whether this assist completed the mark
-// phase by setting gp.param to non-nil. This can't be communicated on
-// the stack since it may move.
-//
-//go:systemstack
-func gcAssistAlloc1(gp *g, scanWork int64) {
- // Clear the flag indicating that this assist completed the
- // mark phase.
- gp.param = nil
-
- if atomic.Load(&gcBlackenEnabled) == 0 {
- // The gcBlackenEnabled check in malloc races with the
- // store that clears it but an atomic check in every malloc
- // would be a performance hit.
- // Instead we recheck it here on the non-preemptible system
- // stack to determine if we should perform an assist.
-
- // GC is done, so ignore any remaining debt.
- gp.gcAssistBytes = 0
- return
- }
- // Track time spent in this assist. Since we're on the
- // system stack, this is non-preemptible, so we can
- // just measure start and end time.
- startTime := nanotime()
-
- decnwait := atomic.Xadd(&work.nwait, -1)
- if decnwait == work.nproc {
- println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
- throw("nwait > work.nprocs")
- }
-
- // gcDrainN requires the caller to be preemptible.
- casgstatus(gp, _Grunning, _Gwaiting)
- gp.waitreason = waitReasonGCAssistMarking
-
- // drain own cached work first in the hopes that it
- // will be more cache friendly.
- gcw := &getg().m.p.ptr().gcw
- workDone := gcDrainN(gcw, scanWork)
-
- casgstatus(gp, _Gwaiting, _Grunning)
-
- // Record that we did this much scan work.
- //
- // Back out the number of bytes of assist credit that
- // this scan work counts for. The "1+" is a poor man's
- // round-up, to ensure this adds credit even if
- // assistBytesPerWork is very low.
- assistBytesPerWork := gcController.assistBytesPerWork.Load()
- gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(workDone))
-
- // If this is the last worker and we ran out of work,
- // signal a completion point.
- incnwait := atomic.Xadd(&work.nwait, +1)
- if incnwait > work.nproc {
- println("runtime: work.nwait=", incnwait,
- "work.nproc=", work.nproc)
- throw("work.nwait > work.nproc")
- }
-
- if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
- // This has reached a background completion point. Set
- // gp.param to a non-nil value to indicate this. It
- // doesn't matter what we set it to (it just has to be
- // a valid pointer).
- gp.param = unsafe.Pointer(gp)
- }
- duration := nanotime() - startTime
- _p_ := gp.m.p.ptr()
- _p_.gcAssistTime += duration
- if _p_.gcAssistTime > gcAssistTimeSlack {
- atomic.Xaddint64(&gcController.assistTime, _p_.gcAssistTime)
- _p_.gcAssistTime = 0
- }
-}
-
-// gcWakeAllAssists wakes all currently blocked assists. This is used
-// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
-// new assists from going to sleep after this point.
-func gcWakeAllAssists() {
- lock(&work.assistQueue.lock)
- list := work.assistQueue.q.popList()
- injectglist(&list)
- unlock(&work.assistQueue.lock)
-}
-
-// gcParkAssist puts the current goroutine on the assist queue and parks.
-//
-// gcParkAssist reports whether the assist is now satisfied. If it
-// returns false, the caller must retry the assist.
-func gcParkAssist() bool {
- lock(&work.assistQueue.lock)
- // If the GC cycle finished while we were getting the lock,
- // exit the assist. The cycle can't finish while we hold the
- // lock.
- if atomic.Load(&gcBlackenEnabled) == 0 {
- unlock(&work.assistQueue.lock)
- return true
- }
-
- gp := getg()
- oldList := work.assistQueue.q
- work.assistQueue.q.pushBack(gp)
-
- // Recheck for background credit now that this G is in
- // the queue, but can still back out. This avoids a
- // race in case background marking has flushed more
- // credit since we checked above.
- if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
- work.assistQueue.q = oldList
- if oldList.tail != 0 {
- oldList.tail.ptr().schedlink.set(nil)
- }
- unlock(&work.assistQueue.lock)
- return false
- }
- // Park.
- goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceEvGoBlockGC, 2)
- return true
-}
-
-// gcFlushBgCredit flushes scanWork units of background scan work
-// credit. This first satisfies blocked assists on the
-// work.assistQueue and then flushes any remaining credit to
-// gcController.bgScanCredit.
-//
-// Write barriers are disallowed because this is used by gcDrain after
-// it has ensured that all work is drained and this must preserve that
-// condition.
-//
-//go:nowritebarrierrec
-func gcFlushBgCredit(scanWork int64) {
- if work.assistQueue.q.empty() {
- // Fast path; there are no blocked assists. There's a
- // small window here where an assist may add itself to
- // the blocked queue and park. If that happens, we'll
- // just get it on the next flush.
- atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
- return
- }
-
- assistBytesPerWork := gcController.assistBytesPerWork.Load()
- scanBytes := int64(float64(scanWork) * assistBytesPerWork)
-
- lock(&work.assistQueue.lock)
- for !work.assistQueue.q.empty() && scanBytes > 0 {
- gp := work.assistQueue.q.pop()
- // Note that gp.gcAssistBytes is negative because gp
- // is in debt. Think carefully about the signs below.
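- // For example, scanBytes = 100 and gp.gcAssistBytes = -40 fully
- // satisfies the debt and leaves scanBytes = 60 for the next assist;
- // scanBytes = 100 and gp.gcAssistBytes = -140 only reduces the debt
- // to -40 and requeues the assist.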
- if scanBytes+gp.gcAssistBytes >= 0 {
- // Satisfy this entire assist debt.
- scanBytes += gp.gcAssistBytes
- gp.gcAssistBytes = 0
- // It's important that we *not* put gp in
- // runnext. Otherwise, it's possible for user
- // code to exploit the GC worker's high
- // scheduler priority to get itself always run
- // before other goroutines and always in the
- // fresh quantum started by GC.
- ready(gp, 0, false)
- } else {
- // Partially satisfy this assist.
- gp.gcAssistBytes += scanBytes
- scanBytes = 0
- // As a heuristic, we move this assist to the
- // back of the queue so that large assists
- // can't clog up the assist queue and
- // substantially delay small assists.
- work.assistQueue.q.pushBack(gp)
- break
- }
- }
-
- if scanBytes > 0 {
- // Convert from scan bytes back to work.
- assistWorkPerByte := gcController.assistWorkPerByte.Load()
- scanWork = int64(float64(scanBytes) * assistWorkPerByte)
- atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
- }
- unlock(&work.assistQueue.lock)
-}
-
-// scanstack scans gp's stack, greying all pointers found on the stack.
-//
-// For goexperiment.PacerRedesign:
-// Returns the amount of scan work performed, but doesn't update
-// gcController.stackScanWork or flush any credit. Any background credit produced
-// by this function should be flushed by its caller. scanstack itself can't
-// safely flush because it may result in trying to wake up a goroutine that
-// was just scanned, resulting in a self-deadlock.
-//
-// scanstack will also shrink the stack if it is safe to do so. If it
-// is not, it schedules a stack shrink for the next synchronous safe
-// point.
-//
-// scanstack is marked go:systemstack because it must not be preempted
-// while using a workbuf.
-//
-//go:nowritebarrier
-//go:systemstack
-func scanstack(gp *g, gcw *gcWork) int64 {
- if readgstatus(gp)&_Gscan == 0 {
- print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
- throw("scanstack - bad status")
- }
-
- switch readgstatus(gp) &^ _Gscan {
- default:
- print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
- throw("mark - bad status")
- case _Gdead:
- return 0
- case _Grunning:
- print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
- throw("scanstack: goroutine not stopped")
- case _Grunnable, _Gsyscall, _Gwaiting:
- // ok
- }
-
- if gp == getg() {
- throw("can't scan our own stack")
- }
-
- // stackSize is the amount of work we'll be reporting.
- //
- // We report the total stack size, more than we scan,
- // because this number needs to line up with gcControllerState's
- // stackScan and scannableStackSize fields.
- //
- // See the documentation on those fields for more information.
- stackSize := gp.stack.hi - gp.stack.lo
-
- if isShrinkStackSafe(gp) {
- // Shrink the stack if not much of it is being used.
- shrinkstack(gp)
- } else {
- // Otherwise, shrink the stack at the next sync safe point.
- gp.preemptShrink = true
- }
-
- var state stackScanState
- state.stack = gp.stack
-
- if stackTraceDebug {
- println("stack trace goroutine", gp.goid)
- }
-
- if debugScanConservative && gp.asyncSafePoint {
- print("scanning async preempted goroutine ", gp.goid, " stack [", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
- }
-
- // Scan the saved context register. This is effectively a live
- // register that gets moved back and forth between the
- // register and sched.ctxt without a write barrier.
- if gp.sched.ctxt != nil {
- scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
- }
-
- // Scan the stack. Accumulate a list of stack objects.
- scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
- scanframeworker(frame, &state, gcw)
- return true
- }
- gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
-
- // Find additional pointers that point into the stack from the heap.
- // Currently this includes defers and panics. See also function copystack.
-
- // Find and trace other pointers in defer records.
- for d := gp._defer; d != nil; d = d.link {
- if d.fn != nil {
- // Scan the func value, which could be a stack allocated closure.
- // See issue 30453.
- scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
- }
- if d.link != nil {
- // The link field of a stack-allocated defer record might point
- // to a heap-allocated defer record. Keep that heap record live.
- scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
- }
- // Retain defers records themselves.
- // Defer records might not be reachable from the G through regular heap
- // tracing because the defer linked list might weave between the stack and the heap.
- if d.heap {
- scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
- }
- }
- if gp._panic != nil {
- // Panics are always stack allocated.
- state.putPtr(uintptr(unsafe.Pointer(gp._panic)), false)
- }
-
- // Find and scan all reachable stack objects.
- //
- // The state's pointer queue prioritizes precise pointers over
- // conservative pointers so that we'll prefer scanning stack
- // objects precisely.
- state.buildIndex()
- for {
- p, conservative := state.getPtr()
- if p == 0 {
- break
- }
- obj := state.findObject(p)
- if obj == nil {
- continue
- }
- r := obj.r
- if r == nil {
- // We've already scanned this object.
- continue
- }
- obj.setRecord(nil) // Don't scan it again.
- if stackTraceDebug {
- printlock()
- print(" live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of size", obj.size)
- if conservative {
- print(" (conservative)")
- }
- println()
- printunlock()
- }
- gcdata := r.gcdata()
- var s *mspan
- if r.useGCProg() {
- // This path is pretty unlikely, an object large enough
- // to have a GC program allocated on the stack.
- // We need some space to unpack the program into a straight
- // bitmask, which we allocate/free here.
- // TODO: it would be nice if there were a way to run a GC
- // program without having to store all its bits. We'd have
- // to change from a Lempel-Ziv style program to something else.
- // Or we can forbid putting objects on stacks if they require
- // a gc program (see issue 27447).
- s = materializeGCProg(r.ptrdata(), gcdata)
- gcdata = (*byte)(unsafe.Pointer(s.startAddr))
- }
-
- b := state.stack.lo + uintptr(obj.off)
- if conservative {
- scanConservative(b, r.ptrdata(), gcdata, gcw, &state)
- } else {
- scanblock(b, r.ptrdata(), gcdata, gcw, &state)
- }
-
- if s != nil {
- dematerializeGCProg(s)
- }
- }
-
- // Deallocate object buffers.
- // (Pointer buffers were all deallocated in the loop above.)
- for state.head != nil {
- x := state.head
- state.head = x.next
- if stackTraceDebug {
- for i := 0; i < x.nobj; i++ {
- obj := &x.obj[i]
- if obj.r == nil { // reachable
- continue
- }
- println(" dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of size", obj.r.size)
- // Note: not necessarily really dead - only reachable-from-ptr dead.
- }
- }
- x.nobj = 0
- putempty((*workbuf)(unsafe.Pointer(x)))
- }
- if state.buf != nil || state.cbuf != nil || state.freeBuf != nil {
- throw("remaining pointer buffers")
- }
- return int64(stackSize)
-}
-
-// Scan a stack frame: local variables and function arguments/results.
-//go:nowritebarrier
-func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
- if _DebugGC > 1 && frame.continpc != 0 {
- print("scanframe ", funcname(frame.fn), "\n")
- }
-
- isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == funcID_asyncPreempt
- isDebugCall := frame.fn.valid() && frame.fn.funcID == funcID_debugCallV2
- if state.conservative || isAsyncPreempt || isDebugCall {
- if debugScanConservative {
- println("conservatively scanning function", funcname(frame.fn), "at PC", hex(frame.continpc))
- }
-
- // Conservatively scan the frame. Unlike the precise
- // case, this includes the outgoing argument space
- // since we may have stopped while this function was
- // setting up a call.
- //
- // TODO: We could narrow this down if the compiler
- // produced a single map per function of stack slots
- // and registers that ever contain a pointer.
- if frame.varp != 0 {
- size := frame.varp - frame.sp
- if size > 0 {
- scanConservative(frame.sp, size, nil, gcw, state)
- }
- }
-
- // Scan arguments to this frame.
- if frame.arglen != 0 {
- // TODO: We could pass the entry argument map
- // to narrow this down further.
- scanConservative(frame.argp, frame.arglen, nil, gcw, state)
- }
-
- if isAsyncPreempt || isDebugCall {
- // This function's frame contained the
- // registers for the asynchronously stopped
- // parent frame. Scan the parent
- // conservatively.
- state.conservative = true
- } else {
- // We only wanted to scan those two frames
- // conservatively. Clear the flag for future
- // frames.
- state.conservative = false
- }
- return
- }
-
- locals, args, objs := getStackMap(frame, &state.cache, false)
-
- // Scan local variables if stack frame has been allocated.
- if locals.n > 0 {
- size := uintptr(locals.n) * goarch.PtrSize
- scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
- }
-
- // Scan arguments.
- if args.n > 0 {
- scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state)
- }
-
- // Add all stack objects to the stack object list.
- if frame.varp != 0 {
- // varp is 0 for defers, where there are no locals.
- // In that case, there can't be a pointer to its args, either.
- // (And all args would be scanned above anyway.)
- for i := range objs {
- obj := &objs[i]
- off := obj.off
- base := frame.varp // locals base pointer
- if off >= 0 {
- base = frame.argp // arguments and return values base pointer
- }
- ptr := base + uintptr(off)
- if ptr < frame.sp {
- // object hasn't been allocated in the frame yet.
- continue
- }
- if stackTraceDebug {
- println("stkobj at", hex(ptr), "of size", obj.size)
- }
- state.addObject(ptr, obj)
- }
- }
-}
-
-type gcDrainFlags int
-
-const (
- gcDrainUntilPreempt gcDrainFlags = 1 << iota
- gcDrainFlushBgCredit
- gcDrainIdle
- gcDrainFractional
-)
-
-// gcDrain scans roots and objects in work buffers, blackening grey
-// objects until it is unable to get more work. It may return before
-// GC is done; it's the caller's responsibility to balance work from
-// other Ps.
-//
-// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
-// is set.
-//
-// If flags&gcDrainIdle != 0, gcDrain returns when there is other work
-// to do.
-//
-// If flags&gcDrainFractional != 0, gcDrain self-preempts when
- // pollFractionalWorkerExit() returns true; in this mode the drain
- // does not block waiting for more work.
-//
-// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
-// credit to gcController.bgScanCredit every gcCreditSlack units of
-// scan work.
-//
-// gcDrain will always return if there is a pending STW.
-//
-//go:nowritebarrier
-func gcDrain(gcw *gcWork, flags gcDrainFlags) {
- if !writeBarrier.needed {
- throw("gcDrain phase incorrect")
- }
-
- gp := getg().m.curg
- preemptible := flags&gcDrainUntilPreempt != 0
- flushBgCredit := flags&gcDrainFlushBgCredit != 0
- idle := flags&gcDrainIdle != 0
-
- initScanWork := gcw.heapScanWork
-
- // checkWork is the scan work before performing the next
- // self-preempt check.
- checkWork := int64(1<<63 - 1)
- var check func() bool
- if flags&(gcDrainIdle|gcDrainFractional) != 0 {
- checkWork = initScanWork + drainCheckThreshold
- if idle {
- check = pollWork
- } else if flags&gcDrainFractional != 0 {
- check = pollFractionalWorkerExit
- }
- }
-
- // Drain root marking jobs.
- if work.markrootNext < work.markrootJobs {
- // Stop if we're preemptible or if someone wants to STW.
- for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
- job := atomic.Xadd(&work.markrootNext, +1) - 1
- if job >= work.markrootJobs {
- break
- }
- markroot(gcw, job, flushBgCredit)
- if check != nil && check() {
- goto done
- }
- }
- }
-
- // Drain heap marking jobs.
- // Stop if we're preemptible or if someone wants to STW.
- for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
- // Try to keep work available on the global queue. We used to
- // check if there were waiting workers, but it's better to
- // just keep work available than to make workers wait. In the
- // worst case, we'll do O(log(_WorkbufSize)) unnecessary
- // balances.
- if work.full == 0 {
- gcw.balance()
- }
-
- b := gcw.tryGetFast()
- if b == 0 {
- b = gcw.tryGet()
- if b == 0 {
- // Flush the write barrier
- // buffer; this may create
- // more work.
- wbBufFlush(nil, 0)
- b = gcw.tryGet()
- }
- }
- if b == 0 {
- // Unable to get work.
- break
- }
- scanobject(b, gcw)
-
- // Flush background scan work credit to the global
- // account if we've accumulated enough locally so
- // mutator assists can draw on it.
- if gcw.heapScanWork >= gcCreditSlack {
- gcController.heapScanWork.Add(gcw.heapScanWork)
- if flushBgCredit {
- gcFlushBgCredit(gcw.heapScanWork - initScanWork)
- initScanWork = 0
- }
- checkWork -= gcw.heapScanWork
- gcw.heapScanWork = 0
-
- if checkWork <= 0 {
- checkWork += drainCheckThreshold
- if check != nil && check() {
- break
- }
- }
- }
- }
-
-done:
- // Flush remaining scan work credit.
- if gcw.heapScanWork > 0 {
- gcController.heapScanWork.Add(gcw.heapScanWork)
- if flushBgCredit {
- gcFlushBgCredit(gcw.heapScanWork - initScanWork)
- }
- gcw.heapScanWork = 0
- }
-}
-
-// gcDrainN blackens grey objects until it has performed roughly
-// scanWork units of scan work or the G is preempted. This is
-// best-effort, so it may perform less work if it fails to get a work
-// buffer. Otherwise, it will perform at least scanWork units of work, but
-// may perform more because scanning is always done in whole object
-// increments. It returns the amount of scan work performed.
-//
-// The caller goroutine must be in a preemptible state (e.g.,
-// _Gwaiting) to prevent deadlocks during stack scanning. As a
-// consequence, this must be called on the system stack.
-//
-//go:nowritebarrier
-//go:systemstack
-func gcDrainN(gcw *gcWork, scanWork int64) int64 {
- if !writeBarrier.needed {
- throw("gcDrainN phase incorrect")
- }
-
- // There may already be scan work on the gcw, which we don't
- // want to claim was done by this call.
- workFlushed := -gcw.heapScanWork
-
- gp := getg().m.curg
- for !gp.preempt && workFlushed+gcw.heapScanWork < scanWork {
- // See gcDrain comment.
- if work.full == 0 {
- gcw.balance()
- }
-
- b := gcw.tryGetFast()
- if b == 0 {
- b = gcw.tryGet()
- if b == 0 {
- // Flush the write barrier buffer;
- // this may create more work.
- wbBufFlush(nil, 0)
- b = gcw.tryGet()
- }
- }
-
- if b == 0 {
- // Try to do a root job.
- if work.markrootNext < work.markrootJobs {
- job := atomic.Xadd(&work.markrootNext, +1) - 1
- if job < work.markrootJobs {
- work := markroot(gcw, job, false)
- if goexperiment.PacerRedesign {
- workFlushed += work
- }
- continue
- }
- }
- // No heap or root jobs.
- break
- }
-
- scanobject(b, gcw)
-
- // Flush background scan work credit.
- if gcw.heapScanWork >= gcCreditSlack {
- gcController.heapScanWork.Add(gcw.heapScanWork)
- workFlushed += gcw.heapScanWork
- gcw.heapScanWork = 0
- }
- }
-
- // Unlike gcDrain, there's no need to flush remaining work
- // here because this never flushes to bgScanCredit and
- // gcw.dispose will flush any remaining work to scanWork.
-
- return workFlushed + gcw.heapScanWork
-}
-
-// scanblock scans b as scanobject would, but using an explicit
-// pointer bitmap instead of the heap bitmap.
-//
-// This is used to scan non-heap roots, so it does not update
-// gcw.bytesMarked or gcw.heapScanWork.
-//
-// If stk != nil, possible stack pointers are also reported to stk.putPtr.
-//go:nowritebarrier
-func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
- // Use local copies of original parameters, so that a stack trace
- // due to one of the throws below shows the original block
- // base and extent.
- b := b0
- n := n0
-
- for i := uintptr(0); i < n; {
- // Find bits for the next word.
- bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8)))
- if bits == 0 {
- i += goarch.PtrSize * 8
- continue
- }
- for j := 0; j < 8 && i < n; j++ {
- if bits&1 != 0 {
- // Same work as in scanobject; see comments there.
- p := *(*uintptr)(unsafe.Pointer(b + i))
- if p != 0 {
- if obj, span, objIndex := findObject(p, b, i); obj != 0 {
- greyobject(obj, b, i, span, gcw, objIndex)
- } else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
- stk.putPtr(p, false)
- }
- }
- }
- bits >>= 1
- i += goarch.PtrSize
- }
- }
-}
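-
-// The ptrmask walked above encodes one bit per pointer-sized word, eight
-// words per mask byte, least-significant bit first. A minimal,
-// self-contained sketch of the same walk (illustrative only; it assumes a
-// plain []byte mask instead of the runtime's raw pointer arithmetic):
-//
-//	func walkPtrMask(ptrmask []byte, nWords int, visit func(word int)) {
-//		for i := 0; i < nWords; {
-//			bits := ptrmask[i/8]
-//			if bits == 0 {
-//				i += 8 // no pointers in these eight words
-//				continue
-//			}
-//			for j := 0; j < 8 && i < nWords; j++ {
-//				if bits&1 != 0 {
-//					visit(i) // word i may hold a pointer
-//				}
-//				bits >>= 1
-//				i++
-//			}
-//		}
-//	}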
-
-// scanobject scans the object starting at b, adding pointers to gcw.
-// b must point to the beginning of a heap object or an oblet.
-// scanobject consults the GC bitmap for the pointer mask and the
-// spans for the size of the object.
-//
-//go:nowritebarrier
-func scanobject(b uintptr, gcw *gcWork) {
- // Prefetch object before we scan it.
- //
- // This will overlap fetching the beginning of the object with initial
- // setup before we start scanning the object.
- sys.Prefetch(b)
-
- // Find the bits for b and the size of the object at b.
- //
- // b is either the beginning of an object, in which case this
- // is the size of the object to scan, or it points to an
- // oblet, in which case we compute the size to scan below.
- hbits := heapBitsForAddr(b)
- s := spanOfUnchecked(b)
- n := s.elemsize
- if n == 0 {
- throw("scanobject n == 0")
- }
-
- if n > maxObletBytes {
- // Large object. Break into oblets for better
- // parallelism and lower latency.
- if b == s.base() {
- // It's possible this is a noscan object (not
- // from greyobject, but from other code
- // paths), in which case we must *not* enqueue
- // oblets since their bitmaps will be
- // uninitialized.
- if s.spanclass.noscan() {
- // Bypass the whole scan.
- gcw.bytesMarked += uint64(n)
- return
- }
-
- // Enqueue the other oblets to scan later.
- // Some oblets may be in b's scalar tail, but
- // these will be marked as "no more pointers",
- // so we'll drop out immediately when we go to
- // scan those.
- for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
- if !gcw.putFast(oblet) {
- gcw.put(oblet)
- }
- }
- }
-
- // Compute the size of the oblet. Since this object
- // must be a large object, s.base() is the beginning
- // of the object.
- n = s.base() + s.elemsize - b
- if n > maxObletBytes {
- n = maxObletBytes
- }
- }
-
- var i uintptr
- for i = 0; i < n; i, hbits = i+goarch.PtrSize, hbits.next() {
- // Load bits once. See CL 22712 and issue 16973 for discussion.
- bits := hbits.bits()
- if bits&bitScan == 0 {
- break // no more pointers in this object
- }
- if bits&bitPointer == 0 {
- continue // not a pointer
- }
-
- // Work here is duplicated in scanblock and above.
- // If you make changes here, make changes there too.
- obj := *(*uintptr)(unsafe.Pointer(b + i))
-
- // At this point we have extracted the next potential pointer.
- // Quickly filter out nil and pointers back to the current object.
- if obj != 0 && obj-b >= n {
- // Test if obj points into the Go heap and, if so,
- // mark the object.
- //
- // Note that it's possible for findObject to
- // fail if obj points to a just-allocated heap
- // object because of a race with growing the
- // heap. In this case, we know the object was
- // just allocated and hence will be marked by
- // allocation itself.
- if obj, span, objIndex := findObject(obj, b, i); obj != 0 {
- greyobject(obj, b, i, span, gcw, objIndex)
- }
- }
- }
- gcw.bytesMarked += uint64(n)
- gcw.heapScanWork += int64(i)
-}
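-
-// A worked example of the oblet split above, assuming maxObletBytes is
-// 128 KiB (its value elsewhere in this file): scanning a 320 KiB object
-// from its base enqueues oblets at base+128K and base+256K and scans
-// [base, base+128K) itself; when the oblet at base+256K is dequeued later,
-// n = s.base() + s.elemsize - b = 320K - 256K = 64 KiB.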
-
-// scanConservative scans block [b, b+n) conservatively, treating any
-// pointer-like value in the block as a pointer.
-//
-// If ptrmask != nil, only words that are marked in ptrmask are
-// considered as potential pointers.
-//
-// If state != nil, it's assumed that [b, b+n) is a block in the stack
-// and may contain pointers to stack objects.
-func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) {
- if debugScanConservative {
- printlock()
- print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
- hexdumpWords(b, b+n, func(p uintptr) byte {
- if ptrmask != nil {
- word := (p - b) / goarch.PtrSize
- bits := *addb(ptrmask, word/8)
- if (bits>>(word%8))&1 == 0 {
- return '$'
- }
- }
-
- val := *(*uintptr)(unsafe.Pointer(p))
- if state != nil && state.stack.lo <= val && val < state.stack.hi {
- return '@'
- }
-
- span := spanOfHeap(val)
- if span == nil {
- return ' '
- }
- idx := span.objIndex(val)
- if span.isFree(idx) {
- return ' '
- }
- return '*'
- })
- printunlock()
- }
-
- for i := uintptr(0); i < n; i += goarch.PtrSize {
- if ptrmask != nil {
- word := i / goarch.PtrSize
- bits := *addb(ptrmask, word/8)
- if bits == 0 {
- // Skip 8 words (the loop increment will do the 8th)
- //
- // This must be the first time we've
- // seen this word of ptrmask, so i
- // must be 8-word-aligned, but check
- // our reasoning just in case.
- if i%(goarch.PtrSize*8) != 0 {
- throw("misaligned mask")
- }
- i += goarch.PtrSize*8 - goarch.PtrSize
- continue
- }
- if (bits>>(word%8))&1 == 0 {
- continue
- }
- }
-
- val := *(*uintptr)(unsafe.Pointer(b + i))
-
- // Check if val points into the stack.
- if state != nil && state.stack.lo <= val && val < state.stack.hi {
- // val may point to a stack object. This
- // object may be dead from last cycle and
- // hence may contain pointers to unallocated
- // objects, but unlike heap objects we can't
- // tell if it's already dead. Hence, if all
- // pointers to this object are from
- // conservative scanning, we have to scan it
- // defensively, too.
- state.putPtr(val, true)
- continue
- }
-
- // Check if val points to a heap span.
- span := spanOfHeap(val)
- if span == nil {
- continue
- }
-
- // Check if val points to an allocated object.
- idx := span.objIndex(val)
- if span.isFree(idx) {
- continue
- }
-
- // val points to an allocated object. Mark it.
- obj := span.base() + idx*span.elemsize
- greyobject(obj, b, i, span, gcw, idx)
- }
-}
-
-// Shade the object if it isn't already.
-// The object is not nil and known to be in the heap.
-// Preemption must be disabled.
-//go:nowritebarrier
-func shade(b uintptr) {
- if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
- gcw := &getg().m.p.ptr().gcw
- greyobject(obj, 0, 0, span, gcw, objIndex)
- }
-}
-
-// obj is the start of an object with mark mbits.
-// If it isn't already marked, mark it and enqueue into gcw.
-// base and off are for debugging only and could be removed.
-//
-// See also wbBufFlush1, which partially duplicates this logic.
-//
-//go:nowritebarrierrec
-func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
- // obj should be start of allocation, and so must be at least pointer-aligned.
- if obj&(goarch.PtrSize-1) != 0 {
- throw("greyobject: obj not pointer-aligned")
- }
- mbits := span.markBitsForIndex(objIndex)
-
- if useCheckmark {
- if setCheckmark(obj, base, off, mbits) {
- // Already marked.
- return
- }
- } else {
- if debug.gccheckmark > 0 && span.isFree(objIndex) {
- print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
- gcDumpObject("base", base, off)
- gcDumpObject("obj", obj, ^uintptr(0))
- getg().m.traceback = 2
- throw("marking free object")
- }
-
- // If marked we have nothing to do.
- if mbits.isMarked() {
- return
- }
- mbits.setMarked()
-
- // Mark span.
- arena, pageIdx, pageMask := pageIndexOf(span.base())
- if arena.pageMarks[pageIdx]&pageMask == 0 {
- atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
- }
-
- // If this is a noscan object, fast-track it to black
- // instead of greying it.
- if span.spanclass.noscan() {
- gcw.bytesMarked += uint64(span.elemsize)
- return
- }
- }
-
- // We're adding obj to P's local workbuf, so it's likely
- // this object will be processed soon by the same P.
- // Even if the workbuf gets flushed, there will likely still be
- // some benefit on platforms with inclusive shared caches.
- sys.Prefetch(obj)
- // Queue the obj for scanning.
- if !gcw.putFast(obj) {
- gcw.put(obj)
- }
-}
-
-// gcDumpObject dumps the contents of obj for debugging and marks the
-// field at byte offset off in obj.
-func gcDumpObject(label string, obj, off uintptr) {
- s := spanOf(obj)
- print(label, "=", hex(obj))
- if s == nil {
- print(" s=nil\n")
- return
- }
- print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
- if state := s.state.get(); 0 <= state && int(state) < len(mSpanStateNames) {
- print(mSpanStateNames[state], "\n")
- } else {
- print("unknown(", state, ")\n")
- }
-
- skipped := false
- size := s.elemsize
- if s.state.get() == mSpanManual && size == 0 {
- // We're printing something from a stack frame. We
- // don't know how big it is, so just show up to an
-		// don't know how big it is, so just show up to and
- size = off + goarch.PtrSize
- }
- for i := uintptr(0); i < size; i += goarch.PtrSize {
- // For big objects, just print the beginning (because
- // that usually hints at the object's type) and the
- // fields around off.
- if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
- skipped = true
- continue
- }
- if skipped {
- print(" ...\n")
- skipped = false
- }
- print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
- if i == off {
- print(" <==")
- }
- print("\n")
- }
- if skipped {
- print(" ...\n")
- }
-}
-
-// gcmarknewobject marks a newly allocated object black. obj must
-// not contain any non-nil pointers.
-//
-// This is nosplit so it can manipulate a gcWork without preemption.
-//
-//go:nowritebarrier
-//go:nosplit
-func gcmarknewobject(span *mspan, obj, size, scanSize uintptr) {
- if useCheckmark { // The world should be stopped so this should not happen.
- throw("gcmarknewobject called while doing checkmark")
- }
-
- // Mark object.
- objIndex := span.objIndex(obj)
- span.markBitsForIndex(objIndex).setMarked()
-
- // Mark span.
- arena, pageIdx, pageMask := pageIndexOf(span.base())
- if arena.pageMarks[pageIdx]&pageMask == 0 {
- atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
- }
-
- gcw := &getg().m.p.ptr().gcw
- gcw.bytesMarked += uint64(size)
- if !goexperiment.PacerRedesign {
- // The old pacer counts newly allocated memory toward
- // heapScanWork because heapScan is continuously updated
- // throughout the GC cycle with newly allocated memory. However,
- // these objects are never actually scanned, so we need
- // to account for them in heapScanWork here, "faking" their work.
- // Otherwise the pacer will think it's always behind, potentially
- // by a large margin.
- //
-		// The new pacer doesn't care about this because it ceases to update
- // heapScan once a GC cycle starts, effectively snapshotting it.
- gcw.heapScanWork += int64(scanSize)
- }
-}
-
-// gcMarkTinyAllocs greys all active tiny alloc blocks.
-//
-// The world must be stopped.
-func gcMarkTinyAllocs() {
- assertWorldStopped()
-
- for _, p := range allp {
- c := p.mcache
- if c == nil || c.tiny == 0 {
- continue
- }
- _, span, objIndex := findObject(c.tiny, 0, 0)
- gcw := &p.gcw
- greyobject(c.tiny, 0, 0, span, gcw, objIndex)
- }
-}
diff --git a/contrib/go/_std_1.18/src/runtime/mgcpacer.go b/contrib/go/_std_1.18/src/runtime/mgcpacer.go
deleted file mode 100644
index d54dbc26c2..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mgcpacer.go
+++ /dev/null
@@ -1,1348 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/cpu"
- "internal/goexperiment"
- "runtime/internal/atomic"
- "unsafe"
-)
-
-const (
- // gcGoalUtilization is the goal CPU utilization for
- // marking as a fraction of GOMAXPROCS.
- gcGoalUtilization = goexperiment.PacerRedesignInt*gcBackgroundUtilization +
- (1-goexperiment.PacerRedesignInt)*(gcBackgroundUtilization+0.05)
-
- // gcBackgroundUtilization is the fixed CPU utilization for background
- // marking. It must be <= gcGoalUtilization. The difference between
- // gcGoalUtilization and gcBackgroundUtilization will be made up by
- // mark assists. The scheduler will aim to use within 50% of this
- // goal.
- //
- // Setting this to < gcGoalUtilization avoids saturating the trigger
- // feedback controller when there are no assists, which allows it to
- // better control CPU and heap growth. However, the larger the gap,
- // the more mutator assists are expected to happen, which impact
- // mutator latency.
- //
- // If goexperiment.PacerRedesign, the trigger feedback controller
- // is replaced with an estimate of the mark/cons ratio that doesn't
- // have the same saturation issues, so this is set equal to
- // gcGoalUtilization.
- gcBackgroundUtilization = 0.25
-
- // gcCreditSlack is the amount of scan work credit that can
- // accumulate locally before updating gcController.heapScanWork and,
- // optionally, gcController.bgScanCredit. Lower values give a more
- // accurate assist ratio and make it more likely that assists will
- // successfully steal background credit. Higher values reduce memory
- // contention.
- gcCreditSlack = 2000
-
- // gcAssistTimeSlack is the nanoseconds of mutator assist time that
- // can accumulate on a P before updating gcController.assistTime.
- gcAssistTimeSlack = 5000
-
- // gcOverAssistWork determines how many extra units of scan work a GC
- // assist does when an assist happens. This amortizes the cost of an
- // assist by pre-paying for this many bytes of future allocations.
- gcOverAssistWork = 64 << 10
-
- // defaultHeapMinimum is the value of heapMinimum for GOGC==100.
- defaultHeapMinimum = (goexperiment.HeapMinimum512KiBInt)*(512<<10) +
- (1-goexperiment.HeapMinimum512KiBInt)*(4<<20)
-
- // scannableStackSizeSlack is the bytes of stack space allocated or freed
- // that can accumulate on a P before updating gcController.stackSize.
- scannableStackSizeSlack = 8 << 10
-)
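-
-// Worked out from the expressions above: with goexperiment.PacerRedesignInt == 1,
-// gcGoalUtilization == gcBackgroundUtilization == 0.25; with it == 0,
-// gcGoalUtilization == 0.25 + 0.05 == 0.30. Likewise, defaultHeapMinimum
-// is 512 KiB under goexperiment.HeapMinimum512KiB and 4 MiB otherwise.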
-
-func init() {
- if offset := unsafe.Offsetof(gcController.heapLive); offset%8 != 0 {
- println(offset)
- throw("gcController.heapLive not aligned to 8 bytes")
- }
-}
-
-// gcController implements the GC pacing controller that determines
-// when to trigger concurrent garbage collection and how much marking
-// work to do in mutator assists and background marking.
-//
-// It uses a feedback control algorithm to adjust the gcController.trigger
-// trigger based on the heap growth and GC CPU utilization each cycle.
-// This algorithm optimizes for heap growth to match GOGC and for CPU
-// utilization between assist and background marking to be 25% of
-// GOMAXPROCS. The high-level design of this algorithm is documented
-// at https://golang.org/s/go15gcpacing.
-//
-// All fields of gcController are used only during a single mark
-// cycle.
-var gcController gcControllerState
-
-type gcControllerState struct {
-
- // Initialized from GOGC. GOGC=off means no GC.
- gcPercent atomic.Int32
-
- _ uint32 // padding so following 64-bit values are 8-byte aligned
-
- // heapMinimum is the minimum heap size at which to trigger GC.
- // For small heaps, this overrides the usual GOGC*live set rule.
- //
- // When there is a very small live set but a lot of allocation, simply
- // collecting when the heap reaches GOGC*live results in many GC
- // cycles and high total per-GC overhead. This minimum amortizes this
- // per-GC overhead while keeping the heap reasonably small.
- //
- // During initialization this is set to 4MB*GOGC/100. In the case of
- // GOGC==0, this will set heapMinimum to 0, resulting in constant
- // collection even when the heap size is small, which is useful for
- // debugging.
- heapMinimum uint64
-
- // triggerRatio is the heap growth ratio that triggers marking.
- //
- // E.g., if this is 0.6, then GC should start when the live
- // heap has reached 1.6 times the heap size marked by the
- // previous cycle. This should be ≤ GOGC/100 so the trigger
- // heap size is less than the goal heap size. This is set
- // during mark termination for the next cycle's trigger.
- //
- // Protected by mheap_.lock or a STW.
- //
- // Used if !goexperiment.PacerRedesign.
- triggerRatio float64
-
- // trigger is the heap size that triggers marking.
- //
- // When heapLive ≥ trigger, the mark phase will start.
- // This is also the heap size by which proportional sweeping
- // must be complete.
- //
- // This is computed from triggerRatio during mark termination
- // for the next cycle's trigger.
- //
- // Protected by mheap_.lock or a STW.
- trigger uint64
-
- // consMark is the estimated per-CPU consMark ratio for the application.
- //
- // It represents the ratio between the application's allocation
- // rate, as bytes allocated per CPU-time, and the GC's scan rate,
- // as bytes scanned per CPU-time.
- // The units of this ratio are (B / cpu-ns) / (B / cpu-ns).
- //
- // At a high level, this value is computed as the bytes of memory
- // allocated (cons) per unit of scan work completed (mark) in a GC
- // cycle, divided by the CPU time spent on each activity.
- //
- // Updated at the end of each GC cycle, in endCycle.
- //
- // For goexperiment.PacerRedesign.
- consMark float64
-
- // consMarkController holds the state for the mark-cons ratio
- // estimation over time.
- //
- // Its purpose is to smooth out noisiness in the computation of
- // consMark; see consMark for details.
- //
- // For goexperiment.PacerRedesign.
- consMarkController piController
-
- _ uint32 // Padding for atomics on 32-bit platforms.
-
- // heapGoal is the goal heapLive for when next GC ends.
- // Set to ^uint64(0) if disabled.
- //
- // Read and written atomically, unless the world is stopped.
- heapGoal uint64
-
- // lastHeapGoal is the value of heapGoal for the previous GC.
- // Note that this is distinct from the last value heapGoal had,
- // because it could change if e.g. gcPercent changes.
- //
- // Read and written with the world stopped or with mheap_.lock held.
- lastHeapGoal uint64
-
- // heapLive is the number of bytes considered live by the GC.
- // That is: retained by the most recent GC plus allocated
- // since then. heapLive ≤ memstats.heapAlloc, since heapAlloc includes
- // unmarked objects that have not yet been swept (and hence goes up as we
- // allocate and down as we sweep) while heapLive excludes these
- // objects (and hence only goes up between GCs).
- //
- // This is updated atomically without locking. To reduce
- // contention, this is updated only when obtaining a span from
- // an mcentral and at this point it counts all of the
- // unallocated slots in that span (which will be allocated
- // before that mcache obtains another span from that
- // mcentral). Hence, it slightly overestimates the "true" live
- // heap size. It's better to overestimate than to
- // underestimate because 1) this triggers the GC earlier than
- // necessary rather than potentially too late and 2) this
- // leads to a conservative GC rate rather than a GC rate that
- // is potentially too low.
- //
- // Reads should likewise be atomic (or during STW).
- //
- // Whenever this is updated, call traceHeapAlloc() and
- // this gcControllerState's revise() method.
- heapLive uint64
-
- // heapScan is the number of bytes of "scannable" heap. This
- // is the live heap (as counted by heapLive), but omitting
- // no-scan objects and no-scan tails of objects.
- //
- // For !goexperiment.PacerRedesign: Whenever this is updated,
- // call this gcControllerState's revise() method. It is read
- // and written atomically or with the world stopped.
- //
- // For goexperiment.PacerRedesign: This value is fixed at the
- // start of a GC cycle, so during a GC cycle it is safe to
- // read without atomics, and it represents the maximum scannable
- // heap.
- heapScan uint64
-
- // lastHeapScan is the number of bytes of heap that were scanned
- // last GC cycle. It is the same as heapMarked, but only
- // includes the "scannable" parts of objects.
- //
- // Updated when the world is stopped.
- lastHeapScan uint64
-
- // stackScan is a snapshot of scannableStackSize taken at each GC
- // STW pause and is used in pacing decisions.
- //
- // Updated only while the world is stopped.
- stackScan uint64
-
- // scannableStackSize is the amount of allocated goroutine stack space in
- // use by goroutines.
- //
- // This number tracks allocated goroutine stack space rather than used
- // goroutine stack space (i.e. what is actually scanned) because used
- // goroutine stack space is much harder to measure cheaply. By using
- // allocated space, we make an overestimate; this is OK, it's better
- // to conservatively overcount than undercount.
- //
- // Read and updated atomically.
- scannableStackSize uint64
-
- // globalsScan is the total amount of global variable space
- // that is scannable.
- //
- // Read and updated atomically.
- globalsScan uint64
-
- // heapMarked is the number of bytes marked by the previous
- // GC. After mark termination, heapLive == heapMarked, but
- // unlike heapLive, heapMarked does not change until the
- // next mark termination.
- heapMarked uint64
-
- // heapScanWork is the total heap scan work performed this cycle.
- // stackScanWork is the total stack scan work performed this cycle.
- // globalsScanWork is the total globals scan work performed this cycle.
- //
- // These are updated atomically during the cycle. Updates occur in
- // bounded batches, since they are both written and read
- // throughout the cycle. At the end of the cycle, heapScanWork is how
- // much of the retained heap is scannable.
- //
- // Currently these are measured in bytes. For most uses, this is an
- // opaque unit of work, but for estimation the definition is important.
- //
- // Note that stackScanWork includes all allocated space, not just the
- // size of the stack itself, mirroring stackSize.
- //
- // For !goexperiment.PacerRedesign, stackScanWork and globalsScanWork
- // are always zero.
- heapScanWork atomic.Int64
- stackScanWork atomic.Int64
- globalsScanWork atomic.Int64
-
- // bgScanCredit is the scan work credit accumulated by the
- // concurrent background scan. This credit is accumulated by
- // the background scan and stolen by mutator assists. This is
- // updated atomically. Updates occur in bounded batches, since
- // it is both written and read throughout the cycle.
- bgScanCredit int64
-
- // assistTime is the nanoseconds spent in mutator assists
- // during this cycle. This is updated atomically. Updates
- // occur in bounded batches, since it is both written and read
- // throughout the cycle.
- assistTime int64
-
- // dedicatedMarkTime is the nanoseconds spent in dedicated
- // mark workers during this cycle. This is updated atomically
- // at the end of the concurrent mark phase.
- dedicatedMarkTime int64
-
- // fractionalMarkTime is the nanoseconds spent in the
- // fractional mark worker during this cycle. This is updated
- // atomically throughout the cycle and will be up-to-date if
- // the fractional mark worker is not currently running.
- fractionalMarkTime int64
-
- // idleMarkTime is the nanoseconds spent in idle marking
- // during this cycle. This is updated atomically throughout
- // the cycle.
- idleMarkTime int64
-
- // markStartTime is the absolute start time in nanoseconds
- // that assists and background mark workers started.
- markStartTime int64
-
- // dedicatedMarkWorkersNeeded is the number of dedicated mark
- // workers that need to be started. This is computed at the
- // beginning of each cycle and decremented atomically as
- // dedicated mark workers get started.
- dedicatedMarkWorkersNeeded int64
-
- // assistWorkPerByte is the ratio of scan work to allocated
- // bytes that should be performed by mutator assists. This is
- // computed at the beginning of each cycle and updated every
- // time heapScan is updated.
- assistWorkPerByte atomic.Float64
-
- // assistBytesPerWork is 1/assistWorkPerByte.
- //
- // Note that because this is read and written independently
-	// from assistWorkPerByte, users may notice a skew between
-	// the two values; such a state is safe.
- assistBytesPerWork atomic.Float64
-
- // fractionalUtilizationGoal is the fraction of wall clock
- // time that should be spent in the fractional mark worker on
- // each P that isn't running a dedicated worker.
- //
- // For example, if the utilization goal is 25% and there are
- // no dedicated workers, this will be 0.25. If the goal is
- // 25%, there is one dedicated worker, and GOMAXPROCS is 5,
- // this will be 0.05 to make up the missing 5%.
- //
- // If this is zero, no fractional workers are needed.
- fractionalUtilizationGoal float64
-
- // test indicates that this is a test-only copy of gcControllerState.
- test bool
-
- _ cpu.CacheLinePad
-}
-
-func (c *gcControllerState) init(gcPercent int32) {
- c.heapMinimum = defaultHeapMinimum
-
- if goexperiment.PacerRedesign {
- c.consMarkController = piController{
- // Tuned first via the Ziegler-Nichols process in simulation,
- // then the integral time was manually tuned against real-world
- // applications to deal with noisiness in the measured cons/mark
- // ratio.
- kp: 0.9,
- ti: 4.0,
-
- // Set a high reset time in GC cycles.
- // This is inversely proportional to the rate at which we
- // accumulate error from clipping. By making this very high
- // we make the accumulation slow. In general, clipping is
- // OK in our situation, hence the choice.
- //
- // Tune this if we get unintended effects from clipping for
- // a long time.
- tt: 1000,
- min: -1000,
- max: 1000,
- }
- } else {
- // Set a reasonable initial GC trigger.
- c.triggerRatio = 7 / 8.0
-
- // Fake a heapMarked value so it looks like a trigger at
- // heapMinimum is the appropriate growth from heapMarked.
- // This will go into computing the initial GC goal.
- c.heapMarked = uint64(float64(c.heapMinimum) / (1 + c.triggerRatio))
- }
-
- // This will also compute and set the GC trigger and goal.
- c.setGCPercent(gcPercent)
-}
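-
-// For illustration, with the non-redesign defaults above: triggerRatio = 7/8
-// and heapMinimum = 4 MiB give heapMarked = 4 MiB / (1 + 7/8) ≈ 2.1 MiB, so
-// a trigger computed as heapMarked*(1+triggerRatio) lands exactly at
-// heapMinimum.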
-
-// startCycle resets the GC controller's state and computes estimates
-// for a new GC cycle. The caller must hold worldsema and the world
-// must be stopped.
-func (c *gcControllerState) startCycle(markStartTime int64, procs int) {
- c.heapScanWork.Store(0)
- c.stackScanWork.Store(0)
- c.globalsScanWork.Store(0)
- c.bgScanCredit = 0
- c.assistTime = 0
- c.dedicatedMarkTime = 0
- c.fractionalMarkTime = 0
- c.idleMarkTime = 0
- c.markStartTime = markStartTime
- c.stackScan = atomic.Load64(&c.scannableStackSize)
-
- // Ensure that the heap goal is at least a little larger than
- // the current live heap size. This may not be the case if GC
- // start is delayed or if the allocation that pushed gcController.heapLive
- // over trigger is large or if the trigger is really close to
- // GOGC. Assist is proportional to this distance, so enforce a
- // minimum distance, even if it means going over the GOGC goal
- // by a tiny bit.
- if goexperiment.PacerRedesign {
- if c.heapGoal < c.heapLive+64<<10 {
- c.heapGoal = c.heapLive + 64<<10
- }
- } else {
- if c.heapGoal < c.heapLive+1<<20 {
- c.heapGoal = c.heapLive + 1<<20
- }
- }
-
- // Compute the background mark utilization goal. In general,
- // this may not come out exactly. We round the number of
- // dedicated workers so that the utilization is closest to
- // 25%. For small GOMAXPROCS, this would introduce too much
- // error, so we add fractional workers in that case.
- totalUtilizationGoal := float64(procs) * gcBackgroundUtilization
- c.dedicatedMarkWorkersNeeded = int64(totalUtilizationGoal + 0.5)
- utilError := float64(c.dedicatedMarkWorkersNeeded)/totalUtilizationGoal - 1
- const maxUtilError = 0.3
- if utilError < -maxUtilError || utilError > maxUtilError {
- // Rounding put us more than 30% off our goal. With
- // gcBackgroundUtilization of 25%, this happens for
- // GOMAXPROCS<=3 or GOMAXPROCS=6. Enable fractional
- // workers to compensate.
- if float64(c.dedicatedMarkWorkersNeeded) > totalUtilizationGoal {
- // Too many dedicated workers.
- c.dedicatedMarkWorkersNeeded--
- }
- c.fractionalUtilizationGoal = (totalUtilizationGoal - float64(c.dedicatedMarkWorkersNeeded)) / float64(procs)
- } else {
- c.fractionalUtilizationGoal = 0
- }
-
- // In STW mode, we just want dedicated workers.
- if debug.gcstoptheworld > 0 {
- c.dedicatedMarkWorkersNeeded = int64(procs)
- c.fractionalUtilizationGoal = 0
- }
-
- // Clear per-P state
- for _, p := range allp {
- p.gcAssistTime = 0
- p.gcFractionalMarkTime = 0
- }
-
- // Compute initial values for controls that are updated
- // throughout the cycle.
- c.revise()
-
- if debug.gcpacertrace > 0 {
- assistRatio := c.assistWorkPerByte.Load()
- print("pacer: assist ratio=", assistRatio,
- " (scan ", gcController.heapScan>>20, " MB in ",
- work.initialHeapLive>>20, "->",
- c.heapGoal>>20, " MB)",
- " workers=", c.dedicatedMarkWorkersNeeded,
- "+", c.fractionalUtilizationGoal, "\n")
- }
-}
-
-// revise updates the assist ratio during the GC cycle to account for
-// improved estimates. This should be called whenever gcController.heapScan,
-// gcController.heapLive, or gcController.heapGoal is updated. It is safe to
-// call concurrently, but it may race with other calls to revise.
-//
-// The result of this race is that the two assist ratio values may not line
-// up or may be stale. In practice this is OK because the assist ratio
-// moves slowly throughout a GC cycle, and the assist ratio is a best-effort
-// heuristic anyway. Furthermore, no part of the heuristic depends on
-// the two assist ratio values being exact reciprocals of one another, since
-// the two values are used to convert values from different sources.
-//
-// The worst case result of this raciness is that we may miss a larger shift
-// in the ratio (say, if we decide to pace more aggressively against the
-// hard heap goal) but even this "hard goal" is best-effort (see #40460).
-// The dedicated GC should ensure we don't exceed the hard goal by too much
-// in the rare case we do exceed it.
-//
-// It should only be called when gcBlackenEnabled != 0 (because this
-// is when assists are enabled and the necessary statistics are
-// available).
-func (c *gcControllerState) revise() {
- gcPercent := c.gcPercent.Load()
- if gcPercent < 0 {
- // If GC is disabled but we're running a forced GC,
- // act like GOGC is huge for the below calculations.
- gcPercent = 100000
- }
- live := atomic.Load64(&c.heapLive)
- scan := atomic.Load64(&c.heapScan)
- work := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()
-
- // Assume we're under the soft goal. Pace GC to complete at
- // heapGoal assuming the heap is in steady-state.
- heapGoal := int64(atomic.Load64(&c.heapGoal))
-
- var scanWorkExpected int64
- if goexperiment.PacerRedesign {
- // The expected scan work is computed as the amount of bytes scanned last
- // GC cycle, plus our estimate of stacks and globals work for this cycle.
- scanWorkExpected = int64(c.lastHeapScan + c.stackScan + c.globalsScan)
-
- // maxScanWork is a worst-case estimate of the amount of scan work that
- // needs to be performed in this GC cycle. Specifically, it represents
- // the case where *all* scannable memory turns out to be live.
- maxScanWork := int64(scan + c.stackScan + c.globalsScan)
- if work > scanWorkExpected {
- // We've already done more scan work than expected. Because our expectation
- // is based on a steady-state scannable heap size, we assume this means our
- // heap is growing. Compute a new heap goal that takes our existing runway
- // computed for scanWorkExpected and extrapolates it to maxScanWork, the worst-case
- // scan work. This keeps our assist ratio stable if the heap continues to grow.
- //
- // The effect of this mechanism is that assists stay flat in the face of heap
- // growths. It's OK to use more memory this cycle to scan all the live heap,
- // because the next GC cycle is inevitably going to use *at least* that much
- // memory anyway.
- extHeapGoal := int64(float64(heapGoal-int64(c.trigger))/float64(scanWorkExpected)*float64(maxScanWork)) + int64(c.trigger)
- scanWorkExpected = maxScanWork
-
- // hardGoal is a hard limit on the amount that we're willing to push back the
- // heap goal, and that's twice the heap goal (i.e. if GOGC=100 and the heap and/or
- // stacks and/or globals grow to twice their size, this limits the current GC cycle's
- // growth to 4x the original live heap's size).
- //
- // This maintains the invariant that we use no more memory than the next GC cycle
- // will anyway.
- hardGoal := int64((1.0 + float64(gcPercent)/100.0) * float64(heapGoal))
- if extHeapGoal > hardGoal {
- extHeapGoal = hardGoal
- }
- heapGoal = extHeapGoal
- }
- if int64(live) > heapGoal {
- // We're already past our heap goal, even the extrapolated one.
- // Leave ourselves some extra runway, so in the worst case we
- // finish by that point.
- const maxOvershoot = 1.1
- heapGoal = int64(float64(heapGoal) * maxOvershoot)
-
- // Compute the upper bound on the scan work remaining.
- scanWorkExpected = maxScanWork
- }
- } else {
- // Compute the expected scan work remaining.
- //
- // This is estimated based on the expected
- // steady-state scannable heap. For example, with
- // GOGC=100, only half of the scannable heap is
- // expected to be live, so that's what we target.
- //
- // (This is a float calculation to avoid overflowing on
- // 100*heapScan.)
- scanWorkExpected = int64(float64(scan) * 100 / float64(100+gcPercent))
- if int64(live) > heapGoal || work > scanWorkExpected {
- // We're past the soft goal, or we've already done more scan
- // work than we expected. Pace GC so that in the worst case it
- // will complete by the hard goal.
- const maxOvershoot = 1.1
- heapGoal = int64(float64(heapGoal) * maxOvershoot)
-
- // Compute the upper bound on the scan work remaining.
- scanWorkExpected = int64(scan)
- }
- }
-
- // Compute the remaining scan work estimate.
- //
- // Note that we currently count allocations during GC as both
- // scannable heap (heapScan) and scan work completed
- // (scanWork), so allocation will change this difference
- // slowly in the soft regime and not at all in the hard
- // regime.
- scanWorkRemaining := scanWorkExpected - work
- if scanWorkRemaining < 1000 {
- // We set a somewhat arbitrary lower bound on
- // remaining scan work since if we aim a little high,
- // we can miss by a little.
- //
- // We *do* need to enforce that this is at least 1,
- // since marking is racy and double-scanning objects
- // may legitimately make the remaining scan work
- // negative, even in the hard goal regime.
- scanWorkRemaining = 1000
- }
-
- // Compute the heap distance remaining.
- heapRemaining := heapGoal - int64(live)
- if heapRemaining <= 0 {
- // This shouldn't happen, but if it does, avoid
- // dividing by zero or setting the assist negative.
- heapRemaining = 1
- }
-
- // Compute the mutator assist ratio so by the time the mutator
- // allocates the remaining heap bytes up to heapGoal, it will
- // have done (or stolen) the remaining amount of scan work.
- // Note that the assist ratio values are updated atomically
- // but not together. This means there may be some degree of
- // skew between the two values. This is generally OK as the
- // values shift relatively slowly over the course of a GC
- // cycle.
- assistWorkPerByte := float64(scanWorkRemaining) / float64(heapRemaining)
- assistBytesPerWork := float64(heapRemaining) / float64(scanWorkRemaining)
- c.assistWorkPerByte.Store(assistWorkPerByte)
- c.assistBytesPerWork.Store(assistBytesPerWork)
-}
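-
-// A toy example of the final step (numbers invented for illustration):
-// with scanWorkRemaining = 10 MB and heapRemaining = 20 MB,
-// assistWorkPerByte = 0.5 and assistBytesPerWork = 2, i.e. an assisting
-// mutator owes half a unit of scan work for every byte it allocates.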
-
-// endCycle computes the trigger ratio (!goexperiment.PacerRedesign)
-// or the consMark estimate (goexperiment.PacerRedesign) for the next cycle.
-// Returns the trigger ratio if applicable, or 0 (goexperiment.PacerRedesign).
-// userForced indicates whether the current GC cycle was forced
-// by the application.
-func (c *gcControllerState) endCycle(now int64, procs int, userForced bool) float64 {
- // Record last heap goal for the scavenger.
- // We'll be updating the heap goal soon.
- gcController.lastHeapGoal = gcController.heapGoal
-
- // Compute the duration of time for which assists were turned on.
- assistDuration := now - c.markStartTime
-
- // Assume background mark hit its utilization goal.
- utilization := gcBackgroundUtilization
- // Add assist utilization; avoid divide by zero.
- if assistDuration > 0 {
- utilization += float64(c.assistTime) / float64(assistDuration*int64(procs))
- }
-
- if goexperiment.PacerRedesign {
- if c.heapLive <= c.trigger {
- // Shouldn't happen, but let's be very safe about this in case the
- // GC is somehow extremely short.
- //
- // In this case though, the only reasonable value for c.heapLive-c.trigger
- // would be 0, which isn't really all that useful, i.e. the GC was so short
- // that it didn't matter.
- //
- // Ignore this case and don't update anything.
- return 0
- }
- idleUtilization := 0.0
- if assistDuration > 0 {
- idleUtilization = float64(c.idleMarkTime) / float64(assistDuration*int64(procs))
- }
- // Determine the cons/mark ratio.
- //
- // The units we want for the numerator and denominator are both B / cpu-ns.
- // We get this by taking the bytes allocated or scanned, and divide by the amount of
- // CPU time it took for those operations. For allocations, that CPU time is
- //
- // assistDuration * procs * (1 - utilization)
- //
- // Where utilization includes just background GC workers and assists. It does *not*
- // include idle GC work time, because in theory the mutator is free to take that at
- // any point.
- //
- // For scanning, that CPU time is
- //
- // assistDuration * procs * (utilization + idleUtilization)
- //
- // In this case, we *include* idle utilization, because that is additional CPU time that the
-		// GC had available to it.
- //
- // In effect, idle GC time is sort of double-counted here, but it's very weird compared
- // to other kinds of GC work, because of how fluid it is. Namely, because the mutator is
- // *always* free to take it.
- //
- // So this calculation is really:
- // (heapLive-trigger) / (assistDuration * procs * (1-utilization)) /
-		//	(scanWork) / (assistDuration * procs * (utilization+idleUtilization))
- //
- // Note that because we only care about the ratio, assistDuration and procs cancel out.
- scanWork := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()
- currentConsMark := (float64(c.heapLive-c.trigger) * (utilization + idleUtilization)) /
- (float64(scanWork) * (1 - utilization))
-
- // Update cons/mark controller. The time period for this is 1 GC cycle.
- //
- // This use of a PI controller might seem strange. So, here's an explanation:
- //
- // currentConsMark represents the consMark we *should've* had to be perfectly
- // on-target for this cycle. Given that we assume the next GC will be like this
- // one in the steady-state, it stands to reason that we should just pick that
- // as our next consMark. In practice, however, currentConsMark is too noisy:
- // we're going to be wildly off-target in each GC cycle if we do that.
- //
- // What we do instead is make a long-term assumption: there is some steady-state
- // consMark value, but it's obscured by noise. By constantly shooting for this
- // noisy-but-perfect consMark value, the controller will bounce around a bit,
- // but its average behavior, in aggregate, should be less noisy and closer to
-		// the true long-term consMark value, provided it's tuned to be slightly overdamped.
- var ok bool
- oldConsMark := c.consMark
- c.consMark, ok = c.consMarkController.next(c.consMark, currentConsMark, 1.0)
- if !ok {
- // The error spiraled out of control. This is incredibly unlikely seeing
- // as this controller is essentially just a smoothing function, but it might
- // mean that something went very wrong with how currentConsMark was calculated.
- // Just reset consMark and keep going.
- c.consMark = 0
- }
-
- if debug.gcpacertrace > 0 {
- printlock()
- goal := gcGoalUtilization * 100
- print("pacer: ", int(utilization*100), "% CPU (", int(goal), " exp.) for ")
- print(c.heapScanWork.Load(), "+", c.stackScanWork.Load(), "+", c.globalsScanWork.Load(), " B work (", c.lastHeapScan+c.stackScan+c.globalsScan, " B exp.) ")
- print("in ", c.trigger, " B -> ", c.heapLive, " B (∆goal ", int64(c.heapLive)-int64(c.heapGoal), ", cons/mark ", oldConsMark, ")")
- if !ok {
- print("[controller reset]")
- }
- println()
- printunlock()
- }
- return 0
- }
-
- // !goexperiment.PacerRedesign below.
-
- if userForced {
- // Forced GC means this cycle didn't start at the
- // trigger, so where it finished isn't good
- // information about how to adjust the trigger.
- // Just leave it where it is.
- return c.triggerRatio
- }
-
- // Proportional response gain for the trigger controller. Must
- // be in [0, 1]. Lower values smooth out transient effects but
- // take longer to respond to phase changes. Higher values
- // react to phase changes quickly, but are more affected by
- // transient changes. Values near 1 may be unstable.
- const triggerGain = 0.5
-
- // Compute next cycle trigger ratio. First, this computes the
- // "error" for this cycle; that is, how far off the trigger
- // was from what it should have been, accounting for both heap
- // growth and GC CPU utilization. We compute the actual heap
- // growth during this cycle and scale that by how far off from
- // the goal CPU utilization we were (to estimate the heap
- // growth if we had the desired CPU utilization). The
- // difference between this estimate and the GOGC-based goal
- // heap growth is the error.
- goalGrowthRatio := c.effectiveGrowthRatio()
- actualGrowthRatio := float64(c.heapLive)/float64(c.heapMarked) - 1
- triggerError := goalGrowthRatio - c.triggerRatio - utilization/gcGoalUtilization*(actualGrowthRatio-c.triggerRatio)
-
- // Finally, we adjust the trigger for next time by this error,
- // damped by the proportional gain.
- triggerRatio := c.triggerRatio + triggerGain*triggerError
-
- if debug.gcpacertrace > 0 {
- // Print controller state in terms of the design
- // document.
- H_m_prev := c.heapMarked
- h_t := c.triggerRatio
- H_T := c.trigger
- h_a := actualGrowthRatio
- H_a := c.heapLive
- h_g := goalGrowthRatio
- H_g := int64(float64(H_m_prev) * (1 + h_g))
- u_a := utilization
- u_g := gcGoalUtilization
- W_a := c.heapScanWork.Load()
- print("pacer: H_m_prev=", H_m_prev,
- " h_t=", h_t, " H_T=", H_T,
- " h_a=", h_a, " H_a=", H_a,
- " h_g=", h_g, " H_g=", H_g,
- " u_a=", u_a, " u_g=", u_g,
- " W_a=", W_a,
- " goalΔ=", goalGrowthRatio-h_t,
- " actualΔ=", h_a-h_t,
- " u_a/u_g=", u_a/u_g,
- "\n")
- }
-
- return triggerRatio
-}
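-
-// A toy run of the feedback step above (invented numbers): with GOGC=100
-// (goalGrowthRatio ≈ 1.0), triggerRatio = 0.7, utilization exactly at
-// gcGoalUtilization, and actualGrowthRatio = 0.9, the error is
-// 1.0 - 0.7 - 1*(0.9-0.7) = 0.1, so the next triggerRatio is
-// 0.7 + 0.5*0.1 = 0.75.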
-
-// enlistWorker encourages another dedicated mark worker to start on
-// another P if there are spare worker slots. It is used by putfull
-// when more work is made available.
-//
-//go:nowritebarrier
-func (c *gcControllerState) enlistWorker() {
- // If there are idle Ps, wake one so it will run an idle worker.
- // NOTE: This is suspected of causing deadlocks. See golang.org/issue/19112.
- //
- // if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
- // wakep()
- // return
- // }
-
- // There are no idle Ps. If we need more dedicated workers,
- // try to preempt a running P so it will switch to a worker.
- if c.dedicatedMarkWorkersNeeded <= 0 {
- return
- }
- // Pick a random other P to preempt.
- if gomaxprocs <= 1 {
- return
- }
- gp := getg()
- if gp == nil || gp.m == nil || gp.m.p == 0 {
- return
- }
- myID := gp.m.p.ptr().id
- for tries := 0; tries < 5; tries++ {
- id := int32(fastrandn(uint32(gomaxprocs - 1)))
- if id >= myID {
- id++
- }
- p := allp[id]
- if p.status != _Prunning {
- continue
- }
- if preemptone(p) {
- return
- }
- }
-}
-
-// findRunnableGCWorker returns a background mark worker for _p_ if it
-// should be run. This must only be called when gcBlackenEnabled != 0.
-func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g {
- if gcBlackenEnabled == 0 {
- throw("gcControllerState.findRunnable: blackening not enabled")
- }
-
- if !gcMarkWorkAvailable(_p_) {
- // No work to be done right now. This can happen at
- // the end of the mark phase when there are still
- // assists tapering off. Don't bother running a worker
- // now because it'll just return immediately.
- return nil
- }
-
- // Grab a worker before we commit to running below.
- node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
- if node == nil {
- // There is at least one worker per P, so normally there are
- // enough workers to run on all Ps, if necessary. However, once
- // a worker enters gcMarkDone it may park without rejoining the
- // pool, thus freeing a P with no corresponding worker.
- // gcMarkDone never depends on another worker doing work, so it
- // is safe to simply do nothing here.
- //
- // If gcMarkDone bails out without completing the mark phase,
- // it will always do so with queued global work. Thus, that P
- // will be immediately eligible to re-run the worker G it was
- // just using, ensuring work can complete.
- return nil
- }
-
- decIfPositive := func(ptr *int64) bool {
- for {
- v := atomic.Loadint64(ptr)
- if v <= 0 {
- return false
- }
-
- if atomic.Casint64(ptr, v, v-1) {
- return true
- }
- }
- }
-
- if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
- // This P is now dedicated to marking until the end of
- // the concurrent mark phase.
- _p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
- } else if c.fractionalUtilizationGoal == 0 {
- // No need for fractional workers.
- gcBgMarkWorkerPool.push(&node.node)
- return nil
- } else {
- // Is this P behind on the fractional utilization
- // goal?
- //
- // This should be kept in sync with pollFractionalWorkerExit.
- delta := nanotime() - c.markStartTime
- if delta > 0 && float64(_p_.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal {
- // Nope. No need to run a fractional worker.
- gcBgMarkWorkerPool.push(&node.node)
- return nil
- }
- // Run a fractional worker.
- _p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
- }
-
- // Run the background mark worker.
- gp := node.gp.ptr()
- casgstatus(gp, _Gwaiting, _Grunnable)
- if trace.enabled {
- traceGoUnpark(gp, 0)
- }
- return gp
-}
-
-// resetLive sets up the controller state for the next mark phase after the end
-// of the previous one. Must be called after endCycle and before commit, before
-// the world is started.
-//
-// The world must be stopped.
-func (c *gcControllerState) resetLive(bytesMarked uint64) {
- c.heapMarked = bytesMarked
- c.heapLive = bytesMarked
- c.heapScan = uint64(c.heapScanWork.Load())
- c.lastHeapScan = uint64(c.heapScanWork.Load())
-
- // heapLive was updated, so emit a trace event.
- if trace.enabled {
- traceHeapAlloc()
- }
-}
-
-// logWorkTime updates mark work accounting in the controller by a duration of
-// work in nanoseconds.
-//
-// Safe to execute at any time.
-func (c *gcControllerState) logWorkTime(mode gcMarkWorkerMode, duration int64) {
- switch mode {
- case gcMarkWorkerDedicatedMode:
- atomic.Xaddint64(&c.dedicatedMarkTime, duration)
- atomic.Xaddint64(&c.dedicatedMarkWorkersNeeded, 1)
- case gcMarkWorkerFractionalMode:
- atomic.Xaddint64(&c.fractionalMarkTime, duration)
- case gcMarkWorkerIdleMode:
- atomic.Xaddint64(&c.idleMarkTime, duration)
- default:
- throw("logWorkTime: unknown mark worker mode")
- }
-}
-
-func (c *gcControllerState) update(dHeapLive, dHeapScan int64) {
- if dHeapLive != 0 {
- atomic.Xadd64(&gcController.heapLive, dHeapLive)
- if trace.enabled {
- // gcController.heapLive changed.
- traceHeapAlloc()
- }
- }
- // Only update heapScan in the new pacer redesign if we're not
- // currently in a GC.
- if !goexperiment.PacerRedesign || gcBlackenEnabled == 0 {
- if dHeapScan != 0 {
- atomic.Xadd64(&gcController.heapScan, dHeapScan)
- }
- }
- if gcBlackenEnabled != 0 {
- // gcController.heapLive and heapScan changed.
- c.revise()
- }
-}
-
-func (c *gcControllerState) addScannableStack(pp *p, amount int64) {
- if pp == nil {
- atomic.Xadd64(&c.scannableStackSize, amount)
- return
- }
- pp.scannableStackSizeDelta += amount
- if pp.scannableStackSizeDelta >= scannableStackSizeSlack || pp.scannableStackSizeDelta <= -scannableStackSizeSlack {
- atomic.Xadd64(&c.scannableStackSize, pp.scannableStackSizeDelta)
- pp.scannableStackSizeDelta = 0
- }
-}
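-
-// For example (illustrative): four 2 KiB stack growths on the same P only
-// bump pp.scannableStackSizeDelta; the fourth reaches the 8 KiB
-// scannableStackSizeSlack, so the full 8 KiB is flushed to
-// c.scannableStackSize in one atomic add and the delta resets to zero.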
-
-func (c *gcControllerState) addGlobals(amount int64) {
- atomic.Xadd64(&c.globalsScan, amount)
-}
-
-// commit recomputes all pacing parameters from scratch, namely
-// absolute trigger, the heap goal, mark pacing, and sweep pacing.
-//
-// If goexperiment.PacerRedesign is true, triggerRatio is ignored.
-//
-// This can be called any time. If GC is in the middle of a
-// concurrent phase, it will adjust the pacing of that phase.
-//
-// This depends on gcPercent, gcController.heapMarked, and
-// gcController.heapLive. These must be up to date.
-//
-// mheap_.lock must be held or the world must be stopped.
-func (c *gcControllerState) commit(triggerRatio float64) {
- if !c.test {
- assertWorldStoppedOrLockHeld(&mheap_.lock)
- }
-
- if !goexperiment.PacerRedesign {
- c.oldCommit(triggerRatio)
- return
- }
-
- // Compute the next GC goal, which is when the allocated heap
- // has grown by GOGC/100 over where it started the last cycle,
- // plus additional runway for non-heap sources of GC work.
- goal := ^uint64(0)
- if gcPercent := c.gcPercent.Load(); gcPercent >= 0 {
- goal = c.heapMarked + (c.heapMarked+atomic.Load64(&c.stackScan)+atomic.Load64(&c.globalsScan))*uint64(gcPercent)/100
- }
-
- // Don't trigger below the minimum heap size.
- minTrigger := c.heapMinimum
- if !isSweepDone() {
- // Concurrent sweep happens in the heap growth
- // from gcController.heapLive to trigger, so ensure
- // that concurrent sweep has some heap growth
- // in which to perform sweeping before we
- // start the next GC cycle.
- sweepMin := atomic.Load64(&c.heapLive) + sweepMinHeapDistance
- if sweepMin > minTrigger {
- minTrigger = sweepMin
- }
- }
-
- // If we let the trigger go too low, then if the application
- // is allocating very rapidly we might end up in a situation
- // where we're allocating black during a nearly always-on GC.
- // The result of this is a growing heap and ultimately an
- // increase in RSS. By capping us at a point >0, we're essentially
- // saying that we're OK using more CPU during the GC to prevent
- // this growth in RSS.
- //
- // The current constant was chosen empirically: given a sufficiently
- // fast/scalable allocator with 48 Ps that could drive the trigger ratio
- // to <0.05, this constant causes applications to retain the same peak
- // RSS compared to not having this allocator.
- if triggerBound := uint64(0.7*float64(goal-c.heapMarked)) + c.heapMarked; minTrigger < triggerBound {
- minTrigger = triggerBound
- }
-
- // For small heaps, set the max trigger point at 95% of the heap goal.
- // This ensures we always have *some* headroom when the GC actually starts.
- // For larger heaps, set the max trigger point at the goal, minus the
- // minimum heap size.
- // This choice follows from the fact that the minimum heap size is chosen
- // to reflect the costs of a GC with no work to do. With a large heap but
- // very little scan work to perform, this gives us exactly as much runway
- // as we would need, in the worst case.
- maxRunway := uint64(0.95 * float64(goal-c.heapMarked))
- if largeHeapMaxRunway := goal - c.heapMinimum; goal > c.heapMinimum && maxRunway < largeHeapMaxRunway {
- maxRunway = largeHeapMaxRunway
- }
- maxTrigger := maxRunway + c.heapMarked
- if maxTrigger < minTrigger {
- maxTrigger = minTrigger
- }
-
- // Compute the trigger by using our estimate of the cons/mark ratio.
- //
- // The idea is to take our expected scan work, and multiply it by
- // the cons/mark ratio to determine how long it'll take to complete
- // that scan work in terms of bytes allocated. This gives us our GC's
- // runway.
- //
- // However, the cons/mark ratio is a ratio of rates per CPU-second, but
- // here we care about the relative rates for some division of CPU
- // resources among the mutator and the GC.
- //
- // To summarize, we have B / cpu-ns, and we want B / ns. We get that
- // by multiplying by our desired division of CPU resources. We choose
-	// to express CPU resources as GOMAXPROCS*fraction. Note that because
- // we're working with a ratio here, we can omit the number of CPU cores,
- // because they'll appear in the numerator and denominator and cancel out.
- // As a result, this is basically just "weighing" the cons/mark ratio by
- // our desired division of resources.
- //
- // Furthermore, by setting the trigger so that CPU resources are divided
- // this way, assuming that the cons/mark ratio is correct, we make that
- // division a reality.
- var trigger uint64
- runway := uint64((c.consMark * (1 - gcGoalUtilization) / (gcGoalUtilization)) * float64(c.lastHeapScan+c.stackScan+c.globalsScan))
- if runway > goal {
- trigger = minTrigger
- } else {
- trigger = goal - runway
- }
- if trigger < minTrigger {
- trigger = minTrigger
- }
- if trigger > maxTrigger {
- trigger = maxTrigger
- }
- if trigger > goal {
- goal = trigger
- }
-
- // Commit to the trigger and goal.
- c.trigger = trigger
- atomic.Store64(&c.heapGoal, goal)
- if trace.enabled {
- traceHeapGoal()
- }
-
- // Update mark pacing.
- if gcphase != _GCoff {
- c.revise()
- }
-}
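The arithmetic in commit is easy to lose among the atomics and bound checks, so here is a minimal standalone sketch of the redesigned pacer's goal/trigger computation. It is a simplification for illustration, not the runtime's code: the sweep-distance floor and the large-heap runway adjustment are omitted, plain values stand in for gcControllerState fields, and every number in main is invented.

package main

import "fmt"

// pacerSketch mirrors the goal/trigger computation above in simplified form.
func pacerSketch(heapMarked, stackScan, globalsScan, heapMinimum, lastHeapScan uint64,
	gcPercent int64, consMark, gcGoalUtilization float64) (trigger, goal uint64) {
	goal = ^uint64(0)
	if gcPercent >= 0 {
		goal = heapMarked + (heapMarked+stackScan+globalsScan)*uint64(gcPercent)/100
	}
	// Lower bound on the trigger: never give up more than 70% of the runway.
	minTrigger := heapMinimum
	if tb := uint64(0.7*float64(goal-heapMarked)) + heapMarked; minTrigger < tb {
		minTrigger = tb
	}
	// Upper bound: always keep at least 5% headroom before the goal.
	maxTrigger := uint64(0.95*float64(goal-heapMarked)) + heapMarked
	if maxTrigger < minTrigger {
		maxTrigger = minTrigger
	}
	// Runway: expected scan work scaled by the cons/mark ratio and the
	// desired division of CPU time between the mutator and the GC.
	runway := uint64((consMark * (1 - gcGoalUtilization) / gcGoalUtilization) *
		float64(lastHeapScan+stackScan+globalsScan))
	if runway > goal {
		trigger = minTrigger
	} else {
		trigger = goal - runway
	}
	if trigger < minTrigger {
		trigger = minTrigger
	}
	if trigger > maxTrigger {
		trigger = maxTrigger
	}
	if trigger > goal {
		goal = trigger
	}
	return trigger, goal
}

func main() {
	// 64 MiB marked heap, GOGC=100, cons/mark of 2, 30% GC CPU goal.
	trigger, goal := pacerSketch(64<<20, 8<<20, 1<<20, 4<<20, 60<<20, 100, 2.0, 0.3)
	fmt.Printf("trigger=%d MiB goal=%d MiB\n", trigger>>20, goal>>20)
}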
-
-// oldCommit sets the trigger ratio and updates everything
-// derived from it: the absolute trigger, the heap goal, mark pacing,
-// and sweep pacing.
-//
- // This can be called any time. If GC is in the middle of a
-// concurrent phase, it will adjust the pacing of that phase.
-//
-// This depends on gcPercent, gcController.heapMarked, and
-// gcController.heapLive. These must be up to date.
-//
-// For !goexperiment.PacerRedesign.
-func (c *gcControllerState) oldCommit(triggerRatio float64) {
- gcPercent := c.gcPercent.Load()
-
- // Compute the next GC goal, which is when the allocated heap
- // has grown by GOGC/100 over the heap marked by the last
- // cycle.
- goal := ^uint64(0)
- if gcPercent >= 0 {
- goal = c.heapMarked + c.heapMarked*uint64(gcPercent)/100
- }
-
- // Set the trigger ratio, capped to reasonable bounds.
- if gcPercent >= 0 {
- scalingFactor := float64(gcPercent) / 100
- // Ensure there's always a little margin so that the
- // mutator assist ratio isn't infinity.
- maxTriggerRatio := 0.95 * scalingFactor
- if triggerRatio > maxTriggerRatio {
- triggerRatio = maxTriggerRatio
- }
-
- // If we let triggerRatio go too low, then if the application
- // is allocating very rapidly we might end up in a situation
- // where we're allocating black during a nearly always-on GC.
- // The result of this is a growing heap and ultimately an
- // increase in RSS. By capping us at a point >0, we're essentially
- // saying that we're OK using more CPU during the GC to prevent
- // this growth in RSS.
- //
- // The current constant was chosen empirically: given a sufficiently
- // fast/scalable allocator with 48 Ps that could drive the trigger ratio
- // to <0.05, this constant causes applications to retain the same peak
- // RSS compared to not having this allocator.
- minTriggerRatio := 0.6 * scalingFactor
- if triggerRatio < minTriggerRatio {
- triggerRatio = minTriggerRatio
- }
- } else if triggerRatio < 0 {
- // gcPercent < 0, so just make sure we're not getting a negative
- // triggerRatio. This case isn't expected to happen in practice,
- // and doesn't really matter because if gcPercent < 0 then we won't
- // ever consume triggerRatio further on in this function, but let's
- // just be defensive here; the triggerRatio being negative is almost
- // certainly undesirable.
- triggerRatio = 0
- }
- c.triggerRatio = triggerRatio
-
- // Compute the absolute GC trigger from the trigger ratio.
- //
- // We trigger the next GC cycle when the allocated heap has
- // grown by the trigger ratio over the marked heap size.
- trigger := ^uint64(0)
- if gcPercent >= 0 {
- trigger = uint64(float64(c.heapMarked) * (1 + triggerRatio))
- // Don't trigger below the minimum heap size.
- minTrigger := c.heapMinimum
- if !isSweepDone() {
- // Concurrent sweep happens in the heap growth
- // from gcController.heapLive to trigger, so ensure
- // that concurrent sweep has some heap growth
- // in which to perform sweeping before we
- // start the next GC cycle.
- sweepMin := atomic.Load64(&c.heapLive) + sweepMinHeapDistance
- if sweepMin > minTrigger {
- minTrigger = sweepMin
- }
- }
- if trigger < minTrigger {
- trigger = minTrigger
- }
- if int64(trigger) < 0 {
- print("runtime: heapGoal=", c.heapGoal, " heapMarked=", c.heapMarked, " gcController.heapLive=", c.heapLive, " initialHeapLive=", work.initialHeapLive, "triggerRatio=", triggerRatio, " minTrigger=", minTrigger, "\n")
- throw("trigger underflow")
- }
- if trigger > goal {
- // The trigger ratio is always less than GOGC/100, but
- // other bounds on the trigger may have raised it.
- // Push up the goal, too.
- goal = trigger
- }
- }
-
- // Commit to the trigger and goal.
- c.trigger = trigger
- atomic.Store64(&c.heapGoal, goal)
- if trace.enabled {
- traceHeapGoal()
- }
-
- // Update mark pacing.
- if gcphase != _GCoff {
- c.revise()
- }
-}
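To make the legacy bounds concrete (illustrative numbers, not taken from the source): with GOGC=100 and heapMarked = 100 MiB, goal = 100 * (1 + 100/100) = 200 MiB; scalingFactor = 1, so triggerRatio is clamped to [0.6, 0.95], and trigger = 100 * (1 + triggerRatio) MiB lands between 160 MiB and 195 MiB, always strictly below the goal, leaving runway for mutator assists.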
-
-// effectiveGrowthRatio returns the current effective heap growth
-// ratio (GOGC/100) based on heapMarked from the previous GC and
-// heapGoal for the current GC.
-//
-// This may differ from gcPercent/100 because of various upper and
-// lower bounds on gcPercent. For example, if the heap is smaller than
-// heapMinimum, this can be higher than gcPercent/100.
-//
-// mheap_.lock must be held or the world must be stopped.
-func (c *gcControllerState) effectiveGrowthRatio() float64 {
- if !c.test {
- assertWorldStoppedOrLockHeld(&mheap_.lock)
- }
-
- egogc := float64(atomic.Load64(&c.heapGoal)-c.heapMarked) / float64(c.heapMarked)
- if egogc < 0 {
- // Shouldn't happen, but just in case.
- egogc = 0
- }
- return egogc
-}
-
-// setGCPercent updates gcPercent and all related pacer state.
-// Returns the old value of gcPercent.
-//
-// Calls gcControllerState.commit.
-//
-// The world must be stopped, or mheap_.lock must be held.
-func (c *gcControllerState) setGCPercent(in int32) int32 {
- if !c.test {
- assertWorldStoppedOrLockHeld(&mheap_.lock)
- }
-
- out := c.gcPercent.Load()
- if in < 0 {
- in = -1
- }
- c.heapMinimum = defaultHeapMinimum * uint64(in) / 100
- c.gcPercent.Store(in)
- // Update pacing in response to gcPercent change.
- c.commit(c.triggerRatio)
-
- return out
-}
-
-//go:linkname setGCPercent runtime/debug.setGCPercent
-func setGCPercent(in int32) (out int32) {
- // Run on the system stack since we grab the heap lock.
- systemstack(func() {
- lock(&mheap_.lock)
- out = gcController.setGCPercent(in)
- gcPaceSweeper(gcController.trigger)
- gcPaceScavenger(gcController.heapGoal, gcController.lastHeapGoal)
- unlock(&mheap_.lock)
- })
-
- // If we just disabled GC, wait for any concurrent GC mark to
- // finish so we always return with no GC running.
- if in < 0 {
- gcWaitOnMark(atomic.Load(&work.cycles))
- }
-
- return out
-}
-
-func readGOGC() int32 {
- p := gogetenv("GOGC")
- if p == "off" {
- return -1
- }
- if n, ok := atoi32(p); ok {
- return n
- }
- return 100
-}
-
-type piController struct {
- kp float64 // Proportional constant.
- ti float64 // Integral time constant.
- tt float64 // Reset time.
-
- min, max float64 // Output boundaries.
-
- // PI controller state.
-
- errIntegral float64 // Integral of the error from t=0 to now.
-
- // Error flags.
- errOverflow bool // Set if errIntegral ever overflowed.
- inputOverflow bool // Set if an operation with the input overflowed.
-}
-
-// next provides a new sample to the controller.
-//
-// input is the sample, setpoint is the desired point, and period is how much
-// time (in whatever unit makes the most sense) has passed since the last sample.
-//
-// Returns a new value for the variable it's controlling, and whether the operation
- // completed successfully. One reason this might fail is if the error has been
- // growing in an unbounded manner, to the point of overflow.
- //
- // In the specific case that an error overflow occurs, the errOverflow field will be
-// set and the rest of the controller's internal state will be fully reset.
-func (c *piController) next(input, setpoint, period float64) (float64, bool) {
- // Compute the raw output value.
- prop := c.kp * (setpoint - input)
- rawOutput := prop + c.errIntegral
-
- // Clamp rawOutput into output.
- output := rawOutput
- if isInf(output) || isNaN(output) {
- // The input had a large enough magnitude that either it was already
- // overflowed, or some operation with it overflowed.
- // Set a flag and reset. That's the safest thing to do.
- c.reset()
- c.inputOverflow = true
- return c.min, false
- }
- if output < c.min {
- output = c.min
- } else if output > c.max {
- output = c.max
- }
-
- // Update the controller's state.
- if c.ti != 0 && c.tt != 0 {
- c.errIntegral += (c.kp*period/c.ti)*(setpoint-input) + (period/c.tt)*(output-rawOutput)
- if isInf(c.errIntegral) || isNaN(c.errIntegral) {
- // So much error has accumulated that we managed to overflow.
- // The assumptions around the controller have likely broken down.
- // Set a flag and reset. That's the safest thing to do.
- c.reset()
- c.errOverflow = true
- return c.min, false
- }
- }
- return output, true
-}
-
-// reset resets the controller state, except for controller error flags.
-func (c *piController) reset() {
- c.errIntegral = 0
-}
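Taken on its own, next is a textbook PI controller with output clamping and integrator anti-windup. The following standalone sketch reimplements it for illustration (the runtime uses its own isInf/isNaN rather than the math package) and drives it with the same constants that critSleepController in mgcscavenge.go uses; the feedback function in main is invented.

package main

import (
	"fmt"
	"math"
)

type piController struct {
	kp, ti, tt  float64
	min, max    float64
	errIntegral float64
}

func (c *piController) next(input, setpoint, period float64) (float64, bool) {
	prop := c.kp * (setpoint - input)
	rawOutput := prop + c.errIntegral
	output := rawOutput
	if math.IsInf(output, 0) || math.IsNaN(output) {
		c.errIntegral = 0
		return c.min, false
	}
	if output < c.min {
		output = c.min
	} else if output > c.max {
		output = c.max
	}
	if c.ti != 0 && c.tt != 0 {
		// Integrate the error, plus anti-windup feedback for the clamping above.
		c.errIntegral += (c.kp*period/c.ti)*(setpoint-input) + (period/c.tt)*(output-rawOutput)
		if math.IsInf(c.errIntegral, 0) || math.IsNaN(c.errIntegral) {
			c.errIntegral = 0
			return c.min, false
		}
	}
	return output, true
}

func main() {
	c := piController{kp: 0.3375, ti: 3.2e6, tt: 1e9, min: 0.001, max: 1000}
	out := 0.001
	for i := 0; i < 5; i++ {
		// Pretend the measured CPU fraction is proportional to the output.
		input := out * 0.02
		out, _ = c.next(input, 0.01, 1e9)
		fmt.Printf("step %d: output=%g\n", i, out)
	}
}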
diff --git a/contrib/go/_std_1.18/src/runtime/mgcscavenge.go b/contrib/go/_std_1.18/src/runtime/mgcscavenge.go
deleted file mode 100644
index 5f50378adf..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mgcscavenge.go
+++ /dev/null
@@ -1,1008 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Scavenging free pages.
-//
-// This file implements scavenging (the release of physical pages backing mapped
-// memory) of free and unused pages in the heap as a way to deal with page-level
-// fragmentation and reduce the RSS of Go applications.
-//
-// Scavenging in Go happens on two fronts: there's the background
-// (asynchronous) scavenger and the heap-growth (synchronous) scavenger.
-//
-// The former happens on a goroutine much like the background sweeper which is
-// soft-capped at using scavengePercent of the mutator's time, based on
-// order-of-magnitude estimates of the costs of scavenging. The background
-// scavenger's primary goal is to bring the estimated heap RSS of the
-// application down to a goal.
-//
-// That goal is defined as:
-// (retainExtraPercent+100) / 100 * (heapGoal / lastHeapGoal) * last_heap_inuse
-//
-// Essentially, we wish to have the application's RSS track the heap goal, but
-// the heap goal is defined in terms of bytes of objects, rather than pages like
- // RSS. As a result, we need to account for fragmentation internal to
-// spans. heapGoal / lastHeapGoal defines the ratio between the current heap goal
-// and the last heap goal, which tells us by how much the heap is growing and
-// shrinking. We estimate what the heap will grow to in terms of pages by taking
-// this ratio and multiplying it by heap_inuse at the end of the last GC, which
-// allows us to account for this additional fragmentation. Note that this
-// procedure makes the assumption that the degree of fragmentation won't change
-// dramatically over the next GC cycle. Overestimating the amount of
-// fragmentation simply results in higher memory use, which will be accounted
- // for by the next pacing update. Underestimating the fragmentation however
-// could lead to performance degradation. Handling this case is not within the
-// scope of the scavenger. Situations where the amount of fragmentation balloons
-// over the course of a single GC cycle should be considered pathologies,
-// flagged as bugs, and fixed appropriately.
-//
-// An additional factor of retainExtraPercent is added as a buffer to help ensure
-// that there's more unscavenged memory to allocate out of, since each allocation
-// out of scavenged memory incurs a potentially expensive page fault.
-//
-// The goal is updated after each GC and the scavenger's pacing parameters
-// (which live in mheap_) are updated to match. The pacing parameters work much
-// like the background sweeping parameters. The parameters define a line whose
-// horizontal axis is time and vertical axis is estimated heap RSS, and the
-// scavenger attempts to stay below that line at all times.
-//
-// The synchronous heap-growth scavenging happens whenever the heap grows in
-// size, for some definition of heap-growth. The intuition behind this is that
-// the application had to grow the heap because existing fragments were
-// not sufficiently large to satisfy a page-level memory allocation, so we
-// scavenge those fragments eagerly to offset the growth in RSS that results.
-
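As a worked instance of the goal formula above (numbers invented for illustration): with last_heap_inuse = 1 GiB at the end of the previous GC, heapGoal = 1.2 GiB, and lastHeapGoal = 1.0 GiB, the scavenger's RSS goal is (10+100)/100 * 1.2 * 1 GiB = 1.32 GiB, i.e. the expected page-level footprint of the grown heap plus a 10% buffer of unscavenged memory.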
-package runtime
-
-import (
- "internal/goos"
- "runtime/internal/atomic"
- "runtime/internal/sys"
- "unsafe"
-)
-
-const (
- // The background scavenger is paced according to these parameters.
- //
- // scavengePercent represents the portion of mutator time we're willing
- // to spend on scavenging in percent.
- scavengePercent = 1 // 1%
-
- // retainExtraPercent represents the amount of memory over the heap goal
- // that the scavenger should keep as a buffer space for the allocator.
- //
- // The purpose of maintaining this overhead is to have a greater pool of
- // unscavenged memory available for allocation (since using scavenged memory
- // incurs an additional cost), to account for heap fragmentation and
- // the ever-changing layout of the heap.
- retainExtraPercent = 10
-
- // maxPagesPerPhysPage is the maximum number of supported runtime pages per
- // physical page, based on maxPhysPageSize.
- maxPagesPerPhysPage = maxPhysPageSize / pageSize
-
- // scavengeCostRatio is the approximate ratio between the costs of using previously
- // scavenged memory and scavenging memory.
- //
- // For most systems the cost of scavenging greatly outweighs the costs
- // associated with using scavenged memory, making this constant 0. On other systems
- // (especially ones where "sysUsed" is not just a no-op) this cost is non-trivial.
- //
- // This ratio is used as part of multiplicative factor to help the scavenger account
- // for the additional costs of using scavenged memory in its pacing.
- scavengeCostRatio = 0.7 * (goos.IsDarwin + goos.IsIos)
-
- // scavengeReservationShards determines the amount of memory the scavenger
- // should reserve for scavenging at a time. Specifically, the amount of
- // memory reserved is (heap size in bytes) / scavengeReservationShards.
- scavengeReservationShards = 64
-)
-
-// heapRetained returns an estimate of the current heap RSS.
-func heapRetained() uint64 {
- return memstats.heap_sys.load() - atomic.Load64(&memstats.heap_released)
-}
-
-// gcPaceScavenger updates the scavenger's pacing, particularly
-// its rate and RSS goal. For this, it requires the current heapGoal,
-// and the heapGoal for the previous GC cycle.
-//
-// The RSS goal is based on the current heap goal with a small overhead
-// to accommodate non-determinism in the allocator.
-//
-// The pacing is based on scavengePageRate, which applies to both regular and
-// huge pages. See that constant for more information.
-//
-// Must be called whenever GC pacing is updated.
-//
-// mheap_.lock must be held or the world must be stopped.
-func gcPaceScavenger(heapGoal, lastHeapGoal uint64) {
- assertWorldStoppedOrLockHeld(&mheap_.lock)
-
- // If we're called before the first GC completed, disable scavenging.
- // We never scavenge before the 2nd GC cycle anyway (we don't have enough
- // information about the heap yet) so this is fine, and avoids a fault
- // or garbage data later.
- if lastHeapGoal == 0 {
- atomic.Store64(&mheap_.scavengeGoal, ^uint64(0))
- return
- }
- // Compute our scavenging goal.
- goalRatio := float64(heapGoal) / float64(lastHeapGoal)
- retainedGoal := uint64(float64(memstats.last_heap_inuse) * goalRatio)
- // Add retainExtraPercent overhead to retainedGoal. This calculation
- // looks strange but the purpose is to arrive at an integer division
- // (e.g. if retainExtraPercent = 12.5, then we get a divisor of 8)
- // that also avoids the overflow from a multiplication.
- retainedGoal += retainedGoal / (1.0 / (retainExtraPercent / 100.0))
- // Align it to a physical page boundary to make the following calculations
- // a bit more exact.
- retainedGoal = (retainedGoal + uint64(physPageSize) - 1) &^ (uint64(physPageSize) - 1)
-
- // Represents where we are now in the heap's contribution to RSS in bytes.
- //
- // Guaranteed to always be a multiple of physPageSize on systems where
- // physPageSize <= pageSize since we map heap_sys at a rate larger than
- // any physPageSize and release memory in multiples of the physPageSize.
- //
- // However, certain functions recategorize heap_sys as other stats (e.g.
- // stack_sys) and this happens in multiples of pageSize, so on systems
- // where physPageSize > pageSize the calculations below will not be exact.
- // Generally this is OK since we'll be off by at most one regular
- // physical page.
- retainedNow := heapRetained()
-
- // If we're already below our goal, or within one page of our goal, then disable
- // the background scavenger. We disable the background scavenger if there's
- // less than one physical page of work to do because it's not worth it.
- if retainedNow <= retainedGoal || retainedNow-retainedGoal < uint64(physPageSize) {
- atomic.Store64(&mheap_.scavengeGoal, ^uint64(0))
- return
- }
- atomic.Store64(&mheap_.scavengeGoal, retainedGoal)
-}
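The retainExtraPercent adjustment above relies on Go's untyped constants: 1.0 / (retainExtraPercent / 100.0) is evaluated exactly at compile time (constant arithmetic is arbitrary-precision), so with retainExtraPercent = 10 the divisor folds to exactly 10 and the whole adjustment stays in integer arithmetic, avoiding the overflow a retainedGoal * 110 / 100 multiplication could hit. A small self-contained check:

package main

import "fmt"

const retainExtraPercent = 10

func main() {
	retainedGoal := uint64(1 << 30) // 1 GiB
	// Same expression as in gcPaceScavenger: the divisor folds to exactly 10.
	retainedGoal += retainedGoal / (1.0 / (retainExtraPercent / 100.0))
	fmt.Println(retainedGoal) // 1181116006, i.e. 1 GiB plus 10%
}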
-
-// Sleep/wait state of the background scavenger.
-var scavenge struct {
- lock mutex
- g *g
- parked bool
- timer *timer
- sysmonWake uint32 // Set atomically.
- printControllerReset bool // Whether the scavenger is in cooldown.
-}
-
-// readyForScavenger signals sysmon to wake the scavenger because
-// there may be new work to do.
-//
-// There may be a significant delay between when this function runs
-// and when the scavenger is kicked awake, but it may be safely invoked
-// in contexts where wakeScavenger is unsafe to call directly.
-func readyForScavenger() {
- atomic.Store(&scavenge.sysmonWake, 1)
-}
-
-// wakeScavenger immediately unparks the scavenger if necessary.
-//
-// May run without a P, but it may allocate, so it must not be called
-// on any allocation path.
-//
-// mheap_.lock, scavenge.lock, and sched.lock must not be held.
-func wakeScavenger() {
- lock(&scavenge.lock)
- if scavenge.parked {
- // Notify sysmon that it shouldn't bother waking up the scavenger.
- atomic.Store(&scavenge.sysmonWake, 0)
-
- // Try to stop the timer but we don't really care if we succeed.
- // It's possible that either a timer was never started, or that
- // we're racing with it.
- // In the case that we're racing with it, there's a low chance that
- // we experience a spurious wake-up of the scavenger, but that's
- // totally safe.
- stopTimer(scavenge.timer)
-
- // Unpark the goroutine and tell it that there may have been a pacing
- // change. Note that we skip the scheduler's runnext slot because we
- // want to avoid having the scavenger interfere with the fair
- // scheduling of user goroutines. In effect, this schedules the
- // scavenger at a "lower priority" but that's OK because it'll
- // catch up on the work it missed when it does get scheduled.
- scavenge.parked = false
-
- // Ready the goroutine by injecting it. We use injectglist instead
- // of ready or goready in order to allow us to run this function
- // without a P. injectglist also avoids placing the goroutine in
- // the current P's runnext slot, which is desirable to prevent
- // the scavenger from interfering with user goroutine scheduling
- // too much.
- var list gList
- list.push(scavenge.g)
- injectglist(&list)
- }
- unlock(&scavenge.lock)
-}
-
-// scavengeSleep attempts to put the scavenger to sleep for ns.
-//
-// Note that this function should only be called by the scavenger.
-//
-// The scavenger may be woken up earlier by a pacing change, and it may not go
-// to sleep at all if there's a pending pacing change.
-//
-// Returns the amount of time actually slept.
-func scavengeSleep(ns int64) int64 {
- lock(&scavenge.lock)
-
- // Set the timer.
- //
- // This must happen here instead of inside gopark
- // because we can't close over any variables without
- // failing escape analysis.
- start := nanotime()
- resetTimer(scavenge.timer, start+ns)
-
- // Mark ourselves as asleep and go to sleep.
- scavenge.parked = true
- goparkunlock(&scavenge.lock, waitReasonSleep, traceEvGoSleep, 2)
-
- // Return how long we actually slept for.
- return nanotime() - start
-}
-
-// Background scavenger.
-//
-// The background scavenger maintains the RSS of the application below
-// the line described by the proportional scavenging statistics in
-// the mheap struct.
-func bgscavenge(c chan int) {
- scavenge.g = getg()
-
- lockInit(&scavenge.lock, lockRankScavenge)
- lock(&scavenge.lock)
- scavenge.parked = true
-
- scavenge.timer = new(timer)
- scavenge.timer.f = func(_ any, _ uintptr) {
- wakeScavenger()
- }
-
- c <- 1
- goparkunlock(&scavenge.lock, waitReasonGCScavengeWait, traceEvGoBlock, 1)
-
- // idealFraction is the ideal % of overall application CPU time that we
- // spend scavenging.
- idealFraction := float64(scavengePercent) / 100.0
-
- // Input: fraction of CPU time used.
- // Setpoint: idealFraction.
- // Output: ratio of critical time to sleep time (determines sleep time).
- //
- // The output of this controller is somewhat indirect to what we actually
- // want to achieve: how much time to sleep for. The reason for this definition
- // is to ensure that the controller's outputs have a direct relationship with
- // its inputs (as opposed to an inverse relationship), making it somewhat
- // easier to reason about for tuning purposes.
- critSleepController := piController{
- // Tuned loosely via Ziegler-Nichols process.
- kp: 0.3375,
- ti: 3.2e6,
- tt: 1e9, // 1 second reset time.
-
- // These ranges seem wide, but we want to give the controller plenty of
- // room to hunt for the optimal value.
- min: 0.001, // 1:1000
- max: 1000.0, // 1000:1
- }
- // It doesn't really matter what value we start at, but we can't be zero, because
- // that'll cause divide-by-zero issues. Pick something conservative which we'll
- // also use as a fallback.
- const startingCritSleepRatio = 0.001
- critSleepRatio := startingCritSleepRatio
- // Duration left in nanoseconds during which we avoid using the controller and
- // we hold critSleepRatio at a conservative value. Used if the controller's
- // assumptions fail to hold.
- controllerCooldown := int64(0)
- for {
- released := uintptr(0)
- crit := float64(0)
-
- // Spend at least 1 ms scavenging, otherwise the corresponding
- // sleep time to maintain our desired utilization is too low to
- // be reliable.
- const minCritTime = 1e6
- for crit < minCritTime {
- // If background scavenging is disabled or if there's no work to do just park.
- retained, goal := heapRetained(), atomic.Load64(&mheap_.scavengeGoal)
- if retained <= goal {
- break
- }
-
- // scavengeQuantum is the amount of memory we try to scavenge
- // in one go. A smaller value means the scavenger is more responsive
- // to the scheduler in case of e.g. preemption. A larger value means
- // that the overheads of scavenging are better amortized, so better
- // scavenging throughput.
- //
- // The current value is chosen assuming a cost of ~10µs/physical page
- // (this is somewhat pessimistic), which implies a worst-case latency of
- // about 160µs for 4 KiB physical pages. The current value is biased
- // toward latency over throughput.
- const scavengeQuantum = 64 << 10
-
- // Accumulate the amount of time spent scavenging.
- start := nanotime()
- r := mheap_.pages.scavenge(scavengeQuantum)
- atomic.Xadduintptr(&mheap_.pages.scav.released, r)
- end := nanotime()
-
- // On some platforms we may see end <= start if the time it takes to scavenge
- // memory is less than the minimum granularity of its clock (e.g. Windows) or
- // due to clock bugs.
- //
- // In this case, just assume scavenging takes 10 µs per regular physical page
- // (determined empirically), and conservatively ignore the impact of huge pages
- // on timing.
- const approxCritNSPerPhysicalPage = 10e3
- if end <= start {
- crit += approxCritNSPerPhysicalPage * float64(r/physPageSize)
- } else {
- crit += float64(end - start)
- }
- released += r
-
- // When using fake time just do one loop.
- if faketime != 0 {
- break
- }
- }
-
- if released == 0 {
- lock(&scavenge.lock)
- scavenge.parked = true
- goparkunlock(&scavenge.lock, waitReasonGCScavengeWait, traceEvGoBlock, 1)
- continue
- }
-
- if released < physPageSize {
- // If this happens, it means that we may have attempted to release part
- // of a physical page, but the likely effect of that is that it released
- // the whole physical page, some of which may have still been in-use.
- // This could lead to memory corruption. Throw.
- throw("released less than one physical page of memory")
- }
-
- if crit < minCritTime {
- // This means there wasn't enough work to actually fill up minCritTime.
- // That's fine; we shouldn't try to do anything with this information
- // because it's going to result in a short enough sleep request that things
- // will get messy. Just assume we did at least this much work.
- // All this means is that we'll sleep longer than we otherwise would have.
- crit = minCritTime
- }
-
- // Multiply the critical time by 1 + the ratio of the costs of using
- // scavenged memory vs. scavenging memory. This forces us to pay down
- // the cost of reusing this memory eagerly by sleeping for a longer period
- // of time and scavenging less frequently. More concretely, we avoid situations
- // where we end up scavenging so often that we hurt allocation performance
- // because of the additional overheads of using scavenged memory.
- crit *= 1 + scavengeCostRatio
-
- // Go to sleep based on how much time we spent doing work.
- slept := scavengeSleep(int64(crit / critSleepRatio))
-
- // Stop here if we're cooling down from the controller.
- if controllerCooldown > 0 {
- // crit and slept aren't exact measures of time, but it's OK to be a bit
- // sloppy here. We're just hoping we're avoiding some transient bad behavior.
- t := slept + int64(crit)
- if t > controllerCooldown {
- controllerCooldown = 0
- } else {
- controllerCooldown -= t
- }
- continue
- }
-
- // Calculate the CPU time spent.
- //
- // This may be slightly inaccurate with respect to GOMAXPROCS, but we're
- // recomputing this often enough relative to GOMAXPROCS changes in general
- // (it only changes when the world is stopped, and not during a GC) that
- // that small inaccuracy is in the noise.
- cpuFraction := float64(crit) / ((float64(slept) + crit) * float64(gomaxprocs))
-
- // Update the critSleepRatio, adjusting until we reach our ideal fraction.
- var ok bool
- critSleepRatio, ok = critSleepController.next(cpuFraction, idealFraction, float64(slept)+crit)
- if !ok {
- // The core assumption of the controller, that we can get a proportional
- // response, broke down. This may be transient, so temporarily switch to
- // sleeping a fixed, conservative amount.
- critSleepRatio = startingCritSleepRatio
- controllerCooldown = 5e9 // 5 seconds.
-
- // Signal the scav trace printer to output this.
- lock(&scavenge.lock)
- scavenge.printControllerReset = true
- unlock(&scavenge.lock)
- }
- }
-}
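To see how the sleep computation plays out, take invented numbers: idealFraction = 0.01 (scavengePercent = 1), gomaxprocs = 8, and crit = 2 ms of scavenging work per iteration. Steady state requires crit/((slept+crit)*gomaxprocs) = 0.01, so slept+crit = 2 ms / 0.08 = 25 ms. The controller should therefore settle near critSleepRatio = crit/slept = 2/23, roughly 0.087, at which point scavengeSleep is asked for about 23 ms of sleep per 2 ms of work.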
-
-// scavenge scavenges nbytes worth of free pages, starting with the
-// highest address first. Successive calls continue from where it left
-// off until the heap is exhausted. Call scavengeStartGen to bring it
-// back to the top of the heap.
-//
-// Returns the amount of memory scavenged in bytes.
-func (p *pageAlloc) scavenge(nbytes uintptr) uintptr {
- var (
- addrs addrRange
- gen uint32
- )
- released := uintptr(0)
- for released < nbytes {
- if addrs.size() == 0 {
- if addrs, gen = p.scavengeReserve(); addrs.size() == 0 {
- break
- }
- }
- systemstack(func() {
- r, a := p.scavengeOne(addrs, nbytes-released)
- released += r
- addrs = a
- })
- }
- // Only unreserve the space which hasn't been scavenged or searched
- // to ensure we always make progress.
- p.scavengeUnreserve(addrs, gen)
- return released
-}
-
-// printScavTrace prints a scavenge trace line to standard error.
-//
-// released should be the amount of memory released since the last time this
-// was called, and forced indicates whether the scavenge was forced by the
-// application.
-//
-// scavenge.lock must be held.
-func printScavTrace(gen uint32, released uintptr, forced bool) {
- assertLockHeld(&scavenge.lock)
-
- printlock()
- print("scav ", gen, " ",
- released>>10, " KiB work, ",
- atomic.Load64(&memstats.heap_released)>>10, " KiB total, ",
- (atomic.Load64(&memstats.heap_inuse)*100)/heapRetained(), "% util",
- )
- if forced {
- print(" (forced)")
- } else if scavenge.printControllerReset {
- print(" [controller reset]")
- scavenge.printControllerReset = false
- }
- println()
- printunlock()
-}
-
-// scavengeStartGen starts a new scavenge generation, resetting
-// the scavenger's search space to the full in-use address space.
-//
-// p.mheapLock must be held.
-//
-// Must run on the system stack because p.mheapLock must be held.
-//
-//go:systemstack
-func (p *pageAlloc) scavengeStartGen() {
- assertLockHeld(p.mheapLock)
-
- lock(&p.scav.lock)
- if debug.scavtrace > 0 {
- printScavTrace(p.scav.gen, atomic.Loaduintptr(&p.scav.released), false)
- }
- p.inUse.cloneInto(&p.scav.inUse)
-
- // Pick the new starting address for the scavenger cycle.
- var startAddr offAddr
- if p.scav.scavLWM.lessThan(p.scav.freeHWM) {
- // The "free" high watermark exceeds the "scavenged" low watermark,
- // so there are free scavengable pages in parts of the address space
- // that the scavenger already searched, the high watermark being the
- // highest one. Pick that as our new starting point to ensure we
- // see those pages.
- startAddr = p.scav.freeHWM
- } else {
- // The "free" high watermark does not exceed the "scavenged" low
- // watermark. This means the allocator didn't free any memory in
- // the range we scavenged last cycle, so we might as well continue
- // scavenging from where we were.
- startAddr = p.scav.scavLWM
- }
- p.scav.inUse.removeGreaterEqual(startAddr.addr())
-
- // reservationBytes may be zero if p.inUse.totalBytes is small, or if
- // scavengeReservationShards is large. This case is fine as the scavenger
- // will simply be turned off, but it does mean that scavengeReservationShards,
- // in concert with pallocChunkBytes, dictates the minimum heap size at which
- // the scavenger triggers. In practice this minimum is generally less than an
- // arena in size, so virtually every heap has the scavenger on.
- p.scav.reservationBytes = alignUp(p.inUse.totalBytes, pallocChunkBytes) / scavengeReservationShards
- p.scav.gen++
- atomic.Storeuintptr(&p.scav.released, 0)
- p.scav.freeHWM = minOffAddr
- p.scav.scavLWM = maxOffAddr
- unlock(&p.scav.lock)
-}
-
-// scavengeReserve reserves a contiguous range of the address space
-// for scavenging. The maximum amount of space it reserves is proportional
-// to the size of the heap. The ranges are reserved from the high addresses
-// first.
-//
-// Returns the reserved range and the scavenge generation number for it.
-func (p *pageAlloc) scavengeReserve() (addrRange, uint32) {
- lock(&p.scav.lock)
- gen := p.scav.gen
-
- // Start by reserving the minimum.
- r := p.scav.inUse.removeLast(p.scav.reservationBytes)
-
- // Return early if the size is zero; we don't want to use
- // the bogus address below.
- if r.size() == 0 {
- unlock(&p.scav.lock)
- return r, gen
- }
-
- // The scavenger requires that base be aligned to a
- // palloc chunk because that's the unit of operation for
- // the scavenger, so align down, potentially extending
- // the range.
- newBase := alignDown(r.base.addr(), pallocChunkBytes)
-
- // Remove from inUse however much extra we just pulled out.
- p.scav.inUse.removeGreaterEqual(newBase)
- unlock(&p.scav.lock)
-
- r.base = offAddr{newBase}
- return r, gen
-}
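alignDown above (and alignUp, used throughout this file) are runtime-internal power-of-two alignment helpers. A sketch of equivalent formulations, with an invented address and chunk size:

package main

import "fmt"

// Equivalent formulations of the runtime's power-of-two alignment helpers;
// a must be a power of two for the mask trick to be valid.
func alignDown(x, a uintptr) uintptr { return x &^ (a - 1) }
func alignUp(x, a uintptr) uintptr   { return (x + a - 1) &^ (a - 1) }

func main() {
	const chunkBytes = 4 << 20 // hypothetical palloc chunk size
	addr := uintptr(0x12345678)
	fmt.Printf("%#x %#x\n", alignDown(addr, chunkBytes), alignUp(addr, chunkBytes))
	// Output: 0x12000000 0x12400000
}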
-
-// scavengeUnreserve returns an unscavenged portion of a range that was
-// previously reserved with scavengeReserve.
-func (p *pageAlloc) scavengeUnreserve(r addrRange, gen uint32) {
- if r.size() == 0 {
- return
- }
- if r.base.addr()%pallocChunkBytes != 0 {
- throw("unreserving unaligned region")
- }
- lock(&p.scav.lock)
- if gen == p.scav.gen {
- p.scav.inUse.add(r)
- }
- unlock(&p.scav.lock)
-}
-
-// scavengeOne walks over address range work until it finds
-// a contiguous run of pages to scavenge. It will try to scavenge
-// at most max bytes at once, but may scavenge more to avoid
-// breaking huge pages. Once it scavenges some memory it returns
-// how much it scavenged in bytes.
-//
-// Returns the number of bytes scavenged and the part of work
-// which was not yet searched.
-//
-// work's base address must be aligned to pallocChunkBytes.
-//
-// Must run on the systemstack because it acquires p.mheapLock.
-//
-//go:systemstack
-func (p *pageAlloc) scavengeOne(work addrRange, max uintptr) (uintptr, addrRange) {
- // Defensively check if we've received an empty address range.
- // If so, just return.
- if work.size() == 0 {
- // Nothing to do.
- return 0, work
- }
- // Check the prerequisites of work.
- if work.base.addr()%pallocChunkBytes != 0 {
- throw("scavengeOne called with unaligned work region")
- }
- // Calculate the maximum number of pages to scavenge.
- //
- // This should be alignUp(max, pageSize) / pageSize but max can and will
- // be ^uintptr(0), so we need to be very careful not to overflow here.
- // Rather than use alignUp, calculate the number of pages rounded down
- // first, then add back one if necessary.
- maxPages := max / pageSize
- if max%pageSize != 0 {
- maxPages++
- }
-
- // Calculate the minimum number of pages we can scavenge.
- //
- // Because we can only scavenge whole physical pages, we must
- // ensure that we scavenge at least minPages each time, aligned
- // to minPages*pageSize.
- minPages := physPageSize / pageSize
- if minPages < 1 {
- minPages = 1
- }
-
- // Fast path: check the chunk containing the top-most address in work.
- if r, w := p.scavengeOneFast(work, minPages, maxPages); r != 0 {
- return r, w
- } else {
- work = w
- }
-
- // findCandidate finds the next scavenge candidate in work optimistically.
- //
- // Returns the candidate chunk index and true on success, and false on failure.
- //
- // The heap need not be locked.
- findCandidate := func(work addrRange) (chunkIdx, bool) {
- // Iterate over this work's chunks.
- for i := chunkIndex(work.limit.addr() - 1); i >= chunkIndex(work.base.addr()); i-- {
- // If this chunk is totally in-use or has no unscavenged pages, don't bother
- // doing a more sophisticated check.
- //
- // Note we're accessing the summary and the chunks without a lock, but
- // that's fine. We're being optimistic anyway.
-
- // Check quickly if there are enough free pages at all.
- if p.summary[len(p.summary)-1][i].max() < uint(minPages) {
- continue
- }
-
- // Run over the chunk looking harder for a candidate. Again, we could
- // race with a lot of different pieces of code, but we're just being
- // optimistic. Make sure we load the l2 pointer atomically though, to
- // avoid races with heap growth. It may or may not be possible to also
- // see a nil pointer in this case if we do race with heap growth, but
- // just defensively ignore the nils. This operation is optimistic anyway.
- l2 := (*[1 << pallocChunksL2Bits]pallocData)(atomic.Loadp(unsafe.Pointer(&p.chunks[i.l1()])))
- if l2 != nil && l2[i.l2()].hasScavengeCandidate(minPages) {
- return i, true
- }
- }
- return 0, false
- }
-
- // Slow path: iterate optimistically over the in-use address space
- // looking for any free and unscavenged page. If we think we see something,
- // lock and verify it!
- for work.size() != 0 {
-
- // Search for the candidate.
- candidateChunkIdx, ok := findCandidate(work)
- if !ok {
- // We didn't find a candidate, so we're done.
- work.limit = work.base
- break
- }
-
- // Lock, so we can verify what we found.
- lock(p.mheapLock)
-
- // Find, verify, and scavenge if we can.
- chunk := p.chunkOf(candidateChunkIdx)
- base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
- if npages > 0 {
- work.limit = offAddr{p.scavengeRangeLocked(candidateChunkIdx, base, npages)}
- unlock(p.mheapLock)
- return uintptr(npages) * pageSize, work
- }
- unlock(p.mheapLock)
-
- // We were fooled, so let's continue from where we left off.
- work.limit = offAddr{chunkBase(candidateChunkIdx)}
- }
- return 0, work
-}
-
-// scavengeOneFast is the fast path for scavengeOne, which just checks the top
-// chunk of work for some pages to scavenge.
-//
-// Must run on the system stack because it acquires the heap lock.
-//
-//go:systemstack
-func (p *pageAlloc) scavengeOneFast(work addrRange, minPages, maxPages uintptr) (uintptr, addrRange) {
- maxAddr := work.limit.addr() - 1
- maxChunk := chunkIndex(maxAddr)
-
- lock(p.mheapLock)
- if p.summary[len(p.summary)-1][maxChunk].max() >= uint(minPages) {
- // We only bother looking for a candidate if there are at least
- // minPages free pages at all.
- base, npages := p.chunkOf(maxChunk).findScavengeCandidate(chunkPageIndex(maxAddr), minPages, maxPages)
-
- // If we found something, scavenge it and return!
- if npages != 0 {
- work.limit = offAddr{p.scavengeRangeLocked(maxChunk, base, npages)}
- unlock(p.mheapLock)
- return uintptr(npages) * pageSize, work
- }
- }
- unlock(p.mheapLock)
-
- // Update the limit to reflect the fact that we checked maxChunk already.
- work.limit = offAddr{chunkBase(maxChunk)}
- return 0, work
-}
-
-// scavengeRangeLocked scavenges the given region of memory.
-// The region of memory is described by its chunk index (ci),
-// the starting page index of the region relative to that
-// chunk (base), and the length of the region in pages (npages).
-//
-// Returns the base address of the scavenged region.
-//
-// p.mheapLock must be held. Unlocks p.mheapLock but reacquires
-// it before returning. Must be run on the systemstack as a result.
-//
-//go:systemstack
-func (p *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) uintptr {
- assertLockHeld(p.mheapLock)
-
- // Compute the full address for the start of the range.
- addr := chunkBase(ci) + uintptr(base)*pageSize
-
- // Mark the range we're about to scavenge as allocated, because
- // we don't want any allocating goroutines to grab it while
- // the scavenging is in progress.
- if scav := p.allocRange(addr, uintptr(npages)); scav != 0 {
- throw("double scavenge")
- }
-
- // With that done, it's safe to unlock.
- unlock(p.mheapLock)
-
- // Update the scavenge low watermark.
- lock(&p.scav.lock)
- if oAddr := (offAddr{addr}); oAddr.lessThan(p.scav.scavLWM) {
- p.scav.scavLWM = oAddr
- }
- unlock(&p.scav.lock)
-
- if !p.test {
- // Only perform the actual scavenging if we're not in a test.
- // It's dangerous to do so otherwise.
- sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)
-
- // Update global accounting only when not in test, otherwise
- // the runtime's accounting will be wrong.
- nbytes := int64(npages) * pageSize
- atomic.Xadd64(&memstats.heap_released, nbytes)
-
- // Update consistent accounting too.
- stats := memstats.heapStats.acquire()
- atomic.Xaddint64(&stats.committed, -nbytes)
- atomic.Xaddint64(&stats.released, nbytes)
- memstats.heapStats.release()
- }
-
- // Relock the heap, because now we need to make these pages
- // available for allocation. Free them back to the page allocator.
- lock(p.mheapLock)
- p.free(addr, uintptr(npages), true)
-
- // Mark the range as scavenged.
- p.chunkOf(ci).scavenged.setRange(base, npages)
- return addr
-}
-
-// fillAligned returns x but with all zeroes in m-aligned
-// groups of m bits set to 1 if any bit in the group is non-zero.
-//
-// For example, fillAligned(0x0100a3, 8) == 0xff00ff.
-//
-// Note that if m == 1, this is a no-op.
-//
-// m must be a power of 2 <= maxPagesPerPhysPage.
-func fillAligned(x uint64, m uint) uint64 {
- apply := func(x uint64, c uint64) uint64 {
- // The technique used here is derived from
- // https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
- // and extended for more than just bytes (like nibbles
- // and uint16s) by using an appropriate constant.
- //
- // To summarize the technique, quoting from that page:
- // "[It] works by first zeroing the high bits of the [8]
- // bytes in the word. Subsequently, it adds a number that
- // will result in an overflow to the high bit of a byte if
- // any of the low bits were initially set. Next the high
- // bits of the original word are ORed with these values;
- // thus, the high bit of a byte is set iff any bit in the
- // byte was set. Finally, we determine if any of these high
- // bits are zero by ORing with ones everywhere except the
- // high bits and inverting the result."
- return ^((((x & c) + c) | x) | c)
- }
- // Transform x to contain a 1 bit at the top of each m-aligned
- // group of m zero bits.
- switch m {
- case 1:
- return x
- case 2:
- x = apply(x, 0x5555555555555555)
- case 4:
- x = apply(x, 0x7777777777777777)
- case 8:
- x = apply(x, 0x7f7f7f7f7f7f7f7f)
- case 16:
- x = apply(x, 0x7fff7fff7fff7fff)
- case 32:
- x = apply(x, 0x7fffffff7fffffff)
- case 64: // == maxPagesPerPhysPage
- x = apply(x, 0x7fffffffffffffff)
- default:
- throw("bad m value")
- }
- // Now, the top bit of each m-aligned group in x is set iff
- // that group was all zero in the original x.
-
- // From each group of m bits subtract 1.
- // Because we know only the top bits of each
- // m-aligned group are set, we know this will
- // set each group to have all the bits set except
- // the top bit, so just OR with the original
- // result to set all the bits.
- return ^((x - (x >> (m - 1))) | x)
-}
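The m == 8 case can be checked in isolation against the example in the doc comment. This sketch copies just that case out of fillAligned (for illustration only):

package main

import "fmt"

// fillAligned8 is the m == 8 case of fillAligned above: each byte of the
// result is 0xff iff the corresponding byte of the input had any bit set.
func fillAligned8(x uint64) uint64 {
	const c = 0x7f7f7f7f7f7f7f7f
	// Set the top bit of each byte iff the byte was all zero...
	x = ^((((x & c) + c) | x) | c)
	// ...then smear that bit down across the whole byte and invert.
	return ^((x - (x >> 7)) | x)
}

func main() {
	fmt.Printf("%#x\n", fillAligned8(0x0100a3)) // 0xff00ff
}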
-
- // hasScavengeCandidate returns true if there are any min-page-aligned groups of
-// min pages of free-and-unscavenged memory in the region represented by this
-// pallocData.
-//
-// min must be a non-zero power of 2 <= maxPagesPerPhysPage.
-func (m *pallocData) hasScavengeCandidate(min uintptr) bool {
- if min&(min-1) != 0 || min == 0 {
- print("runtime: min = ", min, "\n")
- throw("min must be a non-zero power of 2")
- } else if min > maxPagesPerPhysPage {
- print("runtime: min = ", min, "\n")
- throw("min too large")
- }
-
- // The goal of this search is to see if the chunk contains any free and unscavenged memory.
- for i := len(m.scavenged) - 1; i >= 0; i-- {
- // 1s are scavenged OR non-free => 0s are unscavenged AND free
- //
- // TODO(mknyszek): Consider splitting up fillAligned into two
- // functions, since here we technically could get by with just
- // the first half of its computation. It'll save a few instructions
- // but adds some additional code complexity.
- x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))
-
- // Quickly skip over chunks of non-free or scavenged pages.
- if x != ^uint64(0) {
- return true
- }
- }
- return false
-}
-
-// findScavengeCandidate returns a start index and a size for this pallocData
-// segment which represents a contiguous region of free and unscavenged memory.
-//
-// searchIdx indicates the page index within this chunk to start the search, but
-// note that findScavengeCandidate searches backwards through the pallocData. As a
- // result, it will return the highest scavenge candidate in address order.
-//
-// min indicates a hard minimum size and alignment for runs of pages. That is,
-// findScavengeCandidate will not return a region smaller than min pages in size,
-// or that is min pages or greater in size but not aligned to min. min must be
-// a non-zero power of 2 <= maxPagesPerPhysPage.
-//
-// max is a hint for how big of a region is desired. If max >= pallocChunkPages, then
-// findScavengeCandidate effectively returns entire free and unscavenged regions.
-// If max < pallocChunkPages, it may truncate the returned region such that size is
-// max. However, findScavengeCandidate may still return a larger region if, for
-// example, it chooses to preserve huge pages, or if max is not aligned to min (it
-// will round up). That is, even if max is small, the returned size is not guaranteed
-// to be equal to max. max is allowed to be less than min, in which case it is as if
-// max == min.
-func (m *pallocData) findScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
- if min&(min-1) != 0 || min == 0 {
- print("runtime: min = ", min, "\n")
- throw("min must be a non-zero power of 2")
- } else if min > maxPagesPerPhysPage {
- print("runtime: min = ", min, "\n")
- throw("min too large")
- }
- // max may not be min-aligned, so we might accidentally truncate to
- // a max value which causes us to return a non-min-aligned value.
- // To prevent this, align max up to a multiple of min (which is always
- // a power of 2). This also prevents max from ever being less than
- // min, unless it's zero, so handle that explicitly.
- if max == 0 {
- max = min
- } else {
- max = alignUp(max, min)
- }
-
- i := int(searchIdx / 64)
- // Start by quickly skipping over blocks of non-free or scavenged pages.
- for ; i >= 0; i-- {
- // 1s are scavenged OR non-free => 0s are unscavenged AND free
- x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))
- if x != ^uint64(0) {
- break
- }
- }
- if i < 0 {
- // Failed to find any free/unscavenged pages.
- return 0, 0
- }
- // We have something in the 64-bit chunk at i, but it could
- // extend further. Loop until we find the extent of it.
-
- // 1s are scavenged OR non-free => 0s are unscavenged AND free
- x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))
- z1 := uint(sys.LeadingZeros64(^x))
- run, end := uint(0), uint(i)*64+(64-z1)
- if x<<z1 != 0 {
- // After shifting out z1 bits, we still have 1s,
- // so the run ends inside this word.
- run = uint(sys.LeadingZeros64(x << z1))
- } else {
- // After shifting out z1 bits, we have no more 1s.
- // This means the run extends to the bottom of the
- // word so it may extend into further words.
- run = 64 - z1
- for j := i - 1; j >= 0; j-- {
- x := fillAligned(m.scavenged[j]|m.pallocBits[j], uint(min))
- run += uint(sys.LeadingZeros64(x))
- if x != 0 {
- // The run stopped in this word.
- break
- }
- }
- }
-
- // Split the run we found if it's larger than max but hold on to
- // our original length, since we may need it later.
- size := run
- if size > uint(max) {
- size = uint(max)
- }
- start := end - size
-
- // Each huge page is guaranteed to fit in a single palloc chunk.
- //
- // TODO(mknyszek): Support larger huge page sizes.
- // TODO(mknyszek): Consider taking pages-per-huge-page as a parameter
- // so we can write tests for this.
- if physHugePageSize > pageSize && physHugePageSize > physPageSize {
- // We have huge pages, so let's ensure we don't break one by scavenging
- // over a huge page boundary. If the range [start, start+size) overlaps with
- // a free-and-unscavenged huge page, we want to grow the region we scavenge
- // to include that huge page.
-
- // Compute the huge page boundary above our candidate.
- pagesPerHugePage := uintptr(physHugePageSize / pageSize)
- hugePageAbove := uint(alignUp(uintptr(start), pagesPerHugePage))
-
- // If that boundary is within our current candidate, then we may be breaking
- // a huge page.
- if hugePageAbove <= end {
- // Compute the huge page boundary below our candidate.
- hugePageBelow := uint(alignDown(uintptr(start), pagesPerHugePage))
-
- if hugePageBelow >= end-run {
- // We're in danger of breaking apart a huge page since start+size crosses
- // a huge page boundary and rounding down start to the nearest huge
- // page boundary is included in the full run we found. Include the entire
- // huge page in the bound by rounding down to the huge page size.
- size = size + (start - hugePageBelow)
- start = hugePageBelow
- }
- }
- }
- return start, size
-}
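The run-extraction step above is easier to follow on a single word. Here is a standalone sketch using math/bits (sys.LeadingZeros64 is runtime-internal; the bitmap word is invented):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// 1 bits are scavenged or non-free; 0 bits are free and unscavenged.
	x := uint64(0xf0ffffff_00ffffff)
	z1 := bits.LeadingZeros64(^x) // skip the 1s above the highest run of 0s
	var run int
	if x<<z1 != 0 {
		// Shifting out z1 bits leaves some 1s: the run ends inside this word.
		run = bits.LeadingZeros64(x << z1)
	} else {
		// The run reaches bit 0 and may continue into the next word.
		run = 64 - z1
	}
	fmt.Println(z1, run) // 4 4: four free pages below the top four used ones
}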
diff --git a/contrib/go/_std_1.18/src/runtime/mgcstack.go b/contrib/go/_std_1.18/src/runtime/mgcstack.go
deleted file mode 100644
index 49dc54e165..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mgcstack.go
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Garbage collector: stack objects and stack tracing
-// See the design doc at https://docs.google.com/document/d/1un-Jn47yByHL7I0aVIP_uVCMxjdM5mpelJhiKlIqxkE/edit?usp=sharing
-// Also see issue 22350.
-
-// Stack tracing solves the problem of determining which parts of the
-// stack are live and should be scanned. It runs as part of scanning
-// a single goroutine stack.
-//
-// Normally determining which parts of the stack are live is easy to
-// do statically, as user code has explicit references (reads and
-// writes) to stack variables. The compiler can do a simple dataflow
-// analysis to determine liveness of stack variables at every point in
-// the code. See cmd/compile/internal/gc/plive.go for that analysis.
-//
-// However, when we take the address of a stack variable, determining
-// whether that variable is still live is less clear. We can still
-// look for static accesses, but accesses through a pointer to the
-// variable are difficult in general to track statically. That pointer
-// can be passed among functions on the stack, conditionally retained,
-// etc.
-//
-// Instead, we will track pointers to stack variables dynamically.
-// All pointers to stack-allocated variables will themselves be on the
-// stack somewhere (or in associated locations, like defer records), so
-// we can find them all efficiently.
-//
-// Stack tracing is organized as a mini garbage collection tracing
-// pass. The objects in this garbage collection are all the variables
-// on the stack whose address is taken, and which themselves contain a
-// pointer. We call these variables "stack objects".
-//
-// We begin by determining all the stack objects on the stack and all
-// the statically live pointers that may point into the stack. We then
-// process each pointer to see if it points to a stack object. If it
-// does, we scan that stack object. It may contain pointers into the
-// heap, in which case those pointers are passed to the main garbage
-// collection. It may also contain pointers into the stack, in which
-// case we add them to our set of stack pointers.
-//
-// Once we're done processing all the pointers (including the ones we
-// added during processing), we've found all the stack objects that
-// are live. Any dead stack objects are not scanned and their contents
-// will not keep heap objects live. Unlike the main garbage
-// collection, we can't sweep the dead stack objects; they live on in
-// a moribund state until the stack frame that contains them is
-// popped.
-//
-// A stack can look like this:
-//
-// +----------+
-// | foo() |
-// | +------+ |
-// | | A | | <---\
-// | +------+ | |
-// | | |
-// | +------+ | |
-// | | B | | |
-// | +------+ | |
-// | | |
-// +----------+ |
-// | bar() | |
-// | +------+ | |
-// | | C | | <-\ |
-// | +----|-+ | | |
-// | | | | |
-// | +----v-+ | | |
-// | | D ---------/
-// | +------+ | |
-// | | |
-// +----------+ |
-// | baz() | |
-// | +------+ | |
-// | | E -------/
-// | +------+ |
-// | ^ |
-// | F: --/ |
-// | |
-// +----------+
-//
-// foo() calls bar() calls baz(). Each has a frame on the stack.
-// foo() has stack objects A and B.
-// bar() has stack objects C and D, with C pointing to D and D pointing to A.
-// baz() has a stack object E pointing to C, and a local variable F pointing to E.
-//
-// Starting from the pointer in local variable F, we will eventually
-// scan all of E, C, D, and A (in that order). B is never scanned
-// because there is no live pointer to it. If B is also statically
-// dead (meaning that foo() never accesses B again after it calls
-// bar()), then B's pointers into the heap are not considered live.
-
-package runtime
-
-import (
- "internal/goarch"
- "unsafe"
-)
-
-const stackTraceDebug = false
-
-// Buffer for pointers found during stack tracing.
-// Must be smaller than or equal to workbuf.
-//
-//go:notinheap
-type stackWorkBuf struct {
- stackWorkBufHdr
- obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr
-}
-
-// Header declaration must come after the buf declaration above, because of issue #14620.
-//
-//go:notinheap
-type stackWorkBufHdr struct {
- workbufhdr
- next *stackWorkBuf // linked list of workbufs
- // Note: we could theoretically repurpose lfnode.next as this next pointer.
- // It would save 1 word, but that probably isn't worth busting open
- // the lfnode API.
-}
-
-// Buffer for stack objects found on a goroutine stack.
-// Must be smaller than or equal to workbuf.
-//
-//go:notinheap
-type stackObjectBuf struct {
- stackObjectBufHdr
- obj [(_WorkbufSize - unsafe.Sizeof(stackObjectBufHdr{})) / unsafe.Sizeof(stackObject{})]stackObject
-}
-
-//go:notinheap
-type stackObjectBufHdr struct {
- workbufhdr
- next *stackObjectBuf
-}
-
-func init() {
- if unsafe.Sizeof(stackWorkBuf{}) > unsafe.Sizeof(workbuf{}) {
- panic("stackWorkBuf too big")
- }
- if unsafe.Sizeof(stackObjectBuf{}) > unsafe.Sizeof(workbuf{}) {
- panic("stackObjectBuf too big")
- }
-}
-
-// A stackObject represents a variable on the stack that has had
-// its address taken.
-//
-//go:notinheap
-type stackObject struct {
- off uint32 // offset above stack.lo
- size uint32 // size of object
- r *stackObjectRecord // info of the object (for ptr/nonptr bits). nil if object has been scanned.
- left *stackObject // objects with lower addresses
- right *stackObject // objects with higher addresses
-}
-
-// obj.r = r, but with no write barrier.
-//go:nowritebarrier
-func (obj *stackObject) setRecord(r *stackObjectRecord) {
- // Types of stack objects are always in read-only memory, not the heap.
- // So not using a write barrier is ok.
- *(*uintptr)(unsafe.Pointer(&obj.r)) = uintptr(unsafe.Pointer(r))
-}
-
-// A stackScanState keeps track of the state used during the GC walk
-// of a goroutine.
-type stackScanState struct {
- cache pcvalueCache
-
- // stack limits
- stack stack
-
- // conservative indicates that the next frame must be scanned conservatively.
- // This applies only to the innermost frame at an async safe-point.
- conservative bool
-
- // buf contains the set of possible pointers to stack objects.
- // Organized as a LIFO linked list of buffers.
- // All buffers except possibly the head buffer are full.
- buf *stackWorkBuf
- freeBuf *stackWorkBuf // keep around one free buffer for allocation hysteresis
-
- // cbuf contains conservative pointers to stack objects. If
- // all pointers to a stack object are obtained via
- // conservative scanning, then the stack object may be dead
- // and may contain dead pointers, so it must be scanned
- // defensively.
- cbuf *stackWorkBuf
-
- // list of stack objects
- // Objects are in increasing address order.
- head *stackObjectBuf
- tail *stackObjectBuf
- nobjs int
-
- // root of binary tree for fast object lookup by address
- // Initialized by buildIndex.
- root *stackObject
-}
-
-// Add p as a potential pointer to a stack object.
-// p must be a stack address.
-func (s *stackScanState) putPtr(p uintptr, conservative bool) {
- if p < s.stack.lo || p >= s.stack.hi {
- throw("address not a stack address")
- }
- head := &s.buf
- if conservative {
- head = &s.cbuf
- }
- buf := *head
- if buf == nil {
- // Initial setup.
- buf = (*stackWorkBuf)(unsafe.Pointer(getempty()))
- buf.nobj = 0
- buf.next = nil
- *head = buf
- } else if buf.nobj == len(buf.obj) {
- if s.freeBuf != nil {
- buf = s.freeBuf
- s.freeBuf = nil
- } else {
- buf = (*stackWorkBuf)(unsafe.Pointer(getempty()))
- }
- buf.nobj = 0
- buf.next = *head
- *head = buf
- }
- buf.obj[buf.nobj] = p
- buf.nobj++
-}
-
-// Remove and return a potential pointer to a stack object.
-// Returns 0 if there are no more pointers available.
-//
-// This prefers non-conservative pointers so we scan stack objects
-// precisely if there are any non-conservative pointers to them.
-func (s *stackScanState) getPtr() (p uintptr, conservative bool) {
- for _, head := range []**stackWorkBuf{&s.buf, &s.cbuf} {
- buf := *head
- if buf == nil {
- // Never had any data.
- continue
- }
- if buf.nobj == 0 {
- if s.freeBuf != nil {
- // Free old freeBuf.
- putempty((*workbuf)(unsafe.Pointer(s.freeBuf)))
- }
- // Move buf to the freeBuf.
- s.freeBuf = buf
- buf = buf.next
- *head = buf
- if buf == nil {
- // No more data in this list.
- continue
- }
- }
- buf.nobj--
- return buf.obj[buf.nobj], head == &s.cbuf
- }
- // No more data in either list.
- if s.freeBuf != nil {
- putempty((*workbuf)(unsafe.Pointer(s.freeBuf)))
- s.freeBuf = nil
- }
- return 0, false
-}
-
-// addObject adds a stack object at addr of type typ to the set of stack objects.
-func (s *stackScanState) addObject(addr uintptr, r *stackObjectRecord) {
- x := s.tail
- if x == nil {
- // initial setup
- x = (*stackObjectBuf)(unsafe.Pointer(getempty()))
- x.next = nil
- s.head = x
- s.tail = x
- }
- if x.nobj > 0 && uint32(addr-s.stack.lo) < x.obj[x.nobj-1].off+x.obj[x.nobj-1].size {
- throw("objects added out of order or overlapping")
- }
- if x.nobj == len(x.obj) {
- // full buffer - allocate a new buffer, add to end of linked list
- y := (*stackObjectBuf)(unsafe.Pointer(getempty()))
- y.next = nil
- x.next = y
- s.tail = y
- x = y
- }
- obj := &x.obj[x.nobj]
- x.nobj++
- obj.off = uint32(addr - s.stack.lo)
- obj.size = uint32(r.size)
- obj.setRecord(r)
- // obj.left and obj.right will be initialized by buildIndex before use.
- s.nobjs++
-}
-
-// buildIndex initializes s.root to a binary search tree.
-// It should be called after all addObject calls but before
-// any call of findObject.
-func (s *stackScanState) buildIndex() {
- s.root, _, _ = binarySearchTree(s.head, 0, s.nobjs)
-}
-
-// Build a binary search tree with the n objects in the list
-// x.obj[idx], x.obj[idx+1], ..., x.next.obj[0], ...
-// Returns the root of that tree, and the buf+idx of the nth object after x.obj[idx].
-// (The first object that was not included in the binary search tree.)
-// If n == 0, returns nil, x.
-func binarySearchTree(x *stackObjectBuf, idx int, n int) (root *stackObject, restBuf *stackObjectBuf, restIdx int) {
- if n == 0 {
- return nil, x, idx
- }
- var left, right *stackObject
- left, x, idx = binarySearchTree(x, idx, n/2)
- root = &x.obj[idx]
- idx++
- if idx == len(x.obj) {
- x = x.next
- idx = 0
- }
- right, x, idx = binarySearchTree(x, idx, n-n/2-1)
- root.left = left
- root.right = right
- return root, x, idx
-}
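
binarySearchTree builds a perfectly balanced tree from an already-sorted stream in O(n), with no rebalancing: recurse on the left half, take the next element as the root, then recurse on the right half. A standalone sketch of the same recursion over a plain sorted slice (node and build are illustrative names):

```go
package main

import "fmt"

type node struct {
	val         int
	left, right *node
}

// build turns the first n elements of the sorted slice vals into a
// balanced BST: recurse on the left half, take the next element as the
// root, recurse on the right half. It returns the root and the
// unconsumed remainder of vals.
func build(vals []int, n int) (*node, []int) {
	if n == 0 {
		return nil, vals
	}
	left, rest := build(vals, n/2)
	root := &node{val: rest[0], left: left}
	root.right, rest = build(rest[1:], n-n/2-1)
	return root, rest
}

func main() {
	sorted := []int{1, 2, 3, 5, 8, 13, 21}
	root, _ := build(sorted, len(sorted))
	fmt.Println(root.val) // 5: the median ends up at the root
}
```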
-
-// findObject returns the stack object containing address a, if any.
-// Must have called buildIndex previously.
-func (s *stackScanState) findObject(a uintptr) *stackObject {
- off := uint32(a - s.stack.lo)
- obj := s.root
- for {
- if obj == nil {
- return nil
- }
- if off < obj.off {
- obj = obj.left
- continue
- }
- if off >= obj.off+obj.size {
- obj = obj.right
- continue
- }
- return obj
- }
-}
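
findObject is then a standard lookup over non-overlapping [off, off+size) intervals: descend left when the address is below a node's range, right when it is at or past the range's end, and stop when a node contains it. A minimal sketch with illustrative names:

```go
package main

import "fmt"

type obj struct {
	off, size   uint32
	left, right *obj
}

// find returns the object whose [off, off+size) range contains off, or nil.
func find(root *obj, off uint32) *obj {
	for root != nil {
		switch {
		case off < root.off:
			root = root.left
		case off >= root.off+root.size:
			root = root.right
		default:
			return root
		}
	}
	return nil
}

func main() {
	// Three non-overlapping objects: [0,8), [16,20), [32,64).
	root := &obj{off: 16, size: 4,
		left:  &obj{off: 0, size: 8},
		right: &obj{off: 32, size: 32}}
	fmt.Println(find(root, 18) != nil) // true: inside [16,20)
	fmt.Println(find(root, 24) != nil) // false: in the gap
}
```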
diff --git a/contrib/go/_std_1.18/src/runtime/mgcsweep.go b/contrib/go/_std_1.18/src/runtime/mgcsweep.go
deleted file mode 100644
index 0d58f8e0b5..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mgcsweep.go
+++ /dev/null
@@ -1,878 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Garbage collector: sweeping
-
-// The sweeper consists of two different algorithms:
-//
-// * The object reclaimer finds and frees unmarked slots in spans. It
-// can free a whole span if none of the objects are marked, but that
-// isn't its goal. This can be driven either synchronously by
-// mcentral.cacheSpan for mcentral spans, or asynchronously by
-// sweepone, which looks at all the mcentral lists.
-//
-// * The span reclaimer looks for spans that contain no marked objects
-// and frees whole spans. This is a separate algorithm because
-// freeing whole spans is the hardest task for the object reclaimer,
-// but is critical when allocating new spans. The entry point for
-// this is mheap_.reclaim and it's driven by a sequential scan of
-// the page marks bitmap in the heap arenas.
-//
-// Both algorithms ultimately call mspan.sweep, which sweeps a single
-// heap span.
-
-package runtime
-
-import (
- "runtime/internal/atomic"
- "unsafe"
-)
-
-var sweep sweepdata
-
-// State of background sweep.
-type sweepdata struct {
- lock mutex
- g *g
- parked bool
- started bool
-
- nbgsweep uint32
- npausesweep uint32
-
- // active tracks outstanding sweepers and the sweep
- // termination condition.
- active activeSweep
-
- // centralIndex is the current unswept span class.
- // It represents an index into the mcentral span
- // sets. Accessed and updated via its load and
- // update methods. Not protected by a lock.
- //
- // Reset at mark termination.
- // Used by mheap.nextSpanForSweep.
- centralIndex sweepClass
-}
-
-// sweepClass is a spanClass and one bit to represent whether we're currently
-// sweeping partial or full spans.
-type sweepClass uint32
-
-const (
- numSweepClasses = numSpanClasses * 2
- sweepClassDone sweepClass = sweepClass(^uint32(0))
-)
-
-func (s *sweepClass) load() sweepClass {
- return sweepClass(atomic.Load((*uint32)(s)))
-}
-
-func (s *sweepClass) update(sNew sweepClass) {
- // Only update *s if its current value is less than sNew,
- // since *s increases monotonically.
- sOld := s.load()
- for sOld < sNew && !atomic.Cas((*uint32)(s), uint32(sOld), uint32(sNew)) {
- sOld = s.load()
- }
- // TODO(mknyszek): This isn't the only place we have
- // an atomic monotonically increasing counter. It would
- // be nice to have an "atomic max" which is just implemented
- // as the above on most architectures. Some architectures
- // like RISC-V however have native support for an atomic max.
-}
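
As the TODO notes, update is a CAS-based "atomic max". Here is the same loop as a standalone helper over the standard sync/atomic package; atomicMax is an illustrative name, not a runtime API:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// atomicMax raises *addr to v unless another writer already stored
// something >= v, retrying the CAS until one of the two holds.
func atomicMax(addr *uint32, v uint32) {
	for {
		old := atomic.LoadUint32(addr)
		if old >= v || atomic.CompareAndSwapUint32(addr, old, v) {
			return
		}
	}
}

func main() {
	var max uint32
	var wg sync.WaitGroup
	for i := uint32(1); i <= 100; i++ {
		wg.Add(1)
		go func(v uint32) {
			defer wg.Done()
			atomicMax(&max, v)
		}(i)
	}
	wg.Wait()
	fmt.Println(max) // always 100, whatever the interleaving
}
```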
-
-func (s *sweepClass) clear() {
- atomic.Store((*uint32)(s), 0)
-}
-
-// split returns the underlying span class as well as
-// whether we're interested in the full or partial
-// unswept lists for that class, indicated as a boolean
-// (true means "full").
-func (s sweepClass) split() (spc spanClass, full bool) {
- return spanClass(s >> 1), s&1 == 0
-}
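
As split shows, a sweepClass packs a spanClass and a full/partial bit into one integer, with the full list on even values, so a single monotonically increasing counter visits full-then-partial for each class in turn. A small sketch of the encoding (join is illustrative; the runtime only ever splits):

```go
package main

import "fmt"

type spanClass uint8
type sweepClass uint32

// join packs a span class and a full/partial choice; it is the inverse
// of split and exists only for this demonstration.
func join(spc spanClass, full bool) sweepClass {
	sc := sweepClass(spc) << 1
	if !full {
		sc |= 1
	}
	return sc
}

func (s sweepClass) split() (spc spanClass, full bool) {
	return spanClass(s >> 1), s&1 == 0
}

func main() {
	// One increasing counter walks full-then-partial for each class.
	for sc := sweepClass(0); sc < 6; sc++ {
		spc, full := sc.split()
		fmt.Printf("sweepClass %d -> spanClass %d full=%v\n", sc, spc, full)
	}
	fmt.Println(join(2, false).split()) // 2 false: round trip
}
```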
-
-// nextSpanForSweep finds and pops the next span for sweeping from the
-// central sweep buffers. It returns ownership of the span to the caller.
-// Returns nil if no such span exists.
-func (h *mheap) nextSpanForSweep() *mspan {
- sg := h.sweepgen
- for sc := sweep.centralIndex.load(); sc < numSweepClasses; sc++ {
- spc, full := sc.split()
- c := &h.central[spc].mcentral
- var s *mspan
- if full {
- s = c.fullUnswept(sg).pop()
- } else {
- s = c.partialUnswept(sg).pop()
- }
- if s != nil {
- // Write down that we found something so future sweepers
- // can start from here.
- sweep.centralIndex.update(sc)
- return s
- }
- }
- // Write down that we found nothing.
- sweep.centralIndex.update(sweepClassDone)
- return nil
-}
-
-const sweepDrainedMask = 1 << 31
-
-// activeSweep is a type that captures whether sweeping
-// is done, and whether there are any outstanding sweepers.
-//
-// Every potential sweeper must call begin() before they look
-// for work, and end() after they've finished sweeping.
-type activeSweep struct {
- // state is divided into two parts.
- //
- // The top bit (masked by sweepDrainedMask) is a boolean
- // value indicating whether all the sweep work has been
- // drained from the queue.
- //
- // The rest of the bits are a counter, indicating the
- // number of outstanding concurrent sweepers.
- state atomic.Uint32
-}
-
-// begin registers a new sweeper. Returns a sweepLocker
-// for acquiring spans for sweeping. Any outstanding sweeper blocks
-// sweep termination.
-//
-// If the sweepLocker is invalid, the caller can be sure that all
-// outstanding sweep work has been drained, so there is nothing left
-// to sweep. Note that there may be sweepers currently running, so
-// this does not indicate that all sweeping has completed.
-//
-// Even if the sweepLocker is invalid, its sweepGen is always valid.
-func (a *activeSweep) begin() sweepLocker {
- for {
- state := a.state.Load()
- if state&sweepDrainedMask != 0 {
- return sweepLocker{mheap_.sweepgen, false}
- }
- if a.state.CompareAndSwap(state, state+1) {
- return sweepLocker{mheap_.sweepgen, true}
- }
- }
-}
-
-// end deregisters a sweeper. Must be called once for each time
-// begin is called if the sweepLocker is valid.
-func (a *activeSweep) end(sl sweepLocker) {
- if sl.sweepGen != mheap_.sweepgen {
- throw("sweeper left outstanding across sweep generations")
- }
- for {
- state := a.state.Load()
- if (state&^sweepDrainedMask)-1 >= sweepDrainedMask {
- throw("mismatched begin/end of activeSweep")
- }
- if a.state.CompareAndSwap(state, state-1) {
- if state != sweepDrainedMask {
- return
- }
- if debug.gcpacertrace > 0 {
- print("pacer: sweep done at heap size ", gcController.heapLive>>20, "MB; allocated ", (gcController.heapLive-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept.Load(), " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
- }
- return
- }
- }
-}
-
-// markDrained marks the active sweep cycle as having drained
-// all remaining work. This is safe to be called concurrently
-// with all other methods of activeSweep, though may race.
-//
-// Returns true if this call was the one that actually performed
-// the mark.
-func (a *activeSweep) markDrained() bool {
- for {
- state := a.state.Load()
- if state&sweepDrainedMask != 0 {
- return false
- }
- if a.state.CompareAndSwap(state, state|sweepDrainedMask) {
- return true
- }
- }
-}
-
-// sweepers returns the current number of active sweepers.
-func (a *activeSweep) sweepers() uint32 {
- return a.state.Load() &^ sweepDrainedMask
-}
-
-// isDone returns true if all sweep work has been drained and no more
-// outstanding sweepers exist. That is, when the sweep phase is
-// completely done.
-func (a *activeSweep) isDone() bool {
- return a.state.Load() == sweepDrainedMask
-}
-
-// reset sets up the activeSweep for the next sweep cycle.
-//
-// The world must be stopped.
-func (a *activeSweep) reset() {
- assertWorldStopped()
- a.state.Store(0)
-}
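
activeSweep's trick is packing a drained flag and a sweeper count into one word, so a single atomic load observes both at once. A reduced sketch of that state word, using the sync/atomic wrapper types available as of Go 1.19; tracker and its methods are illustrative and omit the pacer tracing done by the real end:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

const drainedMask = 1 << 31

// tracker packs a "drained" flag (top bit) and a count of outstanding
// workers (low 31 bits) into one word.
type tracker struct{ state atomic.Uint32 }

// begin registers a worker; it fails once the queue is marked drained.
func (t *tracker) begin() bool {
	for {
		s := t.state.Load()
		if s&drainedMask != 0 {
			return false
		}
		if t.state.CompareAndSwap(s, s+1) {
			return true
		}
	}
}

// end deregisters a worker.
func (t *tracker) end() { t.state.Add(^uint32(0)) } // wraparound add == subtract 1

// markDrained sets the drained flag, leaving the worker count intact.
func (t *tracker) markDrained() {
	for {
		s := t.state.Load()
		if s&drainedMask != 0 || t.state.CompareAndSwap(s, s|drainedMask) {
			return
		}
	}
}

// done reports "drained and no outstanding workers".
func (t *tracker) done() bool { return t.state.Load() == drainedMask }

func main() {
	var t tracker
	t.begin()
	t.begin()
	t.markDrained()
	fmt.Println(t.done()) // false: two workers still running
	t.end()
	t.end()
	fmt.Println(t.done()) // true
}
```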
-
-// finishsweep_m ensures that all spans are swept.
-//
-// The world must be stopped. This ensures there are no sweeps in
-// progress.
-//
-//go:nowritebarrier
-func finishsweep_m() {
- assertWorldStopped()
-
- // Sweeping must be complete before marking commences, so
- // sweep any unswept spans. If this is a concurrent GC, there
- // shouldn't be any spans left to sweep, so this should finish
- // instantly. If GC was forced before the concurrent sweep
- // finished, there may be spans to sweep.
- for sweepone() != ^uintptr(0) {
- sweep.npausesweep++
- }
-
- // Make sure there aren't any outstanding sweepers left.
- // At this point, with the world stopped, it means one of two
-	// things: either we were able to preempt a sweeper, or a
-	// sweeper didn't call sweep.active.end when it should have.
- // Both cases indicate a bug, so throw.
- if sweep.active.sweepers() != 0 {
- throw("active sweepers found at start of mark phase")
- }
-
- // Reset all the unswept buffers, which should be empty.
- // Do this in sweep termination as opposed to mark termination
- // so that we can catch unswept spans and reclaim blocks as
- // soon as possible.
- sg := mheap_.sweepgen
- for i := range mheap_.central {
- c := &mheap_.central[i].mcentral
- c.partialUnswept(sg).reset()
- c.fullUnswept(sg).reset()
- }
-
- // Sweeping is done, so if the scavenger isn't already awake,
- // wake it up. There's definitely work for it to do at this
- // point.
- wakeScavenger()
-
- nextMarkBitArenaEpoch()
-}
-
-func bgsweep(c chan int) {
- sweep.g = getg()
-
- lockInit(&sweep.lock, lockRankSweep)
- lock(&sweep.lock)
- sweep.parked = true
- c <- 1
- goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
-
- for {
- for sweepone() != ^uintptr(0) {
- sweep.nbgsweep++
- Gosched()
- }
- for freeSomeWbufs(true) {
- Gosched()
- }
- lock(&sweep.lock)
- if !isSweepDone() {
- // This can happen if a GC runs between
- // gosweepone returning ^0 above
- // and the lock being acquired.
- unlock(&sweep.lock)
- continue
- }
- sweep.parked = true
- goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
- }
-}
-
-// sweepLocker acquires sweep ownership of spans.
-type sweepLocker struct {
- // sweepGen is the sweep generation of the heap.
- sweepGen uint32
- valid bool
-}
-
-// sweepLocked represents sweep ownership of a span.
-type sweepLocked struct {
- *mspan
-}
-
-// tryAcquire attempts to acquire sweep ownership of span s. If it
-// successfully acquires ownership, it blocks sweep completion.
-func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool) {
- if !l.valid {
- throw("use of invalid sweepLocker")
- }
- // Check before attempting to CAS.
- if atomic.Load(&s.sweepgen) != l.sweepGen-2 {
- return sweepLocked{}, false
- }
- // Attempt to acquire sweep ownership of s.
- if !atomic.Cas(&s.sweepgen, l.sweepGen-2, l.sweepGen-1) {
- return sweepLocked{}, false
- }
- return sweepLocked{s}, true
-}
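
tryAcquire relies on the runtime's sweepgen convention: relative to the heap's generation h, a span's sweepgen of h-2 means it needs sweeping, h-1 means it is being swept, and h means it is swept. A standalone sketch of that CAS handshake (span and tryAcquire here are illustrative reductions):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// span is an illustrative stand-in carrying only the generation field.
type span struct{ sweepgen uint32 }

// tryAcquire claims s for sweeping under heap generation gen: only a
// span at gen-2 (unswept) is eligible, and only one of many concurrent
// callers can win the CAS that moves it to gen-1 (being swept).
func tryAcquire(s *span, gen uint32) bool {
	if atomic.LoadUint32(&s.sweepgen) != gen-2 {
		return false // already swept, or someone is sweeping it
	}
	return atomic.CompareAndSwapUint32(&s.sweepgen, gen-2, gen-1)
}

func main() {
	const gen = 10
	s := &span{sweepgen: gen - 2}        // unswept
	fmt.Println(tryAcquire(s, gen))      // true: we own the sweep
	fmt.Println(tryAcquire(s, gen))      // false: sweep in progress
	atomic.StoreUint32(&s.sweepgen, gen) // sweeping finished
	fmt.Println(tryAcquire(s, gen))      // false: already swept
}
```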
-
-// sweepone sweeps some unswept heap span and returns the number of pages returned
-// to the heap, or ^uintptr(0) if there was nothing to sweep.
-func sweepone() uintptr {
- gp := getg()
-
-	// Increment locks to ensure that the goroutine is not preempted
-	// in the middle of the sweep, which would leave the span in an
-	// inconsistent state for the next GC.
- gp.m.locks++
-
- // TODO(austin): sweepone is almost always called in a loop;
- // lift the sweepLocker into its callers.
- sl := sweep.active.begin()
- if !sl.valid {
- gp.m.locks--
- return ^uintptr(0)
- }
-
- // Find a span to sweep.
- npages := ^uintptr(0)
- var noMoreWork bool
- for {
- s := mheap_.nextSpanForSweep()
- if s == nil {
- noMoreWork = sweep.active.markDrained()
- break
- }
- if state := s.state.get(); state != mSpanInUse {
- // This can happen if direct sweeping already
- // swept this span, but in that case the sweep
- // generation should always be up-to-date.
- if !(s.sweepgen == sl.sweepGen || s.sweepgen == sl.sweepGen+3) {
- print("runtime: bad span s.state=", state, " s.sweepgen=", s.sweepgen, " sweepgen=", sl.sweepGen, "\n")
- throw("non in-use span in unswept list")
- }
- continue
- }
- if s, ok := sl.tryAcquire(s); ok {
- // Sweep the span we found.
- npages = s.npages
- if s.sweep(false) {
- // Whole span was freed. Count it toward the
- // page reclaimer credit since these pages can
- // now be used for span allocation.
- mheap_.reclaimCredit.Add(npages)
- } else {
- // Span is still in-use, so this returned no
- // pages to the heap and the span needs to
- // move to the swept in-use list.
- npages = 0
- }
- break
- }
- }
- sweep.active.end(sl)
-
- if noMoreWork {
- // The sweep list is empty. There may still be
- // concurrent sweeps running, but we're at least very
- // close to done sweeping.
-
- // Move the scavenge gen forward (signalling
- // that there's new work to do) and wake the scavenger.
- //
- // The scavenger is signaled by the last sweeper because once
- // sweeping is done, we will definitely have useful work for
- // the scavenger to do, since the scavenger only runs over the
- // heap once per GC cycle. This update is not done during sweep
- // termination because in some cases there may be a long delay
- // between sweep done and sweep termination (e.g. not enough
-		// allocations to trigger a GC), a gap that would be nice to
-		// fill with scavenging work.
- systemstack(func() {
- lock(&mheap_.lock)
- mheap_.pages.scavengeStartGen()
- unlock(&mheap_.lock)
- })
- // Since we might sweep in an allocation path, it's not possible
- // for us to wake the scavenger directly via wakeScavenger, since
- // it could allocate. Ask sysmon to do it for us instead.
- readyForScavenger()
- }
-
- gp.m.locks--
- return npages
-}
-
-// isSweepDone reports whether all spans are swept.
-//
-// Note that this condition may transition from false to true at any
-// time as the sweeper runs. It may transition from true to false if a
-// GC runs; to prevent that the caller must be non-preemptible or must
-// somehow block GC progress.
-func isSweepDone() bool {
- return sweep.active.isDone()
-}
-
-// Returns only when span s has been swept.
-//go:nowritebarrier
-func (s *mspan) ensureSwept() {
- // Caller must disable preemption.
- // Otherwise when this function returns the span can become unswept again
- // (if GC is triggered on another goroutine).
- _g_ := getg()
- if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
- throw("mspan.ensureSwept: m is not locked")
- }
-
-	// If this operation fails, it means that there are no more
-	// spans to be swept. In this case, either s has already been
-	// swept, or it is about to be acquired for sweeping and swept.
- sl := sweep.active.begin()
- if sl.valid {
- // The caller must be sure that the span is a mSpanInUse span.
- if s, ok := sl.tryAcquire(s); ok {
- s.sweep(false)
- sweep.active.end(sl)
- return
- }
- sweep.active.end(sl)
- }
-
- // Unfortunately we can't sweep the span ourselves. Somebody else
- // got to it first. We don't have efficient means to wait, but that's
- // OK, it will be swept fairly soon.
- for {
- spangen := atomic.Load(&s.sweepgen)
- if spangen == sl.sweepGen || spangen == sl.sweepGen+3 {
- break
- }
- osyield()
- }
-}
-
-// Sweep frees or collects finalizers for blocks not marked in the mark phase.
-// It clears the mark bits in preparation for the next GC round.
-// Returns true if the span was returned to heap.
-// If preserve=true, don't return it to heap nor relink in mcentral lists;
-// caller takes care of it.
-func (sl *sweepLocked) sweep(preserve bool) bool {
- // It's critical that we enter this function with preemption disabled,
- // GC must not start while we are in the middle of this function.
- _g_ := getg()
- if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
- throw("mspan.sweep: m is not locked")
- }
-
- s := sl.mspan
- if !preserve {
- // We'll release ownership of this span. Nil it out to
- // prevent the caller from accidentally using it.
- sl.mspan = nil
- }
-
- sweepgen := mheap_.sweepgen
- if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
- print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
- throw("mspan.sweep: bad span state")
- }
-
- if trace.enabled {
- traceGCSweepSpan(s.npages * _PageSize)
- }
-
- mheap_.pagesSwept.Add(int64(s.npages))
-
- spc := s.spanclass
- size := s.elemsize
-
- // The allocBits indicate which unmarked objects don't need to be
- // processed since they were free at the end of the last GC cycle
- // and were not allocated since then.
- // If the allocBits index is >= s.freeindex and the bit
- // is not marked then the object remains unallocated
- // since the last GC.
- // This situation is analogous to being on a freelist.
-
- // Unlink & free special records for any objects we're about to free.
- // Two complications here:
- // 1. An object can have both finalizer and profile special records.
-	//    In such a case we need to queue the finalizer for execution,
-	//    mark the object as live, and preserve the profile special.
-	// 2. A tiny object can have several finalizers set up for different offsets.
-	//    If such an object is not marked, we need to queue all finalizers at once.
- // Both 1 and 2 are possible at the same time.
- hadSpecials := s.specials != nil
- siter := newSpecialsIter(s)
- for siter.valid() {
-		// A finalizer can be set for an inner byte of an object; find the object's beginning.
- objIndex := uintptr(siter.s.offset) / size
- p := s.base() + objIndex*size
- mbits := s.markBitsForIndex(objIndex)
- if !mbits.isMarked() {
- // This object is not marked and has at least one special record.
- // Pass 1: see if it has at least one finalizer.
- hasFin := false
- endOffset := p - s.base() + size
- for tmp := siter.s; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
- if tmp.kind == _KindSpecialFinalizer {
- // Stop freeing of object if it has a finalizer.
- mbits.setMarkedNonAtomic()
- hasFin = true
- break
- }
- }
- // Pass 2: queue all finalizers _or_ handle profile record.
- for siter.valid() && uintptr(siter.s.offset) < endOffset {
-			// Find the exact byte for which the special was set up
-			// (as opposed to the object's beginning).
- special := siter.s
- p := s.base() + uintptr(special.offset)
- if special.kind == _KindSpecialFinalizer || !hasFin {
- siter.unlinkAndNext()
- freeSpecial(special, unsafe.Pointer(p), size)
- } else {
- // The object has finalizers, so we're keeping it alive.
- // All other specials only apply when an object is freed,
- // so just keep the special record.
- siter.next()
- }
- }
- } else {
- // object is still live
- if siter.s.kind == _KindSpecialReachable {
- special := siter.unlinkAndNext()
- (*specialReachable)(unsafe.Pointer(special)).reachable = true
- freeSpecial(special, unsafe.Pointer(p), size)
- } else {
- // keep special record
- siter.next()
- }
- }
- }
- if hadSpecials && s.specials == nil {
- spanHasNoSpecials(s)
- }
-
- if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled || asanenabled {
-		// Find all newly freed objects. This doesn't have to be
-		// efficient; allocfreetrace has massive overhead.
- mbits := s.markBitsForBase()
- abits := s.allocBitsForIndex(0)
- for i := uintptr(0); i < s.nelems; i++ {
- if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
- x := s.base() + i*s.elemsize
- if debug.allocfreetrace != 0 {
- tracefree(unsafe.Pointer(x), size)
- }
- if debug.clobberfree != 0 {
- clobberfree(unsafe.Pointer(x), size)
- }
- if raceenabled {
- racefree(unsafe.Pointer(x), size)
- }
- if msanenabled {
- msanfree(unsafe.Pointer(x), size)
- }
- if asanenabled {
- asanpoison(unsafe.Pointer(x), size)
- }
- }
- mbits.advance()
- abits.advance()
- }
- }
-
- // Check for zombie objects.
- if s.freeindex < s.nelems {
- // Everything < freeindex is allocated and hence
-		// cannot be a zombie.
- //
- // Check the first bitmap byte, where we have to be
- // careful with freeindex.
- obj := s.freeindex
- if (*s.gcmarkBits.bytep(obj / 8)&^*s.allocBits.bytep(obj / 8))>>(obj%8) != 0 {
- s.reportZombies()
- }
- // Check remaining bytes.
- for i := obj/8 + 1; i < divRoundUp(s.nelems, 8); i++ {
- if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 {
- s.reportZombies()
- }
- }
- }
-
- // Count the number of free objects in this span.
- nalloc := uint16(s.countAlloc())
- nfreed := s.allocCount - nalloc
- if nalloc > s.allocCount {
- // The zombie check above should have caught this in
- // more detail.
- print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
- throw("sweep increased allocation count")
- }
-
- s.allocCount = nalloc
- s.freeindex = 0 // reset allocation index to start of span.
- if trace.enabled {
- getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
- }
-
- // gcmarkBits becomes the allocBits.
- // get a fresh cleared gcmarkBits in preparation for next GC
- s.allocBits = s.gcmarkBits
- s.gcmarkBits = newMarkBits(s.nelems)
-
- // Initialize alloc bits cache.
- s.refillAllocCache(0)
-
- // The span must be in our exclusive ownership until we update sweepgen,
- // check for potential races.
- if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
- print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
- throw("mspan.sweep: bad span state after sweep")
- }
- if s.sweepgen == sweepgen+1 || s.sweepgen == sweepgen+3 {
- throw("swept cached span")
- }
-
- // We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
- // because of the potential for a concurrent free/SetFinalizer.
- //
- // But we need to set it before we make the span available for allocation
- // (return it to heap or mcentral), because allocation code assumes that a
- // span is already swept if available for allocation.
- //
- // Serialization point.
- // At this point the mark bits are cleared and allocation ready
- // to go so release the span.
- atomic.Store(&s.sweepgen, sweepgen)
-
- if spc.sizeclass() != 0 {
- // Handle spans for small objects.
- if nfreed > 0 {
- // Only mark the span as needing zeroing if we've freed any
- // objects, because a fresh span that had been allocated into,
- // wasn't totally filled, but then swept, still has all of its
- // free slots zeroed.
- s.needzero = 1
- stats := memstats.heapStats.acquire()
- atomic.Xadd64(&stats.smallFreeCount[spc.sizeclass()], int64(nfreed))
- memstats.heapStats.release()
- }
- if !preserve {
-			// The caller may not have removed this span from whatever
-			// unswept set it's on, but it has taken ownership of the span
-			// for sweeping by updating sweepgen. If this span is still in
- // an unswept set, then the mcentral will pop it off the
- // set, check its sweepgen, and ignore it.
- if nalloc == 0 {
- // Free totally free span directly back to the heap.
- mheap_.freeSpan(s)
- return true
- }
- // Return span back to the right mcentral list.
- if uintptr(nalloc) == s.nelems {
- mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
- } else {
- mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
- }
- }
- } else if !preserve {
- // Handle spans for large objects.
- if nfreed != 0 {
- // Free large object span to heap.
-
- // NOTE(rsc,dvyukov): The original implementation of efence
- // in CL 22060046 used sysFree instead of sysFault, so that
- // the operating system would eventually give the memory
- // back to us again, so that an efence program could run
- // longer without running out of memory. Unfortunately,
- // calling sysFree here without any kind of adjustment of the
- // heap data structures means that when the memory does
- // come back to us, we have the wrong metadata for it, either in
- // the mspan structures or in the garbage collection bitmap.
- // Using sysFault here means that the program will run out of
- // memory fairly quickly in efence mode, but at least it won't
- // have mysterious crashes due to confused memory reuse.
- // It should be possible to switch back to sysFree if we also
- // implement and then call some kind of mheap.deleteSpan.
- if debug.efence > 0 {
- s.limit = 0 // prevent mlookup from finding this span
- sysFault(unsafe.Pointer(s.base()), size)
- } else {
- mheap_.freeSpan(s)
- }
- stats := memstats.heapStats.acquire()
- atomic.Xadd64(&stats.largeFreeCount, 1)
- atomic.Xadd64(&stats.largeFree, int64(size))
- memstats.heapStats.release()
- return true
- }
-
- // Add a large span directly onto the full+swept list.
- mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
- }
- return false
-}
-
-// reportZombies reports any marked but free objects in s and throws.
-//
-// This generally means one of the following:
-//
-// 1. User code converted a pointer to a uintptr and then back
-// unsafely, and a GC ran while the uintptr was the only reference to
-// an object.
-//
-// 2. User code (or a compiler bug) constructed a bad pointer that
-// points to a free slot, often a past-the-end pointer.
-//
-// 3. The GC two cycles ago missed a pointer and freed a live object,
-// but it was still live in the last cycle, so this GC cycle found a
-// pointer to that object and marked it.
-func (s *mspan) reportZombies() {
- printlock()
- print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer? try -d=checkptr)\n")
- mbits := s.markBitsForBase()
- abits := s.allocBitsForIndex(0)
- for i := uintptr(0); i < s.nelems; i++ {
- addr := s.base() + i*s.elemsize
- print(hex(addr))
- alloc := i < s.freeindex || abits.isMarked()
- if alloc {
- print(" alloc")
- } else {
- print(" free ")
- }
- if mbits.isMarked() {
- print(" marked ")
- } else {
- print(" unmarked")
- }
- zombie := mbits.isMarked() && !alloc
- if zombie {
- print(" zombie")
- }
- print("\n")
- if zombie {
- length := s.elemsize
- if length > 1024 {
- length = 1024
- }
- hexdumpWords(addr, addr+length, nil)
- }
- mbits.advance()
- abits.advance()
- }
- throw("found pointer to free object")
-}
-
-// deductSweepCredit deducts sweep credit for allocating a span of
-// size spanBytes. This must be performed *before* the span is
-// allocated to ensure the system has enough credit. If necessary, it
-// performs sweeping to prevent going into debt. If the caller will
-// also sweep pages (e.g., for a large allocation), it can pass a
-// non-zero callerSweepPages to leave that many pages unswept.
-//
-// deductSweepCredit makes a worst-case assumption that all spanBytes
-// bytes of the ultimately allocated span will be available for object
-// allocation.
-//
-// deductSweepCredit is the core of the "proportional sweep" system.
-// It uses statistics gathered by the garbage collector to perform
-// enough sweeping so that all pages are swept during the concurrent
-// sweep phase between GC cycles.
-//
-// mheap_ must NOT be locked.
-func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
- if mheap_.sweepPagesPerByte == 0 {
- // Proportional sweep is done or disabled.
- return
- }
-
- if trace.enabled {
- traceGCSweepStart()
- }
-
-retry:
- sweptBasis := mheap_.pagesSweptBasis.Load()
-
- // Fix debt if necessary.
- newHeapLive := uintptr(atomic.Load64(&gcController.heapLive)-mheap_.sweepHeapLiveBasis) + spanBytes
- pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
- for pagesTarget > int64(mheap_.pagesSwept.Load()-sweptBasis) {
- if sweepone() == ^uintptr(0) {
- mheap_.sweepPagesPerByte = 0
- break
- }
- if mheap_.pagesSweptBasis.Load() != sweptBasis {
- // Sweep pacing changed. Recompute debt.
- goto retry
- }
- }
-
- if trace.enabled {
- traceGCSweepDone()
- }
-}
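
Concretely, the debt check compares a target page count derived from the pacing line against the pages actually swept this cycle. A worked example with made-up numbers (callerSweepPages omitted for brevity; all values are illustrative):

```go
package main

import "fmt"

func main() {
	// Illustrative values only.
	const (
		sweepPagesPerByte  = 0.001   // pacing ratio set by gcPaceSweeper
		heapLive           = 5 << 20 // current live-heap estimate (bytes)
		sweepHeapLiveBasis = 4 << 20 // heapLive when the ratio was set
		spanBytes          = 8192    // the span we want to allocate
		pagesSwept         = 1000    // pages swept so far this cycle
		pagesSweptBasis    = 0       // origin of the sweep ratio
	)
	// Worst case: all spanBytes become live immediately.
	newHeapLive := heapLive - sweepHeapLiveBasis + spanBytes
	pagesTarget := int64(sweepPagesPerByte * float64(newHeapLive))
	fmt.Println("target:", pagesTarget, "swept:", pagesSwept-pagesSweptBasis)
	if pagesTarget > pagesSwept-pagesSweptBasis {
		fmt.Println("in debt: sweep more pages before allocating")
	} else {
		fmt.Println("credit ok: allocation may proceed")
	}
}
```

With these numbers the target is 1056 pages against 1000 swept, so the allocator would loop calling sweepone until the debt is paid or sweeping runs out of work.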
-
-// clobberfree sets the memory content at x to bad content, for debugging
-// purposes.
-func clobberfree(x unsafe.Pointer, size uintptr) {
- // size (span.elemsize) is always a multiple of 4.
- for i := uintptr(0); i < size; i += 4 {
- *(*uint32)(add(x, i)) = 0xdeadbeef
- }
-}
-
-// gcPaceSweeper updates the sweeper's pacing parameters.
-//
-// Must be called whenever the GC's pacing is updated.
-//
-// The world must be stopped, or mheap_.lock must be held.
-func gcPaceSweeper(trigger uint64) {
- assertWorldStoppedOrLockHeld(&mheap_.lock)
-
- // Update sweep pacing.
- if isSweepDone() {
- mheap_.sweepPagesPerByte = 0
- } else {
- // Concurrent sweep needs to sweep all of the in-use
- // pages by the time the allocated heap reaches the GC
- // trigger. Compute the ratio of in-use pages to sweep
- // per byte allocated, accounting for the fact that
- // some might already be swept.
- heapLiveBasis := atomic.Load64(&gcController.heapLive)
- heapDistance := int64(trigger) - int64(heapLiveBasis)
- // Add a little margin so rounding errors and
- // concurrent sweep are less likely to leave pages
- // unswept when GC starts.
- heapDistance -= 1024 * 1024
- if heapDistance < _PageSize {
- // Avoid setting the sweep ratio extremely high
- heapDistance = _PageSize
- }
- pagesSwept := mheap_.pagesSwept.Load()
- pagesInUse := mheap_.pagesInUse.Load()
- sweepDistancePages := int64(pagesInUse) - int64(pagesSwept)
- if sweepDistancePages <= 0 {
- mheap_.sweepPagesPerByte = 0
- } else {
- mheap_.sweepPagesPerByte = float64(sweepDistancePages) / float64(heapDistance)
- mheap_.sweepHeapLiveBasis = heapLiveBasis
- // Write pagesSweptBasis last, since this
- // signals concurrent sweeps to recompute
- // their debt.
- mheap_.pagesSweptBasis.Store(pagesSwept)
- }
- }
-}
diff --git a/contrib/go/_std_1.18/src/runtime/mgcwork.go b/contrib/go/_std_1.18/src/runtime/mgcwork.go
deleted file mode 100644
index 9c3f7fd223..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mgcwork.go
+++ /dev/null
@@ -1,483 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/goarch"
- "runtime/internal/atomic"
- "unsafe"
-)
-
-const (
- _WorkbufSize = 2048 // in bytes; larger values result in less contention
-
- // workbufAlloc is the number of bytes to allocate at a time
- // for new workbufs. This must be a multiple of pageSize and
- // should be a multiple of _WorkbufSize.
- //
- // Larger values reduce workbuf allocation overhead. Smaller
- // values reduce heap fragmentation.
- workbufAlloc = 32 << 10
-)
-
-func init() {
- if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
- throw("bad workbufAlloc")
- }
-}
-
-// Garbage collector work pool abstraction.
-//
-// This implements a producer/consumer model for pointers to grey
-// objects. A grey object is one that is marked and on a work
-// queue. A black object is marked and not on a work queue.
-//
-// Write barriers, root discovery, stack scanning, and object scanning
-// produce pointers to grey objects. Scanning consumes pointers to
-// grey objects, thus blackening them, and then scans them,
-// potentially producing new pointers to grey objects.
-
-// A gcWork provides the interface to produce and consume work for the
-// garbage collector.
-//
-// A gcWork can be used on the stack as follows:
-//
-// (preemption must be disabled)
-// gcw := &getg().m.p.ptr().gcw
-// .. call gcw.put() to produce and gcw.tryGet() to consume ..
-//
-// It's important that any use of gcWork during the mark phase prevent
-// the garbage collector from transitioning to mark termination since
-// gcWork may locally hold GC work buffers. This can be done by
-// disabling preemption (systemstack or acquirem).
-type gcWork struct {
- // wbuf1 and wbuf2 are the primary and secondary work buffers.
- //
- // This can be thought of as a stack of both work buffers'
- // pointers concatenated. When we pop the last pointer, we
- // shift the stack up by one work buffer by bringing in a new
- // full buffer and discarding an empty one. When we fill both
- // buffers, we shift the stack down by one work buffer by
- // bringing in a new empty buffer and discarding a full one.
- // This way we have one buffer's worth of hysteresis, which
- // amortizes the cost of getting or putting a work buffer over
- // at least one buffer of work and reduces contention on the
- // global work lists.
- //
- // wbuf1 is always the buffer we're currently pushing to and
- // popping from and wbuf2 is the buffer that will be discarded
- // next.
- //
- // Invariant: Both wbuf1 and wbuf2 are nil or neither are.
- wbuf1, wbuf2 *workbuf
-
- // Bytes marked (blackened) on this gcWork. This is aggregated
- // into work.bytesMarked by dispose.
- bytesMarked uint64
-
- // Heap scan work performed on this gcWork. This is aggregated into
- // gcController by dispose and may also be flushed by callers.
- // Other types of scan work are flushed immediately.
- heapScanWork int64
-
- // flushedWork indicates that a non-empty work buffer was
- // flushed to the global work list since the last gcMarkDone
- // termination check. Specifically, this indicates that this
- // gcWork may have communicated work to another gcWork.
- flushedWork bool
-}
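
The two-buffer scheme described above reduces to a few lines: push and pop hit wbuf1, the pair is swapped when it fills or empties, and the global lists are touched only when both buffers are full or both are empty. A minimal sketch with plain slices standing in for the lock-free full/empty lists (all names illustrative):

```go
package main

import "fmt"

const bufCap = 2 // tiny for demonstration; real buffers hold ~250 pointers

type buf struct {
	obj  [bufCap]int
	nobj int
}

// pools stands in for the global lock-free full list.
type pools struct{ full []*buf }

type worker struct {
	w1, w2 *buf
	p      *pools
}

func (w *worker) put(x int) {
	if w.w1.nobj == bufCap {
		w.w1, w.w2 = w.w2, w.w1 // one buffer of hysteresis
		if w.w1.nobj == bufCap {
			w.p.full = append(w.p.full, w.w1) // both full: flush one
			w.w1 = &buf{}
		}
	}
	w.w1.obj[w.w1.nobj] = x
	w.w1.nobj++
}

func (w *worker) tryGet() (int, bool) {
	if w.w1.nobj == 0 {
		w.w1, w.w2 = w.w2, w.w1 // one buffer of hysteresis
		if w.w1.nobj == 0 {
			n := len(w.p.full)
			if n == 0 {
				return 0, false // no local or global work
			}
			// Both empty: refill from the global list. The old empty
			// buffer is simply dropped here; the runtime returns it
			// to the empty list instead.
			w.w1, w.p.full = w.p.full[n-1], w.p.full[:n-1]
		}
	}
	w.w1.nobj--
	return w.w1.obj[w.w1.nobj], true
}

func main() {
	w := &worker{w1: new(buf), w2: new(buf), p: &pools{}}
	for i := 1; i <= 7; i++ {
		w.put(i)
	}
	for x, ok := w.tryGet(); ok; x, ok = w.tryGet() {
		fmt.Print(x, " ") // 7 6 5 4 3 2 1
	}
	fmt.Println()
}
```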
-
-// Most of the methods of gcWork are go:nowritebarrierrec because the
-// write barrier itself can invoke gcWork methods but the methods are
-// not generally re-entrant. Hence, if a gcWork method invoked the
-// write barrier while the gcWork was in an inconsistent state, and
-// the write barrier in turn invoked a gcWork method, it could
-// permanently corrupt the gcWork.
-
-func (w *gcWork) init() {
- w.wbuf1 = getempty()
- wbuf2 := trygetfull()
- if wbuf2 == nil {
- wbuf2 = getempty()
- }
- w.wbuf2 = wbuf2
-}
-
-// put enqueues a pointer for the garbage collector to trace.
-// obj must point to the beginning of a heap object or an oblet.
-//go:nowritebarrierrec
-func (w *gcWork) put(obj uintptr) {
- flushed := false
- wbuf := w.wbuf1
- // Record that this may acquire the wbufSpans or heap lock to
- // allocate a workbuf.
- lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
- lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
- if wbuf == nil {
- w.init()
- wbuf = w.wbuf1
- // wbuf is empty at this point.
- } else if wbuf.nobj == len(wbuf.obj) {
- w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
- wbuf = w.wbuf1
- if wbuf.nobj == len(wbuf.obj) {
- putfull(wbuf)
- w.flushedWork = true
- wbuf = getempty()
- w.wbuf1 = wbuf
- flushed = true
- }
- }
-
- wbuf.obj[wbuf.nobj] = obj
- wbuf.nobj++
-
- // If we put a buffer on full, let the GC controller know so
- // it can encourage more workers to run. We delay this until
- // the end of put so that w is in a consistent state, since
- // enlistWorker may itself manipulate w.
- if flushed && gcphase == _GCmark {
- gcController.enlistWorker()
- }
-}
-
-// putFast does a put and reports whether it can be done quickly;
-// otherwise it returns false and the caller needs to call put.
-//go:nowritebarrierrec
-func (w *gcWork) putFast(obj uintptr) bool {
- wbuf := w.wbuf1
- if wbuf == nil {
- return false
- } else if wbuf.nobj == len(wbuf.obj) {
- return false
- }
-
- wbuf.obj[wbuf.nobj] = obj
- wbuf.nobj++
- return true
-}
-
-// putBatch performs a put on every pointer in obj. See put for
-// constraints on these pointers.
-//
-//go:nowritebarrierrec
-func (w *gcWork) putBatch(obj []uintptr) {
- if len(obj) == 0 {
- return
- }
-
- flushed := false
- wbuf := w.wbuf1
- if wbuf == nil {
- w.init()
- wbuf = w.wbuf1
- }
-
- for len(obj) > 0 {
- for wbuf.nobj == len(wbuf.obj) {
- putfull(wbuf)
- w.flushedWork = true
- w.wbuf1, w.wbuf2 = w.wbuf2, getempty()
- wbuf = w.wbuf1
- flushed = true
- }
- n := copy(wbuf.obj[wbuf.nobj:], obj)
- wbuf.nobj += n
- obj = obj[n:]
- }
-
- if flushed && gcphase == _GCmark {
- gcController.enlistWorker()
- }
-}
-
-// tryGet dequeues a pointer for the garbage collector to trace.
-//
-// If there are no pointers remaining in this gcWork or in the global
-// queue, tryGet returns 0. Note that there may still be pointers in
-// other gcWork instances or other caches.
-//go:nowritebarrierrec
-func (w *gcWork) tryGet() uintptr {
- wbuf := w.wbuf1
- if wbuf == nil {
- w.init()
- wbuf = w.wbuf1
- // wbuf is empty at this point.
- }
- if wbuf.nobj == 0 {
- w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
- wbuf = w.wbuf1
- if wbuf.nobj == 0 {
- owbuf := wbuf
- wbuf = trygetfull()
- if wbuf == nil {
- return 0
- }
- putempty(owbuf)
- w.wbuf1 = wbuf
- }
- }
-
- wbuf.nobj--
- return wbuf.obj[wbuf.nobj]
-}
-
-// tryGetFast dequeues a pointer for the garbage collector to trace
-// if one is readily available. Otherwise it returns 0 and
-// the caller is expected to call tryGet().
-//go:nowritebarrierrec
-func (w *gcWork) tryGetFast() uintptr {
- wbuf := w.wbuf1
- if wbuf == nil {
- return 0
- }
- if wbuf.nobj == 0 {
- return 0
- }
-
- wbuf.nobj--
- return wbuf.obj[wbuf.nobj]
-}
-
-// dispose returns any cached pointers to the global queue.
-// The buffers are being put on the full queue so that the
-// write barriers will not simply reacquire them before the
-// GC can inspect them. This helps reduce the mutator's
-// ability to hide pointers during the concurrent mark phase.
-//
-//go:nowritebarrierrec
-func (w *gcWork) dispose() {
- if wbuf := w.wbuf1; wbuf != nil {
- if wbuf.nobj == 0 {
- putempty(wbuf)
- } else {
- putfull(wbuf)
- w.flushedWork = true
- }
- w.wbuf1 = nil
-
- wbuf = w.wbuf2
- if wbuf.nobj == 0 {
- putempty(wbuf)
- } else {
- putfull(wbuf)
- w.flushedWork = true
- }
- w.wbuf2 = nil
- }
- if w.bytesMarked != 0 {
- // dispose happens relatively infrequently. If this
- // atomic becomes a problem, we should first try to
- // dispose less and if necessary aggregate in a per-P
- // counter.
- atomic.Xadd64(&work.bytesMarked, int64(w.bytesMarked))
- w.bytesMarked = 0
- }
- if w.heapScanWork != 0 {
- gcController.heapScanWork.Add(w.heapScanWork)
- w.heapScanWork = 0
- }
-}
-
-// balance moves some work that's cached in this gcWork back on the
-// global queue.
-//go:nowritebarrierrec
-func (w *gcWork) balance() {
- if w.wbuf1 == nil {
- return
- }
- if wbuf := w.wbuf2; wbuf.nobj != 0 {
- putfull(wbuf)
- w.flushedWork = true
- w.wbuf2 = getempty()
- } else if wbuf := w.wbuf1; wbuf.nobj > 4 {
- w.wbuf1 = handoff(wbuf)
- w.flushedWork = true // handoff did putfull
- } else {
- return
- }
- // We flushed a buffer to the full list, so wake a worker.
- if gcphase == _GCmark {
- gcController.enlistWorker()
- }
-}
-
-// empty reports whether w has no mark work available.
-//go:nowritebarrierrec
-func (w *gcWork) empty() bool {
- return w.wbuf1 == nil || (w.wbuf1.nobj == 0 && w.wbuf2.nobj == 0)
-}
-
-// Internally, the GC work pool is kept in arrays in work buffers.
-// The gcWork interface caches a work buffer until full (or empty) to
-// avoid contending on the global work buffer lists.
-
-type workbufhdr struct {
- node lfnode // must be first
- nobj int
-}
-
-//go:notinheap
-type workbuf struct {
- workbufhdr
- // account for the above fields
- obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
-}
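
The obj array is sized so the whole struct is exactly _WorkbufSize bytes. A quick check of that arithmetic on a 64-bit target; the stand-in header below is smaller than the real lfnode-based workbufhdr, so the slot count comes out slightly different from the runtime's:

```go
package main

import (
	"fmt"
	"unsafe"
)

const workbufSize = 2048 // mirrors _WorkbufSize

// hdr is a stand-in for workbufhdr; the real one embeds lfnode and is
// larger, which shrinks the payload accordingly.
type hdr struct {
	node uintptr
	nobj int
}

type wbuf struct {
	hdr
	obj [(workbufSize - unsafe.Sizeof(hdr{})) / unsafe.Sizeof(uintptr(0))]uintptr
}

func main() {
	fmt.Println(unsafe.Sizeof(wbuf{})) // 2048: header + slots fill the budget exactly
	fmt.Println(len(wbuf{}.obj))       // 254 pointer slots with this 16-byte header
}
```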
-
-// workbuf factory routines. These funcs are used to manage the
-// workbufs.
-// If the GC asks for some work, these are the only routines that
-// make wbufs available to the GC.
-
-func (b *workbuf) checknonempty() {
- if b.nobj == 0 {
- throw("workbuf is empty")
- }
-}
-
-func (b *workbuf) checkempty() {
- if b.nobj != 0 {
- throw("workbuf is not empty")
- }
-}
-
-// getempty pops an empty work buffer off the work.empty list,
-// allocating new buffers if none are available.
-//go:nowritebarrier
-func getempty() *workbuf {
- var b *workbuf
- if work.empty != 0 {
- b = (*workbuf)(work.empty.pop())
- if b != nil {
- b.checkempty()
- }
- }
- // Record that this may acquire the wbufSpans or heap lock to
- // allocate a workbuf.
- lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
- lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
- if b == nil {
- // Allocate more workbufs.
- var s *mspan
- if work.wbufSpans.free.first != nil {
- lock(&work.wbufSpans.lock)
- s = work.wbufSpans.free.first
- if s != nil {
- work.wbufSpans.free.remove(s)
- work.wbufSpans.busy.insert(s)
- }
- unlock(&work.wbufSpans.lock)
- }
- if s == nil {
- systemstack(func() {
- s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
- })
- if s == nil {
- throw("out of memory")
- }
- // Record the new span in the busy list.
- lock(&work.wbufSpans.lock)
- work.wbufSpans.busy.insert(s)
- unlock(&work.wbufSpans.lock)
- }
- // Slice up the span into new workbufs. Return one and
- // put the rest on the empty list.
- for i := uintptr(0); i+_WorkbufSize <= workbufAlloc; i += _WorkbufSize {
- newb := (*workbuf)(unsafe.Pointer(s.base() + i))
- newb.nobj = 0
- lfnodeValidate(&newb.node)
- if i == 0 {
- b = newb
- } else {
- putempty(newb)
- }
- }
- }
- return b
-}
-
-// putempty puts a workbuf onto the work.empty list.
-// Upon entry this goroutine owns b. The lfstack.push relinquishes ownership.
-//go:nowritebarrier
-func putempty(b *workbuf) {
- b.checkempty()
- work.empty.push(&b.node)
-}
-
-// putfull puts the workbuf on the work.full list for the GC.
-// putfull accepts partially full buffers so the GC can avoid competing
-// with the mutators for ownership of partially full buffers.
-//go:nowritebarrier
-func putfull(b *workbuf) {
- b.checknonempty()
- work.full.push(&b.node)
-}
-
-// trygetfull tries to get a full or partially empty workbuffer.
-// If one is not immediately available, it returns nil.
-//go:nowritebarrier
-func trygetfull() *workbuf {
- b := (*workbuf)(work.full.pop())
- if b != nil {
- b.checknonempty()
- return b
- }
- return b
-}
-
-//go:nowritebarrier
-func handoff(b *workbuf) *workbuf {
- // Make new buffer with half of b's pointers.
- b1 := getempty()
- n := b.nobj / 2
- b.nobj -= n
- b1.nobj = n
- memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), uintptr(n)*unsafe.Sizeof(b1.obj[0]))
-
- // Put b on full list - let first half of b get stolen.
- putfull(b)
- return b1
-}
-
-// prepareFreeWorkbufs moves busy workbuf spans to free list so they
-// can be freed to the heap. This must only be called when all
-// workbufs are on the empty list.
-func prepareFreeWorkbufs() {
- lock(&work.wbufSpans.lock)
- if work.full != 0 {
- throw("cannot free workbufs when work.full != 0")
- }
- // Since all workbufs are on the empty list, we don't care
- // which ones are in which spans. We can wipe the entire empty
- // list and move all workbuf spans to the free list.
- work.empty = 0
- work.wbufSpans.free.takeAll(&work.wbufSpans.busy)
- unlock(&work.wbufSpans.lock)
-}
-
-// freeSomeWbufs frees some workbufs back to the heap and returns
-// true if it should be called again to free more.
-func freeSomeWbufs(preemptible bool) bool {
- const batchSize = 64 // ~1–2 µs per span.
- lock(&work.wbufSpans.lock)
- if gcphase != _GCoff || work.wbufSpans.free.isEmpty() {
- unlock(&work.wbufSpans.lock)
- return false
- }
- systemstack(func() {
- gp := getg().m.curg
- for i := 0; i < batchSize && !(preemptible && gp.preempt); i++ {
- span := work.wbufSpans.free.first
- if span == nil {
- break
- }
- work.wbufSpans.free.remove(span)
- mheap_.freeManual(span, spanAllocWorkBuf)
- }
- })
- more := !work.wbufSpans.free.isEmpty()
- unlock(&work.wbufSpans.lock)
- return more
-}
diff --git a/contrib/go/_std_1.18/src/runtime/mheap.go b/contrib/go/_std_1.18/src/runtime/mheap.go
deleted file mode 100644
index ecbd0a3a49..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mheap.go
+++ /dev/null
@@ -1,2119 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Page heap.
-//
-// See malloc.go for overview.
-
-package runtime
-
-import (
- "internal/cpu"
- "internal/goarch"
- "runtime/internal/atomic"
- "unsafe"
-)
-
-const (
- // minPhysPageSize is a lower-bound on the physical page size. The
- // true physical page size may be larger than this. In contrast,
- // sys.PhysPageSize is an upper-bound on the physical page size.
- minPhysPageSize = 4096
-
- // maxPhysPageSize is the maximum page size the runtime supports.
- maxPhysPageSize = 512 << 10
-
- // maxPhysHugePageSize sets an upper-bound on the maximum huge page size
- // that the runtime supports.
- maxPhysHugePageSize = pallocChunkBytes
-
- // pagesPerReclaimerChunk indicates how many pages to scan from the
- // pageInUse bitmap at a time. Used by the page reclaimer.
- //
- // Higher values reduce contention on scanning indexes (such as
- // h.reclaimIndex), but increase the minimum latency of the
- // operation.
- //
- // The time required to scan this many pages can vary a lot depending
- // on how many spans are actually freed. Experimentally, it can
- // scan for pages at ~300 GB/ms on a 2.6GHz Core i7, but can only
- // free spans at ~32 MB/ms. Using 512 pages bounds this at
- // roughly 100µs.
- //
- // Must be a multiple of the pageInUse bitmap element size and
- // must also evenly divide pagesPerArena.
- pagesPerReclaimerChunk = 512
-
- // physPageAlignedStacks indicates whether stack allocations must be
- // physical page aligned. This is a requirement for MAP_STACK on
- // OpenBSD.
- physPageAlignedStacks = GOOS == "openbsd"
-)
-
-// Main malloc heap.
-// The heap itself is the "free" and "scav" treaps,
-// but all the other global data is here too.
-//
-// mheap must not be heap-allocated because it contains mSpanLists,
-// which must not be heap-allocated.
-//
-//go:notinheap
-type mheap struct {
- // lock must only be acquired on the system stack, otherwise a g
- // could self-deadlock if its stack grows with the lock held.
- lock mutex
- pages pageAlloc // page allocation data structure
-
- sweepgen uint32 // sweep generation, see comment in mspan; written during STW
-
- // allspans is a slice of all mspans ever created. Each mspan
- // appears exactly once.
- //
- // The memory for allspans is manually managed and can be
-	// reallocated and moved as the heap grows.
- //
- // In general, allspans is protected by mheap_.lock, which
- // prevents concurrent access as well as freeing the backing
- // store. Accesses during STW might not hold the lock, but
- // must ensure that allocation cannot happen around the
- // access (since that may free the backing store).
- allspans []*mspan // all spans out there
-
- // _ uint32 // align uint64 fields on 32-bit for atomics
-
- // Proportional sweep
- //
- // These parameters represent a linear function from gcController.heapLive
- // to page sweep count. The proportional sweep system works to
- // stay in the black by keeping the current page sweep count
- // above this line at the current gcController.heapLive.
- //
- // The line has slope sweepPagesPerByte and passes through a
- // basis point at (sweepHeapLiveBasis, pagesSweptBasis). At
- // any given time, the system is at (gcController.heapLive,
- // pagesSwept) in this space.
- //
- // It is important that the line pass through a point we
- // control rather than simply starting at a 0,0 origin
- // because that lets us adjust sweep pacing at any time while
- // accounting for current progress. If we could only adjust
- // the slope, it would create a discontinuity in debt if any
- // progress has already been made.
- pagesInUse atomic.Uint64 // pages of spans in stats mSpanInUse
- pagesSwept atomic.Uint64 // pages swept this cycle
- pagesSweptBasis atomic.Uint64 // pagesSwept to use as the origin of the sweep ratio
- sweepHeapLiveBasis uint64 // value of gcController.heapLive to use as the origin of sweep ratio; written with lock, read without
- sweepPagesPerByte float64 // proportional sweep ratio; written with lock, read without
- // TODO(austin): pagesInUse should be a uintptr, but the 386
- // compiler can't 8-byte align fields.
-
- // scavengeGoal is the amount of total retained heap memory (measured by
- // heapRetained) that the runtime will try to maintain by returning memory
- // to the OS.
- //
- // Accessed atomically.
- scavengeGoal uint64
-
- // Page reclaimer state
-
-	// reclaimIndex is the page index in allArenas of the next page to
- // reclaim. Specifically, it refers to page (i %
- // pagesPerArena) of arena allArenas[i / pagesPerArena].
- //
- // If this is >= 1<<63, the page reclaimer is done scanning
- // the page marks.
- reclaimIndex atomic.Uint64
-
- // reclaimCredit is spare credit for extra pages swept. Since
- // the page reclaimer works in large chunks, it may reclaim
- // more than requested. Any spare pages released go to this
- // credit pool.
- reclaimCredit atomic.Uintptr
-
- // arenas is the heap arena map. It points to the metadata for
- // the heap for every arena frame of the entire usable virtual
- // address space.
- //
- // Use arenaIndex to compute indexes into this array.
- //
- // For regions of the address space that are not backed by the
- // Go heap, the arena map contains nil.
- //
- // Modifications are protected by mheap_.lock. Reads can be
- // performed without locking; however, a given entry can
- // transition from nil to non-nil at any time when the lock
-	// isn't held. (Entries never transition back to nil.)
- //
- // In general, this is a two-level mapping consisting of an L1
- // map and possibly many L2 maps. This saves space when there
- // are a huge number of arena frames. However, on many
- // platforms (even 64-bit), arenaL1Bits is 0, making this
- // effectively a single-level map. In this case, arenas[0]
- // will never be nil.
- arenas [1 << arenaL1Bits]*[1 << arenaL2Bits]*heapArena
-
- // heapArenaAlloc is pre-reserved space for allocating heapArena
- // objects. This is only used on 32-bit, where we pre-reserve
- // this space to avoid interleaving it with the heap itself.
- heapArenaAlloc linearAlloc
-
- // arenaHints is a list of addresses at which to attempt to
- // add more heap arenas. This is initially populated with a
- // set of general hint addresses, and grown with the bounds of
- // actual heap arena ranges.
- arenaHints *arenaHint
-
- // arena is a pre-reserved space for allocating heap arenas
- // (the actual arenas). This is only used on 32-bit.
- arena linearAlloc
-
- // allArenas is the arenaIndex of every mapped arena. This can
- // be used to iterate through the address space.
- //
- // Access is protected by mheap_.lock. However, since this is
- // append-only and old backing arrays are never freed, it is
- // safe to acquire mheap_.lock, copy the slice header, and
- // then release mheap_.lock.
- allArenas []arenaIdx
-
- // sweepArenas is a snapshot of allArenas taken at the
- // beginning of the sweep cycle. This can be read safely by
- // simply blocking GC (by disabling preemption).
- sweepArenas []arenaIdx
-
- // markArenas is a snapshot of allArenas taken at the beginning
- // of the mark cycle. Because allArenas is append-only, neither
- // this slice nor its contents will change during the mark, so
- // it can be read safely.
- markArenas []arenaIdx
-
- // curArena is the arena that the heap is currently growing
- // into. This should always be physPageSize-aligned.
- curArena struct {
- base, end uintptr
- }
-
- _ uint32 // ensure 64-bit alignment of central
-
- // central free lists for small size classes.
- // the padding makes sure that the mcentrals are
- // spaced CacheLinePadSize bytes apart, so that each mcentral.lock
- // gets its own cache line.
- // central is indexed by spanClass.
- central [numSpanClasses]struct {
- mcentral mcentral
- pad [cpu.CacheLinePadSize - unsafe.Sizeof(mcentral{})%cpu.CacheLinePadSize]byte
- }
-
- spanalloc fixalloc // allocator for span*
- cachealloc fixalloc // allocator for mcache*
- specialfinalizeralloc fixalloc // allocator for specialfinalizer*
- specialprofilealloc fixalloc // allocator for specialprofile*
- specialReachableAlloc fixalloc // allocator for specialReachable
- speciallock mutex // lock for special record allocators.
- arenaHintAlloc fixalloc // allocator for arenaHints
-
- unused *specialfinalizer // never set, just here to force the specialfinalizer type into DWARF
-}
-
-var mheap_ mheap
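
The arenas field above implements the two-level mapping its comment describes: an address is shifted down to an arena number, whose high bits index the L1 table and low bits index an L2 table. A hedged sketch of that index split; the bit widths here are illustrative, not any real platform's values:

```go
package main

import "fmt"

// Illustrative bit widths, not any real platform's values.
const (
	arenaBits   = 22 // log2 of the arena size (4 MiB here)
	arenaL1Bits = 2  // log2 of the L1 table length
	arenaL2Bits = 6  // log2 of each L2 table length
)

// arenaIdx splits an address into L1 and L2 table indexes, the way a
// two-level arena lookup works. With arenaL1Bits == 0 the L1 index is
// always 0 and the map is effectively one flat table.
func arenaIdx(addr uintptr) (l1, l2 uintptr) {
	i := addr >> arenaBits
	return i >> arenaL2Bits, i & (1<<arenaL2Bits - 1)
}

func main() {
	fmt.Println("tables:", 1<<arenaL1Bits, "x", 1<<arenaL2Bits)
	l1, l2 := arenaIdx(0x1234_5678)
	fmt.Println(l1, l2) // 1 8: arena 72 lives in L1 slot 1, L2 slot 8
}
```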
-
-// A heapArena stores metadata for a heap arena. heapArenas are stored
-// outside of the Go heap and accessed via the mheap_.arenas index.
-//
-//go:notinheap
-type heapArena struct {
- // bitmap stores the pointer/scalar bitmap for the words in
- // this arena. See mbitmap.go for a description. Use the
- // heapBits type to access this.
- bitmap [heapArenaBitmapBytes]byte
-
- // spans maps from virtual address page ID within this arena to *mspan.
- // For allocated spans, their pages map to the span itself.
- // For free spans, only the lowest and highest pages map to the span itself.
- // Internal pages map to an arbitrary span.
- // For pages that have never been allocated, spans entries are nil.
- //
- // Modifications are protected by mheap.lock. Reads can be
- // performed without locking, but ONLY from indexes that are
- // known to contain in-use or stack spans. This means there
- // must not be a safe-point between establishing that an
- // address is live and looking it up in the spans array.
- spans [pagesPerArena]*mspan
-
- // pageInUse is a bitmap that indicates which spans are in
- // state mSpanInUse. This bitmap is indexed by page number,
- // but only the bit corresponding to the first page in each
- // span is used.
- //
- // Reads and writes are atomic.
- pageInUse [pagesPerArena / 8]uint8
-
- // pageMarks is a bitmap that indicates which spans have any
- // marked objects on them. Like pageInUse, only the bit
- // corresponding to the first page in each span is used.
- //
- // Writes are done atomically during marking. Reads are
- // non-atomic and lock-free since they only occur during
- // sweeping (and hence never race with writes).
- //
- // This is used to quickly find whole spans that can be freed.
- //
- // TODO(austin): It would be nice if this was uint64 for
- // faster scanning, but we don't have 64-bit atomic bit
- // operations.
- pageMarks [pagesPerArena / 8]uint8
-
- // pageSpecials is a bitmap that indicates which spans have
- // specials (finalizers or other). Like pageInUse, only the bit
- // corresponding to the first page in each span is used.
- //
- // Writes are done atomically whenever a special is added to
- // a span and whenever the last special is removed from a span.
- // Reads are done atomically to find spans containing specials
- // during marking.
- pageSpecials [pagesPerArena / 8]uint8
-
- // checkmarks stores the debug.gccheckmark state. It is only
- // used if debug.gccheckmark > 0.
- checkmarks *checkmarksMap
-
- // zeroedBase marks the first byte of the first page in this
- // arena which hasn't been used yet and is therefore already
- // zero. zeroedBase is relative to the arena base.
- // Increases monotonically until it hits heapArenaBytes.
- //
- // This field is sufficient to determine if an allocation
- // needs to be zeroed because the page allocator follows an
- // address-ordered first-fit policy.
- //
- // Read atomically and written with an atomic CAS.
- zeroedBase uintptr
-}
-
-// arenaHint is a hint for where to grow the heap arenas. See
-// mheap_.arenaHints.
-//
-//go:notinheap
-type arenaHint struct {
- addr uintptr
- down bool
- next *arenaHint
-}
-
-// An mspan is a run of pages.
-//
-// When an mspan is in the heap free treap, state == mSpanFree
-// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
-// If the mspan is in the heap scav treap, then in addition to the
-// above scavenged == true. scavenged == false in all other cases.
-//
-// When an mspan is allocated, state == mSpanInUse or mSpanManual
-// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
-
-// Every mspan is in one doubly-linked list, either in the mheap's
-// busy list or one of the mcentral's span lists.
-
-// An mspan representing actual memory has state mSpanInUse,
-// mSpanManual, or mSpanFree. Transitions between these states are
-// constrained as follows:
-//
-// * A span may transition from free to in-use or manual during any GC
-// phase.
-//
-// * During sweeping (gcphase == _GCoff), a span may transition from
-// in-use to free (as a result of sweeping) or manual to free (as a
-// result of stacks being freed).
-//
-// * During GC (gcphase != _GCoff), a span *must not* transition from
-// manual or in-use to free. Because concurrent GC may read a pointer
-// and then look up its span, the span state must be monotonic.
-//
-// Setting mspan.state to mSpanInUse or mSpanManual must be done
-// atomically and only after all other span fields are valid.
-// Likewise, if inspecting a span is contingent on it being
-// mSpanInUse, the state should be loaded atomically and checked
-// before depending on other fields. This allows the garbage collector
-// to safely deal with potentially invalid pointers, since resolving
-// such pointers may race with a span being allocated.
-type mSpanState uint8
-
-const (
- mSpanDead mSpanState = iota
- mSpanInUse // allocated for garbage collected heap
- mSpanManual // allocated for manual management (e.g., stack allocator)
-)
-
-// mSpanStateNames are the names of the span states, indexed by
-// mSpanState.
-var mSpanStateNames = []string{
- "mSpanDead",
- "mSpanInUse",
- "mSpanManual",
- "mSpanFree",
-}
-
-// mSpanStateBox holds an mSpanState and provides atomic operations on
-// it. This is a separate type to disallow accidental comparison or
-// assignment with mSpanState.
-type mSpanStateBox struct {
- s mSpanState
-}
-
-func (b *mSpanStateBox) set(s mSpanState) {
- atomic.Store8((*uint8)(&b.s), uint8(s))
-}
-
-func (b *mSpanStateBox) get() mSpanState {
- return mSpanState(atomic.Load8((*uint8)(&b.s)))
-}
-
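
The box-and-atomics pattern above can be reproduced with sync/atomic outside the runtime. A minimal standalone sketch, assuming a uint32-backed state since user code has no 8-bit atomics; spanStateBox and the state names here are illustrative, not from this file:

package main

import (
	"fmt"
	"sync/atomic"
)

type spanState uint32

const (
	stateDead spanState = iota
	stateInUse
	stateManual
)

// spanStateBox wraps the state in a struct so it cannot be compared
// or assigned directly; all access goes through atomic get/set,
// mirroring mSpanStateBox above.
type spanStateBox struct {
	s atomic.Uint32
}

func (b *spanStateBox) set(s spanState) { b.s.Store(uint32(s)) }
func (b *spanStateBox) get() spanState  { return spanState(b.s.Load()) }

func main() {
	var b spanStateBox
	b.set(stateInUse)
	fmt.Println(b.get() == stateInUse) // true
}
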
-// mSpanList heads a linked list of spans.
-//
-//go:notinheap
-type mSpanList struct {
- first *mspan // first span in list, or nil if none
- last *mspan // last span in list, or nil if none
-}
-
-//go:notinheap
-type mspan struct {
- next *mspan // next span in list, or nil if none
- prev *mspan // previous span in list, or nil if none
- list *mSpanList // For debugging. TODO: Remove.
-
- startAddr uintptr // address of first byte of span aka s.base()
- npages uintptr // number of pages in span
-
- manualFreeList gclinkptr // list of free objects in mSpanManual spans
-
- // freeindex is the slot index between 0 and nelems at which to begin scanning
- // for the next free object in this span.
- // Each allocation scans allocBits starting at freeindex until it encounters a 0
- // indicating a free object. freeindex is then adjusted so that subsequent scans begin
- // just past the newly discovered free object.
- //
-	// If freeindex == nelems, this span has no free objects.
- //
- // allocBits is a bitmap of objects in this span.
- // If n >= freeindex and allocBits[n/8] & (1<<(n%8)) is 0
- // then object n is free;
-	// otherwise, object n is allocated. Bits starting at nelems are
- // undefined and should never be referenced.
- //
- // Object n starts at address n*elemsize + (start << pageShift).
- freeindex uintptr
- // TODO: Look up nelems from sizeclass and remove this field if it
- // helps performance.
-	nelems uintptr // number of objects in the span.
-
- // Cache of the allocBits at freeindex. allocCache is shifted
- // such that the lowest bit corresponds to the bit freeindex.
- // allocCache holds the complement of allocBits, thus allowing
- // ctz (count trailing zero) to use it directly.
- // allocCache may contain bits beyond s.nelems; the caller must ignore
- // these.
- allocCache uint64
-
- // allocBits and gcmarkBits hold pointers to a span's mark and
- // allocation bits. The pointers are 8 byte aligned.
-	// There are four arenas where this data is held.
- // free: Dirty arenas that are no longer accessed
- // and can be reused.
- // next: Holds information to be used in the next GC cycle.
- // current: Information being used during this GC cycle.
- // previous: Information being used during the last GC cycle.
- // A new GC cycle starts with the call to finishsweep_m.
- // finishsweep_m moves the previous arena to the free arena,
- // the current arena to the previous arena, and
- // the next arena to the current arena.
- // The next arena is populated as the spans request
- // memory to hold gcmarkBits for the next GC cycle as well
- // as allocBits for newly allocated spans.
- //
- // The pointer arithmetic is done "by hand" instead of using
- // arrays to avoid bounds checks along critical performance
- // paths.
- // The sweep will free the old allocBits and set allocBits to the
- // gcmarkBits. The gcmarkBits are replaced with a fresh zeroed
- // out memory.
- allocBits *gcBits
- gcmarkBits *gcBits
-
- // sweep generation:
- // if sweepgen == h->sweepgen - 2, the span needs sweeping
- // if sweepgen == h->sweepgen - 1, the span is currently being swept
- // if sweepgen == h->sweepgen, the span is swept and ready to use
- // if sweepgen == h->sweepgen + 1, the span was cached before sweep began and is still cached, and needs sweeping
- // if sweepgen == h->sweepgen + 3, the span was swept and then cached and is still cached
- // h->sweepgen is incremented by 2 after every GC
-
- sweepgen uint32
- divMul uint32 // for divide by elemsize
- allocCount uint16 // number of allocated objects
- spanclass spanClass // size class and noscan (uint8)
- state mSpanStateBox // mSpanInUse etc; accessed atomically (get/set methods)
- needzero uint8 // needs to be zeroed before allocation
- elemsize uintptr // computed from sizeclass or from npages
- limit uintptr // end of data in span
- speciallock mutex // guards specials list
- specials *special // linked list of special records sorted by offset.
-}
-
-func (s *mspan) base() uintptr {
- return s.startAddr
-}
-
-func (s *mspan) layout() (size, n, total uintptr) {
- total = s.npages << _PageShift
- size = s.elemsize
- if size > 0 {
- n = total / size
- }
- return
-}
-
-// recordspan adds a newly allocated span to h.allspans.
-//
-// This only happens the first time a span is allocated from
-// mheap.spanalloc (it is not called when a span is reused).
-//
-// Write barriers are disallowed here because it can be called from
-// gcWork when allocating new workbufs. However, because it's an
-// indirect call from the fixalloc initializer, the compiler can't see
-// this.
-//
-// The heap lock must be held.
-//
-//go:nowritebarrierrec
-func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
- h := (*mheap)(vh)
- s := (*mspan)(p)
-
- assertLockHeld(&h.lock)
-
- if len(h.allspans) >= cap(h.allspans) {
- n := 64 * 1024 / goarch.PtrSize
- if n < cap(h.allspans)*3/2 {
- n = cap(h.allspans) * 3 / 2
- }
- var new []*mspan
- sp := (*slice)(unsafe.Pointer(&new))
- sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys)
- if sp.array == nil {
- throw("runtime: cannot allocate memory")
- }
- sp.len = len(h.allspans)
- sp.cap = n
- if len(h.allspans) > 0 {
- copy(new, h.allspans)
- }
- oldAllspans := h.allspans
- *(*notInHeapSlice)(unsafe.Pointer(&h.allspans)) = *(*notInHeapSlice)(unsafe.Pointer(&new))
- if len(oldAllspans) != 0 {
- sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys)
- }
- }
- h.allspans = h.allspans[:len(h.allspans)+1]
- h.allspans[len(h.allspans)-1] = s
-}
-
-// A spanClass represents the size class and noscan-ness of a span.
-//
-// Each size class has a noscan spanClass and a scan spanClass. The
-// noscan spanClass contains only noscan objects, which do not contain
-// pointers and thus do not need to be scanned by the garbage
-// collector.
-type spanClass uint8
-
-const (
- numSpanClasses = _NumSizeClasses << 1
- tinySpanClass = spanClass(tinySizeClass<<1 | 1)
-)
-
-func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
- return spanClass(sizeclass<<1) | spanClass(bool2int(noscan))
-}
-
-func (sc spanClass) sizeclass() int8 {
- return int8(sc >> 1)
-}
-
-func (sc spanClass) noscan() bool {
- return sc&1 != 0
-}
-
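
The packing round-trips as in this standalone sketch (bool2int is runtime-internal and not in this listing, so the sketch branches instead):

package main

import "fmt"

// spanClass packs a size class and a noscan bit into one byte:
// the low bit is noscan, the upper seven bits are the size class.
type spanClass uint8

func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
	sc := spanClass(sizeclass << 1)
	if noscan {
		sc |= 1
	}
	return sc
}

func (sc spanClass) sizeclass() int8 { return int8(sc >> 1) }
func (sc spanClass) noscan() bool    { return sc&1 != 0 }

func main() {
	sc := makeSpanClass(5, true)
	fmt.Println(sc.sizeclass(), sc.noscan()) // 5 true
}
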
-// arenaIndex returns the index into mheap_.arenas of the arena
-// containing metadata for p. This index combines an index into the
-// L1 map and an index into the L2 map and should be used as
-// mheap_.arenas[ai.l1()][ai.l2()].
-//
-// If p is outside the range of valid heap addresses, either l1() or
-// l2() will be out of bounds.
-//
-// It is nosplit because it's called by spanOf and several other
-// nosplit functions.
-//
-//go:nosplit
-func arenaIndex(p uintptr) arenaIdx {
- return arenaIdx((p - arenaBaseOffset) / heapArenaBytes)
-}
-
-// arenaBase returns the low address of the region covered by heap
-// arena i.
-func arenaBase(i arenaIdx) uintptr {
- return uintptr(i)*heapArenaBytes + arenaBaseOffset
-}
-
-type arenaIdx uint
-
-func (i arenaIdx) l1() uint {
- if arenaL1Bits == 0 {
- // Let the compiler optimize this away if there's no
- // L1 map.
- return 0
- } else {
- return uint(i) >> arenaL1Shift
- }
-}
-
-func (i arenaIdx) l2() uint {
- if arenaL1Bits == 0 {
- return uint(i)
- } else {
- return uint(i) & (1<<arenaL2Bits - 1)
- }
-}
-
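
The two-level lookup can be exercised on its own. A sketch with illustrative constants only (the real geometry is platform-dependent; amd64, for instance, uses a flat map and a nonzero arenaBaseOffset):

package main

import "fmt"

// Illustrative constants; the real values come from the runtime's
// per-platform arena geometry.
const (
	heapArenaBytes  = 64 << 20 // 64 MiB arenas, as on linux/amd64
	arenaBaseOffset = 0        // nonzero on some platforms
	arenaL1Bits     = 0        // 0 means a flat, L1-less map
	arenaL1Shift    = 22
	arenaL2Bits     = 22
)

type arenaIdx uint

func arenaIndex(p uintptr) arenaIdx {
	return arenaIdx((p - arenaBaseOffset) / heapArenaBytes)
}

func (i arenaIdx) l1() uint {
	if arenaL1Bits == 0 {
		return 0 // flat map: everything lives in arenas[0]
	}
	return uint(i) >> arenaL1Shift
}

func (i arenaIdx) l2() uint {
	if arenaL1Bits == 0 {
		return uint(i)
	}
	return uint(i) & (1<<arenaL2Bits - 1)
}

func main() {
	p := uintptr(0xc000010000) // a typical heap address
	ai := arenaIndex(p)
	fmt.Println(ai.l1(), ai.l2()) // 0 and the arena number
}
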
-// inheap reports whether b is a pointer into a (potentially dead) heap object.
-// It returns false for pointers into mSpanManual spans.
-// Non-preemptible because it is used by write barriers.
-//go:nowritebarrier
-//go:nosplit
-func inheap(b uintptr) bool {
- return spanOfHeap(b) != nil
-}
-
-// inHeapOrStack is a variant of inheap that returns true for pointers
-// into any allocated heap span.
-//
-//go:nowritebarrier
-//go:nosplit
-func inHeapOrStack(b uintptr) bool {
- s := spanOf(b)
- if s == nil || b < s.base() {
- return false
- }
- switch s.state.get() {
- case mSpanInUse, mSpanManual:
- return b < s.limit
- default:
- return false
- }
-}
-
-// spanOf returns the span of p. If p does not point into the heap
-// arena or no span has ever contained p, spanOf returns nil.
-//
-// If p does not point to allocated memory, this may return a non-nil
-// span that does *not* contain p. If this is a possibility, the
-// caller should either call spanOfHeap or check the span bounds
-// explicitly.
-//
-// Must be nosplit because it has callers that are nosplit.
-//
-//go:nosplit
-func spanOf(p uintptr) *mspan {
- // This function looks big, but we use a lot of constant
- // folding around arenaL1Bits to get it under the inlining
- // budget. Also, many of the checks here are safety checks
- // that Go needs to do anyway, so the generated code is quite
- // short.
- ri := arenaIndex(p)
- if arenaL1Bits == 0 {
- // If there's no L1, then ri.l1() can't be out of bounds but ri.l2() can.
- if ri.l2() >= uint(len(mheap_.arenas[0])) {
- return nil
- }
- } else {
- // If there's an L1, then ri.l1() can be out of bounds but ri.l2() can't.
- if ri.l1() >= uint(len(mheap_.arenas)) {
- return nil
- }
- }
- l2 := mheap_.arenas[ri.l1()]
- if arenaL1Bits != 0 && l2 == nil { // Should never happen if there's no L1.
- return nil
- }
- ha := l2[ri.l2()]
- if ha == nil {
- return nil
- }
- return ha.spans[(p/pageSize)%pagesPerArena]
-}
-
-// spanOfUnchecked is equivalent to spanOf, but the caller must ensure
-// that p points into an allocated heap arena.
-//
-// Must be nosplit because it has callers that are nosplit.
-//
-//go:nosplit
-func spanOfUnchecked(p uintptr) *mspan {
- ai := arenaIndex(p)
- return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena]
-}
-
-// spanOfHeap is like spanOf, but returns nil if p does not point to a
-// heap object.
-//
-// Must be nosplit because it has callers that are nosplit.
-//
-//go:nosplit
-func spanOfHeap(p uintptr) *mspan {
- s := spanOf(p)
- // s is nil if it's never been allocated. Otherwise, we check
- // its state first because we don't trust this pointer, so we
- // have to synchronize with span initialization. Then, it's
- // still possible we picked up a stale span pointer, so we
- // have to check the span's bounds.
- if s == nil || s.state.get() != mSpanInUse || p < s.base() || p >= s.limit {
- return nil
- }
- return s
-}
-
-// pageIndexOf returns the arena, page index, and page mask for pointer p.
-// The caller must ensure p is in the heap.
-func pageIndexOf(p uintptr) (arena *heapArena, pageIdx uintptr, pageMask uint8) {
- ai := arenaIndex(p)
- arena = mheap_.arenas[ai.l1()][ai.l2()]
- pageIdx = ((p / pageSize) / 8) % uintptr(len(arena.pageInUse))
- pageMask = byte(1 << ((p / pageSize) % 8))
- return
-}
-
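
The index/mask arithmetic is easy to check in isolation. A standalone sketch with illustrative sizes (pagesPerArena here matches 64 MiB arenas with 8 KiB pages; the real values are platform-dependent):

package main

import "fmt"

const (
	pageSize      = 8192
	pagesPerArena = 8192
)

// pageBits mirrors the pageInUse layout: one bit per page,
// packed eight pages to a byte.
var pageBits [pagesPerArena / 8]uint8

// pageIndex computes the byte index and bit mask for the page
// containing address p, as pageIndexOf does above.
func pageIndex(p uintptr) (idx uintptr, mask uint8) {
	page := p / pageSize
	return (page / 8) % uintptr(len(pageBits)), uint8(1) << (page % 8)
}

func main() {
	idx, mask := pageIndex(3 * pageSize)
	pageBits[idx] |= mask                 // mark the page in use
	fmt.Println(idx, mask, pageBits[idx]) // 0 8 8
}
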
-// Initialize the heap.
-func (h *mheap) init() {
- lockInit(&h.lock, lockRankMheap)
- lockInit(&h.speciallock, lockRankMheapSpecial)
-
- h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
- h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
- h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
- h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
- h.specialReachableAlloc.init(unsafe.Sizeof(specialReachable{}), nil, nil, &memstats.other_sys)
- h.arenaHintAlloc.init(unsafe.Sizeof(arenaHint{}), nil, nil, &memstats.other_sys)
-
- // Don't zero mspan allocations. Background sweeping can
- // inspect a span concurrently with allocating it, so it's
- // important that the span's sweepgen survive across freeing
- // and re-allocating a span to prevent background sweeping
- // from improperly cas'ing it from 0.
- //
- // This is safe because mspan contains no heap pointers.
- h.spanalloc.zero = false
-
- // h->mapcache needs no init
-
- for i := range h.central {
- h.central[i].mcentral.init(spanClass(i))
- }
-
- h.pages.init(&h.lock, &memstats.gcMiscSys)
-}
-
-// reclaim sweeps and reclaims at least npage pages into the heap.
-// It is called before allocating npage pages to keep growth in check.
-//
-// reclaim implements the page-reclaimer half of the sweeper.
-//
-// h.lock must NOT be held.
-func (h *mheap) reclaim(npage uintptr) {
- // TODO(austin): Half of the time spent freeing spans is in
- // locking/unlocking the heap (even with low contention). We
- // could make the slow path here several times faster by
- // batching heap frees.
-
- // Bail early if there's no more reclaim work.
- if h.reclaimIndex.Load() >= 1<<63 {
- return
- }
-
- // Disable preemption so the GC can't start while we're
- // sweeping, so we can read h.sweepArenas, and so
- // traceGCSweepStart/Done pair on the P.
- mp := acquirem()
-
- if trace.enabled {
- traceGCSweepStart()
- }
-
- arenas := h.sweepArenas
- locked := false
- for npage > 0 {
- // Pull from accumulated credit first.
- if credit := h.reclaimCredit.Load(); credit > 0 {
- take := credit
- if take > npage {
- // Take only what we need.
- take = npage
- }
- if h.reclaimCredit.CompareAndSwap(credit, credit-take) {
- npage -= take
- }
- continue
- }
-
- // Claim a chunk of work.
- idx := uintptr(h.reclaimIndex.Add(pagesPerReclaimerChunk) - pagesPerReclaimerChunk)
- if idx/pagesPerArena >= uintptr(len(arenas)) {
- // Page reclaiming is done.
- h.reclaimIndex.Store(1 << 63)
- break
- }
-
- if !locked {
- // Lock the heap for reclaimChunk.
- lock(&h.lock)
- locked = true
- }
-
- // Scan this chunk.
- nfound := h.reclaimChunk(arenas, idx, pagesPerReclaimerChunk)
- if nfound <= npage {
- npage -= nfound
- } else {
- // Put spare pages toward global credit.
- h.reclaimCredit.Add(nfound - npage)
- npage = 0
- }
- }
- if locked {
- unlock(&h.lock)
- }
-
- if trace.enabled {
- traceGCSweepDone()
- }
- releasem(mp)
-}
-
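
The credit loop above is a general pattern: consume from a shared pool with a compare-and-swap, retrying on contention. A standalone sketch (takeCredit is an illustrative name):

package main

import (
	"fmt"
	"sync/atomic"
)

// credit is a shared pool of already-done work, like h.reclaimCredit.
var credit atomic.Uintptr

// takeCredit consumes up to want units from the pool and reports how
// many it took. A failed CompareAndSwap just re-reads the pool and
// retries, exactly as reclaim does above.
func takeCredit(want uintptr) uintptr {
	for {
		c := credit.Load()
		if c == 0 {
			return 0
		}
		take := c
		if take > want {
			take = want // take only what we need
		}
		if credit.CompareAndSwap(c, c-take) {
			return take
		}
	}
}

func main() {
	credit.Store(10)
	fmt.Println(takeCredit(4), credit.Load()) // 4 6
}
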
-// reclaimChunk sweeps unmarked spans that start at page indexes [pageIdx, pageIdx+n).
-// It returns the number of pages returned to the heap.
-//
-// h.lock must be held and the caller must be non-preemptible. Note: h.lock may be
-// temporarily unlocked and re-locked in order to do sweeping or if tracing is
-// enabled.
-func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
- // The heap lock must be held because this accesses the
- // heapArena.spans arrays using potentially non-live pointers.
- // In particular, if a span were freed and merged concurrently
- // with this probing heapArena.spans, it would be possible to
- // observe arbitrary, stale span pointers.
- assertLockHeld(&h.lock)
-
- n0 := n
- var nFreed uintptr
- sl := sweep.active.begin()
- if !sl.valid {
- return 0
- }
- for n > 0 {
- ai := arenas[pageIdx/pagesPerArena]
- ha := h.arenas[ai.l1()][ai.l2()]
-
- // Get a chunk of the bitmap to work on.
- arenaPage := uint(pageIdx % pagesPerArena)
- inUse := ha.pageInUse[arenaPage/8:]
- marked := ha.pageMarks[arenaPage/8:]
- if uintptr(len(inUse)) > n/8 {
- inUse = inUse[:n/8]
- marked = marked[:n/8]
- }
-
- // Scan this bitmap chunk for spans that are in-use
- // but have no marked objects on them.
- for i := range inUse {
- inUseUnmarked := atomic.Load8(&inUse[i]) &^ marked[i]
- if inUseUnmarked == 0 {
- continue
- }
-
- for j := uint(0); j < 8; j++ {
- if inUseUnmarked&(1<<j) != 0 {
- s := ha.spans[arenaPage+uint(i)*8+j]
- if s, ok := sl.tryAcquire(s); ok {
- npages := s.npages
- unlock(&h.lock)
- if s.sweep(false) {
- nFreed += npages
- }
- lock(&h.lock)
- // Reload inUse. It's possible nearby
- // spans were freed when we dropped the
- // lock and we don't want to get stale
- // pointers from the spans array.
- inUseUnmarked = atomic.Load8(&inUse[i]) &^ marked[i]
- }
- }
- }
- }
-
- // Advance.
- pageIdx += uintptr(len(inUse) * 8)
- n -= uintptr(len(inUse) * 8)
- }
- sweep.active.end(sl)
- if trace.enabled {
- unlock(&h.lock)
- // Account for pages scanned but not reclaimed.
- traceGCSweepSpan((n0 - nFreed) * pageSize)
- lock(&h.lock)
- }
-
- assertLockHeld(&h.lock) // Must be locked on return.
- return nFreed
-}
-
-// spanAllocType represents the type of allocation to make, or
-// the type of allocation to be freed.
-type spanAllocType uint8
-
-const (
- spanAllocHeap spanAllocType = iota // heap span
- spanAllocStack // stack span
- spanAllocPtrScalarBits // unrolled GC prog bitmap span
- spanAllocWorkBuf // work buf span
-)
-
-// manual returns true if the span allocation is manually managed.
-func (s spanAllocType) manual() bool {
- return s != spanAllocHeap
-}
-
-// alloc allocates a new span of npage pages from the GC'd heap.
-//
-// spanclass indicates the span's size class and scannability.
-//
-// Returns a span that has been fully initialized. span.needzero indicates
-// whether the span has been zeroed. Note that it may not be.
-func (h *mheap) alloc(npages uintptr, spanclass spanClass) *mspan {
- // Don't do any operations that lock the heap on the G stack.
- // It might trigger stack growth, and the stack growth code needs
- // to be able to allocate heap.
- var s *mspan
- systemstack(func() {
- // To prevent excessive heap growth, before allocating n pages
- // we need to sweep and reclaim at least n pages.
- if !isSweepDone() {
- h.reclaim(npages)
- }
- s = h.allocSpan(npages, spanAllocHeap, spanclass)
- })
- return s
-}
-
-// allocManual allocates a manually-managed span of npage pages.
-// allocManual returns nil if allocation fails.
-//
-// allocManual adds the bytes used to *stat, which should be a
-// memstats in-use field. Unlike allocations in the GC'd heap, the
-// allocation does *not* count toward heap_inuse or heap_sys.
-//
-// The memory backing the returned span may not be zeroed if
-// span.needzero is set.
-//
-// allocManual must be called on the system stack because it may
-// acquire the heap lock via allocSpan. See mheap for details.
-//
-// If new code is written to call allocManual, do NOT use an
-// existing spanAllocType value and instead declare a new one.
-//
-//go:systemstack
-func (h *mheap) allocManual(npages uintptr, typ spanAllocType) *mspan {
- if !typ.manual() {
- throw("manual span allocation called with non-manually-managed type")
- }
- return h.allocSpan(npages, typ, 0)
-}
-
-// setSpans modifies the span map so [spanOf(base), spanOf(base+npage*pageSize))
-// is s.
-func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
- p := base / pageSize
- ai := arenaIndex(base)
- ha := h.arenas[ai.l1()][ai.l2()]
- for n := uintptr(0); n < npage; n++ {
- i := (p + n) % pagesPerArena
- if i == 0 {
- ai = arenaIndex(base + n*pageSize)
- ha = h.arenas[ai.l1()][ai.l2()]
- }
- ha.spans[i] = s
- }
-}
-
-// allocNeedsZero checks if the region of address space [base, base+npage*pageSize),
-// assumed to be allocated, needs to be zeroed, updating heap arena metadata for
-// future allocations.
-//
-// This must be called each time pages are allocated from the heap, even if the page
-// allocator can otherwise prove the memory it's allocating is already zero because
-// it's fresh from the operating system. It updates heapArena metadata that is
-// critical for future page allocations.
-//
-// There are no locking constraints on this method.
-func (h *mheap) allocNeedsZero(base, npage uintptr) (needZero bool) {
- for npage > 0 {
- ai := arenaIndex(base)
- ha := h.arenas[ai.l1()][ai.l2()]
-
- zeroedBase := atomic.Loaduintptr(&ha.zeroedBase)
- arenaBase := base % heapArenaBytes
- if arenaBase < zeroedBase {
- // We extended into the non-zeroed part of the
- // arena, so this region needs to be zeroed before use.
- //
- // zeroedBase is monotonically increasing, so if we see this now then
- // we can be sure we need to zero this memory region.
- //
- // We still need to update zeroedBase for this arena, and
- // potentially more arenas.
- needZero = true
- }
- // We may observe arenaBase > zeroedBase if we're racing with one or more
- // allocations which are acquiring memory directly before us in the address
- // space. But, because we know no one else is acquiring *this* memory, it's
- // still safe to not zero.
-
-		// Compute how far into the arena we extend, capped
- // at heapArenaBytes.
- arenaLimit := arenaBase + npage*pageSize
- if arenaLimit > heapArenaBytes {
- arenaLimit = heapArenaBytes
- }
- // Increase ha.zeroedBase so it's >= arenaLimit.
- // We may be racing with other updates.
- for arenaLimit > zeroedBase {
- if atomic.Casuintptr(&ha.zeroedBase, zeroedBase, arenaLimit) {
- break
- }
- zeroedBase = atomic.Loaduintptr(&ha.zeroedBase)
- // Double check basic conditions of zeroedBase.
- if zeroedBase <= arenaLimit && zeroedBase > arenaBase {
- // The zeroedBase moved into the space we were trying to
- // claim. That's very bad, and indicates someone allocated
- // the same region we did.
- throw("potentially overlapping in-use allocations detected")
- }
- }
-
- // Move base forward and subtract from npage to move into
- // the next arena, or finish.
- base += arenaLimit - arenaBase
- npage -= (arenaLimit - arenaBase) / pageSize
- }
- return
-}
-
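
The zeroedBase protocol reduces to a monotonic watermark advanced by CAS. A standalone sketch under that reading (advanceZeroedBase is an illustrative name; the overlap sanity check that throws in the runtime is omitted):

package main

import (
	"fmt"
	"sync/atomic"
)

// zeroedBase is the low-water mark of never-used memory in one
// arena, as in heapArena. It only grows, so a failed CAS means a
// racing allocator advanced it past our limit already.
var zeroedBase atomic.Uintptr

// advanceZeroedBase raises the mark to at least limit and reports
// whether [base, limit) overlaps previously used (non-zero) memory.
func advanceZeroedBase(base, limit uintptr) (needZero bool) {
	zb := zeroedBase.Load()
	needZero = base < zb
	for limit > zb {
		if zeroedBase.CompareAndSwap(zb, limit) {
			break
		}
		zb = zeroedBase.Load()
	}
	return needZero
}

func main() {
	fmt.Println(advanceZeroedBase(0, 4096)) // false: fresh memory
	fmt.Println(advanceZeroedBase(0, 8192)) // true: reuses [0, 4096)
	fmt.Println(zeroedBase.Load())          // 8192
}
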
-// tryAllocMSpan attempts to allocate an mspan object from
-// the P-local cache, but may fail.
-//
-// h.lock need not be held.
-//
-// The caller must ensure that its P won't change underneath
-// it during this function. Currently we enforce this by requiring
-// that the function run on the system stack, because that's
-// the only place it is used now. In the future, this requirement
-// may be relaxed if its use is necessary elsewhere.
-//
-//go:systemstack
-func (h *mheap) tryAllocMSpan() *mspan {
- pp := getg().m.p.ptr()
- // If we don't have a p or the cache is empty, we can't do
- // anything here.
- if pp == nil || pp.mspancache.len == 0 {
- return nil
- }
- // Pull off the last entry in the cache.
- s := pp.mspancache.buf[pp.mspancache.len-1]
- pp.mspancache.len--
- return s
-}
-
-// allocMSpanLocked allocates an mspan object.
-//
-// h.lock must be held.
-//
-// allocMSpanLocked must be called on the system stack because
-// its caller holds the heap lock. See mheap for details.
-// Running on the system stack also ensures that we won't
-// switch Ps during this function. See tryAllocMSpan for details.
-//
-//go:systemstack
-func (h *mheap) allocMSpanLocked() *mspan {
- assertLockHeld(&h.lock)
-
- pp := getg().m.p.ptr()
- if pp == nil {
- // We don't have a p so just do the normal thing.
- return (*mspan)(h.spanalloc.alloc())
- }
- // Refill the cache if necessary.
- if pp.mspancache.len == 0 {
- const refillCount = len(pp.mspancache.buf) / 2
- for i := 0; i < refillCount; i++ {
- pp.mspancache.buf[i] = (*mspan)(h.spanalloc.alloc())
- }
- pp.mspancache.len = refillCount
- }
- // Pull off the last entry in the cache.
- s := pp.mspancache.buf[pp.mspancache.len-1]
- pp.mspancache.len--
- return s
-}
-
-// freeMSpanLocked frees an mspan object.
-//
-// h.lock must be held.
-//
-// freeMSpanLocked must be called on the system stack because
-// its caller holds the heap lock. See mheap for details.
-// Running on the system stack also ensures that we won't
-// switch Ps during this function. See tryAllocMSpan for details.
-//
-//go:systemstack
-func (h *mheap) freeMSpanLocked(s *mspan) {
- assertLockHeld(&h.lock)
-
- pp := getg().m.p.ptr()
- // First try to free the mspan directly to the cache.
- if pp != nil && pp.mspancache.len < len(pp.mspancache.buf) {
- pp.mspancache.buf[pp.mspancache.len] = s
- pp.mspancache.len++
- return
- }
- // Failing that (or if we don't have a p), just free it to
- // the heap.
- h.spanalloc.free(unsafe.Pointer(s))
-}
-
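
Together, tryAllocMSpan, allocMSpanLocked, and freeMSpanLocked implement a fixed-size per-P object cache with a locked fallback. A minimal single-threaded sketch of the cache half (spanCache and *int stand in for the runtime types):

package main

import "fmt"

// spanCache is a fixed-size free-object cache in the spirit of
// p.mspancache: allocations pop from the array, frees push onto it,
// and both fall back to a slower allocator when it is empty or full.
// No locking is needed because the real cache is only ever touched
// by its owning P.
type spanCache struct {
	buf [128]*int
	len int
}

func (c *spanCache) tryAlloc() *int {
	if c.len == 0 {
		return nil // caller falls back to the locked allocator
	}
	c.len--
	return c.buf[c.len]
}

func (c *spanCache) tryFree(s *int) bool {
	if c.len == len(c.buf) {
		return false // cache full: caller frees to the heap instead
	}
	c.buf[c.len] = s
	c.len++
	return true
}

func main() {
	var c spanCache
	s := new(int)
	fmt.Println(c.tryFree(s), c.tryAlloc() == s) // true true
}
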
-// allocSpan allocates an mspan which owns npages worth of memory.
-//
-// If typ.manual() == false, allocSpan allocates a heap span of class spanclass
-// and updates heap accounting. If typ.manual() == true, allocSpan allocates a
-// manually-managed span (spanclass is ignored), and the caller is
-// responsible for any accounting related to its use of the span. Either
-// way, allocSpan will atomically add the bytes in the newly allocated
-// span to *sysStat.
-//
-// The returned span is fully initialized.
-//
-// h.lock must not be held.
-//
-// allocSpan must be called on the system stack both because it acquires
-// the heap lock and because it must block GC transitions.
-//
-//go:systemstack
-func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass) (s *mspan) {
- // Function-global state.
- gp := getg()
- base, scav := uintptr(0), uintptr(0)
- growth := uintptr(0)
-
- // On some platforms we need to provide physical page aligned stack
- // allocations. Where the page size is less than the physical page
- // size, we already manage to do this by default.
- needPhysPageAlign := physPageAlignedStacks && typ == spanAllocStack && pageSize < physPageSize
-
- // If the allocation is small enough, try the page cache!
- // The page cache does not support aligned allocations, so we cannot use
- // it if we need to provide a physical page aligned stack allocation.
- pp := gp.m.p.ptr()
- if !needPhysPageAlign && pp != nil && npages < pageCachePages/4 {
- c := &pp.pcache
-
- // If the cache is empty, refill it.
- if c.empty() {
- lock(&h.lock)
- *c = h.pages.allocToCache()
- unlock(&h.lock)
- }
-
- // Try to allocate from the cache.
- base, scav = c.alloc(npages)
- if base != 0 {
- s = h.tryAllocMSpan()
- if s != nil {
- goto HaveSpan
- }
- // We have a base but no mspan, so we need
- // to lock the heap.
- }
- }
-
- // For one reason or another, we couldn't get the
- // whole job done without the heap lock.
- lock(&h.lock)
-
- if needPhysPageAlign {
- // Overallocate by a physical page to allow for later alignment.
- npages += physPageSize / pageSize
- }
-
- if base == 0 {
- // Try to acquire a base address.
- base, scav = h.pages.alloc(npages)
- if base == 0 {
- var ok bool
- growth, ok = h.grow(npages)
- if !ok {
- unlock(&h.lock)
- return nil
- }
- base, scav = h.pages.alloc(npages)
- if base == 0 {
- throw("grew heap, but no adequate free space found")
- }
- }
- }
- if s == nil {
- // We failed to get an mspan earlier, so grab
- // one now that we have the heap lock.
- s = h.allocMSpanLocked()
- }
-
- if needPhysPageAlign {
- allocBase, allocPages := base, npages
- base = alignUp(allocBase, physPageSize)
- npages -= physPageSize / pageSize
-
- // Return memory around the aligned allocation.
- spaceBefore := base - allocBase
- if spaceBefore > 0 {
- h.pages.free(allocBase, spaceBefore/pageSize, false)
- }
- spaceAfter := (allocPages-npages)*pageSize - spaceBefore
- if spaceAfter > 0 {
- h.pages.free(base+npages*pageSize, spaceAfter/pageSize, false)
- }
- }
-
- unlock(&h.lock)
-
- if growth > 0 {
- // We just caused a heap growth, so scavenge down what will soon be used.
- // By scavenging inline we deal with the failure to allocate out of
- // memory fragments by scavenging the memory fragments that are least
- // likely to be re-used.
- scavengeGoal := atomic.Load64(&h.scavengeGoal)
- if retained := heapRetained(); retained+uint64(growth) > scavengeGoal {
- // The scavenging algorithm requires the heap lock to be dropped so it
- // can acquire it only sparingly. This is a potentially expensive operation
- // so it frees up other goroutines to allocate in the meanwhile. In fact,
- // they can make use of the growth we just created.
- todo := growth
- if overage := uintptr(retained + uint64(growth) - scavengeGoal); todo > overage {
- todo = overage
- }
- h.pages.scavenge(todo)
- }
- }
-
-HaveSpan:
- // At this point, both s != nil and base != 0, and the heap
- // lock is no longer held. Initialize the span.
- s.init(base, npages)
- if h.allocNeedsZero(base, npages) {
- s.needzero = 1
- }
- nbytes := npages * pageSize
- if typ.manual() {
- s.manualFreeList = 0
- s.nelems = 0
- s.limit = s.base() + s.npages*pageSize
- s.state.set(mSpanManual)
- } else {
- // We must set span properties before the span is published anywhere
- // since we're not holding the heap lock.
- s.spanclass = spanclass
- if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
- s.elemsize = nbytes
- s.nelems = 1
- s.divMul = 0
- } else {
- s.elemsize = uintptr(class_to_size[sizeclass])
- s.nelems = nbytes / s.elemsize
- s.divMul = class_to_divmagic[sizeclass]
- }
-
- // Initialize mark and allocation structures.
- s.freeindex = 0
- s.allocCache = ^uint64(0) // all 1s indicating all free.
- s.gcmarkBits = newMarkBits(s.nelems)
- s.allocBits = newAllocBits(s.nelems)
-
- // It's safe to access h.sweepgen without the heap lock because it's
- // only ever updated with the world stopped and we run on the
- // systemstack which blocks a STW transition.
- atomic.Store(&s.sweepgen, h.sweepgen)
-
- // Now that the span is filled in, set its state. This
- // is a publication barrier for the other fields in
- // the span. While valid pointers into this span
- // should never be visible until the span is returned,
- // if the garbage collector finds an invalid pointer,
- // access to the span may race with initialization of
- // the span. We resolve this race by atomically
- // setting the state after the span is fully
- // initialized, and atomically checking the state in
- // any situation where a pointer is suspect.
- s.state.set(mSpanInUse)
- }
-
- // Commit and account for any scavenged memory that the span now owns.
- if scav != 0 {
- // sysUsed all the pages that are actually available
- // in the span since some of them might be scavenged.
- sysUsed(unsafe.Pointer(base), nbytes)
- atomic.Xadd64(&memstats.heap_released, -int64(scav))
- }
- // Update stats.
- if typ == spanAllocHeap {
- atomic.Xadd64(&memstats.heap_inuse, int64(nbytes))
- }
- if typ.manual() {
- // Manually managed memory doesn't count toward heap_sys.
- memstats.heap_sys.add(-int64(nbytes))
- }
- // Update consistent stats.
- stats := memstats.heapStats.acquire()
- atomic.Xaddint64(&stats.committed, int64(scav))
- atomic.Xaddint64(&stats.released, -int64(scav))
- switch typ {
- case spanAllocHeap:
- atomic.Xaddint64(&stats.inHeap, int64(nbytes))
- case spanAllocStack:
- atomic.Xaddint64(&stats.inStacks, int64(nbytes))
- case spanAllocPtrScalarBits:
- atomic.Xaddint64(&stats.inPtrScalarBits, int64(nbytes))
- case spanAllocWorkBuf:
- atomic.Xaddint64(&stats.inWorkBufs, int64(nbytes))
- }
- memstats.heapStats.release()
-
- // Publish the span in various locations.
-
- // This is safe to call without the lock held because the slots
- // related to this span will only ever be read or modified by
- // this thread until pointers into the span are published (and
- // we execute a publication barrier at the end of this function
- // before that happens) or pageInUse is updated.
- h.setSpans(s.base(), npages, s)
-
- if !typ.manual() {
- // Mark in-use span in arena page bitmap.
- //
- // This publishes the span to the page sweeper, so
- // it's imperative that the span be completely initialized
- // prior to this line.
- arena, pageIdx, pageMask := pageIndexOf(s.base())
- atomic.Or8(&arena.pageInUse[pageIdx], pageMask)
-
- // Update related page sweeper stats.
- h.pagesInUse.Add(int64(npages))
- }
-
- // Make sure the newly allocated span will be observed
- // by the GC before pointers into the span are published.
- publicationBarrier()
-
- return s
-}
-
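
The needPhysPageAlign path overallocates by one physical page, aligns the base up, and returns the slop on both sides. The arithmetic in isolation (alignSpan is an illustrative name; sizes are examples):

package main

import "fmt"

const (
	pageSize     = 8192
	physPageSize = 16384 // physical pages larger than runtime pages
)

func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

// alignSpan mimics the alignment step of allocSpan: allocate npages
// plus one physical page's worth, align the base up, and compute the
// space to free before and after the aligned region.
func alignSpan(base, npages uintptr) (newBase, spaceBefore, spaceAfter uintptr) {
	allocBase, allocPages := base, npages+physPageSize/pageSize
	newBase = alignUp(allocBase, physPageSize)
	spaceBefore = newBase - allocBase
	spaceAfter = (allocPages-npages)*pageSize - spaceBefore
	return
}

func main() {
	base, before, after := alignSpan(3*pageSize, 4)
	fmt.Println(base, before, after) // 32768 8192 8192
}
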
-// Try to add at least npage pages of memory to the heap,
-// returning how much the heap grew by and whether it worked.
-//
-// h.lock must be held.
-func (h *mheap) grow(npage uintptr) (uintptr, bool) {
- assertLockHeld(&h.lock)
-
- // We must grow the heap in whole palloc chunks.
-	// We call sysMap below, but note that because we
-	// round up to pallocChunkPages, which is on the order
-	// of MiB (generally at least the huge page size), we
-	// won't be calling it too often.
- ask := alignUp(npage, pallocChunkPages) * pageSize
-
- totalGrowth := uintptr(0)
- // This may overflow because ask could be very large
- // and is otherwise unrelated to h.curArena.base.
- end := h.curArena.base + ask
- nBase := alignUp(end, physPageSize)
- if nBase > h.curArena.end || /* overflow */ end < h.curArena.base {
- // Not enough room in the current arena. Allocate more
- // arena space. This may not be contiguous with the
- // current arena, so we have to request the full ask.
- av, asize := h.sysAlloc(ask)
- if av == nil {
- print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
- return 0, false
- }
-
- if uintptr(av) == h.curArena.end {
- // The new space is contiguous with the old
- // space, so just extend the current space.
- h.curArena.end = uintptr(av) + asize
- } else {
- // The new space is discontiguous. Track what
- // remains of the current space and switch to
- // the new space. This should be rare.
- if size := h.curArena.end - h.curArena.base; size != 0 {
-			// Transition this space from Reserved to Prepared and mark it
-			// as released, since we'll be able to start using it at any time
-			// after updating the page allocator and releasing the lock.
- sysMap(unsafe.Pointer(h.curArena.base), size, &memstats.heap_sys)
- // Update stats.
- atomic.Xadd64(&memstats.heap_released, int64(size))
- stats := memstats.heapStats.acquire()
- atomic.Xaddint64(&stats.released, int64(size))
- memstats.heapStats.release()
- // Update the page allocator's structures to make this
- // space ready for allocation.
- h.pages.grow(h.curArena.base, size)
- totalGrowth += size
- }
- // Switch to the new space.
- h.curArena.base = uintptr(av)
- h.curArena.end = uintptr(av) + asize
- }
-
- // Recalculate nBase.
- // We know this won't overflow, because sysAlloc returned
- // a valid region starting at h.curArena.base which is at
- // least ask bytes in size.
- nBase = alignUp(h.curArena.base+ask, physPageSize)
- }
-
- // Grow into the current arena.
- v := h.curArena.base
- h.curArena.base = nBase
-
- // Transition the space we're going to use from Reserved to Prepared.
- sysMap(unsafe.Pointer(v), nBase-v, &memstats.heap_sys)
-
- // The memory just allocated counts as both released
- // and idle, even though it's not yet backed by spans.
- //
- // The allocation is always aligned to the heap arena
-	// size, which is always > physPageSize, so it's safe to
- // just add directly to heap_released.
- atomic.Xadd64(&memstats.heap_released, int64(nBase-v))
- stats := memstats.heapStats.acquire()
- atomic.Xaddint64(&stats.released, int64(nBase-v))
- memstats.heapStats.release()
-
- // Update the page allocator's structures to make this
- // space ready for allocation.
- h.pages.grow(v, nBase-v)
- totalGrowth += nBase - v
- return totalGrowth, true
-}
-
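
The rounding up front means grow calls sysMap in multi-megabyte steps. A quick standalone check of that arithmetic (alignUp is reproduced inline since the runtime helper is not in this listing; the constants are those from mpagealloc.go below):

package main

import "fmt"

const (
	pageSize         = 8192
	pallocChunkPages = 512 // 1 << logPallocChunkPages
)

func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

func main() {
	// Every growth request is rounded up to whole palloc chunks,
	// so even a one-page request maps a full 4 MiB chunk.
	for _, npage := range []uintptr{1, 512, 513} {
		ask := alignUp(npage, pallocChunkPages) * pageSize
		fmt.Println(npage, "->", ask) // 1 -> 4194304, ...
	}
}
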
-// Free the span back into the heap.
-func (h *mheap) freeSpan(s *mspan) {
- systemstack(func() {
- lock(&h.lock)
- if msanenabled {
- // Tell msan that this entire span is no longer in use.
- base := unsafe.Pointer(s.base())
- bytes := s.npages << _PageShift
- msanfree(base, bytes)
- }
- if asanenabled {
- // Tell asan that this entire span is no longer in use.
- base := unsafe.Pointer(s.base())
- bytes := s.npages << _PageShift
- asanpoison(base, bytes)
- }
- h.freeSpanLocked(s, spanAllocHeap)
- unlock(&h.lock)
- })
-}
-
-// freeManual frees a manually-managed span returned by allocManual.
-// typ must be the same as the spanAllocType passed to the allocManual that
-// allocated s.
-//
-// This must only be called when gcphase == _GCoff. See mSpanState for
-// an explanation.
-//
-// freeManual must be called on the system stack because it acquires
-// the heap lock. See mheap for details.
-//
-//go:systemstack
-func (h *mheap) freeManual(s *mspan, typ spanAllocType) {
- s.needzero = 1
- lock(&h.lock)
- h.freeSpanLocked(s, typ)
- unlock(&h.lock)
-}
-
-func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
- assertLockHeld(&h.lock)
-
- switch s.state.get() {
- case mSpanManual:
- if s.allocCount != 0 {
- throw("mheap.freeSpanLocked - invalid stack free")
- }
- case mSpanInUse:
- if s.allocCount != 0 || s.sweepgen != h.sweepgen {
- print("mheap.freeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
- throw("mheap.freeSpanLocked - invalid free")
- }
- h.pagesInUse.Add(-int64(s.npages))
-
- // Clear in-use bit in arena page bitmap.
- arena, pageIdx, pageMask := pageIndexOf(s.base())
- atomic.And8(&arena.pageInUse[pageIdx], ^pageMask)
- default:
- throw("mheap.freeSpanLocked - invalid span state")
- }
-
- // Update stats.
- //
- // Mirrors the code in allocSpan.
- nbytes := s.npages * pageSize
- if typ == spanAllocHeap {
- atomic.Xadd64(&memstats.heap_inuse, -int64(nbytes))
- }
- if typ.manual() {
- // Manually managed memory doesn't count toward heap_sys, so add it back.
- memstats.heap_sys.add(int64(nbytes))
- }
- // Update consistent stats.
- stats := memstats.heapStats.acquire()
- switch typ {
- case spanAllocHeap:
- atomic.Xaddint64(&stats.inHeap, -int64(nbytes))
- case spanAllocStack:
- atomic.Xaddint64(&stats.inStacks, -int64(nbytes))
- case spanAllocPtrScalarBits:
- atomic.Xaddint64(&stats.inPtrScalarBits, -int64(nbytes))
- case spanAllocWorkBuf:
- atomic.Xaddint64(&stats.inWorkBufs, -int64(nbytes))
- }
- memstats.heapStats.release()
-
- // Mark the space as free.
- h.pages.free(s.base(), s.npages, false)
-
- // Free the span structure. We no longer have a use for it.
- s.state.set(mSpanDead)
- h.freeMSpanLocked(s)
-}
-
-// scavengeAll acquires the heap lock (blocking any additional
-// manipulation of the page allocator) and iterates over the whole
-// heap, scavenging every free page available.
-func (h *mheap) scavengeAll() {
- // Disallow malloc or panic while holding the heap lock. We do
- // this here because this is a non-mallocgc entry-point to
- // the mheap API.
- gp := getg()
- gp.m.mallocing++
-
- lock(&h.lock)
- // Start a new scavenge generation so we have a chance to walk
- // over the whole heap.
- h.pages.scavengeStartGen()
- unlock(&h.lock)
-
- released := h.pages.scavenge(^uintptr(0))
-
- lock(&h.pages.scav.lock)
- gen := h.pages.scav.gen
- unlock(&h.pages.scav.lock)
-
- gp.m.mallocing--
-
- if debug.scavtrace > 0 {
- printScavTrace(gen, released, true)
- }
-}
-
-//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
-func runtime_debug_freeOSMemory() {
- GC()
- systemstack(func() { mheap_.scavengeAll() })
-}
-
-// Initialize a new span with the given start and npages.
-func (span *mspan) init(base uintptr, npages uintptr) {
- // span is *not* zeroed.
- span.next = nil
- span.prev = nil
- span.list = nil
- span.startAddr = base
- span.npages = npages
- span.allocCount = 0
- span.spanclass = 0
- span.elemsize = 0
- span.speciallock.key = 0
- span.specials = nil
- span.needzero = 0
- span.freeindex = 0
- span.allocBits = nil
- span.gcmarkBits = nil
- span.state.set(mSpanDead)
- lockInit(&span.speciallock, lockRankMspanSpecial)
-}
-
-func (span *mspan) inList() bool {
- return span.list != nil
-}
-
-// Initialize an empty doubly-linked list.
-func (list *mSpanList) init() {
- list.first = nil
- list.last = nil
-}
-
-func (list *mSpanList) remove(span *mspan) {
- if span.list != list {
- print("runtime: failed mSpanList.remove span.npages=", span.npages,
- " span=", span, " prev=", span.prev, " span.list=", span.list, " list=", list, "\n")
- throw("mSpanList.remove")
- }
- if list.first == span {
- list.first = span.next
- } else {
- span.prev.next = span.next
- }
- if list.last == span {
- list.last = span.prev
- } else {
- span.next.prev = span.prev
- }
- span.next = nil
- span.prev = nil
- span.list = nil
-}
-
-func (list *mSpanList) isEmpty() bool {
- return list.first == nil
-}
-
-func (list *mSpanList) insert(span *mspan) {
- if span.next != nil || span.prev != nil || span.list != nil {
- println("runtime: failed mSpanList.insert", span, span.next, span.prev, span.list)
- throw("mSpanList.insert")
- }
- span.next = list.first
- if list.first != nil {
- // The list contains at least one span; link it in.
- // The last span in the list doesn't change.
- list.first.prev = span
- } else {
- // The list contains no spans, so this is also the last span.
- list.last = span
- }
- list.first = span
- span.list = list
-}
-
-func (list *mSpanList) insertBack(span *mspan) {
- if span.next != nil || span.prev != nil || span.list != nil {
- println("runtime: failed mSpanList.insertBack", span, span.next, span.prev, span.list)
- throw("mSpanList.insertBack")
- }
- span.prev = list.last
- if list.last != nil {
- // The list contains at least one span.
- list.last.next = span
- } else {
- // The list contains no spans, so this is also the first span.
- list.first = span
- }
- list.last = span
- span.list = list
-}
-
-// takeAll removes all spans from other and inserts them at the front
-// of list.
-func (list *mSpanList) takeAll(other *mSpanList) {
- if other.isEmpty() {
- return
- }
-
- // Reparent everything in other to list.
- for s := other.first; s != nil; s = s.next {
- s.list = list
- }
-
- // Concatenate the lists.
- if list.isEmpty() {
- *list = *other
- } else {
- // Neither list is empty. Put other before list.
- other.last.next = list.first
- list.first.prev = other.last
- list.first = other.first
- }
-
- other.first, other.last = nil, nil
-}
-
-const (
- _KindSpecialFinalizer = 1
- _KindSpecialProfile = 2
- // _KindSpecialReachable is a special used for tracking
- // reachability during testing.
- _KindSpecialReachable = 3
- // Note: The finalizer special must be first because if we're freeing
- // an object, a finalizer special will cause the freeing operation
- // to abort, and we want to keep the other special records around
- // if that happens.
-)
-
-//go:notinheap
-type special struct {
- next *special // linked list in span
- offset uint16 // span offset of object
- kind byte // kind of special
-}
-
-// spanHasSpecials marks a span as having specials in the arena bitmap.
-func spanHasSpecials(s *mspan) {
- arenaPage := (s.base() / pageSize) % pagesPerArena
- ai := arenaIndex(s.base())
- ha := mheap_.arenas[ai.l1()][ai.l2()]
- atomic.Or8(&ha.pageSpecials[arenaPage/8], uint8(1)<<(arenaPage%8))
-}
-
-// spanHasNoSpecials marks a span as having no specials in the arena bitmap.
-func spanHasNoSpecials(s *mspan) {
- arenaPage := (s.base() / pageSize) % pagesPerArena
- ai := arenaIndex(s.base())
- ha := mheap_.arenas[ai.l1()][ai.l2()]
- atomic.And8(&ha.pageSpecials[arenaPage/8], ^(uint8(1) << (arenaPage % 8)))
-}
-
-// Adds the special record s to the list of special records for
-// the object p. All fields of s should be filled in except for
-// offset & next, which this routine will fill in.
-// Returns true if the special was successfully added, false otherwise.
-// (The add will fail only if a record with the same p and s->kind
-// already exists.)
-func addspecial(p unsafe.Pointer, s *special) bool {
- span := spanOfHeap(uintptr(p))
- if span == nil {
- throw("addspecial on invalid pointer")
- }
-
- // Ensure that the span is swept.
- // Sweeping accesses the specials list w/o locks, so we have
- // to synchronize with it. And it's just much safer.
- mp := acquirem()
- span.ensureSwept()
-
- offset := uintptr(p) - span.base()
- kind := s.kind
-
- lock(&span.speciallock)
-
- // Find splice point, check for existing record.
- t := &span.specials
- for {
- x := *t
- if x == nil {
- break
- }
- if offset == uintptr(x.offset) && kind == x.kind {
- unlock(&span.speciallock)
- releasem(mp)
- return false // already exists
- }
- if offset < uintptr(x.offset) || (offset == uintptr(x.offset) && kind < x.kind) {
- break
- }
- t = &x.next
- }
-
- // Splice in record, fill in offset.
- s.offset = uint16(offset)
- s.next = *t
- *t = s
- spanHasSpecials(span)
- unlock(&span.speciallock)
- releasem(mp)
-
- return true
-}
-
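
The splice-point walk generalizes to any sorted singly-linked list: keep a pointer to the link to rewrite rather than to the node. A standalone sketch (addSorted is an illustrative name):

package main

import "fmt"

type special struct {
	next   *special
	offset uint16
	kind   byte
}

// addSorted splices s into a list kept sorted by (offset, kind),
// rejecting duplicates. t always points at the link to rewrite,
// the same **special technique addspecial uses above.
func addSorted(head **special, s *special) bool {
	t := head
	for x := *t; x != nil; x = *t {
		if x.offset == s.offset && x.kind == s.kind {
			return false // already exists
		}
		if s.offset < x.offset || (s.offset == x.offset && s.kind < x.kind) {
			break
		}
		t = &x.next
	}
	s.next = *t
	*t = s
	return true
}

func main() {
	var head *special
	addSorted(&head, &special{offset: 16, kind: 1})
	addSorted(&head, &special{offset: 8, kind: 2})
	for s := head; s != nil; s = s.next {
		fmt.Println(s.offset, s.kind) // 8 2, then 16 1
	}
}
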
-// Removes the Special record of the given kind for the object p.
-// Returns the record if the record existed, nil otherwise.
-// The caller must FixAlloc_Free the result.
-func removespecial(p unsafe.Pointer, kind uint8) *special {
- span := spanOfHeap(uintptr(p))
- if span == nil {
- throw("removespecial on invalid pointer")
- }
-
- // Ensure that the span is swept.
- // Sweeping accesses the specials list w/o locks, so we have
- // to synchronize with it. And it's just much safer.
- mp := acquirem()
- span.ensureSwept()
-
- offset := uintptr(p) - span.base()
-
- var result *special
- lock(&span.speciallock)
- t := &span.specials
- for {
- s := *t
- if s == nil {
- break
- }
- // This function is used for finalizers only, so we don't check for
- // "interior" specials (p must be exactly equal to s->offset).
- if offset == uintptr(s.offset) && kind == s.kind {
- *t = s.next
- result = s
- break
- }
- t = &s.next
- }
- if span.specials == nil {
- spanHasNoSpecials(span)
- }
- unlock(&span.speciallock)
- releasem(mp)
- return result
-}
-
-// The described object has a finalizer set for it.
-//
-// specialfinalizer is allocated from non-GC'd memory, so any heap
-// pointers must be specially handled.
-//
-//go:notinheap
-type specialfinalizer struct {
- special special
- fn *funcval // May be a heap pointer.
- nret uintptr
- fint *_type // May be a heap pointer, but always live.
- ot *ptrtype // May be a heap pointer, but always live.
-}
-
-// Adds a finalizer to the object p. Returns true if it succeeded.
-func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
- lock(&mheap_.speciallock)
- s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
- unlock(&mheap_.speciallock)
- s.special.kind = _KindSpecialFinalizer
- s.fn = f
- s.nret = nret
- s.fint = fint
- s.ot = ot
- if addspecial(p, &s.special) {
- // This is responsible for maintaining the same
- // GC-related invariants as markrootSpans in any
- // situation where it's possible that markrootSpans
- // has already run but mark termination hasn't yet.
- if gcphase != _GCoff {
- base, _, _ := findObject(uintptr(p), 0, 0)
- mp := acquirem()
- gcw := &mp.p.ptr().gcw
- // Mark everything reachable from the object
- // so it's retained for the finalizer.
- scanobject(base, gcw)
- // Mark the finalizer itself, since the
- // special isn't part of the GC'd heap.
- scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
- releasem(mp)
- }
- return true
- }
-
- // There was an old finalizer
- lock(&mheap_.speciallock)
- mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
- unlock(&mheap_.speciallock)
- return false
-}
-
-// Removes the finalizer (if any) from the object p.
-func removefinalizer(p unsafe.Pointer) {
- s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
- if s == nil {
- return // there wasn't a finalizer to remove
- }
- lock(&mheap_.speciallock)
- mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
- unlock(&mheap_.speciallock)
-}
-
-// The described object is being heap profiled.
-//
-//go:notinheap
-type specialprofile struct {
- special special
- b *bucket
-}
-
-// Set the heap profile bucket associated with addr to b.
-func setprofilebucket(p unsafe.Pointer, b *bucket) {
- lock(&mheap_.speciallock)
- s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
- unlock(&mheap_.speciallock)
- s.special.kind = _KindSpecialProfile
- s.b = b
- if !addspecial(p, &s.special) {
- throw("setprofilebucket: profile already set")
- }
-}
-
-// specialReachable tracks whether an object is reachable on the next
-// GC cycle. This is used by testing.
-type specialReachable struct {
- special special
- done bool
- reachable bool
-}
-
-// specialsIter helps iterate over specials lists.
-type specialsIter struct {
- pprev **special
- s *special
-}
-
-func newSpecialsIter(span *mspan) specialsIter {
- return specialsIter{&span.specials, span.specials}
-}
-
-func (i *specialsIter) valid() bool {
- return i.s != nil
-}
-
-func (i *specialsIter) next() {
- i.pprev = &i.s.next
- i.s = *i.pprev
-}
-
-// unlinkAndNext removes the current special from the list and moves
-// the iterator to the next special. It returns the unlinked special.
-func (i *specialsIter) unlinkAndNext() *special {
- cur := i.s
- i.s = cur.next
- *i.pprev = i.s
- return cur
-}
-
-// freeSpecial performs any cleanup on special s and deallocates it.
-// s must already be unlinked from the specials list.
-func freeSpecial(s *special, p unsafe.Pointer, size uintptr) {
- switch s.kind {
- case _KindSpecialFinalizer:
- sf := (*specialfinalizer)(unsafe.Pointer(s))
- queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
- lock(&mheap_.speciallock)
- mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
- unlock(&mheap_.speciallock)
- case _KindSpecialProfile:
- sp := (*specialprofile)(unsafe.Pointer(s))
- mProf_Free(sp.b, size)
- lock(&mheap_.speciallock)
- mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
- unlock(&mheap_.speciallock)
- case _KindSpecialReachable:
- sp := (*specialReachable)(unsafe.Pointer(s))
- sp.done = true
- // The creator frees these.
- default:
- throw("bad special kind")
- panic("not reached")
- }
-}
-
-// gcBits is an alloc/mark bitmap. This is always used as *gcBits.
-//
-//go:notinheap
-type gcBits uint8
-
-// bytep returns a pointer to the n'th byte of b.
-func (b *gcBits) bytep(n uintptr) *uint8 {
- return addb((*uint8)(b), n)
-}
-
-// bitp returns a pointer to the byte containing bit n and a mask for
-// selecting that bit from *bytep.
-func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) {
- return b.bytep(n / 8), 1 << (n % 8)
-}
-
-const gcBitsChunkBytes = uintptr(64 << 10)
-const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})
-
-type gcBitsHeader struct {
- free uintptr // free is the index into bits of the next free byte.
- next uintptr // *gcBits triggers recursive type bug. (issue 14620)
-}
-
-//go:notinheap
-type gcBitsArena struct {
- // gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand.
- free uintptr // free is the index into bits of the next free byte; read/write atomically
- next *gcBitsArena
- bits [gcBitsChunkBytes - gcBitsHeaderBytes]gcBits
-}
-
-var gcBitsArenas struct {
- lock mutex
- free *gcBitsArena
- next *gcBitsArena // Read atomically. Write atomically under lock.
- current *gcBitsArena
- previous *gcBitsArena
-}
-
-// tryAlloc allocates from b or returns nil if b does not have enough room.
-// This is safe to call concurrently.
-func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits {
- if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) {
- return nil
- }
- // Try to allocate from this block.
- end := atomic.Xadduintptr(&b.free, bytes)
- if end > uintptr(len(b.bits)) {
- return nil
- }
- // There was enough room.
- start := end - bytes
- return &b.bits[start]
-}
-
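
tryAlloc is an optimistic lock-free bump allocator: reserve with an atomic add first, then check whether the reservation overran the buffer. A standalone sketch (a loser of the race leaks its reservation, just as the runtime version then treats the arena as full):

package main

import (
	"fmt"
	"sync/atomic"
)

// bumpArena hands out bytes from a fixed buffer with a single
// atomic add, the same scheme as gcBitsArena.tryAlloc above.
type bumpArena struct {
	free atomic.Uintptr
	bits [1 << 16]byte
}

func (b *bumpArena) tryAlloc(bytes uintptr) []byte {
	if b == nil || b.free.Load()+bytes > uintptr(len(b.bits)) {
		return nil
	}
	end := b.free.Add(bytes) // reserve first...
	if end > uintptr(len(b.bits)) {
		return nil // ...then detect overrun; a fresh arena is needed
	}
	return b.bits[end-bytes : end]
}

func main() {
	var a bumpArena
	fmt.Println(len(a.tryAlloc(64)), a.free.Load()) // 64 64
}
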
-// newMarkBits returns a pointer to 8 byte aligned bytes
-// to be used for a span's mark bits.
-func newMarkBits(nelems uintptr) *gcBits {
- blocksNeeded := uintptr((nelems + 63) / 64)
- bytesNeeded := blocksNeeded * 8
-
- // Try directly allocating from the current head arena.
- head := (*gcBitsArena)(atomic.Loadp(unsafe.Pointer(&gcBitsArenas.next)))
- if p := head.tryAlloc(bytesNeeded); p != nil {
- return p
- }
-
- // There's not enough room in the head arena. We may need to
- // allocate a new arena.
- lock(&gcBitsArenas.lock)
- // Try the head arena again, since it may have changed. Now
- // that we hold the lock, the list head can't change, but its
- // free position still can.
- if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
- unlock(&gcBitsArenas.lock)
- return p
- }
-
- // Allocate a new arena. This may temporarily drop the lock.
- fresh := newArenaMayUnlock()
- // If newArenaMayUnlock dropped the lock, another thread may
- // have put a fresh arena on the "next" list. Try allocating
- // from next again.
- if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
- // Put fresh back on the free list.
- // TODO: Mark it "already zeroed"
- fresh.next = gcBitsArenas.free
- gcBitsArenas.free = fresh
- unlock(&gcBitsArenas.lock)
- return p
- }
-
- // Allocate from the fresh arena. We haven't linked it in yet, so
- // this cannot race and is guaranteed to succeed.
- p := fresh.tryAlloc(bytesNeeded)
- if p == nil {
- throw("markBits overflow")
- }
-
- // Add the fresh arena to the "next" list.
- fresh.next = gcBitsArenas.next
- atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), unsafe.Pointer(fresh))
-
- unlock(&gcBitsArenas.lock)
- return p
-}
-
-// newAllocBits returns a pointer to 8 byte aligned bytes
-// to be used for this span's alloc bits.
-// newAllocBits is used to provide newly initialized spans
-// with allocation bits. For spans that are not being initialized, the
-// mark bits are repurposed as allocation bits when
-// the span is swept.
-func newAllocBits(nelems uintptr) *gcBits {
- return newMarkBits(nelems)
-}
-
-// nextMarkBitArenaEpoch establishes a new epoch for the arenas
-// holding the mark bits. The arenas are named relative to the
-// current GC cycle, which is demarcated by the call to finishsweep_m.
-//
-// All current spans have been swept.
-// During that sweep, each span allocated room for its gcmarkBits in
-// the gcBitsArenas.next block. gcBitsArenas.next becomes gcBitsArenas.current,
-// where the GC will mark objects; after each span is swept, these bits
-// will be used to allocate objects.
-// gcBitsArenas.current becomes gcBitsArenas.previous where the span's
-// gcAllocBits live until all the spans have been swept during this GC cycle.
-// The span's sweep extinguishes all the references to gcBitsArenas.previous
-// by pointing gcAllocBits into the gcBitsArenas.current.
-// The gcBitsArenas.previous is released to the gcBitsArenas.free list.
-func nextMarkBitArenaEpoch() {
- lock(&gcBitsArenas.lock)
- if gcBitsArenas.previous != nil {
- if gcBitsArenas.free == nil {
- gcBitsArenas.free = gcBitsArenas.previous
- } else {
- // Find end of previous arenas.
- last := gcBitsArenas.previous
- for last = gcBitsArenas.previous; last.next != nil; last = last.next {
- }
- last.next = gcBitsArenas.free
- gcBitsArenas.free = gcBitsArenas.previous
- }
- }
- gcBitsArenas.previous = gcBitsArenas.current
- gcBitsArenas.current = gcBitsArenas.next
- atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), nil) // newMarkBits calls newArena when needed
- unlock(&gcBitsArenas.lock)
-}
-
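
Stripped of locking and arena details, the epoch change is a pure rotation of four slots. A toy sketch of just that rotation (slices of strings stand in for arena chains):

package main

import "fmt"

// epochs mirrors the four gcBitsArenas slots.
type epochs struct {
	free, next, current, previous []string
}

// nextEpoch rotates the slots as nextMarkBitArenaEpoch does:
// previous drains into free, current becomes previous, next
// becomes current, and next starts empty.
func (e *epochs) nextEpoch() {
	e.free = append(e.previous, e.free...)
	e.previous = e.current
	e.current = e.next
	e.next = nil // refilled on demand, as by newMarkBits
}

func main() {
	e := epochs{next: []string{"n"}, current: []string{"c"}, previous: []string{"p"}}
	e.nextEpoch()
	fmt.Println(e.free, e.previous, e.current, e.next) // [p] [c] [n] []
}
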
-// newArenaMayUnlock allocates and zeroes a gcBits arena.
-// The caller must hold gcBitsArena.lock. This may temporarily release it.
-func newArenaMayUnlock() *gcBitsArena {
- var result *gcBitsArena
- if gcBitsArenas.free == nil {
- unlock(&gcBitsArenas.lock)
- result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys))
- if result == nil {
- throw("runtime: cannot allocate memory")
- }
- lock(&gcBitsArenas.lock)
- } else {
- result = gcBitsArenas.free
- gcBitsArenas.free = gcBitsArenas.free.next
- memclrNoHeapPointers(unsafe.Pointer(result), gcBitsChunkBytes)
- }
- result.next = nil
- // If result.bits is not 8 byte aligned adjust index so
- // that &result.bits[result.free] is 8 byte aligned.
- if uintptr(unsafe.Offsetof(gcBitsArena{}.bits))&7 == 0 {
- result.free = 0
- } else {
- result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7)
- }
- return result
-}
diff --git a/contrib/go/_std_1.18/src/runtime/mpagealloc.go b/contrib/go/_std_1.18/src/runtime/mpagealloc.go
deleted file mode 100644
index 2725e3b7c7..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mpagealloc.go
+++ /dev/null
@@ -1,1026 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Page allocator.
-//
-// The page allocator manages mapped pages (defined by pageSize, NOT
-// physPageSize) for allocation and re-use. It is embedded into mheap.
-//
-// Pages are managed using a bitmap that is sharded into chunks.
-// In the bitmap, 1 means in-use, and 0 means free. The bitmap spans the
-// process's address space. Chunks are managed in a sparse-array-style structure
-// similar to mheap.arenas, since the bitmap may be large on some systems.
-//
-// The bitmap is efficiently searched by using a radix tree in combination
-// with fast bit-wise intrinsics. Allocation is performed using an address-ordered
-// first-fit approach.
-//
-// Each entry in the radix tree is a summary that describes three properties of
-// a particular region of the address space: the number of contiguous free pages
-// at the start and end of the region it represents, and the maximum number of
-// contiguous free pages found anywhere in that region.
-//
-// Each level of the radix tree is stored as one contiguous array, which represents
-// a different granularity of subdivision of the process's address space. Thus, this
-// radix tree is actually implicit in these large arrays, as opposed to having explicit
-// dynamically-allocated pointer-based node structures. Naturally, these arrays may be
-// quite large for systems with large address spaces, so in these cases they are mapped
-// into memory as needed. The leaf summaries of the tree correspond to a bitmap chunk.
-//
-// The root level (referred to as L0 and index 0 in pageAlloc.summary) has each
-// summary represent the largest section of address space (16 GiB on 64-bit systems),
-// with each subsequent level representing successively smaller subsections until we
-// reach the finest granularity at the leaves, a chunk.
-//
-// More specifically, each summary in each level (except for leaf summaries)
-// represents some number of entries in the following level. For example, each
-// summary in the root level may represent a 16 GiB region of address space,
-// and in the next level there could be 8 corresponding entries which represent 2
-// GiB subsections of that 16 GiB region, each of which could correspond to 8
-// entries in the next level which each represent 256 MiB regions, and so on.
-//
-// Thus, this design only scales up to heaps of a bounded size, but can always be
-// extended to larger heaps by simply adding levels to the radix tree, which mostly
-// costs additional virtual address space. The choice of managing large arrays also
-// means that a large amount of virtual address space may be reserved by the runtime.
-
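
To make the geometry above concrete, the following standalone sketch computes the region covered by one summary at each level. The constants assume a 48-bit address space with 8 KiB pages and 4 MiB chunks (the typical 64-bit configuration); they are restated here only because runtime internals cannot be imported:

package main

import "fmt"

func main() {
	const (
		heapAddrBits        = 48 // assumption: typical 64-bit platform
		pageShift           = 13 // assumption: 8 KiB runtime pages
		logPallocChunkPages = 9  // 512 pages per chunk
		logPallocChunkBytes = logPallocChunkPages + pageShift // 22: 4 MiB chunks
		summaryLevels       = 5
		summaryLevelBits    = 3
		summaryL0Bits       = heapAddrBits - logPallocChunkBytes - (summaryLevels-1)*summaryLevelBits
	)
	shift := uint(heapAddrBits - summaryL0Bits)
	for l := 0; l < summaryLevels; l++ {
		// One summary at level l covers 1<<shift bytes of address space.
		fmt.Printf("level %d: 2^%d bytes (%d MiB)\n", l, shift, (uint64(1)<<shift)>>20)
		shift -= summaryLevelBits
	}
}

This prints 16384 MiB (16 GiB) for L0 down to 4 MiB (one chunk) at the leaves, matching the subdivision described above.
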
-package runtime
-
-import (
- "runtime/internal/atomic"
- "unsafe"
-)
-
-const (
- // The size of a bitmap chunk, i.e. the amount of bits (that is, pages) to consider
- // in the bitmap at once.
- pallocChunkPages = 1 << logPallocChunkPages
- pallocChunkBytes = pallocChunkPages * pageSize
- logPallocChunkPages = 9
- logPallocChunkBytes = logPallocChunkPages + pageShift
-
- // The number of radix bits for each level.
- //
- // The value of 3 is chosen such that the block of summaries we need to scan at
- // each level fits in 64 bytes (2^3 summaries * 8 bytes per summary), which is
- // close to the L1 cache line width on many systems. Also, a value of 3 fits 4 tree
- // levels perfectly into the 21-bit pallocBits summary field at the root level.
- //
- // The following equation explains how each of the constants relate:
- // summaryL0Bits + (summaryLevels-1)*summaryLevelBits + logPallocChunkBytes = heapAddrBits
- //
- // summaryLevels is an architecture-dependent value defined in mpagealloc_*.go.
- summaryLevelBits = 3
- summaryL0Bits = heapAddrBits - logPallocChunkBytes - (summaryLevels-1)*summaryLevelBits
-
- // pallocChunksL2Bits is the number of bits of the chunk index number
- // covered by the second level of the chunks map.
- //
- // See (*pageAlloc).chunks for more details. Update the documentation
- // there should this change.
- pallocChunksL2Bits = heapAddrBits - logPallocChunkBytes - pallocChunksL1Bits
- pallocChunksL1Shift = pallocChunksL2Bits
-)
-
-// Maximum searchAddr value, which indicates that the heap has no free space.
-//
-// We alias maxOffAddr just to make it clear that this is the maximum address
-// for the page allocator's search space. See maxOffAddr for details.
-var maxSearchAddr = maxOffAddr
-
-// Global chunk index.
-//
-// Represents an index into the leaf level of the radix tree.
-// Similar to arenaIndex, except instead of arenas, it divides the address
-// space into chunks.
-type chunkIdx uint
-
-// chunkIndex returns the global index of the palloc chunk containing the
-// pointer p.
-func chunkIndex(p uintptr) chunkIdx {
- return chunkIdx((p - arenaBaseOffset) / pallocChunkBytes)
-}
-
-// chunkBase returns the base address of the palloc chunk at index ci.
-func chunkBase(ci chunkIdx) uintptr {
- return uintptr(ci)*pallocChunkBytes + arenaBaseOffset
-}
-
-// chunkPageIndex computes the index of the page that contains p,
-// relative to the chunk which contains p.
-func chunkPageIndex(p uintptr) uint {
- return uint(p % pallocChunkBytes / pageSize)
-}
-
-// l1 returns the index into the first level of (*pageAlloc).chunks.
-func (i chunkIdx) l1() uint {
- if pallocChunksL1Bits == 0 {
- // Let the compiler optimize this away if there's no
- // L1 map.
- return 0
- } else {
- return uint(i) >> pallocChunksL1Shift
- }
-}
-
-// l2 returns the index into the second level of (*pageAlloc).chunks.
-func (i chunkIdx) l2() uint {
- if pallocChunksL1Bits == 0 {
- return uint(i)
- } else {
- return uint(i) & (1<<pallocChunksL2Bits - 1)
- }
-}
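
A standalone sketch of the index arithmetic in chunkIndex, chunkBase, chunkPageIndex, l1, and l2. The constants are restated assumptions (arenaBaseOffset = 0 as on amd64/linux, 8 KiB pages, 4 MiB chunks, and the 13-bit L2 split of the 48-bit configuration):

package main

import "fmt"

const (
	arenaBaseOffset    = 0       // assumption: amd64/linux layout
	pageSize           = 8192    // assumption: 8 KiB runtime pages
	pallocChunkBytes   = 4 << 20 // 512 pages * 8 KiB
	pallocChunksL2Bits = 13      // assumption: 48-bit address space
)

func main() {
	p := uintptr(0x7f2a1c0ff000) // an arbitrary example address
	ci := (p - arenaBaseOffset) / pallocChunkBytes // chunkIndex
	base := ci*pallocChunkBytes + arenaBaseOffset  // chunkBase
	pi := p % pallocChunkBytes / pageSize          // chunkPageIndex
	l1 := uint(ci) >> pallocChunksL2Bits           // first-level index
	l2 := uint(ci) & (1<<pallocChunksL2Bits - 1)   // second-level index
	fmt.Printf("addr %#x -> chunks[%d][%d], chunk base %#x, page %d in chunk\n",
		p, l1, l2, base, pi)
}
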
-
-// offAddrToLevelIndex converts an address in the offset address space
-// to the index into summary[level] containing addr.
-func offAddrToLevelIndex(level int, addr offAddr) int {
- return int((addr.a - arenaBaseOffset) >> levelShift[level])
-}
-
-// levelIndexToOffAddr converts an index into summary[level] into
-// the corresponding address in the offset address space.
-func levelIndexToOffAddr(level, idx int) offAddr {
- return offAddr{(uintptr(idx) << levelShift[level]) + arenaBaseOffset}
-}
-
-// addrsToSummaryRange converts base and limit pointers into a range
-// of entries for the given summary level.
-//
-// The returned range is inclusive on the lower bound and exclusive on
-// the upper bound.
-func addrsToSummaryRange(level int, base, limit uintptr) (lo int, hi int) {
- // This is slightly more nuanced than just a shift for the exclusive
- // upper-bound. Note that the exclusive upper bound may be within a
- // summary at this level, meaning if we just do the obvious computation
- // hi will end up being an inclusive upper bound. Unfortunately, just
- // adding 1 to that is too broad since we might be on the very edge
- // of a summary's max page count boundary for this level
- // (1 << levelLogPages[level]). So, make limit an inclusive upper bound
- // then shift, then add 1, so we get an exclusive upper bound at the end.
- lo = int((base - arenaBaseOffset) >> levelShift[level])
- hi = int(((limit-1)-arenaBaseOffset)>>levelShift[level]) + 1
- return
-}
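
The off-by-one reasoning in the comment is easiest to see with numbers. A standalone sketch for a single level, assuming arenaBaseOffset = 0 and a level whose summaries each cover 4 MiB (levelShift = 22, i.e. the leaf level):

package main

import "fmt"

func summaryRange(base, limit uintptr) (lo, hi int) {
	const levelShift = 22 // assumption: leaf level, 4 MiB per summary
	lo = int(base >> levelShift)
	hi = int((limit-1)>>levelShift) + 1
	return
}

func main() {
	const MiB = 1 << 20
	// limit falls mid-summary: summaries 0 and 1 must both be covered.
	fmt.Println(summaryRange(0, 6*MiB)) // 0 2
	// limit exactly on a boundary: shifting limit-1 first avoids
	// including summary 2, which the naive (limit>>shift)+1 would.
	fmt.Println(summaryRange(4*MiB, 8*MiB)) // 1 2
}
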
-
-// blockAlignSummaryRange aligns indices into the given level to that
-// level's block width (1 << levelBits[level]). It assumes lo is inclusive
-// and hi is exclusive, and so aligns them down and up respectively.
-func blockAlignSummaryRange(level int, lo, hi int) (int, int) {
- e := uintptr(1) << levelBits[level]
- return int(alignDown(uintptr(lo), e)), int(alignUp(uintptr(hi), e))
-}
-
-type pageAlloc struct {
- // Radix tree of summaries.
- //
- // Each slice's cap represents the whole memory reservation.
- // Each slice's len reflects the allocator's maximum known
- // mapped heap address for that level.
- //
- // The backing store of each summary level is reserved in init
- // and may or may not be committed in grow (small address spaces
- // may commit all the memory in init).
- //
- // The purpose of keeping len <= cap is to enforce bounds checks
- // on the top end of the slice so that instead of an unknown
- // runtime segmentation fault, we get a much friendlier out-of-bounds
- // error.
- //
- // To iterate over a summary level, use inUse to determine which ranges
- // are currently available. Otherwise one might try to access
- // memory which is only Reserved which may result in a hard fault.
- //
-	// We may still get segmentation faults at addresses below len, since
-	// some of that memory may not be committed yet.
- summary [summaryLevels][]pallocSum
-
- // chunks is a slice of bitmap chunks.
- //
- // The total size of chunks is quite large on most 64-bit platforms
- // (O(GiB) or more) if flattened, so rather than making one large mapping
- // (which has problems on some platforms, even when PROT_NONE) we use a
- // two-level sparse array approach similar to the arena index in mheap.
- //
- // To find the chunk containing a memory address `a`, do:
- // chunkOf(chunkIndex(a))
- //
- // Below is a table describing the configuration for chunks for various
- // heapAddrBits supported by the runtime.
- //
- // heapAddrBits | L1 Bits | L2 Bits | L2 Entry Size
- // ------------------------------------------------
- // 32 | 0 | 10 | 128 KiB
- // 33 (iOS) | 0 | 11 | 256 KiB
- // 48 | 13 | 13 | 1 MiB
- //
-	// There's no reason to use the L1 part of chunks on 32-bit: the
-	// address space is small, so the L2 is small. For platforms with a
-	// 48-bit address space, we pick the L1 such that the L2 is 1 MiB
-	// in size, which strikes a good balance between fine granularity
-	// and keeping the impact on BSS low (note the L1 is stored directly
-	// in pageAlloc).
- //
- // To iterate over the bitmap, use inUse to determine which ranges
- // are currently available. Otherwise one might iterate over unused
- // ranges.
- //
- // Protected by mheapLock.
- //
- // TODO(mknyszek): Consider changing the definition of the bitmap
- // such that 1 means free and 0 means in-use so that summaries and
- // the bitmaps align better on zero-values.
- chunks [1 << pallocChunksL1Bits]*[1 << pallocChunksL2Bits]pallocData
-
- // The address to start an allocation search with. It must never
- // point to any memory that is not contained in inUse, i.e.
- // inUse.contains(searchAddr.addr()) must always be true. The one
- // exception to this rule is that it may take on the value of
- // maxOffAddr to indicate that the heap is exhausted.
- //
- // We guarantee that all valid heap addresses below this value
- // are allocated and not worth searching.
- searchAddr offAddr
-
- // start and end represent the chunk indices
- // which pageAlloc knows about. It assumes
- // chunks in the range [start, end) are
- // currently ready to use.
- start, end chunkIdx
-
- // inUse is a slice of ranges of address space which are
- // known by the page allocator to be currently in-use (passed
- // to grow).
- //
- // This field is currently unused on 32-bit architectures but
- // is harmless to track. We care much more about having a
- // contiguous heap in these cases and take additional measures
- // to ensure that, so in nearly all cases this should have just
- // 1 element.
- //
- // All access is protected by the mheapLock.
- inUse addrRanges
-
- // scav stores the scavenger state.
- scav struct {
- lock mutex
-
- // inUse is a slice of ranges of address space which have not
- // yet been looked at by the scavenger.
- //
- // Protected by lock.
- inUse addrRanges
-
- // gen is the scavenge generation number.
- //
- // Protected by lock.
- gen uint32
-
- // reservationBytes is how large of a reservation should be made
- // in bytes of address space for each scavenge iteration.
- //
- // Protected by lock.
- reservationBytes uintptr
-
- // released is the amount of memory released this generation.
- //
- // Updated atomically.
- released uintptr
-
- // scavLWM is the lowest (offset) address that the scavenger reached this
- // scavenge generation.
- //
- // Protected by lock.
- scavLWM offAddr
-
- // freeHWM is the highest (offset) address of a page that was freed to
- // the page allocator this scavenge generation.
- //
- // Protected by mheapLock.
- freeHWM offAddr
- }
-
- // mheap_.lock. This level of indirection makes it possible
-	// to test pageAlloc independently of the runtime allocator.
- mheapLock *mutex
-
- // sysStat is the runtime memstat to update when new system
- // memory is committed by the pageAlloc for allocation metadata.
- sysStat *sysMemStat
-
- // Whether or not this struct is being used in tests.
- test bool
-}
-
-func (p *pageAlloc) init(mheapLock *mutex, sysStat *sysMemStat) {
- if levelLogPages[0] > logMaxPackedValue {
- // We can't represent 1<<levelLogPages[0] pages, the maximum number
- // of pages we need to represent at the root level, in a summary, which
- // is a big problem. Throw.
- print("runtime: root level max pages = ", 1<<levelLogPages[0], "\n")
- print("runtime: summary max pages = ", maxPackedValue, "\n")
- throw("root level max pages doesn't fit in summary")
- }
- p.sysStat = sysStat
-
- // Initialize p.inUse.
- p.inUse.init(sysStat)
-
- // System-dependent initialization.
- p.sysInit()
-
- // Start with the searchAddr in a state indicating there's no free memory.
- p.searchAddr = maxSearchAddr
-
- // Set the mheapLock.
- p.mheapLock = mheapLock
-
- // Initialize scavenge tracking state.
- p.scav.scavLWM = maxSearchAddr
-}
-
-// tryChunkOf returns the bitmap data for the given chunk.
-//
-// Returns nil if the chunk data has not been mapped.
-func (p *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
- l2 := p.chunks[ci.l1()]
- if l2 == nil {
- return nil
- }
- return &l2[ci.l2()]
-}
-
-// chunkOf returns the chunk at the given chunk index.
-//
-// The chunk index must be valid or this method may throw.
-func (p *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
- return &p.chunks[ci.l1()][ci.l2()]
-}
-
-// grow sets up the metadata for the address range [base, base+size).
-// It may allocate metadata, in which case *p.sysStat will be updated.
-//
-// p.mheapLock must be held.
-func (p *pageAlloc) grow(base, size uintptr) {
- assertLockHeld(p.mheapLock)
-
- // Round up to chunks, since we can't deal with increments smaller
- // than chunks. Also, sysGrow expects aligned values.
- limit := alignUp(base+size, pallocChunkBytes)
- base = alignDown(base, pallocChunkBytes)
-
- // Grow the summary levels in a system-dependent manner.
- // We just update a bunch of additional metadata here.
- p.sysGrow(base, limit)
-
- // Update p.start and p.end.
- // If no growth happened yet, start == 0. This is generally
- // safe since the zero page is unmapped.
- firstGrowth := p.start == 0
- start, end := chunkIndex(base), chunkIndex(limit)
- if firstGrowth || start < p.start {
- p.start = start
- }
- if end > p.end {
- p.end = end
- }
- // Note that [base, limit) will never overlap with any existing
- // range inUse because grow only ever adds never-used memory
- // regions to the page allocator.
- p.inUse.add(makeAddrRange(base, limit))
-
- // A grow operation is a lot like a free operation, so if our
- // chunk ends up below p.searchAddr, update p.searchAddr to the
- // new address, just like in free.
- if b := (offAddr{base}); b.lessThan(p.searchAddr) {
- p.searchAddr = b
- }
-
- // Add entries into chunks, which is sparse, if needed. Then,
- // initialize the bitmap.
- //
- // Newly-grown memory is always considered scavenged.
- // Set all the bits in the scavenged bitmaps high.
- for c := chunkIndex(base); c < chunkIndex(limit); c++ {
- if p.chunks[c.l1()] == nil {
- // Create the necessary l2 entry.
- //
- // Store it atomically to avoid races with readers which
- // don't acquire the heap lock.
- r := sysAlloc(unsafe.Sizeof(*p.chunks[0]), p.sysStat)
- if r == nil {
- throw("pageAlloc: out of memory")
- }
- atomic.StorepNoWB(unsafe.Pointer(&p.chunks[c.l1()]), r)
- }
- p.chunkOf(c).scavenged.setRange(0, pallocChunkPages)
- }
-
- // Update summaries accordingly. The grow acts like a free, so
- // we need to ensure this newly-free memory is visible in the
- // summaries.
- p.update(base, size/pageSize, true, false)
-}
-
-// update updates heap metadata. It must be called each time the bitmap
-// is updated.
-//
-// If contig is true, update does some optimizations assuming that there was
-// a contiguous allocation or free between addr and addr+npages. alloc indicates
-// whether the operation performed was an allocation or a free.
-//
-// p.mheapLock must be held.
-func (p *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
- assertLockHeld(p.mheapLock)
-
- // base, limit, start, and end are inclusive.
- limit := base + npages*pageSize - 1
- sc, ec := chunkIndex(base), chunkIndex(limit)
-
- // Handle updating the lowest level first.
- if sc == ec {
- // Fast path: the allocation doesn't span more than one chunk,
- // so update this one and if the summary didn't change, return.
- x := p.summary[len(p.summary)-1][sc]
- y := p.chunkOf(sc).summarize()
- if x == y {
- return
- }
- p.summary[len(p.summary)-1][sc] = y
- } else if contig {
- // Slow contiguous path: the allocation spans more than one chunk
- // and at least one summary is guaranteed to change.
- summary := p.summary[len(p.summary)-1]
-
- // Update the summary for chunk sc.
- summary[sc] = p.chunkOf(sc).summarize()
-
- // Update the summaries for chunks in between, which are
- // either totally allocated or freed.
- whole := p.summary[len(p.summary)-1][sc+1 : ec]
- if alloc {
- // Should optimize into a memclr.
- for i := range whole {
- whole[i] = 0
- }
- } else {
- for i := range whole {
- whole[i] = freeChunkSum
- }
- }
-
- // Update the summary for chunk ec.
- summary[ec] = p.chunkOf(ec).summarize()
- } else {
- // Slow general path: the allocation spans more than one chunk
- // and at least one summary is guaranteed to change.
- //
- // We can't assume a contiguous allocation happened, so walk over
- // every chunk in the range and manually recompute the summary.
- summary := p.summary[len(p.summary)-1]
- for c := sc; c <= ec; c++ {
- summary[c] = p.chunkOf(c).summarize()
- }
- }
-
- // Walk up the radix tree and update the summaries appropriately.
- changed := true
- for l := len(p.summary) - 2; l >= 0 && changed; l-- {
- // Update summaries at level l from summaries at level l+1.
- changed = false
-
- // "Constants" for the previous level which we
- // need to compute the summary from that level.
- logEntriesPerBlock := levelBits[l+1]
- logMaxPages := levelLogPages[l+1]
-
- // lo and hi describe all the parts of the level we need to look at.
- lo, hi := addrsToSummaryRange(l, base, limit+1)
-
- // Iterate over each block, updating the corresponding summary in the less-granular level.
- for i := lo; i < hi; i++ {
- children := p.summary[l+1][i<<logEntriesPerBlock : (i+1)<<logEntriesPerBlock]
- sum := mergeSummaries(children, logMaxPages)
- old := p.summary[l][i]
- if old != sum {
- changed = true
- p.summary[l][i] = sum
- }
- }
- }
-}
-
-// allocRange marks the range of memory [base, base+npages*pageSize) as
-// allocated. It also updates the summaries to reflect the newly-updated
-// bitmap.
-//
-// Returns the amount of scavenged memory in bytes present in the
-// allocated range.
-//
-// p.mheapLock must be held.
-func (p *pageAlloc) allocRange(base, npages uintptr) uintptr {
- assertLockHeld(p.mheapLock)
-
- limit := base + npages*pageSize - 1
- sc, ec := chunkIndex(base), chunkIndex(limit)
- si, ei := chunkPageIndex(base), chunkPageIndex(limit)
-
- scav := uint(0)
- if sc == ec {
- // The range doesn't cross any chunk boundaries.
- chunk := p.chunkOf(sc)
- scav += chunk.scavenged.popcntRange(si, ei+1-si)
- chunk.allocRange(si, ei+1-si)
- } else {
- // The range crosses at least one chunk boundary.
- chunk := p.chunkOf(sc)
- scav += chunk.scavenged.popcntRange(si, pallocChunkPages-si)
- chunk.allocRange(si, pallocChunkPages-si)
- for c := sc + 1; c < ec; c++ {
- chunk := p.chunkOf(c)
- scav += chunk.scavenged.popcntRange(0, pallocChunkPages)
- chunk.allocAll()
- }
- chunk = p.chunkOf(ec)
- scav += chunk.scavenged.popcntRange(0, ei+1)
- chunk.allocRange(0, ei+1)
- }
- p.update(base, npages, true, true)
- return uintptr(scav) * pageSize
-}
-
-// findMappedAddr returns the smallest mapped offAddr that is
-// >= addr. That is, if addr refers to mapped memory, then it is
-// returned. If addr is higher than any mapped region, then
-// it returns maxOffAddr.
-//
-// p.mheapLock must be held.
-func (p *pageAlloc) findMappedAddr(addr offAddr) offAddr {
- assertLockHeld(p.mheapLock)
-
- // If we're not in a test, validate first by checking mheap_.arenas.
- // This is a fast path which is only safe to use outside of testing.
- ai := arenaIndex(addr.addr())
- if p.test || mheap_.arenas[ai.l1()] == nil || mheap_.arenas[ai.l1()][ai.l2()] == nil {
- vAddr, ok := p.inUse.findAddrGreaterEqual(addr.addr())
- if ok {
- return offAddr{vAddr}
- } else {
- // The candidate search address is greater than any
- // known address, which means we definitely have no
- // free memory left.
- return maxOffAddr
- }
- }
- return addr
-}
-
-// find searches for the first (address-ordered) contiguous free region of
-// npages in size and returns a base address for that region.
-//
-// It uses p.searchAddr to prune its search and assumes that no palloc chunks
-// below chunkIndex(p.searchAddr) contain any free memory at all.
-//
-// find also computes and returns a candidate p.searchAddr, which may or
-// may not prune more of the address space than p.searchAddr already does.
-// This candidate is always a valid p.searchAddr.
-//
-// find represents the slow path and the full radix tree search.
-//
-// Returns a base address of 0 on failure, in which case the candidate
-// searchAddr returned is invalid and must be ignored.
-//
-// p.mheapLock must be held.
-func (p *pageAlloc) find(npages uintptr) (uintptr, offAddr) {
- assertLockHeld(p.mheapLock)
-
- // Search algorithm.
- //
- // This algorithm walks each level l of the radix tree from the root level
- // to the leaf level. It iterates over at most 1 << levelBits[l] of entries
- // in a given level in the radix tree, and uses the summary information to
- // find either:
- // 1) That a given subtree contains a large enough contiguous region, at
- // which point it continues iterating on the next level, or
- // 2) That there are enough contiguous boundary-crossing bits to satisfy
- // the allocation, at which point it knows exactly where to start
- // allocating from.
- //
- // i tracks the index into the current level l's structure for the
- // contiguous 1 << levelBits[l] entries we're actually interested in.
- //
- // NOTE: Technically this search could allocate a region which crosses
- // the arenaBaseOffset boundary, which when arenaBaseOffset != 0, is
- // a discontinuity. However, the only way this could happen is if the
- // page at the zero address is mapped, and this is impossible on
- // every system we support where arenaBaseOffset != 0. So, the
- // discontinuity is already encoded in the fact that the OS will never
- // map the zero page for us, and this function doesn't try to handle
- // this case in any way.
-
- // i is the beginning of the block of entries we're searching at the
- // current level.
- i := 0
-
-	// firstFree is the region of address space in which we are certain
-	// to find the first free page in the heap. base and bound are the inclusive
- // bounds of this window, and both are addresses in the linearized, contiguous
- // view of the address space (with arenaBaseOffset pre-added). At each level,
- // this window is narrowed as we find the memory region containing the
- // first free page of memory. To begin with, the range reflects the
- // full process address space.
- //
- // firstFree is updated by calling foundFree each time free space in the
- // heap is discovered.
- //
- // At the end of the search, base.addr() is the best new
- // searchAddr we could deduce in this search.
- firstFree := struct {
- base, bound offAddr
- }{
- base: minOffAddr,
- bound: maxOffAddr,
- }
- // foundFree takes the given address range [addr, addr+size) and
- // updates firstFree if it is a narrower range. The input range must
- // either be fully contained within firstFree or not overlap with it
- // at all.
- //
- // This way, we'll record the first summary we find with any free
- // pages on the root level and narrow that down if we descend into
- // that summary. But as soon as we need to iterate beyond that summary
- // in a level to find a large enough range, we'll stop narrowing.
- foundFree := func(addr offAddr, size uintptr) {
- if firstFree.base.lessEqual(addr) && addr.add(size-1).lessEqual(firstFree.bound) {
- // This range fits within the current firstFree window, so narrow
- // down the firstFree window to the base and bound of this range.
- firstFree.base = addr
- firstFree.bound = addr.add(size - 1)
- } else if !(addr.add(size-1).lessThan(firstFree.base) || firstFree.bound.lessThan(addr)) {
- // This range only partially overlaps with the firstFree range,
- // so throw.
- print("runtime: addr = ", hex(addr.addr()), ", size = ", size, "\n")
- print("runtime: base = ", hex(firstFree.base.addr()), ", bound = ", hex(firstFree.bound.addr()), "\n")
- throw("range partially overlaps")
- }
- }
-
- // lastSum is the summary which we saw on the previous level that made us
- // move on to the next level. Used to print additional information in the
- // case of a catastrophic failure.
- // lastSumIdx is that summary's index in the previous level.
- lastSum := packPallocSum(0, 0, 0)
- lastSumIdx := -1
-
-nextLevel:
- for l := 0; l < len(p.summary); l++ {
- // For the root level, entriesPerBlock is the whole level.
- entriesPerBlock := 1 << levelBits[l]
- logMaxPages := levelLogPages[l]
-
- // We've moved into a new level, so let's update i to our new
- // starting index. This is a no-op for level 0.
- i <<= levelBits[l]
-
- // Slice out the block of entries we care about.
- entries := p.summary[l][i : i+entriesPerBlock]
-
- // Determine j0, the first index we should start iterating from.
- // The searchAddr may help us eliminate iterations if we followed the
-		// searchAddr on the previous level or we're on the root level, in which
- // case the searchAddr should be the same as i after levelShift.
- j0 := 0
- if searchIdx := offAddrToLevelIndex(l, p.searchAddr); searchIdx&^(entriesPerBlock-1) == i {
- j0 = searchIdx & (entriesPerBlock - 1)
- }
-
- // Run over the level entries looking for
- // a contiguous run of at least npages either
- // within an entry or across entries.
- //
- // base contains the page index (relative to
- // the first entry's first page) of the currently
- // considered run of consecutive pages.
- //
- // size contains the size of the currently considered
- // run of consecutive pages.
- var base, size uint
- for j := j0; j < len(entries); j++ {
- sum := entries[j]
- if sum == 0 {
- // A full entry means we broke any streak and
- // that we should skip it altogether.
- size = 0
- continue
- }
-
- // We've encountered a non-zero summary which means
- // free memory, so update firstFree.
- foundFree(levelIndexToOffAddr(l, i+j), (uintptr(1)<<logMaxPages)*pageSize)
-
- s := sum.start()
- if size+s >= uint(npages) {
- // If size == 0 we don't have a run yet,
- // which means base isn't valid. So, set
- // base to the first page in this block.
- if size == 0 {
- base = uint(j) << logMaxPages
- }
- // We hit npages; we're done!
- size += s
- break
- }
- if sum.max() >= uint(npages) {
- // The entry itself contains npages contiguous
- // free pages, so continue on the next level
- // to find that run.
- i += j
- lastSumIdx = i
- lastSum = sum
- continue nextLevel
- }
- if size == 0 || s < 1<<logMaxPages {
- // We either don't have a current run started, or this entry
- // isn't totally free (meaning we can't continue the current
- // one), so try to begin a new run by setting size and base
- // based on sum.end.
- size = sum.end()
- base = uint(j+1)<<logMaxPages - size
- continue
- }
- // The entry is completely free, so continue the run.
- size += 1 << logMaxPages
- }
- if size >= uint(npages) {
- // We found a sufficiently large run of free pages straddling
- // some boundary, so compute the address and return it.
- addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
- return addr, p.findMappedAddr(firstFree.base)
- }
- if l == 0 {
- // We're at level zero, so that means we've exhausted our search.
- return 0, maxSearchAddr
- }
-
- // We're not at level zero, and we exhausted the level we were looking in.
- // This means that either our calculations were wrong or the level above
- // lied to us. In either case, dump some useful state and throw.
- print("runtime: summary[", l-1, "][", lastSumIdx, "] = ", lastSum.start(), ", ", lastSum.max(), ", ", lastSum.end(), "\n")
- print("runtime: level = ", l, ", npages = ", npages, ", j0 = ", j0, "\n")
- print("runtime: p.searchAddr = ", hex(p.searchAddr.addr()), ", i = ", i, "\n")
- print("runtime: levelShift[level] = ", levelShift[l], ", levelBits[level] = ", levelBits[l], "\n")
- for j := 0; j < len(entries); j++ {
- sum := entries[j]
- print("runtime: summary[", l, "][", i+j, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
- }
- throw("bad summary data")
- }
-
- // Since we've gotten to this point, that means we haven't found a
- // sufficiently-sized free region straddling some boundary (chunk or larger).
- // This means the last summary we inspected must have had a large enough "max"
- // value, so look inside the chunk to find a suitable run.
- //
- // After iterating over all levels, i must contain a chunk index which
- // is what the final level represents.
- ci := chunkIdx(i)
- j, searchIdx := p.chunkOf(ci).find(npages, 0)
- if j == ^uint(0) {
- // We couldn't find any space in this chunk despite the summaries telling
- // us it should be there. There's likely a bug, so dump some state and throw.
- sum := p.summary[len(p.summary)-1][i]
- print("runtime: summary[", len(p.summary)-1, "][", i, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
- print("runtime: npages = ", npages, "\n")
- throw("bad summary data")
- }
-
- // Compute the address at which the free space starts.
- addr := chunkBase(ci) + uintptr(j)*pageSize
-
- // Since we actually searched the chunk, we may have
- // found an even narrower free window.
- searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
- foundFree(offAddr{searchAddr}, chunkBase(ci+1)-searchAddr)
- return addr, p.findMappedAddr(firstFree.base)
-}
-
-// alloc allocates npages worth of memory from the page heap, returning the base
-// address for the allocation and the amount of scavenged memory in bytes
-// contained in the region [base address, base address + npages*pageSize).
-//
-// Returns a 0 base address on failure, in which case other returned values
-// should be ignored.
-//
-// p.mheapLock must be held.
-//
-// Must run on the system stack because p.mheapLock must be held.
-//
-//go:systemstack
-func (p *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
- assertLockHeld(p.mheapLock)
-
- // If the searchAddr refers to a region which has a higher address than
- // any known chunk, then we know we're out of memory.
- if chunkIndex(p.searchAddr.addr()) >= p.end {
- return 0, 0
- }
-
- // If npages has a chance of fitting in the chunk where the searchAddr is,
- // search it directly.
- searchAddr := minOffAddr
- if pallocChunkPages-chunkPageIndex(p.searchAddr.addr()) >= uint(npages) {
- // npages is guaranteed to be no greater than pallocChunkPages here.
- i := chunkIndex(p.searchAddr.addr())
- if max := p.summary[len(p.summary)-1][i].max(); max >= uint(npages) {
- j, searchIdx := p.chunkOf(i).find(npages, chunkPageIndex(p.searchAddr.addr()))
- if j == ^uint(0) {
- print("runtime: max = ", max, ", npages = ", npages, "\n")
- print("runtime: searchIdx = ", chunkPageIndex(p.searchAddr.addr()), ", p.searchAddr = ", hex(p.searchAddr.addr()), "\n")
- throw("bad summary data")
- }
- addr = chunkBase(i) + uintptr(j)*pageSize
- searchAddr = offAddr{chunkBase(i) + uintptr(searchIdx)*pageSize}
- goto Found
- }
- }
- // We failed to use a searchAddr for one reason or another, so try
- // the slow path.
- addr, searchAddr = p.find(npages)
- if addr == 0 {
- if npages == 1 {
- // We failed to find a single free page, the smallest unit
- // of allocation. This means we know the heap is completely
- // exhausted. Otherwise, the heap still might have free
- // space in it, just not enough contiguous space to
- // accommodate npages.
- p.searchAddr = maxSearchAddr
- }
- return 0, 0
- }
-Found:
- // Go ahead and actually mark the bits now that we have an address.
- scav = p.allocRange(addr, npages)
-
- // If we found a higher searchAddr, we know that all the
- // heap memory before that searchAddr in an offset address space is
- // allocated, so bump p.searchAddr up to the new one.
- if p.searchAddr.lessThan(searchAddr) {
- p.searchAddr = searchAddr
- }
- return addr, scav
-}
-
-// free returns npages worth of memory starting at base back to the page heap.
-//
-// p.mheapLock must be held.
-//
-// Must run on the system stack because p.mheapLock must be held.
-//
-//go:systemstack
-func (p *pageAlloc) free(base, npages uintptr, scavenged bool) {
- assertLockHeld(p.mheapLock)
-
- // If we're freeing pages below the p.searchAddr, update searchAddr.
- if b := (offAddr{base}); b.lessThan(p.searchAddr) {
- p.searchAddr = b
- }
- limit := base + npages*pageSize - 1
- if !scavenged {
- // Update the free high watermark for the scavenger.
- if offLimit := (offAddr{limit}); p.scav.freeHWM.lessThan(offLimit) {
- p.scav.freeHWM = offLimit
- }
- }
- if npages == 1 {
- // Fast path: we're clearing a single bit, and we know exactly
- // where it is, so mark it directly.
- i := chunkIndex(base)
- p.chunkOf(i).free1(chunkPageIndex(base))
- } else {
- // Slow path: we're clearing more bits so we may need to iterate.
- sc, ec := chunkIndex(base), chunkIndex(limit)
- si, ei := chunkPageIndex(base), chunkPageIndex(limit)
-
- if sc == ec {
- // The range doesn't cross any chunk boundaries.
- p.chunkOf(sc).free(si, ei+1-si)
- } else {
- // The range crosses at least one chunk boundary.
- p.chunkOf(sc).free(si, pallocChunkPages-si)
- for c := sc + 1; c < ec; c++ {
- p.chunkOf(c).freeAll()
- }
- p.chunkOf(ec).free(0, ei+1)
- }
- }
- p.update(base, npages, true, false)
-}
-
-const (
- pallocSumBytes = unsafe.Sizeof(pallocSum(0))
-
- // maxPackedValue is the maximum value that any of the three fields in
- // the pallocSum may take on.
- maxPackedValue = 1 << logMaxPackedValue
- logMaxPackedValue = logPallocChunkPages + (summaryLevels-1)*summaryLevelBits
-
- freeChunkSum = pallocSum(uint64(pallocChunkPages) |
- uint64(pallocChunkPages<<logMaxPackedValue) |
- uint64(pallocChunkPages<<(2*logMaxPackedValue)))
-)
-
-// pallocSum is a packed summary type which packs three numbers: start, max,
-// and end into a single 8-byte value. Each of these values are a summary of
-// a bitmap and are thus counts, each of which may have a maximum value of
-// 2^21 - 1, or all three may be equal to 2^21. The latter case is represented
-// by just setting the 64th bit.
-type pallocSum uint64
-
-// packPallocSum takes a start, max, and end value and produces a pallocSum.
-func packPallocSum(start, max, end uint) pallocSum {
- if max == maxPackedValue {
- return pallocSum(uint64(1 << 63))
- }
- return pallocSum((uint64(start) & (maxPackedValue - 1)) |
- ((uint64(max) & (maxPackedValue - 1)) << logMaxPackedValue) |
- ((uint64(end) & (maxPackedValue - 1)) << (2 * logMaxPackedValue)))
-}
-
-// start extracts the start value from a packed sum.
-func (p pallocSum) start() uint {
- if uint64(p)&uint64(1<<63) != 0 {
- return maxPackedValue
- }
- return uint(uint64(p) & (maxPackedValue - 1))
-}
-
-// max extracts the max value from a packed sum.
-func (p pallocSum) max() uint {
- if uint64(p)&uint64(1<<63) != 0 {
- return maxPackedValue
- }
- return uint((uint64(p) >> logMaxPackedValue) & (maxPackedValue - 1))
-}
-
-// end extracts the end value from a packed sum.
-func (p pallocSum) end() uint {
- if uint64(p)&uint64(1<<63) != 0 {
- return maxPackedValue
- }
- return uint((uint64(p) >> (2 * logMaxPackedValue)) & (maxPackedValue - 1))
-}
-
-// unpack unpacks all three values from the summary.
-func (p pallocSum) unpack() (uint, uint, uint) {
- if uint64(p)&uint64(1<<63) != 0 {
- return maxPackedValue, maxPackedValue, maxPackedValue
- }
- return uint(uint64(p) & (maxPackedValue - 1)),
- uint((uint64(p) >> logMaxPackedValue) & (maxPackedValue - 1)),
- uint((uint64(p) >> (2 * logMaxPackedValue)) & (maxPackedValue - 1))
-}
-
-// mergeSummaries merges consecutive summaries, each of which may represent
-// at most 1 << logMaxPagesPerSum pages, into one.
-func mergeSummaries(sums []pallocSum, logMaxPagesPerSum uint) pallocSum {
- // Merge the summaries in sums into one.
- //
- // We do this by keeping a running summary representing the merged
- // summaries of sums[:i] in start, max, and end.
- start, max, end := sums[0].unpack()
- for i := 1; i < len(sums); i++ {
- // Merge in sums[i].
- si, mi, ei := sums[i].unpack()
-
- // Merge in sums[i].start only if the running summary is
- // completely free, otherwise this summary's start
- // plays no role in the combined sum.
- if start == uint(i)<<logMaxPagesPerSum {
- start += si
- }
-
- // Recompute the max value of the running sum by looking
- // across the boundary between the running sum and sums[i]
- // and at the max sums[i], taking the greatest of those two
- // and the max of the running sum.
- if end+si > max {
- max = end + si
- }
- if mi > max {
- max = mi
- }
-
- // Merge in end by checking if this new summary is totally
- // free. If it is, then we want to extend the running sum's
- // end by the new summary. If not, then we have some alloc'd
- // pages in there and we just want to take the end value in
- // sums[i].
- if ei == 1<<logMaxPagesPerSum {
- end += 1 << logMaxPagesPerSum
- } else {
- end = ei
- }
- }
- return packPallocSum(start, max, end)
-}
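
Since the packing and merging logic above is self-contained, it can be exercised outside the runtime. A standalone sketch (logMaxPackedValue = 21 restated from the 64-bit configuration; this is an illustration, not runtime code) packs two leaf summaries and merges them, showing how a free run straddling a chunk boundary is discovered:

package main

import "fmt"

const logMaxPackedValue = 21 // assumption: 9 + 4*3, the 64-bit configuration
const maxPackedValue = 1 << logMaxPackedValue

type pallocSum uint64

func packPallocSum(start, max, end uint) pallocSum {
	if max == maxPackedValue {
		return pallocSum(uint64(1 << 63))
	}
	return pallocSum((uint64(start) & (maxPackedValue - 1)) |
		((uint64(max) & (maxPackedValue - 1)) << logMaxPackedValue) |
		((uint64(end) & (maxPackedValue - 1)) << (2 * logMaxPackedValue)))
}

func (p pallocSum) unpack() (uint, uint, uint) {
	if uint64(p)&(1<<63) != 0 {
		return maxPackedValue, maxPackedValue, maxPackedValue
	}
	return uint(uint64(p) & (maxPackedValue - 1)),
		uint((uint64(p) >> logMaxPackedValue) & (maxPackedValue - 1)),
		uint((uint64(p) >> (2 * logMaxPackedValue)) & (maxPackedValue - 1))
}

func merge(sums []pallocSum, logMaxPagesPerSum uint) pallocSum {
	start, max, end := sums[0].unpack()
	for i := 1; i < len(sums); i++ {
		si, mi, ei := sums[i].unpack()
		if start == uint(i)<<logMaxPagesPerSum {
			start += si
		}
		if end+si > max {
			max = end + si
		}
		if mi > max {
			max = mi
		}
		if ei == 1<<logMaxPagesPerSum {
			end += 1 << logMaxPagesPerSum
		} else {
			end = ei
		}
	}
	return packPallocSum(start, max, end)
}

func main() {
	// Chunk A: 10 free pages at its end; chunk B: 20 free pages at its start.
	a := packPallocSum(0, 10, 10)
	b := packPallocSum(20, 20, 0)
	s, m, e := merge([]pallocSum{a, b}, 9).unpack() // 9 = logPallocChunkPages
	fmt.Println(s, m, e) // 0 30 0: a 30-page run straddles the boundary
}

The merged max of 30 comes entirely from the end of A plus the start of B, which is exactly the boundary-crossing case the find loop above looks for.
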
diff --git a/contrib/go/_std_1.18/src/runtime/mpagealloc_64bit.go b/contrib/go/_std_1.18/src/runtime/mpagealloc_64bit.go
deleted file mode 100644
index 1bacfbe0fa..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mpagealloc_64bit.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x
-
-package runtime
-
-import "unsafe"
-
-const (
- // The number of levels in the radix tree.
- summaryLevels = 5
-
- // Constants for testing.
- pageAlloc32Bit = 0
- pageAlloc64Bit = 1
-
- // Number of bits needed to represent all indices into the L1 of the
- // chunks map.
- //
- // See (*pageAlloc).chunks for more details. Update the documentation
- // there should this number change.
- pallocChunksL1Bits = 13
-)
-
-// levelBits is the number of bits in the radix for a given level in the super summary
-// structure.
-//
-// The sum of all the entries of levelBits should equal heapAddrBits.
-var levelBits = [summaryLevels]uint{
- summaryL0Bits,
- summaryLevelBits,
- summaryLevelBits,
- summaryLevelBits,
- summaryLevelBits,
-}
-
-// levelShift is the number of bits to shift to acquire the radix for a given level
-// in the super summary structure.
-//
-// With levelShift, one can compute the index of the summary at level l related to a
-// pointer p by doing:
-// p >> levelShift[l]
-var levelShift = [summaryLevels]uint{
- heapAddrBits - summaryL0Bits,
- heapAddrBits - summaryL0Bits - 1*summaryLevelBits,
- heapAddrBits - summaryL0Bits - 2*summaryLevelBits,
- heapAddrBits - summaryL0Bits - 3*summaryLevelBits,
- heapAddrBits - summaryL0Bits - 4*summaryLevelBits,
-}
-
-// levelLogPages is log2 the maximum number of runtime pages in the address space
-// a summary in the given level represents.
-//
-// The leaf level always represents exactly log2 of 1 chunk's worth of pages.
-var levelLogPages = [summaryLevels]uint{
- logPallocChunkPages + 4*summaryLevelBits,
- logPallocChunkPages + 3*summaryLevelBits,
- logPallocChunkPages + 2*summaryLevelBits,
- logPallocChunkPages + 1*summaryLevelBits,
- logPallocChunkPages,
-}
-
-// sysInit performs architecture-dependent initialization of fields
-// in pageAlloc. pageAlloc should be uninitialized except for sysStat,
-// which must be set if any runtime statistic should be updated.
-func (p *pageAlloc) sysInit() {
- // Reserve memory for each level. This will get mapped in
- // as R/W by setArenas.
- for l, shift := range levelShift {
- entries := 1 << (heapAddrBits - shift)
-
- // Reserve b bytes of memory anywhere in the address space.
- b := alignUp(uintptr(entries)*pallocSumBytes, physPageSize)
- r := sysReserve(nil, b)
- if r == nil {
- throw("failed to reserve page summary memory")
- }
-
- // Put this reservation into a slice.
- sl := notInHeapSlice{(*notInHeap)(r), 0, entries}
- p.summary[l] = *(*[]pallocSum)(unsafe.Pointer(&sl))
- }
-}
-
-// sysGrow performs architecture-dependent operations on heap
-// growth for the page allocator, such as mapping in new memory
-// for summaries. It also updates the length of the slices in
-// p.summary.
-//
-// base is the base of the newly-added heap memory and limit is
-// the first address past the end of the newly-added heap memory.
-// Both must be aligned to pallocChunkBytes.
-//
-// The caller must update p.start and p.end after calling sysGrow.
-func (p *pageAlloc) sysGrow(base, limit uintptr) {
- if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 {
- print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
- throw("sysGrow bounds not aligned to pallocChunkBytes")
- }
-
- // addrRangeToSummaryRange converts a range of addresses into a range
- // of summary indices which must be mapped to support those addresses
- // in the summary range.
- addrRangeToSummaryRange := func(level int, r addrRange) (int, int) {
- sumIdxBase, sumIdxLimit := addrsToSummaryRange(level, r.base.addr(), r.limit.addr())
- return blockAlignSummaryRange(level, sumIdxBase, sumIdxLimit)
- }
-
- // summaryRangeToSumAddrRange converts a range of indices in any
- // level of p.summary into page-aligned addresses which cover that
- // range of indices.
- summaryRangeToSumAddrRange := func(level, sumIdxBase, sumIdxLimit int) addrRange {
- baseOffset := alignDown(uintptr(sumIdxBase)*pallocSumBytes, physPageSize)
- limitOffset := alignUp(uintptr(sumIdxLimit)*pallocSumBytes, physPageSize)
- base := unsafe.Pointer(&p.summary[level][0])
- return addrRange{
- offAddr{uintptr(add(base, baseOffset))},
- offAddr{uintptr(add(base, limitOffset))},
- }
- }
-
-	// addrRangeToSumAddrRange is a convenience function that converts
- // an address range r to the address range of the given summary level
- // that stores the summaries for r.
- addrRangeToSumAddrRange := func(level int, r addrRange) addrRange {
- sumIdxBase, sumIdxLimit := addrRangeToSummaryRange(level, r)
- return summaryRangeToSumAddrRange(level, sumIdxBase, sumIdxLimit)
- }
-
- // Find the first inUse index which is strictly greater than base.
- //
-	// Because this function will never be asked to remap the same memory
- // twice, this index is effectively the index at which we would insert
- // this new growth, and base will never overlap/be contained within
- // any existing range.
- //
- // This will be used to look at what memory in the summary array is already
- // mapped before and after this new range.
- inUseIndex := p.inUse.findSucc(base)
-
- // Walk up the radix tree and map summaries in as needed.
- for l := range p.summary {
- // Figure out what part of the summary array this new address space needs.
- needIdxBase, needIdxLimit := addrRangeToSummaryRange(l, makeAddrRange(base, limit))
-
- // Update the summary slices with a new upper-bound. This ensures
- // we get tight bounds checks on at least the top bound.
- //
- // We must do this regardless of whether we map new memory.
- if needIdxLimit > len(p.summary[l]) {
- p.summary[l] = p.summary[l][:needIdxLimit]
- }
-
- // Compute the needed address range in the summary array for level l.
- need := summaryRangeToSumAddrRange(l, needIdxBase, needIdxLimit)
-
- // Prune need down to what needs to be newly mapped. Some parts of it may
- // already be mapped by what inUse describes due to page alignment requirements
- // for mapping. prune's invariants are guaranteed by the fact that this
- // function will never be asked to remap the same memory twice.
- if inUseIndex > 0 {
- need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex-1]))
- }
- if inUseIndex < len(p.inUse.ranges) {
- need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex]))
- }
- // It's possible that after our pruning above, there's nothing new to map.
- if need.size() == 0 {
- continue
- }
-
- // Map and commit need.
- sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat)
- sysUsed(unsafe.Pointer(need.base.addr()), need.size())
- }
-}
diff --git a/contrib/go/_std_1.18/src/runtime/mpagecache.go b/contrib/go/_std_1.18/src/runtime/mpagecache.go
deleted file mode 100644
index 7206e2dbdb..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mpagecache.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "runtime/internal/sys"
- "unsafe"
-)
-
-const pageCachePages = 8 * unsafe.Sizeof(pageCache{}.cache)
-
-// pageCache represents a per-p cache of pages the allocator can
-// allocate from without a lock. More specifically, it represents
-// a pageCachePages*pageSize chunk of memory with 0 or more free
-// pages in it.
-type pageCache struct {
- base uintptr // base address of the chunk
- cache uint64 // 64-bit bitmap representing free pages (1 means free)
- scav uint64 // 64-bit bitmap representing scavenged pages (1 means scavenged)
-}
-
-// empty returns true if the pageCache has no free pages, and false
-// otherwise.
-func (c *pageCache) empty() bool {
- return c.cache == 0
-}
-
-// alloc allocates npages from the page cache and is the main entry
-// point for allocation.
-//
-// Returns a base address and the amount of scavenged memory in the
-// allocated region in bytes.
-//
-// Returns a base address of zero on failure, in which case the
-// amount of scavenged memory should be ignored.
-func (c *pageCache) alloc(npages uintptr) (uintptr, uintptr) {
- if c.cache == 0 {
- return 0, 0
- }
- if npages == 1 {
- i := uintptr(sys.TrailingZeros64(c.cache))
- scav := (c.scav >> i) & 1
- c.cache &^= 1 << i // set bit to mark in-use
- c.scav &^= 1 << i // clear bit to mark unscavenged
- return c.base + i*pageSize, uintptr(scav) * pageSize
- }
- return c.allocN(npages)
-}
-
-// allocN is a helper which attempts to allocate npages worth of pages
-// from the cache. It represents the general case for allocating from
-// the page cache.
-//
-// Returns a base address and the amount of scavenged memory in the
-// allocated region in bytes.
-func (c *pageCache) allocN(npages uintptr) (uintptr, uintptr) {
- i := findBitRange64(c.cache, uint(npages))
- if i >= 64 {
- return 0, 0
- }
- mask := ((uint64(1) << npages) - 1) << i
- scav := sys.OnesCount64(c.scav & mask)
- c.cache &^= mask // mark in-use bits
- c.scav &^= mask // clear scavenged bits
- return c.base + uintptr(i*pageSize), uintptr(scav) * pageSize
-}
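
The fast paths above are plain 64-bit bitmap tricks. A standalone toy version using math/bits (findRun here is a simple stand-in for the runtime's findBitRange64, which lives elsewhere and is written differently):

package main

import (
	"fmt"
	"math/bits"
)

// findRun returns the lowest index of n consecutive 1-bits in c,
// or 64 if there is no such run.
func findRun(c uint64, n uint) uint {
	run := uint(0)
	for i := uint(0); i < 64; i++ {
		if c&(1<<i) == 0 {
			run = 0
			continue
		}
		run++
		if run == n {
			return i + 1 - n
		}
	}
	return 64
}

func main() {
	cache := uint64(0b1111_0110) // pages 1,2,4,5,6,7 free (1 = free)
	// Single page: take the lowest free page, as alloc does.
	i := uint(bits.TrailingZeros64(cache))
	cache &^= 1 << i // set bit to in-use
	fmt.Printf("allocated page %d, cache now %08b\n", i, cache)
	// Multiple pages: find a run and clear its mask, as allocN does.
	j := findRun(cache, 3)
	mask := (uint64(1)<<3 - 1) << j
	cache &^= mask
	fmt.Printf("allocated pages %d..%d, cache now %08b\n", j, j+2, cache)
}
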
-
-// flush empties out unallocated free pages in the given cache
-// into p. Then, it clears the cache, such that empty returns
-// true.
-//
-// p.mheapLock must be held.
-//
-// Must run on the system stack because p.mheapLock must be held.
-//
-//go:systemstack
-func (c *pageCache) flush(p *pageAlloc) {
- assertLockHeld(p.mheapLock)
-
- if c.empty() {
- return
- }
- ci := chunkIndex(c.base)
- pi := chunkPageIndex(c.base)
-
- // This method is called very infrequently, so just do the
- // slower, safer thing by iterating over each bit individually.
- for i := uint(0); i < 64; i++ {
- if c.cache&(1<<i) != 0 {
- p.chunkOf(ci).free1(pi + i)
- }
- if c.scav&(1<<i) != 0 {
- p.chunkOf(ci).scavenged.setRange(pi+i, 1)
- }
- }
- // Since this is a lot like a free, we need to make sure
- // we update the searchAddr just like free does.
- if b := (offAddr{c.base}); b.lessThan(p.searchAddr) {
- p.searchAddr = b
- }
- p.update(c.base, pageCachePages, false, false)
- *c = pageCache{}
-}
-
-// allocToCache acquires a pageCachePages-aligned chunk of free pages which
-// may not be contiguous, and returns a pageCache structure which owns the
-// chunk.
-//
-// p.mheapLock must be held.
-//
-// Must run on the system stack because p.mheapLock must be held.
-//
-//go:systemstack
-func (p *pageAlloc) allocToCache() pageCache {
- assertLockHeld(p.mheapLock)
-
- // If the searchAddr refers to a region which has a higher address than
- // any known chunk, then we know we're out of memory.
- if chunkIndex(p.searchAddr.addr()) >= p.end {
- return pageCache{}
- }
- c := pageCache{}
- ci := chunkIndex(p.searchAddr.addr()) // chunk index
- var chunk *pallocData
- if p.summary[len(p.summary)-1][ci] != 0 {
-		// Fast path: there are free pages at or near p.searchAddr.
- chunk = p.chunkOf(ci)
- j, _ := chunk.find(1, chunkPageIndex(p.searchAddr.addr()))
- if j == ^uint(0) {
- throw("bad summary data")
- }
- c = pageCache{
- base: chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
- cache: ^chunk.pages64(j),
- scav: chunk.scavenged.block64(j),
- }
- } else {
-		// Slow path: nothing was free at p.searchAddr, so go find
- // the first free page the slow way.
- addr, _ := p.find(1)
- if addr == 0 {
- // We failed to find adequate free space, so mark the searchAddr as OoM
- // and return an empty pageCache.
- p.searchAddr = maxSearchAddr
- return pageCache{}
- }
- ci := chunkIndex(addr)
- chunk = p.chunkOf(ci)
- c = pageCache{
- base: alignDown(addr, 64*pageSize),
- cache: ^chunk.pages64(chunkPageIndex(addr)),
- scav: chunk.scavenged.block64(chunkPageIndex(addr)),
- }
- }
-
- // Set the page bits as allocated and clear the scavenged bits, but
- // be careful to only set and clear the relevant bits.
- cpi := chunkPageIndex(c.base)
- chunk.allocPages64(cpi, c.cache)
- chunk.scavenged.clearBlock64(cpi, c.cache&c.scav /* free and scavenged */)
-
- // Update as an allocation, but note that it's not contiguous.
- p.update(c.base, pageCachePages, false, true)
-
- // Set the search address to the last page represented by the cache.
- // Since all of the pages in this block are going to the cache, and we
- // searched for the first free page, we can confidently start at the
- // next page.
- //
- // However, p.searchAddr is not allowed to point into unmapped heap memory
- // unless it is maxSearchAddr, so make it the last page as opposed to
- // the page after.
- p.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
- return c
-}
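
Two details of allocToCache are worth illustrating: the chunk's page bitmap uses 1 = allocated while the cache uses 1 = free, hence the inversion, and the cache base is aligned down to a 64-page window. A minimal sketch (the constants and addresses are assumptions for illustration only):

package main

import "fmt"

func main() {
	// Invert a 64-page window of the allocation bitmap (1 = allocated)
	// to get the cache bitmap (1 = free), as cache: ^chunk.pages64(j) does.
	pages64 := uint64(0x00000000000000ff) // first 8 pages allocated
	cache := ^pages64                     // the other 56 pages are free
	fmt.Printf("cache %016x\n", cache)    // ffffffffffffff00

	// Align the base down to a 64-page boundary (8 KiB pages assumed).
	const pageSize = 8192
	addr := uintptr(0x1c0ff000)
	base := addr &^ (64*pageSize - 1) // alignDown(addr, 64*pageSize)
	fmt.Printf("%#x -> base %#x\n", addr, base)
}
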
diff --git a/contrib/go/_std_1.18/src/runtime/mprof.go b/contrib/go/_std_1.18/src/runtime/mprof.go
deleted file mode 100644
index 569c17f0a7..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mprof.go
+++ /dev/null
@@ -1,937 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Malloc profiling.
-// Patterned after tcmalloc's algorithms; shorter code.
-
-package runtime
-
-import (
- "internal/abi"
- "runtime/internal/atomic"
- "unsafe"
-)
-
-// NOTE(rsc): Everything here could use cas if contention became an issue.
-var proflock mutex
-
-// All memory allocations are local and do not escape outside of the profiler.
-// The profiler is forbidden from referring to garbage-collected memory.
-
-const (
- // profile types
- memProfile bucketType = 1 + iota
- blockProfile
- mutexProfile
-
- // size of bucket hash table
- buckHashSize = 179999
-
- // max depth of stack to record in bucket
- maxStack = 32
-)
-
-type bucketType int
-
-// A bucket holds per-call-stack profiling information.
-// The representation is a bit sleazy, inherited from C.
-// This struct defines the bucket header. It is followed in
-// memory by the stack words and then the actual record
-// data, either a memRecord or a blockRecord.
-//
-// Per-call-stack profiling information.
-// Lookup by hashing call stack into a linked-list hash table.
-//
-// No heap pointers.
-//
-//go:notinheap
-type bucket struct {
- next *bucket
- allnext *bucket
- typ bucketType // memBucket or blockBucket (includes mutexProfile)
- hash uintptr
- size uintptr
- nstk uintptr
-}
-
-// A memRecord is the bucket data for a bucket of type memProfile,
-// part of the memory profile.
-type memRecord struct {
- // The following complex 3-stage scheme of stats accumulation
- // is required to obtain a consistent picture of mallocs and frees
- // for some point in time.
- // The problem is that mallocs come in real time, while frees
- // come only after a GC during concurrent sweeping. So if we would
- // naively count them, we would get a skew toward mallocs.
- //
- // Hence, we delay information to get consistent snapshots as
- // of mark termination. Allocations count toward the next mark
- // termination's snapshot, while sweep frees count toward the
- // previous mark termination's snapshot:
- //
- // MT MT MT MT
- // .·| .·| .·| .·|
- // .·˙ | .·˙ | .·˙ | .·˙ |
- // .·˙ | .·˙ | .·˙ | .·˙ |
- // .·˙ |.·˙ |.·˙ |.·˙ |
- //
- // alloc → ▲ ← free
- // ┠┅┅┅┅┅┅┅┅┅┅┅P
- // C+2 → C+1 → C
- //
- // alloc → ▲ ← free
- // ┠┅┅┅┅┅┅┅┅┅┅┅P
- // C+2 → C+1 → C
- //
- // Since we can't publish a consistent snapshot until all of
- // the sweep frees are accounted for, we wait until the next
- // mark termination ("MT" above) to publish the previous mark
- // termination's snapshot ("P" above). To do this, allocation
- // and free events are accounted to *future* heap profile
-	// cycles ("C+n" above) and we only publish a cycle once all
-	// of the events from that cycle are done. Specifically:
- //
- // Mallocs are accounted to cycle C+2.
- // Explicit frees are accounted to cycle C+2.
- // GC frees (done during sweeping) are accounted to cycle C+1.
- //
- // After mark termination, we increment the global heap
- // profile cycle counter and accumulate the stats from cycle C
- // into the active profile.
-
- // active is the currently published profile. A profiling
-	// cycle can be accumulated into active once it's complete.
- active memRecordCycle
-
- // future records the profile events we're counting for cycles
-	// that have not yet been published. This is a ring buffer
- // indexed by the global heap profile cycle C and stores
- // cycles C, C+1, and C+2. Unlike active, these counts are
- // only for a single cycle; they are not cumulative across
- // cycles.
- //
- // We store cycle C here because there's a window between when
- // C becomes the active cycle and when we've flushed it to
- // active.
- future [3]memRecordCycle
-}
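
A toy model of the scheme above (not runtime code; the field names are simplified): mallocs are credited to cycle C+2, sweep frees to C+1, and flush publishes cycle C into active, so an allocation only becomes visible two mark terminations after it happens:

package main

import "fmt"

type cycleStats struct{ allocs, frees int }

type record struct {
	active cycleStats
	future [3]cycleStats // ring indexed by cycle % 3
}

var cycle uint32 // global heap profile cycle C

func (r *record) malloc()    { r.future[(cycle+2)%3].allocs++ }
func (r *record) sweepFree() { r.future[(cycle+1)%3].frees++ }

// flush publishes cycle C into active, mirroring mProf_FlushLocked.
func (r *record) flush() {
	c := &r.future[cycle%3]
	r.active.allocs += c.allocs
	r.active.frees += c.frees
	*c = cycleStats{}
}

func main() {
	var r record
	r.malloc() // credited to cycle C+2
	for i := 0; i < 3; i++ {
		cycle++   // mProf_NextCycle (mark termination)
		r.flush() // mProf_Flush (after the world restarts)
		fmt.Printf("after cycle %d: active=%+v\n", cycle, r.active)
	}
}

The malloc shows up in active only after the second flush, which is the delay the diagram above depicts.
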
-
-// memRecordCycle holds the allocation statistics for a single heap profile cycle.
-type memRecordCycle struct {
- allocs, frees uintptr
- alloc_bytes, free_bytes uintptr
-}
-
-// add accumulates b into a. It does not zero b.
-func (a *memRecordCycle) add(b *memRecordCycle) {
- a.allocs += b.allocs
- a.frees += b.frees
- a.alloc_bytes += b.alloc_bytes
- a.free_bytes += b.free_bytes
-}
-
-// A blockRecord is the bucket data for a bucket of type blockProfile,
-// which is used in blocking and mutex profiles.
-type blockRecord struct {
- count float64
- cycles int64
-}
-
-var (
- mbuckets *bucket // memory profile buckets
- bbuckets *bucket // blocking profile buckets
- xbuckets *bucket // mutex profile buckets
- buckhash *[buckHashSize]*bucket
- bucketmem uintptr
-
- mProf struct {
- // All fields in mProf are protected by proflock.
-
- // cycle is the global heap profile cycle. This wraps
- // at mProfCycleWrap.
- cycle uint32
- // flushed indicates that future[cycle] in all buckets
- // has been flushed to the active profile.
- flushed bool
- }
-)
-
-const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)
-
-// newBucket allocates a bucket with the given type and number of stack entries.
-func newBucket(typ bucketType, nstk int) *bucket {
- size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
- switch typ {
- default:
- throw("invalid profile bucket type")
- case memProfile:
- size += unsafe.Sizeof(memRecord{})
- case blockProfile, mutexProfile:
- size += unsafe.Sizeof(blockRecord{})
- }
-
- b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
- bucketmem += size
- b.typ = typ
- b.nstk = uintptr(nstk)
- return b
-}
-
-// stk returns the slice in b holding the stack.
-func (b *bucket) stk() []uintptr {
- stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
- return stk[:b.nstk:b.nstk]
-}
-
-// mp returns the memRecord associated with the memProfile bucket b.
-func (b *bucket) mp() *memRecord {
- if b.typ != memProfile {
- throw("bad use of bucket.mp")
- }
- data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
- return (*memRecord)(data)
-}
-
-// bp returns the blockRecord associated with the blockProfile bucket b.
-func (b *bucket) bp() *blockRecord {
- if b.typ != blockProfile && b.typ != mutexProfile {
- throw("bad use of bucket.bp")
- }
- data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
- return (*blockRecord)(data)
-}
-
-// Return the bucket for stk[0:nstk], allocating new bucket if needed.
-func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
- if buckhash == nil {
- buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
- if buckhash == nil {
- throw("runtime: cannot allocate memory")
- }
- }
-
- // Hash stack.
- var h uintptr
- for _, pc := range stk {
- h += pc
- h += h << 10
- h ^= h >> 6
- }
- // hash in size
- h += size
- h += h << 10
- h ^= h >> 6
- // finalize
- h += h << 3
- h ^= h >> 11
-
- i := int(h % buckHashSize)
- for b := buckhash[i]; b != nil; b = b.next {
- if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
- return b
- }
- }
-
- if !alloc {
- return nil
- }
-
- // Create new bucket.
- b := newBucket(typ, len(stk))
- copy(b.stk(), stk)
- b.hash = h
- b.size = size
- b.next = buckhash[i]
- buckhash[i] = b
- if typ == memProfile {
- b.allnext = mbuckets
- mbuckets = b
- } else if typ == mutexProfile {
- b.allnext = xbuckets
- xbuckets = b
- } else {
- b.allnext = bbuckets
- bbuckets = b
- }
- return b
-}
-
-func eqslice(x, y []uintptr) bool {
- if len(x) != len(y) {
- return false
- }
- for i, xi := range x {
- if xi != y[i] {
- return false
- }
- }
- return true
-}
-
-// mProf_NextCycle publishes the next heap profile cycle and creates a
-// fresh heap profile cycle. This operation is fast and can be done
-// during STW. The caller must call mProf_Flush before calling
-// mProf_NextCycle again.
-//
-// This is called by mark termination during STW so allocations and
-// frees after the world is started again count towards a new heap
-// profiling cycle.
-func mProf_NextCycle() {
- lock(&proflock)
- // We explicitly wrap mProf.cycle rather than depending on
- // uint wraparound because the memRecord.future ring does not
- // itself wrap at a power of two.
- mProf.cycle = (mProf.cycle + 1) % mProfCycleWrap
- mProf.flushed = false
- unlock(&proflock)
-}
-
-// mProf_Flush flushes the events from the current heap profiling
-// cycle into the active profile. After this it is safe to start a new
-// heap profiling cycle with mProf_NextCycle.
-//
-// This is called by GC after mark termination starts the world. In
-// contrast with mProf_NextCycle, this is somewhat expensive, but safe
-// to do concurrently.
-func mProf_Flush() {
- lock(&proflock)
- if !mProf.flushed {
- mProf_FlushLocked()
- mProf.flushed = true
- }
- unlock(&proflock)
-}
-
-func mProf_FlushLocked() {
- c := mProf.cycle
- for b := mbuckets; b != nil; b = b.allnext {
- mp := b.mp()
-
- // Flush cycle C into the published profile and clear
- // it for reuse.
- mpc := &mp.future[c%uint32(len(mp.future))]
- mp.active.add(mpc)
- *mpc = memRecordCycle{}
- }
-}
-
-// mProf_PostSweep records that all sweep frees for this GC cycle have
-// completed. This has the effect of publishing the heap profile
-// snapshot as of the last mark termination without advancing the heap
-// profile cycle.
-func mProf_PostSweep() {
- lock(&proflock)
- // Flush cycle C+1 to the active profile so everything as of
- // the last mark termination becomes visible. *Don't* advance
- // the cycle, since we're still accumulating allocs in cycle
- // C+2, which have to become C+1 in the next mark termination
- // and so on.
- c := mProf.cycle
- for b := mbuckets; b != nil; b = b.allnext {
- mp := b.mp()
- mpc := &mp.future[(c+1)%uint32(len(mp.future))]
- mp.active.add(mpc)
- *mpc = memRecordCycle{}
- }
- unlock(&proflock)
-}
-
-// Called by malloc to record a profiled block.
-func mProf_Malloc(p unsafe.Pointer, size uintptr) {
- var stk [maxStack]uintptr
- nstk := callers(4, stk[:])
- lock(&proflock)
- b := stkbucket(memProfile, size, stk[:nstk], true)
- c := mProf.cycle
- mp := b.mp()
- mpc := &mp.future[(c+2)%uint32(len(mp.future))]
- mpc.allocs++
- mpc.alloc_bytes += size
- unlock(&proflock)
-
- // Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
- // This reduces potential contention and chances of deadlocks.
- // Since the object must be alive during call to mProf_Malloc,
- // it's fine to do this non-atomically.
- systemstack(func() {
- setprofilebucket(p, b)
- })
-}
-
-// Called when freeing a profiled block.
-func mProf_Free(b *bucket, size uintptr) {
- lock(&proflock)
- c := mProf.cycle
- mp := b.mp()
- mpc := &mp.future[(c+1)%uint32(len(mp.future))]
- mpc.frees++
- mpc.free_bytes += size
- unlock(&proflock)
-}
-
-var blockprofilerate uint64 // in CPU ticks
-
-// SetBlockProfileRate controls the fraction of goroutine blocking events
-// that are reported in the blocking profile. The profiler aims to sample
-// an average of one blocking event per rate nanoseconds spent blocked.
-//
-// To include every blocking event in the profile, pass rate = 1.
-// To turn off profiling entirely, pass rate <= 0.
-func SetBlockProfileRate(rate int) {
- var r int64
- if rate <= 0 {
- r = 0 // disable profiling
- } else if rate == 1 {
- r = 1 // profile everything
- } else {
- // convert ns to cycles, use float64 to prevent overflow during multiplication
- r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
- if r == 0 {
- r = 1
- }
- }
-
- atomic.Store64(&blockprofilerate, uint64(r))
-}
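-
-// A minimal usage sketch (hypothetical caller, not part of this file;
-// assumes imports "os" and "runtime/pprof"):
-//
-//   runtime.SetBlockProfileRate(1)              // record every blocking event
-//   defer runtime.SetBlockProfileRate(0)        // disable when done
-//   pprof.Lookup("block").WriteTo(os.Stderr, 1) // dump the profile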
-
-func blockevent(cycles int64, skip int) {
- if cycles <= 0 {
- cycles = 1
- }
-
- rate := int64(atomic.Load64(&blockprofilerate))
- if blocksampled(cycles, rate) {
- saveblockevent(cycles, rate, skip+1, blockProfile)
- }
-}
-
-// blocksampled returns true for all events where cycles >= rate. Shorter
-// events have a cycles/rate random chance of returning true.
-func blocksampled(cycles, rate int64) bool {
- if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
- return false
- }
- return true
-}
-
-func saveblockevent(cycles, rate int64, skip int, which bucketType) {
- gp := getg()
- var nstk int
- var stk [maxStack]uintptr
- if gp.m.curg == nil || gp.m.curg == gp {
- nstk = callers(skip, stk[:])
- } else {
- nstk = gcallers(gp.m.curg, skip, stk[:])
- }
- lock(&proflock)
- b := stkbucket(which, 0, stk[:nstk], true)
-
- if which == blockProfile && cycles < rate {
- // Remove sampling bias, see discussion on http://golang.org/cl/299991.
- b.bp().count += float64(rate) / float64(cycles)
- b.bp().cycles += rate
- } else {
- b.bp().count++
- b.bp().cycles += cycles
- }
- unlock(&proflock)
-}
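-
-// Worked sampling arithmetic (editor's illustration, not from the source):
-// with rate = 10000 and an event of cycles = 100, blocksampled reports the
-// event with probability 100/10000 = 1%. When reported, the bucket gets
-// count += 10000/100 = 100 and cycles += 10000, so the expected
-// contribution per event is 1 count and 100 cycles, the same totals an
-// unsampled profile would accumulate.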
-
-var mutexprofilerate uint64 // fraction sampled
-
-// SetMutexProfileFraction controls the fraction of mutex contention events
-// that are reported in the mutex profile. On average 1/rate events are
-// reported. The previous rate is returned.
-//
-// To turn off profiling entirely, pass rate 0.
-// To just read the current rate, pass rate < 0.
-// (For n>1 the details of sampling may change.)
-func SetMutexProfileFraction(rate int) int {
- if rate < 0 {
- return int(mutexprofilerate)
- }
- old := mutexprofilerate
- atomic.Store64(&mutexprofilerate, uint64(rate))
- return int(old)
-}
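-
-// Typical call pattern (illustrative, not from this file):
-//
-//   old := runtime.SetMutexProfileFraction(5) // report ~1/5 of contention events
-//   defer runtime.SetMutexProfileFraction(old)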
-
-//go:linkname mutexevent sync.event
-func mutexevent(cycles int64, skip int) {
- if cycles < 0 {
- cycles = 0
- }
- rate := int64(atomic.Load64(&mutexprofilerate))
- // TODO(pjw): measure impact of always calling fastrand vs using something
- // like malloc.go:nextSample()
- if rate > 0 && int64(fastrand())%rate == 0 {
- saveblockevent(cycles, rate, skip+1, mutexProfile)
- }
-}
-
-// Go interface to profile data.
-
-// A StackRecord describes a single execution stack.
-type StackRecord struct {
- Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
-}
-
-// Stack returns the stack trace associated with the record,
-// a prefix of r.Stack0.
-func (r *StackRecord) Stack() []uintptr {
- for i, v := range r.Stack0 {
- if v == 0 {
- return r.Stack0[0:i]
- }
- }
- return r.Stack0[0:]
-}
-
-// MemProfileRate controls the fraction of memory allocations
-// that are recorded and reported in the memory profile.
-// The profiler aims to sample an average of
-// one allocation per MemProfileRate bytes allocated.
-//
-// To include every allocated block in the profile, set MemProfileRate to 1.
-// To turn off profiling entirely, set MemProfileRate to 0.
-//
-// The tools that process the memory profiles assume that the
-// profile rate is constant across the lifetime of the program
-// and equal to the current value. Programs that change the
-// memory profiling rate should do so just once, as early as
-// possible in the execution of the program (for example,
-// at the beginning of main).
-var MemProfileRate int = defaultMemProfileRate(512 * 1024)
-
-// defaultMemProfileRate returns 0 if disableMemoryProfiling is set.
-// It exists primarily for the godoc rendering of MemProfileRate
-// above.
-func defaultMemProfileRate(v int) int {
- if disableMemoryProfiling {
- return 0
- }
- return v
-}
-
-// disableMemoryProfiling is set by the linker if runtime.MemProfile
-// is not used and the link type guarantees nobody else could use it
-// elsewhere.
-var disableMemoryProfiling bool
-
-// A MemProfileRecord describes the live objects allocated
-// by a particular call sequence (stack trace).
-type MemProfileRecord struct {
- AllocBytes, FreeBytes int64 // number of bytes allocated, freed
- AllocObjects, FreeObjects int64 // number of objects allocated, freed
- Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
-}
-
-// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
-func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
-
-// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
-func (r *MemProfileRecord) InUseObjects() int64 {
- return r.AllocObjects - r.FreeObjects
-}
-
-// Stack returns the stack trace associated with the record,
-// a prefix of r.Stack0.
-func (r *MemProfileRecord) Stack() []uintptr {
- for i, v := range r.Stack0 {
- if v == 0 {
- return r.Stack0[0:i]
- }
- }
- return r.Stack0[0:]
-}
-
-// MemProfile returns a profile of memory allocated and freed per allocation
-// site.
-//
-// MemProfile returns n, the number of records in the current memory profile.
-// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
-// If len(p) < n, MemProfile does not change p and returns n, false.
-//
-// If inuseZero is true, the profile includes allocation records
-// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
-// These are sites where memory was allocated, but it has all
-// been released back to the runtime.
-//
-// The returned profile may be up to two garbage collection cycles old.
-// This is to avoid skewing the profile toward allocations; because
-// allocations happen in real time but frees are delayed until the garbage
-// collector performs sweeping, the profile only accounts for allocations
-// that have had a chance to be freed by the garbage collector.
-//
-// Most clients should use the runtime/pprof package or
-// the testing package's -test.memprofile flag instead
-// of calling MemProfile directly.
-func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
- lock(&proflock)
- // If we're between mProf_NextCycle and mProf_Flush, take care
- // of flushing to the active profile so we only have to look
- // at the active profile below.
- mProf_FlushLocked()
- clear := true
- for b := mbuckets; b != nil; b = b.allnext {
- mp := b.mp()
- if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
- n++
- }
- if mp.active.allocs != 0 || mp.active.frees != 0 {
- clear = false
- }
- }
- if clear {
- // Absolutely no data, suggesting that a garbage collection
- // has not yet happened. In order to allow profiling when
- // garbage collection is disabled from the beginning of execution,
- // accumulate all of the cycles, and recount buckets.
- n = 0
- for b := mbuckets; b != nil; b = b.allnext {
- mp := b.mp()
- for c := range mp.future {
- mp.active.add(&mp.future[c])
- mp.future[c] = memRecordCycle{}
- }
- if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
- n++
- }
- }
- }
- if n <= len(p) {
- ok = true
- idx := 0
- for b := mbuckets; b != nil; b = b.allnext {
- mp := b.mp()
- if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
- record(&p[idx], b)
- idx++
- }
- }
- }
- unlock(&proflock)
- return
-}
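-
-// The usual grow-and-retry call pattern (a sketch of a hypothetical caller):
-//
-//   n, _ := runtime.MemProfile(nil, true)
-//   var p []runtime.MemProfileRecord
-//   for {
-//       p = make([]runtime.MemProfileRecord, n+50) // headroom for new records
-//       var ok bool
-//       if n, ok = runtime.MemProfile(p, true); ok {
-//           p = p[:n]
-//           break
-//       }
-//   }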
-
-// Write b's data to r.
-func record(r *MemProfileRecord, b *bucket) {
- mp := b.mp()
- r.AllocBytes = int64(mp.active.alloc_bytes)
- r.FreeBytes = int64(mp.active.free_bytes)
- r.AllocObjects = int64(mp.active.allocs)
- r.FreeObjects = int64(mp.active.frees)
- if raceenabled {
- racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
- }
- if msanenabled {
- msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
- }
- if asanenabled {
- asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
- }
- copy(r.Stack0[:], b.stk())
- for i := int(b.nstk); i < len(r.Stack0); i++ {
- r.Stack0[i] = 0
- }
-}
-
-func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
- lock(&proflock)
- for b := mbuckets; b != nil; b = b.allnext {
- mp := b.mp()
- fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
- }
- unlock(&proflock)
-}
-
-// BlockProfileRecord describes blocking events originated
-// at a particular call sequence (stack trace).
-type BlockProfileRecord struct {
- Count int64
- Cycles int64
- StackRecord
-}
-
-// BlockProfile returns n, the number of records in the current blocking profile.
-// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
-// If len(p) < n, BlockProfile does not change p and returns n, false.
-//
-// Most clients should use the runtime/pprof package or
-// the testing package's -test.blockprofile flag instead
-// of calling BlockProfile directly.
-func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
- lock(&proflock)
- for b := bbuckets; b != nil; b = b.allnext {
- n++
- }
- if n <= len(p) {
- ok = true
- for b := bbuckets; b != nil; b = b.allnext {
- bp := b.bp()
- r := &p[0]
- r.Count = int64(bp.count)
- // Prevent callers from having to worry about division by zero errors.
- // See discussion on http://golang.org/cl/299991.
- if r.Count == 0 {
- r.Count = 1
- }
- r.Cycles = bp.cycles
- if raceenabled {
- racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
- }
- if msanenabled {
- msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
- }
- if asanenabled {
- asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
- }
- i := copy(r.Stack0[:], b.stk())
- for ; i < len(r.Stack0); i++ {
- r.Stack0[i] = 0
- }
- p = p[1:]
- }
- }
- unlock(&proflock)
- return
-}
-
-// MutexProfile returns n, the number of records in the current mutex profile.
-// If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
-// Otherwise, MutexProfile does not change p, and returns n, false.
-//
-// Most clients should use the runtime/pprof package
-// instead of calling MutexProfile directly.
-func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
- lock(&proflock)
- for b := xbuckets; b != nil; b = b.allnext {
- n++
- }
- if n <= len(p) {
- ok = true
- for b := xbuckets; b != nil; b = b.allnext {
- bp := b.bp()
- r := &p[0]
- r.Count = int64(bp.count)
- r.Cycles = bp.cycles
- i := copy(r.Stack0[:], b.stk())
- for ; i < len(r.Stack0); i++ {
- r.Stack0[i] = 0
- }
- p = p[1:]
- }
- }
- unlock(&proflock)
- return
-}
-
-// ThreadCreateProfile returns n, the number of records in the thread creation profile.
-// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
-// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
-//
-// Most clients should use the runtime/pprof package instead
-// of calling ThreadCreateProfile directly.
-func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
- first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
- for mp := first; mp != nil; mp = mp.alllink {
- n++
- }
- if n <= len(p) {
- ok = true
- i := 0
- for mp := first; mp != nil; mp = mp.alllink {
- p[i].Stack0 = mp.createstack
- i++
- }
- }
- return
-}
-
-//go:linkname runtime_goroutineProfileWithLabels runtime/pprof.runtime_goroutineProfileWithLabels
-func runtime_goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
- return goroutineProfileWithLabels(p, labels)
-}
-
-// labels may be nil. If labels is non-nil, it must have the same length as p.
-func goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
- if labels != nil && len(labels) != len(p) {
- labels = nil
- }
- gp := getg()
-
- isOK := func(gp1 *g) bool {
- // Checking isSystemGoroutine here makes GoroutineProfile
- // consistent with both NumGoroutine and Stack.
- return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false)
- }
-
- stopTheWorld("profile")
-
- // World is stopped, no locking required.
- n = 1
- forEachGRace(func(gp1 *g) {
- if isOK(gp1) {
- n++
- }
- })
-
- if n <= len(p) {
- ok = true
- r, lbl := p, labels
-
- // Save current goroutine.
- sp := getcallersp()
- pc := getcallerpc()
- systemstack(func() {
- saveg(pc, sp, gp, &r[0])
- })
- r = r[1:]
-
- // If we have a place to put our goroutine labelmap, insert it there.
- if labels != nil {
- lbl[0] = gp.labels
- lbl = lbl[1:]
- }
-
- // Save other goroutines.
- forEachGRace(func(gp1 *g) {
- if !isOK(gp1) {
- return
- }
-
- if len(r) == 0 {
- // Should be impossible, but better to return a
- // truncated profile than to crash the entire process.
- return
- }
- // saveg calls gentraceback, which may call cgo traceback functions.
- // The world is stopped, so it cannot use cgocall (which will be
- // blocked at exitsyscall). Do it on the system stack so it won't
- // call into the scheduler (see traceback.go:cgoContextPCs).
- systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &r[0]) })
- if labels != nil {
- lbl[0] = gp1.labels
- lbl = lbl[1:]
- }
- r = r[1:]
- })
- }
-
- startTheWorld()
- return n, ok
-}
-
-// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
-// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
-// If len(p) < n, GoroutineProfile does not change p and returns n, false.
-//
-// Most clients should use the runtime/pprof package instead
-// of calling GoroutineProfile directly.
-func GoroutineProfile(p []StackRecord) (n int, ok bool) {
- return goroutineProfileWithLabels(p, nil)
-}
-
-func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
- n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0)
- if n < len(r.Stack0) {
- r.Stack0[n] = 0
- }
-}
-
-// Stack formats a stack trace of the calling goroutine into buf
-// and returns the number of bytes written to buf.
-// If all is true, Stack formats stack traces of all other goroutines
-// into buf after the trace for the current goroutine.
-func Stack(buf []byte, all bool) int {
- if all {
- stopTheWorld("stack trace")
- }
-
- n := 0
- if len(buf) > 0 {
- gp := getg()
- sp := getcallersp()
- pc := getcallerpc()
- systemstack(func() {
- g0 := getg()
- // Force traceback=1 to override GOTRACEBACK setting,
- // so that Stack's results are consistent.
- // GOTRACEBACK is only about crash dumps.
- g0.m.traceback = 1
- g0.writebuf = buf[0:0:len(buf)]
- goroutineheader(gp)
- traceback(pc, sp, 0, gp)
- if all {
- tracebackothers(gp)
- }
- g0.m.traceback = 0
- n = len(g0.writebuf)
- g0.writebuf = nil
- })
- }
-
- if all {
- startTheWorld()
- }
- return n
-}
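-
-// Illustrative use (hypothetical caller; assumes package os is imported):
-//
-//   buf := make([]byte, 1<<20)
-//   n := runtime.Stack(buf, true) // true: include all goroutines
-//   os.Stderr.Write(buf[:n])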
-
-// Tracing of alloc/free/gc.
-
-var tracelock mutex
-
-func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
- lock(&tracelock)
- gp := getg()
- gp.m.traceback = 2
- if typ == nil {
- print("tracealloc(", p, ", ", hex(size), ")\n")
- } else {
- print("tracealloc(", p, ", ", hex(size), ", ", typ.string(), ")\n")
- }
- if gp.m.curg == nil || gp == gp.m.curg {
- goroutineheader(gp)
- pc := getcallerpc()
- sp := getcallersp()
- systemstack(func() {
- traceback(pc, sp, 0, gp)
- })
- } else {
- goroutineheader(gp.m.curg)
- traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
- }
- print("\n")
- gp.m.traceback = 0
- unlock(&tracelock)
-}
-
-func tracefree(p unsafe.Pointer, size uintptr) {
- lock(&tracelock)
- gp := getg()
- gp.m.traceback = 2
- print("tracefree(", p, ", ", hex(size), ")\n")
- goroutineheader(gp)
- pc := getcallerpc()
- sp := getcallersp()
- systemstack(func() {
- traceback(pc, sp, 0, gp)
- })
- print("\n")
- gp.m.traceback = 0
- unlock(&tracelock)
-}
-
-func tracegc() {
- lock(&tracelock)
- gp := getg()
- gp.m.traceback = 2
- print("tracegc()\n")
- // running on m->g0 stack; show all non-g0 goroutines
- tracebackothers(gp)
- print("end tracegc\n")
- print("\n")
- gp.m.traceback = 0
- unlock(&tracelock)
-}
diff --git a/contrib/go/_std_1.18/src/runtime/mranges.go b/contrib/go/_std_1.18/src/runtime/mranges.go
deleted file mode 100644
index e0be1e134e..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mranges.go
+++ /dev/null
@@ -1,372 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Address range data structure.
-//
-// This file contains an implementation of a data structure which
-// manages ordered address ranges.
-
-package runtime
-
-import (
- "internal/goarch"
- "unsafe"
-)
-
-// addrRange represents a region of address space.
-//
-// An addrRange must never span a gap in the address space.
-type addrRange struct {
- // base and limit together represent the region of address space
- // [base, limit). That is, base is inclusive, limit is exclusive.
- // These are addresses over an offset view of the address space on
- // platforms with a segmented address space, that is, on platforms
- // where arenaBaseOffset != 0.
- base, limit offAddr
-}
-
-// makeAddrRange creates a new address range from two virtual addresses.
-//
-// Throws if the base and limit are not in the same memory segment.
-func makeAddrRange(base, limit uintptr) addrRange {
- r := addrRange{offAddr{base}, offAddr{limit}}
- if (base-arenaBaseOffset >= base) != (limit-arenaBaseOffset >= limit) {
- throw("addr range base and limit are not in the same memory segment")
- }
- return r
-}
-
-// size returns the size of the range represented in bytes.
-func (a addrRange) size() uintptr {
- if !a.base.lessThan(a.limit) {
- return 0
- }
- // Subtraction is safe because limit and base must be in the same
- // segment of the address space.
- return a.limit.diff(a.base)
-}
-
-// contains returns whether or not the range contains a given address.
-func (a addrRange) contains(addr uintptr) bool {
- return a.base.lessEqual(offAddr{addr}) && (offAddr{addr}).lessThan(a.limit)
-}
-
-// subtract cuts any overlap with b out of a, then returns the
-// resulting range. subtract assumes that a and b
-// either don't overlap at all, only overlap on one side, or are equal.
-// If b is strictly contained in a, thus forcing a split, it will throw.
-func (a addrRange) subtract(b addrRange) addrRange {
- if b.base.lessEqual(a.base) && a.limit.lessEqual(b.limit) {
- return addrRange{}
- } else if a.base.lessThan(b.base) && b.limit.lessThan(a.limit) {
- throw("bad prune")
- } else if b.limit.lessThan(a.limit) && a.base.lessThan(b.limit) {
- a.base = b.limit
- } else if a.base.lessThan(b.base) && b.base.lessThan(a.limit) {
- a.limit = b.base
- }
- return a
-}
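-
-// Worked examples (editor's illustration): subtracting [0x180, 0x200) from
-// [0x100, 0x200) yields [0x100, 0x180); subtracting an equal range yields
-// the empty range; subtracting [0x140, 0x1c0) from [0x100, 0x200) throws,
-// since it would split the range in two.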
-
-// removeGreaterEqual removes all addresses in a greater than or equal
-// to addr and returns the new range.
-func (a addrRange) removeGreaterEqual(addr uintptr) addrRange {
- if (offAddr{addr}).lessEqual(a.base) {
- return addrRange{}
- }
- if a.limit.lessEqual(offAddr{addr}) {
- return a
- }
- return makeAddrRange(a.base.addr(), addr)
-}
-
-var (
- // minOffAddr is the minimum address in the offset space, and
- // it corresponds to the virtual address arenaBaseOffset.
- minOffAddr = offAddr{arenaBaseOffset}
-
- // maxOffAddr is the maximum address in the offset address
- // space. It corresponds to the highest virtual address representable
- // by the page alloc chunk and heap arena maps.
- maxOffAddr = offAddr{(((1 << heapAddrBits) - 1) + arenaBaseOffset) & uintptrMask}
-)
-
-// offAddr represents an address in a contiguous view
-// of the address space on systems where the address space is
-// segmented. On other systems, it's just a normal address.
-type offAddr struct {
- // a is just the virtual address, but should never be used
- // directly. Call addr() to get this value instead.
- a uintptr
-}
-
-// add adds a uintptr offset to the offAddr.
-func (l offAddr) add(bytes uintptr) offAddr {
- return offAddr{a: l.a + bytes}
-}
-
-// sub subtracts a uintptr offset from the offAddr.
-func (l offAddr) sub(bytes uintptr) offAddr {
- return offAddr{a: l.a - bytes}
-}
-
-// diff returns the number of bytes between the
-// two offAddrs.
-func (l1 offAddr) diff(l2 offAddr) uintptr {
- return l1.a - l2.a
-}
-
-// lessThan returns true if l1 is less than l2 in the offset
-// address space.
-func (l1 offAddr) lessThan(l2 offAddr) bool {
- return (l1.a - arenaBaseOffset) < (l2.a - arenaBaseOffset)
-}
-
-// lessEqual returns true if l1 is less than or equal to l2 in
-// the offset address space.
-func (l1 offAddr) lessEqual(l2 offAddr) bool {
- return (l1.a - arenaBaseOffset) <= (l2.a - arenaBaseOffset)
-}
-
-// equal returns true if the two offAddr values are equal.
-func (l1 offAddr) equal(l2 offAddr) bool {
- // No need to compare in the offset space, it
- // means the same thing.
- return l1 == l2
-}
-
-// addr returns the virtual address for this offset address.
-func (l offAddr) addr() uintptr {
- return l.a
-}
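-
-// Worked comparison (hypothetical numbers, not from the source): with
-// arenaBaseOffset = 0x1000, offAddr{0x1000} has offset 0x1000-0x1000 = 0,
-// while offAddr{0x0fff} underflows to the maximum uintptr offset. So
-// offAddr{0x1000}.lessThan(offAddr{0x0fff}) is true: ordering follows the
-// offset space, not the raw virtual address.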
-
-// addrRanges is a data structure holding a collection of ranges of
-// address space.
-//
-// The ranges are coalesced eagerly to reduce the
-// number of ranges it holds.
-//
-// The slice backing store for this field is persistentalloc'd
-// and thus there is no way to free it.
-//
-// addrRanges is not thread-safe.
-type addrRanges struct {
- // ranges is a slice of ranges sorted by base.
- ranges []addrRange
-
- // totalBytes is the total amount of address space in bytes counted by
- // this addrRanges.
- totalBytes uintptr
-
- // sysStat is the stat to track allocations by this type
- sysStat *sysMemStat
-}
-
-func (a *addrRanges) init(sysStat *sysMemStat) {
- ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
- ranges.len = 0
- ranges.cap = 16
- ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, sysStat))
- a.sysStat = sysStat
- a.totalBytes = 0
-}
-
-// findSucc returns the first index in a such that addr is
-// less than the base of the addrRange at that index.
-func (a *addrRanges) findSucc(addr uintptr) int {
- base := offAddr{addr}
-
- // Narrow down the search space via a binary search
- // for large addrRanges until we have at most iterMax
- // candidates left.
- const iterMax = 8
- bot, top := 0, len(a.ranges)
- for top-bot > iterMax {
- i := ((top - bot) / 2) + bot
- if a.ranges[i].contains(base.addr()) {
- // a.ranges[i] contains base, so
- // its successor is the next index.
- return i + 1
- }
- if base.lessThan(a.ranges[i].base) {
- // In this case i might actually be
- // the successor, but we can't be sure
- // until we check the ones before it.
- top = i
- } else {
- // In this case we know base is
- // greater than or equal to a.ranges[i].limit-1,
- // so i is definitely not the successor.
- // We already checked i, so pick the next
- // one.
- bot = i + 1
- }
- }
- // There are top-bot candidates left, so
- // iterate over them and find the first that
- // base is strictly less than.
- for i := bot; i < top; i++ {
- if base.lessThan(a.ranges[i].base) {
- return i
- }
- }
- return top
-}
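-
-// For example (illustrative): with ranges [0x100, 0x200) and [0x300, 0x400),
-// findSucc(0x50) == 0, findSucc(0x150) == 1 (the successor of the range
-// containing the address), and findSucc(0x250) == 1, since 0x250 is below
-// the base of the range at index 1.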
-
-// findAddrGreaterEqual returns the smallest address represented by a
-// that is >= addr. Thus, if the address is represented by a,
-// then it returns addr. The second return value indicates whether
-// such an address exists for addr in a. That is, if addr is larger than
-// any address known to a, the second return value will be false.
-func (a *addrRanges) findAddrGreaterEqual(addr uintptr) (uintptr, bool) {
- i := a.findSucc(addr)
- if i == 0 {
- return a.ranges[0].base.addr(), true
- }
- if a.ranges[i-1].contains(addr) {
- return addr, true
- }
- if i < len(a.ranges) {
- return a.ranges[i].base.addr(), true
- }
- return 0, false
-}
-
-// contains returns true if a covers the address addr.
-func (a *addrRanges) contains(addr uintptr) bool {
- i := a.findSucc(addr)
- if i == 0 {
- return false
- }
- return a.ranges[i-1].contains(addr)
-}
-
-// add inserts a new address range to a.
-//
-// r must not overlap with any address range in a and r.size() must be > 0.
-func (a *addrRanges) add(r addrRange) {
- // The copies in this function are potentially expensive, but this data
- // structure is meant to represent the Go heap. At worst, copying this
- // would take ~160µs assuming a conservative copying rate of 25 GiB/s (the
- // copy will almost never trigger a page fault) for a 1 TiB heap with 4 MiB
- // arenas which is completely discontiguous. ~160µs is still a lot, but in
- // practice most platforms have 64 MiB arenas (which cuts this by a factor
- // of 16) and Go heaps are usually mostly contiguous, so the chance that
- // an addrRanges even grows to that size is extremely low.
-
- // An empty range has no effect on the set of addresses represented
- // by a, but passing a zero-sized range is almost always a bug.
- if r.size() == 0 {
- print("runtime: range = {", hex(r.base.addr()), ", ", hex(r.limit.addr()), "}\n")
- throw("attempted to add zero-sized address range")
- }
- // Because we assume r is not currently represented in a,
- // findSucc gives us our insertion index.
- i := a.findSucc(r.base.addr())
- coalescesDown := i > 0 && a.ranges[i-1].limit.equal(r.base)
- coalescesUp := i < len(a.ranges) && r.limit.equal(a.ranges[i].base)
- if coalescesUp && coalescesDown {
- // We have neighbors and they both border us.
- // Merge a.ranges[i-1], r, and a.ranges[i] together into a.ranges[i-1].
- a.ranges[i-1].limit = a.ranges[i].limit
-
- // Delete a.ranges[i].
- copy(a.ranges[i:], a.ranges[i+1:])
- a.ranges = a.ranges[:len(a.ranges)-1]
- } else if coalescesDown {
- // We have a neighbor at a lower address only and it borders us.
- // Merge the new space into a.ranges[i-1].
- a.ranges[i-1].limit = r.limit
- } else if coalescesUp {
- // We have a neighbor at a higher address only and it borders us.
- // Merge the new space into a.ranges[i].
- a.ranges[i].base = r.base
- } else {
- // We may or may not have neighbors which don't border us.
- // Add the new range.
- if len(a.ranges)+1 > cap(a.ranges) {
- // Grow the array. Note that this leaks the old array, but since
- // we're doubling we have at most 2x waste. For a 1 TiB heap and
- // 4 MiB arenas which are all discontiguous (both very conservative
- // assumptions), this would waste at most 4 MiB of memory.
- oldRanges := a.ranges
- ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
- ranges.len = len(oldRanges) + 1
- ranges.cap = cap(oldRanges) * 2
- ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, a.sysStat))
-
- // Copy in the old array, but make space for the new range.
- copy(a.ranges[:i], oldRanges[:i])
- copy(a.ranges[i+1:], oldRanges[i:])
- } else {
- a.ranges = a.ranges[:len(a.ranges)+1]
- copy(a.ranges[i+1:], a.ranges[i:])
- }
- a.ranges[i] = r
- }
- a.totalBytes += r.size()
-}
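-
-// Coalescing example (illustrative): with existing ranges [0x100, 0x200)
-// and [0x300, 0x400), adding [0x200, 0x300) coalesces both down and up,
-// leaving the single range [0x100, 0x400). Adding [0x500, 0x600) instead
-// borders neither neighbor and is simply inserted at the end.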
-
-// removeLast removes and returns the highest-addressed contiguous range
-// of a, or the last nBytes of that range, whichever is smaller. If a is
-// empty, it returns an empty range.
-func (a *addrRanges) removeLast(nBytes uintptr) addrRange {
- if len(a.ranges) == 0 {
- return addrRange{}
- }
- r := a.ranges[len(a.ranges)-1]
- size := r.size()
- if size > nBytes {
- newEnd := r.limit.sub(nBytes)
- a.ranges[len(a.ranges)-1].limit = newEnd
- a.totalBytes -= nBytes
- return addrRange{newEnd, r.limit}
- }
- a.ranges = a.ranges[:len(a.ranges)-1]
- a.totalBytes -= size
- return r
-}
-
-// removeGreaterEqual removes the ranges of a which are above addr, and additionally
-// splits any range containing addr.
-func (a *addrRanges) removeGreaterEqual(addr uintptr) {
- pivot := a.findSucc(addr)
- if pivot == 0 {
- // addr is before all ranges in a.
- a.totalBytes = 0
- a.ranges = a.ranges[:0]
- return
- }
- removed := uintptr(0)
- for _, r := range a.ranges[pivot:] {
- removed += r.size()
- }
- if r := a.ranges[pivot-1]; r.contains(addr) {
- removed += r.size()
- r = r.removeGreaterEqual(addr)
- if r.size() == 0 {
- pivot--
- } else {
- removed -= r.size()
- a.ranges[pivot-1] = r
- }
- }
- a.ranges = a.ranges[:pivot]
- a.totalBytes -= removed
-}
-
-// cloneInto makes a deep clone of a's state into b, re-using
-// b's ranges if able.
-func (a *addrRanges) cloneInto(b *addrRanges) {
- if len(a.ranges) > cap(b.ranges) {
- // Grow the array.
- ranges := (*notInHeapSlice)(unsafe.Pointer(&b.ranges))
- ranges.len = 0
- ranges.cap = cap(a.ranges)
- ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, b.sysStat))
- }
- b.ranges = b.ranges[:len(a.ranges)]
- b.totalBytes = a.totalBytes
- copy(b.ranges, a.ranges)
-}
diff --git a/contrib/go/_std_1.18/src/runtime/mspanset.go b/contrib/go/_std_1.18/src/runtime/mspanset.go
deleted file mode 100644
index 29f14910cc..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mspanset.go
+++ /dev/null
@@ -1,354 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/cpu"
- "internal/goarch"
- "runtime/internal/atomic"
- "unsafe"
-)
-
-// A spanSet is a set of *mspans.
-//
-// spanSet is safe for concurrent push and pop operations.
-type spanSet struct {
- // A spanSet is a two-level data structure consisting of a
- // growable spine that points to fixed-sized blocks. The spine
- // can be accessed without locks, but adding a block or
- // growing it requires taking the spine lock.
- //
- // Because each mspan covers at least 8K of heap and takes at
- // most 8 bytes in the spanSet, the growth of the spine is
- // quite limited.
- //
- // The spine and all blocks are allocated off-heap, which
- // allows this to be used in the memory manager and avoids the
- // need for write barriers on all of these. spanSetBlocks are
- // managed in a pool, though never freed back to the operating
- // system. We never release spine memory because there could be
- // concurrent lock-free access and we're likely to reuse it
- // anyway. (In principle, we could do this during STW.)
-
- spineLock mutex
- spine unsafe.Pointer // *[N]*spanSetBlock, accessed atomically
- spineLen uintptr // Spine array length, accessed atomically
- spineCap uintptr // Spine array cap, accessed under lock
-
- // index is the head and tail of the spanSet in a single field.
- // The head and the tail both represent an index into the logical
- // concatenation of all blocks, with the head always behind or
- // equal to the tail (indicating an empty set). This field is
- // always accessed atomically.
- //
- // The head and the tail are only 32 bits wide, which means we
- // can only support up to 2^32 pushes before a reset. If every
- // span in the heap were stored in this set, and each span were
- // the minimum size (1 runtime page, 8 KiB), then roughly the
- // smallest heap which would be unrepresentable is 32 TiB in size.
- index headTailIndex
-}
-
-const (
- spanSetBlockEntries = 512 // 4KB on 64-bit
- spanSetInitSpineCap = 256 // Enough for 1GB heap on 64-bit
-)
-
-type spanSetBlock struct {
- // Free spanSetBlocks are managed via a lock-free stack.
- lfnode
-
- // popped is the number of pop operations that have occurred on
- // this block. This number is used to help determine when a block
- // may be safely recycled.
- popped uint32
-
- // spans is the set of spans in this block.
- spans [spanSetBlockEntries]*mspan
-}
-
-// push adds span s to buffer b. push is safe to call concurrently
-// with other push and pop operations.
-func (b *spanSet) push(s *mspan) {
- // Obtain our slot.
- cursor := uintptr(b.index.incTail().tail() - 1)
- top, bottom := cursor/spanSetBlockEntries, cursor%spanSetBlockEntries
-
- // Do we need to add a block?
- spineLen := atomic.Loaduintptr(&b.spineLen)
- var block *spanSetBlock
-retry:
- if top < spineLen {
- spine := atomic.Loadp(unsafe.Pointer(&b.spine))
- blockp := add(spine, goarch.PtrSize*top)
- block = (*spanSetBlock)(atomic.Loadp(blockp))
- } else {
- // Add a new block to the spine, potentially growing
- // the spine.
- lock(&b.spineLock)
- // spineLen cannot change until we release the lock,
- // but may have changed while we were waiting.
- spineLen = atomic.Loaduintptr(&b.spineLen)
- if top < spineLen {
- unlock(&b.spineLock)
- goto retry
- }
-
- if spineLen == b.spineCap {
- // Grow the spine.
- newCap := b.spineCap * 2
- if newCap == 0 {
- newCap = spanSetInitSpineCap
- }
- newSpine := persistentalloc(newCap*goarch.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
- if b.spineCap != 0 {
- // Blocks are allocated off-heap, so
- // no write barriers.
- memmove(newSpine, b.spine, b.spineCap*goarch.PtrSize)
- }
- // Spine is allocated off-heap, so no write barrier.
- atomic.StorepNoWB(unsafe.Pointer(&b.spine), newSpine)
- b.spineCap = newCap
- // We can't immediately free the old spine
- // since a concurrent push with a lower index
- // could still be reading from it. We let it
- // leak because even a 1TB heap would waste
- // less than 2MB of memory on old spines. If
- // this is a problem, we could free old spines
- // during STW.
- }
-
- // Allocate a new block from the pool.
- block = spanSetBlockPool.alloc()
-
- // Add it to the spine.
- blockp := add(b.spine, goarch.PtrSize*top)
- // Blocks are allocated off-heap, so no write barrier.
- atomic.StorepNoWB(blockp, unsafe.Pointer(block))
- atomic.Storeuintptr(&b.spineLen, spineLen+1)
- unlock(&b.spineLock)
- }
-
- // We have a block. Insert the span atomically, since there may be
- // concurrent readers via the block API.
- atomic.StorepNoWB(unsafe.Pointer(&block.spans[bottom]), unsafe.Pointer(s))
-}
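-
-// Index arithmetic sketch (editor's illustration): with
-// spanSetBlockEntries = 512, the 1000th push claims cursor 999, which maps
-// to spine slot 999/512 = 1 and block slot 999%512 = 487, i.e. the second
-// block's spans[487].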
-
-// pop removes and returns a span from buffer b, or nil if b is empty.
-// pop is safe to call concurrently with other pop and push operations.
-func (b *spanSet) pop() *mspan {
- var head, tail uint32
-claimLoop:
- for {
- headtail := b.index.load()
- head, tail = headtail.split()
- if head >= tail {
- // The buf is empty, as far as we can tell.
- return nil
- }
- // Check if the head position we want to claim is actually
- // backed by a block.
- spineLen := atomic.Loaduintptr(&b.spineLen)
- if spineLen <= uintptr(head)/spanSetBlockEntries {
- // We're racing with a spine growth and the allocation of
- // a new block (and maybe a new spine!), and trying to grab
- // the span at the index which is currently being pushed.
- // Instead of spinning, let's just notify the caller that
- // there's nothing currently here. Spinning on this is
- // almost definitely not worth it.
- return nil
- }
- // Try to claim the current head by CASing in an updated head.
- // This may fail transiently due to a push which modifies the
- // tail, so keep trying while the head isn't changing.
- want := head
- for want == head {
- if b.index.cas(headtail, makeHeadTailIndex(want+1, tail)) {
- break claimLoop
- }
- headtail = b.index.load()
- head, tail = headtail.split()
- }
- // We failed to claim the spot we were after and the head changed,
- // meaning a popper got ahead of us. Try again from the top because
- // the buf may not be empty.
- }
- top, bottom := head/spanSetBlockEntries, head%spanSetBlockEntries
-
- // We may be reading a stale spine pointer, but because the length
- // grows monotonically and we've already verified it, we'll definitely
- // be reading from a valid block.
- spine := atomic.Loadp(unsafe.Pointer(&b.spine))
- blockp := add(spine, goarch.PtrSize*uintptr(top))
-
- // Given that the spine length is correct, we know we will never
- // see a nil block here, since the length is always updated after
- // the block is set.
- block := (*spanSetBlock)(atomic.Loadp(blockp))
- s := (*mspan)(atomic.Loadp(unsafe.Pointer(&block.spans[bottom])))
- for s == nil {
- // We raced with the span actually being set, but given that we
- // know a block for this span exists, the race window here is
- // extremely small. Try again.
- s = (*mspan)(atomic.Loadp(unsafe.Pointer(&block.spans[bottom])))
- }
- // Clear the pointer. This isn't strictly necessary, but defensively
- // avoids accidentally re-using blocks which could lead to memory
- // corruption. This way, we'll get a nil pointer access instead.
- atomic.StorepNoWB(unsafe.Pointer(&block.spans[bottom]), nil)
-
- // Increase the popped count. If we are the last possible popper
- // in the block (note that bottom need not equal spanSetBlockEntries-1
- // due to races) then it's our responsibility to free the block.
- //
- // If we increment popped to spanSetBlockEntries, we can be sure that
- // we're the last popper for this block, and it's thus safe to free it.
- // Every other popper must have crossed this barrier (and thus finished
- // popping its corresponding mspan) by the time we get here. Because
- // we're the last popper, we also don't have to worry about concurrent
- // pushers (there can't be any). Note that we may not be the popper
- // which claimed the last slot in the block, we're just the last one
- // to finish popping.
- if atomic.Xadd(&block.popped, 1) == spanSetBlockEntries {
- // Clear the block's pointer.
- atomic.StorepNoWB(blockp, nil)
-
- // Return the block to the block pool.
- spanSetBlockPool.free(block)
- }
- return s
-}
-
-// reset resets a spanSet which is empty. It will also clean up
-// any leftover blocks.
-//
-// Throws if the buf is not empty.
-//
-// reset may not be called concurrently with any other operations
-// on the span set.
-func (b *spanSet) reset() {
- head, tail := b.index.load().split()
- if head < tail {
- print("head = ", head, ", tail = ", tail, "\n")
- throw("attempt to clear non-empty span set")
- }
- top := head / spanSetBlockEntries
- if uintptr(top) < b.spineLen {
- // If the head catches up to the tail and the set is empty,
- // we may not clean up the block containing the head and tail
- // since it may be pushed into again. In order to avoid leaking
- // memory since we're going to reset the head and tail, clean
- // up such a block now, if it exists.
- blockp := (**spanSetBlock)(add(b.spine, goarch.PtrSize*uintptr(top)))
- block := *blockp
- if block != nil {
- // Sanity check the popped value.
- if block.popped == 0 {
- // popped should never be zero because that means we have
- // pushed at least one value but not yet popped if this
- // block pointer is not nil.
- throw("span set block with unpopped elements found in reset")
- }
- if block.popped == spanSetBlockEntries {
- // popped should also never be equal to spanSetBlockEntries
- // because the last popper should have made the block pointer
- // in this slot nil.
- throw("fully empty unfreed span set block found in reset")
- }
-
- // Clear the pointer to the block.
- atomic.StorepNoWB(unsafe.Pointer(blockp), nil)
-
- // Return the block to the block pool.
- spanSetBlockPool.free(block)
- }
- }
- b.index.reset()
- atomic.Storeuintptr(&b.spineLen, 0)
-}
-
-// spanSetBlockPool is a global pool of spanSetBlocks.
-var spanSetBlockPool spanSetBlockAlloc
-
-// spanSetBlockAlloc represents a concurrent pool of spanSetBlocks.
-type spanSetBlockAlloc struct {
- stack lfstack
-}
-
-// alloc tries to grab a spanSetBlock out of the pool, and if it fails
-// persistentallocs a new one and returns it.
-func (p *spanSetBlockAlloc) alloc() *spanSetBlock {
- if s := (*spanSetBlock)(p.stack.pop()); s != nil {
- return s
- }
- return (*spanSetBlock)(persistentalloc(unsafe.Sizeof(spanSetBlock{}), cpu.CacheLineSize, &memstats.gcMiscSys))
-}
-
-// free returns a spanSetBlock back to the pool.
-func (p *spanSetBlockAlloc) free(block *spanSetBlock) {
- atomic.Store(&block.popped, 0)
- p.stack.push(&block.lfnode)
-}
-
-// headTailIndex represents a combined 32-bit head and 32-bit tail
-// of a queue in a single 64-bit value.
-type headTailIndex uint64
-
-// makeHeadTailIndex creates a headTailIndex value from a separate
-// head and tail.
-func makeHeadTailIndex(head, tail uint32) headTailIndex {
- return headTailIndex(uint64(head)<<32 | uint64(tail))
-}
-
-// head returns the head of a headTailIndex value.
-func (h headTailIndex) head() uint32 {
- return uint32(h >> 32)
-}
-
-// tail returns the tail of a headTailIndex value.
-func (h headTailIndex) tail() uint32 {
- return uint32(h)
-}
-
-// split splits the headTailIndex value into its parts.
-func (h headTailIndex) split() (head uint32, tail uint32) {
- return h.head(), h.tail()
-}
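-
-// Worked example (illustrative): makeHeadTailIndex(1, 2) packs to
-// 0x0000000100000002, whose head() is 1 and tail() is 2. incTail adds 1
-// to the low 32 bits and incHead adds 1<<32, each leaving the other half
-// untouched (absent overflow).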
-
-// load atomically reads a headTailIndex value.
-func (h *headTailIndex) load() headTailIndex {
- return headTailIndex(atomic.Load64((*uint64)(h)))
-}
-
-// cas atomically compares-and-swaps a headTailIndex value.
-func (h *headTailIndex) cas(old, new headTailIndex) bool {
- return atomic.Cas64((*uint64)(h), uint64(old), uint64(new))
-}
-
-// incHead atomically increments the head of a headTailIndex.
-func (h *headTailIndex) incHead() headTailIndex {
- return headTailIndex(atomic.Xadd64((*uint64)(h), (1 << 32)))
-}
-
-// decHead atomically decrements the head of a headTailIndex.
-func (h *headTailIndex) decHead() headTailIndex {
- return headTailIndex(atomic.Xadd64((*uint64)(h), -(1 << 32)))
-}
-
-// incTail atomically increments the tail of a headTailIndex.
-func (h *headTailIndex) incTail() headTailIndex {
- ht := headTailIndex(atomic.Xadd64((*uint64)(h), +1))
- // Check for overflow.
- if ht.tail() == 0 {
- print("runtime: head = ", ht.head(), ", tail = ", ht.tail(), "\n")
- throw("headTailIndex overflow")
- }
- return ht
-}
-
-// reset clears the headTailIndex to (0, 0).
-func (h *headTailIndex) reset() {
- atomic.Store64((*uint64)(h), 0)
-}
diff --git a/contrib/go/_std_1.18/src/runtime/mstats.go b/contrib/go/_std_1.18/src/runtime/mstats.go
deleted file mode 100644
index 2c1ec79bf8..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mstats.go
+++ /dev/null
@@ -1,928 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Memory statistics
-
-package runtime
-
-import (
- "runtime/internal/atomic"
- "unsafe"
-)
-
-// Statistics.
-//
-// For detailed descriptions see the documentation for MemStats.
-// Fields that differ from MemStats are further documented here.
-//
-// Many of these fields are updated on the fly, while others are only
-// updated when updatememstats is called.
-type mstats struct {
- // General statistics.
- alloc uint64 // bytes allocated and not yet freed
- total_alloc uint64 // bytes allocated (even if freed)
- sys uint64 // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
- nlookup uint64 // number of pointer lookups (unused)
- nmalloc uint64 // number of mallocs
- nfree uint64 // number of frees
-
- // Statistics about malloc heap.
- // Updated atomically, or with the world stopped.
- //
- // Like MemStats, heap_sys and heap_inuse do not count memory
- // in manually-managed spans.
- heap_sys sysMemStat // virtual address space obtained from system for GC'd heap
- heap_inuse uint64 // bytes in mSpanInUse spans
- heap_released uint64 // bytes released to the os
-
- // heap_objects is not used by the runtime directly and instead
- // computed on the fly by updatememstats.
- heap_objects uint64 // total number of allocated objects
-
- // Statistics about stacks.
- stacks_inuse uint64 // bytes in manually-managed stack spans; computed by updatememstats
- stacks_sys sysMemStat // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
-
- // Statistics about allocation of low-level fixed-size structures.
- // Protected by FixAlloc locks.
- mspan_inuse uint64 // mspan structures
- mspan_sys sysMemStat
- mcache_inuse uint64 // mcache structures
- mcache_sys sysMemStat
- buckhash_sys sysMemStat // profiling bucket hash table
-
- // Statistics about GC overhead.
- gcWorkBufInUse uint64 // computed by updatememstats
- gcProgPtrScalarBitsInUse uint64 // computed by updatememstats
- gcMiscSys sysMemStat // updated atomically or during STW
-
- // Miscellaneous statistics.
- other_sys sysMemStat // updated atomically or during STW
-
- // Statistics about the garbage collector.
-
- // Protected by mheap or stopping the world during GC.
- last_gc_unix uint64 // last gc (in unix time)
- pause_total_ns uint64
- pause_ns [256]uint64 // circular buffer of recent gc pause lengths
- pause_end [256]uint64 // circular buffer of recent gc end times (nanoseconds since 1970)
- numgc uint32
- numforcedgc uint32 // number of user-forced GCs
- gc_cpu_fraction float64 // fraction of CPU time used by GC
- enablegc bool
- debuggc bool
-
- // Statistics about allocation size classes.
-
- by_size [_NumSizeClasses]struct {
- size uint32
- nmalloc uint64
- nfree uint64
- }
-
- // Add a uint32 for an even number of size classes to align the fields
- // below to 64 bits for atomic operations on 32-bit platforms.
- _ [1 - _NumSizeClasses%2]uint32
-
- last_gc_nanotime uint64 // last gc (monotonic time)
- last_heap_inuse uint64 // heap_inuse at mark termination of the previous GC
-
- // heapStats is a set of memory statistics whose updates must be
- // observed together to get a consistent view.
- heapStats consistentHeapStats
-
- // _ uint32 // ensure gcPauseDist is aligned
-
- // gcPauseDist represents the distribution of all GC-related
- // application pauses in the runtime.
- //
- // Each individual pause is counted separately, unlike pause_ns.
- gcPauseDist timeHistogram
-}
-
-var memstats mstats
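-
-// A usage sketch for the exported view (hypothetical caller):
-//
-//   var m runtime.MemStats
-//   runtime.ReadMemStats(&m) // briefly stops the world for a consistent snapshot
-//   println(m.HeapAlloc, m.NumGC)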
-
-// A MemStats records statistics about the memory allocator.
-type MemStats struct {
- // General statistics.
-
- // Alloc is bytes of allocated heap objects.
- //
- // This is the same as HeapAlloc (see below).
- Alloc uint64
-
- // TotalAlloc is cumulative bytes allocated for heap objects.
- //
- // TotalAlloc increases as heap objects are allocated, but
- // unlike Alloc and HeapAlloc, it does not decrease when
- // objects are freed.
- TotalAlloc uint64
-
- // Sys is the total bytes of memory obtained from the OS.
- //
- // Sys is the sum of the XSys fields below. Sys measures the
- // virtual address space reserved by the Go runtime for the
- // heap, stacks, and other internal data structures. It's
- // likely that not all of the virtual address space is backed
- // by physical memory at any given moment, though in general
- // it all was at some point.
- Sys uint64
-
- // Lookups is the number of pointer lookups performed by the
- // runtime.
- //
- // This is primarily useful for debugging runtime internals.
- Lookups uint64
-
- // Mallocs is the cumulative count of heap objects allocated.
- // The number of live objects is Mallocs - Frees.
- Mallocs uint64
-
- // Frees is the cumulative count of heap objects freed.
- Frees uint64
-
- // Heap memory statistics.
- //
- // Interpreting the heap statistics requires some knowledge of
- // how Go organizes memory. Go divides the virtual address
- // space of the heap into "spans", which are contiguous
- // regions of memory 8K or larger. A span may be in one of
- // three states:
- //
- // An "idle" span contains no objects or other data. The
- // physical memory backing an idle span can be released back
- // to the OS (but the virtual address space never is), or it
- // can be converted into an "in use" or "stack" span.
- //
- // An "in use" span contains at least one heap object and may
- // have free space available to allocate more heap objects.
- //
- // A "stack" span is used for goroutine stacks. Stack spans
- // are not considered part of the heap. A span can change
- // between heap and stack memory; it is never used for both
- // simultaneously.
-
- // HeapAlloc is bytes of allocated heap objects.
- //
- // "Allocated" heap objects include all reachable objects, as
- // well as unreachable objects that the garbage collector has
- // not yet freed. Specifically, HeapAlloc increases as heap
- // objects are allocated and decreases as the heap is swept
- // and unreachable objects are freed. Sweeping occurs
- // incrementally between GC cycles, so these two processes
- // occur simultaneously, and as a result HeapAlloc tends to
- // change smoothly (in contrast with the sawtooth that is
- // typical of stop-the-world garbage collectors).
- HeapAlloc uint64
-
- // HeapSys is bytes of heap memory obtained from the OS.
- //
- // HeapSys measures the amount of virtual address space
- // reserved for the heap. This includes virtual address space
- // that has been reserved but not yet used, which consumes no
- // physical memory, but tends to be small, as well as virtual
- // address space for which the physical memory has been
- // returned to the OS after it became unused (see HeapReleased
- // for a measure of the latter).
- //
- // HeapSys estimates the largest size the heap has had.
- HeapSys uint64
-
- // HeapIdle is bytes in idle (unused) spans.
- //
- // Idle spans have no objects in them. These spans could be
- // (and may already have been) returned to the OS, or they can
- // be reused for heap allocations, or they can be reused as
- // stack memory.
- //
- // HeapIdle minus HeapReleased estimates the amount of memory
- // that could be returned to the OS, but is being retained by
- // the runtime so it can grow the heap without requesting more
- // memory from the OS. If this difference is significantly
- // larger than the heap size, it indicates there was a recent
- // transient spike in live heap size.
- HeapIdle uint64
-
- // HeapInuse is bytes in in-use spans.
- //
- // In-use spans have at least one object in them. These spans
- // can only be used for other objects of roughly the same
- // size.
- //
- // HeapInuse minus HeapAlloc estimates the amount of memory
- // that has been dedicated to particular size classes, but is
- // not currently being used. This is an upper bound on
- // fragmentation, but in general this memory can be reused
- // efficiently.
- HeapInuse uint64
-
- // HeapReleased is bytes of physical memory returned to the OS.
- //
- // This counts heap memory from idle spans that was returned
- // to the OS and has not yet been reacquired for the heap.
- HeapReleased uint64
-
- // HeapObjects is the number of allocated heap objects.
- //
- // Like HeapAlloc, this increases as objects are allocated and
- // decreases as the heap is swept and unreachable objects are
- // freed.
- HeapObjects uint64
-
- // Stack memory statistics.
- //
- // Stacks are not considered part of the heap, but the runtime
- // can reuse a span of heap memory for stack memory, and
- // vice-versa.
-
- // StackInuse is bytes in stack spans.
- //
- // In-use stack spans have at least one stack in them. These
- // spans can only be used for other stacks of the same size.
- //
- // There is no StackIdle because unused stack spans are
- // returned to the heap (and hence counted toward HeapIdle).
- StackInuse uint64
-
- // StackSys is bytes of stack memory obtained from the OS.
- //
- // StackSys is StackInuse, plus any memory obtained directly
- // from the OS for OS thread stacks (which should be minimal).
- StackSys uint64
-
- // Off-heap memory statistics.
- //
- // The following statistics measure runtime-internal
- // structures that are not allocated from heap memory (usually
- // because they are part of implementing the heap). Unlike
- // heap or stack memory, any memory allocated to these
- // structures is dedicated to these structures.
- //
- // These are primarily useful for debugging runtime memory
- // overheads.
-
- // MSpanInuse is bytes of allocated mspan structures.
- MSpanInuse uint64
-
- // MSpanSys is bytes of memory obtained from the OS for mspan
- // structures.
- MSpanSys uint64
-
- // MCacheInuse is bytes of allocated mcache structures.
- MCacheInuse uint64
-
- // MCacheSys is bytes of memory obtained from the OS for
- // mcache structures.
- MCacheSys uint64
-
- // BuckHashSys is bytes of memory in profiling bucket hash tables.
- BuckHashSys uint64
-
- // GCSys is bytes of memory in garbage collection metadata.
- GCSys uint64
-
- // OtherSys is bytes of memory in miscellaneous off-heap
- // runtime allocations.
- OtherSys uint64
-
- // Garbage collector statistics.
-
- // NextGC is the target heap size of the next GC cycle.
- //
- // The garbage collector's goal is to keep HeapAlloc ≤ NextGC.
- // At the end of each GC cycle, the target for the next cycle
- // is computed based on the amount of reachable data and the
- // value of GOGC.
- NextGC uint64
-
- // LastGC is the time the last garbage collection finished, as
- // nanoseconds since 1970 (the UNIX epoch).
- LastGC uint64
-
- // PauseTotalNs is the cumulative nanoseconds in GC
- // stop-the-world pauses since the program started.
- //
- // During a stop-the-world pause, all goroutines are paused
- // and only the garbage collector can run.
- PauseTotalNs uint64
-
- // PauseNs is a circular buffer of recent GC stop-the-world
- // pause times in nanoseconds.
- //
- // The most recent pause is at PauseNs[(NumGC+255)%256]. In
- // general, PauseNs[N%256] records the time paused in the most
- // recent N%256th GC cycle. There may be multiple pauses per
- // GC cycle; this is the sum of all pauses during a cycle.
- PauseNs [256]uint64
-
- // PauseEnd is a circular buffer of recent GC pause end times,
- // as nanoseconds since 1970 (the UNIX epoch).
- //
- // This buffer is filled the same way as PauseNs. There may be
- // multiple pauses per GC cycle; this records the end of the
- // last pause in a cycle.
- PauseEnd [256]uint64
-
- // NumGC is the number of completed GC cycles.
- NumGC uint32
-
- // NumForcedGC is the number of GC cycles that were forced by
- // the application calling the GC function.
- NumForcedGC uint32
-
- // GCCPUFraction is the fraction of this program's available
- // CPU time used by the GC since the program started.
- //
- // GCCPUFraction is expressed as a number between 0 and 1,
- // where 0 means GC has consumed none of this program's CPU. A
- // program's available CPU time is defined as the integral of
- // GOMAXPROCS since the program started. That is, if
- // GOMAXPROCS is 2 and a program has been running for 10
- // seconds, its "available CPU" is 20 seconds. GCCPUFraction
- // does not include CPU time used for write barrier activity.
- //
- // This is the same as the fraction of CPU reported by
- // GODEBUG=gctrace=1.
- GCCPUFraction float64
-
- // EnableGC indicates that GC is enabled. It is always true,
- // even if GOGC=off.
- EnableGC bool
-
- // DebugGC is currently unused.
- DebugGC bool
-
- // BySize reports per-size class allocation statistics.
- //
- // BySize[N] gives statistics for allocations of size S where
- // BySize[N-1].Size < S ≤ BySize[N].Size.
- //
- // This does not report allocations larger than BySize[60].Size.
- BySize [61]struct {
- // Size is the maximum byte size of an object in this
- // size class.
- Size uint32
-
- // Mallocs is the cumulative count of heap objects
- // allocated in this size class. The cumulative bytes
- // of allocation is Size*Mallocs. The number of live
- // objects in this size class is Mallocs - Frees.
- Mallocs uint64
-
- // Frees is the cumulative count of heap objects freed
- // in this size class.
- Frees uint64
- }
-}
-
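The relationships documented in the fields above are directly observable from application code via runtime.ReadMemStats (defined below). An illustrative sketch using only standard-library calls:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	fmt.Printf("live heap: %d bytes\n", m.HeapAlloc)
	// Per the HeapIdle docs: memory retained so the heap can grow
	// without asking the OS for more.
	fmt.Printf("retained idle: %d bytes\n", m.HeapIdle-m.HeapReleased)
	// Per the HeapInuse docs: an upper bound on size-class fragmentation.
	fmt.Printf("span slack: %d bytes\n", m.HeapInuse-m.HeapAlloc)
}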
-func init() {
- if offset := unsafe.Offsetof(memstats.heapStats); offset%8 != 0 {
- println(offset)
- throw("memstats.heapStats not aligned to 8 bytes")
- }
- if offset := unsafe.Offsetof(memstats.gcPauseDist); offset%8 != 0 {
- println(offset)
- throw("memstats.gcPauseDist not aligned to 8 bytes")
- }
- // Ensure the size of heapStatsDelta causes adjacent fields/slots (e.g.
- // [3]heapStatsDelta) to be 8-byte aligned.
- if size := unsafe.Sizeof(heapStatsDelta{}); size%8 != 0 {
- println(size)
- throw("heapStatsDelta not a multiple of 8 bytes in size")
- }
-}
-
-// ReadMemStats populates m with memory allocator statistics.
-//
-// The returned memory allocator statistics are up to date as of the
-// call to ReadMemStats. This is in contrast with a heap profile,
-// which is a snapshot as of the most recently completed garbage
-// collection cycle.
-func ReadMemStats(m *MemStats) {
- stopTheWorld("read mem stats")
-
- systemstack(func() {
- readmemstats_m(m)
- })
-
- startTheWorld()
-}
-
-func readmemstats_m(stats *MemStats) {
- updatememstats()
-
- stats.Alloc = memstats.alloc
- stats.TotalAlloc = memstats.total_alloc
- stats.Sys = memstats.sys
- stats.Mallocs = memstats.nmalloc
- stats.Frees = memstats.nfree
- stats.HeapAlloc = memstats.alloc
- stats.HeapSys = memstats.heap_sys.load()
- // By definition, HeapIdle is memory that was mapped
- // for the heap but is not currently used to hold heap
- // objects. It also specifically is memory that can be
- // used for other purposes, like stacks, but this memory
- // is subtracted out of HeapSys before it makes that
- // transition. Put another way:
- //
- // heap_sys = bytes allocated from the OS for the heap - bytes ultimately used for non-heap purposes
- // heap_idle = bytes allocated from the OS for the heap - bytes ultimately used for any purpose
- //
- // or
- //
- // heap_sys = sys - stacks_inuse - gcWorkBufInUse - gcProgPtrScalarBitsInUse
- // heap_idle = sys - stacks_inuse - gcWorkBufInUse - gcProgPtrScalarBitsInUse - heap_inuse
- //
- // => heap_idle = heap_sys - heap_inuse
- stats.HeapIdle = memstats.heap_sys.load() - memstats.heap_inuse
- stats.HeapInuse = memstats.heap_inuse
- stats.HeapReleased = memstats.heap_released
- stats.HeapObjects = memstats.heap_objects
- stats.StackInuse = memstats.stacks_inuse
- // memstats.stacks_sys is only memory mapped directly for OS stacks.
- // Add in heap-allocated stack memory for user consumption.
- stats.StackSys = memstats.stacks_inuse + memstats.stacks_sys.load()
- stats.MSpanInuse = memstats.mspan_inuse
- stats.MSpanSys = memstats.mspan_sys.load()
- stats.MCacheInuse = memstats.mcache_inuse
- stats.MCacheSys = memstats.mcache_sys.load()
- stats.BuckHashSys = memstats.buckhash_sys.load()
- // MemStats defines GCSys as an aggregate of all memory related
- // to the memory management system, but we track this memory
- // at a more granular level in the runtime.
- stats.GCSys = memstats.gcMiscSys.load() + memstats.gcWorkBufInUse + memstats.gcProgPtrScalarBitsInUse
- stats.OtherSys = memstats.other_sys.load()
- stats.NextGC = gcController.heapGoal
- stats.LastGC = memstats.last_gc_unix
- stats.PauseTotalNs = memstats.pause_total_ns
- stats.PauseNs = memstats.pause_ns
- stats.PauseEnd = memstats.pause_end
- stats.NumGC = memstats.numgc
- stats.NumForcedGC = memstats.numforcedgc
- stats.GCCPUFraction = memstats.gc_cpu_fraction
- stats.EnableGC = true
-
- // Handle BySize. Copy N values, where N is
- // the minimum of the lengths of the two arrays.
- // Unfortunately copy() won't work here because
- // the arrays have different structs.
- //
- // TODO(mknyszek): Consider renaming the fields
- // of by_size's elements to align so we can use
- // the copy built-in.
- bySizeLen := len(stats.BySize)
- if l := len(memstats.by_size); l < bySizeLen {
- bySizeLen = l
- }
- for i := 0; i < bySizeLen; i++ {
- stats.BySize[i].Size = memstats.by_size[i].size
- stats.BySize[i].Mallocs = memstats.by_size[i].nmalloc
- stats.BySize[i].Frees = memstats.by_size[i].nfree
- }
-}
-
-//go:linkname readGCStats runtime/debug.readGCStats
-func readGCStats(pauses *[]uint64) {
- systemstack(func() {
- readGCStats_m(pauses)
- })
-}
-
-// readGCStats_m must be called on the system stack because it acquires the heap
-// lock. See mheap for details.
-//go:systemstack
-func readGCStats_m(pauses *[]uint64) {
- p := *pauses
- // Calling code in runtime/debug should make the slice large enough.
- if cap(p) < len(memstats.pause_ns)+3 {
- throw("short slice passed to readGCStats")
- }
-
- // Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
- lock(&mheap_.lock)
-
- n := memstats.numgc
- if n > uint32(len(memstats.pause_ns)) {
- n = uint32(len(memstats.pause_ns))
- }
-
- // The pause buffer is circular. The most recent pause is at
- // pause_ns[(numgc-1)%len(pause_ns)], and then backward
- // from there to go back farther in time. We deliver the times
- // most recent first (in p[0]).
- p = p[:cap(p)]
- for i := uint32(0); i < n; i++ {
- j := (memstats.numgc - 1 - i) % uint32(len(memstats.pause_ns))
- p[i] = memstats.pause_ns[j]
- p[n+i] = memstats.pause_end[j]
- }
-
- p[n+n] = memstats.last_gc_unix
- p[n+n+1] = uint64(memstats.numgc)
- p[n+n+2] = memstats.pause_total_ns
- unlock(&mheap_.lock)
- *pauses = p[:n+n+3]
-}
-
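readGCStats is the backing for runtime/debug.ReadGCStats, which unpacks the flat slice built above (pauses, then pause ends, then the three summary words) into a structured form. A short usage sketch:

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	var s debug.GCStats
	debug.ReadGCStats(&s)
	fmt.Println("cycles:", s.NumGC, "total pause:", s.PauseTotal)
	if len(s.Pause) > 0 && len(s.PauseEnd) > 0 {
		// Pauses arrive most recent first, matching readGCStats_m.
		fmt.Println("latest pause:", s.Pause[0], "ended at", s.PauseEnd[0])
	}
}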
-// Updates the memstats structure.
-//
-// The world must be stopped.
-//
-//go:nowritebarrier
-func updatememstats() {
- assertWorldStopped()
-
- // Flush mcaches to mcentral before doing anything else.
- //
- // Flushing to the mcentral may in general cause stats to
- // change as mcentral data structures are manipulated.
- systemstack(flushallmcaches)
-
- memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
- memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
- memstats.sys = memstats.heap_sys.load() + memstats.stacks_sys.load() + memstats.mspan_sys.load() +
- memstats.mcache_sys.load() + memstats.buckhash_sys.load() + memstats.gcMiscSys.load() +
- memstats.other_sys.load()
-
- // Calculate memory allocator stats.
- // During program execution we only count number of frees and amount of freed memory.
- // Current number of alive objects in the heap and amount of alive heap memory
- // are calculated by scanning all spans.
- // Total number of mallocs is calculated as number of frees plus number of alive objects.
- // Similarly, total amount of allocated memory is calculated as amount of freed memory
- // plus amount of alive heap memory.
- memstats.alloc = 0
- memstats.total_alloc = 0
- memstats.nmalloc = 0
- memstats.nfree = 0
- for i := 0; i < len(memstats.by_size); i++ {
- memstats.by_size[i].nmalloc = 0
- memstats.by_size[i].nfree = 0
- }
- // Collect consistent stats, which are the source-of-truth in some cases.
- var consStats heapStatsDelta
- memstats.heapStats.unsafeRead(&consStats)
-
- // Collect large allocation stats.
- totalAlloc := consStats.largeAlloc
- memstats.nmalloc += consStats.largeAllocCount
- totalFree := consStats.largeFree
- memstats.nfree += consStats.largeFreeCount
-
- // Collect per-sizeclass stats.
- for i := 0; i < _NumSizeClasses; i++ {
- // Malloc stats.
- a := consStats.smallAllocCount[i]
- totalAlloc += a * uint64(class_to_size[i])
- memstats.nmalloc += a
- memstats.by_size[i].nmalloc = a
-
- // Free stats.
- f := consStats.smallFreeCount[i]
- totalFree += f * uint64(class_to_size[i])
- memstats.nfree += f
- memstats.by_size[i].nfree = f
- }
-
- // Account for tiny allocations.
- memstats.nfree += consStats.tinyAllocCount
- memstats.nmalloc += consStats.tinyAllocCount
-
- // Calculate derived stats.
- memstats.total_alloc = totalAlloc
- memstats.alloc = totalAlloc - totalFree
- memstats.heap_objects = memstats.nmalloc - memstats.nfree
-
- memstats.stacks_inuse = uint64(consStats.inStacks)
- memstats.gcWorkBufInUse = uint64(consStats.inWorkBufs)
- memstats.gcProgPtrScalarBitsInUse = uint64(consStats.inPtrScalarBits)
-
- // We also count stacks_inuse, gcWorkBufInUse, and gcProgPtrScalarBitsInUse as sys memory.
- memstats.sys += memstats.stacks_inuse + memstats.gcWorkBufInUse + memstats.gcProgPtrScalarBitsInUse
-
- // The world is stopped, so the consistent stats (after aggregation)
- // should be identical to some combination of memstats. In particular:
- //
- // * heap_inuse == inHeap
- // * heap_released == released
- // * heap_sys - heap_released == committed - inStacks - inWorkBufs - inPtrScalarBits
- //
- // Check if that's actually true.
- //
- // TODO(mknyszek): Maybe don't throw here. It would be bad if a
- // bug in otherwise benign accounting caused the whole application
- // to crash.
- if memstats.heap_inuse != uint64(consStats.inHeap) {
- print("runtime: heap_inuse=", memstats.heap_inuse, "\n")
- print("runtime: consistent value=", consStats.inHeap, "\n")
- throw("heap_inuse and consistent stats are not equal")
- }
- if memstats.heap_released != uint64(consStats.released) {
- print("runtime: heap_released=", memstats.heap_released, "\n")
- print("runtime: consistent value=", consStats.released, "\n")
- throw("heap_released and consistent stats are not equal")
- }
- globalRetained := memstats.heap_sys.load() - memstats.heap_released
- consRetained := uint64(consStats.committed - consStats.inStacks - consStats.inWorkBufs - consStats.inPtrScalarBits)
- if globalRetained != consRetained {
- print("runtime: global value=", globalRetained, "\n")
- print("runtime: consistent value=", consRetained, "\n")
- throw("measures of the retained heap are not equal")
- }
-}
-
-// flushmcache flushes the mcache of allp[i].
-//
-// The world must be stopped.
-//
-//go:nowritebarrier
-func flushmcache(i int) {
- assertWorldStopped()
-
- p := allp[i]
- c := p.mcache
- if c == nil {
- return
- }
- c.releaseAll()
- stackcache_clear(c)
-}
-
-// flushallmcaches flushes the mcaches of all Ps.
-//
-// The world must be stopped.
-//
-//go:nowritebarrier
-func flushallmcaches() {
- assertWorldStopped()
-
- for i := 0; i < int(gomaxprocs); i++ {
- flushmcache(i)
- }
-}
-
-// sysMemStat represents a global system statistic that is managed atomically.
-//
-// This type must structurally be a uint64 so that mstats aligns with MemStats.
-type sysMemStat uint64
-
-// load atomically reads the value of the stat.
-//
-// Must be nosplit as it is called in runtime initialization, e.g. newosproc0.
-//go:nosplit
-func (s *sysMemStat) load() uint64 {
- return atomic.Load64((*uint64)(s))
-}
-
-// add atomically adds the sysMemStat by n.
-//
-// Must be nosplit as it is called in runtime initialization, e.g. newosproc0.
-//go:nosplit
-func (s *sysMemStat) add(n int64) {
- if s == nil {
- return
- }
- val := atomic.Xadd64((*uint64)(s), n)
- if (n > 0 && int64(val) < n) || (n < 0 && int64(val)+n < n) {
- print("runtime: val=", val, " n=", n, "\n")
- throw("sysMemStat overflow")
- }
-}
-
-// heapStatsDelta contains deltas of various runtime memory statistics
-// that need to be updated together in order for them to be kept
-// consistent with one another.
-type heapStatsDelta struct {
- // Memory stats.
- committed int64 // byte delta of memory committed
- released int64 // byte delta of memory released to the OS
- inHeap int64 // byte delta of memory placed in the heap
- inStacks int64 // byte delta of memory reserved for stacks
- inWorkBufs int64 // byte delta of memory reserved for work bufs
- inPtrScalarBits int64 // byte delta of memory reserved for unrolled GC prog bits
-
- // Allocator stats.
- //
- // These are all uint64 because they're cumulative, and could quickly wrap
- // around otherwise.
- tinyAllocCount uint64 // number of tiny allocations
- largeAlloc uint64 // bytes allocated for large objects
- largeAllocCount uint64 // number of large object allocations
- smallAllocCount [_NumSizeClasses]uint64 // number of allocs for small objects
- largeFree uint64 // bytes freed for large objects (>maxSmallSize)
- largeFreeCount uint64 // number of frees for large objects (>maxSmallSize)
- smallFreeCount [_NumSizeClasses]uint64 // number of frees for small objects (<=maxSmallSize)
-
- // NOTE: This struct must be a multiple of 8 bytes in size because it
- // is stored in an array. If it's not, atomic accesses to the above
- // fields may be unaligned and fail on 32-bit platforms.
-}
-
-// merge adds in the deltas from b into a.
-func (a *heapStatsDelta) merge(b *heapStatsDelta) {
- a.committed += b.committed
- a.released += b.released
- a.inHeap += b.inHeap
- a.inStacks += b.inStacks
- a.inWorkBufs += b.inWorkBufs
- a.inPtrScalarBits += b.inPtrScalarBits
-
- a.tinyAllocCount += b.tinyAllocCount
- a.largeAlloc += b.largeAlloc
- a.largeAllocCount += b.largeAllocCount
- for i := range b.smallAllocCount {
- a.smallAllocCount[i] += b.smallAllocCount[i]
- }
- a.largeFree += b.largeFree
- a.largeFreeCount += b.largeFreeCount
- for i := range b.smallFreeCount {
- a.smallFreeCount[i] += b.smallFreeCount[i]
- }
-}
-
-// consistentHeapStats represents a set of various memory statistics
-// whose updates must be viewed completely to get a consistent
-// state of the world.
-//
-// To write updates to memory stats use the acquire and release
-// methods. To obtain a consistent global snapshot of these statistics,
-// use read.
-type consistentHeapStats struct {
- // stats is a ring buffer of heapStatsDelta values.
- // Writers always atomically update the delta at index gen.
- //
- // Readers operate by rotating gen (0 -> 1 -> 2 -> 0 -> ...)
- // and synchronizing with writers by observing each P's
- // statsSeq field. If the reader observes a P not writing,
- // it can be sure that it will pick up the new gen value the
- // next time it writes.
- //
- // The reader then takes responsibility by clearing space
- // in the ring buffer for the next reader to rotate gen to
- // that space (i.e. it merges in values from index (gen-2) mod 3
- // to index (gen-1) mod 3, then clears the former).
- //
- // Note that this means only one reader can be reading at a time.
- // There is no way for readers to synchronize.
- //
- // This process is why we need a ring buffer of size 3 instead
- // of 2: one is for the writers, one contains the most recent
- // data, and the last one is clear so writers can begin writing
- // to it the moment gen is updated.
- stats [3]heapStatsDelta
-
- // gen represents the current index into which writers
- // are writing, and can take on the value of 0, 1, or 2.
- // This value is updated atomically.
- gen uint32
-
- // noPLock is intended to provide mutual exclusion for updating
- // stats when no P is available. It does not block other writers
- // with a P, only other writers without a P and the reader. Because
- // stats are usually updated when a P is available, contention on
- // this lock should be minimal.
- noPLock mutex
-}
-
-// acquire returns a heapStatsDelta to be updated. In effect,
-// it acquires the shard for writing. release must be called
-// as soon as the relevant deltas are updated.
-//
-// The returned heapStatsDelta must be updated atomically.
-//
-// The caller's P must not change between acquire and
-// release. This also means that the caller should not
-// acquire a P or release its P in between. A P also must
-// not acquire a given consistentHeapStats if it hasn't
-// yet released it.
-//
-// nosplit because a stack growth in this function could
-// lead to a stack allocation that could reenter the
-// function.
-//
-//go:nosplit
-func (m *consistentHeapStats) acquire() *heapStatsDelta {
- if pp := getg().m.p.ptr(); pp != nil {
- seq := atomic.Xadd(&pp.statsSeq, 1)
- if seq%2 == 0 {
- // Should have been incremented to odd.
- print("runtime: seq=", seq, "\n")
- throw("bad sequence number")
- }
- } else {
- lock(&m.noPLock)
- }
- gen := atomic.Load(&m.gen) % 3
- return &m.stats[gen]
-}
-
-// release indicates that the writer is done modifying
-// the delta. The value returned by the corresponding
-// acquire must no longer be accessed or modified after
-// release is called.
-//
-// The caller's P must not change between acquire and
-// release. This also means that the caller should not
-// acquire a P or release its P in between.
-//
-// nosplit because a stack growth in this function could
-// lead to a stack allocation that causes another acquire
-// before this operation has completed.
-//
-//go:nosplit
-func (m *consistentHeapStats) release() {
- if pp := getg().m.p.ptr(); pp != nil {
- seq := atomic.Xadd(&pp.statsSeq, 1)
- if seq%2 != 0 {
- // Should have been incremented to even.
- print("runtime: seq=", seq, "\n")
- throw("bad sequence number")
- }
- } else {
- unlock(&m.noPLock)
- }
-}
-
-// unsafeRead aggregates the delta for this shard into out.
-//
-// Unsafe because it does so without any synchronization. The
-// world must be stopped.
-func (m *consistentHeapStats) unsafeRead(out *heapStatsDelta) {
- assertWorldStopped()
-
- for i := range m.stats {
- out.merge(&m.stats[i])
- }
-}
-
-// unsafeClear clears the shard.
-//
-// Unsafe because the world must be stopped and values should
-// be donated elsewhere before clearing.
-func (m *consistentHeapStats) unsafeClear() {
- assertWorldStopped()
-
- for i := range m.stats {
- m.stats[i] = heapStatsDelta{}
- }
-}
-
-// read takes a globally consistent snapshot of m
-// and puts the aggregated value in out. Even though out is a
-// heapStatsDelta, the resulting values should be complete and
-// valid statistic values.
-//
-// Not safe to call concurrently. The world must be stopped
-// or metricsSema must be held.
-func (m *consistentHeapStats) read(out *heapStatsDelta) {
- // Getting preempted after this point is not safe because
- // we read allp. We need to make sure a STW can't happen
- // so it doesn't change out from under us.
- mp := acquirem()
-
- // Get the current generation. We can be confident that this
- // will not change since read is serialized and is the only
- // one that modifies gen.
- currGen := atomic.Load(&m.gen)
- prevGen := currGen - 1
- if currGen == 0 {
- prevGen = 2
- }
-
- // Prevent writers without a P from writing while we update gen.
- lock(&m.noPLock)
-
- // Rotate gen, effectively taking a snapshot of the state of
- // these statistics at the point of the exchange by moving
- // writers to the next set of deltas.
- //
- // This exchange is safe to do because we won't race
- // with anyone else trying to update this value.
- atomic.Xchg(&m.gen, (currGen+1)%3)
-
- // Allow P-less writers to continue. They'll be writing to the
- // next generation now.
- unlock(&m.noPLock)
-
- for _, p := range allp {
- // Spin until there are no more writers.
- for atomic.Load(&p.statsSeq)%2 != 0 {
- }
- }
-
- // At this point we've observed that each sequence
- // number is even, so any future writers will observe
- // the new gen value. That means it's safe to read from
- // the other deltas in the stats buffer.
-
- // Perform our responsibilities and free up
- // stats[prevGen] for the next time we want to take
- // a snapshot.
- m.stats[currGen].merge(&m.stats[prevGen])
- m.stats[prevGen] = heapStatsDelta{}
-
- // Finally, copy out the complete delta.
- *out = m.stats[currGen]
-
- releasem(mp)
-}
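The rotation protocol above is compact but subtle. The following deliberately simplified sketch restates it in ordinary Go; the sync.RWMutex standing in for the per-P statsSeq handshake is an assumption made purely for illustration (the runtime cannot use sync and spins on sequence numbers instead):

package sketch

import (
	"sync"
	"sync/atomic"
)

type statsRing struct {
	mu    sync.RWMutex // RLock held by writers, like an odd statsSeq
	gen   uint32       // slot writers currently target, updated atomically
	slots [3]int64     // one aggregated delta per slot, for illustration
}

func (r *statsRing) add(n int64) {
	r.mu.RLock() // "acquire": announce an in-flight writer
	g := atomic.LoadUint32(&r.gen) % 3
	atomic.AddInt64(&r.slots[g], n)
	r.mu.RUnlock() // "release"
}

func (r *statsRing) read() int64 {
	cur := atomic.LoadUint32(&r.gen) % 3
	atomic.StoreUint32(&r.gen, (cur+1)%3) // rotate: writers move on
	r.mu.Lock()                           // drain writers still inside add
	r.mu.Unlock()
	prev := (cur + 2) % 3
	// The reader's responsibility, as in read above: merge the retired
	// slot into the snapshot and leave it clear for a future rotation.
	r.slots[cur] += r.slots[prev]
	r.slots[prev] = 0
	return r.slots[cur]
}

The invariants survive the simplification: writers always target the slot named by gen, and the single reader rotates gen, waits out in-flight writers, then merges and clears the retired slot.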
diff --git a/contrib/go/_std_1.18/src/runtime/mwbbuf.go b/contrib/go/_std_1.18/src/runtime/mwbbuf.go
deleted file mode 100644
index 78d9382620..0000000000
--- a/contrib/go/_std_1.18/src/runtime/mwbbuf.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This implements the write barrier buffer. The write barrier itself
-// is gcWriteBarrier and is implemented in assembly.
-//
-// See mbarrier.go for algorithmic details on the write barrier. This
-// file deals only with the buffer.
-//
-// The write barrier has a fast path and a slow path. The fast path
-// simply enqueues to a per-P write barrier buffer. It's written in
-// assembly and doesn't clobber any general purpose registers, so it
-// doesn't have the usual overheads of a Go call.
-//
-// When the buffer fills up, the write barrier invokes the slow path
-// (wbBufFlush) to flush the buffer to the GC work queues. In this
-// path, since the compiler didn't spill registers, we spill *all*
-// registers and disallow any GC safe points that could observe the
-// stack frame (since we don't know the types of the spilled
-// registers).
-
-package runtime
-
-import (
- "internal/goarch"
- "runtime/internal/atomic"
- "unsafe"
-)
-
-// testSmallBuf forces a small write barrier buffer to stress write
-// barrier flushing.
-const testSmallBuf = false
-
-// wbBuf is a per-P buffer of pointers queued by the write barrier.
-// This buffer is flushed to the GC workbufs when it fills up and on
-// various GC transitions.
-//
-// This is closely related to a "sequential store buffer" (SSB),
-// except that SSBs are usually used for maintaining remembered sets,
-// while this is used for marking.
-type wbBuf struct {
- // next points to the next slot in buf. It must not be a
- // pointer type because it can point past the end of buf and
- // must be updated without write barriers.
- //
- // This is a pointer rather than an index to optimize the
- // write barrier assembly.
- next uintptr
-
- // end points to just past the end of buf. It must not be a
- // pointer type because it points past the end of buf and must
- // be updated without write barriers.
- end uintptr
-
- // buf stores a series of pointers to execute write barriers
- // on. This must be a multiple of wbBufEntryPointers because
- // the write barrier only checks for overflow once per entry.
- buf [wbBufEntryPointers * wbBufEntries]uintptr
-}
-
-const (
- // wbBufEntries is the number of write barriers between
- // flushes of the write barrier buffer.
- //
- // This trades latency for throughput amortization. Higher
- // values amortize flushing overhead more, but increase the
- // latency of flushing. Higher values also increase the cache
- // footprint of the buffer.
- //
- // TODO: What is the latency cost of this? Tune this value.
- wbBufEntries = 256
-
- // wbBufEntryPointers is the number of pointers added to the
- // buffer by each write barrier.
- wbBufEntryPointers = 2
-)
-
-// reset empties b by resetting its next and end pointers.
-func (b *wbBuf) reset() {
- start := uintptr(unsafe.Pointer(&b.buf[0]))
- b.next = start
- if writeBarrier.cgo {
- // Effectively disable the buffer by forcing a flush
- // on every barrier.
- b.end = uintptr(unsafe.Pointer(&b.buf[wbBufEntryPointers]))
- } else if testSmallBuf {
- // For testing, allow two barriers in the buffer. If
- // we only did one, then barriers of non-heap pointers
- // would be no-ops. This lets us combine a buffered
- // barrier with a flush at a later time.
- b.end = uintptr(unsafe.Pointer(&b.buf[2*wbBufEntryPointers]))
- } else {
- b.end = start + uintptr(len(b.buf))*unsafe.Sizeof(b.buf[0])
- }
-
- if (b.end-b.next)%(wbBufEntryPointers*unsafe.Sizeof(b.buf[0])) != 0 {
- throw("bad write barrier buffer bounds")
- }
-}
-
-// discard resets b's next pointer, but not its end pointer.
-//
-// This must be nosplit because it's called by wbBufFlush.
-//
-//go:nosplit
-func (b *wbBuf) discard() {
- b.next = uintptr(unsafe.Pointer(&b.buf[0]))
-}
-
-// empty reports whether b contains no pointers.
-func (b *wbBuf) empty() bool {
- return b.next == uintptr(unsafe.Pointer(&b.buf[0]))
-}
-
-// putFast adds old and new to the write barrier buffer and returns
-// false if a flush is necessary. Callers should use this as:
-//
-// buf := &getg().m.p.ptr().wbBuf
-// if !buf.putFast(old, new) {
-// wbBufFlush(...)
-// }
-// ... actual memory write ...
-//
-// The arguments to wbBufFlush depend on whether the caller is doing
-// its own cgo pointer checks. If it is, then this can be
-// wbBufFlush(nil, 0). Otherwise, it must pass the slot address and
-// new.
-//
-// The caller must ensure there are no preemption points during the
-// above sequence. There must be no preemption points while buf is in
-// use because it is a per-P resource. There must be no preemption
-// points between the buffer put and the write to memory because this
-// could allow a GC phase change, which could result in missed write
-// barriers.
-//
-// putFast must be nowritebarrierrec because write barriers here would
-// corrupt the write barrier buffer. It (and everything it calls, if
-// it called anything) has to be nosplit to avoid scheduling on to a
-// different P and a different buffer.
-//
-//go:nowritebarrierrec
-//go:nosplit
-func (b *wbBuf) putFast(old, new uintptr) bool {
- p := (*[2]uintptr)(unsafe.Pointer(b.next))
- p[0] = old
- p[1] = new
- b.next += 2 * goarch.PtrSize
- return b.next != b.end
-}
-
-// wbBufFlush flushes the current P's write barrier buffer to the GC
-// workbufs. It is passed the slot and value of the write barrier that
-// caused the flush so that it can implement cgocheck.
-//
-// This must not have write barriers because it is part of the write
-// barrier implementation.
-//
-// This and everything it calls must be nosplit because 1) the stack
-// contains untyped slots from gcWriteBarrier and 2) there must not be
-// a GC safe point between the write barrier test in the caller and
-// flushing the buffer.
-//
-// TODO: A "go:nosplitrec" annotation would be perfect for this.
-//
-//go:nowritebarrierrec
-//go:nosplit
-func wbBufFlush(dst *uintptr, src uintptr) {
- // Note: Every possible return from this function must reset
- // the buffer's next pointer to prevent buffer overflow.
-
- // This *must not* modify its arguments because this
- // function's argument slots do double duty in gcWriteBarrier
- // as register spill slots. Currently, not modifying the
- // arguments is sufficient to keep the spill slots unmodified
- // (which seems unlikely to change since it costs little and
- // helps with debugging).
-
- if getg().m.dying > 0 {
- // We're going down. Not much point in write barriers
- // and this way we can allow write barriers in the
- // panic path.
- getg().m.p.ptr().wbBuf.discard()
- return
- }
-
- if writeBarrier.cgo && dst != nil {
- // This must be called from the stack that did the
- // write. It's nosplit all the way down.
- cgoCheckWriteBarrier(dst, src)
- if !writeBarrier.needed {
- // We were only called for cgocheck.
- getg().m.p.ptr().wbBuf.discard()
- return
- }
- }
-
- // Switch to the system stack so we don't have to worry about
- // the untyped stack slots or safe points.
- systemstack(func() {
- wbBufFlush1(getg().m.p.ptr())
- })
-}
-
-// wbBufFlush1 flushes p's write barrier buffer to the GC work queue.
-//
-// This must not have write barriers because it is part of the write
-// barrier implementation, so this may lead to infinite loops or
-// buffer corruption.
-//
-// This must be non-preemptible because it uses the P's workbuf.
-//
-//go:nowritebarrierrec
-//go:systemstack
-func wbBufFlush1(_p_ *p) {
- // Get the buffered pointers.
- start := uintptr(unsafe.Pointer(&_p_.wbBuf.buf[0]))
- n := (_p_.wbBuf.next - start) / unsafe.Sizeof(_p_.wbBuf.buf[0])
- ptrs := _p_.wbBuf.buf[:n]
-
- // Poison the buffer to make extra sure nothing is enqueued
- // while we're processing the buffer.
- _p_.wbBuf.next = 0
-
- if useCheckmark {
- // Slow path for checkmark mode.
- for _, ptr := range ptrs {
- shade(ptr)
- }
- _p_.wbBuf.reset()
- return
- }
-
- // Mark all of the pointers in the buffer and record only the
- // pointers we greyed. We use the buffer itself to temporarily
- // record greyed pointers.
- //
- // TODO: Should scanobject/scanblock just stuff pointers into
- // the wbBuf? Then this would become the sole greying path.
- //
- // TODO: We could avoid shading any of the "new" pointers in
- // the buffer if the stack has been shaded, or even avoid
- // putting them in the buffer at all (which would double its
- // capacity). This is slightly complicated with the buffer; we
- // could track whether any un-shaded goroutine has used the
- // buffer, or just track globally whether there are any
- // un-shaded stacks and flush after each stack scan.
- gcw := &_p_.gcw
- pos := 0
- for _, ptr := range ptrs {
- if ptr < minLegalPointer {
- // nil pointers are very common, especially
- // for the "old" values. Filter out these and
- // other "obvious" non-heap pointers ASAP.
- //
- // TODO: Should we filter out nils in the fast
- // path to reduce the rate of flushes?
- continue
- }
- obj, span, objIndex := findObject(ptr, 0, 0)
- if obj == 0 {
- continue
- }
- // TODO: Consider making two passes where the first
- // just prefetches the mark bits.
- mbits := span.markBitsForIndex(objIndex)
- if mbits.isMarked() {
- continue
- }
- mbits.setMarked()
-
- // Mark span.
- arena, pageIdx, pageMask := pageIndexOf(span.base())
- if arena.pageMarks[pageIdx]&pageMask == 0 {
- atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
- }
-
- if span.spanclass.noscan() {
- gcw.bytesMarked += uint64(span.elemsize)
- continue
- }
- ptrs[pos] = obj
- pos++
- }
-
- // Enqueue the greyed objects.
- gcw.putBatch(ptrs[:pos])
-
- _p_.wbBuf.reset()
-}
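Outside the runtime's constraints (assembly fast path, per-P ownership, nosplit), the enqueue-then-flush shape of wbBuf reduces to a few lines. A hypothetical sketch, not the runtime's code:

package sketch

// ptrBuf is an illustrative analogue of wbBuf: pointer pairs are appended
// on a fast path and handed to a mark function in batches when full.
// Usage: b := &ptrBuf{buf: make([]uintptr, 2*256)}.
type ptrBuf struct {
	buf  []uintptr
	next int
}

// putFast mirrors wbBuf.putFast: enqueue (old, new) and report whether
// the caller may continue without flushing.
func (b *ptrBuf) putFast(old, new uintptr) bool {
	b.buf[b.next] = old
	b.buf[b.next+1] = new
	b.next += 2
	return b.next != len(b.buf)
}

// flush mirrors the filtering in wbBufFlush1: skip obvious non-heap
// pointers, pass the rest to the marker, then reset the buffer.
func (b *ptrBuf) flush(mark func(uintptr)) {
	for _, p := range b.buf[:b.next] {
		if p == 0 {
			continue // nil pointers are common, especially old values
		}
		mark(p)
	}
	b.next = 0
}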
diff --git a/contrib/go/_std_1.18/src/runtime/nbpipe_pipe2.go b/contrib/go/_std_1.18/src/runtime/nbpipe_pipe2.go
deleted file mode 100644
index 6a555bcd99..0000000000
--- a/contrib/go/_std_1.18/src/runtime/nbpipe_pipe2.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-package runtime
-
-func nonblockingPipe() (r, w int32, errno int32) {
- r, w, errno = pipe2(_O_NONBLOCK | _O_CLOEXEC)
- if errno == -_ENOSYS {
- r, w, errno = pipe()
- if errno != 0 {
- return -1, -1, errno
- }
- closeonexec(r)
- setNonblock(r)
- closeonexec(w)
- setNonblock(w)
- }
- return r, w, errno
-}
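The same fallback idea can be written in user space with the syscall package; the sketch below (Linux) uses standard syscall functions and mirrors the structure of the runtime helper above:

//go:build linux

package main

import (
	"errors"
	"syscall"
)

func nonblockingPipe() (r, w int, err error) {
	var p [2]int
	if err = syscall.Pipe2(p[:], syscall.O_NONBLOCK|syscall.O_CLOEXEC); err == nil {
		return p[0], p[1], nil
	}
	if !errors.Is(err, syscall.ENOSYS) {
		return -1, -1, err
	}
	// Old kernel: fall back to pipe and fix up each end, as above.
	if err = syscall.Pipe(p[:]); err != nil {
		return -1, -1, err
	}
	for _, fd := range p {
		syscall.CloseOnExec(fd)
		if err = syscall.SetNonblock(fd, true); err != nil {
			return -1, -1, err
		}
	}
	return p[0], p[1], nil
}

func main() {
	if r, w, err := nonblockingPipe(); err == nil {
		syscall.Close(r)
		syscall.Close(w)
	}
}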
diff --git a/contrib/go/_std_1.18/src/runtime/netpoll.go b/contrib/go/_std_1.18/src/runtime/netpoll.go
deleted file mode 100644
index bb3dd35317..0000000000
--- a/contrib/go/_std_1.18/src/runtime/netpoll.go
+++ /dev/null
@@ -1,652 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris || windows
-
-package runtime
-
-import (
- "runtime/internal/atomic"
- "unsafe"
-)
-
-// Integrated network poller (platform-independent part).
-// A particular implementation (epoll/kqueue/port/AIX/Windows)
-// must define the following functions:
-//
-// func netpollinit()
-// Initialize the poller. Only called once.
-//
-// func netpollopen(fd uintptr, pd *pollDesc) int32
-// Arm edge-triggered notifications for fd. The pd argument is to pass
-// back to netpollready when fd is ready. Return an errno value.
-//
-// func netpollclose(fd uintptr) int32
-// Disable notifications for fd. Return an errno value.
-//
-// func netpoll(delta int64) gList
-// Poll the network. If delta < 0, block indefinitely. If delta == 0,
-// poll without blocking. If delta > 0, block for up to delta nanoseconds.
-// Return a list of goroutines built by calling netpollready.
-//
-// func netpollBreak()
-// Wake up the network poller, assumed to be blocked in netpoll.
-//
-// func netpollIsPollDescriptor(fd uintptr) bool
-// Reports whether fd is a file descriptor used by the poller.
-
-// Error codes returned by runtime_pollReset and runtime_pollWait.
-// These must match the values in internal/poll/fd_poll_runtime.go.
-const (
- pollNoError = 0 // no error
- pollErrClosing = 1 // descriptor is closed
- pollErrTimeout = 2 // I/O timeout
- pollErrNotPollable = 3 // general error polling descriptor
-)
-
-// pollDesc contains 2 binary semaphores, rg and wg, to park reader and writer
-// goroutines respectively. The semaphore can be in the following states:
-// pdReady - io readiness notification is pending;
-// a goroutine consumes the notification by changing the state to nil.
-// pdWait - a goroutine prepares to park on the semaphore, but not yet parked;
-// the goroutine commits to park by changing the state to G pointer,
-// or, alternatively, concurrent io notification changes the state to pdReady,
-// or, alternatively, concurrent timeout/close changes the state to nil.
-// G pointer - the goroutine is blocked on the semaphore;
-// io notification or timeout/close changes the state to pdReady or nil respectively
-// and unparks the goroutine.
-// nil - none of the above.
-const (
- pdReady uintptr = 1
- pdWait uintptr = 2
-)
-
-const pollBlockSize = 4 * 1024
-
-// Network poller descriptor.
-//
-// No heap pointers.
-//
-//go:notinheap
-type pollDesc struct {
- link *pollDesc // in pollcache, protected by pollcache.lock
- fd uintptr // constant for pollDesc usage lifetime
-
- // atomicInfo holds bits from closing, rd, and wd,
- // which are only ever written while holding the lock,
- // summarized for use by netpollcheckerr,
- // which cannot acquire the lock.
- // After writing these fields under lock in a way that
- // might change the summary, code must call publishInfo
- // before releasing the lock.
- // Code that changes fields and then calls netpollunblock
- // (while still holding the lock) must call publishInfo
- // before calling netpollunblock, because publishInfo is what
- // stops netpollblock from blocking anew
- // (by changing the result of netpollcheckerr).
- // atomicInfo also holds the eventErr bit,
- // recording whether a poll event on the fd got an error;
- // atomicInfo is the only source of truth for that bit.
- atomicInfo atomic.Uint32 // atomic pollInfo
-
- // rg, wg are accessed atomically and hold g pointers.
- // (Using atomic.Uintptr here is similar to using guintptr elsewhere.)
- rg atomic.Uintptr // pdReady, pdWait, G waiting for read or nil
- wg atomic.Uintptr // pdReady, pdWait, G waiting for write or nil
-
- lock mutex // protects the following fields
- closing bool
- user uint32 // user settable cookie
- rseq uintptr // protects from stale read timers
- rt timer // read deadline timer (set if rt.f != nil)
- rd int64 // read deadline (a nanotime in the future, -1 when expired)
- wseq uintptr // protects from stale write timers
- wt timer // write deadline timer
- wd int64 // write deadline (a nanotime in the future, -1 when expired)
- self *pollDesc // storage for indirect interface. See (*pollDesc).makeArg.
-}
-
-// pollInfo is the bits needed by netpollcheckerr, stored atomically,
-// mostly duplicating state that is manipulated under lock in pollDesc.
-// The one exception is the pollEventErr bit, which is maintained only
-// in the pollInfo.
-type pollInfo uint32
-
-const (
- pollClosing = 1 << iota
- pollEventErr
- pollExpiredReadDeadline
- pollExpiredWriteDeadline
-)
-
-func (i pollInfo) closing() bool { return i&pollClosing != 0 }
-func (i pollInfo) eventErr() bool { return i&pollEventErr != 0 }
-func (i pollInfo) expiredReadDeadline() bool { return i&pollExpiredReadDeadline != 0 }
-func (i pollInfo) expiredWriteDeadline() bool { return i&pollExpiredWriteDeadline != 0 }
-
-// info returns the pollInfo corresponding to pd.
-func (pd *pollDesc) info() pollInfo {
- return pollInfo(pd.atomicInfo.Load())
-}
-
-// publishInfo updates pd.atomicInfo (returned by pd.info)
-// using the other values in pd.
-// It must be called while holding pd.lock,
-// and it must be called after changing anything
-// that might affect the info bits.
-// In practice this means after changing closing
-// or changing rd or wd from < 0 to >= 0.
-func (pd *pollDesc) publishInfo() {
- var info uint32
- if pd.closing {
- info |= pollClosing
- }
- if pd.rd < 0 {
- info |= pollExpiredReadDeadline
- }
- if pd.wd < 0 {
- info |= pollExpiredWriteDeadline
- }
-
- // Set all of x except the pollEventErr bit.
- x := pd.atomicInfo.Load()
- for !pd.atomicInfo.CompareAndSwap(x, (x&pollEventErr)|info) {
- x = pd.atomicInfo.Load()
- }
-}
-
-// setEventErr sets the result of pd.info().eventErr() to b.
-func (pd *pollDesc) setEventErr(b bool) {
- x := pd.atomicInfo.Load()
- for (x&pollEventErr != 0) != b && !pd.atomicInfo.CompareAndSwap(x, x^pollEventErr) {
- x = pd.atomicInfo.Load()
- }
-}
-
-type pollCache struct {
- lock mutex
- first *pollDesc
- // PollDesc objects must be type-stable,
- // because we can get ready notification from epoll/kqueue
- // after the descriptor is closed/reused.
- // Stale notifications are detected using seq variable,
- // seq is incremented when deadlines are changed or descriptor is reused.
-}
-
-var (
- netpollInitLock mutex
- netpollInited uint32
-
- pollcache pollCache
- netpollWaiters uint32
-)
-
-//go:linkname poll_runtime_pollServerInit internal/poll.runtime_pollServerInit
-func poll_runtime_pollServerInit() {
- netpollGenericInit()
-}
-
-func netpollGenericInit() {
- if atomic.Load(&netpollInited) == 0 {
- lockInit(&netpollInitLock, lockRankNetpollInit)
- lock(&netpollInitLock)
- if netpollInited == 0 {
- netpollinit()
- atomic.Store(&netpollInited, 1)
- }
- unlock(&netpollInitLock)
- }
-}
-
-func netpollinited() bool {
- return atomic.Load(&netpollInited) != 0
-}
-
-//go:linkname poll_runtime_isPollServerDescriptor internal/poll.runtime_isPollServerDescriptor
-
-// poll_runtime_isPollServerDescriptor reports whether fd is a
-// descriptor being used by netpoll.
-func poll_runtime_isPollServerDescriptor(fd uintptr) bool {
- return netpollIsPollDescriptor(fd)
-}
-
-//go:linkname poll_runtime_pollOpen internal/poll.runtime_pollOpen
-func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
- pd := pollcache.alloc()
- lock(&pd.lock)
- wg := pd.wg.Load()
- if wg != 0 && wg != pdReady {
- throw("runtime: blocked write on free polldesc")
- }
- rg := pd.rg.Load()
- if rg != 0 && rg != pdReady {
- throw("runtime: blocked read on free polldesc")
- }
- pd.fd = fd
- pd.closing = false
- pd.setEventErr(false)
- pd.rseq++
- pd.rg.Store(0)
- pd.rd = 0
- pd.wseq++
- pd.wg.Store(0)
- pd.wd = 0
- pd.self = pd
- pd.publishInfo()
- unlock(&pd.lock)
-
- errno := netpollopen(fd, pd)
- if errno != 0 {
- pollcache.free(pd)
- return nil, int(errno)
- }
- return pd, 0
-}
-
-//go:linkname poll_runtime_pollClose internal/poll.runtime_pollClose
-func poll_runtime_pollClose(pd *pollDesc) {
- if !pd.closing {
- throw("runtime: close polldesc w/o unblock")
- }
- wg := pd.wg.Load()
- if wg != 0 && wg != pdReady {
- throw("runtime: blocked write on closing polldesc")
- }
- rg := pd.rg.Load()
- if rg != 0 && rg != pdReady {
- throw("runtime: blocked read on closing polldesc")
- }
- netpollclose(pd.fd)
- pollcache.free(pd)
-}
-
-func (c *pollCache) free(pd *pollDesc) {
- lock(&c.lock)
- pd.link = c.first
- c.first = pd
- unlock(&c.lock)
-}
-
-// poll_runtime_pollReset, which is internal/poll.runtime_pollReset,
-// prepares a descriptor for polling in mode, which is 'r' or 'w'.
-// This returns an error code; the codes are defined above.
-//go:linkname poll_runtime_pollReset internal/poll.runtime_pollReset
-func poll_runtime_pollReset(pd *pollDesc, mode int) int {
- errcode := netpollcheckerr(pd, int32(mode))
- if errcode != pollNoError {
- return errcode
- }
- if mode == 'r' {
- pd.rg.Store(0)
- } else if mode == 'w' {
- pd.wg.Store(0)
- }
- return pollNoError
-}
-
-// poll_runtime_pollWait, which is internal/poll.runtime_pollWait,
-// waits for a descriptor to be ready for reading or writing,
-// according to mode, which is 'r' or 'w'.
-// This returns an error code; the codes are defined above.
-//go:linkname poll_runtime_pollWait internal/poll.runtime_pollWait
-func poll_runtime_pollWait(pd *pollDesc, mode int) int {
- errcode := netpollcheckerr(pd, int32(mode))
- if errcode != pollNoError {
- return errcode
- }
- // For now, only Solaris, illumos, and AIX use level-triggered IO.
- if GOOS == "solaris" || GOOS == "illumos" || GOOS == "aix" {
- netpollarm(pd, mode)
- }
- for !netpollblock(pd, int32(mode), false) {
- errcode = netpollcheckerr(pd, int32(mode))
- if errcode != pollNoError {
- return errcode
- }
- // Can happen if timeout has fired and unblocked us,
- // but before we had a chance to run, timeout has been reset.
- // Pretend it has not happened and retry.
- }
- return pollNoError
-}
-
-//go:linkname poll_runtime_pollWaitCanceled internal/poll.runtime_pollWaitCanceled
-func poll_runtime_pollWaitCanceled(pd *pollDesc, mode int) {
- // This function is used only on windows after a failed attempt to cancel
- // a pending async IO operation. Wait for ioready, ignore closing or timeouts.
- for !netpollblock(pd, int32(mode), true) {
- }
-}
-
-//go:linkname poll_runtime_pollSetDeadline internal/poll.runtime_pollSetDeadline
-func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
- lock(&pd.lock)
- if pd.closing {
- unlock(&pd.lock)
- return
- }
- rd0, wd0 := pd.rd, pd.wd
- combo0 := rd0 > 0 && rd0 == wd0
- if d > 0 {
- d += nanotime()
- if d <= 0 {
- // If the user has a deadline in the future, but the delay calculation
- // overflows, then set the deadline to the maximum possible value.
- d = 1<<63 - 1
- }
- }
- if mode == 'r' || mode == 'r'+'w' {
- pd.rd = d
- }
- if mode == 'w' || mode == 'r'+'w' {
- pd.wd = d
- }
- pd.publishInfo()
- combo := pd.rd > 0 && pd.rd == pd.wd
- rtf := netpollReadDeadline
- if combo {
- rtf = netpollDeadline
- }
- if pd.rt.f == nil {
- if pd.rd > 0 {
- pd.rt.f = rtf
- // Copy current seq into the timer arg.
- // Timer func will check the seq against current descriptor seq,
- // if they differ the descriptor was reused or timers were reset.
- pd.rt.arg = pd.makeArg()
- pd.rt.seq = pd.rseq
- resettimer(&pd.rt, pd.rd)
- }
- } else if pd.rd != rd0 || combo != combo0 {
- pd.rseq++ // invalidate current timers
- if pd.rd > 0 {
- modtimer(&pd.rt, pd.rd, 0, rtf, pd.makeArg(), pd.rseq)
- } else {
- deltimer(&pd.rt)
- pd.rt.f = nil
- }
- }
- if pd.wt.f == nil {
- if pd.wd > 0 && !combo {
- pd.wt.f = netpollWriteDeadline
- pd.wt.arg = pd.makeArg()
- pd.wt.seq = pd.wseq
- resettimer(&pd.wt, pd.wd)
- }
- } else if pd.wd != wd0 || combo != combo0 {
- pd.wseq++ // invalidate current timers
- if pd.wd > 0 && !combo {
- modtimer(&pd.wt, pd.wd, 0, netpollWriteDeadline, pd.makeArg(), pd.wseq)
- } else {
- deltimer(&pd.wt)
- pd.wt.f = nil
- }
- }
- // If we set the new deadline in the past, unblock currently pending IO if any.
- // Note that pd.publishInfo has already been called, above, immediately after modifying rd and wd.
- var rg, wg *g
- if pd.rd < 0 {
- rg = netpollunblock(pd, 'r', false)
- }
- if pd.wd < 0 {
- wg = netpollunblock(pd, 'w', false)
- }
- unlock(&pd.lock)
- if rg != nil {
- netpollgoready(rg, 3)
- }
- if wg != nil {
- netpollgoready(wg, 3)
- }
-}
-
-//go:linkname poll_runtime_pollUnblock internal/poll.runtime_pollUnblock
-func poll_runtime_pollUnblock(pd *pollDesc) {
- lock(&pd.lock)
- if pd.closing {
- throw("runtime: unblock on closing polldesc")
- }
- pd.closing = true
- pd.rseq++
- pd.wseq++
- var rg, wg *g
- pd.publishInfo()
- rg = netpollunblock(pd, 'r', false)
- wg = netpollunblock(pd, 'w', false)
- if pd.rt.f != nil {
- deltimer(&pd.rt)
- pd.rt.f = nil
- }
- if pd.wt.f != nil {
- deltimer(&pd.wt)
- pd.wt.f = nil
- }
- unlock(&pd.lock)
- if rg != nil {
- netpollgoready(rg, 3)
- }
- if wg != nil {
- netpollgoready(wg, 3)
- }
-}
-
-// netpollready is called by the platform-specific netpoll function.
-// It declares that the fd associated with pd is ready for I/O.
-// The toRun argument is used to build a list of goroutines to return
-// from netpoll. The mode argument is 'r', 'w', or 'r'+'w' to indicate
-// whether the fd is ready for reading or writing or both.
-//
-// This may run while the world is stopped, so write barriers are not allowed.
-//go:nowritebarrier
-func netpollready(toRun *gList, pd *pollDesc, mode int32) {
- var rg, wg *g
- if mode == 'r' || mode == 'r'+'w' {
- rg = netpollunblock(pd, 'r', true)
- }
- if mode == 'w' || mode == 'r'+'w' {
- wg = netpollunblock(pd, 'w', true)
- }
- if rg != nil {
- toRun.push(rg)
- }
- if wg != nil {
- toRun.push(wg)
- }
-}
-
-func netpollcheckerr(pd *pollDesc, mode int32) int {
- info := pd.info()
- if info.closing() {
- return pollErrClosing
- }
- if (mode == 'r' && info.expiredReadDeadline()) || (mode == 'w' && info.expiredWriteDeadline()) {
- return pollErrTimeout
- }
- // Report an event scanning error only on a read event.
- // An error on a write event will be captured in a subsequent
- // write call that is able to report a more specific error.
- if mode == 'r' && info.eventErr() {
- return pollErrNotPollable
- }
- return pollNoError
-}
-
-func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
- r := atomic.Casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp)))
- if r {
- // Bump the count of goroutines waiting for the poller.
- // The scheduler uses this to decide whether to block
- // waiting for the poller if there is nothing else to do.
- atomic.Xadd(&netpollWaiters, 1)
- }
- return r
-}
-
-func netpollgoready(gp *g, traceskip int) {
- atomic.Xadd(&netpollWaiters, -1)
- goready(gp, traceskip+1)
-}
-
-// returns true if IO is ready, or false if timed out or closed
-// waitio - wait only for completed IO, ignore errors
-// Concurrent calls to netpollblock in the same mode are forbidden, as pollDesc
-// can hold only a single waiting goroutine for each mode.
-func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
- gpp := &pd.rg
- if mode == 'w' {
- gpp = &pd.wg
- }
-
- // set the gpp semaphore to pdWait
- for {
- // Consume notification if already ready.
- if gpp.CompareAndSwap(pdReady, 0) {
- return true
- }
- if gpp.CompareAndSwap(0, pdWait) {
- break
- }
-
- // Double check that this isn't corrupt; otherwise we'd loop
- // forever.
- if v := gpp.Load(); v != pdReady && v != 0 {
- throw("runtime: double wait")
- }
- }
-
- // need to recheck error states after setting gpp to pdWait
- // this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
- // do the opposite: store to closing/rd/wd, publishInfo, load of rg/wg
- if waitio || netpollcheckerr(pd, mode) == pollNoError {
- gopark(netpollblockcommit, unsafe.Pointer(gpp), waitReasonIOWait, traceEvGoBlockNet, 5)
- }
- // be careful to not lose concurrent pdReady notification
- old := gpp.Swap(0)
- if old > pdWait {
- throw("runtime: corrupted polldesc")
- }
- return old == pdReady
-}
-
-func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
- gpp := &pd.rg
- if mode == 'w' {
- gpp = &pd.wg
- }
-
- for {
- old := gpp.Load()
- if old == pdReady {
- return nil
- }
- if old == 0 && !ioready {
- // Only set pdReady for ioready. runtime_pollWait
- // will check for timeout/cancel before waiting.
- return nil
- }
- var new uintptr
- if ioready {
- new = pdReady
- }
- if gpp.CompareAndSwap(old, new) {
- if old == pdWait {
- old = 0
- }
- return (*g)(unsafe.Pointer(old))
- }
- }
-}
-
-func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
- lock(&pd.lock)
- // Seq arg is seq when the timer was set.
- // If it's stale, ignore the timer event.
- currentSeq := pd.rseq
- if !read {
- currentSeq = pd.wseq
- }
- if seq != currentSeq {
- // The descriptor was reused or timers were reset.
- unlock(&pd.lock)
- return
- }
- var rg *g
- if read {
- if pd.rd <= 0 || pd.rt.f == nil {
- throw("runtime: inconsistent read deadline")
- }
- pd.rd = -1
- pd.publishInfo()
- rg = netpollunblock(pd, 'r', false)
- }
- var wg *g
- if write {
- if pd.wd <= 0 || pd.wt.f == nil && !read {
- throw("runtime: inconsistent write deadline")
- }
- pd.wd = -1
- pd.publishInfo()
- wg = netpollunblock(pd, 'w', false)
- }
- unlock(&pd.lock)
- if rg != nil {
- netpollgoready(rg, 0)
- }
- if wg != nil {
- netpollgoready(wg, 0)
- }
-}
-
-func netpollDeadline(arg any, seq uintptr) {
- netpolldeadlineimpl(arg.(*pollDesc), seq, true, true)
-}
-
-func netpollReadDeadline(arg any, seq uintptr) {
- netpolldeadlineimpl(arg.(*pollDesc), seq, true, false)
-}
-
-func netpollWriteDeadline(arg any, seq uintptr) {
- netpolldeadlineimpl(arg.(*pollDesc), seq, false, true)
-}
-
-func (c *pollCache) alloc() *pollDesc {
- lock(&c.lock)
- if c.first == nil {
- const pdSize = unsafe.Sizeof(pollDesc{})
- n := pollBlockSize / pdSize
- if n == 0 {
- n = 1
- }
- // Must be in non-GC memory because can be referenced
- // only from epoll/kqueue internals.
- mem := persistentalloc(n*pdSize, 0, &memstats.other_sys)
- for i := uintptr(0); i < n; i++ {
- pd := (*pollDesc)(add(mem, i*pdSize))
- pd.link = c.first
- c.first = pd
- }
- }
- pd := c.first
- c.first = pd.link
- lockInit(&pd.lock, lockRankPollDesc)
- unlock(&c.lock)
- return pd
-}
-
-// makeArg converts pd to an interface{}.
-// makeArg does not do any allocation. Normally, such
-// a conversion requires an allocation because pointers to
-// go:notinheap types (which pollDesc is) must be stored
-// in interfaces indirectly. See issue 42076.
-func (pd *pollDesc) makeArg() (i any) {
- x := (*eface)(unsafe.Pointer(&i))
- x._type = pdType
- x.data = unsafe.Pointer(&pd.self)
- return
-}
-
-var (
- pdEface any = (*pollDesc)(nil)
- pdType *_type = efaceOf(&pdEface)._type
-)
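User code reaches this machinery through net.Conn deadlines: the timer set by poll_runtime_pollSetDeadline fires, netpolldeadlineimpl unblocks the reader, and the pollErrTimeout from netpollcheckerr surfaces as os.ErrDeadlineExceeded. An illustrative sketch:

package main

import (
	"errors"
	"fmt"
	"net"
	"os"
	"time"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	conn, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Arm the read deadline; nothing is ever written, so the timer fires.
	conn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
	if _, err := conn.Read(make([]byte, 1)); errors.Is(err, os.ErrDeadlineExceeded) {
		fmt.Println("read timed out, as expected")
	}
}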
diff --git a/contrib/go/_std_1.18/src/runtime/os_darwin.go b/contrib/go/_std_1.18/src/runtime/os_darwin.go
deleted file mode 100644
index 9065b76375..0000000000
--- a/contrib/go/_std_1.18/src/runtime/os_darwin.go
+++ /dev/null
@@ -1,470 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/abi"
- "unsafe"
-)
-
-type mOS struct {
- initialized bool
- mutex pthreadmutex
- cond pthreadcond
- count int
-}
-
-func unimplemented(name string) {
- println(name, "not implemented")
- *(*int)(unsafe.Pointer(uintptr(1231))) = 1231
-}
-
-//go:nosplit
-func semacreate(mp *m) {
- if mp.initialized {
- return
- }
- mp.initialized = true
- if err := pthread_mutex_init(&mp.mutex, nil); err != 0 {
- throw("pthread_mutex_init")
- }
- if err := pthread_cond_init(&mp.cond, nil); err != 0 {
- throw("pthread_cond_init")
- }
-}
-
-//go:nosplit
-func semasleep(ns int64) int32 {
- var start int64
- if ns >= 0 {
- start = nanotime()
- }
- mp := getg().m
- pthread_mutex_lock(&mp.mutex)
- for {
- if mp.count > 0 {
- mp.count--
- pthread_mutex_unlock(&mp.mutex)
- return 0
- }
- if ns >= 0 {
- spent := nanotime() - start
- if spent >= ns {
- pthread_mutex_unlock(&mp.mutex)
- return -1
- }
- var t timespec
- t.setNsec(ns - spent)
- err := pthread_cond_timedwait_relative_np(&mp.cond, &mp.mutex, &t)
- if err == _ETIMEDOUT {
- pthread_mutex_unlock(&mp.mutex)
- return -1
- }
- } else {
- pthread_cond_wait(&mp.cond, &mp.mutex)
- }
- }
-}
-
-//go:nosplit
-func semawakeup(mp *m) {
- pthread_mutex_lock(&mp.mutex)
- mp.count++
- if mp.count > 0 {
- pthread_cond_signal(&mp.cond)
- }
- pthread_mutex_unlock(&mp.mutex)
-}
-
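semacreate, semasleep, and semawakeup implement a counting semaphore over a pthread mutex and condition variable. The same shape in ordinary Go, as a simplified sketch built on sync.Cond (the runtime cannot use sync, and this version omits semasleep's timed wait):

package sketch

import "sync"

type sema struct {
	mu    sync.Mutex
	cond  *sync.Cond
	count int
}

func newSema() *sema {
	s := &sema{}
	s.cond = sync.NewCond(&s.mu)
	return s
}

// sleep plays the role of semasleep(-1): block until a token arrives.
func (s *sema) sleep() {
	s.mu.Lock()
	for s.count == 0 {
		s.cond.Wait()
	}
	s.count--
	s.mu.Unlock()
}

// wakeup plays the role of semawakeup: add a token and signal one waiter.
func (s *sema) wakeup() {
	s.mu.Lock()
	s.count++
	s.cond.Signal()
	s.mu.Unlock()
}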
-// The read and write file descriptors used by the sigNote functions.
-var sigNoteRead, sigNoteWrite int32
-
-// sigNoteSetup initializes an async-signal-safe note.
-//
-// The current implementation of notes on Darwin is not async-signal-safe,
-// because the functions pthread_mutex_lock, pthread_cond_signal, and
-// pthread_mutex_unlock, called by semawakeup, are not async-signal-safe.
-// There is only one case where we need to wake up a note from a signal
-// handler: the sigsend function. The signal handler code does not require
-// all the features of notes: it does not need to do a timed wait.
-// This is a separate implementation of notes, based on a pipe, that does
-// not support timed waits but is async-signal-safe.
-func sigNoteSetup(*note) {
- if sigNoteRead != 0 || sigNoteWrite != 0 {
- throw("duplicate sigNoteSetup")
- }
- var errno int32
- sigNoteRead, sigNoteWrite, errno = pipe()
- if errno != 0 {
- throw("pipe failed")
- }
- closeonexec(sigNoteRead)
- closeonexec(sigNoteWrite)
-
- // Make the write end of the pipe non-blocking, so that if the pipe
- // buffer is somehow full we will not block in the signal handler.
- // Leave the read end of the pipe blocking so that we will block
- // in sigNoteSleep.
- setNonblock(sigNoteWrite)
-}
-
-// sigNoteWakeup wakes up a thread sleeping on a note created by sigNoteSetup.
-func sigNoteWakeup(*note) {
- var b byte
- write(uintptr(sigNoteWrite), unsafe.Pointer(&b), 1)
-}
-
-// sigNoteSleep waits for a note created by sigNoteSetup to be woken.
-func sigNoteSleep(*note) {
- for {
- var b byte
- entersyscallblock()
- n := read(sigNoteRead, unsafe.Pointer(&b), 1)
- exitsyscall()
- if n != -_EINTR {
- return
- }
- }
-}
-
-// BSD interface for threading.
-func osinit() {
- // pthread_create delayed until end of goenvs so that we
- // can look at the environment first.
-
- ncpu = getncpu()
- physPageSize = getPageSize()
-}
-
-func sysctlbynameInt32(name []byte) (int32, int32) {
- out := int32(0)
- nout := unsafe.Sizeof(out)
- ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
- return ret, out
-}
-
-//go:linkname internal_cpu_getsysctlbyname internal/cpu.getsysctlbyname
-func internal_cpu_getsysctlbyname(name []byte) (int32, int32) {
- return sysctlbynameInt32(name)
-}
-
-const (
- _CTL_HW = 6
- _HW_NCPU = 3
- _HW_PAGESIZE = 7
-)
-
-func getncpu() int32 {
- // Use sysctl to fetch hw.ncpu.
- mib := [2]uint32{_CTL_HW, _HW_NCPU}
- out := uint32(0)
- nout := unsafe.Sizeof(out)
- ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
- if ret >= 0 && int32(out) > 0 {
- return int32(out)
- }
- return 1
-}
-
-func getPageSize() uintptr {
- // Use sysctl to fetch hw.pagesize.
- mib := [2]uint32{_CTL_HW, _HW_PAGESIZE}
- out := uint32(0)
- nout := unsafe.Sizeof(out)
- ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
- if ret >= 0 && int32(out) > 0 {
- return uintptr(out)
- }
- return 0
-}
-
-var urandom_dev = []byte("/dev/urandom\x00")
-
-//go:nosplit
-func getRandomData(r []byte) {
- fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
- n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
- closefd(fd)
- extendRandom(r, int(n))
-}
-
-func goenvs() {
- goenvs_unix()
-}
-
-// May run with m.p==nil, so write barriers are not allowed.
-//go:nowritebarrierrec
-func newosproc(mp *m) {
- stk := unsafe.Pointer(mp.g0.stack.hi)
- if false {
- print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n")
- }
-
- // Initialize an attribute object.
- var attr pthreadattr
- var err int32
- err = pthread_attr_init(&attr)
- if err != 0 {
- write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
- exit(1)
- }
-
- // Find out OS stack size for our own stack guard.
- var stacksize uintptr
- if pthread_attr_getstacksize(&attr, &stacksize) != 0 {
- write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
- exit(1)
- }
- mp.g0.stack.hi = stacksize // for mstart
-
- // Tell the pthread library we won't join with this thread.
- if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
- write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
- exit(1)
- }
-
- // Finally, create the thread. It starts at mstart_stub, which does some low-level
- // setup and then calls mstart.
- var oset sigset
- sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- err = pthread_create(&attr, abi.FuncPCABI0(mstart_stub), unsafe.Pointer(mp))
- sigprocmask(_SIG_SETMASK, &oset, nil)
- if err != 0 {
- write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
- exit(1)
- }
-}
-
-// glue code to call mstart from pthread_create.
-func mstart_stub()
-
-// newosproc0 is a version of newosproc that can be called before the runtime
-// is initialized.
-//
-// This function is not safe to use after initialization as it does not pass an M as fnarg.
-//
-//go:nosplit
-func newosproc0(stacksize uintptr, fn uintptr) {
- // Initialize an attribute object.
- var attr pthreadattr
- var err int32
- err = pthread_attr_init(&attr)
- if err != 0 {
- write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
- exit(1)
- }
-
- // The caller passes in a suggested stack size,
- // from when we allocated the stack and thread ourselves,
- // without libpthread. Now that we're using libpthread,
- // we use the OS default stack size instead of the suggestion.
- // Find out that stack size for our own stack guard.
- if pthread_attr_getstacksize(&attr, &stacksize) != 0 {
- write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
- exit(1)
- }
- g0.stack.hi = stacksize // for mstart
- memstats.stacks_sys.add(int64(stacksize))
-
- // Tell the pthread library we won't join with this thread.
- if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
- write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
- exit(1)
- }
-
- // Finally, create the thread. It starts at mstart_stub, which does some low-level
- // setup and then calls mstart.
- var oset sigset
- sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- err = pthread_create(&attr, fn, nil)
- sigprocmask(_SIG_SETMASK, &oset, nil)
- if err != 0 {
- write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
- exit(1)
- }
-}
-
-var failallocatestack = []byte("runtime: failed to allocate stack for the new OS thread\n")
-var failthreadcreate = []byte("runtime: failed to create new OS thread\n")
-
-// Called to do synchronous initialization of Go code built with
-// -buildmode=c-archive or -buildmode=c-shared.
-// None of the Go runtime is initialized.
-//go:nosplit
-//go:nowritebarrierrec
-func libpreinit() {
- initsig(true)
-}
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
-func mpreinit(mp *m) {
- mp.gsignal = malg(32 * 1024) // OS X wants >= 8K
- mp.gsignal.m = mp
- if GOOS == "darwin" && GOARCH == "arm64" {
- // mlock the signal stack to work around a kernel bug where it may
- // SIGILL when the signal stack is not faulted in while a signal
- // arrives. See issue 42774.
- mlock(unsafe.Pointer(mp.gsignal.stack.hi-physPageSize), physPageSize)
- }
-}
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the new thread, cannot allocate memory.
-func minit() {
- // iOS does not support alternate signal stack.
- // The signal handler handles it directly.
- if !(GOOS == "ios" && GOARCH == "arm64") {
- minitSignalStack()
- }
- minitSignalMask()
- getg().m.procid = uint64(pthread_self())
-}
-
-// Called from dropm to undo the effect of an minit.
-//go:nosplit
-func unminit() {
- // iOS does not support alternate signal stack.
- // See minit.
- if !(GOOS == "ios" && GOARCH == "arm64") {
- unminitSignals()
- }
-}
-
-// Called from exitm, but not from drop, to undo the effect of thread-owned
-// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
-func mdestroy(mp *m) {
-}
-
-//go:nosplit
-func osyield_no_g() {
- usleep_no_g(1)
-}
-
-//go:nosplit
-func osyield() {
- usleep(1)
-}
-
-const (
- _NSIG = 32
- _SI_USER = 0 /* empirically true, but not what headers say */
- _SIG_BLOCK = 1
- _SIG_UNBLOCK = 2
- _SIG_SETMASK = 3
- _SS_DISABLE = 4
-)
-
-//extern SigTabTT runtime·sigtab[];
-
-type sigset uint32
-
-var sigset_all = ^sigset(0)
-
-//go:nosplit
-//go:nowritebarrierrec
-func setsig(i uint32, fn uintptr) {
- var sa usigactiont
- sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
- sa.sa_mask = ^uint32(0)
- if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
- if iscgo {
- fn = abi.FuncPCABI0(cgoSigtramp)
- } else {
- fn = abi.FuncPCABI0(sigtramp)
- }
- }
- *(*uintptr)(unsafe.Pointer(&sa.__sigaction_u)) = fn
- sigaction(i, &sa, nil)
-}
-
-// sigtramp is the callback from libc when a signal is received.
-// It is called with the C calling convention.
-func sigtramp()
-func cgoSigtramp()
-
-//go:nosplit
-//go:nowritebarrierrec
-func setsigstack(i uint32) {
- var osa usigactiont
- sigaction(i, nil, &osa)
- handler := *(*uintptr)(unsafe.Pointer(&osa.__sigaction_u))
- if osa.sa_flags&_SA_ONSTACK != 0 {
- return
- }
- var sa usigactiont
- *(*uintptr)(unsafe.Pointer(&sa.__sigaction_u)) = handler
- sa.sa_mask = osa.sa_mask
- sa.sa_flags = osa.sa_flags | _SA_ONSTACK
- sigaction(i, &sa, nil)
-}
-
-//go:nosplit
-//go:nowritebarrierrec
-func getsig(i uint32) uintptr {
- var sa usigactiont
- sigaction(i, nil, &sa)
- return *(*uintptr)(unsafe.Pointer(&sa.__sigaction_u))
-}
-
-// setSignalstackSP sets the ss_sp field of a stackt.
-//go:nosplit
-func setSignalstackSP(s *stackt, sp uintptr) {
- *(*uintptr)(unsafe.Pointer(&s.ss_sp)) = sp
-}
-
-//go:nosplit
-//go:nowritebarrierrec
-func sigaddset(mask *sigset, i int) {
- *mask |= 1 << (uint32(i) - 1)
-}
-
-func sigdelset(mask *sigset, i int) {
- *mask &^= 1 << (uint32(i) - 1)
-}
-
-func setProcessCPUProfiler(hz int32) {
- setProcessCPUProfilerTimer(hz)
-}
-
-func setThreadCPUProfiler(hz int32) {
- setThreadCPUProfilerHz(hz)
-}
-
-//go:nosplit
-func validSIGPROF(mp *m, c *sigctxt) bool {
- return true
-}
-
-//go:linkname executablePath os.executablePath
-var executablePath string
-
-func sysargs(argc int32, argv **byte) {
- // skip over argv and envv; the first string after them is the path
- n := argc + 1
- for argv_index(argv, n) != nil {
- n++
- }
- executablePath = gostringnocopy(argv_index(argv, n+1))
-
- // strip "executable_path=" prefix if available, it's added after OS X 10.11.
- const prefix = "executable_path="
- if len(executablePath) > len(prefix) && executablePath[:len(prefix)] == prefix {
- executablePath = executablePath[len(prefix):]
- }
-}
-
-func signalM(mp *m, sig int) {
- pthread_kill(pthread(mp.procid), uint32(sig))
-}
-
-// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
-// number.
-const sigPerThreadSyscall = 1 << 31
-
-//go:nosplit
-func runPerThreadSyscall() {
- throw("runPerThreadSyscall only valid on linux")
-}
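
The sigNote functions deleted above implement a wakeup primitive over a pipe because write is async-signal-safe while the pthread calls used by semawakeup are not. A minimal user-space sketch of the same pattern, assuming os.Pipe stands in for the runtime's raw pipe()/write()/read() calls; pipeNote and its methods are illustrative names, not runtime API:

// pipe_note_sketch.go — hedged sketch of an async-signal-safe wakeup
// note, mirroring sigNoteSetup/sigNoteWakeup/sigNoteSleep above.
package main

import (
	"fmt"
	"os"
)

// pipeNote is a hypothetical user-space analogue of the runtime's
// pipe-backed note: Wakeup pokes the write end, Sleep blocks on the
// read end. No timed wait is supported, matching the original.
type pipeNote struct {
	r, w *os.File
}

func newPipeNote() (*pipeNote, error) {
	r, w, err := os.Pipe()
	if err != nil {
		return nil, err
	}
	return &pipeNote{r: r, w: w}, nil
}

// Wakeup writes a single byte. A lone byte always fits in the pipe
// buffer, so this cannot block for long; the runtime additionally
// makes its write end non-blocking for use inside a signal handler.
func (n *pipeNote) Wakeup() {
	n.w.Write([]byte{1})
}

// Sleep blocks until some Wakeup has been delivered. The runtime loops
// on -_EINTR here; in user code the standard library retries
// interrupted reads for us.
func (n *pipeNote) Sleep() error {
	var b [1]byte
	_, err := n.r.Read(b[:])
	return err
}

func main() {
	n, err := newPipeNote()
	if err != nil {
		panic(err)
	}
	go n.Wakeup()
	if err := n.Sleep(); err != nil {
		panic(err)
	}
	fmt.Println("woken")
}
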
diff --git a/contrib/go/_std_1.18/src/runtime/os_linux.go b/contrib/go/_std_1.18/src/runtime/os_linux.go
deleted file mode 100644
index eb8aa076e9..0000000000
--- a/contrib/go/_std_1.18/src/runtime/os_linux.go
+++ /dev/null
@@ -1,878 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/abi"
- "internal/goarch"
- "runtime/internal/atomic"
- "runtime/internal/syscall"
- "unsafe"
-)
-
-// sigPerThreadSyscall is the same signal (SIGSETXID) used by glibc for
-// per-thread syscalls on Linux. We use it for the same purpose in non-cgo
-// binaries.
-const sigPerThreadSyscall = _SIGRTMIN + 1
-
-type mOS struct {
- // profileTimer holds the ID of the POSIX interval timer for profiling CPU
- // usage on this thread.
- //
- // It is valid when the profileTimerValid field is non-zero. A thread
- // creates and manages its own timer, and these fields are read and written
- // only by this thread. But because some of the reads on profileTimerValid
- // are in signal handling code, access to that field uses atomic operations.
- profileTimer int32
- profileTimerValid uint32
-
- // needPerThreadSyscall indicates that a per-thread syscall is required
- // for doAllThreadsSyscall.
- needPerThreadSyscall atomic.Uint8
-}
-
-//go:noescape
-func futex(addr unsafe.Pointer, op int32, val uint32, ts, addr2 unsafe.Pointer, val3 uint32) int32
-
-// Linux futex.
-//
-// futexsleep(uint32 *addr, uint32 val)
-// futexwakeup(uint32 *addr)
-//
-// Futexsleep atomically checks if *addr == val and if so, sleeps on addr.
-// Futexwakeup wakes up threads sleeping on addr.
-// Futexsleep is allowed to wake up spuriously.
-
-const (
- _FUTEX_PRIVATE_FLAG = 128
- _FUTEX_WAIT_PRIVATE = 0 | _FUTEX_PRIVATE_FLAG
- _FUTEX_WAKE_PRIVATE = 1 | _FUTEX_PRIVATE_FLAG
-)
-
-// Atomically,
-// if(*addr == val) sleep
-// Might be woken up spuriously; that's allowed.
-// Don't sleep longer than ns; ns < 0 means forever.
-//go:nosplit
-func futexsleep(addr *uint32, val uint32, ns int64) {
- // Some Linux kernels have a bug where futex of
- // FUTEX_WAIT returns an internal error code
- // as an errno. Libpthread ignores the return value
- // here, and so can we: as it says a few lines up,
- // spurious wakeups are allowed.
- if ns < 0 {
- futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, nil, nil, 0)
- return
- }
-
- var ts timespec
- ts.setNsec(ns)
- futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, unsafe.Pointer(&ts), nil, 0)
-}
-
-// If any procs are sleeping on addr, wake up at most cnt.
-//go:nosplit
-func futexwakeup(addr *uint32, cnt uint32) {
- ret := futex(unsafe.Pointer(addr), _FUTEX_WAKE_PRIVATE, cnt, nil, nil, 0)
- if ret >= 0 {
- return
- }
-
- // I don't know that futex wakeup can return
- // EAGAIN or EINTR, but if it does, it would be
- // safe to loop and call futex again.
- systemstack(func() {
- print("futexwakeup addr=", addr, " returned ", ret, "\n")
- })
-
- *(*int32)(unsafe.Pointer(uintptr(0x1006))) = 0x1006
-}
-
-func getproccount() int32 {
- // This buffer is huge (8 kB) but we are on the system stack
- // and there should be plenty of space (64 kB).
- // Also this is a leaf, so we're not holding up the memory for long.
- // See golang.org/issue/11823.
- // The suggested behavior here is to keep trying with ever-larger
- // buffers, but we don't have a dynamic memory allocator at the
- // moment, so that's a bit tricky and seems like overkill.
- const maxCPUs = 64 * 1024
- var buf [maxCPUs / 8]byte
- r := sched_getaffinity(0, unsafe.Sizeof(buf), &buf[0])
- if r < 0 {
- return 1
- }
- n := int32(0)
- for _, v := range buf[:r] {
- for v != 0 {
- n += int32(v & 1)
- v >>= 1
- }
- }
- if n == 0 {
- n = 1
- }
- return n
-}
-
-// Clone, the Linux rfork.
-const (
- _CLONE_VM = 0x100
- _CLONE_FS = 0x200
- _CLONE_FILES = 0x400
- _CLONE_SIGHAND = 0x800
- _CLONE_PTRACE = 0x2000
- _CLONE_VFORK = 0x4000
- _CLONE_PARENT = 0x8000
- _CLONE_THREAD = 0x10000
- _CLONE_NEWNS = 0x20000
- _CLONE_SYSVSEM = 0x40000
- _CLONE_SETTLS = 0x80000
- _CLONE_PARENT_SETTID = 0x100000
- _CLONE_CHILD_CLEARTID = 0x200000
- _CLONE_UNTRACED = 0x800000
- _CLONE_CHILD_SETTID = 0x1000000
- _CLONE_STOPPED = 0x2000000
- _CLONE_NEWUTS = 0x4000000
- _CLONE_NEWIPC = 0x8000000
-
- // As of QEMU 2.8.0 (5ea2fc84d), user emulation requires all six of these
- // flags to be set when creating a thread; attempts to share the other
- // five but leave SYSVSEM unshared will fail with -EINVAL.
- //
- // In non-QEMU environments CLONE_SYSVSEM is inconsequential as we do not
- // use System V semaphores.
-
- cloneFlags = _CLONE_VM | /* share memory */
- _CLONE_FS | /* share cwd, etc */
- _CLONE_FILES | /* share fd table */
- _CLONE_SIGHAND | /* share sig handler table */
- _CLONE_SYSVSEM | /* share SysV semaphore undo lists (see issue #20763) */
- _CLONE_THREAD /* revisit - okay for now */
-)
-
-//go:noescape
-func clone(flags int32, stk, mp, gp, fn unsafe.Pointer) int32
-
-// May run with m.p==nil, so write barriers are not allowed.
-//go:nowritebarrier
-func newosproc(mp *m) {
- stk := unsafe.Pointer(mp.g0.stack.hi)
- /*
- * note: strace gets confused if we use CLONE_PTRACE here.
- */
- if false {
- print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " clone=", abi.FuncPCABI0(clone), " id=", mp.id, " ostk=", &mp, "\n")
- }
-
- // Disable signals during clone, so that the new thread starts
- // with signals disabled. It will enable them in minit.
- var oset sigset
- sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- ret := clone(cloneFlags, stk, unsafe.Pointer(mp), unsafe.Pointer(mp.g0), unsafe.Pointer(abi.FuncPCABI0(mstart)))
- sigprocmask(_SIG_SETMASK, &oset, nil)
-
- if ret < 0 {
- print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", -ret, ")\n")
- if ret == -_EAGAIN {
- println("runtime: may need to increase max user processes (ulimit -u)")
- }
- throw("newosproc")
- }
-}
-
-// Version of newosproc that doesn't require a valid G.
-//go:nosplit
-func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
- stack := sysAlloc(stacksize, &memstats.stacks_sys)
- if stack == nil {
- write(2, unsafe.Pointer(&failallocatestack[0]), int32(len(failallocatestack)))
- exit(1)
- }
- ret := clone(cloneFlags, unsafe.Pointer(uintptr(stack)+stacksize), nil, nil, fn)
- if ret < 0 {
- write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
- exit(1)
- }
-}
-
-var failallocatestack = []byte("runtime: failed to allocate stack for the new OS thread\n")
-var failthreadcreate = []byte("runtime: failed to create new OS thread\n")
-
-const (
- _AT_NULL = 0 // End of vector
- _AT_PAGESZ = 6 // System physical page size
- _AT_HWCAP = 16 // hardware capability bit vector
- _AT_RANDOM = 25 // introduced in 2.6.29
- _AT_HWCAP2 = 26 // hardware capability bit vector 2
-)
-
-var procAuxv = []byte("/proc/self/auxv\x00")
-
-var addrspace_vec [1]byte
-
-func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32
-
-func sysargs(argc int32, argv **byte) {
- n := argc + 1
-
- // skip over argv, envp to get to auxv
- for argv_index(argv, n) != nil {
- n++
- }
-
- // skip NULL separator
- n++
-
- // now argv+n is auxv
- auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
- if sysauxv(auxv[:]) != 0 {
- return
- }
- // In some situations we don't get a loader-provided
- // auxv, such as when loaded as a library on Android.
- // Fall back to /proc/self/auxv.
- fd := open(&procAuxv[0], 0 /* O_RDONLY */, 0)
- if fd < 0 {
- // On Android, /proc/self/auxv might be unreadable (issue 9229), so we
- // fall back to using mincore to detect the physical page size.
- // mincore should return EINVAL when address is not a multiple of system page size.
- const size = 256 << 10 // size of memory region to allocate
- p, err := mmap(nil, size, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
- if err != 0 {
- return
- }
- var n uintptr
- for n = 4 << 10; n < size; n <<= 1 {
- err := mincore(unsafe.Pointer(uintptr(p)+n), 1, &addrspace_vec[0])
- if err == 0 {
- physPageSize = n
- break
- }
- }
- if physPageSize == 0 {
- physPageSize = size
- }
- munmap(p, size)
- return
- }
- var buf [128]uintptr
- n = read(fd, noescape(unsafe.Pointer(&buf[0])), int32(unsafe.Sizeof(buf)))
- closefd(fd)
- if n < 0 {
- return
- }
- // Make sure buf is terminated, even if we didn't read
- // the whole file.
- buf[len(buf)-2] = _AT_NULL
- sysauxv(buf[:])
-}
-
-// startupRandomData holds random bytes initialized at startup. These come from
-// the ELF AT_RANDOM auxiliary vector.
-var startupRandomData []byte
-
-func sysauxv(auxv []uintptr) int {
- var i int
- for ; auxv[i] != _AT_NULL; i += 2 {
- tag, val := auxv[i], auxv[i+1]
- switch tag {
- case _AT_RANDOM:
- // The kernel provides a pointer to 16 bytes'
- // worth of random data.
- startupRandomData = (*[16]byte)(unsafe.Pointer(val))[:]
-
- case _AT_PAGESZ:
- physPageSize = val
- }
-
- archauxv(tag, val)
- vdsoauxv(tag, val)
- }
- return i / 2
-}
-
-var sysTHPSizePath = []byte("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size\x00")
-
-func getHugePageSize() uintptr {
- var numbuf [20]byte
- fd := open(&sysTHPSizePath[0], 0 /* O_RDONLY */, 0)
- if fd < 0 {
- return 0
- }
- ptr := noescape(unsafe.Pointer(&numbuf[0]))
- n := read(fd, ptr, int32(len(numbuf)))
- closefd(fd)
- if n <= 0 {
- return 0
- }
- n-- // remove trailing newline
- v, ok := atoi(slicebytetostringtmp((*byte)(ptr), int(n)))
- if !ok || v < 0 {
- v = 0
- }
- if v&(v-1) != 0 {
- // v is not a power of 2
- return 0
- }
- return uintptr(v)
-}
-
-func osinit() {
- ncpu = getproccount()
- physHugePageSize = getHugePageSize()
- if iscgo {
- // #42494 glibc and musl reserve some signals for
- // internal use and require they not be blocked by
- // the rest of a normal C runtime. When the go runtime
- // blocks...unblocks signals, temporarily, the blocked
- // interval of time is generally very short. As such,
- // these expectations of *libc code are mostly met by
- // the combined go+cgo system of threads. However,
- // when go causes a thread to exit, via a return from
- // mstart(), the combined runtime can deadlock if
- // these signals are blocked. Thus, don't block these
- // signals when exiting threads.
- // - glibc: SIGCANCEL (32), SIGSETXID (33)
- // - musl: SIGTIMER (32), SIGCANCEL (33), SIGSYNCCALL (34)
- sigdelset(&sigsetAllExiting, 32)
- sigdelset(&sigsetAllExiting, 33)
- sigdelset(&sigsetAllExiting, 34)
- }
- osArchInit()
-}
-
-var urandom_dev = []byte("/dev/urandom\x00")
-
-func getRandomData(r []byte) {
- if startupRandomData != nil {
- n := copy(r, startupRandomData)
- extendRandom(r, n)
- return
- }
- fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
- n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
- closefd(fd)
- extendRandom(r, int(n))
-}
-
-func goenvs() {
- goenvs_unix()
-}
-
-// Called to do synchronous initialization of Go code built with
-// -buildmode=c-archive or -buildmode=c-shared.
-// None of the Go runtime is initialized.
-//go:nosplit
-//go:nowritebarrierrec
-func libpreinit() {
- initsig(true)
-}
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
-func mpreinit(mp *m) {
- mp.gsignal = malg(32 * 1024) // Linux wants >= 2K
- mp.gsignal.m = mp
-}
-
-func gettid() uint32
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the new thread, cannot allocate memory.
-func minit() {
- minitSignals()
-
- // Cgo-created threads and the bootstrap m are missing a
- // procid. We need this for asynchronous preemption and it's
- // useful in debuggers.
- getg().m.procid = uint64(gettid())
-}
-
-// Called from dropm to undo the effect of an minit.
-//go:nosplit
-func unminit() {
- unminitSignals()
-}
-
-// Called from exitm, but not from drop, to undo the effect of thread-owned
-// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
-func mdestroy(mp *m) {
-}
-
-//#ifdef GOARCH_386
-//#define sa_handler k_sa_handler
-//#endif
-
-func sigreturn()
-func sigtramp() // Called via C ABI
-func cgoSigtramp()
-
-//go:noescape
-func sigaltstack(new, old *stackt)
-
-//go:noescape
-func setitimer(mode int32, new, old *itimerval)
-
-//go:noescape
-func timer_create(clockid int32, sevp *sigevent, timerid *int32) int32
-
-//go:noescape
-func timer_settime(timerid int32, flags int32, new, old *itimerspec) int32
-
-//go:noescape
-func timer_delete(timerid int32) int32
-
-//go:noescape
-func rtsigprocmask(how int32, new, old *sigset, size int32)
-
-//go:nosplit
-//go:nowritebarrierrec
-func sigprocmask(how int32, new, old *sigset) {
- rtsigprocmask(how, new, old, int32(unsafe.Sizeof(*new)))
-}
-
-func raise(sig uint32)
-func raiseproc(sig uint32)
-
-//go:noescape
-func sched_getaffinity(pid, len uintptr, buf *byte) int32
-func osyield()
-
-//go:nosplit
-func osyield_no_g() {
- osyield()
-}
-
-func pipe() (r, w int32, errno int32)
-func pipe2(flags int32) (r, w int32, errno int32)
-func setNonblock(fd int32)
-
-const (
- _si_max_size = 128
- _sigev_max_size = 64
-)
-
-//go:nosplit
-//go:nowritebarrierrec
-func setsig(i uint32, fn uintptr) {
- var sa sigactiont
- sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTORER | _SA_RESTART
- sigfillset(&sa.sa_mask)
- // Although the Linux manpage says the sa_restorer element is obsolete
- // and should not be used, the x86_64 kernel requires it. Only use it
- // on x86.
- if GOARCH == "386" || GOARCH == "amd64" {
- sa.sa_restorer = abi.FuncPCABI0(sigreturn)
- }
- if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
- if iscgo {
- fn = abi.FuncPCABI0(cgoSigtramp)
- } else {
- fn = abi.FuncPCABI0(sigtramp)
- }
- }
- sa.sa_handler = fn
- sigaction(i, &sa, nil)
-}
-
-//go:nosplit
-//go:nowritebarrierrec
-func setsigstack(i uint32) {
- var sa sigactiont
- sigaction(i, nil, &sa)
- if sa.sa_flags&_SA_ONSTACK != 0 {
- return
- }
- sa.sa_flags |= _SA_ONSTACK
- sigaction(i, &sa, nil)
-}
-
-//go:nosplit
-//go:nowritebarrierrec
-func getsig(i uint32) uintptr {
- var sa sigactiont
- sigaction(i, nil, &sa)
- return sa.sa_handler
-}
-
-// setSignalstackSP sets the ss_sp field of a stackt.
-//go:nosplit
-func setSignalstackSP(s *stackt, sp uintptr) {
- *(*uintptr)(unsafe.Pointer(&s.ss_sp)) = sp
-}
-
-//go:nosplit
-func (c *sigctxt) fixsigcode(sig uint32) {
-}
-
-// sysSigaction calls the rt_sigaction system call.
-//go:nosplit
-func sysSigaction(sig uint32, new, old *sigactiont) {
- if rt_sigaction(uintptr(sig), new, old, unsafe.Sizeof(sigactiont{}.sa_mask)) != 0 {
- // Workaround for bugs in QEMU user mode emulation.
- //
- // QEMU turns calls to the sigaction system call into
- // calls to the C library sigaction call; the C
- // library call rejects attempts to call sigaction for
- // SIGCANCEL (32) or SIGSETXID (33).
- //
- // QEMU rejects calling sigaction on SIGRTMAX (64).
- //
- // Just ignore the error in these cases. There isn't
- // anything we can do about it anyhow.
- if sig != 32 && sig != 33 && sig != 64 {
- // Use system stack to avoid split stack overflow on ppc64/ppc64le.
- systemstack(func() {
- throw("sigaction failed")
- })
- }
- }
-}
-
-// rt_sigaction is implemented in assembly.
-//go:noescape
-func rt_sigaction(sig uintptr, new, old *sigactiont, size uintptr) int32
-
-func getpid() int
-func tgkill(tgid, tid, sig int)
-
-// signalM sends a signal to mp.
-func signalM(mp *m, sig int) {
- tgkill(getpid(), int(mp.procid), sig)
-}
-
-// go118UseTimerCreateProfiler enables the per-thread CPU profiler.
-const go118UseTimerCreateProfiler = true
-
-// validSIGPROF compares this signal delivery's code against the signal sources
-// that the profiler uses, returning whether the delivery should be processed.
-// To be processed, a signal delivery from a known profiling mechanism should
-// correspond to the best profiling mechanism available to this thread. Signals
-// from other sources are always considered valid.
-//
-//go:nosplit
-func validSIGPROF(mp *m, c *sigctxt) bool {
- code := int32(c.sigcode())
- setitimer := code == _SI_KERNEL
- timer_create := code == _SI_TIMER
-
- if !(setitimer || timer_create) {
- // The signal doesn't correspond to a profiling mechanism that the
- // runtime enables itself. There's no reason to process it, but there's
- // no reason to ignore it either.
- return true
- }
-
- if mp == nil {
- // Since we don't have an M, we can't check if there's an active
- // per-thread timer for this thread. We don't know how long this thread
- // has been around, and if it happened to interact with the Go scheduler
- // at a time when profiling was active (causing it to have a per-thread
- // timer). But it may have never interacted with the Go scheduler, or
- // never while profiling was active. To avoid double-counting, process
- // only signals from setitimer.
- //
- // When a custom cgo traceback function has been registered (on
- // platforms that support runtime.SetCgoTraceback), SIGPROF signals
- // delivered to a thread that cannot find a matching M do this check in
- // the assembly implementations of runtime.cgoSigtramp.
- return setitimer
- }
-
- // Having an M means the thread interacts with the Go scheduler, and we can
- // check whether there's an active per-thread timer for this thread.
- if atomic.Load(&mp.profileTimerValid) != 0 {
- // If this M has its own per-thread CPU profiling interval timer, we
- // should track the SIGPROF signals that come from that timer (for
- // accurate reporting of its CPU usage; see issue 35057) and ignore any
- // that it gets from the process-wide setitimer (to not over-count its
- // CPU consumption).
- return timer_create
- }
-
- // No active per-thread timer means the only valid profiler is setitimer.
- return setitimer
-}
-
-func setProcessCPUProfiler(hz int32) {
- setProcessCPUProfilerTimer(hz)
-}
-
-func setThreadCPUProfiler(hz int32) {
- mp := getg().m
- mp.profilehz = hz
-
- if !go118UseTimerCreateProfiler {
- return
- }
-
- // destroy any active timer
- if atomic.Load(&mp.profileTimerValid) != 0 {
- timerid := mp.profileTimer
- atomic.Store(&mp.profileTimerValid, 0)
- mp.profileTimer = 0
-
- ret := timer_delete(timerid)
- if ret != 0 {
- print("runtime: failed to disable profiling timer; timer_delete(", timerid, ") errno=", -ret, "\n")
- throw("timer_delete")
- }
- }
-
- if hz == 0 {
- // If the goal was to disable profiling for this thread, then the job's done.
- return
- }
-
- // The period of the timer should be 1/Hz. For every "1/Hz" of additional
- // work, the user should expect one additional sample in the profile.
- //
- // But to scale down to very small amounts of application work, to observe
- // even CPU usage of "one tenth" of the requested period, set the initial
- // timing delay in a different way: So that "one tenth" of a period of CPU
- // spend shows up as a 10% chance of one sample (for an expected value of
- // 0.1 samples), and so that "two and six tenths" periods of CPU spend show
- // up as a 60% chance of 3 samples and a 40% chance of 2 samples (for an
- // expected value of 2.6). Set the initial delay to a value in the uniform
- // random distribution between 0 and the desired period. And because "0"
- // means "disable timer", add 1 so the half-open interval [0,period) turns
- // into (0,period].
- //
- // Otherwise, this would show up as a bias away from short-lived threads and
- // from threads that are only occasionally active: for example, when the
- // garbage collector runs on a mostly-idle system, the additional threads it
- // activates may do a couple milliseconds of GC-related work and nothing
- // else in the few seconds that the profiler observes.
- spec := new(itimerspec)
- spec.it_value.setNsec(1 + int64(fastrandn(uint32(1e9/hz))))
- spec.it_interval.setNsec(1e9 / int64(hz))
-
- var timerid int32
- var sevp sigevent
- sevp.notify = _SIGEV_THREAD_ID
- sevp.signo = _SIGPROF
- sevp.sigev_notify_thread_id = int32(mp.procid)
- ret := timer_create(_CLOCK_THREAD_CPUTIME_ID, &sevp, &timerid)
- if ret != 0 {
- // If we cannot create a timer for this M, leave profileTimerValid false
- // to fall back to the process-wide setitimer profiler.
- return
- }
-
- ret = timer_settime(timerid, 0, spec, nil)
- if ret != 0 {
- print("runtime: failed to configure profiling timer; timer_settime(", timerid,
- ", 0, {interval: {",
- spec.it_interval.tv_sec, "s + ", spec.it_interval.tv_nsec, "ns} value: {",
- spec.it_value.tv_sec, "s + ", spec.it_value.tv_nsec, "ns}}, nil) errno=", -ret, "\n")
- throw("timer_settime")
- }
-
- mp.profileTimer = timerid
- atomic.Store(&mp.profileTimerValid, 1)
-}
-
-// perThreadSyscallArgs contains the system call number, arguments, and
-// expected return values for a system call to be executed on all threads.
-type perThreadSyscallArgs struct {
- trap uintptr
- a1 uintptr
- a2 uintptr
- a3 uintptr
- a4 uintptr
- a5 uintptr
- a6 uintptr
- r1 uintptr
- r2 uintptr
-}
-
-// perThreadSyscall is the system call to execute for the ongoing
-// doAllThreadsSyscall.
-//
-// perThreadSyscall may only be written while mp.needPerThreadSyscall == 0 on
-// all Ms.
-var perThreadSyscall perThreadSyscallArgs
-
-// syscall_runtime_doAllThreadsSyscall executes a specified system call on
-// all Ms.
-//
-// The system call is expected to succeed and return the same value on every
-// thread. If any threads do not match, the runtime throws.
-//
-//go:linkname syscall_runtime_doAllThreadsSyscall syscall.runtime_doAllThreadsSyscall
-//go:uintptrescapes
-func syscall_runtime_doAllThreadsSyscall(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- if iscgo {
- // In cgo, we are not aware of threads created in C, so this approach will not work.
- panic("doAllThreadsSyscall not supported with cgo enabled")
- }
-
- // STW to guarantee that user goroutines see an atomic change to thread
- // state. Without STW, goroutines could migrate Ms while change is in
- // progress and e.g., see state old -> new -> old -> new.
- //
- // N.B. Internally, this function does not depend on STW to
- // successfully change every thread. It is only needed for user
- // expectations, per above.
- stopTheWorld("doAllThreadsSyscall")
-
- // This function depends on several properties:
- //
- // 1. All OS threads that already exist are associated with an M in
- // allm. i.e., we won't miss any pre-existing threads.
- // 2. All Ms listed in allm will eventually have an OS thread exist.
- // i.e., they will set procid and be able to receive signals.
- // 3. OS threads created after we read allm will clone from a thread
- // that has executed the system call. i.e., they inherit the
- // modified state.
- //
- // We achieve these through different mechanisms:
- //
- // 1. Addition of new Ms to allm in allocm happens before clone of its
- // OS thread later in newm.
- // 2. newm does acquirem to avoid being preempted, ensuring that new Ms
- // created in allocm will eventually reach OS thread clone later in
- // newm.
- // 3. We take allocmLock for write here to prevent allocation of new Ms
- // while this function runs. Per (1), this prevents clone of OS
- // threads that are not yet in allm.
- allocmLock.lock()
-
- // Disable preemption, preventing us from changing Ms, as we handle
- // this M specially.
- //
- // N.B. STW and lock() above do this as well, this is added for extra
- // clarity.
- acquirem()
-
- // N.B. allocmLock also prevents concurrent execution of this function,
- // serializing use of perThreadSyscall, mp.needPerThreadSyscall, and
- // ensuring all threads execute system calls from multiple calls in the
- // same order.
-
- r1, r2, errno := syscall.Syscall6(trap, a1, a2, a3, a4, a5, a6)
- if GOARCH == "ppc64" || GOARCH == "ppc64le" {
- // TODO(https://go.dev/issue/51192): ppc64 doesn't use r2.
- r2 = 0
- }
- if errno != 0 {
- releasem(getg().m)
- allocmLock.unlock()
- startTheWorld()
- return r1, r2, errno
- }
-
- perThreadSyscall = perThreadSyscallArgs{
- trap: trap,
- a1: a1,
- a2: a2,
- a3: a3,
- a4: a4,
- a5: a5,
- a6: a6,
- r1: r1,
- r2: r2,
- }
-
- // Wait for all threads to start.
- //
- // As described above, some Ms have been added to allm prior to
- // allocmLock, but not yet completed OS clone and set procid.
- //
- // At minimum we must wait for a thread to set procid before we can
- // send it a signal.
- //
- // We take this one step further and wait for all threads to start
- // before sending any signals. This prevents system calls from getting
- // applied twice: once in the parent and once in the child, like so:
- //
- // A B C
- // add C to allm
- // doAllThreadsSyscall
- // allocmLock.lock()
- // signal B
- // <receive signal>
- // execute syscall
- // <signal return>
- // clone C
- // <thread start>
- // set procid
- // signal C
- // <receive signal>
- // execute syscall
- // <signal return>
- //
- // In this case, thread C inherited the syscall-modified state from
- // thread B and did not need to execute the syscall, but did anyway
- // because doAllThreadsSyscall could not be sure whether it was
- // required.
- //
- // Some system calls may not be idempotent, so we ensure each thread
- // executes the system call exactly once.
- for mp := allm; mp != nil; mp = mp.alllink {
- for atomic.Load64(&mp.procid) == 0 {
- // Thread is starting.
- osyield()
- }
- }
-
- // Signal every other thread, where they will execute perThreadSyscall
- // from the signal handler.
- gp := getg()
- tid := gp.m.procid
- for mp := allm; mp != nil; mp = mp.alllink {
- if atomic.Load64(&mp.procid) == tid {
- // Our thread already performed the syscall.
- continue
- }
- mp.needPerThreadSyscall.Store(1)
- signalM(mp, sigPerThreadSyscall)
- }
-
- // Wait for all threads to complete.
- for mp := allm; mp != nil; mp = mp.alllink {
- if mp.procid == tid {
- continue
- }
- for mp.needPerThreadSyscall.Load() != 0 {
- osyield()
- }
- }
-
- perThreadSyscall = perThreadSyscallArgs{}
-
- releasem(getg().m)
- allocmLock.unlock()
- startTheWorld()
-
- return r1, r2, errno
-}
-
-// runPerThreadSyscall runs perThreadSyscall for this M if required.
-//
-// This function throws if the system call returns with anything other than the
-// expected values.
-//go:nosplit
-func runPerThreadSyscall() {
- gp := getg()
- if gp.m.needPerThreadSyscall.Load() == 0 {
- return
- }
-
- args := perThreadSyscall
- r1, r2, errno := syscall.Syscall6(args.trap, args.a1, args.a2, args.a3, args.a4, args.a5, args.a6)
- if GOARCH == "ppc64" || GOARCH == "ppc64le" {
- // TODO(https://go.dev/issue/51192): ppc64 doesn't use r2.
- r2 = 0
- }
- if errno != 0 || r1 != args.r1 || r2 != args.r2 {
- print("trap:", args.trap, ", a123456=[", args.a1, ",", args.a2, ",", args.a3, ",", args.a4, ",", args.a5, ",", args.a6, "]\n")
- print("results: got {r1=", r1, ",r2=", r2, ",errno=", errno, "}, want {r1=", args.r1, ",r2=", args.r2, ",errno=0\n")
- throw("AllThreadsSyscall6 results differ between threads; runtime corrupted")
- }
-
- gp.m.needPerThreadSyscall.Store(0)
-}
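
The block comment in setThreadCPUProfiler above reduces to a small calculation: the repeat interval is 1/hz, and the first expiry is drawn uniformly from (0, period] so that a thread burning only a tenth of a period still has a 10% chance of contributing one sample. A hedged sketch of that arithmetic, with math/rand standing in for the runtime's fastrandn and timerSpec as an illustrative name:

// profile_timer_sketch.go — worked example of the it_value/it_interval
// arithmetic used by setThreadCPUProfiler above.
package main

import (
	"fmt"
	"math/rand"
)

// timerSpec returns the initial delay and repeat interval, in
// nanoseconds, for a per-thread CPU profiling timer at the given rate.
// hz must be > 0 (the caller returns early when disabling profiling).
func timerSpec(hz int32) (initialNs, intervalNs int64) {
	periodNs := int64(1e9) / int64(hz) // one sample per 1/hz of CPU time
	// Draw the first expiry uniformly from [0, period), then add 1:
	// timer_settime treats a zero it_value as "disarm", so the +1 turns
	// the half-open [0, period) into (0, period].
	initialNs = 1 + rand.Int63n(periodNs)
	return initialNs, periodNs
}

func main() {
	first, every := timerSpec(100) // 100 Hz: the period is 10ms
	fmt.Printf("first expiry after %dns, then every %dns\n", first, every)
}
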
diff --git a/contrib/go/_std_1.18/src/runtime/os_linux_noauxv.go b/contrib/go/_std_1.18/src/runtime/os_linux_noauxv.go
deleted file mode 100644
index 7b84f713d6..0000000000
--- a/contrib/go/_std_1.18/src/runtime/os_linux_noauxv.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && !arm && !arm64 && !mips && !mipsle && !mips64 && !mips64le && !s390x && !ppc64 && !ppc64le
-
-package runtime
-
-func archauxv(tag, val uintptr) {
-}
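
On the architectures excluded by the build constraint above, archauxv is not a no-op: it records hardware-capability tags from the auxiliary vector that sysauxv in os_linux.go dispatches to it. A sketch of that shape, assuming hwcap/hwcap2 as illustrative stand-ins for the real internal/cpu plumbing:

// archauxv_sketch.go — illustrative version of a non-empty archauxv,
// recording _AT_HWCAP/_AT_HWCAP2 the way auxv-aware ports do.
package main

import "fmt"

const (
	_AT_HWCAP  = 16 // hardware capability bit vector
	_AT_HWCAP2 = 26 // hardware capability bit vector 2
)

// hwcap and hwcap2 are hypothetical destinations; the real runtime
// forwards these values into internal/cpu state.
var hwcap, hwcap2 uint

func archauxv(tag, val uintptr) {
	switch tag {
	case _AT_HWCAP:
		hwcap = uint(val)
	case _AT_HWCAP2:
		hwcap2 = uint(val)
	}
}

func main() {
	// sysauxv walks (tag, val) pairs and calls archauxv for each one.
	archauxv(_AT_HWCAP, 0b1011)
	fmt.Printf("hwcap=%b hwcap2=%b\n", hwcap, hwcap2)
}
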
diff --git a/contrib/go/_std_1.18/src/runtime/panic.go b/contrib/go/_std_1.18/src/runtime/panic.go
deleted file mode 100644
index 6600410cb6..0000000000
--- a/contrib/go/_std_1.18/src/runtime/panic.go
+++ /dev/null
@@ -1,1292 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/goarch"
- "runtime/internal/atomic"
- "runtime/internal/sys"
- "unsafe"
-)
-
-// We have two different ways of doing defers. The older way involves creating a
-// defer record at the time that a defer statement is executing and adding it to a
-// defer chain. This chain is inspected by the deferreturn call at all function
-// exits in order to run the appropriate defer calls. A cheaper way (which we call
-// open-coded defers) is used for functions in which no defer statements occur in
-// loops. In that case, we simply store the defer function/arg information into
-// specific stack slots at the point of each defer statement, as well as setting a
-// bit in a bitmask. At each function exit, we add inline code to directly make
-// the appropriate defer calls based on the bitmask and fn/arg information stored
-// on the stack. During panic/Goexit processing, the appropriate defer calls are
-// made using extra funcdata info that indicates the exact stack slots that
-// contain the bitmask and defer fn/args.
-
-// Check to make sure we can really generate a panic. If the panic
-// was generated from the runtime, or from inside malloc, then convert
-// to a throw of msg.
-// pc should be the program counter of the compiler-generated code that
-// triggered this panic.
-func panicCheck1(pc uintptr, msg string) {
- if goarch.IsWasm == 0 && hasPrefix(funcname(findfunc(pc)), "runtime.") {
- // Note: wasm can't tail call, so we can't get the original caller's pc.
- throw(msg)
- }
- // TODO: is this redundant? How could we be in malloc
- // but not in the runtime? runtime/internal/*, maybe?
- gp := getg()
- if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
- throw(msg)
- }
-}
-
-// Same as above, but calling from the runtime is allowed.
-//
-// Using this function is necessary for any panic that may be
-// generated by runtime.sigpanic, since those are always called by the
-// runtime.
-func panicCheck2(err string) {
- // panic allocates, so to avoid recursive malloc, turn panics
- // during malloc into throws.
- gp := getg()
- if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
- throw(err)
- }
-}
-
-// Many of the following panic entry-points turn into throws when they
-// happen in various runtime contexts. These should never happen in
-// the runtime, and if they do, they indicate a serious issue and
-// should not be caught by user code.
-//
-// The panic{Index,Slice,divide,shift} functions are called by
-// code generated by the compiler for out of bounds index expressions,
-// out of bounds slice expressions, division by zero, and shift by negative.
-// The panicdivide (again), panicoverflow, panicfloat, and panicmem
-// functions are called by the signal handler when a signal occurs
-// indicating the respective problem.
-//
-// Since panic{Index,Slice,shift} are never called directly, and
-// since the runtime package should never have an out of bounds slice
-// or array reference or negative shift, if we see those functions called from the
-// runtime package we turn the panic into a throw. That will dump the
-// entire runtime stack for easier debugging.
-//
-// The entry points called by the signal handler will be called from
-// runtime.sigpanic, so we can't disallow calls from the runtime to
-// these (they always look like they're called from the runtime).
-// Hence, for these, we just check for clearly bad runtime conditions.
-//
-// The panic{Index,Slice} functions are implemented in assembly and tail call
-// to the goPanic{Index,Slice} functions below. This is done so we can use
-// a space-minimal register calling convention.
-
-// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
-func goPanicIndex(x int, y int) {
- panicCheck1(getcallerpc(), "index out of range")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
-}
-func goPanicIndexU(x uint, y int) {
- panicCheck1(getcallerpc(), "index out of range")
- panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
-}
-
-// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
-func goPanicSliceAlen(x int, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
-}
-func goPanicSliceAlenU(x uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
-}
-func goPanicSliceAcap(x int, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
-}
-func goPanicSliceAcapU(x uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
-}
-
-// failures in the comparisons for s[x:y], 0 <= x <= y
-func goPanicSliceB(x int, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
-}
-func goPanicSliceBU(x uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
-}
-
-// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
-func goPanicSlice3Alen(x int, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
-}
-func goPanicSlice3AlenU(x uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
-}
-func goPanicSlice3Acap(x int, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
-}
-func goPanicSlice3AcapU(x uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
-}
-
-// failures in the comparisons for s[:x:y], 0 <= x <= y
-func goPanicSlice3B(x int, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
-}
-func goPanicSlice3BU(x uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
-}
-
-// failures in the comparisons for s[x:y:], 0 <= x <= y
-func goPanicSlice3C(x int, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
-}
-func goPanicSlice3CU(x uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
-}
-
-// failures in the conversion ((*[x]T)(s)), 0 <= x <= y, y == cap(s)
-func goPanicSliceConvert(x int, y int) {
- panicCheck1(getcallerpc(), "slice length too short to convert to pointer to array")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
-}
-
-// Implemented in assembly, as they take arguments in registers.
-// Declared here to mark them as ABIInternal.
-func panicIndex(x int, y int)
-func panicIndexU(x uint, y int)
-func panicSliceAlen(x int, y int)
-func panicSliceAlenU(x uint, y int)
-func panicSliceAcap(x int, y int)
-func panicSliceAcapU(x uint, y int)
-func panicSliceB(x int, y int)
-func panicSliceBU(x uint, y int)
-func panicSlice3Alen(x int, y int)
-func panicSlice3AlenU(x uint, y int)
-func panicSlice3Acap(x int, y int)
-func panicSlice3AcapU(x uint, y int)
-func panicSlice3B(x int, y int)
-func panicSlice3BU(x uint, y int)
-func panicSlice3C(x int, y int)
-func panicSlice3CU(x uint, y int)
-func panicSliceConvert(x int, y int)
-
-var shiftError = error(errorString("negative shift amount"))
-
-func panicshift() {
- panicCheck1(getcallerpc(), "negative shift amount")
- panic(shiftError)
-}
-
-var divideError = error(errorString("integer divide by zero"))
-
-func panicdivide() {
- panicCheck2("integer divide by zero")
- panic(divideError)
-}
-
-var overflowError = error(errorString("integer overflow"))
-
-func panicoverflow() {
- panicCheck2("integer overflow")
- panic(overflowError)
-}
-
-var floatError = error(errorString("floating point error"))
-
-func panicfloat() {
- panicCheck2("floating point error")
- panic(floatError)
-}
-
-var memoryError = error(errorString("invalid memory address or nil pointer dereference"))
-
-func panicmem() {
- panicCheck2("invalid memory address or nil pointer dereference")
- panic(memoryError)
-}
-
-func panicmemAddr(addr uintptr) {
- panicCheck2("invalid memory address or nil pointer dereference")
- panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
-}
-
-// Create a new deferred function fn, which has no arguments and results.
-// The compiler turns a defer statement into a call to this.
-func deferproc(fn func()) {
- gp := getg()
- if gp.m.curg != gp {
- // go code on the system stack can't defer
- throw("defer on system stack")
- }
-
- d := newdefer()
- if d._panic != nil {
- throw("deferproc: d.panic != nil after newdefer")
- }
- d.link = gp._defer
- gp._defer = d
- d.fn = fn
- d.pc = getcallerpc()
- // We must not be preempted between calling getcallersp and
- // storing it to d.sp because getcallersp's result is a
- // uintptr stack pointer.
- d.sp = getcallersp()
-
- // deferproc returns 0 normally.
- // a deferred func that stops a panic
- // makes the deferproc return 1.
- // the code the compiler generates always
- // checks the return value and jumps to the
- // end of the function if deferproc returns != 0.
- return0()
- // No code can go here - the C return register has
- // been set and must not be clobbered.
-}
-
-// deferprocStack queues a new deferred function with a defer record on the stack.
-// The defer record must have its fn field initialized.
-// All other fields can contain junk.
-// Nosplit because of the uninitialized pointer fields on the stack.
-//
-//go:nosplit
-func deferprocStack(d *_defer) {
- gp := getg()
- if gp.m.curg != gp {
- // go code on the system stack can't defer
- throw("defer on system stack")
- }
- // fn is already set.
- // The other fields are junk on entry to deferprocStack and
- // are initialized here.
- d.started = false
- d.heap = false
- d.openDefer = false
- d.sp = getcallersp()
- d.pc = getcallerpc()
- d.framepc = 0
- d.varp = 0
- // The lines below implement:
- // d.panic = nil
- // d.fd = nil
- // d.link = gp._defer
- // gp._defer = d
- // But without write barriers. The first three are writes to
- // the stack so they don't need a write barrier, and furthermore
- // are to uninitialized memory, so they must not use a write barrier.
- // The fourth write does not require a write barrier because we
- // explicitly mark all the defer structures, so we don't need to
- // keep track of pointers to them with a write barrier.
- *(*uintptr)(unsafe.Pointer(&d._panic)) = 0
- *(*uintptr)(unsafe.Pointer(&d.fd)) = 0
- *(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
- *(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))
-
- return0()
- // No code can go here - the C return register has
- // been set and must not be clobbered.
-}
-
-// Each P holds a pool for defers.
-
-// Allocate a Defer, usually using per-P pool.
-// Each defer must be released with freedefer. The defer is not
-// added to any defer chain yet.
-func newdefer() *_defer {
- var d *_defer
- mp := acquirem()
- pp := mp.p.ptr()
- if len(pp.deferpool) == 0 && sched.deferpool != nil {
- lock(&sched.deferlock)
- for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
- d := sched.deferpool
- sched.deferpool = d.link
- d.link = nil
- pp.deferpool = append(pp.deferpool, d)
- }
- unlock(&sched.deferlock)
- }
- if n := len(pp.deferpool); n > 0 {
- d = pp.deferpool[n-1]
- pp.deferpool[n-1] = nil
- pp.deferpool = pp.deferpool[:n-1]
- }
- releasem(mp)
- mp, pp = nil, nil
-
- if d == nil {
- // Allocate new defer.
- d = new(_defer)
- }
- d.heap = true
- return d
-}
-
-// Free the given defer.
-// The defer cannot be used after this call.
-//
-// This is nosplit because the incoming defer is in a perilous state.
-// It's not on any defer list, so stack copying won't adjust stack
-// pointers in it (namely, d.link). Hence, if we were to copy the
-// stack, d could then contain a stale pointer.
-//
-//go:nosplit
-func freedefer(d *_defer) {
- d.link = nil
- // After this point we can copy the stack.
-
- if d._panic != nil {
- freedeferpanic()
- }
- if d.fn != nil {
- freedeferfn()
- }
- if !d.heap {
- return
- }
-
- mp := acquirem()
- pp := mp.p.ptr()
- if len(pp.deferpool) == cap(pp.deferpool) {
- // Transfer half of local cache to the central cache.
- var first, last *_defer
- for len(pp.deferpool) > cap(pp.deferpool)/2 {
- n := len(pp.deferpool)
- d := pp.deferpool[n-1]
- pp.deferpool[n-1] = nil
- pp.deferpool = pp.deferpool[:n-1]
- if first == nil {
- first = d
- } else {
- last.link = d
- }
- last = d
- }
- lock(&sched.deferlock)
- last.link = sched.deferpool
- sched.deferpool = first
- unlock(&sched.deferlock)
- }
-
- *d = _defer{}
-
- pp.deferpool = append(pp.deferpool, d)
-
- releasem(mp)
- mp, pp = nil, nil
-}
-
-// Separate function so that it can split stack.
-// Windows otherwise runs out of stack space.
-func freedeferpanic() {
- // _panic must be cleared before d is unlinked from gp.
- throw("freedefer with d._panic != nil")
-}
-
-func freedeferfn() {
- // fn must be cleared before d is unlinked from gp.
- throw("freedefer with d.fn != nil")
-}
-
-// deferreturn runs deferred functions for the caller's frame.
-// The compiler inserts a call to this at the end of any
-// function which calls defer.
-func deferreturn() {
- gp := getg()
- for {
- d := gp._defer
- if d == nil {
- return
- }
- sp := getcallersp()
- if d.sp != sp {
- return
- }
- if d.openDefer {
- done := runOpenDeferFrame(gp, d)
- if !done {
- throw("unfinished open-coded defers in deferreturn")
- }
- gp._defer = d.link
- freedefer(d)
- // If this frame uses open defers, then this
- // must be the only defer record for the
- // frame, so we can just return.
- return
- }
-
- fn := d.fn
- d.fn = nil
- gp._defer = d.link
- freedefer(d)
- fn()
- }
-}
-
-// Goexit terminates the goroutine that calls it. No other goroutine is affected.
-// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
-// is not a panic, any recover calls in those deferred functions will return nil.
-//
-// Calling Goexit from the main goroutine terminates that goroutine
-// without func main returning. Since func main has not returned,
-// the program continues execution of other goroutines.
-// If all other goroutines exit, the program crashes.
-func Goexit() {
- // Run all deferred functions for the current goroutine.
- // This code is similar to gopanic, see that implementation
- // for detailed comments.
- gp := getg()
-
- // Create a panic object for Goexit, so we can recognize when it might be
- // bypassed by a recover().
- var p _panic
- p.goexit = true
- p.link = gp._panic
- gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
-
- addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
- for {
- d := gp._defer
- if d == nil {
- break
- }
- if d.started {
- if d._panic != nil {
- d._panic.aborted = true
- d._panic = nil
- }
- if !d.openDefer {
- d.fn = nil
- gp._defer = d.link
- freedefer(d)
- continue
- }
- }
- d.started = true
- d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
- if d.openDefer {
- done := runOpenDeferFrame(gp, d)
- if !done {
- // We should always run all defers in the frame,
- // since there is no panic associated with this
- // defer that can be recovered.
- throw("unfinished open-coded defers in Goexit")
- }
- if p.aborted {
- // Since our current defer caused a panic and may
- // have been already freed, just restart scanning
- // for open-coded defers from this frame again.
- addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
- } else {
- addOneOpenDeferFrame(gp, 0, nil)
- }
- } else {
- // Save the pc/sp in deferCallSave(), so we can "recover" back to this
- // loop if necessary.
- deferCallSave(&p, d.fn)
- }
- if p.aborted {
- // We had a recursive panic in the defer d we started, and
- // then did a recover in a defer that was further down the
- // defer chain than d. In the case of an outstanding Goexit,
- // we force the recover to return back to this loop. d will
- // have already been freed if completed, so just continue
- // immediately to the next defer on the chain.
- p.aborted = false
- continue
- }
- if gp._defer != d {
- throw("bad defer entry in Goexit")
- }
- d._panic = nil
- d.fn = nil
- gp._defer = d.link
- freedefer(d)
- // Note: we ignore recovers here because Goexit isn't a panic
- }
- goexit1()
-}
-
-// Call all Error and String methods before freezing the world.
-// Used when crashing while panicking.
-func preprintpanics(p *_panic) {
- defer func() {
- if recover() != nil {
- throw("panic while printing panic value")
- }
- }()
- for p != nil {
- switch v := p.arg.(type) {
- case error:
- p.arg = v.Error()
- case stringer:
- p.arg = v.String()
- }
- p = p.link
- }
-}
-
-// Print all currently active panics. Used when crashing.
-// Should only be called after preprintpanics.
-func printpanics(p *_panic) {
- if p.link != nil {
- printpanics(p.link)
- if !p.link.goexit {
- print("\t")
- }
- }
- if p.goexit {
- return
- }
- print("panic: ")
- printany(p.arg)
- if p.recovered {
- print(" [recovered]")
- }
- print("\n")
-}
-
-// addOneOpenDeferFrame scans the stack (in gentraceback order, from inner frames to
-// outer frames) for the first frame (if any) with open-coded defers. If it finds
-// one, it adds a single entry to the defer chain for that frame. The entry added
-// represents all the defers in the associated open defer frame, and is sorted in
-// order with respect to any non-open-coded defers.
-//
-// addOneOpenDeferFrame stops (possibly without adding a new entry) if it encounters
-// an in-progress open defer entry. An in-progress open defer entry means there has
-// been a new panic because of a defer in the associated frame. addOneOpenDeferFrame
-// does not add an open defer entry past a started entry, because that started entry
-// still needs to be finished, and addOneOpenDeferFrame will be called when that started
-// entry is completed. The defer removal loop in gopanic() similarly stops at an
-// in-progress defer entry. Together, addOneOpenDeferFrame and the defer removal loop
-// ensure the invariant that there is no open defer entry further up the stack than
-// an in-progress defer, and also that the defer removal loop is guaranteed to remove
-// all not-in-progress open defer entries from the defer chain.
-//
-// If sp is non-nil, addOneOpenDeferFrame starts the stack scan from the frame
-// specified by sp. If sp is nil, it uses the sp from the current defer record (which
-// has just been finished). Hence, it continues the stack scan from the frame of the
-// defer that just finished. It skips any frame that already has a (not-in-progress)
-// open-coded _defer record in the defer chain.
-//
-// Note: All entries of the defer chain (including this new open-coded entry) have
-// their pointers (including sp) adjusted properly if the stack moves while
-// running deferred functions. Also, it is safe to pass in the sp arg (which is
-// the direct result of calling getcallersp()), because all pointer variables
-// (including arguments) are adjusted as needed during stack copies.
-func addOneOpenDeferFrame(gp *g, pc uintptr, sp unsafe.Pointer) {
- var prevDefer *_defer
- if sp == nil {
- prevDefer = gp._defer
- pc = prevDefer.framepc
- sp = unsafe.Pointer(prevDefer.sp)
- }
- systemstack(func() {
- gentraceback(pc, uintptr(sp), 0, gp, 0, nil, 0x7fffffff,
- func(frame *stkframe, unused unsafe.Pointer) bool {
- if prevDefer != nil && prevDefer.sp == frame.sp {
- // Skip the frame for the previous defer that
- // we just finished (and was used to set
- // where we restarted the stack scan)
- return true
- }
- f := frame.fn
- fd := funcdata(f, _FUNCDATA_OpenCodedDeferInfo)
- if fd == nil {
- return true
- }
- // Insert the open defer record in the
- // chain, in order sorted by sp.
- d := gp._defer
- var prev *_defer
- for d != nil {
- dsp := d.sp
- if frame.sp < dsp {
- break
- }
- if frame.sp == dsp {
- if !d.openDefer {
- throw("duplicated defer entry")
- }
- // Don't add any record past an
- // in-progress defer entry. We don't
- // need it, and more importantly, we
- // want to keep the invariant that
- // there is no open defer entry
-					// past an in-progress entry (see
- // header comment).
- if d.started {
- return false
- }
- return true
- }
- prev = d
- d = d.link
- }
- if frame.fn.deferreturn == 0 {
- throw("missing deferreturn")
- }
-
- d1 := newdefer()
- d1.openDefer = true
- d1._panic = nil
- // These are the pc/sp to set after we've
- // run a defer in this frame that did a
- // recover. We return to a special
- // deferreturn that runs any remaining
- // defers and then returns from the
- // function.
- d1.pc = frame.fn.entry() + uintptr(frame.fn.deferreturn)
- d1.varp = frame.varp
- d1.fd = fd
- // Save the SP/PC associated with current frame,
- // so we can continue stack trace later if needed.
- d1.framepc = frame.pc
- d1.sp = frame.sp
- d1.link = d
- if prev == nil {
- gp._defer = d1
- } else {
- prev.link = d1
- }
- // Stop stack scanning after adding one open defer record
- return false
- },
- nil, 0)
- })
-}
-
-// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
-// uint32 and a pointer to the byte following the varint.
-//
-// There is a similar function runtime.readvarint, which takes a slice of bytes,
-// rather than an unsafe pointer. These functions are duplicated, because one of
-// the two use cases for the functions would get slower if the functions were
-// combined.
-func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
- var r uint32
- var shift int
- for {
- b := *(*uint8)((unsafe.Pointer(fd)))
- fd = add(fd, unsafe.Sizeof(b))
- if b < 128 {
- return r + uint32(b)<<shift, fd
- }
- r += ((uint32(b) &^ 128) << shift)
- shift += 7
- if shift > 28 {
- panic("Bad varint")
- }
- }
-}
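-
-// For comparison, a sketch of the slice-based variant mentioned above
-// (modeled on runtime.readvarint; shown only to contrast the two use cases):
-//
-//	func readvarint(p []byte) (newp []byte, val uint32) {
-//		var v, shift uint32
-//		for {
-//			b := p[0]
-//			p = p[1:]
-//			v |= uint32(b&0x7F) << (shift & 31)
-//			if b&0x80 == 0 {
-//				break
-//			}
-//			shift += 7
-//		}
-//		return p, v
-//	}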
-
-// runOpenDeferFrame runs the active open-coded defers in the frame specified by
-// d. It normally processes all active defers in the frame, but stops immediately
-// if a defer does a successful recover. It returns true if there are no
-// remaining defers to run in the frame.
-func runOpenDeferFrame(gp *g, d *_defer) bool {
- done := true
- fd := d.fd
-
- deferBitsOffset, fd := readvarintUnsafe(fd)
- nDefers, fd := readvarintUnsafe(fd)
- deferBits := *(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset)))
-
- for i := int(nDefers) - 1; i >= 0; i-- {
- // read the funcdata info for this defer
- var closureOffset uint32
- closureOffset, fd = readvarintUnsafe(fd)
- if deferBits&(1<<i) == 0 {
- continue
- }
- closure := *(*func())(unsafe.Pointer(d.varp - uintptr(closureOffset)))
- d.fn = closure
- deferBits = deferBits &^ (1 << i)
- *(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset))) = deferBits
- p := d._panic
- // Call the defer. Note that this can change d.varp if
- // the stack moves.
- deferCallSave(p, d.fn)
- if p != nil && p.aborted {
- break
- }
- d.fn = nil
- if d._panic != nil && d._panic.recovered {
- done = deferBits == 0
- break
- }
- }
-
- return done
-}
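-
-// To illustrate what the loop above decodes (a sketch with hypothetical
-// names): for a function such as
-//
-//	func f(cond bool) {
-//		defer a() // bit 0
-//		if cond {
-//			defer b() // bit 1
-//		}
-//		// ...
-//	}
-//
-// the funcdata records deferBitsOffset, nDefers (2 here), and one closure
-// offset per defer site. Bit i of deferBits is set iff defer i's statement
-// was reached; the set bits run from highest to lowest, and each bit is
-// cleared before its call so a nested panic sees that defer as done.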
-
-// deferCallSave calls fn() after saving the caller's pc and sp in the
-// panic record. This allows the runtime to return to the Goexit defer
-// processing loop, in the unusual case where the Goexit may be
-// bypassed by a successful recover.
-//
-// This is marked as a wrapper by the compiler so it doesn't appear in
-// tracebacks.
-func deferCallSave(p *_panic, fn func()) {
- if p != nil {
- p.argp = unsafe.Pointer(getargp())
- p.pc = getcallerpc()
- p.sp = unsafe.Pointer(getcallersp())
- }
- fn()
- if p != nil {
- p.pc = 0
- p.sp = unsafe.Pointer(nil)
- }
-}
-
-// The implementation of the predeclared function panic.
-func gopanic(e any) {
- gp := getg()
- if gp.m.curg != gp {
- print("panic: ")
- printany(e)
- print("\n")
- throw("panic on system stack")
- }
-
- if gp.m.mallocing != 0 {
- print("panic: ")
- printany(e)
- print("\n")
- throw("panic during malloc")
- }
- if gp.m.preemptoff != "" {
- print("panic: ")
- printany(e)
- print("\n")
- print("preempt off reason: ")
- print(gp.m.preemptoff)
- print("\n")
- throw("panic during preemptoff")
- }
- if gp.m.locks != 0 {
- print("panic: ")
- printany(e)
- print("\n")
- throw("panic holding locks")
- }
-
- var p _panic
- p.arg = e
- p.link = gp._panic
- gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
-
- atomic.Xadd(&runningPanicDefers, 1)
-
- // By calculating getcallerpc/getcallersp here, we avoid scanning the
- // gopanic frame (stack scanning is slow...)
- addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
-
- for {
- d := gp._defer
- if d == nil {
- break
- }
-
- // If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
- // take defer off list. An earlier panic will not continue running, but we will make sure below that an
- // earlier Goexit does continue running.
- if d.started {
- if d._panic != nil {
- d._panic.aborted = true
- }
- d._panic = nil
- if !d.openDefer {
- // For open-coded defers, we need to process the
- // defer again, in case there are any other defers
- // to call in the frame (not including the defer
- // call that caused the panic).
- d.fn = nil
- gp._defer = d.link
- freedefer(d)
- continue
- }
- }
-
- // Mark defer as started, but keep on list, so that traceback
- // can find and update the defer's argument frame if stack growth
- // or a garbage collection happens before executing d.fn.
- d.started = true
-
- // Record the panic that is running the defer.
- // If there is a new panic during the deferred call, that panic
- // will find d in the list and will mark d._panic (this panic) aborted.
- d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
-
- done := true
- if d.openDefer {
- done = runOpenDeferFrame(gp, d)
- if done && !d._panic.recovered {
- addOneOpenDeferFrame(gp, 0, nil)
- }
- } else {
- p.argp = unsafe.Pointer(getargp())
- d.fn()
- }
- p.argp = nil
-
- // Deferred function did not panic. Remove d.
- if gp._defer != d {
- throw("bad defer entry in panic")
- }
- d._panic = nil
-
- // trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
- //GC()
-
- pc := d.pc
- sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
- if done {
- d.fn = nil
- gp._defer = d.link
- freedefer(d)
- }
- if p.recovered {
- gp._panic = p.link
- if gp._panic != nil && gp._panic.goexit && gp._panic.aborted {
- // A normal recover would bypass/abort the Goexit. Instead,
- // we return to the processing loop of the Goexit.
- gp.sigcode0 = uintptr(gp._panic.sp)
- gp.sigcode1 = uintptr(gp._panic.pc)
- mcall(recovery)
- throw("bypassed recovery failed") // mcall should not return
- }
- atomic.Xadd(&runningPanicDefers, -1)
-
- // After a recover, remove any remaining non-started,
- // open-coded defer entries, since the corresponding defers
- // will be executed normally (inline). Any such entry will
- // become stale once we run the corresponding defers inline
- // and exit the associated stack frame. We only remove up to
- // the first started (in-progress) open defer entry, not
- // including the current frame, since any higher entries will
- // be from a higher panic in progress, and will still be
- // needed.
- d := gp._defer
- var prev *_defer
- if !done {
- // Skip our current frame, if not done. It is
- // needed to complete any remaining defers in
- // deferreturn()
- prev = d
- d = d.link
- }
- for d != nil {
- if d.started {
- // This defer is started but we
- // are in the middle of a
- // defer-panic-recover inside of
- // it, so don't remove it or any
- // further defer entries
- break
- }
- if d.openDefer {
- if prev == nil {
- gp._defer = d.link
- } else {
- prev.link = d.link
- }
- newd := d.link
- freedefer(d)
- d = newd
- } else {
- prev = d
- d = d.link
- }
- }
-
- gp._panic = p.link
- // Aborted panics are marked but remain on the g.panic list.
- // Remove them from the list.
- for gp._panic != nil && gp._panic.aborted {
- gp._panic = gp._panic.link
- }
- if gp._panic == nil { // must be done with signal
- gp.sig = 0
- }
- // Pass information about recovering frame to recovery.
- gp.sigcode0 = uintptr(sp)
- gp.sigcode1 = pc
- mcall(recovery)
- throw("recovery failed") // mcall should not return
- }
- }
-
- // ran out of deferred calls - old-school panic now
- // Because it is unsafe to call arbitrary user code after freezing
- // the world, we call preprintpanics to invoke all necessary Error
- // and String methods to prepare the panic strings before startpanic.
- preprintpanics(gp._panic)
-
- fatalpanic(gp._panic) // should not return
- *(*int)(nil) = 0 // not reached
-}
-
-// getargp returns the location where the caller
-// writes outgoing function call arguments.
-//go:nosplit
-//go:noinline
-func getargp() uintptr {
- return getcallersp() + sys.MinFrameSize
-}
-
-// The implementation of the predeclared function recover.
-// Cannot split the stack because it needs to reliably
-// find the stack segment of its caller.
-//
-// TODO(rsc): Once we commit to CopyStackAlways,
-// this doesn't need to be nosplit.
-//go:nosplit
-func gorecover(argp uintptr) any {
- // Must be in a function running as part of a deferred call during the panic.
- // Must be called from the topmost function of the call
- // (the function used in the defer statement).
- // p.argp is the argument pointer of that topmost deferred function call.
- // Compare against argp reported by caller.
- // If they match, the caller is the one who can recover.
- gp := getg()
- p := gp._panic
- if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
- p.recovered = true
- return p.arg
- }
- return nil
-}
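-
-// A user-level sketch of the argp check above: recover succeeds only when
-// called directly by the deferred function, because only then does the
-// caller-reported argp match p.argp.
-//
-//	func handle() {
-//		defer func() {
-//			if r := recover(); r != nil { // argp matches: this is the
-//				fmt.Println("recovered:", r) // topmost deferred function
-//			}
-//		}()
-//		panic("boom")
-//	}
-//
-// A recover buried one call deeper (say, in a helper the deferred function
-// calls) reports a different argp and so returns nil.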
-
-//go:linkname sync_throw sync.throw
-func sync_throw(s string) {
- throw(s)
-}
-
-//go:nosplit
-func throw(s string) {
- // Everything throw does should be recursively nosplit so it
- // can be called even when it's unsafe to grow the stack.
- systemstack(func() {
- print("fatal error: ", s, "\n")
- })
- gp := getg()
- if gp.m.throwing == 0 {
- gp.m.throwing = 1
- }
- fatalthrow()
- *(*int)(nil) = 0 // not reached
-}
-
-// runningPanicDefers is non-zero while running deferred functions for panic.
-// runningPanicDefers is incremented and decremented atomically.
-// This is used to try hard to get a panic stack trace out when exiting.
-var runningPanicDefers uint32
-
-// panicking is non-zero when crashing the program for an unrecovered panic.
-// panicking is incremented and decremented atomically.
-var panicking uint32
-
-// paniclk is held while printing the panic information and stack trace,
-// so that two concurrent panics don't overlap their output.
-var paniclk mutex
-
-// Unwind the stack after a deferred function calls recover
-// after a panic. Then arrange to continue running as though
-// the caller of the deferred function returned normally.
-func recovery(gp *g) {
- // Info about defer passed in G struct.
- sp := gp.sigcode0
- pc := gp.sigcode1
-
- // d's arguments need to be in the stack.
- if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
- print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
- throw("bad recovery")
- }
-
- // Make the deferproc for this d return again,
- // this time returning 1. The calling function will
- // jump to the standard return epilogue.
- gp.sched.sp = sp
- gp.sched.pc = pc
- gp.sched.lr = 0
- gp.sched.ret = 1
- gogo(&gp.sched)
-}
-
-// fatalthrow implements an unrecoverable runtime throw. It freezes the
-// system, prints stack traces starting from its caller, and terminates the
-// process.
-//
-//go:nosplit
-func fatalthrow() {
- pc := getcallerpc()
- sp := getcallersp()
- gp := getg()
- // Switch to the system stack to avoid any stack growth, which
- // may make things worse if the runtime is in a bad state.
- systemstack(func() {
- startpanic_m()
-
- if dopanic_m(gp, pc, sp) {
- // crash uses a decent amount of nosplit stack and we're already
- // low on stack in throw, so crash on the system stack (unlike
- // fatalpanic).
- crash()
- }
-
- exit(2)
- })
-
- *(*int)(nil) = 0 // not reached
-}
-
-// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
-// that if msgs != nil, fatalpanic also prints panic messages and decrements
-// runningPanicDefers once main is blocked from exiting.
-//
-//go:nosplit
-func fatalpanic(msgs *_panic) {
- pc := getcallerpc()
- sp := getcallersp()
- gp := getg()
- var docrash bool
- // Switch to the system stack to avoid any stack growth, which
- // may make things worse if the runtime is in a bad state.
- systemstack(func() {
- if startpanic_m() && msgs != nil {
- // There were panic messages and startpanic_m
- // says it's okay to try to print them.
-
- // startpanic_m set panicking, which will
- // block main from exiting, so now OK to
- // decrement runningPanicDefers.
- atomic.Xadd(&runningPanicDefers, -1)
-
- printpanics(msgs)
- }
-
- docrash = dopanic_m(gp, pc, sp)
- })
-
- if docrash {
- // By crashing outside the above systemstack call, debuggers
- // will not be confused when generating a backtrace.
- // Function crash is marked nosplit to avoid stack growth.
- crash()
- }
-
- systemstack(func() {
- exit(2)
- })
-
- *(*int)(nil) = 0 // not reached
-}
-
-// startpanic_m prepares for an unrecoverable panic.
-//
-// It returns true if panic messages should be printed, or false if
-// the runtime is in bad shape and should just print stacks.
-//
-// It must not have write barriers even though the write barrier
-// explicitly ignores writes once dying > 0. Write barriers still
-// assume that g.m.p != nil, and this function may not have a P
-// in some contexts (e.g. a panic in a signal handler for a signal
-// sent to an M with no P).
-//
-//go:nowritebarrierrec
-func startpanic_m() bool {
- _g_ := getg()
- if mheap_.cachealloc.size == 0 { // very early
- print("runtime: panic before malloc heap initialized\n")
- }
- // Disallow malloc during an unrecoverable panic. A panic
- // could happen in a signal handler, or in a throw, or inside
- // malloc itself. We want to catch if an allocation ever does
- // happen (even if we're not in one of these situations).
- _g_.m.mallocing++
-
- // If we're dying because of a bad lock count, set it to a
- // good lock count so we don't recursively panic below.
- if _g_.m.locks < 0 {
- _g_.m.locks = 1
- }
-
- switch _g_.m.dying {
- case 0:
- // Setting dying >0 has the side-effect of disabling this G's writebuf.
- _g_.m.dying = 1
- atomic.Xadd(&panicking, 1)
- lock(&paniclk)
- if debug.schedtrace > 0 || debug.scheddetail > 0 {
- schedtrace(true)
- }
- freezetheworld()
- return true
- case 1:
- // Something failed while panicking.
- // Just print a stack trace and exit.
- _g_.m.dying = 2
- print("panic during panic\n")
- return false
- case 2:
- // This is a genuine bug in the runtime, we couldn't even
- // print the stack trace successfully.
- _g_.m.dying = 3
- print("stack trace unavailable\n")
- exit(4)
- fallthrough
- default:
- // Can't even print! Just exit.
- exit(5)
- return false // Need to return something.
- }
-}
-
-var didothers bool
-var deadlock mutex
-
-func dopanic_m(gp *g, pc, sp uintptr) bool {
- if gp.sig != 0 {
- signame := signame(gp.sig)
- if signame != "" {
- print("[signal ", signame)
- } else {
- print("[signal ", hex(gp.sig))
- }
- print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
- }
-
- level, all, docrash := gotraceback()
- _g_ := getg()
- if level > 0 {
- if gp != gp.m.curg {
- all = true
- }
- if gp != gp.m.g0 {
- print("\n")
- goroutineheader(gp)
- traceback(pc, sp, 0, gp)
- } else if level >= 2 || _g_.m.throwing > 0 {
- print("\nruntime stack:\n")
- traceback(pc, sp, 0, gp)
- }
- if !didothers && all {
- didothers = true
- tracebackothers(gp)
- }
- }
- unlock(&paniclk)
-
- if atomic.Xadd(&panicking, -1) != 0 {
- // Some other m is panicking too.
- // Let it print what it needs to print.
- // Wait forever without chewing up cpu.
- // It will exit when it's done.
- lock(&deadlock)
- lock(&deadlock)
- }
-
- printDebugLog()
-
- return docrash
-}
-
-// canpanic returns false if a signal should throw instead of
-// panicking.
-//
-//go:nosplit
-func canpanic(gp *g) bool {
- // Note that g is m->gsignal, different from gp.
- // Note also that g->m can change at preemption, so m can go stale
- // if this function ever makes a function call.
- _g_ := getg()
- mp := _g_.m
-
- // Is it okay for gp to panic instead of crashing the program?
- // Yes, as long as it is running Go code, not runtime code,
- // and not stuck in a system call.
- if gp == nil || gp != mp.curg {
- return false
- }
- if mp.locks != 0 || mp.mallocing != 0 || mp.throwing != 0 || mp.preemptoff != "" || mp.dying != 0 {
- return false
- }
- status := readgstatus(gp)
- if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
- return false
- }
- if GOOS == "windows" && mp.libcallsp != 0 {
- return false
- }
- return true
-}
-
-// shouldPushSigpanic reports whether pc should be used as sigpanic's
-// return PC (pushing a frame for the call). Otherwise, it should be
-// left alone so that LR is used as sigpanic's return PC, effectively
-// replacing the top-most frame with sigpanic. This is used by
-// preparePanic.
-func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
- if pc == 0 {
- // Probably a call to a nil func. The old LR is more
- // useful in the stack trace. Not pushing the frame
- // will make the trace look like a call to sigpanic
- // instead. (Otherwise the trace will end at sigpanic
- // and we won't get to see who faulted.)
- return false
- }
- // If we don't recognize the PC as code, but we do recognize
- // the link register as code, then this assumes the panic was
- // caused by a call to non-code. In this case, we want to
- // ignore this call to make unwinding show the context.
- //
-	// If we're running C code, we're not going to recognize pc as a
- // Go function, so just assume it's good. Otherwise, traceback
- // may try to read a stale LR that looks like a Go code
- // pointer and wander into the woods.
- if gp.m.incgo || findfunc(pc).valid() {
- // This wasn't a bad call, so use PC as sigpanic's
- // return PC.
- return true
- }
- if findfunc(lr).valid() {
- // This was a bad call, but the LR is good, so use the
- // LR as sigpanic's return PC.
- return false
- }
-	// Neither the PC nor the LR is good. Hopefully pushing a frame
- // will work.
- return true
-}
-
-// isAbortPC reports whether pc is the program counter at which
-// runtime.abort raises a signal.
-//
-// It is nosplit because it's part of the isgoexception
-// implementation.
-//
-//go:nosplit
-func isAbortPC(pc uintptr) bool {
- f := findfunc(pc)
- if !f.valid() {
- return false
- }
- return f.funcID == funcID_abort
-}
diff --git a/contrib/go/_std_1.18/src/runtime/proc.go b/contrib/go/_std_1.18/src/runtime/proc.go
deleted file mode 100644
index b997a467ba..0000000000
--- a/contrib/go/_std_1.18/src/runtime/proc.go
+++ /dev/null
@@ -1,6244 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/abi"
- "internal/cpu"
- "internal/goarch"
- "runtime/internal/atomic"
- "runtime/internal/sys"
- "unsafe"
-)
-
-// set using cmd/go/internal/modload.ModInfoProg
-var modinfo string
-
-// Goroutine scheduler
-// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
-//
-// The main concepts are:
-// G - goroutine.
-// M - worker thread, or machine.
-// P - processor, a resource that is required to execute Go code.
-// M must have an associated P to execute Go code, however it can be
-// blocked or in a syscall w/o an associated P.
-//
-// Design doc at https://golang.org/s/go11sched.
-
-// Worker thread parking/unparking.
-// We need to balance between keeping enough running worker threads to utilize
-// available hardware parallelism and parking excessive running worker threads
-// to conserve CPU resources and power. This is not simple for two reasons:
-// (1) scheduler state is intentionally distributed (in particular, per-P work
-// queues), so it is not possible to compute global predicates on fast paths;
-// (2) for optimal thread management we would need to know the future (don't park
-// a worker thread when a new goroutine will be readied in near future).
-//
-// Three rejected approaches that would work badly:
-// 1. Centralize all scheduler state (would inhibit scalability).
-// 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
-//    is a spare P, unpark a thread and hand the P and the goroutine off to it.
-//    This would lead to thread state thrashing, as the thread that readied the
-//    goroutine can be out of work the very next moment, and we would then need
-//    to park it. Also, it would destroy locality of computation, as we want to
-//    keep dependent goroutines on the same thread, and it would introduce
-//    additional latency.
-// 3. Unpark an additional thread whenever we ready a goroutine and there is an
-// idle P, but don't do handoff. This would lead to excessive thread parking/
-// unparking as the additional threads will instantly park without discovering
-// any work to do.
-//
-// The current approach:
-//
-// This approach applies to three primary sources of potential work: readying a
-// goroutine, new/modified-earlier timers, and idle-priority GC. See below for
-// additional details.
-//
-// We unpark an additional thread when we submit work if (this is wakep()):
-// 1. There is an idle P, and
-// 2. There are no "spinning" worker threads.
-//
-// A worker thread is considered spinning if it is out of local work and did
-// not find work in the global run queue or netpoller; the spinning state is
-// denoted in m.spinning and in sched.nmspinning. Threads unparked this way are
-// also considered spinning; we don't do goroutine handoff so such threads are
-// out of work initially. Spinning threads spin on looking for work in per-P
-// run queues and timer heaps or from the GC before parking. If a spinning
-// thread finds work it takes itself out of the spinning state and proceeds to
-// execution. If it does not find work it takes itself out of the spinning
-// state and then parks.
-//
-// If there is at least one spinning thread (sched.nmspinning>0), we don't
-// unpark new threads when submitting work. To compensate for that, if the last
-// spinning thread finds work and stops spinning, it must unpark a new spinning
-// thread. This approach smooths out unjustified spikes of thread unparking,
-// but at the same time guarantees eventual maximal CPU parallelism
-// utilization.
-//
-// The main implementation complication is that we need to be very careful
-// during spinning->non-spinning thread transition. This transition can race
-// with submission of new work, and either one part or another needs to unpark
-// another worker thread. If they both fail to do that, we can end up with
-// semi-persistent CPU underutilization.
-//
-// The general pattern for submission is:
-// 1. Submit work to the local run queue, timer heap, or GC state.
-// 2. #StoreLoad-style memory barrier.
-// 3. Check sched.nmspinning.
-//
-// The general pattern for spinning->non-spinning transition is:
-// 1. Decrement nmspinning.
-// 2. #StoreLoad-style memory barrier.
-// 3. Check all per-P work queues and GC for new work.
-//
-// Note that all this complexity does not apply to global run queue as we are
-// not sloppy about thread unparking when submitting to global queue. Also see
-// comments for nmspinning manipulation.
-//
-// How these different sources of work behave varies, though it doesn't affect
-// the synchronization approach:
-// * Ready goroutine: this is an obvious source of work; the goroutine is
-// immediately ready and must run on some thread eventually.
-// * New/modified-earlier timer: The current timer implementation (see time.go)
-// uses netpoll in a thread with no work available to wait for the soonest
-// timer. If there is no thread waiting, we want a new spinning thread to go
-// wait.
-// * Idle-priority GC: The GC wakes a stopped idle thread to contribute to
-// background GC work (note: currently disabled per golang.org/issue/19112).
-// Also see golang.org/issue/44313, as this should be extended to all GC
-// workers.
-
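-// In code form, the unpark condition described above is roughly the
-// following (a sketch of wakep's shape, not its exact body):
-//
-//	if atomic.Load(&sched.npidle) == 0 {
-//		return // no idle P for a new thread to run on
-//	}
-//	if atomic.Load(&sched.nmspinning) != 0 ||
-//		!atomic.Cas(&sched.nmspinning, 0, 1) {
-//		return // someone else is spinning (or raced us into spinning)
-//	}
-//	startm(nil, true) // start an M in the spinning state
-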
-var (
- m0 m
- g0 g
- mcache0 *mcache
- raceprocctx0 uintptr
-)
-
-//go:linkname runtime_inittask runtime..inittask
-var runtime_inittask initTask
-
-//go:linkname main_inittask main..inittask
-var main_inittask initTask
-
-// main_init_done is a signal used by cgocallbackg that initialization
-// has been completed. It is made before _cgo_notify_runtime_init_done,
-// so all cgo calls can rely on it existing. When main_init is complete,
-// it is closed, meaning cgocallbackg can reliably receive from it.
-var main_init_done chan bool
-
-//go:linkname main_main main.main
-func main_main()
-
-// mainStarted indicates that the main M has started.
-var mainStarted bool
-
-// runtimeInitTime is the nanotime() at which the runtime started.
-var runtimeInitTime int64
-
-// Value to use for signal mask for newly created M's.
-var initSigmask sigset
-
-// The main goroutine.
-func main() {
- g := getg()
-
- // Racectx of m0->g0 is used only as the parent of the main goroutine.
- // It must not be used for anything else.
- g.m.g0.racectx = 0
-
- // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
- // Using decimal instead of binary GB and MB because
- // they look nicer in the stack overflow failure message.
- if goarch.PtrSize == 8 {
- maxstacksize = 1000000000
- } else {
- maxstacksize = 250000000
- }
-
- // An upper limit for max stack size. Used to avoid random crashes
- // after calling SetMaxStack and trying to allocate a stack that is too big,
- // since stackalloc works with 32-bit sizes.
- maxstackceiling = 2 * maxstacksize
-
- // Allow newproc to start new Ms.
- mainStarted = true
-
- if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
- systemstack(func() {
- newm(sysmon, nil, -1)
- })
- }
-
- // Lock the main goroutine onto this, the main OS thread,
- // during initialization. Most programs won't care, but a few
- // do require certain calls to be made by the main thread.
- // Those can arrange for main.main to run in the main thread
- // by calling runtime.LockOSThread during initialization
- // to preserve the lock.
- lockOSThread()
-
- if g.m != &m0 {
- throw("runtime.main not on m0")
- }
-
- // Record when the world started.
- // Must be before doInit for tracing init.
- runtimeInitTime = nanotime()
- if runtimeInitTime == 0 {
- throw("nanotime returning zero")
- }
-
- if debug.inittrace != 0 {
- inittrace.id = getg().goid
- inittrace.active = true
- }
-
- doInit(&runtime_inittask) // Must be before defer.
-
- // Defer unlock so that runtime.Goexit during init does the unlock too.
- needUnlock := true
- defer func() {
- if needUnlock {
- unlockOSThread()
- }
- }()
-
- gcenable()
-
- main_init_done = make(chan bool)
- if iscgo {
- if _cgo_thread_start == nil {
- throw("_cgo_thread_start missing")
- }
- if GOOS != "windows" {
- if _cgo_setenv == nil {
- throw("_cgo_setenv missing")
- }
- if _cgo_unsetenv == nil {
- throw("_cgo_unsetenv missing")
- }
- }
- if _cgo_notify_runtime_init_done == nil {
- throw("_cgo_notify_runtime_init_done missing")
- }
- // Start the template thread in case we enter Go from
- // a C-created thread and need to create a new thread.
- startTemplateThread()
- cgocall(_cgo_notify_runtime_init_done, nil)
- }
-
- doInit(&main_inittask)
-
-	// Disable init tracing after main init is done to avoid the overhead
-	// of collecting statistics in malloc and newproc.
- inittrace.active = false
-
- close(main_init_done)
-
- needUnlock = false
- unlockOSThread()
-
- if isarchive || islibrary {
- // A program compiled with -buildmode=c-archive or c-shared
- // has a main, but it is not executed.
- return
- }
- fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
- fn()
- if raceenabled {
- racefini()
- }
-
- // Make racy client program work: if panicking on
- // another goroutine at the same time as main returns,
- // let the other goroutine finish printing the panic trace.
- // Once it does, it will exit. See issues 3934 and 20018.
- if atomic.Load(&runningPanicDefers) != 0 {
- // Running deferred functions should not take long.
- for c := 0; c < 1000; c++ {
- if atomic.Load(&runningPanicDefers) == 0 {
- break
- }
- Gosched()
- }
- }
- if atomic.Load(&panicking) != 0 {
- gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
- }
-
- exit(0)
- for {
- var x *int32
- *x = 0
- }
-}
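-
-// A user-level sketch of the init-time locking described above
-// (hypothetical program, not part of the runtime):
-//
-//	package main
-//
-//	import "runtime"
-//
-//	func init() {
-//		// Keep main.main on the main OS thread, e.g. for C GUI libraries.
-//		runtime.LockOSThread()
-//	}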
-
-// os_beforeExit is called from os.Exit(0).
-//go:linkname os_beforeExit os.runtime_beforeExit
-func os_beforeExit() {
- if raceenabled {
- racefini()
- }
-}
-
-// start forcegc helper goroutine
-func init() {
- go forcegchelper()
-}
-
-func forcegchelper() {
- forcegc.g = getg()
- lockInit(&forcegc.lock, lockRankForcegc)
- for {
- lock(&forcegc.lock)
- if forcegc.idle != 0 {
- throw("forcegc: phase error")
- }
- atomic.Store(&forcegc.idle, 1)
- goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceEvGoBlock, 1)
- // this goroutine is explicitly resumed by sysmon
- if debug.gctrace > 0 {
- println("GC forced")
- }
- // Time-triggered, fully concurrent.
- gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
- }
-}
-
-//go:nosplit
-
-// Gosched yields the processor, allowing other goroutines to run. It does not
-// suspend the current goroutine, so execution resumes automatically.
-func Gosched() {
- checkTimeouts()
- mcall(gosched_m)
-}
-
-// goschedguarded yields the processor like gosched, but also checks
-// for forbidden states and opts out of the yield in those cases.
-//go:nosplit
-func goschedguarded() {
- mcall(goschedguarded_m)
-}
-
-// Puts the current goroutine into a waiting state and calls unlockf on the
-// system stack.
-//
-// If unlockf returns false, the goroutine is resumed.
-//
-// unlockf must not access this G's stack, as it may be moved between
-// the call to gopark and the call to unlockf.
-//
-// Note that because unlockf is called after putting the G into a waiting
-// state, the G may have already been readied by the time unlockf is called
-// unless there is external synchronization preventing the G from being
-// readied. If unlockf returns false, it must guarantee that the G cannot be
-// externally readied.
-//
-// Reason explains why the goroutine has been parked. It is displayed in stack
-// traces and heap dumps. Reasons should be unique and descriptive. Do not
-// re-use reasons; add new ones.
-func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
- if reason != waitReasonSleep {
- checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
- }
- mp := acquirem()
- gp := mp.curg
- status := readgstatus(gp)
- if status != _Grunning && status != _Gscanrunning {
- throw("gopark: bad g status")
- }
- mp.waitlock = lock
- mp.waitunlockf = unlockf
- gp.waitreason = reason
- mp.waittraceev = traceEv
- mp.waittraceskip = traceskip
- releasem(mp)
- // can't do anything that might move the G between Ms here.
- mcall(park_m)
-}
-
-// Puts the current goroutine into a waiting state and unlocks the lock.
-// The goroutine can be made runnable again by calling goready(gp).
-func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
- gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
-}
-
-func goready(gp *g, traceskip int) {
- systemstack(func() {
- ready(gp, traceskip, true)
- })
-}
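-
-// A sketch of how a blocking primitive pairs these two (illustrative shape
-// only; l is a runtime mutex the caller holds):
-//
-//	// Block: unlockf runs on the system stack once gp is in _Gwaiting;
-//	// returning false would cancel the park and resume immediately.
-//	gopark(parkunlock_c, unsafe.Pointer(&l), waitReasonSemacquire,
-//		traceEvGoBlockSync, 1)
-//
-//	// Unblock, from another goroutine, once the condition holds:
-//	goready(gp, 1)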
-
-//go:nosplit
-func acquireSudog() *sudog {
- // Delicate dance: the semaphore implementation calls
- // acquireSudog, acquireSudog calls new(sudog),
- // new calls malloc, malloc can call the garbage collector,
- // and the garbage collector calls the semaphore implementation
- // in stopTheWorld.
- // Break the cycle by doing acquirem/releasem around new(sudog).
- // The acquirem/releasem increments m.locks during new(sudog),
- // which keeps the garbage collector from being invoked.
- mp := acquirem()
- pp := mp.p.ptr()
- if len(pp.sudogcache) == 0 {
- lock(&sched.sudoglock)
- // First, try to grab a batch from central cache.
- for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
- s := sched.sudogcache
- sched.sudogcache = s.next
- s.next = nil
- pp.sudogcache = append(pp.sudogcache, s)
- }
- unlock(&sched.sudoglock)
- // If the central cache is empty, allocate a new one.
- if len(pp.sudogcache) == 0 {
- pp.sudogcache = append(pp.sudogcache, new(sudog))
- }
- }
- n := len(pp.sudogcache)
- s := pp.sudogcache[n-1]
- pp.sudogcache[n-1] = nil
- pp.sudogcache = pp.sudogcache[:n-1]
- if s.elem != nil {
- throw("acquireSudog: found s.elem != nil in cache")
- }
- releasem(mp)
- return s
-}
-
-//go:nosplit
-func releaseSudog(s *sudog) {
- if s.elem != nil {
- throw("runtime: sudog with non-nil elem")
- }
- if s.isSelect {
- throw("runtime: sudog with non-false isSelect")
- }
- if s.next != nil {
- throw("runtime: sudog with non-nil next")
- }
- if s.prev != nil {
- throw("runtime: sudog with non-nil prev")
- }
- if s.waitlink != nil {
- throw("runtime: sudog with non-nil waitlink")
- }
- if s.c != nil {
- throw("runtime: sudog with non-nil c")
- }
- gp := getg()
- if gp.param != nil {
- throw("runtime: releaseSudog with non-nil gp.param")
- }
- mp := acquirem() // avoid rescheduling to another P
- pp := mp.p.ptr()
- if len(pp.sudogcache) == cap(pp.sudogcache) {
- // Transfer half of local cache to the central cache.
- var first, last *sudog
- for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
- n := len(pp.sudogcache)
- p := pp.sudogcache[n-1]
- pp.sudogcache[n-1] = nil
- pp.sudogcache = pp.sudogcache[:n-1]
- if first == nil {
- first = p
- } else {
- last.next = p
- }
- last = p
- }
- lock(&sched.sudoglock)
- last.next = sched.sudogcache
- sched.sudogcache = first
- unlock(&sched.sudoglock)
- }
- pp.sudogcache = append(pp.sudogcache, s)
- releasem(mp)
-}
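-
-// Taken together, acquireSudog and releaseSudog implement a two-level free
-// list: per-P caches that refill from, and spill half of their capacity back
-// to, the central sched.sudogcache under sched.sudoglock. The lifecycle, in
-// sketch form:
-//
-//	s := acquireSudog() // per-P cache, refilled from the central list if empty
-//	s.g = getg()
-//	// ... link s onto a channel or semaphore wait queue and park ...
-//	s.g = nil
-//	releaseSudog(s) // back to the per-P cache; spills half when it is full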
-
-// called from assembly
-func badmcall(fn func(*g)) {
- throw("runtime: mcall called on m->g0 stack")
-}
-
-func badmcall2(fn func(*g)) {
- throw("runtime: mcall function returned")
-}
-
-func badreflectcall() {
- panic(plainError("arg size to reflect.call more than 1GB"))
-}
-
-var badmorestackg0Msg = "fatal: morestack on g0\n"
-
-//go:nosplit
-//go:nowritebarrierrec
-func badmorestackg0() {
- sp := stringStructOf(&badmorestackg0Msg)
- write(2, sp.str, int32(sp.len))
-}
-
-var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"
-
-//go:nosplit
-//go:nowritebarrierrec
-func badmorestackgsignal() {
- sp := stringStructOf(&badmorestackgsignalMsg)
- write(2, sp.str, int32(sp.len))
-}
-
-//go:nosplit
-func badctxt() {
- throw("ctxt != 0")
-}
-
-func lockedOSThread() bool {
- gp := getg()
- return gp.lockedm != 0 && gp.m.lockedg != 0
-}
-
-var (
- // allgs contains all Gs ever created (including dead Gs), and thus
- // never shrinks.
- //
- // Access via the slice is protected by allglock or stop-the-world.
- // Readers that cannot take the lock may (carefully!) use the atomic
- // variables below.
- allglock mutex
- allgs []*g
-
- // allglen and allgptr are atomic variables that contain len(allgs) and
- // &allgs[0] respectively. Proper ordering depends on totally-ordered
- // loads and stores. Writes are protected by allglock.
- //
- // allgptr is updated before allglen. Readers should read allglen
-	// before allgptr to ensure that allglen never exceeds the length of
-	// the array allgptr points to. New Gs appended during the race can be
-	// missed. For a consistent view of all Gs, allglock must be held.
- //
- // allgptr copies should always be stored as a concrete type or
- // unsafe.Pointer, not uintptr, to ensure that GC can still reach it
- // even if it points to a stale array.
- allglen uintptr
- allgptr **g
-)
-
-func allgadd(gp *g) {
- if readgstatus(gp) == _Gidle {
- throw("allgadd: bad status Gidle")
- }
-
- lock(&allglock)
- allgs = append(allgs, gp)
- if &allgs[0] != allgptr {
- atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
- }
- atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
- unlock(&allglock)
-}
-
-// allGsSnapshot returns a snapshot of the slice of all Gs.
-//
-// The world must be stopped or allglock must be held.
-func allGsSnapshot() []*g {
- assertWorldStoppedOrLockHeld(&allglock)
-
- // Because the world is stopped or allglock is held, allgadd
- // cannot happen concurrently with this. allgs grows
- // monotonically and existing entries never change, so we can
- // simply return a copy of the slice header. For added safety,
- // we trim everything past len because that can still change.
- return allgs[:len(allgs):len(allgs)]
-}
-
-// atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex.
-func atomicAllG() (**g, uintptr) {
- length := atomic.Loaduintptr(&allglen)
- ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
- return ptr, length
-}
-
-// atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.
-func atomicAllGIndex(ptr **g, i uintptr) *g {
- return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
-}
-
-// forEachG calls fn on every G from allgs.
-//
-// forEachG takes a lock to exclude concurrent addition of new Gs.
-func forEachG(fn func(gp *g)) {
- lock(&allglock)
- for _, gp := range allgs {
- fn(gp)
- }
- unlock(&allglock)
-}
-
-// forEachGRace calls fn on every G from allgs.
-//
-// forEachGRace avoids locking, but does not exclude addition of new Gs during
-// execution, which may be missed.
-func forEachGRace(fn func(gp *g)) {
- ptr, length := atomicAllG()
- for i := uintptr(0); i < length; i++ {
- gp := atomicAllGIndex(ptr, i)
- fn(gp)
- }
- return
-}
-
-const (
- // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
-	// 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
- _GoidCacheBatch = 16
-)
-
-// cpuinit extracts the environment variable GODEBUG from the environment on
-// Unix-like operating systems and calls internal/cpu.Initialize.
-func cpuinit() {
- const prefix = "GODEBUG="
- var env string
-
- switch GOOS {
- case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
- cpu.DebugOptions = true
-
- // Similar to goenv_unix but extracts the environment value for
- // GODEBUG directly.
- // TODO(moehrmann): remove when general goenvs() can be called before cpuinit()
- n := int32(0)
- for argv_index(argv, argc+1+n) != nil {
- n++
- }
-
- for i := int32(0); i < n; i++ {
- p := argv_index(argv, argc+1+i)
- s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)}))
-
- if hasPrefix(s, prefix) {
- env = gostring(p)[len(prefix):]
- break
- }
- }
- }
-
- cpu.Initialize(env)
-
-	// CPU feature support variables are used in code generated by the compiler
-	// to guard execution of instructions that cannot be assumed to be always supported.
- switch GOARCH {
- case "386", "amd64":
- x86HasPOPCNT = cpu.X86.HasPOPCNT
- x86HasSSE41 = cpu.X86.HasSSE41
- x86HasFMA = cpu.X86.HasFMA
-
- case "arm":
- armHasVFPv4 = cpu.ARM.HasVFPv4
-
- case "arm64":
- arm64HasATOMICS = cpu.ARM64.HasATOMICS
- }
-}
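-
-// For reference, the value extracted above is the raw GODEBUG payload.
-// For example (illustrative flags), launching a program as
-//
-//	GODEBUG=cpu.avx2=off,cpu.sse41=off ./prog
-//
-// passes "cpu.avx2=off,cpu.sse41=off" to cpu.Initialize, which clears the
-// corresponding feature bits before they are copied into the variables above.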
-
-// The bootstrap sequence is:
-//
-// call osinit
-// call schedinit
-// make & queue new G
-// call runtime·mstart
-//
-// The new G calls runtime·main.
-func schedinit() {
- lockInit(&sched.lock, lockRankSched)
- lockInit(&sched.sysmonlock, lockRankSysmon)
- lockInit(&sched.deferlock, lockRankDefer)
- lockInit(&sched.sudoglock, lockRankSudog)
- lockInit(&deadlock, lockRankDeadlock)
- lockInit(&paniclk, lockRankPanic)
- lockInit(&allglock, lockRankAllg)
- lockInit(&allpLock, lockRankAllp)
- lockInit(&reflectOffs.lock, lockRankReflectOffs)
- lockInit(&finlock, lockRankFin)
- lockInit(&trace.bufLock, lockRankTraceBuf)
- lockInit(&trace.stringsLock, lockRankTraceStrings)
- lockInit(&trace.lock, lockRankTrace)
- lockInit(&cpuprof.lock, lockRankCpuprof)
- lockInit(&trace.stackTab.lock, lockRankTraceStackTab)
- // Enforce that this lock is always a leaf lock.
- // All of this lock's critical sections should be
- // extremely short.
- lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
-
- // raceinit must be the first call to race detector.
- // In particular, it must be done before mallocinit below calls racemapshadow.
- _g_ := getg()
- if raceenabled {
- _g_.racectx, raceprocctx0 = raceinit()
- }
-
- sched.maxmcount = 10000
-
- // The world starts stopped.
- worldStopped()
-
- moduledataverify()
- stackinit()
- mallocinit()
- cpuinit() // must run before alginit
- alginit() // maps, hash, fastrand must not be used before this call
- fastrandinit() // must run before mcommoninit
- mcommoninit(_g_.m, -1)
- modulesinit() // provides activeModules
- typelinksinit() // uses maps, activeModules
- itabsinit() // uses activeModules
- stkobjinit() // must run before GC starts
-
- sigsave(&_g_.m.sigmask)
- initSigmask = _g_.m.sigmask
-
- if offset := unsafe.Offsetof(sched.timeToRun); offset%8 != 0 {
- println(offset)
- throw("sched.timeToRun not aligned to 8 bytes")
- }
-
- goargs()
- goenvs()
- parsedebugvars()
- gcinit()
-
- lock(&sched.lock)
- sched.lastpoll = uint64(nanotime())
- procs := ncpu
- if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
- procs = n
- }
- if procresize(procs) != nil {
- throw("unknown runnable goroutine during bootstrap")
- }
- unlock(&sched.lock)
-
- // World is effectively started now, as P's can run.
- worldStarted()
-
- // For cgocheck > 1, we turn on the write barrier at all times
- // and check all pointer writes. We can't do this until after
- // procresize because the write barrier needs a P.
- if debug.cgocheck > 1 {
- writeBarrier.cgo = true
- writeBarrier.enabled = true
- for _, p := range allp {
- p.wbBuf.reset()
- }
- }
-
- if buildVersion == "" {
- // Condition should never trigger. This code just serves
- // to ensure runtime·buildVersion is kept in the resulting binary.
- buildVersion = "unknown"
- }
- if len(modinfo) == 1 {
- // Condition should never trigger. This code just serves
- // to ensure runtime·modinfo is kept in the resulting binary.
- modinfo = ""
- }
-}
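-
-// Note that the GOMAXPROCS environment variable consulted above only seeds
-// the initial P count; user code may still resize later, for example:
-//
-//	prev := runtime.GOMAXPROCS(4) // set to 4, returning the previous value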
-
-func dumpgstatus(gp *g) {
- _g_ := getg()
- print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
- print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
-}
-
-// sched.lock must be held.
-func checkmcount() {
- assertLockHeld(&sched.lock)
-
- if mcount() > sched.maxmcount {
- print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
- throw("thread exhaustion")
- }
-}
-
-// mReserveID returns the next ID to use for a new m. This new m is immediately
-// considered 'running' by checkdead.
-//
-// sched.lock must be held.
-func mReserveID() int64 {
- assertLockHeld(&sched.lock)
-
- if sched.mnext+1 < sched.mnext {
- throw("runtime: thread ID overflow")
- }
- id := sched.mnext
- sched.mnext++
- checkmcount()
- return id
-}
-
-// Pre-allocated ID may be passed as 'id', or omitted by passing -1.
-func mcommoninit(mp *m, id int64) {
- _g_ := getg()
-
-	// g0 stack won't make sense for user (and is not necessarily unwindable).
- if _g_ != _g_.m.g0 {
- callers(1, mp.createstack[:])
- }
-
- lock(&sched.lock)
-
- if id >= 0 {
- mp.id = id
- } else {
- mp.id = mReserveID()
- }
-
- lo := uint32(int64Hash(uint64(mp.id), fastrandseed))
- hi := uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
- if lo|hi == 0 {
- hi = 1
- }
- // Same behavior as for 1.17.
-	// TODO: Simplify this.
- if goarch.BigEndian {
- mp.fastrand = uint64(lo)<<32 | uint64(hi)
- } else {
- mp.fastrand = uint64(hi)<<32 | uint64(lo)
- }
-
- mpreinit(mp)
- if mp.gsignal != nil {
- mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
- }
-
- // Add to allm so garbage collector doesn't free g->m
- // when it is just in a register or thread-local storage.
- mp.alllink = allm
-
- // NumCgoCall() iterates over allm w/o schedlock,
- // so we need to publish it safely.
- atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
- unlock(&sched.lock)
-
- // Allocate memory to hold a cgo traceback if the cgo call crashes.
- if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
- mp.cgoCallers = new(cgoCallers)
- }
-}
-
-var fastrandseed uintptr
-
-func fastrandinit() {
- s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
- getRandomData(s)
-}
-
-// Mark gp ready to run.
-func ready(gp *g, traceskip int, next bool) {
- if trace.enabled {
- traceGoUnpark(gp, traceskip)
- }
-
- status := readgstatus(gp)
-
- // Mark runnable.
- _g_ := getg()
- mp := acquirem() // disable preemption because it can be holding p in a local var
- if status&^_Gscan != _Gwaiting {
- dumpgstatus(gp)
- throw("bad g->status in ready")
- }
-
- // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
- casgstatus(gp, _Gwaiting, _Grunnable)
- runqput(_g_.m.p.ptr(), gp, next)
- wakep()
- releasem(mp)
-}
-
-// freezeStopWait is a large value that freezetheworld sets
-// sched.stopwait to in order to request that all Gs permanently stop.
-const freezeStopWait = 0x7fffffff
-
-// freezing is set to non-zero if the runtime is trying to freeze the
-// world.
-var freezing uint32
-
-// Similar to stopTheWorld but best-effort and can be called several times.
-// There is no reverse operation; it is used during crashing.
-// This function must not lock any mutexes.
-func freezetheworld() {
- atomic.Store(&freezing, 1)
- // stopwait and preemption requests can be lost
- // due to races with concurrently executing threads,
- // so try several times
- for i := 0; i < 5; i++ {
- // this should tell the scheduler to not start any new goroutines
- sched.stopwait = freezeStopWait
- atomic.Store(&sched.gcwaiting, 1)
- // this should stop running goroutines
- if !preemptall() {
- break // no running goroutines
- }
- usleep(1000)
- }
- // to be sure
- usleep(1000)
- preemptall()
- usleep(1000)
-}
-
-// All reads and writes of g's status go through readgstatus, casgstatus,
-// castogscanstatus, and casfrom_Gscanstatus.
-//go:nosplit
-func readgstatus(gp *g) uint32 {
- return atomic.Load(&gp.atomicstatus)
-}
-
-// The Gscanstatuses are acting like locks and this releases them.
-// If it proves to be a performance hit we should be able to make these
-// simple atomic stores but for now we are going to throw if
-// we see an inconsistent state.
-func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
- success := false
-
- // Check that transition is valid.
- switch oldval {
- default:
- print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
- dumpgstatus(gp)
- throw("casfrom_Gscanstatus:top gp->status is not in scan state")
- case _Gscanrunnable,
- _Gscanwaiting,
- _Gscanrunning,
- _Gscansyscall,
- _Gscanpreempted:
- if newval == oldval&^_Gscan {
- success = atomic.Cas(&gp.atomicstatus, oldval, newval)
- }
- }
- if !success {
- print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
- dumpgstatus(gp)
- throw("casfrom_Gscanstatus: gp->status is not in scan state")
- }
- releaseLockRank(lockRankGscan)
-}
-
-// This will return false if the gp is not in the expected status and the cas fails.
-// This acts like a lock acquire, while casfrom_Gscanstatus acts like a lock release.
-func castogscanstatus(gp *g, oldval, newval uint32) bool {
- switch oldval {
- case _Grunnable,
- _Grunning,
- _Gwaiting,
- _Gsyscall:
- if newval == oldval|_Gscan {
- r := atomic.Cas(&gp.atomicstatus, oldval, newval)
- if r {
- acquireLockRank(lockRankGscan)
- }
- return r
-
- }
- }
- print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
- throw("castogscanstatus")
- panic("not reached")
-}
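-
-// A sketch of the acquire/release pairing these helpers provide (used, for
-// example, when inspecting another goroutine's stack):
-//
-//	if castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
-//		// gp cannot leave _Gwaiting while the _Gscan bit is held.
-//		// ... examine gp's stack ...
-//		casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
-//	}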
-
-// If asked to move to or from a Gscanstatus this will throw. Use castogscanstatus
-// and casfrom_Gscanstatus instead.
-// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
-// put it in the Gscan state is finished.
-//go:nosplit
-func casgstatus(gp *g, oldval, newval uint32) {
- if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
- systemstack(func() {
- print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
- throw("casgstatus: bad incoming values")
- })
- }
-
- acquireLockRank(lockRankGscan)
- releaseLockRank(lockRankGscan)
-
- // See https://golang.org/cl/21503 for justification of the yield delay.
- const yieldDelay = 5 * 1000
- var nextYield int64
-
- // loop if gp->atomicstatus is in a scan state giving
- // GC time to finish and change the state to oldval.
- for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
- if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
- throw("casgstatus: waiting for Gwaiting but is Grunnable")
- }
- if i == 0 {
- nextYield = nanotime() + yieldDelay
- }
- if nanotime() < nextYield {
- for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
- procyield(1)
- }
- } else {
- osyield()
- nextYield = nanotime() + yieldDelay/2
- }
- }
-
- // Handle tracking for scheduling latencies.
- if oldval == _Grunning {
- // Track every 8th time a goroutine transitions out of running.
- if gp.trackingSeq%gTrackingPeriod == 0 {
- gp.tracking = true
- }
- gp.trackingSeq++
- }
- if gp.tracking {
- if oldval == _Grunnable {
- // We transitioned out of runnable, so measure how much
- // time we spent in this state and add it to
- // runnableTime.
- now := nanotime()
- gp.runnableTime += now - gp.runnableStamp
- gp.runnableStamp = 0
- }
- if newval == _Grunnable {
- // We just transitioned into runnable, so record what
- // time that happened.
- now := nanotime()
- gp.runnableStamp = now
- } else if newval == _Grunning {
- // We're transitioning into running, so turn off
- // tracking and record how much time we spent in
- // runnable.
- gp.tracking = false
- sched.timeToRun.record(gp.runnableTime)
- gp.runnableTime = 0
- }
- }
-}
-
-// casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
-// Returns old status. Cannot call casgstatus directly, because we are racing with an
-// async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
-// it might have become Grunnable by the time we get to the cas. If we called casgstatus,
-// it would loop waiting for the status to go back to Gwaiting, which it never will.
-//go:nosplit
-func casgcopystack(gp *g) uint32 {
- for {
- oldstatus := readgstatus(gp) &^ _Gscan
- if oldstatus != _Gwaiting && oldstatus != _Grunnable {
- throw("copystack: bad status, not Gwaiting or Grunnable")
- }
- if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
- return oldstatus
- }
- }
-}
-
-// casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted.
-//
-// TODO(austin): This is the only status operation that both changes
-// the status and locks the _Gscan bit. Rethink this.
-func casGToPreemptScan(gp *g, old, new uint32) {
- if old != _Grunning || new != _Gscan|_Gpreempted {
- throw("bad g transition")
- }
- acquireLockRank(lockRankGscan)
- for !atomic.Cas(&gp.atomicstatus, _Grunning, _Gscan|_Gpreempted) {
- }
-}
-
-// casGFromPreempted attempts to transition gp from _Gpreempted to
-// _Gwaiting. If successful, the caller is responsible for
-// re-scheduling gp.
-func casGFromPreempted(gp *g, old, new uint32) bool {
- if old != _Gpreempted || new != _Gwaiting {
- throw("bad g transition")
- }
- return atomic.Cas(&gp.atomicstatus, _Gpreempted, _Gwaiting)
-}
-
-// stopTheWorld stops all P's from executing goroutines, interrupting
-// all goroutines at GC safe points and records reason as the reason
-// for the stop. On return, only the current goroutine's P is running.
-// stopTheWorld must not be called from a system stack and the caller
-// must not hold worldsema. The caller must call startTheWorld when
-// other P's should resume execution.
-//
-// stopTheWorld is safe for multiple goroutines to call at the
-// same time. Each will execute its own stop, and the stops will
-// be serialized.
-//
-// This is also used by routines that do stack dumps. If the system is
-// in panic or being exited, this may not reliably stop all
-// goroutines.
-func stopTheWorld(reason string) {
- semacquire(&worldsema)
- gp := getg()
- gp.m.preemptoff = reason
- systemstack(func() {
- // Mark the goroutine which called stopTheWorld preemptible so its
- // stack may be scanned.
- // This lets a mark worker scan us while we try to stop the world
- // since otherwise we could get in a mutual preemption deadlock.
- // We must not modify anything on the G stack because a stack shrink
- // may occur. A stack shrink is otherwise OK though because in order
- // to return from this function (and to leave the system stack) we
- // must have preempted all goroutines, including any attempting
- // to scan our stack, in which case, any stack shrinking will
- // have already completed by the time we exit.
- casgstatus(gp, _Grunning, _Gwaiting)
- stopTheWorldWithSema()
- casgstatus(gp, _Gwaiting, _Grunning)
- })
-}
-
-// startTheWorld undoes the effects of stopTheWorld.
-func startTheWorld() {
- systemstack(func() { startTheWorldWithSema(false) })
-
- // worldsema must be held over startTheWorldWithSema to ensure
- // gomaxprocs cannot change while worldsema is held.
- //
- // Release worldsema with direct handoff to the next waiter, but
- // acquirem so that semrelease1 doesn't try to yield our time.
- //
- // Otherwise if e.g. ReadMemStats is being called in a loop,
- // it might stomp on other attempts to stop the world, such as
- // for starting or ending GC. The operation this blocks is
- // so heavy-weight that we should just try to be as fair as
- // possible here.
- //
- // We don't want to just allow us to get preempted between now
- // and releasing the semaphore because then we keep everyone
- // (including, for example, GCs) waiting longer.
- mp := acquirem()
- mp.preemptoff = ""
- semrelease1(&worldsema, true, 0)
- releasem(mp)
-}
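-
-// A typical caller pairs the two as follows (sketch; ReadMemStats does
-// essentially this):
-//
-//	stopTheWorld("read mem stats")
-//	// ... observe state while no other P is running ...
-//	startTheWorld()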
-
-// stopTheWorldGC has the same effect as stopTheWorld, but blocks
-// until the GC is not running. It also blocks a GC from starting
-// until startTheWorldGC is called.
-func stopTheWorldGC(reason string) {
- semacquire(&gcsema)
- stopTheWorld(reason)
-}
-
-// startTheWorldGC undoes the effects of stopTheWorldGC.
-func startTheWorldGC() {
- startTheWorld()
- semrelease(&gcsema)
-}
-
-// Holding worldsema grants an M the right to try to stop the world.
-var worldsema uint32 = 1
-
-// Holding gcsema grants the M the right to block a GC, and blocks
-// until the current GC is done. In particular, it prevents gomaxprocs
-// from changing concurrently.
-//
-// TODO(mknyszek): Once gomaxprocs and the execution tracer can handle
-// being changed/enabled during a GC, remove this.
-var gcsema uint32 = 1
-
-// stopTheWorldWithSema is the core implementation of stopTheWorld.
-// The caller is responsible for acquiring worldsema and disabling
-// preemption first and then should call stopTheWorldWithSema on the system
-// stack:
-//
-// semacquire(&worldsema, 0)
-// m.preemptoff = "reason"
-// systemstack(stopTheWorldWithSema)
-//
-// When finished, the caller must either call startTheWorld or undo
-// these three operations separately:
-//
-// m.preemptoff = ""
-// systemstack(startTheWorldWithSema)
-// semrelease(&worldsema)
-//
-// It is allowed to acquire worldsema once and then execute multiple
-// startTheWorldWithSema/stopTheWorldWithSema pairs.
-// Other P's are able to execute between successive calls to
-// startTheWorldWithSema and stopTheWorldWithSema.
-// Holding worldsema causes any other goroutines invoking
-// stopTheWorld to block.
-func stopTheWorldWithSema() {
- _g_ := getg()
-
- // If we hold a lock, then we won't be able to stop another M
- // that is blocked trying to acquire the lock.
- if _g_.m.locks > 0 {
- throw("stopTheWorld: holding locks")
- }
-
- lock(&sched.lock)
- sched.stopwait = gomaxprocs
- atomic.Store(&sched.gcwaiting, 1)
- preemptall()
- // stop current P
- _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
- sched.stopwait--
- // try to retake all P's in Psyscall status
- for _, p := range allp {
- s := p.status
- if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
- if trace.enabled {
- traceGoSysBlock(p)
- traceProcStop(p)
- }
- p.syscalltick++
- sched.stopwait--
- }
- }
- // stop idle P's
- for {
- p := pidleget()
- if p == nil {
- break
- }
- p.status = _Pgcstop
- sched.stopwait--
- }
- wait := sched.stopwait > 0
- unlock(&sched.lock)
-
- // wait for remaining P's to stop voluntarily
- if wait {
- for {
- // wait for 100us, then try to re-preempt in case of any races
- if notetsleep(&sched.stopnote, 100*1000) {
- noteclear(&sched.stopnote)
- break
- }
- preemptall()
- }
- }
-
- // sanity checks
- bad := ""
- if sched.stopwait != 0 {
- bad = "stopTheWorld: not stopped (stopwait != 0)"
- } else {
- for _, p := range allp {
- if p.status != _Pgcstop {
- bad = "stopTheWorld: not stopped (status != _Pgcstop)"
- }
- }
- }
- if atomic.Load(&freezing) != 0 {
- // Some other thread is panicking. This can cause the
- // sanity checks above to fail if the panic happens in
- // the signal handler on a stopped thread. Either way,
- // we should halt this thread.
- lock(&deadlock)
- lock(&deadlock)
- }
- if bad != "" {
- throw(bad)
- }
-
- worldStopped()
-}
-
-func startTheWorldWithSema(emitTraceEvent bool) int64 {
- assertWorldStopped()
-
- mp := acquirem() // disable preemption because it can be holding p in a local var
- if netpollinited() {
- list := netpoll(0) // non-blocking
- injectglist(&list)
- }
- lock(&sched.lock)
-
- procs := gomaxprocs
- if newprocs != 0 {
- procs = newprocs
- newprocs = 0
- }
- p1 := procresize(procs)
- sched.gcwaiting = 0
- if sched.sysmonwait != 0 {
- sched.sysmonwait = 0
- notewakeup(&sched.sysmonnote)
- }
- unlock(&sched.lock)
-
- worldStarted()
-
- for p1 != nil {
- p := p1
- p1 = p1.link.ptr()
- if p.m != 0 {
- mp := p.m.ptr()
- p.m = 0
- if mp.nextp != 0 {
- throw("startTheWorld: inconsistent mp->nextp")
- }
- mp.nextp.set(p)
- notewakeup(&mp.park)
- } else {
- // Start M to run P. Do not start another M below.
- newm(nil, p, -1)
- }
- }
-
- // Capture start-the-world time before doing clean-up tasks.
- startTime := nanotime()
- if emitTraceEvent {
- traceGCSTWDone()
- }
-
- // Wake up an additional proc in case we have excess runnable goroutines
- // in local queues or in the global queue. If we don't, the proc will park itself.
- // If there is a lot of excess work, resetspinning will unpark additional procs as necessary.
- wakep()
-
- releasem(mp)
-
- return startTime
-}
-
-// usesLibcall indicates whether this runtime performs system calls
-// via libcall.
-func usesLibcall() bool {
- switch GOOS {
- case "aix", "darwin", "illumos", "ios", "solaris", "windows":
- return true
- case "openbsd":
- return GOARCH == "386" || GOARCH == "amd64" || GOARCH == "arm" || GOARCH == "arm64"
- }
- return false
-}
-
-// mStackIsSystemAllocated indicates whether this runtime starts on a
-// system-allocated stack.
-func mStackIsSystemAllocated() bool {
- switch GOOS {
- case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
- return true
- case "openbsd":
- switch GOARCH {
- case "386", "amd64", "arm", "arm64":
- return true
- }
- }
- return false
-}
-
-// mstart is the entry-point for new Ms.
-// It is written in assembly, uses ABI0, is marked TOPFRAME, and calls mstart0.
-func mstart()
-
-// mstart0 is the Go entry-point for new Ms.
-// This must not split the stack because we may not even have stack
-// bounds set up yet.
-//
-// May run during STW (because it doesn't have a P yet), so write
-// barriers are not allowed.
-//
-//go:nosplit
-//go:nowritebarrierrec
-func mstart0() {
- _g_ := getg()
-
- osStack := _g_.stack.lo == 0
- if osStack {
- // Initialize stack bounds from system stack.
- // Cgo may have left stack size in stack.hi.
- // minit may update the stack bounds.
- //
- // Note: these bounds may not be very accurate.
- // We set hi to &size, but there are things above
- // it. The 1024 is supposed to compensate for this,
- // but is somewhat arbitrary.
- size := _g_.stack.hi
- if size == 0 {
- size = 8192 * sys.StackGuardMultiplier
- }
- _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
- _g_.stack.lo = _g_.stack.hi - size + 1024
- }
- // Initialize stack guard so that we can start calling regular
- // Go code.
- _g_.stackguard0 = _g_.stack.lo + _StackGuard
- // This is the g0, so we can also call go:systemstack
- // functions, which check stackguard1.
- _g_.stackguard1 = _g_.stackguard0
- mstart1()
-
- // Exit this thread.
- if mStackIsSystemAllocated() {
- // Windows, Solaris, illumos, Darwin, AIX and Plan 9 always system-allocate
- // the stack, but put it in _g_.stack before mstart,
- // so the logic above hasn't set osStack yet.
- osStack = true
- }
- mexit(osStack)
-}
-
-// The go:noinline is to guarantee that the getcallerpc/getcallersp calls below are safe,
-// so that we can set up g0.sched to return to the call of mstart1 above.
-//go:noinline
-func mstart1() {
- _g_ := getg()
-
- if _g_ != _g_.m.g0 {
- throw("bad runtime·mstart")
- }
-
- // Set up m.g0.sched as a label returning to just
- // after the mstart1 call in mstart0 above, for use by goexit0 and mcall.
- // We're never coming back to mstart1 after we call schedule,
- // so other calls can reuse the current frame.
- // And goexit0 does a gogo that needs to return from mstart1
- // and let mstart0 exit the thread.
- _g_.sched.g = guintptr(unsafe.Pointer(_g_))
- _g_.sched.pc = getcallerpc()
- _g_.sched.sp = getcallersp()
-
- asminit()
- minit()
-
- // Install signal handlers; after minit so that minit can
- // prepare the thread to be able to handle the signals.
- if _g_.m == &m0 {
- mstartm0()
- }
-
- if fn := _g_.m.mstartfn; fn != nil {
- fn()
- }
-
- if _g_.m != &m0 {
- acquirep(_g_.m.nextp.ptr())
- _g_.m.nextp = 0
- }
- schedule()
-}
-
-// mstartm0 implements part of mstart1 that only runs on the m0.
-//
-// Write barriers are allowed here because we know the GC can't be
-// running yet, so they'll be no-ops.
-//
-//go:yeswritebarrierrec
-func mstartm0() {
- // Create an extra M for callbacks on threads not created by Go.
- // An extra M is also needed on Windows for callbacks created by
- // syscall.NewCallback. See issue #6751 for details.
- if (iscgo || GOOS == "windows") && !cgoHasExtraM {
- cgoHasExtraM = true
- newextram()
- }
- initsig(false)
-}
-
-// mPark causes a thread to park itself, returning once woken.
-//go:nosplit
-func mPark() {
- gp := getg()
- notesleep(&gp.m.park)
- noteclear(&gp.m.park)
-}
-
-// mexit tears down and exits the current thread.
-//
-// Don't call this directly to exit the thread, since it must run at
-// the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
-// unwind the stack to the point that exits the thread.
-//
-// It is entered with m.p != nil, so write barriers are allowed. It
-// will release the P before exiting.
-//
-//go:yeswritebarrierrec
-func mexit(osStack bool) {
- g := getg()
- m := g.m
-
- if m == &m0 {
- // This is the main thread. Just wedge it.
- //
- // On Linux, exiting the main thread puts the process
- // into a non-waitable zombie state. On Plan 9,
- // exiting the main thread unblocks wait even though
- // other threads are still running. On Solaris we can
- // neither exitThread nor return from mstart. Other
- // bad things probably happen on other platforms.
- //
- // We could try to clean up this M more before wedging
- // it, but that complicates signal handling.
- handoffp(releasep())
- lock(&sched.lock)
- sched.nmfreed++
- checkdead()
- unlock(&sched.lock)
- mPark()
- throw("locked m0 woke up")
- }
-
- sigblock(true)
- unminit()
-
- // Free the gsignal stack.
- if m.gsignal != nil {
- stackfree(m.gsignal.stack)
- // On some platforms, when calling into VDSO (e.g. nanotime)
- // we store our g on the gsignal stack, if there is one.
- // Now the stack is freed, unlink it from the m, so we
- // won't write to it when calling VDSO code.
- m.gsignal = nil
- }
-
- // Remove m from allm.
- lock(&sched.lock)
- for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
- if *pprev == m {
- *pprev = m.alllink
- goto found
- }
- }
- throw("m not found in allm")
-found:
- if !osStack {
- // Delay reaping m until it's done with the stack.
- //
- // If this is using an OS stack, the OS will free it
- // so there's no need for reaping.
- atomic.Store(&m.freeWait, 1)
- // Put m on the free list, though it will not be reaped until
- // freeWait is 0. Note that the free list must not be linked
- // through alllink because some functions walk allm without
- // locking, so may be using alllink.
- m.freelink = sched.freem
- sched.freem = m
- }
- unlock(&sched.lock)
-
- atomic.Xadd64(&ncgocall, int64(m.ncgocall))
-
- // Release the P.
- handoffp(releasep())
- // After this point we must not have write barriers.
-
- // Invoke the deadlock detector. This must happen after
- // handoffp because it may have started a new M to take our
- // P's work.
- lock(&sched.lock)
- sched.nmfreed++
- checkdead()
- unlock(&sched.lock)
-
- if GOOS == "darwin" || GOOS == "ios" {
- // Make sure pendingPreemptSignals is correct when an M exits.
- // For #41702.
- if atomic.Load(&m.signalPending) != 0 {
- atomic.Xadd(&pendingPreemptSignals, -1)
- }
- }
-
- // Destroy all allocated resources. After this is called, we may no
- // longer take any locks.
- mdestroy(m)
-
- if osStack {
- // Return from mstart and let the system thread
- // library free the g0 stack and terminate the thread.
- return
- }
-
- // mstart is the thread's entry point, so there's nothing to
- // return to. Exit the thread directly. exitThread will clear
- // m.freeWait when it's done with the stack and the m can be
- // reaped.
- exitThread(&m.freeWait)
-}
-
-// forEachP calls fn(p) for every P p when p reaches a GC safe point.
-// If a P is currently executing code, this will bring the P to a GC
-// safe point and execute fn on that P. If the P is not executing code
-// (it is idle or in a syscall), this will call fn(p) directly while
-// preventing the P from exiting its state. This does not ensure that
-// fn will run on every CPU executing Go code, but it acts as a global
-// memory barrier. GC uses this as a "ragged barrier."
-//
-// The caller must hold worldsema.
-//
-//go:systemstack
-func forEachP(fn func(*p)) {
- mp := acquirem()
- _p_ := getg().m.p.ptr()
-
- lock(&sched.lock)
- if sched.safePointWait != 0 {
- throw("forEachP: sched.safePointWait != 0")
- }
- sched.safePointWait = gomaxprocs - 1
- sched.safePointFn = fn
-
- // Ask all Ps to run the safe point function.
- for _, p := range allp {
- if p != _p_ {
- atomic.Store(&p.runSafePointFn, 1)
- }
- }
- preemptall()
-
- // Any P entering _Pidle or _Psyscall from now on will observe
- // p.runSafePointFn == 1 and will call runSafePointFn when
- // changing its status to _Pidle/_Psyscall.
-
- // Run safe point function for all idle Ps. sched.pidle will
- // not change because we hold sched.lock.
- for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
- if atomic.Cas(&p.runSafePointFn, 1, 0) {
- fn(p)
- sched.safePointWait--
- }
- }
-
- wait := sched.safePointWait > 0
- unlock(&sched.lock)
-
- // Run fn for the current P.
- fn(_p_)
-
- // Force Ps currently in _Psyscall into _Pidle and hand them
- // off to induce safe point function execution.
- for _, p := range allp {
- s := p.status
- if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
- if trace.enabled {
- traceGoSysBlock(p)
- traceProcStop(p)
- }
- p.syscalltick++
- handoffp(p)
- }
- }
-
- // Wait for remaining Ps to run fn.
- if wait {
- for {
- // Wait for 100us, then try to re-preempt in
- // case of any races.
- //
- // Requires system stack.
- if notetsleep(&sched.safePointNote, 100*1000) {
- noteclear(&sched.safePointNote)
- break
- }
- preemptall()
- }
- }
- if sched.safePointWait != 0 {
- throw("forEachP: not done")
- }
- for _, p := range allp {
- if p.runSafePointFn != 0 {
- throw("forEachP: P did not run fn")
- }
- }
-
- lock(&sched.lock)
- sched.safePointFn = nil
- unlock(&sched.lock)
- releasem(mp)
-}
-
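-// A representative use of this ragged barrier: the GC flushes each
-// P's mcache without stopping the world, much as the mark-termination
-// code does (a sketch, not the exact call site):
-//
-// systemstack(func() {
-// forEachP(func(_p_ *p) {
-// _p_.mcache.prepareForSweep()
-// })
-// })
-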
-// runSafePointFn runs the safe point function, if any, for this P.
-// This should be called like
-//
-// if getg().m.p.runSafePointFn != 0 {
-// runSafePointFn()
-// }
-//
-// runSafePointFn must be checked on any transition in to _Pidle or
-// _Psyscall to avoid a race where forEachP sees that the P is running
-// just before the P goes into _Pidle/_Psyscall and neither forEachP
-// nor the P run the safe-point function.
-func runSafePointFn() {
- p := getg().m.p.ptr()
- // Resolve the race between forEachP running the safe-point
- // function on this P's behalf and this P running the
- // safe-point function directly.
- if !atomic.Cas(&p.runSafePointFn, 1, 0) {
- return
- }
- sched.safePointFn(p)
- lock(&sched.lock)
- sched.safePointWait--
- if sched.safePointWait == 0 {
- notewakeup(&sched.safePointNote)
- }
- unlock(&sched.lock)
-}
-
-// When running with cgo, we call _cgo_thread_start
-// to start threads for us so that we can play nicely with
-// foreign code.
-var cgoThreadStart unsafe.Pointer
-
-type cgothreadstart struct {
- g guintptr
- tls *uint64
- fn unsafe.Pointer
-}
-
-// Allocate a new m unassociated with any thread.
-// Can use p for allocation context if needed.
-// fn is recorded as the new m's m.mstartfn.
-// id is an optional pre-allocated m ID. Omit it by passing -1.
-//
-// This function is allowed to have write barriers even if the caller
-// isn't because it borrows _p_.
-//
-//go:yeswritebarrierrec
-func allocm(_p_ *p, fn func(), id int64) *m {
- allocmLock.rlock()
-
- // The caller owns _p_, but we may borrow (i.e., acquirep) it. We must
- // disable preemption to ensure it is not stolen, which would make the
- // caller lose ownership.
- acquirem()
-
- _g_ := getg()
- if _g_.m.p == 0 {
- acquirep(_p_) // temporarily borrow p for mallocs in this function
- }
-
- // Release the free M list. We need to do this somewhere and
- // this may free up a stack we can use.
- if sched.freem != nil {
- lock(&sched.lock)
- var newList *m
- for freem := sched.freem; freem != nil; {
- if freem.freeWait != 0 {
- next := freem.freelink
- freem.freelink = newList
- newList = freem
- freem = next
- continue
- }
- // stackfree must be on the system stack, but allocm is
- // reachable off the system stack transitively from
- // startm.
- systemstack(func() {
- stackfree(freem.g0.stack)
- })
- freem = freem.freelink
- }
- sched.freem = newList
- unlock(&sched.lock)
- }
-
- mp := new(m)
- mp.mstartfn = fn
- mcommoninit(mp, id)
-
- // With cgo, or on Solaris, illumos, or Darwin, pthread_create will make us a stack.
- // Windows and Plan 9 lay out the sched stack on the OS stack.
- if iscgo || mStackIsSystemAllocated() {
- mp.g0 = malg(-1)
- } else {
- mp.g0 = malg(8192 * sys.StackGuardMultiplier)
- }
- mp.g0.m = mp
-
- if _p_ == _g_.m.p.ptr() {
- releasep()
- }
-
- releasem(_g_.m)
- allocmLock.runlock()
- return mp
-}
-
-// needm is called when a cgo callback happens on a
-// thread without an m (a thread not created by Go).
-// In this case, needm is expected to find an m to use
-// and return with m, g initialized correctly.
-// Since m and g are not set now (likely nil, but see below)
-// needm is limited in what routines it can call. In particular
-// it can only call nosplit functions (textflag 7) and cannot
-// do any scheduling that requires an m.
-//
-// In order to avoid needing heavy lifting here, we adopt
-// the following strategy: there is a stack of available m's
-// that can be stolen. Using compare-and-swap
-// to pop from the stack has ABA races, so we simulate
-// a lock by doing an exchange (via Casuintptr) to steal the stack
-// head and replace the top pointer with the sentinel locked (1).
-// This serves as a simple spin lock that we can use even
-// without an m. The thread that locks the stack in this way
-// unlocks the stack by storing a valid stack head pointer.
-//
-// In order to make sure that there is always an m structure
-// available to be stolen, we maintain the invariant that there
-// is always one more than needed. At the beginning of the
-// program (if cgo is in use) the list is seeded with a single m.
-// If needm finds that it has taken the last m off the list, its job
-// is - once it has installed its own m so that it can do things like
-// allocate memory - to create a spare m and put it on the list.
-//
-// Each of these extra m's also has a g0 and a curg that are
-// pressed into service as the scheduling stack and current
-// goroutine for the duration of the cgo callback.
-//
-// When the callback is done with the m, it calls dropm to
-// put the m back on the list.
-//go:nosplit
-func needm() {
- if (iscgo || GOOS == "windows") && !cgoHasExtraM {
- // Can happen if C/C++ code calls Go from a global ctor.
- // Can also happen on Windows if a global ctor uses a
- // callback created by syscall.NewCallback. See issue #6751
- // for details.
- //
- // Can not throw, because scheduler is not initialized yet.
- write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
- exit(1)
- }
-
- // Save and block signals before getting an M.
- // The signal handler may call needm itself,
- // and we must avoid a deadlock. Also, once g is installed,
- // any incoming signals will try to execute,
- // but we won't have the sigaltstack settings and other data
- // set up appropriately until the end of minit, which will
- // unblock the signals. This is the same dance as when
- // starting a new m to run Go code via newosproc.
- var sigmask sigset
- sigsave(&sigmask)
- sigblock(false)
-
- // Lock extra list, take head, unlock popped list.
- // nilokay=false is safe here because of the invariant above,
- // that the extra list always contains or will soon contain
- // at least one m.
- mp := lockextra(false)
-
- // Set needextram when we've just emptied the list,
- // so that the eventual call into cgocallbackg will
- // allocate a new m for the extra list. We delay the
- // allocation until then so that it can be done
- // after exitsyscall makes sure it is okay to be
- // running at all (that is, there's no garbage collection
- // running right now).
- mp.needextram = mp.schedlink == 0
- extraMCount--
- unlockextra(mp.schedlink.ptr())
-
- // Store the original signal mask for use by minit.
- mp.sigmask = sigmask
-
- // Install TLS on some platforms (previously setg
- // would do this if necessary).
- osSetupTLS(mp)
-
- // Install g (= m->g0) and set the stack bounds
- // to match the current stack. We don't actually know
- // how big the stack is, just as we don't know how big any
- // scheduling stack is, but we assume there's at least 32 kB,
- // which is more than enough for us.
- setg(mp.g0)
- _g_ := getg()
- _g_.stack.hi = getcallersp() + 1024
- _g_.stack.lo = getcallersp() - 32*1024
- _g_.stackguard0 = _g_.stack.lo + _StackGuard
-
- // Initialize this thread to use the m.
- asminit()
- minit()
-
- // mp.curg is now a real goroutine.
- casgstatus(mp.curg, _Gdead, _Gsyscall)
- atomic.Xadd(&sched.ngsys, -1)
-}
-
-var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
-
-// newextram allocates m's and puts them on the extra list.
-// It is called with a working local m, so that it can do things
-// like call schedlock and allocate.
-func newextram() {
- c := atomic.Xchg(&extraMWaiters, 0)
- if c > 0 {
- for i := uint32(0); i < c; i++ {
- oneNewExtraM()
- }
- } else {
- // Make sure there is at least one extra M.
- mp := lockextra(true)
- unlockextra(mp)
- if mp == nil {
- oneNewExtraM()
- }
- }
-}
-
-// oneNewExtraM allocates an m and puts it on the extra list.
-func oneNewExtraM() {
- // Create extra goroutine locked to extra m.
- // The goroutine is the context in which the cgo callback will run.
- // The sched.pc will never be returned to, but setting it to
- // goexit makes clear to the traceback routines where
- // the goroutine stack ends.
- mp := allocm(nil, nil, -1)
- gp := malg(4096)
- gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
- gp.sched.sp = gp.stack.hi
- gp.sched.sp -= 4 * goarch.PtrSize // extra space in case of reads slightly beyond frame
- gp.sched.lr = 0
- gp.sched.g = guintptr(unsafe.Pointer(gp))
- gp.syscallpc = gp.sched.pc
- gp.syscallsp = gp.sched.sp
- gp.stktopsp = gp.sched.sp
- // malg returns status as _Gidle. Change to _Gdead before
- // adding to allg where GC can see it. We use _Gdead to hide
- // this from tracebacks and stack scans since it isn't a
- // "real" goroutine until needm grabs it.
- casgstatus(gp, _Gidle, _Gdead)
- gp.m = mp
- mp.curg = gp
- mp.lockedInt++
- mp.lockedg.set(gp)
- gp.lockedm.set(mp)
- gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
- if raceenabled {
- gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
- }
- // put on allg for garbage collector
- allgadd(gp)
-
- // gp is now on the allg list, but we don't want it to be
- // counted by gcount. It would be more "proper" to increment
- // sched.ngfree, but that requires locking. Incrementing ngsys
- // has the same effect.
- atomic.Xadd(&sched.ngsys, +1)
-
- // Add m to the extra list.
- mnext := lockextra(true)
- mp.schedlink.set(mnext)
- extraMCount++
- unlockextra(mp)
-}
-
-// dropm is called when a cgo callback has called needm but is now
-// done with the callback and returning back into the non-Go thread.
-// It puts the current m back onto the extra list.
-//
-// The main expense here is the call to signalstack to release the
-// m's signal stack, and then the call to needm on the next callback
-// from this thread. It is tempting to try to save the m for next time,
-// which would eliminate both these costs, but there might not be
-// a next time: the current thread (which Go does not control) might exit.
-// If we saved the m for that thread, there would be an m leak each time
-// such a thread exited. Instead, we acquire and release an m on each
-// call. These should typically not be scheduling operations, just a few
-// atomics, so the cost should be small.
-//
-// TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
-// variable using pthread_key_create. Unlike the pthread keys we already use
-// on OS X, this dummy key would never be read by Go code. It would exist
-// only so that we could register a thread-exit-time destructor.
-// That destructor would put the m back onto the extra list.
-// This is purely a performance optimization. The current version,
-// in which dropm happens on each cgo call, is still correct too.
-// We may have to keep the current version on systems with cgo
-// but without pthreads, like Windows.
-func dropm() {
- // Clear m and g, and return m to the extra list.
- // After the call to setg we can only call nosplit functions
- // with no pointer manipulation.
- mp := getg().m
-
- // Return mp.curg to dead state.
- casgstatus(mp.curg, _Gsyscall, _Gdead)
- mp.curg.preemptStop = false
- atomic.Xadd(&sched.ngsys, +1)
-
- // Block signals before unminit.
- // Unminit unregisters the signal handling stack (but needs g on some systems).
- // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
- // It's important not to try to handle a signal between those two steps.
- sigmask := mp.sigmask
- sigblock(false)
- unminit()
-
- mnext := lockextra(true)
- extraMCount++
- mp.schedlink.set(mnext)
-
- setg(nil)
-
- // Commit the release of mp.
- unlockextra(mp)
-
- msigrestore(sigmask)
-}
-
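-// The extra-M lifecycle across a single cgo callback, as a sketch:
-//
-// C thread                      Go runtime
-// --------                      ----------
-// calls into Go        ->       needm: pop an extra M, minit, setg
-// runs the Go callback          (mp.curg leaves _Gdead via _Gsyscall)
-// returns to C         ->       dropm: unminit, push the M back
-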
-// A helper function for EnsureDropM.
-func getm() uintptr {
- return uintptr(unsafe.Pointer(getg().m))
-}
-
-var extram uintptr
-var extraMCount uint32 // Protected by lockextra
-var extraMWaiters uint32
-
-// lockextra locks the extra list and returns the list head.
-// The caller must unlock the list by storing a new list head
-// to extram. If nilokay is true, then lockextra will
-// return a nil list head if that's what it finds. If nilokay is false,
-// lockextra will keep waiting until the list head is no longer nil.
-//go:nosplit
-func lockextra(nilokay bool) *m {
- const locked = 1
-
- incr := false
- for {
- old := atomic.Loaduintptr(&extram)
- if old == locked {
- osyield_no_g()
- continue
- }
- if old == 0 && !nilokay {
- if !incr {
- // Add 1 to the number of threads
- // waiting for an M.
- // This is cleared by newextram.
- atomic.Xadd(&extraMWaiters, 1)
- incr = true
- }
- usleep_no_g(1)
- continue
- }
- if atomic.Casuintptr(&extram, old, locked) {
- return (*m)(unsafe.Pointer(old))
- }
- osyield_no_g()
- continue
- }
-}
-
-//go:nosplit
-func unlockextra(mp *m) {
- atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
-}
-
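-// The same head-pointer-as-lock technique, sketched against the
-// public sync/atomic package. node, listHead, and the function
-// names are illustrative, and the uintptr conversions gloss over
-// pointer rules that only the runtime itself may ignore:
-//
-// type node struct{ next *node }
-//
-// const headLocked = uintptr(1) // sentinel; never a valid pointer
-//
-// var listHead uintptr // 0 = empty, headLocked = held, else a *node
-//
-// func lockList() *node {
-// for {
-// old := atomic.LoadUintptr(&listHead)
-// if old == headLocked {
-// runtime.Gosched() // another locker holds the head; spin
-// continue
-// }
-// if atomic.CompareAndSwapUintptr(&listHead, old, headLocked) {
-// return (*node)(unsafe.Pointer(old))
-// }
-// }
-// }
-//
-// func unlockList(head *node) {
-// // Storing any non-sentinel value releases the lock.
-// atomic.StoreUintptr(&listHead, uintptr(unsafe.Pointer(head)))
-// }
-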
-var (
- // allocmLock is locked for read when creating new Ms in allocm and their
- // addition to allm. Thus acquiring this lock for write blocks the
- // creation of new Ms.
- allocmLock rwmutex
-
- // execLock serializes exec and clone to avoid bugs or unspecified
- // behaviour around exec'ing while creating/destroying threads. See
- // issue #19546.
- execLock rwmutex
-)
-
-// newmHandoff contains a list of m structures that need new OS threads.
-// This is used by newm in situations where newm itself can't safely
-// start an OS thread.
-var newmHandoff struct {
- lock mutex
-
- // newm points to a list of M structures that need new OS
- // threads. The list is linked through m.schedlink.
- newm muintptr
-
- // waiting indicates that wake needs to be notified when an m
- // is put on the list.
- waiting bool
- wake note
-
- // haveTemplateThread indicates that the templateThread has
- // been started. This is not protected by lock. Use cas to set
- // to 1.
- haveTemplateThread uint32
-}
-
-// Create a new m. It will start off with a call to fn, or else the scheduler.
-// fn needs to be static and not a heap-allocated closure.
-// May run with m.p==nil, so write barriers are not allowed.
-//
-// id is an optional pre-allocated m ID. Omit it by passing -1.
-//go:nowritebarrierrec
-func newm(fn func(), _p_ *p, id int64) {
- // allocm adds a new M to allm, but they do not start until created by
- // the OS in newm1 or the template thread.
- //
- // doAllThreadsSyscall requires that every M in allm will eventually
- // start and be signal-able, even with a STW.
- //
- // Disable preemption here until we start the thread to ensure that
- // newm is not preempted between allocm and starting the new thread,
- // ensuring that anything added to allm is guaranteed to eventually
- // start.
- acquirem()
-
- mp := allocm(_p_, fn, id)
- mp.nextp.set(_p_)
- mp.sigmask = initSigmask
- if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
- // We're on a locked M or a thread that may have been
- // started by C. The kernel state of this thread may
- // be strange (the user may have locked it for that
- // purpose). We don't want to clone that into another
- // thread. Instead, ask a known-good thread to create
- // the thread for us.
- //
- // This is disabled on Plan 9. See golang.org/issue/22227.
- //
- // TODO: This may be unnecessary on Windows, which
- // doesn't model thread creation off fork.
- lock(&newmHandoff.lock)
- if newmHandoff.haveTemplateThread == 0 {
- throw("on a locked thread with no template thread")
- }
- mp.schedlink = newmHandoff.newm
- newmHandoff.newm.set(mp)
- if newmHandoff.waiting {
- newmHandoff.waiting = false
- notewakeup(&newmHandoff.wake)
- }
- unlock(&newmHandoff.lock)
- // The M has not started yet, but the template thread does not
- // participate in STW, so it will always process queued Ms and
- // it is safe to releasem.
- releasem(getg().m)
- return
- }
- newm1(mp)
- releasem(getg().m)
-}
-
-func newm1(mp *m) {
- if iscgo {
- var ts cgothreadstart
- if _cgo_thread_start == nil {
- throw("_cgo_thread_start missing")
- }
- ts.g.set(mp.g0)
- ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
- ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
- if msanenabled {
- msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
- }
- if asanenabled {
- asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
- }
- execLock.rlock() // Prevent process clone.
- asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
- execLock.runlock()
- return
- }
- execLock.rlock() // Prevent process clone.
- newosproc(mp)
- execLock.runlock()
-}
-
-// startTemplateThread starts the template thread if it is not already
-// running.
-//
-// The calling thread must itself be in a known-good state.
-func startTemplateThread() {
- if GOARCH == "wasm" { // no threads on wasm yet
- return
- }
-
- // Disable preemption to guarantee that the template thread will be
- // created before a park once haveTemplateThread is set.
- mp := acquirem()
- if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
- releasem(mp)
- return
- }
- newm(templateThread, nil, -1)
- releasem(mp)
-}
-
-// templateThread is a thread in a known-good state that exists solely
-// to start new threads in known-good states when the calling thread
-// may not be in a good state.
-//
-// Many programs never need this, so templateThread is started lazily
-// when we first enter a state that might lead to running on a thread
-// in an unknown state.
-//
-// templateThread runs on an M without a P, so it must not have write
-// barriers.
-//
-//go:nowritebarrierrec
-func templateThread() {
- lock(&sched.lock)
- sched.nmsys++
- checkdead()
- unlock(&sched.lock)
-
- for {
- lock(&newmHandoff.lock)
- for newmHandoff.newm != 0 {
- newm := newmHandoff.newm.ptr()
- newmHandoff.newm = 0
- unlock(&newmHandoff.lock)
- for newm != nil {
- next := newm.schedlink.ptr()
- newm.schedlink = 0
- newm1(newm)
- newm = next
- }
- lock(&newmHandoff.lock)
- }
- newmHandoff.waiting = true
- noteclear(&newmHandoff.wake)
- unlock(&newmHandoff.lock)
- notesleep(&newmHandoff.wake)
- }
-}
-
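-// The newm/templateThread pair above is a "locked queue plus wakeup
-// note" handoff. The same shape with public primitives (all names
-// illustrative; a buffered channel of capacity 1 stands in for the
-// runtime note):
-//
-// type handoff struct {
-// mu      sync.Mutex
-// queue   []func()
-// waiting bool
-// wake    chan struct{} // make(chan struct{}, 1)
-// }
-//
-// func (h *handoff) put(fn func()) {
-// h.mu.Lock()
-// h.queue = append(h.queue, fn)
-// if h.waiting {
-// h.waiting = false
-// h.wake <- struct{}{} // notewakeup
-// }
-// h.mu.Unlock()
-// }
-//
-// func (h *handoff) serve() {
-// for {
-// h.mu.Lock()
-// for len(h.queue) > 0 {
-// q := h.queue
-// h.queue = nil
-// h.mu.Unlock()
-// for _, fn := range q {
-// fn()
-// }
-// h.mu.Lock()
-// }
-// h.waiting = true
-// h.mu.Unlock()
-// <-h.wake // notesleep
-// }
-// }
-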
-// Stops execution of the current m until new work is available.
-// Returns with acquired P.
-func stopm() {
- _g_ := getg()
-
- if _g_.m.locks != 0 {
- throw("stopm holding locks")
- }
- if _g_.m.p != 0 {
- throw("stopm holding p")
- }
- if _g_.m.spinning {
- throw("stopm spinning")
- }
-
- lock(&sched.lock)
- mput(_g_.m)
- unlock(&sched.lock)
- mPark()
- acquirep(_g_.m.nextp.ptr())
- _g_.m.nextp = 0
-}
-
-func mspinning() {
- // startm's caller incremented nmspinning. Set the new M's spinning.
- getg().m.spinning = true
-}
-
-// Schedules some M to run the p (creates an M if necessary).
-// If p == nil, tries to get an idle P; if there are no idle P's, does nothing.
-// May run with m.p==nil, so write barriers are not allowed.
-// If spinning is set, the caller has incremented nmspinning and startm will
-// either decrement nmspinning or set m.spinning in the newly started M.
-//
-// Callers passing a non-nil P must call from a non-preemptible context. See
-// comment on acquirem below.
-//
-// Must not have write barriers because this may be called without a P.
-//go:nowritebarrierrec
-func startm(_p_ *p, spinning bool) {
- // Disable preemption.
- //
- // Every owned P must have an owner that will eventually stop it in the
- // event of a GC stop request. startm takes transient ownership of a P
- // (either from argument or pidleget below) and transfers ownership to
- // a started M, which will be responsible for performing the stop.
- //
- // Preemption must be disabled during this transient ownership,
- // otherwise the P this is running on may enter GC stop while still
- // holding the transient P, leaving that P in limbo and deadlocking the
- // STW.
- //
- // Callers passing a non-nil P must already be in non-preemptible
- // context, otherwise such preemption could occur on function entry to
- // startm. Callers passing a nil P may be preemptible, so we must
- // disable preemption before acquiring a P from pidleget below.
- mp := acquirem()
- lock(&sched.lock)
- if _p_ == nil {
- _p_ = pidleget()
- if _p_ == nil {
- unlock(&sched.lock)
- if spinning {
- // The caller incremented nmspinning, but there are no idle Ps,
- // so it's okay to just undo the increment and give up.
- if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
- throw("startm: negative nmspinning")
- }
- }
- releasem(mp)
- return
- }
- }
- nmp := mget()
- if nmp == nil {
- // No M is available, we must drop sched.lock and call newm.
- // However, we already own a P to assign to the M.
- //
- // Once sched.lock is released, another G (e.g., in a syscall),
- // could find no idle P while checkdead finds a runnable G but
- // no running M's because this new M hasn't started yet, thus
- // throwing in an apparent deadlock.
- //
- // Avoid this situation by pre-allocating the ID for the new M,
- // thus marking it as 'running' before we drop sched.lock. This
- // new M will eventually run the scheduler to execute any
- // queued G's.
- id := mReserveID()
- unlock(&sched.lock)
-
- var fn func()
- if spinning {
- // The caller incremented nmspinning, so set m.spinning in the new M.
- fn = mspinning
- }
- newm(fn, _p_, id)
- // Ownership transfer of _p_ committed by start in newm.
- // Preemption is now safe.
- releasem(mp)
- return
- }
- unlock(&sched.lock)
- if nmp.spinning {
- throw("startm: m is spinning")
- }
- if nmp.nextp != 0 {
- throw("startm: m has p")
- }
- if spinning && !runqempty(_p_) {
- throw("startm: p has runnable gs")
- }
- // The caller incremented nmspinning, so set m.spinning in the new M.
- nmp.spinning = spinning
- nmp.nextp.set(_p_)
- notewakeup(&nmp.park)
- // Ownership transfer of _p_ committed by wakeup. Preemption is now
- // safe.
- releasem(mp)
-}
-
-// Hands off P from syscall or locked M.
-// Always runs without a P, so write barriers are not allowed.
-//go:nowritebarrierrec
-func handoffp(_p_ *p) {
- // handoffp must start an M in any situation where
- // findrunnable would return a G to run on _p_.
-
- // if it has local work, start it straight away
- if !runqempty(_p_) || sched.runqsize != 0 {
- startm(_p_, false)
- return
- }
- // if it has GC work, start it straight away
- if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
- startm(_p_, false)
- return
- }
- // no local work, check that there are no spinning/idle M's,
- // otherwise our help is not required
- if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
- startm(_p_, true)
- return
- }
- lock(&sched.lock)
- if sched.gcwaiting != 0 {
- _p_.status = _Pgcstop
- sched.stopwait--
- if sched.stopwait == 0 {
- notewakeup(&sched.stopnote)
- }
- unlock(&sched.lock)
- return
- }
- if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
- sched.safePointFn(_p_)
- sched.safePointWait--
- if sched.safePointWait == 0 {
- notewakeup(&sched.safePointNote)
- }
- }
- if sched.runqsize != 0 {
- unlock(&sched.lock)
- startm(_p_, false)
- return
- }
- // If this is the last running P and nobody is polling the network,
- // we need to wake up another M to poll the network.
- if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
- unlock(&sched.lock)
- startm(_p_, false)
- return
- }
-
- // The scheduler lock cannot be held when calling wakeNetPoller below
- // because wakeNetPoller may call wakep which may call startm.
- when := nobarrierWakeTime(_p_)
- pidleput(_p_)
- unlock(&sched.lock)
-
- if when != 0 {
- wakeNetPoller(when)
- }
-}
-
-// Tries to add one more P to execute G's.
-// Called when a G is made runnable (newproc, ready).
-func wakep() {
- if atomic.Load(&sched.npidle) == 0 {
- return
- }
- // be conservative about spinning threads
- if atomic.Load(&sched.nmspinning) != 0 || !atomic.Cas(&sched.nmspinning, 0, 1) {
- return
- }
- startm(nil, true)
-}
-
-// Stops execution of the current m that is locked to a g until the g is runnable again.
-// Returns with acquired P.
-func stoplockedm() {
- _g_ := getg()
-
- if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
- throw("stoplockedm: inconsistent locking")
- }
- if _g_.m.p != 0 {
- // Schedule another M to run this p.
- _p_ := releasep()
- handoffp(_p_)
- }
- incidlelocked(1)
- // Wait until another thread schedules lockedg again.
- mPark()
- status := readgstatus(_g_.m.lockedg.ptr())
- if status&^_Gscan != _Grunnable {
- print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
- dumpgstatus(_g_.m.lockedg.ptr())
- throw("stoplockedm: not runnable")
- }
- acquirep(_g_.m.nextp.ptr())
- _g_.m.nextp = 0
-}
-
-// Schedules the locked m to run the locked gp.
-// May run during STW, so write barriers are not allowed.
-//go:nowritebarrierrec
-func startlockedm(gp *g) {
- _g_ := getg()
-
- mp := gp.lockedm.ptr()
- if mp == _g_.m {
- throw("startlockedm: locked to me")
- }
- if mp.nextp != 0 {
- throw("startlockedm: m has p")
- }
- // directly handoff current P to the locked m
- incidlelocked(-1)
- _p_ := releasep()
- mp.nextp.set(_p_)
- notewakeup(&mp.park)
- stopm()
-}
-
-// Stops the current m for stopTheWorld.
-// Returns when the world is restarted.
-func gcstopm() {
- _g_ := getg()
-
- if sched.gcwaiting == 0 {
- throw("gcstopm: not waiting for gc")
- }
- if _g_.m.spinning {
- _g_.m.spinning = false
- // OK to just drop nmspinning here,
- // startTheWorld will unpark threads as necessary.
- if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
- throw("gcstopm: negative nmspinning")
- }
- }
- _p_ := releasep()
- lock(&sched.lock)
- _p_.status = _Pgcstop
- sched.stopwait--
- if sched.stopwait == 0 {
- notewakeup(&sched.stopnote)
- }
- unlock(&sched.lock)
- stopm()
-}
-
-// Schedules gp to run on the current M.
-// If inheritTime is true, gp inherits the remaining time in the
-// current time slice. Otherwise, it starts a new time slice.
-// Never returns.
-//
-// Write barriers are allowed because this is called immediately after
-// acquiring a P in several places.
-//
-//go:yeswritebarrierrec
-func execute(gp *g, inheritTime bool) {
- _g_ := getg()
-
- // Assign gp.m before entering _Grunning so running Gs have an
- // M.
- _g_.m.curg = gp
- gp.m = _g_.m
- casgstatus(gp, _Grunnable, _Grunning)
- gp.waitsince = 0
- gp.preempt = false
- gp.stackguard0 = gp.stack.lo + _StackGuard
- if !inheritTime {
- _g_.m.p.ptr().schedtick++
- }
-
- // Check whether the profiler needs to be turned on or off.
- hz := sched.profilehz
- if _g_.m.profilehz != hz {
- setThreadCPUProfiler(hz)
- }
-
- if trace.enabled {
- // GoSysExit has to happen when we have a P, but before GoStart.
- // So we emit it here.
- if gp.syscallsp != 0 && gp.sysblocktraced {
- traceGoSysExit(gp.sysexitticks)
- }
- traceGoStart()
- }
-
- gogo(&gp.sched)
-}
-
-// Finds a runnable goroutine to execute.
-// Tries to steal from other P's, get g from local or global queue, poll network.
-func findrunnable() (gp *g, inheritTime bool) {
- _g_ := getg()
-
- // The conditions here and in handoffp must agree: if
- // findrunnable would return a G to run, handoffp must start
- // an M.
-
-top:
- _p_ := _g_.m.p.ptr()
- if sched.gcwaiting != 0 {
- gcstopm()
- goto top
- }
- if _p_.runSafePointFn != 0 {
- runSafePointFn()
- }
-
- now, pollUntil, _ := checkTimers(_p_, 0)
-
- if fingwait && fingwake {
- if gp := wakefing(); gp != nil {
- ready(gp, 0, true)
- }
- }
- if *cgo_yield != nil {
- asmcgocall(*cgo_yield, nil)
- }
-
- // local runq
- if gp, inheritTime := runqget(_p_); gp != nil {
- return gp, inheritTime
- }
-
- // global runq
- if sched.runqsize != 0 {
- lock(&sched.lock)
- gp := globrunqget(_p_, 0)
- unlock(&sched.lock)
- if gp != nil {
- return gp, false
- }
- }
-
- // Poll network.
- // This netpoll is only an optimization before we resort to stealing.
- // We can safely skip it if there are no waiters or a thread is blocked
- // in netpoll already. If there is any kind of logical race with that
- // blocked thread (e.g. it has already returned from netpoll, but does
- // not set lastpoll yet), this thread will do blocking netpoll below
- // anyway.
- if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
- if list := netpoll(0); !list.empty() { // non-blocking
- gp := list.pop()
- injectglist(&list)
- casgstatus(gp, _Gwaiting, _Grunnable)
- if trace.enabled {
- traceGoUnpark(gp, 0)
- }
- return gp, false
- }
- }
-
- // Spinning Ms: steal work from other Ps.
- //
- // Limit the number of spinning Ms to half the number of busy Ps.
- // This is necessary to prevent excessive CPU consumption when
- // GOMAXPROCS>>1 but the program parallelism is low.
- procs := uint32(gomaxprocs)
- if _g_.m.spinning || 2*atomic.Load(&sched.nmspinning) < procs-atomic.Load(&sched.npidle) {
- if !_g_.m.spinning {
- _g_.m.spinning = true
- atomic.Xadd(&sched.nmspinning, 1)
- }
-
- gp, inheritTime, tnow, w, newWork := stealWork(now)
- now = tnow
- if gp != nil {
- // Successfully stole.
- return gp, inheritTime
- }
- if newWork {
- // There may be new timer or GC work; restart to
- // discover.
- goto top
- }
- if w != 0 && (pollUntil == 0 || w < pollUntil) {
- // Earlier timer to wait for.
- pollUntil = w
- }
- }
-
- // We have nothing to do.
- //
- // If we're in the GC mark phase, can safely scan and blacken objects,
- // and have work to do, run idle-time marking rather than give up the
- // P.
- if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
- node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
- if node != nil {
- _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
- gp := node.gp.ptr()
- casgstatus(gp, _Gwaiting, _Grunnable)
- if trace.enabled {
- traceGoUnpark(gp, 0)
- }
- return gp, false
- }
- }
-
- // wasm only:
- // If a callback returned and no other goroutine is awake,
- // then wake event handler goroutine which pauses execution
- // until a callback was triggered.
- gp, otherReady := beforeIdle(now, pollUntil)
- if gp != nil {
- casgstatus(gp, _Gwaiting, _Grunnable)
- if trace.enabled {
- traceGoUnpark(gp, 0)
- }
- return gp, false
- }
- if otherReady {
- goto top
- }
-
- // Before we drop our P, make a snapshot of the allp slice,
- // which can change underfoot once we no longer block
- // safe-points. We don't need to snapshot the contents because
- // everything up to cap(allp) is immutable.
- allpSnapshot := allp
- // Also snapshot masks. Value changes are OK, but we can't allow
- // len to change out from under us.
- idlepMaskSnapshot := idlepMask
- timerpMaskSnapshot := timerpMask
-
- // return P and block
- lock(&sched.lock)
- if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
- unlock(&sched.lock)
- goto top
- }
- if sched.runqsize != 0 {
- gp := globrunqget(_p_, 0)
- unlock(&sched.lock)
- return gp, false
- }
- if releasep() != _p_ {
- throw("findrunnable: wrong p")
- }
- pidleput(_p_)
- unlock(&sched.lock)
-
- // Delicate dance: thread transitions from spinning to non-spinning
- // state, potentially concurrently with submission of new work. We must
- // drop nmspinning first and then check all sources again (with
- // #StoreLoad memory barrier in between). If we do it the other way
- // around, another thread can submit work after we've checked all
- // sources but before we drop nmspinning; as a result nobody will
- // unpark a thread to run the work.
- //
- // This applies to the following sources of work:
- //
- // * Goroutines added to a per-P run queue.
- // * New/modified-earlier timers on a per-P timer heap.
- // * Idle-priority GC work (barring golang.org/issue/19112).
- //
- // If we discover new work below, we need to restore m.spinning as a signal
- // for resetspinning to unpark a new worker thread (because there can be more
- // than one starving goroutine). However, if after discovering new work
- // we also observe no idle Ps it is OK to skip unparking a new worker
- // thread: the system is fully loaded so no spinning threads are required.
- // Also see "Worker thread parking/unparking" comment at the top of the file.
- wasSpinning := _g_.m.spinning
- if _g_.m.spinning {
- _g_.m.spinning = false
- if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
- throw("findrunnable: negative nmspinning")
- }
-
- // Note that for correctness, only the last M transitioning from
- // spinning to non-spinning must perform these rechecks to
- // ensure no missed work. We are performing it on every M that
- // transitions as a conservative change to monitor effects on
- // latency. See golang.org/issue/43997.
-
- // Check all runqueues once again.
- _p_ = checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
- if _p_ != nil {
- acquirep(_p_)
- _g_.m.spinning = true
- atomic.Xadd(&sched.nmspinning, 1)
- goto top
- }
-
- // Check for idle-priority GC work again.
- _p_, gp = checkIdleGCNoP()
- if _p_ != nil {
- acquirep(_p_)
- _g_.m.spinning = true
- atomic.Xadd(&sched.nmspinning, 1)
-
- // Run the idle worker.
- _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
- casgstatus(gp, _Gwaiting, _Grunnable)
- if trace.enabled {
- traceGoUnpark(gp, 0)
- }
- return gp, false
- }
-
- // Finally, check for timer creation or expiry concurrently with
- // transitioning from spinning to non-spinning.
- //
- // Note that we cannot use checkTimers here because it calls
- // adjusttimers which may need to allocate memory, and that isn't
- // allowed when we don't have an active P.
- pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
- }
-
- // Poll network until next timer.
- if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
- atomic.Store64(&sched.pollUntil, uint64(pollUntil))
- if _g_.m.p != 0 {
- throw("findrunnable: netpoll with p")
- }
- if _g_.m.spinning {
- throw("findrunnable: netpoll with spinning")
- }
- delay := int64(-1)
- if pollUntil != 0 {
- if now == 0 {
- now = nanotime()
- }
- delay = pollUntil - now
- if delay < 0 {
- delay = 0
- }
- }
- if faketime != 0 {
- // When using fake time, just poll.
- delay = 0
- }
- list := netpoll(delay) // block until new work is available
- atomic.Store64(&sched.pollUntil, 0)
- atomic.Store64(&sched.lastpoll, uint64(nanotime()))
- if faketime != 0 && list.empty() {
- // Using fake time and nothing is ready; stop M.
- // When all M's stop, checkdead will call timejump.
- stopm()
- goto top
- }
- lock(&sched.lock)
- _p_ = pidleget()
- unlock(&sched.lock)
- if _p_ == nil {
- injectglist(&list)
- } else {
- acquirep(_p_)
- if !list.empty() {
- gp := list.pop()
- injectglist(&list)
- casgstatus(gp, _Gwaiting, _Grunnable)
- if trace.enabled {
- traceGoUnpark(gp, 0)
- }
- return gp, false
- }
- if wasSpinning {
- _g_.m.spinning = true
- atomic.Xadd(&sched.nmspinning, 1)
- }
- goto top
- }
- } else if pollUntil != 0 && netpollinited() {
- pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
- if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
- netpollBreak()
- }
- }
- stopm()
- goto top
-}
-
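-// The spinning->non-spinning transition above, reduced to its core
-// (illustrative names; the atomic add supplies the #StoreLoad
-// barrier between publishing the decrement and re-checking):
-//
-// func stopSpinning(m *workerM, haveWork func() bool) bool {
-// m.spinning = false
-// atomic.AddInt32(&nSpinning, -1) // publish first...
-// if haveWork() { // ...then re-check every work source
-// m.spinning = true // restore so resetspinning can unpark peers
-// atomic.AddInt32(&nSpinning, 1)
-// return true
-// }
-// return false
-// }
-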
-// pollWork reports whether there is non-background work this P could
-// be doing. This is a fairly lightweight check to be used for
-// background work loops, like idle GC. It checks a subset of the
-// conditions checked by the actual scheduler.
-func pollWork() bool {
- if sched.runqsize != 0 {
- return true
- }
- p := getg().m.p.ptr()
- if !runqempty(p) {
- return true
- }
- if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
- if list := netpoll(0); !list.empty() {
- injectglist(&list)
- return true
- }
- }
- return false
-}
-
-// stealWork attempts to steal a runnable goroutine or timer from any P.
-//
-// If newWork is true, new work may have been readied.
-//
-// If now is not 0 it is the current time. stealWork returns the passed time or
-// the current time if now was passed as 0.
-func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
- pp := getg().m.p.ptr()
-
- ranTimer := false
-
- const stealTries = 4
- for i := 0; i < stealTries; i++ {
- stealTimersOrRunNextG := i == stealTries-1
-
- for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
- if sched.gcwaiting != 0 {
- // GC work may be available.
- return nil, false, now, pollUntil, true
- }
- p2 := allp[enum.position()]
- if pp == p2 {
- continue
- }
-
- // Steal timers from p2. This call to checkTimers is the only place
- // where we might hold a lock on a different P's timers. We do this
- // once on the last pass before checking runnext because stealing
- // from the other P's runnext should be the last resort, so if there
- // are timers to steal do that first.
- //
- // We only check timers on one of the stealing iterations because
- // the time stored in now doesn't change in this loop and checking
- // the timers for each P more than once with the same value of now
- // is probably a waste of time.
- //
- // timerpMask tells us whether the P may have timers at all. If it
- // can't, no need to check at all.
- if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
- tnow, w, ran := checkTimers(p2, now)
- now = tnow
- if w != 0 && (pollUntil == 0 || w < pollUntil) {
- pollUntil = w
- }
- if ran {
- // Running the timers may have
- // made an arbitrary number of G's
- // ready and added them to this P's
- // local run queue. That invalidates
- // the assumption of runqsteal
- // that it always has room to add
- // stolen G's. So check now if there
- // is a local G to run.
- if gp, inheritTime := runqget(pp); gp != nil {
- return gp, inheritTime, now, pollUntil, ranTimer
- }
- ranTimer = true
- }
- }
-
- // Don't bother to attempt to steal if p2 is idle.
- if !idlepMask.read(enum.position()) {
- if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
- return gp, false, now, pollUntil, ranTimer
- }
- }
- }
- }
-
- // No goroutines found to steal. Regardless, running a timer may have
- // made some goroutine ready that we missed. Indicate the next timer to
- // wait for.
- return nil, false, now, pollUntil, ranTimer
-}
-
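-// stealOrder.start/next enumerate victims in a scrambled order so
-// spinning Ms don't all hammer the same P. The core trick: a random
-// start plus a stride coprime to n visits each of 0..n-1 exactly
-// once. A sketch with illustrative names (gcd is the usual Euclid
-// helper):
-//
-// func gcd(a, b uint32) uint32 {
-// for b != 0 {
-// a, b = b, a%b
-// }
-// return a
-// }
-//
-// func visitRandomOrder(n uint32, seed uint64, visit func(uint32)) {
-// inc := uint32(seed>>32)%n + 1
-// for gcd(inc, n) != 1 {
-// inc = inc%n + 1
-// }
-// pos := uint32(seed) % n
-// for k := uint32(0); k < n; k++ {
-// visit(pos)
-// pos = (pos + inc) % n
-// }
-// }
-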
-// Check all Ps for a runnable G to steal.
-//
-// On entry we have no P. If a G is available to steal and a P is available,
-// the P is returned; the caller should acquire it and then attempt to steal
-// the work to it.
-func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
- for id, p2 := range allpSnapshot {
- if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
- lock(&sched.lock)
- pp := pidleget()
- unlock(&sched.lock)
- if pp != nil {
- return pp
- }
-
- // Can't get a P, don't bother checking remaining Ps.
- break
- }
- }
-
- return nil
-}
-
-// Check all Ps for a timer expiring sooner than pollUntil.
-//
-// Returns updated pollUntil value.
-func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
- for id, p2 := range allpSnapshot {
- if timerpMaskSnapshot.read(uint32(id)) {
- w := nobarrierWakeTime(p2)
- if w != 0 && (pollUntil == 0 || w < pollUntil) {
- pollUntil = w
- }
- }
- }
-
- return pollUntil
-}
-
-// Check for idle-priority GC, without a P on entry.
-//
-// If some GC work, a P, and a worker G are all available, the P and G will be
-// returned. The returned P has not been wired yet.
-func checkIdleGCNoP() (*p, *g) {
- // N.B. Since we have no P, gcBlackenEnabled may change at any time; we
- // must check again after acquiring a P.
- if atomic.Load(&gcBlackenEnabled) == 0 {
- return nil, nil
- }
- if !gcMarkWorkAvailable(nil) {
- return nil, nil
- }
-
- // Work is available; we can start an idle GC worker only if there is
- // an available P and available worker G.
- //
- // We can attempt to acquire these in either order, though both have
- // synchronization concerns (see below). Workers are almost always
- // available (see comment in findRunnableGCWorker for the one case
- // there may be none). Since we're slightly less likely to find a P,
- // check for that first.
- //
- // Synchronization: note that we must hold sched.lock until we are
- // committed to keeping it. Otherwise we cannot put the unnecessary P
- // back in sched.pidle without performing the full set of idle
- // transition checks.
- //
- // If we were to check gcBgMarkWorkerPool first, we must somehow handle
- // the assumption in gcControllerState.findRunnableGCWorker that an
- // empty gcBgMarkWorkerPool is only possible if gcMarkDone is running.
- lock(&sched.lock)
- pp := pidleget()
- if pp == nil {
- unlock(&sched.lock)
- return nil, nil
- }
-
- // Now that we own a P, gcBlackenEnabled can't change (as it requires
- // STW).
- if gcBlackenEnabled == 0 {
- pidleput(pp)
- unlock(&sched.lock)
- return nil, nil
- }
-
- node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
- if node == nil {
- pidleput(pp)
- unlock(&sched.lock)
- return nil, nil
- }
-
- unlock(&sched.lock)
-
- return pp, node.gp.ptr()
-}
-
-// wakeNetPoller wakes up the thread sleeping in the network poller if it isn't
-// going to wake up before the when argument; or it wakes an idle P to service
-// timers and the network poller if there isn't one already.
-func wakeNetPoller(when int64) {
- if atomic.Load64(&sched.lastpoll) == 0 {
- // In findrunnable we ensure that, when polling, the pollUntil
- // field is either zero or the time to which the current
- // poll is expected to run. This can have a spurious wakeup
- // but should never miss a wakeup.
- pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
- if pollerPollUntil == 0 || pollerPollUntil > when {
- netpollBreak()
- }
- } else {
- // There are no threads in the network poller, try to get
- // one there so it can handle new timers.
- if GOOS != "plan9" { // Temporary workaround - see issue #42303.
- wakep()
- }
- }
-}
-
-func resetspinning() {
- _g_ := getg()
- if !_g_.m.spinning {
- throw("resetspinning: not a spinning m")
- }
- _g_.m.spinning = false
- nmspinning := atomic.Xadd(&sched.nmspinning, -1)
- if int32(nmspinning) < 0 {
- throw("findrunnable: negative nmspinning")
- }
- // M wakeup policy is deliberately somewhat conservative, so check if we
- // need to wakeup another P here. See "Worker thread parking/unparking"
- // comment at the top of the file for details.
- wakep()
-}
-
-// injectglist adds each runnable G on the list to some run queue,
-// and clears glist. If there is no current P, they are added to the
-// global queue, and up to npidle M's are started to run them.
-// Otherwise, for each idle P, this adds a G to the global queue
-// and starts an M. Any remaining G's are added to the current P's
-// local run queue.
-// This may temporarily acquire sched.lock.
-// Can run concurrently with GC.
-func injectglist(glist *gList) {
- if glist.empty() {
- return
- }
- if trace.enabled {
- for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
- traceGoUnpark(gp, 0)
- }
- }
-
- // Mark all the goroutines as runnable before we put them
- // on the run queues.
- head := glist.head.ptr()
- var tail *g
- qsize := 0
- for gp := head; gp != nil; gp = gp.schedlink.ptr() {
- tail = gp
- qsize++
- casgstatus(gp, _Gwaiting, _Grunnable)
- }
-
- // Turn the gList into a gQueue.
- var q gQueue
- q.head.set(head)
- q.tail.set(tail)
- *glist = gList{}
-
- startIdle := func(n int) {
- for ; n != 0 && sched.npidle != 0; n-- {
- startm(nil, false)
- }
- }
-
- pp := getg().m.p.ptr()
- if pp == nil {
- lock(&sched.lock)
- globrunqputbatch(&q, int32(qsize))
- unlock(&sched.lock)
- startIdle(qsize)
- return
- }
-
- npidle := int(atomic.Load(&sched.npidle))
- var globq gQueue
- var n int
- for n = 0; n < npidle && !q.empty(); n++ {
- g := q.pop()
- globq.pushBack(g)
- }
- if n > 0 {
- lock(&sched.lock)
- globrunqputbatch(&globq, int32(n))
- unlock(&sched.lock)
- startIdle(n)
- qsize -= n
- }
-
- if !q.empty() {
- runqputbatch(pp, &q, qsize)
- }
-}
-
-// One round of scheduler: find a runnable goroutine and execute it.
-// Never returns.
-func schedule() {
- _g_ := getg()
-
- if _g_.m.locks != 0 {
- throw("schedule: holding locks")
- }
-
- if _g_.m.lockedg != 0 {
- stoplockedm()
- execute(_g_.m.lockedg.ptr(), false) // Never returns.
- }
-
- // We should not schedule away from a g that is executing a cgo call,
- // since the cgo call is using the m's g0 stack.
- if _g_.m.incgo {
- throw("schedule: in cgo")
- }
-
-top:
- pp := _g_.m.p.ptr()
- pp.preempt = false
-
- if sched.gcwaiting != 0 {
- gcstopm()
- goto top
- }
- if pp.runSafePointFn != 0 {
- runSafePointFn()
- }
-
- // Sanity check: if we are spinning, the run queue should be empty.
- // Check this before calling checkTimers, as that might call
- // goready to put a ready goroutine on the local run queue.
- if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
- throw("schedule: spinning with local work")
- }
-
- checkTimers(pp, 0)
-
- var gp *g
- var inheritTime bool
-
- // Normal goroutines will check whether a P needs to be woken in
- // ready, but GC workers and trace readers will not, so the check
- // must be done here instead.
- tryWakeP := false
- if trace.enabled || trace.shutdown {
- gp = traceReader()
- if gp != nil {
- casgstatus(gp, _Gwaiting, _Grunnable)
- traceGoUnpark(gp, 0)
- tryWakeP = true
- }
- }
- if gp == nil && gcBlackenEnabled != 0 {
- gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
- if gp != nil {
- tryWakeP = true
- }
- }
- if gp == nil {
- // Check the global runnable queue once in a while to ensure fairness.
- // Otherwise two goroutines can completely occupy the local runqueue
- // by constantly respawning each other.
- if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
- lock(&sched.lock)
- gp = globrunqget(_g_.m.p.ptr(), 1)
- unlock(&sched.lock)
- }
- }
- if gp == nil {
- gp, inheritTime = runqget(_g_.m.p.ptr())
- // We can see gp != nil here even if the M is spinning,
- // if checkTimers added a local goroutine via goready.
- }
- if gp == nil {
- gp, inheritTime = findrunnable() // blocks until work is available
- }
-
- // This thread is going to run a goroutine and is not spinning anymore,
- // so if it was marked as spinning we need to reset it now and potentially
- // start a new spinning M.
- if _g_.m.spinning {
- resetspinning()
- }
-
- if sched.disable.user && !schedEnabled(gp) {
- // Scheduling of this goroutine is disabled. Put it on
- // the list of pending runnable goroutines for when we
- // re-enable user scheduling and look again.
- lock(&sched.lock)
- if schedEnabled(gp) {
- // Something re-enabled scheduling while we
- // were acquiring the lock.
- unlock(&sched.lock)
- } else {
- sched.disable.runnable.pushBack(gp)
- sched.disable.n++
- unlock(&sched.lock)
- goto top
- }
- }
-
- // If about to schedule a not-normal goroutine (a GCworker or tracereader),
- // wake a P if there is one.
- if tryWakeP {
- wakep()
- }
- if gp.lockedm != 0 {
- // Hands off own p to the locked m,
- // then blocks waiting for a new p.
- startlockedm(gp)
- goto top
- }
-
- execute(gp, inheritTime)
-}
-
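-// For illustration only (not part of this file): a minimal user-level sketch
-// of the fairness rule above. Two goroutines constantly make each other
-// runnable on a single P, yet a third goroutine still runs, because the
-// runnext slot, the schedtick%61 global-queue check, and sysmon preemption
-// combine to prevent starvation.
-//
-//	package main
-//
-//	import (
-//		"fmt"
-//		"runtime"
-//		"time"
-//	)
-//
-//	func main() {
-//		runtime.GOMAXPROCS(1) // a single P makes starvation easiest to provoke
-//		ping, pong := make(chan struct{}), make(chan struct{})
-//		go func() {
-//			for {
-//				ping <- struct{}{}
-//				<-pong
-//			}
-//		}()
-//		go func() {
-//			for {
-//				<-ping
-//				pong <- struct{}{}
-//			}
-//		}()
-//		done := make(chan struct{})
-//		go func() { // must not be starved by the ping-pong pair
-//			fmt.Println("third goroutine ran")
-//			close(done)
-//		}()
-//		select {
-//		case <-done:
-//		case <-time.After(5 * time.Second):
-//			fmt.Println("starved (should not happen)")
-//		}
-//	}
-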
-// dropg removes the association between m and the current goroutine m->curg (gp for short).
-// Typically a caller sets gp's status away from Grunning and then
-// immediately calls dropg to finish the job. The caller is also responsible
-// for arranging that gp will be restarted using ready at an
-// appropriate time. After calling dropg and arranging for gp to be
-// readied later, the caller can do other work but eventually should
-// call schedule to restart the scheduling of goroutines on this m.
-func dropg() {
- _g_ := getg()
-
- setMNoWB(&_g_.m.curg.m, nil)
- setGNoWB(&_g_.m.curg, nil)
-}
-
-// checkTimers runs any timers for the P that are ready.
-// If now is not 0 it is the current time.
-// It returns the passed time or the current time if now was passed as 0,
-// and the time when the next timer should run or 0 if there is no next timer,
-// and reports whether it ran any timers.
-// If the time when the next timer should run is not 0,
-// it is always larger than the returned time.
-// We pass now in and out to avoid extra calls of nanotime.
-//go:yeswritebarrierrec
-func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
- // If it's not yet time for the first timer, or the first adjusted
- // timer, then there is nothing to do.
- next := int64(atomic.Load64(&pp.timer0When))
- nextAdj := int64(atomic.Load64(&pp.timerModifiedEarliest))
- if next == 0 || (nextAdj != 0 && nextAdj < next) {
- next = nextAdj
- }
-
- if next == 0 {
- // No timers to run or adjust.
- return now, 0, false
- }
-
- if now == 0 {
- now = nanotime()
- }
- if now < next {
- // Next timer is not ready to run, but keep going
- // if we would clear deleted timers.
- // This corresponds to the condition below where
- // we decide whether to call clearDeletedTimers.
- if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) {
- return now, next, false
- }
- }
-
- lock(&pp.timersLock)
-
- if len(pp.timers) > 0 {
- adjusttimers(pp, now)
- for len(pp.timers) > 0 {
- // Note that runtimer may temporarily unlock
- // pp.timersLock.
- if tw := runtimer(pp, now); tw != 0 {
- if tw > 0 {
- pollUntil = tw
- }
- break
- }
- ran = true
- }
- }
-
- // If this is the local P, and there are a lot of deleted timers,
- // clear them out. We only do this for the local P to reduce
- // lock contention on timersLock.
- if pp == getg().m.p.ptr() && int(atomic.Load(&pp.deletedTimers)) > len(pp.timers)/4 {
- clearDeletedTimers(pp)
- }
-
- unlock(&pp.timersLock)
-
- return now, pollUntil, ran
-}
-
-func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
- unlock((*mutex)(lock))
- return true
-}
-
-// park continuation on g0.
-func park_m(gp *g) {
- _g_ := getg()
-
- if trace.enabled {
- traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
- }
-
- casgstatus(gp, _Grunning, _Gwaiting)
- dropg()
-
- if fn := _g_.m.waitunlockf; fn != nil {
- ok := fn(gp, _g_.m.waitlock)
- _g_.m.waitunlockf = nil
- _g_.m.waitlock = nil
- if !ok {
- if trace.enabled {
- traceGoUnpark(gp, 2)
- }
- casgstatus(gp, _Gwaiting, _Grunnable)
- execute(gp, true) // Schedule it back, never returns.
- }
- }
- schedule()
-}
-
-func goschedImpl(gp *g) {
- status := readgstatus(gp)
- if status&^_Gscan != _Grunning {
- dumpgstatus(gp)
- throw("bad g status")
- }
- casgstatus(gp, _Grunning, _Grunnable)
- dropg()
- lock(&sched.lock)
- globrunqput(gp)
- unlock(&sched.lock)
-
- schedule()
-}
-
-// Gosched continuation on g0.
-func gosched_m(gp *g) {
- if trace.enabled {
- traceGoSched()
- }
- goschedImpl(gp)
-}
-
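-// For illustration only (not part of this file): runtime.Gosched is the
-// public entry point that funnels into gosched_m above, parking the calling
-// goroutine on the global run queue.
-//
-//	package main
-//
-//	import (
-//		"fmt"
-//		"runtime"
-//	)
-//
-//	func main() {
-//		done := make(chan bool)
-//		go func() {
-//			fmt.Println("background goroutine ran")
-//			done <- true
-//		}()
-//		runtime.Gosched() // yield; the other goroutine gets the P first
-//		<-done
-//	}
-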
-// goschedguarded is a forbidden-states-avoided version of gosched_m
-func goschedguarded_m(gp *g) {
-
- if !canPreemptM(gp.m) {
- gogo(&gp.sched) // never return
- }
-
- if trace.enabled {
- traceGoSched()
- }
- goschedImpl(gp)
-}
-
-func gopreempt_m(gp *g) {
- if trace.enabled {
- traceGoPreempt()
- }
- goschedImpl(gp)
-}
-
-// preemptPark parks gp and puts it in _Gpreempted.
-//
-//go:systemstack
-func preemptPark(gp *g) {
- if trace.enabled {
- traceGoPark(traceEvGoBlock, 0)
- }
- status := readgstatus(gp)
- if status&^_Gscan != _Grunning {
- dumpgstatus(gp)
- throw("bad g status")
- }
- gp.waitreason = waitReasonPreempted
-
- if gp.asyncSafePoint {
- // Double-check that async preemption does not
- // happen in SPWRITE assembly functions.
- // isAsyncSafePoint must exclude this case.
- f := findfunc(gp.sched.pc)
- if !f.valid() {
- throw("preempt at unknown pc")
- }
- if f.flag&funcFlag_SPWRITE != 0 {
- println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
- throw("preempt SPWRITE")
- }
- }
-
- // Transition from _Grunning to _Gscan|_Gpreempted. We can't
- // be in _Grunning when we dropg because then we'd be running
- // without an M, but the moment we're in _Gpreempted,
- // something could claim this G before we've fully cleaned it
- // up. Hence, we set the scan bit to lock down further
- // transitions until we can dropg.
- casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
- dropg()
- casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
- schedule()
-}
-
-// goyield is like Gosched, but it:
-// - emits a GoPreempt trace event instead of a GoSched trace event
-// - puts the current G on the runq of the current P instead of the globrunq
-func goyield() {
- checkTimeouts()
- mcall(goyield_m)
-}
-
-func goyield_m(gp *g) {
- if trace.enabled {
- traceGoPreempt()
- }
- pp := gp.m.p.ptr()
- casgstatus(gp, _Grunning, _Grunnable)
- dropg()
- runqput(pp, gp, false)
- schedule()
-}
-
-// Finishes execution of the current goroutine.
-func goexit1() {
- if raceenabled {
- racegoend()
- }
- if trace.enabled {
- traceGoEnd()
- }
- mcall(goexit0)
-}
-
-// goexit continuation on g0.
-func goexit0(gp *g) {
- _g_ := getg()
- _p_ := _g_.m.p.ptr()
-
- casgstatus(gp, _Grunning, _Gdead)
- gcController.addScannableStack(_p_, -int64(gp.stack.hi-gp.stack.lo))
- if isSystemGoroutine(gp, false) {
- atomic.Xadd(&sched.ngsys, -1)
- }
- gp.m = nil
- locked := gp.lockedm != 0
- gp.lockedm = 0
- _g_.m.lockedg = 0
- gp.preemptStop = false
- gp.paniconfault = false
-	gp._defer = nil // should be nil already but just in case.
- gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
- gp.writebuf = nil
- gp.waitreason = 0
- gp.param = nil
- gp.labels = nil
- gp.timer = nil
-
- if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
- // Flush assist credit to the global pool. This gives
- // better information to pacing if the application is
-		// rapidly creating and exiting goroutines.
- assistWorkPerByte := gcController.assistWorkPerByte.Load()
- scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
- atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
- gp.gcAssistBytes = 0
- }
-
- dropg()
-
- if GOARCH == "wasm" { // no threads yet on wasm
- gfput(_p_, gp)
- schedule() // never returns
- }
-
- if _g_.m.lockedInt != 0 {
- print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
- throw("internal lockOSThread error")
- }
- gfput(_p_, gp)
- if locked {
- // The goroutine may have locked this thread because
- // it put it in an unusual kernel state. Kill it
- // rather than returning it to the thread pool.
-
- // Return to mstart, which will release the P and exit
- // the thread.
- if GOOS != "plan9" { // See golang.org/issue/22227.
- gogo(&_g_.m.g0.sched)
- } else {
- // Clear lockedExt on plan9 since we may end up re-using
- // this thread.
- _g_.m.lockedExt = 0
- }
- }
- schedule()
-}
-
-// save updates getg().sched to refer to pc and sp so that a following
-// gogo will restore pc and sp.
-//
-// save must not have write barriers because invoking a write barrier
-// can clobber getg().sched.
-//
-//go:nosplit
-//go:nowritebarrierrec
-func save(pc, sp uintptr) {
- _g_ := getg()
-
- if _g_ == _g_.m.g0 || _g_ == _g_.m.gsignal {
- // m.g0.sched is special and must describe the context
- // for exiting the thread. mstart1 writes to it directly.
- // m.gsignal.sched should not be used at all.
- // This check makes sure save calls do not accidentally
- // run in contexts where they'd write to system g's.
- throw("save on system g not allowed")
- }
-
- _g_.sched.pc = pc
- _g_.sched.sp = sp
- _g_.sched.lr = 0
- _g_.sched.ret = 0
- // We need to ensure ctxt is zero, but can't have a write
- // barrier here. However, it should always already be zero.
- // Assert that.
- if _g_.sched.ctxt != nil {
- badctxt()
- }
-}
-
-// The goroutine g is about to enter a system call.
-// Record that it's not using the cpu anymore.
-// This is called only from the go syscall library and cgocall,
-// not from the low-level system calls used by the runtime.
-//
-// Entersyscall cannot split the stack: the save must
-// make g->sched refer to the caller's stack segment, because
-// entersyscall is going to return immediately after.
-//
-// Nothing entersyscall calls can split the stack either.
-// We cannot safely move the stack during an active call to syscall,
-// because we do not know which of the uintptr arguments are
-// really pointers (back into the stack).
-// In practice, this means that we make the fast path run through
-// entersyscall doing no-split things, and the slow path has to use systemstack
-// to run bigger things on the system stack.
-//
-// reentersyscall is the entry point used by cgo callbacks, where explicitly
-// saved SP and PC are restored. This is needed when exitsyscall will be called
-// from a function further up in the call stack than the parent, as g->syscallsp
-// must always point to a valid stack frame. entersyscall below is the normal
-// entry point for syscalls, which obtains the SP and PC from the caller.
-//
-// Syscall tracing:
-// At the start of a syscall we emit traceGoSysCall to capture the stack trace.
-// If the syscall does not block, that is it; we do not emit any other events.
-// If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
-// when syscall returns we emit traceGoSysExit and when the goroutine starts running
-// (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
-// To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
-// we remember the current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
-// whoever emits traceGoSysBlock increments p.syscalltick afterwards;
-// and we wait for the increment before emitting traceGoSysExit.
-// Note that the increment is done even if tracing is not enabled,
-// because tracing can be enabled in the middle of a syscall. We don't want the wait to hang.
-//
-//go:nosplit
-func reentersyscall(pc, sp uintptr) {
- _g_ := getg()
-
- // Disable preemption because during this function g is in Gsyscall status,
- // but can have inconsistent g->sched, do not let GC observe it.
- _g_.m.locks++
-
- // Entersyscall must not call any function that might split/grow the stack.
- // (See details in comment above.)
- // Catch calls that might, by replacing the stack guard with something that
- // will trip any stack check and leaving a flag to tell newstack to die.
- _g_.stackguard0 = stackPreempt
- _g_.throwsplit = true
-
- // Leave SP around for GC and traceback.
- save(pc, sp)
- _g_.syscallsp = sp
- _g_.syscallpc = pc
- casgstatus(_g_, _Grunning, _Gsyscall)
- if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
- systemstack(func() {
- print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
- throw("entersyscall")
- })
- }
-
- if trace.enabled {
- systemstack(traceGoSysCall)
- // systemstack itself clobbers g.sched.{pc,sp} and we might
- // need them later when the G is genuinely blocked in a
- // syscall
- save(pc, sp)
- }
-
- if atomic.Load(&sched.sysmonwait) != 0 {
- systemstack(entersyscall_sysmon)
- save(pc, sp)
- }
-
- if _g_.m.p.ptr().runSafePointFn != 0 {
- // runSafePointFn may stack split if run on this stack
- systemstack(runSafePointFn)
- save(pc, sp)
- }
-
- _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
- _g_.sysblocktraced = true
- pp := _g_.m.p.ptr()
- pp.m = 0
- _g_.m.oldp.set(pp)
- _g_.m.p = 0
- atomic.Store(&pp.status, _Psyscall)
- if sched.gcwaiting != 0 {
- systemstack(entersyscall_gcwait)
- save(pc, sp)
- }
-
- _g_.m.locks--
-}
-
-// Standard syscall entry used by the go syscall library and normal cgo calls.
-//
-// This is exported via linkname to assembly in the syscall package.
-//
-//go:nosplit
-//go:linkname entersyscall
-func entersyscall() {
- reentersyscall(getcallerpc(), getcallersp())
-}
-
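-// For illustration only (not part of this file): the tracing protocol
-// described above can be observed with the runtime/trace package, which
-// flips trace.enabled and so activates the traceGoSysCall/traceGoSysExit
-// paths in entersyscall/exitsyscall.
-//
-//	package main
-//
-//	import (
-//		"os"
-//		"runtime/trace"
-//	)
-//
-//	func main() {
-//		out, err := os.Create("trace.out")
-//		if err != nil {
-//			panic(err)
-//		}
-//		defer out.Close()
-//		if err := trace.Start(out); err != nil {
-//			panic(err)
-//		}
-//		// File I/O enters and leaves syscalls; inspect the resulting
-//		// events with: go tool trace trace.out
-//		if err := os.WriteFile("demo.txt", []byte("hello"), 0644); err != nil {
-//			panic(err)
-//		}
-//		trace.Stop()
-//	}
-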
-func entersyscall_sysmon() {
- lock(&sched.lock)
- if atomic.Load(&sched.sysmonwait) != 0 {
- atomic.Store(&sched.sysmonwait, 0)
- notewakeup(&sched.sysmonnote)
- }
- unlock(&sched.lock)
-}
-
-func entersyscall_gcwait() {
- _g_ := getg()
- _p_ := _g_.m.oldp.ptr()
-
- lock(&sched.lock)
- if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
- if trace.enabled {
- traceGoSysBlock(_p_)
- traceProcStop(_p_)
- }
- _p_.syscalltick++
- if sched.stopwait--; sched.stopwait == 0 {
- notewakeup(&sched.stopnote)
- }
- }
- unlock(&sched.lock)
-}
-
-// The same as entersyscall(), but with a hint that the syscall is blocking.
-//go:nosplit
-func entersyscallblock() {
- _g_ := getg()
-
- _g_.m.locks++ // see comment in entersyscall
- _g_.throwsplit = true
- _g_.stackguard0 = stackPreempt // see comment in entersyscall
- _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
- _g_.sysblocktraced = true
- _g_.m.p.ptr().syscalltick++
-
- // Leave SP around for GC and traceback.
- pc := getcallerpc()
- sp := getcallersp()
- save(pc, sp)
- _g_.syscallsp = _g_.sched.sp
- _g_.syscallpc = _g_.sched.pc
- if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
- sp1 := sp
- sp2 := _g_.sched.sp
- sp3 := _g_.syscallsp
- systemstack(func() {
- print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
- throw("entersyscallblock")
- })
- }
- casgstatus(_g_, _Grunning, _Gsyscall)
- if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
- systemstack(func() {
- print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
- throw("entersyscallblock")
- })
- }
-
- systemstack(entersyscallblock_handoff)
-
- // Resave for traceback during blocked call.
- save(getcallerpc(), getcallersp())
-
- _g_.m.locks--
-}
-
-func entersyscallblock_handoff() {
- if trace.enabled {
- traceGoSysCall()
- traceGoSysBlock(getg().m.p.ptr())
- }
- handoffp(releasep())
-}
-
-// The goroutine g exited its system call.
-// Arrange for it to run on a cpu again.
-// This is called only from the go syscall library, not
-// from the low-level system calls used by the runtime.
-//
-// Write barriers are not allowed because our P may have been stolen.
-//
-// This is exported via linkname to assembly in the syscall package.
-//
-//go:nosplit
-//go:nowritebarrierrec
-//go:linkname exitsyscall
-func exitsyscall() {
- _g_ := getg()
-
- _g_.m.locks++ // see comment in entersyscall
- if getcallersp() > _g_.syscallsp {
- throw("exitsyscall: syscall frame is no longer valid")
- }
-
- _g_.waitsince = 0
- oldp := _g_.m.oldp.ptr()
- _g_.m.oldp = 0
- if exitsyscallfast(oldp) {
- if trace.enabled {
- if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
- systemstack(traceGoStart)
- }
- }
- // There's a cpu for us, so we can run.
- _g_.m.p.ptr().syscalltick++
- // We need to cas the status and scan before resuming...
- casgstatus(_g_, _Gsyscall, _Grunning)
-
- // Garbage collector isn't running (since we are),
- // so okay to clear syscallsp.
- _g_.syscallsp = 0
- _g_.m.locks--
- if _g_.preempt {
- // restore the preemption request in case we've cleared it in newstack
- _g_.stackguard0 = stackPreempt
- } else {
- // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock
- _g_.stackguard0 = _g_.stack.lo + _StackGuard
- }
- _g_.throwsplit = false
-
- if sched.disable.user && !schedEnabled(_g_) {
- // Scheduling of this goroutine is disabled.
- Gosched()
- }
-
- return
- }
-
- _g_.sysexitticks = 0
- if trace.enabled {
- // Wait till traceGoSysBlock event is emitted.
- // This ensures consistency of the trace (the goroutine is started after it is blocked).
- for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
- osyield()
- }
- // We can't trace syscall exit right now because we don't have a P.
- // Tracing code can invoke write barriers that cannot run without a P.
- // So instead we remember the syscall exit time and emit the event
- // in execute when we have a P.
- _g_.sysexitticks = cputicks()
- }
-
- _g_.m.locks--
-
- // Call the scheduler.
- mcall(exitsyscall0)
-
- // Scheduler returned, so we're allowed to run now.
- // Delete the syscallsp information that we left for
- // the garbage collector during the system call.
- // Must wait until now because until gosched returns
- // we don't know for sure that the garbage collector
- // is not running.
- _g_.syscallsp = 0
- _g_.m.p.ptr().syscalltick++
- _g_.throwsplit = false
-}
-
-//go:nosplit
-func exitsyscallfast(oldp *p) bool {
- _g_ := getg()
-
- // Freezetheworld sets stopwait but does not retake P's.
- if sched.stopwait == freezeStopWait {
- return false
- }
-
- // Try to re-acquire the last P.
- if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
- // There's a cpu for us, so we can run.
- wirep(oldp)
- exitsyscallfast_reacquired()
- return true
- }
-
- // Try to get any other idle P.
- if sched.pidle != 0 {
- var ok bool
- systemstack(func() {
- ok = exitsyscallfast_pidle()
- if ok && trace.enabled {
- if oldp != nil {
- // Wait till traceGoSysBlock event is emitted.
- // This ensures consistency of the trace (the goroutine is started after it is blocked).
- for oldp.syscalltick == _g_.m.syscalltick {
- osyield()
- }
- }
- traceGoSysExit(0)
- }
- })
- if ok {
- return true
- }
- }
- return false
-}
-
-// exitsyscallfast_reacquired is the exitsyscall path on which this G
-// has successfully reacquired the P it was running on before the
-// syscall.
-//
-//go:nosplit
-func exitsyscallfast_reacquired() {
- _g_ := getg()
- if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
- if trace.enabled {
-			// The p was retaken and then entered a syscall again (since _g_.m.syscalltick has changed).
- // traceGoSysBlock for this syscall was already emitted,
- // but here we effectively retake the p from the new syscall running on the same p.
- systemstack(func() {
- // Denote blocking of the new syscall.
- traceGoSysBlock(_g_.m.p.ptr())
- // Denote completion of the current syscall.
- traceGoSysExit(0)
- })
- }
- _g_.m.p.ptr().syscalltick++
- }
-}
-
-func exitsyscallfast_pidle() bool {
- lock(&sched.lock)
- _p_ := pidleget()
- if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
- atomic.Store(&sched.sysmonwait, 0)
- notewakeup(&sched.sysmonnote)
- }
- unlock(&sched.lock)
- if _p_ != nil {
- acquirep(_p_)
- return true
- }
- return false
-}
-
-// exitsyscall slow path on g0.
-// Failed to acquire P, enqueue gp as runnable.
-//
-// Called via mcall, so gp is the calling g from this M.
-//
-//go:nowritebarrierrec
-func exitsyscall0(gp *g) {
- casgstatus(gp, _Gsyscall, _Grunnable)
- dropg()
- lock(&sched.lock)
- var _p_ *p
- if schedEnabled(gp) {
- _p_ = pidleget()
- }
- var locked bool
- if _p_ == nil {
- globrunqput(gp)
-
- // Below, we stoplockedm if gp is locked. globrunqput releases
- // ownership of gp, so we must check if gp is locked prior to
- // committing the release by unlocking sched.lock, otherwise we
- // could race with another M transitioning gp from unlocked to
- // locked.
- locked = gp.lockedm != 0
- } else if atomic.Load(&sched.sysmonwait) != 0 {
- atomic.Store(&sched.sysmonwait, 0)
- notewakeup(&sched.sysmonnote)
- }
- unlock(&sched.lock)
- if _p_ != nil {
- acquirep(_p_)
- execute(gp, false) // Never returns.
- }
- if locked {
- // Wait until another thread schedules gp and so m again.
- //
- // N.B. lockedm must be this M, as this g was running on this M
- // before entersyscall.
- stoplockedm()
- execute(gp, false) // Never returns.
- }
- stopm()
- schedule() // Never returns.
-}
-
-// Called from syscall package before fork.
-//go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
-//go:nosplit
-func syscall_runtime_BeforeFork() {
- gp := getg().m.curg
-
- // Block signals during a fork, so that the child does not run
- // a signal handler before exec if a signal is sent to the process
- // group. See issue #18600.
- gp.m.locks++
- sigsave(&gp.m.sigmask)
- sigblock(false)
-
- // This function is called before fork in syscall package.
- // Code between fork and exec must not allocate memory nor even try to grow stack.
- // Here we spoil g->_StackGuard to reliably detect any attempts to grow stack.
- // runtime_AfterFork will undo this in parent process, but not in child.
- gp.stackguard0 = stackFork
-}
-
-// Called from syscall package after fork in parent.
-//go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
-//go:nosplit
-func syscall_runtime_AfterFork() {
- gp := getg().m.curg
-
- // See the comments in beforefork.
- gp.stackguard0 = gp.stack.lo + _StackGuard
-
- msigrestore(gp.m.sigmask)
-
- gp.m.locks--
-}
-
-// inForkedChild is true while manipulating signals in the child process.
-// This is used to avoid calling libc functions in case we are using vfork.
-var inForkedChild bool
-
-// Called from syscall package after fork in child.
-// It resets non-sigignored signals to the default handler, and
-// restores the signal mask in preparation for the exec.
-//
-// Because this might be called during a vfork, and therefore may be
-// temporarily sharing address space with the parent process, this must
-// not change any global variables or call into C code that may do so.
-//
-//go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
-//go:nosplit
-//go:nowritebarrierrec
-func syscall_runtime_AfterForkInChild() {
- // It's OK to change the global variable inForkedChild here
- // because we are going to change it back. There is no race here,
- // because if we are sharing address space with the parent process,
- // then the parent process can not be running concurrently.
- inForkedChild = true
-
- clearSignalHandlers()
-
- // When we are the child we are the only thread running,
- // so we know that nothing else has changed gp.m.sigmask.
- msigrestore(getg().m.sigmask)
-
- inForkedChild = false
-}
-
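-// For illustration only (not part of this file): on Unix-like systems the
-// BeforeFork/AfterFork/AfterForkInChild hooks above bracket every fork
-// performed by os/exec, blocking signals and guarding the stack around it.
-//
-//	package main
-//
-//	import (
-//		"fmt"
-//		"os/exec"
-//	)
-//
-//	func main() {
-//		out, err := exec.Command("echo", "hello").Output() // fork+exec underneath
-//		if err != nil {
-//			panic(err)
-//		}
-//		fmt.Print(string(out))
-//	}
-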
-// pendingPreemptSignals is the number of preemption signals
-// that have been sent but not received. This is only used on Darwin.
-// For #41702.
-var pendingPreemptSignals uint32
-
-// Called from syscall package before Exec.
-//go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec
-func syscall_runtime_BeforeExec() {
- // Prevent thread creation during exec.
- execLock.lock()
-
- // On Darwin, wait for all pending preemption signals to
- // be received. See issue #41702.
- if GOOS == "darwin" || GOOS == "ios" {
- for int32(atomic.Load(&pendingPreemptSignals)) > 0 {
- osyield()
- }
- }
-}
-
-// Called from syscall package after Exec.
-//go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec
-func syscall_runtime_AfterExec() {
- execLock.unlock()
-}
-
-// Allocate a new g, with a stack big enough for stacksize bytes.
-func malg(stacksize int32) *g {
- newg := new(g)
- if stacksize >= 0 {
- stacksize = round2(_StackSystem + stacksize)
- systemstack(func() {
- newg.stack = stackalloc(uint32(stacksize))
- })
- newg.stackguard0 = newg.stack.lo + _StackGuard
- newg.stackguard1 = ^uintptr(0)
- // Clear the bottom word of the stack. We record g
- // there on gsignal stack during VDSO on ARM and ARM64.
- *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
- }
- return newg
-}
-
-// Create a new g running fn.
-// Put it on the queue of g's waiting to run.
-// The compiler turns a go statement into a call to this.
-func newproc(fn *funcval) {
- gp := getg()
- pc := getcallerpc()
- systemstack(func() {
- newg := newproc1(fn, gp, pc)
-
- _p_ := getg().m.p.ptr()
- runqput(_p_, newg, true)
-
- if mainStarted {
- wakep()
- }
- })
-}
-
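-// For illustration only (not part of this file): the compiler lowers each go
-// statement to a newproc call, so the sketch below allocates a g, queues it
-// on the current P, and may wake another P via wakep.
-//
-//	package main
-//
-//	import "fmt"
-//
-//	func main() {
-//		done := make(chan struct{})
-//		go func() { // compiled to a call to runtime.newproc
-//			fmt.Println("hello from a new g")
-//			close(done)
-//		}()
-//		<-done
-//	}
-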
-// Create a new g in state _Grunnable, starting at fn. callerpc is the
-// address of the go statement that created this. The caller is responsible
-// for adding the new g to the scheduler.
-func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
- _g_ := getg()
-
- if fn == nil {
- _g_.m.throwing = -1 // do not dump full stacks
- throw("go of nil func value")
- }
- acquirem() // disable preemption because it can be holding p in a local var
-
- _p_ := _g_.m.p.ptr()
- newg := gfget(_p_)
- if newg == nil {
- newg = malg(_StackMin)
- casgstatus(newg, _Gidle, _Gdead)
- allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
- }
- if newg.stack.hi == 0 {
- throw("newproc1: newg missing stack")
- }
-
- if readgstatus(newg) != _Gdead {
- throw("newproc1: new g is not Gdead")
- }
-
- totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
- totalSize = alignUp(totalSize, sys.StackAlign)
- sp := newg.stack.hi - totalSize
- spArg := sp
- if usesLR {
- // caller's LR
- *(*uintptr)(unsafe.Pointer(sp)) = 0
- prepGoExitFrame(sp)
- spArg += sys.MinFrameSize
- }
-
- memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
- newg.sched.sp = sp
- newg.stktopsp = sp
- newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
- newg.sched.g = guintptr(unsafe.Pointer(newg))
- gostartcallfn(&newg.sched, fn)
- newg.gopc = callerpc
- newg.ancestors = saveAncestors(callergp)
- newg.startpc = fn.fn
- if isSystemGoroutine(newg, false) {
- atomic.Xadd(&sched.ngsys, +1)
- } else {
- // Only user goroutines inherit pprof labels.
- if _g_.m.curg != nil {
- newg.labels = _g_.m.curg.labels
- }
- }
- // Track initial transition?
- newg.trackingSeq = uint8(fastrand())
- if newg.trackingSeq%gTrackingPeriod == 0 {
- newg.tracking = true
- }
- casgstatus(newg, _Gdead, _Grunnable)
- gcController.addScannableStack(_p_, int64(newg.stack.hi-newg.stack.lo))
-
- if _p_.goidcache == _p_.goidcacheend {
- // Sched.goidgen is the last allocated id,
- // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
- // At startup sched.goidgen=0, so main goroutine receives goid=1.
- _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
- _p_.goidcache -= _GoidCacheBatch - 1
- _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
- }
- newg.goid = int64(_p_.goidcache)
- _p_.goidcache++
- if raceenabled {
- newg.racectx = racegostart(callerpc)
- }
- if trace.enabled {
- traceGoCreate(newg, newg.startpc)
- }
- releasem(_g_.m)
-
- return newg
-}
-
-// saveAncestors copies previous ancestors of the given caller g and
-// includes info for the current caller in a new set of tracebacks for
-// a g being created.
-func saveAncestors(callergp *g) *[]ancestorInfo {
- // Copy all prior info, except for the root goroutine (goid 0).
- if debug.tracebackancestors <= 0 || callergp.goid == 0 {
- return nil
- }
- var callerAncestors []ancestorInfo
- if callergp.ancestors != nil {
- callerAncestors = *callergp.ancestors
- }
- n := int32(len(callerAncestors)) + 1
- if n > debug.tracebackancestors {
- n = debug.tracebackancestors
- }
- ancestors := make([]ancestorInfo, n)
- copy(ancestors[1:], callerAncestors)
-
- var pcs [_TracebackMaxFrames]uintptr
- npcs := gcallers(callergp, 0, pcs[:])
- ipcs := make([]uintptr, npcs)
- copy(ipcs, pcs[:])
- ancestors[0] = ancestorInfo{
- pcs: ipcs,
- goid: callergp.goid,
- gopc: callergp.gopc,
- }
-
- ancestorsp := new([]ancestorInfo)
- *ancestorsp = ancestors
- return ancestorsp
-}
-
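-// For illustration only (not part of this file): the ancestor records saved
-// above surface in crash reports when tracebackancestors is enabled.
-//
-//	package main
-//
-//	import "time"
-//
-//	func main() {
-//		// Run with GODEBUG=tracebackancestors=5: the panic traceback also
-//		// shows the creation stack recorded by saveAncestors.
-//		go func() { panic("who created me?") }()
-//		time.Sleep(time.Second)
-//	}
-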
-// Put on gfree list.
-// If local list is too long, transfer a batch to the global list.
-func gfput(_p_ *p, gp *g) {
- if readgstatus(gp) != _Gdead {
- throw("gfput: bad status (not Gdead)")
- }
-
- stksize := gp.stack.hi - gp.stack.lo
-
- if stksize != _FixedStack {
- // non-standard stack size - free it.
- stackfree(gp.stack)
- gp.stack.lo = 0
- gp.stack.hi = 0
- gp.stackguard0 = 0
- }
-
- _p_.gFree.push(gp)
- _p_.gFree.n++
- if _p_.gFree.n >= 64 {
- var (
- inc int32
- stackQ gQueue
- noStackQ gQueue
- )
- for _p_.gFree.n >= 32 {
- gp = _p_.gFree.pop()
- _p_.gFree.n--
- if gp.stack.lo == 0 {
- noStackQ.push(gp)
- } else {
- stackQ.push(gp)
- }
- inc++
- }
- lock(&sched.gFree.lock)
- sched.gFree.noStack.pushAll(noStackQ)
- sched.gFree.stack.pushAll(stackQ)
- sched.gFree.n += inc
- unlock(&sched.gFree.lock)
- }
-}
-
-// Get from gfree list.
-// If local list is empty, grab a batch from global list.
-func gfget(_p_ *p) *g {
-retry:
- if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
- lock(&sched.gFree.lock)
- // Move a batch of free Gs to the P.
- for _p_.gFree.n < 32 {
- // Prefer Gs with stacks.
- gp := sched.gFree.stack.pop()
- if gp == nil {
- gp = sched.gFree.noStack.pop()
- if gp == nil {
- break
- }
- }
- sched.gFree.n--
- _p_.gFree.push(gp)
- _p_.gFree.n++
- }
- unlock(&sched.gFree.lock)
- goto retry
- }
- gp := _p_.gFree.pop()
- if gp == nil {
- return nil
- }
- _p_.gFree.n--
- if gp.stack.lo == 0 {
- // Stack was deallocated in gfput. Allocate a new one.
- systemstack(func() {
- gp.stack = stackalloc(_FixedStack)
- })
- gp.stackguard0 = gp.stack.lo + _StackGuard
- } else {
- if raceenabled {
- racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
- }
- if msanenabled {
- msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
- }
- if asanenabled {
- asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
- }
- }
- return gp
-}
-
-// Purge all cached G's from gfree list to the global list.
-func gfpurge(_p_ *p) {
- var (
- inc int32
- stackQ gQueue
- noStackQ gQueue
- )
- for !_p_.gFree.empty() {
- gp := _p_.gFree.pop()
- _p_.gFree.n--
- if gp.stack.lo == 0 {
- noStackQ.push(gp)
- } else {
- stackQ.push(gp)
- }
- inc++
- }
- lock(&sched.gFree.lock)
- sched.gFree.noStack.pushAll(noStackQ)
- sched.gFree.stack.pushAll(stackQ)
- sched.gFree.n += inc
- unlock(&sched.gFree.lock)
-}
-
-// Breakpoint executes a breakpoint trap.
-func Breakpoint() {
- breakpoint()
-}
-
-// dolockOSThread is called by LockOSThread and lockOSThread below
-// after they modify m.locked. Do not allow preemption during this call,
-// or else the m might be different in this function than in the caller.
-//go:nosplit
-func dolockOSThread() {
- if GOARCH == "wasm" {
- return // no threads on wasm yet
- }
- _g_ := getg()
- _g_.m.lockedg.set(_g_)
- _g_.lockedm.set(_g_.m)
-}
-
-//go:nosplit
-
-// LockOSThread wires the calling goroutine to its current operating system thread.
-// The calling goroutine will always execute in that thread,
-// and no other goroutine will execute in it,
-// until the calling goroutine has made as many calls to
-// UnlockOSThread as to LockOSThread.
-// If the calling goroutine exits without unlocking the thread,
-// the thread will be terminated.
-//
-// All init functions are run on the startup thread. Calling LockOSThread
-// from an init function will cause the main function to be invoked on
-// that thread.
-//
-// A goroutine should call LockOSThread before calling OS services or
-// non-Go library functions that depend on per-thread state.
-func LockOSThread() {
- if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
- // If we need to start a new thread from the locked
- // thread, we need the template thread. Start it now
- // while we're in a known-good state.
- startTemplateThread()
- }
- _g_ := getg()
- _g_.m.lockedExt++
- if _g_.m.lockedExt == 0 {
- _g_.m.lockedExt--
- panic("LockOSThread nesting overflow")
- }
- dolockOSThread()
-}
-
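-// For illustration only (not part of this file): a typical use of
-// LockOSThread, pinning a goroutine before touching per-thread OS state
-// (for example, a C library relying on thread-local storage).
-//
-//	package main
-//
-//	import (
-//		"fmt"
-//		"runtime"
-//	)
-//
-//	func main() {
-//		runtime.LockOSThread()
-//		defer runtime.UnlockOSThread()
-//		// Per-thread state is safe to use between these two calls.
-//		fmt.Println("running pinned to one OS thread")
-//	}
-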
-//go:nosplit
-func lockOSThread() {
- getg().m.lockedInt++
- dolockOSThread()
-}
-
-// dounlockOSThread is called by UnlockOSThread and unlockOSThread below
-// after they update m->locked. Do not allow preemption during this call,
-// or else the m might be different in this function than in the caller.
-//go:nosplit
-func dounlockOSThread() {
- if GOARCH == "wasm" {
- return // no threads on wasm yet
- }
- _g_ := getg()
- if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
- return
- }
- _g_.m.lockedg = 0
- _g_.lockedm = 0
-}
-
-//go:nosplit
-
-// UnlockOSThread undoes an earlier call to LockOSThread.
-// If this drops the number of active LockOSThread calls on the
-// calling goroutine to zero, it unwires the calling goroutine from
-// its fixed operating system thread.
-// If there are no active LockOSThread calls, this is a no-op.
-//
-// Before calling UnlockOSThread, the caller must ensure that the OS
-// thread is suitable for running other goroutines. If the caller made
-// any permanent changes to the state of the thread that would affect
-// other goroutines, it should not call this function and thus leave
-// the goroutine locked to the OS thread until the goroutine (and
-// hence the thread) exits.
-func UnlockOSThread() {
- _g_ := getg()
- if _g_.m.lockedExt == 0 {
- return
- }
- _g_.m.lockedExt--
- dounlockOSThread()
-}
-
-//go:nosplit
-func unlockOSThread() {
- _g_ := getg()
- if _g_.m.lockedInt == 0 {
- systemstack(badunlockosthread)
- }
- _g_.m.lockedInt--
- dounlockOSThread()
-}
-
-func badunlockosthread() {
- throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
-}
-
-func gcount() int32 {
- n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
- for _, _p_ := range allp {
- n -= _p_.gFree.n
- }
-
- // All these variables can be changed concurrently, so the result can be inconsistent.
- // But at least the current goroutine is running.
- if n < 1 {
- n = 1
- }
- return n
-}
-
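-// For illustration only (not part of this file): gcount backs the public
-// runtime.NumGoroutine API.
-//
-//	package main
-//
-//	import (
-//		"fmt"
-//		"runtime"
-//	)
-//
-//	func main() {
-//		for i := 0; i < 3; i++ {
-//			go func() { select {} }() // park three goroutines forever
-//		}
-//		fmt.Println("goroutines:", runtime.NumGoroutine()) // prints 4
-//	}
-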
-func mcount() int32 {
- return int32(sched.mnext - sched.nmfreed)
-}
-
-var prof struct {
- signalLock uint32
- hz int32
-}
-
-func _System() { _System() }
-func _ExternalCode() { _ExternalCode() }
-func _LostExternalCode() { _LostExternalCode() }
-func _GC() { _GC() }
-func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
-func _VDSO() { _VDSO() }
-
-// Called if we receive a SIGPROF signal.
-// Called by the signal handler, may run during STW.
-//go:nowritebarrierrec
-func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
- if prof.hz == 0 {
- return
- }
-
- // If mp.profilehz is 0, then profiling is not enabled for this thread.
- // We must check this to avoid a deadlock between setcpuprofilerate
- // and the call to cpuprof.add, below.
- if mp != nil && mp.profilehz == 0 {
- return
- }
-
- // On mips{,le}/arm, 64bit atomics are emulated with spinlocks, in
- // runtime/internal/atomic. If SIGPROF arrives while the program is inside
- // the critical section, it creates a deadlock (when writing the sample).
- // As a workaround, create a counter of SIGPROFs while in critical section
- // to store the count, and pass it to sigprof.add() later when SIGPROF is
- // received from somewhere else (with _LostSIGPROFDuringAtomic64 as pc).
- if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
- if f := findfunc(pc); f.valid() {
- if hasPrefix(funcname(f), "runtime/internal/atomic") {
- cpuprof.lostAtomic++
- return
- }
- }
- if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
- // runtime/internal/atomic functions call into kernel
- // helpers on arm < 7. See
- // runtime/internal/atomic/sys_linux_arm.s.
- cpuprof.lostAtomic++
- return
- }
- }
-
- // Profiling runs concurrently with GC, so it must not allocate.
- // Set a trap in case the code does allocate.
- // Note that on windows, one thread takes profiles of all the
- // other threads, so mp is usually not getg().m.
- // In fact mp may not even be stopped.
- // See golang.org/issue/17165.
- getg().m.mallocing++
-
- var stk [maxCPUProfStack]uintptr
- n := 0
- if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
- cgoOff := 0
- // Check cgoCallersUse to make sure that we are not
- // interrupting other code that is fiddling with
- // cgoCallers. We are running in a signal handler
- // with all signals blocked, so we don't have to worry
- // about any other code interrupting us.
- if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
- for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
- cgoOff++
- }
- copy(stk[:], mp.cgoCallers[:cgoOff])
- mp.cgoCallers[0] = 0
- }
-
- // Collect Go stack that leads to the cgo call.
- n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0)
- if n > 0 {
- n += cgoOff
- }
- } else {
- n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
- }
-
- if n <= 0 {
- // Normal traceback is impossible or has failed.
- // See if it falls into several common cases.
- n = 0
- if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
- // Libcall, i.e. runtime syscall on windows.
- // Collect Go stack that leads to the call.
- n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
- }
- if n == 0 && mp != nil && mp.vdsoSP != 0 {
- n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
- }
- if n == 0 {
- // If all of the above has failed, account it against abstract "System" or "GC".
- n = 2
- if inVDSOPage(pc) {
- pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
- } else if pc > firstmoduledata.etext {
- // "ExternalCode" is better than "etext".
- pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
- }
- stk[0] = pc
- if mp.preemptoff != "" {
- stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
- } else {
- stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
- }
- }
- }
-
- if prof.hz != 0 {
- // Note: it can happen on Windows that we interrupted a system thread
-		// with no g, so gp could be nil. The other nil checks are done out of
- // caution, but not expected to be nil in practice.
- var tagPtr *unsafe.Pointer
- if gp != nil && gp.m != nil && gp.m.curg != nil {
- tagPtr = &gp.m.curg.labels
- }
- cpuprof.add(tagPtr, stk[:n])
- }
- getg().m.mallocing--
-}
-
-// setcpuprofilerate sets the CPU profiling rate to hz times per second.
-// If hz <= 0, setcpuprofilerate turns off CPU profiling.
-func setcpuprofilerate(hz int32) {
- // Force sane arguments.
- if hz < 0 {
- hz = 0
- }
-
- // Disable preemption, otherwise we can be rescheduled to another thread
- // that has profiling enabled.
- _g_ := getg()
- _g_.m.locks++
-
- // Stop profiler on this thread so that it is safe to lock prof.
- // if a profiling signal came in while we had prof locked,
- // it would deadlock.
- setThreadCPUProfiler(0)
-
- for !atomic.Cas(&prof.signalLock, 0, 1) {
- osyield()
- }
- if prof.hz != hz {
- setProcessCPUProfiler(hz)
- prof.hz = hz
- }
- atomic.Store(&prof.signalLock, 0)
-
- lock(&sched.lock)
- sched.profilehz = hz
- unlock(&sched.lock)
-
- if hz != 0 {
- setThreadCPUProfiler(hz)
- }
-
- _g_.m.locks--
-}
-
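-// For illustration only (not part of this file): runtime/pprof's
-// StartCPUProfile calls SetCPUProfileRate(100), which lands in
-// setcpuprofilerate above; each resulting SIGPROF is handled by sigprof.
-//
-//	package main
-//
-//	import (
-//		"os"
-//		"runtime/pprof"
-//	)
-//
-//	func main() {
-//		f, err := os.Create("cpu.prof")
-//		if err != nil {
-//			panic(err)
-//		}
-//		defer f.Close()
-//		if err := pprof.StartCPUProfile(f); err != nil {
-//			panic(err)
-//		}
-//		defer pprof.StopCPUProfile()
-//		sum := 0
-//		for i := 0; i < 1e8; i++ { // burn CPU so samples accumulate
-//			sum += i
-//		}
-//		_ = sum
-//	}
-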
-// init initializes pp, which may be a freshly allocated p or a
-// previously destroyed p, and transitions it to status _Pgcstop.
-func (pp *p) init(id int32) {
- pp.id = id
- pp.status = _Pgcstop
- pp.sudogcache = pp.sudogbuf[:0]
- pp.deferpool = pp.deferpoolbuf[:0]
- pp.wbBuf.reset()
- if pp.mcache == nil {
- if id == 0 {
- if mcache0 == nil {
- throw("missing mcache?")
- }
- // Use the bootstrap mcache0. Only one P will get
- // mcache0: the one with ID 0.
- pp.mcache = mcache0
- } else {
- pp.mcache = allocmcache()
- }
- }
- if raceenabled && pp.raceprocctx == 0 {
- if id == 0 {
- pp.raceprocctx = raceprocctx0
- raceprocctx0 = 0 // bootstrap
- } else {
- pp.raceprocctx = raceproccreate()
- }
- }
- lockInit(&pp.timersLock, lockRankTimers)
-
- // This P may get timers when it starts running. Set the mask here
- // since the P may not go through pidleget (notably P 0 on startup).
- timerpMask.set(id)
- // Similarly, we may not go through pidleget before this P starts
- // running if it is P 0 on startup.
- idlepMask.clear(id)
-}
-
-// destroy releases all of the resources associated with pp and
-// transitions it to status _Pdead.
-//
-// sched.lock must be held and the world must be stopped.
-func (pp *p) destroy() {
- assertLockHeld(&sched.lock)
- assertWorldStopped()
-
- // Move all runnable goroutines to the global queue
- for pp.runqhead != pp.runqtail {
- // Pop from tail of local queue
- pp.runqtail--
- gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
- // Push onto head of global queue
- globrunqputhead(gp)
- }
- if pp.runnext != 0 {
- globrunqputhead(pp.runnext.ptr())
- pp.runnext = 0
- }
- if len(pp.timers) > 0 {
- plocal := getg().m.p.ptr()
- // The world is stopped, but we acquire timersLock to
- // protect against sysmon calling timeSleepUntil.
- // This is the only case where we hold the timersLock of
- // more than one P, so there are no deadlock concerns.
- lock(&plocal.timersLock)
- lock(&pp.timersLock)
- moveTimers(plocal, pp.timers)
- pp.timers = nil
- pp.numTimers = 0
- pp.deletedTimers = 0
- atomic.Store64(&pp.timer0When, 0)
- unlock(&pp.timersLock)
- unlock(&plocal.timersLock)
- }
- // Flush p's write barrier buffer.
- if gcphase != _GCoff {
- wbBufFlush1(pp)
- pp.gcw.dispose()
- }
- for i := range pp.sudogbuf {
- pp.sudogbuf[i] = nil
- }
- pp.sudogcache = pp.sudogbuf[:0]
- for j := range pp.deferpoolbuf {
- pp.deferpoolbuf[j] = nil
- }
- pp.deferpool = pp.deferpoolbuf[:0]
- systemstack(func() {
- for i := 0; i < pp.mspancache.len; i++ {
- // Safe to call since the world is stopped.
- mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
- }
- pp.mspancache.len = 0
- lock(&mheap_.lock)
- pp.pcache.flush(&mheap_.pages)
- unlock(&mheap_.lock)
- })
- freemcache(pp.mcache)
- pp.mcache = nil
- gfpurge(pp)
- traceProcFree(pp)
- if raceenabled {
- if pp.timerRaceCtx != 0 {
- // The race detector code uses a callback to fetch
- // the proc context, so arrange for that callback
- // to see the right thing.
- // This hack only works because we are the only
- // thread running.
- mp := getg().m
- phold := mp.p.ptr()
- mp.p.set(pp)
-
- racectxend(pp.timerRaceCtx)
- pp.timerRaceCtx = 0
-
- mp.p.set(phold)
- }
- raceprocdestroy(pp.raceprocctx)
- pp.raceprocctx = 0
- }
- pp.gcAssistTime = 0
- pp.status = _Pdead
-}
-
-// Change number of processors.
-//
-// sched.lock must be held, and the world must be stopped.
-//
-// gcworkbufs must not be being modified by either the GC or the write barrier
-// code, so the GC must not be running if the number of Ps actually changes.
-//
-// Returns list of Ps with local work, they need to be scheduled by the caller.
-func procresize(nprocs int32) *p {
- assertLockHeld(&sched.lock)
- assertWorldStopped()
-
- old := gomaxprocs
- if old < 0 || nprocs <= 0 {
- throw("procresize: invalid arg")
- }
- if trace.enabled {
- traceGomaxprocs(nprocs)
- }
-
- // update statistics
- now := nanotime()
- if sched.procresizetime != 0 {
- sched.totaltime += int64(old) * (now - sched.procresizetime)
- }
- sched.procresizetime = now
-
- maskWords := (nprocs + 31) / 32
-
- // Grow allp if necessary.
- if nprocs > int32(len(allp)) {
- // Synchronize with retake, which could be running
- // concurrently since it doesn't run on a P.
- lock(&allpLock)
- if nprocs <= int32(cap(allp)) {
- allp = allp[:nprocs]
- } else {
- nallp := make([]*p, nprocs)
- // Copy everything up to allp's cap so we
- // never lose old allocated Ps.
- copy(nallp, allp[:cap(allp)])
- allp = nallp
- }
-
- if maskWords <= int32(cap(idlepMask)) {
- idlepMask = idlepMask[:maskWords]
- timerpMask = timerpMask[:maskWords]
- } else {
- nidlepMask := make([]uint32, maskWords)
- // No need to copy beyond len, old Ps are irrelevant.
- copy(nidlepMask, idlepMask)
- idlepMask = nidlepMask
-
- ntimerpMask := make([]uint32, maskWords)
- copy(ntimerpMask, timerpMask)
- timerpMask = ntimerpMask
- }
- unlock(&allpLock)
- }
-
- // initialize new P's
- for i := old; i < nprocs; i++ {
- pp := allp[i]
- if pp == nil {
- pp = new(p)
- }
- pp.init(i)
- atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
- }
-
- _g_ := getg()
- if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
- // continue to use the current P
- _g_.m.p.ptr().status = _Prunning
- _g_.m.p.ptr().mcache.prepareForSweep()
- } else {
- // release the current P and acquire allp[0].
- //
- // We must do this before destroying our current P
- // because p.destroy itself has write barriers, so we
- // need to do that from a valid P.
- if _g_.m.p != 0 {
- if trace.enabled {
- // Pretend that we were descheduled
- // and then scheduled again to keep
- // the trace sane.
- traceGoSched()
- traceProcStop(_g_.m.p.ptr())
- }
- _g_.m.p.ptr().m = 0
- }
- _g_.m.p = 0
- p := allp[0]
- p.m = 0
- p.status = _Pidle
- acquirep(p)
- if trace.enabled {
- traceGoStart()
- }
- }
-
- // g.m.p is now set, so we no longer need mcache0 for bootstrapping.
- mcache0 = nil
-
- // release resources from unused P's
- for i := nprocs; i < old; i++ {
- p := allp[i]
- p.destroy()
- // can't free P itself because it can be referenced by an M in syscall
- }
-
- // Trim allp.
- if int32(len(allp)) != nprocs {
- lock(&allpLock)
- allp = allp[:nprocs]
- idlepMask = idlepMask[:maskWords]
- timerpMask = timerpMask[:maskWords]
- unlock(&allpLock)
- }
-
- var runnablePs *p
- for i := nprocs - 1; i >= 0; i-- {
- p := allp[i]
- if _g_.m.p.ptr() == p {
- continue
- }
- p.status = _Pidle
- if runqempty(p) {
- pidleput(p)
- } else {
- p.m.set(mget())
- p.link.set(runnablePs)
- runnablePs = p
- }
- }
- stealOrder.reset(uint32(nprocs))
- var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
- atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
- return runnablePs
-}
-
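-// For illustration only (not part of this file): runtime.GOMAXPROCS is the
-// public knob that ends in procresize; changing it stops the world.
-//
-//	package main
-//
-//	import (
-//		"fmt"
-//		"runtime"
-//	)
-//
-//	func main() {
-//		prev := runtime.GOMAXPROCS(2) // resize to two Ps
-//		fmt.Println("previous:", prev)
-//		fmt.Println("current:", runtime.GOMAXPROCS(0)) // 0 only queries
-//	}
-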
-// Associate p and the current m.
-//
-// This function is allowed to have write barriers even if the caller
-// isn't because it immediately acquires _p_.
-//
-//go:yeswritebarrierrec
-func acquirep(_p_ *p) {
- // Do the part that isn't allowed to have write barriers.
- wirep(_p_)
-
- // Have p; write barriers now allowed.
-
- // Perform deferred mcache flush before this P can allocate
- // from a potentially stale mcache.
- _p_.mcache.prepareForSweep()
-
- if trace.enabled {
- traceProcStart()
- }
-}
-
-// wirep is the first step of acquirep, which actually associates the
-// current M to _p_. This is broken out so we can disallow write
-// barriers for this part, since we don't yet have a P.
-//
-//go:nowritebarrierrec
-//go:nosplit
-func wirep(_p_ *p) {
- _g_ := getg()
-
- if _g_.m.p != 0 {
- throw("wirep: already in go")
- }
- if _p_.m != 0 || _p_.status != _Pidle {
- id := int64(0)
- if _p_.m != 0 {
- id = _p_.m.ptr().id
- }
- print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
- throw("wirep: invalid p state")
- }
- _g_.m.p.set(_p_)
- _p_.m.set(_g_.m)
- _p_.status = _Prunning
-}
-
-// Disassociate p and the current m.
-func releasep() *p {
- _g_ := getg()
-
- if _g_.m.p == 0 {
- throw("releasep: invalid arg")
- }
- _p_ := _g_.m.p.ptr()
- if _p_.m.ptr() != _g_.m || _p_.status != _Prunning {
- print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " p->status=", _p_.status, "\n")
- throw("releasep: invalid p state")
- }
- if trace.enabled {
- traceProcStop(_g_.m.p.ptr())
- }
- _g_.m.p = 0
- _p_.m = 0
- _p_.status = _Pidle
- return _p_
-}
-
-func incidlelocked(v int32) {
- lock(&sched.lock)
- sched.nmidlelocked += v
- if v > 0 {
- checkdead()
- }
- unlock(&sched.lock)
-}
-
-// Check for deadlock situation.
-// The check is based on the number of running M's; if it is 0, there is a deadlock.
-// sched.lock must be held.
-func checkdead() {
- assertLockHeld(&sched.lock)
-
- // For -buildmode=c-shared or -buildmode=c-archive it's OK if
- // there are no running goroutines. The calling program is
- // assumed to be running.
- if islibrary || isarchive {
- return
- }
-
- // If we are dying because of a signal caught on an already idle thread,
- // freezetheworld will cause all running threads to block.
-	// And the runtime will essentially enter a deadlock state,
- // except that there is a thread that will call exit soon.
- if panicking > 0 {
- return
- }
-
- // If we are not running under cgo, but we have an extra M then account
- // for it. (It is possible to have an extra M on Windows without cgo to
- // accommodate callbacks created by syscall.NewCallback. See issue #6751
- // for details.)
- var run0 int32
- if !iscgo && cgoHasExtraM {
- mp := lockextra(true)
- haveExtraM := extraMCount > 0
- unlockextra(mp)
- if haveExtraM {
- run0 = 1
- }
- }
-
- run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
- if run > run0 {
- return
- }
- if run < 0 {
- print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
- throw("checkdead: inconsistent counts")
- }
-
- grunning := 0
- forEachG(func(gp *g) {
- if isSystemGoroutine(gp, false) {
- return
- }
- s := readgstatus(gp)
- switch s &^ _Gscan {
- case _Gwaiting,
- _Gpreempted:
- grunning++
- case _Grunnable,
- _Grunning,
- _Gsyscall:
- print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
- throw("checkdead: runnable g")
- }
- })
- if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
- unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang
- throw("no goroutines (main called runtime.Goexit) - deadlock!")
- }
-
- // Maybe jump time forward for playground.
- if faketime != 0 {
- when, _p_ := timeSleepUntil()
- if _p_ != nil {
- faketime = when
- for pp := &sched.pidle; *pp != 0; pp = &(*pp).ptr().link {
- if (*pp).ptr() == _p_ {
- *pp = _p_.link
- break
- }
- }
- mp := mget()
- if mp == nil {
- // There should always be a free M since
- // nothing is running.
- throw("checkdead: no m for timer")
- }
- mp.nextp.set(_p_)
- notewakeup(&mp.park)
- return
- }
- }
-
- // There are no goroutines running, so we can look at the P's.
- for _, _p_ := range allp {
- if len(_p_.timers) > 0 {
- return
- }
- }
-
- getg().m.throwing = -1 // do not dump full stacks
- unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang
- throw("all goroutines are asleep - deadlock!")
-}
-
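-// For illustration only (not part of this file): the classic way to trip
-// checkdead from user code.
-//
-//	package main
-//
-//	func main() {
-//		ch := make(chan int)
-//		<-ch // no sender can ever exist; the runtime reports
-//		     // "fatal error: all goroutines are asleep - deadlock!"
-//	}
-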
-// forcegcperiod is the maximum time in nanoseconds between garbage
-// collections. If we go this long without a garbage collection, one
-// is forced to run.
-//
-// This is a variable for testing purposes. It normally doesn't change.
-var forcegcperiod int64 = 2 * 60 * 1e9
-
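-// For illustration only (not part of this file): the periodic forced GC can
-// be observed with GODEBUG=gctrace=1; even an idle program logs a GC cycle
-// roughly every forcegcperiod.
-//
-//	package main
-//
-//	import "time"
-//
-//	func main() {
-//		time.Sleep(5 * time.Minute) // watch stderr for periodic gc lines
-//	}
-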
-// needSysmonWorkaround is true if the workaround for
-// golang.org/issue/42515 is needed on NetBSD.
-var needSysmonWorkaround bool = false
-
-// Always runs without a P, so write barriers are not allowed.
-//
-//go:nowritebarrierrec
-func sysmon() {
- lock(&sched.lock)
- sched.nmsys++
- checkdead()
- unlock(&sched.lock)
-
- lasttrace := int64(0)
-	idle := 0 // how many cycles in succession we have not woken anybody up
- delay := uint32(0)
-
- for {
- if idle == 0 { // start with 20us sleep...
- delay = 20
- } else if idle > 50 { // start doubling the sleep after 1ms...
- delay *= 2
- }
- if delay > 10*1000 { // up to 10ms
- delay = 10 * 1000
- }
- usleep(delay)
-
- // sysmon should not enter deep sleep if schedtrace is enabled so that
- // it can print that information at the right time.
- //
- // It should also not enter deep sleep if there are any active P's so
- // that it can retake P's from syscalls, preempt long running G's, and
- // poll the network if all P's are busy for long stretches.
- //
- // It should wakeup from deep sleep if any P's become active either due
- // to exiting a syscall or waking up due to a timer expiring so that it
- // can resume performing those duties. If it wakes from a syscall it
- // resets idle and delay as a bet that since it had retaken a P from a
- // syscall before, it may need to do it again shortly after the
- // application starts work again. It does not reset idle when waking
- // from a timer to avoid adding system load to applications that spend
- // most of their time sleeping.
- now := nanotime()
- if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
- lock(&sched.lock)
- if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
- syscallWake := false
- next, _ := timeSleepUntil()
- if next > now {
- atomic.Store(&sched.sysmonwait, 1)
- unlock(&sched.lock)
- // Make wake-up period small enough
- // for the sampling to be correct.
- sleep := forcegcperiod / 2
- if next-now < sleep {
- sleep = next - now
- }
- shouldRelax := sleep >= osRelaxMinNS
- if shouldRelax {
- osRelax(true)
- }
- syscallWake = notetsleep(&sched.sysmonnote, sleep)
- if shouldRelax {
- osRelax(false)
- }
- lock(&sched.lock)
- atomic.Store(&sched.sysmonwait, 0)
- noteclear(&sched.sysmonnote)
- }
- if syscallWake {
- idle = 0
- delay = 20
- }
- }
- unlock(&sched.lock)
- }
-
- lock(&sched.sysmonlock)
- // Update now in case we blocked on sysmonnote or spent a long time
- // blocked on schedlock or sysmonlock above.
- now = nanotime()
-
- // trigger libc interceptors if needed
- if *cgo_yield != nil {
- asmcgocall(*cgo_yield, nil)
- }
- // poll network if not polled for more than 10ms
- lastpoll := int64(atomic.Load64(&sched.lastpoll))
- if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
- atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
- list := netpoll(0) // non-blocking - returns list of goroutines
- if !list.empty() {
- // Need to decrement number of idle locked M's
- // (pretending that one more is running) before injectglist.
- // Otherwise it can lead to the following situation:
- // injectglist grabs all P's but before it starts M's to run the P's,
- // another M returns from syscall, finishes running its G,
- // observes that there is no work to do and no other running M's
- // and reports deadlock.
- incidlelocked(-1)
- injectglist(&list)
- incidlelocked(1)
- }
- }
- if GOOS == "netbsd" && needSysmonWorkaround {
- // netpoll is responsible for waiting for timer
- // expiration, so we typically don't have to worry
- // about starting an M to service timers. (Note that
- // sleep for timeSleepUntil above simply ensures sysmon
- // starts running again when that timer expiration may
- // cause Go code to run again).
- //
- // However, netbsd has a kernel bug that sometimes
- // misses netpollBreak wake-ups, which can lead to
- // unbounded delays servicing timers. If we detect this
- // overrun, then startm to get something to handle the
- // timer.
- //
- // See issue 42515 and
- // https://gnats.netbsd.org/cgi-bin/query-pr-single.pl?number=50094.
- if next, _ := timeSleepUntil(); next < now {
- startm(nil, false)
- }
- }
- if atomic.Load(&scavenge.sysmonWake) != 0 {
- // Kick the scavenger awake if someone requested it.
- wakeScavenger()
- }
- // retake P's blocked in syscalls
- // and preempt long running G's
- if retake(now) != 0 {
- idle = 0
- } else {
- idle++
- }
- // check if we need to force a GC
- if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
- lock(&forcegc.lock)
- forcegc.idle = 0
- var list gList
- list.push(forcegc.g)
- injectglist(&list)
- unlock(&forcegc.lock)
- }
- if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
- lasttrace = now
- schedtrace(debug.scheddetail > 0)
- }
- unlock(&sched.sysmonlock)
- }
-}
-
-type sysmontick struct {
- schedtick uint32
- schedwhen int64
- syscalltick uint32
- syscallwhen int64
-}
-
-// forcePreemptNS is the time slice given to a G before it is
-// preempted.
-const forcePreemptNS = 10 * 1000 * 1000 // 10ms
-
-func retake(now int64) uint32 {
- n := 0
- // Prevent allp slice changes. This lock will be completely
- // uncontended unless we're already stopping the world.
- lock(&allpLock)
- // We can't use a range loop over allp because we may
- // temporarily drop the allpLock. Hence, we need to re-fetch
- // allp each time around the loop.
- for i := 0; i < len(allp); i++ {
- _p_ := allp[i]
- if _p_ == nil {
- // This can happen if procresize has grown
- // allp but not yet created new Ps.
- continue
- }
- pd := &_p_.sysmontick
- s := _p_.status
- sysretake := false
- if s == _Prunning || s == _Psyscall {
- // Preempt G if it's running for too long.
- t := int64(_p_.schedtick)
- if int64(pd.schedtick) != t {
- pd.schedtick = uint32(t)
- pd.schedwhen = now
- } else if pd.schedwhen+forcePreemptNS <= now {
- preemptone(_p_)
- // In case of syscall, preemptone() doesn't
- // work, because there is no M wired to P.
- sysretake = true
- }
- }
- if s == _Psyscall {
- // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
- t := int64(_p_.syscalltick)
- if !sysretake && int64(pd.syscalltick) != t {
- pd.syscalltick = uint32(t)
- pd.syscallwhen = now
- continue
- }
- // On the one hand we don't want to retake Ps if there is no other work to do,
- // but on the other hand we want to retake them eventually
- // because they can prevent the sysmon thread from deep sleep.
- if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
- continue
- }
- // Drop allpLock so we can take sched.lock.
- unlock(&allpLock)
- // Need to decrement number of idle locked M's
- // (pretending that one more is running) before the CAS.
- // Otherwise the M from which we retake can exit the syscall,
- // increment nmidle and report deadlock.
- incidlelocked(-1)
- if atomic.Cas(&_p_.status, s, _Pidle) {
- if trace.enabled {
- traceGoSysBlock(_p_)
- traceProcStop(_p_)
- }
- n++
- _p_.syscalltick++
- handoffp(_p_)
- }
- incidlelocked(1)
- lock(&allpLock)
- }
- }
- unlock(&allpLock)
- return uint32(n)
-}
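
The per-P logic above is, at its core, a tick-plus-timestamp watchdog: a progress counter that advanced resets the observation window, and one that stalled past a budget triggers preemption. Below is a minimal self-contained sketch of that pattern; worker, watchdog, and budget are illustrative stand-ins for p, sysmontick, and forcePreemptNS, not runtime APIs.

package main

import (
	"fmt"
	"time"
)

type worker struct {
	tick uint32 // incremented by the worker on each unit of progress
}

type watchdog struct {
	lastTick uint32
	lastSeen time.Time
}

const budget = 10 * time.Millisecond // analogue of forcePreemptNS

// check mirrors retake's bookkeeping: a tick that advanced resets the
// window; a tick that stalled past the budget flags the worker.
func (w *watchdog) check(wk *worker, now time.Time) bool {
	if w.lastTick != wk.tick {
		w.lastTick = wk.tick
		w.lastSeen = now
		return false
	}
	return now.Sub(w.lastSeen) >= budget
}

func main() {
	wk := &worker{tick: 1}
	wd := &watchdog{}
	now := time.Now()
	fmt.Println(wd.check(wk, now))             // false: first observation records the tick
	fmt.Println(wd.check(wk, now.Add(budget))) // true: no progress for a full budget
}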
-
-// Tell all goroutines that they have been preempted and they should stop.
-// This function is purely best-effort. It can fail to inform a goroutine if a
-// processor just started running it.
-// No locks need to be held.
-// Returns true if preemption request was issued to at least one goroutine.
-func preemptall() bool {
- res := false
- for _, _p_ := range allp {
- if _p_.status != _Prunning {
- continue
- }
- if preemptone(_p_) {
- res = true
- }
- }
- return res
-}
-
-// Tell the goroutine running on processor P to stop.
-// This function is purely best-effort. It can incorrectly fail to inform the
-// goroutine. It can inform the wrong goroutine. Even if it informs the
-// correct goroutine, that goroutine might ignore the request if it is
-// simultaneously executing newstack.
-// No lock needs to be held.
-// Returns true if preemption request was issued.
-// The actual preemption will happen at some point in the future
-// and will be indicated by the gp->status no longer being
-// Grunning.
-func preemptone(_p_ *p) bool {
- mp := _p_.m.ptr()
- if mp == nil || mp == getg().m {
- return false
- }
- gp := mp.curg
- if gp == nil || gp == mp.g0 {
- return false
- }
-
- gp.preempt = true
-
- // Every call in a goroutine checks for stack overflow by
- // comparing the current stack pointer to gp->stackguard0.
- // Setting gp->stackguard0 to StackPreempt folds
- // preemption into the normal stack overflow check.
- gp.stackguard0 = stackPreempt
-
- // Request an async preemption of this P.
- if preemptMSupported && debug.asyncpreemptoff == 0 {
- _p_.preempt = true
- preemptM(mp)
- }
-
- return true
-}
-
-var starttime int64
-
-func schedtrace(detailed bool) {
- now := nanotime()
- if starttime == 0 {
- starttime = now
- }
-
- lock(&sched.lock)
- print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
- if detailed {
- print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
- }
- // We must be careful while reading data from P's, M's and G's.
- // Even if we hold schedlock, most data can be changed concurrently.
- // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
- for i, _p_ := range allp {
- mp := _p_.m.ptr()
- h := atomic.Load(&_p_.runqhead)
- t := atomic.Load(&_p_.runqtail)
- if detailed {
- id := int64(-1)
- if mp != nil {
- id = mp.id
- }
- print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, " timerslen=", len(_p_.timers), "\n")
- } else {
- // In non-detailed mode format lengths of per-P run queues as:
- // [len1 len2 len3 len4]
- print(" ")
- if i == 0 {
- print("[")
- }
- print(t - h)
- if i == len(allp)-1 {
- print("]\n")
- }
- }
- }
-
- if !detailed {
- unlock(&sched.lock)
- return
- }
-
- for mp := allm; mp != nil; mp = mp.alllink {
- _p_ := mp.p.ptr()
- gp := mp.curg
- lockedg := mp.lockedg.ptr()
- id1 := int32(-1)
- if _p_ != nil {
- id1 = _p_.id
- }
- id2 := int64(-1)
- if gp != nil {
- id2 = gp.goid
- }
- id3 := int64(-1)
- if lockedg != nil {
- id3 = lockedg.goid
- }
- print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
- }
-
- forEachG(func(gp *g) {
- mp := gp.m
- lockedm := gp.lockedm.ptr()
- id1 := int64(-1)
- if mp != nil {
- id1 = mp.id
- }
- id2 := int64(-1)
- if lockedm != nil {
- id2 = lockedm.id
- }
- print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n")
- })
- unlock(&sched.lock)
-}
-
-// schedEnableUser enables or disables the scheduling of user
-// goroutines.
-//
-// This does not stop already running user goroutines, so the caller
-// should first stop the world when disabling user goroutines.
-func schedEnableUser(enable bool) {
- lock(&sched.lock)
- if sched.disable.user == !enable {
- unlock(&sched.lock)
- return
- }
- sched.disable.user = !enable
- if enable {
- n := sched.disable.n
- sched.disable.n = 0
- globrunqputbatch(&sched.disable.runnable, n)
- unlock(&sched.lock)
- for ; n != 0 && sched.npidle != 0; n-- {
- startm(nil, false)
- }
- } else {
- unlock(&sched.lock)
- }
-}
-
-// schedEnabled reports whether gp should be scheduled. It returns
-// false if scheduling of gp is disabled.
-//
-// sched.lock must be held.
-func schedEnabled(gp *g) bool {
- assertLockHeld(&sched.lock)
-
- if sched.disable.user {
- return isSystemGoroutine(gp, true)
- }
- return true
-}
-
-// Put mp on midle list.
-// sched.lock must be held.
-// May run during STW, so write barriers are not allowed.
-//go:nowritebarrierrec
-func mput(mp *m) {
- assertLockHeld(&sched.lock)
-
- mp.schedlink = sched.midle
- sched.midle.set(mp)
- sched.nmidle++
- checkdead()
-}
-
-// Try to get an m from midle list.
-// sched.lock must be held.
-// May run during STW, so write barriers are not allowed.
-//go:nowritebarrierrec
-func mget() *m {
- assertLockHeld(&sched.lock)
-
- mp := sched.midle.ptr()
- if mp != nil {
- sched.midle = mp.schedlink
- sched.nmidle--
- }
- return mp
-}
-
-// Put gp on the global runnable queue.
-// sched.lock must be held.
-// May run during STW, so write barriers are not allowed.
-//go:nowritebarrierrec
-func globrunqput(gp *g) {
- assertLockHeld(&sched.lock)
-
- sched.runq.pushBack(gp)
- sched.runqsize++
-}
-
-// Put gp at the head of the global runnable queue.
-// sched.lock must be held.
-// May run during STW, so write barriers are not allowed.
-//go:nowritebarrierrec
-func globrunqputhead(gp *g) {
- assertLockHeld(&sched.lock)
-
- sched.runq.push(gp)
- sched.runqsize++
-}
-
-// Put a batch of runnable goroutines on the global runnable queue.
-// This clears *batch.
-// sched.lock must be held.
-// May run during STW, so write barriers are not allowed.
-//go:nowritebarrierrec
-func globrunqputbatch(batch *gQueue, n int32) {
- assertLockHeld(&sched.lock)
-
- sched.runq.pushBackAll(*batch)
- sched.runqsize += n
- *batch = gQueue{}
-}
-
-// Try to get a batch of G's from the global runnable queue.
-// sched.lock must be held.
-func globrunqget(_p_ *p, max int32) *g {
- assertLockHeld(&sched.lock)
-
- if sched.runqsize == 0 {
- return nil
- }
-
- n := sched.runqsize/gomaxprocs + 1
- if n > sched.runqsize {
- n = sched.runqsize
- }
- if max > 0 && n > max {
- n = max
- }
- if n > int32(len(_p_.runq))/2 {
- n = int32(len(_p_.runq)) / 2
- }
-
- sched.runqsize -= n
-
- gp := sched.runq.pop()
- n--
- for ; n > 0; n-- {
- gp1 := sched.runq.pop()
- runqput(_p_, gp1, false)
- }
- return gp
-}
-
-// pMask is an atomic bitstring with one bit per P.
-type pMask []uint32
-
-// read returns true if P id's bit is set.
-func (p pMask) read(id uint32) bool {
- word := id / 32
- mask := uint32(1) << (id % 32)
- return (atomic.Load(&p[word]) & mask) != 0
-}
-
-// set sets P id's bit.
-func (p pMask) set(id int32) {
- word := id / 32
- mask := uint32(1) << (id % 32)
- atomic.Or(&p[word], mask)
-}
-
-// clear clears P id's bit.
-func (p pMask) clear(id int32) {
- word := id / 32
- mask := uint32(1) << (id % 32)
- atomic.And(&p[word], ^mask)
-}
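
pMask packs one flag per P into a []uint32 so that readers never take a lock. The same structure can be sketched on the portable sync/atomic API, using CAS loops where the runtime relies on its internal atomic Or/And; the names here are illustrative only.

package main

import (
	"fmt"
	"sync/atomic"
)

type bitmask []uint32

// read reports whether id's bit is set, without locking.
func (m bitmask) read(id uint32) bool {
	return atomic.LoadUint32(&m[id/32])&(1<<(id%32)) != 0
}

// set turns id's bit on via a CAS loop.
func (m bitmask) set(id uint32) {
	for {
		old := atomic.LoadUint32(&m[id/32])
		if atomic.CompareAndSwapUint32(&m[id/32], old, old|1<<(id%32)) {
			return
		}
	}
}

// clear turns id's bit off via a CAS loop.
func (m bitmask) clear(id uint32) {
	for {
		old := atomic.LoadUint32(&m[id/32])
		if atomic.CompareAndSwapUint32(&m[id/32], old, old&^(1<<(id%32))) {
			return
		}
	}
}

func main() {
	m := make(bitmask, 2) // room for 64 ids
	m.set(37)
	fmt.Println(m.read(37), m.read(38)) // true false
	m.clear(37)
	fmt.Println(m.read(37)) // false
}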
-
-// updateTimerPMask clears pp's timer mask if it has no timers on its heap.
-//
-// Ideally, the timer mask would be kept immediately consistent on any timer
-// operations. Unfortunately, updating a shared global data structure in the
-// timer hot path adds too much overhead in applications frequently switching
-// between no timers and some timers.
-//
-// As a compromise, the timer mask is updated only on pidleget / pidleput. A
-// running P (returned by pidleget) may add a timer at any time, so its mask
-// must be set. An idle P (passed to pidleput) cannot add new timers while
-// idle, so if it has no timers at that time, its mask may be cleared.
-//
-// Thus, we get the following effects on timer-stealing in findrunnable:
-//
-// * Idle Ps with no timers when they go idle are never checked in findrunnable
-// (for work- or timer-stealing; this is the ideal case).
-// * Running Ps must always be checked.
-// * Idle Ps whose timers are stolen must continue to be checked until they run
-// again, even after timer expiration.
-//
-// When the P starts running again, the mask should be set, as a timer may be
-// added at any time.
-//
-// TODO(prattmic): Additional targeted updates may improve the above cases.
-// e.g., updating the mask when stealing a timer.
-func updateTimerPMask(pp *p) {
- if atomic.Load(&pp.numTimers) > 0 {
- return
- }
-
- // Looks like there are no timers, however another P may transiently
- // decrement numTimers when handling a timerModified timer in
- // checkTimers. We must take timersLock to serialize with these changes.
- lock(&pp.timersLock)
- if atomic.Load(&pp.numTimers) == 0 {
- timerpMask.clear(pp.id)
- }
- unlock(&pp.timersLock)
-}
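
updateTimerPMask is a double-checked fast path: an atomic load skips the lock in the common case, and the state is re-checked under the lock before acting, because it may have changed in between. A compact sketch of that idiom, with hypothetical names (slot, pending, markIdleIfEmpty):

package main

import (
	"sync"
	"sync/atomic"
)

type slot struct {
	mu      sync.Mutex
	pending atomic.Int64
	idle    bool
}

// markIdleIfEmpty mirrors updateTimerPMask: bail out on the lock-free fast
// path when work is clearly pending; otherwise lock and re-check before
// flipping the flag, since a producer may have raced in between.
func (s *slot) markIdleIfEmpty() {
	if s.pending.Load() > 0 {
		return
	}
	s.mu.Lock()
	if s.pending.Load() == 0 {
		s.idle = true
	}
	s.mu.Unlock()
}

func main() {
	s := &slot{}
	s.markIdleIfEmpty()
	println(s.idle) // true: nothing was pending
}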
-
-// pidleput puts p on the _Pidle list.
-//
-// This releases ownership of p. Once sched.lock is released it is no longer
-// safe to use p.
-//
-// sched.lock must be held.
-//
-// May run during STW, so write barriers are not allowed.
-//go:nowritebarrierrec
-func pidleput(_p_ *p) {
- assertLockHeld(&sched.lock)
-
- if !runqempty(_p_) {
- throw("pidleput: P has non-empty run queue")
- }
- updateTimerPMask(_p_) // clear if there are no timers.
- idlepMask.set(_p_.id)
- _p_.link = sched.pidle
- sched.pidle.set(_p_)
- atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
-}
-
-// pidleget tries to get a p from the _Pidle list, acquiring ownership.
-//
-// sched.lock must be held.
-//
-// May run during STW, so write barriers are not allowed.
-//go:nowritebarrierrec
-func pidleget() *p {
- assertLockHeld(&sched.lock)
-
- _p_ := sched.pidle.ptr()
- if _p_ != nil {
- // Timer may get added at any time now.
- timerpMask.set(_p_.id)
- idlepMask.clear(_p_.id)
- sched.pidle = _p_.link
- atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
- }
- return _p_
-}
-
-// runqempty reports whether _p_ has no Gs on its local run queue.
-// It never returns true spuriously.
-func runqempty(_p_ *p) bool {
-	// Defend against a race where 1) _p_ has G1 in runnext but runqhead == runqtail,
-	// 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runnext.
- // Simply observing that runqhead == runqtail and then observing that runqnext == nil
- // does not mean the queue is empty.
- for {
- head := atomic.Load(&_p_.runqhead)
- tail := atomic.Load(&_p_.runqtail)
- runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
- if tail == atomic.Load(&_p_.runqtail) {
- return head == tail && runnext == 0
- }
- }
-}
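
The retry loop above is a lightweight consistent-snapshot idiom: load the related atomics, then confirm one of them is unchanged; if it moved, the values may come from different moments, so read again. Sketched in isolation (illustrative names, not runtime APIs):

package main

import "sync/atomic"

// snapshot loads head, tail and next, then re-reads tail to confirm the
// three values form a coherent view; otherwise it retries.
func snapshot(head, tail, next *uint32) (h, t, n uint32) {
	for {
		h = atomic.LoadUint32(head)
		t = atomic.LoadUint32(tail)
		n = atomic.LoadUint32(next)
		if t == atomic.LoadUint32(tail) {
			return
		}
	}
}

func main() {
	var head, tail, next uint32 = 3, 7, 0
	h, t, n := snapshot(&head, &tail, &next)
	println(t-h, n) // 4 0: four queued items, empty runnext analogue
}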
-
-// To shake out latent assumptions about scheduling order,
-// we introduce some randomness into scheduling decisions
-// when running with the race detector.
-// The need for this was made obvious by changing the
-// (deterministic) scheduling order in Go 1.5 and breaking
-// many poorly-written tests.
-// With the randomness here, as long as the tests pass
-// consistently with -race, they shouldn't have latent scheduling
-// assumptions.
-const randomizeScheduler = raceenabled
-
-// runqput tries to put g on the local runnable queue.
-// If next is false, runqput adds g to the tail of the runnable queue.
-// If next is true, runqput puts g in the _p_.runnext slot.
-// If the run queue is full, runqput puts g on the global queue.
-// Executed only by the owner P.
-func runqput(_p_ *p, gp *g, next bool) {
- if randomizeScheduler && next && fastrandn(2) == 0 {
- next = false
- }
-
- if next {
- retryNext:
- oldnext := _p_.runnext
- if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
- goto retryNext
- }
- if oldnext == 0 {
- return
- }
- // Kick the old runnext out to the regular run queue.
- gp = oldnext.ptr()
- }
-
-retry:
- h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers
- t := _p_.runqtail
- if t-h < uint32(len(_p_.runq)) {
- _p_.runq[t%uint32(len(_p_.runq))].set(gp)
- atomic.StoreRel(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
- return
- }
- if runqputslow(_p_, gp, h, t) {
- return
- }
- // the queue is not full, now the put above must succeed
- goto retry
-}
-
-// Put g and a batch of work from local runnable queue on global queue.
-// Executed only by the owner P.
-func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
- var batch [len(_p_.runq)/2 + 1]*g
-
- // First, grab a batch from local queue.
- n := t - h
- n = n / 2
- if n != uint32(len(_p_.runq)/2) {
- throw("runqputslow: queue is not full")
- }
- for i := uint32(0); i < n; i++ {
- batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
- }
- if !atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume
- return false
- }
- batch[n] = gp
-
- if randomizeScheduler {
- for i := uint32(1); i <= n; i++ {
- j := fastrandn(i + 1)
- batch[i], batch[j] = batch[j], batch[i]
- }
- }
-
- // Link the goroutines.
- for i := uint32(0); i < n; i++ {
- batch[i].schedlink.set(batch[i+1])
- }
- var q gQueue
- q.head.set(batch[0])
- q.tail.set(batch[n])
-
- // Now put the batch on global queue.
- lock(&sched.lock)
- globrunqputbatch(&q, int32(n+1))
- unlock(&sched.lock)
- return true
-}
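
Together, runqput and runqputslow implement a fixed-size power-of-two ring owned by one producer, with overflow spilled in bulk to an unbounded queue. A single-threaded sketch of just that spilling policy (the real code needs the acquire/release atomics above because other Ps consume concurrently):

package main

import "fmt"

const ringSize = 4 // must be a power of two, like len(p.runq)

type ring struct {
	buf      [ringSize]int
	head     uint32 // next slot to consume
	tail     uint32 // next slot to fill
	overflow []int  // stand-in for the global run queue
}

// put appends v to the ring; when the ring is full, the oldest half of the
// items plus v are moved to the overflow queue, as in runqputslow.
func (r *ring) put(v int) {
	if r.tail-r.head < ringSize {
		r.buf[r.tail%ringSize] = v
		r.tail++
		return
	}
	n := (r.tail - r.head) / 2
	for i := uint32(0); i < n; i++ {
		r.overflow = append(r.overflow, r.buf[(r.head+i)%ringSize])
	}
	r.head += n
	r.overflow = append(r.overflow, v)
}

func main() {
	r := &ring{}
	for v := 1; v <= 6; v++ {
		r.put(v)
	}
	fmt.Println(r.overflow) // [1 2 5]: the oldest half plus the overflowing item were spilled
}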
-
-// runqputbatch tries to put all the G's on q on the local runnable queue.
-// If the queue is full, they are put on the global queue; in that case
-// this will temporarily acquire the scheduler lock.
-// Executed only by the owner P.
-func runqputbatch(pp *p, q *gQueue, qsize int) {
- h := atomic.LoadAcq(&pp.runqhead)
- t := pp.runqtail
- n := uint32(0)
- for !q.empty() && t-h < uint32(len(pp.runq)) {
- gp := q.pop()
- pp.runq[t%uint32(len(pp.runq))].set(gp)
- t++
- n++
- }
- qsize -= int(n)
-
- if randomizeScheduler {
- off := func(o uint32) uint32 {
- return (pp.runqtail + o) % uint32(len(pp.runq))
- }
- for i := uint32(1); i < n; i++ {
- j := fastrandn(i + 1)
- pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
- }
- }
-
- atomic.StoreRel(&pp.runqtail, t)
- if !q.empty() {
- lock(&sched.lock)
- globrunqputbatch(q, int32(qsize))
- unlock(&sched.lock)
- }
-}
-
-// Get g from local runnable queue.
-// If inheritTime is true, gp should inherit the remaining time in the
-// current time slice. Otherwise, it should start a new time slice.
-// Executed only by the owner P.
-func runqget(_p_ *p) (gp *g, inheritTime bool) {
- // If there's a runnext, it's the next G to run.
- next := _p_.runnext
- // If the runnext is non-0 and the CAS fails, it could only have been stolen by another P,
- // because other Ps can race to set runnext to 0, but only the current P can set it to non-0.
-	// Hence, there's no need to retry this CAS if it fails.
- if next != 0 && _p_.runnext.cas(next, 0) {
- return next.ptr(), true
- }
-
- for {
- h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
- t := _p_.runqtail
- if t == h {
- return nil, false
- }
- gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
- if atomic.CasRel(&_p_.runqhead, h, h+1) { // cas-release, commits consume
- return gp, false
- }
- }
-}
-
-// runqdrain drains the local runnable queue of _p_ and returns all goroutines in it.
-// Executed only by the owner P.
-func runqdrain(_p_ *p) (drainQ gQueue, n uint32) {
- oldNext := _p_.runnext
- if oldNext != 0 && _p_.runnext.cas(oldNext, 0) {
- drainQ.pushBack(oldNext.ptr())
- n++
- }
-
-retry:
- h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
- t := _p_.runqtail
- qn := t - h
- if qn == 0 {
- return
- }
- if qn > uint32(len(_p_.runq)) { // read inconsistent h and t
- goto retry
- }
-
- if !atomic.CasRel(&_p_.runqhead, h, h+qn) { // cas-release, commits consume
- goto retry
- }
-
-	// The head pointer is advanced before the G's are linked into the gQueue,
-	// rather than after, so that gp.schedlink is updated only once this P has
-	// taken full ownership of the G's. If the head were advanced afterwards,
-	// a concurrent runqsteal() could grab the same G's while runqdrain() is
-	// still linking them, corrupting their scheduling state.
- // See https://groups.google.com/g/golang-dev/c/0pTKxEKhHSc/m/6Q85QjdVBQAJ for more details.
- for i := uint32(0); i < qn; i++ {
- gp := _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
- drainQ.pushBack(gp)
- n++
- }
- return
-}
-
-// Grabs a batch of goroutines from _p_'s runnable queue into batch.
-// Batch is a ring buffer starting at batchHead.
-// Returns number of grabbed goroutines.
-// Can be executed by any P.
-func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
- for {
- h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
- t := atomic.LoadAcq(&_p_.runqtail) // load-acquire, synchronize with the producer
- n := t - h
- n = n - n/2
- if n == 0 {
- if stealRunNextG {
- // Try to steal from _p_.runnext.
- if next := _p_.runnext; next != 0 {
- if _p_.status == _Prunning {
- // Sleep to ensure that _p_ isn't about to run the g
- // we are about to steal.
- // The important use case here is when the g running
- // on _p_ ready()s another g and then almost
- // immediately blocks. Instead of stealing runnext
- // in this window, back off to give _p_ a chance to
- // schedule runnext. This will avoid thrashing gs
- // between different Ps.
- // A sync chan send/recv takes ~50ns as of time of
- // writing, so 3us gives ~50x overshoot.
- if GOOS != "windows" {
- usleep(3)
- } else {
- // On windows system timer granularity is
- // 1-15ms, which is way too much for this
- // optimization. So just yield.
- osyield()
- }
- }
- if !_p_.runnext.cas(next, 0) {
- continue
- }
- batch[batchHead%uint32(len(batch))] = next
- return 1
- }
- }
- return 0
- }
- if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
- continue
- }
- for i := uint32(0); i < n; i++ {
- g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
- batch[(batchHead+i)%uint32(len(batch))] = g
- }
- if atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume
- return n
- }
- }
-}
-
-// Steal half of elements from local runnable queue of p2
-// and put onto local runnable queue of p.
-// Returns one of the stolen elements (or nil if failed).
-func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
- t := _p_.runqtail
- n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
- if n == 0 {
- return nil
- }
- n--
- gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
- if n == 0 {
- return gp
- }
- h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers
- if t-h+n >= uint32(len(_p_.runq)) {
- throw("runqsteal: runq overflow")
- }
- atomic.StoreRel(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
- return gp
-}
-
-// A gQueue is a deque of Gs linked through g.schedlink. A G can only
-// be on one gQueue or gList at a time.
-type gQueue struct {
- head guintptr
- tail guintptr
-}
-
-// empty reports whether q is empty.
-func (q *gQueue) empty() bool {
- return q.head == 0
-}
-
-// push adds gp to the head of q.
-func (q *gQueue) push(gp *g) {
- gp.schedlink = q.head
- q.head.set(gp)
- if q.tail == 0 {
- q.tail.set(gp)
- }
-}
-
-// pushBack adds gp to the tail of q.
-func (q *gQueue) pushBack(gp *g) {
- gp.schedlink = 0
- if q.tail != 0 {
- q.tail.ptr().schedlink.set(gp)
- } else {
- q.head.set(gp)
- }
- q.tail.set(gp)
-}
-
-// pushBackAll adds all Gs in q2 to the tail of q. After this q2 must
-// not be used.
-func (q *gQueue) pushBackAll(q2 gQueue) {
- if q2.tail == 0 {
- return
- }
- q2.tail.ptr().schedlink = 0
- if q.tail != 0 {
- q.tail.ptr().schedlink = q2.head
- } else {
- q.head = q2.head
- }
- q.tail = q2.tail
-}
-
-// pop removes and returns the head of queue q. It returns nil if
-// q is empty.
-func (q *gQueue) pop() *g {
- gp := q.head.ptr()
- if gp != nil {
- q.head = gp.schedlink
- if q.head == 0 {
- q.tail = 0
- }
- }
- return gp
-}
-
-// popList takes all Gs in q and returns them as a gList.
-func (q *gQueue) popList() gList {
- stack := gList{q.head}
- *q = gQueue{}
- return stack
-}
-
-// A gList is a list of Gs linked through g.schedlink. A G can only be
-// on one gQueue or gList at a time.
-type gList struct {
- head guintptr
-}
-
-// empty reports whether l is empty.
-func (l *gList) empty() bool {
- return l.head == 0
-}
-
-// push adds gp to the head of l.
-func (l *gList) push(gp *g) {
- gp.schedlink = l.head
- l.head.set(gp)
-}
-
-// pushAll prepends all Gs in q to l.
-func (l *gList) pushAll(q gQueue) {
- if !q.empty() {
- q.tail.ptr().schedlink = l.head
- l.head = q.head
- }
-}
-
-// pop removes and returns the head of l. If l is empty, it returns nil.
-func (l *gList) pop() *g {
- gp := l.head.ptr()
- if gp != nil {
- l.head = gp.schedlink
- }
- return gp
-}
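
gQueue and gList are intrusive containers: the link field lives inside the element itself (g.schedlink), so pushing and popping never allocate and an element can be moved between containers by relinking. The essence, reduced to a toy node type:

package main

import "fmt"

type node struct {
	val  int
	next *node // intrusive link, the analogue of g.schedlink
}

type list struct{ head *node }

// push adds n at the head; no allocation, just pointer relinking.
func (l *list) push(n *node) { n.next = l.head; l.head = n }

// pop removes and returns the head, or nil if the list is empty.
func (l *list) pop() *node {
	n := l.head
	if n != nil {
		l.head = n.next
	}
	return n
}

func main() {
	var l list
	l.push(&node{val: 1})
	l.push(&node{val: 2})
	fmt.Println(l.pop().val, l.pop().val) // 2 1 (LIFO, like gList)
}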
-
-//go:linkname setMaxThreads runtime/debug.setMaxThreads
-func setMaxThreads(in int) (out int) {
- lock(&sched.lock)
- out = int(sched.maxmcount)
- if in > 0x7fffffff { // MaxInt32
- sched.maxmcount = 0x7fffffff
- } else {
- sched.maxmcount = int32(in)
- }
- checkmcount()
- unlock(&sched.lock)
- return
-}
-
-//go:nosplit
-func procPin() int {
- _g_ := getg()
- mp := _g_.m
-
- mp.locks++
- return int(mp.p.ptr().id)
-}
-
-//go:nosplit
-func procUnpin() {
- _g_ := getg()
- _g_.m.locks--
-}
-
-//go:linkname sync_runtime_procPin sync.runtime_procPin
-//go:nosplit
-func sync_runtime_procPin() int {
- return procPin()
-}
-
-//go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
-//go:nosplit
-func sync_runtime_procUnpin() {
- procUnpin()
-}
-
-//go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
-//go:nosplit
-func sync_atomic_runtime_procPin() int {
- return procPin()
-}
-
-//go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
-//go:nosplit
-func sync_atomic_runtime_procUnpin() {
- procUnpin()
-}
-
-// Active spinning for sync.Mutex.
-//go:linkname sync_runtime_canSpin sync.runtime_canSpin
-//go:nosplit
-func sync_runtime_canSpin(i int) bool {
- // sync.Mutex is cooperative, so we are conservative with spinning.
- // Spin only few times and only if running on a multicore machine and
- // GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
- // As opposed to runtime mutex we don't do passive spinning here,
- // because there can be work on global runq or on other Ps.
- if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
- return false
- }
- if p := getg().m.p.ptr(); !runqempty(p) {
- return false
- }
- return true
-}
-
-//go:linkname sync_runtime_doSpin sync.runtime_doSpin
-//go:nosplit
-func sync_runtime_doSpin() {
- procyield(active_spin_cnt)
-}
-
-var stealOrder randomOrder
-
-// randomOrder/randomEnum are helper types for randomized work stealing.
-// They allow enumerating all Ps in different pseudo-random orders without repetitions.
-// The algorithm is based on the fact that if X and GOMAXPROCS
-// are coprime, then the sequence (i + X) % GOMAXPROCS gives the required enumeration.
-type randomOrder struct {
- count uint32
- coprimes []uint32
-}
-
-type randomEnum struct {
- i uint32
- count uint32
- pos uint32
- inc uint32
-}
-
-func (ord *randomOrder) reset(count uint32) {
- ord.count = count
- ord.coprimes = ord.coprimes[:0]
- for i := uint32(1); i <= count; i++ {
- if gcd(i, count) == 1 {
- ord.coprimes = append(ord.coprimes, i)
- }
- }
-}
-
-func (ord *randomOrder) start(i uint32) randomEnum {
- return randomEnum{
- count: ord.count,
- pos: i % ord.count,
- inc: ord.coprimes[i%uint32(len(ord.coprimes))],
- }
-}
-
-func (enum *randomEnum) done() bool {
- return enum.i == enum.count
-}
-
-func (enum *randomEnum) next() {
- enum.i++
- enum.pos = (enum.pos + enum.inc) % enum.count
-}
-
-func (enum *randomEnum) position() uint32 {
- return enum.pos
-}
-
-func gcd(a, b uint32) uint32 {
- for b != 0 {
- a, b = b, a%b
- }
- return a
-}
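
The correctness of randomOrder rests on a small number-theory fact: if inc is coprime to n, then pos = (pos + inc) mod n visits every index in 0..n-1 exactly once before repeating. A short demonstration:

package main

import "fmt"

func gcd(a, b uint32) uint32 {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

func main() {
	const n = 8
	for inc := uint32(1); inc <= n; inc++ {
		if gcd(inc, n) != 1 {
			continue // not coprime: the walk would cycle before covering 0..n-1
		}
		order := make([]uint32, 0, n)
		pos := uint32(0)
		for i := 0; i < n; i++ {
			order = append(order, pos)
			pos = (pos + inc) % n
		}
		fmt.Println(inc, order) // e.g. 3 [0 3 6 1 4 7 2 5]: a full permutation
	}
}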
-
-// An initTask represents the set of initializations that need to be done for a package.
-// Keep in sync with ../../test/initempty.go:initTask
-type initTask struct {
- // TODO: pack the first 3 fields more tightly?
- state uintptr // 0 = uninitialized, 1 = in progress, 2 = done
- ndeps uintptr
- nfns uintptr
- // followed by ndeps instances of an *initTask, one per package depended on
- // followed by nfns pcs, one per init function to run
-}
-
-// inittrace stores statistics for init functions which are
-// updated by malloc and newproc when active is true.
-var inittrace tracestat
-
-type tracestat struct {
- active bool // init tracing activation status
- id int64 // init goroutine id
- allocs uint64 // heap allocations
- bytes uint64 // heap allocated bytes
-}
-
-func doInit(t *initTask) {
- switch t.state {
- case 2: // fully initialized
- return
- case 1: // initialization in progress
- throw("recursive call during initialization - linker skew")
- default: // not initialized yet
- t.state = 1 // initialization in progress
-
- for i := uintptr(0); i < t.ndeps; i++ {
- p := add(unsafe.Pointer(t), (3+i)*goarch.PtrSize)
- t2 := *(**initTask)(p)
- doInit(t2)
- }
-
- if t.nfns == 0 {
- t.state = 2 // initialization done
- return
- }
-
- var (
- start int64
- before tracestat
- )
-
- if inittrace.active {
- start = nanotime()
-			// Load stats non-atomically since inittrace is updated only by this init goroutine.
- before = inittrace
- }
-
- firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*goarch.PtrSize)
- for i := uintptr(0); i < t.nfns; i++ {
- p := add(firstFunc, i*goarch.PtrSize)
- f := *(*func())(unsafe.Pointer(&p))
- f()
- }
-
- if inittrace.active {
- end := nanotime()
-			// Load stats non-atomically since inittrace is updated only by this init goroutine.
- after := inittrace
-
- f := *(*func())(unsafe.Pointer(&firstFunc))
- pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
-
- var sbuf [24]byte
- print("init ", pkg, " @")
- print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
- print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
- print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
- print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
- print("\n")
- }
-
- t.state = 2 // initialization done
- }
-}
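
doInit walks a dependency DAG depth-first with a tri-state flag per package, where state 1 (in progress) doubles as a cycle detector. A heap-friendly sketch with ordinary slices and function values in place of the runtime's raw pointer arithmetic:

package main

import "fmt"

type task struct {
	name  string
	state int // 0 = uninitialized, 1 = in progress, 2 = done
	deps  []*task
	fn    func()
}

// run initializes t's dependencies first, then t itself, exactly once.
// Re-entering a task in state 1 means the graph has a cycle.
func run(t *task) {
	switch t.state {
	case 2:
		return
	case 1:
		panic("cycle in init graph: " + t.name)
	}
	t.state = 1
	for _, d := range t.deps {
		run(d)
	}
	if t.fn != nil {
		t.fn()
	}
	t.state = 2
}

func main() {
	a := &task{name: "a", fn: func() { fmt.Println("init a") }}
	b := &task{name: "b", deps: []*task{a}, fn: func() { fmt.Println("init b") }}
	run(b) // prints "init a" then "init b"
	run(b) // no-op: already done
}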
diff --git a/contrib/go/_std_1.18/src/runtime/profbuf.go b/contrib/go/_std_1.18/src/runtime/profbuf.go
deleted file mode 100644
index f40881aed5..0000000000
--- a/contrib/go/_std_1.18/src/runtime/profbuf.go
+++ /dev/null
@@ -1,561 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "runtime/internal/atomic"
- "unsafe"
-)
-
-// A profBuf is a lock-free buffer for profiling events,
-// safe for concurrent use by one reader and one writer.
-// The writer may be a signal handler running without a user g.
-// The reader is assumed to be a user g.
-//
-// Each logged event corresponds to a fixed size header, a list of
-// uintptrs (typically a stack), and exactly one unsafe.Pointer tag.
-// The header and uintptrs are stored in the circular buffer data and the
-// tag is stored in a circular buffer tags, running in parallel.
-// In the circular buffer data, each event takes 2+hdrsize+len(stk)
-// words: the value 2+hdrsize+len(stk), then the time of the event, then
-// hdrsize words giving the fixed-size header, and then len(stk) words
-// for the stack.
-//
-// The current effective offsets into the tags and data circular buffers
-// for reading and writing are stored in the high 30 and low 32 bits of r and w.
-// The bottom bits of the high 32 are additional flag bits in w, unused in r.
-// "Effective" offsets means the total number of reads or writes, mod 2^length.
-// The offset in the buffer is the effective offset mod the length of the buffer.
-// To make wraparound mod 2^length match wraparound mod length of the buffer,
-// the length of the buffer must be a power of two.
-//
-// If the reader catches up to the writer, a flag passed to read controls
-// whether the read blocks until more data is available. A read returns a
-// pointer to the buffer data itself; the caller is assumed to be done with
-// that data at the next read. The read offset rNext tracks the next offset to
-// be returned by read. By definition, r ≤ rNext ≤ w (before wraparound),
-// and rNext is only used by the reader, so it can be accessed without atomics.
-//
-// If the writer gets ahead of the reader, so that the buffer fills,
-// future writes are discarded and replaced in the output stream by an
-// overflow entry, which has size 2+hdrsize+1, time set to the time of
-// the first discarded write, a header of all zeroed words, and a "stack"
-// containing one word, the number of discarded writes.
-//
-// Between the time the buffer fills and the buffer becomes empty enough
-// to hold more data, the overflow entry is stored as a pending overflow
-// entry in the fields overflow and overflowTime. The pending overflow
-// entry can be turned into a real record by either the writer or the
-// reader. If the writer is called to write a new record and finds that
-// the output buffer has room for both the pending overflow entry and the
-// new record, the writer emits the pending overflow entry and the new
-// record into the buffer. If the reader is called to read data and finds
-// that the output buffer is empty but that there is a pending overflow
-// entry, the reader will return a synthesized record for the pending
-// overflow entry.
-//
-// Only the writer can create or add to a pending overflow entry, but
-// either the reader or the writer can clear the pending overflow entry.
-// A pending overflow entry is indicated by the low 32 bits of 'overflow'
-// holding the number of discarded writes, and overflowTime holding the
-// time of the first discarded write. The high 32 bits of 'overflow'
-// increment each time the low 32 bits transition from zero to non-zero
-// or vice versa. This sequence number avoids ABA problems in the use of
-// compare-and-swap to coordinate between reader and writer.
-// The overflowTime is only written when the low 32 bits of overflow are
-// zero, that is, only when there is no pending overflow entry, in
-// preparation for creating a new one. The reader can therefore fetch and
-// clear the entry atomically using
-//
-// for {
-// overflow = load(&b.overflow)
-// if uint32(overflow) == 0 {
-// // no pending entry
-// break
-// }
-// time = load(&b.overflowTime)
-// if cas(&b.overflow, overflow, ((overflow>>32)+1)<<32) {
-// // pending entry cleared
-// break
-// }
-// }
-// if uint32(overflow) > 0 {
-// emit entry for uint32(overflow), time
-// }
-//
-type profBuf struct {
- // accessed atomically
- r, w profAtomic
- overflow uint64
- overflowTime uint64
- eof uint32
-
- // immutable (excluding slice content)
- hdrsize uintptr
- data []uint64
- tags []unsafe.Pointer
-
- // owned by reader
- rNext profIndex
- overflowBuf []uint64 // for use by reader to return overflow record
- wait note
-}
-
-// A profAtomic is the atomically-accessed word holding a profIndex.
-type profAtomic uint64
-
-// A profIndex is the packed tag and data counts and flags bits, described above.
-type profIndex uint64
-
-const (
- profReaderSleeping profIndex = 1 << 32 // reader is sleeping and must be woken up
- profWriteExtra profIndex = 1 << 33 // overflow or eof waiting
-)
-
-func (x *profAtomic) load() profIndex {
- return profIndex(atomic.Load64((*uint64)(x)))
-}
-
-func (x *profAtomic) store(new profIndex) {
- atomic.Store64((*uint64)(x), uint64(new))
-}
-
-func (x *profAtomic) cas(old, new profIndex) bool {
- return atomic.Cas64((*uint64)(x), uint64(old), uint64(new))
-}
-
-func (x profIndex) dataCount() uint32 {
- return uint32(x)
-}
-
-func (x profIndex) tagCount() uint32 {
- return uint32(x >> 34)
-}
-
-// countSub subtracts two counts obtained from profIndex.dataCount or profIndex.tagCount,
-// assuming that they are no more than 2^29 apart (guaranteed since they are never more than
-// len(data) or len(tags) apart, respectively).
-// tagCount wraps at 2^30, while dataCount wraps at 2^32.
-// This function works for both.
-func countSub(x, y uint32) int {
- // x-y is 32-bit signed or 30-bit signed; sign-extend to 32 bits and convert to int.
- return int(int32(x-y) << 2 >> 2)
-}
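
countSub leans on two's-complement wraparound: subtracting the raw uint32 counters and reinterpreting the result as signed yields the true distance even after a counter wraps, as long as the two stay close; the <<2 >>2 pair then sign-extends for the 30-bit tag counter. A small demonstration:

package main

import "fmt"

func countSub(x, y uint32) int {
	return int(int32(x-y) << 2 >> 2)
}

func main() {
	fmt.Println(countSub(5, 2))       // 3: the ordinary case
	fmt.Println(countSub(1, 1<<30-2)) // 3: still correct across a 2^30 wrap of the tag counter
}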
-
-// addCountsAndClearFlags returns the packed form of "x + (data, tag) - all flags".
-func (x profIndex) addCountsAndClearFlags(data, tag int) profIndex {
- return profIndex((uint64(x)>>34+uint64(uint32(tag)<<2>>2))<<34 | uint64(uint32(x)+uint32(data)))
-}
-
-// hasOverflow reports whether b has any overflow records pending.
-func (b *profBuf) hasOverflow() bool {
- return uint32(atomic.Load64(&b.overflow)) > 0
-}
-
-// takeOverflow consumes the pending overflow records, returning the overflow count
-// and the time of the first overflow.
-// When called by the reader, it is racing against incrementOverflow.
-func (b *profBuf) takeOverflow() (count uint32, time uint64) {
- overflow := atomic.Load64(&b.overflow)
- time = atomic.Load64(&b.overflowTime)
- for {
- count = uint32(overflow)
- if count == 0 {
- time = 0
- break
- }
- // Increment generation, clear overflow count in low bits.
- if atomic.Cas64(&b.overflow, overflow, ((overflow>>32)+1)<<32) {
- break
- }
- overflow = atomic.Load64(&b.overflow)
- time = atomic.Load64(&b.overflowTime)
- }
- return uint32(overflow), time
-}
-
-// incrementOverflow records a single overflow at time now.
-// It is racing against a possible takeOverflow in the reader.
-func (b *profBuf) incrementOverflow(now int64) {
- for {
- overflow := atomic.Load64(&b.overflow)
-
- // Once we see b.overflow reach 0, it's stable: no one else is changing it underfoot.
- // We need to set overflowTime if we're incrementing b.overflow from 0.
- if uint32(overflow) == 0 {
- // Store overflowTime first so it's always available when overflow != 0.
- atomic.Store64(&b.overflowTime, uint64(now))
- atomic.Store64(&b.overflow, (((overflow>>32)+1)<<32)+1)
- break
- }
- // Otherwise we're racing to increment against reader
- // who wants to set b.overflow to 0.
- // Out of paranoia, leave 2³²-1 a sticky overflow value,
- // to avoid wrapping around. Extremely unlikely.
- if int32(overflow) == -1 {
- break
- }
- if atomic.Cas64(&b.overflow, overflow, overflow+1) {
- break
- }
- }
-}
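
takeOverflow and incrementOverflow coordinate through a single uint64 that packs a generation number above the count: every zero/non-zero transition bumps the generation, so a compare-and-swap can never mistake "went away and came back" for "never changed" (the classic ABA hazard). A condensed sketch of that packing, with hypothetical names:

package main

import (
	"fmt"
	"sync/atomic"
)

type counter struct{ word uint64 } // high 32 bits: generation, low 32: count

// add increments the count; the first increment from zero also bumps the
// generation, mirroring incrementOverflow.
func (c *counter) add() {
	for {
		old := atomic.LoadUint64(&c.word)
		next := old + 1
		if uint32(old) == 0 {
			next = ((old>>32)+1)<<32 + 1
		}
		if atomic.CompareAndSwapUint64(&c.word, old, next) {
			return
		}
	}
}

// take clears the count while advancing the generation, so a stale CAS by
// the other party is guaranteed to fail.
func (c *counter) take() uint32 {
	for {
		old := atomic.LoadUint64(&c.word)
		if uint32(old) == 0 {
			return 0
		}
		if atomic.CompareAndSwapUint64(&c.word, old, ((old>>32)+1)<<32) {
			return uint32(old)
		}
	}
}

func main() {
	var c counter
	c.add()
	c.add()
	fmt.Println(c.take()) // 2
	fmt.Println(c.take()) // 0
}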
-
-// newProfBuf returns a new profiling buffer with room for
-// a header of hdrsize words and a buffer of at least bufwords words.
-func newProfBuf(hdrsize, bufwords, tags int) *profBuf {
- if min := 2 + hdrsize + 1; bufwords < min {
- bufwords = min
- }
-
- // Buffer sizes must be power of two, so that we don't have to
- // worry about uint32 wraparound changing the effective position
- // within the buffers. We store 30 bits of count; limiting to 28
- // gives us some room for intermediate calculations.
- if bufwords >= 1<<28 || tags >= 1<<28 {
- throw("newProfBuf: buffer too large")
- }
- var i int
- for i = 1; i < bufwords; i <<= 1 {
- }
- bufwords = i
- for i = 1; i < tags; i <<= 1 {
- }
- tags = i
-
- b := new(profBuf)
- b.hdrsize = uintptr(hdrsize)
- b.data = make([]uint64, bufwords)
- b.tags = make([]unsafe.Pointer, tags)
- b.overflowBuf = make([]uint64, 2+b.hdrsize+1)
- return b
-}
-
-// canWriteRecord reports whether the buffer has room
-// for a single contiguous record with a stack of length nstk.
-func (b *profBuf) canWriteRecord(nstk int) bool {
- br := b.r.load()
- bw := b.w.load()
-
- // room for tag?
- if countSub(br.tagCount(), bw.tagCount())+len(b.tags) < 1 {
- return false
- }
-
- // room for data?
- nd := countSub(br.dataCount(), bw.dataCount()) + len(b.data)
- want := 2 + int(b.hdrsize) + nstk
- i := int(bw.dataCount() % uint32(len(b.data)))
- if i+want > len(b.data) {
- // Can't fit in trailing fragment of slice.
- // Skip over that and start over at beginning of slice.
- nd -= len(b.data) - i
- }
- return nd >= want
-}
-
-// canWriteTwoRecords reports whether the buffer has room
-// for two records with stack lengths nstk1, nstk2, in that order.
-// Each record must be contiguous on its own, but the two
-// records need not be contiguous (one can be at the end of the buffer
-// and the other can wrap around and start at the beginning of the buffer).
-func (b *profBuf) canWriteTwoRecords(nstk1, nstk2 int) bool {
- br := b.r.load()
- bw := b.w.load()
-
- // room for tag?
- if countSub(br.tagCount(), bw.tagCount())+len(b.tags) < 2 {
- return false
- }
-
- // room for data?
- nd := countSub(br.dataCount(), bw.dataCount()) + len(b.data)
-
- // first record
- want := 2 + int(b.hdrsize) + nstk1
- i := int(bw.dataCount() % uint32(len(b.data)))
- if i+want > len(b.data) {
- // Can't fit in trailing fragment of slice.
- // Skip over that and start over at beginning of slice.
- nd -= len(b.data) - i
- i = 0
- }
- i += want
- nd -= want
-
- // second record
- want = 2 + int(b.hdrsize) + nstk2
- if i+want > len(b.data) {
- // Can't fit in trailing fragment of slice.
- // Skip over that and start over at beginning of slice.
- nd -= len(b.data) - i
- i = 0
- }
- return nd >= want
-}
-
-// write writes an entry to the profiling buffer b.
-// The entry begins with a fixed hdr, which must have
-// length b.hdrsize, followed by a variable-sized stack
-// and a single tag pointer *tagPtr (or nil if tagPtr is nil).
-// No write barriers allowed because this might be called from a signal handler.
-func (b *profBuf) write(tagPtr *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
- if b == nil {
- return
- }
- if len(hdr) > int(b.hdrsize) {
- throw("misuse of profBuf.write")
- }
-
- if hasOverflow := b.hasOverflow(); hasOverflow && b.canWriteTwoRecords(1, len(stk)) {
- // Room for both an overflow record and the one being written.
- // Write the overflow record if the reader hasn't gotten to it yet.
- // Only racing against reader, not other writers.
- count, time := b.takeOverflow()
- if count > 0 {
- var stk [1]uintptr
- stk[0] = uintptr(count)
- b.write(nil, int64(time), nil, stk[:])
- }
- } else if hasOverflow || !b.canWriteRecord(len(stk)) {
- // Pending overflow without room to write overflow and new records
- // or no overflow but also no room for new record.
- b.incrementOverflow(now)
- b.wakeupExtra()
- return
- }
-
- // There's room: write the record.
- br := b.r.load()
- bw := b.w.load()
-
- // Profiling tag
- //
- // The tag is a pointer, but we can't run a write barrier here.
- // We have interrupted the OS-level execution of gp, but the
- // runtime still sees gp as executing. In effect, we are running
- // in place of the real gp. Since gp is the only goroutine that
- // can overwrite gp.labels, the value of gp.labels is stable during
- // this signal handler: it will still be reachable from gp when
- // we finish executing. If a GC is in progress right now, it must
- // keep gp.labels alive, because gp.labels is reachable from gp.
- // If gp were to overwrite gp.labels, the deletion barrier would
- // still shade that pointer, which would preserve it for the
- // in-progress GC, so all is well. Any future GC will see the
- // value we copied when scanning b.tags (heap-allocated).
- // We arrange that the store here is always overwriting a nil,
- // so there is no need for a deletion barrier on b.tags[wt].
- wt := int(bw.tagCount() % uint32(len(b.tags)))
- if tagPtr != nil {
- *(*uintptr)(unsafe.Pointer(&b.tags[wt])) = uintptr(unsafe.Pointer(*tagPtr))
- }
-
- // Main record.
- // It has to fit in a contiguous section of the slice, so if it doesn't fit at the end,
- // leave a rewind marker (0) and start over at the beginning of the slice.
- wd := int(bw.dataCount() % uint32(len(b.data)))
- nd := countSub(br.dataCount(), bw.dataCount()) + len(b.data)
- skip := 0
- if wd+2+int(b.hdrsize)+len(stk) > len(b.data) {
- b.data[wd] = 0
- skip = len(b.data) - wd
- nd -= skip
- wd = 0
- }
- data := b.data[wd:]
- data[0] = uint64(2 + b.hdrsize + uintptr(len(stk))) // length
- data[1] = uint64(now) // time stamp
- // header, zero-padded
- i := uintptr(copy(data[2:2+b.hdrsize], hdr))
- for ; i < b.hdrsize; i++ {
- data[2+i] = 0
- }
- for i, pc := range stk {
- data[2+b.hdrsize+uintptr(i)] = uint64(pc)
- }
-
- for {
- // Commit write.
- // Racing with reader setting flag bits in b.w, to avoid lost wakeups.
- old := b.w.load()
- new := old.addCountsAndClearFlags(skip+2+len(stk)+int(b.hdrsize), 1)
- if !b.w.cas(old, new) {
- continue
- }
- // If there was a reader, wake it up.
- if old&profReaderSleeping != 0 {
- notewakeup(&b.wait)
- }
- break
- }
-}
-
-// close signals that there will be no more writes on the buffer.
-// Once all the data has been read from the buffer, reads will return eof=true.
-func (b *profBuf) close() {
- if atomic.Load(&b.eof) > 0 {
- throw("runtime: profBuf already closed")
- }
- atomic.Store(&b.eof, 1)
- b.wakeupExtra()
-}
-
-// wakeupExtra must be called after setting one of the "extra"
-// atomic fields b.overflow or b.eof.
-// It records the change in b.w and wakes up the reader if needed.
-func (b *profBuf) wakeupExtra() {
- for {
- old := b.w.load()
- new := old | profWriteExtra
- if !b.w.cas(old, new) {
- continue
- }
- if old&profReaderSleeping != 0 {
- notewakeup(&b.wait)
- }
- break
- }
-}
-
-// profBufReadMode specifies whether to block when no data is available to read.
-type profBufReadMode int
-
-const (
- profBufBlocking profBufReadMode = iota
- profBufNonBlocking
-)
-
-var overflowTag [1]unsafe.Pointer // always nil
-
-func (b *profBuf) read(mode profBufReadMode) (data []uint64, tags []unsafe.Pointer, eof bool) {
- if b == nil {
- return nil, nil, true
- }
-
- br := b.rNext
-
- // Commit previous read, returning that part of the ring to the writer.
- // First clear tags that have now been read, both to avoid holding
- // up the memory they point at for longer than necessary
- // and so that b.write can assume it is always overwriting
- // nil tag entries (see comment in b.write).
- rPrev := b.r.load()
- if rPrev != br {
- ntag := countSub(br.tagCount(), rPrev.tagCount())
- ti := int(rPrev.tagCount() % uint32(len(b.tags)))
- for i := 0; i < ntag; i++ {
- b.tags[ti] = nil
- if ti++; ti == len(b.tags) {
- ti = 0
- }
- }
- b.r.store(br)
- }
-
-Read:
- bw := b.w.load()
- numData := countSub(bw.dataCount(), br.dataCount())
- if numData == 0 {
- if b.hasOverflow() {
- // No data to read, but there is overflow to report.
- // Racing with writer flushing b.overflow into a real record.
- count, time := b.takeOverflow()
- if count == 0 {
- // Lost the race, go around again.
- goto Read
- }
- // Won the race, report overflow.
- dst := b.overflowBuf
- dst[0] = uint64(2 + b.hdrsize + 1)
- dst[1] = uint64(time)
- for i := uintptr(0); i < b.hdrsize; i++ {
- dst[2+i] = 0
- }
- dst[2+b.hdrsize] = uint64(count)
- return dst[:2+b.hdrsize+1], overflowTag[:1], false
- }
- if atomic.Load(&b.eof) > 0 {
- // No data, no overflow, EOF set: done.
- return nil, nil, true
- }
- if bw&profWriteExtra != 0 {
- // Writer claims to have published extra information (overflow or eof).
- // Attempt to clear notification and then check again.
- // If we fail to clear the notification it means b.w changed,
- // so we still need to check again.
- b.w.cas(bw, bw&^profWriteExtra)
- goto Read
- }
-
- // Nothing to read right now.
- // Return or sleep according to mode.
- if mode == profBufNonBlocking {
- return nil, nil, false
- }
- if !b.w.cas(bw, bw|profReaderSleeping) {
- goto Read
- }
- // Committed to sleeping.
- notetsleepg(&b.wait, -1)
- noteclear(&b.wait)
- goto Read
- }
- data = b.data[br.dataCount()%uint32(len(b.data)):]
- if len(data) > numData {
- data = data[:numData]
- } else {
- numData -= len(data) // available in case of wraparound
- }
- skip := 0
- if data[0] == 0 {
- // Wraparound record. Go back to the beginning of the ring.
- skip = len(data)
- data = b.data
- if len(data) > numData {
- data = data[:numData]
- }
- }
-
- ntag := countSub(bw.tagCount(), br.tagCount())
- if ntag == 0 {
- throw("runtime: malformed profBuf buffer - tag and data out of sync")
- }
- tags = b.tags[br.tagCount()%uint32(len(b.tags)):]
- if len(tags) > ntag {
- tags = tags[:ntag]
- }
-
- // Count out whole data records until either data or tags is done.
- // They are always in sync in the buffer, but due to an end-of-slice
- // wraparound we might need to stop early and return the rest
- // in the next call.
- di := 0
- ti := 0
- for di < len(data) && data[di] != 0 && ti < len(tags) {
- if uintptr(di)+uintptr(data[di]) > uintptr(len(data)) {
- throw("runtime: malformed profBuf buffer - invalid size")
- }
- di += int(data[di])
- ti++
- }
-
- // Remember how much we returned, to commit read on next call.
- b.rNext = br.addCountsAndClearFlags(skip+di, ti)
-
- if raceenabled {
- // Match racereleasemerge in runtime_setProfLabel,
- // so that the setting of the labels in runtime_setProfLabel
- // is treated as happening before any use of the labels
- // by our caller. The synchronization on labelSync itself is a fiction
- // for the race detector. The actual synchronization is handled
- // by the fact that the signal handler only reads from the current
- // goroutine and uses atomics to write the updated queue indices,
- // and then the read-out from the signal handler buffer uses
- // atomics to read those queue indices.
- raceacquire(unsafe.Pointer(&labelSync))
- }
-
- return data[:di], tags[:ti], false
-}
diff --git a/contrib/go/_std_1.18/src/runtime/runtime.go b/contrib/go/_std_1.18/src/runtime/runtime.go
deleted file mode 100644
index 33ecc260dd..0000000000
--- a/contrib/go/_std_1.18/src/runtime/runtime.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "runtime/internal/atomic"
- _ "unsafe" // for go:linkname
-)
-
-//go:generate go run wincallback.go
-//go:generate go run mkduff.go
-//go:generate go run mkfastlog2table.go
-
-var ticks struct {
- lock mutex
- pad uint32 // ensure 8-byte alignment of val on 386
- val uint64
-}
-
-// Note: Called by runtime/pprof in addition to runtime code.
-func tickspersecond() int64 {
- r := int64(atomic.Load64(&ticks.val))
- if r != 0 {
- return r
- }
- lock(&ticks.lock)
- r = int64(ticks.val)
- if r == 0 {
- t0 := nanotime()
- c0 := cputicks()
- usleep(100 * 1000)
- t1 := nanotime()
- c1 := cputicks()
- if t1 == t0 {
- t1++
- }
- r = (c1 - c0) * 1000 * 1000 * 1000 / (t1 - t0)
- if r == 0 {
- r++
- }
- atomic.Store64(&ticks.val, uint64(r))
- }
- unlock(&ticks.lock)
- return r
-}
-
-var envs []string
-var argslice []string
-
-//go:linkname syscall_runtime_envs syscall.runtime_envs
-func syscall_runtime_envs() []string { return append([]string{}, envs...) }
-
-//go:linkname syscall_Getpagesize syscall.Getpagesize
-func syscall_Getpagesize() int { return int(physPageSize) }
-
-//go:linkname os_runtime_args os.runtime_args
-func os_runtime_args() []string { return append([]string{}, argslice...) }
-
-//go:linkname syscall_Exit syscall.Exit
-//go:nosplit
-func syscall_Exit(code int) {
- exit(int32(code))
-}
diff --git a/contrib/go/_std_1.18/src/runtime/runtime1.go b/contrib/go/_std_1.18/src/runtime/runtime1.go
deleted file mode 100644
index 65e1e0eebc..0000000000
--- a/contrib/go/_std_1.18/src/runtime/runtime1.go
+++ /dev/null
@@ -1,544 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/bytealg"
- "internal/goarch"
- "runtime/internal/atomic"
- "unsafe"
-)
-
-// Keep a cached value to make gotraceback fast,
-// since we call it on every call to gentraceback.
-// The cached value is a uint32 in which the low bits
-// are the "crash" and "all" settings and the remaining
-// bits are the traceback value (0 off, 1 on, 2 include system).
-const (
- tracebackCrash = 1 << iota
- tracebackAll
- tracebackShift = iota
-)
-
-var traceback_cache uint32 = 2 << tracebackShift
-var traceback_env uint32
-
-// gotraceback returns the current traceback settings.
-//
-// If level is 0, suppress all tracebacks.
-// If level is 1, show tracebacks, but exclude runtime frames.
-// If level is 2, show tracebacks including runtime frames.
-// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
-// If crash is set, crash (core dump, etc) after tracebacking.
-//
-//go:nosplit
-func gotraceback() (level int32, all, crash bool) {
- _g_ := getg()
- t := atomic.Load(&traceback_cache)
- crash = t&tracebackCrash != 0
- all = _g_.m.throwing > 0 || t&tracebackAll != 0
- if _g_.m.traceback != 0 {
- level = int32(_g_.m.traceback)
- } else {
- level = int32(t >> tracebackShift)
- }
- return
-}
-
-var (
- argc int32
- argv **byte
-)
-
-// nosplit for use in linux startup sysargs
-//go:nosplit
-func argv_index(argv **byte, i int32) *byte {
- return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
-}
-
-func args(c int32, v **byte) {
- argc = c
- argv = v
- sysargs(c, v)
-}
-
-func goargs() {
- if GOOS == "windows" {
- return
- }
- argslice = make([]string, argc)
- for i := int32(0); i < argc; i++ {
- argslice[i] = gostringnocopy(argv_index(argv, i))
- }
-}
-
-func goenvs_unix() {
- // TODO(austin): ppc64 in dynamic linking mode doesn't
- // guarantee env[] will immediately follow argv. Might cause
- // problems.
- n := int32(0)
- for argv_index(argv, argc+1+n) != nil {
- n++
- }
-
- envs = make([]string, n)
- for i := int32(0); i < n; i++ {
- envs[i] = gostring(argv_index(argv, argc+1+i))
- }
-}
-
-func environ() []string {
- return envs
-}
-
-// TODO: These should be locals in testAtomic64, but we don't 8-byte
-// align stack variables on 386.
-var test_z64, test_x64 uint64
-
-func testAtomic64() {
- test_z64 = 42
- test_x64 = 0
- if atomic.Cas64(&test_z64, test_x64, 1) {
- throw("cas64 failed")
- }
- if test_x64 != 0 {
- throw("cas64 failed")
- }
- test_x64 = 42
- if !atomic.Cas64(&test_z64, test_x64, 1) {
- throw("cas64 failed")
- }
- if test_x64 != 42 || test_z64 != 1 {
- throw("cas64 failed")
- }
- if atomic.Load64(&test_z64) != 1 {
- throw("load64 failed")
- }
- atomic.Store64(&test_z64, (1<<40)+1)
- if atomic.Load64(&test_z64) != (1<<40)+1 {
- throw("store64 failed")
- }
- if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
- throw("xadd64 failed")
- }
- if atomic.Load64(&test_z64) != (2<<40)+2 {
- throw("xadd64 failed")
- }
- if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
- throw("xchg64 failed")
- }
- if atomic.Load64(&test_z64) != (3<<40)+3 {
- throw("xchg64 failed")
- }
-}
-
-func check() {
- var (
- a int8
- b uint8
- c int16
- d uint16
- e int32
- f uint32
- g int64
- h uint64
- i, i1 float32
- j, j1 float64
- k unsafe.Pointer
- l *uint16
- m [4]byte
- )
- type x1t struct {
- x uint8
- }
- type y1t struct {
- x1 x1t
- y uint8
- }
- var x1 x1t
- var y1 y1t
-
- if unsafe.Sizeof(a) != 1 {
- throw("bad a")
- }
- if unsafe.Sizeof(b) != 1 {
- throw("bad b")
- }
- if unsafe.Sizeof(c) != 2 {
- throw("bad c")
- }
- if unsafe.Sizeof(d) != 2 {
- throw("bad d")
- }
- if unsafe.Sizeof(e) != 4 {
- throw("bad e")
- }
- if unsafe.Sizeof(f) != 4 {
- throw("bad f")
- }
- if unsafe.Sizeof(g) != 8 {
- throw("bad g")
- }
- if unsafe.Sizeof(h) != 8 {
- throw("bad h")
- }
- if unsafe.Sizeof(i) != 4 {
- throw("bad i")
- }
- if unsafe.Sizeof(j) != 8 {
- throw("bad j")
- }
- if unsafe.Sizeof(k) != goarch.PtrSize {
- throw("bad k")
- }
- if unsafe.Sizeof(l) != goarch.PtrSize {
- throw("bad l")
- }
- if unsafe.Sizeof(x1) != 1 {
- throw("bad unsafe.Sizeof x1")
- }
- if unsafe.Offsetof(y1.y) != 1 {
- throw("bad offsetof y1.y")
- }
- if unsafe.Sizeof(y1) != 2 {
- throw("bad unsafe.Sizeof y1")
- }
-
- if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
- throw("bad timediv")
- }
-
- var z uint32
- z = 1
- if !atomic.Cas(&z, 1, 2) {
- throw("cas1")
- }
- if z != 2 {
- throw("cas2")
- }
-
- z = 4
- if atomic.Cas(&z, 5, 6) {
- throw("cas3")
- }
- if z != 4 {
- throw("cas4")
- }
-
- z = 0xffffffff
- if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
- throw("cas5")
- }
- if z != 0xfffffffe {
- throw("cas6")
- }
-
- m = [4]byte{1, 1, 1, 1}
- atomic.Or8(&m[1], 0xf0)
- if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
- throw("atomicor8")
- }
-
- m = [4]byte{0xff, 0xff, 0xff, 0xff}
- atomic.And8(&m[1], 0x1)
- if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
- throw("atomicand8")
- }
-
- *(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
- if j == j {
- throw("float64nan")
- }
- if !(j != j) {
- throw("float64nan1")
- }
-
- *(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
- if j == j1 {
- throw("float64nan2")
- }
- if !(j != j1) {
- throw("float64nan3")
- }
-
- *(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
- if i == i {
- throw("float32nan")
- }
-	if !(i != i) {
- throw("float32nan1")
- }
-
- *(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
- if i == i1 {
- throw("float32nan2")
- }
-	if !(i != i1) {
- throw("float32nan3")
- }
-
- testAtomic64()
-
- if _FixedStack != round2(_FixedStack) {
- throw("FixedStack is not power-of-2")
- }
-
- if !checkASM() {
- throw("assembly checks failed")
- }
-}
-
-type dbgVar struct {
- name string
- value *int32
-}
-
-// Holds variables parsed from GODEBUG env var,
-// except for "memprofilerate" since there is an
-// existing int var for that value, which may
-// already have an initial value.
-var debug struct {
- cgocheck int32
- clobberfree int32
- efence int32
- gccheckmark int32
- gcpacertrace int32
- gcshrinkstackoff int32
- gcstoptheworld int32
- gctrace int32
- invalidptr int32
- madvdontneed int32 // for Linux; issue 28466
- scavtrace int32
- scheddetail int32
- schedtrace int32
- tracebackancestors int32
- asyncpreemptoff int32
- harddecommit int32
-
- // debug.malloc is used as a combined debug check
- // in the malloc function and should be set
- // if any of the below debug options is != 0.
- malloc bool
- allocfreetrace int32
- inittrace int32
- sbrk int32
-}
-
-var dbgvars = []dbgVar{
- {"allocfreetrace", &debug.allocfreetrace},
- {"clobberfree", &debug.clobberfree},
- {"cgocheck", &debug.cgocheck},
- {"efence", &debug.efence},
- {"gccheckmark", &debug.gccheckmark},
- {"gcpacertrace", &debug.gcpacertrace},
- {"gcshrinkstackoff", &debug.gcshrinkstackoff},
- {"gcstoptheworld", &debug.gcstoptheworld},
- {"gctrace", &debug.gctrace},
- {"invalidptr", &debug.invalidptr},
- {"madvdontneed", &debug.madvdontneed},
- {"sbrk", &debug.sbrk},
- {"scavtrace", &debug.scavtrace},
- {"scheddetail", &debug.scheddetail},
- {"schedtrace", &debug.schedtrace},
- {"tracebackancestors", &debug.tracebackancestors},
- {"asyncpreemptoff", &debug.asyncpreemptoff},
- {"inittrace", &debug.inittrace},
- {"harddecommit", &debug.harddecommit},
-}
-
-func parsedebugvars() {
- // defaults
- debug.cgocheck = 1
- debug.invalidptr = 1
- if GOOS == "linux" {
- // On Linux, MADV_FREE is faster than MADV_DONTNEED,
- // but doesn't affect many of the statistics that
- // MADV_DONTNEED does until the memory is actually
- // reclaimed. This generally leads to poor user
- // experience, like confusing stats in top and other
- // monitoring tools; and bad integration with
- // management systems that respond to memory usage.
- // Hence, default to MADV_DONTNEED.
- debug.madvdontneed = 1
- }
-
- for p := gogetenv("GODEBUG"); p != ""; {
- field := ""
- i := bytealg.IndexByteString(p, ',')
- if i < 0 {
- field, p = p, ""
- } else {
- field, p = p[:i], p[i+1:]
- }
- i = bytealg.IndexByteString(field, '=')
- if i < 0 {
- continue
- }
- key, value := field[:i], field[i+1:]
-
- // Update MemProfileRate directly here since it
- // is int, not int32, and should only be updated
- // if specified in GODEBUG.
- if key == "memprofilerate" {
- if n, ok := atoi(value); ok {
- MemProfileRate = n
- }
- } else {
- for _, v := range dbgvars {
- if v.name == key {
- if n, ok := atoi32(value); ok {
- *v.value = n
- }
- }
- }
- }
- }
-
- debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0
-
- setTraceback(gogetenv("GOTRACEBACK"))
- traceback_env = traceback_cache
-}
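
A user-level sketch of the same comma-separated key=value parsing, using the standard library in place of the runtime's allocation-free bytealg/atoi helpers (parseDebugVars is a hypothetical name, not a runtime API):

	package main

	import (
		"fmt"
		"strconv"
		"strings"
	)

	func parseDebugVars(s string) map[string]int {
		out := make(map[string]int)
		for _, field := range strings.Split(s, ",") {
			key, value, ok := strings.Cut(field, "=") // strings.Cut is new in Go 1.18
			if !ok {
				continue // fields without '=' are skipped, as above
			}
			if n, err := strconv.Atoi(value); err == nil {
				out[key] = n
			}
		}
		return out
	}

	func main() {
		fmt.Println(parseDebugVars("gctrace=1,schedtrace=1000"))
		// Output: map[gctrace:1 schedtrace:1000]
	}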
-
-//go:linkname setTraceback runtime/debug.SetTraceback
-func setTraceback(level string) {
- var t uint32
- switch level {
- case "none":
- t = 0
- case "single", "":
- t = 1 << tracebackShift
- case "all":
- t = 1<<tracebackShift | tracebackAll
- case "system":
- t = 2<<tracebackShift | tracebackAll
- case "crash":
- t = 2<<tracebackShift | tracebackAll | tracebackCrash
- default:
- t = tracebackAll
- if n, ok := atoi(level); ok && n == int(uint32(n)) {
- t |= uint32(n) << tracebackShift
- }
- }
-	// When C owns the process, simply exiting the process on fatal errors
-	// and panics is surprising. Be louder and abort instead.
- if islibrary || isarchive {
- t |= tracebackCrash
- }
-
- t |= traceback_env
-
- atomic.Store(&traceback_cache, t)
-}
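
setTraceback packs a numeric level into the high bits and boolean flags into the low bits of one uint32. A minimal sketch of that encoding, using hypothetical stand-ins for the real tracebackShift/tracebackAll/tracebackCrash constants defined elsewhere in the runtime:

	package main

	const (
		flagAll   uint32 = 1 << 0 // stand-in for tracebackAll
		flagCrash uint32 = 1 << 1 // stand-in for tracebackCrash
		shift            = 2      // stand-in for tracebackShift
	)

	// encodeTraceback packs a level and flags the way setTraceback does,
	// so level 2 with both flags becomes 2<<shift | flagAll | flagCrash.
	func encodeTraceback(level uint32, all, crash bool) uint32 {
		t := level << shift
		if all {
			t |= flagAll
		}
		if crash {
			t |= flagCrash
		}
		return t
	}

	func main() {
		println(encodeTraceback(2, true, true) == 2<<shift|flagAll|flagCrash) // true
	}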
-
-// Poor man's 64-bit division.
-// This is a very special function; do not use it unless you are sure what you are doing.
-// int64 division is lowered into a _divv() call on 386, which does not fit into nosplit functions.
-// Handles overflow in a time-specific manner.
-// This keeps us within no-split stack limits on 32-bit processors.
-//go:nosplit
-func timediv(v int64, div int32, rem *int32) int32 {
- res := int32(0)
- for bit := 30; bit >= 0; bit-- {
- if v >= int64(div)<<uint(bit) {
- v = v - (int64(div) << uint(bit))
- // Before this for loop, res was 0, thus all these
- // power of 2 increments are now just bitsets.
- res |= 1 << uint(bit)
- }
- }
- if v >= int64(div) {
- if rem != nil {
- *rem = 0
- }
- return 0x7fffffff
- }
- if rem != nil {
- *rem = int32(v)
- }
- return res
-}
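
Usage sketch: for quotients that fit in 31 bits, timediv(v, d, &r) agrees with v/d and v%d, which is exactly what the check() assertion earlier in this file verifies:

	var rem int32
	q := timediv(12345*1000000000+54321, 1000000000, &rem)
	// q == 12345, rem == 54321; if the quotient overflows 31 bits,
	// timediv returns 0x7fffffff and sets rem to 0.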
-
-// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
-
-//go:nosplit
-func acquirem() *m {
- _g_ := getg()
- _g_.m.locks++
- return _g_.m
-}
-
-//go:nosplit
-func releasem(mp *m) {
- _g_ := getg()
- mp.locks--
- if mp.locks == 0 && _g_.preempt {
- // restore the preemption request in case we've cleared it in newstack
- _g_.stackguard0 = stackPreempt
- }
-}
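
The typical call pattern inside the runtime (a sketch): acquirem pins the current goroutine to its M by bumping m.locks, and releasem re-arms the preemption request if one arrived in between:

	mp := acquirem()
	// ... work that must not be preempted or migrated to another M ...
	releasem(mp)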
-
-//go:linkname reflect_typelinks reflect.typelinks
-func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
- modules := activeModules()
- sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
- ret := [][]int32{modules[0].typelinks}
- for _, md := range modules[1:] {
- sections = append(sections, unsafe.Pointer(md.types))
- ret = append(ret, md.typelinks)
- }
- return sections, ret
-}
-
-// reflect_resolveNameOff resolves a name offset from a base pointer.
-//go:linkname reflect_resolveNameOff reflect.resolveNameOff
-func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
- return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
-}
-
-// reflect_resolveTypeOff resolves an *rtype offset from a base type.
-//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
-func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
- return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
-}
-
-// reflect_resolveTextOff resolves a function pointer offset from a base type.
-//go:linkname reflect_resolveTextOff reflect.resolveTextOff
-func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
-	return (*_type)(rtype).textOff(textOff(off))
-}
-
-// reflectlite_resolveNameOff resolves a name offset from a base pointer.
-//go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
-func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
- return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
-}
-
-// reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
-//go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
-func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
- return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
-}
-
-// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
-//go:linkname reflect_addReflectOff reflect.addReflectOff
-func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
- reflectOffsLock()
- if reflectOffs.m == nil {
- reflectOffs.m = make(map[int32]unsafe.Pointer)
- reflectOffs.minv = make(map[unsafe.Pointer]int32)
- reflectOffs.next = -1
- }
- id, found := reflectOffs.minv[ptr]
- if !found {
- id = reflectOffs.next
- reflectOffs.next-- // use negative offsets as IDs to aid debugging
- reflectOffs.m[id] = ptr
- reflectOffs.minv[ptr] = id
- }
- reflectOffsUnlock()
- return id
-}
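
reflect_addReflectOff is a lazily initialized two-way registry that hands out negative IDs (negative so they cannot collide with real linker offsets and stand out when debugging). A generic, non-runtime sketch of the same pattern, minus the reflectOffsLock locking the real version needs:

	package main

	import (
		"fmt"
		"unsafe"
	)

	// registry maps IDs to pointers and back, assigning fresh negative IDs.
	type registry struct {
		m    map[int32]unsafe.Pointer
		minv map[unsafe.Pointer]int32
		next int32
	}

	func (r *registry) add(p unsafe.Pointer) int32 {
		if r.m == nil { // lazy initialization, as in the runtime version
			r.m = make(map[int32]unsafe.Pointer)
			r.minv = make(map[unsafe.Pointer]int32)
			r.next = -1
		}
		if id, ok := r.minv[p]; ok {
			return id // already registered: reuse the existing ID
		}
		id := r.next
		r.next-- // IDs march downward: -1, -2, -3, ...
		r.m[id] = p
		r.minv[p] = id
		return id
	}

	func main() {
		var r registry
		x, y := 1, 2
		fmt.Println(r.add(unsafe.Pointer(&x)), r.add(unsafe.Pointer(&y)), r.add(unsafe.Pointer(&x)))
		// Output: -1 -2 -1
	}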
diff --git a/contrib/go/_std_1.18/src/runtime/runtime2.go b/contrib/go/_std_1.18/src/runtime/runtime2.go
deleted file mode 100644
index b40045e4a5..0000000000
--- a/contrib/go/_std_1.18/src/runtime/runtime2.go
+++ /dev/null
@@ -1,1134 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/goarch"
- "runtime/internal/atomic"
- "unsafe"
-)
-
-// defined constants
-const (
- // G status
- //
- // Beyond indicating the general state of a G, the G status
- // acts like a lock on the goroutine's stack (and hence its
- // ability to execute user code).
- //
- // If you add to this list, add to the list
- // of "okay during garbage collection" status
- // in mgcmark.go too.
- //
- // TODO(austin): The _Gscan bit could be much lighter-weight.
- // For example, we could choose not to run _Gscanrunnable
- // goroutines found in the run queue, rather than CAS-looping
- // until they become _Grunnable. And transitions like
- // _Gscanwaiting -> _Gscanrunnable are actually okay because
- // they don't affect stack ownership.
-
- // _Gidle means this goroutine was just allocated and has not
- // yet been initialized.
- _Gidle = iota // 0
-
- // _Grunnable means this goroutine is on a run queue. It is
- // not currently executing user code. The stack is not owned.
- _Grunnable // 1
-
- // _Grunning means this goroutine may execute user code. The
- // stack is owned by this goroutine. It is not on a run queue.
- // It is assigned an M and a P (g.m and g.m.p are valid).
- _Grunning // 2
-
- // _Gsyscall means this goroutine is executing a system call.
- // It is not executing user code. The stack is owned by this
- // goroutine. It is not on a run queue. It is assigned an M.
- _Gsyscall // 3
-
- // _Gwaiting means this goroutine is blocked in the runtime.
- // It is not executing user code. It is not on a run queue,
- // but should be recorded somewhere (e.g., a channel wait
- // queue) so it can be ready()d when necessary. The stack is
- // not owned *except* that a channel operation may read or
- // write parts of the stack under the appropriate channel
- // lock. Otherwise, it is not safe to access the stack after a
- // goroutine enters _Gwaiting (e.g., it may get moved).
- _Gwaiting // 4
-
- // _Gmoribund_unused is currently unused, but hardcoded in gdb
- // scripts.
- _Gmoribund_unused // 5
-
- // _Gdead means this goroutine is currently unused. It may be
- // just exited, on a free list, or just being initialized. It
- // is not executing user code. It may or may not have a stack
- // allocated. The G and its stack (if any) are owned by the M
- // that is exiting the G or that obtained the G from the free
- // list.
- _Gdead // 6
-
- // _Genqueue_unused is currently unused.
- _Genqueue_unused // 7
-
- // _Gcopystack means this goroutine's stack is being moved. It
- // is not executing user code and is not on a run queue. The
- // stack is owned by the goroutine that put it in _Gcopystack.
- _Gcopystack // 8
-
- // _Gpreempted means this goroutine stopped itself for a
- // suspendG preemption. It is like _Gwaiting, but nothing is
- // yet responsible for ready()ing it. Some suspendG must CAS
- // the status to _Gwaiting to take responsibility for
- // ready()ing this G.
- _Gpreempted // 9
-
- // _Gscan combined with one of the above states other than
- // _Grunning indicates that GC is scanning the stack. The
- // goroutine is not executing user code and the stack is owned
- // by the goroutine that set the _Gscan bit.
- //
- // _Gscanrunning is different: it is used to briefly block
- // state transitions while GC signals the G to scan its own
- // stack. This is otherwise like _Grunning.
- //
- // atomicstatus&~Gscan gives the state the goroutine will
- // return to when the scan completes.
- _Gscan = 0x1000
- _Gscanrunnable = _Gscan + _Grunnable // 0x1001
- _Gscanrunning = _Gscan + _Grunning // 0x1002
- _Gscansyscall = _Gscan + _Gsyscall // 0x1003
- _Gscanwaiting = _Gscan + _Gwaiting // 0x1004
- _Gscanpreempted = _Gscan + _Gpreempted // 0x1009
-)
-
-const (
- // P status
-
- // _Pidle means a P is not being used to run user code or the
- // scheduler. Typically, it's on the idle P list and available
- // to the scheduler, but it may just be transitioning between
- // other states.
- //
- // The P is owned by the idle list or by whatever is
- // transitioning its state. Its run queue is empty.
- _Pidle = iota
-
- // _Prunning means a P is owned by an M and is being used to
- // run user code or the scheduler. Only the M that owns this P
- // is allowed to change the P's status from _Prunning. The M
- // may transition the P to _Pidle (if it has no more work to
- // do), _Psyscall (when entering a syscall), or _Pgcstop (to
- // halt for the GC). The M may also hand ownership of the P
- // off directly to another M (e.g., to schedule a locked G).
- _Prunning
-
- // _Psyscall means a P is not running user code. It has
- // affinity to an M in a syscall but is not owned by it and
- // may be stolen by another M. This is similar to _Pidle but
- // uses lightweight transitions and maintains M affinity.
- //
- // Leaving _Psyscall must be done with a CAS, either to steal
- // or retake the P. Note that there's an ABA hazard: even if
- // an M successfully CASes its original P back to _Prunning
- // after a syscall, it must understand the P may have been
- // used by another M in the interim.
- _Psyscall
-
- // _Pgcstop means a P is halted for STW and owned by the M
- // that stopped the world. The M that stopped the world
- // continues to use its P, even in _Pgcstop. Transitioning
- // from _Prunning to _Pgcstop causes an M to release its P and
- // park.
- //
- // The P retains its run queue and startTheWorld will restart
- // the scheduler on Ps with non-empty run queues.
- _Pgcstop
-
- // _Pdead means a P is no longer used (GOMAXPROCS shrank). We
- // reuse Ps if GOMAXPROCS increases. A dead P is mostly
- // stripped of its resources, though a few things remain
- // (e.g., trace buffers).
- _Pdead
-)
-
-// Mutual exclusion locks. In the uncontended case,
-// as fast as spin locks (just a few user-level instructions),
-// but on the contention path they sleep in the kernel.
-// A zeroed Mutex is unlocked (no need to initialize each lock).
-// Initialization is helpful for static lock ranking, but not required.
-type mutex struct {
- // Empty struct if lock ranking is disabled, otherwise includes the lock rank
- lockRankStruct
- // Futex-based impl treats it as uint32 key,
- // while sema-based impl as M* waitm.
- // Used to be a union, but unions break precise GC.
- key uintptr
-}
-
-// sleep and wakeup on one-time events.
-// before any calls to notesleep or notewakeup,
-// must call noteclear to initialize the Note.
-// then, exactly one thread can call notesleep
-// and exactly one thread can call notewakeup (once).
-// once notewakeup has been called, the notesleep
-// will return. future notesleep will return immediately.
-// subsequent noteclear must be called only after
-// previous notesleep has returned, e.g. it's disallowed
-// to call noteclear straight after notewakeup.
-//
-// notetsleep is like notesleep but wakes up after
-// a given number of nanoseconds even if the event
-// has not yet happened. if a goroutine uses notetsleep to
-// wake up early, it must wait to call noteclear until it
-// can be sure that no other goroutine is calling
-// notewakeup.
-//
-// notesleep/notetsleep are generally called on g0,
-// notetsleepg is similar to notetsleep but is called on user g.
-type note struct {
- // Futex-based impl treats it as uint32 key,
- // while sema-based impl as M* waitm.
- // Used to be a union, but unions break precise GC.
- key uintptr
-}
-
-type funcval struct {
- fn uintptr
- // variable-size, fn-specific data here
-}
-
-type iface struct {
- tab *itab
- data unsafe.Pointer
-}
-
-type eface struct {
- _type *_type
- data unsafe.Pointer
-}
-
-func efaceOf(ep *any) *eface {
- return (*eface)(unsafe.Pointer(ep))
-}
-
-// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
-// It is particularly important to avoid write barriers when the current P has
-// been released, because the GC thinks the world is stopped, and an
-// unexpected write barrier would not be synchronized with the GC,
-// which can lead to a half-executed write barrier that has marked the object
-// but not queued it. If the GC skips the object and completes before the
-// queuing can occur, it will incorrectly free the object.
-//
-// We tried using special assignment functions invoked only when not
-// holding a running P, but then some updates to a particular memory
-// word went through write barriers and some did not. This breaks the
-// write barrier shadow checking mode, and it is also scary: better to have
-// a word that is completely ignored by the GC than to have one for which
-// only a few updates are ignored.
-//
-// Gs and Ps are always reachable via true pointers in the
-// allgs and allp lists or (during allocation before they reach those lists)
-// from stack variables.
-//
-// Ms are always reachable via true pointers either from allm or
-// freem. Unlike Gs and Ps we do free Ms, so it's important that
-// nothing ever hold an muintptr across a safe point.
-
-// A guintptr holds a goroutine pointer, but typed as a uintptr
-// to bypass write barriers. It is used in the Gobuf goroutine state
-// and in scheduling lists that are manipulated without a P.
-//
-// The Gobuf.g goroutine pointer is almost always updated by assembly code.
-// In one of the few places it is updated by Go code - func save - it must be
-// treated as a uintptr to avoid a write barrier being emitted at a bad time.
-// Instead of figuring out how to emit the write barriers missing in the
-// assembly manipulation, we change the type of the field to uintptr,
-// so that it does not require write barriers at all.
-//
-// Goroutine structs are published in the allg list and never freed.
-// That will keep the goroutine structs from being collected.
-// There is never a time that Gobuf.g's contain the only references
-// to a goroutine: the publishing of the goroutine in allg comes first.
-// Goroutine pointers are also kept in non-GC-visible places like TLS,
-// so I can't see them ever moving. If we did want to start moving data
-// in the GC, we'd need to allocate the goroutine structs from an
-// alternate arena. Using guintptr doesn't make that problem any worse.
-// Note that pollDesc.rg, pollDesc.wg also store g in uintptr form,
-// so they would need to be updated too if g's start moving.
-type guintptr uintptr
-
-//go:nosplit
-func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }
-
-//go:nosplit
-func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }
-
-//go:nosplit
-func (gp *guintptr) cas(old, new guintptr) bool {
- return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
-}
-
-// setGNoWB performs *gp = new without a write barrier.
-// For times when it's impractical to use a guintptr.
-//go:nosplit
-//go:nowritebarrier
-func setGNoWB(gp **g, new *g) {
- (*guintptr)(unsafe.Pointer(gp)).set(new)
-}
-
-type puintptr uintptr
-
-//go:nosplit
-func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }
-
-//go:nosplit
-func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
-
-// muintptr is a *m that is not tracked by the garbage collector.
-//
-// Because we do free Ms, there are some additional constraints on
-// muintptrs:
-//
-// 1. Never hold an muintptr locally across a safe point.
-//
-// 2. Any muintptr in the heap must be owned by the M itself so it can
-// ensure it is not in use when the last true *m is released.
-type muintptr uintptr
-
-//go:nosplit
-func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }
-
-//go:nosplit
-func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }
-
-// setMNoWB performs *mp = new without a write barrier.
-// For times when it's impractical to use an muintptr.
-//go:nosplit
-//go:nowritebarrier
-func setMNoWB(mp **m, new *m) {
- (*muintptr)(unsafe.Pointer(mp)).set(new)
-}
-
-type gobuf struct {
- // The offsets of sp, pc, and g are known to (hard-coded in) libmach.
- //
- // ctxt is unusual with respect to GC: it may be a
- // heap-allocated funcval, so GC needs to track it, but it
- // needs to be set and cleared from assembly, where it's
- // difficult to have write barriers. However, ctxt is really a
- // saved, live register, and we only ever exchange it between
- // the real register and the gobuf. Hence, we treat it as a
- // root during stack scanning, which means assembly that saves
- // and restores it doesn't need write barriers. It's still
- // typed as a pointer so that any other writes from Go get
- // write barriers.
- sp uintptr
- pc uintptr
- g guintptr
- ctxt unsafe.Pointer
- ret uintptr
- lr uintptr
- bp uintptr // for framepointer-enabled architectures
-}
-
-// sudog represents a g in a wait list, such as for sending/receiving
-// on a channel.
-//
-// sudog is necessary because the g ↔ synchronization object relation
-// is many-to-many. A g can be on many wait lists, so there may be
-// many sudogs for one g; and many gs may be waiting on the same
-// synchronization object, so there may be many sudogs for one object.
-//
-// sudogs are allocated from a special pool. Use acquireSudog and
-// releaseSudog to allocate and free them.
-type sudog struct {
- // The following fields are protected by the hchan.lock of the
- // channel this sudog is blocking on. shrinkstack depends on
- // this for sudogs involved in channel ops.
-
- g *g
-
- next *sudog
- prev *sudog
- elem unsafe.Pointer // data element (may point to stack)
-
- // The following fields are never accessed concurrently.
- // For channels, waitlink is only accessed by g.
- // For semaphores, all fields (including the ones above)
- // are only accessed when holding a semaRoot lock.
-
- acquiretime int64
- releasetime int64
- ticket uint32
-
- // isSelect indicates g is participating in a select, so
- // g.selectDone must be CAS'd to win the wake-up race.
- isSelect bool
-
- // success indicates whether communication over channel c
- // succeeded. It is true if the goroutine was awoken because a
- // value was delivered over channel c, and false if awoken
- // because c was closed.
- success bool
-
- parent *sudog // semaRoot binary tree
- waitlink *sudog // g.waiting list or semaRoot
- waittail *sudog // semaRoot
- c *hchan // channel
-}
-
-type libcall struct {
- fn uintptr
- n uintptr // number of parameters
- args uintptr // parameters
- r1 uintptr // return values
- r2 uintptr
- err uintptr // error number
-}
-
-// Stack describes a Go execution stack.
-// The bounds of the stack are exactly [lo, hi),
-// with no implicit data structures on either side.
-type stack struct {
- lo uintptr
- hi uintptr
-}
-
-// heldLockInfo gives info on a held lock and the rank of that lock
-type heldLockInfo struct {
- lockAddr uintptr
- rank lockRank
-}
-
-type g struct {
- // Stack parameters.
- // stack describes the actual stack memory: [stack.lo, stack.hi).
- // stackguard0 is the stack pointer compared in the Go stack growth prologue.
- // It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
- // stackguard1 is the stack pointer compared in the C stack growth prologue.
- // It is stack.lo+StackGuard on g0 and gsignal stacks.
- // It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
- stack stack // offset known to runtime/cgo
- stackguard0 uintptr // offset known to liblink
- stackguard1 uintptr // offset known to liblink
-
- _panic *_panic // innermost panic - offset known to liblink
- _defer *_defer // innermost defer
- m *m // current m; offset known to arm liblink
- sched gobuf
- syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
- syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
- stktopsp uintptr // expected sp at top of stack, to check in traceback
- // param is a generic pointer parameter field used to pass
- // values in particular contexts where other storage for the
- // parameter would be difficult to find. It is currently used
- // in three ways:
- // 1. When a channel operation wakes up a blocked goroutine, it sets param to
- // point to the sudog of the completed blocking operation.
- // 2. By gcAssistAlloc1 to signal back to its caller that the goroutine completed
- // the GC cycle. It is unsafe to do so in any other way, because the goroutine's
- // stack may have moved in the meantime.
- // 3. By debugCallWrap to pass parameters to a new goroutine because allocating a
- // closure in the runtime is forbidden.
- param unsafe.Pointer
- atomicstatus uint32
- stackLock uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
- goid int64
- schedlink guintptr
-	waitsince    int64      // approx time when the g became blocked
- waitreason waitReason // if status==Gwaiting
-
- preempt bool // preemption signal, duplicates stackguard0 = stackpreempt
- preemptStop bool // transition to _Gpreempted on preemption; otherwise, just deschedule
- preemptShrink bool // shrink stack at synchronous safe point
-
- // asyncSafePoint is set if g is stopped at an asynchronous
- // safe point. This means there are frames on the stack
- // without precise pointer information.
- asyncSafePoint bool
-
- paniconfault bool // panic (instead of crash) on unexpected fault address
- gcscandone bool // g has scanned stack; protected by _Gscan bit in status
- throwsplit bool // must not split stack
- // activeStackChans indicates that there are unlocked channels
- // pointing into this goroutine's stack. If true, stack
- // copying needs to acquire channel locks to protect these
- // areas of the stack.
- activeStackChans bool
- // parkingOnChan indicates that the goroutine is about to
- // park on a chansend or chanrecv. Used to signal an unsafe point
- // for stack shrinking. It's a boolean value, but is updated atomically.
- parkingOnChan uint8
-
- raceignore int8 // ignore race detection events
- sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine
- tracking bool // whether we're tracking this G for sched latency statistics
- trackingSeq uint8 // used to decide whether to track this G
- runnableStamp int64 // timestamp of when the G last became runnable, only used when tracking
- runnableTime int64 // the amount of time spent runnable, cleared when running, only used when tracking
- sysexitticks int64 // cputicks when syscall has returned (for tracing)
- traceseq uint64 // trace event sequencer
- tracelastp puintptr // last P emitted an event for this goroutine
- lockedm muintptr
- sig uint32
- writebuf []byte
- sigcode0 uintptr
- sigcode1 uintptr
- sigpc uintptr
- gopc uintptr // pc of go statement that created this goroutine
-	ancestors    *[]ancestorInfo // ancestor information of the goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
- startpc uintptr // pc of goroutine function
- racectx uintptr
- waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
- cgoCtxt []uintptr // cgo traceback context
- labels unsafe.Pointer // profiler labels
- timer *timer // cached timer for time.Sleep
- selectDone uint32 // are we participating in a select and did someone win the race?
-
- // Per-G GC state
-
- // gcAssistBytes is this G's GC assist credit in terms of
- // bytes allocated. If this is positive, then the G has credit
- // to allocate gcAssistBytes bytes without assisting. If this
- // is negative, then the G must correct this by performing
- // scan work. We track this in bytes to make it fast to update
- // and check for debt in the malloc hot path. The assist ratio
- // determines how this corresponds to scan work debt.
- gcAssistBytes int64
-}
-
-// gTrackingPeriod is the number of transitions out of _Grunning between
-// latency tracking runs.
-const gTrackingPeriod = 8
-
-const (
- // tlsSlots is the number of pointer-sized slots reserved for TLS on some platforms,
- // like Windows.
- tlsSlots = 6
- tlsSize = tlsSlots * goarch.PtrSize
-)
-
-type m struct {
- g0 *g // goroutine with scheduling stack
- morebuf gobuf // gobuf arg to morestack
- divmod uint32 // div/mod denominator for arm - known to liblink
- _ uint32 // align next field to 8 bytes
-
- // Fields not known to debuggers.
- procid uint64 // for debuggers, but offset not hard-coded
- gsignal *g // signal-handling g
- goSigStack gsignalStack // Go-allocated signal handling stack
- sigmask sigset // storage for saved signal mask
- tls [tlsSlots]uintptr // thread-local storage (for x86 extern register)
- mstartfn func()
- curg *g // current running goroutine
- caughtsig guintptr // goroutine running during fatal signal
- p puintptr // attached p for executing go code (nil if not executing go code)
- nextp puintptr
- oldp puintptr // the p that was attached before executing a syscall
- id int64
- mallocing int32
- throwing int32
- preemptoff string // if != "", keep curg running on this m
- locks int32
- dying int32
- profilehz int32
- spinning bool // m is out of work and is actively looking for work
- blocked bool // m is blocked on a note
- newSigstack bool // minit on C thread called sigaltstack
- printlock int8
- incgo bool // m is executing a cgo call
- freeWait uint32 // if == 0, safe to free g0 and delete m (atomic)
- fastrand uint64
- needextram bool
- traceback uint8
- ncgocall uint64 // number of cgo calls in total
- ncgo int32 // number of cgo calls currently in progress
- cgoCallersUse uint32 // if non-zero, cgoCallers in use temporarily
- cgoCallers *cgoCallers // cgo traceback if crashing in cgo call
- park note
- alllink *m // on allm
- schedlink muintptr
- lockedg guintptr
- createstack [32]uintptr // stack that created this thread.
- lockedExt uint32 // tracking for external LockOSThread
- lockedInt uint32 // tracking for internal lockOSThread
- nextwaitm muintptr // next m waiting for lock
- waitunlockf func(*g, unsafe.Pointer) bool
- waitlock unsafe.Pointer
- waittraceev byte
- waittraceskip int
- startingtrace bool
- syscalltick uint32
- freelink *m // on sched.freem
-
- // these are here because they are too large to be on the stack
- // of low-level NOSPLIT functions.
- libcall libcall
- libcallpc uintptr // for cpu profiler
- libcallsp uintptr
- libcallg guintptr
- syscall libcall // stores syscall parameters on windows
-
- vdsoSP uintptr // SP for traceback while in VDSO call (0 if not in call)
- vdsoPC uintptr // PC for traceback while in VDSO call
-
- // preemptGen counts the number of completed preemption
- // signals. This is used to detect when a preemption is
- // requested, but fails. Accessed atomically.
- preemptGen uint32
-
- // Whether this is a pending preemption signal on this M.
- // Accessed atomically.
- signalPending uint32
-
- dlogPerM
-
- mOS
-
- // Up to 10 locks held by this m, maintained by the lock ranking code.
- locksHeldLen int
- locksHeld [10]heldLockInfo
-}
-
-type p struct {
- id int32
- status uint32 // one of pidle/prunning/...
- link puintptr
- schedtick uint32 // incremented on every scheduler call
- syscalltick uint32 // incremented on every system call
- sysmontick sysmontick // last tick observed by sysmon
- m muintptr // back-link to associated m (nil if idle)
- mcache *mcache
- pcache pageCache
- raceprocctx uintptr
-
- deferpool []*_defer // pool of available defer structs (see panic.go)
- deferpoolbuf [32]*_defer
-
- // Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
- goidcache uint64
- goidcacheend uint64
-
- // Queue of runnable goroutines. Accessed without lock.
- runqhead uint32
- runqtail uint32
- runq [256]guintptr
- // runnext, if non-nil, is a runnable G that was ready'd by
- // the current G and should be run next instead of what's in
- // runq if there's time remaining in the running G's time
- // slice. It will inherit the time left in the current time
- // slice. If a set of goroutines is locked in a
- // communicate-and-wait pattern, this schedules that set as a
- // unit and eliminates the (potentially large) scheduling
- // latency that otherwise arises from adding the ready'd
- // goroutines to the end of the run queue.
- //
- // Note that while other P's may atomically CAS this to zero,
- // only the owner P can CAS it to a valid G.
- runnext guintptr
-
- // Available G's (status == Gdead)
- gFree struct {
- gList
- n int32
- }
-
- sudogcache []*sudog
- sudogbuf [128]*sudog
-
- // Cache of mspan objects from the heap.
- mspancache struct {
- // We need an explicit length here because this field is used
- // in allocation codepaths where write barriers are not allowed,
- // and eliminating the write barrier/keeping it eliminated from
-		// slice updates is tricky, more so than just managing the length
- // ourselves.
- len int
- buf [128]*mspan
- }
-
- tracebuf traceBufPtr
-
- // traceSweep indicates the sweep events should be traced.
- // This is used to defer the sweep start event until a span
- // has actually been swept.
- traceSweep bool
- // traceSwept and traceReclaimed track the number of bytes
- // swept and reclaimed by sweeping in the current sweep loop.
- traceSwept, traceReclaimed uintptr
-
- palloc persistentAlloc // per-P to avoid mutex
-
- _ uint32 // Alignment for atomic fields below
-
- // The when field of the first entry on the timer heap.
- // This is updated using atomic functions.
- // This is 0 if the timer heap is empty.
- timer0When uint64
-
- // The earliest known nextwhen field of a timer with
- // timerModifiedEarlier status. Because the timer may have been
- // modified again, there need not be any timer with this value.
- // This is updated using atomic functions.
- // This is 0 if there are no timerModifiedEarlier timers.
- timerModifiedEarliest uint64
-
- // Per-P GC state
- gcAssistTime int64 // Nanoseconds in assistAlloc
- gcFractionalMarkTime int64 // Nanoseconds in fractional mark worker (atomic)
-
- // gcMarkWorkerMode is the mode for the next mark worker to run in.
- // That is, this is used to communicate with the worker goroutine
- // selected for immediate execution by
- // gcController.findRunnableGCWorker. When scheduling other goroutines,
- // this field must be set to gcMarkWorkerNotWorker.
- gcMarkWorkerMode gcMarkWorkerMode
- // gcMarkWorkerStartTime is the nanotime() at which the most recent
- // mark worker started.
- gcMarkWorkerStartTime int64
-
- // gcw is this P's GC work buffer cache. The work buffer is
- // filled by write barriers, drained by mutator assists, and
- // disposed on certain GC state transitions.
- gcw gcWork
-
- // wbBuf is this P's GC write barrier buffer.
- //
- // TODO: Consider caching this in the running G.
- wbBuf wbBuf
-
- runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point
-
- // statsSeq is a counter indicating whether this P is currently
- // writing any stats. Its value is even when not, odd when it is.
- statsSeq uint32
-
- // Lock for timers. We normally access the timers while running
- // on this P, but the scheduler can also do it from a different P.
- timersLock mutex
-
- // Actions to take at some time. This is used to implement the
- // standard library's time package.
- // Must hold timersLock to access.
- timers []*timer
-
- // Number of timers in P's heap.
- // Modified using atomic instructions.
- numTimers uint32
-
- // Number of timerDeleted timers in P's heap.
- // Modified using atomic instructions.
- deletedTimers uint32
-
- // Race context used while executing timer functions.
- timerRaceCtx uintptr
-
- // scannableStackSizeDelta accumulates the amount of stack space held by
- // live goroutines (i.e. those eligible for stack scanning).
- // Flushed to gcController.scannableStackSize once scannableStackSizeSlack
- // or -scannableStackSizeSlack is reached.
- scannableStackSizeDelta int64
-
-	// preempt is set to indicate that this P should enter the
- // scheduler ASAP (regardless of what G is running on it).
- preempt bool
-
- // Padding is no longer needed. False sharing is now not a worry because p is large enough
- // that its size class is an integer multiple of the cache line size (for any of our architectures).
-}
-
-type schedt struct {
- // accessed atomically. keep at top to ensure alignment on 32-bit systems.
- goidgen uint64
- lastpoll uint64 // time of last network poll, 0 if currently polling
- pollUntil uint64 // time to which current poll is sleeping
-
- lock mutex
-
- // When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
- // sure to call checkdead().
-
- midle muintptr // idle m's waiting for work
- nmidle int32 // number of idle m's waiting for work
- nmidlelocked int32 // number of locked m's waiting for work
- mnext int64 // number of m's that have been created and next M ID
- maxmcount int32 // maximum number of m's allowed (or die)
- nmsys int32 // number of system m's not counted for deadlock
- nmfreed int64 // cumulative number of freed m's
-
- ngsys uint32 // number of system goroutines; updated atomically
-
- pidle puintptr // idle p's
- npidle uint32
- nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go.
-
- // Global runnable queue.
- runq gQueue
- runqsize int32
-
- // disable controls selective disabling of the scheduler.
- //
- // Use schedEnableUser to control this.
- //
- // disable is protected by sched.lock.
- disable struct {
- // user disables scheduling of user goroutines.
- user bool
- runnable gQueue // pending runnable Gs
- n int32 // length of runnable
- }
-
- // Global cache of dead G's.
- gFree struct {
- lock mutex
- stack gList // Gs with stacks
- noStack gList // Gs without stacks
- n int32
- }
-
- // Central cache of sudog structs.
- sudoglock mutex
- sudogcache *sudog
-
- // Central pool of available defer structs.
- deferlock mutex
- deferpool *_defer
-
- // freem is the list of m's waiting to be freed when their
- // m.exited is set. Linked through m.freelink.
- freem *m
-
- gcwaiting uint32 // gc is waiting to run
- stopwait int32
- stopnote note
- sysmonwait uint32
- sysmonnote note
-
- // safepointFn should be called on each P at the next GC
- // safepoint if p.runSafePointFn is set.
- safePointFn func(*p)
- safePointWait int32
- safePointNote note
-
- profilehz int32 // cpu profiling rate
-
- procresizetime int64 // nanotime() of last change to gomaxprocs
- totaltime int64 // ∫gomaxprocs dt up to procresizetime
-
- // sysmonlock protects sysmon's actions on the runtime.
- //
- // Acquire and hold this mutex to block sysmon from interacting
- // with the rest of the runtime.
- sysmonlock mutex
-
- // timeToRun is a distribution of scheduling latencies, defined
- // as the sum of time a G spends in the _Grunnable state before
- // it transitions to _Grunning.
- //
- // timeToRun is protected by sched.lock.
- timeToRun timeHistogram
-}
-
-// Values for the flags field of a sigTabT.
-const (
- _SigNotify = 1 << iota // let signal.Notify have signal, even if from kernel
- _SigKill // if signal.Notify doesn't take it, exit quietly
- _SigThrow // if signal.Notify doesn't take it, exit loudly
- _SigPanic // if the signal is from the kernel, panic
- _SigDefault // if the signal isn't explicitly requested, don't monitor it
- _SigGoExit // cause all runtime procs to exit (only used on Plan 9).
- _SigSetStack // Don't explicitly install handler, but add SA_ONSTACK to existing libc handler
- _SigUnblock // always unblock; see blockableSig
- _SigIgn // _SIG_DFL action is to ignore the signal
-)
-
-// Layout of in-memory per-function information prepared by linker
-// See https://golang.org/s/go12symtab.
-// Keep in sync with linker (../cmd/link/internal/ld/pcln.go:/pclntab)
-// and with package debug/gosym and with symtab.go in package runtime.
-type _func struct {
- entryoff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart
- nameoff int32 // function name
-
- args int32 // in/out args size
- deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.
-
- pcsp uint32
- pcfile uint32
- pcln uint32
- npcdata uint32
- cuOffset uint32 // runtime.cutab offset of this function's CU
- funcID funcID // set for certain special runtime functions
- flag funcFlag
- _ [1]byte // pad
- nfuncdata uint8 // must be last, must end on a uint32-aligned boundary
-}
-
-// Pseudo-Func that is returned for PCs that occur in inlined code.
-// A *Func can be either a *_func or a *funcinl, and they are distinguished
-// by the first uintptr.
-type funcinl struct {
- ones uint32 // set to ^0 to distinguish from _func
- entry uintptr // entry of the real (the "outermost") frame
- name string
- file string
- line int
-}
-
-// layout of Itab known to compilers
-// allocated in non-garbage-collected memory
-// Needs to be in sync with
-// ../cmd/compile/internal/reflectdata/reflect.go:/^func.WriteTabs.
-type itab struct {
- inter *interfacetype
- _type *_type
- hash uint32 // copy of _type.hash. Used for type switches.
- _ [4]byte
- fun [1]uintptr // variable sized. fun[0]==0 means _type does not implement inter.
-}
-
-// Lock-free stack node.
-// Also known to export_test.go.
-type lfnode struct {
- next uint64
- pushcnt uintptr
-}
-
-type forcegcstate struct {
- lock mutex
- g *g
- idle uint32
-}
-
-// extendRandom extends the random numbers in r[:n] to the whole slice r.
-// Treats n<0 as n==0.
-func extendRandom(r []byte, n int) {
- if n < 0 {
- n = 0
- }
- for n < len(r) {
- // Extend random bits using hash function & time seed
- w := n
- if w > 16 {
- w = 16
- }
- h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
- for i := 0; i < goarch.PtrSize && n < len(r); i++ {
- r[n] = byte(h)
- n++
- h >>= 8
- }
- }
-}
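
A hypothetical user-level analogue of the loop above, substituting hash/fnv for the runtime's internal memhash (illustration only; the output is not cryptographically random):

	package main

	import (
		"fmt"
		"hash/fnv"
		"time"
	)

	func extendRandomSketch(r []byte, n int) {
		if n < 0 {
			n = 0
		}
		for n < len(r) {
			w := n
			if w > 16 {
				w = 16
			}
			h := fnv.New64a()
			h.Write(r[n-w : n])                         // fold in existing random bytes
			fmt.Fprintf(h, "%d", time.Now().UnixNano()) // time seed
			sum := h.Sum64()
			for i := 0; i < 8 && n < len(r); i++ {
				r[n] = byte(sum)
				n++
				sum >>= 8
			}
		}
	}

	func main() {
		buf := make([]byte, 12)
		extendRandomSketch(buf, 4) // extends buf[:4] to fill all 12 bytes
		fmt.Println(buf)
	}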
-
-// A _defer holds an entry on the list of deferred calls.
-// If you add a field here, add code to clear it in deferProcStack.
-// This struct must match the code in cmd/compile/internal/ssagen/ssa.go:deferstruct
-// and cmd/compile/internal/ssagen/ssa.go:(*state).call.
-// Some defers will be allocated on the stack and some on the heap.
-// All defers are logically part of the stack, so write barriers to
-// initialize them are not required. All defers must be manually scanned,
-// and for heap defers, marked.
-type _defer struct {
- started bool
- heap bool
- // openDefer indicates that this _defer is for a frame with open-coded
- // defers. We have only one defer record for the entire frame (which may
- // currently have 0, 1, or more defers active).
- openDefer bool
- sp uintptr // sp at time of defer
- pc uintptr // pc at time of defer
- fn func() // can be nil for open-coded defers
- _panic *_panic // panic that is running defer
- link *_defer // next defer on G; can point to either heap or stack!
-
- // If openDefer is true, the fields below record values about the stack
- // frame and associated function that has the open-coded defer(s). sp
- // above will be the sp for the frame, and pc will be address of the
- // deferreturn call in the function.
- fd unsafe.Pointer // funcdata for the function associated with the frame
- varp uintptr // value of varp for the stack frame
- // framepc is the current pc associated with the stack frame. Together,
- // with sp above (which is the sp associated with the stack frame),
- // framepc/sp can be used as pc/sp pair to continue a stack trace via
- // gentraceback().
- framepc uintptr
-}
-
-// A _panic holds information about an active panic.
-//
-// A _panic value must only ever live on the stack.
-//
-// The argp and link fields are stack pointers, but don't need special
-// handling during stack growth: because they are pointer-typed and
-// _panic values only live on the stack, regular stack pointer
-// adjustment takes care of them.
-type _panic struct {
- argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
- arg any // argument to panic
- link *_panic // link to earlier panic
- pc uintptr // where to return to in runtime if this panic is bypassed
- sp unsafe.Pointer // where to return to in runtime if this panic is bypassed
- recovered bool // whether this panic is over
- aborted bool // the panic was aborted
- goexit bool
-}
-
-// stack traces
-type stkframe struct {
- fn funcInfo // function being run
- pc uintptr // program counter within fn
- continpc uintptr // program counter where execution can continue, or 0 if not
- lr uintptr // program counter at caller aka link register
- sp uintptr // stack pointer at pc
- fp uintptr // stack pointer at caller aka frame pointer
- varp uintptr // top of local variables
- argp uintptr // pointer to function arguments
- arglen uintptr // number of bytes at argp
- argmap *bitvector // force use of this argmap
-}
-
-// ancestorInfo records details of where a goroutine was started.
-type ancestorInfo struct {
- pcs []uintptr // pcs from the stack of this goroutine
- goid int64 // goroutine id of this goroutine; original goroutine possibly dead
- gopc uintptr // pc of go statement that created this goroutine
-}
-
-const (
- _TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions.
- _TraceTrap // the initial PC, SP are from a trap, not a return PC from a call
- _TraceJumpStack // if traceback is on a systemstack, resume trace at g that called into it
-)
-
-// The maximum number of frames we print for a traceback
-const _TracebackMaxFrames = 100
-
-// A waitReason explains why a goroutine has been stopped.
-// See gopark. Do not re-use waitReasons, add new ones.
-type waitReason uint8
-
-const (
- waitReasonZero waitReason = iota // ""
- waitReasonGCAssistMarking // "GC assist marking"
- waitReasonIOWait // "IO wait"
- waitReasonChanReceiveNilChan // "chan receive (nil chan)"
- waitReasonChanSendNilChan // "chan send (nil chan)"
- waitReasonDumpingHeap // "dumping heap"
- waitReasonGarbageCollection // "garbage collection"
- waitReasonGarbageCollectionScan // "garbage collection scan"
- waitReasonPanicWait // "panicwait"
- waitReasonSelect // "select"
- waitReasonSelectNoCases // "select (no cases)"
- waitReasonGCAssistWait // "GC assist wait"
- waitReasonGCSweepWait // "GC sweep wait"
- waitReasonGCScavengeWait // "GC scavenge wait"
- waitReasonChanReceive // "chan receive"
- waitReasonChanSend // "chan send"
- waitReasonFinalizerWait // "finalizer wait"
- waitReasonForceGCIdle // "force gc (idle)"
- waitReasonSemacquire // "semacquire"
- waitReasonSleep // "sleep"
- waitReasonSyncCondWait // "sync.Cond.Wait"
- waitReasonTimerGoroutineIdle // "timer goroutine (idle)"
- waitReasonTraceReaderBlocked // "trace reader (blocked)"
- waitReasonWaitForGCCycle // "wait for GC cycle"
- waitReasonGCWorkerIdle // "GC worker (idle)"
- waitReasonPreempted // "preempted"
- waitReasonDebugCall // "debug call"
-)
-
-var waitReasonStrings = [...]string{
- waitReasonZero: "",
- waitReasonGCAssistMarking: "GC assist marking",
- waitReasonIOWait: "IO wait",
- waitReasonChanReceiveNilChan: "chan receive (nil chan)",
- waitReasonChanSendNilChan: "chan send (nil chan)",
- waitReasonDumpingHeap: "dumping heap",
- waitReasonGarbageCollection: "garbage collection",
- waitReasonGarbageCollectionScan: "garbage collection scan",
- waitReasonPanicWait: "panicwait",
- waitReasonSelect: "select",
- waitReasonSelectNoCases: "select (no cases)",
- waitReasonGCAssistWait: "GC assist wait",
- waitReasonGCSweepWait: "GC sweep wait",
- waitReasonGCScavengeWait: "GC scavenge wait",
- waitReasonChanReceive: "chan receive",
- waitReasonChanSend: "chan send",
- waitReasonFinalizerWait: "finalizer wait",
- waitReasonForceGCIdle: "force gc (idle)",
- waitReasonSemacquire: "semacquire",
- waitReasonSleep: "sleep",
- waitReasonSyncCondWait: "sync.Cond.Wait",
- waitReasonTimerGoroutineIdle: "timer goroutine (idle)",
- waitReasonTraceReaderBlocked: "trace reader (blocked)",
- waitReasonWaitForGCCycle: "wait for GC cycle",
- waitReasonGCWorkerIdle: "GC worker (idle)",
- waitReasonPreempted: "preempted",
- waitReasonDebugCall: "debug call",
-}
-
-func (w waitReason) String() string {
- if w < 0 || w >= waitReason(len(waitReasonStrings)) {
- return "unknown wait reason"
- }
- return waitReasonStrings[w]
-}
-
-var (
- allm *m
- gomaxprocs int32
- ncpu int32
- forcegc forcegcstate
- sched schedt
- newprocs int32
-
- // allpLock protects P-less reads and size changes of allp, idlepMask,
- // and timerpMask, and all writes to allp.
- allpLock mutex
- // len(allp) == gomaxprocs; may change at safe points, otherwise
- // immutable.
- allp []*p
- // Bitmask of Ps in _Pidle list, one bit per P. Reads and writes must
- // be atomic. Length may change at safe points.
- //
- // Each P must update only its own bit. In order to maintain
-	// consistency, a P going idle must update the idle mask simultaneously with
- // updates to the idle P list under the sched.lock, otherwise a racing
- // pidleget may clear the mask before pidleput sets the mask,
- // corrupting the bitmap.
- //
- // N.B., procresize takes ownership of all Ps in stopTheWorldWithSema.
- idlepMask pMask
- // Bitmask of Ps that may have a timer, one bit per P. Reads and writes
- // must be atomic. Length may change at safe points.
- timerpMask pMask
-
- // Pool of GC parked background workers. Entries are type
- // *gcBgMarkWorkerNode.
- gcBgMarkWorkerPool lfstack
-
- // Total number of gcBgMarkWorker goroutines. Protected by worldsema.
- gcBgMarkWorkerCount int32
-
- // Information about what cpu features are available.
- // Packages outside the runtime should not use these
- // as they are not an external api.
- // Set on startup in asm_{386,amd64}.s
- processorVersionInfo uint32
- isIntel bool
-
- goarm uint8 // set by cmd/link on arm systems
-)
-
-// Set by the linker so the runtime can determine the buildmode.
-var (
- islibrary bool // -buildmode=c-shared
- isarchive bool // -buildmode=c-archive
-)
-
-// Must agree with internal/buildcfg.Experiment.FramePointer.
-const framepointer_enabled = GOARCH == "amd64" || GOARCH == "arm64"
diff --git a/contrib/go/_std_1.18/src/runtime/sema.go b/contrib/go/_std_1.18/src/runtime/sema.go
deleted file mode 100644
index f94c1aa891..0000000000
--- a/contrib/go/_std_1.18/src/runtime/sema.go
+++ /dev/null
@@ -1,617 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Semaphore implementation exposed to Go.
-// Intended use is to provide a sleep and wakeup
-// primitive that can be used in the contended case
-// of other synchronization primitives.
-// Thus it targets the same goal as Linux's futex,
-// but it has much simpler semantics.
-//
-// That is, don't think of these as semaphores.
-// Think of them as a way to implement sleep and wakeup
-// such that every sleep is paired with a single wakeup,
-// even if, due to races, the wakeup happens before the sleep.
-//
-// See Mullender and Cox, ``Semaphores in Plan 9,''
-// https://swtch.com/semaphore.pdf
-
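
The pairing contract described above can be imitated in user code with a 1-buffered channel (a hedged analogy, not the runtime's actual mechanism): the wakeup may happen before the sleep, and each sleep still consumes exactly one wakeup:

	package main

	func main() {
		wake := make(chan struct{}, 1)
		wake <- struct{}{} // "wakeup": may happen first without blocking
		<-wake             // "sleep": returns immediately, consuming the wakeup
	}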
-package runtime
-
-import (
- "internal/cpu"
- "runtime/internal/atomic"
- "unsafe"
-)
-
-// Asynchronous semaphore for sync.Mutex.
-
-// A semaRoot holds a balanced tree of sudog with distinct addresses (s.elem).
-// Each of those sudog may in turn point (through s.waitlink) to a list
-// of other sudogs waiting on the same address.
-// The operations on the inner lists of sudogs with the same address
-// are all O(1). The scanning of the top-level semaRoot list is O(log n),
-// where n is the number of distinct addresses with goroutines blocked
-// on them that hash to the given semaRoot.
-// See golang.org/issue/17953 for a program that worked badly
-// before we introduced the second level of list, and test/locklinear.go
-// for a test that exercises this.
-type semaRoot struct {
- lock mutex
- treap *sudog // root of balanced tree of unique waiters.
- nwait uint32 // Number of waiters. Read w/o the lock.
-}
-
-// Prime to not correlate with any user patterns.
-const semTabSize = 251
-
-var semtable [semTabSize]struct {
- root semaRoot
- pad [cpu.CacheLinePadSize - unsafe.Sizeof(semaRoot{})]byte
-}
-
-//go:linkname sync_runtime_Semacquire sync.runtime_Semacquire
-func sync_runtime_Semacquire(addr *uint32) {
- semacquire1(addr, false, semaBlockProfile, 0)
-}
-
-//go:linkname poll_runtime_Semacquire internal/poll.runtime_Semacquire
-func poll_runtime_Semacquire(addr *uint32) {
- semacquire1(addr, false, semaBlockProfile, 0)
-}
-
-//go:linkname sync_runtime_Semrelease sync.runtime_Semrelease
-func sync_runtime_Semrelease(addr *uint32, handoff bool, skipframes int) {
- semrelease1(addr, handoff, skipframes)
-}
-
-//go:linkname sync_runtime_SemacquireMutex sync.runtime_SemacquireMutex
-func sync_runtime_SemacquireMutex(addr *uint32, lifo bool, skipframes int) {
- semacquire1(addr, lifo, semaBlockProfile|semaMutexProfile, skipframes)
-}
-
-//go:linkname poll_runtime_Semrelease internal/poll.runtime_Semrelease
-func poll_runtime_Semrelease(addr *uint32) {
- semrelease(addr)
-}
-
-func readyWithTime(s *sudog, traceskip int) {
- if s.releasetime != 0 {
- s.releasetime = cputicks()
- }
- goready(s.g, traceskip)
-}
-
-type semaProfileFlags int
-
-const (
- semaBlockProfile semaProfileFlags = 1 << iota
- semaMutexProfile
-)
-
-// Called from runtime.
-func semacquire(addr *uint32) {
- semacquire1(addr, false, 0, 0)
-}
-
-func semacquire1(addr *uint32, lifo bool, profile semaProfileFlags, skipframes int) {
- gp := getg()
- if gp != gp.m.curg {
- throw("semacquire not on the G stack")
- }
-
- // Easy case.
- if cansemacquire(addr) {
- return
- }
-
- // Harder case:
- // increment waiter count
- // try cansemacquire one more time, return if succeeded
- // enqueue itself as a waiter
- // sleep
- // (waiter descriptor is dequeued by signaler)
- s := acquireSudog()
- root := semroot(addr)
- t0 := int64(0)
- s.releasetime = 0
- s.acquiretime = 0
- s.ticket = 0
- if profile&semaBlockProfile != 0 && blockprofilerate > 0 {
- t0 = cputicks()
- s.releasetime = -1
- }
- if profile&semaMutexProfile != 0 && mutexprofilerate > 0 {
- if t0 == 0 {
- t0 = cputicks()
- }
- s.acquiretime = t0
- }
- for {
- lockWithRank(&root.lock, lockRankRoot)
- // Add ourselves to nwait to disable "easy case" in semrelease.
- atomic.Xadd(&root.nwait, 1)
- // Check cansemacquire to avoid missed wakeup.
- if cansemacquire(addr) {
- atomic.Xadd(&root.nwait, -1)
- unlock(&root.lock)
- break
- }
- // Any semrelease after the cansemacquire knows we're waiting
- // (we set nwait above), so go to sleep.
- root.queue(addr, s, lifo)
- goparkunlock(&root.lock, waitReasonSemacquire, traceEvGoBlockSync, 4+skipframes)
- if s.ticket != 0 || cansemacquire(addr) {
- break
- }
- }
- if s.releasetime > 0 {
- blockevent(s.releasetime-t0, 3+skipframes)
- }
- releaseSudog(s)
-}
-
-func semrelease(addr *uint32) {
- semrelease1(addr, false, 0)
-}
-
-func semrelease1(addr *uint32, handoff bool, skipframes int) {
- root := semroot(addr)
- atomic.Xadd(addr, 1)
-
- // Easy case: no waiters?
- // This check must happen after the xadd, to avoid a missed wakeup
- // (see loop in semacquire).
- if atomic.Load(&root.nwait) == 0 {
- return
- }
-
- // Harder case: search for a waiter and wake it.
- lockWithRank(&root.lock, lockRankRoot)
- if atomic.Load(&root.nwait) == 0 {
- // The count is already consumed by another goroutine,
- // so no need to wake up another goroutine.
- unlock(&root.lock)
- return
- }
- s, t0 := root.dequeue(addr)
- if s != nil {
- atomic.Xadd(&root.nwait, -1)
- }
- unlock(&root.lock)
- if s != nil { // May be slow or even yield, so unlock first
- acquiretime := s.acquiretime
- if acquiretime != 0 {
- mutexevent(t0-acquiretime, 3+skipframes)
- }
- if s.ticket != 0 {
- throw("corrupted semaphore ticket")
- }
- if handoff && cansemacquire(addr) {
- s.ticket = 1
- }
- readyWithTime(s, 5+skipframes)
- if s.ticket == 1 && getg().m.locks == 0 {
- // Direct G handoff
- // readyWithTime has added the waiter G as runnext in the
- // current P; we now call the scheduler so that we start running
- // the waiter G immediately.
- // Note that waiter inherits our time slice: this is desirable
- // to avoid having a highly contended semaphore hog the P
- // indefinitely. goyield is like Gosched, but it emits a
- // "preempted" trace event instead and, more importantly, puts
- // the current G on the local runq instead of the global one.
- // We only do this in the starving regime (handoff=true), as in
- // the non-starving case it is possible for a different waiter
- // to acquire the semaphore while we are yielding/scheduling,
- // and this would be wasteful. We wait instead to enter starving
- // regime, and then we start to do direct handoffs of ticket and
- // P.
- // See issue 33747 for discussion.
- goyield()
- }
- }
-}
-
-func semroot(addr *uint32) *semaRoot {
- return &semtable[(uintptr(unsafe.Pointer(addr))>>3)%semTabSize].root
-}
-
-func cansemacquire(addr *uint32) bool {
- for {
- v := atomic.Load(addr)
- if v == 0 {
- return false
- }
- if atomic.Cas(addr, v, v-1) {
- return true
- }
- }
-}
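
The same lock-free decrement can be sketched with the public sync/atomic package (hypothetical user code; the runtime uses runtime/internal/atomic): loop until the count is observed as zero, or a CAS from v to v-1 wins:

	package main

	import "sync/atomic"

	func tryAcquire(addr *uint32) bool {
		for {
			v := atomic.LoadUint32(addr)
			if v == 0 {
				return false // nothing to consume
			}
			if atomic.CompareAndSwapUint32(addr, v, v-1) {
				return true // consumed one unit
			}
		}
	}

	func main() {
		n := uint32(1)
		println(tryAcquire(&n), tryAcquire(&n)) // true false
	}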
-
-// queue adds s to the blocked goroutines in semaRoot.
-func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) {
- s.g = getg()
- s.elem = unsafe.Pointer(addr)
- s.next = nil
- s.prev = nil
-
- var last *sudog
- pt := &root.treap
- for t := *pt; t != nil; t = *pt {
- if t.elem == unsafe.Pointer(addr) {
- // Already have addr in list.
- if lifo {
- // Substitute s in t's place in treap.
- *pt = s
- s.ticket = t.ticket
- s.acquiretime = t.acquiretime
- s.parent = t.parent
- s.prev = t.prev
- s.next = t.next
- if s.prev != nil {
- s.prev.parent = s
- }
- if s.next != nil {
- s.next.parent = s
- }
- // Add t first in s's wait list.
- s.waitlink = t
- s.waittail = t.waittail
- if s.waittail == nil {
- s.waittail = t
- }
- t.parent = nil
- t.prev = nil
- t.next = nil
- t.waittail = nil
- } else {
- // Add s to end of t's wait list.
- if t.waittail == nil {
- t.waitlink = s
- } else {
- t.waittail.waitlink = s
- }
- t.waittail = s
- s.waitlink = nil
- }
- return
- }
- last = t
- if uintptr(unsafe.Pointer(addr)) < uintptr(t.elem) {
- pt = &t.prev
- } else {
- pt = &t.next
- }
- }
-
- // Add s as new leaf in tree of unique addrs.
- // The balanced tree is a treap using ticket as the random heap priority.
- // That is, it is a binary tree ordered according to the elem addresses,
- // but then among the space of possible binary trees respecting those
- // addresses, it is kept balanced on average by maintaining a heap ordering
- // on the ticket: s.ticket <= both s.prev.ticket and s.next.ticket.
- // https://en.wikipedia.org/wiki/Treap
- // https://faculty.washington.edu/aragon/pubs/rst89.pdf
- //
- // s.ticket is compared with zero in a couple of places, so we set the lowest bit.
- // This will not noticeably affect the treap's quality.
- s.ticket = fastrand() | 1
- s.parent = last
- *pt = s
-
- // Rotate up into tree according to ticket (priority).
- for s.parent != nil && s.parent.ticket > s.ticket {
- if s.parent.prev == s {
- root.rotateRight(s.parent)
- } else {
- if s.parent.next != s {
- panic("semaRoot queue")
- }
- root.rotateLeft(s.parent)
- }
- }
-}
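The same treap discipline in isolation, for intuition: a toy treap over int keys (hypothetical helper code, not the runtime's) that keeps BST order on keys and min-heap order on random priorities, using the same two rotations defined below.

package treapsketch

import "math/rand"

type node struct {
	key, prio   int
	left, right *node
}

func rotateRight(y *node) *node { x := y.left; y.left = x.right; x.right = y; return x }
func rotateLeft(x *node) *node  { y := x.right; x.right = y.left; y.left = x; return y }

// insert mirrors queue's "rotate up while the parent's ticket is larger" loop,
// expressed top-down: expected depth stays O(log n) because priorities are random.
func insert(t *node, key int) *node {
	if t == nil {
		return &node{key: key, prio: rand.Int()}
	}
	if key < t.key {
		t.left = insert(t.left, key)
		if t.left.prio < t.prio {
			t = rotateRight(t)
		}
	} else {
		t.right = insert(t.right, key)
		if t.right.prio < t.prio {
			t = rotateLeft(t)
		}
	}
	return t
}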
-
-// dequeue searches for and finds the first goroutine
-// in semaRoot blocked on addr.
-// If the sudog was being profiled, dequeue returns the time
-// at which it was woken up as now. Otherwise now is 0.
-func (root *semaRoot) dequeue(addr *uint32) (found *sudog, now int64) {
- ps := &root.treap
- s := *ps
- for ; s != nil; s = *ps {
- if s.elem == unsafe.Pointer(addr) {
- goto Found
- }
- if uintptr(unsafe.Pointer(addr)) < uintptr(s.elem) {
- ps = &s.prev
- } else {
- ps = &s.next
- }
- }
- return nil, 0
-
-Found:
- now = int64(0)
- if s.acquiretime != 0 {
- now = cputicks()
- }
- if t := s.waitlink; t != nil {
- // Substitute t, also waiting on addr, for s in root tree of unique addrs.
- *ps = t
- t.ticket = s.ticket
- t.parent = s.parent
- t.prev = s.prev
- if t.prev != nil {
- t.prev.parent = t
- }
- t.next = s.next
- if t.next != nil {
- t.next.parent = t
- }
- if t.waitlink != nil {
- t.waittail = s.waittail
- } else {
- t.waittail = nil
- }
- t.acquiretime = now
- s.waitlink = nil
- s.waittail = nil
- } else {
- // Rotate s down to be leaf of tree for removal, respecting priorities.
- for s.next != nil || s.prev != nil {
- if s.next == nil || s.prev != nil && s.prev.ticket < s.next.ticket {
- root.rotateRight(s)
- } else {
- root.rotateLeft(s)
- }
- }
- // Remove s, now a leaf.
- if s.parent != nil {
- if s.parent.prev == s {
- s.parent.prev = nil
- } else {
- s.parent.next = nil
- }
- } else {
- root.treap = nil
- }
- }
- s.parent = nil
- s.elem = nil
- s.next = nil
- s.prev = nil
- s.ticket = 0
- return s, now
-}
-
- // rotateLeft rotates the tree rooted at node x,
- // turning (x a (y b c)) into (y (x a b) c).
-func (root *semaRoot) rotateLeft(x *sudog) {
- // p -> (x a (y b c))
- p := x.parent
- y := x.next
- b := y.prev
-
- y.prev = x
- x.parent = y
- x.next = b
- if b != nil {
- b.parent = x
- }
-
- y.parent = p
- if p == nil {
- root.treap = y
- } else if p.prev == x {
- p.prev = y
- } else {
- if p.next != x {
- throw("semaRoot rotateLeft")
- }
- p.next = y
- }
-}
-
- // rotateRight rotates the tree rooted at node y,
- // turning (y (x a b) c) into (x a (y b c)).
-func (root *semaRoot) rotateRight(y *sudog) {
- // p -> (y (x a b) c)
- p := y.parent
- x := y.prev
- b := x.next
-
- x.next = y
- y.parent = x
- y.prev = b
- if b != nil {
- b.parent = y
- }
-
- x.parent = p
- if p == nil {
- root.treap = x
- } else if p.prev == y {
- p.prev = x
- } else {
- if p.next != y {
- throw("semaRoot rotateRight")
- }
- p.next = x
- }
-}
-
-// notifyList is a ticket-based notification list used to implement sync.Cond.
-//
-// It must be kept in sync with the sync package.
-type notifyList struct {
- // wait is the ticket number of the next waiter. It is atomically
- // incremented outside the lock.
- wait uint32
-
- // notify is the ticket number of the next waiter to be notified. It can
- // be read outside the lock, but is only written to with lock held.
- //
- // Both wait & notify can wrap around, and such cases will be correctly
- // handled as long as their "unwrapped" difference is bounded by 2^31.
- // For this not to be the case, we'd need to have 2^31+ goroutines
- // blocked on the same condvar, which is currently not possible.
- notify uint32
-
- // List of parked waiters.
- lock mutex
- head *sudog
- tail *sudog
-}
-
- // less reports whether a < b, treating a and b as running counts that may wrap
- // around the 32-bit range, whose "unwrapped" difference is always less than 2^31.
-func less(a, b uint32) bool {
- return int32(a-b) < 0
-}
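A quick check of the wraparound behavior: viewing the difference as a signed int32 keeps the ordering correct across uint32 overflow, provided the true distance is under 2^31.

package main

import "fmt"

func less(a, b uint32) bool { return int32(a-b) < 0 }

func main() {
	fmt.Println(less(5, 7))          // true: ticket 5 was issued before 7
	fmt.Println(less(^uint32(0), 0)) // true: 0xffffffff was issued just before the wrap to 0
	fmt.Println(less(0, ^uint32(0))) // false: same pair, other direction
}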
-
-// notifyListAdd adds the caller to a notify list such that it can receive
-// notifications. The caller must eventually call notifyListWait to wait for
-// such a notification, passing the returned ticket number.
-//go:linkname notifyListAdd sync.runtime_notifyListAdd
-func notifyListAdd(l *notifyList) uint32 {
- // This may be called concurrently, for example, when called from
- // sync.Cond.Wait while holding a RWMutex in read mode.
- return atomic.Xadd(&l.wait, 1) - 1
-}
-
-// notifyListWait waits for a notification. If one has been sent since
-// notifyListAdd was called, it returns immediately. Otherwise, it blocks.
-//go:linkname notifyListWait sync.runtime_notifyListWait
-func notifyListWait(l *notifyList, t uint32) {
- lockWithRank(&l.lock, lockRankNotifyList)
-
- // Return right away if this ticket has already been notified.
- if less(t, l.notify) {
- unlock(&l.lock)
- return
- }
-
- // Enqueue itself.
- s := acquireSudog()
- s.g = getg()
- s.ticket = t
- s.releasetime = 0
- t0 := int64(0)
- if blockprofilerate > 0 {
- t0 = cputicks()
- s.releasetime = -1
- }
- if l.tail == nil {
- l.head = s
- } else {
- l.tail.next = s
- }
- l.tail = s
- goparkunlock(&l.lock, waitReasonSyncCondWait, traceEvGoBlockCond, 3)
- if t0 != 0 {
- blockevent(s.releasetime-t0, 2)
- }
- releaseSudog(s)
-}
-
-// notifyListNotifyAll notifies all entries in the list.
-//go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll
-func notifyListNotifyAll(l *notifyList) {
- // Fast-path: if there are no new waiters since the last notification
- // we don't need to acquire the lock.
- if atomic.Load(&l.wait) == atomic.Load(&l.notify) {
- return
- }
-
- // Pull the list out into a local variable, waiters will be readied
- // outside the lock.
- lockWithRank(&l.lock, lockRankNotifyList)
- s := l.head
- l.head = nil
- l.tail = nil
-
- // Update the next ticket to be notified. We can set it to the current
- // value of wait because any previous waiters are already in the list
- // or will notice that they have already been notified when trying to
- // add themselves to the list.
- atomic.Store(&l.notify, atomic.Load(&l.wait))
- unlock(&l.lock)
-
- // Go through the local list and ready all waiters.
- for s != nil {
- next := s.next
- s.next = nil
- readyWithTime(s, 4)
- s = next
- }
-}
-
-// notifyListNotifyOne notifies one entry in the list.
-//go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne
-func notifyListNotifyOne(l *notifyList) {
- // Fast-path: if there are no new waiters since the last notification
- // we don't need to acquire the lock at all.
- if atomic.Load(&l.wait) == atomic.Load(&l.notify) {
- return
- }
-
- lockWithRank(&l.lock, lockRankNotifyList)
-
- // Re-check under the lock if we need to do anything.
- t := l.notify
- if t == atomic.Load(&l.wait) {
- unlock(&l.lock)
- return
- }
-
- // Update the next notify ticket number.
- atomic.Store(&l.notify, t+1)
-
- // Try to find the g that needs to be notified.
- // If it hasn't made it to the list yet we won't find it,
- // but it won't park itself once it sees the new notify number.
- //
- // This scan looks linear but essentially always stops quickly.
- // Because goroutines queue separately from taking ticket numbers,
- // there may be minor reorderings in the list, but we
- // expect the g we're looking for to be near the front.
- // The g has others in front of it on the list only to the
- // extent that it lost the race, so the iteration will not
- // be too long. This applies even when the g is missing:
- // it hasn't yet gotten to sleep and has lost the race to
- // the (few) other g's that we find on the list.
- for p, s := (*sudog)(nil), l.head; s != nil; p, s = s, s.next {
- if s.ticket == t {
- n := s.next
- if p != nil {
- p.next = n
- } else {
- l.head = n
- }
- if n == nil {
- l.tail = p
- }
- unlock(&l.lock)
- s.next = nil
- readyWithTime(s, 4)
- return
- }
- }
- unlock(&l.lock)
-}
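These entry points are consumed by sync.Cond. Roughly, paraphrasing Cond.Wait (the copy checker is omitted), the ticket is taken while the caller's lock is still held, so a Signal issued between Unlock and the park cannot be missed: notifyListWait returns immediately for an already-notified ticket.

func (c *Cond) Wait() {
	t := runtime_notifyListAdd(&c.notify) // take a ticket while c.L is held
	c.L.Unlock()
	runtime_notifyListWait(&c.notify, t) // parks only if the ticket is unnotified
	c.L.Lock()
}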
-
-//go:linkname notifyListCheck sync.runtime_notifyListCheck
-func notifyListCheck(sz uintptr) {
- if sz != unsafe.Sizeof(notifyList{}) {
- print("runtime: bad notifyList size - sync=", sz, " runtime=", unsafe.Sizeof(notifyList{}), "\n")
- throw("bad notifyList size")
- }
-}
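The size being checked belongs to sync's mirror of this struct, which must match in size and alignment; roughly (a paraphrase, field layout assumed from the description above):

// sync's approximation of runtime.notifyList; notifyListCheck rejects any drift.
type notifyList struct {
	wait   uint32
	notify uint32
	lock   uintptr // key field of the runtime mutex
	head   unsafe.Pointer
	tail   unsafe.Pointer
}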
-
-//go:linkname sync_nanotime sync.runtime_nanotime
-func sync_nanotime() int64 {
- return nanotime()
-}
diff --git a/contrib/go/_std_1.18/src/runtime/signal_darwin_amd64.go b/contrib/go/_std_1.18/src/runtime/signal_darwin_amd64.go
deleted file mode 100644
index abc212ad51..0000000000
--- a/contrib/go/_std_1.18/src/runtime/signal_darwin_amd64.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-type sigctxt struct {
- info *siginfo
- ctxt unsafe.Pointer
-}
-
-//go:nosplit
-//go:nowritebarrierrec
-func (c *sigctxt) regs() *regs64 { return &(*ucontext)(c.ctxt).uc_mcontext.ss }
-
-func (c *sigctxt) rax() uint64 { return c.regs().rax }
-func (c *sigctxt) rbx() uint64 { return c.regs().rbx }
-func (c *sigctxt) rcx() uint64 { return c.regs().rcx }
-func (c *sigctxt) rdx() uint64 { return c.regs().rdx }
-func (c *sigctxt) rdi() uint64 { return c.regs().rdi }
-func (c *sigctxt) rsi() uint64 { return c.regs().rsi }
-func (c *sigctxt) rbp() uint64 { return c.regs().rbp }
-func (c *sigctxt) rsp() uint64 { return c.regs().rsp }
-func (c *sigctxt) r8() uint64 { return c.regs().r8 }
-func (c *sigctxt) r9() uint64 { return c.regs().r9 }
-func (c *sigctxt) r10() uint64 { return c.regs().r10 }
-func (c *sigctxt) r11() uint64 { return c.regs().r11 }
-func (c *sigctxt) r12() uint64 { return c.regs().r12 }
-func (c *sigctxt) r13() uint64 { return c.regs().r13 }
-func (c *sigctxt) r14() uint64 { return c.regs().r14 }
-func (c *sigctxt) r15() uint64 { return c.regs().r15 }
-
-//go:nosplit
-//go:nowritebarrierrec
-func (c *sigctxt) rip() uint64 { return c.regs().rip }
-
-func (c *sigctxt) rflags() uint64 { return c.regs().rflags }
-func (c *sigctxt) cs() uint64 { return c.regs().cs }
-func (c *sigctxt) fs() uint64 { return c.regs().fs }
-func (c *sigctxt) gs() uint64 { return c.regs().gs }
-func (c *sigctxt) sigcode() uint64 { return uint64(c.info.si_code) }
-func (c *sigctxt) sigaddr() uint64 { return c.info.si_addr }
-
-func (c *sigctxt) set_rip(x uint64) { c.regs().rip = x }
-func (c *sigctxt) set_rsp(x uint64) { c.regs().rsp = x }
-func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
-func (c *sigctxt) set_sigaddr(x uint64) { c.info.si_addr = x }
-
-//go:nosplit
-func (c *sigctxt) fixsigcode(sig uint32) {
- switch sig {
- case _SIGTRAP:
- // OS X sets c.sigcode() == TRAP_BRKPT unconditionally for all SIGTRAPs,
- // leaving no way to distinguish a breakpoint-induced SIGTRAP
- // from an asynchronous signal SIGTRAP.
- // They all look breakpoint-induced by default.
- // Try looking at the code to see if it's a breakpoint.
- // The assumption is that we're very unlikely to get an
- // asynchronous SIGTRAP at just the moment that the
- // PC started to point at unmapped memory.
- pc := uintptr(c.rip())
- // OS X will leave the pc just after the INT 3 instruction.
- // INT 3 is usually the 1-byte 0xCC, but there is also the 2-byte form 0xCD 0x03.
- code := (*[2]byte)(unsafe.Pointer(pc - 2))
- if code[1] != 0xCC && (code[0] != 0xCD || code[1] != 3) {
- // SIGTRAP on something other than INT 3.
- c.set_sigcode(_SI_USER)
- }
-
- case _SIGSEGV:
- // x86-64 has 48-bit virtual addresses. The top 16 bits must echo bit 47.
- // The hardware delivers a different kind of fault for a malformed address
- // than it does for an attempt to access a valid but unmapped address.
- // OS X 10.9.2 mishandles the malformed address case, making it look like
- // a user-generated signal (like someone ran kill -SEGV ourpid).
- // We pass user-generated signals to os/signal, or else ignore them.
- // Doing that here - and returning to the faulting code - results in an
- // infinite loop. It appears the best we can do is rewrite what the kernel
- // delivers into something more like the truth. The address used below
- // has very little chance of being the one that caused the fault, but it is
- // malformed, it is clearly not a real pointer, and if it does get printed
- // in real life, people will probably search for it and find this code.
- // There are no Google hits for b01dfacedebac1e or 0xb01dfacedebac1e
- // as I type this comment.
- if c.sigcode() == _SI_USER {
- c.set_sigcode(_SI_USER + 1)
- c.set_sigaddr(0xb01dfacedebac1e)
- }
- }
-}
diff --git a/contrib/go/_std_1.18/src/runtime/signal_unix.go b/contrib/go/_std_1.18/src/runtime/signal_unix.go
deleted file mode 100644
index 5cb51d10ba..0000000000
--- a/contrib/go/_std_1.18/src/runtime/signal_unix.go
+++ /dev/null
@@ -1,1320 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-package runtime
-
-import (
- "internal/abi"
- "runtime/internal/atomic"
- "runtime/internal/sys"
- "unsafe"
-)
-
-// sigTabT is the type of an entry in the global sigtable array.
-// sigtable is inherently system dependent, and appears in OS-specific files,
-// but sigTabT is the same for all Unixy systems.
-// The sigtable array is indexed by a system signal number to get the flags
-// and printable name of each signal.
-type sigTabT struct {
- flags int32
- name string
-}
-
-//go:linkname os_sigpipe os.sigpipe
-func os_sigpipe() {
- systemstack(sigpipe)
-}
-
-func signame(sig uint32) string {
- if sig >= uint32(len(sigtable)) {
- return ""
- }
- return sigtable[sig].name
-}
-
-const (
- _SIG_DFL uintptr = 0
- _SIG_IGN uintptr = 1
-)
-
-// sigPreempt is the signal used for non-cooperative preemption.
-//
-// There's no good way to choose this signal, but there are some
-// heuristics:
-//
-// 1. It should be a signal that's passed-through by debuggers by
-// default. On Linux, this is SIGALRM, SIGURG, SIGCHLD, SIGIO,
-// SIGVTALRM, SIGPROF, and SIGWINCH, plus some glibc-internal signals.
-//
-// 2. It shouldn't be used internally by libc in mixed Go/C binaries
-// because libc may assume it's the only thing that can handle these
-// signals. For example SIGCANCEL or SIGSETXID.
-//
-// 3. It should be a signal that can happen spuriously without
-// consequences. For example, SIGALRM is a bad choice because the
-// signal handler can't tell if it was caused by the real process
-// alarm or not (arguably this means the signal is broken, but I
-// digress). SIGUSR1 and SIGUSR2 are also bad because those are often
-// used in meaningful ways by applications.
-//
-// 4. We need to deal with platforms without real-time signals (like
-// macOS), so those are out.
-//
-// We use SIGURG because it meets all of these criteria, is extremely
-// unlikely to be used by an application for its "real" meaning (both
-// because out-of-band data is basically unused and because SIGURG
-// doesn't report which socket has the condition, making it pretty
-// useless), and even if it is, the application has to be ready for
-// spurious SIGURG. SIGIO wouldn't be a bad choice either, but is more
-// likely to be used for real.
-const sigPreempt = _SIGURG
-
-// Stores the signal handlers registered before Go installed its own.
-// These signal handlers will be invoked in cases where Go doesn't want to
-// handle a particular signal (e.g., signal occurred on a non-Go thread).
-// See sigfwdgo for more information on when the signals are forwarded.
-//
-// This is read by the signal handler; accesses should use
-// atomic.Loaduintptr and atomic.Storeuintptr.
-var fwdSig [_NSIG]uintptr
-
-// handlingSig is indexed by signal number and is non-zero if we are
-// currently handling the signal. Or, to put it another way, whether
-// the signal handler is currently set to the Go signal handler or not.
-// This is uint32 rather than bool so that we can use atomic instructions.
-var handlingSig [_NSIG]uint32
-
-// channels for synchronizing signal mask updates with the signal mask
-// thread
-var (
- disableSigChan chan uint32
- enableSigChan chan uint32
- maskUpdatedChan chan struct{}
-)
-
-func init() {
- // _NSIG is the number of signals on this operating system.
- // sigtable should describe what to do for all the possible signals.
- if len(sigtable) != _NSIG {
- print("runtime: len(sigtable)=", len(sigtable), " _NSIG=", _NSIG, "\n")
- throw("bad sigtable len")
- }
-}
-
-var signalsOK bool
-
-// Initialize signals.
- // Called by libpreinit, so the runtime may not be initialized yet.
-//go:nosplit
-//go:nowritebarrierrec
-func initsig(preinit bool) {
- if !preinit {
- // It's now OK for signal handlers to run.
- signalsOK = true
- }
-
- // For c-archive/c-shared this is called by libpreinit with
- // preinit == true.
- if (isarchive || islibrary) && !preinit {
- return
- }
-
- for i := uint32(0); i < _NSIG; i++ {
- t := &sigtable[i]
- if t.flags == 0 || t.flags&_SigDefault != 0 {
- continue
- }
-
- // We don't need to use atomic operations here because
- // there shouldn't be any other goroutines running yet.
- fwdSig[i] = getsig(i)
-
- if !sigInstallGoHandler(i) {
- // Even if we are not installing a signal handler,
- // set SA_ONSTACK if necessary.
- if fwdSig[i] != _SIG_DFL && fwdSig[i] != _SIG_IGN {
- setsigstack(i)
- } else if fwdSig[i] == _SIG_IGN {
- sigInitIgnored(i)
- }
- continue
- }
-
- handlingSig[i] = 1
- setsig(i, abi.FuncPCABIInternal(sighandler))
- }
-}
-
-//go:nosplit
-//go:nowritebarrierrec
-func sigInstallGoHandler(sig uint32) bool {
- // For some signals, we respect an inherited SIG_IGN handler
- // rather than insist on installing our own default handler.
- // Even these signals can be fetched using the os/signal package.
- switch sig {
- case _SIGHUP, _SIGINT:
- if atomic.Loaduintptr(&fwdSig[sig]) == _SIG_IGN {
- return false
- }
- }
-
- if (GOOS == "linux" || GOOS == "android") && !iscgo && sig == sigPerThreadSyscall {
- // sigPerThreadSyscall is the same signal used by glibc for
- // per-thread syscalls on Linux. We use it for the same purpose
- // in non-cgo binaries.
- return true
- }
-
- t := &sigtable[sig]
- if t.flags&_SigSetStack != 0 {
- return false
- }
-
- // When built using c-archive or c-shared, only install signal
- // handlers for synchronous signals and SIGPIPE and sigPreempt.
- if (isarchive || islibrary) && t.flags&_SigPanic == 0 && sig != _SIGPIPE && sig != sigPreempt {
- return false
- }
-
- return true
-}
-
-// sigenable enables the Go signal handler to catch the signal sig.
-// It is only called while holding the os/signal.handlers lock,
-// via os/signal.enableSignal and signal_enable.
-func sigenable(sig uint32) {
- if sig >= uint32(len(sigtable)) {
- return
- }
-
- // SIGPROF is handled specially for profiling.
- if sig == _SIGPROF {
- return
- }
-
- t := &sigtable[sig]
- if t.flags&_SigNotify != 0 {
- ensureSigM()
- enableSigChan <- sig
- <-maskUpdatedChan
- if atomic.Cas(&handlingSig[sig], 0, 1) {
- atomic.Storeuintptr(&fwdSig[sig], getsig(sig))
- setsig(sig, abi.FuncPCABIInternal(sighandler))
- }
- }
-}
-
-// sigdisable disables the Go signal handler for the signal sig.
-// It is only called while holding the os/signal.handlers lock,
-// via os/signal.disableSignal and signal_disable.
-func sigdisable(sig uint32) {
- if sig >= uint32(len(sigtable)) {
- return
- }
-
- // SIGPROF is handled specially for profiling.
- if sig == _SIGPROF {
- return
- }
-
- t := &sigtable[sig]
- if t.flags&_SigNotify != 0 {
- ensureSigM()
- disableSigChan <- sig
- <-maskUpdatedChan
-
- // If initsig does not install a signal handler for a
- // signal, then to go back to the state before Notify
- // we should remove the one we installed.
- if !sigInstallGoHandler(sig) {
- atomic.Store(&handlingSig[sig], 0)
- setsig(sig, atomic.Loaduintptr(&fwdSig[sig]))
- }
- }
-}
-
-// sigignore ignores the signal sig.
-// It is only called while holding the os/signal.handlers lock,
-// via os/signal.ignoreSignal and signal_ignore.
-func sigignore(sig uint32) {
- if sig >= uint32(len(sigtable)) {
- return
- }
-
- // SIGPROF is handled specially for profiling.
- if sig == _SIGPROF {
- return
- }
-
- t := &sigtable[sig]
- if t.flags&_SigNotify != 0 {
- atomic.Store(&handlingSig[sig], 0)
- setsig(sig, _SIG_IGN)
- }
-}
-
-// clearSignalHandlers clears all signal handlers that are not ignored
-// back to the default. This is called by the child after a fork, so that
-// we can enable the signal mask for the exec without worrying about
-// running a signal handler in the child.
-//go:nosplit
-//go:nowritebarrierrec
-func clearSignalHandlers() {
- for i := uint32(0); i < _NSIG; i++ {
- if atomic.Load(&handlingSig[i]) != 0 {
- setsig(i, _SIG_DFL)
- }
- }
-}
-
-// setProcessCPUProfilerTimer is called when the profiling timer changes.
-// It is called with prof.signalLock held. hz is the new timer, and is 0 if
-// profiling is being disabled. Enable or disable the signal as
-// required for -buildmode=c-archive.
-func setProcessCPUProfilerTimer(hz int32) {
- if hz != 0 {
- // Enable the Go signal handler if not enabled.
- if atomic.Cas(&handlingSig[_SIGPROF], 0, 1) {
- h := getsig(_SIGPROF)
- // If no signal handler was installed before, then we record
- // _SIG_IGN here. When we turn off profiling (below) we'll start
- // ignoring SIGPROF signals. We do this, rather than change
- // to SIG_DFL, because there may be a pending SIGPROF
- // signal that has not yet been delivered to some other thread.
- // If we change to SIG_DFL when turning off profiling, the
- // program will crash when that SIGPROF is delivered. We assume
- // that programs that use profiling don't want to crash on a
- // stray SIGPROF. See issue 19320.
- // We do the change here instead of when turning off profiling,
- // because there we may race with a signal handler running
- // concurrently, in particular, sigfwdgo may observe _SIG_DFL and
- // die. See issue 43828.
- if h == _SIG_DFL {
- h = _SIG_IGN
- }
- atomic.Storeuintptr(&fwdSig[_SIGPROF], h)
- setsig(_SIGPROF, abi.FuncPCABIInternal(sighandler))
- }
-
- var it itimerval
- it.it_interval.tv_sec = 0
- it.it_interval.set_usec(1000000 / hz)
- it.it_value = it.it_interval
- setitimer(_ITIMER_PROF, &it, nil)
- } else {
- setitimer(_ITIMER_PROF, &itimerval{}, nil)
-
- // If the Go signal handler should be disabled by default,
- // switch back to the signal handler that was installed
- // when we enabled profiling. We don't try to handle the case
- // of a program that changes the SIGPROF handler while Go
- // profiling is enabled.
- if !sigInstallGoHandler(_SIGPROF) {
- if atomic.Cas(&handlingSig[_SIGPROF], 1, 0) {
- h := atomic.Loaduintptr(&fwdSig[_SIGPROF])
- setsig(_SIGPROF, h)
- }
- }
- }
-}
-
-// setThreadCPUProfilerHz makes any thread-specific changes required to
-// implement profiling at a rate of hz.
-// No changes required on Unix systems when using setitimer.
-func setThreadCPUProfilerHz(hz int32) {
- getg().m.profilehz = hz
-}
-
-func sigpipe() {
- if signal_ignored(_SIGPIPE) || sigsend(_SIGPIPE) {
- return
- }
- dieFromSignal(_SIGPIPE)
-}
-
-// doSigPreempt handles a preemption signal on gp.
-func doSigPreempt(gp *g, ctxt *sigctxt) {
- // Check if this G wants to be preempted and is safe to
- // preempt.
- if wantAsyncPreempt(gp) {
- if ok, newpc := isAsyncSafePoint(gp, ctxt.sigpc(), ctxt.sigsp(), ctxt.siglr()); ok {
- // Adjust the PC and inject a call to asyncPreempt.
- ctxt.pushCall(abi.FuncPCABI0(asyncPreempt), newpc)
- }
- }
-
- // Acknowledge the preemption.
- atomic.Xadd(&gp.m.preemptGen, 1)
- atomic.Store(&gp.m.signalPending, 0)
-
- if GOOS == "darwin" || GOOS == "ios" {
- atomic.Xadd(&pendingPreemptSignals, -1)
- }
-}
-
-const preemptMSupported = true
-
-// preemptM sends a preemption request to mp. This request may be
-// handled asynchronously and may be coalesced with other requests to
-// the M. When the request is received, if the running G or P are
-// marked for preemption and the goroutine is at an asynchronous
-// safe-point, it will preempt the goroutine. It always atomically
-// increments mp.preemptGen after handling a preemption request.
-func preemptM(mp *m) {
- // On Darwin, don't try to preempt threads during exec.
- // Issue #41702.
- if GOOS == "darwin" || GOOS == "ios" {
- execLock.rlock()
- }
-
- if atomic.Cas(&mp.signalPending, 0, 1) {
- if GOOS == "darwin" || GOOS == "ios" {
- atomic.Xadd(&pendingPreemptSignals, 1)
- }
-
- // If multiple threads are preempting the same M, they may send many
- // signals to the same M such that it can hardly make progress, causing
- // a live-lock problem. Apparently this can happen on darwin. See
- // issue #37741.
- // Only send a signal if there isn't already one pending.
- signalM(mp, sigPreempt)
- }
-
- if GOOS == "darwin" || GOOS == "ios" {
- execLock.runlock()
- }
-}
-
-// sigFetchG fetches the value of G safely when running in a signal handler.
-// On some architectures, the g value may be clobbered when running in a VDSO.
-// See issue #32912.
-//
-//go:nosplit
-func sigFetchG(c *sigctxt) *g {
- switch GOARCH {
- case "arm", "arm64", "ppc64", "ppc64le", "riscv64":
- if !iscgo && inVDSOPage(c.sigpc()) {
- // When using cgo, we save the g on TLS and load it from there
- // in sigtramp. Just use that.
- // Otherwise, before making a VDSO call we save the g to the
- // bottom of the signal stack. Fetch from there.
- // TODO: in efence mode, stack is sysAlloc'd, so this wouldn't
- // work.
- sp := getcallersp()
- s := spanOf(sp)
- if s != nil && s.state.get() == mSpanManual && s.base() < sp && sp < s.limit {
- gp := *(**g)(unsafe.Pointer(s.base()))
- return gp
- }
- return nil
- }
- }
- return getg()
-}
-
-// sigtrampgo is called from the signal handler function, sigtramp,
-// written in assembly code.
-// This is called by the signal handler, and the world may be stopped.
-//
-// It must be nosplit because getg() is still the G that was running
-// (if any) when the signal was delivered, but it's (usually) called
-// on the gsignal stack. Until this switches the G to gsignal, the
-// stack bounds check won't work.
-//
-//go:nosplit
-//go:nowritebarrierrec
-func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
- if sigfwdgo(sig, info, ctx) {
- return
- }
- c := &sigctxt{info, ctx}
- g := sigFetchG(c)
- setg(g)
- if g == nil {
- if sig == _SIGPROF {
- // Some platforms (Linux) have per-thread timers, which we use in
- // combination with the process-wide timer. Avoid double-counting.
- if validSIGPROF(nil, c) {
- sigprofNonGoPC(c.sigpc())
- }
- return
- }
- if sig == sigPreempt && preemptMSupported && debug.asyncpreemptoff == 0 {
- // This is probably a signal from preemptM sent
- // while executing Go code but received while
- // executing non-Go code.
- // We got past sigfwdgo, so we know that there is
- // no non-Go signal handler for sigPreempt.
- // The default behavior for sigPreempt is to ignore
- // the signal, so badsignal will be a no-op anyway.
- if GOOS == "darwin" || GOOS == "ios" {
- atomic.Xadd(&pendingPreemptSignals, -1)
- }
- return
- }
- c.fixsigcode(sig)
- badsignal(uintptr(sig), c)
- return
- }
-
- setg(g.m.gsignal)
-
- // If some non-Go code called sigaltstack, adjust.
- var gsignalStack gsignalStack
- setStack := adjustSignalStack(sig, g.m, &gsignalStack)
- if setStack {
- g.m.gsignal.stktopsp = getcallersp()
- }
-
- if g.stackguard0 == stackFork {
- signalDuringFork(sig)
- }
-
- c.fixsigcode(sig)
- sighandler(sig, info, ctx, g)
- setg(g)
- if setStack {
- restoreGsignalStack(&gsignalStack)
- }
-}
-
-// If the signal handler receives a SIGPROF signal on a non-Go thread,
-// it tries to collect a traceback into sigprofCallers.
-// sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback.
-var sigprofCallers cgoCallers
-var sigprofCallersUse uint32
-
-// sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
-// and the signal handler collected a stack trace in sigprofCallers.
-// When this is called, sigprofCallersUse will be non-zero.
-// g is nil, and what we can do is very limited.
-//
-// It is called from the signal handling functions written in assembly code that
-// are active for cgo programs, cgoSigtramp and sigprofNonGoWrapper, which have
-// not verified that the SIGPROF delivery corresponds to the best available
-// profiling source for this thread.
-//
-//go:nosplit
-//go:nowritebarrierrec
-func sigprofNonGo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
- if prof.hz != 0 {
- c := &sigctxt{info, ctx}
- // Some platforms (Linux) have per-thread timers, which we use in
- // combination with the process-wide timer. Avoid double-counting.
- if validSIGPROF(nil, c) {
- n := 0
- for n < len(sigprofCallers) && sigprofCallers[n] != 0 {
- n++
- }
- cpuprof.addNonGo(sigprofCallers[:n])
- }
- }
-
- atomic.Store(&sigprofCallersUse, 0)
-}
-
-// sigprofNonGoPC is called when a profiling signal arrived on a
-// non-Go thread and we have a single PC value, not a stack trace.
-// g is nil, and what we can do is very limited.
-//go:nosplit
-//go:nowritebarrierrec
-func sigprofNonGoPC(pc uintptr) {
- if prof.hz != 0 {
- stk := []uintptr{
- pc,
- abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum,
- }
- cpuprof.addNonGo(stk)
- }
-}
-
-// adjustSignalStack adjusts the current stack guard based on the
-// stack pointer that is actually in use while handling a signal.
-// We do this in case some non-Go code called sigaltstack.
-// This reports whether the stack was adjusted, and if so stores the old
-// signal stack in *gsigstack.
-//go:nosplit
-func adjustSignalStack(sig uint32, mp *m, gsigStack *gsignalStack) bool {
- sp := uintptr(unsafe.Pointer(&sig))
- if sp >= mp.gsignal.stack.lo && sp < mp.gsignal.stack.hi {
- return false
- }
-
- var st stackt
- sigaltstack(nil, &st)
- stsp := uintptr(unsafe.Pointer(st.ss_sp))
- if st.ss_flags&_SS_DISABLE == 0 && sp >= stsp && sp < stsp+st.ss_size {
- setGsignalStack(&st, gsigStack)
- return true
- }
-
- if sp >= mp.g0.stack.lo && sp < mp.g0.stack.hi {
- // The signal was delivered on the g0 stack.
- // This can happen when linked with C code
- // using the thread sanitizer, which collects
- // signals then delivers them itself by calling
- // the signal handler directly when C code,
- // including C code called via cgo, calls a
- // TSAN-intercepted function such as malloc.
- //
- // We check this condition last as g0.stack.lo
- // may be not very accurate (see mstart).
- st := stackt{ss_size: mp.g0.stack.hi - mp.g0.stack.lo}
- setSignalstackSP(&st, mp.g0.stack.lo)
- setGsignalStack(&st, gsigStack)
- return true
- }
-
- // sp is not within gsignal stack, g0 stack, or sigaltstack. Bad.
- setg(nil)
- needm()
- if st.ss_flags&_SS_DISABLE != 0 {
- noSignalStack(sig)
- } else {
- sigNotOnStack(sig)
- }
- dropm()
- return false
-}
-
-// crashing is the number of m's we have waited for when implementing
-// GOTRACEBACK=crash when a signal is received.
-var crashing int32
-
-// testSigtrap and testSigusr1 are used by the runtime tests. If
-// non-nil, it is called on SIGTRAP/SIGUSR1. If it returns true, the
-// normal behavior on this signal is suppressed.
-var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool
-var testSigusr1 func(gp *g) bool
-
-// sighandler is invoked when a signal occurs. The global g will be
-// set to a gsignal goroutine and we will be running on the alternate
-// signal stack. The parameter g will be the value of the global g
-// when the signal occurred. The sig, info, and ctxt parameters are
- // from the system signal handler: they are the parameters the kernel
- // passes to the handler installed via the sigaction system call.
-//
-// The garbage collector may have stopped the world, so write barriers
-// are not allowed.
-//
-//go:nowritebarrierrec
-func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
- _g_ := getg()
- c := &sigctxt{info, ctxt}
-
- if sig == _SIGPROF {
- mp := _g_.m
- // Some platforms (Linux) have per-thread timers, which we use in
- // combination with the process-wide timer. Avoid double-counting.
- if validSIGPROF(mp, c) {
- sigprof(c.sigpc(), c.sigsp(), c.siglr(), gp, mp)
- }
- return
- }
-
- if sig == _SIGTRAP && testSigtrap != nil && testSigtrap(info, (*sigctxt)(noescape(unsafe.Pointer(c))), gp) {
- return
- }
-
- if sig == _SIGUSR1 && testSigusr1 != nil && testSigusr1(gp) {
- return
- }
-
- if (GOOS == "linux" || GOOS == "android") && sig == sigPerThreadSyscall {
- // sigPerThreadSyscall is the same signal used by glibc for
- // per-thread syscalls on Linux. We use it for the same purpose
- // in non-cgo binaries. Since this signal is not _SigNotify,
- // there is nothing more to do once we run the syscall.
- runPerThreadSyscall()
- return
- }
-
- if sig == sigPreempt && debug.asyncpreemptoff == 0 {
- // Might be a preemption signal.
- doSigPreempt(gp, c)
- // Even if this was definitely a preemption signal, it
- // may have been coalesced with another signal, so we
- // still let it through to the application.
- }
-
- flags := int32(_SigThrow)
- if sig < uint32(len(sigtable)) {
- flags = sigtable[sig].flags
- }
- if c.sigcode() != _SI_USER && flags&_SigPanic != 0 && gp.throwsplit {
- // We can't safely sigpanic because it may grow the
- // stack. Abort in the signal handler instead.
- flags = _SigThrow
- }
- if isAbortPC(c.sigpc()) {
- // On many architectures, the abort function just
- // causes a memory fault. Don't turn that into a panic.
- flags = _SigThrow
- }
- if c.sigcode() != _SI_USER && flags&_SigPanic != 0 {
- // The signal is going to cause a panic.
- // Arrange the stack so that it looks like the point
- // where the signal occurred made a call to the
- // function sigpanic. Then set the PC to sigpanic.
-
- // Have to pass arguments out of band since
- // augmenting the stack frame would break
- // the unwinding code.
- gp.sig = sig
- gp.sigcode0 = uintptr(c.sigcode())
- gp.sigcode1 = uintptr(c.fault())
- gp.sigpc = c.sigpc()
-
- c.preparePanic(sig, gp)
- return
- }
-
- if c.sigcode() == _SI_USER || flags&_SigNotify != 0 {
- if sigsend(sig) {
- return
- }
- }
-
- if c.sigcode() == _SI_USER && signal_ignored(sig) {
- return
- }
-
- if flags&_SigKill != 0 {
- dieFromSignal(sig)
- }
-
- // _SigThrow means that we should exit now.
- // If we get here with _SigPanic, it means that the signal
- // was sent to us by a program (c.sigcode() == _SI_USER);
- // in that case, if we didn't handle it in sigsend, we exit now.
- if flags&(_SigThrow|_SigPanic) == 0 {
- return
- }
-
- _g_.m.throwing = 1
- _g_.m.caughtsig.set(gp)
-
- if crashing == 0 {
- startpanic_m()
- }
-
- if sig < uint32(len(sigtable)) {
- print(sigtable[sig].name, "\n")
- } else {
- print("Signal ", sig, "\n")
- }
-
- print("PC=", hex(c.sigpc()), " m=", _g_.m.id, " sigcode=", c.sigcode(), "\n")
- if _g_.m.incgo && gp == _g_.m.g0 && _g_.m.curg != nil {
- print("signal arrived during cgo execution\n")
- // Switch to curg so that we get a traceback of the Go code
- // leading up to the cgocall, which switched from curg to g0.
- gp = _g_.m.curg
- }
- if sig == _SIGILL || sig == _SIGFPE {
- // It would be nice to know how long the instruction is.
- // Unfortunately, that's complicated to do in general (mostly for x86
- // and s390x, but other archs have non-standard instruction lengths also).
- // Opt to print 16 bytes, which covers most instructions.
- const maxN = 16
- n := uintptr(maxN)
- // We have to be careful, though. If we're near the end of
- // a page and the following page isn't mapped, we could
- // segfault. So make sure we don't straddle a page (even though
- // that could lead to printing an incomplete instruction).
- // We're assuming here we can read at least the page containing the PC.
- // I suppose it is possible that the page is mapped executable but not readable?
- pc := c.sigpc()
- if n > physPageSize-pc%physPageSize {
- n = physPageSize - pc%physPageSize
- }
- print("instruction bytes:")
- b := (*[maxN]byte)(unsafe.Pointer(pc))
- for i := uintptr(0); i < n; i++ {
- print(" ", hex(b[i]))
- }
- println()
- }
- print("\n")
-
- level, _, docrash := gotraceback()
- if level > 0 {
- goroutineheader(gp)
- tracebacktrap(c.sigpc(), c.sigsp(), c.siglr(), gp)
- if crashing > 0 && gp != _g_.m.curg && _g_.m.curg != nil && readgstatus(_g_.m.curg)&^_Gscan == _Grunning {
- // tracebackothers on original m skipped this one; trace it now.
- goroutineheader(_g_.m.curg)
- traceback(^uintptr(0), ^uintptr(0), 0, _g_.m.curg)
- } else if crashing == 0 {
- tracebackothers(gp)
- print("\n")
- }
- dumpregs(c)
- }
-
- if docrash {
- crashing++
- if crashing < mcount()-int32(extraMCount) {
- // There are other m's that need to dump their stacks.
- // Relay SIGQUIT to the next m by sending it to the current process.
- // All m's that have already received SIGQUIT have signal masks blocking
- // receipt of any signals, so the SIGQUIT will go to an m that hasn't seen it yet.
- // When the last m receives the SIGQUIT, it will fall through to the call to
- // crash below. Just in case the relaying gets botched, each m involved in
- // the relay sleeps for 5 seconds and then does the crash/exit itself.
- // In expected operation, the last m has received the SIGQUIT and run
- // crash/exit and the process is gone, all long before any of the
- // 5-second sleeps have finished.
- print("\n-----\n\n")
- raiseproc(_SIGQUIT)
- usleep(5 * 1000 * 1000)
- }
- crash()
- }
-
- printDebugLog()
-
- exit(2)
-}
-
-// sigpanic turns a synchronous signal into a run-time panic.
-// If the signal handler sees a synchronous panic, it arranges the
-// stack to look like the function where the signal occurred called
-// sigpanic, sets the signal's PC value to sigpanic, and returns from
-// the signal handler. The effect is that the program will act as
-// though the function that got the signal simply called sigpanic
-// instead.
-//
-// This must NOT be nosplit because the linker doesn't know where
-// sigpanic calls can be injected.
-//
-// The signal handler must not inject a call to sigpanic if
-// getg().throwsplit, since sigpanic may need to grow the stack.
-//
-// This is exported via linkname to assembly in runtime/cgo.
-//go:linkname sigpanic
-func sigpanic() {
- g := getg()
- if !canpanic(g) {
- throw("unexpected signal during runtime execution")
- }
-
- switch g.sig {
- case _SIGBUS:
- if g.sigcode0 == _BUS_ADRERR && g.sigcode1 < 0x1000 {
- panicmem()
- }
- // Support runtime/debug.SetPanicOnFault.
- if g.paniconfault {
- panicmemAddr(g.sigcode1)
- }
- print("unexpected fault address ", hex(g.sigcode1), "\n")
- throw("fault")
- case _SIGSEGV:
- if (g.sigcode0 == 0 || g.sigcode0 == _SEGV_MAPERR || g.sigcode0 == _SEGV_ACCERR) && g.sigcode1 < 0x1000 {
- panicmem()
- }
- // Support runtime/debug.SetPanicOnFault.
- if g.paniconfault {
- panicmemAddr(g.sigcode1)
- }
- print("unexpected fault address ", hex(g.sigcode1), "\n")
- throw("fault")
- case _SIGFPE:
- switch g.sigcode0 {
- case _FPE_INTDIV:
- panicdivide()
- case _FPE_INTOVF:
- panicoverflow()
- }
- panicfloat()
- }
-
- if g.sig >= uint32(len(sigtable)) {
- // can't happen: we looked up g.sig in sigtable to decide to call sigpanic
- throw("unexpected signal value")
- }
- panic(errorString(sigtable[g.sig].name))
-}
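From user code the visible effect is an ordinary runtime-error panic; for example, a nil dereference takes the SIGSEGV -> sigpanic -> panicmem path and can be recovered normally:

package main

import "fmt"

func main() {
	defer func() {
		fmt.Println("recovered:", recover()) // runtime error: invalid memory address or nil pointer dereference
	}()
	var p *int
	_ = *p // faults; the handler rewrites the stack to call sigpanic
}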
-
-// dieFromSignal kills the program with a signal.
-// This provides the expected exit status for the shell.
-// This is only called with fatal signals expected to kill the process.
-//go:nosplit
-//go:nowritebarrierrec
-func dieFromSignal(sig uint32) {
- unblocksig(sig)
- // Mark the signal as unhandled to ensure it is forwarded.
- atomic.Store(&handlingSig[sig], 0)
- raise(sig)
-
- // That should have killed us. On some systems, though, raise
- // sends the signal to the whole process rather than to just
- // the current thread, which means that the signal may not yet
- // have been delivered. Give other threads a chance to run and
- // pick up the signal.
- osyield()
- osyield()
- osyield()
-
- // If that didn't work, try _SIG_DFL.
- setsig(sig, _SIG_DFL)
- raise(sig)
-
- osyield()
- osyield()
- osyield()
-
- // If we are still somehow running, just exit with the wrong status.
- exit(2)
-}
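The "expected exit status" is the usual Unix convention: a process killed by signal N is reported to its parent as signaled, which shells render as 128+N. A small Unix-only illustration (hypothetical, not runtime code):

package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

func main() {
	// The child kills itself, much as dieFromSignal does with raise.
	err := exec.Command("sh", "-c", "kill -TERM $$").Run()
	if ee, ok := err.(*exec.ExitError); ok {
		ws := ee.Sys().(syscall.WaitStatus)
		fmt.Println(ws.Signaled(), ws.Signal()) // true terminated; a shell would report 143 (128+15)
	}
}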
-
-// raisebadsignal is called when a signal is received on a non-Go
-// thread, and the Go program does not want to handle it (that is, the
-// program has not called os/signal.Notify for the signal).
-func raisebadsignal(sig uint32, c *sigctxt) {
- if sig == _SIGPROF {
- // Ignore profiling signals that arrive on non-Go threads.
- return
- }
-
- var handler uintptr
- if sig >= _NSIG {
- handler = _SIG_DFL
- } else {
- handler = atomic.Loaduintptr(&fwdSig[sig])
- }
-
- // Reset the signal handler and raise the signal.
- // We are currently running inside a signal handler, so the
- // signal is blocked. We need to unblock it before raising the
- // signal, or the signal we raise will be ignored until we return
- // from the signal handler. We know that the signal was unblocked
- // before entering the handler, or else we would not have received
- // it. That means that we don't have to worry about blocking it
- // again.
- unblocksig(sig)
- setsig(sig, handler)
-
- // If we're linked into a non-Go program we want to try to
- // avoid modifying the original context in which the signal
- // was raised. If the handler is the default, we know it
- // is non-recoverable, so we don't have to worry about
- // re-installing sighandler. At this point we can just
- // return and the signal will be re-raised and caught by
- // the default handler with the correct context.
- //
- // On FreeBSD, the libthr sigaction code prevents
- // this from working so we fall through to raise.
- if GOOS != "freebsd" && (isarchive || islibrary) && handler == _SIG_DFL && c.sigcode() != _SI_USER {
- return
- }
-
- raise(sig)
-
- // Give the signal a chance to be delivered.
- // In almost all real cases the program is about to crash,
- // so sleeping here is not a waste of time.
- usleep(1000)
-
- // If the signal didn't cause the program to exit, restore the
- // Go signal handler and carry on.
- //
- // We may receive another instance of the signal before we
- // restore the Go handler, but that is not so bad: we know
- // that the Go program has been ignoring the signal.
- setsig(sig, abi.FuncPCABIInternal(sighandler))
-}
-
-//go:nosplit
-func crash() {
- // OS X core dumps are linear dumps of the mapped memory,
- // from the first virtual byte to the last, with zeros in the gaps.
- // Because of the way we arrange the address space on 64-bit systems,
- // this means the OS X core file will be >128 GB and even on a zippy
- // workstation can take OS X well over an hour to write (uninterruptible).
- // Save users from making that mistake.
- if GOOS == "darwin" && GOARCH == "amd64" {
- return
- }
-
- dieFromSignal(_SIGABRT)
-}
-
-// ensureSigM starts one global, sleeping thread to make sure at least one thread
-// is available to catch signals enabled for os/signal.
-func ensureSigM() {
- if maskUpdatedChan != nil {
- return
- }
- maskUpdatedChan = make(chan struct{})
- disableSigChan = make(chan uint32)
- enableSigChan = make(chan uint32)
- go func() {
- // Signal masks are per-thread, so make sure this goroutine stays on one
- // thread.
- LockOSThread()
- defer UnlockOSThread()
- // The sigBlocked mask contains the signals not active for os/signal,
- // initially all signals except the essential. When signal.Notify()/Stop is called,
- // sigenable/sigdisable in turn notify this thread to update its signal
- // mask accordingly.
- sigBlocked := sigset_all
- for i := range sigtable {
- if !blockableSig(uint32(i)) {
- sigdelset(&sigBlocked, i)
- }
- }
- sigprocmask(_SIG_SETMASK, &sigBlocked, nil)
- for {
- select {
- case sig := <-enableSigChan:
- if sig > 0 {
- sigdelset(&sigBlocked, int(sig))
- }
- case sig := <-disableSigChan:
- if sig > 0 && blockableSig(sig) {
- sigaddset(&sigBlocked, int(sig))
- }
- }
- sigprocmask(_SIG_SETMASK, &sigBlocked, nil)
- maskUpdatedChan <- struct{}{}
- }
- }()
-}
-
-// This is called when we receive a signal when there is no signal stack.
-// This can only happen if non-Go code calls sigaltstack to disable the
-// signal stack.
-func noSignalStack(sig uint32) {
- println("signal", sig, "received on thread with no signal stack")
- throw("non-Go code disabled sigaltstack")
-}
-
-// This is called if we receive a signal when there is a signal stack
-// but we are not on it. This can only happen if non-Go code called
-// sigaction without setting the SS_ONSTACK flag.
-func sigNotOnStack(sig uint32) {
- println("signal", sig, "received but handler not on signal stack")
- throw("non-Go code set up signal handler without SA_ONSTACK flag")
-}
-
-// signalDuringFork is called if we receive a signal while doing a fork.
-// We do not want signals at that time, as a signal sent to the process
-// group may be delivered to the child process, causing confusion.
-// This should never be called, because we block signals across the fork;
-// this function is just a safety check. See issue 18600 for background.
-func signalDuringFork(sig uint32) {
- println("signal", sig, "received during fork")
- throw("signal received during fork")
-}
-
-var badginsignalMsg = "fatal: bad g in signal handler\n"
-
-// This runs on a foreign stack, without an m or a g. No stack split.
-//go:nosplit
-//go:norace
-//go:nowritebarrierrec
-func badsignal(sig uintptr, c *sigctxt) {
- if !iscgo && !cgoHasExtraM {
- // There is no extra M. needm will not be able to grab
- // an M. Instead of hanging, just crash.
- // Cannot call split-stack function as there is no G.
- s := stringStructOf(&badginsignalMsg)
- write(2, s.str, int32(s.len))
- exit(2)
- *(*uintptr)(unsafe.Pointer(uintptr(123))) = 2 // not reached; force a fault if exit somehow returns
- }
- needm()
- if !sigsend(uint32(sig)) {
- // A foreign thread received the signal sig, and the
- // Go code does not want to handle it.
- raisebadsignal(uint32(sig), c)
- }
- dropm()
-}
-
-//go:noescape
-func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer)
-
-// Determines if the signal should be handled by Go and if not, forwards the
-// signal to the handler that was installed before Go's. Returns whether the
-// signal was forwarded.
-// This is called by the signal handler, and the world may be stopped.
-//go:nosplit
-//go:nowritebarrierrec
-func sigfwdgo(sig uint32, info *siginfo, ctx unsafe.Pointer) bool {
- if sig >= uint32(len(sigtable)) {
- return false
- }
- fwdFn := atomic.Loaduintptr(&fwdSig[sig])
- flags := sigtable[sig].flags
-
- // If we aren't handling the signal, forward it.
- if atomic.Load(&handlingSig[sig]) == 0 || !signalsOK {
- // If the signal is ignored, doing nothing is the same as forwarding.
- if fwdFn == _SIG_IGN || (fwdFn == _SIG_DFL && flags&_SigIgn != 0) {
- return true
- }
- // We are not handling the signal and there is no other handler to forward to.
- // Crash with the default behavior.
- if fwdFn == _SIG_DFL {
- setsig(sig, _SIG_DFL)
- dieFromSignal(sig)
- return false
- }
-
- sigfwd(fwdFn, sig, info, ctx)
- return true
- }
-
- // This function and its caller sigtrampgo assume SIGPIPE is delivered on the
- // originating thread. This property does not hold on macOS (golang.org/issue/33384),
- // so we have no choice but to ignore SIGPIPE.
- if (GOOS == "darwin" || GOOS == "ios") && sig == _SIGPIPE {
- return true
- }
-
- // If there is no handler to forward to, no need to forward.
- if fwdFn == _SIG_DFL {
- return false
- }
-
- c := &sigctxt{info, ctx}
- // Only forward synchronous signals and SIGPIPE.
- // Unfortunately, user generated SIGPIPEs will also be forwarded, because si_code
- // is set to _SI_USER even for a SIGPIPE raised from a write to a closed socket
- // or pipe.
- if (c.sigcode() == _SI_USER || flags&_SigPanic == 0) && sig != _SIGPIPE {
- return false
- }
- // Determine if the signal occurred inside Go code. We test that:
- // (1) we weren't in VDSO page,
- // (2) we were in a goroutine (i.e., m.curg != nil), and
- // (3) we weren't in CGO.
- g := sigFetchG(c)
- if g != nil && g.m != nil && g.m.curg != nil && !g.m.incgo {
- return false
- }
-
- // Signal not handled by Go, forward it.
- if fwdFn != _SIG_IGN {
- sigfwd(fwdFn, sig, info, ctx)
- }
-
- return true
-}
-
-// sigsave saves the current thread's signal mask into *p.
-// This is used to preserve the non-Go signal mask when a non-Go
-// thread calls a Go function.
-// This is nosplit and nowritebarrierrec because it is called by needm
-// which may be called on a non-Go thread with no g available.
-//go:nosplit
-//go:nowritebarrierrec
-func sigsave(p *sigset) {
- sigprocmask(_SIG_SETMASK, nil, p)
-}
-
-// msigrestore sets the current thread's signal mask to sigmask.
-// This is used to restore the non-Go signal mask when a non-Go thread
-// calls a Go function.
-// This is nosplit and nowritebarrierrec because it is called by dropm
-// after g has been cleared.
-//go:nosplit
-//go:nowritebarrierrec
-func msigrestore(sigmask sigset) {
- sigprocmask(_SIG_SETMASK, &sigmask, nil)
-}
-
-// sigsetAllExiting is used by sigblock(true) when a thread is
- // exiting. sigset_all is defined in OS-specific code, and OS-specific
- // behavior may override this default for sigsetAllExiting: see
-// osinit().
-var sigsetAllExiting = sigset_all
-
-// sigblock blocks signals in the current thread's signal mask.
-// This is used to block signals while setting up and tearing down g
-// when a non-Go thread calls a Go function. When a thread is exiting
-// we use the sigsetAllExiting value, otherwise the OS specific
-// definition of sigset_all is used.
-// This is nosplit and nowritebarrierrec because it is called by needm
-// which may be called on a non-Go thread with no g available.
-//go:nosplit
-//go:nowritebarrierrec
-func sigblock(exiting bool) {
- if exiting {
- sigprocmask(_SIG_SETMASK, &sigsetAllExiting, nil)
- return
- }
- sigprocmask(_SIG_SETMASK, &sigset_all, nil)
-}
-
-// unblocksig removes sig from the current thread's signal mask.
-// This is nosplit and nowritebarrierrec because it is called from
-// dieFromSignal, which can be called by sigfwdgo while running in the
-// signal handler, on the signal stack, with no g available.
-//go:nosplit
-//go:nowritebarrierrec
-func unblocksig(sig uint32) {
- var set sigset
- sigaddset(&set, int(sig))
- sigprocmask(_SIG_UNBLOCK, &set, nil)
-}
-
-// minitSignals is called when initializing a new m to set the
-// thread's alternate signal stack and signal mask.
-func minitSignals() {
- minitSignalStack()
- minitSignalMask()
-}
-
-// minitSignalStack is called when initializing a new m to set the
-// alternate signal stack. If the alternate signal stack is not set
-// for the thread (the normal case) then set the alternate signal
-// stack to the gsignal stack. If the alternate signal stack is set
-// for the thread (the case when a non-Go thread sets the alternate
-// signal stack and then calls a Go function) then set the gsignal
-// stack to the alternate signal stack. We also set the alternate
-// signal stack to the gsignal stack if cgo is not used (regardless
-// of whether it is already set). Record which choice was made in
-// newSigstack, so that it can be undone in unminit.
-func minitSignalStack() {
- _g_ := getg()
- var st stackt
- sigaltstack(nil, &st)
- if st.ss_flags&_SS_DISABLE != 0 || !iscgo {
- signalstack(&_g_.m.gsignal.stack)
- _g_.m.newSigstack = true
- } else {
- setGsignalStack(&st, &_g_.m.goSigStack)
- _g_.m.newSigstack = false
- }
-}
-
-// minitSignalMask is called when initializing a new m to set the
-// thread's signal mask. When this is called all signals have been
-// blocked for the thread. This starts with m.sigmask, which was set
-// either from initSigmask for a newly created thread or by calling
-// sigsave if this is a non-Go thread calling a Go function. It
-// removes all essential signals from the mask, thus causing those
-// signals to not be blocked. Then it sets the thread's signal mask.
-// After this is called the thread can receive signals.
-func minitSignalMask() {
- nmask := getg().m.sigmask
- for i := range sigtable {
- if !blockableSig(uint32(i)) {
- sigdelset(&nmask, i)
- }
- }
- sigprocmask(_SIG_SETMASK, &nmask, nil)
-}
-
-// unminitSignals is called from dropm, via unminit, to undo the
-// effect of calling minit on a non-Go thread.
-//go:nosplit
-func unminitSignals() {
- if getg().m.newSigstack {
- st := stackt{ss_flags: _SS_DISABLE}
- sigaltstack(&st, nil)
- } else {
- // We got the signal stack from someone else. Restore
- // the Go-allocated stack in case this M gets reused
- // for another thread (e.g., it's an extram). Also, on
- // Android, libc allocates a signal stack for all
- // threads, so it's important to restore the Go stack
- // even on Go-created threads so we can free it.
- restoreGsignalStack(&getg().m.goSigStack)
- }
-}
-
-// blockableSig reports whether sig may be blocked by the signal mask.
-// We never want to block the signals marked _SigUnblock;
-// these are the synchronous signals that turn into a Go panic.
-// We never want to block the preemption signal if it is being used.
-// In a Go program--not a c-archive/c-shared--we never want to block
-// the signals marked _SigKill or _SigThrow, as otherwise it's possible
-// for all running threads to block them and delay their delivery until
-// we start a new thread. When linked into a C program we let the C code
-// decide on the disposition of those signals.
-func blockableSig(sig uint32) bool {
- flags := sigtable[sig].flags
- if flags&_SigUnblock != 0 {
- return false
- }
- if sig == sigPreempt && preemptMSupported && debug.asyncpreemptoff == 0 {
- return false
- }
- if isarchive || islibrary {
- return true
- }
- return flags&(_SigKill|_SigThrow) == 0
-}
-
-// gsignalStack saves the fields of the gsignal stack changed by
-// setGsignalStack.
-type gsignalStack struct {
- stack stack
- stackguard0 uintptr
- stackguard1 uintptr
- stktopsp uintptr
-}
-
-// setGsignalStack sets the gsignal stack of the current m to an
-// alternate signal stack returned from the sigaltstack system call.
-// It saves the old values in *old for use by restoreGsignalStack.
-// This is used when handling a signal if non-Go code has set the
-// alternate signal stack.
-//go:nosplit
-//go:nowritebarrierrec
-func setGsignalStack(st *stackt, old *gsignalStack) {
- g := getg()
- if old != nil {
- old.stack = g.m.gsignal.stack
- old.stackguard0 = g.m.gsignal.stackguard0
- old.stackguard1 = g.m.gsignal.stackguard1
- old.stktopsp = g.m.gsignal.stktopsp
- }
- stsp := uintptr(unsafe.Pointer(st.ss_sp))
- g.m.gsignal.stack.lo = stsp
- g.m.gsignal.stack.hi = stsp + st.ss_size
- g.m.gsignal.stackguard0 = stsp + _StackGuard
- g.m.gsignal.stackguard1 = stsp + _StackGuard
-}
-
-// restoreGsignalStack restores the gsignal stack to the value it had
-// before entering the signal handler.
-//go:nosplit
-//go:nowritebarrierrec
-func restoreGsignalStack(st *gsignalStack) {
- gp := getg().m.gsignal
- gp.stack = st.stack
- gp.stackguard0 = st.stackguard0
- gp.stackguard1 = st.stackguard1
- gp.stktopsp = st.stktopsp
-}
-
-// signalstack sets the current thread's alternate signal stack to s.
-//go:nosplit
-func signalstack(s *stack) {
- st := stackt{ss_size: s.hi - s.lo}
- setSignalstackSP(&st, s.lo)
- sigaltstack(&st, nil)
-}
-
-// setsigsegv is used on darwin/arm64 to fake a segmentation fault.
-//
-// This is exported via linkname to assembly in runtime/cgo.
-//
-//go:nosplit
-//go:linkname setsigsegv
-func setsigsegv(pc uintptr) {
- g := getg()
- g.sig = _SIGSEGV
- g.sigpc = pc
- g.sigcode0 = _SEGV_MAPERR
- g.sigcode1 = 0 // TODO: emulate si_addr
-}
diff --git a/contrib/go/_std_1.18/src/runtime/sigqueue.go b/contrib/go/_std_1.18/src/runtime/sigqueue.go
deleted file mode 100644
index fdf99d94a2..0000000000
--- a/contrib/go/_std_1.18/src/runtime/sigqueue.go
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements runtime support for signal handling.
-//
-// Most synchronization primitives are not available from
-// the signal handler (it cannot block, allocate memory, or use locks)
-// so the handler communicates with a processing goroutine
-// via struct sig, below.
-//
-// sigsend is called by the signal handler to queue a new signal.
-// signal_recv is called by the Go program to receive a newly queued signal.
-//
-// Synchronization between sigsend and signal_recv is based on the sig.state
-// variable. It can be in three states:
-// * sigReceiving means that signal_recv is blocked on sig.Note and there are
-// no new pending signals.
-// * sigSending means that sig.mask *may* contain new pending signals;
-//   signal_recv can't be blocked in this state.
-// * sigIdle means that there are no new pending signals and signal_recv is not
-// blocked.
-//
-// Transitions between states are done atomically with CAS.
-//
-// When signal_recv is unblocked, it resets sig.Note and rechecks sig.mask.
-// If several sigsends and signal_recv execute concurrently, it can lead to
-// unnecessary rechecks of sig.mask, but it cannot lead to missed signals
-// nor deadlocks.
-
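The three-state handshake described above is easier to follow in miniature: below is a minimal, self-contained model of the sigIdle/sigSending/sigReceiving transitions using sync/atomic, with a buffered channel standing in for the runtime's note (the channel and all names are illustrative assumptions, not the runtime's actual machinery).

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    const (
    	sigIdle = iota
    	sigReceiving
    	sigSending
    )

    var (
    	state uint32                   // models sig.state
    	note  = make(chan struct{}, 1) // stand-in for the runtime's note
    )

    // send models sigsend's notification step: flag a pending signal
    // (Idle -> Sending) or wake a parked receiver (Receiving -> Idle).
    func send() {
    	for {
    		switch atomic.LoadUint32(&state) {
    		case sigIdle:
    			if atomic.CompareAndSwapUint32(&state, sigIdle, sigSending) {
    				return
    			}
    		case sigSending:
    			return // notification already pending
    		case sigReceiving:
    			if atomic.CompareAndSwapUint32(&state, sigReceiving, sigIdle) {
    				note <- struct{}{} // wake the receiver
    				return
    			}
    		}
    	}
    }

    // recv models signal_recv's wait step: consume a pending Sending
    // state, or park in Receiving until send wakes us.
    func recv() {
    	for {
    		switch atomic.LoadUint32(&state) {
    		case sigIdle:
    			if atomic.CompareAndSwapUint32(&state, sigIdle, sigReceiving) {
    				<-note
    				return
    			}
    		case sigSending:
    			if atomic.CompareAndSwapUint32(&state, sigSending, sigIdle) {
    				return
    			}
    		}
    	}
    }

    func main() {
    	go send()
    	recv()
    	fmt.Println("received")
    }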
-//go:build !plan9
-
-package runtime
-
-import (
- "runtime/internal/atomic"
- _ "unsafe" // for go:linkname
-)
-
-// sig handles communication between the signal handler and os/signal.
-// Other than the inuse and recv fields, the fields are accessed atomically.
-//
-// The wanted and ignored fields are only written by one goroutine at
-// a time; access is controlled by the handlers Mutex in os/signal.
-// The fields are only read by that one goroutine and by the signal handler.
-// We access them atomically to minimize the race between setting them
-// in the goroutine calling os/signal and the signal handler,
-// which may be running in a different thread. That race is unavoidable,
-// as there is no connection between handling a signal and receiving one,
-// but atomic instructions should minimize it.
-var sig struct {
- note note
- mask [(_NSIG + 31) / 32]uint32
- wanted [(_NSIG + 31) / 32]uint32
- ignored [(_NSIG + 31) / 32]uint32
- recv [(_NSIG + 31) / 32]uint32
- state uint32
- delivering uint32
- inuse bool
-}
-
-const (
- sigIdle = iota
- sigReceiving
- sigSending
-)
-
-// sigsend delivers a signal from sighandler to the internal signal delivery queue.
-// It reports whether the signal was sent. If not, the caller typically crashes the program.
-// It runs from the signal handler, so it's limited in what it can do.
-func sigsend(s uint32) bool {
- bit := uint32(1) << uint(s&31)
- if s >= uint32(32*len(sig.wanted)) {
- return false
- }
-
- atomic.Xadd(&sig.delivering, 1)
- // We are running in the signal handler; defer is not available.
-
- if w := atomic.Load(&sig.wanted[s/32]); w&bit == 0 {
- atomic.Xadd(&sig.delivering, -1)
- return false
- }
-
- // Add signal to outgoing queue.
- for {
- mask := sig.mask[s/32]
- if mask&bit != 0 {
- atomic.Xadd(&sig.delivering, -1)
- return true // signal already in queue
- }
- if atomic.Cas(&sig.mask[s/32], mask, mask|bit) {
- break
- }
- }
-
- // Notify receiver that queue has new bit.
-Send:
- for {
- switch atomic.Load(&sig.state) {
- default:
- throw("sigsend: inconsistent state")
- case sigIdle:
- if atomic.Cas(&sig.state, sigIdle, sigSending) {
- break Send
- }
- case sigSending:
- // notification already pending
- break Send
- case sigReceiving:
- if atomic.Cas(&sig.state, sigReceiving, sigIdle) {
- if GOOS == "darwin" || GOOS == "ios" {
- sigNoteWakeup(&sig.note)
- break Send
- }
- notewakeup(&sig.note)
- break Send
- }
- }
- }
-
- atomic.Xadd(&sig.delivering, -1)
- return true
-}
-
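sigsend and signal_recv pack pending signals as one bit per signal number into an array of uint32 words, indexed by s/32 and masked by 1<<(s&31). A minimal sketch of that packing, assuming _NSIG = 65 (the linux/amd64 value):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    const nsig = 65 // assumed; matches _NSIG on linux/amd64

    var mask [(nsig + 31) / 32]uint32

    // set atomically sets the bit for signal s, returning false if it
    // was already set (mirroring sigsend's "already in queue" path).
    func set(s uint32) bool {
    	bit := uint32(1) << (s & 31)
    	for {
    		old := atomic.LoadUint32(&mask[s/32])
    		if old&bit != 0 {
    			return false
    		}
    		if atomic.CompareAndSwapUint32(&mask[s/32], old, old|bit) {
    			return true
    		}
    	}
    }

    // drain atomically takes and clears a whole word, as signal_recv
    // does with atomic.Xchg when copying sig.mask into sig.recv.
    func drain(word int) uint32 {
    	return atomic.SwapUint32(&mask[word], 0)
    }

    func main() {
    	set(2)  // SIGINT
    	set(15) // SIGTERM
    	fmt.Printf("word 0 = %#x\n", drain(0)) // 0x8004
    	fmt.Printf("word 0 = %#x\n", drain(0)) // 0x0
    }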
-// Called to receive the next queued signal.
-// Must only be called from a single goroutine at a time.
-//go:linkname signal_recv os/signal.signal_recv
-func signal_recv() uint32 {
- for {
- // Serve any signals from local copy.
- for i := uint32(0); i < _NSIG; i++ {
- if sig.recv[i/32]&(1<<(i&31)) != 0 {
- sig.recv[i/32] &^= 1 << (i & 31)
- return i
- }
- }
-
- // Wait for updates to be available from signal sender.
- Receive:
- for {
- switch atomic.Load(&sig.state) {
- default:
- throw("signal_recv: inconsistent state")
- case sigIdle:
- if atomic.Cas(&sig.state, sigIdle, sigReceiving) {
- if GOOS == "darwin" || GOOS == "ios" {
- sigNoteSleep(&sig.note)
- break Receive
- }
- notetsleepg(&sig.note, -1)
- noteclear(&sig.note)
- break Receive
- }
- case sigSending:
- if atomic.Cas(&sig.state, sigSending, sigIdle) {
- break Receive
- }
- }
- }
-
- // Incorporate updates from sender into local copy.
- for i := range sig.mask {
- sig.recv[i] = atomic.Xchg(&sig.mask[i], 0)
- }
- }
-}
-
-// signalWaitUntilIdle waits until the signal delivery mechanism is idle.
-// This is used to ensure that we do not drop a signal notification due
-// to a race between disabling a signal and receiving a signal.
-// This assumes that signal delivery has already been disabled for
-// the signal(s) in question, and here we are just waiting to make sure
-// that all the signals have been delivered to the user channels
-// by the os/signal package.
-//go:linkname signalWaitUntilIdle os/signal.signalWaitUntilIdle
-func signalWaitUntilIdle() {
- // Although the signals we care about have been removed from
- // sig.wanted, it is possible that another thread has received
- // a signal, has read from sig.wanted, is now updating sig.mask,
- // and has not yet woken up the processor thread. We need to wait
- // until all current signal deliveries have completed.
- for atomic.Load(&sig.delivering) != 0 {
- Gosched()
- }
-
- // Although WaitUntilIdle seems like the right name for this
- // function, the state we are looking for is sigReceiving, not
- // sigIdle. The sigIdle state is really more like sigProcessing.
- for atomic.Load(&sig.state) != sigReceiving {
- Gosched()
- }
-}
-
-// Must only be called from a single goroutine at a time.
-//go:linkname signal_enable os/signal.signal_enable
-func signal_enable(s uint32) {
- if !sig.inuse {
- // This is the first call to signal_enable. Initialize.
- sig.inuse = true // enable reception of signals; cannot disable
- if GOOS == "darwin" || GOOS == "ios" {
- sigNoteSetup(&sig.note)
- } else {
- noteclear(&sig.note)
- }
- }
-
- if s >= uint32(len(sig.wanted)*32) {
- return
- }
-
- w := sig.wanted[s/32]
- w |= 1 << (s & 31)
- atomic.Store(&sig.wanted[s/32], w)
-
- i := sig.ignored[s/32]
- i &^= 1 << (s & 31)
- atomic.Store(&sig.ignored[s/32], i)
-
- sigenable(s)
-}
-
-// Must only be called from a single goroutine at a time.
-//go:linkname signal_disable os/signal.signal_disable
-func signal_disable(s uint32) {
- if s >= uint32(len(sig.wanted)*32) {
- return
- }
- sigdisable(s)
-
- w := sig.wanted[s/32]
- w &^= 1 << (s & 31)
- atomic.Store(&sig.wanted[s/32], w)
-}
-
-// Must only be called from a single goroutine at a time.
-//go:linkname signal_ignore os/signal.signal_ignore
-func signal_ignore(s uint32) {
- if s >= uint32(len(sig.wanted)*32) {
- return
- }
- sigignore(s)
-
- w := sig.wanted[s/32]
- w &^= 1 << (s & 31)
- atomic.Store(&sig.wanted[s/32], w)
-
- i := sig.ignored[s/32]
- i |= 1 << (s & 31)
- atomic.Store(&sig.ignored[s/32], i)
-}
-
-// sigInitIgnored marks the signal as already ignored. This is called at
-// program start by initsig. In a shared library initsig is called by
-// libpreinit, so the runtime may not be initialized yet.
-//go:nosplit
-func sigInitIgnored(s uint32) {
- i := sig.ignored[s/32]
- i |= 1 << (s & 31)
- atomic.Store(&sig.ignored[s/32], i)
-}
-
-// Checked by signal handlers.
-//go:linkname signal_ignored os/signal.signal_ignored
-func signal_ignored(s uint32) bool {
- i := atomic.Load(&sig.ignored[s/32])
- return i&(1<<(s&31)) != 0
-}
diff --git a/contrib/go/_std_1.18/src/runtime/slice.go b/contrib/go/_std_1.18/src/runtime/slice.go
deleted file mode 100644
index e0aeba604f..0000000000
--- a/contrib/go/_std_1.18/src/runtime/slice.go
+++ /dev/null
@@ -1,332 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/abi"
- "internal/goarch"
- "runtime/internal/math"
- "runtime/internal/sys"
- "unsafe"
-)
-
-type slice struct {
- array unsafe.Pointer
- len int
- cap int
-}
-
-// A notInHeapSlice is a slice backed by go:notinheap memory.
-type notInHeapSlice struct {
- array *notInHeap
- len int
- cap int
-}
-
-func panicmakeslicelen() {
- panic(errorString("makeslice: len out of range"))
-}
-
-func panicmakeslicecap() {
- panic(errorString("makeslice: cap out of range"))
-}
-
-// makeslicecopy allocates a slice of "tolen" elements of type "et",
-// then copies "fromlen" elements of type "et" into that new allocation from "from".
-func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer {
- var tomem, copymem uintptr
- if uintptr(tolen) > uintptr(fromlen) {
- var overflow bool
- tomem, overflow = math.MulUintptr(et.size, uintptr(tolen))
- if overflow || tomem > maxAlloc || tolen < 0 {
- panicmakeslicelen()
- }
- copymem = et.size * uintptr(fromlen)
- } else {
-		// fromlen is a known good length that is equal to or greater than tolen,
-		// which makes tolen a good slice length too, as the from and to slices
-		// have the same element width.
- tomem = et.size * uintptr(tolen)
- copymem = tomem
- }
-
- var to unsafe.Pointer
- if et.ptrdata == 0 {
- to = mallocgc(tomem, nil, false)
- if copymem < tomem {
- memclrNoHeapPointers(add(to, copymem), tomem-copymem)
- }
- } else {
- // Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
- to = mallocgc(tomem, et, true)
- if copymem > 0 && writeBarrier.enabled {
-		// Only shade the pointers in from, since we know the destination slice
-		// only contains nil pointers; it has been cleared during alloc.
- bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem)
- }
- }
-
- if raceenabled {
- callerpc := getcallerpc()
- pc := abi.FuncPCABIInternal(makeslicecopy)
- racereadrangepc(from, copymem, callerpc, pc)
- }
- if msanenabled {
- msanread(from, copymem)
- }
- if asanenabled {
- asanread(from, copymem)
- }
-
- memmove(to, from, copymem)
-
- return to
-}
-
-func makeslice(et *_type, len, cap int) unsafe.Pointer {
- mem, overflow := math.MulUintptr(et.size, uintptr(cap))
- if overflow || mem > maxAlloc || len < 0 || len > cap {
- // NOTE: Produce a 'len out of range' error instead of a
- // 'cap out of range' error when someone does make([]T, bignumber).
- // 'cap out of range' is true too, but since the cap is only being
- // supplied implicitly, saying len is clearer.
- // See golang.org/issue/4085.
- mem, overflow := math.MulUintptr(et.size, uintptr(len))
- if overflow || mem > maxAlloc || len < 0 {
- panicmakeslicelen()
- }
- panicmakeslicecap()
- }
-
- return mallocgc(mem, et, true)
-}
-
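makeslice's overflow check multiplies the element size by the capacity and rejects the result if the multiplication wrapped or exceeds the allocator limit. Outside the runtime, the same contract as math.MulUintptr can be sketched with math/bits (maxAlloc here is an assumed stand-in; the real limit depends on the heap layout, and the 1<<62 test case assumes a 64-bit platform):

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    const maxAlloc = 1 << 47 // assumed cap; the real value is heap-layout dependent

    // mulCheck reports a*b and whether the product overflowed,
    // the same contract as runtime/internal/math.MulUintptr.
    func mulCheck(a, b uintptr) (uintptr, bool) {
    	hi, lo := bits.Mul(uint(a), uint(b))
    	return uintptr(lo), hi != 0
    }

    func checkMake(elemSize uintptr, length, capacity int) error {
    	mem, overflow := mulCheck(elemSize, uintptr(capacity))
    	if overflow || mem > maxAlloc || length < 0 || length > capacity {
    		return fmt.Errorf("make: len/cap out of range")
    	}
    	return nil
    }

    func main() {
    	fmt.Println(checkMake(8, 4, 1<<62)) // product overflows: error
    	fmt.Println(checkMake(8, 4, 16))    // nil
    }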
-func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer {
- len := int(len64)
- if int64(len) != len64 {
- panicmakeslicelen()
- }
-
- cap := int(cap64)
- if int64(cap) != cap64 {
- panicmakeslicecap()
- }
-
- return makeslice(et, len, cap)
-}
-
-func unsafeslice(et *_type, ptr unsafe.Pointer, len int) {
- if len < 0 {
- panicunsafeslicelen()
- }
-
- mem, overflow := math.MulUintptr(et.size, uintptr(len))
- if overflow || mem > -uintptr(ptr) {
- if ptr == nil {
- panic(errorString("unsafe.Slice: ptr is nil and len is not zero"))
- }
- panicunsafeslicelen()
- }
-}
-
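The test mem > -uintptr(ptr) in unsafeslice is a compact wraparound check: for non-nil ptr, -uintptr(ptr) equals the number of addressable bytes from ptr to the top of the address space. A toy illustration in a 16-bit "address space" (the address is invented):

    package main

    import "fmt"

    func main() {
    	// Use a 16-bit "address space" so the numbers stay readable;
    	// the runtime does the same arithmetic on uintptr.
    	ptr := uint16(0xFFF0) // made-up address near the top of memory

    	// In unsigned arithmetic, -ptr == 2^16 - ptr: the number of
    	// addressable bytes from ptr to the end of the address space.
    	roomAbove := -ptr
    	fmt.Println(roomAbove) // 16

    	// A 20-byte object starting at ptr would wrap around, so
    	// unsafeslice's "mem > -uintptr(ptr)" test fires.
    	mem := uint16(20)
    	fmt.Println(mem > roomAbove) // true
    }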
-func unsafeslice64(et *_type, ptr unsafe.Pointer, len64 int64) {
- len := int(len64)
- if int64(len) != len64 {
- panicunsafeslicelen()
- }
- unsafeslice(et, ptr, len)
-}
-
-func unsafeslicecheckptr(et *_type, ptr unsafe.Pointer, len64 int64) {
- unsafeslice64(et, ptr, len64)
-
- // Check that underlying array doesn't straddle multiple heap objects.
- // unsafeslice64 has already checked for overflow.
- if checkptrStraddles(ptr, uintptr(len64)*et.size) {
- throw("checkptr: unsafe.Slice result straddles multiple allocations")
- }
-}
-
-func panicunsafeslicelen() {
- panic(errorString("unsafe.Slice: len out of range"))
-}
-
-// growslice handles slice growth during append.
-// It is passed the slice element type, the old slice, and the desired new minimum capacity,
-// and it returns a new slice with at least that capacity, with the old data
-// copied into it.
-// The new slice's length is set to the old slice's length,
-// NOT to the new requested capacity.
-// This is for codegen convenience. The old slice's length is used immediately
-// to calculate where to write new values during an append.
-// TODO: When the old backend is gone, reconsider this decision.
-// The SSA backend might prefer the new length or to return only ptr/cap and save stack space.
-func growslice(et *_type, old slice, cap int) slice {
- if raceenabled {
- callerpc := getcallerpc()
- racereadrangepc(old.array, uintptr(old.len*int(et.size)), callerpc, abi.FuncPCABIInternal(growslice))
- }
- if msanenabled {
- msanread(old.array, uintptr(old.len*int(et.size)))
- }
- if asanenabled {
- asanread(old.array, uintptr(old.len*int(et.size)))
- }
-
- if cap < old.cap {
- panic(errorString("growslice: cap out of range"))
- }
-
- if et.size == 0 {
- // append should not create a slice with nil pointer but non-zero len.
- // We assume that append doesn't need to preserve old.array in this case.
- return slice{unsafe.Pointer(&zerobase), old.len, cap}
- }
-
- newcap := old.cap
- doublecap := newcap + newcap
- if cap > doublecap {
- newcap = cap
- } else {
- const threshold = 256
- if old.cap < threshold {
- newcap = doublecap
- } else {
- // Check 0 < newcap to detect overflow
- // and prevent an infinite loop.
- for 0 < newcap && newcap < cap {
- // Transition from growing 2x for small slices
- // to growing 1.25x for large slices. This formula
- // gives a smooth-ish transition between the two.
- newcap += (newcap + 3*threshold) / 4
- }
- // Set newcap to the requested cap when
- // the newcap calculation overflowed.
- if newcap <= 0 {
- newcap = cap
- }
- }
- }
-
- var overflow bool
- var lenmem, newlenmem, capmem uintptr
- // Specialize for common values of et.size.
- // For 1 we don't need any division/multiplication.
- // For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
- // For powers of 2, use a variable shift.
- switch {
- case et.size == 1:
- lenmem = uintptr(old.len)
- newlenmem = uintptr(cap)
- capmem = roundupsize(uintptr(newcap))
- overflow = uintptr(newcap) > maxAlloc
- newcap = int(capmem)
- case et.size == goarch.PtrSize:
- lenmem = uintptr(old.len) * goarch.PtrSize
- newlenmem = uintptr(cap) * goarch.PtrSize
- capmem = roundupsize(uintptr(newcap) * goarch.PtrSize)
- overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
- newcap = int(capmem / goarch.PtrSize)
- case isPowerOfTwo(et.size):
- var shift uintptr
- if goarch.PtrSize == 8 {
- // Mask shift for better code generation.
- shift = uintptr(sys.Ctz64(uint64(et.size))) & 63
- } else {
- shift = uintptr(sys.Ctz32(uint32(et.size))) & 31
- }
- lenmem = uintptr(old.len) << shift
- newlenmem = uintptr(cap) << shift
- capmem = roundupsize(uintptr(newcap) << shift)
- overflow = uintptr(newcap) > (maxAlloc >> shift)
- newcap = int(capmem >> shift)
- default:
- lenmem = uintptr(old.len) * et.size
- newlenmem = uintptr(cap) * et.size
- capmem, overflow = math.MulUintptr(et.size, uintptr(newcap))
- capmem = roundupsize(capmem)
- newcap = int(capmem / et.size)
- }
-
- // The check of overflow in addition to capmem > maxAlloc is needed
- // to prevent an overflow which can be used to trigger a segfault
- // on 32bit architectures with this example program:
- //
- // type T [1<<27 + 1]int64
- //
- // var d T
- // var s []T
- //
- // func main() {
- // s = append(s, d, d, d, d)
- // print(len(s), "\n")
- // }
- if overflow || capmem > maxAlloc {
- panic(errorString("growslice: cap out of range"))
- }
-
- var p unsafe.Pointer
- if et.ptrdata == 0 {
- p = mallocgc(capmem, nil, false)
- // The append() that calls growslice is going to overwrite from old.len to cap (which will be the new length).
- // Only clear the part that will not be overwritten.
- memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
- } else {
- // Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
- p = mallocgc(capmem, et, true)
- if lenmem > 0 && writeBarrier.enabled {
- // Only shade the pointers in old.array since we know the destination slice p
- // only contains nil pointers because it has been cleared during alloc.
- bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(old.array), lenmem-et.size+et.ptrdata)
- }
- }
- memmove(p, old.array, lenmem)
-
- return slice{p, old.len, newcap}
-}
-
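growslice's capacity policy (double under a 256-element threshold, then roughly 1.25x growth with a smooth transition) can be isolated from the allocator details. A sketch of just that policy, omitting the roundupsize size-class adjustment that follows it:

    package main

    import "fmt"

    // nextCap mirrors growslice's Go 1.18 capacity policy: doubling for
    // small slices, ~1.25x growth for large ones, without the final
    // roundupsize size-class adjustment.
    func nextCap(oldCap, want int) int {
    	newcap := oldCap
    	doublecap := newcap + newcap
    	if want > doublecap {
    		return want
    	}
    	const threshold = 256
    	if oldCap < threshold {
    		return doublecap
    	}
    	for 0 < newcap && newcap < want {
    		// Transition smoothly from 2x to 1.25x growth.
    		newcap += (newcap + 3*threshold) / 4
    	}
    	if newcap <= 0 { // overflowed; fall back to the request
    		newcap = want
    	}
    	return newcap
    }

    func main() {
    	for _, c := range []int{100, 256, 1024, 4096} {
    		fmt.Printf("cap %4d -> %d\n", c, nextCap(c, c+1))
    	}
    }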
-func isPowerOfTwo(x uintptr) bool {
- return x&(x-1) == 0
-}
-
-// slicecopy is used to copy from a string or slice of pointerless elements into a slice.
-func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int {
- if fromLen == 0 || toLen == 0 {
- return 0
- }
-
- n := fromLen
- if toLen < n {
- n = toLen
- }
-
- if width == 0 {
- return n
- }
-
- size := uintptr(n) * width
- if raceenabled {
- callerpc := getcallerpc()
- pc := abi.FuncPCABIInternal(slicecopy)
- racereadrangepc(fromPtr, size, callerpc, pc)
- racewriterangepc(toPtr, size, callerpc, pc)
- }
- if msanenabled {
- msanread(fromPtr, size)
- msanwrite(toPtr, size)
- }
- if asanenabled {
- asanread(fromPtr, size)
- asanwrite(toPtr, size)
- }
-
- if size == 1 { // common case worth about 2x to do here
- // TODO: is this still worth it with new memmove impl?
- *(*byte)(toPtr) = *(*byte)(fromPtr) // known to be a byte pointer
- } else {
- memmove(toPtr, fromPtr, size)
- }
- return n
-}
diff --git a/contrib/go/_std_1.18/src/runtime/stack.go b/contrib/go/_std_1.18/src/runtime/stack.go
deleted file mode 100644
index edc37d4878..0000000000
--- a/contrib/go/_std_1.18/src/runtime/stack.go
+++ /dev/null
@@ -1,1434 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/abi"
- "internal/cpu"
- "internal/goarch"
- "internal/goos"
- "runtime/internal/atomic"
- "runtime/internal/sys"
- "unsafe"
-)
-
-/*
-Stack layout parameters.
-Included by both the runtime (compiled via 6c) and the linkers (compiled via gcc).
-
-The per-goroutine g->stackguard is set to point StackGuard bytes
-above the bottom of the stack. Each function compares its stack
-pointer against g->stackguard to check for overflow. To cut one
-instruction from the check sequence for functions with tiny frames,
-the stack is allowed to protrude StackSmall bytes below the stack
-guard. Functions with large frames don't bother with the check and
-always call morestack. The sequences are (for amd64, others are
-similar):
-
- guard = g->stackguard
- frame = function's stack frame size
- argsize = size of function arguments (call + return)
-
- stack frame size <= StackSmall:
- CMPQ guard, SP
- JHI 3(PC)
- MOVQ m->morearg, $(argsize << 32)
- CALL morestack(SB)
-
- stack frame size > StackSmall but < StackBig
- LEAQ (frame-StackSmall)(SP), R0
- CMPQ guard, R0
- JHI 3(PC)
- MOVQ m->morearg, $(argsize << 32)
- CALL morestack(SB)
-
- stack frame size >= StackBig:
- MOVQ m->morearg, $((argsize << 32) | frame)
- CALL morestack(SB)
-
-The bottom StackGuard - StackSmall bytes are important: there has
-to be enough room to execute functions that refuse to check for
-stack overflow, either because they need to be adjacent to the
-actual caller's frame (deferproc) or because they handle the imminent
-stack overflow (morestack).
-
-For example, deferproc might call malloc, which does one of the
-above checks (without allocating a full frame), which might trigger
-a call to morestack. This sequence needs to fit in the bottom
-section of the stack. On amd64, morestack's frame is 40 bytes, and
-deferproc's frame is 56 bytes. That fits well within the
-StackGuard - StackSmall bytes at the bottom.
-The linkers explore all possible call traces involving non-splitting
-functions to make sure that this limit cannot be violated.
-*/
-
-const (
- // StackSystem is a number of additional bytes to add
- // to each stack below the usual guard area for OS-specific
- // purposes like signal handling. Used on Windows, Plan 9,
- // and iOS because they do not use a separate stack.
- _StackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024
-
- // The minimum size of stack used by Go code
- _StackMin = 2048
-
- // The minimum stack size to allocate.
- // The hackery here rounds FixedStack0 up to a power of 2.
- _FixedStack0 = _StackMin + _StackSystem
- _FixedStack1 = _FixedStack0 - 1
- _FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
- _FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
- _FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
- _FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
- _FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
- _FixedStack = _FixedStack6 + 1
-
- // Functions that need frames bigger than this use an extra
- // instruction to do the stack split check, to avoid overflow
- // in case SP - framesize wraps below zero.
- // This value can be no bigger than the size of the unmapped
- // space at zero.
- _StackBig = 4096
-
- // The stack guard is a pointer this many bytes above the
- // bottom of the stack.
- //
- // The guard leaves enough room for one _StackSmall frame plus
- // a _StackLimit chain of NOSPLIT calls plus _StackSystem
- // bytes for the OS.
- _StackGuard = 928*sys.StackGuardMultiplier + _StackSystem
-
- // After a stack split check the SP is allowed to be this
- // many bytes below the stack guard. This saves an instruction
- // in the checking sequence for tiny frames.
- _StackSmall = 128
-
- // The maximum number of bytes that a chain of NOSPLIT
- // functions can use.
- _StackLimit = _StackGuard - _StackSystem - _StackSmall
-)
-
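The _FixedStack0.._FixedStack6 "hackery" above is the branch-free round-up-to-a-power-of-two idiom: OR the value with ever larger right shifts of itself to smear the top set bit downward, then add one. The same computation as a standalone function:

    package main

    import "fmt"

    // roundUpPow2 is the or-shift cascade used for _FixedStack,
    // written over uint32.
    func roundUpPow2(n uint32) uint32 {
    	n--
    	n |= n >> 1
    	n |= n >> 2
    	n |= n >> 4
    	n |= n >> 8
    	n |= n >> 16
    	return n + 1
    }

    func main() {
    	// With _StackSystem = 0, _FixedStack0 = 2048: already a power of 2.
    	fmt.Println(roundUpPow2(2048)) // 2048
    	// With a nonzero _StackSystem (e.g. Windows), it rounds up.
    	fmt.Println(roundUpPow2(2048 + 4096)) // 8192
    }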
-const (
- // stackDebug == 0: no logging
- // == 1: logging of per-stack operations
- // == 2: logging of per-frame operations
- // == 3: logging of per-word updates
- // == 4: logging of per-word reads
- stackDebug = 0
- stackFromSystem = 0 // allocate stacks from system memory instead of the heap
- stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
- stackPoisonCopy = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
- stackNoCache = 0 // disable per-P small stack caches
-
- // check the BP links during traceback.
- debugCheckBP = false
-)
-
-const (
- uintptrMask = 1<<(8*goarch.PtrSize) - 1
-
- // The values below can be stored to g.stackguard0 to force
- // the next stack check to fail.
- // These are all larger than any real SP.
-
- // Goroutine preemption request.
- // 0xfffffade in hex.
- stackPreempt = uintptrMask & -1314
-
- // Thread is forking. Causes a split stack check failure.
- // 0xfffffb2e in hex.
- stackFork = uintptrMask & -1234
-
- // Force a stack movement. Used for debugging.
- // 0xfffffeed in hex.
- stackForceMove = uintptrMask & -275
-
- // stackPoisonMin is the lowest allowed stack poison value.
- stackPoisonMin = uintptrMask & -4096
-)
-
-// Global pool of spans that have free stacks.
-// Stacks are assigned an order according to size.
-// order = log_2(size/FixedStack)
-// There is a free list for each order.
-var stackpool [_NumStackOrders]struct {
- item stackpoolItem
- _ [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte
-}
-
-//go:notinheap
-type stackpoolItem struct {
- mu mutex
- span mSpanList
-}
-
-// Global pool of large stack spans.
-var stackLarge struct {
- lock mutex
- free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
-}
-
-func stackinit() {
- if _StackCacheSize&_PageMask != 0 {
- throw("cache size must be a multiple of page size")
- }
- for i := range stackpool {
- stackpool[i].item.span.init()
- lockInit(&stackpool[i].item.mu, lockRankStackpool)
- }
- for i := range stackLarge.free {
- stackLarge.free[i].init()
- lockInit(&stackLarge.lock, lockRankStackLarge)
- }
-}
-
-// stacklog2 returns ⌊log_2(n)⌋.
-func stacklog2(n uintptr) int {
- log2 := 0
- for n > 1 {
- n >>= 1
- log2++
- }
- return log2
-}
-
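stacklog2's shift loop computes floor(log2 n), which is also how a stack's order relates to its size (order = log_2(size/FixedStack)). It is equivalent to math/bits.Len minus one, as this quick check shows:

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    func stacklog2(n uintptr) int {
    	log2 := 0
    	for n > 1 {
    		n >>= 1
    		log2++
    	}
    	return log2
    }

    func main() {
    	for _, n := range []uintptr{1, 2, 3, 8, 4096, 5000} {
    		// bits.Len(n) - 1 is floor(log2 n) for n > 0.
    		fmt.Println(n, stacklog2(n), bits.Len(uint(n))-1)
    	}
    }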
-// Allocates a stack from the free pool. Must be called with
-// stackpool[order].item.mu held.
-func stackpoolalloc(order uint8) gclinkptr {
- list := &stackpool[order].item.span
- s := list.first
- lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
- if s == nil {
- // no free stacks. Allocate another span worth.
- s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
- if s == nil {
- throw("out of memory")
- }
- if s.allocCount != 0 {
- throw("bad allocCount")
- }
- if s.manualFreeList.ptr() != nil {
- throw("bad manualFreeList")
- }
- osStackAlloc(s)
- s.elemsize = _FixedStack << order
- for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
- x := gclinkptr(s.base() + i)
- x.ptr().next = s.manualFreeList
- s.manualFreeList = x
- }
- list.insert(s)
- }
- x := s.manualFreeList
- if x.ptr() == nil {
- throw("span has no free stacks")
- }
- s.manualFreeList = x.ptr().next
- s.allocCount++
- if s.manualFreeList.ptr() == nil {
- // all stacks in s are allocated.
- list.remove(s)
- }
- return x
-}
-
-// Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
-func stackpoolfree(x gclinkptr, order uint8) {
- s := spanOfUnchecked(uintptr(x))
- if s.state.get() != mSpanManual {
- throw("freeing stack not in a stack span")
- }
- if s.manualFreeList.ptr() == nil {
- // s will now have a free stack
- stackpool[order].item.span.insert(s)
- }
- x.ptr().next = s.manualFreeList
- s.manualFreeList = x
- s.allocCount--
- if gcphase == _GCoff && s.allocCount == 0 {
- // Span is completely free. Return it to the heap
- // immediately if we're sweeping.
- //
- // If GC is active, we delay the free until the end of
- // GC to avoid the following type of situation:
- //
- // 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
- // 2) The stack that pointer points to is copied
- // 3) The old stack is freed
- // 4) The containing span is marked free
- // 5) GC attempts to mark the SudoG.elem pointer. The
- // marking fails because the pointer looks like a
- // pointer into a free span.
- //
- // By not freeing, we prevent step #4 until GC is done.
- stackpool[order].item.span.remove(s)
- s.manualFreeList = 0
- osStackFree(s)
- mheap_.freeManual(s, spanAllocStack)
- }
-}
-
-// stackcacherefill/stackcacherelease implement a global pool of stack segments.
-// The pool is required to prevent unlimited growth of per-thread caches.
-//
-//go:systemstack
-func stackcacherefill(c *mcache, order uint8) {
- if stackDebug >= 1 {
- print("stackcacherefill order=", order, "\n")
- }
-
- // Grab some stacks from the global cache.
- // Grab half of the allowed capacity (to prevent thrashing).
- var list gclinkptr
- var size uintptr
- lock(&stackpool[order].item.mu)
- for size < _StackCacheSize/2 {
- x := stackpoolalloc(order)
- x.ptr().next = list
- list = x
- size += _FixedStack << order
- }
- unlock(&stackpool[order].item.mu)
- c.stackcache[order].list = list
- c.stackcache[order].size = size
-}
-
-//go:systemstack
-func stackcacherelease(c *mcache, order uint8) {
- if stackDebug >= 1 {
- print("stackcacherelease order=", order, "\n")
- }
- x := c.stackcache[order].list
- size := c.stackcache[order].size
- lock(&stackpool[order].item.mu)
- for size > _StackCacheSize/2 {
- y := x.ptr().next
- stackpoolfree(x, order)
- x = y
- size -= _FixedStack << order
- }
- unlock(&stackpool[order].item.mu)
- c.stackcache[order].list = x
- c.stackcache[order].size = size
-}
-
-//go:systemstack
-func stackcache_clear(c *mcache) {
- if stackDebug >= 1 {
- print("stackcache clear\n")
- }
- for order := uint8(0); order < _NumStackOrders; order++ {
- lock(&stackpool[order].item.mu)
- x := c.stackcache[order].list
- for x.ptr() != nil {
- y := x.ptr().next
- stackpoolfree(x, order)
- x = y
- }
- c.stackcache[order].list = 0
- c.stackcache[order].size = 0
- unlock(&stackpool[order].item.mu)
- }
-}
-
-// stackalloc allocates an n byte stack.
-//
-// stackalloc must run on the system stack because it uses per-P
-// resources and must not split the stack.
-//
-//go:systemstack
-func stackalloc(n uint32) stack {
- // Stackalloc must be called on scheduler stack, so that we
- // never try to grow the stack during the code that stackalloc runs.
- // Doing so would cause a deadlock (issue 1547).
- thisg := getg()
- if thisg != thisg.m.g0 {
- throw("stackalloc not on scheduler stack")
- }
- if n&(n-1) != 0 {
- throw("stack size not a power of 2")
- }
- if stackDebug >= 1 {
- print("stackalloc ", n, "\n")
- }
-
- if debug.efence != 0 || stackFromSystem != 0 {
- n = uint32(alignUp(uintptr(n), physPageSize))
- v := sysAlloc(uintptr(n), &memstats.stacks_sys)
- if v == nil {
- throw("out of memory (stackalloc)")
- }
- return stack{uintptr(v), uintptr(v) + uintptr(n)}
- }
-
- // Small stacks are allocated with a fixed-size free-list allocator.
- // If we need a stack of a bigger size, we fall back on allocating
- // a dedicated span.
- var v unsafe.Pointer
- if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
- order := uint8(0)
- n2 := n
- for n2 > _FixedStack {
- order++
- n2 >>= 1
- }
- var x gclinkptr
- if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
- // thisg.m.p == 0 can happen in the guts of exitsyscall
- // or procresize. Just get a stack from the global pool.
- // Also don't touch stackcache during gc
- // as it's flushed concurrently.
- lock(&stackpool[order].item.mu)
- x = stackpoolalloc(order)
- unlock(&stackpool[order].item.mu)
- } else {
- c := thisg.m.p.ptr().mcache
- x = c.stackcache[order].list
- if x.ptr() == nil {
- stackcacherefill(c, order)
- x = c.stackcache[order].list
- }
- c.stackcache[order].list = x.ptr().next
- c.stackcache[order].size -= uintptr(n)
- }
- v = unsafe.Pointer(x)
- } else {
- var s *mspan
- npage := uintptr(n) >> _PageShift
- log2npage := stacklog2(npage)
-
- // Try to get a stack from the large stack cache.
- lock(&stackLarge.lock)
- if !stackLarge.free[log2npage].isEmpty() {
- s = stackLarge.free[log2npage].first
- stackLarge.free[log2npage].remove(s)
- }
- unlock(&stackLarge.lock)
-
- lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
-
- if s == nil {
- // Allocate a new stack from the heap.
- s = mheap_.allocManual(npage, spanAllocStack)
- if s == nil {
- throw("out of memory")
- }
- osStackAlloc(s)
- s.elemsize = uintptr(n)
- }
- v = unsafe.Pointer(s.base())
- }
-
- if raceenabled {
- racemalloc(v, uintptr(n))
- }
- if msanenabled {
- msanmalloc(v, uintptr(n))
- }
- if asanenabled {
- asanunpoison(v, uintptr(n))
- }
- if stackDebug >= 1 {
- print(" allocated ", v, "\n")
- }
- return stack{uintptr(v), uintptr(v) + uintptr(n)}
-}
-
-// stackfree frees an n byte stack allocation at stk.
-//
-// stackfree must run on the system stack because it uses per-P
-// resources and must not split the stack.
-//
-//go:systemstack
-func stackfree(stk stack) {
- gp := getg()
- v := unsafe.Pointer(stk.lo)
- n := stk.hi - stk.lo
- if n&(n-1) != 0 {
- throw("stack not a power of 2")
- }
- if stk.lo+n < stk.hi {
- throw("bad stack size")
- }
- if stackDebug >= 1 {
- println("stackfree", v, n)
- memclrNoHeapPointers(v, n) // for testing, clobber stack data
- }
- if debug.efence != 0 || stackFromSystem != 0 {
- if debug.efence != 0 || stackFaultOnFree != 0 {
- sysFault(v, n)
- } else {
- sysFree(v, n, &memstats.stacks_sys)
- }
- return
- }
- if msanenabled {
- msanfree(v, n)
- }
- if asanenabled {
- asanpoison(v, n)
- }
- if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
- order := uint8(0)
- n2 := n
- for n2 > _FixedStack {
- order++
- n2 >>= 1
- }
- x := gclinkptr(v)
- if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
- lock(&stackpool[order].item.mu)
- stackpoolfree(x, order)
- unlock(&stackpool[order].item.mu)
- } else {
- c := gp.m.p.ptr().mcache
- if c.stackcache[order].size >= _StackCacheSize {
- stackcacherelease(c, order)
- }
- x.ptr().next = c.stackcache[order].list
- c.stackcache[order].list = x
- c.stackcache[order].size += n
- }
- } else {
- s := spanOfUnchecked(uintptr(v))
- if s.state.get() != mSpanManual {
- println(hex(s.base()), v)
- throw("bad span state")
- }
- if gcphase == _GCoff {
- // Free the stack immediately if we're
- // sweeping.
- osStackFree(s)
- mheap_.freeManual(s, spanAllocStack)
- } else {
- // If the GC is running, we can't return a
- // stack span to the heap because it could be
- // reused as a heap span, and this state
- // change would race with GC. Add it to the
- // large stack cache instead.
- log2npage := stacklog2(s.npages)
- lock(&stackLarge.lock)
- stackLarge.free[log2npage].insert(s)
- unlock(&stackLarge.lock)
- }
- }
-}
-
-var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
-
-var maxstackceiling = maxstacksize
-
-var ptrnames = []string{
- 0: "scalar",
- 1: "ptr",
-}
-
-// Stack frame layout
-//
-// (x86)
-// +------------------+
-// | args from caller |
-// +------------------+ <- frame->argp
-// | return address |
-// +------------------+
-// | caller's BP (*) | (*) if framepointer_enabled && varp < sp
-// +------------------+ <- frame->varp
-// | locals |
-// +------------------+
-// | args to callee |
-// +------------------+ <- frame->sp
-//
-// (arm)
-// +------------------+
-// | args from caller |
-// +------------------+ <- frame->argp
-// | caller's retaddr |
-// +------------------+ <- frame->varp
-// | locals |
-// +------------------+
-// | args to callee |
-// +------------------+
-// | return address |
-// +------------------+ <- frame->sp
-
-type adjustinfo struct {
- old stack
- delta uintptr // ptr distance from old to new stack (newbase - oldbase)
- cache pcvalueCache
-
- // sghi is the highest sudog.elem on the stack.
- sghi uintptr
-}
-
-// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
-// If so, it rewrites *vpp to point into the new stack.
-func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
- pp := (*uintptr)(vpp)
- p := *pp
- if stackDebug >= 4 {
- print(" ", pp, ":", hex(p), "\n")
- }
- if adjinfo.old.lo <= p && p < adjinfo.old.hi {
- *pp = p + adjinfo.delta
- if stackDebug >= 3 {
- print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
- }
- }
-}
-
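adjustpointer boils down to one interval test plus an offset. A self-contained model with invented addresses (copystack computes delta from the stack tops, new.hi - old.hi; with equal-size stacks, as here, that matches the difference of the bases):

    package main

    import "fmt"

    type stackBounds struct{ lo, hi uintptr }

    // adjust rewrites *p by delta if it points into old, exactly the
    // test adjustpointer performs on every candidate word.
    func adjust(p *uintptr, old stackBounds, delta uintptr) {
    	if old.lo <= *p && *p < old.hi {
    		*p += delta
    	}
    }

    func main() {
    	old := stackBounds{lo: 0x1000, hi: 0x2000}
    	const newBase = 0x8000
    	delta := newBase - old.lo // newbase - oldbase, as in adjustinfo

    	inStack := uintptr(0x1800)  // points into the old stack: moves
    	elsewhere := uintptr(0x400) // heap/global pointer: untouched

    	adjust(&inStack, old, delta)
    	adjust(&elsewhere, old, delta)
    	fmt.Printf("%#x %#x\n", inStack, elsewhere) // 0x8800 0x400
    }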
-// Information from the compiler about the layout of stack frames.
-// Note: this type must agree with reflect.bitVector.
-type bitvector struct {
- n int32 // # of bits
- bytedata *uint8
-}
-
-// ptrbit returns the i'th bit in bv.
-// ptrbit is less efficient than iterating directly over bitvector bits,
-// and should only be used in non-performance-critical code.
-// See adjustpointers for an example of a high-efficiency walk of a bitvector.
-func (bv *bitvector) ptrbit(i uintptr) uint8 {
- b := *(addb(bv.bytedata, i/8))
- return (b >> (i % 8)) & 1
-}
-
-// bv describes the memory starting at address scanp.
-// Adjust any pointers contained therein.
-func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
- minp := adjinfo.old.lo
- maxp := adjinfo.old.hi
- delta := adjinfo.delta
- num := uintptr(bv.n)
- // If this frame might contain channel receive slots, use CAS
- // to adjust pointers. If the slot hasn't been received into
- // yet, it may contain stack pointers and a concurrent send
- // could race with adjusting those pointers. (The sent value
- // itself can never contain stack pointers.)
- useCAS := uintptr(scanp) < adjinfo.sghi
- for i := uintptr(0); i < num; i += 8 {
- if stackDebug >= 4 {
- for j := uintptr(0); j < 8; j++ {
- print(" ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
- }
- }
- b := *(addb(bv.bytedata, i/8))
- for b != 0 {
- j := uintptr(sys.Ctz8(b))
- b &= b - 1
- pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
- retry:
- p := *pp
- if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
- // Looks like a junk value in a pointer slot.
- // Live analysis wrong?
- getg().m.traceback = 2
- print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
- throw("invalid pointer found on stack")
- }
- if minp <= p && p < maxp {
- if stackDebug >= 3 {
- print("adjust ptr ", hex(p), " ", funcname(f), "\n")
- }
- if useCAS {
- ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
- if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
- goto retry
- }
- } else {
- *pp = p + delta
- }
- }
- }
- }
-}
-
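The inner loop of adjustpointers visits set bits with the standard trick: count trailing zeros to find the lowest one-bit, then clear it with b &= b-1. The portable equivalent of its Ctz8 loop:

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    func main() {
    	b := uint8(0b10110010) // one byte of a pointer bitmap

    	// Visit each set bit from lowest to highest, as the
    	// Ctz8 / b &= b-1 loop in adjustpointers does.
    	for b != 0 {
    		j := bits.TrailingZeros8(b)
    		b &= b - 1 // clear the lowest set bit
    		fmt.Println("pointer slot at word offset", j)
    	}
    	// Prints offsets 1, 4, 5, 7.
    }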
-// Note: the argument/return area is adjusted by the callee.
-func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
- adjinfo := (*adjustinfo)(arg)
- if frame.continpc == 0 {
- // Frame is dead.
- return true
- }
- f := frame.fn
- if stackDebug >= 2 {
- print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
- }
- if f.funcID == funcID_systemstack_switch {
- // A special routine at the bottom of stack of a goroutine that does a systemstack call.
- // We will allow it to be copied even though we don't
- // have full GC info for it (because it is written in asm).
- return true
- }
-
- locals, args, objs := getStackMap(frame, &adjinfo.cache, true)
-
- // Adjust local variables if stack frame has been allocated.
- if locals.n > 0 {
- size := uintptr(locals.n) * goarch.PtrSize
- adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
- }
-
- // Adjust saved base pointer if there is one.
- // TODO what about arm64 frame pointer adjustment?
- if goarch.ArchFamily == goarch.AMD64 && frame.argp-frame.varp == 2*goarch.PtrSize {
- if stackDebug >= 3 {
- print(" saved bp\n")
- }
- if debugCheckBP {
- // Frame pointers should always point to the next higher frame on
- // the Go stack (or be nil, for the top frame on the stack).
- bp := *(*uintptr)(unsafe.Pointer(frame.varp))
- if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
- println("runtime: found invalid frame pointer")
- print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
- throw("bad frame pointer")
- }
- }
- adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
- }
-
- // Adjust arguments.
- if args.n > 0 {
- if stackDebug >= 3 {
- print(" args\n")
- }
- adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
- }
-
- // Adjust pointers in all stack objects (whether they are live or not).
- // See comments in mgcmark.go:scanframeworker.
- if frame.varp != 0 {
- for i := range objs {
- obj := &objs[i]
- off := obj.off
- base := frame.varp // locals base pointer
- if off >= 0 {
- base = frame.argp // arguments and return values base pointer
- }
- p := base + uintptr(off)
- if p < frame.sp {
- // Object hasn't been allocated in the frame yet.
- // (Happens when the stack bounds check fails and
- // we call into morestack.)
- continue
- }
- ptrdata := obj.ptrdata()
- gcdata := obj.gcdata()
- var s *mspan
- if obj.useGCProg() {
- // See comments in mgcmark.go:scanstack
- s = materializeGCProg(ptrdata, gcdata)
- gcdata = (*byte)(unsafe.Pointer(s.startAddr))
- }
- for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
- if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
- adjustpointer(adjinfo, unsafe.Pointer(p+i))
- }
- }
- if s != nil {
- dematerializeGCProg(s)
- }
- }
- }
-
- return true
-}
-
-func adjustctxt(gp *g, adjinfo *adjustinfo) {
- adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
- if !framepointer_enabled {
- return
- }
- if debugCheckBP {
- bp := gp.sched.bp
- if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
- println("runtime: found invalid top frame pointer")
- print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
- throw("bad top frame pointer")
- }
- }
- adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
-}
-
-func adjustdefers(gp *g, adjinfo *adjustinfo) {
- // Adjust pointers in the Defer structs.
- // We need to do this first because we need to adjust the
- // defer.link fields so we always work on the new stack.
- adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
- for d := gp._defer; d != nil; d = d.link {
- adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
- adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
- adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
- adjustpointer(adjinfo, unsafe.Pointer(&d.link))
- adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
- adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
- }
-}
-
-func adjustpanics(gp *g, adjinfo *adjustinfo) {
- // Panics are on stack and already adjusted.
- // Update pointer to head of list in G.
- adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
-}
-
-func adjustsudogs(gp *g, adjinfo *adjustinfo) {
- // the data elements pointed to by a SudoG structure
- // might be in the stack.
- for s := gp.waiting; s != nil; s = s.waitlink {
- adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
- }
-}
-
-func fillstack(stk stack, b byte) {
- for p := stk.lo; p < stk.hi; p++ {
- *(*byte)(unsafe.Pointer(p)) = b
- }
-}
-
-func findsghi(gp *g, stk stack) uintptr {
- var sghi uintptr
- for sg := gp.waiting; sg != nil; sg = sg.waitlink {
- p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
- if stk.lo <= p && p < stk.hi && p > sghi {
- sghi = p
- }
- }
- return sghi
-}
-
-// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
-// stack they refer to while synchronizing with concurrent channel
-// operations. It returns the number of bytes of stack copied.
-func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
- if gp.waiting == nil {
- return 0
- }
-
- // Lock channels to prevent concurrent send/receive.
- var lastc *hchan
- for sg := gp.waiting; sg != nil; sg = sg.waitlink {
- if sg.c != lastc {
- // There is a ranking cycle here between gscan bit and
- // hchan locks. Normally, we only allow acquiring hchan
- // locks and then getting a gscan bit. In this case, we
- // already have the gscan bit. We allow acquiring hchan
- // locks here as a special case, since a deadlock can't
- // happen because the G involved must already be
- // suspended. So, we get a special hchan lock rank here
- // that is lower than gscan, but doesn't allow acquiring
- // any other locks other than hchan.
- lockWithRank(&sg.c.lock, lockRankHchanLeaf)
- }
- lastc = sg.c
- }
-
- // Adjust sudogs.
- adjustsudogs(gp, adjinfo)
-
-	// Copy the part of the stack the sudogs point into
- // while holding the lock to prevent races on
- // send/receive slots.
- var sgsize uintptr
- if adjinfo.sghi != 0 {
- oldBot := adjinfo.old.hi - used
- newBot := oldBot + adjinfo.delta
- sgsize = adjinfo.sghi - oldBot
- memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
- }
-
- // Unlock channels.
- lastc = nil
- for sg := gp.waiting; sg != nil; sg = sg.waitlink {
- if sg.c != lastc {
- unlock(&sg.c.lock)
- }
- lastc = sg.c
- }
-
- return sgsize
-}
-
-// Copies gp's stack to a new stack of a different size.
-// Caller must have changed gp status to Gcopystack.
-func copystack(gp *g, newsize uintptr) {
- if gp.syscallsp != 0 {
- throw("stack growth not allowed in system call")
- }
- old := gp.stack
- if old.lo == 0 {
- throw("nil stackbase")
- }
- used := old.hi - gp.sched.sp
- // Add just the difference to gcController.addScannableStack.
- // g0 stacks never move, so this will never account for them.
- // It's also fine if we have no P, addScannableStack can deal with
- // that case.
- gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))
-
- // allocate new stack
- new := stackalloc(uint32(newsize))
- if stackPoisonCopy != 0 {
- fillstack(new, 0xfd)
- }
- if stackDebug >= 1 {
- print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
- }
-
- // Compute adjustment.
- var adjinfo adjustinfo
- adjinfo.old = old
- adjinfo.delta = new.hi - old.hi
-
- // Adjust sudogs, synchronizing with channel ops if necessary.
- ncopy := used
- if !gp.activeStackChans {
- if newsize < old.hi-old.lo && atomic.Load8(&gp.parkingOnChan) != 0 {
- // It's not safe for someone to shrink this stack while we're actively
- // parking on a channel, but it is safe to grow since we do that
- // ourselves and explicitly don't want to synchronize with channels
- // since we could self-deadlock.
- throw("racy sudog adjustment due to parking on channel")
- }
- adjustsudogs(gp, &adjinfo)
- } else {
-		// sudogs may be pointing into the stack and gp has
- // released channel locks, so other goroutines could
- // be writing to gp's stack. Find the highest such
- // pointer so we can handle everything there and below
- // carefully. (This shouldn't be far from the bottom
- // of the stack, so there's little cost in handling
- // everything below it carefully.)
- adjinfo.sghi = findsghi(gp, old)
-
- // Synchronize with channel ops and copy the part of
- // the stack they may interact with.
- ncopy -= syncadjustsudogs(gp, used, &adjinfo)
- }
-
- // Copy the stack (or the rest of it) to the new location
- memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)
-
- // Adjust remaining structures that have pointers into stacks.
- // We have to do most of these before we traceback the new
- // stack because gentraceback uses them.
- adjustctxt(gp, &adjinfo)
- adjustdefers(gp, &adjinfo)
- adjustpanics(gp, &adjinfo)
- if adjinfo.sghi != 0 {
- adjinfo.sghi += adjinfo.delta
- }
-
- // Swap out old stack for new one
- gp.stack = new
- gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
- gp.sched.sp = new.hi - used
- gp.stktopsp += adjinfo.delta
-
- // Adjust pointers in the new stack.
- gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)
-
- // free old stack
- if stackPoisonCopy != 0 {
- fillstack(old, 0xfc)
- }
- stackfree(old)
-}
-
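Since stacks grow downward, copystack anchors everything at the high end: the used bytes [old.hi-used, old.hi) land at [new.hi-used, new.hi), and every stack pointer moves by delta = new.hi - old.hi. The arithmetic in isolation, with invented addresses:

    package main

    import "fmt"

    func main() {
    	oldHi := uintptr(0x2000) // top of the old 4 KiB stack
    	newHi := uintptr(0x6000) // top of the new 8 KiB stack
    	sp := uintptr(0x1f00)    // current stack pointer in the old stack

    	used := oldHi - sp     // bytes in use, measured from the top
    	delta := newHi - oldHi // how far every stack pointer moves

    	fmt.Printf("copy %d bytes %#x..%#x -> %#x..%#x\n",
    		used, oldHi-used, oldHi, newHi-used, newHi)
    	fmt.Printf("new sp = %#x (old sp + delta = %#x)\n",
    		newHi-used, sp+delta)
    }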
-// round x up to a power of 2.
-func round2(x int32) int32 {
- s := uint(0)
- for 1<<s < x {
- s++
- }
- return 1 << s
-}
-
-// Called from runtime·morestack when more stack is needed.
-// Allocate larger stack and relocate to new stack.
-// Stack growth is multiplicative, for constant amortized cost.
-//
-// g->atomicstatus will be Grunning or Gscanrunning upon entry.
-// If the scheduler is trying to stop this g, then it will set preemptStop.
-//
-// This must be nowritebarrierrec because it can be called as part of
-// stack growth from other nowritebarrierrec functions, but the
-// compiler doesn't check this.
-//
-//go:nowritebarrierrec
-func newstack() {
- thisg := getg()
- // TODO: double check all gp. shouldn't be getg().
- if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
- throw("stack growth after fork")
- }
- if thisg.m.morebuf.g.ptr() != thisg.m.curg {
- print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
- morebuf := thisg.m.morebuf
- traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
- throw("runtime: wrong goroutine in newstack")
- }
-
- gp := thisg.m.curg
-
- if thisg.m.curg.throwsplit {
- // Update syscallsp, syscallpc in case traceback uses them.
- morebuf := thisg.m.morebuf
- gp.syscallsp = morebuf.sp
- gp.syscallpc = morebuf.pc
- pcname, pcoff := "(unknown)", uintptr(0)
- f := findfunc(gp.sched.pc)
- if f.valid() {
- pcname = funcname(f)
- pcoff = gp.sched.pc - f.entry()
- }
- print("runtime: newstack at ", pcname, "+", hex(pcoff),
- " sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
- "\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
- "\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
-
- thisg.m.traceback = 2 // Include runtime frames
- traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
- throw("runtime: stack split at bad time")
- }
-
- morebuf := thisg.m.morebuf
- thisg.m.morebuf.pc = 0
- thisg.m.morebuf.lr = 0
- thisg.m.morebuf.sp = 0
- thisg.m.morebuf.g = 0
-
- // NOTE: stackguard0 may change underfoot, if another thread
- // is about to try to preempt gp. Read it just once and use that same
- // value now and below.
- stackguard0 := atomic.Loaduintptr(&gp.stackguard0)
-
- // Be conservative about where we preempt.
- // We are interested in preempting user Go code, not runtime code.
- // If we're holding locks, mallocing, or preemption is disabled, don't
- // preempt.
- // This check is very early in newstack so that even the status change
- // from Grunning to Gwaiting and back doesn't happen in this case.
- // That status change by itself can be viewed as a small preemption,
- // because the GC might change Gwaiting to Gscanwaiting, and then
- // this goroutine has to wait for the GC to finish before continuing.
- // If the GC is in some way dependent on this goroutine (for example,
- // it needs a lock held by the goroutine), that small preemption turns
- // into a real deadlock.
- preempt := stackguard0 == stackPreempt
- if preempt {
- if !canPreemptM(thisg.m) {
- // Let the goroutine keep running for now.
- // gp->preempt is set, so it will be preempted next time.
- gp.stackguard0 = gp.stack.lo + _StackGuard
- gogo(&gp.sched) // never return
- }
- }
-
- if gp.stack.lo == 0 {
- throw("missing stack in newstack")
- }
- sp := gp.sched.sp
- if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
- // The call to morestack cost a word.
- sp -= goarch.PtrSize
- }
- if stackDebug >= 1 || sp < gp.stack.lo {
- print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
- "\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
- "\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
- }
- if sp < gp.stack.lo {
- print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
- print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
- throw("runtime: split stack overflow")
- }
-
- if preempt {
- if gp == thisg.m.g0 {
- throw("runtime: preempt g0")
- }
- if thisg.m.p == 0 && thisg.m.locks == 0 {
- throw("runtime: g is running but p is not")
- }
-
- if gp.preemptShrink {
- // We're at a synchronous safe point now, so
- // do the pending stack shrink.
- gp.preemptShrink = false
- shrinkstack(gp)
- }
-
- if gp.preemptStop {
- preemptPark(gp) // never returns
- }
-
- // Act like goroutine called runtime.Gosched.
- gopreempt_m(gp) // never return
- }
-
- // Allocate a bigger segment and move the stack.
- oldsize := gp.stack.hi - gp.stack.lo
- newsize := oldsize * 2
-
- // Make sure we grow at least as much as needed to fit the new frame.
- // (This is just an optimization - the caller of morestack will
- // recheck the bounds on return.)
- if f := findfunc(gp.sched.pc); f.valid() {
- max := uintptr(funcMaxSPDelta(f))
- needed := max + _StackGuard
- used := gp.stack.hi - gp.sched.sp
- for newsize-used < needed {
- newsize *= 2
- }
- }
-
- if stackguard0 == stackForceMove {
- // Forced stack movement used for debugging.
- // Don't double the stack (or we may quickly run out
- // if this is done repeatedly).
- newsize = oldsize
- }
-
- if newsize > maxstacksize || newsize > maxstackceiling {
- if maxstacksize < maxstackceiling {
- print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
- } else {
- print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
- }
- print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
- throw("stack overflow")
- }
-
- // The goroutine must be executing in order to call newstack,
- // so it must be Grunning (or Gscanrunning).
- casgstatus(gp, _Grunning, _Gcopystack)
-
- // The concurrent GC will not scan the stack while we are doing the copy since
- // the gp is in a Gcopystack status.
- copystack(gp, newsize)
- if stackDebug >= 1 {
- print("stack grow done\n")
- }
- casgstatus(gp, _Gcopystack, _Grunning)
- gogo(&gp.sched)
-}
-
-//go:nosplit
-func nilfunc() {
- *(*uint8)(nil) = 0
-}
-
-// adjust Gobuf as if it executed a call to fn
-// and then stopped before the first instruction in fn.
-func gostartcallfn(gobuf *gobuf, fv *funcval) {
- var fn unsafe.Pointer
- if fv != nil {
- fn = unsafe.Pointer(fv.fn)
- } else {
- fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
- }
- gostartcall(gobuf, fn, unsafe.Pointer(fv))
-}
-
-// isShrinkStackSafe returns whether it's safe to attempt to shrink
-// gp's stack. Shrinking the stack is only safe when we have precise
-// pointer maps for all frames on the stack.
-func isShrinkStackSafe(gp *g) bool {
- // We can't copy the stack if we're in a syscall.
- // The syscall might have pointers into the stack and
- // often we don't have precise pointer maps for the innermost
- // frames.
- //
- // We also can't copy the stack if we're at an asynchronous
- // safe-point because we don't have precise pointer maps for
- // all frames.
- //
- // We also can't *shrink* the stack in the window between the
- // goroutine calling gopark to park on a channel and
- // gp.activeStackChans being set.
- return gp.syscallsp == 0 && !gp.asyncSafePoint && atomic.Load8(&gp.parkingOnChan) == 0
-}
-
-// Maybe shrink the stack being used by gp.
-//
-// gp must be stopped and we must own its stack. It may be in
-// _Grunning, but only if this is our own user G.
-func shrinkstack(gp *g) {
- if gp.stack.lo == 0 {
- throw("missing stack in shrinkstack")
- }
- if s := readgstatus(gp); s&_Gscan == 0 {
- // We don't own the stack via _Gscan. We could still
- // own it if this is our own user G and we're on the
- // system stack.
- if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
- // We don't own the stack.
- throw("bad status in shrinkstack")
- }
- }
- if !isShrinkStackSafe(gp) {
- throw("shrinkstack at bad time")
- }
- // Check for self-shrinks while in a libcall. These may have
- // pointers into the stack disguised as uintptrs, but these
- // code paths should all be nosplit.
- if gp == getg().m.curg && gp.m.libcallsp != 0 {
- throw("shrinking stack in libcall")
- }
-
- if debug.gcshrinkstackoff > 0 {
- return
- }
- f := findfunc(gp.startpc)
- if f.valid() && f.funcID == funcID_gcBgMarkWorker {
- // We're not allowed to shrink the gcBgMarkWorker
- // stack (see gcBgMarkWorker for explanation).
- return
- }
-
- oldsize := gp.stack.hi - gp.stack.lo
- newsize := oldsize / 2
- // Don't shrink the allocation below the minimum-sized stack
- // allocation.
- if newsize < _FixedStack {
- return
- }
- // Compute how much of the stack is currently in use and only
- // shrink the stack if gp is using less than a quarter of its
- // current stack. The currently used stack includes everything
- // down to the SP plus the stack guard space that ensures
- // there's room for nosplit functions.
- avail := gp.stack.hi - gp.stack.lo
- if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
- return
- }
-
- if stackDebug > 0 {
- print("shrinking stack ", oldsize, "->", newsize, "\n")
- }
-
- copystack(gp, newsize)
-}
-
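The shrink decision above reduces to two comparisons: halving must not drop below _FixedStack, and the goroutine must be using less than a quarter of its stack, counting the nosplit guard. A sketch with assumed constants for a default Linux build:

    package main

    import "fmt"

    const (
    	fixedStack = 2048 // assumed _FixedStack (_StackSystem == 0)
    	stackLimit = 800  // assumed _StackLimit (928 - 0 - 128)
    )

    // shouldShrink mirrors shrinkstack's size heuristic.
    func shouldShrink(stackSize, used uintptr) (uintptr, bool) {
    	newsize := stackSize / 2
    	if newsize < fixedStack {
    		return 0, false // never below the minimum allocation
    	}
    	if used+stackLimit >= stackSize/4 {
    		return 0, false // more than a quarter in use
    	}
    	return newsize, true
    }

    func main() {
    	fmt.Println(shouldShrink(32768, 1024))  // 16384 true
    	fmt.Println(shouldShrink(32768, 16000)) // 0 false
    	fmt.Println(shouldShrink(2048, 64))     // 0 false (below minimum)
    }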
-// freeStackSpans frees unused stack spans at the end of GC.
-func freeStackSpans() {
- // Scan stack pools for empty stack spans.
- for order := range stackpool {
- lock(&stackpool[order].item.mu)
- list := &stackpool[order].item.span
- for s := list.first; s != nil; {
- next := s.next
- if s.allocCount == 0 {
- list.remove(s)
- s.manualFreeList = 0
- osStackFree(s)
- mheap_.freeManual(s, spanAllocStack)
- }
- s = next
- }
- unlock(&stackpool[order].item.mu)
- }
-
- // Free large stack spans.
- lock(&stackLarge.lock)
- for i := range stackLarge.free {
- for s := stackLarge.free[i].first; s != nil; {
- next := s.next
- stackLarge.free[i].remove(s)
- osStackFree(s)
- mheap_.freeManual(s, spanAllocStack)
- s = next
- }
- }
- unlock(&stackLarge.lock)
-}
-
-// getStackMap returns the locals and arguments live pointer maps, and
-// stack object list for frame.
-func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
- targetpc := frame.continpc
- if targetpc == 0 {
- // Frame is dead. Return empty bitvectors.
- return
- }
-
- f := frame.fn
- pcdata := int32(-1)
- if targetpc != f.entry() {
- // Back up to the CALL. If we're at the function entry
- // point, we want to use the entry map (-1), even if
- // the first instruction of the function changes the
- // stack map.
- targetpc--
- pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
- }
- if pcdata == -1 {
- // We do not have a valid pcdata value but there might be a
- // stackmap for this function. It is likely that we are looking
-		// at the function prologue; assume so and hope for the best.
- pcdata = 0
- }
-
- // Local variables.
- size := frame.varp - frame.sp
- var minsize uintptr
- switch goarch.ArchFamily {
- case goarch.ARM64:
- minsize = sys.StackAlign
- default:
- minsize = sys.MinFrameSize
- }
- if size > minsize {
- stackid := pcdata
- stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
- if stkmap == nil || stkmap.n <= 0 {
- print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
- throw("missing stackmap")
- }
- // If nbit == 0, there's no work to do.
- if stkmap.nbit > 0 {
- if stackid < 0 || stackid >= stkmap.n {
- // don't know where we are
- print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
- throw("bad symbol table")
- }
- locals = stackmapdata(stkmap, stackid)
- if stackDebug >= 3 && debug {
- print(" locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
- }
- } else if stackDebug >= 3 && debug {
- print(" no locals to adjust\n")
- }
- }
-
- // Arguments.
- if frame.arglen > 0 {
- if frame.argmap != nil {
- // argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
- // In this case, arglen specifies how much of the args section is actually live.
- // (It could be either all the args + results, or just the args.)
- args = *frame.argmap
- n := int32(frame.arglen / goarch.PtrSize)
- if n < args.n {
- args.n = n // Don't use more of the arguments than arglen.
- }
- } else {
- stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
- if stackmap == nil || stackmap.n <= 0 {
- print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
- throw("missing stackmap")
- }
- if pcdata < 0 || pcdata >= stackmap.n {
- // don't know where we are
- print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
- throw("bad symbol table")
- }
- if stackmap.nbit > 0 {
- args = stackmapdata(stackmap, pcdata)
- }
- }
- }
-
- // stack objects.
- if (GOARCH == "amd64" || GOARCH == "arm64" || GOARCH == "ppc64" || GOARCH == "ppc64le") && unsafe.Sizeof(abi.RegArgs{}) > 0 && frame.argmap != nil {
- // argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
- // We don't actually use argmap in this case, but we need to fake the stack object
- // record for these frames which contain an internal/abi.RegArgs at a hard-coded offset.
- // This offset matches the assembly code on amd64 and arm64.
- objs = methodValueCallFrameObjs[:]
- } else {
- p := funcdata(f, _FUNCDATA_StackObjects)
- if p != nil {
- n := *(*uintptr)(p)
- p = add(p, goarch.PtrSize)
- *(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
- // Note: the noescape above is needed to keep
- // getStackMap from "leaking param content:
- // frame". That leak propagates up to getgcmask, then
- // GCMask, then verifyGCInfo, which converts the stack
- // gcinfo tests into heap gcinfo tests :(
- }
- }
-
- return
-}
-
-var methodValueCallFrameObjs [1]stackObjectRecord // initialized in stackobjectinit
-
-func stkobjinit() {
- var abiRegArgsEface any = abi.RegArgs{}
- abiRegArgsType := efaceOf(&abiRegArgsEface)._type
- if abiRegArgsType.kind&kindGCProg != 0 {
- throw("abiRegArgsType needs GC Prog, update methodValueCallFrameObjs")
- }
- // Set methodValueCallFrameObjs[0].gcdataoff so that
- // stackObjectRecord.gcdata() will work correctly with it.
- ptr := uintptr(unsafe.Pointer(&methodValueCallFrameObjs[0]))
- var mod *moduledata
- for datap := &firstmoduledata; datap != nil; datap = datap.next {
- if datap.gofunc <= ptr && ptr < datap.end {
- mod = datap
- break
- }
- }
- if mod == nil {
- throw("methodValueCallFrameObjs is not in a module")
- }
- methodValueCallFrameObjs[0] = stackObjectRecord{
- off: -int32(alignUp(abiRegArgsType.size, 8)), // It's always the highest address local.
- size: int32(abiRegArgsType.size),
- _ptrdata: int32(abiRegArgsType.ptrdata),
- gcdataoff: uint32(uintptr(unsafe.Pointer(abiRegArgsType.gcdata)) - mod.rodata),
- }
-}
-
-// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
-// This record must match the generator code in cmd/compile/internal/liveness/plive.go:emitStackObjects.
-type stackObjectRecord struct {
- // offset in frame
- // if negative, offset from varp
- // if non-negative, offset from argp
- off int32
- size int32
-	_ptrdata  int32  // ptrdata, or -ptrdata if GC prog is used
- gcdataoff uint32 // offset to gcdata from moduledata.rodata
-}
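-
-// Sketch (not original code): how off would resolve to an address under the
-// semantics described above; varp and argp are hypothetical frame pointers.
-func stackObjAddr(off int32, varp, argp uintptr) uintptr {
-	if off >= 0 {
-		return argp + uintptr(off)
-	}
-	return varp + uintptr(int64(off)) // negative offset from varp, two's-complement add
-}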
-
-func (r *stackObjectRecord) useGCProg() bool {
- return r._ptrdata < 0
-}
-
-func (r *stackObjectRecord) ptrdata() uintptr {
- x := r._ptrdata
- if x < 0 {
- return uintptr(-x)
- }
- return uintptr(x)
-}
-
-// gcdata returns pointer map or GC prog of the type.
-func (r *stackObjectRecord) gcdata() *byte {
- ptr := uintptr(unsafe.Pointer(r))
- var mod *moduledata
- for datap := &firstmoduledata; datap != nil; datap = datap.next {
- if datap.gofunc <= ptr && ptr < datap.end {
- mod = datap
- break
- }
- }
- // If you get a panic here due to a nil mod,
- // you may have made a copy of a stackObjectRecord.
- // You must use the original pointer.
- res := mod.rodata + uintptr(r.gcdataoff)
- return (*byte)(unsafe.Pointer(res))
-}
-
-// This is exported as ABI0 via linkname so obj can call it.
-//
-//go:nosplit
-//go:linkname morestackc
-func morestackc() {
- throw("attempt to execute system stack code on user stack")
-}
diff --git a/contrib/go/_std_1.18/src/runtime/string.go b/contrib/go/_std_1.18/src/runtime/string.go
deleted file mode 100644
index 980a9866e6..0000000000
--- a/contrib/go/_std_1.18/src/runtime/string.go
+++ /dev/null
@@ -1,495 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/abi"
- "internal/bytealg"
- "internal/goarch"
- "unsafe"
-)
-
-// The constant is known to the compiler.
-// There is no fundamental theory behind this number.
-const tmpStringBufSize = 32
-
-type tmpBuf [tmpStringBufSize]byte
-
-// concatstrings implements a Go string concatenation x+y+z+...
-// The operands are passed in the slice a.
-// If buf != nil, the compiler has determined that the result does not
-// escape the calling function, so the string data can be stored in buf
-// if small enough.
-func concatstrings(buf *tmpBuf, a []string) string {
- idx := 0
- l := 0
- count := 0
- for i, x := range a {
- n := len(x)
- if n == 0 {
- continue
- }
- if l+n < l {
- throw("string concatenation too long")
- }
- l += n
- count++
- idx = i
- }
- if count == 0 {
- return ""
- }
-
- // If there is just one string and either it is not on the stack
- // or our result does not escape the calling frame (buf != nil),
- // then we can return that string directly.
- if count == 1 && (buf != nil || !stringDataOnStack(a[idx])) {
- return a[idx]
- }
- s, b := rawstringtmp(buf, l)
- for _, x := range a {
- copy(b, x)
- b = b[len(x):]
- }
- return s
-}
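-
-// For context (a sketch, not original code): a user-level concatenation such as
-//
-//	s := x + y + z
-//
-// is lowered by the compiler to a call like concatstring3(buf, x, y, z),
-// where buf points at a stack tmpBuf when escape analysis proves the result
-// does not escape, and is nil otherwise.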
-
-func concatstring2(buf *tmpBuf, a0, a1 string) string {
- return concatstrings(buf, []string{a0, a1})
-}
-
-func concatstring3(buf *tmpBuf, a0, a1, a2 string) string {
- return concatstrings(buf, []string{a0, a1, a2})
-}
-
-func concatstring4(buf *tmpBuf, a0, a1, a2, a3 string) string {
- return concatstrings(buf, []string{a0, a1, a2, a3})
-}
-
-func concatstring5(buf *tmpBuf, a0, a1, a2, a3, a4 string) string {
- return concatstrings(buf, []string{a0, a1, a2, a3, a4})
-}
-
-// slicebytetostring converts a byte slice to a string.
-// It is inserted by the compiler into generated code.
-// ptr is a pointer to the first element of the slice;
-// n is the length of the slice.
-// buf is a fixed-size buffer for the result;
-// it is not nil if the result does not escape.
-func slicebytetostring(buf *tmpBuf, ptr *byte, n int) (str string) {
- if n == 0 {
- // Turns out to be a relatively common case.
-		// Consider that you want to parse out data between parens in "foo()bar":
-		// you find the indices and convert the subslice to a string.
- return ""
- }
- if raceenabled {
- racereadrangepc(unsafe.Pointer(ptr),
- uintptr(n),
- getcallerpc(),
- abi.FuncPCABIInternal(slicebytetostring))
- }
- if msanenabled {
- msanread(unsafe.Pointer(ptr), uintptr(n))
- }
- if asanenabled {
- asanread(unsafe.Pointer(ptr), uintptr(n))
- }
- if n == 1 {
- p := unsafe.Pointer(&staticuint64s[*ptr])
- if goarch.BigEndian {
- p = add(p, 7)
- }
- stringStructOf(&str).str = p
- stringStructOf(&str).len = 1
- return
- }
-
- var p unsafe.Pointer
- if buf != nil && n <= len(buf) {
- p = unsafe.Pointer(buf)
- } else {
- p = mallocgc(uintptr(n), nil, false)
- }
- stringStructOf(&str).str = p
- stringStructOf(&str).len = n
- memmove(p, unsafe.Pointer(ptr), uintptr(n))
- return
-}
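-
-// For context (a sketch, not original code): a conversion in user code such as
-//
-//	s := string(b) // b is a []byte
-//
-// compiles to a slicebytetostring call. The n == 1 path above points the
-// result into the shared staticuint64s table, so one-byte strings never
-// allocate; on big-endian machines the byte lives in the last of the eight
-// bytes of the uint64, hence add(p, 7).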
-
-// stringDataOnStack reports whether the string's data is
-// stored on the current goroutine's stack.
-func stringDataOnStack(s string) bool {
- ptr := uintptr(stringStructOf(&s).str)
- stk := getg().stack
- return stk.lo <= ptr && ptr < stk.hi
-}
-
-func rawstringtmp(buf *tmpBuf, l int) (s string, b []byte) {
- if buf != nil && l <= len(buf) {
- b = buf[:l]
- s = slicebytetostringtmp(&b[0], len(b))
- } else {
- s, b = rawstring(l)
- }
- return
-}
-
-// slicebytetostringtmp returns a "string" referring to the actual []byte bytes.
-//
-// Callers need to ensure that the returned string will not be used after
-// the calling goroutine modifies the original slice or synchronizes with
-// another goroutine.
-//
-// The function is called only when instrumenting;
-// otherwise it is intrinsified by the compiler.
-//
-// Some internal compiler optimizations use this function.
-// - Used for m[T1{... Tn{..., string(k), ...} ...}] and m[string(k)]
-// where k is []byte, T1 to Tn is a nesting of struct and array literals.
-// - Used for "<"+string(b)+">" concatenation where b is []byte.
-// - Used for string(b)=="foo" comparison where b is []byte.
-func slicebytetostringtmp(ptr *byte, n int) (str string) {
- if raceenabled && n > 0 {
- racereadrangepc(unsafe.Pointer(ptr),
- uintptr(n),
- getcallerpc(),
- abi.FuncPCABIInternal(slicebytetostringtmp))
- }
- if msanenabled && n > 0 {
- msanread(unsafe.Pointer(ptr), uintptr(n))
- }
- if asanenabled && n > 0 {
- asanread(unsafe.Pointer(ptr), uintptr(n))
- }
- stringStructOf(&str).str = unsafe.Pointer(ptr)
- stringStructOf(&str).len = n
- return
-}
-
-func stringtoslicebyte(buf *tmpBuf, s string) []byte {
- var b []byte
- if buf != nil && len(s) <= len(buf) {
- *buf = tmpBuf{}
- b = buf[:len(s)]
- } else {
- b = rawbyteslice(len(s))
- }
- copy(b, s)
- return b
-}
-
-func stringtoslicerune(buf *[tmpStringBufSize]rune, s string) []rune {
- // two passes.
- // unlike slicerunetostring, no race because strings are immutable.
- n := 0
- for range s {
- n++
- }
-
- var a []rune
- if buf != nil && n <= len(buf) {
- *buf = [tmpStringBufSize]rune{}
- a = buf[:n]
- } else {
- a = rawruneslice(n)
- }
-
- n = 0
- for _, r := range s {
- a[n] = r
- n++
- }
- return a
-}
-
-func slicerunetostring(buf *tmpBuf, a []rune) string {
- if raceenabled && len(a) > 0 {
- racereadrangepc(unsafe.Pointer(&a[0]),
- uintptr(len(a))*unsafe.Sizeof(a[0]),
- getcallerpc(),
- abi.FuncPCABIInternal(slicerunetostring))
- }
- if msanenabled && len(a) > 0 {
- msanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
- }
- if asanenabled && len(a) > 0 {
- asanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
- }
- var dum [4]byte
- size1 := 0
- for _, r := range a {
- size1 += encoderune(dum[:], r)
- }
- s, b := rawstringtmp(buf, size1+3)
- size2 := 0
- for _, r := range a {
- // check for race
- if size2 >= size1 {
- break
- }
- size2 += encoderune(b[size2:], r)
- }
- return s[:size2]
-}
-
-type stringStruct struct {
- str unsafe.Pointer
- len int
-}
-
-// Variant with *byte pointer type for DWARF debugging.
-type stringStructDWARF struct {
- str *byte
- len int
-}
-
-func stringStructOf(sp *string) *stringStruct {
- return (*stringStruct)(unsafe.Pointer(sp))
-}
-
-func intstring(buf *[4]byte, v int64) (s string) {
- var b []byte
- if buf != nil {
- b = buf[:]
- s = slicebytetostringtmp(&b[0], len(b))
- } else {
- s, b = rawstring(4)
- }
- if int64(rune(v)) != v {
- v = runeError
- }
- n := encoderune(b, rune(v))
- return s[:n]
-}
-
-// rawstring allocates storage for a new string. The returned
-// string and byte slice both refer to the same storage.
-// The storage is not zeroed. Callers should use
-// b to set the string contents and then drop b.
-func rawstring(size int) (s string, b []byte) {
- p := mallocgc(uintptr(size), nil, false)
-
- stringStructOf(&s).str = p
- stringStructOf(&s).len = size
-
- *(*slice)(unsafe.Pointer(&b)) = slice{p, size, size}
-
- return
-}
-
-// rawbyteslice allocates a new byte slice. The byte slice is not zeroed.
-func rawbyteslice(size int) (b []byte) {
- cap := roundupsize(uintptr(size))
- p := mallocgc(cap, nil, false)
- if cap != uintptr(size) {
- memclrNoHeapPointers(add(p, uintptr(size)), cap-uintptr(size))
- }
-
- *(*slice)(unsafe.Pointer(&b)) = slice{p, size, int(cap)}
- return
-}
-
-// rawruneslice allocates a new rune slice. The rune slice is not zeroed.
-func rawruneslice(size int) (b []rune) {
- if uintptr(size) > maxAlloc/4 {
- throw("out of memory")
- }
- mem := roundupsize(uintptr(size) * 4)
- p := mallocgc(mem, nil, false)
- if mem != uintptr(size)*4 {
- memclrNoHeapPointers(add(p, uintptr(size)*4), mem-uintptr(size)*4)
- }
-
- *(*slice)(unsafe.Pointer(&b)) = slice{p, size, int(mem / 4)}
- return
-}
-
-// used by cmd/cgo
-func gobytes(p *byte, n int) (b []byte) {
- if n == 0 {
- return make([]byte, 0)
- }
-
- if n < 0 || uintptr(n) > maxAlloc {
- panic(errorString("gobytes: length out of range"))
- }
-
- bp := mallocgc(uintptr(n), nil, false)
- memmove(bp, unsafe.Pointer(p), uintptr(n))
-
- *(*slice)(unsafe.Pointer(&b)) = slice{bp, n, n}
- return
-}
-
-// This is exported via linkname to assembly in syscall (for Plan9).
-//go:linkname gostring
-func gostring(p *byte) string {
- l := findnull(p)
- if l == 0 {
- return ""
- }
- s, b := rawstring(l)
- memmove(unsafe.Pointer(&b[0]), unsafe.Pointer(p), uintptr(l))
- return s
-}
-
-func gostringn(p *byte, l int) string {
- if l == 0 {
- return ""
- }
- s, b := rawstring(l)
- memmove(unsafe.Pointer(&b[0]), unsafe.Pointer(p), uintptr(l))
- return s
-}
-
-func hasPrefix(s, prefix string) bool {
- return len(s) >= len(prefix) && s[:len(prefix)] == prefix
-}
-
-const (
- maxUint = ^uint(0)
- maxInt = int(maxUint >> 1)
-)
-
-// atoi parses an int from a string s.
-// The bool result reports whether s is a number
-// representable by a value of type int.
-func atoi(s string) (int, bool) {
- if s == "" {
- return 0, false
- }
-
- neg := false
- if s[0] == '-' {
- neg = true
- s = s[1:]
- }
-
- un := uint(0)
- for i := 0; i < len(s); i++ {
- c := s[i]
- if c < '0' || c > '9' {
- return 0, false
- }
- if un > maxUint/10 {
- // overflow
- return 0, false
- }
- un *= 10
- un1 := un + uint(c) - '0'
- if un1 < un {
- // overflow
- return 0, false
- }
- un = un1
- }
-
- if !neg && un > uint(maxInt) {
- return 0, false
- }
- if neg && un > uint(maxInt)+1 {
- return 0, false
- }
-
- n := int(un)
- if neg {
- n = -n
- }
-
- return n, true
-}
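-
-// Usage sketch (not part of the original file): atoi accepts exactly the
-// values representable as int and rejects everything else.
-//
-//	n, ok := atoi("-42")                 // n == -42, ok == true
-//	_, ok = atoi("18446744073709551615") // ok == false: overflows int
-//	_, ok = atoi("12a")                  // ok == false: non-digit byte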
-
-// atoi32 is like atoi but for integers
-// that fit into an int32.
-func atoi32(s string) (int32, bool) {
- if n, ok := atoi(s); n == int(int32(n)) {
- return int32(n), ok
- }
- return 0, false
-}
-
-//go:nosplit
-func findnull(s *byte) int {
- if s == nil {
- return 0
- }
-
- // Avoid IndexByteString on Plan 9 because it uses SSE instructions
- // on x86 machines, and those are classified as floating point instructions,
- // which are illegal in a note handler.
- if GOOS == "plan9" {
- p := (*[maxAlloc/2 - 1]byte)(unsafe.Pointer(s))
- l := 0
- for p[l] != 0 {
- l++
- }
- return l
- }
-
- // pageSize is the unit we scan at a time looking for NULL.
- // It must be the minimum page size for any architecture Go
- // runs on. It's okay (just a minor performance loss) if the
- // actual system page size is larger than this value.
- const pageSize = 4096
-
- offset := 0
- ptr := unsafe.Pointer(s)
- // IndexByteString uses wide reads, so we need to be careful
- // with page boundaries. Call IndexByteString on
- // [ptr, endOfPage) interval.
- safeLen := int(pageSize - uintptr(ptr)%pageSize)
-
- for {
- t := *(*string)(unsafe.Pointer(&stringStruct{ptr, safeLen}))
- // Check one page at a time.
- if i := bytealg.IndexByteString(t, 0); i != -1 {
- return offset + i
- }
- // Move to next page
- ptr = unsafe.Pointer(uintptr(ptr) + uintptr(safeLen))
- offset += safeLen
- safeLen = pageSize
- }
-}
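-
-// Worked example (not original code) of the page-boundary arithmetic above:
-// with pageSize = 4096 and a string starting at address 0x1ffa, the first
-// wide read may only cover the 6 bytes up to the page edge,
-//
-//	safeLen = 4096 - 0x1ffa%4096 = 4096 - 0xffa = 6
-//
-// so IndexByteString scans [0x1ffa, 0x2000) first and whole pages after that.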
-
-func findnullw(s *uint16) int {
- if s == nil {
- return 0
- }
- p := (*[maxAlloc/2/2 - 1]uint16)(unsafe.Pointer(s))
- l := 0
- for p[l] != 0 {
- l++
- }
- return l
-}
-
-//go:nosplit
-func gostringnocopy(str *byte) string {
- ss := stringStruct{str: unsafe.Pointer(str), len: findnull(str)}
- s := *(*string)(unsafe.Pointer(&ss))
- return s
-}
-
-func gostringw(strw *uint16) string {
- var buf [8]byte
- str := (*[maxAlloc/2/2 - 1]uint16)(unsafe.Pointer(strw))
- n1 := 0
- for i := 0; str[i] != 0; i++ {
- n1 += encoderune(buf[:], rune(str[i]))
- }
- s, b := rawstring(n1 + 4)
- n2 := 0
- for i := 0; str[i] != 0; i++ {
- // check for race
- if n2 >= n1 {
- break
- }
- n2 += encoderune(b[n2:], rune(str[i]))
- }
- b[n2] = 0 // for luck
- return s[:n2]
-}
diff --git a/contrib/go/_std_1.18/src/runtime/stubs.go b/contrib/go/_std_1.18/src/runtime/stubs.go
deleted file mode 100644
index ad78363bb6..0000000000
--- a/contrib/go/_std_1.18/src/runtime/stubs.go
+++ /dev/null
@@ -1,437 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/abi"
- "internal/goarch"
- "internal/goexperiment"
- "runtime/internal/math"
- "unsafe"
-)
-
-// Should be a built-in for unsafe.Pointer?
-//go:nosplit
-func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
- return unsafe.Pointer(uintptr(p) + x)
-}
-
-// getg returns the pointer to the current g.
-// The compiler rewrites calls to this function into instructions
-// that fetch the g directly (from TLS or from the dedicated register).
-func getg() *g
-
-// mcall switches from the g to the g0 stack and invokes fn(g),
-// where g is the goroutine that made the call.
-// mcall saves g's current PC/SP in g->sched so that it can be restored later.
-// It is up to fn to arrange for that later execution, typically by recording
-// g in a data structure, causing something to call ready(g) later.
-// mcall returns to the original goroutine g later, when g has been rescheduled.
-// fn must not return at all; typically it ends by calling schedule, to let the m
-// run other goroutines.
-//
-// mcall can only be called from g stacks (not g0, not gsignal).
-//
-// This must NOT be go:noescape: if fn is a stack-allocated closure,
-// fn puts g on a run queue, and g executes before fn returns, the
-// closure will be invalidated while it is still executing.
-func mcall(fn func(*g))
-
-// systemstack runs fn on a system stack.
-// If systemstack is called from the per-OS-thread (g0) stack, or
-// if systemstack is called from the signal handling (gsignal) stack,
-// systemstack calls fn directly and returns.
-// Otherwise, systemstack is being called from the limited stack
-// of an ordinary goroutine. In this case, systemstack switches
-// to the per-OS-thread stack, calls fn, and switches back.
-// It is common to use a func literal as the argument, in order
-// to share inputs and outputs with the code around the call
-// to system stack:
-//
-// ... set up y ...
-// systemstack(func() {
-// x = bigcall(y)
-// })
-// ... use x ...
-//
-//go:noescape
-func systemstack(fn func())
-
-var badsystemstackMsg = "fatal: systemstack called from unexpected goroutine"
-
-//go:nosplit
-//go:nowritebarrierrec
-func badsystemstack() {
- sp := stringStructOf(&badsystemstackMsg)
- write(2, sp.str, int32(sp.len))
-}
-
-// memclrNoHeapPointers clears n bytes starting at ptr.
-//
-// Usually you should use typedmemclr. memclrNoHeapPointers should be
-// used only when the caller knows that *ptr contains no heap pointers
-// because either:
-//
-// *ptr is initialized memory and its type is pointer-free, or
-//
-// *ptr is uninitialized memory (e.g., memory that's being reused
-// for a new allocation) and hence contains only "junk".
-//
-// memclrNoHeapPointers ensures that if ptr is pointer-aligned, and n
-// is a multiple of the pointer size, then any pointer-aligned,
-// pointer-sized portion is cleared atomically. Despite the function
-// name, this is necessary because this function is the underlying
-// implementation of typedmemclr and memclrHasPointers. See the doc of
-// memmove for more details.
-//
-// The (CPU-specific) implementations of this function are in memclr_*.s.
-//
-//go:noescape
-func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
-
-//go:linkname reflect_memclrNoHeapPointers reflect.memclrNoHeapPointers
-func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
- memclrNoHeapPointers(ptr, n)
-}
-
-// memmove copies n bytes from "from" to "to".
-//
-// memmove ensures that any pointer in "from" is written to "to" with
-// an indivisible write, so that racy reads cannot observe a
-// half-written pointer. This is necessary to prevent the garbage
-// collector from observing invalid pointers, and differs from memmove
-// in unmanaged languages. However, memmove is only required to do
-// this if "from" and "to" may contain pointers, which can only be the
-// case if "from", "to", and "n" are all be word-aligned.
-//
-// Implementations are in memmove_*.s.
-//
-//go:noescape
-func memmove(to, from unsafe.Pointer, n uintptr)
-
-// Outside assembly calls memmove. Make sure it has ABI wrappers.
-//go:linkname memmove
-
-//go:linkname reflect_memmove reflect.memmove
-func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
- memmove(to, from, n)
-}
-
-// exported value for testing
-const hashLoad = float32(loadFactorNum) / float32(loadFactorDen)
-
-//go:nosplit
-func fastrand() uint32 {
- mp := getg().m
- // Implement wyrand: https://github.com/wangyi-fudan/wyhash
-	// Only platforms where math.Mul64 can be lowered
-	// by the compiler should be in this list.
- if goarch.IsAmd64|goarch.IsArm64|goarch.IsPpc64|
- goarch.IsPpc64le|goarch.IsMips64|goarch.IsMips64le|
- goarch.IsS390x|goarch.IsRiscv64 == 1 {
- mp.fastrand += 0xa0761d6478bd642f
- hi, lo := math.Mul64(mp.fastrand, mp.fastrand^0xe7037ed1a0b428db)
- return uint32(hi ^ lo)
- }
-
- // Implement xorshift64+: 2 32-bit xorshift sequences added together.
- // Shift triplet [17,7,16] was calculated as indicated in Marsaglia's
- // Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
- // This generator passes the SmallCrush suite, part of TestU01 framework:
- // http://simul.iro.umontreal.ca/testu01/tu01.html
- t := (*[2]uint32)(unsafe.Pointer(&mp.fastrand))
- s1, s0 := t[0], t[1]
- s1 ^= s1 << 17
- s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
- t[0], t[1] = s0, s1
- return s0 + s1
-}
-
-//go:nosplit
-func fastrandn(n uint32) uint32 {
- // This is similar to fastrand() % n, but faster.
- // See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
- return uint32(uint64(fastrand()) * uint64(n) >> 32)
-}
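-
-// Worked example (not original code) of the multiply-shift reduction above.
-// For n = 6, (uint64(x)*6)>>32 maps the 2^32 possible samples x onto [0, 6)
-// nearly uniformly:
-//
-//	x = 0x00000000 -> 0
-//	x = 0x80000000 -> 3 // 6*2^31 >> 32
-//	x = 0xFFFFFFFF -> 5 // (6*2^32 - 6) >> 32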
-
-//go:linkname sync_fastrandn sync.fastrandn
-func sync_fastrandn(n uint32) uint32 { return fastrandn(n) }
-
-//go:linkname net_fastrand net.fastrand
-func net_fastrand() uint32 { return fastrand() }
-
-//go:linkname os_fastrand os.fastrand
-func os_fastrand() uint32 { return fastrand() }
-
-// in internal/bytealg/equal_*.s
-//go:noescape
-func memequal(a, b unsafe.Pointer, size uintptr) bool
-
-// noescape hides a pointer from escape analysis. noescape is
-// the identity function but escape analysis doesn't think the
-// output depends on the input. noescape is inlined and currently
-// compiles down to zero instructions.
-// USE CAREFULLY!
-//go:nosplit
-func noescape(p unsafe.Pointer) unsafe.Pointer {
- x := uintptr(p)
- return unsafe.Pointer(x ^ 0)
-}
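-
-// Typical use (a sketch, not original code): hide a pointer handed to code
-// that escape analysis cannot see but that is known not to retain it. The
-// callee here is hypothetical.
-//
-//	p := noescape(unsafe.Pointer(&buf))
-//	someAssemblyStub(p) // must not keep p past the call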
-
-// Not all cgocallback frames are actually cgocallback,
-// so not all have these arguments. Mark them uintptr so that the GC
-// does not misinterpret memory when the arguments are not present.
-// cgocallback is not called from Go, only from crosscall2.
-// This in turn calls cgocallbackg, which is where we'll find
-// pointer-declared arguments.
-func cgocallback(fn, frame, ctxt uintptr)
-
-func gogo(buf *gobuf)
-
-func asminit()
-func setg(gg *g)
-func breakpoint()
-
-// reflectcall calls fn with arguments described by stackArgs, stackArgsSize,
-// frameSize, and regArgs.
-//
-// Arguments passed on the stack and space for return values passed on the stack
-// must be laid out at the space pointed to by stackArgs (with total length
-// stackArgsSize) according to the ABI.
-//
-// stackRetOffset must be some value <= stackArgsSize that indicates the
-// offset within stackArgs where the return value space begins.
-//
-// frameSize is the total size of the argument frame at stackArgs and must
-// therefore be >= stackArgsSize. It must include additional space for spilling
-// register arguments for stack growth and preemption.
-//
-// TODO(mknyszek): Once we don't need the additional spill space, remove frameSize,
-// since frameSize will be redundant with stackArgsSize.
-//
-// Arguments passed in registers must be laid out in regArgs according to the ABI.
-// regArgs will hold any return values passed in registers after the call.
-//
-// reflectcall copies stack arguments from stackArgs to the goroutine stack, and
-// then copies back stackArgsSize-stackRetOffset bytes back to the return space
-// in stackArgs once fn has completed. It also "unspills" argument registers from
-// regArgs before calling fn, and spills them back into regArgs immediately
-// following the call to fn. If there are results being returned on the stack,
-// the caller should pass the argument frame type as stackArgsType so that
-// reflectcall can execute appropriate write barriers during the copy.
-//
-// reflectcall expects regArgs.ReturnIsPtr to be populated indicating which
-// registers on the return path will contain Go pointers. It will then store
-// these pointers in regArgs.Ptrs such that they are visible to the GC.
-//
-// Package reflect passes a frame type. In package runtime, there is only
-// one call that copies results back, in callbackWrap in syscall_windows.go, and it
-// does NOT pass a frame type, meaning there are no write barriers invoked. See that
-// call site for justification.
-//
-// Package reflect accesses this symbol through a linkname.
-//
-// Arguments passed through to reflectcall do not escape. The type is used
-// only in a very limited callee of reflectcall, the stackArgs are copied, and
-// regArgs is only used in the reflectcall frame.
-//go:noescape
-func reflectcall(stackArgsType *_type, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-
-func procyield(cycles uint32)
-
-type neverCallThisFunction struct{}
-
-// goexit is the return stub at the top of every goroutine call stack.
-// Each goroutine stack is constructed as if goexit called the
-// goroutine's entry point function, so that when the entry point
-// function returns, it will return to goexit, which will call goexit1
-// to perform the actual exit.
-//
-// This function must never be called directly. Call goexit1 instead.
-// gentraceback assumes that goexit terminates the stack. A direct
-// call on the stack will cause gentraceback to stop walking the stack
-// prematurely and if there is leftover state it may panic.
-func goexit(neverCallThisFunction)
-
-// publicationBarrier performs a store/store barrier (a "publication"
-// or "export" barrier). Some form of synchronization is required
-// between initializing an object and making that object accessible to
-// another processor. Without synchronization, the initialization
-// writes and the "publication" write may be reordered, allowing the
-// other processor to follow the pointer and observe an uninitialized
-// object. In general, higher-level synchronization should be used,
-// such as locking or an atomic pointer write. publicationBarrier is
-// for when those aren't an option, such as in the implementation of
-// the memory manager.
-//
-// There's no corresponding barrier for the read side because the read
-// side naturally has a data dependency order. All architectures that
-// Go supports or seems likely to ever support automatically enforce
-// data dependency ordering.
-func publicationBarrier()
-
-// getcallerpc returns the program counter (PC) of its caller's caller.
-// getcallersp returns the stack pointer (SP) of its caller's caller.
-// The implementation may be a compiler intrinsic; there is not
-// necessarily code implementing this on every platform.
-//
-// For example:
-//
-// func f(arg1, arg2, arg3 int) {
-// pc := getcallerpc()
-// sp := getcallersp()
-// }
-//
-// These two lines find the PC and SP immediately following
-// the call to f (where f will return).
-//
-// The call to getcallerpc and getcallersp must be done in the
-// frame being asked about.
-//
-// The result of getcallersp is correct at the time of the return,
-// but it may be invalidated by any subsequent call to a function
-// that might relocate the stack in order to grow or shrink it.
-// A general rule is that the result of getcallersp should be used
-// immediately and can only be passed to nosplit functions.
-
-//go:noescape
-func getcallerpc() uintptr
-
-//go:noescape
-func getcallersp() uintptr // implemented as an intrinsic on all platforms
-
-// getclosureptr returns the pointer to the current closure.
-// getclosureptr can only be used in an assignment statement
-// at the entry of a function. Moreover, the go:nosplit directive
-// must be specified at the declaration of the caller function,
-// so that the function prologue does not clobber the closure register.
-// For example:
-//
-// //go:nosplit
-// func f(arg1, arg2, arg3 int) {
-// dx := getclosureptr()
-// }
-//
-// The compiler rewrites calls to this function into instructions that fetch the
-// pointer from a well-known register (DX on x86 architecture, etc.) directly.
-func getclosureptr() uintptr
-
-//go:noescape
-func asmcgocall(fn, arg unsafe.Pointer) int32
-
-func morestack()
-func morestack_noctxt()
-func rt0_go()
-
-// return0 is a stub used to return 0 from deferproc.
-// It is called at the very end of deferproc to signal
-// the calling Go function that it should not jump
-// to deferreturn.
-// in asm_*.s
-func return0()
-
-// in asm_*.s
-// not called directly; definitions here supply type information for traceback.
-// These must have the same signature (arg pointer map) as reflectcall.
-func call16(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call32(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call64(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call128(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call256(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call512(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call1024(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call2048(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call4096(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call8192(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call16384(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call32768(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call65536(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call131072(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call262144(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call524288(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call1048576(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call2097152(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call4194304(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call8388608(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call16777216(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call33554432(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call67108864(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call134217728(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call268435456(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call536870912(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func call1073741824(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-
-func systemstack_switch()
-
-// alignUp rounds n up to a multiple of a. a must be a power of 2.
-func alignUp(n, a uintptr) uintptr {
- return (n + a - 1) &^ (a - 1)
-}
-
-// alignDown rounds n down to a multiple of a. a must be a power of 2.
-func alignDown(n, a uintptr) uintptr {
- return n &^ (a - 1)
-}
-
-// divRoundUp returns ceil(n / a).
-func divRoundUp(n, a uintptr) uintptr {
- // a is generally a power of two. This will get inlined and
- // the compiler will optimize the division.
- return (n + a - 1) / a
-}
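-
-// Worked examples (not original code) for the three helpers above, with a = 8:
-//
-//	alignUp(13, 8)    == 16 // (13+7) &^ 7
-//	alignDown(13, 8)  == 8  // 13 &^ 7
-//	divRoundUp(13, 8) == 2  // (13+7)/8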
-
-// checkASM reports whether assembly runtime checks have passed.
-func checkASM() bool
-
-func memequal_varlen(a, b unsafe.Pointer) bool
-
-// bool2int returns 0 if x is false or 1 if x is true.
-func bool2int(x bool) int {
- // Avoid branches. In the SSA compiler, this compiles to
- // exactly what you would want it to.
- return int(uint8(*(*uint8)(unsafe.Pointer(&x))))
-}
-
-// abort crashes the runtime in situations where even throw might not
-// work. In general it should do something a debugger will recognize
-// (e.g., an INT3 on x86). A crash in abort is recognized by the
-// signal handler, which will attempt to tear down the runtime
-// immediately.
-func abort()
-
-// Called from compiled code; declared for vet; do NOT call from Go.
-func gcWriteBarrier()
-func duffzero()
-func duffcopy()
-
-// Called from linker-generated .initarray; declared for go vet; do NOT call from Go.
-func addmoduledata()
-
-// Injected by the signal handler for panicking signals.
-// Initializes any registers that have fixed meaning at calls but
-// are scratch in bodies and calls sigpanic.
-// On many platforms it just jumps to sigpanic.
-func sigpanic0()
-
-// intArgRegs is used by the various register assignment
-// algorithm implementations in the runtime. These include:
-// - Finalizers (mfinal.go)
-// - Windows callbacks (syscall_windows.go)
-//
-// Both are stripped-down versions of the algorithm since they
-// only have to deal with a subset of cases (finalizers only
-// take a pointer or interface argument, Go Windows callbacks
-// don't support floating point).
-//
-// It should be modified with care and is generally only
-// modified when testing this package.
-//
-// It should never be set higher than its internal/abi
-// constant counterparts, because the system relies on a
-// structure that is at least large enough to hold the
-// registers the system supports.
-//
-// Protected by finlock.
-var intArgRegs = abi.IntArgRegs * (goexperiment.RegabiArgsInt | goarch.IsAmd64)
diff --git a/contrib/go/_std_1.18/src/runtime/stubs2.go b/contrib/go/_std_1.18/src/runtime/stubs2.go
deleted file mode 100644
index 9aa965454d..0000000000
--- a/contrib/go/_std_1.18/src/runtime/stubs2.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !aix && !darwin && !js && !openbsd && !plan9 && !solaris && !windows
-
-package runtime
-
-import "unsafe"
-
-// read calls the read system call.
-// It returns a non-negative number of bytes read or a negative errno value.
-func read(fd int32, p unsafe.Pointer, n int32) int32
-
-func closefd(fd int32) int32
-
-func exit(code int32)
-func usleep(usec uint32)
-
-//go:nosplit
-func usleep_no_g(usec uint32) {
- usleep(usec)
-}
-
-// write calls the write system call.
-// It returns a non-negative number of bytes written or a negative errno value.
-//go:noescape
-func write1(fd uintptr, p unsafe.Pointer, n int32) int32
-
-//go:noescape
-func open(name *byte, mode, perm int32) int32
-
-// The return value is only set on linux, to be used in osinit().
-func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32
-
-// exitThread terminates the current thread, writing *wait = 0 when
-// the stack is safe to reclaim.
-//
-//go:noescape
-func exitThread(wait *uint32)
diff --git a/contrib/go/_std_1.18/src/runtime/stubs_linux.go b/contrib/go/_std_1.18/src/runtime/stubs_linux.go
deleted file mode 100644
index 06c14e2160..0000000000
--- a/contrib/go/_std_1.18/src/runtime/stubs_linux.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-
-package runtime
-
-import "unsafe"
-
-func sbrk0() uintptr
-
-// Called from write_err_android.go only, but defined in sys_linux_*.s;
-// declared here (instead of in write_err_android.go) for go vet on non-android builds.
-// The return value is the raw syscall result, which may encode an error number.
-//go:noescape
-func access(name *byte, mode int32) int32
-func connect(fd int32, addr unsafe.Pointer, len int32) int32
-func socket(domain int32, typ int32, prot int32) int32
diff --git a/contrib/go/_std_1.18/src/runtime/symtab.go b/contrib/go/_std_1.18/src/runtime/symtab.go
deleted file mode 100644
index ee4db47314..0000000000
--- a/contrib/go/_std_1.18/src/runtime/symtab.go
+++ /dev/null
@@ -1,1180 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/goarch"
- "runtime/internal/atomic"
- "runtime/internal/sys"
- "unsafe"
-)
-
-// Frames may be used to get function/file/line information for a
-// slice of PC values returned by Callers.
-type Frames struct {
- // callers is a slice of PCs that have not yet been expanded to frames.
- callers []uintptr
-
- // frames is a slice of Frames that have yet to be returned.
- frames []Frame
- frameStore [2]Frame
-}
-
-// Frame is the information returned by Frames for each call frame.
-type Frame struct {
- // PC is the program counter for the location in this frame.
- // For a frame that calls another frame, this will be the
- // program counter of a call instruction. Because of inlining,
- // multiple frames may have the same PC value, but different
- // symbolic information.
- PC uintptr
-
- // Func is the Func value of this call frame. This may be nil
- // for non-Go code or fully inlined functions.
- Func *Func
-
- // Function is the package path-qualified function name of
- // this call frame. If non-empty, this string uniquely
- // identifies a single function in the program.
- // This may be the empty string if not known.
- // If Func is not nil then Function == Func.Name().
- Function string
-
- // File and Line are the file name and line number of the
- // location in this frame. For non-leaf frames, this will be
- // the location of a call. These may be the empty string and
- // zero, respectively, if not known.
- File string
- Line int
-
- // Entry point program counter for the function; may be zero
- // if not known. If Func is not nil then Entry ==
- // Func.Entry().
- Entry uintptr
-
- // The runtime's internal view of the function. This field
- // is set (funcInfo.valid() returns true) only for Go functions,
- // not for C functions.
- funcInfo funcInfo
-}
-
-// CallersFrames takes a slice of PC values returned by Callers and
-// prepares to return function/file/line information.
-// Do not change the slice until you are done with the Frames.
-func CallersFrames(callers []uintptr) *Frames {
- f := &Frames{callers: callers}
- f.frames = f.frameStore[:0]
- return f
-}
-
-// Next returns a Frame representing the next call frame in the slice
-// of PC values. If it has already returned all call frames, Next
-// returns a zero Frame.
-//
-// The more result indicates whether the next call to Next will return
-// a valid Frame. It does not necessarily indicate whether this call
-// returned one.
-//
-// See the Frames example for idiomatic usage.
-func (ci *Frames) Next() (frame Frame, more bool) {
- for len(ci.frames) < 2 {
- // Find the next frame.
- // We need to look for 2 frames so we know what
- // to return for the "more" result.
- if len(ci.callers) == 0 {
- break
- }
- pc := ci.callers[0]
- ci.callers = ci.callers[1:]
- funcInfo := findfunc(pc)
- if !funcInfo.valid() {
- if cgoSymbolizer != nil {
- // Pre-expand cgo frames. We could do this
- // incrementally, too, but there's no way to
- // avoid allocation in this case anyway.
- ci.frames = append(ci.frames, expandCgoFrames(pc)...)
- }
- continue
- }
- f := funcInfo._Func()
- entry := f.Entry()
- if pc > entry {
- // We store the pc of the start of the instruction following
- // the instruction in question (the call or the inline mark).
- // This is done for historical reasons, and to make FuncForPC
- // work correctly for entries in the result of runtime.Callers.
- pc--
- }
- name := funcname(funcInfo)
- if inldata := funcdata(funcInfo, _FUNCDATA_InlTree); inldata != nil {
- inltree := (*[1 << 20]inlinedCall)(inldata)
- // Non-strict as cgoTraceback may have added bogus PCs
- // with a valid funcInfo but invalid PCDATA.
- ix := pcdatavalue1(funcInfo, _PCDATA_InlTreeIndex, pc, nil, false)
- if ix >= 0 {
- // Note: entry is not modified. It always refers to a real frame, not an inlined one.
- f = nil
- name = funcnameFromNameoff(funcInfo, inltree[ix].func_)
- // File/line is already correct.
- // TODO: remove file/line from InlinedCall?
- }
- }
- ci.frames = append(ci.frames, Frame{
- PC: pc,
- Func: f,
- Function: name,
- Entry: entry,
- funcInfo: funcInfo,
- // Note: File,Line set below
- })
- }
-
- // Pop one frame from the frame list. Keep the rest.
- // Avoid allocation in the common case, which is 1 or 2 frames.
- switch len(ci.frames) {
- case 0: // In the rare case when there are no frames at all, we return Frame{}.
- return
- case 1:
- frame = ci.frames[0]
- ci.frames = ci.frameStore[:0]
- case 2:
- frame = ci.frames[0]
- ci.frameStore[0] = ci.frames[1]
- ci.frames = ci.frameStore[:1]
- default:
- frame = ci.frames[0]
- ci.frames = ci.frames[1:]
- }
- more = len(ci.frames) > 0
- if frame.funcInfo.valid() {
- // Compute file/line just before we need to return it,
- // as it can be expensive. This avoids computing file/line
- // for the Frame we find but don't return. See issue 32093.
- file, line := funcline1(frame.funcInfo, frame.PC, false)
- frame.File, frame.Line = file, int(line)
- }
- return
-}
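-
-// Idiomatic use (a sketch mirroring the documented runtime.Frames example):
-//
-//	pc := make([]uintptr, 16)
-//	n := runtime.Callers(0, pc)
-//	frames := runtime.CallersFrames(pc[:n])
-//	for {
-//		frame, more := frames.Next()
-//		println(frame.Function)
-//		if !more {
-//			break
-//		}
-//	}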
-
-// runtime_expandFinalInlineFrame expands the final pc in stk to include all
-// "callers" if pc is inline.
-//
-//go:linkname runtime_expandFinalInlineFrame runtime/pprof.runtime_expandFinalInlineFrame
-func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr {
- if len(stk) == 0 {
- return stk
- }
- pc := stk[len(stk)-1]
- tracepc := pc - 1
-
- f := findfunc(tracepc)
- if !f.valid() {
- // Not a Go function.
- return stk
- }
-
- inldata := funcdata(f, _FUNCDATA_InlTree)
- if inldata == nil {
- // Nothing inline in f.
- return stk
- }
-
- // Treat the previous func as normal. We haven't actually checked, but
- // since this pc was included in the stack, we know it shouldn't be
- // elided.
- lastFuncID := funcID_normal
-
- // Remove pc from stk; we'll re-add it below.
- stk = stk[:len(stk)-1]
-
- // See inline expansion in gentraceback.
- var cache pcvalueCache
- inltree := (*[1 << 20]inlinedCall)(inldata)
- for {
- // Non-strict as cgoTraceback may have added bogus PCs
- // with a valid funcInfo but invalid PCDATA.
- ix := pcdatavalue1(f, _PCDATA_InlTreeIndex, tracepc, &cache, false)
- if ix < 0 {
- break
- }
- if inltree[ix].funcID == funcID_wrapper && elideWrapperCalling(lastFuncID) {
- // ignore wrappers
- } else {
- stk = append(stk, pc)
- }
- lastFuncID = inltree[ix].funcID
- // Back up to an instruction in the "caller".
- tracepc = f.entry() + uintptr(inltree[ix].parentPc)
- pc = tracepc + 1
- }
-
- // N.B. we want to keep the last parentPC which is not inline.
- stk = append(stk, pc)
-
- return stk
-}
-
-// expandCgoFrames expands frame information for pc, known to be
-// a non-Go function, using the cgoSymbolizer hook. expandCgoFrames
-// returns nil if pc could not be expanded.
-func expandCgoFrames(pc uintptr) []Frame {
- arg := cgoSymbolizerArg{pc: pc}
- callCgoSymbolizer(&arg)
-
- if arg.file == nil && arg.funcName == nil {
- // No useful information from symbolizer.
- return nil
- }
-
- var frames []Frame
- for {
- frames = append(frames, Frame{
- PC: pc,
- Func: nil,
- Function: gostring(arg.funcName),
- File: gostring(arg.file),
- Line: int(arg.lineno),
- Entry: arg.entry,
- // funcInfo is zero, which implies !funcInfo.valid().
- // That ensures that we use the File/Line info given here.
- })
- if arg.more == 0 {
- break
- }
- callCgoSymbolizer(&arg)
- }
-
- // No more frames for this PC. Tell the symbolizer we are done.
- // We don't try to maintain a single cgoSymbolizerArg for the
- // whole use of Frames, because there would be no good way to tell
- // the symbolizer when we are done.
- arg.pc = 0
- callCgoSymbolizer(&arg)
-
- return frames
-}
-
-// NOTE: Func does not expose the actual unexported fields, because we return *Func
-// values to users, and we want to keep them from being able to overwrite the data
-// with (say) *f = Func{}.
-// All code operating on a *Func must call raw() to get the *_func
-// or funcInfo() to get the funcInfo instead.
-
-// A Func represents a Go function in the running binary.
-type Func struct {
- opaque struct{} // unexported field to disallow conversions
-}
-
-func (f *Func) raw() *_func {
- return (*_func)(unsafe.Pointer(f))
-}
-
-func (f *Func) funcInfo() funcInfo {
- return f.raw().funcInfo()
-}
-
-func (f *_func) funcInfo() funcInfo {
- // Find the module containing fn. fn is located in the pclntable.
- // The unsafe.Pointer to uintptr conversions and arithmetic
- // are safe because we are working with module addresses.
- ptr := uintptr(unsafe.Pointer(f))
- var mod *moduledata
- for datap := &firstmoduledata; datap != nil; datap = datap.next {
- if len(datap.pclntable) == 0 {
- continue
- }
- base := uintptr(unsafe.Pointer(&datap.pclntable[0]))
- if base <= ptr && ptr < base+uintptr(len(datap.pclntable)) {
- mod = datap
- break
- }
- }
- return funcInfo{f, mod}
-}
-
-// PCDATA and FUNCDATA table indexes.
-//
-// See funcdata.h and ../cmd/internal/objabi/funcdata.go.
-const (
- _PCDATA_UnsafePoint = 0
- _PCDATA_StackMapIndex = 1
- _PCDATA_InlTreeIndex = 2
- _PCDATA_ArgLiveIndex = 3
-
- _FUNCDATA_ArgsPointerMaps = 0
- _FUNCDATA_LocalsPointerMaps = 1
- _FUNCDATA_StackObjects = 2
- _FUNCDATA_InlTree = 3
- _FUNCDATA_OpenCodedDeferInfo = 4
- _FUNCDATA_ArgInfo = 5
- _FUNCDATA_ArgLiveInfo = 6
- _FUNCDATA_WrapInfo = 7
-
- _ArgsSizeUnknown = -0x80000000
-)
-
-const (
- // PCDATA_UnsafePoint values.
- _PCDATA_UnsafePointSafe = -1 // Safe for async preemption
- _PCDATA_UnsafePointUnsafe = -2 // Unsafe for async preemption
-
-	// _PCDATA_Restart1(2) apply to a sequence of instructions; if an
-	// async preemption happens within that sequence, we should back off
-	// the PC to the start of the sequence when resuming.
-	// We need two so we can distinguish the start/end of the sequence
-	// in case two sequences are next to each other.
- _PCDATA_Restart1 = -3
- _PCDATA_Restart2 = -4
-
-	// Like _PCDATA_Restart1, but back to the function entry if async
-	// preempted.
- _PCDATA_RestartAtEntry = -5
-)
-
-// A FuncID identifies particular functions that need to be treated
-// specially by the runtime.
-// Note that in some situations involving plugins, there may be multiple
-// copies of a particular special runtime function.
-// Note: this list must match the list in cmd/internal/objabi/funcid.go.
-type funcID uint8
-
-const (
- funcID_normal funcID = iota // not a special function
- funcID_abort
- funcID_asmcgocall
- funcID_asyncPreempt
- funcID_cgocallback
- funcID_debugCallV2
- funcID_gcBgMarkWorker
- funcID_goexit
- funcID_gogo
- funcID_gopanic
- funcID_handleAsyncEvent
- funcID_mcall
- funcID_morestack
- funcID_mstart
- funcID_panicwrap
- funcID_rt0_go
- funcID_runfinq
- funcID_runtime_main
- funcID_sigpanic
- funcID_systemstack
- funcID_systemstack_switch
- funcID_wrapper // any autogenerated code (hash/eq algorithms, method wrappers, etc.)
-)
-
-// A FuncFlag holds bits about a function.
-// This list must match the list in cmd/internal/objabi/funcid.go.
-type funcFlag uint8
-
-const (
- // TOPFRAME indicates a function that appears at the top of its stack.
-	// The traceback routine stops at such a function and considers that a
-	// successful, complete traversal of the stack.
- // Examples of TOPFRAME functions include goexit, which appears
- // at the top of a user goroutine stack, and mstart, which appears
- // at the top of a system goroutine stack.
- funcFlag_TOPFRAME funcFlag = 1 << iota
-
- // SPWRITE indicates a function that writes an arbitrary value to SP
- // (any write other than adding or subtracting a constant amount).
- // The traceback routines cannot encode such changes into the
- // pcsp tables, so the function traceback cannot safely unwind past
- // SPWRITE functions. Stopping at an SPWRITE function is considered
- // to be an incomplete unwinding of the stack. In certain contexts
- // (in particular garbage collector stack scans) that is a fatal error.
- funcFlag_SPWRITE
-
- // ASM indicates that a function was implemented in assembly.
- funcFlag_ASM
-)
-
-// pcHeader holds data used by the pclntab lookups.
-type pcHeader struct {
- magic uint32 // 0xFFFFFFF0
- pad1, pad2 uint8 // 0,0
- minLC uint8 // min instruction size
- ptrSize uint8 // size of a ptr in bytes
- nfunc int // number of functions in the module
- nfiles uint // number of entries in the file tab
- textStart uintptr // base for function entry PC offsets in this module, equal to moduledata.text
- funcnameOffset uintptr // offset to the funcnametab variable from pcHeader
- cuOffset uintptr // offset to the cutab variable from pcHeader
- filetabOffset uintptr // offset to the filetab variable from pcHeader
- pctabOffset uintptr // offset to the pctab variable from pcHeader
- pclnOffset uintptr // offset to the pclntab variable from pcHeader
-}
-
-// moduledata records information about the layout of the executable
-// image. It is written by the linker. Any changes here must be
-// matched by changes to the code in cmd/link/internal/ld/symtab.go:symtab.
-// moduledata is stored in statically allocated non-pointer memory;
-// none of the pointers here are visible to the garbage collector.
-type moduledata struct {
- pcHeader *pcHeader
- funcnametab []byte
- cutab []uint32
- filetab []byte
- pctab []byte
- pclntable []byte
- ftab []functab
- findfunctab uintptr
- minpc, maxpc uintptr
-
- text, etext uintptr
- noptrdata, enoptrdata uintptr
- data, edata uintptr
- bss, ebss uintptr
- noptrbss, enoptrbss uintptr
- end, gcdata, gcbss uintptr
- types, etypes uintptr
- rodata uintptr
- gofunc uintptr // go.func.*
-
- textsectmap []textsect
- typelinks []int32 // offsets from types
- itablinks []*itab
-
- ptab []ptabEntry
-
- pluginpath string
- pkghashes []modulehash
-
- modulename string
- modulehashes []modulehash
-
- hasmain uint8 // 1 if module contains the main function, 0 otherwise
-
- gcdatamask, gcbssmask bitvector
-
- typemap map[typeOff]*_type // offset to *_rtype in previous module
-
- bad bool // module failed to load and should be ignored
-
- next *moduledata
-}
-
-// A modulehash is used to compare the ABI of a new module or a
-// package in a new module with the loaded program.
-//
-// For each shared library a module links against, the linker creates an entry in the
-// moduledata.modulehashes slice containing the name of the module, the abi hash seen
-// at link time and a pointer to the runtime abi hash. These are checked in
-// moduledataverify1 below.
-//
-// For each loaded plugin, the pkghashes slice has a modulehash of the
-// newly loaded package that can be used to check the plugin's version of
-// a package against any previously loaded version of the package.
-// This is done in plugin.lastmoduleinit.
-type modulehash struct {
- modulename string
- linktimehash string
- runtimehash *string
-}
-
-// pinnedTypemaps are the map[typeOff]*_type from the moduledata objects.
-//
-// These typemap objects are allocated at run time on the heap, but the
-// only direct reference to them is in the moduledata, created by the
-// linker and marked SNOPTRDATA so it is ignored by the GC.
-//
-// To make sure the map isn't collected, we keep a second reference here.
-var pinnedTypemaps []map[typeOff]*_type
-
-var firstmoduledata moduledata // linker symbol
-var lastmoduledatap *moduledata // linker symbol
-var modulesSlice *[]*moduledata // see activeModules
-
-// activeModules returns a slice of active modules.
-//
-// A module is active once its gcdatamask and gcbssmask have been
-// assembled and it is usable by the GC.
-//
-// This is nosplit/nowritebarrier because it is called by the
-// cgo pointer checking code.
-//go:nosplit
-//go:nowritebarrier
-func activeModules() []*moduledata {
- p := (*[]*moduledata)(atomic.Loadp(unsafe.Pointer(&modulesSlice)))
- if p == nil {
- return nil
- }
- return *p
-}
-
-// modulesinit creates the active modules slice out of all loaded modules.
-//
-// When a module is first loaded by the dynamic linker, an .init_array
-// function (written by cmd/link) is invoked to call addmoduledata,
-// appending the module to the linked list that starts with
-// firstmoduledata.
-//
-// There are two times this can happen in the lifecycle of a Go
-// program. First, if compiled with -linkshared, a number of modules
-// built with -buildmode=shared can be loaded at program initialization.
-// Second, a Go program can load a module while running that was built
-// with -buildmode=plugin.
-//
-// After loading, this function is called which initializes the
-// moduledata so it is usable by the GC and creates a new activeModules
-// list.
-//
-// Only one goroutine may call modulesinit at a time.
-func modulesinit() {
- modules := new([]*moduledata)
- for md := &firstmoduledata; md != nil; md = md.next {
- if md.bad {
- continue
- }
- *modules = append(*modules, md)
- if md.gcdatamask == (bitvector{}) {
- scanDataSize := md.edata - md.data
- md.gcdatamask = progToPointerMask((*byte)(unsafe.Pointer(md.gcdata)), scanDataSize)
- scanBSSSize := md.ebss - md.bss
- md.gcbssmask = progToPointerMask((*byte)(unsafe.Pointer(md.gcbss)), scanBSSSize)
- gcController.addGlobals(int64(scanDataSize + scanBSSSize))
- }
- }
-
- // Modules appear in the moduledata linked list in the order they are
- // loaded by the dynamic loader, with one exception: the
-	// firstmoduledata itself is the module that contains the runtime. This
- // is not always the first module (when using -buildmode=shared, it
- // is typically libstd.so, the second module). The order matters for
- // typelinksinit, so we swap the first module with whatever module
- // contains the main function.
- //
- // See Issue #18729.
- for i, md := range *modules {
- if md.hasmain != 0 {
- (*modules)[0] = md
- (*modules)[i] = &firstmoduledata
- break
- }
- }
-
- atomicstorep(unsafe.Pointer(&modulesSlice), unsafe.Pointer(modules))
-}
-
-type functab struct {
- entryoff uint32 // relative to runtime.text
- funcoff uint32
-}
-
-// Mapping information for secondary text sections
-
-type textsect struct {
- vaddr uintptr // prelinked section vaddr
- end uintptr // vaddr + section length
- baseaddr uintptr // relocated section address
-}
-
-const minfunc = 16 // minimum function size
-const pcbucketsize = 256 * minfunc // size of bucket in the pc->func lookup table
-
-// findfunctab is an array of these structures.
-// Each bucket represents 4096 bytes of the text segment.
-// Each subbucket represents 256 bytes of the text segment.
-// To find a function given a pc, locate the bucket and subbucket for
-// that pc. Add together the idx and subbucket value to obtain a
-// function index. Then scan the functab array starting at that
-// index to find the target function.
-// This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead.
-type findfuncbucket struct {
- idx uint32
- subbuckets [16]byte
-}
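
The lookup the comment describes can be made concrete with a small standalone
sketch; the PC offset and the resulting indices below are hypothetical, not
taken from a real binary:

    package main

    import "fmt"

    const minfunc = 16                 // minimum function size, as above
    const pcbucketsize = 256 * minfunc // 4096

    func main() {
        pcOff := uint32(0x1234) // hypothetical offset from runtime.text
        b := pcOff / pcbucketsize                       // bucket index: 1
        i := pcOff % pcbucketsize / (pcbucketsize / 16) // subbucket index: 2
        // findfunc starts scanning ftab at bucket[b].idx + uint32(bucket[b].subbuckets[i]).
        fmt.Println(b, i) // 1 2
    }
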
-
-func moduledataverify() {
- for datap := &firstmoduledata; datap != nil; datap = datap.next {
- moduledataverify1(datap)
- }
-}
-
-const debugPcln = false
-
-func moduledataverify1(datap *moduledata) {
- // Check that the pclntab's format is valid.
- hdr := datap.pcHeader
- if hdr.magic != 0xfffffff0 || hdr.pad1 != 0 || hdr.pad2 != 0 ||
- hdr.minLC != sys.PCQuantum || hdr.ptrSize != goarch.PtrSize || hdr.textStart != datap.text {
- println("runtime: pcHeader: magic=", hex(hdr.magic), "pad1=", hdr.pad1, "pad2=", hdr.pad2,
- "minLC=", hdr.minLC, "ptrSize=", hdr.ptrSize, "pcHeader.textStart=", hex(hdr.textStart),
- "text=", hex(datap.text), "pluginpath=", datap.pluginpath)
- throw("invalid function symbol table")
- }
-
-	// ftab is the lookup table for functions by program counter.
- nftab := len(datap.ftab) - 1
- for i := 0; i < nftab; i++ {
- // NOTE: ftab[nftab].entry is legal; it is the address beyond the final function.
- if datap.ftab[i].entryoff > datap.ftab[i+1].entryoff {
- f1 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i].funcoff])), datap}
- f2 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i+1].funcoff])), datap}
- f2name := "end"
- if i+1 < nftab {
- f2name = funcname(f2)
- }
- println("function symbol table not sorted by PC offset:", hex(datap.ftab[i].entryoff), funcname(f1), ">", hex(datap.ftab[i+1].entryoff), f2name, ", plugin:", datap.pluginpath)
- for j := 0; j <= i; j++ {
- println("\t", hex(datap.ftab[j].entryoff), funcname(funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[j].funcoff])), datap}))
- }
- if GOOS == "aix" && isarchive {
- println("-Wl,-bnoobjreorder is mandatory on aix/ppc64 with c-archive")
- }
- throw("invalid runtime symbol table")
- }
- }
-
- min := datap.textAddr(datap.ftab[0].entryoff)
- max := datap.textAddr(datap.ftab[nftab].entryoff)
- if datap.minpc != min || datap.maxpc != max {
- println("minpc=", hex(datap.minpc), "min=", hex(min), "maxpc=", hex(datap.maxpc), "max=", hex(max))
- throw("minpc or maxpc invalid")
- }
-
- for _, modulehash := range datap.modulehashes {
- if modulehash.linktimehash != *modulehash.runtimehash {
- println("abi mismatch detected between", datap.modulename, "and", modulehash.modulename)
- throw("abi mismatch")
- }
- }
-}
-
-// textAddr returns md.text + off, with special handling for multiple text sections.
-// off is a (virtual) offset computed at internal linking time,
-// before the external linker adjusts the sections' base addresses.
-//
-// The text, or instruction stream, is generated as one large buffer.
-// The off (offset) for a function is its offset within this buffer.
-// If the total text size gets too large, there can be issues on platforms like ppc64
-// if the targets of calls are too far away for the call instruction.
-// To resolve the large text issue, the text is split into multiple text sections
-// to allow the linker to generate long calls when necessary.
-// When this happens, the vaddr for each text section is set to its offset within the text.
-// Each function's offset is compared against the section vaddrs and ends to determine the containing section.
-// Then the section relative offset is added to the section's
-// relocated baseaddr to compute the function address.
-//
-// It is nosplit because it is part of the findfunc implementation.
-//go:nosplit
-func (md *moduledata) textAddr(off32 uint32) uintptr {
- off := uintptr(off32)
- res := md.text + off
- if len(md.textsectmap) > 1 {
- for i, sect := range md.textsectmap {
- // For the last section, include the end address (etext), as it is included in the functab.
- if off >= sect.vaddr && off < sect.end || (i == len(md.textsectmap)-1 && off == sect.end) {
- res = sect.baseaddr + off - sect.vaddr
- break
- }
- }
- if res > md.etext && GOARCH != "wasm" { // on wasm, functions do not live in the same address space as the linear memory
- println("runtime: textAddr", hex(res), "out of range", hex(md.text), "-", hex(md.etext))
- throw("runtime: text offset out of range")
- }
- }
- return res
-}
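
A minimal, self-contained sketch of the section walk above; the local textsect
copy mirrors the runtime struct, and all addresses are made up for illustration:

    package main

    import "fmt"

    type textsect struct {
        vaddr, end, baseaddr uintptr
    }

    func textAddr(text uintptr, sects []textsect, off uintptr) uintptr {
        res := text + off
        for i, sect := range sects {
            if off >= sect.vaddr && off < sect.end || (i == len(sects)-1 && off == sect.end) {
                res = sect.baseaddr + off - sect.vaddr
                break
            }
        }
        return res
    }

    func main() {
        // The second section was relocated by the external linker.
        sects := []textsect{
            {vaddr: 0x000000, end: 0x500000, baseaddr: 0x401000},
            {vaddr: 0x500000, end: 0x900000, baseaddr: 0xa01000},
        }
        // Offset 0x600000 falls in the second section:
        fmt.Printf("%#x\n", textAddr(0x401000, sects, 0x600000)) // 0xb01000
    }
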
-
-// textOff is the opposite of textAddr. It converts a PC to a (virtual) offset
-// to md.text, and reports whether the PC is in any Go text section.
-//
-// It is nosplit because it is part of the findfunc implementation.
-//go:nosplit
-func (md *moduledata) textOff(pc uintptr) (uint32, bool) {
- res := uint32(pc - md.text)
- if len(md.textsectmap) > 1 {
- for i, sect := range md.textsectmap {
- if sect.baseaddr > pc {
- // pc is not in any section.
- return 0, false
- }
- end := sect.baseaddr + (sect.end - sect.vaddr)
- // For the last section, include the end address (etext), as it is included in the functab.
-			if i == len(md.textsectmap)-1 {
- end++
- }
- if pc < end {
- res = uint32(pc - sect.baseaddr + sect.vaddr)
- break
- }
- }
- }
- return res, true
-}
-
-// FuncForPC returns a *Func describing the function that contains the
-// given program counter address, or else nil.
-//
-// If pc represents multiple functions because of inlining, it returns
-// the *Func describing the innermost function, but with an entry of
-// the outermost function.
-func FuncForPC(pc uintptr) *Func {
- f := findfunc(pc)
- if !f.valid() {
- return nil
- }
- if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
- // Note: strict=false so bad PCs (those between functions) don't crash the runtime.
- // We just report the preceding function in that situation. See issue 29735.
- // TODO: Perhaps we should report no function at all in that case.
- // The runtime currently doesn't have function end info, alas.
- if ix := pcdatavalue1(f, _PCDATA_InlTreeIndex, pc, nil, false); ix >= 0 {
- inltree := (*[1 << 20]inlinedCall)(inldata)
- name := funcnameFromNameoff(f, inltree[ix].func_)
- file, line := funcline(f, pc)
- fi := &funcinl{
- ones: ^uint32(0),
- entry: f.entry(), // entry of the real (the outermost) function.
- name: name,
- file: file,
- line: int(line),
- }
- return (*Func)(unsafe.Pointer(fi))
- }
- }
- return f._Func()
-}
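
Typical use of this API from user code, for reference; the program resolves
the caller's own frame, so the printed name and position refer to main.main:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        pc, _, _, ok := runtime.Caller(0) // a PC inside main.main
        if !ok {
            return
        }
        f := runtime.FuncForPC(pc)
        file, line := f.FileLine(pc)
        fmt.Println(f.Name(), file, line) // e.g. "main.main /tmp/x.go 10"
    }
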
-
-// Name returns the name of the function.
-func (f *Func) Name() string {
- if f == nil {
- return ""
- }
- fn := f.raw()
- if fn.isInlined() { // inlined version
- fi := (*funcinl)(unsafe.Pointer(fn))
- return fi.name
- }
- return funcname(f.funcInfo())
-}
-
-// Entry returns the entry address of the function.
-func (f *Func) Entry() uintptr {
- fn := f.raw()
- if fn.isInlined() { // inlined version
- fi := (*funcinl)(unsafe.Pointer(fn))
- return fi.entry
- }
- return fn.funcInfo().entry()
-}
-
-// FileLine returns the file name and line number of the
-// source code corresponding to the program counter pc.
-// The result will not be accurate if pc is not a program
-// counter within f.
-func (f *Func) FileLine(pc uintptr) (file string, line int) {
- fn := f.raw()
- if fn.isInlined() { // inlined version
- fi := (*funcinl)(unsafe.Pointer(fn))
- return fi.file, fi.line
- }
- // Pass strict=false here, because anyone can call this function,
- // and they might just be wrong about targetpc belonging to f.
- file, line32 := funcline1(f.funcInfo(), pc, false)
- return file, int(line32)
-}
-
-// findmoduledatap looks up the moduledata for a PC.
-//
-// It is nosplit because it's part of the isgoexception
-// implementation.
-//
-//go:nosplit
-func findmoduledatap(pc uintptr) *moduledata {
- for datap := &firstmoduledata; datap != nil; datap = datap.next {
- if datap.minpc <= pc && pc < datap.maxpc {
- return datap
- }
- }
- return nil
-}
-
-type funcInfo struct {
- *_func
- datap *moduledata
-}
-
-func (f funcInfo) valid() bool {
- return f._func != nil
-}
-
-func (f funcInfo) _Func() *Func {
- return (*Func)(unsafe.Pointer(f._func))
-}
-
-// isInlined reports whether f should be re-interpreted as a *funcinl.
-func (f *_func) isInlined() bool {
- return f.entryoff == ^uint32(0) // see comment for funcinl.ones
-}
-
-// entry returns the entry PC for f.
-func (f funcInfo) entry() uintptr {
- return f.datap.textAddr(f.entryoff)
-}
-
-// findfunc looks up function metadata for a PC.
-//
-// It is nosplit because it's part of the isgoexception
-// implementation.
-//
-//go:nosplit
-func findfunc(pc uintptr) funcInfo {
- datap := findmoduledatap(pc)
- if datap == nil {
- return funcInfo{}
- }
- const nsub = uintptr(len(findfuncbucket{}.subbuckets))
-
- pcOff, ok := datap.textOff(pc)
- if !ok {
- return funcInfo{}
- }
-
- x := uintptr(pcOff) + datap.text - datap.minpc // TODO: are datap.text and datap.minpc always equal?
- b := x / pcbucketsize
- i := x % pcbucketsize / (pcbucketsize / nsub)
-
- ffb := (*findfuncbucket)(add(unsafe.Pointer(datap.findfunctab), b*unsafe.Sizeof(findfuncbucket{})))
- idx := ffb.idx + uint32(ffb.subbuckets[i])
-
- // Find the ftab entry.
- for datap.ftab[idx+1].entryoff <= pcOff {
- idx++
- }
-
- funcoff := datap.ftab[idx].funcoff
- return funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[funcoff])), datap}
-}
-
-type pcvalueCache struct {
- entries [2][8]pcvalueCacheEnt
-}
-
-type pcvalueCacheEnt struct {
- // targetpc and off together are the key of this cache entry.
- targetpc uintptr
- off uint32
- // val is the value of this cached pcvalue entry.
- val int32
-}
-
-// pcvalueCacheKey returns the outermost index in a pcvalueCache to use for targetpc.
-// It must be very cheap to calculate.
-// For now, align to goarch.PtrSize and reduce mod the number of entries.
-// In practice, this appears to be fairly randomly and evenly distributed.
-func pcvalueCacheKey(targetpc uintptr) uintptr {
- return (targetpc / goarch.PtrSize) % uintptr(len(pcvalueCache{}.entries))
-}
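
A worked instance of the key calculation, assuming a 64-bit target
(PtrSize = 8) and the two outer entries declared above; the PC is arbitrary:

    package main

    import "fmt"

    func main() {
        const ptrSize = 8             // goarch.PtrSize on 64-bit targets
        targetpc := uintptr(0x4560a8) // arbitrary example PC
        fmt.Println((targetpc / ptrSize) % 2) // 1 -> scan cache.entries[1]
    }
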
-
-// Returns the PCData value, and the PC where this value starts.
-// TODO: the start PC is returned only when cache is nil.
-func pcvalue(f funcInfo, off uint32, targetpc uintptr, cache *pcvalueCache, strict bool) (int32, uintptr) {
- if off == 0 {
- return -1, 0
- }
-
- // Check the cache. This speeds up walks of deep stacks, which
- // tend to have the same recursive functions over and over.
- //
- // This cache is small enough that full associativity is
- // cheaper than doing the hashing for a less associative
- // cache.
- if cache != nil {
- x := pcvalueCacheKey(targetpc)
- for i := range cache.entries[x] {
- // We check off first because we're more
- // likely to have multiple entries with
- // different offsets for the same targetpc
- // than the other way around, so we'll usually
- // fail in the first clause.
- ent := &cache.entries[x][i]
- if ent.off == off && ent.targetpc == targetpc {
- return ent.val, 0
- }
- }
- }
-
- if !f.valid() {
- if strict && panicking == 0 {
- println("runtime: no module data for", hex(f.entry()))
- throw("no module data")
- }
- return -1, 0
- }
- datap := f.datap
- p := datap.pctab[off:]
- pc := f.entry()
- prevpc := pc
- val := int32(-1)
- for {
- var ok bool
- p, ok = step(p, &pc, &val, pc == f.entry())
- if !ok {
- break
- }
- if targetpc < pc {
- // Replace a random entry in the cache. Random
- // replacement prevents a performance cliff if
- // a recursive stack's cycle is slightly
- // larger than the cache.
- // Put the new element at the beginning,
- // since it is the most likely to be newly used.
- if cache != nil {
- x := pcvalueCacheKey(targetpc)
- e := &cache.entries[x]
- ci := fastrandn(uint32(len(cache.entries[x])))
- e[ci] = e[0]
- e[0] = pcvalueCacheEnt{
- targetpc: targetpc,
- off: off,
- val: val,
- }
- }
-
- return val, prevpc
- }
- prevpc = pc
- }
-
- // If there was a table, it should have covered all program counters.
- // If not, something is wrong.
- if panicking != 0 || !strict {
- return -1, 0
- }
-
- print("runtime: invalid pc-encoded table f=", funcname(f), " pc=", hex(pc), " targetpc=", hex(targetpc), " tab=", p, "\n")
-
- p = datap.pctab[off:]
- pc = f.entry()
- val = -1
- for {
- var ok bool
- p, ok = step(p, &pc, &val, pc == f.entry())
- if !ok {
- break
- }
- print("\tvalue=", val, " until pc=", hex(pc), "\n")
- }
-
- throw("invalid runtime symbol table")
- return -1, 0
-}
-
-func cfuncname(f funcInfo) *byte {
- if !f.valid() || f.nameoff == 0 {
- return nil
- }
- return &f.datap.funcnametab[f.nameoff]
-}
-
-func funcname(f funcInfo) string {
- return gostringnocopy(cfuncname(f))
-}
-
-func funcpkgpath(f funcInfo) string {
- name := funcname(f)
- i := len(name) - 1
- for ; i > 0; i-- {
- if name[i] == '/' {
- break
- }
- }
- for ; i < len(name); i++ {
- if name[i] == '.' {
- break
- }
- }
- return name[:i]
-}
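
The two scans above, copied into a runnable form and applied to a
representative symbol name (the sample name is only an illustration):

    package main

    import "fmt"

    func funcpkgpath(name string) string {
        i := len(name) - 1
        for ; i > 0; i-- { // backward: find the last '/'
            if name[i] == '/' {
                break
            }
        }
        for ; i < len(name); i++ { // forward: stop at the next '.'
            if name[i] == '.' {
                break
            }
        }
        return name[:i]
    }

    func main() {
        fmt.Println(funcpkgpath("net/http.(*Server).Serve")) // net/http
    }
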
-
-func cfuncnameFromNameoff(f funcInfo, nameoff int32) *byte {
- if !f.valid() {
- return nil
- }
- return &f.datap.funcnametab[nameoff]
-}
-
-func funcnameFromNameoff(f funcInfo, nameoff int32) string {
- return gostringnocopy(cfuncnameFromNameoff(f, nameoff))
-}
-
-func funcfile(f funcInfo, fileno int32) string {
- datap := f.datap
- if !f.valid() {
- return "?"
- }
- // Make sure the cu index and file offset are valid
- if fileoff := datap.cutab[f.cuOffset+uint32(fileno)]; fileoff != ^uint32(0) {
- return gostringnocopy(&datap.filetab[fileoff])
- }
- // pcln section is corrupt.
- return "?"
-}
-
-func funcline1(f funcInfo, targetpc uintptr, strict bool) (file string, line int32) {
- datap := f.datap
- if !f.valid() {
- return "?", 0
- }
- fileno, _ := pcvalue(f, f.pcfile, targetpc, nil, strict)
- line, _ = pcvalue(f, f.pcln, targetpc, nil, strict)
- if fileno == -1 || line == -1 || int(fileno) >= len(datap.filetab) {
- // print("looking for ", hex(targetpc), " in ", funcname(f), " got file=", fileno, " line=", lineno, "\n")
- return "?", 0
- }
- file = funcfile(f, fileno)
- return
-}
-
-func funcline(f funcInfo, targetpc uintptr) (file string, line int32) {
- return funcline1(f, targetpc, true)
-}
-
-func funcspdelta(f funcInfo, targetpc uintptr, cache *pcvalueCache) int32 {
- x, _ := pcvalue(f, f.pcsp, targetpc, cache, true)
- if debugPcln && x&(goarch.PtrSize-1) != 0 {
- print("invalid spdelta ", funcname(f), " ", hex(f.entry()), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
- throw("bad spdelta")
- }
- return x
-}
-
-// funcMaxSPDelta returns the maximum spdelta at any point in f.
-func funcMaxSPDelta(f funcInfo) int32 {
- datap := f.datap
- p := datap.pctab[f.pcsp:]
- pc := f.entry()
- val := int32(-1)
- max := int32(0)
- for {
- var ok bool
- p, ok = step(p, &pc, &val, pc == f.entry())
- if !ok {
- return max
- }
- if val > max {
- max = val
- }
- }
-}
-
-func pcdatastart(f funcInfo, table uint32) uint32 {
- return *(*uint32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4))
-}
-
-func pcdatavalue(f funcInfo, table uint32, targetpc uintptr, cache *pcvalueCache) int32 {
- if table >= f.npcdata {
- return -1
- }
- r, _ := pcvalue(f, pcdatastart(f, table), targetpc, cache, true)
- return r
-}
-
-func pcdatavalue1(f funcInfo, table uint32, targetpc uintptr, cache *pcvalueCache, strict bool) int32 {
- if table >= f.npcdata {
- return -1
- }
- r, _ := pcvalue(f, pcdatastart(f, table), targetpc, cache, strict)
- return r
-}
-
-// Like pcdatavalue, but also return the start PC of this PCData value.
-// It doesn't take a cache.
-func pcdatavalue2(f funcInfo, table uint32, targetpc uintptr) (int32, uintptr) {
- if table >= f.npcdata {
- return -1, 0
- }
- return pcvalue(f, pcdatastart(f, table), targetpc, nil, true)
-}
-
-// funcdata returns a pointer to the ith funcdata for f.
-// funcdata should be kept in sync with cmd/link:writeFuncs.
-func funcdata(f funcInfo, i uint8) unsafe.Pointer {
-	if i >= f.nfuncdata { // i is unsigned, so only the upper bound needs checking
- return nil
- }
- base := f.datap.gofunc // load gofunc address early so that we calculate during cache misses
- p := uintptr(unsafe.Pointer(&f.nfuncdata)) + unsafe.Sizeof(f.nfuncdata) + uintptr(f.npcdata)*4 + uintptr(i)*4
- off := *(*uint32)(unsafe.Pointer(p))
- // Return off == ^uint32(0) ? 0 : f.datap.gofunc + uintptr(off), but without branches.
- // The compiler calculates mask on most architectures using conditional assignment.
- var mask uintptr
- if off == ^uint32(0) {
- mask = 1
- }
- mask--
- raw := base + uintptr(off)
- return unsafe.Pointer(raw & mask)
-}
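
The branchless select at the end of funcdata, isolated into a runnable sketch;
off == ^uint32(0) marks an absent entry and must come back as nil:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func sel(base uintptr, off uint32) unsafe.Pointer {
        var mask uintptr
        if off == ^uint32(0) {
            mask = 1
        }
        mask-- // 1 -> 0 (drop the sum), 0 -> all ones (keep the sum)
        return unsafe.Pointer((base + uintptr(off)) & mask)
    }

    func main() {
        fmt.Println(sel(0x1000, ^uint32(0))) // <nil>  (entry absent)
        fmt.Println(sel(0x1000, 0x20))       // 0x1020
    }
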
-
-// step advances to the next (pc, value) pair in the encoded table.
-func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool) {
- // For both uvdelta and pcdelta, the common case (~70%)
- // is that they are a single byte. If so, avoid calling readvarint.
- uvdelta := uint32(p[0])
- if uvdelta == 0 && !first {
- return nil, false
- }
- n := uint32(1)
- if uvdelta&0x80 != 0 {
- n, uvdelta = readvarint(p)
- }
- *val += int32(-(uvdelta & 1) ^ (uvdelta >> 1))
- p = p[n:]
-
- pcdelta := uint32(p[0])
- n = 1
- if pcdelta&0x80 != 0 {
- n, pcdelta = readvarint(p)
- }
- p = p[n:]
- *pc += uintptr(pcdelta * sys.PCQuantum)
- return p, true
-}
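
The value delta is zig-zag encoded: even values decode to non-negative deltas,
odd values to negative ones. A short check of the expression used above:

    package main

    import "fmt"

    func main() {
        for uvdelta := uint32(0); uvdelta < 6; uvdelta++ {
            delta := int32(-(uvdelta & 1) ^ (uvdelta >> 1)) // zig-zag decode
            fmt.Println(uvdelta, "->", delta)
        }
        // Prints: 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, 4 -> 2, 5 -> -3
    }
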
-
-// readvarint reads a varint from p.
-func readvarint(p []byte) (read uint32, val uint32) {
- var v, shift, n uint32
- for {
- b := p[n]
- n++
- v |= uint32(b&0x7F) << (shift & 31)
- if b&0x80 == 0 {
- break
- }
- shift += 7
- }
- return n, v
-}
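
Decoding a two-byte varint by hand with the same routine, copied into a
runnable form (the input bytes are an arbitrary example): 0xAC contributes
0x2C with the continuation bit set, 0x02 contributes 2<<7 = 256, so the
value is 300:

    package main

    import "fmt"

    func readvarint(p []byte) (read uint32, val uint32) {
        var v, shift, n uint32
        for {
            b := p[n]
            n++
            v |= uint32(b&0x7F) << (shift & 31)
            if b&0x80 == 0 {
                break
            }
            shift += 7
        }
        return n, v
    }

    func main() {
        read, val := readvarint([]byte{0xAC, 0x02})
        fmt.Println(read, val) // 2 300
    }
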
-
-type stackmap struct {
- n int32 // number of bitmaps
- nbit int32 // number of bits in each bitmap
- bytedata [1]byte // bitmaps, each starting on a byte boundary
-}
-
-//go:nowritebarrier
-func stackmapdata(stkmap *stackmap, n int32) bitvector {
- // Check this invariant only when stackDebug is on at all.
- // The invariant is already checked by many of stackmapdata's callers,
- // and disabling it by default allows stackmapdata to be inlined.
- if stackDebug > 0 && (n < 0 || n >= stkmap.n) {
- throw("stackmapdata: index out of range")
- }
- return bitvector{stkmap.nbit, addb(&stkmap.bytedata[0], uintptr(n*((stkmap.nbit+7)>>3)))}
-}
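
The indexing arithmetic above with hypothetical sizes: bitmaps are rounded up
to whole bytes, so with nbit = 10 each bitmap occupies 2 bytes and bitmap n
starts at bytedata[2*n]:

    package main

    import "fmt"

    func main() {
        nbit := int32(10)           // hypothetical bits per bitmap
        bytesPer := (nbit + 7) >> 3 // rounded up to whole bytes: 2
        fmt.Println(bytesPer, 3*bytesPer) // bitmap 3 starts at bytedata[6]
    }
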
-
-// inlinedCall is the encoding of entries in the FUNCDATA_InlTree table.
-type inlinedCall struct {
- parent int16 // index of parent in the inltree, or < 0
- funcID funcID // type of the called function
- _ byte
- file int32 // perCU file index for inlined call. See cmd/link:pcln.go
- line int32 // line number of the call site
- func_ int32 // offset into pclntab for name of called function
- parentPc int32 // position of an instruction whose source position is the call site (offset from entry)
-}
diff --git a/contrib/go/_std_1.18/src/runtime/sys_darwin.go b/contrib/go/_std_1.18/src/runtime/sys_darwin.go
deleted file mode 100644
index 58b3a9171c..0000000000
--- a/contrib/go/_std_1.18/src/runtime/sys_darwin.go
+++ /dev/null
@@ -1,536 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/abi"
- "unsafe"
-)
-
-// The X versions of syscall expect the libc call to return a 64-bit result.
-// The non-X versions expect a 32-bit result.
-// This distinction is required because an error is indicated by returning -1,
-// and we need to know whether to check 32 or 64 bits of the result.
-// (Some libc functions that return 32 bits put junk in the upper 32 bits of AX.)
-
-//go:linkname syscall_syscall syscall.syscall
-//go:nosplit
-func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
- entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&args))
- exitsyscall()
- return args.r1, args.r2, args.err
-}
-func syscall()
-
-//go:linkname syscall_syscallX syscall.syscallX
-//go:nosplit
-func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
- entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallX)), unsafe.Pointer(&args))
- exitsyscall()
- return args.r1, args.r2, args.err
-}
-func syscallX()
-
-//go:linkname syscall_syscall6 syscall.syscall6
-//go:nosplit
-func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err}
- entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&args))
- exitsyscall()
- return args.r1, args.r2, args.err
-}
-func syscall6()
-
-//go:linkname syscall_syscall6X syscall.syscall6X
-//go:nosplit
-func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err}
- entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&args))
- exitsyscall()
- return args.r1, args.r2, args.err
-}
-func syscall6X()
-
-//go:linkname syscall_syscallPtr syscall.syscallPtr
-//go:nosplit
-func syscall_syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
- entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallPtr)), unsafe.Pointer(&args))
- exitsyscall()
- return args.r1, args.r2, args.err
-}
-func syscallPtr()
-
-//go:linkname syscall_rawSyscall syscall.rawSyscall
-//go:nosplit
-func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&args))
- return args.r1, args.r2, args.err
-}
-
-//go:linkname syscall_rawSyscall6 syscall.rawSyscall6
-//go:nosplit
-func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err}
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&args))
- return args.r1, args.r2, args.err
-}
-
-// crypto_x509_syscall is used in crypto/x509 to call into Security.framework and CF.
-
-//go:linkname crypto_x509_syscall crypto/x509/internal/macos.syscall
-//go:nosplit
-func crypto_x509_syscall(fn, a1, a2, a3, a4, a5 uintptr, f1 float64) (r1 uintptr) {
- args := struct {
- fn, a1, a2, a3, a4, a5 uintptr
- f1 float64
- r1 uintptr
- }{fn, a1, a2, a3, a4, a5, f1, r1}
- entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall_x509)), unsafe.Pointer(&args))
- exitsyscall()
- return args.r1
-}
-func syscall_x509()
-
-// The *_trampoline functions convert from the Go calling convention to the C calling convention
-// and then call the underlying libc function. They are defined in sys_darwin_$ARCH.s.
-
-//go:nosplit
-//go:cgo_unsafe_args
-func pthread_attr_init(attr *pthreadattr) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_init_trampoline)), unsafe.Pointer(&attr))
- KeepAlive(attr)
- return ret
-}
-func pthread_attr_init_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func pthread_attr_getstacksize(attr *pthreadattr, size *uintptr) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_getstacksize_trampoline)), unsafe.Pointer(&attr))
- KeepAlive(attr)
- KeepAlive(size)
- return ret
-}
-func pthread_attr_getstacksize_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func pthread_attr_setdetachstate(attr *pthreadattr, state int) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_setdetachstate_trampoline)), unsafe.Pointer(&attr))
- KeepAlive(attr)
- return ret
-}
-func pthread_attr_setdetachstate_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func pthread_create(attr *pthreadattr, start uintptr, arg unsafe.Pointer) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_create_trampoline)), unsafe.Pointer(&attr))
- KeepAlive(attr)
- KeepAlive(arg) // Just for consistency. Arg of course needs to be kept alive for the start function.
- return ret
-}
-func pthread_create_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func raise(sig uint32) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(raise_trampoline)), unsafe.Pointer(&sig))
-}
-func raise_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func pthread_self() (t pthread) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_self_trampoline)), unsafe.Pointer(&t))
- return
-}
-func pthread_self_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func pthread_kill(t pthread, sig uint32) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_kill_trampoline)), unsafe.Pointer(&t))
- return
-}
-func pthread_kill_trampoline()
-
-// mmap is used to do low-level memory allocation via mmap. Don't allow stack
-// splits, since this function (used by sysAlloc) is called in a lot of low-level
-// parts of the runtime and callers often assume it won't acquire any locks.
-//go:nosplit
-func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int) {
- args := struct {
- addr unsafe.Pointer
- n uintptr
- prot, flags, fd int32
- off uint32
- ret1 unsafe.Pointer
- ret2 int
- }{addr, n, prot, flags, fd, off, nil, 0}
- libcCall(unsafe.Pointer(abi.FuncPCABI0(mmap_trampoline)), unsafe.Pointer(&args))
- return args.ret1, args.ret2
-}
-func mmap_trampoline()
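
A user-space analogue of the request the allocator issues through this
wrapper, using the syscall package rather than the runtime-internal call
(Darwin; the size and flags are chosen for illustration):

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        // Anonymous private read/write mapping, 1 MiB.
        b, err := syscall.Mmap(-1, 0, 1<<20,
            syscall.PROT_READ|syscall.PROT_WRITE,
            syscall.MAP_ANON|syscall.MAP_PRIVATE)
        if err != nil {
            panic(err)
        }
        fmt.Println(len(b)) // 1048576
        syscall.Munmap(b)
    }
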
-
-//go:nosplit
-//go:cgo_unsafe_args
-func munmap(addr unsafe.Pointer, n uintptr) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(munmap_trampoline)), unsafe.Pointer(&addr))
- KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address.
-}
-func munmap_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func madvise(addr unsafe.Pointer, n uintptr, flags int32) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(madvise_trampoline)), unsafe.Pointer(&addr))
- KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address.
-}
-func madvise_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func mlock(addr unsafe.Pointer, n uintptr) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(mlock_trampoline)), unsafe.Pointer(&addr))
- KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address.
-}
-func mlock_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func read(fd int32, p unsafe.Pointer, n int32) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(read_trampoline)), unsafe.Pointer(&fd))
- KeepAlive(p)
- return ret
-}
-func read_trampoline()
-
-func pipe() (r, w int32, errno int32) {
- var p [2]int32
- errno = libcCall(unsafe.Pointer(abi.FuncPCABI0(pipe_trampoline)), noescape(unsafe.Pointer(&p)))
- return p[0], p[1], errno
-}
-func pipe_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func closefd(fd int32) int32 {
- return libcCall(unsafe.Pointer(abi.FuncPCABI0(close_trampoline)), unsafe.Pointer(&fd))
-}
-func close_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-//
-// This is exported via linkname to assembly in runtime/cgo.
-//go:linkname exit
-func exit(code int32) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(exit_trampoline)), unsafe.Pointer(&code))
-}
-func exit_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func usleep(usec uint32) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec))
-}
-func usleep_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func usleep_no_g(usec uint32) {
- asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec))
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func write1(fd uintptr, p unsafe.Pointer, n int32) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(write_trampoline)), unsafe.Pointer(&fd))
- KeepAlive(p)
- return ret
-}
-func write_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func open(name *byte, mode, perm int32) (ret int32) {
- ret = libcCall(unsafe.Pointer(abi.FuncPCABI0(open_trampoline)), unsafe.Pointer(&name))
- KeepAlive(name)
- return
-}
-func open_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func nanotime1() int64 {
- var r struct {
- t int64 // raw timer
- numer, denom uint32 // conversion factors. nanoseconds = t * numer / denom.
- }
- libcCall(unsafe.Pointer(abi.FuncPCABI0(nanotime_trampoline)), unsafe.Pointer(&r))
- // Note: Apple seems unconcerned about overflow here. See
- // https://developer.apple.com/library/content/qa/qa1398/_index.html
- // Note also, numer == denom == 1 is common.
- t := r.t
- if r.numer != 1 {
- t *= int64(r.numer)
- }
- if r.denom != 1 {
- t /= int64(r.denom)
- }
- return t
-}
-func nanotime_trampoline()
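
The numer/denom scaling with a concrete timebase: Apple Silicon machines
commonly report 125/3 (a 24 MHz tick), while x86 Macs report 1/1 and skip
both branches. The raw tick count below is hypothetical:

    package main

    import "fmt"

    func main() {
        t := int64(24_000_000)               // one second of raw ticks at 24 MHz
        numer, denom := int64(125), int64(3) // timebase as reported on Apple Silicon
        fmt.Println(t * numer / denom)       // 1000000000 ns
    }
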
-
-//go:nosplit
-//go:cgo_unsafe_args
-func walltime() (int64, int32) {
- var t timespec
- libcCall(unsafe.Pointer(abi.FuncPCABI0(walltime_trampoline)), unsafe.Pointer(&t))
- return t.tv_sec, int32(t.tv_nsec)
-}
-func walltime_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func sigaction(sig uint32, new *usigactiont, old *usigactiont) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaction_trampoline)), unsafe.Pointer(&sig))
- KeepAlive(new)
- KeepAlive(old)
-}
-func sigaction_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func sigprocmask(how uint32, new *sigset, old *sigset) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(sigprocmask_trampoline)), unsafe.Pointer(&how))
- KeepAlive(new)
- KeepAlive(old)
-}
-func sigprocmask_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func sigaltstack(new *stackt, old *stackt) {
- if new != nil && new.ss_flags&_SS_DISABLE != 0 && new.ss_size == 0 {
- // Despite the fact that Darwin's sigaltstack man page says it ignores the size
- // when SS_DISABLE is set, it doesn't. sigaltstack returns ENOMEM
- // if we don't give it a reasonable size.
- // ref: http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20140421/214296.html
- new.ss_size = 32768
- }
- libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaltstack_trampoline)), unsafe.Pointer(&new))
- KeepAlive(new)
- KeepAlive(old)
-}
-func sigaltstack_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func raiseproc(sig uint32) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(raiseproc_trampoline)), unsafe.Pointer(&sig))
-}
-func raiseproc_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func setitimer(mode int32, new, old *itimerval) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(setitimer_trampoline)), unsafe.Pointer(&mode))
- KeepAlive(new)
- KeepAlive(old)
-}
-func setitimer_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func sysctl(mib *uint32, miblen uint32, oldp *byte, oldlenp *uintptr, newp *byte, newlen uintptr) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(sysctl_trampoline)), unsafe.Pointer(&mib))
- KeepAlive(mib)
- KeepAlive(oldp)
- KeepAlive(oldlenp)
- KeepAlive(newp)
- return ret
-}
-func sysctl_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func sysctlbyname(name *byte, oldp *byte, oldlenp *uintptr, newp *byte, newlen uintptr) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(sysctlbyname_trampoline)), unsafe.Pointer(&name))
- KeepAlive(name)
- KeepAlive(oldp)
- KeepAlive(oldlenp)
- KeepAlive(newp)
- return ret
-}
-func sysctlbyname_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func fcntl(fd, cmd, arg int32) int32 {
- return libcCall(unsafe.Pointer(abi.FuncPCABI0(fcntl_trampoline)), unsafe.Pointer(&fd))
-}
-func fcntl_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func kqueue() int32 {
- v := libcCall(unsafe.Pointer(abi.FuncPCABI0(kqueue_trampoline)), nil)
- return v
-}
-func kqueue_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(kevent_trampoline)), unsafe.Pointer(&kq))
- KeepAlive(ch)
- KeepAlive(ev)
- KeepAlive(ts)
- return ret
-}
-func kevent_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func pthread_mutex_init(m *pthreadmutex, attr *pthreadmutexattr) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_mutex_init_trampoline)), unsafe.Pointer(&m))
- KeepAlive(m)
- KeepAlive(attr)
- return ret
-}
-func pthread_mutex_init_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func pthread_mutex_lock(m *pthreadmutex) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_mutex_lock_trampoline)), unsafe.Pointer(&m))
- KeepAlive(m)
- return ret
-}
-func pthread_mutex_lock_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func pthread_mutex_unlock(m *pthreadmutex) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_mutex_unlock_trampoline)), unsafe.Pointer(&m))
- KeepAlive(m)
- return ret
-}
-func pthread_mutex_unlock_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func pthread_cond_init(c *pthreadcond, attr *pthreadcondattr) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_cond_init_trampoline)), unsafe.Pointer(&c))
- KeepAlive(c)
- KeepAlive(attr)
- return ret
-}
-func pthread_cond_init_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func pthread_cond_wait(c *pthreadcond, m *pthreadmutex) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_cond_wait_trampoline)), unsafe.Pointer(&c))
- KeepAlive(c)
- KeepAlive(m)
- return ret
-}
-func pthread_cond_wait_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func pthread_cond_timedwait_relative_np(c *pthreadcond, m *pthreadmutex, t *timespec) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_cond_timedwait_relative_np_trampoline)), unsafe.Pointer(&c))
- KeepAlive(c)
- KeepAlive(m)
- KeepAlive(t)
- return ret
-}
-func pthread_cond_timedwait_relative_np_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func pthread_cond_signal(c *pthreadcond) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_cond_signal_trampoline)), unsafe.Pointer(&c))
- KeepAlive(c)
- return ret
-}
-func pthread_cond_signal_trampoline()
-
-// Not used on Darwin, but must be defined.
-func exitThread(wait *uint32) {
-}
-
-//go:nosplit
-func closeonexec(fd int32) {
- fcntl(fd, _F_SETFD, _FD_CLOEXEC)
-}
-
-//go:nosplit
-func setNonblock(fd int32) {
- flags := fcntl(fd, _F_GETFL, 0)
- fcntl(fd, _F_SETFL, flags|_O_NONBLOCK)
-}
-
-// Tell the linker that the libc_* functions are to be found
-// in a system library, with the libc_ prefix missing.
-
-//go:cgo_import_dynamic libc_pthread_attr_init pthread_attr_init "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_pthread_attr_getstacksize pthread_attr_getstacksize "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_pthread_attr_setdetachstate pthread_attr_setdetachstate "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_pthread_create pthread_create "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_pthread_self pthread_self "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_pthread_kill pthread_kill "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_exit _exit "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_raise raise "/usr/lib/libSystem.B.dylib"
-
-//go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib"
-
-//go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_madvise madvise "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_error __error "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_usleep usleep "/usr/lib/libSystem.B.dylib"
-
-//go:cgo_import_dynamic libc_mach_timebase_info mach_timebase_info "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_mach_absolute_time mach_absolute_time "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_sigaction sigaction "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_pthread_sigmask pthread_sigmask "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_sigaltstack sigaltstack "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_setitimer setitimer "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib"
-
-//go:cgo_import_dynamic libc_pthread_mutex_init pthread_mutex_init "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_pthread_mutex_lock pthread_mutex_lock "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_pthread_mutex_unlock pthread_mutex_unlock "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_pthread_cond_init pthread_cond_init "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_pthread_cond_wait pthread_cond_wait "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_pthread_cond_timedwait_relative_np pthread_cond_timedwait_relative_np "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_pthread_cond_signal pthread_cond_signal "/usr/lib/libSystem.B.dylib"
diff --git a/contrib/go/_std_1.18/src/runtime/sys_darwin_amd64.s b/contrib/go/_std_1.18/src/runtime/sys_darwin_amd64.s
deleted file mode 100644
index db4715d2b7..0000000000
--- a/contrib/go/_std_1.18/src/runtime/sys_darwin_amd64.s
+++ /dev/null
@@ -1,859 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// System calls and other sys.stuff for AMD64, Darwin
-// System calls are implemented in libSystem; this file contains
-// trampolines that convert from Go to C calling convention.
-
-#include "go_asm.h"
-#include "go_tls.h"
-#include "textflag.h"
-#include "cgo/abi_amd64.h"
-
-#define CLOCK_REALTIME 0
-
-// Exit the entire program (like C exit)
-TEXT runtime·exit_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVL 0(DI), DI // arg 1 exit status
- CALL libc_exit(SB)
- MOVL $0xf1, 0xf1 // crash
- POPQ BP
- RET
-
-TEXT runtime·open_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVL 8(DI), SI // arg 2 flags
- MOVL 12(DI), DX // arg 3 mode
- MOVQ 0(DI), DI // arg 1 pathname
- XORL AX, AX // vararg: say "no float args"
- CALL libc_open(SB)
- POPQ BP
- RET
-
-TEXT runtime·close_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVL 0(DI), DI // arg 1 fd
- CALL libc_close(SB)
- POPQ BP
- RET
-
-TEXT runtime·read_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 8(DI), SI // arg 2 buf
- MOVL 16(DI), DX // arg 3 count
- MOVL 0(DI), DI // arg 1 fd
- CALL libc_read(SB)
- TESTL AX, AX
- JGE noerr
- CALL libc_error(SB)
- MOVL (AX), AX
- NEGL AX // caller expects negative errno value
-noerr:
- POPQ BP
- RET
-
-TEXT runtime·write_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 8(DI), SI // arg 2 buf
- MOVL 16(DI), DX // arg 3 count
- MOVQ 0(DI), DI // arg 1 fd
- CALL libc_write(SB)
- TESTL AX, AX
- JGE noerr
- CALL libc_error(SB)
- MOVL (AX), AX
- NEGL AX // caller expects negative errno value
-noerr:
- POPQ BP
- RET
-
-TEXT runtime·pipe_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- CALL libc_pipe(SB) // pointer already in DI
- TESTL AX, AX
- JEQ 3(PC)
- CALL libc_error(SB) // return negative errno value
- NEGL AX
- POPQ BP
- RET
-
-TEXT runtime·setitimer_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 8(DI), SI // arg 2 new
- MOVQ 16(DI), DX // arg 3 old
- MOVL 0(DI), DI // arg 1 which
- CALL libc_setitimer(SB)
- POPQ BP
- RET
-
-TEXT runtime·madvise_trampoline(SB), NOSPLIT, $0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 8(DI), SI // arg 2 len
- MOVL 16(DI), DX // arg 3 advice
- MOVQ 0(DI), DI // arg 1 addr
- CALL libc_madvise(SB)
- // ignore failure - maybe pages are locked
- POPQ BP
- RET
-
-TEXT runtime·mlock_trampoline(SB), NOSPLIT, $0
- UNDEF // unimplemented
-
-GLOBL timebase<>(SB),NOPTR,$(machTimebaseInfo__size)
-
-TEXT runtime·nanotime_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ DI, BX
- CALL libc_mach_absolute_time(SB)
- MOVQ AX, 0(BX)
- MOVL timebase<>+machTimebaseInfo_numer(SB), SI
- MOVL timebase<>+machTimebaseInfo_denom(SB), DI // atomic read
- TESTL DI, DI
- JNE initialized
-
- SUBQ $(machTimebaseInfo__size+15)/16*16, SP
- MOVQ SP, DI
- CALL libc_mach_timebase_info(SB)
- MOVL machTimebaseInfo_numer(SP), SI
- MOVL machTimebaseInfo_denom(SP), DI
- ADDQ $(machTimebaseInfo__size+15)/16*16, SP
-
- MOVL SI, timebase<>+machTimebaseInfo_numer(SB)
- MOVL DI, AX
- XCHGL AX, timebase<>+machTimebaseInfo_denom(SB) // atomic write
-
-initialized:
- MOVL SI, 8(BX)
- MOVL DI, 12(BX)
- MOVQ BP, SP
- POPQ BP
- RET
-
-TEXT runtime·walltime_trampoline(SB),NOSPLIT,$0
- PUSHQ BP // make a frame; keep stack aligned
- MOVQ SP, BP
- MOVQ DI, SI // arg 2 timespec
- MOVL $CLOCK_REALTIME, DI // arg 1 clock_id
- CALL libc_clock_gettime(SB)
- POPQ BP
- RET
-
-TEXT runtime·sigaction_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 8(DI), SI // arg 2 new
- MOVQ 16(DI), DX // arg 3 old
- MOVL 0(DI), DI // arg 1 sig
- CALL libc_sigaction(SB)
- TESTL AX, AX
- JEQ 2(PC)
- MOVL $0xf1, 0xf1 // crash
- POPQ BP
- RET
-
-TEXT runtime·sigprocmask_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 8(DI), SI // arg 2 new
- MOVQ 16(DI), DX // arg 3 old
- MOVL 0(DI), DI // arg 1 how
- CALL libc_pthread_sigmask(SB)
- TESTL AX, AX
- JEQ 2(PC)
- MOVL $0xf1, 0xf1 // crash
- POPQ BP
- RET
-
-TEXT runtime·sigaltstack_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 8(DI), SI // arg 2 old
- MOVQ 0(DI), DI // arg 1 new
- CALL libc_sigaltstack(SB)
- TESTQ AX, AX
- JEQ 2(PC)
- MOVL $0xf1, 0xf1 // crash
- POPQ BP
- RET
-
-TEXT runtime·raiseproc_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVL 0(DI), BX // signal
- CALL libc_getpid(SB)
- MOVL AX, DI // arg 1 pid
- MOVL BX, SI // arg 2 signal
- CALL libc_kill(SB)
- POPQ BP
- RET
-
-TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
- MOVQ fn+0(FP), AX
- MOVL sig+8(FP), DI
- MOVQ info+16(FP), SI
- MOVQ ctx+24(FP), DX
- PUSHQ BP
- MOVQ SP, BP
- ANDQ $~15, SP // alignment for x86_64 ABI
- CALL AX
- MOVQ BP, SP
- POPQ BP
- RET
-
-// This is the function registered during sigaction and is invoked when
-// a signal is received. It just redirects to the Go function sigtrampgo.
-// Called using C ABI.
-TEXT runtime·sigtramp(SB),NOSPLIT,$0
- // Transition from C ABI to Go ABI.
- PUSH_REGS_HOST_TO_ABI0()
-
- // Call into the Go signal handler
- NOP SP // disable vet stack checking
- ADJSP $24
- MOVL DI, 0(SP) // sig
- MOVQ SI, 8(SP) // info
- MOVQ DX, 16(SP) // ctx
- CALL ·sigtrampgo(SB)
- ADJSP $-24
-
- POP_REGS_HOST_TO_ABI0()
- RET
-
-// Called using C ABI.
-TEXT runtime·sigprofNonGoWrapper<>(SB),NOSPLIT,$0
- // Transition from C ABI to Go ABI.
- PUSH_REGS_HOST_TO_ABI0()
-
- // Call into the Go signal handler
- NOP SP // disable vet stack checking
- ADJSP $24
- MOVL DI, 0(SP) // sig
- MOVQ SI, 8(SP) // info
- MOVQ DX, 16(SP) // ctx
- CALL ·sigprofNonGo(SB)
- ADJSP $-24
-
- POP_REGS_HOST_TO_ABI0()
- RET
-
-// Used instead of sigtramp in programs that use cgo.
-// Arguments from the kernel are in DI, SI, DX.
-TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
- // If no traceback function, do usual sigtramp.
- MOVQ runtime·cgoTraceback(SB), AX
- TESTQ AX, AX
- JZ sigtramp
-
- // If no traceback support function, which means that
- // runtime/cgo was not linked in, do usual sigtramp.
- MOVQ _cgo_callers(SB), AX
- TESTQ AX, AX
- JZ sigtramp
-
- // Figure out if we are currently in a cgo call.
- // If not, just do usual sigtramp.
- get_tls(CX)
- MOVQ g(CX),AX
- TESTQ AX, AX
- JZ sigtrampnog // g == nil
- MOVQ g_m(AX), AX
- TESTQ AX, AX
- JZ sigtramp // g.m == nil
- MOVL m_ncgo(AX), CX
- TESTL CX, CX
- JZ sigtramp // g.m.ncgo == 0
- MOVQ m_curg(AX), CX
- TESTQ CX, CX
- JZ sigtramp // g.m.curg == nil
- MOVQ g_syscallsp(CX), CX
- TESTQ CX, CX
- JZ sigtramp // g.m.curg.syscallsp == 0
- MOVQ m_cgoCallers(AX), R8
- TESTQ R8, R8
- JZ sigtramp // g.m.cgoCallers == nil
- MOVL m_cgoCallersUse(AX), CX
- TESTL CX, CX
- JNZ sigtramp // g.m.cgoCallersUse != 0
-
- // Jump to a function in runtime/cgo.
- // That function, written in C, will call the user's traceback
- // function with proper unwind info, and will then call back here.
- // The first three arguments, and the fifth, are already in registers.
- // Set the two remaining arguments now.
- MOVQ runtime·cgoTraceback(SB), CX
- MOVQ $runtime·sigtramp(SB), R9
- MOVQ _cgo_callers(SB), AX
- JMP AX
-
-sigtramp:
- JMP runtime·sigtramp(SB)
-
-sigtrampnog:
- // Signal arrived on a non-Go thread. If this is SIGPROF, get a
- // stack trace.
- CMPL DI, $27 // 27 == SIGPROF
- JNZ sigtramp
-
- // Lock sigprofCallersUse.
- MOVL $0, AX
- MOVL $1, CX
- MOVQ $runtime·sigprofCallersUse(SB), R11
- LOCK
- CMPXCHGL CX, 0(R11)
- JNZ sigtramp // Skip stack trace if already locked.
-
- // Jump to the traceback function in runtime/cgo.
- // It will call back to sigprofNonGo, via sigprofNonGoWrapper, to convert
- // the arguments to the Go calling convention.
- // First three arguments to traceback function are in registers already.
- MOVQ runtime·cgoTraceback(SB), CX
- MOVQ $runtime·sigprofCallers(SB), R8
- MOVQ $runtime·sigprofNonGoWrapper<>(SB), R9
- MOVQ _cgo_callers(SB), AX
- JMP AX
-
-TEXT runtime·mmap_trampoline(SB),NOSPLIT,$0
- PUSHQ BP // make a frame; keep stack aligned
- MOVQ SP, BP
- MOVQ DI, BX
- MOVQ 0(BX), DI // arg 1 addr
- MOVQ 8(BX), SI // arg 2 len
- MOVL 16(BX), DX // arg 3 prot
- MOVL 20(BX), CX // arg 4 flags
-	MOVL	24(BX), R8		// arg 5 fd
- MOVL 28(BX), R9 // arg 6 offset
- CALL libc_mmap(SB)
- XORL DX, DX
- CMPQ AX, $-1
- JNE ok
- CALL libc_error(SB)
- MOVLQSX (AX), DX // errno
- XORL AX, AX
-ok:
- MOVQ AX, 32(BX)
- MOVQ DX, 40(BX)
- POPQ BP
- RET
-
-TEXT runtime·munmap_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 8(DI), SI // arg 2 len
- MOVQ 0(DI), DI // arg 1 addr
- CALL libc_munmap(SB)
- TESTQ AX, AX
- JEQ 2(PC)
- MOVL $0xf1, 0xf1 // crash
- POPQ BP
- RET
-
-TEXT runtime·usleep_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVL 0(DI), DI // arg 1 usec
- CALL libc_usleep(SB)
- POPQ BP
- RET
-
-TEXT runtime·settls(SB),NOSPLIT,$32
- // Nothing to do on Darwin, pthread already set thread-local storage up.
- RET
-
-TEXT runtime·sysctl_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVL 8(DI), SI // arg 2 miblen
- MOVQ 16(DI), DX // arg 3 oldp
- MOVQ 24(DI), CX // arg 4 oldlenp
- MOVQ 32(DI), R8 // arg 5 newp
- MOVQ 40(DI), R9 // arg 6 newlen
- MOVQ 0(DI), DI // arg 1 mib
- CALL libc_sysctl(SB)
- POPQ BP
- RET
-
-TEXT runtime·sysctlbyname_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 8(DI), SI // arg 2 oldp
- MOVQ 16(DI), DX // arg 3 oldlenp
- MOVQ 24(DI), CX // arg 4 newp
- MOVQ 32(DI), R8 // arg 5 newlen
- MOVQ 0(DI), DI // arg 1 name
- CALL libc_sysctlbyname(SB)
- POPQ BP
- RET
-
-TEXT runtime·kqueue_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- CALL libc_kqueue(SB)
- POPQ BP
- RET
-
-TEXT runtime·kevent_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 8(DI), SI // arg 2 keventt
- MOVL 16(DI), DX // arg 3 nch
- MOVQ 24(DI), CX // arg 4 ev
- MOVL 32(DI), R8 // arg 5 nev
- MOVQ 40(DI), R9 // arg 6 ts
- MOVL 0(DI), DI // arg 1 kq
- CALL libc_kevent(SB)
- CMPL AX, $-1
- JNE ok
- CALL libc_error(SB)
- MOVLQSX (AX), AX // errno
- NEGQ AX // caller wants it as a negative error code
-ok:
- POPQ BP
- RET
-
-TEXT runtime·fcntl_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVL 4(DI), SI // arg 2 cmd
- MOVL 8(DI), DX // arg 3 arg
- MOVL 0(DI), DI // arg 1 fd
- XORL AX, AX // vararg: say "no float args"
- CALL libc_fcntl(SB)
- POPQ BP
- RET
-
-// mstart_stub is the first function executed on a new thread started by pthread_create.
-// It just does some low-level setup and then calls mstart.
-// Note: called with the C calling convention.
-TEXT runtime·mstart_stub(SB),NOSPLIT,$0
- // DI points to the m.
- // We are already on m's g0 stack.
-
- // Transition from C ABI to Go ABI.
- PUSH_REGS_HOST_TO_ABI0()
-
- MOVQ m_g0(DI), DX // g
-
- // Initialize TLS entry.
- // See cmd/link/internal/ld/sym.go:computeTLSOffset.
- MOVQ DX, 0x30(GS)
-
- CALL runtime·mstart(SB)
-
- POP_REGS_HOST_TO_ABI0()
-
- // Go is all done with this OS thread.
- // Tell pthread everything is ok (we never join with this thread, so
- // the value here doesn't really matter).
- XORL AX, AX
- RET
-
-// These trampolines help convert from Go calling convention to C calling convention.
-// They should be called with asmcgocall.
-// A pointer to the arguments is passed in DI.
-// A single int32 result is returned in AX.
-// (For more results, make an args/results structure.)
-TEXT runtime·pthread_attr_init_trampoline(SB),NOSPLIT,$0
- PUSHQ BP // make frame, keep stack 16-byte aligned.
- MOVQ SP, BP
- MOVQ 0(DI), DI // arg 1 attr
- CALL libc_pthread_attr_init(SB)
- POPQ BP
- RET
-
-TEXT runtime·pthread_attr_getstacksize_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 8(DI), SI // arg 2 size
- MOVQ 0(DI), DI // arg 1 attr
- CALL libc_pthread_attr_getstacksize(SB)
- POPQ BP
- RET
-
-TEXT runtime·pthread_attr_setdetachstate_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 8(DI), SI // arg 2 state
- MOVQ 0(DI), DI // arg 1 attr
- CALL libc_pthread_attr_setdetachstate(SB)
- POPQ BP
- RET
-
-TEXT runtime·pthread_create_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- SUBQ $16, SP
- MOVQ 0(DI), SI // arg 2 attr
- MOVQ 8(DI), DX // arg 3 start
- MOVQ 16(DI), CX // arg 4 arg
- MOVQ SP, DI // arg 1 &threadid (which we throw away)
- CALL libc_pthread_create(SB)
- MOVQ BP, SP
- POPQ BP
- RET
-
-TEXT runtime·raise_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVL 0(DI), DI // arg 1 signal
- CALL libc_raise(SB)
- POPQ BP
- RET
-
-TEXT runtime·pthread_mutex_init_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 8(DI), SI // arg 2 attr
- MOVQ 0(DI), DI // arg 1 mutex
- CALL libc_pthread_mutex_init(SB)
- POPQ BP
- RET
-
-TEXT runtime·pthread_mutex_lock_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 0(DI), DI // arg 1 mutex
- CALL libc_pthread_mutex_lock(SB)
- POPQ BP
- RET
-
-TEXT runtime·pthread_mutex_unlock_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 0(DI), DI // arg 1 mutex
- CALL libc_pthread_mutex_unlock(SB)
- POPQ BP
- RET
-
-TEXT runtime·pthread_cond_init_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 8(DI), SI // arg 2 attr
- MOVQ 0(DI), DI // arg 1 cond
- CALL libc_pthread_cond_init(SB)
- POPQ BP
- RET
-
-TEXT runtime·pthread_cond_wait_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 8(DI), SI // arg 2 mutex
- MOVQ 0(DI), DI // arg 1 cond
- CALL libc_pthread_cond_wait(SB)
- POPQ BP
- RET
-
-TEXT runtime·pthread_cond_timedwait_relative_np_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 8(DI), SI // arg 2 mutex
- MOVQ 16(DI), DX // arg 3 timeout
- MOVQ 0(DI), DI // arg 1 cond
- CALL libc_pthread_cond_timedwait_relative_np(SB)
- POPQ BP
- RET
-
-TEXT runtime·pthread_cond_signal_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 0(DI), DI // arg 1 cond
- CALL libc_pthread_cond_signal(SB)
- POPQ BP
- RET
-
-TEXT runtime·pthread_self_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ DI, BX // BX is caller-save
- CALL libc_pthread_self(SB)
- MOVQ AX, 0(BX) // return value
- POPQ BP
- RET
-
-TEXT runtime·pthread_kill_trampoline(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- MOVQ 8(DI), SI // arg 2 sig
- MOVQ 0(DI), DI // arg 1 thread
- CALL libc_pthread_kill(SB)
- POPQ BP
- RET
-
-// syscall calls a function in libc on behalf of the syscall package.
-// syscall takes a pointer to a struct like:
-// struct {
-// fn uintptr
-// a1 uintptr
-// a2 uintptr
-// a3 uintptr
-// r1 uintptr
-// r2 uintptr
-// err uintptr
-// }
-// syscall must be called on the g0 stack with the
-// C calling convention (use libcCall).
-//
-// syscall expects a 32-bit result and tests for 32-bit -1
-// to decide whether there was an error.
-TEXT runtime·syscall(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- SUBQ $16, SP
- MOVQ (0*8)(DI), CX // fn
- MOVQ (2*8)(DI), SI // a2
- MOVQ (3*8)(DI), DX // a3
- MOVQ DI, (SP)
- MOVQ (1*8)(DI), DI // a1
- XORL AX, AX // vararg: say "no float args"
-
- CALL CX
-
- MOVQ (SP), DI
- MOVQ AX, (4*8)(DI) // r1
- MOVQ DX, (5*8)(DI) // r2
-
- // Standard libc functions return -1 on error
- // and set errno.
- CMPL AX, $-1 // Note: high 32 bits are junk
- JNE ok
-
- // Get error code from libc.
- CALL libc_error(SB)
- MOVLQSX (AX), AX
- MOVQ (SP), DI
- MOVQ AX, (6*8)(DI) // err
-
-ok:
- XORL AX, AX // no error (it's ignored anyway)
- MOVQ BP, SP
- POPQ BP
- RET
-
-// syscallX calls a function in libc on behalf of the syscall package.
-// syscallX takes a pointer to a struct like:
-// struct {
-// fn uintptr
-// a1 uintptr
-// a2 uintptr
-// a3 uintptr
-// r1 uintptr
-// r2 uintptr
-// err uintptr
-// }
-// syscallX must be called on the g0 stack with the
-// C calling convention (use libcCall).
-//
-// syscallX is like syscall but expects a 64-bit result
-// and tests for 64-bit -1 to decide whether there was an error.
-TEXT runtime·syscallX(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- SUBQ $16, SP
- MOVQ (0*8)(DI), CX // fn
- MOVQ (2*8)(DI), SI // a2
- MOVQ (3*8)(DI), DX // a3
- MOVQ DI, (SP)
- MOVQ (1*8)(DI), DI // a1
- XORL AX, AX // vararg: say "no float args"
-
- CALL CX
-
- MOVQ (SP), DI
- MOVQ AX, (4*8)(DI) // r1
- MOVQ DX, (5*8)(DI) // r2
-
- // Standard libc functions return -1 on error
- // and set errno.
- CMPQ AX, $-1
- JNE ok
-
- // Get error code from libc.
- CALL libc_error(SB)
- MOVLQSX (AX), AX
- MOVQ (SP), DI
- MOVQ AX, (6*8)(DI) // err
-
-ok:
- XORL AX, AX // no error (it's ignored anyway)
- MOVQ BP, SP
- POPQ BP
- RET
-
-// syscallPtr is like syscallX except that the libc function reports an
-// error by returning NULL and setting errno.
-TEXT runtime·syscallPtr(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- SUBQ $16, SP
- MOVQ (0*8)(DI), CX // fn
- MOVQ (2*8)(DI), SI // a2
- MOVQ (3*8)(DI), DX // a3
- MOVQ DI, (SP)
- MOVQ (1*8)(DI), DI // a1
- XORL AX, AX // vararg: say "no float args"
-
- CALL CX
-
- MOVQ (SP), DI
- MOVQ AX, (4*8)(DI) // r1
- MOVQ DX, (5*8)(DI) // r2
-
- // syscallPtr libc functions return NULL on error
- // and set errno.
- TESTQ AX, AX
- JNE ok
-
- // Get error code from libc.
- CALL libc_error(SB)
- MOVLQSX (AX), AX
- MOVQ (SP), DI
- MOVQ AX, (6*8)(DI) // err
-
-ok:
- XORL AX, AX // no error (it's ignored anyway)
- MOVQ BP, SP
- POPQ BP
- RET
-
-// syscall6 calls a function in libc on behalf of the syscall package.
-// syscall6 takes a pointer to a struct like:
-// struct {
-// fn uintptr
-// a1 uintptr
-// a2 uintptr
-// a3 uintptr
-// a4 uintptr
-// a5 uintptr
-// a6 uintptr
-// r1 uintptr
-// r2 uintptr
-// err uintptr
-// }
-// syscall6 must be called on the g0 stack with the
-// C calling convention (use libcCall).
-//
-// syscall6 expects a 32-bit result and tests for 32-bit -1
-// to decide whether there was an error.
-TEXT runtime·syscall6(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- SUBQ $16, SP
-	MOVQ	(0*8)(DI), R11	// fn
- MOVQ (2*8)(DI), SI // a2
- MOVQ (3*8)(DI), DX // a3
- MOVQ (4*8)(DI), CX // a4
- MOVQ (5*8)(DI), R8 // a5
- MOVQ (6*8)(DI), R9 // a6
- MOVQ DI, (SP)
- MOVQ (1*8)(DI), DI // a1
- XORL AX, AX // vararg: say "no float args"
-
- CALL R11
-
- MOVQ (SP), DI
- MOVQ AX, (7*8)(DI) // r1
- MOVQ DX, (8*8)(DI) // r2
-
- CMPL AX, $-1
- JNE ok
-
- CALL libc_error(SB)
- MOVLQSX (AX), AX
- MOVQ (SP), DI
- MOVQ AX, (9*8)(DI) // err
-
-ok:
- XORL AX, AX // no error (it's ignored anyway)
- MOVQ BP, SP
- POPQ BP
- RET
-
-// syscall6X calls a function in libc on behalf of the syscall package.
-// syscall6X takes a pointer to a struct like:
-// struct {
-// fn uintptr
-// a1 uintptr
-// a2 uintptr
-// a3 uintptr
-// a4 uintptr
-// a5 uintptr
-// a6 uintptr
-// r1 uintptr
-// r2 uintptr
-// err uintptr
-// }
-// syscall6X must be called on the g0 stack with the
-// C calling convention (use libcCall).
-//
-// syscall6X is like syscall6 but expects a 64-bit result
-// and tests for 64-bit -1 to decide there was an error.
-TEXT runtime·syscall6X(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- SUBQ $16, SP
-	MOVQ	(0*8)(DI), R11 // fn
- MOVQ (2*8)(DI), SI // a2
- MOVQ (3*8)(DI), DX // a3
- MOVQ (4*8)(DI), CX // a4
- MOVQ (5*8)(DI), R8 // a5
- MOVQ (6*8)(DI), R9 // a6
- MOVQ DI, (SP)
- MOVQ (1*8)(DI), DI // a1
- XORL AX, AX // vararg: say "no float args"
-
- CALL R11
-
- MOVQ (SP), DI
- MOVQ AX, (7*8)(DI) // r1
- MOVQ DX, (8*8)(DI) // r2
-
- CMPQ AX, $-1
- JNE ok
-
- CALL libc_error(SB)
- MOVLQSX (AX), AX
- MOVQ (SP), DI
- MOVQ AX, (9*8)(DI) // err
-
-ok:
- XORL AX, AX // no error (it's ignored anyway)
- MOVQ BP, SP
- POPQ BP
- RET
-
-// syscall_x509 is for crypto/x509. It is like syscall6 but does not check for errors,
-// takes 5 uintptrs and 1 float64, and only returns one value,
-// for use with standard C ABI functions.
-TEXT runtime·syscall_x509(SB),NOSPLIT,$0
- PUSHQ BP
- MOVQ SP, BP
- SUBQ $16, SP
-	MOVQ	(0*8)(DI), R11 // fn
- MOVQ (2*8)(DI), SI // a2
- MOVQ (3*8)(DI), DX // a3
- MOVQ (4*8)(DI), CX // a4
- MOVQ (5*8)(DI), R8 // a5
- MOVQ (6*8)(DI), X0 // f1
- MOVQ DI, (SP)
- MOVQ (1*8)(DI), DI // a1
- XORL AX, AX // vararg: say "no float args"
-
- CALL R11
-
- MOVQ (SP), DI
- MOVQ AX, (7*8)(DI) // r1
-
- XORL AX, AX // no error (it's ignored anyway)
- MOVQ BP, SP
- POPQ BP
- RET
diff --git a/contrib/go/_std_1.18/src/runtime/sys_libc.go b/contrib/go/_std_1.18/src/runtime/sys_libc.go
deleted file mode 100644
index 7012b4167e..0000000000
--- a/contrib/go/_std_1.18/src/runtime/sys_libc.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build darwin || (openbsd && !mips64)
-
-package runtime
-
-import "unsafe"
-
-// Call fn with arg as its argument. Return what fn returns.
-// fn is the raw pc value of the entry point of the desired function.
-// Switches to the system stack, if not already there.
-// Preserves the calling point as the location where a profiler traceback will begin.
-//go:nosplit
-func libcCall(fn, arg unsafe.Pointer) int32 {
- // Leave caller's PC/SP/G around for traceback.
- gp := getg()
- var mp *m
- if gp != nil {
- mp = gp.m
- }
- if mp != nil && mp.libcallsp == 0 {
- mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
-		// sp must be set last: once the async CPU profiler finds
-		// all three values non-zero, it will use them.
- mp.libcallsp = getcallersp()
- } else {
- // Make sure we don't reset libcallsp. This makes
-		// libcCall reentrant; we remember the g/pc/sp for the
- // first call on an M, until that libcCall instance
- // returns. Reentrance only matters for signals, as
- // libc never calls back into Go. The tricky case is
- // where we call libcX from an M and record g/pc/sp.
- // Before that call returns, a signal arrives on the
- // same M and the signal handling code calls another
- // libc function. We don't want that second libcCall
- // from within the handler to be recorded, and we
- // don't want that call's completion to zero
- // libcallsp.
- // We don't need to set libcall* while we're in a sighandler
- // (even if we're not currently in libc) because we block all
- // signals while we're handling a signal. That includes the
- // profile signal, which is the one that uses the libcall* info.
- mp = nil
- }
- res := asmcgocall(fn, arg)
- if mp != nil {
- mp.libcallsp = 0
- }
- return res
-}
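
For context, the calling side: darwin libc wrappers in the runtime funnel through libcCall with the PC of an ABI0 trampoline and a pointer to their own argument frame. A sketch modeled on runtime/sys_darwin.go (exact trampoline names vary across Go versions):

    //go:nosplit
    //go:cgo_unsafe_args
    func write1(fd uintptr, p unsafe.Pointer, n int32) int32 {
    	// &fd is the start of the argument frame; the assembly
    	// trampoline unpacks it per the C calling convention.
    	return libcCall(unsafe.Pointer(abi.FuncPCABI0(write_trampoline)), unsafe.Pointer(&fd))
    }
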
diff --git a/contrib/go/_std_1.18/src/runtime/sys_linux_amd64.s b/contrib/go/_std_1.18/src/runtime/sys_linux_amd64.s
deleted file mode 100644
index f0e58e11db..0000000000
--- a/contrib/go/_std_1.18/src/runtime/sys_linux_amd64.s
+++ /dev/null
@@ -1,765 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//
-// System calls and other sys.stuff for AMD64, Linux
-//
-
-#include "go_asm.h"
-#include "go_tls.h"
-#include "textflag.h"
-#include "cgo/abi_amd64.h"
-
-#define AT_FDCWD -100
-
-#define SYS_read 0
-#define SYS_write 1
-#define SYS_close 3
-#define SYS_mmap 9
-#define SYS_munmap 11
-#define SYS_brk 12
-#define SYS_rt_sigaction 13
-#define SYS_rt_sigprocmask 14
-#define SYS_rt_sigreturn 15
-#define SYS_pipe 22
-#define SYS_sched_yield 24
-#define SYS_mincore 27
-#define SYS_madvise 28
-#define SYS_nanosleep 35
-#define SYS_setittimer 38
-#define SYS_getpid 39
-#define SYS_socket 41
-#define SYS_connect 42
-#define SYS_clone 56
-#define SYS_exit 60
-#define SYS_kill 62
-#define SYS_fcntl 72
-#define SYS_sigaltstack 131
-#define SYS_arch_prctl 158
-#define SYS_gettid 186
-#define SYS_futex 202
-#define SYS_sched_getaffinity 204
-#define SYS_epoll_create 213
-#define SYS_timer_create 222
-#define SYS_timer_settime 223
-#define SYS_timer_delete 226
-#define SYS_clock_gettime 228
-#define SYS_exit_group 231
-#define SYS_epoll_ctl 233
-#define SYS_tgkill 234
-#define SYS_openat 257
-#define SYS_faccessat 269
-#define SYS_epoll_pwait 281
-#define SYS_epoll_create1 291
-#define SYS_pipe2 293
-
-TEXT runtime·exit(SB),NOSPLIT,$0-4
- MOVL code+0(FP), DI
- MOVL $SYS_exit_group, AX
- SYSCALL
- RET
-
-// func exitThread(wait *uint32)
-TEXT runtime·exitThread(SB),NOSPLIT,$0-8
- MOVQ wait+0(FP), AX
- // We're done using the stack.
- MOVL $0, (AX)
- MOVL $0, DI // exit code
- MOVL $SYS_exit, AX
- SYSCALL
- // We may not even have a stack any more.
- INT $3
- JMP 0(PC)
-
-TEXT runtime·open(SB),NOSPLIT,$0-20
- // This uses openat instead of open, because Android O blocks open.
- MOVL $AT_FDCWD, DI // AT_FDCWD, so this acts like open
- MOVQ name+0(FP), SI
- MOVL mode+8(FP), DX
- MOVL perm+12(FP), R10
- MOVL $SYS_openat, AX
- SYSCALL
- CMPQ AX, $0xfffffffffffff001
- JLS 2(PC)
- MOVL $-1, AX
- MOVL AX, ret+16(FP)
- RET
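
The CMPQ/JLS pair above is the standard raw-syscall error check on linux/amd64: the kernel returns -errno in AX, so an unsigned result at or above 0xfffffffffffff001 (roughly the range [-4095, -1]) signals failure; the runtime variant above just returns -1 and drops the errno. A self-contained illustration of the full glibc-style decoding in ordinary Go:

    package main

    import (
    	"fmt"
    	"syscall"
    )

    // decode applies the boundary the assembly tests for: raw kernel
    // return values in [-4095, -1] are negated errnos (MAX_ERRNO = 4095).
    func decode(raw uintptr) (uintptr, syscall.Errno) {
    	if v := int64(raw); v < 0 && v >= -4095 {
    		return ^uintptr(0), syscall.Errno(-v) // ret = -1, errno set
    	}
    	return raw, 0
    }

    func main() {
    	ret, err := decode(^uintptr(1)) // kernel said -2 (ENOENT)
    	fmt.Println(int64(ret), err)    // -1 no such file or directory
    }
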
-
-TEXT runtime·closefd(SB),NOSPLIT,$0-12
- MOVL fd+0(FP), DI
- MOVL $SYS_close, AX
- SYSCALL
- CMPQ AX, $0xfffffffffffff001
- JLS 2(PC)
- MOVL $-1, AX
- MOVL AX, ret+8(FP)
- RET
-
-TEXT runtime·write1(SB),NOSPLIT,$0-28
- MOVQ fd+0(FP), DI
- MOVQ p+8(FP), SI
- MOVL n+16(FP), DX
- MOVL $SYS_write, AX
- SYSCALL
- MOVL AX, ret+24(FP)
- RET
-
-TEXT runtime·read(SB),NOSPLIT,$0-28
- MOVL fd+0(FP), DI
- MOVQ p+8(FP), SI
- MOVL n+16(FP), DX
- MOVL $SYS_read, AX
- SYSCALL
- MOVL AX, ret+24(FP)
- RET
-
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- LEAQ r+0(FP), DI
- MOVL $SYS_pipe, AX
- SYSCALL
- MOVL AX, errno+8(FP)
- RET
-
-// func pipe2(flags int32) (r, w int32, errno int32)
-TEXT runtime·pipe2(SB),NOSPLIT,$0-20
- LEAQ r+8(FP), DI
- MOVL flags+0(FP), SI
- MOVL $SYS_pipe2, AX
- SYSCALL
- MOVL AX, errno+16(FP)
- RET
-
-TEXT runtime·usleep(SB),NOSPLIT,$16
- MOVL $0, DX
- MOVL usec+0(FP), AX
- MOVL $1000000, CX
- DIVL CX
- MOVQ AX, 0(SP)
- MOVL $1000, AX // usec to nsec
- MULL DX
- MOVQ AX, 8(SP)
-
- // nanosleep(&ts, 0)
- MOVQ SP, DI
- MOVL $0, SI
- MOVL $SYS_nanosleep, AX
- SYSCALL
- RET
-
-TEXT runtime·gettid(SB),NOSPLIT,$0-4
- MOVL $SYS_gettid, AX
- SYSCALL
- MOVL AX, ret+0(FP)
- RET
-
-TEXT runtime·raise(SB),NOSPLIT,$0
- MOVL $SYS_getpid, AX
- SYSCALL
- MOVL AX, R12
- MOVL $SYS_gettid, AX
- SYSCALL
- MOVL AX, SI // arg 2 tid
- MOVL R12, DI // arg 1 pid
- MOVL sig+0(FP), DX // arg 3
- MOVL $SYS_tgkill, AX
- SYSCALL
- RET
-
-TEXT runtime·raiseproc(SB),NOSPLIT,$0
- MOVL $SYS_getpid, AX
- SYSCALL
- MOVL AX, DI // arg 1 pid
- MOVL sig+0(FP), SI // arg 2
- MOVL $SYS_kill, AX
- SYSCALL
- RET
-
-TEXT ·getpid(SB),NOSPLIT,$0-8
- MOVL $SYS_getpid, AX
- SYSCALL
- MOVQ AX, ret+0(FP)
- RET
-
-TEXT ·tgkill(SB),NOSPLIT,$0
- MOVQ tgid+0(FP), DI
- MOVQ tid+8(FP), SI
- MOVQ sig+16(FP), DX
- MOVL $SYS_tgkill, AX
- SYSCALL
- RET
-
-TEXT runtime·setitimer(SB),NOSPLIT,$0-24
- MOVL mode+0(FP), DI
- MOVQ new+8(FP), SI
- MOVQ old+16(FP), DX
- MOVL $SYS_setittimer, AX
- SYSCALL
- RET
-
-TEXT runtime·timer_create(SB),NOSPLIT,$0-28
- MOVL clockid+0(FP), DI
- MOVQ sevp+8(FP), SI
- MOVQ timerid+16(FP), DX
- MOVL $SYS_timer_create, AX
- SYSCALL
- MOVL AX, ret+24(FP)
- RET
-
-TEXT runtime·timer_settime(SB),NOSPLIT,$0-28
- MOVL timerid+0(FP), DI
- MOVL flags+4(FP), SI
- MOVQ new+8(FP), DX
- MOVQ old+16(FP), R10
- MOVL $SYS_timer_settime, AX
- SYSCALL
- MOVL AX, ret+24(FP)
- RET
-
-TEXT runtime·timer_delete(SB),NOSPLIT,$0-12
- MOVL timerid+0(FP), DI
- MOVL $SYS_timer_delete, AX
- SYSCALL
- MOVL AX, ret+8(FP)
- RET
-
-TEXT runtime·mincore(SB),NOSPLIT,$0-28
- MOVQ addr+0(FP), DI
- MOVQ n+8(FP), SI
- MOVQ dst+16(FP), DX
- MOVL $SYS_mincore, AX
- SYSCALL
- MOVL AX, ret+24(FP)
- RET
-
-// func nanotime1() int64
-TEXT runtime·nanotime1(SB),NOSPLIT,$16-8
- // We don't know how much stack space the VDSO code will need,
- // so switch to g0.
- // In particular, a kernel configured with CONFIG_OPTIMIZE_INLINING=n
- // and hardening can use a full page of stack space in gettime_sym
- // due to stack probes inserted to avoid stack/heap collisions.
- // See issue #20427.
-
- MOVQ SP, R12 // Save old SP; R12 unchanged by C code.
-
- MOVQ g_m(R14), BX // BX unchanged by C code.
-
- // Set vdsoPC and vdsoSP for SIGPROF traceback.
- // Save the old values on stack and restore them on exit,
- // so this function is reentrant.
- MOVQ m_vdsoPC(BX), CX
- MOVQ m_vdsoSP(BX), DX
- MOVQ CX, 0(SP)
- MOVQ DX, 8(SP)
-
- LEAQ ret+0(FP), DX
- MOVQ -8(DX), CX
- MOVQ CX, m_vdsoPC(BX)
- MOVQ DX, m_vdsoSP(BX)
-
- CMPQ R14, m_curg(BX) // Only switch if on curg.
- JNE noswitch
-
- MOVQ m_g0(BX), DX
- MOVQ (g_sched+gobuf_sp)(DX), SP // Set SP to g0 stack
-
-noswitch:
- SUBQ $16, SP // Space for results
- ANDQ $~15, SP // Align for C code
-
- MOVL $1, DI // CLOCK_MONOTONIC
- LEAQ 0(SP), SI
- MOVQ runtime·vdsoClockgettimeSym(SB), AX
- CMPQ AX, $0
- JEQ fallback
- CALL AX
-ret:
- MOVQ 0(SP), AX // sec
- MOVQ 8(SP), DX // nsec
- MOVQ R12, SP // Restore real SP
- // Restore vdsoPC, vdsoSP
- // We don't worry about being signaled between the two stores.
- // If we are not in a signal handler, we'll restore vdsoSP to 0,
- // and no one will care about vdsoPC. If we are in a signal handler,
- // we cannot receive another signal.
- MOVQ 8(SP), CX
- MOVQ CX, m_vdsoSP(BX)
- MOVQ 0(SP), CX
- MOVQ CX, m_vdsoPC(BX)
- // sec is in AX, nsec in DX
- // return nsec in AX
- IMULQ $1000000000, AX
- ADDQ DX, AX
- MOVQ AX, ret+0(FP)
- RET
-fallback:
- MOVQ $SYS_clock_gettime, AX
- SYSCALL
- JMP ret
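
nanotime1 above shows the vDSO dispatch pattern: if the runtime's ELF parser found __vdso_clock_gettime at startup, it is called directly with no kernel entry; otherwise control jumps to the plain SYS_clock_gettime fallback. The shape of that logic as a hedged, ordinary-Go sketch (the real symbol lookup is runtime-internal):

    type timespec struct{ sec, nsec int64 }

    // nanotimeSketch dispatches to the fast user-space clock if available.
    func nanotimeSketch(vdsoClockGettime, sysClockGettime func(clockid int32, ts *timespec)) int64 {
    	const clockMonotonic = 1
    	var ts timespec
    	if vdsoClockGettime != nil {
    		vdsoClockGettime(clockMonotonic, &ts) // fast path, no SYSCALL
    	} else {
    		sysClockGettime(clockMonotonic, &ts) // fallback: real syscall
    	}
    	return ts.sec*1_000_000_000 + ts.nsec
    }
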
-
-TEXT runtime·rtsigprocmask(SB),NOSPLIT,$0-28
- MOVL how+0(FP), DI
- MOVQ new+8(FP), SI
- MOVQ old+16(FP), DX
- MOVL size+24(FP), R10
- MOVL $SYS_rt_sigprocmask, AX
- SYSCALL
- CMPQ AX, $0xfffffffffffff001
- JLS 2(PC)
- MOVL $0xf1, 0xf1 // crash
- RET
-
-TEXT runtime·rt_sigaction(SB),NOSPLIT,$0-36
- MOVQ sig+0(FP), DI
- MOVQ new+8(FP), SI
- MOVQ old+16(FP), DX
- MOVQ size+24(FP), R10
- MOVL $SYS_rt_sigaction, AX
- SYSCALL
- MOVL AX, ret+32(FP)
- RET
-
-// Call the function stored in _cgo_sigaction using the GCC calling convention.
-TEXT runtime·callCgoSigaction(SB),NOSPLIT,$16
- MOVQ sig+0(FP), DI
- MOVQ new+8(FP), SI
- MOVQ old+16(FP), DX
- MOVQ _cgo_sigaction(SB), AX
- MOVQ SP, BX // callee-saved
- ANDQ $~15, SP // alignment as per amd64 psABI
- CALL AX
- MOVQ BX, SP
- MOVL AX, ret+24(FP)
- RET
-
-TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
- MOVQ fn+0(FP), AX
- MOVL sig+8(FP), DI
- MOVQ info+16(FP), SI
- MOVQ ctx+24(FP), DX
- PUSHQ BP
- MOVQ SP, BP
- ANDQ $~15, SP // alignment for x86_64 ABI
- CALL AX
- MOVQ BP, SP
- POPQ BP
- RET
-
-// Called using C ABI.
-TEXT runtime·sigtramp(SB),NOSPLIT,$0
- // Transition from C ABI to Go ABI.
- PUSH_REGS_HOST_TO_ABI0()
-
- // Call into the Go signal handler
- NOP SP // disable vet stack checking
- ADJSP $24
- MOVQ DI, 0(SP) // sig
- MOVQ SI, 8(SP) // info
- MOVQ DX, 16(SP) // ctx
- CALL ·sigtrampgo(SB)
- ADJSP $-24
-
- POP_REGS_HOST_TO_ABI0()
- RET
-
-// Called using C ABI.
-TEXT runtime·sigprofNonGoWrapper<>(SB),NOSPLIT,$0
- // Transition from C ABI to Go ABI.
- PUSH_REGS_HOST_TO_ABI0()
-
- // Call into the Go signal handler
- NOP SP // disable vet stack checking
- ADJSP $24
- MOVL DI, 0(SP) // sig
- MOVQ SI, 8(SP) // info
- MOVQ DX, 16(SP) // ctx
- CALL ·sigprofNonGo(SB)
- ADJSP $-24
-
- POP_REGS_HOST_TO_ABI0()
- RET
-
-// Used instead of sigtramp in programs that use cgo.
-// Arguments from kernel are in DI, SI, DX.
-TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
-	// If there is no traceback function, do the usual sigtramp.
- MOVQ runtime·cgoTraceback(SB), AX
- TESTQ AX, AX
- JZ sigtramp
-
-	// If there is no traceback support function, which means that
-	// runtime/cgo was not linked in, do the usual sigtramp.
- MOVQ _cgo_callers(SB), AX
- TESTQ AX, AX
- JZ sigtramp
-
- // Figure out if we are currently in a cgo call.
- // If not, just do usual sigtramp.
- get_tls(CX)
- MOVQ g(CX),AX
- TESTQ AX, AX
- JZ sigtrampnog // g == nil
- MOVQ g_m(AX), AX
- TESTQ AX, AX
- JZ sigtramp // g.m == nil
- MOVL m_ncgo(AX), CX
- TESTL CX, CX
- JZ sigtramp // g.m.ncgo == 0
- MOVQ m_curg(AX), CX
- TESTQ CX, CX
- JZ sigtramp // g.m.curg == nil
- MOVQ g_syscallsp(CX), CX
- TESTQ CX, CX
- JZ sigtramp // g.m.curg.syscallsp == 0
- MOVQ m_cgoCallers(AX), R8
- TESTQ R8, R8
- JZ sigtramp // g.m.cgoCallers == nil
- MOVL m_cgoCallersUse(AX), CX
- TESTL CX, CX
- JNZ sigtramp // g.m.cgoCallersUse != 0
-
- // Jump to a function in runtime/cgo.
- // That function, written in C, will call the user's traceback
- // function with proper unwind info, and will then call back here.
- // The first three arguments, and the fifth, are already in registers.
- // Set the two remaining arguments now.
- MOVQ runtime·cgoTraceback(SB), CX
- MOVQ $runtime·sigtramp(SB), R9
- MOVQ _cgo_callers(SB), AX
- JMP AX
-
-sigtramp:
- JMP runtime·sigtramp(SB)
-
-sigtrampnog:
- // Signal arrived on a non-Go thread. If this is SIGPROF, get a
- // stack trace.
- CMPL DI, $27 // 27 == SIGPROF
- JNZ sigtramp
-
- // Lock sigprofCallersUse.
- MOVL $0, AX
- MOVL $1, CX
- MOVQ $runtime·sigprofCallersUse(SB), R11
- LOCK
- CMPXCHGL CX, 0(R11)
- JNZ sigtramp // Skip stack trace if already locked.
-
- // Jump to the traceback function in runtime/cgo.
- // It will call back to sigprofNonGo, via sigprofNonGoWrapper, to convert
- // the arguments to the Go calling convention.
- // First three arguments to traceback function are in registers already.
- MOVQ runtime·cgoTraceback(SB), CX
- MOVQ $runtime·sigprofCallers(SB), R8
- MOVQ $runtime·sigprofNonGoWrapper<>(SB), R9
- MOVQ _cgo_callers(SB), AX
- JMP AX
-
-// For cgo unwinding to work, this function must look precisely like
-// the one in glibc. The glibc source code is:
-// https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/unix/sysv/linux/x86_64/sigaction.c
-// The code that cares about the precise instructions used is:
-// https://gcc.gnu.org/viewcvs/gcc/trunk/libgcc/config/i386/linux-unwind.h?revision=219188&view=markup
-TEXT runtime·sigreturn(SB),NOSPLIT,$0
- MOVQ $SYS_rt_sigreturn, AX
- SYSCALL
- INT $3 // not reached
-
-TEXT runtime·sysMmap(SB),NOSPLIT,$0
- MOVQ addr+0(FP), DI
- MOVQ n+8(FP), SI
- MOVL prot+16(FP), DX
- MOVL flags+20(FP), R10
- MOVL fd+24(FP), R8
- MOVL off+28(FP), R9
-
- MOVL $SYS_mmap, AX
- SYSCALL
- CMPQ AX, $0xfffffffffffff001
- JLS ok
- NOTQ AX
- INCQ AX
- MOVQ $0, p+32(FP)
- MOVQ AX, err+40(FP)
- RET
-ok:
- MOVQ AX, p+32(FP)
- MOVQ $0, err+40(FP)
- RET
-
-// Call the function stored in _cgo_mmap using the GCC calling convention.
-// This must be called on the system stack.
-TEXT runtime·callCgoMmap(SB),NOSPLIT,$16
- MOVQ addr+0(FP), DI
- MOVQ n+8(FP), SI
- MOVL prot+16(FP), DX
- MOVL flags+20(FP), CX
- MOVL fd+24(FP), R8
- MOVL off+28(FP), R9
- MOVQ _cgo_mmap(SB), AX
- MOVQ SP, BX
- ANDQ $~15, SP // alignment as per amd64 psABI
- MOVQ BX, 0(SP)
- CALL AX
- MOVQ 0(SP), SP
- MOVQ AX, ret+32(FP)
- RET
-
-TEXT runtime·sysMunmap(SB),NOSPLIT,$0
- MOVQ addr+0(FP), DI
- MOVQ n+8(FP), SI
- MOVQ $SYS_munmap, AX
- SYSCALL
- CMPQ AX, $0xfffffffffffff001
- JLS 2(PC)
- MOVL $0xf1, 0xf1 // crash
- RET
-
-// Call the function stored in _cgo_munmap using the GCC calling convention.
-// This must be called on the system stack.
-TEXT runtime·callCgoMunmap(SB),NOSPLIT,$16-16
- MOVQ addr+0(FP), DI
- MOVQ n+8(FP), SI
- MOVQ _cgo_munmap(SB), AX
- MOVQ SP, BX
- ANDQ $~15, SP // alignment as per amd64 psABI
- MOVQ BX, 0(SP)
- CALL AX
- MOVQ 0(SP), SP
- RET
-
-TEXT runtime·madvise(SB),NOSPLIT,$0
- MOVQ addr+0(FP), DI
- MOVQ n+8(FP), SI
- MOVL flags+16(FP), DX
- MOVQ $SYS_madvise, AX
- SYSCALL
- MOVL AX, ret+24(FP)
- RET
-
-// int64 futex(int32 *uaddr, int32 op, int32 val,
-// struct timespec *timeout, int32 *uaddr2, int32 val2);
-TEXT runtime·futex(SB),NOSPLIT,$0
- MOVQ addr+0(FP), DI
- MOVL op+8(FP), SI
- MOVL val+12(FP), DX
- MOVQ ts+16(FP), R10
- MOVQ addr2+24(FP), R8
- MOVL val3+32(FP), R9
- MOVL $SYS_futex, AX
- SYSCALL
- MOVL AX, ret+40(FP)
- RET
-
-// int32 clone(int32 flags, void *stk, M *mp, G *gp, void (*fn)(void));
-TEXT runtime·clone(SB),NOSPLIT,$0
- MOVL flags+0(FP), DI
- MOVQ stk+8(FP), SI
- MOVQ $0, DX
- MOVQ $0, R10
- MOVQ $0, R8
- // Copy mp, gp, fn off parent stack for use by child.
- // Careful: Linux system call clobbers CX and R11.
- MOVQ mp+16(FP), R13
- MOVQ gp+24(FP), R9
- MOVQ fn+32(FP), R12
- CMPQ R13, $0 // m
- JEQ nog1
- CMPQ R9, $0 // g
- JEQ nog1
- LEAQ m_tls(R13), R8
-#ifdef GOOS_android
- // Android stores the TLS offset in runtime·tls_g.
- SUBQ runtime·tls_g(SB), R8
-#else
- ADDQ $8, R8 // ELF wants to use -8(FS)
-#endif
-	ORQ	$0x00080000, DI // add the CLONE_SETTLS (0x00080000) flag to the clone call
-nog1:
- MOVL $SYS_clone, AX
- SYSCALL
-
- // In parent, return.
- CMPQ AX, $0
- JEQ 3(PC)
- MOVL AX, ret+40(FP)
- RET
-
- // In child, on new stack.
- MOVQ SI, SP
-
- // If g or m are nil, skip Go-related setup.
- CMPQ R13, $0 // m
- JEQ nog2
- CMPQ R9, $0 // g
- JEQ nog2
-
- // Initialize m->procid to Linux tid
- MOVL $SYS_gettid, AX
- SYSCALL
- MOVQ AX, m_procid(R13)
-
- // In child, set up new stack
- get_tls(CX)
- MOVQ R13, g_m(R9)
- MOVQ R9, g(CX)
- MOVQ R9, R14 // set g register
- CALL runtime·stackcheck(SB)
-
-nog2:
- // Call fn. This is the PC of an ABI0 function.
- CALL R12
-
- // It shouldn't return. If it does, exit that thread.
- MOVL $111, DI
- MOVL $SYS_exit, AX
- SYSCALL
- JMP -3(PC) // keep exiting
-
-TEXT runtime·sigaltstack(SB),NOSPLIT,$-8
- MOVQ new+0(FP), DI
- MOVQ old+8(FP), SI
- MOVQ $SYS_sigaltstack, AX
- SYSCALL
- CMPQ AX, $0xfffffffffffff001
- JLS 2(PC)
- MOVL $0xf1, 0xf1 // crash
- RET
-
-// set tls base to DI
-TEXT runtime·settls(SB),NOSPLIT,$32
-#ifdef GOOS_android
- // Android stores the TLS offset in runtime·tls_g.
- SUBQ runtime·tls_g(SB), DI
-#else
- ADDQ $8, DI // ELF wants to use -8(FS)
-#endif
- MOVQ DI, SI
- MOVQ $0x1002, DI // ARCH_SET_FS
- MOVQ $SYS_arch_prctl, AX
- SYSCALL
- CMPQ AX, $0xfffffffffffff001
- JLS 2(PC)
- MOVL $0xf1, 0xf1 // crash
- RET
-
-TEXT runtime·osyield(SB),NOSPLIT,$0
- MOVL $SYS_sched_yield, AX
- SYSCALL
- RET
-
-TEXT runtime·sched_getaffinity(SB),NOSPLIT,$0
- MOVQ pid+0(FP), DI
- MOVQ len+8(FP), SI
- MOVQ buf+16(FP), DX
- MOVL $SYS_sched_getaffinity, AX
- SYSCALL
- MOVL AX, ret+24(FP)
- RET
-
-// int32 runtime·epollcreate(int32 size);
-TEXT runtime·epollcreate(SB),NOSPLIT,$0
- MOVL size+0(FP), DI
- MOVL $SYS_epoll_create, AX
- SYSCALL
- MOVL AX, ret+8(FP)
- RET
-
-// int32 runtime·epollcreate1(int32 flags);
-TEXT runtime·epollcreate1(SB),NOSPLIT,$0
- MOVL flags+0(FP), DI
- MOVL $SYS_epoll_create1, AX
- SYSCALL
- MOVL AX, ret+8(FP)
- RET
-
-// func epollctl(epfd, op, fd int32, ev *epollEvent) int
-TEXT runtime·epollctl(SB),NOSPLIT,$0
- MOVL epfd+0(FP), DI
- MOVL op+4(FP), SI
- MOVL fd+8(FP), DX
- MOVQ ev+16(FP), R10
- MOVL $SYS_epoll_ctl, AX
- SYSCALL
- MOVL AX, ret+24(FP)
- RET
-
-// int32 runtime·epollwait(int32 epfd, EpollEvent *ev, int32 nev, int32 timeout);
-TEXT runtime·epollwait(SB),NOSPLIT,$0
- // This uses pwait instead of wait, because Android O blocks wait.
- MOVL epfd+0(FP), DI
- MOVQ ev+8(FP), SI
- MOVL nev+16(FP), DX
- MOVL timeout+20(FP), R10
- MOVQ $0, R8
- MOVL $SYS_epoll_pwait, AX
- SYSCALL
- MOVL AX, ret+24(FP)
- RET
-
-// void runtime·closeonexec(int32 fd);
-TEXT runtime·closeonexec(SB),NOSPLIT,$0
- MOVL fd+0(FP), DI // fd
- MOVQ $2, SI // F_SETFD
- MOVQ $1, DX // FD_CLOEXEC
- MOVL $SYS_fcntl, AX
- SYSCALL
- RET
-
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVL fd+0(FP), DI // fd
- MOVQ $3, SI // F_GETFL
- MOVQ $0, DX
- MOVL $SYS_fcntl, AX
- SYSCALL
- MOVL fd+0(FP), DI // fd
- MOVQ $4, SI // F_SETFL
- MOVQ $0x800, DX // O_NONBLOCK
- ORL AX, DX
- MOVL $SYS_fcntl, AX
- SYSCALL
- RET
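
setNonblock above is the classic read-modify-write of the descriptor's status flags: F_GETFL, OR in O_NONBLOCK, F_SETFL (note it deliberately ignores errors). The same sequence in ordinary Go, using the raw fcntl the assembly issues:

    package main

    import (
    	"fmt"
    	"syscall"
    )

    // setNonblock mirrors the two fcntl calls above.
    func setNonblock(fd uintptr) error {
    	flags, _, errno := syscall.Syscall(syscall.SYS_FCNTL, fd, syscall.F_GETFL, 0)
    	if errno != 0 {
    		return errno
    	}
    	_, _, errno = syscall.Syscall(syscall.SYS_FCNTL, fd, syscall.F_SETFL, flags|syscall.O_NONBLOCK)
    	if errno != 0 {
    		return errno
    	}
    	return nil
    }

    func main() { fmt.Println(setNonblock(0)) }
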
-
-// int access(const char *name, int mode)
-TEXT runtime·access(SB),NOSPLIT,$0
- // This uses faccessat instead of access, because Android O blocks access.
- MOVL $AT_FDCWD, DI // AT_FDCWD, so this acts like access
- MOVQ name+0(FP), SI
- MOVL mode+8(FP), DX
- MOVL $0, R10
- MOVL $SYS_faccessat, AX
- SYSCALL
- MOVL AX, ret+16(FP)
- RET
-
-// int connect(int fd, const struct sockaddr *addr, socklen_t addrlen)
-TEXT runtime·connect(SB),NOSPLIT,$0-28
- MOVL fd+0(FP), DI
- MOVQ addr+8(FP), SI
- MOVL len+16(FP), DX
- MOVL $SYS_connect, AX
- SYSCALL
- MOVL AX, ret+24(FP)
- RET
-
-// int socket(int domain, int type, int protocol)
-TEXT runtime·socket(SB),NOSPLIT,$0-20
- MOVL domain+0(FP), DI
- MOVL typ+4(FP), SI
- MOVL prot+8(FP), DX
- MOVL $SYS_socket, AX
- SYSCALL
- MOVL AX, ret+16(FP)
- RET
-
-// func sbrk0() uintptr
-TEXT runtime·sbrk0(SB),NOSPLIT,$0-8
- // Implemented as brk(NULL).
- MOVQ $0, DI
- MOVL $SYS_brk, AX
- SYSCALL
- MOVQ AX, ret+0(FP)
- RET
diff --git a/contrib/go/_std_1.18/src/runtime/time.go b/contrib/go/_std_1.18/src/runtime/time.go
deleted file mode 100644
index a9ad620776..0000000000
--- a/contrib/go/_std_1.18/src/runtime/time.go
+++ /dev/null
@@ -1,1128 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Time-related runtime and pieces of package time.
-
-package runtime
-
-import (
- "internal/abi"
- "runtime/internal/atomic"
- "runtime/internal/sys"
- "unsafe"
-)
-
-// Package time knows the layout of this structure.
-// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
-type timer struct {
- // If this timer is on a heap, which P's heap it is on.
- // puintptr rather than *p to match uintptr in the versions
- // of this struct defined in other packages.
- pp puintptr
-
-	// The timer wakes up at when, and then at when+period, ... (only if
-	// period > 0), each time calling f(arg, now) in the timer goroutine,
-	// so f must be a well-behaved function and must not block.
- //
- // when must be positive on an active timer.
- when int64
- period int64
- f func(any, uintptr)
- arg any
- seq uintptr
-
- // What to set the when field to in timerModifiedXX status.
- nextwhen int64
-
- // The status field holds one of the values below.
- status uint32
-}
-
-// Code outside this file has to be careful in using a timer value.
-//
-// The pp, status, and nextwhen fields may only be used by code in this file.
-//
-// Code that creates a new timer value can set the when, period, f,
-// arg, and seq fields.
-// A new timer value may be passed to addtimer (called by time.startTimer).
-// After doing that no fields may be touched.
-//
-// An active timer (one that has been passed to addtimer) may be
-// passed to deltimer (time.stopTimer), after which it is no longer an
-// active timer. It is an inactive timer.
-// In an inactive timer the period, f, arg, and seq fields may be modified,
-// but not the when field.
-// It's OK to just drop an inactive timer and let the GC collect it.
-// It's not OK to pass an inactive timer to addtimer.
-// Only newly allocated timer values may be passed to addtimer.
-//
-// An active timer may be passed to modtimer. No fields may be touched.
-// It remains an active timer.
-//
-// An inactive timer may be passed to resettimer to turn into an
-// active timer with an updated when field.
-// It's OK to pass a newly allocated timer value to resettimer.
-//
-// Timer operations are addtimer, deltimer, modtimer, resettimer,
-// cleantimers, adjusttimers, and runtimer.
-//
-// We don't permit calling addtimer/deltimer/modtimer/resettimer simultaneously,
-// but adjusttimers and runtimer can be called at the same time as any of those.
-//
-// Active timers live in heaps attached to P, in the timers field.
-// Inactive timers live there too temporarily, until they are removed.
-//
-// addtimer:
-// timerNoStatus -> timerWaiting
-// anything else -> panic: invalid value
-// deltimer:
-// timerWaiting -> timerModifying -> timerDeleted
-// timerModifiedEarlier -> timerModifying -> timerDeleted
-// timerModifiedLater -> timerModifying -> timerDeleted
-// timerNoStatus -> do nothing
-// timerDeleted -> do nothing
-// timerRemoving -> do nothing
-// timerRemoved -> do nothing
-// timerRunning -> wait until status changes
-// timerMoving -> wait until status changes
-// timerModifying -> wait until status changes
-// modtimer:
-// timerWaiting -> timerModifying -> timerModifiedXX
-// timerModifiedXX -> timerModifying -> timerModifiedYY
-// timerNoStatus -> timerModifying -> timerWaiting
-// timerRemoved -> timerModifying -> timerWaiting
-// timerDeleted -> timerModifying -> timerModifiedXX
-// timerRunning -> wait until status changes
-// timerMoving -> wait until status changes
-// timerRemoving -> wait until status changes
-// timerModifying -> wait until status changes
-// cleantimers (looks in P's timer heap):
-// timerDeleted -> timerRemoving -> timerRemoved
-// timerModifiedXX -> timerMoving -> timerWaiting
-// adjusttimers (looks in P's timer heap):
-// timerDeleted -> timerRemoving -> timerRemoved
-// timerModifiedXX -> timerMoving -> timerWaiting
-// runtimer (looks in P's timer heap):
-// timerNoStatus -> panic: uninitialized timer
-// timerWaiting -> timerWaiting or
-// timerWaiting -> timerRunning -> timerNoStatus or
-// timerWaiting -> timerRunning -> timerWaiting
-// timerModifying -> wait until status changes
-// timerModifiedXX -> timerMoving -> timerWaiting
-// timerDeleted -> timerRemoving -> timerRemoved
-// timerRunning -> panic: concurrent runtimer calls
-// timerRemoved -> panic: inconsistent timer heap
-// timerRemoving -> panic: inconsistent timer heap
-// timerMoving -> panic: inconsistent timer heap
-
-// Values for the timer status field.
-const (
- // Timer has no status set yet.
- timerNoStatus = iota
-
- // Waiting for timer to fire.
- // The timer is in some P's heap.
- timerWaiting
-
- // Running the timer function.
- // A timer will only have this status briefly.
- timerRunning
-
- // The timer is deleted and should be removed.
- // It should not be run, but it is still in some P's heap.
- timerDeleted
-
- // The timer is being removed.
- // The timer will only have this status briefly.
- timerRemoving
-
- // The timer has been stopped.
- // It is not in any P's heap.
- timerRemoved
-
- // The timer is being modified.
- // The timer will only have this status briefly.
- timerModifying
-
- // The timer has been modified to an earlier time.
- // The new when value is in the nextwhen field.
- // The timer is in some P's heap, possibly in the wrong place.
- timerModifiedEarlier
-
- // The timer has been modified to the same or a later time.
- // The new when value is in the nextwhen field.
- // The timer is in some P's heap, possibly in the wrong place.
- timerModifiedLater
-
- // The timer has been modified and is being moved.
- // The timer will only have this status briefly.
- timerMoving
-)
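
Every arrow in the transition table above is implemented as a compare-and-swap on the status word, usually passing through one of the short-lived intermediate states (timerModifying, timerRemoving, timerMoving). A miniature of the idiom, written against this file's own helpers:

    // casTransition sketches the pattern repeated throughout this file:
    // claim the timer by CAS-ing from -> via, do the heap work, then
    // CAS via -> to. The second CAS failing means another goroutine
    // touched a claimed timer, which is corruption, hence badTimer.
    func casTransition(status *uint32, from, via, to uint32, work func()) bool {
    	if !atomic.Cas(status, from, via) {
    		return false // lost the race; caller re-reads status and retries
    	}
    	work()
    	if !atomic.Cas(status, via, to) {
    		badTimer()
    	}
    	return true
    }
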
-
-// maxWhen is the maximum value for timer's when field.
-const maxWhen = 1<<63 - 1
-
-// verifyTimers can be set to true to add debugging checks that the
-// timer heaps are valid.
-const verifyTimers = false
-
-// Package time APIs.
-// Godoc uses the comments in package time, not these.
-
-// time.now is implemented in assembly.
-
-// timeSleep puts the current goroutine to sleep for at least ns nanoseconds.
-//go:linkname timeSleep time.Sleep
-func timeSleep(ns int64) {
- if ns <= 0 {
- return
- }
-
- gp := getg()
- t := gp.timer
- if t == nil {
- t = new(timer)
- gp.timer = t
- }
- t.f = goroutineReady
- t.arg = gp
- t.nextwhen = nanotime() + ns
- if t.nextwhen < 0 { // check for overflow.
- t.nextwhen = maxWhen
- }
- gopark(resetForSleep, unsafe.Pointer(t), waitReasonSleep, traceEvGoSleep, 1)
-}
-
-// resetForSleep is called after the goroutine is parked for timeSleep.
-// We can't call resettimer in timeSleep itself because if this is a short
-// sleep and there are many goroutines then the P can wind up running the
-// timer function, goroutineReady, before the goroutine has been parked.
-func resetForSleep(gp *g, ut unsafe.Pointer) bool {
- t := (*timer)(ut)
- resettimer(t, t.nextwhen)
- return true
-}
-
-// startTimer adds t to the timer heap.
-//go:linkname startTimer time.startTimer
-func startTimer(t *timer) {
- if raceenabled {
- racerelease(unsafe.Pointer(t))
- }
- addtimer(t)
-}
-
-// stopTimer stops a timer.
-// It reports whether t was stopped before being run.
-//go:linkname stopTimer time.stopTimer
-func stopTimer(t *timer) bool {
- return deltimer(t)
-}
-
-// resetTimer resets an inactive timer, adding it to the heap.
-// Reports whether the timer was modified before it was run.
-//go:linkname resetTimer time.resetTimer
-func resetTimer(t *timer, when int64) bool {
- if raceenabled {
- racerelease(unsafe.Pointer(t))
- }
- return resettimer(t, when)
-}
-
-// modTimer modifies an existing timer.
-//go:linkname modTimer time.modTimer
-func modTimer(t *timer, when, period int64, f func(any, uintptr), arg any, seq uintptr) {
- modtimer(t, when, period, f, arg, seq)
-}
-
-// Go runtime.
-
-// Ready the goroutine arg.
-func goroutineReady(arg any, seq uintptr) {
- goready(arg.(*g), 0)
-}
-
-// addtimer adds a timer to the current P.
-// This should only be called with a newly created timer.
-// That avoids the risk of changing the when field of a timer in some P's heap,
-// which could cause the heap to become unsorted.
-func addtimer(t *timer) {
- // when must be positive. A negative value will cause runtimer to
- // overflow during its delta calculation and never expire other runtime
- // timers. Zero will cause checkTimers to fail to notice the timer.
- if t.when <= 0 {
- throw("timer when must be positive")
- }
- if t.period < 0 {
- throw("timer period must be non-negative")
- }
- if t.status != timerNoStatus {
- throw("addtimer called with initialized timer")
- }
- t.status = timerWaiting
-
- when := t.when
-
- // Disable preemption while using pp to avoid changing another P's heap.
- mp := acquirem()
-
- pp := getg().m.p.ptr()
- lock(&pp.timersLock)
- cleantimers(pp)
- doaddtimer(pp, t)
- unlock(&pp.timersLock)
-
- wakeNetPoller(when)
-
- releasem(mp)
-}
-
-// doaddtimer adds t to the current P's heap.
-// The caller must have locked the timers for pp.
-func doaddtimer(pp *p, t *timer) {
- // Timers rely on the network poller, so make sure the poller
- // has started.
- if netpollInited == 0 {
- netpollGenericInit()
- }
-
- if t.pp != 0 {
- throw("doaddtimer: P already set in timer")
- }
- t.pp.set(pp)
- i := len(pp.timers)
- pp.timers = append(pp.timers, t)
- siftupTimer(pp.timers, i)
- if t == pp.timers[0] {
- atomic.Store64(&pp.timer0When, uint64(t.when))
- }
- atomic.Xadd(&pp.numTimers, 1)
-}
-
-// deltimer deletes the timer t. It may be on some other P, so we can't
-// actually remove it from the timers heap. We can only mark it as deleted.
-// It will be removed in due course by the P whose heap it is on.
-// Reports whether the timer was removed before it was run.
-func deltimer(t *timer) bool {
- for {
- switch s := atomic.Load(&t.status); s {
- case timerWaiting, timerModifiedLater:
- // Prevent preemption while the timer is in timerModifying.
- // This could lead to a self-deadlock. See #38070.
- mp := acquirem()
- if atomic.Cas(&t.status, s, timerModifying) {
- // Must fetch t.pp before changing status,
- // as cleantimers in another goroutine
- // can clear t.pp of a timerDeleted timer.
- tpp := t.pp.ptr()
- if !atomic.Cas(&t.status, timerModifying, timerDeleted) {
- badTimer()
- }
- releasem(mp)
- atomic.Xadd(&tpp.deletedTimers, 1)
- // Timer was not yet run.
- return true
- } else {
- releasem(mp)
- }
- case timerModifiedEarlier:
- // Prevent preemption while the timer is in timerModifying.
- // This could lead to a self-deadlock. See #38070.
- mp := acquirem()
- if atomic.Cas(&t.status, s, timerModifying) {
- // Must fetch t.pp before setting status
- // to timerDeleted.
- tpp := t.pp.ptr()
- if !atomic.Cas(&t.status, timerModifying, timerDeleted) {
- badTimer()
- }
- releasem(mp)
- atomic.Xadd(&tpp.deletedTimers, 1)
- // Timer was not yet run.
- return true
- } else {
- releasem(mp)
- }
- case timerDeleted, timerRemoving, timerRemoved:
- // Timer was already run.
- return false
- case timerRunning, timerMoving:
- // The timer is being run or moved, by a different P.
- // Wait for it to complete.
- osyield()
- case timerNoStatus:
- // Removing timer that was never added or
- // has already been run. Also see issue 21874.
- return false
- case timerModifying:
- // Simultaneous calls to deltimer and modtimer.
- // Wait for the other call to complete.
- osyield()
- default:
- badTimer()
- }
- }
-}
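
deltimer's boolean is what ultimately surfaces as time.Timer.Stop's return value (via stopTimer above): true means the timer was claimed before its function ran. From the user's side:

    // stopBeforeFire shows the user-visible contract of deltimer.
    func stopBeforeFire() bool {
    	t := time.NewTimer(time.Hour)
    	stopped := t.Stop() // true iff deltimer claimed the timer first
    	return stopped      // the timer never fired, so t.C stays empty
    }
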
-
-// dodeltimer removes timer i from the current P's heap.
-// We are locked on the P when this is called.
-// It returns the smallest changed index in pp.timers.
-// The caller must have locked the timers for pp.
-func dodeltimer(pp *p, i int) int {
- if t := pp.timers[i]; t.pp.ptr() != pp {
- throw("dodeltimer: wrong P")
- } else {
- t.pp = 0
- }
- last := len(pp.timers) - 1
- if i != last {
- pp.timers[i] = pp.timers[last]
- }
- pp.timers[last] = nil
- pp.timers = pp.timers[:last]
- smallestChanged := i
- if i != last {
- // Moving to i may have moved the last timer to a new parent,
- // so sift up to preserve the heap guarantee.
- smallestChanged = siftupTimer(pp.timers, i)
- siftdownTimer(pp.timers, i)
- }
- if i == 0 {
- updateTimer0When(pp)
- }
- atomic.Xadd(&pp.numTimers, -1)
- return smallestChanged
-}
-
-// dodeltimer0 removes timer 0 from the current P's heap.
-// We are locked on the P when this is called.
-// The caller must have locked the timers for pp.
-func dodeltimer0(pp *p) {
- if t := pp.timers[0]; t.pp.ptr() != pp {
- throw("dodeltimer0: wrong P")
- } else {
- t.pp = 0
- }
- last := len(pp.timers) - 1
- if last > 0 {
- pp.timers[0] = pp.timers[last]
- }
- pp.timers[last] = nil
- pp.timers = pp.timers[:last]
- if last > 0 {
- siftdownTimer(pp.timers, 0)
- }
- updateTimer0When(pp)
- atomic.Xadd(&pp.numTimers, -1)
-}
-
-// modtimer modifies an existing timer.
-// This is called by the netpoll code or time.Ticker.Reset or time.Timer.Reset.
-// Reports whether the timer was modified before it was run.
-func modtimer(t *timer, when, period int64, f func(any, uintptr), arg any, seq uintptr) bool {
- if when <= 0 {
- throw("timer when must be positive")
- }
- if period < 0 {
- throw("timer period must be non-negative")
- }
-
- status := uint32(timerNoStatus)
- wasRemoved := false
- var pending bool
- var mp *m
-loop:
- for {
- switch status = atomic.Load(&t.status); status {
- case timerWaiting, timerModifiedEarlier, timerModifiedLater:
- // Prevent preemption while the timer is in timerModifying.
- // This could lead to a self-deadlock. See #38070.
- mp = acquirem()
- if atomic.Cas(&t.status, status, timerModifying) {
- pending = true // timer not yet run
- break loop
- }
- releasem(mp)
- case timerNoStatus, timerRemoved:
- // Prevent preemption while the timer is in timerModifying.
- // This could lead to a self-deadlock. See #38070.
- mp = acquirem()
-
- // Timer was already run and t is no longer in a heap.
- // Act like addtimer.
- if atomic.Cas(&t.status, status, timerModifying) {
- wasRemoved = true
- pending = false // timer already run or stopped
- break loop
- }
- releasem(mp)
- case timerDeleted:
- // Prevent preemption while the timer is in timerModifying.
- // This could lead to a self-deadlock. See #38070.
- mp = acquirem()
- if atomic.Cas(&t.status, status, timerModifying) {
- atomic.Xadd(&t.pp.ptr().deletedTimers, -1)
- pending = false // timer already stopped
- break loop
- }
- releasem(mp)
- case timerRunning, timerRemoving, timerMoving:
- // The timer is being run or moved, by a different P.
- // Wait for it to complete.
- osyield()
- case timerModifying:
- // Multiple simultaneous calls to modtimer.
- // Wait for the other call to complete.
- osyield()
- default:
- badTimer()
- }
- }
-
- t.period = period
- t.f = f
- t.arg = arg
- t.seq = seq
-
- if wasRemoved {
- t.when = when
- pp := getg().m.p.ptr()
- lock(&pp.timersLock)
- doaddtimer(pp, t)
- unlock(&pp.timersLock)
- if !atomic.Cas(&t.status, timerModifying, timerWaiting) {
- badTimer()
- }
- releasem(mp)
- wakeNetPoller(when)
- } else {
- // The timer is in some other P's heap, so we can't change
- // the when field. If we did, the other P's heap would
- // be out of order. So we put the new when value in the
- // nextwhen field, and let the other P set the when field
- // when it is prepared to resort the heap.
- t.nextwhen = when
-
- newStatus := uint32(timerModifiedLater)
- if when < t.when {
- newStatus = timerModifiedEarlier
- }
-
- tpp := t.pp.ptr()
-
- if newStatus == timerModifiedEarlier {
- updateTimerModifiedEarliest(tpp, when)
- }
-
- // Set the new status of the timer.
- if !atomic.Cas(&t.status, timerModifying, newStatus) {
- badTimer()
- }
- releasem(mp)
-
- // If the new status is earlier, wake up the poller.
- if newStatus == timerModifiedEarlier {
- wakeNetPoller(when)
- }
- }
-
- return pending
-}
-
-// resettimer resets the time when a timer should fire.
-// If used for an inactive timer, the timer will become active.
-// This should be called instead of addtimer if the timer value has been,
-// or may have been, used previously.
-// Reports whether the timer was modified before it was run.
-func resettimer(t *timer, when int64) bool {
- return modtimer(t, when, t.period, t.f, t.arg, t.seq)
-}
-
-// cleantimers cleans up the head of the timer queue. This speeds up
-// programs that create and delete timers; leaving them in the heap
-// slows down addtimer. Reports whether no timer problems were found.
-// The caller must have locked the timers for pp.
-func cleantimers(pp *p) {
- gp := getg()
- for {
- if len(pp.timers) == 0 {
- return
- }
-
- // This loop can theoretically run for a while, and because
- // it is holding timersLock it cannot be preempted.
- // If someone is trying to preempt us, just return.
- // We can clean the timers later.
- if gp.preemptStop {
- return
- }
-
- t := pp.timers[0]
- if t.pp.ptr() != pp {
- throw("cleantimers: bad p")
- }
- switch s := atomic.Load(&t.status); s {
- case timerDeleted:
- if !atomic.Cas(&t.status, s, timerRemoving) {
- continue
- }
- dodeltimer0(pp)
- if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
- badTimer()
- }
- atomic.Xadd(&pp.deletedTimers, -1)
- case timerModifiedEarlier, timerModifiedLater:
- if !atomic.Cas(&t.status, s, timerMoving) {
- continue
- }
- // Now we can change the when field.
- t.when = t.nextwhen
- // Move t to the right position.
- dodeltimer0(pp)
- doaddtimer(pp, t)
- if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
- badTimer()
- }
- default:
- // Head of timers does not need adjustment.
- return
- }
- }
-}
-
-// moveTimers moves a slice of timers to pp. The slice has been taken
-// from a different P.
-// This is currently called when the world is stopped, but the caller
-// is expected to have locked the timers for pp.
-func moveTimers(pp *p, timers []*timer) {
- for _, t := range timers {
- loop:
- for {
- switch s := atomic.Load(&t.status); s {
- case timerWaiting:
- if !atomic.Cas(&t.status, s, timerMoving) {
- continue
- }
- t.pp = 0
- doaddtimer(pp, t)
- if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
- badTimer()
- }
- break loop
- case timerModifiedEarlier, timerModifiedLater:
- if !atomic.Cas(&t.status, s, timerMoving) {
- continue
- }
- t.when = t.nextwhen
- t.pp = 0
- doaddtimer(pp, t)
- if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
- badTimer()
- }
- break loop
- case timerDeleted:
- if !atomic.Cas(&t.status, s, timerRemoved) {
- continue
- }
- t.pp = 0
- // We no longer need this timer in the heap.
- break loop
- case timerModifying:
- // Loop until the modification is complete.
- osyield()
- case timerNoStatus, timerRemoved:
- // We should not see these status values in a timers heap.
- badTimer()
- case timerRunning, timerRemoving, timerMoving:
- // Some other P thinks it owns this timer,
- // which should not happen.
- badTimer()
- default:
- badTimer()
- }
- }
- }
-}
-
-// adjusttimers looks through the timers in the current P's heap for
-// any timers that have been modified to run earlier, and puts them in
-// the correct place in the heap. While looking for those timers,
-// it also moves timers that have been modified to run later,
-// and removes deleted timers. The caller must have locked the timers for pp.
-func adjusttimers(pp *p, now int64) {
- // If we haven't yet reached the time of the first timerModifiedEarlier
- // timer, don't do anything. This speeds up programs that adjust
- // a lot of timers back and forth if the timers rarely expire.
- // We'll postpone looking through all the adjusted timers until
- // one would actually expire.
- first := atomic.Load64(&pp.timerModifiedEarliest)
- if first == 0 || int64(first) > now {
- if verifyTimers {
- verifyTimerHeap(pp)
- }
- return
- }
-
- // We are going to clear all timerModifiedEarlier timers.
- atomic.Store64(&pp.timerModifiedEarliest, 0)
-
- var moved []*timer
- for i := 0; i < len(pp.timers); i++ {
- t := pp.timers[i]
- if t.pp.ptr() != pp {
- throw("adjusttimers: bad p")
- }
- switch s := atomic.Load(&t.status); s {
- case timerDeleted:
- if atomic.Cas(&t.status, s, timerRemoving) {
- changed := dodeltimer(pp, i)
- if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
- badTimer()
- }
- atomic.Xadd(&pp.deletedTimers, -1)
- // Go back to the earliest changed heap entry.
- // "- 1" because the loop will add 1.
- i = changed - 1
- }
- case timerModifiedEarlier, timerModifiedLater:
- if atomic.Cas(&t.status, s, timerMoving) {
- // Now we can change the when field.
- t.when = t.nextwhen
- // Take t off the heap, and hold onto it.
- // We don't add it back yet because the
- // heap manipulation could cause our
- // loop to skip some other timer.
- changed := dodeltimer(pp, i)
- moved = append(moved, t)
- // Go back to the earliest changed heap entry.
- // "- 1" because the loop will add 1.
- i = changed - 1
- }
- case timerNoStatus, timerRunning, timerRemoving, timerRemoved, timerMoving:
- badTimer()
- case timerWaiting:
- // OK, nothing to do.
- case timerModifying:
- // Check again after modification is complete.
- osyield()
- i--
- default:
- badTimer()
- }
- }
-
- if len(moved) > 0 {
- addAdjustedTimers(pp, moved)
- }
-
- if verifyTimers {
- verifyTimerHeap(pp)
- }
-}
-
-// addAdjustedTimers adds any timers we adjusted in adjusttimers
-// back to the timer heap.
-func addAdjustedTimers(pp *p, moved []*timer) {
- for _, t := range moved {
- doaddtimer(pp, t)
- if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
- badTimer()
- }
- }
-}
-
-// nobarrierWakeTime looks at P's timers and returns the time when we
-// should wake up the netpoller. It returns 0 if there are no timers.
-// This function is invoked when dropping a P, and must run without
-// any write barriers.
-//go:nowritebarrierrec
-func nobarrierWakeTime(pp *p) int64 {
- next := int64(atomic.Load64(&pp.timer0When))
- nextAdj := int64(atomic.Load64(&pp.timerModifiedEarliest))
- if next == 0 || (nextAdj != 0 && nextAdj < next) {
- next = nextAdj
- }
- return next
-}
-
-// runtimer examines the first timer in timers. If it is ready based on now,
-// it runs the timer and removes or updates it.
-// Returns 0 if it ran a timer, -1 if there are no more timers, or the time
-// when the first timer should run.
-// The caller must have locked the timers for pp.
-// If a timer is run, this will temporarily unlock the timers.
-//go:systemstack
-func runtimer(pp *p, now int64) int64 {
- for {
- t := pp.timers[0]
- if t.pp.ptr() != pp {
- throw("runtimer: bad p")
- }
- switch s := atomic.Load(&t.status); s {
- case timerWaiting:
- if t.when > now {
- // Not ready to run.
- return t.when
- }
-
- if !atomic.Cas(&t.status, s, timerRunning) {
- continue
- }
- // Note that runOneTimer may temporarily unlock
- // pp.timersLock.
- runOneTimer(pp, t, now)
- return 0
-
- case timerDeleted:
- if !atomic.Cas(&t.status, s, timerRemoving) {
- continue
- }
- dodeltimer0(pp)
- if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
- badTimer()
- }
- atomic.Xadd(&pp.deletedTimers, -1)
- if len(pp.timers) == 0 {
- return -1
- }
-
- case timerModifiedEarlier, timerModifiedLater:
- if !atomic.Cas(&t.status, s, timerMoving) {
- continue
- }
- t.when = t.nextwhen
- dodeltimer0(pp)
- doaddtimer(pp, t)
- if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
- badTimer()
- }
-
- case timerModifying:
- // Wait for modification to complete.
- osyield()
-
- case timerNoStatus, timerRemoved:
- // Should not see a new or inactive timer on the heap.
- badTimer()
- case timerRunning, timerRemoving, timerMoving:
- // These should only be set when timers are locked,
- // and we didn't do it.
- badTimer()
- default:
- badTimer()
- }
- }
-}
-
-// runOneTimer runs a single timer.
-// The caller must have locked the timers for pp.
-// This will temporarily unlock the timers while running the timer function.
-//go:systemstack
-func runOneTimer(pp *p, t *timer, now int64) {
- if raceenabled {
- ppcur := getg().m.p.ptr()
- if ppcur.timerRaceCtx == 0 {
- ppcur.timerRaceCtx = racegostart(abi.FuncPCABIInternal(runtimer) + sys.PCQuantum)
- }
- raceacquirectx(ppcur.timerRaceCtx, unsafe.Pointer(t))
- }
-
- f := t.f
- arg := t.arg
- seq := t.seq
-
- if t.period > 0 {
- // Leave in heap but adjust next time to fire.
- delta := t.when - now
- t.when += t.period * (1 + -delta/t.period)
- if t.when < 0 { // check for overflow.
- t.when = maxWhen
- }
- siftdownTimer(pp.timers, 0)
- if !atomic.Cas(&t.status, timerRunning, timerWaiting) {
- badTimer()
- }
- updateTimer0When(pp)
- } else {
- // Remove from heap.
- dodeltimer0(pp)
- if !atomic.Cas(&t.status, timerRunning, timerNoStatus) {
- badTimer()
- }
- }
-
- if raceenabled {
- // Temporarily use the current P's racectx for g0.
- gp := getg()
- if gp.racectx != 0 {
- throw("runOneTimer: unexpected racectx")
- }
- gp.racectx = gp.m.p.ptr().timerRaceCtx
- }
-
- unlock(&pp.timersLock)
-
- f(arg, seq)
-
- lock(&pp.timersLock)
-
- if raceenabled {
- gp := getg()
- gp.racectx = 0
- }
-}
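
The catch-up line in runOneTimer, t.when += t.period * (1 + -delta/t.period), is worth a worked example: delta = t.when - now is <= 0 here, and the integer division counts whole missed periods. With when = 100, period = 100, now = 350: delta = -250, -delta/period = 2, so when += 100*3 = 400, the first period step strictly after now. A one-liner to check:

    // nextFire reproduces the catch-up arithmetic (illustration only).
    func nextFire(when, period, now int64) int64 {
    	delta := when - now // <= 0: the timer is due
    	return when + period*(1+-delta/period)
    }
    // nextFire(100, 100, 350) == 400; nextFire(100, 100, 101) == 200.
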
-
-// clearDeletedTimers removes all deleted timers from the P's timer heap.
-// This is used to avoid clogging up the heap if the program
-// starts a lot of long-running timers and then stops them.
-// For example, this can happen via context.WithTimeout.
-//
-// This is the only function that walks through the entire timer heap,
-// other than moveTimers, which only runs when the world is stopped.
-//
-// The caller must have locked the timers for pp.
-func clearDeletedTimers(pp *p) {
- // We are going to clear all timerModifiedEarlier timers.
- // Do this now in case new ones show up while we are looping.
- atomic.Store64(&pp.timerModifiedEarliest, 0)
-
- cdel := int32(0)
- to := 0
- changedHeap := false
- timers := pp.timers
-nextTimer:
- for _, t := range timers {
- for {
- switch s := atomic.Load(&t.status); s {
- case timerWaiting:
- if changedHeap {
- timers[to] = t
- siftupTimer(timers, to)
- }
- to++
- continue nextTimer
- case timerModifiedEarlier, timerModifiedLater:
- if atomic.Cas(&t.status, s, timerMoving) {
- t.when = t.nextwhen
- timers[to] = t
- siftupTimer(timers, to)
- to++
- changedHeap = true
- if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
- badTimer()
- }
- continue nextTimer
- }
- case timerDeleted:
- if atomic.Cas(&t.status, s, timerRemoving) {
- t.pp = 0
- cdel++
- if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
- badTimer()
- }
- changedHeap = true
- continue nextTimer
- }
- case timerModifying:
- // Loop until modification complete.
- osyield()
- case timerNoStatus, timerRemoved:
- // We should not see these status values in a timer heap.
- badTimer()
- case timerRunning, timerRemoving, timerMoving:
- // Some other P thinks it owns this timer,
- // which should not happen.
- badTimer()
- default:
- badTimer()
- }
- }
- }
-
- // Set remaining slots in timers slice to nil,
- // so that the timer values can be garbage collected.
- for i := to; i < len(timers); i++ {
- timers[i] = nil
- }
-
- atomic.Xadd(&pp.deletedTimers, -cdel)
- atomic.Xadd(&pp.numTimers, -cdel)
-
- timers = timers[:to]
- pp.timers = timers
- updateTimer0When(pp)
-
- if verifyTimers {
- verifyTimerHeap(pp)
- }
-}
-
-// verifyTimerHeap verifies that the timer heap is in a valid state.
-// This is only for debugging, and is only called if verifyTimers is true.
-// The caller must have locked the timers.
-func verifyTimerHeap(pp *p) {
- for i, t := range pp.timers {
- if i == 0 {
- // First timer has no parent.
- continue
- }
-
- // The heap is 4-ary. See siftupTimer and siftdownTimer.
- p := (i - 1) / 4
- if t.when < pp.timers[p].when {
- print("bad timer heap at ", i, ": ", p, ": ", pp.timers[p].when, ", ", i, ": ", t.when, "\n")
- throw("bad timer heap")
- }
- }
- if numTimers := int(atomic.Load(&pp.numTimers)); len(pp.timers) != numTimers {
- println("timer heap len", len(pp.timers), "!= numTimers", numTimers)
- throw("bad timer heap len")
- }
-}
-
-// updateTimer0When sets the P's timer0When field.
-// The caller must have locked the timers for pp.
-func updateTimer0When(pp *p) {
- if len(pp.timers) == 0 {
- atomic.Store64(&pp.timer0When, 0)
- } else {
- atomic.Store64(&pp.timer0When, uint64(pp.timers[0].when))
- }
-}
-
-// updateTimerModifiedEarliest updates pp.timerModifiedEarliest, the
-// earliest known nextwhen among the P's timerModifiedEarlier timers.
-// The timers for pp will not be locked.
-func updateTimerModifiedEarliest(pp *p, nextwhen int64) {
- for {
- old := atomic.Load64(&pp.timerModifiedEarliest)
- if old != 0 && int64(old) < nextwhen {
- return
- }
- if atomic.Cas64(&pp.timerModifiedEarliest, old, uint64(nextwhen)) {
- return
- }
- }
-}
-
-// timeSleepUntil returns the time when the next timer should fire,
-// and the P that holds the timer heap containing that timer.
-// This is only called by sysmon and checkdead.
-func timeSleepUntil() (int64, *p) {
- next := int64(maxWhen)
- var pret *p
-
- // Prevent allp slice changes. This is like retake.
- lock(&allpLock)
- for _, pp := range allp {
- if pp == nil {
- // This can happen if procresize has grown
- // allp but not yet created new Ps.
- continue
- }
-
- w := int64(atomic.Load64(&pp.timer0When))
- if w != 0 && w < next {
- next = w
- pret = pp
- }
-
- w = int64(atomic.Load64(&pp.timerModifiedEarliest))
- if w != 0 && w < next {
- next = w
- pret = pp
- }
- }
- unlock(&allpLock)
-
- return next, pret
-}
-
-// Heap maintenance algorithms.
-// These algorithms check for slice index errors manually.
-// A slice index error can happen if the program accesses timers
-// in a racy way. We don't want to panic here, because that
-// would cause the program to crash with a mysterious
-// "panic holding locks" message. Instead, we panic while not
-// holding a lock.
-
-// siftupTimer puts the timer at position i in the right place
-// in the heap by moving it up toward the top of the heap.
-// It returns the smallest changed index.
-func siftupTimer(t []*timer, i int) int {
- if i >= len(t) {
- badTimer()
- }
- when := t[i].when
- if when <= 0 {
- badTimer()
- }
- tmp := t[i]
- for i > 0 {
- p := (i - 1) / 4 // parent
- if when >= t[p].when {
- break
- }
- t[i] = t[p]
- i = p
- }
- if tmp != t[i] {
- t[i] = tmp
- }
- return i
-}
-
-// siftdownTimer puts the timer at position i in the right place
-// in the heap by moving it down toward the bottom of the heap.
-func siftdownTimer(t []*timer, i int) {
- n := len(t)
- if i >= n {
- badTimer()
- }
- when := t[i].when
- if when <= 0 {
- badTimer()
- }
- tmp := t[i]
- for {
- c := i*4 + 1 // left child
- c3 := c + 2 // mid child
- if c >= n {
- break
- }
- w := t[c].when
- if c+1 < n && t[c+1].when < w {
- w = t[c+1].when
- c++
- }
- if c3 < n {
- w3 := t[c3].when
- if c3+1 < n && t[c3+1].when < w3 {
- w3 = t[c3+1].when
- c3++
- }
- if w3 < w {
- w = w3
- c = c3
- }
- }
- if w >= when {
- break
- }
- t[i] = t[c]
- i = c
- }
- if tmp != t[i] {
- t[i] = tmp
- }
-}
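
Both sift routines use the 4-ary heap layout: the parent of slot i is (i-1)/4, and the children of slot i are 4i+1 through 4i+4 (c is the leftmost, c3 = c+2 the "mid" child checked above). A quick sketch of the index relations:

    // Index arithmetic for the 4-ary timer heap (illustration only).
    func parent(i int) int { return (i - 1) / 4 }

    func children(i int) [4]int {
    	c := i*4 + 1
    	return [4]int{c, c + 1, c + 2, c + 3}
    }
    // parent(5) == 1, parent(4) == 0; children(0) == [1 2 3 4].
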
-
-// badTimer is called if the timer data structures have been corrupted,
-// presumably due to racy use by the program. We panic here rather than
-// panicking due to an invalid slice access while holding locks.
-// See issue #25686.
-func badTimer() {
- throw("timer data corruption")
-}
diff --git a/contrib/go/_std_1.18/src/runtime/trace.go b/contrib/go/_std_1.18/src/runtime/trace.go
deleted file mode 100644
index 8f60de2b05..0000000000
--- a/contrib/go/_std_1.18/src/runtime/trace.go
+++ /dev/null
@@ -1,1260 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Go execution tracer.
-// The tracer captures a wide range of execution events like goroutine
-// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
-// changes of heap size, processor start/stop, etc., and writes them to a
-// buffer in a compact form. A nanosecond-precision timestamp and a stack
-// trace are captured for most events.
-// See https://golang.org/s/go15trace for more info.
-
-package runtime
-
-import (
- "internal/goarch"
- "runtime/internal/atomic"
- "runtime/internal/sys"
- "unsafe"
-)
-
-// Event types in the trace, args are given in square brackets.
-const (
- traceEvNone = 0 // unused
- traceEvBatch = 1 // start of per-P batch of events [pid, timestamp]
- traceEvFrequency = 2 // contains tracer timer frequency [frequency (ticks per second)]
- traceEvStack = 3 // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
- traceEvGomaxprocs = 4 // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
- traceEvProcStart = 5 // start of P [timestamp, thread id]
- traceEvProcStop = 6 // stop of P [timestamp]
- traceEvGCStart = 7 // GC start [timestamp, seq, stack id]
- traceEvGCDone = 8 // GC done [timestamp]
- traceEvGCSTWStart = 9 // GC STW start [timestamp, kind]
- traceEvGCSTWDone = 10 // GC STW done [timestamp]
- traceEvGCSweepStart = 11 // GC sweep start [timestamp, stack id]
- traceEvGCSweepDone = 12 // GC sweep done [timestamp, swept, reclaimed]
- traceEvGoCreate = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
- traceEvGoStart = 14 // goroutine starts running [timestamp, goroutine id, seq]
- traceEvGoEnd = 15 // goroutine ends [timestamp]
- traceEvGoStop = 16 // goroutine stops (like in select{}) [timestamp, stack]
- traceEvGoSched = 17 // goroutine calls Gosched [timestamp, stack]
- traceEvGoPreempt = 18 // goroutine is preempted [timestamp, stack]
- traceEvGoSleep = 19 // goroutine calls Sleep [timestamp, stack]
- traceEvGoBlock = 20 // goroutine blocks [timestamp, stack]
- traceEvGoUnblock = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
- traceEvGoBlockSend = 22 // goroutine blocks on chan send [timestamp, stack]
- traceEvGoBlockRecv = 23 // goroutine blocks on chan recv [timestamp, stack]
- traceEvGoBlockSelect = 24 // goroutine blocks on select [timestamp, stack]
- traceEvGoBlockSync = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
- traceEvGoBlockCond = 26 // goroutine blocks on Cond [timestamp, stack]
- traceEvGoBlockNet = 27 // goroutine blocks on network [timestamp, stack]
- traceEvGoSysCall = 28 // syscall enter [timestamp, stack]
- traceEvGoSysExit = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
- traceEvGoSysBlock = 30 // syscall blocks [timestamp]
- traceEvGoWaiting = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
- traceEvGoInSyscall = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
- traceEvHeapAlloc = 33 // gcController.heapLive change [timestamp, heap_alloc]
- traceEvHeapGoal = 34 // gcController.heapGoal (formerly next_gc) change [timestamp, heap goal in bytes]
- traceEvTimerGoroutine = 35 // not currently used; previously denoted timer goroutine [timer goroutine id]
- traceEvFutileWakeup = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
- traceEvString = 37 // string dictionary entry [ID, length, string]
- traceEvGoStartLocal = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
- traceEvGoUnblockLocal = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
- traceEvGoSysExitLocal = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
- traceEvGoStartLabel = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
- traceEvGoBlockGC = 42 // goroutine blocks on GC assist [timestamp, stack]
- traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
- traceEvGCMarkAssistDone = 44 // GC mark assist done [timestamp]
- traceEvUserTaskCreate = 45 // trace.NewContext [timestamp, internal task id, internal parent task id, stack, name string]
- traceEvUserTaskEnd = 46 // end of a task [timestamp, internal task id, stack]
- traceEvUserRegion = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), stack, name string]
- traceEvUserLog = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
- traceEvCount = 49
-	// A byte is used, but only 6 bits are available for the event type.
-	// The remaining 2 bits are used to specify the number of arguments.
-	// That means the max event type value is 63.
-)
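-
-// Editor's sketch (not part of the original file) of the first-byte layout
-// described above: the low 6 bits carry the event type and the top 2 bits
-// carry the argument count, so for a hypothetical two-argument event:
-//
-//	b := byte(traceEvGoStart) | 2<<traceArgCountShift // 0x0e | 0x80
-//	evType := b & (1<<traceArgCountShift - 1)         // 14 again
-//	narg := b >> traceArgCountShift                   // 2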
-
-const (
-	// Timestamps in the trace are cputicks/traceTickDiv.
-	// This makes the absolute values of timestamp diffs smaller,
-	// so they are encoded in fewer bytes.
- // 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
- // The suggested increment frequency for PowerPC's time base register is
- // 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
- // and ppc64le.
- // Tracing won't work reliably for architectures where cputicks is emulated
- // by nanotime, so the value doesn't matter for those architectures.
- traceTickDiv = 16 + 48*(goarch.Is386|goarch.IsAmd64)
- // Maximum number of PCs in a single stack trace.
- // Since events contain only stack id rather than whole stack trace,
- // we can allow quite large values here.
- traceStackSize = 128
- // Identifier of a fake P that is used when we trace without a real P.
- traceGlobProc = -1
- // Maximum number of bytes to encode uint64 in base-128.
- traceBytesPerNumber = 10
- // Shift of the number of arguments in the first event byte.
- traceArgCountShift = 6
- // Flag passed to traceGoPark to denote that the previous wakeup of this
- // goroutine was futile. For example, a goroutine was unblocked on a mutex,
- // but another goroutine got ahead and acquired the mutex before the first
- // goroutine is scheduled, so the first goroutine has to block again.
- // Such wakeups happen on buffered channels and sync.Mutex,
- // but are generally not interesting for end user.
- traceFutileWakeup byte = 128
-)
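-
-// Editor's note: goarch.Is386 and goarch.IsAmd64 above are 0-or-1 constants,
-// so traceTickDiv evaluates to 16+48 = 64 on 386/amd64 and to 16 on every
-// other architecture, including ppc64/ppc64le.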
-
-// trace is global tracing context.
-var trace struct {
- lock mutex // protects the following members
- lockOwner *g // to avoid deadlocks during recursive lock acquisition
- enabled bool // when set runtime traces events
- shutdown bool // set when we are waiting for trace reader to finish after setting enabled to false
- headerWritten bool // whether ReadTrace has emitted trace header
- footerWritten bool // whether ReadTrace has emitted trace footer
- shutdownSema uint32 // used to wait for ReadTrace completion
- seqStart uint64 // sequence number when tracing was started
- ticksStart int64 // cputicks when tracing was started
- ticksEnd int64 // cputicks when tracing was stopped
- timeStart int64 // nanotime when tracing was started
- timeEnd int64 // nanotime when tracing was stopped
- seqGC uint64 // GC start/done sequencer
- reading traceBufPtr // buffer currently handed off to user
- empty traceBufPtr // stack of empty buffers
- fullHead traceBufPtr // queue of full buffers
- fullTail traceBufPtr
- reader guintptr // goroutine that called ReadTrace, or nil
- stackTab traceStackTable // maps stack traces to unique ids
-
- // Dictionary for traceEvString.
- //
- // TODO: central lock to access the map is not ideal.
- // option: pre-assign ids to all user annotation region names and tags
- // option: per-P cache
- // option: sync.Map like data structure
- stringsLock mutex
- strings map[string]uint64
- stringSeq uint64
-
- // markWorkerLabels maps gcMarkWorkerMode to string ID.
- markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64
-
- bufLock mutex // protects buf
- buf traceBufPtr // global trace buffer, used when running without a p
-}
-
-// traceBufHeader is per-P tracing buffer.
-type traceBufHeader struct {
- link traceBufPtr // in trace.empty/full
- lastTicks uint64 // when we wrote the last event
- pos int // next write offset in arr
- stk [traceStackSize]uintptr // scratch buffer for traceback
-}
-
-// traceBuf is per-P tracing buffer.
-//
-//go:notinheap
-type traceBuf struct {
- traceBufHeader
- arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
-}
-
-// traceBufPtr is a *traceBuf that is not traced by the garbage
-// collector and doesn't have write barriers. traceBufs are not
-// allocated from the GC'd heap, so this is safe; they are also often
-// manipulated in contexts where write barriers are not allowed, so
-// this is necessary.
-//
-// TODO: Since traceBuf is now go:notinheap, this isn't necessary.
-type traceBufPtr uintptr
-
-func (tp traceBufPtr) ptr() *traceBuf { return (*traceBuf)(unsafe.Pointer(tp)) }
-func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
-func traceBufPtrOf(b *traceBuf) traceBufPtr {
- return traceBufPtr(unsafe.Pointer(b))
-}
-
-// StartTrace enables tracing for the current process.
-// While tracing, the data will be buffered and available via ReadTrace.
-// StartTrace returns an error if tracing is already enabled.
-// Most clients should use the runtime/trace package or the testing package's
-// -test.trace flag instead of calling StartTrace directly.
-func StartTrace() error {
- // Stop the world so that we can take a consistent snapshot
- // of all goroutines at the beginning of the trace.
- // Do not stop the world during GC so we ensure we always see
- // a consistent view of GC-related events (e.g. a start is always
- // paired with an end).
- stopTheWorldGC("start tracing")
-
- // Prevent sysmon from running any code that could generate events.
- lock(&sched.sysmonlock)
-
- // We are in stop-the-world, but syscalls can finish and write to the trace concurrently.
- // Exitsyscall could check trace.enabled long before and then suddenly wake up
- // and decide to write to the trace at a random point in time.
- // However, such a syscall will use the global trace.buf buffer, because we've
- // acquired all p's by doing stop-the-world. So this protects us from such races.
- lock(&trace.bufLock)
-
- if trace.enabled || trace.shutdown {
- unlock(&trace.bufLock)
- unlock(&sched.sysmonlock)
- startTheWorldGC()
- return errorString("tracing is already enabled")
- }
-
- // Can't set trace.enabled yet. While the world is stopped, exitsyscall could
- // already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
- // That would lead to an inconsistent trace:
- // - either GoSysExit appears before EvGoInSyscall,
- // - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
- // To instruct traceEvent that it must not ignore events below, we set startingtrace.
- // trace.enabled is set afterwards once we have emitted all preliminary events.
- _g_ := getg()
- _g_.m.startingtrace = true
-
- // Obtain current stack ID to use in all traceEvGoCreate events below.
- mp := acquirem()
- stkBuf := make([]uintptr, traceStackSize)
- stackID := traceStackID(mp, stkBuf, 2)
- releasem(mp)
-
- // World is stopped, no need to lock.
- forEachGRace(func(gp *g) {
- status := readgstatus(gp)
- if status != _Gdead {
- gp.traceseq = 0
- gp.tracelastp = getg().m.p
- // +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
- id := trace.stackTab.put([]uintptr{startPCforTrace(gp.startpc) + sys.PCQuantum})
- traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
- }
- if status == _Gwaiting {
- // traceEvGoWaiting is implied to have seq=1.
- gp.traceseq++
- traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
- }
- if status == _Gsyscall {
- gp.traceseq++
- traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
- } else {
- gp.sysblocktraced = false
- }
- })
- traceProcStart()
- traceGoStart()
- // Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
- // If we do it the other way around, it is possible that exitsyscall will
- // query sysexitticks after ticksStart but before traceEvGoInSyscall timestamp.
- // That would lead to a false conclusion that cputicks is broken.
- trace.ticksStart = cputicks()
- trace.timeStart = nanotime()
- trace.headerWritten = false
- trace.footerWritten = false
-
- // string to id mapping
- // 0 : reserved for an empty string
- // remaining: other strings registered by traceString
- trace.stringSeq = 0
- trace.strings = make(map[string]uint64)
-
- trace.seqGC = 0
- _g_.m.startingtrace = false
- trace.enabled = true
-
- // Register runtime goroutine labels.
- _, pid, bufp := traceAcquireBuffer()
- for i, label := range gcMarkWorkerModeStrings[:] {
- trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
- }
- traceReleaseBuffer(pid)
-
- unlock(&trace.bufLock)
-
- unlock(&sched.sysmonlock)
-
- startTheWorldGC()
- return nil
-}
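-
-// A minimal usage sketch (editor's addition, not part of the original file):
-// most programs drive tracing through the runtime/trace package rather than
-// calling StartTrace/StopTrace directly:
-//
-//	f, err := os.Create("trace.out")
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	if err := trace.Start(f); err != nil {
-//		log.Fatal(err)
-//	}
-//	defer trace.Stop()
-//
-// The resulting file can be inspected with `go tool trace trace.out`.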
-
-// StopTrace stops tracing, if it was previously enabled.
-// StopTrace only returns after all the reads for the trace have completed.
-func StopTrace() {
- // Stop the world so that we can collect the trace buffers from all p's below,
- // and also to avoid races with traceEvent.
- stopTheWorldGC("stop tracing")
-
- // See the comment in StartTrace.
- lock(&sched.sysmonlock)
-
- // See the comment in StartTrace.
- lock(&trace.bufLock)
-
- if !trace.enabled {
- unlock(&trace.bufLock)
- unlock(&sched.sysmonlock)
- startTheWorldGC()
- return
- }
-
- traceGoSched()
-
- // Loop over all allocated Ps because dead Ps may still have
- // trace buffers.
- for _, p := range allp[:cap(allp)] {
- buf := p.tracebuf
- if buf != 0 {
- traceFullQueue(buf)
- p.tracebuf = 0
- }
- }
- if trace.buf != 0 {
- buf := trace.buf
- trace.buf = 0
- if buf.ptr().pos != 0 {
- traceFullQueue(buf)
- }
- }
-
- for {
- trace.ticksEnd = cputicks()
- trace.timeEnd = nanotime()
- // Windows time can tick only every 15ms, so wait for at least one tick.
- if trace.timeEnd != trace.timeStart {
- break
- }
- osyield()
- }
-
- trace.enabled = false
- trace.shutdown = true
- unlock(&trace.bufLock)
-
- unlock(&sched.sysmonlock)
-
- startTheWorldGC()
-
- // The world is started but we've set trace.shutdown, so new tracing can't start.
- // Wait for the trace reader to flush pending buffers and stop.
- semacquire(&trace.shutdownSema)
- if raceenabled {
- raceacquire(unsafe.Pointer(&trace.shutdownSema))
- }
-
- // The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
- lock(&trace.lock)
- for _, p := range allp[:cap(allp)] {
- if p.tracebuf != 0 {
- throw("trace: non-empty trace buffer in proc")
- }
- }
- if trace.buf != 0 {
- throw("trace: non-empty global trace buffer")
- }
- if trace.fullHead != 0 || trace.fullTail != 0 {
- throw("trace: non-empty full trace buffer")
- }
- if trace.reading != 0 || trace.reader != 0 {
- throw("trace: reading after shutdown")
- }
- for trace.empty != 0 {
- buf := trace.empty
- trace.empty = buf.ptr().link
- sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
- }
- trace.strings = nil
- trace.shutdown = false
- unlock(&trace.lock)
-}
-
-// ReadTrace returns the next chunk of binary tracing data, blocking until data
-// is available. If tracing is turned off and all the data accumulated while it
-// was on has been returned, ReadTrace returns nil. The caller must copy the
-// returned data before calling ReadTrace again.
-// ReadTrace must be called from one goroutine at a time.
-func ReadTrace() []byte {
- // This function may need to lock trace.lock recursively
- // (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
- // To allow this we use trace.lockOwner.
- // Also, this function must not allocate while holding trace.lock:
- // an allocation can call into the heap allocator, which will try to emit
- // a trace event while holding the heap lock.
- lock(&trace.lock)
- trace.lockOwner = getg()
-
- if trace.reader != 0 {
- // More than one goroutine reads the trace. This is bad.
- // But we would rather not crash the program because of tracing,
- // since tracing can be enabled at runtime on production servers.
- trace.lockOwner = nil
- unlock(&trace.lock)
- println("runtime: ReadTrace called from multiple goroutines simultaneously")
- return nil
- }
- // Recycle the old buffer.
- if buf := trace.reading; buf != 0 {
- buf.ptr().link = trace.empty
- trace.empty = buf
- trace.reading = 0
- }
- // Write trace header.
- if !trace.headerWritten {
- trace.headerWritten = true
- trace.lockOwner = nil
- unlock(&trace.lock)
- return []byte("go 1.11 trace\x00\x00\x00")
- }
- // Wait for new data.
- if trace.fullHead == 0 && !trace.shutdown {
- trace.reader.set(getg())
- goparkunlock(&trace.lock, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
- lock(&trace.lock)
- }
- // Write a buffer.
- if trace.fullHead != 0 {
- buf := traceFullDequeue()
- trace.reading = buf
- trace.lockOwner = nil
- unlock(&trace.lock)
- return buf.ptr().arr[:buf.ptr().pos]
- }
- // Write footer with timer frequency.
- if !trace.footerWritten {
- trace.footerWritten = true
- // Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
- freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
- if freq <= 0 {
- throw("trace: ReadTrace got invalid frequency")
- }
- trace.lockOwner = nil
- unlock(&trace.lock)
- var data []byte
- data = append(data, traceEvFrequency|0<<traceArgCountShift)
- data = traceAppend(data, uint64(freq))
- // This will emit a bunch of full buffers, which we will pick up
- // on the next iteration.
- trace.stackTab.dump()
- return data
- }
- // Done.
- if trace.shutdown {
- trace.lockOwner = nil
- unlock(&trace.lock)
- if raceenabled {
- // Model synchronization on trace.shutdownSema, which race
- // detector does not see. This is required to avoid false
- // race reports on writer passed to trace.Start.
- racerelease(unsafe.Pointer(&trace.shutdownSema))
- }
- // trace.enabled is already reset, so can call traceable functions.
- semrelease(&trace.shutdownSema)
- return nil
- }
- // Also bad, but see the comment above.
- trace.lockOwner = nil
- unlock(&trace.lock)
- println("runtime: spurious wakeup of trace reader")
- return nil
-}
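-
-// A sketch (editor's addition) of the consumption loop, roughly what
-// runtime/trace.Start runs in a background goroutine for some io.Writer w:
-//
-//	for {
-//		data := runtime.ReadTrace()
-//		if data == nil {
-//			break // tracing stopped and all buffered data returned
-//		}
-//		w.Write(data) // copy/consume before the next ReadTrace call
-//	}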
-
-// traceReader returns the trace reader that should be woken up, if any.
-func traceReader() *g {
- if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
- return nil
- }
- lock(&trace.lock)
- if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
- unlock(&trace.lock)
- return nil
- }
- gp := trace.reader.ptr()
- trace.reader.set(nil)
- unlock(&trace.lock)
- return gp
-}
-
-// traceProcFree frees trace buffer associated with pp.
-func traceProcFree(pp *p) {
- buf := pp.tracebuf
- pp.tracebuf = 0
- if buf == 0 {
- return
- }
- lock(&trace.lock)
- traceFullQueue(buf)
- unlock(&trace.lock)
-}
-
-// traceFullQueue queues buf into queue of full buffers.
-func traceFullQueue(buf traceBufPtr) {
- buf.ptr().link = 0
- if trace.fullHead == 0 {
- trace.fullHead = buf
- } else {
- trace.fullTail.ptr().link = buf
- }
- trace.fullTail = buf
-}
-
-// traceFullDequeue dequeues from queue of full buffers.
-func traceFullDequeue() traceBufPtr {
- buf := trace.fullHead
- if buf == 0 {
- return 0
- }
- trace.fullHead = buf.ptr().link
- if trace.fullHead == 0 {
- trace.fullTail = 0
- }
- buf.ptr().link = 0
- return buf
-}
-
-// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
-// ev is the event type.
-// If skip > 0, write the current stack id as the last argument (skipping skip top frames).
-// If skip = 0, this event type should contain a stack, but we don't want
-// to collect and remember it for this particular call.
-func traceEvent(ev byte, skip int, args ...uint64) {
- mp, pid, bufp := traceAcquireBuffer()
- // Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
- // This protects from races between traceEvent and StartTrace/StopTrace.
-
- // The caller checked that trace.enabled == true, but trace.enabled might have been
- // turned off between the check and now. Check again. traceLockBuffer did mp.locks++,
- // StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
- // so if we see trace.enabled == true now, we know it's true for the rest of the function.
- // Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
- // during tracing in exitsyscall is resolved by locking trace.bufLock in traceLockBuffer.
- //
- // Note trace_userTaskCreate runs the same check.
- if !trace.enabled && !mp.startingtrace {
- traceReleaseBuffer(pid)
- return
- }
-
- if skip > 0 {
- if getg() == mp.curg {
- skip++ // +1 because stack is captured in traceEventLocked.
- }
- }
- traceEventLocked(0, mp, pid, bufp, ev, skip, args...)
- traceReleaseBuffer(pid)
-}
-
-func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, skip int, args ...uint64) {
- buf := bufp.ptr()
- // TODO: test on non-zero extraBytes param.
- maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two additional params
- if buf == nil || len(buf.arr)-buf.pos < maxSize {
- buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
- bufp.set(buf)
- }
-
- // NOTE: ticks might be the same after tick division, even though the
- // real cputicks grows monotonically.
- ticks := uint64(cputicks()) / traceTickDiv
- tickDiff := ticks - buf.lastTicks
- if tickDiff == 0 {
- ticks = buf.lastTicks + 1
- tickDiff = 1
- }
-
- buf.lastTicks = ticks
- narg := byte(len(args))
- if skip >= 0 {
- narg++
- }
- // We have only 2 bits for number of arguments.
- // If number is >= 3, then the event type is followed by event length in bytes.
- if narg > 3 {
- narg = 3
- }
- startPos := buf.pos
- buf.byte(ev | narg<<traceArgCountShift)
- var lenp *byte
- if narg == 3 {
- // Reserve the byte for length assuming that length < 128.
- buf.varint(0)
- lenp = &buf.arr[buf.pos-1]
- }
- buf.varint(tickDiff)
- for _, a := range args {
- buf.varint(a)
- }
- if skip == 0 {
- buf.varint(0)
- } else if skip > 0 {
- buf.varint(traceStackID(mp, buf.stk[:], skip))
- }
- evSize := buf.pos - startPos
- if evSize > maxSize {
- throw("invalid length of trace event")
- }
- if lenp != nil {
- // Fill in actual length.
- *lenp = byte(evSize - 2)
- }
-}
-
-func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
- _g_ := getg()
- gp := mp.curg
- var nstk int
- if gp == _g_ {
- nstk = callers(skip+1, buf)
- } else if gp != nil {
- gp = mp.curg
- nstk = gcallers(gp, skip, buf)
- }
- if nstk > 0 {
- nstk-- // skip runtime.goexit
- }
- if nstk > 0 && gp.goid == 1 {
- nstk-- // skip runtime.main
- }
- id := trace.stackTab.put(buf[:nstk])
- return uint64(id)
-}
-
-// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
-func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
- mp = acquirem()
- if p := mp.p.ptr(); p != nil {
- return mp, p.id, &p.tracebuf
- }
- lock(&trace.bufLock)
- return mp, traceGlobProc, &trace.buf
-}
-
-// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
-func traceReleaseBuffer(pid int32) {
- if pid == traceGlobProc {
- unlock(&trace.bufLock)
- }
- releasem(getg().m)
-}
-
-// traceFlush queues buf onto the queue of full buffers and returns an empty buffer.
-func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
- owner := trace.lockOwner
- dolock := owner == nil || owner != getg().m.curg
- if dolock {
- lock(&trace.lock)
- }
- if buf != 0 {
- traceFullQueue(buf)
- }
- if trace.empty != 0 {
- buf = trace.empty
- trace.empty = buf.ptr().link
- } else {
- buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
- if buf == 0 {
- throw("trace: out of memory")
- }
- }
- bufp := buf.ptr()
- bufp.link.set(nil)
- bufp.pos = 0
-
- // initialize the buffer for a new batch
- ticks := uint64(cputicks()) / traceTickDiv
- if ticks == bufp.lastTicks {
- ticks = bufp.lastTicks + 1
- }
- bufp.lastTicks = ticks
- bufp.byte(traceEvBatch | 1<<traceArgCountShift)
- bufp.varint(uint64(pid))
- bufp.varint(ticks)
-
- if dolock {
- unlock(&trace.lock)
- }
- return buf
-}
-
-// traceString adds a string to the trace.strings and returns the id.
-func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
- if s == "" {
- return 0, bufp
- }
-
- lock(&trace.stringsLock)
- if raceenabled {
- // raceacquire is necessary because the map access
- // below is race annotated.
- raceacquire(unsafe.Pointer(&trace.stringsLock))
- }
-
- if id, ok := trace.strings[s]; ok {
- if raceenabled {
- racerelease(unsafe.Pointer(&trace.stringsLock))
- }
- unlock(&trace.stringsLock)
-
- return id, bufp
- }
-
- trace.stringSeq++
- id := trace.stringSeq
- trace.strings[s] = id
-
- if raceenabled {
- racerelease(unsafe.Pointer(&trace.stringsLock))
- }
- unlock(&trace.stringsLock)
-
- // The memory allocation above may trigger tracing and
- // change *bufp. The following code works with *bufp,
- // so there must be no memory allocation or other activity
- // that triggers tracing after this point.
-
- buf := bufp.ptr()
- size := 1 + 2*traceBytesPerNumber + len(s)
- if buf == nil || len(buf.arr)-buf.pos < size {
- buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
- bufp.set(buf)
- }
- buf.byte(traceEvString)
- buf.varint(id)
-
- // Double-check that the string and its length can fit.
- // Otherwise, truncate the string.
- slen := len(s)
- if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
- slen = room
- }
-
- buf.varint(uint64(slen))
- buf.pos += copy(buf.arr[buf.pos:], s[:slen])
-
- bufp.set(buf)
- return id, bufp
-}
-
-// traceAppend appends v to buf in little-endian-base-128 encoding.
-func traceAppend(buf []byte, v uint64) []byte {
- for ; v >= 0x80; v >>= 7 {
- buf = append(buf, 0x80|byte(v))
- }
- buf = append(buf, byte(v))
- return buf
-}
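-
-// Editor's worked example: v = 300 (binary 1_0010_1100) is appended as two
-// bytes: 0xac (the low 7 bits, 0101100, with the continuation bit 0x80 set)
-// followed by 0x02 (the remaining high bits).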
-
-// varint appends v to buf in little-endian-base-128 encoding.
-func (buf *traceBuf) varint(v uint64) {
- pos := buf.pos
- for ; v >= 0x80; v >>= 7 {
- buf.arr[pos] = 0x80 | byte(v)
- pos++
- }
- buf.arr[pos] = byte(v)
- pos++
- buf.pos = pos
-}
-
-// byte appends v to buf.
-func (buf *traceBuf) byte(v byte) {
- buf.arr[buf.pos] = v
- buf.pos++
-}
-
-// traceStackTable maps stack traces (arrays of PCs) to unique uint32 ids.
-// It is lock-free for reading.
-type traceStackTable struct {
- lock mutex
- seq uint32
- mem traceAlloc
- tab [1 << 13]traceStackPtr
-}
-
-// traceStack is a single stack in traceStackTable.
-type traceStack struct {
- link traceStackPtr
- hash uintptr
- id uint32
- n int
- stk [0]uintptr // real type [n]uintptr
-}
-
-type traceStackPtr uintptr
-
-func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }
-
-// stack returns slice of PCs.
-func (ts *traceStack) stack() []uintptr {
- return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
-}
-
-// put returns a unique id for the stack trace pcs and caches it in the table,
-// if it sees the trace for the first time.
-func (tab *traceStackTable) put(pcs []uintptr) uint32 {
- if len(pcs) == 0 {
- return 0
- }
- hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
- // First, search the hashtable without taking the mutex.
- if id := tab.find(pcs, hash); id != 0 {
- return id
- }
- // Now, double check under the mutex.
- lock(&tab.lock)
- if id := tab.find(pcs, hash); id != 0 {
- unlock(&tab.lock)
- return id
- }
- // Create new record.
- tab.seq++
- stk := tab.newStack(len(pcs))
- stk.hash = hash
- stk.id = tab.seq
- stk.n = len(pcs)
- stkpc := stk.stack()
- for i, pc := range pcs {
- stkpc[i] = pc
- }
- part := int(hash % uintptr(len(tab.tab)))
- stk.link = tab.tab[part]
- atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
- unlock(&tab.lock)
- return stk.id
-}
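-
-// Editor's note: put is a classic double-checked pattern. The first find runs
-// without the mutex; lock-free readers are safe because a new record is
-// published with atomicstorep only after it is fully initialized and existing
-// chain links are never mutated, so a concurrent find sees either the old or
-// the new head of the bucket.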
-
-// find checks if the stack trace pcs is already present in the table.
-func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
- part := int(hash % uintptr(len(tab.tab)))
-Search:
- for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
- if stk.hash == hash && stk.n == len(pcs) {
- for i, stkpc := range stk.stack() {
- if stkpc != pcs[i] {
- continue Search
- }
- }
- return stk.id
- }
- }
- return 0
-}
-
-// newStack allocates a new stack of size n.
-func (tab *traceStackTable) newStack(n int) *traceStack {
- return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*goarch.PtrSize))
-}
-
-// allFrames returns all of the Frames corresponding to pcs.
-func allFrames(pcs []uintptr) []Frame {
- frames := make([]Frame, 0, len(pcs))
- ci := CallersFrames(pcs)
- for {
- f, more := ci.Next()
- frames = append(frames, f)
- if !more {
- return frames
- }
- }
-}
-
-// dump writes all previously cached stacks to trace buffers,
-// releases all memory and resets state.
-func (tab *traceStackTable) dump() {
- var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
- bufp := traceFlush(0, 0)
- for _, stk := range tab.tab {
- stk := stk.ptr()
- for ; stk != nil; stk = stk.link.ptr() {
- tmpbuf := tmp[:0]
- tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
- frames := allFrames(stk.stack())
- tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
- for _, f := range frames {
- var frame traceFrame
- frame, bufp = traceFrameForPC(bufp, 0, f)
- tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
- tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
- tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
- tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
- }
- // Now copy to the buffer.
- size := 1 + traceBytesPerNumber + len(tmpbuf)
- if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
- bufp = traceFlush(bufp, 0)
- }
- buf := bufp.ptr()
- buf.byte(traceEvStack | 3<<traceArgCountShift)
- buf.varint(uint64(len(tmpbuf)))
- buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
- }
- }
-
- lock(&trace.lock)
- traceFullQueue(bufp)
- unlock(&trace.lock)
-
- tab.mem.drop()
- *tab = traceStackTable{}
- lockInit(&((*tab).lock), lockRankTraceStackTab)
-}
-
-type traceFrame struct {
- funcID uint64
- fileID uint64
- line uint64
-}
-
-// traceFrameForPC records the frame information.
-// It may allocate memory.
-func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
- bufp := &buf
- var frame traceFrame
-
- fn := f.Function
- const maxLen = 1 << 10
- if len(fn) > maxLen {
- fn = fn[len(fn)-maxLen:]
- }
- frame.funcID, bufp = traceString(bufp, pid, fn)
- frame.line = uint64(f.Line)
- file := f.File
- if len(file) > maxLen {
- file = file[len(file)-maxLen:]
- }
- frame.fileID, bufp = traceString(bufp, pid, file)
- return frame, (*bufp)
-}
-
-// traceAlloc is a non-thread-safe region allocator.
-// It holds a linked list of traceAllocBlock.
-type traceAlloc struct {
- head traceAllocBlockPtr
- off uintptr
-}
-
-// traceAllocBlock is a block in traceAlloc.
-//
-// traceAllocBlock is allocated from non-GC'd memory, so it must not
-// contain heap pointers. Writes to pointers to traceAllocBlocks do
-// not need write barriers.
-//
-//go:notinheap
-type traceAllocBlock struct {
- next traceAllocBlockPtr
- data [64<<10 - goarch.PtrSize]byte
-}
-
-// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
-type traceAllocBlockPtr uintptr
-
-func (p traceAllocBlockPtr) ptr() *traceAllocBlock { return (*traceAllocBlock)(unsafe.Pointer(p)) }
-func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }
-
-// alloc allocates n-byte block.
-func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
- n = alignUp(n, goarch.PtrSize)
- if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
- if n > uintptr(len(a.head.ptr().data)) {
- throw("trace: alloc too large")
- }
- block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
- if block == nil {
- throw("trace: out of memory")
- }
- block.next.set(a.head.ptr())
- a.head.set(block)
- a.off = 0
- }
- p := &a.head.ptr().data[a.off]
- a.off += n
- return unsafe.Pointer(p)
-}
-
-// drop frees all previously allocated memory and resets the allocator.
-func (a *traceAlloc) drop() {
- for a.head != 0 {
- block := a.head.ptr()
- a.head.set(block.next.ptr())
- sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
- }
-}
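-
-// Editor's note: the allocator is strictly bump-and-chain. alloc advances
-// a.off within the current 64KB block and links in a fresh block when the
-// request doesn't fit; there is no per-object free, so memory is reclaimed
-// only by drop, which the stack table calls from dump.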
-
-// The following functions write specific events to trace.
-
-func traceGomaxprocs(procs int32) {
- traceEvent(traceEvGomaxprocs, 1, uint64(procs))
-}
-
-func traceProcStart() {
- traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
-}
-
-func traceProcStop(pp *p) {
- // Sysmon and stopTheWorld can stop Ps blocked in syscalls;
- // to handle this we temporarily employ the P.
- mp := acquirem()
- oldp := mp.p
- mp.p.set(pp)
- traceEvent(traceEvProcStop, -1)
- mp.p = oldp
- releasem(mp)
-}
-
-func traceGCStart() {
- traceEvent(traceEvGCStart, 3, trace.seqGC)
- trace.seqGC++
-}
-
-func traceGCDone() {
- traceEvent(traceEvGCDone, -1)
-}
-
-func traceGCSTWStart(kind int) {
- traceEvent(traceEvGCSTWStart, -1, uint64(kind))
-}
-
-func traceGCSTWDone() {
- traceEvent(traceEvGCSTWDone, -1)
-}
-
-// traceGCSweepStart prepares to trace a sweep loop. This does not
-// emit any events until traceGCSweepSpan is called.
-//
-// traceGCSweepStart must be paired with traceGCSweepDone and there
-// must be no preemption points between these two calls.
-func traceGCSweepStart() {
- // Delay the actual GCSweepStart event until the first span
- // sweep. If we don't sweep anything, don't emit any events.
- _p_ := getg().m.p.ptr()
- if _p_.traceSweep {
- throw("double traceGCSweepStart")
- }
- _p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
-}
-
-// traceGCSweepSpan traces the sweep of a single page.
-//
-// This may be called outside a traceGCSweepStart/traceGCSweepDone
-// pair; however, it will not emit any trace events in this case.
-func traceGCSweepSpan(bytesSwept uintptr) {
- _p_ := getg().m.p.ptr()
- if _p_.traceSweep {
- if _p_.traceSwept == 0 {
- traceEvent(traceEvGCSweepStart, 1)
- }
- _p_.traceSwept += bytesSwept
- }
-}
-
-func traceGCSweepDone() {
- _p_ := getg().m.p.ptr()
- if !_p_.traceSweep {
- throw("missing traceGCSweepStart")
- }
- if _p_.traceSwept != 0 {
- traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
- }
- _p_.traceSweep = false
-}
-
-func traceGCMarkAssistStart() {
- traceEvent(traceEvGCMarkAssistStart, 1)
-}
-
-func traceGCMarkAssistDone() {
- traceEvent(traceEvGCMarkAssistDone, -1)
-}
-
-func traceGoCreate(newg *g, pc uintptr) {
- newg.traceseq = 0
- newg.tracelastp = getg().m.p
- // +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
- id := trace.stackTab.put([]uintptr{startPCforTrace(pc) + sys.PCQuantum})
- traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
-}
-
-func traceGoStart() {
- _g_ := getg().m.curg
- _p_ := _g_.m.p
- _g_.traceseq++
- if _p_.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
- traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
- } else if _g_.tracelastp == _p_ {
- traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
- } else {
- _g_.tracelastp = _p_
- traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
- }
-}
-
-func traceGoEnd() {
- traceEvent(traceEvGoEnd, -1)
-}
-
-func traceGoSched() {
- _g_ := getg()
- _g_.tracelastp = _g_.m.p
- traceEvent(traceEvGoSched, 1)
-}
-
-func traceGoPreempt() {
- _g_ := getg()
- _g_.tracelastp = _g_.m.p
- traceEvent(traceEvGoPreempt, 1)
-}
-
-func traceGoPark(traceEv byte, skip int) {
- if traceEv&traceFutileWakeup != 0 {
- traceEvent(traceEvFutileWakeup, -1)
- }
- traceEvent(traceEv & ^traceFutileWakeup, skip)
-}
-
-func traceGoUnpark(gp *g, skip int) {
- _p_ := getg().m.p
- gp.traceseq++
- if gp.tracelastp == _p_ {
- traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
- } else {
- gp.tracelastp = _p_
- traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
- }
-}
-
-func traceGoSysCall() {
- traceEvent(traceEvGoSysCall, 1)
-}
-
-func traceGoSysExit(ts int64) {
- if ts != 0 && ts < trace.ticksStart {
- // There is a race between the code that initializes sysexitticks
- // (in exitsyscall, which runs without a P, and therefore is not
- // stopped with the rest of the world) and the code that initializes
- // a new trace. The recorded sysexitticks must therefore be treated
- // as "best effort". If they are valid for this trace, then great,
- // use them for greater accuracy. But if they're not valid for this
- // trace, assume that the trace was started after the actual syscall
- // exit (but before we actually managed to start the goroutine,
- // aka right now), and assign a fresh time stamp to keep the log consistent.
- ts = 0
- }
- _g_ := getg().m.curg
- _g_.traceseq++
- _g_.tracelastp = _g_.m.p
- traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
-}
-
-func traceGoSysBlock(pp *p) {
- // Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked;
- // to handle this we temporarily employ the P.
- mp := acquirem()
- oldp := mp.p
- mp.p.set(pp)
- traceEvent(traceEvGoSysBlock, -1)
- mp.p = oldp
- releasem(mp)
-}
-
-func traceHeapAlloc() {
- traceEvent(traceEvHeapAlloc, -1, gcController.heapLive)
-}
-
-func traceHeapGoal() {
- if heapGoal := atomic.Load64(&gcController.heapGoal); heapGoal == ^uint64(0) {
- // Heap-based triggering is disabled.
- traceEvent(traceEvHeapGoal, -1, 0)
- } else {
- traceEvent(traceEvHeapGoal, -1, heapGoal)
- }
-}
-
-// The following functions are linked into runtime/trace to give it access
-// to these runtime internals. See runtime/trace/annotation.go.
-
-//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
-func trace_userTaskCreate(id, parentID uint64, taskType string) {
- if !trace.enabled {
- return
- }
-
- // Same as in traceEvent.
- mp, pid, bufp := traceAcquireBuffer()
- if !trace.enabled && !mp.startingtrace {
- traceReleaseBuffer(pid)
- return
- }
-
- typeStringID, bufp := traceString(bufp, pid, taskType)
- traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 3, id, parentID, typeStringID)
- traceReleaseBuffer(pid)
-}
-
-//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
-func trace_userTaskEnd(id uint64) {
- traceEvent(traceEvUserTaskEnd, 2, id)
-}
-
-//go:linkname trace_userRegion runtime/trace.userRegion
-func trace_userRegion(id, mode uint64, name string) {
- if !trace.enabled {
- return
- }
-
- mp, pid, bufp := traceAcquireBuffer()
- if !trace.enabled && !mp.startingtrace {
- traceReleaseBuffer(pid)
- return
- }
-
- nameStringID, bufp := traceString(bufp, pid, name)
- traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 3, id, mode, nameStringID)
- traceReleaseBuffer(pid)
-}
-
-//go:linkname trace_userLog runtime/trace.userLog
-func trace_userLog(id uint64, category, message string) {
- if !trace.enabled {
- return
- }
-
- mp, pid, bufp := traceAcquireBuffer()
- if !trace.enabled && !mp.startingtrace {
- traceReleaseBuffer(pid)
- return
- }
-
- categoryID, bufp := traceString(bufp, pid, category)
-
- extraSpace := traceBytesPerNumber + len(message) // extraSpace for the value string
- traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 3, id, categoryID)
- // traceEventLocked reserved extra space for val and len(val)
- // in buf, so buf now has room for the following.
- buf := bufp.ptr()
-
- // Double-check that the message and its length can fit.
- // Otherwise, truncate the message.
- slen := len(message)
- if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
- slen = room
- }
- buf.varint(uint64(slen))
- buf.pos += copy(buf.arr[buf.pos:], message[:slen])
-
- traceReleaseBuffer(pid)
-}
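-
-// Editor's sketch of the public annotation API that reaches these hooks (the
-// task/region/category names below are made up for illustration):
-//
-//	ctx, task := trace.NewTask(ctx, "makeRequest") // trace_userTaskCreate
-//	defer task.End()                               // trace_userTaskEnd
-//	trace.WithRegion(ctx, "decode", decodeFn)      // trace_userRegion
-//	trace.Log(ctx, "status", "ok")                 // trace_userLog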
-
-// startPCforTrace returns the start PC of a goroutine for tracing purposes.
-// If pc is a wrapper, it returns the PC of the wrapped function. Otherwise it
-// returns pc.
-func startPCforTrace(pc uintptr) uintptr {
- f := findfunc(pc)
- if !f.valid() {
- return pc // should not happen, but don't care
- }
- w := funcdata(f, _FUNCDATA_WrapInfo)
- if w == nil {
- return pc // not a wrapper
- }
- return f.datap.textAddr(*(*uint32)(w))
-}
diff --git a/contrib/go/_std_1.18/src/runtime/traceback.go b/contrib/go/_std_1.18/src/runtime/traceback.go
deleted file mode 100644
index 229e3a9105..0000000000
--- a/contrib/go/_std_1.18/src/runtime/traceback.go
+++ /dev/null
@@ -1,1436 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/bytealg"
- "internal/goarch"
- "runtime/internal/atomic"
- "runtime/internal/sys"
- "unsafe"
-)
-
-// The code in this file implements stack trace walking for all architectures.
-// The most important fact about a given architecture is whether it uses a link register.
-// On systems with link registers, the prologue for a non-leaf function stores the
-// incoming value of LR at the bottom of the newly allocated stack frame.
-// On systems without link registers (x86), the architecture pushes a return PC during
-// the call instruction, so the return PC ends up above the stack frame.
-// In this file, the return PC is always called LR, no matter how it was found.
-
-const usesLR = sys.MinFrameSize > 0
-
-// Generic traceback. Handles runtime stack prints (pcbuf == nil),
-// the runtime.Callers function (pcbuf != nil), as well as the garbage
-// collector (callback != nil). A little clunky to merge these, but avoids
-// duplicating the code and all its subtlety.
-//
-// The skip argument is only valid with pcbuf != nil and counts the number
-// of logical frames to skip rather than physical frames (with inlining, a
-// PC in pcbuf can represent multiple calls).
-func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max int, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer, flags uint) int {
- if skip > 0 && callback != nil {
- throw("gentraceback callback cannot be used with non-zero skip")
- }
-
- // Don't call this "g"; it's too easy to get "g" and "gp" confused.
- if ourg := getg(); ourg == gp && ourg == ourg.m.curg {
- // The starting sp has been passed in as a uintptr, and the caller may
- // have other uintptr-typed stack references as well.
- // If during one of the calls that got us here or during one of the
- // callbacks below the stack must be grown, all these uintptr references
- // to the stack will not be updated, and gentraceback will continue
- // to inspect the old stack memory, which may no longer be valid.
- // Even if all the variables were updated correctly, it is not clear that
- // we want to expose a traceback that begins on one stack and ends
- // on another stack. That could confuse callers quite a bit.
- // Instead, we require that gentraceback and any other function that
- // accepts an sp for the current goroutine (typically obtained by
- // calling getcallersp) must not run on that goroutine's stack but
- // instead on the g0 stack.
- throw("gentraceback cannot trace user goroutine on its own stack")
- }
- level, _, _ := gotraceback()
-
- var ctxt *funcval // Context pointer for unstarted goroutines. See issue #25897.
-
- if pc0 == ^uintptr(0) && sp0 == ^uintptr(0) { // Signal to fetch saved values from gp.
- if gp.syscallsp != 0 {
- pc0 = gp.syscallpc
- sp0 = gp.syscallsp
- if usesLR {
- lr0 = 0
- }
- } else {
- pc0 = gp.sched.pc
- sp0 = gp.sched.sp
- if usesLR {
- lr0 = gp.sched.lr
- }
- ctxt = (*funcval)(gp.sched.ctxt)
- }
- }
-
- nprint := 0
- var frame stkframe
- frame.pc = pc0
- frame.sp = sp0
- if usesLR {
- frame.lr = lr0
- }
- waspanic := false
- cgoCtxt := gp.cgoCtxt
- printing := pcbuf == nil && callback == nil
-
- // If the PC is zero, it's likely a nil function call.
- // Start in the caller's frame.
- if frame.pc == 0 {
- if usesLR {
- frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp))
- frame.lr = 0
- } else {
- frame.pc = uintptr(*(*uintptr)(unsafe.Pointer(frame.sp)))
- frame.sp += goarch.PtrSize
- }
- }
-
- // runtime/internal/atomic functions call into kernel helpers on
- // arm < 7. See runtime/internal/atomic/sys_linux_arm.s.
- //
- // Start in the caller's frame.
- if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && frame.pc&0xffff0000 == 0xffff0000 {
- // Note that the calls are simple BL without pushing the return
- // address, so we use LR directly.
- //
- // The kernel helpers are frameless leaf functions, so SP and
- // LR are not touched.
- frame.pc = frame.lr
- frame.lr = 0
- }
-
- f := findfunc(frame.pc)
- if !f.valid() {
- if callback != nil || printing {
- print("runtime: unknown pc ", hex(frame.pc), "\n")
- tracebackHexdump(gp.stack, &frame, 0)
- }
- if callback != nil {
- throw("unknown pc")
- }
- return 0
- }
- frame.fn = f
-
- var cache pcvalueCache
-
- lastFuncID := funcID_normal
- n := 0
- for n < max {
- // Typically:
- // pc is the PC of the running function.
- // sp is the stack pointer at that program counter.
- // fp is the frame pointer (caller's stack pointer) at that program counter, or nil if unknown.
- // stk is the stack containing sp.
- // The caller's program counter is lr, unless lr is zero, in which case it is *(uintptr*)sp.
- f = frame.fn
- if f.pcsp == 0 {
- // No frame information, must be external function, like race support.
- // See golang.org/issue/13568.
- break
- }
-
- // Compute function info flags.
- flag := f.flag
- if f.funcID == funcID_cgocallback {
- // cgocallback does write SP to switch from the g0 to the curg stack,
- // but it carefully arranges that during the transition BOTH stacks
- // have cgocallback frame valid for unwinding through.
- // So we don't need to exclude it with the other SP-writing functions.
- flag &^= funcFlag_SPWRITE
- }
- if frame.pc == pc0 && frame.sp == sp0 && pc0 == gp.syscallpc && sp0 == gp.syscallsp {
- // Some Syscall functions write to SP, but they do so only after
- // saving the entry PC/SP using entersyscall.
- // Since we are using the entry PC/SP, the later SP write doesn't matter.
- flag &^= funcFlag_SPWRITE
- }
-
- // Found an actual function.
- // Derive frame pointer and link register.
- if frame.fp == 0 {
- // Jump over system stack transitions. If we're on g0 and there's a user
- // goroutine, try to jump. Otherwise this is a regular call.
- if flags&_TraceJumpStack != 0 && gp == gp.m.g0 && gp.m.curg != nil {
- switch f.funcID {
- case funcID_morestack:
- // morestack does not return normally -- newstack()
- // gogo's to curg.sched. Match that.
- // This keeps morestack() from showing up in the backtrace,
- // but that makes some sense since it'll never be returned
- // to.
- frame.pc = gp.m.curg.sched.pc
- frame.fn = findfunc(frame.pc)
- f = frame.fn
- flag = f.flag
- frame.sp = gp.m.curg.sched.sp
- cgoCtxt = gp.m.curg.cgoCtxt
- case funcID_systemstack:
- // systemstack returns normally, so just follow the
- // stack transition.
- frame.sp = gp.m.curg.sched.sp
- cgoCtxt = gp.m.curg.cgoCtxt
- flag &^= funcFlag_SPWRITE
- }
- }
- frame.fp = frame.sp + uintptr(funcspdelta(f, frame.pc, &cache))
- if !usesLR {
- // On x86, call instruction pushes return PC before entering new function.
- frame.fp += goarch.PtrSize
- }
- }
- var flr funcInfo
- if flag&funcFlag_TOPFRAME != 0 {
- // This function marks the top of the stack. Stop the traceback.
- frame.lr = 0
- flr = funcInfo{}
- } else if flag&funcFlag_SPWRITE != 0 && (callback == nil || n > 0) {
- // The function we are in does a write to SP that we don't know
- // how to encode in the spdelta table. Examples include context
- // switch routines like runtime.gogo but also any code that switches
- // to the g0 stack to run host C code. Since we can't reliably unwind
- // the SP (we might not even be on the stack we think we are),
- // we stop the traceback here.
- // This only applies for profiling signals (callback == nil).
- //
- // For a GC stack traversal (callback != nil), we should only see
- // a function when it has voluntarily preempted itself on entry
- // during the stack growth check. In that case, the function has
- // not yet had a chance to do any writes to SP and is safe to unwind.
- // isAsyncSafePoint does not allow assembly functions to be async preempted,
- // and preemptPark double-checks that SPWRITE functions are not async preempted.
- // So for GC stack traversal we leave things alone (this if body does not execute for n == 0)
- // at the bottom frame of the stack. But farther up the stack we'd better not
- // find any.
- if callback != nil {
- println("traceback: unexpected SPWRITE function", funcname(f))
- throw("traceback")
- }
- frame.lr = 0
- flr = funcInfo{}
- } else {
- var lrPtr uintptr
- if usesLR {
- if n == 0 && frame.sp < frame.fp || frame.lr == 0 {
- lrPtr = frame.sp
- frame.lr = *(*uintptr)(unsafe.Pointer(lrPtr))
- }
- } else {
- if frame.lr == 0 {
- lrPtr = frame.fp - goarch.PtrSize
- frame.lr = uintptr(*(*uintptr)(unsafe.Pointer(lrPtr)))
- }
- }
- flr = findfunc(frame.lr)
- if !flr.valid() {
- // This happens if you get a profiling interrupt at just the wrong time.
- // In that context it is okay to stop early.
- // But if callback is set, we're doing a garbage collection and must
- // get everything, so crash loudly.
- doPrint := printing
- if doPrint && gp.m.incgo && f.funcID == funcID_sigpanic {
- // We can inject sigpanic calls directly into C code,
- // in which case we'll see a C return PC. Don't complain.
- doPrint = false
- }
- if callback != nil || doPrint {
- print("runtime: unexpected return pc for ", funcname(f), " called from ", hex(frame.lr), "\n")
- tracebackHexdump(gp.stack, &frame, lrPtr)
- }
- if callback != nil {
- throw("unknown caller pc")
- }
- }
- }
-
- frame.varp = frame.fp
- if !usesLR {
- // On x86, call instruction pushes return PC before entering new function.
- frame.varp -= goarch.PtrSize
- }
-
- // For architectures with frame pointers, if there's
- // a frame, then there's a saved frame pointer here.
- //
- // NOTE: This code is not as general as it looks.
- // On x86, the ABI is to save the frame pointer word at the
- // top of the stack frame, so we have to back down over it.
- // On arm64, the frame pointer should be at the bottom of
- // the stack (with R29 (aka FP) = RSP), in which case we would
- // not want to do the subtraction here. But we started out without
- // any frame pointer, and when we wanted to add it, we didn't
- // want to break all the assembly doing direct writes to 8(RSP)
- // to set the first parameter to a called function.
- // So we decided to write the FP link *below* the stack pointer
- // (with R29 = RSP - 8 in Go functions).
- // This is technically ABI-compatible but not standard.
- // And it happens to end up mimicking the x86 layout.
- // Other architectures may make different decisions.
- if frame.varp > frame.sp && framepointer_enabled {
- frame.varp -= goarch.PtrSize
- }
-
- // Derive size of arguments.
- // Most functions have a fixed-size argument block,
- // so we can use metadata about the function f.
- // Not all, though: there are some variadic functions
- // in package runtime and reflect, and for those we use call-specific
- // metadata recorded by f's caller.
- if callback != nil || printing {
- frame.argp = frame.fp + sys.MinFrameSize
- var ok bool
- frame.arglen, frame.argmap, ok = getArgInfoFast(f, callback != nil)
- if !ok {
- frame.arglen, frame.argmap = getArgInfo(&frame, f, callback != nil, ctxt)
- }
- }
- ctxt = nil // ctxt is only needed to get arg maps for the topmost frame
-
- // Determine frame's 'continuation PC', where it can continue.
- // Normally this is the return address on the stack, but if sigpanic
- // is immediately below this function on the stack, then the frame
- // stopped executing due to a trap, and frame.pc is probably not
- // a safe point for looking up liveness information. In this panicking case,
- // the function either doesn't return at all (if it has no defers or if the
- // defers do not recover) or it returns from one of the calls to
- // deferproc a second time (if the corresponding deferred func recovers).
- // In the latter case, use a deferreturn call site as the continuation pc.
- frame.continpc = frame.pc
- if waspanic {
- if frame.fn.deferreturn != 0 {
- frame.continpc = frame.fn.entry() + uintptr(frame.fn.deferreturn) + 1
- // Note: this may perhaps keep return variables alive longer than
- // strictly necessary, as we are using "function has a defer statement"
- // as a proxy for "function actually deferred something". It seems
- // to be a minor drawback. (We used to actually look through the
- // gp._defer for a defer corresponding to this function, but that
- // is hard to do with defer records on the stack during a stack copy.)
- // Note: the +1 is to offset the -1 that
- // stack.go:getStackMap does to back up a return
- // address, to make sure the pc is in the CALL instruction.
- } else {
- frame.continpc = 0
- }
- }
-
- if callback != nil {
- if !callback((*stkframe)(noescape(unsafe.Pointer(&frame))), v) {
- return n
- }
- }
-
- if pcbuf != nil {
- pc := frame.pc
- // back up to the CALL instruction to read inlining info (same logic as in the printing branch below)
- tracepc := pc
- // Normally, pc is a return address. In that case, we want to look up
- // file/line information using pc-1, because that is the pc of the
- // call instruction (more precisely, the last byte of the call instruction).
- // Callers expect the pc buffer to contain return addresses and do the
- // same -1 themselves, so we keep pc unchanged.
- // When the pc is from a signal (e.g. profiler or segv) then we want
- // to look up file/line information using pc, and we store pc+1 in the
- // pc buffer so callers can unconditionally subtract 1 before looking up.
- // See issue 34123.
- // The pc can be at function entry when the frame is initialized without
- // actually running code, like runtime.mstart.
- if (n == 0 && flags&_TraceTrap != 0) || waspanic || pc == f.entry() {
- pc++
- } else {
- tracepc--
- }
-
- // If there is inlining info, record the inner frames.
- if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
- inltree := (*[1 << 20]inlinedCall)(inldata)
- for {
- ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, &cache)
- if ix < 0 {
- break
- }
- if inltree[ix].funcID == funcID_wrapper && elideWrapperCalling(lastFuncID) {
- // ignore wrappers
- } else if skip > 0 {
- skip--
- } else if n < max {
- (*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc
- n++
- }
- lastFuncID = inltree[ix].funcID
- // Back up to an instruction in the "caller".
- tracepc = frame.fn.entry() + uintptr(inltree[ix].parentPc)
- pc = tracepc + 1
- }
- }
- // Record the main frame.
- if f.funcID == funcID_wrapper && elideWrapperCalling(lastFuncID) {
- // Ignore wrapper functions (except when they trigger panics).
- } else if skip > 0 {
- skip--
- } else if n < max {
- (*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc
- n++
- }
- lastFuncID = f.funcID
- n-- // offset n++ below
- }
-
- if printing {
- // assume skip=0 for printing.
- //
- // Never elide wrappers if we haven't printed
- // any frames. And don't elide wrappers that
- // called panic rather than the wrapped
- // function. Otherwise, leave them out.
-
- // back up to the CALL instruction to read inlining info (same logic as above)
- tracepc := frame.pc
- if (n > 0 || flags&_TraceTrap == 0) && frame.pc > f.entry() && !waspanic {
- tracepc--
- }
- // If there is inlining info, print the inner frames.
- if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
- inltree := (*[1 << 20]inlinedCall)(inldata)
- var inlFunc _func
- inlFuncInfo := funcInfo{&inlFunc, f.datap}
- for {
- ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, nil)
- if ix < 0 {
- break
- }
-
- // Create a fake _func for the
- // inlined function.
- inlFunc.nameoff = inltree[ix].func_
- inlFunc.funcID = inltree[ix].funcID
-
- if (flags&_TraceRuntimeFrames) != 0 || showframe(inlFuncInfo, gp, nprint == 0, inlFuncInfo.funcID, lastFuncID) {
- name := funcname(inlFuncInfo)
- file, line := funcline(f, tracepc)
- print(name, "(...)\n")
- print("\t", file, ":", line, "\n")
- nprint++
- }
- lastFuncID = inltree[ix].funcID
- // Back up to an instruction in the "caller".
- tracepc = frame.fn.entry() + uintptr(inltree[ix].parentPc)
- }
- }
- if (flags&_TraceRuntimeFrames) != 0 || showframe(f, gp, nprint == 0, f.funcID, lastFuncID) {
- // Print during crash.
- // main(0x1, 0x2, 0x3)
- // /home/rsc/go/src/runtime/x.go:23 +0xf
- //
- name := funcname(f)
- file, line := funcline(f, tracepc)
- if name == "runtime.gopanic" {
- name = "panic"
- }
- print(name, "(")
- argp := unsafe.Pointer(frame.argp)
- printArgs(f, argp, tracepc)
- print(")\n")
- print("\t", file, ":", line)
- if frame.pc > f.entry() {
- print(" +", hex(frame.pc-f.entry()))
- }
- if gp.m != nil && gp.m.throwing > 0 && gp == gp.m.curg || level >= 2 {
- print(" fp=", hex(frame.fp), " sp=", hex(frame.sp), " pc=", hex(frame.pc))
- }
- print("\n")
- nprint++
- }
- lastFuncID = f.funcID
- }
- n++
-
- if f.funcID == funcID_cgocallback && len(cgoCtxt) > 0 {
- ctxt := cgoCtxt[len(cgoCtxt)-1]
- cgoCtxt = cgoCtxt[:len(cgoCtxt)-1]
-
- // skip only applies to Go frames.
- // callback != nil is only used when we only care
- // about Go frames.
- if skip == 0 && callback == nil {
- n = tracebackCgoContext(pcbuf, printing, ctxt, n, max)
- }
- }
-
- waspanic = f.funcID == funcID_sigpanic
- injectedCall := waspanic || f.funcID == funcID_asyncPreempt || f.funcID == funcID_debugCallV2
-
- // Do not unwind past the bottom of the stack.
- if !flr.valid() {
- break
- }
-
- // Unwind to next frame.
- frame.fn = flr
- frame.pc = frame.lr
- frame.lr = 0
- frame.sp = frame.fp
- frame.fp = 0
- frame.argmap = nil
-
- // On link register architectures, sighandler saves the LR on stack
- // before faking a call.
- if usesLR && injectedCall {
- x := *(*uintptr)(unsafe.Pointer(frame.sp))
- frame.sp += alignUp(sys.MinFrameSize, sys.StackAlign)
- f = findfunc(frame.pc)
- frame.fn = f
- if !f.valid() {
- frame.pc = x
- } else if funcspdelta(f, frame.pc, &cache) == 0 {
- frame.lr = x
- }
- }
- }
-
- if printing {
- n = nprint
- }
-
- // Note that panic != nil is okay here: there can be leftover panics,
- // because the defers on the panic stack do not nest in frame order as
- // they do on the defer stack. If you have:
- //
- // frame 1 defers d1
- // frame 2 defers d2
- // frame 3 defers d3
- // frame 4 panics
- // frame 4's panic starts running defers
- // frame 5, running d3, defers d4
- // frame 5 panics
- // frame 5's panic starts running defers
- // frame 6, running d4, garbage collects
- // frame 6, running d2, garbage collects
- //
- // During the execution of d4, the panic stack is d4 -> d3, which
- // is nested properly, and we'll treat frame 3 as resumable, because we
- // can find d3. (And in fact frame 3 is resumable. If d4 recovers
- // and frame 5 continues running d3, d3 can recover and we'll
- // resume execution in (returning from) frame 3.)
- //
- // During the execution of d2, however, the panic stack is d2 -> d3,
- // which is inverted. The scan will match d2 to frame 2 but having
- // d2 on the stack until then means it will not match d3 to frame 3.
- // This is okay: if we're running d2, then all the defers after d2 have
- // completed and their corresponding frames are dead. Not finding d3
- // for frame 3 means we'll set frame 3's continpc == 0, which is correct
- // (frame 3 is dead). At the end of the walk the panic stack can thus
- // contain defers (d3 in this case) for dead frames. The inversion here
- // always indicates a dead frame, and the effect of the inversion on the
- // scan is to hide those dead frames, so the scan is still okay:
- // what's left on the panic stack are exactly (and only) the dead frames.
- //
- // We require callback != nil here because only when callback != nil
- // do we know that gentraceback is being called in a "must be correct"
- // context as opposed to a "best effort" context. The tracebacks with
- // callbacks only happen when everything is stopped nicely.
- // At other times, such as when gathering a stack for a profiling signal
- // or when printing a traceback during a crash, everything may not be
- // stopped nicely, and the stack walk may not be able to complete.
- if callback != nil && n < max && frame.sp != gp.stktopsp {
- print("runtime: g", gp.goid, ": frame.sp=", hex(frame.sp), " top=", hex(gp.stktopsp), "\n")
- print("\tstack=[", hex(gp.stack.lo), "-", hex(gp.stack.hi), "] n=", n, " max=", max, "\n")
- throw("traceback did not unwind completely")
- }
-
- return n
-}
-
-// printArgs prints function arguments in traceback.
-func printArgs(f funcInfo, argp unsafe.Pointer, pc uintptr) {
- // The "instruction" of argument printing is encoded in _FUNCDATA_ArgInfo.
- // See cmd/compile/internal/ssagen.emitArgInfo for the description of the
- // encoding.
- // These constants need to be in sync with the compiler.
- const (
- _endSeq = 0xff
- _startAgg = 0xfe
- _endAgg = 0xfd
- _dotdotdot = 0xfc
- _offsetTooLarge = 0xfb
- )
-
- const (
- limit = 10 // print no more than 10 args/components
- maxDepth = 5 // no more than 5 layers of nesting
- maxLen = (maxDepth*3+2)*limit + 1 // max length of _FUNCDATA_ArgInfo (see the compiler side for reasoning)
- )
-
- p := (*[maxLen]uint8)(funcdata(f, _FUNCDATA_ArgInfo))
- if p == nil {
- return
- }
-
- liveInfo := funcdata(f, _FUNCDATA_ArgLiveInfo)
- liveIdx := pcdatavalue(f, _PCDATA_ArgLiveIndex, pc, nil)
-	startOffset := uint8(0xff) // smallest offset that needs liveness info (slots with a lower offset are always live)
- if liveInfo != nil {
- startOffset = *(*uint8)(liveInfo)
- }
-
- isLive := func(off, slotIdx uint8) bool {
- if liveInfo == nil || liveIdx <= 0 {
- return true // no liveness info, always live
- }
- if off < startOffset {
- return true
- }
- bits := *(*uint8)(add(liveInfo, uintptr(liveIdx)+uintptr(slotIdx/8)))
- return bits&(1<<(slotIdx%8)) != 0
- }
-
- print1 := func(off, sz, slotIdx uint8) {
- x := readUnaligned64(add(argp, uintptr(off)))
- // mask out irrelevant bits
- if sz < 8 {
- shift := 64 - sz*8
- if goarch.BigEndian {
- x = x >> shift
- } else {
- x = x << shift >> shift
- }
- }
- print(hex(x))
- if !isLive(off, slotIdx) {
- print("?")
- }
- }
-
- start := true
- printcomma := func() {
- if !start {
- print(", ")
- }
- }
- pi := 0
- slotIdx := uint8(0) // register arg spill slot index
-printloop:
- for {
- o := p[pi]
- pi++
- switch o {
- case _endSeq:
- break printloop
- case _startAgg:
- printcomma()
- print("{")
- start = true
- continue
- case _endAgg:
- print("}")
- case _dotdotdot:
- printcomma()
- print("...")
- case _offsetTooLarge:
- printcomma()
- print("_")
- default:
- printcomma()
- sz := p[pi]
- pi++
- print1(o, sz, slotIdx)
- if o >= startOffset {
- slotIdx++
- }
- }
- start = false
- }
-}
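-
-// Worked example (a sketch, not the output of any particular compiler
-// build): for a call f(1, 2, s) where s is a two-field struct holding
-// {3, 4}, an ArgInfo sequence of
-//
-//	(off=0,sz=8) (off=8,sz=8) _startAgg (off=16,sz=8) (off=24,sz=8) _endAgg _endSeq
-//
-// drives the loop above to print "0x1, 0x2, {0x3, 0x4}".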
-
-// reflectMethodValue is a partial duplicate of reflect.makeFuncImpl
-// and reflect.methodValue.
-type reflectMethodValue struct {
- fn uintptr
- stack *bitvector // ptrmap for both args and results
- argLen uintptr // just args
-}
-
-// getArgInfoFast returns the argument frame information for a call to f.
-// It is short and inlineable. However, it does not handle all functions.
-// If ok is false, you must call getArgInfo instead.
-// TODO(josharian): once we do mid-stack inlining,
-// call getArgInfo directly from getArgInfoFast and stop returning an ok bool.
-func getArgInfoFast(f funcInfo, needArgMap bool) (arglen uintptr, argmap *bitvector, ok bool) {
- return uintptr(f.args), nil, !(needArgMap && f.args == _ArgsSizeUnknown)
-}
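-
-// Typical call pattern (a sketch; illustrative, not code from this file):
-// try the inlineable fast path first, then fall back to getArgInfo.
-//
-//	arglen, argmap, ok := getArgInfoFast(f, needArgMap)
-//	if !ok {
-//		arglen, argmap = getArgInfo(frame, f, needArgMap, ctxt)
-//	}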
-
-// getArgInfo returns the argument frame information for a call to f
-// with call frame frame.
-//
-// This is used for both actual calls with active stack frames and for
-// deferred calls or goroutines that are not yet executing. If this is an actual
-// call, ctxt must be nil (getArgInfo will retrieve what it needs from
-// the active stack frame). If this is a deferred call or unstarted goroutine,
-// ctxt must be the function object that was deferred or go'd.
-func getArgInfo(frame *stkframe, f funcInfo, needArgMap bool, ctxt *funcval) (arglen uintptr, argmap *bitvector) {
- arglen = uintptr(f.args)
- if needArgMap && f.args == _ArgsSizeUnknown {
- // Extract argument bitmaps for reflect stubs from the calls they made to reflect.
- switch funcname(f) {
- case "reflect.makeFuncStub", "reflect.methodValueCall":
- // These take a *reflect.methodValue as their
- // context register.
- var mv *reflectMethodValue
- var retValid bool
- if ctxt != nil {
- // This is not an actual call, but a
- // deferred call or an unstarted goroutine.
- // The function value is itself the *reflect.methodValue.
- mv = (*reflectMethodValue)(unsafe.Pointer(ctxt))
- } else {
- // This is a real call that took the
- // *reflect.methodValue as its context
- // register and immediately saved it
- // to 0(SP). Get the methodValue from
- // 0(SP).
- arg0 := frame.sp + sys.MinFrameSize
- mv = *(**reflectMethodValue)(unsafe.Pointer(arg0))
- // Figure out whether the return values are valid.
- // Reflect will update this value after it copies
- // in the return values.
- retValid = *(*bool)(unsafe.Pointer(arg0 + 4*goarch.PtrSize))
- }
- if mv.fn != f.entry() {
- print("runtime: confused by ", funcname(f), "\n")
- throw("reflect mismatch")
- }
- bv := mv.stack
- arglen = uintptr(bv.n * goarch.PtrSize)
- if !retValid {
- arglen = uintptr(mv.argLen) &^ (goarch.PtrSize - 1)
- }
- argmap = bv
- }
- }
- return
-}
-
-// tracebackCgoContext handles tracing back a cgo context value, from
-// the context argument to SetCgoTraceback, for the gentraceback
-// function. It returns the new value of n.
-func tracebackCgoContext(pcbuf *uintptr, printing bool, ctxt uintptr, n, max int) int {
- var cgoPCs [32]uintptr
- cgoContextPCs(ctxt, cgoPCs[:])
- var arg cgoSymbolizerArg
- anySymbolized := false
- for _, pc := range cgoPCs {
- if pc == 0 || n >= max {
- break
- }
- if pcbuf != nil {
- (*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc
- }
- if printing {
- if cgoSymbolizer == nil {
- print("non-Go function at pc=", hex(pc), "\n")
- } else {
- c := printOneCgoTraceback(pc, max-n, &arg)
- n += c - 1 // +1 a few lines down
- anySymbolized = true
- }
- }
- n++
- }
- if anySymbolized {
- arg.pc = 0
- callCgoSymbolizer(&arg)
- }
- return n
-}
-
-func printcreatedby(gp *g) {
-	// Show what created the goroutine, except for the main goroutine (goid 1).
- pc := gp.gopc
- f := findfunc(pc)
- if f.valid() && showframe(f, gp, false, funcID_normal, funcID_normal) && gp.goid != 1 {
- printcreatedby1(f, pc)
- }
-}
-
-func printcreatedby1(f funcInfo, pc uintptr) {
- print("created by ", funcname(f), "\n")
- tracepc := pc // back up to CALL instruction for funcline.
- if pc > f.entry() {
- tracepc -= sys.PCQuantum
- }
- file, line := funcline(f, tracepc)
- print("\t", file, ":", line)
- if pc > f.entry() {
- print(" +", hex(pc-f.entry()))
- }
- print("\n")
-}
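-
-// Example output (illustrative path and offset):
-//
-//	created by main.main
-//		/home/user/app/main.go:12 +0x5a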
-
-func traceback(pc, sp, lr uintptr, gp *g) {
- traceback1(pc, sp, lr, gp, 0)
-}
-
-// tracebacktrap is like traceback but expects that the PC and SP were obtained
-// from a trap, not from gp->sched or gp->syscallpc/gp->syscallsp or getcallerpc/getcallersp.
-// Because they are from a trap instead of from a saved pair,
-// the initial PC must not be rewound to the previous instruction.
-// (All the saved pairs record a PC that is a return address, so we
-// rewind it into the CALL instruction.)
-// If gp.m.libcall{g,pc,sp} information is available, it uses that information in preference to
-// the pc/sp/lr passed in.
-func tracebacktrap(pc, sp, lr uintptr, gp *g) {
- if gp.m.libcallsp != 0 {
- // We're in C code somewhere, traceback from the saved position.
- traceback1(gp.m.libcallpc, gp.m.libcallsp, 0, gp.m.libcallg.ptr(), 0)
- return
- }
- traceback1(pc, sp, lr, gp, _TraceTrap)
-}
-
-func traceback1(pc, sp, lr uintptr, gp *g, flags uint) {
- // If the goroutine is in cgo, and we have a cgo traceback, print that.
- if iscgo && gp.m != nil && gp.m.ncgo > 0 && gp.syscallsp != 0 && gp.m.cgoCallers != nil && gp.m.cgoCallers[0] != 0 {
- // Lock cgoCallers so that a signal handler won't
- // change it, copy the array, reset it, unlock it.
- // We are locked to the thread and are not running
- // concurrently with a signal handler.
- // We just have to stop a signal handler from interrupting
- // in the middle of our copy.
- atomic.Store(&gp.m.cgoCallersUse, 1)
- cgoCallers := *gp.m.cgoCallers
- gp.m.cgoCallers[0] = 0
- atomic.Store(&gp.m.cgoCallersUse, 0)
-
- printCgoTraceback(&cgoCallers)
- }
-
- if readgstatus(gp)&^_Gscan == _Gsyscall {
- // Override registers if blocked in system call.
- pc = gp.syscallpc
- sp = gp.syscallsp
- flags &^= _TraceTrap
- }
- if gp.m != nil && gp.m.vdsoSP != 0 {
- // Override registers if running in VDSO. This comes after the
- // _Gsyscall check to cover VDSO calls after entersyscall.
- pc = gp.m.vdsoPC
- sp = gp.m.vdsoSP
- flags &^= _TraceTrap
- }
-
- // Print traceback. By default, omits runtime frames.
- // If that means we print nothing at all, repeat forcing all frames printed.
- n := gentraceback(pc, sp, lr, gp, 0, nil, _TracebackMaxFrames, nil, nil, flags)
- if n == 0 && (flags&_TraceRuntimeFrames) == 0 {
- n = gentraceback(pc, sp, lr, gp, 0, nil, _TracebackMaxFrames, nil, nil, flags|_TraceRuntimeFrames)
- }
- if n == _TracebackMaxFrames {
- print("...additional frames elided...\n")
- }
- printcreatedby(gp)
-
- if gp.ancestors == nil {
- return
- }
- for _, ancestor := range *gp.ancestors {
- printAncestorTraceback(ancestor)
- }
-}
-
-// printAncestorTraceback prints the traceback of the given ancestor.
-// TODO: Unify this with gentraceback and CallersFrames.
-func printAncestorTraceback(ancestor ancestorInfo) {
- print("[originating from goroutine ", ancestor.goid, "]:\n")
- for fidx, pc := range ancestor.pcs {
- f := findfunc(pc) // f previously validated
- if showfuncinfo(f, fidx == 0, funcID_normal, funcID_normal) {
- printAncestorTracebackFuncInfo(f, pc)
- }
- }
- if len(ancestor.pcs) == _TracebackMaxFrames {
- print("...additional frames elided...\n")
- }
-	// Show what created the goroutine, except for the main goroutine (goid 1).
- f := findfunc(ancestor.gopc)
- if f.valid() && showfuncinfo(f, false, funcID_normal, funcID_normal) && ancestor.goid != 1 {
- printcreatedby1(f, ancestor.gopc)
- }
-}
-
-// printAncestorTracebackFuncInfo prints the given function info at a given pc
-// within an ancestor traceback. The precision of this info is reduced
-// due to only having access to the pcs at the time of the caller
-// goroutine being created.
-func printAncestorTracebackFuncInfo(f funcInfo, pc uintptr) {
- name := funcname(f)
- if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
- inltree := (*[1 << 20]inlinedCall)(inldata)
- ix := pcdatavalue(f, _PCDATA_InlTreeIndex, pc, nil)
- if ix >= 0 {
- name = funcnameFromNameoff(f, inltree[ix].func_)
- }
- }
- file, line := funcline(f, pc)
- if name == "runtime.gopanic" {
- name = "panic"
- }
- print(name, "(...)\n")
- print("\t", file, ":", line)
- if pc > f.entry() {
- print(" +", hex(pc-f.entry()))
- }
- print("\n")
-}
-
-func callers(skip int, pcbuf []uintptr) int {
- sp := getcallersp()
- pc := getcallerpc()
- gp := getg()
- var n int
- systemstack(func() {
- n = gentraceback(pc, sp, 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0)
- })
- return n
-}
-
-func gcallers(gp *g, skip int, pcbuf []uintptr) int {
- return gentraceback(^uintptr(0), ^uintptr(0), 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0)
-}
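-
-// Public-API counterpart (a sketch of illustrative user code, not part
-// of this file): these helpers back runtime.Callers, whose PCs are
-// normally expanded with runtime.CallersFrames.
-//
-//	pcs := make([]uintptr, 32)
-//	n := runtime.Callers(2, pcs) // skip Callers itself and its direct caller
-//	frames := runtime.CallersFrames(pcs[:n])
-//	for {
-//		fr, more := frames.Next()
-//		println(fr.Function, fr.File, fr.Line)
-//		if !more {
-//			break
-//		}
-//	}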
-
-// showframe reports whether the frame with the given characteristics should
-// be printed during a traceback.
-func showframe(f funcInfo, gp *g, firstFrame bool, funcID, childID funcID) bool {
- g := getg()
- if g.m.throwing > 0 && gp != nil && (gp == g.m.curg || gp == g.m.caughtsig.ptr()) {
- return true
- }
- return showfuncinfo(f, firstFrame, funcID, childID)
-}
-
-// showfuncinfo reports whether a function with the given characteristics should
-// be printed during a traceback.
-func showfuncinfo(f funcInfo, firstFrame bool, funcID, childID funcID) bool {
- // Note that f may be a synthesized funcInfo for an inlined
- // function, in which case only nameoff and funcID are set.
-
- level, _, _ := gotraceback()
- if level > 1 {
- // Show all frames.
- return true
- }
-
- if !f.valid() {
- return false
- }
-
- if funcID == funcID_wrapper && elideWrapperCalling(childID) {
- return false
- }
-
- name := funcname(f)
-
- // Special case: always show runtime.gopanic frame
- // in the middle of a stack trace, so that we can
- // see the boundary between ordinary code and
- // panic-induced deferred code.
- // See golang.org/issue/5832.
- if name == "runtime.gopanic" && !firstFrame {
- return true
- }
-
- return bytealg.IndexByteString(name, '.') >= 0 && (!hasPrefix(name, "runtime.") || isExportedRuntime(name))
-}
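-
-// Examples (illustrative): at the default traceback level, "main.work"
-// is shown, "runtime.Gosched" is shown (exported runtime function), and
-// "runtime.gcBgMarkWorker" is hidden (unexported runtime-internal frame).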
-
-// isExportedRuntime reports whether name is an exported runtime function.
-// It is only for runtime functions, so ASCII A-Z is fine.
-func isExportedRuntime(name string) bool {
- const n = len("runtime.")
- return len(name) > n && name[:n] == "runtime." && 'A' <= name[n] && name[n] <= 'Z'
-}
-
-// elideWrapperCalling reports whether a wrapper function that called
-// function id should be elided from stack traces.
-func elideWrapperCalling(id funcID) bool {
- // If the wrapper called a panic function instead of the
- // wrapped function, we want to include it in stacks.
- return !(id == funcID_gopanic || id == funcID_sigpanic || id == funcID_panicwrap)
-}
-
-var gStatusStrings = [...]string{
- _Gidle: "idle",
- _Grunnable: "runnable",
- _Grunning: "running",
- _Gsyscall: "syscall",
- _Gwaiting: "waiting",
- _Gdead: "dead",
- _Gcopystack: "copystack",
- _Gpreempted: "preempted",
-}
-
-func goroutineheader(gp *g) {
- gpstatus := readgstatus(gp)
-
- isScan := gpstatus&_Gscan != 0
- gpstatus &^= _Gscan // drop the scan bit
-
- // Basic string status
- var status string
- if 0 <= gpstatus && gpstatus < uint32(len(gStatusStrings)) {
- status = gStatusStrings[gpstatus]
- } else {
- status = "???"
- }
-
- // Override.
- if gpstatus == _Gwaiting && gp.waitreason != waitReasonZero {
- status = gp.waitreason.String()
- }
-
-	// Approximate time the G has been blocked, in minutes.
- var waitfor int64
- if (gpstatus == _Gwaiting || gpstatus == _Gsyscall) && gp.waitsince != 0 {
- waitfor = (nanotime() - gp.waitsince) / 60e9
- }
- print("goroutine ", gp.goid, " [", status)
- if isScan {
- print(" (scan)")
- }
- if waitfor >= 1 {
- print(", ", waitfor, " minutes")
- }
- if gp.lockedm != 0 {
- print(", locked to thread")
- }
- print("]:\n")
-}
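-
-// Example header (illustrative): a goroutine blocked on a channel
-// receive for six minutes while locked to its thread prints as
-//
-//	goroutine 42 [chan receive, 6 minutes, locked to thread]: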
-
-func tracebackothers(me *g) {
- level, _, _ := gotraceback()
-
- // Show the current goroutine first, if we haven't already.
- curgp := getg().m.curg
- if curgp != nil && curgp != me {
- print("\n")
- goroutineheader(curgp)
- traceback(^uintptr(0), ^uintptr(0), 0, curgp)
- }
-
- // We can't call locking forEachG here because this may be during fatal
- // throw/panic, where locking could be out-of-order or a direct
- // deadlock.
- //
- // Instead, use forEachGRace, which requires no locking. We don't lock
- // against concurrent creation of new Gs, but even with allglock we may
- // miss Gs created after this loop.
- forEachGRace(func(gp *g) {
- if gp == me || gp == curgp || readgstatus(gp) == _Gdead || isSystemGoroutine(gp, false) && level < 2 {
- return
- }
- print("\n")
- goroutineheader(gp)
- // Note: gp.m == g.m occurs when tracebackothers is
- // called from a signal handler initiated during a
- // systemstack call. The original G is still in the
- // running state, and we want to print its stack.
- if gp.m != getg().m && readgstatus(gp)&^_Gscan == _Grunning {
- print("\tgoroutine running on other thread; stack unavailable\n")
- printcreatedby(gp)
- } else {
- traceback(^uintptr(0), ^uintptr(0), 0, gp)
- }
- })
-}
-
-// tracebackHexdump hexdumps part of stk around frame.sp and frame.fp
-// for debugging purposes. If the address bad falls within the dumped
-// range, it is marked as well.
-func tracebackHexdump(stk stack, frame *stkframe, bad uintptr) {
- const expand = 32 * goarch.PtrSize
- const maxExpand = 256 * goarch.PtrSize
- // Start around frame.sp.
- lo, hi := frame.sp, frame.sp
- // Expand to include frame.fp.
- if frame.fp != 0 && frame.fp < lo {
- lo = frame.fp
- }
- if frame.fp != 0 && frame.fp > hi {
- hi = frame.fp
- }
- // Expand a bit more.
- lo, hi = lo-expand, hi+expand
- // But don't go too far from frame.sp.
- if lo < frame.sp-maxExpand {
- lo = frame.sp - maxExpand
- }
- if hi > frame.sp+maxExpand {
- hi = frame.sp + maxExpand
- }
- // And don't go outside the stack bounds.
- if lo < stk.lo {
- lo = stk.lo
- }
- if hi > stk.hi {
- hi = stk.hi
- }
-
- // Print the hex dump.
- print("stack: frame={sp:", hex(frame.sp), ", fp:", hex(frame.fp), "} stack=[", hex(stk.lo), ",", hex(stk.hi), ")\n")
- hexdumpWords(lo, hi, func(p uintptr) byte {
- switch p {
- case frame.fp:
- return '>'
- case frame.sp:
- return '<'
- case bad:
- return '!'
- }
- return 0
- })
-}
-
-// isSystemGoroutine reports whether the goroutine g must be omitted
-// in stack dumps and deadlock detector. This is any goroutine that
-// starts at a runtime.* entry point, except for runtime.main,
-// runtime.handleAsyncEvent (wasm only) and sometimes runtime.runfinq.
-//
-// If fixed is true, any goroutine that can vary between user and
-// system (that is, the finalizer goroutine) is considered a user
-// goroutine.
-func isSystemGoroutine(gp *g, fixed bool) bool {
- // Keep this in sync with cmd/trace/trace.go:isSystemGoroutine.
- f := findfunc(gp.startpc)
- if !f.valid() {
- return false
- }
- if f.funcID == funcID_runtime_main || f.funcID == funcID_handleAsyncEvent {
- return false
- }
- if f.funcID == funcID_runfinq {
- // We include the finalizer goroutine if it's calling
- // back into user code.
- if fixed {
- // This goroutine can vary. In fixed mode,
- // always consider it a user goroutine.
- return false
- }
- return !fingRunning
- }
- return hasPrefix(funcname(f), "runtime.")
-}
-
-// SetCgoTraceback records three C functions to use to gather
-// traceback information from C code and to convert that traceback
-// information into symbolic information. These are used when printing
-// stack traces for a program that uses cgo.
-//
-// The traceback and context functions may be called from a signal
-// handler, and must therefore use only async-signal safe functions.
-// The symbolizer function may be called while the program is
-// crashing, and so must be cautious about using memory. None of the
-// functions may call back into Go.
-//
-// The context function will be called with a single argument, a
-// pointer to a struct:
-//
-// struct {
-// Context uintptr
-// }
-//
-// In C syntax, this struct will be
-//
-// struct {
-// uintptr_t Context;
-// };
-//
-// If the Context field is 0, the context function is being called to
-// record the current traceback context. It should record in the
-// Context field whatever information is needed about the current
-// point of execution to later produce a stack trace, probably the
-// stack pointer and PC. In this case the context function will be
-// called from C code.
-//
-// If the Context field is not 0, then it is a value returned by a
-// previous call to the context function. This case is called when the
-// context is no longer needed; that is, when the Go code is returning
-// to its C code caller. This permits the context function to release
-// any associated resources.
-//
-// While it would be correct for the context function to record a
-// complete stack trace whenever it is called, and simply copy that
-// out in the traceback function, in a typical program the context
-// function will be called many times without ever recording a
-// traceback for that context. Recording a complete stack trace in a
-// call to the context function is likely to be inefficient.
-//
-// The traceback function will be called with a single argument, a
-// pointer to a struct:
-//
-// struct {
-// Context uintptr
-// SigContext uintptr
-// Buf *uintptr
-// Max uintptr
-// }
-//
-// In C syntax, this struct will be
-//
-// struct {
-// uintptr_t Context;
-// uintptr_t SigContext;
-// uintptr_t* Buf;
-// uintptr_t Max;
-// };
-//
-// The Context field will be zero to gather a traceback from the
-// current program execution point. In this case, the traceback
-// function will be called from C code.
-//
-// Otherwise Context will be a value previously returned by a call to
-// the context function. The traceback function should gather a stack
-// trace from that saved point in the program execution. The traceback
-// function may be called from an execution thread other than the one
-// that recorded the context, but only when the context is known to be
-// valid and unchanging. The traceback function may also be called
-// deeper in the call stack on the same thread that recorded the
-// context. The traceback function may be called multiple times with
-// the same Context value; it will usually be appropriate to cache the
-// result, if possible, the first time this is called for a specific
-// context value.
-//
-// If the traceback function is called from a signal handler on a Unix
-// system, SigContext will be the signal context argument passed to
-// the signal handler (a C ucontext_t* cast to uintptr_t). This may be
-// used to start tracing at the point where the signal occurred. If
-// the traceback function is not called from a signal handler,
-// SigContext will be zero.
-//
-// Buf is where the traceback information should be stored. It should
-// be PC values, such that Buf[0] is the PC of the caller, Buf[1] is
-// the PC of that function's caller, and so on. Max is the maximum
-// number of entries to store. The function should store a zero to
-// indicate the top of the stack, or that the caller is on a different
-// stack, presumably a Go stack.
-//
-// Unlike runtime.Callers, the PC values returned should, when passed
-// to the symbolizer function, return the file/line of the call
-// instruction. No additional subtraction is required or appropriate.
-//
-// On all platforms, the traceback function is invoked when a call from
-// Go to C to Go requests a stack trace. On linux/amd64, linux/ppc64le,
-// and freebsd/amd64, the traceback function is also invoked when a
-// signal is received by a thread that is executing a cgo call. The
-// traceback function should not make assumptions about when it is
-// called, as future versions of Go may make additional calls.
-//
-// The symbolizer function will be called with a single argument, a
-// pointer to a struct:
-//
-// struct {
-// PC uintptr // program counter to fetch information for
-// File *byte // file name (NUL terminated)
-// Lineno uintptr // line number
-// Func *byte // function name (NUL terminated)
-// Entry uintptr // function entry point
-// More uintptr // set non-zero if more info for this PC
-// Data uintptr // unused by runtime, available for function
-// }
-//
-// In C syntax, this struct will be
-//
-// struct {
-// uintptr_t PC;
-// char* File;
-// uintptr_t Lineno;
-// char* Func;
-// uintptr_t Entry;
-// uintptr_t More;
-// uintptr_t Data;
-// };
-//
-// The PC field will be a value returned by a call to the traceback
-// function.
-//
-// The first time the function is called for a particular traceback,
-// all the fields except PC will be 0. The function should fill in the
-// other fields if possible, setting them to 0/nil if the information
-// is not available. The Data field may be used to store any useful
-// information across calls. The More field should be set to non-zero
-// if there is more information for this PC, zero otherwise. If More
-// is set non-zero, the function will be called again with the same
-// PC, and may return different information (this is intended for use
-// with inlined functions). If More is zero, the function will be
-// called with the next PC value in the traceback. When the traceback
-// is complete, the function will be called once more with PC set to
-// zero; this may be used to free any information. Each call will
-// leave the fields of the struct set to the same values they had upon
-// return, except for the PC field when the More field is zero. The
-// function must not keep a copy of the struct pointer between calls.
-//
-// When calling SetCgoTraceback, the version argument is the version
-// number of the structs that the functions expect to receive.
-// Currently this must be zero.
-//
-// The symbolizer function may be nil, in which case the results of
-// the traceback function will be displayed as numbers. If the
-// traceback function is nil, the symbolizer function will never be
-// called. The context function may be nil, in which case the
-// traceback function will only be called with the context field set
-// to zero. If the context function is nil, then calls from Go to C
-// to Go will not show a traceback for the C portion of the call stack.
-//
-// SetCgoTraceback should be called only once, ideally from an init function.
-func SetCgoTraceback(version int, traceback, context, symbolizer unsafe.Pointer) {
- if version != 0 {
- panic("unsupported version")
- }
-
- if cgoTraceback != nil && cgoTraceback != traceback ||
- cgoContext != nil && cgoContext != context ||
- cgoSymbolizer != nil && cgoSymbolizer != symbolizer {
- panic("call SetCgoTraceback only once")
- }
-
- cgoTraceback = traceback
- cgoContext = context
- cgoSymbolizer = symbolizer
-
- // The context function is called when a C function calls a Go
- // function. As such it is only called by C code in runtime/cgo.
- if _cgo_set_context_function != nil {
- cgocall(_cgo_set_context_function, context)
- }
-}
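-
-// Registration sketch (hypothetical C symbols tbTraceback, tbContext,
-// tbSymbolizer exported to cgo; not part of this file):
-//
-//	// void tbTraceback(void*); void tbContext(void*); void tbSymbolizer(void*);
-//	import "C"
-//
-//	func init() {
-//		runtime.SetCgoTraceback(0,
-//			unsafe.Pointer(C.tbTraceback),
-//			unsafe.Pointer(C.tbContext),
-//			unsafe.Pointer(C.tbSymbolizer))
-//	}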
-
-var cgoTraceback unsafe.Pointer
-var cgoContext unsafe.Pointer
-var cgoSymbolizer unsafe.Pointer
-
-// cgoTracebackArg is the type passed to cgoTraceback.
-type cgoTracebackArg struct {
- context uintptr
- sigContext uintptr
- buf *uintptr
- max uintptr
-}
-
-// cgoContextArg is the type passed to the context function.
-type cgoContextArg struct {
- context uintptr
-}
-
-// cgoSymbolizerArg is the type passed to cgoSymbolizer.
-type cgoSymbolizerArg struct {
- pc uintptr
- file *byte
- lineno uintptr
- funcName *byte
- entry uintptr
- more uintptr
- data uintptr
-}
-
-// printCgoTraceback prints a traceback of callers.
-func printCgoTraceback(callers *cgoCallers) {
- if cgoSymbolizer == nil {
- for _, c := range callers {
- if c == 0 {
- break
- }
- print("non-Go function at pc=", hex(c), "\n")
- }
- return
- }
-
- var arg cgoSymbolizerArg
- for _, c := range callers {
- if c == 0 {
- break
- }
- printOneCgoTraceback(c, 0x7fffffff, &arg)
- }
- arg.pc = 0
- callCgoSymbolizer(&arg)
-}
-
-// printOneCgoTraceback prints the traceback of a single cgo caller.
-// This can print more than one line because of inlining.
-// Returns the number of frames printed.
-func printOneCgoTraceback(pc uintptr, max int, arg *cgoSymbolizerArg) int {
- c := 0
- arg.pc = pc
- for c <= max {
- callCgoSymbolizer(arg)
- if arg.funcName != nil {
- // Note that we don't print any argument
- // information here, not even parentheses.
- // The symbolizer must add that if appropriate.
- println(gostringnocopy(arg.funcName))
- } else {
- println("non-Go function")
- }
- print("\t")
- if arg.file != nil {
- print(gostringnocopy(arg.file), ":", arg.lineno, " ")
- }
- print("pc=", hex(pc), "\n")
- c++
- if arg.more == 0 {
- break
- }
- }
- return c
-}
-
-// callCgoSymbolizer calls the cgoSymbolizer function.
-func callCgoSymbolizer(arg *cgoSymbolizerArg) {
- call := cgocall
- if panicking > 0 || getg().m.curg != getg() {
- // We do not want to call into the scheduler when panicking
- // or when on the system stack.
- call = asmcgocall
- }
- if msanenabled {
- msanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
- }
- if asanenabled {
- asanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
- }
- call(cgoSymbolizer, noescape(unsafe.Pointer(arg)))
-}
-
-// cgoContextPCs gets the PC values from a cgo traceback.
-func cgoContextPCs(ctxt uintptr, buf []uintptr) {
- if cgoTraceback == nil {
- return
- }
- call := cgocall
- if panicking > 0 || getg().m.curg != getg() {
- // We do not want to call into the scheduler when panicking
- // or when on the system stack.
- call = asmcgocall
- }
- arg := cgoTracebackArg{
- context: ctxt,
- buf: (*uintptr)(noescape(unsafe.Pointer(&buf[0]))),
- max: uintptr(len(buf)),
- }
- if msanenabled {
- msanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
- }
- if asanenabled {
- asanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
- }
- call(cgoTraceback, noescape(unsafe.Pointer(&arg)))
-}
diff --git a/contrib/go/_std_1.18/src/runtime/type.go b/contrib/go/_std_1.18/src/runtime/type.go
deleted file mode 100644
index da47147897..0000000000
--- a/contrib/go/_std_1.18/src/runtime/type.go
+++ /dev/null
@@ -1,708 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Runtime type representation.
-
-package runtime
-
-import (
- "internal/abi"
- "unsafe"
-)
-
-// tflag is documented in reflect/type.go.
-//
-// tflag values must be kept in sync with copies in:
-// cmd/compile/internal/reflectdata/reflect.go
-// cmd/link/internal/ld/decodesym.go
-// reflect/type.go
-// internal/reflectlite/type.go
-type tflag uint8
-
-const (
- tflagUncommon tflag = 1 << 0
- tflagExtraStar tflag = 1 << 1
- tflagNamed tflag = 1 << 2
- tflagRegularMemory tflag = 1 << 3 // equal and hash can treat values of this type as a single region of t.size bytes
-)
-
-// Needs to be in sync with ../cmd/link/internal/ld/decodesym.go:/^func.commonsize,
-// ../cmd/compile/internal/reflectdata/reflect.go:/^func.dcommontype and
-// ../reflect/type.go:/^type.rtype.
-// ../internal/reflectlite/type.go:/^type.rtype.
-type _type struct {
- size uintptr
- ptrdata uintptr // size of memory prefix holding all pointers
- hash uint32
- tflag tflag
- align uint8
- fieldAlign uint8
- kind uint8
- // function for comparing objects of this type
- // (ptr to object A, ptr to object B) -> ==?
- equal func(unsafe.Pointer, unsafe.Pointer) bool
- // gcdata stores the GC type data for the garbage collector.
- // If the KindGCProg bit is set in kind, gcdata is a GC program.
- // Otherwise it is a ptrmask bitmap. See mbitmap.go for details.
- gcdata *byte
- str nameOff
- ptrToThis typeOff
-}
-
-func (t *_type) string() string {
- s := t.nameOff(t.str).name()
- if t.tflag&tflagExtraStar != 0 {
- return s[1:]
- }
- return s
-}
-
-func (t *_type) uncommon() *uncommontype {
- if t.tflag&tflagUncommon == 0 {
- return nil
- }
- switch t.kind & kindMask {
- case kindStruct:
- type u struct {
- structtype
- u uncommontype
- }
- return &(*u)(unsafe.Pointer(t)).u
- case kindPtr:
- type u struct {
- ptrtype
- u uncommontype
- }
- return &(*u)(unsafe.Pointer(t)).u
- case kindFunc:
- type u struct {
- functype
- u uncommontype
- }
- return &(*u)(unsafe.Pointer(t)).u
- case kindSlice:
- type u struct {
- slicetype
- u uncommontype
- }
- return &(*u)(unsafe.Pointer(t)).u
- case kindArray:
- type u struct {
- arraytype
- u uncommontype
- }
- return &(*u)(unsafe.Pointer(t)).u
- case kindChan:
- type u struct {
- chantype
- u uncommontype
- }
- return &(*u)(unsafe.Pointer(t)).u
- case kindMap:
- type u struct {
- maptype
- u uncommontype
- }
- return &(*u)(unsafe.Pointer(t)).u
- case kindInterface:
- type u struct {
- interfacetype
- u uncommontype
- }
- return &(*u)(unsafe.Pointer(t)).u
- default:
- type u struct {
- _type
- u uncommontype
- }
- return &(*u)(unsafe.Pointer(t)).u
- }
-}
-
-func (t *_type) name() string {
- if t.tflag&tflagNamed == 0 {
- return ""
- }
- s := t.string()
- i := len(s) - 1
- for i >= 0 && s[i] != '.' {
- i--
- }
- return s[i+1:]
-}
-
-// pkgpath returns the path of the package where t was defined, if
-// available. This is not the same as the reflect package's PkgPath
-// method, in that it returns the package path for struct and interface
-// types, not just named types.
-func (t *_type) pkgpath() string {
- if u := t.uncommon(); u != nil {
- return t.nameOff(u.pkgpath).name()
- }
- switch t.kind & kindMask {
- case kindStruct:
- st := (*structtype)(unsafe.Pointer(t))
- return st.pkgPath.name()
- case kindInterface:
- it := (*interfacetype)(unsafe.Pointer(t))
- return it.pkgpath.name()
- }
- return ""
-}
-
-// reflectOffs holds type offsets defined at run time by the reflect package.
-//
-// When a type is defined at run time, its *rtype data lives on the heap.
-// There are a wide range of possible addresses the heap may use, that
-// may not be representable as a 32-bit offset. Moreover the GC may
-// one day start moving heap memory, in which case there is no stable
-// offset that can be defined.
-//
-// To provide stable offsets, we pin *rtype objects in a global map
-// and treat the offset as an identifier. We use negative offsets that
-// do not overlap with any compile-time module offsets.
-//
-// Entries are created by reflect.addReflectOff.
-var reflectOffs struct {
- lock mutex
- next int32
- m map[int32]unsafe.Pointer
- minv map[unsafe.Pointer]int32
-}
-
-func reflectOffsLock() {
- lock(&reflectOffs.lock)
- if raceenabled {
- raceacquire(unsafe.Pointer(&reflectOffs.lock))
- }
-}
-
-func reflectOffsUnlock() {
- if raceenabled {
- racerelease(unsafe.Pointer(&reflectOffs.lock))
- }
- unlock(&reflectOffs.lock)
-}
-
-func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
- if off == 0 {
- return name{}
- }
- base := uintptr(ptrInModule)
- for md := &firstmoduledata; md != nil; md = md.next {
- if base >= md.types && base < md.etypes {
- res := md.types + uintptr(off)
- if res > md.etypes {
- println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
- throw("runtime: name offset out of range")
- }
- return name{(*byte)(unsafe.Pointer(res))}
- }
- }
-
-	// No module found. See if it is a run-time name.
- reflectOffsLock()
- res, found := reflectOffs.m[int32(off)]
- reflectOffsUnlock()
- if !found {
- println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:")
- for next := &firstmoduledata; next != nil; next = next.next {
- println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
- }
- throw("runtime: name offset base pointer out of range")
- }
- return name{(*byte)(res)}
-}
-
-func (t *_type) nameOff(off nameOff) name {
- return resolveNameOff(unsafe.Pointer(t), off)
-}
-
-func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type {
- if off == 0 || off == -1 {
- // -1 is the sentinel value for unreachable code.
- // See cmd/link/internal/ld/data.go:relocsym.
- return nil
- }
- base := uintptr(ptrInModule)
- var md *moduledata
- for next := &firstmoduledata; next != nil; next = next.next {
- if base >= next.types && base < next.etypes {
- md = next
- break
- }
- }
- if md == nil {
- reflectOffsLock()
- res := reflectOffs.m[int32(off)]
- reflectOffsUnlock()
- if res == nil {
- println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
- for next := &firstmoduledata; next != nil; next = next.next {
- println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
- }
- throw("runtime: type offset base pointer out of range")
- }
- return (*_type)(res)
- }
- if t := md.typemap[off]; t != nil {
- return t
- }
- res := md.types + uintptr(off)
- if res > md.etypes {
- println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
- throw("runtime: type offset out of range")
- }
- return (*_type)(unsafe.Pointer(res))
-}
-
-func (t *_type) typeOff(off typeOff) *_type {
- return resolveTypeOff(unsafe.Pointer(t), off)
-}
-
-func (t *_type) textOff(off textOff) unsafe.Pointer {
- if off == -1 {
- // -1 is the sentinel value for unreachable code.
- // See cmd/link/internal/ld/data.go:relocsym.
- return unsafe.Pointer(abi.FuncPCABIInternal(unreachableMethod))
- }
- base := uintptr(unsafe.Pointer(t))
- var md *moduledata
- for next := &firstmoduledata; next != nil; next = next.next {
- if base >= next.types && base < next.etypes {
- md = next
- break
- }
- }
- if md == nil {
- reflectOffsLock()
- res := reflectOffs.m[int32(off)]
- reflectOffsUnlock()
- if res == nil {
- println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
- for next := &firstmoduledata; next != nil; next = next.next {
- println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
- }
- throw("runtime: text offset base pointer out of range")
- }
- return res
- }
- res := md.textAddr(uint32(off))
- return unsafe.Pointer(res)
-}
-
-func (t *functype) in() []*_type {
- // See funcType in reflect/type.go for details on data layout.
- uadd := uintptr(unsafe.Sizeof(functype{}))
- if t.typ.tflag&tflagUncommon != 0 {
- uadd += unsafe.Sizeof(uncommontype{})
- }
- return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[:t.inCount]
-}
-
-func (t *functype) out() []*_type {
- // See funcType in reflect/type.go for details on data layout.
- uadd := uintptr(unsafe.Sizeof(functype{}))
- if t.typ.tflag&tflagUncommon != 0 {
- uadd += unsafe.Sizeof(uncommontype{})
- }
- outCount := t.outCount & (1<<15 - 1)
- return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[t.inCount : t.inCount+outCount]
-}
-
-func (t *functype) dotdotdot() bool {
- return t.outCount&(1<<15) != 0
-}
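-
-// Layout sketch (illustrative): for func(int, string) bool with
-// tflagUncommon set, the memory after the functype header is
-//
-//	functype | uncommontype | *_type(int) | *_type(string) | *_type(bool)
-//
-// so in() yields the first inCount pointers and out() the following
-// outCount&(1<<15-1) pointers; the top bit of outCount flags variadic.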
-
-type nameOff int32
-type typeOff int32
-type textOff int32
-
-type method struct {
- name nameOff
- mtyp typeOff
- ifn textOff
- tfn textOff
-}
-
-type uncommontype struct {
- pkgpath nameOff
- mcount uint16 // number of methods
- xcount uint16 // number of exported methods
- moff uint32 // offset from this uncommontype to [mcount]method
- _ uint32 // unused
-}
-
-type imethod struct {
- name nameOff
- ityp typeOff
-}
-
-type interfacetype struct {
- typ _type
- pkgpath name
- mhdr []imethod
-}
-
-type maptype struct {
- typ _type
- key *_type
- elem *_type
- bucket *_type // internal type representing a hash bucket
- // function for hashing keys (ptr to key, seed) -> hash
- hasher func(unsafe.Pointer, uintptr) uintptr
- keysize uint8 // size of key slot
- elemsize uint8 // size of elem slot
- bucketsize uint16 // size of bucket
- flags uint32
-}
-
-// Note: flag values must match those used in the TMAP case
-// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
-func (mt *maptype) indirectkey() bool { // store ptr to key instead of key itself
- return mt.flags&1 != 0
-}
-func (mt *maptype) indirectelem() bool { // store ptr to elem instead of elem itself
- return mt.flags&2 != 0
-}
-func (mt *maptype) reflexivekey() bool { // true if k==k for all keys
- return mt.flags&4 != 0
-}
-func (mt *maptype) needkeyupdate() bool { // true if we need to update key on an overwrite
- return mt.flags&8 != 0
-}
-func (mt *maptype) hashMightPanic() bool { // true if hash function might panic
- return mt.flags&16 != 0
-}
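-
-// Example (illustrative): flags == 5 (bits 0 and 2 set) means
-// indirectkey() and reflexivekey() report true: keys are stored as
-// pointers in the bucket, k==k holds for every key, overwrites need no
-// key update, and hashing cannot panic.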
-
-type arraytype struct {
- typ _type
- elem *_type
- slice *_type
- len uintptr
-}
-
-type chantype struct {
- typ _type
- elem *_type
- dir uintptr
-}
-
-type slicetype struct {
- typ _type
- elem *_type
-}
-
-type functype struct {
- typ _type
- inCount uint16
- outCount uint16
-}
-
-type ptrtype struct {
- typ _type
- elem *_type
-}
-
-type structfield struct {
- name name
- typ *_type
- offsetAnon uintptr
-}
-
-func (f *structfield) offset() uintptr {
- return f.offsetAnon >> 1
-}
-
-type structtype struct {
- typ _type
- pkgPath name
- fields []structfield
-}
-
-// name is an encoded type name with optional extra data.
-// See reflect/type.go for details.
-type name struct {
- bytes *byte
-}
-
-func (n name) data(off int) *byte {
- return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off)))
-}
-
-func (n name) isExported() bool {
- return (*n.bytes)&(1<<0) != 0
-}
-
-func (n name) readvarint(off int) (int, int) {
- v := 0
- for i := 0; ; i++ {
- x := *n.data(off + i)
- v += int(x&0x7f) << (7 * i)
- if x&0x80 == 0 {
- return i + 1, v
- }
- }
-}
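-
-// Worked example (sketch): over the bytes 0x96 0x01, readvarint
-// accumulates 0x96&0x7f + (0x01&0x7f)<<7 = 22 + 128 = 150 and stops at
-// the clear continuation bit, returning (2, 150): two bytes read, value 150.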
-
-func (n name) name() (s string) {
- if n.bytes == nil {
- return ""
- }
- i, l := n.readvarint(1)
- if l == 0 {
- return ""
- }
- hdr := (*stringStruct)(unsafe.Pointer(&s))
- hdr.str = unsafe.Pointer(n.data(1 + i))
- hdr.len = l
- return
-}
-
-func (n name) tag() (s string) {
- if *n.data(0)&(1<<1) == 0 {
- return ""
- }
- i, l := n.readvarint(1)
- i2, l2 := n.readvarint(1 + i + l)
- hdr := (*stringStruct)(unsafe.Pointer(&s))
- hdr.str = unsafe.Pointer(n.data(1 + i + l + i2))
- hdr.len = l2
- return
-}
-
-func (n name) pkgPath() string {
- if n.bytes == nil || *n.data(0)&(1<<2) == 0 {
- return ""
- }
- i, l := n.readvarint(1)
- off := 1 + i + l
- if *n.data(0)&(1<<1) != 0 {
- i2, l2 := n.readvarint(off)
- off += i2 + l2
- }
- var nameOff nameOff
- copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off)))[:])
- pkgPathName := resolveNameOff(unsafe.Pointer(n.bytes), nameOff)
- return pkgPathName.name()
-}
-
-func (n name) isBlank() bool {
- if n.bytes == nil {
- return false
- }
- _, l := n.readvarint(1)
- return l == 1 && *n.data(2) == '_'
-}
-
-// typelinksinit scans the types from extra modules and builds the
-// moduledata typemap used to de-duplicate type pointers.
-func typelinksinit() {
- if firstmoduledata.next == nil {
- return
- }
- typehash := make(map[uint32][]*_type, len(firstmoduledata.typelinks))
-
- modules := activeModules()
- prev := modules[0]
- for _, md := range modules[1:] {
- // Collect types from the previous module into typehash.
- collect:
- for _, tl := range prev.typelinks {
- var t *_type
- if prev.typemap == nil {
- t = (*_type)(unsafe.Pointer(prev.types + uintptr(tl)))
- } else {
- t = prev.typemap[typeOff(tl)]
- }
- // Add to typehash if not seen before.
- tlist := typehash[t.hash]
- for _, tcur := range tlist {
- if tcur == t {
- continue collect
- }
- }
- typehash[t.hash] = append(tlist, t)
- }
-
- if md.typemap == nil {
- // If any of this module's typelinks match a type from a
- // prior module, prefer that prior type by adding the offset
- // to this module's typemap.
- tm := make(map[typeOff]*_type, len(md.typelinks))
- pinnedTypemaps = append(pinnedTypemaps, tm)
- md.typemap = tm
- for _, tl := range md.typelinks {
- t := (*_type)(unsafe.Pointer(md.types + uintptr(tl)))
- for _, candidate := range typehash[t.hash] {
- seen := map[_typePair]struct{}{}
- if typesEqual(t, candidate, seen) {
- t = candidate
- break
- }
- }
- md.typemap[typeOff(tl)] = t
- }
- }
-
- prev = md
- }
-}
-
-type _typePair struct {
- t1 *_type
- t2 *_type
-}
-
-// typesEqual reports whether two types are equal.
-//
-// Everywhere in the runtime and reflect packages, it is assumed that
-// there is exactly one *_type per Go type, so that pointer equality
-// can be used to test if types are equal. There is one place that
-// breaks this assumption: buildmode=shared. In this case a type can
-// appear as two different pieces of memory. This is hidden from the
-// runtime and reflect package by the per-module typemap built in
-// typelinksinit. It uses typesEqual to map types from later modules
-// back into earlier ones.
-//
-// Only typelinksinit needs this function.
-func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
- tp := _typePair{t, v}
- if _, ok := seen[tp]; ok {
- return true
- }
-
-	// Mark these types as seen, and thus equivalent, which prevents an
-	// infinite loop if the two types are identical but recursively
-	// defined and loaded from different modules.
- seen[tp] = struct{}{}
-
- if t == v {
- return true
- }
- kind := t.kind & kindMask
- if kind != v.kind&kindMask {
- return false
- }
- if t.string() != v.string() {
- return false
- }
- ut := t.uncommon()
- uv := v.uncommon()
- if ut != nil || uv != nil {
- if ut == nil || uv == nil {
- return false
- }
- pkgpatht := t.nameOff(ut.pkgpath).name()
- pkgpathv := v.nameOff(uv.pkgpath).name()
- if pkgpatht != pkgpathv {
- return false
- }
- }
- if kindBool <= kind && kind <= kindComplex128 {
- return true
- }
- switch kind {
- case kindString, kindUnsafePointer:
- return true
- case kindArray:
- at := (*arraytype)(unsafe.Pointer(t))
- av := (*arraytype)(unsafe.Pointer(v))
- return typesEqual(at.elem, av.elem, seen) && at.len == av.len
- case kindChan:
- ct := (*chantype)(unsafe.Pointer(t))
- cv := (*chantype)(unsafe.Pointer(v))
- return ct.dir == cv.dir && typesEqual(ct.elem, cv.elem, seen)
- case kindFunc:
- ft := (*functype)(unsafe.Pointer(t))
- fv := (*functype)(unsafe.Pointer(v))
- if ft.outCount != fv.outCount || ft.inCount != fv.inCount {
- return false
- }
- tin, vin := ft.in(), fv.in()
- for i := 0; i < len(tin); i++ {
- if !typesEqual(tin[i], vin[i], seen) {
- return false
- }
- }
- tout, vout := ft.out(), fv.out()
- for i := 0; i < len(tout); i++ {
- if !typesEqual(tout[i], vout[i], seen) {
- return false
- }
- }
- return true
- case kindInterface:
- it := (*interfacetype)(unsafe.Pointer(t))
- iv := (*interfacetype)(unsafe.Pointer(v))
- if it.pkgpath.name() != iv.pkgpath.name() {
- return false
- }
- if len(it.mhdr) != len(iv.mhdr) {
- return false
- }
- for i := range it.mhdr {
- tm := &it.mhdr[i]
- vm := &iv.mhdr[i]
- // Note the mhdr array can be relocated from
- // another module. See #17724.
- tname := resolveNameOff(unsafe.Pointer(tm), tm.name)
- vname := resolveNameOff(unsafe.Pointer(vm), vm.name)
- if tname.name() != vname.name() {
- return false
- }
- if tname.pkgPath() != vname.pkgPath() {
- return false
- }
- tityp := resolveTypeOff(unsafe.Pointer(tm), tm.ityp)
- vityp := resolveTypeOff(unsafe.Pointer(vm), vm.ityp)
- if !typesEqual(tityp, vityp, seen) {
- return false
- }
- }
- return true
- case kindMap:
- mt := (*maptype)(unsafe.Pointer(t))
- mv := (*maptype)(unsafe.Pointer(v))
- return typesEqual(mt.key, mv.key, seen) && typesEqual(mt.elem, mv.elem, seen)
- case kindPtr:
- pt := (*ptrtype)(unsafe.Pointer(t))
- pv := (*ptrtype)(unsafe.Pointer(v))
- return typesEqual(pt.elem, pv.elem, seen)
- case kindSlice:
- st := (*slicetype)(unsafe.Pointer(t))
- sv := (*slicetype)(unsafe.Pointer(v))
- return typesEqual(st.elem, sv.elem, seen)
- case kindStruct:
- st := (*structtype)(unsafe.Pointer(t))
- sv := (*structtype)(unsafe.Pointer(v))
- if len(st.fields) != len(sv.fields) {
- return false
- }
- if st.pkgPath.name() != sv.pkgPath.name() {
- return false
- }
- for i := range st.fields {
- tf := &st.fields[i]
- vf := &sv.fields[i]
- if tf.name.name() != vf.name.name() {
- return false
- }
- if !typesEqual(tf.typ, vf.typ, seen) {
- return false
- }
- if tf.name.tag() != vf.name.tag() {
- return false
- }
- if tf.offsetAnon != vf.offsetAnon {
- return false
- }
- }
- return true
- default:
- println("runtime: impossible type kind", kind)
- throw("runtime: impossible type kind")
- return false
- }
-}
diff --git a/contrib/go/_std_1.18/src/runtime/vdso_elf64.go b/contrib/go/_std_1.18/src/runtime/vdso_elf64.go
deleted file mode 100644
index d46d6f8c34..0000000000
--- a/contrib/go/_std_1.18/src/runtime/vdso_elf64.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && (amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64)
-
-package runtime
-
-// ELF64 structure definitions for use by the vDSO loader
-
-type elfSym struct {
- st_name uint32
- st_info byte
- st_other byte
- st_shndx uint16
- st_value uint64
- st_size uint64
-}
-
-type elfVerdef struct {
- vd_version uint16 /* Version revision */
- vd_flags uint16 /* Version information */
- vd_ndx uint16 /* Version Index */
- vd_cnt uint16 /* Number of associated aux entries */
- vd_hash uint32 /* Version name hash value */
- vd_aux uint32 /* Offset in bytes to verdaux array */
- vd_next uint32 /* Offset in bytes to next verdef entry */
-}
-
-type elfEhdr struct {
- e_ident [_EI_NIDENT]byte /* Magic number and other info */
- e_type uint16 /* Object file type */
- e_machine uint16 /* Architecture */
- e_version uint32 /* Object file version */
- e_entry uint64 /* Entry point virtual address */
- e_phoff uint64 /* Program header table file offset */
- e_shoff uint64 /* Section header table file offset */
- e_flags uint32 /* Processor-specific flags */
- e_ehsize uint16 /* ELF header size in bytes */
- e_phentsize uint16 /* Program header table entry size */
- e_phnum uint16 /* Program header table entry count */
- e_shentsize uint16 /* Section header table entry size */
- e_shnum uint16 /* Section header table entry count */
- e_shstrndx uint16 /* Section header string table index */
-}
-
-type elfPhdr struct {
- p_type uint32 /* Segment type */
- p_flags uint32 /* Segment flags */
- p_offset uint64 /* Segment file offset */
- p_vaddr uint64 /* Segment virtual address */
- p_paddr uint64 /* Segment physical address */
- p_filesz uint64 /* Segment size in file */
- p_memsz uint64 /* Segment size in memory */
- p_align uint64 /* Segment alignment */
-}
-
-type elfShdr struct {
- sh_name uint32 /* Section name (string tbl index) */
- sh_type uint32 /* Section type */
- sh_flags uint64 /* Section flags */
- sh_addr uint64 /* Section virtual addr at execution */
- sh_offset uint64 /* Section file offset */
- sh_size uint64 /* Section size in bytes */
- sh_link uint32 /* Link to another section */
- sh_info uint32 /* Additional section information */
- sh_addralign uint64 /* Section alignment */
- sh_entsize uint64 /* Entry size if section holds table */
-}
-
-type elfDyn struct {
- d_tag int64 /* Dynamic entry type */
- d_val uint64 /* Integer value */
-}
-
-type elfVerdaux struct {
- vda_name uint32 /* Version or dependency names */
- vda_next uint32 /* Offset in bytes to next verdaux entry */
-}
diff --git a/contrib/go/_std_1.18/src/runtime/vdso_in_none.go b/contrib/go/_std_1.18/src/runtime/vdso_in_none.go
deleted file mode 100644
index 618bd39b42..0000000000
--- a/contrib/go/_std_1.18/src/runtime/vdso_in_none.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (linux && !386 && !amd64 && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !riscv64) || !linux
-
-package runtime
-
-// A dummy version of inVDSOPage for targets that don't use a VDSO.
-
-func inVDSOPage(pc uintptr) bool {
- return false
-}
diff --git a/contrib/go/_std_1.18/src/runtime/vdso_linux.go b/contrib/go/_std_1.18/src/runtime/vdso_linux.go
deleted file mode 100644
index cff2000767..0000000000
--- a/contrib/go/_std_1.18/src/runtime/vdso_linux.go
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && (386 || amd64 || arm || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64)
-
-package runtime
-
-import "unsafe"
-
-// Look up symbols in the Linux vDSO.
-
-// This code was originally based on the sample Linux vDSO parser at
-// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/tools/testing/selftests/vDSO/parse_vdso.c
-
-// This implements the ELF dynamic linking spec at
-// http://sco.com/developers/gabi/latest/ch5.dynamic.html
-
-// The version section is documented at
-// https://refspecs.linuxfoundation.org/LSB_3.2.0/LSB-Core-generic/LSB-Core-generic/symversion.html
-
-const (
- _AT_SYSINFO_EHDR = 33
-
- _PT_LOAD = 1 /* Loadable program segment */
- _PT_DYNAMIC = 2 /* Dynamic linking information */
-
- _DT_NULL = 0 /* Marks end of dynamic section */
- _DT_HASH = 4 /* Dynamic symbol hash table */
- _DT_STRTAB = 5 /* Address of string table */
- _DT_SYMTAB = 6 /* Address of symbol table */
- _DT_GNU_HASH = 0x6ffffef5 /* GNU-style dynamic symbol hash table */
- _DT_VERSYM = 0x6ffffff0
- _DT_VERDEF = 0x6ffffffc
-
- _VER_FLG_BASE = 0x1 /* Version definition of file itself */
-
- _SHN_UNDEF = 0 /* Undefined section */
-
- _SHT_DYNSYM = 11 /* Dynamic linker symbol table */
-
- _STT_FUNC = 2 /* Symbol is a code object */
-
- _STT_NOTYPE = 0 /* Symbol type is not specified */
-
- _STB_GLOBAL = 1 /* Global symbol */
- _STB_WEAK = 2 /* Weak symbol */
-
- _EI_NIDENT = 16
-
- // Maximum indices for the array types used when traversing the vDSO ELF structures.
- // Computed from architecture-specific max provided by vdso_linux_*.go
- vdsoSymTabSize = vdsoArrayMax / unsafe.Sizeof(elfSym{})
- vdsoDynSize = vdsoArrayMax / unsafe.Sizeof(elfDyn{})
- vdsoSymStringsSize = vdsoArrayMax // byte
- vdsoVerSymSize = vdsoArrayMax / 2 // uint16
- vdsoHashSize = vdsoArrayMax / 4 // uint32
-
- // vdsoBloomSizeScale is a scaling factor for gnuhash tables which are uint32 indexed,
- // but contain uintptrs
- vdsoBloomSizeScale = unsafe.Sizeof(uintptr(0)) / 4 // uint32
-)
-
-/* How to extract and insert information held in the st_info field. */
-func _ELF_ST_BIND(val byte) byte { return val >> 4 }
-func _ELF_ST_TYPE(val byte) byte { return val & 0xf }
-
-type vdsoSymbolKey struct {
- name string
- symHash uint32
- gnuHash uint32
- ptr *uintptr
-}
-
-type vdsoVersionKey struct {
- version string
- verHash uint32
-}
-
-type vdsoInfo struct {
- valid bool
-
- /* Load information */
- loadAddr uintptr
- loadOffset uintptr /* loadAddr - recorded vaddr */
-
- /* Symbol table */
- symtab *[vdsoSymTabSize]elfSym
- symstrings *[vdsoSymStringsSize]byte
- chain []uint32
- bucket []uint32
- symOff uint32
- isGNUHash bool
-
- /* Version table */
- versym *[vdsoVerSymSize]uint16
- verdef *elfVerdef
-}
-
-// See vdso_linux_*.go for vdsoSymbolKeys[] and vdso*Sym vars.
-
-func vdsoInitFromSysinfoEhdr(info *vdsoInfo, hdr *elfEhdr) {
- info.valid = false
- info.loadAddr = uintptr(unsafe.Pointer(hdr))
-
- pt := unsafe.Pointer(info.loadAddr + uintptr(hdr.e_phoff))
-
- // We need two things from the segment table: the load offset
- // and the dynamic table.
- var foundVaddr bool
- var dyn *[vdsoDynSize]elfDyn
- for i := uint16(0); i < hdr.e_phnum; i++ {
- pt := (*elfPhdr)(add(pt, uintptr(i)*unsafe.Sizeof(elfPhdr{})))
- switch pt.p_type {
- case _PT_LOAD:
- if !foundVaddr {
- foundVaddr = true
- info.loadOffset = info.loadAddr + uintptr(pt.p_offset-pt.p_vaddr)
- }
-
- case _PT_DYNAMIC:
- dyn = (*[vdsoDynSize]elfDyn)(unsafe.Pointer(info.loadAddr + uintptr(pt.p_offset)))
- }
- }
-
- if !foundVaddr || dyn == nil {
- return // Failed
- }
-
- // Fish out the useful bits of the dynamic table.
-
- var hash, gnuhash *[vdsoHashSize]uint32
- info.symstrings = nil
- info.symtab = nil
- info.versym = nil
- info.verdef = nil
- for i := 0; dyn[i].d_tag != _DT_NULL; i++ {
- dt := &dyn[i]
- p := info.loadOffset + uintptr(dt.d_val)
- switch dt.d_tag {
- case _DT_STRTAB:
- info.symstrings = (*[vdsoSymStringsSize]byte)(unsafe.Pointer(p))
- case _DT_SYMTAB:
- info.symtab = (*[vdsoSymTabSize]elfSym)(unsafe.Pointer(p))
- case _DT_HASH:
- hash = (*[vdsoHashSize]uint32)(unsafe.Pointer(p))
- case _DT_GNU_HASH:
- gnuhash = (*[vdsoHashSize]uint32)(unsafe.Pointer(p))
- case _DT_VERSYM:
- info.versym = (*[vdsoVerSymSize]uint16)(unsafe.Pointer(p))
- case _DT_VERDEF:
- info.verdef = (*elfVerdef)(unsafe.Pointer(p))
- }
- }
-
- if info.symstrings == nil || info.symtab == nil || (hash == nil && gnuhash == nil) {
- return // Failed
- }
-
- if info.verdef == nil {
- info.versym = nil
- }
-
- if gnuhash != nil {
- // Parse the GNU hash table header.
- nbucket := gnuhash[0]
- info.symOff = gnuhash[1]
- bloomSize := gnuhash[2]
- info.bucket = gnuhash[4+bloomSize*uint32(vdsoBloomSizeScale):][:nbucket]
- info.chain = gnuhash[4+bloomSize*uint32(vdsoBloomSizeScale)+nbucket:]
- info.isGNUHash = true
- } else {
- // Parse the hash table header.
- nbucket := hash[0]
- nchain := hash[1]
- info.bucket = hash[2 : 2+nbucket]
- info.chain = hash[2+nbucket : 2+nbucket+nchain]
- }
-
- // That's all we need.
- info.valid = true
-}
-
-func vdsoFindVersion(info *vdsoInfo, ver *vdsoVersionKey) int32 {
- if !info.valid {
- return 0
- }
-
- def := info.verdef
- for {
- if def.vd_flags&_VER_FLG_BASE == 0 {
- aux := (*elfVerdaux)(add(unsafe.Pointer(def), uintptr(def.vd_aux)))
- if def.vd_hash == ver.verHash && ver.version == gostringnocopy(&info.symstrings[aux.vda_name]) {
- return int32(def.vd_ndx & 0x7fff)
- }
- }
-
- if def.vd_next == 0 {
- break
- }
- def = (*elfVerdef)(add(unsafe.Pointer(def), uintptr(def.vd_next)))
- }
-
- return -1 // cannot match any version
-}
-
-func vdsoParseSymbols(info *vdsoInfo, version int32) {
- if !info.valid {
- return
- }
-
- apply := func(symIndex uint32, k vdsoSymbolKey) bool {
- sym := &info.symtab[symIndex]
- typ := _ELF_ST_TYPE(sym.st_info)
- bind := _ELF_ST_BIND(sym.st_info)
- // On ppc64x, VDSO functions are of type _STT_NOTYPE.
- if typ != _STT_FUNC && typ != _STT_NOTYPE || bind != _STB_GLOBAL && bind != _STB_WEAK || sym.st_shndx == _SHN_UNDEF {
- return false
- }
- if k.name != gostringnocopy(&info.symstrings[sym.st_name]) {
- return false
- }
- // Check symbol version.
- if info.versym != nil && version != 0 && int32(info.versym[symIndex]&0x7fff) != version {
- return false
- }
-
- *k.ptr = info.loadOffset + uintptr(sym.st_value)
- return true
- }
-
- if !info.isGNUHash {
- // Old-style DT_HASH table.
- for _, k := range vdsoSymbolKeys {
- for chain := info.bucket[k.symHash%uint32(len(info.bucket))]; chain != 0; chain = info.chain[chain] {
- if apply(chain, k) {
- break
- }
- }
- }
- return
- }
-
- // New-style DT_GNU_HASH table.
- for _, k := range vdsoSymbolKeys {
- symIndex := info.bucket[k.gnuHash%uint32(len(info.bucket))]
- if symIndex < info.symOff {
- continue
- }
- for ; ; symIndex++ {
- hash := info.chain[symIndex-info.symOff]
- if hash|1 == k.gnuHash|1 {
- // Found a hash match.
- if apply(symIndex, k) {
- break
- }
- }
- if hash&1 != 0 {
- // End of chain.
- break
- }
- }
- }
-}
-
-func vdsoauxv(tag, val uintptr) {
- switch tag {
- case _AT_SYSINFO_EHDR:
- if val == 0 {
- // Something went wrong
- return
- }
- var info vdsoInfo
- // TODO(rsc): I don't understand why the compiler thinks info escapes
- // when passed to the three functions below.
- info1 := (*vdsoInfo)(noescape(unsafe.Pointer(&info)))
- vdsoInitFromSysinfoEhdr(info1, (*elfEhdr)(unsafe.Pointer(val)))
- vdsoParseSymbols(info1, vdsoFindVersion(info1, &vdsoLinuxVersion))
- }
-}
-
-// inVDSOPage reports whether pc is on the VDSO page.
-//go:nosplit
-func inVDSOPage(pc uintptr) bool {
- for _, k := range vdsoSymbolKeys {
- if *k.ptr != 0 {
- page := *k.ptr &^ (physPageSize - 1)
- return pc >= page && pc < page+physPageSize
- }
- }
- return false
-}
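
The symHash and gnuHash fields of vdsoSymbolKey above hold precomputed hashes of each symbol name, one per lookup scheme. For reference, a minimal Go sketch of the two standard published hash functions they correspond to, the classic SysV DT_HASH and the GNU DT_GNU_HASH (this is not code from the deleted file):

	package main

	import "fmt"

	// elfHash is the classic SysV ELF hash used with DT_HASH tables.
	func elfHash(name string) uint32 {
		var h uint32
		for i := 0; i < len(name); i++ {
			h = h<<4 + uint32(name[i])
			g := h & 0xf0000000
			if g != 0 {
				h ^= g >> 24
			}
			h &^= g // clear the top nibble
		}
		return h
	}

	// gnuHash is the DJB2-style hash used with DT_GNU_HASH tables.
	func gnuHash(name string) uint32 {
		h := uint32(5381)
		for i := 0; i < len(name); i++ {
			h = h*33 + uint32(name[i])
		}
		return h
	}

	func main() {
		fmt.Printf("%#x %#x\n", elfHash("__vdso_gettimeofday"), gnuHash("__vdso_gettimeofday"))
	}
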
diff --git a/contrib/go/_std_1.18/src/sort/search.go b/contrib/go/_std_1.18/src/sort/search.go
deleted file mode 100644
index fcff0f9491..0000000000
--- a/contrib/go/_std_1.18/src/sort/search.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements binary search.
-
-package sort
-
-// Search uses binary search to find and return the smallest index i
-// in [0, n) at which f(i) is true, assuming that on the range [0, n),
-// f(i) == true implies f(i+1) == true. That is, Search requires that
-// f is false for some (possibly empty) prefix of the input range [0, n)
-// and then true for the (possibly empty) remainder; Search returns
-// the first true index. If there is no such index, Search returns n.
-// (Note that the "not found" return value is not -1 as in, for instance,
-// strings.Index.)
-// Search calls f(i) only for i in the range [0, n).
-//
-// A common use of Search is to find the index i for a value x in
-// a sorted, indexable data structure such as an array or slice.
-// In this case, the argument f, typically a closure, captures the value
-// to be searched for, and how the data structure is indexed and
-// ordered.
-//
-// For instance, given a slice data sorted in ascending order,
-// the call Search(len(data), func(i int) bool { return data[i] >= 23 })
-// returns the smallest index i such that data[i] >= 23. If the caller
-// wants to find whether 23 is in the slice, it must test data[i] == 23
-// separately.
-//
-// Searching data sorted in descending order would use the <=
-// operator instead of the >= operator.
-//
-// To complete the example above, the following code tries to find the value
-// x in an integer slice data sorted in ascending order:
-//
-// x := 23
-// i := sort.Search(len(data), func(i int) bool { return data[i] >= x })
-// if i < len(data) && data[i] == x {
-// // x is present at data[i]
-// } else {
-// // x is not present in data,
-// // but i is the index where it would be inserted.
-// }
-//
-// As a more whimsical example, this program guesses your number:
-//
-// func GuessingGame() {
-// var s string
-// fmt.Printf("Pick an integer from 0 to 100.\n")
-// answer := sort.Search(100, func(i int) bool {
-// fmt.Printf("Is your number <= %d? ", i)
-// fmt.Scanf("%s", &s)
-// return s != "" && s[0] == 'y'
-// })
-// fmt.Printf("Your number is %d.\n", answer)
-// }
-//
-func Search(n int, f func(int) bool) int {
- // Define f(-1) == false and f(n) == true.
- // Invariant: f(i-1) == false, f(j) == true.
- i, j := 0, n
- for i < j {
- h := int(uint(i+j) >> 1) // avoid overflow when computing h
- // i ≤ h < j
- if !f(h) {
- i = h + 1 // preserves f(i-1) == false
- } else {
- j = h // preserves f(j) == true
- }
- }
- // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
- return i
-}
-
-// Convenience wrappers for common cases.
-
-// SearchInts searches for x in a sorted slice of ints and returns the index
-// as specified by Search. The return value is the index to insert x if x is
-// not present (it could be len(a)).
-// The slice must be sorted in ascending order.
-//
-func SearchInts(a []int, x int) int {
- return Search(len(a), func(i int) bool { return a[i] >= x })
-}
-
-// SearchFloat64s searches for x in a sorted slice of float64s and returns the index
-// as specified by Search. The return value is the index to insert x if x is not
-// present (it could be len(a)).
-// The slice must be sorted in ascending order.
-//
-func SearchFloat64s(a []float64, x float64) int {
- return Search(len(a), func(i int) bool { return a[i] >= x })
-}
-
-// SearchStrings searches for x in a sorted slice of strings and returns the index
-// as specified by Search. The return value is the index to insert x if x is not
-// present (it could be len(a)).
-// The slice must be sorted in ascending order.
-//
-func SearchStrings(a []string, x string) int {
- return Search(len(a), func(i int) bool { return a[i] >= x })
-}
-
-// Search returns the result of applying SearchInts to the receiver and x.
-func (p IntSlice) Search(x int) int { return SearchInts(p, x) }
-
-// Search returns the result of applying SearchFloat64s to the receiver and x.
-func (p Float64Slice) Search(x float64) int { return SearchFloat64s(p, x) }
-
-// Search returns the result of applying SearchStrings to the receiver and x.
-func (p StringSlice) Search(x string) int { return SearchStrings(p, x) }
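
Because Search only requires f to be monotone over [0, n), it also works for predicates that are not backed by a slice. A small sketch under that contract (ceilSqrt is a hypothetical helper, not part of the package):

	package main

	import (
		"fmt"
		"sort"
	)

	// ceilSqrt returns the smallest i in [0, n] with i*i >= n.
	func ceilSqrt(n int) int {
		return sort.Search(n+1, func(i int) bool { return i*i >= n })
	}

	func main() {
		fmt.Println(ceilSqrt(10)) // 4, since 3*3 < 10 <= 4*4
	}
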
diff --git a/contrib/go/_std_1.18/src/sort/slice.go b/contrib/go/_std_1.18/src/sort/slice.go
deleted file mode 100644
index ba5c2e2f3d..0000000000
--- a/contrib/go/_std_1.18/src/sort/slice.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sort
-
-// Slice sorts the slice x given the provided less function.
-// It panics if x is not a slice.
-//
-// The sort is not guaranteed to be stable: equal elements
-// may be reversed from their original order.
-// For a stable sort, use SliceStable.
-//
-// The less function must satisfy the same requirements as
-// the Interface type's Less method.
-func Slice(x any, less func(i, j int) bool) {
- rv := reflectValueOf(x)
- swap := reflectSwapper(x)
- length := rv.Len()
- quickSort_func(lessSwap{less, swap}, 0, length, maxDepth(length))
-}
-
-// SliceStable sorts the slice x using the provided less
-// function, keeping equal elements in their original order.
-// It panics if x is not a slice.
-//
-// The less function must satisfy the same requirements as
-// the Interface type's Less method.
-func SliceStable(x any, less func(i, j int) bool) {
- rv := reflectValueOf(x)
- swap := reflectSwapper(x)
- stable_func(lessSwap{less, swap}, rv.Len())
-}
-
-// SliceIsSorted reports whether the slice x is sorted according to the provided less function.
-// It panics if x is not a slice.
-func SliceIsSorted(x any, less func(i, j int) bool) bool {
- rv := reflectValueOf(x)
- n := rv.Len()
- for i := n - 1; i > 0; i-- {
- if less(i, i-1) {
- return false
- }
- }
- return true
-}
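
A minimal usage sketch for the functions above, with a captured less closure (the data is illustrative):

	package main

	import (
		"fmt"
		"sort"
	)

	func main() {
		people := []struct {
			Name string
			Age  int
		}{{"Carol", 35}, {"Alice", 30}, {"Bob", 30}}

		// SliceStable keeps Alice before Bob (equal ages keep their input
		// order); plain Slice could return those two in either order.
		sort.SliceStable(people, func(i, j int) bool { return people[i].Age < people[j].Age })
		fmt.Println(people) // [{Alice 30} {Bob 30} {Carol 35}]
	}
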
diff --git a/contrib/go/_std_1.18/src/sort/sort.go b/contrib/go/_std_1.18/src/sort/sort.go
deleted file mode 100644
index 749310764a..0000000000
--- a/contrib/go/_std_1.18/src/sort/sort.go
+++ /dev/null
@@ -1,579 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:generate go run genzfunc.go
-
-// Package sort provides primitives for sorting slices and user-defined collections.
-package sort
-
-// An implementation of Interface can be sorted by the routines in this package.
-// The methods refer to elements of the underlying collection by integer index.
-type Interface interface {
- // Len is the number of elements in the collection.
- Len() int
-
- // Less reports whether the element with index i
- // must sort before the element with index j.
- //
- // If both Less(i, j) and Less(j, i) are false,
- // then the elements at index i and j are considered equal.
- // Sort may place equal elements in any order in the final result,
- // while Stable preserves the original input order of equal elements.
- //
- // Less must describe a transitive ordering:
- // - if both Less(i, j) and Less(j, k) are true, then Less(i, k) must be true as well.
- // - if both Less(i, j) and Less(j, k) are false, then Less(i, k) must be false as well.
- //
- // Note that floating-point comparison (the < operator on float32 or float64 values)
- // is not a transitive ordering when not-a-number (NaN) values are involved.
- // See Float64Slice.Less for a correct implementation for floating-point values.
- Less(i, j int) bool
-
- // Swap swaps the elements with indexes i and j.
- Swap(i, j int)
-}
-
-// insertionSort sorts data[a:b] using insertion sort.
-func insertionSort(data Interface, a, b int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && data.Less(j, j-1); j-- {
- data.Swap(j, j-1)
- }
- }
-}
-
-// siftDown implements the heap property on data[lo:hi].
-// first is an offset into the array where the root of the heap lies.
-func siftDown(data Interface, lo, hi, first int) {
- root := lo
- for {
- child := 2*root + 1
- if child >= hi {
- break
- }
- if child+1 < hi && data.Less(first+child, first+child+1) {
- child++
- }
- if !data.Less(first+root, first+child) {
- return
- }
- data.Swap(first+root, first+child)
- root = child
- }
-}
-
-func heapSort(data Interface, a, b int) {
- first := a
- lo := 0
- hi := b - a
-
- // Build heap with greatest element at top.
- for i := (hi - 1) / 2; i >= 0; i-- {
- siftDown(data, i, hi, first)
- }
-
- // Pop elements, largest first, into end of data.
- for i := hi - 1; i >= 0; i-- {
- data.Swap(first, first+i)
- siftDown(data, lo, i, first)
- }
-}
-
-// Quicksort, loosely following Bentley and McIlroy,
-// ``Engineering a Sort Function,'' SP&E November 1993.
-
-// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
-func medianOfThree(data Interface, m1, m0, m2 int) {
- // sort 3 elements
- if data.Less(m1, m0) {
- data.Swap(m1, m0)
- }
- // data[m0] <= data[m1]
- if data.Less(m2, m1) {
- data.Swap(m2, m1)
- // data[m0] <= data[m2] && data[m1] < data[m2]
- if data.Less(m1, m0) {
- data.Swap(m1, m0)
- }
- }
- // now data[m0] <= data[m1] <= data[m2]
-}
-
-func swapRange(data Interface, a, b, n int) {
- for i := 0; i < n; i++ {
- data.Swap(a+i, b+i)
- }
-}
-
-func doPivot(data Interface, lo, hi int) (midlo, midhi int) {
- m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
- if hi-lo > 40 {
- // Tukey's ``Ninther,'' median of three medians of three.
- s := (hi - lo) / 8
- medianOfThree(data, lo, lo+s, lo+2*s)
- medianOfThree(data, m, m-s, m+s)
- medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
- }
- medianOfThree(data, lo, m, hi-1)
-
- // Invariants are:
- // data[lo] = pivot (set up by ChoosePivot)
- // data[lo < i < a] < pivot
- // data[a <= i < b] <= pivot
- // data[b <= i < c] unexamined
- // data[c <= i < hi-1] > pivot
- // data[hi-1] >= pivot
- pivot := lo
- a, c := lo+1, hi-1
-
- for ; a < c && data.Less(a, pivot); a++ {
- }
- b := a
- for {
- for ; b < c && !data.Less(pivot, b); b++ { // data[b] <= pivot
- }
- for ; b < c && data.Less(pivot, c-1); c-- { // data[c-1] > pivot
- }
- if b >= c {
- break
- }
- // data[b] > pivot; data[c-1] <= pivot
- data.Swap(b, c-1)
- b++
- c--
- }
-	// If hi-c < 3 then there are duplicates (by the median-of-nine property).
-	// Be a bit more conservative and use a border of 5.
- protect := hi-c < 5
- if !protect && hi-c < (hi-lo)/4 {
-		// Let's test some points for equality to the pivot
- dups := 0
- if !data.Less(pivot, hi-1) { // data[hi-1] = pivot
- data.Swap(c, hi-1)
- c++
- dups++
- }
- if !data.Less(b-1, pivot) { // data[b-1] = pivot
- b--
- dups++
- }
- // m-lo = (hi-lo)/2 > 6
- // b-lo > (hi-lo)*3/4-1 > 8
- // ==> m < b ==> data[m] <= pivot
- if !data.Less(m, pivot) { // data[m] = pivot
- data.Swap(m, b-1)
- b--
- dups++
- }
- // if at least 2 points are equal to pivot, assume skewed distribution
- protect = dups > 1
- }
- if protect {
- // Protect against a lot of duplicates
- // Add invariant:
- // data[a <= i < b] unexamined
- // data[b <= i < c] = pivot
- for {
- for ; a < b && !data.Less(b-1, pivot); b-- { // data[b] == pivot
- }
- for ; a < b && data.Less(a, pivot); a++ { // data[a] < pivot
- }
- if a >= b {
- break
- }
- // data[a] == pivot; data[b-1] < pivot
- data.Swap(a, b-1)
- a++
- b--
- }
- }
- // Swap pivot into middle
- data.Swap(pivot, b-1)
- return b - 1, c
-}
-
-func quickSort(data Interface, a, b, maxDepth int) {
- for b-a > 12 { // Use ShellSort for slices <= 12 elements
- if maxDepth == 0 {
- heapSort(data, a, b)
- return
- }
- maxDepth--
- mlo, mhi := doPivot(data, a, b)
- // Avoiding recursion on the larger subproblem guarantees
- // a stack depth of at most lg(b-a).
- if mlo-a < b-mhi {
- quickSort(data, a, mlo, maxDepth)
- a = mhi // i.e., quickSort(data, mhi, b)
- } else {
- quickSort(data, mhi, b, maxDepth)
- b = mlo // i.e., quickSort(data, a, mlo)
- }
- }
- if b-a > 1 {
- // Do ShellSort pass with gap 6
-		// It can be written in this simplified form because b-a <= 12
- for i := a + 6; i < b; i++ {
- if data.Less(i, i-6) {
- data.Swap(i, i-6)
- }
- }
- insertionSort(data, a, b)
- }
-}
-
-// Sort sorts data in ascending order as determined by the Less method.
-// It makes one call to data.Len to determine n and O(n*log(n)) calls to
-// data.Less and data.Swap. The sort is not guaranteed to be stable.
-func Sort(data Interface) {
- n := data.Len()
- quickSort(data, 0, n, maxDepth(n))
-}
-
-// maxDepth returns a threshold at which quicksort should switch
-// to heapsort. It returns 2*ceil(lg(n+1)).
-func maxDepth(n int) int {
- var depth int
- for i := n; i > 0; i >>= 1 {
- depth++
- }
- return depth * 2
-}
-
-// lessSwap is a pair of Less and Swap functions for use with the
-// auto-generated func-optimized variant of sort.go in
-// zfuncversion.go.
-type lessSwap struct {
- Less func(i, j int) bool
- Swap func(i, j int)
-}
-
-type reverse struct {
- // This embedded Interface permits Reverse to use the methods of
- // another Interface implementation.
- Interface
-}
-
-// Less returns the opposite of the embedded implementation's Less method.
-func (r reverse) Less(i, j int) bool {
- return r.Interface.Less(j, i)
-}
-
-// Reverse returns the reverse order for data.
-func Reverse(data Interface) Interface {
- return &reverse{data}
-}
-
-// IsSorted reports whether data is sorted.
-func IsSorted(data Interface) bool {
- n := data.Len()
- for i := n - 1; i > 0; i-- {
- if data.Less(i, i-1) {
- return false
- }
- }
- return true
-}
-
-// Convenience types for common cases
-
-// IntSlice attaches the methods of Interface to []int, sorting in increasing order.
-type IntSlice []int
-
-func (x IntSlice) Len() int { return len(x) }
-func (x IntSlice) Less(i, j int) bool { return x[i] < x[j] }
-func (x IntSlice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-// Sort is a convenience method: x.Sort() calls Sort(x).
-func (x IntSlice) Sort() { Sort(x) }
-
-// Float64Slice implements Interface for a []float64, sorting in increasing order,
-// with not-a-number (NaN) values ordered before other values.
-type Float64Slice []float64
-
-func (x Float64Slice) Len() int { return len(x) }
-
-// Less reports whether x[i] should be ordered before x[j], as required by the sort Interface.
-// Note that floating-point comparison by itself is not a transitive relation: it does not
-// report a consistent ordering for not-a-number (NaN) values.
-// This implementation of Less places NaN values before any others, by using:
-//
-// x[i] < x[j] || (math.IsNaN(x[i]) && !math.IsNaN(x[j]))
-//
-func (x Float64Slice) Less(i, j int) bool { return x[i] < x[j] || (isNaN(x[i]) && !isNaN(x[j])) }
-func (x Float64Slice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-// isNaN is a copy of math.IsNaN to avoid a dependency on the math package.
-func isNaN(f float64) bool {
- return f != f
-}
-
-// Sort is a convenience method: x.Sort() calls Sort(x).
-func (x Float64Slice) Sort() { Sort(x) }
-
-// StringSlice attaches the methods of Interface to []string, sorting in increasing order.
-type StringSlice []string
-
-func (x StringSlice) Len() int { return len(x) }
-func (x StringSlice) Less(i, j int) bool { return x[i] < x[j] }
-func (x StringSlice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-// Sort is a convenience method: x.Sort() calls Sort(x).
-func (x StringSlice) Sort() { Sort(x) }
-
-// Convenience wrappers for common cases
-
-// Ints sorts a slice of ints in increasing order.
-func Ints(x []int) { Sort(IntSlice(x)) }
-
-// Float64s sorts a slice of float64s in increasing order.
-// Not-a-number (NaN) values are ordered before other values.
-func Float64s(x []float64) { Sort(Float64Slice(x)) }
-
-// Strings sorts a slice of strings in increasing order.
-func Strings(x []string) { Sort(StringSlice(x)) }
-
-// IntsAreSorted reports whether the slice x is sorted in increasing order.
-func IntsAreSorted(x []int) bool { return IsSorted(IntSlice(x)) }
-
-// Float64sAreSorted reports whether the slice x is sorted in increasing order,
-// with not-a-number (NaN) values before any other values.
-func Float64sAreSorted(x []float64) bool { return IsSorted(Float64Slice(x)) }
-
-// StringsAreSorted reports whether the slice x is sorted in increasing order.
-func StringsAreSorted(x []string) bool { return IsSorted(StringSlice(x)) }
-
-// Notes on stable sorting:
-// The algorithms used are simple and provably correct on all inputs, and they
-// use only logarithmic additional stack space. They perform well when compared
-// experimentally with other stable in-place sorting algorithms.
-//
-// Remarks on other algorithms evaluated:
-// - GCC's 4.6.3 stable_sort with merge_without_buffer from libstdc++:
-// Not faster.
-// - GCC's __rotate for block rotations: Not faster.
-// - "Practical in-place mergesort" from Jyrki Katajainen, Tomi A. Pasanen
-// and Jukka Teuhola; Nordic Journal of Computing 3,1 (1996), 27-40:
-// The given algorithms are in-place; the number of Swaps and assignments
-// grows as n log n, but the algorithms are not stable.
-// - "Fast Stable In-Place Sorting with O(n) Data Moves" J.I. Munro and
-// V. Raman in Algorithmica (1996) 16, 115-160:
-// This algorithm either needs additional 2n bits or works only if there
-// are enough different elements available to encode some permutations
-// which have to be undone later (so not stable on any input).
-// - All the optimal in-place sorting/merging algorithms I found are either
-// unstable or rely on enough different elements in each step to encode the
-// performed block rearrangements. See also "In-Place Merging Algorithms",
-// Denham Coates-Evely, Department of Computer Science, Kings College,
-// January 2004 and the references in there.
-// - Often "optimal" algorithms are optimal in the number of assignments
-// but Interface has only Swap as operation.
-
-// Stable sorts data in ascending order as determined by the Less method,
-// while keeping the original order of equal elements.
-//
-// It makes one call to data.Len to determine n, O(n*log(n)) calls to
-// data.Less and O(n*log(n)*log(n)) calls to data.Swap.
-func Stable(data Interface) {
- stable(data, data.Len())
-}
-
-func stable(data Interface, n int) {
- blockSize := 20 // must be > 0
- a, b := 0, blockSize
- for b <= n {
- insertionSort(data, a, b)
- a = b
- b += blockSize
- }
- insertionSort(data, a, n)
-
- for blockSize < n {
- a, b = 0, 2*blockSize
- for b <= n {
- symMerge(data, a, a+blockSize, b)
- a = b
- b += 2 * blockSize
- }
- if m := a + blockSize; m < n {
- symMerge(data, a, m, n)
- }
- blockSize *= 2
- }
-}
-
-// symMerge merges the two sorted subsequences data[a:m] and data[m:b] using
-// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
-// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
-// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
-// Computer Science, pages 714-723. Springer, 2004.
-//
-// Let M = m-a and N = b-m. Without loss of generality, M < N.
-// The recursion depth is bounded by ceil(log(N+M)).
-// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
-// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
-//
-// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
-// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
-// in the paper carries through for Swap operations, especially as the block
-// swapping rotate uses only O(M+N) Swaps.
-//
-// symMerge assumes non-degenerate arguments: a < m && m < b.
-// Having the caller check this condition eliminates many leaf recursion calls,
-// which improves performance.
-func symMerge(data Interface, a, m, b int) {
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[a] into data[m:b]
- // if data[a:m] only contains one element.
- if m-a == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] >= data[a] for m <= i < b.
- // Exit the search loop with i == b in case no such index exists.
- i := m
- j := b
- for i < j {
- h := int(uint(i+j) >> 1)
- if data.Less(h, a) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[a] reaches the position before i.
- for k := a; k < i-1; k++ {
- data.Swap(k, k+1)
- }
- return
- }
-
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[m] into data[a:m]
- // if data[m:b] only contains one element.
- if b-m == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] > data[m] for a <= i < m.
- // Exit the search loop with i == m in case no such index exists.
- i := a
- j := m
- for i < j {
- h := int(uint(i+j) >> 1)
- if !data.Less(m, h) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[m] reaches the position i.
- for k := m; k > i; k-- {
- data.Swap(k, k-1)
- }
- return
- }
-
- mid := int(uint(a+b) >> 1)
- n := mid + m
- var start, r int
- if m > mid {
- start = n - b
- r = mid
- } else {
- start = a
- r = m
- }
- p := n - 1
-
- for start < r {
- c := int(uint(start+r) >> 1)
- if !data.Less(p-c, c) {
- start = c + 1
- } else {
- r = c
- }
- }
-
- end := n - start
- if start < m && m < end {
- rotate(data, start, m, end)
- }
- if a < start && start < mid {
- symMerge(data, a, start, mid)
- }
- if mid < end && end < b {
- symMerge(data, mid, end, b)
- }
-}
-
-// rotate rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
-// Data of the form 'x u v y' is changed to 'x v u y'.
-// rotate performs at most b-a many calls to data.Swap,
-// and it assumes non-degenerate arguments: a < m && m < b.
-func rotate(data Interface, a, m, b int) {
- i := m - a
- j := b - m
-
- for i != j {
- if i > j {
- swapRange(data, m-i, m, j)
- i -= j
- } else {
- swapRange(data, m-i, m+j-i, i)
- j -= i
- }
- }
- // i == j
- swapRange(data, m-i, m, i)
-}
-
-/*
-Complexity of Stable Sorting
-
-
-Complexity of block swapping rotation
-
-Each Swap puts one new element into its correct, final position.
-Elements which reach their final position are no longer moved.
-Thus block swapping rotation needs |u|+|v| calls to Swap.
-This is best possible as each element might need a move.
-
-Pay attention when comparing to other optimal algorithms which
-typically count the number of assignments instead of swaps:
-E.g. the optimal algorithm of Dudzinski and Dydek for in-place
-rotations uses O(u + v + gcd(u,v)) assignments which is
-better than our O(3 * (u+v)) as gcd(u,v) <= u.
-
-
-Stable sorting by SymMerge and BlockSwap rotations
-
-SymMerge complexity for same-size input M = N:
-Calls to Less: O(M*log(N/M+1)) = O(N*log(2)) = O(N)
-Calls to Swap: O((M+N)*log(M)) = O(2*N*log(N)) = O(N*log(N))
-
-(The following argument does not fuss over a missing -1 or
-other details which do not impact the final result.)
-
-Let n = data.Len(). Assume n = 2^k.
-
-Plain merge sort performs log(n) = k iterations.
-On iteration i the algorithm merges 2^(k-i) blocks, each of size 2^i.
-
-Thus iteration i of merge sort performs:
-Calls to Less O(2^(k-i) * 2^i) = O(2^k) = O(2^log(n)) = O(n)
-Calls to Swap O(2^(k-i) * 2^i * log(2^i)) = O(2^k * i) = O(n*i)
-
-In total k = log(n) iterations are performed; so in total:
-Calls to Less O(log(n) * n)
-Calls to Swap O(n + 2*n + 3*n + ... + (k-1)*n + k*n)
- = O((k/2) * k * n) = O(n * k^2) = O(n * log^2(n))
-
-
-The above results should generalize to arbitrary n = 2^k + p
-and should not be influenced by the initial insertion-sort phase:
-Insertion sort is O(n^2) in Swap and Less, thus O(bs^2) per block of
-size bs; with n/bs blocks that is O(bs*n) Swaps and Less during insertion sort.
-Merge sort iterations start at i = log(bs). With t = log(bs) constant:
-Calls to Less O((log(n)-t) * n + bs*n) = O(log(n)*n + (bs-t)*n)
- = O(n * log(n))
-Calls to Swap O(n * log^2(n) - (t^2+t)/2*n) = O(n * log^2(n))
-
-*/
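
A short sketch of implementing the Interface type on a concrete type and choosing between Sort and Stable (byLen is illustrative, not from the package):

	package main

	import (
		"fmt"
		"sort"
	)

	// byLen orders strings by length; strings of equal length compare equal.
	type byLen []string

	func (x byLen) Len() int           { return len(x) }
	func (x byLen) Less(i, j int) bool { return len(x[i]) < len(x[j]) }
	func (x byLen) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }

	func main() {
		words := byLen{"go", "gopher", "ab", "tree"}
		sort.Stable(words) // Stable keeps "go" before "ab" (both length 2).
		fmt.Println(words, sort.IsSorted(words)) // [go ab tree gopher] true
	}
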
diff --git a/contrib/go/_std_1.18/src/sort/zfuncversion.go b/contrib/go/_std_1.18/src/sort/zfuncversion.go
deleted file mode 100644
index 30067cbe07..0000000000
--- a/contrib/go/_std_1.18/src/sort/zfuncversion.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Code generated from sort.go using genzfunc.go; DO NOT EDIT.
-
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sort
-
-// Auto-generated variant of sort.go:insertionSort
-func insertionSort_func(data lessSwap, a, b int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && data.Less(j, j-1); j-- {
- data.Swap(j, j-1)
- }
- }
-}
-
-// Auto-generated variant of sort.go:siftDown
-func siftDown_func(data lessSwap, lo, hi, first int) {
- root := lo
- for {
- child := 2*root + 1
- if child >= hi {
- break
- }
- if child+1 < hi && data.Less(first+child, first+child+1) {
- child++
- }
- if !data.Less(first+root, first+child) {
- return
- }
- data.Swap(first+root, first+child)
- root = child
- }
-}
-
-// Auto-generated variant of sort.go:heapSort
-func heapSort_func(data lessSwap, a, b int) {
- first := a
- lo := 0
- hi := b - a
- for i := (hi - 1) / 2; i >= 0; i-- {
- siftDown_func(data, i, hi, first)
- }
- for i := hi - 1; i >= 0; i-- {
- data.Swap(first, first+i)
- siftDown_func(data, lo, i, first)
- }
-}
-
-// Auto-generated variant of sort.go:medianOfThree
-func medianOfThree_func(data lessSwap, m1, m0, m2 int) {
- if data.Less(m1, m0) {
- data.Swap(m1, m0)
- }
- if data.Less(m2, m1) {
- data.Swap(m2, m1)
- if data.Less(m1, m0) {
- data.Swap(m1, m0)
- }
- }
-}
-
-// Auto-generated variant of sort.go:swapRange
-func swapRange_func(data lessSwap, a, b, n int) {
- for i := 0; i < n; i++ {
- data.Swap(a+i, b+i)
- }
-}
-
-// Auto-generated variant of sort.go:doPivot
-func doPivot_func(data lessSwap, lo, hi int) (midlo, midhi int) {
- m := int(uint(lo+hi) >> 1)
- if hi-lo > 40 {
- s := (hi - lo) / 8
- medianOfThree_func(data, lo, lo+s, lo+2*s)
- medianOfThree_func(data, m, m-s, m+s)
- medianOfThree_func(data, hi-1, hi-1-s, hi-1-2*s)
- }
- medianOfThree_func(data, lo, m, hi-1)
- pivot := lo
- a, c := lo+1, hi-1
- for ; a < c && data.Less(a, pivot); a++ {
- }
- b := a
- for {
- for ; b < c && !data.Less(pivot, b); b++ {
- }
- for ; b < c && data.Less(pivot, c-1); c-- {
- }
- if b >= c {
- break
- }
- data.Swap(b, c-1)
- b++
- c--
- }
- protect := hi-c < 5
- if !protect && hi-c < (hi-lo)/4 {
- dups := 0
- if !data.Less(pivot, hi-1) {
- data.Swap(c, hi-1)
- c++
- dups++
- }
- if !data.Less(b-1, pivot) {
- b--
- dups++
- }
- if !data.Less(m, pivot) {
- data.Swap(m, b-1)
- b--
- dups++
- }
- protect = dups > 1
- }
- if protect {
- for {
- for ; a < b && !data.Less(b-1, pivot); b-- {
- }
- for ; a < b && data.Less(a, pivot); a++ {
- }
- if a >= b {
- break
- }
- data.Swap(a, b-1)
- a++
- b--
- }
- }
- data.Swap(pivot, b-1)
- return b - 1, c
-}
-
-// Auto-generated variant of sort.go:quickSort
-func quickSort_func(data lessSwap, a, b, maxDepth int) {
- for b-a > 12 {
- if maxDepth == 0 {
- heapSort_func(data, a, b)
- return
- }
- maxDepth--
- mlo, mhi := doPivot_func(data, a, b)
- if mlo-a < b-mhi {
- quickSort_func(data, a, mlo, maxDepth)
- a = mhi
- } else {
- quickSort_func(data, mhi, b, maxDepth)
- b = mlo
- }
- }
- if b-a > 1 {
- for i := a + 6; i < b; i++ {
- if data.Less(i, i-6) {
- data.Swap(i, i-6)
- }
- }
- insertionSort_func(data, a, b)
- }
-}
-
-// Auto-generated variant of sort.go:stable
-func stable_func(data lessSwap, n int) {
- blockSize := 20
- a, b := 0, blockSize
- for b <= n {
- insertionSort_func(data, a, b)
- a = b
- b += blockSize
- }
- insertionSort_func(data, a, n)
- for blockSize < n {
- a, b = 0, 2*blockSize
- for b <= n {
- symMerge_func(data, a, a+blockSize, b)
- a = b
- b += 2 * blockSize
- }
- if m := a + blockSize; m < n {
- symMerge_func(data, a, m, n)
- }
- blockSize *= 2
- }
-}
-
-// Auto-generated variant of sort.go:symMerge
-func symMerge_func(data lessSwap, a, m, b int) {
- if m-a == 1 {
- i := m
- j := b
- for i < j {
- h := int(uint(i+j) >> 1)
- if data.Less(h, a) {
- i = h + 1
- } else {
- j = h
- }
- }
- for k := a; k < i-1; k++ {
- data.Swap(k, k+1)
- }
- return
- }
- if b-m == 1 {
- i := a
- j := m
- for i < j {
- h := int(uint(i+j) >> 1)
- if !data.Less(m, h) {
- i = h + 1
- } else {
- j = h
- }
- }
- for k := m; k > i; k-- {
- data.Swap(k, k-1)
- }
- return
- }
- mid := int(uint(a+b) >> 1)
- n := mid + m
- var start, r int
- if m > mid {
- start = n - b
- r = mid
- } else {
- start = a
- r = m
- }
- p := n - 1
- for start < r {
- c := int(uint(start+r) >> 1)
- if !data.Less(p-c, c) {
- start = c + 1
- } else {
- r = c
- }
- }
- end := n - start
- if start < m && m < end {
- rotate_func(data, start, m, end)
- }
- if a < start && start < mid {
- symMerge_func(data, a, start, mid)
- }
- if mid < end && end < b {
- symMerge_func(data, mid, end, b)
- }
-}
-
-// Auto-generated variant of sort.go:rotate
-func rotate_func(data lessSwap, a, m, b int) {
- i := m - a
- j := b - m
- for i != j {
- if i > j {
- swapRange_func(data, m-i, m, j)
- i -= j
- } else {
- swapRange_func(data, m-i, m+j-i, i)
- j -= i
- }
- }
- swapRange_func(data, m-i, m, i)
-}
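
These *_func variants exist so that Slice-style sorting pays a closure call on lessSwap instead of an interface-method call on Interface. A self-contained sketch of the wiring, mirroring what Slice does via reflection (insertionSortFunc stands in for the unexported, generated insertionSort_func):

	package main

	import "fmt"

	// lessSwap mirrors the package-internal pair of closures
	// that the generated sorters operate on.
	type lessSwap struct {
		Less func(i, j int) bool
		Swap func(i, j int)
	}

	func insertionSortFunc(data lessSwap, a, b int) {
		for i := a + 1; i < b; i++ {
			for j := i; j > a && data.Less(j, j-1); j-- {
				data.Swap(j, j-1)
			}
		}
	}

	func main() {
		xs := []int{3, 1, 2}
		ls := lessSwap{
			Less: func(i, j int) bool { return xs[i] < xs[j] },
			Swap: func(i, j int) { xs[i], xs[j] = xs[j], xs[i] },
		}
		insertionSortFunc(ls, 0, len(xs))
		fmt.Println(xs) // [1 2 3]
	}
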
diff --git a/contrib/go/_std_1.18/src/strconv/atof.go b/contrib/go/_std_1.18/src/strconv/atof.go
deleted file mode 100644
index 57556c7047..0000000000
--- a/contrib/go/_std_1.18/src/strconv/atof.go
+++ /dev/null
@@ -1,704 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package strconv
-
-// decimal to binary floating point conversion.
-// Algorithm:
-// 1) Store input in multiprecision decimal.
-// 2) Multiply/divide decimal by powers of two until in range [0.5, 1)
-// 3) Multiply by 2^precision and round to get mantissa.
-
-import "math"
-
-var optimize = true // set to false to force slow-path conversions for testing
-
-// commonPrefixLenIgnoreCase returns the length of the common
-// prefix of s and prefix, with the character case of s ignored.
-// The prefix argument must be all lower-case.
-func commonPrefixLenIgnoreCase(s, prefix string) int {
- n := len(prefix)
- if n > len(s) {
- n = len(s)
- }
- for i := 0; i < n; i++ {
- c := s[i]
- if 'A' <= c && c <= 'Z' {
- c += 'a' - 'A'
- }
- if c != prefix[i] {
- return i
- }
- }
- return n
-}
-
-// special returns the floating-point value for the special,
-// possibly signed floating-point representations inf, infinity,
-// and NaN. The result is ok if a prefix of s contains one
-// of these representations and n is the length of that prefix.
-// The character case is ignored.
-func special(s string) (f float64, n int, ok bool) {
- if len(s) == 0 {
- return 0, 0, false
- }
- sign := 1
- nsign := 0
- switch s[0] {
- case '+', '-':
- if s[0] == '-' {
- sign = -1
- }
- nsign = 1
- s = s[1:]
- fallthrough
- case 'i', 'I':
- n := commonPrefixLenIgnoreCase(s, "infinity")
- // Anything longer than "inf" is ok, but if we
- // don't have "infinity", only consume "inf".
- if 3 < n && n < 8 {
- n = 3
- }
- if n == 3 || n == 8 {
- return math.Inf(sign), nsign + n, true
- }
- case 'n', 'N':
- if commonPrefixLenIgnoreCase(s, "nan") == 3 {
- return math.NaN(), 3, true
- }
- }
- return 0, 0, false
-}
-
-func (b *decimal) set(s string) (ok bool) {
- i := 0
- b.neg = false
- b.trunc = false
-
- // optional sign
- if i >= len(s) {
- return
- }
- switch {
- case s[i] == '+':
- i++
- case s[i] == '-':
- b.neg = true
- i++
- }
-
- // digits
- sawdot := false
- sawdigits := false
- for ; i < len(s); i++ {
- switch {
- case s[i] == '_':
- // readFloat already checked underscores
- continue
- case s[i] == '.':
- if sawdot {
- return
- }
- sawdot = true
- b.dp = b.nd
- continue
-
- case '0' <= s[i] && s[i] <= '9':
- sawdigits = true
- if s[i] == '0' && b.nd == 0 { // ignore leading zeros
- b.dp--
- continue
- }
- if b.nd < len(b.d) {
- b.d[b.nd] = s[i]
- b.nd++
- } else if s[i] != '0' {
- b.trunc = true
- }
- continue
- }
- break
- }
- if !sawdigits {
- return
- }
- if !sawdot {
- b.dp = b.nd
- }
-
- // optional exponent moves decimal point.
- // if we read a very large, very long number,
- // just be sure to move the decimal point by
- // a lot (say, 100000). it doesn't matter if it's
- // not the exact number.
- if i < len(s) && lower(s[i]) == 'e' {
- i++
- if i >= len(s) {
- return
- }
- esign := 1
- if s[i] == '+' {
- i++
- } else if s[i] == '-' {
- i++
- esign = -1
- }
- if i >= len(s) || s[i] < '0' || s[i] > '9' {
- return
- }
- e := 0
- for ; i < len(s) && ('0' <= s[i] && s[i] <= '9' || s[i] == '_'); i++ {
- if s[i] == '_' {
- // readFloat already checked underscores
- continue
- }
- if e < 10000 {
- e = e*10 + int(s[i]) - '0'
- }
- }
- b.dp += e * esign
- }
-
- if i != len(s) {
- return
- }
-
- ok = true
- return
-}
-
-// readFloat reads a decimal or hexadecimal mantissa and exponent from a float
-// string representation in s; the number may be followed by other characters.
-// readFloat reports the number of bytes consumed (i), and whether the number
-// is valid (ok).
-func readFloat(s string) (mantissa uint64, exp int, neg, trunc, hex bool, i int, ok bool) {
- underscores := false
-
- // optional sign
- if i >= len(s) {
- return
- }
- switch {
- case s[i] == '+':
- i++
- case s[i] == '-':
- neg = true
- i++
- }
-
- // digits
- base := uint64(10)
- maxMantDigits := 19 // 10^19 fits in uint64
- expChar := byte('e')
- if i+2 < len(s) && s[i] == '0' && lower(s[i+1]) == 'x' {
- base = 16
-		maxMantDigits = 16 // 16 hex digits (up to 16^16-1) fit in a uint64
- i += 2
- expChar = 'p'
- hex = true
- }
- sawdot := false
- sawdigits := false
- nd := 0
- ndMant := 0
- dp := 0
-loop:
- for ; i < len(s); i++ {
- switch c := s[i]; true {
- case c == '_':
- underscores = true
- continue
-
- case c == '.':
- if sawdot {
- break loop
- }
- sawdot = true
- dp = nd
- continue
-
- case '0' <= c && c <= '9':
- sawdigits = true
- if c == '0' && nd == 0 { // ignore leading zeros
- dp--
- continue
- }
- nd++
- if ndMant < maxMantDigits {
- mantissa *= base
- mantissa += uint64(c - '0')
- ndMant++
- } else if c != '0' {
- trunc = true
- }
- continue
-
- case base == 16 && 'a' <= lower(c) && lower(c) <= 'f':
- sawdigits = true
- nd++
- if ndMant < maxMantDigits {
- mantissa *= 16
- mantissa += uint64(lower(c) - 'a' + 10)
- ndMant++
- } else {
- trunc = true
- }
- continue
- }
- break
- }
- if !sawdigits {
- return
- }
- if !sawdot {
- dp = nd
- }
-
- if base == 16 {
- dp *= 4
- ndMant *= 4
- }
-
- // optional exponent moves decimal point.
- // if we read a very large, very long number,
- // just be sure to move the decimal point by
- // a lot (say, 100000). it doesn't matter if it's
- // not the exact number.
- if i < len(s) && lower(s[i]) == expChar {
- i++
- if i >= len(s) {
- return
- }
- esign := 1
- if s[i] == '+' {
- i++
- } else if s[i] == '-' {
- i++
- esign = -1
- }
- if i >= len(s) || s[i] < '0' || s[i] > '9' {
- return
- }
- e := 0
- for ; i < len(s) && ('0' <= s[i] && s[i] <= '9' || s[i] == '_'); i++ {
- if s[i] == '_' {
- underscores = true
- continue
- }
- if e < 10000 {
- e = e*10 + int(s[i]) - '0'
- }
- }
- dp += e * esign
- } else if base == 16 {
- // Must have exponent.
- return
- }
-
- if mantissa != 0 {
- exp = dp - ndMant
- }
-
- if underscores && !underscoreOK(s[:i]) {
- return
- }
-
- ok = true
- return
-}
-
-// decimal power of ten to binary power of two.
-var powtab = []int{1, 3, 6, 9, 13, 16, 19, 23, 26}
-
-func (d *decimal) floatBits(flt *floatInfo) (b uint64, overflow bool) {
- var exp int
- var mant uint64
-
- // Zero is always a special case.
- if d.nd == 0 {
- mant = 0
- exp = flt.bias
- goto out
- }
-
- // Obvious overflow/underflow.
- // These bounds are for 64-bit floats.
- // Will have to change if we want to support 80-bit floats in the future.
- if d.dp > 310 {
- goto overflow
- }
- if d.dp < -330 {
- // zero
- mant = 0
- exp = flt.bias
- goto out
- }
-
- // Scale by powers of two until in range [0.5, 1.0)
- exp = 0
- for d.dp > 0 {
- var n int
- if d.dp >= len(powtab) {
- n = 27
- } else {
- n = powtab[d.dp]
- }
- d.Shift(-n)
- exp += n
- }
- for d.dp < 0 || d.dp == 0 && d.d[0] < '5' {
- var n int
- if -d.dp >= len(powtab) {
- n = 27
- } else {
- n = powtab[-d.dp]
- }
- d.Shift(n)
- exp -= n
- }
-
- // Our range is [0.5,1) but floating point range is [1,2).
- exp--
-
- // Minimum representable exponent is flt.bias+1.
- // If the exponent is smaller, move it up and
- // adjust d accordingly.
- if exp < flt.bias+1 {
- n := flt.bias + 1 - exp
- d.Shift(-n)
- exp += n
- }
-
- if exp-flt.bias >= 1<<flt.expbits-1 {
- goto overflow
- }
-
- // Extract 1+flt.mantbits bits.
- d.Shift(int(1 + flt.mantbits))
- mant = d.RoundedInteger()
-
- // Rounding might have added a bit; shift down.
- if mant == 2<<flt.mantbits {
- mant >>= 1
- exp++
- if exp-flt.bias >= 1<<flt.expbits-1 {
- goto overflow
- }
- }
-
- // Denormalized?
- if mant&(1<<flt.mantbits) == 0 {
- exp = flt.bias
- }
- goto out
-
-overflow:
- // ±Inf
- mant = 0
- exp = 1<<flt.expbits - 1 + flt.bias
- overflow = true
-
-out:
- // Assemble bits.
- bits := mant & (uint64(1)<<flt.mantbits - 1)
- bits |= uint64((exp-flt.bias)&(1<<flt.expbits-1)) << flt.mantbits
- if d.neg {
- bits |= 1 << flt.mantbits << flt.expbits
- }
- return bits, overflow
-}
-
-// Exact powers of 10.
-var float64pow10 = []float64{
- 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
- 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
- 1e20, 1e21, 1e22,
-}
-var float32pow10 = []float32{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10}
-
-// If it is possible to convert the decimal representation to a 64-bit float f
-// exactly, entirely in floating-point math, do so, avoiding the expense of
-// decimalToFloatBits. Three common cases:
-// value is exact integer
-// value is exact integer * exact power of ten
-// value is exact integer / exact power of ten
-// These all produce potentially inexact but correctly rounded answers.
-func atof64exact(mantissa uint64, exp int, neg bool) (f float64, ok bool) {
- if mantissa>>float64info.mantbits != 0 {
- return
- }
- f = float64(mantissa)
- if neg {
- f = -f
- }
- switch {
- case exp == 0:
- // an integer.
- return f, true
- // Exact integers are <= 10^15.
- // Exact powers of ten are <= 10^22.
- case exp > 0 && exp <= 15+22: // int * 10^k
- // If exponent is big but number of digits is not,
- // can move a few zeros into the integer part.
- if exp > 22 {
- f *= float64pow10[exp-22]
- exp = 22
- }
- if f > 1e15 || f < -1e15 {
- // the exponent was really too large.
- return
- }
- return f * float64pow10[exp], true
- case exp < 0 && exp >= -22: // int / 10^k
- return f / float64pow10[-exp], true
- }
- return
-}
-
-// If it is possible to compute mantissa*10^exp as a 32-bit float f exactly,
-// entirely in floating-point math, do so, avoiding the machinery above.
-func atof32exact(mantissa uint64, exp int, neg bool) (f float32, ok bool) {
- if mantissa>>float32info.mantbits != 0 {
- return
- }
- f = float32(mantissa)
- if neg {
- f = -f
- }
- switch {
- case exp == 0:
- return f, true
- // Exact integers are <= 10^7.
- // Exact powers of ten are <= 10^10.
- case exp > 0 && exp <= 7+10: // int * 10^k
- // If exponent is big but number of digits is not,
- // can move a few zeros into the integer part.
- if exp > 10 {
- f *= float32pow10[exp-10]
- exp = 10
- }
- if f > 1e7 || f < -1e7 {
- // the exponent was really too large.
- return
- }
- return f * float32pow10[exp], true
- case exp < 0 && exp >= -10: // int / 10^k
- return f / float32pow10[-exp], true
- }
- return
-}
-
-// atofHex converts the hex floating-point string s
-// to a rounded float32 or float64 value (depending on flt==&float32info or flt==&float64info)
-// and returns it as a float64.
-// The string s has already been parsed into a mantissa, exponent, and sign (neg==true for negative).
-// If trunc is true, trailing non-zero bits have been omitted from the mantissa.
-func atofHex(s string, flt *floatInfo, mantissa uint64, exp int, neg, trunc bool) (float64, error) {
- maxExp := 1<<flt.expbits + flt.bias - 2
- minExp := flt.bias + 1
- exp += int(flt.mantbits) // mantissa now implicitly divided by 2^mantbits.
-
- // Shift mantissa and exponent to bring representation into float range.
- // Eventually we want a mantissa with a leading 1-bit followed by mantbits other bits.
- // For rounding, we need two more, where the bottom bit represents
- // whether that bit or any later bit was non-zero.
- // (If the mantissa has already lost non-zero bits, trunc is true,
- // and we OR in a 1 below after shifting left appropriately.)
- for mantissa != 0 && mantissa>>(flt.mantbits+2) == 0 {
- mantissa <<= 1
- exp--
- }
- if trunc {
- mantissa |= 1
- }
- for mantissa>>(1+flt.mantbits+2) != 0 {
- mantissa = mantissa>>1 | mantissa&1
- exp++
- }
-
- // If exponent is too negative,
- // denormalize in hopes of making it representable.
- // (The -2 is for the rounding bits.)
- for mantissa > 1 && exp < minExp-2 {
- mantissa = mantissa>>1 | mantissa&1
- exp++
- }
-
- // Round using two bottom bits.
- round := mantissa & 3
- mantissa >>= 2
- round |= mantissa & 1 // round to even (round up if mantissa is odd)
- exp += 2
- if round == 3 {
- mantissa++
- if mantissa == 1<<(1+flt.mantbits) {
- mantissa >>= 1
- exp++
- }
- }
-
- if mantissa>>flt.mantbits == 0 { // Denormal or zero.
- exp = flt.bias
- }
- var err error
- if exp > maxExp { // infinity and range error
- mantissa = 1 << flt.mantbits
- exp = maxExp + 1
- err = rangeError(fnParseFloat, s)
- }
-
- bits := mantissa & (1<<flt.mantbits - 1)
- bits |= uint64((exp-flt.bias)&(1<<flt.expbits-1)) << flt.mantbits
- if neg {
- bits |= 1 << flt.mantbits << flt.expbits
- }
- if flt == &float32info {
- return float64(math.Float32frombits(uint32(bits))), err
- }
- return math.Float64frombits(bits), err
-}
-
-const fnParseFloat = "ParseFloat"
-
-func atof32(s string) (f float32, n int, err error) {
- if val, n, ok := special(s); ok {
- return float32(val), n, nil
- }
-
- mantissa, exp, neg, trunc, hex, n, ok := readFloat(s)
- if !ok {
- return 0, n, syntaxError(fnParseFloat, s)
- }
-
- if hex {
- f, err := atofHex(s[:n], &float32info, mantissa, exp, neg, trunc)
- return float32(f), n, err
- }
-
- if optimize {
- // Try pure floating-point arithmetic conversion, and if that fails,
- // the Eisel-Lemire algorithm.
- if !trunc {
- if f, ok := atof32exact(mantissa, exp, neg); ok {
- return f, n, nil
- }
- }
- f, ok := eiselLemire32(mantissa, exp, neg)
- if ok {
- if !trunc {
- return f, n, nil
- }
- // Even if the mantissa was truncated, we may
- // have found the correct result. Confirm by
- // converting the upper mantissa bound.
- fUp, ok := eiselLemire32(mantissa+1, exp, neg)
- if ok && f == fUp {
- return f, n, nil
- }
- }
- }
-
- // Slow fallback.
- var d decimal
- if !d.set(s[:n]) {
- return 0, n, syntaxError(fnParseFloat, s)
- }
- b, ovf := d.floatBits(&float32info)
- f = math.Float32frombits(uint32(b))
- if ovf {
- err = rangeError(fnParseFloat, s)
- }
- return f, n, err
-}
-
-func atof64(s string) (f float64, n int, err error) {
- if val, n, ok := special(s); ok {
- return val, n, nil
- }
-
- mantissa, exp, neg, trunc, hex, n, ok := readFloat(s)
- if !ok {
- return 0, n, syntaxError(fnParseFloat, s)
- }
-
- if hex {
- f, err := atofHex(s[:n], &float64info, mantissa, exp, neg, trunc)
- return f, n, err
- }
-
- if optimize {
- // Try pure floating-point arithmetic conversion, and if that fails,
- // the Eisel-Lemire algorithm.
- if !trunc {
- if f, ok := atof64exact(mantissa, exp, neg); ok {
- return f, n, nil
- }
- }
- f, ok := eiselLemire64(mantissa, exp, neg)
- if ok {
- if !trunc {
- return f, n, nil
- }
- // Even if the mantissa was truncated, we may
- // have found the correct result. Confirm by
- // converting the upper mantissa bound.
- fUp, ok := eiselLemire64(mantissa+1, exp, neg)
- if ok && f == fUp {
- return f, n, nil
- }
- }
- }
-
- // Slow fallback.
- var d decimal
- if !d.set(s[:n]) {
- return 0, n, syntaxError(fnParseFloat, s)
- }
- b, ovf := d.floatBits(&float64info)
- f = math.Float64frombits(b)
- if ovf {
- err = rangeError(fnParseFloat, s)
- }
- return f, n, err
-}
-
-// ParseFloat converts the string s to a floating-point number
-// with the precision specified by bitSize: 32 for float32, or 64 for float64.
-// When bitSize=32, the result still has type float64, but it will be
-// convertible to float32 without changing its value.
-//
-// ParseFloat accepts decimal and hexadecimal floating-point number syntax.
-// If s is well-formed and near a valid floating-point number,
-// ParseFloat returns the nearest floating-point number rounded
-// using IEEE 754 unbiased rounding (round to nearest, ties to even).
-// (Parsing a hexadecimal floating-point value only rounds when
-// there are more bits in the hexadecimal representation than
-// will fit in the mantissa.)
-//
-// The errors that ParseFloat returns have concrete type *NumError
-// and include err.Num = s.
-//
-// If s is not syntactically well-formed, ParseFloat returns err.Err = ErrSyntax.
-//
-// If s is syntactically well-formed but is more than 1/2 ULP
-// away from the largest floating point number of the given size,
-// ParseFloat returns f = ±Inf, err.Err = ErrRange.
-//
-// ParseFloat recognizes the strings "NaN", and the (possibly signed) strings "Inf" and "Infinity"
-// as their respective special floating point values. It ignores case when matching.
-func ParseFloat(s string, bitSize int) (float64, error) {
- f, n, err := parseFloatPrefix(s, bitSize)
- if n != len(s) && (err == nil || err.(*NumError).Err != ErrSyntax) {
- return 0, syntaxError(fnParseFloat, s)
- }
- return f, err
-}
-
-func parseFloatPrefix(s string, bitSize int) (float64, int, error) {
- if bitSize == 32 {
- f, n, err := atof32(s)
- return float64(f), n, err
- }
- return atof64(s)
-}
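
A brief usage sketch for ParseFloat, covering hexadecimal input, the bitSize=32 behavior, and a range error (the input strings are illustrative):

	package main

	import (
		"errors"
		"fmt"
		"strconv"
	)

	func main() {
		f, _ := strconv.ParseFloat("3.1415", 64)
		fmt.Println(f) // 3.1415

		// Hexadecimal syntax: 0x1.8p1 is 1.5 * 2^1 = 3. With bitSize 32 the
		// result is still a float64, but it fits a float32 exactly.
		g, _ := strconv.ParseFloat("0x1.8p1", 32)
		fmt.Println(float32(g)) // 3

		// Out of range for float32: f is +Inf and the error wraps ErrRange.
		h, err := strconv.ParseFloat("1e40", 32)
		fmt.Println(h, errors.Is(err, strconv.ErrRange)) // +Inf true
	}
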
diff --git a/contrib/go/_std_1.18/src/strconv/atoi.go b/contrib/go/_std_1.18/src/strconv/atoi.go
deleted file mode 100644
index 631b487d97..0000000000
--- a/contrib/go/_std_1.18/src/strconv/atoi.go
+++ /dev/null
@@ -1,314 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package strconv
-
-import "errors"
-
-// lower(c) is a lower-case letter if and only if
-// c is either that lower-case letter or the equivalent upper-case letter.
-// Instead of writing c == 'x' || c == 'X' one can write lower(c) == 'x'.
-// Note that lower of non-letters can produce other non-letters.
-func lower(c byte) byte {
- return c | ('x' - 'X')
-}
-
-// ErrRange indicates that a value is out of range for the target type.
-var ErrRange = errors.New("value out of range")
-
-// ErrSyntax indicates that a value does not have the right syntax for the target type.
-var ErrSyntax = errors.New("invalid syntax")
-
-// A NumError records a failed conversion.
-type NumError struct {
- Func string // the failing function (ParseBool, ParseInt, ParseUint, ParseFloat, ParseComplex)
- Num string // the input
- Err error // the reason the conversion failed (e.g. ErrRange, ErrSyntax, etc.)
-}
-
-func (e *NumError) Error() string {
- return "strconv." + e.Func + ": " + "parsing " + Quote(e.Num) + ": " + e.Err.Error()
-}
-
-func (e *NumError) Unwrap() error { return e.Err }
-
-func syntaxError(fn, str string) *NumError {
- return &NumError{fn, str, ErrSyntax}
-}
-
-func rangeError(fn, str string) *NumError {
- return &NumError{fn, str, ErrRange}
-}
-
-func baseError(fn, str string, base int) *NumError {
- return &NumError{fn, str, errors.New("invalid base " + Itoa(base))}
-}
-
-func bitSizeError(fn, str string, bitSize int) *NumError {
- return &NumError{fn, str, errors.New("invalid bit size " + Itoa(bitSize))}
-}
-
-const intSize = 32 << (^uint(0) >> 63)
-
-// IntSize is the size in bits of an int or uint value.
-const IntSize = intSize
-
-const maxUint64 = 1<<64 - 1
-
-// ParseUint is like ParseInt but for unsigned numbers.
-//
-// A sign prefix is not permitted.
-func ParseUint(s string, base int, bitSize int) (uint64, error) {
- const fnParseUint = "ParseUint"
-
- if s == "" {
- return 0, syntaxError(fnParseUint, s)
- }
-
- base0 := base == 0
-
- s0 := s
- switch {
- case 2 <= base && base <= 36:
- // valid base; nothing to do
-
- case base == 0:
- // Look for octal, hex prefix.
- base = 10
- if s[0] == '0' {
- switch {
- case len(s) >= 3 && lower(s[1]) == 'b':
- base = 2
- s = s[2:]
- case len(s) >= 3 && lower(s[1]) == 'o':
- base = 8
- s = s[2:]
- case len(s) >= 3 && lower(s[1]) == 'x':
- base = 16
- s = s[2:]
- default:
- base = 8
- s = s[1:]
- }
- }
-
- default:
- return 0, baseError(fnParseUint, s0, base)
- }
-
- if bitSize == 0 {
- bitSize = IntSize
- } else if bitSize < 0 || bitSize > 64 {
- return 0, bitSizeError(fnParseUint, s0, bitSize)
- }
-
- // Cutoff is the smallest number such that cutoff*base > maxUint64.
- // Use compile-time constants for common cases.
- var cutoff uint64
- switch base {
- case 10:
- cutoff = maxUint64/10 + 1
- case 16:
- cutoff = maxUint64/16 + 1
- default:
- cutoff = maxUint64/uint64(base) + 1
- }
-
- maxVal := uint64(1)<<uint(bitSize) - 1
-
- underscores := false
- var n uint64
- for _, c := range []byte(s) {
- var d byte
- switch {
- case c == '_' && base0:
- underscores = true
- continue
- case '0' <= c && c <= '9':
- d = c - '0'
- case 'a' <= lower(c) && lower(c) <= 'z':
- d = lower(c) - 'a' + 10
- default:
- return 0, syntaxError(fnParseUint, s0)
- }
-
- if d >= byte(base) {
- return 0, syntaxError(fnParseUint, s0)
- }
-
- if n >= cutoff {
- // n*base overflows
- return maxVal, rangeError(fnParseUint, s0)
- }
- n *= uint64(base)
-
- n1 := n + uint64(d)
- if n1 < n || n1 > maxVal {
- // n+d overflows
- return maxVal, rangeError(fnParseUint, s0)
- }
- n = n1
- }
-
- if underscores && !underscoreOK(s0) {
- return 0, syntaxError(fnParseUint, s0)
- }
-
- return n, nil
-}
-
-// ParseInt interprets a string s in the given base (0, 2 to 36) and
-// bit size (0 to 64) and returns the corresponding value i.
-//
-// The string may begin with a leading sign: "+" or "-".
-//
-// If the base argument is 0, the true base is implied by the string's
-// prefix following the sign (if present): 2 for "0b", 8 for "0" or "0o",
-// 16 for "0x", and 10 otherwise. Also, for argument base 0 only,
-// underscore characters are permitted as defined by the Go syntax for
-// integer literals.
-//
-// The bitSize argument specifies the integer type
-// that the result must fit into. Bit sizes 0, 8, 16, 32, and 64
-// correspond to int, int8, int16, int32, and int64.
-// If bitSize is below 0 or above 64, an error is returned.
-//
-// The errors that ParseInt returns have concrete type *NumError
-// and include err.Num = s. If s is empty or contains invalid
-// digits, err.Err = ErrSyntax and the returned value is 0;
-// if the value corresponding to s cannot be represented by a
-// signed integer of the given size, err.Err = ErrRange and the
-// returned value is the maximum magnitude integer of the
-// appropriate bitSize and sign.
-func ParseInt(s string, base int, bitSize int) (i int64, err error) {
- const fnParseInt = "ParseInt"
-
- if s == "" {
- return 0, syntaxError(fnParseInt, s)
- }
-
- // Pick off leading sign.
- s0 := s
- neg := false
- if s[0] == '+' {
- s = s[1:]
- } else if s[0] == '-' {
- neg = true
- s = s[1:]
- }
-
- // Convert unsigned and check range.
- var un uint64
- un, err = ParseUint(s, base, bitSize)
- if err != nil && err.(*NumError).Err != ErrRange {
- err.(*NumError).Func = fnParseInt
- err.(*NumError).Num = s0
- return 0, err
- }
-
- if bitSize == 0 {
- bitSize = IntSize
- }
-
- cutoff := uint64(1 << uint(bitSize-1))
- if !neg && un >= cutoff {
- return int64(cutoff - 1), rangeError(fnParseInt, s0)
- }
- if neg && un > cutoff {
- return -int64(cutoff), rangeError(fnParseInt, s0)
- }
- n := int64(un)
- if neg {
- n = -n
- }
- return n, nil
-}
-
-// Atoi is equivalent to ParseInt(s, 10, 0), converted to type int.
-func Atoi(s string) (int, error) {
- const fnAtoi = "Atoi"
-
- sLen := len(s)
- if intSize == 32 && (0 < sLen && sLen < 10) ||
- intSize == 64 && (0 < sLen && sLen < 19) {
- // Fast path for small integers that fit int type.
- s0 := s
- if s[0] == '-' || s[0] == '+' {
- s = s[1:]
- if len(s) < 1 {
- return 0, &NumError{fnAtoi, s0, ErrSyntax}
- }
- }
-
- n := 0
- for _, ch := range []byte(s) {
- ch -= '0'
- if ch > 9 {
- return 0, &NumError{fnAtoi, s0, ErrSyntax}
- }
- n = n*10 + int(ch)
- }
- if s0[0] == '-' {
- n = -n
- }
- return n, nil
- }
-
- // Slow path for invalid, big, or underscored integers.
- i64, err := ParseInt(s, 10, 0)
- if nerr, ok := err.(*NumError); ok {
- nerr.Func = fnAtoi
- }
- return int(i64), err
-}
-
-// underscoreOK reports whether the underscores in s are allowed.
-// Checking them in this one function lets all the parsers skip over them simply.
-// Underscore must appear only between digits or between a base prefix and a digit.
-func underscoreOK(s string) bool {
- // saw tracks the last character (class) we saw:
- // ^ for beginning of number,
- // 0 for a digit or base prefix,
- // _ for an underscore,
- // ! for none of the above.
- saw := '^'
- i := 0
-
- // Optional sign.
- if len(s) >= 1 && (s[0] == '-' || s[0] == '+') {
- s = s[1:]
- }
-
- // Optional base prefix.
- hex := false
- if len(s) >= 2 && s[0] == '0' && (lower(s[1]) == 'b' || lower(s[1]) == 'o' || lower(s[1]) == 'x') {
- i = 2
- saw = '0' // base prefix counts as a digit for "underscore as digit separator"
- hex = lower(s[1]) == 'x'
- }
-
- // Number proper.
- for ; i < len(s); i++ {
- // Digits are always okay.
- if '0' <= s[i] && s[i] <= '9' || hex && 'a' <= lower(s[i]) && lower(s[i]) <= 'f' {
- saw = '0'
- continue
- }
- // Underscore must follow digit.
- if s[i] == '_' {
- if saw != '0' {
- return false
- }
- saw = '_'
- continue
- }
- // Underscore must also be followed by digit.
- if saw == '_' {
- return false
- }
- // Saw non-digit, non-underscore.
- saw = '!'
- }
- return saw != '_'
-}
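
A sketch of base-0 parsing (base inferred from the prefix, underscores permitted) and of inspecting the *NumError values described above:

	package main

	import (
		"errors"
		"fmt"
		"strconv"
	)

	func main() {
		// Base 0 infers base 16 from the "0x" prefix and allows underscores.
		v, _ := strconv.ParseInt("0x_1F", 0, 64)
		fmt.Println(v) // 31

		// Range error: the result is clamped and the error wraps ErrRange.
		w, err := strconv.ParseInt("300", 10, 8)
		fmt.Println(w, errors.Is(err, strconv.ErrRange)) // 127 true

		// Syntax errors carry the failing function and the input.
		var ne *strconv.NumError
		if _, err := strconv.Atoi("12a"); errors.As(err, &ne) {
			fmt.Println(ne.Func, ne.Num, errors.Is(err, strconv.ErrSyntax)) // Atoi 12a true
		}
	}
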
diff --git a/contrib/go/_std_1.18/src/strconv/doc.go b/contrib/go/_std_1.18/src/strconv/doc.go
deleted file mode 100644
index 8db725f96a..0000000000
--- a/contrib/go/_std_1.18/src/strconv/doc.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package strconv implements conversions to and from string representations
-// of basic data types.
-//
-// Numeric Conversions
-//
-// The most common numeric conversions are Atoi (string to int) and Itoa (int to string).
-//
-// i, err := strconv.Atoi("-42")
-// s := strconv.Itoa(-42)
-//
-// These assume decimal and the Go int type.
-//
-// ParseBool, ParseFloat, ParseInt, and ParseUint convert strings to values:
-//
-// b, err := strconv.ParseBool("true")
-// f, err := strconv.ParseFloat("3.1415", 64)
-// i, err := strconv.ParseInt("-42", 10, 64)
-// u, err := strconv.ParseUint("42", 10, 64)
-//
-// The parse functions return the widest type (float64, int64, and uint64),
-// but if the size argument specifies a narrower width the result can be
-// converted to that narrower type without data loss:
-//
-// s := "2147483647" // biggest int32
-// i64, err := strconv.ParseInt(s, 10, 32)
-// ...
-// i := int32(i64)
-//
-// FormatBool, FormatFloat, FormatInt, and FormatUint convert values to strings:
-//
-// s := strconv.FormatBool(true)
-// s := strconv.FormatFloat(3.1415, 'E', -1, 64)
-// s := strconv.FormatInt(-42, 16)
-// s := strconv.FormatUint(42, 16)
-//
-// AppendBool, AppendFloat, AppendInt, and AppendUint are similar but
-// append the formatted value to a destination slice.
-//
-// String Conversions
-//
-// Quote and QuoteToASCII convert strings to quoted Go string literals.
-// The latter guarantees that the result is an ASCII string, by escaping
-// any non-ASCII Unicode with \u:
-//
-// q := strconv.Quote("Hello, 世界")
-// q := strconv.QuoteToASCII("Hello, 世界")
-//
-// QuoteRune and QuoteRuneToASCII are similar but accept runes and
-// return quoted Go rune literals.
-//
-// Unquote and UnquoteChar unquote Go string and rune literals.
-//
-package strconv
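The Append family that the doc comment mentions only briefly writes into a caller-supplied byte slice, avoiding the intermediate string allocation of the Format functions. A short usage sketch:

	package main

	import (
		"fmt"
		"strconv"
	)

	func main() {
		buf := []byte("x=")
		buf = strconv.AppendInt(buf, -42, 10)
		buf = append(buf, ' ')
		buf = strconv.AppendFloat(buf, 3.1415, 'E', -1, 64)
		fmt.Println(string(buf)) // x=-42 3.1415E+00
	}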
diff --git a/contrib/go/_std_1.18/src/strconv/eisel_lemire.go b/contrib/go/_std_1.18/src/strconv/eisel_lemire.go
deleted file mode 100644
index fecd1b9345..0000000000
--- a/contrib/go/_std_1.18/src/strconv/eisel_lemire.go
+++ /dev/null
@@ -1,884 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package strconv
-
-// This file implements the Eisel-Lemire ParseFloat algorithm, published in
-// 2020 and discussed extensively at
-// https://nigeltao.github.io/blog/2020/eisel-lemire.html
-//
-// The original C++ implementation is at
-// https://github.com/lemire/fast_double_parser/blob/644bef4306059d3be01a04e77d3cc84b379c596f/include/fast_double_parser.h#L840
-//
-// This Go re-implementation closely follows the C re-implementation at
-// https://github.com/google/wuffs/blob/ba3818cb6b473a2ed0b38ecfc07dbbd3a97e8ae7/internal/cgen/base/floatconv-submodule-code.c#L990
-//
-// Additional testing (on several million test strings) is done by
-// https://github.com/nigeltao/parse-number-fxx-test-data/blob/5280dcfccf6d0b02a65ae282dad0b6d9de50e039/script/test-go-strconv.go
-
-import (
- "math"
- "math/bits"
-)
-
-func eiselLemire64(man uint64, exp10 int, neg bool) (f float64, ok bool) {
- // The terse comments in this function body refer to sections of the
- // https://nigeltao.github.io/blog/2020/eisel-lemire.html blog post.
-
- // Exp10 Range.
- if man == 0 {
- if neg {
- f = math.Float64frombits(0x8000000000000000) // Negative zero.
- }
- return f, true
- }
- if exp10 < detailedPowersOfTenMinExp10 || detailedPowersOfTenMaxExp10 < exp10 {
- return 0, false
- }
-
- // Normalization.
- clz := bits.LeadingZeros64(man)
- man <<= uint(clz)
- const float64ExponentBias = 1023
- retExp2 := uint64(217706*exp10>>16+64+float64ExponentBias) - uint64(clz)
-
- // Multiplication.
- xHi, xLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][1])
-
- // Wider Approximation.
- if xHi&0x1FF == 0x1FF && xLo+man < man {
- yHi, yLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][0])
- mergedHi, mergedLo := xHi, xLo+yHi
- if mergedLo < xLo {
- mergedHi++
- }
- if mergedHi&0x1FF == 0x1FF && mergedLo+1 == 0 && yLo+man < man {
- return 0, false
- }
- xHi, xLo = mergedHi, mergedLo
- }
-
- // Shifting to 54 Bits.
- msb := xHi >> 63
- retMantissa := xHi >> (msb + 9)
- retExp2 -= 1 ^ msb
-
- // Half-way Ambiguity.
- if xLo == 0 && xHi&0x1FF == 0 && retMantissa&3 == 1 {
- return 0, false
- }
-
- // From 54 to 53 Bits.
- retMantissa += retMantissa & 1
- retMantissa >>= 1
- if retMantissa>>53 > 0 {
- retMantissa >>= 1
- retExp2 += 1
- }
- // retExp2 is a uint64. Zero or underflow means that we're in subnormal
- // float64 space. 0x7FF or above means that we're in Inf/NaN float64 space.
- //
- // The if block is equivalent to (but has fewer branches than):
- // if retExp2 <= 0 || retExp2 >= 0x7FF { etc }
- if retExp2-1 >= 0x7FF-1 {
- return 0, false
- }
- retBits := retExp2<<52 | retMantissa&0x000FFFFFFFFFFFFF
- if neg {
- retBits |= 0x8000000000000000
- }
- return math.Float64frombits(retBits), true
-}
-
-func eiselLemire32(man uint64, exp10 int, neg bool) (f float32, ok bool) {
- // The terse comments in this function body refer to sections of the
- // https://nigeltao.github.io/blog/2020/eisel-lemire.html blog post.
- //
- // That blog post discusses the float64 flavor (11 exponent bits with a
- // -1023 bias, 52 mantissa bits) of the algorithm, but the same approach
- // applies to the float32 flavor (8 exponent bits with a -127 bias, 23
- // mantissa bits). The computation here happens with 64-bit values (e.g.
- // man, xHi, retMantissa) before finally converting to a 32-bit float.
-
- // Exp10 Range.
- if man == 0 {
- if neg {
- f = math.Float32frombits(0x80000000) // Negative zero.
- }
- return f, true
- }
- if exp10 < detailedPowersOfTenMinExp10 || detailedPowersOfTenMaxExp10 < exp10 {
- return 0, false
- }
-
- // Normalization.
- clz := bits.LeadingZeros64(man)
- man <<= uint(clz)
- const float32ExponentBias = 127
- retExp2 := uint64(217706*exp10>>16+64+float32ExponentBias) - uint64(clz)
-
- // Multiplication.
- xHi, xLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][1])
-
- // Wider Approximation.
- if xHi&0x3FFFFFFFFF == 0x3FFFFFFFFF && xLo+man < man {
- yHi, yLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][0])
- mergedHi, mergedLo := xHi, xLo+yHi
- if mergedLo < xLo {
- mergedHi++
- }
- if mergedHi&0x3FFFFFFFFF == 0x3FFFFFFFFF && mergedLo+1 == 0 && yLo+man < man {
- return 0, false
- }
- xHi, xLo = mergedHi, mergedLo
- }
-
- // Shifting to 54 Bits (and for float32, it's shifting to 25 bits).
- msb := xHi >> 63
- retMantissa := xHi >> (msb + 38)
- retExp2 -= 1 ^ msb
-
- // Half-way Ambiguity.
- if xLo == 0 && xHi&0x3FFFFFFFFF == 0 && retMantissa&3 == 1 {
- return 0, false
- }
-
- // From 54 to 53 Bits (and for float32, it's from 25 to 24 bits).
- retMantissa += retMantissa & 1
- retMantissa >>= 1
- if retMantissa>>24 > 0 {
- retMantissa >>= 1
- retExp2 += 1
- }
- // retExp2 is a uint64. Zero or underflow means that we're in subnormal
- // float32 space. 0xFF or above means that we're in Inf/NaN float32 space.
- //
- // The if block is equivalent to (but has fewer branches than):
- // if retExp2 <= 0 || retExp2 >= 0xFF { etc }
- if retExp2-1 >= 0xFF-1 {
- return 0, false
- }
- retBits := retExp2<<23 | retMantissa&0x007FFFFF
- if neg {
- retBits |= 0x80000000
- }
- return math.Float32frombits(uint32(retBits)), true
-}
-
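Both flavors derive the binary exponent from the decimal one via the fixed-point constant 217706/65536, an approximation of log2(10) with 16 fractional bits; when the truncated 128-bit product leaves rounding ambiguous they return ok=false, and ParseFloat falls back to the exact slow path. A quick check of that constant, independent of the deleted code:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		fmt.Println(217706.0 / 65536.0) // 3.321929931640625
		fmt.Println(math.Log2(10))      // 3.321928094887362

		// For exp10 = 100: 10^100 is about 2^332.19, and the
		// fixed-point form 217706*100>>16 recovers the floor, 332.
		fmt.Println(217706 * 100 >> 16) // 332
	}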
-// detailedPowersOfTen{Min,Max}Exp10 is the power of 10 represented by the
-// first and last rows of detailedPowersOfTen. Both bounds are inclusive.
-const (
- detailedPowersOfTenMinExp10 = -348
- detailedPowersOfTenMaxExp10 = +347
-)
-
-// detailedPowersOfTen contains 128-bit mantissa approximations (rounded down)
-// to the powers of 10. For example:
-//
-// - 1e43 ≈ (0xE596B7B0_C643C719 * (2 ** 79))
-// - 1e43 = (0xE596B7B0_C643C719_6D9CCD05_D0000000 * (2 ** 15))
-//
-// The mantissas are explicitly listed. The exponents are implied by a linear
-// expression with slope 217706.0/65536.0 ≈ log(10)/log(2).
-//
-// The table was generated by
-// https://github.com/google/wuffs/blob/ba3818cb6b473a2ed0b38ecfc07dbbd3a97e8ae7/script/print-mpb-powers-of-10.go
-var detailedPowersOfTen = [...][2]uint64{
- {0x1732C869CD60E453, 0xFA8FD5A0081C0288}, // 1e-348
- {0x0E7FBD42205C8EB4, 0x9C99E58405118195}, // 1e-347
- {0x521FAC92A873B261, 0xC3C05EE50655E1FA}, // 1e-346
- {0xE6A797B752909EF9, 0xF4B0769E47EB5A78}, // 1e-345
- {0x9028BED2939A635C, 0x98EE4A22ECF3188B}, // 1e-344
- {0x7432EE873880FC33, 0xBF29DCABA82FDEAE}, // 1e-343
- {0x113FAA2906A13B3F, 0xEEF453D6923BD65A}, // 1e-342
- {0x4AC7CA59A424C507, 0x9558B4661B6565F8}, // 1e-341
- {0x5D79BCF00D2DF649, 0xBAAEE17FA23EBF76}, // 1e-340
- {0xF4D82C2C107973DC, 0xE95A99DF8ACE6F53}, // 1e-339
- {0x79071B9B8A4BE869, 0x91D8A02BB6C10594}, // 1e-338
- {0x9748E2826CDEE284, 0xB64EC836A47146F9}, // 1e-337
- {0xFD1B1B2308169B25, 0xE3E27A444D8D98B7}, // 1e-336
- {0xFE30F0F5E50E20F7, 0x8E6D8C6AB0787F72}, // 1e-335
- {0xBDBD2D335E51A935, 0xB208EF855C969F4F}, // 1e-334
- {0xAD2C788035E61382, 0xDE8B2B66B3BC4723}, // 1e-333
- {0x4C3BCB5021AFCC31, 0x8B16FB203055AC76}, // 1e-332
- {0xDF4ABE242A1BBF3D, 0xADDCB9E83C6B1793}, // 1e-331
- {0xD71D6DAD34A2AF0D, 0xD953E8624B85DD78}, // 1e-330
- {0x8672648C40E5AD68, 0x87D4713D6F33AA6B}, // 1e-329
- {0x680EFDAF511F18C2, 0xA9C98D8CCB009506}, // 1e-328
- {0x0212BD1B2566DEF2, 0xD43BF0EFFDC0BA48}, // 1e-327
- {0x014BB630F7604B57, 0x84A57695FE98746D}, // 1e-326
- {0x419EA3BD35385E2D, 0xA5CED43B7E3E9188}, // 1e-325
- {0x52064CAC828675B9, 0xCF42894A5DCE35EA}, // 1e-324
- {0x7343EFEBD1940993, 0x818995CE7AA0E1B2}, // 1e-323
- {0x1014EBE6C5F90BF8, 0xA1EBFB4219491A1F}, // 1e-322
- {0xD41A26E077774EF6, 0xCA66FA129F9B60A6}, // 1e-321
- {0x8920B098955522B4, 0xFD00B897478238D0}, // 1e-320
- {0x55B46E5F5D5535B0, 0x9E20735E8CB16382}, // 1e-319
- {0xEB2189F734AA831D, 0xC5A890362FDDBC62}, // 1e-318
- {0xA5E9EC7501D523E4, 0xF712B443BBD52B7B}, // 1e-317
- {0x47B233C92125366E, 0x9A6BB0AA55653B2D}, // 1e-316
- {0x999EC0BB696E840A, 0xC1069CD4EABE89F8}, // 1e-315
- {0xC00670EA43CA250D, 0xF148440A256E2C76}, // 1e-314
- {0x380406926A5E5728, 0x96CD2A865764DBCA}, // 1e-313
- {0xC605083704F5ECF2, 0xBC807527ED3E12BC}, // 1e-312
- {0xF7864A44C633682E, 0xEBA09271E88D976B}, // 1e-311
- {0x7AB3EE6AFBE0211D, 0x93445B8731587EA3}, // 1e-310
- {0x5960EA05BAD82964, 0xB8157268FDAE9E4C}, // 1e-309
- {0x6FB92487298E33BD, 0xE61ACF033D1A45DF}, // 1e-308
- {0xA5D3B6D479F8E056, 0x8FD0C16206306BAB}, // 1e-307
- {0x8F48A4899877186C, 0xB3C4F1BA87BC8696}, // 1e-306
- {0x331ACDABFE94DE87, 0xE0B62E2929ABA83C}, // 1e-305
- {0x9FF0C08B7F1D0B14, 0x8C71DCD9BA0B4925}, // 1e-304
- {0x07ECF0AE5EE44DD9, 0xAF8E5410288E1B6F}, // 1e-303
- {0xC9E82CD9F69D6150, 0xDB71E91432B1A24A}, // 1e-302
- {0xBE311C083A225CD2, 0x892731AC9FAF056E}, // 1e-301
- {0x6DBD630A48AAF406, 0xAB70FE17C79AC6CA}, // 1e-300
- {0x092CBBCCDAD5B108, 0xD64D3D9DB981787D}, // 1e-299
- {0x25BBF56008C58EA5, 0x85F0468293F0EB4E}, // 1e-298
- {0xAF2AF2B80AF6F24E, 0xA76C582338ED2621}, // 1e-297
- {0x1AF5AF660DB4AEE1, 0xD1476E2C07286FAA}, // 1e-296
- {0x50D98D9FC890ED4D, 0x82CCA4DB847945CA}, // 1e-295
- {0xE50FF107BAB528A0, 0xA37FCE126597973C}, // 1e-294
- {0x1E53ED49A96272C8, 0xCC5FC196FEFD7D0C}, // 1e-293
- {0x25E8E89C13BB0F7A, 0xFF77B1FCBEBCDC4F}, // 1e-292
- {0x77B191618C54E9AC, 0x9FAACF3DF73609B1}, // 1e-291
- {0xD59DF5B9EF6A2417, 0xC795830D75038C1D}, // 1e-290
- {0x4B0573286B44AD1D, 0xF97AE3D0D2446F25}, // 1e-289
- {0x4EE367F9430AEC32, 0x9BECCE62836AC577}, // 1e-288
- {0x229C41F793CDA73F, 0xC2E801FB244576D5}, // 1e-287
- {0x6B43527578C1110F, 0xF3A20279ED56D48A}, // 1e-286
- {0x830A13896B78AAA9, 0x9845418C345644D6}, // 1e-285
- {0x23CC986BC656D553, 0xBE5691EF416BD60C}, // 1e-284
- {0x2CBFBE86B7EC8AA8, 0xEDEC366B11C6CB8F}, // 1e-283
- {0x7BF7D71432F3D6A9, 0x94B3A202EB1C3F39}, // 1e-282
- {0xDAF5CCD93FB0CC53, 0xB9E08A83A5E34F07}, // 1e-281
- {0xD1B3400F8F9CFF68, 0xE858AD248F5C22C9}, // 1e-280
- {0x23100809B9C21FA1, 0x91376C36D99995BE}, // 1e-279
- {0xABD40A0C2832A78A, 0xB58547448FFFFB2D}, // 1e-278
- {0x16C90C8F323F516C, 0xE2E69915B3FFF9F9}, // 1e-277
- {0xAE3DA7D97F6792E3, 0x8DD01FAD907FFC3B}, // 1e-276
- {0x99CD11CFDF41779C, 0xB1442798F49FFB4A}, // 1e-275
- {0x40405643D711D583, 0xDD95317F31C7FA1D}, // 1e-274
- {0x482835EA666B2572, 0x8A7D3EEF7F1CFC52}, // 1e-273
- {0xDA3243650005EECF, 0xAD1C8EAB5EE43B66}, // 1e-272
- {0x90BED43E40076A82, 0xD863B256369D4A40}, // 1e-271
- {0x5A7744A6E804A291, 0x873E4F75E2224E68}, // 1e-270
- {0x711515D0A205CB36, 0xA90DE3535AAAE202}, // 1e-269
- {0x0D5A5B44CA873E03, 0xD3515C2831559A83}, // 1e-268
- {0xE858790AFE9486C2, 0x8412D9991ED58091}, // 1e-267
- {0x626E974DBE39A872, 0xA5178FFF668AE0B6}, // 1e-266
- {0xFB0A3D212DC8128F, 0xCE5D73FF402D98E3}, // 1e-265
- {0x7CE66634BC9D0B99, 0x80FA687F881C7F8E}, // 1e-264
- {0x1C1FFFC1EBC44E80, 0xA139029F6A239F72}, // 1e-263
- {0xA327FFB266B56220, 0xC987434744AC874E}, // 1e-262
- {0x4BF1FF9F0062BAA8, 0xFBE9141915D7A922}, // 1e-261
- {0x6F773FC3603DB4A9, 0x9D71AC8FADA6C9B5}, // 1e-260
- {0xCB550FB4384D21D3, 0xC4CE17B399107C22}, // 1e-259
- {0x7E2A53A146606A48, 0xF6019DA07F549B2B}, // 1e-258
- {0x2EDA7444CBFC426D, 0x99C102844F94E0FB}, // 1e-257
- {0xFA911155FEFB5308, 0xC0314325637A1939}, // 1e-256
- {0x793555AB7EBA27CA, 0xF03D93EEBC589F88}, // 1e-255
- {0x4BC1558B2F3458DE, 0x96267C7535B763B5}, // 1e-254
- {0x9EB1AAEDFB016F16, 0xBBB01B9283253CA2}, // 1e-253
- {0x465E15A979C1CADC, 0xEA9C227723EE8BCB}, // 1e-252
- {0x0BFACD89EC191EC9, 0x92A1958A7675175F}, // 1e-251
- {0xCEF980EC671F667B, 0xB749FAED14125D36}, // 1e-250
- {0x82B7E12780E7401A, 0xE51C79A85916F484}, // 1e-249
- {0xD1B2ECB8B0908810, 0x8F31CC0937AE58D2}, // 1e-248
- {0x861FA7E6DCB4AA15, 0xB2FE3F0B8599EF07}, // 1e-247
- {0x67A791E093E1D49A, 0xDFBDCECE67006AC9}, // 1e-246
- {0xE0C8BB2C5C6D24E0, 0x8BD6A141006042BD}, // 1e-245
- {0x58FAE9F773886E18, 0xAECC49914078536D}, // 1e-244
- {0xAF39A475506A899E, 0xDA7F5BF590966848}, // 1e-243
- {0x6D8406C952429603, 0x888F99797A5E012D}, // 1e-242
- {0xC8E5087BA6D33B83, 0xAAB37FD7D8F58178}, // 1e-241
- {0xFB1E4A9A90880A64, 0xD5605FCDCF32E1D6}, // 1e-240
- {0x5CF2EEA09A55067F, 0x855C3BE0A17FCD26}, // 1e-239
- {0xF42FAA48C0EA481E, 0xA6B34AD8C9DFC06F}, // 1e-238
- {0xF13B94DAF124DA26, 0xD0601D8EFC57B08B}, // 1e-237
- {0x76C53D08D6B70858, 0x823C12795DB6CE57}, // 1e-236
- {0x54768C4B0C64CA6E, 0xA2CB1717B52481ED}, // 1e-235
- {0xA9942F5DCF7DFD09, 0xCB7DDCDDA26DA268}, // 1e-234
- {0xD3F93B35435D7C4C, 0xFE5D54150B090B02}, // 1e-233
- {0xC47BC5014A1A6DAF, 0x9EFA548D26E5A6E1}, // 1e-232
- {0x359AB6419CA1091B, 0xC6B8E9B0709F109A}, // 1e-231
- {0xC30163D203C94B62, 0xF867241C8CC6D4C0}, // 1e-230
- {0x79E0DE63425DCF1D, 0x9B407691D7FC44F8}, // 1e-229
- {0x985915FC12F542E4, 0xC21094364DFB5636}, // 1e-228
- {0x3E6F5B7B17B2939D, 0xF294B943E17A2BC4}, // 1e-227
- {0xA705992CEECF9C42, 0x979CF3CA6CEC5B5A}, // 1e-226
- {0x50C6FF782A838353, 0xBD8430BD08277231}, // 1e-225
- {0xA4F8BF5635246428, 0xECE53CEC4A314EBD}, // 1e-224
- {0x871B7795E136BE99, 0x940F4613AE5ED136}, // 1e-223
- {0x28E2557B59846E3F, 0xB913179899F68584}, // 1e-222
- {0x331AEADA2FE589CF, 0xE757DD7EC07426E5}, // 1e-221
- {0x3FF0D2C85DEF7621, 0x9096EA6F3848984F}, // 1e-220
- {0x0FED077A756B53A9, 0xB4BCA50B065ABE63}, // 1e-219
- {0xD3E8495912C62894, 0xE1EBCE4DC7F16DFB}, // 1e-218
- {0x64712DD7ABBBD95C, 0x8D3360F09CF6E4BD}, // 1e-217
- {0xBD8D794D96AACFB3, 0xB080392CC4349DEC}, // 1e-216
- {0xECF0D7A0FC5583A0, 0xDCA04777F541C567}, // 1e-215
- {0xF41686C49DB57244, 0x89E42CAAF9491B60}, // 1e-214
- {0x311C2875C522CED5, 0xAC5D37D5B79B6239}, // 1e-213
- {0x7D633293366B828B, 0xD77485CB25823AC7}, // 1e-212
- {0xAE5DFF9C02033197, 0x86A8D39EF77164BC}, // 1e-211
- {0xD9F57F830283FDFC, 0xA8530886B54DBDEB}, // 1e-210
- {0xD072DF63C324FD7B, 0xD267CAA862A12D66}, // 1e-209
- {0x4247CB9E59F71E6D, 0x8380DEA93DA4BC60}, // 1e-208
- {0x52D9BE85F074E608, 0xA46116538D0DEB78}, // 1e-207
- {0x67902E276C921F8B, 0xCD795BE870516656}, // 1e-206
- {0x00BA1CD8A3DB53B6, 0x806BD9714632DFF6}, // 1e-205
- {0x80E8A40ECCD228A4, 0xA086CFCD97BF97F3}, // 1e-204
- {0x6122CD128006B2CD, 0xC8A883C0FDAF7DF0}, // 1e-203
- {0x796B805720085F81, 0xFAD2A4B13D1B5D6C}, // 1e-202
- {0xCBE3303674053BB0, 0x9CC3A6EEC6311A63}, // 1e-201
- {0xBEDBFC4411068A9C, 0xC3F490AA77BD60FC}, // 1e-200
- {0xEE92FB5515482D44, 0xF4F1B4D515ACB93B}, // 1e-199
- {0x751BDD152D4D1C4A, 0x991711052D8BF3C5}, // 1e-198
- {0xD262D45A78A0635D, 0xBF5CD54678EEF0B6}, // 1e-197
- {0x86FB897116C87C34, 0xEF340A98172AACE4}, // 1e-196
- {0xD45D35E6AE3D4DA0, 0x9580869F0E7AAC0E}, // 1e-195
- {0x8974836059CCA109, 0xBAE0A846D2195712}, // 1e-194
- {0x2BD1A438703FC94B, 0xE998D258869FACD7}, // 1e-193
- {0x7B6306A34627DDCF, 0x91FF83775423CC06}, // 1e-192
- {0x1A3BC84C17B1D542, 0xB67F6455292CBF08}, // 1e-191
- {0x20CABA5F1D9E4A93, 0xE41F3D6A7377EECA}, // 1e-190
- {0x547EB47B7282EE9C, 0x8E938662882AF53E}, // 1e-189
- {0xE99E619A4F23AA43, 0xB23867FB2A35B28D}, // 1e-188
- {0x6405FA00E2EC94D4, 0xDEC681F9F4C31F31}, // 1e-187
- {0xDE83BC408DD3DD04, 0x8B3C113C38F9F37E}, // 1e-186
- {0x9624AB50B148D445, 0xAE0B158B4738705E}, // 1e-185
- {0x3BADD624DD9B0957, 0xD98DDAEE19068C76}, // 1e-184
- {0xE54CA5D70A80E5D6, 0x87F8A8D4CFA417C9}, // 1e-183
- {0x5E9FCF4CCD211F4C, 0xA9F6D30A038D1DBC}, // 1e-182
- {0x7647C3200069671F, 0xD47487CC8470652B}, // 1e-181
- {0x29ECD9F40041E073, 0x84C8D4DFD2C63F3B}, // 1e-180
- {0xF468107100525890, 0xA5FB0A17C777CF09}, // 1e-179
- {0x7182148D4066EEB4, 0xCF79CC9DB955C2CC}, // 1e-178
- {0xC6F14CD848405530, 0x81AC1FE293D599BF}, // 1e-177
- {0xB8ADA00E5A506A7C, 0xA21727DB38CB002F}, // 1e-176
- {0xA6D90811F0E4851C, 0xCA9CF1D206FDC03B}, // 1e-175
- {0x908F4A166D1DA663, 0xFD442E4688BD304A}, // 1e-174
- {0x9A598E4E043287FE, 0x9E4A9CEC15763E2E}, // 1e-173
- {0x40EFF1E1853F29FD, 0xC5DD44271AD3CDBA}, // 1e-172
- {0xD12BEE59E68EF47C, 0xF7549530E188C128}, // 1e-171
- {0x82BB74F8301958CE, 0x9A94DD3E8CF578B9}, // 1e-170
- {0xE36A52363C1FAF01, 0xC13A148E3032D6E7}, // 1e-169
- {0xDC44E6C3CB279AC1, 0xF18899B1BC3F8CA1}, // 1e-168
- {0x29AB103A5EF8C0B9, 0x96F5600F15A7B7E5}, // 1e-167
- {0x7415D448F6B6F0E7, 0xBCB2B812DB11A5DE}, // 1e-166
- {0x111B495B3464AD21, 0xEBDF661791D60F56}, // 1e-165
- {0xCAB10DD900BEEC34, 0x936B9FCEBB25C995}, // 1e-164
- {0x3D5D514F40EEA742, 0xB84687C269EF3BFB}, // 1e-163
- {0x0CB4A5A3112A5112, 0xE65829B3046B0AFA}, // 1e-162
- {0x47F0E785EABA72AB, 0x8FF71A0FE2C2E6DC}, // 1e-161
- {0x59ED216765690F56, 0xB3F4E093DB73A093}, // 1e-160
- {0x306869C13EC3532C, 0xE0F218B8D25088B8}, // 1e-159
- {0x1E414218C73A13FB, 0x8C974F7383725573}, // 1e-158
- {0xE5D1929EF90898FA, 0xAFBD2350644EEACF}, // 1e-157
- {0xDF45F746B74ABF39, 0xDBAC6C247D62A583}, // 1e-156
- {0x6B8BBA8C328EB783, 0x894BC396CE5DA772}, // 1e-155
- {0x066EA92F3F326564, 0xAB9EB47C81F5114F}, // 1e-154
- {0xC80A537B0EFEFEBD, 0xD686619BA27255A2}, // 1e-153
- {0xBD06742CE95F5F36, 0x8613FD0145877585}, // 1e-152
- {0x2C48113823B73704, 0xA798FC4196E952E7}, // 1e-151
- {0xF75A15862CA504C5, 0xD17F3B51FCA3A7A0}, // 1e-150
- {0x9A984D73DBE722FB, 0x82EF85133DE648C4}, // 1e-149
- {0xC13E60D0D2E0EBBA, 0xA3AB66580D5FDAF5}, // 1e-148
- {0x318DF905079926A8, 0xCC963FEE10B7D1B3}, // 1e-147
- {0xFDF17746497F7052, 0xFFBBCFE994E5C61F}, // 1e-146
- {0xFEB6EA8BEDEFA633, 0x9FD561F1FD0F9BD3}, // 1e-145
- {0xFE64A52EE96B8FC0, 0xC7CABA6E7C5382C8}, // 1e-144
- {0x3DFDCE7AA3C673B0, 0xF9BD690A1B68637B}, // 1e-143
- {0x06BEA10CA65C084E, 0x9C1661A651213E2D}, // 1e-142
- {0x486E494FCFF30A62, 0xC31BFA0FE5698DB8}, // 1e-141
- {0x5A89DBA3C3EFCCFA, 0xF3E2F893DEC3F126}, // 1e-140
- {0xF89629465A75E01C, 0x986DDB5C6B3A76B7}, // 1e-139
- {0xF6BBB397F1135823, 0xBE89523386091465}, // 1e-138
- {0x746AA07DED582E2C, 0xEE2BA6C0678B597F}, // 1e-137
- {0xA8C2A44EB4571CDC, 0x94DB483840B717EF}, // 1e-136
- {0x92F34D62616CE413, 0xBA121A4650E4DDEB}, // 1e-135
- {0x77B020BAF9C81D17, 0xE896A0D7E51E1566}, // 1e-134
- {0x0ACE1474DC1D122E, 0x915E2486EF32CD60}, // 1e-133
- {0x0D819992132456BA, 0xB5B5ADA8AAFF80B8}, // 1e-132
- {0x10E1FFF697ED6C69, 0xE3231912D5BF60E6}, // 1e-131
- {0xCA8D3FFA1EF463C1, 0x8DF5EFABC5979C8F}, // 1e-130
- {0xBD308FF8A6B17CB2, 0xB1736B96B6FD83B3}, // 1e-129
- {0xAC7CB3F6D05DDBDE, 0xDDD0467C64BCE4A0}, // 1e-128
- {0x6BCDF07A423AA96B, 0x8AA22C0DBEF60EE4}, // 1e-127
- {0x86C16C98D2C953C6, 0xAD4AB7112EB3929D}, // 1e-126
- {0xE871C7BF077BA8B7, 0xD89D64D57A607744}, // 1e-125
- {0x11471CD764AD4972, 0x87625F056C7C4A8B}, // 1e-124
- {0xD598E40D3DD89BCF, 0xA93AF6C6C79B5D2D}, // 1e-123
- {0x4AFF1D108D4EC2C3, 0xD389B47879823479}, // 1e-122
- {0xCEDF722A585139BA, 0x843610CB4BF160CB}, // 1e-121
- {0xC2974EB4EE658828, 0xA54394FE1EEDB8FE}, // 1e-120
- {0x733D226229FEEA32, 0xCE947A3DA6A9273E}, // 1e-119
- {0x0806357D5A3F525F, 0x811CCC668829B887}, // 1e-118
- {0xCA07C2DCB0CF26F7, 0xA163FF802A3426A8}, // 1e-117
- {0xFC89B393DD02F0B5, 0xC9BCFF6034C13052}, // 1e-116
- {0xBBAC2078D443ACE2, 0xFC2C3F3841F17C67}, // 1e-115
- {0xD54B944B84AA4C0D, 0x9D9BA7832936EDC0}, // 1e-114
- {0x0A9E795E65D4DF11, 0xC5029163F384A931}, // 1e-113
- {0x4D4617B5FF4A16D5, 0xF64335BCF065D37D}, // 1e-112
- {0x504BCED1BF8E4E45, 0x99EA0196163FA42E}, // 1e-111
- {0xE45EC2862F71E1D6, 0xC06481FB9BCF8D39}, // 1e-110
- {0x5D767327BB4E5A4C, 0xF07DA27A82C37088}, // 1e-109
- {0x3A6A07F8D510F86F, 0x964E858C91BA2655}, // 1e-108
- {0x890489F70A55368B, 0xBBE226EFB628AFEA}, // 1e-107
- {0x2B45AC74CCEA842E, 0xEADAB0ABA3B2DBE5}, // 1e-106
- {0x3B0B8BC90012929D, 0x92C8AE6B464FC96F}, // 1e-105
- {0x09CE6EBB40173744, 0xB77ADA0617E3BBCB}, // 1e-104
- {0xCC420A6A101D0515, 0xE55990879DDCAABD}, // 1e-103
- {0x9FA946824A12232D, 0x8F57FA54C2A9EAB6}, // 1e-102
- {0x47939822DC96ABF9, 0xB32DF8E9F3546564}, // 1e-101
- {0x59787E2B93BC56F7, 0xDFF9772470297EBD}, // 1e-100
- {0x57EB4EDB3C55B65A, 0x8BFBEA76C619EF36}, // 1e-99
- {0xEDE622920B6B23F1, 0xAEFAE51477A06B03}, // 1e-98
- {0xE95FAB368E45ECED, 0xDAB99E59958885C4}, // 1e-97
- {0x11DBCB0218EBB414, 0x88B402F7FD75539B}, // 1e-96
- {0xD652BDC29F26A119, 0xAAE103B5FCD2A881}, // 1e-95
- {0x4BE76D3346F0495F, 0xD59944A37C0752A2}, // 1e-94
- {0x6F70A4400C562DDB, 0x857FCAE62D8493A5}, // 1e-93
- {0xCB4CCD500F6BB952, 0xA6DFBD9FB8E5B88E}, // 1e-92
- {0x7E2000A41346A7A7, 0xD097AD07A71F26B2}, // 1e-91
- {0x8ED400668C0C28C8, 0x825ECC24C873782F}, // 1e-90
- {0x728900802F0F32FA, 0xA2F67F2DFA90563B}, // 1e-89
- {0x4F2B40A03AD2FFB9, 0xCBB41EF979346BCA}, // 1e-88
- {0xE2F610C84987BFA8, 0xFEA126B7D78186BC}, // 1e-87
- {0x0DD9CA7D2DF4D7C9, 0x9F24B832E6B0F436}, // 1e-86
- {0x91503D1C79720DBB, 0xC6EDE63FA05D3143}, // 1e-85
- {0x75A44C6397CE912A, 0xF8A95FCF88747D94}, // 1e-84
- {0xC986AFBE3EE11ABA, 0x9B69DBE1B548CE7C}, // 1e-83
- {0xFBE85BADCE996168, 0xC24452DA229B021B}, // 1e-82
- {0xFAE27299423FB9C3, 0xF2D56790AB41C2A2}, // 1e-81
- {0xDCCD879FC967D41A, 0x97C560BA6B0919A5}, // 1e-80
- {0x5400E987BBC1C920, 0xBDB6B8E905CB600F}, // 1e-79
- {0x290123E9AAB23B68, 0xED246723473E3813}, // 1e-78
- {0xF9A0B6720AAF6521, 0x9436C0760C86E30B}, // 1e-77
- {0xF808E40E8D5B3E69, 0xB94470938FA89BCE}, // 1e-76
- {0xB60B1D1230B20E04, 0xE7958CB87392C2C2}, // 1e-75
- {0xB1C6F22B5E6F48C2, 0x90BD77F3483BB9B9}, // 1e-74
- {0x1E38AEB6360B1AF3, 0xB4ECD5F01A4AA828}, // 1e-73
- {0x25C6DA63C38DE1B0, 0xE2280B6C20DD5232}, // 1e-72
- {0x579C487E5A38AD0E, 0x8D590723948A535F}, // 1e-71
- {0x2D835A9DF0C6D851, 0xB0AF48EC79ACE837}, // 1e-70
- {0xF8E431456CF88E65, 0xDCDB1B2798182244}, // 1e-69
- {0x1B8E9ECB641B58FF, 0x8A08F0F8BF0F156B}, // 1e-68
- {0xE272467E3D222F3F, 0xAC8B2D36EED2DAC5}, // 1e-67
- {0x5B0ED81DCC6ABB0F, 0xD7ADF884AA879177}, // 1e-66
- {0x98E947129FC2B4E9, 0x86CCBB52EA94BAEA}, // 1e-65
- {0x3F2398D747B36224, 0xA87FEA27A539E9A5}, // 1e-64
- {0x8EEC7F0D19A03AAD, 0xD29FE4B18E88640E}, // 1e-63
- {0x1953CF68300424AC, 0x83A3EEEEF9153E89}, // 1e-62
- {0x5FA8C3423C052DD7, 0xA48CEAAAB75A8E2B}, // 1e-61
- {0x3792F412CB06794D, 0xCDB02555653131B6}, // 1e-60
- {0xE2BBD88BBEE40BD0, 0x808E17555F3EBF11}, // 1e-59
- {0x5B6ACEAEAE9D0EC4, 0xA0B19D2AB70E6ED6}, // 1e-58
- {0xF245825A5A445275, 0xC8DE047564D20A8B}, // 1e-57
- {0xEED6E2F0F0D56712, 0xFB158592BE068D2E}, // 1e-56
- {0x55464DD69685606B, 0x9CED737BB6C4183D}, // 1e-55
- {0xAA97E14C3C26B886, 0xC428D05AA4751E4C}, // 1e-54
- {0xD53DD99F4B3066A8, 0xF53304714D9265DF}, // 1e-53
- {0xE546A8038EFE4029, 0x993FE2C6D07B7FAB}, // 1e-52
- {0xDE98520472BDD033, 0xBF8FDB78849A5F96}, // 1e-51
- {0x963E66858F6D4440, 0xEF73D256A5C0F77C}, // 1e-50
- {0xDDE7001379A44AA8, 0x95A8637627989AAD}, // 1e-49
- {0x5560C018580D5D52, 0xBB127C53B17EC159}, // 1e-48
- {0xAAB8F01E6E10B4A6, 0xE9D71B689DDE71AF}, // 1e-47
- {0xCAB3961304CA70E8, 0x9226712162AB070D}, // 1e-46
- {0x3D607B97C5FD0D22, 0xB6B00D69BB55C8D1}, // 1e-45
- {0x8CB89A7DB77C506A, 0xE45C10C42A2B3B05}, // 1e-44
- {0x77F3608E92ADB242, 0x8EB98A7A9A5B04E3}, // 1e-43
- {0x55F038B237591ED3, 0xB267ED1940F1C61C}, // 1e-42
- {0x6B6C46DEC52F6688, 0xDF01E85F912E37A3}, // 1e-41
- {0x2323AC4B3B3DA015, 0x8B61313BBABCE2C6}, // 1e-40
- {0xABEC975E0A0D081A, 0xAE397D8AA96C1B77}, // 1e-39
- {0x96E7BD358C904A21, 0xD9C7DCED53C72255}, // 1e-38
- {0x7E50D64177DA2E54, 0x881CEA14545C7575}, // 1e-37
- {0xDDE50BD1D5D0B9E9, 0xAA242499697392D2}, // 1e-36
- {0x955E4EC64B44E864, 0xD4AD2DBFC3D07787}, // 1e-35
- {0xBD5AF13BEF0B113E, 0x84EC3C97DA624AB4}, // 1e-34
- {0xECB1AD8AEACDD58E, 0xA6274BBDD0FADD61}, // 1e-33
- {0x67DE18EDA5814AF2, 0xCFB11EAD453994BA}, // 1e-32
- {0x80EACF948770CED7, 0x81CEB32C4B43FCF4}, // 1e-31
- {0xA1258379A94D028D, 0xA2425FF75E14FC31}, // 1e-30
- {0x096EE45813A04330, 0xCAD2F7F5359A3B3E}, // 1e-29
- {0x8BCA9D6E188853FC, 0xFD87B5F28300CA0D}, // 1e-28
- {0x775EA264CF55347D, 0x9E74D1B791E07E48}, // 1e-27
- {0x95364AFE032A819D, 0xC612062576589DDA}, // 1e-26
- {0x3A83DDBD83F52204, 0xF79687AED3EEC551}, // 1e-25
- {0xC4926A9672793542, 0x9ABE14CD44753B52}, // 1e-24
- {0x75B7053C0F178293, 0xC16D9A0095928A27}, // 1e-23
- {0x5324C68B12DD6338, 0xF1C90080BAF72CB1}, // 1e-22
- {0xD3F6FC16EBCA5E03, 0x971DA05074DA7BEE}, // 1e-21
- {0x88F4BB1CA6BCF584, 0xBCE5086492111AEA}, // 1e-20
- {0x2B31E9E3D06C32E5, 0xEC1E4A7DB69561A5}, // 1e-19
- {0x3AFF322E62439FCF, 0x9392EE8E921D5D07}, // 1e-18
- {0x09BEFEB9FAD487C2, 0xB877AA3236A4B449}, // 1e-17
- {0x4C2EBE687989A9B3, 0xE69594BEC44DE15B}, // 1e-16
- {0x0F9D37014BF60A10, 0x901D7CF73AB0ACD9}, // 1e-15
- {0x538484C19EF38C94, 0xB424DC35095CD80F}, // 1e-14
- {0x2865A5F206B06FB9, 0xE12E13424BB40E13}, // 1e-13
- {0xF93F87B7442E45D3, 0x8CBCCC096F5088CB}, // 1e-12
- {0xF78F69A51539D748, 0xAFEBFF0BCB24AAFE}, // 1e-11
- {0xB573440E5A884D1B, 0xDBE6FECEBDEDD5BE}, // 1e-10
- {0x31680A88F8953030, 0x89705F4136B4A597}, // 1e-9
- {0xFDC20D2B36BA7C3D, 0xABCC77118461CEFC}, // 1e-8
- {0x3D32907604691B4C, 0xD6BF94D5E57A42BC}, // 1e-7
- {0xA63F9A49C2C1B10F, 0x8637BD05AF6C69B5}, // 1e-6
- {0x0FCF80DC33721D53, 0xA7C5AC471B478423}, // 1e-5
- {0xD3C36113404EA4A8, 0xD1B71758E219652B}, // 1e-4
- {0x645A1CAC083126E9, 0x83126E978D4FDF3B}, // 1e-3
- {0x3D70A3D70A3D70A3, 0xA3D70A3D70A3D70A}, // 1e-2
- {0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC}, // 1e-1
- {0x0000000000000000, 0x8000000000000000}, // 1e0
- {0x0000000000000000, 0xA000000000000000}, // 1e1
- {0x0000000000000000, 0xC800000000000000}, // 1e2
- {0x0000000000000000, 0xFA00000000000000}, // 1e3
- {0x0000000000000000, 0x9C40000000000000}, // 1e4
- {0x0000000000000000, 0xC350000000000000}, // 1e5
- {0x0000000000000000, 0xF424000000000000}, // 1e6
- {0x0000000000000000, 0x9896800000000000}, // 1e7
- {0x0000000000000000, 0xBEBC200000000000}, // 1e8
- {0x0000000000000000, 0xEE6B280000000000}, // 1e9
- {0x0000000000000000, 0x9502F90000000000}, // 1e10
- {0x0000000000000000, 0xBA43B74000000000}, // 1e11
- {0x0000000000000000, 0xE8D4A51000000000}, // 1e12
- {0x0000000000000000, 0x9184E72A00000000}, // 1e13
- {0x0000000000000000, 0xB5E620F480000000}, // 1e14
- {0x0000000000000000, 0xE35FA931A0000000}, // 1e15
- {0x0000000000000000, 0x8E1BC9BF04000000}, // 1e16
- {0x0000000000000000, 0xB1A2BC2EC5000000}, // 1e17
- {0x0000000000000000, 0xDE0B6B3A76400000}, // 1e18
- {0x0000000000000000, 0x8AC7230489E80000}, // 1e19
- {0x0000000000000000, 0xAD78EBC5AC620000}, // 1e20
- {0x0000000000000000, 0xD8D726B7177A8000}, // 1e21
- {0x0000000000000000, 0x878678326EAC9000}, // 1e22
- {0x0000000000000000, 0xA968163F0A57B400}, // 1e23
- {0x0000000000000000, 0xD3C21BCECCEDA100}, // 1e24
- {0x0000000000000000, 0x84595161401484A0}, // 1e25
- {0x0000000000000000, 0xA56FA5B99019A5C8}, // 1e26
- {0x0000000000000000, 0xCECB8F27F4200F3A}, // 1e27
- {0x4000000000000000, 0x813F3978F8940984}, // 1e28
- {0x5000000000000000, 0xA18F07D736B90BE5}, // 1e29
- {0xA400000000000000, 0xC9F2C9CD04674EDE}, // 1e30
- {0x4D00000000000000, 0xFC6F7C4045812296}, // 1e31
- {0xF020000000000000, 0x9DC5ADA82B70B59D}, // 1e32
- {0x6C28000000000000, 0xC5371912364CE305}, // 1e33
- {0xC732000000000000, 0xF684DF56C3E01BC6}, // 1e34
- {0x3C7F400000000000, 0x9A130B963A6C115C}, // 1e35
- {0x4B9F100000000000, 0xC097CE7BC90715B3}, // 1e36
- {0x1E86D40000000000, 0xF0BDC21ABB48DB20}, // 1e37
- {0x1314448000000000, 0x96769950B50D88F4}, // 1e38
- {0x17D955A000000000, 0xBC143FA4E250EB31}, // 1e39
- {0x5DCFAB0800000000, 0xEB194F8E1AE525FD}, // 1e40
- {0x5AA1CAE500000000, 0x92EFD1B8D0CF37BE}, // 1e41
- {0xF14A3D9E40000000, 0xB7ABC627050305AD}, // 1e42
- {0x6D9CCD05D0000000, 0xE596B7B0C643C719}, // 1e43
- {0xE4820023A2000000, 0x8F7E32CE7BEA5C6F}, // 1e44
- {0xDDA2802C8A800000, 0xB35DBF821AE4F38B}, // 1e45
- {0xD50B2037AD200000, 0xE0352F62A19E306E}, // 1e46
- {0x4526F422CC340000, 0x8C213D9DA502DE45}, // 1e47
- {0x9670B12B7F410000, 0xAF298D050E4395D6}, // 1e48
- {0x3C0CDD765F114000, 0xDAF3F04651D47B4C}, // 1e49
- {0xA5880A69FB6AC800, 0x88D8762BF324CD0F}, // 1e50
- {0x8EEA0D047A457A00, 0xAB0E93B6EFEE0053}, // 1e51
- {0x72A4904598D6D880, 0xD5D238A4ABE98068}, // 1e52
- {0x47A6DA2B7F864750, 0x85A36366EB71F041}, // 1e53
- {0x999090B65F67D924, 0xA70C3C40A64E6C51}, // 1e54
- {0xFFF4B4E3F741CF6D, 0xD0CF4B50CFE20765}, // 1e55
- {0xBFF8F10E7A8921A4, 0x82818F1281ED449F}, // 1e56
- {0xAFF72D52192B6A0D, 0xA321F2D7226895C7}, // 1e57
- {0x9BF4F8A69F764490, 0xCBEA6F8CEB02BB39}, // 1e58
- {0x02F236D04753D5B4, 0xFEE50B7025C36A08}, // 1e59
- {0x01D762422C946590, 0x9F4F2726179A2245}, // 1e60
- {0x424D3AD2B7B97EF5, 0xC722F0EF9D80AAD6}, // 1e61
- {0xD2E0898765A7DEB2, 0xF8EBAD2B84E0D58B}, // 1e62
- {0x63CC55F49F88EB2F, 0x9B934C3B330C8577}, // 1e63
- {0x3CBF6B71C76B25FB, 0xC2781F49FFCFA6D5}, // 1e64
- {0x8BEF464E3945EF7A, 0xF316271C7FC3908A}, // 1e65
- {0x97758BF0E3CBB5AC, 0x97EDD871CFDA3A56}, // 1e66
- {0x3D52EEED1CBEA317, 0xBDE94E8E43D0C8EC}, // 1e67
- {0x4CA7AAA863EE4BDD, 0xED63A231D4C4FB27}, // 1e68
- {0x8FE8CAA93E74EF6A, 0x945E455F24FB1CF8}, // 1e69
- {0xB3E2FD538E122B44, 0xB975D6B6EE39E436}, // 1e70
- {0x60DBBCA87196B616, 0xE7D34C64A9C85D44}, // 1e71
- {0xBC8955E946FE31CD, 0x90E40FBEEA1D3A4A}, // 1e72
- {0x6BABAB6398BDBE41, 0xB51D13AEA4A488DD}, // 1e73
- {0xC696963C7EED2DD1, 0xE264589A4DCDAB14}, // 1e74
- {0xFC1E1DE5CF543CA2, 0x8D7EB76070A08AEC}, // 1e75
- {0x3B25A55F43294BCB, 0xB0DE65388CC8ADA8}, // 1e76
- {0x49EF0EB713F39EBE, 0xDD15FE86AFFAD912}, // 1e77
- {0x6E3569326C784337, 0x8A2DBF142DFCC7AB}, // 1e78
- {0x49C2C37F07965404, 0xACB92ED9397BF996}, // 1e79
- {0xDC33745EC97BE906, 0xD7E77A8F87DAF7FB}, // 1e80
- {0x69A028BB3DED71A3, 0x86F0AC99B4E8DAFD}, // 1e81
- {0xC40832EA0D68CE0C, 0xA8ACD7C0222311BC}, // 1e82
- {0xF50A3FA490C30190, 0xD2D80DB02AABD62B}, // 1e83
- {0x792667C6DA79E0FA, 0x83C7088E1AAB65DB}, // 1e84
- {0x577001B891185938, 0xA4B8CAB1A1563F52}, // 1e85
- {0xED4C0226B55E6F86, 0xCDE6FD5E09ABCF26}, // 1e86
- {0x544F8158315B05B4, 0x80B05E5AC60B6178}, // 1e87
- {0x696361AE3DB1C721, 0xA0DC75F1778E39D6}, // 1e88
- {0x03BC3A19CD1E38E9, 0xC913936DD571C84C}, // 1e89
- {0x04AB48A04065C723, 0xFB5878494ACE3A5F}, // 1e90
- {0x62EB0D64283F9C76, 0x9D174B2DCEC0E47B}, // 1e91
- {0x3BA5D0BD324F8394, 0xC45D1DF942711D9A}, // 1e92
- {0xCA8F44EC7EE36479, 0xF5746577930D6500}, // 1e93
- {0x7E998B13CF4E1ECB, 0x9968BF6ABBE85F20}, // 1e94
- {0x9E3FEDD8C321A67E, 0xBFC2EF456AE276E8}, // 1e95
- {0xC5CFE94EF3EA101E, 0xEFB3AB16C59B14A2}, // 1e96
- {0xBBA1F1D158724A12, 0x95D04AEE3B80ECE5}, // 1e97
- {0x2A8A6E45AE8EDC97, 0xBB445DA9CA61281F}, // 1e98
- {0xF52D09D71A3293BD, 0xEA1575143CF97226}, // 1e99
- {0x593C2626705F9C56, 0x924D692CA61BE758}, // 1e100
- {0x6F8B2FB00C77836C, 0xB6E0C377CFA2E12E}, // 1e101
- {0x0B6DFB9C0F956447, 0xE498F455C38B997A}, // 1e102
- {0x4724BD4189BD5EAC, 0x8EDF98B59A373FEC}, // 1e103
- {0x58EDEC91EC2CB657, 0xB2977EE300C50FE7}, // 1e104
- {0x2F2967B66737E3ED, 0xDF3D5E9BC0F653E1}, // 1e105
- {0xBD79E0D20082EE74, 0x8B865B215899F46C}, // 1e106
- {0xECD8590680A3AA11, 0xAE67F1E9AEC07187}, // 1e107
- {0xE80E6F4820CC9495, 0xDA01EE641A708DE9}, // 1e108
- {0x3109058D147FDCDD, 0x884134FE908658B2}, // 1e109
- {0xBD4B46F0599FD415, 0xAA51823E34A7EEDE}, // 1e110
- {0x6C9E18AC7007C91A, 0xD4E5E2CDC1D1EA96}, // 1e111
- {0x03E2CF6BC604DDB0, 0x850FADC09923329E}, // 1e112
- {0x84DB8346B786151C, 0xA6539930BF6BFF45}, // 1e113
- {0xE612641865679A63, 0xCFE87F7CEF46FF16}, // 1e114
- {0x4FCB7E8F3F60C07E, 0x81F14FAE158C5F6E}, // 1e115
- {0xE3BE5E330F38F09D, 0xA26DA3999AEF7749}, // 1e116
- {0x5CADF5BFD3072CC5, 0xCB090C8001AB551C}, // 1e117
- {0x73D9732FC7C8F7F6, 0xFDCB4FA002162A63}, // 1e118
- {0x2867E7FDDCDD9AFA, 0x9E9F11C4014DDA7E}, // 1e119
- {0xB281E1FD541501B8, 0xC646D63501A1511D}, // 1e120
- {0x1F225A7CA91A4226, 0xF7D88BC24209A565}, // 1e121
- {0x3375788DE9B06958, 0x9AE757596946075F}, // 1e122
- {0x0052D6B1641C83AE, 0xC1A12D2FC3978937}, // 1e123
- {0xC0678C5DBD23A49A, 0xF209787BB47D6B84}, // 1e124
- {0xF840B7BA963646E0, 0x9745EB4D50CE6332}, // 1e125
- {0xB650E5A93BC3D898, 0xBD176620A501FBFF}, // 1e126
- {0xA3E51F138AB4CEBE, 0xEC5D3FA8CE427AFF}, // 1e127
- {0xC66F336C36B10137, 0x93BA47C980E98CDF}, // 1e128
- {0xB80B0047445D4184, 0xB8A8D9BBE123F017}, // 1e129
- {0xA60DC059157491E5, 0xE6D3102AD96CEC1D}, // 1e130
- {0x87C89837AD68DB2F, 0x9043EA1AC7E41392}, // 1e131
- {0x29BABE4598C311FB, 0xB454E4A179DD1877}, // 1e132
- {0xF4296DD6FEF3D67A, 0xE16A1DC9D8545E94}, // 1e133
- {0x1899E4A65F58660C, 0x8CE2529E2734BB1D}, // 1e134
- {0x5EC05DCFF72E7F8F, 0xB01AE745B101E9E4}, // 1e135
- {0x76707543F4FA1F73, 0xDC21A1171D42645D}, // 1e136
- {0x6A06494A791C53A8, 0x899504AE72497EBA}, // 1e137
- {0x0487DB9D17636892, 0xABFA45DA0EDBDE69}, // 1e138
- {0x45A9D2845D3C42B6, 0xD6F8D7509292D603}, // 1e139
- {0x0B8A2392BA45A9B2, 0x865B86925B9BC5C2}, // 1e140
- {0x8E6CAC7768D7141E, 0xA7F26836F282B732}, // 1e141
- {0x3207D795430CD926, 0xD1EF0244AF2364FF}, // 1e142
- {0x7F44E6BD49E807B8, 0x8335616AED761F1F}, // 1e143
- {0x5F16206C9C6209A6, 0xA402B9C5A8D3A6E7}, // 1e144
- {0x36DBA887C37A8C0F, 0xCD036837130890A1}, // 1e145
- {0xC2494954DA2C9789, 0x802221226BE55A64}, // 1e146
- {0xF2DB9BAA10B7BD6C, 0xA02AA96B06DEB0FD}, // 1e147
- {0x6F92829494E5ACC7, 0xC83553C5C8965D3D}, // 1e148
- {0xCB772339BA1F17F9, 0xFA42A8B73ABBF48C}, // 1e149
- {0xFF2A760414536EFB, 0x9C69A97284B578D7}, // 1e150
- {0xFEF5138519684ABA, 0xC38413CF25E2D70D}, // 1e151
- {0x7EB258665FC25D69, 0xF46518C2EF5B8CD1}, // 1e152
- {0xEF2F773FFBD97A61, 0x98BF2F79D5993802}, // 1e153
- {0xAAFB550FFACFD8FA, 0xBEEEFB584AFF8603}, // 1e154
- {0x95BA2A53F983CF38, 0xEEAABA2E5DBF6784}, // 1e155
- {0xDD945A747BF26183, 0x952AB45CFA97A0B2}, // 1e156
- {0x94F971119AEEF9E4, 0xBA756174393D88DF}, // 1e157
- {0x7A37CD5601AAB85D, 0xE912B9D1478CEB17}, // 1e158
- {0xAC62E055C10AB33A, 0x91ABB422CCB812EE}, // 1e159
- {0x577B986B314D6009, 0xB616A12B7FE617AA}, // 1e160
- {0xED5A7E85FDA0B80B, 0xE39C49765FDF9D94}, // 1e161
- {0x14588F13BE847307, 0x8E41ADE9FBEBC27D}, // 1e162
- {0x596EB2D8AE258FC8, 0xB1D219647AE6B31C}, // 1e163
- {0x6FCA5F8ED9AEF3BB, 0xDE469FBD99A05FE3}, // 1e164
- {0x25DE7BB9480D5854, 0x8AEC23D680043BEE}, // 1e165
- {0xAF561AA79A10AE6A, 0xADA72CCC20054AE9}, // 1e166
- {0x1B2BA1518094DA04, 0xD910F7FF28069DA4}, // 1e167
- {0x90FB44D2F05D0842, 0x87AA9AFF79042286}, // 1e168
- {0x353A1607AC744A53, 0xA99541BF57452B28}, // 1e169
- {0x42889B8997915CE8, 0xD3FA922F2D1675F2}, // 1e170
- {0x69956135FEBADA11, 0x847C9B5D7C2E09B7}, // 1e171
- {0x43FAB9837E699095, 0xA59BC234DB398C25}, // 1e172
- {0x94F967E45E03F4BB, 0xCF02B2C21207EF2E}, // 1e173
- {0x1D1BE0EEBAC278F5, 0x8161AFB94B44F57D}, // 1e174
- {0x6462D92A69731732, 0xA1BA1BA79E1632DC}, // 1e175
- {0x7D7B8F7503CFDCFE, 0xCA28A291859BBF93}, // 1e176
- {0x5CDA735244C3D43E, 0xFCB2CB35E702AF78}, // 1e177
- {0x3A0888136AFA64A7, 0x9DEFBF01B061ADAB}, // 1e178
- {0x088AAA1845B8FDD0, 0xC56BAEC21C7A1916}, // 1e179
- {0x8AAD549E57273D45, 0xF6C69A72A3989F5B}, // 1e180
- {0x36AC54E2F678864B, 0x9A3C2087A63F6399}, // 1e181
- {0x84576A1BB416A7DD, 0xC0CB28A98FCF3C7F}, // 1e182
- {0x656D44A2A11C51D5, 0xF0FDF2D3F3C30B9F}, // 1e183
- {0x9F644AE5A4B1B325, 0x969EB7C47859E743}, // 1e184
- {0x873D5D9F0DDE1FEE, 0xBC4665B596706114}, // 1e185
- {0xA90CB506D155A7EA, 0xEB57FF22FC0C7959}, // 1e186
- {0x09A7F12442D588F2, 0x9316FF75DD87CBD8}, // 1e187
- {0x0C11ED6D538AEB2F, 0xB7DCBF5354E9BECE}, // 1e188
- {0x8F1668C8A86DA5FA, 0xE5D3EF282A242E81}, // 1e189
- {0xF96E017D694487BC, 0x8FA475791A569D10}, // 1e190
- {0x37C981DCC395A9AC, 0xB38D92D760EC4455}, // 1e191
- {0x85BBE253F47B1417, 0xE070F78D3927556A}, // 1e192
- {0x93956D7478CCEC8E, 0x8C469AB843B89562}, // 1e193
- {0x387AC8D1970027B2, 0xAF58416654A6BABB}, // 1e194
- {0x06997B05FCC0319E, 0xDB2E51BFE9D0696A}, // 1e195
- {0x441FECE3BDF81F03, 0x88FCF317F22241E2}, // 1e196
- {0xD527E81CAD7626C3, 0xAB3C2FDDEEAAD25A}, // 1e197
- {0x8A71E223D8D3B074, 0xD60B3BD56A5586F1}, // 1e198
- {0xF6872D5667844E49, 0x85C7056562757456}, // 1e199
- {0xB428F8AC016561DB, 0xA738C6BEBB12D16C}, // 1e200
- {0xE13336D701BEBA52, 0xD106F86E69D785C7}, // 1e201
- {0xECC0024661173473, 0x82A45B450226B39C}, // 1e202
- {0x27F002D7F95D0190, 0xA34D721642B06084}, // 1e203
- {0x31EC038DF7B441F4, 0xCC20CE9BD35C78A5}, // 1e204
- {0x7E67047175A15271, 0xFF290242C83396CE}, // 1e205
- {0x0F0062C6E984D386, 0x9F79A169BD203E41}, // 1e206
- {0x52C07B78A3E60868, 0xC75809C42C684DD1}, // 1e207
- {0xA7709A56CCDF8A82, 0xF92E0C3537826145}, // 1e208
- {0x88A66076400BB691, 0x9BBCC7A142B17CCB}, // 1e209
- {0x6ACFF893D00EA435, 0xC2ABF989935DDBFE}, // 1e210
- {0x0583F6B8C4124D43, 0xF356F7EBF83552FE}, // 1e211
- {0xC3727A337A8B704A, 0x98165AF37B2153DE}, // 1e212
- {0x744F18C0592E4C5C, 0xBE1BF1B059E9A8D6}, // 1e213
- {0x1162DEF06F79DF73, 0xEDA2EE1C7064130C}, // 1e214
- {0x8ADDCB5645AC2BA8, 0x9485D4D1C63E8BE7}, // 1e215
- {0x6D953E2BD7173692, 0xB9A74A0637CE2EE1}, // 1e216
- {0xC8FA8DB6CCDD0437, 0xE8111C87C5C1BA99}, // 1e217
- {0x1D9C9892400A22A2, 0x910AB1D4DB9914A0}, // 1e218
- {0x2503BEB6D00CAB4B, 0xB54D5E4A127F59C8}, // 1e219
- {0x2E44AE64840FD61D, 0xE2A0B5DC971F303A}, // 1e220
- {0x5CEAECFED289E5D2, 0x8DA471A9DE737E24}, // 1e221
- {0x7425A83E872C5F47, 0xB10D8E1456105DAD}, // 1e222
- {0xD12F124E28F77719, 0xDD50F1996B947518}, // 1e223
- {0x82BD6B70D99AAA6F, 0x8A5296FFE33CC92F}, // 1e224
- {0x636CC64D1001550B, 0xACE73CBFDC0BFB7B}, // 1e225
- {0x3C47F7E05401AA4E, 0xD8210BEFD30EFA5A}, // 1e226
- {0x65ACFAEC34810A71, 0x8714A775E3E95C78}, // 1e227
- {0x7F1839A741A14D0D, 0xA8D9D1535CE3B396}, // 1e228
- {0x1EDE48111209A050, 0xD31045A8341CA07C}, // 1e229
- {0x934AED0AAB460432, 0x83EA2B892091E44D}, // 1e230
- {0xF81DA84D5617853F, 0xA4E4B66B68B65D60}, // 1e231
- {0x36251260AB9D668E, 0xCE1DE40642E3F4B9}, // 1e232
- {0xC1D72B7C6B426019, 0x80D2AE83E9CE78F3}, // 1e233
- {0xB24CF65B8612F81F, 0xA1075A24E4421730}, // 1e234
- {0xDEE033F26797B627, 0xC94930AE1D529CFC}, // 1e235
- {0x169840EF017DA3B1, 0xFB9B7CD9A4A7443C}, // 1e236
- {0x8E1F289560EE864E, 0x9D412E0806E88AA5}, // 1e237
- {0xF1A6F2BAB92A27E2, 0xC491798A08A2AD4E}, // 1e238
- {0xAE10AF696774B1DB, 0xF5B5D7EC8ACB58A2}, // 1e239
- {0xACCA6DA1E0A8EF29, 0x9991A6F3D6BF1765}, // 1e240
- {0x17FD090A58D32AF3, 0xBFF610B0CC6EDD3F}, // 1e241
- {0xDDFC4B4CEF07F5B0, 0xEFF394DCFF8A948E}, // 1e242
- {0x4ABDAF101564F98E, 0x95F83D0A1FB69CD9}, // 1e243
- {0x9D6D1AD41ABE37F1, 0xBB764C4CA7A4440F}, // 1e244
- {0x84C86189216DC5ED, 0xEA53DF5FD18D5513}, // 1e245
- {0x32FD3CF5B4E49BB4, 0x92746B9BE2F8552C}, // 1e246
- {0x3FBC8C33221DC2A1, 0xB7118682DBB66A77}, // 1e247
- {0x0FABAF3FEAA5334A, 0xE4D5E82392A40515}, // 1e248
- {0x29CB4D87F2A7400E, 0x8F05B1163BA6832D}, // 1e249
- {0x743E20E9EF511012, 0xB2C71D5BCA9023F8}, // 1e250
- {0x914DA9246B255416, 0xDF78E4B2BD342CF6}, // 1e251
- {0x1AD089B6C2F7548E, 0x8BAB8EEFB6409C1A}, // 1e252
- {0xA184AC2473B529B1, 0xAE9672ABA3D0C320}, // 1e253
- {0xC9E5D72D90A2741E, 0xDA3C0F568CC4F3E8}, // 1e254
- {0x7E2FA67C7A658892, 0x8865899617FB1871}, // 1e255
- {0xDDBB901B98FEEAB7, 0xAA7EEBFB9DF9DE8D}, // 1e256
- {0x552A74227F3EA565, 0xD51EA6FA85785631}, // 1e257
- {0xD53A88958F87275F, 0x8533285C936B35DE}, // 1e258
- {0x8A892ABAF368F137, 0xA67FF273B8460356}, // 1e259
- {0x2D2B7569B0432D85, 0xD01FEF10A657842C}, // 1e260
- {0x9C3B29620E29FC73, 0x8213F56A67F6B29B}, // 1e261
- {0x8349F3BA91B47B8F, 0xA298F2C501F45F42}, // 1e262
- {0x241C70A936219A73, 0xCB3F2F7642717713}, // 1e263
- {0xED238CD383AA0110, 0xFE0EFB53D30DD4D7}, // 1e264
- {0xF4363804324A40AA, 0x9EC95D1463E8A506}, // 1e265
- {0xB143C6053EDCD0D5, 0xC67BB4597CE2CE48}, // 1e266
- {0xDD94B7868E94050A, 0xF81AA16FDC1B81DA}, // 1e267
- {0xCA7CF2B4191C8326, 0x9B10A4E5E9913128}, // 1e268
- {0xFD1C2F611F63A3F0, 0xC1D4CE1F63F57D72}, // 1e269
- {0xBC633B39673C8CEC, 0xF24A01A73CF2DCCF}, // 1e270
- {0xD5BE0503E085D813, 0x976E41088617CA01}, // 1e271
- {0x4B2D8644D8A74E18, 0xBD49D14AA79DBC82}, // 1e272
- {0xDDF8E7D60ED1219E, 0xEC9C459D51852BA2}, // 1e273
- {0xCABB90E5C942B503, 0x93E1AB8252F33B45}, // 1e274
- {0x3D6A751F3B936243, 0xB8DA1662E7B00A17}, // 1e275
- {0x0CC512670A783AD4, 0xE7109BFBA19C0C9D}, // 1e276
- {0x27FB2B80668B24C5, 0x906A617D450187E2}, // 1e277
- {0xB1F9F660802DEDF6, 0xB484F9DC9641E9DA}, // 1e278
- {0x5E7873F8A0396973, 0xE1A63853BBD26451}, // 1e279
- {0xDB0B487B6423E1E8, 0x8D07E33455637EB2}, // 1e280
- {0x91CE1A9A3D2CDA62, 0xB049DC016ABC5E5F}, // 1e281
- {0x7641A140CC7810FB, 0xDC5C5301C56B75F7}, // 1e282
- {0xA9E904C87FCB0A9D, 0x89B9B3E11B6329BA}, // 1e283
- {0x546345FA9FBDCD44, 0xAC2820D9623BF429}, // 1e284
- {0xA97C177947AD4095, 0xD732290FBACAF133}, // 1e285
- {0x49ED8EABCCCC485D, 0x867F59A9D4BED6C0}, // 1e286
- {0x5C68F256BFFF5A74, 0xA81F301449EE8C70}, // 1e287
- {0x73832EEC6FFF3111, 0xD226FC195C6A2F8C}, // 1e288
- {0xC831FD53C5FF7EAB, 0x83585D8FD9C25DB7}, // 1e289
- {0xBA3E7CA8B77F5E55, 0xA42E74F3D032F525}, // 1e290
- {0x28CE1BD2E55F35EB, 0xCD3A1230C43FB26F}, // 1e291
- {0x7980D163CF5B81B3, 0x80444B5E7AA7CF85}, // 1e292
- {0xD7E105BCC332621F, 0xA0555E361951C366}, // 1e293
- {0x8DD9472BF3FEFAA7, 0xC86AB5C39FA63440}, // 1e294
- {0xB14F98F6F0FEB951, 0xFA856334878FC150}, // 1e295
- {0x6ED1BF9A569F33D3, 0x9C935E00D4B9D8D2}, // 1e296
- {0x0A862F80EC4700C8, 0xC3B8358109E84F07}, // 1e297
- {0xCD27BB612758C0FA, 0xF4A642E14C6262C8}, // 1e298
- {0x8038D51CB897789C, 0x98E7E9CCCFBD7DBD}, // 1e299
- {0xE0470A63E6BD56C3, 0xBF21E44003ACDD2C}, // 1e300
- {0x1858CCFCE06CAC74, 0xEEEA5D5004981478}, // 1e301
- {0x0F37801E0C43EBC8, 0x95527A5202DF0CCB}, // 1e302
- {0xD30560258F54E6BA, 0xBAA718E68396CFFD}, // 1e303
- {0x47C6B82EF32A2069, 0xE950DF20247C83FD}, // 1e304
- {0x4CDC331D57FA5441, 0x91D28B7416CDD27E}, // 1e305
- {0xE0133FE4ADF8E952, 0xB6472E511C81471D}, // 1e306
- {0x58180FDDD97723A6, 0xE3D8F9E563A198E5}, // 1e307
- {0x570F09EAA7EA7648, 0x8E679C2F5E44FF8F}, // 1e308
- {0x2CD2CC6551E513DA, 0xB201833B35D63F73}, // 1e309
- {0xF8077F7EA65E58D1, 0xDE81E40A034BCF4F}, // 1e310
- {0xFB04AFAF27FAF782, 0x8B112E86420F6191}, // 1e311
- {0x79C5DB9AF1F9B563, 0xADD57A27D29339F6}, // 1e312
- {0x18375281AE7822BC, 0xD94AD8B1C7380874}, // 1e313
- {0x8F2293910D0B15B5, 0x87CEC76F1C830548}, // 1e314
- {0xB2EB3875504DDB22, 0xA9C2794AE3A3C69A}, // 1e315
- {0x5FA60692A46151EB, 0xD433179D9C8CB841}, // 1e316
- {0xDBC7C41BA6BCD333, 0x849FEEC281D7F328}, // 1e317
- {0x12B9B522906C0800, 0xA5C7EA73224DEFF3}, // 1e318
- {0xD768226B34870A00, 0xCF39E50FEAE16BEF}, // 1e319
- {0xE6A1158300D46640, 0x81842F29F2CCE375}, // 1e320
- {0x60495AE3C1097FD0, 0xA1E53AF46F801C53}, // 1e321
- {0x385BB19CB14BDFC4, 0xCA5E89B18B602368}, // 1e322
- {0x46729E03DD9ED7B5, 0xFCF62C1DEE382C42}, // 1e323
- {0x6C07A2C26A8346D1, 0x9E19DB92B4E31BA9}, // 1e324
- {0xC7098B7305241885, 0xC5A05277621BE293}, // 1e325
- {0xB8CBEE4FC66D1EA7, 0xF70867153AA2DB38}, // 1e326
- {0x737F74F1DC043328, 0x9A65406D44A5C903}, // 1e327
- {0x505F522E53053FF2, 0xC0FE908895CF3B44}, // 1e328
- {0x647726B9E7C68FEF, 0xF13E34AABB430A15}, // 1e329
- {0x5ECA783430DC19F5, 0x96C6E0EAB509E64D}, // 1e330
- {0xB67D16413D132072, 0xBC789925624C5FE0}, // 1e331
- {0xE41C5BD18C57E88F, 0xEB96BF6EBADF77D8}, // 1e332
- {0x8E91B962F7B6F159, 0x933E37A534CBAAE7}, // 1e333
- {0x723627BBB5A4ADB0, 0xB80DC58E81FE95A1}, // 1e334
- {0xCEC3B1AAA30DD91C, 0xE61136F2227E3B09}, // 1e335
- {0x213A4F0AA5E8A7B1, 0x8FCAC257558EE4E6}, // 1e336
- {0xA988E2CD4F62D19D, 0xB3BD72ED2AF29E1F}, // 1e337
- {0x93EB1B80A33B8605, 0xE0ACCFA875AF45A7}, // 1e338
- {0xBC72F130660533C3, 0x8C6C01C9498D8B88}, // 1e339
- {0xEB8FAD7C7F8680B4, 0xAF87023B9BF0EE6A}, // 1e340
- {0xA67398DB9F6820E1, 0xDB68C2CA82ED2A05}, // 1e341
- {0x88083F8943A1148C, 0x892179BE91D43A43}, // 1e342
- {0x6A0A4F6B948959B0, 0xAB69D82E364948D4}, // 1e343
- {0x848CE34679ABB01C, 0xD6444E39C3DB9B09}, // 1e344
- {0xF2D80E0C0C0B4E11, 0x85EAB0E41A6940E5}, // 1e345
- {0x6F8E118F0F0E2195, 0xA7655D1D2103911F}, // 1e346
- {0x4B7195F2D2D1A9FB, 0xD13EB46469447567}, // 1e347
-}
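Each row holds the low and high 64 bits of a truncated 128-bit mantissa, indexed by exp10-detailedPowersOfTenMinExp10. The parser's central step is a single 64x64->128-bit multiply of the normalized input mantissa by the high word, widened by the low word only when the result is too close to call. A toy sketch of that step with math/bits, reusing the 1e-1 row from the table:

	package main

	import (
		"fmt"
		"math/bits"
	)

	func main() {
		// Parsing "1.5" yields man = 15, exp10 = -1. Normalize the
		// mantissa so its top bit is set (15 has 60 leading zeros).
		man := uint64(15) << 60

		// One widening multiply by the high 64 bits of the 1e-1 row;
		// the high word of the product carries the significand bits.
		xHi, xLo := bits.Mul64(man, 0xCCCCCCCCCCCCCCCC)
		fmt.Printf("%#x %#x\n", xHi, xLo) // 0xbfffffffffffffff 0x4000000000000000
	}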
diff --git a/contrib/go/_std_1.18/src/strconv/ftoa.go b/contrib/go/_std_1.18/src/strconv/ftoa.go
deleted file mode 100644
index eca04b851c..0000000000
--- a/contrib/go/_std_1.18/src/strconv/ftoa.go
+++ /dev/null
@@ -1,582 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Binary to decimal floating point conversion.
-// Algorithm:
-// 1) store mantissa in multiprecision decimal
-// 2) shift decimal by exponent
-// 3) read digits out & format
-
-package strconv
-
-import "math"
-
-// TODO: move elsewhere?
-type floatInfo struct {
- mantbits uint
- expbits uint
- bias int
-}
-
-var float32info = floatInfo{23, 8, -127}
-var float64info = floatInfo{52, 11, -1023}
-
-// FormatFloat converts the floating-point number f to a string,
-// according to the format fmt and precision prec. It rounds the
-// result assuming that the original was obtained from a floating-point
-// value of bitSize bits (32 for float32, 64 for float64).
-//
-// The format fmt is one of
-// 'b' (-ddddp±ddd, a binary exponent),
-// 'e' (-d.dddde±dd, a decimal exponent),
-// 'E' (-d.ddddE±dd, a decimal exponent),
-// 'f' (-ddd.dddd, no exponent),
-// 'g' ('e' for large exponents, 'f' otherwise),
-// 'G' ('E' for large exponents, 'f' otherwise),
-// 'x' (-0xd.ddddp±ddd, a hexadecimal fraction and binary exponent), or
-// 'X' (-0Xd.ddddP±ddd, a hexadecimal fraction and binary exponent).
-//
-// The precision prec controls the number of digits (excluding the exponent)
-// printed by the 'e', 'E', 'f', 'g', 'G', 'x', and 'X' formats.
-// For 'e', 'E', 'f', 'x', and 'X', it is the number of digits after the decimal point.
-// For 'g' and 'G' it is the maximum number of significant digits (trailing
-// zeros are removed).
-// The special precision -1 uses the smallest number of digits
-// necessary such that ParseFloat will return f exactly.
-func FormatFloat(f float64, fmt byte, prec, bitSize int) string {
- return string(genericFtoa(make([]byte, 0, max(prec+4, 24)), f, fmt, prec, bitSize))
-}
-
-// AppendFloat appends the string form of the floating-point number f,
-// as generated by FormatFloat, to dst and returns the extended buffer.
-func AppendFloat(dst []byte, f float64, fmt byte, prec, bitSize int) []byte {
- return genericFtoa(dst, f, fmt, prec, bitSize)
-}
-
-func genericFtoa(dst []byte, val float64, fmt byte, prec, bitSize int) []byte {
- var bits uint64
- var flt *floatInfo
- switch bitSize {
- case 32:
- bits = uint64(math.Float32bits(float32(val)))
- flt = &float32info
- case 64:
- bits = math.Float64bits(val)
- flt = &float64info
- default:
- panic("strconv: illegal AppendFloat/FormatFloat bitSize")
- }
-
- neg := bits>>(flt.expbits+flt.mantbits) != 0
- exp := int(bits>>flt.mantbits) & (1<<flt.expbits - 1)
- mant := bits & (uint64(1)<<flt.mantbits - 1)
-
- switch exp {
- case 1<<flt.expbits - 1:
- // Inf, NaN
- var s string
- switch {
- case mant != 0:
- s = "NaN"
- case neg:
- s = "-Inf"
- default:
- s = "+Inf"
- }
- return append(dst, s...)
-
- case 0:
- // denormalized
- exp++
-
- default:
- // add implicit top bit
- mant |= uint64(1) << flt.mantbits
- }
- exp += flt.bias
-
- // Pick off easy binary, hex formats.
- if fmt == 'b' {
- return fmtB(dst, neg, mant, exp, flt)
- }
- if fmt == 'x' || fmt == 'X' {
- return fmtX(dst, prec, fmt, neg, mant, exp, flt)
- }
-
- if !optimize {
- return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
- }
-
- var digs decimalSlice
- ok := false
- // Negative precision means "only as much as needed to be exact."
- shortest := prec < 0
- if shortest {
- // Use Ryu algorithm.
- var buf [32]byte
- digs.d = buf[:]
- ryuFtoaShortest(&digs, mant, exp-int(flt.mantbits), flt)
- ok = true
- // Precision for shortest representation mode.
- switch fmt {
- case 'e', 'E':
- prec = max(digs.nd-1, 0)
- case 'f':
- prec = max(digs.nd-digs.dp, 0)
- case 'g', 'G':
- prec = digs.nd
- }
- } else if fmt != 'f' {
- // Fixed number of digits.
- digits := prec
- switch fmt {
- case 'e', 'E':
- digits++
- case 'g', 'G':
- if prec == 0 {
- prec = 1
- }
- digits = prec
- }
- var buf [24]byte
- if bitSize == 32 && digits <= 9 {
- digs.d = buf[:]
- ryuFtoaFixed32(&digs, uint32(mant), exp-int(flt.mantbits), digits)
- ok = true
- } else if digits <= 18 {
- digs.d = buf[:]
- ryuFtoaFixed64(&digs, mant, exp-int(flt.mantbits), digits)
- ok = true
- }
- }
- if !ok {
- return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
- }
- return formatDigits(dst, shortest, neg, digs, prec, fmt)
-}
-
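genericFtoa's dispatch is easiest to see from outside: 'b' and the hex formats short-circuit, a negative precision selects the shortest (Ryū) path, and small fixed precisions take the fixed-point Ryū variants before anything reaches bigFtoa. A few calls covering those branches:

	package main

	import (
		"fmt"
		"strconv"
	)

	func main() {
		fmt.Println(strconv.FormatFloat(1.0/3.0, 'g', -1, 64)) // 0.3333333333333333 (shortest)
		fmt.Println(strconv.FormatFloat(1.0/3.0, 'e', 4, 64))  // 3.3333e-01 (fixed precision)
		fmt.Println(strconv.FormatFloat(1024, 'b', -1, 64))    // 4503599627370496p-42
	}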
-// bigFtoa uses multiprecision computations to format a float.
-func bigFtoa(dst []byte, prec int, fmt byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte {
- d := new(decimal)
- d.Assign(mant)
- d.Shift(exp - int(flt.mantbits))
- var digs decimalSlice
- shortest := prec < 0
- if shortest {
- roundShortest(d, mant, exp, flt)
- digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
- // Precision for shortest representation mode.
- switch fmt {
- case 'e', 'E':
- prec = digs.nd - 1
- case 'f':
- prec = max(digs.nd-digs.dp, 0)
- case 'g', 'G':
- prec = digs.nd
- }
- } else {
- // Round appropriately.
- switch fmt {
- case 'e', 'E':
- d.Round(prec + 1)
- case 'f':
- d.Round(d.dp + prec)
- case 'g', 'G':
- if prec == 0 {
- prec = 1
- }
- d.Round(prec)
- }
- digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
- }
- return formatDigits(dst, shortest, neg, digs, prec, fmt)
-}
-
-func formatDigits(dst []byte, shortest bool, neg bool, digs decimalSlice, prec int, fmt byte) []byte {
- switch fmt {
- case 'e', 'E':
- return fmtE(dst, neg, digs, prec, fmt)
- case 'f':
- return fmtF(dst, neg, digs, prec)
- case 'g', 'G':
- // trailing fractional zeros in 'e' form will be trimmed.
- eprec := prec
- if eprec > digs.nd && digs.nd >= digs.dp {
- eprec = digs.nd
- }
- // %e is used if the exponent from the conversion
- // is less than -4 or greater than or equal to the precision.
- // if precision was the shortest possible, use precision 6 for this decision.
- if shortest {
- eprec = 6
- }
- exp := digs.dp - 1
- if exp < -4 || exp >= eprec {
- if prec > digs.nd {
- prec = digs.nd
- }
- return fmtE(dst, neg, digs, prec-1, fmt+'e'-'g')
- }
- if prec > digs.dp {
- prec = digs.nd
- }
- return fmtF(dst, neg, digs, max(prec-digs.dp, 0))
- }
-
- // unknown format
- return append(dst, '%', fmt)
-}
-
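The exponent test in formatDigits (exp < -4 || exp >= eprec, with eprec pinned to 6 in shortest mode) is the familiar %g rule, visible from the API:

	package main

	import (
		"fmt"
		"strconv"
	)

	func main() {
		fmt.Println(strconv.FormatFloat(0.0001, 'g', -1, 64))  // 0.0001 (exp = -4: still 'f')
		fmt.Println(strconv.FormatFloat(0.00001, 'g', -1, 64)) // 1e-05  (exp < -4: 'e')
		fmt.Println(strconv.FormatFloat(1e6, 'g', -1, 64))     // 1e+06  (exp >= 6: 'e')
	}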
-// roundShortest rounds d (= mant * 2^exp) to the shortest number of digits
-// that will let the original floating point value be precisely reconstructed.
-func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) {
- // If mantissa is zero, the number is zero; stop now.
- if mant == 0 {
- d.nd = 0
- return
- }
-
- // Compute upper and lower such that any decimal number
- // between upper and lower (possibly inclusive)
- // will round to the original floating point number.
-
- // We may see at once that the number is already shortest.
- //
- // Suppose d is not denormal, so that 2^exp <= d < 10^dp.
- // The closest shorter number is at least 10^(dp-nd) away.
- // The lower/upper bounds computed below are at distance
- // at most 2^(exp-mantbits).
- //
- // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits),
- // or equivalently log2(10)*(dp-nd) > exp-mantbits.
- // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32).
- minexp := flt.bias + 1 // minimum possible exponent
- if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) {
- // The number is already shortest.
- return
- }
-
- // d = mant << (exp - mantbits)
- // Next highest floating point number is mant+1 << exp-mantbits.
- // Our upper bound is halfway between, mant*2+1 << exp-mantbits-1.
- upper := new(decimal)
- upper.Assign(mant*2 + 1)
- upper.Shift(exp - int(flt.mantbits) - 1)
-
- // d = mant << (exp - mantbits)
- // Next lowest floating point number is mant-1 << exp-mantbits,
- // unless mant-1 drops the significant bit and exp is not the minimum exp,
- // in which case the next lowest is mant*2-1 << exp-mantbits-1.
- // Either way, call it mantlo << explo-mantbits.
- // Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1.
- var mantlo uint64
- var explo int
- if mant > 1<<flt.mantbits || exp == minexp {
- mantlo = mant - 1
- explo = exp
- } else {
- mantlo = mant*2 - 1
- explo = exp - 1
- }
- lower := new(decimal)
- lower.Assign(mantlo*2 + 1)
- lower.Shift(explo - int(flt.mantbits) - 1)
-
- // The upper and lower bounds are possible outputs only if
- // the original mantissa is even, so that IEEE round-to-even
- // would round to the original mantissa and not the neighbors.
- inclusive := mant%2 == 0
-
- // As we walk the digits we want to know whether rounding up would fall
- // within the upper bound. This is tracked by upperdelta:
- //
- // If upperdelta == 0, the digits of d and upper are the same so far.
- //
- // If upperdelta == 1, we saw a difference of 1 between d and upper on a
- // previous digit and subsequently only 9s for d and 0s for upper.
- // (Thus rounding up may fall outside the bound, if it is exclusive.)
- //
- // If upperdelta == 2, then the difference is greater than 1
- // and we know that rounding up falls within the bound.
- var upperdelta uint8
-
- // Now we can figure out the minimum number of digits required.
- // Walk along until d has distinguished itself from upper and lower.
- for ui := 0; ; ui++ {
- // lower, d, and upper may have the decimal points at different
- // places. In this case upper is the longest, so we iterate from
- // ui==0 and start li and mi at (possibly) -1.
- mi := ui - upper.dp + d.dp
- if mi >= d.nd {
- break
- }
- li := ui - upper.dp + lower.dp
- l := byte('0') // lower digit
- if li >= 0 && li < lower.nd {
- l = lower.d[li]
- }
- m := byte('0') // middle digit
- if mi >= 0 {
- m = d.d[mi]
- }
- u := byte('0') // upper digit
- if ui < upper.nd {
- u = upper.d[ui]
- }
-
- // Okay to round down (truncate) if lower has a different digit
- // or if lower is inclusive and is exactly the result of rounding
- // down (i.e., and we have reached the final digit of lower).
- okdown := l != m || inclusive && li+1 == lower.nd
-
- switch {
- case upperdelta == 0 && m+1 < u:
- // Example:
- // m = 12345xxx
- // u = 12347xxx
- upperdelta = 2
- case upperdelta == 0 && m != u:
- // Example:
- // m = 12345xxx
- // u = 12346xxx
- upperdelta = 1
- case upperdelta == 1 && (m != '9' || u != '0'):
- // Example:
- // m = 1234598x
- // u = 1234600x
- upperdelta = 2
- }
- // Okay to round up if upper has a different digit and either upper
- // is inclusive or upper is bigger than the result of rounding up.
- okup := upperdelta > 0 && (inclusive || upperdelta > 1 || ui+1 < upper.nd)
-
- // If it's okay to do either, then round to the nearest one.
- // If it's okay to do only one, do it.
- switch {
- case okdown && okup:
- d.Round(mi + 1)
- return
- case okdown:
- d.RoundDown(mi + 1)
- return
- case okup:
- d.RoundUp(mi + 1)
- return
- }
- }
-}
-
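The property roundShortest guarantees is the round trip: the shortest decimal it keeps parses back to the identical float64. A sketch:

	package main

	import (
		"fmt"
		"math"
		"strconv"
	)

	func main() {
		s := strconv.FormatFloat(math.Pi, 'g', -1, 64)
		f, _ := strconv.ParseFloat(s, 64)
		fmt.Println(s, f == math.Pi) // 3.141592653589793 true
	}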
-type decimalSlice struct {
- d []byte
- nd, dp int
- neg bool
-}
-
-// %e: -d.ddddde±dd
-func fmtE(dst []byte, neg bool, d decimalSlice, prec int, fmt byte) []byte {
- // sign
- if neg {
- dst = append(dst, '-')
- }
-
- // first digit
- ch := byte('0')
- if d.nd != 0 {
- ch = d.d[0]
- }
- dst = append(dst, ch)
-
- // .moredigits
- if prec > 0 {
- dst = append(dst, '.')
- i := 1
- m := min(d.nd, prec+1)
- if i < m {
- dst = append(dst, d.d[i:m]...)
- i = m
- }
- for ; i <= prec; i++ {
- dst = append(dst, '0')
- }
- }
-
- // e±
- dst = append(dst, fmt)
- exp := d.dp - 1
- if d.nd == 0 { // special case: 0 has exponent 0
- exp = 0
- }
- if exp < 0 {
- ch = '-'
- exp = -exp
- } else {
- ch = '+'
- }
- dst = append(dst, ch)
-
- // dd or ddd
- switch {
- case exp < 10:
- dst = append(dst, '0', byte(exp)+'0')
- case exp < 100:
- dst = append(dst, byte(exp/10)+'0', byte(exp%10)+'0')
- default:
- dst = append(dst, byte(exp/100)+'0', byte(exp/10)%10+'0', byte(exp%10)+'0')
- }
-
- return dst
-}
-
-// %f: -ddddddd.ddddd
-func fmtF(dst []byte, neg bool, d decimalSlice, prec int) []byte {
- // sign
- if neg {
- dst = append(dst, '-')
- }
-
- // integer, padded with zeros as needed.
- if d.dp > 0 {
- m := min(d.nd, d.dp)
- dst = append(dst, d.d[:m]...)
- for ; m < d.dp; m++ {
- dst = append(dst, '0')
- }
- } else {
- dst = append(dst, '0')
- }
-
- // fraction
- if prec > 0 {
- dst = append(dst, '.')
- for i := 0; i < prec; i++ {
- ch := byte('0')
- if j := d.dp + i; 0 <= j && j < d.nd {
- ch = d.d[j]
- }
- dst = append(dst, ch)
- }
- }
-
- return dst
-}
-
-// %b: -ddddddddp±ddd
-func fmtB(dst []byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte {
- // sign
- if neg {
- dst = append(dst, '-')
- }
-
- // mantissa
- dst, _ = formatBits(dst, mant, 10, false, true)
-
- // p
- dst = append(dst, 'p')
-
- // ±exponent
- exp -= int(flt.mantbits)
- if exp >= 0 {
- dst = append(dst, '+')
- }
- dst, _ = formatBits(dst, uint64(exp), 10, exp < 0, true)
-
- return dst
-}
-
-// %x: -0x1.yyyyyyyyp±ddd or -0x0p+0. (y is hex digit, d is decimal digit)
-func fmtX(dst []byte, prec int, fmt byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte {
- if mant == 0 {
- exp = 0
- }
-
- // Shift digits so leading 1 (if any) is at bit 1<<60.
- mant <<= 60 - flt.mantbits
- for mant != 0 && mant&(1<<60) == 0 {
- mant <<= 1
- exp--
- }
-
- // Round if requested.
- if prec >= 0 && prec < 15 {
- shift := uint(prec * 4)
- extra := (mant << shift) & (1<<60 - 1)
- mant >>= 60 - shift
- if extra|(mant&1) > 1<<59 {
- mant++
- }
- mant <<= 60 - shift
- if mant&(1<<61) != 0 {
- // Wrapped around.
- mant >>= 1
- exp++
- }
- }
-
- hex := lowerhex
- if fmt == 'X' {
- hex = upperhex
- }
-
- // sign, 0x, leading digit
- if neg {
- dst = append(dst, '-')
- }
- dst = append(dst, '0', fmt, '0'+byte((mant>>60)&1))
-
- // .fraction
- mant <<= 4 // remove leading 0 or 1
- if prec < 0 && mant != 0 {
- dst = append(dst, '.')
- for mant != 0 {
- dst = append(dst, hex[(mant>>60)&15])
- mant <<= 4
- }
- } else if prec > 0 {
- dst = append(dst, '.')
- for i := 0; i < prec; i++ {
- dst = append(dst, hex[(mant>>60)&15])
- mant <<= 4
- }
- }
-
- // p±
- ch := byte('P')
- if fmt == lower(fmt) {
- ch = 'p'
- }
- dst = append(dst, ch)
- if exp < 0 {
- ch = '-'
- exp = -exp
- } else {
- ch = '+'
- }
- dst = append(dst, ch)
-
- // dd or ddd or dddd
- switch {
- case exp < 100:
- dst = append(dst, byte(exp/10)+'0', byte(exp%10)+'0')
- case exp < 1000:
- dst = append(dst, byte(exp/100)+'0', byte((exp/10)%10)+'0', byte(exp%10)+'0')
- default:
- dst = append(dst, byte(exp/1000)+'0', byte(exp/100)%10+'0', byte((exp/10)%10)+'0', byte(exp%10)+'0')
- }
-
- return dst
-}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
-func max(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
diff --git a/contrib/go/_std_1.18/src/strconv/ftoaryu.go b/contrib/go/_std_1.18/src/strconv/ftoaryu.go
deleted file mode 100644
index f2e74bed17..0000000000
--- a/contrib/go/_std_1.18/src/strconv/ftoaryu.go
+++ /dev/null
@@ -1,567 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package strconv
-
-import (
- "math/bits"
-)
-
-// binary to decimal conversion using the Ryū algorithm.
-//
-// See Ulf Adams, "Ryū: Fast Float-to-String Conversion" (doi:10.1145/3192366.3192369)
-//
-// Fixed precision formatting is a variant of the original paper's
-// algorithm, where a single multiplication by 10^k is required,
-// sharing the same rounding guarantees.
-
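-// Illustration (not from the original Go source): the "shortest" form
-// targeted here is the minimal digit string that round-trips. E.g. the
-// float64 nearest to 0.3 is 0.29999999999999998889776975374843..., yet
-// it formats as "0.3", because no shorter digit string parses back to
-// the same bits.
-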
-// ryuFtoaFixed32 formats mant*(2^exp) with prec decimal digits.
-func ryuFtoaFixed32(d *decimalSlice, mant uint32, exp int, prec int) {
- if prec < 0 {
- panic("ryuFtoaFixed32 called with negative prec")
- }
- if prec > 9 {
- panic("ryuFtoaFixed32 called with prec > 9")
- }
- // Zero input.
- if mant == 0 {
- d.nd, d.dp = 0, 0
- return
- }
- // Renormalize to a 25-bit mantissa.
- e2 := exp
- if b := bits.Len32(mant); b < 25 {
- mant <<= uint(25 - b)
- e2 += int(b) - 25
- }
- // Choose an exponent such that rounded mant*(2^e2)*(10^q) has
-	// at least prec decimal digits, i.e.
- // mant*(2^e2)*(10^q) >= 10^(prec-1)
- // Because mant >= 2^24, it is enough to choose:
- // 2^(e2+24) >= 10^(-q+prec-1)
- // or q = -mulByLog2Log10(e2+24) + prec - 1
- q := -mulByLog2Log10(e2+24) + prec - 1
-
- // Now compute mant*(2^e2)*(10^q).
- // Is it an exact computation?
- // Only small positive powers of 10 are exact (5^28 has 66 bits).
- exact := q <= 27 && q >= 0
-
- di, dexp2, d0 := mult64bitPow10(mant, e2, q)
- if dexp2 >= 0 {
- panic("not enough significant bits after mult64bitPow10")
- }
-	// As a special case, the computation might still be exact if the
-	// exponent is negative and the operation amounts to an exact division.
-	// In that case, we ignore all lower bits.
- // Note that division by 10^11 cannot be exact as 5^11 has 26 bits.
- if q < 0 && q >= -10 && divisibleByPower5(uint64(mant), -q) {
- exact = true
- d0 = true
- }
- // Remove extra lower bits and keep rounding info.
- extra := uint(-dexp2)
- extraMask := uint32(1<<extra - 1)
-
- di, dfrac := di>>extra, di&extraMask
- roundUp := false
- if exact {
- // If we computed an exact product, d + 1/2
- // should round to d+1 if 'd' is odd.
- roundUp = dfrac > 1<<(extra-1) ||
- (dfrac == 1<<(extra-1) && !d0) ||
- (dfrac == 1<<(extra-1) && d0 && di&1 == 1)
- } else {
- // otherwise, d+1/2 always rounds up because
- // we truncated below.
- roundUp = dfrac>>(extra-1) == 1
- }
- if dfrac != 0 {
- d0 = false
- }
- // Proceed to the requested number of digits
- formatDecimal(d, uint64(di), !d0, roundUp, prec)
- // Adjust exponent
- d.dp -= q
-}
-
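-// Illustration (not from the original Go source): for mant=1, exp=0,
-// prec=1, renormalization gives mant=1<<24 and e2=-24, so
-// q = -mulByLog2Log10(0) + 1 - 1 = 0; the product 2^24 * 2^-24 * 10^0
-// is exactly 1 and d ends up as the single digit "1" with d.dp == 1.
-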
-// ryuFtoaFixed64 formats mant*(2^exp) with prec decimal digits.
-func ryuFtoaFixed64(d *decimalSlice, mant uint64, exp int, prec int) {
- if prec > 18 {
- panic("ryuFtoaFixed64 called with prec > 18")
- }
- // Zero input.
- if mant == 0 {
- d.nd, d.dp = 0, 0
- return
- }
- // Renormalize to a 55-bit mantissa.
- e2 := exp
- if b := bits.Len64(mant); b < 55 {
- mant = mant << uint(55-b)
- e2 += int(b) - 55
- }
- // Choose an exponent such that rounded mant*(2^e2)*(10^q) has
-	// at least prec decimal digits, i.e.
- // mant*(2^e2)*(10^q) >= 10^(prec-1)
- // Because mant >= 2^54, it is enough to choose:
- // 2^(e2+54) >= 10^(-q+prec-1)
- // or q = -mulByLog2Log10(e2+54) + prec - 1
- //
- // The minimal required exponent is -mulByLog2Log10(1025)+18 = -291
- // The maximal required exponent is mulByLog2Log10(1074)+18 = 342
- q := -mulByLog2Log10(e2+54) + prec - 1
-
- // Now compute mant*(2^e2)*(10^q).
- // Is it an exact computation?
- // Only small positive powers of 10 are exact (5^55 has 128 bits).
- exact := q <= 55 && q >= 0
-
- di, dexp2, d0 := mult128bitPow10(mant, e2, q)
- if dexp2 >= 0 {
- panic("not enough significant bits after mult128bitPow10")
- }
-	// As a special case, the computation might still be exact if the
-	// exponent is negative and the operation amounts to an exact division.
-	// In that case, we ignore all lower bits.
- // Note that division by 10^23 cannot be exact as 5^23 has 54 bits.
- if q < 0 && q >= -22 && divisibleByPower5(mant, -q) {
- exact = true
- d0 = true
- }
- // Remove extra lower bits and keep rounding info.
- extra := uint(-dexp2)
- extraMask := uint64(1<<extra - 1)
-
- di, dfrac := di>>extra, di&extraMask
- roundUp := false
- if exact {
- // If we computed an exact product, d + 1/2
- // should round to d+1 if 'd' is odd.
- roundUp = dfrac > 1<<(extra-1) ||
- (dfrac == 1<<(extra-1) && !d0) ||
- (dfrac == 1<<(extra-1) && d0 && di&1 == 1)
- } else {
- // otherwise, d+1/2 always rounds up because
- // we truncated below.
- roundUp = dfrac>>(extra-1) == 1
- }
- if dfrac != 0 {
- d0 = false
- }
- // Proceed to the requested number of digits
- formatDecimal(d, di, !d0, roundUp, prec)
- // Adjust exponent
- d.dp -= q
-}
-
-var uint64pow10 = [...]uint64{
- 1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
- 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
-}
-
-// formatDecimal fills d with at most prec decimal digits
-// of mantissa m. The boolean trunc indicates whether m
-// is truncated compared to the original number being formatted.
-func formatDecimal(d *decimalSlice, m uint64, trunc bool, roundUp bool, prec int) {
- max := uint64pow10[prec]
- trimmed := 0
- for m >= max {
- a, b := m/10, m%10
- m = a
- trimmed++
- if b > 5 {
- roundUp = true
- } else if b < 5 {
- roundUp = false
- } else { // b == 5
- // round up if there are trailing digits,
- // or if the new value of m is odd (round-to-even convention)
- roundUp = trunc || m&1 == 1
- }
- if b != 0 {
- trunc = true
- }
- }
- if roundUp {
- m++
- }
- if m >= max {
- // Happens if di was originally 99999....xx
- m /= 10
- trimmed++
- }
- // render digits (similar to formatBits)
- n := uint(prec)
- d.nd = int(prec)
- v := m
- for v >= 100 {
- var v1, v2 uint64
- if v>>32 == 0 {
- v1, v2 = uint64(uint32(v)/100), uint64(uint32(v)%100)
- } else {
- v1, v2 = v/100, v%100
- }
- n -= 2
- d.d[n+1] = smallsString[2*v2+1]
- d.d[n+0] = smallsString[2*v2+0]
- v = v1
- }
- if v > 0 {
- n--
- d.d[n] = smallsString[2*v+1]
- }
- if v >= 10 {
- n--
- d.d[n] = smallsString[2*v]
- }
- for d.d[d.nd-1] == '0' {
- d.nd--
- trimmed++
- }
- d.dp = d.nd + trimmed
-}
-
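-// Illustration (not from the original Go source): formatDecimal applies
-// round-to-even while trimming. With m=125, prec=2 the trimmed 5 has no
-// nonzero digits after it and the new m=12 is even, so the result is
-// "12" with dp=3 (1.2e2); m=135 leaves an odd m=13 and rounds up to "14".
-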
-// ryuFtoaShortest formats mant*2^exp to its shortest decimal representation.
-func ryuFtoaShortest(d *decimalSlice, mant uint64, exp int, flt *floatInfo) {
- if mant == 0 {
- d.nd, d.dp = 0, 0
- return
- }
- // If input is an exact integer with fewer bits than the mantissa,
- // the previous and next integer are not admissible representations.
- if exp <= 0 && bits.TrailingZeros64(mant) >= -exp {
- mant >>= uint(-exp)
- ryuDigits(d, mant, mant, mant, true, false)
- return
- }
- ml, mc, mu, e2 := computeBounds(mant, exp, flt)
- if e2 == 0 {
- ryuDigits(d, ml, mc, mu, true, false)
- return
- }
- // Find 10^q *larger* than 2^-e2
- q := mulByLog2Log10(-e2) + 1
-
- // We are going to multiply by 10^q using 128-bit arithmetic.
- // The exponent is the same for all 3 numbers.
- var dl, dc, du uint64
- var dl0, dc0, du0 bool
- if flt == &float32info {
- var dl32, dc32, du32 uint32
- dl32, _, dl0 = mult64bitPow10(uint32(ml), e2, q)
- dc32, _, dc0 = mult64bitPow10(uint32(mc), e2, q)
- du32, e2, du0 = mult64bitPow10(uint32(mu), e2, q)
- dl, dc, du = uint64(dl32), uint64(dc32), uint64(du32)
- } else {
- dl, _, dl0 = mult128bitPow10(ml, e2, q)
- dc, _, dc0 = mult128bitPow10(mc, e2, q)
- du, e2, du0 = mult128bitPow10(mu, e2, q)
- }
- if e2 >= 0 {
- panic("not enough significant bits after mult128bitPow10")
- }
- // Is it an exact computation?
- if q > 55 {
- // Large positive powers of ten are not exact
- dl0, dc0, du0 = false, false, false
- }
- if q < 0 && q >= -24 {
- // Division by a power of ten may be exact.
- // (note that 5^25 is a 59-bit number so division by 5^25 is never exact).
- if divisibleByPower5(ml, -q) {
- dl0 = true
- }
- if divisibleByPower5(mc, -q) {
- dc0 = true
- }
- if divisibleByPower5(mu, -q) {
- du0 = true
- }
- }
- // Express the results (dl, dc, du)*2^e2 as integers.
- // Extra bits must be removed and rounding hints computed.
- extra := uint(-e2)
- extraMask := uint64(1<<extra - 1)
- // Now compute the floored, integral base 10 mantissas.
- dl, fracl := dl>>extra, dl&extraMask
- dc, fracc := dc>>extra, dc&extraMask
- du, fracu := du>>extra, du&extraMask
- // Is it allowed to use 'du' as a result?
- // It is always allowed when it is truncated, but also
-	// if it is exact and the original binary mantissa is even.
- // When disallowed, we can subtract 1.
- uok := !du0 || fracu > 0
- if du0 && fracu == 0 {
- uok = mant&1 == 0
- }
- if !uok {
- du--
- }
- // Is 'dc' the correctly rounded base 10 mantissa?
- // The correct rounding might be dc+1
- cup := false // don't round up.
- if dc0 {
- // If we computed an exact product, the half integer
- // should round to next (even) integer if 'dc' is odd.
- cup = fracc > 1<<(extra-1) ||
- (fracc == 1<<(extra-1) && dc&1 == 1)
- } else {
- // otherwise, the result is a lower truncation of the ideal
- // result.
- cup = fracc>>(extra-1) == 1
- }
- // Is 'dl' an allowed representation?
- // Only if it is an exact value, and if the original binary mantissa
- // was even.
- lok := dl0 && fracl == 0 && (mant&1 == 0)
- if !lok {
- dl++
- }
- // We need to remember whether the trimmed digits of 'dc' are zero.
- c0 := dc0 && fracc == 0
- // render digits
- ryuDigits(d, dl, dc, du, c0, cup)
- d.dp -= q
-}
-
-// mulByLog2Log10 returns math.Floor(x * log(2)/log(10)) for an integer x in
-// the range -1600 <= x && x <= +1600.
-//
-// The range restriction lets us work in faster integer arithmetic instead of
-// slower floating point arithmetic. Correctness is verified by unit tests.
-func mulByLog2Log10(x int) int {
- // log(2)/log(10) ≈ 0.30102999566 ≈ 78913 / 2^18
- return (x * 78913) >> 18
-}
-
-// mulByLog10Log2 returns math.Floor(x * log(10)/log(2)) for an integer x in
-// the range -500 <= x && x <= +500.
-//
-// The range restriction lets us work in faster integer arithmetic instead of
-// slower floating point arithmetic. Correctness is verified by unit tests.
-func mulByLog10Log2(x int) int {
- // log(10)/log(2) ≈ 3.32192809489 ≈ 108853 / 2^15
- return (x * 108853) >> 15
-}
-
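-// Illustration (not from the original Go source): both helpers are
-// fixed-point approximations of the exact logarithms, e.g.
-//
-//	mulByLog2Log10(100)  // == 30:  floor(100 * 0.30103) = 30
-//	mulByLog2Log10(-100) // == -31: floor(-30.103) = -31
-//	mulByLog10Log2(3)    // == 9:   floor(3 * 3.32193) = 9
-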
-// computeBounds returns a floating-point vector (l, c, u)×2^e2
-// where the mantissas are 55-bit (or 26-bit) integers, describing the interval
-// represented by the input float64 or float32.
-func computeBounds(mant uint64, exp int, flt *floatInfo) (lower, central, upper uint64, e2 int) {
- if mant != 1<<flt.mantbits || exp == flt.bias+1-int(flt.mantbits) {
- // regular case (or denormals)
- lower, central, upper = 2*mant-1, 2*mant, 2*mant+1
- e2 = exp - 1
- return
- } else {
- // border of an exponent
- lower, central, upper = 4*mant-1, 4*mant, 4*mant+2
- e2 = exp - 2
- return
- }
-}
-
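-// Illustration (not from the original Go source): for the float64 value
-// 1.0, mant == 1<<52 and exp == -52, the border of an exponent: the next
-// smaller float is only half as far away as the next larger one, hence
-// the asymmetric bounds (4m-1, 4m, 4m+2) * 2^(exp-2).
-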
-func ryuDigits(d *decimalSlice, lower, central, upper uint64,
- c0, cup bool) {
- lhi, llo := divmod1e9(lower)
- chi, clo := divmod1e9(central)
- uhi, ulo := divmod1e9(upper)
- if uhi == 0 {
- // only low digits (for denormals)
- ryuDigits32(d, llo, clo, ulo, c0, cup, 8)
- } else if lhi < uhi {
- // truncate 9 digits at once.
- if llo != 0 {
- lhi++
- }
- c0 = c0 && clo == 0
- cup = (clo > 5e8) || (clo == 5e8 && cup)
- ryuDigits32(d, lhi, chi, uhi, c0, cup, 8)
- d.dp += 9
- } else {
- d.nd = 0
- // emit high part
- n := uint(9)
- for v := chi; v > 0; {
- v1, v2 := v/10, v%10
- v = v1
- n--
- d.d[n] = byte(v2 + '0')
- }
- d.d = d.d[n:]
- d.nd = int(9 - n)
- // emit low part
- ryuDigits32(d, llo, clo, ulo,
- c0, cup, d.nd+8)
- }
- // trim trailing zeros
- for d.nd > 0 && d.d[d.nd-1] == '0' {
- d.nd--
- }
- // trim initial zeros
- for d.nd > 0 && d.d[0] == '0' {
- d.nd--
- d.dp--
- d.d = d.d[1:]
- }
-}
-
-// ryuDigits32 emits decimal digits for a number less than 1e9.
-func ryuDigits32(d *decimalSlice, lower, central, upper uint32,
- c0, cup bool, endindex int) {
- if upper == 0 {
- d.dp = endindex + 1
- return
- }
- trimmed := 0
- // Remember last trimmed digit to check for round-up.
- // c0 will be used to remember zeroness of following digits.
- cNextDigit := 0
- for upper > 0 {
- // Repeatedly compute:
- // l = Ceil(lower / 10^k)
- // c = Round(central / 10^k)
- // u = Floor(upper / 10^k)
- // and stop when c goes out of the (l, u) interval.
- l := (lower + 9) / 10
- c, cdigit := central/10, central%10
- u := upper / 10
- if l > u {
-			// Trimming another digit would go below the lower bound l,
-			// so stop trimming and exit the loop now.
- break
- }
- // Check that we didn't cross the lower boundary.
- // The case where l < u but c == l-1 is essentially impossible,
- // but may happen if:
- // lower = ..11
- // central = ..19
- // upper = ..31
- // and means that 'central' is very close but less than
- // an integer ending with many zeros, and usually
- // the "round-up" logic hides the problem.
- if l == c+1 && c < u {
- c++
- cdigit = 0
- cup = false
- }
- trimmed++
- // Remember trimmed digits of c
- c0 = c0 && cNextDigit == 0
- cNextDigit = int(cdigit)
- lower, central, upper = l, c, u
- }
- // should we round up?
- if trimmed > 0 {
- cup = cNextDigit > 5 ||
- (cNextDigit == 5 && !c0) ||
- (cNextDigit == 5 && c0 && central&1 == 1)
- }
- if central < upper && cup {
- central++
- }
- // We know where the number ends, fill directly
- endindex -= trimmed
- v := central
- n := endindex
- for n > d.nd {
- v1, v2 := v/100, v%100
- d.d[n] = smallsString[2*v2+1]
- d.d[n-1] = smallsString[2*v2+0]
- n -= 2
- v = v1
- }
- if n == d.nd {
- d.d[n] = byte(v + '0')
- }
- d.nd = endindex + 1
- d.dp = d.nd + trimmed
-}
-
-// mult64bitPow10 takes a floating-point input with a 25-bit
-// mantissa and multiplies it with 10^q. The resulting mantissa
-// is m*P >> 57 where P is a 64-bit element of the detailedPowersOfTen tables.
-// It is typically 31 or 32 bits wide.
-// The returned boolean is true if all trimmed bits were zero.
-//
-// That is:
-// m*2^e2 * round(10^q) = resM * 2^resE + ε
-// exact = ε == 0
-func mult64bitPow10(m uint32, e2, q int) (resM uint32, resE int, exact bool) {
- if q == 0 {
- // P == 1<<63
- return m << 6, e2 - 6, true
- }
- if q < detailedPowersOfTenMinExp10 || detailedPowersOfTenMaxExp10 < q {
-		// This never happens due to the range of float32/float64 exponents.
- panic("mult64bitPow10: power of 10 is out of range")
- }
- pow := detailedPowersOfTen[q-detailedPowersOfTenMinExp10][1]
- if q < 0 {
- // Inverse powers of ten must be rounded up.
- pow += 1
- }
- hi, lo := bits.Mul64(uint64(m), pow)
- e2 += mulByLog10Log2(q) - 63 + 57
- return uint32(hi<<7 | lo>>57), e2, lo<<7 == 0
-}
-
-// mult128bitPow10 takes a floating-point input with a 55-bit
-// mantissa and multiplies it with 10^q. The resulting mantissa
-// is m*P >> 119 where P is a 128-bit element of the detailedPowersOfTen tables.
-// It is typically 63 or 64 bits wide.
-// The returned boolean is true if all trimmed bits were zero.
-//
-// That is:
-// m*2^e2 * round(10^q) = resM * 2^resE + ε
-// exact = ε == 0
-func mult128bitPow10(m uint64, e2, q int) (resM uint64, resE int, exact bool) {
- if q == 0 {
- // P == 1<<127
- return m << 8, e2 - 8, true
- }
- if q < detailedPowersOfTenMinExp10 || detailedPowersOfTenMaxExp10 < q {
-		// This never happens due to the range of float32/float64 exponents.
- panic("mult128bitPow10: power of 10 is out of range")
- }
- pow := detailedPowersOfTen[q-detailedPowersOfTenMinExp10]
- if q < 0 {
- // Inverse powers of ten must be rounded up.
- pow[0] += 1
- }
- e2 += mulByLog10Log2(q) - 127 + 119
-
- // long multiplication
- l1, l0 := bits.Mul64(m, pow[0])
- h1, h0 := bits.Mul64(m, pow[1])
- mid, carry := bits.Add64(l1, h0, 0)
- h1 += carry
- return h1<<9 | mid>>55, e2, mid<<9 == 0 && l0 == 0
-}
-
-func divisibleByPower5(m uint64, k int) bool {
- if m == 0 {
- return true
- }
- for i := 0; i < k; i++ {
- if m%5 != 0 {
- return false
- }
- m /= 5
- }
- return true
-}
-
-// divmod1e9 computes quotient and remainder of division by 1e9,
-// avoiding runtime uint64 division on 32-bit platforms.
-func divmod1e9(x uint64) (uint32, uint32) {
- if !host32bit {
- return uint32(x / 1e9), uint32(x % 1e9)
- }
- // Use the same sequence of operations as the amd64 compiler.
- hi, _ := bits.Mul64(x>>1, 0x89705f4136b4a598) // binary digits of 1e-9
- q := hi >> 28
- return uint32(q), uint32(x - q*1e9)
-}
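-
-// Illustration (not from the original Go source):
-// divmod1e9(1234567890123) returns (1234, 567890123); the multiply-and-
-// shift sequence yields the quotient without a runtime 64-bit division.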
diff --git a/contrib/go/_std_1.18/src/strconv/itoa.go b/contrib/go/_std_1.18/src/strconv/itoa.go
deleted file mode 100644
index 45e4192c82..0000000000
--- a/contrib/go/_std_1.18/src/strconv/itoa.go
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package strconv
-
-import "math/bits"
-
-const fastSmalls = true // enable fast path for small integers
-
-// FormatUint returns the string representation of i in the given base,
-// for 2 <= base <= 36. The result uses the lower-case letters 'a' to 'z'
-// for digit values >= 10.
-func FormatUint(i uint64, base int) string {
- if fastSmalls && i < nSmalls && base == 10 {
- return small(int(i))
- }
- _, s := formatBits(nil, i, base, false, false)
- return s
-}
-
-// FormatInt returns the string representation of i in the given base,
-// for 2 <= base <= 36. The result uses the lower-case letters 'a' to 'z'
-// for digit values >= 10.
-func FormatInt(i int64, base int) string {
- if fastSmalls && 0 <= i && i < nSmalls && base == 10 {
- return small(int(i))
- }
- _, s := formatBits(nil, uint64(i), base, i < 0, false)
- return s
-}
-
-// Itoa is equivalent to FormatInt(int64(i), 10).
-func Itoa(i int) string {
- return FormatInt(int64(i), 10)
-}
-
-// AppendInt appends the string form of the integer i,
-// as generated by FormatInt, to dst and returns the extended buffer.
-func AppendInt(dst []byte, i int64, base int) []byte {
- if fastSmalls && 0 <= i && i < nSmalls && base == 10 {
- return append(dst, small(int(i))...)
- }
- dst, _ = formatBits(dst, uint64(i), base, i < 0, true)
- return dst
-}
-
-// AppendUint appends the string form of the unsigned integer i,
-// as generated by FormatUint, to dst and returns the extended buffer.
-func AppendUint(dst []byte, i uint64, base int) []byte {
- if fastSmalls && i < nSmalls && base == 10 {
- return append(dst, small(int(i))...)
- }
- dst, _ = formatBits(dst, i, base, false, true)
- return dst
-}
-
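-// Illustration (not from the original Go source) of the exported API:
-//
-//	strconv.FormatInt(-42, 16)               // "-2a"
-//	strconv.AppendUint([]byte("x="), 255, 2) // "x=11111111"
-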
-// small returns the string for an i with 0 <= i < nSmalls.
-func small(i int) string {
- if i < 10 {
- return digits[i : i+1]
- }
- return smallsString[i*2 : i*2+2]
-}
-
-const nSmalls = 100
-
-const smallsString = "00010203040506070809" +
- "10111213141516171819" +
- "20212223242526272829" +
- "30313233343536373839" +
- "40414243444546474849" +
- "50515253545556575859" +
- "60616263646566676869" +
- "70717273747576777879" +
- "80818283848586878889" +
- "90919293949596979899"
-
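-// Illustration (not from the original Go source): smallsString packs the
-// two-digit strings "00".."99" back to back, so the digits of i
-// (0 <= i < 100) live at smallsString[i*2 : i*2+2]; small(42) returns
-// smallsString[84:86] == "42".
-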
-const host32bit = ^uint(0)>>32 == 0
-
-const digits = "0123456789abcdefghijklmnopqrstuvwxyz"
-
-// formatBits computes the string representation of u in the given base.
-// If neg is set, u is treated as negative int64 value. If append_ is
-// set, the string is appended to dst and the resulting byte slice is
-// returned as the first result value; otherwise the string is returned
-// as the second result value.
-func formatBits(dst []byte, u uint64, base int, neg, append_ bool) (d []byte, s string) {
- if base < 2 || base > len(digits) {
- panic("strconv: illegal AppendInt/FormatInt base")
- }
- // 2 <= base && base <= len(digits)
-
- var a [64 + 1]byte // +1 for sign of 64bit value in base 2
- i := len(a)
-
- if neg {
- u = -u
- }
-
- // convert bits
- // We use uint values where we can because those will
- // fit into a single register even on a 32bit machine.
- if base == 10 {
- // common case: use constants for / because
- // the compiler can optimize it into a multiply+shift
-
- if host32bit {
- // convert the lower digits using 32bit operations
- for u >= 1e9 {
- // Avoid using r = a%b in addition to q = a/b
- // since 64bit division and modulo operations
- // are calculated by runtime functions on 32bit machines.
- q := u / 1e9
- us := uint(u - q*1e9) // u % 1e9 fits into a uint
- for j := 4; j > 0; j-- {
- is := us % 100 * 2
- us /= 100
- i -= 2
- a[i+1] = smallsString[is+1]
- a[i+0] = smallsString[is+0]
- }
-
- // us < 10, since it contains the last digit
- // from the initial 9-digit us.
- i--
- a[i] = smallsString[us*2+1]
-
- u = q
- }
- // u < 1e9
- }
-
- // u guaranteed to fit into a uint
- us := uint(u)
- for us >= 100 {
- is := us % 100 * 2
- us /= 100
- i -= 2
- a[i+1] = smallsString[is+1]
- a[i+0] = smallsString[is+0]
- }
-
- // us < 100
- is := us * 2
- i--
- a[i] = smallsString[is+1]
- if us >= 10 {
- i--
- a[i] = smallsString[is]
- }
-
- } else if isPowerOfTwo(base) {
- // Use shifts and masks instead of / and %.
- // Base is a power of 2 and 2 <= base <= len(digits) where len(digits) is 36.
- // The largest power of 2 below or equal to 36 is 32, which is 1 << 5;
-		// i.e., the largest possible shift count is 5. By ANDing that value with
- // the constant 7 we tell the compiler that the shift count is always
- // less than 8 which is smaller than any register width. This allows
- // the compiler to generate better code for the shift operation.
- shift := uint(bits.TrailingZeros(uint(base))) & 7
- b := uint64(base)
- m := uint(base) - 1 // == 1<<shift - 1
- for u >= b {
- i--
- a[i] = digits[uint(u)&m]
- u >>= shift
- }
- // u < base
- i--
- a[i] = digits[uint(u)]
- } else {
- // general case
- b := uint64(base)
- for u >= b {
- i--
- // Avoid using r = a%b in addition to q = a/b
- // since 64bit division and modulo operations
- // are calculated by runtime functions on 32bit machines.
- q := u / b
- a[i] = digits[uint(u-q*b)]
- u = q
- }
- // u < base
- i--
- a[i] = digits[uint(u)]
- }
-
- // add sign, if any
- if neg {
- i--
- a[i] = '-'
- }
-
- if append_ {
- d = append(dst, a[i:]...)
- return
- }
- s = string(a[i:])
- return
-}
-
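-// Illustration (not from the original Go source): for base 16 the shift
-// is 4 and the mask m is 15, so formatBits peels one hex digit per
-// iteration; u = 255 emits 'f', 'f' and formats as "ff".
-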
-func isPowerOfTwo(x int) bool {
- return x&(x-1) == 0
-}
diff --git a/contrib/go/_std_1.18/src/strconv/quote.go b/contrib/go/_std_1.18/src/strconv/quote.go
deleted file mode 100644
index d2814b92da..0000000000
--- a/contrib/go/_std_1.18/src/strconv/quote.go
+++ /dev/null
@@ -1,598 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:generate go run makeisprint.go -output isprint.go
-
-package strconv
-
-import (
- "unicode/utf8"
-)
-
-const (
- lowerhex = "0123456789abcdef"
- upperhex = "0123456789ABCDEF"
-)
-
-// contains reports whether the string contains the byte c.
-func contains(s string, c byte) bool {
- return index(s, c) != -1
-}
-
-func quoteWith(s string, quote byte, ASCIIonly, graphicOnly bool) string {
- return string(appendQuotedWith(make([]byte, 0, 3*len(s)/2), s, quote, ASCIIonly, graphicOnly))
-}
-
-func quoteRuneWith(r rune, quote byte, ASCIIonly, graphicOnly bool) string {
- return string(appendQuotedRuneWith(nil, r, quote, ASCIIonly, graphicOnly))
-}
-
-func appendQuotedWith(buf []byte, s string, quote byte, ASCIIonly, graphicOnly bool) []byte {
- // Often called with big strings, so preallocate. If there's quoting,
- // this is conservative but still helps a lot.
- if cap(buf)-len(buf) < len(s) {
- nBuf := make([]byte, len(buf), len(buf)+1+len(s)+1)
- copy(nBuf, buf)
- buf = nBuf
- }
- buf = append(buf, quote)
- for width := 0; len(s) > 0; s = s[width:] {
- r := rune(s[0])
- width = 1
- if r >= utf8.RuneSelf {
- r, width = utf8.DecodeRuneInString(s)
- }
- if width == 1 && r == utf8.RuneError {
- buf = append(buf, `\x`...)
- buf = append(buf, lowerhex[s[0]>>4])
- buf = append(buf, lowerhex[s[0]&0xF])
- continue
- }
- buf = appendEscapedRune(buf, r, quote, ASCIIonly, graphicOnly)
- }
- buf = append(buf, quote)
- return buf
-}
-
-func appendQuotedRuneWith(buf []byte, r rune, quote byte, ASCIIonly, graphicOnly bool) []byte {
- buf = append(buf, quote)
- if !utf8.ValidRune(r) {
- r = utf8.RuneError
- }
- buf = appendEscapedRune(buf, r, quote, ASCIIonly, graphicOnly)
- buf = append(buf, quote)
- return buf
-}
-
-func appendEscapedRune(buf []byte, r rune, quote byte, ASCIIonly, graphicOnly bool) []byte {
- var runeTmp [utf8.UTFMax]byte
- if r == rune(quote) || r == '\\' { // always backslashed
- buf = append(buf, '\\')
- buf = append(buf, byte(r))
- return buf
- }
- if ASCIIonly {
- if r < utf8.RuneSelf && IsPrint(r) {
- buf = append(buf, byte(r))
- return buf
- }
- } else if IsPrint(r) || graphicOnly && isInGraphicList(r) {
- n := utf8.EncodeRune(runeTmp[:], r)
- buf = append(buf, runeTmp[:n]...)
- return buf
- }
- switch r {
- case '\a':
- buf = append(buf, `\a`...)
- case '\b':
- buf = append(buf, `\b`...)
- case '\f':
- buf = append(buf, `\f`...)
- case '\n':
- buf = append(buf, `\n`...)
- case '\r':
- buf = append(buf, `\r`...)
- case '\t':
- buf = append(buf, `\t`...)
- case '\v':
- buf = append(buf, `\v`...)
- default:
- switch {
- case r < ' ':
- buf = append(buf, `\x`...)
- buf = append(buf, lowerhex[byte(r)>>4])
- buf = append(buf, lowerhex[byte(r)&0xF])
- case !utf8.ValidRune(r):
- r = 0xFFFD
- fallthrough
- case r < 0x10000:
- buf = append(buf, `\u`...)
- for s := 12; s >= 0; s -= 4 {
- buf = append(buf, lowerhex[r>>uint(s)&0xF])
- }
- default:
- buf = append(buf, `\U`...)
- for s := 28; s >= 0; s -= 4 {
- buf = append(buf, lowerhex[r>>uint(s)&0xF])
- }
- }
- }
- return buf
-}
-
-// Quote returns a double-quoted Go string literal representing s. The
-// returned string uses Go escape sequences (\t, \n, \xFF, \u0100) for
-// control characters and non-printable characters as defined by
-// IsPrint.
-func Quote(s string) string {
- return quoteWith(s, '"', false, false)
-}
-
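-// Illustration (not from the original Go source):
-//
-//	strconv.Quote("a\nb") // `"a\nb"`: the newline becomes the two bytes \n
-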
-// AppendQuote appends a double-quoted Go string literal representing s,
-// as generated by Quote, to dst and returns the extended buffer.
-func AppendQuote(dst []byte, s string) []byte {
- return appendQuotedWith(dst, s, '"', false, false)
-}
-
-// QuoteToASCII returns a double-quoted Go string literal representing s.
-// The returned string uses Go escape sequences (\t, \n, \xFF, \u0100) for
-// non-ASCII characters and non-printable characters as defined by IsPrint.
-func QuoteToASCII(s string) string {
- return quoteWith(s, '"', true, false)
-}
-
-// AppendQuoteToASCII appends a double-quoted Go string literal representing s,
-// as generated by QuoteToASCII, to dst and returns the extended buffer.
-func AppendQuoteToASCII(dst []byte, s string) []byte {
- return appendQuotedWith(dst, s, '"', true, false)
-}
-
-// QuoteToGraphic returns a double-quoted Go string literal representing s.
-// The returned string leaves Unicode graphic characters, as defined by
-// IsGraphic, unchanged and uses Go escape sequences (\t, \n, \xFF, \u0100)
-// for non-graphic characters.
-func QuoteToGraphic(s string) string {
- return quoteWith(s, '"', false, true)
-}
-
-// AppendQuoteToGraphic appends a double-quoted Go string literal representing s,
-// as generated by QuoteToGraphic, to dst and returns the extended buffer.
-func AppendQuoteToGraphic(dst []byte, s string) []byte {
- return appendQuotedWith(dst, s, '"', false, true)
-}
-
-// QuoteRune returns a single-quoted Go character literal representing the
-// rune. The returned string uses Go escape sequences (\t, \n, \xFF, \u0100)
-// for control characters and non-printable characters as defined by IsPrint.
-func QuoteRune(r rune) string {
- return quoteRuneWith(r, '\'', false, false)
-}
-
-// AppendQuoteRune appends a single-quoted Go character literal representing the rune,
-// as generated by QuoteRune, to dst and returns the extended buffer.
-func AppendQuoteRune(dst []byte, r rune) []byte {
- return appendQuotedRuneWith(dst, r, '\'', false, false)
-}
-
-// QuoteRuneToASCII returns a single-quoted Go character literal representing
-// the rune. The returned string uses Go escape sequences (\t, \n, \xFF,
-// \u0100) for non-ASCII characters and non-printable characters as defined
-// by IsPrint.
-func QuoteRuneToASCII(r rune) string {
- return quoteRuneWith(r, '\'', true, false)
-}
-
-// AppendQuoteRuneToASCII appends a single-quoted Go character literal representing the rune,
-// as generated by QuoteRuneToASCII, to dst and returns the extended buffer.
-func AppendQuoteRuneToASCII(dst []byte, r rune) []byte {
- return appendQuotedRuneWith(dst, r, '\'', true, false)
-}
-
-// QuoteRuneToGraphic returns a single-quoted Go character literal representing
-// the rune. If the rune is not a Unicode graphic character,
-// as defined by IsGraphic, the returned string will use a Go escape sequence
-// (\t, \n, \xFF, \u0100).
-func QuoteRuneToGraphic(r rune) string {
- return quoteRuneWith(r, '\'', false, true)
-}
-
-// AppendQuoteRuneToGraphic appends a single-quoted Go character literal representing the rune,
-// as generated by QuoteRuneToGraphic, to dst and returns the extended buffer.
-func AppendQuoteRuneToGraphic(dst []byte, r rune) []byte {
- return appendQuotedRuneWith(dst, r, '\'', false, true)
-}
-
-// CanBackquote reports whether the string s can be represented
-// unchanged as a single-line backquoted string without control
-// characters other than tab.
-func CanBackquote(s string) bool {
- for len(s) > 0 {
- r, wid := utf8.DecodeRuneInString(s)
- s = s[wid:]
- if wid > 1 {
- if r == '\ufeff' {
- return false // BOMs are invisible and should not be quoted.
- }
- continue // All other multibyte runes are correctly encoded and assumed printable.
- }
- if r == utf8.RuneError {
- return false
- }
- if (r < ' ' && r != '\t') || r == '`' || r == '\u007F' {
- return false
- }
- }
- return true
-}
-
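-// Illustration (not from the original Go source):
-//
-//	strconv.CanBackquote("path/to file") // true
-//	strconv.CanBackquote("a`b")          // false: contains a backquote
-//	strconv.CanBackquote("a\nb")         // false: control char other than tab
-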
-func unhex(b byte) (v rune, ok bool) {
- c := rune(b)
- switch {
- case '0' <= c && c <= '9':
- return c - '0', true
- case 'a' <= c && c <= 'f':
- return c - 'a' + 10, true
- case 'A' <= c && c <= 'F':
- return c - 'A' + 10, true
- }
- return
-}
-
-// UnquoteChar decodes the first character or byte in the escaped string
-// or character literal represented by the string s.
-// It returns four values:
-//
-// 1) value, the decoded Unicode code point or byte value;
-// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
-// 3) tail, the remainder of the string after the character; and
-// 4) an error that will be nil if the character is syntactically valid.
-//
-// The second argument, quote, specifies the type of literal being parsed
-// and therefore which escaped quote character is permitted.
-// If set to a single quote, it permits the sequence \' and disallows unescaped '.
-// If set to a double quote, it permits \" and disallows unescaped ".
-// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped.
-func UnquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
- // easy cases
- if len(s) == 0 {
- err = ErrSyntax
- return
- }
- switch c := s[0]; {
- case c == quote && (quote == '\'' || quote == '"'):
- err = ErrSyntax
- return
- case c >= utf8.RuneSelf:
- r, size := utf8.DecodeRuneInString(s)
- return r, true, s[size:], nil
- case c != '\\':
- return rune(s[0]), false, s[1:], nil
- }
-
- // hard case: c is backslash
- if len(s) <= 1 {
- err = ErrSyntax
- return
- }
- c := s[1]
- s = s[2:]
-
- switch c {
- case 'a':
- value = '\a'
- case 'b':
- value = '\b'
- case 'f':
- value = '\f'
- case 'n':
- value = '\n'
- case 'r':
- value = '\r'
- case 't':
- value = '\t'
- case 'v':
- value = '\v'
- case 'x', 'u', 'U':
- n := 0
- switch c {
- case 'x':
- n = 2
- case 'u':
- n = 4
- case 'U':
- n = 8
- }
- var v rune
- if len(s) < n {
- err = ErrSyntax
- return
- }
- for j := 0; j < n; j++ {
- x, ok := unhex(s[j])
- if !ok {
- err = ErrSyntax
- return
- }
- v = v<<4 | x
- }
- s = s[n:]
- if c == 'x' {
- // single-byte string, possibly not UTF-8
- value = v
- break
- }
- if !utf8.ValidRune(v) {
- err = ErrSyntax
- return
- }
- value = v
- multibyte = true
- case '0', '1', '2', '3', '4', '5', '6', '7':
- v := rune(c) - '0'
- if len(s) < 2 {
- err = ErrSyntax
- return
- }
- for j := 0; j < 2; j++ { // one digit already; two more
- x := rune(s[j]) - '0'
- if x < 0 || x > 7 {
- err = ErrSyntax
- return
- }
- v = (v << 3) | x
- }
- s = s[2:]
- if v > 255 {
- err = ErrSyntax
- return
- }
- value = v
- case '\\':
- value = '\\'
- case '\'', '"':
- if c != quote {
- err = ErrSyntax
- return
- }
- value = rune(c)
- default:
- err = ErrSyntax
- return
- }
- tail = s
- return
-}
-
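-// Illustration (not from the original Go source):
-//
-//	v, mb, tail, _ := strconv.UnquoteChar(`\u00e9abc`, 0)
-//	// v == 'é' (U+00E9), mb == true, tail == "abc"
-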
-// QuotedPrefix returns the quoted string (as understood by Unquote) at the prefix of s.
-// If s does not start with a valid quoted string, QuotedPrefix returns an error.
-func QuotedPrefix(s string) (string, error) {
- out, _, err := unquote(s, false)
- return out, err
-}
-
-// Unquote interprets s as a single-quoted, double-quoted,
-// or backquoted Go string literal, returning the string value
-// that s quotes. (If s is single-quoted, it would be a Go
-// character literal; Unquote returns the corresponding
-// one-character string.)
-func Unquote(s string) (string, error) {
- out, rem, err := unquote(s, true)
- if len(rem) > 0 {
- return "", ErrSyntax
- }
- return out, err
-}
-
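-// Illustration (not from the original Go source):
-//
-//	strconv.Unquote(`"a\nb"`)         // "a\nb" with a real newline
-//	strconv.QuotedPrefix(`"foo" bar`) // `"foo"`, quotes included
-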
-// unquote parses a quoted string at the start of the input,
-// returning the parsed prefix, the remaining suffix, and any parse errors.
-// If unescape is true, the parsed prefix is unescaped,
-// otherwise the input prefix is provided verbatim.
-func unquote(in string, unescape bool) (out, rem string, err error) {
- // Determine the quote form and optimistically find the terminating quote.
- if len(in) < 2 {
- return "", in, ErrSyntax
- }
- quote := in[0]
- end := index(in[1:], quote)
- if end < 0 {
- return "", in, ErrSyntax
- }
- end += 2 // position after terminating quote; may be wrong if escape sequences are present
-
- switch quote {
- case '`':
- switch {
- case !unescape:
- out = in[:end] // include quotes
- case !contains(in[:end], '\r'):
- out = in[len("`") : end-len("`")] // exclude quotes
- default:
- // Carriage return characters ('\r') inside raw string literals
- // are discarded from the raw string value.
- buf := make([]byte, 0, end-len("`")-len("\r")-len("`"))
- for i := len("`"); i < end-len("`"); i++ {
- if in[i] != '\r' {
- buf = append(buf, in[i])
- }
- }
- out = string(buf)
- }
-		// NOTE: Prior implementations did not verify that raw strings consist
-		// of valid UTF-8 characters, and we continue not to verify that here.
-		// The Go specification does not explicitly require valid UTF-8,
-		// but only mentions that it is implicitly valid for Go source code
-		// (which must be valid UTF-8).
- return out, in[end:], nil
- case '"', '\'':
- // Handle quoted strings without any escape sequences.
- if !contains(in[:end], '\\') && !contains(in[:end], '\n') {
- var valid bool
- switch quote {
- case '"':
- valid = utf8.ValidString(in[len(`"`) : end-len(`"`)])
- case '\'':
- r, n := utf8.DecodeRuneInString(in[len("'") : end-len("'")])
- valid = len("'")+n+len("'") == end && (r != utf8.RuneError || n != 1)
- }
- if valid {
- out = in[:end]
- if unescape {
- out = out[1 : end-1] // exclude quotes
- }
- return out, in[end:], nil
- }
- }
-
- // Handle quoted strings with escape sequences.
- var buf []byte
- in0 := in
- in = in[1:] // skip starting quote
- if unescape {
- buf = make([]byte, 0, 3*end/2) // try to avoid more allocations
- }
- for len(in) > 0 && in[0] != quote {
- // Process the next character,
- // rejecting any unescaped newline characters which are invalid.
- r, multibyte, rem, err := UnquoteChar(in, quote)
- if in[0] == '\n' || err != nil {
- return "", in0, ErrSyntax
- }
- in = rem
-
- // Append the character if unescaping the input.
- if unescape {
- if r < utf8.RuneSelf || !multibyte {
- buf = append(buf, byte(r))
- } else {
- var arr [utf8.UTFMax]byte
- n := utf8.EncodeRune(arr[:], r)
- buf = append(buf, arr[:n]...)
- }
- }
-
- // Single quoted strings must be a single character.
- if quote == '\'' {
- break
- }
- }
-
- // Verify that the string ends with a terminating quote.
- if !(len(in) > 0 && in[0] == quote) {
- return "", in0, ErrSyntax
- }
- in = in[1:] // skip terminating quote
-
- if unescape {
- return string(buf), in, nil
- }
- return in0[:len(in0)-len(in)], in, nil
- default:
- return "", in, ErrSyntax
- }
-}
-
-// bsearch16 returns the smallest i such that a[i] >= x.
-// If there is no such i, bsearch16 returns len(a).
-func bsearch16(a []uint16, x uint16) int {
- i, j := 0, len(a)
- for i < j {
- h := i + (j-i)>>1
- if a[h] < x {
- i = h + 1
- } else {
- j = h
- }
- }
- return i
-}
-
-// bsearch32 returns the smallest i such that a[i] >= x.
-// If there is no such i, bsearch32 returns len(a).
-func bsearch32(a []uint32, x uint32) int {
- i, j := 0, len(a)
- for i < j {
- h := i + (j-i)>>1
- if a[h] < x {
- i = h + 1
- } else {
- j = h
- }
- }
- return i
-}
-
-// TODO: IsPrint is a local implementation of unicode.IsPrint, verified by the tests
-// to give the same answer. It allows this package not to depend on unicode,
-// and therefore not pull in all the Unicode tables. If the linker were better
-// at tossing unused tables, we could get rid of this implementation.
-// That would be nice.
-
-// IsPrint reports whether the rune is defined as printable by Go, with
-// the same definition as unicode.IsPrint: letters, numbers, punctuation,
-// symbols and ASCII space.
-func IsPrint(r rune) bool {
- // Fast check for Latin-1
- if r <= 0xFF {
- if 0x20 <= r && r <= 0x7E {
-			// All ASCII from space through DEL-1 is printable.
- return true
- }
- if 0xA1 <= r && r <= 0xFF {
- // Similarly for ¡ through ÿ...
- return r != 0xAD // ...except for the bizarre soft hyphen.
- }
- return false
- }
-
- // Same algorithm, either on uint16 or uint32 value.
- // First, find first i such that isPrint[i] >= x.
- // This is the index of either the start or end of a pair that might span x.
- // The start is even (isPrint[i&^1]) and the end is odd (isPrint[i|1]).
- // If we find x in a range, make sure x is not in isNotPrint list.
-
- if 0 <= r && r < 1<<16 {
- rr, isPrint, isNotPrint := uint16(r), isPrint16, isNotPrint16
- i := bsearch16(isPrint, rr)
- if i >= len(isPrint) || rr < isPrint[i&^1] || isPrint[i|1] < rr {
- return false
- }
- j := bsearch16(isNotPrint, rr)
- return j >= len(isNotPrint) || isNotPrint[j] != rr
- }
-
- rr, isPrint, isNotPrint := uint32(r), isPrint32, isNotPrint32
- i := bsearch32(isPrint, rr)
- if i >= len(isPrint) || rr < isPrint[i&^1] || isPrint[i|1] < rr {
- return false
- }
- if r >= 0x20000 {
- return true
- }
- r -= 0x10000
- j := bsearch16(isNotPrint, uint16(r))
- return j >= len(isNotPrint) || isNotPrint[j] != uint16(r)
-}
-
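-// Illustration (not from the original Go source):
-//
-//	strconv.IsPrint('é')      // true
-//	strconv.IsPrint('\u00ad') // false: the soft-hyphen carve-out above
-//	strconv.IsPrint('\n')     // false
-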
-// IsGraphic reports whether the rune is defined as a Graphic by Unicode. Such
-// characters include letters, marks, numbers, punctuation, symbols, and
-// spaces, from categories L, M, N, P, S, and Zs.
-func IsGraphic(r rune) bool {
- if IsPrint(r) {
- return true
- }
- return isInGraphicList(r)
-}
-
-// isInGraphicList reports whether the rune is in the isGraphic list. This separation
-// from IsGraphic allows quoteWith to avoid two calls to IsPrint.
-// Should be called only if IsPrint fails.
-func isInGraphicList(r rune) bool {
- // We know r must fit in 16 bits - see makeisprint.go.
- if r > 0xFFFF {
- return false
- }
- rr := uint16(r)
- i := bsearch16(isGraphic, rr)
- return i < len(isGraphic) && rr == isGraphic[i]
-}
diff --git a/contrib/go/_std_1.18/src/strings/builder.go b/contrib/go/_std_1.18/src/strings/builder.go
deleted file mode 100644
index ba4df618bf..0000000000
--- a/contrib/go/_std_1.18/src/strings/builder.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package strings
-
-import (
- "unicode/utf8"
- "unsafe"
-)
-
-// A Builder is used to efficiently build a string using Write methods.
-// It minimizes memory copying. The zero value is ready to use.
-// Do not copy a non-zero Builder.
-type Builder struct {
- addr *Builder // of receiver, to detect copies by value
- buf []byte
-}
-
-// noescape hides a pointer from escape analysis. It is the identity function
-// but escape analysis doesn't think the output depends on the input.
-// noescape is inlined and currently compiles down to zero instructions.
-// USE CAREFULLY!
-// This was copied from the runtime; see issues 23382 and 7921.
-//go:nosplit
-//go:nocheckptr
-func noescape(p unsafe.Pointer) unsafe.Pointer {
- x := uintptr(p)
- return unsafe.Pointer(x ^ 0)
-}
-
-func (b *Builder) copyCheck() {
- if b.addr == nil {
- // This hack works around a failing of Go's escape analysis
- // that was causing b to escape and be heap allocated.
- // See issue 23382.
- // TODO: once issue 7921 is fixed, this should be reverted to
- // just "b.addr = b".
- b.addr = (*Builder)(noescape(unsafe.Pointer(b)))
- } else if b.addr != b {
- panic("strings: illegal use of non-zero Builder copied by value")
- }
-}
-
-// String returns the accumulated string.
-func (b *Builder) String() string {
- return *(*string)(unsafe.Pointer(&b.buf))
-}
-
-// Len returns the number of accumulated bytes; b.Len() == len(b.String()).
-func (b *Builder) Len() int { return len(b.buf) }
-
-// Cap returns the capacity of the builder's underlying byte slice. It is the
-// total space allocated for the string being built and includes any bytes
-// already written.
-func (b *Builder) Cap() int { return cap(b.buf) }
-
-// Reset resets the Builder to be empty.
-func (b *Builder) Reset() {
- b.addr = nil
- b.buf = nil
-}
-
-// grow copies the buffer to a new, larger buffer so that there are at least n
-// bytes of capacity beyond len(b.buf).
-func (b *Builder) grow(n int) {
- buf := make([]byte, len(b.buf), 2*cap(b.buf)+n)
- copy(buf, b.buf)
- b.buf = buf
-}
-
-// Grow grows b's capacity, if necessary, to guarantee space for
-// another n bytes. After Grow(n), at least n bytes can be written to b
-// without another allocation. If n is negative, Grow panics.
-func (b *Builder) Grow(n int) {
- b.copyCheck()
- if n < 0 {
- panic("strings.Builder.Grow: negative count")
- }
- if cap(b.buf)-len(b.buf) < n {
- b.grow(n)
- }
-}
-
-// Write appends the contents of p to b's buffer.
-// Write always returns len(p), nil.
-func (b *Builder) Write(p []byte) (int, error) {
- b.copyCheck()
- b.buf = append(b.buf, p...)
- return len(p), nil
-}
-
-// WriteByte appends the byte c to b's buffer.
-// The returned error is always nil.
-func (b *Builder) WriteByte(c byte) error {
- b.copyCheck()
- b.buf = append(b.buf, c)
- return nil
-}
-
-// WriteRune appends the UTF-8 encoding of Unicode code point r to b's buffer.
-// It returns the length of r and a nil error.
-func (b *Builder) WriteRune(r rune) (int, error) {
- b.copyCheck()
- // Compare as uint32 to correctly handle negative runes.
- if uint32(r) < utf8.RuneSelf {
- b.buf = append(b.buf, byte(r))
- return 1, nil
- }
- l := len(b.buf)
- if cap(b.buf)-l < utf8.UTFMax {
- b.grow(utf8.UTFMax)
- }
- n := utf8.EncodeRune(b.buf[l:l+utf8.UTFMax], r)
- b.buf = b.buf[:l+n]
- return n, nil
-}
-
-// WriteString appends the contents of s to b's buffer.
-// It returns the length of s and a nil error.
-func (b *Builder) WriteString(s string) (int, error) {
- b.copyCheck()
- b.buf = append(b.buf, s...)
- return len(s), nil
-}
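-
-// Illustration (not from the original Go source):
-//
-//	var b strings.Builder
-//	b.Grow(6) // one up-front allocation covers the writes below
-//	for i := 0; i < 3; i++ {
-//		b.WriteString("ab")
-//	}
-//	_ = b.String() // "ababab", returned without copying the bytes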
diff --git a/contrib/go/_std_1.18/src/strings/replace.go b/contrib/go/_std_1.18/src/strings/replace.go
deleted file mode 100644
index ee728bb22b..0000000000
--- a/contrib/go/_std_1.18/src/strings/replace.go
+++ /dev/null
@@ -1,569 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package strings
-
-import (
- "io"
- "sync"
-)
-
-// Replacer replaces a list of strings with replacements.
-// It is safe for concurrent use by multiple goroutines.
-type Replacer struct {
- once sync.Once // guards buildOnce method
- r replacer
- oldnew []string
-}
-
-// replacer is the interface that a replacement algorithm needs to implement.
-type replacer interface {
- Replace(s string) string
- WriteString(w io.Writer, s string) (n int, err error)
-}
-
-// NewReplacer returns a new Replacer from a list of old, new string
-// pairs. Replacements are performed in the order they appear in the
-// target string, without overlapping matches. The old string
-// comparisons are done in argument order.
-//
-// NewReplacer panics if given an odd number of arguments.
-func NewReplacer(oldnew ...string) *Replacer {
- if len(oldnew)%2 == 1 {
- panic("strings.NewReplacer: odd argument count")
- }
- return &Replacer{oldnew: append([]string(nil), oldnew...)}
-}
-
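-// Illustration (not from the original Go source):
-//
-//	r := strings.NewReplacer("<", "&lt;", ">", "&gt;")
-//	r.Replace("<b>") // "&lt;b&gt;"
-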
-func (r *Replacer) buildOnce() {
- r.r = r.build()
- r.oldnew = nil
-}
-
-func (b *Replacer) build() replacer {
- oldnew := b.oldnew
- if len(oldnew) == 2 && len(oldnew[0]) > 1 {
- return makeSingleStringReplacer(oldnew[0], oldnew[1])
- }
-
- allNewBytes := true
- for i := 0; i < len(oldnew); i += 2 {
- if len(oldnew[i]) != 1 {
- return makeGenericReplacer(oldnew)
- }
- if len(oldnew[i+1]) != 1 {
- allNewBytes = false
- }
- }
-
- if allNewBytes {
- r := byteReplacer{}
- for i := range r {
- r[i] = byte(i)
- }
-		// The first occurrence of an old->new mapping takes precedence
-		// over later ones with the same old string.
- for i := len(oldnew) - 2; i >= 0; i -= 2 {
- o := oldnew[i][0]
- n := oldnew[i+1][0]
- r[o] = n
- }
- return &r
- }
-
- r := byteStringReplacer{toReplace: make([]string, 0, len(oldnew)/2)}
-	// The first occurrence of an old->new mapping takes precedence
-	// over later ones with the same old string.
- for i := len(oldnew) - 2; i >= 0; i -= 2 {
- o := oldnew[i][0]
- n := oldnew[i+1]
- // To avoid counting repetitions multiple times.
- if r.replacements[o] == nil {
- // We need to use string([]byte{o}) instead of string(o),
- // to avoid utf8 encoding of o.
-			// E.g., string(byte(150)) produces a string of length 2.
- r.toReplace = append(r.toReplace, string([]byte{o}))
- }
- r.replacements[o] = []byte(n)
-
- }
- return &r
-}
-
-// Replace returns a copy of s with all replacements performed.
-func (r *Replacer) Replace(s string) string {
- r.once.Do(r.buildOnce)
- return r.r.Replace(s)
-}
-
-// WriteString writes s to w with all replacements performed.
-func (r *Replacer) WriteString(w io.Writer, s string) (n int, err error) {
- r.once.Do(r.buildOnce)
- return r.r.WriteString(w, s)
-}
-
-// trieNode is a node in a lookup trie for prioritized key/value pairs. Keys
-// and values may be empty. For example, the trie containing keys "ax", "ay",
-// "bcbc", "x" and "xy" could have eight nodes:
-//
-// n0 -
-// n1 a-
-// n2 .x+
-// n3 .y+
-// n4 b-
-// n5 .cbc+
-// n6 x+
-// n7 .y+
-//
-// n0 is the root node, and its children are n1, n4 and n6; n1's children are
-// n2 and n3; n4's child is n5; n6's child is n7. Nodes n0, n1 and n4 (marked
-// with a trailing "-") are partial keys, and nodes n2, n3, n5, n6 and n7
-// (marked with a trailing "+") are complete keys.
-type trieNode struct {
- // value is the value of the trie node's key/value pair. It is empty if
- // this node is not a complete key.
- value string
- // priority is the priority (higher is more important) of the trie node's
- // key/value pair; keys are not necessarily matched shortest- or longest-
- // first. Priority is positive if this node is a complete key, and zero
- // otherwise. In the example above, positive/zero priorities are marked
- // with a trailing "+" or "-".
- priority int
-
- // A trie node may have zero, one or more child nodes:
- // * if the remaining fields are zero, there are no children.
- // * if prefix and next are non-zero, there is one child in next.
- // * if table is non-zero, it defines all the children.
- //
- // Prefixes are preferred over tables when there is one child, but the
- // root node always uses a table for lookup efficiency.
-
- // prefix is the difference in keys between this trie node and the next.
- // In the example above, node n4 has prefix "cbc" and n4's next node is n5.
- // Node n5 has no children and so has zero prefix, next and table fields.
- prefix string
- next *trieNode
-
- // table is a lookup table indexed by the next byte in the key, after
- // remapping that byte through genericReplacer.mapping to create a dense
- // index. In the example above, the keys only use 'a', 'b', 'c', 'x' and
- // 'y', which remap to 0, 1, 2, 3 and 4. All other bytes remap to 5, and
- // genericReplacer.tableSize will be 5. Node n0's table will be
- // []*trieNode{ 0:n1, 1:n4, 3:n6 }, where the 0, 1 and 3 are the remapped
- // 'a', 'b' and 'x'.
- table []*trieNode
-}
-
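-// Illustration (not from the original Go source): in the trie pictured
-// above, looking up "ay" follows n0's table entry for 'a' to n1, then
-// n1's entry for 'y' to the complete key n3; looking up "b" stops at the
-// partial key n4 with no value.
-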
-func (t *trieNode) add(key, val string, priority int, r *genericReplacer) {
- if key == "" {
- if t.priority == 0 {
- t.value = val
- t.priority = priority
- }
- return
- }
-
- if t.prefix != "" {
- // Need to split the prefix among multiple nodes.
- var n int // length of the longest common prefix
- for ; n < len(t.prefix) && n < len(key); n++ {
- if t.prefix[n] != key[n] {
- break
- }
- }
- if n == len(t.prefix) {
- t.next.add(key[n:], val, priority, r)
- } else if n == 0 {
- // First byte differs, start a new lookup table here. Looking up
- // what is currently t.prefix[0] will lead to prefixNode, and
- // looking up key[0] will lead to keyNode.
- var prefixNode *trieNode
- if len(t.prefix) == 1 {
- prefixNode = t.next
- } else {
- prefixNode = &trieNode{
- prefix: t.prefix[1:],
- next: t.next,
- }
- }
- keyNode := new(trieNode)
- t.table = make([]*trieNode, r.tableSize)
- t.table[r.mapping[t.prefix[0]]] = prefixNode
- t.table[r.mapping[key[0]]] = keyNode
- t.prefix = ""
- t.next = nil
- keyNode.add(key[1:], val, priority, r)
- } else {
- // Insert new node after the common section of the prefix.
- next := &trieNode{
- prefix: t.prefix[n:],
- next: t.next,
- }
- t.prefix = t.prefix[:n]
- t.next = next
- next.add(key[n:], val, priority, r)
- }
- } else if t.table != nil {
- // Insert into existing table.
- m := r.mapping[key[0]]
- if t.table[m] == nil {
- t.table[m] = new(trieNode)
- }
- t.table[m].add(key[1:], val, priority, r)
- } else {
- t.prefix = key
- t.next = new(trieNode)
- t.next.add("", val, priority, r)
- }
-}
-
-func (r *genericReplacer) lookup(s string, ignoreRoot bool) (val string, keylen int, found bool) {
- // Iterate down the trie to the end, and grab the value and keylen with
- // the highest priority.
- bestPriority := 0
- node := &r.root
- n := 0
- for node != nil {
- if node.priority > bestPriority && !(ignoreRoot && node == &r.root) {
- bestPriority = node.priority
- val = node.value
- keylen = n
- found = true
- }
-
- if s == "" {
- break
- }
- if node.table != nil {
- index := r.mapping[s[0]]
- if int(index) == r.tableSize {
- break
- }
- node = node.table[index]
- s = s[1:]
- n++
- } else if node.prefix != "" && HasPrefix(s, node.prefix) {
- n += len(node.prefix)
- s = s[len(node.prefix):]
- node = node.next
- } else {
- break
- }
- }
- return
-}
-
-// genericReplacer is the fully generic algorithm.
-// It's used as a fallback when nothing faster can be used.
-type genericReplacer struct {
- root trieNode
- // tableSize is the size of a trie node's lookup table. It is the number
- // of unique key bytes.
- tableSize int
- // mapping maps from key bytes to a dense index for trieNode.table.
- mapping [256]byte
-}
-
-func makeGenericReplacer(oldnew []string) *genericReplacer {
- r := new(genericReplacer)
- // Find each byte used, then assign them each an index.
- for i := 0; i < len(oldnew); i += 2 {
- key := oldnew[i]
- for j := 0; j < len(key); j++ {
- r.mapping[key[j]] = 1
- }
- }
-
- for _, b := range r.mapping {
- r.tableSize += int(b)
- }
-
- var index byte
- for i, b := range r.mapping {
- if b == 0 {
- r.mapping[i] = byte(r.tableSize)
- } else {
- r.mapping[i] = index
- index++
- }
- }
- // Ensure root node uses a lookup table (for performance).
- r.root.table = make([]*trieNode, r.tableSize)
-
- for i := 0; i < len(oldnew); i += 2 {
- r.root.add(oldnew[i], oldnew[i+1], len(oldnew)-i, r)
- }
- return r
-}
-
-type appendSliceWriter []byte
-
-// Write writes to the buffer to satisfy io.Writer.
-func (w *appendSliceWriter) Write(p []byte) (int, error) {
- *w = append(*w, p...)
- return len(p), nil
-}
-
-// WriteString writes to the buffer without string->[]byte->string allocations.
-func (w *appendSliceWriter) WriteString(s string) (int, error) {
- *w = append(*w, s...)
- return len(s), nil
-}
-
-type stringWriter struct {
- w io.Writer
-}
-
-func (w stringWriter) WriteString(s string) (int, error) {
- return w.w.Write([]byte(s))
-}
-
-func getStringWriter(w io.Writer) io.StringWriter {
- sw, ok := w.(io.StringWriter)
- if !ok {
- sw = stringWriter{w}
- }
- return sw
-}
-
-func (r *genericReplacer) Replace(s string) string {
- buf := make(appendSliceWriter, 0, len(s))
- r.WriteString(&buf, s)
- return string(buf)
-}
-
-func (r *genericReplacer) WriteString(w io.Writer, s string) (n int, err error) {
- sw := getStringWriter(w)
- var last, wn int
- var prevMatchEmpty bool
- for i := 0; i <= len(s); {
- // Fast path: s[i] is not a prefix of any pattern.
- if i != len(s) && r.root.priority == 0 {
- index := int(r.mapping[s[i]])
- if index == r.tableSize || r.root.table[index] == nil {
- i++
- continue
- }
- }
-
- // Ignore the empty match iff the previous loop found the empty match.
- val, keylen, match := r.lookup(s[i:], prevMatchEmpty)
- prevMatchEmpty = match && keylen == 0
- if match {
- wn, err = sw.WriteString(s[last:i])
- n += wn
- if err != nil {
- return
- }
- wn, err = sw.WriteString(val)
- n += wn
- if err != nil {
- return
- }
- i += keylen
- last = i
- continue
- }
- i++
- }
- if last != len(s) {
- wn, err = sw.WriteString(s[last:])
- n += wn
- }
- return
-}
-
-// singleStringReplacer is the implementation that's used when there is only
-// one string to replace (and that string has more than one byte).
-type singleStringReplacer struct {
- finder *stringFinder
- // value is the new string that replaces that pattern when it's found.
- value string
-}
-
-func makeSingleStringReplacer(pattern string, value string) *singleStringReplacer {
- return &singleStringReplacer{finder: makeStringFinder(pattern), value: value}
-}
-
-func (r *singleStringReplacer) Replace(s string) string {
- var buf Builder
- i, matched := 0, false
- for {
- match := r.finder.next(s[i:])
- if match == -1 {
- break
- }
- matched = true
- buf.Grow(match + len(r.value))
- buf.WriteString(s[i : i+match])
- buf.WriteString(r.value)
- i += match + len(r.finder.pattern)
- }
- if !matched {
- return s
- }
- buf.WriteString(s[i:])
- return buf.String()
-}
-
-func (r *singleStringReplacer) WriteString(w io.Writer, s string) (n int, err error) {
- sw := getStringWriter(w)
- var i, wn int
- for {
- match := r.finder.next(s[i:])
- if match == -1 {
- break
- }
- wn, err = sw.WriteString(s[i : i+match])
- n += wn
- if err != nil {
- return
- }
- wn, err = sw.WriteString(r.value)
- n += wn
- if err != nil {
- return
- }
- i += match + len(r.finder.pattern)
- }
- wn, err = sw.WriteString(s[i:])
- n += wn
- return
-}
-
-// byteReplacer is the implementation that's used when all the "old"
-// and "new" values are single ASCII bytes.
-// The array contains replacement bytes indexed by old byte.
-type byteReplacer [256]byte
-
-func (r *byteReplacer) Replace(s string) string {
- var buf []byte // lazily allocated
- for i := 0; i < len(s); i++ {
- b := s[i]
- if r[b] != b {
- if buf == nil {
- buf = []byte(s)
- }
- buf[i] = r[b]
- }
- }
- if buf == nil {
- return s
- }
- return string(buf)
-}
-
-func (r *byteReplacer) WriteString(w io.Writer, s string) (n int, err error) {
- // TODO(bradfitz): use io.WriteString with slices of s, avoiding allocation.
- bufsize := 32 << 10
- if len(s) < bufsize {
- bufsize = len(s)
- }
- buf := make([]byte, bufsize)
-
- for len(s) > 0 {
- ncopy := copy(buf, s)
- s = s[ncopy:]
- for i, b := range buf[:ncopy] {
- buf[i] = r[b]
- }
- wn, err := w.Write(buf[:ncopy])
- n += wn
- if err != nil {
- return n, err
- }
- }
- return n, nil
-}
-
-// byteStringReplacer is the implementation that's used when all the
-// "old" values are single ASCII bytes but the "new" values vary in size.
-type byteStringReplacer struct {
- // replacements contains replacement byte slices indexed by old byte.
- // A nil []byte means that the old byte should not be replaced.
- replacements [256][]byte
-	// toReplace keeps a list of bytes to replace. Depending on the lengths of
-	// toReplace and the target string, it may be faster to use Count or a plain
-	// loop. We store single bytes as strings because Count takes a string.
-	toReplace []string
-}
-
-// countCutOff controls the ratio of string length to number of replacements
-// at which (*byteStringReplacer).Replace switches algorithms.
-// For strings with a higher ratio of length to replacements than that value,
-// we call Count for each replacement from toReplace.
-// For strings with a lower ratio we use a simple loop, because of Count's overhead.
-// countCutOff is an empirically determined overhead multiplier.
-// TODO(tocarip) revisit once we have register-based abi/mid-stack inlining.
-const countCutOff = 8
-
-func (r *byteStringReplacer) Replace(s string) string {
- newSize := len(s)
- anyChanges := false
- // Is it faster to use Count?
- if len(r.toReplace)*countCutOff <= len(s) {
- for _, x := range r.toReplace {
- if c := Count(s, x); c != 0 {
- // The -1 is because we are replacing 1 byte with len(replacements[b]) bytes.
- newSize += c * (len(r.replacements[x[0]]) - 1)
- anyChanges = true
- }
-
- }
- } else {
- for i := 0; i < len(s); i++ {
- b := s[i]
- if r.replacements[b] != nil {
- // See above for explanation of -1
- newSize += len(r.replacements[b]) - 1
- anyChanges = true
- }
- }
- }
- if !anyChanges {
- return s
- }
- buf := make([]byte, newSize)
- j := 0
- for i := 0; i < len(s); i++ {
- b := s[i]
- if r.replacements[b] != nil {
- j += copy(buf[j:], r.replacements[b])
- } else {
- buf[j] = b
- j++
- }
- }
- return string(buf)
-}
-
-func (r *byteStringReplacer) WriteString(w io.Writer, s string) (n int, err error) {
- sw := getStringWriter(w)
- last := 0
- for i := 0; i < len(s); i++ {
- b := s[i]
- if r.replacements[b] == nil {
- continue
- }
- if last != i {
- nw, err := sw.WriteString(s[last:i])
- n += nw
- if err != nil {
- return n, err
- }
- }
- last = i + 1
- nw, err := w.Write(r.replacements[b])
- n += nw
- if err != nil {
- return n, err
- }
- }
- if last != len(s) {
- var nw int
- nw, err = sw.WriteString(s[last:])
- n += nw
- }
- return
-}
diff --git a/contrib/go/_std_1.18/src/strings/strings.go b/contrib/go/_std_1.18/src/strings/strings.go
deleted file mode 100644
index 5793d9e26f..0000000000
--- a/contrib/go/_std_1.18/src/strings/strings.go
+++ /dev/null
@@ -1,1186 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package strings implements simple functions to manipulate UTF-8 encoded strings.
-//
-// For information about UTF-8 strings in Go, see https://blog.golang.org/strings.
-package strings
-
-import (
- "internal/bytealg"
- "unicode"
- "unicode/utf8"
-)
-
-// explode splits s into a slice of UTF-8 strings,
-// one string per Unicode character up to a maximum of n (n < 0 means no limit).
-// Invalid UTF-8 sequences become correct encodings of U+FFFD.
-func explode(s string, n int) []string {
- l := utf8.RuneCountInString(s)
- if n < 0 || n > l {
- n = l
- }
- a := make([]string, n)
- for i := 0; i < n-1; i++ {
- ch, size := utf8.DecodeRuneInString(s)
- a[i] = s[:size]
- s = s[size:]
- if ch == utf8.RuneError {
- a[i] = string(utf8.RuneError)
- }
- }
- if n > 0 {
- a[n-1] = s
- }
- return a
-}
-
-// Count counts the number of non-overlapping instances of substr in s.
-// If substr is an empty string, Count returns 1 + the number of Unicode code points in s.
-func Count(s, substr string) int {
- // special case
- if len(substr) == 0 {
- return utf8.RuneCountInString(s) + 1
- }
- if len(substr) == 1 {
- return bytealg.CountString(s, substr[0])
- }
- n := 0
- for {
- i := Index(s, substr)
- if i == -1 {
- return n
- }
- n++
- s = s[i+len(substr):]
- }
-}
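To make the empty-substring special case concrete, a small usage sketch (the values follow from the rules above):

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		fmt.Println(strings.Count("cheese", "e")) // 3
		// Empty substr: 1 + the number of runes in s.
		fmt.Println(strings.Count("five", "")) // 5
	}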
-
-// Contains reports whether substr is within s.
-func Contains(s, substr string) bool {
- return Index(s, substr) >= 0
-}
-
-// ContainsAny reports whether any Unicode code points in chars are within s.
-func ContainsAny(s, chars string) bool {
- return IndexAny(s, chars) >= 0
-}
-
-// ContainsRune reports whether the Unicode code point r is within s.
-func ContainsRune(s string, r rune) bool {
- return IndexRune(s, r) >= 0
-}
-
-// LastIndex returns the index of the last instance of substr in s, or -1 if substr is not present in s.
-func LastIndex(s, substr string) int {
- n := len(substr)
- switch {
- case n == 0:
- return len(s)
- case n == 1:
- return LastIndexByte(s, substr[0])
- case n == len(s):
- if substr == s {
- return 0
- }
- return -1
- case n > len(s):
- return -1
- }
- // Rabin-Karp search from the end of the string
- hashss, pow := bytealg.HashStrRev(substr)
- last := len(s) - n
- var h uint32
- for i := len(s) - 1; i >= last; i-- {
- h = h*bytealg.PrimeRK + uint32(s[i])
- }
- if h == hashss && s[last:] == substr {
- return last
- }
- for i := last - 1; i >= 0; i-- {
- h *= bytealg.PrimeRK
- h += uint32(s[i])
- h -= pow * uint32(s[i+n])
- if h == hashss && s[i:i+n] == substr {
- return i
- }
- }
- return -1
-}
-
-// IndexByte returns the index of the first instance of c in s, or -1 if c is not present in s.
-func IndexByte(s string, c byte) int {
- return bytealg.IndexByteString(s, c)
-}
-
-// IndexRune returns the index of the first instance of the Unicode code point
-// r, or -1 if r is not present in s.
-// If r is utf8.RuneError, it returns the first instance of any
-// invalid UTF-8 byte sequence.
-func IndexRune(s string, r rune) int {
- switch {
- case 0 <= r && r < utf8.RuneSelf:
- return IndexByte(s, byte(r))
- case r == utf8.RuneError:
- for i, r := range s {
- if r == utf8.RuneError {
- return i
- }
- }
- return -1
- case !utf8.ValidRune(r):
- return -1
- default:
- return Index(s, string(r))
- }
-}
-
-// IndexAny returns the index of the first instance of any Unicode code point
-// from chars in s, or -1 if no Unicode code point from chars is present in s.
-func IndexAny(s, chars string) int {
- if chars == "" {
- // Avoid scanning all of s.
- return -1
- }
- if len(chars) == 1 {
- // Avoid scanning all of s.
- r := rune(chars[0])
- if r >= utf8.RuneSelf {
- r = utf8.RuneError
- }
- return IndexRune(s, r)
- }
- if len(s) > 8 {
- if as, isASCII := makeASCIISet(chars); isASCII {
- for i := 0; i < len(s); i++ {
- if as.contains(s[i]) {
- return i
- }
- }
- return -1
- }
- }
- for i, c := range s {
- if IndexRune(chars, c) >= 0 {
- return i
- }
- }
- return -1
-}
-
-// LastIndexAny returns the index of the last instance of any Unicode code
-// point from chars in s, or -1 if no Unicode code point from chars is
-// present in s.
-func LastIndexAny(s, chars string) int {
- if chars == "" {
- // Avoid scanning all of s.
- return -1
- }
- if len(s) == 1 {
- rc := rune(s[0])
- if rc >= utf8.RuneSelf {
- rc = utf8.RuneError
- }
- if IndexRune(chars, rc) >= 0 {
- return 0
- }
- return -1
- }
- if len(s) > 8 {
- if as, isASCII := makeASCIISet(chars); isASCII {
- for i := len(s) - 1; i >= 0; i-- {
- if as.contains(s[i]) {
- return i
- }
- }
- return -1
- }
- }
- if len(chars) == 1 {
- rc := rune(chars[0])
- if rc >= utf8.RuneSelf {
- rc = utf8.RuneError
- }
- for i := len(s); i > 0; {
- r, size := utf8.DecodeLastRuneInString(s[:i])
- i -= size
- if rc == r {
- return i
- }
- }
- return -1
- }
- for i := len(s); i > 0; {
- r, size := utf8.DecodeLastRuneInString(s[:i])
- i -= size
- if IndexRune(chars, r) >= 0 {
- return i
- }
- }
- return -1
-}
-
-// LastIndexByte returns the index of the last instance of c in s, or -1 if c is not present in s.
-func LastIndexByte(s string, c byte) int {
- for i := len(s) - 1; i >= 0; i-- {
- if s[i] == c {
- return i
- }
- }
- return -1
-}
-
-// Generic split: splits after each instance of sep,
-// including sepSave bytes of sep in the subarrays.
-func genSplit(s, sep string, sepSave, n int) []string {
- if n == 0 {
- return nil
- }
- if sep == "" {
- return explode(s, n)
- }
- if n < 0 {
- n = Count(s, sep) + 1
- }
-
- a := make([]string, n)
- n--
- i := 0
- for i < n {
- m := Index(s, sep)
- if m < 0 {
- break
- }
- a[i] = s[:m+sepSave]
- s = s[m+len(sep):]
- i++
- }
- a[i] = s
- return a[:i+1]
-}
-
-// SplitN slices s into substrings separated by sep and returns a slice of
-// the substrings between those separators.
-//
-// The count determines the number of substrings to return:
-// n > 0: at most n substrings; the last substring will be the unsplit remainder.
-// n == 0: the result is nil (zero substrings)
-// n < 0: all substrings
-//
-// Edge cases for s and sep (for example, empty strings) are handled
-// as described in the documentation for Split.
-//
-// To split around the first instance of a separator, see Cut.
-func SplitN(s, sep string, n int) []string { return genSplit(s, sep, 0, n) }
-
-// SplitAfterN slices s into substrings after each instance of sep and
-// returns a slice of those substrings.
-//
-// The count determines the number of substrings to return:
-// n > 0: at most n substrings; the last substring will be the unsplit remainder.
-// n == 0: the result is nil (zero substrings)
-// n < 0: all substrings
-//
-// Edge cases for s and sep (for example, empty strings) are handled
-// as described in the documentation for SplitAfter.
-func SplitAfterN(s, sep string, n int) []string {
- return genSplit(s, sep, len(sep), n)
-}
-
-// Split slices s into all substrings separated by sep and returns a slice of
-// the substrings between those separators.
-//
-// If s does not contain sep and sep is not empty, Split returns a
-// slice of length 1 whose only element is s.
-//
-// If sep is empty, Split splits after each UTF-8 sequence. If both s
-// and sep are empty, Split returns an empty slice.
-//
-// It is equivalent to SplitN with a count of -1.
-//
-// To split around the first instance of a separator, see Cut.
-func Split(s, sep string) []string { return genSplit(s, sep, 0, -1) }
-
-// SplitAfter slices s into all substrings after each instance of sep and
-// returns a slice of those substrings.
-//
-// If s does not contain sep and sep is not empty, SplitAfter returns
-// a slice of length 1 whose only element is s.
-//
-// If sep is empty, SplitAfter splits after each UTF-8 sequence. If
-// both s and sep are empty, SplitAfter returns an empty slice.
-//
-// It is equivalent to SplitAfterN with a count of -1.
-func SplitAfter(s, sep string) []string {
- return genSplit(s, sep, len(sep), -1)
-}
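The n and sepSave parameters above are easiest to see side by side; a short sketch of the split family:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		fmt.Printf("%q\n", strings.Split("a,b,c", ","))      // ["a" "b" "c"]
		fmt.Printf("%q\n", strings.SplitN("a,b,c", ",", 2))  // ["a" "b,c"]
		// SplitAfter keeps the separator (sepSave == len(sep)).
		fmt.Printf("%q\n", strings.SplitAfter("a,b,c", ",")) // ["a," "b," "c"]
	}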
-
-var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
-
-// Fields splits the string s around each instance of one or more consecutive white space
-// characters, as defined by unicode.IsSpace, returning a slice of substrings of s or an
-// empty slice if s contains only white space.
-func Fields(s string) []string {
- // First count the fields.
- // This is an exact count if s is ASCII, otherwise it is an approximation.
- n := 0
- wasSpace := 1
- // setBits is used to track which bits are set in the bytes of s.
- setBits := uint8(0)
- for i := 0; i < len(s); i++ {
- r := s[i]
- setBits |= r
- isSpace := int(asciiSpace[r])
- n += wasSpace & ^isSpace
- wasSpace = isSpace
- }
-
- if setBits >= utf8.RuneSelf {
- // Some runes in the input string are not ASCII.
- return FieldsFunc(s, unicode.IsSpace)
- }
- // ASCII fast path
- a := make([]string, n)
- na := 0
- fieldStart := 0
- i := 0
- // Skip spaces in the front of the input.
- for i < len(s) && asciiSpace[s[i]] != 0 {
- i++
- }
- fieldStart = i
- for i < len(s) {
- if asciiSpace[s[i]] == 0 {
- i++
- continue
- }
- a[na] = s[fieldStart:i]
- na++
- i++
- // Skip spaces in between fields.
- for i < len(s) && asciiSpace[s[i]] != 0 {
- i++
- }
- fieldStart = i
- }
- if fieldStart < len(s) { // Last field might end at EOF.
- a[na] = s[fieldStart:]
- }
- return a
-}
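A quick sketch of the resulting behavior (the ASCII fast path applies here):

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// Runs of whitespace collapse; leading/trailing space yields no empty fields.
		fmt.Printf("%q\n", strings.Fields("  foo bar  baz   ")) // ["foo" "bar" "baz"]
	}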
-
-// FieldsFunc splits the string s at each run of Unicode code points c satisfying f(c)
-// and returns a slice of substrings of s. If all code points in s satisfy f(c) or the
-// string is empty, an empty slice is returned.
-//
-// FieldsFunc makes no guarantees about the order in which it calls f(c)
-// and assumes that f always returns the same value for a given c.
-func FieldsFunc(s string, f func(rune) bool) []string {
- // A span is used to record a slice of s of the form s[start:end].
- // The start index is inclusive and the end index is exclusive.
- type span struct {
- start int
- end int
- }
- spans := make([]span, 0, 32)
-
- // Find the field start and end indices.
- // Doing this in a separate pass (rather than slicing the string s
- // and collecting the result substrings right away) is significantly
- // more efficient, possibly due to cache effects.
- start := -1 // valid span start if >= 0
- for end, rune := range s {
- if f(rune) {
- if start >= 0 {
- spans = append(spans, span{start, end})
- // Set start to a negative value.
- // Note: using -1 here consistently and reproducibly
-				// slows down this code by several percent on amd64.
- start = ^start
- }
- } else {
- if start < 0 {
- start = end
- }
- }
- }
-
- // Last field might end at EOF.
- if start >= 0 {
- spans = append(spans, span{start, len(s)})
- }
-
- // Create strings from recorded field indices.
- a := make([]string, len(spans))
- for i, span := range spans {
- a[i] = s[span.start:span.end]
- }
-
- return a
-}
-
-// Join concatenates the elements of its first argument to create a single string. The separator
-// string sep is placed between elements in the resulting string.
-func Join(elems []string, sep string) string {
- switch len(elems) {
- case 0:
- return ""
- case 1:
- return elems[0]
- }
- n := len(sep) * (len(elems) - 1)
- for i := 0; i < len(elems); i++ {
- n += len(elems[i])
- }
-
- var b Builder
- b.Grow(n)
- b.WriteString(elems[0])
- for _, s := range elems[1:] {
- b.WriteString(sep)
- b.WriteString(s)
- }
- return b.String()
-}
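For completeness, a usage sketch; note the single up-front Grow sized by the loop above:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		fmt.Println(strings.Join([]string{"a", "b", "c"}, ", ")) // a, b, c
	}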
-
-// HasPrefix tests whether the string s begins with prefix.
-func HasPrefix(s, prefix string) bool {
- return len(s) >= len(prefix) && s[0:len(prefix)] == prefix
-}
-
-// HasSuffix tests whether the string s ends with suffix.
-func HasSuffix(s, suffix string) bool {
- return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
-}
-
-// Map returns a copy of the string s with all its characters modified
-// according to the mapping function. If mapping returns a negative value, the character is
-// dropped from the string with no replacement.
-func Map(mapping func(rune) rune, s string) string {
- // In the worst case, the string can grow when mapped, making
- // things unpleasant. But it's so rare we barge in assuming it's
- // fine. It could also shrink but that falls out naturally.
-
- // The output buffer b is initialized on demand, the first
- // time a character differs.
- var b Builder
-
- for i, c := range s {
- r := mapping(c)
- if r == c && c != utf8.RuneError {
- continue
- }
-
- var width int
- if c == utf8.RuneError {
- c, width = utf8.DecodeRuneInString(s[i:])
- if width != 1 && r == c {
- continue
- }
- } else {
- width = utf8.RuneLen(c)
- }
-
- b.Grow(len(s) + utf8.UTFMax)
- b.WriteString(s[:i])
- if r >= 0 {
- b.WriteRune(r)
- }
-
- s = s[i+width:]
- break
- }
-
- // Fast path for unchanged input
- if b.Cap() == 0 { // didn't call b.Grow above
- return s
- }
-
- for _, c := range s {
- r := mapping(c)
-
- if r >= 0 {
- // common case
-			// Due to inlining, it is more performant to determine whether WriteByte
-			// should be invoked rather than always calling WriteRune.
- if r < utf8.RuneSelf {
- b.WriteByte(byte(r))
- } else {
-				// r is not an ASCII rune.
- b.WriteRune(r)
- }
- }
- }
-
- return b.String()
-}
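A hedged usage sketch of Map with the classic rot13 mapping (any rune the function returns unchanged keeps the fast path above alive until the first difference):

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		rot13 := func(r rune) rune {
			switch {
			case r >= 'A' && r <= 'Z':
				return 'A' + (r-'A'+13)%26
			case r >= 'a' && r <= 'z':
				return 'a' + (r-'a'+13)%26
			}
			return r
		}
		fmt.Println(strings.Map(rot13, "Gopher")) // Tbcure
	}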
-
-// Repeat returns a new string consisting of count copies of the string s.
-//
-// It panics if count is negative or if
-// the result of (len(s) * count) overflows.
-func Repeat(s string, count int) string {
- if count == 0 {
- return ""
- }
-
- // Since we cannot return an error on overflow,
- // we should panic if the repeat will generate
- // an overflow.
- // See Issue golang.org/issue/16237
- if count < 0 {
- panic("strings: negative Repeat count")
- } else if len(s)*count/count != len(s) {
- panic("strings: Repeat count causes overflow")
- }
-
- n := len(s) * count
- var b Builder
- b.Grow(n)
- b.WriteString(s)
- for b.Len() < n {
- if b.Len() <= n/2 {
- b.WriteString(b.String())
- } else {
- b.WriteString(b.String()[:n-b.Len()])
- break
- }
- }
- return b.String()
-}
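The loop above grows the result geometrically by copying the builder's own contents; a small usage sketch:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		fmt.Println("ba" + strings.Repeat("na", 2)) // banana
	}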
-
-// ToUpper returns s with all Unicode letters mapped to their upper case.
-func ToUpper(s string) string {
- isASCII, hasLower := true, false
- for i := 0; i < len(s); i++ {
- c := s[i]
- if c >= utf8.RuneSelf {
- isASCII = false
- break
- }
- hasLower = hasLower || ('a' <= c && c <= 'z')
- }
-
- if isASCII { // optimize for ASCII-only strings.
- if !hasLower {
- return s
- }
- var b Builder
- b.Grow(len(s))
- for i := 0; i < len(s); i++ {
- c := s[i]
- if 'a' <= c && c <= 'z' {
- c -= 'a' - 'A'
- }
- b.WriteByte(c)
- }
- return b.String()
- }
- return Map(unicode.ToUpper, s)
-}
-
-// ToLower returns s with all Unicode letters mapped to their lower case.
-func ToLower(s string) string {
- isASCII, hasUpper := true, false
- for i := 0; i < len(s); i++ {
- c := s[i]
- if c >= utf8.RuneSelf {
- isASCII = false
- break
- }
- hasUpper = hasUpper || ('A' <= c && c <= 'Z')
- }
-
- if isASCII { // optimize for ASCII-only strings.
- if !hasUpper {
- return s
- }
- var b Builder
- b.Grow(len(s))
- for i := 0; i < len(s); i++ {
- c := s[i]
- if 'A' <= c && c <= 'Z' {
- c += 'a' - 'A'
- }
- b.WriteByte(c)
- }
- return b.String()
- }
- return Map(unicode.ToLower, s)
-}
-
-// ToTitle returns a copy of the string s with all Unicode letters mapped to
-// their Unicode title case.
-func ToTitle(s string) string { return Map(unicode.ToTitle, s) }
-
-// ToUpperSpecial returns a copy of the string s with all Unicode letters mapped to their
-// upper case using the case mapping specified by c.
-func ToUpperSpecial(c unicode.SpecialCase, s string) string {
- return Map(c.ToUpper, s)
-}
-
-// ToLowerSpecial returns a copy of the string s with all Unicode letters mapped to their
-// lower case using the case mapping specified by c.
-func ToLowerSpecial(c unicode.SpecialCase, s string) string {
- return Map(c.ToLower, s)
-}
-
-// ToTitleSpecial returns a copy of the string s with all Unicode letters mapped to their
-// Unicode title case, giving priority to the special casing rules.
-func ToTitleSpecial(c unicode.SpecialCase, s string) string {
- return Map(c.ToTitle, s)
-}
-
-// ToValidUTF8 returns a copy of the string s with each run of invalid UTF-8 byte sequences
-// replaced by the replacement string, which may be empty.
-func ToValidUTF8(s, replacement string) string {
- var b Builder
-
- for i, c := range s {
- if c != utf8.RuneError {
- continue
- }
-
- _, wid := utf8.DecodeRuneInString(s[i:])
- if wid == 1 {
- b.Grow(len(s) + len(replacement))
- b.WriteString(s[:i])
- s = s[i:]
- break
- }
- }
-
- // Fast path for unchanged input
- if b.Cap() == 0 { // didn't call b.Grow above
- return s
- }
-
- invalid := false // previous byte was from an invalid UTF-8 sequence
- for i := 0; i < len(s); {
- c := s[i]
- if c < utf8.RuneSelf {
- i++
- invalid = false
- b.WriteByte(c)
- continue
- }
- _, wid := utf8.DecodeRuneInString(s[i:])
- if wid == 1 {
- i++
- if !invalid {
- invalid = true
- b.WriteString(replacement)
- }
- continue
- }
- invalid = false
- b.WriteString(s[i : i+wid])
- i += wid
- }
-
- return b.String()
-}
-
-// isSeparator reports whether the rune could mark a word boundary.
-// TODO: update when package unicode captures more of the properties.
-func isSeparator(r rune) bool {
- // ASCII alphanumerics and underscore are not separators
- if r <= 0x7F {
- switch {
- case '0' <= r && r <= '9':
- return false
- case 'a' <= r && r <= 'z':
- return false
- case 'A' <= r && r <= 'Z':
- return false
- case r == '_':
- return false
- }
- return true
- }
- // Letters and digits are not separators
- if unicode.IsLetter(r) || unicode.IsDigit(r) {
- return false
- }
- // Otherwise, all we can do for now is treat spaces as separators.
- return unicode.IsSpace(r)
-}
-
-// Title returns a copy of the string s with all Unicode letters that begin words
-// mapped to their Unicode title case.
-//
-// Deprecated: The rule Title uses for word boundaries does not handle Unicode
-// punctuation properly. Use golang.org/x/text/cases instead.
-func Title(s string) string {
- // Use a closure here to remember state.
- // Hackish but effective. Depends on Map scanning in order and calling
- // the closure once per rune.
- prev := ' '
- return Map(
- func(r rune) rune {
- if isSeparator(prev) {
- prev = r
- return unicode.ToTitle(r)
- }
- prev = r
- return r
- },
- s)
-}
-
-// TrimLeftFunc returns a slice of the string s with all leading
-// Unicode code points c satisfying f(c) removed.
-func TrimLeftFunc(s string, f func(rune) bool) string {
- i := indexFunc(s, f, false)
- if i == -1 {
- return ""
- }
- return s[i:]
-}
-
-// TrimRightFunc returns a slice of the string s with all trailing
-// Unicode code points c satisfying f(c) removed.
-func TrimRightFunc(s string, f func(rune) bool) string {
- i := lastIndexFunc(s, f, false)
- if i >= 0 && s[i] >= utf8.RuneSelf {
- _, wid := utf8.DecodeRuneInString(s[i:])
- i += wid
- } else {
- i++
- }
- return s[0:i]
-}
-
-// TrimFunc returns a slice of the string s with all leading
-// and trailing Unicode code points c satisfying f(c) removed.
-func TrimFunc(s string, f func(rune) bool) string {
- return TrimRightFunc(TrimLeftFunc(s, f), f)
-}
-
-// IndexFunc returns the index into s of the first Unicode
-// code point satisfying f(c), or -1 if none do.
-func IndexFunc(s string, f func(rune) bool) int {
- return indexFunc(s, f, true)
-}
-
-// LastIndexFunc returns the index into s of the last
-// Unicode code point satisfying f(c), or -1 if none do.
-func LastIndexFunc(s string, f func(rune) bool) int {
- return lastIndexFunc(s, f, true)
-}
-
-// indexFunc is the same as IndexFunc except that if
-// truth==false, the sense of the predicate function is
-// inverted.
-func indexFunc(s string, f func(rune) bool, truth bool) int {
- for i, r := range s {
- if f(r) == truth {
- return i
- }
- }
- return -1
-}
-
-// lastIndexFunc is the same as LastIndexFunc except that if
-// truth==false, the sense of the predicate function is
-// inverted.
-func lastIndexFunc(s string, f func(rune) bool, truth bool) int {
- for i := len(s); i > 0; {
- r, size := utf8.DecodeLastRuneInString(s[0:i])
- i -= size
- if f(r) == truth {
- return i
- }
- }
- return -1
-}
-
-// asciiSet is a 32-byte value, where each bit represents the presence of a
-// given ASCII character in the set. The 128 bits of the lower 16 bytes,
-// starting with the least-significant bit of the lowest word to the
-// most-significant bit of the highest word, map to the full range of all
-// 128 ASCII characters. The 128 bits of the upper 16 bytes will be zeroed,
-// ensuring that any non-ASCII character will be reported as not in the set.
-// This allocates a total of 32 bytes even though the upper half
-// is unused, to avoid bounds checks in asciiSet.contains.
-type asciiSet [8]uint32
-
-// makeASCIISet creates a set of ASCII characters and reports whether all
-// characters in chars are ASCII.
-func makeASCIISet(chars string) (as asciiSet, ok bool) {
- for i := 0; i < len(chars); i++ {
- c := chars[i]
- if c >= utf8.RuneSelf {
- return as, false
- }
- as[c/32] |= 1 << (c % 32)
- }
- return as, true
-}
-
-// contains reports whether c is inside the set.
-func (as *asciiSet) contains(c byte) bool {
- return (as[c/32] & (1 << (c % 32))) != 0
-}
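asciiSet is unexported, so it cannot be exercised from outside the package, but the word/bit arithmetic is easy to reproduce standalone; a sketch of the same indexing:

	package main

	import "fmt"

	func main() {
		var set [8]uint32
		c := byte('e') // 0x65 == 101: word 101/32 == 3, bit 101%32 == 5
		set[c/32] |= 1 << (c % 32)
		fmt.Println(set[c/32]&(1<<(c%32)) != 0)     // true
		fmt.Println(set['f'/32]&(1<<('f'%32)) != 0) // false: bit 6 of word 3 unset
	}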
-
-// Trim returns a slice of the string s with all leading and
-// trailing Unicode code points contained in cutset removed.
-func Trim(s, cutset string) string {
- if s == "" || cutset == "" {
- return s
- }
- if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
- return trimLeftByte(trimRightByte(s, cutset[0]), cutset[0])
- }
- if as, ok := makeASCIISet(cutset); ok {
- return trimLeftASCII(trimRightASCII(s, &as), &as)
- }
- return trimLeftUnicode(trimRightUnicode(s, cutset), cutset)
-}
-
-// TrimLeft returns a slice of the string s with all leading
-// Unicode code points contained in cutset removed.
-//
-// To remove a prefix, use TrimPrefix instead.
-func TrimLeft(s, cutset string) string {
- if s == "" || cutset == "" {
- return s
- }
- if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
- return trimLeftByte(s, cutset[0])
- }
- if as, ok := makeASCIISet(cutset); ok {
- return trimLeftASCII(s, &as)
- }
- return trimLeftUnicode(s, cutset)
-}
-
-func trimLeftByte(s string, c byte) string {
- for len(s) > 0 && s[0] == c {
- s = s[1:]
- }
- return s
-}
-
-func trimLeftASCII(s string, as *asciiSet) string {
- for len(s) > 0 {
- if !as.contains(s[0]) {
- break
- }
- s = s[1:]
- }
- return s
-}
-
-func trimLeftUnicode(s, cutset string) string {
- for len(s) > 0 {
- r, n := rune(s[0]), 1
- if r >= utf8.RuneSelf {
- r, n = utf8.DecodeRuneInString(s)
- }
- if !ContainsRune(cutset, r) {
- break
- }
- s = s[n:]
- }
- return s
-}
-
-// TrimRight returns a slice of the string s, with all trailing
-// Unicode code points contained in cutset removed.
-//
-// To remove a suffix, use TrimSuffix instead.
-func TrimRight(s, cutset string) string {
- if s == "" || cutset == "" {
- return s
- }
- if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
- return trimRightByte(s, cutset[0])
- }
- if as, ok := makeASCIISet(cutset); ok {
- return trimRightASCII(s, &as)
- }
- return trimRightUnicode(s, cutset)
-}
-
-func trimRightByte(s string, c byte) string {
- for len(s) > 0 && s[len(s)-1] == c {
- s = s[:len(s)-1]
- }
- return s
-}
-
-func trimRightASCII(s string, as *asciiSet) string {
- for len(s) > 0 {
- if !as.contains(s[len(s)-1]) {
- break
- }
- s = s[:len(s)-1]
- }
- return s
-}
-
-func trimRightUnicode(s, cutset string) string {
- for len(s) > 0 {
- r, n := rune(s[len(s)-1]), 1
- if r >= utf8.RuneSelf {
- r, n = utf8.DecodeLastRuneInString(s)
- }
- if !ContainsRune(cutset, r) {
- break
- }
- s = s[:len(s)-n]
- }
- return s
-}
-
-// TrimSpace returns a slice of the string s, with all leading
-// and trailing white space removed, as defined by Unicode.
-func TrimSpace(s string) string {
- // Fast path for ASCII: look for the first ASCII non-space byte
- start := 0
- for ; start < len(s); start++ {
- c := s[start]
- if c >= utf8.RuneSelf {
- // If we run into a non-ASCII byte, fall back to the
- // slower unicode-aware method on the remaining bytes
- return TrimFunc(s[start:], unicode.IsSpace)
- }
- if asciiSpace[c] == 0 {
- break
- }
- }
-
- // Now look for the first ASCII non-space byte from the end
- stop := len(s)
- for ; stop > start; stop-- {
- c := s[stop-1]
- if c >= utf8.RuneSelf {
- return TrimFunc(s[start:stop], unicode.IsSpace)
- }
- if asciiSpace[c] == 0 {
- break
- }
- }
-
-	// At this point s[start:stop] starts and ends with ASCII
-	// non-space bytes, so we're done. Non-ASCII cases have already
-	// been handled above.
- return s[start:stop]
-}
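A short sketch contrasting cutset-based Trim with TrimSpace:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		fmt.Println(strings.TrimSpace(" \t\n Hello, Gophers \n\t\r\n")) // Hello, Gophers
		// Trim removes any code point found in the cutset, from both ends.
		fmt.Println(strings.Trim("¡¡¡Hello, Gophers!!!", "!¡")) // Hello, Gophers
	}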
-
-// TrimPrefix returns s without the provided leading prefix string.
-// If s doesn't start with prefix, s is returned unchanged.
-func TrimPrefix(s, prefix string) string {
- if HasPrefix(s, prefix) {
- return s[len(prefix):]
- }
- return s
-}
-
-// TrimSuffix returns s without the provided trailing suffix string.
-// If s doesn't end with suffix, s is returned unchanged.
-func TrimSuffix(s, suffix string) string {
- if HasSuffix(s, suffix) {
- return s[:len(s)-len(suffix)]
- }
- return s
-}
-
-// Replace returns a copy of the string s with the first n
-// non-overlapping instances of old replaced by new.
-// If old is empty, it matches at the beginning of the string
-// and after each UTF-8 sequence, yielding up to k+1 replacements
-// for a k-rune string.
-// If n < 0, there is no limit on the number of replacements.
-func Replace(s, old, new string, n int) string {
- if old == new || n == 0 {
- return s // avoid allocation
- }
-
- // Compute number of replacements.
- if m := Count(s, old); m == 0 {
- return s // avoid allocation
- } else if n < 0 || m < n {
- n = m
- }
-
- // Apply replacements to buffer.
- var b Builder
- b.Grow(len(s) + n*(len(new)-len(old)))
- start := 0
- for i := 0; i < n; i++ {
- j := start
- if len(old) == 0 {
- if i > 0 {
- _, wid := utf8.DecodeRuneInString(s[start:])
- j += wid
- }
- } else {
- j += Index(s[start:], old)
- }
- b.WriteString(s[start:j])
- b.WriteString(new)
- start = j + len(old)
- }
- b.WriteString(s[start:])
- return b.String()
-}
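A sketch of the n parameter and of the empty-old rule described above:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		fmt.Println(strings.Replace("oink oink oink", "k", "ky", 2)) // oinky oinky oink
		// Empty old matches before each rune and at the end: k+1 insertions.
		fmt.Println(strings.Replace("abc", "", "-", -1)) // -a-b-c-
	}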
-
-// ReplaceAll returns a copy of the string s with all
-// non-overlapping instances of old replaced by new.
-// If old is empty, it matches at the beginning of the string
-// and after each UTF-8 sequence, yielding up to k+1 replacements
-// for a k-rune string.
-func ReplaceAll(s, old, new string) string {
- return Replace(s, old, new, -1)
-}
-
-// EqualFold reports whether s and t, interpreted as UTF-8 strings,
-// are equal under Unicode case-folding, which is a more general
-// form of case-insensitivity.
-func EqualFold(s, t string) bool {
- for s != "" && t != "" {
- // Extract first rune from each string.
- var sr, tr rune
- if s[0] < utf8.RuneSelf {
- sr, s = rune(s[0]), s[1:]
- } else {
- r, size := utf8.DecodeRuneInString(s)
- sr, s = r, s[size:]
- }
- if t[0] < utf8.RuneSelf {
- tr, t = rune(t[0]), t[1:]
- } else {
- r, size := utf8.DecodeRuneInString(t)
- tr, t = r, t[size:]
- }
-
- // If they match, keep going; if not, return false.
-
- // Easy case.
- if tr == sr {
- continue
- }
-
- // Make sr < tr to simplify what follows.
- if tr < sr {
- tr, sr = sr, tr
- }
- // Fast check for ASCII.
- if tr < utf8.RuneSelf {
- // ASCII only, sr/tr must be upper/lower case
- if 'A' <= sr && sr <= 'Z' && tr == sr+'a'-'A' {
- continue
- }
- return false
- }
-
- // General case. SimpleFold(x) returns the next equivalent rune > x
- // or wraps around to smaller values.
- r := unicode.SimpleFold(sr)
- for r != sr && r < tr {
- r = unicode.SimpleFold(r)
- }
- if r == tr {
- continue
- }
- return false
- }
-
- // One string is empty. Are both?
- return s == t
-}
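Case folding is broader than a ToLower comparison; a sketch (U+017F LATIN SMALL LETTER LONG S folds with s/S via SimpleFold):

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		fmt.Println(strings.EqualFold("Go", "GO")) // true
		fmt.Println(strings.EqualFold("ſ", "S"))   // true: simple case folding, not ASCII case
	}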
-
-// Index returns the index of the first instance of substr in s, or -1 if substr is not present in s.
-func Index(s, substr string) int {
- n := len(substr)
- switch {
- case n == 0:
- return 0
- case n == 1:
- return IndexByte(s, substr[0])
- case n == len(s):
- if substr == s {
- return 0
- }
- return -1
- case n > len(s):
- return -1
- case n <= bytealg.MaxLen:
-		// Use brute force when both s and substr are small.
- if len(s) <= bytealg.MaxBruteForce {
- return bytealg.IndexString(s, substr)
- }
- c0 := substr[0]
- c1 := substr[1]
- i := 0
- t := len(s) - n + 1
- fails := 0
- for i < t {
- if s[i] != c0 {
- // IndexByte is faster than bytealg.IndexString, so use it as long as
- // we're not getting lots of false positives.
- o := IndexByte(s[i+1:t], c0)
- if o < 0 {
- return -1
- }
- i += o + 1
- }
- if s[i+1] == c1 && s[i:i+n] == substr {
- return i
- }
- fails++
- i++
- // Switch to bytealg.IndexString when IndexByte produces too many false positives.
- if fails > bytealg.Cutover(i) {
- r := bytealg.IndexString(s[i:], substr)
- if r >= 0 {
- return r + i
- }
- return -1
- }
- }
- return -1
- }
- c0 := substr[0]
- c1 := substr[1]
- i := 0
- t := len(s) - n + 1
- fails := 0
- for i < t {
- if s[i] != c0 {
- o := IndexByte(s[i+1:t], c0)
- if o < 0 {
- return -1
- }
- i += o + 1
- }
- if s[i+1] == c1 && s[i:i+n] == substr {
- return i
- }
- i++
- fails++
- if fails >= 4+i>>4 && i < t {
- // See comment in ../bytes/bytes.go.
- j := bytealg.IndexRabinKarp(s[i:], substr)
- if j < 0 {
- return -1
- }
- return i + j
- }
- }
- return -1
-}
-
-// Cut slices s around the first instance of sep,
-// returning the text before and after sep.
-// The found result reports whether sep appears in s.
-// If sep does not appear in s, Cut returns s, "", false.
-func Cut(s, sep string) (before, after string, found bool) {
- if i := Index(s, sep); i >= 0 {
- return s[:i], s[i+len(sep):], true
- }
- return s, "", false
-}
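A sketch of both outcomes of Cut:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		before, after, found := strings.Cut("Gopher", "ph")
		fmt.Println(before, after, found) // Go er true
		before, after, found = strings.Cut("Gopher", "xy")
		fmt.Println(before, after, found) // Gopher  false
	}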
diff --git a/contrib/go/_std_1.18/src/sync/atomic/doc.go b/contrib/go/_std_1.18/src/sync/atomic/doc.go
deleted file mode 100644
index 805ef956d5..0000000000
--- a/contrib/go/_std_1.18/src/sync/atomic/doc.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package atomic provides low-level atomic memory primitives
-// useful for implementing synchronization algorithms.
-//
-// These functions require great care to be used correctly.
-// Except for special, low-level applications, synchronization is better
-// done with channels or the facilities of the sync package.
-// Share memory by communicating;
-// don't communicate by sharing memory.
-//
-// The swap operation, implemented by the SwapT functions, is the atomic
-// equivalent of:
-//
-// old = *addr
-// *addr = new
-// return old
-//
-// The compare-and-swap operation, implemented by the CompareAndSwapT
-// functions, is the atomic equivalent of:
-//
-// if *addr == old {
-// *addr = new
-// return true
-// }
-// return false
-//
-// The add operation, implemented by the AddT functions, is the atomic
-// equivalent of:
-//
-// *addr += delta
-// return *addr
-//
-// The load and store operations, implemented by the LoadT and StoreT
-// functions, are the atomic equivalents of "return *addr" and
-// "*addr = val".
-//
-package atomic
-
-import (
- "unsafe"
-)
-
-// BUG(rsc): On 386, the 64-bit functions use instructions unavailable before the Pentium MMX.
-//
-// On non-Linux ARM, the 64-bit functions use instructions unavailable before the ARMv6k core.
-//
-// On ARM, 386, and 32-bit MIPS, it is the caller's responsibility
-// to arrange for 64-bit alignment of 64-bit words accessed atomically.
-// The first word in a variable or in an allocated struct, array, or slice can
-// be relied upon to be 64-bit aligned.
-
-// SwapInt32 atomically stores new into *addr and returns the previous *addr value.
-func SwapInt32(addr *int32, new int32) (old int32)
-
-// SwapInt64 atomically stores new into *addr and returns the previous *addr value.
-func SwapInt64(addr *int64, new int64) (old int64)
-
-// SwapUint32 atomically stores new into *addr and returns the previous *addr value.
-func SwapUint32(addr *uint32, new uint32) (old uint32)
-
-// SwapUint64 atomically stores new into *addr and returns the previous *addr value.
-func SwapUint64(addr *uint64, new uint64) (old uint64)
-
-// SwapUintptr atomically stores new into *addr and returns the previous *addr value.
-func SwapUintptr(addr *uintptr, new uintptr) (old uintptr)
-
-// SwapPointer atomically stores new into *addr and returns the previous *addr value.
-func SwapPointer(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer)
-
-// CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.
-func CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)
-
-// CompareAndSwapInt64 executes the compare-and-swap operation for an int64 value.
-func CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)
-
-// CompareAndSwapUint32 executes the compare-and-swap operation for a uint32 value.
-func CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)
-
-// CompareAndSwapUint64 executes the compare-and-swap operation for a uint64 value.
-func CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)
-
-// CompareAndSwapUintptr executes the compare-and-swap operation for a uintptr value.
-func CompareAndSwapUintptr(addr *uintptr, old, new uintptr) (swapped bool)
-
-// CompareAndSwapPointer executes the compare-and-swap operation for an unsafe.Pointer value.
-func CompareAndSwapPointer(addr *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool)
-
-// AddInt32 atomically adds delta to *addr and returns the new value.
-func AddInt32(addr *int32, delta int32) (new int32)
-
-// AddUint32 atomically adds delta to *addr and returns the new value.
-// To subtract a signed positive constant value c from x, do AddUint32(&x, ^uint32(c-1)).
-// In particular, to decrement x, do AddUint32(&x, ^uint32(0)).
-func AddUint32(addr *uint32, delta uint32) (new uint32)
-
-// AddInt64 atomically adds delta to *addr and returns the new value.
-func AddInt64(addr *int64, delta int64) (new int64)
-
-// AddUint64 atomically adds delta to *addr and returns the new value.
-// To subtract a signed positive constant value c from x, do AddUint64(&x, ^uint64(c-1)).
-// In particular, to decrement x, do AddUint64(&x, ^uint64(0)).
-func AddUint64(addr *uint64, delta uint64) (new uint64)
-
-// AddUintptr atomically adds delta to *addr and returns the new value.
-func AddUintptr(addr *uintptr, delta uintptr) (new uintptr)
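The two's-complement subtraction trick described for AddUint32/AddUint64 in the comments above, as a sketch:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	func main() {
		var x uint32 = 5
		atomic.AddUint32(&x, ^uint32(0))   // decrement: adding 0xFFFFFFFF == -1 mod 2^32
		atomic.AddUint32(&x, ^uint32(3-1)) // subtract 3
		fmt.Println(atomic.LoadUint32(&x)) // 1
	}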
-
-// LoadInt32 atomically loads *addr.
-func LoadInt32(addr *int32) (val int32)
-
-// LoadInt64 atomically loads *addr.
-func LoadInt64(addr *int64) (val int64)
-
-// LoadUint32 atomically loads *addr.
-func LoadUint32(addr *uint32) (val uint32)
-
-// LoadUint64 atomically loads *addr.
-func LoadUint64(addr *uint64) (val uint64)
-
-// LoadUintptr atomically loads *addr.
-func LoadUintptr(addr *uintptr) (val uintptr)
-
-// LoadPointer atomically loads *addr.
-func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
-
-// StoreInt32 atomically stores val into *addr.
-func StoreInt32(addr *int32, val int32)
-
-// StoreInt64 atomically stores val into *addr.
-func StoreInt64(addr *int64, val int64)
-
-// StoreUint32 atomically stores val into *addr.
-func StoreUint32(addr *uint32, val uint32)
-
-// StoreUint64 atomically stores val into *addr.
-func StoreUint64(addr *uint64, val uint64)
-
-// StoreUintptr atomically stores val into *addr.
-func StoreUintptr(addr *uintptr, val uintptr)
-
-// StorePointer atomically stores val into *addr.
-func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer)
diff --git a/contrib/go/_std_1.18/src/sync/cond.go b/contrib/go/_std_1.18/src/sync/cond.go
deleted file mode 100644
index b254c9360a..0000000000
--- a/contrib/go/_std_1.18/src/sync/cond.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync
-
-import (
- "sync/atomic"
- "unsafe"
-)
-
-// Cond implements a condition variable, a rendezvous point
-// for goroutines waiting for or announcing the occurrence
-// of an event.
-//
-// Each Cond has an associated Locker L (often a *Mutex or *RWMutex),
-// which must be held when changing the condition and
-// when calling the Wait method.
-//
-// A Cond must not be copied after first use.
-type Cond struct {
- noCopy noCopy
-
- // L is held while observing or changing the condition
- L Locker
-
- notify notifyList
- checker copyChecker
-}
-
-// NewCond returns a new Cond with Locker l.
-func NewCond(l Locker) *Cond {
- return &Cond{L: l}
-}
-
-// Wait atomically unlocks c.L and suspends execution
-// of the calling goroutine. After later resuming execution,
-// Wait locks c.L before returning. Unlike in other systems,
-// Wait cannot return unless awoken by Broadcast or Signal.
-//
-// Because c.L is not locked when Wait first resumes, the caller
-// typically cannot assume that the condition is true when
-// Wait returns. Instead, the caller should Wait in a loop:
-//
-// c.L.Lock()
-// for !condition() {
-// c.Wait()
-// }
-// ... make use of condition ...
-// c.L.Unlock()
-//
-func (c *Cond) Wait() {
- c.checker.check()
- t := runtime_notifyListAdd(&c.notify)
- c.L.Unlock()
- runtime_notifyListWait(&c.notify, t)
- c.L.Lock()
-}
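The loop-around-Wait pattern from the doc comment above, in runnable form (a minimal sketch; the goroutine and the ready flag are illustrative):

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		var mu sync.Mutex
		cond := sync.NewCond(&mu)
		ready := false
		go func() {
			mu.Lock()
			ready = true
			mu.Unlock()
			cond.Signal()
		}()
		mu.Lock()
		for !ready { // re-check: Wait can resume with the condition still false
			cond.Wait()
		}
		mu.Unlock()
		fmt.Println("condition observed")
	}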
-
-// Signal wakes one goroutine waiting on c, if there is any.
-//
-// It is allowed but not required for the caller to hold c.L
-// during the call.
-func (c *Cond) Signal() {
- c.checker.check()
- runtime_notifyListNotifyOne(&c.notify)
-}
-
-// Broadcast wakes all goroutines waiting on c.
-//
-// It is allowed but not required for the caller to hold c.L
-// during the call.
-func (c *Cond) Broadcast() {
- c.checker.check()
- runtime_notifyListNotifyAll(&c.notify)
-}
-
-// copyChecker holds a back pointer to itself to detect object copying.
-type copyChecker uintptr
-
-func (c *copyChecker) check() {
- if uintptr(*c) != uintptr(unsafe.Pointer(c)) &&
- !atomic.CompareAndSwapUintptr((*uintptr)(c), 0, uintptr(unsafe.Pointer(c))) &&
- uintptr(*c) != uintptr(unsafe.Pointer(c)) {
- panic("sync.Cond is copied")
- }
-}
-
-// noCopy may be embedded into structs which must not be copied
-// after the first use.
-//
-// See https://golang.org/issues/8005#issuecomment-190753527
-// for details.
-type noCopy struct{}
-
-// Lock is a no-op used by the -copylocks checker from `go vet`.
-func (*noCopy) Lock() {}
-func (*noCopy) Unlock() {}
diff --git a/contrib/go/_std_1.18/src/sync/map.go b/contrib/go/_std_1.18/src/sync/map.go
deleted file mode 100644
index 2fa3253429..0000000000
--- a/contrib/go/_std_1.18/src/sync/map.go
+++ /dev/null
@@ -1,386 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync
-
-import (
- "sync/atomic"
- "unsafe"
-)
-
-// Map is like a Go map[interface{}]interface{} but is safe for concurrent use
-// by multiple goroutines without additional locking or coordination.
-// Loads, stores, and deletes run in amortized constant time.
-//
-// The Map type is specialized. Most code should use a plain Go map instead,
-// with separate locking or coordination, for better type safety and to make it
-// easier to maintain other invariants along with the map content.
-//
-// The Map type is optimized for two common use cases: (1) when the entry for a given
-// key is only ever written once but read many times, as in caches that only grow,
-// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
-// sets of keys. In these two cases, use of a Map may significantly reduce lock
-// contention compared to a Go map paired with a separate Mutex or RWMutex.
-//
-// The zero Map is empty and ready for use. A Map must not be copied after first use.
-type Map struct {
- mu Mutex
-
- // read contains the portion of the map's contents that are safe for
- // concurrent access (with or without mu held).
- //
- // The read field itself is always safe to load, but must only be stored with
- // mu held.
- //
- // Entries stored in read may be updated concurrently without mu, but updating
- // a previously-expunged entry requires that the entry be copied to the dirty
- // map and unexpunged with mu held.
- read atomic.Value // readOnly
-
- // dirty contains the portion of the map's contents that require mu to be
- // held. To ensure that the dirty map can be promoted to the read map quickly,
- // it also includes all of the non-expunged entries in the read map.
- //
- // Expunged entries are not stored in the dirty map. An expunged entry in the
- // clean map must be unexpunged and added to the dirty map before a new value
- // can be stored to it.
- //
- // If the dirty map is nil, the next write to the map will initialize it by
- // making a shallow copy of the clean map, omitting stale entries.
- dirty map[any]*entry
-
- // misses counts the number of loads since the read map was last updated that
- // needed to lock mu to determine whether the key was present.
- //
- // Once enough misses have occurred to cover the cost of copying the dirty
- // map, the dirty map will be promoted to the read map (in the unamended
- // state) and the next store to the map will make a new dirty copy.
- misses int
-}
-
-// readOnly is an immutable struct stored atomically in the Map.read field.
-type readOnly struct {
- m map[any]*entry
- amended bool // true if the dirty map contains some key not in m.
-}
-
-// expunged is an arbitrary pointer that marks entries which have been deleted
-// from the dirty map.
-var expunged = unsafe.Pointer(new(any))
-
-// An entry is a slot in the map corresponding to a particular key.
-type entry struct {
- // p points to the interface{} value stored for the entry.
- //
- // If p == nil, the entry has been deleted, and either m.dirty == nil or
- // m.dirty[key] is e.
- //
- // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
- // is missing from m.dirty.
- //
- // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
- // != nil, in m.dirty[key].
- //
- // An entry can be deleted by atomic replacement with nil: when m.dirty is
- // next created, it will atomically replace nil with expunged and leave
- // m.dirty[key] unset.
- //
- // An entry's associated value can be updated by atomic replacement, provided
- // p != expunged. If p == expunged, an entry's associated value can be updated
- // only after first setting m.dirty[key] = e so that lookups using the dirty
- // map find the entry.
- p unsafe.Pointer // *interface{}
-}
-
-func newEntry(i any) *entry {
- return &entry{p: unsafe.Pointer(&i)}
-}
-
-// Load returns the value stored in the map for a key, or nil if no
-// value is present.
-// The ok result indicates whether value was found in the map.
-func (m *Map) Load(key any) (value any, ok bool) {
- read, _ := m.read.Load().(readOnly)
- e, ok := read.m[key]
- if !ok && read.amended {
- m.mu.Lock()
- // Avoid reporting a spurious miss if m.dirty got promoted while we were
- // blocked on m.mu. (If further loads of the same key will not miss, it's
- // not worth copying the dirty map for this key.)
- read, _ = m.read.Load().(readOnly)
- e, ok = read.m[key]
- if !ok && read.amended {
- e, ok = m.dirty[key]
- // Regardless of whether the entry was present, record a miss: this key
- // will take the slow path until the dirty map is promoted to the read
- // map.
- m.missLocked()
- }
- m.mu.Unlock()
- }
- if !ok {
- return nil, false
- }
- return e.load()
-}
-
-func (e *entry) load() (value any, ok bool) {
- p := atomic.LoadPointer(&e.p)
- if p == nil || p == expunged {
- return nil, false
- }
- return *(*any)(p), true
-}
-
-// Store sets the value for a key.
-func (m *Map) Store(key, value any) {
- read, _ := m.read.Load().(readOnly)
- if e, ok := read.m[key]; ok && e.tryStore(&value) {
- return
- }
-
- m.mu.Lock()
- read, _ = m.read.Load().(readOnly)
- if e, ok := read.m[key]; ok {
- if e.unexpungeLocked() {
- // The entry was previously expunged, which implies that there is a
- // non-nil dirty map and this entry is not in it.
- m.dirty[key] = e
- }
- e.storeLocked(&value)
- } else if e, ok := m.dirty[key]; ok {
- e.storeLocked(&value)
- } else {
- if !read.amended {
- // We're adding the first new key to the dirty map.
- // Make sure it is allocated and mark the read-only map as incomplete.
- m.dirtyLocked()
- m.read.Store(readOnly{m: read.m, amended: true})
- }
- m.dirty[key] = newEntry(value)
- }
- m.mu.Unlock()
-}
-
-// tryStore stores a value if the entry has not been expunged.
-//
-// If the entry is expunged, tryStore returns false and leaves the entry
-// unchanged.
-func (e *entry) tryStore(i *any) bool {
- for {
- p := atomic.LoadPointer(&e.p)
- if p == expunged {
- return false
- }
- if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
- return true
- }
- }
-}
-
-// unexpungeLocked ensures that the entry is not marked as expunged.
-//
-// If the entry was previously expunged, it must be added to the dirty map
-// before m.mu is unlocked.
-func (e *entry) unexpungeLocked() (wasExpunged bool) {
- return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
-}
-
-// storeLocked unconditionally stores a value to the entry.
-//
-// The entry must be known not to be expunged.
-func (e *entry) storeLocked(i *any) {
- atomic.StorePointer(&e.p, unsafe.Pointer(i))
-}
-
-// LoadOrStore returns the existing value for the key if present.
-// Otherwise, it stores and returns the given value.
-// The loaded result is true if the value was loaded, false if stored.
-func (m *Map) LoadOrStore(key, value any) (actual any, loaded bool) {
- // Avoid locking if it's a clean hit.
- read, _ := m.read.Load().(readOnly)
- if e, ok := read.m[key]; ok {
- actual, loaded, ok := e.tryLoadOrStore(value)
- if ok {
- return actual, loaded
- }
- }
-
- m.mu.Lock()
- read, _ = m.read.Load().(readOnly)
- if e, ok := read.m[key]; ok {
- if e.unexpungeLocked() {
- m.dirty[key] = e
- }
- actual, loaded, _ = e.tryLoadOrStore(value)
- } else if e, ok := m.dirty[key]; ok {
- actual, loaded, _ = e.tryLoadOrStore(value)
- m.missLocked()
- } else {
- if !read.amended {
- // We're adding the first new key to the dirty map.
- // Make sure it is allocated and mark the read-only map as incomplete.
- m.dirtyLocked()
- m.read.Store(readOnly{m: read.m, amended: true})
- }
- m.dirty[key] = newEntry(value)
- actual, loaded = value, false
- }
- m.mu.Unlock()
-
- return actual, loaded
-}
-
-// tryLoadOrStore atomically loads or stores a value if the entry is not
-// expunged.
-//
-// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
-// returns with ok==false.
-func (e *entry) tryLoadOrStore(i any) (actual any, loaded, ok bool) {
- p := atomic.LoadPointer(&e.p)
- if p == expunged {
- return nil, false, false
- }
- if p != nil {
- return *(*any)(p), true, true
- }
-
- // Copy the interface after the first load to make this method more amenable
- // to escape analysis: if we hit the "load" path or the entry is expunged, we
- // shouldn't bother heap-allocating.
- ic := i
- for {
- if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
- return i, false, true
- }
- p = atomic.LoadPointer(&e.p)
- if p == expunged {
- return nil, false, false
- }
- if p != nil {
- return *(*any)(p), true, true
- }
- }
-}
-
-// LoadAndDelete deletes the value for a key, returning the previous value if any.
-// The loaded result reports whether the key was present.
-func (m *Map) LoadAndDelete(key any) (value any, loaded bool) {
- read, _ := m.read.Load().(readOnly)
- e, ok := read.m[key]
- if !ok && read.amended {
- m.mu.Lock()
- read, _ = m.read.Load().(readOnly)
- e, ok = read.m[key]
- if !ok && read.amended {
- e, ok = m.dirty[key]
- delete(m.dirty, key)
- // Regardless of whether the entry was present, record a miss: this key
- // will take the slow path until the dirty map is promoted to the read
- // map.
- m.missLocked()
- }
- m.mu.Unlock()
- }
- if ok {
- return e.delete()
- }
- return nil, false
-}
-
-// Delete deletes the value for a key.
-func (m *Map) Delete(key any) {
- m.LoadAndDelete(key)
-}
-
-func (e *entry) delete() (value any, ok bool) {
- for {
- p := atomic.LoadPointer(&e.p)
- if p == nil || p == expunged {
- return nil, false
- }
- if atomic.CompareAndSwapPointer(&e.p, p, nil) {
- return *(*any)(p), true
- }
- }
-}
-
-// Range calls f sequentially for each key and value present in the map.
-// If f returns false, range stops the iteration.
-//
-// Range does not necessarily correspond to any consistent snapshot of the Map's
-// contents: no key will be visited more than once, but if the value for any key
-// is stored or deleted concurrently (including by f), Range may reflect any
-// mapping for that key from any point during the Range call. Range does not
-// block other methods on the receiver; even f itself may call any method on m.
-//
-// Range may be O(N) with the number of elements in the map even if f returns
-// false after a constant number of calls.
-func (m *Map) Range(f func(key, value any) bool) {
- // We need to be able to iterate over all of the keys that were already
- // present at the start of the call to Range.
- // If read.amended is false, then read.m satisfies that property without
- // requiring us to hold m.mu for a long time.
- read, _ := m.read.Load().(readOnly)
- if read.amended {
- // m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
- // (assuming the caller does not break out early), so a call to Range
- // amortizes an entire copy of the map: we can promote the dirty copy
- // immediately!
- m.mu.Lock()
- read, _ = m.read.Load().(readOnly)
- if read.amended {
- read = readOnly{m: m.dirty}
- m.read.Store(read)
- m.dirty = nil
- m.misses = 0
- }
- m.mu.Unlock()
- }
-
- for k, e := range read.m {
- v, ok := e.load()
- if !ok {
- continue
- }
- if !f(k, v) {
- break
- }
- }
-}
-
-func (m *Map) missLocked() {
- m.misses++
- if m.misses < len(m.dirty) {
- return
- }
- m.read.Store(readOnly{m: m.dirty})
- m.dirty = nil
- m.misses = 0
-}
-
-func (m *Map) dirtyLocked() {
- if m.dirty != nil {
- return
- }
-
- read, _ := m.read.Load().(readOnly)
- m.dirty = make(map[any]*entry, len(read.m))
- for k, e := range read.m {
- if !e.tryExpungeLocked() {
- m.dirty[k] = e
- }
- }
-}
-
-func (e *entry) tryExpungeLocked() (isExpunged bool) {
- p := atomic.LoadPointer(&e.p)
- for p == nil {
- if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
- return true
- }
- p = atomic.LoadPointer(&e.p)
- }
- return p == expunged
-}
diff --git a/contrib/go/_std_1.18/src/sync/mutex.go b/contrib/go/_std_1.18/src/sync/mutex.go
deleted file mode 100644
index 18b2cedba7..0000000000
--- a/contrib/go/_std_1.18/src/sync/mutex.go
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package sync provides basic synchronization primitives such as mutual
-// exclusion locks. Other than the Once and WaitGroup types, most are intended
-// for use by low-level library routines. Higher-level synchronization is
-// better done via channels and communication.
-//
-// Values containing the types defined in this package should not be copied.
-package sync
-
-import (
- "internal/race"
- "sync/atomic"
- "unsafe"
-)
-
-func throw(string) // provided by runtime
-
-// A Mutex is a mutual exclusion lock.
-// The zero value for a Mutex is an unlocked mutex.
-//
-// A Mutex must not be copied after first use.
-type Mutex struct {
- state int32
- sema uint32
-}
-
-// A Locker represents an object that can be locked and unlocked.
-type Locker interface {
- Lock()
- Unlock()
-}
-
-const (
- mutexLocked = 1 << iota // mutex is locked
- mutexWoken
- mutexStarving
- mutexWaiterShift = iota
-
- // Mutex fairness.
- //
- // Mutex can be in 2 modes of operations: normal and starvation.
- // In normal mode waiters are queued in FIFO order, but a woken up waiter
- // does not own the mutex and competes with new arriving goroutines over
- // the ownership. Newly arriving goroutines have an advantage -- they are
- // already running on CPU and there can be lots of them, so a woken up
- // waiter has a good chance of losing. In that case it is queued at the front
- // of the wait queue. If a waiter fails to acquire the mutex for more than 1ms,
- // it switches the mutex to starvation mode.
- //
- // In starvation mode ownership of the mutex is directly handed off from
- // the unlocking goroutine to the waiter at the front of the queue.
- // Newly arriving goroutines don't try to acquire the mutex even if it appears
- // to be unlocked, and don't try to spin. Instead they queue themselves at
- // the tail of the wait queue.
- //
- // If a waiter receives ownership of the mutex and sees that either
- // (1) it is the last waiter in the queue, or (2) it waited for less than 1 ms,
- // it switches the mutex back to normal operation mode.
- //
- // Normal mode has considerably better performance as a goroutine can acquire
- // a mutex several times in a row even if there are blocked waiters.
- // Starvation mode is important to prevent pathological cases of tail latency.
- starvationThresholdNs = 1e6
-)
-
-// Lock locks m.
-// If the lock is already in use, the calling goroutine
-// blocks until the mutex is available.
-func (m *Mutex) Lock() {
- // Fast path: grab unlocked mutex.
- if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
- if race.Enabled {
- race.Acquire(unsafe.Pointer(m))
- }
- return
- }
- // Slow path (outlined so that the fast path can be inlined)
- m.lockSlow()
-}
-
-// TryLock tries to lock m and reports whether it succeeded.
-//
-// Note that while correct uses of TryLock do exist, they are rare,
-// and use of TryLock is often a sign of a deeper problem
-// in a particular use of mutexes.
-func (m *Mutex) TryLock() bool {
- old := m.state
- if old&(mutexLocked|mutexStarving) != 0 {
- return false
- }
-
- // There may be a goroutine waiting for the mutex, but we are
- // running now and can try to grab the mutex before that
- // goroutine wakes up.
- if !atomic.CompareAndSwapInt32(&m.state, old, old|mutexLocked) {
- return false
- }
-
- if race.Enabled {
- race.Acquire(unsafe.Pointer(m))
- }
- return true
-}
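-
-// An illustrative sketch (not from the original file) of typical usage:
-// a counter guarded by a Mutex, plus an opportunistic TryLock. The type
-// and method names are made up.
-type lockedCounter struct {
- mu Mutex
- n int
-}
-
-func (c *lockedCounter) inc() {
- c.mu.Lock()
- defer c.mu.Unlock()
- c.n++
-}
-
-func (c *lockedCounter) tryInc() bool {
- if !c.mu.TryLock() {
- return false // the lock is held; skip rather than block
- }
- defer c.mu.Unlock()
- c.n++
- return true
-}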
-
-func (m *Mutex) lockSlow() {
- var waitStartTime int64
- starving := false
- awoke := false
- iter := 0
- old := m.state
- for {
- // Don't spin in starvation mode, ownership is handed off to waiters
- // so we won't be able to acquire the mutex anyway.
- if old&(mutexLocked|mutexStarving) == mutexLocked && runtime_canSpin(iter) {
- // Active spinning makes sense.
- // Try to set mutexWoken flag to inform Unlock
- // to not wake other blocked goroutines.
- if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
- atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
- awoke = true
- }
- runtime_doSpin()
- iter++
- old = m.state
- continue
- }
- new := old
- // Don't try to acquire a starving mutex; newly arriving goroutines must queue.
- if old&mutexStarving == 0 {
- new |= mutexLocked
- }
- if old&(mutexLocked|mutexStarving) != 0 {
- new += 1 << mutexWaiterShift
- }
- // The current goroutine switches the mutex to starvation mode.
- // But if the mutex is currently unlocked, don't do the switch:
- // Unlock expects a starving mutex to have waiters, which will not
- // be true in this case.
- if starving && old&mutexLocked != 0 {
- new |= mutexStarving
- }
- if awoke {
- // The goroutine has been woken from sleep,
- // so we need to reset the flag in either case.
- if new&mutexWoken == 0 {
- throw("sync: inconsistent mutex state")
- }
- new &^= mutexWoken
- }
- if atomic.CompareAndSwapInt32(&m.state, old, new) {
- if old&(mutexLocked|mutexStarving) == 0 {
- break // locked the mutex with CAS
- }
- // If we were already waiting before, queue at the front of the queue.
- queueLifo := waitStartTime != 0
- if waitStartTime == 0 {
- waitStartTime = runtime_nanotime()
- }
- runtime_SemacquireMutex(&m.sema, queueLifo, 1)
- starving = starving || runtime_nanotime()-waitStartTime > starvationThresholdNs
- old = m.state
- if old&mutexStarving != 0 {
- // If this goroutine was woken and the mutex is in starvation mode,
- // ownership was handed off to us but the mutex is in a somewhat
- // inconsistent state: mutexLocked is not set and we are still
- // accounted as a waiter. Fix that.
- if old&(mutexLocked|mutexWoken) != 0 || old>>mutexWaiterShift == 0 {
- throw("sync: inconsistent mutex state")
- }
- delta := int32(mutexLocked - 1<<mutexWaiterShift)
- if !starving || old>>mutexWaiterShift == 1 {
- // Exit starvation mode.
- // Critical to do it here and consider wait time.
- // Starvation mode is so inefficient that two goroutines
- // can go lock-step indefinitely once they switch the mutex
- // to starvation mode.
- delta -= mutexStarving
- }
- atomic.AddInt32(&m.state, delta)
- break
- }
- awoke = true
- iter = 0
- } else {
- old = m.state
- }
- }
-
- if race.Enabled {
- race.Acquire(unsafe.Pointer(m))
- }
-}
-
-// Unlock unlocks m.
-// It is a run-time error if m is not locked on entry to Unlock.
-//
-// A locked Mutex is not associated with a particular goroutine.
-// It is allowed for one goroutine to lock a Mutex and then
-// arrange for another goroutine to unlock it.
-func (m *Mutex) Unlock() {
- if race.Enabled {
- _ = m.state
- race.Release(unsafe.Pointer(m))
- }
-
- // Fast path: drop lock bit.
- new := atomic.AddInt32(&m.state, -mutexLocked)
- if new != 0 {
- // Outlined slow path to allow inlining the fast path.
- // To hide unlockSlow during tracing we skip one extra frame when tracing GoUnblock.
- m.unlockSlow(new)
- }
-}
-
-func (m *Mutex) unlockSlow(new int32) {
- if (new+mutexLocked)&mutexLocked == 0 {
- throw("sync: unlock of unlocked mutex")
- }
- if new&mutexStarving == 0 {
- old := new
- for {
- // If there are no waiters or a goroutine has already
- // been woken or grabbed the lock, no need to wake anyone.
- // In starvation mode ownership is directly handed off from unlocking
- // goroutine to the next waiter. We are not part of this chain,
- // since we did not observe mutexStarving when we unlocked the mutex above.
- // So get out of the way.
- if old>>mutexWaiterShift == 0 || old&(mutexLocked|mutexWoken|mutexStarving) != 0 {
- return
- }
- // Grab the right to wake someone.
- new = (old - 1<<mutexWaiterShift) | mutexWoken
- if atomic.CompareAndSwapInt32(&m.state, old, new) {
- runtime_Semrelease(&m.sema, false, 1)
- return
- }
- old = m.state
- }
- } else {
- // Starving mode: handoff mutex ownership to the next waiter, and yield
- // our time slice so that the next waiter can start to run immediately.
- // Note: mutexLocked is not set, the waiter will set it after wakeup.
- // But the mutex is still considered locked if mutexStarving is set,
- // so newly arriving goroutines won't acquire it.
- runtime_Semrelease(&m.sema, true, 1)
- }
-}
diff --git a/contrib/go/_std_1.18/src/sync/once.go b/contrib/go/_std_1.18/src/sync/once.go
deleted file mode 100644
index 8844314e7e..0000000000
--- a/contrib/go/_std_1.18/src/sync/once.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync
-
-import (
- "sync/atomic"
-)
-
-// Once is an object that will perform exactly one action.
-//
-// A Once must not be copied after first use.
-type Once struct {
- // done indicates whether the action has been performed.
- // It is first in the struct because it is used in the hot path.
- // The hot path is inlined at every call site.
- // Placing done first allows more compact instructions on some architectures (amd64/386),
- // and fewer instructions (to calculate offset) on other architectures.
- done uint32
- m Mutex
-}
-
-// Do calls the function f if and only if Do is being called for the
-// first time for this instance of Once. In other words, given
-// var once Once
-// if once.Do(f) is called multiple times, only the first call will invoke f,
-// even if f has a different value in each invocation. A new instance of
-// Once is required for each function to execute.
-//
-// Do is intended for initialization that must be run exactly once. Since f
-// is niladic, it may be necessary to use a function literal to capture the
-// arguments to a function to be invoked by Do:
-// config.once.Do(func() { config.init(filename) })
-//
-// Because no call to Do returns until the one call to f returns, if f causes
-// Do to be called, it will deadlock.
-//
-// If f panics, Do considers it to have returned; future calls of Do return
-// without calling f.
-//
-func (o *Once) Do(f func()) {
- // Note: Here is an incorrect implementation of Do:
- //
- // if atomic.CompareAndSwapUint32(&o.done, 0, 1) {
- // f()
- // }
- //
- // Do guarantees that when it returns, f has finished.
- // This implementation would not implement that guarantee:
- // given two simultaneous calls, the winner of the cas would
- // call f, and the second would return immediately, without
- // waiting for the first's call to f to complete.
- // This is why the slow path falls back to a mutex, and why
- // the atomic.StoreUint32 must be delayed until after f returns.
-
- if atomic.LoadUint32(&o.done) == 0 {
- // Outlined slow-path to allow inlining of the fast-path.
- o.doSlow(f)
- }
-}
-
-func (o *Once) doSlow(f func()) {
- o.m.Lock()
- defer o.m.Unlock()
- if o.done == 0 {
- defer atomic.StoreUint32(&o.done, 1)
- f()
- }
-}
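-
-// An illustrative sketch (not part of the original file): lazily
-// initializing a shared value exactly once from many goroutines.
-// The variable and function names are made up.
-var (
- registryOnce Once
- registry map[string]int
-)
-
-func sharedRegistry() map[string]int {
- registryOnce.Do(func() {
- registry = make(map[string]int) // runs at most once, even under contention
- })
- return registry
-}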
diff --git a/contrib/go/_std_1.18/src/sync/pool.go b/contrib/go/_std_1.18/src/sync/pool.go
deleted file mode 100644
index d1abb6a8b7..0000000000
--- a/contrib/go/_std_1.18/src/sync/pool.go
+++ /dev/null
@@ -1,294 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync
-
-import (
- "internal/race"
- "runtime"
- "sync/atomic"
- "unsafe"
-)
-
-// A Pool is a set of temporary objects that may be individually saved and
-// retrieved.
-//
-// Any item stored in the Pool may be removed automatically at any time without
-// notification. If the Pool holds the only reference when this happens, the
-// item might be deallocated.
-//
-// A Pool is safe for use by multiple goroutines simultaneously.
-//
-// Pool's purpose is to cache allocated but unused items for later reuse,
-// relieving pressure on the garbage collector. That is, it makes it easy to
-// build efficient, thread-safe free lists. However, it is not suitable for all
-// free lists.
-//
-// An appropriate use of a Pool is to manage a group of temporary items
-// silently shared among and potentially reused by concurrent independent
-// clients of a package. Pool provides a way to amortize allocation overhead
-// across many clients.
-//
-// An example of good use of a Pool is in the fmt package, which maintains a
-// dynamically-sized store of temporary output buffers. The store scales under
-// load (when many goroutines are actively printing) and shrinks when
-// quiescent.
-//
-// On the other hand, a free list maintained as part of a short-lived object is
-// not a suitable use for a Pool, since the overhead does not amortize well in
-// that scenario. It is more efficient to have such objects implement their own
-// free list.
-//
-// A Pool must not be copied after first use.
-type Pool struct {
- noCopy noCopy
-
- local unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal
- localSize uintptr // size of the local array
-
- victim unsafe.Pointer // local from previous cycle
- victimSize uintptr // size of victims array
-
- // New optionally specifies a function to generate
- // a value when Get would otherwise return nil.
- // It may not be changed concurrently with calls to Get.
- New func() any
-}
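-
-// An illustrative sketch (not from the original file): a pool of reusable
-// 4 KiB scratch buffers, the kind of GC-pressure relief described above.
-// The names are made up; real code often pools pointers (e.g. *bytes.Buffer)
-// so that Put does not allocate when boxing the value into an interface.
-var scratchPool = Pool{
- New: func() any {
- return make([]byte, 4096)
- },
-}
-
-func withScratch(f func(buf []byte)) {
- buf := scratchPool.Get().([]byte)
- defer scratchPool.Put(buf) // callers must not retain buf after f returns
- f(buf)
-}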
-
-// Local per-P Pool appendix.
-type poolLocalInternal struct {
- private any // Can be used only by the respective P.
- shared poolChain // Local P can pushHead/popHead; any P can popTail.
-}
-
-type poolLocal struct {
- poolLocalInternal
-
- // Prevents false sharing on widespread platforms with
- // 128 mod (cache line size) = 0.
- pad [128 - unsafe.Sizeof(poolLocalInternal{})%128]byte
-}
-
-// from runtime
-func fastrandn(n uint32) uint32
-
-var poolRaceHash [128]uint64
-
-// poolRaceAddr returns an address to use as the synchronization point
-// for race detector logic. We don't use the actual pointer stored in x
-// directly, for fear of conflicting with other synchronization on that address.
-// Instead, we hash the pointer to get an index into poolRaceHash.
-// See discussion on golang.org/cl/31589.
-func poolRaceAddr(x any) unsafe.Pointer {
- ptr := uintptr((*[2]unsafe.Pointer)(unsafe.Pointer(&x))[1])
- h := uint32((uint64(uint32(ptr)) * 0x85ebca6b) >> 16)
- return unsafe.Pointer(&poolRaceHash[h%uint32(len(poolRaceHash))])
-}
-
-// Put adds x to the pool.
-func (p *Pool) Put(x any) {
- if x == nil {
- return
- }
- if race.Enabled {
- if fastrandn(4) == 0 {
- // Randomly drop x on floor.
- return
- }
- race.ReleaseMerge(poolRaceAddr(x))
- race.Disable()
- }
- l, _ := p.pin()
- if l.private == nil {
- l.private = x
- x = nil
- }
- if x != nil {
- l.shared.pushHead(x)
- }
- runtime_procUnpin()
- if race.Enabled {
- race.Enable()
- }
-}
-
-// Get selects an arbitrary item from the Pool, removes it from the
-// Pool, and returns it to the caller.
-// Get may choose to ignore the pool and treat it as empty.
-// Callers should not assume any relation between values passed to Put and
-// the values returned by Get.
-//
-// If Get would otherwise return nil and p.New is non-nil, Get returns
-// the result of calling p.New.
-func (p *Pool) Get() any {
- if race.Enabled {
- race.Disable()
- }
- l, pid := p.pin()
- x := l.private
- l.private = nil
- if x == nil {
- // Try to pop the head of the local shard. We prefer
- // the head over the tail for temporal locality of
- // reuse.
- x, _ = l.shared.popHead()
- if x == nil {
- x = p.getSlow(pid)
- }
- }
- runtime_procUnpin()
- if race.Enabled {
- race.Enable()
- if x != nil {
- race.Acquire(poolRaceAddr(x))
- }
- }
- if x == nil && p.New != nil {
- x = p.New()
- }
- return x
-}
-
-func (p *Pool) getSlow(pid int) any {
- // See the comment in pin regarding ordering of the loads.
- size := runtime_LoadAcquintptr(&p.localSize) // load-acquire
- locals := p.local // load-consume
- // Try to steal one element from other procs.
- for i := 0; i < int(size); i++ {
- l := indexLocal(locals, (pid+i+1)%int(size))
- if x, _ := l.shared.popTail(); x != nil {
- return x
- }
- }
-
- // Try the victim cache. We do this after attempting to steal
- // from all primary caches because we want objects in the
- // victim cache to age out if at all possible.
- size = atomic.LoadUintptr(&p.victimSize)
- if uintptr(pid) >= size {
- return nil
- }
- locals = p.victim
- l := indexLocal(locals, pid)
- if x := l.private; x != nil {
- l.private = nil
- return x
- }
- for i := 0; i < int(size); i++ {
- l := indexLocal(locals, (pid+i)%int(size))
- if x, _ := l.shared.popTail(); x != nil {
- return x
- }
- }
-
- // Mark the victim cache as empty so that future gets don't
- // bother with it.
- atomic.StoreUintptr(&p.victimSize, 0)
-
- return nil
-}
-
-// pin pins the current goroutine to P, disables preemption and
-// returns the poolLocal pool for the P and the P's id.
-// Caller must call runtime_procUnpin() when done with the pool.
-func (p *Pool) pin() (*poolLocal, int) {
- pid := runtime_procPin()
- // In pinSlow we store to local and then to localSize; here we load in the opposite order.
- // Since we've disabled preemption, GC cannot happen in between.
- // Thus here we must observe local at least as large as localSize.
- // Observing a newer/larger local is fine (we must observe its zero-initialized-ness).
- s := runtime_LoadAcquintptr(&p.localSize) // load-acquire
- l := p.local // load-consume
- if uintptr(pid) < s {
- return indexLocal(l, pid), pid
- }
- return p.pinSlow()
-}
-
-func (p *Pool) pinSlow() (*poolLocal, int) {
- // Retry under the mutex.
- // Can not lock the mutex while pinned.
- runtime_procUnpin()
- allPoolsMu.Lock()
- defer allPoolsMu.Unlock()
- pid := runtime_procPin()
- // poolCleanup won't be called while we are pinned.
- s := p.localSize
- l := p.local
- if uintptr(pid) < s {
- return indexLocal(l, pid), pid
- }
- if p.local == nil {
- allPools = append(allPools, p)
- }
- // If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
- size := runtime.GOMAXPROCS(0)
- local := make([]poolLocal, size)
- atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
- runtime_StoreReluintptr(&p.localSize, uintptr(size)) // store-release
- return &local[pid], pid
-}
-
-func poolCleanup() {
- // This function is called with the world stopped, at the beginning of a garbage collection.
- // It must not allocate and probably should not call any runtime functions.
-
- // Because the world is stopped, no pool user can be in a
- // pinned section (in effect, this has all Ps pinned).
-
- // Drop victim caches from all pools.
- for _, p := range oldPools {
- p.victim = nil
- p.victimSize = 0
- }
-
- // Move primary cache to victim cache.
- for _, p := range allPools {
- p.victim = p.local
- p.victimSize = p.localSize
- p.local = nil
- p.localSize = 0
- }
-
- // The pools with non-empty primary caches now have non-empty
- // victim caches and no pools have primary caches.
- oldPools, allPools = allPools, nil
-}
-
-var (
- allPoolsMu Mutex
-
- // allPools is the set of pools that have non-empty primary
- // caches. Protected by either 1) allPoolsMu and pinning or 2)
- // STW.
- allPools []*Pool
-
- // oldPools is the set of pools that may have non-empty victim
- // caches. Protected by STW.
- oldPools []*Pool
-)
-
-func init() {
- runtime_registerPoolCleanup(poolCleanup)
-}
-
-func indexLocal(l unsafe.Pointer, i int) *poolLocal {
- lp := unsafe.Pointer(uintptr(l) + uintptr(i)*unsafe.Sizeof(poolLocal{}))
- return (*poolLocal)(lp)
-}
-
-// Implemented in runtime.
-func runtime_registerPoolCleanup(cleanup func())
-func runtime_procPin() int
-func runtime_procUnpin()
-
-// The below are implemented in runtime/internal/atomic and the
-// compiler also knows to intrinsify the symbol we linkname into this
-// package.
-
-//go:linkname runtime_LoadAcquintptr runtime/internal/atomic.LoadAcquintptr
-func runtime_LoadAcquintptr(ptr *uintptr) uintptr
-
-//go:linkname runtime_StoreReluintptr runtime/internal/atomic.StoreReluintptr
-func runtime_StoreReluintptr(ptr *uintptr, val uintptr) uintptr
diff --git a/contrib/go/_std_1.18/src/sync/rwmutex.go b/contrib/go/_std_1.18/src/sync/rwmutex.go
deleted file mode 100644
index f0d4c9771a..0000000000
--- a/contrib/go/_std_1.18/src/sync/rwmutex.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync
-
-import (
- "internal/race"
- "sync/atomic"
- "unsafe"
-)
-
-// There is a modified copy of this file in runtime/rwmutex.go.
-// If you make any changes here, see if you should make them there.
-
-// A RWMutex is a reader/writer mutual exclusion lock.
-// The lock can be held by an arbitrary number of readers or a single writer.
-// The zero value for a RWMutex is an unlocked mutex.
-//
-// A RWMutex must not be copied after first use.
-//
-// If a goroutine holds a RWMutex for reading and another goroutine might
-// call Lock, no goroutine should expect to be able to acquire a read lock
-// until the initial read lock is released. In particular, this prohibits
-// recursive read locking. This is to ensure that the lock eventually becomes
-// available; a blocked Lock call excludes new readers from acquiring the
-// lock.
-type RWMutex struct {
- w Mutex // held if there are pending writers
- writerSem uint32 // semaphore for writers to wait for completing readers
- readerSem uint32 // semaphore for readers to wait for completing writers
- readerCount int32 // number of pending readers
- readerWait int32 // number of departing readers
-}
-
-const rwmutexMaxReaders = 1 << 30
-
-// Happens-before relationships are indicated to the race detector via:
-// - Unlock -> Lock: readerSem
-// - Unlock -> RLock: readerSem
-// - RUnlock -> Lock: writerSem
-//
-// The methods below temporarily disable handling of race synchronization
-// events in order to provide the more precise model above to the race
-// detector.
-//
-// For example, atomic.AddInt32 in RLock should not appear to provide
-// acquire-release semantics, which would incorrectly synchronize racing
-// readers, thus potentially missing races.
-
-// RLock locks rw for reading.
-//
-// It should not be used for recursive read locking; a blocked Lock
-// call excludes new readers from acquiring the lock. See the
-// documentation on the RWMutex type.
-func (rw *RWMutex) RLock() {
- if race.Enabled {
- _ = rw.w.state
- race.Disable()
- }
- if atomic.AddInt32(&rw.readerCount, 1) < 0 {
- // A writer is pending, wait for it.
- runtime_SemacquireMutex(&rw.readerSem, false, 0)
- }
- if race.Enabled {
- race.Enable()
- race.Acquire(unsafe.Pointer(&rw.readerSem))
- }
-}
-
-// TryRLock tries to lock rw for reading and reports whether it succeeded.
-//
-// Note that while correct uses of TryRLock do exist, they are rare,
-// and use of TryRLock is often a sign of a deeper problem
-// in a particular use of mutexes.
-func (rw *RWMutex) TryRLock() bool {
- if race.Enabled {
- _ = rw.w.state
- race.Disable()
- }
- for {
- c := atomic.LoadInt32(&rw.readerCount)
- if c < 0 {
- if race.Enabled {
- race.Enable()
- }
- return false
- }
- if atomic.CompareAndSwapInt32(&rw.readerCount, c, c+1) {
- if race.Enabled {
- race.Enable()
- race.Acquire(unsafe.Pointer(&rw.readerSem))
- }
- return true
- }
- }
-}
-
-// RUnlock undoes a single RLock call;
-// it does not affect other simultaneous readers.
-// It is a run-time error if rw is not locked for reading
-// on entry to RUnlock.
-func (rw *RWMutex) RUnlock() {
- if race.Enabled {
- _ = rw.w.state
- race.ReleaseMerge(unsafe.Pointer(&rw.writerSem))
- race.Disable()
- }
- if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
- // Outlined slow-path to allow the fast-path to be inlined
- rw.rUnlockSlow(r)
- }
- if race.Enabled {
- race.Enable()
- }
-}
-
-func (rw *RWMutex) rUnlockSlow(r int32) {
- if r+1 == 0 || r+1 == -rwmutexMaxReaders {
- race.Enable()
- throw("sync: RUnlock of unlocked RWMutex")
- }
- // A writer is pending.
- if atomic.AddInt32(&rw.readerWait, -1) == 0 {
- // The last reader unblocks the writer.
- runtime_Semrelease(&rw.writerSem, false, 1)
- }
-}
-
-// Lock locks rw for writing.
-// If the lock is already locked for reading or writing,
-// Lock blocks until the lock is available.
-func (rw *RWMutex) Lock() {
- if race.Enabled {
- _ = rw.w.state
- race.Disable()
- }
- // First, resolve competition with other writers.
- rw.w.Lock()
- // Announce to readers there is a pending writer.
- r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
- // Wait for active readers.
- if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
- runtime_SemacquireMutex(&rw.writerSem, false, 0)
- }
- if race.Enabled {
- race.Enable()
- race.Acquire(unsafe.Pointer(&rw.readerSem))
- race.Acquire(unsafe.Pointer(&rw.writerSem))
- }
-}
-
-// TryLock tries to lock rw for writing and reports whether it succeeded.
-//
-// Note that while correct uses of TryLock do exist, they are rare,
-// and use of TryLock is often a sign of a deeper problem
-// in a particular use of mutexes.
-func (rw *RWMutex) TryLock() bool {
- if race.Enabled {
- _ = rw.w.state
- race.Disable()
- }
- if !rw.w.TryLock() {
- if race.Enabled {
- race.Enable()
- }
- return false
- }
- if !atomic.CompareAndSwapInt32(&rw.readerCount, 0, -rwmutexMaxReaders) {
- rw.w.Unlock()
- if race.Enabled {
- race.Enable()
- }
- return false
- }
- if race.Enabled {
- race.Enable()
- race.Acquire(unsafe.Pointer(&rw.readerSem))
- race.Acquire(unsafe.Pointer(&rw.writerSem))
- }
- return true
-}
-
-// Unlock unlocks rw for writing. It is a run-time error if rw is
-// not locked for writing on entry to Unlock.
-//
-// As with Mutexes, a locked RWMutex is not associated with a particular
-// goroutine. One goroutine may RLock (Lock) a RWMutex and then
-// arrange for another goroutine to RUnlock (Unlock) it.
-func (rw *RWMutex) Unlock() {
- if race.Enabled {
- _ = rw.w.state
- race.Release(unsafe.Pointer(&rw.readerSem))
- race.Disable()
- }
-
- // Announce to readers there is no active writer.
- r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
- if r >= rwmutexMaxReaders {
- race.Enable()
- throw("sync: Unlock of unlocked RWMutex")
- }
- // Unblock blocked readers, if any.
- for i := 0; i < int(r); i++ {
- runtime_Semrelease(&rw.readerSem, false, 0)
- }
- // Allow other writers to proceed.
- rw.w.Unlock()
- if race.Enabled {
- race.Enable()
- }
-}
-
-// RLocker returns a Locker interface that implements
-// the Lock and Unlock methods by calling rw.RLock and rw.RUnlock.
-func (rw *RWMutex) RLocker() Locker {
- return (*rlocker)(rw)
-}
-
-type rlocker RWMutex
-
-func (r *rlocker) Lock() { (*RWMutex)(r).RLock() }
-func (r *rlocker) Unlock() { (*RWMutex)(r).RUnlock() }
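-
-// An illustrative sketch (not part of the original file): a read-mostly
-// cache where many readers proceed concurrently and writers are exclusive.
-// The type and method names are made up.
-type rwCache struct {
- mu RWMutex
- m map[string]string
-}
-
-func (c *rwCache) get(k string) (string, bool) {
- c.mu.RLock() // shared: other readers are not blocked
- defer c.mu.RUnlock()
- v, ok := c.m[k]
- return v, ok
-}
-
-func (c *rwCache) set(k, v string) {
- c.mu.Lock() // exclusive: waits for active readers to drain
- defer c.mu.Unlock()
- if c.m == nil {
- c.m = make(map[string]string)
- }
- c.m[k] = v
-}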
diff --git a/contrib/go/_std_1.18/src/sync/waitgroup.go b/contrib/go/_std_1.18/src/sync/waitgroup.go
deleted file mode 100644
index 9c6662d04b..0000000000
--- a/contrib/go/_std_1.18/src/sync/waitgroup.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync
-
-import (
- "internal/race"
- "sync/atomic"
- "unsafe"
-)
-
-// A WaitGroup waits for a collection of goroutines to finish.
-// The main goroutine calls Add to set the number of
-// goroutines to wait for. Then each of the goroutines
-// runs and calls Done when finished. At the same time,
-// Wait can be used to block until all goroutines have finished.
-//
-// A WaitGroup must not be copied after first use.
-type WaitGroup struct {
- noCopy noCopy
-
- // 64-bit value: high 32 bits are counter, low 32 bits are waiter count.
- // 64-bit atomic operations require 64-bit alignment, but 32-bit
- // compilers only guarantee that 64-bit fields are 32-bit aligned.
- // For this reason, on 32-bit architectures we need to check in state()
- // if state1 is aligned or not, and dynamically "swap" the field order if
- // needed.
- state1 uint64
- state2 uint32
-}
-
-// state returns pointers to the state and sema fields stored within wg.state*.
-func (wg *WaitGroup) state() (statep *uint64, semap *uint32) {
- if unsafe.Alignof(wg.state1) == 8 || uintptr(unsafe.Pointer(&wg.state1))%8 == 0 {
- // state1 is 64-bit aligned: nothing to do.
- return &wg.state1, &wg.state2
- } else {
- // state1 is 32-bit aligned but not 64-bit aligned: this means that
- // (&state1)+4 is 64-bit aligned.
- state := (*[3]uint32)(unsafe.Pointer(&wg.state1))
- return (*uint64)(unsafe.Pointer(&state[1])), &state[0]
- }
-}
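-
-// An illustrative sketch (not from the original file): waiting for a batch
-// of goroutines, with Add called before each goroutine starts, as the Add
-// documentation below requires. The function name is made up.
-func runAll(tasks []func()) {
- var wg WaitGroup
- for _, task := range tasks {
- wg.Add(1) // must happen before the goroutine is created
- go func(t func()) {
- defer wg.Done()
- t()
- }(task)
- }
- wg.Wait() // blocks until every Done has been called
-}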
-
-// Add adds delta, which may be negative, to the WaitGroup counter.
-// If the counter becomes zero, all goroutines blocked on Wait are released.
-// If the counter goes negative, Add panics.
-//
-// Note that calls with a positive delta that occur when the counter is zero
-// must happen before a Wait. Calls with a negative delta, or calls with a
-// positive delta that start when the counter is greater than zero, may happen
-// at any time.
-// Typically this means the calls to Add should execute before the statement
-// creating the goroutine or other event to be waited for.
-// If a WaitGroup is reused to wait for several independent sets of events,
-// new Add calls must happen after all previous Wait calls have returned.
-// See the WaitGroup example.
-func (wg *WaitGroup) Add(delta int) {
- statep, semap := wg.state()
- if race.Enabled {
- _ = *statep // trigger nil deref early
- if delta < 0 {
- // Synchronize decrements with Wait.
- race.ReleaseMerge(unsafe.Pointer(wg))
- }
- race.Disable()
- defer race.Enable()
- }
- state := atomic.AddUint64(statep, uint64(delta)<<32)
- v := int32(state >> 32)
- w := uint32(state)
- if race.Enabled && delta > 0 && v == int32(delta) {
- // The first increment must be synchronized with Wait.
- // Need to model this as a read, because there can be
- // several concurrent wg.counter transitions from 0.
- race.Read(unsafe.Pointer(semap))
- }
- if v < 0 {
- panic("sync: negative WaitGroup counter")
- }
- if w != 0 && delta > 0 && v == int32(delta) {
- panic("sync: WaitGroup misuse: Add called concurrently with Wait")
- }
- if v > 0 || w == 0 {
- return
- }
- // This goroutine has set counter to 0 when waiters > 0.
- // Now there can't be concurrent mutations of state:
- // - Adds must not happen concurrently with Wait,
- // - Wait does not increment waiters if it sees counter == 0.
- // Still do a cheap sanity check to detect WaitGroup misuse.
- if *statep != state {
- panic("sync: WaitGroup misuse: Add called concurrently with Wait")
- }
- // Reset waiters count to 0.
- *statep = 0
- for ; w != 0; w-- {
- runtime_Semrelease(semap, false, 0)
- }
-}
-
-// Done decrements the WaitGroup counter by one.
-func (wg *WaitGroup) Done() {
- wg.Add(-1)
-}
-
-// Wait blocks until the WaitGroup counter is zero.
-func (wg *WaitGroup) Wait() {
- statep, semap := wg.state()
- if race.Enabled {
- _ = *statep // trigger nil deref early
- race.Disable()
- }
- for {
- state := atomic.LoadUint64(statep)
- v := int32(state >> 32)
- w := uint32(state)
- if v == 0 {
- // Counter is 0, no need to wait.
- if race.Enabled {
- race.Enable()
- race.Acquire(unsafe.Pointer(wg))
- }
- return
- }
- // Increment waiters count.
- if atomic.CompareAndSwapUint64(statep, state, state+1) {
- if race.Enabled && w == 0 {
- // Wait must be synchronized with the first Add.
- // Need to model this as a write to race with the read in Add.
- // As a consequence, can do the write only for the first waiter,
- // otherwise concurrent Waits will race with each other.
- race.Write(unsafe.Pointer(semap))
- }
- runtime_Semacquire(semap)
- if *statep != 0 {
- panic("sync: WaitGroup is reused before previous Wait has returned")
- }
- if race.Enabled {
- race.Enable()
- race.Acquire(unsafe.Pointer(wg))
- }
- return
- }
- }
-}
diff --git a/contrib/go/_std_1.18/src/syscall/asm_darwin_amd64.s b/contrib/go/_std_1.18/src/syscall/asm_darwin_amd64.s
deleted file mode 100644
index c863889a71..0000000000
--- a/contrib/go/_std_1.18/src/syscall/asm_darwin_amd64.s
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "textflag.h"
-#include "funcdata.h"
-
-//
-// System call support for AMD64, Darwin
-//
-
-// Trap # in AX, args in DI SI DX, return in AX DX
-
-// func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno);
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- CALL runtime·entersyscall(SB)
- MOVQ a1+8(FP), DI
- MOVQ a2+16(FP), SI
- MOVQ a3+24(FP), DX
- MOVQ trap+0(FP), AX // syscall entry
- ADDQ $0x2000000, AX
- SYSCALL
- JCC ok
- MOVQ $-1, r1+32(FP)
- MOVQ $0, r2+40(FP)
- MOVQ AX, err+48(FP)
- CALL runtime·exitsyscall(SB)
- RET
-ok:
- MOVQ AX, r1+32(FP)
- MOVQ DX, r2+40(FP)
- MOVQ $0, err+48(FP)
- CALL runtime·exitsyscall(SB)
- RET
-
-// func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno);
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- CALL runtime·entersyscall(SB)
- MOVQ a1+8(FP), DI
- MOVQ a2+16(FP), SI
- MOVQ a3+24(FP), DX
- MOVQ a4+32(FP), R10
- MOVQ a5+40(FP), R8
- MOVQ a6+48(FP), R9
- MOVQ trap+0(FP), AX // syscall entry
- ADDQ $0x2000000, AX
- SYSCALL
- JCC ok6
- MOVQ $-1, r1+56(FP)
- MOVQ $0, r2+64(FP)
- MOVQ AX, err+72(FP)
- CALL runtime·exitsyscall(SB)
- RET
-ok6:
- MOVQ AX, r1+56(FP)
- MOVQ DX, r2+64(FP)
- MOVQ $0, err+72(FP)
- CALL runtime·exitsyscall(SB)
- RET
-
-// func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno)
-TEXT ·Syscall9(SB),NOSPLIT,$0-104
- CALL runtime·entersyscall(SB)
- MOVQ trap+0(FP), AX // syscall entry
- MOVQ a1+8(FP), DI
- MOVQ a2+16(FP), SI
- MOVQ a3+24(FP), DX
- MOVQ a4+32(FP), R10
- MOVQ a5+40(FP), R8
- MOVQ a6+48(FP), R9
- MOVQ a7+56(FP), R11
- MOVQ a8+64(FP), R12
- MOVQ a9+72(FP), R13
- SUBQ $32, SP
- MOVQ R11, 8(SP)
- MOVQ R12, 16(SP)
- MOVQ R13, 24(SP)
- ADDQ $0x2000000, AX
- SYSCALL
- JCC ok9
- ADDQ $32, SP
- MOVQ $-1, r1+80(FP)
- MOVQ $0, r2+88(FP)
- MOVQ AX, err+96(FP)
- CALL runtime·exitsyscall(SB)
- RET
-ok9:
- ADDQ $32, SP
- MOVQ AX, r1+80(FP)
- MOVQ DX, r2+88(FP)
- MOVQ $0, err+96(FP)
- CALL runtime·exitsyscall(SB)
- RET
-
-// func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- MOVQ a1+8(FP), DI
- MOVQ a2+16(FP), SI
- MOVQ a3+24(FP), DX
- MOVQ trap+0(FP), AX // syscall entry
- ADDQ $0x2000000, AX
- SYSCALL
- JCC ok1
- MOVQ $-1, r1+32(FP)
- MOVQ $0, r2+40(FP)
- MOVQ AX, err+48(FP)
- RET
-ok1:
- MOVQ AX, r1+32(FP)
- MOVQ DX, r2+40(FP)
- MOVQ $0, err+48(FP)
- RET
-
-// func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- MOVQ a1+8(FP), DI
- MOVQ a2+16(FP), SI
- MOVQ a3+24(FP), DX
- MOVQ a4+32(FP), R10
- MOVQ a5+40(FP), R8
- MOVQ a6+48(FP), R9
- MOVQ trap+0(FP), AX // syscall entry
- ADDQ $0x2000000, AX
- SYSCALL
- JCC ok2
- MOVQ $-1, r1+56(FP)
- MOVQ $0, r2+64(FP)
- MOVQ AX, err+72(FP)
- RET
-ok2:
- MOVQ AX, r1+56(FP)
- MOVQ DX, r2+64(FP)
- MOVQ $0, err+72(FP)
- RET
diff --git a/contrib/go/_std_1.18/src/syscall/asm_linux_amd64.s b/contrib/go/_std_1.18/src/syscall/asm_linux_amd64.s
deleted file mode 100644
index a9af68d51d..0000000000
--- a/contrib/go/_std_1.18/src/syscall/asm_linux_amd64.s
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "textflag.h"
-#include "funcdata.h"
-
-//
-// System calls for AMD64, Linux
-//
-
-#define SYS_gettimeofday 96
-
-// func Syscall(trap int64, a1, a2, a3 uintptr) (r1, r2, err uintptr);
-// Trap # in AX, args in DI SI DX R10 R8 R9, return in AX DX
-// Note that this differs from "standard" ABI convention, which
-// would pass 4th arg in CX, not R10.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- CALL runtime·entersyscall(SB)
- MOVQ a1+8(FP), DI
- MOVQ a2+16(FP), SI
- MOVQ a3+24(FP), DX
- MOVQ trap+0(FP), AX // syscall entry
- SYSCALL
- CMPQ AX, $0xfffffffffffff001
- JLS ok
- MOVQ $-1, r1+32(FP)
- MOVQ $0, r2+40(FP)
- NEGQ AX
- MOVQ AX, err+48(FP)
- CALL runtime·exitsyscall(SB)
- RET
-ok:
- MOVQ AX, r1+32(FP)
- MOVQ DX, r2+40(FP)
- MOVQ $0, err+48(FP)
- CALL runtime·exitsyscall(SB)
- RET
-
-// func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- CALL runtime·entersyscall(SB)
- MOVQ a1+8(FP), DI
- MOVQ a2+16(FP), SI
- MOVQ a3+24(FP), DX
- MOVQ a4+32(FP), R10
- MOVQ a5+40(FP), R8
- MOVQ a6+48(FP), R9
- MOVQ trap+0(FP), AX // syscall entry
- SYSCALL
- CMPQ AX, $0xfffffffffffff001
- JLS ok6
- MOVQ $-1, r1+56(FP)
- MOVQ $0, r2+64(FP)
- NEGQ AX
- MOVQ AX, err+72(FP)
- CALL runtime·exitsyscall(SB)
- RET
-ok6:
- MOVQ AX, r1+56(FP)
- MOVQ DX, r2+64(FP)
- MOVQ $0, err+72(FP)
- CALL runtime·exitsyscall(SB)
- RET
-
-// func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- MOVQ a1+8(FP), DI
- MOVQ a2+16(FP), SI
- MOVQ a3+24(FP), DX
- MOVQ trap+0(FP), AX // syscall entry
- SYSCALL
- CMPQ AX, $0xfffffffffffff001
- JLS ok1
- MOVQ $-1, r1+32(FP)
- MOVQ $0, r2+40(FP)
- NEGQ AX
- MOVQ AX, err+48(FP)
- RET
-ok1:
- MOVQ AX, r1+32(FP)
- MOVQ DX, r2+40(FP)
- MOVQ $0, err+48(FP)
- RET
-
-// func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- MOVQ a1+8(FP), DI
- MOVQ a2+16(FP), SI
- MOVQ a3+24(FP), DX
- MOVQ a4+32(FP), R10
- MOVQ a5+40(FP), R8
- MOVQ a6+48(FP), R9
- MOVQ trap+0(FP), AX // syscall entry
- SYSCALL
- CMPQ AX, $0xfffffffffffff001
- JLS ok2
- MOVQ $-1, r1+56(FP)
- MOVQ $0, r2+64(FP)
- NEGQ AX
- MOVQ AX, err+72(FP)
- RET
-ok2:
- MOVQ AX, r1+56(FP)
- MOVQ DX, r2+64(FP)
- MOVQ $0, err+72(FP)
- RET
-
-// func rawVforkSyscall(trap, a1 uintptr) (r1, err uintptr)
-TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-32
- MOVQ a1+8(FP), DI
- MOVQ $0, SI
- MOVQ $0, DX
- MOVQ $0, R10
- MOVQ $0, R8
- MOVQ $0, R9
- MOVQ trap+0(FP), AX // syscall entry
- POPQ R12 // preserve return address
- SYSCALL
- PUSHQ R12
- CMPQ AX, $0xfffffffffffff001
- JLS ok2
- MOVQ $-1, r1+16(FP)
- NEGQ AX
- MOVQ AX, err+24(FP)
- RET
-ok2:
- MOVQ AX, r1+16(FP)
- MOVQ $0, err+24(FP)
- RET
-
-// func rawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr)
-TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48
- MOVQ a1+8(FP), DI
- MOVQ a2+16(FP), SI
- MOVQ a3+24(FP), DX
- MOVQ trap+0(FP), AX // syscall entry
- SYSCALL
- MOVQ AX, r1+32(FP)
- MOVQ DX, r2+40(FP)
- RET
-
-// func gettimeofday(tv *Timeval) (err uintptr)
-TEXT ·gettimeofday(SB),NOSPLIT,$0-16
- MOVQ tv+0(FP), DI
- MOVQ $0, SI
- MOVQ runtime·vdsoGettimeofdaySym(SB), AX
- TESTQ AX, AX
- JZ fallback
- CALL AX
-ret:
- CMPQ AX, $0xfffffffffffff001
- JLS ok7
- NEGQ AX
- MOVQ AX, err+8(FP)
- RET
-fallback:
- MOVL $SYS_gettimeofday, AX
- SYSCALL
- JMP ret
-ok7:
- MOVQ $0, err+8(FP)
- RET
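-
-// An illustrative Go-level sketch (not part of this assembly file) of how
-// the wrappers above are invoked on linux/amd64; SYS_GETPID is a real
-// constant in package syscall:
-//
-// pid, _, errno := syscall.RawSyscall(syscall.SYS_GETPID, 0, 0, 0)
-// if errno != 0 {
-// // handle the error (getpid itself cannot fail)
-// }
-// _ = pid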
diff --git a/contrib/go/_std_1.18/src/syscall/dirent.go b/contrib/go/_std_1.18/src/syscall/dirent.go
deleted file mode 100644
index 237ea79ad6..0000000000
--- a/contrib/go/_std_1.18/src/syscall/dirent.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris
-
-package syscall
-
-import "unsafe"
-
-// readInt returns the size-bytes unsigned integer in native byte order at offset off.
-func readInt(b []byte, off, size uintptr) (u uint64, ok bool) {
- if len(b) < int(off+size) {
- return 0, false
- }
- if isBigEndian {
- return readIntBE(b[off:], size), true
- }
- return readIntLE(b[off:], size), true
-}
-
-func readIntBE(b []byte, size uintptr) uint64 {
- switch size {
- case 1:
- return uint64(b[0])
- case 2:
- _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[1]) | uint64(b[0])<<8
- case 4:
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24
- case 8:
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
- uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
- default:
- panic("syscall: readInt with unsupported size")
- }
-}
-
-func readIntLE(b []byte, size uintptr) uint64 {
- switch size {
- case 1:
- return uint64(b[0])
- case 2:
- _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8
- case 4:
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
- case 8:
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- default:
- panic("syscall: readInt with unsupported size")
- }
-}
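-
-// An illustrative sketch (not from the original file): assembling a 2-byte
-// little-endian value, as readIntLE does for dirent fields. The function
-// name is made up.
-func exampleReadIntLE() {
- b := []byte{0x01, 0x02}
- if v := readIntLE(b, 2); v != 0x0201 {
- panic("unexpected value") // 0x01 is the low byte in little-endian order
- }
-}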
-
-// ParseDirent parses up to max directory entries in buf,
-// appending the names to names. It returns the number of
-// bytes consumed from buf, the number of entries added
-// to names, and the new names slice.
-func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
- origlen := len(buf)
- count = 0
- for max != 0 && len(buf) > 0 {
- reclen, ok := direntReclen(buf)
- if !ok || reclen > uint64(len(buf)) {
- return origlen, count, names
- }
- rec := buf[:reclen]
- buf = buf[reclen:]
- ino, ok := direntIno(rec)
- if !ok {
- break
- }
- if ino == 0 { // File absent in directory.
- continue
- }
- const namoff = uint64(unsafe.Offsetof(Dirent{}.Name))
- namlen, ok := direntNamlen(rec)
- if !ok || namoff+namlen > uint64(len(rec)) {
- break
- }
- name := rec[namoff : namoff+namlen]
- for i, c := range name {
- if c == 0 {
- name = name[:i]
- break
- }
- }
- // Check for useless names before allocating a string.
- if string(name) == "." || string(name) == ".." {
- continue
- }
- max--
- count++
- names = append(names, string(name))
- }
- return origlen - len(buf), count, names
-}
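-
-// An illustrative sketch (not part of the original file): reading raw
-// directory entries with ReadDirent and decoding them with ParseDirent;
-// max = -1 means "no entry limit". The function name is made up.
-func listDirNames(path string) ([]string, error) {
- fd, err := Open(path, O_RDONLY, 0)
- if err != nil {
- return nil, err
- }
- defer Close(fd)
-
- var names []string
- buf := make([]byte, 4096)
- for {
- n, err := ReadDirent(fd, buf)
- if err != nil {
- return nil, err
- }
- if n == 0 {
- return names, nil // end of directory
- }
- _, _, names = ParseDirent(buf[:n], -1, names)
- }
-}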
diff --git a/contrib/go/_std_1.18/src/syscall/endian_little.go b/contrib/go/_std_1.18/src/syscall/endian_little.go
deleted file mode 100644
index edfb6cf164..0000000000
--- a/contrib/go/_std_1.18/src/syscall/endian_little.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-//
-//go:build 386 || amd64 || arm || arm64 || ppc64le || mips64le || mipsle || riscv64 || wasm
-
-package syscall
-
-const isBigEndian = false
diff --git a/contrib/go/_std_1.18/src/syscall/env_unix.go b/contrib/go/_std_1.18/src/syscall/env_unix.go
deleted file mode 100644
index 521967c79f..0000000000
--- a/contrib/go/_std_1.18/src/syscall/env_unix.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris || plan9
-
-// Unix environment variables.
-
-package syscall
-
-import (
- "runtime"
- "sync"
-)
-
-var (
- // envOnce guards initialization by copyenv, which populates env.
- envOnce sync.Once
-
- // envLock guards env and envs.
- envLock sync.RWMutex
-
- // env maps from an environment variable to its first occurrence in envs.
- env map[string]int
-
- // envs is provided by the runtime. Elements are expected to
- // be of the form "key=value". An empty string means deleted
- // (or a duplicate to be ignored).
- envs []string = runtime_envs()
-)
-
-func runtime_envs() []string // in package runtime
-
-// setenv_c and unsetenv_c are provided by the runtime but are no-ops
-// if cgo isn't loaded.
-func setenv_c(k, v string)
-func unsetenv_c(k string)
-
-func copyenv() {
- env = make(map[string]int)
- for i, s := range envs {
- for j := 0; j < len(s); j++ {
- if s[j] == '=' {
- key := s[:j]
- if _, ok := env[key]; !ok {
- env[key] = i // first mention of key
- } else {
- // Clear duplicate keys. This permits Unsetenv to
- // safely delete only the first item without
- // worrying about unshadowing a later one,
- // which might be a security problem.
- envs[i] = ""
- }
- break
- }
- }
- }
-}
-
-func Unsetenv(key string) error {
- envOnce.Do(copyenv)
-
- envLock.Lock()
- defer envLock.Unlock()
-
- if i, ok := env[key]; ok {
- envs[i] = ""
- delete(env, key)
- }
- unsetenv_c(key)
- return nil
-}
-
-func Getenv(key string) (value string, found bool) {
- envOnce.Do(copyenv)
- if len(key) == 0 {
- return "", false
- }
-
- envLock.RLock()
- defer envLock.RUnlock()
-
- i, ok := env[key]
- if !ok {
- return "", false
- }
- s := envs[i]
- for i := 0; i < len(s); i++ {
- if s[i] == '=' {
- return s[i+1:], true
- }
- }
- return "", false
-}
-
-func Setenv(key, value string) error {
- envOnce.Do(copyenv)
- if len(key) == 0 {
- return EINVAL
- }
- for i := 0; i < len(key); i++ {
- if key[i] == '=' || key[i] == 0 {
- return EINVAL
- }
- }
- // On Plan 9, null is used as a separator, e.g. in $path.
- if runtime.GOOS != "plan9" {
- for i := 0; i < len(value); i++ {
- if value[i] == 0 {
- return EINVAL
- }
- }
- }
-
- envLock.Lock()
- defer envLock.Unlock()
-
- i, ok := env[key]
- kv := key + "=" + value
- if ok {
- envs[i] = kv
- } else {
- i = len(envs)
- envs = append(envs, kv)
- }
- env[key] = i
- setenv_c(key, value)
- return nil
-}
-
-func Clearenv() {
- envOnce.Do(copyenv) // prevent copyenv in Getenv/Setenv
-
- envLock.Lock()
- defer envLock.Unlock()
-
- for k := range env {
- unsetenv_c(k)
- }
- env = make(map[string]int)
- envs = []string{}
-}
-
-func Environ() []string {
- envOnce.Do(copyenv)
- envLock.RLock()
- defer envLock.RUnlock()
- a := make([]string, 0, len(envs))
- for _, env := range envs {
- if env != "" {
- a = append(a, env)
- }
- }
- return a
-}
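-
-// An illustrative sketch (not from the original file): the usual
-// read/modify cycle over the process environment. MY_FLAG is a
-// made-up variable name.
-func exampleEnv() {
- if err := Setenv("MY_FLAG", "on"); err != nil {
- return
- }
- if v, ok := Getenv("MY_FLAG"); ok {
- _ = v // "on"
- }
- _ = Unsetenv("MY_FLAG")
-}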
diff --git a/contrib/go/_std_1.18/src/syscall/exec_libc2.go b/contrib/go/_std_1.18/src/syscall/exec_libc2.go
deleted file mode 100644
index b05f053bbf..0000000000
--- a/contrib/go/_std_1.18/src/syscall/exec_libc2.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build darwin || (openbsd && !mips64)
-
-package syscall
-
-import (
- "internal/abi"
- "unsafe"
-)
-
-type SysProcAttr struct {
- Chroot string // Chroot.
- Credential *Credential // Credential.
- Ptrace bool // Enable tracing.
- Setsid bool // Create session.
- // Setpgid sets the process group ID of the child to Pgid,
- // or, if Pgid == 0, to the new child's process ID.
- Setpgid bool
- // Setctty sets the controlling terminal of the child to
- // file descriptor Ctty. Ctty must be a descriptor number
- // in the child process: an index into ProcAttr.Files.
- // This is only meaningful if Setsid is true.
- Setctty bool
- Noctty bool // Detach fd 0 from controlling terminal
- Ctty int // Controlling TTY fd
- // Foreground places the child process group in the foreground.
- // This implies Setpgid. The Ctty field must be set to
- // the descriptor of the controlling TTY.
- // Unlike Setctty, in this case Ctty must be a descriptor
- // number in the parent process.
- Foreground bool
- Pgid int // Child's process group ID if Setpgid.
-}
-
-// Implemented in runtime package.
-func runtime_BeforeFork()
-func runtime_AfterFork()
-func runtime_AfterForkInChild()
-
-// Fork, dup fd onto 0..len(fd), and exec(argv0, argvv, envv) in child.
-// If a dup or exec fails, write the errno error to pipe.
-// (Pipe is close-on-exec so if exec succeeds, it will be closed.)
-// In the child, this function must not acquire any locks, because
-// they might have been locked at the time of the fork. This means
-// no rescheduling, no malloc calls, and no new stack segments.
-// For the same reason the compiler does not race-instrument it.
-// The calls to rawSyscall are okay because they are assembly
-// functions that do not grow the stack.
-//go:norace
-func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr *ProcAttr, sys *SysProcAttr, pipe int) (pid int, err Errno) {
- // Declare all variables at top in case any
- // declarations require heap allocation (e.g., err1).
- var (
- r1 uintptr
- err1 Errno
- nextfd int
- i int
- )
-
- // Guard against side effects of shuffling fds below.
- // Make sure that nextfd is beyond any currently open files so
- // that we can't run the risk of overwriting any of them.
- fd := make([]int, len(attr.Files))
- nextfd = len(attr.Files)
- for i, ufd := range attr.Files {
- if nextfd < int(ufd) {
- nextfd = int(ufd)
- }
- fd[i] = int(ufd)
- }
- nextfd++
-
- // About to call fork.
- // No more allocation or calls of non-assembly functions.
- runtime_BeforeFork()
- r1, _, err1 = rawSyscall(abi.FuncPCABI0(libc_fork_trampoline), 0, 0, 0)
- if err1 != 0 {
- runtime_AfterFork()
- return 0, err1
- }
-
- if r1 != 0 {
- // parent; return PID
- runtime_AfterFork()
- return int(r1), 0
- }
-
- // Fork succeeded, now in child.
-
- // Enable tracing if requested.
- if sys.Ptrace {
- if err := ptrace(PTRACE_TRACEME, 0, 0, 0); err != nil {
- err1 = err.(Errno)
- goto childerror
- }
- }
-
- // Session ID
- if sys.Setsid {
- _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_setsid_trampoline), 0, 0, 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // Set process group
- if sys.Setpgid || sys.Foreground {
- // Place child in process group.
- _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_setpgid_trampoline), 0, uintptr(sys.Pgid), 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- if sys.Foreground {
- // This should really be pid_t; however, _C_int (aka int32) is
- // generally equivalent.
- pgrp := _C_int(sys.Pgid)
- if pgrp == 0 {
- r1, _, err1 = rawSyscall(abi.FuncPCABI0(libc_getpid_trampoline), 0, 0, 0)
- if err1 != 0 {
- goto childerror
- }
- pgrp = _C_int(r1)
- }
-
- // Place process group in foreground.
- _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(sys.Ctty), uintptr(TIOCSPGRP), uintptr(unsafe.Pointer(&pgrp)))
- if err1 != 0 {
- goto childerror
- }
- }
-
- // Restore the signal mask. We do this after TIOCSPGRP to avoid
- // having the kernel send a SIGTTOU signal to the process group.
- runtime_AfterForkInChild()
-
- // Chroot
- if chroot != nil {
- _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_chroot_trampoline), uintptr(unsafe.Pointer(chroot)), 0, 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // User and groups
- if cred := sys.Credential; cred != nil {
- ngroups := uintptr(len(cred.Groups))
- groups := uintptr(0)
- if ngroups > 0 {
- groups = uintptr(unsafe.Pointer(&cred.Groups[0]))
- }
- if !cred.NoSetGroups {
- _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_setgroups_trampoline), ngroups, groups, 0)
- if err1 != 0 {
- goto childerror
- }
- }
- _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_setgid_trampoline), uintptr(cred.Gid), 0, 0)
- if err1 != 0 {
- goto childerror
- }
- _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_setuid_trampoline), uintptr(cred.Uid), 0, 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // Chdir
- if dir != nil {
- _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_chdir_trampoline), uintptr(unsafe.Pointer(dir)), 0, 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // Pass 1: look for fd[i] < i and move those up above len(fd)
- // so that pass 2 won't stomp on an fd it needs later.
- if pipe < nextfd {
- _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_dup2_trampoline), uintptr(pipe), uintptr(nextfd), 0)
- if err1 != 0 {
- goto childerror
- }
- rawSyscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(nextfd), F_SETFD, FD_CLOEXEC)
- pipe = nextfd
- nextfd++
- }
- for i = 0; i < len(fd); i++ {
- if fd[i] >= 0 && fd[i] < int(i) {
- if nextfd == pipe { // don't stomp on pipe
- nextfd++
- }
- _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_dup2_trampoline), uintptr(fd[i]), uintptr(nextfd), 0)
- if err1 != 0 {
- goto childerror
- }
- rawSyscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(nextfd), F_SETFD, FD_CLOEXEC)
- fd[i] = nextfd
- nextfd++
- }
- }
-
- // Pass 2: dup fd[i] down onto i.
- for i = 0; i < len(fd); i++ {
- if fd[i] == -1 {
- rawSyscall(abi.FuncPCABI0(libc_close_trampoline), uintptr(i), 0, 0)
- continue
- }
- if fd[i] == int(i) {
- // dup2(i, i) won't clear close-on-exec flag on Linux,
- // probably not elsewhere either.
- _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd[i]), F_SETFD, 0)
- if err1 != 0 {
- goto childerror
- }
- continue
- }
- // The new fd is created NOT close-on-exec,
- // which is exactly what we want.
- _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_dup2_trampoline), uintptr(fd[i]), uintptr(i), 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // By convention, we don't close-on-exec the fds we are
- // started with, so if len(fd) < 3, close 0, 1, 2 as needed.
- // Programs that know they inherit fds >= 3 will need
- // to set them close-on-exec.
- for i = len(fd); i < 3; i++ {
- rawSyscall(abi.FuncPCABI0(libc_close_trampoline), uintptr(i), 0, 0)
- }
-
- // Detach fd 0 from tty
- if sys.Noctty {
- _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), 0, uintptr(TIOCNOTTY), 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // Set the controlling TTY to Ctty
- if sys.Setctty {
- _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(sys.Ctty), uintptr(TIOCSCTTY), 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // Time to exec.
- _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_execve_trampoline),
- uintptr(unsafe.Pointer(argv0)),
- uintptr(unsafe.Pointer(&argv[0])),
- uintptr(unsafe.Pointer(&envv[0])))
-
-childerror:
- // send error code on pipe
- rawSyscall(abi.FuncPCABI0(libc_write_trampoline), uintptr(pipe), uintptr(unsafe.Pointer(&err1)), unsafe.Sizeof(err1))
- for {
- rawSyscall(abi.FuncPCABI0(libc_exit_trampoline), 253, 0, 0)
- }
-}
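-
-// An illustrative sketch (not part of the original file): the exported
-// ForkExec entry point that eventually reaches forkAndExecInChild, wiring
-// the child's fds 0..2 to the parent's and starting a new session. The
-// function name is made up.
-func spawnLs() (int, error) {
- return ForkExec("/bin/ls", []string{"ls", "-l"}, &ProcAttr{
- Files: []uintptr{0, 1, 2}, // child's stdin/stdout/stderr = parent's
- Sys: &SysProcAttr{Setsid: true},
- })
-}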
diff --git a/contrib/go/_std_1.18/src/syscall/exec_linux.go b/contrib/go/_std_1.18/src/syscall/exec_linux.go
deleted file mode 100644
index 0f0dee8ea5..0000000000
--- a/contrib/go/_std_1.18/src/syscall/exec_linux.go
+++ /dev/null
@@ -1,621 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-
-package syscall
-
-import (
- "internal/itoa"
- "runtime"
- "unsafe"
-)
-
-// SysProcIDMap holds Container ID to Host ID mappings used for User Namespaces in Linux.
-// See user_namespaces(7).
-type SysProcIDMap struct {
- ContainerID int // Container ID.
- HostID int // Host ID.
- Size int // Size.
-}
-
-type SysProcAttr struct {
- Chroot string // Chroot.
- Credential *Credential // Credential.
- // Ptrace tells the child to call ptrace(PTRACE_TRACEME).
- // Call runtime.LockOSThread before starting a process with this set,
- // and don't call UnlockOSThread until done with PtraceSyscall calls.
- Ptrace bool
- Setsid bool // Create session.
- // Setpgid sets the process group ID of the child to Pgid,
- // or, if Pgid == 0, to the new child's process ID.
- Setpgid bool
- // Setctty sets the controlling terminal of the child to
- // file descriptor Ctty. Ctty must be a descriptor number
- // in the child process: an index into ProcAttr.Files.
- // This is only meaningful if Setsid is true.
- Setctty bool
- Noctty bool // Detach fd 0 from controlling terminal
- Ctty int // Controlling TTY fd
- // Foreground places the child process group in the foreground.
- // This implies Setpgid. The Ctty field must be set to
- // the descriptor of the controlling TTY.
- // Unlike Setctty, in this case Ctty must be a descriptor
- // number in the parent process.
- Foreground bool
- Pgid int // Child's process group ID if Setpgid.
- Pdeathsig Signal // Signal that the process will get when its parent dies (Linux and FreeBSD only)
- Cloneflags uintptr // Flags for clone calls (Linux only)
- Unshareflags uintptr // Flags for unshare calls (Linux only)
- UidMappings []SysProcIDMap // User ID mappings for user namespaces.
- GidMappings []SysProcIDMap // Group ID mappings for user namespaces.
- // GidMappingsEnableSetgroups enables the setgroups syscall.
- // If false, the setgroups syscall will be disabled for the child process.
- // This parameter is a no-op if GidMappings == nil. Otherwise for unprivileged
- // users this should be set to false for the mappings to work.
- GidMappingsEnableSetgroups bool
- AmbientCaps []uintptr // Ambient capabilities (Linux only)
-}
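-
-// An illustrative sketch (not from the original file): starting a child in
-// a new user namespace, mapping root inside the namespace to the current
-// user outside it. Getuid and Getgid are real functions in this package;
-// the function name is made up.
-func spawnInUserNS() (int, error) {
- return ForkExec("/bin/sh", []string{"sh"}, &ProcAttr{
- Files: []uintptr{0, 1, 2},
- Sys: &SysProcAttr{
- Cloneflags: CLONE_NEWUSER,
- UidMappings: []SysProcIDMap{
- {ContainerID: 0, HostID: Getuid(), Size: 1},
- },
- GidMappings: []SysProcIDMap{
- {ContainerID: 0, HostID: Getgid(), Size: 1},
- },
- // Unprivileged callers must leave setgroups disabled for
- // the GID mapping to be writable.
- GidMappingsEnableSetgroups: false,
- },
- })
-}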
-
-var (
- none = [...]byte{'n', 'o', 'n', 'e', 0}
- slash = [...]byte{'/', 0}
-)
-
-// Implemented in runtime package.
-func runtime_BeforeFork()
-func runtime_AfterFork()
-func runtime_AfterForkInChild()
-
-// Fork, dup fd onto 0..len(fd), and exec(argv0, argvv, envv) in child.
-// If a dup or exec fails, write the errno error to pipe.
-// (Pipe is close-on-exec so if exec succeeds, it will be closed.)
-// In the child, this function must not acquire any locks, because
-// they might have been locked at the time of the fork. This means
-// no rescheduling, no malloc calls, and no new stack segments.
-// For the same reason the compiler does not race-instrument it.
-// The calls to RawSyscall are okay because they are assembly
-// functions that do not grow the stack.
-//go:norace
-func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr *ProcAttr, sys *SysProcAttr, pipe int) (pid int, err Errno) {
- // Set up and fork. This returns immediately in the parent or
- // if there's an error.
- r1, err1, p, locked := forkAndExecInChild1(argv0, argv, envv, chroot, dir, attr, sys, pipe)
- if locked {
- runtime_AfterFork()
- }
- if err1 != 0 {
- return 0, err1
- }
-
- // parent; return PID
- pid = int(r1)
-
- if sys.UidMappings != nil || sys.GidMappings != nil {
- Close(p[0])
- var err2 Errno
- // uid/gid mappings will be written after fork and unshare(2) for user
- // namespaces.
- if sys.Unshareflags&CLONE_NEWUSER == 0 {
- if err := writeUidGidMappings(pid, sys); err != nil {
- err2 = err.(Errno)
- }
- }
- RawSyscall(SYS_WRITE, uintptr(p[1]), uintptr(unsafe.Pointer(&err2)), unsafe.Sizeof(err2))
- Close(p[1])
- }
-
- return pid, 0
-}
-
-const _LINUX_CAPABILITY_VERSION_3 = 0x20080522
-
-type capHeader struct {
- version uint32
- pid int32
-}
-
-type capData struct {
- effective uint32
- permitted uint32
- inheritable uint32
-}
-type caps struct {
- hdr capHeader
- data [2]capData
-}
-
-// See CAP_TO_INDEX in linux/capability.h:
-func capToIndex(cap uintptr) uintptr { return cap >> 5 }
-
-// See CAP_TO_MASK in linux/capability.h:
-func capToMask(cap uintptr) uint32 { return 1 << uint(cap&31) }
-
-// forkAndExecInChild1 implements the body of forkAndExecInChild up to
-// the parent's post-fork path. This is a separate function so we can
-// separate the child's and parent's stack frames if we're using
-// vfork.
-//
-// This is go:noinline because the point is to keep the stack frames
-// of this and forkAndExecInChild separate.
-//
-//go:noinline
-//go:norace
-func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr *ProcAttr, sys *SysProcAttr, pipe int) (r1 uintptr, err1 Errno, p [2]int, locked bool) {
- // Defined in linux/prctl.h starting with Linux 4.3.
- const (
- PR_CAP_AMBIENT = 0x2f
- PR_CAP_AMBIENT_RAISE = 0x2
- )
-
- // vfork requires that the child not touch any of the parent's
- // active stack frames. Hence, the child does all post-fork
- // processing in this stack frame and never returns, while the
- // parent returns immediately from this frame and does all
- // post-fork processing in the outer frame.
- // Declare all variables at top in case any
- // declarations require heap allocation (e.g., err1).
- var (
- err2 Errno
- nextfd int
- i int
- caps caps
- fd1 uintptr
- puid, psetgroups, pgid []byte
- uidmap, setgroups, gidmap []byte
- )
-
- if sys.UidMappings != nil {
- puid = []byte("/proc/self/uid_map\000")
- uidmap = formatIDMappings(sys.UidMappings)
- }
-
- if sys.GidMappings != nil {
- psetgroups = []byte("/proc/self/setgroups\000")
- pgid = []byte("/proc/self/gid_map\000")
-
- if sys.GidMappingsEnableSetgroups {
- setgroups = []byte("allow\000")
- } else {
- setgroups = []byte("deny\000")
- }
- gidmap = formatIDMappings(sys.GidMappings)
- }
-
- // Record parent PID so child can test if it has died.
- ppid, _ := rawSyscallNoError(SYS_GETPID, 0, 0, 0)
-
- // Guard against side effects of shuffling fds below.
- // Make sure that nextfd is beyond any currently open files so
- // that we can't run the risk of overwriting any of them.
- fd := make([]int, len(attr.Files))
- nextfd = len(attr.Files)
- for i, ufd := range attr.Files {
- if nextfd < int(ufd) {
- nextfd = int(ufd)
- }
- fd[i] = int(ufd)
- }
- nextfd++
-
- // Allocate another pipe for parent to child communication for
- // synchronizing writing of User ID/Group ID mappings.
- if sys.UidMappings != nil || sys.GidMappings != nil {
- if err := forkExecPipe(p[:]); err != nil {
- err1 = err.(Errno)
- return
- }
- }
-
- // About to call fork.
- // No more allocation or calls of non-assembly functions.
- runtime_BeforeFork()
- locked = true
- switch {
- case sys.Cloneflags&CLONE_NEWUSER == 0 && sys.Unshareflags&CLONE_NEWUSER == 0:
- r1, err1 = rawVforkSyscall(SYS_CLONE, uintptr(SIGCHLD|CLONE_VFORK|CLONE_VM)|sys.Cloneflags)
- case runtime.GOARCH == "s390x":
- r1, _, err1 = RawSyscall6(SYS_CLONE, 0, uintptr(SIGCHLD)|sys.Cloneflags, 0, 0, 0, 0)
- default:
- r1, _, err1 = RawSyscall6(SYS_CLONE, uintptr(SIGCHLD)|sys.Cloneflags, 0, 0, 0, 0, 0)
- }
- if err1 != 0 || r1 != 0 {
- // If we're in the parent, we must return immediately
- // so we're not in the same stack frame as the child.
- // This can at most use the return PC, which the child
- // will not modify, and the results of
- // rawVforkSyscall, which must have been written after
- // the child was replaced.
- return
- }
-
- // Fork succeeded, now in child.
-
- // Enable the "keep capabilities" flag to set ambient capabilities later.
- if len(sys.AmbientCaps) > 0 {
- _, _, err1 = RawSyscall6(SYS_PRCTL, PR_SET_KEEPCAPS, 1, 0, 0, 0, 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // Wait for User ID/Group ID mappings to be written.
- if sys.UidMappings != nil || sys.GidMappings != nil {
- if _, _, err1 = RawSyscall(SYS_CLOSE, uintptr(p[1]), 0, 0); err1 != 0 {
- goto childerror
- }
- r1, _, err1 = RawSyscall(SYS_READ, uintptr(p[0]), uintptr(unsafe.Pointer(&err2)), unsafe.Sizeof(err2))
- if err1 != 0 {
- goto childerror
- }
- if r1 != unsafe.Sizeof(err2) {
- err1 = EINVAL
- goto childerror
- }
- if err2 != 0 {
- err1 = err2
- goto childerror
- }
- }
-
- // Session ID
- if sys.Setsid {
- _, _, err1 = RawSyscall(SYS_SETSID, 0, 0, 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // Set process group
- if sys.Setpgid || sys.Foreground {
- // Place child in process group.
- _, _, err1 = RawSyscall(SYS_SETPGID, 0, uintptr(sys.Pgid), 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- if sys.Foreground {
- pgrp := int32(sys.Pgid)
- if pgrp == 0 {
- r1, _ = rawSyscallNoError(SYS_GETPID, 0, 0, 0)
-
- pgrp = int32(r1)
- }
-
- // Place process group in foreground.
- _, _, err1 = RawSyscall(SYS_IOCTL, uintptr(sys.Ctty), uintptr(TIOCSPGRP), uintptr(unsafe.Pointer(&pgrp)))
- if err1 != 0 {
- goto childerror
- }
- }
-
- // Restore the signal mask. We do this after TIOCSPGRP to avoid
- // having the kernel send a SIGTTOU signal to the process group.
- runtime_AfterForkInChild()
-
- // Unshare
- if sys.Unshareflags != 0 {
- _, _, err1 = RawSyscall(SYS_UNSHARE, sys.Unshareflags, 0, 0)
- if err1 != 0 {
- goto childerror
- }
-
- if sys.Unshareflags&CLONE_NEWUSER != 0 && sys.GidMappings != nil {
- dirfd := int(_AT_FDCWD)
- if fd1, _, err1 = RawSyscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(&psetgroups[0])), uintptr(O_WRONLY), 0, 0, 0); err1 != 0 {
- goto childerror
- }
- r1, _, err1 = RawSyscall(SYS_WRITE, uintptr(fd1), uintptr(unsafe.Pointer(&setgroups[0])), uintptr(len(setgroups)))
- if err1 != 0 {
- goto childerror
- }
- if _, _, err1 = RawSyscall(SYS_CLOSE, uintptr(fd1), 0, 0); err1 != 0 {
- goto childerror
- }
-
- if fd1, _, err1 = RawSyscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(&pgid[0])), uintptr(O_WRONLY), 0, 0, 0); err1 != 0 {
- goto childerror
- }
- r1, _, err1 = RawSyscall(SYS_WRITE, uintptr(fd1), uintptr(unsafe.Pointer(&gidmap[0])), uintptr(len(gidmap)))
- if err1 != 0 {
- goto childerror
- }
- if _, _, err1 = RawSyscall(SYS_CLOSE, uintptr(fd1), 0, 0); err1 != 0 {
- goto childerror
- }
- }
-
- if sys.Unshareflags&CLONE_NEWUSER != 0 && sys.UidMappings != nil {
- dirfd := int(_AT_FDCWD)
- if fd1, _, err1 = RawSyscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(&puid[0])), uintptr(O_WRONLY), 0, 0, 0); err1 != 0 {
- goto childerror
- }
- r1, _, err1 = RawSyscall(SYS_WRITE, uintptr(fd1), uintptr(unsafe.Pointer(&uidmap[0])), uintptr(len(uidmap)))
- if err1 != 0 {
- goto childerror
- }
- if _, _, err1 = RawSyscall(SYS_CLOSE, uintptr(fd1), 0, 0); err1 != 0 {
- goto childerror
- }
- }
-
- // The unshare system call in Linux doesn't unshare mount points
- // mounted with --shared. Systemd mounts / with --shared. For a
- // long discussion of the pros and cons of this see debian bug 739593.
- // The Go model of unsharing is more like Plan 9, where you ask
- // to unshare and the namespaces are unconditionally unshared.
- // To make this model work we must further mark / as MS_PRIVATE.
- // This is what the standard unshare command does.
- if sys.Unshareflags&CLONE_NEWNS == CLONE_NEWNS {
- _, _, err1 = RawSyscall6(SYS_MOUNT, uintptr(unsafe.Pointer(&none[0])), uintptr(unsafe.Pointer(&slash[0])), 0, MS_REC|MS_PRIVATE, 0, 0)
- if err1 != 0 {
- goto childerror
- }
- }
- }
-
- // Chroot
- if chroot != nil {
- _, _, err1 = RawSyscall(SYS_CHROOT, uintptr(unsafe.Pointer(chroot)), 0, 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // User and groups
- if cred := sys.Credential; cred != nil {
- ngroups := uintptr(len(cred.Groups))
- groups := uintptr(0)
- if ngroups > 0 {
- groups = uintptr(unsafe.Pointer(&cred.Groups[0]))
- }
- if !(sys.GidMappings != nil && !sys.GidMappingsEnableSetgroups && ngroups == 0) && !cred.NoSetGroups {
- _, _, err1 = RawSyscall(_SYS_setgroups, ngroups, groups, 0)
- if err1 != 0 {
- goto childerror
- }
- }
- _, _, err1 = RawSyscall(sys_SETGID, uintptr(cred.Gid), 0, 0)
- if err1 != 0 {
- goto childerror
- }
- _, _, err1 = RawSyscall(sys_SETUID, uintptr(cred.Uid), 0, 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- if len(sys.AmbientCaps) != 0 {
- // Ambient capabilities were added in the 4.3 kernel,
- // so it is safe to always use _LINUX_CAPABILITY_VERSION_3.
- caps.hdr.version = _LINUX_CAPABILITY_VERSION_3
-
- if _, _, err1 := RawSyscall(SYS_CAPGET, uintptr(unsafe.Pointer(&caps.hdr)), uintptr(unsafe.Pointer(&caps.data[0])), 0); err1 != 0 {
- goto childerror
- }
-
- for _, c := range sys.AmbientCaps {
- // Add the c capability to the permitted and inheritable capability mask,
- // otherwise we will not be able to add it to the ambient capability mask.
- caps.data[capToIndex(c)].permitted |= capToMask(c)
- caps.data[capToIndex(c)].inheritable |= capToMask(c)
- }
-
- if _, _, err1 := RawSyscall(SYS_CAPSET, uintptr(unsafe.Pointer(&caps.hdr)), uintptr(unsafe.Pointer(&caps.data[0])), 0); err1 != 0 {
- goto childerror
- }
-
- for _, c := range sys.AmbientCaps {
- _, _, err1 = RawSyscall6(SYS_PRCTL, PR_CAP_AMBIENT, uintptr(PR_CAP_AMBIENT_RAISE), c, 0, 0, 0)
- if err1 != 0 {
- goto childerror
- }
- }
- }
-
- // Chdir
- if dir != nil {
- _, _, err1 = RawSyscall(SYS_CHDIR, uintptr(unsafe.Pointer(dir)), 0, 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // Parent death signal
- if sys.Pdeathsig != 0 {
- _, _, err1 = RawSyscall6(SYS_PRCTL, PR_SET_PDEATHSIG, uintptr(sys.Pdeathsig), 0, 0, 0, 0)
- if err1 != 0 {
- goto childerror
- }
-
- // Signal self if parent is already dead. This might cause a
- // duplicate signal in rare cases, but it won't matter when
- // using SIGKILL.
- r1, _ = rawSyscallNoError(SYS_GETPPID, 0, 0, 0)
- if r1 != ppid {
- pid, _ := rawSyscallNoError(SYS_GETPID, 0, 0, 0)
- _, _, err1 := RawSyscall(SYS_KILL, pid, uintptr(sys.Pdeathsig), 0)
- if err1 != 0 {
- goto childerror
- }
- }
- }
-
- // Pass 1: look for fd[i] < i and move those up above len(fd)
- // so that pass 2 won't stomp on an fd it needs later.
- if pipe < nextfd {
- _, _, err1 = RawSyscall(SYS_DUP3, uintptr(pipe), uintptr(nextfd), O_CLOEXEC)
- if err1 != 0 {
- goto childerror
- }
- pipe = nextfd
- nextfd++
- }
- for i = 0; i < len(fd); i++ {
- if fd[i] >= 0 && fd[i] < int(i) {
- if nextfd == pipe { // don't stomp on pipe
- nextfd++
- }
- _, _, err1 = RawSyscall(SYS_DUP3, uintptr(fd[i]), uintptr(nextfd), O_CLOEXEC)
- if err1 != 0 {
- goto childerror
- }
- fd[i] = nextfd
- nextfd++
- }
- }
-
- // Pass 2: dup fd[i] down onto i.
- for i = 0; i < len(fd); i++ {
- if fd[i] == -1 {
- RawSyscall(SYS_CLOSE, uintptr(i), 0, 0)
- continue
- }
- if fd[i] == int(i) {
- // dup2(i, i) won't clear close-on-exec flag on Linux,
- // probably not elsewhere either.
- _, _, err1 = RawSyscall(fcntl64Syscall, uintptr(fd[i]), F_SETFD, 0)
- if err1 != 0 {
- goto childerror
- }
- continue
- }
- // The new fd is created NOT close-on-exec,
- // which is exactly what we want.
- _, _, err1 = RawSyscall(SYS_DUP3, uintptr(fd[i]), uintptr(i), 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // By convention, we don't close-on-exec the fds we are
- // started with, so if len(fd) < 3, close 0, 1, 2 as needed.
- // Programs that know they inherit fds >= 3 will need
- // to set them close-on-exec.
- for i = len(fd); i < 3; i++ {
- RawSyscall(SYS_CLOSE, uintptr(i), 0, 0)
- }
-
- // Detach fd 0 from tty
- if sys.Noctty {
- _, _, err1 = RawSyscall(SYS_IOCTL, 0, uintptr(TIOCNOTTY), 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // Set the controlling TTY to Ctty
- if sys.Setctty {
- _, _, err1 = RawSyscall(SYS_IOCTL, uintptr(sys.Ctty), uintptr(TIOCSCTTY), 1)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // Enable tracing if requested.
- // Do this right before exec so that we don't unnecessarily trace the runtime
- // setting up after the fork. See issue #21428.
- if sys.Ptrace {
- _, _, err1 = RawSyscall(SYS_PTRACE, uintptr(PTRACE_TRACEME), 0, 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // Time to exec.
- _, _, err1 = RawSyscall(SYS_EXECVE,
- uintptr(unsafe.Pointer(argv0)),
- uintptr(unsafe.Pointer(&argv[0])),
- uintptr(unsafe.Pointer(&envv[0])))
-
-childerror:
- // send error code on pipe
- RawSyscall(SYS_WRITE, uintptr(pipe), uintptr(unsafe.Pointer(&err1)), unsafe.Sizeof(err1))
- for {
- RawSyscall(SYS_EXIT, 253, 0, 0)
- }
-}
-
-// Try to open a pipe with O_CLOEXEC set on both file descriptors.
-func forkExecPipe(p []int) (err error) {
- return Pipe2(p, O_CLOEXEC)
-}
-
-func formatIDMappings(idMap []SysProcIDMap) []byte {
- var data []byte
- for _, im := range idMap {
- data = append(data, []byte(itoa.Itoa(im.ContainerID)+" "+itoa.Itoa(im.HostID)+" "+itoa.Itoa(im.Size)+"\n")...)
- }
- return data
-}
-
-// writeIDMappings writes the user namespace User ID or Group ID mappings to the specified path.
-func writeIDMappings(path string, idMap []SysProcIDMap) error {
- fd, err := Open(path, O_RDWR, 0)
- if err != nil {
- return err
- }
-
- if _, err := Write(fd, formatIDMappings(idMap)); err != nil {
- Close(fd)
- return err
- }
-
- if err := Close(fd); err != nil {
- return err
- }
-
- return nil
-}
-
-// writeSetgroups writes to /proc/PID/setgroups "deny" if enable is false
-// and "allow" if enable is true.
-// This is needed since kernel 3.19, because you can't write gid_map without
-// disabling the setgroups() system call.
-func writeSetgroups(pid int, enable bool) error {
- sgf := "/proc/" + itoa.Itoa(pid) + "/setgroups"
- fd, err := Open(sgf, O_RDWR, 0)
- if err != nil {
- return err
- }
-
- var data []byte
- if enable {
- data = []byte("allow")
- } else {
- data = []byte("deny")
- }
-
- if _, err := Write(fd, data); err != nil {
- Close(fd)
- return err
- }
-
- return Close(fd)
-}
-
-// writeUidGidMappings writes User ID and Group ID mappings for user namespaces
-// for a process; it is called from the parent process.
-func writeUidGidMappings(pid int, sys *SysProcAttr) error {
- if sys.UidMappings != nil {
- uidf := "/proc/" + itoa.Itoa(pid) + "/uid_map"
- if err := writeIDMappings(uidf, sys.UidMappings); err != nil {
- return err
- }
- }
-
- if sys.GidMappings != nil {
- // If the kernel is too old to support /proc/PID/setgroups, writeSetGroups will return ENOENT; this is OK.
- if err := writeSetgroups(pid, sys.GidMappingsEnableSetgroups); err != nil && err != ENOENT {
- return err
- }
- gidf := "/proc/" + itoa.Itoa(pid) + "/gid_map"
- if err := writeIDMappings(gidf, sys.GidMappings); err != nil {
- return err
- }
- }
-
- return nil
-}
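
For context, a minimal sketch of how the SysProcAttr mapping fields above are typically driven from user code. This is illustrative only; it assumes a Linux host with unprivileged user namespaces enabled, and leaving GidMappingsEnableSetgroups false is what makes the mapping writable for unprivileged callers:

package main

import (
	"os"
	"os/exec"
	"syscall"
)

func main() {
	cmd := exec.Command("id")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Cloneflags: syscall.CLONE_NEWUSER,
		// Map the current user and group to root inside the namespace.
		UidMappings: []syscall.SysProcIDMap{
			{ContainerID: 0, HostID: os.Getuid(), Size: 1},
		},
		GidMappings: []syscall.SysProcIDMap{
			{ContainerID: 0, HostID: os.Getgid(), Size: 1},
		},
	}
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}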
diff --git a/contrib/go/_std_1.18/src/syscall/exec_unix.go b/contrib/go/_std_1.18/src/syscall/exec_unix.go
deleted file mode 100644
index 0e41959ffe..0000000000
--- a/contrib/go/_std_1.18/src/syscall/exec_unix.go
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-// Fork, exec, wait, etc.
-
-package syscall
-
-import (
- errorspkg "errors"
- "internal/bytealg"
- "runtime"
- "sync"
- "unsafe"
-)
-
-// Lock synchronizing creation of new file descriptors with fork.
-//
-// We want the child in a fork/exec sequence to inherit only the
-// file descriptors we intend. To do that, we mark all file
-// descriptors close-on-exec and then, in the child, explicitly
-// unmark the ones we want the exec'ed program to keep.
-// Unix doesn't make this easy: there is, in general, no way to
-// allocate a new file descriptor close-on-exec. Instead you
-// have to allocate the descriptor and then mark it close-on-exec.
-// If a fork happens between those two events, the child's exec
-// will inherit an unwanted file descriptor.
-//
-// This lock solves that race: the create new fd/mark close-on-exec
-// operation is done holding ForkLock for reading, and the fork itself
-// is done holding ForkLock for writing. At least, that's the idea.
-// There are some complications.
-//
-// Some system calls that create new file descriptors can block
-// for arbitrarily long times: open on a hung NFS server or named
-// pipe, accept on a socket, and so on. We can't reasonably grab
-// the lock across those operations.
-//
-// It is worse to inherit some file descriptors than others.
-// If a non-malicious child accidentally inherits an open ordinary file,
-// that's not a big deal. On the other hand, if a long-lived child
-// accidentally inherits the write end of a pipe, then the reader
-// of that pipe will not see EOF until that child exits, potentially
-// causing the parent program to hang. This is a common problem
-// in threaded C programs that use popen.
-//
-// Luckily, the file descriptors that are most important not to
-// inherit are not the ones that can take an arbitrarily long time
-// to create: pipe returns instantly, and the net package uses
-// non-blocking I/O to accept on a listening socket.
-// The rules for which file descriptor-creating operations use the
-// ForkLock are as follows:
-//
-// 1) Pipe. Does not block. Use the ForkLock.
-// 2) Socket. Does not block. Use the ForkLock.
-// 3) Accept. If using non-blocking mode, use the ForkLock.
-// Otherwise, live with the race.
-// 4) Open. Can block. Use O_CLOEXEC if available (Linux).
-// Otherwise, live with the race.
-// 5) Dup. Does not block. Use the ForkLock.
-// On Linux, could use fcntl F_DUPFD_CLOEXEC
-// instead of the ForkLock, but only for dup(fd, -1).
-
-var ForkLock sync.RWMutex
-
-// StringSlicePtr converts a slice of strings to a slice of pointers
-// to NUL-terminated byte arrays. If any string contains a NUL byte
-// this function panics instead of returning an error.
-//
-// Deprecated: Use SlicePtrFromStrings instead.
-func StringSlicePtr(ss []string) []*byte {
- bb := make([]*byte, len(ss)+1)
- for i := 0; i < len(ss); i++ {
- bb[i] = StringBytePtr(ss[i])
- }
- bb[len(ss)] = nil
- return bb
-}
-
-// SlicePtrFromStrings converts a slice of strings to a slice of
-// pointers to NUL-terminated byte arrays. If any string contains
-// a NUL byte, it returns (nil, EINVAL).
-func SlicePtrFromStrings(ss []string) ([]*byte, error) {
- n := 0
- for _, s := range ss {
- if bytealg.IndexByteString(s, 0) != -1 {
- return nil, EINVAL
- }
- n += len(s) + 1 // +1 for NUL
- }
- bb := make([]*byte, len(ss)+1)
- b := make([]byte, n)
- n = 0
- for i, s := range ss {
- bb[i] = &b[n]
- copy(b[n:], s)
- n += len(s) + 1
- }
- return bb, nil
-}
-
-func CloseOnExec(fd int) { fcntl(fd, F_SETFD, FD_CLOEXEC) }
-
-func SetNonblock(fd int, nonblocking bool) (err error) {
- flag, err := fcntl(fd, F_GETFL, 0)
- if err != nil {
- return err
- }
- if nonblocking {
- flag |= O_NONBLOCK
- } else {
- flag &^= O_NONBLOCK
- }
- _, err = fcntl(fd, F_SETFL, flag)
- return err
-}
-
-// Credential holds user and group identities to be assumed
-// by a child process started by StartProcess.
-type Credential struct {
- Uid uint32 // User ID.
- Gid uint32 // Group ID.
- Groups []uint32 // Supplementary group IDs.
- NoSetGroups bool // If true, don't set supplementary groups
-}
-
-// ProcAttr holds attributes that will be applied to a new process started
-// by StartProcess.
-type ProcAttr struct {
- Dir string // Current working directory.
- Env []string // Environment.
- Files []uintptr // File descriptors.
- Sys *SysProcAttr
-}
-
-var zeroProcAttr ProcAttr
-var zeroSysProcAttr SysProcAttr
-
-func forkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error) {
- var p [2]int
- var n int
- var err1 Errno
- var wstatus WaitStatus
-
- if attr == nil {
- attr = &zeroProcAttr
- }
- sys := attr.Sys
- if sys == nil {
- sys = &zeroSysProcAttr
- }
-
- // Convert args to C form.
- argv0p, err := BytePtrFromString(argv0)
- if err != nil {
- return 0, err
- }
- argvp, err := SlicePtrFromStrings(argv)
- if err != nil {
- return 0, err
- }
- envvp, err := SlicePtrFromStrings(attr.Env)
- if err != nil {
- return 0, err
- }
-
- if (runtime.GOOS == "freebsd" || runtime.GOOS == "dragonfly") && len(argv[0]) > len(argv0) {
- argvp[0] = argv0p
- }
-
- var chroot *byte
- if sys.Chroot != "" {
- chroot, err = BytePtrFromString(sys.Chroot)
- if err != nil {
- return 0, err
- }
- }
- var dir *byte
- if attr.Dir != "" {
- dir, err = BytePtrFromString(attr.Dir)
- if err != nil {
- return 0, err
- }
- }
-
- // Both Setctty and Foreground use the Ctty field,
- // but they give it slightly different meanings.
- if sys.Setctty && sys.Foreground {
- return 0, errorspkg.New("both Setctty and Foreground set in SysProcAttr")
- }
- if sys.Setctty && sys.Ctty >= len(attr.Files) {
- return 0, errorspkg.New("Setctty set but Ctty not valid in child")
- }
-
- // Acquire the fork lock so that no other threads
- // create new fds that are not yet close-on-exec
- // before we fork.
- ForkLock.Lock()
-
- // Allocate child status pipe close on exec.
- if err = forkExecPipe(p[:]); err != nil {
- ForkLock.Unlock()
- return 0, err
- }
-
- // Kick off child.
- pid, err1 = forkAndExecInChild(argv0p, argvp, envvp, chroot, dir, attr, sys, p[1])
- if err1 != 0 {
- Close(p[0])
- Close(p[1])
- ForkLock.Unlock()
- return 0, Errno(err1)
- }
- ForkLock.Unlock()
-
- // Read child error status from pipe.
- Close(p[1])
- for {
- n, err = readlen(p[0], (*byte)(unsafe.Pointer(&err1)), int(unsafe.Sizeof(err1)))
- if err != EINTR {
- break
- }
- }
- Close(p[0])
- if err != nil || n != 0 {
- if n == int(unsafe.Sizeof(err1)) {
- err = Errno(err1)
- }
- if err == nil {
- err = EPIPE
- }
-
- // Child failed; wait for it to exit, to make sure
- // the zombies don't accumulate.
- _, err1 := Wait4(pid, &wstatus, 0, nil)
- for err1 == EINTR {
- _, err1 = Wait4(pid, &wstatus, 0, nil)
- }
- return 0, err
- }
-
- // Read got EOF, so pipe closed on exec, so exec succeeded.
- return pid, nil
-}
-
-// Combination of fork and exec, careful to be thread safe.
-func ForkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error) {
- return forkExec(argv0, argv, attr)
-}
-
-// StartProcess wraps ForkExec for package os.
-func StartProcess(argv0 string, argv []string, attr *ProcAttr) (pid int, handle uintptr, err error) {
- pid, err = forkExec(argv0, argv, attr)
- return pid, 0, err
-}
-
-// Implemented in runtime package.
-func runtime_BeforeExec()
-func runtime_AfterExec()
-
-// execveLibc is non-nil on OSes that use libc-based syscalls; it is set to execve
-// in exec_libc.go. This avoids a build dependency for other platforms.
-var execveLibc func(path uintptr, argv uintptr, envp uintptr) Errno
-var execveDarwin func(path *byte, argv **byte, envp **byte) error
-var execveOpenBSD func(path *byte, argv **byte, envp **byte) error
-
-// Exec invokes the execve(2) system call.
-func Exec(argv0 string, argv []string, envv []string) (err error) {
- argv0p, err := BytePtrFromString(argv0)
- if err != nil {
- return err
- }
- argvp, err := SlicePtrFromStrings(argv)
- if err != nil {
- return err
- }
- envvp, err := SlicePtrFromStrings(envv)
- if err != nil {
- return err
- }
- runtime_BeforeExec()
-
- var err1 error
- if runtime.GOOS == "solaris" || runtime.GOOS == "illumos" || runtime.GOOS == "aix" {
- // RawSyscall should never be used on Solaris, illumos, or AIX.
- err1 = execveLibc(
- uintptr(unsafe.Pointer(argv0p)),
- uintptr(unsafe.Pointer(&argvp[0])),
- uintptr(unsafe.Pointer(&envvp[0])))
- } else if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
- // Similarly on Darwin.
- err1 = execveDarwin(argv0p, &argvp[0], &envvp[0])
- } else if runtime.GOOS == "openbsd" && (runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") {
- // Similarly on OpenBSD.
- err1 = execveOpenBSD(argv0p, &argvp[0], &envvp[0])
- } else {
- _, _, err1 = RawSyscall(SYS_EXECVE,
- uintptr(unsafe.Pointer(argv0p)),
- uintptr(unsafe.Pointer(&argvp[0])),
- uintptr(unsafe.Pointer(&envvp[0])))
- }
- runtime_AfterExec()
- return err1
-}
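
A minimal usage sketch for the exported ForkExec/Wait4 path above (Unix only, error handling abbreviated):

package main

import (
	"os"
	"syscall"
)

func main() {
	pid, err := syscall.ForkExec("/bin/echo", []string{"echo", "hello"}, &syscall.ProcAttr{
		Env:   os.Environ(),
		Files: []uintptr{0, 1, 2}, // child inherits stdin, stdout, stderr
	})
	if err != nil {
		panic(err)
	}
	// Reap the child so it does not linger as a zombie.
	var ws syscall.WaitStatus
	if _, err := syscall.Wait4(pid, &ws, 0, nil); err != nil {
		panic(err)
	}
}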
diff --git a/contrib/go/_std_1.18/src/syscall/forkpipe.go b/contrib/go/_std_1.18/src/syscall/forkpipe.go
deleted file mode 100644
index 6f7d29ce67..0000000000
--- a/contrib/go/_std_1.18/src/syscall/forkpipe.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || solaris
-
-package syscall
-
-// Try to open a pipe with O_CLOEXEC set on both file descriptors.
-func forkExecPipe(p []int) error {
- err := Pipe(p)
- if err != nil {
- return err
- }
- _, err = fcntl(p[0], F_SETFD, FD_CLOEXEC)
- if err != nil {
- return err
- }
- _, err = fcntl(p[1], F_SETFD, FD_CLOEXEC)
- return err
-}
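
For contrast with the fcntl fallback above, a sketch of the atomic Linux path (pipe2 with O_CLOEXEC, as used by forkExecPipe in exec_linux.go), which leaves no window for a concurrent fork between creating the descriptors and marking them close-on-exec:

package main

import "syscall"

func main() {
	var p [2]int
	// Both ends are created close-on-exec atomically, so no concurrent
	// fork can observe them without the flag.
	if err := syscall.Pipe2(p[:], syscall.O_CLOEXEC); err != nil {
		panic(err)
	}
	syscall.Close(p[0])
	syscall.Close(p[1])
}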
diff --git a/contrib/go/_std_1.18/src/syscall/sockcmsg_unix.go b/contrib/go/_std_1.18/src/syscall/sockcmsg_unix.go
deleted file mode 100644
index a3dcf818da..0000000000
--- a/contrib/go/_std_1.18/src/syscall/sockcmsg_unix.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-// Socket control messages
-
-package syscall
-
-import (
- "unsafe"
-)
-
-// CmsgLen returns the value to store in the Len field of the Cmsghdr
-// structure, taking into account any necessary alignment.
-func CmsgLen(datalen int) int {
- return cmsgAlignOf(SizeofCmsghdr) + datalen
-}
-
-// CmsgSpace returns the number of bytes an ancillary element with
-// payload of the passed data length occupies.
-func CmsgSpace(datalen int) int {
- return cmsgAlignOf(SizeofCmsghdr) + cmsgAlignOf(datalen)
-}
-
-func (h *Cmsghdr) data(offset uintptr) unsafe.Pointer {
- return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(SizeofCmsghdr)) + offset)
-}
-
-// SocketControlMessage represents a socket control message.
-type SocketControlMessage struct {
- Header Cmsghdr
- Data []byte
-}
-
-// ParseSocketControlMessage parses b as an array of socket control
-// messages.
-func ParseSocketControlMessage(b []byte) ([]SocketControlMessage, error) {
- var msgs []SocketControlMessage
- i := 0
- for i+CmsgLen(0) <= len(b) {
- h, dbuf, err := socketControlMessageHeaderAndData(b[i:])
- if err != nil {
- return nil, err
- }
- m := SocketControlMessage{Header: *h, Data: dbuf}
- msgs = append(msgs, m)
- i += cmsgAlignOf(int(h.Len))
- }
- return msgs, nil
-}
-
-func socketControlMessageHeaderAndData(b []byte) (*Cmsghdr, []byte, error) {
- h := (*Cmsghdr)(unsafe.Pointer(&b[0]))
- if h.Len < SizeofCmsghdr || uint64(h.Len) > uint64(len(b)) {
- return nil, nil, EINVAL
- }
- return h, b[cmsgAlignOf(SizeofCmsghdr):h.Len], nil
-}
-
-// UnixRights encodes a set of open file descriptors into a socket
-// control message for sending to another process.
-func UnixRights(fds ...int) []byte {
- datalen := len(fds) * 4
- b := make([]byte, CmsgSpace(datalen))
- h := (*Cmsghdr)(unsafe.Pointer(&b[0]))
- h.Level = SOL_SOCKET
- h.Type = SCM_RIGHTS
- h.SetLen(CmsgLen(datalen))
- for i, fd := range fds {
- *(*int32)(h.data(4 * uintptr(i))) = int32(fd)
- }
- return b
-}
-
-// ParseUnixRights decodes a socket control message that contains an
-// integer array of open file descriptors from another process.
-func ParseUnixRights(m *SocketControlMessage) ([]int, error) {
- if m.Header.Level != SOL_SOCKET {
- return nil, EINVAL
- }
- if m.Header.Type != SCM_RIGHTS {
- return nil, EINVAL
- }
- fds := make([]int, len(m.Data)>>2)
- for i, j := 0, 0; i < len(m.Data); i += 4 {
- fds[j] = int(*(*int32)(unsafe.Pointer(&m.Data[i])))
- j++
- }
- return fds, nil
-}
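
A short sketch of the usual round trip through UnixRights and ParseUnixRights, passing a file descriptor across a socketpair within one process (error handling abbreviated):

package main

import (
	"fmt"
	"os"
	"syscall"
)

// send passes f's descriptor over a connected AF_UNIX socket.
func send(sock int, f *os.File) error {
	rights := syscall.UnixRights(int(f.Fd()))
	return syscall.Sendmsg(sock, nil, rights, nil, 0)
}

// recv receives one descriptor back from the socket.
func recv(sock int) (int, error) {
	oob := make([]byte, syscall.CmsgSpace(4))
	_, oobn, _, _, err := syscall.Recvmsg(sock, nil, oob, 0)
	if err != nil {
		return -1, err
	}
	msgs, err := syscall.ParseSocketControlMessage(oob[:oobn])
	if err != nil {
		return -1, err
	}
	if len(msgs) == 0 {
		return -1, syscall.EINVAL
	}
	fds, err := syscall.ParseUnixRights(&msgs[0])
	if err != nil {
		return -1, err
	}
	return fds[0], nil
}

func main() {
	pair, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	if err := send(pair[0], os.Stdin); err != nil {
		panic(err)
	}
	fd, err := recv(pair[1])
	if err != nil {
		panic(err)
	}
	fmt.Println("received duplicate of stdin as fd", fd)
}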
diff --git a/contrib/go/_std_1.18/src/syscall/syscall.go b/contrib/go/_std_1.18/src/syscall/syscall.go
deleted file mode 100644
index 91173033ee..0000000000
--- a/contrib/go/_std_1.18/src/syscall/syscall.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package syscall contains an interface to the low-level operating system
-// primitives. The details vary depending on the underlying system, and
-// by default, godoc will display the syscall documentation for the current
-// system. If you want godoc to display syscall documentation for another
-// system, set $GOOS and $GOARCH to the desired system. For example, if
-// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS
-// to freebsd and $GOARCH to arm.
-// The primary use of syscall is inside other packages that provide a more
-// portable interface to the system, such as "os", "time" and "net". Use
-// those packages rather than this one if you can.
-// For details of the functions and data types in this package consult
-// the manuals for the appropriate operating system.
-// These calls return err == nil to indicate success; otherwise
-// err is an operating system error describing the failure.
-// On most systems, that error has type syscall.Errno.
-//
-// Deprecated: this package is locked down. Callers should use the
-// corresponding package in the golang.org/x/sys repository instead.
-// That is also where updates required by new systems or versions
-// should be applied. See https://golang.org/s/go1.4-syscall for more
-// information.
-//
-package syscall
-
-//go:generate go run ./mksyscall_windows.go -systemdll -output zsyscall_windows.go syscall_windows.go security_windows.go
-
-// StringByteSlice converts a string to a NUL-terminated []byte.
-// If s contains a NUL byte this function panics instead of
-// returning an error.
-//
-// Deprecated: Use ByteSliceFromString instead.
-func StringByteSlice(s string) []byte {
- a, err := ByteSliceFromString(s)
- if err != nil {
- panic("syscall: string with NUL passed to StringByteSlice")
- }
- return a
-}
-
-// ByteSliceFromString returns a NUL-terminated slice of bytes
-// containing the text of s. If s contains a NUL byte at any
-// location, it returns (nil, EINVAL).
-func ByteSliceFromString(s string) ([]byte, error) {
- for i := 0; i < len(s); i++ {
- if s[i] == 0 {
- return nil, EINVAL
- }
- }
- a := make([]byte, len(s)+1)
- copy(a, s)
- return a, nil
-}
-
-// StringBytePtr returns a pointer to a NUL-terminated array of bytes.
-// If s contains a NUL byte this function panics instead of returning
-// an error.
-//
-// Deprecated: Use BytePtrFromString instead.
-func StringBytePtr(s string) *byte { return &StringByteSlice(s)[0] }
-
-// BytePtrFromString returns a pointer to a NUL-terminated array of
-// bytes containing the text of s. If s contains a NUL byte at any
-// location, it returns (nil, EINVAL).
-func BytePtrFromString(s string) (*byte, error) {
- a, err := ByteSliceFromString(s)
- if err != nil {
- return nil, err
- }
- return &a[0], nil
-}
-
-// Single-word zero for use when we need a valid pointer to 0 bytes.
-// See mksyscall.pl.
-var _zero uintptr
-
-// Unix returns the time stored in ts as seconds plus nanoseconds.
-func (ts *Timespec) Unix() (sec int64, nsec int64) {
- return int64(ts.Sec), int64(ts.Nsec)
-}
-
-// Unix returns the time stored in tv as seconds plus nanoseconds.
-func (tv *Timeval) Unix() (sec int64, nsec int64) {
- return int64(tv.Sec), int64(tv.Usec) * 1000
-}
-
-// Nano returns the time stored in ts as nanoseconds.
-func (ts *Timespec) Nano() int64 {
- return int64(ts.Sec)*1e9 + int64(ts.Nsec)
-}
-
-// Nano returns the time stored in tv as nanoseconds.
-func (tv *Timeval) Nano() int64 {
- return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000
-}
-
-// Getpagesize and Exit are provided by the runtime.
-
-func Getpagesize() int
-func Exit(code int)
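
A small sketch exercising the string-conversion and time accessors above; note that the Timespec field widths vary by platform, so the literal below relies on untyped constants:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// NUL-safe conversion for handing paths to raw system calls;
	// a string containing a NUL yields EINVAL instead of a panic.
	if _, err := syscall.BytePtrFromString("/tmp/example"); err != nil {
		panic(err)
	}

	// The Timespec accessors defined above.
	ts := syscall.Timespec{Sec: 1, Nsec: 500}
	sec, nsec := ts.Unix()
	fmt.Println(sec, nsec, ts.Nano()) // 1 500 1000000500
}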
diff --git a/contrib/go/_std_1.18/src/syscall/syscall_darwin.go b/contrib/go/_std_1.18/src/syscall/syscall_darwin.go
deleted file mode 100644
index 902d6e77e1..0000000000
--- a/contrib/go/_std_1.18/src/syscall/syscall_darwin.go
+++ /dev/null
@@ -1,332 +0,0 @@
-// Copyright 2009,2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Darwin system calls.
-// This file is compiled as ordinary Go code,
-// but it is also input to mksyscall,
-// which parses the //sys lines and generates system call stubs.
-// Note that sometimes we use a lowercase //sys name and wrap
-// it in our own nicer implementation, either here or in
-// syscall_bsd.go or syscall_unix.go.
-
-package syscall
-
-import (
- "internal/abi"
- "unsafe"
-)
-
-type SockaddrDatalink struct {
- Len uint8
- Family uint8
- Index uint16
- Type uint8
- Nlen uint8
- Alen uint8
- Slen uint8
- Data [12]int8
- raw RawSockaddrDatalink
-}
-
-// Translate "kern.hostname" to []_C_int{0,1,2,3}.
-func nametomib(name string) (mib []_C_int, err error) {
- const siz = unsafe.Sizeof(mib[0])
-
- // NOTE(rsc): It seems strange to set the buffer to have
- // size CTL_MAXNAME+2 but use only CTL_MAXNAME
- // as the size. I don't know why the +2 is here, but the
- // kernel uses +2 for its own implementation of this function.
- // I am scared that if we don't include the +2 here, the kernel
- // will silently write 2 words farther than we specify
- // and we'll get memory corruption.
- var buf [CTL_MAXNAME + 2]_C_int
- n := uintptr(CTL_MAXNAME) * siz
-
- p := (*byte)(unsafe.Pointer(&buf[0]))
- bytes, err := ByteSliceFromString(name)
- if err != nil {
- return nil, err
- }
-
- // Magic sysctl: "setting" 0.3 to a string name
- // lets you read back the array of integers form.
- if err = sysctl([]_C_int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil {
- return nil, err
- }
- return buf[0 : n/siz], nil
-}
-
-func direntIno(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
-}
-
-func direntReclen(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
-}
-
-func direntNamlen(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
-}
-
-func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) }
-func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) }
-
-//sysnb pipe(p *[2]int32) (err error)
-
-func Pipe(p []int) (err error) {
- if len(p) != 2 {
- return EINVAL
- }
- var q [2]int32
- err = pipe(&q)
- if err == nil {
- p[0] = int(q[0])
- p[1] = int(q[1])
- }
- return
-}
-
-func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
- var _p0 unsafe.Pointer
- var bufsize uintptr
- if len(buf) > 0 {
- _p0 = unsafe.Pointer(&buf[0])
- bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
- }
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_getfsstat_trampoline), uintptr(_p0), bufsize, uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = e1
- }
- return
-}
-
-func libc_getfsstat_trampoline()
-
-//go:cgo_import_dynamic libc_getfsstat getfsstat "/usr/lib/libSystem.B.dylib"
-
-//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error)
-
-/*
- * Wrapped
- */
-
-//sys kill(pid int, signum int, posix int) (err error)
-
-func Kill(pid int, signum Signal) (err error) { return kill(pid, int(signum), 1) }
-
-/*
- * Exposed directly
- */
-//sys Access(path string, mode uint32) (err error)
-//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error)
-//sys Chdir(path string) (err error)
-//sys Chflags(path string, flags int) (err error)
-//sys Chmod(path string, mode uint32) (err error)
-//sys Chown(path string, uid int, gid int) (err error)
-//sys Chroot(path string) (err error)
-//sys Close(fd int) (err error)
-//sys closedir(dir uintptr) (err error)
-//sys Dup(fd int) (nfd int, err error)
-//sys Dup2(from int, to int) (err error)
-//sys Exchangedata(path1 string, path2 string, options int) (err error)
-//sys Fchdir(fd int) (err error)
-//sys Fchflags(fd int, flags int) (err error)
-//sys Fchmod(fd int, mode uint32) (err error)
-//sys Fchown(fd int, uid int, gid int) (err error)
-//sys Flock(fd int, how int) (err error)
-//sys Fpathconf(fd int, name int) (val int, err error)
-//sys Fsync(fd int) (err error)
-// Fsync is not called for os.File.Sync(). Please see internal/poll/fd_fsync_darwin.go
-//sys Ftruncate(fd int, length int64) (err error)
-//sys Getdtablesize() (size int)
-//sysnb Getegid() (egid int)
-//sysnb Geteuid() (uid int)
-//sysnb Getgid() (gid int)
-//sysnb Getpgid(pid int) (pgid int, err error)
-//sysnb Getpgrp() (pgrp int)
-//sysnb Getpid() (pid int)
-//sysnb Getppid() (ppid int)
-//sys Getpriority(which int, who int) (prio int, err error)
-//sysnb Getrlimit(which int, lim *Rlimit) (err error)
-//sysnb Getrusage(who int, rusage *Rusage) (err error)
-//sysnb Getsid(pid int) (sid int, err error)
-//sysnb Getuid() (uid int)
-//sysnb Issetugid() (tainted bool)
-//sys Kqueue() (fd int, err error)
-//sys Lchown(path string, uid int, gid int) (err error)
-//sys Link(path string, link string) (err error)
-//sys Listen(s int, backlog int) (err error)
-//sys Mkdir(path string, mode uint32) (err error)
-//sys Mkfifo(path string, mode uint32) (err error)
-//sys Mknod(path string, mode uint32, dev int) (err error)
-//sys Mlock(b []byte) (err error)
-//sys Mlockall(flags int) (err error)
-//sys Mprotect(b []byte, prot int) (err error)
-//sys Munlock(b []byte) (err error)
-//sys Munlockall() (err error)
-//sys Open(path string, mode int, perm uint32) (fd int, err error)
-//sys Pathconf(path string, name int) (val int, err error)
-//sys Pread(fd int, p []byte, offset int64) (n int, err error)
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
-//sys read(fd int, p []byte) (n int, err error)
-//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno)
-//sys Readlink(path string, buf []byte) (n int, err error)
-//sys Rename(from string, to string) (err error)
-//sys Revoke(path string) (err error)
-//sys Rmdir(path string) (err error)
-//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_lseek
-//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
-//sys Setegid(egid int) (err error)
-//sysnb Seteuid(euid int) (err error)
-//sysnb Setgid(gid int) (err error)
-//sys Setlogin(name string) (err error)
-//sysnb Setpgid(pid int, pgid int) (err error)
-//sys Setpriority(which int, who int, prio int) (err error)
-//sys Setprivexec(flag int) (err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
-//sysnb Setrlimit(which int, lim *Rlimit) (err error)
-//sysnb Setsid() (pid int, err error)
-//sysnb Settimeofday(tp *Timeval) (err error)
-//sysnb Setuid(uid int) (err error)
-//sys Symlink(path string, link string) (err error)
-//sys Sync() (err error)
-//sys Truncate(path string, length int64) (err error)
-//sys Umask(newmask int) (oldmask int)
-//sys Undelete(path string) (err error)
-//sys Unlink(path string) (err error)
-//sys Unmount(path string, flags int) (err error)
-//sys write(fd int, p []byte) (n int, err error)
-//sys writev(fd int, iovecs []Iovec) (cnt uintptr, err error)
-//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
-//sys munmap(addr uintptr, length uintptr) (err error)
-//sysnb fork() (pid int, err error)
-//sysnb ioctl(fd int, req int, arg int) (err error)
-//sysnb ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_ioctl
-//sysnb execve(path *byte, argv **byte, envp **byte) (err error)
-//sysnb exit(res int) (err error)
-//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error)
-//sys fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) = SYS_fcntl
-//sys unlinkat(fd int, path string, flags int) (err error)
-//sys openat(fd int, path string, flags int, perm uint32) (fdret int, err error)
-//sys getcwd(buf []byte) (n int, err error)
-
-func init() {
- execveDarwin = execve
-}
-
-func fdopendir(fd int) (dir uintptr, err error) {
- r0, _, e1 := syscallPtr(abi.FuncPCABI0(libc_fdopendir_trampoline), uintptr(fd), 0, 0)
- dir = uintptr(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_fdopendir_trampoline()
-
-//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib"
-
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_read_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_write_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
- // Simulate Getdirentries using fdopendir/readdir_r/closedir.
- // We store the number of entries to skip in the seek
- // offset of fd. See issue #31368.
- // It's not the full required semantics, but should handle the case
- // of calling Getdirentries or ReadDirent repeatedly.
- // It won't handle assigning the results of lseek to *basep, or handle
- // the directory being edited underfoot.
- skip, err := Seek(fd, 0, 1 /* SEEK_CUR */)
- if err != nil {
- return 0, err
- }
-
- // We need to duplicate the incoming file descriptor
- // because the caller expects to retain control of it, but
- // fdopendir expects to take control of its argument.
- // Just Dup'ing the file descriptor is not enough, as the
- // result shares underlying state. Use openat to make a really
- // new file descriptor referring to the same directory.
- fd2, err := openat(fd, ".", O_RDONLY, 0)
- if err != nil {
- return 0, err
- }
- d, err := fdopendir(fd2)
- if err != nil {
- Close(fd2)
- return 0, err
- }
- defer closedir(d)
-
- var cnt int64
- for {
- var entry Dirent
- var entryp *Dirent
- e := readdir_r(d, &entry, &entryp)
- if e != 0 {
- return n, errnoErr(e)
- }
- if entryp == nil {
- break
- }
- if skip > 0 {
- skip--
- cnt++
- continue
- }
- reclen := int(entry.Reclen)
- if reclen > len(buf) {
- // Not enough room. Return for now.
- // The counter will let us know where we should start up again.
- // Note: this strategy for suspending in the middle and
- // restarting is O(n^2) in the length of the directory. Oh well.
- break
- }
- // Copy entry into return buffer.
- s := struct {
- ptr unsafe.Pointer
- siz int
- cap int
- }{ptr: unsafe.Pointer(&entry), siz: reclen, cap: reclen}
- copy(buf, *(*[]byte)(unsafe.Pointer(&s)))
- buf = buf[reclen:]
- n += reclen
- cnt++
- }
- // Set the seek offset of the input fd to record
- // how many files we've already returned.
- _, err = Seek(fd, cnt, 0 /* SEEK_SET */)
- if err != nil {
- return n, err
- }
-
- return n, nil
-}
-
-// Implemented in the runtime package (runtime/sys_darwin.go)
-func syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
-func syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
-func syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
-func rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
-func rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
-func syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
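
A sketch of the two-call Getfsstat pattern (Darwin only; assumes the MNT_NOWAIT flag, which this package defines on Darwin):

//go:build darwin

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// A nil buffer asks only for the count; the second call
	// fills the sized buffer.
	n, err := syscall.Getfsstat(nil, syscall.MNT_NOWAIT)
	if err != nil {
		panic(err)
	}
	buf := make([]syscall.Statfs_t, n)
	if _, err := syscall.Getfsstat(buf, syscall.MNT_NOWAIT); err != nil {
		panic(err)
	}
	fmt.Println("mounted filesystems:", n)
}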
diff --git a/contrib/go/_std_1.18/src/syscall/syscall_linux.go b/contrib/go/_std_1.18/src/syscall/syscall_linux.go
deleted file mode 100644
index f32020ca6c..0000000000
--- a/contrib/go/_std_1.18/src/syscall/syscall_linux.go
+++ /dev/null
@@ -1,1150 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Linux system calls.
-// This file is compiled as ordinary Go code,
-// but it is also input to mksyscall,
-// which parses the //sys lines and generates system call stubs.
-// Note that sometimes we use a lowercase //sys name and
-// wrap it in our own nicer implementation.
-
-package syscall
-
-import (
- "internal/itoa"
- "unsafe"
-)
-
-func rawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr)
-
-/*
- * Wrapped
- */
-
-func Access(path string, mode uint32) (err error) {
- return Faccessat(_AT_FDCWD, path, mode, 0)
-}
-
-func Chmod(path string, mode uint32) (err error) {
- return Fchmodat(_AT_FDCWD, path, mode, 0)
-}
-
-func Chown(path string, uid int, gid int) (err error) {
- return Fchownat(_AT_FDCWD, path, uid, gid, 0)
-}
-
-func Creat(path string, mode uint32) (fd int, err error) {
- return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode)
-}
-
-func isGroupMember(gid int) bool {
- groups, err := Getgroups()
- if err != nil {
- return false
- }
-
- for _, g := range groups {
- if g == gid {
- return true
- }
- }
- return false
-}
-
-//sys faccessat(dirfd int, path string, mode uint32) (err error)
-
-func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
- if flags & ^(_AT_SYMLINK_NOFOLLOW|_AT_EACCESS) != 0 {
- return EINVAL
- }
-
- // The Linux kernel faccessat system call does not take any flags.
- // The glibc faccessat implements the flags itself; see
- // https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/unix/sysv/linux/faccessat.c;hb=HEAD
- // Because people naturally expect syscall.Faccessat to act
- // like C faccessat, we do the same.
-
- if flags == 0 {
- return faccessat(dirfd, path, mode)
- }
-
- var st Stat_t
- if err := fstatat(dirfd, path, &st, flags&_AT_SYMLINK_NOFOLLOW); err != nil {
- return err
- }
-
- mode &= 7
- if mode == 0 {
- return nil
- }
-
- var uid int
- if flags&_AT_EACCESS != 0 {
- uid = Geteuid()
- } else {
- uid = Getuid()
- }
-
- if uid == 0 {
- if mode&1 == 0 {
- // Root can read and write any file.
- return nil
- }
- if st.Mode&0111 != 0 {
- // Root can execute any file that anybody can execute.
- return nil
- }
- return EACCES
- }
-
- var fmode uint32
- if uint32(uid) == st.Uid {
- fmode = (st.Mode >> 6) & 7
- } else {
- var gid int
- if flags&_AT_EACCESS != 0 {
- gid = Getegid()
- } else {
- gid = Getgid()
- }
-
- if uint32(gid) == st.Gid || isGroupMember(int(st.Gid)) {
- fmode = (st.Mode >> 3) & 7
- } else {
- fmode = st.Mode & 7
- }
- }
-
- if fmode&mode == mode {
- return nil
- }
-
- return EACCES
-}
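
Since the package documentation directs callers to golang.org/x/sys, a hypothetical sketch of the same effective-ID access check through that package's exported constants (assumes golang.org/x/sys/unix is available):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Access check using the caller's effective IDs, mirroring the
	// AT_EACCESS emulation implemented above.
	err := unix.Faccessat(unix.AT_FDCWD, "/etc/shadow", unix.R_OK, unix.AT_EACCESS)
	fmt.Println("readable with effective IDs:", err == nil)
}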
-
-//sys fchmodat(dirfd int, path string, mode uint32) (err error)
-
-func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
-	// Linux fchmodat doesn't support the flags parameter. Mimic glibc's behavior
-	// and check the flags. Otherwise the mode would be applied to the symlink
-	// destination, which is not what the user expects.
- if flags&^_AT_SYMLINK_NOFOLLOW != 0 {
- return EINVAL
- } else if flags&_AT_SYMLINK_NOFOLLOW != 0 {
- return EOPNOTSUPP
- }
- return fchmodat(dirfd, path, mode)
-}
-
-//sys linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error)
-
-func Link(oldpath string, newpath string) (err error) {
- return linkat(_AT_FDCWD, oldpath, _AT_FDCWD, newpath, 0)
-}
-
-func Mkdir(path string, mode uint32) (err error) {
- return Mkdirat(_AT_FDCWD, path, mode)
-}
-
-func Mknod(path string, mode uint32, dev int) (err error) {
- return Mknodat(_AT_FDCWD, path, mode, dev)
-}
-
-func Open(path string, mode int, perm uint32) (fd int, err error) {
- return openat(_AT_FDCWD, path, mode|O_LARGEFILE, perm)
-}
-
-//sys openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
-
-func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
- return openat(dirfd, path, flags|O_LARGEFILE, mode)
-}
-
-func Pipe(p []int) error {
- return Pipe2(p, 0)
-}
-
-//sysnb pipe2(p *[2]_C_int, flags int) (err error)
-
-func Pipe2(p []int, flags int) error {
- if len(p) != 2 {
- return EINVAL
- }
- var pp [2]_C_int
- err := pipe2(&pp, flags)
- if err == nil {
- p[0] = int(pp[0])
- p[1] = int(pp[1])
- }
- return err
-}
-
-//sys readlinkat(dirfd int, path string, buf []byte) (n int, err error)
-
-func Readlink(path string, buf []byte) (n int, err error) {
- return readlinkat(_AT_FDCWD, path, buf)
-}
-
-func Rename(oldpath string, newpath string) (err error) {
- return Renameat(_AT_FDCWD, oldpath, _AT_FDCWD, newpath)
-}
-
-func Rmdir(path string) error {
- return unlinkat(_AT_FDCWD, path, _AT_REMOVEDIR)
-}
-
-//sys symlinkat(oldpath string, newdirfd int, newpath string) (err error)
-
-func Symlink(oldpath string, newpath string) (err error) {
- return symlinkat(oldpath, _AT_FDCWD, newpath)
-}
-
-func Unlink(path string) error {
- return unlinkat(_AT_FDCWD, path, 0)
-}
-
-//sys unlinkat(dirfd int, path string, flags int) (err error)
-
-func Unlinkat(dirfd int, path string) error {
- return unlinkat(dirfd, path, 0)
-}
-
-func Utimes(path string, tv []Timeval) (err error) {
- if len(tv) != 2 {
- return EINVAL
- }
- return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
-}
-
-//sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error)
-
-func UtimesNano(path string, ts []Timespec) (err error) {
- if len(ts) != 2 {
- return EINVAL
- }
- return utimensat(_AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
-}
-
-func Futimesat(dirfd int, path string, tv []Timeval) (err error) {
- if len(tv) != 2 {
- return EINVAL
- }
- return futimesat(dirfd, path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
-}
-
-func Futimes(fd int, tv []Timeval) (err error) {
- // Believe it or not, this is the best we can do on Linux
- // (and is what glibc does).
- return Utimes("/proc/self/fd/"+itoa.Itoa(fd), tv)
-}
-
-const ImplementsGetwd = true
-
-//sys Getcwd(buf []byte) (n int, err error)
-
-func Getwd() (wd string, err error) {
- var buf [PathMax]byte
- n, err := Getcwd(buf[0:])
- if err != nil {
- return "", err
- }
- // Getcwd returns the number of bytes written to buf, including the NUL.
- if n < 1 || n > len(buf) || buf[n-1] != 0 {
- return "", EINVAL
- }
- return string(buf[0 : n-1]), nil
-}
-
-func Getgroups() (gids []int, err error) {
- n, err := getgroups(0, nil)
- if err != nil {
- return nil, err
- }
- if n == 0 {
- return nil, nil
- }
-
- // Sanity check group count. Max is 1<<16 on Linux.
- if n < 0 || n > 1<<20 {
- return nil, EINVAL
- }
-
- a := make([]_Gid_t, n)
- n, err = getgroups(n, &a[0])
- if err != nil {
- return nil, err
- }
- gids = make([]int, n)
- for i, v := range a[0:n] {
- gids[i] = int(v)
- }
- return
-}
-
-var cgo_libc_setgroups unsafe.Pointer // non-nil if cgo linked.
-
-func Setgroups(gids []int) (err error) {
- n := uintptr(len(gids))
- if n == 0 {
- if cgo_libc_setgroups == nil {
- if _, _, e1 := AllThreadsSyscall(_SYS_setgroups, 0, 0, 0); e1 != 0 {
- err = errnoErr(e1)
- }
- return
- }
- if ret := cgocaller(cgo_libc_setgroups, 0, 0); ret != 0 {
- err = errnoErr(Errno(ret))
- }
- return
- }
-
- a := make([]_Gid_t, len(gids))
- for i, v := range gids {
- a[i] = _Gid_t(v)
- }
- if cgo_libc_setgroups == nil {
- if _, _, e1 := AllThreadsSyscall(_SYS_setgroups, n, uintptr(unsafe.Pointer(&a[0])), 0); e1 != 0 {
- err = errnoErr(e1)
- }
- return
- }
- if ret := cgocaller(cgo_libc_setgroups, n, uintptr(unsafe.Pointer(&a[0]))); ret != 0 {
- err = errnoErr(Errno(ret))
- }
- return
-}
-
-type WaitStatus uint32
-
-// Wait status is 7 bits at bottom, either 0 (exited),
-// 0x7F (stopped), or a signal number that caused an exit.
-// The 0x80 bit is whether there was a core dump.
-// An extra number (exit code, signal causing a stop)
-// is in the high bits. At least that's the idea.
-// There are various irregularities. For example, the
-// "continued" status is 0xFFFF, distinguishing itself
-// from stopped via the core dump bit.
-
-const (
- mask = 0x7F
- core = 0x80
- exited = 0x00
- stopped = 0x7F
- shift = 8
-)
-
-func (w WaitStatus) Exited() bool { return w&mask == exited }
-
-func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != exited }
-
-func (w WaitStatus) Stopped() bool { return w&0xFF == stopped }
-
-func (w WaitStatus) Continued() bool { return w == 0xFFFF }
-
-func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 }
-
-func (w WaitStatus) ExitStatus() int {
- if !w.Exited() {
- return -1
- }
- return int(w>>shift) & 0xFF
-}
-
-func (w WaitStatus) Signal() Signal {
- if !w.Signaled() {
- return -1
- }
- return Signal(w & mask)
-}
-
-func (w WaitStatus) StopSignal() Signal {
- if !w.Stopped() {
- return -1
- }
- return Signal(w>>shift) & 0xFF
-}
-
-func (w WaitStatus) TrapCause() int {
- if w.StopSignal() != SIGTRAP {
- return -1
- }
- return int(w>>shift) >> 8
-}
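
Two worked values for the bit layout described above, a plain exit and a core-dumping signal:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// 0x0100: low 7 bits are 0 (exited); the exit code 1 sits in the next byte.
	ws := syscall.WaitStatus(0x0100)
	fmt.Println(ws.Exited(), ws.ExitStatus()) // true 1

	// 0x008B: low 7 bits are 11 (SIGSEGV) and the core-dump bit 0x80 is set.
	ws = syscall.WaitStatus(0x008B)
	fmt.Println(ws.Signaled(), ws.Signal(), ws.CoreDump()) // true segmentation fault true
}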
-
-//sys wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error)
-
-func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) {
- var status _C_int
- wpid, err = wait4(pid, &status, options, rusage)
- if wstatus != nil {
- *wstatus = WaitStatus(status)
- }
- return
-}
-
-func Mkfifo(path string, mode uint32) (err error) {
- return Mknod(path, mode|S_IFIFO, 0)
-}
-
-func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) {
- if sa.Port < 0 || sa.Port > 0xFFFF {
- return nil, 0, EINVAL
- }
- sa.raw.Family = AF_INET
- p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
- p[0] = byte(sa.Port >> 8)
- p[1] = byte(sa.Port)
- sa.raw.Addr = sa.Addr
- return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil
-}
-
-func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) {
- if sa.Port < 0 || sa.Port > 0xFFFF {
- return nil, 0, EINVAL
- }
- sa.raw.Family = AF_INET6
- p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
- p[0] = byte(sa.Port >> 8)
- p[1] = byte(sa.Port)
- sa.raw.Scope_id = sa.ZoneId
- sa.raw.Addr = sa.Addr
- return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil
-}
-
-func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
- name := sa.Name
- n := len(name)
- if n > len(sa.raw.Path) {
- return nil, 0, EINVAL
- }
- if n == len(sa.raw.Path) && name[0] != '@' {
- return nil, 0, EINVAL
- }
- sa.raw.Family = AF_UNIX
- for i := 0; i < n; i++ {
- sa.raw.Path[i] = int8(name[i])
- }
- // length is family (uint16), name, NUL.
- sl := _Socklen(2)
- if n > 0 {
- sl += _Socklen(n) + 1
- }
- if sa.raw.Path[0] == '@' {
- sa.raw.Path[0] = 0
- // Don't count trailing NUL for abstract address.
- sl--
- }
-
- return unsafe.Pointer(&sa.raw), sl, nil
-}
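
A sketch of the leading-'@' convention handled above, binding an abstract-namespace socket (Linux only):

package main

import "syscall"

func main() {
	fd, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer syscall.Close(fd)
	// The leading '@' selects the abstract namespace: sockaddr() above
	// rewrites it to a NUL and drops the trailing NUL from the length.
	if err := syscall.Bind(fd, &syscall.SockaddrUnix{Name: "@demo-socket"}); err != nil {
		panic(err)
	}
}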
-
-type SockaddrLinklayer struct {
- Protocol uint16
- Ifindex int
- Hatype uint16
- Pkttype uint8
- Halen uint8
- Addr [8]byte
- raw RawSockaddrLinklayer
-}
-
-func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) {
- if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff {
- return nil, 0, EINVAL
- }
- sa.raw.Family = AF_PACKET
- sa.raw.Protocol = sa.Protocol
- sa.raw.Ifindex = int32(sa.Ifindex)
- sa.raw.Hatype = sa.Hatype
- sa.raw.Pkttype = sa.Pkttype
- sa.raw.Halen = sa.Halen
- sa.raw.Addr = sa.Addr
- return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil
-}
-
-type SockaddrNetlink struct {
- Family uint16
- Pad uint16
- Pid uint32
- Groups uint32
- raw RawSockaddrNetlink
-}
-
-func (sa *SockaddrNetlink) sockaddr() (unsafe.Pointer, _Socklen, error) {
- sa.raw.Family = AF_NETLINK
- sa.raw.Pad = sa.Pad
- sa.raw.Pid = sa.Pid
- sa.raw.Groups = sa.Groups
- return unsafe.Pointer(&sa.raw), SizeofSockaddrNetlink, nil
-}
-
-func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
- switch rsa.Addr.Family {
- case AF_NETLINK:
- pp := (*RawSockaddrNetlink)(unsafe.Pointer(rsa))
- sa := new(SockaddrNetlink)
- sa.Family = pp.Family
- sa.Pad = pp.Pad
- sa.Pid = pp.Pid
- sa.Groups = pp.Groups
- return sa, nil
-
- case AF_PACKET:
- pp := (*RawSockaddrLinklayer)(unsafe.Pointer(rsa))
- sa := new(SockaddrLinklayer)
- sa.Protocol = pp.Protocol
- sa.Ifindex = int(pp.Ifindex)
- sa.Hatype = pp.Hatype
- sa.Pkttype = pp.Pkttype
- sa.Halen = pp.Halen
- sa.Addr = pp.Addr
- return sa, nil
-
- case AF_UNIX:
- pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
- sa := new(SockaddrUnix)
- if pp.Path[0] == 0 {
- // "Abstract" Unix domain socket.
- // Rewrite leading NUL as @ for textual display.
- // (This is the standard convention.)
- // Not friendly to overwrite in place,
- // but the callers below don't care.
- pp.Path[0] = '@'
- }
-
- // Assume path ends at NUL.
- // This is not technically the Linux semantics for
- // abstract Unix domain sockets--they are supposed
- // to be uninterpreted fixed-size binary blobs--but
- // everyone uses this convention.
- n := 0
- for n < len(pp.Path) && pp.Path[n] != 0 {
- n++
- }
- bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
- sa.Name = string(bytes)
- return sa, nil
-
- case AF_INET:
- pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
- sa := new(SockaddrInet4)
- p := (*[2]byte)(unsafe.Pointer(&pp.Port))
- sa.Port = int(p[0])<<8 + int(p[1])
- sa.Addr = pp.Addr
- return sa, nil
-
- case AF_INET6:
- pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa))
- sa := new(SockaddrInet6)
- p := (*[2]byte)(unsafe.Pointer(&pp.Port))
- sa.Port = int(p[0])<<8 + int(p[1])
- sa.ZoneId = pp.Scope_id
- sa.Addr = pp.Addr
- return sa, nil
- }
- return nil, EAFNOSUPPORT
-}
-
-func Accept(fd int) (nfd int, sa Sockaddr, err error) {
- var rsa RawSockaddrAny
- var len _Socklen = SizeofSockaddrAny
- nfd, err = accept4(fd, &rsa, &len, 0)
- if err != nil {
- return
- }
- sa, err = anyToSockaddr(&rsa)
- if err != nil {
- Close(nfd)
- nfd = 0
- }
- return
-}
-
-func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) {
- var rsa RawSockaddrAny
- var len _Socklen = SizeofSockaddrAny
- nfd, err = accept4(fd, &rsa, &len, flags)
- if err != nil {
- return
- }
- if len > SizeofSockaddrAny {
- panic("RawSockaddrAny too small")
- }
- sa, err = anyToSockaddr(&rsa)
- if err != nil {
- Close(nfd)
- nfd = 0
- }
- return
-}
-
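A hedged usage sketch of Accept4 above: the flags are applied atomically at accept time, avoiding a separate fcntl, and the returned Sockaddr can be type-switched into the concrete types that anyToSockaddr builds. Assumes Linux; a client must connect for the accept call to return:

package main

import (
	"fmt"
	"log"
	"syscall"
)

func main() {
	lfd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer syscall.Close(lfd)
	sa := &syscall.SockaddrInet4{Port: 9000, Addr: [4]byte{127, 0, 0, 1}}
	if err := syscall.Bind(lfd, sa); err != nil {
		log.Fatal(err)
	}
	if err := syscall.Listen(lfd, 128); err != nil {
		log.Fatal(err)
	}

	// Both flags are set atomically on the accepted descriptor.
	nfd, peer, err := syscall.Accept4(lfd, syscall.SOCK_CLOEXEC|syscall.SOCK_NONBLOCK)
	if err != nil {
		log.Fatal(err)
	}
	defer syscall.Close(nfd)

	switch p := peer.(type) {
	case *syscall.SockaddrInet4:
		fmt.Printf("peer %v:%d\n", p.Addr, p.Port)
	case *syscall.SockaddrInet6:
		fmt.Printf("peer %v:%d zone %d\n", p.Addr, p.Port, p.ZoneId)
	}
}
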
-func Getsockname(fd int) (sa Sockaddr, err error) {
- var rsa RawSockaddrAny
- var len _Socklen = SizeofSockaddrAny
- if err = getsockname(fd, &rsa, &len); err != nil {
- return
- }
- return anyToSockaddr(&rsa)
-}
-
-func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) {
- vallen := _Socklen(4)
- err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
- return value, err
-}
-
-func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) {
- var value IPMreq
- vallen := _Socklen(SizeofIPMreq)
- err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
- return &value, err
-}
-
-func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) {
- var value IPMreqn
- vallen := _Socklen(SizeofIPMreqn)
- err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
- return &value, err
-}
-
-func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) {
- var value IPv6Mreq
- vallen := _Socklen(SizeofIPv6Mreq)
- err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
- return &value, err
-}
-
-func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) {
- var value IPv6MTUInfo
- vallen := _Socklen(SizeofIPv6MTUInfo)
- err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
- return &value, err
-}
-
-func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) {
- var value ICMPv6Filter
- vallen := _Socklen(SizeofICMPv6Filter)
- err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
- return &value, err
-}
-
-func GetsockoptUcred(fd, level, opt int) (*Ucred, error) {
- var value Ucred
- vallen := _Socklen(SizeofUcred)
- err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
- return &value, err
-}
-
-func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) {
- return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq))
-}
-
-func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) {
- var msg Msghdr
- msg.Name = (*byte)(unsafe.Pointer(rsa))
- msg.Namelen = uint32(SizeofSockaddrAny)
- var iov Iovec
- if len(p) > 0 {
- iov.Base = &p[0]
- iov.SetLen(len(p))
- }
- var dummy byte
- if len(oob) > 0 {
- if len(p) == 0 {
- var sockType int
- sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE)
- if err != nil {
- return
- }
- // receive at least one normal byte
- if sockType != SOCK_DGRAM {
- iov.Base = &dummy
- iov.SetLen(1)
- }
- }
- msg.Control = &oob[0]
- msg.SetControllen(len(oob))
- }
- msg.Iov = &iov
- msg.Iovlen = 1
- if n, err = recvmsg(fd, &msg, flags); err != nil {
- return
- }
- oobn = int(msg.Controllen)
- recvflags = int(msg.Flags)
- return
-}
-
-func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) {
- var msg Msghdr
- msg.Name = (*byte)(ptr)
- msg.Namelen = uint32(salen)
- var iov Iovec
- if len(p) > 0 {
- iov.Base = &p[0]
- iov.SetLen(len(p))
- }
- var dummy byte
- if len(oob) > 0 {
- if len(p) == 0 {
- var sockType int
- sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE)
- if err != nil {
- return 0, err
- }
- // send at least one normal byte
- if sockType != SOCK_DGRAM {
- iov.Base = &dummy
- iov.SetLen(1)
- }
- }
- msg.Control = &oob[0]
- msg.SetControllen(len(oob))
- }
- msg.Iov = &iov
- msg.Iovlen = 1
- if n, err = sendmsg(fd, &msg, flags); err != nil {
- return 0, err
- }
- if len(oob) > 0 && len(p) == 0 {
- n = 0
- }
- return n, nil
-}
-
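recvmsgRaw and sendmsgN above are the plumbing behind the exported Recvmsg/Sendmsg; a common use of the oob (ancillary) buffer is passing a file descriptor over a Unix socket. A minimal sketch using the public helpers UnixRights, CmsgSpace, ParseSocketControlMessage and ParseUnixRights (Linux):

package main

import (
	"log"
	"os"
	"syscall"
)

func main() {
	// A connected AF_UNIX pair stands in for two processes.
	fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
	if err != nil {
		log.Fatal(err)
	}

	// Sender: encode os.Stdout's fd as SCM_RIGHTS control data.
	oob := syscall.UnixRights(int(os.Stdout.Fd()))
	if err := syscall.Sendmsg(fds[0], []byte("x"), oob, nil, 0); err != nil {
		log.Fatal(err)
	}

	// Receiver: read the message and parse the control data.
	buf := make([]byte, 1)
	oobBuf := make([]byte, syscall.CmsgSpace(4)) // room for one int32 fd
	_, oobn, _, _, err := syscall.Recvmsg(fds[1], buf, oobBuf, 0)
	if err != nil {
		log.Fatal(err)
	}
	msgs, err := syscall.ParseSocketControlMessage(oobBuf[:oobn])
	if err != nil {
		log.Fatal(err)
	}
	passed, err := syscall.ParseUnixRights(&msgs[0])
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("received duplicated fd %d", passed[0])
}
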
-// BindToDevice binds the socket associated with fd to device.
-func BindToDevice(fd int, device string) (err error) {
- return SetsockoptString(fd, SOL_SOCKET, SO_BINDTODEVICE, device)
-}
-
-//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error)
-
-func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) {
-	// The peek requests are machine-size oriented, so we wrap them
-	// to retrieve arbitrary-length data.
-
-	// The ptrace syscall differs from glibc's ptrace:
-	// peek requests return the word in *data, not as the return value.
-
- var buf [sizeofPtr]byte
-
- // Leading edge. PEEKTEXT/PEEKDATA don't require aligned
- // access (PEEKUSER warns that it might), but if we don't
- // align our reads, we might straddle an unmapped page
- // boundary and not get the bytes leading up to the page
- // boundary.
- n := 0
- if addr%sizeofPtr != 0 {
- err = ptrace(req, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
- if err != nil {
- return 0, err
- }
- n += copy(out, buf[addr%sizeofPtr:])
- out = out[n:]
- }
-
- // Remainder.
- for len(out) > 0 {
- // We use an internal buffer to guarantee alignment.
- // It's not documented if this is necessary, but we're paranoid.
- err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
- if err != nil {
- return n, err
- }
- copied := copy(out, buf[0:])
- n += copied
- out = out[copied:]
- }
-
- return n, nil
-}
-
-func PtracePeekText(pid int, addr uintptr, out []byte) (count int, err error) {
- return ptracePeek(PTRACE_PEEKTEXT, pid, addr, out)
-}
-
-func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) {
- return ptracePeek(PTRACE_PEEKDATA, pid, addr, out)
-}
-
-func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (count int, err error) {
- // As for ptracePeek, we need to align our accesses to deal
- // with the possibility of straddling an invalid page.
-
- // Leading edge.
- n := 0
- if addr%sizeofPtr != 0 {
- var buf [sizeofPtr]byte
- err = ptrace(peekReq, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
- if err != nil {
- return 0, err
- }
- n += copy(buf[addr%sizeofPtr:], data)
- word := *((*uintptr)(unsafe.Pointer(&buf[0])))
- err = ptrace(pokeReq, pid, addr-addr%sizeofPtr, word)
- if err != nil {
- return 0, err
- }
- data = data[n:]
- }
-
- // Interior.
- for len(data) > sizeofPtr {
- word := *((*uintptr)(unsafe.Pointer(&data[0])))
- err = ptrace(pokeReq, pid, addr+uintptr(n), word)
- if err != nil {
- return n, err
- }
- n += sizeofPtr
- data = data[sizeofPtr:]
- }
-
- // Trailing edge.
- if len(data) > 0 {
- var buf [sizeofPtr]byte
- err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
- if err != nil {
- return n, err
- }
- copy(buf[0:], data)
- word := *((*uintptr)(unsafe.Pointer(&buf[0])))
- err = ptrace(pokeReq, pid, addr+uintptr(n), word)
- if err != nil {
- return n, err
- }
- n += len(data)
- }
-
- return n, nil
-}
-
-func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) {
- return ptracePoke(PTRACE_POKETEXT, PTRACE_PEEKTEXT, pid, addr, data)
-}
-
-func PtracePokeData(pid int, addr uintptr, data []byte) (count int, err error) {
- return ptracePoke(PTRACE_POKEDATA, PTRACE_PEEKDATA, pid, addr, data)
-}
-
-func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) {
- return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
-}
-
-func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) {
- return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
-}
-
-func PtraceSetOptions(pid int, options int) (err error) {
- return ptrace(PTRACE_SETOPTIONS, pid, 0, uintptr(options))
-}
-
-func PtraceGetEventMsg(pid int) (msg uint, err error) {
- var data _C_long
- err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data)))
- msg = uint(data)
- return
-}
-
-func PtraceCont(pid int, signal int) (err error) {
- return ptrace(PTRACE_CONT, pid, 0, uintptr(signal))
-}
-
-func PtraceSyscall(pid int, signal int) (err error) {
- return ptrace(PTRACE_SYSCALL, pid, 0, uintptr(signal))
-}
-
-func PtraceSingleStep(pid int) (err error) { return ptrace(PTRACE_SINGLESTEP, pid, 0, 0) }
-
-func PtraceAttach(pid int) (err error) { return ptrace(PTRACE_ATTACH, pid, 0, 0) }
-
-func PtraceDetach(pid int) (err error) { return ptrace(PTRACE_DETACH, pid, 0, 0) }
-
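A hedged sketch tying the ptrace wrappers above together: start a stopped child, wait for the stop, peek one word of its text at the program counter, and detach. Assumes linux/amd64 (PC() reads Rip); runtime.LockOSThread is needed because ptrace ties the tracee to the attaching thread:

package main

import (
	"log"
	"os/exec"
	"runtime"
	"syscall"
)

func main() {
	// All ptrace requests must come from the thread that attached.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	cmd := exec.Command("sleep", "10")
	cmd.SysProcAttr = &syscall.SysProcAttr{Ptrace: true}
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}
	pid := cmd.Process.Pid

	// The traced child stops before its first instruction.
	var ws syscall.WaitStatus
	if _, err := syscall.Wait4(pid, &ws, 0, nil); err != nil {
		log.Fatal(err)
	}

	// Read one machine word of text at the current program counter.
	var regs syscall.PtraceRegs
	if err := syscall.PtraceGetRegs(pid, &regs); err != nil {
		log.Fatal(err)
	}
	word := make([]byte, 8)
	if _, err := syscall.PtracePeekText(pid, uintptr(regs.PC()), word); err != nil {
		log.Fatal(err)
	}
	log.Printf("pid %d: text at %#x = % x", pid, regs.PC(), word)

	if err := syscall.PtraceDetach(pid); err != nil {
		log.Fatal(err)
	}
	cmd.Process.Kill() // clean up the detached child
}
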
-//sys reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error)
-
-func Reboot(cmd int) (err error) {
- return reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, "")
-}
-
-func ReadDirent(fd int, buf []byte) (n int, err error) {
- return Getdents(fd, buf)
-}
-
-func direntIno(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
-}
-
-func direntReclen(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
-}
-
-func direntNamlen(buf []byte) (uint64, bool) {
- reclen, ok := direntReclen(buf)
- if !ok {
- return 0, false
- }
- return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true
-}
-
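The dirent helpers above feed syscall.ParseDirent; a hedged sketch of the usual loop over ReadDirent (Linux, hypothetical directory path):

package main

import (
	"log"
	"syscall"
)

func main() {
	fd, err := syscall.Open("/tmp", syscall.O_RDONLY|syscall.O_DIRECTORY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer syscall.Close(fd)

	buf := make([]byte, 4096)
	var names []string
	for {
		n, err := syscall.ReadDirent(fd, buf)
		if err != nil {
			log.Fatal(err)
		}
		if n == 0 {
			break
		}
		// ParseDirent walks records using the Ino/Reclen/Name offsets above.
		_, _, names = syscall.ParseDirent(buf[:n], -1, names)
	}
	log.Println(names)
}
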
-//sys mount(source string, target string, fstype string, flags uintptr, data *byte) (err error)
-
-func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
- // Certain file systems get rather angry and EINVAL if you give
- // them an empty string of data, rather than NULL.
- if data == "" {
- return mount(source, target, fstype, flags, nil)
- }
- datap, err := BytePtrFromString(data)
- if err != nil {
- return err
- }
- return mount(source, target, fstype, flags, datap)
-}
-
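A hedged usage sketch of Mount above; the data string carries filesystem-specific options, and the empty-string case is passed to the kernel as NULL per the comment. Requires CAP_SYS_ADMIN; the target path is hypothetical:

package main

import (
	"log"
	"syscall"
)

func main() {
	// Mount a small tmpfs; "size=16m" is a tmpfs-specific data option.
	err := syscall.Mount("tmpfs", "/mnt/scratch", "tmpfs", 0, "size=16m")
	if err != nil {
		log.Fatal(err)
	}
	defer syscall.Unmount("/mnt/scratch", 0)
	log.Println("mounted")
}
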
-// Sendto
-// Recvfrom
-// Socketpair
-
-/*
- * Direct access
- */
-//sys Acct(path string) (err error)
-//sys Adjtimex(buf *Timex) (state int, err error)
-//sys Chdir(path string) (err error)
-//sys Chroot(path string) (err error)
-//sys Close(fd int) (err error)
-//sys Dup(oldfd int) (fd int, err error)
-//sys Dup3(oldfd int, newfd int, flags int) (err error)
-//sysnb EpollCreate1(flag int) (fd int, err error)
-//sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error)
-//sys Fallocate(fd int, mode uint32, off int64, len int64) (err error)
-//sys Fchdir(fd int) (err error)
-//sys Fchmod(fd int, mode uint32) (err error)
-//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error)
-//sys fcntl(fd int, cmd int, arg int) (val int, err error)
-//sys Fdatasync(fd int) (err error)
-//sys Flock(fd int, how int) (err error)
-//sys Fsync(fd int) (err error)
-//sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64
-//sysnb Getpgid(pid int) (pgid int, err error)
-
-func Getpgrp() (pid int) {
- pid, _ = Getpgid(0)
- return
-}
-
-//sysnb Getpid() (pid int)
-//sysnb Getppid() (ppid int)
-//sys Getpriority(which int, who int) (prio int, err error)
-//sysnb Getrusage(who int, rusage *Rusage) (err error)
-//sysnb Gettid() (tid int)
-//sys Getxattr(path string, attr string, dest []byte) (sz int, err error)
-//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error)
-//sysnb InotifyInit1(flags int) (fd int, err error)
-//sysnb InotifyRmWatch(fd int, watchdesc uint32) (success int, err error)
-//sysnb Kill(pid int, sig Signal) (err error)
-//sys Klogctl(typ int, buf []byte) (n int, err error) = SYS_SYSLOG
-//sys Listxattr(path string, dest []byte) (sz int, err error)
-//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
-//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
-//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
-//sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT
-//sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64
-//sys read(fd int, p []byte) (n int, err error)
-//sys Removexattr(path string, attr string) (err error)
-//sys Setdomainname(p []byte) (err error)
-//sys Sethostname(p []byte) (err error)
-//sysnb Setpgid(pid int, pgid int) (err error)
-//sysnb Setsid() (pid int, err error)
-//sysnb Settimeofday(tv *Timeval) (err error)
-
-// Provided by runtime.syscall_runtime_doAllThreadsSyscall which stops the
-// world and invokes the syscall on each OS thread. Once this function returns,
-// all threads are in sync.
-//go:uintptrescapes
-func runtime_doAllThreadsSyscall(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
-
-// AllThreadsSyscall performs a syscall on each OS thread of the Go
-// runtime. It first invokes the syscall on one thread. Should that
-// invocation fail, it returns immediately with the error status.
-// Otherwise, it invokes the syscall on all of the remaining threads
-// in parallel. It will terminate the program if it observes any
-// invoked syscall's return value differs from that of the first
-// invocation.
-//
-// AllThreadsSyscall is intended for emulating simultaneous
-// process-wide state changes that require consistently modifying
-// per-thread state of the Go runtime.
-//
-// AllThreadsSyscall is unaware of any threads that are launched
-// explicitly by cgo linked code, so the function always returns
-// ENOTSUP in binaries that use cgo.
-//go:uintptrescapes
-func AllThreadsSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) {
- if cgo_libc_setegid != nil {
- return minus1, minus1, ENOTSUP
- }
- r1, r2, errno := runtime_doAllThreadsSyscall(trap, a1, a2, a3, 0, 0, 0)
- return r1, r2, Errno(errno)
-}
-
-// AllThreadsSyscall6 is like AllThreadsSyscall, but extended to six
-// arguments.
-//go:uintptrescapes
-func AllThreadsSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) {
- if cgo_libc_setegid != nil {
- return minus1, minus1, ENOTSUP
- }
- r1, r2, errno := runtime_doAllThreadsSyscall(trap, a1, a2, a3, a4, a5, a6)
- return r1, r2, Errno(errno)
-}
-
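A hedged sketch of the intended use of AllThreadsSyscall above: applying a per-thread kernel flag consistently across every runtime thread. Here PR_SET_NO_NEW_PRIVS via prctl (Linux, non-cgo builds only, per the ENOTSUP note); the constant value is taken from <linux/prctl.h>:

package main

import (
	"log"
	"syscall"
)

const prSetNoNewPrivs = 38 // PR_SET_NO_NEW_PRIVS from <linux/prctl.h>

func main() {
	// Run prctl on every OS thread so the flag holds process-wide;
	// the remaining prctl arguments default to zero, as required.
	_, _, errno := syscall.AllThreadsSyscall(syscall.SYS_PRCTL, prSetNoNewPrivs, 1, 0)
	if errno != 0 {
		log.Fatal(errno)
	}
	log.Println("no_new_privs set on all threads")
}
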
-// linked by runtime.cgocall.go
-//go:uintptrescapes
-func cgocaller(unsafe.Pointer, ...uintptr) uintptr
-
-var cgo_libc_setegid unsafe.Pointer // non-nil if cgo linked.
-
-const minus1 = ^uintptr(0)
-
-func Setegid(egid int) (err error) {
- if cgo_libc_setegid == nil {
- if _, _, e1 := AllThreadsSyscall(SYS_SETRESGID, minus1, uintptr(egid), minus1); e1 != 0 {
- err = errnoErr(e1)
- }
- } else if ret := cgocaller(cgo_libc_setegid, uintptr(egid)); ret != 0 {
- err = errnoErr(Errno(ret))
- }
- return
-}
-
-var cgo_libc_seteuid unsafe.Pointer // non-nil if cgo linked.
-
-func Seteuid(euid int) (err error) {
- if cgo_libc_seteuid == nil {
- if _, _, e1 := AllThreadsSyscall(SYS_SETRESUID, minus1, uintptr(euid), minus1); e1 != 0 {
- err = errnoErr(e1)
- }
- } else if ret := cgocaller(cgo_libc_seteuid, uintptr(euid)); ret != 0 {
- err = errnoErr(Errno(ret))
- }
- return
-}
-
-var cgo_libc_setgid unsafe.Pointer // non-nil if cgo linked.
-
-func Setgid(gid int) (err error) {
- if cgo_libc_setgid == nil {
- if _, _, e1 := AllThreadsSyscall(sys_SETGID, uintptr(gid), 0, 0); e1 != 0 {
- err = errnoErr(e1)
- }
- } else if ret := cgocaller(cgo_libc_setgid, uintptr(gid)); ret != 0 {
- err = errnoErr(Errno(ret))
- }
- return
-}
-
-var cgo_libc_setregid unsafe.Pointer // non-nil if cgo linked.
-
-func Setregid(rgid, egid int) (err error) {
- if cgo_libc_setregid == nil {
- if _, _, e1 := AllThreadsSyscall(sys_SETREGID, uintptr(rgid), uintptr(egid), 0); e1 != 0 {
- err = errnoErr(e1)
- }
- } else if ret := cgocaller(cgo_libc_setregid, uintptr(rgid), uintptr(egid)); ret != 0 {
- err = errnoErr(Errno(ret))
- }
- return
-}
-
-var cgo_libc_setresgid unsafe.Pointer // non-nil if cgo linked.
-
-func Setresgid(rgid, egid, sgid int) (err error) {
- if cgo_libc_setresgid == nil {
- if _, _, e1 := AllThreadsSyscall(sys_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)); e1 != 0 {
- err = errnoErr(e1)
- }
- } else if ret := cgocaller(cgo_libc_setresgid, uintptr(rgid), uintptr(egid), uintptr(sgid)); ret != 0 {
- err = errnoErr(Errno(ret))
- }
- return
-}
-
-var cgo_libc_setresuid unsafe.Pointer // non-nil if cgo linked.
-
-func Setresuid(ruid, euid, suid int) (err error) {
- if cgo_libc_setresuid == nil {
- if _, _, e1 := AllThreadsSyscall(sys_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)); e1 != 0 {
- err = errnoErr(e1)
- }
- } else if ret := cgocaller(cgo_libc_setresuid, uintptr(ruid), uintptr(euid), uintptr(suid)); ret != 0 {
- err = errnoErr(Errno(ret))
- }
- return
-}
-
-var cgo_libc_setreuid unsafe.Pointer // non-nil if cgo linked.
-
-func Setreuid(ruid, euid int) (err error) {
- if cgo_libc_setreuid == nil {
- if _, _, e1 := AllThreadsSyscall(sys_SETREUID, uintptr(ruid), uintptr(euid), 0); e1 != 0 {
- err = errnoErr(e1)
- }
- } else if ret := cgocaller(cgo_libc_setreuid, uintptr(ruid), uintptr(euid)); ret != 0 {
- err = errnoErr(Errno(ret))
- }
- return
-}
-
-var cgo_libc_setuid unsafe.Pointer // non-nil if cgo linked.
-
-func Setuid(uid int) (err error) {
- if cgo_libc_setuid == nil {
- if _, _, e1 := AllThreadsSyscall(sys_SETUID, uintptr(uid), 0, 0); e1 != 0 {
- err = errnoErr(e1)
- }
- } else if ret := cgocaller(cgo_libc_setuid, uintptr(uid)); ret != 0 {
- err = errnoErr(Errno(ret))
- }
- return
-}
-
-//sys Setpriority(which int, who int, prio int) (err error)
-//sys Setxattr(path string, attr string, data []byte, flags int) (err error)
-//sys Sync()
-//sysnb Sysinfo(info *Sysinfo_t) (err error)
-//sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error)
-//sysnb Tgkill(tgid int, tid int, sig Signal) (err error)
-//sysnb Times(tms *Tms) (ticks uintptr, err error)
-//sysnb Umask(mask int) (oldmask int)
-//sysnb Uname(buf *Utsname) (err error)
-//sys Unmount(target string, flags int) (err error) = SYS_UMOUNT2
-//sys Unshare(flags int) (err error)
-//sys write(fd int, p []byte) (n int, err error)
-//sys exitThread(code int) (err error) = SYS_EXIT
-//sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ
-//sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE
-
-// mmap varies by architecture; see syscall_linux_*.go.
-//sys munmap(addr uintptr, length uintptr) (err error)
-
-var mapper = &mmapper{
- active: make(map[*byte][]byte),
- mmap: mmap,
- munmap: munmap,
-}
-
-func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
- return mapper.Mmap(fd, offset, length, prot, flags)
-}
-
-func Munmap(b []byte) (err error) {
- return mapper.Munmap(b)
-}
-
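A hedged usage sketch of the Mmap/Munmap wrappers above: map a file read-only, inspect it, and unmap. The path is hypothetical and must name a non-empty file, since a zero length is rejected with EINVAL:

package main

import (
	"log"
	"syscall"
)

func main() {
	fd, err := syscall.Open("/etc/hostname", syscall.O_RDONLY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer syscall.Close(fd)

	var st syscall.Stat_t
	if err := syscall.Fstat(fd, &st); err != nil {
		log.Fatal(err)
	}

	// Map the whole file; the mapper registers the slice by its last
	// byte so Munmap can find the original mapping again.
	data, err := syscall.Mmap(fd, 0, int(st.Size), syscall.PROT_READ, syscall.MAP_PRIVATE)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d bytes: %q", len(data), data)

	if err := syscall.Munmap(data); err != nil {
		log.Fatal(err)
	}
}
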
-//sys Madvise(b []byte, advice int) (err error)
-//sys Mprotect(b []byte, prot int) (err error)
-//sys Mlock(b []byte) (err error)
-//sys Munlock(b []byte) (err error)
-//sys Mlockall(flags int) (err error)
-//sys Munlockall() (err error)
diff --git a/contrib/go/_std_1.18/src/syscall/syscall_linux_amd64.go b/contrib/go/_std_1.18/src/syscall/syscall_linux_amd64.go
deleted file mode 100644
index 26b40ffe9b..0000000000
--- a/contrib/go/_std_1.18/src/syscall/syscall_linux_amd64.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package syscall
-
-const _SYS_setgroups = SYS_SETGROUPS
-
-//sys Dup2(oldfd int, newfd int) (err error)
-//sysnb EpollCreate(size int) (fd int, err error)
-//sys Fchown(fd int, uid int, gid int) (err error)
-//sys Fstat(fd int, stat *Stat_t) (err error)
-//sys Fstatfs(fd int, buf *Statfs_t) (err error)
-//sys Ftruncate(fd int, length int64) (err error)
-//sysnb Getegid() (egid int)
-//sysnb Geteuid() (euid int)
-//sysnb Getgid() (gid int)
-//sysnb Getrlimit(resource int, rlim *Rlimit) (err error)
-//sysnb Getuid() (uid int)
-//sysnb InotifyInit() (fd int, err error)
-//sys Ioperm(from int, num int, on int) (err error)
-//sys Iopl(level int) (err error)
-//sys Listen(s int, n int) (err error)
-//sys Pause() (err error)
-//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
-//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
-//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
-//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
-//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
-//sys Setfsgid(gid int) (err error)
-//sys Setfsuid(uid int) (err error)
-//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
-//sys Shutdown(fd int, how int) (err error)
-//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
-//sys Statfs(path string, buf *Statfs_t) (err error)
-//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
-//sys Truncate(path string, length int64) (err error)
-//sys Ustat(dev int, ubuf *Ustat_t) (err error)
-//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
-//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
-//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
-//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
-//sys fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_NEWFSTATAT
-//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
-//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
-//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
-//sysnb socket(domain int, typ int, proto int) (fd int, err error)
-//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
-//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
-//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
-//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
-//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
-//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
-//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
-//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
-//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
-
-func Stat(path string, stat *Stat_t) (err error) {
- return fstatat(_AT_FDCWD, path, stat, 0)
-}
-
-func Lchown(path string, uid int, gid int) (err error) {
- return Fchownat(_AT_FDCWD, path, uid, gid, _AT_SYMLINK_NOFOLLOW)
-}
-
-func Lstat(path string, stat *Stat_t) (err error) {
- return fstatat(_AT_FDCWD, path, stat, _AT_SYMLINK_NOFOLLOW)
-}
-
-//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error)
-
-//go:noescape
-func gettimeofday(tv *Timeval) (err Errno)
-
-func Gettimeofday(tv *Timeval) (err error) {
- errno := gettimeofday(tv)
- if errno != 0 {
- return errno
- }
- return nil
-}
-
-func Time(t *Time_t) (tt Time_t, err error) {
- var tv Timeval
- errno := gettimeofday(&tv)
- if errno != 0 {
- return 0, errno
- }
- if t != nil {
- *t = Time_t(tv.Sec)
- }
- return Time_t(tv.Sec), nil
-}
-
-//sys Utime(path string, buf *Utimbuf) (err error)
-//sys utimes(path string, times *[2]Timeval) (err error)
-
-func setTimespec(sec, nsec int64) Timespec {
- return Timespec{Sec: sec, Nsec: nsec}
-}
-
-func setTimeval(sec, usec int64) Timeval {
- return Timeval{Sec: sec, Usec: usec}
-}
-
-func (r *PtraceRegs) PC() uint64 { return r.Rip }
-
-func (r *PtraceRegs) SetPC(pc uint64) { r.Rip = pc }
-
-func (iov *Iovec) SetLen(length int) {
- iov.Len = uint64(length)
-}
-
-func (msghdr *Msghdr) SetControllen(length int) {
- msghdr.Controllen = uint64(length)
-}
-
-func (cmsg *Cmsghdr) SetLen(length int) {
- cmsg.Len = uint64(length)
-}
-
-func rawVforkSyscall(trap, a1 uintptr) (r1 uintptr, err Errno)
diff --git a/contrib/go/_std_1.18/src/syscall/syscall_unix.go b/contrib/go/_std_1.18/src/syscall/syscall_unix.go
deleted file mode 100644
index 5ee938115d..0000000000
--- a/contrib/go/_std_1.18/src/syscall/syscall_unix.go
+++ /dev/null
@@ -1,487 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-
-package syscall
-
-import (
- "internal/itoa"
- "internal/oserror"
- "internal/race"
- "internal/unsafeheader"
- "runtime"
- "sync"
- "unsafe"
-)
-
-var (
- Stdin = 0
- Stdout = 1
- Stderr = 2
-)
-
-const (
- darwin64Bit = (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && sizeofPtr == 8
- netbsd32Bit = runtime.GOOS == "netbsd" && sizeofPtr == 4
-)
-
-func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
-func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
-func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
-func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
-
-// clen returns the index of the first NULL byte in n or len(n) if n contains no NULL byte.
-func clen(n []byte) int {
- for i := 0; i < len(n); i++ {
- if n[i] == 0 {
- return i
- }
- }
- return len(n)
-}
-
-// Mmap manager, for use by operating system-specific implementations.
-
-type mmapper struct {
- sync.Mutex
- active map[*byte][]byte // active mappings; key is last byte in mapping
- mmap func(addr, length uintptr, prot, flags, fd int, offset int64) (uintptr, error)
- munmap func(addr uintptr, length uintptr) error
-}
-
-func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
- if length <= 0 {
- return nil, EINVAL
- }
-
- // Map the requested memory.
- addr, errno := m.mmap(0, uintptr(length), prot, flags, fd, offset)
- if errno != nil {
- return nil, errno
- }
-
- // Use unsafe to turn addr into a []byte.
- var b []byte
- hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b))
- hdr.Data = unsafe.Pointer(addr)
- hdr.Cap = length
- hdr.Len = length
-
- // Register mapping in m and return it.
- p := &b[cap(b)-1]
- m.Lock()
- defer m.Unlock()
- m.active[p] = b
- return b, nil
-}
-
-func (m *mmapper) Munmap(data []byte) (err error) {
- if len(data) == 0 || len(data) != cap(data) {
- return EINVAL
- }
-
- // Find the base of the mapping.
- p := &data[cap(data)-1]
- m.Lock()
- defer m.Unlock()
- b := m.active[p]
- if b == nil || &b[0] != &data[0] {
- return EINVAL
- }
-
- // Unmap the memory and update m.
- if errno := m.munmap(uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))); errno != nil {
- return errno
- }
- delete(m.active, p)
- return nil
-}
-
-// An Errno is an unsigned number describing an error condition.
-// It implements the error interface. The zero Errno is by convention
-// a non-error, so code to convert from Errno to error should use:
-//	err = nil
-//	if errno != 0 {
-//		err = errno
-//	}
-//
-// Errno values can be tested against error values from the os package
-// using errors.Is. For example:
-//
-// _, _, err := syscall.Syscall(...)
-// if errors.Is(err, fs.ErrNotExist) ...
-type Errno uintptr
-
-func (e Errno) Error() string {
- if 0 <= int(e) && int(e) < len(errors) {
- s := errors[e]
- if s != "" {
- return s
- }
- }
- return "errno " + itoa.Itoa(int(e))
-}
-
-func (e Errno) Is(target error) bool {
- switch target {
- case oserror.ErrPermission:
- return e == EACCES || e == EPERM
- case oserror.ErrExist:
- return e == EEXIST || e == ENOTEMPTY
- case oserror.ErrNotExist:
- return e == ENOENT
- }
- return false
-}
-
-func (e Errno) Temporary() bool {
- return e == EINTR || e == EMFILE || e == ENFILE || e.Timeout()
-}
-
-func (e Errno) Timeout() bool {
- return e == EAGAIN || e == EWOULDBLOCK || e == ETIMEDOUT
-}
-
-// Do the interface allocations only once for common
-// Errno values.
-var (
- errEAGAIN error = EAGAIN
- errEINVAL error = EINVAL
- errENOENT error = ENOENT
-)
-
-// errnoErr returns common boxed Errno values, to prevent
-// allocations at runtime.
-func errnoErr(e Errno) error {
- switch e {
- case 0:
- return nil
- case EAGAIN:
- return errEAGAIN
- case EINVAL:
- return errEINVAL
- case ENOENT:
- return errENOENT
- }
- return e
-}
-
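The boxing above means direct comparisons such as err == syscall.ENOENT keep working without allocating, while errors.Is adds the os-level mapping from the Is method above. A small hedged sketch (hypothetical path):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"syscall"
)

func main() {
	var st syscall.Stat_t
	err := syscall.Stat("/definitely/not/there", &st)

	fmt.Println(err == syscall.ENOENT)          // true: boxed Errno compares directly
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true: via Errno.Is above
}
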
-// A Signal is a number describing a process signal.
-// It implements the os.Signal interface.
-type Signal int
-
-func (s Signal) Signal() {}
-
-func (s Signal) String() string {
- if 0 <= s && int(s) < len(signals) {
- str := signals[s]
- if str != "" {
- return str
- }
- }
- return "signal " + itoa.Itoa(int(s))
-}
-
-func Read(fd int, p []byte) (n int, err error) {
- n, err = read(fd, p)
- if race.Enabled {
- if n > 0 {
- race.WriteRange(unsafe.Pointer(&p[0]), n)
- }
- if err == nil {
- race.Acquire(unsafe.Pointer(&ioSync))
- }
- }
- if msanenabled && n > 0 {
- msanWrite(unsafe.Pointer(&p[0]), n)
- }
- if asanenabled && n > 0 {
- asanWrite(unsafe.Pointer(&p[0]), n)
- }
- return
-}
-
-func Write(fd int, p []byte) (n int, err error) {
- if race.Enabled {
- race.ReleaseMerge(unsafe.Pointer(&ioSync))
- }
- if faketime && (fd == 1 || fd == 2) {
- n = faketimeWrite(fd, p)
- if n < 0 {
- n, err = 0, errnoErr(Errno(-n))
- }
- } else {
- n, err = write(fd, p)
- }
- if race.Enabled && n > 0 {
- race.ReadRange(unsafe.Pointer(&p[0]), n)
- }
- if msanenabled && n > 0 {
- msanRead(unsafe.Pointer(&p[0]), n)
- }
- if asanenabled && n > 0 {
- asanRead(unsafe.Pointer(&p[0]), n)
- }
- return
-}
-
-// For testing: clients can set this flag to force
-// creation of IPv6 sockets to return EAFNOSUPPORT.
-var SocketDisableIPv6 bool
-
-type Sockaddr interface {
- sockaddr() (ptr unsafe.Pointer, len _Socklen, err error) // lowercase; only we can define Sockaddrs
-}
-
-type SockaddrInet4 struct {
- Port int
- Addr [4]byte
- raw RawSockaddrInet4
-}
-
-type SockaddrInet6 struct {
- Port int
- ZoneId uint32
- Addr [16]byte
- raw RawSockaddrInet6
-}
-
-type SockaddrUnix struct {
- Name string
- raw RawSockaddrUnix
-}
-
-func Bind(fd int, sa Sockaddr) (err error) {
- ptr, n, err := sa.sockaddr()
- if err != nil {
- return err
- }
- return bind(fd, ptr, n)
-}
-
-func Connect(fd int, sa Sockaddr) (err error) {
- ptr, n, err := sa.sockaddr()
- if err != nil {
- return err
- }
- return connect(fd, ptr, n)
-}
-
-func Getpeername(fd int) (sa Sockaddr, err error) {
- var rsa RawSockaddrAny
- var len _Socklen = SizeofSockaddrAny
- if err = getpeername(fd, &rsa, &len); err != nil {
- return
- }
- return anyToSockaddr(&rsa)
-}
-
-func GetsockoptInt(fd, level, opt int) (value int, err error) {
- var n int32
- vallen := _Socklen(4)
- err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen)
- return int(n), err
-}
-
-func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) {
- var rsa RawSockaddrAny
- var len _Socklen = SizeofSockaddrAny
- if n, err = recvfrom(fd, p, flags, &rsa, &len); err != nil {
- return
- }
- if rsa.Addr.Family != AF_UNSPEC {
- from, err = anyToSockaddr(&rsa)
- }
- return
-}
-
-func recvfromInet4(fd int, p []byte, flags int, from *SockaddrInet4) (n int, err error) {
- var rsa RawSockaddrAny
- var socklen _Socklen = SizeofSockaddrAny
- if n, err = recvfrom(fd, p, flags, &rsa, &socklen); err != nil {
- return
- }
- pp := (*RawSockaddrInet4)(unsafe.Pointer(&rsa))
- port := (*[2]byte)(unsafe.Pointer(&pp.Port))
- from.Port = int(port[0])<<8 + int(port[1])
- from.Addr = pp.Addr
- return
-}
-
-func recvfromInet6(fd int, p []byte, flags int, from *SockaddrInet6) (n int, err error) {
- var rsa RawSockaddrAny
- var socklen _Socklen = SizeofSockaddrAny
- if n, err = recvfrom(fd, p, flags, &rsa, &socklen); err != nil {
- return
- }
- pp := (*RawSockaddrInet6)(unsafe.Pointer(&rsa))
- port := (*[2]byte)(unsafe.Pointer(&pp.Port))
- from.Port = int(port[0])<<8 + int(port[1])
- from.ZoneId = pp.Scope_id
- from.Addr = pp.Addr
- return
-}
-
-func recvmsgInet4(fd int, p, oob []byte, flags int, from *SockaddrInet4) (n, oobn int, recvflags int, err error) {
- var rsa RawSockaddrAny
- n, oobn, recvflags, err = recvmsgRaw(fd, p, oob, flags, &rsa)
- if err != nil {
- return
- }
- pp := (*RawSockaddrInet4)(unsafe.Pointer(&rsa))
- port := (*[2]byte)(unsafe.Pointer(&pp.Port))
- from.Port = int(port[0])<<8 + int(port[1])
- from.Addr = pp.Addr
- return
-}
-
-func recvmsgInet6(fd int, p, oob []byte, flags int, from *SockaddrInet6) (n, oobn int, recvflags int, err error) {
- var rsa RawSockaddrAny
- n, oobn, recvflags, err = recvmsgRaw(fd, p, oob, flags, &rsa)
- if err != nil {
- return
- }
- pp := (*RawSockaddrInet6)(unsafe.Pointer(&rsa))
- port := (*[2]byte)(unsafe.Pointer(&pp.Port))
- from.Port = int(port[0])<<8 + int(port[1])
- from.ZoneId = pp.Scope_id
- from.Addr = pp.Addr
- return
-}
-
-func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
- var rsa RawSockaddrAny
- n, oobn, recvflags, err = recvmsgRaw(fd, p, oob, flags, &rsa)
- // source address is only specified if the socket is unconnected
- if rsa.Addr.Family != AF_UNSPEC {
- from, err = anyToSockaddr(&rsa)
- }
- return
-}
-
-func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
- _, err = SendmsgN(fd, p, oob, to, flags)
- return
-}
-
-func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
- var ptr unsafe.Pointer
- var salen _Socklen
- if to != nil {
- ptr, salen, err = to.sockaddr()
- if err != nil {
- return 0, err
- }
- }
- return sendmsgN(fd, p, oob, ptr, salen, flags)
-}
-
-func sendmsgNInet4(fd int, p, oob []byte, to *SockaddrInet4, flags int) (n int, err error) {
- ptr, salen, err := to.sockaddr()
- if err != nil {
- return 0, err
- }
- return sendmsgN(fd, p, oob, ptr, salen, flags)
-}
-
-func sendmsgNInet6(fd int, p, oob []byte, to *SockaddrInet6, flags int) (n int, err error) {
- ptr, salen, err := to.sockaddr()
- if err != nil {
- return 0, err
- }
- return sendmsgN(fd, p, oob, ptr, salen, flags)
-}
-
-func sendtoInet4(fd int, p []byte, flags int, to *SockaddrInet4) (err error) {
- ptr, n, err := to.sockaddr()
- if err != nil {
- return err
- }
- return sendto(fd, p, flags, ptr, n)
-}
-
-func sendtoInet6(fd int, p []byte, flags int, to *SockaddrInet6) (err error) {
- ptr, n, err := to.sockaddr()
- if err != nil {
- return err
- }
- return sendto(fd, p, flags, ptr, n)
-}
-
-func Sendto(fd int, p []byte, flags int, to Sockaddr) (err error) {
- ptr, n, err := to.sockaddr()
- if err != nil {
- return err
- }
- return sendto(fd, p, flags, ptr, n)
-}
-
-func SetsockoptByte(fd, level, opt int, value byte) (err error) {
- return setsockopt(fd, level, opt, unsafe.Pointer(&value), 1)
-}
-
-func SetsockoptInt(fd, level, opt int, value int) (err error) {
- var n = int32(value)
- return setsockopt(fd, level, opt, unsafe.Pointer(&n), 4)
-}
-
-func SetsockoptInet4Addr(fd, level, opt int, value [4]byte) (err error) {
- return setsockopt(fd, level, opt, unsafe.Pointer(&value[0]), 4)
-}
-
-func SetsockoptIPMreq(fd, level, opt int, mreq *IPMreq) (err error) {
- return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPMreq)
-}
-
-func SetsockoptIPv6Mreq(fd, level, opt int, mreq *IPv6Mreq) (err error) {
- return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPv6Mreq)
-}
-
-func SetsockoptICMPv6Filter(fd, level, opt int, filter *ICMPv6Filter) error {
- return setsockopt(fd, level, opt, unsafe.Pointer(filter), SizeofICMPv6Filter)
-}
-
-func SetsockoptLinger(fd, level, opt int, l *Linger) (err error) {
- return setsockopt(fd, level, opt, unsafe.Pointer(l), SizeofLinger)
-}
-
-func SetsockoptString(fd, level, opt int, s string) (err error) {
- var p unsafe.Pointer
- if len(s) > 0 {
- p = unsafe.Pointer(&[]byte(s)[0])
- }
- return setsockopt(fd, level, opt, p, uintptr(len(s)))
-}
-
-func SetsockoptTimeval(fd, level, opt int, tv *Timeval) (err error) {
- return setsockopt(fd, level, opt, unsafe.Pointer(tv), unsafe.Sizeof(*tv))
-}
-
-func Socket(domain, typ, proto int) (fd int, err error) {
- if domain == AF_INET6 && SocketDisableIPv6 {
- return -1, EAFNOSUPPORT
- }
- fd, err = socket(domain, typ, proto)
- return
-}
-
-func Socketpair(domain, typ, proto int) (fd [2]int, err error) {
- var fdx [2]int32
- err = socketpair(domain, typ, proto, &fdx)
- if err == nil {
- fd[0] = int(fdx[0])
- fd[1] = int(fdx[1])
- }
- return
-}
-
-func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
- if race.Enabled {
- race.ReleaseMerge(unsafe.Pointer(&ioSync))
- }
- return sendfile(outfd, infd, offset, count)
-}
-
-var ioSync int64
diff --git a/contrib/go/_std_1.18/src/syscall/timestruct.go b/contrib/go/_std_1.18/src/syscall/timestruct.go
deleted file mode 100644
index 7cf4be45b1..0000000000
--- a/contrib/go/_std_1.18/src/syscall/timestruct.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris
-
-package syscall
-
-// TimespecToNsec returns the time stored in ts as nanoseconds.
-func TimespecToNsec(ts Timespec) int64 { return ts.Nano() }
-
-// NsecToTimespec converts a number of nanoseconds into a Timespec.
-func NsecToTimespec(nsec int64) Timespec {
- sec := nsec / 1e9
- nsec = nsec % 1e9
- if nsec < 0 {
- nsec += 1e9
- sec--
- }
- return setTimespec(sec, nsec)
-}
-
-// TimevalToNsec returns the time stored in tv as nanoseconds.
-func TimevalToNsec(tv Timeval) int64 { return tv.Nano() }
-
-// NsecToTimeval converts a number of nanoseconds into a Timeval.
-func NsecToTimeval(nsec int64) Timeval {
- nsec += 999 // round up to microsecond
- usec := nsec % 1e9 / 1e3
- sec := nsec / 1e9
- if usec < 0 {
- usec += 1e6
- sec--
- }
- return setTimeval(sec, usec)
-}
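The normalizations above keep the sub-second fields non-negative even for negative inputs, and NsecToTimeval first rounds up to a whole microsecond. A worked sketch:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// -1ns normalizes to sec=-1, nsec=999999999.
	fmt.Println(syscall.NsecToTimespec(-1)) // {-1 999999999}

	// 1.5s: rounded up by 999ns, then truncated to microseconds.
	fmt.Println(syscall.NsecToTimeval(1500000000)) // {1 500000}

	// -1ns rounds up to zero microseconds.
	fmt.Println(syscall.NsecToTimeval(-1)) // {0 0}
}
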
diff --git a/contrib/go/_std_1.18/src/syscall/zsyscall_darwin_amd64.go b/contrib/go/_std_1.18/src/syscall/zsyscall_darwin_amd64.go
deleted file mode 100644
index 0ccdaf2d0e..0000000000
--- a/contrib/go/_std_1.18/src/syscall/zsyscall_darwin_amd64.go
+++ /dev/null
@@ -1,2004 +0,0 @@
-// mksyscall.pl -darwin -tags darwin,amd64 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go
-// Code generated by the command above; DO NOT EDIT.
-
-//go:build darwin && amd64
-
-package syscall
-
-import "unsafe"
-import "internal/abi"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
- r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_getgroups_trampoline()
-
-//go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setgroups(ngid int, gid *_Gid_t) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_setgroups_trampoline()
-
-//go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
- r0, _, e1 := syscall6(abi.FuncPCABI0(libc_wait4_trampoline), uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
- wpid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_wait4_trampoline()
-
-//go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_accept_trampoline), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_accept_trampoline()
-
-//go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_bind_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_bind_trampoline()
-
-//go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_connect_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_connect_trampoline()
-
-//go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func socket(domain int, typ int, proto int) (fd int, err error) {
- r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_socket_trampoline), uintptr(domain), uintptr(typ), uintptr(proto))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_socket_trampoline()
-
-//go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
- _, _, e1 := syscall6(abi.FuncPCABI0(libc_getsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_getsockopt_trampoline()
-
-//go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
- _, _, e1 := syscall6(abi.FuncPCABI0(libc_setsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_setsockopt_trampoline()
-
-//go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getpeername_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_getpeername_trampoline()
-
-//go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getsockname_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_getsockname_trampoline()
-
-//go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Shutdown(s int, how int) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_shutdown_trampoline), uintptr(s), uintptr(how), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_shutdown_trampoline()
-
-//go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
- _, _, e1 := rawSyscall6(abi.FuncPCABI0(libc_socketpair_trampoline), uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_socketpair_trampoline()
-
-//go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := syscall6(abi.FuncPCABI0(libc_recvfrom_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_recvfrom_trampoline()
-
-//go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
- var _p0 unsafe.Pointer
- if len(buf) > 0 {
- _p0 = unsafe.Pointer(&buf[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := syscall6(abi.FuncPCABI0(libc_sendto_trampoline), uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_sendto_trampoline()
-
-//go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_recvmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_recvmsg_trampoline()
-
-//go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_sendmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_sendmsg_trampoline()
-
-//go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) {
- r0, _, e1 := syscall6(abi.FuncPCABI0(libc_kevent_trampoline), uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_kevent_trampoline()
-
-//go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func utimes(path string, timeval *[2]Timeval) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_utimes_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_utimes_trampoline()
-
-//go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func futimes(fd int, timeval *[2]Timeval) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_futimes_trampoline), uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_futimes_trampoline()
-
-//go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fcntl(fd int, cmd int, arg int) (val int, err error) {
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
- val = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_fcntl_trampoline()
-
-//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func pipe(p *[2]int32) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_pipe_trampoline), uintptr(unsafe.Pointer(p)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_pipe_trampoline()
-
-//go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall6(abi.FuncPCABI0(libc_utimensat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_utimensat_trampoline()
-
-//go:cgo_import_dynamic libc_utimensat utimensat "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func kill(pid int, signum int, posix int) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_kill_trampoline()
-
-//go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Access(path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_access_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_access_trampoline()
-
-//go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_adjtime_trampoline), uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_adjtime_trampoline()
-
-//go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chdir(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_chdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_chdir_trampoline()
-
-//go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chflags(path string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_chflags_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_chflags_trampoline()
-
-//go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chmod(path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_chmod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_chmod_trampoline()
-
-//go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chown(path string, uid int, gid int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_chown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_chown_trampoline()
-
-//go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chroot(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_chroot_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_chroot_trampoline()
-
-//go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Close(fd int) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_close_trampoline), uintptr(fd), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_close_trampoline()
-
-//go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func closedir(dir uintptr) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_closedir_trampoline), uintptr(dir), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_closedir_trampoline()
-
-//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Dup(fd int) (nfd int, err error) {
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_dup_trampoline), uintptr(fd), 0, 0)
- nfd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_dup_trampoline()
-
-//go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Dup2(from int, to int) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_dup2_trampoline), uintptr(from), uintptr(to), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_dup2_trampoline()
-
-//go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Exchangedata(path1 string, path2 string, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path1)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(path2)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_exchangedata_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_exchangedata_trampoline()
-
-//go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchdir(fd int) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_fchdir_trampoline), uintptr(fd), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_fchdir_trampoline()
-
-//go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchflags(fd int, flags int) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_fchflags_trampoline), uintptr(fd), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_fchflags_trampoline()
-
-//go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchmod(fd int, mode uint32) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_fchmod_trampoline), uintptr(fd), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_fchmod_trampoline()
-
-//go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchown(fd int, uid int, gid int) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_fchown_trampoline), uintptr(fd), uintptr(uid), uintptr(gid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_fchown_trampoline()
-
-//go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Flock(fd int, how int) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_flock_trampoline), uintptr(fd), uintptr(how), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_flock_trampoline()
-
-//go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fpathconf(fd int, name int) (val int, err error) {
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_fpathconf_trampoline), uintptr(fd), uintptr(name), 0)
- val = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_fpathconf_trampoline()
-
-//go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fsync(fd int) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_fsync_trampoline), uintptr(fd), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_fsync_trampoline()
-
-//go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Ftruncate(fd int, length int64) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_ftruncate_trampoline), uintptr(fd), uintptr(length), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_ftruncate_trampoline()
-
-//go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getdtablesize() (size int) {
- r0, _, _ := syscall(abi.FuncPCABI0(libc_getdtablesize_trampoline), 0, 0, 0)
- size = int(r0)
- return
-}
-
-func libc_getdtablesize_trampoline()
-
-//go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getegid() (egid int) {
- r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getegid_trampoline), 0, 0, 0)
- egid = int(r0)
- return
-}
-
-func libc_getegid_trampoline()
-
-//go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Geteuid() (uid int) {
- r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_geteuid_trampoline), 0, 0, 0)
- uid = int(r0)
- return
-}
-
-func libc_geteuid_trampoline()
-
-//go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getgid() (gid int) {
- r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getgid_trampoline), 0, 0, 0)
- gid = int(r0)
- return
-}
-
-func libc_getgid_trampoline()
-
-//go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpgid(pid int) (pgid int, err error) {
- r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getpgid_trampoline), uintptr(pid), 0, 0)
- pgid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_getpgid_trampoline()
-
-//go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpgrp() (pgrp int) {
- r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getpgrp_trampoline), 0, 0, 0)
- pgrp = int(r0)
- return
-}
-
-func libc_getpgrp_trampoline()
-
-//go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpid() (pid int) {
- r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getpid_trampoline), 0, 0, 0)
- pid = int(r0)
- return
-}
-
-func libc_getpid_trampoline()
-
-//go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getppid() (ppid int) {
- r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getppid_trampoline), 0, 0, 0)
- ppid = int(r0)
- return
-}
-
-func libc_getppid_trampoline()
-
-//go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpriority(which int, who int) (prio int, err error) {
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_getpriority_trampoline), uintptr(which), uintptr(who), 0)
- prio = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_getpriority_trampoline()
-
-//go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getrlimit(which int, lim *Rlimit) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_getrlimit_trampoline()
-
-//go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getrusage(who int, rusage *Rusage) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getrusage_trampoline), uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_getrusage_trampoline()
-
-//go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getsid(pid int) (sid int, err error) {
- r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getsid_trampoline), uintptr(pid), 0, 0)
- sid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_getsid_trampoline()
-
-//go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getuid() (uid int) {
- r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getuid_trampoline), 0, 0, 0)
- uid = int(r0)
- return
-}
-
-func libc_getuid_trampoline()
-
-//go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Issetugid() (tainted bool) {
- r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_issetugid_trampoline), 0, 0, 0)
- tainted = bool(r0 != 0)
- return
-}
-
-func libc_issetugid_trampoline()
-
-//go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Kqueue() (fd int, err error) {
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_kqueue_trampoline), 0, 0, 0)
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_kqueue_trampoline()
-
-//go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Lchown(path string, uid int, gid int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_lchown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_lchown_trampoline()
-
-//go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Link(path string, link string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(link)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_link_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_link_trampoline()
-
-//go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Listen(s int, backlog int) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_listen_trampoline), uintptr(s), uintptr(backlog), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_listen_trampoline()
-
-//go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mkdir(path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_mkdir_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_mkdir_trampoline()
-
-//go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mkfifo(path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_mkfifo_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_mkfifo_trampoline()
-
-//go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mknod(path string, mode uint32, dev int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_mknod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_mknod_trampoline()
-
-//go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mlock(b []byte) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_mlock_trampoline), uintptr(_p0), uintptr(len(b)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_mlock_trampoline()
-
-//go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mlockall(flags int) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_mlockall_trampoline), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_mlockall_trampoline()
-
-//go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mprotect(b []byte, prot int) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_mprotect_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(prot))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_mprotect_trampoline()
-
-//go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Munlock(b []byte) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_munlock_trampoline), uintptr(_p0), uintptr(len(b)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_munlock_trampoline()
-
-//go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Munlockall() (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_munlockall_trampoline), 0, 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_munlockall_trampoline()
-
-//go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Open(path string, mode int, perm uint32) (fd int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_open_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_open_trampoline()
-
-//go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pathconf(path string, name int) (val int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_pathconf_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
- val = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_pathconf_trampoline()
-
-//go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pread(fd int, p []byte, offset int64) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := syscall6(abi.FuncPCABI0(libc_pread_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_pread_trampoline()
-
-//go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := syscall6(abi.FuncPCABI0(libc_pwrite_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_pwrite_trampoline()
-
-//go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func read(fd int, p []byte) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_read_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_read_trampoline()
-
-//go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
- r0, _, _ := syscall(abi.FuncPCABI0(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result)))
- res = Errno(r0)
- return
-}
-
-func libc_readdir_r_trampoline()
-
-//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Readlink(path string, buf []byte) (n int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 unsafe.Pointer
- if len(buf) > 0 {
- _p1 = unsafe.Pointer(&buf[0])
- } else {
- _p1 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_readlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_readlink_trampoline()
-
-//go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Rename(from string, to string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(from)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(to)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_rename_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_rename_trampoline()
-
-//go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Revoke(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_revoke_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_revoke_trampoline()
-
-//go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Rmdir(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_rmdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_rmdir_trampoline()
-
-//go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
- r0, _, e1 := syscallX(abi.FuncPCABI0(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(whence))
- newoffset = int64(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_lseek_trampoline()
-
-//go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) {
- _, _, e1 := syscall6(abi.FuncPCABI0(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_select_trampoline()
-
-//go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setegid(egid int) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_setegid_trampoline), uintptr(egid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_setegid_trampoline()
-
-//go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Seteuid(euid int) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_seteuid_trampoline), uintptr(euid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_seteuid_trampoline()
-
-//go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setgid(gid int) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setgid_trampoline), uintptr(gid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_setgid_trampoline()
-
-//go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setlogin(name string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(name)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_setlogin_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_setlogin_trampoline()
-
-//go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setpgid(pid int, pgid int) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setpgid_trampoline), uintptr(pid), uintptr(pgid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_setpgid_trampoline()
-
-//go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setpriority(which int, who int, prio int) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_setpriority_trampoline), uintptr(which), uintptr(who), uintptr(prio))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_setpriority_trampoline()
-
-//go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setprivexec(flag int) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_setprivexec_trampoline), uintptr(flag), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_setprivexec_trampoline()
-
-//go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setregid_trampoline), uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_setregid_trampoline()
-
-//go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setreuid_trampoline), uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_setreuid_trampoline()
-
-//go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setrlimit(which int, lim *Rlimit) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_setrlimit_trampoline()
-
-//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setsid() (pid int, err error) {
- r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setsid_trampoline), 0, 0, 0)
- pid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_setsid_trampoline()
-
-//go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Settimeofday(tp *Timeval) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_settimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_settimeofday_trampoline()
-
-//go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setuid(uid int) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setuid_trampoline), uintptr(uid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_setuid_trampoline()
-
-//go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Symlink(path string, link string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(link)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_symlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_symlink_trampoline()
-
-//go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Sync() (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_sync_trampoline), 0, 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_sync_trampoline()
-
-//go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Truncate(path string, length int64) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_truncate_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_truncate_trampoline()
-
-//go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Umask(newmask int) (oldmask int) {
- r0, _, _ := syscall(abi.FuncPCABI0(libc_umask_trampoline), uintptr(newmask), 0, 0)
- oldmask = int(r0)
- return
-}
-
-func libc_umask_trampoline()
-
-//go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Undelete(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_undelete_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_undelete_trampoline()
-
-//go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Unlink(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_unlink_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_unlink_trampoline()
-
-//go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Unmount(path string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_unmount_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_unmount_trampoline()
-
-//go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func write(fd int, p []byte) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_write_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_write_trampoline()
-
-//go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writev(fd int, iovecs []Iovec) (cnt uintptr, err error) {
- var _p0 unsafe.Pointer
- if len(iovecs) > 0 {
- _p0 = unsafe.Pointer(&iovecs[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := syscallX(abi.FuncPCABI0(libc_writev_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
- cnt = uintptr(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_writev_trampoline()
-
-//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
- r0, _, e1 := syscall6X(abi.FuncPCABI0(libc_mmap_trampoline), uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos))
- ret = uintptr(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_mmap_trampoline()
-
-//go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func munmap(addr uintptr, length uintptr) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_munmap_trampoline), uintptr(addr), uintptr(length), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_munmap_trampoline()
-
-//go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fork() (pid int, err error) {
- r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_fork_trampoline), 0, 0, 0)
- pid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_fork_trampoline()
-
-//go:cgo_import_dynamic libc_fork fork "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func ioctl(fd int, req int, arg int) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_ioctl_trampoline()
-
-//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func execve(path *byte, argv **byte, envp **byte) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_execve_trampoline), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(argv)), uintptr(unsafe.Pointer(envp)))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_execve_trampoline()
-
-//go:cgo_import_dynamic libc_execve execve "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func exit(res int) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_exit_trampoline), uintptr(res), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_exit_trampoline()
-
-//go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
- var _p0 unsafe.Pointer
- if len(mib) > 0 {
- _p0 = unsafe.Pointer(&mib[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := syscall6(abi.FuncPCABI0(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_sysctl_trampoline()
-
-//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
- val = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func unlinkat(fd int, path string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_unlinkat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_unlinkat_trampoline()
-
-//go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func openat(fd int, path string, flags int, perm uint32) (fdret int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- r0, _, e1 := syscall6(abi.FuncPCABI0(libc_openat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(perm), 0, 0)
- fdret = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_openat_trampoline()
-
-//go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getcwd(buf []byte) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(buf) > 0 {
- _p0 = unsafe.Pointer(&buf[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := syscall(abi.FuncPCABI0(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_getcwd_trampoline()
-
-//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstat(fd int, stat *Stat_t) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_fstat64_trampoline()
-
-//go:cgo_import_dynamic libc_fstat64 fstat64 "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstatfs(fd int, stat *Statfs_t) (err error) {
- _, _, e1 := syscall(abi.FuncPCABI0(libc_fstatfs64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_fstatfs64_trampoline()
-
-//go:cgo_import_dynamic libc_fstatfs64 fstatfs64 "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Gettimeofday(tp *Timeval) (err error) {
- _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_gettimeofday_trampoline()
-
-//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Lstat(path string, stat *Stat_t) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_lstat64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_lstat64_trampoline()
-
-//go:cgo_import_dynamic libc_lstat64 lstat64 "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Stat(path string, stat *Stat_t) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_stat64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_stat64_trampoline()
-
-//go:cgo_import_dynamic libc_stat64 stat64 "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Statfs(path string, stat *Statfs_t) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall(abi.FuncPCABI0(libc_statfs64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_statfs64_trampoline()
-
-//go:cgo_import_dynamic libc_statfs64 statfs64 "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := syscall6(abi.FuncPCABI0(libc_fstatat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_fstatat64_trampoline()
-
-//go:cgo_import_dynamic libc_fstatat64 fstatat64 "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-//go:nosplit
-func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) {
- _, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-func libc_ptrace_trampoline()
-
-//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib"
diff --git a/contrib/go/_std_1.18/src/syscall/zsyscall_linux_amd64.go b/contrib/go/_std_1.18/src/syscall/zsyscall_linux_amd64.go
deleted file mode 100644
index 2059271324..0000000000
--- a/contrib/go/_std_1.18/src/syscall/zsyscall_linux_amd64.go
+++ /dev/null
@@ -1,1655 +0,0 @@
-// mksyscall.pl -tags linux,amd64 syscall_linux.go syscall_linux_amd64.go
-// Code generated by the command above; DO NOT EDIT.
-
-//go:build linux && amd64
-
-package syscall
-
-import "unsafe"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func faccessat(dirfd int, path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fchmodat(dirfd int, path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0)
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func pipe2(p *[2]_C_int, flags int) (err error) {
- _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 unsafe.Pointer
- if len(buf) > 0 {
- _p1 = unsafe.Pointer(&buf[0])
- } else {
- _p1 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func unlinkat(dirfd int, path string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getcwd(buf []byte) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(buf) > 0 {
- _p0 = unsafe.Pointer(&buf[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
- r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
- wpid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
- _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(arg)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(source)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(target)
- if err != nil {
- return
- }
- var _p2 *byte
- _p2, err = BytePtrFromString(fstype)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Acct(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Adjtimex(buf *Timex) (state int, err error) {
- r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0)
- state = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chdir(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chroot(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Close(fd int) (err error) {
- _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Dup(oldfd int) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0)
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Dup3(oldfd int, newfd int, flags int) (err error) {
- _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func EpollCreate1(flag int) (fd int, err error) {
- r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0)
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
- _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fallocate(fd int, mode uint32, off int64, len int64) (err error) {
- _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchdir(fd int) (err error) {
- _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchmod(fd int, mode uint32) (err error) {
- _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fcntl(fd int, cmd int, arg int) (val int, err error) {
- r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
- val = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fdatasync(fd int) (err error) {
- _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Flock(fd int, how int) (err error) {
- _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fsync(fd int) (err error) {
- _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getdents(fd int, buf []byte) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(buf) > 0 {
- _p0 = unsafe.Pointer(&buf[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpgid(pid int) (pgid int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
- pgid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpid() (pid int) {
- r0, _ := rawSyscallNoError(SYS_GETPID, 0, 0, 0)
- pid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getppid() (ppid int) {
- r0, _ := rawSyscallNoError(SYS_GETPPID, 0, 0, 0)
- ppid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpriority(which int, who int) (prio int, err error) {
- r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
- prio = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getrusage(who int, rusage *Rusage) (err error) {
- _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Gettid() (tid int) {
- r0, _ := rawSyscallNoError(SYS_GETTID, 0, 0, 0)
- tid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getxattr(path string, attr string, dest []byte) (sz int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- var _p2 unsafe.Pointer
- if len(dest) > 0 {
- _p2 = unsafe.Pointer(&dest[0])
- } else {
- _p2 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(pathname)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask))
- watchdesc = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func InotifyInit1(flags int) (fd int, err error) {
- r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0)
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) {
- r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0)
- success = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Kill(pid int, sig Signal) (err error) {
- _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Klogctl(typ int, buf []byte) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(buf) > 0 {
- _p0 = unsafe.Pointer(&buf[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Listxattr(path string, dest []byte) (sz int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 unsafe.Pointer
- if len(dest) > 0 {
- _p1 = unsafe.Pointer(&dest[0])
- } else {
- _p1 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mkdirat(dirfd int, path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
- _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func PivotRoot(newroot string, putold string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(newroot)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(putold)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) {
- _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func read(fd int, p []byte) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Removexattr(path string, attr string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setdomainname(p []byte) (err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Sethostname(p []byte) (err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setpgid(pid int, pgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setsid() (pid int, err error) {
- r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
- pid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Settimeofday(tv *Timeval) (err error) {
- _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setpriority(which int, who int, prio int) (err error) {
- _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setxattr(path string, attr string, data []byte, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- var _p2 unsafe.Pointer
- if len(data) > 0 {
- _p2 = unsafe.Pointer(&data[0])
- } else {
- _p2 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Sync() {
- Syscall(SYS_SYNC, 0, 0, 0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Sysinfo(info *Sysinfo_t) (err error) {
- _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
- r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0)
- n = int64(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Tgkill(tgid int, tid int, sig Signal) (err error) {
- _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Times(tms *Tms) (ticks uintptr, err error) {
- r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0)
- ticks = uintptr(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Umask(mask int) (oldmask int) {
- r0, _ := rawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0)
- oldmask = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Uname(buf *Utsname) (err error) {
- _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Unmount(target string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(target)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Unshare(flags int) (err error) {
- _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func write(fd int, p []byte) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func exitThread(code int) (err error) {
- _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func readlen(fd int, p *byte, np int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, p *byte, np int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func munmap(addr uintptr, length uintptr) (err error) {
- _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Madvise(b []byte, advice int) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mprotect(b []byte, prot int) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mlock(b []byte) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Munlock(b []byte) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mlockall(flags int) (err error) {
- _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Munlockall() (err error) {
- _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Dup2(oldfd int, newfd int) (err error) {
- _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func EpollCreate(size int) (fd int, err error) {
- r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchown(fd int, uid int, gid int) (err error) {
- _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstat(fd int, stat *Stat_t) (err error) {
- _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstatfs(fd int, buf *Statfs_t) (err error) {
- _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Ftruncate(fd int, length int64) (err error) {
- _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getegid() (egid int) {
- r0, _ := rawSyscallNoError(SYS_GETEGID, 0, 0, 0)
- egid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Geteuid() (euid int) {
- r0, _ := rawSyscallNoError(SYS_GETEUID, 0, 0, 0)
- euid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getgid() (gid int) {
- r0, _ := rawSyscallNoError(SYS_GETGID, 0, 0, 0)
- gid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getrlimit(resource int, rlim *Rlimit) (err error) {
- _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getuid() (uid int) {
- r0, _ := rawSyscallNoError(SYS_GETUID, 0, 0, 0)
- uid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func InotifyInit() (fd int, err error) {
- r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0)
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Ioperm(from int, num int, on int) (err error) {
- _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Iopl(level int) (err error) {
- _, _, e1 := Syscall(SYS_IOPL, uintptr(level), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Listen(s int, n int) (err error) {
- _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pause() (err error) {
- _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pread(fd int, p []byte, offset int64) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Seek(fd int, offset int64, whence int) (off int64, err error) {
- r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
- off = int64(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
- r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
- r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
- written = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setfsgid(gid int) (err error) {
- _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setfsuid(uid int) (err error) {
- _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setrlimit(resource int, rlim *Rlimit) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Shutdown(fd int, how int) (err error) {
- _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) {
- r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
- n = int64(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Statfs(path string, buf *Statfs_t) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func SyncFileRange(fd int, off int64, n int64, flags int) (err error) {
- _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Truncate(path string, length int64) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Ustat(dev int, ubuf *Ustat_t) (err error) {
- _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
- r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
- _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
- _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
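-
-// For illustration (a sketch, not generated code): the exported counterparts
-// of the socket wrappers compose into the usual client sequence; the address
-// 127.0.0.1:8080 is an arbitrary example:
-//
-//	fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, 0)
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	defer syscall.Close(fd)
-//	sa := &syscall.SockaddrInet4{Port: 8080, Addr: [4]byte{127, 0, 0, 1}}
-//	if err := syscall.Connect(fd, sa); err != nil {
-//		log.Fatal(err)
-//	}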
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_NEWFSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getgroups(n int, list *_Gid_t) (nn int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
- nn = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
- _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
- _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func socket(domain int, typ int, proto int) (fd int, err error) {
- r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
- _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
- _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
- _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
- var _p0 unsafe.Pointer
- if len(buf) > 0 {
- _p0 = unsafe.Pointer(&buf[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
- r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset))
- xaddr = uintptr(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
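-
-// For illustration (a sketch, not generated code): the raw mmap above is
-// normally reached through the exported syscall.Mmap, which returns the
-// mapping as a byte slice; fd and length are assumed to describe an open,
-// readable file:
-//
-//	data, err := syscall.Mmap(fd, 0, length, syscall.PROT_READ, syscall.MAP_SHARED)
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	defer syscall.Munmap(data)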
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(events) > 0 {
- _p0 = unsafe.Pointer(&events[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
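-
-// For illustration (a sketch, not generated code; fd is an assumed descriptor
-// to watch for readability):
-//
-//	epfd, err := syscall.EpollCreate1(0)
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	ev := syscall.EpollEvent{Events: syscall.EPOLLIN, Fd: int32(fd)}
-//	if err := syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, fd, &ev); err != nil {
-//		log.Fatal(err)
-//	}
-//	events := make([]syscall.EpollEvent, 8)
-//	n, err := syscall.EpollWait(epfd, events, -1) // blocks until an event arrives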
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func futimesat(dirfd int, path string, times *[2]Timeval) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Utime(path string, buf *Utimbuf) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func utimes(path string, times *[2]Timeval) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
diff --git a/contrib/go/_std_1.18/src/text/template/doc.go b/contrib/go/_std_1.18/src/text/template/doc.go
deleted file mode 100644
index 10093881fb..0000000000
--- a/contrib/go/_std_1.18/src/text/template/doc.go
+++ /dev/null
@@ -1,465 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package template implements data-driven templates for generating textual output.
-
-To generate HTML output, see package html/template, which has the same interface
-as this package but automatically secures HTML output against certain attacks.
-
-Templates are executed by applying them to a data structure. Annotations in the
-template refer to elements of the data structure (typically a field of a struct
-or a key in a map) to control execution and derive values to be displayed.
-Execution of the template walks the structure and sets the cursor, represented
-by a period '.' and called "dot", to the value at the current location in the
-structure as execution proceeds.
-
-The input text for a template is UTF-8-encoded text in any format.
-"Actions"--data evaluations or control structures--are delimited by
-"{{" and "}}"; all text outside actions is copied to the output unchanged.
-Except for raw strings, actions may not span newlines, although comments can.
-
-Once parsed, a template may be executed safely in parallel, although if parallel
-executions share a Writer the output may be interleaved.
-
-Here is a trivial example that prints "17 items are made of wool".
-
- type Inventory struct {
- Material string
- Count uint
- }
- sweaters := Inventory{"wool", 17}
- tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
- if err != nil { panic(err) }
- err = tmpl.Execute(os.Stdout, sweaters)
- if err != nil { panic(err) }
-
-More intricate examples appear below.
-
-Text and spaces
-
-By default, all text between actions is copied verbatim when the template is
-executed. For example, the string " items are made of " in the example above
-appears on standard output when the program is run.
-
-However, to aid in formatting template source code, if an action's left
-delimiter (by default "{{") is followed immediately by a minus sign and white
-space, all trailing white space is trimmed from the immediately preceding text.
-Similarly, if the right delimiter ("}}") is preceded by white space and a minus
-sign, all leading white space is trimmed from the immediately following text.
-In these trim markers, the white space must be present:
-"{{- 3}}" is like "{{3}}" but trims the immediately preceding text, while
-"{{-3}}" parses as an action containing the number -3.
-
-For instance, when executing the template whose source is
-
- "{{23 -}} < {{- 45}}"
-
-the generated output would be
-
- "23<45"
-
-For this trimming, the definition of white space characters is the same as in Go:
-space, horizontal tab, carriage return, and newline.
-
-Actions
-
-Here is the list of actions. "Arguments" and "pipelines" are evaluations of
-data, defined in detail in the corresponding sections that follow.
-
-*/
-// {{/* a comment */}}
-// {{- /* a comment with white space trimmed from preceding and following text */ -}}
-// A comment; discarded. May contain newlines.
-// Comments do not nest and must start and end at the
-// delimiters, as shown here.
-/*
-
- {{pipeline}}
- The default textual representation (the same as would be
- printed by fmt.Print) of the value of the pipeline is copied
- to the output.
-
- {{if pipeline}} T1 {{end}}
- If the value of the pipeline is empty, no output is generated;
- otherwise, T1 is executed. The empty values are false, 0, any
- nil pointer or interface value, and any array, slice, map, or
- string of length zero.
- Dot is unaffected.
-
- {{if pipeline}} T1 {{else}} T0 {{end}}
- If the value of the pipeline is empty, T0 is executed;
- otherwise, T1 is executed. Dot is unaffected.
-
- {{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
- To simplify the appearance of if-else chains, the else action
- of an if may include another if directly; the effect is exactly
- the same as writing
- {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
-
- {{range pipeline}} T1 {{end}}
- The value of the pipeline must be an array, slice, map, or channel.
- If the value of the pipeline has length zero, nothing is output;
- otherwise, dot is set to the successive elements of the array,
- slice, or map and T1 is executed. If the value is a map and the
- keys are of basic type with a defined order, the elements will be
- visited in sorted key order.
-
- {{range pipeline}} T1 {{else}} T0 {{end}}
- The value of the pipeline must be an array, slice, map, or channel.
- If the value of the pipeline has length zero, dot is unaffected and
- T0 is executed; otherwise, dot is set to the successive elements
- of the array, slice, or map and T1 is executed.
-
- {{break}}
- The innermost {{range pipeline}} loop is ended early, stopping the
- current iteration and bypassing all remaining iterations.
-
- {{continue}}
- The current iteration of the innermost {{range pipeline}} loop is
- stopped, and the loop starts the next iteration.
-
- {{template "name"}}
- The template with the specified name is executed with nil data.
-
- {{template "name" pipeline}}
- The template with the specified name is executed with dot set
- to the value of the pipeline.
-
- {{block "name" pipeline}} T1 {{end}}
- A block is shorthand for defining a template
- {{define "name"}} T1 {{end}}
- and then executing it in place
- {{template "name" pipeline}}
- The typical use is to define a set of root templates that are
- then customized by redefining the block templates within.
-
- {{with pipeline}} T1 {{end}}
- If the value of the pipeline is empty, no output is generated;
- otherwise, dot is set to the value of the pipeline and T1 is
- executed.
-
- {{with pipeline}} T1 {{else}} T0 {{end}}
- If the value of the pipeline is empty, dot is unaffected and T0
- is executed; otherwise, dot is set to the value of the pipeline
- and T1 is executed.
-
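-For instance (an illustrative sketch, assuming dot holds a struct with a
-string field Name and a slice field Items), several of these actions
-combine naturally:
-
-	{{if .Items}}{{range .Items}}{{.}} {{end}}{{else}}no items{{end}}
-	{{with .Name}}owner: {{.}}{{end}}
-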
-Arguments
-
-An argument is a simple value, denoted by one of the following.
-
- - A boolean, string, character, integer, floating-point, imaginary
- or complex constant in Go syntax. These behave like Go's untyped
- constants. Note that, as in Go, whether a large integer constant
- overflows when assigned or passed to a function can depend on whether
- the host machine's ints are 32 or 64 bits.
- - The keyword nil, representing an untyped Go nil.
- - The character '.' (period):
- .
- The result is the value of dot.
- - A variable name, which is a (possibly empty) alphanumeric string
- preceded by a dollar sign, such as
- $piOver2
- or
- $
- The result is the value of the variable.
- Variables are described below.
- - The name of a field of the data, which must be a struct, preceded
- by a period, such as
- .Field
- The result is the value of the field. Field invocations may be
- chained:
- .Field1.Field2
- Fields can also be evaluated on variables, including chaining:
- $x.Field1.Field2
- - The name of a key of the data, which must be a map, preceded
- by a period, such as
- .Key
- The result is the map element value indexed by the key.
- Key invocations may be chained and combined with fields to any
- depth:
- .Field1.Key1.Field2.Key2
- Although the key must be an alphanumeric identifier, unlike with
-	  field names it does not need to start with an upper case letter.
- Keys can also be evaluated on variables, including chaining:
- $x.key1.key2
- - The name of a niladic method of the data, preceded by a period,
- such as
- .Method
- The result is the value of invoking the method with dot as the
- receiver, dot.Method(). Such a method must have one return value (of
- any type) or two return values, the second of which is an error.
- If it has two and the returned error is non-nil, execution terminates
- and an error is returned to the caller as the value of Execute.
- Method invocations may be chained and combined with fields and keys
- to any depth:
- .Field1.Key1.Method1.Field2.Key2.Method2
- Methods can also be evaluated on variables, including chaining:
- $x.Method1.Field
- - The name of a niladic function, such as
- fun
- The result is the value of invoking the function, fun(). The return
- types and values behave as in methods. Functions and function
- names are described below.
-	- A parenthesized instance of one of the above, for grouping. The result
- may be accessed by a field or map key invocation.
- print (.F1 arg1) (.F2 arg2)
- (.StructValuedMethod "arg").Field
-
-Arguments may evaluate to any type; if they are pointers the implementation
-automatically indirects to the base type when required.
-If an evaluation yields a function value, such as a function-valued
-field of a struct, the function is not invoked automatically, but it
-can be used as a truth value for an if action and the like. To invoke
-it, use the call function, defined below.
-
-Pipelines
-
-A pipeline is a possibly chained sequence of "commands". A command is a simple
-value (argument) or a function or method call, possibly with multiple arguments:
-
- Argument
- The result is the value of evaluating the argument.
- .Method [Argument...]
- The method can be alone or the last element of a chain but,
- unlike methods in the middle of a chain, it can take arguments.
- The result is the value of calling the method with the
- arguments:
- dot.Method(Argument1, etc.)
- functionName [Argument...]
- The result is the value of calling the function associated
- with the name:
- function(Argument1, etc.)
- Functions and function names are described below.
-
-A pipeline may be "chained" by separating a sequence of commands with pipeline
-characters '|'. In a chained pipeline, the result of each command is
-passed as the last argument of the following command. The output of the final
-command in the pipeline is the value of the pipeline.
-
-The output of a command will be either one value or two values, the second of
-which has type error. If that second value is present and evaluates to
-non-nil, execution terminates and the error is returned to the caller of
-Execute.
-
-Variables
-
-A pipeline inside an action may initialize a variable to capture the result.
-The initialization has syntax
-
- $variable := pipeline
-
-where $variable is the name of the variable. An action that declares a
-variable produces no output.
-
-Variables previously declared can also be assigned, using the syntax
-
- $variable = pipeline
-
-If a "range" action initializes a variable, the variable is set to the
-successive elements of the iteration. Also, a "range" may declare two
-variables, separated by a comma:
-
- range $index, $element := pipeline
-
-in which case $index and $element are set to the successive values of the
-array/slice index or map key and element, respectively. Note that if there is
-only one variable, it is assigned the element; this is opposite to the
-convention in Go range clauses.
-
-A variable's scope extends to the "end" action of the control structure ("if",
-"with", or "range") in which it is declared, or to the end of the template if
-there is no such control structure. A template invocation does not inherit
-variables from the point of its invocation.
-
-When execution begins, $ is set to the data argument passed to Execute, that is,
-to the starting value of dot.
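-
-For instance, assuming dot is a slice of strings, a "range" that declares
-two variables might read:
-
-	{{range $i, $s := .}}{{$i}}: {{$s}}
-	{{end}}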
-
-Examples
-
-Here are some example one-line templates demonstrating pipelines and variables.
-All produce the quoted word "output":
-
- {{"\"output\""}}
- A string constant.
- {{`"output"`}}
- A raw string constant.
- {{printf "%q" "output"}}
- A function call.
- {{"output" | printf "%q"}}
- A function call whose final argument comes from the previous
- command.
- {{printf "%q" (print "out" "put")}}
- A parenthesized argument.
- {{"put" | printf "%s%s" "out" | printf "%q"}}
- A more elaborate call.
- {{"output" | printf "%s" | printf "%q"}}
- A longer chain.
- {{with "output"}}{{printf "%q" .}}{{end}}
- A with action using dot.
- {{with $x := "output" | printf "%q"}}{{$x}}{{end}}
- A with action that creates and uses a variable.
- {{with $x := "output"}}{{printf "%q" $x}}{{end}}
- A with action that uses the variable in another action.
- {{with $x := "output"}}{{$x | printf "%q"}}{{end}}
- The same, but pipelined.
-
-Functions
-
-During execution functions are found in two function maps: first in the
-template, then in the global function map. By default, no functions are defined
-in the template but the Funcs method can be used to add them.
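-
-For instance (an illustrative sketch; the function name "upper" is arbitrary),
-a template-local function can be installed with Funcs before parsing:
-
-	t := template.Must(template.New("t").
-		Funcs(template.FuncMap{"upper": strings.ToUpper}).
-		Parse(`{{upper .}}`))
-	t.Execute(os.Stdout, "shout") // prints SHOUT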
-
-Predefined global functions are named as follows.
-
- and
- Returns the boolean AND of its arguments by returning the
- first empty argument or the last argument. That is,
- "and x y" behaves as "if x then y else x."
- Evaluation proceeds through the arguments left to right
- and returns when the result is determined.
- call
- Returns the result of calling the first argument, which
- must be a function, with the remaining arguments as parameters.
- Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
- Y is a func-valued field, map entry, or the like.
- The first argument must be the result of an evaluation
- that yields a value of function type (as distinct from
- a predefined function such as print). The function must
- return either one or two result values, the second of which
- is of type error. If the arguments don't match the function
- or the returned error value is non-nil, execution stops.
- html
- Returns the escaped HTML equivalent of the textual
- representation of its arguments. This function is unavailable
- in html/template, with a few exceptions.
- index
- Returns the result of indexing its first argument by the
- following arguments. Thus "index x 1 2 3" is, in Go syntax,
- x[1][2][3]. Each indexed item must be a map, slice, or array.
- slice
- slice returns the result of slicing its first argument by the
- remaining arguments. Thus "slice x 1 2" is, in Go syntax, x[1:2],
- while "slice x" is x[:], "slice x 1" is x[1:], and "slice x 1 2 3"
- is x[1:2:3]. The first argument must be a string, slice, or array.
- js
- Returns the escaped JavaScript equivalent of the textual
- representation of its arguments.
- len
- Returns the integer length of its argument.
- not
- Returns the boolean negation of its single argument.
- or
- Returns the boolean OR of its arguments by returning the
- first non-empty argument or the last argument, that is,
- "or x y" behaves as "if x then x else y".
- Evaluation proceeds through the arguments left to right
- and returns when the result is determined.
- print
- An alias for fmt.Sprint
- printf
- An alias for fmt.Sprintf
- println
- An alias for fmt.Sprintln
- urlquery
- Returns the escaped value of the textual representation of
- its arguments in a form suitable for embedding in a URL query.
- This function is unavailable in html/template, with a few
- exceptions.
-
-The boolean functions take any zero value to be false and a non-zero
-value to be true.
-
-There is also a set of binary comparison operators defined as
-functions:
-
- eq
- Returns the boolean truth of arg1 == arg2
- ne
- Returns the boolean truth of arg1 != arg2
- lt
- Returns the boolean truth of arg1 < arg2
- le
- Returns the boolean truth of arg1 <= arg2
- gt
- Returns the boolean truth of arg1 > arg2
- ge
- Returns the boolean truth of arg1 >= arg2
-
-For simpler multi-way equality tests, eq (only) accepts two or more
-arguments and compares the second and subsequent to the first,
-returning in effect
-
- arg1==arg2 || arg1==arg3 || arg1==arg4 ...
-
-(Unlike with || in Go, however, eq is a function call and all the
-arguments will be evaluated.)
-
-The comparison functions work on any values whose type Go defines as
-comparable. For basic types such as integers, the rules are relaxed:
-size and exact type are ignored, so any integer value, signed or unsigned,
-may be compared with any other integer value. (The arithmetic value is compared,
-not the bit pattern, so all negative integers are less than all unsigned integers.)
-However, as usual, one may not compare an int with a float32 and so on.
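-
-For example, assuming dot carries two integer fields N and Max:
-
-	{{if eq .N .Max}}full{{else if lt .N .Max}}room left{{end}}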
-
-Associated templates
-
-Each template is named by a string specified when it is created. Also, each
-template is associated with zero or more other templates that it may invoke by
-name; such associations are transitive and form a name space of templates.
-
-A template may use a template invocation to instantiate another associated
-template; see the explanation of the "template" action above. The name must be
-that of a template associated with the template that contains the invocation.
-
-Nested template definitions
-
-When parsing a template, another template may be defined and associated with the
-template being parsed. Template definitions must appear at the top level of the
-template, much like global variables in a Go program.
-
-The syntax of such definitions is to surround each template declaration with a
-"define" and "end" action.
-
-The define action names the template being created by providing a string
-constant. Here is a simple example:
-
- `{{define "T1"}}ONE{{end}}
- {{define "T2"}}TWO{{end}}
- {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
- {{template "T3"}}`
-
-This defines two templates, T1 and T2, and a third T3 that invokes the other two
-when it is executed. Finally it invokes T3. If executed this template will
-produce the text
-
- ONE TWO
-
-By construction, a template may reside in only one association. If it's
-necessary to have a template addressable from multiple associations, the
-template definition must be parsed multiple times to create distinct *Template
-values, or must be copied with the Clone or AddParseTree method.
-
-Parse may be called multiple times to assemble the various associated templates;
-see the ParseFiles and ParseGlob functions and methods for simple ways to parse
-related templates stored in files.
-
-A template may be executed directly or through ExecuteTemplate, which executes
-an associated template identified by name. To invoke our example above, we
-might write,
-
- err := tmpl.Execute(os.Stdout, "no data needed")
- if err != nil {
- log.Fatalf("execution failed: %s", err)
- }
-
-or to invoke a particular template explicitly by name,
-
- err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
- if err != nil {
- log.Fatalf("execution failed: %s", err)
- }
-
-*/
-package template
diff --git a/contrib/go/_std_1.18/src/text/template/funcs.go b/contrib/go/_std_1.18/src/text/template/funcs.go
deleted file mode 100644
index dca5ed28db..0000000000
--- a/contrib/go/_std_1.18/src/text/template/funcs.go
+++ /dev/null
@@ -1,753 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "net/url"
- "reflect"
- "strings"
- "sync"
- "unicode"
- "unicode/utf8"
-)
-
-// FuncMap is the type of the map defining the mapping from names to functions.
-// Each function must have either a single return value, or two return values of
-// which the second has type error. In that case, if the second (error)
-// return value evaluates to non-nil during execution, execution terminates and
-// Execute returns that error.
-//
-// Errors returned by Execute wrap the underlying error; call errors.As to
-// uncover them.
-//
-// When template execution invokes a function with an argument list, that list
-// must be assignable to the function's parameter types. Functions meant to
-// apply to arguments of arbitrary type can use parameters of type interface{} or
-// of type reflect.Value. Similarly, functions meant to return a result of arbitrary
-// type can return interface{} or reflect.Value.
-type FuncMap map[string]any
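-
-// For illustration (a sketch, not part of the original file): a two-result
-// function satisfies the contract above, for example
-//
-//	template.FuncMap{
-//		"atoi": strconv.Atoi, // func(string) (int, error); a non-nil error stops execution
-//	}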
-
-// builtins returns the FuncMap.
-// It is not a global variable so the linker can dead code eliminate
-// more when this isn't called. See golang.org/issue/36021.
-// TODO: revert this back to a global map once golang.org/issue/2559 is fixed.
-func builtins() FuncMap {
- return FuncMap{
- "and": and,
- "call": call,
- "html": HTMLEscaper,
- "index": index,
- "slice": slice,
- "js": JSEscaper,
- "len": length,
- "not": not,
- "or": or,
- "print": fmt.Sprint,
- "printf": fmt.Sprintf,
- "println": fmt.Sprintln,
- "urlquery": URLQueryEscaper,
-
- // Comparisons
- "eq": eq, // ==
- "ge": ge, // >=
- "gt": gt, // >
- "le": le, // <=
- "lt": lt, // <
- "ne": ne, // !=
- }
-}
-
-var builtinFuncsOnce struct {
- sync.Once
- v map[string]reflect.Value
-}
-
-// builtinFuncsOnce lazily computes & caches the builtinFuncs map.
-// TODO: revert this back to a global map once golang.org/issue/2559 is fixed.
-func builtinFuncs() map[string]reflect.Value {
- builtinFuncsOnce.Do(func() {
- builtinFuncsOnce.v = createValueFuncs(builtins())
- })
- return builtinFuncsOnce.v
-}
-
-// createValueFuncs turns a FuncMap into a map[string]reflect.Value
-func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
- m := make(map[string]reflect.Value)
- addValueFuncs(m, funcMap)
- return m
-}
-
-// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
-func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
- for name, fn := range in {
- if !goodName(name) {
- panic(fmt.Errorf("function name %q is not a valid identifier", name))
- }
- v := reflect.ValueOf(fn)
- if v.Kind() != reflect.Func {
- panic("value for " + name + " not a function")
- }
- if !goodFunc(v.Type()) {
- panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
- }
- out[name] = v
- }
-}
-
-// addFuncs adds to values the functions in funcs. It does no checking of the input -
-// call addValueFuncs first.
-func addFuncs(out, in FuncMap) {
- for name, fn := range in {
- out[name] = fn
- }
-}
-
-// goodFunc reports whether the function or method has the right result signature.
-func goodFunc(typ reflect.Type) bool {
- // We allow functions with 1 result or 2 results where the second is an error.
- switch {
- case typ.NumOut() == 1:
- return true
- case typ.NumOut() == 2 && typ.Out(1) == errorType:
- return true
- }
- return false
-}
-
-// goodName reports whether the function name is a valid identifier.
-func goodName(name string) bool {
- if name == "" {
- return false
- }
- for i, r := range name {
- switch {
- case r == '_':
- case i == 0 && !unicode.IsLetter(r):
- return false
- case !unicode.IsLetter(r) && !unicode.IsDigit(r):
- return false
- }
- }
- return true
-}
-
-// findFunction looks for a function in the template's function map and then in the global map.
-func findFunction(name string, tmpl *Template) (v reflect.Value, isBuiltin, ok bool) {
- if tmpl != nil && tmpl.common != nil {
- tmpl.muFuncs.RLock()
- defer tmpl.muFuncs.RUnlock()
- if fn := tmpl.execFuncs[name]; fn.IsValid() {
- return fn, false, true
- }
- }
- if fn := builtinFuncs()[name]; fn.IsValid() {
- return fn, true, true
- }
- return reflect.Value{}, false, false
-}
-
-// prepareArg checks if value can be used as an argument of type argType, and
-// converts an invalid value to the appropriate zero value if possible.
-func prepareArg(value reflect.Value, argType reflect.Type) (reflect.Value, error) {
- if !value.IsValid() {
- if !canBeNil(argType) {
- return reflect.Value{}, fmt.Errorf("value is nil; should be of type %s", argType)
- }
- value = reflect.Zero(argType)
- }
- if value.Type().AssignableTo(argType) {
- return value, nil
- }
- if intLike(value.Kind()) && intLike(argType.Kind()) && value.Type().ConvertibleTo(argType) {
- value = value.Convert(argType)
- return value, nil
- }
- return reflect.Value{}, fmt.Errorf("value has type %s; should be %s", value.Type(), argType)
-}
-
-func intLike(typ reflect.Kind) bool {
- switch typ {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return true
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return true
- }
- return false
-}
-
-// indexArg checks if a reflect.Value can be used as an index, and converts it to int if possible.
-func indexArg(index reflect.Value, cap int) (int, error) {
- var x int64
- switch index.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- x = index.Int()
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- x = int64(index.Uint())
- case reflect.Invalid:
- return 0, fmt.Errorf("cannot index slice/array with nil")
- default:
- return 0, fmt.Errorf("cannot index slice/array with type %s", index.Type())
- }
- if x < 0 || int(x) < 0 || int(x) > cap {
- return 0, fmt.Errorf("index out of range: %d", x)
- }
- return int(x), nil
-}
-
-// Indexing.
-
-// index returns the result of indexing its first argument by the following
-// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
-// indexed item must be a map, slice, or array.
-func index(item reflect.Value, indexes ...reflect.Value) (reflect.Value, error) {
- item = indirectInterface(item)
- if !item.IsValid() {
- return reflect.Value{}, fmt.Errorf("index of untyped nil")
- }
- for _, index := range indexes {
- index = indirectInterface(index)
- var isNil bool
- if item, isNil = indirect(item); isNil {
- return reflect.Value{}, fmt.Errorf("index of nil pointer")
- }
- switch item.Kind() {
- case reflect.Array, reflect.Slice, reflect.String:
- x, err := indexArg(index, item.Len())
- if err != nil {
- return reflect.Value{}, err
- }
- item = item.Index(x)
- case reflect.Map:
- index, err := prepareArg(index, item.Type().Key())
- if err != nil {
- return reflect.Value{}, err
- }
- if x := item.MapIndex(index); x.IsValid() {
- item = x
- } else {
- item = reflect.Zero(item.Type().Elem())
- }
- case reflect.Invalid:
- // the loop holds invariant: item.IsValid()
- panic("unreachable")
- default:
- return reflect.Value{}, fmt.Errorf("can't index item of type %s", item.Type())
- }
- }
- return item, nil
-}
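-
-// In a template, index is typically invoked as (a sketch; the field names
-// are arbitrary):
-//
-//	{{index .Matrix 1 2}}   // .Matrix[1][2]
-//	{{index .Labels "key"}} // map lookup; a missing key yields the zero value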
-
-// Slicing.
-
-// slice returns the result of slicing its first argument by the remaining
-// arguments. Thus "slice x 1 2" is, in Go syntax, x[1:2], while "slice x"
-// is x[:], "slice x 1" is x[1:], and "slice x 1 2 3" is x[1:2:3]. The first
-// argument must be a string, slice, or array.
-func slice(item reflect.Value, indexes ...reflect.Value) (reflect.Value, error) {
- item = indirectInterface(item)
- if !item.IsValid() {
- return reflect.Value{}, fmt.Errorf("slice of untyped nil")
- }
- if len(indexes) > 3 {
- return reflect.Value{}, fmt.Errorf("too many slice indexes: %d", len(indexes))
- }
- var cap int
- switch item.Kind() {
- case reflect.String:
- if len(indexes) == 3 {
- return reflect.Value{}, fmt.Errorf("cannot 3-index slice a string")
- }
- cap = item.Len()
- case reflect.Array, reflect.Slice:
- cap = item.Cap()
- default:
- return reflect.Value{}, fmt.Errorf("can't slice item of type %s", item.Type())
- }
- // set default values for cases item[:], item[i:].
- idx := [3]int{0, item.Len()}
- for i, index := range indexes {
- x, err := indexArg(index, cap)
- if err != nil {
- return reflect.Value{}, err
- }
- idx[i] = x
- }
- // given item[i:j], make sure i <= j.
- if idx[0] > idx[1] {
- return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[0], idx[1])
- }
- if len(indexes) < 3 {
- return item.Slice(idx[0], idx[1]), nil
- }
- // given item[i:j:k], make sure i <= j <= k.
- if idx[1] > idx[2] {
- return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[1], idx[2])
- }
- return item.Slice3(idx[0], idx[1], idx[2]), nil
-}
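-
-// In a template (a sketch; .Items is an assumed slice-valued field):
-//
-//	{{slice .Items 1 3}} // .Items[1:3]
-//	{{slice "hello" 1}}  // "ello"; strings take at most two indexes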
-
-// Length
-
-// length returns the length of the item, with an error if it has no defined length.
-func length(item reflect.Value) (int, error) {
- item, isNil := indirect(item)
- if isNil {
- return 0, fmt.Errorf("len of nil pointer")
- }
- switch item.Kind() {
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
- return item.Len(), nil
- }
- return 0, fmt.Errorf("len of type %s", item.Type())
-}
-
-// Function invocation
-
-// call returns the result of evaluating the first argument as a function.
-// The function must return 1 result, or 2 results, the second of which is an error.
-func call(fn reflect.Value, args ...reflect.Value) (reflect.Value, error) {
- fn = indirectInterface(fn)
- if !fn.IsValid() {
- return reflect.Value{}, fmt.Errorf("call of nil")
- }
- typ := fn.Type()
- if typ.Kind() != reflect.Func {
- return reflect.Value{}, fmt.Errorf("non-function of type %s", typ)
- }
- if !goodFunc(typ) {
- return reflect.Value{}, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
- }
- numIn := typ.NumIn()
- var dddType reflect.Type
- if typ.IsVariadic() {
- if len(args) < numIn-1 {
- return reflect.Value{}, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
- }
- dddType = typ.In(numIn - 1).Elem()
- } else {
- if len(args) != numIn {
- return reflect.Value{}, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
- }
- }
- argv := make([]reflect.Value, len(args))
- for i, arg := range args {
- arg = indirectInterface(arg)
- // Compute the expected type. Clumsy because of variadics.
- argType := dddType
- if !typ.IsVariadic() || i < numIn-1 {
- argType = typ.In(i)
- }
-
- var err error
- if argv[i], err = prepareArg(arg, argType); err != nil {
- return reflect.Value{}, fmt.Errorf("arg %d: %w", i, err)
- }
- }
- return safeCall(fn, argv)
-}
-
-// safeCall runs fun.Call(args), and returns the resulting value and error, if
-// any. If the call panics, the panic value is returned as an error.
-func safeCall(fun reflect.Value, args []reflect.Value) (val reflect.Value, err error) {
- defer func() {
- if r := recover(); r != nil {
- if e, ok := r.(error); ok {
- err = e
- } else {
- err = fmt.Errorf("%v", r)
- }
- }
- }()
- ret := fun.Call(args)
- if len(ret) == 2 && !ret[1].IsNil() {
- return ret[0], ret[1].Interface().(error)
- }
- return ret[0], nil
-}
-
-// Boolean logic.
-
-func truth(arg reflect.Value) bool {
- t, _ := isTrue(indirectInterface(arg))
- return t
-}
-
-// and computes the Boolean AND of its arguments, returning
-// the first false argument it encounters, or the last argument.
-func and(arg0 reflect.Value, args ...reflect.Value) reflect.Value {
- panic("unreachable") // implemented as a special case in evalCall
-}
-
-// or computes the Boolean OR of its arguments, returning
-// the first true argument it encounters, or the last argument.
-func or(arg0 reflect.Value, args ...reflect.Value) reflect.Value {
- panic("unreachable") // implemented as a special case in evalCall
-}
-
-// not returns the Boolean negation of its argument.
-func not(arg reflect.Value) bool {
- return !truth(arg)
-}
-
-// Comparison.
-
-// TODO: Perhaps allow comparison between signed and unsigned integers.
-
-var (
- errBadComparisonType = errors.New("invalid type for comparison")
- errBadComparison = errors.New("incompatible types for comparison")
- errNoComparison = errors.New("missing argument for comparison")
-)
-
-type kind int
-
-const (
- invalidKind kind = iota
- boolKind
- complexKind
- intKind
- floatKind
- stringKind
- uintKind
-)
-
-func basicKind(v reflect.Value) (kind, error) {
- switch v.Kind() {
- case reflect.Bool:
- return boolKind, nil
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return intKind, nil
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return uintKind, nil
- case reflect.Float32, reflect.Float64:
- return floatKind, nil
- case reflect.Complex64, reflect.Complex128:
- return complexKind, nil
- case reflect.String:
- return stringKind, nil
- }
- return invalidKind, errBadComparisonType
-}
-
-// eq evaluates the comparison a == b || a == c || ...
-func eq(arg1 reflect.Value, arg2 ...reflect.Value) (bool, error) {
- arg1 = indirectInterface(arg1)
- if arg1 != zero {
- if t1 := arg1.Type(); !t1.Comparable() {
- return false, fmt.Errorf("uncomparable type %s: %v", t1, arg1)
- }
- }
- if len(arg2) == 0 {
- return false, errNoComparison
- }
- k1, _ := basicKind(arg1)
- for _, arg := range arg2 {
- arg = indirectInterface(arg)
- k2, _ := basicKind(arg)
- truth := false
- if k1 != k2 {
- // Special case: Can compare integer values regardless of type's sign.
- switch {
- case k1 == intKind && k2 == uintKind:
- truth = arg1.Int() >= 0 && uint64(arg1.Int()) == arg.Uint()
- case k1 == uintKind && k2 == intKind:
- truth = arg.Int() >= 0 && arg1.Uint() == uint64(arg.Int())
- default:
- if arg1 != zero && arg != zero {
- return false, errBadComparison
- }
- }
- } else {
- switch k1 {
- case boolKind:
- truth = arg1.Bool() == arg.Bool()
- case complexKind:
- truth = arg1.Complex() == arg.Complex()
- case floatKind:
- truth = arg1.Float() == arg.Float()
- case intKind:
- truth = arg1.Int() == arg.Int()
- case stringKind:
- truth = arg1.String() == arg.String()
- case uintKind:
- truth = arg1.Uint() == arg.Uint()
- default:
- if arg == zero || arg1 == zero {
- truth = arg1 == arg
- } else {
- if t2 := arg.Type(); !t2.Comparable() {
- return false, fmt.Errorf("uncomparable type %s: %v", t2, arg)
- }
- truth = arg1.Interface() == arg.Interface()
- }
- }
- }
- if truth {
- return true, nil
- }
- }
- return false, nil
-}
-
-// ne evaluates the comparison a != b.
-func ne(arg1, arg2 reflect.Value) (bool, error) {
- // != is the inverse of ==.
- equal, err := eq(arg1, arg2)
- return !equal, err
-}
-
-// lt evaluates the comparison a < b.
-func lt(arg1, arg2 reflect.Value) (bool, error) {
- arg1 = indirectInterface(arg1)
- k1, err := basicKind(arg1)
- if err != nil {
- return false, err
- }
- arg2 = indirectInterface(arg2)
- k2, err := basicKind(arg2)
- if err != nil {
- return false, err
- }
- truth := false
- if k1 != k2 {
- // Special case: Can compare integer values regardless of type's sign.
- switch {
- case k1 == intKind && k2 == uintKind:
- truth = arg1.Int() < 0 || uint64(arg1.Int()) < arg2.Uint()
- case k1 == uintKind && k2 == intKind:
- truth = arg2.Int() >= 0 && arg1.Uint() < uint64(arg2.Int())
- default:
- return false, errBadComparison
- }
- } else {
- switch k1 {
- case boolKind, complexKind:
- return false, errBadComparisonType
- case floatKind:
- truth = arg1.Float() < arg2.Float()
- case intKind:
- truth = arg1.Int() < arg2.Int()
- case stringKind:
- truth = arg1.String() < arg2.String()
- case uintKind:
- truth = arg1.Uint() < arg2.Uint()
- default:
- panic("invalid kind")
- }
- }
- return truth, nil
-}
-
-// le evaluates the comparison a <= b.
-func le(arg1, arg2 reflect.Value) (bool, error) {
- // <= is < or ==.
- lessThan, err := lt(arg1, arg2)
- if lessThan || err != nil {
- return lessThan, err
- }
- return eq(arg1, arg2)
-}
-
-// gt evaluates the comparison a > b.
-func gt(arg1, arg2 reflect.Value) (bool, error) {
- // > is the inverse of <=.
- lessOrEqual, err := le(arg1, arg2)
- if err != nil {
- return false, err
- }
- return !lessOrEqual, nil
-}
-
-// ge evaluates the comparison a >= b.
-func ge(arg1, arg2 reflect.Value) (bool, error) {
- // >= is the inverse of <.
- lessThan, err := lt(arg1, arg2)
- if err != nil {
- return false, err
- }
- return !lessThan, nil
-}
-
-// HTML escaping.
-
-var (
- htmlQuot = []byte("&#34;") // shorter than "&quot;"
- htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
- htmlAmp = []byte("&amp;")
- htmlLt = []byte("&lt;")
- htmlGt = []byte("&gt;")
- htmlNull = []byte("\uFFFD")
-)
-
-// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
-func HTMLEscape(w io.Writer, b []byte) {
- last := 0
- for i, c := range b {
- var html []byte
- switch c {
- case '\000':
- html = htmlNull
- case '"':
- html = htmlQuot
- case '\'':
- html = htmlApos
- case '&':
- html = htmlAmp
- case '<':
- html = htmlLt
- case '>':
- html = htmlGt
- default:
- continue
- }
- w.Write(b[last:i])
- w.Write(html)
- last = i + 1
- }
- w.Write(b[last:])
-}
-
-// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
-func HTMLEscapeString(s string) string {
- // Avoid allocation if we can.
- if !strings.ContainsAny(s, "'\"&<>\000") {
- return s
- }
- var b bytes.Buffer
- HTMLEscape(&b, []byte(s))
- return b.String()
-}
-
-// HTMLEscaper returns the escaped HTML equivalent of the textual
-// representation of its arguments.
-func HTMLEscaper(args ...any) string {
- return HTMLEscapeString(evalArgs(args))
-}
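HTMLEscape and HTMLEscapeString replace exactly the six characters listed above (plus NUL) and power the {{html}} pipeline function. A short usage sketch:

package main

import (
	"fmt"
	"text/template"
)

func main() {
	// The ContainsAny fast path returns the input unchanged when no
	// special character occurs.
	fmt.Println(template.HTMLEscapeString(`<a href="x">O'Brien & co</a>`))
	// Output: &lt;a href=&#34;x&#34;&gt;O&#39;Brien &amp; co&lt;/a&gt;
}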
-
-// JavaScript escaping.
-
-var (
- jsLowUni = []byte(`\u00`)
- hex = []byte("0123456789ABCDEF")
-
- jsBackslash = []byte(`\\`)
- jsApos = []byte(`\'`)
- jsQuot = []byte(`\"`)
- jsLt = []byte(`\u003C`)
- jsGt = []byte(`\u003E`)
- jsAmp = []byte(`\u0026`)
- jsEq = []byte(`\u003D`)
-)
-
-// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
-func JSEscape(w io.Writer, b []byte) {
- last := 0
- for i := 0; i < len(b); i++ {
- c := b[i]
-
- if !jsIsSpecial(rune(c)) {
- // fast path: nothing to do
- continue
- }
- w.Write(b[last:i])
-
- if c < utf8.RuneSelf {
- // Quotes, slashes and angle brackets get quoted.
- // Control characters get written as \u00XX.
- switch c {
- case '\\':
- w.Write(jsBackslash)
- case '\'':
- w.Write(jsApos)
- case '"':
- w.Write(jsQuot)
- case '<':
- w.Write(jsLt)
- case '>':
- w.Write(jsGt)
- case '&':
- w.Write(jsAmp)
- case '=':
- w.Write(jsEq)
- default:
- w.Write(jsLowUni)
- t, b := c>>4, c&0x0f
- w.Write(hex[t : t+1])
- w.Write(hex[b : b+1])
- }
- } else {
- // Unicode rune.
- r, size := utf8.DecodeRune(b[i:])
- if unicode.IsPrint(r) {
- w.Write(b[i : i+size])
- } else {
- fmt.Fprintf(w, "\\u%04X", r)
- }
- i += size - 1
- }
- last = i + 1
- }
- w.Write(b[last:])
-}
-
-// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
-func JSEscapeString(s string) string {
- // Avoid allocation if we can.
- if strings.IndexFunc(s, jsIsSpecial) < 0 {
- return s
- }
- var b bytes.Buffer
- JSEscape(&b, []byte(s))
- return b.String()
-}
-
-func jsIsSpecial(r rune) bool {
- switch r {
- case '\\', '\'', '"', '<', '>', '&', '=':
- return true
- }
- return r < ' ' || utf8.RuneSelf <= r
-}
-
-// JSEscaper returns the escaped JavaScript equivalent of the textual
-// representation of its arguments.
-func JSEscaper(args ...any) string {
- return JSEscapeString(evalArgs(args))
-}
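JSEscape backslash-escapes quotes, \u-escapes HTML-significant punctuation and control bytes, and passes printable runes through, so the result is safe inside a <script> string literal. For example:

package main

import (
	"fmt"
	"text/template"
)

func main() {
	fmt.Println(template.JSEscapeString(`alert("hi & <bye>")`))
	// Output: alert(\"hi \u0026 \u003Cbye\u003E\")
}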
-
-// URLQueryEscaper returns the escaped value of the textual representation of
-// its arguments in a form suitable for embedding in a URL query.
-func URLQueryEscaper(args ...any) string {
- return url.QueryEscape(evalArgs(args))
-}
-
-// evalArgs formats the list of arguments into a string. It is therefore equivalent to
-// fmt.Sprint(args...)
-// except that each argument is indirected (if a pointer), as required,
-// using the same rules as the default string evaluation during template
-// execution.
-func evalArgs(args []any) string {
- ok := false
- var s string
- // Fast path for simple common case.
- if len(args) == 1 {
- s, ok = args[0].(string)
- }
- if !ok {
- for i, arg := range args {
- a, ok := printableValue(reflect.ValueOf(arg))
- if ok {
- args[i] = a
- } // else let fmt do its thing
- }
- s = fmt.Sprint(args...)
- }
- return s
-}
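URLQueryEscaper is url.QueryEscape applied to the fmt.Sprint-style string built by evalArgs, so a single string argument takes the fast path straight into escaping:

package main

import (
	"fmt"
	"text/template"
)

func main() {
	// Spaces become '+', and '&'/'=' are percent-encoded, as in any
	// application/x-www-form-urlencoded query component.
	fmt.Println(template.URLQueryEscaper("a b&c=d")) // a+b%26c%3Dd
}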
diff --git a/contrib/go/_std_1.18/src/text/template/helper.go b/contrib/go/_std_1.18/src/text/template/helper.go
deleted file mode 100644
index 57905e613a..0000000000
--- a/contrib/go/_std_1.18/src/text/template/helper.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Helper functions to make constructing templates easier.
-
-package template
-
-import (
- "fmt"
- "io/fs"
- "os"
- "path"
- "path/filepath"
-)
-
-// Functions and methods to parse templates.
-
-// Must is a helper that wraps a call to a function returning (*Template, error)
-// and panics if the error is non-nil. It is intended for use in variable
-// initializations such as
-// var t = template.Must(template.New("name").Parse("text"))
-func Must(t *Template, err error) *Template {
- if err != nil {
- panic(err)
- }
- return t
-}
-
-// ParseFiles creates a new Template and parses the template definitions from
-// the named files. The returned template's name will have the base name and
-// parsed contents of the first file. There must be at least one file.
-// If an error occurs, parsing stops and the returned *Template is nil.
-//
-// When parsing multiple files with the same name in different directories,
-// the last one mentioned will be the one that results.
-// For instance, ParseFiles("a/foo", "b/foo") stores "b/foo" as the template
-// named "foo", while "a/foo" is unavailable.
-func ParseFiles(filenames ...string) (*Template, error) {
- return parseFiles(nil, readFileOS, filenames...)
-}
-
-// ParseFiles parses the named files and associates the resulting templates with
-// t. If an error occurs, parsing stops and the returned template is nil;
-// otherwise it is t. There must be at least one file.
-// Since the templates created by ParseFiles are named by the base
-// names of the argument files, t should usually have the name of one
-// of the (base) names of the files. If it does not, depending on t's
-// contents before calling ParseFiles, t.Execute may fail. In that
-// case use t.ExecuteTemplate to execute a valid template.
-//
-// When parsing multiple files with the same name in different directories,
-// the last one mentioned will be the one that results.
-func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
- t.init()
- return parseFiles(t, readFileOS, filenames...)
-}
-
-// parseFiles is the helper for the method and function. If the argument
-// template is nil, it is created from the first file.
-func parseFiles(t *Template, readFile func(string) (string, []byte, error), filenames ...string) (*Template, error) {
- if len(filenames) == 0 {
- // Not really a problem, but be consistent.
- return nil, fmt.Errorf("template: no files named in call to ParseFiles")
- }
- for _, filename := range filenames {
- name, b, err := readFile(filename)
- if err != nil {
- return nil, err
- }
- s := string(b)
- // First template becomes return value if not already defined,
- // and we use that one for subsequent New calls to associate
- // all the templates together. Also, if this file has the same name
- // as t, this file becomes the contents of t, so
- // t, err := New(name).Funcs(xxx).ParseFiles(name)
- // works. Otherwise we create a new template associated with t.
- var tmpl *Template
- if t == nil {
- t = New(name)
- }
- if name == t.Name() {
- tmpl = t
- } else {
- tmpl = t.New(name)
- }
- _, err = tmpl.Parse(s)
- if err != nil {
- return nil, err
- }
- }
- return t, nil
-}
-
-// ParseGlob creates a new Template and parses the template definitions from
-// the files identified by the pattern. The files are matched according to the
-// semantics of filepath.Match, and the pattern must match at least one file.
-// The returned template will have the (base) name and (parsed) contents of the
-// first file matched by the pattern. ParseGlob is equivalent to calling
-// ParseFiles with the list of files matched by the pattern.
-//
-// When parsing multiple files with the same name in different directories,
-// the last one mentioned will be the one that results.
-func ParseGlob(pattern string) (*Template, error) {
- return parseGlob(nil, pattern)
-}
-
-// ParseGlob parses the template definitions in the files identified by the
-// pattern and associates the resulting templates with t. The files are matched
-// according to the semantics of filepath.Match, and the pattern must match at
-// least one file. ParseGlob is equivalent to calling t.ParseFiles with the
-// list of files matched by the pattern.
-//
-// When parsing multiple files with the same name in different directories,
-// the last one mentioned will be the one that results.
-func (t *Template) ParseGlob(pattern string) (*Template, error) {
- t.init()
- return parseGlob(t, pattern)
-}
-
-// parseGlob is the implementation of the function and method ParseGlob.
-func parseGlob(t *Template, pattern string) (*Template, error) {
- filenames, err := filepath.Glob(pattern)
- if err != nil {
- return nil, err
- }
- if len(filenames) == 0 {
- return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
- }
- return parseFiles(t, readFileOS, filenames...)
-}
-
-// ParseFS is like ParseFiles or ParseGlob but reads from the file system fsys
-// instead of the host operating system's file system.
-// It accepts a list of glob patterns.
-// (Note that most file names serve as glob patterns matching only themselves.)
-func ParseFS(fsys fs.FS, patterns ...string) (*Template, error) {
- return parseFS(nil, fsys, patterns)
-}
-
-// ParseFS is like ParseFiles or ParseGlob but reads from the file system fsys
-// instead of the host operating system's file system.
-// It accepts a list of glob patterns.
-// (Note that most file names serve as glob patterns matching only themselves.)
-func (t *Template) ParseFS(fsys fs.FS, patterns ...string) (*Template, error) {
- t.init()
- return parseFS(t, fsys, patterns)
-}
-
-func parseFS(t *Template, fsys fs.FS, patterns []string) (*Template, error) {
- var filenames []string
- for _, pattern := range patterns {
- list, err := fs.Glob(fsys, pattern)
- if err != nil {
- return nil, err
- }
- if len(list) == 0 {
- return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
- }
- filenames = append(filenames, list...)
- }
- return parseFiles(t, readFileFS(fsys), filenames...)
-}
-
-func readFileOS(file string) (name string, b []byte, err error) {
- name = filepath.Base(file)
- b, err = os.ReadFile(file)
- return
-}
-
-func readFileFS(fsys fs.FS) func(string) (string, []byte, error) {
- return func(file string) (name string, b []byte, err error) {
- name = path.Base(file)
- b, err = fs.ReadFile(fsys, file)
- return
- }
-}
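Must, ParseFiles, ParseGlob, and ParseFS all funnel into the parseFiles helper above. A typical call chain looks like the sketch below; the templates/ directory and index.tmpl are placeholder names:

package main

import (
	"embed"
	"os"
	"text/template"
)

//go:embed templates/*.tmpl
var content embed.FS

func main() {
	// Must panics on a parse error, the usual pattern for templates
	// initialized at package level; ParseFS reads from the embedded FS.
	t := template.Must(template.ParseFS(content, "templates/*.tmpl"))
	_ = t.ExecuteTemplate(os.Stdout, "index.tmpl", nil)
}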
diff --git a/contrib/go/_std_1.18/src/text/template/option.go b/contrib/go/_std_1.18/src/text/template/option.go
deleted file mode 100644
index 1035afad72..0000000000
--- a/contrib/go/_std_1.18/src/text/template/option.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file contains the code to handle template options.
-
-package template
-
-import "strings"
-
-// missingKeyAction defines how to respond to indexing a map with a key that is not present.
-type missingKeyAction int
-
-const (
- mapInvalid missingKeyAction = iota // Return an invalid reflect.Value.
- mapZeroValue // Return the zero value for the map element.
- mapError // Error out
-)
-
-type option struct {
- missingKey missingKeyAction
-}
-
-// Option sets options for the template. Options are described by
-// strings, either a simple string or "key=value". There can be at
-// most one equals sign in an option string. If the option string
-// is unrecognized or otherwise invalid, Option panics.
-//
-// Known options:
-//
-// missingkey: Control the behavior during execution if a map is
-// indexed with a key that is not present in the map.
-// "missingkey=default" or "missingkey=invalid"
-// The default behavior: Do nothing and continue execution.
-// If printed, the result of the index operation is the string
-// "<no value>".
-// "missingkey=zero"
-// The operation returns the zero value for the map type's element.
-// "missingkey=error"
-// Execution stops immediately with an error.
-//
-func (t *Template) Option(opt ...string) *Template {
- t.init()
- for _, s := range opt {
- t.setOption(s)
- }
- return t
-}
-
-func (t *Template) setOption(opt string) {
- if opt == "" {
- panic("empty option string")
- }
- // key=value
- if key, value, ok := strings.Cut(opt, "="); ok {
- switch key {
- case "missingkey":
- switch value {
- case "invalid", "default":
- t.option.missingKey = mapInvalid
- return
- case "zero":
- t.option.missingKey = mapZeroValue
- return
- case "error":
- t.option.missingKey = mapError
- return
- }
- }
- }
- panic("unrecognized option: " + opt)
-}
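missingkey is the only documented option key; a sketch of how its values change execution for an absent map key:

package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	t := template.Must(template.New("m").Parse("{{.b}}\n"))
	data := map[string]string{"a": "x"}

	// Default (mapInvalid): the missing key prints as "<no value>".
	_ = t.Execute(os.Stdout, data)

	// "missingkey=error" (mapError): execution stops with an error.
	err := t.Option("missingkey=error").Execute(os.Stdout, data)
	fmt.Println(err != nil) // true
}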
diff --git a/contrib/go/_std_1.18/src/text/template/parse/lex.go b/contrib/go/_std_1.18/src/text/template/parse/lex.go
deleted file mode 100644
index 40d0411121..0000000000
--- a/contrib/go/_std_1.18/src/text/template/parse/lex.go
+++ /dev/null
@@ -1,682 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package parse
-
-import (
- "fmt"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-// item represents a token or text string returned from the scanner.
-type item struct {
- typ itemType // The type of this item.
- pos Pos // The starting position, in bytes, of this item in the input string.
- val string // The value of this item.
- line int // The line number at the start of this item.
-}
-
-func (i item) String() string {
- switch {
- case i.typ == itemEOF:
- return "EOF"
- case i.typ == itemError:
- return i.val
- case i.typ > itemKeyword:
- return fmt.Sprintf("<%s>", i.val)
- case len(i.val) > 10:
- return fmt.Sprintf("%.10q...", i.val)
- }
- return fmt.Sprintf("%q", i.val)
-}
-
-// itemType identifies the type of lex items.
-type itemType int
-
-const (
- itemError itemType = iota // error occurred; value is text of error
- itemBool // boolean constant
- itemChar // printable ASCII character; grab bag for comma etc.
- itemCharConstant // character constant
- itemComment // comment text
- itemComplex // complex constant (1+2i); imaginary is just a number
- itemAssign // equals ('=') introducing an assignment
- itemDeclare // colon-equals (':=') introducing a declaration
- itemEOF
- itemField // alphanumeric identifier starting with '.'
- itemIdentifier // alphanumeric identifier not starting with '.'
- itemLeftDelim // left action delimiter
- itemLeftParen // '(' inside action
- itemNumber // simple number, including imaginary
- itemPipe // pipe symbol
- itemRawString // raw quoted string (includes quotes)
- itemRightDelim // right action delimiter
- itemRightParen // ')' inside action
- itemSpace // run of spaces separating arguments
- itemString // quoted string (includes quotes)
- itemText // plain text
- itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'
- // Keywords appear after all the rest.
- itemKeyword // used only to delimit the keywords
- itemBlock // block keyword
- itemBreak // break keyword
- itemContinue // continue keyword
- itemDot // the cursor, spelled '.'
- itemDefine // define keyword
- itemElse // else keyword
- itemEnd // end keyword
- itemIf // if keyword
- itemNil // the untyped nil constant, easiest to treat as a keyword
- itemRange // range keyword
- itemTemplate // template keyword
- itemWith // with keyword
-)
-
-var key = map[string]itemType{
- ".": itemDot,
- "block": itemBlock,
- "break": itemBreak,
- "continue": itemContinue,
- "define": itemDefine,
- "else": itemElse,
- "end": itemEnd,
- "if": itemIf,
- "range": itemRange,
- "nil": itemNil,
- "template": itemTemplate,
- "with": itemWith,
-}
-
-const eof = -1
-
-// Trimming spaces.
-// If the action begins "{{- " rather than "{{", then all space/tab/newlines
-// preceding the action are trimmed; conversely if it ends " -}}" the spaces
-// following the action are trimmed. This is done entirely in the lexer; the
-// parser never sees it happen. We require an ASCII space (' ', \t, \r, \n)
-// to be present to avoid ambiguity with things like "{{-3}}". It reads
-// better with the space present anyway. For simplicity, only ASCII
-// does the job.
-const (
- spaceChars = " \t\r\n" // These are the space characters defined by Go itself.
- trimMarker = '-' // Attached to left/right delimiter, trims trailing spaces from preceding/following text.
- trimMarkerLen = Pos(1 + 1) // marker plus space before or after
-)
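The trim markers act entirely at this lexing stage, before the parser runs. A quick demonstration that "{{- " and " -}}" eat the adjacent whitespace:

package main

import (
	"os"
	"text/template"
)

func main() {
	// The ASCII space after "{{-" is mandatory, which is why "{{-3}}"
	// still lexes as the number -3 rather than as a trim marker.
	t := template.Must(template.New("trim").Parse("a  {{- /* gone */ -}}  b\n"))
	_ = t.Execute(os.Stdout, nil) // prints "ab"
}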
-
-// stateFn represents the state of the scanner as a function that returns the next state.
-type stateFn func(*lexer) stateFn
-
-// lexer holds the state of the scanner.
-type lexer struct {
- name string // the name of the input; used only for error reports
- input string // the string being scanned
- leftDelim string // start of action
- rightDelim string // end of action
- emitComment bool // emit itemComment tokens.
- pos Pos // current position in the input
- start Pos // start position of this item
- width Pos // width of last rune read from input
- items chan item // channel of scanned items
- parenDepth int // nesting depth of ( ) exprs
- line int // 1+number of newlines seen
- startLine int // start line of this item
- breakOK bool // break keyword allowed
- continueOK bool // continue keyword allowed
-}
-
-// next returns the next rune in the input.
-func (l *lexer) next() rune {
- if int(l.pos) >= len(l.input) {
- l.width = 0
- return eof
- }
- r, w := utf8.DecodeRuneInString(l.input[l.pos:])
- l.width = Pos(w)
- l.pos += l.width
- if r == '\n' {
- l.line++
- }
- return r
-}
-
-// peek returns but does not consume the next rune in the input.
-func (l *lexer) peek() rune {
- r := l.next()
- l.backup()
- return r
-}
-
-// backup steps back one rune. Can only be called once per call of next.
-func (l *lexer) backup() {
- l.pos -= l.width
- // Correct newline count.
- if l.width == 1 && l.input[l.pos] == '\n' {
- l.line--
- }
-}
-
-// emit passes an item back to the client.
-func (l *lexer) emit(t itemType) {
- l.items <- item{t, l.start, l.input[l.start:l.pos], l.startLine}
- l.start = l.pos
- l.startLine = l.line
-}
-
-// ignore skips over the pending input before this point.
-func (l *lexer) ignore() {
- l.line += strings.Count(l.input[l.start:l.pos], "\n")
- l.start = l.pos
- l.startLine = l.line
-}
-
-// accept consumes the next rune if it's from the valid set.
-func (l *lexer) accept(valid string) bool {
- if strings.ContainsRune(valid, l.next()) {
- return true
- }
- l.backup()
- return false
-}
-
-// acceptRun consumes a run of runes from the valid set.
-func (l *lexer) acceptRun(valid string) {
- for strings.ContainsRune(valid, l.next()) {
- }
- l.backup()
-}
-
-// errorf returns an error token and terminates the scan by passing
-// back a nil pointer that will be the next state, terminating l.nextItem.
-func (l *lexer) errorf(format string, args ...any) stateFn {
- l.items <- item{itemError, l.start, fmt.Sprintf(format, args...), l.startLine}
- return nil
-}
-
-// nextItem returns the next item from the input.
-// Called by the parser, not in the lexing goroutine.
-func (l *lexer) nextItem() item {
- return <-l.items
-}
-
-// drain drains the output so the lexing goroutine will exit.
-// Called by the parser, not in the lexing goroutine.
-func (l *lexer) drain() {
- for range l.items {
- }
-}
-
-// lex creates a new scanner for the input string.
-func lex(name, input, left, right string, emitComment bool) *lexer {
- if left == "" {
- left = leftDelim
- }
- if right == "" {
- right = rightDelim
- }
- l := &lexer{
- name: name,
- input: input,
- leftDelim: left,
- rightDelim: right,
- emitComment: emitComment,
- items: make(chan item),
- line: 1,
- startLine: 1,
- }
- go l.run()
- return l
-}
-
-// run runs the state machine for the lexer.
-func (l *lexer) run() {
- for state := lexText; state != nil; {
- state = state(l)
- }
- close(l.items)
-}
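lex starts the state machine in its own goroutine and hands tokens back over the items channel; each stateFn returns the next state until one returns nil and run closes the channel. The same shape, shrunk to a toy word lexer with made-up names (not part of this package):

package main

import "fmt"

type tok struct{ kind, val string }

type lx struct {
	in    string
	pos   int
	items chan tok
}

type state func(*lx) state

// word consumes a run of non-space bytes, emits it, and returns the
// next state (itself) or nil at end of input.
func word(l *lx) state {
	start := l.pos
	for l.pos < len(l.in) && l.in[l.pos] != ' ' {
		l.pos++
	}
	l.items <- tok{"word", l.in[start:l.pos]}
	if l.pos == len(l.in) {
		return nil
	}
	l.pos++ // skip the single separating space
	return word
}

func main() {
	l := &lx{in: "state machine lexer", items: make(chan tok)}
	go func() { // the equivalent of run: step until nil, then close.
		for s := state(word); s != nil; {
			s = s(l)
		}
		close(l.items)
	}()
	for it := range l.items {
		fmt.Println(it.kind, it.val)
	}
}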
-
-// state functions
-
-const (
- leftDelim = "{{"
- rightDelim = "}}"
- leftComment = "/*"
- rightComment = "*/"
-)
-
-// lexText scans until an opening action delimiter, "{{".
-func lexText(l *lexer) stateFn {
- l.width = 0
- if x := strings.Index(l.input[l.pos:], l.leftDelim); x >= 0 {
- ldn := Pos(len(l.leftDelim))
- l.pos += Pos(x)
- trimLength := Pos(0)
- if hasLeftTrimMarker(l.input[l.pos+ldn:]) {
- trimLength = rightTrimLength(l.input[l.start:l.pos])
- }
- l.pos -= trimLength
- if l.pos > l.start {
- l.line += strings.Count(l.input[l.start:l.pos], "\n")
- l.emit(itemText)
- }
- l.pos += trimLength
- l.ignore()
- return lexLeftDelim
- }
- l.pos = Pos(len(l.input))
- // Correctly reached EOF.
- if l.pos > l.start {
- l.line += strings.Count(l.input[l.start:l.pos], "\n")
- l.emit(itemText)
- }
- l.emit(itemEOF)
- return nil
-}
-
-// rightTrimLength returns the length of the spaces at the end of the string.
-func rightTrimLength(s string) Pos {
- return Pos(len(s) - len(strings.TrimRight(s, spaceChars)))
-}
-
-// atRightDelim reports whether the lexer is at a right delimiter, possibly preceded by a trim marker.
-func (l *lexer) atRightDelim() (delim, trimSpaces bool) {
- if hasRightTrimMarker(l.input[l.pos:]) && strings.HasPrefix(l.input[l.pos+trimMarkerLen:], l.rightDelim) { // With trim marker.
- return true, true
- }
- if strings.HasPrefix(l.input[l.pos:], l.rightDelim) { // Without trim marker.
- return true, false
- }
- return false, false
-}
-
-// leftTrimLength returns the length of the spaces at the beginning of the string.
-func leftTrimLength(s string) Pos {
- return Pos(len(s) - len(strings.TrimLeft(s, spaceChars)))
-}
-
-// lexLeftDelim scans the left delimiter, which is known to be present, possibly with a trim marker.
-func lexLeftDelim(l *lexer) stateFn {
- l.pos += Pos(len(l.leftDelim))
- trimSpace := hasLeftTrimMarker(l.input[l.pos:])
- afterMarker := Pos(0)
- if trimSpace {
- afterMarker = trimMarkerLen
- }
- if strings.HasPrefix(l.input[l.pos+afterMarker:], leftComment) {
- l.pos += afterMarker
- l.ignore()
- return lexComment
- }
- l.emit(itemLeftDelim)
- l.pos += afterMarker
- l.ignore()
- l.parenDepth = 0
- return lexInsideAction
-}
-
-// lexComment scans a comment. The left comment marker is known to be present.
-func lexComment(l *lexer) stateFn {
- l.pos += Pos(len(leftComment))
- i := strings.Index(l.input[l.pos:], rightComment)
- if i < 0 {
- return l.errorf("unclosed comment")
- }
- l.pos += Pos(i + len(rightComment))
- delim, trimSpace := l.atRightDelim()
- if !delim {
- return l.errorf("comment ends before closing delimiter")
- }
- if l.emitComment {
- l.emit(itemComment)
- }
- if trimSpace {
- l.pos += trimMarkerLen
- }
- l.pos += Pos(len(l.rightDelim))
- if trimSpace {
- l.pos += leftTrimLength(l.input[l.pos:])
- }
- l.ignore()
- return lexText
-}
-
-// lexRightDelim scans the right delimiter, which is known to be present, possibly with a trim marker.
-func lexRightDelim(l *lexer) stateFn {
- trimSpace := hasRightTrimMarker(l.input[l.pos:])
- if trimSpace {
- l.pos += trimMarkerLen
- l.ignore()
- }
- l.pos += Pos(len(l.rightDelim))
- l.emit(itemRightDelim)
- if trimSpace {
- l.pos += leftTrimLength(l.input[l.pos:])
- l.ignore()
- }
- return lexText
-}
-
-// lexInsideAction scans the elements inside action delimiters.
-func lexInsideAction(l *lexer) stateFn {
- // Either number, quoted string, or identifier.
- // Spaces separate arguments; runs of spaces turn into itemSpace.
- // Pipe symbols separate and are emitted.
- delim, _ := l.atRightDelim()
- if delim {
- if l.parenDepth == 0 {
- return lexRightDelim
- }
- return l.errorf("unclosed left paren")
- }
- switch r := l.next(); {
- case r == eof:
- return l.errorf("unclosed action")
- case isSpace(r):
- l.backup() // Put space back in case we have " -}}".
- return lexSpace
- case r == '=':
- l.emit(itemAssign)
- case r == ':':
- if l.next() != '=' {
- return l.errorf("expected :=")
- }
- l.emit(itemDeclare)
- case r == '|':
- l.emit(itemPipe)
- case r == '"':
- return lexQuote
- case r == '`':
- return lexRawQuote
- case r == '$':
- return lexVariable
- case r == '\'':
- return lexChar
- case r == '.':
- // special look-ahead for ".field" so we don't break l.backup().
- if l.pos < Pos(len(l.input)) {
- r := l.input[l.pos]
- if r < '0' || '9' < r {
- return lexField
- }
- }
- fallthrough // '.' can start a number.
- case r == '+' || r == '-' || ('0' <= r && r <= '9'):
- l.backup()
- return lexNumber
- case isAlphaNumeric(r):
- l.backup()
- return lexIdentifier
- case r == '(':
- l.emit(itemLeftParen)
- l.parenDepth++
- case r == ')':
- l.emit(itemRightParen)
- l.parenDepth--
- if l.parenDepth < 0 {
- return l.errorf("unexpected right paren %#U", r)
- }
- case r <= unicode.MaxASCII && unicode.IsPrint(r):
- l.emit(itemChar)
- default:
- return l.errorf("unrecognized character in action: %#U", r)
- }
- return lexInsideAction
-}
-
-// lexSpace scans a run of space characters.
-// We have not consumed the first space, which is known to be present.
-// Take care if there is a trim-marked right delimiter, which starts with a space.
-func lexSpace(l *lexer) stateFn {
- var r rune
- var numSpaces int
- for {
- r = l.peek()
- if !isSpace(r) {
- break
- }
- l.next()
- numSpaces++
- }
- // Be careful about a trim-marked closing delimiter, which has a minus
- // after a space. We know there is a space, so check for the '-' that might follow.
- if hasRightTrimMarker(l.input[l.pos-1:]) && strings.HasPrefix(l.input[l.pos-1+trimMarkerLen:], l.rightDelim) {
- l.backup() // Before the space.
- if numSpaces == 1 {
- return lexRightDelim // On the delim, so go right to that.
- }
- }
- l.emit(itemSpace)
- return lexInsideAction
-}
-
-// lexIdentifier scans an alphanumeric.
-func lexIdentifier(l *lexer) stateFn {
-Loop:
- for {
- switch r := l.next(); {
- case isAlphaNumeric(r):
- // absorb.
- default:
- l.backup()
- word := l.input[l.start:l.pos]
- if !l.atTerminator() {
- return l.errorf("bad character %#U", r)
- }
- switch {
- case key[word] > itemKeyword:
- item := key[word]
- if item == itemBreak && !l.breakOK || item == itemContinue && !l.continueOK {
- l.emit(itemIdentifier)
- } else {
- l.emit(item)
- }
- case word[0] == '.':
- l.emit(itemField)
- case word == "true", word == "false":
- l.emit(itemBool)
- default:
- l.emit(itemIdentifier)
- }
- break Loop
- }
- }
- return lexInsideAction
-}
-
-// lexField scans a field: .Alphanumeric.
-// The . has been scanned.
-func lexField(l *lexer) stateFn {
- return lexFieldOrVariable(l, itemField)
-}
-
-// lexVariable scans a Variable: $Alphanumeric.
-// The $ has been scanned.
-func lexVariable(l *lexer) stateFn {
- if l.atTerminator() { // Nothing interesting follows -> "$".
- l.emit(itemVariable)
- return lexInsideAction
- }
- return lexFieldOrVariable(l, itemVariable)
-}
-
-// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
-// The . or $ has been scanned.
-func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
- if l.atTerminator() { // Nothing interesting follows -> "." or "$".
- if typ == itemVariable {
- l.emit(itemVariable)
- } else {
- l.emit(itemDot)
- }
- return lexInsideAction
- }
- var r rune
- for {
- r = l.next()
- if !isAlphaNumeric(r) {
- l.backup()
- break
- }
- }
- if !l.atTerminator() {
- return l.errorf("bad character %#U", r)
- }
- l.emit(typ)
- return lexInsideAction
-}
-
-// atTerminator reports whether the input is at a valid termination character to
-// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
-// like "$x+2" not being acceptable without a space, in case we decide one
-// day to implement arithmetic.
-func (l *lexer) atTerminator() bool {
- r := l.peek()
- if isSpace(r) {
- return true
- }
- switch r {
- case eof, '.', ',', '|', ':', ')', '(':
- return true
- }
- // Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
- // succeed but should fail) but only in extremely rare cases caused by willfully
- // bad choice of delimiter.
- if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
- return true
- }
- return false
-}
-
-// lexChar scans a character constant. The initial quote is already
-// scanned. Syntax checking is done by the parser.
-func lexChar(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case '\\':
- if r := l.next(); r != eof && r != '\n' {
- break
- }
- fallthrough
- case eof, '\n':
- return l.errorf("unterminated character constant")
- case '\'':
- break Loop
- }
- }
- l.emit(itemCharConstant)
- return lexInsideAction
-}
-
-// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
-// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
-// and "089" - but when it's wrong the input is invalid and the parser (via
-// strconv) will notice.
-func lexNumber(l *lexer) stateFn {
- if !l.scanNumber() {
- return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
- }
- if sign := l.peek(); sign == '+' || sign == '-' {
- // Complex: 1+2i. No spaces, must end in 'i'.
- if !l.scanNumber() || l.input[l.pos-1] != 'i' {
- return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
- }
- l.emit(itemComplex)
- } else {
- l.emit(itemNumber)
- }
- return lexInsideAction
-}
-
-func (l *lexer) scanNumber() bool {
- // Optional leading sign.
- l.accept("+-")
- // Is it hex?
- digits := "0123456789_"
- if l.accept("0") {
- // Note: Leading 0 does not mean octal in floats.
- if l.accept("xX") {
- digits = "0123456789abcdefABCDEF_"
- } else if l.accept("oO") {
- digits = "01234567_"
- } else if l.accept("bB") {
- digits = "01_"
- }
- }
- l.acceptRun(digits)
- if l.accept(".") {
- l.acceptRun(digits)
- }
- if len(digits) == 10+1 && l.accept("eE") {
- l.accept("+-")
- l.acceptRun("0123456789_")
- }
- if len(digits) == 16+6+1 && l.accept("pP") {
- l.accept("+-")
- l.acceptRun("0123456789_")
- }
- // Is it imaginary?
- l.accept("i")
- // Next thing mustn't be alphanumeric.
- if isAlphaNumeric(l.peek()) {
- l.next()
- return false
- }
- return true
-}
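As the comment on lexNumber says, over-acceptance is fine because strconv rejects bad literals later; meanwhile template number literals follow Go syntax, hex floats and complex constants included:

package main

import (
	"os"
	"text/template"
)

func main() {
	// 0x1p-2 is the hex float 0.25; 1+2i lexes as an itemComplex.
	t := template.Must(template.New("n").Parse(`{{printf "%v %T" 0x1p-2 1+2i}}` + "\n"))
	_ = t.Execute(os.Stdout, nil) // prints "0.25 complex128"
}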
-
-// lexQuote scans a quoted string.
-func lexQuote(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case '\\':
- if r := l.next(); r != eof && r != '\n' {
- break
- }
- fallthrough
- case eof, '\n':
- return l.errorf("unterminated quoted string")
- case '"':
- break Loop
- }
- }
- l.emit(itemString)
- return lexInsideAction
-}
-
-// lexRawQuote scans a raw quoted string.
-func lexRawQuote(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case eof:
- return l.errorf("unterminated raw quoted string")
- case '`':
- break Loop
- }
- }
- l.emit(itemRawString)
- return lexInsideAction
-}
-
-// isSpace reports whether r is a space character.
-func isSpace(r rune) bool {
- return r == ' ' || r == '\t' || r == '\r' || r == '\n'
-}
-
-// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
-func isAlphaNumeric(r rune) bool {
- return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
-}
-
-func hasLeftTrimMarker(s string) bool {
- return len(s) >= 2 && s[0] == trimMarker && isSpace(rune(s[1]))
-}
-
-func hasRightTrimMarker(s string) bool {
- return len(s) >= 2 && isSpace(rune(s[0])) && s[1] == trimMarker
-}
diff --git a/contrib/go/_std_1.18/src/text/template/parse/parse.go b/contrib/go/_std_1.18/src/text/template/parse/parse.go
deleted file mode 100644
index ce548b0886..0000000000
--- a/contrib/go/_std_1.18/src/text/template/parse/parse.go
+++ /dev/null
@@ -1,795 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package parse builds parse trees for templates as defined by text/template
-// and html/template. Clients should use those packages to construct templates
-// rather than this one, which provides shared internal data structures not
-// intended for general use.
-package parse
-
-import (
- "bytes"
- "fmt"
- "runtime"
- "strconv"
- "strings"
-)
-
-// Tree is the representation of a single parsed template.
-type Tree struct {
- Name string // name of the template represented by the tree.
- ParseName string // name of the top-level template during parsing, for error messages.
- Root *ListNode // top-level root of the tree.
- Mode Mode // parsing mode.
- text string // text parsed to create the template (or its parent)
- // Parsing only; cleared after parse.
- funcs []map[string]any
- lex *lexer
- token [3]item // three-token lookahead for parser.
- peekCount int
- vars []string // variables defined at the moment.
- treeSet map[string]*Tree
- actionLine int // line of left delim starting action
- rangeDepth int
-}
-
-// A mode value is a set of flags (or 0). Modes control parser behavior.
-type Mode uint
-
-const (
- ParseComments Mode = 1 << iota // parse comments and add them to AST
- SkipFuncCheck // do not check that functions are defined
-)
-
-// Copy returns a copy of the Tree. Any parsing state is discarded.
-func (t *Tree) Copy() *Tree {
- if t == nil {
- return nil
- }
- return &Tree{
- Name: t.Name,
- ParseName: t.ParseName,
- Root: t.Root.CopyList(),
- text: t.text,
- }
-}
-
-// Parse returns a map from template name to parse.Tree, created by parsing the
-// templates described in the argument string. The top-level template will be
-// given the specified name. If an error is encountered, parsing stops and an
-// empty map is returned with the error.
-func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]any) (map[string]*Tree, error) {
- treeSet := make(map[string]*Tree)
- t := New(name)
- t.text = text
- _, err := t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
- return treeSet, err
-}
-
-// next returns the next token.
-func (t *Tree) next() item {
- if t.peekCount > 0 {
- t.peekCount--
- } else {
- t.token[0] = t.lex.nextItem()
- }
- return t.token[t.peekCount]
-}
-
-// backup backs the input stream up one token.
-func (t *Tree) backup() {
- t.peekCount++
-}
-
-// backup2 backs the input stream up two tokens.
-// The zeroth token is already there.
-func (t *Tree) backup2(t1 item) {
- t.token[1] = t1
- t.peekCount = 2
-}
-
-// backup3 backs the input stream up three tokens
-// The zeroth token is already there.
-func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
- t.token[1] = t1
- t.token[2] = t2
- t.peekCount = 3
-}
-
-// peek returns but does not consume the next token.
-func (t *Tree) peek() item {
- if t.peekCount > 0 {
- return t.token[t.peekCount-1]
- }
- t.peekCount = 1
- t.token[0] = t.lex.nextItem()
- return t.token[0]
-}
-
-// nextNonSpace returns the next non-space token.
-func (t *Tree) nextNonSpace() (token item) {
- for {
- token = t.next()
- if token.typ != itemSpace {
- break
- }
- }
- return token
-}
-
-// peekNonSpace returns but does not consume the next non-space token.
-func (t *Tree) peekNonSpace() item {
- token := t.nextNonSpace()
- t.backup()
- return token
-}
-
-// Parsing.
-
-// New allocates a new parse tree with the given name.
-func New(name string, funcs ...map[string]any) *Tree {
- return &Tree{
- Name: name,
- funcs: funcs,
- }
-}
-
-// ErrorContext returns a textual representation of the location of the node in the input text.
-// The receiver is only used when the node does not have a pointer to the tree inside,
-// which can occur in old code.
-func (t *Tree) ErrorContext(n Node) (location, context string) {
- pos := int(n.Position())
- tree := n.tree()
- if tree == nil {
- tree = t
- }
- text := tree.text[:pos]
- byteNum := strings.LastIndex(text, "\n")
- if byteNum == -1 {
- byteNum = pos // On first line.
- } else {
- byteNum++ // After the newline.
- byteNum = pos - byteNum
- }
- lineNum := 1 + strings.Count(text, "\n")
- context = n.String()
- return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
-}
-
-// errorf formats the error and terminates processing.
-func (t *Tree) errorf(format string, args ...any) {
- t.Root = nil
- format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.token[0].line, format)
- panic(fmt.Errorf(format, args...))
-}
-
-// error terminates processing.
-func (t *Tree) error(err error) {
- t.errorf("%s", err)
-}
-
-// expect consumes the next token and guarantees it has the required type.
-func (t *Tree) expect(expected itemType, context string) item {
- token := t.nextNonSpace()
- if token.typ != expected {
- t.unexpected(token, context)
- }
- return token
-}
-
-// expectOneOf consumes the next token and guarantees it has one of the required types.
-func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
- token := t.nextNonSpace()
- if token.typ != expected1 && token.typ != expected2 {
- t.unexpected(token, context)
- }
- return token
-}
-
-// unexpected complains about the token and terminates processing.
-func (t *Tree) unexpected(token item, context string) {
- if token.typ == itemError {
- extra := ""
- if t.actionLine != 0 && t.actionLine != token.line {
- extra = fmt.Sprintf(" in action started at %s:%d", t.ParseName, t.actionLine)
- if strings.HasSuffix(token.val, " action") {
- extra = extra[len(" in action"):] // avoid "action in action"
- }
- }
- t.errorf("%s%s", token, extra)
- }
- t.errorf("unexpected %s in %s", token, context)
-}
-
-// recover is the handler that turns panics into returns from the top level of Parse.
-func (t *Tree) recover(errp *error) {
- e := recover()
- if e != nil {
- if _, ok := e.(runtime.Error); ok {
- panic(e)
- }
- if t != nil {
- t.lex.drain()
- t.stopParse()
- }
- *errp = e.(error)
- }
-}
-
-// startParse initializes the parser, using the lexer.
-func (t *Tree) startParse(funcs []map[string]any, lex *lexer, treeSet map[string]*Tree) {
- t.Root = nil
- t.lex = lex
- t.vars = []string{"$"}
- t.funcs = funcs
- t.treeSet = treeSet
- lex.breakOK = !t.hasFunction("break")
- lex.continueOK = !t.hasFunction("continue")
-}
-
-// stopParse terminates parsing.
-func (t *Tree) stopParse() {
- t.lex = nil
- t.vars = nil
- t.funcs = nil
- t.treeSet = nil
-}
-
-// Parse parses the template definition string to construct a representation of
-// the template for execution. If either action delimiter string is empty, the
-// default ("{{" or "}}") is used. Embedded template definitions are added to
-// the treeSet map.
-func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]any) (tree *Tree, err error) {
- defer t.recover(&err)
- t.ParseName = t.Name
- emitComment := t.Mode&ParseComments != 0
- t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim, emitComment), treeSet)
- t.text = text
- t.parse()
- t.add()
- t.stopParse()
- return t, nil
-}
-
-// add adds tree to t.treeSet.
-func (t *Tree) add() {
- tree := t.treeSet[t.Name]
- if tree == nil || IsEmptyTree(tree.Root) {
- t.treeSet[t.Name] = t
- return
- }
- if !IsEmptyTree(t.Root) {
- t.errorf("template: multiple definition of template %q", t.Name)
- }
-}
-
-// IsEmptyTree reports whether this tree (node) is empty of everything but space or comments.
-func IsEmptyTree(n Node) bool {
- switch n := n.(type) {
- case nil:
- return true
- case *ActionNode:
- case *CommentNode:
- return true
- case *IfNode:
- case *ListNode:
- for _, node := range n.Nodes {
- if !IsEmptyTree(node) {
- return false
- }
- }
- return true
- case *RangeNode:
- case *TemplateNode:
- case *TextNode:
- return len(bytes.TrimSpace(n.Text)) == 0
- case *WithNode:
- default:
- panic("unknown node: " + n.String())
- }
- return false
-}
-
-// parse is the top-level parser for a template, essentially the same
-// as itemList except it also parses {{define}} actions.
-// It runs to EOF.
-func (t *Tree) parse() {
- t.Root = t.newList(t.peek().pos)
- for t.peek().typ != itemEOF {
- if t.peek().typ == itemLeftDelim {
- delim := t.next()
- if t.nextNonSpace().typ == itemDefine {
- newT := New("definition") // name will be updated once we know it.
- newT.text = t.text
- newT.Mode = t.Mode
- newT.ParseName = t.ParseName
- newT.startParse(t.funcs, t.lex, t.treeSet)
- newT.parseDefinition()
- continue
- }
- t.backup2(delim)
- }
- switch n := t.textOrAction(); n.Type() {
- case nodeEnd, nodeElse:
- t.errorf("unexpected %s", n)
- default:
- t.Root.append(n)
- }
- }
-}
-
-// parseDefinition parses a {{define}} ... {{end}} template definition and
-// installs the definition in t.treeSet. The "define" keyword has already
-// been scanned.
-func (t *Tree) parseDefinition() {
- const context = "define clause"
- name := t.expectOneOf(itemString, itemRawString, context)
- var err error
- t.Name, err = strconv.Unquote(name.val)
- if err != nil {
- t.error(err)
- }
- t.expect(itemRightDelim, context)
- var end Node
- t.Root, end = t.itemList()
- if end.Type() != nodeEnd {
- t.errorf("unexpected %s in %s", end, context)
- }
- t.add()
- t.stopParse()
-}
-
-// itemList:
-// textOrAction*
-// Terminates at {{end}} or {{else}}, returned separately.
-func (t *Tree) itemList() (list *ListNode, next Node) {
- list = t.newList(t.peekNonSpace().pos)
- for t.peekNonSpace().typ != itemEOF {
- n := t.textOrAction()
- switch n.Type() {
- case nodeEnd, nodeElse:
- return list, n
- }
- list.append(n)
- }
- t.errorf("unexpected EOF")
- return
-}
-
-// textOrAction:
-// text | comment | action
-func (t *Tree) textOrAction() Node {
- switch token := t.nextNonSpace(); token.typ {
- case itemText:
- return t.newText(token.pos, token.val)
- case itemLeftDelim:
- t.actionLine = token.line
- defer t.clearActionLine()
- return t.action()
- case itemComment:
- return t.newComment(token.pos, token.val)
- default:
- t.unexpected(token, "input")
- }
- return nil
-}
-
-func (t *Tree) clearActionLine() {
- t.actionLine = 0
-}
-
-// Action:
-// control
-// command ("|" command)*
-// Left delim is past. Now get actions.
-// First word could be a keyword such as range.
-func (t *Tree) action() (n Node) {
- switch token := t.nextNonSpace(); token.typ {
- case itemBlock:
- return t.blockControl()
- case itemBreak:
- return t.breakControl(token.pos, token.line)
- case itemContinue:
- return t.continueControl(token.pos, token.line)
- case itemElse:
- return t.elseControl()
- case itemEnd:
- return t.endControl()
- case itemIf:
- return t.ifControl()
- case itemRange:
- return t.rangeControl()
- case itemTemplate:
- return t.templateControl()
- case itemWith:
- return t.withControl()
- }
- t.backup()
- token := t.peek()
- // Do not pop variables; they persist until "end".
- return t.newAction(token.pos, token.line, t.pipeline("command", itemRightDelim))
-}
-
-// Break:
-// {{break}}
-// Break keyword is past.
-func (t *Tree) breakControl(pos Pos, line int) Node {
- if token := t.nextNonSpace(); token.typ != itemRightDelim {
- t.unexpected(token, "{{break}}")
- }
- if t.rangeDepth == 0 {
- t.errorf("{{break}} outside {{range}}")
- }
- return t.newBreak(pos, line)
-}
-
-// Continue:
-// {{continue}}
-// Continue keyword is past.
-func (t *Tree) continueControl(pos Pos, line int) Node {
- if token := t.nextNonSpace(); token.typ != itemRightDelim {
- t.unexpected(token, "{{continue}}")
- }
- if t.rangeDepth == 0 {
- t.errorf("{{continue}} outside {{range}}")
- }
- return t.newContinue(pos, line)
-}
-
-// Pipeline:
-// declarations? command ('|' command)*
-func (t *Tree) pipeline(context string, end itemType) (pipe *PipeNode) {
- token := t.peekNonSpace()
- pipe = t.newPipeline(token.pos, token.line, nil)
- // Are there declarations or assignments?
-decls:
- if v := t.peekNonSpace(); v.typ == itemVariable {
- t.next()
- // Since space is a token, we need 3-token look-ahead here in the worst case:
- // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
- // argument variable rather than a declaration. So remember the token
- // adjacent to the variable so we can push it back if necessary.
- tokenAfterVariable := t.peek()
- next := t.peekNonSpace()
- switch {
- case next.typ == itemAssign, next.typ == itemDeclare:
- pipe.IsAssign = next.typ == itemAssign
- t.nextNonSpace()
- pipe.Decl = append(pipe.Decl, t.newVariable(v.pos, v.val))
- t.vars = append(t.vars, v.val)
- case next.typ == itemChar && next.val == ",":
- t.nextNonSpace()
- pipe.Decl = append(pipe.Decl, t.newVariable(v.pos, v.val))
- t.vars = append(t.vars, v.val)
- if context == "range" && len(pipe.Decl) < 2 {
- switch t.peekNonSpace().typ {
- case itemVariable, itemRightDelim, itemRightParen:
- // second initialized variable in a range pipeline
- goto decls
- default:
- t.errorf("range can only initialize variables")
- }
- }
- t.errorf("too many declarations in %s", context)
- case tokenAfterVariable.typ == itemSpace:
- t.backup3(v, tokenAfterVariable)
- default:
- t.backup2(v)
- }
- }
- for {
- switch token := t.nextNonSpace(); token.typ {
- case end:
- // At this point, the pipeline is complete
- t.checkPipeline(pipe, context)
- return
- case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
- itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
- t.backup()
- pipe.append(t.command())
- default:
- t.unexpected(token, context)
- }
- }
-}
-
-func (t *Tree) checkPipeline(pipe *PipeNode, context string) {
- // Reject empty pipelines
- if len(pipe.Cmds) == 0 {
- t.errorf("missing value for %s", context)
- }
- // Only the first command of a pipeline can start with a non-executable operand
- for i, c := range pipe.Cmds[1:] {
- switch c.Args[0].Type() {
- case NodeBool, NodeDot, NodeNil, NodeNumber, NodeString:
- // With A|B|C, pipeline stage 2 is B
- t.errorf("non executable command in pipeline stage %d", i+2)
- }
- }
-}
-
-func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
- defer t.popVars(len(t.vars))
- pipe = t.pipeline(context, itemRightDelim)
- if context == "range" {
- t.rangeDepth++
- }
- var next Node
- list, next = t.itemList()
- if context == "range" {
- t.rangeDepth--
- }
- switch next.Type() {
- case nodeEnd: //done
- case nodeElse:
- if allowElseIf {
- // Special case for "else if". If the "else" is followed immediately by an "if",
- // the elseControl will have left the "if" token pending. Treat
- // {{if a}}_{{else if b}}_{{end}}
- // as
- // {{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
- // To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
- // is assumed. This technique works even for long if-else-if chains.
- // TODO: Should we allow else-if in with and range?
- if t.peek().typ == itemIf {
- t.next() // Consume the "if" token.
- elseList = t.newList(next.Position())
- elseList.append(t.ifControl())
- // Do not consume the next item - only one {{end}} required.
- break
- }
- }
- elseList, next = t.itemList()
- if next.Type() != nodeEnd {
- t.errorf("expected end; found %s", next)
- }
- }
- return pipe.Position(), pipe.Line, pipe, list, elseList
-}
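Because of the rewrite described in parseControl, an {{else if}} chain closes with a single {{end}}; both spellings in its comment parse to the same tree. For example:

package main

import (
	"os"
	"text/template"
)

func main() {
	// One {{end}} terminates the whole chain; the explicit nested-if
	// form would need two.
	const s = `{{if eq . 1}}one{{else if eq . 2}}two{{else}}many{{end}}`
	t := template.Must(template.New("x").Parse(s + "\n"))
	_ = t.Execute(os.Stdout, 2) // prints "two"
}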
-
-// If:
-// {{if pipeline}} itemList {{end}}
-// {{if pipeline}} itemList {{else}} itemList {{end}}
-// If keyword is past.
-func (t *Tree) ifControl() Node {
- return t.newIf(t.parseControl(true, "if"))
-}
-
-// Range:
-// {{range pipeline}} itemList {{end}}
-// {{range pipeline}} itemList {{else}} itemList {{end}}
-// Range keyword is past.
-func (t *Tree) rangeControl() Node {
- r := t.newRange(t.parseControl(false, "range"))
- return r
-}
-
-// With:
-// {{with pipeline}} itemList {{end}}
-// {{with pipeline}} itemList {{else}} itemList {{end}}
-// With keyword is past.
-func (t *Tree) withControl() Node {
- return t.newWith(t.parseControl(false, "with"))
-}
-
-// End:
-// {{end}}
-// End keyword is past.
-func (t *Tree) endControl() Node {
- return t.newEnd(t.expect(itemRightDelim, "end").pos)
-}
-
-// Else:
-// {{else}}
-// Else keyword is past.
-func (t *Tree) elseControl() Node {
- // Special case for "else if".
- peek := t.peekNonSpace()
- if peek.typ == itemIf {
- // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
- return t.newElse(peek.pos, peek.line)
- }
- token := t.expect(itemRightDelim, "else")
- return t.newElse(token.pos, token.line)
-}
-
-// Block:
-// {{block stringValue pipeline}}
-// Block keyword is past.
-// The name must be something that can evaluate to a string.
-// The pipeline is mandatory.
-func (t *Tree) blockControl() Node {
- const context = "block clause"
-
- token := t.nextNonSpace()
- name := t.parseTemplateName(token, context)
- pipe := t.pipeline(context, itemRightDelim)
-
- block := New(name)
- block.text = t.text
- block.Mode = t.Mode
- block.ParseName = t.ParseName
- block.startParse(t.funcs, t.lex, t.treeSet)
- var end Node
- block.Root, end = block.itemList()
- if end.Type() != nodeEnd {
- t.errorf("unexpected %s in %s", end, context)
- }
- block.add()
- block.stopParse()
-
- return t.newTemplate(token.pos, token.line, name, pipe)
-}
-
-// Template:
-// {{template stringValue pipeline}}
-// Template keyword is past. The name must be something that can evaluate
-// to a string.
-func (t *Tree) templateControl() Node {
- const context = "template clause"
- token := t.nextNonSpace()
- name := t.parseTemplateName(token, context)
- var pipe *PipeNode
- if t.nextNonSpace().typ != itemRightDelim {
- t.backup()
- // Do not pop variables; they persist until "end".
- pipe = t.pipeline(context, itemRightDelim)
- }
- return t.newTemplate(token.pos, token.line, name, pipe)
-}
-
-func (t *Tree) parseTemplateName(token item, context string) (name string) {
- switch token.typ {
- case itemString, itemRawString:
- s, err := strconv.Unquote(token.val)
- if err != nil {
- t.error(err)
- }
- name = s
- default:
- t.unexpected(token, context)
- }
- return
-}
-
-// command:
-// operand (space operand)*
-// space-separated arguments up to a pipeline character or right delimiter.
-// we consume the pipe character but leave the right delim to terminate the action.
-func (t *Tree) command() *CommandNode {
- cmd := t.newCommand(t.peekNonSpace().pos)
- for {
- t.peekNonSpace() // skip leading spaces.
- operand := t.operand()
- if operand != nil {
- cmd.append(operand)
- }
- switch token := t.next(); token.typ {
- case itemSpace:
- continue
- case itemRightDelim, itemRightParen:
- t.backup()
- case itemPipe:
- // nothing here; break loop below
- default:
- t.unexpected(token, "operand")
- }
- break
- }
- if len(cmd.Args) == 0 {
- t.errorf("empty command")
- }
- return cmd
-}
-
-// operand:
-// term .Field*
-// An operand is a space-separated component of a command,
-// a term possibly followed by field accesses.
-// A nil return means the next item is not an operand.
-func (t *Tree) operand() Node {
- node := t.term()
- if node == nil {
- return nil
- }
- if t.peek().typ == itemField {
- chain := t.newChain(t.peek().pos, node)
- for t.peek().typ == itemField {
- chain.Add(t.next().val)
- }
- // Compatibility with original API: If the term is of type NodeField
- // or NodeVariable, just put more fields on the original.
- // Otherwise, keep the Chain node.
- // Obvious parsing errors involving literal values are detected here.
- // More complex error cases will have to be handled at execution time.
- switch node.Type() {
- case NodeField:
- node = t.newField(chain.Position(), chain.String())
- case NodeVariable:
- node = t.newVariable(chain.Position(), chain.String())
- case NodeBool, NodeString, NodeNumber, NodeNil, NodeDot:
- t.errorf("unexpected . after term %q", node.String())
- default:
- node = chain
- }
- }
- return node
-}
-
-// term:
-// literal (number, string, nil, boolean)
-// function (identifier)
-// .
-// .Field
-// $
-// '(' pipeline ')'
-// A term is a simple "expression".
-// A nil return means the next item is not a term.
-func (t *Tree) term() Node {
- switch token := t.nextNonSpace(); token.typ {
- case itemIdentifier:
- checkFunc := t.Mode&SkipFuncCheck == 0
- if checkFunc && !t.hasFunction(token.val) {
- t.errorf("function %q not defined", token.val)
- }
- return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
- case itemDot:
- return t.newDot(token.pos)
- case itemNil:
- return t.newNil(token.pos)
- case itemVariable:
- return t.useVar(token.pos, token.val)
- case itemField:
- return t.newField(token.pos, token.val)
- case itemBool:
- return t.newBool(token.pos, token.val == "true")
- case itemCharConstant, itemComplex, itemNumber:
- number, err := t.newNumber(token.pos, token.val, token.typ)
- if err != nil {
- t.error(err)
- }
- return number
- case itemLeftParen:
- return t.pipeline("parenthesized pipeline", itemRightParen)
- case itemString, itemRawString:
- s, err := strconv.Unquote(token.val)
- if err != nil {
- t.error(err)
- }
- return t.newString(token.pos, token.val, s)
- }
- t.backup()
- return nil
-}
-
-// hasFunction reports whether a function name exists in the Tree's maps.
-func (t *Tree) hasFunction(name string) bool {
- for _, funcMap := range t.funcs {
- if funcMap == nil {
- continue
- }
- if funcMap[name] != nil {
- return true
- }
- }
- return false
-}
-
-// popVars trims the variable list to the specified length
-func (t *Tree) popVars(n int) {
- t.vars = t.vars[:n]
-}
-
-// useVar returns a node for a variable reference. It errors if the
-// variable is not defined.
-func (t *Tree) useVar(pos Pos, name string) Node {
- v := t.newVariable(pos, name)
- for _, varName := range t.vars {
- if varName == v.Ident[0] {
- return v
- }
- }
- t.errorf("undefined variable %q", v.Ident[0])
- return nil
-}
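Although the package documentation steers clients toward text/template and html/template, the exported Parse entry point can be called directly to inspect the resulting trees, e.g.:

package main

import (
	"fmt"
	"text/template/parse"
)

func main() {
	// Default delimiters; the input uses no functions, so no func maps
	// are needed. Map iteration order is unspecified.
	trees, err := parse.Parse("root", `{{define "t"}}hi{{end}}{{template "t"}}`, "", "")
	if err != nil {
		panic(err)
	}
	for name, tree := range trees {
		fmt.Println(name, "->", tree.Root.String())
	}
}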
diff --git a/contrib/go/_std_1.18/src/time/format.go b/contrib/go/_std_1.18/src/time/format.go
deleted file mode 100644
index 33e6543289..0000000000
--- a/contrib/go/_std_1.18/src/time/format.go
+++ /dev/null
@@ -1,1608 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package time
-
-import "errors"
-
-// These are predefined layouts for use in Time.Format and time.Parse.
-// The reference time used in these layouts is the specific time stamp:
-// 01/02 03:04:05PM '06 -0700
-// (January 2, 15:04:05, 2006, in time zone seven hours west of GMT).
-// That value is recorded as the constant named Layout, listed below. As a Unix
-// time, this is 1136239445. Since MST is GMT-0700, the reference would be
-// printed by the Unix date command as:
-// Mon Jan 2 15:04:05 MST 2006
-// It is a regrettable historic error that the date uses the American convention
-// of putting the numerical month before the day.
-//
-// The example for Time.Format demonstrates the working of the layout string
-// in detail and is a good reference.
-//
-// Note that the RFC822, RFC850, and RFC1123 formats should be applied
-// only to local times. Applying them to UTC times will use "UTC" as the
-// time zone abbreviation, while strictly speaking those RFCs require the
-// use of "GMT" in that case.
-// In general RFC1123Z should be used instead of RFC1123 for servers
-// that insist on that format, and RFC3339 should be preferred for new protocols.
-// RFC3339, RFC822, RFC822Z, RFC1123, and RFC1123Z are useful for formatting;
-// when used with time.Parse they do not accept all the time formats
-// permitted by the RFCs and they do accept time formats not formally defined.
-// The RFC3339Nano format removes trailing zeros from the seconds field
-// and thus may not sort correctly once formatted.
-//
-// Most programs can use one of the defined constants as the layout passed to
-// Format or Parse. The rest of this comment can be ignored unless you are
-// creating a custom layout string.
-//
-// To define your own format, write down what the reference time would look like
-// formatted your way; see the values of constants like ANSIC, StampMicro or
-// Kitchen for examples. The model is to demonstrate what the reference time
-// looks like so that the Format and Parse methods can apply the same
-// transformation to a general time value.
-//
-// Here is a summary of the components of a layout string. Each element shows by
-// example the formatting of an element of the reference time. Only these values
-// are recognized. Text in the layout string that is not recognized as part of
-// the reference time is echoed verbatim during Format and expected to appear
-// verbatim in the input to Parse.
-//
-// Year: "2006" "06"
-// Month: "Jan" "January"
-// Textual day of the week: "Mon" "Monday"
-// Numeric day of the month: "2" "_2" "02"
-// Numeric day of the year: "__2" "002"
-// Hour: "15" "3" "03" (PM or AM)
-// Minute: "4" "04"
-// Second: "5" "05"
-// AM/PM mark: "PM"
-//
-// Numeric time zone offsets format as follows:
-// "-0700" ±hhmm
-// "-07:00" ±hh:mm
-// "-07" ±hh
-// Replacing the sign in the format with a Z triggers
-// the ISO 8601 behavior of printing Z instead of an
-// offset for the UTC zone. Thus:
-// "Z0700" Z or ±hhmm
-// "Z07:00" Z or ±hh:mm
-// "Z07" Z or ±hh
-//
-// Within the format string, the underscores in "_2" and "__2" represent spaces
-// that may be replaced by digits if the following number has multiple digits,
-// for compatibility with fixed-width Unix time formats. A leading zero represents
-// a zero-padded value.
-//
-// The formats __2 and 002 are space-padded and zero-padded
-// three-character day of year; there is no unpadded day of year format.
-//
-// A comma or decimal point followed by one or more zeros represents
-// a fractional second, printed to the given number of decimal places.
-// A comma or decimal point followed by one or more nines represents
-// a fractional second, printed to the given number of decimal places, with
-// trailing zeros removed.
-// For example "15:04:05,000" or "15:04:05.000" formats or parses with
-// millisecond precision.
-//
-// Some valid layouts are invalid time values for time.Parse, due to formats
-// such as _ for space padding and Z for zone information.
-//
-const (
- Layout = "01/02 03:04:05PM '06 -0700" // The reference time, in numerical order.
- ANSIC = "Mon Jan _2 15:04:05 2006"
- UnixDate = "Mon Jan _2 15:04:05 MST 2006"
- RubyDate = "Mon Jan 02 15:04:05 -0700 2006"
- RFC822 = "02 Jan 06 15:04 MST"
- RFC822Z = "02 Jan 06 15:04 -0700" // RFC822 with numeric zone
- RFC850 = "Monday, 02-Jan-06 15:04:05 MST"
- RFC1123 = "Mon, 02 Jan 2006 15:04:05 MST"
- RFC1123Z = "Mon, 02 Jan 2006 15:04:05 -0700" // RFC1123 with numeric zone
- RFC3339 = "2006-01-02T15:04:05Z07:00"
- RFC3339Nano = "2006-01-02T15:04:05.999999999Z07:00"
- Kitchen = "3:04PM"
- // Handy time stamps.
- Stamp = "Jan _2 15:04:05"
- StampMilli = "Jan _2 15:04:05.000"
- StampMicro = "Jan _2 15:04:05.000000"
- StampNano = "Jan _2 15:04:05.000000000"
-)
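-
-// exampleLayouts is an illustrative sketch, not part of the original file:
-// each constant above is just the reference time rendered one way, so
-// formatting the reference time with a layout reproduces that layout.
-func exampleLayouts() {
- ref := Date(2006, January, 2, 15, 4, 5, 0, FixedZone("MST", -7*60*60))
- _ = ref.Format(RFC1123) // "Mon, 02 Jan 2006 15:04:05 MST"
- t, err := Parse(Kitchen, "3:04PM") // Jan 1, year 0, 15:04:00 UTC
- _, _ = t, err
-}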
-
-const (
- _ = iota
- stdLongMonth = iota + stdNeedDate // "January"
- stdMonth // "Jan"
- stdNumMonth // "1"
- stdZeroMonth // "01"
- stdLongWeekDay // "Monday"
- stdWeekDay // "Mon"
- stdDay // "2"
- stdUnderDay // "_2"
- stdZeroDay // "02"
- stdUnderYearDay // "__2"
- stdZeroYearDay // "002"
- stdHour = iota + stdNeedClock // "15"
- stdHour12 // "3"
- stdZeroHour12 // "03"
- stdMinute // "4"
- stdZeroMinute // "04"
- stdSecond // "5"
- stdZeroSecond // "05"
- stdLongYear = iota + stdNeedDate // "2006"
- stdYear // "06"
- stdPM = iota + stdNeedClock // "PM"
- stdpm // "pm"
- stdTZ = iota // "MST"
- stdISO8601TZ // "Z0700" // prints Z for UTC
- stdISO8601SecondsTZ // "Z070000"
- stdISO8601ShortTZ // "Z07"
- stdISO8601ColonTZ // "Z07:00" // prints Z for UTC
- stdISO8601ColonSecondsTZ // "Z07:00:00"
- stdNumTZ // "-0700" // always numeric
- stdNumSecondsTz // "-070000"
- stdNumShortTZ // "-07" // always numeric
- stdNumColonTZ // "-07:00" // always numeric
- stdNumColonSecondsTZ // "-07:00:00"
- stdFracSecond0 // ".0", ".00", ... , trailing zeros included
- stdFracSecond9 // ".9", ".99", ..., trailing zeros omitted
-
- stdNeedDate = 1 << 8 // need month, day, year
- stdNeedClock = 2 << 8 // need hour, minute, second
- stdArgShift = 16 // extra argument in high bits, above low stdArgShift
- stdSeparatorShift = 28 // extra argument in high 4 bits for fractional second separators
- stdMask = 1<<stdArgShift - 1 // mask out argument
-)
-
-// std0x records the std values for "01", "02", ..., "06".
-var std0x = [...]int{stdZeroMonth, stdZeroDay, stdZeroHour12, stdZeroMinute, stdZeroSecond, stdYear}
-
-// startsWithLowerCase reports whether the string has a lower-case letter at the beginning.
-// Its purpose is to prevent matching strings like "Month" when looking for "Mon".
-func startsWithLowerCase(str string) bool {
- if len(str) == 0 {
- return false
- }
- c := str[0]
- return 'a' <= c && c <= 'z'
-}
-
-// nextStdChunk finds the first occurrence of a std string in
-// layout and returns the text before, the std string, and the text after.
-func nextStdChunk(layout string) (prefix string, std int, suffix string) {
- for i := 0; i < len(layout); i++ {
- switch c := int(layout[i]); c {
- case 'J': // January, Jan
- if len(layout) >= i+3 && layout[i:i+3] == "Jan" {
- if len(layout) >= i+7 && layout[i:i+7] == "January" {
- return layout[0:i], stdLongMonth, layout[i+7:]
- }
- if !startsWithLowerCase(layout[i+3:]) {
- return layout[0:i], stdMonth, layout[i+3:]
- }
- }
-
- case 'M': // Monday, Mon, MST
- if len(layout) >= i+3 {
- if layout[i:i+3] == "Mon" {
- if len(layout) >= i+6 && layout[i:i+6] == "Monday" {
- return layout[0:i], stdLongWeekDay, layout[i+6:]
- }
- if !startsWithLowerCase(layout[i+3:]) {
- return layout[0:i], stdWeekDay, layout[i+3:]
- }
- }
- if layout[i:i+3] == "MST" {
- return layout[0:i], stdTZ, layout[i+3:]
- }
- }
-
- case '0': // 01, 02, 03, 04, 05, 06, 002
- if len(layout) >= i+2 && '1' <= layout[i+1] && layout[i+1] <= '6' {
- return layout[0:i], std0x[layout[i+1]-'1'], layout[i+2:]
- }
- if len(layout) >= i+3 && layout[i+1] == '0' && layout[i+2] == '2' {
- return layout[0:i], stdZeroYearDay, layout[i+3:]
- }
-
- case '1': // 15, 1
- if len(layout) >= i+2 && layout[i+1] == '5' {
- return layout[0:i], stdHour, layout[i+2:]
- }
- return layout[0:i], stdNumMonth, layout[i+1:]
-
- case '2': // 2006, 2
- if len(layout) >= i+4 && layout[i:i+4] == "2006" {
- return layout[0:i], stdLongYear, layout[i+4:]
- }
- return layout[0:i], stdDay, layout[i+1:]
-
- case '_': // _2, _2006, __2
- if len(layout) >= i+2 && layout[i+1] == '2' {
- // _2006 is really a literal _, followed by stdLongYear
- if len(layout) >= i+5 && layout[i+1:i+5] == "2006" {
- return layout[0 : i+1], stdLongYear, layout[i+5:]
- }
- return layout[0:i], stdUnderDay, layout[i+2:]
- }
- if len(layout) >= i+3 && layout[i+1] == '_' && layout[i+2] == '2' {
- return layout[0:i], stdUnderYearDay, layout[i+3:]
- }
-
- case '3':
- return layout[0:i], stdHour12, layout[i+1:]
-
- case '4':
- return layout[0:i], stdMinute, layout[i+1:]
-
- case '5':
- return layout[0:i], stdSecond, layout[i+1:]
-
- case 'P': // PM
- if len(layout) >= i+2 && layout[i+1] == 'M' {
- return layout[0:i], stdPM, layout[i+2:]
- }
-
- case 'p': // pm
- if len(layout) >= i+2 && layout[i+1] == 'm' {
- return layout[0:i], stdpm, layout[i+2:]
- }
-
- case '-': // -070000, -07:00:00, -0700, -07:00, -07
- if len(layout) >= i+7 && layout[i:i+7] == "-070000" {
- return layout[0:i], stdNumSecondsTz, layout[i+7:]
- }
- if len(layout) >= i+9 && layout[i:i+9] == "-07:00:00" {
- return layout[0:i], stdNumColonSecondsTZ, layout[i+9:]
- }
- if len(layout) >= i+5 && layout[i:i+5] == "-0700" {
- return layout[0:i], stdNumTZ, layout[i+5:]
- }
- if len(layout) >= i+6 && layout[i:i+6] == "-07:00" {
- return layout[0:i], stdNumColonTZ, layout[i+6:]
- }
- if len(layout) >= i+3 && layout[i:i+3] == "-07" {
- return layout[0:i], stdNumShortTZ, layout[i+3:]
- }
-
- case 'Z': // Z070000, Z07:00:00, Z0700, Z07:00, Z07
- if len(layout) >= i+7 && layout[i:i+7] == "Z070000" {
- return layout[0:i], stdISO8601SecondsTZ, layout[i+7:]
- }
- if len(layout) >= i+9 && layout[i:i+9] == "Z07:00:00" {
- return layout[0:i], stdISO8601ColonSecondsTZ, layout[i+9:]
- }
- if len(layout) >= i+5 && layout[i:i+5] == "Z0700" {
- return layout[0:i], stdISO8601TZ, layout[i+5:]
- }
- if len(layout) >= i+6 && layout[i:i+6] == "Z07:00" {
- return layout[0:i], stdISO8601ColonTZ, layout[i+6:]
- }
- if len(layout) >= i+3 && layout[i:i+3] == "Z07" {
- return layout[0:i], stdISO8601ShortTZ, layout[i+3:]
- }
-
- case '.', ',': // ,000, or .000, or ,999, or .999 - repeated digits for fractional seconds.
- if i+1 < len(layout) && (layout[i+1] == '0' || layout[i+1] == '9') {
- ch := layout[i+1]
- j := i + 1
- for j < len(layout) && layout[j] == ch {
- j++
- }
- // String of digits must end here - only fractional second is all digits.
- if !isDigit(layout, j) {
- code := stdFracSecond0
- if layout[i+1] == '9' {
- code = stdFracSecond9
- }
- std := stdFracSecond(code, j-(i+1), c)
- return layout[0:i], std, layout[j:]
- }
- }
- }
- }
- return layout, 0, ""
-}
-
-var longDayNames = []string{
- "Sunday",
- "Monday",
- "Tuesday",
- "Wednesday",
- "Thursday",
- "Friday",
- "Saturday",
-}
-
-var shortDayNames = []string{
- "Sun",
- "Mon",
- "Tue",
- "Wed",
- "Thu",
- "Fri",
- "Sat",
-}
-
-var shortMonthNames = []string{
- "Jan",
- "Feb",
- "Mar",
- "Apr",
- "May",
- "Jun",
- "Jul",
- "Aug",
- "Sep",
- "Oct",
- "Nov",
- "Dec",
-}
-
-var longMonthNames = []string{
- "January",
- "February",
- "March",
- "April",
- "May",
- "June",
- "July",
- "August",
- "September",
- "October",
- "November",
- "December",
-}
-
-// match reports whether s1 and s2 match ignoring case.
-// It is assumed s1 and s2 are the same length.
-func match(s1, s2 string) bool {
- for i := 0; i < len(s1); i++ {
- c1 := s1[i]
- c2 := s2[i]
- if c1 != c2 {
- // Switch to lower-case; 'a'-'A' is known to be a single bit.
- c1 |= 'a' - 'A'
- c2 |= 'a' - 'A'
- if c1 != c2 || c1 < 'a' || c1 > 'z' {
- return false
- }
- }
- }
- return true
-}
-
-func lookup(tab []string, val string) (int, string, error) {
- for i, v := range tab {
- if len(val) >= len(v) && match(val[0:len(v)], v) {
- return i, val[len(v):], nil
- }
- }
- return -1, val, errBad
-}
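-
-// lookupSketch is a hypothetical helper, for illustration only: table
-// indices are zero-based and the unmatched tail is returned, so
-// lookup(shortMonthNames, "Feb 2021") yields (1, " 2021", nil), while
-// match("JAN", "Jan") is true because ASCII case differs in a single bit.
-func lookupSketch() {
- i, rest, err := lookup(shortMonthNames, "Feb 2021")
- _, _, _ = i, rest, err
-}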
-
-// appendInt appends the decimal form of x to b and returns the result.
-// If the decimal form (excluding sign) is shorter than width, the result is padded with leading 0's.
-// Duplicates functionality in strconv, but avoids dependency.
-func appendInt(b []byte, x int, width int) []byte {
- u := uint(x)
- if x < 0 {
- b = append(b, '-')
- u = uint(-x)
- }
-
- // Assemble decimal in reverse order.
- var buf [20]byte
- i := len(buf)
- for u >= 10 {
- i--
- q := u / 10
- buf[i] = byte('0' + u - q*10)
- u = q
- }
- i--
- buf[i] = byte('0' + u)
-
- // Add 0-padding.
- for w := len(buf) - i; w < width; w++ {
- b = append(b, '0')
- }
-
- return append(b, buf[i:]...)
-}
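-
-// appendIntSketch is a hypothetical helper showing the padding rule:
-// the width counts digits only, so a sign can push past it.
-func appendIntSketch() {
- _ = string(appendInt(nil, 7, 3)) // "007"
- _ = string(appendInt(nil, -42, 4)) // "-0042"
-}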
-
-// Never printed, just needs to be non-nil for return by atoi.
-var atoiError = errors.New("time: invalid number")
-
-// Duplicates functionality in strconv, but avoids dependency.
-func atoi(s string) (x int, err error) {
- neg := false
- if s != "" && (s[0] == '-' || s[0] == '+') {
- neg = s[0] == '-'
- s = s[1:]
- }
- q, rem, err := leadingInt(s)
- x = int(q)
- if err != nil || rem != "" {
- return 0, atoiError
- }
- if neg {
- x = -x
- }
- return x, nil
-}
-
-// The "std" value passed to formatNano contains two packed fields: the number of
-// digits after the decimal and the separator character (period or comma).
-// These functions pack and unpack that variable.
-func stdFracSecond(code, n, c int) int {
- // Mask the digit count to 12 bits so it cannot spill past the packed argument field.
- if c == '.' {
- return code | ((n & 0xfff) << stdArgShift)
- }
- return code | ((n & 0xfff) << stdArgShift) | 1<<stdSeparatorShift
-}
-
-func digitsLen(std int) int {
- return (std >> stdArgShift) & 0xfff
-}
-
-func separator(std int) byte {
- if (std >> stdSeparatorShift) == 0 {
- return '.'
- }
- return ','
-}
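-
-// fracSecondSketch is a hypothetical helper tracing the packing above:
-// a ",000" layout element is stdFracSecond0 with 3 digits and a comma flag.
-func fracSecondSketch() {
- std := stdFracSecond(stdFracSecond0, 3, ',')
- _ = digitsLen(std) // 3
- _ = separator(std) // ','
- _ = std & stdMask // stdFracSecond0
-}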
-
-// formatNano appends a fractional second, as nanoseconds, to b
-// and returns the result.
-func formatNano(b []byte, nanosec uint, std int) []byte {
- var (
- n = digitsLen(std)
- separator = separator(std)
- trim = std&stdMask == stdFracSecond9
- )
- u := nanosec
- var buf [9]byte
- for start := len(buf); start > 0; {
- start--
- buf[start] = byte(u%10 + '0')
- u /= 10
- }
-
- if n > 9 {
- n = 9
- }
- if trim {
- for n > 0 && buf[n-1] == '0' {
- n--
- }
- if n == 0 {
- return b
- }
- }
- b = append(b, separator)
- return append(b, buf[:n]...)
-}
-
-// String returns the time formatted using the format string
-// "2006-01-02 15:04:05.999999999 -0700 MST"
-//
-// If the time has a monotonic clock reading, the returned string
-// includes a final field "m=±<value>", where value is the monotonic
-// clock reading formatted as a decimal number of seconds.
-//
-// The returned string is meant for debugging; for a stable serialized
-// representation, use t.MarshalText, t.MarshalBinary, or t.Format
-// with an explicit format string.
-func (t Time) String() string {
- s := t.Format("2006-01-02 15:04:05.999999999 -0700 MST")
-
- // Format monotonic clock reading as m=±ddd.nnnnnnnnn.
- if t.wall&hasMonotonic != 0 {
- m2 := uint64(t.ext)
- sign := byte('+')
- if t.ext < 0 {
- sign = '-'
- m2 = -m2
- }
- m1, m2 := m2/1e9, m2%1e9
- m0, m1 := m1/1e9, m1%1e9
- buf := make([]byte, 0, 24)
- buf = append(buf, " m="...)
- buf = append(buf, sign)
- wid := 0
- if m0 != 0 {
- buf = appendInt(buf, int(m0), 0)
- wid = 9
- }
- buf = appendInt(buf, int(m1), wid)
- buf = append(buf, '.')
- buf = appendInt(buf, int(m2), 9)
- s += string(buf)
- }
- return s
-}
-
-// GoString implements fmt.GoStringer and formats t to be printed in Go source
-// code.
-func (t Time) GoString() string {
- buf := make([]byte, 0, 70)
- buf = append(buf, "time.Date("...)
- buf = appendInt(buf, t.Year(), 0)
- month := t.Month()
- if January <= month && month <= December {
- buf = append(buf, ", time."...)
- buf = append(buf, t.Month().String()...)
- } else {
- // It's difficult to construct a time.Time with a date outside the
- // standard range but we might as well try to handle the case.
- buf = appendInt(buf, int(month), 0)
- }
- buf = append(buf, ", "...)
- buf = appendInt(buf, t.Day(), 0)
- buf = append(buf, ", "...)
- buf = appendInt(buf, t.Hour(), 0)
- buf = append(buf, ", "...)
- buf = appendInt(buf, t.Minute(), 0)
- buf = append(buf, ", "...)
- buf = appendInt(buf, t.Second(), 0)
- buf = append(buf, ", "...)
- buf = appendInt(buf, t.Nanosecond(), 0)
- buf = append(buf, ", "...)
- switch loc := t.Location(); loc {
- case UTC, nil:
- buf = append(buf, "time.UTC"...)
- case Local:
- buf = append(buf, "time.Local"...)
- default:
- // there are several options for how we could display this, none of
- // which are great:
- //
- // - use Location(loc.name), which is not technically valid syntax
- // - use LoadLocation(loc.name), which will cause a syntax error when
- // embedded and also would require us to escape the string without
- // importing fmt or strconv
- // - try to use FixedZone, which would also require escaping the name
- // and would represent e.g. "America/Los_Angeles" daylight saving time
- // shifts inaccurately
- // - use the pointer format, which is no worse than you'd get with the
- // old fmt.Sprintf("%#v", t) format.
- //
- // Of these, Location(loc.name) is the least disruptive. This is an edge
- // case we hope not to hit too often.
- buf = append(buf, `time.Location(`...)
- buf = append(buf, []byte(quote(loc.name))...)
- buf = append(buf, `)`...)
- }
- buf = append(buf, ')')
- return string(buf)
-}
-
-// Format returns a textual representation of the time value formatted according
-// to the layout defined by the argument. See the documentation for the
-// constant called Layout to see how to represent the layout format.
-//
-// The executable example for Time.Format demonstrates the working
-// of the layout string in detail and is a good reference.
-func (t Time) Format(layout string) string {
- const bufSize = 64
- var b []byte
- max := len(layout) + 10
- if max < bufSize {
- var buf [bufSize]byte
- b = buf[:0]
- } else {
- b = make([]byte, 0, max)
- }
- b = t.AppendFormat(b, layout)
- return string(b)
-}
-
-// AppendFormat is like Format but appends the textual
-// representation to b and returns the extended buffer.
-func (t Time) AppendFormat(b []byte, layout string) []byte {
- var (
- name, offset, abs = t.locabs()
-
- year int = -1
- month Month
- day int
- yday int
- hour int = -1
- min int
- sec int
- )
- // Each iteration generates one std value.
- for layout != "" {
- prefix, std, suffix := nextStdChunk(layout)
- if prefix != "" {
- b = append(b, prefix...)
- }
- if std == 0 {
- break
- }
- layout = suffix
-
- // Compute year, month, day if needed.
- if year < 0 && std&stdNeedDate != 0 {
- year, month, day, yday = absDate(abs, true)
- yday++
- }
-
- // Compute hour, minute, second if needed.
- if hour < 0 && std&stdNeedClock != 0 {
- hour, min, sec = absClock(abs)
- }
-
- switch std & stdMask {
- case stdYear:
- y := year
- if y < 0 {
- y = -y
- }
- b = appendInt(b, y%100, 2)
- case stdLongYear:
- b = appendInt(b, year, 4)
- case stdMonth:
- b = append(b, month.String()[:3]...)
- case stdLongMonth:
- m := month.String()
- b = append(b, m...)
- case stdNumMonth:
- b = appendInt(b, int(month), 0)
- case stdZeroMonth:
- b = appendInt(b, int(month), 2)
- case stdWeekDay:
- b = append(b, absWeekday(abs).String()[:3]...)
- case stdLongWeekDay:
- s := absWeekday(abs).String()
- b = append(b, s...)
- case stdDay:
- b = appendInt(b, day, 0)
- case stdUnderDay:
- if day < 10 {
- b = append(b, ' ')
- }
- b = appendInt(b, day, 0)
- case stdZeroDay:
- b = appendInt(b, day, 2)
- case stdUnderYearDay:
- if yday < 100 {
- b = append(b, ' ')
- if yday < 10 {
- b = append(b, ' ')
- }
- }
- b = appendInt(b, yday, 0)
- case stdZeroYearDay:
- b = appendInt(b, yday, 3)
- case stdHour:
- b = appendInt(b, hour, 2)
- case stdHour12:
- // Noon is 12PM, midnight is 12AM.
- hr := hour % 12
- if hr == 0 {
- hr = 12
- }
- b = appendInt(b, hr, 0)
- case stdZeroHour12:
- // Noon is 12PM, midnight is 12AM.
- hr := hour % 12
- if hr == 0 {
- hr = 12
- }
- b = appendInt(b, hr, 2)
- case stdMinute:
- b = appendInt(b, min, 0)
- case stdZeroMinute:
- b = appendInt(b, min, 2)
- case stdSecond:
- b = appendInt(b, sec, 0)
- case stdZeroSecond:
- b = appendInt(b, sec, 2)
- case stdPM:
- if hour >= 12 {
- b = append(b, "PM"...)
- } else {
- b = append(b, "AM"...)
- }
- case stdpm:
- if hour >= 12 {
- b = append(b, "pm"...)
- } else {
- b = append(b, "am"...)
- }
- case stdISO8601TZ, stdISO8601ColonTZ, stdISO8601SecondsTZ, stdISO8601ShortTZ, stdISO8601ColonSecondsTZ, stdNumTZ, stdNumColonTZ, stdNumSecondsTz, stdNumShortTZ, stdNumColonSecondsTZ:
- // Ugly special case. We cheat and take the "Z" variants
- // to mean "the time zone as formatted for ISO 8601".
- if offset == 0 && (std == stdISO8601TZ || std == stdISO8601ColonTZ || std == stdISO8601SecondsTZ || std == stdISO8601ShortTZ || std == stdISO8601ColonSecondsTZ) {
- b = append(b, 'Z')
- break
- }
- zone := offset / 60 // convert to minutes
- absoffset := offset
- if zone < 0 {
- b = append(b, '-')
- zone = -zone
- absoffset = -absoffset
- } else {
- b = append(b, '+')
- }
- b = appendInt(b, zone/60, 2)
- if std == stdISO8601ColonTZ || std == stdNumColonTZ || std == stdISO8601ColonSecondsTZ || std == stdNumColonSecondsTZ {
- b = append(b, ':')
- }
- if std != stdNumShortTZ && std != stdISO8601ShortTZ {
- b = appendInt(b, zone%60, 2)
- }
-
- // append seconds if appropriate
- if std == stdISO8601SecondsTZ || std == stdNumSecondsTz || std == stdNumColonSecondsTZ || std == stdISO8601ColonSecondsTZ {
- if std == stdNumColonSecondsTZ || std == stdISO8601ColonSecondsTZ {
- b = append(b, ':')
- }
- b = appendInt(b, absoffset%60, 2)
- }
-
- case stdTZ:
- if name != "" {
- b = append(b, name...)
- break
- }
- // No time zone known for this time, but we must print one.
- // Use the -0700 format.
- zone := offset / 60 // convert to minutes
- if zone < 0 {
- b = append(b, '-')
- zone = -zone
- } else {
- b = append(b, '+')
- }
- b = appendInt(b, zone/60, 2)
- b = appendInt(b, zone%60, 2)
- case stdFracSecond0, stdFracSecond9:
- b = formatNano(b, uint(t.Nanosecond()), std)
- }
- }
- return b
-}
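-
-// appendFormatSketch is a hypothetical helper: AppendFormat lets a caller
-// reuse one buffer across many timestamps instead of allocating per call.
-func appendFormatSketch(ts []Time) []byte {
- buf := make([]byte, 0, 64)
- for _, t := range ts {
- buf = t.AppendFormat(buf, RFC3339)
- buf = append(buf, '\n')
- }
- return buf
-}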
-
-var errBad = errors.New("bad value for field") // placeholder not passed to user
-
-// ParseError describes a problem parsing a time string.
-type ParseError struct {
- Layout string
- Value string
- LayoutElem string
- ValueElem string
- Message string
-}
-
-// These are borrowed from unicode/utf8 and strconv and replicate behavior in
-// those packages, since we can't take a dependency on either.
-const (
- lowerhex = "0123456789abcdef"
- runeSelf = 0x80
- runeError = '\uFFFD'
-)
-
-func quote(s string) string {
- buf := make([]byte, 1, len(s)+2) // slice will be at least len(s) + quotes
- buf[0] = '"'
- for i, c := range s {
- if c >= runeSelf || c < ' ' {
- // This means you are asking us to parse a time.Duration or
- // time.Location with unprintable or non-ASCII characters in it.
- // We don't expect to hit this case very often. We could try to
- // reproduce strconv.Quote's behavior with full fidelity but
- // given how rarely we expect to hit these edge cases, speed and
- // conciseness are better.
- var width int
- if c == runeError {
- width = 1
- if i+2 < len(s) && s[i:i+3] == string(runeError) {
- width = 3
- }
- } else {
- width = len(string(c))
- }
- for j := 0; j < width; j++ {
- buf = append(buf, `\x`...)
- buf = append(buf, lowerhex[s[i+j]>>4])
- buf = append(buf, lowerhex[s[i+j]&0xF])
- }
- } else {
- if c == '"' || c == '\\' {
- buf = append(buf, '\\')
- }
- buf = append(buf, string(c)...)
- }
- }
- buf = append(buf, '"')
- return string(buf)
-}
-
-// Error returns the string representation of a ParseError.
-func (e *ParseError) Error() string {
- if e.Message == "" {
- return "parsing time " +
- quote(e.Value) + " as " +
- quote(e.Layout) + ": cannot parse " +
- quote(e.ValueElem) + " as " +
- quote(e.LayoutElem)
- }
- return "parsing time " +
- quote(e.Value) + e.Message
-}
-
-// isDigit reports whether s[i] is in range and is a decimal digit.
-func isDigit(s string, i int) bool {
- if len(s) <= i {
- return false
- }
- c := s[i]
- return '0' <= c && c <= '9'
-}
-
-// getnum parses s[0:1] or s[0:2] (fixed forces s[0:2])
-// as a decimal integer and returns the integer and the
-// remainder of the string.
-func getnum(s string, fixed bool) (int, string, error) {
- if !isDigit(s, 0) {
- return 0, s, errBad
- }
- if !isDigit(s, 1) {
- if fixed {
- return 0, s, errBad
- }
- return int(s[0] - '0'), s[1:], nil
- }
- return int(s[0]-'0')*10 + int(s[1]-'0'), s[2:], nil
-}
-
-// getnum3 parses s[0:1], s[0:2], or s[0:3] (fixed forces s[0:3])
-// as a decimal integer and returns the integer and the remainder
-// of the string.
-func getnum3(s string, fixed bool) (int, string, error) {
- var n, i int
- for i = 0; i < 3 && isDigit(s, i); i++ {
- n = n*10 + int(s[i]-'0')
- }
- if i == 0 || fixed && i != 3 {
- return 0, s, errBad
- }
- return n, s[i:], nil
-}
-
-func cutspace(s string) string {
- for len(s) > 0 && s[0] == ' ' {
- s = s[1:]
- }
- return s
-}
-
-// skip removes the given prefix from value,
-// treating runs of space characters as equivalent.
-func skip(value, prefix string) (string, error) {
- for len(prefix) > 0 {
- if prefix[0] == ' ' {
- if len(value) > 0 && value[0] != ' ' {
- return value, errBad
- }
- prefix = cutspace(prefix)
- value = cutspace(value)
- continue
- }
- if len(value) == 0 || value[0] != prefix[0] {
- return value, errBad
- }
- prefix = prefix[1:]
- value = value[1:]
- }
- return value, nil
-}
-
-// Parse parses a formatted string and returns the time value it represents.
-// See the documentation for the constant called Layout to see how to
-// represent the format. The second argument must be parseable using
-// the format string (layout) provided as the first argument.
-//
-// The example for Time.Format demonstrates the working of the layout string
-// in detail and is a good reference.
-//
-// When parsing (only), the input may contain a fractional second
-// field immediately after the seconds field, even if the layout does not
-// signify its presence. In that case either a comma or a decimal point
-// followed by a maximal series of digits is parsed as a fractional second.
-// Fractional seconds are truncated to nanosecond precision.
-//
-// Elements omitted from the layout are assumed to be zero or, when
-// zero is impossible, one, so parsing "3:04pm" returns the time
-// corresponding to Jan 1, year 0, 15:04:00 UTC (note that because the year is
-// 0, this time is before the zero Time).
-// Years must be in the range 0000..9999. The day of the week is checked
-// for syntax but it is otherwise ignored.
-//
-// For layouts specifying the two-digit year 06, a value NN >= 69 will be treated
-// as 19NN and a value NN < 69 will be treated as 20NN.
-//
-// The remainder of this comment describes the handling of time zones.
-//
-// In the absence of a time zone indicator, Parse returns a time in UTC.
-//
-// When parsing a time with a zone offset like -0700, if the offset corresponds
-// to a time zone used by the current location (Local), then Parse uses that
-// location and zone in the returned time. Otherwise it records the time as
-// being in a fabricated location with time fixed at the given zone offset.
-//
-// When parsing a time with a zone abbreviation like MST, if the zone abbreviation
-// has a defined offset in the current location, then that offset is used.
-// The zone abbreviation "UTC" is recognized as UTC regardless of location.
-// If the zone abbreviation is unknown, Parse records the time as being
-// in a fabricated location with the given zone abbreviation and a zero offset.
-// This choice means that such a time can be parsed and reformatted with the
-// same layout losslessly, but the exact instant used in the representation will
-// differ by the actual zone offset. To avoid such problems, prefer time layouts
-// that use a numeric zone offset, or use ParseInLocation.
-func Parse(layout, value string) (Time, error) {
- return parse(layout, value, UTC, Local)
-}
-
-// ParseInLocation is like Parse but differs in two important ways.
-// First, in the absence of time zone information, Parse interprets a time as UTC;
-// ParseInLocation interprets the time as in the given location.
-// Second, when given a zone offset or abbreviation, Parse tries to match it
-// against the Local location; ParseInLocation uses the given location.
-func ParseInLocation(layout, value string, loc *Location) (Time, error) {
- return parse(layout, value, loc, loc)
-}
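-
-// parseLocationSketch is a hypothetical helper; the +03:00 zone is a
-// stand-in. The same input yields different instants depending on which
-// function supplies the missing zone.
-func parseLocationSketch() {
- const v = "2006-01-02 15:04"
- utc, _ := Parse("2006-01-02 15:04", v) // 15:04 UTC
- loc := FixedZone("UTC+3", 3*60*60)
- local, _ := ParseInLocation("2006-01-02 15:04", v, loc) // 15:04 at +03:00
- _ = utc.Equal(local) // false: the instants differ by three hours
-}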
-
-func parse(layout, value string, defaultLocation, local *Location) (Time, error) {
- alayout, avalue := layout, value
- rangeErrString := "" // set if a value is out of range
- amSet := false // do we need to subtract 12 from the hour for midnight?
- pmSet := false // do we need to add 12 to the hour?
-
- // Time being constructed.
- var (
- year int
- month int = -1
- day int = -1
- yday int = -1
- hour int
- min int
- sec int
- nsec int
- z *Location
- zoneOffset int = -1
- zoneName string
- )
-
- // Each iteration processes one std value.
- for {
- var err error
- prefix, std, suffix := nextStdChunk(layout)
- stdstr := layout[len(prefix) : len(layout)-len(suffix)]
- value, err = skip(value, prefix)
- if err != nil {
- return Time{}, &ParseError{alayout, avalue, prefix, value, ""}
- }
- if std == 0 {
- if len(value) != 0 {
- return Time{}, &ParseError{alayout, avalue, "", value, ": extra text: " + quote(value)}
- }
- break
- }
- layout = suffix
- var p string
- switch std & stdMask {
- case stdYear:
- if len(value) < 2 {
- err = errBad
- break
- }
- hold := value
- p, value = value[0:2], value[2:]
- year, err = atoi(p)
- if err != nil {
- value = hold
- } else if year >= 69 { // Unix time starts Dec 31 1969 in some time zones
- year += 1900
- } else {
- year += 2000
- }
- case stdLongYear:
- if len(value) < 4 || !isDigit(value, 0) {
- err = errBad
- break
- }
- p, value = value[0:4], value[4:]
- year, err = atoi(p)
- case stdMonth:
- month, value, err = lookup(shortMonthNames, value)
- month++
- case stdLongMonth:
- month, value, err = lookup(longMonthNames, value)
- month++
- case stdNumMonth, stdZeroMonth:
- month, value, err = getnum(value, std == stdZeroMonth)
- if err == nil && (month <= 0 || 12 < month) {
- rangeErrString = "month"
- }
- case stdWeekDay:
- // Ignore weekday except for error checking.
- _, value, err = lookup(shortDayNames, value)
- case stdLongWeekDay:
- _, value, err = lookup(longDayNames, value)
- case stdDay, stdUnderDay, stdZeroDay:
- if std == stdUnderDay && len(value) > 0 && value[0] == ' ' {
- value = value[1:]
- }
- day, value, err = getnum(value, std == stdZeroDay)
- // Note that we allow any one- or two-digit day here.
- // The month, day, year combination is validated after we've completed parsing.
- case stdUnderYearDay, stdZeroYearDay:
- for i := 0; i < 2; i++ {
- if std == stdUnderYearDay && len(value) > 0 && value[0] == ' ' {
- value = value[1:]
- }
- }
- yday, value, err = getnum3(value, std == stdZeroYearDay)
- // Note that we allow any one-, two-, or three-digit year-day here.
- // The year-day, year combination is validated after we've completed parsing.
- case stdHour:
- hour, value, err = getnum(value, false)
- if hour < 0 || 24 <= hour {
- rangeErrString = "hour"
- }
- case stdHour12, stdZeroHour12:
- hour, value, err = getnum(value, std == stdZeroHour12)
- if hour < 0 || 12 < hour {
- rangeErrString = "hour"
- }
- case stdMinute, stdZeroMinute:
- min, value, err = getnum(value, std == stdZeroMinute)
- if min < 0 || 60 <= min {
- rangeErrString = "minute"
- }
- case stdSecond, stdZeroSecond:
- sec, value, err = getnum(value, std == stdZeroSecond)
- if sec < 0 || 60 <= sec {
- rangeErrString = "second"
- break
- }
- // Special case: do we have a fractional second but no
- // fractional second in the format?
- if len(value) >= 2 && commaOrPeriod(value[0]) && isDigit(value, 1) {
- _, std, _ = nextStdChunk(layout)
- std &= stdMask
- if std == stdFracSecond0 || std == stdFracSecond9 {
- // Fractional second in the layout; proceed normally
- break
- }
- // No fractional second in the layout but we have one in the input.
- n := 2
- for ; n < len(value) && isDigit(value, n); n++ {
- }
- nsec, rangeErrString, err = parseNanoseconds(value, n)
- value = value[n:]
- }
- case stdPM:
- if len(value) < 2 {
- err = errBad
- break
- }
- p, value = value[0:2], value[2:]
- switch p {
- case "PM":
- pmSet = true
- case "AM":
- amSet = true
- default:
- err = errBad
- }
- case stdpm:
- if len(value) < 2 {
- err = errBad
- break
- }
- p, value = value[0:2], value[2:]
- switch p {
- case "pm":
- pmSet = true
- case "am":
- amSet = true
- default:
- err = errBad
- }
- case stdISO8601TZ, stdISO8601ColonTZ, stdISO8601SecondsTZ, stdISO8601ShortTZ, stdISO8601ColonSecondsTZ, stdNumTZ, stdNumShortTZ, stdNumColonTZ, stdNumSecondsTz, stdNumColonSecondsTZ:
- if (std == stdISO8601TZ || std == stdISO8601ShortTZ || std == stdISO8601ColonTZ) && len(value) >= 1 && value[0] == 'Z' {
- value = value[1:]
- z = UTC
- break
- }
- var sign, hour, min, seconds string
- if std == stdISO8601ColonTZ || std == stdNumColonTZ {
- if len(value) < 6 {
- err = errBad
- break
- }
- if value[3] != ':' {
- err = errBad
- break
- }
- sign, hour, min, seconds, value = value[0:1], value[1:3], value[4:6], "00", value[6:]
- } else if std == stdNumShortTZ || std == stdISO8601ShortTZ {
- if len(value) < 3 {
- err = errBad
- break
- }
- sign, hour, min, seconds, value = value[0:1], value[1:3], "00", "00", value[3:]
- } else if std == stdISO8601ColonSecondsTZ || std == stdNumColonSecondsTZ {
- if len(value) < 9 {
- err = errBad
- break
- }
- if value[3] != ':' || value[6] != ':' {
- err = errBad
- break
- }
- sign, hour, min, seconds, value = value[0:1], value[1:3], value[4:6], value[7:9], value[9:]
- } else if std == stdISO8601SecondsTZ || std == stdNumSecondsTz {
- if len(value) < 7 {
- err = errBad
- break
- }
- sign, hour, min, seconds, value = value[0:1], value[1:3], value[3:5], value[5:7], value[7:]
- } else {
- if len(value) < 5 {
- err = errBad
- break
- }
- sign, hour, min, seconds, value = value[0:1], value[1:3], value[3:5], "00", value[5:]
- }
- var hr, mm, ss int
- hr, err = atoi(hour)
- if err == nil {
- mm, err = atoi(min)
- }
- if err == nil {
- ss, err = atoi(seconds)
- }
- zoneOffset = (hr*60+mm)*60 + ss // offset is in seconds
- switch sign[0] {
- case '+':
- case '-':
- zoneOffset = -zoneOffset
- default:
- err = errBad
- }
- case stdTZ:
- // Does it look like a time zone?
- if len(value) >= 3 && value[0:3] == "UTC" {
- z = UTC
- value = value[3:]
- break
- }
- n, ok := parseTimeZone(value)
- if !ok {
- err = errBad
- break
- }
- zoneName, value = value[:n], value[n:]
-
- case stdFracSecond0:
- // stdFracSecond0 requires the exact number of digits as specified in
- // the layout.
- ndigit := 1 + digitsLen(std)
- if len(value) < ndigit {
- err = errBad
- break
- }
- nsec, rangeErrString, err = parseNanoseconds(value, ndigit)
- value = value[ndigit:]
-
- case stdFracSecond9:
- if len(value) < 2 || !commaOrPeriod(value[0]) || value[1] < '0' || '9' < value[1] {
- // Fractional second omitted.
- break
- }
- // Take any number of digits, even more than asked for,
- // because it is what the stdSecond case would do.
- i := 0
- for i < 9 && i+1 < len(value) && '0' <= value[i+1] && value[i+1] <= '9' {
- i++
- }
- nsec, rangeErrString, err = parseNanoseconds(value, 1+i)
- value = value[1+i:]
- }
- if rangeErrString != "" {
- return Time{}, &ParseError{alayout, avalue, stdstr, value, ": " + rangeErrString + " out of range"}
- }
- if err != nil {
- return Time{}, &ParseError{alayout, avalue, stdstr, value, ""}
- }
- }
- if pmSet && hour < 12 {
- hour += 12
- } else if amSet && hour == 12 {
- hour = 0
- }
-
- // Convert yday to day, month.
- if yday >= 0 {
- var d int
- var m int
- if isLeap(year) {
- if yday == 31+29 {
- m = int(February)
- d = 29
- } else if yday > 31+29 {
- yday--
- }
- }
- if yday < 1 || yday > 365 {
- return Time{}, &ParseError{alayout, avalue, "", value, ": day-of-year out of range"}
- }
- if m == 0 {
- m = (yday-1)/31 + 1
- if int(daysBefore[m]) < yday {
- m++
- }
- d = yday - int(daysBefore[m-1])
- }
- // If month, day already seen, yday's m, d must match.
- // Otherwise, set them from m, d.
- if month >= 0 && month != m {
- return Time{}, &ParseError{alayout, avalue, "", value, ": day-of-year does not match month"}
- }
- month = m
- if day >= 0 && day != d {
- return Time{}, &ParseError{alayout, avalue, "", value, ": day-of-year does not match day"}
- }
- day = d
- } else {
- if month < 0 {
- month = int(January)
- }
- if day < 0 {
- day = 1
- }
- }
-
- // Validate the day of the month.
- if day < 1 || day > daysIn(Month(month), year) {
- return Time{}, &ParseError{alayout, avalue, "", value, ": day out of range"}
- }
-
- if z != nil {
- return Date(year, Month(month), day, hour, min, sec, nsec, z), nil
- }
-
- if zoneOffset != -1 {
- t := Date(year, Month(month), day, hour, min, sec, nsec, UTC)
- t.addSec(-int64(zoneOffset))
-
- // Look for local zone with the given offset.
- // If that zone was in effect at the given time, use it.
- name, offset, _, _, _ := local.lookup(t.unixSec())
- if offset == zoneOffset && (zoneName == "" || name == zoneName) {
- t.setLoc(local)
- return t, nil
- }
-
- // Otherwise create fake zone to record offset.
- t.setLoc(FixedZone(zoneName, zoneOffset))
- return t, nil
- }
-
- if zoneName != "" {
- t := Date(year, Month(month), day, hour, min, sec, nsec, UTC)
- // Look for local zone with the given offset.
- // If that zone was in effect at the given time, use it.
- offset, ok := local.lookupName(zoneName, t.unixSec())
- if ok {
- t.addSec(-int64(offset))
- t.setLoc(local)
- return t, nil
- }
-
- // Otherwise, create fake zone with unknown offset.
- if len(zoneName) > 3 && zoneName[:3] == "GMT" {
- offset, _ = atoi(zoneName[3:]) // Guaranteed OK by parseGMT.
- offset *= 3600
- }
- t.setLoc(FixedZone(zoneName, offset))
- return t, nil
- }
-
- // Otherwise, fall back to default.
- return Date(year, Month(month), day, hour, min, sec, nsec, defaultLocation), nil
-}
-
-// parseTimeZone parses a time zone string and returns its length. Time zones
-// are human-generated and unpredictable. We can't do precise error checking.
-// On the other hand, for a correct parse there must be a time zone at the
-// beginning of the string, so it's almost always true that there's one
-// there. We look at the beginning of the string for a run of upper-case letters.
-// If there are more than 5, it's an error.
-// If there are 4 or 5 and the last is a T, it's a time zone.
-// If there are 3, it's a time zone.
-// Otherwise, other than special cases, it's not a time zone.
-// GMT is special because it can have an hour offset.
-func parseTimeZone(value string) (length int, ok bool) {
- if len(value) < 3 {
- return 0, false
- }
- // Special case 1: ChST and MeST are the only zones with a lower-case letter.
- if len(value) >= 4 && (value[:4] == "ChST" || value[:4] == "MeST") {
- return 4, true
- }
- // Special case 2: GMT may have an hour offset; treat it specially.
- if value[:3] == "GMT" {
- length = parseGMT(value)
- return length, true
- }
- // Special case 3: Some time zones are not named, but have the +/-00 format.
- if value[0] == '+' || value[0] == '-' {
- length = parseSignedOffset(value)
- ok := length > 0 // parseSignedOffset returns 0 in case of bad input
- return length, ok
- }
- // How many upper-case letters are there? Need at least three, at most five.
- var nUpper int
- for nUpper = 0; nUpper < 6; nUpper++ {
- if nUpper >= len(value) {
- break
- }
- if c := value[nUpper]; c < 'A' || 'Z' < c {
- break
- }
- }
- switch nUpper {
- case 0, 1, 2, 6:
- return 0, false
- case 5: // Must end in T to match.
- if value[4] == 'T' {
- return 5, true
- }
- case 4:
- // Must end in T, except one special case.
- if value[3] == 'T' || value[:4] == "WITA" {
- return 4, true
- }
- case 3:
- return 3, true
- }
- return 0, false
-}
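-
-// Illustrative classifications (hypothetical examples, derived from the
-// rules above):
-// "EST" -> (3, true) "AEST" -> (4, true) "WITA" -> (4, true)
-// "GMT-5" -> (5, true) "+03" -> (3, true) "Mon" -> (0, false)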
-
-// parseGMT parses a GMT time zone. The input string is known to start "GMT".
-// The function checks whether that is followed by a sign and a number in the
-// range -23 through +23 excluding zero.
-func parseGMT(value string) int {
- value = value[3:]
- if len(value) == 0 {
- return 3
- }
-
- return 3 + parseSignedOffset(value)
-}
-
-// parseSignedOffset parses a signed timezone offset (e.g. "+03" or "-04").
-// The function checks for a signed number in the range -23 through +23 excluding zero.
-// It returns the length of the parsed offset string, or 0 if the input is not a valid offset.
-func parseSignedOffset(value string) int {
- sign := value[0]
- if sign != '-' && sign != '+' {
- return 0
- }
- x, rem, err := leadingInt(value[1:])
-
- // fail if nothing consumed by leadingInt
- if err != nil || value[1:] == rem {
- return 0
- }
- if x > 23 {
- return 0
- }
- return len(value) - len(rem)
-}
-
-func commaOrPeriod(b byte) bool {
- return b == '.' || b == ','
-}
-
-func parseNanoseconds(value string, nbytes int) (ns int, rangeErrString string, err error) {
- if !commaOrPeriod(value[0]) {
- err = errBad
- return
- }
- if nbytes > 10 {
- value = value[:10]
- nbytes = 10
- }
- if ns, err = atoi(value[1:nbytes]); err != nil {
- return
- }
- if ns < 0 {
- rangeErrString = "fractional second"
- return
- }
- // We need nanoseconds, which means scaling by the number
- // of missing digits in the format, maximum length 10.
- scaleDigits := 10 - nbytes
- for i := 0; i < scaleDigits; i++ {
- ns *= 10
- }
- return
-}
-
-var errLeadingInt = errors.New("time: bad [0-9]*") // never printed
-
-// leadingInt consumes the leading [0-9]* from s.
-func leadingInt(s string) (x uint64, rem string, err error) {
- i := 0
- for ; i < len(s); i++ {
- c := s[i]
- if c < '0' || c > '9' {
- break
- }
- if x > 1<<63/10 {
- // overflow
- return 0, "", errLeadingInt
- }
- x = x*10 + uint64(c) - '0'
- if x > 1<<63 {
- // overflow
- return 0, "", errLeadingInt
- }
- }
- return x, s[i:], nil
-}
-
-// leadingFraction consumes the leading [0-9]* from s.
-// It is used only for fractions, so does not return an error on overflow,
-// it just stops accumulating precision.
-func leadingFraction(s string) (x uint64, scale float64, rem string) {
- i := 0
- scale = 1
- overflow := false
- for ; i < len(s); i++ {
- c := s[i]
- if c < '0' || c > '9' {
- break
- }
- if overflow {
- continue
- }
- if x > (1<<63-1)/10 {
- // It's possible for overflow to give a positive number, so take care.
- overflow = true
- continue
- }
- y := x*10 + uint64(c) - '0'
- if y > 1<<63 {
- overflow = true
- continue
- }
- x = y
- scale *= 10
- }
- return x, scale, s[i:]
-}
-
-var unitMap = map[string]uint64{
- "ns": uint64(Nanosecond),
- "us": uint64(Microsecond),
- "µs": uint64(Microsecond), // U+00B5 = micro symbol
- "μs": uint64(Microsecond), // U+03BC = Greek letter mu
- "ms": uint64(Millisecond),
- "s": uint64(Second),
- "m": uint64(Minute),
- "h": uint64(Hour),
-}
-
-// ParseDuration parses a duration string.
-// A duration string is a possibly signed sequence of
-// decimal numbers, each with optional fraction and a unit suffix,
-// such as "300ms", "-1.5h" or "2h45m".
-// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
-func ParseDuration(s string) (Duration, error) {
- // [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
- orig := s
- var d uint64
- neg := false
-
- // Consume [-+]?
- if s != "" {
- c := s[0]
- if c == '-' || c == '+' {
- neg = c == '-'
- s = s[1:]
- }
- }
- // Special case: if all that is left is "0", this is zero.
- if s == "0" {
- return 0, nil
- }
- if s == "" {
- return 0, errors.New("time: invalid duration " + quote(orig))
- }
- for s != "" {
- var (
- v, f uint64 // integers before, after decimal point
- scale float64 = 1 // value = v + f/scale
- )
-
- var err error
-
- // The next character must be [0-9.]
- if !(s[0] == '.' || '0' <= s[0] && s[0] <= '9') {
- return 0, errors.New("time: invalid duration " + quote(orig))
- }
- // Consume [0-9]*
- pl := len(s)
- v, s, err = leadingInt(s)
- if err != nil {
- return 0, errors.New("time: invalid duration " + quote(orig))
- }
- pre := pl != len(s) // whether we consumed anything before a period
-
- // Consume (\.[0-9]*)?
- post := false
- if s != "" && s[0] == '.' {
- s = s[1:]
- pl := len(s)
- f, scale, s = leadingFraction(s)
- post = pl != len(s)
- }
- if !pre && !post {
- // no digits (e.g. ".s" or "-.s")
- return 0, errors.New("time: invalid duration " + quote(orig))
- }
-
- // Consume unit.
- i := 0
- for ; i < len(s); i++ {
- c := s[i]
- if c == '.' || '0' <= c && c <= '9' {
- break
- }
- }
- if i == 0 {
- return 0, errors.New("time: missing unit in duration " + quote(orig))
- }
- u := s[:i]
- s = s[i:]
- unit, ok := unitMap[u]
- if !ok {
- return 0, errors.New("time: unknown unit " + quote(u) + " in duration " + quote(orig))
- }
- if v > 1<<63/unit {
- // overflow
- return 0, errors.New("time: invalid duration " + quote(orig))
- }
- v *= unit
- if f > 0 {
- // float64 is needed to be nanosecond accurate for fractions of hours.
- // v >= 0 && (f*unit/scale) <= 3.6e+12 (ns/h, h is the largest unit)
- v += uint64(float64(f) * (float64(unit) / scale))
- if v > 1<<63 {
- // overflow
- return 0, errors.New("time: invalid duration " + quote(orig))
- }
- }
- d += v
- if d > 1<<63 {
- return 0, errors.New("time: invalid duration " + quote(orig))
- }
- }
- if neg {
- return -Duration(d), nil
- }
- if d > 1<<63-1 {
- return 0, errors.New("time: invalid duration " + quote(orig))
- }
- return Duration(d), nil
-}
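-
-// parseDurationSketch is a hypothetical helper: units compose additively
-// and fractions carry into smaller units.
-func parseDurationSketch() {
- d, _ := ParseDuration("2h45m")
- _ = d == 2*Hour+45*Minute // true
- d, _ = ParseDuration("-1.5h")
- _ = d == -90*Minute // true
-}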
diff --git a/contrib/go/_std_1.18/src/time/sleep.go b/contrib/go/_std_1.18/src/time/sleep.go
deleted file mode 100644
index 1ffaabec67..0000000000
--- a/contrib/go/_std_1.18/src/time/sleep.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package time
-
-// Sleep pauses the current goroutine for at least the duration d.
-// A negative or zero duration causes Sleep to return immediately.
-func Sleep(d Duration)
-
-// Interface to timers implemented in package runtime.
-// Must be in sync with ../runtime/time.go:/^type timer
-type runtimeTimer struct {
- pp uintptr
- when int64
- period int64
- f func(any, uintptr) // NOTE: must not be closure
- arg any
- seq uintptr
- nextwhen int64
- status uint32
-}
-
-// when is a helper function for setting the 'when' field of a runtimeTimer.
-// It returns the time, in nanoseconds, that is Duration d in the future.
-// If d is negative, it is ignored. If the returned value would be less than
-// zero because of an overflow, MaxInt64 is returned.
-func when(d Duration) int64 {
- if d <= 0 {
- return runtimeNano()
- }
- t := runtimeNano() + int64(d)
- if t < 0 {
- // N.B. runtimeNano() and d are always positive, so addition
- // (including overflow) will never result in t == 0.
- t = 1<<63 - 1 // math.MaxInt64
- }
- return t
-}
-
-func startTimer(*runtimeTimer)
-func stopTimer(*runtimeTimer) bool
-func resetTimer(*runtimeTimer, int64) bool
-func modTimer(t *runtimeTimer, when, period int64, f func(any, uintptr), arg any, seq uintptr)
-
-// The Timer type represents a single event.
-// When the Timer expires, the current time will be sent on C,
-// unless the Timer was created by AfterFunc.
-// A Timer must be created with NewTimer or AfterFunc.
-type Timer struct {
- C <-chan Time
- r runtimeTimer
-}
-
-// Stop prevents the Timer from firing.
-// It returns true if the call stops the timer, false if the timer has already
-// expired or been stopped.
-// Stop does not close the channel, to prevent a read from the channel succeeding
-// incorrectly.
-//
-// To ensure the channel is empty after a call to Stop, check the
-// return value and drain the channel.
-// For example, assuming the program has not received from t.C already:
-//
-// if !t.Stop() {
-// <-t.C
-// }
-//
-// This cannot be done concurrently with other receives from the Timer's
-// channel or other calls to the Timer's Stop method.
-//
-// For a timer created with AfterFunc(d, f), if t.Stop returns false, then the timer
-// has already expired and the function f has been started in its own goroutine;
-// Stop does not wait for f to complete before returning.
-// If the caller needs to know whether f is completed, it must coordinate
-// with f explicitly.
-func (t *Timer) Stop() bool {
- if t.r.f == nil {
- panic("time: Stop called on uninitialized Timer")
- }
- return stopTimer(&t.r)
-}
-
-// NewTimer creates a new Timer that will send
-// the current time on its channel after at least duration d.
-func NewTimer(d Duration) *Timer {
- c := make(chan Time, 1)
- t := &Timer{
- C: c,
- r: runtimeTimer{
- when: when(d),
- f: sendTime,
- arg: c,
- },
- }
- startTimer(&t.r)
- return t
-}
-
-// Reset changes the timer to expire after duration d.
-// It returns true if the timer had been active, false if the timer had
-// expired or been stopped.
-//
-// For a Timer created with NewTimer, Reset should be invoked only on
-// stopped or expired timers with drained channels.
-//
-// If a program has already received a value from t.C, the timer is known
-// to have expired and the channel drained, so t.Reset can be used directly.
-// If a program has not yet received a value from t.C, however,
-// the timer must be stopped and—if Stop reports that the timer expired
-// before being stopped—the channel explicitly drained:
-//
-// if !t.Stop() {
-// <-t.C
-// }
-// t.Reset(d)
-//
-// This should not be done concurrently with other receives from the Timer's
-// channel.
-//
-// Note that it is not possible to use Reset's return value correctly, as there
-// is a race condition between draining the channel and the new timer expiring.
-// Reset should always be invoked on stopped or expired timers, as described above.
-// The return value exists to preserve compatibility with existing programs.
-//
-// For a Timer created with AfterFunc(d, f), Reset either reschedules
-// when f will run, in which case Reset returns true, or schedules f
-// to run again, in which case it returns false.
-// When Reset returns false, Reset neither waits for the prior f to
-// complete before returning nor does it guarantee that the subsequent
-// goroutine running f does not run concurrently with the prior
-// one. If the caller needs to know whether the prior execution of
-// f is completed, it must coordinate with f explicitly.
-func (t *Timer) Reset(d Duration) bool {
- if t.r.f == nil {
- panic("time: Reset called on uninitialized Timer")
- }
- w := when(d)
- return resetTimer(&t.r, w)
-}
-
-// sendTime does a non-blocking send of the current time on c.
-func sendTime(c any, seq uintptr) {
- select {
- case c.(chan Time) <- Now():
- default:
- }
-}
-
-// After waits for the duration to elapse and then sends the current time
-// on the returned channel.
-// It is equivalent to NewTimer(d).C.
-// The underlying Timer is not recovered by the garbage collector
-// until the timer fires. If efficiency is a concern, use NewTimer
-// instead and call Timer.Stop if the timer is no longer needed.
-func After(d Duration) <-chan Time {
- return NewTimer(d).C
-}
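-
-// afterSketch is a hypothetical helper (ch is a stand-in): the common
-// timeout pattern built on After. On hot paths, prefer NewTimer with an
-// explicit Stop so the timer does not live until it fires.
-func afterSketch(ch <-chan int) (int, bool) {
- select {
- case v := <-ch:
- return v, true
- case <-After(100 * Millisecond):
- return 0, false // timed out
- }
-}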
-
-// AfterFunc waits for the duration to elapse and then calls f
-// in its own goroutine. It returns a Timer that can
-// be used to cancel the call using its Stop method.
-func AfterFunc(d Duration, f func()) *Timer {
- t := &Timer{
- r: runtimeTimer{
- when: when(d),
- f: goFunc,
- arg: f,
- },
- }
- startTimer(&t.r)
- return t
-}
-
-func goFunc(arg any, seq uintptr) {
- go arg.(func())()
-}
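-
-// afterFuncSketch is a hypothetical helper: f runs once in its own
-// goroutine unless Stop wins the race first.
-func afterFuncSketch(f func()) func() bool {
- t := AfterFunc(5*Second, f)
- return t.Stop // returns false if f has already started
-}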
diff --git a/contrib/go/_std_1.18/src/time/sys_unix.go b/contrib/go/_std_1.18/src/time/sys_unix.go
deleted file mode 100644
index a949a6af22..0000000000
--- a/contrib/go/_std_1.18/src/time/sys_unix.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris
-
-package time
-
-import (
- "errors"
- "syscall"
-)
-
-// for testing: whatever interrupts a sleep
-func interrupt() {
- syscall.Kill(syscall.Getpid(), syscall.SIGCHLD)
-}
-
-func open(name string) (uintptr, error) {
- fd, err := syscall.Open(name, syscall.O_RDONLY, 0)
- if err != nil {
- return 0, err
- }
- return uintptr(fd), nil
-}
-
-func read(fd uintptr, buf []byte) (int, error) {
- return syscall.Read(int(fd), buf)
-}
-
-func closefd(fd uintptr) {
- syscall.Close(int(fd))
-}
-
-func preadn(fd uintptr, buf []byte, off int) error {
- whence := seekStart
- if off < 0 {
- whence = seekEnd
- }
- if _, err := syscall.Seek(int(fd), int64(off), whence); err != nil {
- return err
- }
- for len(buf) > 0 {
- m, err := syscall.Read(int(fd), buf)
- if m <= 0 {
- if err == nil {
- return errors.New("short read")
- }
- return err
- }
- buf = buf[m:]
- }
- return nil
-}
diff --git a/contrib/go/_std_1.18/src/time/tick.go b/contrib/go/_std_1.18/src/time/tick.go
deleted file mode 100644
index babf865aeb..0000000000
--- a/contrib/go/_std_1.18/src/time/tick.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package time
-
-import "errors"
-
-// A Ticker holds a channel that delivers “ticks” of a clock
-// at intervals.
-type Ticker struct {
- C <-chan Time // The channel on which the ticks are delivered.
- r runtimeTimer
-}
-
-// NewTicker returns a new Ticker containing a channel that will send
-// the current time on the channel after each tick. The period of the
-// ticks is specified by the duration argument. The ticker will adjust
-// the time interval or drop ticks to make up for slow receivers.
-// The duration d must be greater than zero; if not, NewTicker will
-// panic. Stop the ticker to release associated resources.
-func NewTicker(d Duration) *Ticker {
- if d <= 0 {
- panic(errors.New("non-positive interval for NewTicker"))
- }
- // Give the channel a 1-element time buffer.
- // If the client falls behind while reading, we drop ticks
- // on the floor until the client catches up.
- c := make(chan Time, 1)
- t := &Ticker{
- C: c,
- r: runtimeTimer{
- when: when(d),
- period: int64(d),
- f: sendTime,
- arg: c,
- },
- }
- startTimer(&t.r)
- return t
-}
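-
-// tickerSketch is a hypothetical helper (done and work are stand-ins):
-// receive in a loop and always Stop the ticker so its timer is released.
-func tickerSketch(done <-chan struct{}, work func(Time)) {
- t := NewTicker(Second)
- defer t.Stop()
- for {
- select {
- case now := <-t.C:
- work(now)
- case <-done:
- return
- }
- }
-}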
-
-// Stop turns off a ticker. After Stop, no more ticks will be sent.
-// Stop does not close the channel, to prevent a concurrent goroutine
-// reading from the channel from seeing an erroneous "tick".
-func (t *Ticker) Stop() {
- stopTimer(&t.r)
-}
-
-// Reset stops a ticker and resets its period to the specified duration.
-// The next tick will arrive after the new period elapses. The duration d
-// must be greater than zero; if not, Reset will panic.
-func (t *Ticker) Reset(d Duration) {
- if d <= 0 {
- panic("non-positive interval for Ticker.Reset")
- }
- if t.r.f == nil {
- panic("time: Reset called on uninitialized Ticker")
- }
- modTimer(&t.r, when(d), int64(d), t.r.f, t.r.arg, t.r.seq)
-}
-
-// Tick is a convenience wrapper for NewTicker providing access to the ticking
-// channel only. While Tick is useful for clients that have no need to shut down
-// the Ticker, be aware that without a way to shut it down the underlying
-// Ticker cannot be recovered by the garbage collector; it "leaks".
-// Unlike NewTicker, Tick will return nil if d <= 0.
-func Tick(d Duration) <-chan Time {
- if d <= 0 {
- return nil
- }
- return NewTicker(d).C
-}
diff --git a/contrib/go/_std_1.18/src/time/time.go b/contrib/go/_std_1.18/src/time/time.go
deleted file mode 100644
index 8046ff508b..0000000000
--- a/contrib/go/_std_1.18/src/time/time.go
+++ /dev/null
@@ -1,1581 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package time provides functionality for measuring and displaying time.
-//
-// The calendrical calculations always assume a Gregorian calendar, with
-// no leap seconds.
-//
-// Monotonic Clocks
-//
-// Operating systems provide both a “wall clock,” which is subject to
-// changes for clock synchronization, and a “monotonic clock,” which is
-// not. The general rule is that the wall clock is for telling time and
-// the monotonic clock is for measuring time. Rather than split the API,
-// in this package the Time returned by time.Now contains both a wall
-// clock reading and a monotonic clock reading; later time-telling
-// operations use the wall clock reading, but later time-measuring
-// operations, specifically comparisons and subtractions, use the
-// monotonic clock reading.
-//
-// For example, this code always computes a positive elapsed time of
-// approximately 20 milliseconds, even if the wall clock is changed during
-// the operation being timed:
-//
-// start := time.Now()
-// ... operation that takes 20 milliseconds ...
-// t := time.Now()
-// elapsed := t.Sub(start)
-//
-// Other idioms, such as time.Since(start), time.Until(deadline), and
-// time.Now().Before(deadline), are similarly robust against wall clock
-// resets.
-//
-// The rest of this section gives the precise details of how operations
-// use monotonic clocks, but understanding those details is not required
-// to use this package.
-//
-// The Time returned by time.Now contains a monotonic clock reading.
-// If Time t has a monotonic clock reading, t.Add adds the same duration to
-// both the wall clock and monotonic clock readings to compute the result.
-// Because t.AddDate(y, m, d), t.Round(d), and t.Truncate(d) are wall time
-// computations, they always strip any monotonic clock reading from their results.
-// Because t.In, t.Local, and t.UTC are used for their effect on the interpretation
-// of the wall time, they also strip any monotonic clock reading from their results.
-// The canonical way to strip a monotonic clock reading is to use t = t.Round(0).
-//
-// If Times t and u both contain monotonic clock readings, the operations
-// t.After(u), t.Before(u), t.Equal(u), and t.Sub(u) are carried out
-// using the monotonic clock readings alone, ignoring the wall clock
-// readings. If either t or u contains no monotonic clock reading, these
-// operations fall back to using the wall clock readings.
-//
-// On some systems the monotonic clock will stop if the computer goes to sleep.
-// On such a system, t.Sub(u) may not accurately reflect the actual
-// time that passed between t and u.
-//
-// Because the monotonic clock reading has no meaning outside
-// the current process, the serialized forms generated by t.GobEncode,
-// t.MarshalBinary, t.MarshalJSON, and t.MarshalText omit the monotonic
-// clock reading, and t.Format provides no format for it. Similarly, the
-// constructors time.Date, time.Parse, time.ParseInLocation, and time.Unix,
-// as well as the unmarshalers t.GobDecode, t.UnmarshalBinary,
-// t.UnmarshalJSON, and t.UnmarshalText always create times with
-// no monotonic clock reading.
-//
-// Note that the Go == operator compares not just the time instant but
-// also the Location and the monotonic clock reading. See the
-// documentation for the Time type for a discussion of equality
-// testing for Time values.
-//
-// For debugging, the result of t.String does include the monotonic
-// clock reading if present. If t != u because of different monotonic clock readings,
-// that difference will be visible when printing t.String() and u.String().
-//
-package time
-
-import (
- "errors"
- _ "unsafe" // for go:linkname
-)
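-
-// monotonicSketch is a hypothetical helper restating the rules above:
-// Equal compares instants, while == also compares the monotonic reading,
-// and Round(0) is the documented way to strip that reading.
-func monotonicSketch() {
- t := Now()
- u := t.Round(0) // same instant, monotonic reading stripped
- _ = t.Equal(u) // true
- _ = t == u // typically false: t still carries its monotonic reading
-}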
-
-// A Time represents an instant in time with nanosecond precision.
-//
-// Programs using times should typically store and pass them as values,
-// not pointers. That is, time variables and struct fields should be of
-// type time.Time, not *time.Time.
-//
-// A Time value can be used by multiple goroutines simultaneously except
-// that the methods GobDecode, UnmarshalBinary, UnmarshalJSON and
-// UnmarshalText are not concurrency-safe.
-//
-// Time instants can be compared using the Before, After, and Equal methods.
-// The Sub method subtracts two instants, producing a Duration.
-// The Add method adds a Time and a Duration, producing a Time.
-//
-// The zero value of type Time is January 1, year 1, 00:00:00.000000000 UTC.
-// As this time is unlikely to come up in practice, the IsZero method gives
-// a simple way of detecting a time that has not been initialized explicitly.
-//
-// Each Time has associated with it a Location, consulted when computing the
-// presentation form of the time, such as in the Format, Hour, and Year methods.
-// The methods Local, UTC, and In return a Time with a specific location.
-// Changing the location in this way changes only the presentation; it does not
-// change the instant in time being denoted and therefore does not affect the
-// computations described in earlier paragraphs.
-//
-// Representations of a Time value saved by the GobEncode, MarshalBinary,
-// MarshalJSON, and MarshalText methods store the Time.Location's offset, but not
-// the location name. They therefore lose information about Daylight Saving Time.
-//
-// In addition to the required “wall clock” reading, a Time may contain an optional
-// reading of the current process's monotonic clock, to provide additional precision
-// for comparison or subtraction.
-// See the “Monotonic Clocks” section in the package documentation for details.
-//
-// Note that the Go == operator compares not just the time instant but also the
-// Location and the monotonic clock reading. Therefore, Time values should not
-// be used as map or database keys without first guaranteeing that the
-// identical Location has been set for all values, which can be achieved
-// through use of the UTC or Local method, and that the monotonic clock reading
-// has been stripped by setting t = t.Round(0). In general, prefer t.Equal(u)
-// to t == u, since t.Equal uses the most accurate comparison available and
-// correctly handles the case when only one of its arguments has a monotonic
-// clock reading.
-//
-type Time struct {
- // wall and ext encode the wall time seconds, wall time nanoseconds,
- // and optional monotonic clock reading in nanoseconds.
- //
- // From high to low bit position, wall encodes a 1-bit flag (hasMonotonic),
- // a 33-bit seconds field, and a 30-bit wall time nanoseconds field.
- // The nanoseconds field is in the range [0, 999999999].
- // If the hasMonotonic bit is 0, then the 33-bit field must be zero
- // and the full signed 64-bit wall seconds since Jan 1 year 1 is stored in ext.
- // If the hasMonotonic bit is 1, then the 33-bit field holds a 33-bit
- // unsigned wall seconds since Jan 1 year 1885, and ext holds a
- // signed 64-bit monotonic clock reading, nanoseconds since process start.
- wall uint64
- ext int64
-
- // loc specifies the Location that should be used to
- // determine the minute, hour, month, day, and year
- // that correspond to this Time.
- // The nil location means UTC.
- // All UTC times are represented with loc==nil, never loc==&utcLoc.
- loc *Location
-}
-
-const (
- hasMonotonic = 1 << 63
- maxWall = wallToInternal + (1<<33 - 1) // year 2157
- minWall = wallToInternal // year 1885
- nsecMask = 1<<30 - 1
- nsecShift = 30
-)
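-
-// As a sketch of the packing arithmetic above: with hasMonotonic set,
-// wall holds (from the top bit down) the flag, 33 bits of seconds since
-// Jan 1 1885, and 30 bits of nanoseconds. 2^33 seconds is roughly 272
-// years, which is why maxWall lands in the year 2157.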
-
-// These helpers for manipulating the wall and monotonic clock readings
-// take pointer receivers, even when they don't modify the time,
-// to make them cheaper to call.
-
-// nsec returns the time's nanoseconds.
-func (t *Time) nsec() int32 {
- return int32(t.wall & nsecMask)
-}
-
-// sec returns the time's seconds since Jan 1 year 1.
-func (t *Time) sec() int64 {
- if t.wall&hasMonotonic != 0 {
- return wallToInternal + int64(t.wall<<1>>(nsecShift+1))
- }
- return t.ext
-}
-
-// unixSec returns the time's seconds since Jan 1 1970 (Unix time).
-func (t *Time) unixSec() int64 { return t.sec() + internalToUnix }
-
-// addSec adds d seconds to the time.
-func (t *Time) addSec(d int64) {
- if t.wall&hasMonotonic != 0 {
- sec := int64(t.wall << 1 >> (nsecShift + 1))
- dsec := sec + d
- if 0 <= dsec && dsec <= 1<<33-1 {
- t.wall = t.wall&nsecMask | uint64(dsec)<<nsecShift | hasMonotonic
- return
- }
- // Wall second now out of range for packed field.
- // Move to ext.
- t.stripMono()
- }
-
- // Check if the sum of t.ext and d overflows and handle it properly.
- sum := t.ext + d
- if (sum > t.ext) == (d > 0) {
- t.ext = sum
- } else if d > 0 {
- t.ext = 1<<63 - 1
- } else {
- t.ext = -(1<<63 - 1)
- }
-}
-
-// setLoc sets the location associated with the time.
-func (t *Time) setLoc(loc *Location) {
- if loc == &utcLoc {
- loc = nil
- }
- t.stripMono()
- t.loc = loc
-}
-
-// stripMono strips the monotonic clock reading in t.
-func (t *Time) stripMono() {
- if t.wall&hasMonotonic != 0 {
- t.ext = t.sec()
- t.wall &= nsecMask
- }
-}
-
-// setMono sets the monotonic clock reading in t.
-// If t cannot hold a monotonic clock reading,
-// because its wall time is too large,
-// setMono is a no-op.
-func (t *Time) setMono(m int64) {
- if t.wall&hasMonotonic == 0 {
- sec := t.ext
- if sec < minWall || maxWall < sec {
- return
- }
- t.wall |= hasMonotonic | uint64(sec-minWall)<<nsecShift
- }
- t.ext = m
-}
-
-// mono returns t's monotonic clock reading.
-// It returns 0 for a missing reading.
-// This function is used only for testing,
-// so it's OK that technically 0 is a valid
-// monotonic clock reading as well.
-func (t *Time) mono() int64 {
- if t.wall&hasMonotonic == 0 {
- return 0
- }
- return t.ext
-}
-
-// After reports whether the time instant t is after u.
-func (t Time) After(u Time) bool {
- if t.wall&u.wall&hasMonotonic != 0 {
- return t.ext > u.ext
- }
- ts := t.sec()
- us := u.sec()
- return ts > us || ts == us && t.nsec() > u.nsec()
-}
-
-// Before reports whether the time instant t is before u.
-func (t Time) Before(u Time) bool {
- if t.wall&u.wall&hasMonotonic != 0 {
- return t.ext < u.ext
- }
- ts := t.sec()
- us := u.sec()
- return ts < us || ts == us && t.nsec() < u.nsec()
-}
-
-// Equal reports whether t and u represent the same time instant.
-// Two times can be equal even if they are in different locations.
-// For example, 6:00 +0200 and 4:00 UTC are Equal.
-// See the documentation on the Time type for the pitfalls of using == with
-// Time values; most code should use Equal instead.
-func (t Time) Equal(u Time) bool {
- if t.wall&u.wall&hasMonotonic != 0 {
- return t.ext == u.ext
- }
- return t.sec() == u.sec() && t.nsec() == u.nsec()
-}
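-
-// As a sketch of the Equal/== distinction using the public API (the
-// date below is arbitrary):
-//
-//	t, _ := time.Parse(time.RFC3339, "2021-01-01T06:00:00+02:00")
-//	u, _ := time.Parse(time.RFC3339, "2021-01-01T04:00:00Z")
-//
-// t.Equal(u) is true (same instant), but t == u is false: the Locations
-// differ, since +02:00 parses to a fixed zone while Z parses to UTC.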
-
-// A Month specifies a month of the year (January = 1, ...).
-type Month int
-
-const (
- January Month = 1 + iota
- February
- March
- April
- May
- June
- July
- August
- September
- October
- November
- December
-)
-
-// String returns the English name of the month ("January", "February", ...).
-func (m Month) String() string {
- if January <= m && m <= December {
- return longMonthNames[m-1]
- }
- buf := make([]byte, 20)
- n := fmtInt(buf, uint64(m))
- return "%!Month(" + string(buf[n:]) + ")"
-}
-
-// A Weekday specifies a day of the week (Sunday = 0, ...).
-type Weekday int
-
-const (
- Sunday Weekday = iota
- Monday
- Tuesday
- Wednesday
- Thursday
- Friday
- Saturday
-)
-
-// String returns the English name of the day ("Sunday", "Monday", ...).
-func (d Weekday) String() string {
- if Sunday <= d && d <= Saturday {
- return longDayNames[d]
- }
- buf := make([]byte, 20)
- n := fmtInt(buf, uint64(d))
- return "%!Weekday(" + string(buf[n:]) + ")"
-}
-
-// Computations on time.
-//
-// The zero value for a Time is defined to be
-// January 1, year 1, 00:00:00.000000000 UTC
-// which (1) looks like a zero, or as close as you can get in a date
-// (1-1-1 00:00:00 UTC), (2) is unlikely enough to arise in practice to
-// be a suitable "not set" sentinel, unlike Jan 1 1970, and (3) has a
-// non-negative year even in time zones west of UTC, unlike 1-1-0
-// 00:00:00 UTC, which would be 12-31-(-1) 19:00:00 in New York.
-//
-// The zero Time value does not force a specific epoch for the time
-// representation. For example, to use the Unix epoch internally, we
-// could define that to distinguish a zero value from Jan 1 1970, that
-// time would be represented by sec=-1, nsec=1e9. However, it does
-// suggest a representation, namely using 1-1-1 00:00:00 UTC as the
-// epoch, and that's what we do.
-//
-// The Add and Sub computations are oblivious to the choice of epoch.
-//
-// The presentation computations - year, month, minute, and so on - all
-// rely heavily on division and modulus by positive constants. For
-// calendrical calculations we want these divisions to round down, even
-// for negative values, so that the remainder is always positive, but
-// Go's division (like most hardware division instructions) rounds to
-// zero. We can still do those computations and then adjust the result
-// for a negative numerator, but it's annoying to write the adjustment
-// over and over. Instead, we can change to a different epoch so long
-// ago that all the times we care about will be positive, and then round
-// to zero and round down coincide. These presentation routines already
-// have to add the zone offset, so adding the translation to the
-// alternate epoch is cheap. For example, having a non-negative time t
-// means that we can write
-//
-// sec = t % 60
-//
-// instead of
-//
-// sec = t % 60
-// if sec < 0 {
-// sec += 60
-// }
-//
-// everywhere.
-//
-// The calendar runs on an exact 400 year cycle: a 400-year calendar
-// printed for 1970-2369 will apply as well to 2370-2769. Even the days
-// of the week match up. It simplifies the computations to choose the
-// cycle boundaries so that the exceptional years are always delayed as
-// long as possible. That means choosing a year equal to 1 mod 400, so
-// that the first leap year is the 4th year, the first skipped leap year
-// is the 100th year, and the first century year that keeps its leap day
-// after all is the 400th year.
-// So we'd prefer instead to print a calendar for 2001-2400 and reuse it
-// for 2401-2800.
-//
-// Finally, it's convenient if the delta between the Unix epoch and
-// long-ago epoch is representable by an int64 constant.
-//
-// These three considerations—choose an epoch as early as possible, that
-// uses a year equal to 1 mod 400, and that is no more than 2⁶³ seconds
-// earlier than 1970—bring us to the year -292277022399. We refer to
-// this year as the absolute zero year, and to times measured as a uint64
-// seconds since this year as absolute times.
-//
-// Times measured as an int64 seconds since the year 1—the representation
-// used for Time's sec field—are called internal times.
-//
-// Times measured as an int64 seconds since the year 1970 are called Unix
-// times.
-//
-// It is tempting to just use the year 1 as the absolute epoch, defining
-// that the routines are only valid for years >= 1. However, the
-// routines would then be invalid when displaying the epoch in time zones
-// west of UTC, since it is year 0. It doesn't seem tenable to say that
-// printing the zero time correctly isn't supported in half the time
-// zones. By comparison, it's reasonable to mishandle some times in
-// the year -292277022399.
-//
-// All this is opaque to clients of the API and can be changed if a
-// better implementation presents itself.
-
-const (
- // The unsigned zero year for internal calculations.
- // Must be 1 mod 400, and times before it will not compute correctly,
- // but otherwise can be changed at will.
- absoluteZeroYear = -292277022399
-
- // The year of the zero Time.
- // Assumed by the unixToInternal computation below.
- internalYear = 1
-
- // Offsets to convert between internal and absolute or Unix times.
- absoluteToInternal int64 = (absoluteZeroYear - internalYear) * 365.2425 * secondsPerDay
- internalToAbsolute = -absoluteToInternal
-
- unixToInternal int64 = (1969*365 + 1969/4 - 1969/100 + 1969/400) * secondsPerDay
- internalToUnix int64 = -unixToInternal
-
- wallToInternal int64 = (1884*365 + 1884/4 - 1884/100 + 1884/400) * secondsPerDay
-)
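-
-// As a worked check of the formula above: 1969 full years contribute
-// 1969*365 + 1969/4 - 1969/100 + 1969/400 = 719162 days, so
-// unixToInternal is 719162 * 86400 = 62135596800 seconds.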
-
-// IsZero reports whether t represents the zero time instant,
-// January 1, year 1, 00:00:00 UTC.
-func (t Time) IsZero() bool {
- return t.sec() == 0 && t.nsec() == 0
-}
-
-// abs returns the time t as an absolute time, adjusted by the zone offset.
-// It is called when computing a presentation property like Month or Hour.
-func (t Time) abs() uint64 {
- l := t.loc
- // Avoid function calls when possible.
- if l == nil || l == &localLoc {
- l = l.get()
- }
- sec := t.unixSec()
- if l != &utcLoc {
- if l.cacheZone != nil && l.cacheStart <= sec && sec < l.cacheEnd {
- sec += int64(l.cacheZone.offset)
- } else {
- _, offset, _, _, _ := l.lookup(sec)
- sec += int64(offset)
- }
- }
- return uint64(sec + (unixToInternal + internalToAbsolute))
-}
-
-// locabs is a combination of the Zone and abs methods,
-// extracting both return values from a single zone lookup.
-func (t Time) locabs() (name string, offset int, abs uint64) {
- l := t.loc
- if l == nil || l == &localLoc {
- l = l.get()
- }
- // Avoid function call if we hit the local time cache.
- sec := t.unixSec()
- if l != &utcLoc {
- if l.cacheZone != nil && l.cacheStart <= sec && sec < l.cacheEnd {
- name = l.cacheZone.name
- offset = l.cacheZone.offset
- } else {
- name, offset, _, _, _ = l.lookup(sec)
- }
- sec += int64(offset)
- } else {
- name = "UTC"
- }
- abs = uint64(sec + (unixToInternal + internalToAbsolute))
- return
-}
-
-// Date returns the year, month, and day in which t occurs.
-func (t Time) Date() (year int, month Month, day int) {
- year, month, day, _ = t.date(true)
- return
-}
-
-// Year returns the year in which t occurs.
-func (t Time) Year() int {
- year, _, _, _ := t.date(false)
- return year
-}
-
-// Month returns the month of the year specified by t.
-func (t Time) Month() Month {
- _, month, _, _ := t.date(true)
- return month
-}
-
-// Day returns the day of the month specified by t.
-func (t Time) Day() int {
- _, _, day, _ := t.date(true)
- return day
-}
-
-// Weekday returns the day of the week specified by t.
-func (t Time) Weekday() Weekday {
- return absWeekday(t.abs())
-}
-
-// absWeekday is like Weekday but operates on an absolute time.
-func absWeekday(abs uint64) Weekday {
- // January 1 of the absolute year, like January 1 of 2001, was a Monday.
- sec := (abs + uint64(Monday)*secondsPerDay) % secondsPerWeek
- return Weekday(int(sec) / secondsPerDay)
-}
-
-// ISOWeek returns the ISO 8601 year and week number in which t occurs.
-// Week ranges from 1 to 53. Jan 01 to Jan 03 of year n might belong to
-// week 52 or 53 of year n-1, and Dec 29 to Dec 31 might belong to week 1
-// of year n+1.
-func (t Time) ISOWeek() (year, week int) {
-	// By the ISO 8601 rule, the first calendar week of a year is the week
-	// containing that year's first Thursday, and the last calendar week is
-	// the one immediately preceding the first calendar week of the next year.
- // See https://www.iso.org/obp/ui#iso:std:iso:8601:-1:ed-1:v1:en:term:3.1.1.23 for details.
-
- // weeks start with Monday
- // Monday Tuesday Wednesday Thursday Friday Saturday Sunday
- // 1 2 3 4 5 6 7
- // +3 +2 +1 0 -1 -2 -3
- // the offset to Thursday
- abs := t.abs()
- d := Thursday - absWeekday(abs)
- // handle Sunday
- if d == 4 {
- d = -3
- }
- // find the Thursday of the calendar week
- abs += uint64(d) * secondsPerDay
- year, _, _, yday := absDate(abs, false)
- return year, yday/7 + 1
-}
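-
-// As a worked example of ISOWeek: January 1, 2021 fell on a Friday, so
-// d is -1 and the Thursday of its week is December 31, 2020, whose
-// zero-based yday is 365; the result is year 2020, week 365/7+1 = 53.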
-
-// Clock returns the hour, minute, and second within the day specified by t.
-func (t Time) Clock() (hour, min, sec int) {
- return absClock(t.abs())
-}
-
-// absClock is like clock but operates on an absolute time.
-func absClock(abs uint64) (hour, min, sec int) {
- sec = int(abs % secondsPerDay)
- hour = sec / secondsPerHour
- sec -= hour * secondsPerHour
- min = sec / secondsPerMinute
- sec -= min * secondsPerMinute
- return
-}
-
-// Hour returns the hour within the day specified by t, in the range [0, 23].
-func (t Time) Hour() int {
- return int(t.abs()%secondsPerDay) / secondsPerHour
-}
-
-// Minute returns the minute offset within the hour specified by t, in the range [0, 59].
-func (t Time) Minute() int {
- return int(t.abs()%secondsPerHour) / secondsPerMinute
-}
-
-// Second returns the second offset within the minute specified by t, in the range [0, 59].
-func (t Time) Second() int {
- return int(t.abs() % secondsPerMinute)
-}
-
-// Nanosecond returns the nanosecond offset within the second specified by t,
-// in the range [0, 999999999].
-func (t Time) Nanosecond() int {
- return int(t.nsec())
-}
-
-// YearDay returns the day of the year specified by t, in the range [1,365] for non-leap years,
-// and [1,366] in leap years.
-func (t Time) YearDay() int {
- _, _, _, yday := t.date(false)
- return yday + 1
-}
-
-// A Duration represents the elapsed time between two instants
-// as an int64 nanosecond count. The representation limits the
-// largest representable duration to approximately 290 years.
-type Duration int64
-
-const (
- minDuration Duration = -1 << 63
- maxDuration Duration = 1<<63 - 1
-)
-
-// Common durations. There is no definition for units of Day or larger
-// to avoid confusion across daylight saving time zone transitions.
-//
-// To count the number of units in a Duration, divide:
-// second := time.Second
-// fmt.Print(int64(second/time.Millisecond)) // prints 1000
-//
-// To convert an integer number of units to a Duration, multiply:
-// seconds := 10
-// fmt.Print(time.Duration(seconds)*time.Second) // prints 10s
-//
-const (
- Nanosecond Duration = 1
- Microsecond = 1000 * Nanosecond
- Millisecond = 1000 * Microsecond
- Second = 1000 * Millisecond
- Minute = 60 * Second
- Hour = 60 * Minute
-)
-
-// String returns a string representing the duration in the form "72h3m0.5s".
-// Leading zero units are omitted. As a special case, durations less than
-// one second are formatted with a smaller unit (milli-, micro-, or
-// nanoseconds) to ensure that the leading digit is non-zero. The zero
-// duration formats as 0s.
-func (d Duration) String() string {
- // Largest time is 2540400h10m10.000000000s
- var buf [32]byte
- w := len(buf)
-
- u := uint64(d)
- neg := d < 0
- if neg {
- u = -u
- }
-
- if u < uint64(Second) {
- // Special case: if duration is smaller than a second,
- // use smaller units, like 1.2ms
- var prec int
- w--
- buf[w] = 's'
- w--
- switch {
- case u == 0:
- return "0s"
- case u < uint64(Microsecond):
- // print nanoseconds
- prec = 0
- buf[w] = 'n'
- case u < uint64(Millisecond):
- // print microseconds
- prec = 3
- // U+00B5 'µ' micro sign == 0xC2 0xB5
- w-- // Need room for two bytes.
- copy(buf[w:], "µ")
- default:
- // print milliseconds
- prec = 6
- buf[w] = 'm'
- }
- w, u = fmtFrac(buf[:w], u, prec)
- w = fmtInt(buf[:w], u)
- } else {
- w--
- buf[w] = 's'
-
- w, u = fmtFrac(buf[:w], u, 9)
-
- // u is now integer seconds
- w = fmtInt(buf[:w], u%60)
- u /= 60
-
- // u is now integer minutes
- if u > 0 {
- w--
- buf[w] = 'm'
- w = fmtInt(buf[:w], u%60)
- u /= 60
-
- // u is now integer hours
- // Stop at hours because days can be different lengths.
- if u > 0 {
- w--
- buf[w] = 'h'
- w = fmtInt(buf[:w], u)
- }
- }
- }
-
- if neg {
- w--
- buf[w] = '-'
- }
-
- return string(buf[w:])
-}
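-
-// For example, (90 * Minute).String() is "1h30m0s", and
-// (1234500 * Microsecond).String() is "1.2345s".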
-
-// fmtFrac formats the fraction of v/10**prec (e.g., ".12345") into the
-// tail of buf, omitting trailing zeros. It omits the decimal
-// point too when the fraction is 0. It returns the index where the
-// output bytes begin and the value v/10**prec.
-func fmtFrac(buf []byte, v uint64, prec int) (nw int, nv uint64) {
- // Omit trailing zeros up to and including decimal point.
- w := len(buf)
- print := false
- for i := 0; i < prec; i++ {
- digit := v % 10
- print = print || digit != 0
- if print {
- w--
- buf[w] = byte(digit) + '0'
- }
- v /= 10
- }
- if print {
- w--
- buf[w] = '.'
- }
- return w, v
-}
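-
-// For example, fmtFrac(buf, 1234567890, 9) writes ".23456789" (the
-// trailing zero of the fraction is dropped) and returns nv = 1.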
-
-// fmtInt formats v into the tail of buf.
-// It returns the index where the output begins.
-func fmtInt(buf []byte, v uint64) int {
- w := len(buf)
- if v == 0 {
- w--
- buf[w] = '0'
- } else {
- for v > 0 {
- w--
- buf[w] = byte(v%10) + '0'
- v /= 10
- }
- }
- return w
-}
-
-// Nanoseconds returns the duration as an integer nanosecond count.
-func (d Duration) Nanoseconds() int64 { return int64(d) }
-
-// Microseconds returns the duration as an integer microsecond count.
-func (d Duration) Microseconds() int64 { return int64(d) / 1e3 }
-
-// Milliseconds returns the duration as an integer millisecond count.
-func (d Duration) Milliseconds() int64 { return int64(d) / 1e6 }
-
-// These methods return float64 because the dominant
-// use case is for printing a floating point number like 1.5s, and
-// a truncation to integer would make them not useful in those cases.
-// Splitting the integer and fraction ourselves guarantees that
-// converting the returned float64 to an integer rounds the same
-// way that a pure integer conversion would have, even in cases
-// where, say, float64(d.Nanoseconds())/1e9 would have rounded
-// differently.
-
-// Seconds returns the duration as a floating point number of seconds.
-func (d Duration) Seconds() float64 {
- sec := d / Second
- nsec := d % Second
- return float64(sec) + float64(nsec)/1e9
-}
-
-// Minutes returns the duration as a floating point number of minutes.
-func (d Duration) Minutes() float64 {
- min := d / Minute
- nsec := d % Minute
- return float64(min) + float64(nsec)/(60*1e9)
-}
-
-// Hours returns the duration as a floating point number of hours.
-func (d Duration) Hours() float64 {
- hour := d / Hour
- nsec := d % Hour
- return float64(hour) + float64(nsec)/(60*60*1e9)
-}
-
-// Truncate returns the result of rounding d toward zero to a multiple of m.
-// If m <= 0, Truncate returns d unchanged.
-func (d Duration) Truncate(m Duration) Duration {
- if m <= 0 {
- return d
- }
- return d - d%m
-}
-
-// lessThanHalf reports whether x+x < y but avoids overflow,
-// assuming x and y are both positive (Duration is signed).
-func lessThanHalf(x, y Duration) bool {
- return uint64(x)+uint64(x) < uint64(y)
-}
-
-// Round returns the result of rounding d to the nearest multiple of m.
-// The rounding behavior for halfway values is to round away from zero.
-// If the result exceeds the maximum (or minimum)
-// value that can be stored in a Duration,
-// Round returns the maximum (or minimum) duration.
-// If m <= 0, Round returns d unchanged.
-func (d Duration) Round(m Duration) Duration {
- if m <= 0 {
- return d
- }
- r := d % m
- if d < 0 {
- r = -r
- if lessThanHalf(r, m) {
- return d + r
- }
- if d1 := d - m + r; d1 < d {
- return d1
- }
- return minDuration // overflow
- }
- if lessThanHalf(r, m) {
- return d - r
- }
- if d1 := d + m - r; d1 > d {
- return d1
- }
- return maxDuration // overflow
-}
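-
-// For example, with d = 150*Millisecond and m = 100*Millisecond,
-// d.Truncate(m) is 100ms, while d.Round(m) is 200ms: the halfway
-// remainder of 50ms rounds away from zero.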
-
-// Add returns the time t+d.
-func (t Time) Add(d Duration) Time {
- dsec := int64(d / 1e9)
- nsec := t.nsec() + int32(d%1e9)
- if nsec >= 1e9 {
- dsec++
- nsec -= 1e9
- } else if nsec < 0 {
- dsec--
- nsec += 1e9
- }
- t.wall = t.wall&^nsecMask | uint64(nsec) // update nsec
- t.addSec(dsec)
- if t.wall&hasMonotonic != 0 {
- te := t.ext + int64(d)
- if d < 0 && te > t.ext || d > 0 && te < t.ext {
- // Monotonic clock reading now out of range; degrade to wall-only.
- t.stripMono()
- } else {
- t.ext = te
- }
- }
- return t
-}
-
-// Sub returns the duration t-u. If the result exceeds the maximum (or minimum)
-// value that can be stored in a Duration, the maximum (or minimum) duration
-// will be returned.
-// To compute t-d for a duration d, use t.Add(-d).
-func (t Time) Sub(u Time) Duration {
- if t.wall&u.wall&hasMonotonic != 0 {
- te := t.ext
- ue := u.ext
- d := Duration(te - ue)
- if d < 0 && te > ue {
- return maxDuration // t - u is positive out of range
- }
- if d > 0 && te < ue {
- return minDuration // t - u is negative out of range
- }
- return d
- }
- d := Duration(t.sec()-u.sec())*Second + Duration(t.nsec()-u.nsec())
- // Check for overflow or underflow.
- switch {
- case u.Add(d).Equal(t):
- return d // d is correct
- case t.Before(u):
- return minDuration // t - u is negative out of range
- default:
- return maxDuration // t - u is positive out of range
- }
-}
-
-// Since returns the time elapsed since t.
-// It is shorthand for time.Now().Sub(t).
-func Since(t Time) Duration {
- var now Time
- if t.wall&hasMonotonic != 0 {
- // Common case optimization: if t has monotonic time, then Sub will use only it.
- now = Time{hasMonotonic, runtimeNano() - startNano, nil}
- } else {
- now = Now()
- }
- return now.Sub(t)
-}
-
-// Until returns the duration until t.
-// It is shorthand for t.Sub(time.Now()).
-func Until(t Time) Duration {
- var now Time
- if t.wall&hasMonotonic != 0 {
- // Common case optimization: if t has monotonic time, then Sub will use only it.
- now = Time{hasMonotonic, runtimeNano() - startNano, nil}
- } else {
- now = Now()
- }
- return t.Sub(now)
-}
-
-// AddDate returns the time corresponding to adding the
-// given number of years, months, and days to t.
-// For example, AddDate(-1, 2, 3) applied to January 1, 2011
-// returns March 4, 2010.
-//
-// AddDate normalizes its result in the same way that Date does,
-// so, for example, adding one month to October 31 yields
-// December 1, the normalized form for November 31.
-func (t Time) AddDate(years int, months int, days int) Time {
- year, month, day := t.Date()
- hour, min, sec := t.Clock()
- return Date(year+years, month+Month(months), day+days, hour, min, sec, int(t.nsec()), t.Location())
-}
-
-const (
- secondsPerMinute = 60
- secondsPerHour = 60 * secondsPerMinute
- secondsPerDay = 24 * secondsPerHour
- secondsPerWeek = 7 * secondsPerDay
- daysPer400Years = 365*400 + 97
- daysPer100Years = 365*100 + 24
- daysPer4Years = 365*4 + 1
-)
-
-// date computes the year, day of year, and when full=true,
-// the month and day in which t occurs.
-func (t Time) date(full bool) (year int, month Month, day int, yday int) {
- return absDate(t.abs(), full)
-}
-
-// absDate is like date but operates on an absolute time.
-func absDate(abs uint64, full bool) (year int, month Month, day int, yday int) {
- // Split into time and day.
- d := abs / secondsPerDay
-
- // Account for 400 year cycles.
- n := d / daysPer400Years
- y := 400 * n
- d -= daysPer400Years * n
-
- // Cut off 100-year cycles.
- // The last cycle has one extra leap year, so on the last day
- // of that year, day / daysPer100Years will be 4 instead of 3.
- // Cut it back down to 3 by subtracting n>>2.
- n = d / daysPer100Years
- n -= n >> 2
- y += 100 * n
- d -= daysPer100Years * n
-
- // Cut off 4-year cycles.
- // The last cycle has a missing leap year, which does not
- // affect the computation.
- n = d / daysPer4Years
- y += 4 * n
- d -= daysPer4Years * n
-
- // Cut off years within a 4-year cycle.
- // The last year is a leap year, so on the last day of that year,
- // day / 365 will be 4 instead of 3. Cut it back down to 3
- // by subtracting n>>2.
- n = d / 365
- n -= n >> 2
- y += n
- d -= 365 * n
-
- year = int(int64(y) + absoluteZeroYear)
- yday = int(d)
-
- if !full {
- return
- }
-
- day = yday
- if isLeap(year) {
- // Leap year
- switch {
- case day > 31+29-1:
- // After leap day; pretend it wasn't there.
- day--
- case day == 31+29-1:
- // Leap day.
- month = February
- day = 29
- return
- }
- }
-
- // Estimate month on assumption that every month has 31 days.
- // The estimate may be too low by at most one month, so adjust.
- month = Month(day / 31)
- end := int(daysBefore[month+1])
- var begin int
- if day >= end {
- month++
- begin = end
- } else {
- begin = int(daysBefore[month])
- }
-
- month++ // because January is 1
- day = day - begin + 1
- return
-}
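-
-// As a sketch of the leap-day adjustment above: in a leap year a
-// zero-based yday of 59 (the 60th day) is returned directly as
-// February 29, while later days are shifted down by one so that the
-// non-leap daysBefore table still applies.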
-
-// daysBefore[m] counts the number of days in a non-leap year
-// before month m begins. There is an entry for m=12, counting
-// the number of days before January of next year (365).
-var daysBefore = [...]int32{
- 0,
- 31,
- 31 + 28,
- 31 + 28 + 31,
- 31 + 28 + 31 + 30,
- 31 + 28 + 31 + 30 + 31,
- 31 + 28 + 31 + 30 + 31 + 30,
- 31 + 28 + 31 + 30 + 31 + 30 + 31,
- 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31,
- 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30,
- 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31,
- 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30,
- 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31,
-}
-
-func daysIn(m Month, year int) int {
- if m == February && isLeap(year) {
- return 29
- }
- return int(daysBefore[m] - daysBefore[m-1])
-}
-
-// daysSinceEpoch takes a year and returns the number of days from
-// the absolute epoch to the start of that year.
-// This is basically (year - zeroYear) * 365, but accounting for leap days.
-func daysSinceEpoch(year int) uint64 {
- y := uint64(int64(year) - absoluteZeroYear)
-
- // Add in days from 400-year cycles.
- n := y / 400
- y -= 400 * n
- d := daysPer400Years * n
-
- // Add in 100-year cycles.
- n = y / 100
- y -= 100 * n
- d += daysPer100Years * n
-
- // Add in 4-year cycles.
- n = y / 4
- y -= 4 * n
- d += daysPer4Years * n
-
- // Add in non-leap years.
- n = y
- d += 365 * n
-
- return d
-}
-
-// Provided by package runtime.
-func now() (sec int64, nsec int32, mono int64)
-
-// runtimeNano returns the current value of the runtime clock in nanoseconds.
-//go:linkname runtimeNano runtime.nanotime
-func runtimeNano() int64
-
-// Monotonic times are reported as offsets from startNano.
-// We initialize startNano to runtimeNano() - 1 so that on systems where
-// monotonic time resolution is fairly low (e.g. Windows 2008
-// which appears to have a default resolution of 15ms),
-// we avoid ever reporting a monotonic time of 0.
-// (Callers may want to use 0 as "time not set".)
-var startNano int64 = runtimeNano() - 1
-
-// Now returns the current local time.
-func Now() Time {
- sec, nsec, mono := now()
- mono -= startNano
- sec += unixToInternal - minWall
- if uint64(sec)>>33 != 0 {
- return Time{uint64(nsec), sec + minWall, Local}
- }
- return Time{hasMonotonic | uint64(sec)<<nsecShift | uint64(nsec), mono, Local}
-}
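-
-// In Now, sec is recentered to seconds since 1885 (unixToInternal -
-// minWall), so the uint64(sec)>>33 test checks whether it fits in the
-// 33-bit packed field; if not, the monotonic reading is dropped and
-// the plain wall representation is used.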
-
-func unixTime(sec int64, nsec int32) Time {
- return Time{uint64(nsec), sec + unixToInternal, Local}
-}
-
-// UTC returns t with the location set to UTC.
-func (t Time) UTC() Time {
- t.setLoc(&utcLoc)
- return t
-}
-
-// Local returns t with the location set to local time.
-func (t Time) Local() Time {
- t.setLoc(Local)
- return t
-}
-
-// In returns a copy of t representing the same time instant, but
-// with the copy's location information set to loc for display
-// purposes.
-//
-// In panics if loc is nil.
-func (t Time) In(loc *Location) Time {
- if loc == nil {
- panic("time: missing Location in call to Time.In")
- }
- t.setLoc(loc)
- return t
-}
-
-// Location returns the time zone information associated with t.
-func (t Time) Location() *Location {
- l := t.loc
- if l == nil {
- l = UTC
- }
- return l
-}
-
-// Zone computes the time zone in effect at time t, returning the abbreviated
-// name of the zone (such as "CET") and its offset in seconds east of UTC.
-func (t Time) Zone() (name string, offset int) {
- name, offset, _, _, _ = t.loc.lookup(t.unixSec())
- return
-}
-
-// Unix returns t as a Unix time, the number of seconds elapsed
-// since January 1, 1970 UTC. The result does not depend on the
-// location associated with t.
-// Unix-like operating systems often record time as a 32-bit
-// count of seconds, but since the method here returns a 64-bit
-// value, it is valid for billions of years into the past or future.
-func (t Time) Unix() int64 {
- return t.unixSec()
-}
-
-// UnixMilli returns t as a Unix time, the number of milliseconds elapsed since
-// January 1, 1970 UTC. The result is undefined if the Unix time in
-// milliseconds cannot be represented by an int64 (a date more than 292 million
-// years before or after 1970). The result does not depend on the
-// location associated with t.
-func (t Time) UnixMilli() int64 {
- return t.unixSec()*1e3 + int64(t.nsec())/1e6
-}
-
-// UnixMicro returns t as a Unix time, the number of microseconds elapsed since
-// January 1, 1970 UTC. The result is undefined if the Unix time in
-// microseconds cannot be represented by an int64 (a date before year -290307 or
-// after year 294246). The result does not depend on the location associated
-// with t.
-func (t Time) UnixMicro() int64 {
- return t.unixSec()*1e6 + int64(t.nsec())/1e3
-}
-
-// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
-// since January 1, 1970 UTC. The result is undefined if the Unix time
-// in nanoseconds cannot be represented by an int64 (a date before the year
-// 1678 or after 2262). Note that this means the result of calling UnixNano
-// on the zero Time is undefined. The result does not depend on the
-// location associated with t.
-func (t Time) UnixNano() int64 {
- return (t.unixSec())*1e9 + int64(t.nsec())
-}
-
-const (
- timeBinaryVersionV1 byte = iota + 1 // For general situation
- timeBinaryVersionV2 // For LMT only
-)
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-func (t Time) MarshalBinary() ([]byte, error) {
- var offsetMin int16 // minutes east of UTC. -1 is UTC.
- var offsetSec int8
- version := timeBinaryVersionV1
-
- if t.Location() == UTC {
- offsetMin = -1
- } else {
- _, offset := t.Zone()
- if offset%60 != 0 {
- version = timeBinaryVersionV2
- offsetSec = int8(offset % 60)
- }
-
- offset /= 60
- if offset < -32768 || offset == -1 || offset > 32767 {
- return nil, errors.New("Time.MarshalBinary: unexpected zone offset")
- }
- offsetMin = int16(offset)
- }
-
- sec := t.sec()
- nsec := t.nsec()
- enc := []byte{
- version, // byte 0 : version
- byte(sec >> 56), // bytes 1-8: seconds
- byte(sec >> 48),
- byte(sec >> 40),
- byte(sec >> 32),
- byte(sec >> 24),
- byte(sec >> 16),
- byte(sec >> 8),
- byte(sec),
- byte(nsec >> 24), // bytes 9-12: nanoseconds
- byte(nsec >> 16),
- byte(nsec >> 8),
- byte(nsec),
- byte(offsetMin >> 8), // bytes 13-14: zone offset in minutes
- byte(offsetMin),
- }
- if version == timeBinaryVersionV2 {
- enc = append(enc, byte(offsetSec))
- }
-
- return enc, nil
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-func (t *Time) UnmarshalBinary(data []byte) error {
- buf := data
- if len(buf) == 0 {
- return errors.New("Time.UnmarshalBinary: no data")
- }
-
- version := buf[0]
- if version != timeBinaryVersionV1 && version != timeBinaryVersionV2 {
- return errors.New("Time.UnmarshalBinary: unsupported version")
- }
-
- wantLen := /*version*/ 1 + /*sec*/ 8 + /*nsec*/ 4 + /*zone offset*/ 2
- if version == timeBinaryVersionV2 {
- wantLen++
- }
- if len(buf) != wantLen {
- return errors.New("Time.UnmarshalBinary: invalid length")
- }
-
- buf = buf[1:]
- sec := int64(buf[7]) | int64(buf[6])<<8 | int64(buf[5])<<16 | int64(buf[4])<<24 |
- int64(buf[3])<<32 | int64(buf[2])<<40 | int64(buf[1])<<48 | int64(buf[0])<<56
-
- buf = buf[8:]
- nsec := int32(buf[3]) | int32(buf[2])<<8 | int32(buf[1])<<16 | int32(buf[0])<<24
-
- buf = buf[4:]
- offset := int(int16(buf[1])|int16(buf[0])<<8) * 60
- if version == timeBinaryVersionV2 {
- offset += int(buf[2])
- }
-
- *t = Time{}
- t.wall = uint64(nsec)
- t.ext = sec
-
- if offset == -1*60 {
- t.setLoc(&utcLoc)
- } else if _, localoff, _, _, _ := Local.lookup(t.unixSec()); offset == localoff {
- t.setLoc(Local)
- } else {
- t.setLoc(FixedZone("", offset))
- }
-
- return nil
-}
-
-// TODO(rsc): Remove GobEncoder, GobDecoder, MarshalJSON, UnmarshalJSON in Go 2.
-// The same semantics will be provided by the generic MarshalBinary, MarshalText,
-// UnmarshalBinary, UnmarshalText.
-
-// GobEncode implements the gob.GobEncoder interface.
-func (t Time) GobEncode() ([]byte, error) {
- return t.MarshalBinary()
-}
-
-// GobDecode implements the gob.GobDecoder interface.
-func (t *Time) GobDecode(data []byte) error {
- return t.UnmarshalBinary(data)
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-// The time is a quoted string in RFC 3339 format, with sub-second precision added if present.
-func (t Time) MarshalJSON() ([]byte, error) {
- if y := t.Year(); y < 0 || y >= 10000 {
- // RFC 3339 is clear that years are 4 digits exactly.
- // See golang.org/issue/4556#c15 for more discussion.
- return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]")
- }
-
- b := make([]byte, 0, len(RFC3339Nano)+2)
- b = append(b, '"')
- b = t.AppendFormat(b, RFC3339Nano)
- b = append(b, '"')
- return b, nil
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-// The time is expected to be a quoted string in RFC 3339 format.
-func (t *Time) UnmarshalJSON(data []byte) error {
- // Ignore null, like in the main JSON package.
- if string(data) == "null" {
- return nil
- }
- // Fractional seconds are handled implicitly by Parse.
- var err error
- *t, err = Parse(`"`+RFC3339+`"`, string(data))
- return err
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-// The time is formatted in RFC 3339 format, with sub-second precision added if present.
-func (t Time) MarshalText() ([]byte, error) {
- if y := t.Year(); y < 0 || y >= 10000 {
- return nil, errors.New("Time.MarshalText: year outside of range [0,9999]")
- }
-
- b := make([]byte, 0, len(RFC3339Nano))
- return t.AppendFormat(b, RFC3339Nano), nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// The time is expected to be in RFC 3339 format.
-func (t *Time) UnmarshalText(data []byte) error {
- // Fractional seconds are handled implicitly by Parse.
- var err error
- *t, err = Parse(RFC3339, string(data))
- return err
-}
-
-// Unix returns the local Time corresponding to the given Unix time,
-// sec seconds and nsec nanoseconds since January 1, 1970 UTC.
-// It is valid to pass nsec outside the range [0, 999999999].
-// Not all sec values have a corresponding time value. One such
-// value is 1<<63-1 (the largest int64 value).
-func Unix(sec int64, nsec int64) Time {
- if nsec < 0 || nsec >= 1e9 {
- n := nsec / 1e9
- sec += n
- nsec -= n * 1e9
- if nsec < 0 {
- nsec += 1e9
- sec--
- }
- }
- return unixTime(sec, int32(nsec))
-}
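-
-// For example, Unix(0, -1) normalizes to sec = -1, nsec = 999999999,
-// one nanosecond before the epoch.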
-
-// UnixMilli returns the local Time corresponding to the given Unix time,
-// msec milliseconds since January 1, 1970 UTC.
-func UnixMilli(msec int64) Time {
- return Unix(msec/1e3, (msec%1e3)*1e6)
-}
-
-// UnixMicro returns the local Time corresponding to the given Unix time,
-// usec microseconds since January 1, 1970 UTC.
-func UnixMicro(usec int64) Time {
- return Unix(usec/1e6, (usec%1e6)*1e3)
-}
-
-// IsDST reports whether the time in the configured location is in Daylight Saving Time.
-func (t Time) IsDST() bool {
- _, _, _, _, isDST := t.loc.lookup(t.Unix())
- return isDST
-}
-
-func isLeap(year int) bool {
- return year%4 == 0 && (year%100 != 0 || year%400 == 0)
-}
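-
-// For example, isLeap(2000) is true but isLeap(1900) is false: century
-// years keep their leap day only when divisible by 400.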
-
-// norm returns nhi, nlo such that
-// hi * base + lo == nhi * base + nlo
-// 0 <= nlo < base
-func norm(hi, lo, base int) (nhi, nlo int) {
- if lo < 0 {
- n := (-lo-1)/base + 1
- hi -= n
- lo += n * base
- }
- if lo >= base {
- n := lo / base
- hi += n
- lo -= n * base
- }
- return hi, lo
-}
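-
-// For example, norm(1, -75, 60) returns (-1, 45): both encode the same
-// total, 1*60-75 = -15 = -1*60+45, with the low digit back in [0, 60).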
-
-// Date returns the Time corresponding to
-// yyyy-mm-dd hh:mm:ss + nsec nanoseconds
-// in the appropriate zone for that time in the given location.
-//
-// The month, day, hour, min, sec, and nsec values may be outside
-// their usual ranges and will be normalized during the conversion.
-// For example, October 32 converts to November 1.
-//
-// A daylight saving time transition skips or repeats times.
-// For example, in the United States, March 13, 2011 2:15am never occurred,
-// while November 6, 2011 1:15am occurred twice. In such cases, the
-// choice of time zone, and therefore the time, is not well-defined.
-// Date returns a time that is correct in one of the two zones involved
-// in the transition, but it does not guarantee which.
-//
-// Date panics if loc is nil.
-func Date(year int, month Month, day, hour, min, sec, nsec int, loc *Location) Time {
- if loc == nil {
- panic("time: missing Location in call to Date")
- }
-
- // Normalize month, overflowing into year.
- m := int(month) - 1
- year, m = norm(year, m, 12)
- month = Month(m) + 1
-
- // Normalize nsec, sec, min, hour, overflowing into day.
- sec, nsec = norm(sec, nsec, 1e9)
- min, sec = norm(min, sec, 60)
- hour, min = norm(hour, min, 60)
- day, hour = norm(day, hour, 24)
-
- // Compute days since the absolute epoch.
- d := daysSinceEpoch(year)
-
- // Add in days before this month.
- d += uint64(daysBefore[month-1])
- if isLeap(year) && month >= March {
- d++ // February 29
- }
-
- // Add in days before today.
- d += uint64(day - 1)
-
- // Add in time elapsed today.
- abs := d * secondsPerDay
- abs += uint64(hour*secondsPerHour + min*secondsPerMinute + sec)
-
- unix := int64(abs) + (absoluteToInternal + internalToUnix)
-
- // Look for zone offset for expected time, so we can adjust to UTC.
- // The lookup function expects UTC, so first we pass unix in the
- // hope that it will not be too close to a zone transition,
- // and then adjust if it is.
- _, offset, start, end, _ := loc.lookup(unix)
- if offset != 0 {
- utc := unix - int64(offset)
- // If utc is valid for the time zone we found, then we have the right offset.
- // If not, we get the correct offset by looking up utc in the location.
- if utc < start || utc >= end {
- _, offset, _, _, _ = loc.lookup(utc)
- }
- unix -= int64(offset)
- }
-
- t := unixTime(unix, int32(nsec))
- t.setLoc(loc)
- return t
-}
-
-// Truncate returns the result of rounding t down to a multiple of d (since the zero time).
-// If d <= 0, Truncate returns t stripped of any monotonic clock reading but otherwise unchanged.
-//
-// Truncate operates on the time as an absolute duration since the
-// zero time; it does not operate on the presentation form of the
-// time. Thus, Truncate(Hour) may return a time with a non-zero
-// minute, depending on the time's Location.
-func (t Time) Truncate(d Duration) Time {
- t.stripMono()
- if d <= 0 {
- return t
- }
- _, r := div(t, d)
- return t.Add(-r)
-}
-
-// Round returns the result of rounding t to the nearest multiple of d (since the zero time).
-// The rounding behavior for halfway values is to round up.
-// If d <= 0, Round returns t stripped of any monotonic clock reading but otherwise unchanged.
-//
-// Round operates on the time as an absolute duration since the
-// zero time; it does not operate on the presentation form of the
-// time. Thus, Round(Hour) may return a time with a non-zero
-// minute, depending on the time's Location.
-func (t Time) Round(d Duration) Time {
- t.stripMono()
- if d <= 0 {
- return t
- }
- _, r := div(t, d)
- if lessThanHalf(r, d) {
- return t.Add(-r)
- }
- return t.Add(d - r)
-}
-
-// div divides t by d and returns the quotient parity and remainder.
-// We don't use the quotient parity anymore (round half up instead of round to even)
-// but it's still here in case we change our minds.
-func div(t Time, d Duration) (qmod2 int, r Duration) {
- neg := false
- nsec := t.nsec()
- sec := t.sec()
- if sec < 0 {
- // Operate on absolute value.
- neg = true
- sec = -sec
- nsec = -nsec
- if nsec < 0 {
- nsec += 1e9
- sec-- // sec >= 1 before the -- so safe
- }
- }
-
- switch {
- // Special case: 2d divides 1 second.
- case d < Second && Second%(d+d) == 0:
- qmod2 = int(nsec/int32(d)) & 1
- r = Duration(nsec % int32(d))
-
- // Special case: d is a multiple of 1 second.
- case d%Second == 0:
- d1 := int64(d / Second)
- qmod2 = int(sec/d1) & 1
- r = Duration(sec%d1)*Second + Duration(nsec)
-
- // General case.
- // This could be faster if more cleverness were applied,
- // but it's really only here to avoid special case restrictions in the API.
- // No one will care about these cases.
- default:
- // Compute nanoseconds as 128-bit number.
- sec := uint64(sec)
- tmp := (sec >> 32) * 1e9
- u1 := tmp >> 32
- u0 := tmp << 32
- tmp = (sec & 0xFFFFFFFF) * 1e9
- u0x, u0 := u0, u0+tmp
- if u0 < u0x {
- u1++
- }
- u0x, u0 = u0, u0+uint64(nsec)
- if u0 < u0x {
- u1++
- }
-
- // Compute remainder by subtracting r<<k for decreasing k.
- // Quotient parity is whether we subtract on last round.
- d1 := uint64(d)
- for d1>>63 != 1 {
- d1 <<= 1
- }
- d0 := uint64(0)
- for {
- qmod2 = 0
- if u1 > d1 || u1 == d1 && u0 >= d0 {
- // subtract
- qmod2 = 1
- u0x, u0 = u0, u0-d0
- if u0 > u0x {
- u1--
- }
- u1 -= d1
- }
- if d1 == 0 && d0 == uint64(d) {
- break
- }
- d0 >>= 1
- d0 |= (d1 & 1) << 63
- d1 >>= 1
- }
- r = Duration(u0)
- }
-
- if neg && r != 0 {
- // If input was negative and not an exact multiple of d, we computed q, r such that
- // q*d + r = -t
- // But the right answers are given by -(q-1), d-r:
- // q*d + r = -t
- // -q*d - r = t
- // -(q-1)*d + (d - r) = t
- qmod2 ^= 1
- r = d - r
- }
- return
-}
diff --git a/contrib/go/_std_1.18/src/time/zoneinfo.go b/contrib/go/_std_1.18/src/time/zoneinfo.go
deleted file mode 100644
index 7b39f869e6..0000000000
--- a/contrib/go/_std_1.18/src/time/zoneinfo.go
+++ /dev/null
@@ -1,687 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package time
-
-import (
- "errors"
- "sync"
- "syscall"
-)
-
-//go:generate env ZONEINFO=$GOROOT/lib/time/zoneinfo.zip go run genzabbrs.go -output zoneinfo_abbrs_windows.go
-
-// A Location maps time instants to the zone in use at that time.
-// Typically, the Location represents the collection of time offsets
-// in use in a geographical area. For many Locations the time offset varies
-// depending on whether daylight saving time is in use at the time instant.
-type Location struct {
- name string
- zone []zone
- tx []zoneTrans
-
- // The tzdata information can be followed by a string that describes
- // how to handle DST transitions not recorded in zoneTrans.
- // The format is the TZ environment variable without a colon; see
- // https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap08.html.
- // Example string, for America/Los_Angeles: PST8PDT,M3.2.0,M11.1.0
- extend string
-
- // Most lookups will be for the current time.
- // To avoid the binary search through tx, keep a
- // static one-element cache that gives the correct
- // zone for the time when the Location was created.
- // if cacheStart <= t < cacheEnd,
- // lookup can return cacheZone.
- // The units for cacheStart and cacheEnd are seconds
- // since January 1, 1970 UTC, to match the argument
- // to lookup.
- cacheStart int64
- cacheEnd int64
- cacheZone *zone
-}
-
-// A zone represents a single time zone such as CET.
-type zone struct {
- name string // abbreviated name, "CET"
- offset int // seconds east of UTC
-	isDST  bool   // is this zone Daylight Saving Time?
-}
-
-// A zoneTrans represents a single time zone transition.
-type zoneTrans struct {
- when int64 // transition time, in seconds since 1970 GMT
- index uint8 // the index of the zone that goes into effect at that time
- isstd, isutc bool // ignored - no idea what these mean
-}
-
-// alpha and omega are the beginning and end of time for zone
-// transitions.
-const (
- alpha = -1 << 63 // math.MinInt64
- omega = 1<<63 - 1 // math.MaxInt64
-)
-
-// UTC represents Coordinated Universal Time (UTC).
-var UTC *Location = &utcLoc
-
-// utcLoc is separate so that get can refer to &utcLoc
-// and ensure that it never returns a nil *Location,
-// even if a badly behaved client has changed UTC.
-var utcLoc = Location{name: "UTC"}
-
-// Local represents the system's local time zone.
-// On Unix systems, Local consults the TZ environment
-// variable to find the time zone to use. No TZ means
-// use the system default /etc/localtime.
-// TZ="" means use UTC.
-// TZ="foo" means use file foo in the system timezone directory.
-var Local *Location = &localLoc
-
-// localLoc is separate so that initLocal can initialize
-// it even if a client has changed Local.
-var localLoc Location
-var localOnce sync.Once
-
-func (l *Location) get() *Location {
- if l == nil {
- return &utcLoc
- }
- if l == &localLoc {
- localOnce.Do(initLocal)
- }
- return l
-}
-
-// String returns a descriptive name for the time zone information,
-// corresponding to the name argument to LoadLocation or FixedZone.
-func (l *Location) String() string {
- return l.get().name
-}
-
-// FixedZone returns a Location that always uses
-// the given zone name and offset (seconds east of UTC).
-func FixedZone(name string, offset int) *Location {
- l := &Location{
- name: name,
- zone: []zone{{name, offset, false}},
- tx: []zoneTrans{{alpha, 0, false, false}},
- cacheStart: alpha,
- cacheEnd: omega,
- }
- l.cacheZone = &l.zone[0]
- return l
-}
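-
-// For example, FixedZone("UTC+8", 8*60*60) (an arbitrary name) returns
-// a Location whose single zone spans [alpha, omega), so every lookup on
-// it reports offset 28800 with no transitions.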
-
-// lookup returns information about the time zone in use at an
-// instant in time expressed as seconds since January 1, 1970 00:00:00 UTC.
-//
-// The returned information gives the name of the zone (such as "CET"),
-// the start and end times bracketing sec when that zone is in effect,
-// the offset in seconds east of UTC (such as -5*60*60), and whether
-// daylight saving time is being observed at that time.
-func (l *Location) lookup(sec int64) (name string, offset int, start, end int64, isDST bool) {
- l = l.get()
-
- if len(l.zone) == 0 {
- name = "UTC"
- offset = 0
- start = alpha
- end = omega
- isDST = false
- return
- }
-
- if zone := l.cacheZone; zone != nil && l.cacheStart <= sec && sec < l.cacheEnd {
- name = zone.name
- offset = zone.offset
- start = l.cacheStart
- end = l.cacheEnd
- isDST = zone.isDST
- return
- }
-
- if len(l.tx) == 0 || sec < l.tx[0].when {
- zone := &l.zone[l.lookupFirstZone()]
- name = zone.name
- offset = zone.offset
- start = alpha
- if len(l.tx) > 0 {
- end = l.tx[0].when
- } else {
- end = omega
- }
- isDST = zone.isDST
- return
- }
-
- // Binary search for entry with largest time <= sec.
- // Not using sort.Search to avoid dependencies.
- tx := l.tx
- end = omega
- lo := 0
- hi := len(tx)
- for hi-lo > 1 {
- m := lo + (hi-lo)/2
- lim := tx[m].when
- if sec < lim {
- end = lim
- hi = m
- } else {
- lo = m
- }
- }
- zone := &l.zone[tx[lo].index]
- name = zone.name
- offset = zone.offset
- start = tx[lo].when
- // end = maintained during the search
- isDST = zone.isDST
-
- // If we're at the end of the known zone transitions,
- // try the extend string.
- if lo == len(tx)-1 && l.extend != "" {
- if ename, eoffset, estart, eend, eisDST, ok := tzset(l.extend, end, sec); ok {
- return ename, eoffset, estart, eend, eisDST
- }
- }
-
- return
-}
-
-// lookupFirstZone returns the index of the time zone to use for times
-// before the first transition time, or when there are no transition
-// times.
-//
-// The reference implementation in localtime.c from
-// https://www.iana.org/time-zones/repository/releases/tzcode2013g.tar.gz
-// implements the following algorithm for these cases:
-// 1) If the first zone is unused by the transitions, use it.
-// 2) Otherwise, if there are transition times, and the first
-// transition is to a zone in daylight time, find the first
-// non-daylight-time zone before and closest to the first transition
-// zone.
-// 3) Otherwise, use the first zone that is not daylight time, if
-// there is one.
-// 4) Otherwise, use the first zone.
-func (l *Location) lookupFirstZone() int {
- // Case 1.
- if !l.firstZoneUsed() {
- return 0
- }
-
- // Case 2.
- if len(l.tx) > 0 && l.zone[l.tx[0].index].isDST {
- for zi := int(l.tx[0].index) - 1; zi >= 0; zi-- {
- if !l.zone[zi].isDST {
- return zi
- }
- }
- }
-
- // Case 3.
- for zi := range l.zone {
- if !l.zone[zi].isDST {
- return zi
- }
- }
-
- // Case 4.
- return 0
-}
-
-// firstZoneUsed reports whether the first zone is used by some
-// transition.
-func (l *Location) firstZoneUsed() bool {
- for _, tx := range l.tx {
- if tx.index == 0 {
- return true
- }
- }
- return false
-}
-
-// tzset takes a timezone string like the one found in the TZ environment
-// variable, the end of the last time zone transition expressed as seconds
-// since January 1, 1970 00:00:00 UTC, and a time expressed the same way.
-// We call this a tzset string since in C the function tzset reads TZ.
-// The return values are as for lookup, plus ok which reports whether the
-// parse succeeded.
-func tzset(s string, initEnd, sec int64) (name string, offset int, start, end int64, isDST, ok bool) {
- var (
- stdName, dstName string
- stdOffset, dstOffset int
- )
-
- stdName, s, ok = tzsetName(s)
- if ok {
- stdOffset, s, ok = tzsetOffset(s)
- }
- if !ok {
- return "", 0, 0, 0, false, false
- }
-
- // The numbers in the tzset string are added to local time to get UTC,
- // but our offsets are added to UTC to get local time,
- // so we negate the number we see here.
- stdOffset = -stdOffset
-
- if len(s) == 0 || s[0] == ',' {
-		// No daylight saving time.
- return stdName, stdOffset, initEnd, omega, false, true
- }
-
- dstName, s, ok = tzsetName(s)
- if ok {
- if len(s) == 0 || s[0] == ',' {
- dstOffset = stdOffset + secondsPerHour
- } else {
- dstOffset, s, ok = tzsetOffset(s)
- dstOffset = -dstOffset // as with stdOffset, above
- }
- }
- if !ok {
- return "", 0, 0, 0, false, false
- }
-
- if len(s) == 0 {
- // Default DST rules per tzcode.
- s = ",M3.2.0,M11.1.0"
- }
- // The TZ definition does not mention ';' here but tzcode accepts it.
- if s[0] != ',' && s[0] != ';' {
- return "", 0, 0, 0, false, false
- }
- s = s[1:]
-
- var startRule, endRule rule
- startRule, s, ok = tzsetRule(s)
- if !ok || len(s) == 0 || s[0] != ',' {
- return "", 0, 0, 0, false, false
- }
- s = s[1:]
- endRule, s, ok = tzsetRule(s)
- if !ok || len(s) > 0 {
- return "", 0, 0, 0, false, false
- }
-
- year, _, _, yday := absDate(uint64(sec+unixToInternal+internalToAbsolute), false)
-
- ysec := int64(yday*secondsPerDay) + sec%secondsPerDay
-
- // Compute start of year in seconds since Unix epoch.
- d := daysSinceEpoch(year)
- abs := int64(d * secondsPerDay)
- abs += absoluteToInternal + internalToUnix
-
- startSec := int64(tzruleTime(year, startRule, stdOffset))
- endSec := int64(tzruleTime(year, endRule, dstOffset))
- dstIsDST, stdIsDST := true, false
-	// Note: this swaps the "DST" and "STD" roles while retaining the labels,
-	// which is what happens in the southern hemisphere. The labelling here is
-	// thus a little inconsistent with the goal.
- if endSec < startSec {
- startSec, endSec = endSec, startSec
- stdName, dstName = dstName, stdName
- stdOffset, dstOffset = dstOffset, stdOffset
- stdIsDST, dstIsDST = dstIsDST, stdIsDST
- }
-
- // The start and end values that we return are accurate
-	// close to a daylight saving time transition, but are otherwise
- // just the start and end of the year. That suffices for
- // the only caller that cares, which is Date.
- if ysec < startSec {
- return stdName, stdOffset, abs, startSec + abs, stdIsDST, true
- } else if ysec >= endSec {
- return stdName, stdOffset, endSec + abs, abs + 365*secondsPerDay, stdIsDST, true
- } else {
- return dstName, dstOffset, startSec + abs, endSec + abs, dstIsDST, true
- }
-}
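-
-// As a worked example of the parse above, for the tzset string
-// "PST8PDT,M3.2.0,M11.1.0": stdName is "PST" with stdOffset -8*3600
-// (the parsed 8 is negated), dstName is "PDT" with the default
-// dstOffset of one hour ahead (-7*3600), and the rules place the
-// transitions on the second Sunday of March and the first Sunday of
-// November, each at the default time of 2am.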
-
-// tzsetName returns the timezone name at the start of the tzset string s,
-// and the remainder of s, and reports whether the parsing is OK.
-func tzsetName(s string) (string, string, bool) {
- if len(s) == 0 {
- return "", "", false
- }
- if s[0] != '<' {
- for i, r := range s {
- switch r {
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ',', '-', '+':
- if i < 3 {
- return "", "", false
- }
- return s[:i], s[i:], true
- }
- }
- if len(s) < 3 {
- return "", "", false
- }
- return s, "", true
- } else {
- for i, r := range s {
- if r == '>' {
- return s[1:i], s[i+1:], true
- }
- }
- return "", "", false
- }
-}
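-
-// For example, tzsetName("PST8PDT,M3.2.0,M11.1.0") returns
-// ("PST", "8PDT,M3.2.0,M11.1.0", true), and the angle-bracket form
-// tzsetName("<+03>3") returns ("+03", "3", true).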
-
-// tzsetOffset returns the timezone offset at the start of the tzset string s,
-// and the remainder of s, and reports whether the parsing is OK.
-// The timezone offset is returned as a number of seconds.
-func tzsetOffset(s string) (offset int, rest string, ok bool) {
- if len(s) == 0 {
- return 0, "", false
- }
- neg := false
- if s[0] == '+' {
- s = s[1:]
- } else if s[0] == '-' {
- s = s[1:]
- neg = true
- }
-
- // The tzdata code permits values up to 24 * 7 here,
- // although POSIX does not.
- var hours int
- hours, s, ok = tzsetNum(s, 0, 24*7)
- if !ok {
- return 0, "", false
- }
- off := hours * secondsPerHour
- if len(s) == 0 || s[0] != ':' {
- if neg {
- off = -off
- }
- return off, s, true
- }
-
- var mins int
- mins, s, ok = tzsetNum(s[1:], 0, 59)
- if !ok {
- return 0, "", false
- }
- off += mins * secondsPerMinute
- if len(s) == 0 || s[0] != ':' {
- if neg {
- off = -off
- }
- return off, s, true
- }
-
- var secs int
- secs, s, ok = tzsetNum(s[1:], 0, 59)
- if !ok {
- return 0, "", false
- }
- off += secs
-
- if neg {
- off = -off
- }
- return off, s, true
-}
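-
-// For example, tzsetOffset("8PDT") returns (28800, "PDT", true), and
-// tzsetOffset("-08:30") returns (-30600, "", true).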
-
-// ruleKind is the kinds of rules that can be seen in a tzset string.
-type ruleKind int
-
-const (
- ruleJulian ruleKind = iota
- ruleDOY
- ruleMonthWeekDay
-)
-
-// rule is a rule read from a tzset string.
-type rule struct {
- kind ruleKind
- day int
- week int
- mon int
- time int // transition time
-}
-
-// tzsetRule parses a rule from a tzset string.
-// It returns the rule, and the remainder of the string, and reports success.
-func tzsetRule(s string) (rule, string, bool) {
- var r rule
- if len(s) == 0 {
- return rule{}, "", false
- }
- ok := false
- if s[0] == 'J' {
- var jday int
- jday, s, ok = tzsetNum(s[1:], 1, 365)
- if !ok {
- return rule{}, "", false
- }
- r.kind = ruleJulian
- r.day = jday
- } else if s[0] == 'M' {
- var mon int
- mon, s, ok = tzsetNum(s[1:], 1, 12)
-		if !ok || len(s) == 0 || s[0] != '.' {
-			return rule{}, "", false
-		}
- var week int
- week, s, ok = tzsetNum(s[1:], 1, 5)
- if !ok || len(s) == 0 || s[0] != '.' {
- return rule{}, "", false
- }
- var day int
- day, s, ok = tzsetNum(s[1:], 0, 6)
- if !ok {
- return rule{}, "", false
- }
- r.kind = ruleMonthWeekDay
- r.day = day
- r.week = week
- r.mon = mon
- } else {
- var day int
- day, s, ok = tzsetNum(s, 0, 365)
- if !ok {
- return rule{}, "", false
- }
- r.kind = ruleDOY
- r.day = day
- }
-
- if len(s) == 0 || s[0] != '/' {
- r.time = 2 * secondsPerHour // 2am is the default
- return r, s, true
- }
-
- offset, s, ok := tzsetOffset(s[1:])
- if !ok {
- return rule{}, "", false
- }
- r.time = offset
-
- return r, s, true
-}
-
-// tzsetNum parses a number from a tzset string.
-// It returns the number, and the remainder of the string, and reports success.
-// The number must be between min and max.
-func tzsetNum(s string, min, max int) (num int, rest string, ok bool) {
- if len(s) == 0 {
- return 0, "", false
- }
- num = 0
- for i, r := range s {
- if r < '0' || r > '9' {
- if i == 0 || num < min {
- return 0, "", false
- }
- return num, s[i:], true
- }
- num *= 10
- num += int(r) - '0'
- if num > max {
- return 0, "", false
- }
- }
- if num < min {
- return 0, "", false
- }
- return num, "", true
-}
-
-// tzruleTime takes a year, a rule, and a timezone offset,
-// and returns the number of seconds since the start of the year
-// that the rule takes effect.
-func tzruleTime(year int, r rule, off int) int {
- var s int
- switch r.kind {
- case ruleJulian:
- s = (r.day - 1) * secondsPerDay
- if isLeap(year) && r.day >= 60 {
- s += secondsPerDay
- }
- case ruleDOY:
- s = r.day * secondsPerDay
- case ruleMonthWeekDay:
- // Zeller's Congruence.
- m1 := (r.mon+9)%12 + 1
- yy0 := year
- if r.mon <= 2 {
- yy0--
- }
- yy1 := yy0 / 100
- yy2 := yy0 % 100
- dow := ((26*m1-2)/10 + 1 + yy2 + yy2/4 + yy1/4 - 2*yy1) % 7
- if dow < 0 {
- dow += 7
- }
- // Now dow is the day-of-week of the first day of r.mon.
- // Get the day-of-month of the first "dow" day.
- d := r.day - dow
- if d < 0 {
- d += 7
- }
- for i := 1; i < r.week; i++ {
- if d+7 >= daysIn(Month(r.mon), year) {
- break
- }
- d += 7
- }
- d += int(daysBefore[r.mon-1])
- if isLeap(year) && r.mon > 2 {
- d++
- }
- s = d * secondsPerDay
- }
-
- return s + r.time - off
-}
-
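As a cross-check on the Mm.w.d arithmetic above (Zeller's congruence plus the week-clamping loop), the same date can be computed with the exported time API. M3.2.0, the US DST start rule, means "month 3, week 2, day 0", i.e. the second Sunday in March:

    package main

    import (
        "fmt"
        "time"
    )

    // nthWeekday returns the n-th given weekday of month/year; like the
    // clamping loop in tzruleTime, week 5 effectively means "last occurrence".
    func nthWeekday(year int, month time.Month, n int, wd time.Weekday) time.Time {
        t := time.Date(year, month, 1, 0, 0, 0, 0, time.UTC)
        for t.Weekday() != wd {
            t = t.AddDate(0, 0, 1) // advance to the first occurrence
        }
        for i := 1; i < n; i++ {
            next := t.AddDate(0, 0, 7)
            if next.Month() != month {
                break // clamp inside the month, as tzruleTime does
            }
            t = next
        }
        return t
    }

    func main() {
        d := nthWeekday(2022, time.March, 2, time.Sunday)
        fmt.Println(d.Format("2006-01-02")) // 2022-03-13
    }
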
-// lookupName returns information about the time zone with
-// the given name (such as "EST") at the given pseudo-Unix time
-// (what the given time of day would be in UTC).
-func (l *Location) lookupName(name string, unix int64) (offset int, ok bool) {
- l = l.get()
-
- // First try for a zone with the right name that was actually
- // in effect at the given time. (In Sydney, Australia, both standard
- // and daylight-savings time are abbreviated "EST". Using the
- // offset helps us pick the right one for the given time.
- // It's not perfect: during the backward transition we might pick
- // either one.)
- for i := range l.zone {
- zone := &l.zone[i]
- if zone.name == name {
- nam, offset, _, _, _ := l.lookup(unix - int64(zone.offset))
- if nam == zone.name {
- return offset, true
- }
- }
- }
-
- // Otherwise fall back to an ordinary name match.
- for i := range l.zone {
- zone := &l.zone[i]
- if zone.name == name {
- return zone.offset, true
- }
- }
-
- // Otherwise, give up.
- return
-}
-
-// NOTE(rsc): Eventually we will need to accept the POSIX TZ environment
-// syntax too, but I don't feel like implementing it today.
-
-var errLocation = errors.New("time: invalid location name")
-
-var zoneinfo *string
-var zoneinfoOnce sync.Once
-
-// LoadLocation returns the Location with the given name.
-//
-// If the name is "" or "UTC", LoadLocation returns UTC.
-// If the name is "Local", LoadLocation returns Local.
-//
-// Otherwise, the name is taken to be a location name corresponding to a file
-// in the IANA Time Zone database, such as "America/New_York".
-//
-// LoadLocation looks for the IANA Time Zone database in the following
-// locations in order:
-//
-// - the directory or uncompressed zip file named by the ZONEINFO environment variable
-// - on a Unix system, the system standard installation location
-// - $GOROOT/lib/time/zoneinfo.zip
-// - the time/tzdata package, if it was imported
-func LoadLocation(name string) (*Location, error) {
- if name == "" || name == "UTC" {
- return UTC, nil
- }
- if name == "Local" {
- return Local, nil
- }
- if containsDotDot(name) || name[0] == '/' || name[0] == '\\' {
- // No valid IANA Time Zone name contains a single dot,
- // much less dot dot. Likewise, none begin with a slash.
- return nil, errLocation
- }
- zoneinfoOnce.Do(func() {
- env, _ := syscall.Getenv("ZONEINFO")
- zoneinfo = &env
- })
- var firstErr error
- if *zoneinfo != "" {
- if zoneData, err := loadTzinfoFromDirOrZip(*zoneinfo, name); err == nil {
- if z, err := LoadLocationFromTZData(name, zoneData); err == nil {
- return z, nil
- }
- firstErr = err
- } else if err != syscall.ENOENT {
- firstErr = err
- }
- }
- if z, err := loadLocation(name, zoneSources); err == nil {
- return z, nil
- } else if firstErr == nil {
- firstErr = err
- }
- return nil, firstErr
-}
-
-// containsDotDot reports whether s contains "..".
-func containsDotDot(s string) bool {
- if len(s) < 2 {
- return false
- }
- for i := 0; i < len(s)-1; i++ {
- if s[i] == '.' && s[i+1] == '.' {
- return true
- }
- }
- return false
-}
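
LoadLocation is the exported entry point for everything above; a typical call exercises the whole lookup chain ($ZONEINFO, the system database, $GOROOT, then the embedded time/tzdata copy if imported):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        loc, err := time.LoadLocation("America/New_York")
        if err != nil {
            // On hosts with no tzdata install, importing time/tzdata or
            // setting ZONEINFO supplies the database instead.
            fmt.Println("lookup failed:", err)
            return
        }
        fmt.Println(time.Date(2022, time.July, 1, 12, 0, 0, 0, loc))
    }
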
diff --git a/contrib/go/_std_1.18/src/time/zoneinfo_read.go b/contrib/go/_std_1.18/src/time/zoneinfo_read.go
deleted file mode 100644
index b9830265e1..0000000000
--- a/contrib/go/_std_1.18/src/time/zoneinfo_read.go
+++ /dev/null
@@ -1,586 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Parse "zoneinfo" time zone file.
-// This is a fairly standard file format used on OS X, Linux, BSD, Sun, and others.
-// See tzfile(5), https://en.wikipedia.org/wiki/Zoneinfo,
-// and ftp://munnari.oz.au/pub/oldtz/
-
-package time
-
-import (
- "errors"
- "runtime"
- "syscall"
-)
-
-// registerLoadFromEmbeddedTZData is called by the time/tzdata package,
-// if it is imported.
-func registerLoadFromEmbeddedTZData(f func(string) (string, error)) {
- loadFromEmbeddedTZData = f
-}
-
-// loadFromEmbeddedTZData is used to load a specific tzdata file
-// from tzdata information embedded in the binary itself.
-// This is set when the time/tzdata package is imported,
-// via registerLoadFromEmbeddedTZData.
-var loadFromEmbeddedTZData func(zipname string) (string, error)
-
-// maxFileSize is the max permitted size of files read by readFile.
-// As a reference, the zoneinfo.zip distributed by Go is ~350 KB,
-// so 10MB is overkill.
-const maxFileSize = 10 << 20
-
-type fileSizeError string
-
-func (f fileSizeError) Error() string {
- return "time: file " + string(f) + " is too large"
-}
-
-// Copies of io.Seek* constants to avoid importing "io":
-const (
- seekStart = 0
- seekCurrent = 1
- seekEnd = 2
-)
-
-// Simple I/O interface to a binary blob of data.
-type dataIO struct {
- p []byte
- error bool
-}
-
-func (d *dataIO) read(n int) []byte {
- if len(d.p) < n {
- d.p = nil
- d.error = true
- return nil
- }
- p := d.p[0:n]
- d.p = d.p[n:]
- return p
-}
-
-func (d *dataIO) big4() (n uint32, ok bool) {
- p := d.read(4)
- if len(p) < 4 {
- d.error = true
- return 0, false
- }
- return uint32(p[3]) | uint32(p[2])<<8 | uint32(p[1])<<16 | uint32(p[0])<<24, true
-}
-
-func (d *dataIO) big8() (n uint64, ok bool) {
- n1, ok1 := d.big4()
- n2, ok2 := d.big4()
- if !ok1 || !ok2 {
- d.error = true
- return 0, false
- }
- return (uint64(n1) << 32) | uint64(n2), true
-}
-
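The hand-rolled big4/big8 readers are byte-for-byte equivalent to encoding/binary's big-endian decoding; they presumably exist only to keep package time free of that import, in the same spirit as the io.Seek* constant copies above:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        p := []byte{0x00, 0x00, 0x01, 0x00}
        // Same result as dataIO.big4 on the front of its buffer.
        fmt.Println(binary.BigEndian.Uint32(p)) // 256
    }
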
-func (d *dataIO) byte() (n byte, ok bool) {
- p := d.read(1)
- if len(p) < 1 {
- d.error = true
- return 0, false
- }
- return p[0], true
-}
-
-// rest returns the remainder of the data in the buffer.
-func (d *dataIO) rest() []byte {
- r := d.p
- d.p = nil
- return r
-}
-
-// byteString makes a string from p, stopping at the first NUL.
-func byteString(p []byte) string {
- for i := 0; i < len(p); i++ {
- if p[i] == 0 {
- return string(p[0:i])
- }
- }
- return string(p)
-}
-
-var badData = errors.New("malformed time zone information")
-
-// LoadLocationFromTZData returns a Location with the given name
-// initialized from the IANA Time Zone database-formatted data.
-// The data should be in the format of a standard IANA time zone file
-// (for example, the content of /etc/localtime on Unix systems).
-func LoadLocationFromTZData(name string, data []byte) (*Location, error) {
- d := dataIO{data, false}
-
- // 4-byte magic "TZif"
- if magic := d.read(4); string(magic) != "TZif" {
- return nil, badData
- }
-
- // 1-byte version, then 15 bytes of padding
- var version int
- var p []byte
- if p = d.read(16); len(p) != 16 {
- return nil, badData
- } else {
- switch p[0] {
- case 0:
- version = 1
- case '2':
- version = 2
- case '3':
- version = 3
- default:
- return nil, badData
- }
- }
-
- // six big-endian 32-bit integers:
- // number of UTC/local indicators
- // number of standard/wall indicators
- // number of leap seconds
- // number of transition times
- // number of local time zones
- // number of characters of time zone abbrev strings
- const (
- NUTCLocal = iota
- NStdWall
- NLeap
- NTime
- NZone
- NChar
- )
- var n [6]int
- for i := 0; i < 6; i++ {
- nn, ok := d.big4()
- if !ok {
- return nil, badData
- }
- if uint32(int(nn)) != nn {
- return nil, badData
- }
- n[i] = int(nn)
- }
-
- // If we have version 2 or 3, then the data is first written out
- // in a 32-bit format, then written out again in a 64-bit format.
- // Skip the 32-bit format and read the 64-bit one, as it can
- // describe a broader range of dates.
-
- is64 := false
- if version > 1 {
- // Skip the 32-bit data.
- skip := n[NTime]*4 +
- n[NTime] +
- n[NZone]*6 +
- n[NChar] +
- n[NLeap]*8 +
- n[NStdWall] +
- n[NUTCLocal]
- // Skip the version 2 header that we just read.
- skip += 4 + 16
- d.read(skip)
-
- is64 = true
-
- // Read the counts again, they can differ.
- for i := 0; i < 6; i++ {
- nn, ok := d.big4()
- if !ok {
- return nil, badData
- }
- if uint32(int(nn)) != nn {
- return nil, badData
- }
- n[i] = int(nn)
- }
- }
-
- size := 4
- if is64 {
- size = 8
- }
-
- // Transition times.
- txtimes := dataIO{d.read(n[NTime] * size), false}
-
- // Time zone indices for transition times.
- txzones := d.read(n[NTime])
-
- // Zone info structures
- zonedata := dataIO{d.read(n[NZone] * 6), false}
-
- // Time zone abbreviations.
- abbrev := d.read(n[NChar])
-
- // Leap-second time pairs
- d.read(n[NLeap] * (size + 4))
-
- // Whether tx times associated with local time types
- // are specified as standard time or wall time.
- isstd := d.read(n[NStdWall])
-
- // Whether tx times associated with local time types
- // are specified as UTC or local time.
- isutc := d.read(n[NUTCLocal])
-
- if d.error { // ran out of data
- return nil, badData
- }
-
- var extend string
- rest := d.rest()
- if len(rest) > 2 && rest[0] == '\n' && rest[len(rest)-1] == '\n' {
- extend = string(rest[1 : len(rest)-1])
- }
-
- // Now we can build up a useful data structure.
- // First the zone information.
- // utcoff[4] isdst[1] nameindex[1]
- nzone := n[NZone]
- if nzone == 0 {
- // Reject tzdata files with no zones. There's nothing useful in them.
- // This also avoids a panic later when we add and then use a fake transition (golang.org/issue/29437).
- return nil, badData
- }
- zones := make([]zone, nzone)
- for i := range zones {
- var ok bool
- var n uint32
- if n, ok = zonedata.big4(); !ok {
- return nil, badData
- }
- if uint32(int(n)) != n {
- return nil, badData
- }
- zones[i].offset = int(int32(n))
- var b byte
- if b, ok = zonedata.byte(); !ok {
- return nil, badData
- }
- zones[i].isDST = b != 0
- if b, ok = zonedata.byte(); !ok || int(b) >= len(abbrev) {
- return nil, badData
- }
- zones[i].name = byteString(abbrev[b:])
- if runtime.GOOS == "aix" && len(name) > 8 && (name[:8] == "Etc/GMT+" || name[:8] == "Etc/GMT-") {
- // There is a bug with AIX 7.2 TL 0 with files in Etc,
- // GMT+1 will return GMT-1 instead of GMT+1 or -01.
- if name != "Etc/GMT+0" {
- // GMT+0 is OK
- zones[i].name = name[4:]
- }
- }
- }
-
- // Now the transition time info.
- tx := make([]zoneTrans, n[NTime])
- for i := range tx {
- var n int64
- if !is64 {
- if n4, ok := txtimes.big4(); !ok {
- return nil, badData
- } else {
- n = int64(int32(n4))
- }
- } else {
- if n8, ok := txtimes.big8(); !ok {
- return nil, badData
- } else {
- n = int64(n8)
- }
- }
- tx[i].when = n
- if int(txzones[i]) >= len(zones) {
- return nil, badData
- }
- tx[i].index = txzones[i]
- if i < len(isstd) {
- tx[i].isstd = isstd[i] != 0
- }
- if i < len(isutc) {
- tx[i].isutc = isutc[i] != 0
- }
- }
-
- if len(tx) == 0 {
- // Build fake transition to cover all time.
- // This happens in fixed locations like "Etc/GMT0".
- tx = append(tx, zoneTrans{when: alpha, index: 0})
- }
-
- // Committed to succeed.
- l := &Location{zone: zones, tx: tx, name: name, extend: extend}
-
- // Fill in the cache with information about right now,
- // since that will be the most common lookup.
- sec, _, _ := now()
- for i := range tx {
- if tx[i].when <= sec && (i+1 == len(tx) || sec < tx[i+1].when) {
- l.cacheStart = tx[i].when
- l.cacheEnd = omega
- l.cacheZone = &l.zone[tx[i].index]
- if i+1 < len(tx) {
- l.cacheEnd = tx[i+1].when
- } else if l.extend != "" {
- // If we're at the end of the known zone transitions,
- // try the extend string.
- if name, offset, estart, eend, isDST, ok := tzset(l.extend, l.cacheEnd, sec); ok {
- l.cacheStart = estart
- l.cacheEnd = eend
- // Find the zone that is returned by tzset to avoid allocation if possible.
- if zoneIdx := findZone(l.zone, name, offset, isDST); zoneIdx != -1 {
- l.cacheZone = &l.zone[zoneIdx]
- } else {
- l.cacheZone = &zone{
- name: name,
- offset: offset,
- isDST: isDST,
- }
- }
- }
- }
- break
- }
- }
-
- return l, nil
-}
-
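Since LoadLocationFromTZData is exported, the TZif parser above can be fed any zone file directly; the path below is an assumption about a typical Unix host:

    package main

    import (
        "fmt"
        "os"
        "time"
    )

    func main() {
        data, err := os.ReadFile("/usr/share/zoneinfo/Europe/Berlin") // assumed location
        if err != nil {
            fmt.Println(err)
            return
        }
        loc, err := time.LoadLocationFromTZData("Europe/Berlin", data)
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(time.Now().In(loc))
    }
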
-func findZone(zones []zone, name string, offset int, isDST bool) int {
- for i, z := range zones {
- if z.name == name && z.offset == offset && z.isDST == isDST {
- return i
- }
- }
- return -1
-}
-
-// loadTzinfoFromDirOrZip returns the contents of the file with the given name
-// in dir. dir can either be an uncompressed zip file, or a directory.
-func loadTzinfoFromDirOrZip(dir, name string) ([]byte, error) {
- if len(dir) > 4 && dir[len(dir)-4:] == ".zip" {
- return loadTzinfoFromZip(dir, name)
- }
- if dir != "" {
- name = dir + "/" + name
- }
- return readFile(name)
-}
-
-// There are 500+ zoneinfo files. Rather than distribute them all
-// individually, we ship them in an uncompressed zip file.
-// Used this way, the zip file format serves as a commonly readable
-// container for the individual small files. We choose zip over tar
-// because zip files have a contiguous table of contents, making
-// individual file lookups faster, and because the per-file overhead
-// in a zip file is considerably less than tar's 512 bytes.
-
-// get4 returns the little-endian 32-bit value in b.
-func get4(b []byte) int {
- if len(b) < 4 {
- return 0
- }
- return int(b[0]) | int(b[1])<<8 | int(b[2])<<16 | int(b[3])<<24
-}
-
-// get2 returns the little-endian 16-bit value in b.
-func get2(b []byte) int {
- if len(b) < 2 {
- return 0
- }
- return int(b[0]) | int(b[1])<<8
-}
-
-// loadTzinfoFromZip returns the contents of the file with the given name
-// in the given uncompressed zip file.
-func loadTzinfoFromZip(zipfile, name string) ([]byte, error) {
- fd, err := open(zipfile)
- if err != nil {
- return nil, err
- }
- defer closefd(fd)
-
- const (
- zecheader = 0x06054b50
- zcheader = 0x02014b50
- ztailsize = 22
-
- zheadersize = 30
- zheader = 0x04034b50
- )
-
- buf := make([]byte, ztailsize)
- if err := preadn(fd, buf, -ztailsize); err != nil || get4(buf) != zecheader {
- return nil, errors.New("corrupt zip file " + zipfile)
- }
- n := get2(buf[10:])
- size := get4(buf[12:])
- off := get4(buf[16:])
-
- buf = make([]byte, size)
- if err := preadn(fd, buf, off); err != nil {
- return nil, errors.New("corrupt zip file " + zipfile)
- }
-
- for i := 0; i < n; i++ {
- // zip entry layout:
- // 0 magic[4]
- // 4 madevers[1]
- // 5 madeos[1]
- // 6 extvers[1]
- // 7 extos[1]
- // 8 flags[2]
- // 10 meth[2]
- // 12 modtime[2]
- // 14 moddate[2]
- // 16 crc[4]
- // 20 csize[4]
- // 24 uncsize[4]
- // 28 namelen[2]
- // 30 xlen[2]
- // 32 fclen[2]
- // 34 disknum[2]
- // 36 iattr[2]
- // 38 eattr[4]
- // 42 off[4]
- // 46 name[namelen]
- // 46+namelen+xlen+fclen - next header
- //
- if get4(buf) != zcheader {
- break
- }
- meth := get2(buf[10:])
- size := get4(buf[24:])
- namelen := get2(buf[28:])
- xlen := get2(buf[30:])
- fclen := get2(buf[32:])
- off := get4(buf[42:])
- zname := buf[46 : 46+namelen]
- buf = buf[46+namelen+xlen+fclen:]
- if string(zname) != name {
- continue
- }
- if meth != 0 {
- return nil, errors.New("unsupported compression for " + name + " in " + zipfile)
- }
-
- // zip per-file header layout:
- // 0 magic[4]
- // 4 extvers[1]
- // 5 extos[1]
- // 6 flags[2]
- // 8 meth[2]
- // 10 modtime[2]
- // 12 moddate[2]
- // 14 crc[4]
- // 18 csize[4]
- // 22 uncsize[4]
- // 26 namelen[2]
- // 28 xlen[2]
- // 30 name[namelen]
- // 30+namelen+xlen - file data
- //
- buf = make([]byte, zheadersize+namelen)
- if err := preadn(fd, buf, off); err != nil ||
- get4(buf) != zheader ||
- get2(buf[8:]) != meth ||
- get2(buf[26:]) != namelen ||
- string(buf[30:30+namelen]) != name {
- return nil, errors.New("corrupt zip file " + zipfile)
- }
- xlen = get2(buf[28:])
-
- buf = make([]byte, size)
- if err := preadn(fd, buf, off+30+namelen+xlen); err != nil {
- return nil, errors.New("corrupt zip file " + zipfile)
- }
-
- return buf, nil
- }
-
- return nil, syscall.ENOENT
-}
-
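loadTzinfoFromZip is a minimal reader for the uncompressed zip layout described above, written out by hand so package time need not import archive/zip. With the full package, the same lookup is a few lines (a sketch; the zoneinfo.zip path is the conventional Go-distribution location):

    package main

    import (
        "archive/zip"
        "fmt"
        "io"
        "runtime"
    )

    func main() {
        zr, err := zip.OpenReader(runtime.GOROOT() + "/lib/time/zoneinfo.zip")
        if err != nil {
            fmt.Println(err)
            return
        }
        defer zr.Close()
        for _, f := range zr.File {
            if f.Name == "UTC" { // entries are stored uncompressed
                rc, err := f.Open()
                if err != nil {
                    fmt.Println(err)
                    return
                }
                data, _ := io.ReadAll(rc)
                rc.Close()
                fmt.Println(len(data), "bytes of TZif data")
            }
        }
    }
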
-// loadTzinfoFromTzdata returns the time zone information of the time zone
-// with the given name, from a tzdata database file as they are typically
-// found on android.
-var loadTzinfoFromTzdata func(file, name string) ([]byte, error)
-
-// loadTzinfo returns the time zone information of the time zone
-// with the given name, from a given source. A source may be a
-// timezone database directory, tzdata database file or an uncompressed
-// zip file, containing the contents of such a directory.
-func loadTzinfo(name string, source string) ([]byte, error) {
- if len(source) >= 6 && source[len(source)-6:] == "tzdata" {
- return loadTzinfoFromTzdata(source, name)
- }
- return loadTzinfoFromDirOrZip(source, name)
-}
-
-// loadLocation returns the Location with the given name from one of
-// the specified sources. See loadTzinfo for a list of supported sources.
-// The first timezone data matching the given name that is successfully loaded
-// and parsed is returned as a Location.
-func loadLocation(name string, sources []string) (z *Location, firstErr error) {
- for _, source := range sources {
- var zoneData, err = loadTzinfo(name, source)
- if err == nil {
- if z, err = LoadLocationFromTZData(name, zoneData); err == nil {
- return z, nil
- }
- }
- if firstErr == nil && err != syscall.ENOENT {
- firstErr = err
- }
- }
- if loadFromEmbeddedTZData != nil {
- zonedata, err := loadFromEmbeddedTZData(name)
- if err == nil {
- if z, err = LoadLocationFromTZData(name, []byte(zonedata)); err == nil {
- return z, nil
- }
- }
- if firstErr == nil && err != syscall.ENOENT {
- firstErr = err
- }
- }
- if firstErr != nil {
- return nil, firstErr
- }
- return nil, errors.New("unknown time zone " + name)
-}
-
-// readFile reads and returns the content of the named file.
-// It is a trivial implementation of os.ReadFile, reimplemented
-// here to avoid depending on io/ioutil or os.
-// It returns an error if name exceeds maxFileSize bytes.
-func readFile(name string) ([]byte, error) {
- f, err := open(name)
- if err != nil {
- return nil, err
- }
- defer closefd(f)
- var (
- buf [4096]byte
- ret []byte
- n int
- )
- for {
- n, err = read(f, buf[:])
- if n > 0 {
- ret = append(ret, buf[:n]...)
- }
- if n == 0 || err != nil {
- break
- }
- if len(ret) > maxFileSize {
- return nil, fileSizeError(name)
- }
- }
- return ret, err
-}
diff --git a/contrib/go/_std_1.18/src/time/zoneinfo_unix.go b/contrib/go/_std_1.18/src/time/zoneinfo_unix.go
deleted file mode 100644
index 23f8b3cdb4..0000000000
--- a/contrib/go/_std_1.18/src/time/zoneinfo_unix.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || (darwin && !ios) || dragonfly || freebsd || (linux && !android) || netbsd || openbsd || solaris
-
-// Parse "zoneinfo" time zone file.
-// This is a fairly standard file format used on OS X, Linux, BSD, Sun, and others.
-// See tzfile(5), https://en.wikipedia.org/wiki/Zoneinfo,
-// and ftp://munnari.oz.au/pub/oldtz/
-
-package time
-
-import (
- "runtime"
- "syscall"
-)
-
-// Many systems use /usr/share/zoneinfo; Solaris 2 has
-// /usr/share/lib/zoneinfo, and IRIX 6 has /usr/lib/locale/TZ.
-var zoneSources = []string{
- "/usr/share/zoneinfo/",
- "/usr/share/lib/zoneinfo/",
- "/usr/lib/locale/TZ/",
- runtime.GOROOT() + "/lib/time/zoneinfo.zip",
-}
-
-func initLocal() {
- // consult $TZ to find the time zone to use.
- // no $TZ means use the system default /etc/localtime.
- // $TZ="" means use UTC.
-	// $TZ="foo" or $TZ=":foo": if foo is an absolute path, the file pointed
-	// to by foo is used to initialize the time zone; otherwise, the file
-	// /usr/share/zoneinfo/foo is used.
-
- tz, ok := syscall.Getenv("TZ")
- switch {
- case !ok:
- z, err := loadLocation("localtime", []string{"/etc"})
- if err == nil {
- localLoc = *z
- localLoc.name = "Local"
- return
- }
- case tz != "":
- if tz[0] == ':' {
- tz = tz[1:]
- }
- if tz != "" && tz[0] == '/' {
- if z, err := loadLocation(tz, []string{""}); err == nil {
- localLoc = *z
- if tz == "/etc/localtime" {
- localLoc.name = "Local"
- } else {
- localLoc.name = tz
- }
- return
- }
- } else if tz != "" && tz != "UTC" {
- if z, err := loadLocation(tz, zoneSources); err == nil {
- localLoc = *z
- return
- }
- }
- }
-
- // Fall back to UTC.
- localLoc.name = "UTC"
-}
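
Because initLocal runs lazily, on the first use of the local zone, the $TZ handling above can be observed from within a program as long as TZ is set before any time call:

    package main

    import (
        "fmt"
        "os"
        "time"
    )

    func main() {
        // Must happen before the first use of time.Local (the lookup is lazy).
        os.Setenv("TZ", "Asia/Tokyo")
        fmt.Println(time.Now().Location()) // Asia/Tokyo
    }
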
diff --git a/contrib/go/_std_1.18/src/unicode/graphic.go b/contrib/go/_std_1.18/src/unicode/graphic.go
deleted file mode 100644
index ca6241949a..0000000000
--- a/contrib/go/_std_1.18/src/unicode/graphic.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package unicode
-
-// Bit masks for each code point under U+0100, for fast lookup.
-const (
- pC = 1 << iota // a control character.
- pP // a punctuation character.
- pN // a numeral.
- pS // a symbolic character.
- pZ // a spacing character.
- pLu // an upper-case letter.
- pLl // a lower-case letter.
- pp // a printable character according to Go's definition.
- pg = pp | pZ // a graphical character according to the Unicode definition.
- pLo = pLl | pLu // a letter that is neither upper nor lower case.
- pLmask = pLo
-)
-
-// GraphicRanges defines the set of graphic characters according to Unicode.
-var GraphicRanges = []*RangeTable{
- L, M, N, P, S, Zs,
-}
-
-// PrintRanges defines the set of printable characters according to Go.
-// ASCII space, U+0020, is handled separately.
-var PrintRanges = []*RangeTable{
- L, M, N, P, S,
-}
-
-// IsGraphic reports whether the rune is defined as a Graphic by Unicode.
-// Such characters include letters, marks, numbers, punctuation, symbols, and
-// spaces, from categories L, M, N, P, S, Zs.
-func IsGraphic(r rune) bool {
- // We convert to uint32 to avoid the extra test for negative,
- // and in the index we convert to uint8 to avoid the range check.
- if uint32(r) <= MaxLatin1 {
- return properties[uint8(r)]&pg != 0
- }
- return In(r, GraphicRanges...)
-}
-
-// IsPrint reports whether the rune is defined as printable by Go. Such
-// characters include letters, marks, numbers, punctuation, and symbols from
-// categories L, M, N, P, S, plus the ASCII space character, U+0020. This
-// categorization is the same as IsGraphic except that the only spacing
-// character is ASCII space.
-func IsPrint(r rune) bool {
- if uint32(r) <= MaxLatin1 {
- return properties[uint8(r)]&pp != 0
- }
- return In(r, PrintRanges...)
-}
-
-// IsOneOf reports whether the rune is a member of one of the ranges.
-// The function "In" provides a nicer signature and should be used in preference to IsOneOf.
-func IsOneOf(ranges []*RangeTable, r rune) bool {
- for _, inside := range ranges {
- if Is(inside, r) {
- return true
- }
- }
- return false
-}
-
-// In reports whether the rune is a member of one of the ranges.
-func In(r rune, ranges ...*RangeTable) bool {
- for _, inside := range ranges {
- if Is(inside, r) {
- return true
- }
- }
- return false
-}
-
-// IsControl reports whether the rune is a control character.
-// The C (Other) Unicode category includes more code points
-// such as surrogates; use Is(C, r) to test for them.
-func IsControl(r rune) bool {
- if uint32(r) <= MaxLatin1 {
- return properties[uint8(r)]&pC != 0
- }
- // All control characters are < MaxLatin1.
- return false
-}
-
-// IsLetter reports whether the rune is a letter (category L).
-func IsLetter(r rune) bool {
- if uint32(r) <= MaxLatin1 {
- return properties[uint8(r)]&(pLmask) != 0
- }
- return isExcludingLatin(Letter, r)
-}
-
-// IsMark reports whether the rune is a mark character (category M).
-func IsMark(r rune) bool {
- // There are no mark characters in Latin-1.
- return isExcludingLatin(Mark, r)
-}
-
-// IsNumber reports whether the rune is a number (category N).
-func IsNumber(r rune) bool {
- if uint32(r) <= MaxLatin1 {
- return properties[uint8(r)]&pN != 0
- }
- return isExcludingLatin(Number, r)
-}
-
-// IsPunct reports whether the rune is a Unicode punctuation character
-// (category P).
-func IsPunct(r rune) bool {
- if uint32(r) <= MaxLatin1 {
- return properties[uint8(r)]&pP != 0
- }
- return Is(Punct, r)
-}
-
-// IsSpace reports whether the rune is a space character as defined
-// by Unicode's White Space property; in the Latin-1 space
-// this is
-// '\t', '\n', '\v', '\f', '\r', ' ', U+0085 (NEL), U+00A0 (NBSP).
-// Other definitions of spacing characters are set by category
-// Z and property Pattern_White_Space.
-func IsSpace(r rune) bool {
- // This property isn't the same as Z; special-case it.
- if uint32(r) <= MaxLatin1 {
- switch r {
- case '\t', '\n', '\v', '\f', '\r', ' ', 0x85, 0xA0:
- return true
- }
- return false
- }
- return isExcludingLatin(White_Space, r)
-}
-
-// IsSymbol reports whether the rune is a symbolic character.
-func IsSymbol(r rune) bool {
- if uint32(r) <= MaxLatin1 {
- return properties[uint8(r)]&pS != 0
- }
- return isExcludingLatin(Symbol, r)
-}
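
A small demonstration of the predicates above, including the cases where IsGraphic and IsPrint disagree (the non-ASCII spacing characters):

    package main

    import (
        "fmt"
        "unicode"
    )

    func main() {
        for _, r := range []rune{'A', ' ', '\u00A0', '\n', '€'} {
            fmt.Printf("%q graphic=%v print=%v space=%v\n",
                r, unicode.IsGraphic(r), unicode.IsPrint(r), unicode.IsSpace(r))
        }
        // U+00A0 (NBSP) is graphic but not printable by Go's definition,
        // since the only spacing character IsPrint accepts is U+0020.
    }
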
diff --git a/contrib/go/_std_1.18/src/unicode/letter.go b/contrib/go/_std_1.18/src/unicode/letter.go
deleted file mode 100644
index 268e457a87..0000000000
--- a/contrib/go/_std_1.18/src/unicode/letter.go
+++ /dev/null
@@ -1,369 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package unicode provides data and functions to test some properties of
-// Unicode code points.
-package unicode
-
-const (
- MaxRune = '\U0010FFFF' // Maximum valid Unicode code point.
- ReplacementChar = '\uFFFD' // Represents invalid code points.
- MaxASCII = '\u007F' // maximum ASCII value.
- MaxLatin1 = '\u00FF' // maximum Latin-1 value.
-)
-
-// RangeTable defines a set of Unicode code points by listing the ranges of
-// code points within the set. The ranges are listed in two slices
-// to save space: a slice of 16-bit ranges and a slice of 32-bit ranges.
-// The two slices must be in sorted order and non-overlapping.
-// Also, R32 should contain only values >= 0x10000 (1<<16).
-type RangeTable struct {
- R16 []Range16
- R32 []Range32
- LatinOffset int // number of entries in R16 with Hi <= MaxLatin1
-}
-
-// Range16 represents a range of 16-bit Unicode code points. The range runs from Lo to Hi
-// inclusive and has the specified stride.
-type Range16 struct {
- Lo uint16
- Hi uint16
- Stride uint16
-}
-
-// Range32 represents a range of Unicode code points and is used when one or
-// more of the values will not fit in 16 bits. The range runs from Lo to Hi
-// inclusive and has the specified stride. Lo and Hi must always be >= 1<<16.
-type Range32 struct {
- Lo uint32
- Hi uint32
- Stride uint32
-}
-
-// CaseRange represents a range of Unicode code points for simple (one
-// code point to one code point) case conversion.
-// The range runs from Lo to Hi inclusive, with a fixed stride of 1. Deltas
-// are the number to add to the code point to reach the code point for a
-// different case for that character. They may be negative. If zero, it
-// means the character is in the corresponding case. There is a special
-// case representing sequences of alternating corresponding Upper and Lower
-// pairs. It appears with a fixed Delta of
-// {UpperLower, UpperLower, UpperLower}
-// The constant UpperLower has an otherwise impossible delta value.
-type CaseRange struct {
- Lo uint32
- Hi uint32
- Delta d
-}
-
-// SpecialCase represents language-specific case mappings such as Turkish.
-// Methods of SpecialCase customize (by overriding) the standard mappings.
-type SpecialCase []CaseRange
-
-// BUG(r): There is no mechanism for full case folding, that is, for
-// characters that involve multiple runes in the input or output.
-
-// Indices into the Delta arrays inside CaseRanges for case mapping.
-const (
- UpperCase = iota
- LowerCase
- TitleCase
- MaxCase
-)
-
-type d [MaxCase]rune // to make the CaseRanges text shorter
-
-// If the Delta field of a CaseRange is UpperLower, it means
-// this CaseRange represents a sequence of the form (say)
-// Upper Lower Upper Lower.
-const (
- UpperLower = MaxRune + 1 // (Cannot be a valid delta.)
-)
-
-// linearMax is the maximum table size for linear search of a non-Latin1 rune.
-// Derived by running 'go test -calibrate'.
-const linearMax = 18
-
-// is16 reports whether r is in the sorted slice of 16-bit ranges.
-func is16(ranges []Range16, r uint16) bool {
- if len(ranges) <= linearMax || r <= MaxLatin1 {
- for i := range ranges {
- range_ := &ranges[i]
- if r < range_.Lo {
- return false
- }
- if r <= range_.Hi {
- return range_.Stride == 1 || (r-range_.Lo)%range_.Stride == 0
- }
- }
- return false
- }
-
- // binary search over ranges
- lo := 0
- hi := len(ranges)
- for lo < hi {
- m := lo + (hi-lo)/2
- range_ := &ranges[m]
- if range_.Lo <= r && r <= range_.Hi {
- return range_.Stride == 1 || (r-range_.Lo)%range_.Stride == 0
- }
- if r < range_.Lo {
- hi = m
- } else {
- lo = m + 1
- }
- }
- return false
-}
-
-// is32 reports whether r is in the sorted slice of 32-bit ranges.
-func is32(ranges []Range32, r uint32) bool {
- if len(ranges) <= linearMax {
- for i := range ranges {
- range_ := &ranges[i]
- if r < range_.Lo {
- return false
- }
- if r <= range_.Hi {
- return range_.Stride == 1 || (r-range_.Lo)%range_.Stride == 0
- }
- }
- return false
- }
-
- // binary search over ranges
- lo := 0
- hi := len(ranges)
- for lo < hi {
- m := lo + (hi-lo)/2
- range_ := ranges[m]
- if range_.Lo <= r && r <= range_.Hi {
- return range_.Stride == 1 || (r-range_.Lo)%range_.Stride == 0
- }
- if r < range_.Lo {
- hi = m
- } else {
- lo = m + 1
- }
- }
- return false
-}
-
-// Is reports whether the rune is in the specified table of ranges.
-func Is(rangeTab *RangeTable, r rune) bool {
- r16 := rangeTab.R16
- // Compare as uint32 to correctly handle negative runes.
- if len(r16) > 0 && uint32(r) <= uint32(r16[len(r16)-1].Hi) {
- return is16(r16, uint16(r))
- }
- r32 := rangeTab.R32
- if len(r32) > 0 && r >= rune(r32[0].Lo) {
- return is32(r32, uint32(r))
- }
- return false
-}
-
-func isExcludingLatin(rangeTab *RangeTable, r rune) bool {
- r16 := rangeTab.R16
- // Compare as uint32 to correctly handle negative runes.
- if off := rangeTab.LatinOffset; len(r16) > off && uint32(r) <= uint32(r16[len(r16)-1].Hi) {
- return is16(r16[off:], uint16(r))
- }
- r32 := rangeTab.R32
- if len(r32) > 0 && r >= rune(r32[0].Lo) {
- return is32(r32, uint32(r))
- }
- return false
-}
-
-// IsUpper reports whether the rune is an upper case letter.
-func IsUpper(r rune) bool {
- // See comment in IsGraphic.
- if uint32(r) <= MaxLatin1 {
- return properties[uint8(r)]&pLmask == pLu
- }
- return isExcludingLatin(Upper, r)
-}
-
-// IsLower reports whether the rune is a lower case letter.
-func IsLower(r rune) bool {
- // See comment in IsGraphic.
- if uint32(r) <= MaxLatin1 {
- return properties[uint8(r)]&pLmask == pLl
- }
- return isExcludingLatin(Lower, r)
-}
-
-// IsTitle reports whether the rune is a title case letter.
-func IsTitle(r rune) bool {
- if r <= MaxLatin1 {
- return false
- }
- return isExcludingLatin(Title, r)
-}
-
-// to maps the rune using the specified case mapping.
-// It additionally reports whether caseRange contained a mapping for r.
-func to(_case int, r rune, caseRange []CaseRange) (mappedRune rune, foundMapping bool) {
- if _case < 0 || MaxCase <= _case {
- return ReplacementChar, false // as reasonable an error as any
- }
- // binary search over ranges
- lo := 0
- hi := len(caseRange)
- for lo < hi {
- m := lo + (hi-lo)/2
- cr := caseRange[m]
- if rune(cr.Lo) <= r && r <= rune(cr.Hi) {
- delta := cr.Delta[_case]
- if delta > MaxRune {
- // In an Upper-Lower sequence, which always starts with
- // an UpperCase letter, the real deltas always look like:
- // {0, 1, 0} UpperCase (Lower is next)
- // {-1, 0, -1} LowerCase (Upper, Title are previous)
- // The characters at even offsets from the beginning of the
- // sequence are upper case; the ones at odd offsets are lower.
- // The correct mapping can be done by clearing or setting the low
- // bit in the sequence offset.
- // The constants UpperCase and TitleCase are even while LowerCase
- // is odd so we take the low bit from _case.
- return rune(cr.Lo) + ((r-rune(cr.Lo))&^1 | rune(_case&1)), true
- }
- return r + delta, true
- }
- if r < rune(cr.Lo) {
- hi = m
- } else {
- lo = m + 1
- }
- }
- return r, false
-}
-
-// To maps the rune to the specified case: UpperCase, LowerCase, or TitleCase.
-func To(_case int, r rune) rune {
- r, _ = to(_case, r, CaseRanges)
- return r
-}
-
-// ToUpper maps the rune to upper case.
-func ToUpper(r rune) rune {
- if r <= MaxASCII {
- if 'a' <= r && r <= 'z' {
- r -= 'a' - 'A'
- }
- return r
- }
- return To(UpperCase, r)
-}
-
-// ToLower maps the rune to lower case.
-func ToLower(r rune) rune {
- if r <= MaxASCII {
- if 'A' <= r && r <= 'Z' {
- r += 'a' - 'A'
- }
- return r
- }
- return To(LowerCase, r)
-}
-
-// ToTitle maps the rune to title case.
-func ToTitle(r rune) rune {
- if r <= MaxASCII {
- if 'a' <= r && r <= 'z' { // title case is upper case for ASCII
- r -= 'a' - 'A'
- }
- return r
- }
- return To(TitleCase, r)
-}
-
-// ToUpper maps the rune to upper case giving priority to the special mapping.
-func (special SpecialCase) ToUpper(r rune) rune {
- r1, hadMapping := to(UpperCase, r, []CaseRange(special))
- if r1 == r && !hadMapping {
- r1 = ToUpper(r)
- }
- return r1
-}
-
-// ToTitle maps the rune to title case giving priority to the special mapping.
-func (special SpecialCase) ToTitle(r rune) rune {
- r1, hadMapping := to(TitleCase, r, []CaseRange(special))
- if r1 == r && !hadMapping {
- r1 = ToTitle(r)
- }
- return r1
-}
-
-// ToLower maps the rune to lower case giving priority to the special mapping.
-func (special SpecialCase) ToLower(r rune) rune {
- r1, hadMapping := to(LowerCase, r, []CaseRange(special))
- if r1 == r && !hadMapping {
- r1 = ToLower(r)
- }
- return r1
-}
-
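SpecialCase in action: unicode.TurkishCase overrides the default mapping for the dotted and dotless i pairs:

    package main

    import (
        "fmt"
        "unicode"
    )

    func main() {
        fmt.Printf("%c %c\n",
            unicode.ToUpper('i'),             // 'I'
            unicode.TurkishCase.ToUpper('i')) // 'İ' (U+0130)
    }
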
-// caseOrbit is defined in tables.go as []foldPair. Right now all the
-// entries fit in uint16, so use uint16. If that changes, compilation
-// will fail (the constants in the composite literal will not fit in uint16)
-// and the types here can change to uint32.
-type foldPair struct {
- From uint16
- To uint16
-}
-
-// SimpleFold iterates over Unicode code points equivalent under
-// the Unicode-defined simple case folding. Among the code points
-// equivalent to rune (including rune itself), SimpleFold returns the
-// smallest rune > r if one exists, or else the smallest rune >= 0.
-// If r is not a valid Unicode code point, SimpleFold(r) returns r.
-//
-// For example:
-// SimpleFold('A') = 'a'
-// SimpleFold('a') = 'A'
-//
-// SimpleFold('K') = 'k'
-// SimpleFold('k') = '\u212A' (Kelvin symbol, K)
-// SimpleFold('\u212A') = 'K'
-//
-// SimpleFold('1') = '1'
-//
-// SimpleFold(-2) = -2
-//
-func SimpleFold(r rune) rune {
- if r < 0 || r > MaxRune {
- return r
- }
-
- if int(r) < len(asciiFold) {
- return rune(asciiFold[r])
- }
-
- // Consult caseOrbit table for special cases.
- lo := 0
- hi := len(caseOrbit)
- for lo < hi {
- m := lo + (hi-lo)/2
- if rune(caseOrbit[m].From) < r {
- lo = m + 1
- } else {
- hi = m
- }
- }
- if lo < len(caseOrbit) && rune(caseOrbit[lo].From) == r {
- return rune(caseOrbit[lo].To)
- }
-
- // No folding specified. This is a one- or two-element
- // equivalence class containing rune and ToLower(rune)
- // and ToUpper(rune) if they are different from rune.
- if l := ToLower(r); l != r {
- return l
- }
- return ToUpper(r)
-}
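
Iterating SimpleFold until it returns the starting rune walks a complete fold orbit, which is how callers enumerate case-equivalent code points:

    package main

    import (
        "fmt"
        "unicode"
    )

    func main() {
        // Orbit of 'k': k -> U+212A (Kelvin sign) -> K -> back to k.
        for r := rune('k'); ; {
            fmt.Printf("%#U\n", r)
            if r = unicode.SimpleFold(r); r == 'k' {
                break
            }
        }
    }
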
diff --git a/contrib/go/_std_1.18/src/unicode/utf8/utf8.go b/contrib/go/_std_1.18/src/unicode/utf8/utf8.go
deleted file mode 100644
index 6938c7e6a7..0000000000
--- a/contrib/go/_std_1.18/src/unicode/utf8/utf8.go
+++ /dev/null
@@ -1,578 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package utf8 implements functions and constants to support text encoded in
-// UTF-8. It includes functions to translate between runes and UTF-8 byte sequences.
-// See https://en.wikipedia.org/wiki/UTF-8
-package utf8
-
-// The conditions RuneError==unicode.ReplacementChar and
-// MaxRune==unicode.MaxRune are verified in the tests.
-// Defining them locally avoids this package depending on package unicode.
-
-// Numbers fundamental to the encoding.
-const (
- RuneError = '\uFFFD' // the "error" Rune or "Unicode replacement character"
- RuneSelf = 0x80 // characters below RuneSelf are represented as themselves in a single byte.
- MaxRune = '\U0010FFFF' // Maximum valid Unicode code point.
- UTFMax = 4 // maximum number of bytes of a UTF-8 encoded Unicode character.
-)
-
-// Code points in the surrogate range are not valid for UTF-8.
-const (
- surrogateMin = 0xD800
- surrogateMax = 0xDFFF
-)
-
-const (
- t1 = 0b00000000
- tx = 0b10000000
- t2 = 0b11000000
- t3 = 0b11100000
- t4 = 0b11110000
- t5 = 0b11111000
-
- maskx = 0b00111111
- mask2 = 0b00011111
- mask3 = 0b00001111
- mask4 = 0b00000111
-
- rune1Max = 1<<7 - 1
- rune2Max = 1<<11 - 1
- rune3Max = 1<<16 - 1
-
- // The default lowest and highest continuation byte.
- locb = 0b10000000
- hicb = 0b10111111
-
-	// The names of these constants are chosen to give nice alignment in the
- // table below. The first nibble is an index into acceptRanges or F for
- // special one-byte cases. The second nibble is the Rune length or the
- // Status for the special one-byte case.
- xx = 0xF1 // invalid: size 1
- as = 0xF0 // ASCII: size 1
- s1 = 0x02 // accept 0, size 2
- s2 = 0x13 // accept 1, size 3
- s3 = 0x03 // accept 0, size 3
- s4 = 0x23 // accept 2, size 3
- s5 = 0x34 // accept 3, size 4
- s6 = 0x04 // accept 0, size 4
- s7 = 0x44 // accept 4, size 4
-)
-
-// first is information about the first byte in a UTF-8 sequence.
-var first = [256]uint8{
- // 1 2 3 4 5 6 7 8 9 A B C D E F
- as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F
- as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F
- as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F
- as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F
- as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F
- as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F
- as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F
- as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F
- // 1 2 3 4 5 6 7 8 9 A B C D E F
- xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F
- xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F
- xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF
- xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF
- xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF
- s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF
- s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF
- s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF
-}
-
-// acceptRange gives the range of valid values for the second byte in a UTF-8
-// sequence.
-type acceptRange struct {
- lo uint8 // lowest value for second byte.
- hi uint8 // highest value for second byte.
-}
-
-// acceptRanges has size 16 to avoid bounds checks in the code that uses it.
-var acceptRanges = [16]acceptRange{
- 0: {locb, hicb},
- 1: {0xA0, hicb},
- 2: {locb, 0x9F},
- 3: {0x90, hicb},
- 4: {locb, 0x8F},
-}
-
-// FullRune reports whether the bytes in p begin with a full UTF-8 encoding of a rune.
-// An invalid encoding is considered a full Rune since it will convert as a width-1 error rune.
-func FullRune(p []byte) bool {
- n := len(p)
- if n == 0 {
- return false
- }
- x := first[p[0]]
- if n >= int(x&7) {
- return true // ASCII, invalid or valid.
- }
- // Must be short or invalid.
- accept := acceptRanges[x>>4]
- if n > 1 && (p[1] < accept.lo || accept.hi < p[1]) {
- return true
- } else if n > 2 && (p[2] < locb || hicb < p[2]) {
- return true
- }
- return false
-}
-
-// FullRuneInString is like FullRune but its input is a string.
-func FullRuneInString(s string) bool {
- n := len(s)
- if n == 0 {
- return false
- }
- x := first[s[0]]
- if n >= int(x&7) {
- return true // ASCII, invalid, or valid.
- }
- // Must be short or invalid.
- accept := acceptRanges[x>>4]
- if n > 1 && (s[1] < accept.lo || accept.hi < s[1]) {
- return true
- } else if n > 2 && (s[2] < locb || hicb < s[2]) {
- return true
- }
- return false
-}
-
-// DecodeRune unpacks the first UTF-8 encoding in p and returns the rune and
-// its width in bytes. If p is empty it returns (RuneError, 0). Otherwise, if
-// the encoding is invalid, it returns (RuneError, 1). Both are impossible
-// results for correct, non-empty UTF-8.
-//
-// An encoding is invalid if it is incorrect UTF-8, encodes a rune that is
-// out of range, or is not the shortest possible UTF-8 encoding for the
-// value. No other validation is performed.
-func DecodeRune(p []byte) (r rune, size int) {
- n := len(p)
- if n < 1 {
- return RuneError, 0
- }
- p0 := p[0]
- x := first[p0]
- if x >= as {
- // The following code simulates an additional check for x == xx and
- // handling the ASCII and invalid cases accordingly. This mask-and-or
- // approach prevents an additional branch.
- mask := rune(x) << 31 >> 31 // Create 0x0000 or 0xFFFF.
- return rune(p[0])&^mask | RuneError&mask, 1
- }
- sz := int(x & 7)
- accept := acceptRanges[x>>4]
- if n < sz {
- return RuneError, 1
- }
- b1 := p[1]
- if b1 < accept.lo || accept.hi < b1 {
- return RuneError, 1
- }
- if sz <= 2 { // <= instead of == to help the compiler eliminate some bounds checks
- return rune(p0&mask2)<<6 | rune(b1&maskx), 2
- }
- b2 := p[2]
- if b2 < locb || hicb < b2 {
- return RuneError, 1
- }
- if sz <= 3 {
- return rune(p0&mask3)<<12 | rune(b1&maskx)<<6 | rune(b2&maskx), 3
- }
- b3 := p[3]
- if b3 < locb || hicb < b3 {
- return RuneError, 1
- }
- return rune(p0&mask4)<<18 | rune(b1&maskx)<<12 | rune(b2&maskx)<<6 | rune(b3&maskx), 4
-}
-
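DecodeRune is the usual building block for manual iteration over a byte slice (a range loop over a string does the same decoding implicitly):

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    func main() {
        b := []byte("héllo")
        for len(b) > 0 {
            r, size := utf8.DecodeRune(b)
            fmt.Printf("%#U width=%d\n", r, size)
            b = b[size:]
        }
    }
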
-// DecodeRuneInString is like DecodeRune but its input is a string. If s is
-// empty it returns (RuneError, 0). Otherwise, if the encoding is invalid, it
-// returns (RuneError, 1). Both are impossible results for correct, non-empty
-// UTF-8.
-//
-// An encoding is invalid if it is incorrect UTF-8, encodes a rune that is
-// out of range, or is not the shortest possible UTF-8 encoding for the
-// value. No other validation is performed.
-func DecodeRuneInString(s string) (r rune, size int) {
- n := len(s)
- if n < 1 {
- return RuneError, 0
- }
- s0 := s[0]
- x := first[s0]
- if x >= as {
- // The following code simulates an additional check for x == xx and
- // handling the ASCII and invalid cases accordingly. This mask-and-or
- // approach prevents an additional branch.
- mask := rune(x) << 31 >> 31 // Create 0x0000 or 0xFFFF.
- return rune(s[0])&^mask | RuneError&mask, 1
- }
- sz := int(x & 7)
- accept := acceptRanges[x>>4]
- if n < sz {
- return RuneError, 1
- }
- s1 := s[1]
- if s1 < accept.lo || accept.hi < s1 {
- return RuneError, 1
- }
- if sz <= 2 { // <= instead of == to help the compiler eliminate some bounds checks
- return rune(s0&mask2)<<6 | rune(s1&maskx), 2
- }
- s2 := s[2]
- if s2 < locb || hicb < s2 {
- return RuneError, 1
- }
- if sz <= 3 {
- return rune(s0&mask3)<<12 | rune(s1&maskx)<<6 | rune(s2&maskx), 3
- }
- s3 := s[3]
- if s3 < locb || hicb < s3 {
- return RuneError, 1
- }
- return rune(s0&mask4)<<18 | rune(s1&maskx)<<12 | rune(s2&maskx)<<6 | rune(s3&maskx), 4
-}
-
-// DecodeLastRune unpacks the last UTF-8 encoding in p and returns the rune and
-// its width in bytes. If p is empty it returns (RuneError, 0). Otherwise, if
-// the encoding is invalid, it returns (RuneError, 1). Both are impossible
-// results for correct, non-empty UTF-8.
-//
-// An encoding is invalid if it is incorrect UTF-8, encodes a rune that is
-// out of range, or is not the shortest possible UTF-8 encoding for the
-// value. No other validation is performed.
-func DecodeLastRune(p []byte) (r rune, size int) {
- end := len(p)
- if end == 0 {
- return RuneError, 0
- }
- start := end - 1
- r = rune(p[start])
- if r < RuneSelf {
- return r, 1
- }
- // guard against O(n^2) behavior when traversing
- // backwards through strings with long sequences of
- // invalid UTF-8.
- lim := end - UTFMax
- if lim < 0 {
- lim = 0
- }
- for start--; start >= lim; start-- {
- if RuneStart(p[start]) {
- break
- }
- }
- if start < 0 {
- start = 0
- }
- r, size = DecodeRune(p[start:end])
- if start+size != end {
- return RuneError, 1
- }
- return r, size
-}
-
-// DecodeLastRuneInString is like DecodeLastRune but its input is a string. If
-// s is empty it returns (RuneError, 0). Otherwise, if the encoding is invalid,
-// it returns (RuneError, 1). Both are impossible results for correct,
-// non-empty UTF-8.
-//
-// An encoding is invalid if it is incorrect UTF-8, encodes a rune that is
-// out of range, or is not the shortest possible UTF-8 encoding for the
-// value. No other validation is performed.
-func DecodeLastRuneInString(s string) (r rune, size int) {
- end := len(s)
- if end == 0 {
- return RuneError, 0
- }
- start := end - 1
- r = rune(s[start])
- if r < RuneSelf {
- return r, 1
- }
- // guard against O(n^2) behavior when traversing
- // backwards through strings with long sequences of
- // invalid UTF-8.
- lim := end - UTFMax
- if lim < 0 {
- lim = 0
- }
- for start--; start >= lim; start-- {
- if RuneStart(s[start]) {
- break
- }
- }
- if start < 0 {
- start = 0
- }
- r, size = DecodeRuneInString(s[start:end])
- if start+size != end {
- return RuneError, 1
- }
- return r, size
-}
-
-// RuneLen returns the number of bytes required to encode the rune.
-// It returns -1 if the rune is not a valid value to encode in UTF-8.
-func RuneLen(r rune) int {
- switch {
- case r < 0:
- return -1
- case r <= rune1Max:
- return 1
- case r <= rune2Max:
- return 2
- case surrogateMin <= r && r <= surrogateMax:
- return -1
- case r <= rune3Max:
- return 3
- case r <= MaxRune:
- return 4
- }
- return -1
-}
-
-// EncodeRune writes into p (which must be large enough) the UTF-8 encoding of the rune.
-// If the rune is out of range, it writes the encoding of RuneError.
-// It returns the number of bytes written.
-func EncodeRune(p []byte, r rune) int {
- // Negative values are erroneous. Making it unsigned addresses the problem.
- switch i := uint32(r); {
- case i <= rune1Max:
- p[0] = byte(r)
- return 1
- case i <= rune2Max:
- _ = p[1] // eliminate bounds checks
- p[0] = t2 | byte(r>>6)
- p[1] = tx | byte(r)&maskx
- return 2
- case i > MaxRune, surrogateMin <= i && i <= surrogateMax:
- r = RuneError
- fallthrough
- case i <= rune3Max:
- _ = p[2] // eliminate bounds checks
- p[0] = t3 | byte(r>>12)
- p[1] = tx | byte(r>>6)&maskx
- p[2] = tx | byte(r)&maskx
- return 3
- default:
- _ = p[3] // eliminate bounds checks
- p[0] = t4 | byte(r>>18)
- p[1] = tx | byte(r>>12)&maskx
- p[2] = tx | byte(r>>6)&maskx
- p[3] = tx | byte(r)&maskx
- return 4
- }
-}
-
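EncodeRune with a UTFMax-sized buffer is always safe; for example, the three-byte encoding of U+20AC:

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    func main() {
        buf := make([]byte, utf8.UTFMax)
        n := utf8.EncodeRune(buf, '€')
        fmt.Printf("% x\n", buf[:n]) // e2 82 ac
    }
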
-// AppendRune appends the UTF-8 encoding of r to the end of p and
-// returns the extended buffer. If the rune is out of range,
-// it appends the encoding of RuneError.
-func AppendRune(p []byte, r rune) []byte {
- // This function is inlineable for fast handling of ASCII.
- if uint32(r) <= rune1Max {
- return append(p, byte(r))
- }
- return appendRuneNonASCII(p, r)
-}
-
-func appendRuneNonASCII(p []byte, r rune) []byte {
- // Negative values are erroneous. Making it unsigned addresses the problem.
- switch i := uint32(r); {
- case i <= rune2Max:
- return append(p, t2|byte(r>>6), tx|byte(r)&maskx)
- case i > MaxRune, surrogateMin <= i && i <= surrogateMax:
- r = RuneError
- fallthrough
- case i <= rune3Max:
- return append(p, t3|byte(r>>12), tx|byte(r>>6)&maskx, tx|byte(r)&maskx)
- default:
- return append(p, t4|byte(r>>18), tx|byte(r>>12)&maskx, tx|byte(r>>6)&maskx, tx|byte(r)&maskx)
- }
-}
-
-// RuneCount returns the number of runes in p. Erroneous and short
-// encodings are treated as single runes of width 1 byte.
-func RuneCount(p []byte) int {
- np := len(p)
- var n int
- for i := 0; i < np; {
- n++
- c := p[i]
- if c < RuneSelf {
- // ASCII fast path
- i++
- continue
- }
- x := first[c]
- if x == xx {
- i++ // invalid.
- continue
- }
- size := int(x & 7)
- if i+size > np {
- i++ // Short or invalid.
- continue
- }
- accept := acceptRanges[x>>4]
- if c := p[i+1]; c < accept.lo || accept.hi < c {
- size = 1
- } else if size == 2 {
- } else if c := p[i+2]; c < locb || hicb < c {
- size = 1
- } else if size == 3 {
- } else if c := p[i+3]; c < locb || hicb < c {
- size = 1
- }
- i += size
- }
- return n
-}
-
-// RuneCountInString is like RuneCount but its input is a string.
-func RuneCountInString(s string) (n int) {
- ns := len(s)
- for i := 0; i < ns; n++ {
- c := s[i]
- if c < RuneSelf {
- // ASCII fast path
- i++
- continue
- }
- x := first[c]
- if x == xx {
- i++ // invalid.
- continue
- }
- size := int(x & 7)
- if i+size > ns {
- i++ // Short or invalid.
- continue
- }
- accept := acceptRanges[x>>4]
- if c := s[i+1]; c < accept.lo || accept.hi < c {
- size = 1
- } else if size == 2 {
- } else if c := s[i+2]; c < locb || hicb < c {
- size = 1
- } else if size == 3 {
- } else if c := s[i+3]; c < locb || hicb < c {
- size = 1
- }
- i += size
- }
- return n
-}
-
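The distinction RuneCount draws is byte length versus rune count, which differ as soon as any multi-byte encoding appears:

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    func main() {
        s := "héllo"
        fmt.Println(len(s), utf8.RuneCountInString(s)) // 6 5
    }
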
-// RuneStart reports whether the byte could be the first byte of an encoded,
-// possibly invalid rune. Second and subsequent bytes always have the top two
-// bits set to 10.
-func RuneStart(b byte) bool { return b&0xC0 != 0x80 }
-
-// Valid reports whether p consists entirely of valid UTF-8-encoded runes.
-func Valid(p []byte) bool {
- // Fast path. Check for and skip 8 bytes of ASCII characters per iteration.
- for len(p) >= 8 {
- // Combining two 32 bit loads allows the same code to be used
- // for 32 and 64 bit platforms.
- // The compiler can generate a 32bit load for first32 and second32
- // on many platforms. See test/codegen/memcombine.go.
- first32 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
- second32 := uint32(p[4]) | uint32(p[5])<<8 | uint32(p[6])<<16 | uint32(p[7])<<24
- if (first32|second32)&0x80808080 != 0 {
- // Found a non ASCII byte (>= RuneSelf).
- break
- }
- p = p[8:]
- }
- n := len(p)
- for i := 0; i < n; {
- pi := p[i]
- if pi < RuneSelf {
- i++
- continue
- }
- x := first[pi]
- if x == xx {
- return false // Illegal starter byte.
- }
- size := int(x & 7)
- if i+size > n {
- return false // Short or invalid.
- }
- accept := acceptRanges[x>>4]
- if c := p[i+1]; c < accept.lo || accept.hi < c {
- return false
- } else if size == 2 {
- } else if c := p[i+2]; c < locb || hicb < c {
- return false
- } else if size == 3 {
- } else if c := p[i+3]; c < locb || hicb < c {
- return false
- }
- i += size
- }
- return true
-}
-
-// ValidString reports whether s consists entirely of valid UTF-8-encoded runes.
-func ValidString(s string) bool {
- // Fast path. Check for and skip 8 bytes of ASCII characters per iteration.
- for len(s) >= 8 {
- // Combining two 32 bit loads allows the same code to be used
- // for 32 and 64 bit platforms.
- // The compiler can generate a 32bit load for first32 and second32
- // on many platforms. See test/codegen/memcombine.go.
- first32 := uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 | uint32(s[3])<<24
- second32 := uint32(s[4]) | uint32(s[5])<<8 | uint32(s[6])<<16 | uint32(s[7])<<24
- if (first32|second32)&0x80808080 != 0 {
- // Found a non ASCII byte (>= RuneSelf).
- break
- }
- s = s[8:]
- }
- n := len(s)
- for i := 0; i < n; {
- si := s[i]
- if si < RuneSelf {
- i++
- continue
- }
- x := first[si]
- if x == xx {
- return false // Illegal starter byte.
- }
- size := int(x & 7)
- if i+size > n {
- return false // Short or invalid.
- }
- accept := acceptRanges[x>>4]
- if c := s[i+1]; c < accept.lo || accept.hi < c {
- return false
- } else if size == 2 {
- } else if c := s[i+2]; c < locb || hicb < c {
- return false
- } else if size == 3 {
- } else if c := s[i+3]; c < locb || hicb < c {
- return false
- }
- i += size
- }
- return true
-}
-
-// ValidRune reports whether r can be legally encoded as UTF-8.
-// Code points that are out of range or a surrogate half are illegal.
-func ValidRune(r rune) bool {
- switch {
- case 0 <= r && r < surrogateMin:
- return true
- case surrogateMax < r && r <= MaxRune:
- return true
- }
- return false
-}
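
The validity functions catch both illegal starter bytes and truncated sequences; a two-byte prefix of a three-byte encoding fails, as does a surrogate half:

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    func main() {
        fmt.Println(utf8.ValidString("héllo"))      // true
        fmt.Println(utf8.Valid([]byte{0xE2, 0x82})) // false: truncated sequence
        fmt.Println(utf8.ValidRune(0xD800))         // false: surrogate half
    }
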
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/cryptobyte/builder.go b/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/cryptobyte/builder.go
deleted file mode 100644
index ca7b1db5ce..0000000000
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/cryptobyte/builder.go
+++ /dev/null
@@ -1,337 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cryptobyte
-
-import (
- "errors"
- "fmt"
-)
-
-// A Builder builds byte strings from fixed-length and length-prefixed values.
-// Builders either allocate space as needed, or are ‘fixed’, which means that
-// they write into a given buffer and produce an error if it's exhausted.
-//
-// The zero value is a usable Builder that allocates space as needed.
-//
-// Simple values are marshaled and appended to a Builder using methods on the
-// Builder. Length-prefixed values are marshaled by providing a
-// BuilderContinuation, which is a function that writes the inner contents of
-// the value to a given Builder. See the documentation for BuilderContinuation
-// for details.
-type Builder struct {
- err error
- result []byte
- fixedSize bool
- child *Builder
- offset int
- pendingLenLen int
- pendingIsASN1 bool
- inContinuation *bool
-}
-
-// NewBuilder creates a Builder that appends its output to the given buffer.
-// Like append(), the slice will be reallocated if its capacity is exceeded.
-// Use Bytes to get the final buffer.
-func NewBuilder(buffer []byte) *Builder {
- return &Builder{
- result: buffer,
- }
-}
-
-// NewFixedBuilder creates a Builder that appends its output into the given
-// buffer. This builder does not reallocate the output buffer. Writes that
-// would exceed the buffer's capacity are treated as an error.
-func NewFixedBuilder(buffer []byte) *Builder {
- return &Builder{
- result: buffer,
- fixedSize: true,
- }
-}
-
-// SetError sets the value to be returned as the error from Bytes. Writes
-// performed after calling SetError are ignored.
-func (b *Builder) SetError(err error) {
- b.err = err
-}
-
-// Bytes returns the bytes written by the builder or an error if one has
-// occurred during building.
-func (b *Builder) Bytes() ([]byte, error) {
- if b.err != nil {
- return nil, b.err
- }
- return b.result[b.offset:], nil
-}
-
-// BytesOrPanic returns the bytes written by the builder or panics if an error
-// has occurred during building.
-func (b *Builder) BytesOrPanic() []byte {
- if b.err != nil {
- panic(b.err)
- }
- return b.result[b.offset:]
-}
-
-// AddUint8 appends an 8-bit value to the byte string.
-func (b *Builder) AddUint8(v uint8) {
- b.add(byte(v))
-}
-
-// AddUint16 appends a big-endian, 16-bit value to the byte string.
-func (b *Builder) AddUint16(v uint16) {
- b.add(byte(v>>8), byte(v))
-}
-
-// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest
-// byte of the 32-bit input value is silently truncated.
-func (b *Builder) AddUint24(v uint32) {
- b.add(byte(v>>16), byte(v>>8), byte(v))
-}
-
-// AddUint32 appends a big-endian, 32-bit value to the byte string.
-func (b *Builder) AddUint32(v uint32) {
- b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
-}
-
-// AddBytes appends a sequence of bytes to the byte string.
-func (b *Builder) AddBytes(v []byte) {
- b.add(v...)
-}
-
-// BuilderContinuation is a continuation-passing interface for building
-// length-prefixed byte sequences. Builder methods for length-prefixed
-// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation
-// supplied to them. The child builder passed to the continuation can be used
-// to build the content of the length-prefixed sequence. For example:
-//
-// parent := cryptobyte.NewBuilder()
-// parent.AddUint8LengthPrefixed(func (child *Builder) {
-// child.AddUint8(42)
-// child.AddUint8LengthPrefixed(func (grandchild *Builder) {
-// grandchild.AddUint8(5)
-// })
-// })
-//
-// It is an error to write more bytes to the child than allowed by the reserved
-// length prefix. After the continuation returns, the child must be considered
-// invalid, i.e. users must not store any copies or references of the child
-// that outlive the continuation.
-//
-// If the continuation panics with a value of type BuildError then the inner
-// error will be returned as the error from Bytes. If the child panics
-// otherwise then Bytes will repanic with the same value.
-type BuilderContinuation func(child *Builder)
-
-// BuildError wraps an error. If a BuilderContinuation panics with this value,
-// the panic will be recovered and the inner error will be returned from
-// Builder.Bytes.
-type BuildError struct {
- Err error
-}
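
A hedged sketch of this abort contract (the error text is invented): a continuation panics with BuildError, the Builder recovers it, and the wrapped error comes back from Bytes:

    package main

    import (
        "errors"
        "fmt"

        "golang.org/x/crypto/cryptobyte"
    )

    func main() {
        var b cryptobyte.Builder
        b.AddUint16LengthPrefixed(func(child *cryptobyte.Builder) {
            // Abort the build; the Builder recovers this panic.
            panic(cryptobyte.BuildError{Err: errors.New("value out of range")})
        })
        if _, err := b.Bytes(); err != nil {
            fmt.Println(err) // value out of range
        }
    }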
-
-// AddUint8LengthPrefixed adds an 8-bit length-prefixed byte sequence.
-func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) {
- b.addLengthPrefixed(1, false, f)
-}
-
-// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence.
-func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) {
- b.addLengthPrefixed(2, false, f)
-}
-
-// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence.
-func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) {
- b.addLengthPrefixed(3, false, f)
-}
-
-// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence.
-func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) {
- b.addLengthPrefixed(4, false, f)
-}
-
-func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) {
- if !*b.inContinuation {
- *b.inContinuation = true
-
- defer func() {
- *b.inContinuation = false
-
- r := recover()
- if r == nil {
- return
- }
-
- if buildError, ok := r.(BuildError); ok {
- b.err = buildError.Err
- } else {
- panic(r)
- }
- }()
- }
-
- f(arg)
-}
-
-func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) {
- // Subsequent writes can be ignored if the builder has encountered an error.
- if b.err != nil {
- return
- }
-
- offset := len(b.result)
- b.add(make([]byte, lenLen)...)
-
- if b.inContinuation == nil {
- b.inContinuation = new(bool)
- }
-
- b.child = &Builder{
- result: b.result,
- fixedSize: b.fixedSize,
- offset: offset,
- pendingLenLen: lenLen,
- pendingIsASN1: isASN1,
- inContinuation: b.inContinuation,
- }
-
- b.callContinuation(f, b.child)
- b.flushChild()
- if b.child != nil {
- panic("cryptobyte: internal error")
- }
-}
-
-func (b *Builder) flushChild() {
- if b.child == nil {
- return
- }
- b.child.flushChild()
- child := b.child
- b.child = nil
-
- if child.err != nil {
- b.err = child.err
- return
- }
-
- length := len(child.result) - child.pendingLenLen - child.offset
-
- if length < 0 {
- panic("cryptobyte: internal error") // result unexpectedly shrank
- }
-
- if child.pendingIsASN1 {
- // For ASN.1, we reserved a single byte for the length. If that turned out
- // to be incorrect, we have to move the contents along in order to make
- // space.
- if child.pendingLenLen != 1 {
- panic("cryptobyte: internal error")
- }
- var lenLen, lenByte uint8
- if int64(length) > 0xfffffffe {
- b.err = errors.New("pending ASN.1 child too long")
- return
- } else if length > 0xffffff {
- lenLen = 5
- lenByte = 0x80 | 4
- } else if length > 0xffff {
- lenLen = 4
- lenByte = 0x80 | 3
- } else if length > 0xff {
- lenLen = 3
- lenByte = 0x80 | 2
- } else if length > 0x7f {
- lenLen = 2
- lenByte = 0x80 | 1
- } else {
- lenLen = 1
- lenByte = uint8(length)
- length = 0
- }
-
- // Insert the initial length byte, make space for successive length bytes,
- // and adjust the offset.
- child.result[child.offset] = lenByte
- extraBytes := int(lenLen - 1)
- if extraBytes != 0 {
- child.add(make([]byte, extraBytes)...)
- childStart := child.offset + child.pendingLenLen
- copy(child.result[childStart+extraBytes:], child.result[childStart:])
- }
- child.offset++
- child.pendingLenLen = extraBytes
- }
-
- l := length
- for i := child.pendingLenLen - 1; i >= 0; i-- {
- child.result[child.offset+i] = uint8(l)
- l >>= 8
- }
- if l != 0 {
- b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen)
- return
- }
-
- if b.fixedSize && &b.result[0] != &child.result[0] {
- panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer")
- }
-
- b.result = child.result
-}
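
A hedged sketch of the DER length rules flushChild applies; derLength is a hypothetical helper (not part of this package) and assumes encoding/binary is imported. Lengths up to 0x7f use the single-byte short form; anything longer gets an 0x80|n prefix byte followed by n big-endian length bytes:

    // derLength encodes a length the same way flushChild widens the
    // reserved ASN.1 length byte.
    func derLength(length uint32) []byte {
        if length <= 0x7f {
            return []byte{byte(length)} // short form
        }
        var tmp [4]byte
        binary.BigEndian.PutUint32(tmp[:], length)
        i := 0
        for tmp[i] == 0 { // strip leading zero bytes
            i++
        }
        out := []byte{0x80 | byte(4-i)} // long form marker: 0x80 | byte count
        return append(out, tmp[i:]...)
    }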
-
-func (b *Builder) add(bytes ...byte) {
- if b.err != nil {
- return
- }
- if b.child != nil {
- panic("cryptobyte: attempted write while child is pending")
- }
- if len(b.result)+len(bytes) < len(bytes) {
- b.err = errors.New("cryptobyte: length overflow")
- }
- if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) {
- b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer")
- return
- }
- b.result = append(b.result, bytes...)
-}
-
-// Unwrite rolls back n bytes written directly to the Builder. An attempt by a
-// child builder passed to a continuation to unwrite bytes from its parent will
-// panic.
-func (b *Builder) Unwrite(n int) {
- if b.err != nil {
- return
- }
- if b.child != nil {
- panic("cryptobyte: attempted unwrite while child is pending")
- }
- length := len(b.result) - b.pendingLenLen - b.offset
- if length < 0 {
- panic("cryptobyte: internal error")
- }
- if n > length {
- panic("cryptobyte: attempted to unwrite more than was written")
- }
- b.result = b.result[:len(b.result)-n]
-}
-
-// A MarshalingValue marshals itself into a Builder.
-type MarshalingValue interface {
- // Marshal is called by Builder.AddValue. It receives a pointer to a builder
- // to marshal itself into. It may return an error that occurred during
- // marshaling, such as unset or invalid values.
- Marshal(b *Builder) error
-}
-
-// AddValue calls Marshal on v, passing a pointer to the builder to append to.
-// If Marshal returns an error, it is set on the Builder so that subsequent
-// appends don't have an effect.
-func (b *Builder) AddValue(v MarshalingValue) {
- err := v.Marshal(b)
- if err != nil {
- b.err = err
- }
-}
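
A short usage sketch against the exported golang.org/x/crypto/cryptobyte package (the one-byte tag and payload are invented for illustration):

    package main

    import (
        "fmt"

        "golang.org/x/crypto/cryptobyte"
    )

    func main() {
        var b cryptobyte.Builder // zero value allocates as needed
        b.AddUint8(3)            // a one-byte tag
        b.AddUint16LengthPrefixed(func(child *cryptobyte.Builder) {
            child.AddBytes([]byte("hello"))
        })
        fmt.Printf("% x\n", b.BytesOrPanic()) // 03 00 05 68 65 6c 6c 6f
    }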
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go
deleted file mode 100644
index 44dc8e8caf..0000000000
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
-
-// +build amd64,gc,!purego
-
-package field
-
-// feMul sets out = a * b. It works like feMulGeneric.
-//go:noescape
-func feMul(out *Element, a *Element, b *Element)
-
-// feSquare sets out = a * a. It works like feSquareGeneric.
-//go:noescape
-func feSquare(out *Element, a *Element)
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
deleted file mode 100644
index c942a65904..0000000000
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file provides the generic implementation of Sum and MAC. Other files
-// might provide optimized assembly implementations of some of this code.
-
-package poly1305
-
-import "encoding/binary"
-
-// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
-// for a 64-byte message is approximately
-//
-// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5
-//
-// for some secret r and s. It can be computed sequentially like
-//
-// for len(msg) > 0:
-// h += read(msg, 16)
-// h *= r
-// h %= 2¹³⁰ - 5
-// return h + s
-//
-// All the complexity is about doing performant constant-time math on numbers
-// larger than any available numeric type.
-
-func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
- h := newMACGeneric(key)
- h.Write(msg)
- h.Sum(out)
-}
-
-func newMACGeneric(key *[32]byte) macGeneric {
- m := macGeneric{}
- initialize(key, &m.macState)
- return m
-}
-
-// macState holds numbers in saturated 64-bit little-endian limbs. That is,
-// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸.
-type macState struct {
- // h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but
- // can grow larger during and after rounds. It must, however, remain below
- // 2 * (2¹³⁰ - 5).
- h [3]uint64
- // r and s are the private key components.
- r [2]uint64
- s [2]uint64
-}
-
-type macGeneric struct {
- macState
-
- buffer [TagSize]byte
- offset int
-}
-
-// Write splits the incoming message into TagSize chunks, and passes them to
-// updateGeneric. It buffers incomplete chunks.
-func (h *macGeneric) Write(p []byte) (int, error) {
- nn := len(p)
- if h.offset > 0 {
- n := copy(h.buffer[h.offset:], p)
- if h.offset+n < TagSize {
- h.offset += n
- return nn, nil
- }
- p = p[n:]
- h.offset = 0
- updateGeneric(&h.macState, h.buffer[:])
- }
- if n := len(p) - (len(p) % TagSize); n > 0 {
- updateGeneric(&h.macState, p[:n])
- p = p[n:]
- }
- if len(p) > 0 {
- h.offset += copy(h.buffer[h.offset:], p)
- }
- return nn, nil
-}
-
-// Sum flushes the last incomplete chunk from the buffer, if any, and generates
-// the MAC output. It does not modify its state, in order to allow for multiple
-// calls to Sum, even if no Write is allowed after Sum.
-func (h *macGeneric) Sum(out *[TagSize]byte) {
- state := h.macState
- if h.offset > 0 {
- updateGeneric(&state, h.buffer[:h.offset])
- }
- finalize(out, &state.h, &state.s)
-}
-
-// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It
-// clears some bits of the secret coefficient to make it possible to implement
-// multiplication more efficiently.
-const (
- rMask0 = 0x0FFFFFFC0FFFFFFF
- rMask1 = 0x0FFFFFFC0FFFFFFC
-)
-
-// initialize loads the 256-bit key into the two 128-bit secret values r and s.
-func initialize(key *[32]byte, m *macState) {
- m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
- m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
- m.s[0] = binary.LittleEndian.Uint64(key[16:24])
- m.s[1] = binary.LittleEndian.Uint64(key[24:32])
-}
-
-// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
-// bits.Mul64 and bits.Add64 intrinsics.
-type uint128 struct {
- lo, hi uint64
-}
-
-func mul64(a, b uint64) uint128 {
- hi, lo := bitsMul64(a, b)
- return uint128{lo, hi}
-}
-
-func add128(a, b uint128) uint128 {
- lo, c := bitsAdd64(a.lo, b.lo, 0)
- hi, c := bitsAdd64(a.hi, b.hi, c)
- if c != 0 {
- panic("poly1305: unexpected overflow")
- }
- return uint128{lo, hi}
-}
-
-func shiftRightBy2(a uint128) uint128 {
- a.lo = a.lo>>2 | (a.hi&3)<<62
- a.hi = a.hi >> 2
- return a
-}
-
-// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of
-// 128 bits of message, it computes
-//
-// h₊ = (h + m) * r mod 2¹³⁰ - 5
-//
-// If the msg length is not a multiple of TagSize, it assumes the last
-// incomplete chunk is the final one.
-func updateGeneric(state *macState, msg []byte) {
- h0, h1, h2 := state.h[0], state.h[1], state.h[2]
- r0, r1 := state.r[0], state.r[1]
-
- for len(msg) > 0 {
- var c uint64
-
- // For the first step, h + m, we use a chain of bits.Add64 intrinsics.
- // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially
- // reduced at the end of the multiplication below.
- //
- // The spec requires us to set a bit just above the message size, not to
- // hide leading zeroes. For full chunks, that's 1 << 128, so we can just
- // add 1 to the most significant (2¹²⁸) limb, h2.
- if len(msg) >= TagSize {
- h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
- h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
- h2 += c + 1
-
- msg = msg[TagSize:]
- } else {
- var buf [TagSize]byte
- copy(buf[:], msg)
- buf[len(msg)] = 1
-
- h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
- h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
- h2 += c
-
- msg = nil
- }
-
- // Multiplication of big number limbs is similar to elementary school
- // columnar multiplication. Instead of digits, there are 64-bit limbs.
- //
- // We are multiplying a 3 limbs number, h, by a 2 limbs number, r.
- //
- // h2 h1 h0 x
- // r1 r0 =
- // ----------------
- // h2r0 h1r0 h0r0 <-- individual 128-bit products
- // + h2r1 h1r1 h0r1
- // ------------------------
- // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs
- // ------------------------
- // m3.hi m2.hi m1.hi m0.hi <-- carry propagation
- // + m3.lo m2.lo m1.lo m0.lo
- // -------------------------------
- // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs
- //
- // The main difference from pen-and-paper multiplication is that we do
- // carry propagation in a separate step, as if we wrote two digit sums
- // at first (the 128-bit limbs), and then carried the tens all at once.
-
- h0r0 := mul64(h0, r0)
- h1r0 := mul64(h1, r0)
- h2r0 := mul64(h2, r0)
- h0r1 := mul64(h0, r1)
- h1r1 := mul64(h1, r1)
- h2r1 := mul64(h2, r1)
-
- // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their
- // top 4 bits cleared by rMask{0,1}, we know that their product is not going
- // to overflow 64 bits, so we can ignore the high part of the products.
- //
- // This also means that the product doesn't have a fifth limb (t4).
- if h2r0.hi != 0 {
- panic("poly1305: unexpected overflow")
- }
- if h2r1.hi != 0 {
- panic("poly1305: unexpected overflow")
- }
-
- m0 := h0r0
- m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again
- m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1.
- m3 := h2r1
-
- t0 := m0.lo
- t1, c := bitsAdd64(m1.lo, m0.hi, 0)
- t2, c := bitsAdd64(m2.lo, m1.hi, c)
- t3, _ := bitsAdd64(m3.lo, m2.hi, c)
-
- // Now we have the result as 4 64-bit limbs, and we need to reduce it
- // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do
- // a cheap partial reduction according to the reduction identity
- //
- // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5
- //
- // because 2¹³⁰ = 5 mod 2¹³⁰ - 5. This is only a partial reduction, since
- // the result is likely to be larger than 2¹³⁰ - 5, but still small enough
- // to fit the assumptions we make about h in the rest of the code.
- //
- // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23
-
- // We split the final result at the 2¹³⁰ mark into h and cc, the carry.
- // Note that the carry bits are effectively shifted left by 2, in other
- // words, cc = c * 4 for the c in the reduction identity.
- h0, h1, h2 = t0, t1, t2&maskLow2Bits
- cc := uint128{t2 & maskNotLow2Bits, t3}
-
- // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c.
-
- h0, c = bitsAdd64(h0, cc.lo, 0)
- h1, c = bitsAdd64(h1, cc.hi, c)
- h2 += c
-
- cc = shiftRightBy2(cc)
-
- h0, c = bitsAdd64(h0, cc.lo, 0)
- h1, c = bitsAdd64(h1, cc.hi, c)
- h2 += c
-
- // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most
- //
- // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1
- }
-
- state.h[0], state.h[1], state.h[2] = h0, h1, h2
-}
-
-const (
- maskLow2Bits uint64 = 0x0000000000000003
- maskNotLow2Bits uint64 = ^maskLow2Bits
-)
-
-// select64 returns x if v == 1 and y if v == 0, in constant time.
-func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }
-
-// [p0, p1, p2] is 2¹³⁰ - 5 in little-endian order.
-const (
- p0 = 0xFFFFFFFFFFFFFFFB
- p1 = 0xFFFFFFFFFFFFFFFF
- p2 = 0x0000000000000003
-)
-
-// finalize completes the modular reduction of h and computes
-//
-// out = h + s mod 2¹²⁸
-//
-func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
- h0, h1, h2 := h[0], h[1], h[2]
-
- // After the partial reduction in updateGeneric, h might be more than
- // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction
- // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the
- // result if the subtraction underflows, and t otherwise.
-
- hMinusP0, b := bitsSub64(h0, p0, 0)
- hMinusP1, b := bitsSub64(h1, p1, b)
- _, b = bitsSub64(h2, p2, b)
-
- // h = h if h < p else h - p
- h0 = select64(b, h0, hMinusP0)
- h1 = select64(b, h1, hMinusP1)
-
- // Finally, we compute the last Poly1305 step
- //
- // tag = h + s mod 2¹²⁸
- //
- // by just doing a wide addition with the 128 low bits of h and discarding
- // the overflow.
- h0, c := bitsAdd64(h0, s[0], 0)
- h1, _ = bitsAdd64(h1, s[1], c)
-
- binary.LittleEndian.PutUint64(out[0:8], h0)
- binary.LittleEndian.PutUint64(out[8:16], h1)
-}
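
bitsMul64/bitsAdd64/bitsSub64 are thin wrappers selected elsewhere in the package; on modern Go they map straight to math/bits. A self-contained sketch of the 128-bit helpers used above:

    package main

    import (
        "fmt"
        "math/bits"
    )

    type uint128 struct{ lo, hi uint64 }

    func mul64(a, b uint64) uint128 {
        hi, lo := bits.Mul64(a, b) // full 64x64 -> 128-bit product
        return uint128{lo, hi}
    }

    func add128(a, b uint128) uint128 {
        lo, c := bits.Add64(a.lo, b.lo, 0) // add with carry out
        hi, _ := bits.Add64(a.hi, b.hi, c) // propagate the carry
        return uint128{lo, hi}
    }

    func main() {
        x := mul64(1<<40, 1<<40) // 2⁸⁰ spills into the high limb
        fmt.Printf("hi=%#x lo=%#x\n", x.hi, x.lo)
        y := add128(x, x) // 2⁸¹
        fmt.Printf("hi=%#x lo=%#x\n", y.hi, y.lo)
    }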
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/dns/dnsmessage/message.go b/contrib/go/_std_1.18/src/vendor/golang.org/x/net/dns/dnsmessage/message.go
deleted file mode 100644
index 8c24430c5c..0000000000
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/dns/dnsmessage/message.go
+++ /dev/null
@@ -1,2664 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package dnsmessage provides a mostly RFC 1035 compliant implementation of
-// DNS message packing and unpacking.
-//
-// The package also supports messages with Extension Mechanisms for DNS
-// (EDNS(0)) as defined in RFC 6891.
-//
-// This implementation is designed to minimize heap allocations and avoid
-// unnecessary packing and unpacking as much as possible.
-package dnsmessage
-
-import (
- "errors"
-)
-
-// Message formats
-
-// A Type is a type of DNS request and response.
-type Type uint16
-
-const (
- // ResourceHeader.Type and Question.Type
- TypeA Type = 1
- TypeNS Type = 2
- TypeCNAME Type = 5
- TypeSOA Type = 6
- TypePTR Type = 12
- TypeMX Type = 15
- TypeTXT Type = 16
- TypeAAAA Type = 28
- TypeSRV Type = 33
- TypeOPT Type = 41
-
- // Question.Type
- TypeWKS Type = 11
- TypeHINFO Type = 13
- TypeMINFO Type = 14
- TypeAXFR Type = 252
- TypeALL Type = 255
-)
-
-var typeNames = map[Type]string{
- TypeA: "TypeA",
- TypeNS: "TypeNS",
- TypeCNAME: "TypeCNAME",
- TypeSOA: "TypeSOA",
- TypePTR: "TypePTR",
- TypeMX: "TypeMX",
- TypeTXT: "TypeTXT",
- TypeAAAA: "TypeAAAA",
- TypeSRV: "TypeSRV",
- TypeOPT: "TypeOPT",
- TypeWKS: "TypeWKS",
- TypeHINFO: "TypeHINFO",
- TypeMINFO: "TypeMINFO",
- TypeAXFR: "TypeAXFR",
- TypeALL: "TypeALL",
-}
-
-// String implements fmt.Stringer.String.
-func (t Type) String() string {
- if n, ok := typeNames[t]; ok {
- return n
- }
- return printUint16(uint16(t))
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (t Type) GoString() string {
- if n, ok := typeNames[t]; ok {
- return "dnsmessage." + n
- }
- return printUint16(uint16(t))
-}
-
-// A Class is a type of network.
-type Class uint16
-
-const (
- // ResourceHeader.Class and Question.Class
- ClassINET Class = 1
- ClassCSNET Class = 2
- ClassCHAOS Class = 3
- ClassHESIOD Class = 4
-
- // Question.Class
- ClassANY Class = 255
-)
-
-var classNames = map[Class]string{
- ClassINET: "ClassINET",
- ClassCSNET: "ClassCSNET",
- ClassCHAOS: "ClassCHAOS",
- ClassHESIOD: "ClassHESIOD",
- ClassANY: "ClassANY",
-}
-
-// String implements fmt.Stringer.String.
-func (c Class) String() string {
- if n, ok := classNames[c]; ok {
- return n
- }
- return printUint16(uint16(c))
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (c Class) GoString() string {
- if n, ok := classNames[c]; ok {
- return "dnsmessage." + n
- }
- return printUint16(uint16(c))
-}
-
-// An OpCode is a DNS operation code.
-type OpCode uint16
-
-// GoString implements fmt.GoStringer.GoString.
-func (o OpCode) GoString() string {
- return printUint16(uint16(o))
-}
-
-// An RCode is a DNS response status code.
-type RCode uint16
-
-// Header.RCode values.
-const (
- RCodeSuccess RCode = 0 // NoError
- RCodeFormatError RCode = 1 // FormErr
- RCodeServerFailure RCode = 2 // ServFail
- RCodeNameError RCode = 3 // NXDomain
- RCodeNotImplemented RCode = 4 // NotImp
- RCodeRefused RCode = 5 // Refused
-)
-
-var rCodeNames = map[RCode]string{
- RCodeSuccess: "RCodeSuccess",
- RCodeFormatError: "RCodeFormatError",
- RCodeServerFailure: "RCodeServerFailure",
- RCodeNameError: "RCodeNameError",
- RCodeNotImplemented: "RCodeNotImplemented",
- RCodeRefused: "RCodeRefused",
-}
-
-// String implements fmt.Stringer.String.
-func (r RCode) String() string {
- if n, ok := rCodeNames[r]; ok {
- return n
- }
- return printUint16(uint16(r))
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (r RCode) GoString() string {
- if n, ok := rCodeNames[r]; ok {
- return "dnsmessage." + n
- }
- return printUint16(uint16(r))
-}
-
-func printPaddedUint8(i uint8) string {
- b := byte(i)
- return string([]byte{
- b/100 + '0',
- b/10%10 + '0',
- b%10 + '0',
- })
-}
-
-func printUint8Bytes(buf []byte, i uint8) []byte {
- b := byte(i)
- if i >= 100 {
- buf = append(buf, b/100+'0')
- }
- if i >= 10 {
- buf = append(buf, b/10%10+'0')
- }
- return append(buf, b%10+'0')
-}
-
-func printByteSlice(b []byte) string {
- if len(b) == 0 {
- return ""
- }
- buf := make([]byte, 0, 5*len(b))
- buf = printUint8Bytes(buf, uint8(b[0]))
- for _, n := range b[1:] {
- buf = append(buf, ',', ' ')
- buf = printUint8Bytes(buf, uint8(n))
- }
- return string(buf)
-}
-
-const hexDigits = "0123456789abcdef"
-
-func printString(str []byte) string {
- buf := make([]byte, 0, len(str))
- for i := 0; i < len(str); i++ {
- c := str[i]
- if c == '.' || c == '-' || c == ' ' ||
- 'A' <= c && c <= 'Z' ||
- 'a' <= c && c <= 'z' ||
- '0' <= c && c <= '9' {
- buf = append(buf, c)
- continue
- }
-
- upper := c >> 4
- lower := (c << 4) >> 4
- buf = append(
- buf,
- '\\',
- 'x',
- hexDigits[upper],
- hexDigits[lower],
- )
- }
- return string(buf)
-}
-
-func printUint16(i uint16) string {
- return printUint32(uint32(i))
-}
-
-func printUint32(i uint32) string {
- // Max value is 4294967295.
- buf := make([]byte, 10)
- for b, d := buf, uint32(1000000000); d > 0; d /= 10 {
- b[0] = byte(i/d%10 + '0')
- if b[0] == '0' && len(b) == len(buf) && len(buf) > 1 {
- buf = buf[1:]
- }
- b = b[1:]
- i %= d
- }
- return string(buf)
-}
-
-func printBool(b bool) string {
- if b {
- return "true"
- }
- return "false"
-}
-
-var (
- // ErrNotStarted indicates that the prerequisite information isn't
- // available yet because the previous records haven't been appropriately
- // parsed, skipped or finished.
- ErrNotStarted = errors.New("parsing/packing of this type isn't available yet")
-
- // ErrSectionDone indicates that all records in the section have been
- // parsed or finished.
- ErrSectionDone = errors.New("parsing/packing of this section has completed")
-
- errBaseLen = errors.New("insufficient data for base length type")
- errCalcLen = errors.New("insufficient data for calculated length type")
- errReserved = errors.New("segment prefix is reserved")
- errTooManyPtr = errors.New("too many pointers (>10)")
- errInvalidPtr = errors.New("invalid pointer")
- errNilResouceBody = errors.New("nil resource body")
- errResourceLen = errors.New("insufficient data for resource body length")
- errSegTooLong = errors.New("segment length too long")
- errZeroSegLen = errors.New("zero length segment")
- errResTooLong = errors.New("resource length too long")
- errTooManyQuestions = errors.New("too many Questions to pack (>65535)")
- errTooManyAnswers = errors.New("too many Answers to pack (>65535)")
- errTooManyAuthorities = errors.New("too many Authorities to pack (>65535)")
- errTooManyAdditionals = errors.New("too many Additionals to pack (>65535)")
- errNonCanonicalName = errors.New("name is not in canonical format (it must end with a .)")
- errStringTooLong = errors.New("character string exceeds maximum length (255)")
- errCompressedSRV = errors.New("compressed name in SRV resource data")
-)
-
-// Internal constants.
-const (
- // packStartingCap is the default initial buffer size allocated during
- // packing.
- //
- // The starting capacity doesn't matter too much, but most DNS responses
- // will be <= 512 bytes as it is the limit for DNS over UDP.
- packStartingCap = 512
-
- // uint16Len is the length (in bytes) of a uint16.
- uint16Len = 2
-
- // uint32Len is the length (in bytes) of a uint32.
- uint32Len = 4
-
- // headerLen is the length (in bytes) of a DNS header.
- //
- // A header is comprised of 6 uint16s and no padding.
- headerLen = 6 * uint16Len
-)
-
-type nestedError struct {
- // s is the current level's error message.
- s string
-
- // err is the nested error.
- err error
-}
-
-// nestedError implements error.Error.
-func (e *nestedError) Error() string {
- return e.s + ": " + e.err.Error()
-}
-
-// Header is a representation of a DNS message header.
-type Header struct {
- ID uint16
- Response bool
- OpCode OpCode
- Authoritative bool
- Truncated bool
- RecursionDesired bool
- RecursionAvailable bool
- RCode RCode
-}
-
-func (m *Header) pack() (id uint16, bits uint16) {
- id = m.ID
- bits = uint16(m.OpCode)<<11 | uint16(m.RCode)
- if m.RecursionAvailable {
- bits |= headerBitRA
- }
- if m.RecursionDesired {
- bits |= headerBitRD
- }
- if m.Truncated {
- bits |= headerBitTC
- }
- if m.Authoritative {
- bits |= headerBitAA
- }
- if m.Response {
- bits |= headerBitQR
- }
- return
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (m *Header) GoString() string {
- return "dnsmessage.Header{" +
- "ID: " + printUint16(m.ID) + ", " +
- "Response: " + printBool(m.Response) + ", " +
- "OpCode: " + m.OpCode.GoString() + ", " +
- "Authoritative: " + printBool(m.Authoritative) + ", " +
- "Truncated: " + printBool(m.Truncated) + ", " +
- "RecursionDesired: " + printBool(m.RecursionDesired) + ", " +
- "RecursionAvailable: " + printBool(m.RecursionAvailable) + ", " +
- "RCode: " + m.RCode.GoString() + "}"
-}
-
-// Message is a representation of a DNS message.
-type Message struct {
- Header
- Questions []Question
- Answers []Resource
- Authorities []Resource
- Additionals []Resource
-}
-
-type section uint8
-
-const (
- sectionNotStarted section = iota
- sectionHeader
- sectionQuestions
- sectionAnswers
- sectionAuthorities
- sectionAdditionals
- sectionDone
-
- headerBitQR = 1 << 15 // query/response (response=1)
- headerBitAA = 1 << 10 // authoritative
- headerBitTC = 1 << 9 // truncated
- headerBitRD = 1 << 8 // recursion desired
- headerBitRA = 1 << 7 // recursion available
-)
-
-var sectionNames = map[section]string{
- sectionHeader: "header",
- sectionQuestions: "Question",
- sectionAnswers: "Answer",
- sectionAuthorities: "Authority",
- sectionAdditionals: "Additional",
-}
-
-// header is the wire format for a DNS message header.
-type header struct {
- id uint16
- bits uint16
- questions uint16
- answers uint16
- authorities uint16
- additionals uint16
-}
-
-func (h *header) count(sec section) uint16 {
- switch sec {
- case sectionQuestions:
- return h.questions
- case sectionAnswers:
- return h.answers
- case sectionAuthorities:
- return h.authorities
- case sectionAdditionals:
- return h.additionals
- }
- return 0
-}
-
-// pack appends the wire format of the header to msg.
-func (h *header) pack(msg []byte) []byte {
- msg = packUint16(msg, h.id)
- msg = packUint16(msg, h.bits)
- msg = packUint16(msg, h.questions)
- msg = packUint16(msg, h.answers)
- msg = packUint16(msg, h.authorities)
- return packUint16(msg, h.additionals)
-}
-
-func (h *header) unpack(msg []byte, off int) (int, error) {
- newOff := off
- var err error
- if h.id, newOff, err = unpackUint16(msg, newOff); err != nil {
- return off, &nestedError{"id", err}
- }
- if h.bits, newOff, err = unpackUint16(msg, newOff); err != nil {
- return off, &nestedError{"bits", err}
- }
- if h.questions, newOff, err = unpackUint16(msg, newOff); err != nil {
- return off, &nestedError{"questions", err}
- }
- if h.answers, newOff, err = unpackUint16(msg, newOff); err != nil {
- return off, &nestedError{"answers", err}
- }
- if h.authorities, newOff, err = unpackUint16(msg, newOff); err != nil {
- return off, &nestedError{"authorities", err}
- }
- if h.additionals, newOff, err = unpackUint16(msg, newOff); err != nil {
- return off, &nestedError{"additionals", err}
- }
- return newOff, nil
-}
-
-func (h *header) header() Header {
- return Header{
- ID: h.id,
- Response: (h.bits & headerBitQR) != 0,
- OpCode: OpCode(h.bits>>11) & 0xF,
- Authoritative: (h.bits & headerBitAA) != 0,
- Truncated: (h.bits & headerBitTC) != 0,
- RecursionDesired: (h.bits & headerBitRD) != 0,
- RecursionAvailable: (h.bits & headerBitRA) != 0,
- RCode: RCode(h.bits & 0xF),
- }
-}
-
-// A Resource is a DNS resource record.
-type Resource struct {
- Header ResourceHeader
- Body ResourceBody
-}
-
-func (r *Resource) GoString() string {
- return "dnsmessage.Resource{" +
- "Header: " + r.Header.GoString() +
- ", Body: &" + r.Body.GoString() +
- "}"
-}
-
-// A ResourceBody is a DNS resource record minus the header.
-type ResourceBody interface {
- // pack packs a Resource except for its header.
- pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error)
-
- // realType returns the actual type of the Resource. This is used to
- // fill in the header Type field.
- realType() Type
-
- // GoString implements fmt.GoStringer.GoString.
- GoString() string
-}
-
-// pack appends the wire format of the Resource to msg.
-func (r *Resource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
- if r.Body == nil {
- return msg, errNilResouceBody
- }
- oldMsg := msg
- r.Header.Type = r.Body.realType()
- msg, lenOff, err := r.Header.pack(msg, compression, compressionOff)
- if err != nil {
- return msg, &nestedError{"ResourceHeader", err}
- }
- preLen := len(msg)
- msg, err = r.Body.pack(msg, compression, compressionOff)
- if err != nil {
- return msg, &nestedError{"content", err}
- }
- if err := r.Header.fixLen(msg, lenOff, preLen); err != nil {
- return oldMsg, err
- }
- return msg, nil
-}
-
-// A Parser allows incrementally parsing a DNS message.
-//
-// When parsing is started, the Header is parsed. Next, each Question can be
-// either parsed or skipped. Alternatively, all Questions can be skipped at
-// once. When all Questions have been parsed, attempting to parse Questions
-// will return (nil, nil) and attempting to skip Questions will return
-// (true, nil). After all Questions have been either parsed or skipped, all
-// Answers, Authorities and Additionals can be either parsed or skipped in the
-// same way, and each type of Resource must be fully parsed or skipped before
-// proceeding to the next type of Resource.
-//
-// Note that there is no requirement to fully skip or parse the message.
-type Parser struct {
- msg []byte
- header header
-
- section section
- off int
- index int
- resHeaderValid bool
- resHeader ResourceHeader
-}
-
-// Start parses the header and enables the parsing of Questions.
-func (p *Parser) Start(msg []byte) (Header, error) {
- if p.msg != nil {
- *p = Parser{}
- }
- p.msg = msg
- var err error
- if p.off, err = p.header.unpack(msg, 0); err != nil {
- return Header{}, &nestedError{"unpacking header", err}
- }
- p.section = sectionQuestions
- return p.header.header(), nil
-}
-
-func (p *Parser) checkAdvance(sec section) error {
- if p.section < sec {
- return ErrNotStarted
- }
- if p.section > sec {
- return ErrSectionDone
- }
- p.resHeaderValid = false
- if p.index == int(p.header.count(sec)) {
- p.index = 0
- p.section++
- return ErrSectionDone
- }
- return nil
-}
-
-func (p *Parser) resource(sec section) (Resource, error) {
- var r Resource
- var err error
- r.Header, err = p.resourceHeader(sec)
- if err != nil {
- return r, err
- }
- p.resHeaderValid = false
- r.Body, p.off, err = unpackResourceBody(p.msg, p.off, r.Header)
- if err != nil {
- return Resource{}, &nestedError{"unpacking " + sectionNames[sec], err}
- }
- p.index++
- return r, nil
-}
-
-func (p *Parser) resourceHeader(sec section) (ResourceHeader, error) {
- if p.resHeaderValid {
- return p.resHeader, nil
- }
- if err := p.checkAdvance(sec); err != nil {
- return ResourceHeader{}, err
- }
- var hdr ResourceHeader
- off, err := hdr.unpack(p.msg, p.off)
- if err != nil {
- return ResourceHeader{}, err
- }
- p.resHeaderValid = true
- p.resHeader = hdr
- p.off = off
- return hdr, nil
-}
-
-func (p *Parser) skipResource(sec section) error {
- if p.resHeaderValid {
- newOff := p.off + int(p.resHeader.Length)
- if newOff > len(p.msg) {
- return errResourceLen
- }
- p.off = newOff
- p.resHeaderValid = false
- p.index++
- return nil
- }
- if err := p.checkAdvance(sec); err != nil {
- return err
- }
- var err error
- p.off, err = skipResource(p.msg, p.off)
- if err != nil {
- return &nestedError{"skipping: " + sectionNames[sec], err}
- }
- p.index++
- return nil
-}
-
-// Question parses a single Question.
-func (p *Parser) Question() (Question, error) {
- if err := p.checkAdvance(sectionQuestions); err != nil {
- return Question{}, err
- }
- var name Name
- off, err := name.unpack(p.msg, p.off)
- if err != nil {
- return Question{}, &nestedError{"unpacking Question.Name", err}
- }
- typ, off, err := unpackType(p.msg, off)
- if err != nil {
- return Question{}, &nestedError{"unpacking Question.Type", err}
- }
- class, off, err := unpackClass(p.msg, off)
- if err != nil {
- return Question{}, &nestedError{"unpacking Question.Class", err}
- }
- p.off = off
- p.index++
- return Question{name, typ, class}, nil
-}
-
-// AllQuestions parses all Questions.
-func (p *Parser) AllQuestions() ([]Question, error) {
- // Multiple questions are valid according to the spec,
- // but servers don't actually support them. There will
- // be at most one question here.
- //
- // Do not pre-allocate based on info in p.header, since
- // the data is untrusted.
- qs := []Question{}
- for {
- q, err := p.Question()
- if err == ErrSectionDone {
- return qs, nil
- }
- if err != nil {
- return nil, err
- }
- qs = append(qs, q)
- }
-}
-
-// SkipQuestion skips a single Question.
-func (p *Parser) SkipQuestion() error {
- if err := p.checkAdvance(sectionQuestions); err != nil {
- return err
- }
- off, err := skipName(p.msg, p.off)
- if err != nil {
- return &nestedError{"skipping Question Name", err}
- }
- if off, err = skipType(p.msg, off); err != nil {
- return &nestedError{"skipping Question Type", err}
- }
- if off, err = skipClass(p.msg, off); err != nil {
- return &nestedError{"skipping Question Class", err}
- }
- p.off = off
- p.index++
- return nil
-}
-
-// SkipAllQuestions skips all Questions.
-func (p *Parser) SkipAllQuestions() error {
- for {
- if err := p.SkipQuestion(); err == ErrSectionDone {
- return nil
- } else if err != nil {
- return err
- }
- }
-}
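
This API is available as golang.org/x/net/dns/dnsmessage. A hedged sketch of the incremental pattern the Parser doc describes, extracting only A records (wire is a hypothetical []byte holding a response; the fmt, log, and package imports are assumed):

    var p dnsmessage.Parser
    if _, err := p.Start(wire); err != nil {
        log.Fatal(err)
    }
    if err := p.SkipAllQuestions(); err != nil {
        log.Fatal(err)
    }
    for {
        h, err := p.AnswerHeader()
        if err == dnsmessage.ErrSectionDone {
            break
        }
        if err != nil {
            log.Fatal(err)
        }
        if h.Type != dnsmessage.TypeA {
            if err := p.SkipAnswer(); err != nil {
                log.Fatal(err)
            }
            continue
        }
        r, err := p.AResource()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(h.Name, r.A) // r.A holds the IPv4 address as [4]byte
    }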
-
-// AnswerHeader parses a single Answer ResourceHeader.
-func (p *Parser) AnswerHeader() (ResourceHeader, error) {
- return p.resourceHeader(sectionAnswers)
-}
-
-// Answer parses a single Answer Resource.
-func (p *Parser) Answer() (Resource, error) {
- return p.resource(sectionAnswers)
-}
-
-// AllAnswers parses all Answer Resources.
-func (p *Parser) AllAnswers() ([]Resource, error) {
- // The most common query is for A/AAAA, which usually returns
- // a handful of IPs.
- //
- // Pre-allocate up to a certain limit, since p.header is
- // untrusted data.
- n := int(p.header.answers)
- if n > 20 {
- n = 20
- }
- as := make([]Resource, 0, n)
- for {
- a, err := p.Answer()
- if err == ErrSectionDone {
- return as, nil
- }
- if err != nil {
- return nil, err
- }
- as = append(as, a)
- }
-}
-
-// SkipAnswer skips a single Answer Resource.
-func (p *Parser) SkipAnswer() error {
- return p.skipResource(sectionAnswers)
-}
-
-// SkipAllAnswers skips all Answer Resources.
-func (p *Parser) SkipAllAnswers() error {
- for {
- if err := p.SkipAnswer(); err == ErrSectionDone {
- return nil
- } else if err != nil {
- return err
- }
- }
-}
-
-// AuthorityHeader parses a single Authority ResourceHeader.
-func (p *Parser) AuthorityHeader() (ResourceHeader, error) {
- return p.resourceHeader(sectionAuthorities)
-}
-
-// Authority parses a single Authority Resource.
-func (p *Parser) Authority() (Resource, error) {
- return p.resource(sectionAuthorities)
-}
-
-// AllAuthorities parses all Authority Resources.
-func (p *Parser) AllAuthorities() ([]Resource, error) {
- // Authorities contains SOA in case of NXDOMAIN and friends,
- // otherwise it is empty.
- //
- // Pre-allocate up to a certain limit, since p.header is
- // untrusted data.
- n := int(p.header.authorities)
- if n > 10 {
- n = 10
- }
- as := make([]Resource, 0, n)
- for {
- a, err := p.Authority()
- if err == ErrSectionDone {
- return as, nil
- }
- if err != nil {
- return nil, err
- }
- as = append(as, a)
- }
-}
-
-// SkipAuthority skips a single Authority Resource.
-func (p *Parser) SkipAuthority() error {
- return p.skipResource(sectionAuthorities)
-}
-
-// SkipAllAuthorities skips all Authority Resources.
-func (p *Parser) SkipAllAuthorities() error {
- for {
- if err := p.SkipAuthority(); err == ErrSectionDone {
- return nil
- } else if err != nil {
- return err
- }
- }
-}
-
-// AdditionalHeader parses a single Additional ResourceHeader.
-func (p *Parser) AdditionalHeader() (ResourceHeader, error) {
- return p.resourceHeader(sectionAdditionals)
-}
-
-// Additional parses a single Additional Resource.
-func (p *Parser) Additional() (Resource, error) {
- return p.resource(sectionAdditionals)
-}
-
-// AllAdditionals parses all Additional Resources.
-func (p *Parser) AllAdditionals() ([]Resource, error) {
- // Additionals usually contain OPT, and sometimes A/AAAA
- // glue records.
- //
- // Pre-allocate up to a certain limit, since p.header is
- // untrusted data.
- n := int(p.header.additionals)
- if n > 10 {
- n = 10
- }
- as := make([]Resource, 0, n)
- for {
- a, err := p.Additional()
- if err == ErrSectionDone {
- return as, nil
- }
- if err != nil {
- return nil, err
- }
- as = append(as, a)
- }
-}
-
-// SkipAdditional skips a single Additional Resource.
-func (p *Parser) SkipAdditional() error {
- return p.skipResource(sectionAdditionals)
-}
-
-// SkipAllAdditionals skips all Additional Resources.
-func (p *Parser) SkipAllAdditionals() error {
- for {
- if err := p.SkipAdditional(); err == ErrSectionDone {
- return nil
- } else if err != nil {
- return err
- }
- }
-}
-
-// CNAMEResource parses a single CNAMEResource.
-//
-// One of the XXXHeader methods must have been called before calling this
-// method.
-func (p *Parser) CNAMEResource() (CNAMEResource, error) {
- if !p.resHeaderValid || p.resHeader.Type != TypeCNAME {
- return CNAMEResource{}, ErrNotStarted
- }
- r, err := unpackCNAMEResource(p.msg, p.off)
- if err != nil {
- return CNAMEResource{}, err
- }
- p.off += int(p.resHeader.Length)
- p.resHeaderValid = false
- p.index++
- return r, nil
-}
-
-// MXResource parses a single MXResource.
-//
-// One of the XXXHeader methods must have been called before calling this
-// method.
-func (p *Parser) MXResource() (MXResource, error) {
- if !p.resHeaderValid || p.resHeader.Type != TypeMX {
- return MXResource{}, ErrNotStarted
- }
- r, err := unpackMXResource(p.msg, p.off)
- if err != nil {
- return MXResource{}, err
- }
- p.off += int(p.resHeader.Length)
- p.resHeaderValid = false
- p.index++
- return r, nil
-}
-
-// NSResource parses a single NSResource.
-//
-// One of the XXXHeader methods must have been called before calling this
-// method.
-func (p *Parser) NSResource() (NSResource, error) {
- if !p.resHeaderValid || p.resHeader.Type != TypeNS {
- return NSResource{}, ErrNotStarted
- }
- r, err := unpackNSResource(p.msg, p.off)
- if err != nil {
- return NSResource{}, err
- }
- p.off += int(p.resHeader.Length)
- p.resHeaderValid = false
- p.index++
- return r, nil
-}
-
-// PTRResource parses a single PTRResource.
-//
-// One of the XXXHeader methods must have been called before calling this
-// method.
-func (p *Parser) PTRResource() (PTRResource, error) {
- if !p.resHeaderValid || p.resHeader.Type != TypePTR {
- return PTRResource{}, ErrNotStarted
- }
- r, err := unpackPTRResource(p.msg, p.off)
- if err != nil {
- return PTRResource{}, err
- }
- p.off += int(p.resHeader.Length)
- p.resHeaderValid = false
- p.index++
- return r, nil
-}
-
-// SOAResource parses a single SOAResource.
-//
-// One of the XXXHeader methods must have been called before calling this
-// method.
-func (p *Parser) SOAResource() (SOAResource, error) {
- if !p.resHeaderValid || p.resHeader.Type != TypeSOA {
- return SOAResource{}, ErrNotStarted
- }
- r, err := unpackSOAResource(p.msg, p.off)
- if err != nil {
- return SOAResource{}, err
- }
- p.off += int(p.resHeader.Length)
- p.resHeaderValid = false
- p.index++
- return r, nil
-}
-
-// TXTResource parses a single TXTResource.
-//
-// One of the XXXHeader methods must have been called before calling this
-// method.
-func (p *Parser) TXTResource() (TXTResource, error) {
- if !p.resHeaderValid || p.resHeader.Type != TypeTXT {
- return TXTResource{}, ErrNotStarted
- }
- r, err := unpackTXTResource(p.msg, p.off, p.resHeader.Length)
- if err != nil {
- return TXTResource{}, err
- }
- p.off += int(p.resHeader.Length)
- p.resHeaderValid = false
- p.index++
- return r, nil
-}
-
-// SRVResource parses a single SRVResource.
-//
-// One of the XXXHeader methods must have been called before calling this
-// method.
-func (p *Parser) SRVResource() (SRVResource, error) {
- if !p.resHeaderValid || p.resHeader.Type != TypeSRV {
- return SRVResource{}, ErrNotStarted
- }
- r, err := unpackSRVResource(p.msg, p.off)
- if err != nil {
- return SRVResource{}, err
- }
- p.off += int(p.resHeader.Length)
- p.resHeaderValid = false
- p.index++
- return r, nil
-}
-
-// AResource parses a single AResource.
-//
-// One of the XXXHeader methods must have been called before calling this
-// method.
-func (p *Parser) AResource() (AResource, error) {
- if !p.resHeaderValid || p.resHeader.Type != TypeA {
- return AResource{}, ErrNotStarted
- }
- r, err := unpackAResource(p.msg, p.off)
- if err != nil {
- return AResource{}, err
- }
- p.off += int(p.resHeader.Length)
- p.resHeaderValid = false
- p.index++
- return r, nil
-}
-
-// AAAAResource parses a single AAAAResource.
-//
-// One of the XXXHeader methods must have been called before calling this
-// method.
-func (p *Parser) AAAAResource() (AAAAResource, error) {
- if !p.resHeaderValid || p.resHeader.Type != TypeAAAA {
- return AAAAResource{}, ErrNotStarted
- }
- r, err := unpackAAAAResource(p.msg, p.off)
- if err != nil {
- return AAAAResource{}, err
- }
- p.off += int(p.resHeader.Length)
- p.resHeaderValid = false
- p.index++
- return r, nil
-}
-
-// OPTResource parses a single OPTResource.
-//
-// One of the XXXHeader methods must have been called before calling this
-// method.
-func (p *Parser) OPTResource() (OPTResource, error) {
- if !p.resHeaderValid || p.resHeader.Type != TypeOPT {
- return OPTResource{}, ErrNotStarted
- }
- r, err := unpackOPTResource(p.msg, p.off, p.resHeader.Length)
- if err != nil {
- return OPTResource{}, err
- }
- p.off += int(p.resHeader.Length)
- p.resHeaderValid = false
- p.index++
- return r, nil
-}
-
-// UnknownResource parses a single UnknownResource.
-//
-// One of the XXXHeader methods must have been called before calling this
-// method.
-func (p *Parser) UnknownResource() (UnknownResource, error) {
- if !p.resHeaderValid {
- return UnknownResource{}, ErrNotStarted
- }
- r, err := unpackUnknownResource(p.resHeader.Type, p.msg, p.off, p.resHeader.Length)
- if err != nil {
- return UnknownResource{}, err
- }
- p.off += int(p.resHeader.Length)
- p.resHeaderValid = false
- p.index++
- return r, nil
-}
-
-// Unpack parses a full Message.
-func (m *Message) Unpack(msg []byte) error {
- var p Parser
- var err error
- if m.Header, err = p.Start(msg); err != nil {
- return err
- }
- if m.Questions, err = p.AllQuestions(); err != nil {
- return err
- }
- if m.Answers, err = p.AllAnswers(); err != nil {
- return err
- }
- if m.Authorities, err = p.AllAuthorities(); err != nil {
- return err
- }
- if m.Additionals, err = p.AllAdditionals(); err != nil {
- return err
- }
- return nil
-}
-
-// Pack packs a full Message.
-func (m *Message) Pack() ([]byte, error) {
- return m.AppendPack(make([]byte, 0, packStartingCap))
-}
-
-// AppendPack is like Pack but appends the full Message to b and returns the
-// extended buffer.
-func (m *Message) AppendPack(b []byte) ([]byte, error) {
- // Validate the lengths. It is very unlikely that anyone will try to
- // pack more than 65535 of any particular type, but it is possible and
- // we should fail gracefully.
- if len(m.Questions) > int(^uint16(0)) {
- return nil, errTooManyQuestions
- }
- if len(m.Answers) > int(^uint16(0)) {
- return nil, errTooManyAnswers
- }
- if len(m.Authorities) > int(^uint16(0)) {
- return nil, errTooManyAuthorities
- }
- if len(m.Additionals) > int(^uint16(0)) {
- return nil, errTooManyAdditionals
- }
-
- var h header
- h.id, h.bits = m.Header.pack()
-
- h.questions = uint16(len(m.Questions))
- h.answers = uint16(len(m.Answers))
- h.authorities = uint16(len(m.Authorities))
- h.additionals = uint16(len(m.Additionals))
-
- compressionOff := len(b)
- msg := h.pack(b)
-
- // RFC 1035 allows (but does not require) compression for packing. RFC
- // 1035 requires unpacking implementations to support compression, so
- // unconditionally enabling it is fine.
- //
- // DNS lookups are typically done over UDP, and RFC 1035 states that UDP
- // DNS messages can be a maximum of 512 bytes long. Without compression,
- // many DNS response messages are over this limit, so enabling
- // compression will help ensure compliance.
- compression := map[string]int{}
-
- for i := range m.Questions {
- var err error
- if msg, err = m.Questions[i].pack(msg, compression, compressionOff); err != nil {
- return nil, &nestedError{"packing Question", err}
- }
- }
- for i := range m.Answers {
- var err error
- if msg, err = m.Answers[i].pack(msg, compression, compressionOff); err != nil {
- return nil, &nestedError{"packing Answer", err}
- }
- }
- for i := range m.Authorities {
- var err error
- if msg, err = m.Authorities[i].pack(msg, compression, compressionOff); err != nil {
- return nil, &nestedError{"packing Authority", err}
- }
- }
- for i := range m.Additionals {
- var err error
- if msg, err = m.Additionals[i].pack(msg, compression, compressionOff); err != nil {
- return nil, &nestedError{"packing Additional", err}
- }
- }
-
- return msg, nil
-}
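
A minimal pack/unpack round trip, assuming the exported golang.org/x/net/dns/dnsmessage package (example.com. is a placeholder name):

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/net/dns/dnsmessage"
    )

    func main() {
        q := dnsmessage.Message{
            Header: dnsmessage.Header{ID: 1, RecursionDesired: true},
            Questions: []dnsmessage.Question{{
                Name:  dnsmessage.MustNewName("example.com."),
                Type:  dnsmessage.TypeA,
                Class: dnsmessage.ClassINET,
            }},
        }
        wire, err := q.Pack()
        if err != nil {
            log.Fatal(err)
        }
        var m dnsmessage.Message
        if err := m.Unpack(wire); err != nil {
            log.Fatal(err)
        }
        fmt.Println(m.Questions[0].Name) // example.com.
    }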
-
-// GoString implements fmt.GoStringer.GoString.
-func (m *Message) GoString() string {
- s := "dnsmessage.Message{Header: " + m.Header.GoString() + ", " +
- "Questions: []dnsmessage.Question{"
- if len(m.Questions) > 0 {
- s += m.Questions[0].GoString()
- for _, q := range m.Questions[1:] {
- s += ", " + q.GoString()
- }
- }
- s += "}, Answers: []dnsmessage.Resource{"
- if len(m.Answers) > 0 {
- s += m.Answers[0].GoString()
- for _, a := range m.Answers[1:] {
- s += ", " + a.GoString()
- }
- }
- s += "}, Authorities: []dnsmessage.Resource{"
- if len(m.Authorities) > 0 {
- s += m.Authorities[0].GoString()
- for _, a := range m.Authorities[1:] {
- s += ", " + a.GoString()
- }
- }
- s += "}, Additionals: []dnsmessage.Resource{"
- if len(m.Additionals) > 0 {
- s += m.Additionals[0].GoString()
- for _, a := range m.Additionals[1:] {
- s += ", " + a.GoString()
- }
- }
- return s + "}}"
-}
-
-// A Builder allows incrementally packing a DNS message.
-//
-// Example usage:
-// buf := make([]byte, 2, 514)
-// b := NewBuilder(buf, Header{...})
-// b.EnableCompression()
-// // Optionally start a section and add things to that section.
-// // Repeat adding sections as necessary.
-// buf, err := b.Finish()
-// // If err is nil, buf[2:] will contain the built bytes.
-type Builder struct {
- // msg is the storage for the message being built.
- msg []byte
-
- // section keeps track of the current section being built.
- section section
-
- // header keeps track of what should go in the header when Finish is
- // called.
- header header
-
- // start is the starting index of the bytes allocated in msg for header.
- start int
-
- // compression is a mapping from name suffixes to their starting index
- // in msg.
- compression map[string]int
-}
-
-// NewBuilder creates a new builder with compression disabled.
-//
-// Note: Most users will want to immediately enable compression with the
-// EnableCompression method. See that method's comment for why you may or may
-// not want to enable compression.
-//
-// The DNS message is appended to the provided initial buffer buf (which may be
-// nil) as it is built. The final message is returned by the (*Builder).Finish
-// method, which includes buf[:len(buf)] and may return the same underlying
-// array if there was sufficient capacity in the slice.
-func NewBuilder(buf []byte, h Header) Builder {
- if buf == nil {
- buf = make([]byte, 0, packStartingCap)
- }
- b := Builder{msg: buf, start: len(buf)}
- b.header.id, b.header.bits = h.pack()
- var hb [headerLen]byte
- b.msg = append(b.msg, hb[:]...)
- b.section = sectionHeader
- return b
-}
-
-// EnableCompression enables compression in the Builder.
-//
-// Leaving compression disabled avoids compression related allocations, but can
-// result in larger message sizes. Be careful with this mode as it can cause
-// messages to exceed the UDP size limit.
-//
-// According to RFC 1035, section 4.1.4, the use of compression is optional, but
-// all implementations must accept both compressed and uncompressed DNS
-// messages.
-//
-// Compression should be enabled before any sections are added for best results.
-func (b *Builder) EnableCompression() {
- b.compression = map[string]int{}
-}
-
-func (b *Builder) startCheck(s section) error {
- if b.section <= sectionNotStarted {
- return ErrNotStarted
- }
- if b.section > s {
- return ErrSectionDone
- }
- return nil
-}
-
-// StartQuestions prepares the builder for packing Questions.
-func (b *Builder) StartQuestions() error {
- if err := b.startCheck(sectionQuestions); err != nil {
- return err
- }
- b.section = sectionQuestions
- return nil
-}
-
-// StartAnswers prepares the builder for packing Answers.
-func (b *Builder) StartAnswers() error {
- if err := b.startCheck(sectionAnswers); err != nil {
- return err
- }
- b.section = sectionAnswers
- return nil
-}
-
-// StartAuthorities prepares the builder for packing Authorities.
-func (b *Builder) StartAuthorities() error {
- if err := b.startCheck(sectionAuthorities); err != nil {
- return err
- }
- b.section = sectionAuthorities
- return nil
-}
-
-// StartAdditionals prepares the builder for packing Additionals.
-func (b *Builder) StartAdditionals() error {
- if err := b.startCheck(sectionAdditionals); err != nil {
- return err
- }
- b.section = sectionAdditionals
- return nil
-}
-
-func (b *Builder) incrementSectionCount() error {
- var count *uint16
- var err error
- switch b.section {
- case sectionQuestions:
- count = &b.header.questions
- err = errTooManyQuestions
- case sectionAnswers:
- count = &b.header.answers
- err = errTooManyAnswers
- case sectionAuthorities:
- count = &b.header.authorities
- err = errTooManyAuthorities
- case sectionAdditionals:
- count = &b.header.additionals
- err = errTooManyAdditionals
- }
- if *count == ^uint16(0) {
- return err
- }
- *count++
- return nil
-}
-
-// Question adds a single Question.
-func (b *Builder) Question(q Question) error {
- if b.section < sectionQuestions {
- return ErrNotStarted
- }
- if b.section > sectionQuestions {
- return ErrSectionDone
- }
- msg, err := q.pack(b.msg, b.compression, b.start)
- if err != nil {
- return err
- }
- if err := b.incrementSectionCount(); err != nil {
- return err
- }
- b.msg = msg
- return nil
-}
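
And the incremental equivalent, following the Builder doc comment above; AResource and Finish are defined further down this file, and 192.0.2.1 is a documentation-range placeholder address:

    b := dnsmessage.NewBuilder(nil, dnsmessage.Header{Response: true, Authoritative: true})
    b.EnableCompression()
    if err := b.StartQuestions(); err != nil {
        log.Fatal(err)
    }
    name := dnsmessage.MustNewName("example.com.")
    q := dnsmessage.Question{Name: name, Type: dnsmessage.TypeA, Class: dnsmessage.ClassINET}
    if err := b.Question(q); err != nil {
        log.Fatal(err)
    }
    if err := b.StartAnswers(); err != nil {
        log.Fatal(err)
    }
    hdr := dnsmessage.ResourceHeader{Name: name, Class: dnsmessage.ClassINET, TTL: 300}
    if err := b.AResource(hdr, dnsmessage.AResource{A: [4]byte{192, 0, 2, 1}}); err != nil {
        log.Fatal(err)
    }
    wire, err := b.Finish()
    if err != nil {
        log.Fatal(err)
    }
    _ = wire // the packed message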
-
-func (b *Builder) checkResourceSection() error {
- if b.section < sectionAnswers {
- return ErrNotStarted
- }
- if b.section > sectionAdditionals {
- return ErrSectionDone
- }
- return nil
-}
-
-// CNAMEResource adds a single CNAMEResource.
-func (b *Builder) CNAMEResource(h ResourceHeader, r CNAMEResource) error {
- if err := b.checkResourceSection(); err != nil {
- return err
- }
- h.Type = r.realType()
- msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
- if err != nil {
- return &nestedError{"ResourceHeader", err}
- }
- preLen := len(msg)
- if msg, err = r.pack(msg, b.compression, b.start); err != nil {
- return &nestedError{"CNAMEResource body", err}
- }
- if err := h.fixLen(msg, lenOff, preLen); err != nil {
- return err
- }
- if err := b.incrementSectionCount(); err != nil {
- return err
- }
- b.msg = msg
- return nil
-}
-
-// MXResource adds a single MXResource.
-func (b *Builder) MXResource(h ResourceHeader, r MXResource) error {
- if err := b.checkResourceSection(); err != nil {
- return err
- }
- h.Type = r.realType()
- msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
- if err != nil {
- return &nestedError{"ResourceHeader", err}
- }
- preLen := len(msg)
- if msg, err = r.pack(msg, b.compression, b.start); err != nil {
- return &nestedError{"MXResource body", err}
- }
- if err := h.fixLen(msg, lenOff, preLen); err != nil {
- return err
- }
- if err := b.incrementSectionCount(); err != nil {
- return err
- }
- b.msg = msg
- return nil
-}
-
-// NSResource adds a single NSResource.
-func (b *Builder) NSResource(h ResourceHeader, r NSResource) error {
- if err := b.checkResourceSection(); err != nil {
- return err
- }
- h.Type = r.realType()
- msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
- if err != nil {
- return &nestedError{"ResourceHeader", err}
- }
- preLen := len(msg)
- if msg, err = r.pack(msg, b.compression, b.start); err != nil {
- return &nestedError{"NSResource body", err}
- }
- if err := h.fixLen(msg, lenOff, preLen); err != nil {
- return err
- }
- if err := b.incrementSectionCount(); err != nil {
- return err
- }
- b.msg = msg
- return nil
-}
-
-// PTRResource adds a single PTRResource.
-func (b *Builder) PTRResource(h ResourceHeader, r PTRResource) error {
- if err := b.checkResourceSection(); err != nil {
- return err
- }
- h.Type = r.realType()
- msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
- if err != nil {
- return &nestedError{"ResourceHeader", err}
- }
- preLen := len(msg)
- if msg, err = r.pack(msg, b.compression, b.start); err != nil {
- return &nestedError{"PTRResource body", err}
- }
- if err := h.fixLen(msg, lenOff, preLen); err != nil {
- return err
- }
- if err := b.incrementSectionCount(); err != nil {
- return err
- }
- b.msg = msg
- return nil
-}
-
-// SOAResource adds a single SOAResource.
-func (b *Builder) SOAResource(h ResourceHeader, r SOAResource) error {
- if err := b.checkResourceSection(); err != nil {
- return err
- }
- h.Type = r.realType()
- msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
- if err != nil {
- return &nestedError{"ResourceHeader", err}
- }
- preLen := len(msg)
- if msg, err = r.pack(msg, b.compression, b.start); err != nil {
- return &nestedError{"SOAResource body", err}
- }
- if err := h.fixLen(msg, lenOff, preLen); err != nil {
- return err
- }
- if err := b.incrementSectionCount(); err != nil {
- return err
- }
- b.msg = msg
- return nil
-}
-
-// TXTResource adds a single TXTResource.
-func (b *Builder) TXTResource(h ResourceHeader, r TXTResource) error {
- if err := b.checkResourceSection(); err != nil {
- return err
- }
- h.Type = r.realType()
- msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
- if err != nil {
- return &nestedError{"ResourceHeader", err}
- }
- preLen := len(msg)
- if msg, err = r.pack(msg, b.compression, b.start); err != nil {
- return &nestedError{"TXTResource body", err}
- }
- if err := h.fixLen(msg, lenOff, preLen); err != nil {
- return err
- }
- if err := b.incrementSectionCount(); err != nil {
- return err
- }
- b.msg = msg
- return nil
-}
-
-// SRVResource adds a single SRVResource.
-func (b *Builder) SRVResource(h ResourceHeader, r SRVResource) error {
- if err := b.checkResourceSection(); err != nil {
- return err
- }
- h.Type = r.realType()
- msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
- if err != nil {
- return &nestedError{"ResourceHeader", err}
- }
- preLen := len(msg)
- if msg, err = r.pack(msg, b.compression, b.start); err != nil {
- return &nestedError{"SRVResource body", err}
- }
- if err := h.fixLen(msg, lenOff, preLen); err != nil {
- return err
- }
- if err := b.incrementSectionCount(); err != nil {
- return err
- }
- b.msg = msg
- return nil
-}
-
-// AResource adds a single AResource.
-func (b *Builder) AResource(h ResourceHeader, r AResource) error {
- if err := b.checkResourceSection(); err != nil {
- return err
- }
- h.Type = r.realType()
- msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
- if err != nil {
- return &nestedError{"ResourceHeader", err}
- }
- preLen := len(msg)
- if msg, err = r.pack(msg, b.compression, b.start); err != nil {
- return &nestedError{"AResource body", err}
- }
- if err := h.fixLen(msg, lenOff, preLen); err != nil {
- return err
- }
- if err := b.incrementSectionCount(); err != nil {
- return err
- }
- b.msg = msg
- return nil
-}
-
-// AAAAResource adds a single AAAAResource.
-func (b *Builder) AAAAResource(h ResourceHeader, r AAAAResource) error {
- if err := b.checkResourceSection(); err != nil {
- return err
- }
- h.Type = r.realType()
- msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
- if err != nil {
- return &nestedError{"ResourceHeader", err}
- }
- preLen := len(msg)
- if msg, err = r.pack(msg, b.compression, b.start); err != nil {
- return &nestedError{"AAAAResource body", err}
- }
- if err := h.fixLen(msg, lenOff, preLen); err != nil {
- return err
- }
- if err := b.incrementSectionCount(); err != nil {
- return err
- }
- b.msg = msg
- return nil
-}
-
-// OPTResource adds a single OPTResource.
-func (b *Builder) OPTResource(h ResourceHeader, r OPTResource) error {
- if err := b.checkResourceSection(); err != nil {
- return err
- }
- h.Type = r.realType()
- msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
- if err != nil {
- return &nestedError{"ResourceHeader", err}
- }
- preLen := len(msg)
- if msg, err = r.pack(msg, b.compression, b.start); err != nil {
- return &nestedError{"OPTResource body", err}
- }
- if err := h.fixLen(msg, lenOff, preLen); err != nil {
- return err
- }
- if err := b.incrementSectionCount(); err != nil {
- return err
- }
- b.msg = msg
- return nil
-}
-
-// UnknownResource adds a single UnknownResource.
-func (b *Builder) UnknownResource(h ResourceHeader, r UnknownResource) error {
- if err := b.checkResourceSection(); err != nil {
- return err
- }
- h.Type = r.realType()
- msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
- if err != nil {
- return &nestedError{"ResourceHeader", err}
- }
- preLen := len(msg)
- if msg, err = r.pack(msg, b.compression, b.start); err != nil {
- return &nestedError{"UnknownResource body", err}
- }
- if err := h.fixLen(msg, lenOff, preLen); err != nil {
- return err
- }
- if err := b.incrementSectionCount(); err != nil {
- return err
- }
- b.msg = msg
- return nil
-}
-
-// Finish ends message building and generates a binary message.
-func (b *Builder) Finish() ([]byte, error) {
- if b.section < sectionHeader {
- return nil, ErrNotStarted
- }
- b.section = sectionDone
- // Space for the header was allocated in NewBuilder.
- b.header.pack(b.msg[b.start:b.start])
- return b.msg, nil
-}
-
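-// exampleBuildQuery sketches how the Builder methods above fit together:
-// start the question section, add one question, and call Finish. This is
-// an illustrative sketch only; the function name is hypothetical and
-// errors are surfaced via panic purely for brevity.
-func exampleBuildQuery() []byte {
-	b := NewBuilder(nil, Header{ID: 1, RecursionDesired: true})
-	b.EnableCompression()
-	if err := b.StartQuestions(); err != nil {
-		panic(err)
-	}
-	q := Question{
-		Name:  MustNewName("example.com."),
-		Type:  TypeA,
-		Class: ClassINET,
-	}
-	if err := b.Question(q); err != nil {
-		panic(err)
-	}
-	msg, err := b.Finish()
-	if err != nil {
-		panic(err)
-	}
-	return msg
-}
-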
-// A ResourceHeader is the header of a DNS resource record. There are
-// many types of DNS resource records, but they all share the same header.
-type ResourceHeader struct {
-	// Name is the domain name to which this resource record pertains.
- Name Name
-
- // Type is the type of DNS resource record.
- //
- // This field will be set automatically during packing.
- Type Type
-
- // Class is the class of network to which this DNS resource record
- // pertains.
- Class Class
-
-	// TTL is the length of time (measured in seconds) for which this
-	// resource record is valid (time to live). All Resources in a set
-	// should have the same TTL (RFC 2181 Section 5.2).
- TTL uint32
-
- // Length is the length of data in the resource record after the header.
- //
- // This field will be set automatically during packing.
- Length uint16
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (h *ResourceHeader) GoString() string {
- return "dnsmessage.ResourceHeader{" +
- "Name: " + h.Name.GoString() + ", " +
- "Type: " + h.Type.GoString() + ", " +
- "Class: " + h.Class.GoString() + ", " +
- "TTL: " + printUint32(h.TTL) + ", " +
- "Length: " + printUint16(h.Length) + "}"
-}
-
-// pack appends the wire format of the ResourceHeader to oldMsg.
-//
-// lenOff is the offset in msg where the Length field was packed.
-func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]int, compressionOff int) (msg []byte, lenOff int, err error) {
- msg = oldMsg
- if msg, err = h.Name.pack(msg, compression, compressionOff); err != nil {
- return oldMsg, 0, &nestedError{"Name", err}
- }
- msg = packType(msg, h.Type)
- msg = packClass(msg, h.Class)
- msg = packUint32(msg, h.TTL)
- lenOff = len(msg)
- msg = packUint16(msg, h.Length)
- return msg, lenOff, nil
-}
-
-func (h *ResourceHeader) unpack(msg []byte, off int) (int, error) {
- newOff := off
- var err error
- if newOff, err = h.Name.unpack(msg, newOff); err != nil {
- return off, &nestedError{"Name", err}
- }
- if h.Type, newOff, err = unpackType(msg, newOff); err != nil {
- return off, &nestedError{"Type", err}
- }
- if h.Class, newOff, err = unpackClass(msg, newOff); err != nil {
- return off, &nestedError{"Class", err}
- }
- if h.TTL, newOff, err = unpackUint32(msg, newOff); err != nil {
- return off, &nestedError{"TTL", err}
- }
- if h.Length, newOff, err = unpackUint16(msg, newOff); err != nil {
- return off, &nestedError{"Length", err}
- }
- return newOff, nil
-}
-
-// fixLen updates a packed ResourceHeader to include the length of the
-// ResourceBody.
-//
-// lenOff is the offset of the ResourceHeader.Length field in msg.
-//
-// preLen is the length that msg was before the ResourceBody was packed.
-func (h *ResourceHeader) fixLen(msg []byte, lenOff int, preLen int) error {
- conLen := len(msg) - preLen
- if conLen > int(^uint16(0)) {
- return errResTooLong
- }
-
- // Fill in the length now that we know how long the content is.
- packUint16(msg[lenOff:lenOff], uint16(conLen))
- h.Length = uint16(conLen)
-
- return nil
-}
-
-// EDNS(0) wire constants.
-const (
- edns0Version = 0
-
- edns0DNSSECOK = 0x00008000
- ednsVersionMask = 0x00ff0000
- edns0DNSSECOKMask = 0x00ff8000
-)
-
-// SetEDNS0 configures h for EDNS(0).
-//
-// The provided extRCode must be an extended RCode.
-func (h *ResourceHeader) SetEDNS0(udpPayloadLen int, extRCode RCode, dnssecOK bool) error {
- h.Name = Name{Data: [nameLen]byte{'.'}, Length: 1} // RFC 6891 section 6.1.2
- h.Type = TypeOPT
- h.Class = Class(udpPayloadLen)
- h.TTL = uint32(extRCode) >> 4 << 24
- if dnssecOK {
- h.TTL |= edns0DNSSECOK
- }
- return nil
-}
-
-// DNSSECAllowed reports whether the DNSSEC OK bit is set.
-func (h *ResourceHeader) DNSSECAllowed() bool {
- return h.TTL&edns0DNSSECOKMask == edns0DNSSECOK // RFC 6891 section 6.1.3
-}
-
-// ExtendedRCode returns an extended RCode.
-//
-// The provided rcode must be the RCode in the DNS message header.
-func (h *ResourceHeader) ExtendedRCode(rcode RCode) RCode {
- if h.TTL&ednsVersionMask == edns0Version { // RFC 6891 section 6.1.3
- return RCode(h.TTL>>24<<4) | rcode
- }
- return rcode
-}
-
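-// exampleEDNS0 sketches the bit layout used by SetEDNS0 above: the UDP
-// payload size travels in Class and the upper bits of the extended RCode
-// travel in the top byte of TTL. The function name is hypothetical.
-func exampleEDNS0() ResourceHeader {
-	var h ResourceHeader
-	// Advertise a 4096-byte UDP payload with the DNSSEC OK bit set.
-	if err := h.SetEDNS0(4096, RCodeSuccess, true); err != nil {
-		panic(err)
-	}
-	// h.DNSSECAllowed() is now true, and for an RCode read from a
-	// message header, h.ExtendedRCode(rcode) splices the two halves
-	// back together.
-	return h
-}
-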
-func skipResource(msg []byte, off int) (int, error) {
- newOff, err := skipName(msg, off)
- if err != nil {
- return off, &nestedError{"Name", err}
- }
- if newOff, err = skipType(msg, newOff); err != nil {
- return off, &nestedError{"Type", err}
- }
- if newOff, err = skipClass(msg, newOff); err != nil {
- return off, &nestedError{"Class", err}
- }
- if newOff, err = skipUint32(msg, newOff); err != nil {
- return off, &nestedError{"TTL", err}
- }
- length, newOff, err := unpackUint16(msg, newOff)
- if err != nil {
- return off, &nestedError{"Length", err}
- }
- if newOff += int(length); newOff > len(msg) {
- return off, errResourceLen
- }
- return newOff, nil
-}
-
-// packUint16 appends the wire format of field to msg.
-func packUint16(msg []byte, field uint16) []byte {
- return append(msg, byte(field>>8), byte(field))
-}
-
-func unpackUint16(msg []byte, off int) (uint16, int, error) {
- if off+uint16Len > len(msg) {
- return 0, off, errBaseLen
- }
- return uint16(msg[off])<<8 | uint16(msg[off+1]), off + uint16Len, nil
-}
-
-func skipUint16(msg []byte, off int) (int, error) {
- if off+uint16Len > len(msg) {
- return off, errBaseLen
- }
- return off + uint16Len, nil
-}
-
-// packType appends the wire format of field to msg.
-func packType(msg []byte, field Type) []byte {
- return packUint16(msg, uint16(field))
-}
-
-func unpackType(msg []byte, off int) (Type, int, error) {
- t, o, err := unpackUint16(msg, off)
- return Type(t), o, err
-}
-
-func skipType(msg []byte, off int) (int, error) {
- return skipUint16(msg, off)
-}
-
-// packClass appends the wire format of field to msg.
-func packClass(msg []byte, field Class) []byte {
- return packUint16(msg, uint16(field))
-}
-
-func unpackClass(msg []byte, off int) (Class, int, error) {
- c, o, err := unpackUint16(msg, off)
- return Class(c), o, err
-}
-
-func skipClass(msg []byte, off int) (int, error) {
- return skipUint16(msg, off)
-}
-
-// packUint32 appends the wire format of field to msg.
-func packUint32(msg []byte, field uint32) []byte {
- return append(
- msg,
- byte(field>>24),
- byte(field>>16),
- byte(field>>8),
- byte(field),
- )
-}
-
-func unpackUint32(msg []byte, off int) (uint32, int, error) {
- if off+uint32Len > len(msg) {
- return 0, off, errBaseLen
- }
- v := uint32(msg[off])<<24 | uint32(msg[off+1])<<16 | uint32(msg[off+2])<<8 | uint32(msg[off+3])
- return v, off + uint32Len, nil
-}
-
-func skipUint32(msg []byte, off int) (int, error) {
- if off+uint32Len > len(msg) {
- return off, errBaseLen
- }
- return off + uint32Len, nil
-}
-
-// packText appends the wire format of field to msg.
-func packText(msg []byte, field string) ([]byte, error) {
- l := len(field)
- if l > 255 {
- return nil, errStringTooLong
- }
- msg = append(msg, byte(l))
- msg = append(msg, field...)
-
- return msg, nil
-}
-
-func unpackText(msg []byte, off int) (string, int, error) {
- if off >= len(msg) {
- return "", off, errBaseLen
- }
- beginOff := off + 1
- endOff := beginOff + int(msg[off])
- if endOff > len(msg) {
- return "", off, errCalcLen
- }
- return string(msg[beginOff:endOff]), endOff, nil
-}
-
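-// For illustration, a character-string on the wire is a length octet
-// followed by that many bytes: packText encodes "foo" as
-// []byte{3, 'f', 'o', 'o'}, and unpackText reverses that, returning
-// "foo" and advancing the offset by 4.
-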
-// packBytes appends the wire format of field to msg.
-func packBytes(msg []byte, field []byte) []byte {
- return append(msg, field...)
-}
-
-func unpackBytes(msg []byte, off int, field []byte) (int, error) {
- newOff := off + len(field)
- if newOff > len(msg) {
- return off, errBaseLen
- }
- copy(field, msg[off:newOff])
- return newOff, nil
-}
-
-const nameLen = 255
-
-// A Name is a non-encoded domain name. It is used instead of strings to avoid
-// allocations.
-type Name struct {
- Data [nameLen]byte // 255 bytes
- Length uint8
-}
-
-// NewName creates a new Name from a string.
-func NewName(name string) (Name, error) {
- if len([]byte(name)) > nameLen {
- return Name{}, errCalcLen
- }
- n := Name{Length: uint8(len(name))}
- copy(n.Data[:], []byte(name))
- return n, nil
-}
-
-// MustNewName creates a new Name from a string and panics on error.
-func MustNewName(name string) Name {
- n, err := NewName(name)
- if err != nil {
- panic("creating name: " + err.Error())
- }
- return n
-}
-
-// String implements fmt.Stringer.String.
-func (n Name) String() string {
- return string(n.Data[:n.Length])
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (n *Name) GoString() string {
- return `dnsmessage.MustNewName("` + printString(n.Data[:n.Length]) + `")`
-}
-
-// pack appends the wire format of the Name to msg.
-//
-// Domain names are a sequence of counted strings split at the dots. They end
-// with a zero-length string. Compression can be used to reuse domain suffixes.
-//
-// The compression map will be updated with new domain suffixes. If compression
-// is nil, compression will not be used.
-func (n *Name) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
- oldMsg := msg
-
-	// The name must be in canonical form, ending with a trailing dot.
- if n.Length == 0 || n.Data[n.Length-1] != '.' {
- return oldMsg, errNonCanonicalName
- }
-
- // Allow root domain.
- if n.Data[0] == '.' && n.Length == 1 {
- return append(msg, 0), nil
- }
-
- // Emit sequence of counted strings, chopping at dots.
- for i, begin := 0, 0; i < int(n.Length); i++ {
- // Check for the end of the segment.
- if n.Data[i] == '.' {
- // The two most significant bits have special meaning.
- // It isn't allowed for segments to be long enough to
- // need them.
- if i-begin >= 1<<6 {
- return oldMsg, errSegTooLong
- }
-
- // Segments must have a non-zero length.
- if i-begin == 0 {
- return oldMsg, errZeroSegLen
- }
-
- msg = append(msg, byte(i-begin))
-
- for j := begin; j < i; j++ {
- msg = append(msg, n.Data[j])
- }
-
- begin = i + 1
- continue
- }
-
- // We can only compress domain suffixes starting with a new
- // segment. A pointer is two bytes with the two most significant
- // bits set to 1 to indicate that it is a pointer.
- if (i == 0 || n.Data[i-1] == '.') && compression != nil {
- if ptr, ok := compression[string(n.Data[i:])]; ok {
- // Hit. Emit a pointer instead of the rest of
- // the domain.
- return append(msg, byte(ptr>>8|0xC0), byte(ptr)), nil
- }
-
-			// Miss. Add the suffix to the compression table if the
-			// offset can be stored in the available 14 bits.
- if len(msg) <= int(^uint16(0)>>2) {
- compression[string(n.Data[i:])] = len(msg) - compressionOff
- }
- }
- }
- return append(msg, 0), nil
-}
-
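-// Worked example for pack above: "example.com." encodes as the counted
-// strings 7 "example", 3 "com", 0. If that name was already packed at
-// message offset 12, a later occurrence can instead emit the two-byte
-// pointer 0xC0, 0x0C (top two bits set, 14-bit offset 12).
-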
-// unpack unpacks a domain name.
-func (n *Name) unpack(msg []byte, off int) (int, error) {
- return n.unpackCompressed(msg, off, true /* allowCompression */)
-}
-
-func (n *Name) unpackCompressed(msg []byte, off int, allowCompression bool) (int, error) {
- // currOff is the current working offset.
- currOff := off
-
-	// newOff is the offset where the next record will start. Pointers lead
-	// to data that belongs to other names and thus doesn't count toward
-	// the usage of this name.
- newOff := off
-
- // ptr is the number of pointers followed.
- var ptr int
-
- // Name is a slice representation of the name data.
- name := n.Data[:0]
-
-Loop:
- for {
- if currOff >= len(msg) {
- return off, errBaseLen
- }
- c := int(msg[currOff])
- currOff++
- switch c & 0xC0 {
- case 0x00: // String segment
- if c == 0x00 {
- // A zero length signals the end of the name.
- break Loop
- }
- endOff := currOff + c
- if endOff > len(msg) {
- return off, errCalcLen
- }
- name = append(name, msg[currOff:endOff]...)
- name = append(name, '.')
- currOff = endOff
- case 0xC0: // Pointer
- if !allowCompression {
- return off, errCompressedSRV
- }
- if currOff >= len(msg) {
- return off, errInvalidPtr
- }
- c1 := msg[currOff]
- currOff++
- if ptr == 0 {
- newOff = currOff
- }
- // Don't follow too many pointers, maybe there's a loop.
- if ptr++; ptr > 10 {
- return off, errTooManyPtr
- }
- currOff = (c^0xC0)<<8 | int(c1)
- default:
- // Prefixes 0x80 and 0x40 are reserved.
- return off, errReserved
- }
- }
- if len(name) == 0 {
- name = append(name, '.')
- }
- if len(name) > len(n.Data) {
- return off, errCalcLen
- }
- n.Length = uint8(len(name))
- if ptr == 0 {
- newOff = currOff
- }
- return newOff, nil
-}
-
-func skipName(msg []byte, off int) (int, error) {
-	// newOff is the offset where the next record will start. Pointers lead
-	// to data that belongs to other names and thus doesn't count toward
-	// the usage of this name.
- newOff := off
-
-Loop:
- for {
- if newOff >= len(msg) {
- return off, errBaseLen
- }
- c := int(msg[newOff])
- newOff++
- switch c & 0xC0 {
- case 0x00:
- if c == 0x00 {
- // A zero length signals the end of the name.
- break Loop
- }
- // literal string
- newOff += c
- if newOff > len(msg) {
- return off, errCalcLen
- }
- case 0xC0:
- // Pointer to somewhere else in msg.
-
- // Pointers are two bytes.
- newOff++
-
- // Don't follow the pointer as the data here has ended.
- break Loop
- default:
- // Prefixes 0x80 and 0x40 are reserved.
- return off, errReserved
- }
- }
-
- return newOff, nil
-}
-
-// A Question is a DNS query.
-type Question struct {
- Name Name
- Type Type
- Class Class
-}
-
-// pack appends the wire format of the Question to msg.
-func (q *Question) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
- msg, err := q.Name.pack(msg, compression, compressionOff)
- if err != nil {
- return msg, &nestedError{"Name", err}
- }
- msg = packType(msg, q.Type)
- return packClass(msg, q.Class), nil
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (q *Question) GoString() string {
- return "dnsmessage.Question{" +
- "Name: " + q.Name.GoString() + ", " +
- "Type: " + q.Type.GoString() + ", " +
- "Class: " + q.Class.GoString() + "}"
-}
-
-func unpackResourceBody(msg []byte, off int, hdr ResourceHeader) (ResourceBody, int, error) {
- var (
- r ResourceBody
- err error
- name string
- )
- switch hdr.Type {
- case TypeA:
- var rb AResource
- rb, err = unpackAResource(msg, off)
- r = &rb
- name = "A"
- case TypeNS:
- var rb NSResource
- rb, err = unpackNSResource(msg, off)
- r = &rb
- name = "NS"
- case TypeCNAME:
- var rb CNAMEResource
- rb, err = unpackCNAMEResource(msg, off)
- r = &rb
- name = "CNAME"
- case TypeSOA:
- var rb SOAResource
- rb, err = unpackSOAResource(msg, off)
- r = &rb
- name = "SOA"
- case TypePTR:
- var rb PTRResource
- rb, err = unpackPTRResource(msg, off)
- r = &rb
- name = "PTR"
- case TypeMX:
- var rb MXResource
- rb, err = unpackMXResource(msg, off)
- r = &rb
- name = "MX"
- case TypeTXT:
- var rb TXTResource
- rb, err = unpackTXTResource(msg, off, hdr.Length)
- r = &rb
- name = "TXT"
- case TypeAAAA:
- var rb AAAAResource
- rb, err = unpackAAAAResource(msg, off)
- r = &rb
- name = "AAAA"
- case TypeSRV:
- var rb SRVResource
- rb, err = unpackSRVResource(msg, off)
- r = &rb
- name = "SRV"
- case TypeOPT:
- var rb OPTResource
- rb, err = unpackOPTResource(msg, off, hdr.Length)
- r = &rb
- name = "OPT"
- default:
- var rb UnknownResource
- rb, err = unpackUnknownResource(hdr.Type, msg, off, hdr.Length)
- r = &rb
- name = "Unknown"
- }
- if err != nil {
- return nil, off, &nestedError{name + " record", err}
- }
- return r, off + int(hdr.Length), nil
-}
-
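-// exampleAnswers sketches the consuming side of unpackResourceBody,
-// assuming the package's Parser type declared earlier in this file:
-// collect the A records from a packed message, skipping everything else.
-// The function name is hypothetical.
-func exampleAnswers(msg []byte) ([]AResource, error) {
-	var p Parser
-	if _, err := p.Start(msg); err != nil {
-		return nil, err
-	}
-	if err := p.SkipAllQuestions(); err != nil {
-		return nil, err
-	}
-	var as []AResource
-	for {
-		h, err := p.AnswerHeader()
-		if err == ErrSectionDone {
-			return as, nil
-		}
-		if err != nil {
-			return nil, err
-		}
-		if h.Type != TypeA {
-			if err := p.SkipAnswer(); err != nil {
-				return nil, err
-			}
-			continue
-		}
-		r, err := p.AResource()
-		if err != nil {
-			return nil, err
-		}
-		as = append(as, r)
-	}
-}
-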
-// A CNAMEResource is a CNAME Resource record.
-type CNAMEResource struct {
- CNAME Name
-}
-
-func (r *CNAMEResource) realType() Type {
- return TypeCNAME
-}
-
-// pack appends the wire format of the CNAMEResource to msg.
-func (r *CNAMEResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
- return r.CNAME.pack(msg, compression, compressionOff)
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (r *CNAMEResource) GoString() string {
- return "dnsmessage.CNAMEResource{CNAME: " + r.CNAME.GoString() + "}"
-}
-
-func unpackCNAMEResource(msg []byte, off int) (CNAMEResource, error) {
- var cname Name
- if _, err := cname.unpack(msg, off); err != nil {
- return CNAMEResource{}, err
- }
- return CNAMEResource{cname}, nil
-}
-
-// An MXResource is an MX Resource record.
-type MXResource struct {
- Pref uint16
- MX Name
-}
-
-func (r *MXResource) realType() Type {
- return TypeMX
-}
-
-// pack appends the wire format of the MXResource to msg.
-func (r *MXResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
- oldMsg := msg
- msg = packUint16(msg, r.Pref)
- msg, err := r.MX.pack(msg, compression, compressionOff)
- if err != nil {
- return oldMsg, &nestedError{"MXResource.MX", err}
- }
- return msg, nil
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (r *MXResource) GoString() string {
- return "dnsmessage.MXResource{" +
- "Pref: " + printUint16(r.Pref) + ", " +
- "MX: " + r.MX.GoString() + "}"
-}
-
-func unpackMXResource(msg []byte, off int) (MXResource, error) {
- pref, off, err := unpackUint16(msg, off)
- if err != nil {
- return MXResource{}, &nestedError{"Pref", err}
- }
- var mx Name
- if _, err := mx.unpack(msg, off); err != nil {
- return MXResource{}, &nestedError{"MX", err}
- }
- return MXResource{pref, mx}, nil
-}
-
-// An NSResource is an NS Resource record.
-type NSResource struct {
- NS Name
-}
-
-func (r *NSResource) realType() Type {
- return TypeNS
-}
-
-// pack appends the wire format of the NSResource to msg.
-func (r *NSResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
- return r.NS.pack(msg, compression, compressionOff)
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (r *NSResource) GoString() string {
- return "dnsmessage.NSResource{NS: " + r.NS.GoString() + "}"
-}
-
-func unpackNSResource(msg []byte, off int) (NSResource, error) {
- var ns Name
- if _, err := ns.unpack(msg, off); err != nil {
- return NSResource{}, err
- }
- return NSResource{ns}, nil
-}
-
-// A PTRResource is a PTR Resource record.
-type PTRResource struct {
- PTR Name
-}
-
-func (r *PTRResource) realType() Type {
- return TypePTR
-}
-
-// pack appends the wire format of the PTRResource to msg.
-func (r *PTRResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
- return r.PTR.pack(msg, compression, compressionOff)
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (r *PTRResource) GoString() string {
- return "dnsmessage.PTRResource{PTR: " + r.PTR.GoString() + "}"
-}
-
-func unpackPTRResource(msg []byte, off int) (PTRResource, error) {
- var ptr Name
- if _, err := ptr.unpack(msg, off); err != nil {
- return PTRResource{}, err
- }
- return PTRResource{ptr}, nil
-}
-
-// An SOAResource is an SOA Resource record.
-type SOAResource struct {
- NS Name
- MBox Name
- Serial uint32
- Refresh uint32
- Retry uint32
- Expire uint32
-
-	// MinTTL is the default TTL of Resource records which did not
-	// contain a TTL value and the TTL of negative responses. (RFC 2308
-	// Section 4)
- MinTTL uint32
-}
-
-func (r *SOAResource) realType() Type {
- return TypeSOA
-}
-
-// pack appends the wire format of the SOAResource to msg.
-func (r *SOAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
- oldMsg := msg
- msg, err := r.NS.pack(msg, compression, compressionOff)
- if err != nil {
- return oldMsg, &nestedError{"SOAResource.NS", err}
- }
- msg, err = r.MBox.pack(msg, compression, compressionOff)
- if err != nil {
- return oldMsg, &nestedError{"SOAResource.MBox", err}
- }
- msg = packUint32(msg, r.Serial)
- msg = packUint32(msg, r.Refresh)
- msg = packUint32(msg, r.Retry)
- msg = packUint32(msg, r.Expire)
- return packUint32(msg, r.MinTTL), nil
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (r *SOAResource) GoString() string {
- return "dnsmessage.SOAResource{" +
- "NS: " + r.NS.GoString() + ", " +
- "MBox: " + r.MBox.GoString() + ", " +
- "Serial: " + printUint32(r.Serial) + ", " +
- "Refresh: " + printUint32(r.Refresh) + ", " +
- "Retry: " + printUint32(r.Retry) + ", " +
- "Expire: " + printUint32(r.Expire) + ", " +
- "MinTTL: " + printUint32(r.MinTTL) + "}"
-}
-
-func unpackSOAResource(msg []byte, off int) (SOAResource, error) {
- var ns Name
- off, err := ns.unpack(msg, off)
- if err != nil {
- return SOAResource{}, &nestedError{"NS", err}
- }
- var mbox Name
- if off, err = mbox.unpack(msg, off); err != nil {
- return SOAResource{}, &nestedError{"MBox", err}
- }
- serial, off, err := unpackUint32(msg, off)
- if err != nil {
- return SOAResource{}, &nestedError{"Serial", err}
- }
- refresh, off, err := unpackUint32(msg, off)
- if err != nil {
- return SOAResource{}, &nestedError{"Refresh", err}
- }
- retry, off, err := unpackUint32(msg, off)
- if err != nil {
- return SOAResource{}, &nestedError{"Retry", err}
- }
- expire, off, err := unpackUint32(msg, off)
- if err != nil {
- return SOAResource{}, &nestedError{"Expire", err}
- }
- minTTL, _, err := unpackUint32(msg, off)
- if err != nil {
- return SOAResource{}, &nestedError{"MinTTL", err}
- }
- return SOAResource{ns, mbox, serial, refresh, retry, expire, minTTL}, nil
-}
-
-// A TXTResource is a TXT Resource record.
-type TXTResource struct {
- TXT []string
-}
-
-func (r *TXTResource) realType() Type {
- return TypeTXT
-}
-
-// pack appends the wire format of the TXTResource to msg.
-func (r *TXTResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
- oldMsg := msg
- for _, s := range r.TXT {
- var err error
- msg, err = packText(msg, s)
- if err != nil {
- return oldMsg, err
- }
- }
- return msg, nil
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (r *TXTResource) GoString() string {
- s := "dnsmessage.TXTResource{TXT: []string{"
- if len(r.TXT) == 0 {
- return s + "}}"
- }
- s += `"` + printString([]byte(r.TXT[0]))
- for _, t := range r.TXT[1:] {
- s += `", "` + printString([]byte(t))
- }
- return s + `"}}`
-}
-
-func unpackTXTResource(msg []byte, off int, length uint16) (TXTResource, error) {
- txts := make([]string, 0, 1)
- for n := uint16(0); n < length; {
- var t string
- var err error
- if t, off, err = unpackText(msg, off); err != nil {
- return TXTResource{}, &nestedError{"text", err}
- }
- // Check if we got too many bytes.
- if length-n < uint16(len(t))+1 {
- return TXTResource{}, errCalcLen
- }
- n += uint16(len(t)) + 1
- txts = append(txts, t)
- }
- return TXTResource{txts}, nil
-}
-
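-// Note for the TXT helpers above: each element of TXT is one
-// character-string of at most 255 bytes on the wire (packText enforces
-// this), so longer values must be split by the caller into multiple
-// strings before packing.
-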
-// An SRVResource is an SRV Resource record.
-type SRVResource struct {
- Priority uint16
- Weight uint16
- Port uint16
- Target Name // Not compressed as per RFC 2782.
-}
-
-func (r *SRVResource) realType() Type {
- return TypeSRV
-}
-
-// pack appends the wire format of the SRVResource to msg.
-func (r *SRVResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
- oldMsg := msg
- msg = packUint16(msg, r.Priority)
- msg = packUint16(msg, r.Weight)
- msg = packUint16(msg, r.Port)
- msg, err := r.Target.pack(msg, nil, compressionOff)
- if err != nil {
- return oldMsg, &nestedError{"SRVResource.Target", err}
- }
- return msg, nil
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (r *SRVResource) GoString() string {
- return "dnsmessage.SRVResource{" +
- "Priority: " + printUint16(r.Priority) + ", " +
- "Weight: " + printUint16(r.Weight) + ", " +
- "Port: " + printUint16(r.Port) + ", " +
- "Target: " + r.Target.GoString() + "}"
-}
-
-func unpackSRVResource(msg []byte, off int) (SRVResource, error) {
- priority, off, err := unpackUint16(msg, off)
- if err != nil {
- return SRVResource{}, &nestedError{"Priority", err}
- }
- weight, off, err := unpackUint16(msg, off)
- if err != nil {
- return SRVResource{}, &nestedError{"Weight", err}
- }
- port, off, err := unpackUint16(msg, off)
- if err != nil {
- return SRVResource{}, &nestedError{"Port", err}
- }
- var target Name
- if _, err := target.unpackCompressed(msg, off, false /* allowCompression */); err != nil {
- return SRVResource{}, &nestedError{"Target", err}
- }
- return SRVResource{priority, weight, port, target}, nil
-}
-
-// An AResource is an A Resource record.
-type AResource struct {
- A [4]byte
-}
-
-func (r *AResource) realType() Type {
- return TypeA
-}
-
-// pack appends the wire format of the AResource to msg.
-func (r *AResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
- return packBytes(msg, r.A[:]), nil
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (r *AResource) GoString() string {
- return "dnsmessage.AResource{" +
- "A: [4]byte{" + printByteSlice(r.A[:]) + "}}"
-}
-
-func unpackAResource(msg []byte, off int) (AResource, error) {
- var a [4]byte
- if _, err := unpackBytes(msg, off, a[:]); err != nil {
- return AResource{}, err
- }
- return AResource{a}, nil
-}
-
-// An AAAAResource is an AAAA Resource record.
-type AAAAResource struct {
- AAAA [16]byte
-}
-
-func (r *AAAAResource) realType() Type {
- return TypeAAAA
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (r *AAAAResource) GoString() string {
- return "dnsmessage.AAAAResource{" +
- "AAAA: [16]byte{" + printByteSlice(r.AAAA[:]) + "}}"
-}
-
-// pack appends the wire format of the AAAAResource to msg.
-func (r *AAAAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
- return packBytes(msg, r.AAAA[:]), nil
-}
-
-func unpackAAAAResource(msg []byte, off int) (AAAAResource, error) {
- var aaaa [16]byte
- if _, err := unpackBytes(msg, off, aaaa[:]); err != nil {
- return AAAAResource{}, err
- }
- return AAAAResource{aaaa}, nil
-}
-
-// An OPTResource is an OPT pseudo Resource record.
-//
-// The pseudo resource record is part of the extension mechanisms for DNS
-// as defined in RFC 6891.
-type OPTResource struct {
- Options []Option
-}
-
-// An Option represents a DNS message option within OPTResource.
-//
-// The message option is part of the extension mechanisms for DNS as
-// defined in RFC 6891.
-type Option struct {
- Code uint16 // option code
- Data []byte
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (o *Option) GoString() string {
- return "dnsmessage.Option{" +
- "Code: " + printUint16(o.Code) + ", " +
- "Data: []byte{" + printByteSlice(o.Data) + "}}"
-}
-
-func (r *OPTResource) realType() Type {
- return TypeOPT
-}
-
-func (r *OPTResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
- for _, opt := range r.Options {
- msg = packUint16(msg, opt.Code)
- l := uint16(len(opt.Data))
- msg = packUint16(msg, l)
- msg = packBytes(msg, opt.Data)
- }
- return msg, nil
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (r *OPTResource) GoString() string {
- s := "dnsmessage.OPTResource{Options: []dnsmessage.Option{"
- if len(r.Options) == 0 {
- return s + "}}"
- }
- s += r.Options[0].GoString()
- for _, o := range r.Options[1:] {
- s += ", " + o.GoString()
- }
- return s + "}}"
-}
-
-func unpackOPTResource(msg []byte, off int, length uint16) (OPTResource, error) {
- var opts []Option
- for oldOff := off; off < oldOff+int(length); {
- var err error
- var o Option
- o.Code, off, err = unpackUint16(msg, off)
- if err != nil {
- return OPTResource{}, &nestedError{"Code", err}
- }
- var l uint16
- l, off, err = unpackUint16(msg, off)
- if err != nil {
- return OPTResource{}, &nestedError{"Data", err}
- }
- o.Data = make([]byte, l)
- if copy(o.Data, msg[off:]) != int(l) {
- return OPTResource{}, &nestedError{"Data", errCalcLen}
- }
- off += int(l)
- opts = append(opts, o)
- }
- return OPTResource{opts}, nil
-}
-
-// An UnknownResource is a catch-all container for unknown record types.
-type UnknownResource struct {
- Type Type
- Data []byte
-}
-
-func (r *UnknownResource) realType() Type {
- return r.Type
-}
-
-// pack appends the wire format of the UnknownResource to msg.
-func (r *UnknownResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
- return packBytes(msg, r.Data[:]), nil
-}
-
-// GoString implements fmt.GoStringer.GoString.
-func (r *UnknownResource) GoString() string {
- return "dnsmessage.UnknownResource{" +
- "Type: " + r.Type.GoString() + ", " +
- "Data: []byte{" + printByteSlice(r.Data) + "}}"
-}
-
-func unpackUnknownResource(recordType Type, msg []byte, off int, length uint16) (UnknownResource, error) {
- parsed := UnknownResource{
- Type: recordType,
- Data: make([]byte, length),
- }
- if _, err := unpackBytes(msg, off, parsed.Data); err != nil {
- return UnknownResource{}, err
- }
- return parsed, nil
-}
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/http/httpguts/httplex.go b/contrib/go/_std_1.18/src/vendor/golang.org/x/net/http/httpguts/httplex.go
deleted file mode 100644
index c79aa73f28..0000000000
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/http/httpguts/httplex.go
+++ /dev/null
@@ -1,348 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package httpguts
-
-import (
- "net"
- "strings"
- "unicode/utf8"
-
- "golang.org/x/net/idna"
-)
-
-var isTokenTable = [127]bool{
- '!': true,
- '#': true,
- '$': true,
- '%': true,
- '&': true,
- '\'': true,
- '*': true,
- '+': true,
- '-': true,
- '.': true,
- '0': true,
- '1': true,
- '2': true,
- '3': true,
- '4': true,
- '5': true,
- '6': true,
- '7': true,
- '8': true,
- '9': true,
- 'A': true,
- 'B': true,
- 'C': true,
- 'D': true,
- 'E': true,
- 'F': true,
- 'G': true,
- 'H': true,
- 'I': true,
- 'J': true,
- 'K': true,
- 'L': true,
- 'M': true,
- 'N': true,
- 'O': true,
- 'P': true,
- 'Q': true,
- 'R': true,
- 'S': true,
- 'T': true,
- 'U': true,
-	'V':  true,
-	'W':  true,
- 'X': true,
- 'Y': true,
- 'Z': true,
- '^': true,
- '_': true,
- '`': true,
- 'a': true,
- 'b': true,
- 'c': true,
- 'd': true,
- 'e': true,
- 'f': true,
- 'g': true,
- 'h': true,
- 'i': true,
- 'j': true,
- 'k': true,
- 'l': true,
- 'm': true,
- 'n': true,
- 'o': true,
- 'p': true,
- 'q': true,
- 'r': true,
- 's': true,
- 't': true,
- 'u': true,
- 'v': true,
- 'w': true,
- 'x': true,
- 'y': true,
- 'z': true,
- '|': true,
- '~': true,
-}
-
-func IsTokenRune(r rune) bool {
- i := int(r)
- return i < len(isTokenTable) && isTokenTable[i]
-}
-
-func isNotToken(r rune) bool {
- return !IsTokenRune(r)
-}
-
-// HeaderValuesContainsToken reports whether any string in values
-// contains the provided token, ASCII case-insensitively.
-func HeaderValuesContainsToken(values []string, token string) bool {
- for _, v := range values {
- if headerValueContainsToken(v, token) {
- return true
- }
- }
- return false
-}
-
-// isOWS reports whether b is an optional whitespace byte, as defined
-// by RFC 7230 section 3.2.3.
-func isOWS(b byte) bool { return b == ' ' || b == '\t' }
-
-// trimOWS returns x with all optional whitespace removed from the
-// beginning and end.
-func trimOWS(x string) string {
- // TODO: consider using strings.Trim(x, " \t") instead,
- // if and when it's fast enough. See issue 10292.
- // But this ASCII-only code will probably always beat UTF-8
- // aware code.
- for len(x) > 0 && isOWS(x[0]) {
- x = x[1:]
- }
- for len(x) > 0 && isOWS(x[len(x)-1]) {
- x = x[:len(x)-1]
- }
- return x
-}
-
-// headerValueContainsToken reports whether v (assumed to be a
-// 0#element, in the ABNF extension described in RFC 7230 section 7)
-// contains token amongst its comma-separated tokens, ASCII
-// case-insensitively.
-func headerValueContainsToken(v string, token string) bool {
- for comma := strings.IndexByte(v, ','); comma != -1; comma = strings.IndexByte(v, ',') {
- if tokenEqual(trimOWS(v[:comma]), token) {
- return true
- }
- v = v[comma+1:]
- }
- return tokenEqual(trimOWS(v), token)
-}
-
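-// For illustration: headerValueContainsToken("Keep-Alive, Upgrade",
-// "upgrade") reports true, because each comma-separated element is
-// trimmed of optional whitespace and compared ASCII case-insensitively.
-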
-// lowerASCII returns the ASCII lowercase version of b.
-func lowerASCII(b byte) byte {
- if 'A' <= b && b <= 'Z' {
- return b + ('a' - 'A')
- }
- return b
-}
-
-// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
-func tokenEqual(t1, t2 string) bool {
- if len(t1) != len(t2) {
- return false
- }
- for i, b := range t1 {
- if b >= utf8.RuneSelf {
- // No UTF-8 or non-ASCII allowed in tokens.
- return false
- }
- if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
- return false
- }
- }
- return true
-}
-
-// isLWS reports whether b is linear white space, according
-// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
-// LWS = [CRLF] 1*( SP | HT )
-func isLWS(b byte) bool { return b == ' ' || b == '\t' }
-
-// isCTL reports whether b is a control byte, according
-// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
-// CTL = <any US-ASCII control character
-// (octets 0 - 31) and DEL (127)>
-func isCTL(b byte) bool {
- const del = 0x7f // a CTL
- return b < ' ' || b == del
-}
-
-// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
-// HTTP/2 imposes the additional restriction that uppercase ASCII
-// letters are not allowed.
-//
-// RFC 7230 says:
-// header-field = field-name ":" OWS field-value OWS
-// field-name = token
-// token = 1*tchar
-// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
-// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
-func ValidHeaderFieldName(v string) bool {
- if len(v) == 0 {
- return false
- }
- for _, r := range v {
- if !IsTokenRune(r) {
- return false
- }
- }
- return true
-}
-
-// ValidHostHeader reports whether h is a valid host header.
-func ValidHostHeader(h string) bool {
- // The latest spec is actually this:
- //
- // http://tools.ietf.org/html/rfc7230#section-5.4
- // Host = uri-host [ ":" port ]
- //
- // Where uri-host is:
- // http://tools.ietf.org/html/rfc3986#section-3.2.2
- //
- // But we're going to be much more lenient for now and just
- // search for any byte that's not a valid byte in any of those
- // expressions.
- for i := 0; i < len(h); i++ {
- if !validHostByte[h[i]] {
- return false
- }
- }
- return true
-}
-
-// See the validHostHeader comment.
-var validHostByte = [256]bool{
- '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
- '8': true, '9': true,
-
- 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
- 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
- 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
- 'y': true, 'z': true,
-
- 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
- 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
- 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
- 'Y': true, 'Z': true,
-
- '!': true, // sub-delims
- '$': true, // sub-delims
- '%': true, // pct-encoded (and used in IPv6 zones)
- '&': true, // sub-delims
- '(': true, // sub-delims
- ')': true, // sub-delims
- '*': true, // sub-delims
- '+': true, // sub-delims
- ',': true, // sub-delims
- '-': true, // unreserved
- '.': true, // unreserved
- ':': true, // IPv6address + Host expression's optional port
- ';': true, // sub-delims
- '=': true, // sub-delims
- '[': true,
- '\'': true, // sub-delims
- ']': true,
- '_': true, // unreserved
- '~': true, // unreserved
-}
-
-// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
-// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
-//
-// message-header = field-name ":" [ field-value ]
-// field-value = *( field-content | LWS )
-// field-content = <the OCTETs making up the field-value
-// and consisting of either *TEXT or combinations
-// of token, separators, and quoted-string>
-//
-// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
-//
-// TEXT = <any OCTET except CTLs,
-// but including LWS>
-// LWS = [CRLF] 1*( SP | HT )
-// CTL = <any US-ASCII control character
-// (octets 0 - 31) and DEL (127)>
-//
-// RFC 7230 says:
-// field-value = *( field-content / obs-fold )
-//	obs-fold       =  N/A to http2, and deprecated
-// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
-// field-vchar = VCHAR / obs-text
-// obs-text = %x80-FF
-// VCHAR = "any visible [USASCII] character"
-//
-// http2 further says: "Similarly, HTTP/2 allows header field values
-// that are not valid. While most of the values that can be encoded
-// will not alter header field parsing, carriage return (CR, ASCII
-// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
-// 0x0) might be exploited by an attacker if they are translated
-// verbatim. Any request or response that contains a character not
-// permitted in a header field value MUST be treated as malformed
-// (Section 8.1.2.6). Valid characters are defined by the
-// field-content ABNF rule in Section 3.2 of [RFC7230]."
-//
-// This function does not (yet?) properly handle the rejection of
-// strings that begin or end with SP or HTAB.
-func ValidHeaderFieldValue(v string) bool {
- for i := 0; i < len(v); i++ {
- b := v[i]
- if isCTL(b) && !isLWS(b) {
- return false
- }
- }
- return true
-}
-
-func isASCII(s string) bool {
- for i := 0; i < len(s); i++ {
- if s[i] >= utf8.RuneSelf {
- return false
- }
- }
- return true
-}
-
-// PunycodeHostPort returns the IDNA Punycode version
-// of the provided "host" or "host:port" string.
-func PunycodeHostPort(v string) (string, error) {
- if isASCII(v) {
- return v, nil
- }
-
- host, port, err := net.SplitHostPort(v)
- if err != nil {
- // The input 'v' argument was just a "host" argument,
- // without a port. This error should not be returned
- // to the caller.
- host = v
- port = ""
- }
- host, err = idna.ToASCII(host)
- if err != nil {
- // Non-UTF-8? Not representable in Punycode, in any
- // case.
- return "", err
- }
- if port == "" {
- return host, nil
- }
- return net.JoinHostPort(host, port), nil
-}
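-
-// For illustration: PunycodeHostPort("bücher.example:8080") returns
-// "xn--bcher-kva.example:8080", while all-ASCII input is returned
-// unchanged without consulting the idna package.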
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/http/httpproxy/proxy.go b/contrib/go/_std_1.18/src/vendor/golang.org/x/net/http/httpproxy/proxy.go
deleted file mode 100644
index d2c8c87eab..0000000000
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/http/httpproxy/proxy.go
+++ /dev/null
@@ -1,368 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package httpproxy provides support for HTTP proxy determination
-// based on environment variables, as provided by net/http's
-// ProxyFromEnvironment function.
-//
-// The API is not subject to the Go 1 compatibility promise and may change at
-// any time.
-package httpproxy
-
-import (
- "errors"
- "fmt"
- "net"
- "net/url"
- "os"
- "strings"
- "unicode/utf8"
-
- "golang.org/x/net/idna"
-)
-
-// Config holds configuration for HTTP proxy settings. See
-// FromEnvironment for details.
-type Config struct {
- // HTTPProxy represents the value of the HTTP_PROXY or
- // http_proxy environment variable. It will be used as the proxy
- // URL for HTTP requests unless overridden by NoProxy.
- HTTPProxy string
-
- // HTTPSProxy represents the HTTPS_PROXY or https_proxy
- // environment variable. It will be used as the proxy URL for
- // HTTPS requests unless overridden by NoProxy.
- HTTPSProxy string
-
- // NoProxy represents the NO_PROXY or no_proxy environment
- // variable. It specifies a string that contains comma-separated values
- // specifying hosts that should be excluded from proxying. Each value is
- // represented by an IP address prefix (1.2.3.4), an IP address prefix in
- // CIDR notation (1.2.3.4/8), a domain name, or a special DNS label (*).
- // An IP address prefix and domain name can also include a literal port
- // number (1.2.3.4:80).
- // A domain name matches that name and all subdomains. A domain name with
- // a leading "." matches subdomains only. For example "foo.com" matches
- // "foo.com" and "bar.foo.com"; ".y.com" matches "x.y.com" but not "y.com".
- // A single asterisk (*) indicates that no proxying should be done.
- // A best effort is made to parse the string and errors are
- // ignored.
- NoProxy string
-
- // CGI holds whether the current process is running
- // as a CGI handler (FromEnvironment infers this from the
- // presence of a REQUEST_METHOD environment variable).
- // When this is set, ProxyForURL will return an error
- // when HTTPProxy applies, because a client could be
- // setting HTTP_PROXY maliciously. See https://golang.org/s/cgihttpproxy.
- CGI bool
-}
-
-// config holds the parsed configuration for HTTP proxy settings.
-type config struct {
- // Config represents the original configuration as defined above.
- Config
-
- // httpsProxy is the parsed URL of the HTTPSProxy if defined.
- httpsProxy *url.URL
-
- // httpProxy is the parsed URL of the HTTPProxy if defined.
- httpProxy *url.URL
-
- // ipMatchers represent all values in the NoProxy that are IP address
- // prefixes or an IP address in CIDR notation.
- ipMatchers []matcher
-
-	// domainMatchers represent all values in the NoProxy that are a domain
-	// name or a hostname & domain name.
-}
-
-// FromEnvironment returns a Config instance populated from the
-// environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the
-// lowercase versions thereof). HTTPS_PROXY takes precedence over
-// HTTP_PROXY for https requests.
-//
-// The environment values may be either a complete URL or a
-// "host[:port]", in which case the "http" scheme is assumed. An error
-// is returned if the value is a different form.
-func FromEnvironment() *Config {
- return &Config{
- HTTPProxy: getEnvAny("HTTP_PROXY", "http_proxy"),
- HTTPSProxy: getEnvAny("HTTPS_PROXY", "https_proxy"),
- NoProxy: getEnvAny("NO_PROXY", "no_proxy"),
- CGI: os.Getenv("REQUEST_METHOD") != "",
- }
-}
-
-func getEnvAny(names ...string) string {
- for _, n := range names {
- if val := os.Getenv(n); val != "" {
- return val
- }
- }
- return ""
-}
-
-// ProxyFunc returns a function that determines the proxy URL to use for
-// a given request URL. Changing the contents of cfg will not affect
-// proxy functions created earlier.
-//
-// A nil URL and nil error are returned if no proxy is defined in the
-// environment, or a proxy should not be used for the given request, as
-// defined by NO_PROXY.
-//
-// As a special case, if req.URL.Host is "localhost" or a loopback address
-// (with or without a port number), then a nil URL and nil error will be returned.
-func (cfg *Config) ProxyFunc() func(reqURL *url.URL) (*url.URL, error) {
- // Preprocess the Config settings for more efficient evaluation.
- cfg1 := &config{
- Config: *cfg,
- }
- cfg1.init()
- return cfg1.proxyForURL
-}
-
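-// exampleProxyFunc sketches the intended use of ProxyFunc above. The
-// function name, proxy URL, and host names are hypothetical.
-func exampleProxyFunc() (*url.URL, error) {
-	cfg := &Config{
-		HTTPProxy: "http://proxy.internal:3128",
-		NoProxy:   "localhost,.corp.example",
-	}
-	proxyFn := cfg.ProxyFunc()
-	reqURL, err := url.Parse("http://service.corp.example/healthz")
-	if err != nil {
-		return nil, err
-	}
-	// ".corp.example" matches in NoProxy, so this returns (nil, nil).
-	return proxyFn(reqURL)
-}
-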
-func (cfg *config) proxyForURL(reqURL *url.URL) (*url.URL, error) {
- var proxy *url.URL
- if reqURL.Scheme == "https" {
- proxy = cfg.httpsProxy
- } else if reqURL.Scheme == "http" {
- proxy = cfg.httpProxy
- if proxy != nil && cfg.CGI {
- return nil, errors.New("refusing to use HTTP_PROXY value in CGI environment; see golang.org/s/cgihttpproxy")
- }
- }
- if proxy == nil {
- return nil, nil
- }
- if !cfg.useProxy(canonicalAddr(reqURL)) {
- return nil, nil
- }
-
- return proxy, nil
-}
-
-func parseProxy(proxy string) (*url.URL, error) {
- if proxy == "" {
- return nil, nil
- }
-
- proxyURL, err := url.Parse(proxy)
- if err != nil ||
- (proxyURL.Scheme != "http" &&
- proxyURL.Scheme != "https" &&
- proxyURL.Scheme != "socks5") {
- // proxy was bogus. Try prepending "http://" to it and
- // see if that parses correctly. If not, we fall
- // through and complain about the original one.
- if proxyURL, err := url.Parse("http://" + proxy); err == nil {
- return proxyURL, nil
- }
- }
- if err != nil {
- return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err)
- }
- return proxyURL, nil
-}
-
-// useProxy reports whether requests to addr should use a proxy,
-// according to the NO_PROXY or no_proxy environment variable.
-// addr is always a canonicalAddr with a host and port.
-func (cfg *config) useProxy(addr string) bool {
- if len(addr) == 0 {
- return true
- }
- host, port, err := net.SplitHostPort(addr)
- if err != nil {
- return false
- }
- if host == "localhost" {
- return false
- }
- ip := net.ParseIP(host)
- if ip != nil {
- if ip.IsLoopback() {
- return false
- }
- }
-
- addr = strings.ToLower(strings.TrimSpace(host))
-
- if ip != nil {
- for _, m := range cfg.ipMatchers {
- if m.match(addr, port, ip) {
- return false
- }
- }
- }
- for _, m := range cfg.domainMatchers {
- if m.match(addr, port, ip) {
- return false
- }
- }
- return true
-}
-
-func (c *config) init() {
- if parsed, err := parseProxy(c.HTTPProxy); err == nil {
- c.httpProxy = parsed
- }
- if parsed, err := parseProxy(c.HTTPSProxy); err == nil {
- c.httpsProxy = parsed
- }
-
- for _, p := range strings.Split(c.NoProxy, ",") {
- p = strings.ToLower(strings.TrimSpace(p))
- if len(p) == 0 {
- continue
- }
-
- if p == "*" {
- c.ipMatchers = []matcher{allMatch{}}
- c.domainMatchers = []matcher{allMatch{}}
- return
- }
-
- // IPv4/CIDR, IPv6/CIDR
- if _, pnet, err := net.ParseCIDR(p); err == nil {
- c.ipMatchers = append(c.ipMatchers, cidrMatch{cidr: pnet})
- continue
- }
-
- // IPv4:port, [IPv6]:port
- phost, pport, err := net.SplitHostPort(p)
- if err == nil {
- if len(phost) == 0 {
- // There is no host part, likely the entry is malformed; ignore.
- continue
- }
- if phost[0] == '[' && phost[len(phost)-1] == ']' {
- phost = phost[1 : len(phost)-1]
- }
- } else {
- phost = p
- }
- // IPv4, IPv6
- if pip := net.ParseIP(phost); pip != nil {
- c.ipMatchers = append(c.ipMatchers, ipMatch{ip: pip, port: pport})
- continue
- }
-
- if len(phost) == 0 {
- // There is no host part, likely the entry is malformed; ignore.
- continue
- }
-
- // domain.com or domain.com:80
- // foo.com matches bar.foo.com
- // .domain.com or .domain.com:port
- // *.domain.com or *.domain.com:port
- if strings.HasPrefix(phost, "*.") {
- phost = phost[1:]
- }
- matchHost := false
- if phost[0] != '.' {
- matchHost = true
- phost = "." + phost
- }
- c.domainMatchers = append(c.domainMatchers, domainMatch{host: phost, port: pport, matchHost: matchHost})
- }
-}
-
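-// For illustration, NoProxy = "10.0.0.0/8,*.example.com,192.168.1.1:80"
-// yields a cidrMatch for the CIDR block, a domainMatch for
-// ".example.com" (subdomains only, any port), and an ipMatch that
-// excludes 192.168.1.1 only on port 80.
-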
-var portMap = map[string]string{
- "http": "80",
- "https": "443",
- "socks5": "1080",
-}
-
-// canonicalAddr returns url.Host but always with a ":port" suffix.
-func canonicalAddr(url *url.URL) string {
- addr := url.Hostname()
- if v, err := idnaASCII(addr); err == nil {
- addr = v
- }
- port := url.Port()
- if port == "" {
- port = portMap[url.Scheme]
- }
- return net.JoinHostPort(addr, port)
-}
-
-// hasPort reports whether s, which has the form "host", "host:port", or
-// "[ipv6::address]:port", includes a port.
-func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
-
-func idnaASCII(v string) (string, error) {
- // TODO: Consider removing this check after verifying performance is okay.
- // Right now punycode verification, length checks, context checks, and the
- // permissible character tests are all omitted. It also prevents the ToASCII
- // call from salvaging an invalid IDN, when possible. As a result it may be
- // possible to have two IDNs that appear identical to the user where the
- // ASCII-only version causes an error downstream whereas the non-ASCII
- // version does not.
-	// Note that for correct ASCII IDNs ToASCII will do considerably more
-	// work, but it will not cause an allocation.
- if isASCII(v) {
- return v, nil
- }
- return idna.Lookup.ToASCII(v)
-}
-
-func isASCII(s string) bool {
- for i := 0; i < len(s); i++ {
- if s[i] >= utf8.RuneSelf {
- return false
- }
- }
- return true
-}
-
-// matcher represents the matching rule for a given value in the NO_PROXY list
-type matcher interface {
- // match returns true if the host and optional port or ip and optional port
- // are allowed
- match(host, port string, ip net.IP) bool
-}
-
-// allMatch matches on all possible inputs
-type allMatch struct{}
-
-func (a allMatch) match(host, port string, ip net.IP) bool {
- return true
-}
-
-type cidrMatch struct {
- cidr *net.IPNet
-}
-
-func (m cidrMatch) match(host, port string, ip net.IP) bool {
- return m.cidr.Contains(ip)
-}
-
-type ipMatch struct {
- ip net.IP
- port string
-}
-
-func (m ipMatch) match(host, port string, ip net.IP) bool {
- if m.ip.Equal(ip) {
- return m.port == "" || m.port == port
- }
- return false
-}
-
-type domainMatch struct {
- host string
- port string
-
- matchHost bool
-}
-
-func (m domainMatch) match(host, port string, ip net.IP) bool {
- if strings.HasSuffix(host, m.host) || (m.matchHost && host == m.host[1:]) {
- return m.port == "" || m.port == port
- }
- return false
-}
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/trieval.go b/contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/trieval.go
deleted file mode 100644
index 7a8cf889b5..0000000000
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/trieval.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
-
-package idna
-
-// This file contains definitions for interpreting the trie value of the idna
-// trie generated by "go run gen*.go". It is shared by both the generator
-// program and the resultant package. Sharing is achieved by the generator
-// copying gen_trieval.go to trieval.go and changing what's above this comment.
-
-// info holds information from the IDNA mapping table for a single rune. It is
-// the value returned by a trie lookup. In most cases, all information fits in
-// a 16-bit value. For mappings, this value may contain an index into a slice
-// with the mapped string. Such mappings can consist of the actual mapped value
-// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the
-// input rune. This technique is used by the cases packages and reduces the
-// table size significantly.
-//
-// The per-rune values have the following format:
-//
-// if mapped {
-// if inlinedXOR {
-// 15..13 inline XOR marker
-// 12..11 unused
-// 10..3 inline XOR mask
-// } else {
-// 15..3 index into xor or mapping table
-// }
-// } else {
-// 15..14 unused
-// 13 mayNeedNorm
-// 12..11 attributes
-// 10..8 joining type
-// 7..3 category type
-// }
-// 2 use xor pattern
-// 1..0 mapped category
-//
-// See the definitions below for a more detailed description of the various
-// bits.
-type info uint16
-
-const (
- catSmallMask = 0x3
- catBigMask = 0xF8
- indexShift = 3
- xorBit = 0x4 // interpret the index as an xor pattern
- inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined.
-
- joinShift = 8
- joinMask = 0x07
-
- // Attributes
- attributesMask = 0x1800
- viramaModifier = 0x1800
- modifier = 0x1000
- rtl = 0x0800
-
- mayNeedNorm = 0x2000
-)
-
-// A category corresponds to a category defined in the IDNA mapping table.
-type category uint16
-
-const (
- unknown category = 0 // not currently defined in unicode.
- mapped category = 1
- disallowedSTD3Mapped category = 2
- deviation category = 3
-)
-
-const (
- valid category = 0x08
- validNV8 category = 0x18
- validXV8 category = 0x28
- disallowed category = 0x40
- disallowedSTD3Valid category = 0x80
- ignored category = 0xC0
-)
-
-// join types and additional rune information
-const (
- joiningL = (iota + 1)
- joiningD
- joiningT
- joiningR
-
-	// the following types are derived during processing
- joinZWJ
- joinZWNJ
- joinVirama
- numJoinTypes
-)
-
-func (c info) isMapped() bool {
- return c&0x3 != 0
-}
-
-func (c info) category() category {
- small := c & catSmallMask
- if small != 0 {
- return category(small)
- }
- return category(c & catBigMask)
-}
-
-func (c info) joinType() info {
- if c.isMapped() {
- return 0
- }
- return (c >> joinShift) & joinMask
-}
-
-func (c info) isModifier() bool {
- return c&(modifier|catSmallMask) == modifier
-}
-
-func (c info) isViramaModifier() bool {
- return c&(attributesMask|catSmallMask) == viramaModifier
-}
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/byteorder.go b/contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/byteorder.go
deleted file mode 100644
index dcbb14ef35..0000000000
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/byteorder.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-import (
- "runtime"
-)
-
-// byteOrder is a subset of encoding/binary.ByteOrder.
-type byteOrder interface {
- Uint32([]byte) uint32
- Uint64([]byte) uint64
-}
-
-type littleEndian struct{}
-type bigEndian struct{}
-
-func (littleEndian) Uint32(b []byte) uint32 {
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-}
-
-func (littleEndian) Uint64(b []byte) uint64 {
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-}
-
-func (bigEndian) Uint32(b []byte) uint32 {
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
-}
-
-func (bigEndian) Uint64(b []byte) uint64 {
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
- uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
-}
-
-// hostByteOrder returns littleEndian on little-endian machines and
-// bigEndian on big-endian machines.
-func hostByteOrder() byteOrder {
- switch runtime.GOARCH {
- case "386", "amd64", "amd64p32",
- "alpha",
- "arm", "arm64",
- "mipsle", "mips64le", "mips64p32le",
- "nios2",
- "ppc64le",
- "riscv", "riscv64",
- "sh":
- return littleEndian{}
- case "armbe", "arm64be",
- "m68k",
- "mips", "mips64", "mips64p32",
- "ppc", "ppc64",
- "s390", "s390x",
- "shbe",
- "sparc", "sparc64":
- return bigEndian{}
- }
- panic("unknown architecture")
-}
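
For reference, the hand-rolled conversions above match encoding/binary; a minimal check (my own example, not part of the deleted file):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x01, 0x02, 0x03, 0x04}
	// little-endian: least significant byte first
	le := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
	// big-endian: most significant byte first
	be := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
	fmt.Printf("le=%#08x be=%#08x\n", le, be)        // le=0x04030201 be=0x1020304
	fmt.Println(binary.LittleEndian.Uint32(b) == le) // true
	fmt.Println(binary.BigEndian.Uint32(b) == be)    // true
}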
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/cpu.go b/contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/cpu.go
deleted file mode 100644
index b56886f261..0000000000
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/cpu.go
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cpu implements processor feature detection for
-// various CPU architectures.
-package cpu
-
-import (
- "os"
- "strings"
-)
-
-// Initialized reports whether the CPU features were initialized.
-//
-// For some GOOS/GOARCH combinations, initialization of the CPU features
-// depends on reading an operating-system-specific file, e.g. /proc/self/auxv
-// on linux/arm. Initialized will report false if reading the file fails.
-var Initialized bool
-
-// CacheLinePad is used to pad structs to avoid false sharing.
-type CacheLinePad struct{ _ [cacheLineSize]byte }
-
-// X86 contains the supported CPU features of the
-// current X86/AMD64 platform. If the current platform
-// is not X86/AMD64 then all feature flags are false.
-//
-// X86 is padded to avoid false sharing. Further, HasAVX
-// and HasAVX2 are only set if the OS supports XMM and YMM
-// registers in addition to the CPUID feature bit being set.
-var X86 struct {
- _ CacheLinePad
- HasAES bool // AES hardware implementation (AES NI)
- HasADX bool // Multi-precision add-carry instruction extensions
- HasAVX bool // Advanced vector extension
- HasAVX2 bool // Advanced vector extension 2
- HasAVX512 bool // Advanced vector extension 512
- HasAVX512F bool // Advanced vector extension 512 Foundation Instructions
- HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions
- HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions
- HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions
- HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions
- HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions
- HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions
- HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add
- HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions
- HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision
- HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision
- HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions
- HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations
- HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions
- HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions
- HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions
- HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2
- HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms
- HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions
- HasBMI1 bool // Bit manipulation instruction set 1
- HasBMI2 bool // Bit manipulation instruction set 2
- HasCX16 bool // Compare and exchange 16 Bytes
- HasERMS bool // Enhanced REP for MOVSB and STOSB
- HasFMA bool // Fused-multiply-add instructions
- HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers.
- HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM
- HasPOPCNT bool // Hamming weight instruction POPCNT.
- HasRDRAND bool // RDRAND instruction (on-chip random number generator)
- HasRDSEED bool // RDSEED instruction (on-chip random number generator)
- HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64)
- HasSSE3 bool // Streaming SIMD extension 3
- HasSSSE3 bool // Supplemental streaming SIMD extension 3
- HasSSE41 bool // Streaming SIMD extension 4 and 4.1
- HasSSE42 bool // Streaming SIMD extension 4 and 4.2
- _ CacheLinePad
-}
-
-// ARM64 contains the supported CPU features of the
-// current ARMv8(aarch64) platform. If the current platform
-// is not arm64 then all feature flags are false.
-var ARM64 struct {
- _ CacheLinePad
- HasFP bool // Floating-point instruction set (always available)
- HasASIMD bool // Advanced SIMD (always available)
- HasEVTSTRM bool // Event stream support
- HasAES bool // AES hardware implementation
- HasPMULL bool // Polynomial multiplication instruction set
- HasSHA1 bool // SHA1 hardware implementation
- HasSHA2 bool // SHA2 hardware implementation
- HasCRC32 bool // CRC32 hardware implementation
- HasATOMICS bool // Atomic memory operation instruction set
- HasFPHP bool // Half precision floating-point instruction set
- HasASIMDHP bool // Advanced SIMD half precision instruction set
- HasCPUID bool // CPUID identification scheme registers
- HasASIMDRDM bool // Rounding double multiply add/subtract instruction set
- HasJSCVT bool // Javascript conversion from floating-point to integer
- HasFCMA bool // Floating-point multiplication and addition of complex numbers
- HasLRCPC bool // Load-Acquire RCpc (Release Consistent processor consistent) instruction support
- HasDCPOP bool // Persistent memory support
- HasSHA3 bool // SHA3 hardware implementation
- HasSM3 bool // SM3 hardware implementation
- HasSM4 bool // SM4 hardware implementation
- HasASIMDDP bool // Advanced SIMD double precision instruction set
- HasSHA512 bool // SHA512 hardware implementation
- HasSVE bool // Scalable Vector Extensions
- HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32
- _ CacheLinePad
-}
-
-// ARM contains the supported CPU features of the current ARM (32-bit) platform.
-// All feature flags are false if:
-// 1. the current platform is not arm, or
-// 2. the current operating system is not Linux.
-var ARM struct {
- _ CacheLinePad
- HasSWP bool // SWP instruction support
- HasHALF bool // Half-word load and store support
- HasTHUMB bool // ARM Thumb instruction set
- Has26BIT bool // Address space limited to 26-bits
- HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support
- HasFPA bool // Floating point arithmetic support
- HasVFP bool // Vector floating point support
- HasEDSP bool // DSP Extensions support
- HasJAVA bool // Java instruction set
- HasIWMMXT bool // Intel Wireless MMX technology support
- HasCRUNCH bool // MaverickCrunch context switching and handling
- HasTHUMBEE bool // Thumb EE instruction set
- HasNEON bool // NEON instruction set
- HasVFPv3 bool // Vector floating point version 3 support
- HasVFPv3D16 bool // Vector floating point version 3 D8-D15
- HasTLS bool // Thread local storage support
- HasVFPv4 bool // Vector floating point version 4 support
- HasIDIVA bool // Integer divide instruction support in ARM mode
- HasIDIVT bool // Integer divide instruction support in Thumb mode
- HasVFPD32 bool // Vector floating point version 3 D15-D31
- HasLPAE bool // Large Physical Address Extensions
- HasEVTSTRM bool // Event stream support
- HasAES bool // AES hardware implementation
- HasPMULL bool // Polynomial multiplication instruction set
- HasSHA1 bool // SHA1 hardware implementation
- HasSHA2 bool // SHA2 hardware implementation
- HasCRC32 bool // CRC32 hardware implementation
- _ CacheLinePad
-}
-
-// MIPS64X contains the supported CPU features of the current mips64/mips64le
-// platforms. If the current platform is not mips64/mips64le or the current
-// operating system is not Linux then all feature flags are false.
-var MIPS64X struct {
- _ CacheLinePad
- HasMSA bool // MIPS SIMD architecture
- _ CacheLinePad
-}
-
-// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms.
-// If the current platform is not ppc64/ppc64le then all feature flags are false.
-//
-// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00,
-// since there are no optional categories. There are some exceptions that also
-// require kernel support to work (DARN, SCV), so there are feature bits for
-// those as well. The struct is padded to avoid false sharing.
-var PPC64 struct {
- _ CacheLinePad
- HasDARN bool // Hardware random number generator (requires kernel enablement)
- HasSCV bool // Syscall vectored (requires kernel enablement)
- IsPOWER8 bool // ISA v2.07 (POWER8)
- IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8
- _ CacheLinePad
-}
-
-// S390X contains the supported CPU features of the current IBM Z
-// (s390x) platform. If the current platform is not IBM Z then all
-// feature flags are false.
-//
-// S390X is padded to avoid false sharing. Further, HasVX is only set
-// if the OS supports vector registers in addition to the STFLE
-// feature bit being set.
-var S390X struct {
- _ CacheLinePad
- HasZARCH bool // z/Architecture mode is active [mandatory]
- HasSTFLE bool // store facility list extended
- HasLDISP bool // long (20-bit) displacements
- HasEIMM bool // 32-bit immediates
- HasDFP bool // decimal floating point
- HasETF3EH bool // ETF-3 enhanced
- HasMSA bool // message security assist (CPACF)
- HasAES bool // KM-AES{128,192,256} functions
- HasAESCBC bool // KMC-AES{128,192,256} functions
- HasAESCTR bool // KMCTR-AES{128,192,256} functions
- HasAESGCM bool // KMA-GCM-AES{128,192,256} functions
- HasGHASH bool // KIMD-GHASH function
- HasSHA1 bool // K{I,L}MD-SHA-1 functions
- HasSHA256 bool // K{I,L}MD-SHA-256 functions
- HasSHA512 bool // K{I,L}MD-SHA-512 functions
- HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions
- HasVX bool // vector facility
- HasVXE bool // vector-enhancements facility 1
- _ CacheLinePad
-}
-
-func init() {
- archInit()
- initOptions()
- processOptions()
-}
-
-// options contains the cpu debug options that can be used in GODEBUG.
-// Options are arch dependent and are added by the arch specific initOptions functions.
-// Features that are mandatory for the specific GOARCH should have the Required field set
-// (e.g. SSE2 on amd64).
-var options []option
-
-// Option names should be lower case, e.g. avx instead of AVX.
-type option struct {
- Name string
- Feature *bool
- Specified bool // whether feature value was specified in GODEBUG
- Enable bool // whether feature should be enabled
- Required bool // whether feature is mandatory and can not be disabled
-}
-
-func processOptions() {
- env := os.Getenv("GODEBUG")
-field:
- for env != "" {
- field := ""
- i := strings.IndexByte(env, ',')
- if i < 0 {
- field, env = env, ""
- } else {
- field, env = env[:i], env[i+1:]
- }
- if len(field) < 4 || field[:4] != "cpu." {
- continue
- }
- i = strings.IndexByte(field, '=')
- if i < 0 {
- print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n")
- continue
- }
- key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on"
-
- var enable bool
- switch value {
- case "on":
- enable = true
- case "off":
- enable = false
- default:
- print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n")
- continue field
- }
-
- if key == "all" {
- for i := range options {
- options[i].Specified = true
- options[i].Enable = enable || options[i].Required
- }
- continue field
- }
-
- for i := range options {
- if options[i].Name == key {
- options[i].Specified = true
- options[i].Enable = enable
- continue field
- }
- }
-
- print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n")
- }
-
- for _, o := range options {
- if !o.Specified {
- continue
- }
-
- if o.Enable && !*o.Feature {
- print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n")
- continue
- }
-
- if !o.Enable && o.Required {
- print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n")
- continue
- }
-
- *o.Feature = o.Enable
- }
-}
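
The parser above accepts comma-separated cpu.<name>=on|off fields in GODEBUG, plus the special name all; option names are registered in lower case by the per-arch initOptions functions. A hedged usage sketch (assuming avx2 is a registered option name on amd64, as the x/sys/cpu sources suggest):

// Run as: GODEBUG=cpu.avx2=off ./prog
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// With the GODEBUG setting above, HasAVX2 reads false even on
	// AVX2-capable hardware (Required features cannot be disabled).
	fmt.Println("AVX2:", cpu.X86.HasAVX2)
}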
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/core.go b/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/core.go
deleted file mode 100644
index fde188a33b..0000000000
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/core.go
+++ /dev/null
@@ -1,1071 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package bidi
-
-import (
- "fmt"
- "log"
-)
-
-// This implementation is a port based on the reference implementation found at:
-// https://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/
-//
-// The algorithm is described in Unicode Bidirectional Algorithm (UAX #9).
-//
-// Input:
-// There are two levels of input to the algorithm, since clients may prefer to
-// supply some information from out-of-band sources rather than relying on the
-// default behavior.
-//
-// - Bidi class array
-// - Bidi class array, with externally supplied base line direction
-//
-// Output:
-// Output is separated into several stages:
-//
-// - levels array over entire paragraph
-// - reordering array over entire paragraph
-// - levels array over line
-// - reordering array over line
-//
-// Note that for conformance to the Unicode Bidirectional Algorithm,
-// implementations are only required to generate correct reordering and
-// character directionality (odd or even levels) over a line. Generating
-// identical level arrays over a line is not required. Bidi explicit format
-// codes (LRE, RLE, LRO, RLO, PDF) and BN can be assigned arbitrary levels and
-// positions as long as the rest of the input is properly reordered.
-//
-// As the algorithm is defined to operate on a single paragraph at a time, this
-// implementation is written to handle single paragraphs. Thus rule P1 is
-// presumed by this implementation: the data provided to the implementation is
-// assumed to be a single paragraph, and either contains no 'B' codes, or a
-// single 'B' code at the end of the input. 'B' is allowed as input to
-// illustrate how the algorithm assigns it a level.
-//
-// Also note that rules L3 and L4 depend on the rendering engine that uses the
-// result of the bidi algorithm. This implementation assumes that the rendering
-// engine expects combining marks in visual order (e.g. to the left of their
-// base character in RTL runs) and that it adjusts the glyphs used to render
-// mirrored characters that are in RTL runs so that they render appropriately.
-
-// level is the embedding level of a character. Even embedding levels indicate
-// left-to-right order and odd levels indicate right-to-left order. The special
-// level of -1 is reserved for undefined order.
-type level int8
-
-const implicitLevel level = -1
-
-// in reports whether c is equal to any of the values in set.
-func (c Class) in(set ...Class) bool {
- for _, s := range set {
- if c == s {
- return true
- }
- }
- return false
-}
-
-// A paragraph contains the state of a paragraph.
-type paragraph struct {
- initialTypes []Class
-
- // Arrays of properties needed for paired bracket evaluation in N0
- pairTypes []bracketType // paired Bracket types for paragraph
- pairValues []rune // rune for opening bracket or pbOpen and pbClose; 0 for pbNone
-
- embeddingLevel level // default: implicitLevel
-
- // working arrays at the paragraph level
- resultTypes []Class
- resultLevels []level
-
- // Index of matching PDI for isolate initiator characters. For other
- // characters, the value of matchingPDI will be set to -1. For isolate
- // initiators with no matching PDI, matchingPDI will be set to the length of
- // the input string.
- matchingPDI []int
-
- // Index of matching isolate initiator for PDI characters. For other
- // characters, and for PDIs with no matching isolate initiator, the value of
- // matchingIsolateInitiator will be set to -1.
- matchingIsolateInitiator []int
-}
-
-// newParagraph initializes a paragraph. The user needs to supply a few arrays
-// corresponding to the preprocessed text input. The types correspond to the
-// Unicode BiDi classes for each rune. pairTypes indicates the bracket type for
-// each rune. pairValues provides a unique bracket class identifier for each
-// rune (the suggested value is the rune of the opening bracket, used for both
-// opening and matching closing brackets, after normalization). The embedding
-// levels are optional, but may be supplied to encode embedding levels of
-// styled text.
-func newParagraph(types []Class, pairTypes []bracketType, pairValues []rune, levels level) (*paragraph, error) {
- var err error
- if err = validateTypes(types); err != nil {
- return nil, err
- }
- if err = validatePbTypes(pairTypes); err != nil {
- return nil, err
- }
- if err = validatePbValues(pairValues, pairTypes); err != nil {
- return nil, err
- }
- if err = validateParagraphEmbeddingLevel(levels); err != nil {
- return nil, err
- }
-
- p := &paragraph{
- initialTypes: append([]Class(nil), types...),
- embeddingLevel: levels,
-
- pairTypes: pairTypes,
- pairValues: pairValues,
-
- resultTypes: append([]Class(nil), types...),
- }
- p.run()
- return p, nil
-}
-
-func (p *paragraph) Len() int { return len(p.initialTypes) }
-
-// The algorithm. Does not include line-based processing (Rules L1, L2).
-// These are applied later in the line-based phase of the algorithm.
-func (p *paragraph) run() {
- p.determineMatchingIsolates()
-
- // 1) determining the paragraph level
- // Rule P1 is the requirement for entering this algorithm.
- // Rules P2, P3.
- // If no externally supplied paragraph embedding level, use default.
- if p.embeddingLevel == implicitLevel {
- p.embeddingLevel = p.determineParagraphEmbeddingLevel(0, p.Len())
- }
-
- // Initialize result levels to paragraph embedding level.
- p.resultLevels = make([]level, p.Len())
- setLevels(p.resultLevels, p.embeddingLevel)
-
- // 2) Explicit levels and directions
- // Rules X1-X8.
- p.determineExplicitEmbeddingLevels()
-
- // Rule X9.
- // We do not remove the embeddings, the overrides, the PDFs, and the BNs
- // from the string explicitly. But they are not copied into isolating run
- // sequences when they are created, so they are removed for all
- // practical purposes.
-
- // Rule X10.
- // Run remainder of algorithm one isolating run sequence at a time
- for _, seq := range p.determineIsolatingRunSequences() {
- // 3) resolving weak types
- // Rules W1-W7.
- seq.resolveWeakTypes()
-
- // 4a) resolving paired brackets
- // Rule N0
- resolvePairedBrackets(seq)
-
- // 4b) resolving neutral types
- // Rules N1-N3.
- seq.resolveNeutralTypes()
-
- // 5) resolving implicit embedding levels
- // Rules I1, I2.
- seq.resolveImplicitLevels()
-
- // Apply the computed levels and types
- seq.applyLevelsAndTypes()
- }
-
- // Assign appropriate levels to 'hide' LREs, RLEs, LROs, RLOs, PDFs, and
- // BNs. This is for convenience, so the resulting level array will have
- // a value for every character.
- p.assignLevelsToCharactersRemovedByX9()
-}
-
-// determineMatchingIsolates determines the matching PDI for each isolate
-// initiator and vice versa.
-//
-// Definition BD9.
-//
-// At the end of this function:
-//
-// - The member variable matchingPDI is set to point to the index of the
-// matching PDI character for each isolate initiator character. If there is
-// no matching PDI, it is set to the length of the input text. For other
-// characters, it is set to -1.
-// - The member variable matchingIsolateInitiator is set to point to the
-// index of the matching isolate initiator character for each PDI character.
-// If there is no matching isolate initiator, or the character is not a PDI,
-// it is set to -1.
-func (p *paragraph) determineMatchingIsolates() {
- p.matchingPDI = make([]int, p.Len())
- p.matchingIsolateInitiator = make([]int, p.Len())
-
- for i := range p.matchingIsolateInitiator {
- p.matchingIsolateInitiator[i] = -1
- }
-
- for i := range p.matchingPDI {
- p.matchingPDI[i] = -1
-
- if t := p.resultTypes[i]; t.in(LRI, RLI, FSI) {
- depthCounter := 1
- for j := i + 1; j < p.Len(); j++ {
- if u := p.resultTypes[j]; u.in(LRI, RLI, FSI) {
- depthCounter++
- } else if u == PDI {
- if depthCounter--; depthCounter == 0 {
- p.matchingPDI[i] = j
- p.matchingIsolateInitiator[j] = i
- break
- }
- }
- }
- if p.matchingPDI[i] == -1 {
- p.matchingPDI[i] = p.Len()
- }
- }
- }
-}
-
-// determineParagraphEmbeddingLevel reports the resolved paragraph direction of
-// the substring limited by the given range [start, end).
-//
-// Determines the paragraph level based on rules P2, P3. This is also used
-// in rule X5c to find if an FSI should resolve to LRI or RLI.
-func (p *paragraph) determineParagraphEmbeddingLevel(start, end int) level {
- var strongType Class = unknownClass
-
- // Rule P2.
- for i := start; i < end; i++ {
- if t := p.resultTypes[i]; t.in(L, AL, R) {
- strongType = t
- break
- } else if t.in(FSI, LRI, RLI) {
- i = p.matchingPDI[i] // skip over to the matching PDI
- if i > end {
- log.Panic("assert (i <= end)")
- }
- }
- }
- // Rule P3.
- switch strongType {
- case unknownClass: // none found
- // default embedding level when no strong types found is 0.
- return 0
- case L:
- return 0
- default: // AL, R
- return 1
- }
-}
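
A self-contained sketch of rules P2/P3 on a flat class sequence (simplified: it does not skip isolate runs the way the method above does, and the class names are local stand-ins, not the package's actual types):

package main

import "fmt"

type class int

const (
	classL  class = iota // strong left-to-right
	classR               // strong right-to-left
	classAL              // Arabic letter (strong right-to-left)
	classON              // other neutral
)

// paragraphLevel applies P2/P3 to a flat sequence of classes.
func paragraphLevel(types []class) int {
	for _, t := range types {
		switch t {
		case classL:
			return 0 // first strong type is L: LTR paragraph
		case classR, classAL:
			return 1 // first strong type is R or AL: RTL paragraph
		}
	}
	return 0 // no strong type found: default LTR
}

func main() {
	fmt.Println(paragraphLevel([]class{classON, classR, classL})) // 1
	fmt.Println(paragraphLevel([]class{classON, classON}))        // 0
}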
-
-const maxDepth = 125
-
-// This stack stores the embedding levels and the override and isolate
-// statuses.
-type directionalStatusStack struct {
- stackCounter int
- embeddingLevelStack [maxDepth + 1]level
- overrideStatusStack [maxDepth + 1]Class
- isolateStatusStack [maxDepth + 1]bool
-}
-
-func (s *directionalStatusStack) empty() { s.stackCounter = 0 }
-func (s *directionalStatusStack) pop() { s.stackCounter-- }
-func (s *directionalStatusStack) depth() int { return s.stackCounter }
-
-func (s *directionalStatusStack) push(level level, overrideStatus Class, isolateStatus bool) {
- s.embeddingLevelStack[s.stackCounter] = level
- s.overrideStatusStack[s.stackCounter] = overrideStatus
- s.isolateStatusStack[s.stackCounter] = isolateStatus
- s.stackCounter++
-}
-
-func (s *directionalStatusStack) lastEmbeddingLevel() level {
- return s.embeddingLevelStack[s.stackCounter-1]
-}
-
-func (s *directionalStatusStack) lastDirectionalOverrideStatus() Class {
- return s.overrideStatusStack[s.stackCounter-1]
-}
-
-func (s *directionalStatusStack) lastDirectionalIsolateStatus() bool {
- return s.isolateStatusStack[s.stackCounter-1]
-}
-
-// Determine explicit levels using rules X1 - X8
-func (p *paragraph) determineExplicitEmbeddingLevels() {
- var stack directionalStatusStack
- var overflowIsolateCount, overflowEmbeddingCount, validIsolateCount int
-
- // Rule X1.
- stack.push(p.embeddingLevel, ON, false)
-
- for i, t := range p.resultTypes {
- // Rules X2, X3, X4, X5, X5a, X5b, X5c
- switch t {
- case RLE, LRE, RLO, LRO, RLI, LRI, FSI:
- isIsolate := t.in(RLI, LRI, FSI)
- isRTL := t.in(RLE, RLO, RLI)
-
- // override if this is an FSI that resolves to RLI
- if t == FSI {
- isRTL = (p.determineParagraphEmbeddingLevel(i+1, p.matchingPDI[i]) == 1)
- }
- if isIsolate {
- p.resultLevels[i] = stack.lastEmbeddingLevel()
- if stack.lastDirectionalOverrideStatus() != ON {
- p.resultTypes[i] = stack.lastDirectionalOverrideStatus()
- }
- }
-
- var newLevel level
- if isRTL {
- // least greater odd
- newLevel = (stack.lastEmbeddingLevel() + 1) | 1
- } else {
- // least greater even
- newLevel = (stack.lastEmbeddingLevel() + 2) &^ 1
- }
-
- if newLevel <= maxDepth && overflowIsolateCount == 0 && overflowEmbeddingCount == 0 {
- if isIsolate {
- validIsolateCount++
- }
- // Push new embedding level, override status, and isolated
- // status.
- // No check for valid stack counter, since the level check
- // suffices.
- switch t {
- case LRO:
- stack.push(newLevel, L, isIsolate)
- case RLO:
- stack.push(newLevel, R, isIsolate)
- default:
- stack.push(newLevel, ON, isIsolate)
- }
- // Not really part of the spec
- if !isIsolate {
- p.resultLevels[i] = newLevel
- }
- } else {
- // This is an invalid explicit formatting character,
- // so apply the "Otherwise" part of rules X2-X5b.
- if isIsolate {
- overflowIsolateCount++
- } else { // !isIsolate
- if overflowIsolateCount == 0 {
- overflowEmbeddingCount++
- }
- }
- }
-
- // Rule X6a
- case PDI:
- if overflowIsolateCount > 0 {
- overflowIsolateCount--
- } else if validIsolateCount == 0 {
- // do nothing
- } else {
- overflowEmbeddingCount = 0
- for !stack.lastDirectionalIsolateStatus() {
- stack.pop()
- }
- stack.pop()
- validIsolateCount--
- }
- p.resultLevels[i] = stack.lastEmbeddingLevel()
-
- // Rule X7
- case PDF:
- // Not really part of the spec
- p.resultLevels[i] = stack.lastEmbeddingLevel()
-
- if overflowIsolateCount > 0 {
- // do nothing
- } else if overflowEmbeddingCount > 0 {
- overflowEmbeddingCount--
- } else if !stack.lastDirectionalIsolateStatus() && stack.depth() >= 2 {
- stack.pop()
- }
-
- case B: // paragraph separator.
- // Rule X8.
-
- // These values are reset for clarity; in this implementation B
- // can only occur as the last code in the array.
- stack.empty()
- overflowIsolateCount = 0
- overflowEmbeddingCount = 0
- validIsolateCount = 0
- p.resultLevels[i] = p.embeddingLevel
-
- default:
- p.resultLevels[i] = stack.lastEmbeddingLevel()
- if stack.lastDirectionalOverrideStatus() != ON {
- p.resultTypes[i] = stack.lastDirectionalOverrideStatus()
- }
- }
- }
-}
-
-type isolatingRunSequence struct {
- p *paragraph
-
- indexes []int // indexes to the original string
-
- types []Class // type of each character using the index
- resolvedLevels []level // resolved levels after application of rules
- level level
- sos, eos Class
-}
-
-func (i *isolatingRunSequence) Len() int { return len(i.indexes) }
-
-func maxLevel(a, b level) level {
- if a > b {
- return a
- }
- return b
-}
-
-// Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types,
-// either L or R, for each isolating run sequence.
-func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
- length := len(indexes)
- types := make([]Class, length)
- for i, x := range indexes {
- types[i] = p.resultTypes[x]
- }
-
- // assign level, sos and eos
- prevChar := indexes[0] - 1
- for prevChar >= 0 && isRemovedByX9(p.initialTypes[prevChar]) {
- prevChar--
- }
- prevLevel := p.embeddingLevel
- if prevChar >= 0 {
- prevLevel = p.resultLevels[prevChar]
- }
-
- var succLevel level
- lastType := types[length-1]
- if lastType.in(LRI, RLI, FSI) {
- succLevel = p.embeddingLevel
- } else {
- // the first character after the end of the run sequence
- limit := indexes[length-1] + 1
- for ; limit < p.Len() && isRemovedByX9(p.initialTypes[limit]); limit++ {
- }
- succLevel = p.embeddingLevel
- if limit < p.Len() {
- succLevel = p.resultLevels[limit]
- }
- }
- level := p.resultLevels[indexes[0]]
- return &isolatingRunSequence{
- p: p,
- indexes: indexes,
- types: types,
- level: level,
- sos: typeForLevel(maxLevel(prevLevel, level)),
- eos: typeForLevel(maxLevel(succLevel, level)),
- }
-}
-
-// Resolving weak types. Rules W1-W7.
-//
-// Note that some weak types (EN, AN) remain after this processing is
-// complete.
-func (s *isolatingRunSequence) resolveWeakTypes() {
-
- // on entry, only these types remain
- s.assertOnly(L, R, AL, EN, ES, ET, AN, CS, B, S, WS, ON, NSM, LRI, RLI, FSI, PDI)
-
- // Rule W1.
- // Changes all NSMs.
- precedingCharacterType := s.sos
- for i, t := range s.types {
- if t == NSM {
- s.types[i] = precedingCharacterType
- } else {
- // if t.in(LRI, RLI, FSI, PDI) {
- // precedingCharacterType = ON
- // }
- precedingCharacterType = t
- }
- }
-
- // Rule W2.
- // EN does not change at the start of the run, because sos != AL.
- for i, t := range s.types {
- if t == EN {
- for j := i - 1; j >= 0; j-- {
- if t := s.types[j]; t.in(L, R, AL) {
- if t == AL {
- s.types[i] = AN
- }
- break
- }
- }
- }
- }
-
- // Rule W3.
- for i, t := range s.types {
- if t == AL {
- s.types[i] = R
- }
- }
-
- // Rule W4.
- // Since there must be values on both sides for this rule to have an
- // effect, the scan skips the first and last value.
- //
- // Although the scan proceeds left to right, and changes the type
- // values in a way that would appear to affect the computations
- // later in the scan, there is actually no problem. A change in the
- // current value can only affect the value to its immediate right,
- // and only affect it if it is ES or CS. But the current value can
- // only change if the value to its right is not ES or CS. Thus
- // either the current value will not change, or its change will have
- // no effect on the remainder of the analysis.
-
- for i := 1; i < s.Len()-1; i++ {
- t := s.types[i]
- if t == ES || t == CS {
- prevSepType := s.types[i-1]
- succSepType := s.types[i+1]
- if prevSepType == EN && succSepType == EN {
- s.types[i] = EN
- } else if s.types[i] == CS && prevSepType == AN && succSepType == AN {
- s.types[i] = AN
- }
- }
- }
-
- // Rule W5.
- for i, t := range s.types {
- if t == ET {
- // locate end of sequence
- runStart := i
- runEnd := s.findRunLimit(runStart, ET)
-
- // check values at ends of sequence
- t := s.sos
- if runStart > 0 {
- t = s.types[runStart-1]
- }
- if t != EN {
- t = s.eos
- if runEnd < len(s.types) {
- t = s.types[runEnd]
- }
- }
- if t == EN {
- setTypes(s.types[runStart:runEnd], EN)
- }
- // continue at end of sequence
- i = runEnd
- }
- }
-
- // Rule W6.
- for i, t := range s.types {
- if t.in(ES, ET, CS) {
- s.types[i] = ON
- }
- }
-
- // Rule W7.
- for i, t := range s.types {
- if t == EN {
- // set default if we reach start of run
- prevStrongType := s.sos
- for j := i - 1; j >= 0; j-- {
- t = s.types[j]
- if t == L || t == R { // AL's have been changed to R
- prevStrongType = t
- break
- }
- }
- if prevStrongType == L {
- s.types[i] = L
- }
- }
- }
-}
-
-// 6) resolving neutral types. Rules N1-N2.
-func (s *isolatingRunSequence) resolveNeutralTypes() {
-
- // on entry, only these types can be in resultTypes
- s.assertOnly(L, R, EN, AN, B, S, WS, ON, RLI, LRI, FSI, PDI)
-
- for i, t := range s.types {
- switch t {
- case WS, ON, B, S, RLI, LRI, FSI, PDI:
- // find bounds of run of neutrals
- runStart := i
- runEnd := s.findRunLimit(runStart, B, S, WS, ON, RLI, LRI, FSI, PDI)
-
- // determine effective types at ends of run
- var leadType, trailType Class
-
- // Note that the character found can only be L, R, AN, or
- // EN.
- if runStart == 0 {
- leadType = s.sos
- } else {
- leadType = s.types[runStart-1]
- if leadType.in(AN, EN) {
- leadType = R
- }
- }
- if runEnd == len(s.types) {
- trailType = s.eos
- } else {
- trailType = s.types[runEnd]
- if trailType.in(AN, EN) {
- trailType = R
- }
- }
-
- var resolvedType Class
- if leadType == trailType {
- // Rule N1.
- resolvedType = leadType
- } else {
- // Rule N2.
- // Notice the embedding level of the run is used, not
- // the paragraph embedding level.
- resolvedType = typeForLevel(s.level)
- }
-
- setTypes(s.types[runStart:runEnd], resolvedType)
-
- // skip over run of (former) neutrals
- i = runEnd
- }
- }
-}
-
-func setLevels(levels []level, newLevel level) {
- for i := range levels {
- levels[i] = newLevel
- }
-}
-
-func setTypes(types []Class, newType Class) {
- for i := range types {
- types[i] = newType
- }
-}
-
-// 7) resolving implicit embedding levels Rules I1, I2.
-func (s *isolatingRunSequence) resolveImplicitLevels() {
-
- // on entry, only these types can be in resultTypes
- s.assertOnly(L, R, EN, AN)
-
- s.resolvedLevels = make([]level, len(s.types))
- setLevels(s.resolvedLevels, s.level)
-
- if (s.level & 1) == 0 { // even level
- for i, t := range s.types {
- // Rule I1.
- if t == L {
- // no change
- } else if t == R {
- s.resolvedLevels[i] += 1
- } else { // t == AN || t == EN
- s.resolvedLevels[i] += 2
- }
- }
- } else { // odd level
- for i, t := range s.types {
- // Rule I2.
- if t == R {
- // no change
- } else { // t == L || t == AN || t == EN
- s.resolvedLevels[i] += 1
- }
- }
- }
-}
-
-// Applies the levels and types resolved in rules W1-I2 to the
-// resultLevels array.
-func (s *isolatingRunSequence) applyLevelsAndTypes() {
- for i, x := range s.indexes {
- s.p.resultTypes[x] = s.types[i]
- s.p.resultLevels[x] = s.resolvedLevels[i]
- }
-}
-
-// Return the limit of the run consisting only of the types in validSet
-// starting at index. This checks the value at index, and will return
-// index if that value is not in validSet.
-func (s *isolatingRunSequence) findRunLimit(index int, validSet ...Class) int {
-loop:
- for ; index < len(s.types); index++ {
- t := s.types[index]
- for _, valid := range validSet {
- if t == valid {
- continue loop
- }
- }
- return index // didn't find a match in validSet
- }
- return len(s.types)
-}
-
-// Algorithm validation. Assert that all values in types are in the
-// provided set.
-func (s *isolatingRunSequence) assertOnly(codes ...Class) {
-loop:
- for i, t := range s.types {
- for _, c := range codes {
- if t == c {
- continue loop
- }
- }
- log.Panicf("invalid bidi code %v present in assertOnly at position %d", t, s.indexes[i])
- }
-}
-
-// determineLevelRuns returns an array of level runs. Each level run is
-// described as an array of indexes into the input string.
-//
-// Determines the level runs. Rule X9 will be applied in determining the
-// runs, in the way that makes sure the characters that are supposed to be
-// removed are not included in the runs.
-func (p *paragraph) determineLevelRuns() [][]int {
- run := []int{}
- allRuns := [][]int{}
- currentLevel := implicitLevel
-
- for i := range p.initialTypes {
- if !isRemovedByX9(p.initialTypes[i]) {
- if p.resultLevels[i] != currentLevel {
- // we just encountered a new run; wrap up last run
- if currentLevel >= 0 { // only wrap it up if there was a run
- allRuns = append(allRuns, run)
- run = nil
- }
- // Start new run
- currentLevel = p.resultLevels[i]
- }
- run = append(run, i)
- }
- }
- // Wrap up the final run, if any
- if len(run) > 0 {
- allRuns = append(allRuns, run)
- }
- return allRuns
-}
-
-// Definition BD13. Determine isolating run sequences.
-func (p *paragraph) determineIsolatingRunSequences() []*isolatingRunSequence {
- levelRuns := p.determineLevelRuns()
-
- // Compute the run that each character belongs to
- runForCharacter := make([]int, p.Len())
- for i, run := range levelRuns {
- for _, index := range run {
- runForCharacter[index] = i
- }
- }
-
- sequences := []*isolatingRunSequence{}
-
- var currentRunSequence []int
-
- for _, run := range levelRuns {
- first := run[0]
- if p.initialTypes[first] != PDI || p.matchingIsolateInitiator[first] == -1 {
- currentRunSequence = nil
- for {
- // Copy this level run into currentRunSequence
- currentRunSequence = append(currentRunSequence, run...)
-
- last := currentRunSequence[len(currentRunSequence)-1]
- lastT := p.initialTypes[last]
- if lastT.in(LRI, RLI, FSI) && p.matchingPDI[last] != p.Len() {
- run = levelRuns[runForCharacter[p.matchingPDI[last]]]
- } else {
- break
- }
- }
- sequences = append(sequences, p.isolatingRunSequence(currentRunSequence))
- }
- }
- return sequences
-}
-
-// Assign level information to characters removed by rule X9. This is for
-// ease of relating the level information to the original input data. Note
-// that the levels assigned to these codes are arbitrary; they're chosen so
-// as to avoid breaking level runs.
-func (p *paragraph) assignLevelsToCharactersRemovedByX9() {
- for i, t := range p.initialTypes {
- if t.in(LRE, RLE, LRO, RLO, PDF, BN) {
- p.resultTypes[i] = t
- p.resultLevels[i] = -1
- }
- }
- // now propagate forward the levels information (could have
- // propagated backward, the main thing is not to introduce a level
- // break where one doesn't already exist).
-
- if p.resultLevels[0] == -1 {
- p.resultLevels[0] = p.embeddingLevel
- }
- for i := 1; i < len(p.initialTypes); i++ {
- if p.resultLevels[i] == -1 {
- p.resultLevels[i] = p.resultLevels[i-1]
- }
- }
- // Embedding information is for informational purposes only so need not be
- // adjusted.
-}
-
-//
-// Output
-//
-
-// getLevels computes levels array breaking lines at offsets in linebreaks.
-// Rule L1.
-//
-// The linebreaks array must include at least one value. The values must be
-// in strictly increasing order (no duplicates) between 1 and the length of
-// the text, inclusive. The last value must be the length of the text.
-func (p *paragraph) getLevels(linebreaks []int) []level {
- // Note that since the previous processing has removed all
- // P, S, and WS values from resultTypes, the values referred to
- // in these rules are the initial types, before any processing
- // has been applied (including processing of overrides).
- //
- // This example implementation has reinserted explicit format codes
- // and BN, in order that the levels array correspond to the
- // initial text. Their final placement is not normative.
- // These codes are treated like WS in this implementation,
- // so they don't interrupt sequences of WS.
-
- validateLineBreaks(linebreaks, p.Len())
-
- result := append([]level(nil), p.resultLevels...)
-
- // don't worry about linebreaks since if there is a break within
- // a series of WS values preceding S, the linebreak itself
- // causes the reset.
- for i, t := range p.initialTypes {
- if t.in(B, S) {
- // Rule L1, clauses one and two.
- result[i] = p.embeddingLevel
-
- // Rule L1, clause three.
- for j := i - 1; j >= 0; j-- {
- if isWhitespace(p.initialTypes[j]) { // including format codes
- result[j] = p.embeddingLevel
- } else {
- break
- }
- }
- }
- }
-
- // Rule L1, clause four.
- start := 0
- for _, limit := range linebreaks {
- for j := limit - 1; j >= start; j-- {
- if isWhitespace(p.initialTypes[j]) { // including format codes
- result[j] = p.embeddingLevel
- } else {
- break
- }
- }
- start = limit
- }
-
- return result
-}
-
-// getReordering returns the reordering of lines from a visual index to a
-// logical index for line breaks at the given offsets.
-//
-// Lines are concatenated from left to right. So for example, the fifth
-// character from the left on the third line is
-//
-// getReordering(linebreaks)[linebreaks[1] + 4]
-//
-// (linebreaks[1] is the position after the last character of the second
-// line, which is also the index of the first character on the third line,
-// and adding four gets the fifth character from the left).
-//
-// The linebreaks array must include at least one value. The values must be
-// in strictly increasing order (no duplicates) between 1 and the length of
-// the text, inclusive. The last value must be the length of the text.
-func (p *paragraph) getReordering(linebreaks []int) []int {
- validateLineBreaks(linebreaks, p.Len())
-
- return computeMultilineReordering(p.getLevels(linebreaks), linebreaks)
-}
-
-// Return multiline reordering array for a given level array. Reordering
-// does not occur across a line break.
-func computeMultilineReordering(levels []level, linebreaks []int) []int {
- result := make([]int, len(levels))
-
- start := 0
- for _, limit := range linebreaks {
- tempLevels := make([]level, limit-start)
- copy(tempLevels, levels[start:])
-
- for j, order := range computeReordering(tempLevels) {
- result[start+j] = order + start
- }
- start = limit
- }
- return result
-}
-
-// Return reordering array for a given level array. This reorders a single
-// line. The reordering is a visual to logical map. For example, the
-// leftmost char is the one at index order[0] in the input. Rule L2.
-func computeReordering(levels []level) []int {
- result := make([]int, len(levels))
- // initialize order
- for i := range result {
- result[i] = i
- }
-
- // locate highest level found on line.
- // Note the rules say text, but no reordering across line bounds is
- // performed, so this is sufficient.
- highestLevel := level(0)
- lowestOddLevel := level(maxDepth + 2)
- for _, level := range levels {
- if level > highestLevel {
- highestLevel = level
- }
- if level&1 != 0 && level < lowestOddLevel {
- lowestOddLevel = level
- }
- }
-
- for level := highestLevel; level >= lowestOddLevel; level-- {
- for i := 0; i < len(levels); i++ {
- if levels[i] >= level {
- // find range of text at or above this level
- start := i
- limit := i + 1
- for limit < len(levels) && levels[limit] >= level {
- limit++
- }
-
- for j, k := start, limit-1; j < k; j, k = j+1, k-1 {
- result[j], result[k] = result[k], result[j]
- }
- // skip to end of level run
- i = limit
- }
- }
- }
-
- return result
-}
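
A self-contained mirror of the rule L2 loop above, using plain ints (my own illustration): for a line whose middle run is right-to-left, only that run's positions are reversed.

package main

import "fmt"

// reorder mirrors computeReordering on []int levels: from the highest
// level down to the lowest odd level, reverse every maximal run of
// positions whose level is at or above the current one.
func reorder(levels []int) []int {
	order := make([]int, len(levels))
	for i := range order {
		order[i] = i
	}
	highest, lowestOdd := 0, 127 // 127 plays the role of maxDepth + 2
	for _, l := range levels {
		if l > highest {
			highest = l
		}
		if l%2 == 1 && l < lowestOdd {
			lowestOdd = l
		}
	}
	for l := highest; l >= lowestOdd; l-- {
		for i := 0; i < len(levels); i++ {
			if levels[i] >= l {
				start, limit := i, i+1
				for limit < len(levels) && levels[limit] >= l {
					limit++
				}
				for j, k := start, limit-1; j < k; j, k = j+1, k-1 {
					order[j], order[k] = order[k], order[j]
				}
				i = limit
			}
		}
	}
	return order
}

func main() {
	// LTR text with one embedded RTL run at indexes 3..5.
	fmt.Println(reorder([]int{0, 0, 0, 1, 1, 1, 0, 0}))
	// Output: [0 1 2 5 4 3 6 7]
}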
-
-// isWhitespace reports whether the type is considered a whitespace type for the
-// line break rules.
-func isWhitespace(c Class) bool {
- switch c {
- case LRE, RLE, LRO, RLO, PDF, LRI, RLI, FSI, PDI, BN, WS:
- return true
- }
- return false
-}
-
-// isRemovedByX9 reports whether the type is one of the types removed in X9.
-func isRemovedByX9(c Class) bool {
- switch c {
- case LRE, RLE, LRO, RLO, PDF, BN:
- return true
- }
- return false
-}
-
-// typeForLevel reports the strong type (L or R) corresponding to the level.
-func typeForLevel(level level) Class {
- if (level & 0x1) == 0 {
- return L
- }
- return R
-}
-
-func validateTypes(types []Class) error {
- if len(types) == 0 {
- return fmt.Errorf("types is null")
- }
- for i, t := range types[:len(types)-1] {
- if t == B {
- return fmt.Errorf("B type before end of paragraph at index: %d", i)
- }
- }
- return nil
-}
-
-func validateParagraphEmbeddingLevel(embeddingLevel level) error {
- if embeddingLevel != implicitLevel &&
- embeddingLevel != 0 &&
- embeddingLevel != 1 {
- return fmt.Errorf("illegal paragraph embedding level: %d", embeddingLevel)
- }
- return nil
-}
-
-func validateLineBreaks(linebreaks []int, textLength int) error {
- prev := 0
- for i, next := range linebreaks {
- if next <= prev {
- return fmt.Errorf("bad linebreak: %d at index: %d", next, i)
- }
- prev = next
- }
- if prev != textLength {
- return fmt.Errorf("last linebreak was %d, want %d", prev, textLength)
- }
- return nil
-}
-
-func validatePbTypes(pairTypes []bracketType) error {
- if len(pairTypes) == 0 {
- return fmt.Errorf("pairTypes is null")
- }
- for i, pt := range pairTypes {
- switch pt {
- case bpNone, bpOpen, bpClose:
- default:
- return fmt.Errorf("illegal pairType value at %d: %v", i, pairTypes[i])
- }
- }
- return nil
-}
-
-func validatePbValues(pairValues []rune, pairTypes []bracketType) error {
- if pairValues == nil {
- return fmt.Errorf("pairValues is null")
- }
- if len(pairTypes) != len(pairValues) {
- return fmt.Errorf("pairTypes is different length from pairValues")
- }
- return nil
-}
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/forminfo.go b/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/forminfo.go
deleted file mode 100644
index 526c7033ac..0000000000
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/forminfo.go
+++ /dev/null
@@ -1,278 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package norm
-
-import "encoding/binary"
-
-// This file contains Form-specific logic and wrappers for data in tables.go.
-
-// Rune info is stored in a separate trie per composing form. A composing form
-// and its corresponding decomposing form share the same trie. Each trie maps
-// a rune to a uint16. The values take two forms. For v >= 0x8000:
-// bits
-// 15: 1 (inverse of NFD_QC bit of qcInfo)
-// 13..7: qcInfo (see below). isYesD is always true (no decomposition).
-// 6..0: ccc (compressed CCC value).
-// For v < 0x8000, the respective rune has a decomposition and v is an index
-// into a byte array of UTF-8 decomposition sequences and additional info and
-// has the form:
-// <header> <decomp_byte>* [<tccc> [<lccc>]]
-// The header contains the number of bytes in the decomposition (excluding this
-// length byte). The two most significant bits of this length byte correspond
-// to bit 5 and 4 of qcInfo (see below). The byte sequence itself starts at v+1.
-// The byte sequence is followed by a trailing and leading CCC if the values
-// for these are not zero. The value of v determines which ccc are appended
-// to the sequences. For v < firstCCC, there are none, for v >= firstCCC,
-// the sequence is followed by a trailing ccc, and for v >= firstLeadingCC
-// there is an additional leading ccc. The value of tccc itself is the
-// trailing CCC shifted left 2 bits. The two least-significant bits of tccc
-// are the number of trailing non-starters.
-
-const (
- qcInfoMask = 0x3F // to clear all but the relevant bits in a qcInfo
- headerLenMask = 0x3F // extract the length value from the header byte
- headerFlagsMask = 0xC0 // extract the qcInfo bits from the header byte
-)
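
A minimal decode of a hypothetical v >= 0x8000 trie value along the layout described above (compInfo at the end of this file performs the full conversion into a Properties; the value below is invented, not taken from the generated tables):

package main

import "fmt"

const qcInfoMask = 0x3F // mirrors the constant above

func main() {
	v := uint16(0x8430) // hypothetical packed value
	if v >= 0x8000 {
		qc := uint8(v>>8) & qcInfoMask // quick check bits
		cccIndex := v & 0x7F           // compressed CCC value (bits 6..0)
		fmt.Printf("qcInfo=%#x cccIndex=%d\n", qc, cccIndex) // qcInfo=0x4 cccIndex=48
	}
}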
-
-// Properties provides access to normalization properties of a rune.
-type Properties struct {
- pos uint8 // start position in reorderBuffer; used in composition.go
- size uint8 // length of UTF-8 encoding of this rune
- ccc uint8 // leading canonical combining class (ccc if not decomposition)
- tccc uint8 // trailing canonical combining class (ccc if not decomposition)
- nLead uint8 // number of leading non-starters.
- flags qcInfo // quick check flags
- index uint16
-}
-
-// functions dispatchable per form
-type lookupFunc func(b input, i int) Properties
-
-// formInfo holds Form-specific functions and tables.
-type formInfo struct {
- form Form
- composing, compatibility bool // form type
- info lookupFunc
- nextMain iterFunc
-}
-
-var formTable = []*formInfo{{
- form: NFC,
- composing: true,
- compatibility: false,
- info: lookupInfoNFC,
- nextMain: nextComposed,
-}, {
- form: NFD,
- composing: false,
- compatibility: false,
- info: lookupInfoNFC,
- nextMain: nextDecomposed,
-}, {
- form: NFKC,
- composing: true,
- compatibility: true,
- info: lookupInfoNFKC,
- nextMain: nextComposed,
-}, {
- form: NFKD,
- composing: false,
- compatibility: true,
- info: lookupInfoNFKC,
- nextMain: nextDecomposed,
-}}
-
-// We do not distinguish between boundaries for NFC, NFD, etc. to avoid
-// unexpected behavior for the user. For example, in NFD, there is a boundary
-// after 'a'. However, 'a' might combine with modifiers, so from the application's
-// perspective it is not a good boundary. We will therefore always use the
-// boundaries for the combining variants.
-
-// BoundaryBefore returns true if this rune starts a new segment and
-// cannot combine with any rune on the left.
-func (p Properties) BoundaryBefore() bool {
- if p.ccc == 0 && !p.combinesBackward() {
- return true
- }
- // We assume that the CCC of the first character in a decomposition
- // is always non-zero if different from info.ccc and that we can return
- // false at this point. This is verified by maketables.
- return false
-}
-
-// BoundaryAfter returns true if runes cannot combine with or otherwise
-// interact with this or previous runes.
-func (p Properties) BoundaryAfter() bool {
- // TODO: loosen these conditions.
- return p.isInert()
-}
-
-// We pack quick check data in 6 bits:
-// 5: Combines forward (0 == false, 1 == true)
-// 4..3: NFC_QC Yes(00), No (10), or Maybe (11)
-// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition.
-// 1..0: Number of trailing non-starters.
-//
-// When all 4 bits are zero, the character is inert, meaning it is never
-// influenced by normalization.
-type qcInfo uint8
-
-func (p Properties) isYesC() bool { return p.flags&0x10 == 0 }
-func (p Properties) isYesD() bool { return p.flags&0x4 == 0 }
-
-func (p Properties) combinesForward() bool { return p.flags&0x20 != 0 }
-func (p Properties) combinesBackward() bool { return p.flags&0x8 != 0 } // == isMaybe
-func (p Properties) hasDecomposition() bool { return p.flags&0x4 != 0 } // == isNoD
-
-func (p Properties) isInert() bool {
- return p.flags&qcInfoMask == 0 && p.ccc == 0
-}
-
-func (p Properties) multiSegment() bool {
- return p.index >= firstMulti && p.index < endMulti
-}
-
-func (p Properties) nLeadingNonStarters() uint8 {
- return p.nLead
-}
-
-func (p Properties) nTrailingNonStarters() uint8 {
- return uint8(p.flags & 0x03)
-}
-
-// Decomposition returns the decomposition for the underlying rune
-// or nil if there is none.
-func (p Properties) Decomposition() []byte {
- // TODO: create the decomposition for Hangul?
- if p.index == 0 {
- return nil
- }
- i := p.index
- n := decomps[i] & headerLenMask
- i++
- return decomps[i : i+uint16(n)]
-}
-
-// Size returns the length of UTF-8 encoding of the rune.
-func (p Properties) Size() int {
- return int(p.size)
-}
-
-// CCC returns the canonical combining class of the underlying rune.
-func (p Properties) CCC() uint8 {
- if p.index >= firstCCCZeroExcept {
- return 0
- }
- return ccc[p.ccc]
-}
-
-// LeadCCC returns the CCC of the first rune in the decomposition.
-// If there is no decomposition, LeadCCC equals CCC.
-func (p Properties) LeadCCC() uint8 {
- return ccc[p.ccc]
-}
-
-// TrailCCC returns the CCC of the last rune in the decomposition.
-// If there is no decomposition, TrailCCC equals CCC.
-func (p Properties) TrailCCC() uint8 {
- return ccc[p.tccc]
-}
-
-func buildRecompMap() {
- recompMap = make(map[uint32]rune, len(recompMapPacked)/8)
- var buf [8]byte
- for i := 0; i < len(recompMapPacked); i += 8 {
- copy(buf[:], recompMapPacked[i:i+8])
- key := binary.BigEndian.Uint32(buf[:4])
- val := binary.BigEndian.Uint32(buf[4:])
- recompMap[key] = rune(val)
- }
-}
-
-// Recomposition
-// We use 32-bit keys instead of 64-bit for the two codepoint keys.
-// This clips off the bits of three entries, but we know this will not
-// result in a collision. In the unlikely event that changes to
-// UnicodeData.txt introduce collisions, the compiler will catch it.
-// Note that the recomposition maps for NFC and NFKC are identical.
-
-// combine returns the combined rune or 0 if it doesn't exist.
-//
-// The caller is responsible for calling
-// recompMapOnce.Do(buildRecompMap) sometime before this is called.
-func combine(a, b rune) rune {
- key := uint32(uint16(a))<<16 + uint32(uint16(b))
- if recompMap == nil {
- panic("caller error") // see func comment
- }
- return recompMap[key]
-}
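
The key packing used by combine, applied to concrete runes (a hedged illustration; a real lookup would also need the generated recompMap):

package main

import "fmt"

func main() {
	a, b := rune(0x0065), rune(0x0301) // 'e' + COMBINING ACUTE ACCENT
	key := uint32(uint16(a))<<16 + uint32(uint16(b))
	fmt.Printf("key=%08x\n", key) // key=00650301
	// recompMap[key] would yield U+00E9 ('é'), the NFC composition.
}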
-
-func lookupInfoNFC(b input, i int) Properties {
- v, sz := b.charinfoNFC(i)
- return compInfo(v, sz)
-}
-
-func lookupInfoNFKC(b input, i int) Properties {
- v, sz := b.charinfoNFKC(i)
- return compInfo(v, sz)
-}
-
-// Properties returns properties for the first rune in s.
-func (f Form) Properties(s []byte) Properties {
- if f == NFC || f == NFD {
- return compInfo(nfcData.lookup(s))
- }
- return compInfo(nfkcData.lookup(s))
-}
-
-// PropertiesString returns properties for the first rune in s.
-func (f Form) PropertiesString(s string) Properties {
- if f == NFC || f == NFD {
- return compInfo(nfcData.lookupString(s))
- }
- return compInfo(nfkcData.lookupString(s))
-}
-
-// compInfo converts the information contained in v and sz
-// to a Properties. See the comment at the top of the file
-// for more information on the format.
-func compInfo(v uint16, sz int) Properties {
- if v == 0 {
- return Properties{size: uint8(sz)}
- } else if v >= 0x8000 {
- p := Properties{
- size: uint8(sz),
- ccc: uint8(v),
- tccc: uint8(v),
- flags: qcInfo(v >> 8),
- }
- if p.ccc > 0 || p.combinesBackward() {
- p.nLead = uint8(p.flags & 0x3)
- }
- return p
- }
- // has decomposition
- h := decomps[v]
- f := (qcInfo(h&headerFlagsMask) >> 2) | 0x4
- p := Properties{size: uint8(sz), flags: f, index: v}
- if v >= firstCCC {
- v += uint16(h&headerLenMask) + 1
- c := decomps[v]
- p.tccc = c >> 2
- p.flags |= qcInfo(c & 0x3)
- if v >= firstLeadingCCC {
- p.nLead = c & 0x3
- if v >= firstStarterWithNLead {
- // We were tricked. Remove the decomposition.
- p.flags &= 0x03
- p.index = 0
- return p
- }
- p.ccc = decomps[v+1]
- }
- }
- return p
-}
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/normalize.go b/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/normalize.go
deleted file mode 100644
index 95efcf26e8..0000000000
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/normalize.go
+++ /dev/null
@@ -1,609 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Note: the file data_test.go that is generated should not be checked in.
-//go:generate go run maketables.go triegen.go
-//go:generate go test -tags test
-
-// Package norm contains types and functions for normalizing Unicode strings.
-package norm // import "golang.org/x/text/unicode/norm"
-
-import (
- "unicode/utf8"
-
- "golang.org/x/text/transform"
-)
-
-// A Form denotes a canonical representation of Unicode code points.
-// The Unicode-defined normalization and equivalence forms are:
-//
-// NFC Unicode Normalization Form C
-// NFD Unicode Normalization Form D
-// NFKC Unicode Normalization Form KC
-// NFKD Unicode Normalization Form KD
-//
-// For a Form f, this documentation uses the notation f(x) to mean
-// the bytes or string x converted to the given form.
-// A position n in x is called a boundary if conversion to the form can
-// proceed independently on both sides:
-// f(x) == append(f(x[0:n]), f(x[n:])...)
-//
-// References: https://unicode.org/reports/tr15/ and
-// https://unicode.org/notes/tn5/.
-type Form int
-
-const (
- NFC Form = iota
- NFD
- NFKC
- NFKD
-)
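
A usage sketch of the exported API that follows, composing a decomposed "é"; the behavior matches the package documentation, though the example itself is mine:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	s := "e\u0301" // 'e' followed by U+0301 COMBINING ACUTE ACCENT
	fmt.Println(norm.NFC.String(s))         // "é" as the single rune U+00E9
	fmt.Println(norm.NFC.IsNormalString(s)) // false: s is not NFC-normal
	fmt.Println(norm.NFD.IsNormalString(s)) // true: s is already NFD-normal
}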
-
-// Bytes returns f(b). May return b if f(b) = b.
-func (f Form) Bytes(b []byte) []byte {
- src := inputBytes(b)
- ft := formTable[f]
- n, ok := ft.quickSpan(src, 0, len(b), true)
- if ok {
- return b
- }
- out := make([]byte, n, len(b))
- copy(out, b[0:n])
- rb := reorderBuffer{f: *ft, src: src, nsrc: len(b), out: out, flushF: appendFlush}
- return doAppendInner(&rb, n)
-}
-
-// String returns f(s).
-func (f Form) String(s string) string {
- src := inputString(s)
- ft := formTable[f]
- n, ok := ft.quickSpan(src, 0, len(s), true)
- if ok {
- return s
- }
- out := make([]byte, n, len(s))
- copy(out, s[0:n])
- rb := reorderBuffer{f: *ft, src: src, nsrc: len(s), out: out, flushF: appendFlush}
- return string(doAppendInner(&rb, n))
-}
-
-// IsNormal returns true if b == f(b).
-func (f Form) IsNormal(b []byte) bool {
- src := inputBytes(b)
- ft := formTable[f]
- bp, ok := ft.quickSpan(src, 0, len(b), true)
- if ok {
- return true
- }
- rb := reorderBuffer{f: *ft, src: src, nsrc: len(b)}
- rb.setFlusher(nil, cmpNormalBytes)
- for bp < len(b) {
- rb.out = b[bp:]
- if bp = decomposeSegment(&rb, bp, true); bp < 0 {
- return false
- }
- bp, _ = rb.f.quickSpan(rb.src, bp, len(b), true)
- }
- return true
-}
-
-func cmpNormalBytes(rb *reorderBuffer) bool {
- b := rb.out
- for i := 0; i < rb.nrune; i++ {
- info := rb.rune[i]
- if int(info.size) > len(b) {
- return false
- }
- p := info.pos
- pe := p + info.size
- for ; p < pe; p++ {
- if b[0] != rb.byte[p] {
- return false
- }
- b = b[1:]
- }
- }
- return true
-}
-
-// IsNormalString returns true if s == f(s).
-func (f Form) IsNormalString(s string) bool {
- src := inputString(s)
- ft := formTable[f]
- bp, ok := ft.quickSpan(src, 0, len(s), true)
- if ok {
- return true
- }
- rb := reorderBuffer{f: *ft, src: src, nsrc: len(s)}
- rb.setFlusher(nil, func(rb *reorderBuffer) bool {
- for i := 0; i < rb.nrune; i++ {
- info := rb.rune[i]
- if bp+int(info.size) > len(s) {
- return false
- }
- p := info.pos
- pe := p + info.size
- for ; p < pe; p++ {
- if s[bp] != rb.byte[p] {
- return false
- }
- bp++
- }
- }
- return true
- })
- for bp < len(s) {
- if bp = decomposeSegment(&rb, bp, true); bp < 0 {
- return false
- }
- bp, _ = rb.f.quickSpan(rb.src, bp, len(s), true)
- }
- return true
-}
-
-// patchTail fixes a case where a rune may be incorrectly normalized
-// if it is followed by illegal continuation bytes. It reports
-// whether the decomposition is still in progress.
-func patchTail(rb *reorderBuffer) bool {
- info, p := lastRuneStart(&rb.f, rb.out)
- if p == -1 || info.size == 0 {
- return true
- }
- end := p + int(info.size)
- extra := len(rb.out) - end
- if extra > 0 {
- // Potentially allocating memory. However, this only
- // happens with ill-formed UTF-8.
- x := make([]byte, 0)
- x = append(x, rb.out[len(rb.out)-extra:]...)
- rb.out = rb.out[:end]
- decomposeToLastBoundary(rb)
- rb.doFlush()
- rb.out = append(rb.out, x...)
- return false
- }
- buf := rb.out[p:]
- rb.out = rb.out[:p]
- decomposeToLastBoundary(rb)
- if s := rb.ss.next(info); s == ssStarter {
- rb.doFlush()
- rb.ss.first(info)
- } else if s == ssOverflow {
- rb.doFlush()
- rb.insertCGJ()
- rb.ss = 0
- }
- rb.insertUnsafe(inputBytes(buf), 0, info)
- return true
-}
-
-func appendQuick(rb *reorderBuffer, i int) int {
- if rb.nsrc == i {
- return i
- }
- end, _ := rb.f.quickSpan(rb.src, i, rb.nsrc, true)
- rb.out = rb.src.appendSlice(rb.out, i, end)
- return end
-}
-
-// Append returns f(append(out, b...)).
-// The buffer out must be nil, empty, or equal to f(out).
-func (f Form) Append(out []byte, src ...byte) []byte {
- return f.doAppend(out, inputBytes(src), len(src))
-}
-
-func (f Form) doAppend(out []byte, src input, n int) []byte {
- if n == 0 {
- return out
- }
- ft := formTable[f]
- // Attempt to do a quickSpan first so we can avoid initializing the reorderBuffer.
- if len(out) == 0 {
- p, _ := ft.quickSpan(src, 0, n, true)
- out = src.appendSlice(out, 0, p)
- if p == n {
- return out
- }
- rb := reorderBuffer{f: *ft, src: src, nsrc: n, out: out, flushF: appendFlush}
- return doAppendInner(&rb, p)
- }
- rb := reorderBuffer{f: *ft, src: src, nsrc: n}
- return doAppend(&rb, out, 0)
-}
-
-func doAppend(rb *reorderBuffer, out []byte, p int) []byte {
- rb.setFlusher(out, appendFlush)
- src, n := rb.src, rb.nsrc
- doMerge := len(out) > 0
- if q := src.skipContinuationBytes(p); q > p {
- // Move leading non-starters to destination.
- rb.out = src.appendSlice(rb.out, p, q)
- p = q
- doMerge = patchTail(rb)
- }
- fd := &rb.f
- if doMerge {
- var info Properties
- if p < n {
- info = fd.info(src, p)
- if !info.BoundaryBefore() || info.nLeadingNonStarters() > 0 {
- if p == 0 {
- decomposeToLastBoundary(rb)
- }
- p = decomposeSegment(rb, p, true)
- }
- }
- if info.size == 0 {
- rb.doFlush()
- // Append incomplete UTF-8 encoding.
- return src.appendSlice(rb.out, p, n)
- }
- if rb.nrune > 0 {
- return doAppendInner(rb, p)
- }
- }
- p = appendQuick(rb, p)
- return doAppendInner(rb, p)
-}
-
-func doAppendInner(rb *reorderBuffer, p int) []byte {
- for n := rb.nsrc; p < n; {
- p = decomposeSegment(rb, p, true)
- p = appendQuick(rb, p)
- }
- return rb.out
-}
-
-// AppendString returns f(append(out, []byte(s))).
-// The buffer out must be nil, empty, or equal to f(out).
-func (f Form) AppendString(out []byte, src string) []byte {
- return f.doAppend(out, inputString(src), len(src))
-}
-
-// QuickSpan returns a boundary n such that b[0:n] == f(b[0:n]).
-// It is not guaranteed to return the largest such n.
-func (f Form) QuickSpan(b []byte) int {
- n, _ := formTable[f].quickSpan(inputBytes(b), 0, len(b), true)
- return n
-}
-
-// Span implements transform.SpanningTransformer. It returns a boundary n such
-// that b[0:n] == f(b[0:n]). It is not guaranteed to return the largest such n.
-func (f Form) Span(b []byte, atEOF bool) (n int, err error) {
- n, ok := formTable[f].quickSpan(inputBytes(b), 0, len(b), atEOF)
- if n < len(b) {
- if !ok {
- err = transform.ErrEndOfSpan
- } else {
- err = transform.ErrShortSrc
- }
- }
- return n, err
-}
-
-// SpanString returns a boundary n such that s[0:n] == f(s[0:n]).
-// It is not guaranteed to return the largest such n.
-func (f Form) SpanString(s string, atEOF bool) (n int, err error) {
- n, ok := formTable[f].quickSpan(inputString(s), 0, len(s), atEOF)
- if n < len(s) {
- if !ok {
- err = transform.ErrEndOfSpan
- } else {
- err = transform.ErrShortSrc
- }
- }
- return n, err
-}
-
-// quickSpan returns a boundary n such that src[0:n] == f(src[0:n]) and
-// whether any non-normalized parts were found. If atEOF is false, n will
-// not point past the last segment if this segment might become
-// non-normalized by appending other runes.
-func (f *formInfo) quickSpan(src input, i, end int, atEOF bool) (n int, ok bool) {
- var lastCC uint8
- ss := streamSafe(0)
- lastSegStart := i
- for n = end; i < n; {
- if j := src.skipASCII(i, n); i != j {
- i = j
- lastSegStart = i - 1
- lastCC = 0
- ss = 0
- continue
- }
- info := f.info(src, i)
- if info.size == 0 {
- if atEOF {
- // include incomplete runes
- return n, true
- }
- return lastSegStart, true
- }
- // This block needs to be before the next, because it is possible to
- // have an overflow for runes that are starters (e.g. with U+FF9E).
- switch ss.next(info) {
- case ssStarter:
- lastSegStart = i
- case ssOverflow:
- return lastSegStart, false
- case ssSuccess:
- if lastCC > info.ccc {
- return lastSegStart, false
- }
- }
- if f.composing {
- if !info.isYesC() {
- break
- }
- } else {
- if !info.isYesD() {
- break
- }
- }
- lastCC = info.ccc
- i += int(info.size)
- }
- if i == n {
- if !atEOF {
- n = lastSegStart
- }
- return n, true
- }
- return lastSegStart, false
-}
-
-// QuickSpanString returns a boundary n such that s[0:n] == f(s[0:n]).
-// It is not guaranteed to return the largest such n.
-func (f Form) QuickSpanString(s string) int {
- n, _ := formTable[f].quickSpan(inputString(s), 0, len(s), true)
- return n
-}
-
-// FirstBoundary returns the position i of the first boundary in b
-// or -1 if b contains no boundary.
-func (f Form) FirstBoundary(b []byte) int {
- return f.firstBoundary(inputBytes(b), len(b))
-}
-
-func (f Form) firstBoundary(src input, nsrc int) int {
- i := src.skipContinuationBytes(0)
- if i >= nsrc {
- return -1
- }
- fd := formTable[f]
- ss := streamSafe(0)
- // We should call ss.first here, but we can't as the first rune is
- // skipped already. This means FirstBoundary can't really determine
- // CGJ insertion points correctly. Luckily it doesn't have to.
- for {
- info := fd.info(src, i)
- if info.size == 0 {
- return -1
- }
- if s := ss.next(info); s != ssSuccess {
- return i
- }
- i += int(info.size)
- if i >= nsrc {
- if !info.BoundaryAfter() && !ss.isMax() {
- return -1
- }
- return nsrc
- }
- }
-}
-
-// FirstBoundaryInString returns the position i of the first boundary in s
-// or -1 if s contains no boundary.
-func (f Form) FirstBoundaryInString(s string) int {
- return f.firstBoundary(inputString(s), len(s))
-}
-
-// NextBoundary reports the index of the boundary between the first and next
-// segment in b or -1 if atEOF is false and there are not enough bytes to
-// determine this boundary.
-func (f Form) NextBoundary(b []byte, atEOF bool) int {
- return f.nextBoundary(inputBytes(b), len(b), atEOF)
-}
-
-// NextBoundaryInString reports the index of the boundary between the first and
-// next segment in s or -1 if atEOF is false and there are not enough bytes to
-// determine this boundary.
-func (f Form) NextBoundaryInString(s string, atEOF bool) int {
- return f.nextBoundary(inputString(s), len(s), atEOF)
-}
-
-func (f Form) nextBoundary(src input, nsrc int, atEOF bool) int {
- if nsrc == 0 {
- if atEOF {
- return 0
- }
- return -1
- }
- fd := formTable[f]
- info := fd.info(src, 0)
- if info.size == 0 {
- if atEOF {
- return 1
- }
- return -1
- }
- ss := streamSafe(0)
- ss.first(info)
-
- for i := int(info.size); i < nsrc; i += int(info.size) {
- info = fd.info(src, i)
- if info.size == 0 {
- if atEOF {
- return i
- }
- return -1
- }
- // TODO: Using streamSafe to determine the boundary isn't the same as
- // using BoundaryBefore. Determine which should be used.
- if s := ss.next(info); s != ssSuccess {
- return i
- }
- }
- if !atEOF && !info.BoundaryAfter() && !ss.isMax() {
- return -1
- }
- return nsrc
-}
-
-// LastBoundary returns the position i of the last boundary in b
-// or -1 if b contains no boundary.
-func (f Form) LastBoundary(b []byte) int {
- return lastBoundary(formTable[f], b)
-}
-
-func lastBoundary(fd *formInfo, b []byte) int {
- i := len(b)
- info, p := lastRuneStart(fd, b)
- if p == -1 {
- return -1
- }
- if info.size == 0 { // ends with incomplete rune
- if p == 0 { // starts with incomplete rune
- return -1
- }
- i = p
- info, p = lastRuneStart(fd, b[:i])
- if p == -1 { // incomplete UTF-8 encoding or non-starter bytes without a starter
- return i
- }
- }
- if p+int(info.size) != i { // trailing non-starter bytes: illegal UTF-8
- return i
- }
- if info.BoundaryAfter() {
- return i
- }
- ss := streamSafe(0)
- v := ss.backwards(info)
- for i = p; i >= 0 && v != ssStarter; i = p {
- info, p = lastRuneStart(fd, b[:i])
- if v = ss.backwards(info); v == ssOverflow {
- break
- }
- if p+int(info.size) != i {
- if p == -1 { // no boundary found
- return -1
- }
- return i // boundary after an illegal UTF-8 encoding
- }
- }
- return i
-}
-
-// decomposeSegment scans the first segment in src into rb. It inserts 0x034f
-// (Grapheme Joiner) when it encounters a sequence of more than 30 non-starters
-// and returns the number of bytes consumed from src or iShortDst or iShortSrc.
-func decomposeSegment(rb *reorderBuffer, sp int, atEOF bool) int {
- // Force one character to be consumed.
- info := rb.f.info(rb.src, sp)
- if info.size == 0 {
- return 0
- }
- if s := rb.ss.next(info); s == ssStarter {
- // TODO: this could be removed if we don't support merging.
- if rb.nrune > 0 {
- goto end
- }
- } else if s == ssOverflow {
- rb.insertCGJ()
- goto end
- }
- if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
- return int(err)
- }
- for {
- sp += int(info.size)
- if sp >= rb.nsrc {
- if !atEOF && !info.BoundaryAfter() {
- return int(iShortSrc)
- }
- break
- }
- info = rb.f.info(rb.src, sp)
- if info.size == 0 {
- if !atEOF {
- return int(iShortSrc)
- }
- break
- }
- if s := rb.ss.next(info); s == ssStarter {
- break
- } else if s == ssOverflow {
- rb.insertCGJ()
- break
- }
- if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
- return int(err)
- }
- }
-end:
- if !rb.doFlush() {
- return int(iShortDst)
- }
- return sp
-}
-
-// lastRuneStart returns the runeInfo and position of the last
-// rune in buf or the zero runeInfo and -1 if no rune was found.
-func lastRuneStart(fd *formInfo, buf []byte) (Properties, int) {
- p := len(buf) - 1
- for ; p >= 0 && !utf8.RuneStart(buf[p]); p-- {
- }
- if p < 0 {
- return Properties{}, -1
- }
- return fd.info(inputBytes(buf), p), p
-}
-
-// decomposeToLastBoundary finds an open segment at the end of the buffer
-// and scans it into rb. It trims rb.out to the buffer minus the last segment.
-func decomposeToLastBoundary(rb *reorderBuffer) {
- fd := &rb.f
- info, i := lastRuneStart(fd, rb.out)
- if int(info.size) != len(rb.out)-i {
- // illegal trailing continuation bytes
- return
- }
- if info.BoundaryAfter() {
- return
- }
- var add [maxNonStarters + 1]Properties // stores runeInfo in reverse order
- padd := 0
- ss := streamSafe(0)
- p := len(rb.out)
- for {
- add[padd] = info
- v := ss.backwards(info)
- if v == ssOverflow {
- // Note that if we have an overflow, the string we are appending to
- // is not correctly normalized. In this case the behavior is undefined.
- break
- }
- padd++
- p -= int(info.size)
- if v == ssStarter || p < 0 {
- break
- }
- info, i = lastRuneStart(fd, rb.out[:p])
- if int(info.size) != p-i {
- break
- }
- }
- rb.ss = ss
- // Copy bytes for insertion as we may need to overwrite rb.out.
- var buf [maxBufferSize * utf8.UTFMax]byte
- cp := buf[:copy(buf[:], rb.out[p:])]
- rb.out = rb.out[:p]
- for padd--; padd >= 0; padd-- {
- info = add[padd]
- rb.insertUnsafe(inputBytes(cp), 0, info)
- cp = cp[info.size:]
- }
-}
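The file removed above carries the public API of the vendored norm package (Form, Bytes, String, IsNormal, and the span/boundary helpers). For reference, a minimal sketch of that API in use; this assumes the standalone golang.org/x/text/unicode/norm module rather than this vendored copy, and the inputs are made up for illustration:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// "e" + U+0301 COMBINING ACUTE ACCENT: the decomposed (NFD) spelling of "é".
	decomposed := "e\u0301"

	// NFC.String composes the pair into the single code point U+00E9.
	composed := norm.NFC.String(decomposed)
	fmt.Printf("%q -> %q\n", decomposed, composed) // 3 bytes -> 2 bytes

	// IsNormalString reports whether the input is already in the given form.
	fmt.Println(norm.NFC.IsNormalString(decomposed)) // false
	fmt.Println(norm.NFD.IsNormalString(decomposed)) // true

	// QuickSpanString returns a prefix length n with s[:n] == NFC(s[:n]);
	// per its doc comment it need not be the largest such n.
	fmt.Println(norm.NFC.QuickSpanString("abc" + decomposed))
}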
diff --git a/contrib/go/_std_1.19/src/bufio/bufio.go b/contrib/go/_std_1.19/src/bufio/bufio.go
new file mode 100644
index 0000000000..1da8ffa951
--- /dev/null
+++ b/contrib/go/_std_1.19/src/bufio/bufio.go
@@ -0,0 +1,829 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer
+// object, creating another object (Reader or Writer) that also implements
+// the interface but provides buffering and some help for textual I/O.
+package bufio
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+ defaultBufSize = 4096
+)
+
+var (
+ ErrInvalidUnreadByte = errors.New("bufio: invalid use of UnreadByte")
+ ErrInvalidUnreadRune = errors.New("bufio: invalid use of UnreadRune")
+ ErrBufferFull = errors.New("bufio: buffer full")
+ ErrNegativeCount = errors.New("bufio: negative count")
+)
+
+// Buffered input.
+
+// Reader implements buffering for an io.Reader object.
+type Reader struct {
+ buf []byte
+ rd io.Reader // reader provided by the client
+ r, w int // buf read and write positions
+ err error
+ lastByte int // last byte read for UnreadByte; -1 means invalid
+ lastRuneSize int // size of last rune read for UnreadRune; -1 means invalid
+}
+
+const minReadBufferSize = 16
+const maxConsecutiveEmptyReads = 100
+
+// NewReaderSize returns a new Reader whose buffer has at least the specified
+// size. If the argument io.Reader is already a Reader with large enough
+// size, it returns the underlying Reader.
+func NewReaderSize(rd io.Reader, size int) *Reader {
+ // Is it already a Reader?
+ b, ok := rd.(*Reader)
+ if ok && len(b.buf) >= size {
+ return b
+ }
+ if size < minReadBufferSize {
+ size = minReadBufferSize
+ }
+ r := new(Reader)
+ r.reset(make([]byte, size), rd)
+ return r
+}
+
+// NewReader returns a new Reader whose buffer has the default size.
+func NewReader(rd io.Reader) *Reader {
+ return NewReaderSize(rd, defaultBufSize)
+}
+
+// Size returns the size of the underlying buffer in bytes.
+func (b *Reader) Size() int { return len(b.buf) }
+
+// Reset discards any buffered data, resets all state, and switches
+// the buffered reader to read from r.
+// Calling Reset on the zero value of Reader initializes the internal buffer
+// to the default size.
+func (b *Reader) Reset(r io.Reader) {
+ if b.buf == nil {
+ b.buf = make([]byte, defaultBufSize)
+ }
+ b.reset(b.buf, r)
+}
+
+func (b *Reader) reset(buf []byte, r io.Reader) {
+ *b = Reader{
+ buf: buf,
+ rd: r,
+ lastByte: -1,
+ lastRuneSize: -1,
+ }
+}
+
+var errNegativeRead = errors.New("bufio: reader returned negative count from Read")
+
+// fill reads a new chunk into the buffer.
+func (b *Reader) fill() {
+ // Slide existing data to beginning.
+ if b.r > 0 {
+ copy(b.buf, b.buf[b.r:b.w])
+ b.w -= b.r
+ b.r = 0
+ }
+
+ if b.w >= len(b.buf) {
+ panic("bufio: tried to fill full buffer")
+ }
+
+ // Read new data: try a limited number of times.
+ for i := maxConsecutiveEmptyReads; i > 0; i-- {
+ n, err := b.rd.Read(b.buf[b.w:])
+ if n < 0 {
+ panic(errNegativeRead)
+ }
+ b.w += n
+ if err != nil {
+ b.err = err
+ return
+ }
+ if n > 0 {
+ return
+ }
+ }
+ b.err = io.ErrNoProgress
+}
+
+func (b *Reader) readErr() error {
+ err := b.err
+ b.err = nil
+ return err
+}
+
+// Peek returns the next n bytes without advancing the reader. The bytes stop
+// being valid at the next read call. If Peek returns fewer than n bytes, it
+// also returns an error explaining why the read is short. The error is
+// ErrBufferFull if n is larger than b's buffer size.
+//
+// Calling Peek prevents an UnreadByte or UnreadRune call from succeeding
+// until the next read operation.
+func (b *Reader) Peek(n int) ([]byte, error) {
+ if n < 0 {
+ return nil, ErrNegativeCount
+ }
+
+ b.lastByte = -1
+ b.lastRuneSize = -1
+
+ for b.w-b.r < n && b.w-b.r < len(b.buf) && b.err == nil {
+ b.fill() // b.w-b.r < len(b.buf) => buffer is not full
+ }
+
+ if n > len(b.buf) {
+ return b.buf[b.r:b.w], ErrBufferFull
+ }
+
+ // 0 <= n <= len(b.buf)
+ var err error
+ if avail := b.w - b.r; avail < n {
+ // not enough data in buffer
+ n = avail
+ err = b.readErr()
+ if err == nil {
+ err = ErrBufferFull
+ }
+ }
+ return b.buf[b.r : b.r+n], err
+}
+
+// Discard skips the next n bytes, returning the number of bytes discarded.
+//
+// If Discard skips fewer than n bytes, it also returns an error.
+// If 0 <= n <= b.Buffered(), Discard is guaranteed to succeed without
+// reading from the underlying io.Reader.
+func (b *Reader) Discard(n int) (discarded int, err error) {
+ if n < 0 {
+ return 0, ErrNegativeCount
+ }
+ if n == 0 {
+ return
+ }
+
+ b.lastByte = -1
+ b.lastRuneSize = -1
+
+ remain := n
+ for {
+ skip := b.Buffered()
+ if skip == 0 {
+ b.fill()
+ skip = b.Buffered()
+ }
+ if skip > remain {
+ skip = remain
+ }
+ b.r += skip
+ remain -= skip
+ if remain == 0 {
+ return n, nil
+ }
+ if b.err != nil {
+ return n - remain, b.readErr()
+ }
+ }
+}
+
+// Read reads data into p.
+// It returns the number of bytes read into p.
+// The bytes are taken from at most one Read on the underlying Reader,
+// hence n may be less than len(p).
+// To read exactly len(p) bytes, use io.ReadFull(b, p).
+// If the underlying Reader can return a non-zero count with io.EOF,
+// then this Read method can do so as well; see the [io.Reader] docs.
+func (b *Reader) Read(p []byte) (n int, err error) {
+ n = len(p)
+ if n == 0 {
+ if b.Buffered() > 0 {
+ return 0, nil
+ }
+ return 0, b.readErr()
+ }
+ if b.r == b.w {
+ if b.err != nil {
+ return 0, b.readErr()
+ }
+ if len(p) >= len(b.buf) {
+ // Large read, empty buffer.
+ // Read directly into p to avoid copy.
+ n, b.err = b.rd.Read(p)
+ if n < 0 {
+ panic(errNegativeRead)
+ }
+ if n > 0 {
+ b.lastByte = int(p[n-1])
+ b.lastRuneSize = -1
+ }
+ return n, b.readErr()
+ }
+ // One read.
+ // Do not use b.fill, which will loop.
+ b.r = 0
+ b.w = 0
+ n, b.err = b.rd.Read(b.buf)
+ if n < 0 {
+ panic(errNegativeRead)
+ }
+ if n == 0 {
+ return 0, b.readErr()
+ }
+ b.w += n
+ }
+
+ // copy as much as we can
+ // Note: if the slice panics here, it is probably because
+ // the underlying reader returned a bad count. See issue 49795.
+ n = copy(p, b.buf[b.r:b.w])
+ b.r += n
+ b.lastByte = int(b.buf[b.r-1])
+ b.lastRuneSize = -1
+ return n, nil
+}
+
+// ReadByte reads and returns a single byte.
+// If no byte is available, returns an error.
+func (b *Reader) ReadByte() (byte, error) {
+ b.lastRuneSize = -1
+ for b.r == b.w {
+ if b.err != nil {
+ return 0, b.readErr()
+ }
+ b.fill() // buffer is empty
+ }
+ c := b.buf[b.r]
+ b.r++
+ b.lastByte = int(c)
+ return c, nil
+}
+
+// UnreadByte unreads the last byte. Only the most recently read byte can be unread.
+//
+// UnreadByte returns an error if the most recent method called on the
+// Reader was not a read operation. Notably, Peek, Discard, and WriteTo are not
+// considered read operations.
+func (b *Reader) UnreadByte() error {
+ if b.lastByte < 0 || b.r == 0 && b.w > 0 {
+ return ErrInvalidUnreadByte
+ }
+ // b.r > 0 || b.w == 0
+ if b.r > 0 {
+ b.r--
+ } else {
+ // b.r == 0 && b.w == 0
+ b.w = 1
+ }
+ b.buf[b.r] = byte(b.lastByte)
+ b.lastByte = -1
+ b.lastRuneSize = -1
+ return nil
+}
+
+// ReadRune reads a single UTF-8 encoded Unicode character and returns the
+// rune and its size in bytes. If the encoded rune is invalid, it consumes one byte
+// and returns unicode.ReplacementChar (U+FFFD) with a size of 1.
+func (b *Reader) ReadRune() (r rune, size int, err error) {
+ for b.r+utf8.UTFMax > b.w && !utf8.FullRune(b.buf[b.r:b.w]) && b.err == nil && b.w-b.r < len(b.buf) {
+ b.fill() // b.w-b.r < len(buf) => buffer is not full
+ }
+ b.lastRuneSize = -1
+ if b.r == b.w {
+ return 0, 0, b.readErr()
+ }
+ r, size = rune(b.buf[b.r]), 1
+ if r >= utf8.RuneSelf {
+ r, size = utf8.DecodeRune(b.buf[b.r:b.w])
+ }
+ b.r += size
+ b.lastByte = int(b.buf[b.r-1])
+ b.lastRuneSize = size
+ return r, size, nil
+}
+
+// UnreadRune unreads the last rune. If the most recent method called on
+// the Reader was not a ReadRune, UnreadRune returns an error. (In this
+// regard it is stricter than UnreadByte, which will unread the last byte
+// from any read operation.)
+func (b *Reader) UnreadRune() error {
+ if b.lastRuneSize < 0 || b.r < b.lastRuneSize {
+ return ErrInvalidUnreadRune
+ }
+ b.r -= b.lastRuneSize
+ b.lastByte = -1
+ b.lastRuneSize = -1
+ return nil
+}
+
+// Buffered returns the number of bytes that can be read from the current buffer.
+func (b *Reader) Buffered() int { return b.w - b.r }
+
+// ReadSlice reads until the first occurrence of delim in the input,
+// returning a slice pointing at the bytes in the buffer.
+// The bytes stop being valid at the next read.
+// If ReadSlice encounters an error before finding a delimiter,
+// it returns all the data in the buffer and the error itself (often io.EOF).
+// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim.
+// Because the data returned from ReadSlice will be overwritten
+// by the next I/O operation, most clients should use
+// ReadBytes or ReadString instead.
+// ReadSlice returns err != nil if and only if line does not end in delim.
+func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
+ s := 0 // search start index
+ for {
+ // Search buffer.
+ if i := bytes.IndexByte(b.buf[b.r+s:b.w], delim); i >= 0 {
+ i += s
+ line = b.buf[b.r : b.r+i+1]
+ b.r += i + 1
+ break
+ }
+
+ // Pending error?
+ if b.err != nil {
+ line = b.buf[b.r:b.w]
+ b.r = b.w
+ err = b.readErr()
+ break
+ }
+
+ // Buffer full?
+ if b.Buffered() >= len(b.buf) {
+ b.r = b.w
+ line = b.buf
+ err = ErrBufferFull
+ break
+ }
+
+ s = b.w - b.r // do not rescan area we scanned before
+
+ b.fill() // buffer is not full
+ }
+
+ // Handle last byte, if any.
+ if i := len(line) - 1; i >= 0 {
+ b.lastByte = int(line[i])
+ b.lastRuneSize = -1
+ }
+
+ return
+}
+
+// ReadLine is a low-level line-reading primitive. Most callers should use
+// ReadBytes('\n') or ReadString('\n') instead or use a Scanner.
+//
+// ReadLine tries to return a single line, not including the end-of-line bytes.
+// If the line was too long for the buffer then isPrefix is set and the
+// beginning of the line is returned. The rest of the line will be returned
+// from future calls. isPrefix will be false when returning the last fragment
+// of the line. The returned buffer is only valid until the next call to
+// ReadLine. ReadLine either returns a non-nil line or it returns an error,
+// never both.
+//
+// The text returned from ReadLine does not include the line end ("\r\n" or "\n").
+// No indication or error is given if the input ends without a final line end.
+// Calling UnreadByte after ReadLine will always unread the last byte read
+// (possibly a character belonging to the line end) even if that byte is not
+// part of the line returned by ReadLine.
+func (b *Reader) ReadLine() (line []byte, isPrefix bool, err error) {
+ line, err = b.ReadSlice('\n')
+ if err == ErrBufferFull {
+ // Handle the case where "\r\n" straddles the buffer.
+ if len(line) > 0 && line[len(line)-1] == '\r' {
+ // Put the '\r' back on buf and drop it from line.
+ // Let the next call to ReadLine check for "\r\n".
+ if b.r == 0 {
+ // should be unreachable
+ panic("bufio: tried to rewind past start of buffer")
+ }
+ b.r--
+ line = line[:len(line)-1]
+ }
+ return line, true, nil
+ }
+
+ if len(line) == 0 {
+ if err != nil {
+ line = nil
+ }
+ return
+ }
+ err = nil
+
+ if line[len(line)-1] == '\n' {
+ drop := 1
+ if len(line) > 1 && line[len(line)-2] == '\r' {
+ drop = 2
+ }
+ line = line[:len(line)-drop]
+ }
+ return
+}
+
+// collectFragments reads until the first occurrence of delim in the input. It
+// returns (slice of full buffers, remaining bytes before delim, total number
+// of bytes in the combined first two elements, error).
+// The complete result is equal to
+// `bytes.Join(append(fullBuffers, finalFragment), nil)`, which has a
+// length of `totalLen`. The result is structured in this way to allow callers
+// to minimize allocations and copies.
+func (b *Reader) collectFragments(delim byte) (fullBuffers [][]byte, finalFragment []byte, totalLen int, err error) {
+ var frag []byte
+ // Use ReadSlice to look for delim, accumulating full buffers.
+ for {
+ var e error
+ frag, e = b.ReadSlice(delim)
+ if e == nil { // got final fragment
+ break
+ }
+ if e != ErrBufferFull { // unexpected error
+ err = e
+ break
+ }
+
+ // Make a copy of the buffer.
+ buf := make([]byte, len(frag))
+ copy(buf, frag)
+ fullBuffers = append(fullBuffers, buf)
+ totalLen += len(buf)
+ }
+
+ totalLen += len(frag)
+ return fullBuffers, frag, totalLen, err
+}
+
+// ReadBytes reads until the first occurrence of delim in the input,
+// returning a slice containing the data up to and including the delimiter.
+// If ReadBytes encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadBytes returns err != nil if and only if the returned data does not end in
+// delim.
+// For simple uses, a Scanner may be more convenient.
+func (b *Reader) ReadBytes(delim byte) ([]byte, error) {
+ full, frag, n, err := b.collectFragments(delim)
+ // Allocate new buffer to hold the full pieces and the fragment.
+ buf := make([]byte, n)
+ n = 0
+ // Copy full pieces and fragment in.
+ for i := range full {
+ n += copy(buf[n:], full[i])
+ }
+ copy(buf[n:], frag)
+ return buf, err
+}
+
+// ReadString reads until the first occurrence of delim in the input,
+// returning a string containing the data up to and including the delimiter.
+// If ReadString encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadString returns err != nil if and only if the returned data does not end in
+// delim.
+// For simple uses, a Scanner may be more convenient.
+func (b *Reader) ReadString(delim byte) (string, error) {
+ full, frag, n, err := b.collectFragments(delim)
+ // Allocate new buffer to hold the full pieces and the fragment.
+ var buf strings.Builder
+ buf.Grow(n)
+ // Copy full pieces and fragment in.
+ for _, fb := range full {
+ buf.Write(fb)
+ }
+ buf.Write(frag)
+ return buf.String(), err
+}
+
+// WriteTo implements io.WriterTo.
+// This may make multiple calls to the Read method of the underlying Reader.
+// If the underlying reader supports the WriteTo method,
+// this calls the underlying WriteTo without buffering.
+func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
+ b.lastByte = -1
+ b.lastRuneSize = -1
+
+ n, err = b.writeBuf(w)
+ if err != nil {
+ return
+ }
+
+ if r, ok := b.rd.(io.WriterTo); ok {
+ m, err := r.WriteTo(w)
+ n += m
+ return n, err
+ }
+
+ if w, ok := w.(io.ReaderFrom); ok {
+ m, err := w.ReadFrom(b.rd)
+ n += m
+ return n, err
+ }
+
+ if b.w-b.r < len(b.buf) {
+ b.fill() // buffer not full
+ }
+
+ for b.r < b.w {
+ // b.r < b.w => buffer is not empty
+ m, err := b.writeBuf(w)
+ n += m
+ if err != nil {
+ return n, err
+ }
+ b.fill() // buffer is empty
+ }
+
+ if b.err == io.EOF {
+ b.err = nil
+ }
+
+ return n, b.readErr()
+}
+
+var errNegativeWrite = errors.New("bufio: writer returned negative count from Write")
+
+// writeBuf writes the Reader's buffer to the writer.
+func (b *Reader) writeBuf(w io.Writer) (int64, error) {
+ n, err := w.Write(b.buf[b.r:b.w])
+ if n < 0 {
+ panic(errNegativeWrite)
+ }
+ b.r += n
+ return int64(n), err
+}
+
+// buffered output
+
+// Writer implements buffering for an io.Writer object.
+// If an error occurs writing to a Writer, no more data will be
+// accepted and all subsequent writes, and Flush, will return the error.
+// After all data has been written, the client should call the
+// Flush method to guarantee all data has been forwarded to
+// the underlying io.Writer.
+type Writer struct {
+ err error
+ buf []byte
+ n int
+ wr io.Writer
+}
+
+// NewWriterSize returns a new Writer whose buffer has at least the specified
+// size. If the argument io.Writer is already a Writer with large enough
+// size, it returns the underlying Writer.
+func NewWriterSize(w io.Writer, size int) *Writer {
+ // Is it already a Writer?
+ b, ok := w.(*Writer)
+ if ok && len(b.buf) >= size {
+ return b
+ }
+ if size <= 0 {
+ size = defaultBufSize
+ }
+ return &Writer{
+ buf: make([]byte, size),
+ wr: w,
+ }
+}
+
+// NewWriter returns a new Writer whose buffer has the default size.
+// If the argument io.Writer is already a Writer with large enough buffer size,
+// it returns the underlying Writer.
+func NewWriter(w io.Writer) *Writer {
+ return NewWriterSize(w, defaultBufSize)
+}
+
+// Size returns the size of the underlying buffer in bytes.
+func (b *Writer) Size() int { return len(b.buf) }
+
+// Reset discards any unflushed buffered data, clears any error, and
+// resets b to write its output to w.
+// Calling Reset on the zero value of Writer initializes the internal buffer
+// to the default size.
+func (b *Writer) Reset(w io.Writer) {
+ if b.buf == nil {
+ b.buf = make([]byte, defaultBufSize)
+ }
+ b.err = nil
+ b.n = 0
+ b.wr = w
+}
+
+// Flush writes any buffered data to the underlying io.Writer.
+func (b *Writer) Flush() error {
+ if b.err != nil {
+ return b.err
+ }
+ if b.n == 0 {
+ return nil
+ }
+ n, err := b.wr.Write(b.buf[0:b.n])
+ if n < b.n && err == nil {
+ err = io.ErrShortWrite
+ }
+ if err != nil {
+ if n > 0 && n < b.n {
+ copy(b.buf[0:b.n-n], b.buf[n:b.n])
+ }
+ b.n -= n
+ b.err = err
+ return err
+ }
+ b.n = 0
+ return nil
+}
+
+// Available returns how many bytes are unused in the buffer.
+func (b *Writer) Available() int { return len(b.buf) - b.n }
+
+// AvailableBuffer returns an empty buffer with b.Available() capacity.
+// This buffer is intended to be appended to and
+// passed to an immediately succeeding Write call.
+// The buffer is only valid until the next write operation on b.
+func (b *Writer) AvailableBuffer() []byte {
+ return b.buf[b.n:][:0]
+}
+
+// Buffered returns the number of bytes that have been written into the current buffer.
+func (b *Writer) Buffered() int { return b.n }
+
+// Write writes the contents of p into the buffer.
+// It returns the number of bytes written.
+// If nn < len(p), it also returns an error explaining
+// why the write is short.
+func (b *Writer) Write(p []byte) (nn int, err error) {
+ for len(p) > b.Available() && b.err == nil {
+ var n int
+ if b.Buffered() == 0 {
+ // Large write, empty buffer.
+ // Write directly from p to avoid copy.
+ n, b.err = b.wr.Write(p)
+ } else {
+ n = copy(b.buf[b.n:], p)
+ b.n += n
+ b.Flush()
+ }
+ nn += n
+ p = p[n:]
+ }
+ if b.err != nil {
+ return nn, b.err
+ }
+ n := copy(b.buf[b.n:], p)
+ b.n += n
+ nn += n
+ return nn, nil
+}
+
+// WriteByte writes a single byte.
+func (b *Writer) WriteByte(c byte) error {
+ if b.err != nil {
+ return b.err
+ }
+ if b.Available() <= 0 && b.Flush() != nil {
+ return b.err
+ }
+ b.buf[b.n] = c
+ b.n++
+ return nil
+}
+
+// WriteRune writes a single Unicode code point, returning
+// the number of bytes written and any error.
+func (b *Writer) WriteRune(r rune) (size int, err error) {
+ // Compare as uint32 to correctly handle negative runes.
+ if uint32(r) < utf8.RuneSelf {
+ err = b.WriteByte(byte(r))
+ if err != nil {
+ return 0, err
+ }
+ return 1, nil
+ }
+ if b.err != nil {
+ return 0, b.err
+ }
+ n := b.Available()
+ if n < utf8.UTFMax {
+ if b.Flush(); b.err != nil {
+ return 0, b.err
+ }
+ n = b.Available()
+ if n < utf8.UTFMax {
+ // Can only happen if buffer is silly small.
+ return b.WriteString(string(r))
+ }
+ }
+ size = utf8.EncodeRune(b.buf[b.n:], r)
+ b.n += size
+ return size, nil
+}
+
+// WriteString writes a string.
+// It returns the number of bytes written.
+// If the count is less than len(s), it also returns an error explaining
+// why the write is short.
+func (b *Writer) WriteString(s string) (int, error) {
+ var sw io.StringWriter
+ tryStringWriter := true
+
+ nn := 0
+ for len(s) > b.Available() && b.err == nil {
+ var n int
+ if b.Buffered() == 0 && sw == nil && tryStringWriter {
+ // Check at most once whether b.wr is a StringWriter.
+ sw, tryStringWriter = b.wr.(io.StringWriter)
+ }
+ if b.Buffered() == 0 && tryStringWriter {
+ // Large write, empty buffer, and the underlying writer supports
+ // WriteString: forward the write to the underlying StringWriter.
+ // This avoids an extra copy.
+ n, b.err = sw.WriteString(s)
+ } else {
+ n = copy(b.buf[b.n:], s)
+ b.n += n
+ b.Flush()
+ }
+ nn += n
+ s = s[n:]
+ }
+ if b.err != nil {
+ return nn, b.err
+ }
+ n := copy(b.buf[b.n:], s)
+ b.n += n
+ nn += n
+ return nn, nil
+}
+
+// ReadFrom implements io.ReaderFrom. If the underlying writer
+// supports the ReadFrom method, this calls the underlying ReadFrom.
+// If there is buffered data and an underlying ReadFrom, this fills
+// the buffer and writes it before calling ReadFrom.
+func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+ if b.err != nil {
+ return 0, b.err
+ }
+ readerFrom, readerFromOK := b.wr.(io.ReaderFrom)
+ var m int
+ for {
+ if b.Available() == 0 {
+ if err1 := b.Flush(); err1 != nil {
+ return n, err1
+ }
+ }
+ if readerFromOK && b.Buffered() == 0 {
+ nn, err := readerFrom.ReadFrom(r)
+ b.err = err
+ n += nn
+ return n, err
+ }
+ nr := 0
+ for nr < maxConsecutiveEmptyReads {
+ m, err = r.Read(b.buf[b.n:])
+ if m != 0 || err != nil {
+ break
+ }
+ nr++
+ }
+ if nr == maxConsecutiveEmptyReads {
+ return n, io.ErrNoProgress
+ }
+ b.n += m
+ n += int64(m)
+ if err != nil {
+ break
+ }
+ }
+ if err == io.EOF {
+ // If we filled the buffer exactly, flush preemptively.
+ if b.Available() == 0 {
+ err = b.Flush()
+ } else {
+ err = nil
+ }
+ }
+ return n, err
+}
+
+// buffered input and output
+
+// ReadWriter stores pointers to a Reader and a Writer.
+// It implements io.ReadWriter.
+type ReadWriter struct {
+ *Reader
+ *Writer
+}
+
+// NewReadWriter allocates a new ReadWriter that dispatches to r and w.
+func NewReadWriter(r *Reader, w *Writer) *ReadWriter {
+ return &ReadWriter{r, w}
+}
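For reference, a minimal sketch of the Reader and Writer types the new file defines; the input text and buffer size are made up for illustration:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	r := bufio.NewReaderSize(strings.NewReader("hello, world\n"), 32)

	// Peek looks ahead without advancing the reader.
	if p, err := r.Peek(5); err == nil {
		fmt.Printf("peeked %q\n", p) // peeked "hello"
	}

	// ReadString consumes up to and including the delimiter.
	line, err := r.ReadString('\n')
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("read %q\n", line) // read "hello, world\n"

	// Writes are buffered until Flush forwards them to the underlying io.Writer.
	w := bufio.NewWriter(os.Stdout)
	w.WriteString("buffered")
	w.WriteByte('\n')
	w.Flush()
}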
diff --git a/contrib/go/_std_1.19/src/bufio/scan.go b/contrib/go/_std_1.19/src/bufio/scan.go
new file mode 100644
index 0000000000..e247cbcf32
--- /dev/null
+++ b/contrib/go/_std_1.19/src/bufio/scan.go
@@ -0,0 +1,419 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bufio
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "unicode/utf8"
+)
+
+// Scanner provides a convenient interface for reading data such as
+// a file of newline-delimited lines of text. Successive calls to
+// the Scan method will step through the 'tokens' of a file, skipping
+// the bytes between the tokens. The specification of a token is
+// defined by a split function of type SplitFunc; the default split
+// function breaks the input into lines with line termination stripped. Split
+// functions are defined in this package for scanning a file into
+// lines, bytes, UTF-8-encoded runes, and space-delimited words. The
+// client may instead provide a custom split function.
+//
+// Scanning stops unrecoverably at EOF, the first I/O error, or a token too
+// large to fit in the buffer. When a scan stops, the reader may have
+// advanced arbitrarily far past the last token. Programs that need more
+// control over error handling or large tokens, or must run sequential scans
+// on a reader, should use bufio.Reader instead.
+type Scanner struct {
+ r io.Reader // The reader provided by the client.
+ split SplitFunc // The function to split the tokens.
+ maxTokenSize int // Maximum size of a token; modified by tests.
+ token []byte // Last token returned by split.
+ buf []byte // Buffer used as argument to split.
+ start int // First non-processed byte in buf.
+ end int // End of data in buf.
+ err error // Sticky error.
+ empties int // Count of successive empty tokens.
+ scanCalled bool // Scan has been called; buffer is in use.
+ done bool // Scan has finished.
+}
+
+// SplitFunc is the signature of the split function used to tokenize the
+// input. The arguments are an initial substring of the remaining unprocessed
+// data and a flag, atEOF, that reports whether the Reader has no more data
+// to give. The return values are the number of bytes to advance the input
+// and the next token to return to the user, if any, plus an error, if any.
+//
+// Scanning stops if the function returns an error, in which case some of
+// the input may be discarded. If that error is ErrFinalToken, scanning
+// stops with no error.
+//
+// Otherwise, the Scanner advances the input. If the token is not nil,
+// the Scanner returns it to the user. If the token is nil, the
+// Scanner reads more data and continues scanning; if there is no more
+// data--if atEOF was true--the Scanner returns. If the data does not
+// yet hold a complete token, for instance if it has no newline while
+// scanning lines, a SplitFunc can return (0, nil, nil) to signal the
+// Scanner to read more data into the slice and try again with a
+// longer slice starting at the same point in the input.
+//
+// The function is never called with an empty data slice unless atEOF
+// is true. If atEOF is true, however, data may be non-empty and,
+// as always, holds unprocessed text.
+type SplitFunc func(data []byte, atEOF bool) (advance int, token []byte, err error)
+
+// Errors returned by Scanner.
+var (
+ ErrTooLong = errors.New("bufio.Scanner: token too long")
+ ErrNegativeAdvance = errors.New("bufio.Scanner: SplitFunc returns negative advance count")
+ ErrAdvanceTooFar = errors.New("bufio.Scanner: SplitFunc returns advance count beyond input")
+ ErrBadReadCount = errors.New("bufio.Scanner: Read returned impossible count")
+)
+
+const (
+ // MaxScanTokenSize is the maximum size used to buffer a token
+ // unless the user provides an explicit buffer with Scanner.Buffer.
+ // The actual maximum token size may be smaller as the buffer
+ // may need to include, for instance, a newline.
+ MaxScanTokenSize = 64 * 1024
+
+ startBufSize = 4096 // Size of initial allocation for buffer.
+)
+
+// NewScanner returns a new Scanner to read from r.
+// The split function defaults to ScanLines.
+func NewScanner(r io.Reader) *Scanner {
+ return &Scanner{
+ r: r,
+ split: ScanLines,
+ maxTokenSize: MaxScanTokenSize,
+ }
+}
+
+// Err returns the first non-EOF error that was encountered by the Scanner.
+func (s *Scanner) Err() error {
+ if s.err == io.EOF {
+ return nil
+ }
+ return s.err
+}
+
+// Bytes returns the most recent token generated by a call to Scan.
+// The underlying array may point to data that will be overwritten
+// by a subsequent call to Scan. It does no allocation.
+func (s *Scanner) Bytes() []byte {
+ return s.token
+}
+
+// Text returns the most recent token generated by a call to Scan
+// as a newly allocated string holding its bytes.
+func (s *Scanner) Text() string {
+ return string(s.token)
+}
+
+// ErrFinalToken is a special sentinel error value. It is intended to be
+// returned by a Split function to indicate that the token being delivered
+// with the error is the last token and scanning should stop after this one.
+// After ErrFinalToken is received by Scan, scanning stops with no error.
+// The value is useful to stop processing early or when it is necessary to
+// deliver a final empty token. One could achieve the same behavior
+// with a custom error value but providing one here is tidier.
+// See the emptyFinalToken example for a use of this value.
+var ErrFinalToken = errors.New("final token")
+
+// Scan advances the Scanner to the next token, which will then be
+// available through the Bytes or Text method. It returns false when the
+// scan stops, either by reaching the end of the input or an error.
+// After Scan returns false, the Err method will return any error that
+// occurred during scanning, except that if it was io.EOF, Err
+// will return nil.
+// Scan panics if the split function returns too many empty
+// tokens without advancing the input. This is a common error mode for
+// scanners.
+func (s *Scanner) Scan() bool {
+ if s.done {
+ return false
+ }
+ s.scanCalled = true
+ // Loop until we have a token.
+ for {
+ // See if we can get a token with what we already have.
+ // If we've run out of data but have an error, give the split function
+ // a chance to recover any remaining, possibly empty token.
+ if s.end > s.start || s.err != nil {
+ advance, token, err := s.split(s.buf[s.start:s.end], s.err != nil)
+ if err != nil {
+ if err == ErrFinalToken {
+ s.token = token
+ s.done = true
+ return true
+ }
+ s.setErr(err)
+ return false
+ }
+ if !s.advance(advance) {
+ return false
+ }
+ s.token = token
+ if token != nil {
+ if s.err == nil || advance > 0 {
+ s.empties = 0
+ } else {
+ // Returning tokens without advancing input at EOF.
+ s.empties++
+ if s.empties > maxConsecutiveEmptyReads {
+ panic("bufio.Scan: too many empty tokens without progressing")
+ }
+ }
+ return true
+ }
+ }
+ // We cannot generate a token with what we are holding.
+ // If we've already hit EOF or an I/O error, we are done.
+ if s.err != nil {
+ // Shut it down.
+ s.start = 0
+ s.end = 0
+ return false
+ }
+ // Must read more data.
+ // First, shift data to beginning of buffer if there's lots of empty space
+ // or space is needed.
+ if s.start > 0 && (s.end == len(s.buf) || s.start > len(s.buf)/2) {
+ copy(s.buf, s.buf[s.start:s.end])
+ s.end -= s.start
+ s.start = 0
+ }
+ // Is the buffer full? If so, resize.
+ if s.end == len(s.buf) {
+ // Guarantee no overflow in the multiplication below.
+ const maxInt = int(^uint(0) >> 1)
+ if len(s.buf) >= s.maxTokenSize || len(s.buf) > maxInt/2 {
+ s.setErr(ErrTooLong)
+ return false
+ }
+ newSize := len(s.buf) * 2
+ if newSize == 0 {
+ newSize = startBufSize
+ }
+ if newSize > s.maxTokenSize {
+ newSize = s.maxTokenSize
+ }
+ newBuf := make([]byte, newSize)
+ copy(newBuf, s.buf[s.start:s.end])
+ s.buf = newBuf
+ s.end -= s.start
+ s.start = 0
+ }
+ // Finally we can read some input. Make sure we don't get stuck with
+ // a misbehaving Reader. Officially we don't need to do this, but let's
+ // be extra careful: Scanner is for safe, simple jobs.
+ for loop := 0; ; {
+ n, err := s.r.Read(s.buf[s.end:len(s.buf)])
+ if n < 0 || len(s.buf)-s.end < n {
+ s.setErr(ErrBadReadCount)
+ break
+ }
+ s.end += n
+ if err != nil {
+ s.setErr(err)
+ break
+ }
+ if n > 0 {
+ s.empties = 0
+ break
+ }
+ loop++
+ if loop > maxConsecutiveEmptyReads {
+ s.setErr(io.ErrNoProgress)
+ break
+ }
+ }
+ }
+}
+
+// advance consumes n bytes of the buffer. It reports whether the advance was legal.
+func (s *Scanner) advance(n int) bool {
+ if n < 0 {
+ s.setErr(ErrNegativeAdvance)
+ return false
+ }
+ if n > s.end-s.start {
+ s.setErr(ErrAdvanceTooFar)
+ return false
+ }
+ s.start += n
+ return true
+}
+
+// setErr records the first error encountered.
+func (s *Scanner) setErr(err error) {
+ if s.err == nil || s.err == io.EOF {
+ s.err = err
+ }
+}
+
+// Buffer sets the initial buffer to use when scanning and the maximum
+// size of buffer that may be allocated during scanning. The maximum
+// token size is the larger of max and cap(buf). If max <= cap(buf),
+// Scan will use this buffer only and do no allocation.
+//
+// By default, Scan uses an internal buffer and sets the
+// maximum token size to MaxScanTokenSize.
+//
+// Buffer panics if it is called after scanning has started.
+func (s *Scanner) Buffer(buf []byte, max int) {
+ if s.scanCalled {
+ panic("Buffer called after Scan")
+ }
+ s.buf = buf[0:cap(buf)]
+ s.maxTokenSize = max
+}
+
+// Split sets the split function for the Scanner.
+// The default split function is ScanLines.
+//
+// Split panics if it is called after scanning has started.
+func (s *Scanner) Split(split SplitFunc) {
+ if s.scanCalled {
+ panic("Split called after Scan")
+ }
+ s.split = split
+}
+
+// Split functions
+
+// ScanBytes is a split function for a Scanner that returns each byte as a token.
+func ScanBytes(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ if atEOF && len(data) == 0 {
+ return 0, nil, nil
+ }
+ return 1, data[0:1], nil
+}
+
+var errorRune = []byte(string(utf8.RuneError))
+
+// ScanRunes is a split function for a Scanner that returns each
+// UTF-8-encoded rune as a token. The sequence of runes returned is
+// equivalent to that from a range loop over the input as a string, which
+// means that erroneous UTF-8 encodings translate to U+FFFD = "\xef\xbf\xbd".
+// Because of the Scan interface, this makes it impossible for the client to
+// distinguish correctly encoded replacement runes from encoding errors.
+func ScanRunes(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ if atEOF && len(data) == 0 {
+ return 0, nil, nil
+ }
+
+ // Fast path 1: ASCII.
+ if data[0] < utf8.RuneSelf {
+ return 1, data[0:1], nil
+ }
+
+ // Fast path 2: Correct UTF-8 decode without error.
+ _, width := utf8.DecodeRune(data)
+ if width > 1 {
+ // It's a valid encoding. Width cannot be one for a correctly encoded
+ // non-ASCII rune.
+ return width, data[0:width], nil
+ }
+
+ // We know it's an error: we have width==1 and implicitly r==utf8.RuneError.
+ // Is the error because there wasn't a full rune to be decoded?
+ // FullRune distinguishes correctly between erroneous and incomplete encodings.
+ if !atEOF && !utf8.FullRune(data) {
+ // Incomplete; get more bytes.
+ return 0, nil, nil
+ }
+
+ // We have a real UTF-8 encoding error. Return a properly encoded error rune
+ // but advance only one byte. This matches the behavior of a range loop over
+ // an incorrectly encoded string.
+ return 1, errorRune, nil
+}
+
+// dropCR drops a terminal \r from the data.
+func dropCR(data []byte) []byte {
+ if len(data) > 0 && data[len(data)-1] == '\r' {
+ return data[0 : len(data)-1]
+ }
+ return data
+}
+
+// ScanLines is a split function for a Scanner that returns each line of
+// text, stripped of any trailing end-of-line marker. The returned line may
+// be empty. The end-of-line marker is one optional carriage return followed
+// by one mandatory newline. In regular expression notation, it is `\r?\n`.
+// The last non-empty line of input will be returned even if it has no
+// newline.
+func ScanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ if atEOF && len(data) == 0 {
+ return 0, nil, nil
+ }
+ if i := bytes.IndexByte(data, '\n'); i >= 0 {
+ // We have a full newline-terminated line.
+ return i + 1, dropCR(data[0:i]), nil
+ }
+ // If we're at EOF, we have a final, non-terminated line. Return it.
+ if atEOF {
+ return len(data), dropCR(data), nil
+ }
+ // Request more data.
+ return 0, nil, nil
+}
+
+// isSpace reports whether the character is a Unicode white space character.
+// We avoid dependency on the unicode package, but check validity of the implementation
+// in the tests.
+func isSpace(r rune) bool {
+ if r <= '\u00FF' {
+ // Obvious ASCII ones: \t through \r plus space. Plus two Latin-1 oddballs.
+ switch r {
+ case ' ', '\t', '\n', '\v', '\f', '\r':
+ return true
+ case '\u0085', '\u00A0':
+ return true
+ }
+ return false
+ }
+ // High-valued ones.
+ if '\u2000' <= r && r <= '\u200a' {
+ return true
+ }
+ switch r {
+ case '\u1680', '\u2028', '\u2029', '\u202f', '\u205f', '\u3000':
+ return true
+ }
+ return false
+}
+
+// ScanWords is a split function for a Scanner that returns each
+// space-separated word of text, with surrounding spaces deleted. It will
+// never return an empty string. The definition of space is set by
+// unicode.IsSpace.
+func ScanWords(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ // Skip leading spaces.
+ start := 0
+ for width := 0; start < len(data); start += width {
+ var r rune
+ r, width = utf8.DecodeRune(data[start:])
+ if !isSpace(r) {
+ break
+ }
+ }
+ // Scan until space, marking end of word.
+ for width, i := 0, start; i < len(data); i += width {
+ var r rune
+ r, width = utf8.DecodeRune(data[i:])
+ if isSpace(r) {
+ return i + width, data[start:i], nil
+ }
+ }
+ // If we're at EOF, we have a final, non-empty, non-terminated word. Return it.
+ if atEOF && len(data) > start {
+ return len(data), data[start:], nil
+ }
+ // Request more data.
+ return start, nil, nil
+}
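For reference, a minimal sketch of the Scanner and the stock split functions the new file defines; the input text is made up for illustration:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	sc := bufio.NewScanner(strings.NewReader("  alpha beta\tgamma\n"))
	sc.Split(bufio.ScanWords) // the default split function is ScanLines

	for sc.Scan() {
		fmt.Printf("token: %q\n", sc.Text()) // "alpha", "beta", "gamma"
	}
	// Err is nil when scanning stopped at io.EOF.
	if err := sc.Err(); err != nil {
		fmt.Println("scan error:", err)
	}
}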
diff --git a/contrib/go/_std_1.19/src/bytes/buffer.go b/contrib/go/_std_1.19/src/bytes/buffer.go
new file mode 100644
index 0000000000..0bacbda164
--- /dev/null
+++ b/contrib/go/_std_1.19/src/bytes/buffer.go
@@ -0,0 +1,474 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytes
+
+// Simple byte buffer for marshaling data.
+
+import (
+ "errors"
+ "io"
+ "unicode/utf8"
+)
+
+// smallBufferSize is the minimal capacity for the buffer's initial allocation.
+const smallBufferSize = 64
+
+// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
+// The zero value for Buffer is an empty buffer ready to use.
+type Buffer struct {
+ buf []byte // contents are the bytes buf[off : len(buf)]
+ off int // read at &buf[off], write at &buf[len(buf)]
+ lastRead readOp // last read operation, so that Unread* can work correctly.
+}
+
+// The readOp constants describe the last action performed on
+// the buffer, so that UnreadRune and UnreadByte can check for
+// invalid usage. opReadRuneX constants are chosen such that
+// converted to int they correspond to the rune size that was read.
+type readOp int8
+
+// Don't use iota for these, as the values need to correspond with the
+// names and comments, which is easier to see when being explicit.
+const (
+ opRead readOp = -1 // Any other read operation.
+ opInvalid readOp = 0 // Non-read operation.
+ opReadRune1 readOp = 1 // Read rune of size 1.
+ opReadRune2 readOp = 2 // Read rune of size 2.
+ opReadRune3 readOp = 3 // Read rune of size 3.
+ opReadRune4 readOp = 4 // Read rune of size 4.
+)
+
+// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer.
+var ErrTooLarge = errors.New("bytes.Buffer: too large")
+var errNegativeRead = errors.New("bytes.Buffer: reader returned negative count from Read")
+
+const maxInt = int(^uint(0) >> 1)
+
+// Bytes returns a slice of length b.Len() holding the unread portion of the buffer.
+// The slice is valid for use only until the next buffer modification (that is,
+// only until the next call to a method like Read, Write, Reset, or Truncate).
+// The slice aliases the buffer content at least until the next buffer modification,
+// so immediate changes to the slice will affect the result of future reads.
+func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
+
+// String returns the contents of the unread portion of the buffer
+// as a string. If the Buffer is a nil pointer, it returns "<nil>".
+//
+// To build strings more efficiently, see the strings.Builder type.
+func (b *Buffer) String() string {
+ if b == nil {
+ // Special case, useful in debugging.
+ return "<nil>"
+ }
+ return string(b.buf[b.off:])
+}
+
+// empty reports whether the unread portion of the buffer is empty.
+func (b *Buffer) empty() bool { return len(b.buf) <= b.off }
+
+// Len returns the number of bytes of the unread portion of the buffer;
+// b.Len() == len(b.Bytes()).
+func (b *Buffer) Len() int { return len(b.buf) - b.off }
+
+// Cap returns the capacity of the buffer's underlying byte slice, that is, the
+// total space allocated for the buffer's data.
+func (b *Buffer) Cap() int { return cap(b.buf) }
+
+// Truncate discards all but the first n unread bytes from the buffer
+// but continues to use the same allocated storage.
+// It panics if n is negative or greater than the length of the buffer.
+func (b *Buffer) Truncate(n int) {
+ if n == 0 {
+ b.Reset()
+ return
+ }
+ b.lastRead = opInvalid
+ if n < 0 || n > b.Len() {
+ panic("bytes.Buffer: truncation out of range")
+ }
+ b.buf = b.buf[:b.off+n]
+}
+
+// Reset resets the buffer to be empty,
+// but it retains the underlying storage for use by future writes.
+// Reset is the same as Truncate(0).
+func (b *Buffer) Reset() {
+ b.buf = b.buf[:0]
+ b.off = 0
+ b.lastRead = opInvalid
+}
+
+// tryGrowByReslice is an inlineable version of grow for the fast case where the
+// internal buffer only needs to be resliced.
+// It returns the index where bytes should be written and whether it succeeded.
+func (b *Buffer) tryGrowByReslice(n int) (int, bool) {
+ if l := len(b.buf); n <= cap(b.buf)-l {
+ b.buf = b.buf[:l+n]
+ return l, true
+ }
+ return 0, false
+}
+
+// grow grows the buffer to guarantee space for n more bytes.
+// It returns the index where bytes should be written.
+// If the buffer can't grow it will panic with ErrTooLarge.
+func (b *Buffer) grow(n int) int {
+ m := b.Len()
+ // If buffer is empty, reset to recover space.
+ if m == 0 && b.off != 0 {
+ b.Reset()
+ }
+ // Try to grow by means of a reslice.
+ if i, ok := b.tryGrowByReslice(n); ok {
+ return i
+ }
+ if b.buf == nil && n <= smallBufferSize {
+ b.buf = make([]byte, n, smallBufferSize)
+ return 0
+ }
+ c := cap(b.buf)
+ if n <= c/2-m {
+ // We can slide things down instead of allocating a new
+ // slice. We only need m+n <= c to slide, but
+ // we instead let capacity get twice as large so we
+ // don't spend all our time copying.
+ copy(b.buf, b.buf[b.off:])
+ } else if c > maxInt-c-n {
+ panic(ErrTooLarge)
+ } else {
+ // Add b.off to account for b.buf[:b.off] being sliced off the front.
+ b.buf = growSlice(b.buf[b.off:], b.off+n)
+ }
+ // Restore b.off and len(b.buf).
+ b.off = 0
+ b.buf = b.buf[:m+n]
+ return m
+}
+
+// Grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After Grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+// If n is negative, Grow will panic.
+// If the buffer can't grow it will panic with ErrTooLarge.
+func (b *Buffer) Grow(n int) {
+ if n < 0 {
+ panic("bytes.Buffer.Grow: negative count")
+ }
+ m := b.grow(n)
+ b.buf = b.buf[:m]
+}
+
+// Write appends the contents of p to the buffer, growing the buffer as
+// needed. The return value n is the length of p; err is always nil. If the
+// buffer becomes too large, Write will panic with ErrTooLarge.
+func (b *Buffer) Write(p []byte) (n int, err error) {
+ b.lastRead = opInvalid
+ m, ok := b.tryGrowByReslice(len(p))
+ if !ok {
+ m = b.grow(len(p))
+ }
+ return copy(b.buf[m:], p), nil
+}
+
+// WriteString appends the contents of s to the buffer, growing the buffer as
+// needed. The return value n is the length of s; err is always nil. If the
+// buffer becomes too large, WriteString will panic with ErrTooLarge.
+func (b *Buffer) WriteString(s string) (n int, err error) {
+ b.lastRead = opInvalid
+ m, ok := b.tryGrowByReslice(len(s))
+ if !ok {
+ m = b.grow(len(s))
+ }
+ return copy(b.buf[m:], s), nil
+}
+
+// MinRead is the minimum slice size passed to a Read call by
+// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
+// what is required to hold the contents of r, ReadFrom will not grow the
+// underlying buffer.
+const MinRead = 512
+
+// ReadFrom reads data from r until EOF and appends it to the buffer, growing
+// the buffer as needed. The return value n is the number of bytes read. Any
+// error except io.EOF encountered during the read is also returned. If the
+// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
+func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
+ b.lastRead = opInvalid
+ for {
+ i := b.grow(MinRead)
+ b.buf = b.buf[:i]
+ m, e := r.Read(b.buf[i:cap(b.buf)])
+ if m < 0 {
+ panic(errNegativeRead)
+ }
+
+ b.buf = b.buf[:i+m]
+ n += int64(m)
+ if e == io.EOF {
+ return n, nil // e is EOF, so return nil explicitly
+ }
+ if e != nil {
+ return n, e
+ }
+ }
+}
+
+// growSlice grows b by n, preserving the original content of b.
+// If the allocation fails, it panics with ErrTooLarge.
+func growSlice(b []byte, n int) []byte {
+ defer func() {
+ if recover() != nil {
+ panic(ErrTooLarge)
+ }
+ }()
+ // TODO(http://golang.org/issue/51462): We should rely on the append-make
+ // pattern so that the compiler can call runtime.growslice. For example:
+ // return append(b, make([]byte, n)...)
+ // This avoids unnecessary zero-ing of the first len(b) bytes of the
+ // allocated slice, but this pattern causes b to escape onto the heap.
+ //
+ // Instead use the append-make pattern with a nil slice to ensure that
+ // we allocate buffers rounded up to the closest size class.
+ c := len(b) + n // ensure enough space for n elements
+ if c < 2*cap(b) {
+ // The growth rate has historically always been 2x. In the future,
+ // we could rely purely on append to determine the growth rate.
+ c = 2 * cap(b)
+ }
+ b2 := append([]byte(nil), make([]byte, c)...)
+ copy(b2, b)
+ return b2[:len(b)]
+}
+
+// WriteTo writes data to w until the buffer is drained or an error occurs.
+// The return value n is the number of bytes written; it always fits into an
+// int, but it is int64 to match the io.WriterTo interface. Any error
+// encountered during the write is also returned.
+func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
+ b.lastRead = opInvalid
+ if nBytes := b.Len(); nBytes > 0 {
+ m, e := w.Write(b.buf[b.off:])
+ if m > nBytes {
+ panic("bytes.Buffer.WriteTo: invalid Write count")
+ }
+ b.off += m
+ n = int64(m)
+ if e != nil {
+ return n, e
+ }
+ // all bytes should have been written, by definition of
+ // Write method in io.Writer
+ if m != nBytes {
+ return n, io.ErrShortWrite
+ }
+ }
+ // Buffer is now empty; reset.
+ b.Reset()
+ return n, nil
+}
+
+// WriteByte appends the byte c to the buffer, growing the buffer as needed.
+// The returned error is always nil, but is included to match bufio.Writer's
+// WriteByte. If the buffer becomes too large, WriteByte will panic with
+// ErrTooLarge.
+func (b *Buffer) WriteByte(c byte) error {
+ b.lastRead = opInvalid
+ m, ok := b.tryGrowByReslice(1)
+ if !ok {
+ m = b.grow(1)
+ }
+ b.buf[m] = c
+ return nil
+}
+
+// WriteRune appends the UTF-8 encoding of Unicode code point r to the
+// buffer, returning its length and an error, which is always nil but is
+// included to match bufio.Writer's WriteRune. The buffer is grown as needed;
+// if it becomes too large, WriteRune will panic with ErrTooLarge.
+func (b *Buffer) WriteRune(r rune) (n int, err error) {
+ // Compare as uint32 to correctly handle negative runes.
+ if uint32(r) < utf8.RuneSelf {
+ b.WriteByte(byte(r))
+ return 1, nil
+ }
+ b.lastRead = opInvalid
+ m, ok := b.tryGrowByReslice(utf8.UTFMax)
+ if !ok {
+ m = b.grow(utf8.UTFMax)
+ }
+ n = utf8.EncodeRune(b.buf[m:m+utf8.UTFMax], r)
+ b.buf = b.buf[:m+n]
+ return n, nil
+}
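+
+// An illustrative sketch: n reports the UTF-8 length of the encoded rune,
+// so multi-byte code points return more than one.
+//
+//	var b Buffer
+//	n, _ := b.WriteRune('世') // n == 3: U+4E16 encodes to three bytes
+//	n, _ = b.WriteRune('a')   // n == 1: ASCII takes the single-byte fast path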
+
+// Read reads the next len(p) bytes from the buffer or until the buffer
+// is drained. The return value n is the number of bytes read. If the
+// buffer has no data to return, err is io.EOF (unless len(p) is zero);
+// otherwise it is nil.
+func (b *Buffer) Read(p []byte) (n int, err error) {
+ b.lastRead = opInvalid
+ if b.empty() {
+ // Buffer is empty, reset to recover space.
+ b.Reset()
+ if len(p) == 0 {
+ return 0, nil
+ }
+ return 0, io.EOF
+ }
+ n = copy(p, b.buf[b.off:])
+ b.off += n
+ if n > 0 {
+ b.lastRead = opRead
+ }
+ return n, nil
+}
+
+// Next returns a slice containing the next n bytes from the buffer,
+// advancing the buffer as if the bytes had been returned by Read.
+// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
+// The slice is only valid until the next call to a read or write method.
+func (b *Buffer) Next(n int) []byte {
+ b.lastRead = opInvalid
+ m := b.Len()
+ if n > m {
+ n = m
+ }
+ data := b.buf[b.off : b.off+n]
+ b.off += n
+ if n > 0 {
+ b.lastRead = opRead
+ }
+ return data
+}
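+
+// An illustrative sketch of the validity caveat above: Next returns a view
+// into the internal buffer, not a copy.
+//
+//	b := NewBufferString("abcdef")
+//	p := b.Next(3)   // p == []byte("abc"); "def" remains unread
+//	b.WriteByte('!') // p may now alias rewritten memory; copy p first if kept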
+
+// ReadByte reads and returns the next byte from the buffer.
+// If no byte is available, it returns error io.EOF.
+func (b *Buffer) ReadByte() (byte, error) {
+ if b.empty() {
+ // Buffer is empty, reset to recover space.
+ b.Reset()
+ return 0, io.EOF
+ }
+ c := b.buf[b.off]
+ b.off++
+ b.lastRead = opRead
+ return c, nil
+}
+
+// ReadRune reads and returns the next UTF-8-encoded
+// Unicode code point from the buffer.
+// If no bytes are available, the error returned is io.EOF.
+// If the bytes are an erroneous UTF-8 encoding, it
+// consumes one byte and returns U+FFFD, 1.
+func (b *Buffer) ReadRune() (r rune, size int, err error) {
+ if b.empty() {
+ // Buffer is empty, reset to recover space.
+ b.Reset()
+ return 0, 0, io.EOF
+ }
+ c := b.buf[b.off]
+ if c < utf8.RuneSelf {
+ b.off++
+ b.lastRead = opReadRune1
+ return rune(c), 1, nil
+ }
+ r, n := utf8.DecodeRune(b.buf[b.off:])
+ b.off += n
+ b.lastRead = readOp(n)
+ return r, n, nil
+}
+
+// UnreadRune unreads the last rune returned by ReadRune.
+// If the most recent read or write operation on the buffer was
+// not a successful ReadRune, UnreadRune returns an error. (In this regard
+// it is stricter than UnreadByte, which will unread the last byte
+// from any read operation.)
+func (b *Buffer) UnreadRune() error {
+ if b.lastRead <= opInvalid {
+ return errors.New("bytes.Buffer: UnreadRune: previous operation was not a successful ReadRune")
+ }
+ if b.off >= int(b.lastRead) {
+ b.off -= int(b.lastRead)
+ }
+ b.lastRead = opInvalid
+ return nil
+}
+
+var errUnreadByte = errors.New("bytes.Buffer: UnreadByte: previous operation was not a successful read")
+
+// UnreadByte unreads the last byte returned by the most recent successful
+// read operation that read at least one byte. If a write has happened since
+// the last read, if the last read returned an error, or if the read returned
+// zero bytes, UnreadByte returns an error.
+func (b *Buffer) UnreadByte() error {
+ if b.lastRead == opInvalid {
+ return errUnreadByte
+ }
+ b.lastRead = opInvalid
+ if b.off > 0 {
+ b.off--
+ }
+ return nil
+}
+
+// ReadBytes reads until the first occurrence of delim in the input,
+// returning a slice containing the data up to and including the delimiter.
+// If ReadBytes encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadBytes returns err != nil if and only if the returned data does not end in
+// delim.
+func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
+ slice, err := b.readSlice(delim)
+ // return a copy of slice. The buffer's backing array may
+ // be overwritten by later calls.
+ line = append(line, slice...)
+ return line, err
+}
+
+// readSlice is like ReadBytes but returns a reference to internal buffer data.
+func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
+ i := IndexByte(b.buf[b.off:], delim)
+ end := b.off + i + 1
+ if i < 0 {
+ end = len(b.buf)
+ err = io.EOF
+ }
+ line = b.buf[b.off:end]
+ b.off = end
+ b.lastRead = opRead
+ return line, err
+}
+
+// ReadString reads until the first occurrence of delim in the input,
+// returning a string containing the data up to and including the delimiter.
+// If ReadString encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadString returns err != nil if and only if the returned data does not end
+// in delim.
+func (b *Buffer) ReadString(delim byte) (line string, err error) {
+ slice, err := b.readSlice(delim)
+ return string(slice), err
+}
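+
+// An illustrative sketch: reading newline-delimited records. Inputs are
+// chosen only for the example.
+//
+//	b := NewBufferString("one\ntwo")
+//	line, err := b.ReadString('\n') // line == "one\n", err == nil
+//	line, err = b.ReadString('\n')  // line == "two", err == io.EOF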
+
+// NewBuffer creates and initializes a new Buffer using buf as its
+// initial contents. The new Buffer takes ownership of buf, and the
+// caller should not use buf after this call. NewBuffer is intended to
+// prepare a Buffer to read existing data. It can also be used to set
+// the initial size of the internal buffer for writing. To do that,
+// buf should have the desired capacity but a length of zero.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
+func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
+
+// NewBufferString creates and initializes a new Buffer using string s as its
+// initial contents. It is intended to prepare a buffer to read an existing
+// string.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
+func NewBufferString(s string) *Buffer {
+ return &Buffer{buf: []byte(s)}
+}
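+
+// An illustrative sketch of the two intended uses (sizes are arbitrary):
+//
+//	r := NewBuffer([]byte("existing data")) // ready for reading
+//	w := NewBuffer(make([]byte, 0, 1024))   // empty, with 1 KiB preallocated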
diff --git a/contrib/go/_std_1.19/src/bytes/bytes.go b/contrib/go/_std_1.19/src/bytes/bytes.go
new file mode 100644
index 0000000000..659a82bcc8
--- /dev/null
+++ b/contrib/go/_std_1.19/src/bytes/bytes.go
@@ -0,0 +1,1301 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bytes implements functions for the manipulation of byte slices.
+// It is analogous to the facilities of the strings package.
+package bytes
+
+import (
+ "internal/bytealg"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Equal reports whether a and b
+// are the same length and contain the same bytes.
+// A nil argument is equivalent to an empty slice.
+func Equal(a, b []byte) bool {
+ // Neither cmd/compile nor gccgo allocates for these string conversions.
+ return string(a) == string(b)
+}
+
+// Compare returns an integer comparing two byte slices lexicographically.
+// The result will be 0 if a == b, -1 if a < b, and +1 if a > b.
+// A nil argument is equivalent to an empty slice.
+func Compare(a, b []byte) int {
+ return bytealg.Compare(a, b)
+}
+
+// explode splits s into a slice of UTF-8 sequences, one per Unicode code point (still slices of bytes),
+// up to a maximum of n byte slices. Invalid UTF-8 sequences are chopped into individual bytes.
+func explode(s []byte, n int) [][]byte {
+ if n <= 0 || n > len(s) {
+ n = len(s)
+ }
+ a := make([][]byte, n)
+ var size int
+ na := 0
+ for len(s) > 0 {
+ if na+1 >= n {
+ a[na] = s
+ na++
+ break
+ }
+ _, size = utf8.DecodeRune(s)
+ a[na] = s[0:size:size]
+ s = s[size:]
+ na++
+ }
+ return a[0:na]
+}
+
+// Count counts the number of non-overlapping instances of sep in s.
+// If sep is an empty slice, Count returns 1 + the number of UTF-8-encoded code points in s.
+func Count(s, sep []byte) int {
+ // special case
+ if len(sep) == 0 {
+ return utf8.RuneCount(s) + 1
+ }
+ if len(sep) == 1 {
+ return bytealg.Count(s, sep[0])
+ }
+ n := 0
+ for {
+ i := Index(s, sep)
+ if i == -1 {
+ return n
+ }
+ n++
+ s = s[i+len(sep):]
+ }
+}
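+
+// Illustrative cases (inputs chosen for the example), including the empty
+// separator rule documented above:
+//
+//	Count([]byte("cheese"), []byte("e")) // 3
+//	Count([]byte("five"), []byte(""))    // 5: 1 + the four code points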
+
+// Contains reports whether subslice is within b.
+func Contains(b, subslice []byte) bool {
+ return Index(b, subslice) != -1
+}
+
+// ContainsAny reports whether any of the UTF-8-encoded code points in chars are within b.
+func ContainsAny(b []byte, chars string) bool {
+ return IndexAny(b, chars) >= 0
+}
+
+// ContainsRune reports whether the rune is contained in the UTF-8-encoded byte slice b.
+func ContainsRune(b []byte, r rune) bool {
+ return IndexRune(b, r) >= 0
+}
+
+// IndexByte returns the index of the first instance of c in b, or -1 if c is not present in b.
+func IndexByte(b []byte, c byte) int {
+ return bytealg.IndexByte(b, c)
+}
+
+func indexBytePortable(s []byte, c byte) int {
+ for i, b := range s {
+ if b == c {
+ return i
+ }
+ }
+ return -1
+}
+
+// LastIndex returns the index of the last instance of sep in s, or -1 if sep is not present in s.
+func LastIndex(s, sep []byte) int {
+ n := len(sep)
+ switch {
+ case n == 0:
+ return len(s)
+ case n == 1:
+ return LastIndexByte(s, sep[0])
+ case n == len(s):
+ if Equal(s, sep) {
+ return 0
+ }
+ return -1
+ case n > len(s):
+ return -1
+ }
+ // Rabin-Karp search from the end of the string
+ hashss, pow := bytealg.HashStrRevBytes(sep)
+ last := len(s) - n
+ var h uint32
+ for i := len(s) - 1; i >= last; i-- {
+ h = h*bytealg.PrimeRK + uint32(s[i])
+ }
+ if h == hashss && Equal(s[last:], sep) {
+ return last
+ }
+ for i := last - 1; i >= 0; i-- {
+ h *= bytealg.PrimeRK
+ h += uint32(s[i])
+ h -= pow * uint32(s[i+n])
+ if h == hashss && Equal(s[i:i+n], sep) {
+ return i
+ }
+ }
+ return -1
+}
+
+// LastIndexByte returns the index of the last instance of c in s, or -1 if c is not present in s.
+func LastIndexByte(s []byte, c byte) int {
+ for i := len(s) - 1; i >= 0; i-- {
+ if s[i] == c {
+ return i
+ }
+ }
+ return -1
+}
+
+// IndexRune interprets s as a sequence of UTF-8-encoded code points.
+// It returns the byte index of the first occurrence in s of the given rune.
+// It returns -1 if rune is not present in s.
+// If r is utf8.RuneError, it returns the first instance of any
+// invalid UTF-8 byte sequence.
+func IndexRune(s []byte, r rune) int {
+ switch {
+ case 0 <= r && r < utf8.RuneSelf:
+ return IndexByte(s, byte(r))
+ case r == utf8.RuneError:
+ for i := 0; i < len(s); {
+ r1, n := utf8.DecodeRune(s[i:])
+ if r1 == utf8.RuneError {
+ return i
+ }
+ i += n
+ }
+ return -1
+ case !utf8.ValidRune(r):
+ return -1
+ default:
+ var b [utf8.UTFMax]byte
+ n := utf8.EncodeRune(b[:], r)
+ return Index(s, b[:n])
+ }
+}
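+
+// Illustrative cases: the result is a byte index, not a rune index.
+//
+//	IndexRune([]byte("chicken"), 'k') // 4
+//	IndexRune([]byte("日本語"), '本')  // 3: '日' occupies bytes 0-2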
+
+// IndexAny interprets s as a sequence of UTF-8-encoded Unicode code points.
+// It returns the byte index of the first occurrence in s of any of the Unicode
+// code points in chars. It returns -1 if chars is empty or if there is no code
+// point in common.
+func IndexAny(s []byte, chars string) int {
+ if chars == "" {
+ // Avoid scanning all of s.
+ return -1
+ }
+ if len(s) == 1 {
+ r := rune(s[0])
+ if r >= utf8.RuneSelf {
+ // search utf8.RuneError.
+ for _, r = range chars {
+ if r == utf8.RuneError {
+ return 0
+ }
+ }
+ return -1
+ }
+ if bytealg.IndexByteString(chars, s[0]) >= 0 {
+ return 0
+ }
+ return -1
+ }
+ if len(chars) == 1 {
+ r := rune(chars[0])
+ if r >= utf8.RuneSelf {
+ r = utf8.RuneError
+ }
+ return IndexRune(s, r)
+ }
+ if len(s) > 8 {
+ if as, isASCII := makeASCIISet(chars); isASCII {
+ for i, c := range s {
+ if as.contains(c) {
+ return i
+ }
+ }
+ return -1
+ }
+ }
+ var width int
+ for i := 0; i < len(s); i += width {
+ r := rune(s[i])
+ if r < utf8.RuneSelf {
+ if bytealg.IndexByteString(chars, s[i]) >= 0 {
+ return i
+ }
+ width = 1
+ continue
+ }
+ r, width = utf8.DecodeRune(s[i:])
+ if r != utf8.RuneError {
+ // r is 2 to 4 bytes
+ if len(chars) == width {
+ if chars == string(r) {
+ return i
+ }
+ continue
+ }
+ // Use bytealg.IndexString for performance if available.
+ if bytealg.MaxLen >= width {
+ if bytealg.IndexString(chars, string(r)) >= 0 {
+ return i
+ }
+ continue
+ }
+ }
+ for _, ch := range chars {
+ if r == ch {
+ return i
+ }
+ }
+ }
+ return -1
+}
+
+// LastIndexAny interprets s as a sequence of UTF-8-encoded Unicode code
+// points. It returns the byte index of the last occurrence in s of any of
+// the Unicode code points in chars. It returns -1 if chars is empty or if
+// there is no code point in common.
+func LastIndexAny(s []byte, chars string) int {
+ if chars == "" {
+ // Avoid scanning all of s.
+ return -1
+ }
+ if len(s) > 8 {
+ if as, isASCII := makeASCIISet(chars); isASCII {
+ for i := len(s) - 1; i >= 0; i-- {
+ if as.contains(s[i]) {
+ return i
+ }
+ }
+ return -1
+ }
+ }
+ if len(s) == 1 {
+ r := rune(s[0])
+ if r >= utf8.RuneSelf {
+ for _, r = range chars {
+ if r == utf8.RuneError {
+ return 0
+ }
+ }
+ return -1
+ }
+ if bytealg.IndexByteString(chars, s[0]) >= 0 {
+ return 0
+ }
+ return -1
+ }
+ if len(chars) == 1 {
+ cr := rune(chars[0])
+ if cr >= utf8.RuneSelf {
+ cr = utf8.RuneError
+ }
+ for i := len(s); i > 0; {
+ r, size := utf8.DecodeLastRune(s[:i])
+ i -= size
+ if r == cr {
+ return i
+ }
+ }
+ return -1
+ }
+ for i := len(s); i > 0; {
+ r := rune(s[i-1])
+ if r < utf8.RuneSelf {
+ if bytealg.IndexByteString(chars, s[i-1]) >= 0 {
+ return i - 1
+ }
+ i--
+ continue
+ }
+ r, size := utf8.DecodeLastRune(s[:i])
+ i -= size
+ if r != utf8.RuneError {
+ // r is 2 to 4 bytes
+ if len(chars) == size {
+ if chars == string(r) {
+ return i
+ }
+ continue
+ }
+ // Use bytealg.IndexString for performance if available.
+ if bytealg.MaxLen >= size {
+ if bytealg.IndexString(chars, string(r)) >= 0 {
+ return i
+ }
+ continue
+ }
+ }
+ for _, ch := range chars {
+ if r == ch {
+ return i
+ }
+ }
+ }
+ return -1
+}
+
+// Generic split: splits after each instance of sep,
+// including sepSave bytes of sep in the subslices.
+func genSplit(s, sep []byte, sepSave, n int) [][]byte {
+ if n == 0 {
+ return nil
+ }
+ if len(sep) == 0 {
+ return explode(s, n)
+ }
+ if n < 0 {
+ n = Count(s, sep) + 1
+ }
+ if n > len(s)+1 {
+ n = len(s) + 1
+ }
+
+ a := make([][]byte, n)
+ n--
+ i := 0
+ for i < n {
+ m := Index(s, sep)
+ if m < 0 {
+ break
+ }
+ a[i] = s[: m+sepSave : m+sepSave]
+ s = s[m+len(sep):]
+ i++
+ }
+ a[i] = s
+ return a[:i+1]
+}
+
+// SplitN slices s into subslices separated by sep and returns a slice of
+// the subslices between those separators.
+// If sep is empty, SplitN splits after each UTF-8 sequence.
+// The count determines the number of subslices to return:
+//
+// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
+// n == 0: the result is nil (zero subslices)
+// n < 0: all subslices
+//
+// To split around the first instance of a separator, see Cut.
+func SplitN(s, sep []byte, n int) [][]byte { return genSplit(s, sep, 0, n) }
+
+// SplitAfterN slices s into subslices after each instance of sep and
+// returns a slice of those subslices.
+// If sep is empty, SplitAfterN splits after each UTF-8 sequence.
+// The count determines the number of subslices to return:
+//
+// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
+// n == 0: the result is nil (zero subslices)
+// n < 0: all subslices
+func SplitAfterN(s, sep []byte, n int) [][]byte {
+ return genSplit(s, sep, len(sep), n)
+}
+
+// Split slices s into all subslices separated by sep and returns a slice of
+// the subslices between those separators.
+// If sep is empty, Split splits after each UTF-8 sequence.
+// It is equivalent to SplitN with a count of -1.
+//
+// To split around the first instance of a separator, see Cut.
+func Split(s, sep []byte) [][]byte { return genSplit(s, sep, 0, -1) }
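+
+// Illustrative cases (inputs chosen for the example):
+//
+//	Split([]byte("a,b,c"), []byte(",")) // ["a" "b" "c"]
+//	Split([]byte("abc"), []byte(""))    // ["a" "b" "c"], one subslice per rune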
+
+// SplitAfter slices s into all subslices after each instance of sep and
+// returns a slice of those subslices.
+// If sep is empty, SplitAfter splits after each UTF-8 sequence.
+// It is equivalent to SplitAfterN with a count of -1.
+func SplitAfter(s, sep []byte) [][]byte {
+ return genSplit(s, sep, len(sep), -1)
+}
+
+var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
+
+// Fields interprets s as a sequence of UTF-8-encoded code points.
+// It splits the slice s around each instance of one or more consecutive white space
+// characters, as defined by unicode.IsSpace, returning a slice of subslices of s or an
+// empty slice if s contains only white space.
+func Fields(s []byte) [][]byte {
+ // First count the fields.
+ // This is an exact count if s is ASCII, otherwise it is an approximation.
+ n := 0
+ wasSpace := 1
+ // setBits is used to track which bits are set in the bytes of s.
+ setBits := uint8(0)
+ for i := 0; i < len(s); i++ {
+ r := s[i]
+ setBits |= r
+ isSpace := int(asciiSpace[r])
+ n += wasSpace & ^isSpace
+ wasSpace = isSpace
+ }
+
+ if setBits >= utf8.RuneSelf {
+ // Some runes in the input slice are not ASCII.
+ return FieldsFunc(s, unicode.IsSpace)
+ }
+
+ // ASCII fast path
+ a := make([][]byte, n)
+ na := 0
+ fieldStart := 0
+ i := 0
+ // Skip spaces in the front of the input.
+ for i < len(s) && asciiSpace[s[i]] != 0 {
+ i++
+ }
+ fieldStart = i
+ for i < len(s) {
+ if asciiSpace[s[i]] == 0 {
+ i++
+ continue
+ }
+ a[na] = s[fieldStart:i:i]
+ na++
+ i++
+ // Skip spaces in between fields.
+ for i < len(s) && asciiSpace[s[i]] != 0 {
+ i++
+ }
+ fieldStart = i
+ }
+ if fieldStart < len(s) { // Last field might end at EOF.
+ a[na] = s[fieldStart:len(s):len(s)]
+ }
+ return a
+}
+
+// FieldsFunc interprets s as a sequence of UTF-8-encoded code points.
+// It splits the slice s at each run of code points c satisfying f(c) and
+// returns a slice of subslices of s. If all code points in s satisfy f(c), or
+// len(s) == 0, an empty slice is returned.
+//
+// FieldsFunc makes no guarantees about the order in which it calls f(c)
+// and assumes that f always returns the same value for a given c.
+func FieldsFunc(s []byte, f func(rune) bool) [][]byte {
+ // A span is used to record a slice of s of the form s[start:end].
+ // The start index is inclusive and the end index is exclusive.
+ type span struct {
+ start int
+ end int
+ }
+ spans := make([]span, 0, 32)
+
+ // Find the field start and end indices.
+ // Doing this in a separate pass (rather than slicing the string s
+ // and collecting the result substrings right away) is significantly
+ // more efficient, possibly due to cache effects.
+ start := -1 // valid span start if >= 0
+ for i := 0; i < len(s); {
+ size := 1
+ r := rune(s[i])
+ if r >= utf8.RuneSelf {
+ r, size = utf8.DecodeRune(s[i:])
+ }
+ if f(r) {
+ if start >= 0 {
+ spans = append(spans, span{start, i})
+ start = -1
+ }
+ } else {
+ if start < 0 {
+ start = i
+ }
+ }
+ i += size
+ }
+
+ // Last field might end at EOF.
+ if start >= 0 {
+ spans = append(spans, span{start, len(s)})
+ }
+
+ // Create subslices from recorded field indices.
+ a := make([][]byte, len(spans))
+ for i, span := range spans {
+ a[i] = s[span.start:span.end:span.end]
+ }
+
+ return a
+}
+
+// Join concatenates the elements of s to create a new byte slice. The separator
+// sep is placed between elements in the resulting slice.
+func Join(s [][]byte, sep []byte) []byte {
+ if len(s) == 0 {
+ return []byte{}
+ }
+ if len(s) == 1 {
+ // Just return a copy.
+ return append([]byte(nil), s[0]...)
+ }
+ n := len(sep) * (len(s) - 1)
+ for _, v := range s {
+ n += len(v)
+ }
+
+ b := make([]byte, n)
+ bp := copy(b, s[0])
+ for _, v := range s[1:] {
+ bp += copy(b[bp:], sep)
+ bp += copy(b[bp:], v)
+ }
+ return b
+}
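+
+// An illustrative sketch (inputs chosen for the example):
+//
+//	s := [][]byte{[]byte("foo"), []byte("bar"), []byte("baz")}
+//	Join(s, []byte(", ")) // []byte("foo, bar, baz")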
+
+// HasPrefix tests whether the byte slice s begins with prefix.
+func HasPrefix(s, prefix []byte) bool {
+ return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
+}
+
+// HasSuffix tests whether the byte slice s ends with suffix.
+func HasSuffix(s, suffix []byte) bool {
+ return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix)
+}
+
+// Map returns a copy of the byte slice s with all its characters modified
+// according to the mapping function. If mapping returns a negative value, the character is
+// dropped from the byte slice with no replacement. The characters in s and the
+// output are interpreted as UTF-8-encoded code points.
+func Map(mapping func(r rune) rune, s []byte) []byte {
+ // In the worst case, the slice can grow when mapped, making
+ // things unpleasant. But it's so rare we barge in assuming it's
+ // fine. It could also shrink but that falls out naturally.
+ maxbytes := len(s) // length of b
+ nbytes := 0 // number of bytes encoded in b
+ b := make([]byte, maxbytes)
+ for i := 0; i < len(s); {
+ wid := 1
+ r := rune(s[i])
+ if r >= utf8.RuneSelf {
+ r, wid = utf8.DecodeRune(s[i:])
+ }
+ r = mapping(r)
+ if r >= 0 {
+ rl := utf8.RuneLen(r)
+ if rl < 0 {
+ rl = len(string(utf8.RuneError))
+ }
+ if nbytes+rl > maxbytes {
+ // Grow the buffer.
+ maxbytes = maxbytes*2 + utf8.UTFMax
+ nb := make([]byte, maxbytes)
+ copy(nb, b[0:nbytes])
+ b = nb
+ }
+ nbytes += utf8.EncodeRune(b[nbytes:maxbytes], r)
+ }
+ i += wid
+ }
+ return b[0:nbytes]
+}
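+
+// An illustrative sketch: a rot13 mapping, a classic use of Map. The
+// function name and inputs are chosen only for the example.
+//
+//	rot13 := func(r rune) rune {
+//		switch {
+//		case r >= 'A' && r <= 'Z':
+//			return 'A' + (r-'A'+13)%26
+//		case r >= 'a' && r <= 'z':
+//			return 'a' + (r-'a'+13)%26
+//		}
+//		return r
+//	}
+//	Map(rot13, []byte("'Twas brillig")) // []byte("'Gjnf oevyyvt")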
+
+// Repeat returns a new byte slice consisting of count copies of b.
+//
+// It panics if count is negative or if
+// the result of (len(b) * count) overflows.
+func Repeat(b []byte, count int) []byte {
+ if count == 0 {
+ return []byte{}
+ }
+ // Since we cannot return an error on overflow,
+ // we should panic if the repeat will generate
+ // an overflow.
+ // See Issue golang.org/issue/16237.
+ if count < 0 {
+ panic("bytes: negative Repeat count")
+ } else if len(b)*count/count != len(b) {
+ panic("bytes: Repeat count causes overflow")
+ }
+
+ nb := make([]byte, len(b)*count)
+ bp := copy(nb, b)
+ for bp < len(nb) {
+ copy(nb[bp:], nb[:bp])
+ bp *= 2
+ }
+ return nb
+}
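+
+// An illustrative sketch: note that the loop above doubles the copied
+// prefix each pass, so the output is filled in O(log count) copy calls.
+//
+//	Repeat([]byte("na"), 2) // []byte("nana")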
+
+// ToUpper returns a copy of the byte slice s with all Unicode letters mapped to
+// their upper case.
+func ToUpper(s []byte) []byte {
+ isASCII, hasLower := true, false
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c >= utf8.RuneSelf {
+ isASCII = false
+ break
+ }
+ hasLower = hasLower || ('a' <= c && c <= 'z')
+ }
+
+ if isASCII { // optimize for ASCII-only byte slices.
+ if !hasLower {
+ // Just return a copy.
+ return append([]byte(""), s...)
+ }
+ b := make([]byte, len(s))
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if 'a' <= c && c <= 'z' {
+ c -= 'a' - 'A'
+ }
+ b[i] = c
+ }
+ return b
+ }
+ return Map(unicode.ToUpper, s)
+}
+
+// ToLower returns a copy of the byte slice s with all Unicode letters mapped to
+// their lower case.
+func ToLower(s []byte) []byte {
+ isASCII, hasUpper := true, false
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c >= utf8.RuneSelf {
+ isASCII = false
+ break
+ }
+ hasUpper = hasUpper || ('A' <= c && c <= 'Z')
+ }
+
+ if isASCII { // optimize for ASCII-only byte slices.
+ if !hasUpper {
+ return append([]byte(""), s...)
+ }
+ b := make([]byte, len(s))
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if 'A' <= c && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ b[i] = c
+ }
+ return b
+ }
+ return Map(unicode.ToLower, s)
+}
+
+// ToTitle treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their title case.
+func ToTitle(s []byte) []byte { return Map(unicode.ToTitle, s) }
+
+// ToUpperSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
+// upper case, giving priority to the special casing rules.
+func ToUpperSpecial(c unicode.SpecialCase, s []byte) []byte {
+ return Map(c.ToUpper, s)
+}
+
+// ToLowerSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
+// lower case, giving priority to the special casing rules.
+func ToLowerSpecial(c unicode.SpecialCase, s []byte) []byte {
+ return Map(c.ToLower, s)
+}
+
+// ToTitleSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
+// title case, giving priority to the special casing rules.
+func ToTitleSpecial(c unicode.SpecialCase, s []byte) []byte {
+ return Map(c.ToTitle, s)
+}
+
+// ToValidUTF8 treats s as UTF-8-encoded bytes and returns a copy with each run of bytes
+// representing invalid UTF-8 replaced with the bytes in replacement, which may be empty.
+func ToValidUTF8(s, replacement []byte) []byte {
+ b := make([]byte, 0, len(s)+len(replacement))
+ invalid := false // previous byte was from an invalid UTF-8 sequence
+ for i := 0; i < len(s); {
+ c := s[i]
+ if c < utf8.RuneSelf {
+ i++
+ invalid = false
+ b = append(b, c)
+ continue
+ }
+ _, wid := utf8.DecodeRune(s[i:])
+ if wid == 1 {
+ i++
+ if !invalid {
+ invalid = true
+ b = append(b, replacement...)
+ }
+ continue
+ }
+ invalid = false
+ b = append(b, s[i:i+wid]...)
+ i += wid
+ }
+ return b
+}
+
+// isSeparator reports whether the rune could mark a word boundary.
+// TODO: update when package unicode captures more of the properties.
+func isSeparator(r rune) bool {
+ // ASCII alphanumerics and underscore are not separators
+ if r <= 0x7F {
+ switch {
+ case '0' <= r && r <= '9':
+ return false
+ case 'a' <= r && r <= 'z':
+ return false
+ case 'A' <= r && r <= 'Z':
+ return false
+ case r == '_':
+ return false
+ }
+ return true
+ }
+ // Letters and digits are not separators
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ return false
+ }
+ // Otherwise, all we can do for now is treat spaces as separators.
+ return unicode.IsSpace(r)
+}
+
+// Title treats s as UTF-8-encoded bytes and returns a copy with all Unicode letters that begin
+// words mapped to their title case.
+//
+// Deprecated: The rule Title uses for word boundaries does not handle Unicode
+// punctuation properly. Use golang.org/x/text/cases instead.
+func Title(s []byte) []byte {
+ // Use a closure here to remember state.
+ // Hackish but effective. Depends on Map scanning in order and calling
+ // the closure once per rune.
+ prev := ' '
+ return Map(
+ func(r rune) rune {
+ if isSeparator(prev) {
+ prev = r
+ return unicode.ToTitle(r)
+ }
+ prev = r
+ return r
+ },
+ s)
+}
+
+// TrimLeftFunc treats s as UTF-8-encoded bytes and returns a subslice of s by slicing off
+// all leading UTF-8-encoded code points c that satisfy f(c).
+func TrimLeftFunc(s []byte, f func(r rune) bool) []byte {
+ i := indexFunc(s, f, false)
+ if i == -1 {
+ return nil
+ }
+ return s[i:]
+}
+
+// TrimRightFunc returns a subslice of s by slicing off all trailing
+// UTF-8-encoded code points c that satisfy f(c).
+func TrimRightFunc(s []byte, f func(r rune) bool) []byte {
+ i := lastIndexFunc(s, f, false)
+ if i >= 0 && s[i] >= utf8.RuneSelf {
+ _, wid := utf8.DecodeRune(s[i:])
+ i += wid
+ } else {
+ i++
+ }
+ return s[0:i]
+}
+
+// TrimFunc returns a subslice of s by slicing off all leading and trailing
+// UTF-8-encoded code points c that satisfy f(c).
+func TrimFunc(s []byte, f func(r rune) bool) []byte {
+ return TrimRightFunc(TrimLeftFunc(s, f), f)
+}
+
+// TrimPrefix returns s without the provided leading prefix string.
+// If s doesn't start with prefix, s is returned unchanged.
+func TrimPrefix(s, prefix []byte) []byte {
+ if HasPrefix(s, prefix) {
+ return s[len(prefix):]
+ }
+ return s
+}
+
+// TrimSuffix returns s without the provided trailing suffix string.
+// If s doesn't end with suffix, s is returned unchanged.
+func TrimSuffix(s, suffix []byte) []byte {
+ if HasSuffix(s, suffix) {
+ return s[:len(s)-len(suffix)]
+ }
+ return s
+}
+
+// IndexFunc interprets s as a sequence of UTF-8-encoded code points.
+// It returns the byte index in s of the first Unicode
+// code point satisfying f(c), or -1 if none do.
+func IndexFunc(s []byte, f func(r rune) bool) int {
+ return indexFunc(s, f, true)
+}
+
+// LastIndexFunc interprets s as a sequence of UTF-8-encoded code points.
+// It returns the byte index in s of the last Unicode
+// code point satisfying f(c), or -1 if none do.
+func LastIndexFunc(s []byte, f func(r rune) bool) int {
+ return lastIndexFunc(s, f, true)
+}
+
+// indexFunc is the same as IndexFunc except that if
+// truth==false, the sense of the predicate function is
+// inverted.
+func indexFunc(s []byte, f func(r rune) bool, truth bool) int {
+ start := 0
+ for start < len(s) {
+ wid := 1
+ r := rune(s[start])
+ if r >= utf8.RuneSelf {
+ r, wid = utf8.DecodeRune(s[start:])
+ }
+ if f(r) == truth {
+ return start
+ }
+ start += wid
+ }
+ return -1
+}
+
+// lastIndexFunc is the same as LastIndexFunc except that if
+// truth==false, the sense of the predicate function is
+// inverted.
+func lastIndexFunc(s []byte, f func(r rune) bool, truth bool) int {
+ for i := len(s); i > 0; {
+ r, size := rune(s[i-1]), 1
+ if r >= utf8.RuneSelf {
+ r, size = utf8.DecodeLastRune(s[0:i])
+ }
+ i -= size
+ if f(r) == truth {
+ return i
+ }
+ }
+ return -1
+}
+
+// asciiSet is a 32-byte value, where each bit represents the presence of a
+// given ASCII character in the set. The 128-bits of the lower 16 bytes,
+// starting with the least-significant bit of the lowest word to the
+// most-significant bit of the highest word, map to the full range of all
+// 128 ASCII characters. The 128-bits of the upper 16 bytes will be zeroed,
+// ensuring that any non-ASCII character will be reported as not in the set.
+// This allocates a total of 32 bytes even though the upper half
+// is unused to avoid bounds checks in asciiSet.contains.
+type asciiSet [8]uint32
+
+// makeASCIISet creates a set of ASCII characters and reports whether all
+// characters in chars are ASCII.
+func makeASCIISet(chars string) (as asciiSet, ok bool) {
+ for i := 0; i < len(chars); i++ {
+ c := chars[i]
+ if c >= utf8.RuneSelf {
+ return as, false
+ }
+ as[c/32] |= 1 << (c % 32)
+ }
+ return as, true
+}
+
+// contains reports whether c is inside the set.
+func (as *asciiSet) contains(c byte) bool {
+ return (as[c/32] & (1 << (c % 32))) != 0
+}
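+
+// A worked illustration of the layout: 'a' is byte 97, so it lives in
+// word 97/32 == 3 at bit 97%32 == 1, and contains('a') tests
+// as[3] & (1 << 1). Any c >= 128 indexes only the zeroed upper words,
+// so non-ASCII bytes are never reported as members.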
+
+// containsRune is a simplified version of strings.ContainsRune
+// to avoid importing the strings package.
+// We avoid bytes.ContainsRune to avoid allocating a temporary copy of s.
+func containsRune(s string, r rune) bool {
+ for _, c := range s {
+ if c == r {
+ return true
+ }
+ }
+ return false
+}
+
+// Trim returns a subslice of s by slicing off all leading and
+// trailing UTF-8-encoded code points contained in cutset.
+func Trim(s []byte, cutset string) []byte {
+ if len(s) == 0 {
+ // This is what we've historically done.
+ return nil
+ }
+ if cutset == "" {
+ return s
+ }
+ if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
+ return trimLeftByte(trimRightByte(s, cutset[0]), cutset[0])
+ }
+ if as, ok := makeASCIISet(cutset); ok {
+ return trimLeftASCII(trimRightASCII(s, &as), &as)
+ }
+ return trimLeftUnicode(trimRightUnicode(s, cutset), cutset)
+}
+
+// TrimLeft returns a subslice of s by slicing off all leading
+// UTF-8-encoded code points contained in cutset.
+func TrimLeft(s []byte, cutset string) []byte {
+ if len(s) == 0 {
+ // This is what we've historically done.
+ return nil
+ }
+ if cutset == "" {
+ return s
+ }
+ if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
+ return trimLeftByte(s, cutset[0])
+ }
+ if as, ok := makeASCIISet(cutset); ok {
+ return trimLeftASCII(s, &as)
+ }
+ return trimLeftUnicode(s, cutset)
+}
+
+func trimLeftByte(s []byte, c byte) []byte {
+ for len(s) > 0 && s[0] == c {
+ s = s[1:]
+ }
+ if len(s) == 0 {
+ // This is what we've historically done.
+ return nil
+ }
+ return s
+}
+
+func trimLeftASCII(s []byte, as *asciiSet) []byte {
+ for len(s) > 0 {
+ if !as.contains(s[0]) {
+ break
+ }
+ s = s[1:]
+ }
+ if len(s) == 0 {
+ // This is what we've historically done.
+ return nil
+ }
+ return s
+}
+
+func trimLeftUnicode(s []byte, cutset string) []byte {
+ for len(s) > 0 {
+ r, n := rune(s[0]), 1
+ if r >= utf8.RuneSelf {
+ r, n = utf8.DecodeRune(s)
+ }
+ if !containsRune(cutset, r) {
+ break
+ }
+ s = s[n:]
+ }
+ if len(s) == 0 {
+ // This is what we've historically done.
+ return nil
+ }
+ return s
+}
+
+// TrimRight returns a subslice of s by slicing off all trailing
+// UTF-8-encoded code points that are contained in cutset.
+func TrimRight(s []byte, cutset string) []byte {
+ if len(s) == 0 || cutset == "" {
+ return s
+ }
+ if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
+ return trimRightByte(s, cutset[0])
+ }
+ if as, ok := makeASCIISet(cutset); ok {
+ return trimRightASCII(s, &as)
+ }
+ return trimRightUnicode(s, cutset)
+}
+
+func trimRightByte(s []byte, c byte) []byte {
+ for len(s) > 0 && s[len(s)-1] == c {
+ s = s[:len(s)-1]
+ }
+ return s
+}
+
+func trimRightASCII(s []byte, as *asciiSet) []byte {
+ for len(s) > 0 {
+ if !as.contains(s[len(s)-1]) {
+ break
+ }
+ s = s[:len(s)-1]
+ }
+ return s
+}
+
+func trimRightUnicode(s []byte, cutset string) []byte {
+ for len(s) > 0 {
+ r, n := rune(s[len(s)-1]), 1
+ if r >= utf8.RuneSelf {
+ r, n = utf8.DecodeLastRune(s)
+ }
+ if !containsRune(cutset, r) {
+ break
+ }
+ s = s[:len(s)-n]
+ }
+ return s
+}
+
+// TrimSpace returns a subslice of s by slicing off all leading and
+// trailing white space, as defined by Unicode.
+func TrimSpace(s []byte) []byte {
+ // Fast path for ASCII: look for the first ASCII non-space byte
+ start := 0
+ for ; start < len(s); start++ {
+ c := s[start]
+ if c >= utf8.RuneSelf {
+ // If we run into a non-ASCII byte, fall back to the
+ // slower unicode-aware method on the remaining bytes
+ return TrimFunc(s[start:], unicode.IsSpace)
+ }
+ if asciiSpace[c] == 0 {
+ break
+ }
+ }
+
+ // Now look for the first ASCII non-space byte from the end
+ stop := len(s)
+ for ; stop > start; stop-- {
+ c := s[stop-1]
+ if c >= utf8.RuneSelf {
+ return TrimFunc(s[start:stop], unicode.IsSpace)
+ }
+ if asciiSpace[c] == 0 {
+ break
+ }
+ }
+
+ // At this point s[start:stop] starts and ends with an ASCII
+ // non-space byte, so we're done. Non-ASCII cases have already
+ // been handled above.
+ if start == stop {
+ // Special case to preserve previous TrimLeftFunc behavior,
+ // returning nil instead of empty slice if all spaces.
+ return nil
+ }
+ return s[start:stop]
+}
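+
+// Illustrative cases, including the all-space special case above:
+//
+//	TrimSpace([]byte(" \t\n hello \r\n")) // []byte("hello")
+//	TrimSpace([]byte("   "))              // nil, not an empty non-nil slice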
+
+// Runes interprets s as a sequence of UTF-8-encoded code points.
+// It returns a slice of runes (Unicode code points) equivalent to s.
+func Runes(s []byte) []rune {
+ t := make([]rune, utf8.RuneCount(s))
+ i := 0
+ for len(s) > 0 {
+ r, l := utf8.DecodeRune(s)
+ t[i] = r
+ i++
+ s = s[l:]
+ }
+ return t
+}
+
+// Replace returns a copy of the slice s with the first n
+// non-overlapping instances of old replaced by new.
+// If old is empty, it matches at the beginning of the slice
+// and after each UTF-8 sequence, yielding up to k+1 replacements
+// for a k-rune slice.
+// If n < 0, there is no limit on the number of replacements.
+func Replace(s, old, new []byte, n int) []byte {
+ m := 0
+ if n != 0 {
+ // Compute number of replacements.
+ m = Count(s, old)
+ }
+ if m == 0 {
+ // Just return a copy.
+ return append([]byte(nil), s...)
+ }
+ if n < 0 || m < n {
+ n = m
+ }
+
+ // Apply replacements to buffer.
+ t := make([]byte, len(s)+n*(len(new)-len(old)))
+ w := 0
+ start := 0
+ for i := 0; i < n; i++ {
+ j := start
+ if len(old) == 0 {
+ if i > 0 {
+ _, wid := utf8.DecodeRune(s[start:])
+ j += wid
+ }
+ } else {
+ j += Index(s[start:], old)
+ }
+ w += copy(t[w:], s[start:j])
+ w += copy(t[w:], new)
+ start = j + len(old)
+ }
+ w += copy(t[w:], s[start:])
+ return t[0:w]
+}
+
+// ReplaceAll returns a copy of the slice s with all
+// non-overlapping instances of old replaced by new.
+// If old is empty, it matches at the beginning of the slice
+// and after each UTF-8 sequence, yielding up to k+1 replacements
+// for a k-rune slice.
+func ReplaceAll(s, old, new []byte) []byte {
+ return Replace(s, old, new, -1)
+}
+
+// EqualFold reports whether s and t, interpreted as UTF-8 strings,
+// are equal under simple Unicode case-folding, which is a more general
+// form of case-insensitivity.
+func EqualFold(s, t []byte) bool {
+ for len(s) != 0 && len(t) != 0 {
+ // Extract first rune from each.
+ var sr, tr rune
+ if s[0] < utf8.RuneSelf {
+ sr, s = rune(s[0]), s[1:]
+ } else {
+ r, size := utf8.DecodeRune(s)
+ sr, s = r, s[size:]
+ }
+ if t[0] < utf8.RuneSelf {
+ tr, t = rune(t[0]), t[1:]
+ } else {
+ r, size := utf8.DecodeRune(t)
+ tr, t = r, t[size:]
+ }
+
+ // If they match, keep going; if not, return false.
+
+ // Easy case.
+ if tr == sr {
+ continue
+ }
+
+ // Make sr < tr to simplify what follows.
+ if tr < sr {
+ tr, sr = sr, tr
+ }
+ // Fast check for ASCII.
+ if tr < utf8.RuneSelf {
+ // ASCII only, sr/tr must be upper/lower case
+ if 'A' <= sr && sr <= 'Z' && tr == sr+'a'-'A' {
+ continue
+ }
+ return false
+ }
+
+ // General case. SimpleFold(x) returns the next equivalent rune > x
+ // or wraps around to smaller values.
+ r := unicode.SimpleFold(sr)
+ for r != sr && r < tr {
+ r = unicode.SimpleFold(r)
+ }
+ if r == tr {
+ continue
+ }
+ return false
+ }
+
+ // One string is empty. Are both?
+ return len(s) == len(t)
+}
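+
+// Illustrative cases: simple folding equates more than ASCII case pairs.
+//
+//	EqualFold([]byte("Go"), []byte("GO"))    // true
+//	EqualFold([]byte("K"), []byte("\u212A")) // true: the Kelvin sign folds to 'k'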
+
+// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
+func Index(s, sep []byte) int {
+ n := len(sep)
+ switch {
+ case n == 0:
+ return 0
+ case n == 1:
+ return IndexByte(s, sep[0])
+ case n == len(s):
+ if Equal(sep, s) {
+ return 0
+ }
+ return -1
+ case n > len(s):
+ return -1
+ case n <= bytealg.MaxLen:
+ // Use brute force when s and sep both are small
+ if len(s) <= bytealg.MaxBruteForce {
+ return bytealg.Index(s, sep)
+ }
+ c0 := sep[0]
+ c1 := sep[1]
+ i := 0
+ t := len(s) - n + 1
+ fails := 0
+ for i < t {
+ if s[i] != c0 {
+ // IndexByte is faster than bytealg.Index, so use it as long as
+ // we're not getting lots of false positives.
+ o := IndexByte(s[i+1:t], c0)
+ if o < 0 {
+ return -1
+ }
+ i += o + 1
+ }
+ if s[i+1] == c1 && Equal(s[i:i+n], sep) {
+ return i
+ }
+ fails++
+ i++
+ // Switch to bytealg.Index when IndexByte produces too many false positives.
+ if fails > bytealg.Cutover(i) {
+ r := bytealg.Index(s[i:], sep)
+ if r >= 0 {
+ return r + i
+ }
+ return -1
+ }
+ }
+ return -1
+ }
+ c0 := sep[0]
+ c1 := sep[1]
+ i := 0
+ fails := 0
+ t := len(s) - n + 1
+ for i < t {
+ if s[i] != c0 {
+ o := IndexByte(s[i+1:t], c0)
+ if o < 0 {
+ break
+ }
+ i += o + 1
+ }
+ if s[i+1] == c1 && Equal(s[i:i+n], sep) {
+ return i
+ }
+ i++
+ fails++
+ if fails >= 4+i>>4 && i < t {
+ // Give up on IndexByte, it isn't skipping ahead
+ // far enough to be better than Rabin-Karp.
+ // Experiments (using IndexPeriodic) suggest
+ // the cutover is about 16 byte skips.
+ // TODO: if large prefixes of sep are matching
+ // we should cutover at even larger average skips,
+ // because Equal becomes that much more expensive.
+ // This code does not take that effect into account.
+ j := bytealg.IndexRabinKarpBytes(s[i:], sep)
+ if j < 0 {
+ return -1
+ }
+ return i + j
+ }
+ }
+ return -1
+}
+
+// Cut slices s around the first instance of sep,
+// returning the text before and after sep.
+// The found result reports whether sep appears in s.
+// If sep does not appear in s, Cut returns s, nil, false.
+//
+// Cut returns slices of the original slice s, not copies.
+func Cut(s, sep []byte) (before, after []byte, found bool) {
+ if i := Index(s, sep); i >= 0 {
+ return s[:i], s[i+len(sep):], true
+ }
+ return s, nil, false
+}
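+
+// An illustrative sketch: splitting a key=value pair around the first '='.
+//
+//	k, v, ok := Cut([]byte("key=value"), []byte("="))
+//	// k == []byte("key"), v == []byte("value"), ok == true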
diff --git a/contrib/go/_std_1.19/src/bytes/reader.go b/contrib/go/_std_1.19/src/bytes/reader.go
new file mode 100644
index 0000000000..81c22aa029
--- /dev/null
+++ b/contrib/go/_std_1.19/src/bytes/reader.go
@@ -0,0 +1,159 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytes
+
+import (
+ "errors"
+ "io"
+ "unicode/utf8"
+)
+
+// A Reader implements the io.Reader, io.ReaderAt, io.WriterTo, io.Seeker,
+// io.ByteScanner, and io.RuneScanner interfaces by reading from
+// a byte slice.
+// Unlike a Buffer, a Reader is read-only and supports seeking.
+// The zero value for Reader operates like a Reader of an empty slice.
+type Reader struct {
+ s []byte
+ i int64 // current reading index
+ prevRune int // index of previous rune; or < 0
+}
+
+// Len returns the number of bytes of the unread portion of the
+// slice.
+func (r *Reader) Len() int {
+ if r.i >= int64(len(r.s)) {
+ return 0
+ }
+ return int(int64(len(r.s)) - r.i)
+}
+
+// Size returns the original length of the underlying byte slice.
+// Size is the number of bytes available for reading via ReadAt.
+// The result is unaffected by any method calls except Reset.
+func (r *Reader) Size() int64 { return int64(len(r.s)) }
+
+// Read implements the io.Reader interface.
+func (r *Reader) Read(b []byte) (n int, err error) {
+ if r.i >= int64(len(r.s)) {
+ return 0, io.EOF
+ }
+ r.prevRune = -1
+ n = copy(b, r.s[r.i:])
+ r.i += int64(n)
+ return
+}
+
+// ReadAt implements the io.ReaderAt interface.
+func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) {
+ // cannot modify state - see io.ReaderAt
+ if off < 0 {
+ return 0, errors.New("bytes.Reader.ReadAt: negative offset")
+ }
+ if off >= int64(len(r.s)) {
+ return 0, io.EOF
+ }
+ n = copy(b, r.s[off:])
+ if n < len(b) {
+ err = io.EOF
+ }
+ return
+}
+
+// ReadByte implements the io.ByteReader interface.
+func (r *Reader) ReadByte() (byte, error) {
+ r.prevRune = -1
+ if r.i >= int64(len(r.s)) {
+ return 0, io.EOF
+ }
+ b := r.s[r.i]
+ r.i++
+ return b, nil
+}
+
+// UnreadByte complements ReadByte in implementing the io.ByteScanner interface.
+func (r *Reader) UnreadByte() error {
+ if r.i <= 0 {
+ return errors.New("bytes.Reader.UnreadByte: at beginning of slice")
+ }
+ r.prevRune = -1
+ r.i--
+ return nil
+}
+
+// ReadRune implements the io.RuneReader interface.
+func (r *Reader) ReadRune() (ch rune, size int, err error) {
+ if r.i >= int64(len(r.s)) {
+ r.prevRune = -1
+ return 0, 0, io.EOF
+ }
+ r.prevRune = int(r.i)
+ if c := r.s[r.i]; c < utf8.RuneSelf {
+ r.i++
+ return rune(c), 1, nil
+ }
+ ch, size = utf8.DecodeRune(r.s[r.i:])
+ r.i += int64(size)
+ return
+}
+
+// UnreadRune complements ReadRune in implementing the io.RuneScanner interface.
+func (r *Reader) UnreadRune() error {
+ if r.i <= 0 {
+ return errors.New("bytes.Reader.UnreadRune: at beginning of slice")
+ }
+ if r.prevRune < 0 {
+ return errors.New("bytes.Reader.UnreadRune: previous operation was not ReadRune")
+ }
+ r.i = int64(r.prevRune)
+ r.prevRune = -1
+ return nil
+}
+
+// Seek implements the io.Seeker interface.
+func (r *Reader) Seek(offset int64, whence int) (int64, error) {
+ r.prevRune = -1
+ var abs int64
+ switch whence {
+ case io.SeekStart:
+ abs = offset
+ case io.SeekCurrent:
+ abs = r.i + offset
+ case io.SeekEnd:
+ abs = int64(len(r.s)) + offset
+ default:
+ return 0, errors.New("bytes.Reader.Seek: invalid whence")
+ }
+ if abs < 0 {
+ return 0, errors.New("bytes.Reader.Seek: negative position")
+ }
+ r.i = abs
+ return abs, nil
+}
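+
+// An illustrative sketch (offsets chosen for the example):
+//
+//	r := NewReader([]byte("0123456789"))
+//	r.Seek(-3, io.SeekEnd) // position 7, counted from the end
+//	b, _ := r.ReadByte()   // b == '7'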
+
+// WriteTo implements the io.WriterTo interface.
+func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
+ r.prevRune = -1
+ if r.i >= int64(len(r.s)) {
+ return 0, nil
+ }
+ b := r.s[r.i:]
+ m, err := w.Write(b)
+ if m > len(b) {
+ panic("bytes.Reader.WriteTo: invalid Write count")
+ }
+ r.i += int64(m)
+ n = int64(m)
+ if m != len(b) && err == nil {
+ err = io.ErrShortWrite
+ }
+ return
+}
+
+// Reset resets the Reader to be reading from b.
+func (r *Reader) Reset(b []byte) { *r = Reader{b, 0, -1} }
+
+// NewReader returns a new Reader reading from b.
+func NewReader(b []byte) *Reader { return &Reader{b, 0, -1} }
diff --git a/contrib/go/_std_1.19/src/compress/flate/deflate.go b/contrib/go/_std_1.19/src/compress/flate/deflate.go
new file mode 100644
index 0000000000..93efd7cafb
--- /dev/null
+++ b/contrib/go/_std_1.19/src/compress/flate/deflate.go
@@ -0,0 +1,746 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+)
+
+const (
+ NoCompression = 0
+ BestSpeed = 1
+ BestCompression = 9
+ DefaultCompression = -1
+
+ // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
+ // entropy encoding. This mode is useful in compressing data that has
+ // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
+ // that lacks an entropy encoder. Compression gains are achieved when
+ // certain bytes in the input stream occur more frequently than others.
+ //
+ // Note that HuffmanOnly produces a compressed output that is
+ // RFC 1951 compliant. That is, any valid DEFLATE decompressor will
+ // continue to be able to decompress this output.
+ HuffmanOnly = -2
+)
+
+const (
+ logWindowSize = 15
+ windowSize = 1 << logWindowSize
+ windowMask = windowSize - 1
+
+ // The LZ77 step produces a sequence of literal tokens and <length, offset>
+ // pair tokens. The offset is also known as distance. The underlying wire
+ // format limits the range of lengths and offsets. For example, there are
+ // 256 legitimate lengths: those in the range [3, 258]. This package's
+ // compressor uses a higher minimum match length, enabling optimizations
+ // such as finding matches via 32-bit loads and compares.
+ baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
+ minMatchLength = 4 // The smallest match length that the compressor actually emits
+ maxMatchLength = 258 // The largest match length
+ baseMatchOffset = 1 // The smallest match offset
+ maxMatchOffset = 1 << 15 // The largest match offset
+
+ // The maximum number of tokens we put into a single flate block, just to
+ // stop things from getting too large.
+ maxFlateBlockTokens = 1 << 14
+ maxStoreBlockSize = 65535
+ hashBits = 17 // After 17 performance degrades
+ hashSize = 1 << hashBits
+ hashMask = (1 << hashBits) - 1
+ maxHashOffset = 1 << 24
+
+ skipNever = math.MaxInt32
+)
+
+type compressionLevel struct {
+ level, good, lazy, nice, chain, fastSkipHashing int
+}
+
+var levels = []compressionLevel{
+ {0, 0, 0, 0, 0, 0}, // NoCompression.
+ {1, 0, 0, 0, 0, 0}, // BestSpeed uses a custom algorithm; see deflatefast.go.
+ // For levels 2-3 we don't bother trying with lazy matches.
+ {2, 4, 0, 16, 8, 5},
+ {3, 4, 0, 32, 32, 6},
+ // Levels 4-9 use increasingly more lazy matching
+ // and increasingly stringent conditions for "good enough".
+ {4, 4, 4, 16, 16, skipNever},
+ {5, 8, 16, 32, 32, skipNever},
+ {6, 8, 16, 128, 128, skipNever},
+ {7, 8, 32, 128, 256, skipNever},
+ {8, 32, 128, 258, 1024, skipNever},
+ {9, 32, 258, 258, 4096, skipNever},
+}
+
+type compressor struct {
+ compressionLevel
+
+ w *huffmanBitWriter
+ bulkHasher func([]byte, []uint32)
+
+ // compression algorithm
+ fill func(*compressor, []byte) int // copy data to window
+ step func(*compressor) // process window
+ sync bool // requesting flush
+ bestSpeed *deflateFast // Encoder for BestSpeed
+
+ // Input hash chains
+ // hashHead[hashValue] contains the largest inputIndex with the specified hash value
+ // If hashHead[hashValue] is within the current window, then
+ // hashPrev[hashHead[hashValue] & windowMask] contains the previous index
+ // with the same hash value.
+ chainHead int
+ hashHead [hashSize]uint32
+ hashPrev [windowSize]uint32
+ hashOffset int
+
+ // input window: unprocessed data is window[index:windowEnd]
+ index int
+ window []byte
+ windowEnd int
+ blockStart int // window index where current tokens start
+ byteAvailable bool // if true, still need to process window[index-1].
+
+ // queued output tokens
+ tokens []token
+
+ // deflate state
+ length int
+ offset int
+ maxInsertIndex int
+ err error
+
+ // hashMatch must be able to contain hashes for the maximum match length.
+ hashMatch [maxMatchLength - 1]uint32
+}
+
+func (d *compressor) fillDeflate(b []byte) int {
+ if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
+ // shift the window by windowSize
+ copy(d.window, d.window[windowSize:2*windowSize])
+ d.index -= windowSize
+ d.windowEnd -= windowSize
+ if d.blockStart >= windowSize {
+ d.blockStart -= windowSize
+ } else {
+ d.blockStart = math.MaxInt32
+ }
+ d.hashOffset += windowSize
+ if d.hashOffset > maxHashOffset {
+ delta := d.hashOffset - 1
+ d.hashOffset -= delta
+ d.chainHead -= delta
+
+ // Iterate over slices instead of arrays to avoid copying
+ // the entire table onto the stack (Issue #18625).
+ for i, v := range d.hashPrev[:] {
+ if int(v) > delta {
+ d.hashPrev[i] = uint32(int(v) - delta)
+ } else {
+ d.hashPrev[i] = 0
+ }
+ }
+ for i, v := range d.hashHead[:] {
+ if int(v) > delta {
+ d.hashHead[i] = uint32(int(v) - delta)
+ } else {
+ d.hashHead[i] = 0
+ }
+ }
+ }
+ }
+ n := copy(d.window[d.windowEnd:], b)
+ d.windowEnd += n
+ return n
+}
+
+func (d *compressor) writeBlock(tokens []token, index int) error {
+ if index > 0 {
+ var window []byte
+ if d.blockStart <= index {
+ window = d.window[d.blockStart:index]
+ }
+ d.blockStart = index
+ d.w.writeBlock(tokens, false, window)
+ return d.w.err
+ }
+ return nil
+}
+
+// fillWindow will fill the current window with the supplied
+// dictionary and calculate all hashes.
+// This is much faster than doing a full encode.
+// Should only be used after a reset.
+func (d *compressor) fillWindow(b []byte) {
+ // Do not fill window if we are in store-only mode.
+ if d.compressionLevel.level < 2 {
+ return
+ }
+ if d.index != 0 || d.windowEnd != 0 {
+ panic("internal error: fillWindow called with stale data")
+ }
+
+ // If we are given too much, cut it.
+ if len(b) > windowSize {
+ b = b[len(b)-windowSize:]
+ }
+ // Add all to window.
+ n := copy(d.window, b)
+
+ // Calculate 256 hashes at a time (more L1 cache hits)
+ loops := (n + 256 - minMatchLength) / 256
+ for j := 0; j < loops; j++ {
+ index := j * 256
+ end := index + 256 + minMatchLength - 1
+ if end > n {
+ end = n
+ }
+ toCheck := d.window[index:end]
+ dstSize := len(toCheck) - minMatchLength + 1
+
+ if dstSize <= 0 {
+ continue
+ }
+
+ dst := d.hashMatch[:dstSize]
+ d.bulkHasher(toCheck, dst)
+ for i, val := range dst {
+ di := i + index
+ hh := &d.hashHead[val&hashMask]
+ // Get previous value with the same hash.
+ // Our chain should point to the previous value.
+ d.hashPrev[di&windowMask] = *hh
+ // Set the head of the hash chain to us.
+ *hh = uint32(di + d.hashOffset)
+ }
+ }
+ // Update window information.
+ d.windowEnd = n
+ d.index = n
+}
+
+// Try to find a match starting at pos whose length is greater than prevLength.
+// We only look at d.chain possibilities before giving up.
+func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
+ minMatchLook := maxMatchLength
+ if lookahead < minMatchLook {
+ minMatchLook = lookahead
+ }
+
+ win := d.window[0 : pos+minMatchLook]
+
+ // We quit when we get a match that's at least nice long
+ nice := len(win) - pos
+ if d.nice < nice {
+ nice = d.nice
+ }
+
+ // If we've got a match that's good enough, only look in 1/4 the chain.
+ tries := d.chain
+ length = prevLength
+ if length >= d.good {
+ tries >>= 2
+ }
+
+ wEnd := win[pos+length]
+ wPos := win[pos:]
+ minIndex := pos - windowSize
+
+ for i := prevHead; tries > 0; tries-- {
+ if wEnd == win[i+length] {
+ n := matchLen(win[i:], wPos, minMatchLook)
+
+ if n > length && (n > minMatchLength || pos-i <= 4096) {
+ length = n
+ offset = pos - i
+ ok = true
+ if n >= nice {
+ // The match is good enough that we don't try to find a better one.
+ break
+ }
+ wEnd = win[pos+n]
+ }
+ }
+ if i == minIndex {
+ // hashPrev[i & windowMask] has already been overwritten, so stop now.
+ break
+ }
+ i = int(d.hashPrev[i&windowMask]) - d.hashOffset
+ if i < minIndex || i < 0 {
+ break
+ }
+ }
+ return
+}
+
+func (d *compressor) writeStoredBlock(buf []byte) error {
+ if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
+ return d.w.err
+ }
+ d.w.writeBytes(buf)
+ return d.w.err
+}
+
+const hashmul = 0x1e35a7bd
+
+// hash4 returns a hash representation of the first 4 bytes
+// of the supplied slice.
+// The caller must ensure that len(b) >= 4.
+func hash4(b []byte) uint32 {
+ return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits)
+}
+
+// bulkHash4 will compute hashes using the same
+// algorithm as hash4
+func bulkHash4(b []byte, dst []uint32) {
+ if len(b) < minMatchLength {
+ return
+ }
+ hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+ dst[0] = (hb * hashmul) >> (32 - hashBits)
+ end := len(b) - minMatchLength + 1
+ for i := 1; i < end; i++ {
+ hb = (hb << 8) | uint32(b[i+3])
+ dst[i] = (hb * hashmul) >> (32 - hashBits)
+ }
+}
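+
+// A worked illustration of the rolling update above: for b = "abcde",
+// dst[0] hashes the window "abcd". Shifting hb left by 8 discards 'a'
+// (uint32 overflow) and OR-ing in b[4] == 'e' forms the window "bcde"
+// for dst[1], so each subsequent hash costs O(1) instead of rehashing
+// all four bytes.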
+
+// matchLen returns the number of matching bytes in a and b
+// up to length 'max'. Both slices must be at least 'max'
+// bytes in size.
+func matchLen(a, b []byte, max int) int {
+ a = a[:max]
+ b = b[:len(a)]
+ for i, av := range a {
+ if b[i] != av {
+ return i
+ }
+ }
+ return max
+}
+
+// encSpeed will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) encSpeed() {
+ // We only compress if we have maxStoreBlockSize.
+ if d.windowEnd < maxStoreBlockSize {
+ if !d.sync {
+ return
+ }
+
+ // Handle small sizes.
+ if d.windowEnd < 128 {
+ switch {
+ case d.windowEnd == 0:
+ return
+ case d.windowEnd <= 16:
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ default:
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+ d.err = d.w.err
+ }
+ d.windowEnd = 0
+ d.bestSpeed.reset()
+ return
+ }
+
+ }
+ // Encode the block.
+ d.tokens = d.bestSpeed.encode(d.tokens[:0], d.window[:d.windowEnd])
+
+ // If we removed less than 1/16th, Huffman compress the block.
+ if len(d.tokens) > d.windowEnd-(d.windowEnd>>4) {
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+ } else {
+ d.w.writeBlockDynamic(d.tokens, false, d.window[:d.windowEnd])
+ }
+ d.err = d.w.err
+ d.windowEnd = 0
+}
+
+func (d *compressor) initDeflate() {
+ d.window = make([]byte, 2*windowSize)
+ d.hashOffset = 1
+ d.tokens = make([]token, 0, maxFlateBlockTokens+1)
+ d.length = minMatchLength - 1
+ d.offset = 0
+ d.byteAvailable = false
+ d.index = 0
+ d.chainHead = -1
+ d.bulkHasher = bulkHash4
+}
+
+func (d *compressor) deflate() {
+ if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
+ return
+ }
+
+ d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+
+Loop:
+ for {
+ if d.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ lookahead := d.windowEnd - d.index
+ if lookahead < minMatchLength+maxMatchLength {
+ if !d.sync {
+ break Loop
+ }
+ if d.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ if lookahead == 0 {
+ // Flush current output block if any.
+ if d.byteAvailable {
+ // There is still one pending token that needs to be flushed
+ d.tokens = append(d.tokens, literalToken(uint32(d.window[d.index-1])))
+ d.byteAvailable = false
+ }
+ if len(d.tokens) > 0 {
+ if d.err = d.writeBlock(d.tokens, d.index); d.err != nil {
+ return
+ }
+ d.tokens = d.tokens[:0]
+ }
+ break Loop
+ }
+ }
+ if d.index < d.maxInsertIndex {
+ // Update the hash
+ hash := hash4(d.window[d.index : d.index+minMatchLength])
+ hh := &d.hashHead[hash&hashMask]
+ d.chainHead = int(*hh)
+ d.hashPrev[d.index&windowMask] = uint32(d.chainHead)
+ *hh = uint32(d.index + d.hashOffset)
+ }
+ prevLength := d.length
+ prevOffset := d.offset
+ d.length = minMatchLength - 1
+ d.offset = 0
+ minIndex := d.index - windowSize
+ if minIndex < 0 {
+ minIndex = 0
+ }
+
+ if d.chainHead-d.hashOffset >= minIndex &&
+ (d.fastSkipHashing != skipNever && lookahead > minMatchLength-1 ||
+ d.fastSkipHashing == skipNever && lookahead > prevLength && prevLength < d.lazy) {
+ if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
+ d.length = newLength
+ d.offset = newOffset
+ }
+ }
+ if d.fastSkipHashing != skipNever && d.length >= minMatchLength ||
+ d.fastSkipHashing == skipNever && prevLength >= minMatchLength && d.length <= prevLength {
+ // There was a match at the previous step, and the current match is
+ // not better. Output the previous match.
+ if d.fastSkipHashing != skipNever {
+ d.tokens = append(d.tokens, matchToken(uint32(d.length-baseMatchLength), uint32(d.offset-baseMatchOffset)))
+ } else {
+ d.tokens = append(d.tokens, matchToken(uint32(prevLength-baseMatchLength), uint32(prevOffset-baseMatchOffset)))
+ }
+ // Insert in the hash table all strings up to the end of the match.
+ // index and index-1 are already inserted. If there is not enough
+ // lookahead, the last two strings are not inserted into the hash
+ // table.
+ if d.length <= d.fastSkipHashing {
+ var newIndex int
+ if d.fastSkipHashing != skipNever {
+ newIndex = d.index + d.length
+ } else {
+ newIndex = d.index + prevLength - 1
+ }
+ index := d.index
+ for index++; index < newIndex; index++ {
+ if index < d.maxInsertIndex {
+ hash := hash4(d.window[index : index+minMatchLength])
+ // Get previous value with the same hash.
+ // Our chain should point to the previous value.
+ hh := &d.hashHead[hash&hashMask]
+ d.hashPrev[index&windowMask] = *hh
+ // Set the head of the hash chain to us.
+ *hh = uint32(index + d.hashOffset)
+ }
+ }
+ d.index = index
+
+ if d.fastSkipHashing == skipNever {
+ d.byteAvailable = false
+ d.length = minMatchLength - 1
+ }
+ } else {
+ // For matches this long, we don't bother inserting each individual
+ // item into the table.
+ d.index += d.length
+ }
+ if len(d.tokens) == maxFlateBlockTokens {
+ // The block includes the current character
+ if d.err = d.writeBlock(d.tokens, d.index); d.err != nil {
+ return
+ }
+ d.tokens = d.tokens[:0]
+ }
+ } else {
+ if d.fastSkipHashing != skipNever || d.byteAvailable {
+ i := d.index - 1
+ if d.fastSkipHashing != skipNever {
+ i = d.index
+ }
+ d.tokens = append(d.tokens, literalToken(uint32(d.window[i])))
+ if len(d.tokens) == maxFlateBlockTokens {
+ if d.err = d.writeBlock(d.tokens, i+1); d.err != nil {
+ return
+ }
+ d.tokens = d.tokens[:0]
+ }
+ }
+ d.index++
+ if d.fastSkipHashing == skipNever {
+ d.byteAvailable = true
+ }
+ }
+ }
+}
+
+func (d *compressor) fillStore(b []byte) int {
+ n := copy(d.window[d.windowEnd:], b)
+ d.windowEnd += n
+ return n
+}
+
+func (d *compressor) store() {
+ if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ d.windowEnd = 0
+ }
+}
+
+// storeHuff compresses and stores the currently added data
+// when d.window is full or we are at the end of the stream.
+// Any error that occurred will be in d.err.
+func (d *compressor) storeHuff() {
+ if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
+ return
+ }
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+ d.err = d.w.err
+ d.windowEnd = 0
+}
+
+func (d *compressor) write(b []byte) (n int, err error) {
+ if d.err != nil {
+ return 0, d.err
+ }
+ n = len(b)
+ for len(b) > 0 {
+ d.step(d)
+ b = b[d.fill(d, b):]
+ if d.err != nil {
+ return 0, d.err
+ }
+ }
+ return n, nil
+}
+
+func (d *compressor) syncFlush() error {
+ if d.err != nil {
+ return d.err
+ }
+ d.sync = true
+ d.step(d)
+ if d.err == nil {
+ d.w.writeStoredHeader(0, false)
+ d.w.flush()
+ d.err = d.w.err
+ }
+ d.sync = false
+ return d.err
+}
+
+func (d *compressor) init(w io.Writer, level int) (err error) {
+ d.w = newHuffmanBitWriter(w)
+
+ switch {
+ case level == NoCompression:
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillStore
+ d.step = (*compressor).store
+ case level == HuffmanOnly:
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillStore
+ d.step = (*compressor).storeHuff
+ case level == BestSpeed:
+ d.compressionLevel = levels[level]
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillStore
+ d.step = (*compressor).encSpeed
+ d.bestSpeed = newDeflateFast()
+ d.tokens = make([]token, maxStoreBlockSize)
+ case level == DefaultCompression:
+ level = 6
+ fallthrough
+ case 2 <= level && level <= 9:
+ d.compressionLevel = levels[level]
+ d.initDeflate()
+ d.fill = (*compressor).fillDeflate
+ d.step = (*compressor).deflate
+ default:
+ return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
+ }
+ return nil
+}
+
+func (d *compressor) reset(w io.Writer) {
+ d.w.reset(w)
+ d.sync = false
+ d.err = nil
+ switch d.compressionLevel.level {
+ case NoCompression:
+ d.windowEnd = 0
+ case BestSpeed:
+ d.windowEnd = 0
+ d.tokens = d.tokens[:0]
+ d.bestSpeed.reset()
+ default:
+ d.chainHead = -1
+ for i := range d.hashHead {
+ d.hashHead[i] = 0
+ }
+ for i := range d.hashPrev {
+ d.hashPrev[i] = 0
+ }
+ d.hashOffset = 1
+ d.index, d.windowEnd = 0, 0
+ d.blockStart, d.byteAvailable = 0, false
+ d.tokens = d.tokens[:0]
+ d.length = minMatchLength - 1
+ d.offset = 0
+ d.maxInsertIndex = 0
+ }
+}
+
+func (d *compressor) close() error {
+ if d.err == errWriterClosed {
+ return nil
+ }
+ if d.err != nil {
+ return d.err
+ }
+ d.sync = true
+ d.step(d)
+ if d.err != nil {
+ return d.err
+ }
+ if d.w.writeStoredHeader(0, true); d.w.err != nil {
+ return d.w.err
+ }
+ d.w.flush()
+ if d.w.err != nil {
+ return d.w.err
+ }
+ d.err = errWriterClosed
+ return nil
+}
+
+// NewWriter returns a new Writer compressing data at the given level.
+// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
+// higher levels typically run slower but compress more. Level 0
+// (NoCompression) does not attempt any compression; it only adds the
+// necessary DEFLATE framing.
+// Level -1 (DefaultCompression) uses the default compression level.
+// Level -2 (HuffmanOnly) will use Huffman compression only, giving
+// a very fast compression for all types of input, but sacrificing considerable
+// compression efficiency.
+//
+// If level is in the range [-2, 9] then the error returned will be nil.
+// Otherwise the error returned will be non-nil.
+func NewWriter(w io.Writer, level int) (*Writer, error) {
+ var dw Writer
+ if err := dw.d.init(w, level); err != nil {
+ return nil, err
+ }
+ return &dw, nil
+}
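+
+// For illustration, a minimal round-trip sketch using this API together
+// with NewReader (defined in inflate.go):
+//
+//	var buf bytes.Buffer
+//	zw, err := NewWriter(&buf, BestSpeed)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	zw.Write([]byte("hello, hello, hello"))
+//	zw.Close()
+//
+//	zr := NewReader(&buf)
+//	plain, _ := io.ReadAll(zr) // "hello, hello, hello"
+//	zr.Close()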
+
+// NewWriterDict is like NewWriter but initializes the new
+// Writer with a preset dictionary. The returned Writer behaves
+// as if the dictionary had been written to it without producing
+// any compressed output. The compressed data written to w
+// can only be decompressed by a Reader initialized with the
+// same dictionary.
+func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
+ dw := &dictWriter{w}
+ zw, err := NewWriter(dw, level)
+ if err != nil {
+ return nil, err
+ }
+ zw.d.fillWindow(dict)
+ zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
+ return zw, err
+}
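+
+// For illustration, a sketch of the matching reader side: output from a
+// Writer created by NewWriterDict can only be decoded by NewReaderDict
+// (defined in inflate.go) using the same dictionary.
+//
+//	dict := []byte("common preamble")
+//	var buf bytes.Buffer
+//	zw, _ := NewWriterDict(&buf, DefaultCompression, dict)
+//	zw.Write([]byte("common preamble, then the payload"))
+//	zw.Close()
+//	zr := NewReaderDict(&buf, dict)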
+
+type dictWriter struct {
+ w io.Writer
+}
+
+func (w *dictWriter) Write(b []byte) (n int, err error) {
+ return w.w.Write(b)
+}
+
+var errWriterClosed = errors.New("flate: closed writer")
+
+// A Writer takes data written to it and writes the compressed
+// form of that data to an underlying writer (see NewWriter).
+type Writer struct {
+ d compressor
+ dict []byte
+}
+
+// Write writes data to w, which will eventually write the
+// compressed form of data to its underlying writer.
+func (w *Writer) Write(data []byte) (n int, err error) {
+ return w.d.write(data)
+}
+
+// Flush flushes any pending data to the underlying writer.
+// It is useful mainly in compressed network protocols, to ensure that
+// a remote reader has enough data to reconstruct a packet.
+// Flush does not return until the data has been written.
+// Calling Flush when there is no pending data still causes the Writer
+// to emit a sync marker of at least 4 bytes.
+// If the underlying writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (w *Writer) Flush() error {
+ // For more about flushing:
+ // https://www.bolet.org/~pornin/deflate-flush.html
+ return w.d.syncFlush()
+}
+
+// Close flushes and closes the writer.
+func (w *Writer) Close() error {
+ return w.d.close()
+}
+
+// Reset discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level and dictionary.
+func (w *Writer) Reset(dst io.Writer) {
+ if dw, ok := w.d.w.writer.(*dictWriter); ok {
+ // w was created with NewWriterDict
+ dw.w = dst
+ w.d.reset(dw)
+ w.d.fillWindow(w.dict)
+ } else {
+ // w was created with NewWriter
+ w.d.reset(dst)
+ }
+}
diff --git a/contrib/go/_std_1.18/src/compress/flate/deflatefast.go b/contrib/go/_std_1.19/src/compress/flate/deflatefast.go
index 6aa439f13d..6aa439f13d 100644
--- a/contrib/go/_std_1.18/src/compress/flate/deflatefast.go
+++ b/contrib/go/_std_1.19/src/compress/flate/deflatefast.go
diff --git a/contrib/go/_std_1.19/src/compress/flate/dict_decoder.go b/contrib/go/_std_1.19/src/compress/flate/dict_decoder.go
new file mode 100644
index 0000000000..d2c19040f5
--- /dev/null
+++ b/contrib/go/_std_1.19/src/compress/flate/dict_decoder.go
@@ -0,0 +1,182 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
+// LZ77 decompresses data through sequences of two forms of commands:
+//
+// - Literal insertions: Runs of one or more symbols are inserted into the data
+// stream as is. This is accomplished through the writeByte method for a
+// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
+// Any valid stream must start with a literal insertion if no preset dictionary
+// is used.
+//
+// - Backward copies: Runs of one or more symbols are copied from previously
+// emitted data. Backward copies come as the tuple (dist, length) where dist
+// determines how far back in the stream to copy from and length determines how
+// many bytes to copy. Note that it is valid for the length to be greater than
+// the distance. Since LZ77 uses forward copies, that situation is used to
+// perform a form of run-length encoding on repeated runs of symbols.
+// The writeCopy and tryWriteCopy methods are used to implement this command.
+//
+// For performance reasons, this implementation performs little to no sanity
+// checking of the arguments. As such, the invariants documented for each
+// method call must be respected.
+type dictDecoder struct {
+ hist []byte // Sliding window history
+
+ // Invariant: 0 <= rdPos <= wrPos <= len(hist)
+ wrPos int // Current output position in buffer
+ rdPos int // Have emitted hist[:rdPos] already
+ full bool // Has a full window length been written yet?
+}
+
+// init initializes dictDecoder to have a sliding window dictionary of the given
+// size. If a preset dict is provided, it will initialize the dictionary with
+// the contents of dict.
+func (dd *dictDecoder) init(size int, dict []byte) {
+ *dd = dictDecoder{hist: dd.hist}
+
+ if cap(dd.hist) < size {
+ dd.hist = make([]byte, size)
+ }
+ dd.hist = dd.hist[:size]
+
+ if len(dict) > len(dd.hist) {
+ dict = dict[len(dict)-len(dd.hist):]
+ }
+ dd.wrPos = copy(dd.hist, dict)
+ if dd.wrPos == len(dd.hist) {
+ dd.wrPos = 0
+ dd.full = true
+ }
+ dd.rdPos = dd.wrPos
+}
+
+// histSize reports the total amount of historical data in the dictionary.
+func (dd *dictDecoder) histSize() int {
+ if dd.full {
+ return len(dd.hist)
+ }
+ return dd.wrPos
+}
+
+// availRead reports the number of bytes that can be flushed by readFlush.
+func (dd *dictDecoder) availRead() int {
+ return dd.wrPos - dd.rdPos
+}
+
+// availWrite reports the available amount of output buffer space.
+func (dd *dictDecoder) availWrite() int {
+ return len(dd.hist) - dd.wrPos
+}
+
+// writeSlice returns a slice of the available buffer to write data to.
+//
+// This invariant will be kept: len(s) <= availWrite()
+func (dd *dictDecoder) writeSlice() []byte {
+ return dd.hist[dd.wrPos:]
+}
+
+// writeMark advances the writer pointer by cnt.
+//
+// This invariant must be kept: 0 <= cnt <= availWrite()
+func (dd *dictDecoder) writeMark(cnt int) {
+ dd.wrPos += cnt
+}
+
+// writeByte writes a single byte to the dictionary.
+//
+// This invariant must be kept: 0 < availWrite()
+func (dd *dictDecoder) writeByte(c byte) {
+ dd.hist[dd.wrPos] = c
+ dd.wrPos++
+}
+
+// writeCopy copies a string at a given (dist, length) to the output.
+// This returns the number of bytes copied and may be less than the requested
+// length if the available space in the output buffer is too small.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) writeCopy(dist, length int) int {
+ dstBase := dd.wrPos
+ dstPos := dstBase
+ srcPos := dstPos - dist
+ endPos := dstPos + length
+ if endPos > len(dd.hist) {
+ endPos = len(dd.hist)
+ }
+
+ // Copy non-overlapping section after destination position.
+ //
+ // This section is non-overlapping in that the copy length for this section
+ // is always less than or equal to the backwards distance. This can occur
+ // if a distance refers to data that wraps-around in the buffer.
+ // Thus, a backwards copy is performed here; that is, the exact bytes in
+	// the source prior to the copy are placed in the destination.
+ if srcPos < 0 {
+ srcPos += len(dd.hist)
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
+ srcPos = 0
+ }
+
+ // Copy possibly overlapping section before destination position.
+ //
+ // This section can overlap if the copy length for this section is larger
+ // than the backwards distance. This is allowed by LZ77 so that repeated
+ // strings can be succinctly represented using (dist, length) pairs.
+	// Thus, a forwards copy is performed here; that is, the bytes copied are
+ // possibly dependent on the resulting bytes in the destination as the copy
+ // progresses along. This is functionally equivalent to the following:
+ //
+ // for i := 0; i < endPos-dstPos; i++ {
+ // dd.hist[dstPos+i] = dd.hist[srcPos+i]
+ // }
+ // dstPos = endPos
+ //
+ for dstPos < endPos {
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+ }
+
+ dd.wrPos = dstPos
+ return dstPos - dstBase
+}
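+
+// For illustration: because the forward copy above never reads past the
+// bytes already written, a distance smaller than the length replicates
+// data, which is how LZ77 expresses run-length encoding. A sketch,
+// assuming "ab" are the two most recently written bytes:
+//
+//	dd.writeCopy(2, 6) // appends "ababab"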
+
+// tryWriteCopy tries to copy a string at a given (distance, length) to the
+// output. This specialized version is optimized for short distances.
+//
+// This method is designed to be inlined for performance reasons.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
+ dstPos := dd.wrPos
+ endPos := dstPos + length
+ if dstPos < dist || endPos > len(dd.hist) {
+ return 0
+ }
+ dstBase := dstPos
+ srcPos := dstPos - dist
+
+ // Copy possibly overlapping section before destination position.
+ for dstPos < endPos {
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+ }
+
+ dd.wrPos = dstPos
+ return dstPos - dstBase
+}
+
+// readFlush returns a slice of the historical buffer that is ready to be
+// emitted to the user. The data returned by readFlush must be fully consumed
+// before calling any other dictDecoder methods.
+func (dd *dictDecoder) readFlush() []byte {
+ toRead := dd.hist[dd.rdPos:dd.wrPos]
+ dd.rdPos = dd.wrPos
+ if dd.wrPos == len(dd.hist) {
+ dd.wrPos, dd.rdPos = 0, 0
+ dd.full = true
+ }
+ return toRead
+}
diff --git a/contrib/go/_std_1.19/src/compress/flate/huffman_bit_writer.go b/contrib/go/_std_1.19/src/compress/flate/huffman_bit_writer.go
new file mode 100644
index 0000000000..6a4e48e9ee
--- /dev/null
+++ b/contrib/go/_std_1.19/src/compress/flate/huffman_bit_writer.go
@@ -0,0 +1,704 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "io"
+)
+
+const (
+ // The largest offset code.
+ offsetCodeCount = 30
+
+ // The special code used to mark the end of a block.
+ endBlockMarker = 256
+
+ // The first length code.
+ lengthCodesStart = 257
+
+ // The number of codegen codes.
+ codegenCodeCount = 19
+ badCode = 255
+
+ // bufferFlushSize indicates the buffer size
+ // after which bytes are flushed to the writer.
+ // Should preferably be a multiple of 6, since
+ // we accumulate 6 bytes between writes to the buffer.
+ bufferFlushSize = 240
+
+ // bufferSize is the actual output byte buffer size.
+ // It must have additional headroom for a flush
+ // which can contain up to 8 bytes.
+ bufferSize = bufferFlushSize + 8
+)
+
+// The number of extra bits needed by length code X - lengthCodesStart.
+var lengthExtraBits = []int8{
+ /* 257 */ 0, 0, 0,
+ /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
+ /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
+ /* 280 */ 4, 5, 5, 5, 5, 0,
+}
+
+// The length indicated by length code X - lengthCodesStart.
+var lengthBase = []uint32{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
+ 12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
+ 64, 80, 96, 112, 128, 160, 192, 224, 255,
+}
+
+// offset code word extra bits.
+var offsetExtraBits = []int8{
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
+}
+
+var offsetBase = []uint32{
+ 0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
+ 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
+ 0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
+ 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
+ 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
+ 0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
+}
+
+// The odd order in which the codegen code sizes are written.
+var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+type huffmanBitWriter struct {
+ // writer is the underlying writer.
+ // Do not use it directly; use the write method, which ensures
+ // that Write errors are sticky.
+ writer io.Writer
+
+ // Data waiting to be written is bytes[0:nbytes]
+ // and then the low nbits of bits. Data is always written
+ // sequentially into the bytes array.
+ bits uint64
+ nbits uint
+ bytes [bufferSize]byte
+ codegenFreq [codegenCodeCount]int32
+ nbytes int
+ literalFreq []int32
+ offsetFreq []int32
+ codegen []uint8
+ literalEncoding *huffmanEncoder
+ offsetEncoding *huffmanEncoder
+ codegenEncoding *huffmanEncoder
+ err error
+}
+
+func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
+ return &huffmanBitWriter{
+ writer: w,
+ literalFreq: make([]int32, maxNumLit),
+ offsetFreq: make([]int32, offsetCodeCount),
+ codegen: make([]uint8, maxNumLit+offsetCodeCount+1),
+ literalEncoding: newHuffmanEncoder(maxNumLit),
+ codegenEncoding: newHuffmanEncoder(codegenCodeCount),
+ offsetEncoding: newHuffmanEncoder(offsetCodeCount),
+ }
+}
+
+func (w *huffmanBitWriter) reset(writer io.Writer) {
+ w.writer = writer
+ w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
+}
+
+func (w *huffmanBitWriter) flush() {
+ if w.err != nil {
+ w.nbits = 0
+ return
+ }
+ n := w.nbytes
+ for w.nbits != 0 {
+ w.bytes[n] = byte(w.bits)
+ w.bits >>= 8
+ if w.nbits > 8 { // Avoid underflow
+ w.nbits -= 8
+ } else {
+ w.nbits = 0
+ }
+ n++
+ }
+ w.bits = 0
+ w.write(w.bytes[:n])
+ w.nbytes = 0
+}
+
+func (w *huffmanBitWriter) write(b []byte) {
+ if w.err != nil {
+ return
+ }
+ _, w.err = w.writer.Write(b)
+}
+
+func (w *huffmanBitWriter) writeBits(b int32, nb uint) {
+ if w.err != nil {
+ return
+ }
+ w.bits |= uint64(b) << w.nbits
+ w.nbits += nb
+ if w.nbits >= 48 {
+ bits := w.bits
+ w.bits >>= 48
+ w.nbits -= 48
+ n := w.nbytes
+ bytes := w.bytes[n : n+6]
+ bytes[0] = byte(bits)
+ bytes[1] = byte(bits >> 8)
+ bytes[2] = byte(bits >> 16)
+ bytes[3] = byte(bits >> 24)
+ bytes[4] = byte(bits >> 32)
+ bytes[5] = byte(bits >> 40)
+ n += 6
+ if n >= bufferFlushSize {
+ w.write(w.bytes[:n])
+ n = 0
+ }
+ w.nbytes = n
+ }
+}
+
+func (w *huffmanBitWriter) writeBytes(bytes []byte) {
+ if w.err != nil {
+ return
+ }
+ n := w.nbytes
+ if w.nbits&7 != 0 {
+ w.err = InternalError("writeBytes with unfinished bits")
+ return
+ }
+ for w.nbits != 0 {
+ w.bytes[n] = byte(w.bits)
+ w.bits >>= 8
+ w.nbits -= 8
+ n++
+ }
+ if n != 0 {
+ w.write(w.bytes[:n])
+ }
+ w.nbytes = 0
+ w.write(bytes)
+}
+
+// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
+// the literal and offset lengths arrays (which are concatenated into a single
+// array). This method generates that run-length encoding.
+//
+// The result is written into the codegen array, and the frequencies
+// of each code are written into the codegenFreq array.
+// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
+// information. Code badCode is an end marker.
+//
+// numLiterals      The number of literals in literalEncoding
+// numOffsets       The number of offsets in offsetEncoding
+// litEnc, offEnc   The literal and offset encoders to use
+func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
+ for i := range w.codegenFreq {
+ w.codegenFreq[i] = 0
+ }
+	// Note that we are using codegen both as a temporary variable for holding
+	// a copy of the code lengths, and as the place where we put the result.
+ // This is fine because the output is always shorter than the input used
+ // so far.
+ codegen := w.codegen // cache
+ // Copy the concatenated code sizes to codegen. Put a marker at the end.
+ cgnl := codegen[:numLiterals]
+ for i := range cgnl {
+ cgnl[i] = uint8(litEnc.codes[i].len)
+ }
+
+ cgnl = codegen[numLiterals : numLiterals+numOffsets]
+ for i := range cgnl {
+ cgnl[i] = uint8(offEnc.codes[i].len)
+ }
+ codegen[numLiterals+numOffsets] = badCode
+
+ size := codegen[0]
+ count := 1
+ outIndex := 0
+ for inIndex := 1; size != badCode; inIndex++ {
+ // INVARIANT: We have seen "count" copies of size that have not yet
+ // had output generated for them.
+ nextSize := codegen[inIndex]
+ if nextSize == size {
+ count++
+ continue
+ }
+ // We need to generate codegen indicating "count" of size.
+ if size != 0 {
+ codegen[outIndex] = size
+ outIndex++
+ w.codegenFreq[size]++
+ count--
+ for count >= 3 {
+ n := 6
+ if n > count {
+ n = count
+ }
+ codegen[outIndex] = 16
+ outIndex++
+ codegen[outIndex] = uint8(n - 3)
+ outIndex++
+ w.codegenFreq[16]++
+ count -= n
+ }
+ } else {
+ for count >= 11 {
+ n := 138
+ if n > count {
+ n = count
+ }
+ codegen[outIndex] = 18
+ outIndex++
+ codegen[outIndex] = uint8(n - 11)
+ outIndex++
+ w.codegenFreq[18]++
+ count -= n
+ }
+ if count >= 3 {
+ // count >= 3 && count <= 10
+ codegen[outIndex] = 17
+ outIndex++
+ codegen[outIndex] = uint8(count - 3)
+ outIndex++
+ w.codegenFreq[17]++
+ count = 0
+ }
+ }
+ count--
+ for ; count >= 0; count-- {
+ codegen[outIndex] = size
+ outIndex++
+ w.codegenFreq[size]++
+ }
+ // Set up invariant for next time through the loop.
+ size = nextSize
+ count = 1
+ }
+ // Marker indicating the end of the codegen.
+ codegen[outIndex] = badCode
+}
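+
+// For illustration, a worked sketch of the encoding above: eight
+// consecutive code lengths of 3 become the literal length 3, then code 16
+// ("copy the previous length 3-6 times") with repeat count 6 stored as
+// 6-3 in its extra bits, then the literal length 3 for the final copy:
+//
+//	in:  3 3 3 3 3 3 3 3
+//	out: 3, 16(3), 3   // 1 + 6 + 1 = 8 lengths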
+
+// dynamicSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
+ numCodegens = len(w.codegenFreq)
+ for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+ numCodegens--
+ }
+ header := 3 + 5 + 5 + 4 + (3 * numCodegens) +
+ w.codegenEncoding.bitLength(w.codegenFreq[:]) +
+ int(w.codegenFreq[16])*2 +
+ int(w.codegenFreq[17])*3 +
+ int(w.codegenFreq[18])*7
+ size = header +
+ litEnc.bitLength(w.literalFreq) +
+ offEnc.bitLength(w.offsetFreq) +
+ extraBits
+
+ return size, numCodegens
+}
+
+// fixedSize returns the size of fixed-Huffman encoded data in bits.
+func (w *huffmanBitWriter) fixedSize(extraBits int) int {
+ return 3 +
+ fixedLiteralEncoding.bitLength(w.literalFreq) +
+ fixedOffsetEncoding.bitLength(w.offsetFreq) +
+ extraBits
+}
+
+// storedSize calculates the stored size, including header.
+// The function returns the size in bits and whether the data
+// fits inside a single stored block.
+func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
+ if in == nil {
+ return 0, false
+ }
+ if len(in) <= maxStoreBlockSize {
+ return (len(in) + 5) * 8, true
+ }
+ return 0, false
+}
+
+func (w *huffmanBitWriter) writeCode(c hcode) {
+ if w.err != nil {
+ return
+ }
+ w.bits |= uint64(c.code) << w.nbits
+ w.nbits += uint(c.len)
+ if w.nbits >= 48 {
+ bits := w.bits
+ w.bits >>= 48
+ w.nbits -= 48
+ n := w.nbytes
+ bytes := w.bytes[n : n+6]
+ bytes[0] = byte(bits)
+ bytes[1] = byte(bits >> 8)
+ bytes[2] = byte(bits >> 16)
+ bytes[3] = byte(bits >> 24)
+ bytes[4] = byte(bits >> 32)
+ bytes[5] = byte(bits >> 40)
+ n += 6
+ if n >= bufferFlushSize {
+ w.write(w.bytes[:n])
+ n = 0
+ }
+ w.nbytes = n
+ }
+}
+
+// Write the header of a dynamic Huffman block to the output stream.
+//
+// numLiterals The number of literals specified in codegen
+// numOffsets The number of offsets specified in codegen
+// numCodegens The number of codegens used in codegen
+func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
+ if w.err != nil {
+ return
+ }
+ var firstBits int32 = 4
+ if isEof {
+ firstBits = 5
+ }
+ w.writeBits(firstBits, 3)
+ w.writeBits(int32(numLiterals-257), 5)
+ w.writeBits(int32(numOffsets-1), 5)
+ w.writeBits(int32(numCodegens-4), 4)
+
+ for i := 0; i < numCodegens; i++ {
+ value := uint(w.codegenEncoding.codes[codegenOrder[i]].len)
+ w.writeBits(int32(value), 3)
+ }
+
+ i := 0
+ for {
+		codeWord := int(w.codegen[i])
+ i++
+ if codeWord == badCode {
+ break
+ }
+ w.writeCode(w.codegenEncoding.codes[uint32(codeWord)])
+
+ switch codeWord {
+ case 16:
+ w.writeBits(int32(w.codegen[i]), 2)
+ i++
+ case 17:
+ w.writeBits(int32(w.codegen[i]), 3)
+ i++
+ case 18:
+ w.writeBits(int32(w.codegen[i]), 7)
+ i++
+ }
+ }
+}
+
+func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
+ if w.err != nil {
+ return
+ }
+ var flag int32
+ if isEof {
+ flag = 1
+ }
+ w.writeBits(flag, 3)
+ w.flush()
+ w.writeBits(int32(length), 16)
+ w.writeBits(int32(^uint16(length)), 16)
+}
+
+func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
+ if w.err != nil {
+ return
+ }
+ // Indicate that we are a fixed Huffman block
+ var value int32 = 2
+ if isEof {
+ value = 3
+ }
+ w.writeBits(value, 3)
+}
+
+// writeBlock will write a block of tokens with the smallest encoding.
+// The original input can be supplied, and if the Huffman encoded data
+// is larger than the original bytes, the data will be written as a
+// stored block.
+// If the input is nil, the tokens will always be Huffman encoded.
+func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
+ if w.err != nil {
+ return
+ }
+
+ tokens = append(tokens, endBlockMarker)
+ numLiterals, numOffsets := w.indexTokens(tokens)
+
+ var extraBits int
+ storedSize, storable := w.storedSize(input)
+ if storable {
+ // We only bother calculating the costs of the extra bits required by
+ // the length of offset fields (which will be the same for both fixed
+ // and dynamic encoding), if we need to compare those two encodings
+ // against stored encoding.
+ for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
+ // First eight length codes have extra size = 0.
+ extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart])
+ }
+ for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
+ // First four offset codes have extra size = 0.
+ extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode])
+ }
+ }
+
+ // Figure out smallest code.
+ // Fixed Huffman baseline.
+ var literalEncoding = fixedLiteralEncoding
+ var offsetEncoding = fixedOffsetEncoding
+ var size = w.fixedSize(extraBits)
+
+ // Dynamic Huffman?
+ var numCodegens int
+
+	// Generate codegen and codegenFrequencies, which indicate how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+ if dynamicSize < size {
+ size = dynamicSize
+ literalEncoding = w.literalEncoding
+ offsetEncoding = w.offsetEncoding
+ }
+
+ // Stored bytes?
+ if storable && storedSize < size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ // Huffman.
+ if literalEncoding == fixedLiteralEncoding {
+ w.writeFixedHeader(eof)
+ } else {
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ }
+
+ // Write the tokens.
+ w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes)
+}
+
+// writeBlockDynamic encodes a block using a dynamic Huffman table.
+// This should be used if the symbols used have a disproportionate
+// histogram distribution.
+// If input is supplied and the compression savings are below 1/16th of the
+// input size the block is stored.
+func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) {
+ if w.err != nil {
+ return
+ }
+
+ tokens = append(tokens, endBlockMarker)
+ numLiterals, numOffsets := w.indexTokens(tokens)
+
+	// Generate codegen and codegenFrequencies, which indicate how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0)
+
+ // Store bytes, if we don't get a reasonable improvement.
+ if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ // Write Huffman table.
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+
+ // Write the tokens.
+ w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes)
+}
+
+// indexTokens indexes a slice of tokens, updates
+// literalFreq and offsetFreq, and generates literalEncoding
+// and offsetEncoding.
+// The number of literal and offset tokens is returned.
+func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) {
+ for i := range w.literalFreq {
+ w.literalFreq[i] = 0
+ }
+ for i := range w.offsetFreq {
+ w.offsetFreq[i] = 0
+ }
+
+ for _, t := range tokens {
+ if t < matchType {
+ w.literalFreq[t.literal()]++
+ continue
+ }
+ length := t.length()
+ offset := t.offset()
+ w.literalFreq[lengthCodesStart+lengthCode(length)]++
+ w.offsetFreq[offsetCode(offset)]++
+ }
+
+ // get the number of literals
+ numLiterals = len(w.literalFreq)
+ for w.literalFreq[numLiterals-1] == 0 {
+ numLiterals--
+ }
+ // get the number of offsets
+ numOffsets = len(w.offsetFreq)
+ for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
+ numOffsets--
+ }
+ if numOffsets == 0 {
+ // We haven't found a single match. If we want to go with the dynamic encoding,
+		// we should count at least one offset to be sure that the offset Huffman tree can be encoded.
+ w.offsetFreq[0] = 1
+ numOffsets = 1
+ }
+ w.literalEncoding.generate(w.literalFreq, 15)
+ w.offsetEncoding.generate(w.offsetFreq, 15)
+ return
+}
+
+// writeTokens writes a slice of tokens to the output.
+// The codes for literal and offset encoding must be supplied.
+func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
+ if w.err != nil {
+ return
+ }
+ for _, t := range tokens {
+ if t < matchType {
+ w.writeCode(leCodes[t.literal()])
+ continue
+ }
+ // Write the length
+ length := t.length()
+ lengthCode := lengthCode(length)
+ w.writeCode(leCodes[lengthCode+lengthCodesStart])
+ extraLengthBits := uint(lengthExtraBits[lengthCode])
+ if extraLengthBits > 0 {
+ extraLength := int32(length - lengthBase[lengthCode])
+ w.writeBits(extraLength, extraLengthBits)
+ }
+ // Write the offset
+ offset := t.offset()
+ offsetCode := offsetCode(offset)
+ w.writeCode(oeCodes[offsetCode])
+ extraOffsetBits := uint(offsetExtraBits[offsetCode])
+ if extraOffsetBits > 0 {
+ extraOffset := int32(offset - offsetBase[offsetCode])
+ w.writeBits(extraOffset, extraOffsetBits)
+ }
+ }
+}
+
+// huffOffset is a static offset encoder used for Huffman-only encoding.
+// It can be reused since we will not be encoding offset values.
+var huffOffset *huffmanEncoder
+
+func init() {
+ offsetFreq := make([]int32, offsetCodeCount)
+ offsetFreq[0] = 1
+ huffOffset = newHuffmanEncoder(offsetCodeCount)
+ huffOffset.generate(offsetFreq, 15)
+}
+
+// writeBlockHuff encodes a block of bytes as either
+// Huffman encoded literals or uncompressed bytes if the
+// result gains very little from compression.
+func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) {
+ if w.err != nil {
+ return
+ }
+
+ // Clear histogram
+ for i := range w.literalFreq {
+ w.literalFreq[i] = 0
+ }
+
+ // Add everything as literals
+ histogram(input, w.literalFreq)
+
+ w.literalFreq[endBlockMarker] = 1
+
+ const numLiterals = endBlockMarker + 1
+ w.offsetFreq[0] = 1
+ const numOffsets = 1
+
+ w.literalEncoding.generate(w.literalFreq, 15)
+
+ // Figure out smallest code.
+ // Always use dynamic Huffman or Store
+ var numCodegens int
+
+	// Generate codegen and codegenFrequencies, which indicate how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0)
+
+ // Store bytes, if we don't get a reasonable improvement.
+ if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ // Huffman.
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ encoding := w.literalEncoding.codes[:257]
+ n := w.nbytes
+ for _, t := range input {
+ // Bitwriting inlined, ~30% speedup
+ c := encoding[t]
+ w.bits |= uint64(c.code) << w.nbits
+ w.nbits += uint(c.len)
+ if w.nbits < 48 {
+ continue
+ }
+ // Store 6 bytes
+ bits := w.bits
+ w.bits >>= 48
+ w.nbits -= 48
+ bytes := w.bytes[n : n+6]
+ bytes[0] = byte(bits)
+ bytes[1] = byte(bits >> 8)
+ bytes[2] = byte(bits >> 16)
+ bytes[3] = byte(bits >> 24)
+ bytes[4] = byte(bits >> 32)
+ bytes[5] = byte(bits >> 40)
+ n += 6
+ if n < bufferFlushSize {
+ continue
+ }
+ w.write(w.bytes[:n])
+ if w.err != nil {
+ return // Return early in the event of write failures
+ }
+ n = 0
+ }
+ w.nbytes = n
+ w.writeCode(encoding[endBlockMarker])
+}
+
+// histogram accumulates a histogram of b in h.
+//
+// len(h) must be >= 256, and h's elements must be all zeroes.
+func histogram(b []byte, h []int32) {
+ h = h[:256]
+ for _, t := range b {
+ h[t]++
+ }
+}
diff --git a/contrib/go/_std_1.19/src/compress/flate/huffman_code.go b/contrib/go/_std_1.19/src/compress/flate/huffman_code.go
new file mode 100644
index 0000000000..a3fe80b442
--- /dev/null
+++ b/contrib/go/_std_1.19/src/compress/flate/huffman_code.go
@@ -0,0 +1,348 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "math"
+ "math/bits"
+ "sort"
+)
+
+// hcode is a Huffman code with a bit code and bit length.
+type hcode struct {
+ code, len uint16
+}
+
+type huffmanEncoder struct {
+ codes []hcode
+ freqcache []literalNode
+ bitCount [17]int32
+ lns byLiteral // stored to avoid repeated allocation in generate
+ lfs byFreq // stored to avoid repeated allocation in generate
+}
+
+type literalNode struct {
+ literal uint16
+ freq int32
+}
+
+// A levelInfo describes the state of the constructed tree for a given depth.
+type levelInfo struct {
+	// Our level, for better printing.
+ level int32
+
+ // The frequency of the last node at this level
+ lastFreq int32
+
+ // The frequency of the next character to add to this level
+ nextCharFreq int32
+
+ // The frequency of the next pair (from level below) to add to this level.
+ // Only valid if the "needed" value of the next lower level is 0.
+ nextPairFreq int32
+
+ // The number of chains remaining to generate for this level before moving
+ // up to the next level
+ needed int32
+}
+
+// set sets the code and length of an hcode.
+func (h *hcode) set(code uint16, length uint16) {
+ h.len = length
+ h.code = code
+}
+
+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} }
+
+func newHuffmanEncoder(size int) *huffmanEncoder {
+ return &huffmanEncoder{codes: make([]hcode, size)}
+}
+
+// generateFixedLiteralEncoding generates a huffmanEncoder for the fixed literal table.
+func generateFixedLiteralEncoding() *huffmanEncoder {
+ h := newHuffmanEncoder(maxNumLit)
+ codes := h.codes
+ var ch uint16
+ for ch = 0; ch < maxNumLit; ch++ {
+ var bits uint16
+ var size uint16
+ switch {
+ case ch < 144:
+			// size 8, 00110000 .. 10111111
+ bits = ch + 48
+ size = 8
+ case ch < 256:
+ // size 9, 110010000 .. 111111111
+ bits = ch + 400 - 144
+ size = 9
+ case ch < 280:
+ // size 7, 0000000 .. 0010111
+ bits = ch - 256
+ size = 7
+ default:
+ // size 8, 11000000 .. 11000111
+ bits = ch + 192 - 280
+ size = 8
+ }
+ codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size}
+ }
+ return h
+}
+
+func generateFixedOffsetEncoding() *huffmanEncoder {
+ h := newHuffmanEncoder(30)
+ codes := h.codes
+ for ch := range codes {
+ codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5}
+ }
+ return h
+}
+
+var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding()
+var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding()
+
+func (h *huffmanEncoder) bitLength(freq []int32) int {
+ var total int
+ for i, f := range freq {
+ if f != 0 {
+ total += int(f) * int(h.codes[i].len)
+ }
+ }
+ return total
+}
+
+const maxBitsLimit = 16
+
+// bitCounts computes the number of literals assigned to each bit size in the Huffman encoding.
+// It is only called when len(list) >= 3.
+// The cases of 0, 1, and 2 literals are handled by special case code.
+//
+// list is an array of the literals with non-zero frequencies
+// and their associated frequencies. The array is in order of increasing
+// frequency and has as its last element a special element with frequency
+// MaxInt32.
+//
+// maxBits is the maximum number of bits that should be used to encode any literal.
+// It must be less than 16.
+//
+// bitCounts returns an integer slice in which slice[i] indicates the number of literals
+// that should be encoded in i bits.
+func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
+ if maxBits >= maxBitsLimit {
+ panic("flate: maxBits too large")
+ }
+ n := int32(len(list))
+ list = list[0 : n+1]
+ list[n] = maxNode()
+
+	// The tree can't have greater depth than n - 1, no matter what. This
+	// saves a little bit of work in some small cases.
+ if maxBits > n-1 {
+ maxBits = n - 1
+ }
+
+ // Create information about each of the levels.
+ // A bogus "Level 0" whose sole purpose is so that
+ // level1.prev.needed==0. This makes level1.nextPairFreq
+ // be a legitimate value that never gets chosen.
+ var levels [maxBitsLimit]levelInfo
+ // leafCounts[i] counts the number of literals at the left
+ // of ancestors of the rightmost node at level i.
+ // leafCounts[i][j] is the number of literals at the left
+ // of the level j ancestor.
+ var leafCounts [maxBitsLimit][maxBitsLimit]int32
+
+ for level := int32(1); level <= maxBits; level++ {
+ // For every level, the first two items are the first two characters.
+ // We initialize the levels as if we had already figured this out.
+ levels[level] = levelInfo{
+ level: level,
+ lastFreq: list[1].freq,
+ nextCharFreq: list[2].freq,
+ nextPairFreq: list[0].freq + list[1].freq,
+ }
+ leafCounts[level][level] = 2
+ if level == 1 {
+ levels[level].nextPairFreq = math.MaxInt32
+ }
+ }
+
+ // We need a total of 2*n - 2 items at top level and have already generated 2.
+ levels[maxBits].needed = 2*n - 4
+
+ level := maxBits
+ for {
+ l := &levels[level]
+ if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
+			// We've run out of both leaves and pairs.
+ // End all calculations for this level.
+ // To make sure we never come back to this level or any lower level,
+ // set nextPairFreq impossibly large.
+ l.needed = 0
+ levels[level+1].nextPairFreq = math.MaxInt32
+ level++
+ continue
+ }
+
+ prevFreq := l.lastFreq
+ if l.nextCharFreq < l.nextPairFreq {
+ // The next item on this row is a leaf node.
+ n := leafCounts[level][level] + 1
+ l.lastFreq = l.nextCharFreq
+			// Lower leafCounts are the same as the previous node.
+ leafCounts[level][level] = n
+ l.nextCharFreq = list[n].freq
+ } else {
+ // The next item on this row is a pair from the previous row.
+ // nextPairFreq isn't valid until we generate two
+			// more values in the level below.
+ l.lastFreq = l.nextPairFreq
+ // Take leaf counts from the lower level, except counts[level] remains the same.
+ copy(leafCounts[level][:level], leafCounts[level-1][:level])
+ levels[l.level-1].needed = 2
+ }
+
+ if l.needed--; l.needed == 0 {
+ // We've done everything we need to do for this level.
+ // Continue calculating one level up. Fill in nextPairFreq
+ // of that level with the sum of the two nodes we've just calculated on
+ // this level.
+ if l.level == maxBits {
+ // All done!
+ break
+ }
+ levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
+ level++
+ } else {
+ // If we stole from below, move down temporarily to replenish it.
+ for levels[level-1].needed > 0 {
+ level--
+ }
+ }
+ }
+
+	// Something is wrong if, at the end, the top level is null or hasn't used
+	// all of the leaves.
+ if leafCounts[maxBits][maxBits] != n {
+ panic("leafCounts[maxBits][maxBits] != n")
+ }
+
+ bitCount := h.bitCount[:maxBits+1]
+ bits := 1
+ counts := &leafCounts[maxBits]
+ for level := maxBits; level > 0; level-- {
+ // chain.leafCount gives the number of literals requiring at least "bits"
+ // bits to encode.
+ bitCount[bits] = counts[level] - counts[level-1]
+ bits++
+ }
+ return bitCount
+}
+
+// Look at the leaves and assign them a bit count and an encoding as specified
+// in RFC 1951 3.2.2.
+func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
+ code := uint16(0)
+ for n, bits := range bitCount {
+ code <<= 1
+ if n == 0 || bits == 0 {
+ continue
+ }
+		// The literals list[len(list)-bits] .. list[len(list)-1]
+ // are encoded using "bits" bits, and get the values
+ // code, code + 1, .... The code values are
+ // assigned in literal order (not frequency order).
+ chunk := list[len(list)-int(bits):]
+
+ h.lns.sort(chunk)
+ for _, node := range chunk {
+ h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
+ code++
+ }
+ list = list[0 : len(list)-int(bits)]
+ }
+}
+
+// Update this Huffman Code object to be the minimum code for the specified frequency count.
+//
+// freq is an array of frequencies, in which freq[i] gives the frequency of literal i.
+// maxBits is the maximum number of bits to use for any literal.
+func (h *huffmanEncoder) generate(freq []int32, maxBits int32) {
+ if h.freqcache == nil {
+ // Allocate a reusable buffer with the longest possible frequency table.
+ // Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit.
+ // The largest of these is maxNumLit, so we allocate for that case.
+ h.freqcache = make([]literalNode, maxNumLit+1)
+ }
+ list := h.freqcache[:len(freq)+1]
+ // Number of non-zero literals
+ count := 0
+ // Set list to be the set of all non-zero literals and their frequencies
+ for i, f := range freq {
+ if f != 0 {
+ list[count] = literalNode{uint16(i), f}
+ count++
+ } else {
+ h.codes[i].len = 0
+ }
+ }
+
+ list = list[:count]
+ if count <= 2 {
+ // Handle the small cases here, because they are awkward for the general case code. With
+ // two or fewer literals, everything has bit length 1.
+ for i, node := range list {
+ // "list" is in order of increasing literal value.
+ h.codes[node.literal].set(uint16(i), 1)
+ }
+ return
+ }
+ h.lfs.sort(list)
+
+ // Get the number of literals for each bit count
+ bitCount := h.bitCounts(list, maxBits)
+ // And do the assignment
+ h.assignEncodingAndSize(bitCount, list)
+}
+
+type byLiteral []literalNode
+
+func (s *byLiteral) sort(a []literalNode) {
+ *s = byLiteral(a)
+ sort.Sort(s)
+}
+
+func (s byLiteral) Len() int { return len(s) }
+
+func (s byLiteral) Less(i, j int) bool {
+ return s[i].literal < s[j].literal
+}
+
+func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+type byFreq []literalNode
+
+func (s *byFreq) sort(a []literalNode) {
+ *s = byFreq(a)
+ sort.Sort(s)
+}
+
+func (s byFreq) Len() int { return len(s) }
+
+func (s byFreq) Less(i, j int) bool {
+ if s[i].freq == s[j].freq {
+ return s[i].literal < s[j].literal
+ }
+ return s[i].freq < s[j].freq
+}
+
+func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func reverseBits(number uint16, bitLength byte) uint16 {
+ return bits.Reverse16(number << (16 - bitLength))
+}
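+
+// For illustration: DEFLATE packs Huffman codes starting from the most
+// significant bit of the code, while this writer emits bits LSB-first,
+// hence the reversal. A couple of worked values:
+//
+//	reverseBits(0b1011, 4) == 0b1101
+//	reverseBits(0b0010111, 7) == 0b1110100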
diff --git a/contrib/go/_std_1.18/src/compress/flate/inflate.go b/contrib/go/_std_1.19/src/compress/flate/inflate.go
index 49921398e2..49921398e2 100644
--- a/contrib/go/_std_1.18/src/compress/flate/inflate.go
+++ b/contrib/go/_std_1.19/src/compress/flate/inflate.go
diff --git a/contrib/go/_std_1.18/src/compress/flate/token.go b/contrib/go/_std_1.19/src/compress/flate/token.go
index ae01391f9c..ae01391f9c 100644
--- a/contrib/go/_std_1.18/src/compress/flate/token.go
+++ b/contrib/go/_std_1.19/src/compress/flate/token.go
diff --git a/contrib/go/_std_1.19/src/compress/gzip/gunzip.go b/contrib/go/_std_1.19/src/compress/gzip/gunzip.go
new file mode 100644
index 0000000000..ba8de97e6a
--- /dev/null
+++ b/contrib/go/_std_1.19/src/compress/gzip/gunzip.go
@@ -0,0 +1,290 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gzip implements reading and writing of gzip format compressed files,
+// as specified in RFC 1952.
+package gzip
+
+import (
+ "bufio"
+ "compress/flate"
+ "encoding/binary"
+ "errors"
+ "hash/crc32"
+ "io"
+ "time"
+)
+
+const (
+ gzipID1 = 0x1f
+ gzipID2 = 0x8b
+ gzipDeflate = 8
+ flagText = 1 << 0
+ flagHdrCrc = 1 << 1
+ flagExtra = 1 << 2
+ flagName = 1 << 3
+ flagComment = 1 << 4
+)
+
+var (
+ // ErrChecksum is returned when reading GZIP data that has an invalid checksum.
+ ErrChecksum = errors.New("gzip: invalid checksum")
+ // ErrHeader is returned when reading GZIP data that has an invalid header.
+ ErrHeader = errors.New("gzip: invalid header")
+)
+
+var le = binary.LittleEndian
+
+// noEOF converts io.EOF to io.ErrUnexpectedEOF.
+func noEOF(err error) error {
+ if err == io.EOF {
+ return io.ErrUnexpectedEOF
+ }
+ return err
+}
+
+// The gzip file stores a header giving metadata about the compressed file.
+// That header is exposed as the fields of the Writer and Reader structs.
+//
+// Strings must be UTF-8 encoded and may only contain Unicode code points
+// U+0001 through U+00FF, due to limitations of the GZIP file format.
+type Header struct {
+ Comment string // comment
+ Extra []byte // "extra data"
+ ModTime time.Time // modification time
+ Name string // file name
+ OS byte // operating system type
+}
+
+// A Reader is an io.Reader that can be read to retrieve
+// uncompressed data from a gzip-format compressed file.
+//
+// In general, a gzip file can be a concatenation of gzip files,
+// each with its own header. Reads from the Reader
+// return the concatenation of the uncompressed data of each.
+// Only the first header is recorded in the Reader fields.
+//
+// Gzip files store a length and checksum of the uncompressed data.
+// The Reader will return an ErrChecksum when Read
+// reaches the end of the uncompressed data if it does not
+// have the expected length or checksum. Clients should treat data
+// returned by Read as tentative until they receive the io.EOF
+// marking the end of the data.
+type Reader struct {
+ Header // valid after NewReader or Reader.Reset
+ r flate.Reader
+ decompressor io.ReadCloser
+ digest uint32 // CRC-32, IEEE polynomial (section 8)
+ size uint32 // Uncompressed size (section 2.3.1)
+ buf [512]byte
+ err error
+ multistream bool
+}
+
+// NewReader creates a new Reader reading the given reader.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+//
+// It is the caller's responsibility to call Close on the Reader when done.
+//
+// The Reader.Header fields will be valid in the Reader returned.
+func NewReader(r io.Reader) (*Reader, error) {
+ z := new(Reader)
+ if err := z.Reset(r); err != nil {
+ return nil, err
+ }
+ return z, nil
+}
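+
+// For illustration, a minimal decompression sketch using this API, where
+// compressed is any io.Reader holding gzip data:
+//
+//	zr, err := NewReader(compressed)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer zr.Close()
+//	plain, err := io.ReadAll(zr) // CRC-32 and size are checked at EOF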
+
+// Reset discards the Reader z's state and makes it equivalent to the
+// result of NewReader, but reading from r instead.
+// This permits reusing a Reader rather than allocating a new one.
+func (z *Reader) Reset(r io.Reader) error {
+ *z = Reader{
+ decompressor: z.decompressor,
+ multistream: true,
+ }
+ if rr, ok := r.(flate.Reader); ok {
+ z.r = rr
+ } else {
+ z.r = bufio.NewReader(r)
+ }
+ z.Header, z.err = z.readHeader()
+ return z.err
+}
+
+// Multistream controls whether the reader supports multistream files.
+//
+// If enabled (the default), the Reader expects the input to be a sequence
+// of individually gzipped data streams, each with its own header and
+// trailer, ending at EOF. The effect is that the concatenation of a sequence
+// of gzipped files is treated as equivalent to the gzip of the concatenation
+// of the sequence. This is standard behavior for gzip readers.
+//
+// Calling Multistream(false) disables this behavior; disabling the behavior
+// can be useful when reading file formats that distinguish individual gzip
+// data streams or mix gzip data streams with other data streams.
+// In this mode, when the Reader reaches the end of the data stream,
+// Read returns io.EOF. The underlying reader must implement io.ByteReader
+// in order to be left positioned just after the gzip stream.
+// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
+// If there is no next stream, z.Reset(r) will return io.EOF.
+func (z *Reader) Multistream(ok bool) {
+ z.multistream = ok
+}
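+
+// For illustration, the per-stream reading pattern described above, as a
+// sketch (dst is any io.Writer):
+//
+//	zr, err := NewReader(r)
+//	for err == nil {
+//		zr.Multistream(false)
+//		if _, err = io.Copy(dst, zr); err != nil {
+//			break
+//		}
+//		err = zr.Reset(r) // io.EOF once no further stream follows
+//	}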
+
+// readString reads a NUL-terminated string from z.r.
+// It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and
+// will output a string encoded using UTF-8.
+// This method always updates z.digest with the data read.
+func (z *Reader) readString() (string, error) {
+ var err error
+ needConv := false
+ for i := 0; ; i++ {
+ if i >= len(z.buf) {
+ return "", ErrHeader
+ }
+ z.buf[i], err = z.r.ReadByte()
+ if err != nil {
+ return "", err
+ }
+ if z.buf[i] > 0x7f {
+ needConv = true
+ }
+ if z.buf[i] == 0 {
+ // Digest covers the NUL terminator.
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1])
+
+ // Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1).
+ if needConv {
+ s := make([]rune, 0, i)
+ for _, v := range z.buf[:i] {
+ s = append(s, rune(v))
+ }
+ return string(s), nil
+ }
+ return string(z.buf[:i]), nil
+ }
+ }
+}
+
+// readHeader reads the GZIP header according to section 2.3.1.
+// This method does not set z.err.
+func (z *Reader) readHeader() (hdr Header, err error) {
+ if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil {
+ // RFC 1952, section 2.2, says the following:
+ // A gzip file consists of a series of "members" (compressed data sets).
+ //
+ // Other than this, the specification does not clarify whether a
+ // "series" is defined as "one or more" or "zero or more". To err on the
+ // side of caution, Go interprets this to mean "zero or more".
+ // Thus, it is okay to return io.EOF here.
+ return hdr, err
+ }
+ if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
+ return hdr, ErrHeader
+ }
+ flg := z.buf[3]
+ if t := int64(le.Uint32(z.buf[4:8])); t > 0 {
+ // Section 2.3.1, the zero value for MTIME means that the
+ // modified time is not set.
+ hdr.ModTime = time.Unix(t, 0)
+ }
+ // z.buf[8] is XFL and is currently ignored.
+ hdr.OS = z.buf[9]
+ z.digest = crc32.ChecksumIEEE(z.buf[:10])
+
+ if flg&flagExtra != 0 {
+ if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
+ return hdr, noEOF(err)
+ }
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2])
+ data := make([]byte, le.Uint16(z.buf[:2]))
+ if _, err = io.ReadFull(z.r, data); err != nil {
+ return hdr, noEOF(err)
+ }
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, data)
+ hdr.Extra = data
+ }
+
+ var s string
+ if flg&flagName != 0 {
+ if s, err = z.readString(); err != nil {
+ return hdr, noEOF(err)
+ }
+ hdr.Name = s
+ }
+
+ if flg&flagComment != 0 {
+ if s, err = z.readString(); err != nil {
+ return hdr, noEOF(err)
+ }
+ hdr.Comment = s
+ }
+
+ if flg&flagHdrCrc != 0 {
+ if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
+ return hdr, noEOF(err)
+ }
+ digest := le.Uint16(z.buf[:2])
+ if digest != uint16(z.digest) {
+ return hdr, ErrHeader
+ }
+ }
+
+ z.digest = 0
+ if z.decompressor == nil {
+ z.decompressor = flate.NewReader(z.r)
+ } else {
+ z.decompressor.(flate.Resetter).Reset(z.r, nil)
+ }
+ return hdr, nil
+}
+
+// Read implements io.Reader, reading uncompressed bytes from its underlying Reader.
+func (z *Reader) Read(p []byte) (n int, err error) {
+ if z.err != nil {
+ return 0, z.err
+ }
+
+ for n == 0 {
+ n, z.err = z.decompressor.Read(p)
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
+ z.size += uint32(n)
+ if z.err != io.EOF {
+ // In the normal case we return here.
+ return n, z.err
+ }
+
+ // Finished file; check checksum and size.
+ if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
+ z.err = noEOF(err)
+ return n, z.err
+ }
+ digest := le.Uint32(z.buf[:4])
+ size := le.Uint32(z.buf[4:8])
+ if digest != z.digest || size != z.size {
+ z.err = ErrChecksum
+ return n, z.err
+ }
+ z.digest, z.size = 0, 0
+
+ // File is ok; check if there is another.
+ if !z.multistream {
+ return n, io.EOF
+ }
+ z.err = nil // Remove io.EOF
+
+ if _, z.err = z.readHeader(); z.err != nil {
+ return n, z.err
+ }
+ }
+
+ return n, nil
+}
+
+// Close closes the Reader. It does not close the underlying io.Reader.
+// In order for the GZIP checksum to be verified, the reader must be
+// fully consumed until io.EOF is returned.
+func (z *Reader) Close() error { return z.decompressor.Close() }
diff --git a/contrib/go/_std_1.18/src/compress/gzip/gzip.go b/contrib/go/_std_1.19/src/compress/gzip/gzip.go
index eaeb185795..eaeb185795 100644
--- a/contrib/go/_std_1.18/src/compress/gzip/gzip.go
+++ b/contrib/go/_std_1.19/src/compress/gzip/gzip.go
diff --git a/contrib/go/_std_1.19/src/container/list/list.go b/contrib/go/_std_1.19/src/container/list/list.go
new file mode 100644
index 0000000000..f2d77f0560
--- /dev/null
+++ b/contrib/go/_std_1.19/src/container/list/list.go
@@ -0,0 +1,235 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package list implements a doubly linked list.
+//
+// To iterate over a list (where l is a *List):
+//
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.Value
+// }
+package list
+
+// Element is an element of a linked list.
+type Element struct {
+ // Next and previous pointers in the doubly-linked list of elements.
+ // To simplify the implementation, internally a list l is implemented
+ // as a ring, such that &l.root is both the next element of the last
+ // list element (l.Back()) and the previous element of the first list
+ // element (l.Front()).
+ next, prev *Element
+
+ // The list to which this element belongs.
+ list *List
+
+ // The value stored with this element.
+ Value any
+}
+
+// Next returns the next list element or nil.
+func (e *Element) Next() *Element {
+ if p := e.next; e.list != nil && p != &e.list.root {
+ return p
+ }
+ return nil
+}
+
+// Prev returns the previous list element or nil.
+func (e *Element) Prev() *Element {
+ if p := e.prev; e.list != nil && p != &e.list.root {
+ return p
+ }
+ return nil
+}
+
+// List represents a doubly linked list.
+// The zero value for List is an empty list ready to use.
+type List struct {
+ root Element // sentinel list element, only &root, root.prev, and root.next are used
+ len int // current list length excluding (this) sentinel element
+}
+
+// Init initializes or clears list l.
+func (l *List) Init() *List {
+ l.root.next = &l.root
+ l.root.prev = &l.root
+ l.len = 0
+ return l
+}
+
+// New returns an initialized list.
+func New() *List { return new(List).Init() }
+
+// Len returns the number of elements of list l.
+// The complexity is O(1).
+func (l *List) Len() int { return l.len }
+
+// Front returns the first element of list l or nil if the list is empty.
+func (l *List) Front() *Element {
+ if l.len == 0 {
+ return nil
+ }
+ return l.root.next
+}
+
+// Back returns the last element of list l or nil if the list is empty.
+func (l *List) Back() *Element {
+ if l.len == 0 {
+ return nil
+ }
+ return l.root.prev
+}
+
+// lazyInit lazily initializes a zero List value.
+func (l *List) lazyInit() {
+ if l.root.next == nil {
+ l.Init()
+ }
+}
+
+// insert inserts e after at, increments l.len, and returns e.
+func (l *List) insert(e, at *Element) *Element {
+ e.prev = at
+ e.next = at.next
+ e.prev.next = e
+ e.next.prev = e
+ e.list = l
+ l.len++
+ return e
+}
+
+// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
+func (l *List) insertValue(v any, at *Element) *Element {
+ return l.insert(&Element{Value: v}, at)
+}
+
+// remove removes e from its list, decrements l.len.
+func (l *List) remove(e *Element) {
+ e.prev.next = e.next
+ e.next.prev = e.prev
+ e.next = nil // avoid memory leaks
+ e.prev = nil // avoid memory leaks
+ e.list = nil
+ l.len--
+}
+
+// move moves e to its new position immediately after at.
+func (l *List) move(e, at *Element) {
+ if e == at {
+ return
+ }
+ e.prev.next = e.next
+ e.next.prev = e.prev
+
+ e.prev = at
+ e.next = at.next
+ e.prev.next = e
+ e.next.prev = e
+}
+
+// Remove removes e from l if e is an element of list l.
+// It returns the element value e.Value.
+// The element must not be nil.
+func (l *List) Remove(e *Element) any {
+ if e.list == l {
+		// If e.list == l, then l must have been initialized when e was
+		// inserted into l, or l == nil (e is a zero Element) and l.remove
+		// will crash.
+ l.remove(e)
+ }
+ return e.Value
+}
+
+// PushFront inserts a new element e with value v at the front of list l and returns e.
+func (l *List) PushFront(v any) *Element {
+ l.lazyInit()
+ return l.insertValue(v, &l.root)
+}
+
+// PushBack inserts a new element e with value v at the back of list l and returns e.
+func (l *List) PushBack(v any) *Element {
+ l.lazyInit()
+ return l.insertValue(v, l.root.prev)
+}
+
+// InsertBefore inserts a new element e with value v immediately before mark and returns e.
+// If mark is not an element of l, the list is not modified.
+// The mark must not be nil.
+func (l *List) InsertBefore(v any, mark *Element) *Element {
+ if mark.list != l {
+ return nil
+ }
+ // see comment in List.Remove about initialization of l
+ return l.insertValue(v, mark.prev)
+}
+
+// InsertAfter inserts a new element e with value v immediately after mark and returns e.
+// If mark is not an element of l, the list is not modified.
+// The mark must not be nil.
+func (l *List) InsertAfter(v any, mark *Element) *Element {
+ if mark.list != l {
+ return nil
+ }
+ // see comment in List.Remove about initialization of l
+ return l.insertValue(v, mark)
+}
+
+// MoveToFront moves element e to the front of list l.
+// If e is not an element of l, the list is not modified.
+// The element must not be nil.
+func (l *List) MoveToFront(e *Element) {
+ if e.list != l || l.root.next == e {
+ return
+ }
+ // see comment in List.Remove about initialization of l
+ l.move(e, &l.root)
+}
+
+// MoveToBack moves element e to the back of list l.
+// If e is not an element of l, the list is not modified.
+// The element must not be nil.
+func (l *List) MoveToBack(e *Element) {
+ if e.list != l || l.root.prev == e {
+ return
+ }
+ // see comment in List.Remove about initialization of l
+ l.move(e, l.root.prev)
+}
+
+// MoveBefore moves element e to its new position before mark.
+// If e or mark is not an element of l, or e == mark, the list is not modified.
+// The element and mark must not be nil.
+func (l *List) MoveBefore(e, mark *Element) {
+ if e.list != l || e == mark || mark.list != l {
+ return
+ }
+ l.move(e, mark.prev)
+}
+
+// MoveAfter moves element e to its new position after mark.
+// If e or mark is not an element of l, or e == mark, the list is not modified.
+// The element and mark must not be nil.
+func (l *List) MoveAfter(e, mark *Element) {
+ if e.list != l || e == mark || mark.list != l {
+ return
+ }
+ l.move(e, mark)
+}
+
+// PushBackList inserts a copy of another list at the back of list l.
+// The lists l and other may be the same. They must not be nil.
+func (l *List) PushBackList(other *List) {
+ l.lazyInit()
+ for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
+ l.insertValue(e.Value, l.root.prev)
+ }
+}
+
+// PushFrontList inserts a copy of another list at the front of list l.
+// The lists l and other may be the same. They must not be nil.
+func (l *List) PushFrontList(other *List) {
+ l.lazyInit()
+ for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
+ l.insertValue(e.Value, &l.root)
+ }
+}
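
For reference, a short sketch of the API above: build a list, reorder it with MoveToFront, and walk it with the Front/Next pattern from the package comment (the values are arbitrary):

    package main

    import (
    	"container/list"
    	"fmt"
    )

    func main() {
    	l := list.New()
    	l.PushBack(2)
    	l.PushBack(3)
    	e := l.PushBack(1)
    	l.MoveToFront(e) // list is now 1, 2, 3

    	for e := l.Front(); e != nil; e = e.Next() {
    		fmt.Println(e.Value) // 1, 2, 3
    	}
    }
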
diff --git a/contrib/go/_std_1.19/src/context/context.go b/contrib/go/_std_1.19/src/context/context.go
new file mode 100644
index 0000000000..1070111efa
--- /dev/null
+++ b/contrib/go/_std_1.19/src/context/context.go
@@ -0,0 +1,593 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancellation signals, and other request-scoped values across API boundaries
+// and between processes.
+//
+// Incoming requests to a server should create a Context, and outgoing
+// calls to servers should accept a Context. The chain of function
+// calls between them must propagate the Context, optionally replacing
+// it with a derived Context created using WithCancel, WithDeadline,
+// WithTimeout, or WithValue. When a Context is canceled, all
+// Contexts derived from it are also canceled.
+//
+// The WithCancel, WithDeadline, and WithTimeout functions take a
+// Context (the parent) and return a derived Context (the child) and a
+// CancelFunc. Calling the CancelFunc cancels the child and its
+// children, removes the parent's reference to the child, and stops
+// any associated timers. Failing to call the CancelFunc leaks the
+// child and its children until the parent is canceled or the timer
+// fires. The go vet tool checks that CancelFuncs are used on all
+// control-flow paths.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+// func DoSomething(ctx context.Context, arg Arg) error {
+// // ... use ctx ...
+// }
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See https://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context
+
+import (
+ "errors"
+ "internal/reflectlite"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// A Context carries a deadline, a cancellation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ // The close of the Done channel may happen asynchronously,
+ // after the cancel function returns.
+ //
+ // WithCancel arranges for Done to be closed when cancel is called;
+ // WithDeadline arranges for Done to be closed when the deadline
+ // expires; WithTimeout arranges for Done to be closed when the timeout
+ // elapses.
+ //
+ // Done is provided for use in select statements:
+ //
+ // // Stream generates values with DoSomething and sends them to out
+ // // until DoSomething returns an error or ctx.Done is closed.
+ // func Stream(ctx context.Context, out chan<- Value) error {
+ // for {
+ // v, err := DoSomething(ctx)
+ // if err != nil {
+ // return err
+ // }
+ // select {
+ // case <-ctx.Done():
+ // return ctx.Err()
+ // case out <- v:
+ // }
+ // }
+ // }
+ //
+ // See https://blog.golang.org/pipelines for more examples of how to use
+ // a Done channel for cancellation.
+ Done() <-chan struct{}
+
+ // If Done is not yet closed, Err returns nil.
+ // If Done is closed, Err returns a non-nil error explaining why:
+ // Canceled if the context was canceled
+ // or DeadlineExceeded if the context's deadline passed.
+ // After Err returns a non-nil error, successive calls to Err return the same error.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+	// the same key return the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ //
+ // A key identifies a specific value in a Context. Functions that wish
+ // to store values in Context typically allocate a key in a global
+ // variable then use that key as the argument to context.WithValue and
+ // Context.Value. A key can be any type that supports equality;
+ // packages should define keys as an unexported type to avoid
+ // collisions.
+ //
+ // Packages that define a Context key should provide type-safe accessors
+ // for the values stored using that key:
+ //
+ // // Package user defines a User type that's stored in Contexts.
+ // package user
+ //
+ // import "context"
+ //
+ // // User is the type of value stored in the Contexts.
+ // type User struct {...}
+ //
+ // // key is an unexported type for keys defined in this package.
+ // // This prevents collisions with keys defined in other packages.
+ // type key int
+ //
+ // // userKey is the key for user.User values in Contexts. It is
+ // // unexported; clients use user.NewContext and user.FromContext
+ // // instead of using this key directly.
+ // var userKey key
+ //
+ // // NewContext returns a new Context that carries value u.
+ // func NewContext(ctx context.Context, u *User) context.Context {
+ // return context.WithValue(ctx, userKey, u)
+ // }
+ //
+ // // FromContext returns the User value stored in ctx, if any.
+ // func FromContext(ctx context.Context) (*User, bool) {
+ // u, ok := ctx.Value(userKey).(*User)
+ // return u, ok
+ // }
+ Value(key any) any
+}
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = errors.New("context canceled")
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded error = deadlineExceededError{}
+
+type deadlineExceededError struct{}
+
+func (deadlineExceededError) Error() string { return "context deadline exceeded" }
+func (deadlineExceededError) Timeout() bool { return true }
+func (deadlineExceededError) Temporary() bool { return true }
+
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key any) any {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case background:
+ return "context.Background"
+ case todo:
+ return "context.TODO"
+ }
+ return "unknown empty Context"
+}
+
+var (
+ background = new(emptyCtx)
+ todo = new(emptyCtx)
+)
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+ return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter).
+func TODO() Context {
+ return todo
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// A CancelFunc may be called by multiple goroutines simultaneously.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+ if parent == nil {
+ panic("cannot create context from nil parent")
+ }
+ c := newCancelCtx(parent)
+ propagateCancel(parent, &c)
+ return &c, func() { c.cancel(true, Canceled) }
+}
+
+// newCancelCtx returns an initialized cancelCtx.
+func newCancelCtx(parent Context) cancelCtx {
+ return cancelCtx{Context: parent}
+}
+
+// goroutines counts the number of goroutines ever created; for testing.
+var goroutines int32
+
+// propagateCancel arranges for child to be canceled when parent is.
+func propagateCancel(parent Context, child canceler) {
+ done := parent.Done()
+ if done == nil {
+ return // parent is never canceled
+ }
+
+ select {
+ case <-done:
+ // parent is already canceled
+ child.cancel(false, parent.Err())
+ return
+ default:
+ }
+
+ if p, ok := parentCancelCtx(parent); ok {
+ p.mu.Lock()
+ if p.err != nil {
+ // parent has already been canceled
+ child.cancel(false, p.err)
+ } else {
+ if p.children == nil {
+ p.children = make(map[canceler]struct{})
+ }
+ p.children[child] = struct{}{}
+ }
+ p.mu.Unlock()
+ } else {
+ atomic.AddInt32(&goroutines, +1)
+ go func() {
+ select {
+ case <-parent.Done():
+ child.cancel(false, parent.Err())
+ case <-child.Done():
+ }
+ }()
+ }
+}
+
+// &cancelCtxKey is the key that a cancelCtx returns itself for.
+var cancelCtxKey int
+
+// parentCancelCtx returns the underlying *cancelCtx for parent.
+// It does this by looking up parent.Value(&cancelCtxKey) to find
+// the innermost enclosing *cancelCtx and then checking whether
+// parent.Done() matches that *cancelCtx. (If not, the *cancelCtx
+// has been wrapped in a custom implementation providing a
+// different done channel, in which case we should not bypass it.)
+func parentCancelCtx(parent Context) (*cancelCtx, bool) {
+ done := parent.Done()
+ if done == closedchan || done == nil {
+ return nil, false
+ }
+ p, ok := parent.Value(&cancelCtxKey).(*cancelCtx)
+ if !ok {
+ return nil, false
+ }
+ pdone, _ := p.done.Load().(chan struct{})
+ if pdone != done {
+ return nil, false
+ }
+ return p, true
+}
+
+// removeChild removes a context from its parent.
+func removeChild(parent Context, child canceler) {
+ p, ok := parentCancelCtx(parent)
+ if !ok {
+ return
+ }
+ p.mu.Lock()
+ if p.children != nil {
+ delete(p.children, child)
+ }
+ p.mu.Unlock()
+}
+
+// A canceler is a context type that can be canceled directly. The
+// implementations are *cancelCtx and *timerCtx.
+type canceler interface {
+ cancel(removeFromParent bool, err error)
+ Done() <-chan struct{}
+}
+
+// closedchan is a reusable closed channel.
+var closedchan = make(chan struct{})
+
+func init() {
+ close(closedchan)
+}
+
+// A cancelCtx can be canceled. When canceled, it also cancels any children
+// that implement canceler.
+type cancelCtx struct {
+ Context
+
+ mu sync.Mutex // protects following fields
+ done atomic.Value // of chan struct{}, created lazily, closed by first cancel call
+ children map[canceler]struct{} // set to nil by the first cancel call
+ err error // set to non-nil by the first cancel call
+}
+
+func (c *cancelCtx) Value(key any) any {
+ if key == &cancelCtxKey {
+ return c
+ }
+ return value(c.Context, key)
+}
+
+func (c *cancelCtx) Done() <-chan struct{} {
+ d := c.done.Load()
+ if d != nil {
+ return d.(chan struct{})
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ d = c.done.Load()
+ if d == nil {
+ d = make(chan struct{})
+ c.done.Store(d)
+ }
+ return d.(chan struct{})
+}
+
+func (c *cancelCtx) Err() error {
+ c.mu.Lock()
+ err := c.err
+ c.mu.Unlock()
+ return err
+}
+
+type stringer interface {
+ String() string
+}
+
+func contextName(c Context) string {
+ if s, ok := c.(stringer); ok {
+ return s.String()
+ }
+ return reflectlite.TypeOf(c).String()
+}
+
+func (c *cancelCtx) String() string {
+ return contextName(c.Context) + ".WithCancel"
+}
+
+// cancel closes c.done, cancels each of c's children, and, if
+// removeFromParent is true, removes c from its parent's children.
+func (c *cancelCtx) cancel(removeFromParent bool, err error) {
+ if err == nil {
+ panic("context: internal error: missing cancel error")
+ }
+ c.mu.Lock()
+ if c.err != nil {
+ c.mu.Unlock()
+ return // already canceled
+ }
+ c.err = err
+ d, _ := c.done.Load().(chan struct{})
+ if d == nil {
+ c.done.Store(closedchan)
+ } else {
+ close(d)
+ }
+ for child := range c.children {
+ // NOTE: acquiring the child's lock while holding parent's lock.
+ child.cancel(false, err)
+ }
+ c.children = nil
+ c.mu.Unlock()
+
+ if removeFromParent {
+ removeChild(c.Context, c)
+ }
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) {
+ if parent == nil {
+ panic("cannot create context from nil parent")
+ }
+ if cur, ok := parent.Deadline(); ok && cur.Before(d) {
+ // The current deadline is already sooner than the new one.
+ return WithCancel(parent)
+ }
+ c := &timerCtx{
+ cancelCtx: newCancelCtx(parent),
+ deadline: d,
+ }
+ propagateCancel(parent, c)
+ dur := time.Until(d)
+ if dur <= 0 {
+ c.cancel(true, DeadlineExceeded) // deadline has already passed
+ return c, func() { c.cancel(false, Canceled) }
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err == nil {
+ c.timer = time.AfterFunc(dur, func() {
+ c.cancel(true, DeadlineExceeded)
+ })
+ }
+ return c, func() { c.cancel(true, Canceled) }
+}
+
+// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
+// implement Done and Err. It implements cancel by stopping its timer then
+// delegating to cancelCtx.cancel.
+type timerCtx struct {
+ cancelCtx
+ timer *time.Timer // Under cancelCtx.mu.
+
+ deadline time.Time
+}
+
+func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
+ return c.deadline, true
+}
+
+func (c *timerCtx) String() string {
+ return contextName(c.cancelCtx.Context) + ".WithDeadline(" +
+ c.deadline.String() + " [" +
+ time.Until(c.deadline).String() + "])"
+}
+
+func (c *timerCtx) cancel(removeFromParent bool, err error) {
+ c.cancelCtx.cancel(false, err)
+ if removeFromParent {
+ // Remove this timerCtx from its parent cancelCtx's children.
+ removeChild(c.cancelCtx.Context, c)
+ }
+ c.mu.Lock()
+ if c.timer != nil {
+ c.timer.Stop()
+ c.timer = nil
+ }
+ c.mu.Unlock()
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+ return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The provided key must be comparable and should not be of type
+// string or any other built-in type to avoid collisions between
+// packages using context. Users of WithValue should define their own
+// types for keys. To avoid allocating when assigning to an
+// interface{}, context keys often have concrete type
+// struct{}. Alternatively, exported context key variables' static
+// type should be a pointer or interface.
+func WithValue(parent Context, key, val any) Context {
+ if parent == nil {
+ panic("cannot create context from nil parent")
+ }
+ if key == nil {
+ panic("nil key")
+ }
+ if !reflectlite.TypeOf(key).Comparable() {
+ panic("key is not comparable")
+ }
+ return &valueCtx{parent, key, val}
+}
+
+// A valueCtx carries a key-value pair. It implements Value for that key and
+// delegates all other calls to the embedded Context.
+type valueCtx struct {
+ Context
+ key, val any
+}
+
+// stringify tries a bit to stringify v, without using fmt, since we don't
+// want context depending on the unicode tables. This is only used by
+// *valueCtx.String().
+func stringify(v any) string {
+ switch s := v.(type) {
+ case stringer:
+ return s.String()
+ case string:
+ return s
+ }
+ return "<not Stringer>"
+}
+
+func (c *valueCtx) String() string {
+ return contextName(c.Context) + ".WithValue(type " +
+ reflectlite.TypeOf(c.key).String() +
+ ", val " + stringify(c.val) + ")"
+}
+
+func (c *valueCtx) Value(key any) any {
+ if c.key == key {
+ return c.val
+ }
+ return value(c.Context, key)
+}
+
+func value(c Context, key any) any {
+ for {
+ switch ctx := c.(type) {
+ case *valueCtx:
+ if key == ctx.key {
+ return ctx.val
+ }
+ c = ctx.Context
+ case *cancelCtx:
+ if key == &cancelCtxKey {
+ return c
+ }
+ c = ctx.Context
+ case *timerCtx:
+ if key == &cancelCtxKey {
+ return &ctx.cancelCtx
+ }
+ c = ctx.Context
+ case *emptyCtx:
+ return nil
+ default:
+ return c.Value(key)
+ }
+ }
+}
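
As a usage sketch of the API above (the 50ms budget and the time.After stand-in for real work are placeholders): derive a context with WithTimeout, always defer the CancelFunc, and select on Done:

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    func main() {
    	// cancel must run on every path so the timer and the child's
    	// registration in the parent cancelCtx are released promptly.
    	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
    	defer cancel()

    	select {
    	case <-time.After(time.Second): // stand-in for real work
    		fmt.Println("finished work")
    	case <-ctx.Done():
    		fmt.Println(ctx.Err()) // context deadline exceeded
    	}
    }
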
diff --git a/contrib/go/_std_1.19/src/crypto/aes/aes_gcm.go b/contrib/go/_std_1.19/src/crypto/aes/aes_gcm.go
new file mode 100644
index 0000000000..ebae646a13
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/aes/aes_gcm.go
@@ -0,0 +1,186 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64
+
+package aes
+
+import (
+ "crypto/cipher"
+ subtleoverlap "crypto/internal/subtle"
+ "crypto/subtle"
+ "errors"
+)
+
+// The following functions are defined in gcm_*.s.
+
+//go:noescape
+func gcmAesInit(productTable *[256]byte, ks []uint32)
+
+//go:noescape
+func gcmAesData(productTable *[256]byte, data []byte, T *[16]byte)
+
+//go:noescape
+func gcmAesEnc(productTable *[256]byte, dst, src []byte, ctr, T *[16]byte, ks []uint32)
+
+//go:noescape
+func gcmAesDec(productTable *[256]byte, dst, src []byte, ctr, T *[16]byte, ks []uint32)
+
+//go:noescape
+func gcmAesFinish(productTable *[256]byte, tagMask, T *[16]byte, pLen, dLen uint64)
+
+const (
+ gcmBlockSize = 16
+ gcmTagSize = 16
+ gcmMinimumTagSize = 12 // NIST SP 800-38D recommends tags with 12 or more bytes.
+ gcmStandardNonceSize = 12
+)
+
+var errOpen = errors.New("cipher: message authentication failed")
+
+// Assert that aesCipherGCM implements the gcmAble interface.
+var _ gcmAble = (*aesCipherGCM)(nil)
+
+// NewGCM returns the AES cipher wrapped in Galois Counter Mode. This is only
+// called by crypto/cipher.NewGCM via the gcmAble interface.
+func (c *aesCipherGCM) NewGCM(nonceSize, tagSize int) (cipher.AEAD, error) {
+ g := &gcmAsm{ks: c.enc, nonceSize: nonceSize, tagSize: tagSize}
+ gcmAesInit(&g.productTable, g.ks)
+ return g, nil
+}
+
+type gcmAsm struct {
+ // ks is the key schedule, the length of which depends on the size of
+ // the AES key.
+ ks []uint32
+ // productTable contains pre-computed multiples of the binary-field
+ // element used in GHASH.
+ productTable [256]byte
+ // nonceSize contains the expected size of the nonce, in bytes.
+ nonceSize int
+ // tagSize contains the size of the tag, in bytes.
+ tagSize int
+}
+
+func (g *gcmAsm) NonceSize() int {
+ return g.nonceSize
+}
+
+func (g *gcmAsm) Overhead() int {
+ return g.tagSize
+}
+
+// sliceForAppend takes a slice and a requested number of bytes. It returns a
+// slice with the contents of the given slice followed by that many bytes and a
+// second slice that aliases into it and contains only the extra bytes. If the
+// original slice has sufficient capacity then no allocation is performed.
+func sliceForAppend(in []byte, n int) (head, tail []byte) {
+ if total := len(in) + n; cap(in) >= total {
+ head = in[:total]
+ } else {
+ head = make([]byte, total)
+ copy(head, in)
+ }
+ tail = head[len(in):]
+ return
+}
+
+// Seal encrypts and authenticates plaintext. See the cipher.AEAD interface for
+// details.
+func (g *gcmAsm) Seal(dst, nonce, plaintext, data []byte) []byte {
+ if len(nonce) != g.nonceSize {
+ panic("crypto/cipher: incorrect nonce length given to GCM")
+ }
+ if uint64(len(plaintext)) > ((1<<32)-2)*BlockSize {
+ panic("crypto/cipher: message too large for GCM")
+ }
+
+ var counter, tagMask [gcmBlockSize]byte
+
+ if len(nonce) == gcmStandardNonceSize {
+ // Init counter to nonce||1
+ copy(counter[:], nonce)
+ counter[gcmBlockSize-1] = 1
+ } else {
+ // Otherwise counter = GHASH(nonce)
+ gcmAesData(&g.productTable, nonce, &counter)
+ gcmAesFinish(&g.productTable, &tagMask, &counter, uint64(len(nonce)), uint64(0))
+ }
+
+ encryptBlockAsm(len(g.ks)/4-1, &g.ks[0], &tagMask[0], &counter[0])
+
+ var tagOut [gcmTagSize]byte
+ gcmAesData(&g.productTable, data, &tagOut)
+
+ ret, out := sliceForAppend(dst, len(plaintext)+g.tagSize)
+ if subtleoverlap.InexactOverlap(out[:len(plaintext)], plaintext) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+ if len(plaintext) > 0 {
+ gcmAesEnc(&g.productTable, out, plaintext, &counter, &tagOut, g.ks)
+ }
+ gcmAesFinish(&g.productTable, &tagMask, &tagOut, uint64(len(plaintext)), uint64(len(data)))
+ copy(out[len(plaintext):], tagOut[:])
+
+ return ret
+}
+
+// Open authenticates and decrypts ciphertext. See the cipher.AEAD interface
+// for details.
+func (g *gcmAsm) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
+ if len(nonce) != g.nonceSize {
+ panic("crypto/cipher: incorrect nonce length given to GCM")
+ }
+ // Sanity check to prevent the authentication from always succeeding if an implementation
+ // leaves tagSize uninitialized, for example.
+ if g.tagSize < gcmMinimumTagSize {
+ panic("crypto/cipher: incorrect GCM tag size")
+ }
+
+ if len(ciphertext) < g.tagSize {
+ return nil, errOpen
+ }
+ if uint64(len(ciphertext)) > ((1<<32)-2)*uint64(BlockSize)+uint64(g.tagSize) {
+ return nil, errOpen
+ }
+
+ tag := ciphertext[len(ciphertext)-g.tagSize:]
+ ciphertext = ciphertext[:len(ciphertext)-g.tagSize]
+
+ // See GCM spec, section 7.1.
+ var counter, tagMask [gcmBlockSize]byte
+
+ if len(nonce) == gcmStandardNonceSize {
+ // Init counter to nonce||1
+ copy(counter[:], nonce)
+ counter[gcmBlockSize-1] = 1
+ } else {
+ // Otherwise counter = GHASH(nonce)
+ gcmAesData(&g.productTable, nonce, &counter)
+ gcmAesFinish(&g.productTable, &tagMask, &counter, uint64(len(nonce)), uint64(0))
+ }
+
+ encryptBlockAsm(len(g.ks)/4-1, &g.ks[0], &tagMask[0], &counter[0])
+
+ var expectedTag [gcmTagSize]byte
+ gcmAesData(&g.productTable, data, &expectedTag)
+
+ ret, out := sliceForAppend(dst, len(ciphertext))
+ if subtleoverlap.InexactOverlap(out, ciphertext) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+ if len(ciphertext) > 0 {
+ gcmAesDec(&g.productTable, out, ciphertext, &counter, &expectedTag, g.ks)
+ }
+ gcmAesFinish(&g.productTable, &tagMask, &expectedTag, uint64(len(ciphertext)), uint64(len(data)))
+
+ if subtle.ConstantTimeCompare(expectedTag[:g.tagSize], tag) != 1 {
+ for i := range out {
+ out[i] = 0
+ }
+ return nil, errOpen
+ }
+
+ return ret, nil
+}
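
This assembly-backed path is reached transparently through the public API: crypto/cipher.NewGCM detects the gcmAble implementation above. A minimal round-trip sketch (the zero-filled key and nonce are purely for illustration; real callers need a random key and a nonce that is unique per message):

    package main

    import (
    	"crypto/aes"
    	"crypto/cipher"
    	"fmt"
    	"log"
    )

    func main() {
    	key := make([]byte, 16) // AES-128; all-zero for illustration only
    	block, err := aes.NewCipher(key)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Takes the aesCipherGCM fast path when the CPU has AES and
    	// carry-less multiply support; falls back to generic GCM otherwise.
    	aead, err := cipher.NewGCM(block)
    	if err != nil {
    		log.Fatal(err)
    	}

    	nonce := make([]byte, aead.NonceSize()) // must be unique per message
    	ct := aead.Seal(nil, nonce, []byte("attack at dawn"), nil)
    	pt, err := aead.Open(nil, nonce, ct, nil)
    	if err != nil {
    		log.Fatal(err) // "cipher: message authentication failed"
    	}
    	fmt.Printf("%s\n", pt)
    }
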
diff --git a/contrib/go/_std_1.18/src/crypto/aes/asm_amd64.s b/contrib/go/_std_1.19/src/crypto/aes/asm_amd64.s
index ed831bf47f..ed831bf47f 100644
--- a/contrib/go/_std_1.18/src/crypto/aes/asm_amd64.s
+++ b/contrib/go/_std_1.19/src/crypto/aes/asm_amd64.s
diff --git a/contrib/go/_std_1.18/src/crypto/aes/block.go b/contrib/go/_std_1.19/src/crypto/aes/block.go
index 53308ae92e..53308ae92e 100644
--- a/contrib/go/_std_1.18/src/crypto/aes/block.go
+++ b/contrib/go/_std_1.19/src/crypto/aes/block.go
diff --git a/contrib/go/_std_1.19/src/crypto/aes/cipher.go b/contrib/go/_std_1.19/src/crypto/aes/cipher.go
new file mode 100644
index 0000000000..db0ee38b78
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/aes/cipher.go
@@ -0,0 +1,82 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package aes
+
+import (
+ "crypto/cipher"
+ "crypto/internal/boring"
+ "crypto/internal/subtle"
+ "strconv"
+)
+
+// The AES block size in bytes.
+const BlockSize = 16
+
+// An aesCipher is an instance of AES encryption using a particular key.
+type aesCipher struct {
+ enc []uint32
+ dec []uint32
+}
+
+type KeySizeError int
+
+func (k KeySizeError) Error() string {
+ return "crypto/aes: invalid key size " + strconv.Itoa(int(k))
+}
+
+// NewCipher creates and returns a new cipher.Block.
+// The key argument should be the AES key,
+// either 16, 24, or 32 bytes to select
+// AES-128, AES-192, or AES-256.
+func NewCipher(key []byte) (cipher.Block, error) {
+ k := len(key)
+ switch k {
+ default:
+ return nil, KeySizeError(k)
+ case 16, 24, 32:
+ break
+ }
+ if boring.Enabled {
+ return boring.NewAESCipher(key)
+ }
+ return newCipher(key)
+}
+
+// newCipherGeneric creates and returns a new cipher.Block
+// implemented in pure Go.
+func newCipherGeneric(key []byte) (cipher.Block, error) {
+ n := len(key) + 28
+ c := aesCipher{make([]uint32, n), make([]uint32, n)}
+ expandKeyGo(key, c.enc, c.dec)
+ return &c, nil
+}
+
+func (c *aesCipher) BlockSize() int { return BlockSize }
+
+func (c *aesCipher) Encrypt(dst, src []byte) {
+ if len(src) < BlockSize {
+ panic("crypto/aes: input not full block")
+ }
+ if len(dst) < BlockSize {
+ panic("crypto/aes: output not full block")
+ }
+ if subtle.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
+ panic("crypto/aes: invalid buffer overlap")
+ }
+ encryptBlockGo(c.enc, dst, src)
+}
+
+func (c *aesCipher) Decrypt(dst, src []byte) {
+ if len(src) < BlockSize {
+ panic("crypto/aes: input not full block")
+ }
+ if len(dst) < BlockSize {
+ panic("crypto/aes: output not full block")
+ }
+ if subtle.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
+ panic("crypto/aes: invalid buffer overlap")
+ }
+ decryptBlockGo(c.dec, dst, src)
+}
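
A small sketch of the key-size contract enforced by NewCipher above: 16, 24, or 32 bytes select AES-128/192/256, and anything else yields a KeySizeError:

    package main

    import (
    	"crypto/aes"
    	"fmt"
    )

    func main() {
    	for _, n := range []int{15, 16, 24, 32} {
    		_, err := aes.NewCipher(make([]byte, n))
    		fmt.Printf("%2d-byte key: %v\n", n, err)
    	}
    	// 15-byte key: crypto/aes: invalid key size 15
    	// the other three succeed (err == nil)
    }
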
diff --git a/contrib/go/_std_1.19/src/crypto/aes/cipher_asm.go b/contrib/go/_std_1.19/src/crypto/aes/cipher_asm.go
new file mode 100644
index 0000000000..1482b22d08
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/aes/cipher_asm.go
@@ -0,0 +1,113 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64 || ppc64 || ppc64le
+
+package aes
+
+import (
+ "crypto/cipher"
+ "crypto/internal/boring"
+ "crypto/internal/subtle"
+ "internal/cpu"
+ "internal/goarch"
+)
+
+// defined in asm_*.s
+
+//go:noescape
+func encryptBlockAsm(nr int, xk *uint32, dst, src *byte)
+
+//go:noescape
+func decryptBlockAsm(nr int, xk *uint32, dst, src *byte)
+
+//go:noescape
+func expandKeyAsm(nr int, key *byte, enc *uint32, dec *uint32)
+
+type aesCipherAsm struct {
+ aesCipher
+}
+
+// aesCipherGCM implements crypto/cipher.gcmAble so that crypto/cipher.NewGCM
+// will use the optimised implementation in aes_gcm.go when possible.
+// Instances of this type only exist when supportsAES and supportsGFMUL are
+// both true. Likewise, the gcmAble implementation is in aes_gcm.go.
+type aesCipherGCM struct {
+ aesCipherAsm
+}
+
+var supportsAES = cpu.X86.HasAES || cpu.ARM64.HasAES || goarch.IsPpc64 == 1 || goarch.IsPpc64le == 1
+var supportsGFMUL = cpu.X86.HasPCLMULQDQ || cpu.ARM64.HasPMULL
+
+func newCipher(key []byte) (cipher.Block, error) {
+ if !supportsAES {
+ return newCipherGeneric(key)
+ }
+ n := len(key) + 28
+ c := aesCipherAsm{aesCipher{make([]uint32, n), make([]uint32, n)}}
+ var rounds int
+ switch len(key) {
+ case 128 / 8:
+ rounds = 10
+ case 192 / 8:
+ rounds = 12
+ case 256 / 8:
+ rounds = 14
+ default:
+ return nil, KeySizeError(len(key))
+ }
+
+ expandKeyAsm(rounds, &key[0], &c.enc[0], &c.dec[0])
+ if supportsAES && supportsGFMUL {
+ return &aesCipherGCM{c}, nil
+ }
+ return &c, nil
+}
+
+func (c *aesCipherAsm) BlockSize() int { return BlockSize }
+
+func (c *aesCipherAsm) Encrypt(dst, src []byte) {
+ boring.Unreachable()
+ if len(src) < BlockSize {
+ panic("crypto/aes: input not full block")
+ }
+ if len(dst) < BlockSize {
+ panic("crypto/aes: output not full block")
+ }
+ if subtle.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
+ panic("crypto/aes: invalid buffer overlap")
+ }
+ encryptBlockAsm(len(c.enc)/4-1, &c.enc[0], &dst[0], &src[0])
+}
+
+func (c *aesCipherAsm) Decrypt(dst, src []byte) {
+ boring.Unreachable()
+ if len(src) < BlockSize {
+ panic("crypto/aes: input not full block")
+ }
+ if len(dst) < BlockSize {
+ panic("crypto/aes: output not full block")
+ }
+ if subtle.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
+ panic("crypto/aes: invalid buffer overlap")
+ }
+ decryptBlockAsm(len(c.dec)/4-1, &c.dec[0], &dst[0], &src[0])
+}
+
+// expandKey is used by BenchmarkExpand to ensure that the asm implementation
+// of key expansion is used for the benchmark when it is available.
+func expandKey(key []byte, enc, dec []uint32) {
+ if supportsAES {
+ rounds := 10 // rounds needed for AES128
+ switch len(key) {
+ case 192 / 8:
+ rounds = 12
+ case 256 / 8:
+ rounds = 14
+ }
+ expandKeyAsm(rounds, &key[0], &enc[0], &dec[0])
+ } else {
+ expandKeyGo(key, enc, dec)
+ }
+}
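
The switches above encode the AES key-schedule identity Nr = Nk + 6, where Nk = len(key)/4 is the key length in 32-bit words; the schedule allocated as len(key)+28 words is exactly 4*(Nr+1), which is why encryptBlockAsm is invoked with len(c.enc)/4-1 rounds. A small check of that arithmetic (rounds is a hypothetical helper, not part of the package):

    package main

    import "fmt"

    // rounds mirrors the switch in newCipher: Nr = Nk + 6, with Nk = keyBytes/4.
    func rounds(keyBytes int) int { return keyBytes/4 + 6 }

    func main() {
    	for _, keyBytes := range []int{16, 24, 32} {
    		nr := rounds(keyBytes)
    		words := keyBytes + 28 // == 4*(Nr+1), the schedule length used above
    		fmt.Printf("AES-%d: Nr=%d, schedule=%d words, len/4-1=%d\n",
    			keyBytes*8, nr, words, words/4-1)
    	}
    	// AES-128: Nr=10, schedule=44 words, len/4-1=10
    	// AES-192: Nr=12, schedule=52 words, len/4-1=12
    	// AES-256: Nr=14, schedule=60 words, len/4-1=14
    }
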
diff --git a/contrib/go/_std_1.18/src/crypto/aes/const.go b/contrib/go/_std_1.19/src/crypto/aes/const.go
index 4eca4b9aff..4eca4b9aff 100644
--- a/contrib/go/_std_1.18/src/crypto/aes/const.go
+++ b/contrib/go/_std_1.19/src/crypto/aes/const.go
diff --git a/contrib/go/_std_1.18/src/crypto/aes/gcm_amd64.s b/contrib/go/_std_1.19/src/crypto/aes/gcm_amd64.s
index e6eedf3264..e6eedf3264 100644
--- a/contrib/go/_std_1.18/src/crypto/aes/gcm_amd64.s
+++ b/contrib/go/_std_1.19/src/crypto/aes/gcm_amd64.s
diff --git a/contrib/go/_std_1.18/src/crypto/aes/modes.go b/contrib/go/_std_1.19/src/crypto/aes/modes.go
index 5c0b08eb6d..5c0b08eb6d 100644
--- a/contrib/go/_std_1.18/src/crypto/aes/modes.go
+++ b/contrib/go/_std_1.19/src/crypto/aes/modes.go
diff --git a/contrib/go/_std_1.19/src/crypto/cipher/cbc.go b/contrib/go/_std_1.19/src/crypto/cipher/cbc.go
new file mode 100644
index 0000000000..a719b61e24
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/cipher/cbc.go
@@ -0,0 +1,185 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Cipher block chaining (CBC) mode.
+
+// CBC provides confidentiality by xoring (chaining) each plaintext block
+// with the previous ciphertext block before applying the block cipher.
+
+// See NIST SP 800-38A, pp 10-11
+
+package cipher
+
+import "crypto/internal/subtle"
+
+type cbc struct {
+ b Block
+ blockSize int
+ iv []byte
+ tmp []byte
+}
+
+func newCBC(b Block, iv []byte) *cbc {
+ return &cbc{
+ b: b,
+ blockSize: b.BlockSize(),
+ iv: dup(iv),
+ tmp: make([]byte, b.BlockSize()),
+ }
+}
+
+type cbcEncrypter cbc
+
+// cbcEncAble is an interface implemented by ciphers that have a specific
+// optimized implementation of CBC encryption, like crypto/aes.
+// NewCBCEncrypter will check for this interface and return the specific
+// BlockMode if found.
+type cbcEncAble interface {
+ NewCBCEncrypter(iv []byte) BlockMode
+}
+
+// NewCBCEncrypter returns a BlockMode which encrypts in cipher block chaining
+// mode, using the given Block. The length of iv must be the same as the
+// Block's block size.
+func NewCBCEncrypter(b Block, iv []byte) BlockMode {
+ if len(iv) != b.BlockSize() {
+ panic("cipher.NewCBCEncrypter: IV length must equal block size")
+ }
+ if cbc, ok := b.(cbcEncAble); ok {
+ return cbc.NewCBCEncrypter(iv)
+ }
+ return (*cbcEncrypter)(newCBC(b, iv))
+}
+
+// newCBCGenericEncrypter returns a BlockMode which encrypts in cipher block chaining
+// mode, using the given Block. The length of iv must be the same as the
+// Block's block size. This always returns the generic non-asm encrypter for use
+// in fuzz testing.
+func newCBCGenericEncrypter(b Block, iv []byte) BlockMode {
+ if len(iv) != b.BlockSize() {
+ panic("cipher.NewCBCEncrypter: IV length must equal block size")
+ }
+ return (*cbcEncrypter)(newCBC(b, iv))
+}
+
+func (x *cbcEncrypter) BlockSize() int { return x.blockSize }
+
+func (x *cbcEncrypter) CryptBlocks(dst, src []byte) {
+ if len(src)%x.blockSize != 0 {
+ panic("crypto/cipher: input not full blocks")
+ }
+ if len(dst) < len(src) {
+ panic("crypto/cipher: output smaller than input")
+ }
+ if subtle.InexactOverlap(dst[:len(src)], src) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+
+ iv := x.iv
+
+ for len(src) > 0 {
+ // Write the xor to dst, then encrypt in place.
+ xorBytes(dst[:x.blockSize], src[:x.blockSize], iv)
+ x.b.Encrypt(dst[:x.blockSize], dst[:x.blockSize])
+
+ // Move to the next block with this block as the next iv.
+ iv = dst[:x.blockSize]
+ src = src[x.blockSize:]
+ dst = dst[x.blockSize:]
+ }
+
+ // Save the iv for the next CryptBlocks call.
+ copy(x.iv, iv)
+}
+
+func (x *cbcEncrypter) SetIV(iv []byte) {
+ if len(iv) != len(x.iv) {
+ panic("cipher: incorrect length IV")
+ }
+ copy(x.iv, iv)
+}
+
+type cbcDecrypter cbc
+
+// cbcDecAble is an interface implemented by ciphers that have a specific
+// optimized implementation of CBC decryption, like crypto/aes.
+// NewCBCDecrypter will check for this interface and return the specific
+// BlockMode if found.
+type cbcDecAble interface {
+ NewCBCDecrypter(iv []byte) BlockMode
+}
+
+// NewCBCDecrypter returns a BlockMode which decrypts in cipher block chaining
+// mode, using the given Block. The length of iv must be the same as the
+// Block's block size and must match the iv used to encrypt the data.
+func NewCBCDecrypter(b Block, iv []byte) BlockMode {
+ if len(iv) != b.BlockSize() {
+ panic("cipher.NewCBCDecrypter: IV length must equal block size")
+ }
+ if cbc, ok := b.(cbcDecAble); ok {
+ return cbc.NewCBCDecrypter(iv)
+ }
+ return (*cbcDecrypter)(newCBC(b, iv))
+}
+
+// newCBCGenericDecrypter returns a BlockMode which decrypts in cipher block chaining
+// mode, using the given Block. The length of iv must be the same as the
+// Block's block size. This always returns the generic non-asm decrypter for use in
+// fuzz testing.
+func newCBCGenericDecrypter(b Block, iv []byte) BlockMode {
+ if len(iv) != b.BlockSize() {
+ panic("cipher.NewCBCDecrypter: IV length must equal block size")
+ }
+ return (*cbcDecrypter)(newCBC(b, iv))
+}
+
+func (x *cbcDecrypter) BlockSize() int { return x.blockSize }
+
+func (x *cbcDecrypter) CryptBlocks(dst, src []byte) {
+ if len(src)%x.blockSize != 0 {
+ panic("crypto/cipher: input not full blocks")
+ }
+ if len(dst) < len(src) {
+ panic("crypto/cipher: output smaller than input")
+ }
+ if subtle.InexactOverlap(dst[:len(src)], src) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+ if len(src) == 0 {
+ return
+ }
+
+ // For each block, we need to xor the decrypted data with the previous block's ciphertext (the iv).
+ // To avoid making a copy each time, we loop over the blocks BACKWARDS.
+ end := len(src)
+ start := end - x.blockSize
+ prev := start - x.blockSize
+
+ // Copy the last block of ciphertext in preparation as the new iv.
+ copy(x.tmp, src[start:end])
+
+ // Loop over all but the first block.
+ for start > 0 {
+ x.b.Decrypt(dst[start:end], src[start:end])
+ xorBytes(dst[start:end], dst[start:end], src[prev:start])
+
+ end = start
+ start = prev
+ prev -= x.blockSize
+ }
+
+ // The first block is special because it uses the saved iv.
+ x.b.Decrypt(dst[start:end], src[start:end])
+ xorBytes(dst[start:end], dst[start:end], x.iv)
+
+ // Set the new iv to the first block we copied earlier.
+ x.iv, x.tmp = x.tmp, x.iv
+}
+
+func (x *cbcDecrypter) SetIV(iv []byte) {
+ if len(iv) != len(x.iv) {
+ panic("cipher: incorrect length IV")
+ }
+ copy(x.iv, iv)
+}
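
A round-trip sketch of the two constructors above (the zero key/IV and the block-aligned message are for illustration only; real use needs a random IV and a padding scheme for arbitrary lengths):

    package main

    import (
    	"crypto/aes"
    	"crypto/cipher"
    	"fmt"
    	"log"
    )

    func main() {
    	key := make([]byte, 16) // illustration only
    	iv := make([]byte, aes.BlockSize)
    	block, err := aes.NewCipher(key)
    	if err != nil {
    		log.Fatal(err)
    	}

    	// CryptBlocks panics on partial blocks, so the input here is
    	// exactly two 16-byte blocks.
    	pt := []byte("0123456789abcdef0123456789abcdef")
    	ct := make([]byte, len(pt))
    	cipher.NewCBCEncrypter(block, iv).CryptBlocks(ct, pt)

    	out := make([]byte, len(ct))
    	cipher.NewCBCDecrypter(block, iv).CryptBlocks(out, ct)
    	fmt.Printf("%s\n", out)
    }
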
diff --git a/contrib/go/_std_1.18/src/crypto/cipher/cfb.go b/contrib/go/_std_1.19/src/crypto/cipher/cfb.go
index 80c9bc24ea..80c9bc24ea 100644
--- a/contrib/go/_std_1.18/src/crypto/cipher/cfb.go
+++ b/contrib/go/_std_1.19/src/crypto/cipher/cfb.go
diff --git a/contrib/go/_std_1.18/src/crypto/cipher/cipher.go b/contrib/go/_std_1.19/src/crypto/cipher/cipher.go
index 7e1a4de9a3..7e1a4de9a3 100644
--- a/contrib/go/_std_1.18/src/crypto/cipher/cipher.go
+++ b/contrib/go/_std_1.19/src/crypto/cipher/cipher.go
diff --git a/contrib/go/_std_1.18/src/crypto/cipher/ctr.go b/contrib/go/_std_1.19/src/crypto/cipher/ctr.go
index cba028d2a4..cba028d2a4 100644
--- a/contrib/go/_std_1.18/src/crypto/cipher/ctr.go
+++ b/contrib/go/_std_1.19/src/crypto/cipher/ctr.go
diff --git a/contrib/go/_std_1.19/src/crypto/cipher/gcm.go b/contrib/go/_std_1.19/src/crypto/cipher/gcm.go
new file mode 100644
index 0000000000..5b14c0a7e2
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/cipher/gcm.go
@@ -0,0 +1,427 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cipher
+
+import (
+ subtleoverlap "crypto/internal/subtle"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+)
+
+// AEAD is a cipher mode providing authenticated encryption with associated
+// data. For a description of the methodology, see
+// https://en.wikipedia.org/wiki/Authenticated_encryption.
+type AEAD interface {
+ // NonceSize returns the size of the nonce that must be passed to Seal
+ // and Open.
+ NonceSize() int
+
+ // Overhead returns the maximum difference between the lengths of a
+ // plaintext and its ciphertext.
+ Overhead() int
+
+ // Seal encrypts and authenticates plaintext, authenticates the
+ // additional data and appends the result to dst, returning the updated
+ // slice. The nonce must be NonceSize() bytes long and unique for all
+ // time, for a given key.
+ //
+ // To reuse plaintext's storage for the encrypted output, use plaintext[:0]
+ // as dst. Otherwise, the remaining capacity of dst must not overlap plaintext.
+ Seal(dst, nonce, plaintext, additionalData []byte) []byte
+
+ // Open decrypts and authenticates ciphertext, authenticates the
+ // additional data and, if successful, appends the resulting plaintext
+ // to dst, returning the updated slice. The nonce must be NonceSize()
+ // bytes long and both it and the additional data must match the
+ // value passed to Seal.
+ //
+ // To reuse ciphertext's storage for the decrypted output, use ciphertext[:0]
+	// as dst. Otherwise, the remaining capacity of dst must not overlap ciphertext.
+ //
+ // Even if the function fails, the contents of dst, up to its capacity,
+ // may be overwritten.
+ Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error)
+}
+
+// gcmAble is an interface implemented by ciphers that have a specific optimized
+// implementation of GCM, like crypto/aes. NewGCM will check for this interface
+// and return the specific AEAD if found.
+type gcmAble interface {
+ NewGCM(nonceSize, tagSize int) (AEAD, error)
+}
+
+// gcmFieldElement represents a value in GF(2¹²⁸). In order to reflect the GCM
+// standard and make binary.BigEndian suitable for marshaling these values, the
+// bits are stored in big endian order. For example:
+//
+// the coefficient of x⁰ can be obtained by v.low >> 63.
+// the coefficient of x⁶³ can be obtained by v.low & 1.
+// the coefficient of x⁶⁴ can be obtained by v.high >> 63.
+// the coefficient of x¹²⁷ can be obtained by v.high & 1.
+type gcmFieldElement struct {
+ low, high uint64
+}
+
+// gcm represents a Galois Counter Mode with a specific key. See
+// https://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf
+type gcm struct {
+ cipher Block
+ nonceSize int
+ tagSize int
+ // productTable contains the first sixteen powers of the key, H.
+ // However, they are in bit reversed order. See NewGCMWithNonceSize.
+ productTable [16]gcmFieldElement
+}
+
+// NewGCM returns the given 128-bit block cipher wrapped in Galois Counter Mode
+// with the standard nonce length.
+//
+// In general, the GHASH operation performed by this implementation of GCM is not constant-time.
+// An exception is when the underlying Block was created by aes.NewCipher
+// on systems with hardware support for AES. See the crypto/aes package documentation for details.
+func NewGCM(cipher Block) (AEAD, error) {
+ return newGCMWithNonceAndTagSize(cipher, gcmStandardNonceSize, gcmTagSize)
+}
+
+// NewGCMWithNonceSize returns the given 128-bit block cipher wrapped in Galois
+// Counter Mode, which accepts nonces of the given length. The length must not
+// be zero.
+//
+// Only use this function if you require compatibility with an existing
+// cryptosystem that uses non-standard nonce lengths. All other users should use
+// NewGCM, which is faster and more resistant to misuse.
+func NewGCMWithNonceSize(cipher Block, size int) (AEAD, error) {
+ return newGCMWithNonceAndTagSize(cipher, size, gcmTagSize)
+}
+
+// NewGCMWithTagSize returns the given 128-bit block cipher wrapped in Galois
+// Counter Mode, which generates tags with the given length.
+//
+// Tag sizes between 12 and 16 bytes are allowed.
+//
+// Only use this function if you require compatibility with an existing
+// cryptosystem that uses non-standard tag lengths. All other users should use
+// NewGCM, which is more resistant to misuse.
+func NewGCMWithTagSize(cipher Block, tagSize int) (AEAD, error) {
+ return newGCMWithNonceAndTagSize(cipher, gcmStandardNonceSize, tagSize)
+}
+
+func newGCMWithNonceAndTagSize(cipher Block, nonceSize, tagSize int) (AEAD, error) {
+ if tagSize < gcmMinimumTagSize || tagSize > gcmBlockSize {
+ return nil, errors.New("cipher: incorrect tag size given to GCM")
+ }
+
+ if nonceSize <= 0 {
+ return nil, errors.New("cipher: the nonce can't have zero length, or the security of the key will be immediately compromised")
+ }
+
+ if cipher, ok := cipher.(gcmAble); ok {
+ return cipher.NewGCM(nonceSize, tagSize)
+ }
+
+ if cipher.BlockSize() != gcmBlockSize {
+ return nil, errors.New("cipher: NewGCM requires 128-bit block cipher")
+ }
+
+ var key [gcmBlockSize]byte
+ cipher.Encrypt(key[:], key[:])
+
+ g := &gcm{cipher: cipher, nonceSize: nonceSize, tagSize: tagSize}
+
+ // We precompute 16 multiples of |key|. However, when we do lookups
+ // into this table we'll be using bits from a field element and
+ // therefore the bits will be in the reverse order. So normally one
+ // would expect, say, 4*key to be in index 4 of the table but due to
+ // this bit ordering it will actually be in index 0010 (base 2) = 2.
+ x := gcmFieldElement{
+ binary.BigEndian.Uint64(key[:8]),
+ binary.BigEndian.Uint64(key[8:]),
+ }
+ g.productTable[reverseBits(1)] = x
+
+ for i := 2; i < 16; i += 2 {
+ g.productTable[reverseBits(i)] = gcmDouble(&g.productTable[reverseBits(i/2)])
+ g.productTable[reverseBits(i+1)] = gcmAdd(&g.productTable[reverseBits(i)], &x)
+ }
+
+ return g, nil
+}
+
+const (
+ gcmBlockSize = 16
+ gcmTagSize = 16
+ gcmMinimumTagSize = 12 // NIST SP 800-38D recommends tags with 12 or more bytes.
+ gcmStandardNonceSize = 12
+)
+
+func (g *gcm) NonceSize() int {
+ return g.nonceSize
+}
+
+func (g *gcm) Overhead() int {
+ return g.tagSize
+}
+
+func (g *gcm) Seal(dst, nonce, plaintext, data []byte) []byte {
+ if len(nonce) != g.nonceSize {
+ panic("crypto/cipher: incorrect nonce length given to GCM")
+ }
+ if uint64(len(plaintext)) > ((1<<32)-2)*uint64(g.cipher.BlockSize()) {
+ panic("crypto/cipher: message too large for GCM")
+ }
+
+ ret, out := sliceForAppend(dst, len(plaintext)+g.tagSize)
+ if subtleoverlap.InexactOverlap(out, plaintext) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+
+ var counter, tagMask [gcmBlockSize]byte
+ g.deriveCounter(&counter, nonce)
+
+ g.cipher.Encrypt(tagMask[:], counter[:])
+ gcmInc32(&counter)
+
+ g.counterCrypt(out, plaintext, &counter)
+
+ var tag [gcmTagSize]byte
+ g.auth(tag[:], out[:len(plaintext)], data, &tagMask)
+ copy(out[len(plaintext):], tag[:])
+
+ return ret
+}
+
+var errOpen = errors.New("cipher: message authentication failed")
+
+func (g *gcm) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
+ if len(nonce) != g.nonceSize {
+ panic("crypto/cipher: incorrect nonce length given to GCM")
+ }
+ // Sanity check to prevent the authentication from always succeeding if an implementation
+ // leaves tagSize uninitialized, for example.
+ if g.tagSize < gcmMinimumTagSize {
+ panic("crypto/cipher: incorrect GCM tag size")
+ }
+
+ if len(ciphertext) < g.tagSize {
+ return nil, errOpen
+ }
+ if uint64(len(ciphertext)) > ((1<<32)-2)*uint64(g.cipher.BlockSize())+uint64(g.tagSize) {
+ return nil, errOpen
+ }
+
+ tag := ciphertext[len(ciphertext)-g.tagSize:]
+ ciphertext = ciphertext[:len(ciphertext)-g.tagSize]
+
+ var counter, tagMask [gcmBlockSize]byte
+ g.deriveCounter(&counter, nonce)
+
+ g.cipher.Encrypt(tagMask[:], counter[:])
+ gcmInc32(&counter)
+
+ var expectedTag [gcmTagSize]byte
+ g.auth(expectedTag[:], ciphertext, data, &tagMask)
+
+ ret, out := sliceForAppend(dst, len(ciphertext))
+ if subtleoverlap.InexactOverlap(out, ciphertext) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+
+ if subtle.ConstantTimeCompare(expectedTag[:g.tagSize], tag) != 1 {
+ // The AESNI code decrypts and authenticates concurrently, and
+ // so overwrites dst in the event of a tag mismatch. That
+ // behavior is mimicked here in order to be consistent across
+ // platforms.
+ for i := range out {
+ out[i] = 0
+ }
+ return nil, errOpen
+ }
+
+ g.counterCrypt(out, ciphertext, &counter)
+
+ return ret, nil
+}
+
+// reverseBits reverses the order of the bits of the 4-bit number i.
+func reverseBits(i int) int {
+ i = ((i << 2) & 0xc) | ((i >> 2) & 0x3)
+ i = ((i << 1) & 0xa) | ((i >> 1) & 0x5)
+ return i
+}
+
+// gcmAdd adds two elements of GF(2¹²⁸) and returns the sum.
+func gcmAdd(x, y *gcmFieldElement) gcmFieldElement {
+ // Addition in a characteristic 2 field is just XOR.
+ return gcmFieldElement{x.low ^ y.low, x.high ^ y.high}
+}
+
+// gcmDouble returns the result of doubling an element of GF(2¹²⁸).
+func gcmDouble(x *gcmFieldElement) (double gcmFieldElement) {
+ msbSet := x.high&1 == 1
+
+ // Because of the bit-ordering, doubling is actually a right shift.
+ double.high = x.high >> 1
+ double.high |= x.low << 63
+ double.low = x.low >> 1
+
+ // If the most-significant bit was set before shifting then it,
+ // conceptually, becomes a term of x^128. This is greater than the
+ // irreducible polynomial so the result has to be reduced. The
+ // irreducible polynomial is 1+x+x^2+x^7+x^128. We can subtract that to
+ // eliminate the term at x^128 which also means subtracting the other
+ // four terms. In characteristic 2 fields, subtraction == addition ==
+ // XOR.
+ if msbSet {
+ double.low ^= 0xe100000000000000
+ }
+
+ return
+}
+
+var gcmReductionTable = []uint16{
+ 0x0000, 0x1c20, 0x3840, 0x2460, 0x7080, 0x6ca0, 0x48c0, 0x54e0,
+ 0xe100, 0xfd20, 0xd940, 0xc560, 0x9180, 0x8da0, 0xa9c0, 0xb5e0,
+}
+
+// mul sets y to y*H, where H is the GCM key, fixed during NewGCMWithNonceSize.
+func (g *gcm) mul(y *gcmFieldElement) {
+ var z gcmFieldElement
+
+ for i := 0; i < 2; i++ {
+ word := y.high
+ if i == 1 {
+ word = y.low
+ }
+
+ // Multiplication works by multiplying z by 16 and adding in
+ // one of the precomputed multiples of H.
+ for j := 0; j < 64; j += 4 {
+ msw := z.high & 0xf
+ z.high >>= 4
+ z.high |= z.low << 60
+ z.low >>= 4
+ z.low ^= uint64(gcmReductionTable[msw]) << 48
+
+			// the values in productTable are ordered for
+			// little-endian bit positions. See the comment
+			// in NewGCMWithNonceSize.
+ t := &g.productTable[word&0xf]
+
+ z.low ^= t.low
+ z.high ^= t.high
+ word >>= 4
+ }
+ }
+
+ *y = z
+}
+
+// updateBlocks extends y with more polynomial terms from blocks, based on
+// Horner's rule. There must be a multiple of gcmBlockSize bytes in blocks.
+func (g *gcm) updateBlocks(y *gcmFieldElement, blocks []byte) {
+ for len(blocks) > 0 {
+ y.low ^= binary.BigEndian.Uint64(blocks)
+ y.high ^= binary.BigEndian.Uint64(blocks[8:])
+ g.mul(y)
+ blocks = blocks[gcmBlockSize:]
+ }
+}
+
+// update extends y with more polynomial terms from data. If data is not a
+// multiple of gcmBlockSize bytes long then the remainder is zero padded.
+func (g *gcm) update(y *gcmFieldElement, data []byte) {
+ fullBlocks := (len(data) >> 4) << 4
+ g.updateBlocks(y, data[:fullBlocks])
+
+ if len(data) != fullBlocks {
+ var partialBlock [gcmBlockSize]byte
+ copy(partialBlock[:], data[fullBlocks:])
+ g.updateBlocks(y, partialBlock[:])
+ }
+}
+
+// gcmInc32 treats the final four bytes of counterBlock as a big-endian value
+// and increments it.
+func gcmInc32(counterBlock *[16]byte) {
+ ctr := counterBlock[len(counterBlock)-4:]
+ binary.BigEndian.PutUint32(ctr, binary.BigEndian.Uint32(ctr)+1)
+}
+
+// sliceForAppend takes a slice and a requested number of bytes. It returns a
+// slice with the contents of the given slice followed by that many bytes and a
+// second slice that aliases into it and contains only the extra bytes. If the
+// original slice has sufficient capacity then no allocation is performed.
+func sliceForAppend(in []byte, n int) (head, tail []byte) {
+ if total := len(in) + n; cap(in) >= total {
+ head = in[:total]
+ } else {
+ head = make([]byte, total)
+ copy(head, in)
+ }
+ tail = head[len(in):]
+ return
+}
+
+// counterCrypt crypts in to out using g.cipher in counter mode.
+func (g *gcm) counterCrypt(out, in []byte, counter *[gcmBlockSize]byte) {
+ var mask [gcmBlockSize]byte
+
+ for len(in) >= gcmBlockSize {
+ g.cipher.Encrypt(mask[:], counter[:])
+ gcmInc32(counter)
+
+ xorWords(out, in, mask[:])
+ out = out[gcmBlockSize:]
+ in = in[gcmBlockSize:]
+ }
+
+ if len(in) > 0 {
+ g.cipher.Encrypt(mask[:], counter[:])
+ gcmInc32(counter)
+ xorBytes(out, in, mask[:])
+ }
+}
+
+// deriveCounter computes the initial GCM counter state from the given nonce.
+// See NIST SP 800-38D, section 7.1. This assumes that counter is filled with
+// zeros on entry.
+func (g *gcm) deriveCounter(counter *[gcmBlockSize]byte, nonce []byte) {
+ // GCM has two modes of operation with respect to the initial counter
+ // state: a "fast path" for 96-bit (12-byte) nonces, and a "slow path"
+ // for nonces of other lengths. For a 96-bit nonce, the nonce, along
+ // with a four-byte big-endian counter starting at one, is used
+ // directly as the starting counter. For other nonce sizes, the counter
+ // is computed by passing it through the GHASH function.
+ if len(nonce) == gcmStandardNonceSize {
+ copy(counter[:], nonce)
+ counter[gcmBlockSize-1] = 1
+ } else {
+ var y gcmFieldElement
+ g.update(&y, nonce)
+ y.high ^= uint64(len(nonce)) * 8
+ g.mul(&y)
+ binary.BigEndian.PutUint64(counter[:8], y.low)
+ binary.BigEndian.PutUint64(counter[8:], y.high)
+ }
+}
+
+// auth calculates GHASH(ciphertext, additionalData), masks the result with
+// tagMask and writes the result to out.
+func (g *gcm) auth(out, ciphertext, additionalData []byte, tagMask *[gcmTagSize]byte) {
+ var y gcmFieldElement
+ g.update(&y, additionalData)
+ g.update(&y, ciphertext)
+
+ y.low ^= uint64(len(additionalData)) * 8
+ y.high ^= uint64(len(ciphertext)) * 8
+
+ g.mul(&y)
+
+ binary.BigEndian.PutUint64(out, y.low)
+ binary.BigEndian.PutUint64(out[8:], y.high)
+
+ xorWords(out, out, tagMask[:])
+}
diff --git a/contrib/go/_std_1.18/src/crypto/cipher/io.go b/contrib/go/_std_1.19/src/crypto/cipher/io.go
index 0974ac748e..0974ac748e 100644
--- a/contrib/go/_std_1.18/src/crypto/cipher/io.go
+++ b/contrib/go/_std_1.19/src/crypto/cipher/io.go
diff --git a/contrib/go/_std_1.18/src/crypto/cipher/ofb.go b/contrib/go/_std_1.19/src/crypto/cipher/ofb.go
index fc47724865..fc47724865 100644
--- a/contrib/go/_std_1.18/src/crypto/cipher/ofb.go
+++ b/contrib/go/_std_1.19/src/crypto/cipher/ofb.go
diff --git a/contrib/go/_std_1.18/src/crypto/cipher/xor_amd64.go b/contrib/go/_std_1.19/src/crypto/cipher/xor_amd64.go
index a595acc017..a595acc017 100644
--- a/contrib/go/_std_1.18/src/crypto/cipher/xor_amd64.go
+++ b/contrib/go/_std_1.19/src/crypto/cipher/xor_amd64.go
diff --git a/contrib/go/_std_1.18/src/crypto/cipher/xor_amd64.s b/contrib/go/_std_1.19/src/crypto/cipher/xor_amd64.s
index 780d37a06e..780d37a06e 100644
--- a/contrib/go/_std_1.18/src/crypto/cipher/xor_amd64.s
+++ b/contrib/go/_std_1.19/src/crypto/cipher/xor_amd64.s
diff --git a/contrib/go/_std_1.19/src/crypto/crypto.go b/contrib/go/_std_1.19/src/crypto/crypto.go
new file mode 100644
index 0000000000..10a1cd8403
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/crypto.go
@@ -0,0 +1,223 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package crypto collects common cryptographic constants.
+package crypto
+
+import (
+ "hash"
+ "io"
+ "strconv"
+)
+
+// Hash identifies a cryptographic hash function that is implemented in another
+// package.
+type Hash uint
+
+// HashFunc simply returns the value of h so that Hash implements SignerOpts.
+func (h Hash) HashFunc() Hash {
+ return h
+}
+
+func (h Hash) String() string {
+ switch h {
+ case MD4:
+ return "MD4"
+ case MD5:
+ return "MD5"
+ case SHA1:
+ return "SHA-1"
+ case SHA224:
+ return "SHA-224"
+ case SHA256:
+ return "SHA-256"
+ case SHA384:
+ return "SHA-384"
+ case SHA512:
+ return "SHA-512"
+ case MD5SHA1:
+ return "MD5+SHA1"
+ case RIPEMD160:
+ return "RIPEMD-160"
+ case SHA3_224:
+ return "SHA3-224"
+ case SHA3_256:
+ return "SHA3-256"
+ case SHA3_384:
+ return "SHA3-384"
+ case SHA3_512:
+ return "SHA3-512"
+ case SHA512_224:
+ return "SHA-512/224"
+ case SHA512_256:
+ return "SHA-512/256"
+ case BLAKE2s_256:
+ return "BLAKE2s-256"
+ case BLAKE2b_256:
+ return "BLAKE2b-256"
+ case BLAKE2b_384:
+ return "BLAKE2b-384"
+ case BLAKE2b_512:
+ return "BLAKE2b-512"
+ default:
+ return "unknown hash value " + strconv.Itoa(int(h))
+ }
+}
+
+const (
+ MD4 Hash = 1 + iota // import golang.org/x/crypto/md4
+ MD5 // import crypto/md5
+ SHA1 // import crypto/sha1
+ SHA224 // import crypto/sha256
+ SHA256 // import crypto/sha256
+ SHA384 // import crypto/sha512
+ SHA512 // import crypto/sha512
+ MD5SHA1 // no implementation; MD5+SHA1 used for TLS RSA
+ RIPEMD160 // import golang.org/x/crypto/ripemd160
+ SHA3_224 // import golang.org/x/crypto/sha3
+ SHA3_256 // import golang.org/x/crypto/sha3
+ SHA3_384 // import golang.org/x/crypto/sha3
+ SHA3_512 // import golang.org/x/crypto/sha3
+ SHA512_224 // import crypto/sha512
+ SHA512_256 // import crypto/sha512
+ BLAKE2s_256 // import golang.org/x/crypto/blake2s
+ BLAKE2b_256 // import golang.org/x/crypto/blake2b
+ BLAKE2b_384 // import golang.org/x/crypto/blake2b
+ BLAKE2b_512 // import golang.org/x/crypto/blake2b
+ maxHash
+)
+
+var digestSizes = []uint8{
+ MD4: 16,
+ MD5: 16,
+ SHA1: 20,
+ SHA224: 28,
+ SHA256: 32,
+ SHA384: 48,
+ SHA512: 64,
+ SHA512_224: 28,
+ SHA512_256: 32,
+ SHA3_224: 28,
+ SHA3_256: 32,
+ SHA3_384: 48,
+ SHA3_512: 64,
+ MD5SHA1: 36,
+ RIPEMD160: 20,
+ BLAKE2s_256: 32,
+ BLAKE2b_256: 32,
+ BLAKE2b_384: 48,
+ BLAKE2b_512: 64,
+}
+
+// Size returns the length, in bytes, of a digest resulting from the given hash
+// function. It doesn't require that the hash function in question be linked
+// into the program.
+func (h Hash) Size() int {
+ if h > 0 && h < maxHash {
+ return int(digestSizes[h])
+ }
+ panic("crypto: Size of unknown hash function")
+}
+
+var hashes = make([]func() hash.Hash, maxHash)
+
+// New returns a new hash.Hash calculating the given hash function. New panics
+// if the hash function is not linked into the binary.
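+//
+// A hedged usage sketch (assumes crypto/sha256 has been linked into the
+// binary, for example via a blank import):
+//
+//	h := crypto.SHA256.New()
+//	h.Write([]byte("hello"))
+//	sum := h.Sum(nil)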
+func (h Hash) New() hash.Hash {
+ if h > 0 && h < maxHash {
+ f := hashes[h]
+ if f != nil {
+ return f()
+ }
+ }
+ panic("crypto: requested hash function #" + strconv.Itoa(int(h)) + " is unavailable")
+}
+
+// Available reports whether the given hash function is linked into the binary.
+func (h Hash) Available() bool {
+ return h < maxHash && hashes[h] != nil
+}
+
+// RegisterHash registers a function that returns a new instance of the given
+// hash function. This is intended to be called from the init function in
+// packages that implement hash functions.
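+//
+// For example (a sketch of the conventional pattern, not a requirement of
+// this API), a hash package registers itself in its init function:
+//
+//	func init() {
+//		crypto.RegisterHash(crypto.SHA256, New)
+//	}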
+func RegisterHash(h Hash, f func() hash.Hash) {
+ if h >= maxHash {
+ panic("crypto: RegisterHash of unknown hash function")
+ }
+ hashes[h] = f
+}
+
+// PublicKey represents a public key using an unspecified algorithm.
+//
+// Although this type is an empty interface for backwards compatibility reasons,
+// all public key types in the standard library implement the following interface
+//
+// interface{
+// Equal(x crypto.PublicKey) bool
+// }
+//
+// which can be used for increased type safety within applications.
+type PublicKey any
+
+// PrivateKey represents a private key using an unspecified algorithm.
+//
+// Although this type is an empty interface for backwards compatibility reasons,
+// all private key types in the standard library implement the following interface
+//
+// interface{
+// Public() crypto.PublicKey
+// Equal(x crypto.PrivateKey) bool
+// }
+//
+// as well as purpose-specific interfaces such as Signer and Decrypter, which
+// can be used for increased type safety within applications.
+type PrivateKey any
+
+// Signer is an interface for an opaque private key that can be used for
+// signing operations. For example, an RSA key kept in a hardware module.
+type Signer interface {
+ // Public returns the public key corresponding to the opaque,
+ // private key.
+ Public() PublicKey
+
+ // Sign signs digest with the private key, possibly using entropy from
+ // rand. For an RSA key, the resulting signature should be either a
+ // PKCS #1 v1.5 or PSS signature (as indicated by opts). For an (EC)DSA
+ // key, it should be a DER-serialised, ASN.1 signature structure.
+ //
+ // Hash implements the SignerOpts interface and, in most cases, one can
+ // simply pass in the hash function used as opts. Sign may also attempt
+ // to type assert opts to other types in order to obtain algorithm
+ // specific values. See the documentation in each package for details.
+ //
+ // Note that when a signature of a hash of a larger message is needed,
+ // the caller is responsible for hashing the larger message and passing
+ // the hash (as digest) and the hash function (as opts) to Sign.
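+	//
+	// A hedged usage sketch (signer and message are hypothetical; rand.Reader
+	// is crypto/rand's):
+	//
+	//	digest := sha256.Sum256(message)
+	//	sig, err := signer.Sign(rand.Reader, digest[:], crypto.SHA256)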
+ Sign(rand io.Reader, digest []byte, opts SignerOpts) (signature []byte, err error)
+}
+
+// SignerOpts contains options for signing with a Signer.
+type SignerOpts interface {
+ // HashFunc returns an identifier for the hash function used to produce
+ // the message passed to Signer.Sign, or else zero to indicate that no
+ // hashing was done.
+ HashFunc() Hash
+}
+
+// Decrypter is an interface for an opaque private key that can be used for
+// asymmetric decryption operations. An example would be an RSA key
+// kept in a hardware module.
+type Decrypter interface {
+ // Public returns the public key corresponding to the opaque,
+ // private key.
+ Public() PublicKey
+
+ // Decrypt decrypts msg. The opts argument should be appropriate for
+ // the primitive used. See the documentation in each implementation for
+ // details.
+ Decrypt(rand io.Reader, msg []byte, opts DecrypterOpts) (plaintext []byte, err error)
+}
+
+type DecrypterOpts any
diff --git a/contrib/go/_std_1.19/src/crypto/des/block.go b/contrib/go/_std_1.19/src/crypto/des/block.go
new file mode 100644
index 0000000000..c649dee94f
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/des/block.go
@@ -0,0 +1,259 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package des
+
+import (
+ "encoding/binary"
+ "sync"
+)
+
+func cryptBlock(subkeys []uint64, dst, src []byte, decrypt bool) {
+ b := binary.BigEndian.Uint64(src)
+ b = permuteInitialBlock(b)
+ left, right := uint32(b>>32), uint32(b)
+
+ left = (left << 1) | (left >> 31)
+ right = (right << 1) | (right >> 31)
+
+ if decrypt {
+ for i := 0; i < 8; i++ {
+ left, right = feistel(left, right, subkeys[15-2*i], subkeys[15-(2*i+1)])
+ }
+ } else {
+ for i := 0; i < 8; i++ {
+ left, right = feistel(left, right, subkeys[2*i], subkeys[2*i+1])
+ }
+ }
+
+ left = (left << 31) | (left >> 1)
+ right = (right << 31) | (right >> 1)
+
+ // switch left & right and perform final permutation
+ preOutput := (uint64(right) << 32) | uint64(left)
+ binary.BigEndian.PutUint64(dst, permuteFinalBlock(preOutput))
+}
+
+// Encrypt one block from src into dst, using the subkeys.
+func encryptBlock(subkeys []uint64, dst, src []byte) {
+ cryptBlock(subkeys, dst, src, false)
+}
+
+// Decrypt one block from src into dst, using the subkeys.
+func decryptBlock(subkeys []uint64, dst, src []byte) {
+ cryptBlock(subkeys, dst, src, true)
+}
+
+// DES Feistel function. feistelBox must be initialized via
+// feistelBoxOnce.Do(initFeistelBox) first.
+func feistel(l, r uint32, k0, k1 uint64) (lout, rout uint32) {
+ var t uint32
+
+ t = r ^ uint32(k0>>32)
+ l ^= feistelBox[7][t&0x3f] ^
+ feistelBox[5][(t>>8)&0x3f] ^
+ feistelBox[3][(t>>16)&0x3f] ^
+ feistelBox[1][(t>>24)&0x3f]
+
+ t = ((r << 28) | (r >> 4)) ^ uint32(k0)
+ l ^= feistelBox[6][(t)&0x3f] ^
+ feistelBox[4][(t>>8)&0x3f] ^
+ feistelBox[2][(t>>16)&0x3f] ^
+ feistelBox[0][(t>>24)&0x3f]
+
+ t = l ^ uint32(k1>>32)
+ r ^= feistelBox[7][t&0x3f] ^
+ feistelBox[5][(t>>8)&0x3f] ^
+ feistelBox[3][(t>>16)&0x3f] ^
+ feistelBox[1][(t>>24)&0x3f]
+
+ t = ((l << 28) | (l >> 4)) ^ uint32(k1)
+ r ^= feistelBox[6][(t)&0x3f] ^
+ feistelBox[4][(t>>8)&0x3f] ^
+ feistelBox[2][(t>>16)&0x3f] ^
+ feistelBox[0][(t>>24)&0x3f]
+
+ return l, r
+}
+
+// feistelBox[s][16*i+j] contains the output of permutationFunction
+// for sBoxes[s][i][j] << 4*(7-s)
+var feistelBox [8][64]uint32
+
+var feistelBoxOnce sync.Once
+
+// permuteBlock is a general-purpose function to perform DES block permutations.
+func permuteBlock(src uint64, permutation []uint8) (block uint64) {
+ for position, n := range permutation {
+ bit := (src >> n) & 1
+ block |= bit << uint((len(permutation)-1)-position)
+ }
+ return
+}
+
+func initFeistelBox() {
+ for s := range sBoxes {
+ for i := 0; i < 4; i++ {
+ for j := 0; j < 16; j++ {
+ f := uint64(sBoxes[s][i][j]) << (4 * (7 - uint(s)))
+ f = permuteBlock(f, permutationFunction[:])
+
+ // Row is determined by the 1st and 6th bit.
+ // Column is the middle four bits.
+ row := uint8(((i & 2) << 4) | i&1)
+ col := uint8(j << 1)
+ t := row | col
+
+				// The rotation performed in the Feistel rounds has been
+				// factored out and is mixed into feistelBox here.
+ f = (f << 1) | (f >> 31)
+
+ feistelBox[s][t] = uint32(f)
+ }
+ }
+ }
+}
+
+// permuteInitialBlock is equivalent to the permutation defined
+// by initialPermutation.
+func permuteInitialBlock(block uint64) uint64 {
+ // block = b7 b6 b5 b4 b3 b2 b1 b0 (8 bytes)
+ b1 := block >> 48
+ b2 := block << 48
+ block ^= b1 ^ b2 ^ b1<<48 ^ b2>>48
+
+ // block = b1 b0 b5 b4 b3 b2 b7 b6
+ b1 = block >> 32 & 0xff00ff
+ b2 = (block & 0xff00ff00)
+ block ^= b1<<32 ^ b2 ^ b1<<8 ^ b2<<24 // exchange b0 b4 with b3 b7
+
+ // block is now b1 b3 b5 b7 b0 b2 b4 b6, the permutation:
+ // ... 8
+ // ... 24
+ // ... 40
+ // ... 56
+ // 7 6 5 4 3 2 1 0
+ // 23 22 21 20 19 18 17 16
+ // ... 32
+ // ... 48
+
+ // exchange 4,5,6,7 with 32,33,34,35 etc.
+ b1 = block & 0x0f0f00000f0f0000
+ b2 = block & 0x0000f0f00000f0f0
+ block ^= b1 ^ b2 ^ b1>>12 ^ b2<<12
+
+ // block is the permutation:
+ //
+ // [+8] [+40]
+ //
+ // 7 6 5 4
+ // 23 22 21 20
+ // 3 2 1 0
+ // 19 18 17 16 [+32]
+
+ // exchange 0,1,4,5 with 18,19,22,23
+ b1 = block & 0x3300330033003300
+ b2 = block & 0x00cc00cc00cc00cc
+ block ^= b1 ^ b2 ^ b1>>6 ^ b2<<6
+
+ // block is the permutation:
+ // 15 14
+ // 13 12
+ // 11 10
+ // 9 8
+ // 7 6
+ // 5 4
+ // 3 2
+ // 1 0 [+16] [+32] [+64]
+
+ // exchange 0,2,4,6 with 9,11,13,15:
+ b1 = block & 0xaaaaaaaa55555555
+ block ^= b1 ^ b1>>33 ^ b1<<33
+
+ // block is the permutation:
+ // 6 14 22 30 38 46 54 62
+ // 4 12 20 28 36 44 52 60
+ // 2 10 18 26 34 42 50 58
+ // 0 8 16 24 32 40 48 56
+ // 7 15 23 31 39 47 55 63
+ // 5 13 21 29 37 45 53 61
+ // 3 11 19 27 35 43 51 59
+ // 1 9 17 25 33 41 49 57
+ return block
+}
+
+// permuteFinalBlock is equivalent to the permutation defined
+// by finalPermutation.
+func permuteFinalBlock(block uint64) uint64 {
+ // Perform the same bit exchanges as permuteInitialBlock
+ // but in reverse order.
+ b1 := block & 0xaaaaaaaa55555555
+ block ^= b1 ^ b1>>33 ^ b1<<33
+
+ b1 = block & 0x3300330033003300
+ b2 := block & 0x00cc00cc00cc00cc
+ block ^= b1 ^ b2 ^ b1>>6 ^ b2<<6
+
+ b1 = block & 0x0f0f00000f0f0000
+ b2 = block & 0x0000f0f00000f0f0
+ block ^= b1 ^ b2 ^ b1>>12 ^ b2<<12
+
+ b1 = block >> 32 & 0xff00ff
+ b2 = (block & 0xff00ff00)
+ block ^= b1<<32 ^ b2 ^ b1<<8 ^ b2<<24
+
+ b1 = block >> 48
+ b2 = block << 48
+ block ^= b1 ^ b2 ^ b1<<48 ^ b2>>48
+ return block
+}
+
+// ksRotate creates 16 28-bit blocks rotated according
+// to the rotation schedule.
+func ksRotate(in uint32) (out []uint32) {
+ out = make([]uint32, 16)
+ last := in
+ for i := 0; i < 16; i++ {
+ // 28-bit circular left shift
+ left := (last << (4 + ksRotations[i])) >> 4
+ right := (last << 4) >> (32 - ksRotations[i])
+ out[i] = left | right
+ last = out[i]
+ }
+ return
+}
+
+// generateSubkeys creates 16 56-bit subkeys from the original key.
+func (c *desCipher) generateSubkeys(keyBytes []byte) {
+ feistelBoxOnce.Do(initFeistelBox)
+
+ // apply PC1 permutation to key
+ key := binary.BigEndian.Uint64(keyBytes)
+ permutedKey := permuteBlock(key, permutedChoice1[:])
+
+ // rotate halves of permuted key according to the rotation schedule
+ leftRotations := ksRotate(uint32(permutedKey >> 28))
+ rightRotations := ksRotate(uint32(permutedKey<<4) >> 4)
+
+ // generate subkeys
+ for i := 0; i < 16; i++ {
+ // combine halves to form 56-bit input to PC2
+ pc2Input := uint64(leftRotations[i])<<28 | uint64(rightRotations[i])
+ // apply PC2 permutation to 7 byte input
+ c.subkeys[i] = unpack(permuteBlock(pc2Input, permutedChoice2[:]))
+ }
+}
+
+// unpack expands the 48-bit input to 64 bits, padding each 6-bit block with
+// two extra bits at the top. This keeps the input blocks (four bits each) and
+// the key blocks (six bits each) aligned without extra shifts or rotations.
+func unpack(x uint64) uint64 {
+ return ((x>>(6*1))&0xff)<<(8*0) |
+ ((x>>(6*3))&0xff)<<(8*1) |
+ ((x>>(6*5))&0xff)<<(8*2) |
+ ((x>>(6*7))&0xff)<<(8*3) |
+ ((x>>(6*0))&0xff)<<(8*4) |
+ ((x>>(6*2))&0xff)<<(8*5) |
+ ((x>>(6*4))&0xff)<<(8*6) |
+ ((x>>(6*6))&0xff)<<(8*7)
+}
diff --git a/contrib/go/_std_1.18/src/crypto/des/cipher.go b/contrib/go/_std_1.19/src/crypto/des/cipher.go
index 9e6779c216..9e6779c216 100644
--- a/contrib/go/_std_1.18/src/crypto/des/cipher.go
+++ b/contrib/go/_std_1.19/src/crypto/des/cipher.go
diff --git a/contrib/go/_std_1.18/src/crypto/des/const.go b/contrib/go/_std_1.19/src/crypto/des/const.go
index a20879d574..a20879d574 100644
--- a/contrib/go/_std_1.18/src/crypto/des/const.go
+++ b/contrib/go/_std_1.19/src/crypto/des/const.go
diff --git a/contrib/go/_std_1.18/src/crypto/dsa/dsa.go b/contrib/go/_std_1.19/src/crypto/dsa/dsa.go
index a83359996d..a83359996d 100644
--- a/contrib/go/_std_1.18/src/crypto/dsa/dsa.go
+++ b/contrib/go/_std_1.19/src/crypto/dsa/dsa.go
diff --git a/contrib/go/_std_1.19/src/crypto/ecdsa/ecdsa.go b/contrib/go/_std_1.19/src/crypto/ecdsa/ecdsa.go
new file mode 100644
index 0000000000..d0e52ad864
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/ecdsa/ecdsa.go
@@ -0,0 +1,427 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ecdsa implements the Elliptic Curve Digital Signature Algorithm, as
+// defined in FIPS 186-4 and SEC 1, Version 2.0.
+//
+// Signatures generated by this package are not deterministic, but entropy is
+// mixed with the private key and the message, achieving the same level of
+// security in case of randomness source failure.
+package ecdsa
+
+// [FIPS 186-4] references ANSI X9.62-2005 for the bulk of the ECDSA algorithm.
+// That standard is not freely available, which is a problem in an open source
+// implementation, because not only the implementer, but also any maintainer,
+// contributor, reviewer, auditor, and learner needs access to it. Instead, this
+// package references and follows the equivalent [SEC 1, Version 2.0].
+//
+// [FIPS 186-4]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
+// [SEC 1, Version 2.0]: https://www.secg.org/sec1-v2.pdf
+
+import (
+ "crypto"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/elliptic"
+ "crypto/internal/boring"
+ "crypto/internal/boring/bbig"
+ "crypto/internal/randutil"
+ "crypto/sha512"
+ "errors"
+ "io"
+ "math/big"
+
+ "golang.org/x/crypto/cryptobyte"
+ "golang.org/x/crypto/cryptobyte/asn1"
+)
+
+// An invertible implements fast inverse in GF(N).
+type invertible interface {
+ // Inverse returns the inverse of k mod Params().N.
+ Inverse(k *big.Int) *big.Int
+}
+
+// A combinedMult implements fast combined multiplication for verification.
+type combinedMult interface {
+ // CombinedMult returns [s1]G + [s2]P where G is the generator.
+ CombinedMult(Px, Py *big.Int, s1, s2 []byte) (x, y *big.Int)
+}
+
+const (
+ aesIV = "IV for ECDSA CTR"
+)
+
+// PublicKey represents an ECDSA public key.
+type PublicKey struct {
+ elliptic.Curve
+ X, Y *big.Int
+}
+
+// Any methods implemented on PublicKey might need to also be implemented on
+// PrivateKey, as the latter embeds the former and will expose its methods.
+
+// Equal reports whether pub and x have the same value.
+//
+// Two keys are only considered to have the same value if they have the same Curve value.
+// Note that for example elliptic.P256() and elliptic.P256().Params() are different
+// values, as the latter is a generic, non-constant-time implementation.
+func (pub *PublicKey) Equal(x crypto.PublicKey) bool {
+ xx, ok := x.(*PublicKey)
+ if !ok {
+ return false
+ }
+ return pub.X.Cmp(xx.X) == 0 && pub.Y.Cmp(xx.Y) == 0 &&
+ // Standard library Curve implementations are singletons, so this check
+ // will work for those. Other Curves might be equivalent even if not
+ // singletons, but there is no definitive way to check for that, and
+ // better to err on the side of safety.
+ pub.Curve == xx.Curve
+}
+
+// PrivateKey represents an ECDSA private key.
+type PrivateKey struct {
+ PublicKey
+ D *big.Int
+}
+
+// Public returns the public key corresponding to priv.
+func (priv *PrivateKey) Public() crypto.PublicKey {
+ return &priv.PublicKey
+}
+
+// Equal reports whether priv and x have the same value.
+//
+// See PublicKey.Equal for details on how Curve is compared.
+func (priv *PrivateKey) Equal(x crypto.PrivateKey) bool {
+ xx, ok := x.(*PrivateKey)
+ if !ok {
+ return false
+ }
+ return priv.PublicKey.Equal(&xx.PublicKey) && priv.D.Cmp(xx.D) == 0
+}
+
+// Sign signs digest with priv, reading randomness from rand. The opts argument
+// is not currently used but, in keeping with the crypto.Signer interface,
+// should be the hash function used to digest the message.
+//
+// This method implements crypto.Signer, which is an interface to support keys
+// where the private part is kept in, for example, a hardware module. Common
+// uses can use the SignASN1 function in this package directly.
+func (priv *PrivateKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
+ if boring.Enabled && rand == boring.RandReader {
+ b, err := boringPrivateKey(priv)
+ if err != nil {
+ return nil, err
+ }
+ return boring.SignMarshalECDSA(b, digest)
+ }
+ boring.UnreachableExceptTests()
+
+ r, s, err := Sign(rand, priv, digest)
+ if err != nil {
+ return nil, err
+ }
+
+ var b cryptobyte.Builder
+ b.AddASN1(asn1.SEQUENCE, func(b *cryptobyte.Builder) {
+ b.AddASN1BigInt(r)
+ b.AddASN1BigInt(s)
+ })
+ return b.Bytes()
+}
+
+var one = new(big.Int).SetInt64(1)
+
+// randFieldElement returns a random scalar in [1, N-1], where N is the order
+// of the given curve, using the procedure given in FIPS 186-4, Appendix B.5.1.
+func randFieldElement(c elliptic.Curve, rand io.Reader) (k *big.Int, err error) {
+ params := c.Params()
+ // Note that for P-521 this will actually be 63 bits more than the order, as
+ // division rounds down, but the extra bit is inconsequential.
+ b := make([]byte, params.N.BitLen()/8+8)
+ _, err = io.ReadFull(rand, b)
+ if err != nil {
+ return
+ }
+
+ k = new(big.Int).SetBytes(b)
+ n := new(big.Int).Sub(params.N, one)
+ k.Mod(k, n)
+ k.Add(k, one)
+ return
+}
+
+// GenerateKey generates a public and private key pair.
+func GenerateKey(c elliptic.Curve, rand io.Reader) (*PrivateKey, error) {
+ if boring.Enabled && rand == boring.RandReader {
+ x, y, d, err := boring.GenerateKeyECDSA(c.Params().Name)
+ if err != nil {
+ return nil, err
+ }
+ return &PrivateKey{PublicKey: PublicKey{Curve: c, X: bbig.Dec(x), Y: bbig.Dec(y)}, D: bbig.Dec(d)}, nil
+ }
+ boring.UnreachableExceptTests()
+
+ k, err := randFieldElement(c, rand)
+ if err != nil {
+ return nil, err
+ }
+
+ priv := new(PrivateKey)
+ priv.PublicKey.Curve = c
+ priv.D = k
+ priv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(k.Bytes())
+ return priv, nil
+}
+
+// hashToInt converts a hash value to an integer. Per FIPS 186-4, Section 6.4,
+// we use the left-most bits of the hash to match the bit-length of the order of
+// the curve. This also performs Step 5 of SEC 1, Version 2.0, Section 4.1.3.
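+//
+// As a worked example: with P-256 (a 256-bit order) and a 64-byte SHA-512
+// digest, the digest is truncated to its left-most 32 bytes, and no right
+// shift is needed because 256 bits is a whole number of bytes.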
+func hashToInt(hash []byte, c elliptic.Curve) *big.Int {
+ orderBits := c.Params().N.BitLen()
+ orderBytes := (orderBits + 7) / 8
+ if len(hash) > orderBytes {
+ hash = hash[:orderBytes]
+ }
+
+ ret := new(big.Int).SetBytes(hash)
+ excess := len(hash)*8 - orderBits
+ if excess > 0 {
+ ret.Rsh(ret, uint(excess))
+ }
+ return ret
+}
+
+// fermatInverse calculates the inverse of k in GF(P) using Fermat's method
+// (exponentiation modulo P - 2, per Euler's theorem). This has better
+// constant-time properties than Euclid's method (implemented in
+// math/big.Int.ModInverse and FIPS 186-4, Appendix C.1) although math/big
+// itself isn't strictly constant-time so it's not perfect.
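+//
+// Concretely, since N is prime, k^(N-1) ≡ 1 (mod N) for k not divisible by N,
+// so k^(N-2) ≡ k⁻¹ (mod N).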
+func fermatInverse(k, N *big.Int) *big.Int {
+ two := big.NewInt(2)
+ nMinus2 := new(big.Int).Sub(N, two)
+ return new(big.Int).Exp(k, nMinus2, N)
+}
+
+var errZeroParam = errors.New("zero parameter")
+
+// Sign signs a hash (which should be the result of hashing a larger message)
+// using the private key, priv. If the hash is longer than the bit-length of the
+// private key's curve order, the hash will be truncated to that length. It
+// returns the signature as a pair of integers. Most applications should use
+// SignASN1 instead of dealing directly with r, s.
+func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) {
+ randutil.MaybeReadByte(rand)
+
+ if boring.Enabled && rand == boring.RandReader {
+ b, err := boringPrivateKey(priv)
+ if err != nil {
+ return nil, nil, err
+ }
+ sig, err := boring.SignMarshalECDSA(b, hash)
+ if err != nil {
+ return nil, nil, err
+ }
+ var r, s big.Int
+ var inner cryptobyte.String
+ input := cryptobyte.String(sig)
+ if !input.ReadASN1(&inner, asn1.SEQUENCE) ||
+ !input.Empty() ||
+ !inner.ReadASN1Integer(&r) ||
+ !inner.ReadASN1Integer(&s) ||
+ !inner.Empty() {
+ return nil, nil, errors.New("invalid ASN.1 from boringcrypto")
+ }
+ return &r, &s, nil
+ }
+ boring.UnreachableExceptTests()
+
+ // This implementation derives the nonce from an AES-CTR CSPRNG keyed by:
+ //
+ // SHA2-512(priv.D || entropy || hash)[:32]
+ //
+	// The CSPRNG key is indifferentiable from a random oracle as shown in
+	// [Coron], and the AES-CTR stream is indifferentiable from a random
+	// oracle under standard cryptographic assumptions (see [Larsson] for
+	// examples).
+ //
+ // [Coron]: https://cs.nyu.edu/~dodis/ps/merkle.pdf
+ // [Larsson]: https://web.archive.org/web/20040719170906/https://www.nada.kth.se/kurser/kth/2D1441/semteo03/lecturenotes/assump.pdf
+
+ // Get 256 bits of entropy from rand.
+ entropy := make([]byte, 32)
+ _, err = io.ReadFull(rand, entropy)
+ if err != nil {
+ return
+ }
+
+ // Initialize an SHA-512 hash context; digest...
+ md := sha512.New()
+ md.Write(priv.D.Bytes()) // the private key,
+ md.Write(entropy) // the entropy,
+ md.Write(hash) // and the input hash;
+ key := md.Sum(nil)[:32] // and compute ChopMD-256(SHA-512),
+ // which is an indifferentiable MAC.
+
+ // Create an AES-CTR instance to use as a CSPRNG.
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Create a CSPRNG that xors a stream of zeros with
+ // the output of the AES-CTR instance.
+ csprng := &cipher.StreamReader{
+ R: zeroReader,
+ S: cipher.NewCTR(block, []byte(aesIV)),
+ }
+
+ c := priv.PublicKey.Curve
+ return sign(priv, csprng, c, hash)
+}
+
+func signGeneric(priv *PrivateKey, csprng *cipher.StreamReader, c elliptic.Curve, hash []byte) (r, s *big.Int, err error) {
+ // SEC 1, Version 2.0, Section 4.1.3
+ N := c.Params().N
+ if N.Sign() == 0 {
+ return nil, nil, errZeroParam
+ }
+ var k, kInv *big.Int
+ for {
+ for {
+ k, err = randFieldElement(c, *csprng)
+ if err != nil {
+ r = nil
+ return
+ }
+
+ if in, ok := priv.Curve.(invertible); ok {
+ kInv = in.Inverse(k)
+ } else {
+ kInv = fermatInverse(k, N) // N != 0
+ }
+
+ r, _ = priv.Curve.ScalarBaseMult(k.Bytes())
+ r.Mod(r, N)
+ if r.Sign() != 0 {
+ break
+ }
+ }
+
+ e := hashToInt(hash, c)
+ s = new(big.Int).Mul(priv.D, r)
+ s.Add(s, e)
+ s.Mul(s, kInv)
+ s.Mod(s, N) // N != 0
+ if s.Sign() != 0 {
+ break
+ }
+ }
+
+ return
+}
+
+// SignASN1 signs a hash (which should be the result of hashing a larger message)
+// using the private key, priv. If the hash is longer than the bit-length of the
+// private key's curve order, the hash will be truncated to that length. It
+// returns the ASN.1 encoded signature.
+func SignASN1(rand io.Reader, priv *PrivateKey, hash []byte) ([]byte, error) {
+ return priv.Sign(rand, hash, nil)
+}
+
+// Verify verifies the signature in r, s of hash using the public key, pub. Its
+// return value records whether the signature is valid. Most applications should
+// use VerifyASN1 instead of dealing directly with r, s.
+func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool {
+ if boring.Enabled {
+ key, err := boringPublicKey(pub)
+ if err != nil {
+ return false
+ }
+ var b cryptobyte.Builder
+ b.AddASN1(asn1.SEQUENCE, func(b *cryptobyte.Builder) {
+ b.AddASN1BigInt(r)
+ b.AddASN1BigInt(s)
+ })
+ sig, err := b.Bytes()
+ if err != nil {
+ return false
+ }
+ return boring.VerifyECDSA(key, hash, sig)
+ }
+ boring.UnreachableExceptTests()
+
+ c := pub.Curve
+ N := c.Params().N
+
+ if r.Sign() <= 0 || s.Sign() <= 0 {
+ return false
+ }
+ if r.Cmp(N) >= 0 || s.Cmp(N) >= 0 {
+ return false
+ }
+ return verify(pub, c, hash, r, s)
+}
+
+func verifyGeneric(pub *PublicKey, c elliptic.Curve, hash []byte, r, s *big.Int) bool {
+ // SEC 1, Version 2.0, Section 4.1.4
+ e := hashToInt(hash, c)
+ var w *big.Int
+ N := c.Params().N
+ if in, ok := c.(invertible); ok {
+ w = in.Inverse(s)
+ } else {
+ w = new(big.Int).ModInverse(s, N)
+ }
+
+ u1 := e.Mul(e, w)
+ u1.Mod(u1, N)
+ u2 := w.Mul(r, w)
+ u2.Mod(u2, N)
+
+	// Use CombinedMult, if the curve implements it, to compute
+	// [u1]G + [u2]P in a single pass.
+ var x, y *big.Int
+ if opt, ok := c.(combinedMult); ok {
+ x, y = opt.CombinedMult(pub.X, pub.Y, u1.Bytes(), u2.Bytes())
+ } else {
+ x1, y1 := c.ScalarBaseMult(u1.Bytes())
+ x2, y2 := c.ScalarMult(pub.X, pub.Y, u2.Bytes())
+ x, y = c.Add(x1, y1, x2, y2)
+ }
+
+ if x.Sign() == 0 && y.Sign() == 0 {
+ return false
+ }
+ x.Mod(x, N)
+ return x.Cmp(r) == 0
+}
+
+// VerifyASN1 verifies the ASN.1 encoded signature, sig, of hash using the
+// public key, pub. Its return value records whether the signature is valid.
+func VerifyASN1(pub *PublicKey, hash, sig []byte) bool {
+ var (
+ r, s = &big.Int{}, &big.Int{}
+ inner cryptobyte.String
+ )
+ input := cryptobyte.String(sig)
+ if !input.ReadASN1(&inner, asn1.SEQUENCE) ||
+ !input.Empty() ||
+ !inner.ReadASN1Integer(r) ||
+ !inner.ReadASN1Integer(s) ||
+ !inner.Empty() {
+ return false
+ }
+ return Verify(pub, hash, r, s)
+}
+
+type zr struct{}
+
+// Read replaces the contents of dst with zeros. It is safe for concurrent use.
+func (zr) Read(dst []byte) (n int, err error) {
+ for i := range dst {
+ dst[i] = 0
+ }
+ return len(dst), nil
+}
+
+var zeroReader = zr{}
diff --git a/contrib/go/_std_1.18/src/crypto/ecdsa/ecdsa_noasm.go b/contrib/go/_std_1.19/src/crypto/ecdsa/ecdsa_noasm.go
index 7fbca10b56..7fbca10b56 100644
--- a/contrib/go/_std_1.18/src/crypto/ecdsa/ecdsa_noasm.go
+++ b/contrib/go/_std_1.19/src/crypto/ecdsa/ecdsa_noasm.go
diff --git a/contrib/go/_std_1.19/src/crypto/ecdsa/notboring.go b/contrib/go/_std_1.19/src/crypto/ecdsa/notboring.go
new file mode 100644
index 0000000000..039bd82ed2
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/ecdsa/notboring.go
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !boringcrypto
+
+package ecdsa
+
+import "crypto/internal/boring"
+
+func boringPublicKey(*PublicKey) (*boring.PublicKeyECDSA, error) {
+ panic("boringcrypto: not available")
+}
+func boringPrivateKey(*PrivateKey) (*boring.PrivateKeyECDSA, error) {
+ panic("boringcrypto: not available")
+}
diff --git a/contrib/go/_std_1.19/src/crypto/ed25519/ed25519.go b/contrib/go/_std_1.19/src/crypto/ed25519/ed25519.go
new file mode 100644
index 0000000000..d43dd12d08
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/ed25519/ed25519.go
@@ -0,0 +1,230 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ed25519 implements the Ed25519 signature algorithm. See
+// https://ed25519.cr.yp.to/.
+//
+// These functions are also compatible with the “Ed25519” function defined in
+// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
+// representation includes a public key suffix to make multiple signing
+// operations with the same key more efficient. This package refers to the RFC
+// 8032 private key as the “seed”.
+package ed25519
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/internal/edwards25519"
+ cryptorand "crypto/rand"
+ "crypto/sha512"
+ "errors"
+ "io"
+ "strconv"
+)
+
+const (
+ // PublicKeySize is the size, in bytes, of public keys as used in this package.
+ PublicKeySize = 32
+ // PrivateKeySize is the size, in bytes, of private keys as used in this package.
+ PrivateKeySize = 64
+ // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
+ SignatureSize = 64
+ // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
+ SeedSize = 32
+)
+
+// PublicKey is the type of Ed25519 public keys.
+type PublicKey []byte
+
+// Any methods implemented on PublicKey might need to also be implemented on
+// PrivateKey, as the latter embeds the former and will expose its methods.
+
+// Equal reports whether pub and x have the same value.
+func (pub PublicKey) Equal(x crypto.PublicKey) bool {
+ xx, ok := x.(PublicKey)
+ if !ok {
+ return false
+ }
+ return bytes.Equal(pub, xx)
+}
+
+// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
+type PrivateKey []byte
+
+// Public returns the PublicKey corresponding to priv.
+func (priv PrivateKey) Public() crypto.PublicKey {
+ publicKey := make([]byte, PublicKeySize)
+ copy(publicKey, priv[32:])
+ return PublicKey(publicKey)
+}
+
+// Equal reports whether priv and x have the same value.
+func (priv PrivateKey) Equal(x crypto.PrivateKey) bool {
+ xx, ok := x.(PrivateKey)
+ if !ok {
+ return false
+ }
+ return bytes.Equal(priv, xx)
+}
+
+// Seed returns the private key seed corresponding to priv. It is provided for
+// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
+// in this package.
+func (priv PrivateKey) Seed() []byte {
+ seed := make([]byte, SeedSize)
+ copy(seed, priv[:32])
+ return seed
+}
+
+// Sign signs the given message with priv.
+// Ed25519 performs two passes over messages to be signed and therefore cannot
+// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
+// indicate the message hasn't been hashed. This can be achieved by passing
+// crypto.Hash(0) as the value for opts.
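+//
+// A hedged usage sketch (message is hypothetical; the rand argument is not
+// used by this implementation, since Ed25519 signing is deterministic):
+//
+//	sig, err := priv.Sign(nil, message, crypto.Hash(0))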
+func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
+ if opts.HashFunc() != crypto.Hash(0) {
+ return nil, errors.New("ed25519: cannot sign hashed message")
+ }
+
+ return Sign(priv, message), nil
+}
+
+// GenerateKey generates a public/private key pair using entropy from rand.
+// If rand is nil, crypto/rand.Reader will be used.
+func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
+ if rand == nil {
+ rand = cryptorand.Reader
+ }
+
+ seed := make([]byte, SeedSize)
+ if _, err := io.ReadFull(rand, seed); err != nil {
+ return nil, nil, err
+ }
+
+ privateKey := NewKeyFromSeed(seed)
+ publicKey := make([]byte, PublicKeySize)
+ copy(publicKey, privateKey[32:])
+
+ return publicKey, privateKey, nil
+}
+
+// NewKeyFromSeed calculates a private key from a seed. It will panic if
+// len(seed) is not SeedSize. This function is provided for interoperability
+// with RFC 8032. RFC 8032's private keys correspond to seeds in this
+// package.
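+//
+// As an illustrative sketch, a key round-trips through its seed (priv is
+// hypothetical):
+//
+//	same := NewKeyFromSeed(priv.Seed())
+//	// bytes.Equal(priv, same) == true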
+func NewKeyFromSeed(seed []byte) PrivateKey {
+ // Outline the function body so that the returned key can be stack-allocated.
+ privateKey := make([]byte, PrivateKeySize)
+ newKeyFromSeed(privateKey, seed)
+ return privateKey
+}
+
+func newKeyFromSeed(privateKey, seed []byte) {
+ if l := len(seed); l != SeedSize {
+ panic("ed25519: bad seed length: " + strconv.Itoa(l))
+ }
+
+ h := sha512.Sum512(seed)
+ s, err := edwards25519.NewScalar().SetBytesWithClamping(h[:32])
+ if err != nil {
+ panic("ed25519: internal error: setting scalar failed")
+ }
+ A := (&edwards25519.Point{}).ScalarBaseMult(s)
+
+ publicKey := A.Bytes()
+
+ copy(privateKey, seed)
+ copy(privateKey[32:], publicKey)
+}
+
+// Sign signs the message with privateKey and returns a signature. It will
+// panic if len(privateKey) is not PrivateKeySize.
+func Sign(privateKey PrivateKey, message []byte) []byte {
+ // Outline the function body so that the returned signature can be
+ // stack-allocated.
+ signature := make([]byte, SignatureSize)
+ sign(signature, privateKey, message)
+ return signature
+}
+
+func sign(signature, privateKey, message []byte) {
+ if l := len(privateKey); l != PrivateKeySize {
+ panic("ed25519: bad private key length: " + strconv.Itoa(l))
+ }
+ seed, publicKey := privateKey[:SeedSize], privateKey[SeedSize:]
+
+ h := sha512.Sum512(seed)
+ s, err := edwards25519.NewScalar().SetBytesWithClamping(h[:32])
+ if err != nil {
+ panic("ed25519: internal error: setting scalar failed")
+ }
+ prefix := h[32:]
+
+ mh := sha512.New()
+ mh.Write(prefix)
+ mh.Write(message)
+ messageDigest := make([]byte, 0, sha512.Size)
+ messageDigest = mh.Sum(messageDigest)
+ r, err := edwards25519.NewScalar().SetUniformBytes(messageDigest)
+ if err != nil {
+ panic("ed25519: internal error: setting scalar failed")
+ }
+
+ R := (&edwards25519.Point{}).ScalarBaseMult(r)
+
+ kh := sha512.New()
+ kh.Write(R.Bytes())
+ kh.Write(publicKey)
+ kh.Write(message)
+ hramDigest := make([]byte, 0, sha512.Size)
+ hramDigest = kh.Sum(hramDigest)
+ k, err := edwards25519.NewScalar().SetUniformBytes(hramDigest)
+ if err != nil {
+ panic("ed25519: internal error: setting scalar failed")
+ }
+
+ S := edwards25519.NewScalar().MultiplyAdd(k, s, r)
+
+ copy(signature[:32], R.Bytes())
+ copy(signature[32:], S.Bytes())
+}
+
+// Verify reports whether sig is a valid signature of message by publicKey. It
+// will panic if len(publicKey) is not PublicKeySize.
+func Verify(publicKey PublicKey, message, sig []byte) bool {
+ if l := len(publicKey); l != PublicKeySize {
+ panic("ed25519: bad public key length: " + strconv.Itoa(l))
+ }
+
+ if len(sig) != SignatureSize || sig[63]&224 != 0 {
+ return false
+ }
+
+ A, err := (&edwards25519.Point{}).SetBytes(publicKey)
+ if err != nil {
+ return false
+ }
+
+ kh := sha512.New()
+ kh.Write(sig[:32])
+ kh.Write(publicKey)
+ kh.Write(message)
+ hramDigest := make([]byte, 0, sha512.Size)
+ hramDigest = kh.Sum(hramDigest)
+ k, err := edwards25519.NewScalar().SetUniformBytes(hramDigest)
+ if err != nil {
+ panic("ed25519: internal error: setting scalar failed")
+ }
+
+ S, err := edwards25519.NewScalar().SetCanonicalBytes(sig[32:])
+ if err != nil {
+ return false
+ }
+
+ // [S]B = R + [k]A --> [k](-A) + [S]B = R
+ minusA := (&edwards25519.Point{}).Negate(A)
+ R := (&edwards25519.Point{}).VarTimeDoubleScalarBaseMult(k, minusA, S)
+
+ return bytes.Equal(sig[:32], R.Bytes())
+}
diff --git a/contrib/go/_std_1.19/src/crypto/elliptic/elliptic.go b/contrib/go/_std_1.19/src/crypto/elliptic/elliptic.go
new file mode 100644
index 0000000000..8c0b60b889
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/elliptic/elliptic.go
@@ -0,0 +1,242 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package elliptic implements the standard NIST P-224, P-256, P-384, and P-521
+// elliptic curves over prime fields.
+package elliptic
+
+import (
+ "io"
+ "math/big"
+ "sync"
+)
+
+// A Curve represents a short-form Weierstrass curve with a=-3.
+//
+// The behavior of Add, Double, and ScalarMult when the input is not a point on
+// the curve is undefined.
+//
+// Note that the conventional point at infinity (0, 0) is not considered on the
+// curve, although it can be returned by Add, Double, ScalarMult, or
+// ScalarBaseMult (but not the Unmarshal or UnmarshalCompressed functions).
+type Curve interface {
+ // Params returns the parameters for the curve.
+ Params() *CurveParams
+ // IsOnCurve reports whether the given (x,y) lies on the curve.
+ IsOnCurve(x, y *big.Int) bool
+ // Add returns the sum of (x1,y1) and (x2,y2)
+ Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int)
+ // Double returns 2*(x,y)
+ Double(x1, y1 *big.Int) (x, y *big.Int)
+ // ScalarMult returns k*(Bx,By) where k is a number in big-endian form.
+ ScalarMult(x1, y1 *big.Int, k []byte) (x, y *big.Int)
+ // ScalarBaseMult returns k*G, where G is the base point of the group
+ // and k is an integer in big-endian form.
+ ScalarBaseMult(k []byte) (x, y *big.Int)
+}
+
+var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f}
+
+// GenerateKey returns a public/private key pair. The private key is
+// generated using the given reader, which must return random data.
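+//
+// A hedged usage sketch (crypto/rand's Reader is assumed as the source):
+//
+//	priv, x, y, err := GenerateKey(P256(), rand.Reader)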
+func GenerateKey(curve Curve, rand io.Reader) (priv []byte, x, y *big.Int, err error) {
+ N := curve.Params().N
+ bitSize := N.BitLen()
+ byteLen := (bitSize + 7) / 8
+ priv = make([]byte, byteLen)
+
+ for x == nil {
+ _, err = io.ReadFull(rand, priv)
+ if err != nil {
+ return
+ }
+ // We have to mask off any excess bits in the case that the size of the
+ // underlying field is not a whole number of bytes.
+ priv[0] &= mask[bitSize%8]
+ // This is because, in tests, rand will return all zeros and we don't
+ // want to get the point at infinity and loop forever.
+ priv[1] ^= 0x42
+
+ // If the scalar is out of range, sample another random number.
+ if new(big.Int).SetBytes(priv).Cmp(N) >= 0 {
+ continue
+ }
+
+ x, y = curve.ScalarBaseMult(priv)
+ }
+ return
+}
+
+// Marshal converts a point on the curve into the uncompressed form specified in
+// SEC 1, Version 2.0, Section 2.3.3. If the point is not on the curve (or is
+// the conventional point at infinity), the behavior is undefined.
+func Marshal(curve Curve, x, y *big.Int) []byte {
+ panicIfNotOnCurve(curve, x, y)
+
+ byteLen := (curve.Params().BitSize + 7) / 8
+
+ ret := make([]byte, 1+2*byteLen)
+ ret[0] = 4 // uncompressed point
+
+ x.FillBytes(ret[1 : 1+byteLen])
+ y.FillBytes(ret[1+byteLen : 1+2*byteLen])
+
+ return ret
+}
+
+// MarshalCompressed converts a point on the curve into the compressed form
+// specified in SEC 1, Version 2.0, Section 2.3.3. If the point is not on the
+// curve (or is the conventional point at infinity), the behavior is undefined.
+func MarshalCompressed(curve Curve, x, y *big.Int) []byte {
+ panicIfNotOnCurve(curve, x, y)
+ byteLen := (curve.Params().BitSize + 7) / 8
+ compressed := make([]byte, 1+byteLen)
+ compressed[0] = byte(y.Bit(0)) | 2
+ x.FillBytes(compressed[1:])
+ return compressed
+}
+
+// unmarshaler is implemented by curves with their own constant-time Unmarshal.
+//
+// There isn't an equivalent interface for Marshal/MarshalCompressed because
+// that doesn't involve any mathematical operations, only FillBytes and Bit.
+type unmarshaler interface {
+ Unmarshal([]byte) (x, y *big.Int)
+ UnmarshalCompressed([]byte) (x, y *big.Int)
+}
+
+// Assert that the known curves implement unmarshaler.
+var _ = []unmarshaler{p224, p256, p384, p521}
+
+// Unmarshal converts a point, serialized by Marshal, into an x, y pair. It is
+// an error if the point is not in uncompressed form, is not on the curve, or is
+// the point at infinity. On error, x = nil.
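+//
+// A hedged round-trip sketch (curve, x, and y are hypothetical, with the
+// point assumed to be valid and on the curve):
+//
+//	buf := Marshal(curve, x, y)
+//	x2, y2 := Unmarshal(curve, buf)
+//	// x2.Cmp(x) == 0 && y2.Cmp(y) == 0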
+func Unmarshal(curve Curve, data []byte) (x, y *big.Int) {
+ if c, ok := curve.(unmarshaler); ok {
+ return c.Unmarshal(data)
+ }
+
+ byteLen := (curve.Params().BitSize + 7) / 8
+ if len(data) != 1+2*byteLen {
+ return nil, nil
+ }
+ if data[0] != 4 { // uncompressed form
+ return nil, nil
+ }
+ p := curve.Params().P
+ x = new(big.Int).SetBytes(data[1 : 1+byteLen])
+ y = new(big.Int).SetBytes(data[1+byteLen:])
+ if x.Cmp(p) >= 0 || y.Cmp(p) >= 0 {
+ return nil, nil
+ }
+ if !curve.IsOnCurve(x, y) {
+ return nil, nil
+ }
+ return
+}
+
+// UnmarshalCompressed converts a point, serialized by MarshalCompressed, into
+// an x, y pair. It is an error if the point is not in compressed form, is not
+// on the curve, or is the point at infinity. On error, x = nil.
+func UnmarshalCompressed(curve Curve, data []byte) (x, y *big.Int) {
+ if c, ok := curve.(unmarshaler); ok {
+ return c.UnmarshalCompressed(data)
+ }
+
+ byteLen := (curve.Params().BitSize + 7) / 8
+ if len(data) != 1+byteLen {
+ return nil, nil
+ }
+ if data[0] != 2 && data[0] != 3 { // compressed form
+ return nil, nil
+ }
+ p := curve.Params().P
+ x = new(big.Int).SetBytes(data[1:])
+ if x.Cmp(p) >= 0 {
+ return nil, nil
+ }
+ // y² = x³ - 3x + b
+ y = curve.Params().polynomial(x)
+ y = y.ModSqrt(y, p)
+ if y == nil {
+ return nil, nil
+ }
+ if byte(y.Bit(0)) != data[0]&1 {
+ y.Neg(y).Mod(y, p)
+ }
+ if !curve.IsOnCurve(x, y) {
+ return nil, nil
+ }
+ return
+}
+
+func panicIfNotOnCurve(curve Curve, x, y *big.Int) {
+ // (0, 0) is the point at infinity by convention. It's ok to operate on it,
+ // although IsOnCurve is documented to return false for it. See Issue 37294.
+ if x.Sign() == 0 && y.Sign() == 0 {
+ return
+ }
+
+ if !curve.IsOnCurve(x, y) {
+ panic("crypto/elliptic: attempted operation on invalid point")
+ }
+}
+
+var initonce sync.Once
+
+func initAll() {
+ initP224()
+ initP256()
+ initP384()
+ initP521()
+}
+
+// P224 returns a Curve which implements NIST P-224 (FIPS 186-3, section D.2.2),
+// also known as secp224r1. The CurveParams.Name of this Curve is "P-224".
+//
+// Multiple invocations of this function will return the same value, so it can
+// be used for equality checks and switch statements.
+//
+// The cryptographic operations are implemented using constant-time algorithms.
+func P224() Curve {
+ initonce.Do(initAll)
+ return p224
+}
+
+// P256 returns a Curve which implements NIST P-256 (FIPS 186-3, section D.2.3),
+// also known as secp256r1 or prime256v1. The CurveParams.Name of this Curve is
+// "P-256".
+//
+// Multiple invocations of this function will return the same value, so it can
+// be used for equality checks and switch statements.
+//
+// The cryptographic operations are implemented using constant-time algorithms.
+func P256() Curve {
+ initonce.Do(initAll)
+ return p256
+}
+
+// P384 returns a Curve which implements NIST P-384 (FIPS 186-3, section D.2.4),
+// also known as secp384r1. The CurveParams.Name of this Curve is "P-384".
+//
+// Multiple invocations of this function will return the same value, so it can
+// be used for equality checks and switch statements.
+//
+// The cryptographic operations are implemented using constant-time algorithms.
+func P384() Curve {
+ initonce.Do(initAll)
+ return p384
+}
+
+// P521 returns a Curve which implements NIST P-521 (FIPS 186-3, section D.2.5),
+// also known as secp521r1. The CurveParams.Name of this Curve is "P-521".
+//
+// Multiple invocations of this function will return the same value, so it can
+// be used for equality checks and switch statements.
+//
+// The cryptographic operations are implemented using constant-time algorithms.
+func P521() Curve {
+ initonce.Do(initAll)
+ return p521
+}
diff --git a/contrib/go/_std_1.19/src/crypto/elliptic/nistec.go b/contrib/go/_std_1.19/src/crypto/elliptic/nistec.go
new file mode 100644
index 0000000000..9bb46008b5
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/elliptic/nistec.go
@@ -0,0 +1,295 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package elliptic
+
+import (
+ "crypto/internal/nistec"
+ "errors"
+ "math/big"
+)
+
+var p224 = &nistCurve[*nistec.P224Point]{
+ newPoint: nistec.NewP224Point,
+}
+
+func initP224() {
+ p224.params = &CurveParams{
+ Name: "P-224",
+ BitSize: 224,
+ // FIPS 186-4, section D.1.2.2
+ P: bigFromDecimal("26959946667150639794667015087019630673557916260026308143510066298881"),
+ N: bigFromDecimal("26959946667150639794667015087019625940457807714424391721682722368061"),
+ B: bigFromHex("b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4"),
+ Gx: bigFromHex("b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21"),
+ Gy: bigFromHex("bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34"),
+ }
+}
+
+type p256Curve struct {
+ nistCurve[*nistec.P256Point]
+}
+
+var p256 = &p256Curve{nistCurve[*nistec.P256Point]{
+ newPoint: nistec.NewP256Point,
+}}
+
+func initP256() {
+ p256.params = &CurveParams{
+ Name: "P-256",
+ BitSize: 256,
+ // FIPS 186-4, section D.1.2.3
+ P: bigFromDecimal("115792089210356248762697446949407573530086143415290314195533631308867097853951"),
+ N: bigFromDecimal("115792089210356248762697446949407573529996955224135760342422259061068512044369"),
+ B: bigFromHex("5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b"),
+ Gx: bigFromHex("6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296"),
+ Gy: bigFromHex("4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5"),
+ }
+}
+
+var p384 = &nistCurve[*nistec.P384Point]{
+ newPoint: nistec.NewP384Point,
+}
+
+func initP384() {
+ p384.params = &CurveParams{
+ Name: "P-384",
+ BitSize: 384,
+ // FIPS 186-4, section D.1.2.4
+ P: bigFromDecimal("394020061963944792122790401001436138050797392704654" +
+ "46667948293404245721771496870329047266088258938001861606973112319"),
+ N: bigFromDecimal("394020061963944792122790401001436138050797392704654" +
+ "46667946905279627659399113263569398956308152294913554433653942643"),
+ B: bigFromHex("b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088" +
+ "f5013875ac656398d8a2ed19d2a85c8edd3ec2aef"),
+ Gx: bigFromHex("aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741" +
+ "e082542a385502f25dbf55296c3a545e3872760ab7"),
+ Gy: bigFromHex("3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da31" +
+ "13b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f"),
+ }
+}
+
+var p521 = &nistCurve[*nistec.P521Point]{
+ newPoint: nistec.NewP521Point,
+}
+
+func initP521() {
+ p521.params = &CurveParams{
+ Name: "P-521",
+ BitSize: 521,
+ // FIPS 186-4, section D.1.2.5
+ P: bigFromDecimal("68647976601306097149819007990813932172694353001433" +
+ "0540939446345918554318339765605212255964066145455497729631139148" +
+ "0858037121987999716643812574028291115057151"),
+ N: bigFromDecimal("68647976601306097149819007990813932172694353001433" +
+ "0540939446345918554318339765539424505774633321719753296399637136" +
+ "3321113864768612440380340372808892707005449"),
+ B: bigFromHex("0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8" +
+ "b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef" +
+ "451fd46b503f00"),
+ Gx: bigFromHex("00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f8" +
+ "28af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf9" +
+ "7e7e31c2e5bd66"),
+ Gy: bigFromHex("011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817" +
+ "afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088" +
+ "be94769fd16650"),
+ }
+}
+
+// nistCurve is a Curve implementation based on a nistec Point.
+//
+// It's a wrapper that exposes the big.Int-based Curve interface and encodes the
+// legacy idiosyncrasies it requires, such as invalid and infinity point
+// handling.
+//
+// To interact with the nistec package, points are encoded into and decoded from
+// properly formatted byte slices. All big.Int use is limited to this package.
+// Encoding and decoding are about 1/1000th of the runtime of a scalar multiplication,
+// so the overhead is acceptable.
+type nistCurve[Point nistPoint[Point]] struct {
+ newPoint func() Point
+ params *CurveParams
+}
+
+// nistPoint is a generic constraint for the nistec Point types.
+type nistPoint[T any] interface {
+ Bytes() []byte
+ SetBytes([]byte) (T, error)
+ Add(T, T) T
+ Double(T) T
+ ScalarMult(T, []byte) (T, error)
+ ScalarBaseMult([]byte) (T, error)
+}
+
+func (curve *nistCurve[Point]) Params() *CurveParams {
+ return curve.params
+}
+
+func (curve *nistCurve[Point]) IsOnCurve(x, y *big.Int) bool {
+ // IsOnCurve is documented to reject (0, 0), the conventional point at
+ // infinity, which however is accepted by pointFromAffine.
+ if x.Sign() == 0 && y.Sign() == 0 {
+ return false
+ }
+ _, err := curve.pointFromAffine(x, y)
+ return err == nil
+}
+
+func (curve *nistCurve[Point]) pointFromAffine(x, y *big.Int) (p Point, err error) {
+ p = curve.newPoint()
+ // (0, 0) is by convention the point at infinity, which can't be represented
+ // in affine coordinates. See Issue 37294.
+ if x.Sign() == 0 && y.Sign() == 0 {
+ return p, nil
+ }
+ // Reject values that would not get correctly encoded.
+ if x.Sign() < 0 || y.Sign() < 0 {
+ return p, errors.New("negative coordinate")
+ }
+ if x.BitLen() > curve.params.BitSize || y.BitLen() > curve.params.BitSize {
+ return p, errors.New("overflowing coordinate")
+ }
+ // Encode the coordinates and let SetBytes reject invalid points.
+ byteLen := (curve.params.BitSize + 7) / 8
+ buf := make([]byte, 1+2*byteLen)
+ buf[0] = 4 // uncompressed point
+ x.FillBytes(buf[1 : 1+byteLen])
+ y.FillBytes(buf[1+byteLen : 1+2*byteLen])
+ return p.SetBytes(buf)
+}
+
+func (curve *nistCurve[Point]) pointToAffine(p Point) (x, y *big.Int) {
+ out := p.Bytes()
+ if len(out) == 1 && out[0] == 0 {
+ // This is the encoding of the point at infinity, which the affine
+ // coordinates API represents as (0, 0) by convention.
+ return new(big.Int), new(big.Int)
+ }
+ byteLen := (curve.params.BitSize + 7) / 8
+ x = new(big.Int).SetBytes(out[1 : 1+byteLen])
+ y = new(big.Int).SetBytes(out[1+byteLen:])
+ return x, y
+}
+
+func (curve *nistCurve[Point]) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
+ p1, err := curve.pointFromAffine(x1, y1)
+ if err != nil {
+ panic("crypto/elliptic: Add was called on an invalid point")
+ }
+ p2, err := curve.pointFromAffine(x2, y2)
+ if err != nil {
+ panic("crypto/elliptic: Add was called on an invalid point")
+ }
+ return curve.pointToAffine(p1.Add(p1, p2))
+}
+
+func (curve *nistCurve[Point]) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
+ p, err := curve.pointFromAffine(x1, y1)
+ if err != nil {
+ panic("crypto/elliptic: Double was called on an invalid point")
+ }
+ return curve.pointToAffine(p.Double(p))
+}
+
+// normalizeScalar brings the scalar within the byte size of the order of the
+// curve, as expected by the nistec scalar multiplication functions.
+func (curve *nistCurve[Point]) normalizeScalar(scalar []byte) []byte {
+ byteSize := (curve.params.N.BitLen() + 7) / 8
+ if len(scalar) == byteSize {
+ return scalar
+ }
+ s := new(big.Int).SetBytes(scalar)
+ if len(scalar) > byteSize {
+ s.Mod(s, curve.params.N)
+ }
+ out := make([]byte, byteSize)
+ return s.FillBytes(out)
+}
+
+func (curve *nistCurve[Point]) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) {
+ p, err := curve.pointFromAffine(Bx, By)
+ if err != nil {
+ panic("crypto/elliptic: ScalarMult was called on an invalid point")
+ }
+ scalar = curve.normalizeScalar(scalar)
+ p, err = p.ScalarMult(p, scalar)
+ if err != nil {
+ panic("crypto/elliptic: nistec rejected normalized scalar")
+ }
+ return curve.pointToAffine(p)
+}
+
+func (curve *nistCurve[Point]) ScalarBaseMult(scalar []byte) (*big.Int, *big.Int) {
+ scalar = curve.normalizeScalar(scalar)
+ p, err := curve.newPoint().ScalarBaseMult(scalar)
+ if err != nil {
+ panic("crypto/elliptic: nistec rejected normalized scalar")
+ }
+ return curve.pointToAffine(p)
+}
+
+// CombinedMult returns [s1]G + [s2]P where G is the generator. It's used
+// through an interface upgrade in crypto/ecdsa.
+func (curve *nistCurve[Point]) CombinedMult(Px, Py *big.Int, s1, s2 []byte) (x, y *big.Int) {
+ s1 = curve.normalizeScalar(s1)
+ q, err := curve.newPoint().ScalarBaseMult(s1)
+ if err != nil {
+ panic("crypto/elliptic: nistec rejected normalized scalar")
+ }
+ p, err := curve.pointFromAffine(Px, Py)
+ if err != nil {
+ panic("crypto/elliptic: CombinedMult was called on an invalid point")
+ }
+ s2 = curve.normalizeScalar(s2)
+ p, err = p.ScalarMult(p, s2)
+ if err != nil {
+ panic("crypto/elliptic: nistec rejected normalized scalar")
+ }
+ return curve.pointToAffine(p.Add(p, q))
+}
+
+func (curve *nistCurve[Point]) Unmarshal(data []byte) (x, y *big.Int) {
+ if len(data) == 0 || data[0] != 4 {
+ return nil, nil
+ }
+ // Use SetBytes to check that data encodes a valid point.
+ _, err := curve.newPoint().SetBytes(data)
+ if err != nil {
+ return nil, nil
+ }
+ // We don't use pointToAffine because it involves an expensive field
+ // inversion to convert from Jacobian to affine coordinates, which we
+ // already have.
+ byteLen := (curve.params.BitSize + 7) / 8
+ x = new(big.Int).SetBytes(data[1 : 1+byteLen])
+ y = new(big.Int).SetBytes(data[1+byteLen:])
+ return x, y
+}
+
+func (curve *nistCurve[Point]) UnmarshalCompressed(data []byte) (x, y *big.Int) {
+ if len(data) == 0 || (data[0] != 2 && data[0] != 3) {
+ return nil, nil
+ }
+ p, err := curve.newPoint().SetBytes(data)
+ if err != nil {
+ return nil, nil
+ }
+ return curve.pointToAffine(p)
+}
+
+func bigFromDecimal(s string) *big.Int {
+ b, ok := new(big.Int).SetString(s, 10)
+ if !ok {
+ panic("crypto/elliptic: internal error: invalid encoding")
+ }
+ return b
+}
+
+func bigFromHex(s string) *big.Int {
+ b, ok := new(big.Int).SetString(s, 16)
+ if !ok {
+ panic("crypto/elliptic: internal error: invalid encoding")
+ }
+ return b
+}
diff --git a/contrib/go/_std_1.19/src/crypto/elliptic/nistec_p256.go b/contrib/go/_std_1.19/src/crypto/elliptic/nistec_p256.go
new file mode 100644
index 0000000000..304f8f2659
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/elliptic/nistec_p256.go
@@ -0,0 +1,29 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64
+
+package elliptic
+
+import (
+ "crypto/internal/nistec"
+ "math/big"
+)
+
+func (c p256Curve) Inverse(k *big.Int) *big.Int {
+ if k.Sign() < 0 {
+ // This should never happen.
+ k = new(big.Int).Neg(k)
+ }
+ if k.Cmp(c.params.N) >= 0 {
+ // This should never happen.
+ k = new(big.Int).Mod(k, c.params.N)
+ }
+ scalar := k.FillBytes(make([]byte, 32))
+ inverse, err := nistec.P256OrdInverse(scalar)
+ if err != nil {
+ panic("crypto/elliptic: nistec rejected normalized scalar")
+ }
+ return new(big.Int).SetBytes(inverse)
+}
diff --git a/contrib/go/_std_1.19/src/crypto/elliptic/params.go b/contrib/go/_std_1.19/src/crypto/elliptic/params.go
new file mode 100644
index 0000000000..0ed929d61f
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/elliptic/params.go
@@ -0,0 +1,300 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package elliptic
+
+import "math/big"
+
+// CurveParams contains the parameters of an elliptic curve and also provides
+// a generic, non-constant time implementation of Curve.
+type CurveParams struct {
+ P *big.Int // the order of the underlying field
+ N *big.Int // the order of the base point
+ B *big.Int // the constant of the curve equation
+ Gx, Gy *big.Int // (x,y) of the base point
+ BitSize int // the size of the underlying field
+ Name string // the canonical name of the curve
+}
+
+func (curve *CurveParams) Params() *CurveParams {
+ return curve
+}
+
+// CurveParams operates, internally, on Jacobian coordinates. For a given
+// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1)
+// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole
+// calculation can be performed within the transform (as in ScalarMult and
+// ScalarBaseMult). But even for Add and Double, it's faster to apply and
+// reverse the transform than to operate in affine coordinates.
+
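A quick numeric way to see the transform (an illustrative sketch using P-256's parameters, not part of the patch): scaling a Jacobian triple by any nonzero λ leaves the affine point unchanged, since (λ²·x, λ³·y, λ) and (x, y, 1) represent the same point.

	p := elliptic.P256().Params()
	lambda := big.NewInt(3)
	X := new(big.Int).Mul(p.Gx, new(big.Int).Mul(lambda, lambda)) // X = λ²·x
	zinv := new(big.Int).ModInverse(lambda, p.P)                  // Z⁻¹ = λ⁻¹
	x := new(big.Int).Mod(new(big.Int).Mul(X, new(big.Int).Mul(zinv, zinv)), p.P)
	// x.Cmp(p.Gx) == 0: X/Z² recovers the original affine x.
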
+// polynomial returns x³ - 3x + b.
+func (curve *CurveParams) polynomial(x *big.Int) *big.Int {
+ x3 := new(big.Int).Mul(x, x)
+ x3.Mul(x3, x)
+
+ threeX := new(big.Int).Lsh(x, 1)
+ threeX.Add(threeX, x)
+
+ x3.Sub(x3, threeX)
+ x3.Add(x3, curve.B)
+ x3.Mod(x3, curve.P)
+
+ return x3
+}
+
+func (curve *CurveParams) IsOnCurve(x, y *big.Int) bool {
+ // If there is a dedicated constant-time implementation for this curve operation,
+ // use that instead of the generic one.
+ if specific, ok := matchesSpecificCurve(curve); ok {
+ return specific.IsOnCurve(x, y)
+ }
+
+ if x.Sign() < 0 || x.Cmp(curve.P) >= 0 ||
+ y.Sign() < 0 || y.Cmp(curve.P) >= 0 {
+ return false
+ }
+
+ // y² = x³ - 3x + b
+ y2 := new(big.Int).Mul(y, y)
+ y2.Mod(y2, curve.P)
+
+ return curve.polynomial(x).Cmp(y2) == 0
+}
+
+// zForAffine returns a Jacobian Z value for the affine point (x, y). If x and
+// y are zero, it assumes that they represent the point at infinity because (0,
+// 0) is not on any of the curves handled here.
+func zForAffine(x, y *big.Int) *big.Int {
+ z := new(big.Int)
+ if x.Sign() != 0 || y.Sign() != 0 {
+ z.SetInt64(1)
+ }
+ return z
+}
+
+// affineFromJacobian reverses the Jacobian transform. See the comment at the
+// top of the file. If the point is ∞ it returns 0, 0.
+func (curve *CurveParams) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
+ if z.Sign() == 0 {
+ return new(big.Int), new(big.Int)
+ }
+
+ zinv := new(big.Int).ModInverse(z, curve.P)
+ zinvsq := new(big.Int).Mul(zinv, zinv)
+
+ xOut = new(big.Int).Mul(x, zinvsq)
+ xOut.Mod(xOut, curve.P)
+ zinvsq.Mul(zinvsq, zinv)
+ yOut = new(big.Int).Mul(y, zinvsq)
+ yOut.Mod(yOut, curve.P)
+ return
+}
+
+func (curve *CurveParams) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
+ // If there is a dedicated constant-time implementation for this curve operation,
+ // use that instead of the generic one.
+ if specific, ok := matchesSpecificCurve(curve); ok {
+ return specific.Add(x1, y1, x2, y2)
+ }
+ panicIfNotOnCurve(curve, x1, y1)
+ panicIfNotOnCurve(curve, x2, y2)
+
+ z1 := zForAffine(x1, y1)
+ z2 := zForAffine(x2, y2)
+ return curve.affineFromJacobian(curve.addJacobian(x1, y1, z1, x2, y2, z2))
+}
+
+// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and
+// (x2, y2, z2) and returns their sum, also in Jacobian form.
+func (curve *CurveParams) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) {
+ // See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
+ x3, y3, z3 := new(big.Int), new(big.Int), new(big.Int)
+ if z1.Sign() == 0 {
+ x3.Set(x2)
+ y3.Set(y2)
+ z3.Set(z2)
+ return x3, y3, z3
+ }
+ if z2.Sign() == 0 {
+ x3.Set(x1)
+ y3.Set(y1)
+ z3.Set(z1)
+ return x3, y3, z3
+ }
+
+ z1z1 := new(big.Int).Mul(z1, z1)
+ z1z1.Mod(z1z1, curve.P)
+ z2z2 := new(big.Int).Mul(z2, z2)
+ z2z2.Mod(z2z2, curve.P)
+
+ u1 := new(big.Int).Mul(x1, z2z2)
+ u1.Mod(u1, curve.P)
+ u2 := new(big.Int).Mul(x2, z1z1)
+ u2.Mod(u2, curve.P)
+ h := new(big.Int).Sub(u2, u1)
+ xEqual := h.Sign() == 0
+ if h.Sign() == -1 {
+ h.Add(h, curve.P)
+ }
+ i := new(big.Int).Lsh(h, 1)
+ i.Mul(i, i)
+ j := new(big.Int).Mul(h, i)
+
+ s1 := new(big.Int).Mul(y1, z2)
+ s1.Mul(s1, z2z2)
+ s1.Mod(s1, curve.P)
+ s2 := new(big.Int).Mul(y2, z1)
+ s2.Mul(s2, z1z1)
+ s2.Mod(s2, curve.P)
+ r := new(big.Int).Sub(s2, s1)
+ if r.Sign() == -1 {
+ r.Add(r, curve.P)
+ }
+ yEqual := r.Sign() == 0
+ if xEqual && yEqual {
+ return curve.doubleJacobian(x1, y1, z1)
+ }
+ r.Lsh(r, 1)
+ v := new(big.Int).Mul(u1, i)
+
+ x3.Set(r)
+ x3.Mul(x3, x3)
+ x3.Sub(x3, j)
+ x3.Sub(x3, v)
+ x3.Sub(x3, v)
+ x3.Mod(x3, curve.P)
+
+ y3.Set(r)
+ v.Sub(v, x3)
+ y3.Mul(y3, v)
+ s1.Mul(s1, j)
+ s1.Lsh(s1, 1)
+ y3.Sub(y3, s1)
+ y3.Mod(y3, curve.P)
+
+ z3.Add(z1, z2)
+ z3.Mul(z3, z3)
+ z3.Sub(z3, z1z1)
+ z3.Sub(z3, z2z2)
+ z3.Mul(z3, h)
+ z3.Mod(z3, curve.P)
+
+ return x3, y3, z3
+}
+
+func (curve *CurveParams) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
+ // If there is a dedicated constant-time implementation for this curve operation,
+ // use that instead of the generic one.
+ if specific, ok := matchesSpecificCurve(curve); ok {
+ return specific.Double(x1, y1)
+ }
+ panicIfNotOnCurve(curve, x1, y1)
+
+ z1 := zForAffine(x1, y1)
+ return curve.affineFromJacobian(curve.doubleJacobian(x1, y1, z1))
+}
+
+// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and
+// returns its double, also in Jacobian form.
+func (curve *CurveParams) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) {
+ // See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
+ delta := new(big.Int).Mul(z, z)
+ delta.Mod(delta, curve.P)
+ gamma := new(big.Int).Mul(y, y)
+ gamma.Mod(gamma, curve.P)
+ alpha := new(big.Int).Sub(x, delta)
+ if alpha.Sign() == -1 {
+ alpha.Add(alpha, curve.P)
+ }
+ alpha2 := new(big.Int).Add(x, delta)
+ alpha.Mul(alpha, alpha2)
+ alpha2.Set(alpha)
+ alpha.Lsh(alpha, 1)
+ alpha.Add(alpha, alpha2)
+
+ beta := alpha2.Mul(x, gamma)
+
+ x3 := new(big.Int).Mul(alpha, alpha)
+ beta8 := new(big.Int).Lsh(beta, 3)
+ beta8.Mod(beta8, curve.P)
+ x3.Sub(x3, beta8)
+ if x3.Sign() == -1 {
+ x3.Add(x3, curve.P)
+ }
+ x3.Mod(x3, curve.P)
+
+ z3 := new(big.Int).Add(y, z)
+ z3.Mul(z3, z3)
+ z3.Sub(z3, gamma)
+ if z3.Sign() == -1 {
+ z3.Add(z3, curve.P)
+ }
+ z3.Sub(z3, delta)
+ if z3.Sign() == -1 {
+ z3.Add(z3, curve.P)
+ }
+ z3.Mod(z3, curve.P)
+
+ beta.Lsh(beta, 2)
+ beta.Sub(beta, x3)
+ if beta.Sign() == -1 {
+ beta.Add(beta, curve.P)
+ }
+ y3 := alpha.Mul(alpha, beta)
+
+ gamma.Mul(gamma, gamma)
+ gamma.Lsh(gamma, 3)
+ gamma.Mod(gamma, curve.P)
+
+ y3.Sub(y3, gamma)
+ if y3.Sign() == -1 {
+ y3.Add(y3, curve.P)
+ }
+ y3.Mod(y3, curve.P)
+
+ return x3, y3, z3
+}
+
+func (curve *CurveParams) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
+ // If there is a dedicated constant-time implementation for this curve operation,
+ // use that instead of the generic one.
+ if specific, ok := matchesSpecificCurve(curve); ok {
+ return specific.ScalarMult(Bx, By, k)
+ }
+ panicIfNotOnCurve(curve, Bx, By)
+
+ Bz := new(big.Int).SetInt64(1)
+ x, y, z := new(big.Int), new(big.Int), new(big.Int)
+
+ for _, byte := range k {
+ for bitNum := 0; bitNum < 8; bitNum++ {
+ x, y, z = curve.doubleJacobian(x, y, z)
+ if byte&0x80 == 0x80 {
+ x, y, z = curve.addJacobian(Bx, By, Bz, x, y, z)
+ }
+ byte <<= 1
+ }
+ }
+
+ return curve.affineFromJacobian(x, y, z)
+}
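
The loop above is plain MSB-first double-and-add. The same control flow on small ordinary integers, where "double" is ×2 and "add" is +base, makes the structure explicit (illustrative sketch only; arithmetic is implicitly mod 2⁶⁴):

	// scalarTimes computes base * k for a big-endian scalar k, mirroring
	// ScalarMult: double the accumulator once per bit, add when the bit is set.
	func scalarTimes(base uint64, k []byte) uint64 {
		var acc uint64
		for _, b := range k {
			for bit := 0; bit < 8; bit++ {
				acc *= 2 // doubleJacobian analogue
				if b&0x80 == 0x80 {
					acc += base // addJacobian analogue
				}
				b <<= 1
			}
		}
		return acc
	}

	// scalarTimes(3, []byte{0x05}) == 15
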
+
+func (curve *CurveParams) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
+ // If there is a dedicated constant-time implementation for this curve operation,
+ // use that instead of the generic one.
+ if specific, ok := matchesSpecificCurve(curve); ok {
+ return specific.ScalarBaseMult(k)
+ }
+
+ return curve.ScalarMult(curve.Gx, curve.Gy, k)
+}
+
+func matchesSpecificCurve(params *CurveParams) (Curve, bool) {
+ for _, c := range []Curve{p224, p256, p384, p521} {
+ if params == c.Params() {
+ return c, true
+ }
+ }
+ return nil, false
+}
diff --git a/contrib/go/_std_1.19/src/crypto/hmac/hmac.go b/contrib/go/_std_1.19/src/crypto/hmac/hmac.go
new file mode 100644
index 0000000000..ed3ebc0602
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/hmac/hmac.go
@@ -0,0 +1,180 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package hmac implements the Keyed-Hash Message Authentication Code (HMAC) as
+defined in U.S. Federal Information Processing Standards Publication 198.
+An HMAC is a cryptographic hash that uses a key to sign a message.
+The receiver verifies the hash by recomputing it using the same key.
+
+Receivers should be careful to use Equal to compare MACs in order to avoid
+timing side-channels:
+
+ // ValidMAC reports whether messageMAC is a valid HMAC tag for message.
+ func ValidMAC(message, messageMAC, key []byte) bool {
+ mac := hmac.New(sha256.New, key)
+ mac.Write(message)
+ expectedMAC := mac.Sum(nil)
+ return hmac.Equal(messageMAC, expectedMAC)
+ }
+*/
+package hmac
+
+import (
+ "crypto/internal/boring"
+ "crypto/subtle"
+ "hash"
+)
+
+// FIPS 198-1:
+// https://csrc.nist.gov/publications/fips/fips198-1/FIPS-198-1_final.pdf
+
+// key is zero padded to the block size of the hash function
+// ipad = 0x36 byte repeated for key length
+// opad = 0x5c byte repeated for key length
+// hmac = H([key ^ opad] H([key ^ ipad] text))
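
As an illustrative cross-check of the construction above (a hedged sketch, not part of the patch), HMAC-SHA256 computed by hand from the formula matches the package:

	package main

	import (
		"crypto/hmac"
		"crypto/sha256"
		"fmt"
	)

	func main() {
		key, msg := []byte("key"), []byte("message")

		// Zero-pad the key to the block size (a longer key would be hashed first).
		k := make([]byte, sha256.BlockSize) // 64 bytes for SHA-256
		copy(k, key)

		ipad := make([]byte, len(k))
		opad := make([]byte, len(k))
		for i := range k {
			ipad[i] = k[i] ^ 0x36
			opad[i] = k[i] ^ 0x5c
		}

		inner := sha256.Sum256(append(ipad, msg...))
		outer := sha256.Sum256(append(opad, inner[:]...))

		mac := hmac.New(sha256.New, key)
		mac.Write(msg)
		fmt.Println(hmac.Equal(outer[:], mac.Sum(nil))) // true
	}
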
+
+// marshalable is the combination of encoding.BinaryMarshaler and
+// encoding.BinaryUnmarshaler. Their method definitions are repeated here to
+// avoid a dependency on the encoding package.
+type marshalable interface {
+ MarshalBinary() ([]byte, error)
+ UnmarshalBinary([]byte) error
+}
+
+type hmac struct {
+ opad, ipad []byte
+ outer, inner hash.Hash
+
+ // If marshaled is true, then opad and ipad do not contain a padded
+ // copy of the key, but rather the marshaled state of outer/inner after
+ // opad/ipad has been fed into it.
+ marshaled bool
+}
+
+func (h *hmac) Sum(in []byte) []byte {
+ origLen := len(in)
+ in = h.inner.Sum(in)
+
+ if h.marshaled {
+ if err := h.outer.(marshalable).UnmarshalBinary(h.opad); err != nil {
+ panic(err)
+ }
+ } else {
+ h.outer.Reset()
+ h.outer.Write(h.opad)
+ }
+ h.outer.Write(in[origLen:])
+ return h.outer.Sum(in[:origLen])
+}
+
+func (h *hmac) Write(p []byte) (n int, err error) {
+ return h.inner.Write(p)
+}
+
+func (h *hmac) Size() int { return h.outer.Size() }
+func (h *hmac) BlockSize() int { return h.inner.BlockSize() }
+
+func (h *hmac) Reset() {
+ if h.marshaled {
+ if err := h.inner.(marshalable).UnmarshalBinary(h.ipad); err != nil {
+ panic(err)
+ }
+ return
+ }
+
+ h.inner.Reset()
+ h.inner.Write(h.ipad)
+
+ // If the underlying hash is marshalable, we can save some time by
+ // saving a copy of the hash state now, and restoring it on future
+ // calls to Reset and Sum instead of writing ipad/opad every time.
+ //
+ // If either hash is unmarshalable for whatever reason,
+ // it's safe to bail out here.
+ marshalableInner, innerOK := h.inner.(marshalable)
+ if !innerOK {
+ return
+ }
+ marshalableOuter, outerOK := h.outer.(marshalable)
+ if !outerOK {
+ return
+ }
+
+ imarshal, err := marshalableInner.MarshalBinary()
+ if err != nil {
+ return
+ }
+
+ h.outer.Reset()
+ h.outer.Write(h.opad)
+ omarshal, err := marshalableOuter.MarshalBinary()
+ if err != nil {
+ return
+ }
+
+ // Marshaling succeeded; save the marshaled state for later
+ h.ipad = imarshal
+ h.opad = omarshal
+ h.marshaled = true
+}
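
The same state-saving trick works outside hmac, since the stdlib hash states implement encoding.BinaryMarshaler; a hedged standalone sketch:

	package main

	import (
		"crypto/sha256"
		"encoding"
		"fmt"
	)

	func main() {
		h := sha256.New()
		h.Write([]byte("common prefix "))
		saved, _ := h.(encoding.BinaryMarshaler).MarshalBinary()

		h2 := sha256.New()
		_ = h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(saved)
		h2.Write([]byte("message"))
		// Same digest as hashing "common prefix message" from scratch.
		fmt.Printf("%x\n", h2.Sum(nil))
	}
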
+
+// New returns a new HMAC hash using the given hash.Hash type and key.
+// New functions like sha256.New from crypto/sha256 can be used as h.
+// h must return a new Hash every time it is called.
+// Note that unlike other hash implementations in the standard library,
+// the returned Hash does not implement encoding.BinaryMarshaler
+// or encoding.BinaryUnmarshaler.
+func New(h func() hash.Hash, key []byte) hash.Hash {
+ if boring.Enabled {
+ hm := boring.NewHMAC(h, key)
+ if hm != nil {
+ return hm
+ }
+ // BoringCrypto did not recognize h, so fall through to standard Go code.
+ }
+ hm := new(hmac)
+ hm.outer = h()
+ hm.inner = h()
+ unique := true
+ func() {
+ defer func() {
+ // The comparison might panic if the underlying types are not comparable.
+ _ = recover()
+ }()
+ if hm.outer == hm.inner {
+ unique = false
+ }
+ }()
+ if !unique {
+ panic("crypto/hmac: hash generation function does not produce unique values")
+ }
+ blocksize := hm.inner.BlockSize()
+ hm.ipad = make([]byte, blocksize)
+ hm.opad = make([]byte, blocksize)
+ if len(key) > blocksize {
+ // If key is too big, hash it.
+ hm.outer.Write(key)
+ key = hm.outer.Sum(nil)
+ }
+ copy(hm.ipad, key)
+ copy(hm.opad, key)
+ for i := range hm.ipad {
+ hm.ipad[i] ^= 0x36
+ }
+ for i := range hm.opad {
+ hm.opad[i] ^= 0x5c
+ }
+ hm.inner.Write(hm.ipad)
+
+ return hm
+}
+
+// Equal compares two MACs for equality without leaking timing information.
+func Equal(mac1, mac2 []byte) bool {
+ // We don't have to be constant time if the lengths of the MACs are
+ // different as that suggests that a completely different hash function
+ // was used.
+ return subtle.ConstantTimeCompare(mac1, mac2) == 1
+}
diff --git a/contrib/go/_std_1.19/src/crypto/internal/boring/bbig/big.go b/contrib/go/_std_1.19/src/crypto/internal/boring/bbig/big.go
new file mode 100644
index 0000000000..5ce46972b3
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/boring/bbig/big.go
@@ -0,0 +1,33 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bbig
+
+import (
+ "crypto/internal/boring"
+ "math/big"
+ "unsafe"
+)
+
+func Enc(b *big.Int) boring.BigInt {
+ if b == nil {
+ return nil
+ }
+ x := b.Bits()
+ if len(x) == 0 {
+ return boring.BigInt{}
+ }
+ return unsafe.Slice((*uint)(&x[0]), len(x))
+}
+
+func Dec(b boring.BigInt) *big.Int {
+ if b == nil {
+ return nil
+ }
+ if len(b) == 0 {
+ return new(big.Int)
+ }
+ x := unsafe.Slice((*big.Word)(&b[0]), len(b))
+ return new(big.Int).SetBits(x)
+}
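
Enc and Dec are lossless for the magnitude, since big.Int.Bits and SetBits expose the same little-endian word slice; a round-trip sketch (non-negative values only, as the sign is not carried):

	n := new(big.Int).SetUint64(123456789)
	if Dec(Enc(n)).Cmp(n) != 0 {
		panic("bbig round trip failed")
	}
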
diff --git a/contrib/go/_std_1.19/src/crypto/internal/boring/doc.go b/contrib/go/_std_1.19/src/crypto/internal/boring/doc.go
new file mode 100644
index 0000000000..6060fe5951
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/boring/doc.go
@@ -0,0 +1,19 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package boring provides access to BoringCrypto implementation functions.
+// Check the constant Enabled to find out whether BoringCrypto is available.
+// If BoringCrypto is not available, the functions in this package all panic.
+package boring
+
+// Enabled reports whether BoringCrypto is available.
+// When Enabled is false, all functions in this package panic.
+//
+// BoringCrypto is only available on linux/amd64 systems.
+const Enabled = available
+
+// A BigInt is the raw words from a *big.Int.
+// This definition allows us to avoid importing math/big.
+// Conversion between BigInt and *big.Int is in crypto/internal/boring/bbig.
+type BigInt []uint
diff --git a/contrib/go/_std_1.19/src/crypto/internal/boring/notboring.go b/contrib/go/_std_1.19/src/crypto/internal/boring/notboring.go
new file mode 100644
index 0000000000..53096a68d1
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/boring/notboring.go
@@ -0,0 +1,113 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !boringcrypto || !linux || !amd64 || !cgo || android || cmd_go_bootstrap || msan
+// +build !boringcrypto !linux !amd64 !cgo android cmd_go_bootstrap msan
+
+package boring
+
+import (
+ "crypto"
+ "crypto/cipher"
+ "crypto/internal/boring/sig"
+ "hash"
+)
+
+const available = false
+
+// Unreachable marks code that should be unreachable
+// when BoringCrypto is in use. It is a no-op without BoringCrypto.
+func Unreachable() {
+ // Code that's unreachable when using BoringCrypto
+ // is exactly the code we want to detect for reporting
+ // standard Go crypto.
+ sig.StandardCrypto()
+}
+
+// UnreachableExceptTests marks code that should be unreachable
+// when BoringCrypto is in use. It is a no-op without BoringCrypto.
+func UnreachableExceptTests() {}
+
+type randReader int
+
+func (randReader) Read(b []byte) (int, error) { panic("boringcrypto: not available") }
+
+const RandReader = randReader(0)
+
+func NewSHA1() hash.Hash { panic("boringcrypto: not available") }
+func NewSHA224() hash.Hash { panic("boringcrypto: not available") }
+func NewSHA256() hash.Hash { panic("boringcrypto: not available") }
+func NewSHA384() hash.Hash { panic("boringcrypto: not available") }
+func NewSHA512() hash.Hash { panic("boringcrypto: not available") }
+
+func SHA1([]byte) [20]byte { panic("boringcrypto: not available") }
+func SHA224([]byte) [28]byte { panic("boringcrypto: not available") }
+func SHA256([]byte) [32]byte { panic("boringcrypto: not available") }
+func SHA384([]byte) [48]byte { panic("boringcrypto: not available") }
+func SHA512([]byte) [64]byte { panic("boringcrypto: not available") }
+
+func NewHMAC(h func() hash.Hash, key []byte) hash.Hash { panic("boringcrypto: not available") }
+
+func NewAESCipher(key []byte) (cipher.Block, error) { panic("boringcrypto: not available") }
+func NewGCMTLS(cipher.Block) (cipher.AEAD, error) { panic("boringcrypto: not available") }
+
+type PublicKeyECDSA struct{ _ int }
+type PrivateKeyECDSA struct{ _ int }
+
+func GenerateKeyECDSA(curve string) (X, Y, D BigInt, err error) {
+ panic("boringcrypto: not available")
+}
+func NewPrivateKeyECDSA(curve string, X, Y, D BigInt) (*PrivateKeyECDSA, error) {
+ panic("boringcrypto: not available")
+}
+func NewPublicKeyECDSA(curve string, X, Y BigInt) (*PublicKeyECDSA, error) {
+ panic("boringcrypto: not available")
+}
+func SignMarshalECDSA(priv *PrivateKeyECDSA, hash []byte) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func VerifyECDSA(pub *PublicKeyECDSA, hash []byte, sig []byte) bool {
+ panic("boringcrypto: not available")
+}
+
+type PublicKeyRSA struct{ _ int }
+type PrivateKeyRSA struct{ _ int }
+
+func DecryptRSAOAEP(h hash.Hash, priv *PrivateKeyRSA, ciphertext, label []byte) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func DecryptRSAPKCS1(priv *PrivateKeyRSA, ciphertext []byte) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func DecryptRSANoPadding(priv *PrivateKeyRSA, ciphertext []byte) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func EncryptRSAOAEP(h hash.Hash, pub *PublicKeyRSA, msg, label []byte) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func EncryptRSAPKCS1(pub *PublicKeyRSA, msg []byte) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func EncryptRSANoPadding(pub *PublicKeyRSA, msg []byte) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func GenerateKeyRSA(bits int) (N, E, D, P, Q, Dp, Dq, Qinv BigInt, err error) {
+ panic("boringcrypto: not available")
+}
+func NewPrivateKeyRSA(N, E, D, P, Q, Dp, Dq, Qinv BigInt) (*PrivateKeyRSA, error) {
+ panic("boringcrypto: not available")
+}
+func NewPublicKeyRSA(N, E BigInt) (*PublicKeyRSA, error) { panic("boringcrypto: not available") }
+func SignRSAPKCS1v15(priv *PrivateKeyRSA, h crypto.Hash, hashed []byte) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func SignRSAPSS(priv *PrivateKeyRSA, h crypto.Hash, hashed []byte, saltLen int) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func VerifyRSAPKCS1v15(pub *PublicKeyRSA, h crypto.Hash, hashed, sig []byte) error {
+ panic("boringcrypto: not available")
+}
+func VerifyRSAPSS(pub *PublicKeyRSA, h crypto.Hash, hashed, sig []byte, saltLen int) error {
+ panic("boringcrypto: not available")
+}
diff --git a/contrib/go/_std_1.19/src/crypto/internal/boring/sig/sig.go b/contrib/go/_std_1.19/src/crypto/internal/boring/sig/sig.go
new file mode 100644
index 0000000000..716c03c5e9
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/boring/sig/sig.go
@@ -0,0 +1,17 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sig holds “code signatures” that can be called
+// and will result in certain code sequences being linked into
+// the final binary. The functions themselves are no-ops.
+package sig
+
+// BoringCrypto indicates that the BoringCrypto module is present.
+func BoringCrypto()
+
+// FIPSOnly indicates that package crypto/tls/fipsonly is present.
+func FIPSOnly()
+
+// StandardCrypto indicates that standard Go crypto is present.
+func StandardCrypto()
diff --git a/contrib/go/_std_1.19/src/crypto/internal/boring/sig/sig_amd64.s b/contrib/go/_std_1.19/src/crypto/internal/boring/sig/sig_amd64.s
new file mode 100644
index 0000000000..64e3462e4e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/boring/sig/sig_amd64.s
@@ -0,0 +1,54 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// These functions are no-ops, but you can search for their implementations
+// to find out whether they are linked into a particular binary.
+//
+// Each function consists of a two-byte jump over the next 29 bytes,
+// then a 5-byte indicator sequence unlikely to occur in real x86 instructions,
+// then a randomly-chosen 24-byte sequence, and finally a return instruction
+// (the target of the jump).
+//
+// These sequences are known to rsc.io/goversion.
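
Illustrative sketch (not part of the patch): given the layout above, a tool can detect which markers were linked into a binary by searching for the 5-byte indicator that follows the two-byte jump.

	data, _ := os.ReadFile("some-binary") // hypothetical path
	indicator := []byte{0xF4, 0x48, 0xF4, 0x4B, 0xF4}
	found := bytes.Contains(data, indicator)
	// The 24 random bytes after an occurrence identify which marker it is.
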
+
+#define START \
+ BYTE $0xEB; BYTE $0x1D; BYTE $0xF4; BYTE $0x48; BYTE $0xF4; BYTE $0x4B; BYTE $0xF4
+
+#define END \
+ BYTE $0xC3
+
+// BoringCrypto indicates that BoringCrypto (in particular, its func init) is present.
+TEXT ·BoringCrypto(SB),NOSPLIT,$0
+ START
+ BYTE $0xB3; BYTE $0x32; BYTE $0xF5; BYTE $0x28;
+ BYTE $0x13; BYTE $0xA3; BYTE $0xB4; BYTE $0x50;
+ BYTE $0xD4; BYTE $0x41; BYTE $0xCC; BYTE $0x24;
+ BYTE $0x85; BYTE $0xF0; BYTE $0x01; BYTE $0x45;
+ BYTE $0x4E; BYTE $0x92; BYTE $0x10; BYTE $0x1B;
+ BYTE $0x1D; BYTE $0x2F; BYTE $0x19; BYTE $0x50;
+ END
+
+// StandardCrypto indicates that standard Go crypto is present.
+TEXT ·StandardCrypto(SB),NOSPLIT,$0
+ START
+ BYTE $0xba; BYTE $0xee; BYTE $0x4d; BYTE $0xfa;
+ BYTE $0x98; BYTE $0x51; BYTE $0xca; BYTE $0x56;
+ BYTE $0xa9; BYTE $0x11; BYTE $0x45; BYTE $0xe8;
+ BYTE $0x3e; BYTE $0x99; BYTE $0xc5; BYTE $0x9c;
+ BYTE $0xf9; BYTE $0x11; BYTE $0xcb; BYTE $0x8e;
+ BYTE $0x80; BYTE $0xda; BYTE $0xf1; BYTE $0x2f;
+ END
+
+// FIPSOnly indicates that crypto/tls/fipsonly is present.
+TEXT ·FIPSOnly(SB),NOSPLIT,$0
+ START
+ BYTE $0x36; BYTE $0x3C; BYTE $0xB9; BYTE $0xCE;
+ BYTE $0x9D; BYTE $0x68; BYTE $0x04; BYTE $0x7D;
+ BYTE $0x31; BYTE $0xF2; BYTE $0x8D; BYTE $0x32;
+ BYTE $0x5D; BYTE $0x5C; BYTE $0xA5; BYTE $0x87;
+ BYTE $0x3F; BYTE $0x5D; BYTE $0x80; BYTE $0xCA;
+ BYTE $0xF6; BYTE $0xD6; BYTE $0x15; BYTE $0x1B;
+ END
diff --git a/contrib/go/_std_1.19/src/crypto/internal/edwards25519/doc.go b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/doc.go
new file mode 100644
index 0000000000..8cba6febfe
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/doc.go
@@ -0,0 +1,22 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package edwards25519 implements group logic for the twisted Edwards curve
+//
+// -x^2 + y^2 = 1 + -(121665/121666)*x^2*y^2
+//
+// This is better known as the Edwards curve equivalent to Curve25519, and is
+// the curve used by the Ed25519 signature scheme.
+//
+// Most users don't need this package, and should instead use crypto/ed25519 for
+// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or
+// github.com/gtank/ristretto255 for prime order group logic.
+//
+// However, developers who do need to interact with low-level edwards25519
+// operations can use filippo.io/edwards25519, an extended version of this
+// package repackaged as an importable module.
+//
+// (Note that filippo.io/edwards25519 and github.com/gtank/ristretto255 are not
+// maintained by the Go team and are not covered by the Go 1 Compatibility Promise.)
+package edwards25519
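
A hedged usage sketch against filippo.io/edwards25519, the importable equivalent mentioned above (its API matches this internal package):

	import "filippo.io/edwards25519"

	B := edwards25519.NewGeneratorPoint()
	twoB := new(edwards25519.Point).Add(B, B)          // [2]B
	back := new(edwards25519.Point).Subtract(twoB, B)  // ([2]B) - B
	// back.Equal(B) == 1
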
diff --git a/contrib/go/_std_1.19/src/crypto/internal/edwards25519/edwards25519.go b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/edwards25519.go
new file mode 100644
index 0000000000..71e9c097a9
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/edwards25519.go
@@ -0,0 +1,426 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "crypto/internal/edwards25519/field"
+ "errors"
+)
+
+// Point types.
+
+type projP1xP1 struct {
+ X, Y, Z, T field.Element
+}
+
+type projP2 struct {
+ X, Y, Z field.Element
+}
+
+// Point represents a point on the edwards25519 curve.
+//
+// This type works similarly to math/big.Int, and all arguments and receivers
+// are allowed to alias.
+//
+// The zero value is NOT valid, and it may be used only as a receiver.
+type Point struct {
+ // The point is internally represented in extended coordinates (X, Y, Z, T)
+ // where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522.
+ x, y, z, t field.Element
+
+	// Make the type not comparable (i.e. not usable with == or as a map key), as
+ // equivalent points can be represented by different Go values.
+ _ incomparable
+}
+
+type incomparable [0]func()
+
+func checkInitialized(points ...*Point) {
+ for _, p := range points {
+ if p.x == (field.Element{}) && p.y == (field.Element{}) {
+ panic("edwards25519: use of uninitialized Point")
+ }
+ }
+}
+
+type projCached struct {
+ YplusX, YminusX, Z, T2d field.Element
+}
+
+type affineCached struct {
+ YplusX, YminusX, T2d field.Element
+}
+
+// Constructors.
+
+func (v *projP2) Zero() *projP2 {
+ v.X.Zero()
+ v.Y.One()
+ v.Z.One()
+ return v
+}
+
+// identity is the point at infinity.
+var identity, _ = new(Point).SetBytes([]byte{
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
+
+// NewIdentityPoint returns a new Point set to the identity.
+func NewIdentityPoint() *Point {
+ return new(Point).Set(identity)
+}
+
+// generator is the canonical curve basepoint. See TestGenerator for the
+// correspondence of this encoding with the values in RFC 8032.
+var generator, _ = new(Point).SetBytes([]byte{
+ 0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66})
+
+// NewGeneratorPoint returns a new Point set to the canonical generator.
+func NewGeneratorPoint() *Point {
+ return new(Point).Set(generator)
+}
+
+func (v *projCached) Zero() *projCached {
+ v.YplusX.One()
+ v.YminusX.One()
+ v.Z.One()
+ v.T2d.Zero()
+ return v
+}
+
+func (v *affineCached) Zero() *affineCached {
+ v.YplusX.One()
+ v.YminusX.One()
+ v.T2d.Zero()
+ return v
+}
+
+// Assignments.
+
+// Set sets v = u, and returns v.
+func (v *Point) Set(u *Point) *Point {
+ *v = *u
+ return v
+}
+
+// Encoding.
+
+// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032,
+// Section 5.1.2.
+func (v *Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var buf [32]byte
+ return v.bytes(&buf)
+}
+
+func (v *Point) bytes(buf *[32]byte) []byte {
+ checkInitialized(v)
+
+ var zInv, x, y field.Element
+ zInv.Invert(&v.z) // zInv = 1 / Z
+ x.Multiply(&v.x, &zInv) // x = X / Z
+ y.Multiply(&v.y, &zInv) // y = Y / Z
+
+ out := copyFieldElement(buf, &y)
+ out[31] |= byte(x.IsNegative() << 7)
+ return out
+}
+
+var feOne = new(field.Element).One()
+
+// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not
+// represent a valid point on the curve, SetBytes returns nil and an error, and
+// the receiver is unchanged. Otherwise, SetBytes returns v.
+//
+// Note that SetBytes accepts all non-canonical encodings of valid points.
+// That is, it follows decoding rules that match most implementations in
+// the ecosystem rather than RFC 8032.
+func (v *Point) SetBytes(x []byte) (*Point, error) {
+ // Specifically, the non-canonical encodings that are accepted are
+ // 1) the ones where the field element is not reduced (see the
+ // (*field.Element).SetBytes docs) and
+ // 2) the ones where the x-coordinate is zero and the sign bit is set.
+ //
+ // Read more at https://hdevalence.ca/blog/2020-10-04-its-25519am,
+ // specifically the "Canonical A, R" section.
+
+ y, err := new(field.Element).SetBytes(x)
+ if err != nil {
+ return nil, errors.New("edwards25519: invalid point encoding length")
+ }
+
+ // -x² + y² = 1 + dx²y²
+ // x² + dx²y² = x²(dy² + 1) = y² - 1
+ // x² = (y² - 1) / (dy² + 1)
+
+ // u = y² - 1
+ y2 := new(field.Element).Square(y)
+ u := new(field.Element).Subtract(y2, feOne)
+
+ // v = dy² + 1
+ vv := new(field.Element).Multiply(y2, d)
+ vv = vv.Add(vv, feOne)
+
+ // x = +√(u/v)
+ xx, wasSquare := new(field.Element).SqrtRatio(u, vv)
+ if wasSquare == 0 {
+ return nil, errors.New("edwards25519: invalid point encoding")
+ }
+
+ // Select the negative square root if the sign bit is set.
+ xxNeg := new(field.Element).Negate(xx)
+ xx = xx.Select(xxNeg, xx, int(x[31]>>7))
+
+ v.x.Set(xx)
+ v.y.Set(y)
+ v.z.One()
+ v.t.Multiply(xx, y) // xy = T / Z
+
+ return v, nil
+}
+
+func copyFieldElement(buf *[32]byte, v *field.Element) []byte {
+ copy(buf[:], v.Bytes())
+ return buf[:]
+}
+
+// Conversions.
+
+func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 {
+ v.X.Multiply(&p.X, &p.T)
+ v.Y.Multiply(&p.Y, &p.Z)
+ v.Z.Multiply(&p.Z, &p.T)
+ return v
+}
+
+func (v *projP2) FromP3(p *Point) *projP2 {
+ v.X.Set(&p.x)
+ v.Y.Set(&p.y)
+ v.Z.Set(&p.z)
+ return v
+}
+
+func (v *Point) fromP1xP1(p *projP1xP1) *Point {
+ v.x.Multiply(&p.X, &p.T)
+ v.y.Multiply(&p.Y, &p.Z)
+ v.z.Multiply(&p.Z, &p.T)
+ v.t.Multiply(&p.X, &p.Y)
+ return v
+}
+
+func (v *Point) fromP2(p *projP2) *Point {
+ v.x.Multiply(&p.X, &p.Z)
+ v.y.Multiply(&p.Y, &p.Z)
+ v.z.Square(&p.Z)
+ v.t.Multiply(&p.X, &p.Y)
+ return v
+}
+
+// d is a constant in the curve equation.
+var d, _ = new(field.Element).SetBytes([]byte{
+ 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
+ 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
+ 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
+ 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52})
+var d2 = new(field.Element).Add(d, d)
+
+func (v *projCached) FromP3(p *Point) *projCached {
+ v.YplusX.Add(&p.y, &p.x)
+ v.YminusX.Subtract(&p.y, &p.x)
+ v.Z.Set(&p.z)
+ v.T2d.Multiply(&p.t, d2)
+ return v
+}
+
+func (v *affineCached) FromP3(p *Point) *affineCached {
+ v.YplusX.Add(&p.y, &p.x)
+ v.YminusX.Subtract(&p.y, &p.x)
+ v.T2d.Multiply(&p.t, d2)
+
+ var invZ field.Element
+ invZ.Invert(&p.z)
+ v.YplusX.Multiply(&v.YplusX, &invZ)
+ v.YminusX.Multiply(&v.YminusX, &invZ)
+ v.T2d.Multiply(&v.T2d, &invZ)
+ return v
+}
+
+// (Re)addition and subtraction.
+
+// Add sets v = p + q, and returns v.
+func (v *Point) Add(p, q *Point) *Point {
+ checkInitialized(p, q)
+ qCached := new(projCached).FromP3(q)
+ result := new(projP1xP1).Add(p, qCached)
+ return v.fromP1xP1(result)
+}
+
+// Subtract sets v = p - q, and returns v.
+func (v *Point) Subtract(p, q *Point) *Point {
+ checkInitialized(p, q)
+ qCached := new(projCached).FromP3(q)
+ result := new(projP1xP1).Sub(p, qCached)
+ return v.fromP1xP1(result)
+}
+
+func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YplusX)
+ MM.Multiply(&YminusX, &q.YminusX)
+ TT2d.Multiply(&p.t, &q.T2d)
+ ZZ2.Multiply(&p.z, &q.Z)
+
+ ZZ2.Add(&ZZ2, &ZZ2)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Add(&ZZ2, &TT2d)
+ v.T.Subtract(&ZZ2, &TT2d)
+ return v
+}
+
+func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YminusX) // flipped sign
+ MM.Multiply(&YminusX, &q.YplusX) // flipped sign
+ TT2d.Multiply(&p.t, &q.T2d)
+ ZZ2.Multiply(&p.z, &q.Z)
+
+ ZZ2.Add(&ZZ2, &ZZ2)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Subtract(&ZZ2, &TT2d) // flipped sign
+ v.T.Add(&ZZ2, &TT2d) // flipped sign
+ return v
+}
+
+func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YplusX)
+ MM.Multiply(&YminusX, &q.YminusX)
+ TT2d.Multiply(&p.t, &q.T2d)
+
+ Z2.Add(&p.z, &p.z)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Add(&Z2, &TT2d)
+ v.T.Subtract(&Z2, &TT2d)
+ return v
+}
+
+func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YminusX) // flipped sign
+ MM.Multiply(&YminusX, &q.YplusX) // flipped sign
+ TT2d.Multiply(&p.t, &q.T2d)
+
+ Z2.Add(&p.z, &p.z)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Subtract(&Z2, &TT2d) // flipped sign
+ v.T.Add(&Z2, &TT2d) // flipped sign
+ return v
+}
+
+// Doubling.
+
+func (v *projP1xP1) Double(p *projP2) *projP1xP1 {
+ var XX, YY, ZZ2, XplusYsq field.Element
+
+ XX.Square(&p.X)
+ YY.Square(&p.Y)
+ ZZ2.Square(&p.Z)
+ ZZ2.Add(&ZZ2, &ZZ2)
+ XplusYsq.Add(&p.X, &p.Y)
+ XplusYsq.Square(&XplusYsq)
+
+ v.Y.Add(&YY, &XX)
+ v.Z.Subtract(&YY, &XX)
+
+ v.X.Subtract(&XplusYsq, &v.Y)
+ v.T.Subtract(&ZZ2, &v.Z)
+ return v
+}
+
+// Negation.
+
+// Negate sets v = -p, and returns v.
+func (v *Point) Negate(p *Point) *Point {
+ checkInitialized(p)
+ v.x.Negate(&p.x)
+ v.y.Set(&p.y)
+ v.z.Set(&p.z)
+ v.t.Negate(&p.t)
+ return v
+}
+
+// Equal returns 1 if v is equivalent to u, and 0 otherwise.
+func (v *Point) Equal(u *Point) int {
+ checkInitialized(v, u)
+
+ var t1, t2, t3, t4 field.Element
+ t1.Multiply(&v.x, &u.z)
+ t2.Multiply(&u.x, &v.z)
+ t3.Multiply(&v.y, &u.z)
+ t4.Multiply(&u.y, &v.z)
+
+ return t1.Equal(&t2) & t3.Equal(&t4)
+}
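
A note on the comparison above: with extended coordinates, v equals u exactly when v.x/v.z == u.x/u.z and v.y/v.z == u.y/u.z, and cross-multiplying (x1·z2 == x2·z1, y1·z2 == y2·z1) checks this without any field inversion; Z is nonzero for every initialized Point.
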
+
+// Constant-time operations
+
+// Select sets v to a if cond == 1 and to b if cond == 0.
+func (v *projCached) Select(a, b *projCached, cond int) *projCached {
+ v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
+ v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
+ v.Z.Select(&a.Z, &b.Z, cond)
+ v.T2d.Select(&a.T2d, &b.T2d, cond)
+ return v
+}
+
+// Select sets v to a if cond == 1 and to b if cond == 0.
+func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached {
+ v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
+ v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
+ v.T2d.Select(&a.T2d, &b.T2d, cond)
+ return v
+}
+
+// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
+func (v *projCached) CondNeg(cond int) *projCached {
+ v.YplusX.Swap(&v.YminusX, cond)
+ v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
+ return v
+}
+
+// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
+func (v *affineCached) CondNeg(cond int) *affineCached {
+ v.YplusX.Swap(&v.YminusX, cond)
+ v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
+ return v
+}
diff --git a/contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe.go b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe.go
new file mode 100644
index 0000000000..5518ef2b90
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe.go
@@ -0,0 +1,420 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package field implements fast arithmetic modulo 2^255-19.
+package field
+
+import (
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "math/bits"
+)
+
+// Element represents an element of the field GF(2^255-19). Note that this
+// is not a cryptographically secure group, and should only be used to interact
+// with edwards25519.Point coordinates.
+//
+// This type works similarly to math/big.Int, and all arguments and receivers
+// are allowed to alias.
+//
+// The zero value is a valid zero element.
+type Element struct {
+ // An element t represents the integer
+ // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204
+ //
+ // Between operations, all limbs are expected to be lower than 2^52.
+ l0 uint64
+ l1 uint64
+ l2 uint64
+ l3 uint64
+ l4 uint64
+}
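
To make the representation concrete, here is a small illustrative helper (not part of the patch) that evaluates the polynomial above with math/big:

	// elementValue returns l0 + l1·2⁵¹ + l2·2¹⁰² + l3·2¹⁵³ + l4·2²⁰⁴.
	func elementValue(l0, l1, l2, l3, l4 uint64) *big.Int {
		v := new(big.Int)
		for _, l := range []uint64{l4, l3, l2, l1, l0} {
			v.Lsh(v, 51)
			v.Add(v, new(big.Int).SetUint64(l))
		}
		return v
	}
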
+
+const maskLow51Bits uint64 = (1 << 51) - 1
+
+var feZero = &Element{0, 0, 0, 0, 0}
+
+// Zero sets v = 0, and returns v.
+func (v *Element) Zero() *Element {
+ *v = *feZero
+ return v
+}
+
+var feOne = &Element{1, 0, 0, 0, 0}
+
+// One sets v = 1, and returns v.
+func (v *Element) One() *Element {
+ *v = *feOne
+ return v
+}
+
+// reduce reduces v modulo 2^255 - 19 and returns it.
+func (v *Element) reduce() *Element {
+ v.carryPropagate()
+
+ // After the light reduction we now have a field element representation
+ // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19.
+
+ // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1,
+ // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise.
+ c := (v.l0 + 19) >> 51
+ c = (v.l1 + c) >> 51
+ c = (v.l2 + c) >> 51
+ c = (v.l3 + c) >> 51
+ c = (v.l4 + c) >> 51
+
+ // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's
+ // effectively applying the reduction identity to the carry.
+ v.l0 += 19 * c
+
+ v.l1 += v.l0 >> 51
+ v.l0 = v.l0 & maskLow51Bits
+ v.l2 += v.l1 >> 51
+ v.l1 = v.l1 & maskLow51Bits
+ v.l3 += v.l2 >> 51
+ v.l2 = v.l2 & maskLow51Bits
+ v.l4 += v.l3 >> 51
+ v.l3 = v.l3 & maskLow51Bits
+ // no additional carry
+ v.l4 = v.l4 & maskLow51Bits
+
+ return v
+}
+
+// Add sets v = a + b, and returns v.
+func (v *Element) Add(a, b *Element) *Element {
+ v.l0 = a.l0 + b.l0
+ v.l1 = a.l1 + b.l1
+ v.l2 = a.l2 + b.l2
+ v.l3 = a.l3 + b.l3
+ v.l4 = a.l4 + b.l4
+ // Using the generic implementation here is actually faster than the
+ // assembly. Probably because the body of this function is so simple that
+ // the compiler can figure out better optimizations by inlining the carry
+ // propagation.
+ return v.carryPropagateGeneric()
+}
+
+// Subtract sets v = a - b, and returns v.
+func (v *Element) Subtract(a, b *Element) *Element {
+ // We first add 2 * p, to guarantee the subtraction won't underflow, and
+ // then subtract b (which can be up to 2^255 + 2^13 * 19).
+ v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0
+ v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1
+ v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2
+ v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3
+ v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4
+ return v.carryPropagate()
+}
+
+// Negate sets v = -a, and returns v.
+func (v *Element) Negate(a *Element) *Element {
+ return v.Subtract(feZero, a)
+}
+
+// Invert sets v = 1/z mod p, and returns v.
+//
+// If z == 0, Invert returns v = 0.
+func (v *Element) Invert(z *Element) *Element {
+ // Inversion is implemented as exponentiation with exponent p − 2. It uses the
+ // same sequence of 255 squarings and 11 multiplications as [Curve25519].
+ var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element
+
+ z2.Square(z) // 2
+ t.Square(&z2) // 4
+ t.Square(&t) // 8
+ z9.Multiply(&t, z) // 9
+ z11.Multiply(&z9, &z2) // 11
+ t.Square(&z11) // 22
+ z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0
+
+ t.Square(&z2_5_0) // 2^6 - 2^1
+ for i := 0; i < 4; i++ {
+ t.Square(&t) // 2^10 - 2^5
+ }
+ z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0
+
+ t.Square(&z2_10_0) // 2^11 - 2^1
+ for i := 0; i < 9; i++ {
+ t.Square(&t) // 2^20 - 2^10
+ }
+ z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0
+
+ t.Square(&z2_20_0) // 2^21 - 2^1
+ for i := 0; i < 19; i++ {
+ t.Square(&t) // 2^40 - 2^20
+ }
+ t.Multiply(&t, &z2_20_0) // 2^40 - 2^0
+
+ t.Square(&t) // 2^41 - 2^1
+ for i := 0; i < 9; i++ {
+ t.Square(&t) // 2^50 - 2^10
+ }
+ z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0
+
+ t.Square(&z2_50_0) // 2^51 - 2^1
+ for i := 0; i < 49; i++ {
+ t.Square(&t) // 2^100 - 2^50
+ }
+ z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0
+
+ t.Square(&z2_100_0) // 2^101 - 2^1
+ for i := 0; i < 99; i++ {
+ t.Square(&t) // 2^200 - 2^100
+ }
+ t.Multiply(&t, &z2_100_0) // 2^200 - 2^0
+
+ t.Square(&t) // 2^201 - 2^1
+ for i := 0; i < 49; i++ {
+ t.Square(&t) // 2^250 - 2^50
+ }
+ t.Multiply(&t, &z2_50_0) // 2^250 - 2^0
+
+ t.Square(&t) // 2^251 - 2^1
+ t.Square(&t) // 2^252 - 2^2
+ t.Square(&t) // 2^253 - 2^3
+ t.Square(&t) // 2^254 - 2^4
+ t.Square(&t) // 2^255 - 2^5
+
+ return v.Multiply(&t, &z11) // 2^255 - 21
+}
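
The exponent choice can be cross-checked with math/big (illustrative sketch): by Fermat's little theorem, z^(p-2) ≡ z⁻¹ (mod p) for any nonzero z.

	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	z := big.NewInt(42)
	zinv := new(big.Int).Exp(z, new(big.Int).Sub(p, big.NewInt(2)), p)
	// (z * zinv) mod p == 1
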
+
+// Set sets v = a, and returns v.
+func (v *Element) Set(a *Element) *Element {
+ *v = *a
+ return v
+}
+
+// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is
+// not of the right length, SetBytes returns nil and an error, and the
+// receiver is unchanged.
+//
+// Consistent with RFC 7748, the most significant bit (the high bit of the
+// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
+// are accepted. Note that this is laxer than specified by RFC 8032, but
+// consistent with most Ed25519 implementations.
+func (v *Element) SetBytes(x []byte) (*Element, error) {
+ if len(x) != 32 {
+ return nil, errors.New("edwards25519: invalid field element input size")
+ }
+
+ // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
+ v.l0 = binary.LittleEndian.Uint64(x[0:8])
+ v.l0 &= maskLow51Bits
+ // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
+ v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
+ v.l1 &= maskLow51Bits
+ // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
+ v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
+ v.l2 &= maskLow51Bits
+ // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
+ v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
+ v.l3 &= maskLow51Bits
+ // Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51).
+ // Note: not bytes 25:33, shift 4, to avoid overread.
+ v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
+ v.l4 &= maskLow51Bits
+
+ return v, nil
+}
+
+// Bytes returns the canonical 32-byte little-endian encoding of v.
+func (v *Element) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [32]byte
+ return v.bytes(&out)
+}
+
+func (v *Element) bytes(out *[32]byte) []byte {
+ t := *v
+ t.reduce()
+
+ var buf [8]byte
+ for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
+ bitsOffset := i * 51
+ binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
+ for i, bb := range buf {
+ off := bitsOffset/8 + i
+ if off >= len(out) {
+ break
+ }
+ out[off] |= bb
+ }
+ }
+
+ return out[:]
+}
+
+// Equal returns 1 if v and u are equal, and 0 otherwise.
+func (v *Element) Equal(u *Element) int {
+ sa, sv := u.Bytes(), v.Bytes()
+ return subtle.ConstantTimeCompare(sa, sv)
+}
+
+// mask64Bits returns 0xffffffffffffffff if cond is 1, and 0 otherwise.
+func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
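
Worked out: mask64Bits(1) is ^(uint64(1) - 1) = ^0 = 0xffffffffffffffff, while mask64Bits(0) is ^(uint64(0) - 1) = ^0xffffffffffffffff = 0, relying on uint64 wraparound for the 0 - 1 step.
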
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *Element) Select(a, b *Element, cond int) *Element {
+ m := mask64Bits(cond)
+ v.l0 = (m & a.l0) | (^m & b.l0)
+ v.l1 = (m & a.l1) | (^m & b.l1)
+ v.l2 = (m & a.l2) | (^m & b.l2)
+ v.l3 = (m & a.l3) | (^m & b.l3)
+ v.l4 = (m & a.l4) | (^m & b.l4)
+ return v
+}
+
+// Swap swaps v and u if cond == 1, or leaves them unchanged if cond == 0.
+func (v *Element) Swap(u *Element, cond int) {
+ m := mask64Bits(cond)
+ t := m & (v.l0 ^ u.l0)
+ v.l0 ^= t
+ u.l0 ^= t
+ t = m & (v.l1 ^ u.l1)
+ v.l1 ^= t
+ u.l1 ^= t
+ t = m & (v.l2 ^ u.l2)
+ v.l2 ^= t
+ u.l2 ^= t
+ t = m & (v.l3 ^ u.l3)
+ v.l3 ^= t
+ u.l3 ^= t
+ t = m & (v.l4 ^ u.l4)
+ v.l4 ^= t
+ u.l4 ^= t
+}
+
+// IsNegative returns 1 if v is negative, and 0 otherwise.
+func (v *Element) IsNegative() int {
+ return int(v.Bytes()[0] & 1)
+}
+
+// Absolute sets v to |u|, and returns v.
+func (v *Element) Absolute(u *Element) *Element {
+ return v.Select(new(Element).Negate(u), u, u.IsNegative())
+}
+
+// Multiply sets v = x * y, and returns v.
+func (v *Element) Multiply(x, y *Element) *Element {
+ feMul(v, x, y)
+ return v
+}
+
+// Square sets v = x * x, and returns v.
+func (v *Element) Square(x *Element) *Element {
+ feSquare(v, x)
+ return v
+}
+
+// Mult32 sets v = x * y, and returns v.
+func (v *Element) Mult32(x *Element, y uint32) *Element {
+ x0lo, x0hi := mul51(x.l0, y)
+ x1lo, x1hi := mul51(x.l1, y)
+ x2lo, x2hi := mul51(x.l2, y)
+ x3lo, x3hi := mul51(x.l3, y)
+ x4lo, x4hi := mul51(x.l4, y)
+ v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
+ v.l1 = x1lo + x0hi
+ v.l2 = x2lo + x1hi
+ v.l3 = x3lo + x2hi
+ v.l4 = x4lo + x3hi
+ // The hi portions are going to be only 32 bits, plus any previous excess,
+ // so we can skip the carry propagation.
+ return v
+}
+
+// mul51 returns lo + hi * 2⁵¹ = a * b.
+func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
+ mh, ml := bits.Mul64(a, uint64(b))
+ lo = ml & maskLow51Bits
+ hi = (mh << 13) | (ml >> 51)
+ return
+}
+
+// Pow22523 sets v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
+func (v *Element) Pow22523(x *Element) *Element {
+ var t0, t1, t2 Element
+
+ t0.Square(x) // x^2
+ t1.Square(&t0) // x^4
+ t1.Square(&t1) // x^8
+ t1.Multiply(x, &t1) // x^9
+ t0.Multiply(&t0, &t1) // x^11
+ t0.Square(&t0) // x^22
+ t0.Multiply(&t1, &t0) // x^31
+ t1.Square(&t0) // x^62
+ for i := 1; i < 5; i++ { // x^992
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1
+ t1.Square(&t0) // 2^11 - 2
+ for i := 1; i < 10; i++ { // 2^20 - 2^10
+ t1.Square(&t1)
+ }
+ t1.Multiply(&t1, &t0) // 2^20 - 1
+ t2.Square(&t1) // 2^21 - 2
+ for i := 1; i < 20; i++ { // 2^40 - 2^20
+ t2.Square(&t2)
+ }
+ t1.Multiply(&t2, &t1) // 2^40 - 1
+ t1.Square(&t1) // 2^41 - 2
+ for i := 1; i < 10; i++ { // 2^50 - 2^10
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // 2^50 - 1
+ t1.Square(&t0) // 2^51 - 2
+ for i := 1; i < 50; i++ { // 2^100 - 2^50
+ t1.Square(&t1)
+ }
+ t1.Multiply(&t1, &t0) // 2^100 - 1
+ t2.Square(&t1) // 2^101 - 2
+ for i := 1; i < 100; i++ { // 2^200 - 2^100
+ t2.Square(&t2)
+ }
+ t1.Multiply(&t2, &t1) // 2^200 - 1
+ t1.Square(&t1) // 2^201 - 2
+ for i := 1; i < 50; i++ { // 2^250 - 2^50
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // 2^250 - 1
+ t0.Square(&t0) // 2^251 - 2
+ t0.Square(&t0) // 2^252 - 4
+ return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
+}
+
+// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
+var sqrtM1 = &Element{1718705420411056, 234908883556509,
+ 2233514472574048, 2117202627021982, 765476049583133}
+
+// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
+//
+// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
+// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
+// and returns r and 0.
+func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) {
+ t0 := new(Element)
+
+ // r = (u * v3) * (u * v7)^((p-5)/8)
+ v2 := new(Element).Square(v)
+ uv3 := new(Element).Multiply(u, t0.Multiply(v2, v))
+ uv7 := new(Element).Multiply(uv3, t0.Square(v2))
+ rr := new(Element).Multiply(uv3, t0.Pow22523(uv7))
+
+ check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2
+
+ uNeg := new(Element).Negate(u)
+ correctSignSqrt := check.Equal(u)
+ flippedSignSqrt := check.Equal(uNeg)
+ flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1))
+
+ rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r
+ // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
+ rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI)
+
+ r.Absolute(rr) // Choose the nonnegative square root.
+ return r, correctSignSqrt | flippedSignSqrt
+}
diff --git a/contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe_amd64.go b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe_amd64.go
new file mode 100644
index 0000000000..70c541692c
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe_amd64.go
@@ -0,0 +1,15 @@
+// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego
+
+package field
+
+// feMul sets out = a * b. It works like feMulGeneric.
+//
+//go:noescape
+func feMul(out *Element, a *Element, b *Element)
+
+// feSquare sets out = a * a. It works like feSquareGeneric.
+//
+//go:noescape
+func feSquare(out *Element, a *Element)
diff --git a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe_amd64.s b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe_amd64.s
index 0aa1e86d98..0aa1e86d98 100644
--- a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe_amd64.s
+++ b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe_amd64.s
diff --git a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe_arm64_noasm.go b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe_arm64_noasm.go
index fc029ac12d..fc029ac12d 100644
--- a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/field/fe_arm64_noasm.go
+++ b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe_arm64_noasm.go
diff --git a/contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe_generic.go b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe_generic.go
new file mode 100644
index 0000000000..d6667b27be
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/field/fe_generic.go
@@ -0,0 +1,266 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import "math/bits"
+
+// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
+// bits.Mul64 and bits.Add64 intrinsics.
+type uint128 struct {
+ lo, hi uint64
+}
+
+// mul64 returns a * b.
+func mul64(a, b uint64) uint128 {
+ hi, lo := bits.Mul64(a, b)
+ return uint128{lo, hi}
+}
+
+// addMul64 returns v + a * b.
+func addMul64(v uint128, a, b uint64) uint128 {
+ hi, lo := bits.Mul64(a, b)
+ lo, c := bits.Add64(lo, v.lo, 0)
+ hi, _ = bits.Add64(hi, v.hi, c)
+ return uint128{lo, hi}
+}
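
A quick illustrative cross-check of the two-limb helpers against math/big (a sketch, assuming the definitions above):

	v := mul64(1<<40, 1<<40) // 2⁸⁰ as {lo, hi}
	v = addMul64(v, 3, 5)    // 2⁸⁰ + 15
	got := new(big.Int).Or(
		new(big.Int).Lsh(new(big.Int).SetUint64(v.hi), 64),
		new(big.Int).SetUint64(v.lo),
	)
	// got equals 1<<80 + 15.
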
+
+// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
+func shiftRightBy51(a uint128) uint64 {
+ return (a.hi << (64 - 51)) | (a.lo >> 51)
+}
+
+func feMulGeneric(v, a, b *Element) {
+ a0 := a.l0
+ a1 := a.l1
+ a2 := a.l2
+ a3 := a.l3
+ a4 := a.l4
+
+ b0 := b.l0
+ b1 := b.l1
+ b2 := b.l2
+ b3 := b.l3
+ b4 := b.l4
+
+ // Limb multiplication works like pen-and-paper columnar multiplication, but
+ // with 51-bit limbs instead of digits.
+ //
+ // a4 a3 a2 a1 a0 x
+ // b4 b3 b2 b1 b0 =
+ // ------------------------
+ // a4b0 a3b0 a2b0 a1b0 a0b0 +
+ // a4b1 a3b1 a2b1 a1b1 a0b1 +
+ // a4b2 a3b2 a2b2 a1b2 a0b2 +
+ // a4b3 a3b3 a2b3 a1b3 a0b3 +
+ // a4b4 a3b4 a2b4 a1b4 a0b4 =
+ // ----------------------------------------------
+ // r8 r7 r6 r5 r4 r3 r2 r1 r0
+ //
+ // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
+ // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
+ // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
+ //
+ // Reduction can be carried out simultaneously to multiplication. For
+ // example, we do not compute r5: whenever the result of a multiplication
+ // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
+ //
+ // a4b0 a3b0 a2b0 a1b0 a0b0 +
+ // a3b1 a2b1 a1b1 a0b1 19×a4b1 +
+ // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 +
+ // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 +
+ // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 =
+ // --------------------------------------
+ // r4 r3 r2 r1 r0
+ //
+ // Finally we add up the columns into wide, overlapping limbs.
+
+ a1_19 := a1 * 19
+ a2_19 := a2 * 19
+ a3_19 := a3 * 19
+ a4_19 := a4 * 19
+
+ // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+ r0 := mul64(a0, b0)
+ r0 = addMul64(r0, a1_19, b4)
+ r0 = addMul64(r0, a2_19, b3)
+ r0 = addMul64(r0, a3_19, b2)
+ r0 = addMul64(r0, a4_19, b1)
+
+ // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
+ r1 := mul64(a0, b1)
+ r1 = addMul64(r1, a1, b0)
+ r1 = addMul64(r1, a2_19, b4)
+ r1 = addMul64(r1, a3_19, b3)
+ r1 = addMul64(r1, a4_19, b2)
+
+ // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
+ r2 := mul64(a0, b2)
+ r2 = addMul64(r2, a1, b1)
+ r2 = addMul64(r2, a2, b0)
+ r2 = addMul64(r2, a3_19, b4)
+ r2 = addMul64(r2, a4_19, b3)
+
+ // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
+ r3 := mul64(a0, b3)
+ r3 = addMul64(r3, a1, b2)
+ r3 = addMul64(r3, a2, b1)
+ r3 = addMul64(r3, a3, b0)
+ r3 = addMul64(r3, a4_19, b4)
+
+ // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+ r4 := mul64(a0, b4)
+ r4 = addMul64(r4, a1, b3)
+ r4 = addMul64(r4, a2, b2)
+ r4 = addMul64(r4, a3, b1)
+ r4 = addMul64(r4, a4, b0)
+
+ // After the multiplication, we need to reduce (carry) the five coefficients
+ // to obtain a result with limbs that are at most slightly larger than 2⁵¹,
+ // to respect the Element invariant.
+ //
+ // Overall, the reduction works the same as carryPropagate, except with
+ // wider inputs: we take the carry for each coefficient by shifting it right
+ // by 51, and add it to the limb above it. The top carry is multiplied by 19
+ // according to the reduction identity and added to the lowest limb.
+ //
+ // The largest coefficient (r0) will be at most 111 bits, which guarantees
+ // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
+ //
+ // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+ // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
+ // r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
+ // r0 < 2⁷ × 2⁵² × 2⁵²
+ // r0 < 2¹¹¹
+ //
+ // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
+ // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
+ // allows us to easily apply the reduction identity.
+ //
+ // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+ // r4 < 5 × 2⁵² × 2⁵²
+ // r4 < 2¹⁰⁷
+ //
+
+ c0 := shiftRightBy51(r0)
+ c1 := shiftRightBy51(r1)
+ c2 := shiftRightBy51(r2)
+ c3 := shiftRightBy51(r3)
+ c4 := shiftRightBy51(r4)
+
+ rr0 := r0.lo&maskLow51Bits + c4*19
+ rr1 := r1.lo&maskLow51Bits + c0
+ rr2 := r2.lo&maskLow51Bits + c1
+ rr3 := r3.lo&maskLow51Bits + c2
+ rr4 := r4.lo&maskLow51Bits + c3
+
+ // Now all coefficients fit into 64-bit registers but are still too large to
+ // be passed around as a Element. We therefore do one last carry chain,
+ // where the carries will be small enough to fit in the wiggle room above 2⁵¹.
+ *v = Element{rr0, rr1, rr2, rr3, rr4}
+ v.carryPropagate()
+}
+
+func feSquareGeneric(v, a *Element) {
+ l0 := a.l0
+ l1 := a.l1
+ l2 := a.l2
+ l3 := a.l3
+ l4 := a.l4
+
+ // Squaring works precisely like multiplication above, but thanks to its
+ // symmetry we get to group a few terms together.
+ //
+ // l4 l3 l2 l1 l0 x
+ // l4 l3 l2 l1 l0 =
+ // ------------------------
+ // l4l0 l3l0 l2l0 l1l0 l0l0 +
+ // l4l1 l3l1 l2l1 l1l1 l0l1 +
+ // l4l2 l3l2 l2l2 l1l2 l0l2 +
+ // l4l3 l3l3 l2l3 l1l3 l0l3 +
+ // l4l4 l3l4 l2l4 l1l4 l0l4 =
+ // ----------------------------------------------
+ // r8 r7 r6 r5 r4 r3 r2 r1 r0
+ //
+ // l4l0 l3l0 l2l0 l1l0 l0l0 +
+ // l3l1 l2l1 l1l1 l0l1 19×l4l1 +
+ // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 +
+ // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 +
+ // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 =
+ // --------------------------------------
+ // r4 r3 r2 r1 r0
+ //
+ // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
+ // only three Mul64 and four Add64, instead of five and eight.
+
+ l0_2 := l0 * 2
+ l1_2 := l1 * 2
+
+ l1_38 := l1 * 38
+ l2_38 := l2 * 38
+ l3_38 := l3 * 38
+
+ l3_19 := l3 * 19
+ l4_19 := l4 * 19
+
+ // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
+ r0 := mul64(l0, l0)
+ r0 = addMul64(r0, l1_38, l4)
+ r0 = addMul64(r0, l2_38, l3)
+
+ // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
+ r1 := mul64(l0_2, l1)
+ r1 = addMul64(r1, l2_38, l4)
+ r1 = addMul64(r1, l3_19, l3)
+
+ // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
+ r2 := mul64(l0_2, l2)
+ r2 = addMul64(r2, l1, l1)
+ r2 = addMul64(r2, l3_38, l4)
+
+ // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
+ r3 := mul64(l0_2, l3)
+ r3 = addMul64(r3, l1_2, l2)
+ r3 = addMul64(r3, l4_19, l4)
+
+ // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
+ r4 := mul64(l0_2, l4)
+ r4 = addMul64(r4, l1_2, l3)
+ r4 = addMul64(r4, l2, l2)
+
+ c0 := shiftRightBy51(r0)
+ c1 := shiftRightBy51(r1)
+ c2 := shiftRightBy51(r2)
+ c3 := shiftRightBy51(r3)
+ c4 := shiftRightBy51(r4)
+
+ rr0 := r0.lo&maskLow51Bits + c4*19
+ rr1 := r1.lo&maskLow51Bits + c0
+ rr2 := r2.lo&maskLow51Bits + c1
+ rr3 := r3.lo&maskLow51Bits + c2
+ rr4 := r4.lo&maskLow51Bits + c3
+
+ *v = Element{rr0, rr1, rr2, rr3, rr4}
+ v.carryPropagate()
+}
+
+// carryPropagate brings the limbs below 52 bits by applying the reduction
+// identity (a * 2²⁵⁵ + b = a * 19 + b mod p) to the l4 carry.
+func (v *Element) carryPropagateGeneric() *Element {
+ c0 := v.l0 >> 51
+ c1 := v.l1 >> 51
+ c2 := v.l2 >> 51
+ c3 := v.l3 >> 51
+ c4 := v.l4 >> 51
+
+ // c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and
+ // the final l0 will be at most 52 bits. Similarly for the rest.
+ v.l0 = v.l0&maskLow51Bits + c4*19
+ v.l1 = v.l1&maskLow51Bits + c0
+ v.l2 = v.l2&maskLow51Bits + c1
+ v.l3 = v.l3&maskLow51Bits + c2
+ v.l4 = v.l4&maskLow51Bits + c3
+
+ return v
+}
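
The generic routines above lean on a uint128 carrier type and three small helpers (mul64, addMul64, shiftRightBy51) defined elsewhere in this file. A minimal sketch consistent with how they are used above, built on math/bits:

package edwards25519

import "math/bits"

// uint128 holds a 128-bit value as two 64-bit halves.
type uint128 struct{ lo, hi uint64 }

// mul64 returns a * b.
func mul64(a, b uint64) uint128 {
	hi, lo := bits.Mul64(a, b)
	return uint128{lo, hi}
}

// addMul64 returns v + a * b, assuming the sum fits in 128 bits.
func addMul64(v uint128, a, b uint64) uint128 {
	hi, lo := bits.Mul64(a, b)
	lo, c := bits.Add64(lo, v.lo, 0)
	hi, _ = bits.Add64(hi, v.hi, c)
	return uint128{lo, hi}
}

// shiftRightBy51 returns a >> 51, assuming the result fits in 64 bits
// (the carries above are at most 61 bits).
func shiftRightBy51(a uint128) uint64 {
	return (a.hi << (64 - 51)) | (a.lo >> 51)
}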
diff --git a/contrib/go/_std_1.19/src/crypto/internal/edwards25519/scalar.go b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/scalar.go
new file mode 100644
index 0000000000..4530bc3ce2
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/scalar.go
@@ -0,0 +1,1034 @@
+// Copyright (c) 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+)
+
+// A Scalar is an integer modulo
+//
+// l = 2^252 + 27742317777372353535851937790883648493
+//
+// which is the prime order of the edwards25519 group.
+//
+// This type works similarly to math/big.Int, and all arguments and
+// receivers are allowed to alias.
+//
+// The zero value is a valid zero element.
+type Scalar struct {
+ // s is the Scalar value in little-endian. The value is always reduced
+ // modulo l between operations.
+ s [32]byte
+}
+
+var (
+ scZero = Scalar{[32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+
+ scOne = Scalar{[32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+
+ scMinusOne = Scalar{[32]byte{236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16}}
+)
+
+// NewScalar returns a new zero Scalar.
+func NewScalar() *Scalar {
+ return &Scalar{}
+}
+
+// MultiplyAdd sets s = x * y + z mod l, and returns s.
+func (s *Scalar) MultiplyAdd(x, y, z *Scalar) *Scalar {
+ scMulAdd(&s.s, &x.s, &y.s, &z.s)
+ return s
+}
+
+// Add sets s = x + y mod l, and returns s.
+func (s *Scalar) Add(x, y *Scalar) *Scalar {
+ // s = 1 * x + y mod l
+ scMulAdd(&s.s, &scOne.s, &x.s, &y.s)
+ return s
+}
+
+// Subtract sets s = x - y mod l, and returns s.
+func (s *Scalar) Subtract(x, y *Scalar) *Scalar {
+ // s = -1 * y + x mod l
+ scMulAdd(&s.s, &scMinusOne.s, &y.s, &x.s)
+ return s
+}
+
+// Negate sets s = -x mod l, and returns s.
+func (s *Scalar) Negate(x *Scalar) *Scalar {
+ // s = -1 * x + 0 mod l
+ scMulAdd(&s.s, &scMinusOne.s, &x.s, &scZero.s)
+ return s
+}
+
+// Multiply sets s = x * y mod l, and returns s.
+func (s *Scalar) Multiply(x, y *Scalar) *Scalar {
+ // s = x * y + 0 mod l
+ scMulAdd(&s.s, &x.s, &y.s, &scZero.s)
+ return s
+}
+
+// Set sets s = x, and returns s.
+func (s *Scalar) Set(x *Scalar) *Scalar {
+ *s = *x
+ return s
+}
+
+// SetUniformBytes sets s = x mod l, where x is a 64-byte little-endian integer.
+// If x is not of the right length, SetUniformBytes returns nil and an error,
+// and the receiver is unchanged.
+//
+// SetUniformBytes can be used to set s to a uniformly distributed value given
+// 64 uniformly distributed random bytes.
+func (s *Scalar) SetUniformBytes(x []byte) (*Scalar, error) {
+ if len(x) != 64 {
+ return nil, errors.New("edwards25519: invalid SetUniformBytes input length")
+ }
+ var wideBytes [64]byte
+ copy(wideBytes[:], x[:])
+ scReduce(&s.s, &wideBytes)
+ return s, nil
+}
+
+// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of
+// s, and returns s. If x is not a canonical encoding of s, SetCanonicalBytes
+// returns nil and an error, and the receiver is unchanged.
+func (s *Scalar) SetCanonicalBytes(x []byte) (*Scalar, error) {
+ if len(x) != 32 {
+ return nil, errors.New("invalid scalar length")
+ }
+ ss := &Scalar{}
+ copy(ss.s[:], x)
+ if !isReduced(ss) {
+ return nil, errors.New("invalid scalar encoding")
+ }
+ s.s = ss.s
+ return s, nil
+}
+
+// isReduced returns whether the given scalar is reduced modulo l.
+func isReduced(s *Scalar) bool {
+ for i := len(s.s) - 1; i >= 0; i-- {
+ switch {
+ case s.s[i] > scMinusOne.s[i]:
+ return false
+ case s.s[i] < scMinusOne.s[i]:
+ return true
+ }
+ }
+ return true
+}
+
+// SetBytesWithClamping applies the buffer pruning described in RFC 8032,
+// Section 5.1.5 (also known as clamping) and sets s to the result. The input
+// must be 32 bytes, and it is not modified. If x is not of the right length,
+// SetBytesWithClamping returns nil and an error, and the receiver is unchanged.
+//
+// Note that since Scalar values are always reduced modulo the prime order of
+// the curve, the resulting value will not preserve any of the cofactor-clearing
+// properties that clamping is meant to provide. It will however work as
+// expected as long as it is applied to points on the prime order subgroup, like
+// in Ed25519. In fact, it is lost to history why RFC 8032 adopted the
+// irrelevant RFC 7748 clamping, but it is now required for compatibility.
+func (s *Scalar) SetBytesWithClamping(x []byte) (*Scalar, error) {
+ // The description above omits the purpose of the high bits of the clamping
+ // for brevity, but those are also lost to reductions, and are also
+ // irrelevant to edwards25519 as they protect against a specific
+ // implementation bug that was once observed in a generic Montgomery ladder.
+ if len(x) != 32 {
+ return nil, errors.New("edwards25519: invalid SetBytesWithClamping input length")
+ }
+ var wideBytes [64]byte
+ copy(wideBytes[:], x[:])
+ wideBytes[0] &= 248
+ wideBytes[31] &= 63
+ wideBytes[31] |= 64
+ scReduce(&s.s, &wideBytes)
+ return s, nil
+}
+
+// Bytes returns the canonical 32-byte little-endian encoding of s.
+func (s *Scalar) Bytes() []byte {
+ buf := make([]byte, 32)
+ copy(buf, s.s[:])
+ return buf
+}
+
+// Equal returns 1 if s and t are equal, and 0 otherwise.
+func (s *Scalar) Equal(t *Scalar) int {
+ return subtle.ConstantTimeCompare(s.s[:], t.s[:])
+}
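// A standalone usage sketch, not part of this file: since this package is
// internal to the standard library, the sketch imports filippo.io/edwards25519,
// which exposes the same Scalar API. It derives a scalar from 64 uniform
// random bytes and checks that s*1 + 0 == s via MultiplyAdd and Equal.
package main

import (
	"crypto/rand"
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	seed := make([]byte, 64)
	if _, err := rand.Read(seed); err != nil {
		panic(err)
	}
	s, err := edwards25519.NewScalar().SetUniformBytes(seed)
	if err != nil {
		panic(err)
	}
	one, err := edwards25519.NewScalar().SetCanonicalBytes(append([]byte{1}, make([]byte, 31)...))
	if err != nil {
		panic(err)
	}
	zero := edwards25519.NewScalar()
	got := edwards25519.NewScalar().MultiplyAdd(s, one, zero)
	fmt.Println(got.Equal(s) == 1) // prints true
}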
+
+// scMulAdd and scReduce are ported from the public domain, “ref10”
+// implementation of ed25519 from SUPERCOP.
+
+func load3(in []byte) int64 {
+ r := int64(in[0])
+ r |= int64(in[1]) << 8
+ r |= int64(in[2]) << 16
+ return r
+}
+
+func load4(in []byte) int64 {
+ r := int64(in[0])
+ r |= int64(in[1]) << 8
+ r |= int64(in[2]) << 16
+ r |= int64(in[3]) << 24
+ return r
+}
+
+// Input:
+//
+// a[0]+256*a[1]+...+256^31*a[31] = a
+// b[0]+256*b[1]+...+256^31*b[31] = b
+// c[0]+256*c[1]+...+256^31*c[31] = c
+//
+// Output:
+//
+// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l
+// where l = 2^252 + 27742317777372353535851937790883648493.
+func scMulAdd(s, a, b, c *[32]byte) {
+ a0 := 2097151 & load3(a[:])
+ a1 := 2097151 & (load4(a[2:]) >> 5)
+ a2 := 2097151 & (load3(a[5:]) >> 2)
+ a3 := 2097151 & (load4(a[7:]) >> 7)
+ a4 := 2097151 & (load4(a[10:]) >> 4)
+ a5 := 2097151 & (load3(a[13:]) >> 1)
+ a6 := 2097151 & (load4(a[15:]) >> 6)
+ a7 := 2097151 & (load3(a[18:]) >> 3)
+ a8 := 2097151 & load3(a[21:])
+ a9 := 2097151 & (load4(a[23:]) >> 5)
+ a10 := 2097151 & (load3(a[26:]) >> 2)
+ a11 := (load4(a[28:]) >> 7)
+ b0 := 2097151 & load3(b[:])
+ b1 := 2097151 & (load4(b[2:]) >> 5)
+ b2 := 2097151 & (load3(b[5:]) >> 2)
+ b3 := 2097151 & (load4(b[7:]) >> 7)
+ b4 := 2097151 & (load4(b[10:]) >> 4)
+ b5 := 2097151 & (load3(b[13:]) >> 1)
+ b6 := 2097151 & (load4(b[15:]) >> 6)
+ b7 := 2097151 & (load3(b[18:]) >> 3)
+ b8 := 2097151 & load3(b[21:])
+ b9 := 2097151 & (load4(b[23:]) >> 5)
+ b10 := 2097151 & (load3(b[26:]) >> 2)
+ b11 := (load4(b[28:]) >> 7)
+ c0 := 2097151 & load3(c[:])
+ c1 := 2097151 & (load4(c[2:]) >> 5)
+ c2 := 2097151 & (load3(c[5:]) >> 2)
+ c3 := 2097151 & (load4(c[7:]) >> 7)
+ c4 := 2097151 & (load4(c[10:]) >> 4)
+ c5 := 2097151 & (load3(c[13:]) >> 1)
+ c6 := 2097151 & (load4(c[15:]) >> 6)
+ c7 := 2097151 & (load3(c[18:]) >> 3)
+ c8 := 2097151 & load3(c[21:])
+ c9 := 2097151 & (load4(c[23:]) >> 5)
+ c10 := 2097151 & (load3(c[26:]) >> 2)
+ c11 := (load4(c[28:]) >> 7)
+ var carry [23]int64
+
+ s0 := c0 + a0*b0
+ s1 := c1 + a0*b1 + a1*b0
+ s2 := c2 + a0*b2 + a1*b1 + a2*b0
+ s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0
+ s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0
+ s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0
+ s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0
+ s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0
+ s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0
+ s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0
+ s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0
+ s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0
+ s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1
+ s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2
+ s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3
+ s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4
+ s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5
+ s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6
+ s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7
+ s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8
+ s20 := a9*b11 + a10*b10 + a11*b9
+ s21 := a10*b11 + a11*b10
+ s22 := a11 * b11
+ s23 := int64(0)
+
+ carry[0] = (s0 + (1 << 20)) >> 21
+ s1 += carry[0]
+ s0 -= carry[0] << 21
+ carry[2] = (s2 + (1 << 20)) >> 21
+ s3 += carry[2]
+ s2 -= carry[2] << 21
+ carry[4] = (s4 + (1 << 20)) >> 21
+ s5 += carry[4]
+ s4 -= carry[4] << 21
+ carry[6] = (s6 + (1 << 20)) >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[8] = (s8 + (1 << 20)) >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[10] = (s10 + (1 << 20)) >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+ carry[12] = (s12 + (1 << 20)) >> 21
+ s13 += carry[12]
+ s12 -= carry[12] << 21
+ carry[14] = (s14 + (1 << 20)) >> 21
+ s15 += carry[14]
+ s14 -= carry[14] << 21
+ carry[16] = (s16 + (1 << 20)) >> 21
+ s17 += carry[16]
+ s16 -= carry[16] << 21
+ carry[18] = (s18 + (1 << 20)) >> 21
+ s19 += carry[18]
+ s18 -= carry[18] << 21
+ carry[20] = (s20 + (1 << 20)) >> 21
+ s21 += carry[20]
+ s20 -= carry[20] << 21
+ carry[22] = (s22 + (1 << 20)) >> 21
+ s23 += carry[22]
+ s22 -= carry[22] << 21
+
+ carry[1] = (s1 + (1 << 20)) >> 21
+ s2 += carry[1]
+ s1 -= carry[1] << 21
+ carry[3] = (s3 + (1 << 20)) >> 21
+ s4 += carry[3]
+ s3 -= carry[3] << 21
+ carry[5] = (s5 + (1 << 20)) >> 21
+ s6 += carry[5]
+ s5 -= carry[5] << 21
+ carry[7] = (s7 + (1 << 20)) >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[9] = (s9 + (1 << 20)) >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[11] = (s11 + (1 << 20)) >> 21
+ s12 += carry[11]
+ s11 -= carry[11] << 21
+ carry[13] = (s13 + (1 << 20)) >> 21
+ s14 += carry[13]
+ s13 -= carry[13] << 21
+ carry[15] = (s15 + (1 << 20)) >> 21
+ s16 += carry[15]
+ s15 -= carry[15] << 21
+ carry[17] = (s17 + (1 << 20)) >> 21
+ s18 += carry[17]
+ s17 -= carry[17] << 21
+ carry[19] = (s19 + (1 << 20)) >> 21
+ s20 += carry[19]
+ s19 -= carry[19] << 21
+ carry[21] = (s21 + (1 << 20)) >> 21
+ s22 += carry[21]
+ s21 -= carry[21] << 21
+
+ s11 += s23 * 666643
+ s12 += s23 * 470296
+ s13 += s23 * 654183
+ s14 -= s23 * 997805
+ s15 += s23 * 136657
+ s16 -= s23 * 683901
+ s23 = 0
+
+ s10 += s22 * 666643
+ s11 += s22 * 470296
+ s12 += s22 * 654183
+ s13 -= s22 * 997805
+ s14 += s22 * 136657
+ s15 -= s22 * 683901
+ s22 = 0
+
+ s9 += s21 * 666643
+ s10 += s21 * 470296
+ s11 += s21 * 654183
+ s12 -= s21 * 997805
+ s13 += s21 * 136657
+ s14 -= s21 * 683901
+ s21 = 0
+
+ s8 += s20 * 666643
+ s9 += s20 * 470296
+ s10 += s20 * 654183
+ s11 -= s20 * 997805
+ s12 += s20 * 136657
+ s13 -= s20 * 683901
+ s20 = 0
+
+ s7 += s19 * 666643
+ s8 += s19 * 470296
+ s9 += s19 * 654183
+ s10 -= s19 * 997805
+ s11 += s19 * 136657
+ s12 -= s19 * 683901
+ s19 = 0
+
+ s6 += s18 * 666643
+ s7 += s18 * 470296
+ s8 += s18 * 654183
+ s9 -= s18 * 997805
+ s10 += s18 * 136657
+ s11 -= s18 * 683901
+ s18 = 0
+
+ carry[6] = (s6 + (1 << 20)) >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[8] = (s8 + (1 << 20)) >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[10] = (s10 + (1 << 20)) >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+ carry[12] = (s12 + (1 << 20)) >> 21
+ s13 += carry[12]
+ s12 -= carry[12] << 21
+ carry[14] = (s14 + (1 << 20)) >> 21
+ s15 += carry[14]
+ s14 -= carry[14] << 21
+ carry[16] = (s16 + (1 << 20)) >> 21
+ s17 += carry[16]
+ s16 -= carry[16] << 21
+
+ carry[7] = (s7 + (1 << 20)) >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[9] = (s9 + (1 << 20)) >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[11] = (s11 + (1 << 20)) >> 21
+ s12 += carry[11]
+ s11 -= carry[11] << 21
+ carry[13] = (s13 + (1 << 20)) >> 21
+ s14 += carry[13]
+ s13 -= carry[13] << 21
+ carry[15] = (s15 + (1 << 20)) >> 21
+ s16 += carry[15]
+ s15 -= carry[15] << 21
+
+ s5 += s17 * 666643
+ s6 += s17 * 470296
+ s7 += s17 * 654183
+ s8 -= s17 * 997805
+ s9 += s17 * 136657
+ s10 -= s17 * 683901
+ s17 = 0
+
+ s4 += s16 * 666643
+ s5 += s16 * 470296
+ s6 += s16 * 654183
+ s7 -= s16 * 997805
+ s8 += s16 * 136657
+ s9 -= s16 * 683901
+ s16 = 0
+
+ s3 += s15 * 666643
+ s4 += s15 * 470296
+ s5 += s15 * 654183
+ s6 -= s15 * 997805
+ s7 += s15 * 136657
+ s8 -= s15 * 683901
+ s15 = 0
+
+ s2 += s14 * 666643
+ s3 += s14 * 470296
+ s4 += s14 * 654183
+ s5 -= s14 * 997805
+ s6 += s14 * 136657
+ s7 -= s14 * 683901
+ s14 = 0
+
+ s1 += s13 * 666643
+ s2 += s13 * 470296
+ s3 += s13 * 654183
+ s4 -= s13 * 997805
+ s5 += s13 * 136657
+ s6 -= s13 * 683901
+ s13 = 0
+
+ s0 += s12 * 666643
+ s1 += s12 * 470296
+ s2 += s12 * 654183
+ s3 -= s12 * 997805
+ s4 += s12 * 136657
+ s5 -= s12 * 683901
+ s12 = 0
+
+ carry[0] = (s0 + (1 << 20)) >> 21
+ s1 += carry[0]
+ s0 -= carry[0] << 21
+ carry[2] = (s2 + (1 << 20)) >> 21
+ s3 += carry[2]
+ s2 -= carry[2] << 21
+ carry[4] = (s4 + (1 << 20)) >> 21
+ s5 += carry[4]
+ s4 -= carry[4] << 21
+ carry[6] = (s6 + (1 << 20)) >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[8] = (s8 + (1 << 20)) >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[10] = (s10 + (1 << 20)) >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+
+ carry[1] = (s1 + (1 << 20)) >> 21
+ s2 += carry[1]
+ s1 -= carry[1] << 21
+ carry[3] = (s3 + (1 << 20)) >> 21
+ s4 += carry[3]
+ s3 -= carry[3] << 21
+ carry[5] = (s5 + (1 << 20)) >> 21
+ s6 += carry[5]
+ s5 -= carry[5] << 21
+ carry[7] = (s7 + (1 << 20)) >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[9] = (s9 + (1 << 20)) >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[11] = (s11 + (1 << 20)) >> 21
+ s12 += carry[11]
+ s11 -= carry[11] << 21
+
+ s0 += s12 * 666643
+ s1 += s12 * 470296
+ s2 += s12 * 654183
+ s3 -= s12 * 997805
+ s4 += s12 * 136657
+ s5 -= s12 * 683901
+ s12 = 0
+
+ carry[0] = s0 >> 21
+ s1 += carry[0]
+ s0 -= carry[0] << 21
+ carry[1] = s1 >> 21
+ s2 += carry[1]
+ s1 -= carry[1] << 21
+ carry[2] = s2 >> 21
+ s3 += carry[2]
+ s2 -= carry[2] << 21
+ carry[3] = s3 >> 21
+ s4 += carry[3]
+ s3 -= carry[3] << 21
+ carry[4] = s4 >> 21
+ s5 += carry[4]
+ s4 -= carry[4] << 21
+ carry[5] = s5 >> 21
+ s6 += carry[5]
+ s5 -= carry[5] << 21
+ carry[6] = s6 >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[7] = s7 >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[8] = s8 >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[9] = s9 >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[10] = s10 >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+ carry[11] = s11 >> 21
+ s12 += carry[11]
+ s11 -= carry[11] << 21
+
+ s0 += s12 * 666643
+ s1 += s12 * 470296
+ s2 += s12 * 654183
+ s3 -= s12 * 997805
+ s4 += s12 * 136657
+ s5 -= s12 * 683901
+ s12 = 0
+
+ carry[0] = s0 >> 21
+ s1 += carry[0]
+ s0 -= carry[0] << 21
+ carry[1] = s1 >> 21
+ s2 += carry[1]
+ s1 -= carry[1] << 21
+ carry[2] = s2 >> 21
+ s3 += carry[2]
+ s2 -= carry[2] << 21
+ carry[3] = s3 >> 21
+ s4 += carry[3]
+ s3 -= carry[3] << 21
+ carry[4] = s4 >> 21
+ s5 += carry[4]
+ s4 -= carry[4] << 21
+ carry[5] = s5 >> 21
+ s6 += carry[5]
+ s5 -= carry[5] << 21
+ carry[6] = s6 >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[7] = s7 >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[8] = s8 >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[9] = s9 >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[10] = s10 >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+
+ s[0] = byte(s0 >> 0)
+ s[1] = byte(s0 >> 8)
+ s[2] = byte((s0 >> 16) | (s1 << 5))
+ s[3] = byte(s1 >> 3)
+ s[4] = byte(s1 >> 11)
+ s[5] = byte((s1 >> 19) | (s2 << 2))
+ s[6] = byte(s2 >> 6)
+ s[7] = byte((s2 >> 14) | (s3 << 7))
+ s[8] = byte(s3 >> 1)
+ s[9] = byte(s3 >> 9)
+ s[10] = byte((s3 >> 17) | (s4 << 4))
+ s[11] = byte(s4 >> 4)
+ s[12] = byte(s4 >> 12)
+ s[13] = byte((s4 >> 20) | (s5 << 1))
+ s[14] = byte(s5 >> 7)
+ s[15] = byte((s5 >> 15) | (s6 << 6))
+ s[16] = byte(s6 >> 2)
+ s[17] = byte(s6 >> 10)
+ s[18] = byte((s6 >> 18) | (s7 << 3))
+ s[19] = byte(s7 >> 5)
+ s[20] = byte(s7 >> 13)
+ s[21] = byte(s8 >> 0)
+ s[22] = byte(s8 >> 8)
+ s[23] = byte((s8 >> 16) | (s9 << 5))
+ s[24] = byte(s9 >> 3)
+ s[25] = byte(s9 >> 11)
+ s[26] = byte((s9 >> 19) | (s10 << 2))
+ s[27] = byte(s10 >> 6)
+ s[28] = byte((s10 >> 14) | (s11 << 7))
+ s[29] = byte(s11 >> 1)
+ s[30] = byte(s11 >> 9)
+ s[31] = byte(s11 >> 17)
+}
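// A standalone sketch, not part of this file: the signed constants used in
// the limb reductions above (666643, 470296, 654183, -997805, 136657,
// -683901) encode 2²⁵² mod l across 21-bit limb positions, which is why each
// substitution shifts a high limb down by exactly 252 bits. A math/big check
// of that identity:
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// l = 2^252 + 27742317777372353535851937790883648493
	l, _ := new(big.Int).SetString(
		"7237005577332262213973186563042994240857116359379907606001950938285454250989", 10)
	coeffs := []int64{666643, 470296, 654183, -997805, 136657, -683901}
	sum := new(big.Int)
	for i, c := range coeffs {
		sum.Add(sum, new(big.Int).Lsh(big.NewInt(c), uint(21*i)))
	}
	lhs := new(big.Int).Mod(sum, l)
	rhs := new(big.Int).Mod(new(big.Int).Lsh(big.NewInt(1), 252), l)
	fmt.Println(lhs.Cmp(rhs) == 0) // prints true
}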
+
+// Input:
+//
+// s[0]+256*s[1]+...+256^63*s[63] = s
+//
+// Output:
+//
+// s[0]+256*s[1]+...+256^31*s[31] = s mod l
+// where l = 2^252 + 27742317777372353535851937790883648493.
+func scReduce(out *[32]byte, s *[64]byte) {
+ s0 := 2097151 & load3(s[:])
+ s1 := 2097151 & (load4(s[2:]) >> 5)
+ s2 := 2097151 & (load3(s[5:]) >> 2)
+ s3 := 2097151 & (load4(s[7:]) >> 7)
+ s4 := 2097151 & (load4(s[10:]) >> 4)
+ s5 := 2097151 & (load3(s[13:]) >> 1)
+ s6 := 2097151 & (load4(s[15:]) >> 6)
+ s7 := 2097151 & (load3(s[18:]) >> 3)
+ s8 := 2097151 & load3(s[21:])
+ s9 := 2097151 & (load4(s[23:]) >> 5)
+ s10 := 2097151 & (load3(s[26:]) >> 2)
+ s11 := 2097151 & (load4(s[28:]) >> 7)
+ s12 := 2097151 & (load4(s[31:]) >> 4)
+ s13 := 2097151 & (load3(s[34:]) >> 1)
+ s14 := 2097151 & (load4(s[36:]) >> 6)
+ s15 := 2097151 & (load3(s[39:]) >> 3)
+ s16 := 2097151 & load3(s[42:])
+ s17 := 2097151 & (load4(s[44:]) >> 5)
+ s18 := 2097151 & (load3(s[47:]) >> 2)
+ s19 := 2097151 & (load4(s[49:]) >> 7)
+ s20 := 2097151 & (load4(s[52:]) >> 4)
+ s21 := 2097151 & (load3(s[55:]) >> 1)
+ s22 := 2097151 & (load4(s[57:]) >> 6)
+ s23 := (load4(s[60:]) >> 3)
+
+ s11 += s23 * 666643
+ s12 += s23 * 470296
+ s13 += s23 * 654183
+ s14 -= s23 * 997805
+ s15 += s23 * 136657
+ s16 -= s23 * 683901
+ s23 = 0
+
+ s10 += s22 * 666643
+ s11 += s22 * 470296
+ s12 += s22 * 654183
+ s13 -= s22 * 997805
+ s14 += s22 * 136657
+ s15 -= s22 * 683901
+ s22 = 0
+
+ s9 += s21 * 666643
+ s10 += s21 * 470296
+ s11 += s21 * 654183
+ s12 -= s21 * 997805
+ s13 += s21 * 136657
+ s14 -= s21 * 683901
+ s21 = 0
+
+ s8 += s20 * 666643
+ s9 += s20 * 470296
+ s10 += s20 * 654183
+ s11 -= s20 * 997805
+ s12 += s20 * 136657
+ s13 -= s20 * 683901
+ s20 = 0
+
+ s7 += s19 * 666643
+ s8 += s19 * 470296
+ s9 += s19 * 654183
+ s10 -= s19 * 997805
+ s11 += s19 * 136657
+ s12 -= s19 * 683901
+ s19 = 0
+
+ s6 += s18 * 666643
+ s7 += s18 * 470296
+ s8 += s18 * 654183
+ s9 -= s18 * 997805
+ s10 += s18 * 136657
+ s11 -= s18 * 683901
+ s18 = 0
+
+ var carry [17]int64
+
+ carry[6] = (s6 + (1 << 20)) >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[8] = (s8 + (1 << 20)) >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[10] = (s10 + (1 << 20)) >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+ carry[12] = (s12 + (1 << 20)) >> 21
+ s13 += carry[12]
+ s12 -= carry[12] << 21
+ carry[14] = (s14 + (1 << 20)) >> 21
+ s15 += carry[14]
+ s14 -= carry[14] << 21
+ carry[16] = (s16 + (1 << 20)) >> 21
+ s17 += carry[16]
+ s16 -= carry[16] << 21
+
+ carry[7] = (s7 + (1 << 20)) >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[9] = (s9 + (1 << 20)) >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[11] = (s11 + (1 << 20)) >> 21
+ s12 += carry[11]
+ s11 -= carry[11] << 21
+ carry[13] = (s13 + (1 << 20)) >> 21
+ s14 += carry[13]
+ s13 -= carry[13] << 21
+ carry[15] = (s15 + (1 << 20)) >> 21
+ s16 += carry[15]
+ s15 -= carry[15] << 21
+
+ s5 += s17 * 666643
+ s6 += s17 * 470296
+ s7 += s17 * 654183
+ s8 -= s17 * 997805
+ s9 += s17 * 136657
+ s10 -= s17 * 683901
+ s17 = 0
+
+ s4 += s16 * 666643
+ s5 += s16 * 470296
+ s6 += s16 * 654183
+ s7 -= s16 * 997805
+ s8 += s16 * 136657
+ s9 -= s16 * 683901
+ s16 = 0
+
+ s3 += s15 * 666643
+ s4 += s15 * 470296
+ s5 += s15 * 654183
+ s6 -= s15 * 997805
+ s7 += s15 * 136657
+ s8 -= s15 * 683901
+ s15 = 0
+
+ s2 += s14 * 666643
+ s3 += s14 * 470296
+ s4 += s14 * 654183
+ s5 -= s14 * 997805
+ s6 += s14 * 136657
+ s7 -= s14 * 683901
+ s14 = 0
+
+ s1 += s13 * 666643
+ s2 += s13 * 470296
+ s3 += s13 * 654183
+ s4 -= s13 * 997805
+ s5 += s13 * 136657
+ s6 -= s13 * 683901
+ s13 = 0
+
+ s0 += s12 * 666643
+ s1 += s12 * 470296
+ s2 += s12 * 654183
+ s3 -= s12 * 997805
+ s4 += s12 * 136657
+ s5 -= s12 * 683901
+ s12 = 0
+
+ carry[0] = (s0 + (1 << 20)) >> 21
+ s1 += carry[0]
+ s0 -= carry[0] << 21
+ carry[2] = (s2 + (1 << 20)) >> 21
+ s3 += carry[2]
+ s2 -= carry[2] << 21
+ carry[4] = (s4 + (1 << 20)) >> 21
+ s5 += carry[4]
+ s4 -= carry[4] << 21
+ carry[6] = (s6 + (1 << 20)) >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[8] = (s8 + (1 << 20)) >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[10] = (s10 + (1 << 20)) >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+
+ carry[1] = (s1 + (1 << 20)) >> 21
+ s2 += carry[1]
+ s1 -= carry[1] << 21
+ carry[3] = (s3 + (1 << 20)) >> 21
+ s4 += carry[3]
+ s3 -= carry[3] << 21
+ carry[5] = (s5 + (1 << 20)) >> 21
+ s6 += carry[5]
+ s5 -= carry[5] << 21
+ carry[7] = (s7 + (1 << 20)) >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[9] = (s9 + (1 << 20)) >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[11] = (s11 + (1 << 20)) >> 21
+ s12 += carry[11]
+ s11 -= carry[11] << 21
+
+ s0 += s12 * 666643
+ s1 += s12 * 470296
+ s2 += s12 * 654183
+ s3 -= s12 * 997805
+ s4 += s12 * 136657
+ s5 -= s12 * 683901
+ s12 = 0
+
+ carry[0] = s0 >> 21
+ s1 += carry[0]
+ s0 -= carry[0] << 21
+ carry[1] = s1 >> 21
+ s2 += carry[1]
+ s1 -= carry[1] << 21
+ carry[2] = s2 >> 21
+ s3 += carry[2]
+ s2 -= carry[2] << 21
+ carry[3] = s3 >> 21
+ s4 += carry[3]
+ s3 -= carry[3] << 21
+ carry[4] = s4 >> 21
+ s5 += carry[4]
+ s4 -= carry[4] << 21
+ carry[5] = s5 >> 21
+ s6 += carry[5]
+ s5 -= carry[5] << 21
+ carry[6] = s6 >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[7] = s7 >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[8] = s8 >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[9] = s9 >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[10] = s10 >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+ carry[11] = s11 >> 21
+ s12 += carry[11]
+ s11 -= carry[11] << 21
+
+ s0 += s12 * 666643
+ s1 += s12 * 470296
+ s2 += s12 * 654183
+ s3 -= s12 * 997805
+ s4 += s12 * 136657
+ s5 -= s12 * 683901
+ s12 = 0
+
+ carry[0] = s0 >> 21
+ s1 += carry[0]
+ s0 -= carry[0] << 21
+ carry[1] = s1 >> 21
+ s2 += carry[1]
+ s1 -= carry[1] << 21
+ carry[2] = s2 >> 21
+ s3 += carry[2]
+ s2 -= carry[2] << 21
+ carry[3] = s3 >> 21
+ s4 += carry[3]
+ s3 -= carry[3] << 21
+ carry[4] = s4 >> 21
+ s5 += carry[4]
+ s4 -= carry[4] << 21
+ carry[5] = s5 >> 21
+ s6 += carry[5]
+ s5 -= carry[5] << 21
+ carry[6] = s6 >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[7] = s7 >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[8] = s8 >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[9] = s9 >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[10] = s10 >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+
+ out[0] = byte(s0 >> 0)
+ out[1] = byte(s0 >> 8)
+ out[2] = byte((s0 >> 16) | (s1 << 5))
+ out[3] = byte(s1 >> 3)
+ out[4] = byte(s1 >> 11)
+ out[5] = byte((s1 >> 19) | (s2 << 2))
+ out[6] = byte(s2 >> 6)
+ out[7] = byte((s2 >> 14) | (s3 << 7))
+ out[8] = byte(s3 >> 1)
+ out[9] = byte(s3 >> 9)
+ out[10] = byte((s3 >> 17) | (s4 << 4))
+ out[11] = byte(s4 >> 4)
+ out[12] = byte(s4 >> 12)
+ out[13] = byte((s4 >> 20) | (s5 << 1))
+ out[14] = byte(s5 >> 7)
+ out[15] = byte((s5 >> 15) | (s6 << 6))
+ out[16] = byte(s6 >> 2)
+ out[17] = byte(s6 >> 10)
+ out[18] = byte((s6 >> 18) | (s7 << 3))
+ out[19] = byte(s7 >> 5)
+ out[20] = byte(s7 >> 13)
+ out[21] = byte(s8 >> 0)
+ out[22] = byte(s8 >> 8)
+ out[23] = byte((s8 >> 16) | (s9 << 5))
+ out[24] = byte(s9 >> 3)
+ out[25] = byte(s9 >> 11)
+ out[26] = byte((s9 >> 19) | (s10 << 2))
+ out[27] = byte(s10 >> 6)
+ out[28] = byte((s10 >> 14) | (s11 << 7))
+ out[29] = byte(s11 >> 1)
+ out[30] = byte(s11 >> 9)
+ out[31] = byte(s11 >> 17)
+}
+
+// nonAdjacentForm computes a width-w non-adjacent form for this scalar.
+//
+// w must be between 2 and 8, or nonAdjacentForm will panic.
+func (s *Scalar) nonAdjacentForm(w uint) [256]int8 {
+ // This implementation is adapted from the one
+ // in curve25519-dalek and is documented there:
+ // https://github.com/dalek-cryptography/curve25519-dalek/blob/f630041af28e9a405255f98a8a93adca18e4315b/src/scalar.rs#L800-L871
+ if s.s[31] > 127 {
+ panic("scalar has high bit set illegally")
+ }
+ if w < 2 {
+ panic("w must be at least 2 by the definition of NAF")
+ } else if w > 8 {
+ panic("NAF digits must fit in int8")
+ }
+
+ var naf [256]int8
+ var digits [5]uint64
+
+ for i := 0; i < 4; i++ {
+ digits[i] = binary.LittleEndian.Uint64(s.s[i*8:])
+ }
+
+ width := uint64(1 << w)
+ windowMask := uint64(width - 1)
+
+ pos := uint(0)
+ carry := uint64(0)
+ for pos < 256 {
+ indexU64 := pos / 64
+ indexBit := pos % 64
+ var bitBuf uint64
+ if indexBit < 64-w {
+ // This window's bits are contained in a single u64
+ bitBuf = digits[indexU64] >> indexBit
+ } else {
+ // Combine the current 64 bits with bits from the next 64
+ bitBuf = (digits[indexU64] >> indexBit) | (digits[1+indexU64] << (64 - indexBit))
+ }
+
+ // Add carry into the current window
+ window := carry + (bitBuf & windowMask)
+
+ if window&1 == 0 {
+ // If the window value is even, preserve the carry and continue.
+ // Why is the carry preserved?
+ // If carry == 0 and window & 1 == 0,
+ // then the next carry should be 0
+ // If carry == 1 and window & 1 == 0,
+ // then bit_buf & 1 == 1 so the next carry should be 1
+ pos += 1
+ continue
+ }
+
+ if window < width/2 {
+ carry = 0
+ naf[pos] = int8(window)
+ } else {
+ carry = 1
+ naf[pos] = int8(window) - int8(width)
+ }
+
+ pos += w
+ }
+ return naf
+}
+
+func (s *Scalar) signedRadix16() [64]int8 {
+ if s.s[31] > 127 {
+ panic("scalar has high bit set illegally")
+ }
+
+ var digits [64]int8
+
+ // Compute unsigned radix-16 digits:
+ for i := 0; i < 32; i++ {
+ digits[2*i] = int8(s.s[i] & 15)
+ digits[2*i+1] = int8((s.s[i] >> 4) & 15)
+ }
+
+ // Recenter coefficients:
+ for i := 0; i < 63; i++ {
+ carry := (digits[i] + 8) >> 4
+ digits[i] -= carry << 4
+ digits[i+1] += carry
+ }
+
+ return digits
+}
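
Both digit decompositions above satisfy the same reconstruction property: the scalar equals sum(naf[i]·2^i) and also sum(digits[i]·16^i). A hypothetical in-package test sketch (the methods are unexported, so this assumes it lives alongside this file):

package edwards25519

import (
	"math/big"
	"testing"
)

// bigFromScalar converts the little-endian scalar encoding to a big.Int.
func bigFromScalar(s *Scalar) *big.Int {
	b := s.Bytes()
	for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
		b[i], b[j] = b[j], b[i] // big.Int.SetBytes expects big-endian
	}
	return new(big.Int).SetBytes(b)
}

func TestDigitDecompositions(t *testing.T) {
	s, err := NewScalar().SetCanonicalBytes(append([]byte{42}, make([]byte, 31)...))
	if err != nil {
		t.Fatal(err)
	}
	want := bigFromScalar(s)

	// sum(naf[i] * 2^i) must equal s.
	naf := s.nonAdjacentForm(5)
	got := new(big.Int)
	for i := len(naf) - 1; i >= 0; i-- {
		got.Lsh(got, 1)
		got.Add(got, big.NewInt(int64(naf[i])))
	}
	if got.Cmp(want) != 0 {
		t.Error("NAF digits do not reconstruct the scalar")
	}

	// sum(digits[i] * 16^i) must equal s.
	digits := s.signedRadix16()
	got.SetInt64(0)
	for i := len(digits) - 1; i >= 0; i-- {
		got.Lsh(got, 4)
		got.Add(got, big.NewInt(int64(digits[i])))
	}
	if got.Cmp(want) != 0 {
		t.Error("radix-16 digits do not reconstruct the scalar")
	}
}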
diff --git a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/scalarmult.go b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/scalarmult.go
index f7ca3cef99..f7ca3cef99 100644
--- a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/scalarmult.go
+++ b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/scalarmult.go
diff --git a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/tables.go b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/tables.go
index 5ca40f7bfa..5ca40f7bfa 100644
--- a/contrib/go/_std_1.18/src/crypto/ed25519/internal/edwards25519/tables.go
+++ b/contrib/go/_std_1.19/src/crypto/internal/edwards25519/tables.go
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p224.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p224.go
index 4dddeb07a4..4dddeb07a4 100644
--- a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p224.go
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p224.go
diff --git a/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p224_fiat64.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p224_fiat64.go
new file mode 100644
index 0000000000..9337bfefef
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p224_fiat64.go
@@ -0,0 +1,1461 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --no-wide-int --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --internal-static --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name fiat --no-prefix-fiat p224 64 '2^224 - 2^96 + 1' mul square add sub one from_montgomery to_montgomery selectznz to_bytes from_bytes
+//
+// curve description: p224
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, square, add, sub, one, from_montgomery, to_montgomery, selectznz, to_bytes, from_bytes
+//
+// m = 0xffffffffffffffffffffffffffffffff000000000000000000000001 (from "2^224 - 2^96 + 1")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+//
+// functions synthesized for this Montgomery arithmetic require the
+//
+// input to be strictly less than the prime modulus (m), and also
+//
+// require the input to be in the unique saturated representation.
+//
+// All functions also ensure that these two properties are true of
+//
+// return values.
+//
+//
+//
+// Computed values:
+//
+// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192)
+//
+// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216)
+//
+// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in
+//
+// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256
+
+package fiat
+
+import "math/bits"
+
+type p224Uint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type p224Int1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type p224MontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p224MontgomeryDomainFieldElement [4]uint64
+
+// The type p224NonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p224NonMontgomeryDomainFieldElement [4]uint64
+
+// p224CmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+//
+// out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [0x0 ~> 0xffffffffffffffff]
+// arg3: [0x0 ~> 0xffffffffffffffff]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func p224CmovznzU64(out1 *uint64, arg1 p224Uint1, arg2 uint64, arg3 uint64) {
+ x1 := (uint64(arg1) * 0xffffffffffffffff)
+ x2 := ((x1 & arg3) | ((^x1) & arg2))
+ *out1 = x2
+}
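// A standalone illustration, not part of this file: the multiplication by
// 0xffffffffffffffff above expands the 0/1 selector into an all-zeros or
// all-ones mask, so the select is branch-free and constant-time.
package main

import "fmt"

func main() {
	a, b := uint64(111), uint64(222)
	for _, bit := range []uint64{0, 1} {
		mask := bit * 0xffffffffffffffff // 0x00...00 or 0xff...ff
		out := (mask & b) | (^mask & a)  // bit == 0 -> a, bit == 1 -> b
		fmt.Printf("bit=%d selects %d\n", bit, out)
	}
}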
+
+// p224Mul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p224Mul(out1 *p224MontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement, arg2 *p224MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, arg2[3])
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, arg2[2])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, arg2[1])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, arg2[0])
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(p224Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(p224Uint1(x16)))
+ x19 := (uint64(p224Uint1(x18)) + x6)
+ var x20 uint64
+ _, x20 = bits.Mul64(x11, 0xffffffffffffffff)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x20, 0xffffffff)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x20, 0xffffffffffffffff)
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x20, 0xffffffff00000000)
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x25, x22, uint64(p224Uint1(x29)))
+ x32 := (uint64(p224Uint1(x31)) + x23)
+ var x34 uint64
+ _, x34 = bits.Add64(x11, x20, uint64(0x0))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x13, x26, uint64(p224Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x15, x28, uint64(p224Uint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x17, x30, uint64(p224Uint1(x38)))
+ var x41 uint64
+ var x42 uint64
+ x41, x42 = bits.Add64(x19, x32, uint64(p224Uint1(x40)))
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, arg2[3])
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, arg2[2])
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x1, arg2[1])
+ var x49 uint64
+ var x50 uint64
+ x50, x49 = bits.Mul64(x1, arg2[0])
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x50, x47, uint64(0x0))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x48, x45, uint64(p224Uint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x46, x43, uint64(p224Uint1(x54)))
+ x57 := (uint64(p224Uint1(x56)) + x44)
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x35, x49, uint64(0x0))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x37, x51, uint64(p224Uint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x39, x53, uint64(p224Uint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(x41, x55, uint64(p224Uint1(x63)))
+ var x66 uint64
+ var x67 uint64
+ x66, x67 = bits.Add64(uint64(p224Uint1(x42)), x57, uint64(p224Uint1(x65)))
+ var x68 uint64
+ _, x68 = bits.Mul64(x58, 0xffffffffffffffff)
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x68, 0xffffffff)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x68, 0xffffffffffffffff)
+ var x74 uint64
+ var x75 uint64
+ x75, x74 = bits.Mul64(x68, 0xffffffff00000000)
+ var x76 uint64
+ var x77 uint64
+ x76, x77 = bits.Add64(x75, x72, uint64(0x0))
+ var x78 uint64
+ var x79 uint64
+ x78, x79 = bits.Add64(x73, x70, uint64(p224Uint1(x77)))
+ x80 := (uint64(p224Uint1(x79)) + x71)
+ var x82 uint64
+ _, x82 = bits.Add64(x58, x68, uint64(0x0))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x60, x74, uint64(p224Uint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x62, x76, uint64(p224Uint1(x84)))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x64, x78, uint64(p224Uint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x66, x80, uint64(p224Uint1(x88)))
+ x91 := (uint64(p224Uint1(x90)) + uint64(p224Uint1(x67)))
+ var x92 uint64
+ var x93 uint64
+ x93, x92 = bits.Mul64(x2, arg2[3])
+ var x94 uint64
+ var x95 uint64
+ x95, x94 = bits.Mul64(x2, arg2[2])
+ var x96 uint64
+ var x97 uint64
+ x97, x96 = bits.Mul64(x2, arg2[1])
+ var x98 uint64
+ var x99 uint64
+ x99, x98 = bits.Mul64(x2, arg2[0])
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x99, x96, uint64(0x0))
+ var x102 uint64
+ var x103 uint64
+ x102, x103 = bits.Add64(x97, x94, uint64(p224Uint1(x101)))
+ var x104 uint64
+ var x105 uint64
+ x104, x105 = bits.Add64(x95, x92, uint64(p224Uint1(x103)))
+ x106 := (uint64(p224Uint1(x105)) + x93)
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x83, x98, uint64(0x0))
+ var x109 uint64
+ var x110 uint64
+ x109, x110 = bits.Add64(x85, x100, uint64(p224Uint1(x108)))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x87, x102, uint64(p224Uint1(x110)))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x89, x104, uint64(p224Uint1(x112)))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x91, x106, uint64(p224Uint1(x114)))
+ var x117 uint64
+ _, x117 = bits.Mul64(x107, 0xffffffffffffffff)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x117, 0xffffffff)
+ var x121 uint64
+ var x122 uint64
+ x122, x121 = bits.Mul64(x117, 0xffffffffffffffff)
+ var x123 uint64
+ var x124 uint64
+ x124, x123 = bits.Mul64(x117, 0xffffffff00000000)
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64(x124, x121, uint64(0x0))
+ var x127 uint64
+ var x128 uint64
+ x127, x128 = bits.Add64(x122, x119, uint64(p224Uint1(x126)))
+ x129 := (uint64(p224Uint1(x128)) + x120)
+ var x131 uint64
+ _, x131 = bits.Add64(x107, x117, uint64(0x0))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x109, x123, uint64(p224Uint1(x131)))
+ var x134 uint64
+ var x135 uint64
+ x134, x135 = bits.Add64(x111, x125, uint64(p224Uint1(x133)))
+ var x136 uint64
+ var x137 uint64
+ x136, x137 = bits.Add64(x113, x127, uint64(p224Uint1(x135)))
+ var x138 uint64
+ var x139 uint64
+ x138, x139 = bits.Add64(x115, x129, uint64(p224Uint1(x137)))
+ x140 := (uint64(p224Uint1(x139)) + uint64(p224Uint1(x116)))
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x3, arg2[3])
+ var x143 uint64
+ var x144 uint64
+ x144, x143 = bits.Mul64(x3, arg2[2])
+ var x145 uint64
+ var x146 uint64
+ x146, x145 = bits.Mul64(x3, arg2[1])
+ var x147 uint64
+ var x148 uint64
+ x148, x147 = bits.Mul64(x3, arg2[0])
+ var x149 uint64
+ var x150 uint64
+ x149, x150 = bits.Add64(x148, x145, uint64(0x0))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x146, x143, uint64(p224Uint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x144, x141, uint64(p224Uint1(x152)))
+ x155 := (uint64(p224Uint1(x154)) + x142)
+ var x156 uint64
+ var x157 uint64
+ x156, x157 = bits.Add64(x132, x147, uint64(0x0))
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x134, x149, uint64(p224Uint1(x157)))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x136, x151, uint64(p224Uint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Add64(x138, x153, uint64(p224Uint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Add64(x140, x155, uint64(p224Uint1(x163)))
+ var x166 uint64
+ _, x166 = bits.Mul64(x156, 0xffffffffffffffff)
+ var x168 uint64
+ var x169 uint64
+ x169, x168 = bits.Mul64(x166, 0xffffffff)
+ var x170 uint64
+ var x171 uint64
+ x171, x170 = bits.Mul64(x166, 0xffffffffffffffff)
+ var x172 uint64
+ var x173 uint64
+ x173, x172 = bits.Mul64(x166, 0xffffffff00000000)
+ var x174 uint64
+ var x175 uint64
+ x174, x175 = bits.Add64(x173, x170, uint64(0x0))
+ var x176 uint64
+ var x177 uint64
+ x176, x177 = bits.Add64(x171, x168, uint64(p224Uint1(x175)))
+ x178 := (uint64(p224Uint1(x177)) + x169)
+ var x180 uint64
+ _, x180 = bits.Add64(x156, x166, uint64(0x0))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x158, x172, uint64(p224Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x160, x174, uint64(p224Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x162, x176, uint64(p224Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x164, x178, uint64(p224Uint1(x186)))
+ x189 := (uint64(p224Uint1(x188)) + uint64(p224Uint1(x165)))
+ var x190 uint64
+ var x191 uint64
+ x190, x191 = bits.Sub64(x181, uint64(0x1), uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Sub64(x183, 0xffffffff00000000, uint64(p224Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Sub64(x185, 0xffffffffffffffff, uint64(p224Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Sub64(x187, 0xffffffff, uint64(p224Uint1(x195)))
+ var x199 uint64
+ _, x199 = bits.Sub64(x189, uint64(0x0), uint64(p224Uint1(x197)))
+ var x200 uint64
+ p224CmovznzU64(&x200, p224Uint1(x199), x190, x181)
+ var x201 uint64
+ p224CmovznzU64(&x201, p224Uint1(x199), x192, x183)
+ var x202 uint64
+ p224CmovznzU64(&x202, p224Uint1(x199), x194, x185)
+ var x203 uint64
+ p224CmovznzU64(&x203, p224Uint1(x199), x196, x187)
+ out1[0] = x200
+ out1[1] = x201
+ out1[2] = x202
+ out1[3] = x203
+}
+
+// p224Square squares a field element in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg1)) mod m
+// 0 ≤ eval out1 < m
+func p224Square(out1 *p224MontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, arg1[3])
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, arg1[2])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, arg1[1])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, arg1[0])
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(p224Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(p224Uint1(x16)))
+ x19 := (uint64(p224Uint1(x18)) + x6)
+ var x20 uint64
+ _, x20 = bits.Mul64(x11, 0xffffffffffffffff)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x20, 0xffffffff)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x20, 0xffffffffffffffff)
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x20, 0xffffffff00000000)
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x25, x22, uint64(p224Uint1(x29)))
+ x32 := (uint64(p224Uint1(x31)) + x23)
+ var x34 uint64
+ _, x34 = bits.Add64(x11, x20, uint64(0x0))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x13, x26, uint64(p224Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x15, x28, uint64(p224Uint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x17, x30, uint64(p224Uint1(x38)))
+ var x41 uint64
+ var x42 uint64
+ x41, x42 = bits.Add64(x19, x32, uint64(p224Uint1(x40)))
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, arg1[3])
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, arg1[2])
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x1, arg1[1])
+ var x49 uint64
+ var x50 uint64
+ x50, x49 = bits.Mul64(x1, arg1[0])
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x50, x47, uint64(0x0))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x48, x45, uint64(p224Uint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x46, x43, uint64(p224Uint1(x54)))
+ x57 := (uint64(p224Uint1(x56)) + x44)
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x35, x49, uint64(0x0))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x37, x51, uint64(p224Uint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x39, x53, uint64(p224Uint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(x41, x55, uint64(p224Uint1(x63)))
+ var x66 uint64
+ var x67 uint64
+ x66, x67 = bits.Add64(uint64(p224Uint1(x42)), x57, uint64(p224Uint1(x65)))
+ var x68 uint64
+ _, x68 = bits.Mul64(x58, 0xffffffffffffffff)
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x68, 0xffffffff)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x68, 0xffffffffffffffff)
+ var x74 uint64
+ var x75 uint64
+ x75, x74 = bits.Mul64(x68, 0xffffffff00000000)
+ var x76 uint64
+ var x77 uint64
+ x76, x77 = bits.Add64(x75, x72, uint64(0x0))
+ var x78 uint64
+ var x79 uint64
+ x78, x79 = bits.Add64(x73, x70, uint64(p224Uint1(x77)))
+ x80 := (uint64(p224Uint1(x79)) + x71)
+ var x82 uint64
+ _, x82 = bits.Add64(x58, x68, uint64(0x0))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x60, x74, uint64(p224Uint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x62, x76, uint64(p224Uint1(x84)))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x64, x78, uint64(p224Uint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x66, x80, uint64(p224Uint1(x88)))
+ x91 := (uint64(p224Uint1(x90)) + uint64(p224Uint1(x67)))
+ var x92 uint64
+ var x93 uint64
+ x93, x92 = bits.Mul64(x2, arg1[3])
+ var x94 uint64
+ var x95 uint64
+ x95, x94 = bits.Mul64(x2, arg1[2])
+ var x96 uint64
+ var x97 uint64
+ x97, x96 = bits.Mul64(x2, arg1[1])
+ var x98 uint64
+ var x99 uint64
+ x99, x98 = bits.Mul64(x2, arg1[0])
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x99, x96, uint64(0x0))
+ var x102 uint64
+ var x103 uint64
+ x102, x103 = bits.Add64(x97, x94, uint64(p224Uint1(x101)))
+ var x104 uint64
+ var x105 uint64
+ x104, x105 = bits.Add64(x95, x92, uint64(p224Uint1(x103)))
+ x106 := (uint64(p224Uint1(x105)) + x93)
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x83, x98, uint64(0x0))
+ var x109 uint64
+ var x110 uint64
+ x109, x110 = bits.Add64(x85, x100, uint64(p224Uint1(x108)))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x87, x102, uint64(p224Uint1(x110)))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x89, x104, uint64(p224Uint1(x112)))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x91, x106, uint64(p224Uint1(x114)))
+ var x117 uint64
+ _, x117 = bits.Mul64(x107, 0xffffffffffffffff)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x117, 0xffffffff)
+ var x121 uint64
+ var x122 uint64
+ x122, x121 = bits.Mul64(x117, 0xffffffffffffffff)
+ var x123 uint64
+ var x124 uint64
+ x124, x123 = bits.Mul64(x117, 0xffffffff00000000)
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64(x124, x121, uint64(0x0))
+ var x127 uint64
+ var x128 uint64
+ x127, x128 = bits.Add64(x122, x119, uint64(p224Uint1(x126)))
+ x129 := (uint64(p224Uint1(x128)) + x120)
+ var x131 uint64
+ _, x131 = bits.Add64(x107, x117, uint64(0x0))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x109, x123, uint64(p224Uint1(x131)))
+ var x134 uint64
+ var x135 uint64
+ x134, x135 = bits.Add64(x111, x125, uint64(p224Uint1(x133)))
+ var x136 uint64
+ var x137 uint64
+ x136, x137 = bits.Add64(x113, x127, uint64(p224Uint1(x135)))
+ var x138 uint64
+ var x139 uint64
+ x138, x139 = bits.Add64(x115, x129, uint64(p224Uint1(x137)))
+ x140 := (uint64(p224Uint1(x139)) + uint64(p224Uint1(x116)))
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x3, arg1[3])
+ var x143 uint64
+ var x144 uint64
+ x144, x143 = bits.Mul64(x3, arg1[2])
+ var x145 uint64
+ var x146 uint64
+ x146, x145 = bits.Mul64(x3, arg1[1])
+ var x147 uint64
+ var x148 uint64
+ x148, x147 = bits.Mul64(x3, arg1[0])
+ var x149 uint64
+ var x150 uint64
+ x149, x150 = bits.Add64(x148, x145, uint64(0x0))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x146, x143, uint64(p224Uint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x144, x141, uint64(p224Uint1(x152)))
+ x155 := (uint64(p224Uint1(x154)) + x142)
+ var x156 uint64
+ var x157 uint64
+ x156, x157 = bits.Add64(x132, x147, uint64(0x0))
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x134, x149, uint64(p224Uint1(x157)))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x136, x151, uint64(p224Uint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Add64(x138, x153, uint64(p224Uint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Add64(x140, x155, uint64(p224Uint1(x163)))
+ var x166 uint64
+ _, x166 = bits.Mul64(x156, 0xffffffffffffffff)
+ var x168 uint64
+ var x169 uint64
+ x169, x168 = bits.Mul64(x166, 0xffffffff)
+ var x170 uint64
+ var x171 uint64
+ x171, x170 = bits.Mul64(x166, 0xffffffffffffffff)
+ var x172 uint64
+ var x173 uint64
+ x173, x172 = bits.Mul64(x166, 0xffffffff00000000)
+ var x174 uint64
+ var x175 uint64
+ x174, x175 = bits.Add64(x173, x170, uint64(0x0))
+ var x176 uint64
+ var x177 uint64
+ x176, x177 = bits.Add64(x171, x168, uint64(p224Uint1(x175)))
+ x178 := (uint64(p224Uint1(x177)) + x169)
+ var x180 uint64
+ _, x180 = bits.Add64(x156, x166, uint64(0x0))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x158, x172, uint64(p224Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x160, x174, uint64(p224Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x162, x176, uint64(p224Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x164, x178, uint64(p224Uint1(x186)))
+ x189 := (uint64(p224Uint1(x188)) + uint64(p224Uint1(x165)))
+ var x190 uint64
+ var x191 uint64
+ x190, x191 = bits.Sub64(x181, uint64(0x1), uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Sub64(x183, 0xffffffff00000000, uint64(p224Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Sub64(x185, 0xffffffffffffffff, uint64(p224Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Sub64(x187, 0xffffffff, uint64(p224Uint1(x195)))
+ var x199 uint64
+ _, x199 = bits.Sub64(x189, uint64(0x0), uint64(p224Uint1(x197)))
+ var x200 uint64
+ p224CmovznzU64(&x200, p224Uint1(x199), x190, x181)
+ var x201 uint64
+ p224CmovznzU64(&x201, p224Uint1(x199), x192, x183)
+ var x202 uint64
+ p224CmovznzU64(&x202, p224Uint1(x199), x194, x185)
+ var x203 uint64
+ p224CmovznzU64(&x203, p224Uint1(x199), x196, x187)
+ out1[0] = x200
+ out1[1] = x201
+ out1[2] = x202
+ out1[3] = x203
+}
+
+// p224Add adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p224Add(out1 *p224MontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement, arg2 *p224MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(p224Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(p224Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(p224Uint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Sub64(x1, uint64(0x1), uint64(0x0))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Sub64(x3, 0xffffffff00000000, uint64(p224Uint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Sub64(x5, 0xffffffffffffffff, uint64(p224Uint1(x12)))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Sub64(x7, 0xffffffff, uint64(p224Uint1(x14)))
+ var x18 uint64
+ _, x18 = bits.Sub64(uint64(p224Uint1(x8)), uint64(0x0), uint64(p224Uint1(x16)))
+ var x19 uint64
+ p224CmovznzU64(&x19, p224Uint1(x18), x9, x1)
+ var x20 uint64
+ p224CmovznzU64(&x20, p224Uint1(x18), x11, x3)
+ var x21 uint64
+ p224CmovznzU64(&x21, p224Uint1(x18), x13, x5)
+ var x22 uint64
+ p224CmovznzU64(&x22, p224Uint1(x18), x15, x7)
+ out1[0] = x19
+ out1[1] = x20
+ out1[2] = x21
+ out1[3] = x22
+}
+
+// p224Sub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p224Sub(out1 *p224MontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement, arg2 *p224MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(p224Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(p224Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(p224Uint1(x6)))
+ var x9 uint64
+ p224CmovznzU64(&x9, p224Uint1(x8), uint64(0x0), 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x1, uint64((p224Uint1(x9) & 0x1)), uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x3, (x9 & 0xffffffff00000000), uint64(p224Uint1(x11)))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x5, x9, uint64(p224Uint1(x13)))
+ var x16 uint64
+ x16, _ = bits.Add64(x7, (x9 & 0xffffffff), uint64(p224Uint1(x15)))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+}
+
+// p224SetOne returns the field element one in the Montgomery domain.
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = 1 mod m
+// 0 ≤ eval out1 < m
+func p224SetOne(out1 *p224MontgomeryDomainFieldElement) {
+ out1[0] = 0xffffffff00000000
+ out1[1] = 0xffffffffffffffff
+ out1[2] = uint64(0x0)
+ out1[3] = uint64(0x0)
+}
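// A standalone sketch, not part of this file: the constant stored above is 1
// in the Montgomery domain, i.e. R mod m with R = 2^256. A math/big check
// that the limbs evaluate to 2^256 mod m:
package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	m := new(big.Int).Lsh(one, 224)
	m.Sub(m, new(big.Int).Lsh(one, 96))
	m.Add(m, one) // m = 2^224 - 2^96 + 1
	r := new(big.Int).Mod(new(big.Int).Lsh(one, 256), m)

	limbs := []uint64{0xffffffff00000000, 0xffffffffffffffff, 0, 0}
	eval := new(big.Int)
	for i := len(limbs) - 1; i >= 0; i-- {
		eval.Lsh(eval, 64)
		eval.Add(eval, new(big.Int).SetUint64(limbs[i]))
	}
	fmt.Println(eval.Cmp(r) == 0) // prints true
}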
+
+// p224FromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m
+// 0 ≤ eval out1 < m
+func p224FromMontgomery(out1 *p224NonMontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement) {
+ x1 := arg1[0]
+ var x2 uint64
+ _, x2 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x4 uint64
+ var x5 uint64
+ x5, x4 = bits.Mul64(x2, 0xffffffff)
+ var x6 uint64
+ var x7 uint64
+ x7, x6 = bits.Mul64(x2, 0xffffffffffffffff)
+ var x8 uint64
+ var x9 uint64
+ x9, x8 = bits.Mul64(x2, 0xffffffff00000000)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x9, x6, uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x7, x4, uint64(p224Uint1(x11)))
+ var x15 uint64
+ _, x15 = bits.Add64(x1, x2, uint64(0x0))
+ var x16 uint64
+ var x17 uint64
+ x16, x17 = bits.Add64(uint64(0x0), x8, uint64(p224Uint1(x15)))
+ var x18 uint64
+ var x19 uint64
+ x18, x19 = bits.Add64(uint64(0x0), x10, uint64(p224Uint1(x17)))
+ var x20 uint64
+ var x21 uint64
+ x20, x21 = bits.Add64(uint64(0x0), x12, uint64(p224Uint1(x19)))
+ var x22 uint64
+ var x23 uint64
+ x22, x23 = bits.Add64(x16, arg1[1], uint64(0x0))
+ var x24 uint64
+ var x25 uint64
+ x24, x25 = bits.Add64(x18, uint64(0x0), uint64(p224Uint1(x23)))
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x20, uint64(0x0), uint64(p224Uint1(x25)))
+ var x28 uint64
+ _, x28 = bits.Mul64(x22, 0xffffffffffffffff)
+ var x30 uint64
+ var x31 uint64
+ x31, x30 = bits.Mul64(x28, 0xffffffff)
+ var x32 uint64
+ var x33 uint64
+ x33, x32 = bits.Mul64(x28, 0xffffffffffffffff)
+ var x34 uint64
+ var x35 uint64
+ x35, x34 = bits.Mul64(x28, 0xffffffff00000000)
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(x35, x32, uint64(0x0))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(x33, x30, uint64(p224Uint1(x37)))
+ var x41 uint64
+ _, x41 = bits.Add64(x22, x28, uint64(0x0))
+ var x42 uint64
+ var x43 uint64
+ x42, x43 = bits.Add64(x24, x34, uint64(p224Uint1(x41)))
+ var x44 uint64
+ var x45 uint64
+ x44, x45 = bits.Add64(x26, x36, uint64(p224Uint1(x43)))
+ var x46 uint64
+ var x47 uint64
+ x46, x47 = bits.Add64((uint64(p224Uint1(x27)) + (uint64(p224Uint1(x21)) + (uint64(p224Uint1(x13)) + x5))), x38, uint64(p224Uint1(x45)))
+ var x48 uint64
+ var x49 uint64
+ x48, x49 = bits.Add64(x42, arg1[2], uint64(0x0))
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x44, uint64(0x0), uint64(p224Uint1(x49)))
+ var x52 uint64
+ var x53 uint64
+ x52, x53 = bits.Add64(x46, uint64(0x0), uint64(p224Uint1(x51)))
+ var x54 uint64
+ _, x54 = bits.Mul64(x48, 0xffffffffffffffff)
+ var x56 uint64
+ var x57 uint64
+ x57, x56 = bits.Mul64(x54, 0xffffffff)
+ var x58 uint64
+ var x59 uint64
+ x59, x58 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x60 uint64
+ var x61 uint64
+ x61, x60 = bits.Mul64(x54, 0xffffffff00000000)
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x61, x58, uint64(0x0))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(x59, x56, uint64(p224Uint1(x63)))
+ var x67 uint64
+ _, x67 = bits.Add64(x48, x54, uint64(0x0))
+ var x68 uint64
+ var x69 uint64
+ x68, x69 = bits.Add64(x50, x60, uint64(p224Uint1(x67)))
+ var x70 uint64
+ var x71 uint64
+ x70, x71 = bits.Add64(x52, x62, uint64(p224Uint1(x69)))
+ var x72 uint64
+ var x73 uint64
+ x72, x73 = bits.Add64((uint64(p224Uint1(x53)) + (uint64(p224Uint1(x47)) + (uint64(p224Uint1(x39)) + x31))), x64, uint64(p224Uint1(x71)))
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64(x68, arg1[3], uint64(0x0))
+ var x76 uint64
+ var x77 uint64
+ x76, x77 = bits.Add64(x70, uint64(0x0), uint64(p224Uint1(x75)))
+ var x78 uint64
+ var x79 uint64
+ x78, x79 = bits.Add64(x72, uint64(0x0), uint64(p224Uint1(x77)))
+ var x80 uint64
+ _, x80 = bits.Mul64(x74, 0xffffffffffffffff)
+ var x82 uint64
+ var x83 uint64
+ x83, x82 = bits.Mul64(x80, 0xffffffff)
+ var x84 uint64
+ var x85 uint64
+ x85, x84 = bits.Mul64(x80, 0xffffffffffffffff)
+ var x86 uint64
+ var x87 uint64
+ x87, x86 = bits.Mul64(x80, 0xffffffff00000000)
+ var x88 uint64
+ var x89 uint64
+ x88, x89 = bits.Add64(x87, x84, uint64(0x0))
+ var x90 uint64
+ var x91 uint64
+ x90, x91 = bits.Add64(x85, x82, uint64(p224Uint1(x89)))
+ var x93 uint64
+ _, x93 = bits.Add64(x74, x80, uint64(0x0))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x76, x86, uint64(p224Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x78, x88, uint64(p224Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64((uint64(p224Uint1(x79)) + (uint64(p224Uint1(x73)) + (uint64(p224Uint1(x65)) + x57))), x90, uint64(p224Uint1(x97)))
+ x100 := (uint64(p224Uint1(x99)) + (uint64(p224Uint1(x91)) + x83))
+ var x101 uint64
+ var x102 uint64
+ x101, x102 = bits.Sub64(x94, uint64(0x1), uint64(0x0))
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Sub64(x96, 0xffffffff00000000, uint64(p224Uint1(x102)))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Sub64(x98, 0xffffffffffffffff, uint64(p224Uint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Sub64(x100, 0xffffffff, uint64(p224Uint1(x106)))
+ var x110 uint64
+ _, x110 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p224Uint1(x108)))
+ var x111 uint64
+ p224CmovznzU64(&x111, p224Uint1(x110), x101, x94)
+ var x112 uint64
+ p224CmovznzU64(&x112, p224Uint1(x110), x103, x96)
+ var x113 uint64
+ p224CmovznzU64(&x113, p224Uint1(x110), x105, x98)
+ var x114 uint64
+ p224CmovznzU64(&x114, p224Uint1(x110), x107, x100)
+ out1[0] = x111
+ out1[1] = x112
+ out1[2] = x113
+ out1[3] = x114
+}
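+
+// p224FromMontgomery above runs four rounds of word-by-word Montgomery
+// reduction: each round computes t = a0·m' mod 2^64 with m' = -m⁻¹ mod 2^64
+// (here 0xffffffffffffffff, because the low limb of m is 1), then adds t·m
+// so the low word cancels and the accumulator shifts down by one limb. After
+// four rounds the value is arg1·R⁻¹ mod m, finished by one conditional
+// subtraction of m.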
+
+// p224ToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = eval arg1 mod m
+// 0 ≤ eval out1 < m
+func p224ToMontgomery(out1 *p224MontgomeryDomainFieldElement, arg1 *p224NonMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, 0xffffffff)
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, 0xfffffffe00000000)
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, 0xffffffff00000000)
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, 0xffffffff00000001)
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(p224Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(p224Uint1(x16)))
+ var x19 uint64
+ _, x19 = bits.Mul64(x11, 0xffffffffffffffff)
+ var x21 uint64
+ var x22 uint64
+ x22, x21 = bits.Mul64(x19, 0xffffffff)
+ var x23 uint64
+ var x24 uint64
+ x24, x23 = bits.Mul64(x19, 0xffffffffffffffff)
+ var x25 uint64
+ var x26 uint64
+ x26, x25 = bits.Mul64(x19, 0xffffffff00000000)
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Add64(x26, x23, uint64(0x0))
+ var x29 uint64
+ var x30 uint64
+ x29, x30 = bits.Add64(x24, x21, uint64(p224Uint1(x28)))
+ var x32 uint64
+ _, x32 = bits.Add64(x11, x19, uint64(0x0))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x13, x25, uint64(p224Uint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x15, x27, uint64(p224Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x17, x29, uint64(p224Uint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x40, x39 = bits.Mul64(x1, 0xffffffff)
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, 0xfffffffe00000000)
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, 0xffffffff00000000)
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, 0xffffffff00000001)
+ var x47 uint64
+ var x48 uint64
+ x47, x48 = bits.Add64(x46, x43, uint64(0x0))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x44, x41, uint64(p224Uint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x42, x39, uint64(p224Uint1(x50)))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x33, x45, uint64(0x0))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x35, x47, uint64(p224Uint1(x54)))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(x37, x49, uint64(p224Uint1(x56)))
+ var x59 uint64
+ var x60 uint64
+ x59, x60 = bits.Add64(((uint64(p224Uint1(x38)) + (uint64(p224Uint1(x18)) + x6)) + (uint64(p224Uint1(x30)) + x22)), x51, uint64(p224Uint1(x58)))
+ var x61 uint64
+ _, x61 = bits.Mul64(x53, 0xffffffffffffffff)
+ var x63 uint64
+ var x64 uint64
+ x64, x63 = bits.Mul64(x61, 0xffffffff)
+ var x65 uint64
+ var x66 uint64
+ x66, x65 = bits.Mul64(x61, 0xffffffffffffffff)
+ var x67 uint64
+ var x68 uint64
+ x68, x67 = bits.Mul64(x61, 0xffffffff00000000)
+ var x69 uint64
+ var x70 uint64
+ x69, x70 = bits.Add64(x68, x65, uint64(0x0))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x66, x63, uint64(p224Uint1(x70)))
+ var x74 uint64
+ _, x74 = bits.Add64(x53, x61, uint64(0x0))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x55, x67, uint64(p224Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Add64(x57, x69, uint64(p224Uint1(x76)))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Add64(x59, x71, uint64(p224Uint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x82, x81 = bits.Mul64(x2, 0xffffffff)
+ var x83 uint64
+ var x84 uint64
+ x84, x83 = bits.Mul64(x2, 0xfffffffe00000000)
+ var x85 uint64
+ var x86 uint64
+ x86, x85 = bits.Mul64(x2, 0xffffffff00000000)
+ var x87 uint64
+ var x88 uint64
+ x88, x87 = bits.Mul64(x2, 0xffffffff00000001)
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x88, x85, uint64(0x0))
+ var x91 uint64
+ var x92 uint64
+ x91, x92 = bits.Add64(x86, x83, uint64(p224Uint1(x90)))
+ var x93 uint64
+ var x94 uint64
+ x93, x94 = bits.Add64(x84, x81, uint64(p224Uint1(x92)))
+ var x95 uint64
+ var x96 uint64
+ x95, x96 = bits.Add64(x75, x87, uint64(0x0))
+ var x97 uint64
+ var x98 uint64
+ x97, x98 = bits.Add64(x77, x89, uint64(p224Uint1(x96)))
+ var x99 uint64
+ var x100 uint64
+ x99, x100 = bits.Add64(x79, x91, uint64(p224Uint1(x98)))
+ var x101 uint64
+ var x102 uint64
+ x101, x102 = bits.Add64(((uint64(p224Uint1(x80)) + (uint64(p224Uint1(x60)) + (uint64(p224Uint1(x52)) + x40))) + (uint64(p224Uint1(x72)) + x64)), x93, uint64(p224Uint1(x100)))
+ var x103 uint64
+ _, x103 = bits.Mul64(x95, 0xffffffffffffffff)
+ var x105 uint64
+ var x106 uint64
+ x106, x105 = bits.Mul64(x103, 0xffffffff)
+ var x107 uint64
+ var x108 uint64
+ x108, x107 = bits.Mul64(x103, 0xffffffffffffffff)
+ var x109 uint64
+ var x110 uint64
+ x110, x109 = bits.Mul64(x103, 0xffffffff00000000)
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x110, x107, uint64(0x0))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x108, x105, uint64(p224Uint1(x112)))
+ var x116 uint64
+ _, x116 = bits.Add64(x95, x103, uint64(0x0))
+ var x117 uint64
+ var x118 uint64
+ x117, x118 = bits.Add64(x97, x109, uint64(p224Uint1(x116)))
+ var x119 uint64
+ var x120 uint64
+ x119, x120 = bits.Add64(x99, x111, uint64(p224Uint1(x118)))
+ var x121 uint64
+ var x122 uint64
+ x121, x122 = bits.Add64(x101, x113, uint64(p224Uint1(x120)))
+ var x123 uint64
+ var x124 uint64
+ x124, x123 = bits.Mul64(x3, 0xffffffff)
+ var x125 uint64
+ var x126 uint64
+ x126, x125 = bits.Mul64(x3, 0xfffffffe00000000)
+ var x127 uint64
+ var x128 uint64
+ x128, x127 = bits.Mul64(x3, 0xffffffff00000000)
+ var x129 uint64
+ var x130 uint64
+ x130, x129 = bits.Mul64(x3, 0xffffffff00000001)
+ var x131 uint64
+ var x132 uint64
+ x131, x132 = bits.Add64(x130, x127, uint64(0x0))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x128, x125, uint64(p224Uint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x126, x123, uint64(p224Uint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x117, x129, uint64(0x0))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x119, x131, uint64(p224Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x121, x133, uint64(p224Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(((uint64(p224Uint1(x122)) + (uint64(p224Uint1(x102)) + (uint64(p224Uint1(x94)) + x82))) + (uint64(p224Uint1(x114)) + x106)), x135, uint64(p224Uint1(x142)))
+ var x145 uint64
+ _, x145 = bits.Mul64(x137, 0xffffffffffffffff)
+ var x147 uint64
+ var x148 uint64
+ x148, x147 = bits.Mul64(x145, 0xffffffff)
+ var x149 uint64
+ var x150 uint64
+ x150, x149 = bits.Mul64(x145, 0xffffffffffffffff)
+ var x151 uint64
+ var x152 uint64
+ x152, x151 = bits.Mul64(x145, 0xffffffff00000000)
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x152, x149, uint64(0x0))
+ var x155 uint64
+ var x156 uint64
+ x155, x156 = bits.Add64(x150, x147, uint64(p224Uint1(x154)))
+ var x158 uint64
+ _, x158 = bits.Add64(x137, x145, uint64(0x0))
+ var x159 uint64
+ var x160 uint64
+ x159, x160 = bits.Add64(x139, x151, uint64(p224Uint1(x158)))
+ var x161 uint64
+ var x162 uint64
+ x161, x162 = bits.Add64(x141, x153, uint64(p224Uint1(x160)))
+ var x163 uint64
+ var x164 uint64
+ x163, x164 = bits.Add64(x143, x155, uint64(p224Uint1(x162)))
+ x165 := ((uint64(p224Uint1(x164)) + (uint64(p224Uint1(x144)) + (uint64(p224Uint1(x136)) + x124))) + (uint64(p224Uint1(x156)) + x148))
+ var x166 uint64
+ var x167 uint64
+ x166, x167 = bits.Sub64(x159, uint64(0x1), uint64(0x0))
+ var x168 uint64
+ var x169 uint64
+ x168, x169 = bits.Sub64(x161, 0xffffffff00000000, uint64(p224Uint1(x167)))
+ var x170 uint64
+ var x171 uint64
+ x170, x171 = bits.Sub64(x163, 0xffffffffffffffff, uint64(p224Uint1(x169)))
+ var x172 uint64
+ var x173 uint64
+ x172, x173 = bits.Sub64(x165, 0xffffffff, uint64(p224Uint1(x171)))
+ var x175 uint64
+ _, x175 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p224Uint1(x173)))
+ var x176 uint64
+ p224CmovznzU64(&x176, p224Uint1(x175), x166, x159)
+ var x177 uint64
+ p224CmovznzU64(&x177, p224Uint1(x175), x168, x161)
+ var x178 uint64
+ p224CmovznzU64(&x178, p224Uint1(x175), x170, x163)
+ var x179 uint64
+ p224CmovznzU64(&x179, p224Uint1(x175), x172, x165)
+ out1[0] = x176
+ out1[1] = x177
+ out1[2] = x178
+ out1[3] = x179
+}
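+
+// p224ToMontgomery multiplies the input by the hard-coded limbs of R² mod m,
+// interleaved with the same per-round reduction as p224FromMontgomery, so the
+// result is a·R²·R⁻¹ = a·R mod m, the Montgomery form of a.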
+
+// p224Selectznz is a multi-limb conditional select.
+//
+// Postconditions:
+//
+// eval out1 = (if arg1 = 0 then eval arg2 else eval arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+// arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+func p224Selectznz(out1 *[4]uint64, arg1 p224Uint1, arg2 *[4]uint64, arg3 *[4]uint64) {
+ var x1 uint64
+ p224CmovznzU64(&x1, arg1, arg2[0], arg3[0])
+ var x2 uint64
+ p224CmovznzU64(&x2, arg1, arg2[1], arg3[1])
+ var x3 uint64
+ p224CmovznzU64(&x3, arg1, arg2[2], arg3[2])
+ var x4 uint64
+ p224CmovznzU64(&x4, arg1, arg2[3], arg3[3])
+ out1[0] = x1
+ out1[1] = x2
+ out1[2] = x3
+ out1[3] = x4
+}
+
+// p224ToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..27]
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
+func p224ToBytes(out1 *[28]uint8, arg1 *[4]uint64) {
+ x1 := arg1[3]
+ x2 := arg1[2]
+ x3 := arg1[1]
+ x4 := arg1[0]
+ x5 := (uint8(x4) & 0xff)
+ x6 := (x4 >> 8)
+ x7 := (uint8(x6) & 0xff)
+ x8 := (x6 >> 8)
+ x9 := (uint8(x8) & 0xff)
+ x10 := (x8 >> 8)
+ x11 := (uint8(x10) & 0xff)
+ x12 := (x10 >> 8)
+ x13 := (uint8(x12) & 0xff)
+ x14 := (x12 >> 8)
+ x15 := (uint8(x14) & 0xff)
+ x16 := (x14 >> 8)
+ x17 := (uint8(x16) & 0xff)
+ x18 := uint8((x16 >> 8))
+ x19 := (uint8(x3) & 0xff)
+ x20 := (x3 >> 8)
+ x21 := (uint8(x20) & 0xff)
+ x22 := (x20 >> 8)
+ x23 := (uint8(x22) & 0xff)
+ x24 := (x22 >> 8)
+ x25 := (uint8(x24) & 0xff)
+ x26 := (x24 >> 8)
+ x27 := (uint8(x26) & 0xff)
+ x28 := (x26 >> 8)
+ x29 := (uint8(x28) & 0xff)
+ x30 := (x28 >> 8)
+ x31 := (uint8(x30) & 0xff)
+ x32 := uint8((x30 >> 8))
+ x33 := (uint8(x2) & 0xff)
+ x34 := (x2 >> 8)
+ x35 := (uint8(x34) & 0xff)
+ x36 := (x34 >> 8)
+ x37 := (uint8(x36) & 0xff)
+ x38 := (x36 >> 8)
+ x39 := (uint8(x38) & 0xff)
+ x40 := (x38 >> 8)
+ x41 := (uint8(x40) & 0xff)
+ x42 := (x40 >> 8)
+ x43 := (uint8(x42) & 0xff)
+ x44 := (x42 >> 8)
+ x45 := (uint8(x44) & 0xff)
+ x46 := uint8((x44 >> 8))
+ x47 := (uint8(x1) & 0xff)
+ x48 := (x1 >> 8)
+ x49 := (uint8(x48) & 0xff)
+ x50 := (x48 >> 8)
+ x51 := (uint8(x50) & 0xff)
+ x52 := uint8((x50 >> 8))
+ out1[0] = x5
+ out1[1] = x7
+ out1[2] = x9
+ out1[3] = x11
+ out1[4] = x13
+ out1[5] = x15
+ out1[6] = x17
+ out1[7] = x18
+ out1[8] = x19
+ out1[9] = x21
+ out1[10] = x23
+ out1[11] = x25
+ out1[12] = x27
+ out1[13] = x29
+ out1[14] = x31
+ out1[15] = x32
+ out1[16] = x33
+ out1[17] = x35
+ out1[18] = x37
+ out1[19] = x39
+ out1[20] = x41
+ out1[21] = x43
+ out1[22] = x45
+ out1[23] = x46
+ out1[24] = x47
+ out1[25] = x49
+ out1[26] = x51
+ out1[27] = x52
+}
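+
+// p224ToBytes unpacks the four limbs into 28 little-endian bytes; only the
+// low 32 bits of arg1[3] are used, matching the 224-bit field. Byte i of the
+// output equals (arg1[i/8] >> (8·(i mod 8))) & 0xff.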
+
+// p224FromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ bytes_eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = bytes_eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffff]]
+func p224FromBytes(out1 *[4]uint64, arg1 *[28]uint8) {
+ x1 := (uint64(arg1[27]) << 24)
+ x2 := (uint64(arg1[26]) << 16)
+ x3 := (uint64(arg1[25]) << 8)
+ x4 := arg1[24]
+ x5 := (uint64(arg1[23]) << 56)
+ x6 := (uint64(arg1[22]) << 48)
+ x7 := (uint64(arg1[21]) << 40)
+ x8 := (uint64(arg1[20]) << 32)
+ x9 := (uint64(arg1[19]) << 24)
+ x10 := (uint64(arg1[18]) << 16)
+ x11 := (uint64(arg1[17]) << 8)
+ x12 := arg1[16]
+ x13 := (uint64(arg1[15]) << 56)
+ x14 := (uint64(arg1[14]) << 48)
+ x15 := (uint64(arg1[13]) << 40)
+ x16 := (uint64(arg1[12]) << 32)
+ x17 := (uint64(arg1[11]) << 24)
+ x18 := (uint64(arg1[10]) << 16)
+ x19 := (uint64(arg1[9]) << 8)
+ x20 := arg1[8]
+ x21 := (uint64(arg1[7]) << 56)
+ x22 := (uint64(arg1[6]) << 48)
+ x23 := (uint64(arg1[5]) << 40)
+ x24 := (uint64(arg1[4]) << 32)
+ x25 := (uint64(arg1[3]) << 24)
+ x26 := (uint64(arg1[2]) << 16)
+ x27 := (uint64(arg1[1]) << 8)
+ x28 := arg1[0]
+ x29 := (x27 + uint64(x28))
+ x30 := (x26 + x29)
+ x31 := (x25 + x30)
+ x32 := (x24 + x31)
+ x33 := (x23 + x32)
+ x34 := (x22 + x33)
+ x35 := (x21 + x34)
+ x36 := (x19 + uint64(x20))
+ x37 := (x18 + x36)
+ x38 := (x17 + x37)
+ x39 := (x16 + x38)
+ x40 := (x15 + x39)
+ x41 := (x14 + x40)
+ x42 := (x13 + x41)
+ x43 := (x11 + uint64(x12))
+ x44 := (x10 + x43)
+ x45 := (x9 + x44)
+ x46 := (x8 + x45)
+ x47 := (x7 + x46)
+ x48 := (x6 + x47)
+ x49 := (x5 + x48)
+ x50 := (x3 + uint64(x4))
+ x51 := (x2 + x50)
+ x52 := (x1 + x51)
+ out1[0] = x35
+ out1[1] = x42
+ out1[2] = x49
+ out1[3] = x52
+}
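+
+// p224FromBytes is the inverse of p224ToBytes: each byte is shifted into
+// position and the per-limb sums reassemble the four 64-bit words. An
+// illustrative round trip, not part of the generated code, assuming a
+// canonical 28-byte little-endian encoding in buf:
+//
+//	var buf [28]uint8
+//	var tmp p224NonMontgomeryDomainFieldElement
+//	var fe p224MontgomeryDomainFieldElement
+//	p224FromBytes((*[4]uint64)(&tmp), &buf)
+//	p224ToMontgomery(&fe, &tmp) // fe is now usable with p224Mul, p224Add, ...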
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p224_invert.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p224_invert.go
index 4163ed0c67..4163ed0c67 100644
--- a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p224_invert.go
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p224_invert.go
diff --git a/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p256.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p256.go
new file mode 100644
index 0000000000..dfdd0a7c69
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p256.go
@@ -0,0 +1,135 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package fiat
+
+import (
+ "crypto/subtle"
+ "errors"
+)
+
+// P256Element is an integer modulo 2^256 - 2^224 + 2^192 + 2^96 - 1.
+//
+// The zero value is a valid zero element.
+type P256Element struct {
+ // Values are represented internally always in the Montgomery domain, and
+ // converted in Bytes and SetBytes.
+ x p256MontgomeryDomainFieldElement
+}
+
+const p256ElementLen = 32
+
+type p256UntypedFieldElement = [4]uint64
+
+// One sets e = 1, and returns e.
+func (e *P256Element) One() *P256Element {
+ p256SetOne(&e.x)
+ return e
+}
+
+// Equal returns 1 if e == t, and zero otherwise.
+func (e *P256Element) Equal(t *P256Element) int {
+ eBytes := e.Bytes()
+ tBytes := t.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, tBytes)
+}
+
+var p256ZeroEncoding = new(P256Element).Bytes()
+
+// IsZero returns 1 if e == 0, and zero otherwise.
+func (e *P256Element) IsZero() int {
+ eBytes := e.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, p256ZeroEncoding)
+}
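+
+// Both Equal and IsZero compare canonical byte encodings with
+// subtle.ConstantTimeCompare rather than raw limbs, so the checks run in
+// constant time and are independent of the internal Montgomery
+// representation.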
+
+// Set sets e = t, and returns e.
+func (e *P256Element) Set(t *P256Element) *P256Element {
+ e.x = t.x
+ return e
+}
+
+// Bytes returns the 32-byte big-endian encoding of e.
+func (e *P256Element) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [p256ElementLen]byte
+ return e.bytes(&out)
+}
+
+func (e *P256Element) bytes(out *[p256ElementLen]byte) []byte {
+ var tmp p256NonMontgomeryDomainFieldElement
+ p256FromMontgomery(&tmp, &e.x)
+ p256ToBytes(out, (*p256UntypedFieldElement)(&tmp))
+ p256InvertEndianness(out[:])
+ return out[:]
+}
+
+// p256MinusOneEncoding is the encoding of -1 mod p, so p - 1, the
+// highest canonical encoding. It is used by SetBytes to check for non-canonical
+// encodings such as p + k, 2p + k, etc.
+var p256MinusOneEncoding = new(P256Element).Sub(
+ new(P256Element), new(P256Element).One()).Bytes()
+
+// SetBytes sets e = v, where v is a big-endian 32-byte encoding, and returns e.
+// If v is not 32 bytes or it encodes a value equal to or higher than 2^256 - 2^224 + 2^192 + 2^96 - 1,
+// SetBytes returns nil and an error, and e is unchanged.
+func (e *P256Element) SetBytes(v []byte) (*P256Element, error) {
+ if len(v) != p256ElementLen {
+ return nil, errors.New("invalid P256Element encoding")
+ }
+ for i := range v {
+ if v[i] < p256MinusOneEncoding[i] {
+ break
+ }
+ if v[i] > p256MinusOneEncoding[i] {
+ return nil, errors.New("invalid P256Element encoding")
+ }
+ }
+ var in [p256ElementLen]byte
+ copy(in[:], v)
+ p256InvertEndianness(in[:])
+ var tmp p256NonMontgomeryDomainFieldElement
+ p256FromBytes((*p256UntypedFieldElement)(&tmp), &in)
+ p256ToMontgomery(&e.x, &tmp)
+ return e, nil
+}
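+
+// The loop in SetBytes above is a big-endian lexicographic comparison of v
+// against the encoding of p - 1: a first differing byte that is smaller
+// proves v < p and the scan stops, a larger one proves v ≥ p and the
+// encoding is rejected, so only canonical values in [0, p-1] are accepted.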
+
+// Add sets e = t1 + t2, and returns e.
+func (e *P256Element) Add(t1, t2 *P256Element) *P256Element {
+ p256Add(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Sub sets e = t1 - t2, and returns e.
+func (e *P256Element) Sub(t1, t2 *P256Element) *P256Element {
+ p256Sub(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Mul sets e = t1 * t2, and returns e.
+func (e *P256Element) Mul(t1, t2 *P256Element) *P256Element {
+ p256Mul(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Square sets e = t * t, and returns e.
+func (e *P256Element) Square(t *P256Element) *P256Element {
+ p256Square(&e.x, &t.x)
+ return e
+}
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *P256Element) Select(a, b *P256Element, cond int) *P256Element {
+ p256Selectznz((*p256UntypedFieldElement)(&v.x), p256Uint1(cond),
+ (*p256UntypedFieldElement)(&b.x), (*p256UntypedFieldElement)(&a.x))
+ return v
+}
+
+func p256InvertEndianness(v []byte) {
+ for i := 0; i < len(v)/2; i++ {
+ v[i], v[len(v)-1-i] = v[len(v)-1-i], v[i]
+ }
+}
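+
+// An illustrative sketch of the element API, not part of the generated file,
+// assuming two canonical 32-byte big-endian encodings aBytes and bBytes:
+//
+//	a, err := new(P256Element).SetBytes(aBytes)
+//	if err != nil { /* handle invalid encoding */ }
+//	b, err := new(P256Element).SetBytes(bBytes)
+//	if err != nil { /* handle invalid encoding */ }
+//	sum := new(P256Element).Add(a, b)
+//	enc := sum.Bytes() // 32-byte big-endian encoding of (a + b) mod p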
diff --git a/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p256_fiat64.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p256_fiat64.go
new file mode 100644
index 0000000000..75352d5d26
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p256_fiat64.go
@@ -0,0 +1,1400 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --no-wide-int --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --internal-static --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name fiat --no-prefix-fiat p256 64 '2^256 - 2^224 + 2^192 + 2^96 - 1' mul square add sub one from_montgomery to_montgomery selectznz to_bytes from_bytes
+//
+// curve description: p256
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, square, add, sub, one, from_montgomery, to_montgomery, selectznz, to_bytes, from_bytes
+//
+// m = 0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff (from "2^256 - 2^224 + 2^192 + 2^96 - 1")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+// functions synthesized for this Montgomery arithmetic require the
+// input to be strictly less than the prime modulus (m), and also
+// require the input to be in the unique saturated representation.
+// All functions also ensure that these two properties are true of
+// return values.
+//
+//
+//
+// Computed values:
+//
+// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192)
+//
+// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248)
+//
+// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in
+//
+// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256
+
+package fiat
+
+import "math/bits"
+
+type p256Uint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type p256Int1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type p256MontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p256MontgomeryDomainFieldElement [4]uint64
+
+// The type p256NonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p256NonMontgomeryDomainFieldElement [4]uint64
+
+// p256CmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+//
+// out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [0x0 ~> 0xffffffffffffffff]
+// arg3: [0x0 ~> 0xffffffffffffffff]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func p256CmovznzU64(out1 *uint64, arg1 p256Uint1, arg2 uint64, arg3 uint64) {
+ x1 := (uint64(arg1) * 0xffffffffffffffff)
+ x2 := ((x1 & arg3) | ((^x1) & arg2))
+ *out1 = x2
+}
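+
+// Multiplying the 0/1 condition by 0xffffffffffffffff turns it into an
+// all-zeros or all-ones mask, so the select is a branch-free AND/OR that
+// runs in constant time regardless of arg1.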
+
+// p256Mul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p256Mul(out1 *p256MontgomeryDomainFieldElement, arg1 *p256MontgomeryDomainFieldElement, arg2 *p256MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, arg2[3])
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, arg2[2])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, arg2[1])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, arg2[0])
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(p256Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(p256Uint1(x16)))
+ x19 := (uint64(p256Uint1(x18)) + x6)
+ var x20 uint64
+ var x21 uint64
+ x21, x20 = bits.Mul64(x11, 0xffffffff00000001)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x11, 0xffffffff)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x11, 0xffffffffffffffff)
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x25, x22, uint64(0x0))
+ x28 := (uint64(p256Uint1(x27)) + x23)
+ var x30 uint64
+ _, x30 = bits.Add64(x11, x24, uint64(0x0))
+ var x31 uint64
+ var x32 uint64
+ x31, x32 = bits.Add64(x13, x26, uint64(p256Uint1(x30)))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x15, x28, uint64(p256Uint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x17, x20, uint64(p256Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x19, x21, uint64(p256Uint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x40, x39 = bits.Mul64(x1, arg2[3])
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, arg2[2])
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, arg2[1])
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, arg2[0])
+ var x47 uint64
+ var x48 uint64
+ x47, x48 = bits.Add64(x46, x43, uint64(0x0))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x44, x41, uint64(p256Uint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x42, x39, uint64(p256Uint1(x50)))
+ x53 := (uint64(p256Uint1(x52)) + x40)
+ var x54 uint64
+ var x55 uint64
+ x54, x55 = bits.Add64(x31, x45, uint64(0x0))
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x33, x47, uint64(p256Uint1(x55)))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x35, x49, uint64(p256Uint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x37, x51, uint64(p256Uint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(uint64(p256Uint1(x38)), x53, uint64(p256Uint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x65, x64 = bits.Mul64(x54, 0xffffffff00000001)
+ var x66 uint64
+ var x67 uint64
+ x67, x66 = bits.Mul64(x54, 0xffffffff)
+ var x68 uint64
+ var x69 uint64
+ x69, x68 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x70 uint64
+ var x71 uint64
+ x70, x71 = bits.Add64(x69, x66, uint64(0x0))
+ x72 := (uint64(p256Uint1(x71)) + x67)
+ var x74 uint64
+ _, x74 = bits.Add64(x54, x68, uint64(0x0))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x56, x70, uint64(p256Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Add64(x58, x72, uint64(p256Uint1(x76)))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Add64(x60, x64, uint64(p256Uint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x62, x65, uint64(p256Uint1(x80)))
+ x83 := (uint64(p256Uint1(x82)) + uint64(p256Uint1(x63)))
+ var x84 uint64
+ var x85 uint64
+ x85, x84 = bits.Mul64(x2, arg2[3])
+ var x86 uint64
+ var x87 uint64
+ x87, x86 = bits.Mul64(x2, arg2[2])
+ var x88 uint64
+ var x89 uint64
+ x89, x88 = bits.Mul64(x2, arg2[1])
+ var x90 uint64
+ var x91 uint64
+ x91, x90 = bits.Mul64(x2, arg2[0])
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x91, x88, uint64(0x0))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x89, x86, uint64(p256Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x87, x84, uint64(p256Uint1(x95)))
+ x98 := (uint64(p256Uint1(x97)) + x85)
+ var x99 uint64
+ var x100 uint64
+ x99, x100 = bits.Add64(x75, x90, uint64(0x0))
+ var x101 uint64
+ var x102 uint64
+ x101, x102 = bits.Add64(x77, x92, uint64(p256Uint1(x100)))
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Add64(x79, x94, uint64(p256Uint1(x102)))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x81, x96, uint64(p256Uint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x83, x98, uint64(p256Uint1(x106)))
+ var x109 uint64
+ var x110 uint64
+ x110, x109 = bits.Mul64(x99, 0xffffffff00000001)
+ var x111 uint64
+ var x112 uint64
+ x112, x111 = bits.Mul64(x99, 0xffffffff)
+ var x113 uint64
+ var x114 uint64
+ x114, x113 = bits.Mul64(x99, 0xffffffffffffffff)
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x114, x111, uint64(0x0))
+ x117 := (uint64(p256Uint1(x116)) + x112)
+ var x119 uint64
+ _, x119 = bits.Add64(x99, x113, uint64(0x0))
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x101, x115, uint64(p256Uint1(x119)))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x103, x117, uint64(p256Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x105, x109, uint64(p256Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x107, x110, uint64(p256Uint1(x125)))
+ x128 := (uint64(p256Uint1(x127)) + uint64(p256Uint1(x108)))
+ var x129 uint64
+ var x130 uint64
+ x130, x129 = bits.Mul64(x3, arg2[3])
+ var x131 uint64
+ var x132 uint64
+ x132, x131 = bits.Mul64(x3, arg2[2])
+ var x133 uint64
+ var x134 uint64
+ x134, x133 = bits.Mul64(x3, arg2[1])
+ var x135 uint64
+ var x136 uint64
+ x136, x135 = bits.Mul64(x3, arg2[0])
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x136, x133, uint64(0x0))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x134, x131, uint64(p256Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x132, x129, uint64(p256Uint1(x140)))
+ x143 := (uint64(p256Uint1(x142)) + x130)
+ var x144 uint64
+ var x145 uint64
+ x144, x145 = bits.Add64(x120, x135, uint64(0x0))
+ var x146 uint64
+ var x147 uint64
+ x146, x147 = bits.Add64(x122, x137, uint64(p256Uint1(x145)))
+ var x148 uint64
+ var x149 uint64
+ x148, x149 = bits.Add64(x124, x139, uint64(p256Uint1(x147)))
+ var x150 uint64
+ var x151 uint64
+ x150, x151 = bits.Add64(x126, x141, uint64(p256Uint1(x149)))
+ var x152 uint64
+ var x153 uint64
+ x152, x153 = bits.Add64(x128, x143, uint64(p256Uint1(x151)))
+ var x154 uint64
+ var x155 uint64
+ x155, x154 = bits.Mul64(x144, 0xffffffff00000001)
+ var x156 uint64
+ var x157 uint64
+ x157, x156 = bits.Mul64(x144, 0xffffffff)
+ var x158 uint64
+ var x159 uint64
+ x159, x158 = bits.Mul64(x144, 0xffffffffffffffff)
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x159, x156, uint64(0x0))
+ x162 := (uint64(p256Uint1(x161)) + x157)
+ var x164 uint64
+ _, x164 = bits.Add64(x144, x158, uint64(0x0))
+ var x165 uint64
+ var x166 uint64
+ x165, x166 = bits.Add64(x146, x160, uint64(p256Uint1(x164)))
+ var x167 uint64
+ var x168 uint64
+ x167, x168 = bits.Add64(x148, x162, uint64(p256Uint1(x166)))
+ var x169 uint64
+ var x170 uint64
+ x169, x170 = bits.Add64(x150, x154, uint64(p256Uint1(x168)))
+ var x171 uint64
+ var x172 uint64
+ x171, x172 = bits.Add64(x152, x155, uint64(p256Uint1(x170)))
+ x173 := (uint64(p256Uint1(x172)) + uint64(p256Uint1(x153)))
+ var x174 uint64
+ var x175 uint64
+ x174, x175 = bits.Sub64(x165, 0xffffffffffffffff, uint64(0x0))
+ var x176 uint64
+ var x177 uint64
+ x176, x177 = bits.Sub64(x167, 0xffffffff, uint64(p256Uint1(x175)))
+ var x178 uint64
+ var x179 uint64
+ x178, x179 = bits.Sub64(x169, uint64(0x0), uint64(p256Uint1(x177)))
+ var x180 uint64
+ var x181 uint64
+ x180, x181 = bits.Sub64(x171, 0xffffffff00000001, uint64(p256Uint1(x179)))
+ var x183 uint64
+ _, x183 = bits.Sub64(x173, uint64(0x0), uint64(p256Uint1(x181)))
+ var x184 uint64
+ p256CmovznzU64(&x184, p256Uint1(x183), x174, x165)
+ var x185 uint64
+ p256CmovznzU64(&x185, p256Uint1(x183), x176, x167)
+ var x186 uint64
+ p256CmovznzU64(&x186, p256Uint1(x183), x178, x169)
+ var x187 uint64
+ p256CmovznzU64(&x187, p256Uint1(x183), x180, x171)
+ out1[0] = x184
+ out1[1] = x185
+ out1[2] = x186
+ out1[3] = x187
+}
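+
+// p256Mul is a word-by-word Montgomery multiplication: each of the four
+// rounds accumulates arg1[i]·arg2 and then folds in t·m, where
+// t = acc0·(-m⁻¹ mod 2^64). Because the low limb of the P-256 modulus is
+// 2^64 - 1, -m⁻¹ mod 2^64 = 1 and t is simply the accumulator's low word
+// (x11, x54, x99, x144 above). A final compare-and-subtract brings the
+// result into [0, m).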
+
+// p256Square squares a field element in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg1)) mod m
+// 0 ≤ eval out1 < m
+func p256Square(out1 *p256MontgomeryDomainFieldElement, arg1 *p256MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, arg1[3])
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, arg1[2])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, arg1[1])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, arg1[0])
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(p256Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(p256Uint1(x16)))
+ x19 := (uint64(p256Uint1(x18)) + x6)
+ var x20 uint64
+ var x21 uint64
+ x21, x20 = bits.Mul64(x11, 0xffffffff00000001)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x11, 0xffffffff)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x11, 0xffffffffffffffff)
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x25, x22, uint64(0x0))
+ x28 := (uint64(p256Uint1(x27)) + x23)
+ var x30 uint64
+ _, x30 = bits.Add64(x11, x24, uint64(0x0))
+ var x31 uint64
+ var x32 uint64
+ x31, x32 = bits.Add64(x13, x26, uint64(p256Uint1(x30)))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x15, x28, uint64(p256Uint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x17, x20, uint64(p256Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x19, x21, uint64(p256Uint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x40, x39 = bits.Mul64(x1, arg1[3])
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, arg1[2])
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, arg1[1])
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, arg1[0])
+ var x47 uint64
+ var x48 uint64
+ x47, x48 = bits.Add64(x46, x43, uint64(0x0))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x44, x41, uint64(p256Uint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x42, x39, uint64(p256Uint1(x50)))
+ x53 := (uint64(p256Uint1(x52)) + x40)
+ var x54 uint64
+ var x55 uint64
+ x54, x55 = bits.Add64(x31, x45, uint64(0x0))
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x33, x47, uint64(p256Uint1(x55)))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x35, x49, uint64(p256Uint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x37, x51, uint64(p256Uint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(uint64(p256Uint1(x38)), x53, uint64(p256Uint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x65, x64 = bits.Mul64(x54, 0xffffffff00000001)
+ var x66 uint64
+ var x67 uint64
+ x67, x66 = bits.Mul64(x54, 0xffffffff)
+ var x68 uint64
+ var x69 uint64
+ x69, x68 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x70 uint64
+ var x71 uint64
+ x70, x71 = bits.Add64(x69, x66, uint64(0x0))
+ x72 := (uint64(p256Uint1(x71)) + x67)
+ var x74 uint64
+ _, x74 = bits.Add64(x54, x68, uint64(0x0))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x56, x70, uint64(p256Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Add64(x58, x72, uint64(p256Uint1(x76)))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Add64(x60, x64, uint64(p256Uint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x62, x65, uint64(p256Uint1(x80)))
+ x83 := (uint64(p256Uint1(x82)) + uint64(p256Uint1(x63)))
+ var x84 uint64
+ var x85 uint64
+ x85, x84 = bits.Mul64(x2, arg1[3])
+ var x86 uint64
+ var x87 uint64
+ x87, x86 = bits.Mul64(x2, arg1[2])
+ var x88 uint64
+ var x89 uint64
+ x89, x88 = bits.Mul64(x2, arg1[1])
+ var x90 uint64
+ var x91 uint64
+ x91, x90 = bits.Mul64(x2, arg1[0])
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x91, x88, uint64(0x0))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x89, x86, uint64(p256Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x87, x84, uint64(p256Uint1(x95)))
+ x98 := (uint64(p256Uint1(x97)) + x85)
+ var x99 uint64
+ var x100 uint64
+ x99, x100 = bits.Add64(x75, x90, uint64(0x0))
+ var x101 uint64
+ var x102 uint64
+ x101, x102 = bits.Add64(x77, x92, uint64(p256Uint1(x100)))
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Add64(x79, x94, uint64(p256Uint1(x102)))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x81, x96, uint64(p256Uint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x83, x98, uint64(p256Uint1(x106)))
+ var x109 uint64
+ var x110 uint64
+ x110, x109 = bits.Mul64(x99, 0xffffffff00000001)
+ var x111 uint64
+ var x112 uint64
+ x112, x111 = bits.Mul64(x99, 0xffffffff)
+ var x113 uint64
+ var x114 uint64
+ x114, x113 = bits.Mul64(x99, 0xffffffffffffffff)
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x114, x111, uint64(0x0))
+ x117 := (uint64(p256Uint1(x116)) + x112)
+ var x119 uint64
+ _, x119 = bits.Add64(x99, x113, uint64(0x0))
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x101, x115, uint64(p256Uint1(x119)))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x103, x117, uint64(p256Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x105, x109, uint64(p256Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x107, x110, uint64(p256Uint1(x125)))
+ x128 := (uint64(p256Uint1(x127)) + uint64(p256Uint1(x108)))
+ var x129 uint64
+ var x130 uint64
+ x130, x129 = bits.Mul64(x3, arg1[3])
+ var x131 uint64
+ var x132 uint64
+ x132, x131 = bits.Mul64(x3, arg1[2])
+ var x133 uint64
+ var x134 uint64
+ x134, x133 = bits.Mul64(x3, arg1[1])
+ var x135 uint64
+ var x136 uint64
+ x136, x135 = bits.Mul64(x3, arg1[0])
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x136, x133, uint64(0x0))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x134, x131, uint64(p256Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x132, x129, uint64(p256Uint1(x140)))
+ x143 := (uint64(p256Uint1(x142)) + x130)
+ var x144 uint64
+ var x145 uint64
+ x144, x145 = bits.Add64(x120, x135, uint64(0x0))
+ var x146 uint64
+ var x147 uint64
+ x146, x147 = bits.Add64(x122, x137, uint64(p256Uint1(x145)))
+ var x148 uint64
+ var x149 uint64
+ x148, x149 = bits.Add64(x124, x139, uint64(p256Uint1(x147)))
+ var x150 uint64
+ var x151 uint64
+ x150, x151 = bits.Add64(x126, x141, uint64(p256Uint1(x149)))
+ var x152 uint64
+ var x153 uint64
+ x152, x153 = bits.Add64(x128, x143, uint64(p256Uint1(x151)))
+ var x154 uint64
+ var x155 uint64
+ x155, x154 = bits.Mul64(x144, 0xffffffff00000001)
+ var x156 uint64
+ var x157 uint64
+ x157, x156 = bits.Mul64(x144, 0xffffffff)
+ var x158 uint64
+ var x159 uint64
+ x159, x158 = bits.Mul64(x144, 0xffffffffffffffff)
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x159, x156, uint64(0x0))
+ x162 := (uint64(p256Uint1(x161)) + x157)
+ var x164 uint64
+ _, x164 = bits.Add64(x144, x158, uint64(0x0))
+ var x165 uint64
+ var x166 uint64
+ x165, x166 = bits.Add64(x146, x160, uint64(p256Uint1(x164)))
+ var x167 uint64
+ var x168 uint64
+ x167, x168 = bits.Add64(x148, x162, uint64(p256Uint1(x166)))
+ var x169 uint64
+ var x170 uint64
+ x169, x170 = bits.Add64(x150, x154, uint64(p256Uint1(x168)))
+ var x171 uint64
+ var x172 uint64
+ x171, x172 = bits.Add64(x152, x155, uint64(p256Uint1(x170)))
+ x173 := (uint64(p256Uint1(x172)) + uint64(p256Uint1(x153)))
+ var x174 uint64
+ var x175 uint64
+ x174, x175 = bits.Sub64(x165, 0xffffffffffffffff, uint64(0x0))
+ var x176 uint64
+ var x177 uint64
+ x176, x177 = bits.Sub64(x167, 0xffffffff, uint64(p256Uint1(x175)))
+ var x178 uint64
+ var x179 uint64
+ x178, x179 = bits.Sub64(x169, uint64(0x0), uint64(p256Uint1(x177)))
+ var x180 uint64
+ var x181 uint64
+ x180, x181 = bits.Sub64(x171, 0xffffffff00000001, uint64(p256Uint1(x179)))
+ var x183 uint64
+ _, x183 = bits.Sub64(x173, uint64(0x0), uint64(p256Uint1(x181)))
+ var x184 uint64
+ p256CmovznzU64(&x184, p256Uint1(x183), x174, x165)
+ var x185 uint64
+ p256CmovznzU64(&x185, p256Uint1(x183), x176, x167)
+ var x186 uint64
+ p256CmovznzU64(&x186, p256Uint1(x183), x178, x169)
+ var x187 uint64
+ p256CmovznzU64(&x187, p256Uint1(x183), x180, x171)
+ out1[0] = x184
+ out1[1] = x185
+ out1[2] = x186
+ out1[3] = x187
+}
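+
+// p256Square runs the same ladder as p256Mul with arg1 as both operands; the
+// generator emits no squaring-specific shortcut, so a square costs the same
+// as a general multiplication.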
+
+// p256Add adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p256Add(out1 *p256MontgomeryDomainFieldElement, arg1 *p256MontgomeryDomainFieldElement, arg2 *p256MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(p256Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(p256Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(p256Uint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Sub64(x1, 0xffffffffffffffff, uint64(0x0))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Sub64(x3, 0xffffffff, uint64(p256Uint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Sub64(x5, uint64(0x0), uint64(p256Uint1(x12)))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Sub64(x7, 0xffffffff00000001, uint64(p256Uint1(x14)))
+ var x18 uint64
+ _, x18 = bits.Sub64(uint64(p256Uint1(x8)), uint64(0x0), uint64(p256Uint1(x16)))
+ var x19 uint64
+ p256CmovznzU64(&x19, p256Uint1(x18), x9, x1)
+ var x20 uint64
+ p256CmovznzU64(&x20, p256Uint1(x18), x11, x3)
+ var x21 uint64
+ p256CmovznzU64(&x21, p256Uint1(x18), x13, x5)
+ var x22 uint64
+ p256CmovznzU64(&x22, p256Uint1(x18), x15, x7)
+ out1[0] = x19
+ out1[1] = x20
+ out1[2] = x21
+ out1[3] = x22
+}
+
+// p256Sub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p256Sub(out1 *p256MontgomeryDomainFieldElement, arg1 *p256MontgomeryDomainFieldElement, arg2 *p256MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(p256Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(p256Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(p256Uint1(x6)))
+ var x9 uint64
+ p256CmovznzU64(&x9, p256Uint1(x8), uint64(0x0), 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x1, x9, uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x3, (x9 & 0xffffffff), uint64(p256Uint1(x11)))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x5, uint64(0x0), uint64(p256Uint1(x13)))
+ var x16 uint64
+ x16, _ = bits.Add64(x7, (x9 & 0xffffffff00000001), uint64(p256Uint1(x15)))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+}
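+
+// As in p224Sub, x9 is an all-ones mask when the subtraction borrowed; the
+// masked constants (the full mask, 0xffffffff, 0, 0xffffffff00000001) are
+// the limbs of the P-256 modulus, so a borrow adds m back without branching.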
+
+// p256SetOne returns the field element one in the Montgomery domain.
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = 1 mod m
+// 0 ≤ eval out1 < m
+func p256SetOne(out1 *p256MontgomeryDomainFieldElement) {
+ out1[0] = uint64(0x1)
+ out1[1] = 0xffffffff00000000
+ out1[2] = 0xffffffffffffffff
+ out1[3] = 0xfffffffe
+}
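+
+// These constants are R mod m with R = 2^256: since m > 2^255,
+// R mod m = 2^256 - m = 2^224 - 2^192 - 2^96 + 1, whose limbs are
+// (0x1, 0xffffffff00000000, 0xffffffffffffffff, 0xfffffffe).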
+
+// p256FromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m
+// 0 ≤ eval out1 < m
+func p256FromMontgomery(out1 *p256NonMontgomeryDomainFieldElement, arg1 *p256MontgomeryDomainFieldElement) {
+ x1 := arg1[0]
+ var x2 uint64
+ var x3 uint64
+ x3, x2 = bits.Mul64(x1, 0xffffffff00000001)
+ var x4 uint64
+ var x5 uint64
+ x5, x4 = bits.Mul64(x1, 0xffffffff)
+ var x6 uint64
+ var x7 uint64
+ x7, x6 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x8 uint64
+ var x9 uint64
+ x8, x9 = bits.Add64(x7, x4, uint64(0x0))
+ var x11 uint64
+ _, x11 = bits.Add64(x1, x6, uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(uint64(0x0), x8, uint64(p256Uint1(x11)))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x12, arg1[1], uint64(0x0))
+ var x16 uint64
+ var x17 uint64
+ x17, x16 = bits.Mul64(x14, 0xffffffff00000001)
+ var x18 uint64
+ var x19 uint64
+ x19, x18 = bits.Mul64(x14, 0xffffffff)
+ var x20 uint64
+ var x21 uint64
+ x21, x20 = bits.Mul64(x14, 0xffffffffffffffff)
+ var x22 uint64
+ var x23 uint64
+ x22, x23 = bits.Add64(x21, x18, uint64(0x0))
+ var x25 uint64
+ _, x25 = bits.Add64(x14, x20, uint64(0x0))
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64((uint64(p256Uint1(x15)) + (uint64(p256Uint1(x13)) + (uint64(p256Uint1(x9)) + x5))), x22, uint64(p256Uint1(x25)))
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x2, (uint64(p256Uint1(x23)) + x19), uint64(p256Uint1(x27)))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x3, x16, uint64(p256Uint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x26, arg1[2], uint64(0x0))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x28, uint64(0x0), uint64(p256Uint1(x33)))
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(x30, uint64(0x0), uint64(p256Uint1(x35)))
+ var x38 uint64
+ var x39 uint64
+ x39, x38 = bits.Mul64(x32, 0xffffffff00000001)
+ var x40 uint64
+ var x41 uint64
+ x41, x40 = bits.Mul64(x32, 0xffffffff)
+ var x42 uint64
+ var x43 uint64
+ x43, x42 = bits.Mul64(x32, 0xffffffffffffffff)
+ var x44 uint64
+ var x45 uint64
+ x44, x45 = bits.Add64(x43, x40, uint64(0x0))
+ var x47 uint64
+ _, x47 = bits.Add64(x32, x42, uint64(0x0))
+ var x48 uint64
+ var x49 uint64
+ x48, x49 = bits.Add64(x34, x44, uint64(p256Uint1(x47)))
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x36, (uint64(p256Uint1(x45)) + x41), uint64(p256Uint1(x49)))
+ var x52 uint64
+ var x53 uint64
+ x52, x53 = bits.Add64((uint64(p256Uint1(x37)) + (uint64(p256Uint1(x31)) + x17)), x38, uint64(p256Uint1(x51)))
+ var x54 uint64
+ var x55 uint64
+ x54, x55 = bits.Add64(x48, arg1[3], uint64(0x0))
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x50, uint64(0x0), uint64(p256Uint1(x55)))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x52, uint64(0x0), uint64(p256Uint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x61, x60 = bits.Mul64(x54, 0xffffffff00000001)
+ var x62 uint64
+ var x63 uint64
+ x63, x62 = bits.Mul64(x54, 0xffffffff)
+ var x64 uint64
+ var x65 uint64
+ x65, x64 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x66 uint64
+ var x67 uint64
+ x66, x67 = bits.Add64(x65, x62, uint64(0x0))
+ var x69 uint64
+ _, x69 = bits.Add64(x54, x64, uint64(0x0))
+ var x70 uint64
+ var x71 uint64
+ x70, x71 = bits.Add64(x56, x66, uint64(p256Uint1(x69)))
+ var x72 uint64
+ var x73 uint64
+ x72, x73 = bits.Add64(x58, (uint64(p256Uint1(x67)) + x63), uint64(p256Uint1(x71)))
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64((uint64(p256Uint1(x59)) + (uint64(p256Uint1(x53)) + x39)), x60, uint64(p256Uint1(x73)))
+ x76 := (uint64(p256Uint1(x75)) + x61)
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Sub64(x70, 0xffffffffffffffff, uint64(0x0))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Sub64(x72, 0xffffffff, uint64(p256Uint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Sub64(x74, uint64(0x0), uint64(p256Uint1(x80)))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Sub64(x76, 0xffffffff00000001, uint64(p256Uint1(x82)))
+ var x86 uint64
+ _, x86 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p256Uint1(x84)))
+ var x87 uint64
+ p256CmovznzU64(&x87, p256Uint1(x86), x77, x70)
+ var x88 uint64
+ p256CmovznzU64(&x88, p256Uint1(x86), x79, x72)
+ var x89 uint64
+ p256CmovznzU64(&x89, p256Uint1(x86), x81, x74)
+ var x90 uint64
+ p256CmovznzU64(&x90, p256Uint1(x86), x83, x76)
+ out1[0] = x87
+ out1[1] = x88
+ out1[2] = x89
+ out1[3] = x90
+}
+
+// p256ToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = eval arg1 mod m
+// 0 ≤ eval out1 < m
+func p256ToMontgomery(out1 *p256MontgomeryDomainFieldElement, arg1 *p256NonMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, 0x4fffffffd)
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, 0xfffffffffffffffe)
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, 0xfffffffbffffffff)
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, 0x3)
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(p256Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(p256Uint1(x16)))
+ var x19 uint64
+ var x20 uint64
+ x20, x19 = bits.Mul64(x11, 0xffffffff00000001)
+ var x21 uint64
+ var x22 uint64
+ x22, x21 = bits.Mul64(x11, 0xffffffff)
+ var x23 uint64
+ var x24 uint64
+ x24, x23 = bits.Mul64(x11, 0xffffffffffffffff)
+ var x25 uint64
+ var x26 uint64
+ x25, x26 = bits.Add64(x24, x21, uint64(0x0))
+ var x28 uint64
+ _, x28 = bits.Add64(x11, x23, uint64(0x0))
+ var x29 uint64
+ var x30 uint64
+ x29, x30 = bits.Add64(x13, x25, uint64(p256Uint1(x28)))
+ var x31 uint64
+ var x32 uint64
+ x31, x32 = bits.Add64(x15, (uint64(p256Uint1(x26)) + x22), uint64(p256Uint1(x30)))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x17, x19, uint64(p256Uint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64((uint64(p256Uint1(x18)) + x6), x20, uint64(p256Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x38, x37 = bits.Mul64(x1, 0x4fffffffd)
+ var x39 uint64
+ var x40 uint64
+ x40, x39 = bits.Mul64(x1, 0xfffffffffffffffe)
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, 0xfffffffbffffffff)
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, 0x3)
+ var x45 uint64
+ var x46 uint64
+ x45, x46 = bits.Add64(x44, x41, uint64(0x0))
+ var x47 uint64
+ var x48 uint64
+ x47, x48 = bits.Add64(x42, x39, uint64(p256Uint1(x46)))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x40, x37, uint64(p256Uint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x29, x43, uint64(0x0))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x31, x45, uint64(p256Uint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x33, x47, uint64(p256Uint1(x54)))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(x35, x49, uint64(p256Uint1(x56)))
+ var x59 uint64
+ var x60 uint64
+ x60, x59 = bits.Mul64(x51, 0xffffffff00000001)
+ var x61 uint64
+ var x62 uint64
+ x62, x61 = bits.Mul64(x51, 0xffffffff)
+ var x63 uint64
+ var x64 uint64
+ x64, x63 = bits.Mul64(x51, 0xffffffffffffffff)
+ var x65 uint64
+ var x66 uint64
+ x65, x66 = bits.Add64(x64, x61, uint64(0x0))
+ var x68 uint64
+ _, x68 = bits.Add64(x51, x63, uint64(0x0))
+ var x69 uint64
+ var x70 uint64
+ x69, x70 = bits.Add64(x53, x65, uint64(p256Uint1(x68)))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x55, (uint64(p256Uint1(x66)) + x62), uint64(p256Uint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x57, x59, uint64(p256Uint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(((uint64(p256Uint1(x58)) + uint64(p256Uint1(x36))) + (uint64(p256Uint1(x50)) + x38)), x60, uint64(p256Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x78, x77 = bits.Mul64(x2, 0x4fffffffd)
+ var x79 uint64
+ var x80 uint64
+ x80, x79 = bits.Mul64(x2, 0xfffffffffffffffe)
+ var x81 uint64
+ var x82 uint64
+ x82, x81 = bits.Mul64(x2, 0xfffffffbffffffff)
+ var x83 uint64
+ var x84 uint64
+ x84, x83 = bits.Mul64(x2, 0x3)
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x84, x81, uint64(0x0))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x82, x79, uint64(p256Uint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x80, x77, uint64(p256Uint1(x88)))
+ var x91 uint64
+ var x92 uint64
+ x91, x92 = bits.Add64(x69, x83, uint64(0x0))
+ var x93 uint64
+ var x94 uint64
+ x93, x94 = bits.Add64(x71, x85, uint64(p256Uint1(x92)))
+ var x95 uint64
+ var x96 uint64
+ x95, x96 = bits.Add64(x73, x87, uint64(p256Uint1(x94)))
+ var x97 uint64
+ var x98 uint64
+ x97, x98 = bits.Add64(x75, x89, uint64(p256Uint1(x96)))
+ var x99 uint64
+ var x100 uint64
+ x100, x99 = bits.Mul64(x91, 0xffffffff00000001)
+ var x101 uint64
+ var x102 uint64
+ x102, x101 = bits.Mul64(x91, 0xffffffff)
+ var x103 uint64
+ var x104 uint64
+ x104, x103 = bits.Mul64(x91, 0xffffffffffffffff)
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x104, x101, uint64(0x0))
+ var x108 uint64
+ _, x108 = bits.Add64(x91, x103, uint64(0x0))
+ var x109 uint64
+ var x110 uint64
+ x109, x110 = bits.Add64(x93, x105, uint64(p256Uint1(x108)))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x95, (uint64(p256Uint1(x106)) + x102), uint64(p256Uint1(x110)))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x97, x99, uint64(p256Uint1(x112)))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(((uint64(p256Uint1(x98)) + uint64(p256Uint1(x76))) + (uint64(p256Uint1(x90)) + x78)), x100, uint64(p256Uint1(x114)))
+ var x117 uint64
+ var x118 uint64
+ x118, x117 = bits.Mul64(x3, 0x4fffffffd)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x3, 0xfffffffffffffffe)
+ var x121 uint64
+ var x122 uint64
+ x122, x121 = bits.Mul64(x3, 0xfffffffbffffffff)
+ var x123 uint64
+ var x124 uint64
+ x124, x123 = bits.Mul64(x3, 0x3)
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64(x124, x121, uint64(0x0))
+ var x127 uint64
+ var x128 uint64
+ x127, x128 = bits.Add64(x122, x119, uint64(p256Uint1(x126)))
+ var x129 uint64
+ var x130 uint64
+ x129, x130 = bits.Add64(x120, x117, uint64(p256Uint1(x128)))
+ var x131 uint64
+ var x132 uint64
+ x131, x132 = bits.Add64(x109, x123, uint64(0x0))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x111, x125, uint64(p256Uint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x113, x127, uint64(p256Uint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x115, x129, uint64(p256Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x140, x139 = bits.Mul64(x131, 0xffffffff00000001)
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x131, 0xffffffff)
+ var x143 uint64
+ var x144 uint64
+ x144, x143 = bits.Mul64(x131, 0xffffffffffffffff)
+ var x145 uint64
+ var x146 uint64
+ x145, x146 = bits.Add64(x144, x141, uint64(0x0))
+ var x148 uint64
+ _, x148 = bits.Add64(x131, x143, uint64(0x0))
+ var x149 uint64
+ var x150 uint64
+ x149, x150 = bits.Add64(x133, x145, uint64(p256Uint1(x148)))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x135, (uint64(p256Uint1(x146)) + x142), uint64(p256Uint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x137, x139, uint64(p256Uint1(x152)))
+ var x155 uint64
+ var x156 uint64
+ x155, x156 = bits.Add64(((uint64(p256Uint1(x138)) + uint64(p256Uint1(x116))) + (uint64(p256Uint1(x130)) + x118)), x140, uint64(p256Uint1(x154)))
+ var x157 uint64
+ var x158 uint64
+ x157, x158 = bits.Sub64(x149, 0xffffffffffffffff, uint64(0x0))
+ var x159 uint64
+ var x160 uint64
+ x159, x160 = bits.Sub64(x151, 0xffffffff, uint64(p256Uint1(x158)))
+ var x161 uint64
+ var x162 uint64
+ x161, x162 = bits.Sub64(x153, uint64(0x0), uint64(p256Uint1(x160)))
+ var x163 uint64
+ var x164 uint64
+ x163, x164 = bits.Sub64(x155, 0xffffffff00000001, uint64(p256Uint1(x162)))
+ var x166 uint64
+ _, x166 = bits.Sub64(uint64(p256Uint1(x156)), uint64(0x0), uint64(p256Uint1(x164)))
+ var x167 uint64
+ p256CmovznzU64(&x167, p256Uint1(x166), x157, x149)
+ var x168 uint64
+ p256CmovznzU64(&x168, p256Uint1(x166), x159, x151)
+ var x169 uint64
+ p256CmovznzU64(&x169, p256Uint1(x166), x161, x153)
+ var x170 uint64
+ p256CmovznzU64(&x170, p256Uint1(x166), x163, x155)
+ out1[0] = x167
+ out1[1] = x168
+ out1[2] = x169
+ out1[3] = x170
+}
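
The fixed multiplicands fed into bits.Mul64 above (0x3, 0xfffffffbffffffff, 0xfffffffffffffffe, 0x4fffffffd) are, read as little-endian limbs, R^2 mod m for R = 2^256, which suggests this is the word-by-word conversion into the Montgomery domain: mont(x, R^2) = x*R^2*R^(-1) = x*R mod m. A minimal standalone big.Int sketch, independent of the generated file, that reproduces the constant:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        // m is the P-256 prime; R = 2^256 is the Montgomery radix for four
        // 64-bit limbs. R^2 mod m is the constant a to-Montgomery conversion
        // Montgomery-multiplies by.
        m, _ := new(big.Int).SetString("ffffffff00000001000000000000000000000000ffffffffffffffffffffffff", 16)
        r := new(big.Int).Lsh(big.NewInt(1), 256)
        r2 := new(big.Int).Mod(new(big.Int).Mul(r, r), m)
        fmt.Printf("%x\n", r2) // 4fffffffdfffffffffffffffefffffffbffffffff0000000000000003
    }
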
+
+// p256Selectznz is a multi-limb conditional select.
+//
+// Postconditions:
+//
+// eval out1 = (if arg1 = 0 then eval arg2 else eval arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+// arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+func p256Selectznz(out1 *[4]uint64, arg1 p256Uint1, arg2 *[4]uint64, arg3 *[4]uint64) {
+ var x1 uint64
+ p256CmovznzU64(&x1, arg1, arg2[0], arg3[0])
+ var x2 uint64
+ p256CmovznzU64(&x2, arg1, arg2[1], arg3[1])
+ var x3 uint64
+ p256CmovznzU64(&x3, arg1, arg2[2], arg3[2])
+ var x4 uint64
+ p256CmovznzU64(&x4, arg1, arg2[3], arg3[3])
+ out1[0] = x1
+ out1[1] = x2
+ out1[2] = x3
+ out1[3] = x4
+}
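
p256Selectznz is the limb-wise extension of the single-word conditional move: the same branch-free select is applied to each of the four limbs. A standalone sketch of the pattern, assuming only that the selector is 0 or 1:

    package main

    import "fmt"

    // selectznz returns a when cond == 0 and b when cond == 1, without a
    // data-dependent branch: -cond is either all-zero or all-one bits.
    func selectznz(cond uint64, a, b [4]uint64) (out [4]uint64) {
        mask := -cond
        for i := range out {
            out[i] = (mask & b[i]) | (^mask & a[i])
        }
        return
    }

    func main() {
        a := [4]uint64{1, 2, 3, 4}
        b := [4]uint64{5, 6, 7, 8}
        fmt.Println(selectznz(0, a, b)) // [1 2 3 4]
        fmt.Println(selectznz(1, a, b)) // [5 6 7 8]
    }
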
+
+// p256ToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31]
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
+func p256ToBytes(out1 *[32]uint8, arg1 *[4]uint64) {
+ x1 := arg1[3]
+ x2 := arg1[2]
+ x3 := arg1[1]
+ x4 := arg1[0]
+ x5 := (uint8(x4) & 0xff)
+ x6 := (x4 >> 8)
+ x7 := (uint8(x6) & 0xff)
+ x8 := (x6 >> 8)
+ x9 := (uint8(x8) & 0xff)
+ x10 := (x8 >> 8)
+ x11 := (uint8(x10) & 0xff)
+ x12 := (x10 >> 8)
+ x13 := (uint8(x12) & 0xff)
+ x14 := (x12 >> 8)
+ x15 := (uint8(x14) & 0xff)
+ x16 := (x14 >> 8)
+ x17 := (uint8(x16) & 0xff)
+ x18 := uint8((x16 >> 8))
+ x19 := (uint8(x3) & 0xff)
+ x20 := (x3 >> 8)
+ x21 := (uint8(x20) & 0xff)
+ x22 := (x20 >> 8)
+ x23 := (uint8(x22) & 0xff)
+ x24 := (x22 >> 8)
+ x25 := (uint8(x24) & 0xff)
+ x26 := (x24 >> 8)
+ x27 := (uint8(x26) & 0xff)
+ x28 := (x26 >> 8)
+ x29 := (uint8(x28) & 0xff)
+ x30 := (x28 >> 8)
+ x31 := (uint8(x30) & 0xff)
+ x32 := uint8((x30 >> 8))
+ x33 := (uint8(x2) & 0xff)
+ x34 := (x2 >> 8)
+ x35 := (uint8(x34) & 0xff)
+ x36 := (x34 >> 8)
+ x37 := (uint8(x36) & 0xff)
+ x38 := (x36 >> 8)
+ x39 := (uint8(x38) & 0xff)
+ x40 := (x38 >> 8)
+ x41 := (uint8(x40) & 0xff)
+ x42 := (x40 >> 8)
+ x43 := (uint8(x42) & 0xff)
+ x44 := (x42 >> 8)
+ x45 := (uint8(x44) & 0xff)
+ x46 := uint8((x44 >> 8))
+ x47 := (uint8(x1) & 0xff)
+ x48 := (x1 >> 8)
+ x49 := (uint8(x48) & 0xff)
+ x50 := (x48 >> 8)
+ x51 := (uint8(x50) & 0xff)
+ x52 := (x50 >> 8)
+ x53 := (uint8(x52) & 0xff)
+ x54 := (x52 >> 8)
+ x55 := (uint8(x54) & 0xff)
+ x56 := (x54 >> 8)
+ x57 := (uint8(x56) & 0xff)
+ x58 := (x56 >> 8)
+ x59 := (uint8(x58) & 0xff)
+ x60 := uint8((x58 >> 8))
+ out1[0] = x5
+ out1[1] = x7
+ out1[2] = x9
+ out1[3] = x11
+ out1[4] = x13
+ out1[5] = x15
+ out1[6] = x17
+ out1[7] = x18
+ out1[8] = x19
+ out1[9] = x21
+ out1[10] = x23
+ out1[11] = x25
+ out1[12] = x27
+ out1[13] = x29
+ out1[14] = x31
+ out1[15] = x32
+ out1[16] = x33
+ out1[17] = x35
+ out1[18] = x37
+ out1[19] = x39
+ out1[20] = x41
+ out1[21] = x43
+ out1[22] = x45
+ out1[23] = x46
+ out1[24] = x47
+ out1[25] = x49
+ out1[26] = x51
+ out1[27] = x53
+ out1[28] = x55
+ out1[29] = x57
+ out1[30] = x59
+ out1[31] = x60
+}
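
The shift-and-mask ladder above is an unrolled little-endian store: each 64-bit limb lands in its own 8-byte window, least significant limb first. The equivalent loop, written against encoding/binary purely as an illustration:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // toBytes writes four little-endian limbs as 32 little-endian bytes,
    // mirroring what p256ToBytes computes with shifts and masks.
    func toBytes(limbs [4]uint64) (out [32]byte) {
        for i, limb := range limbs {
            binary.LittleEndian.PutUint64(out[8*i:], limb)
        }
        return
    }

    func main() {
        b := toBytes([4]uint64{0x0807060504030201, 0, 0, 0})
        fmt.Printf("% x\n", b[:8]) // 01 02 03 04 05 06 07 08
    }
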
+
+// p256FromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ bytes_eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = bytes_eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+func p256FromBytes(out1 *[4]uint64, arg1 *[32]uint8) {
+ x1 := (uint64(arg1[31]) << 56)
+ x2 := (uint64(arg1[30]) << 48)
+ x3 := (uint64(arg1[29]) << 40)
+ x4 := (uint64(arg1[28]) << 32)
+ x5 := (uint64(arg1[27]) << 24)
+ x6 := (uint64(arg1[26]) << 16)
+ x7 := (uint64(arg1[25]) << 8)
+ x8 := arg1[24]
+ x9 := (uint64(arg1[23]) << 56)
+ x10 := (uint64(arg1[22]) << 48)
+ x11 := (uint64(arg1[21]) << 40)
+ x12 := (uint64(arg1[20]) << 32)
+ x13 := (uint64(arg1[19]) << 24)
+ x14 := (uint64(arg1[18]) << 16)
+ x15 := (uint64(arg1[17]) << 8)
+ x16 := arg1[16]
+ x17 := (uint64(arg1[15]) << 56)
+ x18 := (uint64(arg1[14]) << 48)
+ x19 := (uint64(arg1[13]) << 40)
+ x20 := (uint64(arg1[12]) << 32)
+ x21 := (uint64(arg1[11]) << 24)
+ x22 := (uint64(arg1[10]) << 16)
+ x23 := (uint64(arg1[9]) << 8)
+ x24 := arg1[8]
+ x25 := (uint64(arg1[7]) << 56)
+ x26 := (uint64(arg1[6]) << 48)
+ x27 := (uint64(arg1[5]) << 40)
+ x28 := (uint64(arg1[4]) << 32)
+ x29 := (uint64(arg1[3]) << 24)
+ x30 := (uint64(arg1[2]) << 16)
+ x31 := (uint64(arg1[1]) << 8)
+ x32 := arg1[0]
+ x33 := (x31 + uint64(x32))
+ x34 := (x30 + x33)
+ x35 := (x29 + x34)
+ x36 := (x28 + x35)
+ x37 := (x27 + x36)
+ x38 := (x26 + x37)
+ x39 := (x25 + x38)
+ x40 := (x23 + uint64(x24))
+ x41 := (x22 + x40)
+ x42 := (x21 + x41)
+ x43 := (x20 + x42)
+ x44 := (x19 + x43)
+ x45 := (x18 + x44)
+ x46 := (x17 + x45)
+ x47 := (x15 + uint64(x16))
+ x48 := (x14 + x47)
+ x49 := (x13 + x48)
+ x50 := (x12 + x49)
+ x51 := (x11 + x50)
+ x52 := (x10 + x51)
+ x53 := (x9 + x52)
+ x54 := (x7 + uint64(x8))
+ x55 := (x6 + x54)
+ x56 := (x5 + x55)
+ x57 := (x4 + x56)
+ x58 := (x3 + x57)
+ x59 := (x2 + x58)
+ x60 := (x1 + x59)
+ out1[0] = x39
+ out1[1] = x46
+ out1[2] = x53
+ out1[3] = x60
+}
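
p256FromBytes is the exact inverse ladder: the shifted bytes are summed back into four little-endian limbs. A loop-form sketch of the same layout:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // fromBytes reads 32 little-endian bytes back into four limbs, mirroring
    // the shift-and-add ladder in p256FromBytes.
    func fromBytes(in [32]byte) (out [4]uint64) {
        for i := range out {
            out[i] = binary.LittleEndian.Uint64(in[8*i:])
        }
        return
    }

    func main() {
        var buf [32]byte
        buf[0], buf[8], buf[16], buf[24] = 1, 2, 3, 4
        fmt.Println(fromBytes(buf)) // [1 2 3 4]
    }
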
diff --git a/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p256_invert.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p256_invert.go
new file mode 100644
index 0000000000..d0101e1d4f
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p256_invert.go
@@ -0,0 +1,84 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by addchain. DO NOT EDIT.
+
+package fiat
+
+// Invert sets e = 1/x, and returns e.
+//
+// If x == 0, Invert returns e = 0.
+func (e *P256Element) Invert(x *P256Element) *P256Element {
+ // Inversion is implemented as exponentiation with exponent p − 2.
+ // The sequence of 12 multiplications and 255 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain v0.4.0.
+ //
+ // _10 = 2*1
+ // _11 = 1 + _10
+ // _110 = 2*_11
+ // _111 = 1 + _110
+ // _111000 = _111 << 3
+ // _111111 = _111 + _111000
+ // x12 = _111111 << 6 + _111111
+ // x15 = x12 << 3 + _111
+ // x16 = 2*x15 + 1
+ // x32 = x16 << 16 + x16
+ // i53 = x32 << 15
+ // x47 = x15 + i53
+ // i263 = ((i53 << 17 + 1) << 143 + x47) << 47
+ // return (x47 + i263) << 2 + 1
+ //
+
+ var z = new(P256Element).Set(e)
+ var t0 = new(P256Element)
+ var t1 = new(P256Element)
+
+ z.Square(x)
+ z.Mul(x, z)
+ z.Square(z)
+ z.Mul(x, z)
+ t0.Square(z)
+ for s := 1; s < 3; s++ {
+ t0.Square(t0)
+ }
+ t0.Mul(z, t0)
+ t1.Square(t0)
+ for s := 1; s < 6; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ for s := 0; s < 3; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ t0.Mul(x, t0)
+ t1.Square(t0)
+ for s := 1; s < 16; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ for s := 0; s < 15; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ for s := 0; s < 17; s++ {
+ t0.Square(t0)
+ }
+ t0.Mul(x, t0)
+ for s := 0; s < 143; s++ {
+ t0.Square(t0)
+ }
+ t0.Mul(z, t0)
+ for s := 0; s < 47; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ for s := 0; s < 2; s++ {
+ z.Square(z)
+ }
+ z.Mul(x, z)
+
+ return e.Set(z)
+}
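
The addition chain is only an operation-count optimization; semantically Invert raises x to p - 2, the Fermat inverse for a prime modulus, and the fixed 255 squarings plus 12 multiplications replace generic square-and-multiply over the 256-bit exponent. A direct (non-constant-time) big.Int statement of the same identity:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        // Fermat: for prime p and x not divisible by p, x^(p-2) * x == 1 (mod p).
        p, _ := new(big.Int).SetString("ffffffff00000001000000000000000000000000ffffffffffffffffffffffff", 16)
        x := big.NewInt(12345)
        inv := new(big.Int).Exp(x, new(big.Int).Sub(p, big.NewInt(2)), p)
        fmt.Println(new(big.Int).Mod(new(big.Int).Mul(x, inv), p)) // 1
    }
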
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p384.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p384.go
index 5474d77d48..5474d77d48 100644
--- a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p384.go
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p384.go
diff --git a/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p384_fiat64.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p384_fiat64.go
new file mode 100644
index 0000000000..979eadd2df
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p384_fiat64.go
@@ -0,0 +1,3036 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --no-wide-int --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --internal-static --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name fiat --no-prefix-fiat p384 64 '2^384 - 2^128 - 2^96 + 2^32 - 1' mul square add sub one from_montgomery to_montgomery selectznz to_bytes from_bytes
+//
+// curve description: p384
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, square, add, sub, one, from_montgomery, to_montgomery, selectznz, to_bytes, from_bytes
+//
+// m = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff (from "2^384 - 2^128 - 2^96 + 2^32 - 1")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+//
+// functions synthesized for this Montgomery arithmetic require the
+//
+// input to be strictly less than the prime modulus (m), and also
+//
+// require the input to be in the unique saturated representation.
+//
+// All functions also ensure that these two properties are true of
+//
+// return values.
+//
+//
+//
+// Computed values:
+//
+// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) + (z[4] << 256) + (z[5] << 0x140)
+//
+// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248) + (z[32] << 256) + (z[33] << 0x108) + (z[34] << 0x110) + (z[35] << 0x118) + (z[36] << 0x120) + (z[37] << 0x128) + (z[38] << 0x130) + (z[39] << 0x138) + (z[40] << 0x140) + (z[41] << 0x148) + (z[42] << 0x150) + (z[43] << 0x158) + (z[44] << 0x160) + (z[45] << 0x168) + (z[46] << 0x170) + (z[47] << 0x178)
+//
+// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) + (z[4] << 256) + (z[5] << 0x140) in
+//
+// if x1 & (2^384-1) < 2^383 then x1 & (2^384-1) else (x1 & (2^384-1)) - 2^384
+
+package fiat
+
+import "math/bits"
+
+type p384Uint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type p384Int1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type p384MontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p384MontgomeryDomainFieldElement [6]uint64
+
+// The type p384NonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p384NonMontgomeryDomainFieldElement [6]uint64
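
The modulus in the header is easy to double-check: the hex constant for m is exactly 2^384 - 2^128 - 2^96 + 2^32 - 1. A minimal big.Int sketch:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        pow := func(k uint) *big.Int { return new(big.Int).Lsh(big.NewInt(1), k) }
        m := new(big.Int).Sub(pow(384), pow(128))
        m.Sub(m, pow(96))
        m.Add(m, pow(32))
        m.Sub(m, big.NewInt(1))
        fmt.Printf("%x\n", m)
        // fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff
    }
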
+
+// p384CmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+//
+// out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [0x0 ~> 0xffffffffffffffff]
+// arg3: [0x0 ~> 0xffffffffffffffff]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func p384CmovznzU64(out1 *uint64, arg1 p384Uint1, arg2 uint64, arg3 uint64) {
+ x1 := (uint64(arg1) * 0xffffffffffffffff)
+ x2 := ((x1 & arg3) | ((^x1) & arg2))
+ *out1 = x2
+}
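
The multiply by 0xffffffffffffffff is a branch-free way to stretch the 1-bit selector into a full-width mask: for a 0/1 input it equals two's-complement negation of arg1, so the mask is all zeros or all ones and the OR of the two masked operands is the selected value. A small demonstration of the identity:

    package main

    import "fmt"

    func main() {
        for _, b := range []uint64{0, 1} {
            mask := b * 0xffffffffffffffff // same as -b for a 0/1 input
            fmt.Printf("b=%d mask=%016x mask==-b: %v\n", b, mask, mask == -b)
        }
    }
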
+
+// p384Mul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p384Mul(out1 *p384MontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement, arg2 *p384MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[4]
+ x5 := arg1[5]
+ x6 := arg1[0]
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x6, arg2[5])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x6, arg2[4])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x6, arg2[3])
+ var x13 uint64
+ var x14 uint64
+ x14, x13 = bits.Mul64(x6, arg2[2])
+ var x15 uint64
+ var x16 uint64
+ x16, x15 = bits.Mul64(x6, arg2[1])
+ var x17 uint64
+ var x18 uint64
+ x18, x17 = bits.Mul64(x6, arg2[0])
+ var x19 uint64
+ var x20 uint64
+ x19, x20 = bits.Add64(x18, x15, uint64(0x0))
+ var x21 uint64
+ var x22 uint64
+ x21, x22 = bits.Add64(x16, x13, uint64(p384Uint1(x20)))
+ var x23 uint64
+ var x24 uint64
+ x23, x24 = bits.Add64(x14, x11, uint64(p384Uint1(x22)))
+ var x25 uint64
+ var x26 uint64
+ x25, x26 = bits.Add64(x12, x9, uint64(p384Uint1(x24)))
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Add64(x10, x7, uint64(p384Uint1(x26)))
+ x29 := (uint64(p384Uint1(x28)) + x8)
+ var x30 uint64
+ _, x30 = bits.Mul64(x17, 0x100000001)
+ var x32 uint64
+ var x33 uint64
+ x33, x32 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x34 uint64
+ var x35 uint64
+ x35, x34 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x36 uint64
+ var x37 uint64
+ x37, x36 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x38 uint64
+ var x39 uint64
+ x39, x38 = bits.Mul64(x30, 0xfffffffffffffffe)
+ var x40 uint64
+ var x41 uint64
+ x41, x40 = bits.Mul64(x30, 0xffffffff00000000)
+ var x42 uint64
+ var x43 uint64
+ x43, x42 = bits.Mul64(x30, 0xffffffff)
+ var x44 uint64
+ var x45 uint64
+ x44, x45 = bits.Add64(x43, x40, uint64(0x0))
+ var x46 uint64
+ var x47 uint64
+ x46, x47 = bits.Add64(x41, x38, uint64(p384Uint1(x45)))
+ var x48 uint64
+ var x49 uint64
+ x48, x49 = bits.Add64(x39, x36, uint64(p384Uint1(x47)))
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x37, x34, uint64(p384Uint1(x49)))
+ var x52 uint64
+ var x53 uint64
+ x52, x53 = bits.Add64(x35, x32, uint64(p384Uint1(x51)))
+ x54 := (uint64(p384Uint1(x53)) + x33)
+ var x56 uint64
+ _, x56 = bits.Add64(x17, x42, uint64(0x0))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(x19, x44, uint64(p384Uint1(x56)))
+ var x59 uint64
+ var x60 uint64
+ x59, x60 = bits.Add64(x21, x46, uint64(p384Uint1(x58)))
+ var x61 uint64
+ var x62 uint64
+ x61, x62 = bits.Add64(x23, x48, uint64(p384Uint1(x60)))
+ var x63 uint64
+ var x64 uint64
+ x63, x64 = bits.Add64(x25, x50, uint64(p384Uint1(x62)))
+ var x65 uint64
+ var x66 uint64
+ x65, x66 = bits.Add64(x27, x52, uint64(p384Uint1(x64)))
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x29, x54, uint64(p384Uint1(x66)))
+ var x69 uint64
+ var x70 uint64
+ x70, x69 = bits.Mul64(x1, arg2[5])
+ var x71 uint64
+ var x72 uint64
+ x72, x71 = bits.Mul64(x1, arg2[4])
+ var x73 uint64
+ var x74 uint64
+ x74, x73 = bits.Mul64(x1, arg2[3])
+ var x75 uint64
+ var x76 uint64
+ x76, x75 = bits.Mul64(x1, arg2[2])
+ var x77 uint64
+ var x78 uint64
+ x78, x77 = bits.Mul64(x1, arg2[1])
+ var x79 uint64
+ var x80 uint64
+ x80, x79 = bits.Mul64(x1, arg2[0])
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x80, x77, uint64(0x0))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x78, x75, uint64(p384Uint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x76, x73, uint64(p384Uint1(x84)))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x74, x71, uint64(p384Uint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x72, x69, uint64(p384Uint1(x88)))
+ x91 := (uint64(p384Uint1(x90)) + x70)
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x57, x79, uint64(0x0))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x59, x81, uint64(p384Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x61, x83, uint64(p384Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x63, x85, uint64(p384Uint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x65, x87, uint64(p384Uint1(x99)))
+ var x102 uint64
+ var x103 uint64
+ x102, x103 = bits.Add64(x67, x89, uint64(p384Uint1(x101)))
+ var x104 uint64
+ var x105 uint64
+ x104, x105 = bits.Add64(uint64(p384Uint1(x68)), x91, uint64(p384Uint1(x103)))
+ var x106 uint64
+ _, x106 = bits.Mul64(x92, 0x100000001)
+ var x108 uint64
+ var x109 uint64
+ x109, x108 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x110 uint64
+ var x111 uint64
+ x111, x110 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x112 uint64
+ var x113 uint64
+ x113, x112 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x114 uint64
+ var x115 uint64
+ x115, x114 = bits.Mul64(x106, 0xfffffffffffffffe)
+ var x116 uint64
+ var x117 uint64
+ x117, x116 = bits.Mul64(x106, 0xffffffff00000000)
+ var x118 uint64
+ var x119 uint64
+ x119, x118 = bits.Mul64(x106, 0xffffffff)
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x119, x116, uint64(0x0))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x117, x114, uint64(p384Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x115, x112, uint64(p384Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x113, x110, uint64(p384Uint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x111, x108, uint64(p384Uint1(x127)))
+ x130 := (uint64(p384Uint1(x129)) + x109)
+ var x132 uint64
+ _, x132 = bits.Add64(x92, x118, uint64(0x0))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x94, x120, uint64(p384Uint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x96, x122, uint64(p384Uint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x98, x124, uint64(p384Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x100, x126, uint64(p384Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x102, x128, uint64(p384Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x104, x130, uint64(p384Uint1(x142)))
+ x145 := (uint64(p384Uint1(x144)) + uint64(p384Uint1(x105)))
+ var x146 uint64
+ var x147 uint64
+ x147, x146 = bits.Mul64(x2, arg2[5])
+ var x148 uint64
+ var x149 uint64
+ x149, x148 = bits.Mul64(x2, arg2[4])
+ var x150 uint64
+ var x151 uint64
+ x151, x150 = bits.Mul64(x2, arg2[3])
+ var x152 uint64
+ var x153 uint64
+ x153, x152 = bits.Mul64(x2, arg2[2])
+ var x154 uint64
+ var x155 uint64
+ x155, x154 = bits.Mul64(x2, arg2[1])
+ var x156 uint64
+ var x157 uint64
+ x157, x156 = bits.Mul64(x2, arg2[0])
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x157, x154, uint64(0x0))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x155, x152, uint64(p384Uint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Add64(x153, x150, uint64(p384Uint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Add64(x151, x148, uint64(p384Uint1(x163)))
+ var x166 uint64
+ var x167 uint64
+ x166, x167 = bits.Add64(x149, x146, uint64(p384Uint1(x165)))
+ x168 := (uint64(p384Uint1(x167)) + x147)
+ var x169 uint64
+ var x170 uint64
+ x169, x170 = bits.Add64(x133, x156, uint64(0x0))
+ var x171 uint64
+ var x172 uint64
+ x171, x172 = bits.Add64(x135, x158, uint64(p384Uint1(x170)))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x137, x160, uint64(p384Uint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x139, x162, uint64(p384Uint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x141, x164, uint64(p384Uint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x143, x166, uint64(p384Uint1(x178)))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x145, x168, uint64(p384Uint1(x180)))
+ var x183 uint64
+ _, x183 = bits.Mul64(x169, 0x100000001)
+ var x185 uint64
+ var x186 uint64
+ x186, x185 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x187 uint64
+ var x188 uint64
+ x188, x187 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x189 uint64
+ var x190 uint64
+ x190, x189 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x191 uint64
+ var x192 uint64
+ x192, x191 = bits.Mul64(x183, 0xfffffffffffffffe)
+ var x193 uint64
+ var x194 uint64
+ x194, x193 = bits.Mul64(x183, 0xffffffff00000000)
+ var x195 uint64
+ var x196 uint64
+ x196, x195 = bits.Mul64(x183, 0xffffffff)
+ var x197 uint64
+ var x198 uint64
+ x197, x198 = bits.Add64(x196, x193, uint64(0x0))
+ var x199 uint64
+ var x200 uint64
+ x199, x200 = bits.Add64(x194, x191, uint64(p384Uint1(x198)))
+ var x201 uint64
+ var x202 uint64
+ x201, x202 = bits.Add64(x192, x189, uint64(p384Uint1(x200)))
+ var x203 uint64
+ var x204 uint64
+ x203, x204 = bits.Add64(x190, x187, uint64(p384Uint1(x202)))
+ var x205 uint64
+ var x206 uint64
+ x205, x206 = bits.Add64(x188, x185, uint64(p384Uint1(x204)))
+ x207 := (uint64(p384Uint1(x206)) + x186)
+ var x209 uint64
+ _, x209 = bits.Add64(x169, x195, uint64(0x0))
+ var x210 uint64
+ var x211 uint64
+ x210, x211 = bits.Add64(x171, x197, uint64(p384Uint1(x209)))
+ var x212 uint64
+ var x213 uint64
+ x212, x213 = bits.Add64(x173, x199, uint64(p384Uint1(x211)))
+ var x214 uint64
+ var x215 uint64
+ x214, x215 = bits.Add64(x175, x201, uint64(p384Uint1(x213)))
+ var x216 uint64
+ var x217 uint64
+ x216, x217 = bits.Add64(x177, x203, uint64(p384Uint1(x215)))
+ var x218 uint64
+ var x219 uint64
+ x218, x219 = bits.Add64(x179, x205, uint64(p384Uint1(x217)))
+ var x220 uint64
+ var x221 uint64
+ x220, x221 = bits.Add64(x181, x207, uint64(p384Uint1(x219)))
+ x222 := (uint64(p384Uint1(x221)) + uint64(p384Uint1(x182)))
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x3, arg2[5])
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x3, arg2[4])
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x3, arg2[3])
+ var x229 uint64
+ var x230 uint64
+ x230, x229 = bits.Mul64(x3, arg2[2])
+ var x231 uint64
+ var x232 uint64
+ x232, x231 = bits.Mul64(x3, arg2[1])
+ var x233 uint64
+ var x234 uint64
+ x234, x233 = bits.Mul64(x3, arg2[0])
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x234, x231, uint64(0x0))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x232, x229, uint64(p384Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x230, x227, uint64(p384Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x228, x225, uint64(p384Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x226, x223, uint64(p384Uint1(x242)))
+ x245 := (uint64(p384Uint1(x244)) + x224)
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x210, x233, uint64(0x0))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x212, x235, uint64(p384Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x214, x237, uint64(p384Uint1(x249)))
+ var x252 uint64
+ var x253 uint64
+ x252, x253 = bits.Add64(x216, x239, uint64(p384Uint1(x251)))
+ var x254 uint64
+ var x255 uint64
+ x254, x255 = bits.Add64(x218, x241, uint64(p384Uint1(x253)))
+ var x256 uint64
+ var x257 uint64
+ x256, x257 = bits.Add64(x220, x243, uint64(p384Uint1(x255)))
+ var x258 uint64
+ var x259 uint64
+ x258, x259 = bits.Add64(x222, x245, uint64(p384Uint1(x257)))
+ var x260 uint64
+ _, x260 = bits.Mul64(x246, 0x100000001)
+ var x262 uint64
+ var x263 uint64
+ x263, x262 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x264 uint64
+ var x265 uint64
+ x265, x264 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x266 uint64
+ var x267 uint64
+ x267, x266 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x268 uint64
+ var x269 uint64
+ x269, x268 = bits.Mul64(x260, 0xfffffffffffffffe)
+ var x270 uint64
+ var x271 uint64
+ x271, x270 = bits.Mul64(x260, 0xffffffff00000000)
+ var x272 uint64
+ var x273 uint64
+ x273, x272 = bits.Mul64(x260, 0xffffffff)
+ var x274 uint64
+ var x275 uint64
+ x274, x275 = bits.Add64(x273, x270, uint64(0x0))
+ var x276 uint64
+ var x277 uint64
+ x276, x277 = bits.Add64(x271, x268, uint64(p384Uint1(x275)))
+ var x278 uint64
+ var x279 uint64
+ x278, x279 = bits.Add64(x269, x266, uint64(p384Uint1(x277)))
+ var x280 uint64
+ var x281 uint64
+ x280, x281 = bits.Add64(x267, x264, uint64(p384Uint1(x279)))
+ var x282 uint64
+ var x283 uint64
+ x282, x283 = bits.Add64(x265, x262, uint64(p384Uint1(x281)))
+ x284 := (uint64(p384Uint1(x283)) + x263)
+ var x286 uint64
+ _, x286 = bits.Add64(x246, x272, uint64(0x0))
+ var x287 uint64
+ var x288 uint64
+ x287, x288 = bits.Add64(x248, x274, uint64(p384Uint1(x286)))
+ var x289 uint64
+ var x290 uint64
+ x289, x290 = bits.Add64(x250, x276, uint64(p384Uint1(x288)))
+ var x291 uint64
+ var x292 uint64
+ x291, x292 = bits.Add64(x252, x278, uint64(p384Uint1(x290)))
+ var x293 uint64
+ var x294 uint64
+ x293, x294 = bits.Add64(x254, x280, uint64(p384Uint1(x292)))
+ var x295 uint64
+ var x296 uint64
+ x295, x296 = bits.Add64(x256, x282, uint64(p384Uint1(x294)))
+ var x297 uint64
+ var x298 uint64
+ x297, x298 = bits.Add64(x258, x284, uint64(p384Uint1(x296)))
+ x299 := (uint64(p384Uint1(x298)) + uint64(p384Uint1(x259)))
+ var x300 uint64
+ var x301 uint64
+ x301, x300 = bits.Mul64(x4, arg2[5])
+ var x302 uint64
+ var x303 uint64
+ x303, x302 = bits.Mul64(x4, arg2[4])
+ var x304 uint64
+ var x305 uint64
+ x305, x304 = bits.Mul64(x4, arg2[3])
+ var x306 uint64
+ var x307 uint64
+ x307, x306 = bits.Mul64(x4, arg2[2])
+ var x308 uint64
+ var x309 uint64
+ x309, x308 = bits.Mul64(x4, arg2[1])
+ var x310 uint64
+ var x311 uint64
+ x311, x310 = bits.Mul64(x4, arg2[0])
+ var x312 uint64
+ var x313 uint64
+ x312, x313 = bits.Add64(x311, x308, uint64(0x0))
+ var x314 uint64
+ var x315 uint64
+ x314, x315 = bits.Add64(x309, x306, uint64(p384Uint1(x313)))
+ var x316 uint64
+ var x317 uint64
+ x316, x317 = bits.Add64(x307, x304, uint64(p384Uint1(x315)))
+ var x318 uint64
+ var x319 uint64
+ x318, x319 = bits.Add64(x305, x302, uint64(p384Uint1(x317)))
+ var x320 uint64
+ var x321 uint64
+ x320, x321 = bits.Add64(x303, x300, uint64(p384Uint1(x319)))
+ x322 := (uint64(p384Uint1(x321)) + x301)
+ var x323 uint64
+ var x324 uint64
+ x323, x324 = bits.Add64(x287, x310, uint64(0x0))
+ var x325 uint64
+ var x326 uint64
+ x325, x326 = bits.Add64(x289, x312, uint64(p384Uint1(x324)))
+ var x327 uint64
+ var x328 uint64
+ x327, x328 = bits.Add64(x291, x314, uint64(p384Uint1(x326)))
+ var x329 uint64
+ var x330 uint64
+ x329, x330 = bits.Add64(x293, x316, uint64(p384Uint1(x328)))
+ var x331 uint64
+ var x332 uint64
+ x331, x332 = bits.Add64(x295, x318, uint64(p384Uint1(x330)))
+ var x333 uint64
+ var x334 uint64
+ x333, x334 = bits.Add64(x297, x320, uint64(p384Uint1(x332)))
+ var x335 uint64
+ var x336 uint64
+ x335, x336 = bits.Add64(x299, x322, uint64(p384Uint1(x334)))
+ var x337 uint64
+ _, x337 = bits.Mul64(x323, 0x100000001)
+ var x339 uint64
+ var x340 uint64
+ x340, x339 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x341 uint64
+ var x342 uint64
+ x342, x341 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x343 uint64
+ var x344 uint64
+ x344, x343 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x345 uint64
+ var x346 uint64
+ x346, x345 = bits.Mul64(x337, 0xfffffffffffffffe)
+ var x347 uint64
+ var x348 uint64
+ x348, x347 = bits.Mul64(x337, 0xffffffff00000000)
+ var x349 uint64
+ var x350 uint64
+ x350, x349 = bits.Mul64(x337, 0xffffffff)
+ var x351 uint64
+ var x352 uint64
+ x351, x352 = bits.Add64(x350, x347, uint64(0x0))
+ var x353 uint64
+ var x354 uint64
+ x353, x354 = bits.Add64(x348, x345, uint64(p384Uint1(x352)))
+ var x355 uint64
+ var x356 uint64
+ x355, x356 = bits.Add64(x346, x343, uint64(p384Uint1(x354)))
+ var x357 uint64
+ var x358 uint64
+ x357, x358 = bits.Add64(x344, x341, uint64(p384Uint1(x356)))
+ var x359 uint64
+ var x360 uint64
+ x359, x360 = bits.Add64(x342, x339, uint64(p384Uint1(x358)))
+ x361 := (uint64(p384Uint1(x360)) + x340)
+ var x363 uint64
+ _, x363 = bits.Add64(x323, x349, uint64(0x0))
+ var x364 uint64
+ var x365 uint64
+ x364, x365 = bits.Add64(x325, x351, uint64(p384Uint1(x363)))
+ var x366 uint64
+ var x367 uint64
+ x366, x367 = bits.Add64(x327, x353, uint64(p384Uint1(x365)))
+ var x368 uint64
+ var x369 uint64
+ x368, x369 = bits.Add64(x329, x355, uint64(p384Uint1(x367)))
+ var x370 uint64
+ var x371 uint64
+ x370, x371 = bits.Add64(x331, x357, uint64(p384Uint1(x369)))
+ var x372 uint64
+ var x373 uint64
+ x372, x373 = bits.Add64(x333, x359, uint64(p384Uint1(x371)))
+ var x374 uint64
+ var x375 uint64
+ x374, x375 = bits.Add64(x335, x361, uint64(p384Uint1(x373)))
+ x376 := (uint64(p384Uint1(x375)) + uint64(p384Uint1(x336)))
+ var x377 uint64
+ var x378 uint64
+ x378, x377 = bits.Mul64(x5, arg2[5])
+ var x379 uint64
+ var x380 uint64
+ x380, x379 = bits.Mul64(x5, arg2[4])
+ var x381 uint64
+ var x382 uint64
+ x382, x381 = bits.Mul64(x5, arg2[3])
+ var x383 uint64
+ var x384 uint64
+ x384, x383 = bits.Mul64(x5, arg2[2])
+ var x385 uint64
+ var x386 uint64
+ x386, x385 = bits.Mul64(x5, arg2[1])
+ var x387 uint64
+ var x388 uint64
+ x388, x387 = bits.Mul64(x5, arg2[0])
+ var x389 uint64
+ var x390 uint64
+ x389, x390 = bits.Add64(x388, x385, uint64(0x0))
+ var x391 uint64
+ var x392 uint64
+ x391, x392 = bits.Add64(x386, x383, uint64(p384Uint1(x390)))
+ var x393 uint64
+ var x394 uint64
+ x393, x394 = bits.Add64(x384, x381, uint64(p384Uint1(x392)))
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Add64(x382, x379, uint64(p384Uint1(x394)))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Add64(x380, x377, uint64(p384Uint1(x396)))
+ x399 := (uint64(p384Uint1(x398)) + x378)
+ var x400 uint64
+ var x401 uint64
+ x400, x401 = bits.Add64(x364, x387, uint64(0x0))
+ var x402 uint64
+ var x403 uint64
+ x402, x403 = bits.Add64(x366, x389, uint64(p384Uint1(x401)))
+ var x404 uint64
+ var x405 uint64
+ x404, x405 = bits.Add64(x368, x391, uint64(p384Uint1(x403)))
+ var x406 uint64
+ var x407 uint64
+ x406, x407 = bits.Add64(x370, x393, uint64(p384Uint1(x405)))
+ var x408 uint64
+ var x409 uint64
+ x408, x409 = bits.Add64(x372, x395, uint64(p384Uint1(x407)))
+ var x410 uint64
+ var x411 uint64
+ x410, x411 = bits.Add64(x374, x397, uint64(p384Uint1(x409)))
+ var x412 uint64
+ var x413 uint64
+ x412, x413 = bits.Add64(x376, x399, uint64(p384Uint1(x411)))
+ var x414 uint64
+ _, x414 = bits.Mul64(x400, 0x100000001)
+ var x416 uint64
+ var x417 uint64
+ x417, x416 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x418 uint64
+ var x419 uint64
+ x419, x418 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x420 uint64
+ var x421 uint64
+ x421, x420 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x422 uint64
+ var x423 uint64
+ x423, x422 = bits.Mul64(x414, 0xfffffffffffffffe)
+ var x424 uint64
+ var x425 uint64
+ x425, x424 = bits.Mul64(x414, 0xffffffff00000000)
+ var x426 uint64
+ var x427 uint64
+ x427, x426 = bits.Mul64(x414, 0xffffffff)
+ var x428 uint64
+ var x429 uint64
+ x428, x429 = bits.Add64(x427, x424, uint64(0x0))
+ var x430 uint64
+ var x431 uint64
+ x430, x431 = bits.Add64(x425, x422, uint64(p384Uint1(x429)))
+ var x432 uint64
+ var x433 uint64
+ x432, x433 = bits.Add64(x423, x420, uint64(p384Uint1(x431)))
+ var x434 uint64
+ var x435 uint64
+ x434, x435 = bits.Add64(x421, x418, uint64(p384Uint1(x433)))
+ var x436 uint64
+ var x437 uint64
+ x436, x437 = bits.Add64(x419, x416, uint64(p384Uint1(x435)))
+ x438 := (uint64(p384Uint1(x437)) + x417)
+ var x440 uint64
+ _, x440 = bits.Add64(x400, x426, uint64(0x0))
+ var x441 uint64
+ var x442 uint64
+ x441, x442 = bits.Add64(x402, x428, uint64(p384Uint1(x440)))
+ var x443 uint64
+ var x444 uint64
+ x443, x444 = bits.Add64(x404, x430, uint64(p384Uint1(x442)))
+ var x445 uint64
+ var x446 uint64
+ x445, x446 = bits.Add64(x406, x432, uint64(p384Uint1(x444)))
+ var x447 uint64
+ var x448 uint64
+ x447, x448 = bits.Add64(x408, x434, uint64(p384Uint1(x446)))
+ var x449 uint64
+ var x450 uint64
+ x449, x450 = bits.Add64(x410, x436, uint64(p384Uint1(x448)))
+ var x451 uint64
+ var x452 uint64
+ x451, x452 = bits.Add64(x412, x438, uint64(p384Uint1(x450)))
+ x453 := (uint64(p384Uint1(x452)) + uint64(p384Uint1(x413)))
+ var x454 uint64
+ var x455 uint64
+ x454, x455 = bits.Sub64(x441, 0xffffffff, uint64(0x0))
+ var x456 uint64
+ var x457 uint64
+ x456, x457 = bits.Sub64(x443, 0xffffffff00000000, uint64(p384Uint1(x455)))
+ var x458 uint64
+ var x459 uint64
+ x458, x459 = bits.Sub64(x445, 0xfffffffffffffffe, uint64(p384Uint1(x457)))
+ var x460 uint64
+ var x461 uint64
+ x460, x461 = bits.Sub64(x447, 0xffffffffffffffff, uint64(p384Uint1(x459)))
+ var x462 uint64
+ var x463 uint64
+ x462, x463 = bits.Sub64(x449, 0xffffffffffffffff, uint64(p384Uint1(x461)))
+ var x464 uint64
+ var x465 uint64
+ x464, x465 = bits.Sub64(x451, 0xffffffffffffffff, uint64(p384Uint1(x463)))
+ var x467 uint64
+ _, x467 = bits.Sub64(x453, uint64(0x0), uint64(p384Uint1(x465)))
+ var x468 uint64
+ p384CmovznzU64(&x468, p384Uint1(x467), x454, x441)
+ var x469 uint64
+ p384CmovznzU64(&x469, p384Uint1(x467), x456, x443)
+ var x470 uint64
+ p384CmovznzU64(&x470, p384Uint1(x467), x458, x445)
+ var x471 uint64
+ p384CmovznzU64(&x471, p384Uint1(x467), x460, x447)
+ var x472 uint64
+ p384CmovznzU64(&x472, p384Uint1(x467), x462, x449)
+ var x473 uint64
+ p384CmovznzU64(&x473, p384Uint1(x467), x464, x451)
+ out1[0] = x468
+ out1[1] = x469
+ out1[2] = x470
+ out1[3] = x471
+ out1[4] = x472
+ out1[5] = x473
+}
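
Two facts make the unrolled body of p384Mul work. First, 0x100000001 is -m^(-1) mod 2^64 (the low limb of m is 0xffffffff, and 0xffffffff * 0x100000001 = 2^64 - 1), so taking the low word of a running limb times 0x100000001 yields the per-word Montgomery reduction factor. Second, the function as a whole computes the Montgomery product a*b*R^(-1) mod m with R = 2^384, which keeps operands in the Montgomery domain. A (non-constant-time) big.Int model of that contract:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        m, _ := new(big.Int).SetString("fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", 16)
        r := new(big.Int).Lsh(big.NewInt(1), 384) // R = 2^384 for six 64-bit limbs
        rInv := new(big.Int).ModInverse(r, m)
        mont := func(a, b *big.Int) *big.Int { // the contract of p384Mul: a*b*R^-1 mod m
            t := new(big.Int).Mul(a, b)
            return t.Mod(t.Mul(t, rInv), m)
        }
        a, b := big.NewInt(11111), big.NewInt(22222)
        aR := new(big.Int).Mod(new(big.Int).Mul(a, r), m) // into the Montgomery domain
        bR := new(big.Int).Mod(new(big.Int).Mul(b, r), m)
        abR := mont(aR, bR)            // the product stays in the domain
        ab := mont(abR, big.NewInt(1)) // one more reduction leaves the domain
        fmt.Println(ab.Cmp(new(big.Int).Mul(a, b)) == 0) // true
    }
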
+
+// p384Square squares a field element in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg1)) mod m
+// 0 ≤ eval out1 < m
+func p384Square(out1 *p384MontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[4]
+ x5 := arg1[5]
+ x6 := arg1[0]
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x6, arg1[5])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x6, arg1[4])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x6, arg1[3])
+ var x13 uint64
+ var x14 uint64
+ x14, x13 = bits.Mul64(x6, arg1[2])
+ var x15 uint64
+ var x16 uint64
+ x16, x15 = bits.Mul64(x6, arg1[1])
+ var x17 uint64
+ var x18 uint64
+ x18, x17 = bits.Mul64(x6, arg1[0])
+ var x19 uint64
+ var x20 uint64
+ x19, x20 = bits.Add64(x18, x15, uint64(0x0))
+ var x21 uint64
+ var x22 uint64
+ x21, x22 = bits.Add64(x16, x13, uint64(p384Uint1(x20)))
+ var x23 uint64
+ var x24 uint64
+ x23, x24 = bits.Add64(x14, x11, uint64(p384Uint1(x22)))
+ var x25 uint64
+ var x26 uint64
+ x25, x26 = bits.Add64(x12, x9, uint64(p384Uint1(x24)))
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Add64(x10, x7, uint64(p384Uint1(x26)))
+ x29 := (uint64(p384Uint1(x28)) + x8)
+ var x30 uint64
+ _, x30 = bits.Mul64(x17, 0x100000001)
+ var x32 uint64
+ var x33 uint64
+ x33, x32 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x34 uint64
+ var x35 uint64
+ x35, x34 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x36 uint64
+ var x37 uint64
+ x37, x36 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x38 uint64
+ var x39 uint64
+ x39, x38 = bits.Mul64(x30, 0xfffffffffffffffe)
+ var x40 uint64
+ var x41 uint64
+ x41, x40 = bits.Mul64(x30, 0xffffffff00000000)
+ var x42 uint64
+ var x43 uint64
+ x43, x42 = bits.Mul64(x30, 0xffffffff)
+ var x44 uint64
+ var x45 uint64
+ x44, x45 = bits.Add64(x43, x40, uint64(0x0))
+ var x46 uint64
+ var x47 uint64
+ x46, x47 = bits.Add64(x41, x38, uint64(p384Uint1(x45)))
+ var x48 uint64
+ var x49 uint64
+ x48, x49 = bits.Add64(x39, x36, uint64(p384Uint1(x47)))
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x37, x34, uint64(p384Uint1(x49)))
+ var x52 uint64
+ var x53 uint64
+ x52, x53 = bits.Add64(x35, x32, uint64(p384Uint1(x51)))
+ x54 := (uint64(p384Uint1(x53)) + x33)
+ var x56 uint64
+ _, x56 = bits.Add64(x17, x42, uint64(0x0))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(x19, x44, uint64(p384Uint1(x56)))
+ var x59 uint64
+ var x60 uint64
+ x59, x60 = bits.Add64(x21, x46, uint64(p384Uint1(x58)))
+ var x61 uint64
+ var x62 uint64
+ x61, x62 = bits.Add64(x23, x48, uint64(p384Uint1(x60)))
+ var x63 uint64
+ var x64 uint64
+ x63, x64 = bits.Add64(x25, x50, uint64(p384Uint1(x62)))
+ var x65 uint64
+ var x66 uint64
+ x65, x66 = bits.Add64(x27, x52, uint64(p384Uint1(x64)))
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x29, x54, uint64(p384Uint1(x66)))
+ var x69 uint64
+ var x70 uint64
+ x70, x69 = bits.Mul64(x1, arg1[5])
+ var x71 uint64
+ var x72 uint64
+ x72, x71 = bits.Mul64(x1, arg1[4])
+ var x73 uint64
+ var x74 uint64
+ x74, x73 = bits.Mul64(x1, arg1[3])
+ var x75 uint64
+ var x76 uint64
+ x76, x75 = bits.Mul64(x1, arg1[2])
+ var x77 uint64
+ var x78 uint64
+ x78, x77 = bits.Mul64(x1, arg1[1])
+ var x79 uint64
+ var x80 uint64
+ x80, x79 = bits.Mul64(x1, arg1[0])
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x80, x77, uint64(0x0))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x78, x75, uint64(p384Uint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x76, x73, uint64(p384Uint1(x84)))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x74, x71, uint64(p384Uint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x72, x69, uint64(p384Uint1(x88)))
+ x91 := (uint64(p384Uint1(x90)) + x70)
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x57, x79, uint64(0x0))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x59, x81, uint64(p384Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x61, x83, uint64(p384Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x63, x85, uint64(p384Uint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x65, x87, uint64(p384Uint1(x99)))
+ var x102 uint64
+ var x103 uint64
+ x102, x103 = bits.Add64(x67, x89, uint64(p384Uint1(x101)))
+ var x104 uint64
+ var x105 uint64
+ x104, x105 = bits.Add64(uint64(p384Uint1(x68)), x91, uint64(p384Uint1(x103)))
+ var x106 uint64
+ _, x106 = bits.Mul64(x92, 0x100000001)
+ var x108 uint64
+ var x109 uint64
+ x109, x108 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x110 uint64
+ var x111 uint64
+ x111, x110 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x112 uint64
+ var x113 uint64
+ x113, x112 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x114 uint64
+ var x115 uint64
+ x115, x114 = bits.Mul64(x106, 0xfffffffffffffffe)
+ var x116 uint64
+ var x117 uint64
+ x117, x116 = bits.Mul64(x106, 0xffffffff00000000)
+ var x118 uint64
+ var x119 uint64
+ x119, x118 = bits.Mul64(x106, 0xffffffff)
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x119, x116, uint64(0x0))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x117, x114, uint64(p384Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x115, x112, uint64(p384Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x113, x110, uint64(p384Uint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x111, x108, uint64(p384Uint1(x127)))
+ x130 := (uint64(p384Uint1(x129)) + x109)
+ var x132 uint64
+ _, x132 = bits.Add64(x92, x118, uint64(0x0))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x94, x120, uint64(p384Uint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x96, x122, uint64(p384Uint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x98, x124, uint64(p384Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x100, x126, uint64(p384Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x102, x128, uint64(p384Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x104, x130, uint64(p384Uint1(x142)))
+ x145 := (uint64(p384Uint1(x144)) + uint64(p384Uint1(x105)))
+ var x146 uint64
+ var x147 uint64
+ x147, x146 = bits.Mul64(x2, arg1[5])
+ var x148 uint64
+ var x149 uint64
+ x149, x148 = bits.Mul64(x2, arg1[4])
+ var x150 uint64
+ var x151 uint64
+ x151, x150 = bits.Mul64(x2, arg1[3])
+ var x152 uint64
+ var x153 uint64
+ x153, x152 = bits.Mul64(x2, arg1[2])
+ var x154 uint64
+ var x155 uint64
+ x155, x154 = bits.Mul64(x2, arg1[1])
+ var x156 uint64
+ var x157 uint64
+ x157, x156 = bits.Mul64(x2, arg1[0])
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x157, x154, uint64(0x0))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x155, x152, uint64(p384Uint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Add64(x153, x150, uint64(p384Uint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Add64(x151, x148, uint64(p384Uint1(x163)))
+ var x166 uint64
+ var x167 uint64
+ x166, x167 = bits.Add64(x149, x146, uint64(p384Uint1(x165)))
+ x168 := (uint64(p384Uint1(x167)) + x147)
+ var x169 uint64
+ var x170 uint64
+ x169, x170 = bits.Add64(x133, x156, uint64(0x0))
+ var x171 uint64
+ var x172 uint64
+ x171, x172 = bits.Add64(x135, x158, uint64(p384Uint1(x170)))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x137, x160, uint64(p384Uint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x139, x162, uint64(p384Uint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x141, x164, uint64(p384Uint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x143, x166, uint64(p384Uint1(x178)))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x145, x168, uint64(p384Uint1(x180)))
+ var x183 uint64
+ _, x183 = bits.Mul64(x169, 0x100000001)
+ var x185 uint64
+ var x186 uint64
+ x186, x185 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x187 uint64
+ var x188 uint64
+ x188, x187 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x189 uint64
+ var x190 uint64
+ x190, x189 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x191 uint64
+ var x192 uint64
+ x192, x191 = bits.Mul64(x183, 0xfffffffffffffffe)
+ var x193 uint64
+ var x194 uint64
+ x194, x193 = bits.Mul64(x183, 0xffffffff00000000)
+ var x195 uint64
+ var x196 uint64
+ x196, x195 = bits.Mul64(x183, 0xffffffff)
+ var x197 uint64
+ var x198 uint64
+ x197, x198 = bits.Add64(x196, x193, uint64(0x0))
+ var x199 uint64
+ var x200 uint64
+ x199, x200 = bits.Add64(x194, x191, uint64(p384Uint1(x198)))
+ var x201 uint64
+ var x202 uint64
+ x201, x202 = bits.Add64(x192, x189, uint64(p384Uint1(x200)))
+ var x203 uint64
+ var x204 uint64
+ x203, x204 = bits.Add64(x190, x187, uint64(p384Uint1(x202)))
+ var x205 uint64
+ var x206 uint64
+ x205, x206 = bits.Add64(x188, x185, uint64(p384Uint1(x204)))
+ x207 := (uint64(p384Uint1(x206)) + x186)
+ var x209 uint64
+ _, x209 = bits.Add64(x169, x195, uint64(0x0))
+ var x210 uint64
+ var x211 uint64
+ x210, x211 = bits.Add64(x171, x197, uint64(p384Uint1(x209)))
+ var x212 uint64
+ var x213 uint64
+ x212, x213 = bits.Add64(x173, x199, uint64(p384Uint1(x211)))
+ var x214 uint64
+ var x215 uint64
+ x214, x215 = bits.Add64(x175, x201, uint64(p384Uint1(x213)))
+ var x216 uint64
+ var x217 uint64
+ x216, x217 = bits.Add64(x177, x203, uint64(p384Uint1(x215)))
+ var x218 uint64
+ var x219 uint64
+ x218, x219 = bits.Add64(x179, x205, uint64(p384Uint1(x217)))
+ var x220 uint64
+ var x221 uint64
+ x220, x221 = bits.Add64(x181, x207, uint64(p384Uint1(x219)))
+ x222 := (uint64(p384Uint1(x221)) + uint64(p384Uint1(x182)))
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x3, arg1[5])
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x3, arg1[4])
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x3, arg1[3])
+ var x229 uint64
+ var x230 uint64
+ x230, x229 = bits.Mul64(x3, arg1[2])
+ var x231 uint64
+ var x232 uint64
+ x232, x231 = bits.Mul64(x3, arg1[1])
+ var x233 uint64
+ var x234 uint64
+ x234, x233 = bits.Mul64(x3, arg1[0])
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x234, x231, uint64(0x0))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x232, x229, uint64(p384Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x230, x227, uint64(p384Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x228, x225, uint64(p384Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x226, x223, uint64(p384Uint1(x242)))
+ x245 := (uint64(p384Uint1(x244)) + x224)
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x210, x233, uint64(0x0))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x212, x235, uint64(p384Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x214, x237, uint64(p384Uint1(x249)))
+ var x252 uint64
+ var x253 uint64
+ x252, x253 = bits.Add64(x216, x239, uint64(p384Uint1(x251)))
+ var x254 uint64
+ var x255 uint64
+ x254, x255 = bits.Add64(x218, x241, uint64(p384Uint1(x253)))
+ var x256 uint64
+ var x257 uint64
+ x256, x257 = bits.Add64(x220, x243, uint64(p384Uint1(x255)))
+ var x258 uint64
+ var x259 uint64
+ x258, x259 = bits.Add64(x222, x245, uint64(p384Uint1(x257)))
+ var x260 uint64
+ _, x260 = bits.Mul64(x246, 0x100000001)
+ var x262 uint64
+ var x263 uint64
+ x263, x262 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x264 uint64
+ var x265 uint64
+ x265, x264 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x266 uint64
+ var x267 uint64
+ x267, x266 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x268 uint64
+ var x269 uint64
+ x269, x268 = bits.Mul64(x260, 0xfffffffffffffffe)
+ var x270 uint64
+ var x271 uint64
+ x271, x270 = bits.Mul64(x260, 0xffffffff00000000)
+ var x272 uint64
+ var x273 uint64
+ x273, x272 = bits.Mul64(x260, 0xffffffff)
+ var x274 uint64
+ var x275 uint64
+ x274, x275 = bits.Add64(x273, x270, uint64(0x0))
+ var x276 uint64
+ var x277 uint64
+ x276, x277 = bits.Add64(x271, x268, uint64(p384Uint1(x275)))
+ var x278 uint64
+ var x279 uint64
+ x278, x279 = bits.Add64(x269, x266, uint64(p384Uint1(x277)))
+ var x280 uint64
+ var x281 uint64
+ x280, x281 = bits.Add64(x267, x264, uint64(p384Uint1(x279)))
+ var x282 uint64
+ var x283 uint64
+ x282, x283 = bits.Add64(x265, x262, uint64(p384Uint1(x281)))
+ x284 := (uint64(p384Uint1(x283)) + x263)
+ var x286 uint64
+ _, x286 = bits.Add64(x246, x272, uint64(0x0))
+ var x287 uint64
+ var x288 uint64
+ x287, x288 = bits.Add64(x248, x274, uint64(p384Uint1(x286)))
+ var x289 uint64
+ var x290 uint64
+ x289, x290 = bits.Add64(x250, x276, uint64(p384Uint1(x288)))
+ var x291 uint64
+ var x292 uint64
+ x291, x292 = bits.Add64(x252, x278, uint64(p384Uint1(x290)))
+ var x293 uint64
+ var x294 uint64
+ x293, x294 = bits.Add64(x254, x280, uint64(p384Uint1(x292)))
+ var x295 uint64
+ var x296 uint64
+ x295, x296 = bits.Add64(x256, x282, uint64(p384Uint1(x294)))
+ var x297 uint64
+ var x298 uint64
+ x297, x298 = bits.Add64(x258, x284, uint64(p384Uint1(x296)))
+ x299 := (uint64(p384Uint1(x298)) + uint64(p384Uint1(x259)))
+ var x300 uint64
+ var x301 uint64
+ x301, x300 = bits.Mul64(x4, arg1[5])
+ var x302 uint64
+ var x303 uint64
+ x303, x302 = bits.Mul64(x4, arg1[4])
+ var x304 uint64
+ var x305 uint64
+ x305, x304 = bits.Mul64(x4, arg1[3])
+ var x306 uint64
+ var x307 uint64
+ x307, x306 = bits.Mul64(x4, arg1[2])
+ var x308 uint64
+ var x309 uint64
+ x309, x308 = bits.Mul64(x4, arg1[1])
+ var x310 uint64
+ var x311 uint64
+ x311, x310 = bits.Mul64(x4, arg1[0])
+ var x312 uint64
+ var x313 uint64
+ x312, x313 = bits.Add64(x311, x308, uint64(0x0))
+ var x314 uint64
+ var x315 uint64
+ x314, x315 = bits.Add64(x309, x306, uint64(p384Uint1(x313)))
+ var x316 uint64
+ var x317 uint64
+ x316, x317 = bits.Add64(x307, x304, uint64(p384Uint1(x315)))
+ var x318 uint64
+ var x319 uint64
+ x318, x319 = bits.Add64(x305, x302, uint64(p384Uint1(x317)))
+ var x320 uint64
+ var x321 uint64
+ x320, x321 = bits.Add64(x303, x300, uint64(p384Uint1(x319)))
+ x322 := (uint64(p384Uint1(x321)) + x301)
+ var x323 uint64
+ var x324 uint64
+ x323, x324 = bits.Add64(x287, x310, uint64(0x0))
+ var x325 uint64
+ var x326 uint64
+ x325, x326 = bits.Add64(x289, x312, uint64(p384Uint1(x324)))
+ var x327 uint64
+ var x328 uint64
+ x327, x328 = bits.Add64(x291, x314, uint64(p384Uint1(x326)))
+ var x329 uint64
+ var x330 uint64
+ x329, x330 = bits.Add64(x293, x316, uint64(p384Uint1(x328)))
+ var x331 uint64
+ var x332 uint64
+ x331, x332 = bits.Add64(x295, x318, uint64(p384Uint1(x330)))
+ var x333 uint64
+ var x334 uint64
+ x333, x334 = bits.Add64(x297, x320, uint64(p384Uint1(x332)))
+ var x335 uint64
+ var x336 uint64
+ x335, x336 = bits.Add64(x299, x322, uint64(p384Uint1(x334)))
+ var x337 uint64
+ _, x337 = bits.Mul64(x323, 0x100000001)
+ var x339 uint64
+ var x340 uint64
+ x340, x339 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x341 uint64
+ var x342 uint64
+ x342, x341 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x343 uint64
+ var x344 uint64
+ x344, x343 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x345 uint64
+ var x346 uint64
+ x346, x345 = bits.Mul64(x337, 0xfffffffffffffffe)
+ var x347 uint64
+ var x348 uint64
+ x348, x347 = bits.Mul64(x337, 0xffffffff00000000)
+ var x349 uint64
+ var x350 uint64
+ x350, x349 = bits.Mul64(x337, 0xffffffff)
+ var x351 uint64
+ var x352 uint64
+ x351, x352 = bits.Add64(x350, x347, uint64(0x0))
+ var x353 uint64
+ var x354 uint64
+ x353, x354 = bits.Add64(x348, x345, uint64(p384Uint1(x352)))
+ var x355 uint64
+ var x356 uint64
+ x355, x356 = bits.Add64(x346, x343, uint64(p384Uint1(x354)))
+ var x357 uint64
+ var x358 uint64
+ x357, x358 = bits.Add64(x344, x341, uint64(p384Uint1(x356)))
+ var x359 uint64
+ var x360 uint64
+ x359, x360 = bits.Add64(x342, x339, uint64(p384Uint1(x358)))
+ x361 := (uint64(p384Uint1(x360)) + x340)
+ var x363 uint64
+ _, x363 = bits.Add64(x323, x349, uint64(0x0))
+ var x364 uint64
+ var x365 uint64
+ x364, x365 = bits.Add64(x325, x351, uint64(p384Uint1(x363)))
+ var x366 uint64
+ var x367 uint64
+ x366, x367 = bits.Add64(x327, x353, uint64(p384Uint1(x365)))
+ var x368 uint64
+ var x369 uint64
+ x368, x369 = bits.Add64(x329, x355, uint64(p384Uint1(x367)))
+ var x370 uint64
+ var x371 uint64
+ x370, x371 = bits.Add64(x331, x357, uint64(p384Uint1(x369)))
+ var x372 uint64
+ var x373 uint64
+ x372, x373 = bits.Add64(x333, x359, uint64(p384Uint1(x371)))
+ var x374 uint64
+ var x375 uint64
+ x374, x375 = bits.Add64(x335, x361, uint64(p384Uint1(x373)))
+ x376 := (uint64(p384Uint1(x375)) + uint64(p384Uint1(x336)))
+ var x377 uint64
+ var x378 uint64
+ x378, x377 = bits.Mul64(x5, arg1[5])
+ var x379 uint64
+ var x380 uint64
+ x380, x379 = bits.Mul64(x5, arg1[4])
+ var x381 uint64
+ var x382 uint64
+ x382, x381 = bits.Mul64(x5, arg1[3])
+ var x383 uint64
+ var x384 uint64
+ x384, x383 = bits.Mul64(x5, arg1[2])
+ var x385 uint64
+ var x386 uint64
+ x386, x385 = bits.Mul64(x5, arg1[1])
+ var x387 uint64
+ var x388 uint64
+ x388, x387 = bits.Mul64(x5, arg1[0])
+ var x389 uint64
+ var x390 uint64
+ x389, x390 = bits.Add64(x388, x385, uint64(0x0))
+ var x391 uint64
+ var x392 uint64
+ x391, x392 = bits.Add64(x386, x383, uint64(p384Uint1(x390)))
+ var x393 uint64
+ var x394 uint64
+ x393, x394 = bits.Add64(x384, x381, uint64(p384Uint1(x392)))
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Add64(x382, x379, uint64(p384Uint1(x394)))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Add64(x380, x377, uint64(p384Uint1(x396)))
+ x399 := (uint64(p384Uint1(x398)) + x378)
+ var x400 uint64
+ var x401 uint64
+ x400, x401 = bits.Add64(x364, x387, uint64(0x0))
+ var x402 uint64
+ var x403 uint64
+ x402, x403 = bits.Add64(x366, x389, uint64(p384Uint1(x401)))
+ var x404 uint64
+ var x405 uint64
+ x404, x405 = bits.Add64(x368, x391, uint64(p384Uint1(x403)))
+ var x406 uint64
+ var x407 uint64
+ x406, x407 = bits.Add64(x370, x393, uint64(p384Uint1(x405)))
+ var x408 uint64
+ var x409 uint64
+ x408, x409 = bits.Add64(x372, x395, uint64(p384Uint1(x407)))
+ var x410 uint64
+ var x411 uint64
+ x410, x411 = bits.Add64(x374, x397, uint64(p384Uint1(x409)))
+ var x412 uint64
+ var x413 uint64
+ x412, x413 = bits.Add64(x376, x399, uint64(p384Uint1(x411)))
+ var x414 uint64
+ _, x414 = bits.Mul64(x400, 0x100000001)
+ var x416 uint64
+ var x417 uint64
+ x417, x416 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x418 uint64
+ var x419 uint64
+ x419, x418 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x420 uint64
+ var x421 uint64
+ x421, x420 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x422 uint64
+ var x423 uint64
+ x423, x422 = bits.Mul64(x414, 0xfffffffffffffffe)
+ var x424 uint64
+ var x425 uint64
+ x425, x424 = bits.Mul64(x414, 0xffffffff00000000)
+ var x426 uint64
+ var x427 uint64
+ x427, x426 = bits.Mul64(x414, 0xffffffff)
+ var x428 uint64
+ var x429 uint64
+ x428, x429 = bits.Add64(x427, x424, uint64(0x0))
+ var x430 uint64
+ var x431 uint64
+ x430, x431 = bits.Add64(x425, x422, uint64(p384Uint1(x429)))
+ var x432 uint64
+ var x433 uint64
+ x432, x433 = bits.Add64(x423, x420, uint64(p384Uint1(x431)))
+ var x434 uint64
+ var x435 uint64
+ x434, x435 = bits.Add64(x421, x418, uint64(p384Uint1(x433)))
+ var x436 uint64
+ var x437 uint64
+ x436, x437 = bits.Add64(x419, x416, uint64(p384Uint1(x435)))
+ x438 := (uint64(p384Uint1(x437)) + x417)
+ var x440 uint64
+ _, x440 = bits.Add64(x400, x426, uint64(0x0))
+ var x441 uint64
+ var x442 uint64
+ x441, x442 = bits.Add64(x402, x428, uint64(p384Uint1(x440)))
+ var x443 uint64
+ var x444 uint64
+ x443, x444 = bits.Add64(x404, x430, uint64(p384Uint1(x442)))
+ var x445 uint64
+ var x446 uint64
+ x445, x446 = bits.Add64(x406, x432, uint64(p384Uint1(x444)))
+ var x447 uint64
+ var x448 uint64
+ x447, x448 = bits.Add64(x408, x434, uint64(p384Uint1(x446)))
+ var x449 uint64
+ var x450 uint64
+ x449, x450 = bits.Add64(x410, x436, uint64(p384Uint1(x448)))
+ var x451 uint64
+ var x452 uint64
+ x451, x452 = bits.Add64(x412, x438, uint64(p384Uint1(x450)))
+ x453 := (uint64(p384Uint1(x452)) + uint64(p384Uint1(x413)))
+ var x454 uint64
+ var x455 uint64
+ x454, x455 = bits.Sub64(x441, 0xffffffff, uint64(0x0))
+ var x456 uint64
+ var x457 uint64
+ x456, x457 = bits.Sub64(x443, 0xffffffff00000000, uint64(p384Uint1(x455)))
+ var x458 uint64
+ var x459 uint64
+ x458, x459 = bits.Sub64(x445, 0xfffffffffffffffe, uint64(p384Uint1(x457)))
+ var x460 uint64
+ var x461 uint64
+ x460, x461 = bits.Sub64(x447, 0xffffffffffffffff, uint64(p384Uint1(x459)))
+ var x462 uint64
+ var x463 uint64
+ x462, x463 = bits.Sub64(x449, 0xffffffffffffffff, uint64(p384Uint1(x461)))
+ var x464 uint64
+ var x465 uint64
+ x464, x465 = bits.Sub64(x451, 0xffffffffffffffff, uint64(p384Uint1(x463)))
+ var x467 uint64
+ _, x467 = bits.Sub64(x453, uint64(0x0), uint64(p384Uint1(x465)))
+ var x468 uint64
+ p384CmovznzU64(&x468, p384Uint1(x467), x454, x441)
+ var x469 uint64
+ p384CmovznzU64(&x469, p384Uint1(x467), x456, x443)
+ var x470 uint64
+ p384CmovznzU64(&x470, p384Uint1(x467), x458, x445)
+ var x471 uint64
+ p384CmovznzU64(&x471, p384Uint1(x467), x460, x447)
+ var x472 uint64
+ p384CmovznzU64(&x472, p384Uint1(x467), x462, x449)
+ var x473 uint64
+ p384CmovznzU64(&x473, p384Uint1(x467), x464, x451)
+ out1[0] = x468
+ out1[1] = x469
+ out1[2] = x470
+ out1[3] = x471
+ out1[4] = x472
+ out1[5] = x473
+}
+
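+// The round structure above is word-by-word Montgomery multiplication: each
+// round multiplies the running low word by 0x100000001, which is the Montgomery
+// constant m' = -m⁻¹ mod 2^64 for the P-384 prime m, and the per-round factors
+// 0xffffffff, 0xffffffff00000000, 0xfffffffffffffffe and 0xffffffffffffffff (×3)
+// are the little-endian 64-bit limbs of m itself. A standalone sketch (a
+// separate program, not part of this generated file) that recomputes m' with
+// math/big:
+//
+//    package main
+//
+//    import (
+//        "fmt"
+//        "math/big"
+//    )
+//
+//    func main() {
+//        w := new(big.Int).Lsh(big.NewInt(1), 64) // 2^64
+//        // m = 2^384 - 2^128 - 2^96 + 2^32 - 1, the P-384 prime
+//        m, _ := new(big.Int).SetString("fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", 16)
+//        mPrime := new(big.Int).ModInverse(m, w) // m⁻¹ mod 2^64
+//        mPrime.Neg(mPrime).Mod(mPrime, w)       // -m⁻¹ mod 2^64
+//        fmt.Printf("%#x\n", mPrime)             // prints 0x100000001
+//    }
+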
+// p384Add adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p384Add(out1 *p384MontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement, arg2 *p384MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(p384Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(p384Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(p384Uint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Add64(arg1[4], arg2[4], uint64(p384Uint1(x8)))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Add64(arg1[5], arg2[5], uint64(p384Uint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Sub64(x1, 0xffffffff, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Sub64(x3, 0xffffffff00000000, uint64(p384Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Sub64(x5, 0xfffffffffffffffe, uint64(p384Uint1(x16)))
+ var x19 uint64
+ var x20 uint64
+ x19, x20 = bits.Sub64(x7, 0xffffffffffffffff, uint64(p384Uint1(x18)))
+ var x21 uint64
+ var x22 uint64
+ x21, x22 = bits.Sub64(x9, 0xffffffffffffffff, uint64(p384Uint1(x20)))
+ var x23 uint64
+ var x24 uint64
+ x23, x24 = bits.Sub64(x11, 0xffffffffffffffff, uint64(p384Uint1(x22)))
+ var x26 uint64
+ _, x26 = bits.Sub64(uint64(p384Uint1(x12)), uint64(0x0), uint64(p384Uint1(x24)))
+ var x27 uint64
+ p384CmovznzU64(&x27, p384Uint1(x26), x13, x1)
+ var x28 uint64
+ p384CmovznzU64(&x28, p384Uint1(x26), x15, x3)
+ var x29 uint64
+ p384CmovznzU64(&x29, p384Uint1(x26), x17, x5)
+ var x30 uint64
+ p384CmovznzU64(&x30, p384Uint1(x26), x19, x7)
+ var x31 uint64
+ p384CmovznzU64(&x31, p384Uint1(x26), x21, x9)
+ var x32 uint64
+ p384CmovznzU64(&x32, p384Uint1(x26), x23, x11)
+ out1[0] = x27
+ out1[1] = x28
+ out1[2] = x29
+ out1[3] = x30
+ out1[4] = x31
+ out1[5] = x32
+}
+
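+// p384Add computes the full 385-bit sum with a bits.Add64 carry chain, then
+// performs a trial subtraction of m; the final borrow x26 selects, branch-free,
+// between the raw sum and the reduced difference. A two-limb sketch of the same
+// pattern (illustrative only; addMod and its toy modulus are ours, not part of
+// this file):
+//
+//    package main
+//
+//    import (
+//        "fmt"
+//        "math/bits"
+//    )
+//
+//    // addMod returns (a + b) mod m for two-limb little-endian values.
+//    func addMod(a, b, m [2]uint64) [2]uint64 {
+//        s0, c := bits.Add64(a[0], b[0], 0)
+//        s1, c := bits.Add64(a[1], b[1], c)
+//        d0, brw := bits.Sub64(s0, m[0], 0)
+//        d1, brw := bits.Sub64(s1, m[1], brw)
+//        _, brw = bits.Sub64(c, 0, brw) // brw == 1 iff a+b < m
+//        mask := -brw                   // all-ones keeps the sum, zero keeps the difference
+//        return [2]uint64{mask&s0 | ^mask&d0, mask&s1 | ^mask&d1}
+//    }
+//
+//    func main() {
+//        m := [2]uint64{13, 0}
+//        fmt.Println(addMod([2]uint64{9, 0}, [2]uint64{7, 0}, m)) // [3 0]
+//    }
+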
+// p384Sub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p384Sub(out1 *p384MontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement, arg2 *p384MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(p384Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(p384Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(p384Uint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Sub64(arg1[4], arg2[4], uint64(p384Uint1(x8)))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Sub64(arg1[5], arg2[5], uint64(p384Uint1(x10)))
+ var x13 uint64
+ p384CmovznzU64(&x13, p384Uint1(x12), uint64(0x0), 0xffffffffffffffff)
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x1, (x13 & 0xffffffff), uint64(0x0))
+ var x16 uint64
+ var x17 uint64
+ x16, x17 = bits.Add64(x3, (x13 & 0xffffffff00000000), uint64(p384Uint1(x15)))
+ var x18 uint64
+ var x19 uint64
+ x18, x19 = bits.Add64(x5, (x13 & 0xfffffffffffffffe), uint64(p384Uint1(x17)))
+ var x20 uint64
+ var x21 uint64
+ x20, x21 = bits.Add64(x7, x13, uint64(p384Uint1(x19)))
+ var x22 uint64
+ var x23 uint64
+ x22, x23 = bits.Add64(x9, x13, uint64(p384Uint1(x21)))
+ var x24 uint64
+ x24, _ = bits.Add64(x11, x13, uint64(p384Uint1(x23)))
+ out1[0] = x14
+ out1[1] = x16
+ out1[2] = x18
+ out1[3] = x20
+ out1[4] = x22
+ out1[5] = x24
+}
+
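+// p384Sub goes the other way: subtract first, then add m back masked by the
+// final borrow. x13 is either 0 or 2^64-1, so x13 & limb contributes each limb
+// of m exactly when the subtraction underflowed; for the three all-ones limbs
+// of m the mask x13 is added unmasked. A hedged two-limb sketch (subMod is our
+// illustrative name):
+//
+//    package main
+//
+//    import (
+//        "fmt"
+//        "math/bits"
+//    )
+//
+//    // subMod returns (a - b) mod m: subtract, then re-add m masked by the borrow.
+//    func subMod(a, b, m [2]uint64) [2]uint64 {
+//        d0, brw := bits.Sub64(a[0], b[0], 0)
+//        d1, brw := bits.Sub64(a[1], b[1], brw)
+//        mask := -brw // all-ones iff a < b
+//        r0, c := bits.Add64(d0, mask&m[0], 0)
+//        r1, _ := bits.Add64(d1, mask&m[1], c)
+//        return [2]uint64{r0, r1}
+//    }
+//
+//    func main() {
+//        m := [2]uint64{13, 0}
+//        fmt.Println(subMod([2]uint64{2, 0}, [2]uint64{5, 0}, m)) // [10 0]
+//    }
+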
+// p384SetOne returns the field element one in the Montgomery domain.
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = 1 mod m
+// 0 ≤ eval out1 < m
+func p384SetOne(out1 *p384MontgomeryDomainFieldElement) {
+ out1[0] = 0xffffffff00000001
+ out1[1] = 0xffffffff
+ out1[2] = uint64(0x1)
+ out1[3] = uint64(0x0)
+ out1[4] = uint64(0x0)
+ out1[5] = uint64(0x0)
+}
+
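+// The constants above are R mod m for R = 2^384, i.e. the Montgomery form of 1:
+// 2^384 mod m = 2^128 + 2^96 - 2^32 + 1, which packs into little-endian limbs
+// as 0xffffffff00000001, 0xffffffff, 0x1, 0, 0, 0. A quick math/big fragment
+// (with m the P-384 prime built as a *big.Int, as in the sketch after p384Mul):
+//
+//    r := new(big.Int).Lsh(big.NewInt(1), 384)
+//    r.Mod(r, m)
+//    fmt.Printf("%x\n", r) // 100000000ffffffffffffffff00000001
+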
+// p384FromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^6) mod m
+// 0 ≤ eval out1 < m
+func p384FromMontgomery(out1 *p384NonMontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement) {
+ x1 := arg1[0]
+ var x2 uint64
+ _, x2 = bits.Mul64(x1, 0x100000001)
+ var x4 uint64
+ var x5 uint64
+ x5, x4 = bits.Mul64(x2, 0xffffffffffffffff)
+ var x6 uint64
+ var x7 uint64
+ x7, x6 = bits.Mul64(x2, 0xffffffffffffffff)
+ var x8 uint64
+ var x9 uint64
+ x9, x8 = bits.Mul64(x2, 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x11, x10 = bits.Mul64(x2, 0xfffffffffffffffe)
+ var x12 uint64
+ var x13 uint64
+ x13, x12 = bits.Mul64(x2, 0xffffffff00000000)
+ var x14 uint64
+ var x15 uint64
+ x15, x14 = bits.Mul64(x2, 0xffffffff)
+ var x16 uint64
+ var x17 uint64
+ x16, x17 = bits.Add64(x15, x12, uint64(0x0))
+ var x18 uint64
+ var x19 uint64
+ x18, x19 = bits.Add64(x13, x10, uint64(p384Uint1(x17)))
+ var x20 uint64
+ var x21 uint64
+ x20, x21 = bits.Add64(x11, x8, uint64(p384Uint1(x19)))
+ var x22 uint64
+ var x23 uint64
+ x22, x23 = bits.Add64(x9, x6, uint64(p384Uint1(x21)))
+ var x24 uint64
+ var x25 uint64
+ x24, x25 = bits.Add64(x7, x4, uint64(p384Uint1(x23)))
+ var x27 uint64
+ _, x27 = bits.Add64(x1, x14, uint64(0x0))
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(uint64(0x0), x16, uint64(p384Uint1(x27)))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(uint64(0x0), x18, uint64(p384Uint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(uint64(0x0), x20, uint64(p384Uint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(uint64(0x0), x22, uint64(p384Uint1(x33)))
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(uint64(0x0), x24, uint64(p384Uint1(x35)))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(uint64(0x0), (uint64(p384Uint1(x25)) + x5), uint64(p384Uint1(x37)))
+ var x40 uint64
+ var x41 uint64
+ x40, x41 = bits.Add64(x28, arg1[1], uint64(0x0))
+ var x42 uint64
+ var x43 uint64
+ x42, x43 = bits.Add64(x30, uint64(0x0), uint64(p384Uint1(x41)))
+ var x44 uint64
+ var x45 uint64
+ x44, x45 = bits.Add64(x32, uint64(0x0), uint64(p384Uint1(x43)))
+ var x46 uint64
+ var x47 uint64
+ x46, x47 = bits.Add64(x34, uint64(0x0), uint64(p384Uint1(x45)))
+ var x48 uint64
+ var x49 uint64
+ x48, x49 = bits.Add64(x36, uint64(0x0), uint64(p384Uint1(x47)))
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x38, uint64(0x0), uint64(p384Uint1(x49)))
+ var x52 uint64
+ _, x52 = bits.Mul64(x40, 0x100000001)
+ var x54 uint64
+ var x55 uint64
+ x55, x54 = bits.Mul64(x52, 0xffffffffffffffff)
+ var x56 uint64
+ var x57 uint64
+ x57, x56 = bits.Mul64(x52, 0xffffffffffffffff)
+ var x58 uint64
+ var x59 uint64
+ x59, x58 = bits.Mul64(x52, 0xffffffffffffffff)
+ var x60 uint64
+ var x61 uint64
+ x61, x60 = bits.Mul64(x52, 0xfffffffffffffffe)
+ var x62 uint64
+ var x63 uint64
+ x63, x62 = bits.Mul64(x52, 0xffffffff00000000)
+ var x64 uint64
+ var x65 uint64
+ x65, x64 = bits.Mul64(x52, 0xffffffff)
+ var x66 uint64
+ var x67 uint64
+ x66, x67 = bits.Add64(x65, x62, uint64(0x0))
+ var x68 uint64
+ var x69 uint64
+ x68, x69 = bits.Add64(x63, x60, uint64(p384Uint1(x67)))
+ var x70 uint64
+ var x71 uint64
+ x70, x71 = bits.Add64(x61, x58, uint64(p384Uint1(x69)))
+ var x72 uint64
+ var x73 uint64
+ x72, x73 = bits.Add64(x59, x56, uint64(p384Uint1(x71)))
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64(x57, x54, uint64(p384Uint1(x73)))
+ var x77 uint64
+ _, x77 = bits.Add64(x40, x64, uint64(0x0))
+ var x78 uint64
+ var x79 uint64
+ x78, x79 = bits.Add64(x42, x66, uint64(p384Uint1(x77)))
+ var x80 uint64
+ var x81 uint64
+ x80, x81 = bits.Add64(x44, x68, uint64(p384Uint1(x79)))
+ var x82 uint64
+ var x83 uint64
+ x82, x83 = bits.Add64(x46, x70, uint64(p384Uint1(x81)))
+ var x84 uint64
+ var x85 uint64
+ x84, x85 = bits.Add64(x48, x72, uint64(p384Uint1(x83)))
+ var x86 uint64
+ var x87 uint64
+ x86, x87 = bits.Add64(x50, x74, uint64(p384Uint1(x85)))
+ var x88 uint64
+ var x89 uint64
+ x88, x89 = bits.Add64((uint64(p384Uint1(x51)) + uint64(p384Uint1(x39))), (uint64(p384Uint1(x75)) + x55), uint64(p384Uint1(x87)))
+ var x90 uint64
+ var x91 uint64
+ x90, x91 = bits.Add64(x78, arg1[2], uint64(0x0))
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x80, uint64(0x0), uint64(p384Uint1(x91)))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x82, uint64(0x0), uint64(p384Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x84, uint64(0x0), uint64(p384Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x86, uint64(0x0), uint64(p384Uint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x88, uint64(0x0), uint64(p384Uint1(x99)))
+ var x102 uint64
+ _, x102 = bits.Mul64(x90, 0x100000001)
+ var x104 uint64
+ var x105 uint64
+ x105, x104 = bits.Mul64(x102, 0xffffffffffffffff)
+ var x106 uint64
+ var x107 uint64
+ x107, x106 = bits.Mul64(x102, 0xffffffffffffffff)
+ var x108 uint64
+ var x109 uint64
+ x109, x108 = bits.Mul64(x102, 0xffffffffffffffff)
+ var x110 uint64
+ var x111 uint64
+ x111, x110 = bits.Mul64(x102, 0xfffffffffffffffe)
+ var x112 uint64
+ var x113 uint64
+ x113, x112 = bits.Mul64(x102, 0xffffffff00000000)
+ var x114 uint64
+ var x115 uint64
+ x115, x114 = bits.Mul64(x102, 0xffffffff)
+ var x116 uint64
+ var x117 uint64
+ x116, x117 = bits.Add64(x115, x112, uint64(0x0))
+ var x118 uint64
+ var x119 uint64
+ x118, x119 = bits.Add64(x113, x110, uint64(p384Uint1(x117)))
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x111, x108, uint64(p384Uint1(x119)))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x109, x106, uint64(p384Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x107, x104, uint64(p384Uint1(x123)))
+ var x127 uint64
+ _, x127 = bits.Add64(x90, x114, uint64(0x0))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x92, x116, uint64(p384Uint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x94, x118, uint64(p384Uint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x96, x120, uint64(p384Uint1(x131)))
+ var x134 uint64
+ var x135 uint64
+ x134, x135 = bits.Add64(x98, x122, uint64(p384Uint1(x133)))
+ var x136 uint64
+ var x137 uint64
+ x136, x137 = bits.Add64(x100, x124, uint64(p384Uint1(x135)))
+ var x138 uint64
+ var x139 uint64
+ x138, x139 = bits.Add64((uint64(p384Uint1(x101)) + uint64(p384Uint1(x89))), (uint64(p384Uint1(x125)) + x105), uint64(p384Uint1(x137)))
+ var x140 uint64
+ var x141 uint64
+ x140, x141 = bits.Add64(x128, arg1[3], uint64(0x0))
+ var x142 uint64
+ var x143 uint64
+ x142, x143 = bits.Add64(x130, uint64(0x0), uint64(p384Uint1(x141)))
+ var x144 uint64
+ var x145 uint64
+ x144, x145 = bits.Add64(x132, uint64(0x0), uint64(p384Uint1(x143)))
+ var x146 uint64
+ var x147 uint64
+ x146, x147 = bits.Add64(x134, uint64(0x0), uint64(p384Uint1(x145)))
+ var x148 uint64
+ var x149 uint64
+ x148, x149 = bits.Add64(x136, uint64(0x0), uint64(p384Uint1(x147)))
+ var x150 uint64
+ var x151 uint64
+ x150, x151 = bits.Add64(x138, uint64(0x0), uint64(p384Uint1(x149)))
+ var x152 uint64
+ _, x152 = bits.Mul64(x140, 0x100000001)
+ var x154 uint64
+ var x155 uint64
+ x155, x154 = bits.Mul64(x152, 0xffffffffffffffff)
+ var x156 uint64
+ var x157 uint64
+ x157, x156 = bits.Mul64(x152, 0xffffffffffffffff)
+ var x158 uint64
+ var x159 uint64
+ x159, x158 = bits.Mul64(x152, 0xffffffffffffffff)
+ var x160 uint64
+ var x161 uint64
+ x161, x160 = bits.Mul64(x152, 0xfffffffffffffffe)
+ var x162 uint64
+ var x163 uint64
+ x163, x162 = bits.Mul64(x152, 0xffffffff00000000)
+ var x164 uint64
+ var x165 uint64
+ x165, x164 = bits.Mul64(x152, 0xffffffff)
+ var x166 uint64
+ var x167 uint64
+ x166, x167 = bits.Add64(x165, x162, uint64(0x0))
+ var x168 uint64
+ var x169 uint64
+ x168, x169 = bits.Add64(x163, x160, uint64(p384Uint1(x167)))
+ var x170 uint64
+ var x171 uint64
+ x170, x171 = bits.Add64(x161, x158, uint64(p384Uint1(x169)))
+ var x172 uint64
+ var x173 uint64
+ x172, x173 = bits.Add64(x159, x156, uint64(p384Uint1(x171)))
+ var x174 uint64
+ var x175 uint64
+ x174, x175 = bits.Add64(x157, x154, uint64(p384Uint1(x173)))
+ var x177 uint64
+ _, x177 = bits.Add64(x140, x164, uint64(0x0))
+ var x178 uint64
+ var x179 uint64
+ x178, x179 = bits.Add64(x142, x166, uint64(p384Uint1(x177)))
+ var x180 uint64
+ var x181 uint64
+ x180, x181 = bits.Add64(x144, x168, uint64(p384Uint1(x179)))
+ var x182 uint64
+ var x183 uint64
+ x182, x183 = bits.Add64(x146, x170, uint64(p384Uint1(x181)))
+ var x184 uint64
+ var x185 uint64
+ x184, x185 = bits.Add64(x148, x172, uint64(p384Uint1(x183)))
+ var x186 uint64
+ var x187 uint64
+ x186, x187 = bits.Add64(x150, x174, uint64(p384Uint1(x185)))
+ var x188 uint64
+ var x189 uint64
+ x188, x189 = bits.Add64((uint64(p384Uint1(x151)) + uint64(p384Uint1(x139))), (uint64(p384Uint1(x175)) + x155), uint64(p384Uint1(x187)))
+ var x190 uint64
+ var x191 uint64
+ x190, x191 = bits.Add64(x178, arg1[4], uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Add64(x180, uint64(0x0), uint64(p384Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Add64(x182, uint64(0x0), uint64(p384Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Add64(x184, uint64(0x0), uint64(p384Uint1(x195)))
+ var x198 uint64
+ var x199 uint64
+ x198, x199 = bits.Add64(x186, uint64(0x0), uint64(p384Uint1(x197)))
+ var x200 uint64
+ var x201 uint64
+ x200, x201 = bits.Add64(x188, uint64(0x0), uint64(p384Uint1(x199)))
+ var x202 uint64
+ _, x202 = bits.Mul64(x190, 0x100000001)
+ var x204 uint64
+ var x205 uint64
+ x205, x204 = bits.Mul64(x202, 0xffffffffffffffff)
+ var x206 uint64
+ var x207 uint64
+ x207, x206 = bits.Mul64(x202, 0xffffffffffffffff)
+ var x208 uint64
+ var x209 uint64
+ x209, x208 = bits.Mul64(x202, 0xffffffffffffffff)
+ var x210 uint64
+ var x211 uint64
+ x211, x210 = bits.Mul64(x202, 0xfffffffffffffffe)
+ var x212 uint64
+ var x213 uint64
+ x213, x212 = bits.Mul64(x202, 0xffffffff00000000)
+ var x214 uint64
+ var x215 uint64
+ x215, x214 = bits.Mul64(x202, 0xffffffff)
+ var x216 uint64
+ var x217 uint64
+ x216, x217 = bits.Add64(x215, x212, uint64(0x0))
+ var x218 uint64
+ var x219 uint64
+ x218, x219 = bits.Add64(x213, x210, uint64(p384Uint1(x217)))
+ var x220 uint64
+ var x221 uint64
+ x220, x221 = bits.Add64(x211, x208, uint64(p384Uint1(x219)))
+ var x222 uint64
+ var x223 uint64
+ x222, x223 = bits.Add64(x209, x206, uint64(p384Uint1(x221)))
+ var x224 uint64
+ var x225 uint64
+ x224, x225 = bits.Add64(x207, x204, uint64(p384Uint1(x223)))
+ var x227 uint64
+ _, x227 = bits.Add64(x190, x214, uint64(0x0))
+ var x228 uint64
+ var x229 uint64
+ x228, x229 = bits.Add64(x192, x216, uint64(p384Uint1(x227)))
+ var x230 uint64
+ var x231 uint64
+ x230, x231 = bits.Add64(x194, x218, uint64(p384Uint1(x229)))
+ var x232 uint64
+ var x233 uint64
+ x232, x233 = bits.Add64(x196, x220, uint64(p384Uint1(x231)))
+ var x234 uint64
+ var x235 uint64
+ x234, x235 = bits.Add64(x198, x222, uint64(p384Uint1(x233)))
+ var x236 uint64
+ var x237 uint64
+ x236, x237 = bits.Add64(x200, x224, uint64(p384Uint1(x235)))
+ var x238 uint64
+ var x239 uint64
+ x238, x239 = bits.Add64((uint64(p384Uint1(x201)) + uint64(p384Uint1(x189))), (uint64(p384Uint1(x225)) + x205), uint64(p384Uint1(x237)))
+ var x240 uint64
+ var x241 uint64
+ x240, x241 = bits.Add64(x228, arg1[5], uint64(0x0))
+ var x242 uint64
+ var x243 uint64
+ x242, x243 = bits.Add64(x230, uint64(0x0), uint64(p384Uint1(x241)))
+ var x244 uint64
+ var x245 uint64
+ x244, x245 = bits.Add64(x232, uint64(0x0), uint64(p384Uint1(x243)))
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x234, uint64(0x0), uint64(p384Uint1(x245)))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x236, uint64(0x0), uint64(p384Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x238, uint64(0x0), uint64(p384Uint1(x249)))
+ var x252 uint64
+ _, x252 = bits.Mul64(x240, 0x100000001)
+ var x254 uint64
+ var x255 uint64
+ x255, x254 = bits.Mul64(x252, 0xffffffffffffffff)
+ var x256 uint64
+ var x257 uint64
+ x257, x256 = bits.Mul64(x252, 0xffffffffffffffff)
+ var x258 uint64
+ var x259 uint64
+ x259, x258 = bits.Mul64(x252, 0xffffffffffffffff)
+ var x260 uint64
+ var x261 uint64
+ x261, x260 = bits.Mul64(x252, 0xfffffffffffffffe)
+ var x262 uint64
+ var x263 uint64
+ x263, x262 = bits.Mul64(x252, 0xffffffff00000000)
+ var x264 uint64
+ var x265 uint64
+ x265, x264 = bits.Mul64(x252, 0xffffffff)
+ var x266 uint64
+ var x267 uint64
+ x266, x267 = bits.Add64(x265, x262, uint64(0x0))
+ var x268 uint64
+ var x269 uint64
+ x268, x269 = bits.Add64(x263, x260, uint64(p384Uint1(x267)))
+ var x270 uint64
+ var x271 uint64
+ x270, x271 = bits.Add64(x261, x258, uint64(p384Uint1(x269)))
+ var x272 uint64
+ var x273 uint64
+ x272, x273 = bits.Add64(x259, x256, uint64(p384Uint1(x271)))
+ var x274 uint64
+ var x275 uint64
+ x274, x275 = bits.Add64(x257, x254, uint64(p384Uint1(x273)))
+ var x277 uint64
+ _, x277 = bits.Add64(x240, x264, uint64(0x0))
+ var x278 uint64
+ var x279 uint64
+ x278, x279 = bits.Add64(x242, x266, uint64(p384Uint1(x277)))
+ var x280 uint64
+ var x281 uint64
+ x280, x281 = bits.Add64(x244, x268, uint64(p384Uint1(x279)))
+ var x282 uint64
+ var x283 uint64
+ x282, x283 = bits.Add64(x246, x270, uint64(p384Uint1(x281)))
+ var x284 uint64
+ var x285 uint64
+ x284, x285 = bits.Add64(x248, x272, uint64(p384Uint1(x283)))
+ var x286 uint64
+ var x287 uint64
+ x286, x287 = bits.Add64(x250, x274, uint64(p384Uint1(x285)))
+ var x288 uint64
+ var x289 uint64
+ x288, x289 = bits.Add64((uint64(p384Uint1(x251)) + uint64(p384Uint1(x239))), (uint64(p384Uint1(x275)) + x255), uint64(p384Uint1(x287)))
+ var x290 uint64
+ var x291 uint64
+ x290, x291 = bits.Sub64(x278, 0xffffffff, uint64(0x0))
+ var x292 uint64
+ var x293 uint64
+ x292, x293 = bits.Sub64(x280, 0xffffffff00000000, uint64(p384Uint1(x291)))
+ var x294 uint64
+ var x295 uint64
+ x294, x295 = bits.Sub64(x282, 0xfffffffffffffffe, uint64(p384Uint1(x293)))
+ var x296 uint64
+ var x297 uint64
+ x296, x297 = bits.Sub64(x284, 0xffffffffffffffff, uint64(p384Uint1(x295)))
+ var x298 uint64
+ var x299 uint64
+ x298, x299 = bits.Sub64(x286, 0xffffffffffffffff, uint64(p384Uint1(x297)))
+ var x300 uint64
+ var x301 uint64
+ x300, x301 = bits.Sub64(x288, 0xffffffffffffffff, uint64(p384Uint1(x299)))
+ var x303 uint64
+ _, x303 = bits.Sub64(uint64(p384Uint1(x289)), uint64(0x0), uint64(p384Uint1(x301)))
+ var x304 uint64
+ p384CmovznzU64(&x304, p384Uint1(x303), x290, x278)
+ var x305 uint64
+ p384CmovznzU64(&x305, p384Uint1(x303), x292, x280)
+ var x306 uint64
+ p384CmovznzU64(&x306, p384Uint1(x303), x294, x282)
+ var x307 uint64
+ p384CmovznzU64(&x307, p384Uint1(x303), x296, x284)
+ var x308 uint64
+ p384CmovznzU64(&x308, p384Uint1(x303), x298, x286)
+ var x309 uint64
+ p384CmovznzU64(&x309, p384Uint1(x303), x300, x288)
+ out1[0] = x304
+ out1[1] = x305
+ out1[2] = x306
+ out1[3] = x307
+ out1[4] = x308
+ out1[5] = x309
+}
+
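+// Each of the six rounds above multiplies the current low word by
+// m' = 0x100000001, uses the low half of that product to scale the limbs of m,
+// and adds the result so that the low word cancels; the running value is then
+// shifted down one word. After six rounds the output is arg1 * R⁻¹ mod m, as
+// the postcondition states. The same relation as a math/big fragment (m as in
+// the sketch after p384Mul; x stands for the integer encoded by arg1):
+//
+//    r := new(big.Int).Lsh(big.NewInt(1), 384)  // R = 2^384
+//    rInv := new(big.Int).ModInverse(r, m)      // R⁻¹ mod m
+//    want := new(big.Int).Mod(new(big.Int).Mul(x, rInv), m) // what p384FromMontgomery encodes
+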
+// p384ToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = eval arg1 mod m
+// 0 ≤ eval out1 < m
+func p384ToMontgomery(out1 *p384MontgomeryDomainFieldElement, arg1 *p384NonMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[4]
+ x5 := arg1[5]
+ x6 := arg1[0]
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x6, 0x200000000)
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x6, 0xfffffffe00000000)
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x6, 0x200000000)
+ var x13 uint64
+ var x14 uint64
+ x14, x13 = bits.Mul64(x6, 0xfffffffe00000001)
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x14, x11, uint64(0x0))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x12, x9, uint64(p384Uint1(x16)))
+ var x19 uint64
+ var x20 uint64
+ x19, x20 = bits.Add64(x10, x7, uint64(p384Uint1(x18)))
+ var x21 uint64
+ var x22 uint64
+ x21, x22 = bits.Add64(x8, x6, uint64(p384Uint1(x20)))
+ var x23 uint64
+ _, x23 = bits.Mul64(x13, 0x100000001)
+ var x25 uint64
+ var x26 uint64
+ x26, x25 = bits.Mul64(x23, 0xffffffffffffffff)
+ var x27 uint64
+ var x28 uint64
+ x28, x27 = bits.Mul64(x23, 0xffffffffffffffff)
+ var x29 uint64
+ var x30 uint64
+ x30, x29 = bits.Mul64(x23, 0xffffffffffffffff)
+ var x31 uint64
+ var x32 uint64
+ x32, x31 = bits.Mul64(x23, 0xfffffffffffffffe)
+ var x33 uint64
+ var x34 uint64
+ x34, x33 = bits.Mul64(x23, 0xffffffff00000000)
+ var x35 uint64
+ var x36 uint64
+ x36, x35 = bits.Mul64(x23, 0xffffffff)
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x36, x33, uint64(0x0))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x34, x31, uint64(p384Uint1(x38)))
+ var x41 uint64
+ var x42 uint64
+ x41, x42 = bits.Add64(x32, x29, uint64(p384Uint1(x40)))
+ var x43 uint64
+ var x44 uint64
+ x43, x44 = bits.Add64(x30, x27, uint64(p384Uint1(x42)))
+ var x45 uint64
+ var x46 uint64
+ x45, x46 = bits.Add64(x28, x25, uint64(p384Uint1(x44)))
+ var x48 uint64
+ _, x48 = bits.Add64(x13, x35, uint64(0x0))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x15, x37, uint64(p384Uint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x17, x39, uint64(p384Uint1(x50)))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x19, x41, uint64(p384Uint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x21, x43, uint64(p384Uint1(x54)))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(uint64(p384Uint1(x22)), x45, uint64(p384Uint1(x56)))
+ var x59 uint64
+ var x60 uint64
+ x59, x60 = bits.Add64(uint64(0x0), (uint64(p384Uint1(x46)) + x26), uint64(p384Uint1(x58)))
+ var x61 uint64
+ var x62 uint64
+ x62, x61 = bits.Mul64(x1, 0x200000000)
+ var x63 uint64
+ var x64 uint64
+ x64, x63 = bits.Mul64(x1, 0xfffffffe00000000)
+ var x65 uint64
+ var x66 uint64
+ x66, x65 = bits.Mul64(x1, 0x200000000)
+ var x67 uint64
+ var x68 uint64
+ x68, x67 = bits.Mul64(x1, 0xfffffffe00000001)
+ var x69 uint64
+ var x70 uint64
+ x69, x70 = bits.Add64(x68, x65, uint64(0x0))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x66, x63, uint64(p384Uint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x64, x61, uint64(p384Uint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x62, x1, uint64(p384Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Add64(x49, x67, uint64(0x0))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Add64(x51, x69, uint64(p384Uint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x53, x71, uint64(p384Uint1(x80)))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x55, x73, uint64(p384Uint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x57, x75, uint64(p384Uint1(x84)))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x59, uint64(p384Uint1(x76)), uint64(p384Uint1(x86)))
+ var x89 uint64
+ _, x89 = bits.Mul64(x77, 0x100000001)
+ var x91 uint64
+ var x92 uint64
+ x92, x91 = bits.Mul64(x89, 0xffffffffffffffff)
+ var x93 uint64
+ var x94 uint64
+ x94, x93 = bits.Mul64(x89, 0xffffffffffffffff)
+ var x95 uint64
+ var x96 uint64
+ x96, x95 = bits.Mul64(x89, 0xffffffffffffffff)
+ var x97 uint64
+ var x98 uint64
+ x98, x97 = bits.Mul64(x89, 0xfffffffffffffffe)
+ var x99 uint64
+ var x100 uint64
+ x100, x99 = bits.Mul64(x89, 0xffffffff00000000)
+ var x101 uint64
+ var x102 uint64
+ x102, x101 = bits.Mul64(x89, 0xffffffff)
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Add64(x102, x99, uint64(0x0))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x100, x97, uint64(p384Uint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x98, x95, uint64(p384Uint1(x106)))
+ var x109 uint64
+ var x110 uint64
+ x109, x110 = bits.Add64(x96, x93, uint64(p384Uint1(x108)))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x94, x91, uint64(p384Uint1(x110)))
+ var x114 uint64
+ _, x114 = bits.Add64(x77, x101, uint64(0x0))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x79, x103, uint64(p384Uint1(x114)))
+ var x117 uint64
+ var x118 uint64
+ x117, x118 = bits.Add64(x81, x105, uint64(p384Uint1(x116)))
+ var x119 uint64
+ var x120 uint64
+ x119, x120 = bits.Add64(x83, x107, uint64(p384Uint1(x118)))
+ var x121 uint64
+ var x122 uint64
+ x121, x122 = bits.Add64(x85, x109, uint64(p384Uint1(x120)))
+ var x123 uint64
+ var x124 uint64
+ x123, x124 = bits.Add64(x87, x111, uint64(p384Uint1(x122)))
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64((uint64(p384Uint1(x88)) + uint64(p384Uint1(x60))), (uint64(p384Uint1(x112)) + x92), uint64(p384Uint1(x124)))
+ var x127 uint64
+ var x128 uint64
+ x128, x127 = bits.Mul64(x2, 0x200000000)
+ var x129 uint64
+ var x130 uint64
+ x130, x129 = bits.Mul64(x2, 0xfffffffe00000000)
+ var x131 uint64
+ var x132 uint64
+ x132, x131 = bits.Mul64(x2, 0x200000000)
+ var x133 uint64
+ var x134 uint64
+ x134, x133 = bits.Mul64(x2, 0xfffffffe00000001)
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x134, x131, uint64(0x0))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x132, x129, uint64(p384Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x130, x127, uint64(p384Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x128, x2, uint64(p384Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x115, x133, uint64(0x0))
+ var x145 uint64
+ var x146 uint64
+ x145, x146 = bits.Add64(x117, x135, uint64(p384Uint1(x144)))
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x119, x137, uint64(p384Uint1(x146)))
+ var x149 uint64
+ var x150 uint64
+ x149, x150 = bits.Add64(x121, x139, uint64(p384Uint1(x148)))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x123, x141, uint64(p384Uint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x125, uint64(p384Uint1(x142)), uint64(p384Uint1(x152)))
+ var x155 uint64
+ _, x155 = bits.Mul64(x143, 0x100000001)
+ var x157 uint64
+ var x158 uint64
+ x158, x157 = bits.Mul64(x155, 0xffffffffffffffff)
+ var x159 uint64
+ var x160 uint64
+ x160, x159 = bits.Mul64(x155, 0xffffffffffffffff)
+ var x161 uint64
+ var x162 uint64
+ x162, x161 = bits.Mul64(x155, 0xffffffffffffffff)
+ var x163 uint64
+ var x164 uint64
+ x164, x163 = bits.Mul64(x155, 0xfffffffffffffffe)
+ var x165 uint64
+ var x166 uint64
+ x166, x165 = bits.Mul64(x155, 0xffffffff00000000)
+ var x167 uint64
+ var x168 uint64
+ x168, x167 = bits.Mul64(x155, 0xffffffff)
+ var x169 uint64
+ var x170 uint64
+ x169, x170 = bits.Add64(x168, x165, uint64(0x0))
+ var x171 uint64
+ var x172 uint64
+ x171, x172 = bits.Add64(x166, x163, uint64(p384Uint1(x170)))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x164, x161, uint64(p384Uint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x162, x159, uint64(p384Uint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x160, x157, uint64(p384Uint1(x176)))
+ var x180 uint64
+ _, x180 = bits.Add64(x143, x167, uint64(0x0))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x145, x169, uint64(p384Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x147, x171, uint64(p384Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x149, x173, uint64(p384Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x151, x175, uint64(p384Uint1(x186)))
+ var x189 uint64
+ var x190 uint64
+ x189, x190 = bits.Add64(x153, x177, uint64(p384Uint1(x188)))
+ var x191 uint64
+ var x192 uint64
+ x191, x192 = bits.Add64((uint64(p384Uint1(x154)) + uint64(p384Uint1(x126))), (uint64(p384Uint1(x178)) + x158), uint64(p384Uint1(x190)))
+ var x193 uint64
+ var x194 uint64
+ x194, x193 = bits.Mul64(x3, 0x200000000)
+ var x195 uint64
+ var x196 uint64
+ x196, x195 = bits.Mul64(x3, 0xfffffffe00000000)
+ var x197 uint64
+ var x198 uint64
+ x198, x197 = bits.Mul64(x3, 0x200000000)
+ var x199 uint64
+ var x200 uint64
+ x200, x199 = bits.Mul64(x3, 0xfffffffe00000001)
+ var x201 uint64
+ var x202 uint64
+ x201, x202 = bits.Add64(x200, x197, uint64(0x0))
+ var x203 uint64
+ var x204 uint64
+ x203, x204 = bits.Add64(x198, x195, uint64(p384Uint1(x202)))
+ var x205 uint64
+ var x206 uint64
+ x205, x206 = bits.Add64(x196, x193, uint64(p384Uint1(x204)))
+ var x207 uint64
+ var x208 uint64
+ x207, x208 = bits.Add64(x194, x3, uint64(p384Uint1(x206)))
+ var x209 uint64
+ var x210 uint64
+ x209, x210 = bits.Add64(x181, x199, uint64(0x0))
+ var x211 uint64
+ var x212 uint64
+ x211, x212 = bits.Add64(x183, x201, uint64(p384Uint1(x210)))
+ var x213 uint64
+ var x214 uint64
+ x213, x214 = bits.Add64(x185, x203, uint64(p384Uint1(x212)))
+ var x215 uint64
+ var x216 uint64
+ x215, x216 = bits.Add64(x187, x205, uint64(p384Uint1(x214)))
+ var x217 uint64
+ var x218 uint64
+ x217, x218 = bits.Add64(x189, x207, uint64(p384Uint1(x216)))
+ var x219 uint64
+ var x220 uint64
+ x219, x220 = bits.Add64(x191, uint64(p384Uint1(x208)), uint64(p384Uint1(x218)))
+ var x221 uint64
+ _, x221 = bits.Mul64(x209, 0x100000001)
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x221, 0xffffffffffffffff)
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x221, 0xffffffffffffffff)
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x221, 0xffffffffffffffff)
+ var x229 uint64
+ var x230 uint64
+ x230, x229 = bits.Mul64(x221, 0xfffffffffffffffe)
+ var x231 uint64
+ var x232 uint64
+ x232, x231 = bits.Mul64(x221, 0xffffffff00000000)
+ var x233 uint64
+ var x234 uint64
+ x234, x233 = bits.Mul64(x221, 0xffffffff)
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x234, x231, uint64(0x0))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x232, x229, uint64(p384Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x230, x227, uint64(p384Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x228, x225, uint64(p384Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x226, x223, uint64(p384Uint1(x242)))
+ var x246 uint64
+ _, x246 = bits.Add64(x209, x233, uint64(0x0))
+ var x247 uint64
+ var x248 uint64
+ x247, x248 = bits.Add64(x211, x235, uint64(p384Uint1(x246)))
+ var x249 uint64
+ var x250 uint64
+ x249, x250 = bits.Add64(x213, x237, uint64(p384Uint1(x248)))
+ var x251 uint64
+ var x252 uint64
+ x251, x252 = bits.Add64(x215, x239, uint64(p384Uint1(x250)))
+ var x253 uint64
+ var x254 uint64
+ x253, x254 = bits.Add64(x217, x241, uint64(p384Uint1(x252)))
+ var x255 uint64
+ var x256 uint64
+ x255, x256 = bits.Add64(x219, x243, uint64(p384Uint1(x254)))
+ var x257 uint64
+ var x258 uint64
+ x257, x258 = bits.Add64((uint64(p384Uint1(x220)) + uint64(p384Uint1(x192))), (uint64(p384Uint1(x244)) + x224), uint64(p384Uint1(x256)))
+ var x259 uint64
+ var x260 uint64
+ x260, x259 = bits.Mul64(x4, 0x200000000)
+ var x261 uint64
+ var x262 uint64
+ x262, x261 = bits.Mul64(x4, 0xfffffffe00000000)
+ var x263 uint64
+ var x264 uint64
+ x264, x263 = bits.Mul64(x4, 0x200000000)
+ var x265 uint64
+ var x266 uint64
+ x266, x265 = bits.Mul64(x4, 0xfffffffe00000001)
+ var x267 uint64
+ var x268 uint64
+ x267, x268 = bits.Add64(x266, x263, uint64(0x0))
+ var x269 uint64
+ var x270 uint64
+ x269, x270 = bits.Add64(x264, x261, uint64(p384Uint1(x268)))
+ var x271 uint64
+ var x272 uint64
+ x271, x272 = bits.Add64(x262, x259, uint64(p384Uint1(x270)))
+ var x273 uint64
+ var x274 uint64
+ x273, x274 = bits.Add64(x260, x4, uint64(p384Uint1(x272)))
+ var x275 uint64
+ var x276 uint64
+ x275, x276 = bits.Add64(x247, x265, uint64(0x0))
+ var x277 uint64
+ var x278 uint64
+ x277, x278 = bits.Add64(x249, x267, uint64(p384Uint1(x276)))
+ var x279 uint64
+ var x280 uint64
+ x279, x280 = bits.Add64(x251, x269, uint64(p384Uint1(x278)))
+ var x281 uint64
+ var x282 uint64
+ x281, x282 = bits.Add64(x253, x271, uint64(p384Uint1(x280)))
+ var x283 uint64
+ var x284 uint64
+ x283, x284 = bits.Add64(x255, x273, uint64(p384Uint1(x282)))
+ var x285 uint64
+ var x286 uint64
+ x285, x286 = bits.Add64(x257, uint64(p384Uint1(x274)), uint64(p384Uint1(x284)))
+ var x287 uint64
+ _, x287 = bits.Mul64(x275, 0x100000001)
+ var x289 uint64
+ var x290 uint64
+ x290, x289 = bits.Mul64(x287, 0xffffffffffffffff)
+ var x291 uint64
+ var x292 uint64
+ x292, x291 = bits.Mul64(x287, 0xffffffffffffffff)
+ var x293 uint64
+ var x294 uint64
+ x294, x293 = bits.Mul64(x287, 0xffffffffffffffff)
+ var x295 uint64
+ var x296 uint64
+ x296, x295 = bits.Mul64(x287, 0xfffffffffffffffe)
+ var x297 uint64
+ var x298 uint64
+ x298, x297 = bits.Mul64(x287, 0xffffffff00000000)
+ var x299 uint64
+ var x300 uint64
+ x300, x299 = bits.Mul64(x287, 0xffffffff)
+ var x301 uint64
+ var x302 uint64
+ x301, x302 = bits.Add64(x300, x297, uint64(0x0))
+ var x303 uint64
+ var x304 uint64
+ x303, x304 = bits.Add64(x298, x295, uint64(p384Uint1(x302)))
+ var x305 uint64
+ var x306 uint64
+ x305, x306 = bits.Add64(x296, x293, uint64(p384Uint1(x304)))
+ var x307 uint64
+ var x308 uint64
+ x307, x308 = bits.Add64(x294, x291, uint64(p384Uint1(x306)))
+ var x309 uint64
+ var x310 uint64
+ x309, x310 = bits.Add64(x292, x289, uint64(p384Uint1(x308)))
+ var x312 uint64
+ _, x312 = bits.Add64(x275, x299, uint64(0x0))
+ var x313 uint64
+ var x314 uint64
+ x313, x314 = bits.Add64(x277, x301, uint64(p384Uint1(x312)))
+ var x315 uint64
+ var x316 uint64
+ x315, x316 = bits.Add64(x279, x303, uint64(p384Uint1(x314)))
+ var x317 uint64
+ var x318 uint64
+ x317, x318 = bits.Add64(x281, x305, uint64(p384Uint1(x316)))
+ var x319 uint64
+ var x320 uint64
+ x319, x320 = bits.Add64(x283, x307, uint64(p384Uint1(x318)))
+ var x321 uint64
+ var x322 uint64
+ x321, x322 = bits.Add64(x285, x309, uint64(p384Uint1(x320)))
+ var x323 uint64
+ var x324 uint64
+ x323, x324 = bits.Add64((uint64(p384Uint1(x286)) + uint64(p384Uint1(x258))), (uint64(p384Uint1(x310)) + x290), uint64(p384Uint1(x322)))
+ var x325 uint64
+ var x326 uint64
+ x326, x325 = bits.Mul64(x5, 0x200000000)
+ var x327 uint64
+ var x328 uint64
+ x328, x327 = bits.Mul64(x5, 0xfffffffe00000000)
+ var x329 uint64
+ var x330 uint64
+ x330, x329 = bits.Mul64(x5, 0x200000000)
+ var x331 uint64
+ var x332 uint64
+ x332, x331 = bits.Mul64(x5, 0xfffffffe00000001)
+ var x333 uint64
+ var x334 uint64
+ x333, x334 = bits.Add64(x332, x329, uint64(0x0))
+ var x335 uint64
+ var x336 uint64
+ x335, x336 = bits.Add64(x330, x327, uint64(p384Uint1(x334)))
+ var x337 uint64
+ var x338 uint64
+ x337, x338 = bits.Add64(x328, x325, uint64(p384Uint1(x336)))
+ var x339 uint64
+ var x340 uint64
+ x339, x340 = bits.Add64(x326, x5, uint64(p384Uint1(x338)))
+ var x341 uint64
+ var x342 uint64
+ x341, x342 = bits.Add64(x313, x331, uint64(0x0))
+ var x343 uint64
+ var x344 uint64
+ x343, x344 = bits.Add64(x315, x333, uint64(p384Uint1(x342)))
+ var x345 uint64
+ var x346 uint64
+ x345, x346 = bits.Add64(x317, x335, uint64(p384Uint1(x344)))
+ var x347 uint64
+ var x348 uint64
+ x347, x348 = bits.Add64(x319, x337, uint64(p384Uint1(x346)))
+ var x349 uint64
+ var x350 uint64
+ x349, x350 = bits.Add64(x321, x339, uint64(p384Uint1(x348)))
+ var x351 uint64
+ var x352 uint64
+ x351, x352 = bits.Add64(x323, uint64(p384Uint1(x340)), uint64(p384Uint1(x350)))
+ var x353 uint64
+ _, x353 = bits.Mul64(x341, 0x100000001)
+ var x355 uint64
+ var x356 uint64
+ x356, x355 = bits.Mul64(x353, 0xffffffffffffffff)
+ var x357 uint64
+ var x358 uint64
+ x358, x357 = bits.Mul64(x353, 0xffffffffffffffff)
+ var x359 uint64
+ var x360 uint64
+ x360, x359 = bits.Mul64(x353, 0xffffffffffffffff)
+ var x361 uint64
+ var x362 uint64
+ x362, x361 = bits.Mul64(x353, 0xfffffffffffffffe)
+ var x363 uint64
+ var x364 uint64
+ x364, x363 = bits.Mul64(x353, 0xffffffff00000000)
+ var x365 uint64
+ var x366 uint64
+ x366, x365 = bits.Mul64(x353, 0xffffffff)
+ var x367 uint64
+ var x368 uint64
+ x367, x368 = bits.Add64(x366, x363, uint64(0x0))
+ var x369 uint64
+ var x370 uint64
+ x369, x370 = bits.Add64(x364, x361, uint64(p384Uint1(x368)))
+ var x371 uint64
+ var x372 uint64
+ x371, x372 = bits.Add64(x362, x359, uint64(p384Uint1(x370)))
+ var x373 uint64
+ var x374 uint64
+ x373, x374 = bits.Add64(x360, x357, uint64(p384Uint1(x372)))
+ var x375 uint64
+ var x376 uint64
+ x375, x376 = bits.Add64(x358, x355, uint64(p384Uint1(x374)))
+ var x378 uint64
+ _, x378 = bits.Add64(x341, x365, uint64(0x0))
+ var x379 uint64
+ var x380 uint64
+ x379, x380 = bits.Add64(x343, x367, uint64(p384Uint1(x378)))
+ var x381 uint64
+ var x382 uint64
+ x381, x382 = bits.Add64(x345, x369, uint64(p384Uint1(x380)))
+ var x383 uint64
+ var x384 uint64
+ x383, x384 = bits.Add64(x347, x371, uint64(p384Uint1(x382)))
+ var x385 uint64
+ var x386 uint64
+ x385, x386 = bits.Add64(x349, x373, uint64(p384Uint1(x384)))
+ var x387 uint64
+ var x388 uint64
+ x387, x388 = bits.Add64(x351, x375, uint64(p384Uint1(x386)))
+ var x389 uint64
+ var x390 uint64
+ x389, x390 = bits.Add64((uint64(p384Uint1(x352)) + uint64(p384Uint1(x324))), (uint64(p384Uint1(x376)) + x356), uint64(p384Uint1(x388)))
+ var x391 uint64
+ var x392 uint64
+ x391, x392 = bits.Sub64(x379, 0xffffffff, uint64(0x0))
+ var x393 uint64
+ var x394 uint64
+ x393, x394 = bits.Sub64(x381, 0xffffffff00000000, uint64(p384Uint1(x392)))
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Sub64(x383, 0xfffffffffffffffe, uint64(p384Uint1(x394)))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Sub64(x385, 0xffffffffffffffff, uint64(p384Uint1(x396)))
+ var x399 uint64
+ var x400 uint64
+ x399, x400 = bits.Sub64(x387, 0xffffffffffffffff, uint64(p384Uint1(x398)))
+ var x401 uint64
+ var x402 uint64
+ x401, x402 = bits.Sub64(x389, 0xffffffffffffffff, uint64(p384Uint1(x400)))
+ var x404 uint64
+ _, x404 = bits.Sub64(uint64(p384Uint1(x390)), uint64(0x0), uint64(p384Uint1(x402)))
+ var x405 uint64
+ p384CmovznzU64(&x405, p384Uint1(x404), x391, x379)
+ var x406 uint64
+ p384CmovznzU64(&x406, p384Uint1(x404), x393, x381)
+ var x407 uint64
+ p384CmovznzU64(&x407, p384Uint1(x404), x395, x383)
+ var x408 uint64
+ p384CmovznzU64(&x408, p384Uint1(x404), x397, x385)
+ var x409 uint64
+ p384CmovznzU64(&x409, p384Uint1(x404), x399, x387)
+ var x410 uint64
+ p384CmovznzU64(&x410, p384Uint1(x404), x401, x389)
+ out1[0] = x405
+ out1[1] = x406
+ out1[2] = x407
+ out1[3] = x408
+ out1[4] = x409
+ out1[5] = x410
+}
+
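+// Instead of calling p384Mul against a stored R², the multiplication is
+// inlined: 0xfffffffe00000001, 0x200000000, 0xfffffffe00000000 and 0x200000000
+// appear to be the four low limbs of R² mod m (limb 4 is 1, contributed by
+// adding the input word itself, e.g. x21; limb 5 is 0), so the rounds compute
+// x * R² * R⁻¹ = x * R, landing in the Montgomery domain. A math/big fragment
+// to confirm the constant (m as in the sketch after p384Mul):
+//
+//    r2 := new(big.Int).Lsh(big.NewInt(1), 768) // R² = 2^768
+//    r2.Mod(r2, m)
+//    fmt.Printf("%x\n", r2) // limbs, low to high: 0xfffffffe00000001, 0x200000000, 0xfffffffe00000000, 0x200000000, 0x1, 0x0
+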
+// p384Selectznz is a multi-limb conditional select.
+//
+// Postconditions:
+//
+// eval out1 = (if arg1 = 0 then eval arg2 else eval arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+// arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+func p384Selectznz(out1 *[6]uint64, arg1 p384Uint1, arg2 *[6]uint64, arg3 *[6]uint64) {
+ var x1 uint64
+ p384CmovznzU64(&x1, arg1, arg2[0], arg3[0])
+ var x2 uint64
+ p384CmovznzU64(&x2, arg1, arg2[1], arg3[1])
+ var x3 uint64
+ p384CmovznzU64(&x3, arg1, arg2[2], arg3[2])
+ var x4 uint64
+ p384CmovznzU64(&x4, arg1, arg2[3], arg3[3])
+ var x5 uint64
+ p384CmovznzU64(&x5, arg1, arg2[4], arg3[4])
+ var x6 uint64
+ p384CmovznzU64(&x6, arg1, arg2[5], arg3[5])
+ out1[0] = x1
+ out1[1] = x2
+ out1[2] = x3
+ out1[3] = x4
+ out1[4] = x5
+ out1[5] = x6
+}
+
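+// The selection runs limb by limb through p384CmovznzU64, so the choice never
+// branches on arg1. A minimal usage sketch (a and b are hypothetical [6]uint64
+// limb vectors):
+//
+//    var out [6]uint64
+//    p384Selectznz(&out, 1, &a, &b) // out = b, in constant time; arg1 = 0 would pick a
+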
+// p384ToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..47]
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
+func p384ToBytes(out1 *[48]uint8, arg1 *[6]uint64) {
+ x1 := arg1[5]
+ x2 := arg1[4]
+ x3 := arg1[3]
+ x4 := arg1[2]
+ x5 := arg1[1]
+ x6 := arg1[0]
+ x7 := (uint8(x6) & 0xff)
+ x8 := (x6 >> 8)
+ x9 := (uint8(x8) & 0xff)
+ x10 := (x8 >> 8)
+ x11 := (uint8(x10) & 0xff)
+ x12 := (x10 >> 8)
+ x13 := (uint8(x12) & 0xff)
+ x14 := (x12 >> 8)
+ x15 := (uint8(x14) & 0xff)
+ x16 := (x14 >> 8)
+ x17 := (uint8(x16) & 0xff)
+ x18 := (x16 >> 8)
+ x19 := (uint8(x18) & 0xff)
+ x20 := uint8((x18 >> 8))
+ x21 := (uint8(x5) & 0xff)
+ x22 := (x5 >> 8)
+ x23 := (uint8(x22) & 0xff)
+ x24 := (x22 >> 8)
+ x25 := (uint8(x24) & 0xff)
+ x26 := (x24 >> 8)
+ x27 := (uint8(x26) & 0xff)
+ x28 := (x26 >> 8)
+ x29 := (uint8(x28) & 0xff)
+ x30 := (x28 >> 8)
+ x31 := (uint8(x30) & 0xff)
+ x32 := (x30 >> 8)
+ x33 := (uint8(x32) & 0xff)
+ x34 := uint8((x32 >> 8))
+ x35 := (uint8(x4) & 0xff)
+ x36 := (x4 >> 8)
+ x37 := (uint8(x36) & 0xff)
+ x38 := (x36 >> 8)
+ x39 := (uint8(x38) & 0xff)
+ x40 := (x38 >> 8)
+ x41 := (uint8(x40) & 0xff)
+ x42 := (x40 >> 8)
+ x43 := (uint8(x42) & 0xff)
+ x44 := (x42 >> 8)
+ x45 := (uint8(x44) & 0xff)
+ x46 := (x44 >> 8)
+ x47 := (uint8(x46) & 0xff)
+ x48 := uint8((x46 >> 8))
+ x49 := (uint8(x3) & 0xff)
+ x50 := (x3 >> 8)
+ x51 := (uint8(x50) & 0xff)
+ x52 := (x50 >> 8)
+ x53 := (uint8(x52) & 0xff)
+ x54 := (x52 >> 8)
+ x55 := (uint8(x54) & 0xff)
+ x56 := (x54 >> 8)
+ x57 := (uint8(x56) & 0xff)
+ x58 := (x56 >> 8)
+ x59 := (uint8(x58) & 0xff)
+ x60 := (x58 >> 8)
+ x61 := (uint8(x60) & 0xff)
+ x62 := uint8((x60 >> 8))
+ x63 := (uint8(x2) & 0xff)
+ x64 := (x2 >> 8)
+ x65 := (uint8(x64) & 0xff)
+ x66 := (x64 >> 8)
+ x67 := (uint8(x66) & 0xff)
+ x68 := (x66 >> 8)
+ x69 := (uint8(x68) & 0xff)
+ x70 := (x68 >> 8)
+ x71 := (uint8(x70) & 0xff)
+ x72 := (x70 >> 8)
+ x73 := (uint8(x72) & 0xff)
+ x74 := (x72 >> 8)
+ x75 := (uint8(x74) & 0xff)
+ x76 := uint8((x74 >> 8))
+ x77 := (uint8(x1) & 0xff)
+ x78 := (x1 >> 8)
+ x79 := (uint8(x78) & 0xff)
+ x80 := (x78 >> 8)
+ x81 := (uint8(x80) & 0xff)
+ x82 := (x80 >> 8)
+ x83 := (uint8(x82) & 0xff)
+ x84 := (x82 >> 8)
+ x85 := (uint8(x84) & 0xff)
+ x86 := (x84 >> 8)
+ x87 := (uint8(x86) & 0xff)
+ x88 := (x86 >> 8)
+ x89 := (uint8(x88) & 0xff)
+ x90 := uint8((x88 >> 8))
+ out1[0] = x7
+ out1[1] = x9
+ out1[2] = x11
+ out1[3] = x13
+ out1[4] = x15
+ out1[5] = x17
+ out1[6] = x19
+ out1[7] = x20
+ out1[8] = x21
+ out1[9] = x23
+ out1[10] = x25
+ out1[11] = x27
+ out1[12] = x29
+ out1[13] = x31
+ out1[14] = x33
+ out1[15] = x34
+ out1[16] = x35
+ out1[17] = x37
+ out1[18] = x39
+ out1[19] = x41
+ out1[20] = x43
+ out1[21] = x45
+ out1[22] = x47
+ out1[23] = x48
+ out1[24] = x49
+ out1[25] = x51
+ out1[26] = x53
+ out1[27] = x55
+ out1[28] = x57
+ out1[29] = x59
+ out1[30] = x61
+ out1[31] = x62
+ out1[32] = x63
+ out1[33] = x65
+ out1[34] = x67
+ out1[35] = x69
+ out1[36] = x71
+ out1[37] = x73
+ out1[38] = x75
+ out1[39] = x76
+ out1[40] = x77
+ out1[41] = x79
+ out1[42] = x81
+ out1[43] = x83
+ out1[44] = x85
+ out1[45] = x87
+ out1[46] = x89
+ out1[47] = x90
+}
+
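+// The unrolled shift-and-mask chains above serialize each limb least
+// significant byte first; the generated straight-line code simply avoids loops
+// and extra imports. A compact equivalent sketch using encoding/binary (ours,
+// for illustration):
+//
+//    package main
+//
+//    import (
+//        "encoding/binary"
+//        "fmt"
+//    )
+//
+//    func main() {
+//        arg1 := [6]uint64{1, 2, 3, 4, 5, 6}
+//        var out1 [48]byte
+//        for i, w := range arg1 {
+//            binary.LittleEndian.PutUint64(out1[8*i:], w)
+//        }
+//        fmt.Println(out1)
+//    }
+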
+// p384FromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ bytes_eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = bytes_eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+func p384FromBytes(out1 *[6]uint64, arg1 *[48]uint8) {
+ x1 := (uint64(arg1[47]) << 56)
+ x2 := (uint64(arg1[46]) << 48)
+ x3 := (uint64(arg1[45]) << 40)
+ x4 := (uint64(arg1[44]) << 32)
+ x5 := (uint64(arg1[43]) << 24)
+ x6 := (uint64(arg1[42]) << 16)
+ x7 := (uint64(arg1[41]) << 8)
+ x8 := arg1[40]
+ x9 := (uint64(arg1[39]) << 56)
+ x10 := (uint64(arg1[38]) << 48)
+ x11 := (uint64(arg1[37]) << 40)
+ x12 := (uint64(arg1[36]) << 32)
+ x13 := (uint64(arg1[35]) << 24)
+ x14 := (uint64(arg1[34]) << 16)
+ x15 := (uint64(arg1[33]) << 8)
+ x16 := arg1[32]
+ x17 := (uint64(arg1[31]) << 56)
+ x18 := (uint64(arg1[30]) << 48)
+ x19 := (uint64(arg1[29]) << 40)
+ x20 := (uint64(arg1[28]) << 32)
+ x21 := (uint64(arg1[27]) << 24)
+ x22 := (uint64(arg1[26]) << 16)
+ x23 := (uint64(arg1[25]) << 8)
+ x24 := arg1[24]
+ x25 := (uint64(arg1[23]) << 56)
+ x26 := (uint64(arg1[22]) << 48)
+ x27 := (uint64(arg1[21]) << 40)
+ x28 := (uint64(arg1[20]) << 32)
+ x29 := (uint64(arg1[19]) << 24)
+ x30 := (uint64(arg1[18]) << 16)
+ x31 := (uint64(arg1[17]) << 8)
+ x32 := arg1[16]
+ x33 := (uint64(arg1[15]) << 56)
+ x34 := (uint64(arg1[14]) << 48)
+ x35 := (uint64(arg1[13]) << 40)
+ x36 := (uint64(arg1[12]) << 32)
+ x37 := (uint64(arg1[11]) << 24)
+ x38 := (uint64(arg1[10]) << 16)
+ x39 := (uint64(arg1[9]) << 8)
+ x40 := arg1[8]
+ x41 := (uint64(arg1[7]) << 56)
+ x42 := (uint64(arg1[6]) << 48)
+ x43 := (uint64(arg1[5]) << 40)
+ x44 := (uint64(arg1[4]) << 32)
+ x45 := (uint64(arg1[3]) << 24)
+ x46 := (uint64(arg1[2]) << 16)
+ x47 := (uint64(arg1[1]) << 8)
+ x48 := arg1[0]
+ x49 := (x47 + uint64(x48))
+ x50 := (x46 + x49)
+ x51 := (x45 + x50)
+ x52 := (x44 + x51)
+ x53 := (x43 + x52)
+ x54 := (x42 + x53)
+ x55 := (x41 + x54)
+ x56 := (x39 + uint64(x40))
+ x57 := (x38 + x56)
+ x58 := (x37 + x57)
+ x59 := (x36 + x58)
+ x60 := (x35 + x59)
+ x61 := (x34 + x60)
+ x62 := (x33 + x61)
+ x63 := (x31 + uint64(x32))
+ x64 := (x30 + x63)
+ x65 := (x29 + x64)
+ x66 := (x28 + x65)
+ x67 := (x27 + x66)
+ x68 := (x26 + x67)
+ x69 := (x25 + x68)
+ x70 := (x23 + uint64(x24))
+ x71 := (x22 + x70)
+ x72 := (x21 + x71)
+ x73 := (x20 + x72)
+ x74 := (x19 + x73)
+ x75 := (x18 + x74)
+ x76 := (x17 + x75)
+ x77 := (x15 + uint64(x16))
+ x78 := (x14 + x77)
+ x79 := (x13 + x78)
+ x80 := (x12 + x79)
+ x81 := (x11 + x80)
+ x82 := (x10 + x81)
+ x83 := (x9 + x82)
+ x84 := (x7 + uint64(x8))
+ x85 := (x6 + x84)
+ x86 := (x5 + x85)
+ x87 := (x4 + x86)
+ x88 := (x3 + x87)
+ x89 := (x2 + x88)
+ x90 := (x1 + x89)
+ out1[0] = x55
+ out1[1] = x62
+ out1[2] = x69
+ out1[3] = x76
+ out1[4] = x83
+ out1[5] = x90
+}
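+
+// Conversely, the shift-and-add chains above just reassemble six little-endian
+// 64-bit words. An equivalent loop-based sketch (ours, not generated):
+//
+//    package main
+//
+//    import (
+//        "encoding/binary"
+//        "fmt"
+//    )
+//
+//    func main() {
+//        var arg1 [48]uint8
+//        arg1[0] = 1
+//        var out1 [6]uint64
+//        for i := range out1 {
+//            out1[i] = binary.LittleEndian.Uint64(arg1[8*i:])
+//        }
+//        fmt.Println(out1) // [1 0 0 0 0 0]
+//    }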
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p384_invert.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p384_invert.go
index 24169e98d9..24169e98d9 100644
--- a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p384_invert.go
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p384_invert.go
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p521.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p521.go
index 3d12117e49..3d12117e49 100644
--- a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p521.go
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p521.go
diff --git a/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p521_fiat64.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p521_fiat64.go
new file mode 100644
index 0000000000..87a359e88e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p521_fiat64.go
@@ -0,0 +1,5541 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --no-wide-int --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --internal-static --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name fiat --no-prefix-fiat p521 64 '2^521 - 1' mul square add sub one from_montgomery to_montgomery selectznz to_bytes from_bytes
+//
+// curve description: p521
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, square, add, sub, one, from_montgomery, to_montgomery, selectznz, to_bytes, from_bytes
+//
+// m = 0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff (from "2^521 - 1")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+//
+// functions synthesized for this Montgomery arithmetic require the
+//
+// input to be strictly less than the prime modulus (m), and also
+//
+// require the input to be in the unique saturated representation.
+//
+// All functions also ensure that these two properties are true of
+//
+// return values.
+//
+//
+//
+// Computed values:
+//
+// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) + (z[4] << 256) + (z[5] << 0x140) + (z[6] << 0x180) + (z[7] << 0x1c0) + (z[8] << 2^9)
+//
+// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248) + (z[32] << 256) + (z[33] << 0x108) + (z[34] << 0x110) + (z[35] << 0x118) + (z[36] << 0x120) + (z[37] << 0x128) + (z[38] << 0x130) + (z[39] << 0x138) + (z[40] << 0x140) + (z[41] << 0x148) + (z[42] << 0x150) + (z[43] << 0x158) + (z[44] << 0x160) + (z[45] << 0x168) + (z[46] << 0x170) + (z[47] << 0x178) + (z[48] << 0x180) + (z[49] << 0x188) + (z[50] << 0x190) + (z[51] << 0x198) + (z[52] << 0x1a0) + (z[53] << 0x1a8) + (z[54] << 0x1b0) + (z[55] << 0x1b8) + (z[56] << 0x1c0) + (z[57] << 0x1c8) + (z[58] << 0x1d0) + (z[59] << 0x1d8) + (z[60] << 0x1e0) + (z[61] << 0x1e8) + (z[62] << 0x1f0) + (z[63] << 0x1f8) + (z[64] << 2^9) + (z[65] << 0x208)
+//
+// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) + (z[4] << 256) + (z[5] << 0x140) + (z[6] << 0x180) + (z[7] << 0x1c0) + (z[8] << 2^9) in
+//
+// if x1 & (2^576-1) < 2^575 then x1 & (2^576-1) else (x1 & (2^576-1)) - 2^576
+
+package fiat
+
+import "math/bits"
+
+type p521Uint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type p521Int1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type p521MontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p521MontgomeryDomainFieldElement [9]uint64
+
+// The type p521NonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p521NonMontgomeryDomainFieldElement [9]uint64
+
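+// For illustration only (not generated code): a nine-limb little-endian
+// vector z represents the integer eval z defined above; for example
+//
+//	z = [3, 1, 0, 0, 0, 0, 0, 0, 0]   represents   3 + (1 << 64)
+//
+// and to_bytes/from_bytes convert between this representation and the
+// 66-byte little-endian encoding described by bytes_eval.
+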
+// p521CmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+//
+// out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [0x0 ~> 0xffffffffffffffff]
+// arg3: [0x0 ~> 0xffffffffffffffff]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func p521CmovznzU64(out1 *uint64, arg1 p521Uint1, arg2 uint64, arg3 uint64) {
+ x1 := (uint64(arg1) * 0xffffffffffffffff)
+ x2 := ((x1 & arg3) | ((^x1) & arg2))
+ *out1 = x2
+}
+
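+// A behavioral sketch of p521CmovznzU64 (annotation, not generated code):
+// the mask x1 computed inside it is all ones when arg1 = 1 and zero when
+// arg1 = 0, so the select is branch-free and constant-time:
+//
+//	var r uint64
+//	p521CmovznzU64(&r, 1, 10, 20) // r = 20 (arg1 != 0 selects arg3)
+//	p521CmovznzU64(&r, 0, 10, 20) // r = 10 (arg1 == 0 selects arg2)
+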
+// p521Mul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p521Mul(out1 *p521MontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement, arg2 *p521MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[4]
+ x5 := arg1[5]
+ x6 := arg1[6]
+ x7 := arg1[7]
+ x8 := arg1[8]
+ x9 := arg1[0]
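+ // Annotation (not generated): each of the nine rounds below multiplies
+ // one limb of arg1 by every limb of arg2, accumulates the partial
+ // products, and then performs one Montgomery reduction step. Since
+ // m = 2^521 - 1 is -1 mod 2^64, the constant m' = -m^-1 mod 2^64 is 1,
+ // so the low accumulator word (x26 in round one) multiplies the limbs
+ // of m directly: eight words of 0xffffffffffffffff and a top word of 0x1ff.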
+ var x10 uint64
+ var x11 uint64
+ x11, x10 = bits.Mul64(x9, arg2[8])
+ var x12 uint64
+ var x13 uint64
+ x13, x12 = bits.Mul64(x9, arg2[7])
+ var x14 uint64
+ var x15 uint64
+ x15, x14 = bits.Mul64(x9, arg2[6])
+ var x16 uint64
+ var x17 uint64
+ x17, x16 = bits.Mul64(x9, arg2[5])
+ var x18 uint64
+ var x19 uint64
+ x19, x18 = bits.Mul64(x9, arg2[4])
+ var x20 uint64
+ var x21 uint64
+ x21, x20 = bits.Mul64(x9, arg2[3])
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x9, arg2[2])
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x9, arg2[1])
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x9, arg2[0])
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x25, x22, uint64(p521Uint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x23, x20, uint64(p521Uint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x21, x18, uint64(p521Uint1(x33)))
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(x19, x16, uint64(p521Uint1(x35)))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(x17, x14, uint64(p521Uint1(x37)))
+ var x40 uint64
+ var x41 uint64
+ x40, x41 = bits.Add64(x15, x12, uint64(p521Uint1(x39)))
+ var x42 uint64
+ var x43 uint64
+ x42, x43 = bits.Add64(x13, x10, uint64(p521Uint1(x41)))
+ x44 := (uint64(p521Uint1(x43)) + x11)
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x26, 0x1ff)
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x49 uint64
+ var x50 uint64
+ x50, x49 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x51 uint64
+ var x52 uint64
+ x52, x51 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x53 uint64
+ var x54 uint64
+ x54, x53 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x55 uint64
+ var x56 uint64
+ x56, x55 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x57 uint64
+ var x58 uint64
+ x58, x57 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x59 uint64
+ var x60 uint64
+ x60, x59 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x61 uint64
+ var x62 uint64
+ x62, x61 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x63 uint64
+ var x64 uint64
+ x63, x64 = bits.Add64(x62, x59, uint64(0x0))
+ var x65 uint64
+ var x66 uint64
+ x65, x66 = bits.Add64(x60, x57, uint64(p521Uint1(x64)))
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x58, x55, uint64(p521Uint1(x66)))
+ var x69 uint64
+ var x70 uint64
+ x69, x70 = bits.Add64(x56, x53, uint64(p521Uint1(x68)))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x54, x51, uint64(p521Uint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x52, x49, uint64(p521Uint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x50, x47, uint64(p521Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Add64(x48, x45, uint64(p521Uint1(x76)))
+ x79 := (uint64(p521Uint1(x78)) + x46)
+ var x81 uint64
+ _, x81 = bits.Add64(x26, x61, uint64(0x0))
+ var x82 uint64
+ var x83 uint64
+ x82, x83 = bits.Add64(x28, x63, uint64(p521Uint1(x81)))
+ var x84 uint64
+ var x85 uint64
+ x84, x85 = bits.Add64(x30, x65, uint64(p521Uint1(x83)))
+ var x86 uint64
+ var x87 uint64
+ x86, x87 = bits.Add64(x32, x67, uint64(p521Uint1(x85)))
+ var x88 uint64
+ var x89 uint64
+ x88, x89 = bits.Add64(x34, x69, uint64(p521Uint1(x87)))
+ var x90 uint64
+ var x91 uint64
+ x90, x91 = bits.Add64(x36, x71, uint64(p521Uint1(x89)))
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x38, x73, uint64(p521Uint1(x91)))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x40, x75, uint64(p521Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x42, x77, uint64(p521Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x44, x79, uint64(p521Uint1(x97)))
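+ // Annotation (not generated): rounds two through nine below repeat the
+ // same multiply-accumulate-reduce pattern for limbs arg1[1] .. arg1[8].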
+ var x100 uint64
+ var x101 uint64
+ x101, x100 = bits.Mul64(x1, arg2[8])
+ var x102 uint64
+ var x103 uint64
+ x103, x102 = bits.Mul64(x1, arg2[7])
+ var x104 uint64
+ var x105 uint64
+ x105, x104 = bits.Mul64(x1, arg2[6])
+ var x106 uint64
+ var x107 uint64
+ x107, x106 = bits.Mul64(x1, arg2[5])
+ var x108 uint64
+ var x109 uint64
+ x109, x108 = bits.Mul64(x1, arg2[4])
+ var x110 uint64
+ var x111 uint64
+ x111, x110 = bits.Mul64(x1, arg2[3])
+ var x112 uint64
+ var x113 uint64
+ x113, x112 = bits.Mul64(x1, arg2[2])
+ var x114 uint64
+ var x115 uint64
+ x115, x114 = bits.Mul64(x1, arg2[1])
+ var x116 uint64
+ var x117 uint64
+ x117, x116 = bits.Mul64(x1, arg2[0])
+ var x118 uint64
+ var x119 uint64
+ x118, x119 = bits.Add64(x117, x114, uint64(0x0))
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x115, x112, uint64(p521Uint1(x119)))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x113, x110, uint64(p521Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x111, x108, uint64(p521Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x109, x106, uint64(p521Uint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x107, x104, uint64(p521Uint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x105, x102, uint64(p521Uint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x103, x100, uint64(p521Uint1(x131)))
+ x134 := (uint64(p521Uint1(x133)) + x101)
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x82, x116, uint64(0x0))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x84, x118, uint64(p521Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x86, x120, uint64(p521Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x88, x122, uint64(p521Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x90, x124, uint64(p521Uint1(x142)))
+ var x145 uint64
+ var x146 uint64
+ x145, x146 = bits.Add64(x92, x126, uint64(p521Uint1(x144)))
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x94, x128, uint64(p521Uint1(x146)))
+ var x149 uint64
+ var x150 uint64
+ x149, x150 = bits.Add64(x96, x130, uint64(p521Uint1(x148)))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x98, x132, uint64(p521Uint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(uint64(p521Uint1(x99)), x134, uint64(p521Uint1(x152)))
+ var x155 uint64
+ var x156 uint64
+ x156, x155 = bits.Mul64(x135, 0x1ff)
+ var x157 uint64
+ var x158 uint64
+ x158, x157 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x159 uint64
+ var x160 uint64
+ x160, x159 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x161 uint64
+ var x162 uint64
+ x162, x161 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x163 uint64
+ var x164 uint64
+ x164, x163 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x165 uint64
+ var x166 uint64
+ x166, x165 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x167 uint64
+ var x168 uint64
+ x168, x167 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x169 uint64
+ var x170 uint64
+ x170, x169 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x171 uint64
+ var x172 uint64
+ x172, x171 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x172, x169, uint64(0x0))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x170, x167, uint64(p521Uint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x168, x165, uint64(p521Uint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x166, x163, uint64(p521Uint1(x178)))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x164, x161, uint64(p521Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x162, x159, uint64(p521Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x160, x157, uint64(p521Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x158, x155, uint64(p521Uint1(x186)))
+ x189 := (uint64(p521Uint1(x188)) + x156)
+ var x191 uint64
+ _, x191 = bits.Add64(x135, x171, uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Add64(x137, x173, uint64(p521Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Add64(x139, x175, uint64(p521Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Add64(x141, x177, uint64(p521Uint1(x195)))
+ var x198 uint64
+ var x199 uint64
+ x198, x199 = bits.Add64(x143, x179, uint64(p521Uint1(x197)))
+ var x200 uint64
+ var x201 uint64
+ x200, x201 = bits.Add64(x145, x181, uint64(p521Uint1(x199)))
+ var x202 uint64
+ var x203 uint64
+ x202, x203 = bits.Add64(x147, x183, uint64(p521Uint1(x201)))
+ var x204 uint64
+ var x205 uint64
+ x204, x205 = bits.Add64(x149, x185, uint64(p521Uint1(x203)))
+ var x206 uint64
+ var x207 uint64
+ x206, x207 = bits.Add64(x151, x187, uint64(p521Uint1(x205)))
+ var x208 uint64
+ var x209 uint64
+ x208, x209 = bits.Add64(x153, x189, uint64(p521Uint1(x207)))
+ x210 := (uint64(p521Uint1(x209)) + uint64(p521Uint1(x154)))
+ var x211 uint64
+ var x212 uint64
+ x212, x211 = bits.Mul64(x2, arg2[8])
+ var x213 uint64
+ var x214 uint64
+ x214, x213 = bits.Mul64(x2, arg2[7])
+ var x215 uint64
+ var x216 uint64
+ x216, x215 = bits.Mul64(x2, arg2[6])
+ var x217 uint64
+ var x218 uint64
+ x218, x217 = bits.Mul64(x2, arg2[5])
+ var x219 uint64
+ var x220 uint64
+ x220, x219 = bits.Mul64(x2, arg2[4])
+ var x221 uint64
+ var x222 uint64
+ x222, x221 = bits.Mul64(x2, arg2[3])
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x2, arg2[2])
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x2, arg2[1])
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x2, arg2[0])
+ var x229 uint64
+ var x230 uint64
+ x229, x230 = bits.Add64(x228, x225, uint64(0x0))
+ var x231 uint64
+ var x232 uint64
+ x231, x232 = bits.Add64(x226, x223, uint64(p521Uint1(x230)))
+ var x233 uint64
+ var x234 uint64
+ x233, x234 = bits.Add64(x224, x221, uint64(p521Uint1(x232)))
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x222, x219, uint64(p521Uint1(x234)))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x220, x217, uint64(p521Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x218, x215, uint64(p521Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x216, x213, uint64(p521Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x214, x211, uint64(p521Uint1(x242)))
+ x245 := (uint64(p521Uint1(x244)) + x212)
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x192, x227, uint64(0x0))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x194, x229, uint64(p521Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x196, x231, uint64(p521Uint1(x249)))
+ var x252 uint64
+ var x253 uint64
+ x252, x253 = bits.Add64(x198, x233, uint64(p521Uint1(x251)))
+ var x254 uint64
+ var x255 uint64
+ x254, x255 = bits.Add64(x200, x235, uint64(p521Uint1(x253)))
+ var x256 uint64
+ var x257 uint64
+ x256, x257 = bits.Add64(x202, x237, uint64(p521Uint1(x255)))
+ var x258 uint64
+ var x259 uint64
+ x258, x259 = bits.Add64(x204, x239, uint64(p521Uint1(x257)))
+ var x260 uint64
+ var x261 uint64
+ x260, x261 = bits.Add64(x206, x241, uint64(p521Uint1(x259)))
+ var x262 uint64
+ var x263 uint64
+ x262, x263 = bits.Add64(x208, x243, uint64(p521Uint1(x261)))
+ var x264 uint64
+ var x265 uint64
+ x264, x265 = bits.Add64(x210, x245, uint64(p521Uint1(x263)))
+ var x266 uint64
+ var x267 uint64
+ x267, x266 = bits.Mul64(x246, 0x1ff)
+ var x268 uint64
+ var x269 uint64
+ x269, x268 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x270 uint64
+ var x271 uint64
+ x271, x270 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x272 uint64
+ var x273 uint64
+ x273, x272 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x274 uint64
+ var x275 uint64
+ x275, x274 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x276 uint64
+ var x277 uint64
+ x277, x276 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x278 uint64
+ var x279 uint64
+ x279, x278 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x280 uint64
+ var x281 uint64
+ x281, x280 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x282 uint64
+ var x283 uint64
+ x283, x282 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x284 uint64
+ var x285 uint64
+ x284, x285 = bits.Add64(x283, x280, uint64(0x0))
+ var x286 uint64
+ var x287 uint64
+ x286, x287 = bits.Add64(x281, x278, uint64(p521Uint1(x285)))
+ var x288 uint64
+ var x289 uint64
+ x288, x289 = bits.Add64(x279, x276, uint64(p521Uint1(x287)))
+ var x290 uint64
+ var x291 uint64
+ x290, x291 = bits.Add64(x277, x274, uint64(p521Uint1(x289)))
+ var x292 uint64
+ var x293 uint64
+ x292, x293 = bits.Add64(x275, x272, uint64(p521Uint1(x291)))
+ var x294 uint64
+ var x295 uint64
+ x294, x295 = bits.Add64(x273, x270, uint64(p521Uint1(x293)))
+ var x296 uint64
+ var x297 uint64
+ x296, x297 = bits.Add64(x271, x268, uint64(p521Uint1(x295)))
+ var x298 uint64
+ var x299 uint64
+ x298, x299 = bits.Add64(x269, x266, uint64(p521Uint1(x297)))
+ x300 := (uint64(p521Uint1(x299)) + x267)
+ var x302 uint64
+ _, x302 = bits.Add64(x246, x282, uint64(0x0))
+ var x303 uint64
+ var x304 uint64
+ x303, x304 = bits.Add64(x248, x284, uint64(p521Uint1(x302)))
+ var x305 uint64
+ var x306 uint64
+ x305, x306 = bits.Add64(x250, x286, uint64(p521Uint1(x304)))
+ var x307 uint64
+ var x308 uint64
+ x307, x308 = bits.Add64(x252, x288, uint64(p521Uint1(x306)))
+ var x309 uint64
+ var x310 uint64
+ x309, x310 = bits.Add64(x254, x290, uint64(p521Uint1(x308)))
+ var x311 uint64
+ var x312 uint64
+ x311, x312 = bits.Add64(x256, x292, uint64(p521Uint1(x310)))
+ var x313 uint64
+ var x314 uint64
+ x313, x314 = bits.Add64(x258, x294, uint64(p521Uint1(x312)))
+ var x315 uint64
+ var x316 uint64
+ x315, x316 = bits.Add64(x260, x296, uint64(p521Uint1(x314)))
+ var x317 uint64
+ var x318 uint64
+ x317, x318 = bits.Add64(x262, x298, uint64(p521Uint1(x316)))
+ var x319 uint64
+ var x320 uint64
+ x319, x320 = bits.Add64(x264, x300, uint64(p521Uint1(x318)))
+ x321 := (uint64(p521Uint1(x320)) + uint64(p521Uint1(x265)))
+ var x322 uint64
+ var x323 uint64
+ x323, x322 = bits.Mul64(x3, arg2[8])
+ var x324 uint64
+ var x325 uint64
+ x325, x324 = bits.Mul64(x3, arg2[7])
+ var x326 uint64
+ var x327 uint64
+ x327, x326 = bits.Mul64(x3, arg2[6])
+ var x328 uint64
+ var x329 uint64
+ x329, x328 = bits.Mul64(x3, arg2[5])
+ var x330 uint64
+ var x331 uint64
+ x331, x330 = bits.Mul64(x3, arg2[4])
+ var x332 uint64
+ var x333 uint64
+ x333, x332 = bits.Mul64(x3, arg2[3])
+ var x334 uint64
+ var x335 uint64
+ x335, x334 = bits.Mul64(x3, arg2[2])
+ var x336 uint64
+ var x337 uint64
+ x337, x336 = bits.Mul64(x3, arg2[1])
+ var x338 uint64
+ var x339 uint64
+ x339, x338 = bits.Mul64(x3, arg2[0])
+ var x340 uint64
+ var x341 uint64
+ x340, x341 = bits.Add64(x339, x336, uint64(0x0))
+ var x342 uint64
+ var x343 uint64
+ x342, x343 = bits.Add64(x337, x334, uint64(p521Uint1(x341)))
+ var x344 uint64
+ var x345 uint64
+ x344, x345 = bits.Add64(x335, x332, uint64(p521Uint1(x343)))
+ var x346 uint64
+ var x347 uint64
+ x346, x347 = bits.Add64(x333, x330, uint64(p521Uint1(x345)))
+ var x348 uint64
+ var x349 uint64
+ x348, x349 = bits.Add64(x331, x328, uint64(p521Uint1(x347)))
+ var x350 uint64
+ var x351 uint64
+ x350, x351 = bits.Add64(x329, x326, uint64(p521Uint1(x349)))
+ var x352 uint64
+ var x353 uint64
+ x352, x353 = bits.Add64(x327, x324, uint64(p521Uint1(x351)))
+ var x354 uint64
+ var x355 uint64
+ x354, x355 = bits.Add64(x325, x322, uint64(p521Uint1(x353)))
+ x356 := (uint64(p521Uint1(x355)) + x323)
+ var x357 uint64
+ var x358 uint64
+ x357, x358 = bits.Add64(x303, x338, uint64(0x0))
+ var x359 uint64
+ var x360 uint64
+ x359, x360 = bits.Add64(x305, x340, uint64(p521Uint1(x358)))
+ var x361 uint64
+ var x362 uint64
+ x361, x362 = bits.Add64(x307, x342, uint64(p521Uint1(x360)))
+ var x363 uint64
+ var x364 uint64
+ x363, x364 = bits.Add64(x309, x344, uint64(p521Uint1(x362)))
+ var x365 uint64
+ var x366 uint64
+ x365, x366 = bits.Add64(x311, x346, uint64(p521Uint1(x364)))
+ var x367 uint64
+ var x368 uint64
+ x367, x368 = bits.Add64(x313, x348, uint64(p521Uint1(x366)))
+ var x369 uint64
+ var x370 uint64
+ x369, x370 = bits.Add64(x315, x350, uint64(p521Uint1(x368)))
+ var x371 uint64
+ var x372 uint64
+ x371, x372 = bits.Add64(x317, x352, uint64(p521Uint1(x370)))
+ var x373 uint64
+ var x374 uint64
+ x373, x374 = bits.Add64(x319, x354, uint64(p521Uint1(x372)))
+ var x375 uint64
+ var x376 uint64
+ x375, x376 = bits.Add64(x321, x356, uint64(p521Uint1(x374)))
+ var x377 uint64
+ var x378 uint64
+ x378, x377 = bits.Mul64(x357, 0x1ff)
+ var x379 uint64
+ var x380 uint64
+ x380, x379 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x381 uint64
+ var x382 uint64
+ x382, x381 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x383 uint64
+ var x384 uint64
+ x384, x383 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x385 uint64
+ var x386 uint64
+ x386, x385 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x387 uint64
+ var x388 uint64
+ x388, x387 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x389 uint64
+ var x390 uint64
+ x390, x389 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x391 uint64
+ var x392 uint64
+ x392, x391 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x393 uint64
+ var x394 uint64
+ x394, x393 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Add64(x394, x391, uint64(0x0))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Add64(x392, x389, uint64(p521Uint1(x396)))
+ var x399 uint64
+ var x400 uint64
+ x399, x400 = bits.Add64(x390, x387, uint64(p521Uint1(x398)))
+ var x401 uint64
+ var x402 uint64
+ x401, x402 = bits.Add64(x388, x385, uint64(p521Uint1(x400)))
+ var x403 uint64
+ var x404 uint64
+ x403, x404 = bits.Add64(x386, x383, uint64(p521Uint1(x402)))
+ var x405 uint64
+ var x406 uint64
+ x405, x406 = bits.Add64(x384, x381, uint64(p521Uint1(x404)))
+ var x407 uint64
+ var x408 uint64
+ x407, x408 = bits.Add64(x382, x379, uint64(p521Uint1(x406)))
+ var x409 uint64
+ var x410 uint64
+ x409, x410 = bits.Add64(x380, x377, uint64(p521Uint1(x408)))
+ x411 := (uint64(p521Uint1(x410)) + x378)
+ var x413 uint64
+ _, x413 = bits.Add64(x357, x393, uint64(0x0))
+ var x414 uint64
+ var x415 uint64
+ x414, x415 = bits.Add64(x359, x395, uint64(p521Uint1(x413)))
+ var x416 uint64
+ var x417 uint64
+ x416, x417 = bits.Add64(x361, x397, uint64(p521Uint1(x415)))
+ var x418 uint64
+ var x419 uint64
+ x418, x419 = bits.Add64(x363, x399, uint64(p521Uint1(x417)))
+ var x420 uint64
+ var x421 uint64
+ x420, x421 = bits.Add64(x365, x401, uint64(p521Uint1(x419)))
+ var x422 uint64
+ var x423 uint64
+ x422, x423 = bits.Add64(x367, x403, uint64(p521Uint1(x421)))
+ var x424 uint64
+ var x425 uint64
+ x424, x425 = bits.Add64(x369, x405, uint64(p521Uint1(x423)))
+ var x426 uint64
+ var x427 uint64
+ x426, x427 = bits.Add64(x371, x407, uint64(p521Uint1(x425)))
+ var x428 uint64
+ var x429 uint64
+ x428, x429 = bits.Add64(x373, x409, uint64(p521Uint1(x427)))
+ var x430 uint64
+ var x431 uint64
+ x430, x431 = bits.Add64(x375, x411, uint64(p521Uint1(x429)))
+ x432 := (uint64(p521Uint1(x431)) + uint64(p521Uint1(x376)))
+ var x433 uint64
+ var x434 uint64
+ x434, x433 = bits.Mul64(x4, arg2[8])
+ var x435 uint64
+ var x436 uint64
+ x436, x435 = bits.Mul64(x4, arg2[7])
+ var x437 uint64
+ var x438 uint64
+ x438, x437 = bits.Mul64(x4, arg2[6])
+ var x439 uint64
+ var x440 uint64
+ x440, x439 = bits.Mul64(x4, arg2[5])
+ var x441 uint64
+ var x442 uint64
+ x442, x441 = bits.Mul64(x4, arg2[4])
+ var x443 uint64
+ var x444 uint64
+ x444, x443 = bits.Mul64(x4, arg2[3])
+ var x445 uint64
+ var x446 uint64
+ x446, x445 = bits.Mul64(x4, arg2[2])
+ var x447 uint64
+ var x448 uint64
+ x448, x447 = bits.Mul64(x4, arg2[1])
+ var x449 uint64
+ var x450 uint64
+ x450, x449 = bits.Mul64(x4, arg2[0])
+ var x451 uint64
+ var x452 uint64
+ x451, x452 = bits.Add64(x450, x447, uint64(0x0))
+ var x453 uint64
+ var x454 uint64
+ x453, x454 = bits.Add64(x448, x445, uint64(p521Uint1(x452)))
+ var x455 uint64
+ var x456 uint64
+ x455, x456 = bits.Add64(x446, x443, uint64(p521Uint1(x454)))
+ var x457 uint64
+ var x458 uint64
+ x457, x458 = bits.Add64(x444, x441, uint64(p521Uint1(x456)))
+ var x459 uint64
+ var x460 uint64
+ x459, x460 = bits.Add64(x442, x439, uint64(p521Uint1(x458)))
+ var x461 uint64
+ var x462 uint64
+ x461, x462 = bits.Add64(x440, x437, uint64(p521Uint1(x460)))
+ var x463 uint64
+ var x464 uint64
+ x463, x464 = bits.Add64(x438, x435, uint64(p521Uint1(x462)))
+ var x465 uint64
+ var x466 uint64
+ x465, x466 = bits.Add64(x436, x433, uint64(p521Uint1(x464)))
+ x467 := (uint64(p521Uint1(x466)) + x434)
+ var x468 uint64
+ var x469 uint64
+ x468, x469 = bits.Add64(x414, x449, uint64(0x0))
+ var x470 uint64
+ var x471 uint64
+ x470, x471 = bits.Add64(x416, x451, uint64(p521Uint1(x469)))
+ var x472 uint64
+ var x473 uint64
+ x472, x473 = bits.Add64(x418, x453, uint64(p521Uint1(x471)))
+ var x474 uint64
+ var x475 uint64
+ x474, x475 = bits.Add64(x420, x455, uint64(p521Uint1(x473)))
+ var x476 uint64
+ var x477 uint64
+ x476, x477 = bits.Add64(x422, x457, uint64(p521Uint1(x475)))
+ var x478 uint64
+ var x479 uint64
+ x478, x479 = bits.Add64(x424, x459, uint64(p521Uint1(x477)))
+ var x480 uint64
+ var x481 uint64
+ x480, x481 = bits.Add64(x426, x461, uint64(p521Uint1(x479)))
+ var x482 uint64
+ var x483 uint64
+ x482, x483 = bits.Add64(x428, x463, uint64(p521Uint1(x481)))
+ var x484 uint64
+ var x485 uint64
+ x484, x485 = bits.Add64(x430, x465, uint64(p521Uint1(x483)))
+ var x486 uint64
+ var x487 uint64
+ x486, x487 = bits.Add64(x432, x467, uint64(p521Uint1(x485)))
+ var x488 uint64
+ var x489 uint64
+ x489, x488 = bits.Mul64(x468, 0x1ff)
+ var x490 uint64
+ var x491 uint64
+ x491, x490 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x492 uint64
+ var x493 uint64
+ x493, x492 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x494 uint64
+ var x495 uint64
+ x495, x494 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x496 uint64
+ var x497 uint64
+ x497, x496 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x498 uint64
+ var x499 uint64
+ x499, x498 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x500 uint64
+ var x501 uint64
+ x501, x500 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x502 uint64
+ var x503 uint64
+ x503, x502 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x504 uint64
+ var x505 uint64
+ x505, x504 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x506 uint64
+ var x507 uint64
+ x506, x507 = bits.Add64(x505, x502, uint64(0x0))
+ var x508 uint64
+ var x509 uint64
+ x508, x509 = bits.Add64(x503, x500, uint64(p521Uint1(x507)))
+ var x510 uint64
+ var x511 uint64
+ x510, x511 = bits.Add64(x501, x498, uint64(p521Uint1(x509)))
+ var x512 uint64
+ var x513 uint64
+ x512, x513 = bits.Add64(x499, x496, uint64(p521Uint1(x511)))
+ var x514 uint64
+ var x515 uint64
+ x514, x515 = bits.Add64(x497, x494, uint64(p521Uint1(x513)))
+ var x516 uint64
+ var x517 uint64
+ x516, x517 = bits.Add64(x495, x492, uint64(p521Uint1(x515)))
+ var x518 uint64
+ var x519 uint64
+ x518, x519 = bits.Add64(x493, x490, uint64(p521Uint1(x517)))
+ var x520 uint64
+ var x521 uint64
+ x520, x521 = bits.Add64(x491, x488, uint64(p521Uint1(x519)))
+ x522 := (uint64(p521Uint1(x521)) + x489)
+ var x524 uint64
+ _, x524 = bits.Add64(x468, x504, uint64(0x0))
+ var x525 uint64
+ var x526 uint64
+ x525, x526 = bits.Add64(x470, x506, uint64(p521Uint1(x524)))
+ var x527 uint64
+ var x528 uint64
+ x527, x528 = bits.Add64(x472, x508, uint64(p521Uint1(x526)))
+ var x529 uint64
+ var x530 uint64
+ x529, x530 = bits.Add64(x474, x510, uint64(p521Uint1(x528)))
+ var x531 uint64
+ var x532 uint64
+ x531, x532 = bits.Add64(x476, x512, uint64(p521Uint1(x530)))
+ var x533 uint64
+ var x534 uint64
+ x533, x534 = bits.Add64(x478, x514, uint64(p521Uint1(x532)))
+ var x535 uint64
+ var x536 uint64
+ x535, x536 = bits.Add64(x480, x516, uint64(p521Uint1(x534)))
+ var x537 uint64
+ var x538 uint64
+ x537, x538 = bits.Add64(x482, x518, uint64(p521Uint1(x536)))
+ var x539 uint64
+ var x540 uint64
+ x539, x540 = bits.Add64(x484, x520, uint64(p521Uint1(x538)))
+ var x541 uint64
+ var x542 uint64
+ x541, x542 = bits.Add64(x486, x522, uint64(p521Uint1(x540)))
+ x543 := (uint64(p521Uint1(x542)) + uint64(p521Uint1(x487)))
+ var x544 uint64
+ var x545 uint64
+ x545, x544 = bits.Mul64(x5, arg2[8])
+ var x546 uint64
+ var x547 uint64
+ x547, x546 = bits.Mul64(x5, arg2[7])
+ var x548 uint64
+ var x549 uint64
+ x549, x548 = bits.Mul64(x5, arg2[6])
+ var x550 uint64
+ var x551 uint64
+ x551, x550 = bits.Mul64(x5, arg2[5])
+ var x552 uint64
+ var x553 uint64
+ x553, x552 = bits.Mul64(x5, arg2[4])
+ var x554 uint64
+ var x555 uint64
+ x555, x554 = bits.Mul64(x5, arg2[3])
+ var x556 uint64
+ var x557 uint64
+ x557, x556 = bits.Mul64(x5, arg2[2])
+ var x558 uint64
+ var x559 uint64
+ x559, x558 = bits.Mul64(x5, arg2[1])
+ var x560 uint64
+ var x561 uint64
+ x561, x560 = bits.Mul64(x5, arg2[0])
+ var x562 uint64
+ var x563 uint64
+ x562, x563 = bits.Add64(x561, x558, uint64(0x0))
+ var x564 uint64
+ var x565 uint64
+ x564, x565 = bits.Add64(x559, x556, uint64(p521Uint1(x563)))
+ var x566 uint64
+ var x567 uint64
+ x566, x567 = bits.Add64(x557, x554, uint64(p521Uint1(x565)))
+ var x568 uint64
+ var x569 uint64
+ x568, x569 = bits.Add64(x555, x552, uint64(p521Uint1(x567)))
+ var x570 uint64
+ var x571 uint64
+ x570, x571 = bits.Add64(x553, x550, uint64(p521Uint1(x569)))
+ var x572 uint64
+ var x573 uint64
+ x572, x573 = bits.Add64(x551, x548, uint64(p521Uint1(x571)))
+ var x574 uint64
+ var x575 uint64
+ x574, x575 = bits.Add64(x549, x546, uint64(p521Uint1(x573)))
+ var x576 uint64
+ var x577 uint64
+ x576, x577 = bits.Add64(x547, x544, uint64(p521Uint1(x575)))
+ x578 := (uint64(p521Uint1(x577)) + x545)
+ var x579 uint64
+ var x580 uint64
+ x579, x580 = bits.Add64(x525, x560, uint64(0x0))
+ var x581 uint64
+ var x582 uint64
+ x581, x582 = bits.Add64(x527, x562, uint64(p521Uint1(x580)))
+ var x583 uint64
+ var x584 uint64
+ x583, x584 = bits.Add64(x529, x564, uint64(p521Uint1(x582)))
+ var x585 uint64
+ var x586 uint64
+ x585, x586 = bits.Add64(x531, x566, uint64(p521Uint1(x584)))
+ var x587 uint64
+ var x588 uint64
+ x587, x588 = bits.Add64(x533, x568, uint64(p521Uint1(x586)))
+ var x589 uint64
+ var x590 uint64
+ x589, x590 = bits.Add64(x535, x570, uint64(p521Uint1(x588)))
+ var x591 uint64
+ var x592 uint64
+ x591, x592 = bits.Add64(x537, x572, uint64(p521Uint1(x590)))
+ var x593 uint64
+ var x594 uint64
+ x593, x594 = bits.Add64(x539, x574, uint64(p521Uint1(x592)))
+ var x595 uint64
+ var x596 uint64
+ x595, x596 = bits.Add64(x541, x576, uint64(p521Uint1(x594)))
+ var x597 uint64
+ var x598 uint64
+ x597, x598 = bits.Add64(x543, x578, uint64(p521Uint1(x596)))
+ var x599 uint64
+ var x600 uint64
+ x600, x599 = bits.Mul64(x579, 0x1ff)
+ var x601 uint64
+ var x602 uint64
+ x602, x601 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x603 uint64
+ var x604 uint64
+ x604, x603 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x605 uint64
+ var x606 uint64
+ x606, x605 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x607 uint64
+ var x608 uint64
+ x608, x607 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x609 uint64
+ var x610 uint64
+ x610, x609 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x611 uint64
+ var x612 uint64
+ x612, x611 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x613 uint64
+ var x614 uint64
+ x614, x613 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x615 uint64
+ var x616 uint64
+ x616, x615 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x617 uint64
+ var x618 uint64
+ x617, x618 = bits.Add64(x616, x613, uint64(0x0))
+ var x619 uint64
+ var x620 uint64
+ x619, x620 = bits.Add64(x614, x611, uint64(p521Uint1(x618)))
+ var x621 uint64
+ var x622 uint64
+ x621, x622 = bits.Add64(x612, x609, uint64(p521Uint1(x620)))
+ var x623 uint64
+ var x624 uint64
+ x623, x624 = bits.Add64(x610, x607, uint64(p521Uint1(x622)))
+ var x625 uint64
+ var x626 uint64
+ x625, x626 = bits.Add64(x608, x605, uint64(p521Uint1(x624)))
+ var x627 uint64
+ var x628 uint64
+ x627, x628 = bits.Add64(x606, x603, uint64(p521Uint1(x626)))
+ var x629 uint64
+ var x630 uint64
+ x629, x630 = bits.Add64(x604, x601, uint64(p521Uint1(x628)))
+ var x631 uint64
+ var x632 uint64
+ x631, x632 = bits.Add64(x602, x599, uint64(p521Uint1(x630)))
+ x633 := (uint64(p521Uint1(x632)) + x600)
+ var x635 uint64
+ _, x635 = bits.Add64(x579, x615, uint64(0x0))
+ var x636 uint64
+ var x637 uint64
+ x636, x637 = bits.Add64(x581, x617, uint64(p521Uint1(x635)))
+ var x638 uint64
+ var x639 uint64
+ x638, x639 = bits.Add64(x583, x619, uint64(p521Uint1(x637)))
+ var x640 uint64
+ var x641 uint64
+ x640, x641 = bits.Add64(x585, x621, uint64(p521Uint1(x639)))
+ var x642 uint64
+ var x643 uint64
+ x642, x643 = bits.Add64(x587, x623, uint64(p521Uint1(x641)))
+ var x644 uint64
+ var x645 uint64
+ x644, x645 = bits.Add64(x589, x625, uint64(p521Uint1(x643)))
+ var x646 uint64
+ var x647 uint64
+ x646, x647 = bits.Add64(x591, x627, uint64(p521Uint1(x645)))
+ var x648 uint64
+ var x649 uint64
+ x648, x649 = bits.Add64(x593, x629, uint64(p521Uint1(x647)))
+ var x650 uint64
+ var x651 uint64
+ x650, x651 = bits.Add64(x595, x631, uint64(p521Uint1(x649)))
+ var x652 uint64
+ var x653 uint64
+ x652, x653 = bits.Add64(x597, x633, uint64(p521Uint1(x651)))
+ x654 := (uint64(p521Uint1(x653)) + uint64(p521Uint1(x598)))
+ var x655 uint64
+ var x656 uint64
+ x656, x655 = bits.Mul64(x6, arg2[8])
+ var x657 uint64
+ var x658 uint64
+ x658, x657 = bits.Mul64(x6, arg2[7])
+ var x659 uint64
+ var x660 uint64
+ x660, x659 = bits.Mul64(x6, arg2[6])
+ var x661 uint64
+ var x662 uint64
+ x662, x661 = bits.Mul64(x6, arg2[5])
+ var x663 uint64
+ var x664 uint64
+ x664, x663 = bits.Mul64(x6, arg2[4])
+ var x665 uint64
+ var x666 uint64
+ x666, x665 = bits.Mul64(x6, arg2[3])
+ var x667 uint64
+ var x668 uint64
+ x668, x667 = bits.Mul64(x6, arg2[2])
+ var x669 uint64
+ var x670 uint64
+ x670, x669 = bits.Mul64(x6, arg2[1])
+ var x671 uint64
+ var x672 uint64
+ x672, x671 = bits.Mul64(x6, arg2[0])
+ var x673 uint64
+ var x674 uint64
+ x673, x674 = bits.Add64(x672, x669, uint64(0x0))
+ var x675 uint64
+ var x676 uint64
+ x675, x676 = bits.Add64(x670, x667, uint64(p521Uint1(x674)))
+ var x677 uint64
+ var x678 uint64
+ x677, x678 = bits.Add64(x668, x665, uint64(p521Uint1(x676)))
+ var x679 uint64
+ var x680 uint64
+ x679, x680 = bits.Add64(x666, x663, uint64(p521Uint1(x678)))
+ var x681 uint64
+ var x682 uint64
+ x681, x682 = bits.Add64(x664, x661, uint64(p521Uint1(x680)))
+ var x683 uint64
+ var x684 uint64
+ x683, x684 = bits.Add64(x662, x659, uint64(p521Uint1(x682)))
+ var x685 uint64
+ var x686 uint64
+ x685, x686 = bits.Add64(x660, x657, uint64(p521Uint1(x684)))
+ var x687 uint64
+ var x688 uint64
+ x687, x688 = bits.Add64(x658, x655, uint64(p521Uint1(x686)))
+ x689 := (uint64(p521Uint1(x688)) + x656)
+ var x690 uint64
+ var x691 uint64
+ x690, x691 = bits.Add64(x636, x671, uint64(0x0))
+ var x692 uint64
+ var x693 uint64
+ x692, x693 = bits.Add64(x638, x673, uint64(p521Uint1(x691)))
+ var x694 uint64
+ var x695 uint64
+ x694, x695 = bits.Add64(x640, x675, uint64(p521Uint1(x693)))
+ var x696 uint64
+ var x697 uint64
+ x696, x697 = bits.Add64(x642, x677, uint64(p521Uint1(x695)))
+ var x698 uint64
+ var x699 uint64
+ x698, x699 = bits.Add64(x644, x679, uint64(p521Uint1(x697)))
+ var x700 uint64
+ var x701 uint64
+ x700, x701 = bits.Add64(x646, x681, uint64(p521Uint1(x699)))
+ var x702 uint64
+ var x703 uint64
+ x702, x703 = bits.Add64(x648, x683, uint64(p521Uint1(x701)))
+ var x704 uint64
+ var x705 uint64
+ x704, x705 = bits.Add64(x650, x685, uint64(p521Uint1(x703)))
+ var x706 uint64
+ var x707 uint64
+ x706, x707 = bits.Add64(x652, x687, uint64(p521Uint1(x705)))
+ var x708 uint64
+ var x709 uint64
+ x708, x709 = bits.Add64(x654, x689, uint64(p521Uint1(x707)))
+ var x710 uint64
+ var x711 uint64
+ x711, x710 = bits.Mul64(x690, 0x1ff)
+ var x712 uint64
+ var x713 uint64
+ x713, x712 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x714 uint64
+ var x715 uint64
+ x715, x714 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x716 uint64
+ var x717 uint64
+ x717, x716 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x718 uint64
+ var x719 uint64
+ x719, x718 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x720 uint64
+ var x721 uint64
+ x721, x720 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x722 uint64
+ var x723 uint64
+ x723, x722 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x724 uint64
+ var x725 uint64
+ x725, x724 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x726 uint64
+ var x727 uint64
+ x727, x726 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x728 uint64
+ var x729 uint64
+ x728, x729 = bits.Add64(x727, x724, uint64(0x0))
+ var x730 uint64
+ var x731 uint64
+ x730, x731 = bits.Add64(x725, x722, uint64(p521Uint1(x729)))
+ var x732 uint64
+ var x733 uint64
+ x732, x733 = bits.Add64(x723, x720, uint64(p521Uint1(x731)))
+ var x734 uint64
+ var x735 uint64
+ x734, x735 = bits.Add64(x721, x718, uint64(p521Uint1(x733)))
+ var x736 uint64
+ var x737 uint64
+ x736, x737 = bits.Add64(x719, x716, uint64(p521Uint1(x735)))
+ var x738 uint64
+ var x739 uint64
+ x738, x739 = bits.Add64(x717, x714, uint64(p521Uint1(x737)))
+ var x740 uint64
+ var x741 uint64
+ x740, x741 = bits.Add64(x715, x712, uint64(p521Uint1(x739)))
+ var x742 uint64
+ var x743 uint64
+ x742, x743 = bits.Add64(x713, x710, uint64(p521Uint1(x741)))
+ x744 := (uint64(p521Uint1(x743)) + x711)
+ var x746 uint64
+ _, x746 = bits.Add64(x690, x726, uint64(0x0))
+ var x747 uint64
+ var x748 uint64
+ x747, x748 = bits.Add64(x692, x728, uint64(p521Uint1(x746)))
+ var x749 uint64
+ var x750 uint64
+ x749, x750 = bits.Add64(x694, x730, uint64(p521Uint1(x748)))
+ var x751 uint64
+ var x752 uint64
+ x751, x752 = bits.Add64(x696, x732, uint64(p521Uint1(x750)))
+ var x753 uint64
+ var x754 uint64
+ x753, x754 = bits.Add64(x698, x734, uint64(p521Uint1(x752)))
+ var x755 uint64
+ var x756 uint64
+ x755, x756 = bits.Add64(x700, x736, uint64(p521Uint1(x754)))
+ var x757 uint64
+ var x758 uint64
+ x757, x758 = bits.Add64(x702, x738, uint64(p521Uint1(x756)))
+ var x759 uint64
+ var x760 uint64
+ x759, x760 = bits.Add64(x704, x740, uint64(p521Uint1(x758)))
+ var x761 uint64
+ var x762 uint64
+ x761, x762 = bits.Add64(x706, x742, uint64(p521Uint1(x760)))
+ var x763 uint64
+ var x764 uint64
+ x763, x764 = bits.Add64(x708, x744, uint64(p521Uint1(x762)))
+ x765 := (uint64(p521Uint1(x764)) + uint64(p521Uint1(x709)))
+ var x766 uint64
+ var x767 uint64
+ x767, x766 = bits.Mul64(x7, arg2[8])
+ var x768 uint64
+ var x769 uint64
+ x769, x768 = bits.Mul64(x7, arg2[7])
+ var x770 uint64
+ var x771 uint64
+ x771, x770 = bits.Mul64(x7, arg2[6])
+ var x772 uint64
+ var x773 uint64
+ x773, x772 = bits.Mul64(x7, arg2[5])
+ var x774 uint64
+ var x775 uint64
+ x775, x774 = bits.Mul64(x7, arg2[4])
+ var x776 uint64
+ var x777 uint64
+ x777, x776 = bits.Mul64(x7, arg2[3])
+ var x778 uint64
+ var x779 uint64
+ x779, x778 = bits.Mul64(x7, arg2[2])
+ var x780 uint64
+ var x781 uint64
+ x781, x780 = bits.Mul64(x7, arg2[1])
+ var x782 uint64
+ var x783 uint64
+ x783, x782 = bits.Mul64(x7, arg2[0])
+ var x784 uint64
+ var x785 uint64
+ x784, x785 = bits.Add64(x783, x780, uint64(0x0))
+ var x786 uint64
+ var x787 uint64
+ x786, x787 = bits.Add64(x781, x778, uint64(p521Uint1(x785)))
+ var x788 uint64
+ var x789 uint64
+ x788, x789 = bits.Add64(x779, x776, uint64(p521Uint1(x787)))
+ var x790 uint64
+ var x791 uint64
+ x790, x791 = bits.Add64(x777, x774, uint64(p521Uint1(x789)))
+ var x792 uint64
+ var x793 uint64
+ x792, x793 = bits.Add64(x775, x772, uint64(p521Uint1(x791)))
+ var x794 uint64
+ var x795 uint64
+ x794, x795 = bits.Add64(x773, x770, uint64(p521Uint1(x793)))
+ var x796 uint64
+ var x797 uint64
+ x796, x797 = bits.Add64(x771, x768, uint64(p521Uint1(x795)))
+ var x798 uint64
+ var x799 uint64
+ x798, x799 = bits.Add64(x769, x766, uint64(p521Uint1(x797)))
+ x800 := (uint64(p521Uint1(x799)) + x767)
+ var x801 uint64
+ var x802 uint64
+ x801, x802 = bits.Add64(x747, x782, uint64(0x0))
+ var x803 uint64
+ var x804 uint64
+ x803, x804 = bits.Add64(x749, x784, uint64(p521Uint1(x802)))
+ var x805 uint64
+ var x806 uint64
+ x805, x806 = bits.Add64(x751, x786, uint64(p521Uint1(x804)))
+ var x807 uint64
+ var x808 uint64
+ x807, x808 = bits.Add64(x753, x788, uint64(p521Uint1(x806)))
+ var x809 uint64
+ var x810 uint64
+ x809, x810 = bits.Add64(x755, x790, uint64(p521Uint1(x808)))
+ var x811 uint64
+ var x812 uint64
+ x811, x812 = bits.Add64(x757, x792, uint64(p521Uint1(x810)))
+ var x813 uint64
+ var x814 uint64
+ x813, x814 = bits.Add64(x759, x794, uint64(p521Uint1(x812)))
+ var x815 uint64
+ var x816 uint64
+ x815, x816 = bits.Add64(x761, x796, uint64(p521Uint1(x814)))
+ var x817 uint64
+ var x818 uint64
+ x817, x818 = bits.Add64(x763, x798, uint64(p521Uint1(x816)))
+ var x819 uint64
+ var x820 uint64
+ x819, x820 = bits.Add64(x765, x800, uint64(p521Uint1(x818)))
+ var x821 uint64
+ var x822 uint64
+ x822, x821 = bits.Mul64(x801, 0x1ff)
+ var x823 uint64
+ var x824 uint64
+ x824, x823 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x825 uint64
+ var x826 uint64
+ x826, x825 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x827 uint64
+ var x828 uint64
+ x828, x827 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x829 uint64
+ var x830 uint64
+ x830, x829 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x831 uint64
+ var x832 uint64
+ x832, x831 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x833 uint64
+ var x834 uint64
+ x834, x833 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x835 uint64
+ var x836 uint64
+ x836, x835 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x837 uint64
+ var x838 uint64
+ x838, x837 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x839 uint64
+ var x840 uint64
+ x839, x840 = bits.Add64(x838, x835, uint64(0x0))
+ var x841 uint64
+ var x842 uint64
+ x841, x842 = bits.Add64(x836, x833, uint64(p521Uint1(x840)))
+ var x843 uint64
+ var x844 uint64
+ x843, x844 = bits.Add64(x834, x831, uint64(p521Uint1(x842)))
+ var x845 uint64
+ var x846 uint64
+ x845, x846 = bits.Add64(x832, x829, uint64(p521Uint1(x844)))
+ var x847 uint64
+ var x848 uint64
+ x847, x848 = bits.Add64(x830, x827, uint64(p521Uint1(x846)))
+ var x849 uint64
+ var x850 uint64
+ x849, x850 = bits.Add64(x828, x825, uint64(p521Uint1(x848)))
+ var x851 uint64
+ var x852 uint64
+ x851, x852 = bits.Add64(x826, x823, uint64(p521Uint1(x850)))
+ var x853 uint64
+ var x854 uint64
+ x853, x854 = bits.Add64(x824, x821, uint64(p521Uint1(x852)))
+ x855 := (uint64(p521Uint1(x854)) + x822)
+ var x857 uint64
+ _, x857 = bits.Add64(x801, x837, uint64(0x0))
+ var x858 uint64
+ var x859 uint64
+ x858, x859 = bits.Add64(x803, x839, uint64(p521Uint1(x857)))
+ var x860 uint64
+ var x861 uint64
+ x860, x861 = bits.Add64(x805, x841, uint64(p521Uint1(x859)))
+ var x862 uint64
+ var x863 uint64
+ x862, x863 = bits.Add64(x807, x843, uint64(p521Uint1(x861)))
+ var x864 uint64
+ var x865 uint64
+ x864, x865 = bits.Add64(x809, x845, uint64(p521Uint1(x863)))
+ var x866 uint64
+ var x867 uint64
+ x866, x867 = bits.Add64(x811, x847, uint64(p521Uint1(x865)))
+ var x868 uint64
+ var x869 uint64
+ x868, x869 = bits.Add64(x813, x849, uint64(p521Uint1(x867)))
+ var x870 uint64
+ var x871 uint64
+ x870, x871 = bits.Add64(x815, x851, uint64(p521Uint1(x869)))
+ var x872 uint64
+ var x873 uint64
+ x872, x873 = bits.Add64(x817, x853, uint64(p521Uint1(x871)))
+ var x874 uint64
+ var x875 uint64
+ x874, x875 = bits.Add64(x819, x855, uint64(p521Uint1(x873)))
+ x876 := (uint64(p521Uint1(x875)) + uint64(p521Uint1(x820)))
+ var x877 uint64
+ var x878 uint64
+ x878, x877 = bits.Mul64(x8, arg2[8])
+ var x879 uint64
+ var x880 uint64
+ x880, x879 = bits.Mul64(x8, arg2[7])
+ var x881 uint64
+ var x882 uint64
+ x882, x881 = bits.Mul64(x8, arg2[6])
+ var x883 uint64
+ var x884 uint64
+ x884, x883 = bits.Mul64(x8, arg2[5])
+ var x885 uint64
+ var x886 uint64
+ x886, x885 = bits.Mul64(x8, arg2[4])
+ var x887 uint64
+ var x888 uint64
+ x888, x887 = bits.Mul64(x8, arg2[3])
+ var x889 uint64
+ var x890 uint64
+ x890, x889 = bits.Mul64(x8, arg2[2])
+ var x891 uint64
+ var x892 uint64
+ x892, x891 = bits.Mul64(x8, arg2[1])
+ var x893 uint64
+ var x894 uint64
+ x894, x893 = bits.Mul64(x8, arg2[0])
+ var x895 uint64
+ var x896 uint64
+ x895, x896 = bits.Add64(x894, x891, uint64(0x0))
+ var x897 uint64
+ var x898 uint64
+ x897, x898 = bits.Add64(x892, x889, uint64(p521Uint1(x896)))
+ var x899 uint64
+ var x900 uint64
+ x899, x900 = bits.Add64(x890, x887, uint64(p521Uint1(x898)))
+ var x901 uint64
+ var x902 uint64
+ x901, x902 = bits.Add64(x888, x885, uint64(p521Uint1(x900)))
+ var x903 uint64
+ var x904 uint64
+ x903, x904 = bits.Add64(x886, x883, uint64(p521Uint1(x902)))
+ var x905 uint64
+ var x906 uint64
+ x905, x906 = bits.Add64(x884, x881, uint64(p521Uint1(x904)))
+ var x907 uint64
+ var x908 uint64
+ x907, x908 = bits.Add64(x882, x879, uint64(p521Uint1(x906)))
+ var x909 uint64
+ var x910 uint64
+ x909, x910 = bits.Add64(x880, x877, uint64(p521Uint1(x908)))
+ x911 := (uint64(p521Uint1(x910)) + x878)
+ var x912 uint64
+ var x913 uint64
+ x912, x913 = bits.Add64(x858, x893, uint64(0x0))
+ var x914 uint64
+ var x915 uint64
+ x914, x915 = bits.Add64(x860, x895, uint64(p521Uint1(x913)))
+ var x916 uint64
+ var x917 uint64
+ x916, x917 = bits.Add64(x862, x897, uint64(p521Uint1(x915)))
+ var x918 uint64
+ var x919 uint64
+ x918, x919 = bits.Add64(x864, x899, uint64(p521Uint1(x917)))
+ var x920 uint64
+ var x921 uint64
+ x920, x921 = bits.Add64(x866, x901, uint64(p521Uint1(x919)))
+ var x922 uint64
+ var x923 uint64
+ x922, x923 = bits.Add64(x868, x903, uint64(p521Uint1(x921)))
+ var x924 uint64
+ var x925 uint64
+ x924, x925 = bits.Add64(x870, x905, uint64(p521Uint1(x923)))
+ var x926 uint64
+ var x927 uint64
+ x926, x927 = bits.Add64(x872, x907, uint64(p521Uint1(x925)))
+ var x928 uint64
+ var x929 uint64
+ x928, x929 = bits.Add64(x874, x909, uint64(p521Uint1(x927)))
+ var x930 uint64
+ var x931 uint64
+ x930, x931 = bits.Add64(x876, x911, uint64(p521Uint1(x929)))
+ var x932 uint64
+ var x933 uint64
+ x933, x932 = bits.Mul64(x912, 0x1ff)
+ var x934 uint64
+ var x935 uint64
+ x935, x934 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x936 uint64
+ var x937 uint64
+ x937, x936 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x938 uint64
+ var x939 uint64
+ x939, x938 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x940 uint64
+ var x941 uint64
+ x941, x940 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x942 uint64
+ var x943 uint64
+ x943, x942 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x944 uint64
+ var x945 uint64
+ x945, x944 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x946 uint64
+ var x947 uint64
+ x947, x946 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x948 uint64
+ var x949 uint64
+ x949, x948 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x950 uint64
+ var x951 uint64
+ x950, x951 = bits.Add64(x949, x946, uint64(0x0))
+ var x952 uint64
+ var x953 uint64
+ x952, x953 = bits.Add64(x947, x944, uint64(p521Uint1(x951)))
+ var x954 uint64
+ var x955 uint64
+ x954, x955 = bits.Add64(x945, x942, uint64(p521Uint1(x953)))
+ var x956 uint64
+ var x957 uint64
+ x956, x957 = bits.Add64(x943, x940, uint64(p521Uint1(x955)))
+ var x958 uint64
+ var x959 uint64
+ x958, x959 = bits.Add64(x941, x938, uint64(p521Uint1(x957)))
+ var x960 uint64
+ var x961 uint64
+ x960, x961 = bits.Add64(x939, x936, uint64(p521Uint1(x959)))
+ var x962 uint64
+ var x963 uint64
+ x962, x963 = bits.Add64(x937, x934, uint64(p521Uint1(x961)))
+ var x964 uint64
+ var x965 uint64
+ x964, x965 = bits.Add64(x935, x932, uint64(p521Uint1(x963)))
+ x966 := (uint64(p521Uint1(x965)) + x933)
+ var x968 uint64
+ _, x968 = bits.Add64(x912, x948, uint64(0x0))
+ var x969 uint64
+ var x970 uint64
+ x969, x970 = bits.Add64(x914, x950, uint64(p521Uint1(x968)))
+ var x971 uint64
+ var x972 uint64
+ x971, x972 = bits.Add64(x916, x952, uint64(p521Uint1(x970)))
+ var x973 uint64
+ var x974 uint64
+ x973, x974 = bits.Add64(x918, x954, uint64(p521Uint1(x972)))
+ var x975 uint64
+ var x976 uint64
+ x975, x976 = bits.Add64(x920, x956, uint64(p521Uint1(x974)))
+ var x977 uint64
+ var x978 uint64
+ x977, x978 = bits.Add64(x922, x958, uint64(p521Uint1(x976)))
+ var x979 uint64
+ var x980 uint64
+ x979, x980 = bits.Add64(x924, x960, uint64(p521Uint1(x978)))
+ var x981 uint64
+ var x982 uint64
+ x981, x982 = bits.Add64(x926, x962, uint64(p521Uint1(x980)))
+ var x983 uint64
+ var x984 uint64
+ x983, x984 = bits.Add64(x928, x964, uint64(p521Uint1(x982)))
+ var x985 uint64
+ var x986 uint64
+ x985, x986 = bits.Add64(x930, x966, uint64(p521Uint1(x984)))
+ x987 := (uint64(p521Uint1(x986)) + uint64(p521Uint1(x931)))
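+ // Annotation (not generated): the final step conditionally subtracts
+ // m = 2^521 - 1; the subtracted value is kept only when no borrow
+ // occurs, which guarantees 0 ≤ eval out1 < m.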
+ var x988 uint64
+ var x989 uint64
+ x988, x989 = bits.Sub64(x969, 0xffffffffffffffff, uint64(0x0))
+ var x990 uint64
+ var x991 uint64
+ x990, x991 = bits.Sub64(x971, 0xffffffffffffffff, uint64(p521Uint1(x989)))
+ var x992 uint64
+ var x993 uint64
+ x992, x993 = bits.Sub64(x973, 0xffffffffffffffff, uint64(p521Uint1(x991)))
+ var x994 uint64
+ var x995 uint64
+ x994, x995 = bits.Sub64(x975, 0xffffffffffffffff, uint64(p521Uint1(x993)))
+ var x996 uint64
+ var x997 uint64
+ x996, x997 = bits.Sub64(x977, 0xffffffffffffffff, uint64(p521Uint1(x995)))
+ var x998 uint64
+ var x999 uint64
+ x998, x999 = bits.Sub64(x979, 0xffffffffffffffff, uint64(p521Uint1(x997)))
+ var x1000 uint64
+ var x1001 uint64
+ x1000, x1001 = bits.Sub64(x981, 0xffffffffffffffff, uint64(p521Uint1(x999)))
+ var x1002 uint64
+ var x1003 uint64
+ x1002, x1003 = bits.Sub64(x983, 0xffffffffffffffff, uint64(p521Uint1(x1001)))
+ var x1004 uint64
+ var x1005 uint64
+ x1004, x1005 = bits.Sub64(x985, 0x1ff, uint64(p521Uint1(x1003)))
+ var x1007 uint64
+ _, x1007 = bits.Sub64(x987, uint64(0x0), uint64(p521Uint1(x1005)))
+ var x1008 uint64
+ p521CmovznzU64(&x1008, p521Uint1(x1007), x988, x969)
+ var x1009 uint64
+ p521CmovznzU64(&x1009, p521Uint1(x1007), x990, x971)
+ var x1010 uint64
+ p521CmovznzU64(&x1010, p521Uint1(x1007), x992, x973)
+ var x1011 uint64
+ p521CmovznzU64(&x1011, p521Uint1(x1007), x994, x975)
+ var x1012 uint64
+ p521CmovznzU64(&x1012, p521Uint1(x1007), x996, x977)
+ var x1013 uint64
+ p521CmovznzU64(&x1013, p521Uint1(x1007), x998, x979)
+ var x1014 uint64
+ p521CmovznzU64(&x1014, p521Uint1(x1007), x1000, x981)
+ var x1015 uint64
+ p521CmovznzU64(&x1015, p521Uint1(x1007), x1002, x983)
+ var x1016 uint64
+ p521CmovznzU64(&x1016, p521Uint1(x1007), x1004, x985)
+ out1[0] = x1008
+ out1[1] = x1009
+ out1[2] = x1010
+ out1[3] = x1011
+ out1[4] = x1012
+ out1[5] = x1013
+ out1[6] = x1014
+ out1[7] = x1015
+ out1[8] = x1016
+}
+
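+// p521Cube is an illustrative sketch and not part of the generated code:
+// it shows how p521Square and p521Mul compose while staying in the
+// Montgomery domain, computing out1 = arg1^3.
+func p521Cube(out1 *p521MontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement) {
+ var x2 p521MontgomeryDomainFieldElement
+ p521Square(&x2, arg1)    // x2 = arg1^2, still in the Montgomery domain
+ p521Mul(out1, &x2, arg1) // out1 = arg1^3, still in the Montgomery domain
+}
+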
+// p521Square squares a field element in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg1)) mod m
+// 0 ≤ eval out1 < m
+func p521Square(out1 *p521MontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[4]
+ x5 := arg1[5]
+ x6 := arg1[6]
+ x7 := arg1[7]
+ x8 := arg1[8]
+ x9 := arg1[0]
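+ // Annotation (not generated): p521Square is p521Mul specialized to
+ // arg2 = arg1; the word-by-word Montgomery rounds below are otherwise
+ // identical.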
+ var x10 uint64
+ var x11 uint64
+ x11, x10 = bits.Mul64(x9, arg1[8])
+ var x12 uint64
+ var x13 uint64
+ x13, x12 = bits.Mul64(x9, arg1[7])
+ var x14 uint64
+ var x15 uint64
+ x15, x14 = bits.Mul64(x9, arg1[6])
+ var x16 uint64
+ var x17 uint64
+ x17, x16 = bits.Mul64(x9, arg1[5])
+ var x18 uint64
+ var x19 uint64
+ x19, x18 = bits.Mul64(x9, arg1[4])
+ var x20 uint64
+ var x21 uint64
+ x21, x20 = bits.Mul64(x9, arg1[3])
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x9, arg1[2])
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x9, arg1[1])
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x9, arg1[0])
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x25, x22, uint64(p521Uint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x23, x20, uint64(p521Uint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x21, x18, uint64(p521Uint1(x33)))
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(x19, x16, uint64(p521Uint1(x35)))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(x17, x14, uint64(p521Uint1(x37)))
+ var x40 uint64
+ var x41 uint64
+ x40, x41 = bits.Add64(x15, x12, uint64(p521Uint1(x39)))
+ var x42 uint64
+ var x43 uint64
+ x42, x43 = bits.Add64(x13, x10, uint64(p521Uint1(x41)))
+ x44 := (uint64(p521Uint1(x43)) + x11)
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x26, 0x1ff)
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x49 uint64
+ var x50 uint64
+ x50, x49 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x51 uint64
+ var x52 uint64
+ x52, x51 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x53 uint64
+ var x54 uint64
+ x54, x53 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x55 uint64
+ var x56 uint64
+ x56, x55 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x57 uint64
+ var x58 uint64
+ x58, x57 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x59 uint64
+ var x60 uint64
+ x60, x59 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x61 uint64
+ var x62 uint64
+ x62, x61 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x63 uint64
+ var x64 uint64
+ x63, x64 = bits.Add64(x62, x59, uint64(0x0))
+ var x65 uint64
+ var x66 uint64
+ x65, x66 = bits.Add64(x60, x57, uint64(p521Uint1(x64)))
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x58, x55, uint64(p521Uint1(x66)))
+ var x69 uint64
+ var x70 uint64
+ x69, x70 = bits.Add64(x56, x53, uint64(p521Uint1(x68)))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x54, x51, uint64(p521Uint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x52, x49, uint64(p521Uint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x50, x47, uint64(p521Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Add64(x48, x45, uint64(p521Uint1(x76)))
+ x79 := (uint64(p521Uint1(x78)) + x46)
+ var x81 uint64
+ _, x81 = bits.Add64(x26, x61, uint64(0x0))
+ var x82 uint64
+ var x83 uint64
+ x82, x83 = bits.Add64(x28, x63, uint64(p521Uint1(x81)))
+ var x84 uint64
+ var x85 uint64
+ x84, x85 = bits.Add64(x30, x65, uint64(p521Uint1(x83)))
+ var x86 uint64
+ var x87 uint64
+ x86, x87 = bits.Add64(x32, x67, uint64(p521Uint1(x85)))
+ var x88 uint64
+ var x89 uint64
+ x88, x89 = bits.Add64(x34, x69, uint64(p521Uint1(x87)))
+ var x90 uint64
+ var x91 uint64
+ x90, x91 = bits.Add64(x36, x71, uint64(p521Uint1(x89)))
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x38, x73, uint64(p521Uint1(x91)))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x40, x75, uint64(p521Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x42, x77, uint64(p521Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x44, x79, uint64(p521Uint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x101, x100 = bits.Mul64(x1, arg1[8])
+ var x102 uint64
+ var x103 uint64
+ x103, x102 = bits.Mul64(x1, arg1[7])
+ var x104 uint64
+ var x105 uint64
+ x105, x104 = bits.Mul64(x1, arg1[6])
+ var x106 uint64
+ var x107 uint64
+ x107, x106 = bits.Mul64(x1, arg1[5])
+ var x108 uint64
+ var x109 uint64
+ x109, x108 = bits.Mul64(x1, arg1[4])
+ var x110 uint64
+ var x111 uint64
+ x111, x110 = bits.Mul64(x1, arg1[3])
+ var x112 uint64
+ var x113 uint64
+ x113, x112 = bits.Mul64(x1, arg1[2])
+ var x114 uint64
+ var x115 uint64
+ x115, x114 = bits.Mul64(x1, arg1[1])
+ var x116 uint64
+ var x117 uint64
+ x117, x116 = bits.Mul64(x1, arg1[0])
+ var x118 uint64
+ var x119 uint64
+ x118, x119 = bits.Add64(x117, x114, uint64(0x0))
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x115, x112, uint64(p521Uint1(x119)))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x113, x110, uint64(p521Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x111, x108, uint64(p521Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x109, x106, uint64(p521Uint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x107, x104, uint64(p521Uint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x105, x102, uint64(p521Uint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x103, x100, uint64(p521Uint1(x131)))
+ x134 := (uint64(p521Uint1(x133)) + x101)
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x82, x116, uint64(0x0))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x84, x118, uint64(p521Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x86, x120, uint64(p521Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x88, x122, uint64(p521Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x90, x124, uint64(p521Uint1(x142)))
+ var x145 uint64
+ var x146 uint64
+ x145, x146 = bits.Add64(x92, x126, uint64(p521Uint1(x144)))
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x94, x128, uint64(p521Uint1(x146)))
+ var x149 uint64
+ var x150 uint64
+ x149, x150 = bits.Add64(x96, x130, uint64(p521Uint1(x148)))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x98, x132, uint64(p521Uint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(uint64(p521Uint1(x99)), x134, uint64(p521Uint1(x152)))
+ var x155 uint64
+ var x156 uint64
+ x156, x155 = bits.Mul64(x135, 0x1ff)
+ var x157 uint64
+ var x158 uint64
+ x158, x157 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x159 uint64
+ var x160 uint64
+ x160, x159 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x161 uint64
+ var x162 uint64
+ x162, x161 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x163 uint64
+ var x164 uint64
+ x164, x163 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x165 uint64
+ var x166 uint64
+ x166, x165 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x167 uint64
+ var x168 uint64
+ x168, x167 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x169 uint64
+ var x170 uint64
+ x170, x169 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x171 uint64
+ var x172 uint64
+ x172, x171 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x172, x169, uint64(0x0))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x170, x167, uint64(p521Uint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x168, x165, uint64(p521Uint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x166, x163, uint64(p521Uint1(x178)))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x164, x161, uint64(p521Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x162, x159, uint64(p521Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x160, x157, uint64(p521Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x158, x155, uint64(p521Uint1(x186)))
+ x189 := (uint64(p521Uint1(x188)) + x156)
+ var x191 uint64
+ _, x191 = bits.Add64(x135, x171, uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Add64(x137, x173, uint64(p521Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Add64(x139, x175, uint64(p521Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Add64(x141, x177, uint64(p521Uint1(x195)))
+ var x198 uint64
+ var x199 uint64
+ x198, x199 = bits.Add64(x143, x179, uint64(p521Uint1(x197)))
+ var x200 uint64
+ var x201 uint64
+ x200, x201 = bits.Add64(x145, x181, uint64(p521Uint1(x199)))
+ var x202 uint64
+ var x203 uint64
+ x202, x203 = bits.Add64(x147, x183, uint64(p521Uint1(x201)))
+ var x204 uint64
+ var x205 uint64
+ x204, x205 = bits.Add64(x149, x185, uint64(p521Uint1(x203)))
+ var x206 uint64
+ var x207 uint64
+ x206, x207 = bits.Add64(x151, x187, uint64(p521Uint1(x205)))
+ var x208 uint64
+ var x209 uint64
+ x208, x209 = bits.Add64(x153, x189, uint64(p521Uint1(x207)))
+ x210 := (uint64(p521Uint1(x209)) + uint64(p521Uint1(x154)))
+ var x211 uint64
+ var x212 uint64
+ x212, x211 = bits.Mul64(x2, arg1[8])
+ var x213 uint64
+ var x214 uint64
+ x214, x213 = bits.Mul64(x2, arg1[7])
+ var x215 uint64
+ var x216 uint64
+ x216, x215 = bits.Mul64(x2, arg1[6])
+ var x217 uint64
+ var x218 uint64
+ x218, x217 = bits.Mul64(x2, arg1[5])
+ var x219 uint64
+ var x220 uint64
+ x220, x219 = bits.Mul64(x2, arg1[4])
+ var x221 uint64
+ var x222 uint64
+ x222, x221 = bits.Mul64(x2, arg1[3])
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x2, arg1[2])
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x2, arg1[1])
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x2, arg1[0])
+ var x229 uint64
+ var x230 uint64
+ x229, x230 = bits.Add64(x228, x225, uint64(0x0))
+ var x231 uint64
+ var x232 uint64
+ x231, x232 = bits.Add64(x226, x223, uint64(p521Uint1(x230)))
+ var x233 uint64
+ var x234 uint64
+ x233, x234 = bits.Add64(x224, x221, uint64(p521Uint1(x232)))
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x222, x219, uint64(p521Uint1(x234)))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x220, x217, uint64(p521Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x218, x215, uint64(p521Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x216, x213, uint64(p521Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x214, x211, uint64(p521Uint1(x242)))
+ x245 := (uint64(p521Uint1(x244)) + x212)
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x192, x227, uint64(0x0))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x194, x229, uint64(p521Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x196, x231, uint64(p521Uint1(x249)))
+ var x252 uint64
+ var x253 uint64
+ x252, x253 = bits.Add64(x198, x233, uint64(p521Uint1(x251)))
+ var x254 uint64
+ var x255 uint64
+ x254, x255 = bits.Add64(x200, x235, uint64(p521Uint1(x253)))
+ var x256 uint64
+ var x257 uint64
+ x256, x257 = bits.Add64(x202, x237, uint64(p521Uint1(x255)))
+ var x258 uint64
+ var x259 uint64
+ x258, x259 = bits.Add64(x204, x239, uint64(p521Uint1(x257)))
+ var x260 uint64
+ var x261 uint64
+ x260, x261 = bits.Add64(x206, x241, uint64(p521Uint1(x259)))
+ var x262 uint64
+ var x263 uint64
+ x262, x263 = bits.Add64(x208, x243, uint64(p521Uint1(x261)))
+ var x264 uint64
+ var x265 uint64
+ x264, x265 = bits.Add64(x210, x245, uint64(p521Uint1(x263)))
+ var x266 uint64
+ var x267 uint64
+ x267, x266 = bits.Mul64(x246, 0x1ff)
+ var x268 uint64
+ var x269 uint64
+ x269, x268 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x270 uint64
+ var x271 uint64
+ x271, x270 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x272 uint64
+ var x273 uint64
+ x273, x272 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x274 uint64
+ var x275 uint64
+ x275, x274 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x276 uint64
+ var x277 uint64
+ x277, x276 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x278 uint64
+ var x279 uint64
+ x279, x278 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x280 uint64
+ var x281 uint64
+ x281, x280 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x282 uint64
+ var x283 uint64
+ x283, x282 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x284 uint64
+ var x285 uint64
+ x284, x285 = bits.Add64(x283, x280, uint64(0x0))
+ var x286 uint64
+ var x287 uint64
+ x286, x287 = bits.Add64(x281, x278, uint64(p521Uint1(x285)))
+ var x288 uint64
+ var x289 uint64
+ x288, x289 = bits.Add64(x279, x276, uint64(p521Uint1(x287)))
+ var x290 uint64
+ var x291 uint64
+ x290, x291 = bits.Add64(x277, x274, uint64(p521Uint1(x289)))
+ var x292 uint64
+ var x293 uint64
+ x292, x293 = bits.Add64(x275, x272, uint64(p521Uint1(x291)))
+ var x294 uint64
+ var x295 uint64
+ x294, x295 = bits.Add64(x273, x270, uint64(p521Uint1(x293)))
+ var x296 uint64
+ var x297 uint64
+ x296, x297 = bits.Add64(x271, x268, uint64(p521Uint1(x295)))
+ var x298 uint64
+ var x299 uint64
+ x298, x299 = bits.Add64(x269, x266, uint64(p521Uint1(x297)))
+ x300 := (uint64(p521Uint1(x299)) + x267)
+ var x302 uint64
+ _, x302 = bits.Add64(x246, x282, uint64(0x0))
+ var x303 uint64
+ var x304 uint64
+ x303, x304 = bits.Add64(x248, x284, uint64(p521Uint1(x302)))
+ var x305 uint64
+ var x306 uint64
+ x305, x306 = bits.Add64(x250, x286, uint64(p521Uint1(x304)))
+ var x307 uint64
+ var x308 uint64
+ x307, x308 = bits.Add64(x252, x288, uint64(p521Uint1(x306)))
+ var x309 uint64
+ var x310 uint64
+ x309, x310 = bits.Add64(x254, x290, uint64(p521Uint1(x308)))
+ var x311 uint64
+ var x312 uint64
+ x311, x312 = bits.Add64(x256, x292, uint64(p521Uint1(x310)))
+ var x313 uint64
+ var x314 uint64
+ x313, x314 = bits.Add64(x258, x294, uint64(p521Uint1(x312)))
+ var x315 uint64
+ var x316 uint64
+ x315, x316 = bits.Add64(x260, x296, uint64(p521Uint1(x314)))
+ var x317 uint64
+ var x318 uint64
+ x317, x318 = bits.Add64(x262, x298, uint64(p521Uint1(x316)))
+ var x319 uint64
+ var x320 uint64
+ x319, x320 = bits.Add64(x264, x300, uint64(p521Uint1(x318)))
+ x321 := (uint64(p521Uint1(x320)) + uint64(p521Uint1(x265)))
+ var x322 uint64
+ var x323 uint64
+ x323, x322 = bits.Mul64(x3, arg1[8])
+ var x324 uint64
+ var x325 uint64
+ x325, x324 = bits.Mul64(x3, arg1[7])
+ var x326 uint64
+ var x327 uint64
+ x327, x326 = bits.Mul64(x3, arg1[6])
+ var x328 uint64
+ var x329 uint64
+ x329, x328 = bits.Mul64(x3, arg1[5])
+ var x330 uint64
+ var x331 uint64
+ x331, x330 = bits.Mul64(x3, arg1[4])
+ var x332 uint64
+ var x333 uint64
+ x333, x332 = bits.Mul64(x3, arg1[3])
+ var x334 uint64
+ var x335 uint64
+ x335, x334 = bits.Mul64(x3, arg1[2])
+ var x336 uint64
+ var x337 uint64
+ x337, x336 = bits.Mul64(x3, arg1[1])
+ var x338 uint64
+ var x339 uint64
+ x339, x338 = bits.Mul64(x3, arg1[0])
+ var x340 uint64
+ var x341 uint64
+ x340, x341 = bits.Add64(x339, x336, uint64(0x0))
+ var x342 uint64
+ var x343 uint64
+ x342, x343 = bits.Add64(x337, x334, uint64(p521Uint1(x341)))
+ var x344 uint64
+ var x345 uint64
+ x344, x345 = bits.Add64(x335, x332, uint64(p521Uint1(x343)))
+ var x346 uint64
+ var x347 uint64
+ x346, x347 = bits.Add64(x333, x330, uint64(p521Uint1(x345)))
+ var x348 uint64
+ var x349 uint64
+ x348, x349 = bits.Add64(x331, x328, uint64(p521Uint1(x347)))
+ var x350 uint64
+ var x351 uint64
+ x350, x351 = bits.Add64(x329, x326, uint64(p521Uint1(x349)))
+ var x352 uint64
+ var x353 uint64
+ x352, x353 = bits.Add64(x327, x324, uint64(p521Uint1(x351)))
+ var x354 uint64
+ var x355 uint64
+ x354, x355 = bits.Add64(x325, x322, uint64(p521Uint1(x353)))
+ x356 := (uint64(p521Uint1(x355)) + x323)
+ var x357 uint64
+ var x358 uint64
+ x357, x358 = bits.Add64(x303, x338, uint64(0x0))
+ var x359 uint64
+ var x360 uint64
+ x359, x360 = bits.Add64(x305, x340, uint64(p521Uint1(x358)))
+ var x361 uint64
+ var x362 uint64
+ x361, x362 = bits.Add64(x307, x342, uint64(p521Uint1(x360)))
+ var x363 uint64
+ var x364 uint64
+ x363, x364 = bits.Add64(x309, x344, uint64(p521Uint1(x362)))
+ var x365 uint64
+ var x366 uint64
+ x365, x366 = bits.Add64(x311, x346, uint64(p521Uint1(x364)))
+ var x367 uint64
+ var x368 uint64
+ x367, x368 = bits.Add64(x313, x348, uint64(p521Uint1(x366)))
+ var x369 uint64
+ var x370 uint64
+ x369, x370 = bits.Add64(x315, x350, uint64(p521Uint1(x368)))
+ var x371 uint64
+ var x372 uint64
+ x371, x372 = bits.Add64(x317, x352, uint64(p521Uint1(x370)))
+ var x373 uint64
+ var x374 uint64
+ x373, x374 = bits.Add64(x319, x354, uint64(p521Uint1(x372)))
+ var x375 uint64
+ var x376 uint64
+ x375, x376 = bits.Add64(x321, x356, uint64(p521Uint1(x374)))
+ var x377 uint64
+ var x378 uint64
+ x378, x377 = bits.Mul64(x357, 0x1ff)
+ var x379 uint64
+ var x380 uint64
+ x380, x379 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x381 uint64
+ var x382 uint64
+ x382, x381 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x383 uint64
+ var x384 uint64
+ x384, x383 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x385 uint64
+ var x386 uint64
+ x386, x385 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x387 uint64
+ var x388 uint64
+ x388, x387 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x389 uint64
+ var x390 uint64
+ x390, x389 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x391 uint64
+ var x392 uint64
+ x392, x391 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x393 uint64
+ var x394 uint64
+ x394, x393 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Add64(x394, x391, uint64(0x0))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Add64(x392, x389, uint64(p521Uint1(x396)))
+ var x399 uint64
+ var x400 uint64
+ x399, x400 = bits.Add64(x390, x387, uint64(p521Uint1(x398)))
+ var x401 uint64
+ var x402 uint64
+ x401, x402 = bits.Add64(x388, x385, uint64(p521Uint1(x400)))
+ var x403 uint64
+ var x404 uint64
+ x403, x404 = bits.Add64(x386, x383, uint64(p521Uint1(x402)))
+ var x405 uint64
+ var x406 uint64
+ x405, x406 = bits.Add64(x384, x381, uint64(p521Uint1(x404)))
+ var x407 uint64
+ var x408 uint64
+ x407, x408 = bits.Add64(x382, x379, uint64(p521Uint1(x406)))
+ var x409 uint64
+ var x410 uint64
+ x409, x410 = bits.Add64(x380, x377, uint64(p521Uint1(x408)))
+ x411 := (uint64(p521Uint1(x410)) + x378)
+ var x413 uint64
+ _, x413 = bits.Add64(x357, x393, uint64(0x0))
+ var x414 uint64
+ var x415 uint64
+ x414, x415 = bits.Add64(x359, x395, uint64(p521Uint1(x413)))
+ var x416 uint64
+ var x417 uint64
+ x416, x417 = bits.Add64(x361, x397, uint64(p521Uint1(x415)))
+ var x418 uint64
+ var x419 uint64
+ x418, x419 = bits.Add64(x363, x399, uint64(p521Uint1(x417)))
+ var x420 uint64
+ var x421 uint64
+ x420, x421 = bits.Add64(x365, x401, uint64(p521Uint1(x419)))
+ var x422 uint64
+ var x423 uint64
+ x422, x423 = bits.Add64(x367, x403, uint64(p521Uint1(x421)))
+ var x424 uint64
+ var x425 uint64
+ x424, x425 = bits.Add64(x369, x405, uint64(p521Uint1(x423)))
+ var x426 uint64
+ var x427 uint64
+ x426, x427 = bits.Add64(x371, x407, uint64(p521Uint1(x425)))
+ var x428 uint64
+ var x429 uint64
+ x428, x429 = bits.Add64(x373, x409, uint64(p521Uint1(x427)))
+ var x430 uint64
+ var x431 uint64
+ x430, x431 = bits.Add64(x375, x411, uint64(p521Uint1(x429)))
+ x432 := (uint64(p521Uint1(x431)) + uint64(p521Uint1(x376)))
+ var x433 uint64
+ var x434 uint64
+ x434, x433 = bits.Mul64(x4, arg1[8])
+ var x435 uint64
+ var x436 uint64
+ x436, x435 = bits.Mul64(x4, arg1[7])
+ var x437 uint64
+ var x438 uint64
+ x438, x437 = bits.Mul64(x4, arg1[6])
+ var x439 uint64
+ var x440 uint64
+ x440, x439 = bits.Mul64(x4, arg1[5])
+ var x441 uint64
+ var x442 uint64
+ x442, x441 = bits.Mul64(x4, arg1[4])
+ var x443 uint64
+ var x444 uint64
+ x444, x443 = bits.Mul64(x4, arg1[3])
+ var x445 uint64
+ var x446 uint64
+ x446, x445 = bits.Mul64(x4, arg1[2])
+ var x447 uint64
+ var x448 uint64
+ x448, x447 = bits.Mul64(x4, arg1[1])
+ var x449 uint64
+ var x450 uint64
+ x450, x449 = bits.Mul64(x4, arg1[0])
+ var x451 uint64
+ var x452 uint64
+ x451, x452 = bits.Add64(x450, x447, uint64(0x0))
+ var x453 uint64
+ var x454 uint64
+ x453, x454 = bits.Add64(x448, x445, uint64(p521Uint1(x452)))
+ var x455 uint64
+ var x456 uint64
+ x455, x456 = bits.Add64(x446, x443, uint64(p521Uint1(x454)))
+ var x457 uint64
+ var x458 uint64
+ x457, x458 = bits.Add64(x444, x441, uint64(p521Uint1(x456)))
+ var x459 uint64
+ var x460 uint64
+ x459, x460 = bits.Add64(x442, x439, uint64(p521Uint1(x458)))
+ var x461 uint64
+ var x462 uint64
+ x461, x462 = bits.Add64(x440, x437, uint64(p521Uint1(x460)))
+ var x463 uint64
+ var x464 uint64
+ x463, x464 = bits.Add64(x438, x435, uint64(p521Uint1(x462)))
+ var x465 uint64
+ var x466 uint64
+ x465, x466 = bits.Add64(x436, x433, uint64(p521Uint1(x464)))
+ x467 := (uint64(p521Uint1(x466)) + x434)
+ var x468 uint64
+ var x469 uint64
+ x468, x469 = bits.Add64(x414, x449, uint64(0x0))
+ var x470 uint64
+ var x471 uint64
+ x470, x471 = bits.Add64(x416, x451, uint64(p521Uint1(x469)))
+ var x472 uint64
+ var x473 uint64
+ x472, x473 = bits.Add64(x418, x453, uint64(p521Uint1(x471)))
+ var x474 uint64
+ var x475 uint64
+ x474, x475 = bits.Add64(x420, x455, uint64(p521Uint1(x473)))
+ var x476 uint64
+ var x477 uint64
+ x476, x477 = bits.Add64(x422, x457, uint64(p521Uint1(x475)))
+ var x478 uint64
+ var x479 uint64
+ x478, x479 = bits.Add64(x424, x459, uint64(p521Uint1(x477)))
+ var x480 uint64
+ var x481 uint64
+ x480, x481 = bits.Add64(x426, x461, uint64(p521Uint1(x479)))
+ var x482 uint64
+ var x483 uint64
+ x482, x483 = bits.Add64(x428, x463, uint64(p521Uint1(x481)))
+ var x484 uint64
+ var x485 uint64
+ x484, x485 = bits.Add64(x430, x465, uint64(p521Uint1(x483)))
+ var x486 uint64
+ var x487 uint64
+ x486, x487 = bits.Add64(x432, x467, uint64(p521Uint1(x485)))
+ var x488 uint64
+ var x489 uint64
+ x489, x488 = bits.Mul64(x468, 0x1ff)
+ var x490 uint64
+ var x491 uint64
+ x491, x490 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x492 uint64
+ var x493 uint64
+ x493, x492 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x494 uint64
+ var x495 uint64
+ x495, x494 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x496 uint64
+ var x497 uint64
+ x497, x496 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x498 uint64
+ var x499 uint64
+ x499, x498 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x500 uint64
+ var x501 uint64
+ x501, x500 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x502 uint64
+ var x503 uint64
+ x503, x502 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x504 uint64
+ var x505 uint64
+ x505, x504 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x506 uint64
+ var x507 uint64
+ x506, x507 = bits.Add64(x505, x502, uint64(0x0))
+ var x508 uint64
+ var x509 uint64
+ x508, x509 = bits.Add64(x503, x500, uint64(p521Uint1(x507)))
+ var x510 uint64
+ var x511 uint64
+ x510, x511 = bits.Add64(x501, x498, uint64(p521Uint1(x509)))
+ var x512 uint64
+ var x513 uint64
+ x512, x513 = bits.Add64(x499, x496, uint64(p521Uint1(x511)))
+ var x514 uint64
+ var x515 uint64
+ x514, x515 = bits.Add64(x497, x494, uint64(p521Uint1(x513)))
+ var x516 uint64
+ var x517 uint64
+ x516, x517 = bits.Add64(x495, x492, uint64(p521Uint1(x515)))
+ var x518 uint64
+ var x519 uint64
+ x518, x519 = bits.Add64(x493, x490, uint64(p521Uint1(x517)))
+ var x520 uint64
+ var x521 uint64
+ x520, x521 = bits.Add64(x491, x488, uint64(p521Uint1(x519)))
+ x522 := (uint64(p521Uint1(x521)) + x489)
+ var x524 uint64
+ _, x524 = bits.Add64(x468, x504, uint64(0x0))
+ var x525 uint64
+ var x526 uint64
+ x525, x526 = bits.Add64(x470, x506, uint64(p521Uint1(x524)))
+ var x527 uint64
+ var x528 uint64
+ x527, x528 = bits.Add64(x472, x508, uint64(p521Uint1(x526)))
+ var x529 uint64
+ var x530 uint64
+ x529, x530 = bits.Add64(x474, x510, uint64(p521Uint1(x528)))
+ var x531 uint64
+ var x532 uint64
+ x531, x532 = bits.Add64(x476, x512, uint64(p521Uint1(x530)))
+ var x533 uint64
+ var x534 uint64
+ x533, x534 = bits.Add64(x478, x514, uint64(p521Uint1(x532)))
+ var x535 uint64
+ var x536 uint64
+ x535, x536 = bits.Add64(x480, x516, uint64(p521Uint1(x534)))
+ var x537 uint64
+ var x538 uint64
+ x537, x538 = bits.Add64(x482, x518, uint64(p521Uint1(x536)))
+ var x539 uint64
+ var x540 uint64
+ x539, x540 = bits.Add64(x484, x520, uint64(p521Uint1(x538)))
+ var x541 uint64
+ var x542 uint64
+ x541, x542 = bits.Add64(x486, x522, uint64(p521Uint1(x540)))
+ x543 := (uint64(p521Uint1(x542)) + uint64(p521Uint1(x487)))
+ var x544 uint64
+ var x545 uint64
+ x545, x544 = bits.Mul64(x5, arg1[8])
+ var x546 uint64
+ var x547 uint64
+ x547, x546 = bits.Mul64(x5, arg1[7])
+ var x548 uint64
+ var x549 uint64
+ x549, x548 = bits.Mul64(x5, arg1[6])
+ var x550 uint64
+ var x551 uint64
+ x551, x550 = bits.Mul64(x5, arg1[5])
+ var x552 uint64
+ var x553 uint64
+ x553, x552 = bits.Mul64(x5, arg1[4])
+ var x554 uint64
+ var x555 uint64
+ x555, x554 = bits.Mul64(x5, arg1[3])
+ var x556 uint64
+ var x557 uint64
+ x557, x556 = bits.Mul64(x5, arg1[2])
+ var x558 uint64
+ var x559 uint64
+ x559, x558 = bits.Mul64(x5, arg1[1])
+ var x560 uint64
+ var x561 uint64
+ x561, x560 = bits.Mul64(x5, arg1[0])
+ var x562 uint64
+ var x563 uint64
+ x562, x563 = bits.Add64(x561, x558, uint64(0x0))
+ var x564 uint64
+ var x565 uint64
+ x564, x565 = bits.Add64(x559, x556, uint64(p521Uint1(x563)))
+ var x566 uint64
+ var x567 uint64
+ x566, x567 = bits.Add64(x557, x554, uint64(p521Uint1(x565)))
+ var x568 uint64
+ var x569 uint64
+ x568, x569 = bits.Add64(x555, x552, uint64(p521Uint1(x567)))
+ var x570 uint64
+ var x571 uint64
+ x570, x571 = bits.Add64(x553, x550, uint64(p521Uint1(x569)))
+ var x572 uint64
+ var x573 uint64
+ x572, x573 = bits.Add64(x551, x548, uint64(p521Uint1(x571)))
+ var x574 uint64
+ var x575 uint64
+ x574, x575 = bits.Add64(x549, x546, uint64(p521Uint1(x573)))
+ var x576 uint64
+ var x577 uint64
+ x576, x577 = bits.Add64(x547, x544, uint64(p521Uint1(x575)))
+ x578 := (uint64(p521Uint1(x577)) + x545)
+ var x579 uint64
+ var x580 uint64
+ x579, x580 = bits.Add64(x525, x560, uint64(0x0))
+ var x581 uint64
+ var x582 uint64
+ x581, x582 = bits.Add64(x527, x562, uint64(p521Uint1(x580)))
+ var x583 uint64
+ var x584 uint64
+ x583, x584 = bits.Add64(x529, x564, uint64(p521Uint1(x582)))
+ var x585 uint64
+ var x586 uint64
+ x585, x586 = bits.Add64(x531, x566, uint64(p521Uint1(x584)))
+ var x587 uint64
+ var x588 uint64
+ x587, x588 = bits.Add64(x533, x568, uint64(p521Uint1(x586)))
+ var x589 uint64
+ var x590 uint64
+ x589, x590 = bits.Add64(x535, x570, uint64(p521Uint1(x588)))
+ var x591 uint64
+ var x592 uint64
+ x591, x592 = bits.Add64(x537, x572, uint64(p521Uint1(x590)))
+ var x593 uint64
+ var x594 uint64
+ x593, x594 = bits.Add64(x539, x574, uint64(p521Uint1(x592)))
+ var x595 uint64
+ var x596 uint64
+ x595, x596 = bits.Add64(x541, x576, uint64(p521Uint1(x594)))
+ var x597 uint64
+ var x598 uint64
+ x597, x598 = bits.Add64(x543, x578, uint64(p521Uint1(x596)))
+ var x599 uint64
+ var x600 uint64
+ x600, x599 = bits.Mul64(x579, 0x1ff)
+ var x601 uint64
+ var x602 uint64
+ x602, x601 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x603 uint64
+ var x604 uint64
+ x604, x603 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x605 uint64
+ var x606 uint64
+ x606, x605 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x607 uint64
+ var x608 uint64
+ x608, x607 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x609 uint64
+ var x610 uint64
+ x610, x609 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x611 uint64
+ var x612 uint64
+ x612, x611 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x613 uint64
+ var x614 uint64
+ x614, x613 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x615 uint64
+ var x616 uint64
+ x616, x615 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x617 uint64
+ var x618 uint64
+ x617, x618 = bits.Add64(x616, x613, uint64(0x0))
+ var x619 uint64
+ var x620 uint64
+ x619, x620 = bits.Add64(x614, x611, uint64(p521Uint1(x618)))
+ var x621 uint64
+ var x622 uint64
+ x621, x622 = bits.Add64(x612, x609, uint64(p521Uint1(x620)))
+ var x623 uint64
+ var x624 uint64
+ x623, x624 = bits.Add64(x610, x607, uint64(p521Uint1(x622)))
+ var x625 uint64
+ var x626 uint64
+ x625, x626 = bits.Add64(x608, x605, uint64(p521Uint1(x624)))
+ var x627 uint64
+ var x628 uint64
+ x627, x628 = bits.Add64(x606, x603, uint64(p521Uint1(x626)))
+ var x629 uint64
+ var x630 uint64
+ x629, x630 = bits.Add64(x604, x601, uint64(p521Uint1(x628)))
+ var x631 uint64
+ var x632 uint64
+ x631, x632 = bits.Add64(x602, x599, uint64(p521Uint1(x630)))
+ x633 := (uint64(p521Uint1(x632)) + x600)
+ var x635 uint64
+ _, x635 = bits.Add64(x579, x615, uint64(0x0))
+ var x636 uint64
+ var x637 uint64
+ x636, x637 = bits.Add64(x581, x617, uint64(p521Uint1(x635)))
+ var x638 uint64
+ var x639 uint64
+ x638, x639 = bits.Add64(x583, x619, uint64(p521Uint1(x637)))
+ var x640 uint64
+ var x641 uint64
+ x640, x641 = bits.Add64(x585, x621, uint64(p521Uint1(x639)))
+ var x642 uint64
+ var x643 uint64
+ x642, x643 = bits.Add64(x587, x623, uint64(p521Uint1(x641)))
+ var x644 uint64
+ var x645 uint64
+ x644, x645 = bits.Add64(x589, x625, uint64(p521Uint1(x643)))
+ var x646 uint64
+ var x647 uint64
+ x646, x647 = bits.Add64(x591, x627, uint64(p521Uint1(x645)))
+ var x648 uint64
+ var x649 uint64
+ x648, x649 = bits.Add64(x593, x629, uint64(p521Uint1(x647)))
+ var x650 uint64
+ var x651 uint64
+ x650, x651 = bits.Add64(x595, x631, uint64(p521Uint1(x649)))
+ var x652 uint64
+ var x653 uint64
+ x652, x653 = bits.Add64(x597, x633, uint64(p521Uint1(x651)))
+ x654 := (uint64(p521Uint1(x653)) + uint64(p521Uint1(x598)))
+ var x655 uint64
+ var x656 uint64
+ x656, x655 = bits.Mul64(x6, arg1[8])
+ var x657 uint64
+ var x658 uint64
+ x658, x657 = bits.Mul64(x6, arg1[7])
+ var x659 uint64
+ var x660 uint64
+ x660, x659 = bits.Mul64(x6, arg1[6])
+ var x661 uint64
+ var x662 uint64
+ x662, x661 = bits.Mul64(x6, arg1[5])
+ var x663 uint64
+ var x664 uint64
+ x664, x663 = bits.Mul64(x6, arg1[4])
+ var x665 uint64
+ var x666 uint64
+ x666, x665 = bits.Mul64(x6, arg1[3])
+ var x667 uint64
+ var x668 uint64
+ x668, x667 = bits.Mul64(x6, arg1[2])
+ var x669 uint64
+ var x670 uint64
+ x670, x669 = bits.Mul64(x6, arg1[1])
+ var x671 uint64
+ var x672 uint64
+ x672, x671 = bits.Mul64(x6, arg1[0])
+ var x673 uint64
+ var x674 uint64
+ x673, x674 = bits.Add64(x672, x669, uint64(0x0))
+ var x675 uint64
+ var x676 uint64
+ x675, x676 = bits.Add64(x670, x667, uint64(p521Uint1(x674)))
+ var x677 uint64
+ var x678 uint64
+ x677, x678 = bits.Add64(x668, x665, uint64(p521Uint1(x676)))
+ var x679 uint64
+ var x680 uint64
+ x679, x680 = bits.Add64(x666, x663, uint64(p521Uint1(x678)))
+ var x681 uint64
+ var x682 uint64
+ x681, x682 = bits.Add64(x664, x661, uint64(p521Uint1(x680)))
+ var x683 uint64
+ var x684 uint64
+ x683, x684 = bits.Add64(x662, x659, uint64(p521Uint1(x682)))
+ var x685 uint64
+ var x686 uint64
+ x685, x686 = bits.Add64(x660, x657, uint64(p521Uint1(x684)))
+ var x687 uint64
+ var x688 uint64
+ x687, x688 = bits.Add64(x658, x655, uint64(p521Uint1(x686)))
+ x689 := (uint64(p521Uint1(x688)) + x656)
+ var x690 uint64
+ var x691 uint64
+ x690, x691 = bits.Add64(x636, x671, uint64(0x0))
+ var x692 uint64
+ var x693 uint64
+ x692, x693 = bits.Add64(x638, x673, uint64(p521Uint1(x691)))
+ var x694 uint64
+ var x695 uint64
+ x694, x695 = bits.Add64(x640, x675, uint64(p521Uint1(x693)))
+ var x696 uint64
+ var x697 uint64
+ x696, x697 = bits.Add64(x642, x677, uint64(p521Uint1(x695)))
+ var x698 uint64
+ var x699 uint64
+ x698, x699 = bits.Add64(x644, x679, uint64(p521Uint1(x697)))
+ var x700 uint64
+ var x701 uint64
+ x700, x701 = bits.Add64(x646, x681, uint64(p521Uint1(x699)))
+ var x702 uint64
+ var x703 uint64
+ x702, x703 = bits.Add64(x648, x683, uint64(p521Uint1(x701)))
+ var x704 uint64
+ var x705 uint64
+ x704, x705 = bits.Add64(x650, x685, uint64(p521Uint1(x703)))
+ var x706 uint64
+ var x707 uint64
+ x706, x707 = bits.Add64(x652, x687, uint64(p521Uint1(x705)))
+ var x708 uint64
+ var x709 uint64
+ x708, x709 = bits.Add64(x654, x689, uint64(p521Uint1(x707)))
+ var x710 uint64
+ var x711 uint64
+ x711, x710 = bits.Mul64(x690, 0x1ff)
+ var x712 uint64
+ var x713 uint64
+ x713, x712 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x714 uint64
+ var x715 uint64
+ x715, x714 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x716 uint64
+ var x717 uint64
+ x717, x716 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x718 uint64
+ var x719 uint64
+ x719, x718 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x720 uint64
+ var x721 uint64
+ x721, x720 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x722 uint64
+ var x723 uint64
+ x723, x722 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x724 uint64
+ var x725 uint64
+ x725, x724 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x726 uint64
+ var x727 uint64
+ x727, x726 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x728 uint64
+ var x729 uint64
+ x728, x729 = bits.Add64(x727, x724, uint64(0x0))
+ var x730 uint64
+ var x731 uint64
+ x730, x731 = bits.Add64(x725, x722, uint64(p521Uint1(x729)))
+ var x732 uint64
+ var x733 uint64
+ x732, x733 = bits.Add64(x723, x720, uint64(p521Uint1(x731)))
+ var x734 uint64
+ var x735 uint64
+ x734, x735 = bits.Add64(x721, x718, uint64(p521Uint1(x733)))
+ var x736 uint64
+ var x737 uint64
+ x736, x737 = bits.Add64(x719, x716, uint64(p521Uint1(x735)))
+ var x738 uint64
+ var x739 uint64
+ x738, x739 = bits.Add64(x717, x714, uint64(p521Uint1(x737)))
+ var x740 uint64
+ var x741 uint64
+ x740, x741 = bits.Add64(x715, x712, uint64(p521Uint1(x739)))
+ var x742 uint64
+ var x743 uint64
+ x742, x743 = bits.Add64(x713, x710, uint64(p521Uint1(x741)))
+ x744 := (uint64(p521Uint1(x743)) + x711)
+ var x746 uint64
+ _, x746 = bits.Add64(x690, x726, uint64(0x0))
+ var x747 uint64
+ var x748 uint64
+ x747, x748 = bits.Add64(x692, x728, uint64(p521Uint1(x746)))
+ var x749 uint64
+ var x750 uint64
+ x749, x750 = bits.Add64(x694, x730, uint64(p521Uint1(x748)))
+ var x751 uint64
+ var x752 uint64
+ x751, x752 = bits.Add64(x696, x732, uint64(p521Uint1(x750)))
+ var x753 uint64
+ var x754 uint64
+ x753, x754 = bits.Add64(x698, x734, uint64(p521Uint1(x752)))
+ var x755 uint64
+ var x756 uint64
+ x755, x756 = bits.Add64(x700, x736, uint64(p521Uint1(x754)))
+ var x757 uint64
+ var x758 uint64
+ x757, x758 = bits.Add64(x702, x738, uint64(p521Uint1(x756)))
+ var x759 uint64
+ var x760 uint64
+ x759, x760 = bits.Add64(x704, x740, uint64(p521Uint1(x758)))
+ var x761 uint64
+ var x762 uint64
+ x761, x762 = bits.Add64(x706, x742, uint64(p521Uint1(x760)))
+ var x763 uint64
+ var x764 uint64
+ x763, x764 = bits.Add64(x708, x744, uint64(p521Uint1(x762)))
+ x765 := (uint64(p521Uint1(x764)) + uint64(p521Uint1(x709)))
+ var x766 uint64
+ var x767 uint64
+ x767, x766 = bits.Mul64(x7, arg1[8])
+ var x768 uint64
+ var x769 uint64
+ x769, x768 = bits.Mul64(x7, arg1[7])
+ var x770 uint64
+ var x771 uint64
+ x771, x770 = bits.Mul64(x7, arg1[6])
+ var x772 uint64
+ var x773 uint64
+ x773, x772 = bits.Mul64(x7, arg1[5])
+ var x774 uint64
+ var x775 uint64
+ x775, x774 = bits.Mul64(x7, arg1[4])
+ var x776 uint64
+ var x777 uint64
+ x777, x776 = bits.Mul64(x7, arg1[3])
+ var x778 uint64
+ var x779 uint64
+ x779, x778 = bits.Mul64(x7, arg1[2])
+ var x780 uint64
+ var x781 uint64
+ x781, x780 = bits.Mul64(x7, arg1[1])
+ var x782 uint64
+ var x783 uint64
+ x783, x782 = bits.Mul64(x7, arg1[0])
+ var x784 uint64
+ var x785 uint64
+ x784, x785 = bits.Add64(x783, x780, uint64(0x0))
+ var x786 uint64
+ var x787 uint64
+ x786, x787 = bits.Add64(x781, x778, uint64(p521Uint1(x785)))
+ var x788 uint64
+ var x789 uint64
+ x788, x789 = bits.Add64(x779, x776, uint64(p521Uint1(x787)))
+ var x790 uint64
+ var x791 uint64
+ x790, x791 = bits.Add64(x777, x774, uint64(p521Uint1(x789)))
+ var x792 uint64
+ var x793 uint64
+ x792, x793 = bits.Add64(x775, x772, uint64(p521Uint1(x791)))
+ var x794 uint64
+ var x795 uint64
+ x794, x795 = bits.Add64(x773, x770, uint64(p521Uint1(x793)))
+ var x796 uint64
+ var x797 uint64
+ x796, x797 = bits.Add64(x771, x768, uint64(p521Uint1(x795)))
+ var x798 uint64
+ var x799 uint64
+ x798, x799 = bits.Add64(x769, x766, uint64(p521Uint1(x797)))
+ x800 := (uint64(p521Uint1(x799)) + x767)
+ var x801 uint64
+ var x802 uint64
+ x801, x802 = bits.Add64(x747, x782, uint64(0x0))
+ var x803 uint64
+ var x804 uint64
+ x803, x804 = bits.Add64(x749, x784, uint64(p521Uint1(x802)))
+ var x805 uint64
+ var x806 uint64
+ x805, x806 = bits.Add64(x751, x786, uint64(p521Uint1(x804)))
+ var x807 uint64
+ var x808 uint64
+ x807, x808 = bits.Add64(x753, x788, uint64(p521Uint1(x806)))
+ var x809 uint64
+ var x810 uint64
+ x809, x810 = bits.Add64(x755, x790, uint64(p521Uint1(x808)))
+ var x811 uint64
+ var x812 uint64
+ x811, x812 = bits.Add64(x757, x792, uint64(p521Uint1(x810)))
+ var x813 uint64
+ var x814 uint64
+ x813, x814 = bits.Add64(x759, x794, uint64(p521Uint1(x812)))
+ var x815 uint64
+ var x816 uint64
+ x815, x816 = bits.Add64(x761, x796, uint64(p521Uint1(x814)))
+ var x817 uint64
+ var x818 uint64
+ x817, x818 = bits.Add64(x763, x798, uint64(p521Uint1(x816)))
+ var x819 uint64
+ var x820 uint64
+ x819, x820 = bits.Add64(x765, x800, uint64(p521Uint1(x818)))
+ var x821 uint64
+ var x822 uint64
+ x822, x821 = bits.Mul64(x801, 0x1ff)
+ var x823 uint64
+ var x824 uint64
+ x824, x823 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x825 uint64
+ var x826 uint64
+ x826, x825 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x827 uint64
+ var x828 uint64
+ x828, x827 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x829 uint64
+ var x830 uint64
+ x830, x829 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x831 uint64
+ var x832 uint64
+ x832, x831 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x833 uint64
+ var x834 uint64
+ x834, x833 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x835 uint64
+ var x836 uint64
+ x836, x835 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x837 uint64
+ var x838 uint64
+ x838, x837 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x839 uint64
+ var x840 uint64
+ x839, x840 = bits.Add64(x838, x835, uint64(0x0))
+ var x841 uint64
+ var x842 uint64
+ x841, x842 = bits.Add64(x836, x833, uint64(p521Uint1(x840)))
+ var x843 uint64
+ var x844 uint64
+ x843, x844 = bits.Add64(x834, x831, uint64(p521Uint1(x842)))
+ var x845 uint64
+ var x846 uint64
+ x845, x846 = bits.Add64(x832, x829, uint64(p521Uint1(x844)))
+ var x847 uint64
+ var x848 uint64
+ x847, x848 = bits.Add64(x830, x827, uint64(p521Uint1(x846)))
+ var x849 uint64
+ var x850 uint64
+ x849, x850 = bits.Add64(x828, x825, uint64(p521Uint1(x848)))
+ var x851 uint64
+ var x852 uint64
+ x851, x852 = bits.Add64(x826, x823, uint64(p521Uint1(x850)))
+ var x853 uint64
+ var x854 uint64
+ x853, x854 = bits.Add64(x824, x821, uint64(p521Uint1(x852)))
+ x855 := (uint64(p521Uint1(x854)) + x822)
+ var x857 uint64
+ _, x857 = bits.Add64(x801, x837, uint64(0x0))
+ var x858 uint64
+ var x859 uint64
+ x858, x859 = bits.Add64(x803, x839, uint64(p521Uint1(x857)))
+ var x860 uint64
+ var x861 uint64
+ x860, x861 = bits.Add64(x805, x841, uint64(p521Uint1(x859)))
+ var x862 uint64
+ var x863 uint64
+ x862, x863 = bits.Add64(x807, x843, uint64(p521Uint1(x861)))
+ var x864 uint64
+ var x865 uint64
+ x864, x865 = bits.Add64(x809, x845, uint64(p521Uint1(x863)))
+ var x866 uint64
+ var x867 uint64
+ x866, x867 = bits.Add64(x811, x847, uint64(p521Uint1(x865)))
+ var x868 uint64
+ var x869 uint64
+ x868, x869 = bits.Add64(x813, x849, uint64(p521Uint1(x867)))
+ var x870 uint64
+ var x871 uint64
+ x870, x871 = bits.Add64(x815, x851, uint64(p521Uint1(x869)))
+ var x872 uint64
+ var x873 uint64
+ x872, x873 = bits.Add64(x817, x853, uint64(p521Uint1(x871)))
+ var x874 uint64
+ var x875 uint64
+ x874, x875 = bits.Add64(x819, x855, uint64(p521Uint1(x873)))
+ x876 := (uint64(p521Uint1(x875)) + uint64(p521Uint1(x820)))
+ var x877 uint64
+ var x878 uint64
+ x878, x877 = bits.Mul64(x8, arg1[8])
+ var x879 uint64
+ var x880 uint64
+ x880, x879 = bits.Mul64(x8, arg1[7])
+ var x881 uint64
+ var x882 uint64
+ x882, x881 = bits.Mul64(x8, arg1[6])
+ var x883 uint64
+ var x884 uint64
+ x884, x883 = bits.Mul64(x8, arg1[5])
+ var x885 uint64
+ var x886 uint64
+ x886, x885 = bits.Mul64(x8, arg1[4])
+ var x887 uint64
+ var x888 uint64
+ x888, x887 = bits.Mul64(x8, arg1[3])
+ var x889 uint64
+ var x890 uint64
+ x890, x889 = bits.Mul64(x8, arg1[2])
+ var x891 uint64
+ var x892 uint64
+ x892, x891 = bits.Mul64(x8, arg1[1])
+ var x893 uint64
+ var x894 uint64
+ x894, x893 = bits.Mul64(x8, arg1[0])
+ var x895 uint64
+ var x896 uint64
+ x895, x896 = bits.Add64(x894, x891, uint64(0x0))
+ var x897 uint64
+ var x898 uint64
+ x897, x898 = bits.Add64(x892, x889, uint64(p521Uint1(x896)))
+ var x899 uint64
+ var x900 uint64
+ x899, x900 = bits.Add64(x890, x887, uint64(p521Uint1(x898)))
+ var x901 uint64
+ var x902 uint64
+ x901, x902 = bits.Add64(x888, x885, uint64(p521Uint1(x900)))
+ var x903 uint64
+ var x904 uint64
+ x903, x904 = bits.Add64(x886, x883, uint64(p521Uint1(x902)))
+ var x905 uint64
+ var x906 uint64
+ x905, x906 = bits.Add64(x884, x881, uint64(p521Uint1(x904)))
+ var x907 uint64
+ var x908 uint64
+ x907, x908 = bits.Add64(x882, x879, uint64(p521Uint1(x906)))
+ var x909 uint64
+ var x910 uint64
+ x909, x910 = bits.Add64(x880, x877, uint64(p521Uint1(x908)))
+ x911 := (uint64(p521Uint1(x910)) + x878)
+ var x912 uint64
+ var x913 uint64
+ x912, x913 = bits.Add64(x858, x893, uint64(0x0))
+ var x914 uint64
+ var x915 uint64
+ x914, x915 = bits.Add64(x860, x895, uint64(p521Uint1(x913)))
+ var x916 uint64
+ var x917 uint64
+ x916, x917 = bits.Add64(x862, x897, uint64(p521Uint1(x915)))
+ var x918 uint64
+ var x919 uint64
+ x918, x919 = bits.Add64(x864, x899, uint64(p521Uint1(x917)))
+ var x920 uint64
+ var x921 uint64
+ x920, x921 = bits.Add64(x866, x901, uint64(p521Uint1(x919)))
+ var x922 uint64
+ var x923 uint64
+ x922, x923 = bits.Add64(x868, x903, uint64(p521Uint1(x921)))
+ var x924 uint64
+ var x925 uint64
+ x924, x925 = bits.Add64(x870, x905, uint64(p521Uint1(x923)))
+ var x926 uint64
+ var x927 uint64
+ x926, x927 = bits.Add64(x872, x907, uint64(p521Uint1(x925)))
+ var x928 uint64
+ var x929 uint64
+ x928, x929 = bits.Add64(x874, x909, uint64(p521Uint1(x927)))
+ var x930 uint64
+ var x931 uint64
+ x930, x931 = bits.Add64(x876, x911, uint64(p521Uint1(x929)))
+ var x932 uint64
+ var x933 uint64
+ x933, x932 = bits.Mul64(x912, 0x1ff)
+ var x934 uint64
+ var x935 uint64
+ x935, x934 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x936 uint64
+ var x937 uint64
+ x937, x936 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x938 uint64
+ var x939 uint64
+ x939, x938 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x940 uint64
+ var x941 uint64
+ x941, x940 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x942 uint64
+ var x943 uint64
+ x943, x942 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x944 uint64
+ var x945 uint64
+ x945, x944 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x946 uint64
+ var x947 uint64
+ x947, x946 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x948 uint64
+ var x949 uint64
+ x949, x948 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x950 uint64
+ var x951 uint64
+ x950, x951 = bits.Add64(x949, x946, uint64(0x0))
+ var x952 uint64
+ var x953 uint64
+ x952, x953 = bits.Add64(x947, x944, uint64(p521Uint1(x951)))
+ var x954 uint64
+ var x955 uint64
+ x954, x955 = bits.Add64(x945, x942, uint64(p521Uint1(x953)))
+ var x956 uint64
+ var x957 uint64
+ x956, x957 = bits.Add64(x943, x940, uint64(p521Uint1(x955)))
+ var x958 uint64
+ var x959 uint64
+ x958, x959 = bits.Add64(x941, x938, uint64(p521Uint1(x957)))
+ var x960 uint64
+ var x961 uint64
+ x960, x961 = bits.Add64(x939, x936, uint64(p521Uint1(x959)))
+ var x962 uint64
+ var x963 uint64
+ x962, x963 = bits.Add64(x937, x934, uint64(p521Uint1(x961)))
+ var x964 uint64
+ var x965 uint64
+ x964, x965 = bits.Add64(x935, x932, uint64(p521Uint1(x963)))
+ x966 := (uint64(p521Uint1(x965)) + x933)
+ var x968 uint64
+ _, x968 = bits.Add64(x912, x948, uint64(0x0))
+ var x969 uint64
+ var x970 uint64
+ x969, x970 = bits.Add64(x914, x950, uint64(p521Uint1(x968)))
+ var x971 uint64
+ var x972 uint64
+ x971, x972 = bits.Add64(x916, x952, uint64(p521Uint1(x970)))
+ var x973 uint64
+ var x974 uint64
+ x973, x974 = bits.Add64(x918, x954, uint64(p521Uint1(x972)))
+ var x975 uint64
+ var x976 uint64
+ x975, x976 = bits.Add64(x920, x956, uint64(p521Uint1(x974)))
+ var x977 uint64
+ var x978 uint64
+ x977, x978 = bits.Add64(x922, x958, uint64(p521Uint1(x976)))
+ var x979 uint64
+ var x980 uint64
+ x979, x980 = bits.Add64(x924, x960, uint64(p521Uint1(x978)))
+ var x981 uint64
+ var x982 uint64
+ x981, x982 = bits.Add64(x926, x962, uint64(p521Uint1(x980)))
+ var x983 uint64
+ var x984 uint64
+ x983, x984 = bits.Add64(x928, x964, uint64(p521Uint1(x982)))
+ var x985 uint64
+ var x986 uint64
+ x985, x986 = bits.Add64(x930, x966, uint64(p521Uint1(x984)))
+ x987 := (uint64(p521Uint1(x986)) + uint64(p521Uint1(x931)))
+ var x988 uint64
+ var x989 uint64
+ x988, x989 = bits.Sub64(x969, 0xffffffffffffffff, uint64(0x0))
+ var x990 uint64
+ var x991 uint64
+ x990, x991 = bits.Sub64(x971, 0xffffffffffffffff, uint64(p521Uint1(x989)))
+ var x992 uint64
+ var x993 uint64
+ x992, x993 = bits.Sub64(x973, 0xffffffffffffffff, uint64(p521Uint1(x991)))
+ var x994 uint64
+ var x995 uint64
+ x994, x995 = bits.Sub64(x975, 0xffffffffffffffff, uint64(p521Uint1(x993)))
+ var x996 uint64
+ var x997 uint64
+ x996, x997 = bits.Sub64(x977, 0xffffffffffffffff, uint64(p521Uint1(x995)))
+ var x998 uint64
+ var x999 uint64
+ x998, x999 = bits.Sub64(x979, 0xffffffffffffffff, uint64(p521Uint1(x997)))
+ var x1000 uint64
+ var x1001 uint64
+ x1000, x1001 = bits.Sub64(x981, 0xffffffffffffffff, uint64(p521Uint1(x999)))
+ var x1002 uint64
+ var x1003 uint64
+ x1002, x1003 = bits.Sub64(x983, 0xffffffffffffffff, uint64(p521Uint1(x1001)))
+ var x1004 uint64
+ var x1005 uint64
+ x1004, x1005 = bits.Sub64(x985, 0x1ff, uint64(p521Uint1(x1003)))
+ var x1007 uint64
+ _, x1007 = bits.Sub64(x987, uint64(0x0), uint64(p521Uint1(x1005)))
+ var x1008 uint64
+ p521CmovznzU64(&x1008, p521Uint1(x1007), x988, x969)
+ var x1009 uint64
+ p521CmovznzU64(&x1009, p521Uint1(x1007), x990, x971)
+ var x1010 uint64
+ p521CmovznzU64(&x1010, p521Uint1(x1007), x992, x973)
+ var x1011 uint64
+ p521CmovznzU64(&x1011, p521Uint1(x1007), x994, x975)
+ var x1012 uint64
+ p521CmovznzU64(&x1012, p521Uint1(x1007), x996, x977)
+ var x1013 uint64
+ p521CmovznzU64(&x1013, p521Uint1(x1007), x998, x979)
+ var x1014 uint64
+ p521CmovznzU64(&x1014, p521Uint1(x1007), x1000, x981)
+ var x1015 uint64
+ p521CmovznzU64(&x1015, p521Uint1(x1007), x1002, x983)
+ var x1016 uint64
+ p521CmovznzU64(&x1016, p521Uint1(x1007), x1004, x985)
+ out1[0] = x1008
+ out1[1] = x1009
+ out1[2] = x1010
+ out1[3] = x1011
+ out1[4] = x1012
+ out1[5] = x1013
+ out1[6] = x1014
+ out1[7] = x1015
+ out1[8] = x1016
+}
+
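+// Editorial note, not part of the fiat-crypto generated output: the long
+// carry chains above are an unrolled word-by-word Montgomery reduction for
+// m = 2^521 - 1, whose nine 64-bit limbs are eight words of
+// 0xffffffffffffffff plus the 9-bit top word 0x1ff. Because
+// m ≡ -1 (mod 2^64), the Montgomery constant m' = -m⁻¹ mod 2^64 is 1, so
+// each round multiplies the modulus limbs directly by the current low word
+// (x135, x246, x357, ...) with no separate m' multiplication. Schematically,
+// one reduction round computes:
+//
+//	q = t mod 2^64           // m' = 1, so q is just the low word of t
+//	t = (t + q*m) / 2^64     // exact: q*m ≡ -t (mod 2^64), so the low word cancels
+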
+// p521Add adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p521Add(out1 *p521MontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement, arg2 *p521MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(p521Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(p521Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(p521Uint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Add64(arg1[4], arg2[4], uint64(p521Uint1(x8)))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Add64(arg1[5], arg2[5], uint64(p521Uint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(arg1[6], arg2[6], uint64(p521Uint1(x12)))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(arg1[7], arg2[7], uint64(p521Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(arg1[8], arg2[8], uint64(p521Uint1(x16)))
+ var x19 uint64
+ var x20 uint64
+ x19, x20 = bits.Sub64(x1, 0xffffffffffffffff, uint64(0x0))
+ var x21 uint64
+ var x22 uint64
+ x21, x22 = bits.Sub64(x3, 0xffffffffffffffff, uint64(p521Uint1(x20)))
+ var x23 uint64
+ var x24 uint64
+ x23, x24 = bits.Sub64(x5, 0xffffffffffffffff, uint64(p521Uint1(x22)))
+ var x25 uint64
+ var x26 uint64
+ x25, x26 = bits.Sub64(x7, 0xffffffffffffffff, uint64(p521Uint1(x24)))
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Sub64(x9, 0xffffffffffffffff, uint64(p521Uint1(x26)))
+ var x29 uint64
+ var x30 uint64
+ x29, x30 = bits.Sub64(x11, 0xffffffffffffffff, uint64(p521Uint1(x28)))
+ var x31 uint64
+ var x32 uint64
+ x31, x32 = bits.Sub64(x13, 0xffffffffffffffff, uint64(p521Uint1(x30)))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Sub64(x15, 0xffffffffffffffff, uint64(p521Uint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Sub64(x17, 0x1ff, uint64(p521Uint1(x34)))
+ var x38 uint64
+ _, x38 = bits.Sub64(uint64(p521Uint1(x18)), uint64(0x0), uint64(p521Uint1(x36)))
+ var x39 uint64
+ p521CmovznzU64(&x39, p521Uint1(x38), x19, x1)
+ var x40 uint64
+ p521CmovznzU64(&x40, p521Uint1(x38), x21, x3)
+ var x41 uint64
+ p521CmovznzU64(&x41, p521Uint1(x38), x23, x5)
+ var x42 uint64
+ p521CmovznzU64(&x42, p521Uint1(x38), x25, x7)
+ var x43 uint64
+ p521CmovznzU64(&x43, p521Uint1(x38), x27, x9)
+ var x44 uint64
+ p521CmovznzU64(&x44, p521Uint1(x38), x29, x11)
+ var x45 uint64
+ p521CmovznzU64(&x45, p521Uint1(x38), x31, x13)
+ var x46 uint64
+ p521CmovznzU64(&x46, p521Uint1(x38), x33, x15)
+ var x47 uint64
+ p521CmovznzU64(&x47, p521Uint1(x38), x35, x17)
+ out1[0] = x39
+ out1[1] = x40
+ out1[2] = x41
+ out1[3] = x42
+ out1[4] = x43
+ out1[5] = x44
+ out1[6] = x45
+ out1[7] = x46
+ out1[8] = x47
+}
+
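+// Editorial sketch, not fiat-crypto output: p521Add computes the nine-limb
+// sum with a carry chain, speculatively subtracts m, and then uses the final
+// borrow x38 with p521CmovznzU64 to select, in constant time, either the raw
+// sum or the reduced difference. A hypothetical caller stays entirely in the
+// Montgomery domain:
+//
+//	var a, b, sum p521MontgomeryDomainFieldElement
+//	// ... fill a and b with reduced, Montgomery-encoded values ...
+//	p521Add(&sum, &a, &b) // sum = a + b mod m, still Montgomery-encoded
+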
+// p521Sub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p521Sub(out1 *p521MontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement, arg2 *p521MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(p521Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(p521Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(p521Uint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Sub64(arg1[4], arg2[4], uint64(p521Uint1(x8)))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Sub64(arg1[5], arg2[5], uint64(p521Uint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Sub64(arg1[6], arg2[6], uint64(p521Uint1(x12)))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Sub64(arg1[7], arg2[7], uint64(p521Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Sub64(arg1[8], arg2[8], uint64(p521Uint1(x16)))
+ var x19 uint64
+ p521CmovznzU64(&x19, p521Uint1(x18), uint64(0x0), 0xffffffffffffffff)
+ var x20 uint64
+ var x21 uint64
+ x20, x21 = bits.Add64(x1, x19, uint64(0x0))
+ var x22 uint64
+ var x23 uint64
+ x22, x23 = bits.Add64(x3, x19, uint64(p521Uint1(x21)))
+ var x24 uint64
+ var x25 uint64
+ x24, x25 = bits.Add64(x5, x19, uint64(p521Uint1(x23)))
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x7, x19, uint64(p521Uint1(x25)))
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x9, x19, uint64(p521Uint1(x27)))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x11, x19, uint64(p521Uint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x13, x19, uint64(p521Uint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x15, x19, uint64(p521Uint1(x33)))
+ var x36 uint64
+ x36, _ = bits.Add64(x17, (x19 & 0x1ff), uint64(p521Uint1(x35)))
+ out1[0] = x20
+ out1[1] = x22
+ out1[2] = x24
+ out1[3] = x26
+ out1[4] = x28
+ out1[5] = x30
+ out1[6] = x32
+ out1[7] = x34
+ out1[8] = x36
+}
+
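+// Editorial note, not fiat-crypto output: p521Sub uses the classic
+// borrow-mask trick. x19 is all-ones exactly when the raw subtraction
+// borrowed, so adding x19 to the low eight limbs and (x19 & 0x1ff) to the
+// top limb adds m back only in that case:
+//
+//	mask = borrow ? 0xffffffffffffffff : 0
+//	out  = (a - b) + (m & mask)   // branch-free correction when a < b
+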
+// p521SetOne returns the field element one in the Montgomery domain.
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = 1 mod m
+// 0 ≤ eval out1 < m
+func p521SetOne(out1 *p521MontgomeryDomainFieldElement) {
+ out1[0] = 0x80000000000000
+ out1[1] = uint64(0x0)
+ out1[2] = uint64(0x0)
+ out1[3] = uint64(0x0)
+ out1[4] = uint64(0x0)
+ out1[5] = uint64(0x0)
+ out1[6] = uint64(0x0)
+ out1[7] = uint64(0x0)
+ out1[8] = uint64(0x0)
+}
+
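+// Editorial note, not fiat-crypto output: the Montgomery form of 1 is
+// R mod m with R = 2^(9*64) = 2^576. Since m = 2^521 - 1 we have
+// 2^521 ≡ 1 (mod m), hence R mod m = 2^(576-521) = 2^55 = 0x80000000000000,
+// which is exactly the low limb written above; round-tripping through
+// p521FromMontgomery (below) recovers the plain value 1.
+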
+// p521FromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^9) mod m
+// 0 ≤ eval out1 < m
+func p521FromMontgomery(out1 *p521NonMontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement) {
+ x1 := arg1[0]
+ var x2 uint64
+ var x3 uint64
+ x3, x2 = bits.Mul64(x1, 0x1ff)
+ var x4 uint64
+ var x5 uint64
+ x5, x4 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x6 uint64
+ var x7 uint64
+ x7, x6 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x8 uint64
+ var x9 uint64
+ x9, x8 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x11, x10 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x12 uint64
+ var x13 uint64
+ x13, x12 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x14 uint64
+ var x15 uint64
+ x15, x14 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x16 uint64
+ var x17 uint64
+ x17, x16 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x18 uint64
+ var x19 uint64
+ x19, x18 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x20 uint64
+ var x21 uint64
+ x20, x21 = bits.Add64(x19, x16, uint64(0x0))
+ var x22 uint64
+ var x23 uint64
+ x22, x23 = bits.Add64(x17, x14, uint64(p521Uint1(x21)))
+ var x24 uint64
+ var x25 uint64
+ x24, x25 = bits.Add64(x15, x12, uint64(p521Uint1(x23)))
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x13, x10, uint64(p521Uint1(x25)))
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x11, x8, uint64(p521Uint1(x27)))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x9, x6, uint64(p521Uint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x7, x4, uint64(p521Uint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x5, x2, uint64(p521Uint1(x33)))
+ var x37 uint64
+ _, x37 = bits.Add64(x1, x18, uint64(0x0))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(uint64(0x0), x20, uint64(p521Uint1(x37)))
+ var x40 uint64
+ var x41 uint64
+ x40, x41 = bits.Add64(uint64(0x0), x22, uint64(p521Uint1(x39)))
+ var x42 uint64
+ var x43 uint64
+ x42, x43 = bits.Add64(uint64(0x0), x24, uint64(p521Uint1(x41)))
+ var x44 uint64
+ var x45 uint64
+ x44, x45 = bits.Add64(uint64(0x0), x26, uint64(p521Uint1(x43)))
+ var x46 uint64
+ var x47 uint64
+ x46, x47 = bits.Add64(uint64(0x0), x28, uint64(p521Uint1(x45)))
+ var x48 uint64
+ var x49 uint64
+ x48, x49 = bits.Add64(uint64(0x0), x30, uint64(p521Uint1(x47)))
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(uint64(0x0), x32, uint64(p521Uint1(x49)))
+ var x52 uint64
+ var x53 uint64
+ x52, x53 = bits.Add64(uint64(0x0), x34, uint64(p521Uint1(x51)))
+ var x54 uint64
+ var x55 uint64
+ x54, x55 = bits.Add64(x38, arg1[1], uint64(0x0))
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x40, uint64(0x0), uint64(p521Uint1(x55)))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x42, uint64(0x0), uint64(p521Uint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x44, uint64(0x0), uint64(p521Uint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x46, uint64(0x0), uint64(p521Uint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(x48, uint64(0x0), uint64(p521Uint1(x63)))
+ var x66 uint64
+ var x67 uint64
+ x66, x67 = bits.Add64(x50, uint64(0x0), uint64(p521Uint1(x65)))
+ var x68 uint64
+ var x69 uint64
+ x68, x69 = bits.Add64(x52, uint64(0x0), uint64(p521Uint1(x67)))
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x54, 0x1ff)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x74 uint64
+ var x75 uint64
+ x75, x74 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x76 uint64
+ var x77 uint64
+ x77, x76 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x78 uint64
+ var x79 uint64
+ x79, x78 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x80 uint64
+ var x81 uint64
+ x81, x80 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x82 uint64
+ var x83 uint64
+ x83, x82 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x84 uint64
+ var x85 uint64
+ x85, x84 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x86 uint64
+ var x87 uint64
+ x87, x86 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x88 uint64
+ var x89 uint64
+ x88, x89 = bits.Add64(x87, x84, uint64(0x0))
+ var x90 uint64
+ var x91 uint64
+ x90, x91 = bits.Add64(x85, x82, uint64(p521Uint1(x89)))
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x83, x80, uint64(p521Uint1(x91)))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x81, x78, uint64(p521Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x79, x76, uint64(p521Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x77, x74, uint64(p521Uint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x75, x72, uint64(p521Uint1(x99)))
+ var x102 uint64
+ var x103 uint64
+ x102, x103 = bits.Add64(x73, x70, uint64(p521Uint1(x101)))
+ var x105 uint64
+ _, x105 = bits.Add64(x54, x86, uint64(0x0))
+ var x106 uint64
+ var x107 uint64
+ x106, x107 = bits.Add64(x56, x88, uint64(p521Uint1(x105)))
+ var x108 uint64
+ var x109 uint64
+ x108, x109 = bits.Add64(x58, x90, uint64(p521Uint1(x107)))
+ var x110 uint64
+ var x111 uint64
+ x110, x111 = bits.Add64(x60, x92, uint64(p521Uint1(x109)))
+ var x112 uint64
+ var x113 uint64
+ x112, x113 = bits.Add64(x62, x94, uint64(p521Uint1(x111)))
+ var x114 uint64
+ var x115 uint64
+ x114, x115 = bits.Add64(x64, x96, uint64(p521Uint1(x113)))
+ var x116 uint64
+ var x117 uint64
+ x116, x117 = bits.Add64(x66, x98, uint64(p521Uint1(x115)))
+ var x118 uint64
+ var x119 uint64
+ x118, x119 = bits.Add64(x68, x100, uint64(p521Uint1(x117)))
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64((uint64(p521Uint1(x69)) + (uint64(p521Uint1(x53)) + (uint64(p521Uint1(x35)) + x3))), x102, uint64(p521Uint1(x119)))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x106, arg1[2], uint64(0x0))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x108, uint64(0x0), uint64(p521Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x110, uint64(0x0), uint64(p521Uint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x112, uint64(0x0), uint64(p521Uint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x114, uint64(0x0), uint64(p521Uint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x116, uint64(0x0), uint64(p521Uint1(x131)))
+ var x134 uint64
+ var x135 uint64
+ x134, x135 = bits.Add64(x118, uint64(0x0), uint64(p521Uint1(x133)))
+ var x136 uint64
+ var x137 uint64
+ x136, x137 = bits.Add64(x120, uint64(0x0), uint64(p521Uint1(x135)))
+ var x138 uint64
+ var x139 uint64
+ x139, x138 = bits.Mul64(x122, 0x1ff)
+ var x140 uint64
+ var x141 uint64
+ x141, x140 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x142 uint64
+ var x143 uint64
+ x143, x142 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x144 uint64
+ var x145 uint64
+ x145, x144 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x146 uint64
+ var x147 uint64
+ x147, x146 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x148 uint64
+ var x149 uint64
+ x149, x148 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x150 uint64
+ var x151 uint64
+ x151, x150 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x152 uint64
+ var x153 uint64
+ x153, x152 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x154 uint64
+ var x155 uint64
+ x155, x154 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x156 uint64
+ var x157 uint64
+ x156, x157 = bits.Add64(x155, x152, uint64(0x0))
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x153, x150, uint64(p521Uint1(x157)))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x151, x148, uint64(p521Uint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Add64(x149, x146, uint64(p521Uint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Add64(x147, x144, uint64(p521Uint1(x163)))
+ var x166 uint64
+ var x167 uint64
+ x166, x167 = bits.Add64(x145, x142, uint64(p521Uint1(x165)))
+ var x168 uint64
+ var x169 uint64
+ x168, x169 = bits.Add64(x143, x140, uint64(p521Uint1(x167)))
+ var x170 uint64
+ var x171 uint64
+ x170, x171 = bits.Add64(x141, x138, uint64(p521Uint1(x169)))
+ var x173 uint64
+ _, x173 = bits.Add64(x122, x154, uint64(0x0))
+ var x174 uint64
+ var x175 uint64
+ x174, x175 = bits.Add64(x124, x156, uint64(p521Uint1(x173)))
+ var x176 uint64
+ var x177 uint64
+ x176, x177 = bits.Add64(x126, x158, uint64(p521Uint1(x175)))
+ var x178 uint64
+ var x179 uint64
+ x178, x179 = bits.Add64(x128, x160, uint64(p521Uint1(x177)))
+ var x180 uint64
+ var x181 uint64
+ x180, x181 = bits.Add64(x130, x162, uint64(p521Uint1(x179)))
+ var x182 uint64
+ var x183 uint64
+ x182, x183 = bits.Add64(x132, x164, uint64(p521Uint1(x181)))
+ var x184 uint64
+ var x185 uint64
+ x184, x185 = bits.Add64(x134, x166, uint64(p521Uint1(x183)))
+ var x186 uint64
+ var x187 uint64
+ x186, x187 = bits.Add64(x136, x168, uint64(p521Uint1(x185)))
+ var x188 uint64
+ var x189 uint64
+ x188, x189 = bits.Add64((uint64(p521Uint1(x137)) + (uint64(p521Uint1(x121)) + (uint64(p521Uint1(x103)) + x71))), x170, uint64(p521Uint1(x187)))
+ var x190 uint64
+ var x191 uint64
+ x190, x191 = bits.Add64(x174, arg1[3], uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Add64(x176, uint64(0x0), uint64(p521Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Add64(x178, uint64(0x0), uint64(p521Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Add64(x180, uint64(0x0), uint64(p521Uint1(x195)))
+ var x198 uint64
+ var x199 uint64
+ x198, x199 = bits.Add64(x182, uint64(0x0), uint64(p521Uint1(x197)))
+ var x200 uint64
+ var x201 uint64
+ x200, x201 = bits.Add64(x184, uint64(0x0), uint64(p521Uint1(x199)))
+ var x202 uint64
+ var x203 uint64
+ x202, x203 = bits.Add64(x186, uint64(0x0), uint64(p521Uint1(x201)))
+ var x204 uint64
+ var x205 uint64
+ x204, x205 = bits.Add64(x188, uint64(0x0), uint64(p521Uint1(x203)))
+ var x206 uint64
+ var x207 uint64
+ x207, x206 = bits.Mul64(x190, 0x1ff)
+ var x208 uint64
+ var x209 uint64
+ x209, x208 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x210 uint64
+ var x211 uint64
+ x211, x210 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x212 uint64
+ var x213 uint64
+ x213, x212 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x214 uint64
+ var x215 uint64
+ x215, x214 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x216 uint64
+ var x217 uint64
+ x217, x216 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x218 uint64
+ var x219 uint64
+ x219, x218 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x220 uint64
+ var x221 uint64
+ x221, x220 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x222 uint64
+ var x223 uint64
+ x223, x222 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x224 uint64
+ var x225 uint64
+ x224, x225 = bits.Add64(x223, x220, uint64(0x0))
+ var x226 uint64
+ var x227 uint64
+ x226, x227 = bits.Add64(x221, x218, uint64(p521Uint1(x225)))
+ var x228 uint64
+ var x229 uint64
+ x228, x229 = bits.Add64(x219, x216, uint64(p521Uint1(x227)))
+ var x230 uint64
+ var x231 uint64
+ x230, x231 = bits.Add64(x217, x214, uint64(p521Uint1(x229)))
+ var x232 uint64
+ var x233 uint64
+ x232, x233 = bits.Add64(x215, x212, uint64(p521Uint1(x231)))
+ var x234 uint64
+ var x235 uint64
+ x234, x235 = bits.Add64(x213, x210, uint64(p521Uint1(x233)))
+ var x236 uint64
+ var x237 uint64
+ x236, x237 = bits.Add64(x211, x208, uint64(p521Uint1(x235)))
+ var x238 uint64
+ var x239 uint64
+ x238, x239 = bits.Add64(x209, x206, uint64(p521Uint1(x237)))
+ var x241 uint64
+ _, x241 = bits.Add64(x190, x222, uint64(0x0))
+ var x242 uint64
+ var x243 uint64
+ x242, x243 = bits.Add64(x192, x224, uint64(p521Uint1(x241)))
+ var x244 uint64
+ var x245 uint64
+ x244, x245 = bits.Add64(x194, x226, uint64(p521Uint1(x243)))
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x196, x228, uint64(p521Uint1(x245)))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x198, x230, uint64(p521Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x200, x232, uint64(p521Uint1(x249)))
+ var x252 uint64
+ var x253 uint64
+ x252, x253 = bits.Add64(x202, x234, uint64(p521Uint1(x251)))
+ var x254 uint64
+ var x255 uint64
+ x254, x255 = bits.Add64(x204, x236, uint64(p521Uint1(x253)))
+ var x256 uint64
+ var x257 uint64
+ x256, x257 = bits.Add64((uint64(p521Uint1(x205)) + (uint64(p521Uint1(x189)) + (uint64(p521Uint1(x171)) + x139))), x238, uint64(p521Uint1(x255)))
+ var x258 uint64
+ var x259 uint64
+ x258, x259 = bits.Add64(x242, arg1[4], uint64(0x0))
+ var x260 uint64
+ var x261 uint64
+ x260, x261 = bits.Add64(x244, uint64(0x0), uint64(p521Uint1(x259)))
+ var x262 uint64
+ var x263 uint64
+ x262, x263 = bits.Add64(x246, uint64(0x0), uint64(p521Uint1(x261)))
+ var x264 uint64
+ var x265 uint64
+ x264, x265 = bits.Add64(x248, uint64(0x0), uint64(p521Uint1(x263)))
+ var x266 uint64
+ var x267 uint64
+ x266, x267 = bits.Add64(x250, uint64(0x0), uint64(p521Uint1(x265)))
+ var x268 uint64
+ var x269 uint64
+ x268, x269 = bits.Add64(x252, uint64(0x0), uint64(p521Uint1(x267)))
+ var x270 uint64
+ var x271 uint64
+ x270, x271 = bits.Add64(x254, uint64(0x0), uint64(p521Uint1(x269)))
+ var x272 uint64
+ var x273 uint64
+ x272, x273 = bits.Add64(x256, uint64(0x0), uint64(p521Uint1(x271)))
+ var x274 uint64
+ var x275 uint64
+ x275, x274 = bits.Mul64(x258, 0x1ff)
+ var x276 uint64
+ var x277 uint64
+ x277, x276 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x278 uint64
+ var x279 uint64
+ x279, x278 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x280 uint64
+ var x281 uint64
+ x281, x280 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x282 uint64
+ var x283 uint64
+ x283, x282 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x284 uint64
+ var x285 uint64
+ x285, x284 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x286 uint64
+ var x287 uint64
+ x287, x286 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x288 uint64
+ var x289 uint64
+ x289, x288 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x290 uint64
+ var x291 uint64
+ x291, x290 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x292 uint64
+ var x293 uint64
+ x292, x293 = bits.Add64(x291, x288, uint64(0x0))
+ var x294 uint64
+ var x295 uint64
+ x294, x295 = bits.Add64(x289, x286, uint64(p521Uint1(x293)))
+ var x296 uint64
+ var x297 uint64
+ x296, x297 = bits.Add64(x287, x284, uint64(p521Uint1(x295)))
+ var x298 uint64
+ var x299 uint64
+ x298, x299 = bits.Add64(x285, x282, uint64(p521Uint1(x297)))
+ var x300 uint64
+ var x301 uint64
+ x300, x301 = bits.Add64(x283, x280, uint64(p521Uint1(x299)))
+ var x302 uint64
+ var x303 uint64
+ x302, x303 = bits.Add64(x281, x278, uint64(p521Uint1(x301)))
+ var x304 uint64
+ var x305 uint64
+ x304, x305 = bits.Add64(x279, x276, uint64(p521Uint1(x303)))
+ var x306 uint64
+ var x307 uint64
+ x306, x307 = bits.Add64(x277, x274, uint64(p521Uint1(x305)))
+ var x309 uint64
+ _, x309 = bits.Add64(x258, x290, uint64(0x0))
+ var x310 uint64
+ var x311 uint64
+ x310, x311 = bits.Add64(x260, x292, uint64(p521Uint1(x309)))
+ var x312 uint64
+ var x313 uint64
+ x312, x313 = bits.Add64(x262, x294, uint64(p521Uint1(x311)))
+ var x314 uint64
+ var x315 uint64
+ x314, x315 = bits.Add64(x264, x296, uint64(p521Uint1(x313)))
+ var x316 uint64
+ var x317 uint64
+ x316, x317 = bits.Add64(x266, x298, uint64(p521Uint1(x315)))
+ var x318 uint64
+ var x319 uint64
+ x318, x319 = bits.Add64(x268, x300, uint64(p521Uint1(x317)))
+ var x320 uint64
+ var x321 uint64
+ x320, x321 = bits.Add64(x270, x302, uint64(p521Uint1(x319)))
+ var x322 uint64
+ var x323 uint64
+ x322, x323 = bits.Add64(x272, x304, uint64(p521Uint1(x321)))
+ var x324 uint64
+ var x325 uint64
+ x324, x325 = bits.Add64((uint64(p521Uint1(x273)) + (uint64(p521Uint1(x257)) + (uint64(p521Uint1(x239)) + x207))), x306, uint64(p521Uint1(x323)))
+ var x326 uint64
+ var x327 uint64
+ x326, x327 = bits.Add64(x310, arg1[5], uint64(0x0))
+ var x328 uint64
+ var x329 uint64
+ x328, x329 = bits.Add64(x312, uint64(0x0), uint64(p521Uint1(x327)))
+ var x330 uint64
+ var x331 uint64
+ x330, x331 = bits.Add64(x314, uint64(0x0), uint64(p521Uint1(x329)))
+ var x332 uint64
+ var x333 uint64
+ x332, x333 = bits.Add64(x316, uint64(0x0), uint64(p521Uint1(x331)))
+ var x334 uint64
+ var x335 uint64
+ x334, x335 = bits.Add64(x318, uint64(0x0), uint64(p521Uint1(x333)))
+ var x336 uint64
+ var x337 uint64
+ x336, x337 = bits.Add64(x320, uint64(0x0), uint64(p521Uint1(x335)))
+ var x338 uint64
+ var x339 uint64
+ x338, x339 = bits.Add64(x322, uint64(0x0), uint64(p521Uint1(x337)))
+ var x340 uint64
+ var x341 uint64
+ x340, x341 = bits.Add64(x324, uint64(0x0), uint64(p521Uint1(x339)))
+ var x342 uint64
+ var x343 uint64
+ x343, x342 = bits.Mul64(x326, 0x1ff)
+ var x344 uint64
+ var x345 uint64
+ x345, x344 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x346 uint64
+ var x347 uint64
+ x347, x346 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x348 uint64
+ var x349 uint64
+ x349, x348 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x350 uint64
+ var x351 uint64
+ x351, x350 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x352 uint64
+ var x353 uint64
+ x353, x352 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x354 uint64
+ var x355 uint64
+ x355, x354 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x356 uint64
+ var x357 uint64
+ x357, x356 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x358 uint64
+ var x359 uint64
+ x359, x358 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x360 uint64
+ var x361 uint64
+ x360, x361 = bits.Add64(x359, x356, uint64(0x0))
+ var x362 uint64
+ var x363 uint64
+ x362, x363 = bits.Add64(x357, x354, uint64(p521Uint1(x361)))
+ var x364 uint64
+ var x365 uint64
+ x364, x365 = bits.Add64(x355, x352, uint64(p521Uint1(x363)))
+ var x366 uint64
+ var x367 uint64
+ x366, x367 = bits.Add64(x353, x350, uint64(p521Uint1(x365)))
+ var x368 uint64
+ var x369 uint64
+ x368, x369 = bits.Add64(x351, x348, uint64(p521Uint1(x367)))
+ var x370 uint64
+ var x371 uint64
+ x370, x371 = bits.Add64(x349, x346, uint64(p521Uint1(x369)))
+ var x372 uint64
+ var x373 uint64
+ x372, x373 = bits.Add64(x347, x344, uint64(p521Uint1(x371)))
+ var x374 uint64
+ var x375 uint64
+ x374, x375 = bits.Add64(x345, x342, uint64(p521Uint1(x373)))
+ var x377 uint64
+ _, x377 = bits.Add64(x326, x358, uint64(0x0))
+ var x378 uint64
+ var x379 uint64
+ x378, x379 = bits.Add64(x328, x360, uint64(p521Uint1(x377)))
+ var x380 uint64
+ var x381 uint64
+ x380, x381 = bits.Add64(x330, x362, uint64(p521Uint1(x379)))
+ var x382 uint64
+ var x383 uint64
+ x382, x383 = bits.Add64(x332, x364, uint64(p521Uint1(x381)))
+ var x384 uint64
+ var x385 uint64
+ x384, x385 = bits.Add64(x334, x366, uint64(p521Uint1(x383)))
+ var x386 uint64
+ var x387 uint64
+ x386, x387 = bits.Add64(x336, x368, uint64(p521Uint1(x385)))
+ var x388 uint64
+ var x389 uint64
+ x388, x389 = bits.Add64(x338, x370, uint64(p521Uint1(x387)))
+ var x390 uint64
+ var x391 uint64
+ x390, x391 = bits.Add64(x340, x372, uint64(p521Uint1(x389)))
+ var x392 uint64
+ var x393 uint64
+ x392, x393 = bits.Add64((uint64(p521Uint1(x341)) + (uint64(p521Uint1(x325)) + (uint64(p521Uint1(x307)) + x275))), x374, uint64(p521Uint1(x391)))
+ var x394 uint64
+ var x395 uint64
+ x394, x395 = bits.Add64(x378, arg1[6], uint64(0x0))
+ var x396 uint64
+ var x397 uint64
+ x396, x397 = bits.Add64(x380, uint64(0x0), uint64(p521Uint1(x395)))
+ var x398 uint64
+ var x399 uint64
+ x398, x399 = bits.Add64(x382, uint64(0x0), uint64(p521Uint1(x397)))
+ var x400 uint64
+ var x401 uint64
+ x400, x401 = bits.Add64(x384, uint64(0x0), uint64(p521Uint1(x399)))
+ var x402 uint64
+ var x403 uint64
+ x402, x403 = bits.Add64(x386, uint64(0x0), uint64(p521Uint1(x401)))
+ var x404 uint64
+ var x405 uint64
+ x404, x405 = bits.Add64(x388, uint64(0x0), uint64(p521Uint1(x403)))
+ var x406 uint64
+ var x407 uint64
+ x406, x407 = bits.Add64(x390, uint64(0x0), uint64(p521Uint1(x405)))
+ var x408 uint64
+ var x409 uint64
+ x408, x409 = bits.Add64(x392, uint64(0x0), uint64(p521Uint1(x407)))
+ var x410 uint64
+ var x411 uint64
+ x411, x410 = bits.Mul64(x394, 0x1ff)
+ var x412 uint64
+ var x413 uint64
+ x413, x412 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x414 uint64
+ var x415 uint64
+ x415, x414 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x416 uint64
+ var x417 uint64
+ x417, x416 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x418 uint64
+ var x419 uint64
+ x419, x418 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x420 uint64
+ var x421 uint64
+ x421, x420 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x422 uint64
+ var x423 uint64
+ x423, x422 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x424 uint64
+ var x425 uint64
+ x425, x424 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x426 uint64
+ var x427 uint64
+ x427, x426 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x428 uint64
+ var x429 uint64
+ x428, x429 = bits.Add64(x427, x424, uint64(0x0))
+ var x430 uint64
+ var x431 uint64
+ x430, x431 = bits.Add64(x425, x422, uint64(p521Uint1(x429)))
+ var x432 uint64
+ var x433 uint64
+ x432, x433 = bits.Add64(x423, x420, uint64(p521Uint1(x431)))
+ var x434 uint64
+ var x435 uint64
+ x434, x435 = bits.Add64(x421, x418, uint64(p521Uint1(x433)))
+ var x436 uint64
+ var x437 uint64
+ x436, x437 = bits.Add64(x419, x416, uint64(p521Uint1(x435)))
+ var x438 uint64
+ var x439 uint64
+ x438, x439 = bits.Add64(x417, x414, uint64(p521Uint1(x437)))
+ var x440 uint64
+ var x441 uint64
+ x440, x441 = bits.Add64(x415, x412, uint64(p521Uint1(x439)))
+ var x442 uint64
+ var x443 uint64
+ x442, x443 = bits.Add64(x413, x410, uint64(p521Uint1(x441)))
+ var x445 uint64
+ _, x445 = bits.Add64(x394, x426, uint64(0x0))
+ var x446 uint64
+ var x447 uint64
+ x446, x447 = bits.Add64(x396, x428, uint64(p521Uint1(x445)))
+ var x448 uint64
+ var x449 uint64
+ x448, x449 = bits.Add64(x398, x430, uint64(p521Uint1(x447)))
+ var x450 uint64
+ var x451 uint64
+ x450, x451 = bits.Add64(x400, x432, uint64(p521Uint1(x449)))
+ var x452 uint64
+ var x453 uint64
+ x452, x453 = bits.Add64(x402, x434, uint64(p521Uint1(x451)))
+ var x454 uint64
+ var x455 uint64
+ x454, x455 = bits.Add64(x404, x436, uint64(p521Uint1(x453)))
+ var x456 uint64
+ var x457 uint64
+ x456, x457 = bits.Add64(x406, x438, uint64(p521Uint1(x455)))
+ var x458 uint64
+ var x459 uint64
+ x458, x459 = bits.Add64(x408, x440, uint64(p521Uint1(x457)))
+ var x460 uint64
+ var x461 uint64
+ x460, x461 = bits.Add64((uint64(p521Uint1(x409)) + (uint64(p521Uint1(x393)) + (uint64(p521Uint1(x375)) + x343))), x442, uint64(p521Uint1(x459)))
+ var x462 uint64
+ var x463 uint64
+ x462, x463 = bits.Add64(x446, arg1[7], uint64(0x0))
+ var x464 uint64
+ var x465 uint64
+ x464, x465 = bits.Add64(x448, uint64(0x0), uint64(p521Uint1(x463)))
+ var x466 uint64
+ var x467 uint64
+ x466, x467 = bits.Add64(x450, uint64(0x0), uint64(p521Uint1(x465)))
+ var x468 uint64
+ var x469 uint64
+ x468, x469 = bits.Add64(x452, uint64(0x0), uint64(p521Uint1(x467)))
+ var x470 uint64
+ var x471 uint64
+ x470, x471 = bits.Add64(x454, uint64(0x0), uint64(p521Uint1(x469)))
+ var x472 uint64
+ var x473 uint64
+ x472, x473 = bits.Add64(x456, uint64(0x0), uint64(p521Uint1(x471)))
+ var x474 uint64
+ var x475 uint64
+ x474, x475 = bits.Add64(x458, uint64(0x0), uint64(p521Uint1(x473)))
+ var x476 uint64
+ var x477 uint64
+ x476, x477 = bits.Add64(x460, uint64(0x0), uint64(p521Uint1(x475)))
+ var x478 uint64
+ var x479 uint64
+ x479, x478 = bits.Mul64(x462, 0x1ff)
+ var x480 uint64
+ var x481 uint64
+ x481, x480 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x482 uint64
+ var x483 uint64
+ x483, x482 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x484 uint64
+ var x485 uint64
+ x485, x484 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x486 uint64
+ var x487 uint64
+ x487, x486 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x488 uint64
+ var x489 uint64
+ x489, x488 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x490 uint64
+ var x491 uint64
+ x491, x490 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x492 uint64
+ var x493 uint64
+ x493, x492 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x494 uint64
+ var x495 uint64
+ x495, x494 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x496 uint64
+ var x497 uint64
+ x496, x497 = bits.Add64(x495, x492, uint64(0x0))
+ var x498 uint64
+ var x499 uint64
+ x498, x499 = bits.Add64(x493, x490, uint64(p521Uint1(x497)))
+ var x500 uint64
+ var x501 uint64
+ x500, x501 = bits.Add64(x491, x488, uint64(p521Uint1(x499)))
+ var x502 uint64
+ var x503 uint64
+ x502, x503 = bits.Add64(x489, x486, uint64(p521Uint1(x501)))
+ var x504 uint64
+ var x505 uint64
+ x504, x505 = bits.Add64(x487, x484, uint64(p521Uint1(x503)))
+ var x506 uint64
+ var x507 uint64
+ x506, x507 = bits.Add64(x485, x482, uint64(p521Uint1(x505)))
+ var x508 uint64
+ var x509 uint64
+ x508, x509 = bits.Add64(x483, x480, uint64(p521Uint1(x507)))
+ var x510 uint64
+ var x511 uint64
+ x510, x511 = bits.Add64(x481, x478, uint64(p521Uint1(x509)))
+ var x513 uint64
+ _, x513 = bits.Add64(x462, x494, uint64(0x0))
+ var x514 uint64
+ var x515 uint64
+ x514, x515 = bits.Add64(x464, x496, uint64(p521Uint1(x513)))
+ var x516 uint64
+ var x517 uint64
+ x516, x517 = bits.Add64(x466, x498, uint64(p521Uint1(x515)))
+ var x518 uint64
+ var x519 uint64
+ x518, x519 = bits.Add64(x468, x500, uint64(p521Uint1(x517)))
+ var x520 uint64
+ var x521 uint64
+ x520, x521 = bits.Add64(x470, x502, uint64(p521Uint1(x519)))
+ var x522 uint64
+ var x523 uint64
+ x522, x523 = bits.Add64(x472, x504, uint64(p521Uint1(x521)))
+ var x524 uint64
+ var x525 uint64
+ x524, x525 = bits.Add64(x474, x506, uint64(p521Uint1(x523)))
+ var x526 uint64
+ var x527 uint64
+ x526, x527 = bits.Add64(x476, x508, uint64(p521Uint1(x525)))
+ var x528 uint64
+ var x529 uint64
+ x528, x529 = bits.Add64((uint64(p521Uint1(x477)) + (uint64(p521Uint1(x461)) + (uint64(p521Uint1(x443)) + x411))), x510, uint64(p521Uint1(x527)))
+ var x530 uint64
+ var x531 uint64
+ x530, x531 = bits.Add64(x514, arg1[8], uint64(0x0))
+ var x532 uint64
+ var x533 uint64
+ x532, x533 = bits.Add64(x516, uint64(0x0), uint64(p521Uint1(x531)))
+ var x534 uint64
+ var x535 uint64
+ x534, x535 = bits.Add64(x518, uint64(0x0), uint64(p521Uint1(x533)))
+ var x536 uint64
+ var x537 uint64
+ x536, x537 = bits.Add64(x520, uint64(0x0), uint64(p521Uint1(x535)))
+ var x538 uint64
+ var x539 uint64
+ x538, x539 = bits.Add64(x522, uint64(0x0), uint64(p521Uint1(x537)))
+ var x540 uint64
+ var x541 uint64
+ x540, x541 = bits.Add64(x524, uint64(0x0), uint64(p521Uint1(x539)))
+ var x542 uint64
+ var x543 uint64
+ x542, x543 = bits.Add64(x526, uint64(0x0), uint64(p521Uint1(x541)))
+ var x544 uint64
+ var x545 uint64
+ x544, x545 = bits.Add64(x528, uint64(0x0), uint64(p521Uint1(x543)))
+ var x546 uint64
+ var x547 uint64
+ x547, x546 = bits.Mul64(x530, 0x1ff)
+ var x548 uint64
+ var x549 uint64
+ x549, x548 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x550 uint64
+ var x551 uint64
+ x551, x550 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x552 uint64
+ var x553 uint64
+ x553, x552 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x554 uint64
+ var x555 uint64
+ x555, x554 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x556 uint64
+ var x557 uint64
+ x557, x556 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x558 uint64
+ var x559 uint64
+ x559, x558 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x560 uint64
+ var x561 uint64
+ x561, x560 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x562 uint64
+ var x563 uint64
+ x563, x562 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x564 uint64
+ var x565 uint64
+ x564, x565 = bits.Add64(x563, x560, uint64(0x0))
+ var x566 uint64
+ var x567 uint64
+ x566, x567 = bits.Add64(x561, x558, uint64(p521Uint1(x565)))
+ var x568 uint64
+ var x569 uint64
+ x568, x569 = bits.Add64(x559, x556, uint64(p521Uint1(x567)))
+ var x570 uint64
+ var x571 uint64
+ x570, x571 = bits.Add64(x557, x554, uint64(p521Uint1(x569)))
+ var x572 uint64
+ var x573 uint64
+ x572, x573 = bits.Add64(x555, x552, uint64(p521Uint1(x571)))
+ var x574 uint64
+ var x575 uint64
+ x574, x575 = bits.Add64(x553, x550, uint64(p521Uint1(x573)))
+ var x576 uint64
+ var x577 uint64
+ x576, x577 = bits.Add64(x551, x548, uint64(p521Uint1(x575)))
+ var x578 uint64
+ var x579 uint64
+ x578, x579 = bits.Add64(x549, x546, uint64(p521Uint1(x577)))
+ var x581 uint64
+ _, x581 = bits.Add64(x530, x562, uint64(0x0))
+ var x582 uint64
+ var x583 uint64
+ x582, x583 = bits.Add64(x532, x564, uint64(p521Uint1(x581)))
+ var x584 uint64
+ var x585 uint64
+ x584, x585 = bits.Add64(x534, x566, uint64(p521Uint1(x583)))
+ var x586 uint64
+ var x587 uint64
+ x586, x587 = bits.Add64(x536, x568, uint64(p521Uint1(x585)))
+ var x588 uint64
+ var x589 uint64
+ x588, x589 = bits.Add64(x538, x570, uint64(p521Uint1(x587)))
+ var x590 uint64
+ var x591 uint64
+ x590, x591 = bits.Add64(x540, x572, uint64(p521Uint1(x589)))
+ var x592 uint64
+ var x593 uint64
+ x592, x593 = bits.Add64(x542, x574, uint64(p521Uint1(x591)))
+ var x594 uint64
+ var x595 uint64
+ x594, x595 = bits.Add64(x544, x576, uint64(p521Uint1(x593)))
+ var x596 uint64
+ var x597 uint64
+ x596, x597 = bits.Add64((uint64(p521Uint1(x545)) + (uint64(p521Uint1(x529)) + (uint64(p521Uint1(x511)) + x479))), x578, uint64(p521Uint1(x595)))
+ x598 := (uint64(p521Uint1(x597)) + (uint64(p521Uint1(x579)) + x547))
+ var x599 uint64
+ var x600 uint64
+ x599, x600 = bits.Sub64(x582, 0xffffffffffffffff, uint64(0x0))
+ var x601 uint64
+ var x602 uint64
+ x601, x602 = bits.Sub64(x584, 0xffffffffffffffff, uint64(p521Uint1(x600)))
+ var x603 uint64
+ var x604 uint64
+ x603, x604 = bits.Sub64(x586, 0xffffffffffffffff, uint64(p521Uint1(x602)))
+ var x605 uint64
+ var x606 uint64
+ x605, x606 = bits.Sub64(x588, 0xffffffffffffffff, uint64(p521Uint1(x604)))
+ var x607 uint64
+ var x608 uint64
+ x607, x608 = bits.Sub64(x590, 0xffffffffffffffff, uint64(p521Uint1(x606)))
+ var x609 uint64
+ var x610 uint64
+ x609, x610 = bits.Sub64(x592, 0xffffffffffffffff, uint64(p521Uint1(x608)))
+ var x611 uint64
+ var x612 uint64
+ x611, x612 = bits.Sub64(x594, 0xffffffffffffffff, uint64(p521Uint1(x610)))
+ var x613 uint64
+ var x614 uint64
+ x613, x614 = bits.Sub64(x596, 0xffffffffffffffff, uint64(p521Uint1(x612)))
+ var x615 uint64
+ var x616 uint64
+ x615, x616 = bits.Sub64(x598, 0x1ff, uint64(p521Uint1(x614)))
+ var x618 uint64
+ _, x618 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p521Uint1(x616)))
+ var x619 uint64
+ p521CmovznzU64(&x619, p521Uint1(x618), x599, x582)
+ var x620 uint64
+ p521CmovznzU64(&x620, p521Uint1(x618), x601, x584)
+ var x621 uint64
+ p521CmovznzU64(&x621, p521Uint1(x618), x603, x586)
+ var x622 uint64
+ p521CmovznzU64(&x622, p521Uint1(x618), x605, x588)
+ var x623 uint64
+ p521CmovznzU64(&x623, p521Uint1(x618), x607, x590)
+ var x624 uint64
+ p521CmovznzU64(&x624, p521Uint1(x618), x609, x592)
+ var x625 uint64
+ p521CmovznzU64(&x625, p521Uint1(x618), x611, x594)
+ var x626 uint64
+ p521CmovznzU64(&x626, p521Uint1(x618), x613, x596)
+ var x627 uint64
+ p521CmovznzU64(&x627, p521Uint1(x618), x615, x598)
+ out1[0] = x619
+ out1[1] = x620
+ out1[2] = x621
+ out1[3] = x622
+ out1[4] = x623
+ out1[5] = x624
+ out1[6] = x625
+ out1[7] = x626
+ out1[8] = x627
+}
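
The function ending above is the word-by-word Montgomery reduction for p = 2^521 - 1 (from its position in this file, plausibly p521FromMontgomery; its opening lines fall outside this hunk). Each round multiplies the running low limb by the nine limbs of p — eight 0xffffffffffffffff words and the 0x1ff top word — folds the products in with a carry chain, and the closing Sub64/p521CmovznzU64 run performs the final conditional subtraction of p. No separate quotient multiplier appears because p ≡ -1 (mod 2^64), so m' = -p^-1 mod 2^64 is 1 and the quotient digit is the low limb itself. A minimal math/big sketch of those parameters and of what such a routine computes, assuming the standard Montgomery construction:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	// The parameters the generated code hard-codes as limb constants:
	// p = 2^521 - 1 (eight 0xffffffffffffffff words plus a 0x1ff word),
	// R = 2^(9*64) = 2^576.
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 521), one)
	rInv := new(big.Int).ModInverse(new(big.Int).Lsh(one, 576), p)

	// m' = -p^-1 mod 2^64 is 1 because p = -1 mod 2^64, which is why each
	// round above multiplies the low limb directly by p's limbs.
	w := new(big.Int).Lsh(one, 64)
	mPrime := new(big.Int).Neg(new(big.Int).ModInverse(p, w))
	fmt.Println(mPrime.Mod(mPrime, w)) // 1

	// Montgomery reduction maps t to t * R^-1 mod p.
	t := big.NewInt(123456789)
	fmt.Println(new(big.Int).Mod(new(big.Int).Mul(t, rInv), p))
}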
+
+// p521ToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = eval arg1 mod m
+// 0 ≤ eval out1 < m
+func p521ToMontgomery(out1 *p521MontgomeryDomainFieldElement, arg1 *p521NonMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x2, x1 = bits.Mul64(arg1[0], 0x400000000000)
+ var x3 uint64
+ var x4 uint64
+ x4, x3 = bits.Mul64(arg1[1], 0x400000000000)
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(x2, x3, uint64(0x0))
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x1, 0x1ff)
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x13 uint64
+ var x14 uint64
+ x14, x13 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x15 uint64
+ var x16 uint64
+ x16, x15 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x17 uint64
+ var x18 uint64
+ x18, x17 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x19 uint64
+ var x20 uint64
+ x20, x19 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x21 uint64
+ var x22 uint64
+ x22, x21 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x23 uint64
+ var x24 uint64
+ x24, x23 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x25 uint64
+ var x26 uint64
+ x25, x26 = bits.Add64(x24, x21, uint64(0x0))
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Add64(x22, x19, uint64(p521Uint1(x26)))
+ var x29 uint64
+ var x30 uint64
+ x29, x30 = bits.Add64(x20, x17, uint64(p521Uint1(x28)))
+ var x31 uint64
+ var x32 uint64
+ x31, x32 = bits.Add64(x18, x15, uint64(p521Uint1(x30)))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x16, x13, uint64(p521Uint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x14, x11, uint64(p521Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x12, x9, uint64(p521Uint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x10, x7, uint64(p521Uint1(x38)))
+ var x42 uint64
+ _, x42 = bits.Add64(x1, x23, uint64(0x0))
+ var x43 uint64
+ var x44 uint64
+ x43, x44 = bits.Add64(x5, x25, uint64(p521Uint1(x42)))
+ var x45 uint64
+ var x46 uint64
+ x45, x46 = bits.Add64((uint64(p521Uint1(x6)) + x4), x27, uint64(p521Uint1(x44)))
+ var x47 uint64
+ var x48 uint64
+ x47, x48 = bits.Add64(uint64(0x0), x29, uint64(p521Uint1(x46)))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(uint64(0x0), x31, uint64(p521Uint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(uint64(0x0), x33, uint64(p521Uint1(x50)))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(uint64(0x0), x35, uint64(p521Uint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(uint64(0x0), x37, uint64(p521Uint1(x54)))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(uint64(0x0), x39, uint64(p521Uint1(x56)))
+ var x59 uint64
+ var x60 uint64
+ x60, x59 = bits.Mul64(arg1[2], 0x400000000000)
+ var x61 uint64
+ var x62 uint64
+ x61, x62 = bits.Add64(x45, x59, uint64(0x0))
+ var x63 uint64
+ var x64 uint64
+ x63, x64 = bits.Add64(x47, x60, uint64(p521Uint1(x62)))
+ var x65 uint64
+ var x66 uint64
+ x65, x66 = bits.Add64(x49, uint64(0x0), uint64(p521Uint1(x64)))
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x51, uint64(0x0), uint64(p521Uint1(x66)))
+ var x69 uint64
+ var x70 uint64
+ x69, x70 = bits.Add64(x53, uint64(0x0), uint64(p521Uint1(x68)))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x55, uint64(0x0), uint64(p521Uint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x57, uint64(0x0), uint64(p521Uint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x76, x75 = bits.Mul64(x43, 0x1ff)
+ var x77 uint64
+ var x78 uint64
+ x78, x77 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x79 uint64
+ var x80 uint64
+ x80, x79 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x81 uint64
+ var x82 uint64
+ x82, x81 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x83 uint64
+ var x84 uint64
+ x84, x83 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x85 uint64
+ var x86 uint64
+ x86, x85 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x87 uint64
+ var x88 uint64
+ x88, x87 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x89 uint64
+ var x90 uint64
+ x90, x89 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x91 uint64
+ var x92 uint64
+ x92, x91 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x93 uint64
+ var x94 uint64
+ x93, x94 = bits.Add64(x92, x89, uint64(0x0))
+ var x95 uint64
+ var x96 uint64
+ x95, x96 = bits.Add64(x90, x87, uint64(p521Uint1(x94)))
+ var x97 uint64
+ var x98 uint64
+ x97, x98 = bits.Add64(x88, x85, uint64(p521Uint1(x96)))
+ var x99 uint64
+ var x100 uint64
+ x99, x100 = bits.Add64(x86, x83, uint64(p521Uint1(x98)))
+ var x101 uint64
+ var x102 uint64
+ x101, x102 = bits.Add64(x84, x81, uint64(p521Uint1(x100)))
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Add64(x82, x79, uint64(p521Uint1(x102)))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x80, x77, uint64(p521Uint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x78, x75, uint64(p521Uint1(x106)))
+ var x110 uint64
+ _, x110 = bits.Add64(x43, x91, uint64(0x0))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x61, x93, uint64(p521Uint1(x110)))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x63, x95, uint64(p521Uint1(x112)))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x65, x97, uint64(p521Uint1(x114)))
+ var x117 uint64
+ var x118 uint64
+ x117, x118 = bits.Add64(x67, x99, uint64(p521Uint1(x116)))
+ var x119 uint64
+ var x120 uint64
+ x119, x120 = bits.Add64(x69, x101, uint64(p521Uint1(x118)))
+ var x121 uint64
+ var x122 uint64
+ x121, x122 = bits.Add64(x71, x103, uint64(p521Uint1(x120)))
+ var x123 uint64
+ var x124 uint64
+ x123, x124 = bits.Add64(x73, x105, uint64(p521Uint1(x122)))
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64((uint64(p521Uint1(x74)) + (uint64(p521Uint1(x58)) + (uint64(p521Uint1(x40)) + x8))), x107, uint64(p521Uint1(x124)))
+ var x127 uint64
+ var x128 uint64
+ x128, x127 = bits.Mul64(arg1[3], 0x400000000000)
+ var x129 uint64
+ var x130 uint64
+ x129, x130 = bits.Add64(x113, x127, uint64(0x0))
+ var x131 uint64
+ var x132 uint64
+ x131, x132 = bits.Add64(x115, x128, uint64(p521Uint1(x130)))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x117, uint64(0x0), uint64(p521Uint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x119, uint64(0x0), uint64(p521Uint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x121, uint64(0x0), uint64(p521Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x123, uint64(0x0), uint64(p521Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x125, uint64(0x0), uint64(p521Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x144, x143 = bits.Mul64(x111, 0x1ff)
+ var x145 uint64
+ var x146 uint64
+ x146, x145 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x147 uint64
+ var x148 uint64
+ x148, x147 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x149 uint64
+ var x150 uint64
+ x150, x149 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x151 uint64
+ var x152 uint64
+ x152, x151 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x153 uint64
+ var x154 uint64
+ x154, x153 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x155 uint64
+ var x156 uint64
+ x156, x155 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x157 uint64
+ var x158 uint64
+ x158, x157 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x159 uint64
+ var x160 uint64
+ x160, x159 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x161 uint64
+ var x162 uint64
+ x161, x162 = bits.Add64(x160, x157, uint64(0x0))
+ var x163 uint64
+ var x164 uint64
+ x163, x164 = bits.Add64(x158, x155, uint64(p521Uint1(x162)))
+ var x165 uint64
+ var x166 uint64
+ x165, x166 = bits.Add64(x156, x153, uint64(p521Uint1(x164)))
+ var x167 uint64
+ var x168 uint64
+ x167, x168 = bits.Add64(x154, x151, uint64(p521Uint1(x166)))
+ var x169 uint64
+ var x170 uint64
+ x169, x170 = bits.Add64(x152, x149, uint64(p521Uint1(x168)))
+ var x171 uint64
+ var x172 uint64
+ x171, x172 = bits.Add64(x150, x147, uint64(p521Uint1(x170)))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x148, x145, uint64(p521Uint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x146, x143, uint64(p521Uint1(x174)))
+ var x178 uint64
+ _, x178 = bits.Add64(x111, x159, uint64(0x0))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x129, x161, uint64(p521Uint1(x178)))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x131, x163, uint64(p521Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x133, x165, uint64(p521Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x135, x167, uint64(p521Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x137, x169, uint64(p521Uint1(x186)))
+ var x189 uint64
+ var x190 uint64
+ x189, x190 = bits.Add64(x139, x171, uint64(p521Uint1(x188)))
+ var x191 uint64
+ var x192 uint64
+ x191, x192 = bits.Add64(x141, x173, uint64(p521Uint1(x190)))
+ var x193 uint64
+ var x194 uint64
+ x193, x194 = bits.Add64((uint64(p521Uint1(x142)) + (uint64(p521Uint1(x126)) + (uint64(p521Uint1(x108)) + x76))), x175, uint64(p521Uint1(x192)))
+ var x195 uint64
+ var x196 uint64
+ x196, x195 = bits.Mul64(arg1[4], 0x400000000000)
+ var x197 uint64
+ var x198 uint64
+ x197, x198 = bits.Add64(x181, x195, uint64(0x0))
+ var x199 uint64
+ var x200 uint64
+ x199, x200 = bits.Add64(x183, x196, uint64(p521Uint1(x198)))
+ var x201 uint64
+ var x202 uint64
+ x201, x202 = bits.Add64(x185, uint64(0x0), uint64(p521Uint1(x200)))
+ var x203 uint64
+ var x204 uint64
+ x203, x204 = bits.Add64(x187, uint64(0x0), uint64(p521Uint1(x202)))
+ var x205 uint64
+ var x206 uint64
+ x205, x206 = bits.Add64(x189, uint64(0x0), uint64(p521Uint1(x204)))
+ var x207 uint64
+ var x208 uint64
+ x207, x208 = bits.Add64(x191, uint64(0x0), uint64(p521Uint1(x206)))
+ var x209 uint64
+ var x210 uint64
+ x209, x210 = bits.Add64(x193, uint64(0x0), uint64(p521Uint1(x208)))
+ var x211 uint64
+ var x212 uint64
+ x212, x211 = bits.Mul64(x179, 0x1ff)
+ var x213 uint64
+ var x214 uint64
+ x214, x213 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x215 uint64
+ var x216 uint64
+ x216, x215 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x217 uint64
+ var x218 uint64
+ x218, x217 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x219 uint64
+ var x220 uint64
+ x220, x219 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x221 uint64
+ var x222 uint64
+ x222, x221 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x229 uint64
+ var x230 uint64
+ x229, x230 = bits.Add64(x228, x225, uint64(0x0))
+ var x231 uint64
+ var x232 uint64
+ x231, x232 = bits.Add64(x226, x223, uint64(p521Uint1(x230)))
+ var x233 uint64
+ var x234 uint64
+ x233, x234 = bits.Add64(x224, x221, uint64(p521Uint1(x232)))
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x222, x219, uint64(p521Uint1(x234)))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x220, x217, uint64(p521Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x218, x215, uint64(p521Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x216, x213, uint64(p521Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x214, x211, uint64(p521Uint1(x242)))
+ var x246 uint64
+ _, x246 = bits.Add64(x179, x227, uint64(0x0))
+ var x247 uint64
+ var x248 uint64
+ x247, x248 = bits.Add64(x197, x229, uint64(p521Uint1(x246)))
+ var x249 uint64
+ var x250 uint64
+ x249, x250 = bits.Add64(x199, x231, uint64(p521Uint1(x248)))
+ var x251 uint64
+ var x252 uint64
+ x251, x252 = bits.Add64(x201, x233, uint64(p521Uint1(x250)))
+ var x253 uint64
+ var x254 uint64
+ x253, x254 = bits.Add64(x203, x235, uint64(p521Uint1(x252)))
+ var x255 uint64
+ var x256 uint64
+ x255, x256 = bits.Add64(x205, x237, uint64(p521Uint1(x254)))
+ var x257 uint64
+ var x258 uint64
+ x257, x258 = bits.Add64(x207, x239, uint64(p521Uint1(x256)))
+ var x259 uint64
+ var x260 uint64
+ x259, x260 = bits.Add64(x209, x241, uint64(p521Uint1(x258)))
+ var x261 uint64
+ var x262 uint64
+ x261, x262 = bits.Add64((uint64(p521Uint1(x210)) + (uint64(p521Uint1(x194)) + (uint64(p521Uint1(x176)) + x144))), x243, uint64(p521Uint1(x260)))
+ var x263 uint64
+ var x264 uint64
+ x264, x263 = bits.Mul64(arg1[5], 0x400000000000)
+ var x265 uint64
+ var x266 uint64
+ x265, x266 = bits.Add64(x249, x263, uint64(0x0))
+ var x267 uint64
+ var x268 uint64
+ x267, x268 = bits.Add64(x251, x264, uint64(p521Uint1(x266)))
+ var x269 uint64
+ var x270 uint64
+ x269, x270 = bits.Add64(x253, uint64(0x0), uint64(p521Uint1(x268)))
+ var x271 uint64
+ var x272 uint64
+ x271, x272 = bits.Add64(x255, uint64(0x0), uint64(p521Uint1(x270)))
+ var x273 uint64
+ var x274 uint64
+ x273, x274 = bits.Add64(x257, uint64(0x0), uint64(p521Uint1(x272)))
+ var x275 uint64
+ var x276 uint64
+ x275, x276 = bits.Add64(x259, uint64(0x0), uint64(p521Uint1(x274)))
+ var x277 uint64
+ var x278 uint64
+ x277, x278 = bits.Add64(x261, uint64(0x0), uint64(p521Uint1(x276)))
+ var x279 uint64
+ var x280 uint64
+ x280, x279 = bits.Mul64(x247, 0x1ff)
+ var x281 uint64
+ var x282 uint64
+ x282, x281 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x283 uint64
+ var x284 uint64
+ x284, x283 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x285 uint64
+ var x286 uint64
+ x286, x285 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x287 uint64
+ var x288 uint64
+ x288, x287 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x289 uint64
+ var x290 uint64
+ x290, x289 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x291 uint64
+ var x292 uint64
+ x292, x291 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x293 uint64
+ var x294 uint64
+ x294, x293 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x295 uint64
+ var x296 uint64
+ x296, x295 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x297 uint64
+ var x298 uint64
+ x297, x298 = bits.Add64(x296, x293, uint64(0x0))
+ var x299 uint64
+ var x300 uint64
+ x299, x300 = bits.Add64(x294, x291, uint64(p521Uint1(x298)))
+ var x301 uint64
+ var x302 uint64
+ x301, x302 = bits.Add64(x292, x289, uint64(p521Uint1(x300)))
+ var x303 uint64
+ var x304 uint64
+ x303, x304 = bits.Add64(x290, x287, uint64(p521Uint1(x302)))
+ var x305 uint64
+ var x306 uint64
+ x305, x306 = bits.Add64(x288, x285, uint64(p521Uint1(x304)))
+ var x307 uint64
+ var x308 uint64
+ x307, x308 = bits.Add64(x286, x283, uint64(p521Uint1(x306)))
+ var x309 uint64
+ var x310 uint64
+ x309, x310 = bits.Add64(x284, x281, uint64(p521Uint1(x308)))
+ var x311 uint64
+ var x312 uint64
+ x311, x312 = bits.Add64(x282, x279, uint64(p521Uint1(x310)))
+ var x314 uint64
+ _, x314 = bits.Add64(x247, x295, uint64(0x0))
+ var x315 uint64
+ var x316 uint64
+ x315, x316 = bits.Add64(x265, x297, uint64(p521Uint1(x314)))
+ var x317 uint64
+ var x318 uint64
+ x317, x318 = bits.Add64(x267, x299, uint64(p521Uint1(x316)))
+ var x319 uint64
+ var x320 uint64
+ x319, x320 = bits.Add64(x269, x301, uint64(p521Uint1(x318)))
+ var x321 uint64
+ var x322 uint64
+ x321, x322 = bits.Add64(x271, x303, uint64(p521Uint1(x320)))
+ var x323 uint64
+ var x324 uint64
+ x323, x324 = bits.Add64(x273, x305, uint64(p521Uint1(x322)))
+ var x325 uint64
+ var x326 uint64
+ x325, x326 = bits.Add64(x275, x307, uint64(p521Uint1(x324)))
+ var x327 uint64
+ var x328 uint64
+ x327, x328 = bits.Add64(x277, x309, uint64(p521Uint1(x326)))
+ var x329 uint64
+ var x330 uint64
+ x329, x330 = bits.Add64((uint64(p521Uint1(x278)) + (uint64(p521Uint1(x262)) + (uint64(p521Uint1(x244)) + x212))), x311, uint64(p521Uint1(x328)))
+ var x331 uint64
+ var x332 uint64
+ x332, x331 = bits.Mul64(arg1[6], 0x400000000000)
+ var x333 uint64
+ var x334 uint64
+ x333, x334 = bits.Add64(x317, x331, uint64(0x0))
+ var x335 uint64
+ var x336 uint64
+ x335, x336 = bits.Add64(x319, x332, uint64(p521Uint1(x334)))
+ var x337 uint64
+ var x338 uint64
+ x337, x338 = bits.Add64(x321, uint64(0x0), uint64(p521Uint1(x336)))
+ var x339 uint64
+ var x340 uint64
+ x339, x340 = bits.Add64(x323, uint64(0x0), uint64(p521Uint1(x338)))
+ var x341 uint64
+ var x342 uint64
+ x341, x342 = bits.Add64(x325, uint64(0x0), uint64(p521Uint1(x340)))
+ var x343 uint64
+ var x344 uint64
+ x343, x344 = bits.Add64(x327, uint64(0x0), uint64(p521Uint1(x342)))
+ var x345 uint64
+ var x346 uint64
+ x345, x346 = bits.Add64(x329, uint64(0x0), uint64(p521Uint1(x344)))
+ var x347 uint64
+ var x348 uint64
+ x348, x347 = bits.Mul64(x315, 0x1ff)
+ var x349 uint64
+ var x350 uint64
+ x350, x349 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x351 uint64
+ var x352 uint64
+ x352, x351 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x353 uint64
+ var x354 uint64
+ x354, x353 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x355 uint64
+ var x356 uint64
+ x356, x355 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x357 uint64
+ var x358 uint64
+ x358, x357 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x359 uint64
+ var x360 uint64
+ x360, x359 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x361 uint64
+ var x362 uint64
+ x362, x361 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x363 uint64
+ var x364 uint64
+ x364, x363 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x365 uint64
+ var x366 uint64
+ x365, x366 = bits.Add64(x364, x361, uint64(0x0))
+ var x367 uint64
+ var x368 uint64
+ x367, x368 = bits.Add64(x362, x359, uint64(p521Uint1(x366)))
+ var x369 uint64
+ var x370 uint64
+ x369, x370 = bits.Add64(x360, x357, uint64(p521Uint1(x368)))
+ var x371 uint64
+ var x372 uint64
+ x371, x372 = bits.Add64(x358, x355, uint64(p521Uint1(x370)))
+ var x373 uint64
+ var x374 uint64
+ x373, x374 = bits.Add64(x356, x353, uint64(p521Uint1(x372)))
+ var x375 uint64
+ var x376 uint64
+ x375, x376 = bits.Add64(x354, x351, uint64(p521Uint1(x374)))
+ var x377 uint64
+ var x378 uint64
+ x377, x378 = bits.Add64(x352, x349, uint64(p521Uint1(x376)))
+ var x379 uint64
+ var x380 uint64
+ x379, x380 = bits.Add64(x350, x347, uint64(p521Uint1(x378)))
+ var x382 uint64
+ _, x382 = bits.Add64(x315, x363, uint64(0x0))
+ var x383 uint64
+ var x384 uint64
+ x383, x384 = bits.Add64(x333, x365, uint64(p521Uint1(x382)))
+ var x385 uint64
+ var x386 uint64
+ x385, x386 = bits.Add64(x335, x367, uint64(p521Uint1(x384)))
+ var x387 uint64
+ var x388 uint64
+ x387, x388 = bits.Add64(x337, x369, uint64(p521Uint1(x386)))
+ var x389 uint64
+ var x390 uint64
+ x389, x390 = bits.Add64(x339, x371, uint64(p521Uint1(x388)))
+ var x391 uint64
+ var x392 uint64
+ x391, x392 = bits.Add64(x341, x373, uint64(p521Uint1(x390)))
+ var x393 uint64
+ var x394 uint64
+ x393, x394 = bits.Add64(x343, x375, uint64(p521Uint1(x392)))
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Add64(x345, x377, uint64(p521Uint1(x394)))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Add64((uint64(p521Uint1(x346)) + (uint64(p521Uint1(x330)) + (uint64(p521Uint1(x312)) + x280))), x379, uint64(p521Uint1(x396)))
+ var x399 uint64
+ var x400 uint64
+ x400, x399 = bits.Mul64(arg1[7], 0x400000000000)
+ var x401 uint64
+ var x402 uint64
+ x401, x402 = bits.Add64(x385, x399, uint64(0x0))
+ var x403 uint64
+ var x404 uint64
+ x403, x404 = bits.Add64(x387, x400, uint64(p521Uint1(x402)))
+ var x405 uint64
+ var x406 uint64
+ x405, x406 = bits.Add64(x389, uint64(0x0), uint64(p521Uint1(x404)))
+ var x407 uint64
+ var x408 uint64
+ x407, x408 = bits.Add64(x391, uint64(0x0), uint64(p521Uint1(x406)))
+ var x409 uint64
+ var x410 uint64
+ x409, x410 = bits.Add64(x393, uint64(0x0), uint64(p521Uint1(x408)))
+ var x411 uint64
+ var x412 uint64
+ x411, x412 = bits.Add64(x395, uint64(0x0), uint64(p521Uint1(x410)))
+ var x413 uint64
+ var x414 uint64
+ x413, x414 = bits.Add64(x397, uint64(0x0), uint64(p521Uint1(x412)))
+ var x415 uint64
+ var x416 uint64
+ x416, x415 = bits.Mul64(x383, 0x1ff)
+ var x417 uint64
+ var x418 uint64
+ x418, x417 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x419 uint64
+ var x420 uint64
+ x420, x419 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x421 uint64
+ var x422 uint64
+ x422, x421 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x423 uint64
+ var x424 uint64
+ x424, x423 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x425 uint64
+ var x426 uint64
+ x426, x425 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x427 uint64
+ var x428 uint64
+ x428, x427 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x429 uint64
+ var x430 uint64
+ x430, x429 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x431 uint64
+ var x432 uint64
+ x432, x431 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x433 uint64
+ var x434 uint64
+ x433, x434 = bits.Add64(x432, x429, uint64(0x0))
+ var x435 uint64
+ var x436 uint64
+ x435, x436 = bits.Add64(x430, x427, uint64(p521Uint1(x434)))
+ var x437 uint64
+ var x438 uint64
+ x437, x438 = bits.Add64(x428, x425, uint64(p521Uint1(x436)))
+ var x439 uint64
+ var x440 uint64
+ x439, x440 = bits.Add64(x426, x423, uint64(p521Uint1(x438)))
+ var x441 uint64
+ var x442 uint64
+ x441, x442 = bits.Add64(x424, x421, uint64(p521Uint1(x440)))
+ var x443 uint64
+ var x444 uint64
+ x443, x444 = bits.Add64(x422, x419, uint64(p521Uint1(x442)))
+ var x445 uint64
+ var x446 uint64
+ x445, x446 = bits.Add64(x420, x417, uint64(p521Uint1(x444)))
+ var x447 uint64
+ var x448 uint64
+ x447, x448 = bits.Add64(x418, x415, uint64(p521Uint1(x446)))
+ var x450 uint64
+ _, x450 = bits.Add64(x383, x431, uint64(0x0))
+ var x451 uint64
+ var x452 uint64
+ x451, x452 = bits.Add64(x401, x433, uint64(p521Uint1(x450)))
+ var x453 uint64
+ var x454 uint64
+ x453, x454 = bits.Add64(x403, x435, uint64(p521Uint1(x452)))
+ var x455 uint64
+ var x456 uint64
+ x455, x456 = bits.Add64(x405, x437, uint64(p521Uint1(x454)))
+ var x457 uint64
+ var x458 uint64
+ x457, x458 = bits.Add64(x407, x439, uint64(p521Uint1(x456)))
+ var x459 uint64
+ var x460 uint64
+ x459, x460 = bits.Add64(x409, x441, uint64(p521Uint1(x458)))
+ var x461 uint64
+ var x462 uint64
+ x461, x462 = bits.Add64(x411, x443, uint64(p521Uint1(x460)))
+ var x463 uint64
+ var x464 uint64
+ x463, x464 = bits.Add64(x413, x445, uint64(p521Uint1(x462)))
+ var x465 uint64
+ var x466 uint64
+ x465, x466 = bits.Add64((uint64(p521Uint1(x414)) + (uint64(p521Uint1(x398)) + (uint64(p521Uint1(x380)) + x348))), x447, uint64(p521Uint1(x464)))
+ var x467 uint64
+ var x468 uint64
+ x468, x467 = bits.Mul64(arg1[8], 0x400000000000)
+ var x469 uint64
+ var x470 uint64
+ x469, x470 = bits.Add64(x453, x467, uint64(0x0))
+ var x471 uint64
+ var x472 uint64
+ x471, x472 = bits.Add64(x455, x468, uint64(p521Uint1(x470)))
+ var x473 uint64
+ var x474 uint64
+ x473, x474 = bits.Add64(x457, uint64(0x0), uint64(p521Uint1(x472)))
+ var x475 uint64
+ var x476 uint64
+ x475, x476 = bits.Add64(x459, uint64(0x0), uint64(p521Uint1(x474)))
+ var x477 uint64
+ var x478 uint64
+ x477, x478 = bits.Add64(x461, uint64(0x0), uint64(p521Uint1(x476)))
+ var x479 uint64
+ var x480 uint64
+ x479, x480 = bits.Add64(x463, uint64(0x0), uint64(p521Uint1(x478)))
+ var x481 uint64
+ var x482 uint64
+ x481, x482 = bits.Add64(x465, uint64(0x0), uint64(p521Uint1(x480)))
+ var x483 uint64
+ var x484 uint64
+ x484, x483 = bits.Mul64(x451, 0x1ff)
+ var x485 uint64
+ var x486 uint64
+ x486, x485 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x487 uint64
+ var x488 uint64
+ x488, x487 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x489 uint64
+ var x490 uint64
+ x490, x489 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x491 uint64
+ var x492 uint64
+ x492, x491 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x493 uint64
+ var x494 uint64
+ x494, x493 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x495 uint64
+ var x496 uint64
+ x496, x495 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x497 uint64
+ var x498 uint64
+ x498, x497 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x499 uint64
+ var x500 uint64
+ x500, x499 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x501 uint64
+ var x502 uint64
+ x501, x502 = bits.Add64(x500, x497, uint64(0x0))
+ var x503 uint64
+ var x504 uint64
+ x503, x504 = bits.Add64(x498, x495, uint64(p521Uint1(x502)))
+ var x505 uint64
+ var x506 uint64
+ x505, x506 = bits.Add64(x496, x493, uint64(p521Uint1(x504)))
+ var x507 uint64
+ var x508 uint64
+ x507, x508 = bits.Add64(x494, x491, uint64(p521Uint1(x506)))
+ var x509 uint64
+ var x510 uint64
+ x509, x510 = bits.Add64(x492, x489, uint64(p521Uint1(x508)))
+ var x511 uint64
+ var x512 uint64
+ x511, x512 = bits.Add64(x490, x487, uint64(p521Uint1(x510)))
+ var x513 uint64
+ var x514 uint64
+ x513, x514 = bits.Add64(x488, x485, uint64(p521Uint1(x512)))
+ var x515 uint64
+ var x516 uint64
+ x515, x516 = bits.Add64(x486, x483, uint64(p521Uint1(x514)))
+ var x518 uint64
+ _, x518 = bits.Add64(x451, x499, uint64(0x0))
+ var x519 uint64
+ var x520 uint64
+ x519, x520 = bits.Add64(x469, x501, uint64(p521Uint1(x518)))
+ var x521 uint64
+ var x522 uint64
+ x521, x522 = bits.Add64(x471, x503, uint64(p521Uint1(x520)))
+ var x523 uint64
+ var x524 uint64
+ x523, x524 = bits.Add64(x473, x505, uint64(p521Uint1(x522)))
+ var x525 uint64
+ var x526 uint64
+ x525, x526 = bits.Add64(x475, x507, uint64(p521Uint1(x524)))
+ var x527 uint64
+ var x528 uint64
+ x527, x528 = bits.Add64(x477, x509, uint64(p521Uint1(x526)))
+ var x529 uint64
+ var x530 uint64
+ x529, x530 = bits.Add64(x479, x511, uint64(p521Uint1(x528)))
+ var x531 uint64
+ var x532 uint64
+ x531, x532 = bits.Add64(x481, x513, uint64(p521Uint1(x530)))
+ var x533 uint64
+ var x534 uint64
+ x533, x534 = bits.Add64((uint64(p521Uint1(x482)) + (uint64(p521Uint1(x466)) + (uint64(p521Uint1(x448)) + x416))), x515, uint64(p521Uint1(x532)))
+ x535 := (uint64(p521Uint1(x534)) + (uint64(p521Uint1(x516)) + x484))
+ var x536 uint64
+ var x537 uint64
+ x536, x537 = bits.Sub64(x519, 0xffffffffffffffff, uint64(0x0))
+ var x538 uint64
+ var x539 uint64
+ x538, x539 = bits.Sub64(x521, 0xffffffffffffffff, uint64(p521Uint1(x537)))
+ var x540 uint64
+ var x541 uint64
+ x540, x541 = bits.Sub64(x523, 0xffffffffffffffff, uint64(p521Uint1(x539)))
+ var x542 uint64
+ var x543 uint64
+ x542, x543 = bits.Sub64(x525, 0xffffffffffffffff, uint64(p521Uint1(x541)))
+ var x544 uint64
+ var x545 uint64
+ x544, x545 = bits.Sub64(x527, 0xffffffffffffffff, uint64(p521Uint1(x543)))
+ var x546 uint64
+ var x547 uint64
+ x546, x547 = bits.Sub64(x529, 0xffffffffffffffff, uint64(p521Uint1(x545)))
+ var x548 uint64
+ var x549 uint64
+ x548, x549 = bits.Sub64(x531, 0xffffffffffffffff, uint64(p521Uint1(x547)))
+ var x550 uint64
+ var x551 uint64
+ x550, x551 = bits.Sub64(x533, 0xffffffffffffffff, uint64(p521Uint1(x549)))
+ var x552 uint64
+ var x553 uint64
+ x552, x553 = bits.Sub64(x535, 0x1ff, uint64(p521Uint1(x551)))
+ var x555 uint64
+ _, x555 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p521Uint1(x553)))
+ var x556 uint64
+ p521CmovznzU64(&x556, p521Uint1(x555), x536, x519)
+ var x557 uint64
+ p521CmovznzU64(&x557, p521Uint1(x555), x538, x521)
+ var x558 uint64
+ p521CmovznzU64(&x558, p521Uint1(x555), x540, x523)
+ var x559 uint64
+ p521CmovznzU64(&x559, p521Uint1(x555), x542, x525)
+ var x560 uint64
+ p521CmovznzU64(&x560, p521Uint1(x555), x544, x527)
+ var x561 uint64
+ p521CmovznzU64(&x561, p521Uint1(x555), x546, x529)
+ var x562 uint64
+ p521CmovznzU64(&x562, p521Uint1(x555), x548, x531)
+ var x563 uint64
+ p521CmovznzU64(&x563, p521Uint1(x555), x550, x533)
+ var x564 uint64
+ p521CmovznzU64(&x564, p521Uint1(x555), x552, x535)
+ out1[0] = x556
+ out1[1] = x557
+ out1[2] = x558
+ out1[3] = x559
+ out1[4] = x560
+ out1[5] = x561
+ out1[6] = x562
+ out1[7] = x563
+ out1[8] = x564
+}
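
p521ToMontgomery converts by multiplying the input by R^2 mod p and Montgomery-reducing, yielding arg1 * R mod p. For this prime, R^2 = 2^1152 ≡ 2^110 (mod 2^521 - 1), a value with a single nonzero 64-bit limb — 0x400000000000 = 2^46 at limb index 1 — which is why every arg1[i] above is multiplied by that one constant rather than by a full nine-limb value. A small math/big check of that identity:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 521), one)

	// R = 2^576, so R^2 = 2^1152 = 2^(2*521 + 110) = 2^110 (mod p).
	r2 := new(big.Int).Exp(big.NewInt(2), big.NewInt(1152), p)
	fmt.Println(r2.Cmp(new(big.Int).Lsh(one, 110)) == 0) // true

	// 2^110 has one nonzero 64-bit limb: value 2^46 at index 1, i.e. the
	// 0x400000000000 constant used throughout p521ToMontgomery above.
	fmt.Printf("%#x\n", new(big.Int).Rsh(r2, 64)) // 0x400000000000
}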
+
+// p521Selectznz is a multi-limb conditional select.
+//
+// Postconditions:
+//
+// eval out1 = (if arg1 = 0 then eval arg2 else eval arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+// arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+func p521Selectznz(out1 *[9]uint64, arg1 p521Uint1, arg2 *[9]uint64, arg3 *[9]uint64) {
+ var x1 uint64
+ p521CmovznzU64(&x1, arg1, arg2[0], arg3[0])
+ var x2 uint64
+ p521CmovznzU64(&x2, arg1, arg2[1], arg3[1])
+ var x3 uint64
+ p521CmovznzU64(&x3, arg1, arg2[2], arg3[2])
+ var x4 uint64
+ p521CmovznzU64(&x4, arg1, arg2[3], arg3[3])
+ var x5 uint64
+ p521CmovznzU64(&x5, arg1, arg2[4], arg3[4])
+ var x6 uint64
+ p521CmovznzU64(&x6, arg1, arg2[5], arg3[5])
+ var x7 uint64
+ p521CmovznzU64(&x7, arg1, arg2[6], arg3[6])
+ var x8 uint64
+ p521CmovznzU64(&x8, arg1, arg2[7], arg3[7])
+ var x9 uint64
+ p521CmovznzU64(&x9, arg1, arg2[8], arg3[8])
+ out1[0] = x1
+ out1[1] = x2
+ out1[2] = x3
+ out1[3] = x4
+ out1[4] = x5
+ out1[5] = x6
+ out1[6] = x7
+ out1[7] = x8
+ out1[8] = x9
+}
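
p521Selectznz simply applies p521CmovznzU64 limb by limb; the helper's body is defined earlier in this file and is not visible in this hunk. A sketch of the standard branch-free formulation such a helper uses — the mask trick is an assumption about its internals, though the behavior matches the postcondition above:

package main

import "fmt"

// cmovznzU64 selects ifZero when cond == 0 and ifNonzero otherwise, using a
// mask instead of a branch so the timing is independent of cond.
func cmovznzU64(cond uint64, ifZero, ifNonzero uint64) uint64 {
	mask := -(cond & 1) // all-zeros or all-ones
	return (ifNonzero & mask) | (ifZero &^ mask)
}

func main() {
	fmt.Println(cmovznzU64(0, 10, 20)) // 10
	fmt.Println(cmovznzU64(1, 10, 20)) // 20
}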
+
+// p521ToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..65]
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1ff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1]]
+func p521ToBytes(out1 *[66]uint8, arg1 *[9]uint64) {
+ x1 := arg1[8]
+ x2 := arg1[7]
+ x3 := arg1[6]
+ x4 := arg1[5]
+ x5 := arg1[4]
+ x6 := arg1[3]
+ x7 := arg1[2]
+ x8 := arg1[1]
+ x9 := arg1[0]
+ x10 := (uint8(x9) & 0xff)
+ x11 := (x9 >> 8)
+ x12 := (uint8(x11) & 0xff)
+ x13 := (x11 >> 8)
+ x14 := (uint8(x13) & 0xff)
+ x15 := (x13 >> 8)
+ x16 := (uint8(x15) & 0xff)
+ x17 := (x15 >> 8)
+ x18 := (uint8(x17) & 0xff)
+ x19 := (x17 >> 8)
+ x20 := (uint8(x19) & 0xff)
+ x21 := (x19 >> 8)
+ x22 := (uint8(x21) & 0xff)
+ x23 := uint8((x21 >> 8))
+ x24 := (uint8(x8) & 0xff)
+ x25 := (x8 >> 8)
+ x26 := (uint8(x25) & 0xff)
+ x27 := (x25 >> 8)
+ x28 := (uint8(x27) & 0xff)
+ x29 := (x27 >> 8)
+ x30 := (uint8(x29) & 0xff)
+ x31 := (x29 >> 8)
+ x32 := (uint8(x31) & 0xff)
+ x33 := (x31 >> 8)
+ x34 := (uint8(x33) & 0xff)
+ x35 := (x33 >> 8)
+ x36 := (uint8(x35) & 0xff)
+ x37 := uint8((x35 >> 8))
+ x38 := (uint8(x7) & 0xff)
+ x39 := (x7 >> 8)
+ x40 := (uint8(x39) & 0xff)
+ x41 := (x39 >> 8)
+ x42 := (uint8(x41) & 0xff)
+ x43 := (x41 >> 8)
+ x44 := (uint8(x43) & 0xff)
+ x45 := (x43 >> 8)
+ x46 := (uint8(x45) & 0xff)
+ x47 := (x45 >> 8)
+ x48 := (uint8(x47) & 0xff)
+ x49 := (x47 >> 8)
+ x50 := (uint8(x49) & 0xff)
+ x51 := uint8((x49 >> 8))
+ x52 := (uint8(x6) & 0xff)
+ x53 := (x6 >> 8)
+ x54 := (uint8(x53) & 0xff)
+ x55 := (x53 >> 8)
+ x56 := (uint8(x55) & 0xff)
+ x57 := (x55 >> 8)
+ x58 := (uint8(x57) & 0xff)
+ x59 := (x57 >> 8)
+ x60 := (uint8(x59) & 0xff)
+ x61 := (x59 >> 8)
+ x62 := (uint8(x61) & 0xff)
+ x63 := (x61 >> 8)
+ x64 := (uint8(x63) & 0xff)
+ x65 := uint8((x63 >> 8))
+ x66 := (uint8(x5) & 0xff)
+ x67 := (x5 >> 8)
+ x68 := (uint8(x67) & 0xff)
+ x69 := (x67 >> 8)
+ x70 := (uint8(x69) & 0xff)
+ x71 := (x69 >> 8)
+ x72 := (uint8(x71) & 0xff)
+ x73 := (x71 >> 8)
+ x74 := (uint8(x73) & 0xff)
+ x75 := (x73 >> 8)
+ x76 := (uint8(x75) & 0xff)
+ x77 := (x75 >> 8)
+ x78 := (uint8(x77) & 0xff)
+ x79 := uint8((x77 >> 8))
+ x80 := (uint8(x4) & 0xff)
+ x81 := (x4 >> 8)
+ x82 := (uint8(x81) & 0xff)
+ x83 := (x81 >> 8)
+ x84 := (uint8(x83) & 0xff)
+ x85 := (x83 >> 8)
+ x86 := (uint8(x85) & 0xff)
+ x87 := (x85 >> 8)
+ x88 := (uint8(x87) & 0xff)
+ x89 := (x87 >> 8)
+ x90 := (uint8(x89) & 0xff)
+ x91 := (x89 >> 8)
+ x92 := (uint8(x91) & 0xff)
+ x93 := uint8((x91 >> 8))
+ x94 := (uint8(x3) & 0xff)
+ x95 := (x3 >> 8)
+ x96 := (uint8(x95) & 0xff)
+ x97 := (x95 >> 8)
+ x98 := (uint8(x97) & 0xff)
+ x99 := (x97 >> 8)
+ x100 := (uint8(x99) & 0xff)
+ x101 := (x99 >> 8)
+ x102 := (uint8(x101) & 0xff)
+ x103 := (x101 >> 8)
+ x104 := (uint8(x103) & 0xff)
+ x105 := (x103 >> 8)
+ x106 := (uint8(x105) & 0xff)
+ x107 := uint8((x105 >> 8))
+ x108 := (uint8(x2) & 0xff)
+ x109 := (x2 >> 8)
+ x110 := (uint8(x109) & 0xff)
+ x111 := (x109 >> 8)
+ x112 := (uint8(x111) & 0xff)
+ x113 := (x111 >> 8)
+ x114 := (uint8(x113) & 0xff)
+ x115 := (x113 >> 8)
+ x116 := (uint8(x115) & 0xff)
+ x117 := (x115 >> 8)
+ x118 := (uint8(x117) & 0xff)
+ x119 := (x117 >> 8)
+ x120 := (uint8(x119) & 0xff)
+ x121 := uint8((x119 >> 8))
+ x122 := (uint8(x1) & 0xff)
+ x123 := p521Uint1((x1 >> 8))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+ out1[4] = x18
+ out1[5] = x20
+ out1[6] = x22
+ out1[7] = x23
+ out1[8] = x24
+ out1[9] = x26
+ out1[10] = x28
+ out1[11] = x30
+ out1[12] = x32
+ out1[13] = x34
+ out1[14] = x36
+ out1[15] = x37
+ out1[16] = x38
+ out1[17] = x40
+ out1[18] = x42
+ out1[19] = x44
+ out1[20] = x46
+ out1[21] = x48
+ out1[22] = x50
+ out1[23] = x51
+ out1[24] = x52
+ out1[25] = x54
+ out1[26] = x56
+ out1[27] = x58
+ out1[28] = x60
+ out1[29] = x62
+ out1[30] = x64
+ out1[31] = x65
+ out1[32] = x66
+ out1[33] = x68
+ out1[34] = x70
+ out1[35] = x72
+ out1[36] = x74
+ out1[37] = x76
+ out1[38] = x78
+ out1[39] = x79
+ out1[40] = x80
+ out1[41] = x82
+ out1[42] = x84
+ out1[43] = x86
+ out1[44] = x88
+ out1[45] = x90
+ out1[46] = x92
+ out1[47] = x93
+ out1[48] = x94
+ out1[49] = x96
+ out1[50] = x98
+ out1[51] = x100
+ out1[52] = x102
+ out1[53] = x104
+ out1[54] = x106
+ out1[55] = x107
+ out1[56] = x108
+ out1[57] = x110
+ out1[58] = x112
+ out1[59] = x114
+ out1[60] = x116
+ out1[61] = x118
+ out1[62] = x120
+ out1[63] = x121
+ out1[64] = x122
+ out1[65] = uint8(x123)
+}
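
The long unrolled chain above is plain byte extraction from nine little-endian limbs: each of the first eight limbs contributes eight bytes, and the 9-bit top limb contributes byte 64 plus a single bit in byte 65. The same layout in loop form, as a hedged sketch rather than the generated code's fully unrolled style:

package main

import (
	"encoding/binary"
	"fmt"
)

// toBytesSketch flattens nine little-endian limbs to 66 bytes; the top limb
// holds at most 9 bits, so out[64] is a full byte and out[65] a single bit.
func toBytesSketch(limbs *[9]uint64) [66]byte {
	var out [66]byte
	for i := 0; i < 8; i++ {
		binary.LittleEndian.PutUint64(out[i*8:], limbs[i])
	}
	out[64] = byte(limbs[8])
	out[65] = byte(limbs[8] >> 8) // 0 or 1 for a reduced element
	return out
}

func main() {
	limbs := [9]uint64{1, 0, 0, 0, 0, 0, 0, 0, 0x1ff}
	b := toBytesSketch(&limbs)
	fmt.Println(b[0], b[64], b[65]) // 1 255 1
}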
+
+// p521FromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ bytes_eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = bytes_eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1ff]]
+func p521FromBytes(out1 *[9]uint64, arg1 *[66]uint8) {
+ x1 := (uint64(p521Uint1(arg1[65])) << 8)
+ x2 := arg1[64]
+ x3 := (uint64(arg1[63]) << 56)
+ x4 := (uint64(arg1[62]) << 48)
+ x5 := (uint64(arg1[61]) << 40)
+ x6 := (uint64(arg1[60]) << 32)
+ x7 := (uint64(arg1[59]) << 24)
+ x8 := (uint64(arg1[58]) << 16)
+ x9 := (uint64(arg1[57]) << 8)
+ x10 := arg1[56]
+ x11 := (uint64(arg1[55]) << 56)
+ x12 := (uint64(arg1[54]) << 48)
+ x13 := (uint64(arg1[53]) << 40)
+ x14 := (uint64(arg1[52]) << 32)
+ x15 := (uint64(arg1[51]) << 24)
+ x16 := (uint64(arg1[50]) << 16)
+ x17 := (uint64(arg1[49]) << 8)
+ x18 := arg1[48]
+ x19 := (uint64(arg1[47]) << 56)
+ x20 := (uint64(arg1[46]) << 48)
+ x21 := (uint64(arg1[45]) << 40)
+ x22 := (uint64(arg1[44]) << 32)
+ x23 := (uint64(arg1[43]) << 24)
+ x24 := (uint64(arg1[42]) << 16)
+ x25 := (uint64(arg1[41]) << 8)
+ x26 := arg1[40]
+ x27 := (uint64(arg1[39]) << 56)
+ x28 := (uint64(arg1[38]) << 48)
+ x29 := (uint64(arg1[37]) << 40)
+ x30 := (uint64(arg1[36]) << 32)
+ x31 := (uint64(arg1[35]) << 24)
+ x32 := (uint64(arg1[34]) << 16)
+ x33 := (uint64(arg1[33]) << 8)
+ x34 := arg1[32]
+ x35 := (uint64(arg1[31]) << 56)
+ x36 := (uint64(arg1[30]) << 48)
+ x37 := (uint64(arg1[29]) << 40)
+ x38 := (uint64(arg1[28]) << 32)
+ x39 := (uint64(arg1[27]) << 24)
+ x40 := (uint64(arg1[26]) << 16)
+ x41 := (uint64(arg1[25]) << 8)
+ x42 := arg1[24]
+ x43 := (uint64(arg1[23]) << 56)
+ x44 := (uint64(arg1[22]) << 48)
+ x45 := (uint64(arg1[21]) << 40)
+ x46 := (uint64(arg1[20]) << 32)
+ x47 := (uint64(arg1[19]) << 24)
+ x48 := (uint64(arg1[18]) << 16)
+ x49 := (uint64(arg1[17]) << 8)
+ x50 := arg1[16]
+ x51 := (uint64(arg1[15]) << 56)
+ x52 := (uint64(arg1[14]) << 48)
+ x53 := (uint64(arg1[13]) << 40)
+ x54 := (uint64(arg1[12]) << 32)
+ x55 := (uint64(arg1[11]) << 24)
+ x56 := (uint64(arg1[10]) << 16)
+ x57 := (uint64(arg1[9]) << 8)
+ x58 := arg1[8]
+ x59 := (uint64(arg1[7]) << 56)
+ x60 := (uint64(arg1[6]) << 48)
+ x61 := (uint64(arg1[5]) << 40)
+ x62 := (uint64(arg1[4]) << 32)
+ x63 := (uint64(arg1[3]) << 24)
+ x64 := (uint64(arg1[2]) << 16)
+ x65 := (uint64(arg1[1]) << 8)
+ x66 := arg1[0]
+ x67 := (x65 + uint64(x66))
+ x68 := (x64 + x67)
+ x69 := (x63 + x68)
+ x70 := (x62 + x69)
+ x71 := (x61 + x70)
+ x72 := (x60 + x71)
+ x73 := (x59 + x72)
+ x74 := (x57 + uint64(x58))
+ x75 := (x56 + x74)
+ x76 := (x55 + x75)
+ x77 := (x54 + x76)
+ x78 := (x53 + x77)
+ x79 := (x52 + x78)
+ x80 := (x51 + x79)
+ x81 := (x49 + uint64(x50))
+ x82 := (x48 + x81)
+ x83 := (x47 + x82)
+ x84 := (x46 + x83)
+ x85 := (x45 + x84)
+ x86 := (x44 + x85)
+ x87 := (x43 + x86)
+ x88 := (x41 + uint64(x42))
+ x89 := (x40 + x88)
+ x90 := (x39 + x89)
+ x91 := (x38 + x90)
+ x92 := (x37 + x91)
+ x93 := (x36 + x92)
+ x94 := (x35 + x93)
+ x95 := (x33 + uint64(x34))
+ x96 := (x32 + x95)
+ x97 := (x31 + x96)
+ x98 := (x30 + x97)
+ x99 := (x29 + x98)
+ x100 := (x28 + x99)
+ x101 := (x27 + x100)
+ x102 := (x25 + uint64(x26))
+ x103 := (x24 + x102)
+ x104 := (x23 + x103)
+ x105 := (x22 + x104)
+ x106 := (x21 + x105)
+ x107 := (x20 + x106)
+ x108 := (x19 + x107)
+ x109 := (x17 + uint64(x18))
+ x110 := (x16 + x109)
+ x111 := (x15 + x110)
+ x112 := (x14 + x111)
+ x113 := (x13 + x112)
+ x114 := (x12 + x113)
+ x115 := (x11 + x114)
+ x116 := (x9 + uint64(x10))
+ x117 := (x8 + x116)
+ x118 := (x7 + x117)
+ x119 := (x6 + x118)
+ x120 := (x5 + x119)
+ x121 := (x4 + x120)
+ x122 := (x3 + x121)
+ x123 := (x1 + uint64(x2))
+ out1[0] = x73
+ out1[1] = x80
+ out1[2] = x87
+ out1[3] = x94
+ out1[4] = x101
+ out1[5] = x108
+ out1[6] = x115
+ out1[7] = x122
+ out1[8] = x123
+}
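+
+// Editor's sketch (hedged; not part of the fiat-crypto generated output):
+// p521FromBytes packs eight full 64-bit little-endian limbs from bytes
+// 0..63, plus a ninth 9-bit limb from byte 64 and the single valid bit of
+// byte 65. The hypothetical p521FromBytesReference below computes the same
+// result with loops instead of the flattened generated form.
+func p521FromBytesReference(out1 *[9]uint64, arg1 *[66]uint8) {
+	for i := 0; i < 8; i++ {
+		var limb uint64
+		for j := 7; j >= 0; j-- {
+			limb = limb<<8 | uint64(arg1[8*i+j]) // little-endian within the limb
+		}
+		out1[i] = limb
+	}
+	out1[8] = uint64(arg1[64]) | uint64(arg1[65])<<8 // top limb holds only 9 bits
+}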
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p521_invert.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p521_invert.go
index 407711af36..407711af36 100644
--- a/contrib/go/_std_1.18/src/crypto/elliptic/internal/fiat/p521_invert.go
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/fiat/p521_invert.go
diff --git a/contrib/go/_std_1.19/src/crypto/internal/nistec/nistec.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/nistec.go
new file mode 100644
index 0000000000..d898d409ca
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/nistec.go
@@ -0,0 +1,15 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package nistec implements the NIST P elliptic curves from FIPS 186-4.
+//
+// This package uses fiat-crypto or specialized assembly and Go code for its
+// backend field arithmetic (not math/big) and exposes constant-time, heap
+// allocation-free, byte slice-based safe APIs. Group operations use modern and
+// safe complete addition formulas where possible. The point at infinity is
+// handled and encoded according to SEC 1, Version 2.0, and invalid curve points
+// can't be represented.
+package nistec
+
+//go:generate go run generate.go
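+
+// A hedged usage sketch (editor's illustration; names are from this package):
+// callers decode, operate, and re-encode exclusively through byte slices,
+// never handling field elements directly, e.g.
+//
+//	p, err := NewP224Point().SetBytes(encoded)
+//	if err != nil {
+//		// reject the encoding
+//	}
+//	sum := NewP224Point().Add(p, p)
+//	out := sum.Bytes()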
diff --git a/contrib/go/_std_1.19/src/crypto/internal/nistec/p224.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/p224.go
new file mode 100644
index 0000000000..8d236b33d7
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/p224.go
@@ -0,0 +1,428 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package nistec
+
+import (
+ "crypto/internal/nistec/fiat"
+ "crypto/subtle"
+ "errors"
+ "sync"
+)
+
+var p224B, _ = new(fiat.P224Element).SetBytes([]byte{0xb4, 0x5, 0xa, 0x85, 0xc, 0x4, 0xb3, 0xab, 0xf5, 0x41, 0x32, 0x56, 0x50, 0x44, 0xb0, 0xb7, 0xd7, 0xbf, 0xd8, 0xba, 0x27, 0xb, 0x39, 0x43, 0x23, 0x55, 0xff, 0xb4})
+
+var p224G, _ = NewP224Point().SetBytes([]byte{0x4, 0xb7, 0xe, 0xc, 0xbd, 0x6b, 0xb4, 0xbf, 0x7f, 0x32, 0x13, 0x90, 0xb9, 0x4a, 0x3, 0xc1, 0xd3, 0x56, 0xc2, 0x11, 0x22, 0x34, 0x32, 0x80, 0xd6, 0x11, 0x5c, 0x1d, 0x21, 0xbd, 0x37, 0x63, 0x88, 0xb5, 0xf7, 0x23, 0xfb, 0x4c, 0x22, 0xdf, 0xe6, 0xcd, 0x43, 0x75, 0xa0, 0x5a, 0x7, 0x47, 0x64, 0x44, 0xd5, 0x81, 0x99, 0x85, 0x0, 0x7e, 0x34})
+
+// p224ElementLength is the length of an element of the base or scalar field,
+// which have the same byte length for all NIST P curves.
+const p224ElementLength = 28
+
+// P224Point is a P224 point. The zero value is NOT valid.
+type P224Point struct {
+ // The point is represented in projective coordinates (X:Y:Z),
+ // where x = X/Z and y = Y/Z.
+ x, y, z *fiat.P224Element
+}
+
+// NewP224Point returns a new P224Point representing the point at infinity.
+func NewP224Point() *P224Point {
+ return &P224Point{
+ x: new(fiat.P224Element),
+ y: new(fiat.P224Element).One(),
+ z: new(fiat.P224Element),
+ }
+}
+
+// NewP224Generator returns a new P224Point set to the canonical generator.
+func NewP224Generator() *P224Point {
+ return (&P224Point{
+ x: new(fiat.P224Element),
+ y: new(fiat.P224Element),
+ z: new(fiat.P224Element),
+ }).Set(p224G)
+}
+
+// Set sets p = q and returns p.
+func (p *P224Point) Set(q *P224Point) *P224Point {
+ p.x.Set(q.x)
+ p.y.Set(q.y)
+ p.z.Set(q.z)
+ return p
+}
+
+// SetBytes sets p to the compressed, uncompressed, or infinity value encoded in
+// b, as specified in SEC 1, Version 2.0, Section 2.3.4. If the point is not on
+// the curve, it returns nil and an error, and the receiver is unchanged.
+// Otherwise, it returns p.
+func (p *P224Point) SetBytes(b []byte) (*P224Point, error) {
+ switch {
+ // Point at infinity.
+ case len(b) == 1 && b[0] == 0:
+ return p.Set(NewP224Point()), nil
+
+ // Uncompressed form.
+ case len(b) == 1+2*p224ElementLength && b[0] == 4:
+ x, err := new(fiat.P224Element).SetBytes(b[1 : 1+p224ElementLength])
+ if err != nil {
+ return nil, err
+ }
+ y, err := new(fiat.P224Element).SetBytes(b[1+p224ElementLength:])
+ if err != nil {
+ return nil, err
+ }
+ if err := p224CheckOnCurve(x, y); err != nil {
+ return nil, err
+ }
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ // Compressed form.
+ case len(b) == 1+p224ElementLength && (b[0] == 2 || b[0] == 3):
+ x, err := new(fiat.P224Element).SetBytes(b[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ // y² = x³ - 3x + b
+ y := p224Polynomial(new(fiat.P224Element), x)
+ if !p224Sqrt(y, y) {
+ return nil, errors.New("invalid P224 compressed point encoding")
+ }
+
+ // Select the positive or negative root, as indicated by the least
+ // significant bit, based on the encoding type byte.
+ otherRoot := new(fiat.P224Element)
+ otherRoot.Sub(otherRoot, y)
+ cond := y.Bytes()[p224ElementLength-1]&1 ^ b[0]&1
+ y.Select(otherRoot, y, int(cond))
+
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ default:
+ return nil, errors.New("invalid P224 point encoding")
+ }
+}
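+
+// Editor's sketch (hedged; p224EncodingKind is a hypothetical helper, not
+// part of this file): SetBytes dispatches purely on the length and the type
+// byte, so an encoding can be classified without any field arithmetic.
+func p224EncodingKind(b []byte) string {
+	switch {
+	case len(b) == 1 && b[0] == 0:
+		return "infinity"
+	case len(b) == 1+2*p224ElementLength && b[0] == 4:
+		return "uncompressed"
+	case len(b) == 1+p224ElementLength && (b[0] == 2 || b[0] == 3):
+		return "compressed"
+	default:
+		return "invalid"
+	}
+}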
+
+// p224Polynomial sets y2 to x³ - 3x + b, and returns y2.
+func p224Polynomial(y2, x *fiat.P224Element) *fiat.P224Element {
+ y2.Square(x)
+ y2.Mul(y2, x)
+
+ threeX := new(fiat.P224Element).Add(x, x)
+ threeX.Add(threeX, x)
+
+ y2.Sub(y2, threeX)
+ return y2.Add(y2, p224B)
+}
+
+func p224CheckOnCurve(x, y *fiat.P224Element) error {
+ // y² = x³ - 3x + b
+ rhs := p224Polynomial(new(fiat.P224Element), x)
+ lhs := new(fiat.P224Element).Square(y)
+ if rhs.Equal(lhs) != 1 {
+ return errors.New("P224 point not on curve")
+ }
+ return nil
+}
+
+// Bytes returns the uncompressed or infinity encoding of p, as specified in
+// SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the point at
+// infinity is shorter than all other encodings.
+func (p *P224Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [1 + 2*p224ElementLength]byte
+ return p.bytes(&out)
+}
+
+func (p *P224Point) bytes(out *[1 + 2*p224ElementLength]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new(fiat.P224Element).Invert(p.z)
+ x := new(fiat.P224Element).Mul(p.x, zinv)
+ y := new(fiat.P224Element).Mul(p.y, zinv)
+
+ buf := append(out[:0], 4)
+ buf = append(buf, x.Bytes()...)
+ buf = append(buf, y.Bytes()...)
+ return buf
+}
+
+// BytesCompressed returns the compressed or infinity encoding of p, as
+// specified in SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the
+// point at infinity is shorter than all other encodings.
+func (p *P224Point) BytesCompressed() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [1 + p224ElementLength]byte
+ return p.bytesCompressed(&out)
+}
+
+func (p *P224Point) bytesCompressed(out *[1 + p224ElementLength]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new(fiat.P224Element).Invert(p.z)
+ x := new(fiat.P224Element).Mul(p.x, zinv)
+ y := new(fiat.P224Element).Mul(p.y, zinv)
+
+ // Encode the sign of the y coordinate (indicated by the least significant
+ // bit) as the encoding type (2 or 3).
+ buf := append(out[:0], 2)
+ buf[0] |= y.Bytes()[p224ElementLength-1] & 1
+ buf = append(buf, x.Bytes()...)
+ return buf
+}
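+
+// Editor's note on the outlining idiom used by Bytes and BytesCompressed
+// (hedged): the exported wrapper stays small enough to inline, so the
+// fixed-size array is allocated in the caller's frame and escape analysis
+// keeps it off the heap. A minimal sketch of the same pattern:
+//
+//	func Encode() []byte { // inlinable
+//		var out [64]byte
+//		return encode(&out) // out lives on the caller's stack
+//	}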
+
+// Add sets q = p1 + p2, and returns q. The points may overlap.
+func (q *P224Point) Add(p1, p2 *P224Point) *P224Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P224Element).Mul(p1.x, p2.x) // t0 := X1 * X2
+ t1 := new(fiat.P224Element).Mul(p1.y, p2.y) // t1 := Y1 * Y2
+ t2 := new(fiat.P224Element).Mul(p1.z, p2.z) // t2 := Z1 * Z2
+ t3 := new(fiat.P224Element).Add(p1.x, p1.y) // t3 := X1 + Y1
+ t4 := new(fiat.P224Element).Add(p2.x, p2.y) // t4 := X2 + Y2
+ t3.Mul(t3, t4) // t3 := t3 * t4
+ t4.Add(t0, t1) // t4 := t0 + t1
+ t3.Sub(t3, t4) // t3 := t3 - t4
+ t4.Add(p1.y, p1.z) // t4 := Y1 + Z1
+ x3 := new(fiat.P224Element).Add(p2.y, p2.z) // X3 := Y2 + Z2
+ t4.Mul(t4, x3) // t4 := t4 * X3
+ x3.Add(t1, t2) // X3 := t1 + t2
+ t4.Sub(t4, x3) // t4 := t4 - X3
+ x3.Add(p1.x, p1.z) // X3 := X1 + Z1
+ y3 := new(fiat.P224Element).Add(p2.x, p2.z) // Y3 := X2 + Z2
+ x3.Mul(x3, y3) // X3 := X3 * Y3
+ y3.Add(t0, t2) // Y3 := t0 + t2
+ y3.Sub(x3, y3) // Y3 := X3 - Y3
+ z3 := new(fiat.P224Element).Mul(p224B, t2) // Z3 := b * t2
+ x3.Sub(y3, z3) // X3 := Y3 - Z3
+ z3.Add(x3, x3) // Z3 := X3 + X3
+ x3.Add(x3, z3) // X3 := X3 + Z3
+ z3.Sub(t1, x3) // Z3 := t1 - X3
+ x3.Add(t1, x3) // X3 := t1 + X3
+ y3.Mul(p224B, y3) // Y3 := b * Y3
+ t1.Add(t2, t2) // t1 := t2 + t2
+ t2.Add(t1, t2) // t2 := t1 + t2
+ y3.Sub(y3, t2) // Y3 := Y3 - t2
+ y3.Sub(y3, t0) // Y3 := Y3 - t0
+ t1.Add(y3, y3) // t1 := Y3 + Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ t1.Add(t0, t0) // t1 := t0 + t0
+ t0.Add(t1, t0) // t0 := t1 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t1.Mul(t4, y3) // t1 := t4 * Y3
+ t2.Mul(t0, y3) // t2 := t0 * Y3
+ y3.Mul(x3, z3) // Y3 := X3 * Z3
+ y3.Add(y3, t2) // Y3 := Y3 + t2
+ x3.Mul(t3, x3) // X3 := t3 * X3
+ x3.Sub(x3, t1) // X3 := X3 - t1
+ z3.Mul(t4, z3) // Z3 := t4 * Z3
+ t1.Mul(t3, t0) // t1 := t3 * t0
+ z3.Add(z3, t1) // Z3 := Z3 + t1
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
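+
+// Editor's note (hedged): the formulas above are "complete": they are valid
+// for every input pair, including p1 == p2 and the point at infinity, so no
+// secret-dependent branching is needed, at the cost of a few extra
+// multiplications compared to dedicated doubling formulas.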
+
+// Double sets q = p + p, and returns q. The points may overlap.
+func (q *P224Point) Double(p *P224Point) *P224Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P224Element).Square(p.x) // t0 := X ^ 2
+ t1 := new(fiat.P224Element).Square(p.y) // t1 := Y ^ 2
+ t2 := new(fiat.P224Element).Square(p.z) // t2 := Z ^ 2
+ t3 := new(fiat.P224Element).Mul(p.x, p.y) // t3 := X * Y
+ t3.Add(t3, t3) // t3 := t3 + t3
+ z3 := new(fiat.P224Element).Mul(p.x, p.z) // Z3 := X * Z
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ y3 := new(fiat.P224Element).Mul(p224B, t2) // Y3 := b * t2
+ y3.Sub(y3, z3) // Y3 := Y3 - Z3
+ x3 := new(fiat.P224Element).Add(y3, y3) // X3 := Y3 + Y3
+ y3.Add(x3, y3) // Y3 := X3 + Y3
+ x3.Sub(t1, y3) // X3 := t1 - Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ y3.Mul(x3, y3) // Y3 := X3 * Y3
+ x3.Mul(x3, t3) // X3 := X3 * t3
+ t3.Add(t2, t2) // t3 := t2 + t2
+ t2.Add(t2, t3) // t2 := t2 + t3
+ z3.Mul(p224B, z3) // Z3 := b * Z3
+ z3.Sub(z3, t2) // Z3 := Z3 - t2
+ z3.Sub(z3, t0) // Z3 := Z3 - t0
+ t3.Add(z3, z3) // t3 := Z3 + Z3
+ z3.Add(z3, t3) // Z3 := Z3 + t3
+ t3.Add(t0, t0) // t3 := t0 + t0
+ t0.Add(t3, t0) // t0 := t3 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t0.Mul(t0, z3) // t0 := t0 * Z3
+ y3.Add(y3, t0) // Y3 := Y3 + t0
+ t0.Mul(p.y, p.z) // t0 := Y * Z
+ t0.Add(t0, t0) // t0 := t0 + t0
+ z3.Mul(t0, z3) // Z3 := t0 * Z3
+ x3.Sub(x3, z3) // X3 := X3 - Z3
+ z3.Mul(t0, t1) // Z3 := t0 * t1
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Select sets q to p1 if cond == 1, and to p2 if cond == 0.
+func (q *P224Point) Select(p1, p2 *P224Point, cond int) *P224Point {
+ q.x.Select(p1.x, p2.x, cond)
+ q.y.Select(p1.y, p2.y, cond)
+ q.z.Select(p1.z, p2.z, cond)
+ return q
+}
+
+// A p224Table holds the first 15 multiples of a point at offset -1, so [1]P
+// is at table[0], [15]P is at table[14], and [0]P is implicitly the identity
+// point.
+type p224Table [15]*P224Point
+
+// Select selects the n-th multiple of the table base point into p. It works in
+// constant time by iterating over every entry of the table. n must be in [0, 15].
+func (table *p224Table) Select(p *P224Point, n uint8) {
+ if n >= 16 {
+ panic("nistec: internal error: p224Table called with out-of-bounds value")
+ }
+ p.Set(NewP224Point())
+ for i := uint8(1); i < 16; i++ {
+ cond := subtle.ConstantTimeByteEq(i, n)
+ p.Select(table[i-1], p, cond)
+ }
+}
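+
+// Editor's sketch (hedged; constantTimeLookup is a hypothetical helper): the
+// loop above is the standard constant-time table scan, reading every entry
+// and keeping the one whose index matches. The same shape for plain uint64
+// values:
+func constantTimeLookup(table *[15]uint64, n uint8) uint64 {
+	var r uint64
+	for i := uint8(1); i < 16; i++ {
+		mask := -uint64(subtle.ConstantTimeByteEq(i, n)) // all ones on match, else zero
+		r |= table[i-1] & mask
+	}
+	return r
+}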
+
+// ScalarMult sets p = scalar * q, and returns p.
+func (p *P224Point) ScalarMult(q *P224Point, scalar []byte) (*P224Point, error) {
+ // Compute a p224Table for the base point q. The explicit NewP224Point
+ // calls get inlined, letting the allocations live on the stack.
+ var table = p224Table{NewP224Point(), NewP224Point(), NewP224Point(),
+ NewP224Point(), NewP224Point(), NewP224Point(), NewP224Point(),
+ NewP224Point(), NewP224Point(), NewP224Point(), NewP224Point(),
+ NewP224Point(), NewP224Point(), NewP224Point(), NewP224Point()}
+ table[0].Set(q)
+ for i := 1; i < 15; i += 2 {
+ table[i].Double(table[i/2])
+ table[i+1].Add(table[i], q)
+ }
+
+ // Instead of doing the classic double-and-add chain, we do it with a
+ // four-bit window: we double four times, and then add [0-15]P.
+ t := NewP224Point()
+ p.Set(NewP224Point())
+ for i, byte := range scalar {
+ // No need to double on the first iteration, as p is the identity at
+ // this point, and [N]∞ = ∞.
+ if i != 0 {
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ }
+
+ windowValue := byte >> 4
+ table.Select(t, windowValue)
+ p.Add(p, t)
+
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+
+ windowValue = byte & 0b1111
+ table.Select(t, windowValue)
+ p.Add(p, t)
+ }
+
+ return p, nil
+}
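+
+// Editor's note (hedged): with a four-bit window each scalar byte contributes
+// two digits, high nibble first; every digit costs four doublings (skipped
+// only for the leading digit, where p is still the identity) plus one
+// constant-time table lookup and addition, i.e. Horner's rule in base 16:
+//
+//	[s]Q = [((d₀·16 + d₁)·16 + d₂)·16 + ...]Q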
+
+var p224GeneratorTable *[p224ElementLength * 2]p224Table
+var p224GeneratorTableOnce sync.Once
+
+// generatorTable returns a sequence of p224Tables. The first table contains
+// multiples of G. Each successive table is the previous table doubled four
+// times.
+func (p *P224Point) generatorTable() *[p224ElementLength * 2]p224Table {
+ p224GeneratorTableOnce.Do(func() {
+ p224GeneratorTable = new([p224ElementLength * 2]p224Table)
+ base := NewP224Generator()
+ for i := 0; i < p224ElementLength*2; i++ {
+ p224GeneratorTable[i][0] = NewP224Point().Set(base)
+ for j := 1; j < 15; j++ {
+ p224GeneratorTable[i][j] = NewP224Point().Add(p224GeneratorTable[i][j-1], base)
+ }
+ base.Double(base)
+ base.Double(base)
+ base.Double(base)
+ base.Double(base)
+ }
+ })
+ return p224GeneratorTable
+}
+
+// ScalarBaseMult sets p = scalar * B, where B is the canonical generator, and
+// returns p.
+func (p *P224Point) ScalarBaseMult(scalar []byte) (*P224Point, error) {
+ if len(scalar) != p224ElementLength {
+ return nil, errors.New("invalid scalar length")
+ }
+ tables := p.generatorTable()
+
+ // This is also a scalar multiplication with a four-bit window like in
+ // ScalarMult, but in this case the doublings are precomputed. The value
+ // [windowValue]G added at iteration k would normally get doubled
+ // (totIterations-k)×4 times, but with a larger precomputation we can
+ // instead add [2^((totIterations-k)×4)][windowValue]G and avoid the
+ // doublings between iterations.
+ t := NewP224Point()
+ p.Set(NewP224Point())
+ tableIndex := len(tables) - 1
+ for _, byte := range scalar {
+ windowValue := byte >> 4
+ tables[tableIndex].Select(t, windowValue)
+ p.Add(p, t)
+ tableIndex--
+
+ windowValue = byte & 0b1111
+ tables[tableIndex].Select(t, windowValue)
+ p.Add(p, t)
+ tableIndex--
+ }
+
+ return p, nil
+}
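+
+// Editor's note (hedged): tables[i] holds multiples of [2^(4i)]G, and the
+// scalar nibble with weight 2^(4i) selects from exactly that table, so
+//
+//	[s]G = Σᵢ [dᵢ · 2^(4i)]G
+//
+// is assembled with additions only; all doublings were paid once, in
+// generatorTable.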
+
+// p224Sqrt sets e to a square root of x. If x is not a square, p224Sqrt returns
+// false and e is unchanged. e and x can overlap.
+func p224Sqrt(e, x *fiat.P224Element) (isSquare bool) {
+ candidate := new(fiat.P224Element)
+ p224SqrtCandidate(candidate, x)
+ square := new(fiat.P224Element).Square(candidate)
+ if square.Equal(x) != 1 {
+ return false
+ }
+ e.Set(candidate)
+ return true
+}
diff --git a/contrib/go/_std_1.19/src/crypto/internal/nistec/p224_sqrt.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/p224_sqrt.go
new file mode 100644
index 0000000000..9a35cea6aa
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/p224_sqrt.go
@@ -0,0 +1,132 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package nistec
+
+import (
+ "crypto/internal/nistec/fiat"
+ "sync"
+)
+
+var p224GG *[96]fiat.P224Element
+var p224GGOnce sync.Once
+
+var p224MinusOne = new(fiat.P224Element).Sub(
+ new(fiat.P224Element), new(fiat.P224Element).One())
+
+// p224SqrtCandidate sets r to a square root candidate for x. r and x must not overlap.
+func p224SqrtCandidate(r, x *fiat.P224Element) {
+ // Since p = 1 mod 4, we can't use the exponentiation by (p + 1) / 4 like
+ // for the other primes. Instead, implement a variation of Tonelli–Shanks.
+ // The constant-time implementation is adapted from Thomas Pornin's ecGFp5.
+ //
+ // https://github.com/pornin/ecgfp5/blob/82325b965/rust/src/field.rs#L337-L385
+
+ // p = q*2^n + 1 with q odd -> q = 2^128 - 1 and n = 96
+ // g^(2^n) = 1 -> g = 11 ^ q (where 11 is the smallest non-square)
+ // GG[j] = g^(2^j) for j = 0 to n-1
+
+ p224GGOnce.Do(func() {
+ p224GG = new([96]fiat.P224Element)
+ for i := range p224GG {
+ if i == 0 {
+ p224GG[i].SetBytes([]byte{0x6a, 0x0f, 0xec, 0x67,
+ 0x85, 0x98, 0xa7, 0x92, 0x0c, 0x55, 0xb2, 0xd4,
+ 0x0b, 0x2d, 0x6f, 0xfb, 0xbe, 0xa3, 0xd8, 0xce,
+ 0xf3, 0xfb, 0x36, 0x32, 0xdc, 0x69, 0x1b, 0x74})
+ } else {
+ p224GG[i].Square(&p224GG[i-1])
+ }
+ }
+ })
+
+ // r <- x^((q+1)/2) = x^(2^127)
+ // v <- x^q = x^(2^128-1)
+
+ // Compute x^(2^127-1) first.
+ //
+ // The sequence of 10 multiplications and 126 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain v0.4.0.
+ //
+ // _10 = 2*1
+ // _11 = 1 + _10
+ // _110 = 2*_11
+ // _111 = 1 + _110
+ // _111000 = _111 << 3
+ // _111111 = _111 + _111000
+ // _1111110 = 2*_111111
+ // _1111111 = 1 + _1111110
+ // x12 = _1111110 << 5 + _111111
+ // x24 = x12 << 12 + x12
+ // i36 = x24 << 7
+ // x31 = _1111111 + i36
+ // x48 = i36 << 17 + x24
+ // x96 = x48 << 48 + x48
+ // return x96 << 31 + x31
+ //
+ var t0 = new(fiat.P224Element)
+ var t1 = new(fiat.P224Element)
+
+ r.Square(x)
+ r.Mul(x, r)
+ r.Square(r)
+ r.Mul(x, r)
+ t0.Square(r)
+ for s := 1; s < 3; s++ {
+ t0.Square(t0)
+ }
+ t0.Mul(r, t0)
+ t1.Square(t0)
+ r.Mul(x, t1)
+ for s := 0; s < 5; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ t1.Square(t0)
+ for s := 1; s < 12; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ t1.Square(t0)
+ for s := 1; s < 7; s++ {
+ t1.Square(t1)
+ }
+ r.Mul(r, t1)
+ for s := 0; s < 17; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ t1.Square(t0)
+ for s := 1; s < 48; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ for s := 0; s < 31; s++ {
+ t0.Square(t0)
+ }
+ r.Mul(r, t0)
+
+ // v = x^(2^127-1)^2 * x
+ v := new(fiat.P224Element).Square(r)
+ v.Mul(v, x)
+
+ // r = x^(2^127-1) * x
+ r.Mul(r, x)
+
+ // for i = n-1 down to 1:
+ // w = v^(2^(i-1))
+ // if w == -1 then:
+ // v <- v*GG[n-i]
+ // r <- r*GG[n-i-1]
+
+ for i := 96 - 1; i >= 1; i-- {
+ w := new(fiat.P224Element).Set(v)
+ for j := 0; j < i-1; j++ {
+ w.Square(w)
+ }
+ cond := w.Equal(p224MinusOne)
+ v.Select(t0.Mul(v, &p224GG[96-i]), v, cond)
+ r.Select(t0.Mul(r, &p224GG[96-i-1]), r, cond)
+ }
+}
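+
+// Editor's worked toy example for the loop above (hedged; small numbers for
+// illustration only): take p = 17 = q·2ⁿ + 1 with q = 1, n = 4, and x = 2.
+// The smallest non-square is 3, so g = 3^q = 3 and GG = [3, 9, 13, 16].
+// Start with r = x^((q+1)/2) = 2 and v = x^q = 2. At i = 3, w = v⁴ = 16 = -1,
+// so v ← v·GG[1] = 1 and r ← r·GG[0] = 6; for i = 2, 1 we get w = 1 and no
+// change. Indeed 6² = 36 ≡ 2 (mod 17).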
diff --git a/contrib/go/_std_1.19/src/crypto/internal/nistec/p256_asm.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/p256_asm.go
new file mode 100644
index 0000000000..bc443ba323
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/p256_asm.go
@@ -0,0 +1,704 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the Go wrapper for the constant-time, 64-bit assembly
+// implementation of P256. The optimizations performed here are described in
+// detail in:
+// S. Gueron and V. Krasnov, "Fast prime field elliptic-curve cryptography with
+// 256-bit primes"
+// https://link.springer.com/article/10.1007%2Fs13389-014-0090-x
+// https://eprint.iacr.org/2013/816.pdf
+
+//go:build amd64 || arm64 || ppc64le || s390x
+
+package nistec
+
+import (
+ _ "embed"
+ "encoding/binary"
+ "errors"
+ "math/bits"
+ "runtime"
+ "unsafe"
+)
+
+// p256Element is a P-256 base field element in [0, P-1] in the Montgomery
+// domain (with R 2²⁵⁶) as four uint64 limbs in little-endian order.
+type p256Element [4]uint64
+
+// p256One is one in the Montgomery domain.
+var p256One = p256Element{0x0000000000000001, 0xffffffff00000000,
+ 0xffffffffffffffff, 0x00000000fffffffe}
+
+var p256Zero = p256Element{}
+
+// p256P is 2²⁵⁶ - 2²²⁴ + 2¹⁹² + 2⁹⁶ - 1 in the Montgomery domain.
+var p256P = p256Element{0xffffffffffffffff, 0x00000000ffffffff,
+ 0x0000000000000000, 0xffffffff00000001}
+
+// P256Point is a P-256 point. The zero value should not be assumed to be valid
+// (although it is in this implementation).
+type P256Point struct {
+ // (X:Y:Z) are Jacobian coordinates where x = X/Z² and y = Y/Z³. The point
+ // at infinity can be represented by any set of coordinates with Z = 0.
+ x, y, z p256Element
+}
+
+// NewP256Point returns a new P256Point representing the point at infinity.
+func NewP256Point() *P256Point {
+ return &P256Point{
+ x: p256One, y: p256One, z: p256Zero,
+ }
+}
+
+// NewP256Generator returns a new P256Point set to the canonical generator.
+func NewP256Generator() *P256Point {
+ return &P256Point{
+ x: p256Element{0x79e730d418a9143c, 0x75ba95fc5fedb601,
+ 0x79fb732b77622510, 0x18905f76a53755c6},
+ y: p256Element{0xddf25357ce95560a, 0x8b4ab8e4ba19e45c,
+ 0xd2e88688dd21f325, 0x8571ff1825885d85},
+ z: p256One,
+ }
+}
+
+// Set sets p = q and returns p.
+func (p *P256Point) Set(q *P256Point) *P256Point {
+ p.x, p.y, p.z = q.x, q.y, q.z
+ return p
+}
+
+const p256ElementLength = 32
+const p256UncompressedLength = 1 + 2*p256ElementLength
+const p256CompressedLength = 1 + p256ElementLength
+
+// SetBytes sets p to the compressed, uncompressed, or infinity value encoded in
+// b, as specified in SEC 1, Version 2.0, Section 2.3.4. If the point is not on
+// the curve, it returns nil and an error, and the receiver is unchanged.
+// Otherwise, it returns p.
+func (p *P256Point) SetBytes(b []byte) (*P256Point, error) {
+ // p256Mul operates in the Montgomery domain with R = 2²⁵⁶ mod p. Thus rr
+ // here is R in the Montgomery domain, or R×R mod p. See comment in
+ // P256OrdInverse about how this is used.
+ rr := p256Element{0x0000000000000003, 0xfffffffbffffffff,
+ 0xfffffffffffffffe, 0x00000004fffffffd}
+
+ switch {
+ // Point at infinity.
+ case len(b) == 1 && b[0] == 0:
+ return p.Set(NewP256Point()), nil
+
+ // Uncompressed form.
+ case len(b) == p256UncompressedLength && b[0] == 4:
+ var r P256Point
+ p256BigToLittle(&r.x, (*[32]byte)(b[1:33]))
+ p256BigToLittle(&r.y, (*[32]byte)(b[33:65]))
+ if p256LessThanP(&r.x) == 0 || p256LessThanP(&r.y) == 0 {
+ return nil, errors.New("invalid P256 element encoding")
+ }
+ p256Mul(&r.x, &r.x, &rr)
+ p256Mul(&r.y, &r.y, &rr)
+ if err := p256CheckOnCurve(&r.x, &r.y); err != nil {
+ return nil, err
+ }
+ r.z = p256One
+ return p.Set(&r), nil
+
+ // Compressed form.
+ case len(b) == p256CompressedLength && (b[0] == 2 || b[0] == 3):
+ var r P256Point
+ p256BigToLittle(&r.x, (*[32]byte)(b[1:33]))
+ if p256LessThanP(&r.x) == 0 {
+ return nil, errors.New("invalid P256 element encoding")
+ }
+ p256Mul(&r.x, &r.x, &rr)
+
+ // y² = x³ - 3x + b
+ p256Polynomial(&r.y, &r.x)
+ if !p256Sqrt(&r.y, &r.y) {
+ return nil, errors.New("invalid P256 compressed point encoding")
+ }
+
+ // Select the positive or negative root, as indicated by the least
+ // significant bit, based on the encoding type byte.
+ yy := new(p256Element)
+ p256FromMont(yy, &r.y)
+ cond := int(yy[0]&1) ^ int(b[0]&1)
+ p256NegCond(&r.y, cond)
+
+ r.z = p256One
+ return p.Set(&r), nil
+
+ default:
+ return nil, errors.New("invalid P256 point encoding")
+ }
+}
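+
+// Editor's note (hedged): multiplying by rr = R² mod p is the standard way to
+// enter the Montgomery domain, because p256Mul computes a·b·R⁻¹ mod p:
+//
+//	p256Mul(&x, &x, &rr) // x · R² · R⁻¹ = x·R mod p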
+
+// p256Polynomial sets y2 to x³ - 3x + b, and returns y2.
+func p256Polynomial(y2, x *p256Element) *p256Element {
+ x3 := new(p256Element)
+ p256Sqr(x3, x, 1)
+ p256Mul(x3, x3, x)
+
+ threeX := new(p256Element)
+ p256Add(threeX, x, x)
+ p256Add(threeX, threeX, x)
+ p256NegCond(threeX, 1)
+
+ p256B := &p256Element{0xd89cdf6229c4bddf, 0xacf005cd78843090,
+ 0xe5a220abf7212ed6, 0xdc30061d04874834}
+
+ p256Add(x3, x3, threeX)
+ p256Add(x3, x3, p256B)
+
+ *y2 = *x3
+ return y2
+}
+
+func p256CheckOnCurve(x, y *p256Element) error {
+ // y² = x³ - 3x + b
+ rhs := p256Polynomial(new(p256Element), x)
+ lhs := new(p256Element)
+ p256Sqr(lhs, y, 1)
+ if p256Equal(lhs, rhs) != 1 {
+ return errors.New("P256 point not on curve")
+ }
+ return nil
+}
+
+// p256LessThanP returns 1 if x < p, and 0 otherwise. Note that a p256Element is
+// not allowed to be equal to or greater than p, so if this function returns 0
+// then x is invalid.
+func p256LessThanP(x *p256Element) int {
+ var b uint64
+ _, b = bits.Sub64(x[0], p256P[0], b)
+ _, b = bits.Sub64(x[1], p256P[1], b)
+ _, b = bits.Sub64(x[2], p256P[2], b)
+ _, b = bits.Sub64(x[3], p256P[3], b)
+ return int(b)
+}
+
+// p256Add sets res = x + y.
+func p256Add(res, x, y *p256Element) {
+ var c, b uint64
+ t1 := make([]uint64, 4)
+ t1[0], c = bits.Add64(x[0], y[0], 0)
+ t1[1], c = bits.Add64(x[1], y[1], c)
+ t1[2], c = bits.Add64(x[2], y[2], c)
+ t1[3], c = bits.Add64(x[3], y[3], c)
+ t2 := make([]uint64, 4)
+ t2[0], b = bits.Sub64(t1[0], p256P[0], 0)
+ t2[1], b = bits.Sub64(t1[1], p256P[1], b)
+ t2[2], b = bits.Sub64(t1[2], p256P[2], b)
+ t2[3], b = bits.Sub64(t1[3], p256P[3], b)
+ // Three options:
+ // - a+b < p
+ // then c is 0, b is 1, and t1 is correct
+ // - p <= a+b < 2^256
+ // then c is 0, b is 0, and t2 is correct
+ // - 2^256 <= a+b
+ // then c is 1, b is 1, and t2 is correct
+ t2Mask := (c ^ b) - 1
+ res[0] = (t1[0] & ^t2Mask) | (t2[0] & t2Mask)
+ res[1] = (t1[1] & ^t2Mask) | (t2[1] & t2Mask)
+ res[2] = (t1[2] & ^t2Mask) | (t2[2] & t2Mask)
+ res[3] = (t1[3] & ^t2Mask) | (t2[3] & t2Mask)
+}
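+
+// Editor's note (hedged): t2Mask above is all ones exactly when c == b (the
+// second and third cases), so the masked selects pick the reduced t2 there
+// and t1 in the first case. For example, a+b < p gives c = 0, b = 1, so
+// (0^1)-1 = 0 and t1 is kept; equal carries give (0)-1 = ^uint64(0).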
+
+// p256Sqrt sets e to a square root of x. If x is not a square, p256Sqrt returns
+// false and e is unchanged. e and x can overlap.
+func p256Sqrt(e, x *p256Element) (isSquare bool) {
+ t0, t1 := new(p256Element), new(p256Element)
+
+ // Since p = 3 mod 4, exponentiation by (p + 1) / 4 yields a square root candidate.
+ //
+ // The sequence of 7 multiplications and 253 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain v0.4.0.
+ //
+ // _10 = 2*1
+ // _11 = 1 + _10
+ // _1100 = _11 << 2
+ // _1111 = _11 + _1100
+ // _11110000 = _1111 << 4
+ // _11111111 = _1111 + _11110000
+ // x16 = _11111111 << 8 + _11111111
+ // x32 = x16 << 16 + x16
+ // return ((x32 << 32 + 1) << 96 + 1) << 94
+ //
+ p256Sqr(t0, x, 1)
+ p256Mul(t0, x, t0)
+ p256Sqr(t1, t0, 2)
+ p256Mul(t0, t0, t1)
+ p256Sqr(t1, t0, 4)
+ p256Mul(t0, t0, t1)
+ p256Sqr(t1, t0, 8)
+ p256Mul(t0, t0, t1)
+ p256Sqr(t1, t0, 16)
+ p256Mul(t0, t0, t1)
+ p256Sqr(t0, t0, 32)
+ p256Mul(t0, x, t0)
+ p256Sqr(t0, t0, 96)
+ p256Mul(t0, x, t0)
+ p256Sqr(t0, t0, 94)
+
+ p256Sqr(t1, t0, 1)
+ if p256Equal(t1, x) != 1 {
+ return false
+ }
+ *e = *t0
+ return true
+}
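+
+// Editor's note (hedged): for p ≡ 3 (mod 4) and x a square, Euler's
+// criterion x^((p-1)/2) = 1 gives
+//
+//	(x^((p+1)/4))² = x^((p+1)/2) = x · x^((p-1)/2) = x
+//
+// so the single exponentiation plus the final squaring check suffices.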
+
+// The following assembly functions are implemented in p256_asm_*.s
+
+// Montgomery multiplication. Sets res = in1 * in2 * R⁻¹ mod p.
+//
+//go:noescape
+func p256Mul(res, in1, in2 *p256Element)
+
+// Montgomery square, repeated n times (n >= 1).
+//
+//go:noescape
+func p256Sqr(res, in *p256Element, n int)
+
+// Montgomery multiplication by R⁻¹, or 1 outside the domain.
+// Sets res = in * R⁻¹, bringing res out of the Montgomery domain.
+//
+//go:noescape
+func p256FromMont(res, in *p256Element)
+
+// If cond is not 0, sets val = -val mod p.
+//
+//go:noescape
+func p256NegCond(val *p256Element, cond int)
+
+// If cond is 0, sets res = b, otherwise sets res = a.
+//
+//go:noescape
+func p256MovCond(res, a, b *P256Point, cond int)
+
+//go:noescape
+func p256BigToLittle(res *p256Element, in *[32]byte)
+
+//go:noescape
+func p256LittleToBig(res *[32]byte, in *p256Element)
+
+//go:noescape
+func p256OrdBigToLittle(res *p256OrdElement, in *[32]byte)
+
+//go:noescape
+func p256OrdLittleToBig(res *[32]byte, in *p256OrdElement)
+
+// p256Table is a table of the first 16 multiples of a point. Points are stored
+// at an index offset of -1 so [8]P is at index 7, P is at 0, and [16]P is at 15.
+// [0]P is the point at infinity and it's not stored.
+type p256Table [16]P256Point
+
+// p256Select sets res to the point at index idx in the table.
+// idx must be in [0, 15]. It executes in constant time.
+//
+//go:noescape
+func p256Select(res *P256Point, table *p256Table, idx int)
+
+// p256AffinePoint is a point in affine coordinates (x, y). x and y are still
+// Montgomery domain elements. The point can't be the point at infinity.
+type p256AffinePoint struct {
+ x, y p256Element
+}
+
+// p256AffineTable is a table of the first 32 multiples of a point. Points are
+// stored at an index offset of -1 like in p256Table, and [0]P is not stored.
+type p256AffineTable [32]p256AffinePoint
+
+// p256Precomputed is a series of precomputed multiples of G, the canonical
+// generator. The first p256AffineTable contains multiples of G. The second one
+// multiples of [2⁶]G, the third one of [2¹²]G, and so on, where each successive
+// table is the previous table doubled six times. Six is the width of the
+// sliding window used in p256ScalarMult, and having each table already
+// pre-doubled lets us avoid the doublings between windows entirely. This table
+// MUST NOT be modified, as it aliases into p256PrecomputedEmbed below.
+var p256Precomputed *[43]p256AffineTable
+
+//go:embed p256_asm_table.bin
+var p256PrecomputedEmbed string
+
+func init() {
+ p256PrecomputedPtr := (*unsafe.Pointer)(unsafe.Pointer(&p256PrecomputedEmbed))
+ if runtime.GOARCH == "s390x" {
+ var newTable [43 * 32 * 2 * 4]uint64
+ for i, x := range (*[43 * 32 * 2 * 4][8]byte)(*p256PrecomputedPtr) {
+ newTable[i] = binary.LittleEndian.Uint64(x[:])
+ }
+ newTablePtr := unsafe.Pointer(&newTable)
+ p256PrecomputedPtr = &newTablePtr
+ }
+ p256Precomputed = (*[43]p256AffineTable)(*p256PrecomputedPtr)
+}
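+
+// Editor's note (hedged): 43 tables cover the ⌈257/6⌉ six-bit windows of a
+// Booth-recoded 256-bit scalar (the recoding shifts in one extra bit), which
+// is why p256BaseMult below needs no doublings at all.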
+
+// p256SelectAffine sets res to the point at index idx in the table.
+// idx must be in [0, 31]. It executes in constant time.
+//
+//go:noescape
+func p256SelectAffine(res *p256AffinePoint, table *p256AffineTable, idx int)
+
+// Point addition with an affine point and constant time conditions.
+// If zero is 0, sets res = in2. If sel is 0, sets res = in1.
+// If sign is not 0, sets res = in1 + -in2. Otherwise, sets res = in1 + in2.
+//
+//go:noescape
+func p256PointAddAffineAsm(res, in1 *P256Point, in2 *p256AffinePoint, sign, sel, zero int)
+
+// Point addition. Sets res = in1 + in2. Returns one if the two input points
+// were equal and zero otherwise. If in1 or in2 are the point at infinity, res
+// and the return value are undefined.
+//
+//go:noescape
+func p256PointAddAsm(res, in1, in2 *P256Point) int
+
+// Point doubling. Sets res = in + in. in can be the point at infinity.
+//
+//go:noescape
+func p256PointDoubleAsm(res, in *P256Point)
+
+// p256OrdElement is a P-256 scalar field element in [0, ord(G)-1] in the
+// Montgomery domain (with R 2²⁵⁶) as four uint64 limbs in little-endian order.
+type p256OrdElement [4]uint64
+
+// Add sets q = p1 + p2, and returns q. The points may overlap.
+func (q *P256Point) Add(r1, r2 *P256Point) *P256Point {
+ var sum, double P256Point
+ r1IsInfinity := r1.isInfinity()
+ r2IsInfinity := r2.isInfinity()
+ pointsEqual := p256PointAddAsm(&sum, r1, r2)
+ p256PointDoubleAsm(&double, r1)
+ p256MovCond(&sum, &double, &sum, pointsEqual)
+ p256MovCond(&sum, r1, &sum, r2IsInfinity)
+ p256MovCond(&sum, r2, &sum, r1IsInfinity)
+ return q.Set(&sum)
+}
+
+// Double sets q = p + p, and returns q. The points may overlap.
+func (q *P256Point) Double(p *P256Point) *P256Point {
+ var double P256Point
+ p256PointDoubleAsm(&double, p)
+ return q.Set(&double)
+}
+
+// ScalarBaseMult sets r = scalar * generator, where scalar is a 32-byte big
+// endian value, and returns r. If scalar is not 32 bytes long, ScalarBaseMult
+// returns an error and the receiver is unchanged.
+func (r *P256Point) ScalarBaseMult(scalar []byte) (*P256Point, error) {
+ if len(scalar) != 32 {
+ return nil, errors.New("invalid scalar length")
+ }
+ scalarReversed := new(p256OrdElement)
+ p256OrdBigToLittle(scalarReversed, (*[32]byte)(scalar))
+
+ r.p256BaseMult(scalarReversed)
+ return r, nil
+}
+
+// ScalarMult sets r = scalar * q, where scalar is a 32-byte big endian value,
+// and returns r. If scalar is not 32 bytes long, ScalarMult returns an
+// error and the receiver is unchanged.
+func (r *P256Point) ScalarMult(q *P256Point, scalar []byte) (*P256Point, error) {
+ if len(scalar) != 32 {
+ return nil, errors.New("invalid scalar length")
+ }
+ scalarReversed := new(p256OrdElement)
+ p256OrdBigToLittle(scalarReversed, (*[32]byte)(scalar))
+
+ r.Set(q).p256ScalarMult(scalarReversed)
+ return r, nil
+}
+
+// uint64IsZero returns 1 if x is zero and zero otherwise.
+func uint64IsZero(x uint64) int {
+ x = ^x
+ x &= x >> 32
+ x &= x >> 16
+ x &= x >> 8
+ x &= x >> 4
+ x &= x >> 2
+ x &= x >> 1
+ return int(x & 1)
+}
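+
+// Editor's note (hedged): uint64IsZero folds all 64 bits of ^x down into bit
+// 0 by repeated halving; the result is 1 only if every bit of ^x is set,
+// i.e. x == 0, with no data-dependent branch.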
+
+// p256Equal returns 1 if a and b are equal and 0 otherwise.
+func p256Equal(a, b *p256Element) int {
+ var acc uint64
+ for i := range a {
+ acc |= a[i] ^ b[i]
+ }
+ return uint64IsZero(acc)
+}
+
+// isInfinity returns 1 if p is the point at infinity and 0 otherwise.
+func (p *P256Point) isInfinity() int {
+ return p256Equal(&p.z, &p256Zero)
+}
+
+// Bytes returns the uncompressed or infinity encoding of p, as specified in
+// SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the point at
+// infinity is shorter than all other encodings.
+func (p *P256Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [p256UncompressedLength]byte
+ return p.bytes(&out)
+}
+
+func (p *P256Point) bytes(out *[p256UncompressedLength]byte) []byte {
+ // The proper representation of the point at infinity is a single zero byte.
+ if p.isInfinity() == 1 {
+ return append(out[:0], 0)
+ }
+
+ x, y := new(p256Element), new(p256Element)
+ p.affineFromMont(x, y)
+
+ out[0] = 4 // Uncompressed form.
+ p256LittleToBig((*[32]byte)(out[1:33]), x)
+ p256LittleToBig((*[32]byte)(out[33:65]), y)
+
+ return out[:]
+}
+
+// affineFromMont sets (x, y) to the affine coordinates of p, converted out of the
+// Montgomery domain.
+func (p *P256Point) affineFromMont(x, y *p256Element) {
+ p256Inverse(y, &p.z)
+ p256Sqr(x, y, 1)
+ p256Mul(y, y, x)
+
+ p256Mul(x, &p.x, x)
+ p256Mul(y, &p.y, y)
+
+ p256FromMont(x, x)
+ p256FromMont(y, y)
+}
+
+// BytesCompressed returns the compressed or infinity encoding of p, as
+// specified in SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the
+// point at infinity is shorter than all other encodings.
+func (p *P256Point) BytesCompressed() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [p256CompressedLength]byte
+ return p.bytesCompressed(&out)
+}
+
+func (p *P256Point) bytesCompressed(out *[p256CompressedLength]byte) []byte {
+ if p.isInfinity() == 1 {
+ return append(out[:0], 0)
+ }
+
+ x, y := new(p256Element), new(p256Element)
+ p.affineFromMont(x, y)
+
+ out[0] = 2 | byte(y[0]&1)
+ p256LittleToBig((*[32]byte)(out[1:33]), x)
+
+ return out[:]
+}
+
+// Select sets q to p1 if cond == 1, and to p2 if cond == 0.
+func (q *P256Point) Select(p1, p2 *P256Point, cond int) *P256Point {
+ p256MovCond(q, p1, p2, cond)
+ return q
+}
+
+// p256Inverse sets out to in⁻¹ mod p. If in is zero, out will be zero.
+func p256Inverse(out, in *p256Element) {
+ // Inversion is calculated through exponentiation by p - 2, per Fermat's
+ // little theorem.
+ //
+ // The sequence of 12 multiplications and 255 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain
+ // v0.4.0.
+ //
+ // _10 = 2*1
+ // _11 = 1 + _10
+ // _110 = 2*_11
+ // _111 = 1 + _110
+ // _111000 = _111 << 3
+ // _111111 = _111 + _111000
+ // x12 = _111111 << 6 + _111111
+ // x15 = x12 << 3 + _111
+ // x16 = 2*x15 + 1
+ // x32 = x16 << 16 + x16
+ // i53 = x32 << 15
+ // x47 = x15 + i53
+ // i263 = ((i53 << 17 + 1) << 143 + x47) << 47
+ // return (x47 + i263) << 2 + 1
+ //
+ var z = new(p256Element)
+ var t0 = new(p256Element)
+ var t1 = new(p256Element)
+
+ p256Sqr(z, in, 1)
+ p256Mul(z, in, z)
+ p256Sqr(z, z, 1)
+ p256Mul(z, in, z)
+ p256Sqr(t0, z, 3)
+ p256Mul(t0, z, t0)
+ p256Sqr(t1, t0, 6)
+ p256Mul(t0, t0, t1)
+ p256Sqr(t0, t0, 3)
+ p256Mul(z, z, t0)
+ p256Sqr(t0, z, 1)
+ p256Mul(t0, in, t0)
+ p256Sqr(t1, t0, 16)
+ p256Mul(t0, t0, t1)
+ p256Sqr(t0, t0, 15)
+ p256Mul(z, z, t0)
+ p256Sqr(t0, t0, 17)
+ p256Mul(t0, in, t0)
+ p256Sqr(t0, t0, 143)
+ p256Mul(t0, z, t0)
+ p256Sqr(t0, t0, 47)
+ p256Mul(z, z, t0)
+ p256Sqr(z, z, 2)
+ p256Mul(out, in, z)
+}
+
+func boothW5(in uint) (int, int) {
+ var s uint = ^((in >> 5) - 1)
+ var d uint = (1 << 6) - in - 1
+ d = (d & s) | (in & (^s))
+ d = (d >> 1) + (d & 1)
+ return int(d), int(s & 1)
+}
+
+func boothW6(in uint) (int, int) {
+ var s uint = ^((in >> 6) - 1)
+ var d uint = (1 << 7) - in - 1
+ d = (d & s) | (in & (^s))
+ d = (d >> 1) + (d & 1)
+ return int(d), int(s & 1)
+}
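+
+// Editor's note (hedged): boothW5 and boothW6 perform signed (Booth) window
+// recoding: a raw (w+1)-bit window in, whose low bit is the top bit of the
+// previous window, becomes a digit d in [0, 2^(w-1)] and a sign s, encoding
+// +d when s = 0 and -d when s = 1. For example, boothW6(3) = (2, 0) encodes
+// +2, and boothW6(70) = (29, 1) encodes -29 (check: -2⁶ + 70/2 = -29).
+// Halving the digit range lets the tables store only positive multiples;
+// negation is applied afterwards by p256NegCond.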
+
+func (p *P256Point) p256BaseMult(scalar *p256OrdElement) {
+ var t0 p256AffinePoint
+
+ wvalue := (scalar[0] << 1) & 0x7f
+ sel, sign := boothW6(uint(wvalue))
+ p256SelectAffine(&t0, &p256Precomputed[0], sel)
+ p.x, p.y, p.z = t0.x, t0.y, p256One
+ p256NegCond(&p.y, sign)
+
+ index := uint(5)
+ zero := sel
+
+ for i := 1; i < 43; i++ {
+ if index < 192 {
+ wvalue = ((scalar[index/64] >> (index % 64)) + (scalar[index/64+1] << (64 - (index % 64)))) & 0x7f
+ } else {
+ wvalue = (scalar[index/64] >> (index % 64)) & 0x7f
+ }
+ index += 6
+ sel, sign = boothW6(uint(wvalue))
+ p256SelectAffine(&t0, &p256Precomputed[i], sel)
+ p256PointAddAffineAsm(p, p, &t0, sign, sel, zero)
+ zero |= sel
+ }
+
+ // If the whole scalar was zero, set to the point at infinity.
+ p256MovCond(p, p, NewP256Point(), zero)
+}
+
+func (p *P256Point) p256ScalarMult(scalar *p256OrdElement) {
+	// precomp is a table of precomputed points that stores multiples of p
+	// from [1]p to [16]p (the group operation is additive).
+ var precomp p256Table
+ var t0, t1, t2, t3 P256Point
+
+ // Prepare the table
+ precomp[0] = *p // 1
+
+ p256PointDoubleAsm(&t0, p)
+ p256PointDoubleAsm(&t1, &t0)
+ p256PointDoubleAsm(&t2, &t1)
+ p256PointDoubleAsm(&t3, &t2)
+ precomp[1] = t0 // 2
+ precomp[3] = t1 // 4
+ precomp[7] = t2 // 8
+ precomp[15] = t3 // 16
+
+ p256PointAddAsm(&t0, &t0, p)
+ p256PointAddAsm(&t1, &t1, p)
+ p256PointAddAsm(&t2, &t2, p)
+ precomp[2] = t0 // 3
+ precomp[4] = t1 // 5
+ precomp[8] = t2 // 9
+
+ p256PointDoubleAsm(&t0, &t0)
+ p256PointDoubleAsm(&t1, &t1)
+ precomp[5] = t0 // 6
+ precomp[9] = t1 // 10
+
+ p256PointAddAsm(&t2, &t0, p)
+ p256PointAddAsm(&t1, &t1, p)
+ precomp[6] = t2 // 7
+ precomp[10] = t1 // 11
+
+ p256PointDoubleAsm(&t0, &t0)
+ p256PointDoubleAsm(&t2, &t2)
+ precomp[11] = t0 // 12
+ precomp[13] = t2 // 14
+
+ p256PointAddAsm(&t0, &t0, p)
+ p256PointAddAsm(&t2, &t2, p)
+ precomp[12] = t0 // 13
+ precomp[14] = t2 // 15
+
+	// Start scanning the windows from the top bit
+ index := uint(254)
+ var sel, sign int
+
+ wvalue := (scalar[index/64] >> (index % 64)) & 0x3f
+ sel, _ = boothW5(uint(wvalue))
+
+ p256Select(p, &precomp, sel)
+ zero := sel
+
+ for index > 4 {
+ index -= 5
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+
+ if index < 192 {
+ wvalue = ((scalar[index/64] >> (index % 64)) + (scalar[index/64+1] << (64 - (index % 64)))) & 0x3f
+ } else {
+ wvalue = (scalar[index/64] >> (index % 64)) & 0x3f
+ }
+
+ sel, sign = boothW5(uint(wvalue))
+
+ p256Select(&t0, &precomp, sel)
+ p256NegCond(&t0.y, sign)
+ p256PointAddAsm(&t1, p, &t0)
+ p256MovCond(&t1, &t1, p, sel)
+ p256MovCond(p, &t1, &t0, zero)
+ zero |= sel
+ }
+
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+
+ wvalue = (scalar[0] << 1) & 0x3f
+ sel, sign = boothW5(uint(wvalue))
+
+ p256Select(&t0, &precomp, sel)
+ p256NegCond(&t0.y, sign)
+ p256PointAddAsm(&t1, p, &t0)
+ p256MovCond(&t1, &t1, p, sel)
+ p256MovCond(p, &t1, &t0, zero)
+}
diff --git a/contrib/go/_std_1.19/src/crypto/internal/nistec/p256_asm_amd64.s b/contrib/go/_std_1.19/src/crypto/internal/nistec/p256_asm_amd64.s
new file mode 100644
index 0000000000..84e4cee903
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/p256_asm_amd64.s
@@ -0,0 +1,2350 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the constant-time, 64-bit assembly implementation of
+// P256. The optimizations performed here are described in detail in:
+// S. Gueron and V. Krasnov, "Fast prime field elliptic-curve cryptography with
+// 256-bit primes"
+// https://link.springer.com/article/10.1007%2Fs13389-014-0090-x
+// https://eprint.iacr.org/2013/816.pdf
+
+#include "textflag.h"
+
+#define res_ptr DI
+#define x_ptr SI
+#define y_ptr CX
+
+#define acc0 R8
+#define acc1 R9
+#define acc2 R10
+#define acc3 R11
+#define acc4 R12
+#define acc5 R13
+#define t0 R14
+#define t1 R15
+
+DATA p256const0<>+0x00(SB)/8, $0x00000000ffffffff
+DATA p256const1<>+0x00(SB)/8, $0xffffffff00000001
+DATA p256ordK0<>+0x00(SB)/8, $0xccd1c8aaee00bc4f
+DATA p256ord<>+0x00(SB)/8, $0xf3b9cac2fc632551
+DATA p256ord<>+0x08(SB)/8, $0xbce6faada7179e84
+DATA p256ord<>+0x10(SB)/8, $0xffffffffffffffff
+DATA p256ord<>+0x18(SB)/8, $0xffffffff00000000
+DATA p256one<>+0x00(SB)/8, $0x0000000000000001
+DATA p256one<>+0x08(SB)/8, $0xffffffff00000000
+DATA p256one<>+0x10(SB)/8, $0xffffffffffffffff
+DATA p256one<>+0x18(SB)/8, $0x00000000fffffffe
+GLOBL p256const0<>(SB), 8, $8
+GLOBL p256const1<>(SB), 8, $8
+GLOBL p256ordK0<>(SB), 8, $8
+GLOBL p256ord<>(SB), 8, $32
+GLOBL p256one<>(SB), 8, $32
+
+/* ---------------------------------------*/
+// func p256OrdLittleToBig(res *[32]byte, in *p256OrdElement)
+TEXT ·p256OrdLittleToBig(SB),NOSPLIT,$0
+ JMP ·p256BigToLittle(SB)
+/* ---------------------------------------*/
+// func p256OrdBigToLittle(res *p256OrdElement, in *[32]byte)
+TEXT ·p256OrdBigToLittle(SB),NOSPLIT,$0
+ JMP ·p256BigToLittle(SB)
+/* ---------------------------------------*/
+// func p256LittleToBig(res *[32]byte, in *p256Element)
+TEXT ·p256LittleToBig(SB),NOSPLIT,$0
+ JMP ·p256BigToLittle(SB)
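+	// Editor's note (hedged): all three entry points above alias
+	// p256BigToLittle because byte-reversing a 32-byte value is its own
+	// inverse, so one routine serves both directions and both element
+	// types.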
+/* ---------------------------------------*/
+// func p256BigToLittle(res *p256Element, in *[32]byte)
+TEXT ·p256BigToLittle(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in+8(FP), x_ptr
+
+ MOVQ (8*0)(x_ptr), acc0
+ MOVQ (8*1)(x_ptr), acc1
+ MOVQ (8*2)(x_ptr), acc2
+ MOVQ (8*3)(x_ptr), acc3
+
+ BSWAPQ acc0
+ BSWAPQ acc1
+ BSWAPQ acc2
+ BSWAPQ acc3
+
+ MOVQ acc3, (8*0)(res_ptr)
+ MOVQ acc2, (8*1)(res_ptr)
+ MOVQ acc1, (8*2)(res_ptr)
+ MOVQ acc0, (8*3)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256MovCond(res, a, b *P256Point, cond int)
+TEXT ·p256MovCond(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ a+8(FP), x_ptr
+ MOVQ b+16(FP), y_ptr
+ MOVQ cond+24(FP), X12
+
+ PXOR X13, X13
+ PSHUFD $0, X12, X12
+ PCMPEQL X13, X12
+
+ MOVOU X12, X0
+ MOVOU (16*0)(x_ptr), X6
+ PANDN X6, X0
+ MOVOU X12, X1
+ MOVOU (16*1)(x_ptr), X7
+ PANDN X7, X1
+ MOVOU X12, X2
+ MOVOU (16*2)(x_ptr), X8
+ PANDN X8, X2
+ MOVOU X12, X3
+ MOVOU (16*3)(x_ptr), X9
+ PANDN X9, X3
+ MOVOU X12, X4
+ MOVOU (16*4)(x_ptr), X10
+ PANDN X10, X4
+ MOVOU X12, X5
+ MOVOU (16*5)(x_ptr), X11
+ PANDN X11, X5
+
+ MOVOU (16*0)(y_ptr), X6
+ MOVOU (16*1)(y_ptr), X7
+ MOVOU (16*2)(y_ptr), X8
+ MOVOU (16*3)(y_ptr), X9
+ MOVOU (16*4)(y_ptr), X10
+ MOVOU (16*5)(y_ptr), X11
+
+ PAND X12, X6
+ PAND X12, X7
+ PAND X12, X8
+ PAND X12, X9
+ PAND X12, X10
+ PAND X12, X11
+
+ PXOR X6, X0
+ PXOR X7, X1
+ PXOR X8, X2
+ PXOR X9, X3
+ PXOR X10, X4
+ PXOR X11, X5
+
+ MOVOU X0, (16*0)(res_ptr)
+ MOVOU X1, (16*1)(res_ptr)
+ MOVOU X2, (16*2)(res_ptr)
+ MOVOU X3, (16*3)(res_ptr)
+ MOVOU X4, (16*4)(res_ptr)
+ MOVOU X5, (16*5)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256NegCond(val *p256Element, cond int)
+TEXT ·p256NegCond(SB),NOSPLIT,$0
+ MOVQ val+0(FP), res_ptr
+ MOVQ cond+8(FP), t0
+	// acc = p (the field prime)
+ MOVQ $-1, acc0
+ MOVQ p256const0<>(SB), acc1
+ MOVQ $0, acc2
+ MOVQ p256const1<>(SB), acc3
+ // Load the original value
+ MOVQ (8*0)(res_ptr), acc5
+ MOVQ (8*1)(res_ptr), x_ptr
+ MOVQ (8*2)(res_ptr), y_ptr
+ MOVQ (8*3)(res_ptr), t1
+ // Speculatively subtract
+ SUBQ acc5, acc0
+ SBBQ x_ptr, acc1
+ SBBQ y_ptr, acc2
+ SBBQ t1, acc3
+ // If condition is 0, keep original value
+ TESTQ t0, t0
+ CMOVQEQ acc5, acc0
+ CMOVQEQ x_ptr, acc1
+ CMOVQEQ y_ptr, acc2
+ CMOVQEQ t1, acc3
+ // Store result
+ MOVQ acc0, (8*0)(res_ptr)
+ MOVQ acc1, (8*1)(res_ptr)
+ MOVQ acc2, (8*2)(res_ptr)
+ MOVQ acc3, (8*3)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256Sqr(res, in *p256Element, n int)
+TEXT ·p256Sqr(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in+8(FP), x_ptr
+ MOVQ n+16(FP), BX
+
+sqrLoop:
+
+ // y[1:] * y[0]
+ MOVQ (8*0)(x_ptr), t0
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ MOVQ AX, acc1
+ MOVQ DX, acc2
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc3
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, acc4
+ // y[2:] * y[1]
+ MOVQ (8*1)(x_ptr), t0
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, acc5
+ // y[3] * y[2]
+ MOVQ (8*2)(x_ptr), t0
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc5
+ ADCQ $0, DX
+ MOVQ DX, y_ptr
+ XORQ t1, t1
+ // *2
+ ADDQ acc1, acc1
+ ADCQ acc2, acc2
+ ADCQ acc3, acc3
+ ADCQ acc4, acc4
+ ADCQ acc5, acc5
+ ADCQ y_ptr, y_ptr
+ ADCQ $0, t1
+ // Missing products
+ MOVQ (8*0)(x_ptr), AX
+ MULQ AX
+ MOVQ AX, acc0
+ MOVQ DX, t0
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc1
+ ADCQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t0
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc3
+ ADCQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t0
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc5
+ ADCQ AX, y_ptr
+ ADCQ DX, t1
+ MOVQ t1, x_ptr
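+	// Editor's note (hedged): each of the four reduction steps below adds
+	// a multiple of p256 chosen to clear the lowest remaining limb. Since
+	// p's low limb is 2⁶⁴-1, the Montgomery factor is that limb itself,
+	// and p's sparse shape lets one MULQ by p256const1 plus a SHLQ/SHRQ
+	// pair stand in for a full 4-limb multiplication.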
+ // First reduction step
+ MOVQ acc0, AX
+ MOVQ acc0, t1
+ SHLQ $32, acc0
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc0, acc1
+ ADCQ t1, acc2
+ ADCQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, acc0
+ // Second reduction step
+ MOVQ acc1, AX
+ MOVQ acc1, t1
+ SHLQ $32, acc1
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc1, acc2
+ ADCQ t1, acc3
+ ADCQ AX, acc0
+ ADCQ $0, DX
+ MOVQ DX, acc1
+ // Third reduction step
+ MOVQ acc2, AX
+ MOVQ acc2, t1
+ SHLQ $32, acc2
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc2, acc3
+ ADCQ t1, acc0
+ ADCQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, acc2
+ // Last reduction step
+ XORQ t0, t0
+ MOVQ acc3, AX
+ MOVQ acc3, t1
+ SHLQ $32, acc3
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc3, acc0
+ ADCQ t1, acc1
+ ADCQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc3
+ // Add bits [511:256] of the sqr result
+ ADCQ acc4, acc0
+ ADCQ acc5, acc1
+ ADCQ y_ptr, acc2
+ ADCQ x_ptr, acc3
+ ADCQ $0, t0
+
+ MOVQ acc0, acc4
+ MOVQ acc1, acc5
+ MOVQ acc2, y_ptr
+ MOVQ acc3, t1
+ // Subtract p256
+ SUBQ $-1, acc0
+	SBBQ p256const0<>(SB), acc1
+ SBBQ $0, acc2
+ SBBQ p256const1<>(SB), acc3
+ SBBQ $0, t0
+
+ CMOVQCS acc4, acc0
+ CMOVQCS acc5, acc1
+ CMOVQCS y_ptr, acc2
+ CMOVQCS t1, acc3
+
+ MOVQ acc0, (8*0)(res_ptr)
+ MOVQ acc1, (8*1)(res_ptr)
+ MOVQ acc2, (8*2)(res_ptr)
+ MOVQ acc3, (8*3)(res_ptr)
+ MOVQ res_ptr, x_ptr
+ DECQ BX
+ JNE sqrLoop
+
+ RET
+/* ---------------------------------------*/
+// func p256Mul(res, in1, in2 *p256Element)
+TEXT ·p256Mul(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in1+8(FP), x_ptr
+ MOVQ in2+16(FP), y_ptr
+ // x * y[0]
+ MOVQ (8*0)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ MOVQ AX, acc0
+ MOVQ DX, acc1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, acc2
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc3
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, acc4
+ XORQ acc5, acc5
+ // First reduction step
+ MOVQ acc0, AX
+ MOVQ acc0, t1
+ SHLQ $32, acc0
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc0, acc1
+ ADCQ t1, acc2
+ ADCQ AX, acc3
+ ADCQ DX, acc4
+ ADCQ $0, acc5
+ XORQ acc0, acc0
+ // x * y[1]
+ MOVQ (8*1)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc2
+ ADCQ $0, DX
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ DX, acc5
+ ADCQ $0, acc0
+ // Second reduction step
+ MOVQ acc1, AX
+ MOVQ acc1, t1
+ SHLQ $32, acc1
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc1, acc2
+ ADCQ t1, acc3
+ ADCQ AX, acc4
+ ADCQ DX, acc5
+ ADCQ $0, acc0
+ XORQ acc1, acc1
+ // x * y[2]
+ MOVQ (8*2)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ DX, acc0
+ ADCQ $0, acc1
+ // Third reduction step
+ MOVQ acc2, AX
+ MOVQ acc2, t1
+ SHLQ $32, acc2
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc2, acc3
+ ADCQ t1, acc4
+ ADCQ AX, acc5
+ ADCQ DX, acc0
+ ADCQ $0, acc1
+ XORQ acc2, acc2
+ // x * y[3]
+ MOVQ (8*3)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc0
+ ADCQ $0, DX
+ ADDQ AX, acc0
+ ADCQ DX, acc1
+ ADCQ $0, acc2
+ // Last reduction step
+ MOVQ acc3, AX
+ MOVQ acc3, t1
+ SHLQ $32, acc3
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc3, acc4
+ ADCQ t1, acc5
+ ADCQ AX, acc0
+ ADCQ DX, acc1
+ ADCQ $0, acc2
+ // Copy result [255:0]
+ MOVQ acc4, x_ptr
+ MOVQ acc5, acc3
+ MOVQ acc0, t0
+ MOVQ acc1, t1
+ // Subtract p256
+ SUBQ $-1, acc4
+	SBBQ p256const0<>(SB), acc5
+ SBBQ $0, acc0
+ SBBQ p256const1<>(SB), acc1
+ SBBQ $0, acc2
+
+ CMOVQCS x_ptr, acc4
+ CMOVQCS acc3, acc5
+ CMOVQCS t0, acc0
+ CMOVQCS t1, acc1
+
+ MOVQ acc4, (8*0)(res_ptr)
+ MOVQ acc5, (8*1)(res_ptr)
+ MOVQ acc0, (8*2)(res_ptr)
+ MOVQ acc1, (8*3)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256FromMont(res, in *p256Element)
+TEXT ·p256FromMont(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in+8(FP), x_ptr
+
+ MOVQ (8*0)(x_ptr), acc0
+ MOVQ (8*1)(x_ptr), acc1
+ MOVQ (8*2)(x_ptr), acc2
+ MOVQ (8*3)(x_ptr), acc3
+ XORQ acc4, acc4
+
+ // Only reduce, no multiplications are needed
+ // First stage
+ MOVQ acc0, AX
+ MOVQ acc0, t1
+ SHLQ $32, acc0
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc0, acc1
+ ADCQ t1, acc2
+ ADCQ AX, acc3
+ ADCQ DX, acc4
+ XORQ acc5, acc5
+ // Second stage
+ MOVQ acc1, AX
+ MOVQ acc1, t1
+ SHLQ $32, acc1
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc1, acc2
+ ADCQ t1, acc3
+ ADCQ AX, acc4
+ ADCQ DX, acc5
+ XORQ acc0, acc0
+ // Third stage
+ MOVQ acc2, AX
+ MOVQ acc2, t1
+ SHLQ $32, acc2
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc2, acc3
+ ADCQ t1, acc4
+ ADCQ AX, acc5
+ ADCQ DX, acc0
+ XORQ acc1, acc1
+ // Last stage
+ MOVQ acc3, AX
+ MOVQ acc3, t1
+ SHLQ $32, acc3
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc3, acc4
+ ADCQ t1, acc5
+ ADCQ AX, acc0
+ ADCQ DX, acc1
+
+ MOVQ acc4, x_ptr
+ MOVQ acc5, acc3
+ MOVQ acc0, t0
+ MOVQ acc1, t1
+
+ SUBQ $-1, acc4
+ SBBQ p256const0<>(SB), acc5
+ SBBQ $0, acc0
+ SBBQ p256const1<>(SB), acc1
+
+ CMOVQCS x_ptr, acc4
+ CMOVQCS acc3, acc5
+ CMOVQCS t0, acc0
+ CMOVQCS t1, acc1
+
+ MOVQ acc4, (8*0)(res_ptr)
+ MOVQ acc5, (8*1)(res_ptr)
+ MOVQ acc0, (8*2)(res_ptr)
+ MOVQ acc1, (8*3)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256Select(res *P256Point, table *p256Table, idx int)
+TEXT ·p256Select(SB),NOSPLIT,$0
+ MOVQ idx+16(FP),AX
+ MOVQ table+8(FP),DI
+ MOVQ res+0(FP),DX
+
+ PXOR X15, X15 // X15 = 0
+ PCMPEQL X14, X14 // X14 = -1
+ PSUBL X14, X15 // X15 = 1
+ MOVL AX, X14
+ PSHUFD $0, X14, X14
+
+ PXOR X0, X0
+ PXOR X1, X1
+ PXOR X2, X2
+ PXOR X3, X3
+ PXOR X4, X4
+ PXOR X5, X5
+ MOVQ $16, AX
+
+ MOVOU X15, X13
+
+loop_select:
+
+ MOVOU X13, X12
+ PADDL X15, X13
+ PCMPEQL X14, X12
+
+ MOVOU (16*0)(DI), X6
+ MOVOU (16*1)(DI), X7
+ MOVOU (16*2)(DI), X8
+ MOVOU (16*3)(DI), X9
+ MOVOU (16*4)(DI), X10
+ MOVOU (16*5)(DI), X11
+ ADDQ $(16*6), DI
+
+ PAND X12, X6
+ PAND X12, X7
+ PAND X12, X8
+ PAND X12, X9
+ PAND X12, X10
+ PAND X12, X11
+
+ PXOR X6, X0
+ PXOR X7, X1
+ PXOR X8, X2
+ PXOR X9, X3
+ PXOR X10, X4
+ PXOR X11, X5
+
+ DECQ AX
+ JNE loop_select
+
+ MOVOU X0, (16*0)(DX)
+ MOVOU X1, (16*1)(DX)
+ MOVOU X2, (16*2)(DX)
+ MOVOU X3, (16*3)(DX)
+ MOVOU X4, (16*4)(DX)
+ MOVOU X5, (16*5)(DX)
+
+ RET
+/* ---------------------------------------*/
+// func p256SelectAffine(res *p256AffinePoint, table *p256AffineTable, idx int)
+TEXT ·p256SelectAffine(SB),NOSPLIT,$0
+ MOVQ idx+16(FP),AX
+ MOVQ table+8(FP),DI
+ MOVQ res+0(FP),DX
+
+ PXOR X15, X15 // X15 = 0
+ PCMPEQL X14, X14 // X14 = -1
+ PSUBL X14, X15 // X15 = 1
+ MOVL AX, X14
+ PSHUFD $0, X14, X14
+
+ PXOR X0, X0
+ PXOR X1, X1
+ PXOR X2, X2
+ PXOR X3, X3
+ MOVQ $16, AX
+
+ MOVOU X15, X13
+
+loop_select_base:
+
+ MOVOU X13, X12
+ PADDL X15, X13
+ PCMPEQL X14, X12
+
+ MOVOU (16*0)(DI), X4
+ MOVOU (16*1)(DI), X5
+ MOVOU (16*2)(DI), X6
+ MOVOU (16*3)(DI), X7
+
+ MOVOU (16*4)(DI), X8
+ MOVOU (16*5)(DI), X9
+ MOVOU (16*6)(DI), X10
+ MOVOU (16*7)(DI), X11
+
+ ADDQ $(16*8), DI
+
+ PAND X12, X4
+ PAND X12, X5
+ PAND X12, X6
+ PAND X12, X7
+
+ MOVOU X13, X12
+ PADDL X15, X13
+ PCMPEQL X14, X12
+
+ PAND X12, X8
+ PAND X12, X9
+ PAND X12, X10
+ PAND X12, X11
+
+ PXOR X4, X0
+ PXOR X5, X1
+ PXOR X6, X2
+ PXOR X7, X3
+
+ PXOR X8, X0
+ PXOR X9, X1
+ PXOR X10, X2
+ PXOR X11, X3
+
+ DECQ AX
+ JNE loop_select_base
+
+ MOVOU X0, (16*0)(DX)
+ MOVOU X1, (16*1)(DX)
+ MOVOU X2, (16*2)(DX)
+ MOVOU X3, (16*3)(DX)
+
+ RET
+/* ---------------------------------------*/
+// func p256OrdMul(res, in1, in2 *p256OrdElement)
+TEXT ·p256OrdMul(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in1+8(FP), x_ptr
+ MOVQ in2+16(FP), y_ptr
+ // x * y[0]
+ MOVQ (8*0)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ MOVQ AX, acc0
+ MOVQ DX, acc1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, acc2
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc3
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, acc4
+ XORQ acc5, acc5
+ // First reduction step
+ MOVQ acc0, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc0
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc1
+ ADCQ $0, DX
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x10(SB), AX
+ MULQ t0
+ ADDQ t1, acc2
+ ADCQ $0, DX
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x18(SB), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ DX, acc4
+ ADCQ $0, acc5
+ // x * y[1]
+ MOVQ (8*1)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc2
+ ADCQ $0, DX
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ DX, acc5
+ ADCQ $0, acc0
+ // Second reduction step
+ MOVQ acc1, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc2
+ ADCQ $0, DX
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x10(SB), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x18(SB), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ DX, acc5
+ ADCQ $0, acc0
+ // x * y[2]
+ MOVQ (8*2)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ DX, acc0
+ ADCQ $0, acc1
+ // Third reduction step
+ MOVQ acc2, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x10(SB), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x18(SB), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ DX, acc0
+ ADCQ $0, acc1
+ // x * y[3]
+ MOVQ (8*3)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc0
+ ADCQ $0, DX
+ ADDQ AX, acc0
+ ADCQ DX, acc1
+ ADCQ $0, acc2
+ // Last reduction step
+ MOVQ acc3, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x10(SB), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x18(SB), AX
+ MULQ t0
+ ADDQ t1, acc0
+ ADCQ $0, DX
+ ADDQ AX, acc0
+ ADCQ DX, acc1
+ ADCQ $0, acc2
+ // Copy result [255:0]
+ MOVQ acc4, x_ptr
+ MOVQ acc5, acc3
+ MOVQ acc0, t0
+ MOVQ acc1, t1
+	// Subtract p256ord
+	SUBQ p256ord<>+0x00(SB), acc4
+	SBBQ p256ord<>+0x08(SB), acc5
+ SBBQ p256ord<>+0x10(SB), acc0
+ SBBQ p256ord<>+0x18(SB), acc1
+ SBBQ $0, acc2
+
+ CMOVQCS x_ptr, acc4
+ CMOVQCS acc3, acc5
+ CMOVQCS t0, acc0
+ CMOVQCS t1, acc1
+
+ MOVQ acc4, (8*0)(res_ptr)
+ MOVQ acc5, (8*1)(res_ptr)
+ MOVQ acc0, (8*2)(res_ptr)
+ MOVQ acc1, (8*3)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
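p256OrdMul keeps scalars in the Montgomery domain: with R = 2²⁵⁶ and n the group order, it returns in1 × in2 × R⁻¹ mod n. The same contract restated with math/big (not constant time, for reasoning only):

    import "math/big"

    // montProduct computes x*y*R⁻¹ mod n, the relation p256OrdMul maintains.
    func montProduct(x, y, n *big.Int) *big.Int {
        r := new(big.Int).Lsh(big.NewInt(1), 256) // R = 2²⁵⁶
        rInv := new(big.Int).ModInverse(r, n)
        t := new(big.Int).Mul(x, y)
        t.Mul(t, rInv)
        return t.Mod(t, n)
    }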
+// func p256OrdSqr(res, in *p256OrdElement, n int)
+TEXT ·p256OrdSqr(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in+8(FP), x_ptr
+ MOVQ n+16(FP), BX
+
+ordSqrLoop:
+
+ // y[1:] * y[0]
+ MOVQ (8*0)(x_ptr), t0
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ MOVQ AX, acc1
+ MOVQ DX, acc2
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc3
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, acc4
+ // y[2:] * y[1]
+ MOVQ (8*1)(x_ptr), t0
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, acc5
+ // y[3] * y[2]
+ MOVQ (8*2)(x_ptr), t0
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc5
+ ADCQ $0, DX
+ MOVQ DX, y_ptr
+ XORQ t1, t1
+ // *2
+ ADDQ acc1, acc1
+ ADCQ acc2, acc2
+ ADCQ acc3, acc3
+ ADCQ acc4, acc4
+ ADCQ acc5, acc5
+ ADCQ y_ptr, y_ptr
+ ADCQ $0, t1
+ // Missing products
+ MOVQ (8*0)(x_ptr), AX
+ MULQ AX
+ MOVQ AX, acc0
+ MOVQ DX, t0
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc1
+ ADCQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t0
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc3
+ ADCQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t0
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc5
+ ADCQ AX, y_ptr
+ ADCQ DX, t1
+ MOVQ t1, x_ptr
+ // First reduction step
+ MOVQ acc0, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc0
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc1
+ ADCQ $0, DX
+ ADDQ AX, acc1
+
+ MOVQ t0, t1
+ ADCQ DX, acc2
+ ADCQ $0, t1
+ SUBQ t0, acc2
+ SBBQ $0, t1
+
+ MOVQ t0, AX
+ MOVQ t0, DX
+ MOVQ t0, acc0
+ SHLQ $32, AX
+ SHRQ $32, DX
+
+ ADDQ t1, acc3
+ ADCQ $0, acc0
+ SUBQ AX, acc3
+ SBBQ DX, acc0
+ // Second reduction step
+ MOVQ acc1, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc2
+ ADCQ $0, DX
+ ADDQ AX, acc2
+
+ MOVQ t0, t1
+ ADCQ DX, acc3
+ ADCQ $0, t1
+ SUBQ t0, acc3
+ SBBQ $0, t1
+
+ MOVQ t0, AX
+ MOVQ t0, DX
+ MOVQ t0, acc1
+ SHLQ $32, AX
+ SHRQ $32, DX
+
+ ADDQ t1, acc0
+ ADCQ $0, acc1
+ SUBQ AX, acc0
+ SBBQ DX, acc1
+ // Third reduction step
+ MOVQ acc2, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+
+ MOVQ t0, t1
+ ADCQ DX, acc0
+ ADCQ $0, t1
+ SUBQ t0, acc0
+ SBBQ $0, t1
+
+ MOVQ t0, AX
+ MOVQ t0, DX
+ MOVQ t0, acc2
+ SHLQ $32, AX
+ SHRQ $32, DX
+
+ ADDQ t1, acc1
+ ADCQ $0, acc2
+ SUBQ AX, acc1
+ SBBQ DX, acc2
+ // Last reduction step
+ MOVQ acc3, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc0
+ ADCQ $0, DX
+ ADDQ AX, acc0
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ t0, t1
+ ADCQ DX, acc1
+ ADCQ $0, t1
+ SUBQ t0, acc1
+ SBBQ $0, t1
+
+ MOVQ t0, AX
+ MOVQ t0, DX
+ MOVQ t0, acc3
+ SHLQ $32, AX
+ SHRQ $32, DX
+
+ ADDQ t1, acc2
+ ADCQ $0, acc3
+ SUBQ AX, acc2
+ SBBQ DX, acc3
+ XORQ t0, t0
+ // Add bits [511:256] of the sqr result
+ ADCQ acc4, acc0
+ ADCQ acc5, acc1
+ ADCQ y_ptr, acc2
+ ADCQ x_ptr, acc3
+ ADCQ $0, t0
+
+ MOVQ acc0, acc4
+ MOVQ acc1, acc5
+ MOVQ acc2, y_ptr
+ MOVQ acc3, t1
+	// Subtract p256ord
+	SUBQ p256ord<>+0x00(SB), acc0
+	SBBQ p256ord<>+0x08(SB), acc1
+ SBBQ p256ord<>+0x10(SB), acc2
+ SBBQ p256ord<>+0x18(SB), acc3
+ SBBQ $0, t0
+
+ CMOVQCS acc4, acc0
+ CMOVQCS acc5, acc1
+ CMOVQCS y_ptr, acc2
+ CMOVQCS t1, acc3
+
+ MOVQ acc0, (8*0)(res_ptr)
+ MOVQ acc1, (8*1)(res_ptr)
+ MOVQ acc2, (8*2)(res_ptr)
+ MOVQ acc3, (8*3)(res_ptr)
+ MOVQ res_ptr, x_ptr
+ DECQ BX
+ JNE ordSqrLoop
+
+ RET
+/* ---------------------------------------*/
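One Montgomery squaring maps a·R to a²·R, so n trips through ordSqrLoop map a·R to a^(2ⁿ)·R. Behaviorally, though much slower and with a memory round-trip per step, it matches this sketch built on the p256OrdMul declared later in this change:

    func ordSqrN(res, in *p256OrdElement, n int) {
        p256OrdMul(res, in, in)
        for i := 1; i < n; i++ {
            p256OrdMul(res, res, res)
        }
    }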
+#undef res_ptr
+#undef x_ptr
+#undef y_ptr
+
+#undef acc0
+#undef acc1
+#undef acc2
+#undef acc3
+#undef acc4
+#undef acc5
+#undef t0
+#undef t1
+/* ---------------------------------------*/
+#define mul0 AX
+#define mul1 DX
+#define acc0 BX
+#define acc1 CX
+#define acc2 R8
+#define acc3 R9
+#define acc4 R10
+#define acc5 R11
+#define acc6 R12
+#define acc7 R13
+#define t0 R14
+#define t1 R15
+#define t2 DI
+#define t3 SI
+#define hlp BP
+/* ---------------------------------------*/
+TEXT p256SubInternal(SB),NOSPLIT,$0
+ XORQ mul0, mul0
+ SUBQ t0, acc4
+ SBBQ t1, acc5
+ SBBQ t2, acc6
+ SBBQ t3, acc7
+ SBBQ $0, mul0
+
+ MOVQ acc4, acc0
+ MOVQ acc5, acc1
+ MOVQ acc6, acc2
+ MOVQ acc7, acc3
+
+ ADDQ $-1, acc4
+ ADCQ p256const0<>(SB), acc5
+ ADCQ $0, acc6
+ ADCQ p256const1<>(SB), acc7
+ ANDQ $1, mul0
+
+ CMOVQEQ acc0, acc4
+ CMOVQEQ acc1, acc5
+ CMOVQEQ acc2, acc6
+ CMOVQEQ acc3, acc7
+
+ RET
+/* ---------------------------------------*/
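p256SubInternal computes acc − t and, when the subtraction borrows, adds p back; the ANDQ $1 plus CMOVQEQ pair keeps the raw difference when no borrow occurred. An equivalent branchless sketch, reusing the p256 limb array assumed earlier:

    func subModP(a, b [4]uint64) (d [4]uint64) {
        var borrow uint64
        d[0], borrow = bits.Sub64(a[0], b[0], 0)
        d[1], borrow = bits.Sub64(a[1], b[1], borrow)
        d[2], borrow = bits.Sub64(a[2], b[2], borrow)
        d[3], borrow = bits.Sub64(a[3], b[3], borrow)
        mask := -borrow // all ones when a < b
        var c uint64
        d[0], c = bits.Add64(d[0], p256[0]&mask, 0)
        d[1], c = bits.Add64(d[1], p256[1]&mask, c)
        d[2], c = bits.Add64(d[2], p256[2]&mask, c)
        d[3], _ = bits.Add64(d[3], p256[3]&mask, c)
        return d
    }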
+TEXT p256MulInternal(SB),NOSPLIT,$8
+ MOVQ acc4, mul0
+ MULQ t0
+ MOVQ mul0, acc0
+ MOVQ mul1, acc1
+
+ MOVQ acc4, mul0
+ MULQ t1
+ ADDQ mul0, acc1
+ ADCQ $0, mul1
+ MOVQ mul1, acc2
+
+ MOVQ acc4, mul0
+ MULQ t2
+ ADDQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, acc3
+
+ MOVQ acc4, mul0
+ MULQ t3
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, acc4
+
+ MOVQ acc5, mul0
+ MULQ t0
+ ADDQ mul0, acc1
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc5, mul0
+ MULQ t1
+ ADDQ hlp, acc2
+ ADCQ $0, mul1
+ ADDQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc5, mul0
+ MULQ t2
+ ADDQ hlp, acc3
+ ADCQ $0, mul1
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc5, mul0
+ MULQ t3
+ ADDQ hlp, acc4
+ ADCQ $0, mul1
+ ADDQ mul0, acc4
+ ADCQ $0, mul1
+ MOVQ mul1, acc5
+
+ MOVQ acc6, mul0
+ MULQ t0
+ ADDQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc6, mul0
+ MULQ t1
+ ADDQ hlp, acc3
+ ADCQ $0, mul1
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc6, mul0
+ MULQ t2
+ ADDQ hlp, acc4
+ ADCQ $0, mul1
+ ADDQ mul0, acc4
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc6, mul0
+ MULQ t3
+ ADDQ hlp, acc5
+ ADCQ $0, mul1
+ ADDQ mul0, acc5
+ ADCQ $0, mul1
+ MOVQ mul1, acc6
+
+ MOVQ acc7, mul0
+ MULQ t0
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc7, mul0
+ MULQ t1
+ ADDQ hlp, acc4
+ ADCQ $0, mul1
+ ADDQ mul0, acc4
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc7, mul0
+ MULQ t2
+ ADDQ hlp, acc5
+ ADCQ $0, mul1
+ ADDQ mul0, acc5
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc7, mul0
+ MULQ t3
+ ADDQ hlp, acc6
+ ADCQ $0, mul1
+ ADDQ mul0, acc6
+ ADCQ $0, mul1
+ MOVQ mul1, acc7
+ // First reduction step
+ MOVQ acc0, mul0
+ MOVQ acc0, hlp
+ SHLQ $32, acc0
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc0, acc1
+ ADCQ hlp, acc2
+ ADCQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, acc0
+ // Second reduction step
+ MOVQ acc1, mul0
+ MOVQ acc1, hlp
+ SHLQ $32, acc1
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc1, acc2
+ ADCQ hlp, acc3
+ ADCQ mul0, acc0
+ ADCQ $0, mul1
+ MOVQ mul1, acc1
+ // Third reduction step
+ MOVQ acc2, mul0
+ MOVQ acc2, hlp
+ SHLQ $32, acc2
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc2, acc3
+ ADCQ hlp, acc0
+ ADCQ mul0, acc1
+ ADCQ $0, mul1
+ MOVQ mul1, acc2
+ // Last reduction step
+ MOVQ acc3, mul0
+ MOVQ acc3, hlp
+ SHLQ $32, acc3
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc3, acc0
+ ADCQ hlp, acc1
+ ADCQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, acc3
+ MOVQ $0, BP
+ // Add bits [511:256] of the result
+ ADCQ acc0, acc4
+ ADCQ acc1, acc5
+ ADCQ acc2, acc6
+ ADCQ acc3, acc7
+ ADCQ $0, hlp
+ // Copy result
+ MOVQ acc4, acc0
+ MOVQ acc5, acc1
+ MOVQ acc6, acc2
+ MOVQ acc7, acc3
+ // Subtract p256
+ SUBQ $-1, acc4
+	SBBQ p256const0<>(SB), acc5
+ SBBQ $0, acc6
+ SBBQ p256const1<>(SB), acc7
+ SBBQ $0, hlp
+ // If the result of the subtraction is negative, restore the previous result
+ CMOVQCS acc0, acc4
+ CMOVQCS acc1, acc5
+ CMOVQCS acc2, acc6
+ CMOVQCS acc3, acc7
+
+ RET
+/* ---------------------------------------*/
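Each reduction step in p256MulInternal folds the lowest limb m = acc0 into the accumulator by adding m·p, which zeroes the low limb and shifts the window down by one limb. Because p's limbs are (2⁶⁴−1, 2³²−1, 0, p256const1), the product costs only two shifts and one MULQ. One step, sketched from the instruction sequence (assumed helper, carry bounds checked by hand):

    func redStep(t0, t1, t2, t3 uint64) (r0, r1, r2, r3 uint64) {
        hi, lo := bits.Mul64(t0, 0xffffffff00000001) // t0 * p256const1
        var c uint64
        r0, c = bits.Add64(t1, t0<<32, 0) // t0·(2⁶⁴−1) zeroes limb 0 and
        r1, c = bits.Add64(t2, t0>>32, c) // carries t0·2³² into limbs 1-2
        r2, c = bits.Add64(t3, lo, c)
        r3 = hi + c // no overflow: hi <= 0xffffffff00000000
        return
    }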
+TEXT p256SqrInternal(SB),NOSPLIT,$8
+
+ MOVQ acc4, mul0
+ MULQ acc5
+ MOVQ mul0, acc1
+ MOVQ mul1, acc2
+
+ MOVQ acc4, mul0
+ MULQ acc6
+ ADDQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, acc3
+
+ MOVQ acc4, mul0
+ MULQ acc7
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, t0
+
+ MOVQ acc5, mul0
+ MULQ acc6
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc5, mul0
+ MULQ acc7
+ ADDQ hlp, t0
+ ADCQ $0, mul1
+ ADDQ mul0, t0
+ ADCQ $0, mul1
+ MOVQ mul1, t1
+
+ MOVQ acc6, mul0
+ MULQ acc7
+ ADDQ mul0, t1
+ ADCQ $0, mul1
+ MOVQ mul1, t2
+ XORQ t3, t3
+ // *2
+ ADDQ acc1, acc1
+ ADCQ acc2, acc2
+ ADCQ acc3, acc3
+ ADCQ t0, t0
+ ADCQ t1, t1
+ ADCQ t2, t2
+ ADCQ $0, t3
+ // Missing products
+ MOVQ acc4, mul0
+ MULQ mul0
+ MOVQ mul0, acc0
+ MOVQ DX, acc4
+
+ MOVQ acc5, mul0
+ MULQ mul0
+ ADDQ acc4, acc1
+ ADCQ mul0, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc4
+
+ MOVQ acc6, mul0
+ MULQ mul0
+ ADDQ acc4, acc3
+ ADCQ mul0, t0
+ ADCQ $0, DX
+ MOVQ DX, acc4
+
+ MOVQ acc7, mul0
+ MULQ mul0
+ ADDQ acc4, t1
+ ADCQ mul0, t2
+ ADCQ DX, t3
+ // First reduction step
+ MOVQ acc0, mul0
+ MOVQ acc0, hlp
+ SHLQ $32, acc0
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc0, acc1
+ ADCQ hlp, acc2
+ ADCQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, acc0
+ // Second reduction step
+ MOVQ acc1, mul0
+ MOVQ acc1, hlp
+ SHLQ $32, acc1
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc1, acc2
+ ADCQ hlp, acc3
+ ADCQ mul0, acc0
+ ADCQ $0, mul1
+ MOVQ mul1, acc1
+ // Third reduction step
+ MOVQ acc2, mul0
+ MOVQ acc2, hlp
+ SHLQ $32, acc2
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc2, acc3
+ ADCQ hlp, acc0
+ ADCQ mul0, acc1
+ ADCQ $0, mul1
+ MOVQ mul1, acc2
+ // Last reduction step
+ MOVQ acc3, mul0
+ MOVQ acc3, hlp
+ SHLQ $32, acc3
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc3, acc0
+ ADCQ hlp, acc1
+ ADCQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, acc3
+ MOVQ $0, BP
+ // Add bits [511:256] of the result
+ ADCQ acc0, t0
+ ADCQ acc1, t1
+ ADCQ acc2, t2
+ ADCQ acc3, t3
+ ADCQ $0, hlp
+ // Copy result
+ MOVQ t0, acc4
+ MOVQ t1, acc5
+ MOVQ t2, acc6
+ MOVQ t3, acc7
+ // Subtract p256
+ SUBQ $-1, acc4
+	SBBQ p256const0<>(SB), acc5
+ SBBQ $0, acc6
+ SBBQ p256const1<>(SB), acc7
+ SBBQ $0, hlp
+ // If the result of the subtraction is negative, restore the previous result
+ CMOVQCS t0, acc4
+ CMOVQCS t1, acc5
+ CMOVQCS t2, acc6
+ CMOVQCS t3, acc7
+
+ RET
+/* ---------------------------------------*/
+#define p256MulBy2Inline\
+ XORQ mul0, mul0;\
+ ADDQ acc4, acc4;\
+ ADCQ acc5, acc5;\
+ ADCQ acc6, acc6;\
+ ADCQ acc7, acc7;\
+ ADCQ $0, mul0;\
+ MOVQ acc4, t0;\
+ MOVQ acc5, t1;\
+ MOVQ acc6, t2;\
+ MOVQ acc7, t3;\
+ SUBQ $-1, t0;\
+ SBBQ p256const0<>(SB), t1;\
+ SBBQ $0, t2;\
+ SBBQ p256const1<>(SB), t3;\
+ SBBQ $0, mul0;\
+ CMOVQCS acc4, t0;\
+ CMOVQCS acc5, t1;\
+ CMOVQCS acc6, t2;\
+ CMOVQCS acc7, t3;
+/* ---------------------------------------*/
+#define p256AddInline \
+ XORQ mul0, mul0;\
+ ADDQ t0, acc4;\
+ ADCQ t1, acc5;\
+ ADCQ t2, acc6;\
+ ADCQ t3, acc7;\
+ ADCQ $0, mul0;\
+ MOVQ acc4, t0;\
+ MOVQ acc5, t1;\
+ MOVQ acc6, t2;\
+ MOVQ acc7, t3;\
+ SUBQ $-1, t0;\
+ SBBQ p256const0<>(SB), t1;\
+ SBBQ $0, t2;\
+ SBBQ p256const1<>(SB), t3;\
+ SBBQ $0, mul0;\
+ CMOVQCS acc4, t0;\
+ CMOVQCS acc5, t1;\
+ CMOVQCS acc6, t2;\
+ CMOVQCS acc7, t3;
+/* ---------------------------------------*/
+#define LDacc(src) MOVQ src(8*0), acc4; MOVQ src(8*1), acc5; MOVQ src(8*2), acc6; MOVQ src(8*3), acc7
+#define LDt(src) MOVQ src(8*0), t0; MOVQ src(8*1), t1; MOVQ src(8*2), t2; MOVQ src(8*3), t3
+#define ST(dst) MOVQ acc4, dst(8*0); MOVQ acc5, dst(8*1); MOVQ acc6, dst(8*2); MOVQ acc7, dst(8*3)
+#define STt(dst) MOVQ t0, dst(8*0); MOVQ t1, dst(8*1); MOVQ t2, dst(8*2); MOVQ t3, dst(8*3)
+#define acc2t MOVQ acc4, t0; MOVQ acc5, t1; MOVQ acc6, t2; MOVQ acc7, t3
+#define t2acc MOVQ t0, acc4; MOVQ t1, acc5; MOVQ t2, acc6; MOVQ t3, acc7
+/* ---------------------------------------*/
+#define x1in(off) (32*0 + off)(SP)
+#define y1in(off) (32*1 + off)(SP)
+#define z1in(off) (32*2 + off)(SP)
+#define x2in(off) (32*3 + off)(SP)
+#define y2in(off) (32*4 + off)(SP)
+#define xout(off) (32*5 + off)(SP)
+#define yout(off) (32*6 + off)(SP)
+#define zout(off) (32*7 + off)(SP)
+#define s2(off) (32*8 + off)(SP)
+#define z1sqr(off) (32*9 + off)(SP)
+#define h(off) (32*10 + off)(SP)
+#define r(off) (32*11 + off)(SP)
+#define hsqr(off) (32*12 + off)(SP)
+#define rsqr(off) (32*13 + off)(SP)
+#define hcub(off) (32*14 + off)(SP)
+#define rptr (32*15)(SP)
+#define sel_save (32*15 + 8)(SP)
+#define zero_save (32*15 + 8 + 4)(SP)
+
+// func p256PointAddAffineAsm(res, in1 *P256Point, in2 *p256AffinePoint, sign, sel, zero int)
+TEXT ·p256PointAddAffineAsm(SB),0,$512-48
+ // Move input to stack in order to free registers
+ MOVQ res+0(FP), AX
+ MOVQ in1+8(FP), BX
+ MOVQ in2+16(FP), CX
+ MOVQ sign+24(FP), DX
+ MOVQ sel+32(FP), t1
+ MOVQ zero+40(FP), t2
+
+ MOVOU (16*0)(BX), X0
+ MOVOU (16*1)(BX), X1
+ MOVOU (16*2)(BX), X2
+ MOVOU (16*3)(BX), X3
+ MOVOU (16*4)(BX), X4
+ MOVOU (16*5)(BX), X5
+
+ MOVOU X0, x1in(16*0)
+ MOVOU X1, x1in(16*1)
+ MOVOU X2, y1in(16*0)
+ MOVOU X3, y1in(16*1)
+ MOVOU X4, z1in(16*0)
+ MOVOU X5, z1in(16*1)
+
+ MOVOU (16*0)(CX), X0
+ MOVOU (16*1)(CX), X1
+
+ MOVOU X0, x2in(16*0)
+ MOVOU X1, x2in(16*1)
+ // Store pointer to result
+ MOVQ mul0, rptr
+ MOVL t1, sel_save
+ MOVL t2, zero_save
+ // Negate y2in based on sign
+ MOVQ (16*2 + 8*0)(CX), acc4
+ MOVQ (16*2 + 8*1)(CX), acc5
+ MOVQ (16*2 + 8*2)(CX), acc6
+ MOVQ (16*2 + 8*3)(CX), acc7
+ MOVQ $-1, acc0
+ MOVQ p256const0<>(SB), acc1
+ MOVQ $0, acc2
+ MOVQ p256const1<>(SB), acc3
+ XORQ mul0, mul0
+ // Speculatively subtract
+ SUBQ acc4, acc0
+ SBBQ acc5, acc1
+ SBBQ acc6, acc2
+ SBBQ acc7, acc3
+ SBBQ $0, mul0
+ MOVQ acc0, t0
+ MOVQ acc1, t1
+ MOVQ acc2, t2
+ MOVQ acc3, t3
+ // Add in case the operand was > p256
+ ADDQ $-1, acc0
+ ADCQ p256const0<>(SB), acc1
+ ADCQ $0, acc2
+ ADCQ p256const1<>(SB), acc3
+ ADCQ $0, mul0
+ CMOVQNE t0, acc0
+ CMOVQNE t1, acc1
+ CMOVQNE t2, acc2
+ CMOVQNE t3, acc3
+ // If condition is 0, keep original value
+ TESTQ DX, DX
+ CMOVQEQ acc4, acc0
+ CMOVQEQ acc5, acc1
+ CMOVQEQ acc6, acc2
+ CMOVQEQ acc7, acc3
+ // Store result
+ MOVQ acc0, y2in(8*0)
+ MOVQ acc1, y2in(8*1)
+ MOVQ acc2, y2in(8*2)
+ MOVQ acc3, y2in(8*3)
+ // Begin point add
+ LDacc (z1in)
+ CALL p256SqrInternal(SB) // z1ˆ2
+ ST (z1sqr)
+
+ LDt (x2in)
+ CALL p256MulInternal(SB) // x2 * z1ˆ2
+
+ LDt (x1in)
+ CALL p256SubInternal(SB) // h = u2 - u1
+ ST (h)
+
+ LDt (z1in)
+ CALL p256MulInternal(SB) // z3 = h * z1
+ ST (zout)
+
+ LDacc (z1sqr)
+ CALL p256MulInternal(SB) // z1ˆ3
+
+ LDt (y2in)
+ CALL p256MulInternal(SB) // s2 = y2 * z1ˆ3
+ ST (s2)
+
+ LDt (y1in)
+ CALL p256SubInternal(SB) // r = s2 - s1
+ ST (r)
+
+ CALL p256SqrInternal(SB) // rsqr = rˆ2
+ ST (rsqr)
+
+ LDacc (h)
+ CALL p256SqrInternal(SB) // hsqr = hˆ2
+ ST (hsqr)
+
+ LDt (h)
+ CALL p256MulInternal(SB) // hcub = hˆ3
+ ST (hcub)
+
+ LDt (y1in)
+ CALL p256MulInternal(SB) // y1 * hˆ3
+ ST (s2)
+
+ LDacc (x1in)
+ LDt (hsqr)
+ CALL p256MulInternal(SB) // u1 * hˆ2
+ ST (h)
+
+ p256MulBy2Inline // u1 * hˆ2 * 2, inline
+ LDacc (rsqr)
+ CALL p256SubInternal(SB) // rˆ2 - u1 * hˆ2 * 2
+
+ LDt (hcub)
+ CALL p256SubInternal(SB)
+ ST (xout)
+
+ MOVQ acc4, t0
+ MOVQ acc5, t1
+ MOVQ acc6, t2
+ MOVQ acc7, t3
+ LDacc (h)
+ CALL p256SubInternal(SB)
+
+ LDt (r)
+ CALL p256MulInternal(SB)
+
+ LDt (s2)
+ CALL p256SubInternal(SB)
+ ST (yout)
+ // Load stored values from stack
+ MOVQ rptr, AX
+ MOVL sel_save, BX
+ MOVL zero_save, CX
+ // The result is not valid if (sel == 0), conditional choose
+ MOVOU xout(16*0), X0
+ MOVOU xout(16*1), X1
+ MOVOU yout(16*0), X2
+ MOVOU yout(16*1), X3
+ MOVOU zout(16*0), X4
+ MOVOU zout(16*1), X5
+
+ MOVL BX, X6
+ MOVL CX, X7
+
+ PXOR X8, X8
+ PCMPEQL X9, X9
+
+ PSHUFD $0, X6, X6
+ PSHUFD $0, X7, X7
+
+ PCMPEQL X8, X6
+ PCMPEQL X8, X7
+
+ MOVOU X6, X15
+ PANDN X9, X15
+
+ MOVOU x1in(16*0), X9
+ MOVOU x1in(16*1), X10
+ MOVOU y1in(16*0), X11
+ MOVOU y1in(16*1), X12
+ MOVOU z1in(16*0), X13
+ MOVOU z1in(16*1), X14
+
+ PAND X15, X0
+ PAND X15, X1
+ PAND X15, X2
+ PAND X15, X3
+ PAND X15, X4
+ PAND X15, X5
+
+ PAND X6, X9
+ PAND X6, X10
+ PAND X6, X11
+ PAND X6, X12
+ PAND X6, X13
+ PAND X6, X14
+
+ PXOR X9, X0
+ PXOR X10, X1
+ PXOR X11, X2
+ PXOR X12, X3
+ PXOR X13, X4
+ PXOR X14, X5
+ // Similarly if zero == 0
+ PCMPEQL X9, X9
+ MOVOU X7, X15
+ PANDN X9, X15
+
+ MOVOU x2in(16*0), X9
+ MOVOU x2in(16*1), X10
+ MOVOU y2in(16*0), X11
+ MOVOU y2in(16*1), X12
+ MOVOU p256one<>+0x00(SB), X13
+ MOVOU p256one<>+0x10(SB), X14
+
+ PAND X15, X0
+ PAND X15, X1
+ PAND X15, X2
+ PAND X15, X3
+ PAND X15, X4
+ PAND X15, X5
+
+ PAND X7, X9
+ PAND X7, X10
+ PAND X7, X11
+ PAND X7, X12
+ PAND X7, X13
+ PAND X7, X14
+
+ PXOR X9, X0
+ PXOR X10, X1
+ PXOR X11, X2
+ PXOR X12, X3
+ PXOR X13, X4
+ PXOR X14, X5
+ // Finally output the result
+ MOVOU X0, (16*0)(AX)
+ MOVOU X1, (16*1)(AX)
+ MOVOU X2, (16*2)(AX)
+ MOVOU X3, (16*3)(AX)
+ MOVOU X4, (16*4)(AX)
+ MOVOU X5, (16*5)(AX)
+ MOVQ $0, rptr
+
+ RET
+#undef x1in
+#undef y1in
+#undef z1in
+#undef x2in
+#undef y2in
+#undef xout
+#undef yout
+#undef zout
+#undef s2
+#undef z1sqr
+#undef h
+#undef r
+#undef hsqr
+#undef rsqr
+#undef hcub
+#undef rptr
+#undef sel_save
+#undef zero_save
+
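In field notation, the p256PointAddAffineAsm sequence above is the standard Jacobian mixed addition with z2 = 1 (so u1 = x1 and s1 = y1):

    z1z1 = z1²        u2 = x2·z1z1      h  = u2 − x1      z3 = h·z1
    s2   = y2·z1³     r  = s2 − y1
    x3   = r² − h³ − 2·x1·h²
    y3   = r·(x1·h² − x3) − y1·h³

The SSE tail then conditionally replaces the sum with in1 (when sel == 0) or with (x2in, y2in, 1) (when zero == 0), using the same masking trick as p256Select.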
+// p256IsZero returns 1 in AX if [acc4..acc7] represents zero (in either of
+// its two encodings, 0 and p), and 0 otherwise. It writes to [acc4..acc7],
+// t0 and t1.
+TEXT p256IsZero(SB),NOSPLIT,$0
+ // AX contains a flag that is set if the input is zero.
+ XORQ AX, AX
+ MOVQ $1, t1
+
+ // Check whether [acc4..acc7] are all zero.
+ MOVQ acc4, t0
+ ORQ acc5, t0
+ ORQ acc6, t0
+ ORQ acc7, t0
+
+ // Set the zero flag if so. (CMOV of a constant to a register doesn't
+ // appear to be supported in Go. Thus t1 = 1.)
+ CMOVQEQ t1, AX
+
+ // XOR [acc4..acc7] with P and compare with zero again.
+ XORQ $-1, acc4
+ XORQ p256const0<>(SB), acc5
+ XORQ p256const1<>(SB), acc7
+ ORQ acc5, acc4
+ ORQ acc6, acc4
+ ORQ acc7, acc4
+
+ // Set the zero flag if so.
+ CMOVQEQ t1, AX
+ RET
+
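p256IsZero accepts both 0 and p as encodings of zero, since the representation is not unique at zero. A functional Go sketch (the assembly stays branch-free through CMOVQEQ; the branch here is for clarity only):

    func isZeroModP(v [4]uint64) int {
        z := v[0] | v[1] | v[2] | v[3]
        zp := (v[0] ^ p256[0]) | (v[1] ^ p256[1]) | v[2] | (v[3] ^ p256[3])
        if z == 0 || zp == 0 {
            return 1
        }
        return 0
    }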
+/* ---------------------------------------*/
+#define x1in(off) (32*0 + off)(SP)
+#define y1in(off) (32*1 + off)(SP)
+#define z1in(off) (32*2 + off)(SP)
+#define x2in(off) (32*3 + off)(SP)
+#define y2in(off) (32*4 + off)(SP)
+#define z2in(off) (32*5 + off)(SP)
+
+#define xout(off) (32*6 + off)(SP)
+#define yout(off) (32*7 + off)(SP)
+#define zout(off) (32*8 + off)(SP)
+
+#define u1(off) (32*9 + off)(SP)
+#define u2(off) (32*10 + off)(SP)
+#define s1(off) (32*11 + off)(SP)
+#define s2(off) (32*12 + off)(SP)
+#define z1sqr(off) (32*13 + off)(SP)
+#define z2sqr(off) (32*14 + off)(SP)
+#define h(off) (32*15 + off)(SP)
+#define r(off) (32*16 + off)(SP)
+#define hsqr(off) (32*17 + off)(SP)
+#define rsqr(off) (32*18 + off)(SP)
+#define hcub(off) (32*19 + off)(SP)
+#define rptr (32*20)(SP)
+#define points_eq (32*20+8)(SP)
+
+// func p256PointAddAsm(res, in1, in2 *P256Point) int
+TEXT ·p256PointAddAsm(SB),0,$680-32
+ // See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
+ // Move input to stack in order to free registers
+ MOVQ res+0(FP), AX
+ MOVQ in1+8(FP), BX
+ MOVQ in2+16(FP), CX
+
+ MOVOU (16*0)(BX), X0
+ MOVOU (16*1)(BX), X1
+ MOVOU (16*2)(BX), X2
+ MOVOU (16*3)(BX), X3
+ MOVOU (16*4)(BX), X4
+ MOVOU (16*5)(BX), X5
+
+ MOVOU X0, x1in(16*0)
+ MOVOU X1, x1in(16*1)
+ MOVOU X2, y1in(16*0)
+ MOVOU X3, y1in(16*1)
+ MOVOU X4, z1in(16*0)
+ MOVOU X5, z1in(16*1)
+
+ MOVOU (16*0)(CX), X0
+ MOVOU (16*1)(CX), X1
+ MOVOU (16*2)(CX), X2
+ MOVOU (16*3)(CX), X3
+ MOVOU (16*4)(CX), X4
+ MOVOU (16*5)(CX), X5
+
+ MOVOU X0, x2in(16*0)
+ MOVOU X1, x2in(16*1)
+ MOVOU X2, y2in(16*0)
+ MOVOU X3, y2in(16*1)
+ MOVOU X4, z2in(16*0)
+ MOVOU X5, z2in(16*1)
+ // Store pointer to result
+ MOVQ AX, rptr
+ // Begin point add
+ LDacc (z2in)
+ CALL p256SqrInternal(SB) // z2ˆ2
+ ST (z2sqr)
+ LDt (z2in)
+ CALL p256MulInternal(SB) // z2ˆ3
+ LDt (y1in)
+ CALL p256MulInternal(SB) // s1 = z2ˆ3*y1
+ ST (s1)
+
+ LDacc (z1in)
+ CALL p256SqrInternal(SB) // z1ˆ2
+ ST (z1sqr)
+ LDt (z1in)
+ CALL p256MulInternal(SB) // z1ˆ3
+ LDt (y2in)
+ CALL p256MulInternal(SB) // s2 = z1ˆ3*y2
+ ST (s2)
+
+ LDt (s1)
+ CALL p256SubInternal(SB) // r = s2 - s1
+ ST (r)
+ CALL p256IsZero(SB)
+ MOVQ AX, points_eq
+
+ LDacc (z2sqr)
+ LDt (x1in)
+ CALL p256MulInternal(SB) // u1 = x1 * z2ˆ2
+ ST (u1)
+ LDacc (z1sqr)
+ LDt (x2in)
+ CALL p256MulInternal(SB) // u2 = x2 * z1ˆ2
+ ST (u2)
+
+ LDt (u1)
+ CALL p256SubInternal(SB) // h = u2 - u1
+ ST (h)
+ CALL p256IsZero(SB)
+ ANDQ points_eq, AX
+ MOVQ AX, points_eq
+
+ LDacc (r)
+ CALL p256SqrInternal(SB) // rsqr = rˆ2
+ ST (rsqr)
+
+ LDacc (h)
+ CALL p256SqrInternal(SB) // hsqr = hˆ2
+ ST (hsqr)
+
+ LDt (h)
+ CALL p256MulInternal(SB) // hcub = hˆ3
+ ST (hcub)
+
+ LDt (s1)
+ CALL p256MulInternal(SB)
+ ST (s2)
+
+ LDacc (z1in)
+ LDt (z2in)
+ CALL p256MulInternal(SB) // z1 * z2
+ LDt (h)
+ CALL p256MulInternal(SB) // z1 * z2 * h
+ ST (zout)
+
+ LDacc (hsqr)
+ LDt (u1)
+ CALL p256MulInternal(SB) // hˆ2 * u1
+ ST (u2)
+
+ p256MulBy2Inline // u1 * hˆ2 * 2, inline
+ LDacc (rsqr)
+ CALL p256SubInternal(SB) // rˆ2 - u1 * hˆ2 * 2
+
+ LDt (hcub)
+ CALL p256SubInternal(SB)
+ ST (xout)
+
+ MOVQ acc4, t0
+ MOVQ acc5, t1
+ MOVQ acc6, t2
+ MOVQ acc7, t3
+ LDacc (u2)
+ CALL p256SubInternal(SB)
+
+ LDt (r)
+ CALL p256MulInternal(SB)
+
+ LDt (s2)
+ CALL p256SubInternal(SB)
+ ST (yout)
+
+ MOVOU xout(16*0), X0
+ MOVOU xout(16*1), X1
+ MOVOU yout(16*0), X2
+ MOVOU yout(16*1), X3
+ MOVOU zout(16*0), X4
+ MOVOU zout(16*1), X5
+ // Finally output the result
+ MOVQ rptr, AX
+ MOVQ $0, rptr
+ MOVOU X0, (16*0)(AX)
+ MOVOU X1, (16*1)(AX)
+ MOVOU X2, (16*2)(AX)
+ MOVOU X3, (16*3)(AX)
+ MOVOU X4, (16*4)(AX)
+ MOVOU X5, (16*5)(AX)
+
+ MOVQ points_eq, AX
+ MOVQ AX, ret+24(FP)
+
+ RET
+#undef x1in
+#undef y1in
+#undef z1in
+#undef x2in
+#undef y2in
+#undef z2in
+#undef xout
+#undef yout
+#undef zout
+#undef s1
+#undef s2
+#undef u1
+#undef u2
+#undef z1sqr
+#undef z2sqr
+#undef h
+#undef r
+#undef hsqr
+#undef rsqr
+#undef hcub
+#undef rptr
+/* ---------------------------------------*/
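p256PointAddAsm returns 1 exactly when both r = s2 − s1 and h = u2 − u1 came out zero, i.e. in1 and in2 describe the same affine point; the sum written to res is then meaningless. A hedged caller sketch (the real wrapper in the package also handles inputs at infinity):

    if p256PointAddAsm(res, p1, p2) == 1 {
        // The addition formula degenerates for equal points: double instead.
        p256PointDoubleAsm(res, p1)
    }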
+#define x(off) (32*0 + off)(SP)
+#define y(off) (32*1 + off)(SP)
+#define z(off) (32*2 + off)(SP)
+
+#define s(off) (32*3 + off)(SP)
+#define m(off) (32*4 + off)(SP)
+#define zsqr(off) (32*5 + off)(SP)
+#define tmp(off) (32*6 + off)(SP)
+#define rptr (32*7)(SP)
+
+// func p256PointDoubleAsm(res, in *P256Point)
+TEXT ·p256PointDoubleAsm(SB),NOSPLIT,$256-16
+ // Move input to stack in order to free registers
+ MOVQ res+0(FP), AX
+ MOVQ in+8(FP), BX
+
+ MOVOU (16*0)(BX), X0
+ MOVOU (16*1)(BX), X1
+ MOVOU (16*2)(BX), X2
+ MOVOU (16*3)(BX), X3
+ MOVOU (16*4)(BX), X4
+ MOVOU (16*5)(BX), X5
+
+ MOVOU X0, x(16*0)
+ MOVOU X1, x(16*1)
+ MOVOU X2, y(16*0)
+ MOVOU X3, y(16*1)
+ MOVOU X4, z(16*0)
+ MOVOU X5, z(16*1)
+ // Store pointer to result
+ MOVQ AX, rptr
+ // Begin point double
+ LDacc (z)
+ CALL p256SqrInternal(SB)
+ ST (zsqr)
+
+ LDt (x)
+ p256AddInline
+ STt (m)
+
+ LDacc (z)
+ LDt (y)
+ CALL p256MulInternal(SB)
+ p256MulBy2Inline
+ MOVQ rptr, AX
+ // Store z
+ MOVQ t0, (16*4 + 8*0)(AX)
+ MOVQ t1, (16*4 + 8*1)(AX)
+ MOVQ t2, (16*4 + 8*2)(AX)
+ MOVQ t3, (16*4 + 8*3)(AX)
+
+ LDacc (x)
+ LDt (zsqr)
+ CALL p256SubInternal(SB)
+ LDt (m)
+ CALL p256MulInternal(SB)
+ ST (m)
+ // Multiply by 3
+ p256MulBy2Inline
+ LDacc (m)
+ p256AddInline
+ STt (m)
+ ////////////////////////
+ LDacc (y)
+ p256MulBy2Inline
+ t2acc
+ CALL p256SqrInternal(SB)
+ ST (s)
+ CALL p256SqrInternal(SB)
+ // Divide by 2
+ XORQ mul0, mul0
+ MOVQ acc4, t0
+ MOVQ acc5, t1
+ MOVQ acc6, t2
+ MOVQ acc7, t3
+
+ ADDQ $-1, acc4
+ ADCQ p256const0<>(SB), acc5
+ ADCQ $0, acc6
+ ADCQ p256const1<>(SB), acc7
+ ADCQ $0, mul0
+ TESTQ $1, t0
+
+ CMOVQEQ t0, acc4
+ CMOVQEQ t1, acc5
+ CMOVQEQ t2, acc6
+ CMOVQEQ t3, acc7
+ ANDQ t0, mul0
+
+ SHRQ $1, acc5, acc4
+ SHRQ $1, acc6, acc5
+ SHRQ $1, acc7, acc6
+ SHRQ $1, mul0, acc7
+ ST (y)
+ /////////////////////////
+ LDacc (x)
+ LDt (s)
+ CALL p256MulInternal(SB)
+ ST (s)
+ p256MulBy2Inline
+ STt (tmp)
+
+ LDacc (m)
+ CALL p256SqrInternal(SB)
+ LDt (tmp)
+ CALL p256SubInternal(SB)
+
+ MOVQ rptr, AX
+ // Store x
+ MOVQ acc4, (16*0 + 8*0)(AX)
+ MOVQ acc5, (16*0 + 8*1)(AX)
+ MOVQ acc6, (16*0 + 8*2)(AX)
+ MOVQ acc7, (16*0 + 8*3)(AX)
+
+ acc2t
+ LDacc (s)
+ CALL p256SubInternal(SB)
+
+ LDt (m)
+ CALL p256MulInternal(SB)
+
+ LDt (y)
+ CALL p256SubInternal(SB)
+ MOVQ rptr, AX
+ // Store y
+ MOVQ acc4, (16*2 + 8*0)(AX)
+ MOVQ acc5, (16*2 + 8*1)(AX)
+ MOVQ acc6, (16*2 + 8*2)(AX)
+ MOVQ acc7, (16*2 + 8*3)(AX)
+ ///////////////////////
+ MOVQ $0, rptr
+
+ RET
+/* ---------------------------------------*/
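The "Divide by 2" block in p256PointDoubleAsm halves a field element: it adds p when the value is odd, then shifts the 257-bit sum right by one with the double-register SHRQ forms. The same computation in Go (sketch, p256 limbs as assumed earlier):

    func halfModP(v [4]uint64) [4]uint64 {
        mask := -(v[0] & 1) // all ones when v is odd
        var c uint64
        v[0], c = bits.Add64(v[0], p256[0]&mask, 0)
        v[1], c = bits.Add64(v[1], p256[1]&mask, c)
        v[2], c = bits.Add64(v[2], p256[2]&mask, c)
        v[3], c = bits.Add64(v[3], p256[3]&mask, c)
        v[0] = v[0]>>1 | v[1]<<63
        v[1] = v[1]>>1 | v[2]<<63
        v[2] = v[2]>>1 | v[3]<<63
        v[3] = v[3]>>1 | c<<63
        return v
    }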
diff --git a/contrib/go/_std_1.19/src/crypto/internal/nistec/p256_asm_ordinv.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/p256_asm_ordinv.go
new file mode 100644
index 0000000000..86a7a230bd
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/p256_asm_ordinv.go
@@ -0,0 +1,101 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64
+
+package nistec
+
+import "errors"
+
+// Montgomery multiplication modulo ord(G). Sets res = in1 * in2 * R⁻¹.
+//
+//go:noescape
+func p256OrdMul(res, in1, in2 *p256OrdElement)
+
+// Montgomery square modulo ord(G), repeated n times (n >= 1).
+//
+//go:noescape
+func p256OrdSqr(res, in *p256OrdElement, n int)
+
+func P256OrdInverse(k []byte) ([]byte, error) {
+ if len(k) != 32 {
+ return nil, errors.New("invalid scalar length")
+ }
+
+ x := new(p256OrdElement)
+ p256OrdBigToLittle(x, (*[32]byte)(k))
+
+ // Inversion is implemented as exponentiation by n - 2, per Fermat's little theorem.
+ //
+ // The sequence of 38 multiplications and 254 squarings is derived from
+ // https://briansmith.org/ecc-inversion-addition-chains-01#p256_scalar_inversion
+ _1 := new(p256OrdElement)
+ _11 := new(p256OrdElement)
+ _101 := new(p256OrdElement)
+ _111 := new(p256OrdElement)
+ _1111 := new(p256OrdElement)
+ _10101 := new(p256OrdElement)
+ _101111 := new(p256OrdElement)
+ t := new(p256OrdElement)
+
+ // This code operates in the Montgomery domain where R = 2²⁵⁶ mod n and n is
+ // the order of the scalar field. Elements in the Montgomery domain take the
+ // form a×R and p256OrdMul calculates (a × b × R⁻¹) mod n. RR is R in the
+ // domain, or R×R mod n, thus p256OrdMul(x, RR) gives x×R, i.e. converts x
+ // into the Montgomery domain.
+ RR := &p256OrdElement{0x83244c95be79eea2, 0x4699799c49bd6fa6,
+ 0x2845b2392b6bec59, 0x66e12d94f3d95620}
+
+ p256OrdMul(_1, x, RR) // _1
+ p256OrdSqr(x, _1, 1) // _10
+ p256OrdMul(_11, x, _1) // _11
+ p256OrdMul(_101, x, _11) // _101
+ p256OrdMul(_111, x, _101) // _111
+ p256OrdSqr(x, _101, 1) // _1010
+ p256OrdMul(_1111, _101, x) // _1111
+
+ p256OrdSqr(t, x, 1) // _10100
+ p256OrdMul(_10101, t, _1) // _10101
+ p256OrdSqr(x, _10101, 1) // _101010
+ p256OrdMul(_101111, _101, x) // _101111
+ p256OrdMul(x, _10101, x) // _111111 = x6
+ p256OrdSqr(t, x, 2) // _11111100
+ p256OrdMul(t, t, _11) // _11111111 = x8
+ p256OrdSqr(x, t, 8) // _ff00
+ p256OrdMul(x, x, t) // _ffff = x16
+ p256OrdSqr(t, x, 16) // _ffff0000
+ p256OrdMul(t, t, x) // _ffffffff = x32
+
+ p256OrdSqr(x, t, 64)
+ p256OrdMul(x, x, t)
+ p256OrdSqr(x, x, 32)
+ p256OrdMul(x, x, t)
+
+ sqrs := []int{
+ 6, 5, 4, 5, 5,
+ 4, 3, 3, 5, 9,
+ 6, 2, 5, 6, 5,
+ 4, 5, 5, 3, 10,
+ 2, 5, 5, 3, 7, 6}
+ muls := []*p256OrdElement{
+ _101111, _111, _11, _1111, _10101,
+ _101, _101, _101, _111, _101111,
+ _1111, _1, _1, _1111, _111,
+ _111, _111, _101, _11, _101111,
+ _11, _11, _11, _1, _10101, _1111}
+
+ for i, s := range sqrs {
+ p256OrdSqr(x, x, s)
+ p256OrdMul(x, x, muls[i])
+ }
+
+	// Montgomery multiplication by the plain value 1 computes x × 1 × R⁻¹ = x,
+	// converting a Montgomery value out of the domain.
+ one := &p256OrdElement{1}
+ p256OrdMul(x, x, one)
+
+ var xOut [32]byte
+ p256OrdLittleToBig(&xOut, x)
+ return xOut[:], nil
+}
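A hedged usage sketch for P256OrdInverse (nistec is internal to the standard library, so this only compiles from within it; kBytes is a 32-byte big-endian scalar):

    kInv, err := P256OrdInverse(kBytes)
    if err != nil {
        return err // len(kBytes) != 32
    }
    // kInv = k^(n−2) = k⁻¹ mod n, again 32 bytes big endian.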
diff --git a/contrib/go/_std_1.18/src/crypto/elliptic/p256_asm_table.bin b/contrib/go/_std_1.19/src/crypto/internal/nistec/p256_asm_table.bin
index 20c527e4e0..20c527e4e0 100644
--- a/contrib/go/_std_1.18/src/crypto/elliptic/p256_asm_table.bin
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/p256_asm_table.bin
Binary files differ
diff --git a/contrib/go/_std_1.19/src/crypto/internal/nistec/p384.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/p384.go
new file mode 100644
index 0000000000..1a855cb713
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/p384.go
@@ -0,0 +1,515 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package nistec
+
+import (
+ "crypto/internal/nistec/fiat"
+ "crypto/subtle"
+ "errors"
+ "sync"
+)
+
+var p384B, _ = new(fiat.P384Element).SetBytes([]byte{0xb3, 0x31, 0x2f, 0xa7, 0xe2, 0x3e, 0xe7, 0xe4, 0x98, 0x8e, 0x5, 0x6b, 0xe3, 0xf8, 0x2d, 0x19, 0x18, 0x1d, 0x9c, 0x6e, 0xfe, 0x81, 0x41, 0x12, 0x3, 0x14, 0x8, 0x8f, 0x50, 0x13, 0x87, 0x5a, 0xc6, 0x56, 0x39, 0x8d, 0x8a, 0x2e, 0xd1, 0x9d, 0x2a, 0x85, 0xc8, 0xed, 0xd3, 0xec, 0x2a, 0xef})
+
+var p384G, _ = NewP384Point().SetBytes([]byte{0x4, 0xaa, 0x87, 0xca, 0x22, 0xbe, 0x8b, 0x5, 0x37, 0x8e, 0xb1, 0xc7, 0x1e, 0xf3, 0x20, 0xad, 0x74, 0x6e, 0x1d, 0x3b, 0x62, 0x8b, 0xa7, 0x9b, 0x98, 0x59, 0xf7, 0x41, 0xe0, 0x82, 0x54, 0x2a, 0x38, 0x55, 0x2, 0xf2, 0x5d, 0xbf, 0x55, 0x29, 0x6c, 0x3a, 0x54, 0x5e, 0x38, 0x72, 0x76, 0xa, 0xb7, 0x36, 0x17, 0xde, 0x4a, 0x96, 0x26, 0x2c, 0x6f, 0x5d, 0x9e, 0x98, 0xbf, 0x92, 0x92, 0xdc, 0x29, 0xf8, 0xf4, 0x1d, 0xbd, 0x28, 0x9a, 0x14, 0x7c, 0xe9, 0xda, 0x31, 0x13, 0xb5, 0xf0, 0xb8, 0xc0, 0xa, 0x60, 0xb1, 0xce, 0x1d, 0x7e, 0x81, 0x9d, 0x7a, 0x43, 0x1d, 0x7c, 0x90, 0xea, 0xe, 0x5f})
+
+// p384ElementLength is the length of an element of the base or scalar field,
+// which have the same byte length for all NIST P curves.
+const p384ElementLength = 48
+
+// P384Point is a P384 point. The zero value is NOT valid.
+type P384Point struct {
+ // The point is represented in projective coordinates (X:Y:Z),
+ // where x = X/Z and y = Y/Z.
+ x, y, z *fiat.P384Element
+}
+
+// NewP384Point returns a new P384Point representing the point at infinity.
+func NewP384Point() *P384Point {
+ return &P384Point{
+ x: new(fiat.P384Element),
+ y: new(fiat.P384Element).One(),
+ z: new(fiat.P384Element),
+ }
+}
+
+// NewP384Generator returns a new P384Point set to the canonical generator.
+func NewP384Generator() *P384Point {
+ return (&P384Point{
+ x: new(fiat.P384Element),
+ y: new(fiat.P384Element),
+ z: new(fiat.P384Element),
+ }).Set(p384G)
+}
+
+// Set sets p = q and returns p.
+func (p *P384Point) Set(q *P384Point) *P384Point {
+ p.x.Set(q.x)
+ p.y.Set(q.y)
+ p.z.Set(q.z)
+ return p
+}
+
+// SetBytes sets p to the compressed, uncompressed, or infinity value encoded in
+// b, as specified in SEC 1, Version 2.0, Section 2.3.4. If the point is not on
+// the curve, it returns nil and an error, and the receiver is unchanged.
+// Otherwise, it returns p.
+func (p *P384Point) SetBytes(b []byte) (*P384Point, error) {
+ switch {
+ // Point at infinity.
+ case len(b) == 1 && b[0] == 0:
+ return p.Set(NewP384Point()), nil
+
+ // Uncompressed form.
+ case len(b) == 1+2*p384ElementLength && b[0] == 4:
+ x, err := new(fiat.P384Element).SetBytes(b[1 : 1+p384ElementLength])
+ if err != nil {
+ return nil, err
+ }
+ y, err := new(fiat.P384Element).SetBytes(b[1+p384ElementLength:])
+ if err != nil {
+ return nil, err
+ }
+ if err := p384CheckOnCurve(x, y); err != nil {
+ return nil, err
+ }
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ // Compressed form.
+ case len(b) == 1+p384ElementLength && (b[0] == 2 || b[0] == 3):
+ x, err := new(fiat.P384Element).SetBytes(b[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ // y² = x³ - 3x + b
+ y := p384Polynomial(new(fiat.P384Element), x)
+ if !p384Sqrt(y, y) {
+ return nil, errors.New("invalid P384 compressed point encoding")
+ }
+
+ // Select the positive or negative root, as indicated by the least
+ // significant bit, based on the encoding type byte.
+ otherRoot := new(fiat.P384Element)
+ otherRoot.Sub(otherRoot, y)
+ cond := y.Bytes()[p384ElementLength-1]&1 ^ b[0]&1
+ y.Select(otherRoot, y, int(cond))
+
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ default:
+ return nil, errors.New("invalid P384 point encoding")
+ }
+}
+
+// p384Polynomial sets y2 to x³ - 3x + b, and returns y2.
+func p384Polynomial(y2, x *fiat.P384Element) *fiat.P384Element {
+ y2.Square(x)
+ y2.Mul(y2, x)
+
+ threeX := new(fiat.P384Element).Add(x, x)
+ threeX.Add(threeX, x)
+
+ y2.Sub(y2, threeX)
+ return y2.Add(y2, p384B)
+}
+
+func p384CheckOnCurve(x, y *fiat.P384Element) error {
+ // y² = x³ - 3x + b
+ rhs := p384Polynomial(new(fiat.P384Element), x)
+ lhs := new(fiat.P384Element).Square(y)
+ if rhs.Equal(lhs) != 1 {
+ return errors.New("P384 point not on curve")
+ }
+ return nil
+}
+
+// Bytes returns the uncompressed or infinity encoding of p, as specified in
+// SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the point at
+// infinity is shorter than all other encodings.
+func (p *P384Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [1 + 2*p384ElementLength]byte
+ return p.bytes(&out)
+}
+
+func (p *P384Point) bytes(out *[1 + 2*p384ElementLength]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new(fiat.P384Element).Invert(p.z)
+ x := new(fiat.P384Element).Mul(p.x, zinv)
+ y := new(fiat.P384Element).Mul(p.y, zinv)
+
+ buf := append(out[:0], 4)
+ buf = append(buf, x.Bytes()...)
+ buf = append(buf, y.Bytes()...)
+ return buf
+}
+
+// BytesCompressed returns the compressed or infinity encoding of p, as
+// specified in SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the
+// point at infinity is shorter than all other encodings.
+func (p *P384Point) BytesCompressed() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [1 + p384ElementLength]byte
+ return p.bytesCompressed(&out)
+}
+
+func (p *P384Point) bytesCompressed(out *[1 + p384ElementLength]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new(fiat.P384Element).Invert(p.z)
+ x := new(fiat.P384Element).Mul(p.x, zinv)
+ y := new(fiat.P384Element).Mul(p.y, zinv)
+
+ // Encode the sign of the y coordinate (indicated by the least significant
+ // bit) as the encoding type (2 or 3).
+ buf := append(out[:0], 2)
+ buf[0] |= y.Bytes()[p384ElementLength-1] & 1
+ buf = append(buf, x.Bytes()...)
+ return buf
+}
+
+// Add sets q = p1 + p2, and returns q. The points may overlap.
+func (q *P384Point) Add(p1, p2 *P384Point) *P384Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P384Element).Mul(p1.x, p2.x) // t0 := X1 * X2
+ t1 := new(fiat.P384Element).Mul(p1.y, p2.y) // t1 := Y1 * Y2
+ t2 := new(fiat.P384Element).Mul(p1.z, p2.z) // t2 := Z1 * Z2
+ t3 := new(fiat.P384Element).Add(p1.x, p1.y) // t3 := X1 + Y1
+ t4 := new(fiat.P384Element).Add(p2.x, p2.y) // t4 := X2 + Y2
+ t3.Mul(t3, t4) // t3 := t3 * t4
+ t4.Add(t0, t1) // t4 := t0 + t1
+ t3.Sub(t3, t4) // t3 := t3 - t4
+ t4.Add(p1.y, p1.z) // t4 := Y1 + Z1
+ x3 := new(fiat.P384Element).Add(p2.y, p2.z) // X3 := Y2 + Z2
+ t4.Mul(t4, x3) // t4 := t4 * X3
+ x3.Add(t1, t2) // X3 := t1 + t2
+ t4.Sub(t4, x3) // t4 := t4 - X3
+ x3.Add(p1.x, p1.z) // X3 := X1 + Z1
+ y3 := new(fiat.P384Element).Add(p2.x, p2.z) // Y3 := X2 + Z2
+ x3.Mul(x3, y3) // X3 := X3 * Y3
+ y3.Add(t0, t2) // Y3 := t0 + t2
+ y3.Sub(x3, y3) // Y3 := X3 - Y3
+ z3 := new(fiat.P384Element).Mul(p384B, t2) // Z3 := b * t2
+ x3.Sub(y3, z3) // X3 := Y3 - Z3
+ z3.Add(x3, x3) // Z3 := X3 + X3
+ x3.Add(x3, z3) // X3 := X3 + Z3
+ z3.Sub(t1, x3) // Z3 := t1 - X3
+ x3.Add(t1, x3) // X3 := t1 + X3
+ y3.Mul(p384B, y3) // Y3 := b * Y3
+ t1.Add(t2, t2) // t1 := t2 + t2
+ t2.Add(t1, t2) // t2 := t1 + t2
+ y3.Sub(y3, t2) // Y3 := Y3 - t2
+ y3.Sub(y3, t0) // Y3 := Y3 - t0
+ t1.Add(y3, y3) // t1 := Y3 + Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ t1.Add(t0, t0) // t1 := t0 + t0
+ t0.Add(t1, t0) // t0 := t1 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t1.Mul(t4, y3) // t1 := t4 * Y3
+ t2.Mul(t0, y3) // t2 := t0 * Y3
+ y3.Mul(x3, z3) // Y3 := X3 * Z3
+ y3.Add(y3, t2) // Y3 := Y3 + t2
+ x3.Mul(t3, x3) // X3 := t3 * X3
+ x3.Sub(x3, t1) // X3 := X3 - t1
+ z3.Mul(t4, z3) // Z3 := t4 * Z3
+ t1.Mul(t3, t0) // t1 := t3 * t0
+ z3.Add(z3, t1) // Z3 := Z3 + t1
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Double sets q = p + p, and returns q. The points may overlap.
+func (q *P384Point) Double(p *P384Point) *P384Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P384Element).Square(p.x) // t0 := X ^ 2
+ t1 := new(fiat.P384Element).Square(p.y) // t1 := Y ^ 2
+ t2 := new(fiat.P384Element).Square(p.z) // t2 := Z ^ 2
+ t3 := new(fiat.P384Element).Mul(p.x, p.y) // t3 := X * Y
+ t3.Add(t3, t3) // t3 := t3 + t3
+ z3 := new(fiat.P384Element).Mul(p.x, p.z) // Z3 := X * Z
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ y3 := new(fiat.P384Element).Mul(p384B, t2) // Y3 := b * t2
+ y3.Sub(y3, z3) // Y3 := Y3 - Z3
+ x3 := new(fiat.P384Element).Add(y3, y3) // X3 := Y3 + Y3
+ y3.Add(x3, y3) // Y3 := X3 + Y3
+ x3.Sub(t1, y3) // X3 := t1 - Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ y3.Mul(x3, y3) // Y3 := X3 * Y3
+ x3.Mul(x3, t3) // X3 := X3 * t3
+ t3.Add(t2, t2) // t3 := t2 + t2
+ t2.Add(t2, t3) // t2 := t2 + t3
+ z3.Mul(p384B, z3) // Z3 := b * Z3
+ z3.Sub(z3, t2) // Z3 := Z3 - t2
+ z3.Sub(z3, t0) // Z3 := Z3 - t0
+ t3.Add(z3, z3) // t3 := Z3 + Z3
+ z3.Add(z3, t3) // Z3 := Z3 + t3
+ t3.Add(t0, t0) // t3 := t0 + t0
+ t0.Add(t3, t0) // t0 := t3 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t0.Mul(t0, z3) // t0 := t0 * Z3
+ y3.Add(y3, t0) // Y3 := Y3 + t0
+ t0.Mul(p.y, p.z) // t0 := Y * Z
+ t0.Add(t0, t0) // t0 := t0 + t0
+ z3.Mul(t0, z3) // Z3 := t0 * Z3
+ x3.Sub(x3, z3) // X3 := X3 - Z3
+ z3.Mul(t0, t1) // Z3 := t0 * t1
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Select sets q to p1 if cond == 1, and to p2 if cond == 0.
+func (q *P384Point) Select(p1, p2 *P384Point, cond int) *P384Point {
+ q.x.Select(p1.x, p2.x, cond)
+ q.y.Select(p1.y, p2.y, cond)
+ q.z.Select(p1.z, p2.z, cond)
+ return q
+}
+
+// A p384Table holds the first 15 multiples of a point at offset -1, so [1]P
+// is at table[0], [15]P is at table[14], and [0]P is implicitly the identity
+// point.
+type p384Table [15]*P384Point
+
+// Select selects the n-th multiple of the table base point into p. It works in
+// constant time by iterating over every entry of the table. n must be in [0, 15].
+func (table *p384Table) Select(p *P384Point, n uint8) {
+ if n >= 16 {
+ panic("nistec: internal error: p384Table called with out-of-bounds value")
+ }
+ p.Set(NewP384Point())
+ for i := uint8(1); i < 16; i++ {
+ cond := subtle.ConstantTimeByteEq(i, n)
+ p.Select(table[i-1], p, cond)
+ }
+}
+
+// ScalarMult sets p = scalar * q, and returns p.
+func (p *P384Point) ScalarMult(q *P384Point, scalar []byte) (*P384Point, error) {
+ // Compute a p384Table for the base point q. The explicit NewP384Point
+ // calls get inlined, letting the allocations live on the stack.
+ var table = p384Table{NewP384Point(), NewP384Point(), NewP384Point(),
+ NewP384Point(), NewP384Point(), NewP384Point(), NewP384Point(),
+ NewP384Point(), NewP384Point(), NewP384Point(), NewP384Point(),
+ NewP384Point(), NewP384Point(), NewP384Point(), NewP384Point()}
+ table[0].Set(q)
+ for i := 1; i < 15; i += 2 {
+ table[i].Double(table[i/2])
+ table[i+1].Add(table[i], q)
+ }
+
+ // Instead of doing the classic double-and-add chain, we do it with a
+ // four-bit window: we double four times, and then add [0-15]P.
+ t := NewP384Point()
+ p.Set(NewP384Point())
+ for i, byte := range scalar {
+ // No need to double on the first iteration, as p is the identity at
+ // this point, and [N]∞ = ∞.
+ if i != 0 {
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ }
+
+ windowValue := byte >> 4
+ table.Select(t, windowValue)
+ p.Add(p, t)
+
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+
+ windowValue = byte & 0b1111
+ table.Select(t, windowValue)
+ p.Add(p, t)
+ }
+
+ return p, nil
+}
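A worked example of the four-bit window: for the scalar byte 0xb7 = 0b1011_0111, the loop adds [11]P, doubles four times, then adds [7]P, so the accumulator holds (prev·16 + 11)·16 + 7 = prev·256 + 183 multiples of P, matching the byte value 0xb7 = 183.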
+
+var p384GeneratorTable *[p384ElementLength * 2]p384Table
+var p384GeneratorTableOnce sync.Once
+
+// generatorTable returns a sequence of p384Tables. The first table contains
+// multiples of G. Each successive table is the previous table doubled four
+// times.
+func (p *P384Point) generatorTable() *[p384ElementLength * 2]p384Table {
+ p384GeneratorTableOnce.Do(func() {
+ p384GeneratorTable = new([p384ElementLength * 2]p384Table)
+ base := NewP384Generator()
+ for i := 0; i < p384ElementLength*2; i++ {
+ p384GeneratorTable[i][0] = NewP384Point().Set(base)
+ for j := 1; j < 15; j++ {
+ p384GeneratorTable[i][j] = NewP384Point().Add(p384GeneratorTable[i][j-1], base)
+ }
+ base.Double(base)
+ base.Double(base)
+ base.Double(base)
+ base.Double(base)
+ }
+ })
+ return p384GeneratorTable
+}
+
+// ScalarBaseMult sets p = scalar * B, where B is the canonical generator, and
+// returns p.
+func (p *P384Point) ScalarBaseMult(scalar []byte) (*P384Point, error) {
+ if len(scalar) != p384ElementLength {
+ return nil, errors.New("invalid scalar length")
+ }
+ tables := p.generatorTable()
+
+ // This is also a scalar multiplication with a four-bit window like in
+ // ScalarMult, but in this case the doublings are precomputed. The value
+ // [windowValue]G added at iteration k would normally get doubled
+ // (totIterations-k)×4 times, but with a larger precomputation we can
+ // instead add [2^((totIterations-k)×4)][windowValue]G and avoid the
+ // doublings between iterations.
+ t := NewP384Point()
+ p.Set(NewP384Point())
+ tableIndex := len(tables) - 1
+ for _, byte := range scalar {
+ windowValue := byte >> 4
+ tables[tableIndex].Select(t, windowValue)
+ p.Add(p, t)
+ tableIndex--
+
+ windowValue = byte & 0b1111
+ tables[tableIndex].Select(t, windowValue)
+ p.Add(p, t)
+ tableIndex--
+ }
+
+ return p, nil
+}
+
+// p384Sqrt sets e to a square root of x. If x is not a square, p384Sqrt returns
+// false and e is unchanged. e and x can overlap.
+func p384Sqrt(e, x *fiat.P384Element) (isSquare bool) {
+ candidate := new(fiat.P384Element)
+ p384SqrtCandidate(candidate, x)
+ square := new(fiat.P384Element).Square(candidate)
+ if square.Equal(x) != 1 {
+ return false
+ }
+ e.Set(candidate)
+ return true
+}
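Because the P-384 prime satisfies p ≡ 3 (mod 4), the candidate computed below is x^((p+1)/4). A non-constant-time math/big restatement (for cross-checking only):

    import "math/big"

    func sqrtCandidateBig(x, p *big.Int) *big.Int {
        e := new(big.Int).Add(p, big.NewInt(1))
        e.Rsh(e, 2) // (p + 1) / 4
        return new(big.Int).Exp(x, e, p)
    }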
+
+// p384SqrtCandidate sets z to a square root candidate for x. z and x must not overlap.
+func p384SqrtCandidate(z, x *fiat.P384Element) {
+ // Since p = 3 mod 4, exponentiation by (p + 1) / 4 yields a square root candidate.
+ //
+ // The sequence of 14 multiplications and 381 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain v0.4.0.
+ //
+ // _10 = 2*1
+ // _11 = 1 + _10
+ // _110 = 2*_11
+ // _111 = 1 + _110
+ // _111000 = _111 << 3
+ // _111111 = _111 + _111000
+ // _1111110 = 2*_111111
+ // _1111111 = 1 + _1111110
+ // x12 = _1111110 << 5 + _111111
+ // x24 = x12 << 12 + x12
+ // x31 = x24 << 7 + _1111111
+ // x32 = 2*x31 + 1
+ // x63 = x32 << 31 + x31
+ // x126 = x63 << 63 + x63
+ // x252 = x126 << 126 + x126
+ // x255 = x252 << 3 + _111
+ // return ((x255 << 33 + x32) << 64 + 1) << 30
+ //
+ var t0 = new(fiat.P384Element)
+ var t1 = new(fiat.P384Element)
+ var t2 = new(fiat.P384Element)
+
+ z.Square(x)
+ z.Mul(x, z)
+ z.Square(z)
+ t0.Mul(x, z)
+ z.Square(t0)
+ for s := 1; s < 3; s++ {
+ z.Square(z)
+ }
+ t1.Mul(t0, z)
+ t2.Square(t1)
+ z.Mul(x, t2)
+ for s := 0; s < 5; s++ {
+ t2.Square(t2)
+ }
+ t1.Mul(t1, t2)
+ t2.Square(t1)
+ for s := 1; s < 12; s++ {
+ t2.Square(t2)
+ }
+ t1.Mul(t1, t2)
+ for s := 0; s < 7; s++ {
+ t1.Square(t1)
+ }
+ t1.Mul(z, t1)
+ z.Square(t1)
+ z.Mul(x, z)
+ t2.Square(z)
+ for s := 1; s < 31; s++ {
+ t2.Square(t2)
+ }
+ t1.Mul(t1, t2)
+ t2.Square(t1)
+ for s := 1; s < 63; s++ {
+ t2.Square(t2)
+ }
+ t1.Mul(t1, t2)
+ t2.Square(t1)
+ for s := 1; s < 126; s++ {
+ t2.Square(t2)
+ }
+ t1.Mul(t1, t2)
+ for s := 0; s < 3; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ for s := 0; s < 33; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ for s := 0; s < 64; s++ {
+ z.Square(z)
+ }
+ z.Mul(x, z)
+ for s := 0; s < 30; s++ {
+ z.Square(z)
+ }
+}
diff --git a/contrib/go/_std_1.19/src/crypto/internal/nistec/p521.go b/contrib/go/_std_1.19/src/crypto/internal/nistec/p521.go
new file mode 100644
index 0000000000..f285d57576
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/internal/nistec/p521.go
@@ -0,0 +1,444 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package nistec
+
+import (
+ "crypto/internal/nistec/fiat"
+ "crypto/subtle"
+ "errors"
+ "sync"
+)
+
+var p521B, _ = new(fiat.P521Element).SetBytes([]byte{0x0, 0x51, 0x95, 0x3e, 0xb9, 0x61, 0x8e, 0x1c, 0x9a, 0x1f, 0x92, 0x9a, 0x21, 0xa0, 0xb6, 0x85, 0x40, 0xee, 0xa2, 0xda, 0x72, 0x5b, 0x99, 0xb3, 0x15, 0xf3, 0xb8, 0xb4, 0x89, 0x91, 0x8e, 0xf1, 0x9, 0xe1, 0x56, 0x19, 0x39, 0x51, 0xec, 0x7e, 0x93, 0x7b, 0x16, 0x52, 0xc0, 0xbd, 0x3b, 0xb1, 0xbf, 0x7, 0x35, 0x73, 0xdf, 0x88, 0x3d, 0x2c, 0x34, 0xf1, 0xef, 0x45, 0x1f, 0xd4, 0x6b, 0x50, 0x3f, 0x0})
+
+var p521G, _ = NewP521Point().SetBytes([]byte{0x4, 0x0, 0xc6, 0x85, 0x8e, 0x6, 0xb7, 0x4, 0x4, 0xe9, 0xcd, 0x9e, 0x3e, 0xcb, 0x66, 0x23, 0x95, 0xb4, 0x42, 0x9c, 0x64, 0x81, 0x39, 0x5, 0x3f, 0xb5, 0x21, 0xf8, 0x28, 0xaf, 0x60, 0x6b, 0x4d, 0x3d, 0xba, 0xa1, 0x4b, 0x5e, 0x77, 0xef, 0xe7, 0x59, 0x28, 0xfe, 0x1d, 0xc1, 0x27, 0xa2, 0xff, 0xa8, 0xde, 0x33, 0x48, 0xb3, 0xc1, 0x85, 0x6a, 0x42, 0x9b, 0xf9, 0x7e, 0x7e, 0x31, 0xc2, 0xe5, 0xbd, 0x66, 0x1, 0x18, 0x39, 0x29, 0x6a, 0x78, 0x9a, 0x3b, 0xc0, 0x4, 0x5c, 0x8a, 0x5f, 0xb4, 0x2c, 0x7d, 0x1b, 0xd9, 0x98, 0xf5, 0x44, 0x49, 0x57, 0x9b, 0x44, 0x68, 0x17, 0xaf, 0xbd, 0x17, 0x27, 0x3e, 0x66, 0x2c, 0x97, 0xee, 0x72, 0x99, 0x5e, 0xf4, 0x26, 0x40, 0xc5, 0x50, 0xb9, 0x1, 0x3f, 0xad, 0x7, 0x61, 0x35, 0x3c, 0x70, 0x86, 0xa2, 0x72, 0xc2, 0x40, 0x88, 0xbe, 0x94, 0x76, 0x9f, 0xd1, 0x66, 0x50})
+
+// p521ElementLength is the length of an element of the base or scalar field,
+// which have the same byte length for all NIST P curves.
+const p521ElementLength = 66
+
+// P521Point is a P521 point. The zero value is NOT valid.
+type P521Point struct {
+ // The point is represented in projective coordinates (X:Y:Z),
+ // where x = X/Z and y = Y/Z.
+ x, y, z *fiat.P521Element
+}
+
+// NewP521Point returns a new P521Point representing the point at infinity.
+func NewP521Point() *P521Point {
+ return &P521Point{
+ x: new(fiat.P521Element),
+ y: new(fiat.P521Element).One(),
+ z: new(fiat.P521Element),
+ }
+}
+
+// NewP521Generator returns a new P521Point set to the canonical generator.
+func NewP521Generator() *P521Point {
+ return (&P521Point{
+ x: new(fiat.P521Element),
+ y: new(fiat.P521Element),
+ z: new(fiat.P521Element),
+ }).Set(p521G)
+}
+
+// Set sets p = q and returns p.
+func (p *P521Point) Set(q *P521Point) *P521Point {
+ p.x.Set(q.x)
+ p.y.Set(q.y)
+ p.z.Set(q.z)
+ return p
+}
+
+// SetBytes sets p to the compressed, uncompressed, or infinity value encoded in
+// b, as specified in SEC 1, Version 2.0, Section 2.3.4. If the point is not on
+// the curve, it returns nil and an error, and the receiver is unchanged.
+// Otherwise, it returns p.
+func (p *P521Point) SetBytes(b []byte) (*P521Point, error) {
+ switch {
+ // Point at infinity.
+ case len(b) == 1 && b[0] == 0:
+ return p.Set(NewP521Point()), nil
+
+ // Uncompressed form.
+ case len(b) == 1+2*p521ElementLength && b[0] == 4:
+ x, err := new(fiat.P521Element).SetBytes(b[1 : 1+p521ElementLength])
+ if err != nil {
+ return nil, err
+ }
+ y, err := new(fiat.P521Element).SetBytes(b[1+p521ElementLength:])
+ if err != nil {
+ return nil, err
+ }
+ if err := p521CheckOnCurve(x, y); err != nil {
+ return nil, err
+ }
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ // Compressed form.
+ case len(b) == 1+p521ElementLength && (b[0] == 2 || b[0] == 3):
+ x, err := new(fiat.P521Element).SetBytes(b[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ // y² = x³ - 3x + b
+ y := p521Polynomial(new(fiat.P521Element), x)
+ if !p521Sqrt(y, y) {
+ return nil, errors.New("invalid P521 compressed point encoding")
+ }
+
+ // Select the positive or negative root, as indicated by the least
+ // significant bit, based on the encoding type byte.
+ otherRoot := new(fiat.P521Element)
+ otherRoot.Sub(otherRoot, y)
+ cond := y.Bytes()[p521ElementLength-1]&1 ^ b[0]&1
+ y.Select(otherRoot, y, int(cond))
+
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ default:
+ return nil, errors.New("invalid P521 point encoding")
+ }
+}
+
+// p521Polynomial sets y2 to x³ - 3x + b, and returns y2.
+func p521Polynomial(y2, x *fiat.P521Element) *fiat.P521Element {
+ y2.Square(x)
+ y2.Mul(y2, x)
+
+ threeX := new(fiat.P521Element).Add(x, x)
+ threeX.Add(threeX, x)
+
+ y2.Sub(y2, threeX)
+ return y2.Add(y2, p521B)
+}
+
+func p521CheckOnCurve(x, y *fiat.P521Element) error {
+ // y² = x³ - 3x + b
+ rhs := p521Polynomial(new(fiat.P521Element), x)
+ lhs := new(fiat.P521Element).Square(y)
+ if rhs.Equal(lhs) != 1 {
+ return errors.New("P521 point not on curve")
+ }
+ return nil
+}
+
+// Bytes returns the uncompressed or infinity encoding of p, as specified in
+// SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the point at
+// infinity is shorter than all other encodings.
+func (p *P521Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [1 + 2*p521ElementLength]byte
+ return p.bytes(&out)
+}
+
+func (p *P521Point) bytes(out *[1 + 2*p521ElementLength]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new(fiat.P521Element).Invert(p.z)
+ x := new(fiat.P521Element).Mul(p.x, zinv)
+ y := new(fiat.P521Element).Mul(p.y, zinv)
+
+ buf := append(out[:0], 4)
+ buf = append(buf, x.Bytes()...)
+ buf = append(buf, y.Bytes()...)
+ return buf
+}
+
+// BytesCompressed returns the compressed or infinity encoding of p, as
+// specified in SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the
+// point at infinity is shorter than all other encodings.
+func (p *P521Point) BytesCompressed() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [1 + p521ElementLength]byte
+ return p.bytesCompressed(&out)
+}
+
+func (p *P521Point) bytesCompressed(out *[1 + p521ElementLength]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new(fiat.P521Element).Invert(p.z)
+ x := new(fiat.P521Element).Mul(p.x, zinv)
+ y := new(fiat.P521Element).Mul(p.y, zinv)
+
+ // Encode the sign of the y coordinate (indicated by the least significant
+ // bit) as the encoding type (2 or 3).
+ buf := append(out[:0], 2)
+ buf[0] |= y.Bytes()[p521ElementLength-1] & 1
+ buf = append(buf, x.Bytes()...)
+ return buf
+}
+
+// Add sets q = p1 + p2, and returns q. The points may overlap.
+func (q *P521Point) Add(p1, p2 *P521Point) *P521Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P521Element).Mul(p1.x, p2.x) // t0 := X1 * X2
+ t1 := new(fiat.P521Element).Mul(p1.y, p2.y) // t1 := Y1 * Y2
+ t2 := new(fiat.P521Element).Mul(p1.z, p2.z) // t2 := Z1 * Z2
+ t3 := new(fiat.P521Element).Add(p1.x, p1.y) // t3 := X1 + Y1
+ t4 := new(fiat.P521Element).Add(p2.x, p2.y) // t4 := X2 + Y2
+ t3.Mul(t3, t4) // t3 := t3 * t4
+ t4.Add(t0, t1) // t4 := t0 + t1
+ t3.Sub(t3, t4) // t3 := t3 - t4
+ t4.Add(p1.y, p1.z) // t4 := Y1 + Z1
+ x3 := new(fiat.P521Element).Add(p2.y, p2.z) // X3 := Y2 + Z2
+ t4.Mul(t4, x3) // t4 := t4 * X3
+ x3.Add(t1, t2) // X3 := t1 + t2
+ t4.Sub(t4, x3) // t4 := t4 - X3
+ x3.Add(p1.x, p1.z) // X3 := X1 + Z1
+ y3 := new(fiat.P521Element).Add(p2.x, p2.z) // Y3 := X2 + Z2
+ x3.Mul(x3, y3) // X3 := X3 * Y3
+ y3.Add(t0, t2) // Y3 := t0 + t2
+ y3.Sub(x3, y3) // Y3 := X3 - Y3
+ z3 := new(fiat.P521Element).Mul(p521B, t2) // Z3 := b * t2
+ x3.Sub(y3, z3) // X3 := Y3 - Z3
+ z3.Add(x3, x3) // Z3 := X3 + X3
+ x3.Add(x3, z3) // X3 := X3 + Z3
+ z3.Sub(t1, x3) // Z3 := t1 - X3
+ x3.Add(t1, x3) // X3 := t1 + X3
+ y3.Mul(p521B, y3) // Y3 := b * Y3
+ t1.Add(t2, t2) // t1 := t2 + t2
+ t2.Add(t1, t2) // t2 := t1 + t2
+ y3.Sub(y3, t2) // Y3 := Y3 - t2
+ y3.Sub(y3, t0) // Y3 := Y3 - t0
+ t1.Add(y3, y3) // t1 := Y3 + Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ t1.Add(t0, t0) // t1 := t0 + t0
+ t0.Add(t1, t0) // t0 := t1 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t1.Mul(t4, y3) // t1 := t4 * Y3
+ t2.Mul(t0, y3) // t2 := t0 * Y3
+ y3.Mul(x3, z3) // Y3 := X3 * Z3
+ y3.Add(y3, t2) // Y3 := Y3 + t2
+ x3.Mul(t3, x3) // X3 := t3 * X3
+ x3.Sub(x3, t1) // X3 := X3 - t1
+ z3.Mul(t4, z3) // Z3 := t4 * Z3
+ t1.Mul(t3, t0) // t1 := t3 * t0
+ z3.Add(z3, t1) // Z3 := Z3 + t1
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Double sets q = p + p, and returns q. The points may overlap.
+func (q *P521Point) Double(p *P521Point) *P521Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P521Element).Square(p.x) // t0 := X ^ 2
+ t1 := new(fiat.P521Element).Square(p.y) // t1 := Y ^ 2
+ t2 := new(fiat.P521Element).Square(p.z) // t2 := Z ^ 2
+ t3 := new(fiat.P521Element).Mul(p.x, p.y) // t3 := X * Y
+ t3.Add(t3, t3) // t3 := t3 + t3
+ z3 := new(fiat.P521Element).Mul(p.x, p.z) // Z3 := X * Z
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ y3 := new(fiat.P521Element).Mul(p521B, t2) // Y3 := b * t2
+ y3.Sub(y3, z3) // Y3 := Y3 - Z3
+ x3 := new(fiat.P521Element).Add(y3, y3) // X3 := Y3 + Y3
+ y3.Add(x3, y3) // Y3 := X3 + Y3
+ x3.Sub(t1, y3) // X3 := t1 - Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ y3.Mul(x3, y3) // Y3 := X3 * Y3
+ x3.Mul(x3, t3) // X3 := X3 * t3
+ t3.Add(t2, t2) // t3 := t2 + t2
+ t2.Add(t2, t3) // t2 := t2 + t3
+ z3.Mul(p521B, z3) // Z3 := b * Z3
+ z3.Sub(z3, t2) // Z3 := Z3 - t2
+ z3.Sub(z3, t0) // Z3 := Z3 - t0
+ t3.Add(z3, z3) // t3 := Z3 + Z3
+ z3.Add(z3, t3) // Z3 := Z3 + t3
+ t3.Add(t0, t0) // t3 := t0 + t0
+ t0.Add(t3, t0) // t0 := t3 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t0.Mul(t0, z3) // t0 := t0 * Z3
+ y3.Add(y3, t0) // Y3 := Y3 + t0
+ t0.Mul(p.y, p.z) // t0 := Y * Z
+ t0.Add(t0, t0) // t0 := t0 + t0
+ z3.Mul(t0, z3) // Z3 := t0 * Z3
+ x3.Sub(x3, z3) // X3 := X3 - Z3
+ z3.Mul(t0, t1) // Z3 := t0 * t1
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Select sets q to p1 if cond == 1, and to p2 if cond == 0.
+func (q *P521Point) Select(p1, p2 *P521Point, cond int) *P521Point {
+ q.x.Select(p1.x, p2.x, cond)
+ q.y.Select(p1.y, p2.y, cond)
+ q.z.Select(p1.z, p2.z, cond)
+ return q
+}
+
+// A p521Table holds the first 15 multiples of a point at offset -1, so [1]P
+// is at table[0], [15]P is at table[14], and [0]P is implicitly the identity
+// point.
+type p521Table [15]*P521Point
+
+// Select selects the n-th multiple of the table base point into p. It works in
+// constant time by iterating over every entry of the table. n must be in [0, 15].
+func (table *p521Table) Select(p *P521Point, n uint8) {
+ if n >= 16 {
+ panic("nistec: internal error: p521Table called with out-of-bounds value")
+ }
+ p.Set(NewP521Point())
+ for i := uint8(1); i < 16; i++ {
+ cond := subtle.ConstantTimeByteEq(i, n)
+ p.Select(table[i-1], p, cond)
+ }
+}
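
The visit-every-entry pattern above can be shown in isolation with crypto/subtle over plain byte slices; a minimal sketch (ctSelect and the toy table are illustrative names, not part of this package):

package main

import (
	"crypto/subtle"
	"fmt"
)

// ctSelect copies table[n] into the result while touching every entry,
// so the memory access pattern does not depend on the secret index n.
func ctSelect(table [][]byte, n uint8) []byte {
	out := make([]byte, len(table[0]))
	for i := range table {
		cond := subtle.ConstantTimeByteEq(uint8(i), n)
		subtle.ConstantTimeCopy(cond, out, table[i])
	}
	return out
}

func main() {
	table := [][]byte{{1, 1}, {2, 2}, {3, 3}}
	fmt.Println(ctSelect(table, 2)) // [3 3]
}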
+
+// ScalarMult sets p = scalar * q, and returns p.
+func (p *P521Point) ScalarMult(q *P521Point, scalar []byte) (*P521Point, error) {
+ // Compute a p521Table for the base point q. The explicit NewP521Point
+ // calls get inlined, letting the allocations live on the stack.
+ var table = p521Table{NewP521Point(), NewP521Point(), NewP521Point(),
+ NewP521Point(), NewP521Point(), NewP521Point(), NewP521Point(),
+ NewP521Point(), NewP521Point(), NewP521Point(), NewP521Point(),
+ NewP521Point(), NewP521Point(), NewP521Point(), NewP521Point()}
+ table[0].Set(q)
+ for i := 1; i < 15; i += 2 {
+ table[i].Double(table[i/2])
+ table[i+1].Add(table[i], q)
+ }
+
+ // Instead of doing the classic double-and-add chain, we do it with a
+ // four-bit window: we double four times, and then add [0-15]P.
+ t := NewP521Point()
+ p.Set(NewP521Point())
+ for i, byte := range scalar {
+ // No need to double on the first iteration, as p is the identity at
+ // this point, and [N]∞ = ∞.
+ if i != 0 {
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ }
+
+ windowValue := byte >> 4
+ table.Select(t, windowValue)
+ p.Add(p, t)
+
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+
+ windowValue = byte & 0b1111
+ table.Select(t, windowValue)
+ p.Add(p, t)
+ }
+
+ return p, nil
+}
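
A small sketch of the nibble decomposition this loop relies on, high nibble first, then low (windows is an illustrative helper, not part of this package):

package main

import "fmt"

// windows splits a big-endian scalar into 4-bit window values.
func windows(scalar []byte) []uint8 {
	var w []uint8
	for _, b := range scalar {
		w = append(w, b>>4, b&0b1111)
	}
	return w
}

func main() {
	fmt.Println(windows([]byte{0xBE, 0xEF})) // [11 14 14 15]
}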
+
+var p521GeneratorTable *[p521ElementLength * 2]p521Table
+var p521GeneratorTableOnce sync.Once
+
+// generatorTable returns a sequence of p521Tables. The first table contains
+// multiples of G. Each successive table is the previous table doubled four
+// times.
+func (p *P521Point) generatorTable() *[p521ElementLength * 2]p521Table {
+ p521GeneratorTableOnce.Do(func() {
+ p521GeneratorTable = new([p521ElementLength * 2]p521Table)
+ base := NewP521Generator()
+ for i := 0; i < p521ElementLength*2; i++ {
+ p521GeneratorTable[i][0] = NewP521Point().Set(base)
+ for j := 1; j < 15; j++ {
+ p521GeneratorTable[i][j] = NewP521Point().Add(p521GeneratorTable[i][j-1], base)
+ }
+ base.Double(base)
+ base.Double(base)
+ base.Double(base)
+ base.Double(base)
+ }
+ })
+ return p521GeneratorTable
+}
+
+// ScalarBaseMult sets p = scalar * B, where B is the canonical generator, and
+// returns p.
+func (p *P521Point) ScalarBaseMult(scalar []byte) (*P521Point, error) {
+ if len(scalar) != p521ElementLength {
+ return nil, errors.New("invalid scalar length")
+ }
+ tables := p.generatorTable()
+
+ // This is also a scalar multiplication with a four-bit window like in
+ // ScalarMult, but in this case the doublings are precomputed. The value
+ // [windowValue]G added at iteration k would normally get doubled
+ // (totIterations-k)×4 times, but with a larger precomputation we can
+ // instead add [2^((totIterations-k)×4)][windowValue]G and avoid the
+ // doublings between iterations.
+ t := NewP521Point()
+ p.Set(NewP521Point())
+ tableIndex := len(tables) - 1
+ for _, byte := range scalar {
+ windowValue := byte >> 4
+ tables[tableIndex].Select(t, windowValue)
+ p.Add(p, t)
+ tableIndex--
+
+ windowValue = byte & 0b1111
+ tables[tableIndex].Select(t, windowValue)
+ p.Add(p, t)
+ tableIndex--
+ }
+
+ return p, nil
+}
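
Rough sizing of this precomputation, assuming p521ElementLength = 66 as defined earlier in this file: two 4-bit windows per scalar byte gives 132 tables of 15 points each.

package main

import "fmt"

func main() {
	const elementLen = 66          // bytes in a P-521 field element
	tables := elementLen * 2       // one p521Table per 4-bit window
	fmt.Println(tables, tables*15) // 132 tables, 1980 stored points
}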
+
+// p521Sqrt sets e to a square root of x. If x is not a square, p521Sqrt returns
+// false and e is unchanged. e and x can overlap.
+func p521Sqrt(e, x *fiat.P521Element) (isSquare bool) {
+ candidate := new(fiat.P521Element)
+ p521SqrtCandidate(candidate, x)
+ square := new(fiat.P521Element).Square(candidate)
+ if square.Equal(x) != 1 {
+ return false
+ }
+ e.Set(candidate)
+ return true
+}
+
+// p521SqrtCandidate sets z to a square root candidate for x. z and x must not overlap.
+func p521SqrtCandidate(z, x *fiat.P521Element) {
+ // Since p = 3 mod 4, exponentiation by (p + 1) / 4 yields a square root candidate.
+ //
+ // The sequence of 0 multiplications and 519 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain v0.4.0.
+ //
+ // return 1 << 519
+ //
+
+ z.Square(x)
+ for s := 1; s < 519; s++ {
+ z.Square(z)
+ }
+}
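
The same rule can be checked with math/big: for the P-521 prime p = 2^521 - 1 we get (p + 1)/4 = 2^519, which is exactly the 519 squarings above. A toy sketch:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 521), one) // 2^521 - 1
	e := new(big.Int).Rsh(new(big.Int).Add(p, one), 2)     // (p + 1) / 4
	fmt.Println(e.Cmp(new(big.Int).Lsh(one, 519)) == 0)    // true: e == 2^519

	x := big.NewInt(9) // 9 = 3^2 is always a square
	cand := new(big.Int).Exp(x, e, p)
	sq := new(big.Int).Mul(cand, cand)
	fmt.Println(sq.Mod(sq, p).Cmp(x) == 0) // true: candidate squares back to x
}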
diff --git a/contrib/go/_std_1.18/src/crypto/internal/randutil/randutil.go b/contrib/go/_std_1.19/src/crypto/internal/randutil/randutil.go
index 84b1295a87..84b1295a87 100644
--- a/contrib/go/_std_1.18/src/crypto/internal/randutil/randutil.go
+++ b/contrib/go/_std_1.19/src/crypto/internal/randutil/randutil.go
diff --git a/contrib/go/_std_1.18/src/crypto/internal/subtle/aliasing.go b/contrib/go/_std_1.19/src/crypto/internal/subtle/aliasing.go
index 16e2fcab12..16e2fcab12 100644
--- a/contrib/go/_std_1.18/src/crypto/internal/subtle/aliasing.go
+++ b/contrib/go/_std_1.19/src/crypto/internal/subtle/aliasing.go
diff --git a/contrib/go/_std_1.18/src/crypto/md5/md5.go b/contrib/go/_std_1.19/src/crypto/md5/md5.go
index 0115784047..0115784047 100644
--- a/contrib/go/_std_1.18/src/crypto/md5/md5.go
+++ b/contrib/go/_std_1.19/src/crypto/md5/md5.go
diff --git a/contrib/go/_std_1.18/src/crypto/md5/md5block.go b/contrib/go/_std_1.19/src/crypto/md5/md5block.go
index 4ff289e860..4ff289e860 100644
--- a/contrib/go/_std_1.18/src/crypto/md5/md5block.go
+++ b/contrib/go/_std_1.19/src/crypto/md5/md5block.go
diff --git a/contrib/go/_std_1.18/src/crypto/md5/md5block_amd64.s b/contrib/go/_std_1.19/src/crypto/md5/md5block_amd64.s
index 7c7d92d7e8..7c7d92d7e8 100644
--- a/contrib/go/_std_1.18/src/crypto/md5/md5block_amd64.s
+++ b/contrib/go/_std_1.19/src/crypto/md5/md5block_amd64.s
diff --git a/contrib/go/_std_1.18/src/crypto/md5/md5block_decl.go b/contrib/go/_std_1.19/src/crypto/md5/md5block_decl.go
index 6716a0c9db..6716a0c9db 100644
--- a/contrib/go/_std_1.18/src/crypto/md5/md5block_decl.go
+++ b/contrib/go/_std_1.19/src/crypto/md5/md5block_decl.go
diff --git a/contrib/go/_std_1.18/src/crypto/rand/rand.go b/contrib/go/_std_1.19/src/crypto/rand/rand.go
index af85b966df..af85b966df 100644
--- a/contrib/go/_std_1.18/src/crypto/rand/rand.go
+++ b/contrib/go/_std_1.19/src/crypto/rand/rand.go
diff --git a/contrib/go/_std_1.19/src/crypto/rand/rand_getentropy.go b/contrib/go/_std_1.19/src/crypto/rand/rand_getentropy.go
new file mode 100644
index 0000000000..68f921b0fc
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/rand/rand_getentropy.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (darwin && !ios) || openbsd
+
+package rand
+
+import "internal/syscall/unix"
+
+func init() {
+ // getentropy(2) returns a maximum of 256 bytes per call
+ altGetRandom = batched(unix.GetEntropy, 256)
+}
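
The batched helper referenced here lives in rand_batched.go; a minimal sketch of the idea, with an illustrative batchedSketch wrapper that splits large reads into max-sized chunks:

package main

import "fmt"

// batchedSketch wraps f, which can fill at most max bytes per call,
// into a function that services reads of any length.
func batchedSketch(f func([]byte) error, max int) func([]byte) error {
	return func(p []byte) error {
		for len(p) > 0 {
			n := len(p)
			if n > max {
				n = max
			}
			if err := f(p[:n]); err != nil {
				return err
			}
			p = p[n:]
		}
		return nil
	}
}

func main() {
	calls := 0
	src := func(p []byte) error { calls++; return nil }
	read := batchedSketch(src, 256)
	_ = read(make([]byte, 1000))
	fmt.Println(calls) // 4 calls: 256 + 256 + 256 + 232 bytes
}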
diff --git a/contrib/go/_std_1.19/src/crypto/rand/rand_getrandom.go b/contrib/go/_std_1.19/src/crypto/rand/rand_getrandom.go
new file mode 100644
index 0000000000..478aa5c459
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/rand/rand_getrandom.go
@@ -0,0 +1,48 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux || freebsd || dragonfly || solaris
+
+package rand
+
+import (
+ "internal/syscall/unix"
+ "runtime"
+ "syscall"
+)
+
+func init() {
+ var maxGetRandomRead int
+ switch runtime.GOOS {
+ case "linux", "android":
+ // Per the manpage:
+ // When reading from the urandom source, a maximum of 33554431 bytes
+ // is returned by a single call to getrandom() on systems where int
+ // has a size of 32 bits.
+ maxGetRandomRead = (1 << 25) - 1
+ case "freebsd", "dragonfly", "solaris", "illumos":
+ maxGetRandomRead = 1 << 8
+ default:
+ panic("no maximum specified for GetRandom")
+ }
+ altGetRandom = batched(getRandom, maxGetRandomRead)
+}
+
+// If the kernel is too old to support the getrandom syscall(),
+// unix.GetRandom will immediately return ENOSYS and we will then fall back to
+// reading from /dev/urandom in rand_unix.go. unix.GetRandom caches the ENOSYS
+// result so we only suffer the syscall overhead once in this case.
+// If the kernel supports the getrandom() syscall, unix.GetRandom will block
+// until the kernel has sufficient randomness (as we don't use GRND_NONBLOCK).
+// In this case, unix.GetRandom will not return an error.
+func getRandom(p []byte) error {
+ n, err := unix.GetRandom(p, 0)
+ if err != nil {
+ return err
+ }
+ if n != len(p) {
+ return syscall.EIO
+ }
+ return nil
+}
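
Outside the standard library the same syscall is reachable via the external golang.org/x/sys/unix module; a Linux-only sketch:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	buf := make([]byte, 32)
	n, err := unix.Getrandom(buf, 0) // blocks until the kernel pool is initialized
	fmt.Println(n, err)              // 32 <nil> on a healthy system
}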
diff --git a/contrib/go/_std_1.19/src/crypto/rand/rand_unix.go b/contrib/go/_std_1.19/src/crypto/rand/rand_unix.go
new file mode 100644
index 0000000000..746e90cc91
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/rand/rand_unix.go
@@ -0,0 +1,87 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+// Unix cryptographically secure pseudorandom number
+// generator.
+
+package rand
+
+import (
+ "crypto/internal/boring"
+ "errors"
+ "io"
+ "os"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "time"
+)
+
+const urandomDevice = "/dev/urandom"
+
+func init() {
+ if boring.Enabled {
+ Reader = boring.RandReader
+ return
+ }
+ Reader = &reader{}
+}
+
+// A reader satisfies reads by reading from urandomDevice
+type reader struct {
+ f io.Reader
+ mu sync.Mutex
+ used uint32 // Atomic: 0 - never used, 1 - used, but f == nil, 2 - used, and f != nil
+}
+
+// altGetRandom if non-nil specifies an OS-specific function to get
+// urandom-style randomness.
+var altGetRandom func([]byte) (err error)
+
+func warnBlocked() {
+ println("crypto/rand: blocked for 60 seconds waiting to read random data from the kernel")
+}
+
+func (r *reader) Read(b []byte) (n int, err error) {
+ boring.Unreachable()
+ if atomic.CompareAndSwapUint32(&r.used, 0, 1) {
+ // First use of randomness. Start timer to warn about
+ // being blocked on entropy not being available.
+ t := time.AfterFunc(time.Minute, warnBlocked)
+ defer t.Stop()
+ }
+ if altGetRandom != nil && altGetRandom(b) == nil {
+ return len(b), nil
+ }
+ if atomic.LoadUint32(&r.used) != 2 {
+ r.mu.Lock()
+ if atomic.LoadUint32(&r.used) != 2 {
+ f, err := os.Open(urandomDevice)
+ if err != nil {
+ r.mu.Unlock()
+ return 0, err
+ }
+ r.f = hideAgainReader{f}
+ atomic.StoreUint32(&r.used, 2)
+ }
+ r.mu.Unlock()
+ }
+ return io.ReadFull(r.f, b)
+}
+
+// hideAgainReader masks EAGAIN reads from /dev/urandom.
+// See golang.org/issue/9205
+type hideAgainReader struct {
+ r io.Reader
+}
+
+func (hr hideAgainReader) Read(p []byte) (n int, err error) {
+ n, err = hr.r.Read(p)
+ if errors.Is(err, syscall.EAGAIN) {
+ err = nil
+ }
+ return
+}
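
In application code all of this machinery is reached through the package's public entry point, crypto/rand.Read:

package main

import (
	"crypto/rand"
	"fmt"
)

func main() {
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", key)
}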
diff --git a/contrib/go/_std_1.19/src/crypto/rand/util.go b/contrib/go/_std_1.19/src/crypto/rand/util.go
new file mode 100644
index 0000000000..11b1a28ec5
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/rand/util.go
@@ -0,0 +1,99 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+import (
+ "crypto/internal/randutil"
+ "errors"
+ "io"
+ "math/big"
+)
+
+// Prime returns a number of the given bit length that is prime with high probability.
+// Prime will return an error for any error returned by rand.Read or if bits < 2.
+func Prime(rand io.Reader, bits int) (*big.Int, error) {
+ if bits < 2 {
+ return nil, errors.New("crypto/rand: prime size must be at least 2-bit")
+ }
+
+ randutil.MaybeReadByte(rand)
+
+ b := uint(bits % 8)
+ if b == 0 {
+ b = 8
+ }
+
+ bytes := make([]byte, (bits+7)/8)
+ p := new(big.Int)
+
+ for {
+ if _, err := io.ReadFull(rand, bytes); err != nil {
+ return nil, err
+ }
+
+ // Clear bits in the first byte to make sure the candidate has a size <= bits.
+ bytes[0] &= uint8(int(1<<b) - 1)
+ // Don't let the value be too small, i.e., set the most significant two bits.
+ // Setting the top two bits, rather than just the top bit,
+ // means that when two of these values are multiplied together,
+ // the result isn't ever one bit short.
+ if b >= 2 {
+ bytes[0] |= 3 << (b - 2)
+ } else {
+ // Here b==1, because b cannot be zero.
+ bytes[0] |= 1
+ if len(bytes) > 1 {
+ bytes[1] |= 0x80
+ }
+ }
+ // Make the value odd since an even number this large certainly isn't prime.
+ bytes[len(bytes)-1] |= 1
+
+ p.SetBytes(bytes)
+ if p.ProbablyPrime(20) {
+ return p, nil
+ }
+ }
+}
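
Typical use of Prime; because the top two bits are forced on, the result has exactly the requested bit length:

package main

import (
	"crypto/rand"
	"fmt"
)

func main() {
	p, err := rand.Prime(rand.Reader, 512)
	if err != nil {
		panic(err)
	}
	fmt.Println(p.BitLen(), p.ProbablyPrime(20)) // 512 true
}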
+
+// Int returns a uniform random value in [0, max). It panics if max <= 0.
+func Int(rand io.Reader, max *big.Int) (n *big.Int, err error) {
+ if max.Sign() <= 0 {
+ panic("crypto/rand: argument to Int is <= 0")
+ }
+ n = new(big.Int)
+ n.Sub(max, n.SetUint64(1))
+ // bitLen is the maximum bit length needed to encode a value < max.
+ bitLen := n.BitLen()
+ if bitLen == 0 {
+ // the only valid result is 0
+ return
+ }
+ // k is the maximum byte length needed to encode a value < max.
+ k := (bitLen + 7) / 8
+ // b is the number of bits in the most significant byte of max-1.
+ b := uint(bitLen % 8)
+ if b == 0 {
+ b = 8
+ }
+
+ bytes := make([]byte, k)
+
+ for {
+ _, err = io.ReadFull(rand, bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ // Clear bits in the first byte to increase the probability
+ // that the candidate is < max.
+ bytes[0] &= uint8(int(1<<b) - 1)
+
+ n.SetBytes(bytes)
+ if n.Cmp(max) < 0 {
+ return
+ }
+ }
+}
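
Typical use of Int; the rejection loop above is what makes the result uniform rather than biased by a modulo reduction:

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

func main() {
	n, err := rand.Int(rand.Reader, big.NewInt(6)) // uniform in [0, 6)
	if err != nil {
		panic(err)
	}
	fmt.Println(n.Int64() + 1) // a fair die roll
}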
diff --git a/contrib/go/_std_1.18/src/crypto/rc4/rc4.go b/contrib/go/_std_1.19/src/crypto/rc4/rc4.go
index c2df0db2dc..c2df0db2dc 100644
--- a/contrib/go/_std_1.18/src/crypto/rc4/rc4.go
+++ b/contrib/go/_std_1.19/src/crypto/rc4/rc4.go
diff --git a/contrib/go/_std_1.19/src/crypto/rsa/notboring.go b/contrib/go/_std_1.19/src/crypto/rsa/notboring.go
new file mode 100644
index 0000000000..2abc043640
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/rsa/notboring.go
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !boringcrypto
+
+package rsa
+
+import "crypto/internal/boring"
+
+func boringPublicKey(*PublicKey) (*boring.PublicKeyRSA, error) {
+ panic("boringcrypto: not available")
+}
+func boringPrivateKey(*PrivateKey) (*boring.PrivateKeyRSA, error) {
+ panic("boringcrypto: not available")
+}
diff --git a/contrib/go/_std_1.19/src/crypto/rsa/pkcs1v15.go b/contrib/go/_std_1.19/src/crypto/rsa/pkcs1v15.go
new file mode 100644
index 0000000000..ab19229a6c
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/rsa/pkcs1v15.go
@@ -0,0 +1,387 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rsa
+
+import (
+ "crypto"
+ "crypto/internal/boring"
+ "crypto/internal/randutil"
+ "crypto/subtle"
+ "errors"
+ "io"
+ "math/big"
+)
+
+// This file implements encryption and decryption using PKCS #1 v1.5 padding.
+
+// PKCS1v15DecryptOptions is for passing options to PKCS #1 v1.5 decryption using
+// the crypto.Decrypter interface.
+type PKCS1v15DecryptOptions struct {
+ // SessionKeyLen is the length of the session key that is being
+ // decrypted. If not zero, then a padding error during decryption will
+ // cause a random plaintext of this length to be returned rather than
+ // an error. These alternatives happen in constant time.
+ SessionKeyLen int
+}
+
+// EncryptPKCS1v15 encrypts the given message with RSA and the padding
+// scheme from PKCS #1 v1.5. The message must be no longer than the
+// length of the public modulus minus 11 bytes.
+//
+// The random parameter is used as a source of entropy to ensure that
+// encrypting the same message twice doesn't result in the same
+// ciphertext.
+//
+// WARNING: use of this function to encrypt plaintexts other than
+// session keys is dangerous. Use RSA OAEP in new protocols.
+func EncryptPKCS1v15(random io.Reader, pub *PublicKey, msg []byte) ([]byte, error) {
+ randutil.MaybeReadByte(random)
+
+ if err := checkPub(pub); err != nil {
+ return nil, err
+ }
+ k := pub.Size()
+ if len(msg) > k-11 {
+ return nil, ErrMessageTooLong
+ }
+
+ if boring.Enabled && random == boring.RandReader {
+ bkey, err := boringPublicKey(pub)
+ if err != nil {
+ return nil, err
+ }
+ return boring.EncryptRSAPKCS1(bkey, msg)
+ }
+ boring.UnreachableExceptTests()
+
+ // EM = 0x00 || 0x02 || PS || 0x00 || M
+ em := make([]byte, k)
+ em[1] = 2
+ ps, mm := em[2:len(em)-len(msg)-1], em[len(em)-len(msg):]
+ err := nonZeroRandomBytes(ps, random)
+ if err != nil {
+ return nil, err
+ }
+ em[len(em)-len(msg)-1] = 0
+ copy(mm, msg)
+
+ if boring.Enabled {
+ var bkey *boring.PublicKeyRSA
+ bkey, err = boringPublicKey(pub)
+ if err != nil {
+ return nil, err
+ }
+ return boring.EncryptRSANoPadding(bkey, em)
+ }
+
+ m := new(big.Int).SetBytes(em)
+ c := encrypt(new(big.Int), pub, m)
+ return c.FillBytes(em), nil
+}
+
+// DecryptPKCS1v15 decrypts a plaintext using RSA and the padding scheme from PKCS #1 v1.5.
+// If random != nil, it uses RSA blinding to avoid timing side-channel attacks.
+//
+// Note that whether this function returns an error or not discloses secret
+// information. If an attacker can cause this function to run repeatedly and
+// learn whether each instance returned an error then they can decrypt and
+// forge signatures as if they had the private key. See
+// DecryptPKCS1v15SessionKey for a way of solving this problem.
+func DecryptPKCS1v15(random io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error) {
+ if err := checkPub(&priv.PublicKey); err != nil {
+ return nil, err
+ }
+
+ if boring.Enabled {
+ bkey, err := boringPrivateKey(priv)
+ if err != nil {
+ return nil, err
+ }
+ out, err := boring.DecryptRSAPKCS1(bkey, ciphertext)
+ if err != nil {
+ return nil, ErrDecryption
+ }
+ return out, nil
+ }
+
+ valid, out, index, err := decryptPKCS1v15(random, priv, ciphertext)
+ if err != nil {
+ return nil, err
+ }
+ if valid == 0 {
+ return nil, ErrDecryption
+ }
+ return out[index:], nil
+}
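
A round trip through the two functions above; with a 2048-bit key the 11-byte padding overhead leaves up to 245 bytes for the message:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	ct, err := rsa.EncryptPKCS1v15(rand.Reader, &priv.PublicKey, []byte("session key"))
	if err != nil {
		panic(err)
	}
	pt, err := rsa.DecryptPKCS1v15(rand.Reader, priv, ct)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", pt) // session key
}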
+
+// DecryptPKCS1v15SessionKey decrypts a session key using RSA and the padding scheme from PKCS #1 v1.5.
+// If random != nil, it uses RSA blinding to avoid timing side-channel attacks.
+// It returns an error if the ciphertext is the wrong length or if the
+// ciphertext is greater than the public modulus. Otherwise, no error is
+// returned. If the padding is valid, the resulting plaintext message is copied
+// into key. Otherwise, key is unchanged. These alternatives occur in constant
+// time. It is intended that the user of this function generate a random
+// session key beforehand and continue the protocol with the resulting value.
+// This will remove any possibility that an attacker can learn any information
+// about the plaintext.
+// See “Chosen Ciphertext Attacks Against Protocols Based on the RSA
+// Encryption Standard PKCS #1”, Daniel Bleichenbacher, Advances in Cryptology
+// (Crypto '98).
+//
+// Note that if the session key is too small then it may be possible for an
+// attacker to brute-force it. If they can do that then they can learn whether
+// a random value was used (because it'll be different for the same ciphertext)
+// and thus whether the padding was correct. This defeats the point of this
+// function. Using at least a 16-byte key will protect against this attack.
+func DecryptPKCS1v15SessionKey(random io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error {
+ if err := checkPub(&priv.PublicKey); err != nil {
+ return err
+ }
+ k := priv.Size()
+ if k-(len(key)+3+8) < 0 {
+ return ErrDecryption
+ }
+
+ valid, em, index, err := decryptPKCS1v15(random, priv, ciphertext)
+ if err != nil {
+ return err
+ }
+
+ if len(em) != k {
+ // This should be impossible because decryptPKCS1v15 always
+ // returns the full slice.
+ return ErrDecryption
+ }
+
+ valid &= subtle.ConstantTimeEq(int32(len(em)-index), int32(len(key)))
+ subtle.ConstantTimeCopy(valid, key, em[len(em)-len(key):])
+ return nil
+}
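
The usage pattern the doc comment prescribes, sketched end to end: pre-fill key with random bytes, then let the constant-time copy replace them only if the padding was valid:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

func main() {
	priv, _ := rsa.GenerateKey(rand.Reader, 2048)
	ct, _ := rsa.EncryptPKCS1v15(rand.Reader, &priv.PublicKey, make([]byte, 16))

	key := make([]byte, 16) // fallback value, used if the padding is invalid
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	// Only structural errors are reported; a padding failure silently
	// leaves key holding the random fallback.
	if err := rsa.DecryptPKCS1v15SessionKey(rand.Reader, priv, ct, key); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", key)
}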
+
+// decryptPKCS1v15 decrypts ciphertext using priv and blinds the operation if
+// random is not nil. It returns one or zero in valid that indicates whether the
+// plaintext was correctly structured. In either case, the plaintext is
+// returned in em so that it may be read independently of whether it was valid
+// in order to maintain constant memory access patterns. If the plaintext was
+// valid then index contains the index of the original message in em.
+func decryptPKCS1v15(random io.Reader, priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) {
+ k := priv.Size()
+ if k < 11 {
+ err = ErrDecryption
+ return
+ }
+
+ if boring.Enabled {
+ var bkey *boring.PrivateKeyRSA
+ bkey, err = boringPrivateKey(priv)
+ if err != nil {
+ return
+ }
+ em, err = boring.DecryptRSANoPadding(bkey, ciphertext)
+ if err != nil {
+ return
+ }
+ } else {
+ c := new(big.Int).SetBytes(ciphertext)
+ var m *big.Int
+ m, err = decrypt(random, priv, c)
+ if err != nil {
+ return
+ }
+ em = m.FillBytes(make([]byte, k))
+ }
+
+ firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
+ secondByteIsTwo := subtle.ConstantTimeByteEq(em[1], 2)
+
+ // The remainder of the plaintext must be a string of non-zero random
+ // octets, followed by a 0, followed by the message.
+ // lookingForIndex: 1 iff we are still looking for the zero.
+ // index: the offset of the first zero byte.
+ lookingForIndex := 1
+
+ for i := 2; i < len(em); i++ {
+ equals0 := subtle.ConstantTimeByteEq(em[i], 0)
+ index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
+ lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
+ }
+
+ // The PS padding must be at least 8 bytes long, and it starts two
+ // bytes into em.
+ validPS := subtle.ConstantTimeLessOrEq(2+8, index)
+
+ valid = firstByteIsZero & secondByteIsTwo & (^lookingForIndex & 1) & validPS
+ index = subtle.ConstantTimeSelect(valid, index+1, 0)
+ return valid, em, index, nil
+}
+
+// nonZeroRandomBytes fills the given slice with non-zero random octets.
+func nonZeroRandomBytes(s []byte, random io.Reader) (err error) {
+ _, err = io.ReadFull(random, s)
+ if err != nil {
+ return
+ }
+
+ for i := 0; i < len(s); i++ {
+ for s[i] == 0 {
+ _, err = io.ReadFull(random, s[i:i+1])
+ if err != nil {
+ return
+ }
+ // In tests, the PRNG may return all zeros so we do
+ // this to break the loop.
+ s[i] ^= 0x42
+ }
+ }
+
+ return
+}
+
+// These are ASN1 DER structures:
+//
+// DigestInfo ::= SEQUENCE {
+// digestAlgorithm AlgorithmIdentifier,
+// digest OCTET STRING
+// }
+//
+// For performance, we don't use the generic ASN1 encoder. Rather, we
+// precompute a prefix of the digest value that makes a valid ASN1 DER string
+// with the correct contents.
+var hashPrefixes = map[crypto.Hash][]byte{
+ crypto.MD5: {0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, 0x05, 0x00, 0x04, 0x10},
+ crypto.SHA1: {0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03, 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14},
+ crypto.SHA224: {0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1c},
+ crypto.SHA256: {0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20},
+ crypto.SHA384: {0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30},
+ crypto.SHA512: {0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40},
+ crypto.MD5SHA1: {}, // A special TLS case which doesn't use an ASN1 prefix.
+ crypto.RIPEMD160: {0x30, 0x20, 0x30, 0x08, 0x06, 0x06, 0x28, 0xcf, 0x06, 0x03, 0x00, 0x31, 0x04, 0x14},
+}
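
A sketch of how these prefixes are consumed below: the DER DigestInfo is simply prefix || digest, here for SHA-256 (19 + 32 = 51 bytes):

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	prefix := []byte{0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48,
		0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20} // SHA-256
	digest := sha256.Sum256([]byte("hello"))
	t := append(prefix, digest[:]...)
	fmt.Println(len(t)) // 51: a complete DER DigestInfo
}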
+
+// SignPKCS1v15 calculates the signature of hashed using
+// RSASSA-PKCS1-V1_5-SIGN from RSA PKCS #1 v1.5. Note that hashed must
+// be the result of hashing the input message using the given hash
+// function. If hash is zero, hashed is signed directly. This isn't
+// advisable except for interoperability.
+//
+// If random is not nil then RSA blinding will be used to avoid timing
+// side-channel attacks.
+//
+// This function is deterministic. Thus, if the set of possible
+// messages is small, an attacker may be able to build a map from
+// messages to signatures and identify the signed messages. As ever,
+// signatures provide authenticity, not confidentiality.
+func SignPKCS1v15(random io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error) {
+ hashLen, prefix, err := pkcs1v15HashInfo(hash, len(hashed))
+ if err != nil {
+ return nil, err
+ }
+
+ tLen := len(prefix) + hashLen
+ k := priv.Size()
+ if k < tLen+11 {
+ return nil, ErrMessageTooLong
+ }
+
+ if boring.Enabled {
+ bkey, err := boringPrivateKey(priv)
+ if err != nil {
+ return nil, err
+ }
+ return boring.SignRSAPKCS1v15(bkey, hash, hashed)
+ }
+
+ // EM = 0x00 || 0x01 || PS || 0x00 || T
+ em := make([]byte, k)
+ em[1] = 1
+ for i := 2; i < k-tLen-1; i++ {
+ em[i] = 0xff
+ }
+ copy(em[k-tLen:k-hashLen], prefix)
+ copy(em[k-hashLen:k], hashed)
+
+ m := new(big.Int).SetBytes(em)
+ c, err := decryptAndCheck(random, priv, m)
+ if err != nil {
+ return nil, err
+ }
+
+ return c.FillBytes(em), nil
+}
+
+// VerifyPKCS1v15 verifies an RSA PKCS #1 v1.5 signature.
+// hashed is the result of hashing the input message using the given hash
+// function and sig is the signature. A valid signature is indicated by
+// returning a nil error. If hash is zero then hashed is used directly. This
+// isn't advisable except for interoperability.
+func VerifyPKCS1v15(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) error {
+ if boring.Enabled {
+ bkey, err := boringPublicKey(pub)
+ if err != nil {
+ return err
+ }
+ if err := boring.VerifyRSAPKCS1v15(bkey, hash, hashed, sig); err != nil {
+ return ErrVerification
+ }
+ return nil
+ }
+
+ hashLen, prefix, err := pkcs1v15HashInfo(hash, len(hashed))
+ if err != nil {
+ return err
+ }
+
+ tLen := len(prefix) + hashLen
+ k := pub.Size()
+ if k < tLen+11 {
+ return ErrVerification
+ }
+
+ // RFC 8017 Section 8.2.2: If the length of the signature S is not k
+ // octets (where k is the length in octets of the RSA modulus n), output
+ // "invalid signature" and stop.
+ if k != len(sig) {
+ return ErrVerification
+ }
+
+ c := new(big.Int).SetBytes(sig)
+ m := encrypt(new(big.Int), pub, c)
+ em := m.FillBytes(make([]byte, k))
+ // EM = 0x00 || 0x01 || PS || 0x00 || T
+
+ ok := subtle.ConstantTimeByteEq(em[0], 0)
+ ok &= subtle.ConstantTimeByteEq(em[1], 1)
+ ok &= subtle.ConstantTimeCompare(em[k-hashLen:k], hashed)
+ ok &= subtle.ConstantTimeCompare(em[k-tLen:k-hashLen], prefix)
+ ok &= subtle.ConstantTimeByteEq(em[k-tLen-1], 0)
+
+ for i := 2; i < k-tLen-1; i++ {
+ ok &= subtle.ConstantTimeByteEq(em[i], 0xff)
+ }
+
+ if ok != 1 {
+ return ErrVerification
+ }
+
+ return nil
+}
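
Signing and verifying with the pair of functions above:

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, _ := rsa.GenerateKey(rand.Reader, 2048)
	hashed := sha256.Sum256([]byte("message"))
	sig, err := rsa.SignPKCS1v15(rand.Reader, priv, crypto.SHA256, hashed[:])
	if err != nil {
		panic(err)
	}
	err = rsa.VerifyPKCS1v15(&priv.PublicKey, crypto.SHA256, hashed[:], sig)
	fmt.Println(err) // <nil>
}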
+
+func pkcs1v15HashInfo(hash crypto.Hash, inLen int) (hashLen int, prefix []byte, err error) {
+ // Special case: crypto.Hash(0) is used to indicate that the data is
+ // signed directly.
+ if hash == 0 {
+ return inLen, nil, nil
+ }
+
+ hashLen = hash.Size()
+ if inLen != hashLen {
+ return 0, nil, errors.New("crypto/rsa: input must be hashed message")
+ }
+ prefix, ok := hashPrefixes[hash]
+ if !ok {
+ return 0, nil, errors.New("crypto/rsa: unsupported hash function")
+ }
+ return
+}
diff --git a/contrib/go/_std_1.19/src/crypto/rsa/pss.go b/contrib/go/_std_1.19/src/crypto/rsa/pss.go
new file mode 100644
index 0000000000..29e79bd342
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/rsa/pss.go
@@ -0,0 +1,338 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rsa
+
+// This file implements the RSASSA-PSS signature scheme according to RFC 8017.
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/internal/boring"
+ "errors"
+ "hash"
+ "io"
+ "math/big"
+)
+
+// Per RFC 8017, Section 9.1
+//
+// EM = (DB xor dbMask) || H || 0xbc, with H = Hash( 8*0x00 || mHash || salt )
+//
+// where
+//
+// DB = PS || 0x01 || salt
+//
+// and PS can be empty so
+//
+// emLen = dbLen + hLen + 1 = psLen + sLen + hLen + 2
+//
+
+func emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byte, error) {
+ // See RFC 8017, Section 9.1.1.
+
+ hLen := hash.Size()
+ sLen := len(salt)
+ emLen := (emBits + 7) / 8
+
+ // 1. If the length of M is greater than the input limitation for the
+ // hash function (2^61 - 1 octets for SHA-1), output "message too
+ // long" and stop.
+ //
+ // 2. Let mHash = Hash(M), an octet string of length hLen.
+
+ if len(mHash) != hLen {
+ return nil, errors.New("crypto/rsa: input must be hashed with given hash")
+ }
+
+ // 3. If emLen < hLen + sLen + 2, output "encoding error" and stop.
+
+ if emLen < hLen+sLen+2 {
+ return nil, errors.New("crypto/rsa: key size too small for PSS signature")
+ }
+
+ em := make([]byte, emLen)
+ psLen := emLen - sLen - hLen - 2
+ db := em[:psLen+1+sLen]
+ h := em[psLen+1+sLen : emLen-1]
+
+ // 4. Generate a random octet string salt of length sLen; if sLen = 0,
+ // then salt is the empty string.
+ //
+ // 5. Let
+ // M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt;
+ //
+ // M' is an octet string of length 8 + hLen + sLen with eight
+ // initial zero octets.
+ //
+ // 6. Let H = Hash(M'), an octet string of length hLen.
+
+ var prefix [8]byte
+
+ hash.Write(prefix[:])
+ hash.Write(mHash)
+ hash.Write(salt)
+
+ h = hash.Sum(h[:0])
+ hash.Reset()
+
+ // 7. Generate an octet string PS consisting of emLen - sLen - hLen - 2
+ // zero octets. The length of PS may be 0.
+ //
+ // 8. Let DB = PS || 0x01 || salt; DB is an octet string of length
+ // emLen - hLen - 1.
+
+ db[psLen] = 0x01
+ copy(db[psLen+1:], salt)
+
+ // 9. Let dbMask = MGF(H, emLen - hLen - 1).
+ //
+ // 10. Let maskedDB = DB \xor dbMask.
+
+ mgf1XOR(db, hash, h)
+
+ // 11. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in
+ // maskedDB to zero.
+
+ db[0] &= 0xff >> (8*emLen - emBits)
+
+ // 12. Let EM = maskedDB || H || 0xbc.
+ em[emLen-1] = 0xbc
+
+ // 13. Output EM.
+ return em, nil
+}
+
+func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
+ // See RFC 8017, Section 9.1.2.
+
+ hLen := hash.Size()
+ if sLen == PSSSaltLengthEqualsHash {
+ sLen = hLen
+ }
+ emLen := (emBits + 7) / 8
+ if emLen != len(em) {
+ return errors.New("rsa: internal error: inconsistent length")
+ }
+
+ // 1. If the length of M is greater than the input limitation for the
+ // hash function (2^61 - 1 octets for SHA-1), output "inconsistent"
+ // and stop.
+ //
+ // 2. Let mHash = Hash(M), an octet string of length hLen.
+ if hLen != len(mHash) {
+ return ErrVerification
+ }
+
+ // 3. If emLen < hLen + sLen + 2, output "inconsistent" and stop.
+ if emLen < hLen+sLen+2 {
+ return ErrVerification
+ }
+
+ // 4. If the rightmost octet of EM does not have hexadecimal value
+ // 0xbc, output "inconsistent" and stop.
+ if em[emLen-1] != 0xbc {
+ return ErrVerification
+ }
+
+ // 5. Let maskedDB be the leftmost emLen - hLen - 1 octets of EM, and
+ // let H be the next hLen octets.
+ db := em[:emLen-hLen-1]
+ h := em[emLen-hLen-1 : emLen-1]
+
+ // 6. If the leftmost 8 * emLen - emBits bits of the leftmost octet in
+ // maskedDB are not all equal to zero, output "inconsistent" and
+ // stop.
+ var bitMask byte = 0xff >> (8*emLen - emBits)
+ if em[0] & ^bitMask != 0 {
+ return ErrVerification
+ }
+
+ // 7. Let dbMask = MGF(H, emLen - hLen - 1).
+ //
+ // 8. Let DB = maskedDB \xor dbMask.
+ mgf1XOR(db, hash, h)
+
+ // 9. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in DB
+ // to zero.
+ db[0] &= bitMask
+
+ // If we don't know the salt length, look for the 0x01 delimiter.
+ if sLen == PSSSaltLengthAuto {
+ psLen := bytes.IndexByte(db, 0x01)
+ if psLen < 0 {
+ return ErrVerification
+ }
+ sLen = len(db) - psLen - 1
+ }
+
+ // 10. If the emLen - hLen - sLen - 2 leftmost octets of DB are not zero
+ // or if the octet at position emLen - hLen - sLen - 1 (the leftmost
+ // position is "position 1") does not have hexadecimal value 0x01,
+ // output "inconsistent" and stop.
+ psLen := emLen - hLen - sLen - 2
+ for _, e := range db[:psLen] {
+ if e != 0x00 {
+ return ErrVerification
+ }
+ }
+ if db[psLen] != 0x01 {
+ return ErrVerification
+ }
+
+ // 11. Let salt be the last sLen octets of DB.
+ salt := db[len(db)-sLen:]
+
+ // 12. Let
+ // M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt ;
+ // M' is an octet string of length 8 + hLen + sLen with eight
+ // initial zero octets.
+ //
+ // 13. Let H' = Hash(M'), an octet string of length hLen.
+ var prefix [8]byte
+ hash.Write(prefix[:])
+ hash.Write(mHash)
+ hash.Write(salt)
+
+ h0 := hash.Sum(nil)
+
+ // 14. If H = H', output "consistent." Otherwise, output "inconsistent."
+ if !bytes.Equal(h0, h) { // TODO: constant time?
+ return ErrVerification
+ }
+ return nil
+}
+
+// signPSSWithSalt calculates the signature of hashed using PSS with specified salt.
+// Note that hashed must be the result of hashing the input message using the
+// given hash function. salt is a random sequence of bytes whose length will be
+// later used to verify the signature.
+func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) ([]byte, error) {
+ emBits := priv.N.BitLen() - 1
+ em, err := emsaPSSEncode(hashed, emBits, salt, hash.New())
+ if err != nil {
+ return nil, err
+ }
+
+ if boring.Enabled {
+ bkey, err := boringPrivateKey(priv)
+ if err != nil {
+ return nil, err
+ }
+ // Note: BoringCrypto takes care of the "AndCheck" part of "decryptAndCheck".
+ // (It's not just decrypt.)
+ s, err := boring.DecryptRSANoPadding(bkey, em)
+ if err != nil {
+ return nil, err
+ }
+ return s, nil
+ }
+
+ m := new(big.Int).SetBytes(em)
+ c, err := decryptAndCheck(rand, priv, m)
+ if err != nil {
+ return nil, err
+ }
+ s := make([]byte, priv.Size())
+ return c.FillBytes(s), nil
+}
+
+const (
+ // PSSSaltLengthAuto causes the salt in a PSS signature to be as large
+ // as possible when signing, and to be auto-detected when verifying.
+ PSSSaltLengthAuto = 0
+ // PSSSaltLengthEqualsHash causes the salt length to equal the length
+ // of the hash used in the signature.
+ PSSSaltLengthEqualsHash = -1
+)
+
+// PSSOptions contains options for creating and verifying PSS signatures.
+type PSSOptions struct {
+ // SaltLength controls the length of the salt used in the PSS
+ // signature. It can either be a number of bytes, or one of the special
+ // PSSSaltLength constants.
+ SaltLength int
+
+ // Hash is the hash function used to generate the message digest. If not
+ // zero, it overrides the hash function passed to SignPSS. It's required
+ // when using PrivateKey.Sign.
+ Hash crypto.Hash
+}
+
+// HashFunc returns opts.Hash so that PSSOptions implements crypto.SignerOpts.
+func (opts *PSSOptions) HashFunc() crypto.Hash {
+ return opts.Hash
+}
+
+func (opts *PSSOptions) saltLength() int {
+ if opts == nil {
+ return PSSSaltLengthAuto
+ }
+ return opts.SaltLength
+}
+
+// SignPSS calculates the signature of digest using PSS.
+//
+// digest must be the result of hashing the input message using the given hash
+// function. The opts argument may be nil, in which case sensible defaults are
+// used. If opts.Hash is set, it overrides hash.
+func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte, opts *PSSOptions) ([]byte, error) {
+ if opts != nil && opts.Hash != 0 {
+ hash = opts.Hash
+ }
+
+ saltLength := opts.saltLength()
+ switch saltLength {
+ case PSSSaltLengthAuto:
+ saltLength = (priv.N.BitLen()-1+7)/8 - 2 - hash.Size()
+ case PSSSaltLengthEqualsHash:
+ saltLength = hash.Size()
+ }
+
+ if boring.Enabled && rand == boring.RandReader {
+ bkey, err := boringPrivateKey(priv)
+ if err != nil {
+ return nil, err
+ }
+ return boring.SignRSAPSS(bkey, hash, digest, saltLength)
+ }
+ boring.UnreachableExceptTests()
+
+ salt := make([]byte, saltLength)
+ if _, err := io.ReadFull(rand, salt); err != nil {
+ return nil, err
+ }
+ return signPSSWithSalt(rand, priv, hash, digest, salt)
+}
+
+// VerifyPSS verifies a PSS signature.
+//
+// A valid signature is indicated by returning a nil error. digest must be the
+// result of hashing the input message using the given hash function. The opts
+// argument may be nil, in which case sensible defaults are used. opts.Hash is
+// ignored.
+func VerifyPSS(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts *PSSOptions) error {
+ if boring.Enabled {
+ bkey, err := boringPublicKey(pub)
+ if err != nil {
+ return err
+ }
+ if err := boring.VerifyRSAPSS(bkey, hash, digest, sig, opts.saltLength()); err != nil {
+ return ErrVerification
+ }
+ return nil
+ }
+ if len(sig) != pub.Size() {
+ return ErrVerification
+ }
+ s := new(big.Int).SetBytes(sig)
+ m := encrypt(new(big.Int), pub, s)
+ emBits := pub.N.BitLen() - 1
+ emLen := (emBits + 7) / 8
+ if m.BitLen() > emLen*8 {
+ return ErrVerification
+ }
+ em := m.FillBytes(make([]byte, emLen))
+ return emsaPSSVerify(digest, em, emBits, opts.saltLength(), hash.New())
+}
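
Signing and verifying with PSS and explicit options; the salt length constant must agree between the two sides (or PSSSaltLengthAuto can be used to detect it):

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, _ := rsa.GenerateKey(rand.Reader, 2048)
	digest := sha256.Sum256([]byte("message"))
	opts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: crypto.SHA256}
	sig, err := rsa.SignPSS(rand.Reader, priv, crypto.SHA256, digest[:], opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(rsa.VerifyPSS(&priv.PublicKey, crypto.SHA256, digest[:], sig, opts)) // <nil>
}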
diff --git a/contrib/go/_std_1.19/src/crypto/rsa/rsa.go b/contrib/go/_std_1.19/src/crypto/rsa/rsa.go
new file mode 100644
index 0000000000..c941124fb2
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/rsa/rsa.go
@@ -0,0 +1,727 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rsa implements RSA encryption as specified in PKCS #1 and RFC 8017.
+//
+// RSA is a single, fundamental operation that is used in this package to
+// implement either public-key encryption or public-key signatures.
+//
+// The original specification for encryption and signatures with RSA is PKCS #1
+// and the terms "RSA encryption" and "RSA signatures" by default refer to
+// PKCS #1 version 1.5. However, that specification has flaws and new designs
+// should use version 2, usually referred to simply as OAEP and PSS, where
+// possible.
+//
+// Two sets of interfaces are included in this package. When a more abstract
+// interface isn't necessary, there are functions for encrypting/decrypting
+// with v1.5/OAEP and signing/verifying with v1.5/PSS. If one needs to abstract
+// over the public key primitive, the PrivateKey type implements the
+// Decrypter and Signer interfaces from the crypto package.
+//
+// The RSA operations in this package are not implemented using constant-time algorithms.
+package rsa
+
+import (
+ "crypto"
+ "crypto/internal/boring"
+ "crypto/internal/boring/bbig"
+ "crypto/internal/randutil"
+ "crypto/rand"
+ "crypto/subtle"
+ "errors"
+ "hash"
+ "io"
+ "math"
+ "math/big"
+)
+
+var bigZero = big.NewInt(0)
+var bigOne = big.NewInt(1)
+
+// A PublicKey represents the public part of an RSA key.
+type PublicKey struct {
+ N *big.Int // modulus
+ E int // public exponent
+}
+
+// Any methods implemented on PublicKey might need to also be implemented on
+// PrivateKey, as the latter embeds the former and will expose its methods.
+
+// Size returns the modulus size in bytes. Raw signatures and ciphertexts
+// for or by this public key will have the same size.
+func (pub *PublicKey) Size() int {
+ return (pub.N.BitLen() + 7) / 8
+}
+
+// Equal reports whether pub and x have the same value.
+func (pub *PublicKey) Equal(x crypto.PublicKey) bool {
+ xx, ok := x.(*PublicKey)
+ if !ok {
+ return false
+ }
+ return pub.N.Cmp(xx.N) == 0 && pub.E == xx.E
+}
+
+// OAEPOptions is an interface for passing options to OAEP decryption using the
+// crypto.Decrypter interface.
+type OAEPOptions struct {
+ // Hash is the hash function that will be used when generating the mask.
+ Hash crypto.Hash
+ // Label is an arbitrary byte string that must be equal to the value
+ // used when encrypting.
+ Label []byte
+}
+
+var (
+ errPublicModulus = errors.New("crypto/rsa: missing public modulus")
+ errPublicExponentSmall = errors.New("crypto/rsa: public exponent too small")
+ errPublicExponentLarge = errors.New("crypto/rsa: public exponent too large")
+)
+
+// checkPub sanity checks the public key before we use it.
+// We require pub.E to fit into a 32-bit integer so that we
+// do not have different behavior depending on whether
+// int is 32 or 64 bits. See also
+// https://www.imperialviolet.org/2012/03/16/rsae.html.
+func checkPub(pub *PublicKey) error {
+ if pub.N == nil {
+ return errPublicModulus
+ }
+ if pub.E < 2 {
+ return errPublicExponentSmall
+ }
+ if pub.E > 1<<31-1 {
+ return errPublicExponentLarge
+ }
+ return nil
+}
+
+// A PrivateKey represents an RSA key.
+type PrivateKey struct {
+ PublicKey // public part.
+ D *big.Int // private exponent
+ Primes []*big.Int // prime factors of N, has >= 2 elements.
+
+ // Precomputed contains precomputed values that speed up private
+ // operations, if available.
+ Precomputed PrecomputedValues
+}
+
+// Public returns the public key corresponding to priv.
+func (priv *PrivateKey) Public() crypto.PublicKey {
+ return &priv.PublicKey
+}
+
+// Equal reports whether priv and x have equivalent values. It ignores
+// Precomputed values.
+func (priv *PrivateKey) Equal(x crypto.PrivateKey) bool {
+ xx, ok := x.(*PrivateKey)
+ if !ok {
+ return false
+ }
+ if !priv.PublicKey.Equal(&xx.PublicKey) || priv.D.Cmp(xx.D) != 0 {
+ return false
+ }
+ if len(priv.Primes) != len(xx.Primes) {
+ return false
+ }
+ for i := range priv.Primes {
+ if priv.Primes[i].Cmp(xx.Primes[i]) != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Sign signs digest with priv, reading randomness from rand. If opts is a
+// *PSSOptions then the PSS algorithm will be used, otherwise PKCS #1 v1.5 will
+// be used. digest must be the result of hashing the input message using
+// opts.HashFunc().
+//
+// This method implements crypto.Signer, which is an interface to support keys
+// where the private part is kept in, for example, a hardware module. Common
+// uses should use the Sign* functions in this package directly.
+func (priv *PrivateKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
+ if pssOpts, ok := opts.(*PSSOptions); ok {
+ return SignPSS(rand, priv, pssOpts.Hash, digest, pssOpts)
+ }
+
+ return SignPKCS1v15(rand, priv, opts.HashFunc(), digest)
+}
+
+// Decrypt decrypts ciphertext with priv. If opts is nil or of type
+// *PKCS1v15DecryptOptions then PKCS #1 v1.5 decryption is performed. Otherwise
+// opts must have type *OAEPOptions and OAEP decryption is done.
+func (priv *PrivateKey) Decrypt(rand io.Reader, ciphertext []byte, opts crypto.DecrypterOpts) (plaintext []byte, err error) {
+ if opts == nil {
+ return DecryptPKCS1v15(rand, priv, ciphertext)
+ }
+
+ switch opts := opts.(type) {
+ case *OAEPOptions:
+ return DecryptOAEP(opts.Hash.New(), rand, priv, ciphertext, opts.Label)
+
+ case *PKCS1v15DecryptOptions:
+ if l := opts.SessionKeyLen; l > 0 {
+ plaintext = make([]byte, l)
+ if _, err := io.ReadFull(rand, plaintext); err != nil {
+ return nil, err
+ }
+ if err := DecryptPKCS1v15SessionKey(rand, priv, ciphertext, plaintext); err != nil {
+ return nil, err
+ }
+ return plaintext, nil
+ } else {
+ return DecryptPKCS1v15(rand, priv, ciphertext)
+ }
+
+ default:
+ return nil, errors.New("crypto/rsa: invalid options for Decrypt")
+ }
+}
+
+type PrecomputedValues struct {
+ Dp, Dq *big.Int // D mod (P-1) (or mod Q-1)
+ Qinv *big.Int // Q^-1 mod P
+
+ // CRTValues is used for the 3rd and subsequent primes. Due to a
+ // historical accident, the CRT for the first two primes is handled
+ // differently in PKCS #1 and interoperability is sufficiently
+ // important that we mirror this.
+ CRTValues []CRTValue
+}
+
+// CRTValue contains the precomputed Chinese remainder theorem values.
+type CRTValue struct {
+ Exp *big.Int // D mod (prime-1).
+ Coeff *big.Int // R·Coeff ≡ 1 mod Prime.
+ R *big.Int // product of primes prior to this (inc p and q).
+}
+
+// Validate performs basic sanity checks on the key.
+// It returns nil if the key is valid, or else an error describing a problem.
+func (priv *PrivateKey) Validate() error {
+ if err := checkPub(&priv.PublicKey); err != nil {
+ return err
+ }
+
+ // Check that Πprimes == n.
+ modulus := new(big.Int).Set(bigOne)
+ for _, prime := range priv.Primes {
+ // Any primes ≤ 1 will cause divide-by-zero panics later.
+ if prime.Cmp(bigOne) <= 0 {
+ return errors.New("crypto/rsa: invalid prime value")
+ }
+ modulus.Mul(modulus, prime)
+ }
+ if modulus.Cmp(priv.N) != 0 {
+ return errors.New("crypto/rsa: invalid modulus")
+ }
+
+ // Check that de ≡ 1 mod p-1, for each prime.
+ // This implies that e is coprime to each p-1 as e has a multiplicative
+ // inverse. Therefore e is coprime to lcm(p-1,q-1,r-1,...) =
+ // exponent(ℤ/nℤ). It also implies that a^de ≡ a mod p as a^(p-1) ≡ 1
+ // mod p. Thus a^de ≡ a mod n for all a coprime to n, as required.
+ congruence := new(big.Int)
+ de := new(big.Int).SetInt64(int64(priv.E))
+ de.Mul(de, priv.D)
+ for _, prime := range priv.Primes {
+ pminus1 := new(big.Int).Sub(prime, bigOne)
+ congruence.Mod(de, pminus1)
+ if congruence.Cmp(bigOne) != 0 {
+ return errors.New("crypto/rsa: invalid exponents")
+ }
+ }
+ return nil
+}
+
+// GenerateKey generates an RSA keypair of the given bit size using the
+// random source random (for example, crypto/rand.Reader).
+func GenerateKey(random io.Reader, bits int) (*PrivateKey, error) {
+ return GenerateMultiPrimeKey(random, 2, bits)
+}
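
Generating a two-prime key and sanity-checking it with Validate:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	fmt.Println(priv.N.BitLen(), priv.Validate()) // 2048 <nil>
}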
+
+// GenerateMultiPrimeKey generates a multi-prime RSA keypair of the given bit
+// size and the given random source, as suggested in [1]. Although the public
+// keys are compatible with (actually, indistinguishable from) the 2-prime case,
+// the private keys are not. Thus it may not be possible to export multi-prime
+// private keys in certain formats or to subsequently import them into other
+// code.
+//
+// Table 1 in [2] suggests maximum numbers of primes for a given size.
+//
+// [1] US patent 4405829 (1972, expired)
+// [2] http://www.cacr.math.uwaterloo.ca/techreports/2006/cacr2006-16.pdf
+func GenerateMultiPrimeKey(random io.Reader, nprimes int, bits int) (*PrivateKey, error) {
+ randutil.MaybeReadByte(random)
+
+ if boring.Enabled && random == boring.RandReader && nprimes == 2 && (bits == 2048 || bits == 3072) {
+ bN, bE, bD, bP, bQ, bDp, bDq, bQinv, err := boring.GenerateKeyRSA(bits)
+ if err != nil {
+ return nil, err
+ }
+ N := bbig.Dec(bN)
+ E := bbig.Dec(bE)
+ D := bbig.Dec(bD)
+ P := bbig.Dec(bP)
+ Q := bbig.Dec(bQ)
+ Dp := bbig.Dec(bDp)
+ Dq := bbig.Dec(bDq)
+ Qinv := bbig.Dec(bQinv)
+ e64 := E.Int64()
+ if !E.IsInt64() || int64(int(e64)) != e64 {
+ return nil, errors.New("crypto/rsa: generated key exponent too large")
+ }
+ key := &PrivateKey{
+ PublicKey: PublicKey{
+ N: N,
+ E: int(e64),
+ },
+ D: D,
+ Primes: []*big.Int{P, Q},
+ Precomputed: PrecomputedValues{
+ Dp: Dp,
+ Dq: Dq,
+ Qinv: Qinv,
+ CRTValues: make([]CRTValue, 0), // non-nil, to match Precompute
+ },
+ }
+ return key, nil
+ }
+
+ priv := new(PrivateKey)
+ priv.E = 65537
+
+ if nprimes < 2 {
+ return nil, errors.New("crypto/rsa: GenerateMultiPrimeKey: nprimes must be >= 2")
+ }
+
+ if bits < 64 {
+ primeLimit := float64(uint64(1) << uint(bits/nprimes))
+ // pi approximates the number of primes less than primeLimit
+ pi := primeLimit / (math.Log(primeLimit) - 1)
+ // Generated primes start with 11 (in binary) so we can only
+ // use a quarter of them.
+ pi /= 4
+ // Use a factor of two to ensure that key generation terminates
+ // in a reasonable amount of time.
+ pi /= 2
+ if pi <= float64(nprimes) {
+ return nil, errors.New("crypto/rsa: too few primes of given length to generate an RSA key")
+ }
+ }
+
+ primes := make([]*big.Int, nprimes)
+
+NextSetOfPrimes:
+ for {
+ todo := bits
+ // crypto/rand should set the top two bits in each prime.
+ // Thus each prime has the form
+ // p_i = 2^bitlen(p_i) × 0.11... (in base 2).
+ // And the product is:
+ // P = 2^todo × α
+ // where α is the product of nprimes numbers of the form 0.11...
+ //
+ // If α < 1/2 (which can happen for nprimes > 2), we need to
+ // shift todo to compensate for lost bits: the mean value of 0.11...
+ // is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2
+ // will give good results.
+ if nprimes >= 7 {
+ todo += (nprimes - 2) / 5
+ }
+ for i := 0; i < nprimes; i++ {
+ var err error
+ primes[i], err = rand.Prime(random, todo/(nprimes-i))
+ if err != nil {
+ return nil, err
+ }
+ todo -= primes[i].BitLen()
+ }
+
+ // Make sure that primes is pairwise unequal.
+ for i, prime := range primes {
+ for j := 0; j < i; j++ {
+ if prime.Cmp(primes[j]) == 0 {
+ continue NextSetOfPrimes
+ }
+ }
+ }
+
+ n := new(big.Int).Set(bigOne)
+ totient := new(big.Int).Set(bigOne)
+ pminus1 := new(big.Int)
+ for _, prime := range primes {
+ n.Mul(n, prime)
+ pminus1.Sub(prime, bigOne)
+ totient.Mul(totient, pminus1)
+ }
+ if n.BitLen() != bits {
+ // This should never happen for nprimes == 2 because
+ // crypto/rand should set the top two bits in each prime.
+ // For nprimes > 2 we hope it does not happen often.
+ continue NextSetOfPrimes
+ }
+
+ priv.D = new(big.Int)
+ e := big.NewInt(int64(priv.E))
+ ok := priv.D.ModInverse(e, totient)
+
+ if ok != nil {
+ priv.Primes = primes
+ priv.N = n
+ break
+ }
+ }
+
+ priv.Precompute()
+ return priv, nil
+}
+
+// incCounter increments a four byte, big-endian counter.
+func incCounter(c *[4]byte) {
+ if c[3]++; c[3] != 0 {
+ return
+ }
+ if c[2]++; c[2] != 0 {
+ return
+ }
+ if c[1]++; c[1] != 0 {
+ return
+ }
+ c[0]++
+}
+
+// mgf1XOR XORs the bytes in out with a mask generated using the MGF1 function
+// specified in PKCS #1 v2.1.
+func mgf1XOR(out []byte, hash hash.Hash, seed []byte) {
+ var counter [4]byte
+ var digest []byte
+
+ done := 0
+ for done < len(out) {
+ hash.Write(seed)
+ hash.Write(counter[0:4])
+ digest = hash.Sum(digest[:0])
+ hash.Reset()
+
+ for i := 0; i < len(digest) && done < len(out); i++ {
+ out[done] ^= digest[i]
+ done++
+ }
+ incCounter(&counter)
+ }
+}
+
+// ErrMessageTooLong is returned when attempting to encrypt a message which is
+// too large for the size of the public key.
+var ErrMessageTooLong = errors.New("crypto/rsa: message too long for RSA public key size")
+
+func encrypt(c *big.Int, pub *PublicKey, m *big.Int) *big.Int {
+ boring.Unreachable()
+ e := big.NewInt(int64(pub.E))
+ c.Exp(m, e, pub.N)
+ return c
+}
+
+// EncryptOAEP encrypts the given message with RSA-OAEP.
+//
+// OAEP is parameterised by a hash function that is used as a random oracle.
+// Encryption and decryption of a given message must use the same hash function
+// and sha256.New() is a reasonable choice.
+//
+// The random parameter is used as a source of entropy to ensure that
+// encrypting the same message twice doesn't result in the same ciphertext.
+//
+// The label parameter may contain arbitrary data that will not be encrypted,
+// but which gives important context to the message. For example, if a given
+// public key is used to encrypt two types of messages then distinct label
+// values could be used to ensure that a ciphertext for one purpose cannot be
+// used for another by an attacker. If not required it can be empty.
+//
+// The message must be no longer than the length of the public modulus minus
+// twice the hash length, minus a further 2.
+func EncryptOAEP(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) ([]byte, error) {
+ if err := checkPub(pub); err != nil {
+ return nil, err
+ }
+ hash.Reset()
+ k := pub.Size()
+ if len(msg) > k-2*hash.Size()-2 {
+ return nil, ErrMessageTooLong
+ }
+
+ if boring.Enabled && random == boring.RandReader {
+ bkey, err := boringPublicKey(pub)
+ if err != nil {
+ return nil, err
+ }
+ return boring.EncryptRSAOAEP(hash, bkey, msg, label)
+ }
+ boring.UnreachableExceptTests()
+
+ hash.Write(label)
+ lHash := hash.Sum(nil)
+ hash.Reset()
+
+ em := make([]byte, k)
+ seed := em[1 : 1+hash.Size()]
+ db := em[1+hash.Size():]
+
+ copy(db[0:hash.Size()], lHash)
+ db[len(db)-len(msg)-1] = 1
+ copy(db[len(db)-len(msg):], msg)
+
+ _, err := io.ReadFull(random, seed)
+ if err != nil {
+ return nil, err
+ }
+
+ mgf1XOR(db, hash, seed)
+ mgf1XOR(seed, hash, db)
+
+ if boring.Enabled {
+ var bkey *boring.PublicKeyRSA
+ bkey, err = boringPublicKey(pub)
+ if err != nil {
+ return nil, err
+ }
+ return boring.EncryptRSANoPadding(bkey, em)
+ }
+
+ m := new(big.Int)
+ m.SetBytes(em)
+ c := encrypt(new(big.Int), pub, m)
+
+ out := make([]byte, k)
+ return c.FillBytes(out), nil
+}
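
A round trip through OAEP with a label; the hash function and label must match between the encrypt and decrypt calls:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, _ := rsa.GenerateKey(rand.Reader, 2048)
	label := []byte("orders")
	ct, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, &priv.PublicKey, []byte("hi"), label)
	if err != nil {
		panic(err)
	}
	pt, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, priv, ct, label)
	fmt.Println(string(pt), err) // hi <nil>
}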
+
+// ErrDecryption represents a failure to decrypt a message.
+// It is deliberately vague to avoid adaptive attacks.
+var ErrDecryption = errors.New("crypto/rsa: decryption error")
+
+// ErrVerification represents a failure to verify a signature.
+// It is deliberately vague to avoid adaptive attacks.
+var ErrVerification = errors.New("crypto/rsa: verification error")
+
+// Precompute performs some calculations that speed up private key operations
+// in the future.
+func (priv *PrivateKey) Precompute() {
+ if priv.Precomputed.Dp != nil {
+ return
+ }
+
+ priv.Precomputed.Dp = new(big.Int).Sub(priv.Primes[0], bigOne)
+ priv.Precomputed.Dp.Mod(priv.D, priv.Precomputed.Dp)
+
+ priv.Precomputed.Dq = new(big.Int).Sub(priv.Primes[1], bigOne)
+ priv.Precomputed.Dq.Mod(priv.D, priv.Precomputed.Dq)
+
+ priv.Precomputed.Qinv = new(big.Int).ModInverse(priv.Primes[1], priv.Primes[0])
+
+ r := new(big.Int).Mul(priv.Primes[0], priv.Primes[1])
+ priv.Precomputed.CRTValues = make([]CRTValue, len(priv.Primes)-2)
+ for i := 2; i < len(priv.Primes); i++ {
+ prime := priv.Primes[i]
+ values := &priv.Precomputed.CRTValues[i-2]
+
+ values.Exp = new(big.Int).Sub(prime, bigOne)
+ values.Exp.Mod(priv.D, values.Exp)
+
+ values.R = new(big.Int).Set(r)
+ values.Coeff = new(big.Int).ModInverse(r, prime)
+
+ r.Mul(r, prime)
+ }
+}
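+
+// Editor's note (illustrative, not part of the upstream source): Precompute
+// stores the standard CRT exponents dp = d mod (p-1) and dq = d mod (q-1),
+// plus qInv = q^-1 mod p. decrypt below recombines them with Garner's
+// formula:
+//
+//	m1 = c^dp mod p
+//	m2 = c^dq mod q
+//	h  = qInv * (m1 - m2) mod p
+//	m  = m2 + h*q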
+
+// decrypt performs an RSA decryption, resulting in a plaintext integer. If a
+// random source is given, RSA blinding is used.
+func decrypt(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) {
+ if len(priv.Primes) <= 2 {
+ boring.Unreachable()
+ }
+ // TODO(agl): can we get away with reusing blinds?
+ if c.Cmp(priv.N) > 0 {
+ err = ErrDecryption
+ return
+ }
+ if priv.N.Sign() == 0 {
+ return nil, ErrDecryption
+ }
+
+ var ir *big.Int
+ if random != nil {
+ randutil.MaybeReadByte(random)
+
+ // Blinding enabled. Blinding involves multiplying c by r^e.
+ // Then the decryption operation performs (m^e * r^e)^d mod n
+ // which equals mr mod n. The factor of r can then be removed
+ // by multiplying by the multiplicative inverse of r.
+
+ var r *big.Int
+ ir = new(big.Int)
+ for {
+ r, err = rand.Int(random, priv.N)
+ if err != nil {
+ return
+ }
+ if r.Cmp(bigZero) == 0 {
+ r = bigOne
+ }
+ ok := ir.ModInverse(r, priv.N)
+ if ok != nil {
+ break
+ }
+ }
+ bigE := big.NewInt(int64(priv.E))
+ rpowe := new(big.Int).Exp(r, bigE, priv.N) // N != 0
+ cCopy := new(big.Int).Set(c)
+ cCopy.Mul(cCopy, rpowe)
+ cCopy.Mod(cCopy, priv.N)
+ c = cCopy
+ }
+
+ if priv.Precomputed.Dp == nil {
+ m = new(big.Int).Exp(c, priv.D, priv.N)
+ } else {
+ // We have the precalculated values needed for the CRT.
+ m = new(big.Int).Exp(c, priv.Precomputed.Dp, priv.Primes[0])
+ m2 := new(big.Int).Exp(c, priv.Precomputed.Dq, priv.Primes[1])
+ m.Sub(m, m2)
+ if m.Sign() < 0 {
+ m.Add(m, priv.Primes[0])
+ }
+ m.Mul(m, priv.Precomputed.Qinv)
+ m.Mod(m, priv.Primes[0])
+ m.Mul(m, priv.Primes[1])
+ m.Add(m, m2)
+
+ for i, values := range priv.Precomputed.CRTValues {
+ prime := priv.Primes[2+i]
+ m2.Exp(c, values.Exp, prime)
+ m2.Sub(m2, m)
+ m2.Mul(m2, values.Coeff)
+ m2.Mod(m2, prime)
+ if m2.Sign() < 0 {
+ m2.Add(m2, prime)
+ }
+ m2.Mul(m2, values.R)
+ m.Add(m, m2)
+ }
+ }
+
+ if ir != nil {
+ // Unblind.
+ m.Mul(m, ir)
+ m.Mod(m, priv.N)
+ }
+
+ return
+}
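+
+// Editor's note (illustrative, not part of the upstream source): the
+// blinding above relies on the identity
+//
+//	(c * r^e)^d = c^d * r^(e*d) = m * r  (mod n)
+//
+// so the exponentiation runs on a value unrelated to the real ciphertext,
+// and multiplying by ir = r^-1 mod n at the end recovers m.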
+
+func decryptAndCheck(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) {
+ m, err = decrypt(random, priv, c)
+ if err != nil {
+ return nil, err
+ }
+
+ // In order to defend against errors in the CRT computation, m^e is
+ // calculated, which should match the original ciphertext.
+ check := encrypt(new(big.Int), &priv.PublicKey, m)
+ if c.Cmp(check) != 0 {
+ return nil, errors.New("rsa: internal error")
+ }
+ return m, nil
+}
+
+// DecryptOAEP decrypts ciphertext using RSA-OAEP.
+//
+// OAEP is parameterised by a hash function that is used as a random oracle.
+// Encryption and decryption of a given message must use the same hash function,
+// and sha256.New() is a reasonable choice.
+//
+// The random parameter, if not nil, is used to blind the private-key operation
+// and avoid timing side-channel attacks. Blinding is purely internal to this
+// function – the random data need not match that used when encrypting.
+//
+// The label parameter must match the value given when encrypting. See
+// EncryptOAEP for details.
+func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) ([]byte, error) {
+ if err := checkPub(&priv.PublicKey); err != nil {
+ return nil, err
+ }
+ k := priv.Size()
+ if len(ciphertext) > k ||
+ k < hash.Size()*2+2 {
+ return nil, ErrDecryption
+ }
+
+ if boring.Enabled {
+ bkey, err := boringPrivateKey(priv)
+ if err != nil {
+ return nil, err
+ }
+ out, err := boring.DecryptRSAOAEP(hash, bkey, ciphertext, label)
+ if err != nil {
+ return nil, ErrDecryption
+ }
+ return out, nil
+ }
+ c := new(big.Int).SetBytes(ciphertext)
+
+ m, err := decrypt(random, priv, c)
+ if err != nil {
+ return nil, err
+ }
+
+ hash.Write(label)
+ lHash := hash.Sum(nil)
+ hash.Reset()
+
+ // We probably leak the number of leading zeros.
+ // It's not clear that we can do anything about this.
+ em := m.FillBytes(make([]byte, k))
+
+ firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
+
+ seed := em[1 : hash.Size()+1]
+ db := em[hash.Size()+1:]
+
+ mgf1XOR(seed, hash, db)
+ mgf1XOR(db, hash, seed)
+
+ lHash2 := db[0:hash.Size()]
+
+ // We have to validate the plaintext in constant time in order to avoid
+ // attacks like: J. Manger. A Chosen Ciphertext Attack on RSA Optimal
+ // Asymmetric Encryption Padding (OAEP) as Standardized in PKCS #1
+ // v2.0. In J. Kilian, editor, Advances in Cryptology.
+ lHash2Good := subtle.ConstantTimeCompare(lHash, lHash2)
+
+ // The remainder of the plaintext must be zero or more 0x00, followed
+ // by 0x01, followed by the message.
+ // lookingForIndex: 1 iff we are still looking for the 0x01
+ // index: the offset of the first 0x01 byte
+ // invalid: 1 iff we saw a non-zero byte before the 0x01.
+ var lookingForIndex, index, invalid int
+ lookingForIndex = 1
+ rest := db[hash.Size():]
+
+ for i := 0; i < len(rest); i++ {
+ equals0 := subtle.ConstantTimeByteEq(rest[i], 0)
+ equals1 := subtle.ConstantTimeByteEq(rest[i], 1)
+ index = subtle.ConstantTimeSelect(lookingForIndex&equals1, i, index)
+ lookingForIndex = subtle.ConstantTimeSelect(equals1, 0, lookingForIndex)
+ invalid = subtle.ConstantTimeSelect(lookingForIndex&^equals0, 1, invalid)
+ }
+
+ if firstByteIsZero&lHash2Good&^invalid&^lookingForIndex != 1 {
+ return nil, ErrDecryption
+ }
+
+ return rest[index+1:], nil
+}
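+
+// Editor's note (illustrative, not part of the upstream source): the encoded
+// message unpacked above has the PKCS #1 v2 OAEP layout that EncryptOAEP
+// builds:
+//
+//	EM = 0x00 || maskedSeed || maskedDB
+//	DB = lHash || PS (zero or more 0x00 bytes) || 0x01 || M
+//
+// After unmasking, the constant-time scan locates the 0x01 separator and
+// returns everything after it as the message M.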
diff --git a/contrib/go/_std_1.19/src/crypto/sha1/boring.go b/contrib/go/_std_1.19/src/crypto/sha1/boring.go
new file mode 100644
index 0000000000..b5786d1bf4
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/sha1/boring.go
@@ -0,0 +1,25 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Extra indirection here so that, when building go_bootstrap,
+// cmd/internal/boring is not even imported and we don't have
+// to maintain changes to cmd/dist's deps graph.
+
+//go:build !cmd_go_bootstrap && cgo
+// +build !cmd_go_bootstrap,cgo
+
+package sha1
+
+import (
+ "crypto/internal/boring"
+ "hash"
+)
+
+const boringEnabled = boring.Enabled
+
+func boringNewSHA1() hash.Hash { return boring.NewSHA1() }
+
+func boringUnreachable() { boring.Unreachable() }
+
+func boringSHA1(p []byte) [20]byte { return boring.SHA1(p) }
diff --git a/contrib/go/_std_1.19/src/crypto/sha1/sha1.go b/contrib/go/_std_1.19/src/crypto/sha1/sha1.go
new file mode 100644
index 0000000000..271852d21b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/sha1/sha1.go
@@ -0,0 +1,274 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sha1 implements the SHA-1 hash algorithm as defined in RFC 3174.
+//
+// SHA-1 is cryptographically broken and should not be used for secure
+// applications.
+package sha1
+
+import (
+ "crypto"
+ "encoding/binary"
+ "errors"
+ "hash"
+)
+
+func init() {
+ crypto.RegisterHash(crypto.SHA1, New)
+}
+
+// The size of a SHA-1 checksum in bytes.
+const Size = 20
+
+// The blocksize of SHA-1 in bytes.
+const BlockSize = 64
+
+const (
+ chunk = 64
+ init0 = 0x67452301
+ init1 = 0xEFCDAB89
+ init2 = 0x98BADCFE
+ init3 = 0x10325476
+ init4 = 0xC3D2E1F0
+)
+
+// digest represents the partial evaluation of a checksum.
+type digest struct {
+ h [5]uint32
+ x [chunk]byte
+ nx int
+ len uint64
+}
+
+const (
+ magic = "sha\x01"
+ marshaledSize = len(magic) + 5*4 + chunk + 8
+)
+
+func (d *digest) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 0, marshaledSize)
+ b = append(b, magic...)
+ b = appendUint32(b, d.h[0])
+ b = appendUint32(b, d.h[1])
+ b = appendUint32(b, d.h[2])
+ b = appendUint32(b, d.h[3])
+ b = appendUint32(b, d.h[4])
+ b = append(b, d.x[:d.nx]...)
+ b = b[:len(b)+len(d.x)-int(d.nx)] // already zero
+ b = appendUint64(b, d.len)
+ return b, nil
+}
+
+func (d *digest) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+ return errors.New("crypto/sha1: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize {
+ return errors.New("crypto/sha1: invalid hash state size")
+ }
+ b = b[len(magic):]
+ b, d.h[0] = consumeUint32(b)
+ b, d.h[1] = consumeUint32(b)
+ b, d.h[2] = consumeUint32(b)
+ b, d.h[3] = consumeUint32(b)
+ b, d.h[4] = consumeUint32(b)
+ b = b[copy(d.x[:], b):]
+ b, d.len = consumeUint64(b)
+ d.nx = int(d.len % chunk)
+ return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+ var a [8]byte
+ binary.BigEndian.PutUint64(a[:], x)
+ return append(b, a[:]...)
+}
+
+func appendUint32(b []byte, x uint32) []byte {
+ var a [4]byte
+ binary.BigEndian.PutUint32(a[:], x)
+ return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+ _ = b[7]
+ x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+ return b[8:], x
+}
+
+func consumeUint32(b []byte) ([]byte, uint32) {
+ _ = b[3]
+ x := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+ return b[4:], x
+}
+
+func (d *digest) Reset() {
+ d.h[0] = init0
+ d.h[1] = init1
+ d.h[2] = init2
+ d.h[3] = init3
+ d.h[4] = init4
+ d.nx = 0
+ d.len = 0
+}
+
+// New returns a new hash.Hash computing the SHA1 checksum. The Hash also
+// implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler to
+// marshal and unmarshal the internal state of the hash.
+func New() hash.Hash {
+ if boringEnabled {
+ return boringNewSHA1()
+ }
+ d := new(digest)
+ d.Reset()
+ return d
+}
+
+func (d *digest) Size() int { return Size }
+
+func (d *digest) BlockSize() int { return BlockSize }
+
+func (d *digest) Write(p []byte) (nn int, err error) {
+ boringUnreachable()
+ nn = len(p)
+ d.len += uint64(nn)
+ if d.nx > 0 {
+ n := copy(d.x[d.nx:], p)
+ d.nx += n
+ if d.nx == chunk {
+ block(d, d.x[:])
+ d.nx = 0
+ }
+ p = p[n:]
+ }
+ if len(p) >= chunk {
+ n := len(p) &^ (chunk - 1)
+ block(d, p[:n])
+ p = p[n:]
+ }
+ if len(p) > 0 {
+ d.nx = copy(d.x[:], p)
+ }
+ return
+}
+
+func (d *digest) Sum(in []byte) []byte {
+ boringUnreachable()
+ // Make a copy of d so that caller can keep writing and summing.
+ d0 := *d
+ hash := d0.checkSum()
+ return append(in, hash[:]...)
+}
+
+func (d *digest) checkSum() [Size]byte {
+ len := d.len
+ // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
+ var tmp [64]byte
+ tmp[0] = 0x80
+ if len%64 < 56 {
+ d.Write(tmp[0 : 56-len%64])
+ } else {
+ d.Write(tmp[0 : 64+56-len%64])
+ }
+
+ // Length in bits.
+ len <<= 3
+ binary.BigEndian.PutUint64(tmp[:], len)
+ d.Write(tmp[0:8])
+
+ if d.nx != 0 {
+ panic("d.nx != 0")
+ }
+
+ var digest [Size]byte
+
+ binary.BigEndian.PutUint32(digest[0:], d.h[0])
+ binary.BigEndian.PutUint32(digest[4:], d.h[1])
+ binary.BigEndian.PutUint32(digest[8:], d.h[2])
+ binary.BigEndian.PutUint32(digest[12:], d.h[3])
+ binary.BigEndian.PutUint32(digest[16:], d.h[4])
+
+ return digest
+}
+
+// ConstantTimeSum computes the same result as Sum() but in constant time.
+func (d *digest) ConstantTimeSum(in []byte) []byte {
+ d0 := *d
+ hash := d0.constSum()
+ return append(in, hash[:]...)
+}
+
+func (d *digest) constSum() [Size]byte {
+ var length [8]byte
+ l := d.len << 3
+ for i := uint(0); i < 8; i++ {
+ length[i] = byte(l >> (56 - 8*i))
+ }
+
+ nx := byte(d.nx)
+ t := nx - 56 // if nx < 56 then the MSB of t is one
+ mask1b := byte(int8(t) >> 7) // mask1b is 0xFF iff one block is enough
+
+ separator := byte(0x80) // gets reset to 0x00 once used
+ for i := byte(0); i < chunk; i++ {
+ mask := byte(int8(i-nx) >> 7) // 0x00 after the end of data
+
+ // if we reached the end of the data, replace with 0x80 or 0x00
+ d.x[i] = (^mask & separator) | (mask & d.x[i])
+
+ // zero the separator once used
+ separator &= mask
+
+ if i >= 56 {
+ // we might have to write the length here if all fit in one block
+ d.x[i] |= mask1b & length[i-56]
+ }
+ }
+
+ // compress, and only keep the digest if all fit in one block
+ block(d, d.x[:])
+
+ var digest [Size]byte
+ for i, s := range d.h {
+ digest[i*4] = mask1b & byte(s>>24)
+ digest[i*4+1] = mask1b & byte(s>>16)
+ digest[i*4+2] = mask1b & byte(s>>8)
+ digest[i*4+3] = mask1b & byte(s)
+ }
+
+ for i := byte(0); i < chunk; i++ {
+ // second block, it's always past the end of data, might start with 0x80
+ if i < 56 {
+ d.x[i] = separator
+ separator = 0
+ } else {
+ d.x[i] = length[i-56]
+ }
+ }
+
+ // compress, and only keep the digest if we actually needed the second block
+ block(d, d.x[:])
+
+ for i, s := range d.h {
+ digest[i*4] |= ^mask1b & byte(s>>24)
+ digest[i*4+1] |= ^mask1b & byte(s>>16)
+ digest[i*4+2] |= ^mask1b & byte(s>>8)
+ digest[i*4+3] |= ^mask1b & byte(s)
+ }
+
+ return digest
+}
+
+// Sum returns the SHA-1 checksum of the data.
+func Sum(data []byte) [Size]byte {
+ if boringEnabled {
+ return boringSHA1(data)
+ }
+ var d digest
+ d.Reset()
+ d.Write(data)
+ return d.checkSum()
+}
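+
+// Editor's sketch (not part of the upstream source): the streaming and
+// one-shot APIs produce the same digest. Inputs are illustrative.
+//
+//	h := sha1.New()
+//	h.Write([]byte("hello "))
+//	h.Write([]byte("world"))
+//	streamed := h.Sum(nil)                     // []byte, 20 bytes
+//	oneShot := sha1.Sum([]byte("hello world")) // [20]byte
+//	// bytes.Equal(streamed, oneShot[:]) == true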
diff --git a/contrib/go/_std_1.18/src/crypto/sha1/sha1block.go b/contrib/go/_std_1.19/src/crypto/sha1/sha1block.go
index 321d34351c..321d34351c 100644
--- a/contrib/go/_std_1.18/src/crypto/sha1/sha1block.go
+++ b/contrib/go/_std_1.19/src/crypto/sha1/sha1block.go
diff --git a/contrib/go/_std_1.18/src/crypto/sha1/sha1block_amd64.go b/contrib/go/_std_1.19/src/crypto/sha1/sha1block_amd64.go
index 039813d7dc..039813d7dc 100644
--- a/contrib/go/_std_1.18/src/crypto/sha1/sha1block_amd64.go
+++ b/contrib/go/_std_1.19/src/crypto/sha1/sha1block_amd64.go
diff --git a/contrib/go/_std_1.18/src/crypto/sha1/sha1block_amd64.s b/contrib/go/_std_1.19/src/crypto/sha1/sha1block_amd64.s
index 42f03fb268..42f03fb268 100644
--- a/contrib/go/_std_1.18/src/crypto/sha1/sha1block_amd64.s
+++ b/contrib/go/_std_1.19/src/crypto/sha1/sha1block_amd64.s
diff --git a/contrib/go/_std_1.19/src/crypto/sha256/sha256.go b/contrib/go/_std_1.19/src/crypto/sha256/sha256.go
new file mode 100644
index 0000000000..e3c15e66ca
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/sha256/sha256.go
@@ -0,0 +1,285 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sha256 implements the SHA224 and SHA256 hash algorithms as defined
+// in FIPS 180-4.
+package sha256
+
+import (
+ "crypto"
+ "crypto/internal/boring"
+ "encoding/binary"
+ "errors"
+ "hash"
+)
+
+func init() {
+ crypto.RegisterHash(crypto.SHA224, New224)
+ crypto.RegisterHash(crypto.SHA256, New)
+}
+
+// The size of a SHA256 checksum in bytes.
+const Size = 32
+
+// The size of a SHA224 checksum in bytes.
+const Size224 = 28
+
+// The blocksize of SHA256 and SHA224 in bytes.
+const BlockSize = 64
+
+const (
+ chunk = 64
+ init0 = 0x6A09E667
+ init1 = 0xBB67AE85
+ init2 = 0x3C6EF372
+ init3 = 0xA54FF53A
+ init4 = 0x510E527F
+ init5 = 0x9B05688C
+ init6 = 0x1F83D9AB
+ init7 = 0x5BE0CD19
+ init0_224 = 0xC1059ED8
+ init1_224 = 0x367CD507
+ init2_224 = 0x3070DD17
+ init3_224 = 0xF70E5939
+ init4_224 = 0xFFC00B31
+ init5_224 = 0x68581511
+ init6_224 = 0x64F98FA7
+ init7_224 = 0xBEFA4FA4
+)
+
+// digest represents the partial evaluation of a checksum.
+type digest struct {
+ h [8]uint32
+ x [chunk]byte
+ nx int
+ len uint64
+ is224 bool // mark if this digest is SHA-224
+}
+
+const (
+ magic224 = "sha\x02"
+ magic256 = "sha\x03"
+ marshaledSize = len(magic256) + 8*4 + chunk + 8
+)
+
+func (d *digest) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 0, marshaledSize)
+ if d.is224 {
+ b = append(b, magic224...)
+ } else {
+ b = append(b, magic256...)
+ }
+ b = appendUint32(b, d.h[0])
+ b = appendUint32(b, d.h[1])
+ b = appendUint32(b, d.h[2])
+ b = appendUint32(b, d.h[3])
+ b = appendUint32(b, d.h[4])
+ b = appendUint32(b, d.h[5])
+ b = appendUint32(b, d.h[6])
+ b = appendUint32(b, d.h[7])
+ b = append(b, d.x[:d.nx]...)
+ b = b[:len(b)+len(d.x)-int(d.nx)] // already zero
+ b = appendUint64(b, d.len)
+ return b, nil
+}
+
+func (d *digest) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic224) || (d.is224 && string(b[:len(magic224)]) != magic224) || (!d.is224 && string(b[:len(magic256)]) != magic256) {
+ return errors.New("crypto/sha256: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize {
+ return errors.New("crypto/sha256: invalid hash state size")
+ }
+ b = b[len(magic224):]
+ b, d.h[0] = consumeUint32(b)
+ b, d.h[1] = consumeUint32(b)
+ b, d.h[2] = consumeUint32(b)
+ b, d.h[3] = consumeUint32(b)
+ b, d.h[4] = consumeUint32(b)
+ b, d.h[5] = consumeUint32(b)
+ b, d.h[6] = consumeUint32(b)
+ b, d.h[7] = consumeUint32(b)
+ b = b[copy(d.x[:], b):]
+ b, d.len = consumeUint64(b)
+ d.nx = int(d.len % chunk)
+ return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+ var a [8]byte
+ binary.BigEndian.PutUint64(a[:], x)
+ return append(b, a[:]...)
+}
+
+func appendUint32(b []byte, x uint32) []byte {
+ var a [4]byte
+ binary.BigEndian.PutUint32(a[:], x)
+ return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+ _ = b[7]
+ x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+ return b[8:], x
+}
+
+func consumeUint32(b []byte) ([]byte, uint32) {
+ _ = b[3]
+ x := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+ return b[4:], x
+}
+
+func (d *digest) Reset() {
+ if !d.is224 {
+ d.h[0] = init0
+ d.h[1] = init1
+ d.h[2] = init2
+ d.h[3] = init3
+ d.h[4] = init4
+ d.h[5] = init5
+ d.h[6] = init6
+ d.h[7] = init7
+ } else {
+ d.h[0] = init0_224
+ d.h[1] = init1_224
+ d.h[2] = init2_224
+ d.h[3] = init3_224
+ d.h[4] = init4_224
+ d.h[5] = init5_224
+ d.h[6] = init6_224
+ d.h[7] = init7_224
+ }
+ d.nx = 0
+ d.len = 0
+}
+
+// New returns a new hash.Hash computing the SHA256 checksum. The Hash
+// also implements encoding.BinaryMarshaler and
+// encoding.BinaryUnmarshaler to marshal and unmarshal the internal
+// state of the hash.
+func New() hash.Hash {
+ if boring.Enabled {
+ return boring.NewSHA256()
+ }
+ d := new(digest)
+ d.Reset()
+ return d
+}
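+
+// Editor's sketch (not part of the upstream source): because the returned
+// hash implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler,
+// a partially fed hash can be checkpointed and resumed. The firstChunk and
+// secondChunk names are illustrative assumptions.
+//
+//	h := sha256.New()
+//	h.Write(firstChunk)
+//	state, _ := h.(encoding.BinaryMarshaler).MarshalBinary()
+//	// ... later, possibly after persisting state:
+//	h2 := sha256.New()
+//	_ = h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state)
+//	h2.Write(secondChunk)
+//	sum := h2.Sum(nil) // equals hashing firstChunk then secondChunk in one pass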
+
+// New224 returns a new hash.Hash computing the SHA224 checksum.
+func New224() hash.Hash {
+ if boring.Enabled {
+ return boring.NewSHA224()
+ }
+ d := new(digest)
+ d.is224 = true
+ d.Reset()
+ return d
+}
+
+func (d *digest) Size() int {
+ if !d.is224 {
+ return Size
+ }
+ return Size224
+}
+
+func (d *digest) BlockSize() int { return BlockSize }
+
+func (d *digest) Write(p []byte) (nn int, err error) {
+ boring.Unreachable()
+ nn = len(p)
+ d.len += uint64(nn)
+ if d.nx > 0 {
+ n := copy(d.x[d.nx:], p)
+ d.nx += n
+ if d.nx == chunk {
+ block(d, d.x[:])
+ d.nx = 0
+ }
+ p = p[n:]
+ }
+ if len(p) >= chunk {
+ n := len(p) &^ (chunk - 1)
+ block(d, p[:n])
+ p = p[n:]
+ }
+ if len(p) > 0 {
+ d.nx = copy(d.x[:], p)
+ }
+ return
+}
+
+func (d *digest) Sum(in []byte) []byte {
+ boring.Unreachable()
+ // Make a copy of d so that caller can keep writing and summing.
+ d0 := *d
+ hash := d0.checkSum()
+ if d0.is224 {
+ return append(in, hash[:Size224]...)
+ }
+ return append(in, hash[:]...)
+}
+
+func (d *digest) checkSum() [Size]byte {
+ len := d.len
+ // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
+ var tmp [64]byte
+ tmp[0] = 0x80
+ if len%64 < 56 {
+ d.Write(tmp[0 : 56-len%64])
+ } else {
+ d.Write(tmp[0 : 64+56-len%64])
+ }
+
+ // Length in bits.
+ len <<= 3
+ binary.BigEndian.PutUint64(tmp[:], len)
+ d.Write(tmp[0:8])
+
+ if d.nx != 0 {
+ panic("d.nx != 0")
+ }
+
+ var digest [Size]byte
+
+ binary.BigEndian.PutUint32(digest[0:], d.h[0])
+ binary.BigEndian.PutUint32(digest[4:], d.h[1])
+ binary.BigEndian.PutUint32(digest[8:], d.h[2])
+ binary.BigEndian.PutUint32(digest[12:], d.h[3])
+ binary.BigEndian.PutUint32(digest[16:], d.h[4])
+ binary.BigEndian.PutUint32(digest[20:], d.h[5])
+ binary.BigEndian.PutUint32(digest[24:], d.h[6])
+ if !d.is224 {
+ binary.BigEndian.PutUint32(digest[28:], d.h[7])
+ }
+
+ return digest
+}
+
+// Sum256 returns the SHA256 checksum of the data.
+func Sum256(data []byte) [Size]byte {
+ if boring.Enabled {
+ return boring.SHA256(data)
+ }
+ var d digest
+ d.Reset()
+ d.Write(data)
+ return d.checkSum()
+}
+
+// Sum224 returns the SHA224 checksum of the data.
+func Sum224(data []byte) [Size224]byte {
+ if boring.Enabled {
+ return boring.SHA224(data)
+ }
+ var d digest
+ d.is224 = true
+ d.Reset()
+ d.Write(data)
+ sum := d.checkSum()
+ ap := (*[Size224]byte)(sum[:]) // slice-to-array-pointer conversion (Go 1.17+); no copy
+ return *ap
+}
diff --git a/contrib/go/_std_1.18/src/crypto/sha256/sha256block.go b/contrib/go/_std_1.19/src/crypto/sha256/sha256block.go
index bd2f9da93c..bd2f9da93c 100644
--- a/contrib/go/_std_1.18/src/crypto/sha256/sha256block.go
+++ b/contrib/go/_std_1.19/src/crypto/sha256/sha256block.go
diff --git a/contrib/go/_std_1.18/src/crypto/sha256/sha256block_amd64.go b/contrib/go/_std_1.19/src/crypto/sha256/sha256block_amd64.go
index 27464e2c12..27464e2c12 100644
--- a/contrib/go/_std_1.18/src/crypto/sha256/sha256block_amd64.go
+++ b/contrib/go/_std_1.19/src/crypto/sha256/sha256block_amd64.go
diff --git a/contrib/go/_std_1.18/src/crypto/sha256/sha256block_amd64.s b/contrib/go/_std_1.19/src/crypto/sha256/sha256block_amd64.s
index f6af47c50e..f6af47c50e 100644
--- a/contrib/go/_std_1.18/src/crypto/sha256/sha256block_amd64.s
+++ b/contrib/go/_std_1.19/src/crypto/sha256/sha256block_amd64.s
diff --git a/contrib/go/_std_1.19/src/crypto/sha256/sha256block_decl.go b/contrib/go/_std_1.19/src/crypto/sha256/sha256block_decl.go
new file mode 100644
index 0000000000..18ba1c0ec1
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/sha256/sha256block_decl.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64 || s390x || ppc64le || ppc64
+
+package sha256
+
+//go:noescape
+
+func block(dig *digest, p []byte)
diff --git a/contrib/go/_std_1.19/src/crypto/sha512/sha512.go b/contrib/go/_std_1.19/src/crypto/sha512/sha512.go
new file mode 100644
index 0000000000..c800a294a2
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/sha512/sha512.go
@@ -0,0 +1,386 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sha512 implements the SHA-384, SHA-512, SHA-512/224, and SHA-512/256
+// hash algorithms as defined in FIPS 180-4.
+//
+// All the hash.Hash implementations returned by this package also
+// implement encoding.BinaryMarshaler and encoding.BinaryUnmarshaler to
+// marshal and unmarshal the internal state of the hash.
+package sha512
+
+import (
+ "crypto"
+ "crypto/internal/boring"
+ "encoding/binary"
+ "errors"
+ "hash"
+)
+
+func init() {
+ crypto.RegisterHash(crypto.SHA384, New384)
+ crypto.RegisterHash(crypto.SHA512, New)
+ crypto.RegisterHash(crypto.SHA512_224, New512_224)
+ crypto.RegisterHash(crypto.SHA512_256, New512_256)
+}
+
+const (
+ // Size is the size, in bytes, of a SHA-512 checksum.
+ Size = 64
+
+ // Size224 is the size, in bytes, of a SHA-512/224 checksum.
+ Size224 = 28
+
+ // Size256 is the size, in bytes, of a SHA-512/256 checksum.
+ Size256 = 32
+
+ // Size384 is the size, in bytes, of a SHA-384 checksum.
+ Size384 = 48
+
+ // BlockSize is the block size, in bytes, of the SHA-512/224,
+ // SHA-512/256, SHA-384 and SHA-512 hash functions.
+ BlockSize = 128
+)
+
+const (
+ chunk = 128
+ init0 = 0x6a09e667f3bcc908
+ init1 = 0xbb67ae8584caa73b
+ init2 = 0x3c6ef372fe94f82b
+ init3 = 0xa54ff53a5f1d36f1
+ init4 = 0x510e527fade682d1
+ init5 = 0x9b05688c2b3e6c1f
+ init6 = 0x1f83d9abfb41bd6b
+ init7 = 0x5be0cd19137e2179
+ init0_224 = 0x8c3d37c819544da2
+ init1_224 = 0x73e1996689dcd4d6
+ init2_224 = 0x1dfab7ae32ff9c82
+ init3_224 = 0x679dd514582f9fcf
+ init4_224 = 0x0f6d2b697bd44da8
+ init5_224 = 0x77e36f7304c48942
+ init6_224 = 0x3f9d85a86a1d36c8
+ init7_224 = 0x1112e6ad91d692a1
+ init0_256 = 0x22312194fc2bf72c
+ init1_256 = 0x9f555fa3c84c64c2
+ init2_256 = 0x2393b86b6f53b151
+ init3_256 = 0x963877195940eabd
+ init4_256 = 0x96283ee2a88effe3
+ init5_256 = 0xbe5e1e2553863992
+ init6_256 = 0x2b0199fc2c85b8aa
+ init7_256 = 0x0eb72ddc81c52ca2
+ init0_384 = 0xcbbb9d5dc1059ed8
+ init1_384 = 0x629a292a367cd507
+ init2_384 = 0x9159015a3070dd17
+ init3_384 = 0x152fecd8f70e5939
+ init4_384 = 0x67332667ffc00b31
+ init5_384 = 0x8eb44a8768581511
+ init6_384 = 0xdb0c2e0d64f98fa7
+ init7_384 = 0x47b5481dbefa4fa4
+)
+
+// digest represents the partial evaluation of a checksum.
+type digest struct {
+ h [8]uint64
+ x [chunk]byte
+ nx int
+ len uint64
+ function crypto.Hash
+}
+
+func (d *digest) Reset() {
+ switch d.function {
+ case crypto.SHA384:
+ d.h[0] = init0_384
+ d.h[1] = init1_384
+ d.h[2] = init2_384
+ d.h[3] = init3_384
+ d.h[4] = init4_384
+ d.h[5] = init5_384
+ d.h[6] = init6_384
+ d.h[7] = init7_384
+ case crypto.SHA512_224:
+ d.h[0] = init0_224
+ d.h[1] = init1_224
+ d.h[2] = init2_224
+ d.h[3] = init3_224
+ d.h[4] = init4_224
+ d.h[5] = init5_224
+ d.h[6] = init6_224
+ d.h[7] = init7_224
+ case crypto.SHA512_256:
+ d.h[0] = init0_256
+ d.h[1] = init1_256
+ d.h[2] = init2_256
+ d.h[3] = init3_256
+ d.h[4] = init4_256
+ d.h[5] = init5_256
+ d.h[6] = init6_256
+ d.h[7] = init7_256
+ default:
+ d.h[0] = init0
+ d.h[1] = init1
+ d.h[2] = init2
+ d.h[3] = init3
+ d.h[4] = init4
+ d.h[5] = init5
+ d.h[6] = init6
+ d.h[7] = init7
+ }
+ d.nx = 0
+ d.len = 0
+}
+
+const (
+ magic384 = "sha\x04"
+ magic512_224 = "sha\x05"
+ magic512_256 = "sha\x06"
+ magic512 = "sha\x07"
+ marshaledSize = len(magic512) + 8*8 + chunk + 8
+)
+
+func (d *digest) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 0, marshaledSize)
+ switch d.function {
+ case crypto.SHA384:
+ b = append(b, magic384...)
+ case crypto.SHA512_224:
+ b = append(b, magic512_224...)
+ case crypto.SHA512_256:
+ b = append(b, magic512_256...)
+ case crypto.SHA512:
+ b = append(b, magic512...)
+ default:
+ return nil, errors.New("crypto/sha512: invalid hash function")
+ }
+ b = appendUint64(b, d.h[0])
+ b = appendUint64(b, d.h[1])
+ b = appendUint64(b, d.h[2])
+ b = appendUint64(b, d.h[3])
+ b = appendUint64(b, d.h[4])
+ b = appendUint64(b, d.h[5])
+ b = appendUint64(b, d.h[6])
+ b = appendUint64(b, d.h[7])
+ b = append(b, d.x[:d.nx]...)
+ b = b[:len(b)+len(d.x)-int(d.nx)] // already zero
+ b = appendUint64(b, d.len)
+ return b, nil
+}
+
+func (d *digest) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic512) {
+ return errors.New("crypto/sha512: invalid hash state identifier")
+ }
+ switch {
+ case d.function == crypto.SHA384 && string(b[:len(magic384)]) == magic384:
+ case d.function == crypto.SHA512_224 && string(b[:len(magic512_224)]) == magic512_224:
+ case d.function == crypto.SHA512_256 && string(b[:len(magic512_256)]) == magic512_256:
+ case d.function == crypto.SHA512 && string(b[:len(magic512)]) == magic512:
+ default:
+ return errors.New("crypto/sha512: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize {
+ return errors.New("crypto/sha512: invalid hash state size")
+ }
+ b = b[len(magic512):]
+ b, d.h[0] = consumeUint64(b)
+ b, d.h[1] = consumeUint64(b)
+ b, d.h[2] = consumeUint64(b)
+ b, d.h[3] = consumeUint64(b)
+ b, d.h[4] = consumeUint64(b)
+ b, d.h[5] = consumeUint64(b)
+ b, d.h[6] = consumeUint64(b)
+ b, d.h[7] = consumeUint64(b)
+ b = b[copy(d.x[:], b):]
+ b, d.len = consumeUint64(b)
+ d.nx = int(d.len % chunk)
+ return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+ var a [8]byte
+ binary.BigEndian.PutUint64(a[:], x)
+ return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+ _ = b[7]
+ x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+ return b[8:], x
+}
+
+// New returns a new hash.Hash computing the SHA-512 checksum.
+func New() hash.Hash {
+ if boring.Enabled {
+ return boring.NewSHA512()
+ }
+ d := &digest{function: crypto.SHA512}
+ d.Reset()
+ return d
+}
+
+// New512_224 returns a new hash.Hash computing the SHA-512/224 checksum.
+func New512_224() hash.Hash {
+ d := &digest{function: crypto.SHA512_224}
+ d.Reset()
+ return d
+}
+
+// New512_256 returns a new hash.Hash computing the SHA-512/256 checksum.
+func New512_256() hash.Hash {
+ d := &digest{function: crypto.SHA512_256}
+ d.Reset()
+ return d
+}
+
+// New384 returns a new hash.Hash computing the SHA-384 checksum.
+func New384() hash.Hash {
+ if boring.Enabled {
+ return boring.NewSHA384()
+ }
+ d := &digest{function: crypto.SHA384}
+ d.Reset()
+ return d
+}
+
+func (d *digest) Size() int {
+ switch d.function {
+ case crypto.SHA512_224:
+ return Size224
+ case crypto.SHA512_256:
+ return Size256
+ case crypto.SHA384:
+ return Size384
+ default:
+ return Size
+ }
+}
+
+func (d *digest) BlockSize() int { return BlockSize }
+
+func (d *digest) Write(p []byte) (nn int, err error) {
+ if d.function != crypto.SHA512_224 && d.function != crypto.SHA512_256 {
+ boring.Unreachable()
+ }
+ nn = len(p)
+ d.len += uint64(nn)
+ if d.nx > 0 {
+ n := copy(d.x[d.nx:], p)
+ d.nx += n
+ if d.nx == chunk {
+ block(d, d.x[:])
+ d.nx = 0
+ }
+ p = p[n:]
+ }
+ if len(p) >= chunk {
+ n := len(p) &^ (chunk - 1)
+ block(d, p[:n])
+ p = p[n:]
+ }
+ if len(p) > 0 {
+ d.nx = copy(d.x[:], p)
+ }
+ return
+}
+
+func (d *digest) Sum(in []byte) []byte {
+ if d.function != crypto.SHA512_224 && d.function != crypto.SHA512_256 {
+ boring.Unreachable()
+ }
+ // Make a copy of d so that caller can keep writing and summing.
+ d0 := new(digest)
+ *d0 = *d
+ hash := d0.checkSum()
+ switch d0.function {
+ case crypto.SHA384:
+ return append(in, hash[:Size384]...)
+ case crypto.SHA512_224:
+ return append(in, hash[:Size224]...)
+ case crypto.SHA512_256:
+ return append(in, hash[:Size256]...)
+ default:
+ return append(in, hash[:]...)
+ }
+}
+
+func (d *digest) checkSum() [Size]byte {
+ // Padding. Add a 1 bit and 0 bits until 112 bytes mod 128.
+ len := d.len
+ var tmp [128]byte
+ tmp[0] = 0x80
+ if len%128 < 112 {
+ d.Write(tmp[0 : 112-len%128])
+ } else {
+ d.Write(tmp[0 : 128+112-len%128])
+ }
+
+ // Length in bits.
+ len <<= 3
+ binary.BigEndian.PutUint64(tmp[0:], 0) // upper 64 bits are always zero, because len variable has type uint64
+ binary.BigEndian.PutUint64(tmp[8:], len)
+ d.Write(tmp[0:16])
+
+ if d.nx != 0 {
+ panic("d.nx != 0")
+ }
+
+ var digest [Size]byte
+ binary.BigEndian.PutUint64(digest[0:], d.h[0])
+ binary.BigEndian.PutUint64(digest[8:], d.h[1])
+ binary.BigEndian.PutUint64(digest[16:], d.h[2])
+ binary.BigEndian.PutUint64(digest[24:], d.h[3])
+ binary.BigEndian.PutUint64(digest[32:], d.h[4])
+ binary.BigEndian.PutUint64(digest[40:], d.h[5])
+ if d.function != crypto.SHA384 {
+ binary.BigEndian.PutUint64(digest[48:], d.h[6])
+ binary.BigEndian.PutUint64(digest[56:], d.h[7])
+ }
+
+ return digest
+}
+
+// Sum512 returns the SHA512 checksum of the data.
+func Sum512(data []byte) [Size]byte {
+ if boring.Enabled {
+ return boring.SHA512(data)
+ }
+ d := digest{function: crypto.SHA512}
+ d.Reset()
+ d.Write(data)
+ return d.checkSum()
+}
+
+// Sum384 returns the SHA384 checksum of the data.
+func Sum384(data []byte) [Size384]byte {
+ if boring.Enabled {
+ return boring.SHA384(data)
+ }
+ d := digest{function: crypto.SHA384}
+ d.Reset()
+ d.Write(data)
+ sum := d.checkSum()
+ ap := (*[Size384]byte)(sum[:])
+ return *ap
+}
+
+// Sum512_224 returns the SHA-512/224 checksum of the data.
+func Sum512_224(data []byte) [Size224]byte {
+ d := digest{function: crypto.SHA512_224}
+ d.Reset()
+ d.Write(data)
+ sum := d.checkSum()
+ ap := (*[Size224]byte)(sum[:])
+ return *ap
+}
+
+// Sum512_256 returns the SHA-512/256 checksum of the data.
+func Sum512_256(data []byte) [Size256]byte {
+ d := digest{function: crypto.SHA512_256}
+ d.Reset()
+ d.Write(data)
+ sum := d.checkSum()
+ ap := (*[Size256]byte)(sum[:])
+ return *ap
+}
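+
+// Editor's sketch (not part of the upstream source): picking a variant by
+// digest length. The data input is illustrative.
+//
+//	d384 := sha512.Sum384(data)     // [48]byte
+//	d256 := sha512.Sum512_256(data) // [32]byte; distinct IV from SHA-256
+//	d224 := sha512.Sum512_224(data) // [28]byte
+//
+// Because the truncated variants never expose the full 64-byte state, they
+// (like SHA-384) resist length-extension attacks, unlike plain SHA-512.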
diff --git a/contrib/go/_std_1.18/src/crypto/sha512/sha512block.go b/contrib/go/_std_1.19/src/crypto/sha512/sha512block.go
index 81569c5f84..81569c5f84 100644
--- a/contrib/go/_std_1.18/src/crypto/sha512/sha512block.go
+++ b/contrib/go/_std_1.19/src/crypto/sha512/sha512block.go
diff --git a/contrib/go/_std_1.18/src/crypto/sha512/sha512block_amd64.go b/contrib/go/_std_1.19/src/crypto/sha512/sha512block_amd64.go
index 8da3e1473f..8da3e1473f 100644
--- a/contrib/go/_std_1.18/src/crypto/sha512/sha512block_amd64.go
+++ b/contrib/go/_std_1.19/src/crypto/sha512/sha512block_amd64.go
diff --git a/contrib/go/_std_1.18/src/crypto/sha512/sha512block_amd64.s b/contrib/go/_std_1.19/src/crypto/sha512/sha512block_amd64.s
index 0fa0df2f60..0fa0df2f60 100644
--- a/contrib/go/_std_1.18/src/crypto/sha512/sha512block_amd64.s
+++ b/contrib/go/_std_1.19/src/crypto/sha512/sha512block_amd64.s
diff --git a/contrib/go/_std_1.19/src/crypto/subtle/constant_time.go b/contrib/go/_std_1.19/src/crypto/subtle/constant_time.go
new file mode 100644
index 0000000000..4e0527f9d5
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/subtle/constant_time.go
@@ -0,0 +1,62 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package subtle implements functions that are often useful in cryptographic
+// code but require careful thought to use correctly.
+package subtle
+
+// ConstantTimeCompare returns 1 if the two slices, x and y, have equal contents
+// and 0 otherwise. The time taken is a function of the length of the slices and
+// is independent of the contents. If the lengths of x and y do not match it
+// returns 0 immediately.
+func ConstantTimeCompare(x, y []byte) int {
+ if len(x) != len(y) {
+ return 0
+ }
+
+ var v byte
+
+ for i := 0; i < len(x); i++ {
+ v |= x[i] ^ y[i]
+ }
+
+ return ConstantTimeByteEq(v, 0)
+}
+
+// ConstantTimeSelect returns x if v == 1 and y if v == 0.
+// Its behavior is undefined if v takes any other value.
+func ConstantTimeSelect(v, x, y int) int { return ^(v-1)&x | (v-1)&y }
+
+// ConstantTimeByteEq returns 1 if x == y and 0 otherwise.
+func ConstantTimeByteEq(x, y uint8) int {
+ return int((uint32(x^y) - 1) >> 31)
+}
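+
+// Editor's note (illustrative, not part of the upstream source): when x == y,
+// x^y is 0 and the unsigned subtraction 0 - 1 wraps to 0xFFFFFFFF, whose top
+// bit shifted down is 1. For x != y, x^y is in [1, 255], the subtraction
+// cannot wrap, and the top bit is 0. No branch depends on the inputs.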
+
+// ConstantTimeEq returns 1 if x == y and 0 otherwise.
+func ConstantTimeEq(x, y int32) int {
+ return int((uint64(uint32(x^y)) - 1) >> 63)
+}
+
+// ConstantTimeCopy copies the contents of y into x (a slice of equal length)
+// if v == 1. If v == 0, x is left unchanged. Its behavior is undefined if v
+// takes any other value.
+func ConstantTimeCopy(v int, x, y []byte) {
+ if len(x) != len(y) {
+ panic("subtle: slices have different lengths")
+ }
+
+ xmask := byte(v - 1)
+ ymask := byte(^(v - 1))
+ for i := 0; i < len(x); i++ {
+ x[i] = x[i]&xmask | y[i]&ymask
+ }
+}
+
+// ConstantTimeLessOrEq returns 1 if x <= y and 0 otherwise.
+// Its behavior is undefined if x or y are negative or > 2**31 - 1.
+func ConstantTimeLessOrEq(x, y int) int {
+ x32 := int32(x)
+ y32 := int32(y)
+ return int(((x32 - y32 - 1) >> 31) & 1)
+}
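+
+// Editor's sketch (not part of the upstream source): the canonical use of
+// ConstantTimeCompare is verifying a MAC or token without revealing, through
+// timing, how many leading bytes matched. Names are illustrative.
+//
+//	func validMAC(message, receivedMAC, key []byte) bool {
+//		mac := hmac.New(sha256.New, key)
+//		mac.Write(message)
+//		expected := mac.Sum(nil)
+//		return subtle.ConstantTimeCompare(receivedMAC, expected) == 1
+//	}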
diff --git a/contrib/go/_std_1.18/src/crypto/tls/alert.go b/contrib/go/_std_1.19/src/crypto/tls/alert.go
index 4790b73724..4790b73724 100644
--- a/contrib/go/_std_1.18/src/crypto/tls/alert.go
+++ b/contrib/go/_std_1.19/src/crypto/tls/alert.go
diff --git a/contrib/go/_std_1.19/src/crypto/tls/auth.go b/contrib/go/_std_1.19/src/crypto/tls/auth.go
new file mode 100644
index 0000000000..7c5675c6d9
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/tls/auth.go
@@ -0,0 +1,293 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tls
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+)
+
+// verifyHandshakeSignature verifies a signature against pre-hashed
+// (if required) handshake contents.
+func verifyHandshakeSignature(sigType uint8, pubkey crypto.PublicKey, hashFunc crypto.Hash, signed, sig []byte) error {
+ switch sigType {
+ case signatureECDSA:
+ pubKey, ok := pubkey.(*ecdsa.PublicKey)
+ if !ok {
+ return fmt.Errorf("expected an ECDSA public key, got %T", pubkey)
+ }
+ if !ecdsa.VerifyASN1(pubKey, signed, sig) {
+ return errors.New("ECDSA verification failure")
+ }
+ case signatureEd25519:
+ pubKey, ok := pubkey.(ed25519.PublicKey)
+ if !ok {
+ return fmt.Errorf("expected an Ed25519 public key, got %T", pubkey)
+ }
+ if !ed25519.Verify(pubKey, signed, sig) {
+ return errors.New("Ed25519 verification failure")
+ }
+ case signaturePKCS1v15:
+ pubKey, ok := pubkey.(*rsa.PublicKey)
+ if !ok {
+ return fmt.Errorf("expected an RSA public key, got %T", pubkey)
+ }
+ if err := rsa.VerifyPKCS1v15(pubKey, hashFunc, signed, sig); err != nil {
+ return err
+ }
+ case signatureRSAPSS:
+ pubKey, ok := pubkey.(*rsa.PublicKey)
+ if !ok {
+ return fmt.Errorf("expected an RSA public key, got %T", pubkey)
+ }
+ signOpts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash}
+ if err := rsa.VerifyPSS(pubKey, hashFunc, signed, sig, signOpts); err != nil {
+ return err
+ }
+ default:
+ return errors.New("internal error: unknown signature type")
+ }
+ return nil
+}
+
+const (
+ serverSignatureContext = "TLS 1.3, server CertificateVerify\x00"
+ clientSignatureContext = "TLS 1.3, client CertificateVerify\x00"
+)
+
+var signaturePadding = []byte{
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+}
+
+// signedMessage returns the pre-hashed (if necessary) message to be signed by
+// certificate keys in TLS 1.3. See RFC 8446, Section 4.4.3.
+func signedMessage(sigHash crypto.Hash, context string, transcript hash.Hash) []byte {
+ if sigHash == directSigning {
+ b := &bytes.Buffer{}
+ b.Write(signaturePadding)
+ io.WriteString(b, context)
+ b.Write(transcript.Sum(nil))
+ return b.Bytes()
+ }
+ h := sigHash.New()
+ h.Write(signaturePadding)
+ io.WriteString(h, context)
+ h.Write(transcript.Sum(nil))
+ return h.Sum(nil)
+}
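+
+// Editor's note (illustrative, not part of the upstream source): the content
+// assembled above is, per RFC 8446, Section 4.4.3,
+//
+//	0x20 repeated 64 times || context string (NUL-terminated) || Hash(transcript)
+//
+// For Ed25519 (directSigning) the raw concatenation is signed; all other
+// algorithms sign its sigHash digest.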
+
+// typeAndHashFromSignatureScheme returns the corresponding signature type and
+// crypto.Hash for a given TLS SignatureScheme.
+func typeAndHashFromSignatureScheme(signatureAlgorithm SignatureScheme) (sigType uint8, hash crypto.Hash, err error) {
+ switch signatureAlgorithm {
+ case PKCS1WithSHA1, PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512:
+ sigType = signaturePKCS1v15
+ case PSSWithSHA256, PSSWithSHA384, PSSWithSHA512:
+ sigType = signatureRSAPSS
+ case ECDSAWithSHA1, ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512:
+ sigType = signatureECDSA
+ case Ed25519:
+ sigType = signatureEd25519
+ default:
+ return 0, 0, fmt.Errorf("unsupported signature algorithm: %v", signatureAlgorithm)
+ }
+ switch signatureAlgorithm {
+ case PKCS1WithSHA1, ECDSAWithSHA1:
+ hash = crypto.SHA1
+ case PKCS1WithSHA256, PSSWithSHA256, ECDSAWithP256AndSHA256:
+ hash = crypto.SHA256
+ case PKCS1WithSHA384, PSSWithSHA384, ECDSAWithP384AndSHA384:
+ hash = crypto.SHA384
+ case PKCS1WithSHA512, PSSWithSHA512, ECDSAWithP521AndSHA512:
+ hash = crypto.SHA512
+ case Ed25519:
+ hash = directSigning
+ default:
+ return 0, 0, fmt.Errorf("unsupported signature algorithm: %v", signatureAlgorithm)
+ }
+ return sigType, hash, nil
+}
+
+// legacyTypeAndHashFromPublicKey returns the fixed signature type and crypto.Hash for
+// a given public key used with TLS 1.0 and 1.1, before the introduction of
+// signature algorithm negotiation.
+func legacyTypeAndHashFromPublicKey(pub crypto.PublicKey) (sigType uint8, hash crypto.Hash, err error) {
+ switch pub.(type) {
+ case *rsa.PublicKey:
+ return signaturePKCS1v15, crypto.MD5SHA1, nil
+ case *ecdsa.PublicKey:
+ return signatureECDSA, crypto.SHA1, nil
+ case ed25519.PublicKey:
+ // RFC 8422 specifies support for Ed25519 in TLS 1.0 and 1.1,
+ // but it requires holding on to a handshake transcript to do a
+ // full signature, and not even OpenSSL bothers with the
+ // complexity, so we can't even test it properly.
+ return 0, 0, fmt.Errorf("tls: Ed25519 public keys are not supported before TLS 1.2")
+ default:
+ return 0, 0, fmt.Errorf("tls: unsupported public key: %T", pub)
+ }
+}
+
+var rsaSignatureSchemes = []struct {
+ scheme SignatureScheme
+ minModulusBytes int
+ maxVersion uint16
+}{
+ // RSA-PSS is used with PSSSaltLengthEqualsHash, and requires
+ // emLen >= hLen + sLen + 2
+ {PSSWithSHA256, crypto.SHA256.Size()*2 + 2, VersionTLS13},
+ {PSSWithSHA384, crypto.SHA384.Size()*2 + 2, VersionTLS13},
+ {PSSWithSHA512, crypto.SHA512.Size()*2 + 2, VersionTLS13},
+ // PKCS #1 v1.5 uses prefixes from hashPrefixes in crypto/rsa, and requires
+ // emLen >= len(prefix) + hLen + 11
+ // TLS 1.3 dropped support for PKCS #1 v1.5 in favor of RSA-PSS.
+ {PKCS1WithSHA256, 19 + crypto.SHA256.Size() + 11, VersionTLS12},
+ {PKCS1WithSHA384, 19 + crypto.SHA384.Size() + 11, VersionTLS12},
+ {PKCS1WithSHA512, 19 + crypto.SHA512.Size() + 11, VersionTLS12},
+ {PKCS1WithSHA1, 15 + crypto.SHA1.Size() + 11, VersionTLS12},
+}
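+
+// Editor's note (illustrative, not part of the upstream source): working
+// through the first entry, crypto.SHA256.Size() is 32, so PSSWithSHA256
+// needs a modulus of at least 32*2+2 = 66 bytes (528 bits). A 512-bit
+// (64-byte) RSA key therefore negotiates none of the PSS schemes, but still
+// qualifies for PKCS1WithSHA256 (19+32+11 = 62 bytes).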
+
+// signatureSchemesForCertificate returns the list of supported SignatureSchemes
+// for a given certificate, based on the public key and the protocol version,
+// and optionally filtered by its explicit SupportedSignatureAlgorithms.
+//
+// This function must be kept in sync with supportedSignatureAlgorithms.
+// FIPS filtering is applied in the caller, selectSignatureScheme.
+func signatureSchemesForCertificate(version uint16, cert *Certificate) []SignatureScheme {
+ priv, ok := cert.PrivateKey.(crypto.Signer)
+ if !ok {
+ return nil
+ }
+
+ var sigAlgs []SignatureScheme
+ switch pub := priv.Public().(type) {
+ case *ecdsa.PublicKey:
+ if version != VersionTLS13 {
+ // In TLS 1.2 and earlier, ECDSA algorithms are not
+ // constrained to a single curve.
+ sigAlgs = []SignatureScheme{
+ ECDSAWithP256AndSHA256,
+ ECDSAWithP384AndSHA384,
+ ECDSAWithP521AndSHA512,
+ ECDSAWithSHA1,
+ }
+ break
+ }
+ switch pub.Curve {
+ case elliptic.P256():
+ sigAlgs = []SignatureScheme{ECDSAWithP256AndSHA256}
+ case elliptic.P384():
+ sigAlgs = []SignatureScheme{ECDSAWithP384AndSHA384}
+ case elliptic.P521():
+ sigAlgs = []SignatureScheme{ECDSAWithP521AndSHA512}
+ default:
+ return nil
+ }
+ case *rsa.PublicKey:
+ size := pub.Size()
+ sigAlgs = make([]SignatureScheme, 0, len(rsaSignatureSchemes))
+ for _, candidate := range rsaSignatureSchemes {
+ if size >= candidate.minModulusBytes && version <= candidate.maxVersion {
+ sigAlgs = append(sigAlgs, candidate.scheme)
+ }
+ }
+ case ed25519.PublicKey:
+ sigAlgs = []SignatureScheme{Ed25519}
+ default:
+ return nil
+ }
+
+ if cert.SupportedSignatureAlgorithms != nil {
+ var filteredSigAlgs []SignatureScheme
+ for _, sigAlg := range sigAlgs {
+ if isSupportedSignatureAlgorithm(sigAlg, cert.SupportedSignatureAlgorithms) {
+ filteredSigAlgs = append(filteredSigAlgs, sigAlg)
+ }
+ }
+ return filteredSigAlgs
+ }
+ return sigAlgs
+}
+
+// selectSignatureScheme picks a SignatureScheme from the peer's preference list
+// that works with the selected certificate. It's only called for protocol
+// versions that support signature algorithms, so TLS 1.2 and 1.3.
+func selectSignatureScheme(vers uint16, c *Certificate, peerAlgs []SignatureScheme) (SignatureScheme, error) {
+ supportedAlgs := signatureSchemesForCertificate(vers, c)
+ if len(supportedAlgs) == 0 {
+ return 0, unsupportedCertificateError(c)
+ }
+ if len(peerAlgs) == 0 && vers == VersionTLS12 {
+ // For TLS 1.2, if the client didn't send signature_algorithms then we
+ // can assume that it supports SHA1. See RFC 5246, Section 7.4.1.4.1.
+ peerAlgs = []SignatureScheme{PKCS1WithSHA1, ECDSAWithSHA1}
+ }
+ // Pick signature scheme in the peer's preference order, as our
+ // preference order is not configurable.
+ for _, preferredAlg := range peerAlgs {
+ if needFIPS() && !isSupportedSignatureAlgorithm(preferredAlg, fipsSupportedSignatureAlgorithms) {
+ continue
+ }
+ if isSupportedSignatureAlgorithm(preferredAlg, supportedAlgs) {
+ return preferredAlg, nil
+ }
+ }
+ return 0, errors.New("tls: peer doesn't support any of the certificate's signature algorithms")
+}
+
+// unsupportedCertificateError returns a helpful error for certificates with
+// an unsupported private key.
+func unsupportedCertificateError(cert *Certificate) error {
+ switch cert.PrivateKey.(type) {
+ case rsa.PrivateKey, ecdsa.PrivateKey:
+ return fmt.Errorf("tls: unsupported certificate: private key is %T, expected *%T",
+ cert.PrivateKey, cert.PrivateKey)
+ case *ed25519.PrivateKey:
+ return fmt.Errorf("tls: unsupported certificate: private key is *ed25519.PrivateKey, expected ed25519.PrivateKey")
+ }
+
+ signer, ok := cert.PrivateKey.(crypto.Signer)
+ if !ok {
+ return fmt.Errorf("tls: certificate private key (%T) does not implement crypto.Signer",
+ cert.PrivateKey)
+ }
+
+ switch pub := signer.Public().(type) {
+ case *ecdsa.PublicKey:
+ switch pub.Curve {
+ case elliptic.P256():
+ case elliptic.P384():
+ case elliptic.P521():
+ default:
+ return fmt.Errorf("tls: unsupported certificate curve (%s)", pub.Curve.Params().Name)
+ }
+ case *rsa.PublicKey:
+ return fmt.Errorf("tls: certificate RSA key size too small for supported signature algorithms")
+ case ed25519.PublicKey:
+ default:
+ return fmt.Errorf("tls: unsupported certificate key (%T)", pub)
+ }
+
+ if cert.SupportedSignatureAlgorithms != nil {
+ return fmt.Errorf("tls: peer doesn't support the certificate custom signature algorithms")
+ }
+
+ return fmt.Errorf("tls: internal error: unsupported key (%T)", cert.PrivateKey)
+}
diff --git a/contrib/go/_std_1.19/src/crypto/tls/cipher_suites.go b/contrib/go/_std_1.19/src/crypto/tls/cipher_suites.go
new file mode 100644
index 0000000000..9a1fa3104b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/tls/cipher_suites.go
@@ -0,0 +1,702 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tls
+
+import (
+ "crypto"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/des"
+ "crypto/hmac"
+ "crypto/internal/boring"
+ "crypto/rc4"
+ "crypto/sha1"
+ "crypto/sha256"
+ "fmt"
+ "hash"
+ "internal/cpu"
+ "runtime"
+
+ "golang.org/x/crypto/chacha20poly1305"
+)
+
+// CipherSuite is a TLS cipher suite. Note that most functions in this package
+// accept and expose cipher suite IDs instead of this type.
+type CipherSuite struct {
+ ID uint16
+ Name string
+
+ // SupportedVersions is the list of TLS protocol versions that can
+ // negotiate this cipher suite.
+ SupportedVersions []uint16
+
+ // Insecure is true if the cipher suite has known security issues
+ // due to its primitives, design, or implementation.
+ Insecure bool
+}
+
+var (
+ supportedUpToTLS12 = []uint16{VersionTLS10, VersionTLS11, VersionTLS12}
+ supportedOnlyTLS12 = []uint16{VersionTLS12}
+ supportedOnlyTLS13 = []uint16{VersionTLS13}
+)
+
+// CipherSuites returns a list of cipher suites currently implemented by this
+// package, excluding those with security issues, which are returned by
+// InsecureCipherSuites.
+//
+// The list is sorted by ID. Note that the default cipher suites selected by
+// this package might depend on logic that can't be captured by a static list,
+// and might not match those returned by this function.
+func CipherSuites() []*CipherSuite {
+ return []*CipherSuite{
+ {TLS_RSA_WITH_AES_128_CBC_SHA, "TLS_RSA_WITH_AES_128_CBC_SHA", supportedUpToTLS12, false},
+ {TLS_RSA_WITH_AES_256_CBC_SHA, "TLS_RSA_WITH_AES_256_CBC_SHA", supportedUpToTLS12, false},
+ {TLS_RSA_WITH_AES_128_GCM_SHA256, "TLS_RSA_WITH_AES_128_GCM_SHA256", supportedOnlyTLS12, false},
+ {TLS_RSA_WITH_AES_256_GCM_SHA384, "TLS_RSA_WITH_AES_256_GCM_SHA384", supportedOnlyTLS12, false},
+
+ {TLS_AES_128_GCM_SHA256, "TLS_AES_128_GCM_SHA256", supportedOnlyTLS13, false},
+ {TLS_AES_256_GCM_SHA384, "TLS_AES_256_GCM_SHA384", supportedOnlyTLS13, false},
+ {TLS_CHACHA20_POLY1305_SHA256, "TLS_CHACHA20_POLY1305_SHA256", supportedOnlyTLS13, false},
+
+ {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", supportedUpToTLS12, false},
+ {TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", supportedUpToTLS12, false},
+ {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", supportedUpToTLS12, false},
+ {TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", supportedUpToTLS12, false},
+ {TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", supportedOnlyTLS12, false},
+ {TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", supportedOnlyTLS12, false},
+ {TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", supportedOnlyTLS12, false},
+ {TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", supportedOnlyTLS12, false},
+ {TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", supportedOnlyTLS12, false},
+ {TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", supportedOnlyTLS12, false},
+ }
+}
+
+// InsecureCipherSuites returns a list of cipher suites currently implemented by
+// this package and which have security issues.
+//
+// Most applications should not use the cipher suites in this list, and should
+// only use those returned by CipherSuites.
+func InsecureCipherSuites() []*CipherSuite {
+ // This list includes RC4, CBC_SHA256, and 3DES cipher suites. See
+ // cipherSuitesPreferenceOrder for details.
+ return []*CipherSuite{
+ {TLS_RSA_WITH_RC4_128_SHA, "TLS_RSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
+ {TLS_RSA_WITH_3DES_EDE_CBC_SHA, "TLS_RSA_WITH_3DES_EDE_CBC_SHA", supportedUpToTLS12, true},
+ {TLS_RSA_WITH_AES_128_CBC_SHA256, "TLS_RSA_WITH_AES_128_CBC_SHA256", supportedOnlyTLS12, true},
+ {TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
+ {TLS_ECDHE_RSA_WITH_RC4_128_SHA, "TLS_ECDHE_RSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
+ {TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", supportedUpToTLS12, true},
+ {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", supportedOnlyTLS12, true},
+ {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", supportedOnlyTLS12, true},
+ }
+}
+
+// CipherSuiteName returns the standard name for the passed cipher suite ID
+// (e.g. "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"), or a fallback representation
+// of the ID value if the cipher suite is not implemented by this package.
+func CipherSuiteName(id uint16) string {
+ for _, c := range CipherSuites() {
+ if c.ID == id {
+ return c.Name
+ }
+ }
+ for _, c := range InsecureCipherSuites() {
+ if c.ID == id {
+ return c.Name
+ }
+ }
+ return fmt.Sprintf("0x%04X", id)
+}
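+
+// Editor's sketch (not part of the upstream source): resolving IDs to names.
+//
+//	tls.CipherSuiteName(0x1301) // "TLS_AES_128_GCM_SHA256"
+//	tls.CipherSuiteName(0x0000) // "0x0000" (not implemented by this package)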
+
+const (
+ // suiteECDHE indicates that the cipher suite involves elliptic curve
+ // Diffie-Hellman. This means that it should only be selected when the
+ // client indicates that it supports ECC with a curve and point format
+ // that we're happy with.
+ suiteECDHE = 1 << iota
+ // suiteECSign indicates that the cipher suite involves an ECDSA or
+ // EdDSA signature and therefore may only be selected when the server's
+ // certificate is ECDSA or EdDSA. If this is not set then the cipher suite
+ // is RSA based.
+ suiteECSign
+ // suiteTLS12 indicates that the cipher suite should only be advertised
+ // and accepted when using TLS 1.2.
+ suiteTLS12
+ // suiteSHA384 indicates that the cipher suite uses SHA384 as the
+ // handshake hash.
+ suiteSHA384
+)
+
+// A cipherSuite is a TLS 1.0–1.2 cipher suite, and defines the key exchange
+// mechanism, as well as the cipher+MAC pair or the AEAD.
+type cipherSuite struct {
+ id uint16
+ // the lengths, in bytes, of the key material needed for each component.
+ keyLen int
+ macLen int
+ ivLen int
+ ka func(version uint16) keyAgreement
+ // flags is a bitmask of the suite* values, above.
+ flags int
+ cipher func(key, iv []byte, isRead bool) any
+ mac func(key []byte) hash.Hash
+ aead func(key, fixedNonce []byte) aead
+}
+
+var cipherSuites = []*cipherSuite{ // TODO: replace with a map, since the order doesn't matter.
+ {TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, 32, 0, 12, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadChaCha20Poly1305},
+ {TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, 32, 0, 12, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12, nil, nil, aeadChaCha20Poly1305},
+ {TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadAESGCM},
+ {TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12, nil, nil, aeadAESGCM},
+ {TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM},
+ {TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM},
+ {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, ecdheRSAKA, suiteECDHE | suiteTLS12, cipherAES, macSHA256, nil},
+ {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil},
+ {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12, cipherAES, macSHA256, nil},
+ {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECSign, cipherAES, macSHA1, nil},
+ {TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil},
+ {TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECSign, cipherAES, macSHA1, nil},
+ {TLS_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, rsaKA, suiteTLS12, nil, nil, aeadAESGCM},
+ {TLS_RSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, rsaKA, suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM},
+ {TLS_RSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, rsaKA, suiteTLS12, cipherAES, macSHA256, nil},
+ {TLS_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil},
+ {TLS_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil},
+ {TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, ecdheRSAKA, suiteECDHE, cipher3DES, macSHA1, nil},
+ {TLS_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, rsaKA, 0, cipher3DES, macSHA1, nil},
+ {TLS_RSA_WITH_RC4_128_SHA, 16, 20, 0, rsaKA, 0, cipherRC4, macSHA1, nil},
+ {TLS_ECDHE_RSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheRSAKA, suiteECDHE, cipherRC4, macSHA1, nil},
+ {TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheECDSAKA, suiteECDHE | suiteECSign, cipherRC4, macSHA1, nil},
+}
+
+// selectCipherSuite returns the first TLS 1.0–1.2 cipher suite from ids which
+// is also in supportedIDs and passes the ok filter.
+func selectCipherSuite(ids, supportedIDs []uint16, ok func(*cipherSuite) bool) *cipherSuite {
+ for _, id := range ids {
+ candidate := cipherSuiteByID(id)
+ if candidate == nil || !ok(candidate) {
+ continue
+ }
+
+ for _, suppID := range supportedIDs {
+ if id == suppID {
+ return candidate
+ }
+ }
+ }
+ return nil
+}
+
+// A cipherSuiteTLS13 defines only the pair of the AEAD algorithm and hash
+// algorithm to be used with HKDF. See RFC 8446, Appendix B.4.
+type cipherSuiteTLS13 struct {
+ id uint16
+ keyLen int
+ aead func(key, fixedNonce []byte) aead
+ hash crypto.Hash
+}
+
+var cipherSuitesTLS13 = []*cipherSuiteTLS13{ // TODO: replace with a map.
+ {TLS_AES_128_GCM_SHA256, 16, aeadAESGCMTLS13, crypto.SHA256},
+ {TLS_CHACHA20_POLY1305_SHA256, 32, aeadChaCha20Poly1305, crypto.SHA256},
+ {TLS_AES_256_GCM_SHA384, 32, aeadAESGCMTLS13, crypto.SHA384},
+}
+
+// cipherSuitesPreferenceOrder is the order in which we'll select (on the
+// server) or advertise (on the client) TLS 1.0–1.2 cipher suites.
+//
+// Cipher suites are filtered but not reordered based on the application and
+// peer's preferences, meaning we'll never select a suite lower in this list if
+// any higher one is available. This makes it more defensible to keep weaker
+// cipher suites enabled, especially on the server side where we get the last
+// word, since there are no known downgrade attacks on cipher suite selection.
+//
+// The list is sorted by applying the following priority rules, stopping at the
+// first (most important) applicable one:
+//
+// - Anything else comes before RC4
+//
+// RC4 has practically exploitable biases. See https://www.rc4nomore.com.
+//
+// - Anything else comes before CBC_SHA256
+//
+// SHA-256 variants of the CBC ciphersuites don't implement any Lucky13
+// countermeasures. See http://www.isg.rhul.ac.uk/tls/Lucky13.html and
+// https://www.imperialviolet.org/2013/02/04/luckythirteen.html.
+//
+// - Anything else comes before 3DES
+//
+// 3DES has 64-bit blocks, which makes it fundamentally susceptible to
+// birthday attacks. See https://sweet32.info.
+//
+// - ECDHE comes before anything else
+//
+// Once we got the broken stuff out of the way, the most important
+// property a cipher suite can have is forward secrecy. We don't
+// implement FFDHE, so that means ECDHE.
+//
+// - AEADs come before CBC ciphers
+//
+// Even with Lucky13 countermeasures, MAC-then-Encrypt CBC cipher suites
+// are fundamentally fragile, and suffered from an endless sequence of
+// padding oracle attacks. See https://eprint.iacr.org/2015/1129,
+// https://www.imperialviolet.org/2014/12/08/poodleagain.html, and
+// https://blog.cloudflare.com/yet-another-padding-oracle-in-openssl-cbc-ciphersuites/.
+//
+// - AES comes before ChaCha20
+//
+// When AES hardware is available, AES-128-GCM and AES-256-GCM are faster
+// than ChaCha20Poly1305.
+//
+// When AES hardware is not available, AES-128-GCM is one or more of: much
+// slower, way more complex, and less safe (because not constant time)
+// than ChaCha20Poly1305.
+//
+// We use this list if we think both peers have AES hardware, and
+// cipherSuitesPreferenceOrderNoAES otherwise.
+//
+// - AES-128 comes before AES-256
+//
+// The only potential advantages of AES-256 are better multi-target
+// margins, and hypothetical post-quantum properties. Neither apply to
+// TLS, and AES-256 is slower due to its four extra rounds (which don't
+// contribute to the advantages above).
+//
+// - ECDSA comes before RSA
+//
+// The relative order of ECDSA and RSA cipher suites doesn't matter,
+// as they depend on the certificate. Pick one to get a stable order.
+var cipherSuitesPreferenceOrder = []uint16{
+ // AEADs w/ ECDHE
+ TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+
+ // CBC w/ ECDHE
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+
+ // AEADs w/o ECDHE
+ TLS_RSA_WITH_AES_128_GCM_SHA256,
+ TLS_RSA_WITH_AES_256_GCM_SHA384,
+
+ // CBC w/o ECDHE
+ TLS_RSA_WITH_AES_128_CBC_SHA,
+ TLS_RSA_WITH_AES_256_CBC_SHA,
+
+ // 3DES
+ TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+
+ // CBC_SHA256
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ TLS_RSA_WITH_AES_128_CBC_SHA256,
+
+ // RC4
+ TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ TLS_RSA_WITH_RC4_128_SHA,
+}
+
+var cipherSuitesPreferenceOrderNoAES = []uint16{
+ // ChaCha20Poly1305
+ TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+
+ // AES-GCM w/ ECDHE
+ TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+
+ // The rest of cipherSuitesPreferenceOrder.
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ TLS_RSA_WITH_AES_128_GCM_SHA256,
+ TLS_RSA_WITH_AES_256_GCM_SHA384,
+ TLS_RSA_WITH_AES_128_CBC_SHA,
+ TLS_RSA_WITH_AES_256_CBC_SHA,
+ TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ TLS_RSA_WITH_AES_128_CBC_SHA256,
+ TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ TLS_RSA_WITH_RC4_128_SHA,
+}
+
+// disabledCipherSuites are not used unless explicitly listed in
+// Config.CipherSuites. They MUST be at the end of cipherSuitesPreferenceOrder.
+var disabledCipherSuites = []uint16{
+ // CBC_SHA256
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ TLS_RSA_WITH_AES_128_CBC_SHA256,
+
+ // RC4
+ TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ TLS_RSA_WITH_RC4_128_SHA,
+}
+
+var (
+ defaultCipherSuitesLen = len(cipherSuitesPreferenceOrder) - len(disabledCipherSuites)
+ defaultCipherSuites = cipherSuitesPreferenceOrder[:defaultCipherSuitesLen]
+)
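+
+// Because disabledCipherSuites is exactly the tail of
+// cipherSuitesPreferenceOrder (the CBC_SHA256 and RC4 groups), slicing off
+// its length yields the enabled defaults while preserving preference order.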
+
+// defaultCipherSuitesTLS13 is also the preference order, since no TLS 1.3
+// cipher suites are disabled by default. The same AES vs ChaCha20 logic as
+// cipherSuitesPreferenceOrder applies.
+var defaultCipherSuitesTLS13 = []uint16{
+ TLS_AES_128_GCM_SHA256,
+ TLS_AES_256_GCM_SHA384,
+ TLS_CHACHA20_POLY1305_SHA256,
+}
+
+var defaultCipherSuitesTLS13NoAES = []uint16{
+ TLS_CHACHA20_POLY1305_SHA256,
+ TLS_AES_128_GCM_SHA256,
+ TLS_AES_256_GCM_SHA384,
+}
+
+var (
+ hasGCMAsmAMD64 = cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ
+ hasGCMAsmARM64 = cpu.ARM64.HasAES && cpu.ARM64.HasPMULL
+ // Keep in sync with crypto/aes/cipher_s390x.go.
+ hasGCMAsmS390X = cpu.S390X.HasAES && cpu.S390X.HasAESCBC && cpu.S390X.HasAESCTR &&
+ (cpu.S390X.HasGHASH || cpu.S390X.HasAESGCM)
+
+ hasAESGCMHardwareSupport = runtime.GOARCH == "amd64" && hasGCMAsmAMD64 ||
+ runtime.GOARCH == "arm64" && hasGCMAsmARM64 ||
+ runtime.GOARCH == "s390x" && hasGCMAsmS390X
+)
+
+var aesgcmCiphers = map[uint16]bool{
+ // TLS 1.2
+ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: true,
+ TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: true,
+ TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: true,
+ TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: true,
+ // TLS 1.3
+ TLS_AES_128_GCM_SHA256: true,
+ TLS_AES_256_GCM_SHA384: true,
+}
+
+var nonAESGCMAEADCiphers = map[uint16]bool{
+ // TLS 1.2
+ TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: true,
+ TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: true,
+ // TLS 1.3
+ TLS_CHACHA20_POLY1305_SHA256: true,
+}
+
+// aesgcmPreferred returns whether the first known cipher in the preference list
+// is an AES-GCM cipher, implying the peer has hardware support for it.
+func aesgcmPreferred(ciphers []uint16) bool {
+ for _, cID := range ciphers {
+ if c := cipherSuiteByID(cID); c != nil {
+ return aesgcmCiphers[cID]
+ }
+ if c := cipherSuiteTLS13ByID(cID); c != nil {
+ return aesgcmCiphers[cID]
+ }
+ }
+ return false
+}
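+
+// For illustration, a server might pick between the two preference lists
+// roughly like this (clientHelloSuites is a hypothetical slice of the peer's
+// advertised suite IDs):
+//
+//	order := cipherSuitesPreferenceOrder
+//	if !hasAESGCMHardwareSupport || !aesgcmPreferred(clientHelloSuites) {
+//		order = cipherSuitesPreferenceOrderNoAES
+//	}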
+
+func cipherRC4(key, iv []byte, isRead bool) any {
+ cipher, _ := rc4.NewCipher(key)
+ return cipher
+}
+
+func cipher3DES(key, iv []byte, isRead bool) any {
+ block, _ := des.NewTripleDESCipher(key)
+ if isRead {
+ return cipher.NewCBCDecrypter(block, iv)
+ }
+ return cipher.NewCBCEncrypter(block, iv)
+}
+
+func cipherAES(key, iv []byte, isRead bool) any {
+ block, _ := aes.NewCipher(key)
+ if isRead {
+ return cipher.NewCBCDecrypter(block, iv)
+ }
+ return cipher.NewCBCEncrypter(block, iv)
+}
+
+// macSHA1 returns a SHA-1 based constant time MAC.
+func macSHA1(key []byte) hash.Hash {
+ h := sha1.New
+ // The BoringCrypto SHA1 does not have a constant-time
+ // checksum function, so don't try to use it.
+ if !boring.Enabled {
+ h = newConstantTimeHash(h)
+ }
+ return hmac.New(h, key)
+}
+
+// macSHA256 returns a SHA-256 based MAC. This is only supported in TLS 1.2 and
+// is currently only used in disabled-by-default cipher suites.
+func macSHA256(key []byte) hash.Hash {
+ return hmac.New(sha256.New, key)
+}
+
+type aead interface {
+ cipher.AEAD
+
+ // explicitNonceLen returns the number of bytes of explicit nonce
+ // included in each record. This is eight for older AEADs and
+ // zero for modern ones.
+ explicitNonceLen() int
+}
+
+const (
+ aeadNonceLength = 12
+ noncePrefixLength = 4
+)
+
+// prefixNonceAEAD wraps an AEAD and prefixes a fixed portion of the nonce to
+// each call.
+type prefixNonceAEAD struct {
+ // nonce contains the fixed part of the nonce in the first four bytes.
+ nonce [aeadNonceLength]byte
+ aead cipher.AEAD
+}
+
+func (f *prefixNonceAEAD) NonceSize() int { return aeadNonceLength - noncePrefixLength }
+func (f *prefixNonceAEAD) Overhead() int { return f.aead.Overhead() }
+func (f *prefixNonceAEAD) explicitNonceLen() int { return f.NonceSize() }
+
+func (f *prefixNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte {
+ copy(f.nonce[4:], nonce)
+ return f.aead.Seal(out, f.nonce[:], plaintext, additionalData)
+}
+
+func (f *prefixNonceAEAD) Open(out, nonce, ciphertext, additionalData []byte) ([]byte, error) {
+ copy(f.nonce[4:], nonce)
+ return f.aead.Open(out, f.nonce[:], ciphertext, additionalData)
+}
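+
+// For example, with the fixed prefix aa bb cc dd and the per-record explicit
+// nonce 01 02 03 04 05 06 07 08, the effective 12-byte AEAD nonce is
+//
+//	aa bb cc dd 01 02 03 04 05 06 07 08
+//
+// i.e. the 4-byte prefix followed by the 8-byte explicit part carried in the
+// record itself.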
+
+// xorNonceAEAD wraps an AEAD by XORing in a fixed pattern to the nonce
+// before each call.
+type xorNonceAEAD struct {
+ nonceMask [aeadNonceLength]byte
+ aead cipher.AEAD
+}
+
+func (f *xorNonceAEAD) NonceSize() int { return 8 } // 64-bit sequence number
+func (f *xorNonceAEAD) Overhead() int { return f.aead.Overhead() }
+func (f *xorNonceAEAD) explicitNonceLen() int { return 0 }
+
+func (f *xorNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte {
+ for i, b := range nonce {
+ f.nonceMask[4+i] ^= b
+ }
+ result := f.aead.Seal(out, f.nonceMask[:], plaintext, additionalData)
+ for i, b := range nonce {
+ f.nonceMask[4+i] ^= b
+ }
+
+ return result
+}
+
+func (f *xorNonceAEAD) Open(out, nonce, ciphertext, additionalData []byte) ([]byte, error) {
+ for i, b := range nonce {
+ f.nonceMask[4+i] ^= b
+ }
+ result, err := f.aead.Open(out, f.nonceMask[:], ciphertext, additionalData)
+ for i, b := range nonce {
+ f.nonceMask[4+i] ^= b
+ }
+
+ return result, err
+}
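+
+// For example, if nonceMask holds the 12-byte IV m and the 8-byte record
+// sequence number is s, each call uses the nonce m[0:4] || (m[4:12] XOR s);
+// the second XOR pass undoes the first, so nonceMask is unchanged between
+// calls. This is the per-record nonce construction of RFC 8446, Section 5.3
+// and RFC 7905.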
+
+func aeadAESGCM(key, noncePrefix []byte) aead {
+ if len(noncePrefix) != noncePrefixLength {
+ panic("tls: internal error: wrong nonce length")
+ }
+ aes, err := aes.NewCipher(key)
+ if err != nil {
+ panic(err)
+ }
+ var aead cipher.AEAD
+ if boring.Enabled {
+ aead, err = boring.NewGCMTLS(aes)
+ } else {
+ boring.Unreachable()
+ aead, err = cipher.NewGCM(aes)
+ }
+ if err != nil {
+ panic(err)
+ }
+
+ ret := &prefixNonceAEAD{aead: aead}
+ copy(ret.nonce[:], noncePrefix)
+ return ret
+}
+
+func aeadAESGCMTLS13(key, nonceMask []byte) aead {
+ if len(nonceMask) != aeadNonceLength {
+ panic("tls: internal error: wrong nonce length")
+ }
+ aes, err := aes.NewCipher(key)
+ if err != nil {
+ panic(err)
+ }
+ aead, err := cipher.NewGCM(aes)
+ if err != nil {
+ panic(err)
+ }
+
+ ret := &xorNonceAEAD{aead: aead}
+ copy(ret.nonceMask[:], nonceMask)
+ return ret
+}
+
+func aeadChaCha20Poly1305(key, nonceMask []byte) aead {
+ if len(nonceMask) != aeadNonceLength {
+ panic("tls: internal error: wrong nonce length")
+ }
+ aead, err := chacha20poly1305.New(key)
+ if err != nil {
+ panic(err)
+ }
+
+ ret := &xorNonceAEAD{aead: aead}
+ copy(ret.nonceMask[:], nonceMask)
+ return ret
+}
+
+type constantTimeHash interface {
+ hash.Hash
+ ConstantTimeSum(b []byte) []byte
+}
+
+// cthWrapper wraps any hash.Hash that implements ConstantTimeSum, replacing
+// all calls to Sum with calls to ConstantTimeSum. It's used to obtain an
+// HMAC based on ConstantTimeSum.
+type cthWrapper struct {
+ h constantTimeHash
+}
+
+func (c *cthWrapper) Size() int { return c.h.Size() }
+func (c *cthWrapper) BlockSize() int { return c.h.BlockSize() }
+func (c *cthWrapper) Reset() { c.h.Reset() }
+func (c *cthWrapper) Write(p []byte) (int, error) { return c.h.Write(p) }
+func (c *cthWrapper) Sum(b []byte) []byte { return c.h.ConstantTimeSum(b) }
+
+func newConstantTimeHash(h func() hash.Hash) func() hash.Hash {
+ boring.Unreachable()
+ return func() hash.Hash {
+ return &cthWrapper{h().(constantTimeHash)}
+ }
+}
+
+// tls10MAC implements the TLS 1.0 MAC function. RFC 2246, Section 6.2.3.
+func tls10MAC(h hash.Hash, out, seq, header, data, extra []byte) []byte {
+ h.Reset()
+ h.Write(seq)
+ h.Write(header)
+ h.Write(data)
+ res := h.Sum(out)
+ if extra != nil {
+ h.Write(extra)
+ }
+ return res
+}
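+
+// For illustration, MACing an outgoing record might look like the sketch
+// below (seq, header, payload and macKey are hypothetical). Note that extra,
+// if non-nil, is written to the hash after the result is taken, so it
+// affects timing but never the returned MAC:
+//
+//	m := macSHA1(macKey)
+//	tag := tls10MAC(m, nil, seq[:], header, payload, nil)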
+
+func rsaKA(version uint16) keyAgreement {
+ return rsaKeyAgreement{}
+}
+
+func ecdheECDSAKA(version uint16) keyAgreement {
+ return &ecdheKeyAgreement{
+ isRSA: false,
+ version: version,
+ }
+}
+
+func ecdheRSAKA(version uint16) keyAgreement {
+ return &ecdheKeyAgreement{
+ isRSA: true,
+ version: version,
+ }
+}
+
+// mutualCipherSuite returns a cipherSuite given a list of supported
+// ciphersuites and the id requested by the peer.
+func mutualCipherSuite(have []uint16, want uint16) *cipherSuite {
+ for _, id := range have {
+ if id == want {
+ return cipherSuiteByID(id)
+ }
+ }
+ return nil
+}
+
+func cipherSuiteByID(id uint16) *cipherSuite {
+ for _, cipherSuite := range cipherSuites {
+ if cipherSuite.id == id {
+ return cipherSuite
+ }
+ }
+ return nil
+}
+
+func mutualCipherSuiteTLS13(have []uint16, want uint16) *cipherSuiteTLS13 {
+ for _, id := range have {
+ if id == want {
+ return cipherSuiteTLS13ByID(id)
+ }
+ }
+ return nil
+}
+
+func cipherSuiteTLS13ByID(id uint16) *cipherSuiteTLS13 {
+ for _, cipherSuite := range cipherSuitesTLS13 {
+ if cipherSuite.id == id {
+ return cipherSuite
+ }
+ }
+ return nil
+}
+
+// A list of cipher suite IDs that are, or have been, implemented by this
+// package.
+//
+// See https://www.iana.org/assignments/tls-parameters/tls-parameters.xml
+const (
+ // TLS 1.0 - 1.2 cipher suites.
+ TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
+ TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000a
+ TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002f
+ TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
+ TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003c
+ TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009c
+ TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009d
+ TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xc007
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xc009
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xc00a
+ TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xc011
+ TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xc012
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xc013
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xc014
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xc023
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xc027
+ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02f
+ TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02b
+ TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xc030
+ TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xc02c
+ TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xcca8
+ TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xcca9
+
+ // TLS 1.3 cipher suites.
+ TLS_AES_128_GCM_SHA256 uint16 = 0x1301
+ TLS_AES_256_GCM_SHA384 uint16 = 0x1302
+ TLS_CHACHA20_POLY1305_SHA256 uint16 = 0x1303
+
+ // TLS_FALLBACK_SCSV isn't a standard cipher suite but an indicator
+ // that the client is doing version fallback. See RFC 7507.
+ TLS_FALLBACK_SCSV uint16 = 0x5600
+
+ // Legacy names for the corresponding cipher suites with the correct _SHA256
+ // suffix, retained for backward compatibility.
+ TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 = TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
+ TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 = TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
+)
diff --git a/contrib/go/_std_1.19/src/crypto/tls/common.go b/contrib/go/_std_1.19/src/crypto/tls/common.go
new file mode 100644
index 0000000000..1861efce66
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/tls/common.go
@@ -0,0 +1,1485 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tls
+
+import (
+ "bytes"
+ "container/list"
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha512"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ VersionTLS10 = 0x0301
+ VersionTLS11 = 0x0302
+ VersionTLS12 = 0x0303
+ VersionTLS13 = 0x0304
+
+ // Deprecated: SSLv3 is cryptographically broken, and is no longer
+ // supported by this package. See golang.org/issue/32716.
+ VersionSSL30 = 0x0300
+)
+
+const (
+ maxPlaintext = 16384 // maximum plaintext payload length
+ maxCiphertext = 16384 + 2048 // maximum ciphertext payload length
+ maxCiphertextTLS13 = 16384 + 256 // maximum ciphertext length in TLS 1.3
+ recordHeaderLen = 5 // record header length
+ maxHandshake = 65536 // maximum handshake we support (protocol max is 16 MB)
+ maxUselessRecords = 16 // maximum number of consecutive non-advancing records
+)
+
+// TLS record types.
+type recordType uint8
+
+const (
+ recordTypeChangeCipherSpec recordType = 20
+ recordTypeAlert recordType = 21
+ recordTypeHandshake recordType = 22
+ recordTypeApplicationData recordType = 23
+)
+
+// TLS handshake message types.
+const (
+ typeHelloRequest uint8 = 0
+ typeClientHello uint8 = 1
+ typeServerHello uint8 = 2
+ typeNewSessionTicket uint8 = 4
+ typeEndOfEarlyData uint8 = 5
+ typeEncryptedExtensions uint8 = 8
+ typeCertificate uint8 = 11
+ typeServerKeyExchange uint8 = 12
+ typeCertificateRequest uint8 = 13
+ typeServerHelloDone uint8 = 14
+ typeCertificateVerify uint8 = 15
+ typeClientKeyExchange uint8 = 16
+ typeFinished uint8 = 20
+ typeCertificateStatus uint8 = 22
+ typeKeyUpdate uint8 = 24
+ typeNextProtocol uint8 = 67 // Not IANA assigned
+ typeMessageHash uint8 = 254 // synthetic message
+)
+
+// TLS compression types.
+const (
+ compressionNone uint8 = 0
+)
+
+// TLS extension numbers
+const (
+ extensionServerName uint16 = 0
+ extensionStatusRequest uint16 = 5
+ extensionSupportedCurves uint16 = 10 // supported_groups in TLS 1.3, see RFC 8446, Section 4.2.7
+ extensionSupportedPoints uint16 = 11
+ extensionSignatureAlgorithms uint16 = 13
+ extensionALPN uint16 = 16
+ extensionSCT uint16 = 18
+ extensionSessionTicket uint16 = 35
+ extensionPreSharedKey uint16 = 41
+ extensionEarlyData uint16 = 42
+ extensionSupportedVersions uint16 = 43
+ extensionCookie uint16 = 44
+ extensionPSKModes uint16 = 45
+ extensionCertificateAuthorities uint16 = 47
+ extensionSignatureAlgorithmsCert uint16 = 50
+ extensionKeyShare uint16 = 51
+ extensionRenegotiationInfo uint16 = 0xff01
+)
+
+// TLS signaling cipher suite values
+const (
+ scsvRenegotiation uint16 = 0x00ff
+)
+
+// CurveID is the type of a TLS identifier for an elliptic curve. See
+// https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-8.
+//
+// In TLS 1.3, this type is called NamedGroup, but at this time this library
+// only supports Elliptic Curve based groups. See RFC 8446, Section 4.2.7.
+type CurveID uint16
+
+const (
+ CurveP256 CurveID = 23
+ CurveP384 CurveID = 24
+ CurveP521 CurveID = 25
+ X25519 CurveID = 29
+)
+
+// TLS 1.3 Key Share. See RFC 8446, Section 4.2.8.
+type keyShare struct {
+ group CurveID
+ data []byte
+}
+
+// TLS 1.3 PSK Key Exchange Modes. See RFC 8446, Section 4.2.9.
+const (
+ pskModePlain uint8 = 0
+ pskModeDHE uint8 = 1
+)
+
+// TLS 1.3 PSK Identity. Can be a Session Ticket, or a reference to a saved
+// session. See RFC 8446, Section 4.2.11.
+type pskIdentity struct {
+ label []byte
+ obfuscatedTicketAge uint32
+}
+
+// TLS Elliptic Curve Point Formats
+// https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-9
+const (
+ pointFormatUncompressed uint8 = 0
+)
+
+// TLS CertificateStatusType (RFC 3546)
+const (
+ statusTypeOCSP uint8 = 1
+)
+
+// Certificate types (for certificateRequestMsg)
+const (
+ certTypeRSASign = 1
+ certTypeECDSASign = 64 // ECDSA or EdDSA keys, see RFC 8422, Section 3.
+)
+
+// Signature algorithms (for internal signaling use). Starting at 225 to avoid overlap with
+// TLS 1.2 codepoints (RFC 5246, Appendix A.4.1), with which these have nothing to do.
+const (
+ signaturePKCS1v15 uint8 = iota + 225
+ signatureRSAPSS
+ signatureECDSA
+ signatureEd25519
+)
+
+// directSigning is a standard Hash value that signals that no pre-hashing
+// should be performed, and that the input should be signed directly. It is the
+// hash function associated with the Ed25519 signature scheme.
+var directSigning crypto.Hash = 0
+
+// defaultSupportedSignatureAlgorithms contains the signature and hash algorithms that
+// the code advertises as supported in a TLS 1.2+ ClientHello and in a TLS 1.2+
+// CertificateRequest. The two fields are merged to match with TLS 1.3.
+// Note that in TLS 1.2, the ECDSA algorithms are not constrained to P-256, etc.
+var defaultSupportedSignatureAlgorithms = []SignatureScheme{
+ PSSWithSHA256,
+ ECDSAWithP256AndSHA256,
+ Ed25519,
+ PSSWithSHA384,
+ PSSWithSHA512,
+ PKCS1WithSHA256,
+ PKCS1WithSHA384,
+ PKCS1WithSHA512,
+ ECDSAWithP384AndSHA384,
+ ECDSAWithP521AndSHA512,
+ PKCS1WithSHA1,
+ ECDSAWithSHA1,
+}
+
+// helloRetryRequestRandom is set as the Random value of a ServerHello
+// to signal that the message is actually a HelloRetryRequest.
+var helloRetryRequestRandom = []byte{ // See RFC 8446, Section 4.1.3.
+ 0xCF, 0x21, 0xAD, 0x74, 0xE5, 0x9A, 0x61, 0x11,
+ 0xBE, 0x1D, 0x8C, 0x02, 0x1E, 0x65, 0xB8, 0x91,
+ 0xC2, 0xA2, 0x11, 0x16, 0x7A, 0xBB, 0x8C, 0x5E,
+ 0x07, 0x9E, 0x09, 0xE2, 0xC8, 0xA8, 0x33, 0x9C,
+}
+
+const (
+ // downgradeCanaryTLS12 or downgradeCanaryTLS11 is embedded in the server
+ // random as a downgrade protection if the server would be capable of
+ // negotiating a higher version. See RFC 8446, Section 4.1.3.
+ downgradeCanaryTLS12 = "DOWNGRD\x01"
+ downgradeCanaryTLS11 = "DOWNGRD\x00"
+)
+
+// testingOnlyForceDowngradeCanary is set in tests to force the server side to
+// include downgrade canaries even if it's using its highest supported version.
+var testingOnlyForceDowngradeCanary bool
+
+// ConnectionState records basic TLS details about the connection.
+type ConnectionState struct {
+ // Version is the TLS version used by the connection (e.g. VersionTLS12).
+ Version uint16
+
+ // HandshakeComplete is true if the handshake has concluded.
+ HandshakeComplete bool
+
+ // DidResume is true if this connection was successfully resumed from a
+ // previous session with a session ticket or similar mechanism.
+ DidResume bool
+
+ // CipherSuite is the cipher suite negotiated for the connection (e.g.
+ // TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_AES_128_GCM_SHA256).
+ CipherSuite uint16
+
+ // NegotiatedProtocol is the application protocol negotiated with ALPN.
+ NegotiatedProtocol string
+
+ // NegotiatedProtocolIsMutual used to indicate a mutual NPN negotiation.
+ //
+ // Deprecated: this value is always true.
+ NegotiatedProtocolIsMutual bool
+
+ // ServerName is the value of the Server Name Indication extension sent by
+ // the client. It's available both on the server and on the client side.
+ ServerName string
+
+ // PeerCertificates are the parsed certificates sent by the peer, in the
+ // order in which they were sent. The first element is the leaf certificate
+ // that the connection is verified against.
+ //
+ // On the client side, it can't be empty. On the server side, it can be
+ // empty if Config.ClientAuth is not RequireAnyClientCert or
+ // RequireAndVerifyClientCert.
+ PeerCertificates []*x509.Certificate
+
+ // VerifiedChains is a list of one or more chains where the first element is
+ // PeerCertificates[0] and the last element is from Config.RootCAs (on the
+ // client side) or Config.ClientCAs (on the server side).
+ //
+ // On the client side, it's set if Config.InsecureSkipVerify is false. On
+ // the server side, it's set if Config.ClientAuth is VerifyClientCertIfGiven
+ // (and the peer provided a certificate) or RequireAndVerifyClientCert.
+ VerifiedChains [][]*x509.Certificate
+
+ // SignedCertificateTimestamps is a list of SCTs provided by the peer
+ // through the TLS handshake for the leaf certificate, if any.
+ SignedCertificateTimestamps [][]byte
+
+ // OCSPResponse is a stapled Online Certificate Status Protocol (OCSP)
+ // response provided by the peer for the leaf certificate, if any.
+ OCSPResponse []byte
+
+ // TLSUnique contains the "tls-unique" channel binding value (see RFC 5929,
+ // Section 3). This value will be nil for TLS 1.3 connections and for all
+ // resumed connections.
+ //
+ // Deprecated: there are conditions in which this value might not be unique
+ // to a connection. See the Security Considerations sections of RFC 5705 and
+ // RFC 7627, and https://mitls.org/pages/attacks/3SHAKE#channelbindings.
+ TLSUnique []byte
+
+ // ekm is a closure exposed via ExportKeyingMaterial.
+ ekm func(label string, context []byte, length int) ([]byte, error)
+}
+
+// ExportKeyingMaterial returns length bytes of exported key material in a new
+// slice as defined in RFC 5705. If context is nil, it is not used as part of
+// the seed. If the connection was set to allow renegotiation via
+// Config.Renegotiation, this function will return an error.
+func (cs *ConnectionState) ExportKeyingMaterial(label string, context []byte, length int) ([]byte, error) {
+ return cs.ekm(label, context, length)
+}
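+
+// For illustration, after a handshake a caller might derive 32 bytes of
+// keying material bound to this connection (the label is an application
+// choice; conn is a hypothetical *Conn):
+//
+//	state := conn.ConnectionState()
+//	km, err := state.ExportKeyingMaterial("EXPERIMENTAL my-protocol", nil, 32)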
+
+// ClientAuthType declares the policy the server will follow for
+// TLS Client Authentication.
+type ClientAuthType int
+
+const (
+ // NoClientCert indicates that no client certificate should be requested
+ // during the handshake, and if any certificates are sent they will not
+ // be verified.
+ NoClientCert ClientAuthType = iota
+ // RequestClientCert indicates that a client certificate should be requested
+ // during the handshake, but does not require that the client send any
+ // certificates.
+ RequestClientCert
+ // RequireAnyClientCert indicates that a client certificate should be requested
+ // during the handshake, and that at least one certificate is required to be
+ // sent by the client, but that certificate is not required to be valid.
+ RequireAnyClientCert
+ // VerifyClientCertIfGiven indicates that a client certificate should be requested
+ // during the handshake, but does not require that the client sends a
+ // certificate. If the client does send a certificate it is required to be
+ // valid.
+ VerifyClientCertIfGiven
+ // RequireAndVerifyClientCert indicates that a client certificate should be requested
+ // during the handshake, and that at least one valid certificate is required
+ // to be sent by the client.
+ RequireAndVerifyClientCert
+)
+
+// requiresClientCert reports whether the ClientAuthType requires a client
+// certificate to be provided.
+func requiresClientCert(c ClientAuthType) bool {
+ switch c {
+ case RequireAnyClientCert, RequireAndVerifyClientCert:
+ return true
+ default:
+ return false
+ }
+}
+
+// ClientSessionState contains the state needed by clients to resume TLS
+// sessions.
+type ClientSessionState struct {
+ sessionTicket []uint8 // Encrypted ticket used for session resumption with server
+ vers uint16 // TLS version negotiated for the session
+ cipherSuite uint16 // Ciphersuite negotiated for the session
+ masterSecret []byte // Full handshake MasterSecret, or TLS 1.3 resumption_master_secret
+ serverCertificates []*x509.Certificate // Certificate chain presented by the server
+ verifiedChains [][]*x509.Certificate // Certificate chains we built for verification
+ receivedAt time.Time // When the session ticket was received from the server
+ ocspResponse []byte // Stapled OCSP response presented by the server
+ scts [][]byte // SCTs presented by the server
+
+ // TLS 1.3 fields.
+ nonce []byte // Ticket nonce sent by the server, to derive PSK
+ useBy time.Time // Expiration of the ticket lifetime as set by the server
+ ageAdd uint32 // Random obfuscation factor for sending the ticket age
+}
+
+// ClientSessionCache is a cache of ClientSessionState objects that can be used
+// by a client to resume a TLS session with a given server. ClientSessionCache
+// implementations should expect to be called concurrently from different
+// goroutines. Up to TLS 1.2, only ticket-based resumption is supported, not
+// SessionID-based resumption. In TLS 1.3 they were merged into PSK modes, which
+// are supported via this interface.
+type ClientSessionCache interface {
+ // Get searches for a ClientSessionState associated with the given key.
+ // On return, ok is true if one was found.
+ Get(sessionKey string) (session *ClientSessionState, ok bool)
+
+ // Put adds the ClientSessionState to the cache with the given key. It might
+ // get called multiple times in a connection if a TLS 1.3 server provides
+ // more than one session ticket. If called with a nil *ClientSessionState,
+ // it should remove the cache entry.
+ Put(sessionKey string, cs *ClientSessionState)
+}
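+
+// A minimal, unbounded implementation sketch (mapSessionCache is
+// illustrative; NewLRUClientSessionCache provides a bounded variant):
+//
+//	type mapSessionCache struct {
+//		mu sync.Mutex
+//		m  map[string]*ClientSessionState
+//	}
+//
+//	func (c *mapSessionCache) Get(key string) (*ClientSessionState, bool) {
+//		c.mu.Lock()
+//		defer c.mu.Unlock()
+//		cs, ok := c.m[key]
+//		return cs, ok
+//	}
+//
+//	func (c *mapSessionCache) Put(key string, cs *ClientSessionState) {
+//		c.mu.Lock()
+//		defer c.mu.Unlock()
+//		if cs == nil {
+//			delete(c.m, key)
+//			return
+//		}
+//		c.m[key] = cs
+//	}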
+
+//go:generate stringer -type=SignatureScheme,CurveID,ClientAuthType -output=common_string.go
+
+// SignatureScheme identifies a signature algorithm supported by TLS. See
+// RFC 8446, Section 4.2.3.
+type SignatureScheme uint16
+
+const (
+ // RSASSA-PKCS1-v1_5 algorithms.
+ PKCS1WithSHA256 SignatureScheme = 0x0401
+ PKCS1WithSHA384 SignatureScheme = 0x0501
+ PKCS1WithSHA512 SignatureScheme = 0x0601
+
+ // RSASSA-PSS algorithms with public key OID rsaEncryption.
+ PSSWithSHA256 SignatureScheme = 0x0804
+ PSSWithSHA384 SignatureScheme = 0x0805
+ PSSWithSHA512 SignatureScheme = 0x0806
+
+ // ECDSA algorithms. Only constrained to a specific curve in TLS 1.3.
+ ECDSAWithP256AndSHA256 SignatureScheme = 0x0403
+ ECDSAWithP384AndSHA384 SignatureScheme = 0x0503
+ ECDSAWithP521AndSHA512 SignatureScheme = 0x0603
+
+ // EdDSA algorithms.
+ Ed25519 SignatureScheme = 0x0807
+
+ // Legacy signature and hash algorithms for TLS 1.2.
+ PKCS1WithSHA1 SignatureScheme = 0x0201
+ ECDSAWithSHA1 SignatureScheme = 0x0203
+)
+
+// ClientHelloInfo contains information from a ClientHello message in order to
+// guide application logic in the GetCertificate and GetConfigForClient callbacks.
+type ClientHelloInfo struct {
+ // CipherSuites lists the CipherSuites supported by the client (e.g.
+ // TLS_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).
+ CipherSuites []uint16
+
+ // ServerName indicates the name of the server requested by the client
+ // in order to support virtual hosting. ServerName is only set if the
+ // client is using SNI (see RFC 4366, Section 3.1).
+ ServerName string
+
+ // SupportedCurves lists the elliptic curves supported by the client.
+ // SupportedCurves is set only if the Supported Elliptic Curves
+ // Extension is being used (see RFC 4492, Section 5.1.1).
+ SupportedCurves []CurveID
+
+ // SupportedPoints lists the point formats supported by the client.
+ // SupportedPoints is set only if the Supported Point Formats Extension
+ // is being used (see RFC 4492, Section 5.1.2).
+ SupportedPoints []uint8
+
+ // SignatureSchemes lists the signature and hash schemes that the client
+ // is willing to verify. SignatureSchemes is set only if the Signature
+ // Algorithms Extension is being used (see RFC 5246, Section 7.4.1.4.1).
+ SignatureSchemes []SignatureScheme
+
+ // SupportedProtos lists the application protocols supported by the client.
+ // SupportedProtos is set only if the Application-Layer Protocol
+ // Negotiation Extension is being used (see RFC 7301, Section 3.1).
+ //
+ // Servers can select a protocol by setting Config.NextProtos in a
+ // GetConfigForClient return value.
+ SupportedProtos []string
+
+ // SupportedVersions lists the TLS versions supported by the client.
+ // For TLS versions less than 1.3, this is extrapolated from the max
+ // version advertised by the client, so values other than the greatest
+ // might be rejected if used.
+ SupportedVersions []uint16
+
+ // Conn is the underlying net.Conn for the connection. Do not read
+ // from, or write to, this connection; that will cause the TLS
+ // connection to fail.
+ Conn net.Conn
+
+ // config is embedded by the GetCertificate or GetConfigForClient caller,
+ // for use with SupportsCertificate.
+ config *Config
+
+ // ctx is the context of the handshake that is in progress.
+ ctx context.Context
+}
+
+// Context returns the context of the handshake that is in progress.
+// This context is a child of the context passed to HandshakeContext,
+// if any, and is canceled when the handshake concludes.
+func (c *ClientHelloInfo) Context() context.Context {
+ return c.ctx
+}
+
+// CertificateRequestInfo contains information from a server's
+// CertificateRequest message, which is used to demand a certificate and proof
+// of control from a client.
+type CertificateRequestInfo struct {
+	// AcceptableCAs contains zero or more DER-encoded X.501
+ // Distinguished Names. These are the names of root or intermediate CAs
+ // that the server wishes the returned certificate to be signed by. An
+ // empty slice indicates that the server has no preference.
+ AcceptableCAs [][]byte
+
+ // SignatureSchemes lists the signature schemes that the server is
+ // willing to verify.
+ SignatureSchemes []SignatureScheme
+
+ // Version is the TLS version that was negotiated for this connection.
+ Version uint16
+
+ // ctx is the context of the handshake that is in progress.
+ ctx context.Context
+}
+
+// Context returns the context of the handshake that is in progress.
+// This context is a child of the context passed to HandshakeContext,
+// if any, and is canceled when the handshake concludes.
+func (c *CertificateRequestInfo) Context() context.Context {
+ return c.ctx
+}
+
+// RenegotiationSupport enumerates the different levels of support for TLS
+// renegotiation. TLS renegotiation is the act of performing subsequent
+// handshakes on a connection after the first. This significantly complicates
+// the state machine and has been the source of numerous, subtle security
+// issues. Initiating a renegotiation is not supported, but support for
+// accepting renegotiation requests may be enabled.
+//
+// Even when enabled, the server may not change its identity between handshakes
+// (i.e. the leaf certificate must be the same). Additionally, concurrent
+// handshake and application data flow is not permitted so renegotiation can
+// only be used with protocols that synchronise with the renegotiation, such as
+// HTTPS.
+//
+// Renegotiation is not defined in TLS 1.3.
+type RenegotiationSupport int
+
+const (
+ // RenegotiateNever disables renegotiation.
+ RenegotiateNever RenegotiationSupport = iota
+
+ // RenegotiateOnceAsClient allows a remote server to request
+ // renegotiation once per connection.
+ RenegotiateOnceAsClient
+
+ // RenegotiateFreelyAsClient allows a remote server to repeatedly
+ // request renegotiation.
+ RenegotiateFreelyAsClient
+)
+
+// A Config structure is used to configure a TLS client or server.
+// After one has been passed to a TLS function it must not be
+// modified. A Config may be reused; the tls package will also not
+// modify it.
+type Config struct {
+ // Rand provides the source of entropy for nonces and RSA blinding.
+ // If Rand is nil, TLS uses the cryptographic random reader in package
+ // crypto/rand.
+ // The Reader must be safe for use by multiple goroutines.
+ Rand io.Reader
+
+ // Time returns the current time as the number of seconds since the epoch.
+ // If Time is nil, TLS uses time.Now.
+ Time func() time.Time
+
+ // Certificates contains one or more certificate chains to present to the
+ // other side of the connection. The first certificate compatible with the
+ // peer's requirements is selected automatically.
+ //
+ // Server configurations must set one of Certificates, GetCertificate or
+ // GetConfigForClient. Clients doing client-authentication may set either
+ // Certificates or GetClientCertificate.
+ //
+ // Note: if there are multiple Certificates, and they don't have the
+ // optional field Leaf set, certificate selection will incur a significant
+ // per-handshake performance cost.
+ Certificates []Certificate
+
+ // NameToCertificate maps from a certificate name to an element of
+ // Certificates. Note that a certificate name can be of the form
+ // '*.example.com' and so doesn't have to be a domain name as such.
+ //
+ // Deprecated: NameToCertificate only allows associating a single
+ // certificate with a given name. Leave this field nil to let the library
+ // select the first compatible chain from Certificates.
+ NameToCertificate map[string]*Certificate
+
+ // GetCertificate returns a Certificate based on the given
+ // ClientHelloInfo. It will only be called if the client supplies SNI
+ // information or if Certificates is empty.
+ //
+ // If GetCertificate is nil or returns nil, then the certificate is
+ // retrieved from NameToCertificate. If NameToCertificate is nil, the
+ // best element of Certificates will be used.
+ GetCertificate func(*ClientHelloInfo) (*Certificate, error)
+
+ // GetClientCertificate, if not nil, is called when a server requests a
+ // certificate from a client. If set, the contents of Certificates will
+ // be ignored.
+ //
+ // If GetClientCertificate returns an error, the handshake will be
+ // aborted and that error will be returned. Otherwise
+ // GetClientCertificate must return a non-nil Certificate. If
+ // Certificate.Certificate is empty then no certificate will be sent to
+ // the server. If this is unacceptable to the server then it may abort
+ // the handshake.
+ //
+ // GetClientCertificate may be called multiple times for the same
+ // connection if renegotiation occurs or if TLS 1.3 is in use.
+ GetClientCertificate func(*CertificateRequestInfo) (*Certificate, error)
+
+ // GetConfigForClient, if not nil, is called after a ClientHello is
+ // received from a client. It may return a non-nil Config in order to
+ // change the Config that will be used to handle this connection. If
+ // the returned Config is nil, the original Config will be used. The
+ // Config returned by this callback may not be subsequently modified.
+ //
+ // If GetConfigForClient is nil, the Config passed to Server() will be
+ // used for all connections.
+ //
+ // If SessionTicketKey was explicitly set on the returned Config, or if
+ // SetSessionTicketKeys was called on the returned Config, those keys will
+ // be used. Otherwise, the original Config keys will be used (and possibly
+ // rotated if they are automatically managed).
+ GetConfigForClient func(*ClientHelloInfo) (*Config, error)
+
+ // VerifyPeerCertificate, if not nil, is called after normal
+ // certificate verification by either a TLS client or server. It
+ // receives the raw ASN.1 certificates provided by the peer and also
+ // any verified chains that normal processing found. If it returns a
+ // non-nil error, the handshake is aborted and that error results.
+ //
+ // If normal verification fails then the handshake will abort before
+ // considering this callback. If normal verification is disabled by
+ // setting InsecureSkipVerify, or (for a server) when ClientAuth is
+ // RequestClientCert or RequireAnyClientCert, then this callback will
+ // be considered but the verifiedChains argument will always be nil.
+ VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error
+
+ // VerifyConnection, if not nil, is called after normal certificate
+ // verification and after VerifyPeerCertificate by either a TLS client
+ // or server. If it returns a non-nil error, the handshake is aborted
+ // and that error results.
+ //
+ // If normal verification fails then the handshake will abort before
+ // considering this callback. This callback will run for all connections
+ // regardless of InsecureSkipVerify or ClientAuth settings.
+ VerifyConnection func(ConnectionState) error
+
+ // RootCAs defines the set of root certificate authorities
+ // that clients use when verifying server certificates.
+ // If RootCAs is nil, TLS uses the host's root CA set.
+ RootCAs *x509.CertPool
+
+ // NextProtos is a list of supported application level protocols, in
+ // order of preference. If both peers support ALPN, the selected
+ // protocol will be one from this list, and the connection will fail
+ // if there is no mutually supported protocol. If NextProtos is empty
+ // or the peer doesn't support ALPN, the connection will succeed and
+ // ConnectionState.NegotiatedProtocol will be empty.
+ NextProtos []string
+
+ // ServerName is used to verify the hostname on the returned
+ // certificates unless InsecureSkipVerify is given. It is also included
+ // in the client's handshake to support virtual hosting unless it is
+ // an IP address.
+ ServerName string
+
+ // ClientAuth determines the server's policy for
+ // TLS Client Authentication. The default is NoClientCert.
+ ClientAuth ClientAuthType
+
+ // ClientCAs defines the set of root certificate authorities
+ // that servers use if required to verify a client certificate
+ // by the policy in ClientAuth.
+ ClientCAs *x509.CertPool
+
+ // InsecureSkipVerify controls whether a client verifies the server's
+ // certificate chain and host name. If InsecureSkipVerify is true, crypto/tls
+ // accepts any certificate presented by the server and any host name in that
+ // certificate. In this mode, TLS is susceptible to machine-in-the-middle
+ // attacks unless custom verification is used. This should be used only for
+ // testing or in combination with VerifyConnection or VerifyPeerCertificate.
+ InsecureSkipVerify bool
+
+ // CipherSuites is a list of enabled TLS 1.0–1.2 cipher suites. The order of
+ // the list is ignored. Note that TLS 1.3 ciphersuites are not configurable.
+ //
+ // If CipherSuites is nil, a safe default list is used. The default cipher
+ // suites might change over time.
+ CipherSuites []uint16
+
+ // PreferServerCipherSuites is a legacy field and has no effect.
+ //
+ // It used to control whether the server would follow the client's or the
+ // server's preference. Servers now select the best mutually supported
+ // cipher suite based on logic that takes into account inferred client
+ // hardware, server hardware, and security.
+ //
+ // Deprecated: PreferServerCipherSuites is ignored.
+ PreferServerCipherSuites bool
+
+ // SessionTicketsDisabled may be set to true to disable session ticket and
+ // PSK (resumption) support. Note that on clients, session ticket support is
+ // also disabled if ClientSessionCache is nil.
+ SessionTicketsDisabled bool
+
+ // SessionTicketKey is used by TLS servers to provide session resumption.
+ // See RFC 5077 and the PSK mode of RFC 8446. If zero, it will be filled
+ // with random data before the first server handshake.
+ //
+ // Deprecated: if this field is left at zero, session ticket keys will be
+ // automatically rotated every day and dropped after seven days. For
+ // customizing the rotation schedule or synchronizing servers that are
+ // terminating connections for the same host, use SetSessionTicketKeys.
+ SessionTicketKey [32]byte
+
+ // ClientSessionCache is a cache of ClientSessionState entries for TLS
+ // session resumption. It is only used by clients.
+ ClientSessionCache ClientSessionCache
+
+ // MinVersion contains the minimum TLS version that is acceptable.
+ //
+ // By default, TLS 1.2 is currently used as the minimum when acting as a
+ // client, and TLS 1.0 when acting as a server. TLS 1.0 is the minimum
+ // supported by this package, both as a client and as a server.
+ //
+	// The GODEBUG option that temporarily reverted the client-side default
+	// to TLS 1.0 was removed in Go 1.19. To accept TLS 1.0 or 1.1 as a
+	// client, set this field explicitly (e.g. to VersionTLS10).
+ MinVersion uint16
+
+ // MaxVersion contains the maximum TLS version that is acceptable.
+ //
+ // By default, the maximum version supported by this package is used,
+ // which is currently TLS 1.3.
+ MaxVersion uint16
+
+ // CurvePreferences contains the elliptic curves that will be used in
+ // an ECDHE handshake, in preference order. If empty, the default will
+ // be used. The client will use the first preference as the type for
+ // its key share in TLS 1.3. This may change in the future.
+ CurvePreferences []CurveID
+
+ // DynamicRecordSizingDisabled disables adaptive sizing of TLS records.
+ // When true, the largest possible TLS record size is always used. When
+ // false, the size of TLS records may be adjusted in an attempt to
+ // improve latency.
+ DynamicRecordSizingDisabled bool
+
+ // Renegotiation controls what types of renegotiation are supported.
+ // The default, none, is correct for the vast majority of applications.
+ Renegotiation RenegotiationSupport
+
+ // KeyLogWriter optionally specifies a destination for TLS master secrets
+ // in NSS key log format that can be used to allow external programs
+ // such as Wireshark to decrypt TLS connections.
+ // See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format.
+ // Use of KeyLogWriter compromises security and should only be
+ // used for debugging.
+ KeyLogWriter io.Writer
+
+ // mutex protects sessionTicketKeys and autoSessionTicketKeys.
+ mutex sync.RWMutex
+	// sessionTicketKeys contains zero or more ticket keys. If set, it means
+	// the keys were set with SessionTicketKey or SetSessionTicketKeys. The
+ // first key is used for new tickets and any subsequent keys can be used to
+ // decrypt old tickets. The slice contents are not protected by the mutex
+ // and are immutable.
+ sessionTicketKeys []ticketKey
+ // autoSessionTicketKeys is like sessionTicketKeys but is owned by the
+ // auto-rotation logic. See Config.ticketKeys.
+ autoSessionTicketKeys []ticketKey
+}
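+
+// For illustration, a typical server configuration sets only a few of these
+// fields (the file paths are placeholders):
+//
+//	cert, err := LoadX509KeyPair("server.crt", "server.key")
+//	if err != nil {
+//		panic(err)
+//	}
+//	cfg := &Config{
+//		Certificates: []Certificate{cert},
+//		MinVersion:   VersionTLS12,
+//	}
+//	ln, err := Listen("tcp", ":8443", cfg)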
+
+const (
+ // ticketKeyNameLen is the number of bytes of identifier that is prepended to
+ // an encrypted session ticket in order to identify the key used to encrypt it.
+ ticketKeyNameLen = 16
+
+ // ticketKeyLifetime is how long a ticket key remains valid and can be used to
+ // resume a client connection.
+ ticketKeyLifetime = 7 * 24 * time.Hour // 7 days
+
+ // ticketKeyRotation is how often the server should rotate the session ticket key
+ // that is used for new tickets.
+ ticketKeyRotation = 24 * time.Hour
+)
+
+// ticketKey is the internal representation of a session ticket key.
+type ticketKey struct {
+ // keyName is an opaque byte string that serves to identify the session
+ // ticket key. It's exposed as plaintext in every session ticket.
+ keyName [ticketKeyNameLen]byte
+ aesKey [16]byte
+ hmacKey [16]byte
+ // created is the time at which this ticket key was created. See Config.ticketKeys.
+ created time.Time
+}
+
+// ticketKeyFromBytes converts from the external representation of a session
+// ticket key to a ticketKey. Externally, session ticket keys are 32 random
+// bytes and this function expands that into sufficient name and key material.
+func (c *Config) ticketKeyFromBytes(b [32]byte) (key ticketKey) {
+ hashed := sha512.Sum512(b[:])
+ copy(key.keyName[:], hashed[:ticketKeyNameLen])
+ copy(key.aesKey[:], hashed[ticketKeyNameLen:ticketKeyNameLen+16])
+ copy(key.hmacKey[:], hashed[ticketKeyNameLen+16:ticketKeyNameLen+32])
+ key.created = c.time()
+ return key
+}
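+
+// In other words, the 64-byte SHA-512 digest of the external 32-byte key is
+// split so that bytes 0-15 become keyName, bytes 16-31 the AES key, and
+// bytes 32-47 the HMAC key; the final 16 bytes are unused.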
+
+// maxSessionTicketLifetime is the maximum allowed lifetime of a TLS 1.3 session
+// ticket, and the lifetime we set for tickets we send.
+const maxSessionTicketLifetime = 7 * 24 * time.Hour
+
+// Clone returns a shallow clone of c or nil if c is nil. It is safe to clone a Config that is
+// being used concurrently by a TLS client or server.
+func (c *Config) Clone() *Config {
+ if c == nil {
+ return nil
+ }
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+ return &Config{
+ Rand: c.Rand,
+ Time: c.Time,
+ Certificates: c.Certificates,
+ NameToCertificate: c.NameToCertificate,
+ GetCertificate: c.GetCertificate,
+ GetClientCertificate: c.GetClientCertificate,
+ GetConfigForClient: c.GetConfigForClient,
+ VerifyPeerCertificate: c.VerifyPeerCertificate,
+ VerifyConnection: c.VerifyConnection,
+ RootCAs: c.RootCAs,
+ NextProtos: c.NextProtos,
+ ServerName: c.ServerName,
+ ClientAuth: c.ClientAuth,
+ ClientCAs: c.ClientCAs,
+ InsecureSkipVerify: c.InsecureSkipVerify,
+ CipherSuites: c.CipherSuites,
+ PreferServerCipherSuites: c.PreferServerCipherSuites,
+ SessionTicketsDisabled: c.SessionTicketsDisabled,
+ SessionTicketKey: c.SessionTicketKey,
+ ClientSessionCache: c.ClientSessionCache,
+ MinVersion: c.MinVersion,
+ MaxVersion: c.MaxVersion,
+ CurvePreferences: c.CurvePreferences,
+ DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+ Renegotiation: c.Renegotiation,
+ KeyLogWriter: c.KeyLogWriter,
+ sessionTicketKeys: c.sessionTicketKeys,
+ autoSessionTicketKeys: c.autoSessionTicketKeys,
+ }
+}
+
+// deprecatedSessionTicketKey is set as the prefix of SessionTicketKey if it was
+// randomized for backwards compatibility but is not in use.
+var deprecatedSessionTicketKey = []byte("DEPRECATED")
+
+// initLegacySessionTicketKeyRLocked ensures the legacy SessionTicketKey field is
+// randomized if empty, and that sessionTicketKeys is populated from it otherwise.
+func (c *Config) initLegacySessionTicketKeyRLocked() {
+ // Don't write if SessionTicketKey is already defined as our deprecated string,
+ // or if it is defined by the user but sessionTicketKeys is already set.
+ if c.SessionTicketKey != [32]byte{} &&
+ (bytes.HasPrefix(c.SessionTicketKey[:], deprecatedSessionTicketKey) || len(c.sessionTicketKeys) > 0) {
+ return
+ }
+
+ // We need to write some data, so get an exclusive lock and re-check any conditions.
+ c.mutex.RUnlock()
+ defer c.mutex.RLock()
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ if c.SessionTicketKey == [32]byte{} {
+ if _, err := io.ReadFull(c.rand(), c.SessionTicketKey[:]); err != nil {
+ panic(fmt.Sprintf("tls: unable to generate random session ticket key: %v", err))
+ }
+ // Write the deprecated prefix at the beginning so we know we created
+ // it. This key with the DEPRECATED prefix isn't used as an actual
+ // session ticket key, and is only randomized in case the application
+ // reuses it for some reason.
+ copy(c.SessionTicketKey[:], deprecatedSessionTicketKey)
+ } else if !bytes.HasPrefix(c.SessionTicketKey[:], deprecatedSessionTicketKey) && len(c.sessionTicketKeys) == 0 {
+ c.sessionTicketKeys = []ticketKey{c.ticketKeyFromBytes(c.SessionTicketKey)}
+ }
+}
+
+// ticketKeys returns the ticketKeys for this connection.
+// If configForClient has explicitly set keys, those will
+// be returned. Otherwise, the keys on c will be used and
+// may be rotated if auto-managed.
+// During rotation, any expired session ticket keys are deleted from
+// c.sessionTicketKeys. If the session ticket key that is currently
+// encrypting tickets (i.e. the first ticketKey in c.sessionTicketKeys)
+// is not fresh, then a new session ticket key will be
+// created and prepended to c.sessionTicketKeys.
+func (c *Config) ticketKeys(configForClient *Config) []ticketKey {
+ // If the ConfigForClient callback returned a Config with explicitly set
+ // keys, use those, otherwise just use the original Config.
+ if configForClient != nil {
+ configForClient.mutex.RLock()
+ if configForClient.SessionTicketsDisabled {
+ return nil
+ }
+ configForClient.initLegacySessionTicketKeyRLocked()
+ if len(configForClient.sessionTicketKeys) != 0 {
+ ret := configForClient.sessionTicketKeys
+ configForClient.mutex.RUnlock()
+ return ret
+ }
+ configForClient.mutex.RUnlock()
+ }
+
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+ if c.SessionTicketsDisabled {
+ return nil
+ }
+ c.initLegacySessionTicketKeyRLocked()
+ if len(c.sessionTicketKeys) != 0 {
+ return c.sessionTicketKeys
+ }
+ // Fast path for the common case where the key is fresh enough.
+ if len(c.autoSessionTicketKeys) > 0 && c.time().Sub(c.autoSessionTicketKeys[0].created) < ticketKeyRotation {
+ return c.autoSessionTicketKeys
+ }
+
+ // autoSessionTicketKeys are managed by auto-rotation.
+ c.mutex.RUnlock()
+ defer c.mutex.RLock()
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ // Re-check the condition in case it changed since obtaining the new lock.
+ if len(c.autoSessionTicketKeys) == 0 || c.time().Sub(c.autoSessionTicketKeys[0].created) >= ticketKeyRotation {
+ var newKey [32]byte
+ if _, err := io.ReadFull(c.rand(), newKey[:]); err != nil {
+ panic(fmt.Sprintf("unable to generate random session ticket key: %v", err))
+ }
+ valid := make([]ticketKey, 0, len(c.autoSessionTicketKeys)+1)
+ valid = append(valid, c.ticketKeyFromBytes(newKey))
+ for _, k := range c.autoSessionTicketKeys {
+ // While rotating the current key, also remove any expired ones.
+ if c.time().Sub(k.created) < ticketKeyLifetime {
+ valid = append(valid, k)
+ }
+ }
+ c.autoSessionTicketKeys = valid
+ }
+ return c.autoSessionTicketKeys
+}
+
+// SetSessionTicketKeys updates the session ticket keys for a server.
+//
+// The first key will be used when creating new tickets, while all keys can be
+// used for decrypting tickets. It is safe to call this function while the
+// server is running in order to rotate the session ticket keys. The function
+// will panic if keys is empty.
+//
+// Calling this function will turn off automatic session ticket key rotation.
+//
+// If multiple servers are terminating connections for the same host they should
+// all have the same session ticket keys. If the session ticket keys leak,
+// previously recorded and future TLS connections using those keys might be
+// compromised.
+func (c *Config) SetSessionTicketKeys(keys [][32]byte) {
+ if len(keys) == 0 {
+ panic("tls: keys must have at least one key")
+ }
+
+ newKeys := make([]ticketKey, len(keys))
+ for i, bytes := range keys {
+ newKeys[i] = c.ticketKeyFromBytes(bytes)
+ }
+
+ c.mutex.Lock()
+ c.sessionTicketKeys = newKeys
+ c.mutex.Unlock()
+}
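+
+// For illustration, rotating in a fresh key while keeping the previous one
+// valid for decrypting outstanding tickets (newKey and oldKey are
+// placeholder [32]byte values):
+//
+//	cfg.SetSessionTicketKeys([][32]byte{newKey, oldKey})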
+
+func (c *Config) rand() io.Reader {
+ r := c.Rand
+ if r == nil {
+ return rand.Reader
+ }
+ return r
+}
+
+func (c *Config) time() time.Time {
+ t := c.Time
+ if t == nil {
+ t = time.Now
+ }
+ return t()
+}
+
+func (c *Config) cipherSuites() []uint16 {
+ if needFIPS() {
+ return fipsCipherSuites(c)
+ }
+ if c.CipherSuites != nil {
+ return c.CipherSuites
+ }
+ return defaultCipherSuites
+}
+
+var supportedVersions = []uint16{
+ VersionTLS13,
+ VersionTLS12,
+ VersionTLS11,
+ VersionTLS10,
+}
+
+// roleClient and roleServer are passed as the isClient argument to
+// supportedVersions and related functions to make callsites more readable.
+const roleClient = true
+const roleServer = false
+
+func (c *Config) supportedVersions(isClient bool) []uint16 {
+ versions := make([]uint16, 0, len(supportedVersions))
+ for _, v := range supportedVersions {
+ if needFIPS() && (v < fipsMinVersion(c) || v > fipsMaxVersion(c)) {
+ continue
+ }
+ if (c == nil || c.MinVersion == 0) &&
+ isClient && v < VersionTLS12 {
+ continue
+ }
+ if c != nil && c.MinVersion != 0 && v < c.MinVersion {
+ continue
+ }
+ if c != nil && c.MaxVersion != 0 && v > c.MaxVersion {
+ continue
+ }
+ versions = append(versions, v)
+ }
+ return versions
+}
+
+func (c *Config) maxSupportedVersion(isClient bool) uint16 {
+ supportedVersions := c.supportedVersions(isClient)
+ if len(supportedVersions) == 0 {
+ return 0
+ }
+ return supportedVersions[0]
+}
+
+// supportedVersionsFromMax returns a list of supported versions derived from a
+// legacy maximum version value. Note that only versions supported by this
+// library are returned. Any newer peer will use supportedVersions anyway.
+func supportedVersionsFromMax(maxVersion uint16) []uint16 {
+ versions := make([]uint16, 0, len(supportedVersions))
+ for _, v := range supportedVersions {
+ if v > maxVersion {
+ continue
+ }
+ versions = append(versions, v)
+ }
+ return versions
+}
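+
+// For example, supportedVersionsFromMax(VersionTLS12) returns
+// []uint16{VersionTLS12, VersionTLS11, VersionTLS10}, in preference order.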
+
+var defaultCurvePreferences = []CurveID{X25519, CurveP256, CurveP384, CurveP521}
+
+func (c *Config) curvePreferences() []CurveID {
+ if needFIPS() {
+ return fipsCurvePreferences(c)
+ }
+ if c == nil || len(c.CurvePreferences) == 0 {
+ return defaultCurvePreferences
+ }
+ return c.CurvePreferences
+}
+
+func (c *Config) supportsCurve(curve CurveID) bool {
+ for _, cc := range c.curvePreferences() {
+ if cc == curve {
+ return true
+ }
+ }
+ return false
+}
+
+// mutualVersion returns the protocol version to use given the advertised
+// versions of the peer. Priority is given to the peer preference order.
+func (c *Config) mutualVersion(isClient bool, peerVersions []uint16) (uint16, bool) {
+ supportedVersions := c.supportedVersions(isClient)
+ for _, peerVersion := range peerVersions {
+ for _, v := range supportedVersions {
+ if v == peerVersion {
+ return v, true
+ }
+ }
+ }
+ return 0, false
+}
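+
+// For example, if the peer advertises []uint16{VersionTLS13, VersionTLS12}
+// but the local Config caps MaxVersion at VersionTLS12, mutualVersion returns
+// (VersionTLS12, true): the peer's order is walked first, but only locally
+// supported versions can match.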
+
+var errNoCertificates = errors.New("tls: no certificates configured")
+
+// getCertificate returns the best certificate for the given ClientHelloInfo,
+// defaulting to the first element of c.Certificates.
+func (c *Config) getCertificate(clientHello *ClientHelloInfo) (*Certificate, error) {
+ if c.GetCertificate != nil &&
+ (len(c.Certificates) == 0 || len(clientHello.ServerName) > 0) {
+ cert, err := c.GetCertificate(clientHello)
+ if cert != nil || err != nil {
+ return cert, err
+ }
+ }
+
+ if len(c.Certificates) == 0 {
+ return nil, errNoCertificates
+ }
+
+ if len(c.Certificates) == 1 {
+ // There's only one choice, so no point doing any work.
+ return &c.Certificates[0], nil
+ }
+
+ if c.NameToCertificate != nil {
+ name := strings.ToLower(clientHello.ServerName)
+ if cert, ok := c.NameToCertificate[name]; ok {
+ return cert, nil
+ }
+ if len(name) > 0 {
+ labels := strings.Split(name, ".")
+ labels[0] = "*"
+ wildcardName := strings.Join(labels, ".")
+ if cert, ok := c.NameToCertificate[wildcardName]; ok {
+ return cert, nil
+ }
+ }
+ }
+
+ for _, cert := range c.Certificates {
+ if err := clientHello.SupportsCertificate(&cert); err == nil {
+ return &cert, nil
+ }
+ }
+
+ // If nothing matches, return the first certificate.
+ return &c.Certificates[0], nil
+}
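+
+// An illustrative sketch (with hypothetical certA and certB values) of how a
+// server Config can hook into this selection logic via GetCertificate:
+//
+//	cfg := &Config{
+//		GetCertificate: func(chi *ClientHelloInfo) (*Certificate, error) {
+//			if chi.ServerName == "internal.example.com" {
+//				return &certA, nil
+//			}
+//			return &certB, nil
+//		},
+//	}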
+
+// SupportsCertificate returns nil if the provided certificate is supported by
+// the client that sent the ClientHello. Otherwise, it returns an error
+// describing the reason for the incompatibility.
+//
+// If this ClientHelloInfo was passed to a GetConfigForClient or GetCertificate
+// callback, this method will take into account the associated Config. Note that
+// if GetConfigForClient returns a different Config, the change can't be
+// accounted for by this method.
+//
+// Unless c.Leaf is set, this function calls x509.ParseCertificate, which can
+// incur a significant performance cost.
+func (chi *ClientHelloInfo) SupportsCertificate(c *Certificate) error {
+ // Note we don't currently support certificate_authorities nor
+ // signature_algorithms_cert, and don't check the algorithms of the
+ // signatures on the chain (which anyway are a SHOULD, see RFC 8446,
+ // Section 4.4.2.2).
+
+ config := chi.config
+ if config == nil {
+ config = &Config{}
+ }
+ vers, ok := config.mutualVersion(roleServer, chi.SupportedVersions)
+ if !ok {
+ return errors.New("no mutually supported protocol versions")
+ }
+
+ // If the client specified the name they are trying to connect to, the
+ // certificate needs to be valid for it.
+ if chi.ServerName != "" {
+ x509Cert, err := c.leaf()
+ if err != nil {
+ return fmt.Errorf("failed to parse certificate: %w", err)
+ }
+ if err := x509Cert.VerifyHostname(chi.ServerName); err != nil {
+ return fmt.Errorf("certificate is not valid for requested server name: %w", err)
+ }
+ }
+
+ // supportsRSAFallback returns nil if the certificate and connection support
+ // the static RSA key exchange, and unsupported otherwise. The logic for
+ // supporting static RSA is completely disjoint from the logic for
+ // supporting signed key exchanges, so we just check it as a fallback.
+ supportsRSAFallback := func(unsupported error) error {
+ // TLS 1.3 dropped support for the static RSA key exchange.
+ if vers == VersionTLS13 {
+ return unsupported
+ }
+ // The static RSA key exchange works by decrypting a challenge with the
+ // RSA private key, not by signing, so check the PrivateKey implements
+ // crypto.Decrypter, like *rsa.PrivateKey does.
+ if priv, ok := c.PrivateKey.(crypto.Decrypter); ok {
+ if _, ok := priv.Public().(*rsa.PublicKey); !ok {
+ return unsupported
+ }
+ } else {
+ return unsupported
+ }
+ // Finally, there needs to be a mutual cipher suite that uses the static
+ // RSA key exchange instead of ECDHE.
+ rsaCipherSuite := selectCipherSuite(chi.CipherSuites, config.cipherSuites(), func(c *cipherSuite) bool {
+ if c.flags&suiteECDHE != 0 {
+ return false
+ }
+ if vers < VersionTLS12 && c.flags&suiteTLS12 != 0 {
+ return false
+ }
+ return true
+ })
+ if rsaCipherSuite == nil {
+ return unsupported
+ }
+ return nil
+ }
+
+ // If the client sent the signature_algorithms extension, ensure it supports
+ // schemes we can use with this certificate and TLS version.
+ if len(chi.SignatureSchemes) > 0 {
+ if _, err := selectSignatureScheme(vers, c, chi.SignatureSchemes); err != nil {
+ return supportsRSAFallback(err)
+ }
+ }
+
+ // In TLS 1.3 we are done because supported_groups is only relevant to the
+ // ECDHE computation, point format negotiation is removed, cipher suites are
+ // only relevant to the AEAD choice, and static RSA does not exist.
+ if vers == VersionTLS13 {
+ return nil
+ }
+
+ // The only signed key exchange we support is ECDHE.
+ if !supportsECDHE(config, chi.SupportedCurves, chi.SupportedPoints) {
+ return supportsRSAFallback(errors.New("client doesn't support ECDHE, can only use legacy RSA key exchange"))
+ }
+
+ var ecdsaCipherSuite bool
+ if priv, ok := c.PrivateKey.(crypto.Signer); ok {
+ switch pub := priv.Public().(type) {
+ case *ecdsa.PublicKey:
+ var curve CurveID
+ switch pub.Curve {
+ case elliptic.P256():
+ curve = CurveP256
+ case elliptic.P384():
+ curve = CurveP384
+ case elliptic.P521():
+ curve = CurveP521
+ default:
+ return supportsRSAFallback(unsupportedCertificateError(c))
+ }
+ var curveOk bool
+ for _, c := range chi.SupportedCurves {
+ if c == curve && config.supportsCurve(c) {
+ curveOk = true
+ break
+ }
+ }
+ if !curveOk {
+ return errors.New("client doesn't support certificate curve")
+ }
+ ecdsaCipherSuite = true
+ case ed25519.PublicKey:
+ if vers < VersionTLS12 || len(chi.SignatureSchemes) == 0 {
+ return errors.New("connection doesn't support Ed25519")
+ }
+ ecdsaCipherSuite = true
+ case *rsa.PublicKey:
+ default:
+ return supportsRSAFallback(unsupportedCertificateError(c))
+ }
+ } else {
+ return supportsRSAFallback(unsupportedCertificateError(c))
+ }
+
+ // Make sure that there is a mutually supported cipher suite that works with
+ // this certificate. Cipher suite selection will then apply the logic in
+ // reverse to pick it. See also serverHandshakeState.cipherSuiteOk.
+ cipherSuite := selectCipherSuite(chi.CipherSuites, config.cipherSuites(), func(c *cipherSuite) bool {
+ if c.flags&suiteECDHE == 0 {
+ return false
+ }
+ if c.flags&suiteECSign != 0 {
+ if !ecdsaCipherSuite {
+ return false
+ }
+ } else {
+ if ecdsaCipherSuite {
+ return false
+ }
+ }
+ if vers < VersionTLS12 && c.flags&suiteTLS12 != 0 {
+ return false
+ }
+ return true
+ })
+ if cipherSuite == nil {
+ return supportsRSAFallback(errors.New("client doesn't support any cipher suites compatible with the certificate"))
+ }
+
+ return nil
+}
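+
+// An illustrative sketch: a GetCertificate callback can use this method to
+// pick the first compatible chain from a hypothetical candidates slice:
+//
+//	for i := range candidates {
+//		if err := chi.SupportsCertificate(&candidates[i]); err == nil {
+//			return &candidates[i], nil
+//		}
+//	}
+//	return nil, errors.New("no compatible certificate")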
+
+// SupportsCertificate returns nil if the provided certificate is supported by
+// the server that sent the CertificateRequest. Otherwise, it returns an error
+// describing the reason for the incompatibility.
+func (cri *CertificateRequestInfo) SupportsCertificate(c *Certificate) error {
+ if _, err := selectSignatureScheme(cri.Version, c, cri.SignatureSchemes); err != nil {
+ return err
+ }
+
+ if len(cri.AcceptableCAs) == 0 {
+ return nil
+ }
+
+ for j, cert := range c.Certificate {
+ x509Cert := c.Leaf
+ // Parse the certificate if this isn't the leaf node, or if
+ // chain.Leaf was nil.
+ if j != 0 || x509Cert == nil {
+ var err error
+ if x509Cert, err = x509.ParseCertificate(cert); err != nil {
+ return fmt.Errorf("failed to parse certificate #%d in the chain: %w", j, err)
+ }
+ }
+
+ for _, ca := range cri.AcceptableCAs {
+ if bytes.Equal(x509Cert.RawIssuer, ca) {
+ return nil
+ }
+ }
+ }
+ return errors.New("chain is not signed by an acceptable CA")
+}
+
+// BuildNameToCertificate parses c.Certificates and builds c.NameToCertificate
+// from the CommonName and Subject Alternative Name fields of each of the leaf
+// certificates.
+//
+// Deprecated: NameToCertificate only allows associating a single certificate
+// with a given name. Leave that field nil to let the library select the first
+// compatible chain from Certificates.
+func (c *Config) BuildNameToCertificate() {
+ c.NameToCertificate = make(map[string]*Certificate)
+ for i := range c.Certificates {
+ cert := &c.Certificates[i]
+ x509Cert, err := cert.leaf()
+ if err != nil {
+ continue
+ }
+ // If SANs are *not* present, some clients will consider the certificate
+ // valid for the name in the Common Name.
+ if x509Cert.Subject.CommonName != "" && len(x509Cert.DNSNames) == 0 {
+ c.NameToCertificate[x509Cert.Subject.CommonName] = cert
+ }
+ for _, san := range x509Cert.DNSNames {
+ c.NameToCertificate[san] = cert
+ }
+ }
+}
+
+const (
+ keyLogLabelTLS12 = "CLIENT_RANDOM"
+ keyLogLabelClientHandshake = "CLIENT_HANDSHAKE_TRAFFIC_SECRET"
+ keyLogLabelServerHandshake = "SERVER_HANDSHAKE_TRAFFIC_SECRET"
+ keyLogLabelClientTraffic = "CLIENT_TRAFFIC_SECRET_0"
+ keyLogLabelServerTraffic = "SERVER_TRAFFIC_SECRET_0"
+)
+
+func (c *Config) writeKeyLog(label string, clientRandom, secret []byte) error {
+ if c.KeyLogWriter == nil {
+ return nil
+ }
+
+ logLine := []byte(fmt.Sprintf("%s %x %x\n", label, clientRandom, secret))
+
+ writerMutex.Lock()
+ _, err := c.KeyLogWriter.Write(logLine)
+ writerMutex.Unlock()
+
+ return err
+}
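+
+// An illustrative sketch of enabling the key log for debugging tools such as
+// Wireshark; the file path here is an arbitrary example:
+//
+//	f, err := os.OpenFile("keys.log", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	cfg := &Config{KeyLogWriter: f}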
+
+// writerMutex protects all KeyLogWriters globally. It is rarely enabled,
+// and is only for debugging, so a global mutex saves space.
+var writerMutex sync.Mutex
+
+// A Certificate is a chain of one or more certificates, leaf first.
+type Certificate struct {
+ Certificate [][]byte
+ // PrivateKey contains the private key corresponding to the public key in
+ // Leaf. This must implement crypto.Signer with an RSA, ECDSA or Ed25519 PublicKey.
+ // For a server up to TLS 1.2, it can also implement crypto.Decrypter with
+ // an RSA PublicKey.
+ PrivateKey crypto.PrivateKey
+ // SupportedSignatureAlgorithms is an optional list restricting what
+ // signature algorithms the PrivateKey can be used for.
+ SupportedSignatureAlgorithms []SignatureScheme
+ // OCSPStaple contains an optional OCSP response which will be served
+ // to clients that request it.
+ OCSPStaple []byte
+ // SignedCertificateTimestamps contains an optional list of Signed
+ // Certificate Timestamps which will be served to clients that request it.
+ SignedCertificateTimestamps [][]byte
+ // Leaf is the parsed form of the leaf certificate, which may be initialized
+ // using x509.ParseCertificate to reduce per-handshake processing. If nil,
+ // the leaf certificate will be parsed as needed.
+ Leaf *x509.Certificate
+}
+
+// leaf returns the parsed leaf certificate, either from c.Leaf or by parsing
+// the corresponding c.Certificate[0].
+func (c *Certificate) leaf() (*x509.Certificate, error) {
+ if c.Leaf != nil {
+ return c.Leaf, nil
+ }
+ return x509.ParseCertificate(c.Certificate[0])
+}
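+
+// An illustrative sketch: callers can avoid the per-handshake parse by
+// populating Leaf once after loading a key pair (the paths are examples):
+//
+//	cert, err := LoadX509KeyPair("cert.pem", "key.pem")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])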
+
+type handshakeMessage interface {
+ marshal() []byte
+ unmarshal([]byte) bool
+}
+
+// lruSessionCache is a ClientSessionCache implementation that uses an LRU
+// caching strategy.
+type lruSessionCache struct {
+ sync.Mutex
+
+ m map[string]*list.Element
+ q *list.List
+ capacity int
+}
+
+type lruSessionCacheEntry struct {
+ sessionKey string
+ state *ClientSessionState
+}
+
+// NewLRUClientSessionCache returns a ClientSessionCache with the given
+// capacity that uses an LRU strategy. If capacity is < 1, a default capacity
+// is used instead.
+func NewLRUClientSessionCache(capacity int) ClientSessionCache {
+ const defaultSessionCacheCapacity = 64
+
+ if capacity < 1 {
+ capacity = defaultSessionCacheCapacity
+ }
+ return &lruSessionCache{
+ m: make(map[string]*list.Element),
+ q: list.New(),
+ capacity: capacity,
+ }
+}
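+
+// An illustrative sketch: enabling client-side session resumption with this
+// cache on a hypothetical client Config named cfg:
+//
+//	cfg.ClientSessionCache = NewLRUClientSessionCache(128)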
+
+// Put adds the provided (sessionKey, cs) pair to the cache. If cs is nil, the entry
+// corresponding to sessionKey is removed from the cache instead.
+func (c *lruSessionCache) Put(sessionKey string, cs *ClientSessionState) {
+ c.Lock()
+ defer c.Unlock()
+
+ if elem, ok := c.m[sessionKey]; ok {
+ if cs == nil {
+ c.q.Remove(elem)
+ delete(c.m, sessionKey)
+ } else {
+ entry := elem.Value.(*lruSessionCacheEntry)
+ entry.state = cs
+ c.q.MoveToFront(elem)
+ }
+ return
+ }
+
+ if c.q.Len() < c.capacity {
+ entry := &lruSessionCacheEntry{sessionKey, cs}
+ c.m[sessionKey] = c.q.PushFront(entry)
+ return
+ }
+
+ elem := c.q.Back()
+ entry := elem.Value.(*lruSessionCacheEntry)
+ delete(c.m, entry.sessionKey)
+ entry.sessionKey = sessionKey
+ entry.state = cs
+ c.q.MoveToFront(elem)
+ c.m[sessionKey] = elem
+}
+
+// Get returns the ClientSessionState value associated with a given key. It
+// returns (nil, false) if no value is found.
+func (c *lruSessionCache) Get(sessionKey string) (*ClientSessionState, bool) {
+ c.Lock()
+ defer c.Unlock()
+
+ if elem, ok := c.m[sessionKey]; ok {
+ c.q.MoveToFront(elem)
+ return elem.Value.(*lruSessionCacheEntry).state, true
+ }
+ return nil, false
+}
+
+var emptyConfig Config
+
+func defaultConfig() *Config {
+ return &emptyConfig
+}
+
+func unexpectedMessageError(wanted, got any) error {
+ return fmt.Errorf("tls: received unexpected handshake message of type %T when waiting for %T", got, wanted)
+}
+
+func isSupportedSignatureAlgorithm(sigAlg SignatureScheme, supportedSignatureAlgorithms []SignatureScheme) bool {
+ for _, s := range supportedSignatureAlgorithms {
+ if s == sigAlg {
+ return true
+ }
+ }
+ return false
+}
diff --git a/contrib/go/_std_1.18/src/crypto/tls/common_string.go b/contrib/go/_std_1.19/src/crypto/tls/common_string.go
index 238108811f..238108811f 100644
--- a/contrib/go/_std_1.18/src/crypto/tls/common_string.go
+++ b/contrib/go/_std_1.19/src/crypto/tls/common_string.go
diff --git a/contrib/go/_std_1.19/src/crypto/tls/conn.go b/contrib/go/_std_1.19/src/crypto/tls/conn.go
new file mode 100644
index 0000000000..1861a312f1
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/tls/conn.go
@@ -0,0 +1,1545 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TLS low level connection and record layer
+
+package tls
+
+import (
+ "bytes"
+ "context"
+ "crypto/cipher"
+ "crypto/subtle"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// A Conn represents a secured connection.
+// It implements the net.Conn interface.
+type Conn struct {
+ // constant
+ conn net.Conn
+ isClient bool
+ handshakeFn func(context.Context) error // (*Conn).clientHandshake or serverHandshake
+
+ // handshakeStatus is 1 if the connection is currently transferring
+ // application data (i.e. is not currently processing a handshake).
+ // handshakeStatus == 1 implies handshakeErr == nil.
+ // This field is only to be accessed with sync/atomic.
+ handshakeStatus uint32
+ // constant after handshake; protected by handshakeMutex
+ handshakeMutex sync.Mutex
+ handshakeErr error // error resulting from handshake
+ vers uint16 // TLS version
+ haveVers bool // version has been negotiated
+ config *Config // configuration passed to constructor
+ // handshakes counts the number of handshakes performed on the
+ // connection so far. If renegotiation is disabled then this is either
+ // zero or one.
+ handshakes int
+ didResume bool // whether this connection was a session resumption
+ cipherSuite uint16
+ ocspResponse []byte // stapled OCSP response
+ scts [][]byte // signed certificate timestamps from server
+ peerCertificates []*x509.Certificate
+ // verifiedChains contains the certificate chains that we built, as
+ // opposed to the ones presented by the server.
+ verifiedChains [][]*x509.Certificate
+ // serverName contains the server name indicated by the client, if any.
+ serverName string
+ // secureRenegotiation is true if the server echoed the secure
+ // renegotiation extension. (This is meaningless as a server because
+ // renegotiation is not supported in that case.)
+ secureRenegotiation bool
+ // ekm is a closure for exporting keying material.
+ ekm func(label string, context []byte, length int) ([]byte, error)
+ // resumptionSecret is the resumption_master_secret for handling
+ // NewSessionTicket messages. nil if config.SessionTicketsDisabled.
+ resumptionSecret []byte
+
+ // ticketKeys is the set of active session ticket keys for this
+ // connection. The first one is used to encrypt new tickets and
+ // all are tried to decrypt tickets.
+ ticketKeys []ticketKey
+
+ // clientFinishedIsFirst is true if the client sent the first Finished
+ // message during the most recent handshake. This is recorded because
+ // the first transmitted Finished message is the tls-unique
+ // channel-binding value.
+ clientFinishedIsFirst bool
+
+ // closeNotifyErr is any error from sending the alertCloseNotify record.
+ closeNotifyErr error
+ // closeNotifySent is true if the Conn attempted to send an
+ // alertCloseNotify record.
+ closeNotifySent bool
+
+ // clientFinished and serverFinished contain the Finished message sent
+ // by the client or server in the most recent handshake. This is
+ // retained to support the renegotiation extension and tls-unique
+ // channel-binding.
+ clientFinished [12]byte
+ serverFinished [12]byte
+
+ // clientProtocol is the negotiated ALPN protocol.
+ clientProtocol string
+
+ // input/output
+ in, out halfConn
+ rawInput bytes.Buffer // raw input, starting with a record header
+ input bytes.Reader // application data waiting to be read, from rawInput.Next
+ hand bytes.Buffer // handshake data waiting to be read
+ buffering bool // whether records are buffered in sendBuf
+ sendBuf []byte // a buffer of records waiting to be sent
+
+ // bytesSent counts the bytes of application data sent.
+ // packetsSent counts packets.
+ bytesSent int64
+ packetsSent int64
+
+ // retryCount counts the number of consecutive non-advancing records
+ // received by Conn.readRecord. That is, records that neither advance the
+ // handshake, nor deliver application data. Protected by in.Mutex.
+ retryCount int
+
+ // activeCall is an atomic int32; the low bit is whether Close has
+ // been called. the rest of the bits are the number of goroutines
+ // in Conn.Write.
+ activeCall int32
+
+ tmp [16]byte
+}
+
+// Access to net.Conn methods.
+// Cannot just embed net.Conn because that would
+// export the struct field too.
+
+// LocalAddr returns the local network address.
+func (c *Conn) LocalAddr() net.Addr {
+ return c.conn.LocalAddr()
+}
+
+// RemoteAddr returns the remote network address.
+func (c *Conn) RemoteAddr() net.Addr {
+ return c.conn.RemoteAddr()
+}
+
+// SetDeadline sets the read and write deadlines associated with the connection.
+// A zero value for t means Read and Write will not time out.
+// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
+func (c *Conn) SetDeadline(t time.Time) error {
+ return c.conn.SetDeadline(t)
+}
+
+// SetReadDeadline sets the read deadline on the underlying connection.
+// A zero value for t means Read will not time out.
+func (c *Conn) SetReadDeadline(t time.Time) error {
+ return c.conn.SetReadDeadline(t)
+}
+
+// SetWriteDeadline sets the write deadline on the underlying connection.
+// A zero value for t means Write will not time out.
+// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+ return c.conn.SetWriteDeadline(t)
+}
+
+// NetConn returns the underlying connection that is wrapped by c.
+// Note that writing to or reading from this connection directly will corrupt the
+// TLS session.
+func (c *Conn) NetConn() net.Conn {
+ return c.conn
+}
+
+// A halfConn represents one direction of the record layer
+// connection, either sending or receiving.
+type halfConn struct {
+ sync.Mutex
+
+ err error // first permanent error
+ version uint16 // protocol version
+ cipher any // cipher algorithm
+ mac hash.Hash
+ seq [8]byte // 64-bit sequence number
+
+ scratchBuf [13]byte // to avoid allocs; interface method args escape
+
+ nextCipher any // next encryption state
+ nextMac hash.Hash // next MAC algorithm
+
+ trafficSecret []byte // current TLS 1.3 traffic secret
+}
+
+type permanentError struct {
+ err net.Error
+}
+
+func (e *permanentError) Error() string { return e.err.Error() }
+func (e *permanentError) Unwrap() error { return e.err }
+func (e *permanentError) Timeout() bool { return e.err.Timeout() }
+func (e *permanentError) Temporary() bool { return false }
+
+func (hc *halfConn) setErrorLocked(err error) error {
+ if e, ok := err.(net.Error); ok {
+ hc.err = &permanentError{err: e}
+ } else {
+ hc.err = err
+ }
+ return hc.err
+}
+
+// prepareCipherSpec sets the encryption and MAC states
+// that a subsequent changeCipherSpec will use.
+func (hc *halfConn) prepareCipherSpec(version uint16, cipher any, mac hash.Hash) {
+ hc.version = version
+ hc.nextCipher = cipher
+ hc.nextMac = mac
+}
+
+// changeCipherSpec changes the encryption and MAC states
+// to the ones previously passed to prepareCipherSpec.
+func (hc *halfConn) changeCipherSpec() error {
+ if hc.nextCipher == nil || hc.version == VersionTLS13 {
+ return alertInternalError
+ }
+ hc.cipher = hc.nextCipher
+ hc.mac = hc.nextMac
+ hc.nextCipher = nil
+ hc.nextMac = nil
+ for i := range hc.seq {
+ hc.seq[i] = 0
+ }
+ return nil
+}
+
+func (hc *halfConn) setTrafficSecret(suite *cipherSuiteTLS13, secret []byte) {
+ hc.trafficSecret = secret
+ key, iv := suite.trafficKey(secret)
+ hc.cipher = suite.aead(key, iv)
+ for i := range hc.seq {
+ hc.seq[i] = 0
+ }
+}
+
+// incSeq increments the sequence number.
+func (hc *halfConn) incSeq() {
+ for i := 7; i >= 0; i-- {
+ hc.seq[i]++
+ if hc.seq[i] != 0 {
+ return
+ }
+ }
+
+ // Not allowed to let sequence number wrap.
+ // Instead, must renegotiate before it does.
+ // Not likely enough to bother.
+ panic("TLS: sequence number wraparound")
+}
+
+// explicitNonceLen returns the number of bytes of explicit nonce or IV included
+// in each record. Explicit nonces are present only in CBC modes after TLS 1.0
+// and in certain AEAD modes in TLS 1.2.
+func (hc *halfConn) explicitNonceLen() int {
+ if hc.cipher == nil {
+ return 0
+ }
+
+ switch c := hc.cipher.(type) {
+ case cipher.Stream:
+ return 0
+ case aead:
+ return c.explicitNonceLen()
+ case cbcMode:
+ // TLS 1.1 introduced a per-record explicit IV to fix the BEAST attack.
+ if hc.version >= VersionTLS11 {
+ return c.BlockSize()
+ }
+ return 0
+ default:
+ panic("unknown cipher type")
+ }
+}
+
+// extractPadding returns, in constant time, the length of the padding to remove
+// from the end of payload. It also returns a byte which is equal to 255 if the
+// padding was valid and 0 otherwise. See RFC 2246, Section 6.2.3.2.
+func extractPadding(payload []byte) (toRemove int, good byte) {
+ if len(payload) < 1 {
+ return 0, 0
+ }
+
+ paddingLen := payload[len(payload)-1]
+ t := uint(len(payload)-1) - uint(paddingLen)
+	// if len(payload)-1 >= paddingLen then the MSB of t is zero
+ good = byte(int32(^t) >> 31)
+
+ // The maximum possible padding length plus the actual length field
+ toCheck := 256
+ // The length of the padded data is public, so we can use an if here
+ if toCheck > len(payload) {
+ toCheck = len(payload)
+ }
+
+ for i := 0; i < toCheck; i++ {
+ t := uint(paddingLen) - uint(i)
+ // if i <= paddingLen then the MSB of t is zero
+ mask := byte(int32(^t) >> 31)
+ b := payload[len(payload)-1-i]
+ good &^= mask&paddingLen ^ mask&b
+ }
+
+ // We AND together the bits of good and replicate the result across
+ // all the bits.
+ good &= good << 4
+ good &= good << 2
+ good &= good << 1
+ good = uint8(int8(good) >> 7)
+
+ // Zero the padding length on error. This ensures any unchecked bytes
+ // are included in the MAC. Otherwise, an attacker that could
+ // distinguish MAC failures from padding failures could mount an attack
+ // similar to POODLE in SSL 3.0: given a good ciphertext that uses a
+ // full block's worth of padding, replace the final block with another
+ // block. If the MAC check passed but the padding check failed, the
+ // last byte of that block decrypted to the block size.
+ //
+ // See also macAndPaddingGood logic below.
+ paddingLen &= good
+
+ toRemove = int(paddingLen) + 1
+ return
+}
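+
+// An illustrative worked example of the mask trick above: when the
+// subtraction does not underflow, the MSB of t is clear, so ^t has its MSB
+// set and the arithmetic shift smears it across the byte:
+//
+//	t := uint(9) - uint(4)        // in range: MSB of t is clear
+//	good := byte(int32(^t) >> 31) // 0xff
+//	t = uint(2) - uint(4)         // underflow: MSB of t is set
+//	good = byte(int32(^t) >> 31)  // 0x00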
+
+func roundUp(a, b int) int {
+ return a + (b-a%b)%b
+}
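+
+// A quick worked example of roundUp: roundUp(21, 16) == 21+(16-5)%16 == 32,
+// while roundUp(32, 16) == 32 since 32 is already a multiple of 16.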
+
+// cbcMode is an interface for block ciphers using cipher block chaining.
+type cbcMode interface {
+ cipher.BlockMode
+ SetIV([]byte)
+}
+
+// decrypt authenticates and decrypts the record if protection is active at
+// this stage. The returned plaintext might overlap with the input.
+func (hc *halfConn) decrypt(record []byte) ([]byte, recordType, error) {
+ var plaintext []byte
+ typ := recordType(record[0])
+ payload := record[recordHeaderLen:]
+
+ // In TLS 1.3, change_cipher_spec messages are to be ignored without being
+ // decrypted. See RFC 8446, Appendix D.4.
+ if hc.version == VersionTLS13 && typ == recordTypeChangeCipherSpec {
+ return payload, typ, nil
+ }
+
+ paddingGood := byte(255)
+ paddingLen := 0
+
+ explicitNonceLen := hc.explicitNonceLen()
+
+ if hc.cipher != nil {
+ switch c := hc.cipher.(type) {
+ case cipher.Stream:
+ c.XORKeyStream(payload, payload)
+ case aead:
+ if len(payload) < explicitNonceLen {
+ return nil, 0, alertBadRecordMAC
+ }
+ nonce := payload[:explicitNonceLen]
+ if len(nonce) == 0 {
+ nonce = hc.seq[:]
+ }
+ payload = payload[explicitNonceLen:]
+
+ var additionalData []byte
+ if hc.version == VersionTLS13 {
+ additionalData = record[:recordHeaderLen]
+ } else {
+ additionalData = append(hc.scratchBuf[:0], hc.seq[:]...)
+ additionalData = append(additionalData, record[:3]...)
+ n := len(payload) - c.Overhead()
+ additionalData = append(additionalData, byte(n>>8), byte(n))
+ }
+
+ var err error
+ plaintext, err = c.Open(payload[:0], nonce, payload, additionalData)
+ if err != nil {
+ return nil, 0, alertBadRecordMAC
+ }
+ case cbcMode:
+ blockSize := c.BlockSize()
+ minPayload := explicitNonceLen + roundUp(hc.mac.Size()+1, blockSize)
+ if len(payload)%blockSize != 0 || len(payload) < minPayload {
+ return nil, 0, alertBadRecordMAC
+ }
+
+ if explicitNonceLen > 0 {
+ c.SetIV(payload[:explicitNonceLen])
+ payload = payload[explicitNonceLen:]
+ }
+ c.CryptBlocks(payload, payload)
+
+ // In a limited attempt to protect against CBC padding oracles like
+ // Lucky13, the data past paddingLen (which is secret) is passed to
+ // the MAC function as extra data, to be fed into the HMAC after
+ // computing the digest. This makes the MAC roughly constant time as
+ // long as the digest computation is constant time and does not
+ // affect the subsequent write, modulo cache effects.
+ paddingLen, paddingGood = extractPadding(payload)
+ default:
+ panic("unknown cipher type")
+ }
+
+ if hc.version == VersionTLS13 {
+ if typ != recordTypeApplicationData {
+ return nil, 0, alertUnexpectedMessage
+ }
+ if len(plaintext) > maxPlaintext+1 {
+ return nil, 0, alertRecordOverflow
+ }
+ // Remove padding and find the ContentType scanning from the end.
+ for i := len(plaintext) - 1; i >= 0; i-- {
+ if plaintext[i] != 0 {
+ typ = recordType(plaintext[i])
+ plaintext = plaintext[:i]
+ break
+ }
+ if i == 0 {
+ return nil, 0, alertUnexpectedMessage
+ }
+ }
+ }
+ } else {
+ plaintext = payload
+ }
+
+ if hc.mac != nil {
+ macSize := hc.mac.Size()
+ if len(payload) < macSize {
+ return nil, 0, alertBadRecordMAC
+ }
+
+ n := len(payload) - macSize - paddingLen
+ n = subtle.ConstantTimeSelect(int(uint32(n)>>31), 0, n) // if n < 0 { n = 0 }
+ record[3] = byte(n >> 8)
+ record[4] = byte(n)
+ remoteMAC := payload[n : n+macSize]
+ localMAC := tls10MAC(hc.mac, hc.scratchBuf[:0], hc.seq[:], record[:recordHeaderLen], payload[:n], payload[n+macSize:])
+
+ // This is equivalent to checking the MACs and paddingGood
+ // separately, but in constant-time to prevent distinguishing
+ // padding failures from MAC failures. Depending on what value
+ // of paddingLen was returned on bad padding, distinguishing
+ // bad MAC from bad padding can lead to an attack.
+ //
+ // See also the logic at the end of extractPadding.
+ macAndPaddingGood := subtle.ConstantTimeCompare(localMAC, remoteMAC) & int(paddingGood)
+ if macAndPaddingGood != 1 {
+ return nil, 0, alertBadRecordMAC
+ }
+
+ plaintext = payload[:n]
+ }
+
+ hc.incSeq()
+ return plaintext, typ, nil
+}
+
+// sliceForAppend extends the input slice by n bytes. head is the full extended
+// slice, while tail is the appended part. If the original slice has sufficient
+// capacity no allocation is performed.
+func sliceForAppend(in []byte, n int) (head, tail []byte) {
+ if total := len(in) + n; cap(in) >= total {
+ head = in[:total]
+ } else {
+ head = make([]byte, total)
+ copy(head, in)
+ }
+ tail = head[len(in):]
+ return
+}
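+
+// An illustrative sketch of sliceForAppend:
+//
+//	head, tail := sliceForAppend([]byte{1, 2}, 3)
+//	// len(head) == 5, len(tail) == 3, and head[:2] still holds 1, 2.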
+
+// encrypt encrypts payload, adding the appropriate nonce and/or MAC, and
+// appends it to record, which must already contain the record header.
+func (hc *halfConn) encrypt(record, payload []byte, rand io.Reader) ([]byte, error) {
+ if hc.cipher == nil {
+ return append(record, payload...), nil
+ }
+
+ var explicitNonce []byte
+ if explicitNonceLen := hc.explicitNonceLen(); explicitNonceLen > 0 {
+ record, explicitNonce = sliceForAppend(record, explicitNonceLen)
+ if _, isCBC := hc.cipher.(cbcMode); !isCBC && explicitNonceLen < 16 {
+ // The AES-GCM construction in TLS has an explicit nonce so that the
+ // nonce can be random. However, the nonce is only 8 bytes which is
+ // too small for a secure, random nonce. Therefore we use the
+ // sequence number as the nonce. The 3DES-CBC construction also has
+ // an 8 bytes nonce but its nonces must be unpredictable (see RFC
+ // 5246, Appendix F.3), forcing us to use randomness. That's not
+ // 3DES' biggest problem anyway because the birthday bound on block
+ // collision is reached first due to its similarly small block size
+ // (see the Sweet32 attack).
+ copy(explicitNonce, hc.seq[:])
+ } else {
+ if _, err := io.ReadFull(rand, explicitNonce); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ var dst []byte
+ switch c := hc.cipher.(type) {
+ case cipher.Stream:
+ mac := tls10MAC(hc.mac, hc.scratchBuf[:0], hc.seq[:], record[:recordHeaderLen], payload, nil)
+ record, dst = sliceForAppend(record, len(payload)+len(mac))
+ c.XORKeyStream(dst[:len(payload)], payload)
+ c.XORKeyStream(dst[len(payload):], mac)
+ case aead:
+ nonce := explicitNonce
+ if len(nonce) == 0 {
+ nonce = hc.seq[:]
+ }
+
+ if hc.version == VersionTLS13 {
+ record = append(record, payload...)
+
+ // Encrypt the actual ContentType and replace the plaintext one.
+ record = append(record, record[0])
+ record[0] = byte(recordTypeApplicationData)
+
+ n := len(payload) + 1 + c.Overhead()
+ record[3] = byte(n >> 8)
+ record[4] = byte(n)
+
+ record = c.Seal(record[:recordHeaderLen],
+ nonce, record[recordHeaderLen:], record[:recordHeaderLen])
+ } else {
+ additionalData := append(hc.scratchBuf[:0], hc.seq[:]...)
+ additionalData = append(additionalData, record[:recordHeaderLen]...)
+ record = c.Seal(record, nonce, payload, additionalData)
+ }
+ case cbcMode:
+ mac := tls10MAC(hc.mac, hc.scratchBuf[:0], hc.seq[:], record[:recordHeaderLen], payload, nil)
+ blockSize := c.BlockSize()
+ plaintextLen := len(payload) + len(mac)
+ paddingLen := blockSize - plaintextLen%blockSize
+ record, dst = sliceForAppend(record, plaintextLen+paddingLen)
+ copy(dst, payload)
+ copy(dst[len(payload):], mac)
+ for i := plaintextLen; i < len(dst); i++ {
+ dst[i] = byte(paddingLen - 1)
+ }
+ if len(explicitNonce) > 0 {
+ c.SetIV(explicitNonce)
+ }
+ c.CryptBlocks(dst, dst)
+ default:
+ panic("unknown cipher type")
+ }
+
+ // Update length to include nonce, MAC and any block padding needed.
+ n := len(record) - recordHeaderLen
+ record[3] = byte(n >> 8)
+ record[4] = byte(n)
+ hc.incSeq()
+
+ return record, nil
+}
+
+// RecordHeaderError is returned when a TLS record header is invalid.
+type RecordHeaderError struct {
+ // Msg contains a human readable string that describes the error.
+ Msg string
+ // RecordHeader contains the five bytes of TLS record header that
+ // triggered the error.
+ RecordHeader [5]byte
+ // Conn provides the underlying net.Conn in the case that a client
+ // sent an initial handshake that didn't look like TLS.
+ // It is nil if there's already been a handshake or a TLS alert has
+ // been written to the connection.
+ Conn net.Conn
+}
+
+func (e RecordHeaderError) Error() string { return "tls: " + e.Msg }
+
+func (c *Conn) newRecordHeaderError(conn net.Conn, msg string) (err RecordHeaderError) {
+ err.Msg = msg
+ err.Conn = conn
+ copy(err.RecordHeader[:], c.rawInput.Bytes())
+ return err
+}
+
+func (c *Conn) readRecord() error {
+ return c.readRecordOrCCS(false)
+}
+
+func (c *Conn) readChangeCipherSpec() error {
+ return c.readRecordOrCCS(true)
+}
+
+// readRecordOrCCS reads one or more TLS records from the connection and
+// updates the record layer state. Some invariants:
+// - c.in must be locked
+// - c.input must be empty
+//
+// During the handshake one and only one of the following will happen:
+// - c.hand grows
+// - c.in.changeCipherSpec is called
+// - an error is returned
+//
+// After the handshake one and only one of the following will happen:
+// - c.hand grows
+// - c.input is set
+// - an error is returned
+func (c *Conn) readRecordOrCCS(expectChangeCipherSpec bool) error {
+ if c.in.err != nil {
+ return c.in.err
+ }
+ handshakeComplete := c.handshakeComplete()
+
+ // This function modifies c.rawInput, which owns the c.input memory.
+ if c.input.Len() != 0 {
+ return c.in.setErrorLocked(errors.New("tls: internal error: attempted to read record with pending application data"))
+ }
+ c.input.Reset(nil)
+
+ // Read header, payload.
+ if err := c.readFromUntil(c.conn, recordHeaderLen); err != nil {
+ // RFC 8446, Section 6.1 suggests that EOF without an alertCloseNotify
+ // is an error, but popular web sites seem to do this, so we accept it
+ // if and only if at the record boundary.
+ if err == io.ErrUnexpectedEOF && c.rawInput.Len() == 0 {
+ err = io.EOF
+ }
+ if e, ok := err.(net.Error); !ok || !e.Temporary() {
+ c.in.setErrorLocked(err)
+ }
+ return err
+ }
+ hdr := c.rawInput.Bytes()[:recordHeaderLen]
+ typ := recordType(hdr[0])
+
+	// No valid TLS record has a type of 0x80; however, SSLv2 handshakes
+ // start with a uint16 length where the MSB is set and the first record
+ // is always < 256 bytes long. Therefore typ == 0x80 strongly suggests
+ // an SSLv2 client.
+ if !handshakeComplete && typ == 0x80 {
+ c.sendAlert(alertProtocolVersion)
+ return c.in.setErrorLocked(c.newRecordHeaderError(nil, "unsupported SSLv2 handshake received"))
+ }
+
+ vers := uint16(hdr[1])<<8 | uint16(hdr[2])
+ n := int(hdr[3])<<8 | int(hdr[4])
+ if c.haveVers && c.vers != VersionTLS13 && vers != c.vers {
+ c.sendAlert(alertProtocolVersion)
+ msg := fmt.Sprintf("received record with version %x when expecting version %x", vers, c.vers)
+ return c.in.setErrorLocked(c.newRecordHeaderError(nil, msg))
+ }
+ if !c.haveVers {
+ // First message, be extra suspicious: this might not be a TLS
+ // client. Bail out before reading a full 'body', if possible.
+ // The current max version is 3.3 so if the version is >= 16.0,
+ // it's probably not real.
+ if (typ != recordTypeAlert && typ != recordTypeHandshake) || vers >= 0x1000 {
+ return c.in.setErrorLocked(c.newRecordHeaderError(c.conn, "first record does not look like a TLS handshake"))
+ }
+ }
+ if c.vers == VersionTLS13 && n > maxCiphertextTLS13 || n > maxCiphertext {
+ c.sendAlert(alertRecordOverflow)
+ msg := fmt.Sprintf("oversized record received with length %d", n)
+ return c.in.setErrorLocked(c.newRecordHeaderError(nil, msg))
+ }
+ if err := c.readFromUntil(c.conn, recordHeaderLen+n); err != nil {
+ if e, ok := err.(net.Error); !ok || !e.Temporary() {
+ c.in.setErrorLocked(err)
+ }
+ return err
+ }
+
+ // Process message.
+ record := c.rawInput.Next(recordHeaderLen + n)
+ data, typ, err := c.in.decrypt(record)
+ if err != nil {
+ return c.in.setErrorLocked(c.sendAlert(err.(alert)))
+ }
+ if len(data) > maxPlaintext {
+ return c.in.setErrorLocked(c.sendAlert(alertRecordOverflow))
+ }
+
+ // Application Data messages are always protected.
+ if c.in.cipher == nil && typ == recordTypeApplicationData {
+ return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
+ }
+
+ if typ != recordTypeAlert && typ != recordTypeChangeCipherSpec && len(data) > 0 {
+ // This is a state-advancing message: reset the retry count.
+ c.retryCount = 0
+ }
+
+ // Handshake messages MUST NOT be interleaved with other record types in TLS 1.3.
+ if c.vers == VersionTLS13 && typ != recordTypeHandshake && c.hand.Len() > 0 {
+ return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
+ }
+
+ switch typ {
+ default:
+ return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
+
+ case recordTypeAlert:
+ if len(data) != 2 {
+ return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
+ }
+ if alert(data[1]) == alertCloseNotify {
+ return c.in.setErrorLocked(io.EOF)
+ }
+ if c.vers == VersionTLS13 {
+ return c.in.setErrorLocked(&net.OpError{Op: "remote error", Err: alert(data[1])})
+ }
+ switch data[0] {
+ case alertLevelWarning:
+ // Drop the record on the floor and retry.
+ return c.retryReadRecord(expectChangeCipherSpec)
+ case alertLevelError:
+ return c.in.setErrorLocked(&net.OpError{Op: "remote error", Err: alert(data[1])})
+ default:
+ return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
+ }
+
+ case recordTypeChangeCipherSpec:
+ if len(data) != 1 || data[0] != 1 {
+ return c.in.setErrorLocked(c.sendAlert(alertDecodeError))
+ }
+ // Handshake messages are not allowed to fragment across the CCS.
+ if c.hand.Len() > 0 {
+ return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
+ }
+ // In TLS 1.3, change_cipher_spec records are ignored until the
+ // Finished. See RFC 8446, Appendix D.4. Note that according to Section
+ // 5, a server can send a ChangeCipherSpec before its ServerHello, when
+ // c.vers is still unset. That's not useful though and suspicious if the
+ // server then selects a lower protocol version, so don't allow that.
+ if c.vers == VersionTLS13 {
+ return c.retryReadRecord(expectChangeCipherSpec)
+ }
+ if !expectChangeCipherSpec {
+ return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
+ }
+ if err := c.in.changeCipherSpec(); err != nil {
+ return c.in.setErrorLocked(c.sendAlert(err.(alert)))
+ }
+
+ case recordTypeApplicationData:
+ if !handshakeComplete || expectChangeCipherSpec {
+ return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
+ }
+ // Some OpenSSL servers send empty records in order to randomize the
+ // CBC IV. Ignore a limited number of empty records.
+ if len(data) == 0 {
+ return c.retryReadRecord(expectChangeCipherSpec)
+ }
+ // Note that data is owned by c.rawInput, following the Next call above,
+ // to avoid copying the plaintext. This is safe because c.rawInput is
+ // not read from or written to until c.input is drained.
+ c.input.Reset(data)
+
+ case recordTypeHandshake:
+ if len(data) == 0 || expectChangeCipherSpec {
+ return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
+ }
+ c.hand.Write(data)
+ }
+
+ return nil
+}
+
+// retryReadRecord recurses into readRecordOrCCS to drop a non-advancing record, like
+// a warning alert, empty application_data, or a change_cipher_spec in TLS 1.3.
+func (c *Conn) retryReadRecord(expectChangeCipherSpec bool) error {
+ c.retryCount++
+ if c.retryCount > maxUselessRecords {
+ c.sendAlert(alertUnexpectedMessage)
+ return c.in.setErrorLocked(errors.New("tls: too many ignored records"))
+ }
+ return c.readRecordOrCCS(expectChangeCipherSpec)
+}
+
+// atLeastReader reads from R, stopping with EOF once at least N bytes have been
+// read. It is different from an io.LimitedReader in that it doesn't cut short
+// the last Read call, and in that it considers an early EOF an error.
+type atLeastReader struct {
+ R io.Reader
+ N int64
+}
+
+func (r *atLeastReader) Read(p []byte) (int, error) {
+ if r.N <= 0 {
+ return 0, io.EOF
+ }
+ n, err := r.R.Read(p)
+ r.N -= int64(n) // won't underflow unless len(p) >= n > 9223372036854775809
+ if r.N > 0 && err == io.EOF {
+ return n, io.ErrUnexpectedEOF
+ }
+ if r.N <= 0 && err == nil {
+ return n, io.EOF
+ }
+ return n, err
+}
+
+// readFromUntil reads from r into c.rawInput until c.rawInput contains
+// at least n bytes or else returns an error.
+func (c *Conn) readFromUntil(r io.Reader, n int) error {
+ if c.rawInput.Len() >= n {
+ return nil
+ }
+ needs := n - c.rawInput.Len()
+ // There might be extra input waiting on the wire. Make a best effort
+ // attempt to fetch it so that it can be used in (*Conn).Read to
+ // "predict" closeNotify alerts.
+ c.rawInput.Grow(needs + bytes.MinRead)
+ _, err := c.rawInput.ReadFrom(&atLeastReader{r, int64(needs)})
+ return err
+}
+
+// sendAlertLocked sends a TLS alert message. The caller must hold c.out's lock.
+func (c *Conn) sendAlertLocked(err alert) error {
+ switch err {
+ case alertNoRenegotiation, alertCloseNotify:
+ c.tmp[0] = alertLevelWarning
+ default:
+ c.tmp[0] = alertLevelError
+ }
+ c.tmp[1] = byte(err)
+
+ _, writeErr := c.writeRecordLocked(recordTypeAlert, c.tmp[0:2])
+ if err == alertCloseNotify {
+ // closeNotify is a special case in that it isn't an error.
+ return writeErr
+ }
+
+ return c.out.setErrorLocked(&net.OpError{Op: "local error", Err: err})
+}
+
+// sendAlert sends a TLS alert message.
+func (c *Conn) sendAlert(err alert) error {
+ c.out.Lock()
+ defer c.out.Unlock()
+ return c.sendAlertLocked(err)
+}
+
+const (
+ // tcpMSSEstimate is a conservative estimate of the TCP maximum segment
+ // size (MSS). A constant is used, rather than querying the kernel for
+ // the actual MSS, to avoid complexity. The value here is the IPv6
+ // minimum MTU (1280 bytes) minus the overhead of an IPv6 header (40
+ // bytes) and a TCP header with timestamps (32 bytes).
+ tcpMSSEstimate = 1208
+
+ // recordSizeBoostThreshold is the number of bytes of application data
+ // sent after which the TLS record size will be increased to the
+ // maximum.
+ recordSizeBoostThreshold = 128 * 1024
+)
+
+// maxPayloadSizeForWrite returns the maximum TLS payload size to use for the
+// next application data record. There is the following trade-off:
+//
+// - For latency-sensitive applications, such as web browsing, each TLS
+// record should fit in one TCP segment.
+// - For throughput-sensitive applications, such as large file transfers,
+// larger TLS records better amortize framing and encryption overheads.
+//
+// A simple heuristic that works well in practice is to use small records for
+// the first 1MB of data (this code boosts earlier, after
+// recordSizeBoostThreshold, i.e. 128 KiB), then use larger records for
+// subsequent data, and reset back to smaller records after the connection
+// becomes idle. See "High
+// Performance Web Networking", Chapter 4, or:
+// https://www.igvita.com/2013/10/24/optimizing-tls-record-size-and-buffering-latency/
+//
+// In the interests of simplicity and determinism, this code does not attempt
+// to reset the record size once the connection is idle, however.
+func (c *Conn) maxPayloadSizeForWrite(typ recordType) int {
+ if c.config.DynamicRecordSizingDisabled || typ != recordTypeApplicationData {
+ return maxPlaintext
+ }
+
+ if c.bytesSent >= recordSizeBoostThreshold {
+ return maxPlaintext
+ }
+
+ // Subtract TLS overheads to get the maximum payload size.
+ payloadBytes := tcpMSSEstimate - recordHeaderLen - c.out.explicitNonceLen()
+ if c.out.cipher != nil {
+ switch ciph := c.out.cipher.(type) {
+ case cipher.Stream:
+ payloadBytes -= c.out.mac.Size()
+ case cipher.AEAD:
+ payloadBytes -= ciph.Overhead()
+ case cbcMode:
+ blockSize := ciph.BlockSize()
+ // The payload must fit in a multiple of blockSize, with
+ // room for at least one padding byte.
+ payloadBytes = (payloadBytes & ^(blockSize - 1)) - 1
+ // The MAC is appended before padding so affects the
+ // payload size directly.
+ payloadBytes -= c.out.mac.Size()
+ default:
+ panic("unknown cipher type")
+ }
+ }
+ if c.vers == VersionTLS13 {
+ payloadBytes-- // encrypted ContentType
+ }
+
+ // Allow packet growth in arithmetic progression up to max.
+ pkt := c.packetsSent
+ c.packetsSent++
+ if pkt > 1000 {
+ return maxPlaintext // avoid overflow in multiply below
+ }
+
+ n := payloadBytes * int(pkt+1)
+ if n > maxPlaintext {
+ n = maxPlaintext
+ }
+ return n
+}
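+
+// An illustrative sketch of the progression above: with a per-record budget
+// of roughly 1200 payload bytes, the first records carry about 1200, 2400,
+// 3600, ... bytes each, until maxPlaintext is reached or bytesSent passes
+// recordSizeBoostThreshold.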
+
+func (c *Conn) write(data []byte) (int, error) {
+ if c.buffering {
+ c.sendBuf = append(c.sendBuf, data...)
+ return len(data), nil
+ }
+
+ n, err := c.conn.Write(data)
+ c.bytesSent += int64(n)
+ return n, err
+}
+
+func (c *Conn) flush() (int, error) {
+ if len(c.sendBuf) == 0 {
+ return 0, nil
+ }
+
+ n, err := c.conn.Write(c.sendBuf)
+ c.bytesSent += int64(n)
+ c.sendBuf = nil
+ c.buffering = false
+ return n, err
+}
+
+// outBufPool pools the record-sized scratch buffers used by writeRecordLocked.
+var outBufPool = sync.Pool{
+ New: func() any {
+ return new([]byte)
+ },
+}
+
+// writeRecordLocked writes a TLS record with the given type and payload to the
+// connection and updates the record layer state.
+func (c *Conn) writeRecordLocked(typ recordType, data []byte) (int, error) {
+ outBufPtr := outBufPool.Get().(*[]byte)
+ outBuf := *outBufPtr
+ defer func() {
+ // You might be tempted to simplify this by just passing &outBuf to Put,
+ // but that would make the local copy of the outBuf slice header escape
+ // to the heap, causing an allocation. Instead, we keep around the
+ // pointer to the slice header returned by Get, which is already on the
+ // heap, and overwrite and return that.
+ *outBufPtr = outBuf
+ outBufPool.Put(outBufPtr)
+ }()
+
+ var n int
+ for len(data) > 0 {
+ m := len(data)
+ if maxPayload := c.maxPayloadSizeForWrite(typ); m > maxPayload {
+ m = maxPayload
+ }
+
+ _, outBuf = sliceForAppend(outBuf[:0], recordHeaderLen)
+ outBuf[0] = byte(typ)
+ vers := c.vers
+ if vers == 0 {
+ // Some TLS servers fail if the record version is
+ // greater than TLS 1.0 for the initial ClientHello.
+ vers = VersionTLS10
+ } else if vers == VersionTLS13 {
+ // TLS 1.3 froze the record layer version to 1.2.
+ // See RFC 8446, Section 5.1.
+ vers = VersionTLS12
+ }
+ outBuf[1] = byte(vers >> 8)
+ outBuf[2] = byte(vers)
+ outBuf[3] = byte(m >> 8)
+ outBuf[4] = byte(m)
+
+ var err error
+ outBuf, err = c.out.encrypt(outBuf, data[:m], c.config.rand())
+ if err != nil {
+ return n, err
+ }
+ if _, err := c.write(outBuf); err != nil {
+ return n, err
+ }
+ n += m
+ data = data[m:]
+ }
+
+ if typ == recordTypeChangeCipherSpec && c.vers != VersionTLS13 {
+ if err := c.out.changeCipherSpec(); err != nil {
+ return n, c.sendAlertLocked(err.(alert))
+ }
+ }
+
+ return n, nil
+}
+
+// writeRecord writes a TLS record with the given type and payload to the
+// connection and updates the record layer state.
+func (c *Conn) writeRecord(typ recordType, data []byte) (int, error) {
+ c.out.Lock()
+ defer c.out.Unlock()
+
+ return c.writeRecordLocked(typ, data)
+}
+
+// readHandshake reads the next handshake message from
+// the record layer.
+func (c *Conn) readHandshake() (any, error) {
+ for c.hand.Len() < 4 {
+ if err := c.readRecord(); err != nil {
+ return nil, err
+ }
+ }
+
+ data := c.hand.Bytes()
+ n := int(data[1])<<16 | int(data[2])<<8 | int(data[3])
+ if n > maxHandshake {
+ c.sendAlertLocked(alertInternalError)
+ return nil, c.in.setErrorLocked(fmt.Errorf("tls: handshake message of length %d bytes exceeds maximum of %d bytes", n, maxHandshake))
+ }
+ for c.hand.Len() < 4+n {
+ if err := c.readRecord(); err != nil {
+ return nil, err
+ }
+ }
+ data = c.hand.Next(4 + n)
+ var m handshakeMessage
+ switch data[0] {
+ case typeHelloRequest:
+ m = new(helloRequestMsg)
+ case typeClientHello:
+ m = new(clientHelloMsg)
+ case typeServerHello:
+ m = new(serverHelloMsg)
+ case typeNewSessionTicket:
+ if c.vers == VersionTLS13 {
+ m = new(newSessionTicketMsgTLS13)
+ } else {
+ m = new(newSessionTicketMsg)
+ }
+ case typeCertificate:
+ if c.vers == VersionTLS13 {
+ m = new(certificateMsgTLS13)
+ } else {
+ m = new(certificateMsg)
+ }
+ case typeCertificateRequest:
+ if c.vers == VersionTLS13 {
+ m = new(certificateRequestMsgTLS13)
+ } else {
+ m = &certificateRequestMsg{
+ hasSignatureAlgorithm: c.vers >= VersionTLS12,
+ }
+ }
+ case typeCertificateStatus:
+ m = new(certificateStatusMsg)
+ case typeServerKeyExchange:
+ m = new(serverKeyExchangeMsg)
+ case typeServerHelloDone:
+ m = new(serverHelloDoneMsg)
+ case typeClientKeyExchange:
+ m = new(clientKeyExchangeMsg)
+ case typeCertificateVerify:
+ m = &certificateVerifyMsg{
+ hasSignatureAlgorithm: c.vers >= VersionTLS12,
+ }
+ case typeFinished:
+ m = new(finishedMsg)
+ case typeEncryptedExtensions:
+ m = new(encryptedExtensionsMsg)
+ case typeEndOfEarlyData:
+ m = new(endOfEarlyDataMsg)
+ case typeKeyUpdate:
+ m = new(keyUpdateMsg)
+ default:
+ return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
+ }
+
+ // The handshake message unmarshalers
+ // expect to be able to keep references to data,
+ // so pass in a fresh copy that won't be overwritten.
+ data = append([]byte(nil), data...)
+
+ if !m.unmarshal(data) {
+ return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
+ }
+ return m, nil
+}
+
+var (
+ errShutdown = errors.New("tls: protocol is shutdown")
+)
+
+// Write writes data to the connection.
+//
+// As Write calls Handshake, in order to prevent indefinite blocking a deadline
+// must be set for both Read and Write before Write is called when the handshake
+// has not yet completed. See SetDeadline, SetReadDeadline, and
+// SetWriteDeadline.
+func (c *Conn) Write(b []byte) (int, error) {
+ // interlock with Close below
+ for {
+ x := atomic.LoadInt32(&c.activeCall)
+ if x&1 != 0 {
+ return 0, net.ErrClosed
+ }
+ if atomic.CompareAndSwapInt32(&c.activeCall, x, x+2) {
+ break
+ }
+ }
+ defer atomic.AddInt32(&c.activeCall, -2)
+
+ if err := c.Handshake(); err != nil {
+ return 0, err
+ }
+
+ c.out.Lock()
+ defer c.out.Unlock()
+
+ if err := c.out.err; err != nil {
+ return 0, err
+ }
+
+ if !c.handshakeComplete() {
+ return 0, alertInternalError
+ }
+
+ if c.closeNotifySent {
+ return 0, errShutdown
+ }
+
+ // TLS 1.0 is susceptible to a chosen-plaintext
+ // attack when using block mode ciphers due to predictable IVs.
+ // This can be prevented by splitting each Application Data
+ // record into two records, effectively randomizing the IV.
+ //
+ // https://www.openssl.org/~bodo/tls-cbc.txt
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=665814
+ // https://www.imperialviolet.org/2012/01/15/beastfollowup.html
+
+ var m int
+ if len(b) > 1 && c.vers == VersionTLS10 {
+ if _, ok := c.out.cipher.(cipher.BlockMode); ok {
+ n, err := c.writeRecordLocked(recordTypeApplicationData, b[:1])
+ if err != nil {
+ return n, c.out.setErrorLocked(err)
+ }
+ m, b = 1, b[1:]
+ }
+ }
+
+ n, err := c.writeRecordLocked(recordTypeApplicationData, b)
+ return n + m, c.out.setErrorLocked(err)
+}
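+
+// An illustrative sketch of bounding a Write that may trigger the handshake,
+// assuming an already-dialed net.Conn named rawConn:
+//
+//	conn := Client(rawConn, &Config{ServerName: "example.com"})
+//	conn.SetDeadline(time.Now().Add(10 * time.Second))
+//	if _, err := conn.Write([]byte("ping")); err != nil {
+//		log.Print(err)
+//	}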
+
+// handleRenegotiation processes a HelloRequest handshake message.
+func (c *Conn) handleRenegotiation() error {
+ if c.vers == VersionTLS13 {
+ return errors.New("tls: internal error: unexpected renegotiation")
+ }
+
+ msg, err := c.readHandshake()
+ if err != nil {
+ return err
+ }
+
+ helloReq, ok := msg.(*helloRequestMsg)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(helloReq, msg)
+ }
+
+ if !c.isClient {
+ return c.sendAlert(alertNoRenegotiation)
+ }
+
+ switch c.config.Renegotiation {
+ case RenegotiateNever:
+ return c.sendAlert(alertNoRenegotiation)
+ case RenegotiateOnceAsClient:
+ if c.handshakes > 1 {
+ return c.sendAlert(alertNoRenegotiation)
+ }
+ case RenegotiateFreelyAsClient:
+ // Ok.
+ default:
+ c.sendAlert(alertInternalError)
+ return errors.New("tls: unknown Renegotiation value")
+ }
+
+ c.handshakeMutex.Lock()
+ defer c.handshakeMutex.Unlock()
+
+ atomic.StoreUint32(&c.handshakeStatus, 0)
+ if c.handshakeErr = c.clientHandshake(context.Background()); c.handshakeErr == nil {
+ c.handshakes++
+ }
+ return c.handshakeErr
+}
+
+// handlePostHandshakeMessage processes a handshake message that arrives after
+// the handshake is complete. Up to TLS 1.2, such a message indicates the start
+// of a renegotiation.
+func (c *Conn) handlePostHandshakeMessage() error {
+ if c.vers != VersionTLS13 {
+ return c.handleRenegotiation()
+ }
+
+ msg, err := c.readHandshake()
+ if err != nil {
+ return err
+ }
+
+ c.retryCount++
+ if c.retryCount > maxUselessRecords {
+ c.sendAlert(alertUnexpectedMessage)
+ return c.in.setErrorLocked(errors.New("tls: too many non-advancing records"))
+ }
+
+ switch msg := msg.(type) {
+ case *newSessionTicketMsgTLS13:
+ return c.handleNewSessionTicket(msg)
+ case *keyUpdateMsg:
+ return c.handleKeyUpdate(msg)
+ default:
+ c.sendAlert(alertUnexpectedMessage)
+ return fmt.Errorf("tls: received unexpected handshake message of type %T", msg)
+ }
+}
+
+func (c *Conn) handleKeyUpdate(keyUpdate *keyUpdateMsg) error {
+ cipherSuite := cipherSuiteTLS13ByID(c.cipherSuite)
+ if cipherSuite == nil {
+ return c.in.setErrorLocked(c.sendAlert(alertInternalError))
+ }
+
+ newSecret := cipherSuite.nextTrafficSecret(c.in.trafficSecret)
+ c.in.setTrafficSecret(cipherSuite, newSecret)
+
+ if keyUpdate.updateRequested {
+ c.out.Lock()
+ defer c.out.Unlock()
+
+ msg := &keyUpdateMsg{}
+ _, err := c.writeRecordLocked(recordTypeHandshake, msg.marshal())
+ if err != nil {
+ // Surface the error at the next write.
+ c.out.setErrorLocked(err)
+ return nil
+ }
+
+ newSecret := cipherSuite.nextTrafficSecret(c.out.trafficSecret)
+ c.out.setTrafficSecret(cipherSuite, newSecret)
+ }
+
+ return nil
+}
+
+// Read reads data from the connection.
+//
+// As Read calls Handshake, in order to prevent indefinite blocking a deadline
+// must be set for both Read and Write before Read is called when the handshake
+// has not yet completed. See SetDeadline, SetReadDeadline, and
+// SetWriteDeadline.
+func (c *Conn) Read(b []byte) (int, error) {
+ if err := c.Handshake(); err != nil {
+ return 0, err
+ }
+ if len(b) == 0 {
+ // Put this after Handshake, in case people were calling
+ // Read(nil) for the side effect of the Handshake.
+ return 0, nil
+ }
+
+ c.in.Lock()
+ defer c.in.Unlock()
+
+ for c.input.Len() == 0 {
+ if err := c.readRecord(); err != nil {
+ return 0, err
+ }
+ for c.hand.Len() > 0 {
+ if err := c.handlePostHandshakeMessage(); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ n, _ := c.input.Read(b)
+
+ // If a close-notify alert is waiting, read it so that we can return (n,
+ // EOF) instead of (n, nil), to signal to the HTTP response reading
+ // goroutine that the connection is now closed. This eliminates a race
+ // where the HTTP response reading goroutine would otherwise not observe
+ // the EOF until its next read, by which time a client goroutine might
+ // have already tried to reuse the HTTP connection for a new request.
+ // See https://golang.org/cl/76400046 and https://golang.org/issue/3514
+ if n != 0 && c.input.Len() == 0 && c.rawInput.Len() > 0 &&
+ recordType(c.rawInput.Bytes()[0]) == recordTypeAlert {
+ if err := c.readRecord(); err != nil {
+ return n, err // will be io.EOF on closeNotify
+ }
+ }
+
+ return n, nil
+}
+
+// Close closes the connection.
+func (c *Conn) Close() error {
+ // Interlock with Conn.Write above.
+ var x int32
+ for {
+ x = atomic.LoadInt32(&c.activeCall)
+ if x&1 != 0 {
+ return net.ErrClosed
+ }
+ if atomic.CompareAndSwapInt32(&c.activeCall, x, x|1) {
+ break
+ }
+ }
+ if x != 0 {
+ // io.Writer and io.Closer should not be used concurrently.
+ // If Close is called while a Write is currently in-flight,
+ // interpret that as a sign that this Close is really just
+ // being used to break the Write and/or clean up resources and
+ // avoid sending the alertCloseNotify, which may block
+ // waiting on handshakeMutex or the c.out mutex.
+ return c.conn.Close()
+ }
+
+ var alertErr error
+ if c.handshakeComplete() {
+ if err := c.closeNotify(); err != nil {
+ alertErr = fmt.Errorf("tls: failed to send closeNotify alert (but connection was closed anyway): %w", err)
+ }
+ }
+
+ if err := c.conn.Close(); err != nil {
+ return err
+ }
+ return alertErr
+}
+
+var errEarlyCloseWrite = errors.New("tls: CloseWrite called before handshake complete")
+
+// CloseWrite shuts down the writing side of the connection. It should only be
+// called once the handshake has completed and does not call CloseWrite on the
+// underlying connection. Most callers should just use Close.
+func (c *Conn) CloseWrite() error {
+ if !c.handshakeComplete() {
+ return errEarlyCloseWrite
+ }
+
+ return c.closeNotify()
+}
+
+func (c *Conn) closeNotify() error {
+ c.out.Lock()
+ defer c.out.Unlock()
+
+ if !c.closeNotifySent {
+		// Set a write deadline to prevent possibly blocking forever.
+ c.SetWriteDeadline(time.Now().Add(time.Second * 5))
+ c.closeNotifyErr = c.sendAlertLocked(alertCloseNotify)
+ c.closeNotifySent = true
+ // Any subsequent writes will fail.
+ c.SetWriteDeadline(time.Now())
+ }
+ return c.closeNotifyErr
+}
+
+// Handshake runs the client or server handshake
+// protocol if it has not yet been run.
+//
+// Most uses of this package need not call Handshake explicitly: the
+// first Read or Write will call it automatically.
+//
+// For control over canceling or setting a timeout on a handshake, use
+// HandshakeContext or the Dialer's DialContext method instead.
+func (c *Conn) Handshake() error {
+ return c.HandshakeContext(context.Background())
+}
+
+// HandshakeContext runs the client or server handshake
+// protocol if it has not yet been run.
+//
+// The provided Context must be non-nil. If the context is canceled before
+// the handshake is complete, the handshake is interrupted and an error is returned.
+// Once the handshake has completed, cancellation of the context will not affect the
+// connection.
+//
+// Most uses of this package need not call HandshakeContext explicitly: the
+// first Read or Write will call it automatically.
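+//
+// A minimal usage sketch (illustrative; conn is assumed to be a *Conn):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	if err := conn.HandshakeContext(ctx); err != nil {
+//		// handshake failed or the timeout expired
+//	}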
+func (c *Conn) HandshakeContext(ctx context.Context) error {
+ // Delegate to unexported method for named return
+ // without confusing documented signature.
+ return c.handshakeContext(ctx)
+}
+
+func (c *Conn) handshakeContext(ctx context.Context) (ret error) {
+ // Fast sync/atomic-based exit if there is no handshake in flight and the
+ // last one succeeded without an error. Avoids the expensive context setup
+ // and mutex for most Read and Write calls.
+ if c.handshakeComplete() {
+ return nil
+ }
+
+ handshakeCtx, cancel := context.WithCancel(ctx)
+ // Note: defer this before starting the "interrupter" goroutine
+ // so that we can tell the difference between the input being canceled and
+ // this cancellation. In the former case, we need to close the connection.
+ defer cancel()
+
+ // Start the "interrupter" goroutine, if this context might be canceled.
+	// (The background context cannot be canceled.)
+ //
+ // The interrupter goroutine waits for the input context to be done and
+ // closes the connection if this happens before the function returns.
+ if ctx.Done() != nil {
+ done := make(chan struct{})
+ interruptRes := make(chan error, 1)
+ defer func() {
+ close(done)
+ if ctxErr := <-interruptRes; ctxErr != nil {
+ // Return context error to user.
+ ret = ctxErr
+ }
+ }()
+ go func() {
+ select {
+ case <-handshakeCtx.Done():
+ // Close the connection, discarding the error
+ _ = c.conn.Close()
+ interruptRes <- handshakeCtx.Err()
+ case <-done:
+ interruptRes <- nil
+ }
+ }()
+ }
+
+ c.handshakeMutex.Lock()
+ defer c.handshakeMutex.Unlock()
+
+ if err := c.handshakeErr; err != nil {
+ return err
+ }
+ if c.handshakeComplete() {
+ return nil
+ }
+
+ c.in.Lock()
+ defer c.in.Unlock()
+
+ c.handshakeErr = c.handshakeFn(handshakeCtx)
+ if c.handshakeErr == nil {
+ c.handshakes++
+ } else {
+ // If an error occurred during the handshake try to flush the
+ // alert that might be left in the buffer.
+ c.flush()
+ }
+
+ if c.handshakeErr == nil && !c.handshakeComplete() {
+ c.handshakeErr = errors.New("tls: internal error: handshake should have had a result")
+ }
+ if c.handshakeErr != nil && c.handshakeComplete() {
+ panic("tls: internal error: handshake returned an error but is marked successful")
+ }
+
+ return c.handshakeErr
+}
+
+// ConnectionState returns basic TLS details about the connection.
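+//
+// A small usage sketch (illustrative):
+//
+//	state := conn.ConnectionState()
+//	fmt.Printf("version=%x resumed=%v alpn=%q\n",
+//		state.Version, state.DidResume, state.NegotiatedProtocol)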
+func (c *Conn) ConnectionState() ConnectionState {
+ c.handshakeMutex.Lock()
+ defer c.handshakeMutex.Unlock()
+ return c.connectionStateLocked()
+}
+
+func (c *Conn) connectionStateLocked() ConnectionState {
+ var state ConnectionState
+ state.HandshakeComplete = c.handshakeComplete()
+ state.Version = c.vers
+ state.NegotiatedProtocol = c.clientProtocol
+ state.DidResume = c.didResume
+ state.NegotiatedProtocolIsMutual = true
+ state.ServerName = c.serverName
+ state.CipherSuite = c.cipherSuite
+ state.PeerCertificates = c.peerCertificates
+ state.VerifiedChains = c.verifiedChains
+ state.SignedCertificateTimestamps = c.scts
+ state.OCSPResponse = c.ocspResponse
+ if !c.didResume && c.vers != VersionTLS13 {
+ if c.clientFinishedIsFirst {
+ state.TLSUnique = c.clientFinished[:]
+ } else {
+ state.TLSUnique = c.serverFinished[:]
+ }
+ }
+ if c.config.Renegotiation != RenegotiateNever {
+ state.ekm = noExportedKeyingMaterial
+ } else {
+ state.ekm = c.ekm
+ }
+ return state
+}
+
+// OCSPResponse returns the stapled OCSP response from the TLS server, if
+// any. (Only valid for client connections.)
+func (c *Conn) OCSPResponse() []byte {
+ c.handshakeMutex.Lock()
+ defer c.handshakeMutex.Unlock()
+
+ return c.ocspResponse
+}
+
+// VerifyHostname checks that the peer certificate chain is valid for
+// connecting to host. If so, it returns nil; if not, it returns an error
+// describing the problem.
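+//
+// For example (illustrative; conn is a client *Conn whose handshake verified
+// the peer chain):
+//
+//	if err := conn.VerifyHostname("example.com"); err != nil {
+//		// chain is not valid for example.com
+//	}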
+func (c *Conn) VerifyHostname(host string) error {
+ c.handshakeMutex.Lock()
+ defer c.handshakeMutex.Unlock()
+ if !c.isClient {
+ return errors.New("tls: VerifyHostname called on TLS server connection")
+ }
+ if !c.handshakeComplete() {
+ return errors.New("tls: handshake has not yet been performed")
+ }
+ if len(c.verifiedChains) == 0 {
+ return errors.New("tls: handshake did not verify certificate chain")
+ }
+ return c.peerCertificates[0].VerifyHostname(host)
+}
+
+func (c *Conn) handshakeComplete() bool {
+ return atomic.LoadUint32(&c.handshakeStatus) == 1
+}
diff --git a/contrib/go/_std_1.19/src/crypto/tls/handshake_client.go b/contrib/go/_std_1.19/src/crypto/tls/handshake_client.go
new file mode 100644
index 0000000000..e61e3eb540
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/tls/handshake_client.go
@@ -0,0 +1,1017 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tls
+
+import (
+ "bytes"
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "crypto/subtle"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "net"
+ "strings"
+ "sync/atomic"
+ "time"
+)
+
+type clientHandshakeState struct {
+ c *Conn
+ ctx context.Context
+ serverHello *serverHelloMsg
+ hello *clientHelloMsg
+ suite *cipherSuite
+ finishedHash finishedHash
+ masterSecret []byte
+ session *ClientSessionState
+}
+
+var testingOnlyForceClientHelloSignatureAlgorithms []SignatureScheme
+
+func (c *Conn) makeClientHello() (*clientHelloMsg, ecdheParameters, error) {
+ config := c.config
+ if len(config.ServerName) == 0 && !config.InsecureSkipVerify {
+ return nil, nil, errors.New("tls: either ServerName or InsecureSkipVerify must be specified in the tls.Config")
+ }
+
+ nextProtosLength := 0
+ for _, proto := range config.NextProtos {
+ if l := len(proto); l == 0 || l > 255 {
+ return nil, nil, errors.New("tls: invalid NextProtos value")
+ } else {
+ nextProtosLength += 1 + l
+ }
+ }
+ if nextProtosLength > 0xffff {
+ return nil, nil, errors.New("tls: NextProtos values too large")
+ }
+
+ supportedVersions := config.supportedVersions(roleClient)
+ if len(supportedVersions) == 0 {
+ return nil, nil, errors.New("tls: no supported versions satisfy MinVersion and MaxVersion")
+ }
+
+ clientHelloVersion := config.maxSupportedVersion(roleClient)
+ // The version at the beginning of the ClientHello was capped at TLS 1.2
+ // for compatibility reasons. The supported_versions extension is used
+ // to negotiate versions now. See RFC 8446, Section 4.2.1.
+ if clientHelloVersion > VersionTLS12 {
+ clientHelloVersion = VersionTLS12
+ }
+
+ hello := &clientHelloMsg{
+ vers: clientHelloVersion,
+ compressionMethods: []uint8{compressionNone},
+ random: make([]byte, 32),
+ sessionId: make([]byte, 32),
+ ocspStapling: true,
+ scts: true,
+ serverName: hostnameInSNI(config.ServerName),
+ supportedCurves: config.curvePreferences(),
+ supportedPoints: []uint8{pointFormatUncompressed},
+ secureRenegotiationSupported: true,
+ alpnProtocols: config.NextProtos,
+ supportedVersions: supportedVersions,
+ }
+
+ if c.handshakes > 0 {
+ hello.secureRenegotiation = c.clientFinished[:]
+ }
+
+ preferenceOrder := cipherSuitesPreferenceOrder
+ if !hasAESGCMHardwareSupport {
+ preferenceOrder = cipherSuitesPreferenceOrderNoAES
+ }
+ configCipherSuites := config.cipherSuites()
+ hello.cipherSuites = make([]uint16, 0, len(configCipherSuites))
+
+ for _, suiteId := range preferenceOrder {
+ suite := mutualCipherSuite(configCipherSuites, suiteId)
+ if suite == nil {
+ continue
+ }
+ // Don't advertise TLS 1.2-only cipher suites unless
+ // we're attempting TLS 1.2.
+ if hello.vers < VersionTLS12 && suite.flags&suiteTLS12 != 0 {
+ continue
+ }
+ hello.cipherSuites = append(hello.cipherSuites, suiteId)
+ }
+
+ _, err := io.ReadFull(config.rand(), hello.random)
+ if err != nil {
+ return nil, nil, errors.New("tls: short read from Rand: " + err.Error())
+ }
+
+ // A random session ID is used to detect when the server accepted a ticket
+ // and is resuming a session (see RFC 5077). In TLS 1.3, it's always set as
+ // a compatibility measure (see RFC 8446, Section 4.1.2).
+ if _, err := io.ReadFull(config.rand(), hello.sessionId); err != nil {
+ return nil, nil, errors.New("tls: short read from Rand: " + err.Error())
+ }
+
+ if hello.vers >= VersionTLS12 {
+ hello.supportedSignatureAlgorithms = supportedSignatureAlgorithms()
+ }
+ if testingOnlyForceClientHelloSignatureAlgorithms != nil {
+ hello.supportedSignatureAlgorithms = testingOnlyForceClientHelloSignatureAlgorithms
+ }
+
+ var params ecdheParameters
+ if hello.supportedVersions[0] == VersionTLS13 {
+ if hasAESGCMHardwareSupport {
+ hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13...)
+ } else {
+ hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13NoAES...)
+ }
+
+ curveID := config.curvePreferences()[0]
+ if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
+ return nil, nil, errors.New("tls: CurvePreferences includes unsupported curve")
+ }
+ params, err = generateECDHEParameters(config.rand(), curveID)
+ if err != nil {
+ return nil, nil, err
+ }
+ hello.keyShares = []keyShare{{group: curveID, data: params.PublicKey()}}
+ }
+
+ return hello, params, nil
+}
+
+func (c *Conn) clientHandshake(ctx context.Context) (err error) {
+ if c.config == nil {
+ c.config = defaultConfig()
+ }
+
+ // This may be a renegotiation handshake, in which case some fields
+ // need to be reset.
+ c.didResume = false
+
+ hello, ecdheParams, err := c.makeClientHello()
+ if err != nil {
+ return err
+ }
+ c.serverName = hello.serverName
+
+ cacheKey, session, earlySecret, binderKey := c.loadSession(hello)
+ if cacheKey != "" && session != nil {
+ defer func() {
+ // If we got a handshake failure when resuming a session, throw away
+ // the session ticket. See RFC 5077, Section 3.2.
+ //
+ // RFC 8446 makes no mention of dropping tickets on failure, but it
+ // does require servers to abort on invalid binders, so we need to
+ // delete tickets to recover from a corrupted PSK.
+ if err != nil {
+ c.config.ClientSessionCache.Put(cacheKey, nil)
+ }
+ }()
+ }
+
+ if _, err := c.writeRecord(recordTypeHandshake, hello.marshal()); err != nil {
+ return err
+ }
+
+ msg, err := c.readHandshake()
+ if err != nil {
+ return err
+ }
+
+ serverHello, ok := msg.(*serverHelloMsg)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(serverHello, msg)
+ }
+
+ if err := c.pickTLSVersion(serverHello); err != nil {
+ return err
+ }
+
+ // If we are negotiating a protocol version that's lower than what we
+ // support, check for the server downgrade canaries.
+ // See RFC 8446, Section 4.1.3.
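+	// Per that section, the canary is the last eight bytes of the server
+	// random: the ASCII string "DOWNGRD" followed by 0x01 when the server
+	// negotiates TLS 1.2, or 0x00 for TLS 1.1 and below, which is what the
+	// comparisons against random[24:] check.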
+ maxVers := c.config.maxSupportedVersion(roleClient)
+ tls12Downgrade := string(serverHello.random[24:]) == downgradeCanaryTLS12
+ tls11Downgrade := string(serverHello.random[24:]) == downgradeCanaryTLS11
+ if maxVers == VersionTLS13 && c.vers <= VersionTLS12 && (tls12Downgrade || tls11Downgrade) ||
+ maxVers == VersionTLS12 && c.vers <= VersionTLS11 && tls11Downgrade {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: downgrade attempt detected, possibly due to a MitM attack or a broken middlebox")
+ }
+
+ if c.vers == VersionTLS13 {
+ hs := &clientHandshakeStateTLS13{
+ c: c,
+ ctx: ctx,
+ serverHello: serverHello,
+ hello: hello,
+ ecdheParams: ecdheParams,
+ session: session,
+ earlySecret: earlySecret,
+ binderKey: binderKey,
+ }
+
+ // In TLS 1.3, session tickets are delivered after the handshake.
+ return hs.handshake()
+ }
+
+ hs := &clientHandshakeState{
+ c: c,
+ ctx: ctx,
+ serverHello: serverHello,
+ hello: hello,
+ session: session,
+ }
+
+ if err := hs.handshake(); err != nil {
+ return err
+ }
+
+	// If we had a successful handshake and hs.session is different from
+	// the one already cached, cache the new one.
+ if cacheKey != "" && hs.session != nil && session != hs.session {
+ c.config.ClientSessionCache.Put(cacheKey, hs.session)
+ }
+
+ return nil
+}
+
+func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
+ session *ClientSessionState, earlySecret, binderKey []byte) {
+ if c.config.SessionTicketsDisabled || c.config.ClientSessionCache == nil {
+ return "", nil, nil, nil
+ }
+
+ hello.ticketSupported = true
+
+ if hello.supportedVersions[0] == VersionTLS13 {
+ // Require DHE on resumption as it guarantees forward secrecy against
+ // compromise of the session ticket key. See RFC 8446, Section 4.2.9.
+ hello.pskModes = []uint8{pskModeDHE}
+ }
+
+ // Session resumption is not allowed if renegotiating because
+ // renegotiation is primarily used to allow a client to send a client
+ // certificate, which would be skipped if session resumption occurred.
+ if c.handshakes != 0 {
+ return "", nil, nil, nil
+ }
+
+ // Try to resume a previously negotiated TLS session, if available.
+ cacheKey = clientSessionCacheKey(c.conn.RemoteAddr(), c.config)
+ session, ok := c.config.ClientSessionCache.Get(cacheKey)
+ if !ok || session == nil {
+ return cacheKey, nil, nil, nil
+ }
+
+ // Check that version used for the previous session is still valid.
+ versOk := false
+ for _, v := range hello.supportedVersions {
+ if v == session.vers {
+ versOk = true
+ break
+ }
+ }
+ if !versOk {
+ return cacheKey, nil, nil, nil
+ }
+
+ // Check that the cached server certificate is not expired, and that it's
+ // valid for the ServerName. This should be ensured by the cache key, but
+ // protect the application from a faulty ClientSessionCache implementation.
+ if !c.config.InsecureSkipVerify {
+ if len(session.verifiedChains) == 0 {
+ // The original connection had InsecureSkipVerify, while this doesn't.
+ return cacheKey, nil, nil, nil
+ }
+ serverCert := session.serverCertificates[0]
+ if c.config.time().After(serverCert.NotAfter) {
+ // Expired certificate, delete the entry.
+ c.config.ClientSessionCache.Put(cacheKey, nil)
+ return cacheKey, nil, nil, nil
+ }
+ if err := serverCert.VerifyHostname(c.config.ServerName); err != nil {
+ return cacheKey, nil, nil, nil
+ }
+ }
+
+ if session.vers != VersionTLS13 {
+ // In TLS 1.2 the cipher suite must match the resumed session. Ensure we
+ // are still offering it.
+ if mutualCipherSuite(hello.cipherSuites, session.cipherSuite) == nil {
+ return cacheKey, nil, nil, nil
+ }
+
+ hello.sessionTicket = session.sessionTicket
+ return
+ }
+
+ // Check that the session ticket is not expired.
+ if c.config.time().After(session.useBy) {
+ c.config.ClientSessionCache.Put(cacheKey, nil)
+ return cacheKey, nil, nil, nil
+ }
+
+ // In TLS 1.3 the KDF hash must match the resumed session. Ensure we
+ // offer at least one cipher suite with that hash.
+ cipherSuite := cipherSuiteTLS13ByID(session.cipherSuite)
+ if cipherSuite == nil {
+ return cacheKey, nil, nil, nil
+ }
+ cipherSuiteOk := false
+ for _, offeredID := range hello.cipherSuites {
+ offeredSuite := cipherSuiteTLS13ByID(offeredID)
+ if offeredSuite != nil && offeredSuite.hash == cipherSuite.hash {
+ cipherSuiteOk = true
+ break
+ }
+ }
+ if !cipherSuiteOk {
+ return cacheKey, nil, nil, nil
+ }
+
+ // Set the pre_shared_key extension. See RFC 8446, Section 4.2.11.1.
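+	// The obfuscated age is the ticket's age in milliseconds plus the
+	// server-provided ageAdd, wrapped modulo 2^32 by the uint32 arithmetic
+	// below.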
+ ticketAge := uint32(c.config.time().Sub(session.receivedAt) / time.Millisecond)
+ identity := pskIdentity{
+ label: session.sessionTicket,
+ obfuscatedTicketAge: ticketAge + session.ageAdd,
+ }
+ hello.pskIdentities = []pskIdentity{identity}
+ hello.pskBinders = [][]byte{make([]byte, cipherSuite.hash.Size())}
+
+ // Compute the PSK binders. See RFC 8446, Section 4.2.11.2.
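+	// Each binder is, in effect, an HMAC over the transcript of the
+	// ClientHello up to (but not including) the binders themselves, keyed
+	// with a secret derived from the resumption PSK; it proves possession
+	// of the PSK to the server.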
+ psk := cipherSuite.expandLabel(session.masterSecret, "resumption",
+ session.nonce, cipherSuite.hash.Size())
+ earlySecret = cipherSuite.extract(psk, nil)
+ binderKey = cipherSuite.deriveSecret(earlySecret, resumptionBinderLabel, nil)
+ transcript := cipherSuite.hash.New()
+ transcript.Write(hello.marshalWithoutBinders())
+ pskBinders := [][]byte{cipherSuite.finishedHash(binderKey, transcript)}
+ hello.updateBinders(pskBinders)
+
+ return
+}
+
+func (c *Conn) pickTLSVersion(serverHello *serverHelloMsg) error {
+ peerVersion := serverHello.vers
+ if serverHello.supportedVersion != 0 {
+ peerVersion = serverHello.supportedVersion
+ }
+
+ vers, ok := c.config.mutualVersion(roleClient, []uint16{peerVersion})
+ if !ok {
+ c.sendAlert(alertProtocolVersion)
+ return fmt.Errorf("tls: server selected unsupported protocol version %x", peerVersion)
+ }
+
+ c.vers = vers
+ c.haveVers = true
+ c.in.version = vers
+ c.out.version = vers
+
+ return nil
+}
+
+// Does the handshake, either a full one or a resumption of an old session.
+// Requires hs.c, hs.hello, hs.serverHello, and, optionally, hs.session to be set.
+func (hs *clientHandshakeState) handshake() error {
+ c := hs.c
+
+ isResume, err := hs.processServerHello()
+ if err != nil {
+ return err
+ }
+
+ hs.finishedHash = newFinishedHash(c.vers, hs.suite)
+
+ // No signatures of the handshake are needed in a resumption.
+ // Otherwise, in a full handshake, if we don't have any certificates
+ // configured then we will never send a CertificateVerify message and
+ // thus no signatures are needed in that case either.
+ if isResume || (len(c.config.Certificates) == 0 && c.config.GetClientCertificate == nil) {
+ hs.finishedHash.discardHandshakeBuffer()
+ }
+
+ hs.finishedHash.Write(hs.hello.marshal())
+ hs.finishedHash.Write(hs.serverHello.marshal())
+
+ c.buffering = true
+ c.didResume = isResume
+ if isResume {
+ if err := hs.establishKeys(); err != nil {
+ return err
+ }
+ if err := hs.readSessionTicket(); err != nil {
+ return err
+ }
+ if err := hs.readFinished(c.serverFinished[:]); err != nil {
+ return err
+ }
+ c.clientFinishedIsFirst = false
+ // Make sure the connection is still being verified whether or not this
+ // is a resumption. Resumptions currently don't reverify certificates so
+ // they don't call verifyServerCertificate. See Issue 31641.
+ if c.config.VerifyConnection != nil {
+ if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
+ c.sendAlert(alertBadCertificate)
+ return err
+ }
+ }
+ if err := hs.sendFinished(c.clientFinished[:]); err != nil {
+ return err
+ }
+ if _, err := c.flush(); err != nil {
+ return err
+ }
+ } else {
+ if err := hs.doFullHandshake(); err != nil {
+ return err
+ }
+ if err := hs.establishKeys(); err != nil {
+ return err
+ }
+ if err := hs.sendFinished(c.clientFinished[:]); err != nil {
+ return err
+ }
+ if _, err := c.flush(); err != nil {
+ return err
+ }
+ c.clientFinishedIsFirst = true
+ if err := hs.readSessionTicket(); err != nil {
+ return err
+ }
+ if err := hs.readFinished(c.serverFinished[:]); err != nil {
+ return err
+ }
+ }
+
+ c.ekm = ekmFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.hello.random, hs.serverHello.random)
+ atomic.StoreUint32(&c.handshakeStatus, 1)
+
+ return nil
+}
+
+func (hs *clientHandshakeState) pickCipherSuite() error {
+ if hs.suite = mutualCipherSuite(hs.hello.cipherSuites, hs.serverHello.cipherSuite); hs.suite == nil {
+ hs.c.sendAlert(alertHandshakeFailure)
+ return errors.New("tls: server chose an unconfigured cipher suite")
+ }
+
+ hs.c.cipherSuite = hs.suite.id
+ return nil
+}
+
+func (hs *clientHandshakeState) doFullHandshake() error {
+ c := hs.c
+
+ msg, err := c.readHandshake()
+ if err != nil {
+ return err
+ }
+ certMsg, ok := msg.(*certificateMsg)
+ if !ok || len(certMsg.certificates) == 0 {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(certMsg, msg)
+ }
+ hs.finishedHash.Write(certMsg.marshal())
+
+ msg, err = c.readHandshake()
+ if err != nil {
+ return err
+ }
+
+ cs, ok := msg.(*certificateStatusMsg)
+ if ok {
+	// RFC 4366 on Certificate Status Request:
+ // The server MAY return a "certificate_status" message.
+
+ if !hs.serverHello.ocspStapling {
+ // If a server returns a "CertificateStatus" message, then the
+ // server MUST have included an extension of type "status_request"
+ // with empty "extension_data" in the extended server hello.
+
+ c.sendAlert(alertUnexpectedMessage)
+ return errors.New("tls: received unexpected CertificateStatus message")
+ }
+ hs.finishedHash.Write(cs.marshal())
+
+ c.ocspResponse = cs.response
+
+ msg, err = c.readHandshake()
+ if err != nil {
+ return err
+ }
+ }
+
+ if c.handshakes == 0 {
+ // If this is the first handshake on a connection, process and
+ // (optionally) verify the server's certificates.
+ if err := c.verifyServerCertificate(certMsg.certificates); err != nil {
+ return err
+ }
+ } else {
+ // This is a renegotiation handshake. We require that the
+ // server's identity (i.e. leaf certificate) is unchanged and
+ // thus any previous trust decision is still valid.
+ //
+ // See https://mitls.org/pages/attacks/3SHAKE for the
+ // motivation behind this requirement.
+ if !bytes.Equal(c.peerCertificates[0].Raw, certMsg.certificates[0]) {
+ c.sendAlert(alertBadCertificate)
+ return errors.New("tls: server's identity changed during renegotiation")
+ }
+ }
+
+ keyAgreement := hs.suite.ka(c.vers)
+
+ skx, ok := msg.(*serverKeyExchangeMsg)
+ if ok {
+ hs.finishedHash.Write(skx.marshal())
+ err = keyAgreement.processServerKeyExchange(c.config, hs.hello, hs.serverHello, c.peerCertificates[0], skx)
+ if err != nil {
+ c.sendAlert(alertUnexpectedMessage)
+ return err
+ }
+
+ msg, err = c.readHandshake()
+ if err != nil {
+ return err
+ }
+ }
+
+ var chainToSend *Certificate
+ var certRequested bool
+ certReq, ok := msg.(*certificateRequestMsg)
+ if ok {
+ certRequested = true
+ hs.finishedHash.Write(certReq.marshal())
+
+ cri := certificateRequestInfoFromMsg(hs.ctx, c.vers, certReq)
+ if chainToSend, err = c.getClientCertificate(cri); err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+
+ msg, err = c.readHandshake()
+ if err != nil {
+ return err
+ }
+ }
+
+ shd, ok := msg.(*serverHelloDoneMsg)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(shd, msg)
+ }
+ hs.finishedHash.Write(shd.marshal())
+
+ // If the server requested a certificate then we have to send a
+ // Certificate message, even if it's empty because we don't have a
+ // certificate to send.
+ if certRequested {
+ certMsg = new(certificateMsg)
+ certMsg.certificates = chainToSend.Certificate
+ hs.finishedHash.Write(certMsg.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
+ return err
+ }
+ }
+
+ preMasterSecret, ckx, err := keyAgreement.generateClientKeyExchange(c.config, hs.hello, c.peerCertificates[0])
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+ if ckx != nil {
+ hs.finishedHash.Write(ckx.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, ckx.marshal()); err != nil {
+ return err
+ }
+ }
+
+ if chainToSend != nil && len(chainToSend.Certificate) > 0 {
+ certVerify := &certificateVerifyMsg{}
+
+ key, ok := chainToSend.PrivateKey.(crypto.Signer)
+ if !ok {
+ c.sendAlert(alertInternalError)
+ return fmt.Errorf("tls: client certificate private key of type %T does not implement crypto.Signer", chainToSend.PrivateKey)
+ }
+
+ var sigType uint8
+ var sigHash crypto.Hash
+ if c.vers >= VersionTLS12 {
+ signatureAlgorithm, err := selectSignatureScheme(c.vers, chainToSend, certReq.supportedSignatureAlgorithms)
+ if err != nil {
+ c.sendAlert(alertIllegalParameter)
+ return err
+ }
+ sigType, sigHash, err = typeAndHashFromSignatureScheme(signatureAlgorithm)
+ if err != nil {
+ return c.sendAlert(alertInternalError)
+ }
+ certVerify.hasSignatureAlgorithm = true
+ certVerify.signatureAlgorithm = signatureAlgorithm
+ } else {
+ sigType, sigHash, err = legacyTypeAndHashFromPublicKey(key.Public())
+ if err != nil {
+ c.sendAlert(alertIllegalParameter)
+ return err
+ }
+ }
+
+ signed := hs.finishedHash.hashForClientCertificate(sigType, sigHash, hs.masterSecret)
+ signOpts := crypto.SignerOpts(sigHash)
+ if sigType == signatureRSAPSS {
+ signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}
+ }
+ certVerify.signature, err = key.Sign(c.config.rand(), signed, signOpts)
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+
+ hs.finishedHash.Write(certVerify.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, certVerify.marshal()); err != nil {
+ return err
+ }
+ }
+
+ hs.masterSecret = masterFromPreMasterSecret(c.vers, hs.suite, preMasterSecret, hs.hello.random, hs.serverHello.random)
+ if err := c.config.writeKeyLog(keyLogLabelTLS12, hs.hello.random, hs.masterSecret); err != nil {
+ c.sendAlert(alertInternalError)
+ return errors.New("tls: failed to write to key log: " + err.Error())
+ }
+
+ hs.finishedHash.discardHandshakeBuffer()
+
+ return nil
+}
+
+func (hs *clientHandshakeState) establishKeys() error {
+ c := hs.c
+
+ clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV :=
+ keysFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.hello.random, hs.serverHello.random, hs.suite.macLen, hs.suite.keyLen, hs.suite.ivLen)
+ var clientCipher, serverCipher any
+ var clientHash, serverHash hash.Hash
+ if hs.suite.cipher != nil {
+ clientCipher = hs.suite.cipher(clientKey, clientIV, false /* not for reading */)
+ clientHash = hs.suite.mac(clientMAC)
+ serverCipher = hs.suite.cipher(serverKey, serverIV, true /* for reading */)
+ serverHash = hs.suite.mac(serverMAC)
+ } else {
+ clientCipher = hs.suite.aead(clientKey, clientIV)
+ serverCipher = hs.suite.aead(serverKey, serverIV)
+ }
+
+ c.in.prepareCipherSpec(c.vers, serverCipher, serverHash)
+ c.out.prepareCipherSpec(c.vers, clientCipher, clientHash)
+ return nil
+}
+
+func (hs *clientHandshakeState) serverResumedSession() bool {
+	// If the server responded with the same sessionId, it means the
+	// sessionTicket is being used to resume a TLS session.
+ return hs.session != nil && hs.hello.sessionId != nil &&
+ bytes.Equal(hs.serverHello.sessionId, hs.hello.sessionId)
+}
+
+func (hs *clientHandshakeState) processServerHello() (bool, error) {
+ c := hs.c
+
+ if err := hs.pickCipherSuite(); err != nil {
+ return false, err
+ }
+
+ if hs.serverHello.compressionMethod != compressionNone {
+ c.sendAlert(alertUnexpectedMessage)
+ return false, errors.New("tls: server selected unsupported compression format")
+ }
+
+ if c.handshakes == 0 && hs.serverHello.secureRenegotiationSupported {
+ c.secureRenegotiation = true
+ if len(hs.serverHello.secureRenegotiation) != 0 {
+ c.sendAlert(alertHandshakeFailure)
+ return false, errors.New("tls: initial handshake had non-empty renegotiation extension")
+ }
+ }
+
+ if c.handshakes > 0 && c.secureRenegotiation {
+ var expectedSecureRenegotiation [24]byte
+ copy(expectedSecureRenegotiation[:], c.clientFinished[:])
+ copy(expectedSecureRenegotiation[12:], c.serverFinished[:])
+ if !bytes.Equal(hs.serverHello.secureRenegotiation, expectedSecureRenegotiation[:]) {
+ c.sendAlert(alertHandshakeFailure)
+ return false, errors.New("tls: incorrect renegotiation extension contents")
+ }
+ }
+
+ if err := checkALPN(hs.hello.alpnProtocols, hs.serverHello.alpnProtocol); err != nil {
+ c.sendAlert(alertUnsupportedExtension)
+ return false, err
+ }
+ c.clientProtocol = hs.serverHello.alpnProtocol
+
+ c.scts = hs.serverHello.scts
+
+ if !hs.serverResumedSession() {
+ return false, nil
+ }
+
+ if hs.session.vers != c.vers {
+ c.sendAlert(alertHandshakeFailure)
+ return false, errors.New("tls: server resumed a session with a different version")
+ }
+
+ if hs.session.cipherSuite != hs.suite.id {
+ c.sendAlert(alertHandshakeFailure)
+ return false, errors.New("tls: server resumed a session with a different cipher suite")
+ }
+
+ // Restore masterSecret, peerCerts, and ocspResponse from previous state
+ hs.masterSecret = hs.session.masterSecret
+ c.peerCertificates = hs.session.serverCertificates
+ c.verifiedChains = hs.session.verifiedChains
+ c.ocspResponse = hs.session.ocspResponse
+ // Let the ServerHello SCTs override the session SCTs from the original
+ // connection, if any are provided
+ if len(c.scts) == 0 && len(hs.session.scts) != 0 {
+ c.scts = hs.session.scts
+ }
+
+ return true, nil
+}
+
+// checkALPN ensures that the server's choice of ALPN protocol is compatible with
+// the protocols that we advertised in the Client Hello.
+func checkALPN(clientProtos []string, serverProto string) error {
+ if serverProto == "" {
+ return nil
+ }
+ if len(clientProtos) == 0 {
+ return errors.New("tls: server advertised unrequested ALPN extension")
+ }
+ for _, proto := range clientProtos {
+ if proto == serverProto {
+ return nil
+ }
+ }
+ return errors.New("tls: server selected unadvertised ALPN protocol")
+}
+
+func (hs *clientHandshakeState) readFinished(out []byte) error {
+ c := hs.c
+
+ if err := c.readChangeCipherSpec(); err != nil {
+ return err
+ }
+
+ msg, err := c.readHandshake()
+ if err != nil {
+ return err
+ }
+ serverFinished, ok := msg.(*finishedMsg)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(serverFinished, msg)
+ }
+
+ verify := hs.finishedHash.serverSum(hs.masterSecret)
+ if len(verify) != len(serverFinished.verifyData) ||
+ subtle.ConstantTimeCompare(verify, serverFinished.verifyData) != 1 {
+ c.sendAlert(alertHandshakeFailure)
+ return errors.New("tls: server's Finished message was incorrect")
+ }
+ hs.finishedHash.Write(serverFinished.marshal())
+ copy(out, verify)
+ return nil
+}
+
+func (hs *clientHandshakeState) readSessionTicket() error {
+ if !hs.serverHello.ticketSupported {
+ return nil
+ }
+
+ c := hs.c
+ msg, err := c.readHandshake()
+ if err != nil {
+ return err
+ }
+ sessionTicketMsg, ok := msg.(*newSessionTicketMsg)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(sessionTicketMsg, msg)
+ }
+ hs.finishedHash.Write(sessionTicketMsg.marshal())
+
+ hs.session = &ClientSessionState{
+ sessionTicket: sessionTicketMsg.ticket,
+ vers: c.vers,
+ cipherSuite: hs.suite.id,
+ masterSecret: hs.masterSecret,
+ serverCertificates: c.peerCertificates,
+ verifiedChains: c.verifiedChains,
+ receivedAt: c.config.time(),
+ ocspResponse: c.ocspResponse,
+ scts: c.scts,
+ }
+
+ return nil
+}
+
+func (hs *clientHandshakeState) sendFinished(out []byte) error {
+ c := hs.c
+
+ if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
+ return err
+ }
+
+ finished := new(finishedMsg)
+ finished.verifyData = hs.finishedHash.clientSum(hs.masterSecret)
+ hs.finishedHash.Write(finished.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
+ return err
+ }
+ copy(out, finished.verifyData)
+ return nil
+}
+
+// verifyServerCertificate parses and verifies the provided chain, setting
+// c.verifiedChains and c.peerCertificates or sending the appropriate alert.
+func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
+ certs := make([]*x509.Certificate, len(certificates))
+ for i, asn1Data := range certificates {
+ cert, err := x509.ParseCertificate(asn1Data)
+ if err != nil {
+ c.sendAlert(alertBadCertificate)
+ return errors.New("tls: failed to parse certificate from server: " + err.Error())
+ }
+ certs[i] = cert
+ }
+
+ if !c.config.InsecureSkipVerify {
+ opts := x509.VerifyOptions{
+ Roots: c.config.RootCAs,
+ CurrentTime: c.config.time(),
+ DNSName: c.config.ServerName,
+ Intermediates: x509.NewCertPool(),
+ }
+
+ for _, cert := range certs[1:] {
+ opts.Intermediates.AddCert(cert)
+ }
+ var err error
+ c.verifiedChains, err = certs[0].Verify(opts)
+ if err != nil {
+ c.sendAlert(alertBadCertificate)
+ return err
+ }
+ }
+
+ switch certs[0].PublicKey.(type) {
+ case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey:
+ break
+ default:
+ c.sendAlert(alertUnsupportedCertificate)
+ return fmt.Errorf("tls: server's certificate contains an unsupported type of public key: %T", certs[0].PublicKey)
+ }
+
+ c.peerCertificates = certs
+
+ if c.config.VerifyPeerCertificate != nil {
+ if err := c.config.VerifyPeerCertificate(certificates, c.verifiedChains); err != nil {
+ c.sendAlert(alertBadCertificate)
+ return err
+ }
+ }
+
+ if c.config.VerifyConnection != nil {
+ if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
+ c.sendAlert(alertBadCertificate)
+ return err
+ }
+ }
+
+ return nil
+}
+
+// certificateRequestInfoFromMsg generates a CertificateRequestInfo from a TLS
+// <= 1.2 CertificateRequest, making an effort to fill in missing information.
+func certificateRequestInfoFromMsg(ctx context.Context, vers uint16, certReq *certificateRequestMsg) *CertificateRequestInfo {
+ cri := &CertificateRequestInfo{
+ AcceptableCAs: certReq.certificateAuthorities,
+ Version: vers,
+ ctx: ctx,
+ }
+
+ var rsaAvail, ecAvail bool
+ for _, certType := range certReq.certificateTypes {
+ switch certType {
+ case certTypeRSASign:
+ rsaAvail = true
+ case certTypeECDSASign:
+ ecAvail = true
+ }
+ }
+
+ if !certReq.hasSignatureAlgorithm {
+ // Prior to TLS 1.2, signature schemes did not exist. In this case we
+ // make up a list based on the acceptable certificate types, to help
+ // GetClientCertificate and SupportsCertificate select the right certificate.
+ // The hash part of the SignatureScheme is a lie here, because
+ // TLS 1.0 and 1.1 always use MD5+SHA1 for RSA and SHA1 for ECDSA.
+ switch {
+ case rsaAvail && ecAvail:
+ cri.SignatureSchemes = []SignatureScheme{
+ ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512,
+ PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512, PKCS1WithSHA1,
+ }
+ case rsaAvail:
+ cri.SignatureSchemes = []SignatureScheme{
+ PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512, PKCS1WithSHA1,
+ }
+ case ecAvail:
+ cri.SignatureSchemes = []SignatureScheme{
+ ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512,
+ }
+ }
+ return cri
+ }
+
+ // Filter the signature schemes based on the certificate types.
+ // See RFC 5246, Section 7.4.4 (where it calls this "somewhat complicated").
+ cri.SignatureSchemes = make([]SignatureScheme, 0, len(certReq.supportedSignatureAlgorithms))
+ for _, sigScheme := range certReq.supportedSignatureAlgorithms {
+ sigType, _, err := typeAndHashFromSignatureScheme(sigScheme)
+ if err != nil {
+ continue
+ }
+ switch sigType {
+ case signatureECDSA, signatureEd25519:
+ if ecAvail {
+ cri.SignatureSchemes = append(cri.SignatureSchemes, sigScheme)
+ }
+ case signatureRSAPSS, signaturePKCS1v15:
+ if rsaAvail {
+ cri.SignatureSchemes = append(cri.SignatureSchemes, sigScheme)
+ }
+ }
+ }
+
+ return cri
+}
+
+func (c *Conn) getClientCertificate(cri *CertificateRequestInfo) (*Certificate, error) {
+ if c.config.GetClientCertificate != nil {
+ return c.config.GetClientCertificate(cri)
+ }
+
+ for _, chain := range c.config.Certificates {
+ if err := cri.SupportsCertificate(&chain); err != nil {
+ continue
+ }
+ return &chain, nil
+ }
+
+ // No acceptable certificate found. Don't send a certificate.
+ return new(Certificate), nil
+}
+
+// clientSessionCacheKey returns a key used to cache sessionTickets that could
+// be used to resume previously negotiated TLS sessions with a server.
+func clientSessionCacheKey(serverAddr net.Addr, config *Config) string {
+ if len(config.ServerName) > 0 {
+ return config.ServerName
+ }
+ return serverAddr.String()
+}
+
+// hostnameInSNI converts name into an appropriate hostname for SNI.
+// Literal IP addresses and absolute FQDNs are not permitted as SNI values.
+// See RFC 6066, Section 3.
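+//
+// For example (illustrative, per the rules below):
+//
+//	hostnameInSNI("example.com.")  // "example.com" (trailing dots stripped)
+//	hostnameInSNI("[2001:db8::1]") // "" (literal IPs are never sent in SNI)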
+func hostnameInSNI(name string) string {
+ host := name
+ if len(host) > 0 && host[0] == '[' && host[len(host)-1] == ']' {
+ host = host[1 : len(host)-1]
+ }
+ if i := strings.LastIndex(host, "%"); i > 0 {
+ host = host[:i]
+ }
+ if net.ParseIP(host) != nil {
+ return ""
+ }
+ for len(name) > 0 && name[len(name)-1] == '.' {
+ name = name[:len(name)-1]
+ }
+ return name
+}
diff --git a/contrib/go/_std_1.19/src/crypto/tls/handshake_client_tls13.go b/contrib/go/_std_1.19/src/crypto/tls/handshake_client_tls13.go
new file mode 100644
index 0000000000..c7989867f5
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/tls/handshake_client_tls13.go
@@ -0,0 +1,686 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tls
+
+import (
+ "bytes"
+ "context"
+ "crypto"
+ "crypto/hmac"
+ "crypto/rsa"
+ "errors"
+ "hash"
+ "sync/atomic"
+ "time"
+)
+
+type clientHandshakeStateTLS13 struct {
+ c *Conn
+ ctx context.Context
+ serverHello *serverHelloMsg
+ hello *clientHelloMsg
+ ecdheParams ecdheParameters
+
+ session *ClientSessionState
+ earlySecret []byte
+ binderKey []byte
+
+ certReq *certificateRequestMsgTLS13
+ usingPSK bool
+ sentDummyCCS bool
+ suite *cipherSuiteTLS13
+ transcript hash.Hash
+ masterSecret []byte
+ trafficSecret []byte // client_application_traffic_secret_0
+}
+
+// handshake requires hs.c, hs.hello, hs.serverHello, hs.ecdheParams, and,
+// optionally, hs.session, hs.earlySecret and hs.binderKey to be set.
+func (hs *clientHandshakeStateTLS13) handshake() error {
+ c := hs.c
+
+ if needFIPS() {
+ return errors.New("tls: internal error: TLS 1.3 reached in FIPS mode")
+ }
+
+ // The server must not select TLS 1.3 in a renegotiation. See RFC 8446,
+ // sections 4.1.2 and 4.1.3.
+ if c.handshakes > 0 {
+ c.sendAlert(alertProtocolVersion)
+ return errors.New("tls: server selected TLS 1.3 in a renegotiation")
+ }
+
+ // Consistency check on the presence of a keyShare and its parameters.
+ if hs.ecdheParams == nil || len(hs.hello.keyShares) != 1 {
+ return c.sendAlert(alertInternalError)
+ }
+
+ if err := hs.checkServerHelloOrHRR(); err != nil {
+ return err
+ }
+
+ hs.transcript = hs.suite.hash.New()
+ hs.transcript.Write(hs.hello.marshal())
+
+ if bytes.Equal(hs.serverHello.random, helloRetryRequestRandom) {
+ if err := hs.sendDummyChangeCipherSpec(); err != nil {
+ return err
+ }
+ if err := hs.processHelloRetryRequest(); err != nil {
+ return err
+ }
+ }
+
+ hs.transcript.Write(hs.serverHello.marshal())
+
+ c.buffering = true
+ if err := hs.processServerHello(); err != nil {
+ return err
+ }
+ if err := hs.sendDummyChangeCipherSpec(); err != nil {
+ return err
+ }
+ if err := hs.establishHandshakeKeys(); err != nil {
+ return err
+ }
+ if err := hs.readServerParameters(); err != nil {
+ return err
+ }
+ if err := hs.readServerCertificate(); err != nil {
+ return err
+ }
+ if err := hs.readServerFinished(); err != nil {
+ return err
+ }
+ if err := hs.sendClientCertificate(); err != nil {
+ return err
+ }
+ if err := hs.sendClientFinished(); err != nil {
+ return err
+ }
+ if _, err := c.flush(); err != nil {
+ return err
+ }
+
+ atomic.StoreUint32(&c.handshakeStatus, 1)
+
+ return nil
+}
+
+// checkServerHelloOrHRR does validity checks that apply to both ServerHello and
+// HelloRetryRequest messages. It sets hs.suite.
+func (hs *clientHandshakeStateTLS13) checkServerHelloOrHRR() error {
+ c := hs.c
+
+ if hs.serverHello.supportedVersion == 0 {
+ c.sendAlert(alertMissingExtension)
+ return errors.New("tls: server selected TLS 1.3 using the legacy version field")
+ }
+
+ if hs.serverHello.supportedVersion != VersionTLS13 {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: server selected an invalid version after a HelloRetryRequest")
+ }
+
+ if hs.serverHello.vers != VersionTLS12 {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: server sent an incorrect legacy version")
+ }
+
+ if hs.serverHello.ocspStapling ||
+ hs.serverHello.ticketSupported ||
+ hs.serverHello.secureRenegotiationSupported ||
+ len(hs.serverHello.secureRenegotiation) != 0 ||
+ len(hs.serverHello.alpnProtocol) != 0 ||
+ len(hs.serverHello.scts) != 0 {
+ c.sendAlert(alertUnsupportedExtension)
+ return errors.New("tls: server sent a ServerHello extension forbidden in TLS 1.3")
+ }
+
+ if !bytes.Equal(hs.hello.sessionId, hs.serverHello.sessionId) {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: server did not echo the legacy session ID")
+ }
+
+ if hs.serverHello.compressionMethod != compressionNone {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: server selected unsupported compression format")
+ }
+
+ selectedSuite := mutualCipherSuiteTLS13(hs.hello.cipherSuites, hs.serverHello.cipherSuite)
+ if hs.suite != nil && selectedSuite != hs.suite {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: server changed cipher suite after a HelloRetryRequest")
+ }
+ if selectedSuite == nil {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: server chose an unconfigured cipher suite")
+ }
+ hs.suite = selectedSuite
+ c.cipherSuite = hs.suite.id
+
+ return nil
+}
+
+// sendDummyChangeCipherSpec sends a ChangeCipherSpec record for compatibility
+// with middleboxes that didn't implement TLS correctly. See RFC 8446, Appendix D.4.
+func (hs *clientHandshakeStateTLS13) sendDummyChangeCipherSpec() error {
+ if hs.sentDummyCCS {
+ return nil
+ }
+ hs.sentDummyCCS = true
+
+ _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
+ return err
+}
+
+// processHelloRetryRequest handles the HRR in hs.serverHello, modifies and
+// resends hs.hello, and reads the new ServerHello into hs.serverHello.
+func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error {
+ c := hs.c
+
+ // The first ClientHello gets double-hashed into the transcript upon a
+ // HelloRetryRequest. (The idea is that the server might offload transcript
+ // storage to the client in the cookie.) See RFC 8446, Section 4.4.1.
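+	// The synthetic message written below is the message_hash handshake
+	// type (254) followed by a three-byte length and Hash(ClientHello1).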
+ chHash := hs.transcript.Sum(nil)
+ hs.transcript.Reset()
+ hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
+ hs.transcript.Write(chHash)
+ hs.transcript.Write(hs.serverHello.marshal())
+
+ // The only HelloRetryRequest extensions we support are key_share and
+ // cookie, and clients must abort the handshake if the HRR would not result
+ // in any change in the ClientHello.
+ if hs.serverHello.selectedGroup == 0 && hs.serverHello.cookie == nil {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: server sent an unnecessary HelloRetryRequest message")
+ }
+
+ if hs.serverHello.cookie != nil {
+ hs.hello.cookie = hs.serverHello.cookie
+ }
+
+ if hs.serverHello.serverShare.group != 0 {
+ c.sendAlert(alertDecodeError)
+ return errors.New("tls: received malformed key_share extension")
+ }
+
+ // If the server sent a key_share extension selecting a group, ensure it's
+ // a group we advertised but did not send a key share for, and send a key
+ // share for it this time.
+ if curveID := hs.serverHello.selectedGroup; curveID != 0 {
+ curveOK := false
+ for _, id := range hs.hello.supportedCurves {
+ if id == curveID {
+ curveOK = true
+ break
+ }
+ }
+ if !curveOK {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: server selected unsupported group")
+ }
+ if hs.ecdheParams.CurveID() == curveID {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: server sent an unnecessary HelloRetryRequest key_share")
+ }
+ if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
+ c.sendAlert(alertInternalError)
+ return errors.New("tls: CurvePreferences includes unsupported curve")
+ }
+ params, err := generateECDHEParameters(c.config.rand(), curveID)
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+ hs.ecdheParams = params
+ hs.hello.keyShares = []keyShare{{group: curveID, data: params.PublicKey()}}
+ }
+
+ hs.hello.raw = nil
+ if len(hs.hello.pskIdentities) > 0 {
+ pskSuite := cipherSuiteTLS13ByID(hs.session.cipherSuite)
+ if pskSuite == nil {
+ return c.sendAlert(alertInternalError)
+ }
+ if pskSuite.hash == hs.suite.hash {
+ // Update binders and obfuscated_ticket_age.
+ ticketAge := uint32(c.config.time().Sub(hs.session.receivedAt) / time.Millisecond)
+ hs.hello.pskIdentities[0].obfuscatedTicketAge = ticketAge + hs.session.ageAdd
+
+ transcript := hs.suite.hash.New()
+ transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
+ transcript.Write(chHash)
+ transcript.Write(hs.serverHello.marshal())
+ transcript.Write(hs.hello.marshalWithoutBinders())
+ pskBinders := [][]byte{hs.suite.finishedHash(hs.binderKey, transcript)}
+ hs.hello.updateBinders(pskBinders)
+ } else {
+ // Server selected a cipher suite incompatible with the PSK.
+ hs.hello.pskIdentities = nil
+ hs.hello.pskBinders = nil
+ }
+ }
+
+ hs.transcript.Write(hs.hello.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
+ return err
+ }
+
+ msg, err := c.readHandshake()
+ if err != nil {
+ return err
+ }
+
+ serverHello, ok := msg.(*serverHelloMsg)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(serverHello, msg)
+ }
+ hs.serverHello = serverHello
+
+ if err := hs.checkServerHelloOrHRR(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (hs *clientHandshakeStateTLS13) processServerHello() error {
+ c := hs.c
+
+ if bytes.Equal(hs.serverHello.random, helloRetryRequestRandom) {
+ c.sendAlert(alertUnexpectedMessage)
+ return errors.New("tls: server sent two HelloRetryRequest messages")
+ }
+
+ if len(hs.serverHello.cookie) != 0 {
+ c.sendAlert(alertUnsupportedExtension)
+ return errors.New("tls: server sent a cookie in a normal ServerHello")
+ }
+
+ if hs.serverHello.selectedGroup != 0 {
+ c.sendAlert(alertDecodeError)
+ return errors.New("tls: malformed key_share extension")
+ }
+
+ if hs.serverHello.serverShare.group == 0 {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: server did not send a key share")
+ }
+ if hs.serverHello.serverShare.group != hs.ecdheParams.CurveID() {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: server selected unsupported group")
+ }
+
+ if !hs.serverHello.selectedIdentityPresent {
+ return nil
+ }
+
+ if int(hs.serverHello.selectedIdentity) >= len(hs.hello.pskIdentities) {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: server selected an invalid PSK")
+ }
+
+ if len(hs.hello.pskIdentities) != 1 || hs.session == nil {
+ return c.sendAlert(alertInternalError)
+ }
+ pskSuite := cipherSuiteTLS13ByID(hs.session.cipherSuite)
+ if pskSuite == nil {
+ return c.sendAlert(alertInternalError)
+ }
+ if pskSuite.hash != hs.suite.hash {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: server selected an invalid PSK and cipher suite pair")
+ }
+
+ hs.usingPSK = true
+ c.didResume = true
+ c.peerCertificates = hs.session.serverCertificates
+ c.verifiedChains = hs.session.verifiedChains
+ c.ocspResponse = hs.session.ocspResponse
+ c.scts = hs.session.scts
+ return nil
+}
+
+func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error {
+ c := hs.c
+
+ sharedKey := hs.ecdheParams.SharedKey(hs.serverHello.serverShare.data)
+ if sharedKey == nil {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: invalid server key share")
+ }
+
+ earlySecret := hs.earlySecret
+ if !hs.usingPSK {
+ earlySecret = hs.suite.extract(nil, nil)
+ }
+ handshakeSecret := hs.suite.extract(sharedKey,
+ hs.suite.deriveSecret(earlySecret, "derived", nil))
+
+ clientSecret := hs.suite.deriveSecret(handshakeSecret,
+ clientHandshakeTrafficLabel, hs.transcript)
+ c.out.setTrafficSecret(hs.suite, clientSecret)
+ serverSecret := hs.suite.deriveSecret(handshakeSecret,
+ serverHandshakeTrafficLabel, hs.transcript)
+ c.in.setTrafficSecret(hs.suite, serverSecret)
+
+ err := c.config.writeKeyLog(keyLogLabelClientHandshake, hs.hello.random, clientSecret)
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+ err = c.config.writeKeyLog(keyLogLabelServerHandshake, hs.hello.random, serverSecret)
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+
+ hs.masterSecret = hs.suite.extract(nil,
+ hs.suite.deriveSecret(handshakeSecret, "derived", nil))
+
+ return nil
+}
+
+func (hs *clientHandshakeStateTLS13) readServerParameters() error {
+ c := hs.c
+
+ msg, err := c.readHandshake()
+ if err != nil {
+ return err
+ }
+
+ encryptedExtensions, ok := msg.(*encryptedExtensionsMsg)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(encryptedExtensions, msg)
+ }
+ hs.transcript.Write(encryptedExtensions.marshal())
+
+ if err := checkALPN(hs.hello.alpnProtocols, encryptedExtensions.alpnProtocol); err != nil {
+ c.sendAlert(alertUnsupportedExtension)
+ return err
+ }
+ c.clientProtocol = encryptedExtensions.alpnProtocol
+
+ return nil
+}
+
+func (hs *clientHandshakeStateTLS13) readServerCertificate() error {
+ c := hs.c
+
+ // Either a PSK or a certificate is always used, but not both.
+ // See RFC 8446, Section 4.1.1.
+ if hs.usingPSK {
+ // Make sure the connection is still being verified whether or not this
+ // is a resumption. Resumptions currently don't reverify certificates so
+ // they don't call verifyServerCertificate. See Issue 31641.
+ if c.config.VerifyConnection != nil {
+ if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
+ c.sendAlert(alertBadCertificate)
+ return err
+ }
+ }
+ return nil
+ }
+
+ msg, err := c.readHandshake()
+ if err != nil {
+ return err
+ }
+
+ certReq, ok := msg.(*certificateRequestMsgTLS13)
+ if ok {
+ hs.transcript.Write(certReq.marshal())
+
+ hs.certReq = certReq
+
+ msg, err = c.readHandshake()
+ if err != nil {
+ return err
+ }
+ }
+
+ certMsg, ok := msg.(*certificateMsgTLS13)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(certMsg, msg)
+ }
+ if len(certMsg.certificate.Certificate) == 0 {
+ c.sendAlert(alertDecodeError)
+ return errors.New("tls: received empty certificates message")
+ }
+ hs.transcript.Write(certMsg.marshal())
+
+ c.scts = certMsg.certificate.SignedCertificateTimestamps
+ c.ocspResponse = certMsg.certificate.OCSPStaple
+
+ if err := c.verifyServerCertificate(certMsg.certificate.Certificate); err != nil {
+ return err
+ }
+
+ msg, err = c.readHandshake()
+ if err != nil {
+ return err
+ }
+
+ certVerify, ok := msg.(*certificateVerifyMsg)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(certVerify, msg)
+ }
+
+ // See RFC 8446, Section 4.4.3.
+ if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms()) {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: certificate used with invalid signature algorithm")
+ }
+ sigType, sigHash, err := typeAndHashFromSignatureScheme(certVerify.signatureAlgorithm)
+ if err != nil {
+ return c.sendAlert(alertInternalError)
+ }
+ if sigType == signaturePKCS1v15 || sigHash == crypto.SHA1 {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: certificate used with invalid signature algorithm")
+ }
+ signed := signedMessage(sigHash, serverSignatureContext, hs.transcript)
+ if err := verifyHandshakeSignature(sigType, c.peerCertificates[0].PublicKey,
+ sigHash, signed, certVerify.signature); err != nil {
+ c.sendAlert(alertDecryptError)
+ return errors.New("tls: invalid signature by the server certificate: " + err.Error())
+ }
+
+ hs.transcript.Write(certVerify.marshal())
+
+ return nil
+}
+
+func (hs *clientHandshakeStateTLS13) readServerFinished() error {
+ c := hs.c
+
+ msg, err := c.readHandshake()
+ if err != nil {
+ return err
+ }
+
+ finished, ok := msg.(*finishedMsg)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(finished, msg)
+ }
+
+ expectedMAC := hs.suite.finishedHash(c.in.trafficSecret, hs.transcript)
+ if !hmac.Equal(expectedMAC, finished.verifyData) {
+ c.sendAlert(alertDecryptError)
+ return errors.New("tls: invalid server finished hash")
+ }
+
+ hs.transcript.Write(finished.marshal())
+
+ // Derive secrets that take context through the server Finished.
+
+ hs.trafficSecret = hs.suite.deriveSecret(hs.masterSecret,
+ clientApplicationTrafficLabel, hs.transcript)
+ serverSecret := hs.suite.deriveSecret(hs.masterSecret,
+ serverApplicationTrafficLabel, hs.transcript)
+ c.in.setTrafficSecret(hs.suite, serverSecret)
+
+ err = c.config.writeKeyLog(keyLogLabelClientTraffic, hs.hello.random, hs.trafficSecret)
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+ err = c.config.writeKeyLog(keyLogLabelServerTraffic, hs.hello.random, serverSecret)
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+
+ c.ekm = hs.suite.exportKeyingMaterial(hs.masterSecret, hs.transcript)
+
+ return nil
+}
+
+func (hs *clientHandshakeStateTLS13) sendClientCertificate() error {
+ c := hs.c
+
+ if hs.certReq == nil {
+ return nil
+ }
+
+ cert, err := c.getClientCertificate(&CertificateRequestInfo{
+ AcceptableCAs: hs.certReq.certificateAuthorities,
+ SignatureSchemes: hs.certReq.supportedSignatureAlgorithms,
+ Version: c.vers,
+ ctx: hs.ctx,
+ })
+ if err != nil {
+ return err
+ }
+
+ certMsg := new(certificateMsgTLS13)
+
+ certMsg.certificate = *cert
+ certMsg.scts = hs.certReq.scts && len(cert.SignedCertificateTimestamps) > 0
+ certMsg.ocspStapling = hs.certReq.ocspStapling && len(cert.OCSPStaple) > 0
+
+ hs.transcript.Write(certMsg.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
+ return err
+ }
+
+ // If we sent an empty certificate message, skip the CertificateVerify.
+ if len(cert.Certificate) == 0 {
+ return nil
+ }
+
+ certVerifyMsg := new(certificateVerifyMsg)
+ certVerifyMsg.hasSignatureAlgorithm = true
+
+ certVerifyMsg.signatureAlgorithm, err = selectSignatureScheme(c.vers, cert, hs.certReq.supportedSignatureAlgorithms)
+ if err != nil {
+ // getClientCertificate returned a certificate incompatible with the
+ // CertificateRequestInfo supported signature algorithms.
+ c.sendAlert(alertHandshakeFailure)
+ return err
+ }
+
+ sigType, sigHash, err := typeAndHashFromSignatureScheme(certVerifyMsg.signatureAlgorithm)
+ if err != nil {
+ return c.sendAlert(alertInternalError)
+ }
+
+ signed := signedMessage(sigHash, clientSignatureContext, hs.transcript)
+ signOpts := crypto.SignerOpts(sigHash)
+ if sigType == signatureRSAPSS {
+ signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}
+ }
+ sig, err := cert.PrivateKey.(crypto.Signer).Sign(c.config.rand(), signed, signOpts)
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return errors.New("tls: failed to sign handshake: " + err.Error())
+ }
+ certVerifyMsg.signature = sig
+
+ hs.transcript.Write(certVerifyMsg.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (hs *clientHandshakeStateTLS13) sendClientFinished() error {
+ c := hs.c
+
+ finished := &finishedMsg{
+ verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript),
+ }
+
+ hs.transcript.Write(finished.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
+ return err
+ }
+
+ c.out.setTrafficSecret(hs.suite, hs.trafficSecret)
+
+ if !c.config.SessionTicketsDisabled && c.config.ClientSessionCache != nil {
+ c.resumptionSecret = hs.suite.deriveSecret(hs.masterSecret,
+ resumptionLabel, hs.transcript)
+ }
+
+ return nil
+}
+
+func (c *Conn) handleNewSessionTicket(msg *newSessionTicketMsgTLS13) error {
+ if !c.isClient {
+ c.sendAlert(alertUnexpectedMessage)
+ return errors.New("tls: received new session ticket from a client")
+ }
+
+ if c.config.SessionTicketsDisabled || c.config.ClientSessionCache == nil {
+ return nil
+ }
+
+ // See RFC 8446, Section 4.6.1.
+ if msg.lifetime == 0 {
+ return nil
+ }
+ lifetime := time.Duration(msg.lifetime) * time.Second
+ if lifetime > maxSessionTicketLifetime {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: received a session ticket with invalid lifetime")
+ }
+
+ cipherSuite := cipherSuiteTLS13ByID(c.cipherSuite)
+ if cipherSuite == nil || c.resumptionSecret == nil {
+ return c.sendAlert(alertInternalError)
+ }
+
+ // Save the resumption_master_secret and nonce instead of deriving the PSK
+ // to do the least amount of work on NewSessionTicket messages before we
+ // know if the ticket will be used. Forward secrecy of resumed connections
+ // is guaranteed by the requirement for pskModeDHE.
+ session := &ClientSessionState{
+ sessionTicket: msg.label,
+ vers: c.vers,
+ cipherSuite: c.cipherSuite,
+ masterSecret: c.resumptionSecret,
+ serverCertificates: c.peerCertificates,
+ verifiedChains: c.verifiedChains,
+ receivedAt: c.config.time(),
+ nonce: msg.nonce,
+ useBy: c.config.time().Add(lifetime),
+ ageAdd: msg.ageAdd,
+ ocspResponse: c.ocspResponse,
+ scts: c.scts,
+ }
+
+ cacheKey := clientSessionCacheKey(c.conn.RemoteAddr(), c.config)
+ c.config.ClientSessionCache.Put(cacheKey, session)
+
+ return nil
+}
diff --git a/contrib/go/_std_1.19/src/crypto/tls/handshake_messages.go b/contrib/go/_std_1.19/src/crypto/tls/handshake_messages.go
new file mode 100644
index 0000000000..7ab0f100b8
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/tls/handshake_messages.go
@@ -0,0 +1,1820 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tls
+
+import (
+ "fmt"
+ "strings"
+
+ "golang.org/x/crypto/cryptobyte"
+)
+
+// The marshalingFunction type is an adapter to allow the use of ordinary
+// functions as cryptobyte.MarshalingValue.
+type marshalingFunction func(b *cryptobyte.Builder) error
+
+func (f marshalingFunction) Marshal(b *cryptobyte.Builder) error {
+ return f(b)
+}
+
+// addBytesWithLength appends a sequence of bytes to the cryptobyte.Builder. If
+// the length of the sequence is not the value specified, it produces an error.
+func addBytesWithLength(b *cryptobyte.Builder, v []byte, n int) {
+ b.AddValue(marshalingFunction(func(b *cryptobyte.Builder) error {
+ if len(v) != n {
+ return fmt.Errorf("invalid value length: expected %d, got %d", n, len(v))
+ }
+ b.AddBytes(v)
+ return nil
+ }))
+}
+
+// addUint64 appends a big-endian, 64-bit value to the cryptobyte.Builder.
+func addUint64(b *cryptobyte.Builder, v uint64) {
+ b.AddUint32(uint32(v >> 32))
+ b.AddUint32(uint32(v))
+}
+
+// readUint64 decodes a big-endian, 64-bit value into out and advances over it.
+// It reports whether the read was successful.
+func readUint64(s *cryptobyte.String, out *uint64) bool {
+ var hi, lo uint32
+ if !s.ReadUint32(&hi) || !s.ReadUint32(&lo) {
+ return false
+ }
+ *out = uint64(hi)<<32 | uint64(lo)
+ return true
+}
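+
+// For example, addUint64(b, 0x0102030405060708) appends the bytes
+// 01 02 03 04 05 06 07 08 (network byte order), and readUint64
+// reassembles them into the same value.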
+
+// readUint8LengthPrefixed acts like s.ReadUint8LengthPrefixed, but targets a
+// []byte instead of a cryptobyte.String.
+func readUint8LengthPrefixed(s *cryptobyte.String, out *[]byte) bool {
+ return s.ReadUint8LengthPrefixed((*cryptobyte.String)(out))
+}
+
+// readUint16LengthPrefixed acts like s.ReadUint16LengthPrefixed, but targets a
+// []byte instead of a cryptobyte.String.
+func readUint16LengthPrefixed(s *cryptobyte.String, out *[]byte) bool {
+ return s.ReadUint16LengthPrefixed((*cryptobyte.String)(out))
+}
+
+// readUint24LengthPrefixed acts like s.ReadUint24LengthPrefixed, but targets a
+// []byte instead of a cryptobyte.String.
+func readUint24LengthPrefixed(s *cryptobyte.String, out *[]byte) bool {
+ return s.ReadUint24LengthPrefixed((*cryptobyte.String)(out))
+}
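+
+// A minimal usage sketch (field name hypothetical): parsing a uint16
+// length-prefixed opaque field advances s past both the prefix and the
+// body,
+//
+//	var ticket []byte
+//	if !readUint16LengthPrefixed(&s, &ticket) {
+//		return false // malformed
+//	}
+//
+// which is the pattern the unmarshal functions below repeat.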
+
+type clientHelloMsg struct {
+ raw []byte
+ vers uint16
+ random []byte
+ sessionId []byte
+ cipherSuites []uint16
+ compressionMethods []uint8
+ serverName string
+ ocspStapling bool
+ supportedCurves []CurveID
+ supportedPoints []uint8
+ ticketSupported bool
+ sessionTicket []uint8
+ supportedSignatureAlgorithms []SignatureScheme
+ supportedSignatureAlgorithmsCert []SignatureScheme
+ secureRenegotiationSupported bool
+ secureRenegotiation []byte
+ alpnProtocols []string
+ scts bool
+ supportedVersions []uint16
+ cookie []byte
+ keyShares []keyShare
+ earlyData bool
+ pskModes []uint8
+ pskIdentities []pskIdentity
+ pskBinders [][]byte
+}
+
+func (m *clientHelloMsg) marshal() []byte {
+ if m.raw != nil {
+ return m.raw
+ }
+
+ var b cryptobyte.Builder
+ b.AddUint8(typeClientHello)
+ b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16(m.vers)
+ addBytesWithLength(b, m.random, 32)
+ b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(m.sessionId)
+ })
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ for _, suite := range m.cipherSuites {
+ b.AddUint16(suite)
+ }
+ })
+ b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(m.compressionMethods)
+ })
+
+ // If no extensions end up being added below, omit the extensions block
+ // entirely: the by-value builder snapshot is restored in that case.
+ var extensionsPresent bool
+ bWithoutExtensions := *b
+
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ if len(m.serverName) > 0 {
+ // RFC 6066, Section 3
+ b.AddUint16(extensionServerName)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint8(0) // name_type = host_name
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes([]byte(m.serverName))
+ })
+ })
+ })
+ }
+ if m.ocspStapling {
+ // RFC 4366, Section 3.6
+ b.AddUint16(extensionStatusRequest)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint8(1) // status_type = ocsp
+ b.AddUint16(0) // empty responder_id_list
+ b.AddUint16(0) // empty request_extensions
+ })
+ }
+ if len(m.supportedCurves) > 0 {
+ // RFC 4492, Section 5.1.1, and RFC 8446, Section 4.2.7
+ b.AddUint16(extensionSupportedCurves)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ for _, curve := range m.supportedCurves {
+ b.AddUint16(uint16(curve))
+ }
+ })
+ })
+ }
+ if len(m.supportedPoints) > 0 {
+ // RFC 4492, Section 5.1.2
+ b.AddUint16(extensionSupportedPoints)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(m.supportedPoints)
+ })
+ })
+ }
+ if m.ticketSupported {
+ // RFC 5077, Section 3.2
+ b.AddUint16(extensionSessionTicket)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(m.sessionTicket)
+ })
+ }
+ if len(m.supportedSignatureAlgorithms) > 0 {
+ // RFC 5246, Section 7.4.1.4.1
+ b.AddUint16(extensionSignatureAlgorithms)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ for _, sigAlgo := range m.supportedSignatureAlgorithms {
+ b.AddUint16(uint16(sigAlgo))
+ }
+ })
+ })
+ }
+ if len(m.supportedSignatureAlgorithmsCert) > 0 {
+ // RFC 8446, Section 4.2.3
+ b.AddUint16(extensionSignatureAlgorithmsCert)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ for _, sigAlgo := range m.supportedSignatureAlgorithmsCert {
+ b.AddUint16(uint16(sigAlgo))
+ }
+ })
+ })
+ }
+ if m.secureRenegotiationSupported {
+ // RFC 5746, Section 3.2
+ b.AddUint16(extensionRenegotiationInfo)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(m.secureRenegotiation)
+ })
+ })
+ }
+ if len(m.alpnProtocols) > 0 {
+ // RFC 7301, Section 3.1
+ b.AddUint16(extensionALPN)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ for _, proto := range m.alpnProtocols {
+ b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes([]byte(proto))
+ })
+ }
+ })
+ })
+ }
+ if m.scts {
+ // RFC 6962, Section 3.3.1
+ b.AddUint16(extensionSCT)
+ b.AddUint16(0) // empty extension_data
+ }
+ if len(m.supportedVersions) > 0 {
+ // RFC 8446, Section 4.2.1
+ b.AddUint16(extensionSupportedVersions)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ for _, vers := range m.supportedVersions {
+ b.AddUint16(vers)
+ }
+ })
+ })
+ }
+ if len(m.cookie) > 0 {
+ // RFC 8446, Section 4.2.2
+ b.AddUint16(extensionCookie)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(m.cookie)
+ })
+ })
+ }
+ if len(m.keyShares) > 0 {
+ // RFC 8446, Section 4.2.8
+ b.AddUint16(extensionKeyShare)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ for _, ks := range m.keyShares {
+ b.AddUint16(uint16(ks.group))
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(ks.data)
+ })
+ }
+ })
+ })
+ }
+ if m.earlyData {
+ // RFC 8446, Section 4.2.10
+ b.AddUint16(extensionEarlyData)
+ b.AddUint16(0) // empty extension_data
+ }
+ if len(m.pskModes) > 0 {
+ // RFC 8446, Section 4.2.9
+ b.AddUint16(extensionPSKModes)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(m.pskModes)
+ })
+ })
+ }
+ if len(m.pskIdentities) > 0 { // pre_shared_key must be the last extension
+ // RFC 8446, Section 4.2.11
+ b.AddUint16(extensionPreSharedKey)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ for _, psk := range m.pskIdentities {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(psk.label)
+ })
+ b.AddUint32(psk.obfuscatedTicketAge)
+ }
+ })
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ for _, binder := range m.pskBinders {
+ b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(binder)
+ })
+ }
+ })
+ })
+ }
+
+ extensionsPresent = len(b.BytesOrPanic()) > 2
+ })
+
+ if !extensionsPresent {
+ *b = bWithoutExtensions
+ }
+ })
+
+ m.raw = b.BytesOrPanic()
+ return m.raw
+}
+
+// marshalWithoutBinders returns the ClientHello marshaled up to and
+// including the PreSharedKeyExtension.identities field, per RFC 8446,
+// Section 4.2.11.2. m.pskBinders must already be set to (placeholder)
+// slices of the correct lengths, since their encoded size is what gets
+// trimmed from the full message.
+func (m *clientHelloMsg) marshalWithoutBinders() []byte {
+ bindersLen := 2 // uint16 length prefix
+ for _, binder := range m.pskBinders {
+ bindersLen += 1 // uint8 length prefix
+ bindersLen += len(binder)
+ }
+
+ fullMessage := m.marshal()
+ return fullMessage[:len(fullMessage)-bindersLen]
+}
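+
+// As a rough wire sketch (sizes hypothetical), the tail of a ClientHello
+// carrying a single 32-byte binder ends with
+//
+//	...identities... | 00 21 | 20 | <32-byte binder>
+//
+// and marshalWithoutBinders returns everything before the 00 21 binders
+// length prefix, which is exactly the input to the PSK binder HMAC.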
+
+// updateBinders replaces m.pskBinders and, if the message has already been
+// marshaled, patches the binders into the cached representation in place.
+// The supplied binders must match the current m.pskBinders in number and
+// in per-binder length.
+func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) {
+ if len(pskBinders) != len(m.pskBinders) {
+ panic("tls: internal error: pskBinders length mismatch")
+ }
+ for i := range m.pskBinders {
+ if len(pskBinders[i]) != len(m.pskBinders[i]) {
+ panic("tls: internal error: pskBinders length mismatch")
+ }
+ }
+ m.pskBinders = pskBinders
+ if m.raw != nil {
+ lenWithoutBinders := len(m.marshalWithoutBinders())
+ b := cryptobyte.NewFixedBuilder(m.raw[:lenWithoutBinders])
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ for _, binder := range m.pskBinders {
+ b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(binder)
+ })
+ }
+ })
+ if out, err := b.Bytes(); err != nil || len(out) != len(m.raw) {
+ panic("tls: internal error: failed to update binders")
+ }
+ }
+}
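+
+// A sketch of the intended call sequence on the client (names
+// illustrative):
+//
+//	transcript.Write(hello.marshalWithoutBinders())
+//	binder := suite.finishedHash(binderKey, transcript)
+//	hello.updateBinders([][]byte{binder})
+//
+// so the cached m.raw stays byte-for-byte consistent with the new binders.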
+
+func (m *clientHelloMsg) unmarshal(data []byte) bool {
+ *m = clientHelloMsg{raw: data}
+ s := cryptobyte.String(data)
+
+ if !s.Skip(4) || // message type and uint24 length field
+ !s.ReadUint16(&m.vers) || !s.ReadBytes(&m.random, 32) ||
+ !readUint8LengthPrefixed(&s, &m.sessionId) {
+ return false
+ }
+
+ var cipherSuites cryptobyte.String
+ if !s.ReadUint16LengthPrefixed(&cipherSuites) {
+ return false
+ }
+ m.cipherSuites = []uint16{}
+ m.secureRenegotiationSupported = false
+ for !cipherSuites.Empty() {
+ var suite uint16
+ if !cipherSuites.ReadUint16(&suite) {
+ return false
+ }
+ if suite == scsvRenegotiation {
+ m.secureRenegotiationSupported = true
+ }
+ m.cipherSuites = append(m.cipherSuites, suite)
+ }
+
+ if !readUint8LengthPrefixed(&s, &m.compressionMethods) {
+ return false
+ }
+
+ if s.Empty() {
+ // ClientHello is optionally followed by extension data
+ return true
+ }
+
+ var extensions cryptobyte.String
+ if !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() {
+ return false
+ }
+
+ seenExts := make(map[uint16]bool)
+ for !extensions.Empty() {
+ var extension uint16
+ var extData cryptobyte.String
+ if !extensions.ReadUint16(&extension) ||
+ !extensions.ReadUint16LengthPrefixed(&extData) {
+ return false
+ }
+
+ if seenExts[extension] {
+ return false
+ }
+ seenExts[extension] = true
+
+ switch extension {
+ case extensionServerName:
+ // RFC 6066, Section 3
+ var nameList cryptobyte.String
+ if !extData.ReadUint16LengthPrefixed(&nameList) || nameList.Empty() {
+ return false
+ }
+ for !nameList.Empty() {
+ var nameType uint8
+ var serverName cryptobyte.String
+ if !nameList.ReadUint8(&nameType) ||
+ !nameList.ReadUint16LengthPrefixed(&serverName) ||
+ serverName.Empty() {
+ return false
+ }
+ if nameType != 0 {
+ continue
+ }
+ if len(m.serverName) != 0 {
+ // Multiple names of the same name_type are prohibited.
+ return false
+ }
+ m.serverName = string(serverName)
+ // An SNI value may not include a trailing dot.
+ if strings.HasSuffix(m.serverName, ".") {
+ return false
+ }
+ }
+ case extensionStatusRequest:
+ // RFC 4366, Section 3.6
+ var statusType uint8
+ var ignored cryptobyte.String
+ if !extData.ReadUint8(&statusType) ||
+ !extData.ReadUint16LengthPrefixed(&ignored) ||
+ !extData.ReadUint16LengthPrefixed(&ignored) {
+ return false
+ }
+ m.ocspStapling = statusType == statusTypeOCSP
+ case extensionSupportedCurves:
+ // RFC 4492, Section 5.1.1, and RFC 8446, Section 4.2.7
+ var curves cryptobyte.String
+ if !extData.ReadUint16LengthPrefixed(&curves) || curves.Empty() {
+ return false
+ }
+ for !curves.Empty() {
+ var curve uint16
+ if !curves.ReadUint16(&curve) {
+ return false
+ }
+ m.supportedCurves = append(m.supportedCurves, CurveID(curve))
+ }
+ case extensionSupportedPoints:
+ // RFC 4492, Section 5.1.2
+ if !readUint8LengthPrefixed(&extData, &m.supportedPoints) ||
+ len(m.supportedPoints) == 0 {
+ return false
+ }
+ case extensionSessionTicket:
+ // RFC 5077, Section 3.2
+ m.ticketSupported = true
+ extData.ReadBytes(&m.sessionTicket, len(extData))
+ case extensionSignatureAlgorithms:
+ // RFC 5246, Section 7.4.1.4.1
+ var sigAndAlgs cryptobyte.String
+ if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() {
+ return false
+ }
+ for !sigAndAlgs.Empty() {
+ var sigAndAlg uint16
+ if !sigAndAlgs.ReadUint16(&sigAndAlg) {
+ return false
+ }
+ m.supportedSignatureAlgorithms = append(
+ m.supportedSignatureAlgorithms, SignatureScheme(sigAndAlg))
+ }
+ case extensionSignatureAlgorithmsCert:
+ // RFC 8446, Section 4.2.3
+ var sigAndAlgs cryptobyte.String
+ if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() {
+ return false
+ }
+ for !sigAndAlgs.Empty() {
+ var sigAndAlg uint16
+ if !sigAndAlgs.ReadUint16(&sigAndAlg) {
+ return false
+ }
+ m.supportedSignatureAlgorithmsCert = append(
+ m.supportedSignatureAlgorithmsCert, SignatureScheme(sigAndAlg))
+ }
+ case extensionRenegotiationInfo:
+ // RFC 5746, Section 3.2
+ if !readUint8LengthPrefixed(&extData, &m.secureRenegotiation) {
+ return false
+ }
+ m.secureRenegotiationSupported = true
+ case extensionALPN:
+ // RFC 7301, Section 3.1
+ var protoList cryptobyte.String
+ if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() {
+ return false
+ }
+ for !protoList.Empty() {
+ var proto cryptobyte.String
+ if !protoList.ReadUint8LengthPrefixed(&proto) || proto.Empty() {
+ return false
+ }
+ m.alpnProtocols = append(m.alpnProtocols, string(proto))
+ }
+ case extensionSCT:
+ // RFC 6962, Section 3.3.1
+ m.scts = true
+ case extensionSupportedVersions:
+ // RFC 8446, Section 4.2.1
+ var versList cryptobyte.String
+ if !extData.ReadUint8LengthPrefixed(&versList) || versList.Empty() {
+ return false
+ }
+ for !versList.Empty() {
+ var vers uint16
+ if !versList.ReadUint16(&vers) {
+ return false
+ }
+ m.supportedVersions = append(m.supportedVersions, vers)
+ }
+ case extensionCookie:
+ // RFC 8446, Section 4.2.2
+ if !readUint16LengthPrefixed(&extData, &m.cookie) ||
+ len(m.cookie) == 0 {
+ return false
+ }
+ case extensionKeyShare:
+ // RFC 8446, Section 4.2.8
+ var clientShares cryptobyte.String
+ if !extData.ReadUint16LengthPrefixed(&clientShares) {
+ return false
+ }
+ for !clientShares.Empty() {
+ var ks keyShare
+ if !clientShares.ReadUint16((*uint16)(&ks.group)) ||
+ !readUint16LengthPrefixed(&clientShares, &ks.data) ||
+ len(ks.data) == 0 {
+ return false
+ }
+ m.keyShares = append(m.keyShares, ks)
+ }
+ case extensionEarlyData:
+ // RFC 8446, Section 4.2.10
+ m.earlyData = true
+ case extensionPSKModes:
+ // RFC 8446, Section 4.2.9
+ if !readUint8LengthPrefixed(&extData, &m.pskModes) {
+ return false
+ }
+ case extensionPreSharedKey:
+ // RFC 8446, Section 4.2.11
+ if !extensions.Empty() {
+ return false // pre_shared_key must be the last extension
+ }
+ var identities cryptobyte.String
+ if !extData.ReadUint16LengthPrefixed(&identities) || identities.Empty() {
+ return false
+ }
+ for !identities.Empty() {
+ var psk pskIdentity
+ if !readUint16LengthPrefixed(&identities, &psk.label) ||
+ !identities.ReadUint32(&psk.obfuscatedTicketAge) ||
+ len(psk.label) == 0 {
+ return false
+ }
+ m.pskIdentities = append(m.pskIdentities, psk)
+ }
+ var binders cryptobyte.String
+ if !extData.ReadUint16LengthPrefixed(&binders) || binders.Empty() {
+ return false
+ }
+ for !binders.Empty() {
+ var binder []byte
+ if !readUint8LengthPrefixed(&binders, &binder) ||
+ len(binder) == 0 {
+ return false
+ }
+ m.pskBinders = append(m.pskBinders, binder)
+ }
+ default:
+ // Ignore unknown extensions.
+ continue
+ }
+
+ if !extData.Empty() {
+ return false
+ }
+ }
+
+ return true
+}
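+
+// unmarshal resets *m but keeps the input in m.raw, so a subsequent
+// marshal returns the original bytes verbatim rather than re-encoding.
+// A sketch of the round trip:
+//
+//	m2 := new(clientHelloMsg)
+//	if m2.unmarshal(m1.marshal()) {
+//		// m2.marshal() is byte-identical to m1.marshal()
+//	}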
+
+type serverHelloMsg struct {
+ raw []byte
+ vers uint16
+ random []byte
+ sessionId []byte
+ cipherSuite uint16
+ compressionMethod uint8
+ ocspStapling bool
+ ticketSupported bool
+ secureRenegotiationSupported bool
+ secureRenegotiation []byte
+ alpnProtocol string
+ scts [][]byte
+ supportedVersion uint16
+ serverShare keyShare
+ selectedIdentityPresent bool
+ selectedIdentity uint16
+ supportedPoints []uint8
+
+ // HelloRetryRequest extensions
+ cookie []byte
+ selectedGroup CurveID
+}
+
+func (m *serverHelloMsg) marshal() []byte {
+ if m.raw != nil {
+ return m.raw
+ }
+
+ var b cryptobyte.Builder
+ b.AddUint8(typeServerHello)
+ b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16(m.vers)
+ addBytesWithLength(b, m.random, 32)
+ b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(m.sessionId)
+ })
+ b.AddUint16(m.cipherSuite)
+ b.AddUint8(m.compressionMethod)
+
+ // If no extensions end up being added below, omit the extensions block
+ // entirely: the by-value builder snapshot is restored in that case.
+ var extensionsPresent bool
+ bWithoutExtensions := *b
+
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ if m.ocspStapling {
+ b.AddUint16(extensionStatusRequest)
+ b.AddUint16(0) // empty extension_data
+ }
+ if m.ticketSupported {
+ b.AddUint16(extensionSessionTicket)
+ b.AddUint16(0) // empty extension_data
+ }
+ if m.secureRenegotiationSupported {
+ b.AddUint16(extensionRenegotiationInfo)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(m.secureRenegotiation)
+ })
+ })
+ }
+ if len(m.alpnProtocol) > 0 {
+ b.AddUint16(extensionALPN)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes([]byte(m.alpnProtocol))
+ })
+ })
+ })
+ }
+ if len(m.scts) > 0 {
+ b.AddUint16(extensionSCT)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ for _, sct := range m.scts {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(sct)
+ })
+ }
+ })
+ })
+ }
+ if m.supportedVersion != 0 {
+ b.AddUint16(extensionSupportedVersions)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16(m.supportedVersion)
+ })
+ }
+ if m.serverShare.group != 0 {
+ b.AddUint16(extensionKeyShare)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16(uint16(m.serverShare.group))
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(m.serverShare.data)
+ })
+ })
+ }
+ if m.selectedIdentityPresent {
+ b.AddUint16(extensionPreSharedKey)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16(m.selectedIdentity)
+ })
+ }
+
+ if len(m.cookie) > 0 {
+ b.AddUint16(extensionCookie)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(m.cookie)
+ })
+ })
+ }
+ if m.selectedGroup != 0 {
+ b.AddUint16(extensionKeyShare)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16(uint16(m.selectedGroup))
+ })
+ }
+ if len(m.supportedPoints) > 0 {
+ b.AddUint16(extensionSupportedPoints)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(m.supportedPoints)
+ })
+ })
+ }
+
+ extensionsPresent = len(b.BytesOrPanic()) > 2
+ })
+
+ if !extensionsPresent {
+ *b = bWithoutExtensions
+ }
+ })
+
+ m.raw = b.BytesOrPanic()
+ return m.raw
+}
+
+func (m *serverHelloMsg) unmarshal(data []byte) bool {
+ *m = serverHelloMsg{raw: data}
+ s := cryptobyte.String(data)
+
+ if !s.Skip(4) || // message type and uint24 length field
+ !s.ReadUint16(&m.vers) || !s.ReadBytes(&m.random, 32) ||
+ !readUint8LengthPrefixed(&s, &m.sessionId) ||
+ !s.ReadUint16(&m.cipherSuite) ||
+ !s.ReadUint8(&m.compressionMethod) {
+ return false
+ }
+
+ if s.Empty() {
+ // ServerHello is optionally followed by extension data
+ return true
+ }
+
+ var extensions cryptobyte.String
+ if !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() {
+ return false
+ }
+
+ seenExts := make(map[uint16]bool)
+ for !extensions.Empty() {
+ var extension uint16
+ var extData cryptobyte.String
+ if !extensions.ReadUint16(&extension) ||
+ !extensions.ReadUint16LengthPrefixed(&extData) {
+ return false
+ }
+
+ if seenExts[extension] {
+ return false
+ }
+ seenExts[extension] = true
+
+ switch extension {
+ case extensionStatusRequest:
+ m.ocspStapling = true
+ case extensionSessionTicket:
+ m.ticketSupported = true
+ case extensionRenegotiationInfo:
+ if !readUint8LengthPrefixed(&extData, &m.secureRenegotiation) {
+ return false
+ }
+ m.secureRenegotiationSupported = true
+ case extensionALPN:
+ var protoList cryptobyte.String
+ if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() {
+ return false
+ }
+ var proto cryptobyte.String
+ if !protoList.ReadUint8LengthPrefixed(&proto) ||
+ proto.Empty() || !protoList.Empty() {
+ return false
+ }
+ m.alpnProtocol = string(proto)
+ case extensionSCT:
+ var sctList cryptobyte.String
+ if !extData.ReadUint16LengthPrefixed(&sctList) || sctList.Empty() {
+ return false
+ }
+ for !sctList.Empty() {
+ var sct []byte
+ if !readUint16LengthPrefixed(&sctList, &sct) ||
+ len(sct) == 0 {
+ return false
+ }
+ m.scts = append(m.scts, sct)
+ }
+ case extensionSupportedVersions:
+ if !extData.ReadUint16(&m.supportedVersion) {
+ return false
+ }
+ case extensionCookie:
+ if !readUint16LengthPrefixed(&extData, &m.cookie) ||
+ len(m.cookie) == 0 {
+ return false
+ }
+ case extensionKeyShare:
+ // This extension has different formats in ServerHello and
+ // HelloRetryRequest; accept either and let the handshake logic decide.
+ // See RFC 8446, Section 4.2.8.
+ if len(extData) == 2 {
+ if !extData.ReadUint16((*uint16)(&m.selectedGroup)) {
+ return false
+ }
+ } else {
+ if !extData.ReadUint16((*uint16)(&m.serverShare.group)) ||
+ !readUint16LengthPrefixed(&extData, &m.serverShare.data) {
+ return false
+ }
+ }
+ case extensionPreSharedKey:
+ m.selectedIdentityPresent = true
+ if !extData.ReadUint16(&m.selectedIdentity) {
+ return false
+ }
+ case extensionSupportedPoints:
+ // RFC 4492, Section 5.1.2
+ if !readUint8LengthPrefixed(&extData, &m.supportedPoints) ||
+ len(m.supportedPoints) == 0 {
+ return false
+ }
+ default:
+ // Ignore unknown extensions.
+ continue
+ }
+
+ if !extData.Empty() {
+ return false
+ }
+ }
+
+ return true
+}
+
+type encryptedExtensionsMsg struct {
+ raw []byte
+ alpnProtocol string
+}
+
+func (m *encryptedExtensionsMsg) marshal() []byte {
+ if m.raw != nil {
+ return m.raw
+ }
+
+ var b cryptobyte.Builder
+ b.AddUint8(typeEncryptedExtensions)
+ b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ if len(m.alpnProtocol) > 0 {
+ b.AddUint16(extensionALPN)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes([]byte(m.alpnProtocol))
+ })
+ })
+ })
+ }
+ })
+ })
+
+ m.raw = b.BytesOrPanic()
+ return m.raw
+}
+
+func (m *encryptedExtensionsMsg) unmarshal(data []byte) bool {
+ *m = encryptedExtensionsMsg{raw: data}
+ s := cryptobyte.String(data)
+
+ var extensions cryptobyte.String
+ if !s.Skip(4) || // message type and uint24 length field
+ !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() {
+ return false
+ }
+
+ for !extensions.Empty() {
+ var extension uint16
+ var extData cryptobyte.String
+ if !extensions.ReadUint16(&extension) ||
+ !extensions.ReadUint16LengthPrefixed(&extData) {
+ return false
+ }
+
+ switch extension {
+ case extensionALPN:
+ var protoList cryptobyte.String
+ if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() {
+ return false
+ }
+ var proto cryptobyte.String
+ if !protoList.ReadUint8LengthPrefixed(&proto) ||
+ proto.Empty() || !protoList.Empty() {
+ return false
+ }
+ m.alpnProtocol = string(proto)
+ default:
+ // Ignore unknown extensions.
+ continue
+ }
+
+ if !extData.Empty() {
+ return false
+ }
+ }
+
+ return true
+}
+
+type endOfEarlyDataMsg struct{}
+
+func (m *endOfEarlyDataMsg) marshal() []byte {
+ x := make([]byte, 4)
+ x[0] = typeEndOfEarlyData
+ return x
+}
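+
+// On the wire this is just the 4-byte handshake header 05 00 00 00,
+// assuming the standard end_of_early_data type value of 5 and a zero
+// uint24 length.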
+
+func (m *endOfEarlyDataMsg) unmarshal(data []byte) bool {
+ return len(data) == 4
+}
+
+type keyUpdateMsg struct {
+ raw []byte
+ updateRequested bool
+}
+
+func (m *keyUpdateMsg) marshal() []byte {
+ if m.raw != nil {
+ return m.raw
+ }
+
+ var b cryptobyte.Builder
+ b.AddUint8(typeKeyUpdate)
+ b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
+ if m.updateRequested {
+ b.AddUint8(1)
+ } else {
+ b.AddUint8(0)
+ }
+ })
+
+ m.raw = b.BytesOrPanic()
+ return m.raw
+}
+
+func (m *keyUpdateMsg) unmarshal(data []byte) bool {
+ m.raw = data
+ s := cryptobyte.String(data)
+
+ var updateRequested uint8
+ if !s.Skip(4) || // message type and uint24 length field
+ !s.ReadUint8(&updateRequested) || !s.Empty() {
+ return false
+ }
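+ // updateRequested is the KeyUpdateRequest enum of RFC 8446, Section
+ // 4.6.3: 0 means update_not_requested, 1 means update_requested, and
+ // any other value is illegal.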
+ switch updateRequested {
+ case 0:
+ m.updateRequested = false
+ case 1:
+ m.updateRequested = true
+ default:
+ return false
+ }
+ return true
+}
+
+type newSessionTicketMsgTLS13 struct {
+ raw []byte
+ lifetime uint32
+ ageAdd uint32
+ nonce []byte
+ label []byte
+ maxEarlyData uint32
+}
+
+func (m *newSessionTicketMsgTLS13) marshal() []byte {
+ if m.raw != nil {
+ return m.raw
+ }
+
+ var b cryptobyte.Builder
+ b.AddUint8(typeNewSessionTicket)
+ b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint32(m.lifetime)
+ b.AddUint32(m.ageAdd)
+ b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(m.nonce)
+ })
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(m.label)
+ })
+
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ if m.maxEarlyData > 0 {
+ b.AddUint16(extensionEarlyData)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint32(m.maxEarlyData)
+ })
+ }
+ })
+ })
+
+ m.raw = b.BytesOrPanic()
+ return m.raw
+}
+
+func (m *newSessionTicketMsgTLS13) unmarshal(data []byte) bool {
+ *m = newSessionTicketMsgTLS13{raw: data}
+ s := cryptobyte.String(data)
+
+ var extensions cryptobyte.String
+ if !s.Skip(4) || // message type and uint24 length field
+ !s.ReadUint32(&m.lifetime) ||
+ !s.ReadUint32(&m.ageAdd) ||
+ !readUint8LengthPrefixed(&s, &m.nonce) ||
+ !readUint16LengthPrefixed(&s, &m.label) ||
+ !s.ReadUint16LengthPrefixed(&extensions) ||
+ !s.Empty() {
+ return false
+ }
+
+ for !extensions.Empty() {
+ var extension uint16
+ var extData cryptobyte.String
+ if !extensions.ReadUint16(&extension) ||
+ !extensions.ReadUint16LengthPrefixed(&extData) {
+ return false
+ }
+
+ switch extension {
+ case extensionEarlyData:
+ if !extData.ReadUint32(&m.maxEarlyData) {
+ return false
+ }
+ default:
+ // Ignore unknown extensions.
+ continue
+ }
+
+ if !extData.Empty() {
+ return false
+ }
+ }
+
+ return true
+}
+
+type certificateRequestMsgTLS13 struct {
+ raw []byte
+ ocspStapling bool
+ scts bool
+ supportedSignatureAlgorithms []SignatureScheme
+ supportedSignatureAlgorithmsCert []SignatureScheme
+ certificateAuthorities [][]byte
+}
+
+func (m *certificateRequestMsgTLS13) marshal() []byte {
+ if m.raw != nil {
+ return m.raw
+ }
+
+ var b cryptobyte.Builder
+ b.AddUint8(typeCertificateRequest)
+ b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
+ // certificate_request_context (SHALL be zero length unless used for
+ // post-handshake authentication)
+ b.AddUint8(0)
+
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ if m.ocspStapling {
+ b.AddUint16(extensionStatusRequest)
+ b.AddUint16(0) // empty extension_data
+ }
+ if m.scts {
+ // RFC 8446, Section 4.4.2.1 makes no mention of
+ // signed_certificate_timestamp in CertificateRequest, but
+ // "Extensions in the Certificate message from the client MUST
+ // correspond to extensions in the CertificateRequest message
+ // from the server," and it appears in the table in Section 4.2.
+ b.AddUint16(extensionSCT)
+ b.AddUint16(0) // empty extension_data
+ }
+ if len(m.supportedSignatureAlgorithms) > 0 {
+ b.AddUint16(extensionSignatureAlgorithms)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ for _, sigAlgo := range m.supportedSignatureAlgorithms {
+ b.AddUint16(uint16(sigAlgo))
+ }
+ })
+ })
+ }
+ if len(m.supportedSignatureAlgorithmsCert) > 0 {
+ b.AddUint16(extensionSignatureAlgorithmsCert)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ for _, sigAlgo := range m.supportedSignatureAlgorithmsCert {
+ b.AddUint16(uint16(sigAlgo))
+ }
+ })
+ })
+ }
+ if len(m.certificateAuthorities) > 0 {
+ b.AddUint16(extensionCertificateAuthorities)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ for _, ca := range m.certificateAuthorities {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(ca)
+ })
+ }
+ })
+ })
+ }
+ })
+ })
+
+ m.raw = b.BytesOrPanic()
+ return m.raw
+}
+
+func (m *certificateRequestMsgTLS13) unmarshal(data []byte) bool {
+ *m = certificateRequestMsgTLS13{raw: data}
+ s := cryptobyte.String(data)
+
+ var context, extensions cryptobyte.String
+ if !s.Skip(4) || // message type and uint24 length field
+ !s.ReadUint8LengthPrefixed(&context) || !context.Empty() ||
+ !s.ReadUint16LengthPrefixed(&extensions) ||
+ !s.Empty() {
+ return false
+ }
+
+ for !extensions.Empty() {
+ var extension uint16
+ var extData cryptobyte.String
+ if !extensions.ReadUint16(&extension) ||
+ !extensions.ReadUint16LengthPrefixed(&extData) {
+ return false
+ }
+
+ switch extension {
+ case extensionStatusRequest:
+ m.ocspStapling = true
+ case extensionSCT:
+ m.scts = true
+ case extensionSignatureAlgorithms:
+ var sigAndAlgs cryptobyte.String
+ if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() {
+ return false
+ }
+ for !sigAndAlgs.Empty() {
+ var sigAndAlg uint16
+ if !sigAndAlgs.ReadUint16(&sigAndAlg) {
+ return false
+ }
+ m.supportedSignatureAlgorithms = append(
+ m.supportedSignatureAlgorithms, SignatureScheme(sigAndAlg))
+ }
+ case extensionSignatureAlgorithmsCert:
+ var sigAndAlgs cryptobyte.String
+ if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() {
+ return false
+ }
+ for !sigAndAlgs.Empty() {
+ var sigAndAlg uint16
+ if !sigAndAlgs.ReadUint16(&sigAndAlg) {
+ return false
+ }
+ m.supportedSignatureAlgorithmsCert = append(
+ m.supportedSignatureAlgorithmsCert, SignatureScheme(sigAndAlg))
+ }
+ case extensionCertificateAuthorities:
+ var auths cryptobyte.String
+ if !extData.ReadUint16LengthPrefixed(&auths) || auths.Empty() {
+ return false
+ }
+ for !auths.Empty() {
+ var ca []byte
+ if !readUint16LengthPrefixed(&auths, &ca) || len(ca) == 0 {
+ return false
+ }
+ m.certificateAuthorities = append(m.certificateAuthorities, ca)
+ }
+ default:
+ // Ignore unknown extensions.
+ continue
+ }
+
+ if !extData.Empty() {
+ return false
+ }
+ }
+
+ return true
+}
+
+type certificateMsg struct {
+ raw []byte
+ certificates [][]byte
+}
+
+func (m *certificateMsg) marshal() (x []byte) {
+ if m.raw != nil {
+ return m.raw
+ }
+
+ var i int
+ for _, slice := range m.certificates {
+ i += len(slice)
+ }
+
+ length := 3 + 3*len(m.certificates) + i
+ x = make([]byte, 4+length)
+ x[0] = typeCertificate
+ x[1] = uint8(length >> 16)
+ x[2] = uint8(length >> 8)
+ x[3] = uint8(length)
+
+ certificateOctets := length - 3
+ x[4] = uint8(certificateOctets >> 16)
+ x[5] = uint8(certificateOctets >> 8)
+ x[6] = uint8(certificateOctets)
+
+ y := x[7:]
+ for _, slice := range m.certificates {
+ y[0] = uint8(len(slice) >> 16)
+ y[1] = uint8(len(slice) >> 8)
+ y[2] = uint8(len(slice))
+ copy(y[3:], slice)
+ y = y[3+len(slice):]
+ }
+
+ m.raw = x
+ return
+}
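+
+// A sketch of this legacy framing with one hypothetical 3-byte
+// certificate aa bb cc:
+//
+//	0b 00 00 09 | 00 00 06 | 00 00 03 aa bb cc
+//
+// handshake type, uint24 message length (9), uint24 certificate_list
+// length (6), then one uint24 length-prefixed certificate.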
+
+func (m *certificateMsg) unmarshal(data []byte) bool {
+ if len(data) < 7 {
+ return false
+ }
+
+ m.raw = data
+ certsLen := uint32(data[4])<<16 | uint32(data[5])<<8 | uint32(data[6])
+ if uint32(len(data)) != certsLen+7 {
+ return false
+ }
+
+ numCerts := 0
+ d := data[7:]
+ for certsLen > 0 {
+ if len(d) < 4 {
+ return false
+ }
+ certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2])
+ if uint32(len(d)) < 3+certLen {
+ return false
+ }
+ d = d[3+certLen:]
+ certsLen -= 3 + certLen
+ numCerts++
+ }
+
+ m.certificates = make([][]byte, numCerts)
+ d = data[7:]
+ for i := 0; i < numCerts; i++ {
+ certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2])
+ m.certificates[i] = d[3 : 3+certLen]
+ d = d[3+certLen:]
+ }
+
+ return true
+}
+
+type certificateMsgTLS13 struct {
+ raw []byte
+ certificate Certificate
+ ocspStapling bool
+ scts bool
+}
+
+func (m *certificateMsgTLS13) marshal() []byte {
+ if m.raw != nil {
+ return m.raw
+ }
+
+ var b cryptobyte.Builder
+ b.AddUint8(typeCertificate)
+ b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint8(0) // certificate_request_context
+
+ certificate := m.certificate
+ if !m.ocspStapling {
+ certificate.OCSPStaple = nil
+ }
+ if !m.scts {
+ certificate.SignedCertificateTimestamps = nil
+ }
+ marshalCertificate(b, certificate)
+ })
+
+ m.raw = b.BytesOrPanic()
+ return m.raw
+}
+
+func marshalCertificate(b *cryptobyte.Builder, certificate Certificate) {
+ b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
+ for i, cert := range certificate.Certificate {
+ b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(cert)
+ })
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ if i > 0 {
+ // This library only supports OCSP and SCT for leaf certificates.
+ return
+ }
+ if certificate.OCSPStaple != nil {
+ b.AddUint16(extensionStatusRequest)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint8(statusTypeOCSP)
+ b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(certificate.OCSPStaple)
+ })
+ })
+ }
+ if certificate.SignedCertificateTimestamps != nil {
+ b.AddUint16(extensionSCT)
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ for _, sct := range certificate.SignedCertificateTimestamps {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(sct)
+ })
+ }
+ })
+ })
+ }
+ })
+ }
+ })
+}
+
+func (m *certificateMsgTLS13) unmarshal(data []byte) bool {
+ *m = certificateMsgTLS13{raw: data}
+ s := cryptobyte.String(data)
+
+ var context cryptobyte.String
+ if !s.Skip(4) || // message type and uint24 length field
+ !s.ReadUint8LengthPrefixed(&context) || !context.Empty() ||
+ !unmarshalCertificate(&s, &m.certificate) ||
+ !s.Empty() {
+ return false
+ }
+
+ m.scts = m.certificate.SignedCertificateTimestamps != nil
+ m.ocspStapling = m.certificate.OCSPStaple != nil
+
+ return true
+}
+
+func unmarshalCertificate(s *cryptobyte.String, certificate *Certificate) bool {
+ var certList cryptobyte.String
+ if !s.ReadUint24LengthPrefixed(&certList) {
+ return false
+ }
+ for !certList.Empty() {
+ var cert []byte
+ var extensions cryptobyte.String
+ if !readUint24LengthPrefixed(&certList, &cert) ||
+ !certList.ReadUint16LengthPrefixed(&extensions) {
+ return false
+ }
+ certificate.Certificate = append(certificate.Certificate, cert)
+ for !extensions.Empty() {
+ var extension uint16
+ var extData cryptobyte.String
+ if !extensions.ReadUint16(&extension) ||
+ !extensions.ReadUint16LengthPrefixed(&extData) {
+ return false
+ }
+ if len(certificate.Certificate) > 1 {
+ // This library only supports OCSP and SCT for leaf certificates.
+ continue
+ }
+
+ switch extension {
+ case extensionStatusRequest:
+ var statusType uint8
+ if !extData.ReadUint8(&statusType) || statusType != statusTypeOCSP ||
+ !readUint24LengthPrefixed(&extData, &certificate.OCSPStaple) ||
+ len(certificate.OCSPStaple) == 0 {
+ return false
+ }
+ case extensionSCT:
+ var sctList cryptobyte.String
+ if !extData.ReadUint16LengthPrefixed(&sctList) || sctList.Empty() {
+ return false
+ }
+ for !sctList.Empty() {
+ var sct []byte
+ if !readUint16LengthPrefixed(&sctList, &sct) ||
+ len(sct) == 0 {
+ return false
+ }
+ certificate.SignedCertificateTimestamps = append(
+ certificate.SignedCertificateTimestamps, sct)
+ }
+ default:
+ // Ignore unknown extensions.
+ continue
+ }
+
+ if !extData.Empty() {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+type serverKeyExchangeMsg struct {
+ raw []byte
+ key []byte
+}
+
+func (m *serverKeyExchangeMsg) marshal() []byte {
+ if m.raw != nil {
+ return m.raw
+ }
+ length := len(m.key)
+ x := make([]byte, length+4)
+ x[0] = typeServerKeyExchange
+ x[1] = uint8(length >> 16)
+ x[2] = uint8(length >> 8)
+ x[3] = uint8(length)
+ copy(x[4:], m.key)
+
+ m.raw = x
+ return x
+}
+
+func (m *serverKeyExchangeMsg) unmarshal(data []byte) bool {
+ m.raw = data
+ if len(data) < 4 {
+ return false
+ }
+ m.key = data[4:]
+ return true
+}
+
+type certificateStatusMsg struct {
+ raw []byte
+ response []byte
+}
+
+func (m *certificateStatusMsg) marshal() []byte {
+ if m.raw != nil {
+ return m.raw
+ }
+
+ var b cryptobyte.Builder
+ b.AddUint8(typeCertificateStatus)
+ b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddUint8(statusTypeOCSP)
+ b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(m.response)
+ })
+ })
+
+ m.raw = b.BytesOrPanic()
+ return m.raw
+}
+
+func (m *certificateStatusMsg) unmarshal(data []byte) bool {
+ m.raw = data
+ s := cryptobyte.String(data)
+
+ var statusType uint8
+ if !s.Skip(4) || // message type and uint24 length field
+ !s.ReadUint8(&statusType) || statusType != statusTypeOCSP ||
+ !readUint24LengthPrefixed(&s, &m.response) ||
+ len(m.response) == 0 || !s.Empty() {
+ return false
+ }
+ return true
+}
+
+type serverHelloDoneMsg struct{}
+
+func (m *serverHelloDoneMsg) marshal() []byte {
+ x := make([]byte, 4)
+ x[0] = typeServerHelloDone
+ return x
+}
+
+func (m *serverHelloDoneMsg) unmarshal(data []byte) bool {
+ return len(data) == 4
+}
+
+type clientKeyExchangeMsg struct {
+ raw []byte
+ ciphertext []byte
+}
+
+func (m *clientKeyExchangeMsg) marshal() []byte {
+ if m.raw != nil {
+ return m.raw
+ }
+ length := len(m.ciphertext)
+ x := make([]byte, length+4)
+ x[0] = typeClientKeyExchange
+ x[1] = uint8(length >> 16)
+ x[2] = uint8(length >> 8)
+ x[3] = uint8(length)
+ copy(x[4:], m.ciphertext)
+
+ m.raw = x
+ return x
+}
+
+func (m *clientKeyExchangeMsg) unmarshal(data []byte) bool {
+ m.raw = data
+ if len(data) < 4 {
+ return false
+ }
+ l := int(data[1])<<16 | int(data[2])<<8 | int(data[3])
+ if l != len(data)-4 {
+ return false
+ }
+ m.ciphertext = data[4:]
+ return true
+}
+
+type finishedMsg struct {
+ raw []byte
+ verifyData []byte
+}
+
+func (m *finishedMsg) marshal() []byte {
+ if m.raw != nil {
+ return m.raw
+ }
+
+ var b cryptobyte.Builder
+ b.AddUint8(typeFinished)
+ b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(m.verifyData)
+ })
+
+ m.raw = b.BytesOrPanic()
+ return m.raw
+}
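+
+// For a 12-byte TLS 1.2 verify_data the marshaled message is
+//
+//	14 00 00 0c <12 bytes of verify_data>
+//
+// unmarshal below skips only the type byte because
+// readUint24LengthPrefixed consumes the uint24 length itself.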
+
+func (m *finishedMsg) unmarshal(data []byte) bool {
+ m.raw = data
+ s := cryptobyte.String(data)
+ return s.Skip(1) &&
+ readUint24LengthPrefixed(&s, &m.verifyData) &&
+ s.Empty()
+}
+
+type certificateRequestMsg struct {
+ raw []byte
+ // hasSignatureAlgorithm indicates whether this message includes a list of
+ // supported signature algorithms. This change was introduced with TLS 1.2.
+ hasSignatureAlgorithm bool
+
+ certificateTypes []byte
+ supportedSignatureAlgorithms []SignatureScheme
+ certificateAuthorities [][]byte
+}
+
+func (m *certificateRequestMsg) marshal() (x []byte) {
+ if m.raw != nil {
+ return m.raw
+ }
+
+ // See RFC 4346, Section 7.4.4.
+ length := 1 + len(m.certificateTypes) + 2
+ casLength := 0
+ for _, ca := range m.certificateAuthorities {
+ casLength += 2 + len(ca)
+ }
+ length += casLength
+
+ if m.hasSignatureAlgorithm {
+ length += 2 + 2*len(m.supportedSignatureAlgorithms)
+ }
+
+ x = make([]byte, 4+length)
+ x[0] = typeCertificateRequest
+ x[1] = uint8(length >> 16)
+ x[2] = uint8(length >> 8)
+ x[3] = uint8(length)
+
+ x[4] = uint8(len(m.certificateTypes))
+
+ copy(x[5:], m.certificateTypes)
+ y := x[5+len(m.certificateTypes):]
+
+ if m.hasSignatureAlgorithm {
+ n := len(m.supportedSignatureAlgorithms) * 2
+ y[0] = uint8(n >> 8)
+ y[1] = uint8(n)
+ y = y[2:]
+ for _, sigAlgo := range m.supportedSignatureAlgorithms {
+ y[0] = uint8(sigAlgo >> 8)
+ y[1] = uint8(sigAlgo)
+ y = y[2:]
+ }
+ }
+
+ y[0] = uint8(casLength >> 8)
+ y[1] = uint8(casLength)
+ y = y[2:]
+ for _, ca := range m.certificateAuthorities {
+ y[0] = uint8(len(ca) >> 8)
+ y[1] = uint8(len(ca))
+ y = y[2:]
+ copy(y, ca)
+ y = y[len(ca):]
+ }
+
+ m.raw = x
+ return
+}
+
+func (m *certificateRequestMsg) unmarshal(data []byte) bool {
+ m.raw = data
+
+ if len(data) < 5 {
+ return false
+ }
+
+ length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3])
+ if uint32(len(data))-4 != length {
+ return false
+ }
+
+ numCertTypes := int(data[4])
+ data = data[5:]
+ if numCertTypes == 0 || len(data) <= numCertTypes {
+ return false
+ }
+
+ m.certificateTypes = make([]byte, numCertTypes)
+ if copy(m.certificateTypes, data) != numCertTypes {
+ return false
+ }
+
+ data = data[numCertTypes:]
+
+ if m.hasSignatureAlgorithm {
+ if len(data) < 2 {
+ return false
+ }
+ sigAndHashLen := uint16(data[0])<<8 | uint16(data[1])
+ data = data[2:]
+ if sigAndHashLen&1 != 0 {
+ return false
+ }
+ if len(data) < int(sigAndHashLen) {
+ return false
+ }
+ numSigAlgos := sigAndHashLen / 2
+ m.supportedSignatureAlgorithms = make([]SignatureScheme, numSigAlgos)
+ for i := range m.supportedSignatureAlgorithms {
+ m.supportedSignatureAlgorithms[i] = SignatureScheme(data[0])<<8 | SignatureScheme(data[1])
+ data = data[2:]
+ }
+ }
+
+ if len(data) < 2 {
+ return false
+ }
+ casLength := uint16(data[0])<<8 | uint16(data[1])
+ data = data[2:]
+ if len(data) < int(casLength) {
+ return false
+ }
+ cas := make([]byte, casLength)
+ copy(cas, data)
+ data = data[casLength:]
+
+ m.certificateAuthorities = nil
+ for len(cas) > 0 {
+ if len(cas) < 2 {
+ return false
+ }
+ caLen := uint16(cas[0])<<8 | uint16(cas[1])
+ cas = cas[2:]
+
+ if len(cas) < int(caLen) {
+ return false
+ }
+
+ m.certificateAuthorities = append(m.certificateAuthorities, cas[:caLen])
+ cas = cas[caLen:]
+ }
+
+ return len(data) == 0
+}
+
+type certificateVerifyMsg struct {
+ raw []byte
+ hasSignatureAlgorithm bool // format change introduced in TLS 1.2
+ signatureAlgorithm SignatureScheme
+ signature []byte
+}
+
+func (m *certificateVerifyMsg) marshal() []byte {
+ if m.raw != nil {
+ return m.raw
+ }
+
+ var b cryptobyte.Builder
+ b.AddUint8(typeCertificateVerify)
+ b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
+ if m.hasSignatureAlgorithm {
+ b.AddUint16(uint16(m.signatureAlgorithm))
+ }
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(m.signature)
+ })
+ })
+
+ m.raw = b.BytesOrPanic()
+ return m.raw
+}
+
+func (m *certificateVerifyMsg) unmarshal(data []byte) bool {
+ m.raw = data
+ s := cryptobyte.String(data)
+
+ if !s.Skip(4) { // message type and uint24 length field
+ return false
+ }
+ if m.hasSignatureAlgorithm {
+ if !s.ReadUint16((*uint16)(&m.signatureAlgorithm)) {
+ return false
+ }
+ }
+ return readUint16LengthPrefixed(&s, &m.signature) && s.Empty()
+}
+
+type newSessionTicketMsg struct {
+ raw []byte
+ ticket []byte
+}
+
+func (m *newSessionTicketMsg) marshal() (x []byte) {
+ if m.raw != nil {
+ return m.raw
+ }
+
+ // See RFC 5077, Section 3.3.
+ ticketLen := len(m.ticket)
+ length := 2 + 4 + ticketLen
+ x = make([]byte, 4+length)
+ x[0] = typeNewSessionTicket
+ x[1] = uint8(length >> 16)
+ x[2] = uint8(length >> 8)
+ x[3] = uint8(length)
+ x[8] = uint8(ticketLen >> 8)
+ x[9] = uint8(ticketLen)
+ copy(x[10:], m.ticket)
+
+ m.raw = x
+
+ return
+}
+
+func (m *newSessionTicketMsg) unmarshal(data []byte) bool {
+ m.raw = data
+
+ if len(data) < 10 {
+ return false
+ }
+
+ length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3])
+ if uint32(len(data))-4 != length {
+ return false
+ }
+
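+ // Per RFC 5077, Section 3.3, the body is a uint32 ticket_lifetime_hint
+ // (bytes 4-7, left zero by marshal above) followed by a uint16
+ // length-prefixed ticket starting at byte 8, hence the offsets here.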
+ ticketLen := int(data[8])<<8 + int(data[9])
+ if len(data)-10 != ticketLen {
+ return false
+ }
+
+ m.ticket = data[10:]
+
+ return true
+}
+
+type helloRequestMsg struct {
+}
+
+func (*helloRequestMsg) marshal() []byte {
+ return []byte{typeHelloRequest, 0, 0, 0}
+}
+
+func (*helloRequestMsg) unmarshal(data []byte) bool {
+ return len(data) == 4
+}
diff --git a/contrib/go/_std_1.19/src/crypto/tls/handshake_server.go b/contrib/go/_std_1.19/src/crypto/tls/handshake_server.go
new file mode 100644
index 0000000000..92b38cb11b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/tls/handshake_server.go
@@ -0,0 +1,882 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tls
+
+import (
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "crypto/subtle"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "sync/atomic"
+ "time"
+)
+
+// serverHandshakeState contains details of a server handshake in progress.
+// It's discarded once the handshake has completed.
+type serverHandshakeState struct {
+ c *Conn
+ ctx context.Context
+ clientHello *clientHelloMsg
+ hello *serverHelloMsg
+ suite *cipherSuite
+ ecdheOk bool
+ ecSignOk bool
+ rsaDecryptOk bool
+ rsaSignOk bool
+ sessionState *sessionState
+ finishedHash finishedHash
+ masterSecret []byte
+ cert *Certificate
+}
+
+// serverHandshake performs a TLS handshake as a server.
+func (c *Conn) serverHandshake(ctx context.Context) error {
+ clientHello, err := c.readClientHello(ctx)
+ if err != nil {
+ return err
+ }
+
+ if c.vers == VersionTLS13 {
+ hs := serverHandshakeStateTLS13{
+ c: c,
+ ctx: ctx,
+ clientHello: clientHello,
+ }
+ return hs.handshake()
+ }
+
+ hs := serverHandshakeState{
+ c: c,
+ ctx: ctx,
+ clientHello: clientHello,
+ }
+ return hs.handshake()
+}
+
+func (hs *serverHandshakeState) handshake() error {
+ c := hs.c
+
+ if err := hs.processClientHello(); err != nil {
+ return err
+ }
+
+ // For an overview of TLS handshaking, see RFC 5246, Section 7.3.
+ c.buffering = true
+ if hs.checkForResumption() {
+ // The client has included a valid session ticket, so we do an abbreviated handshake.
+ c.didResume = true
+ if err := hs.doResumeHandshake(); err != nil {
+ return err
+ }
+ if err := hs.establishKeys(); err != nil {
+ return err
+ }
+ if err := hs.sendSessionTicket(); err != nil {
+ return err
+ }
+ if err := hs.sendFinished(c.serverFinished[:]); err != nil {
+ return err
+ }
+ if _, err := c.flush(); err != nil {
+ return err
+ }
+ c.clientFinishedIsFirst = false
+ if err := hs.readFinished(nil); err != nil {
+ return err
+ }
+ } else {
+ // The client didn't include a session ticket, or it wasn't
+ // valid, so we do a full handshake.
+ if err := hs.pickCipherSuite(); err != nil {
+ return err
+ }
+ if err := hs.doFullHandshake(); err != nil {
+ return err
+ }
+ if err := hs.establishKeys(); err != nil {
+ return err
+ }
+ if err := hs.readFinished(c.clientFinished[:]); err != nil {
+ return err
+ }
+ c.clientFinishedIsFirst = true
+ c.buffering = true
+ if err := hs.sendSessionTicket(); err != nil {
+ return err
+ }
+ if err := hs.sendFinished(nil); err != nil {
+ return err
+ }
+ if _, err := c.flush(); err != nil {
+ return err
+ }
+ }
+
+ c.ekm = ekmFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.clientHello.random, hs.hello.random)
+ atomic.StoreUint32(&c.handshakeStatus, 1)
+
+ return nil
+}
+
+// readClientHello reads a ClientHello message and selects the protocol version.
+func (c *Conn) readClientHello(ctx context.Context) (*clientHelloMsg, error) {
+ msg, err := c.readHandshake()
+ if err != nil {
+ return nil, err
+ }
+ clientHello, ok := msg.(*clientHelloMsg)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return nil, unexpectedMessageError(clientHello, msg)
+ }
+
+ var configForClient *Config
+ originalConfig := c.config
+ if c.config.GetConfigForClient != nil {
+ chi := clientHelloInfo(ctx, c, clientHello)
+ if configForClient, err = c.config.GetConfigForClient(chi); err != nil {
+ c.sendAlert(alertInternalError)
+ return nil, err
+ } else if configForClient != nil {
+ c.config = configForClient
+ }
+ }
+ c.ticketKeys = originalConfig.ticketKeys(configForClient)
+
+ clientVersions := clientHello.supportedVersions
+ if len(clientHello.supportedVersions) == 0 {
+ clientVersions = supportedVersionsFromMax(clientHello.vers)
+ }
+ c.vers, ok = c.config.mutualVersion(roleServer, clientVersions)
+ if !ok {
+ c.sendAlert(alertProtocolVersion)
+ return nil, fmt.Errorf("tls: client offered only unsupported versions: %x", clientVersions)
+ }
+ c.haveVers = true
+ c.in.version = c.vers
+ c.out.version = c.vers
+
+ return clientHello, nil
+}
+
+func (hs *serverHandshakeState) processClientHello() error {
+ c := hs.c
+
+ hs.hello = new(serverHelloMsg)
+ hs.hello.vers = c.vers
+
+ foundCompression := false
+ // We only support null compression, so check that the client offered it.
+ for _, compression := range hs.clientHello.compressionMethods {
+ if compression == compressionNone {
+ foundCompression = true
+ break
+ }
+ }
+
+ if !foundCompression {
+ c.sendAlert(alertHandshakeFailure)
+ return errors.New("tls: client does not support uncompressed connections")
+ }
+
+ hs.hello.random = make([]byte, 32)
+ serverRandom := hs.hello.random
+ // Downgrade protection canaries. See RFC 8446, Section 4.1.3.
+ maxVers := c.config.maxSupportedVersion(roleServer)
+ if maxVers >= VersionTLS12 && c.vers < maxVers || testingOnlyForceDowngradeCanary {
+ if c.vers == VersionTLS12 {
+ copy(serverRandom[24:], downgradeCanaryTLS12)
+ } else {
+ copy(serverRandom[24:], downgradeCanaryTLS11)
+ }
+ serverRandom = serverRandom[:24]
+ }
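+ // The canary values are the 8-byte suffixes mandated by RFC 8446,
+ // Section 4.1.3 (ASCII "DOWNGRD" plus 0x01 or 0x00); the remaining 24
+ // bytes are then filled with fresh randomness below.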
+ _, err := io.ReadFull(c.config.rand(), serverRandom)
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+
+ if len(hs.clientHello.secureRenegotiation) != 0 {
+ c.sendAlert(alertHandshakeFailure)
+ return errors.New("tls: initial handshake had non-empty renegotiation extension")
+ }
+
+ hs.hello.secureRenegotiationSupported = hs.clientHello.secureRenegotiationSupported
+ hs.hello.compressionMethod = compressionNone
+ if len(hs.clientHello.serverName) > 0 {
+ c.serverName = hs.clientHello.serverName
+ }
+
+ selectedProto, err := negotiateALPN(c.config.NextProtos, hs.clientHello.alpnProtocols)
+ if err != nil {
+ c.sendAlert(alertNoApplicationProtocol)
+ return err
+ }
+ hs.hello.alpnProtocol = selectedProto
+ c.clientProtocol = selectedProto
+
+ hs.cert, err = c.config.getCertificate(clientHelloInfo(hs.ctx, c, hs.clientHello))
+ if err != nil {
+ if err == errNoCertificates {
+ c.sendAlert(alertUnrecognizedName)
+ } else {
+ c.sendAlert(alertInternalError)
+ }
+ return err
+ }
+ if hs.clientHello.scts {
+ hs.hello.scts = hs.cert.SignedCertificateTimestamps
+ }
+
+ hs.ecdheOk = supportsECDHE(c.config, hs.clientHello.supportedCurves, hs.clientHello.supportedPoints)
+
+ if hs.ecdheOk && len(hs.clientHello.supportedPoints) > 0 {
+ // Although omitting the ec_point_formats extension is permitted, some
+ // old OpenSSL versions will refuse to handshake if it is not present.
+ //
+ // Per RFC 4492, section 5.1.2, implementations MUST support the
+ // uncompressed point format. See golang.org/issue/31943.
+ hs.hello.supportedPoints = []uint8{pointFormatUncompressed}
+ }
+
+ if priv, ok := hs.cert.PrivateKey.(crypto.Signer); ok {
+ switch priv.Public().(type) {
+ case *ecdsa.PublicKey:
+ hs.ecSignOk = true
+ case ed25519.PublicKey:
+ hs.ecSignOk = true
+ case *rsa.PublicKey:
+ hs.rsaSignOk = true
+ default:
+ c.sendAlert(alertInternalError)
+ return fmt.Errorf("tls: unsupported signing key type (%T)", priv.Public())
+ }
+ }
+ if priv, ok := hs.cert.PrivateKey.(crypto.Decrypter); ok {
+ switch priv.Public().(type) {
+ case *rsa.PublicKey:
+ hs.rsaDecryptOk = true
+ default:
+ c.sendAlert(alertInternalError)
+ return fmt.Errorf("tls: unsupported decryption key type (%T)", priv.Public())
+ }
+ }
+
+ return nil
+}
+
+// negotiateALPN picks a shared ALPN protocol that both sides support in server
+// preference order. If ALPN is not configured or the peer doesn't support it,
+// it returns "" and no error.
+func negotiateALPN(serverProtos, clientProtos []string) (string, error) {
+ if len(serverProtos) == 0 || len(clientProtos) == 0 {
+ return "", nil
+ }
+ var http11fallback bool
+ for _, s := range serverProtos {
+ for _, c := range clientProtos {
+ if s == c {
+ return s, nil
+ }
+ if s == "h2" && c == "http/1.1" {
+ http11fallback = true
+ }
+ }
+ }
+ // As a special case, let http/1.1 clients connect to h2 servers as if they
+ // didn't support ALPN. We previously did not enforce protocol overlap, so
+ // over time a number of HTTP servers were configured with only "h2", but
+ // expected to accept connections from "http/1.1" clients. See Issue 46310.
+ if http11fallback {
+ return "", nil
+ }
+ return "", fmt.Errorf("tls: client requested unsupported application protocols (%s)", clientProtos)
+}
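+
+// For example (hypothetical inputs):
+//
+//	negotiateALPN([]string{"h2", "http/1.1"}, []string{"http/1.1"}) // "http/1.1", nil
+//	negotiateALPN([]string{"h2"}, []string{"http/1.1"})             // "", nil (fallback)
+//	negotiateALPN([]string{"h2"}, []string{"spdy/3"})               // "", error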
+
+// supportsECDHE returns whether ECDHE key exchanges can be used with this
+// pre-TLS 1.3 client.
+func supportsECDHE(c *Config, supportedCurves []CurveID, supportedPoints []uint8) bool {
+ supportsCurve := false
+ for _, curve := range supportedCurves {
+ if c.supportsCurve(curve) {
+ supportsCurve = true
+ break
+ }
+ }
+
+ supportsPointFormat := false
+ for _, pointFormat := range supportedPoints {
+ if pointFormat == pointFormatUncompressed {
+ supportsPointFormat = true
+ break
+ }
+ }
+ // Per RFC 8422, Section 5.1.2, if the Supported Point Formats extension is
+ // missing, uncompressed points are supported. If supportedPoints is empty,
+ // the extension must be missing, as an empty extension body is rejected by
+ // the parser. See https://go.dev/issue/49126.
+ if len(supportedPoints) == 0 {
+ supportsPointFormat = true
+ }
+
+ return supportsCurve && supportsPointFormat
+}
+
+func (hs *serverHandshakeState) pickCipherSuite() error {
+ c := hs.c
+
+ preferenceOrder := cipherSuitesPreferenceOrder
+ if !hasAESGCMHardwareSupport || !aesgcmPreferred(hs.clientHello.cipherSuites) {
+ preferenceOrder = cipherSuitesPreferenceOrderNoAES
+ }
+
+ configCipherSuites := c.config.cipherSuites()
+ preferenceList := make([]uint16, 0, len(configCipherSuites))
+ for _, suiteID := range preferenceOrder {
+ for _, id := range configCipherSuites {
+ if id == suiteID {
+ preferenceList = append(preferenceList, id)
+ break
+ }
+ }
+ }
+
+ hs.suite = selectCipherSuite(preferenceList, hs.clientHello.cipherSuites, hs.cipherSuiteOk)
+ if hs.suite == nil {
+ c.sendAlert(alertHandshakeFailure)
+ return errors.New("tls: no cipher suite supported by both client and server")
+ }
+ c.cipherSuite = hs.suite.id
+
+ for _, id := range hs.clientHello.cipherSuites {
+ if id == TLS_FALLBACK_SCSV {
+ // The client is doing a fallback connection. See RFC 7507.
+ if hs.clientHello.vers < c.config.maxSupportedVersion(roleServer) {
+ c.sendAlert(alertInappropriateFallback)
+ return errors.New("tls: client using inappropriate protocol fallback")
+ }
+ break
+ }
+ }
+
+ return nil
+}
+
+func (hs *serverHandshakeState) cipherSuiteOk(c *cipherSuite) bool {
+ if c.flags&suiteECDHE != 0 {
+ if !hs.ecdheOk {
+ return false
+ }
+ if c.flags&suiteECSign != 0 {
+ if !hs.ecSignOk {
+ return false
+ }
+ } else if !hs.rsaSignOk {
+ return false
+ }
+ } else if !hs.rsaDecryptOk {
+ return false
+ }
+ if hs.c.vers < VersionTLS12 && c.flags&suiteTLS12 != 0 {
+ return false
+ }
+ return true
+}
+
+// checkForResumption reports whether we should perform resumption on this connection.
+func (hs *serverHandshakeState) checkForResumption() bool {
+ c := hs.c
+
+ if c.config.SessionTicketsDisabled {
+ return false
+ }
+
+ plaintext, usedOldKey := c.decryptTicket(hs.clientHello.sessionTicket)
+ if plaintext == nil {
+ return false
+ }
+ hs.sessionState = &sessionState{usedOldKey: usedOldKey}
+ ok := hs.sessionState.unmarshal(plaintext)
+ if !ok {
+ return false
+ }
+
+ createdAt := time.Unix(int64(hs.sessionState.createdAt), 0)
+ if c.config.time().Sub(createdAt) > maxSessionTicketLifetime {
+ return false
+ }
+
+ // Never resume a session for a different TLS version.
+ if c.vers != hs.sessionState.vers {
+ return false
+ }
+
+ cipherSuiteOk := false
+ // Check that the client is still offering the ciphersuite in the session.
+ for _, id := range hs.clientHello.cipherSuites {
+ if id == hs.sessionState.cipherSuite {
+ cipherSuiteOk = true
+ break
+ }
+ }
+ if !cipherSuiteOk {
+ return false
+ }
+
+ // Check that we also support the ciphersuite from the session.
+ hs.suite = selectCipherSuite([]uint16{hs.sessionState.cipherSuite},
+ c.config.cipherSuites(), hs.cipherSuiteOk)
+ if hs.suite == nil {
+ return false
+ }
+
+ sessionHasClientCerts := len(hs.sessionState.certificates) != 0
+ needClientCerts := requiresClientCert(c.config.ClientAuth)
+ if needClientCerts && !sessionHasClientCerts {
+ return false
+ }
+ if sessionHasClientCerts && c.config.ClientAuth == NoClientCert {
+ return false
+ }
+
+ return true
+}
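+
+// Caller-facing knobs for the resumption path above, as a hedged sketch
+// (the key value is an assumption and must come from a secure source):
+//
+//	cfg := &tls.Config{SessionTicketsDisabled: false}
+//	var key [32]byte // assumption: filled from crypto/rand
+//	cfg.SetSessionTicketKeys([][32]byte{key})
+//
+// Rotating the keys this way makes decryptTicket reject tickets issued
+// under keys that are no longer configured.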
+
+func (hs *serverHandshakeState) doResumeHandshake() error {
+ c := hs.c
+
+ hs.hello.cipherSuite = hs.suite.id
+ c.cipherSuite = hs.suite.id
+ // We echo the client's session ID in the ServerHello to let it know
+ // that we're doing a resumption.
+ hs.hello.sessionId = hs.clientHello.sessionId
+ hs.hello.ticketSupported = hs.sessionState.usedOldKey
+ hs.finishedHash = newFinishedHash(c.vers, hs.suite)
+ hs.finishedHash.discardHandshakeBuffer()
+ hs.finishedHash.Write(hs.clientHello.marshal())
+ hs.finishedHash.Write(hs.hello.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
+ return err
+ }
+
+ if err := c.processCertsFromClient(Certificate{
+ Certificate: hs.sessionState.certificates,
+ }); err != nil {
+ return err
+ }
+
+ if c.config.VerifyConnection != nil {
+ if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
+ c.sendAlert(alertBadCertificate)
+ return err
+ }
+ }
+
+ hs.masterSecret = hs.sessionState.masterSecret
+
+ return nil
+}
+
+func (hs *serverHandshakeState) doFullHandshake() error {
+ c := hs.c
+
+ if hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0 {
+ hs.hello.ocspStapling = true
+ }
+
+ hs.hello.ticketSupported = hs.clientHello.ticketSupported && !c.config.SessionTicketsDisabled
+ hs.hello.cipherSuite = hs.suite.id
+
+ hs.finishedHash = newFinishedHash(hs.c.vers, hs.suite)
+ if c.config.ClientAuth == NoClientCert {
+ // No need to keep a full record of the handshake if client
+ // certificates won't be used.
+ hs.finishedHash.discardHandshakeBuffer()
+ }
+ hs.finishedHash.Write(hs.clientHello.marshal())
+ hs.finishedHash.Write(hs.hello.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
+ return err
+ }
+
+ certMsg := new(certificateMsg)
+ certMsg.certificates = hs.cert.Certificate
+ hs.finishedHash.Write(certMsg.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
+ return err
+ }
+
+ if hs.hello.ocspStapling {
+ certStatus := new(certificateStatusMsg)
+ certStatus.response = hs.cert.OCSPStaple
+ hs.finishedHash.Write(certStatus.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, certStatus.marshal()); err != nil {
+ return err
+ }
+ }
+
+ keyAgreement := hs.suite.ka(c.vers)
+ skx, err := keyAgreement.generateServerKeyExchange(c.config, hs.cert, hs.clientHello, hs.hello)
+ if err != nil {
+ c.sendAlert(alertHandshakeFailure)
+ return err
+ }
+ if skx != nil {
+ hs.finishedHash.Write(skx.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, skx.marshal()); err != nil {
+ return err
+ }
+ }
+
+ var certReq *certificateRequestMsg
+ if c.config.ClientAuth >= RequestClientCert {
+ // Request a client certificate
+ certReq = new(certificateRequestMsg)
+ certReq.certificateTypes = []byte{
+ byte(certTypeRSASign),
+ byte(certTypeECDSASign),
+ }
+ if c.vers >= VersionTLS12 {
+ certReq.hasSignatureAlgorithm = true
+ certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms()
+ }
+
+ // An empty list of certificateAuthorities signals to
+ // the client that it may send any certificate in response
+ // to our request. When we know the CAs we trust, then
+ // we can send them down, so that the client can choose
+ // an appropriate certificate to give to us.
+ if c.config.ClientCAs != nil {
+ certReq.certificateAuthorities = c.config.ClientCAs.Subjects()
+ }
+ hs.finishedHash.Write(certReq.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil {
+ return err
+ }
+ }
+
+ helloDone := new(serverHelloDoneMsg)
+ hs.finishedHash.Write(helloDone.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, helloDone.marshal()); err != nil {
+ return err
+ }
+
+ if _, err := c.flush(); err != nil {
+ return err
+ }
+
+ var pub crypto.PublicKey // public key for client auth, if any
+
+ msg, err := c.readHandshake()
+ if err != nil {
+ return err
+ }
+
+ // If we requested a client certificate, then the client must send a
+ // certificate message, even if it's empty.
+ if c.config.ClientAuth >= RequestClientCert {
+ certMsg, ok := msg.(*certificateMsg)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(certMsg, msg)
+ }
+ hs.finishedHash.Write(certMsg.marshal())
+
+ if err := c.processCertsFromClient(Certificate{
+ Certificate: certMsg.certificates,
+ }); err != nil {
+ return err
+ }
+ if len(certMsg.certificates) != 0 {
+ pub = c.peerCertificates[0].PublicKey
+ }
+
+ msg, err = c.readHandshake()
+ if err != nil {
+ return err
+ }
+ }
+ if c.config.VerifyConnection != nil {
+ if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
+ c.sendAlert(alertBadCertificate)
+ return err
+ }
+ }
+
+ // Get client key exchange
+ ckx, ok := msg.(*clientKeyExchangeMsg)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(ckx, msg)
+ }
+ hs.finishedHash.Write(ckx.marshal())
+
+ preMasterSecret, err := keyAgreement.processClientKeyExchange(c.config, hs.cert, ckx, c.vers)
+ if err != nil {
+ c.sendAlert(alertHandshakeFailure)
+ return err
+ }
+ hs.masterSecret = masterFromPreMasterSecret(c.vers, hs.suite, preMasterSecret, hs.clientHello.random, hs.hello.random)
+ if err := c.config.writeKeyLog(keyLogLabelTLS12, hs.clientHello.random, hs.masterSecret); err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+
+ // If we received a client cert in response to our certificate request message,
+ // the client will send us a certificateVerifyMsg immediately after the
+ // clientKeyExchangeMsg. This message is a digest of all preceding
+ // handshake-layer messages that is signed using the private key corresponding
+ // to the client's certificate. This allows us to verify that the client is in
+ // possession of the private key of the certificate.
+ if len(c.peerCertificates) > 0 {
+ msg, err = c.readHandshake()
+ if err != nil {
+ return err
+ }
+ certVerify, ok := msg.(*certificateVerifyMsg)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(certVerify, msg)
+ }
+
+ var sigType uint8
+ var sigHash crypto.Hash
+ if c.vers >= VersionTLS12 {
+ if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, certReq.supportedSignatureAlgorithms) {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: client certificate used with invalid signature algorithm")
+ }
+ sigType, sigHash, err = typeAndHashFromSignatureScheme(certVerify.signatureAlgorithm)
+ if err != nil {
+ return c.sendAlert(alertInternalError)
+ }
+ } else {
+ sigType, sigHash, err = legacyTypeAndHashFromPublicKey(pub)
+ if err != nil {
+ c.sendAlert(alertIllegalParameter)
+ return err
+ }
+ }
+
+ signed := hs.finishedHash.hashForClientCertificate(sigType, sigHash, hs.masterSecret)
+ if err := verifyHandshakeSignature(sigType, pub, sigHash, signed, certVerify.signature); err != nil {
+ c.sendAlert(alertDecryptError)
+ return errors.New("tls: invalid signature by the client certificate: " + err.Error())
+ }
+
+ hs.finishedHash.Write(certVerify.marshal())
+ }
+
+ hs.finishedHash.discardHandshakeBuffer()
+
+ return nil
+}
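+
+// A hedged sketch of the configuration that exercises the client
+// certificate branches above (the pool contents are an assumption):
+//
+//	caPool := x509.NewCertPool() // assumption: populated with client CA certs
+//	cfg := &tls.Config{
+//		ClientAuth: tls.RequireAndVerifyClientCert,
+//		ClientCAs:  caPool,
+//	}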
+
+func (hs *serverHandshakeState) establishKeys() error {
+ c := hs.c
+
+ clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV :=
+ keysFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.clientHello.random, hs.hello.random, hs.suite.macLen, hs.suite.keyLen, hs.suite.ivLen)
+
+ var clientCipher, serverCipher any
+ var clientHash, serverHash hash.Hash
+
+ if hs.suite.aead == nil {
+ clientCipher = hs.suite.cipher(clientKey, clientIV, true /* for reading */)
+ clientHash = hs.suite.mac(clientMAC)
+ serverCipher = hs.suite.cipher(serverKey, serverIV, false /* not for reading */)
+ serverHash = hs.suite.mac(serverMAC)
+ } else {
+ clientCipher = hs.suite.aead(clientKey, clientIV)
+ serverCipher = hs.suite.aead(serverKey, serverIV)
+ }
+
+ c.in.prepareCipherSpec(c.vers, clientCipher, clientHash)
+ c.out.prepareCipherSpec(c.vers, serverCipher, serverHash)
+
+ return nil
+}
+
+func (hs *serverHandshakeState) readFinished(out []byte) error {
+ c := hs.c
+
+ if err := c.readChangeCipherSpec(); err != nil {
+ return err
+ }
+
+ msg, err := c.readHandshake()
+ if err != nil {
+ return err
+ }
+ clientFinished, ok := msg.(*finishedMsg)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(clientFinished, msg)
+ }
+
+ verify := hs.finishedHash.clientSum(hs.masterSecret)
+ if len(verify) != len(clientFinished.verifyData) ||
+ subtle.ConstantTimeCompare(verify, clientFinished.verifyData) != 1 {
+ c.sendAlert(alertHandshakeFailure)
+ return errors.New("tls: client's Finished message is incorrect")
+ }
+
+ hs.finishedHash.Write(clientFinished.marshal())
+ copy(out, verify)
+ return nil
+}
+
+func (hs *serverHandshakeState) sendSessionTicket() error {
+ // ticketSupported is set in a resumption handshake if the
+ // ticket from the client was encrypted with an old session
+ // ticket key and thus a refreshed ticket should be sent.
+ if !hs.hello.ticketSupported {
+ return nil
+ }
+
+ c := hs.c
+ m := new(newSessionTicketMsg)
+
+ createdAt := uint64(c.config.time().Unix())
+ if hs.sessionState != nil {
+ // If this is re-wrapping an old key, then keep
+ // the original time it was created.
+ createdAt = hs.sessionState.createdAt
+ }
+
+ var certsFromClient [][]byte
+ for _, cert := range c.peerCertificates {
+ certsFromClient = append(certsFromClient, cert.Raw)
+ }
+ state := sessionState{
+ vers: c.vers,
+ cipherSuite: hs.suite.id,
+ createdAt: createdAt,
+ masterSecret: hs.masterSecret,
+ certificates: certsFromClient,
+ }
+ var err error
+ m.ticket, err = c.encryptTicket(state.marshal())
+ if err != nil {
+ return err
+ }
+
+ hs.finishedHash.Write(m.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (hs *serverHandshakeState) sendFinished(out []byte) error {
+ c := hs.c
+
+ if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
+ return err
+ }
+
+ finished := new(finishedMsg)
+ finished.verifyData = hs.finishedHash.serverSum(hs.masterSecret)
+ hs.finishedHash.Write(finished.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
+ return err
+ }
+
+ copy(out, finished.verifyData)
+
+ return nil
+}
+
+// processCertsFromClient takes a chain of client certificates either from a
+// Certificates message or from a sessionState and verifies them. On success
+// it records the parsed chain on the connection as c.peerCertificates.
+func (c *Conn) processCertsFromClient(certificate Certificate) error {
+ certificates := certificate.Certificate
+ certs := make([]*x509.Certificate, len(certificates))
+ var err error
+ for i, asn1Data := range certificates {
+ if certs[i], err = x509.ParseCertificate(asn1Data); err != nil {
+ c.sendAlert(alertBadCertificate)
+ return errors.New("tls: failed to parse client certificate: " + err.Error())
+ }
+ }
+
+ if len(certs) == 0 && requiresClientCert(c.config.ClientAuth) {
+ c.sendAlert(alertBadCertificate)
+ return errors.New("tls: client didn't provide a certificate")
+ }
+
+ if c.config.ClientAuth >= VerifyClientCertIfGiven && len(certs) > 0 {
+ opts := x509.VerifyOptions{
+ Roots: c.config.ClientCAs,
+ CurrentTime: c.config.time(),
+ Intermediates: x509.NewCertPool(),
+ KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+ }
+
+ for _, cert := range certs[1:] {
+ opts.Intermediates.AddCert(cert)
+ }
+
+ chains, err := certs[0].Verify(opts)
+ if err != nil {
+ c.sendAlert(alertBadCertificate)
+ return errors.New("tls: failed to verify client certificate: " + err.Error())
+ }
+
+ c.verifiedChains = chains
+ }
+
+ c.peerCertificates = certs
+ c.ocspResponse = certificate.OCSPStaple
+ c.scts = certificate.SignedCertificateTimestamps
+
+ if len(certs) > 0 {
+ switch certs[0].PublicKey.(type) {
+ case *ecdsa.PublicKey, *rsa.PublicKey, ed25519.PublicKey:
+ default:
+ c.sendAlert(alertUnsupportedCertificate)
+ return fmt.Errorf("tls: client certificate contains an unsupported public key of type %T", certs[0].PublicKey)
+ }
+ }
+
+ if c.config.VerifyPeerCertificate != nil {
+ if err := c.config.VerifyPeerCertificate(certificates, c.verifiedChains); err != nil {
+ c.sendAlert(alertBadCertificate)
+ return err
+ }
+ }
+
+ return nil
+}
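+
+// A hedged sketch of the VerifyPeerCertificate hook invoked above; the
+// callback body is illustrative, not prescribed by this package:
+//
+//	cfg := &tls.Config{
+//		VerifyPeerCertificate: func(rawCerts [][]byte, chains [][]*x509.Certificate) error {
+//			if len(rawCerts) == 0 {
+//				return errors.New("no client certificate presented")
+//			}
+//			return nil // accept; the built-in verification already ran
+//		},
+//	}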
+
+func clientHelloInfo(ctx context.Context, c *Conn, clientHello *clientHelloMsg) *ClientHelloInfo {
+ supportedVersions := clientHello.supportedVersions
+ if len(clientHello.supportedVersions) == 0 {
+ supportedVersions = supportedVersionsFromMax(clientHello.vers)
+ }
+
+ return &ClientHelloInfo{
+ CipherSuites: clientHello.cipherSuites,
+ ServerName: clientHello.serverName,
+ SupportedCurves: clientHello.supportedCurves,
+ SupportedPoints: clientHello.supportedPoints,
+ SignatureSchemes: clientHello.supportedSignatureAlgorithms,
+ SupportedProtos: clientHello.alpnProtocols,
+ SupportedVersions: supportedVersions,
+ Conn: c.conn,
+ config: c.config,
+ ctx: ctx,
+ }
+}
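+
+// The ClientHelloInfo built above is what per-connection callbacks receive.
+// A hedged SNI-routing sketch (the name and configs are assumptions):
+//
+//	var internalConfig *tls.Config // assumption: a pre-built alternate config
+//	cfg := &tls.Config{
+//		GetConfigForClient: func(chi *tls.ClientHelloInfo) (*tls.Config, error) {
+//			if chi.ServerName == "internal.example.com" { // illustrative name
+//				return internalConfig, nil
+//			}
+//			return nil, nil // nil, nil keeps the original config
+//		},
+//	}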
diff --git a/contrib/go/_std_1.19/src/crypto/tls/handshake_server_tls13.go b/contrib/go/_std_1.19/src/crypto/tls/handshake_server_tls13.go
new file mode 100644
index 0000000000..03a477f7be
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/tls/handshake_server_tls13.go
@@ -0,0 +1,876 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tls
+
+import (
+ "bytes"
+ "context"
+ "crypto"
+ "crypto/hmac"
+ "crypto/rsa"
+ "encoding/binary"
+ "errors"
+ "hash"
+ "io"
+ "sync/atomic"
+ "time"
+)
+
+// maxClientPSKIdentities is the number of client PSK identities the server will
+// attempt to validate. It will ignore the rest so that cheap ClientHello
+// messages cannot cause too much work in session ticket decryption attempts.
+const maxClientPSKIdentities = 5
+
+type serverHandshakeStateTLS13 struct {
+ c *Conn
+ ctx context.Context
+ clientHello *clientHelloMsg
+ hello *serverHelloMsg
+ sentDummyCCS bool
+ usingPSK bool
+ suite *cipherSuiteTLS13
+ cert *Certificate
+ sigAlg SignatureScheme
+ earlySecret []byte
+ sharedKey []byte
+ handshakeSecret []byte
+ masterSecret []byte
+ trafficSecret []byte // client_application_traffic_secret_0
+ transcript hash.Hash
+ clientFinished []byte
+}
+
+func (hs *serverHandshakeStateTLS13) handshake() error {
+ c := hs.c
+
+ if needFIPS() {
+ return errors.New("tls: internal error: TLS 1.3 reached in FIPS mode")
+ }
+
+ // For an overview of the TLS 1.3 handshake, see RFC 8446, Section 2.
+ if err := hs.processClientHello(); err != nil {
+ return err
+ }
+ if err := hs.checkForResumption(); err != nil {
+ return err
+ }
+ if err := hs.pickCertificate(); err != nil {
+ return err
+ }
+ c.buffering = true
+ if err := hs.sendServerParameters(); err != nil {
+ return err
+ }
+ if err := hs.sendServerCertificate(); err != nil {
+ return err
+ }
+ if err := hs.sendServerFinished(); err != nil {
+ return err
+ }
+ // Note that at this point we could start sending application data without
+ // waiting for the client's second flight, but the application might not
+ // expect the lack of replay protection of the ClientHello parameters.
+ if _, err := c.flush(); err != nil {
+ return err
+ }
+ if err := hs.readClientCertificate(); err != nil {
+ return err
+ }
+ if err := hs.readClientFinished(); err != nil {
+ return err
+ }
+
+ atomic.StoreUint32(&c.handshakeStatus, 1)
+
+ return nil
+}
+
+func (hs *serverHandshakeStateTLS13) processClientHello() error {
+ c := hs.c
+
+ hs.hello = new(serverHelloMsg)
+
+ // TLS 1.3 froze the ServerHello.legacy_version field, and uses
+ // supported_versions instead. See RFC 8446, sections 4.1.3 and 4.2.1.
+ hs.hello.vers = VersionTLS12
+ hs.hello.supportedVersion = c.vers
+
+ if len(hs.clientHello.supportedVersions) == 0 {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: client used the legacy version field to negotiate TLS 1.3")
+ }
+
+ // Abort if the client is doing a fallback and landing lower than what we
+ // support. See RFC 7507, which however does not specify the interaction
+ // with supported_versions. The only difference is that with
+ // supported_versions a client has a chance to attempt a [TLS 1.2, TLS 1.4]
+ // handshake in case TLS 1.3 is broken but 1.2 is not. Alas, in that case,
+ // it will have to drop the TLS_FALLBACK_SCSV protection if it falls back to
+ // TLS 1.2, because a TLS 1.3 server would abort here. The situation before
+ // supported_versions was not better because there was just no way to do a
+ // TLS 1.4 handshake without risking the server selecting TLS 1.3.
+ for _, id := range hs.clientHello.cipherSuites {
+ if id == TLS_FALLBACK_SCSV {
+ // Use c.vers instead of max(supported_versions) because an attacker
+ // could otherwise defeat this by adding an arbitrarily high version.
+ if c.vers < c.config.maxSupportedVersion(roleServer) {
+ c.sendAlert(alertInappropriateFallback)
+ return errors.New("tls: client using inappropriate protocol fallback")
+ }
+ break
+ }
+ }
+
+ if len(hs.clientHello.compressionMethods) != 1 ||
+ hs.clientHello.compressionMethods[0] != compressionNone {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: TLS 1.3 client supports illegal compression methods")
+ }
+
+ hs.hello.random = make([]byte, 32)
+ if _, err := io.ReadFull(c.config.rand(), hs.hello.random); err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+
+ if len(hs.clientHello.secureRenegotiation) != 0 {
+ c.sendAlert(alertHandshakeFailure)
+ return errors.New("tls: initial handshake had non-empty renegotiation extension")
+ }
+
+ if hs.clientHello.earlyData {
+ // See RFC 8446, Section 4.2.10 for the complicated behavior required
+ // here. The scenario is that a different server at our address offered
+ // to accept early data in the past, which we can't handle. For now, all
+ // 0-RTT enabled session tickets need to expire before a Go server can
+ // replace a server or join a pool. That's the same requirement that
+ // applies to mixing or replacing with any TLS 1.2 server.
+ c.sendAlert(alertUnsupportedExtension)
+ return errors.New("tls: client sent unexpected early data")
+ }
+
+ hs.hello.sessionId = hs.clientHello.sessionId
+ hs.hello.compressionMethod = compressionNone
+
+ preferenceList := defaultCipherSuitesTLS13
+ if !hasAESGCMHardwareSupport || !aesgcmPreferred(hs.clientHello.cipherSuites) {
+ preferenceList = defaultCipherSuitesTLS13NoAES
+ }
+ for _, suiteID := range preferenceList {
+ hs.suite = mutualCipherSuiteTLS13(hs.clientHello.cipherSuites, suiteID)
+ if hs.suite != nil {
+ break
+ }
+ }
+ if hs.suite == nil {
+ c.sendAlert(alertHandshakeFailure)
+ return errors.New("tls: no cipher suite supported by both client and server")
+ }
+ c.cipherSuite = hs.suite.id
+ hs.hello.cipherSuite = hs.suite.id
+ hs.transcript = hs.suite.hash.New()
+
+ // Pick the ECDHE group in server preference order, but give priority to
+ // groups with a key share, to avoid a HelloRetryRequest round-trip.
+ var selectedGroup CurveID
+ var clientKeyShare *keyShare
+GroupSelection:
+ for _, preferredGroup := range c.config.curvePreferences() {
+ for _, ks := range hs.clientHello.keyShares {
+ if ks.group == preferredGroup {
+ selectedGroup = ks.group
+ clientKeyShare = &ks
+ break GroupSelection
+ }
+ }
+ if selectedGroup != 0 {
+ continue
+ }
+ for _, group := range hs.clientHello.supportedCurves {
+ if group == preferredGroup {
+ selectedGroup = group
+ break
+ }
+ }
+ }
+ if selectedGroup == 0 {
+ c.sendAlert(alertHandshakeFailure)
+ return errors.New("tls: no ECDHE curve supported by both client and server")
+ }
+ if clientKeyShare == nil {
+ if err := hs.doHelloRetryRequest(selectedGroup); err != nil {
+ return err
+ }
+ clientKeyShare = &hs.clientHello.keyShares[0]
+ }
+
+ if _, ok := curveForCurveID(selectedGroup); selectedGroup != X25519 && !ok {
+ c.sendAlert(alertInternalError)
+ return errors.New("tls: CurvePreferences includes unsupported curve")
+ }
+ params, err := generateECDHEParameters(c.config.rand(), selectedGroup)
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+ hs.hello.serverShare = keyShare{group: selectedGroup, data: params.PublicKey()}
+ hs.sharedKey = params.SharedKey(clientKeyShare.data)
+ if hs.sharedKey == nil {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: invalid client key share")
+ }
+
+ c.serverName = hs.clientHello.serverName
+ return nil
+}
+
+func (hs *serverHandshakeStateTLS13) checkForResumption() error {
+ c := hs.c
+
+ if c.config.SessionTicketsDisabled {
+ return nil
+ }
+
+ modeOK := false
+ for _, mode := range hs.clientHello.pskModes {
+ if mode == pskModeDHE {
+ modeOK = true
+ break
+ }
+ }
+ if !modeOK {
+ return nil
+ }
+
+ if len(hs.clientHello.pskIdentities) != len(hs.clientHello.pskBinders) {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: invalid or missing PSK binders")
+ }
+ if len(hs.clientHello.pskIdentities) == 0 {
+ return nil
+ }
+
+ for i, identity := range hs.clientHello.pskIdentities {
+ if i >= maxClientPSKIdentities {
+ break
+ }
+
+ plaintext, _ := c.decryptTicket(identity.label)
+ if plaintext == nil {
+ continue
+ }
+ sessionState := new(sessionStateTLS13)
+ if ok := sessionState.unmarshal(plaintext); !ok {
+ continue
+ }
+
+ createdAt := time.Unix(int64(sessionState.createdAt), 0)
+ if c.config.time().Sub(createdAt) > maxSessionTicketLifetime {
+ continue
+ }
+
+ // We don't check the obfuscated ticket age because it's affected by
+ // clock skew and it's only a freshness signal useful for shrinking the
+ // window for replay attacks, which don't affect us as we don't do 0-RTT.
+
+ pskSuite := cipherSuiteTLS13ByID(sessionState.cipherSuite)
+ if pskSuite == nil || pskSuite.hash != hs.suite.hash {
+ continue
+ }
+
+ // PSK connections don't re-establish client certificates, but carry
+ // them over in the session ticket. Ensure the presence of client certs
+ // in the ticket is consistent with the configured requirements.
+ sessionHasClientCerts := len(sessionState.certificate.Certificate) != 0
+ needClientCerts := requiresClientCert(c.config.ClientAuth)
+ if needClientCerts && !sessionHasClientCerts {
+ continue
+ }
+ if sessionHasClientCerts && c.config.ClientAuth == NoClientCert {
+ continue
+ }
+
+ psk := hs.suite.expandLabel(sessionState.resumptionSecret, "resumption",
+ nil, hs.suite.hash.Size())
+ hs.earlySecret = hs.suite.extract(psk, nil)
+ binderKey := hs.suite.deriveSecret(hs.earlySecret, resumptionBinderLabel, nil)
+ // Clone the transcript in case a HelloRetryRequest was recorded.
+ transcript := cloneHash(hs.transcript, hs.suite.hash)
+ if transcript == nil {
+ c.sendAlert(alertInternalError)
+ return errors.New("tls: internal error: failed to clone hash")
+ }
+ transcript.Write(hs.clientHello.marshalWithoutBinders())
+ pskBinder := hs.suite.finishedHash(binderKey, transcript)
+ if !hmac.Equal(hs.clientHello.pskBinders[i], pskBinder) {
+ c.sendAlert(alertDecryptError)
+ return errors.New("tls: invalid PSK binder")
+ }
+
+ c.didResume = true
+ if err := c.processCertsFromClient(sessionState.certificate); err != nil {
+ return err
+ }
+
+ hs.hello.selectedIdentityPresent = true
+ hs.hello.selectedIdentity = uint16(i)
+ hs.usingPSK = true
+ return nil
+ }
+
+ return nil
+}
+
+// cloneHash uses the encoding.BinaryMarshaler and encoding.BinaryUnmarshaler
+// interfaces implemented by standard library hashes to clone the state of in
+// to a new instance of h. It returns nil if the operation fails.
+func cloneHash(in hash.Hash, h crypto.Hash) hash.Hash {
+ // Recreate the interface to avoid importing encoding.
+ type binaryMarshaler interface {
+ MarshalBinary() (data []byte, err error)
+ UnmarshalBinary(data []byte) error
+ }
+ marshaler, ok := in.(binaryMarshaler)
+ if !ok {
+ return nil
+ }
+ state, err := marshaler.MarshalBinary()
+ if err != nil {
+ return nil
+ }
+ out := h.New()
+ unmarshaler, ok := out.(binaryMarshaler)
+ if !ok {
+ return nil
+ }
+ if err := unmarshaler.UnmarshalBinary(state); err != nil {
+ return nil
+ }
+ return out
+}
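+
+// The same marshal/unmarshal trick, as a standalone hedged sketch with a
+// concrete hash (sha256 is illustrative; any stdlib hash state works):
+//
+//	h := sha256.New()
+//	h.Write([]byte("transcript so far"))
+//	state, _ := h.(encoding.BinaryMarshaler).MarshalBinary()
+//	h2 := sha256.New()
+//	_ = h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state)
+//	// h2 now continues from the same state as h.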
+
+func (hs *serverHandshakeStateTLS13) pickCertificate() error {
+ c := hs.c
+
+ // Only one of PSK and certificates is used at a time.
+ if hs.usingPSK {
+ return nil
+ }
+
+ // signature_algorithms is required in TLS 1.3. See RFC 8446, Section 4.2.3.
+ if len(hs.clientHello.supportedSignatureAlgorithms) == 0 {
+ return c.sendAlert(alertMissingExtension)
+ }
+
+ certificate, err := c.config.getCertificate(clientHelloInfo(hs.ctx, c, hs.clientHello))
+ if err != nil {
+ if err == errNoCertificates {
+ c.sendAlert(alertUnrecognizedName)
+ } else {
+ c.sendAlert(alertInternalError)
+ }
+ return err
+ }
+ hs.sigAlg, err = selectSignatureScheme(c.vers, certificate, hs.clientHello.supportedSignatureAlgorithms)
+ if err != nil {
+ // getCertificate returned a certificate that is unsupported or
+ // incompatible with the client's signature algorithms.
+ c.sendAlert(alertHandshakeFailure)
+ return err
+ }
+ hs.cert = certificate
+
+ return nil
+}
+
+// sendDummyChangeCipherSpec sends a ChangeCipherSpec record for compatibility
+// with middleboxes that didn't implement TLS correctly. See RFC 8446, Appendix D.4.
+func (hs *serverHandshakeStateTLS13) sendDummyChangeCipherSpec() error {
+ if hs.sentDummyCCS {
+ return nil
+ }
+ hs.sentDummyCCS = true
+
+ _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
+ return err
+}
+
+func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID) error {
+ c := hs.c
+
+ // The first ClientHello gets double-hashed into the transcript upon a
+ // HelloRetryRequest. See RFC 8446, Section 4.4.1.
+ hs.transcript.Write(hs.clientHello.marshal())
+ chHash := hs.transcript.Sum(nil)
+ hs.transcript.Reset()
+ hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
+ hs.transcript.Write(chHash)
+
+ helloRetryRequest := &serverHelloMsg{
+ vers: hs.hello.vers,
+ random: helloRetryRequestRandom,
+ sessionId: hs.hello.sessionId,
+ cipherSuite: hs.hello.cipherSuite,
+ compressionMethod: hs.hello.compressionMethod,
+ supportedVersion: hs.hello.supportedVersion,
+ selectedGroup: selectedGroup,
+ }
+
+ hs.transcript.Write(helloRetryRequest.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, helloRetryRequest.marshal()); err != nil {
+ return err
+ }
+
+ if err := hs.sendDummyChangeCipherSpec(); err != nil {
+ return err
+ }
+
+ msg, err := c.readHandshake()
+ if err != nil {
+ return err
+ }
+
+ clientHello, ok := msg.(*clientHelloMsg)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(clientHello, msg)
+ }
+
+ if len(clientHello.keyShares) != 1 || clientHello.keyShares[0].group != selectedGroup {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: client sent invalid key share in second ClientHello")
+ }
+
+ if clientHello.earlyData {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: client indicated early data in second ClientHello")
+ }
+
+ if illegalClientHelloChange(clientHello, hs.clientHello) {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: client illegally modified second ClientHello")
+ }
+
+ hs.clientHello = clientHello
+ return nil
+}
+
+// illegalClientHelloChange reports whether the two ClientHello messages are
+// different, with the exception of the changes allowed before and after a
+// HelloRetryRequest. See RFC 8446, Section 4.1.2.
+func illegalClientHelloChange(ch, ch1 *clientHelloMsg) bool {
+ if len(ch.supportedVersions) != len(ch1.supportedVersions) ||
+ len(ch.cipherSuites) != len(ch1.cipherSuites) ||
+ len(ch.supportedCurves) != len(ch1.supportedCurves) ||
+ len(ch.supportedSignatureAlgorithms) != len(ch1.supportedSignatureAlgorithms) ||
+ len(ch.supportedSignatureAlgorithmsCert) != len(ch1.supportedSignatureAlgorithmsCert) ||
+ len(ch.alpnProtocols) != len(ch1.alpnProtocols) {
+ return true
+ }
+ for i := range ch.supportedVersions {
+ if ch.supportedVersions[i] != ch1.supportedVersions[i] {
+ return true
+ }
+ }
+ for i := range ch.cipherSuites {
+ if ch.cipherSuites[i] != ch1.cipherSuites[i] {
+ return true
+ }
+ }
+ for i := range ch.supportedCurves {
+ if ch.supportedCurves[i] != ch1.supportedCurves[i] {
+ return true
+ }
+ }
+ for i := range ch.supportedSignatureAlgorithms {
+ if ch.supportedSignatureAlgorithms[i] != ch1.supportedSignatureAlgorithms[i] {
+ return true
+ }
+ }
+ for i := range ch.supportedSignatureAlgorithmsCert {
+ if ch.supportedSignatureAlgorithmsCert[i] != ch1.supportedSignatureAlgorithmsCert[i] {
+ return true
+ }
+ }
+ for i := range ch.alpnProtocols {
+ if ch.alpnProtocols[i] != ch1.alpnProtocols[i] {
+ return true
+ }
+ }
+ return ch.vers != ch1.vers ||
+ !bytes.Equal(ch.random, ch1.random) ||
+ !bytes.Equal(ch.sessionId, ch1.sessionId) ||
+ !bytes.Equal(ch.compressionMethods, ch1.compressionMethods) ||
+ ch.serverName != ch1.serverName ||
+ ch.ocspStapling != ch1.ocspStapling ||
+ !bytes.Equal(ch.supportedPoints, ch1.supportedPoints) ||
+ ch.ticketSupported != ch1.ticketSupported ||
+ !bytes.Equal(ch.sessionTicket, ch1.sessionTicket) ||
+ ch.secureRenegotiationSupported != ch1.secureRenegotiationSupported ||
+ !bytes.Equal(ch.secureRenegotiation, ch1.secureRenegotiation) ||
+ ch.scts != ch1.scts ||
+ !bytes.Equal(ch.cookie, ch1.cookie) ||
+ !bytes.Equal(ch.pskModes, ch1.pskModes)
+}
+
+func (hs *serverHandshakeStateTLS13) sendServerParameters() error {
+ c := hs.c
+
+ hs.transcript.Write(hs.clientHello.marshal())
+ hs.transcript.Write(hs.hello.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
+ return err
+ }
+
+ if err := hs.sendDummyChangeCipherSpec(); err != nil {
+ return err
+ }
+
+ earlySecret := hs.earlySecret
+ if earlySecret == nil {
+ earlySecret = hs.suite.extract(nil, nil)
+ }
+ hs.handshakeSecret = hs.suite.extract(hs.sharedKey,
+ hs.suite.deriveSecret(earlySecret, "derived", nil))
+
+ clientSecret := hs.suite.deriveSecret(hs.handshakeSecret,
+ clientHandshakeTrafficLabel, hs.transcript)
+ c.in.setTrafficSecret(hs.suite, clientSecret)
+ serverSecret := hs.suite.deriveSecret(hs.handshakeSecret,
+ serverHandshakeTrafficLabel, hs.transcript)
+ c.out.setTrafficSecret(hs.suite, serverSecret)
+
+ err := c.config.writeKeyLog(keyLogLabelClientHandshake, hs.clientHello.random, clientSecret)
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+ err = c.config.writeKeyLog(keyLogLabelServerHandshake, hs.clientHello.random, serverSecret)
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+
+ encryptedExtensions := new(encryptedExtensionsMsg)
+
+ selectedProto, err := negotiateALPN(c.config.NextProtos, hs.clientHello.alpnProtocols)
+ if err != nil {
+ c.sendAlert(alertNoApplicationProtocol)
+ return err
+ }
+ encryptedExtensions.alpnProtocol = selectedProto
+ c.clientProtocol = selectedProto
+
+ hs.transcript.Write(encryptedExtensions.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, encryptedExtensions.marshal()); err != nil {
+ return err
+ }
+
+ return nil
+}
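+
+// The writeKeyLog calls above emit the NSS key log format. A hedged
+// debugging sketch (the file path is illustrative):
+//
+//	f, _ := os.OpenFile("/tmp/keys.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
+//	cfg := &tls.Config{KeyLogWriter: f} // consumable by Wireshark and similar tools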
+
+func (hs *serverHandshakeStateTLS13) requestClientCert() bool {
+ return hs.c.config.ClientAuth >= RequestClientCert && !hs.usingPSK
+}
+
+func (hs *serverHandshakeStateTLS13) sendServerCertificate() error {
+ c := hs.c
+
+ // Only one of PSK and certificates is used at a time.
+ if hs.usingPSK {
+ return nil
+ }
+
+ if hs.requestClientCert() {
+ // Request a client certificate
+ certReq := new(certificateRequestMsgTLS13)
+ certReq.ocspStapling = true
+ certReq.scts = true
+ certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms()
+ if c.config.ClientCAs != nil {
+ certReq.certificateAuthorities = c.config.ClientCAs.Subjects()
+ }
+
+ hs.transcript.Write(certReq.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil {
+ return err
+ }
+ }
+
+ certMsg := new(certificateMsgTLS13)
+
+ certMsg.certificate = *hs.cert
+ certMsg.scts = hs.clientHello.scts && len(hs.cert.SignedCertificateTimestamps) > 0
+ certMsg.ocspStapling = hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0
+
+ hs.transcript.Write(certMsg.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
+ return err
+ }
+
+ certVerifyMsg := new(certificateVerifyMsg)
+ certVerifyMsg.hasSignatureAlgorithm = true
+ certVerifyMsg.signatureAlgorithm = hs.sigAlg
+
+ sigType, sigHash, err := typeAndHashFromSignatureScheme(hs.sigAlg)
+ if err != nil {
+ return c.sendAlert(alertInternalError)
+ }
+
+ signed := signedMessage(sigHash, serverSignatureContext, hs.transcript)
+ signOpts := crypto.SignerOpts(sigHash)
+ if sigType == signatureRSAPSS {
+ signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}
+ }
+ sig, err := hs.cert.PrivateKey.(crypto.Signer).Sign(c.config.rand(), signed, signOpts)
+ if err != nil {
+ public := hs.cert.PrivateKey.(crypto.Signer).Public()
+ if rsaKey, ok := public.(*rsa.PublicKey); ok && sigType == signatureRSAPSS &&
+ rsaKey.N.BitLen()/8 < sigHash.Size()*2+2 { // key too small for RSA-PSS
+ c.sendAlert(alertHandshakeFailure)
+ } else {
+ c.sendAlert(alertInternalError)
+ }
+ return errors.New("tls: failed to sign handshake: " + err.Error())
+ }
+ certVerifyMsg.signature = sig
+
+ hs.transcript.Write(certVerifyMsg.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil {
+ return err
+ }
+
+ return nil
+}
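+
+// The RSA-PSS branch above in isolation, as a hedged sketch (signer and
+// digest are assumptions supplied by the caller):
+//
+//	opts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: crypto.SHA256}
+//	sig, err := signer.Sign(rand.Reader, digest, opts) // signer is a crypto.Signer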
+
+func (hs *serverHandshakeStateTLS13) sendServerFinished() error {
+ c := hs.c
+
+ finished := &finishedMsg{
+ verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript),
+ }
+
+ hs.transcript.Write(finished.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
+ return err
+ }
+
+ // Derive secrets that take context through the server Finished.
+
+ hs.masterSecret = hs.suite.extract(nil,
+ hs.suite.deriveSecret(hs.handshakeSecret, "derived", nil))
+
+ hs.trafficSecret = hs.suite.deriveSecret(hs.masterSecret,
+ clientApplicationTrafficLabel, hs.transcript)
+ serverSecret := hs.suite.deriveSecret(hs.masterSecret,
+ serverApplicationTrafficLabel, hs.transcript)
+ c.out.setTrafficSecret(hs.suite, serverSecret)
+
+ err := c.config.writeKeyLog(keyLogLabelClientTraffic, hs.clientHello.random, hs.trafficSecret)
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+ err = c.config.writeKeyLog(keyLogLabelServerTraffic, hs.clientHello.random, serverSecret)
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+
+ c.ekm = hs.suite.exportKeyingMaterial(hs.masterSecret, hs.transcript)
+
+ // If we did not request client certificates, at this point we can
+ // precompute the client finished and roll the transcript forward to send
+ // session tickets in our first flight.
+ if !hs.requestClientCert() {
+ if err := hs.sendSessionTickets(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (hs *serverHandshakeStateTLS13) shouldSendSessionTickets() bool {
+ if hs.c.config.SessionTicketsDisabled {
+ return false
+ }
+
+ // Don't send tickets the client wouldn't use. See RFC 8446, Section 4.2.9.
+ for _, pskMode := range hs.clientHello.pskModes {
+ if pskMode == pskModeDHE {
+ return true
+ }
+ }
+ return false
+}
+
+func (hs *serverHandshakeStateTLS13) sendSessionTickets() error {
+ c := hs.c
+
+ hs.clientFinished = hs.suite.finishedHash(c.in.trafficSecret, hs.transcript)
+ finishedMsg := &finishedMsg{
+ verifyData: hs.clientFinished,
+ }
+ hs.transcript.Write(finishedMsg.marshal())
+
+ if !hs.shouldSendSessionTickets() {
+ return nil
+ }
+
+ resumptionSecret := hs.suite.deriveSecret(hs.masterSecret,
+ resumptionLabel, hs.transcript)
+
+ m := new(newSessionTicketMsgTLS13)
+
+ var certsFromClient [][]byte
+ for _, cert := range c.peerCertificates {
+ certsFromClient = append(certsFromClient, cert.Raw)
+ }
+ state := sessionStateTLS13{
+ cipherSuite: hs.suite.id,
+ createdAt: uint64(c.config.time().Unix()),
+ resumptionSecret: resumptionSecret,
+ certificate: Certificate{
+ Certificate: certsFromClient,
+ OCSPStaple: c.ocspResponse,
+ SignedCertificateTimestamps: c.scts,
+ },
+ }
+ var err error
+ m.label, err = c.encryptTicket(state.marshal())
+ if err != nil {
+ return err
+ }
+ m.lifetime = uint32(maxSessionTicketLifetime / time.Second)
+
+ // ticket_age_add is a random 32-bit value. See RFC 8446, Section 4.6.1.
+ // The value is not stored anywhere; we never need to check the ticket age
+ // because 0-RTT is not supported.
+ ageAdd := make([]byte, 4)
+ _, err = hs.c.config.rand().Read(ageAdd)
+ if err != nil {
+ return err
+ }
+ m.ageAdd = binary.LittleEndian.Uint32(ageAdd)
+
+ // ticket_nonce, which must be unique per connection, is always left at
+ // zero because we only ever send one ticket per connection.
+
+ if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (hs *serverHandshakeStateTLS13) readClientCertificate() error {
+ c := hs.c
+
+ if !hs.requestClientCert() {
+ // Make sure the connection is still being verified whether or not
+ // the server requested a client certificate.
+ if c.config.VerifyConnection != nil {
+ if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
+ c.sendAlert(alertBadCertificate)
+ return err
+ }
+ }
+ return nil
+ }
+
+ // If we requested a client certificate, then the client must send a
+ // certificate message. If it's empty, no CertificateVerify is sent.
+
+ msg, err := c.readHandshake()
+ if err != nil {
+ return err
+ }
+
+ certMsg, ok := msg.(*certificateMsgTLS13)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(certMsg, msg)
+ }
+ hs.transcript.Write(certMsg.marshal())
+
+ if err := c.processCertsFromClient(certMsg.certificate); err != nil {
+ return err
+ }
+
+ if c.config.VerifyConnection != nil {
+ if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
+ c.sendAlert(alertBadCertificate)
+ return err
+ }
+ }
+
+ if len(certMsg.certificate.Certificate) != 0 {
+ msg, err = c.readHandshake()
+ if err != nil {
+ return err
+ }
+
+ certVerify, ok := msg.(*certificateVerifyMsg)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(certVerify, msg)
+ }
+
+ // See RFC 8446, Section 4.4.3.
+ if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms()) {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: client certificate used with invalid signature algorithm")
+ }
+ sigType, sigHash, err := typeAndHashFromSignatureScheme(certVerify.signatureAlgorithm)
+ if err != nil {
+ return c.sendAlert(alertInternalError)
+ }
+ if sigType == signaturePKCS1v15 || sigHash == crypto.SHA1 {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: client certificate used with invalid signature algorithm")
+ }
+ signed := signedMessage(sigHash, clientSignatureContext, hs.transcript)
+ if err := verifyHandshakeSignature(sigType, c.peerCertificates[0].PublicKey,
+ sigHash, signed, certVerify.signature); err != nil {
+ c.sendAlert(alertDecryptError)
+ return errors.New("tls: invalid signature by the client certificate: " + err.Error())
+ }
+
+ hs.transcript.Write(certVerify.marshal())
+ }
+
+ // If we waited for the client certificates before sending session
+ // tickets, we are ready to send them now.
+ if err := hs.sendSessionTickets(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (hs *serverHandshakeStateTLS13) readClientFinished() error {
+ c := hs.c
+
+ msg, err := c.readHandshake()
+ if err != nil {
+ return err
+ }
+
+ finished, ok := msg.(*finishedMsg)
+ if !ok {
+ c.sendAlert(alertUnexpectedMessage)
+ return unexpectedMessageError(finished, msg)
+ }
+
+ if !hmac.Equal(hs.clientFinished, finished.verifyData) {
+ c.sendAlert(alertDecryptError)
+ return errors.New("tls: invalid client finished hash")
+ }
+
+ c.in.setTrafficSecret(hs.suite, hs.trafficSecret)
+
+ return nil
+}
diff --git a/contrib/go/_std_1.18/src/crypto/tls/key_agreement.go b/contrib/go/_std_1.19/src/crypto/tls/key_agreement.go
index c28a64f3a8..c28a64f3a8 100644
--- a/contrib/go/_std_1.18/src/crypto/tls/key_agreement.go
+++ b/contrib/go/_std_1.19/src/crypto/tls/key_agreement.go
diff --git a/contrib/go/_std_1.18/src/crypto/tls/key_schedule.go b/contrib/go/_std_1.19/src/crypto/tls/key_schedule.go
index 314016979a..314016979a 100644
--- a/contrib/go/_std_1.18/src/crypto/tls/key_schedule.go
+++ b/contrib/go/_std_1.19/src/crypto/tls/key_schedule.go
diff --git a/contrib/go/_std_1.19/src/crypto/tls/notboring.go b/contrib/go/_std_1.19/src/crypto/tls/notboring.go
new file mode 100644
index 0000000000..7d85b39c59
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/tls/notboring.go
@@ -0,0 +1,20 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !boringcrypto
+
+package tls
+
+func needFIPS() bool { return false }
+
+func supportedSignatureAlgorithms() []SignatureScheme {
+ return defaultSupportedSignatureAlgorithms
+}
+
+func fipsMinVersion(c *Config) uint16 { panic("fipsMinVersion") }
+func fipsMaxVersion(c *Config) uint16 { panic("fipsMaxVersion") }
+func fipsCurvePreferences(c *Config) []CurveID { panic("fipsCurvePreferences") }
+func fipsCipherSuites(c *Config) []uint16 { panic("fipsCipherSuites") }
+
+var fipsSupportedSignatureAlgorithms []SignatureScheme
diff --git a/contrib/go/_std_1.18/src/crypto/tls/prf.go b/contrib/go/_std_1.19/src/crypto/tls/prf.go
index 13bfa009ca..13bfa009ca 100644
--- a/contrib/go/_std_1.18/src/crypto/tls/prf.go
+++ b/contrib/go/_std_1.19/src/crypto/tls/prf.go
diff --git a/contrib/go/_std_1.18/src/crypto/tls/ticket.go b/contrib/go/_std_1.19/src/crypto/tls/ticket.go
index 6c1d20da20..6c1d20da20 100644
--- a/contrib/go/_std_1.18/src/crypto/tls/ticket.go
+++ b/contrib/go/_std_1.19/src/crypto/tls/ticket.go
diff --git a/contrib/go/_std_1.18/src/crypto/tls/tls.go b/contrib/go/_std_1.19/src/crypto/tls/tls.go
index b529c70523..b529c70523 100644
--- a/contrib/go/_std_1.18/src/crypto/tls/tls.go
+++ b/contrib/go/_std_1.19/src/crypto/tls/tls.go
diff --git a/contrib/go/_std_1.19/src/crypto/x509/cert_pool.go b/contrib/go/_std_1.19/src/crypto/x509/cert_pool.go
new file mode 100644
index 0000000000..e9b2c122b9
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/x509/cert_pool.go
@@ -0,0 +1,268 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/pem"
+ "sync"
+)
+
+type sum224 [sha256.Size224]byte
+
+// CertPool is a set of certificates.
+type CertPool struct {
+ byName map[string][]int // cert.RawSubject => index into lazyCerts
+
+ // lazyCerts contains funcs that return a certificate,
+ // lazily parsing/decompressing it as needed.
+ lazyCerts []lazyCert
+
+ // haveSum maps from sum224(cert.Raw) to true. It's used only
+ // for AddCert duplicate detection, to avoid CertPool.contains
+ // calls in the AddCert path (because the contains method can
+ // call getCert and otherwise negate savings from lazy getCert
+ // funcs).
+ haveSum map[sum224]bool
+
+ // systemPool indicates whether this is a special pool derived from the
+ // system roots. If it includes additional roots, it requires doing two
+ // verifications, one using the roots provided by the caller, and one using
+ // the system platform verifier.
+ systemPool bool
+}
+
+// lazyCert is minimal metadata about a Cert and a func to retrieve it
+// in its normal expanded *Certificate form.
+type lazyCert struct {
+ // rawSubject is the Certificate.RawSubject value.
+ // It's the same as the CertPool.byName key, but in []byte
+ // form to make CertPool.Subjects (as used by crypto/tls) do
+ // fewer allocations.
+ rawSubject []byte
+
+ // getCert returns the certificate.
+ //
+ // It is not meant to do network operations or anything else
+ // where a failure is likely; the func is meant to lazily
+ // parse/decompress data that is already known to be good. The
+ // error in the signature is primarily meant for the case where a
+ // cert file that existed on local disk when the program started up
+ // is deleted later, before it's read.
+ getCert func() (*Certificate, error)
+}
+
+// NewCertPool returns a new, empty CertPool.
+func NewCertPool() *CertPool {
+ return &CertPool{
+ byName: make(map[string][]int),
+ haveSum: make(map[sum224]bool),
+ }
+}
+
+// len returns the number of certs in the set.
+// A nil set is a valid empty set.
+func (s *CertPool) len() int {
+ if s == nil {
+ return 0
+ }
+ return len(s.lazyCerts)
+}
+
+// cert returns cert index n in s.
+func (s *CertPool) cert(n int) (*Certificate, error) {
+ return s.lazyCerts[n].getCert()
+}
+
+// Clone returns a copy of s.
+func (s *CertPool) Clone() *CertPool {
+ p := &CertPool{
+ byName: make(map[string][]int, len(s.byName)),
+ lazyCerts: make([]lazyCert, len(s.lazyCerts)),
+ haveSum: make(map[sum224]bool, len(s.haveSum)),
+ systemPool: s.systemPool,
+ }
+ for k, v := range s.byName {
+ indexes := make([]int, len(v))
+ copy(indexes, v)
+ p.byName[k] = indexes
+ }
+ for k := range s.haveSum {
+ p.haveSum[k] = true
+ }
+ copy(p.lazyCerts, s.lazyCerts)
+ return p
+}
+
+// SystemCertPool returns a copy of the system cert pool.
+//
+// On Unix systems other than macOS the environment variables SSL_CERT_FILE and
+// SSL_CERT_DIR can be used to override the system default locations for the SSL
+// certificate file and SSL certificate files directory, respectively. The
+// latter can be a colon-separated list.
+//
+// Any mutations to the returned pool are not written to disk and do not affect
+// any other pool returned by SystemCertPool.
+//
+// New changes in the system cert pool might not be reflected in subsequent calls.
+func SystemCertPool() (*CertPool, error) {
+ if sysRoots := systemRootsPool(); sysRoots != nil {
+ return sysRoots.Clone(), nil
+ }
+
+ return loadSystemRoots()
+}
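+
+// Usage sketch (hedged): callers commonly start from the system pool and
+// fall back to an empty pool when it is unavailable.
+//
+//	roots, err := x509.SystemCertPool()
+//	if err != nil {
+//		roots = x509.NewCertPool()
+//	}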
+
+// findPotentialParents returns the certificates in s which might have signed
+// cert.
+func (s *CertPool) findPotentialParents(cert *Certificate) []*Certificate {
+ if s == nil {
+ return nil
+ }
+
+ // Consider all candidates where cert.Issuer matches the candidate's
+ // Subject. When picking possible candidates, the list is built in
+ // order of match plausibility so as to save cycles in buildChains:
+ // AKID and SKID match
+ // AKID present, SKID missing / AKID missing, SKID present
+ // AKID and SKID don't match
+ var matchingKeyID, oneKeyID, mismatchKeyID []*Certificate
+ for _, c := range s.byName[string(cert.RawIssuer)] {
+ candidate, err := s.cert(c)
+ if err != nil {
+ continue
+ }
+ kidMatch := bytes.Equal(candidate.SubjectKeyId, cert.AuthorityKeyId)
+ switch {
+ case kidMatch:
+ matchingKeyID = append(matchingKeyID, candidate)
+ case (len(candidate.SubjectKeyId) == 0 && len(cert.AuthorityKeyId) > 0) ||
+ (len(candidate.SubjectKeyId) > 0 && len(cert.AuthorityKeyId) == 0):
+ oneKeyID = append(oneKeyID, candidate)
+ default:
+ mismatchKeyID = append(mismatchKeyID, candidate)
+ }
+ }
+
+ found := len(matchingKeyID) + len(oneKeyID) + len(mismatchKeyID)
+ if found == 0 {
+ return nil
+ }
+ candidates := make([]*Certificate, 0, found)
+ candidates = append(candidates, matchingKeyID...)
+ candidates = append(candidates, oneKeyID...)
+ candidates = append(candidates, mismatchKeyID...)
+ return candidates
+}
+
+func (s *CertPool) contains(cert *Certificate) bool {
+ if s == nil {
+ return false
+ }
+ return s.haveSum[sha256.Sum224(cert.Raw)]
+}
+
+// AddCert adds a certificate to a pool.
+func (s *CertPool) AddCert(cert *Certificate) {
+ if cert == nil {
+ panic("adding nil Certificate to CertPool")
+ }
+ s.addCertFunc(sha256.Sum224(cert.Raw), string(cert.RawSubject), func() (*Certificate, error) {
+ return cert, nil
+ })
+}
+
+// addCertFunc adds metadata about a certificate to a pool, along with
+// a func to fetch that certificate later when needed.
+//
+// The rawSubject is Certificate.RawSubject and must be non-empty.
+// The getCert func may be called 0 or more times.
+func (s *CertPool) addCertFunc(rawSum224 sum224, rawSubject string, getCert func() (*Certificate, error)) {
+ if getCert == nil {
+ panic("getCert can't be nil")
+ }
+
+ // Check that the certificate isn't being added twice.
+ if s.haveSum[rawSum224] {
+ return
+ }
+
+ s.haveSum[rawSum224] = true
+ s.lazyCerts = append(s.lazyCerts, lazyCert{
+ rawSubject: []byte(rawSubject),
+ getCert: getCert,
+ })
+ s.byName[rawSubject] = append(s.byName[rawSubject], len(s.lazyCerts)-1)
+}
+
+// AppendCertsFromPEM attempts to parse a series of PEM encoded certificates.
+// It appends any certificates found to s and reports whether any certificates
+// were successfully parsed.
+//
+// On many Linux systems, /etc/ssl/cert.pem will contain the system wide set
+// of root CAs in a format suitable for this function.
+func (s *CertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) {
+ for len(pemCerts) > 0 {
+ var block *pem.Block
+ block, pemCerts = pem.Decode(pemCerts)
+ if block == nil {
+ break
+ }
+ if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
+ continue
+ }
+
+ certBytes := block.Bytes
+ cert, err := ParseCertificate(certBytes)
+ if err != nil {
+ continue
+ }
+ var lazyCert struct {
+ sync.Once
+ v *Certificate
+ }
+ s.addCertFunc(sha256.Sum224(cert.Raw), string(cert.RawSubject), func() (*Certificate, error) {
+ lazyCert.Do(func() {
+ // This can't fail, as the same bytes already parsed successfully above.
+ lazyCert.v, _ = ParseCertificate(certBytes)
+ certBytes = nil
+ })
+ return lazyCert.v, nil
+ })
+ ok = true
+ }
+
+ return ok
+}
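+
+// Usage sketch (hedged; the file path is illustrative):
+//
+//	pemData, err := os.ReadFile("/etc/ssl/cert.pem")
+//	if err == nil {
+//		pool := x509.NewCertPool()
+//		if !pool.AppendCertsFromPEM(pemData) {
+//			// no certificates could be parsed from the file
+//		}
+//	}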
+
+// Subjects returns a list of the DER-encoded subjects of
+// all of the certificates in the pool.
+//
+// Deprecated: if s was returned by SystemCertPool, Subjects
+// will not include the system roots.
+func (s *CertPool) Subjects() [][]byte {
+ res := make([][]byte, s.len())
+ for i, lc := range s.lazyCerts {
+ res[i] = lc.rawSubject
+ }
+ return res
+}
+
+// Equal reports whether s and other are equal.
+func (s *CertPool) Equal(other *CertPool) bool {
+ if s == nil || other == nil {
+ return s == other
+ }
+ if s.systemPool != other.systemPool || len(s.haveSum) != len(other.haveSum) {
+ return false
+ }
+ for h := range s.haveSum {
+ if !other.haveSum[h] {
+ return false
+ }
+ }
+ return true
+}
diff --git a/contrib/go/_std_1.19/src/crypto/x509/internal/macos/corefoundation.go b/contrib/go/_std_1.19/src/crypto/x509/internal/macos/corefoundation.go
new file mode 100644
index 0000000000..2677ff706a
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/x509/internal/macos/corefoundation.go
@@ -0,0 +1,214 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin
+
+// Package macOS provides cgo-less wrappers for Core Foundation and
+// Security.framework, similarly to how package syscall provides access to
+// libSystem.dylib.
+package macOS
+
+import (
+ "errors"
+ "internal/abi"
+ "reflect"
+ "runtime"
+ "time"
+ "unsafe"
+)
+
+// Core Foundation linker flags for the external linker. See Issue 42459.
+//
+//go:cgo_ldflag "-framework"
+//go:cgo_ldflag "CoreFoundation"
+
+// CFRef is an opaque reference to a Core Foundation object. It is a pointer,
+// but to memory not owned by Go, so not an unsafe.Pointer.
+type CFRef uintptr
+
+// CFDataToSlice returns a copy of the contents of data as a byte slice.
+func CFDataToSlice(data CFRef) []byte {
+ length := CFDataGetLength(data)
+ ptr := CFDataGetBytePtr(data)
+ src := (*[1 << 20]byte)(unsafe.Pointer(ptr))[:length:length]
+ out := make([]byte, length)
+ copy(out, src)
+ return out
+}
+
+// CFStringToString returns a Go string representation of the passed
+// in CFString, or an empty string if it's invalid.
+func CFStringToString(ref CFRef) string {
+ data, err := CFStringCreateExternalRepresentation(ref)
+ if err != nil {
+ return ""
+ }
+ b := CFDataToSlice(data)
+ CFRelease(data)
+ return string(b)
+}
+
+// TimeToCFDateRef converts a time.Time into an Apple CFDateRef.
+func TimeToCFDateRef(t time.Time) CFRef {
+ secs := t.Sub(time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC)).Seconds()
+ ref := CFDateCreate(secs)
+ return ref
+}
+
+type CFString CFRef
+
+const kCFAllocatorDefault = 0
+const kCFStringEncodingUTF8 = 0x08000100
+
+//go:cgo_import_dynamic x509_CFDataCreate CFDataCreate "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
+
+func BytesToCFData(b []byte) CFRef {
+ p := unsafe.Pointer((*reflect.SliceHeader)(unsafe.Pointer(&b)).Data)
+ ret := syscall(abi.FuncPCABI0(x509_CFDataCreate_trampoline), kCFAllocatorDefault, uintptr(p), uintptr(len(b)), 0, 0, 0)
+ runtime.KeepAlive(p)
+ return CFRef(ret)
+}
+func x509_CFDataCreate_trampoline()
+
+//go:cgo_import_dynamic x509_CFStringCreateWithBytes CFStringCreateWithBytes "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
+
+// StringToCFString returns a copy of the UTF-8 contents of s as a new CFString.
+func StringToCFString(s string) CFString {
+ p := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&s)).Data)
+ ret := syscall(abi.FuncPCABI0(x509_CFStringCreateWithBytes_trampoline), kCFAllocatorDefault, uintptr(p),
+ uintptr(len(s)), uintptr(kCFStringEncodingUTF8), 0 /* isExternalRepresentation */, 0)
+ runtime.KeepAlive(p)
+ return CFString(ret)
+}
+func x509_CFStringCreateWithBytes_trampoline()
+
+//go:cgo_import_dynamic x509_CFDictionaryGetValueIfPresent CFDictionaryGetValueIfPresent "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
+
+func CFDictionaryGetValueIfPresent(dict CFRef, key CFString) (value CFRef, ok bool) {
+ ret := syscall(abi.FuncPCABI0(x509_CFDictionaryGetValueIfPresent_trampoline), uintptr(dict), uintptr(key),
+ uintptr(unsafe.Pointer(&value)), 0, 0, 0)
+ if ret == 0 {
+ return 0, false
+ }
+ return value, true
+}
+func x509_CFDictionaryGetValueIfPresent_trampoline()
+
+const kCFNumberSInt32Type = 3
+
+//go:cgo_import_dynamic x509_CFNumberGetValue CFNumberGetValue "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
+
+func CFNumberGetValue(num CFRef) (int32, error) {
+ var value int32
+ ret := syscall(abi.FuncPCABI0(x509_CFNumberGetValue_trampoline), uintptr(num), uintptr(kCFNumberSInt32Type),
+ uintptr(unsafe.Pointer(&value)), 0, 0, 0)
+ if ret == 0 {
+ return 0, errors.New("CFNumberGetValue call failed")
+ }
+ return value, nil
+}
+func x509_CFNumberGetValue_trampoline()
+
+//go:cgo_import_dynamic x509_CFDataGetLength CFDataGetLength "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
+
+func CFDataGetLength(data CFRef) int {
+ ret := syscall(abi.FuncPCABI0(x509_CFDataGetLength_trampoline), uintptr(data), 0, 0, 0, 0, 0)
+ return int(ret)
+}
+func x509_CFDataGetLength_trampoline()
+
+//go:cgo_import_dynamic x509_CFDataGetBytePtr CFDataGetBytePtr "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
+
+func CFDataGetBytePtr(data CFRef) uintptr {
+ ret := syscall(abi.FuncPCABI0(x509_CFDataGetBytePtr_trampoline), uintptr(data), 0, 0, 0, 0, 0)
+ return ret
+}
+func x509_CFDataGetBytePtr_trampoline()
+
+//go:cgo_import_dynamic x509_CFArrayGetCount CFArrayGetCount "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
+
+func CFArrayGetCount(array CFRef) int {
+ ret := syscall(abi.FuncPCABI0(x509_CFArrayGetCount_trampoline), uintptr(array), 0, 0, 0, 0, 0)
+ return int(ret)
+}
+func x509_CFArrayGetCount_trampoline()
+
+//go:cgo_import_dynamic x509_CFArrayGetValueAtIndex CFArrayGetValueAtIndex "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
+
+func CFArrayGetValueAtIndex(array CFRef, index int) CFRef {
+ ret := syscall(abi.FuncPCABI0(x509_CFArrayGetValueAtIndex_trampoline), uintptr(array), uintptr(index), 0, 0, 0, 0)
+ return CFRef(ret)
+}
+func x509_CFArrayGetValueAtIndex_trampoline()
+
+//go:cgo_import_dynamic x509_CFEqual CFEqual "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
+
+func CFEqual(a, b CFRef) bool {
+ ret := syscall(abi.FuncPCABI0(x509_CFEqual_trampoline), uintptr(a), uintptr(b), 0, 0, 0, 0)
+ return ret == 1
+}
+func x509_CFEqual_trampoline()
+
+//go:cgo_import_dynamic x509_CFRelease CFRelease "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
+
+func CFRelease(ref CFRef) {
+ syscall(abi.FuncPCABI0(x509_CFRelease_trampoline), uintptr(ref), 0, 0, 0, 0, 0)
+}
+func x509_CFRelease_trampoline()
+
+//go:cgo_import_dynamic x509_CFArrayCreateMutable CFArrayCreateMutable "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
+
+func CFArrayCreateMutable() CFRef {
+ ret := syscall(abi.FuncPCABI0(x509_CFArrayCreateMutable_trampoline), kCFAllocatorDefault, 0, 0 /* kCFTypeArrayCallBacks */, 0, 0, 0)
+ return CFRef(ret)
+}
+func x509_CFArrayCreateMutable_trampoline()
+
+//go:cgo_import_dynamic x509_CFArrayAppendValue CFArrayAppendValue "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
+
+func CFArrayAppendValue(array CFRef, val CFRef) {
+ syscall(abi.FuncPCABI0(x509_CFArrayAppendValue_trampoline), uintptr(array), uintptr(val), 0, 0, 0, 0)
+}
+func x509_CFArrayAppendValue_trampoline()
+
+//go:cgo_import_dynamic x509_CFDateCreate CFDateCreate "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
+
+func CFDateCreate(seconds float64) CFRef {
+ ret := syscall(abi.FuncPCABI0(x509_CFDateCreate_trampoline), kCFAllocatorDefault, 0, 0, 0, 0, seconds)
+ return CFRef(ret)
+}
+func x509_CFDateCreate_trampoline()
+
+//go:cgo_import_dynamic x509_CFErrorCopyDescription CFErrorCopyDescription "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
+
+func CFErrorCopyDescription(errRef CFRef) CFRef {
+ ret := syscall(abi.FuncPCABI0(x509_CFErrorCopyDescription_trampoline), uintptr(errRef), 0, 0, 0, 0, 0)
+ return CFRef(ret)
+}
+func x509_CFErrorCopyDescription_trampoline()
+
+//go:cgo_import_dynamic x509_CFStringCreateExternalRepresentation CFStringCreateExternalRepresentation "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
+
+func CFStringCreateExternalRepresentation(strRef CFRef) (CFRef, error) {
+ ret := syscall(abi.FuncPCABI0(x509_CFStringCreateExternalRepresentation_trampoline), kCFAllocatorDefault, uintptr(strRef), kCFStringEncodingUTF8, 0, 0, 0)
+ if ret == 0 {
+ return 0, errors.New("string can't be represented as UTF-8")
+ }
+ return CFRef(ret), nil
+}
+func x509_CFStringCreateExternalRepresentation_trampoline()
+
+// syscall is implemented in the runtime package (runtime/sys_darwin.go)
+func syscall(fn, a1, a2, a3, a4, a5 uintptr, f1 float64) uintptr
+
+// ReleaseCFArray iterates through an array, releasing its contents, and then
+// releases the array itself. This is necessary because we cannot easily set
+// the CFArrayCallBacks argument when creating CFArrays.
+func ReleaseCFArray(array CFRef) {
+ for i := 0; i < CFArrayGetCount(array); i++ {
+ ref := CFArrayGetValueAtIndex(array, i)
+ CFRelease(ref)
+ }
+ CFRelease(array)
+}
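Editor's note: as a quick illustration of the ownership contract these bindings imply, here is a hedged sketch (not part of the diff; the helper name buildCertArray is hypothetical) of assembling a CFArray of CFData values inside this package. Every Create/Copy call returns a +1 reference, and because the array is created without kCFTypeArrayCallBacks it does not retain or release its elements — exactly the gap ReleaseCFArray fills.

```go
// buildCertArray is a hypothetical in-package helper, shown only to
// illustrate the reference-counting contract of the bindings above.
func buildCertArray(ders [][]byte) CFRef {
	array := CFArrayCreateMutable()
	for _, der := range ders {
		data := BytesToCFData(der) // +1 reference; the array will NOT retain it
		CFArrayAppendValue(array, data)
	}
	// Callers must balance this with ReleaseCFArray(array), which releases
	// each element exactly once and then the array itself.
	return array
}
```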
diff --git a/contrib/go/_std_1.18/src/crypto/x509/internal/macos/corefoundation.s b/contrib/go/_std_1.19/src/crypto/x509/internal/macos/corefoundation.s
index d69f72f795..d69f72f795 100644
--- a/contrib/go/_std_1.18/src/crypto/x509/internal/macos/corefoundation.s
+++ b/contrib/go/_std_1.19/src/crypto/x509/internal/macos/corefoundation.s
diff --git a/contrib/go/_std_1.19/src/crypto/x509/internal/macos/security.go b/contrib/go/_std_1.19/src/crypto/x509/internal/macos/security.go
new file mode 100644
index 0000000000..d8147ba8ba
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/x509/internal/macos/security.go
@@ -0,0 +1,240 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin
+
+package macOS
+
+import (
+ "errors"
+ "fmt"
+ "internal/abi"
+ "strconv"
+ "unsafe"
+)
+
+// Security.framework linker flags for the external linker. See Issue 42459.
+//
+//go:cgo_ldflag "-framework"
+//go:cgo_ldflag "Security"
+
+// Based on https://opensource.apple.com/source/Security/Security-59306.41.2/base/Security.h
+
+type SecTrustSettingsResult int32
+
+const (
+ SecTrustSettingsResultInvalid SecTrustSettingsResult = iota
+ SecTrustSettingsResultTrustRoot
+ SecTrustSettingsResultTrustAsRoot
+ SecTrustSettingsResultDeny
+ SecTrustSettingsResultUnspecified
+)
+
+type SecTrustResultType int32
+
+const (
+ SecTrustResultInvalid SecTrustResultType = iota
+ SecTrustResultProceed
+ SecTrustResultConfirm // deprecated
+ SecTrustResultDeny
+ SecTrustResultUnspecified
+ SecTrustResultRecoverableTrustFailure
+ SecTrustResultFatalTrustFailure
+ SecTrustResultOtherError
+)
+
+type SecTrustSettingsDomain int32
+
+const (
+ SecTrustSettingsDomainUser SecTrustSettingsDomain = iota
+ SecTrustSettingsDomainAdmin
+ SecTrustSettingsDomainSystem
+)
+
+type OSStatus struct {
+ call string
+ status int32
+}
+
+func (s OSStatus) Error() string {
+ return s.call + " error: " + strconv.Itoa(int(s.status))
+}
+
+// Dictionary keys are defined as build-time strings with CFSTR, but the Go
+// linker's internal linking mode can't handle CFSTR relocations. Create our
+// own dynamic strings instead and just never release them.
+//
+// Note that this might be the only thing that can break over time if
+// these values change, as the ABI arguably requires using the strings
+// pointed to by the symbols, not values that happen to be equal to them.
+
+var SecTrustSettingsResultKey = StringToCFString("kSecTrustSettingsResult")
+var SecTrustSettingsPolicy = StringToCFString("kSecTrustSettingsPolicy")
+var SecTrustSettingsPolicyString = StringToCFString("kSecTrustSettingsPolicyString")
+var SecPolicyOid = StringToCFString("SecPolicyOid")
+var SecPolicyAppleSSL = StringToCFString("1.2.840.113635.100.1.3") // defined by POLICYMACRO
+
+var ErrNoTrustSettings = errors.New("no trust settings found")
+
+const errSecNoTrustSettings = -25263
+
+//go:cgo_import_dynamic x509_SecTrustSettingsCopyCertificates SecTrustSettingsCopyCertificates "/System/Library/Frameworks/Security.framework/Versions/A/Security"
+
+func SecTrustSettingsCopyCertificates(domain SecTrustSettingsDomain) (certArray CFRef, err error) {
+ ret := syscall(abi.FuncPCABI0(x509_SecTrustSettingsCopyCertificates_trampoline), uintptr(domain),
+ uintptr(unsafe.Pointer(&certArray)), 0, 0, 0, 0)
+ if int32(ret) == errSecNoTrustSettings {
+ return 0, ErrNoTrustSettings
+ } else if ret != 0 {
+ return 0, OSStatus{"SecTrustSettingsCopyCertificates", int32(ret)}
+ }
+ return certArray, nil
+}
+func x509_SecTrustSettingsCopyCertificates_trampoline()
+
+const errSecItemNotFound = -25300
+
+//go:cgo_import_dynamic x509_SecTrustSettingsCopyTrustSettings SecTrustSettingsCopyTrustSettings "/System/Library/Frameworks/Security.framework/Versions/A/Security"
+
+func SecTrustSettingsCopyTrustSettings(cert CFRef, domain SecTrustSettingsDomain) (trustSettings CFRef, err error) {
+ ret := syscall(abi.FuncPCABI0(x509_SecTrustSettingsCopyTrustSettings_trampoline), uintptr(cert), uintptr(domain),
+ uintptr(unsafe.Pointer(&trustSettings)), 0, 0, 0)
+ if int32(ret) == errSecItemNotFound {
+ return 0, ErrNoTrustSettings
+ } else if ret != 0 {
+ return 0, OSStatus{"SecTrustSettingsCopyTrustSettings", int32(ret)}
+ }
+ return trustSettings, nil
+}
+func x509_SecTrustSettingsCopyTrustSettings_trampoline()
+
+//go:cgo_import_dynamic x509_SecPolicyCopyProperties SecPolicyCopyProperties "/System/Library/Frameworks/Security.framework/Versions/A/Security"
+
+func SecPolicyCopyProperties(policy CFRef) CFRef {
+ ret := syscall(abi.FuncPCABI0(x509_SecPolicyCopyProperties_trampoline), uintptr(policy), 0, 0, 0, 0, 0)
+ return CFRef(ret)
+}
+func x509_SecPolicyCopyProperties_trampoline()
+
+//go:cgo_import_dynamic x509_SecTrustCreateWithCertificates SecTrustCreateWithCertificates "/System/Library/Frameworks/Security.framework/Versions/A/Security"
+
+func SecTrustCreateWithCertificates(certs CFRef, policies CFRef) (CFRef, error) {
+ var trustObj CFRef
+ ret := syscall(abi.FuncPCABI0(x509_SecTrustCreateWithCertificates_trampoline), uintptr(certs), uintptr(policies),
+ uintptr(unsafe.Pointer(&trustObj)), 0, 0, 0)
+ if int32(ret) != 0 {
+ return 0, OSStatus{"SecTrustCreateWithCertificates", int32(ret)}
+ }
+ return trustObj, nil
+}
+func x509_SecTrustCreateWithCertificates_trampoline()
+
+//go:cgo_import_dynamic x509_SecCertificateCreateWithData SecCertificateCreateWithData "/System/Library/Frameworks/Security.framework/Versions/A/Security"
+
+func SecCertificateCreateWithData(b []byte) (CFRef, error) {
+ data := BytesToCFData(b)
+ defer CFRelease(data)
+ ret := syscall(abi.FuncPCABI0(x509_SecCertificateCreateWithData_trampoline), kCFAllocatorDefault, uintptr(data), 0, 0, 0, 0)
+ // Returns NULL if the data passed in the data parameter is not a valid
+ // DER-encoded X.509 certificate.
+ if ret == 0 {
+ return 0, errors.New("SecCertificateCreateWithData: invalid certificate")
+ }
+ return CFRef(ret), nil
+}
+func x509_SecCertificateCreateWithData_trampoline()
+
+//go:cgo_import_dynamic x509_SecPolicyCreateSSL SecPolicyCreateSSL "/System/Library/Frameworks/Security.framework/Versions/A/Security"
+
+func SecPolicyCreateSSL(name string) CFRef {
+ var hostname CFString
+ if name != "" {
+ hostname = StringToCFString(name)
+ defer CFRelease(CFRef(hostname))
+ }
+ ret := syscall(abi.FuncPCABI0(x509_SecPolicyCreateSSL_trampoline), 1 /* true */, uintptr(hostname), 0, 0, 0, 0)
+ return CFRef(ret)
+}
+func x509_SecPolicyCreateSSL_trampoline()
+
+//go:cgo_import_dynamic x509_SecTrustSetVerifyDate SecTrustSetVerifyDate "/System/Library/Frameworks/Security.framework/Versions/A/Security"
+
+func SecTrustSetVerifyDate(trustObj CFRef, dateRef CFRef) error {
+ ret := syscall(abi.FuncPCABI0(x509_SecTrustSetVerifyDate_trampoline), uintptr(trustObj), uintptr(dateRef), 0, 0, 0, 0)
+ if int32(ret) != 0 {
+ return OSStatus{"SecTrustSetVerifyDate", int32(ret)}
+ }
+ return nil
+}
+func x509_SecTrustSetVerifyDate_trampoline()
+
+//go:cgo_import_dynamic x509_SecTrustEvaluate SecTrustEvaluate "/System/Library/Frameworks/Security.framework/Versions/A/Security"
+
+func SecTrustEvaluate(trustObj CFRef) (CFRef, error) {
+ var result CFRef
+ ret := syscall(abi.FuncPCABI0(x509_SecTrustEvaluate_trampoline), uintptr(trustObj), uintptr(unsafe.Pointer(&result)), 0, 0, 0, 0)
+ if int32(ret) != 0 {
+ return 0, OSStatus{"SecTrustEvaluate", int32(ret)}
+ }
+ return CFRef(result), nil
+}
+func x509_SecTrustEvaluate_trampoline()
+
+//go:cgo_import_dynamic x509_SecTrustGetResult SecTrustGetResult "/System/Library/Frameworks/Security.framework/Versions/A/Security"
+
+func SecTrustGetResult(trustObj CFRef, result CFRef) (CFRef, CFRef, error) {
+ var chain, info CFRef
+ ret := syscall(abi.FuncPCABI0(x509_SecTrustGetResult_trampoline), uintptr(trustObj), uintptr(unsafe.Pointer(&result)),
+ uintptr(unsafe.Pointer(&chain)), uintptr(unsafe.Pointer(&info)), 0, 0)
+ if int32(ret) != 0 {
+ return 0, 0, OSStatus{"SecTrustGetResult", int32(ret)}
+ }
+ return chain, info, nil
+}
+func x509_SecTrustGetResult_trampoline()
+
+//go:cgo_import_dynamic x509_SecTrustEvaluateWithError SecTrustEvaluateWithError "/System/Library/Frameworks/Security.framework/Versions/A/Security"
+
+func SecTrustEvaluateWithError(trustObj CFRef) error {
+ var errRef CFRef
+ ret := syscall(abi.FuncPCABI0(x509_SecTrustEvaluateWithError_trampoline), uintptr(trustObj), uintptr(unsafe.Pointer(&errRef)), 0, 0, 0, 0)
+ if int32(ret) != 1 {
+ errStr := CFErrorCopyDescription(errRef)
+ err := fmt.Errorf("x509: %s", CFStringToString(errStr))
+ CFRelease(errRef)
+ CFRelease(errStr)
+ return err
+ }
+ return nil
+}
+func x509_SecTrustEvaluateWithError_trampoline()
+
+//go:cgo_import_dynamic x509_SecTrustGetCertificateCount SecTrustGetCertificateCount "/System/Library/Frameworks/Security.framework/Versions/A/Security"
+
+func SecTrustGetCertificateCount(trustObj CFRef) int {
+ ret := syscall(abi.FuncPCABI0(x509_SecTrustGetCertificateCount_trampoline), uintptr(trustObj), 0, 0, 0, 0, 0)
+ return int(ret)
+}
+func x509_SecTrustGetCertificateCount_trampoline()
+
+//go:cgo_import_dynamic x509_SecTrustGetCertificateAtIndex SecTrustGetCertificateAtIndex "/System/Library/Frameworks/Security.framework/Versions/A/Security"
+
+func SecTrustGetCertificateAtIndex(trustObj CFRef, i int) CFRef {
+ ret := syscall(abi.FuncPCABI0(x509_SecTrustGetCertificateAtIndex_trampoline), uintptr(trustObj), uintptr(i), 0, 0, 0, 0)
+ return CFRef(ret)
+}
+func x509_SecTrustGetCertificateAtIndex_trampoline()
+
+//go:cgo_import_dynamic x509_SecCertificateCopyData SecCertificateCopyData "/System/Library/Frameworks/Security.framework/Versions/A/Security"
+
+func SecCertificateCopyData(cert CFRef) ([]byte, error) {
+ ret := syscall(abi.FuncPCABI0(x509_SecCertificateCopyData_trampoline), uintptr(cert), 0, 0, 0, 0, 0)
+ if ret == 0 {
+ return nil, errors.New("x509: invalid certificate object")
+ }
+ b := CFDataToSlice(CFRef(ret))
+ CFRelease(CFRef(ret))
+ return b, nil
+}
+func x509_SecCertificateCopyData_trampoline()
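Editor's note: taken together, these bindings are the raw material for macOS-side chain verification. The sketch below is not part of the diff; verifySketch is a hypothetical name, and the code assumes it lives where these internal bindings are visible. It loosely mirrors how the darwin verifier chains them: certificate → trust object → evaluation → verified chain.

```go
// verifySketch strings the Security bindings together. cfSeconds is a
// timestamp relative to the Core Foundation epoch (2001-01-01 UTC).
func verifySketch(leafDER []byte, hostname string, cfSeconds float64) ([][]byte, error) {
	cert, err := SecCertificateCreateWithData(leafDER)
	if err != nil {
		return nil, err
	}
	certs := CFArrayCreateMutable()
	CFArrayAppendValue(certs, cert)
	defer ReleaseCFArray(certs) // also releases cert: the array does not retain

	// SecTrustCreateWithCertificates is documented to accept either a
	// single policy or an array of policies; a single one is used here.
	policy := SecPolicyCreateSSL(hostname)
	defer CFRelease(policy)

	trust, err := SecTrustCreateWithCertificates(certs, policy)
	if err != nil {
		return nil, err
	}
	defer CFRelease(trust)

	date := CFDateCreate(cfSeconds)
	defer CFRelease(date)
	if err := SecTrustSetVerifyDate(trust, date); err != nil {
		return nil, err
	}

	if err := SecTrustEvaluateWithError(trust); err != nil {
		return nil, err
	}

	// Copy the verified chain back out as DER.
	var chain [][]byte
	for i := 0; i < SecTrustGetCertificateCount(trust); i++ {
		der, err := SecCertificateCopyData(SecTrustGetCertificateAtIndex(trust, i))
		if err != nil {
			return nil, err
		}
		chain = append(chain, der)
	}
	return chain, nil
}
```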
diff --git a/contrib/go/_std_1.18/src/crypto/x509/internal/macos/security.s b/contrib/go/_std_1.19/src/crypto/x509/internal/macos/security.s
index 36f814f3cd..36f814f3cd 100644
--- a/contrib/go/_std_1.18/src/crypto/x509/internal/macos/security.s
+++ b/contrib/go/_std_1.19/src/crypto/x509/internal/macos/security.s
diff --git a/contrib/go/_std_1.19/src/crypto/x509/notboring.go b/contrib/go/_std_1.19/src/crypto/x509/notboring.go
new file mode 100644
index 0000000000..c83a7272c9
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/x509/notboring.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !boringcrypto
+
+package x509
+
+func boringAllowCert(c *Certificate) bool { return true }
diff --git a/contrib/go/_std_1.19/src/crypto/x509/parser.go b/contrib/go/_std_1.19/src/crypto/x509/parser.go
new file mode 100644
index 0000000000..a2d3d80964
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/x509/parser.go
@@ -0,0 +1,1162 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "bytes"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+ "fmt"
+ "math/big"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf16"
+ "unicode/utf8"
+
+ "golang.org/x/crypto/cryptobyte"
+ cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
+)
+
+// isPrintable reports whether the given b is in the ASN.1 PrintableString set.
+// This is a simplified version of encoding/asn1.isPrintable.
+func isPrintable(b byte) bool {
+ return 'a' <= b && b <= 'z' ||
+ 'A' <= b && b <= 'Z' ||
+ '0' <= b && b <= '9' ||
+ '\'' <= b && b <= ')' ||
+ '+' <= b && b <= '/' ||
+ b == ' ' ||
+ b == ':' ||
+ b == '=' ||
+ b == '?' ||
+ // This is technically not allowed in a PrintableString.
+ // However, x509 certificates with wildcard strings don't
+ // always use the correct string type so we permit it.
+ b == '*' ||
+ // This is not technically allowed either. However, not
+ // only is it relatively common, but there are also a
+ // handful of CA certificates that contain it. At least
+ // one of which will not expire until 2027.
+ b == '&'
+}
+
+// parseASN1String parses the ASN.1 string types T61String, PrintableString,
+// UTF8String, BMPString, IA5String, and NumericString. This is mostly copied
+// from the respective encoding/asn1.parse... methods, rather than just
+// increasing the API surface of that package.
+func parseASN1String(tag cryptobyte_asn1.Tag, value []byte) (string, error) {
+ switch tag {
+ case cryptobyte_asn1.T61String:
+ return string(value), nil
+ case cryptobyte_asn1.PrintableString:
+ for _, b := range value {
+ if !isPrintable(b) {
+ return "", errors.New("invalid PrintableString")
+ }
+ }
+ return string(value), nil
+ case cryptobyte_asn1.UTF8String:
+ if !utf8.Valid(value) {
+ return "", errors.New("invalid UTF-8 string")
+ }
+ return string(value), nil
+ case cryptobyte_asn1.Tag(asn1.TagBMPString):
+ if len(value)%2 != 0 {
+ return "", errors.New("invalid BMPString")
+ }
+
+ // Strip terminator if present.
+ if l := len(value); l >= 2 && value[l-1] == 0 && value[l-2] == 0 {
+ value = value[:l-2]
+ }
+
+ s := make([]uint16, 0, len(value)/2)
+ for len(value) > 0 {
+ s = append(s, uint16(value[0])<<8+uint16(value[1]))
+ value = value[2:]
+ }
+
+ return string(utf16.Decode(s)), nil
+ case cryptobyte_asn1.IA5String:
+ s := string(value)
+ if isIA5String(s) != nil {
+ return "", errors.New("invalid IA5String")
+ }
+ return s, nil
+ case cryptobyte_asn1.Tag(asn1.TagNumericString):
+ for _, b := range value {
+ if !('0' <= b && b <= '9' || b == ' ') {
+ return "", errors.New("invalid NumericString")
+ }
+ }
+ return string(value), nil
+ }
+ return "", fmt.Errorf("unsupported string type: %v", tag)
+}
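Editor's note: the BMPString branch is the only one doing real arithmetic — big-endian UTF-16 code units are assembled two bytes at a time and decoded with utf16.Decode. A standalone, runnable restatement of that loop (an illustration, not code from the diff):

```go
package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	// UTF-16BE bytes for "Go" followed by U+3042 (Hiragana 'a').
	value := []byte{0x00, 'G', 0x00, 'o', 0x30, 0x42}
	s := make([]uint16, 0, len(value)/2)
	for len(value) > 0 {
		s = append(s, uint16(value[0])<<8+uint16(value[1]))
		value = value[2:]
	}
	fmt.Println(string(utf16.Decode(s))) // Goあ
}
```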
+
+// parseName parses a DER-encoded Name as defined in RFC 5280. We may
+// want to export this function in the future for use in crypto/tls.
+func parseName(raw cryptobyte.String) (*pkix.RDNSequence, error) {
+ if !raw.ReadASN1(&raw, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: invalid RDNSequence")
+ }
+
+ var rdnSeq pkix.RDNSequence
+ for !raw.Empty() {
+ var rdnSet pkix.RelativeDistinguishedNameSET
+ var set cryptobyte.String
+ if !raw.ReadASN1(&set, cryptobyte_asn1.SET) {
+ return nil, errors.New("x509: invalid RDNSequence")
+ }
+ for !set.Empty() {
+ var atav cryptobyte.String
+ if !set.ReadASN1(&atav, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: invalid RDNSequence: invalid attribute")
+ }
+ var attr pkix.AttributeTypeAndValue
+ if !atav.ReadASN1ObjectIdentifier(&attr.Type) {
+ return nil, errors.New("x509: invalid RDNSequence: invalid attribute type")
+ }
+ var rawValue cryptobyte.String
+ var valueTag cryptobyte_asn1.Tag
+ if !atav.ReadAnyASN1(&rawValue, &valueTag) {
+ return nil, errors.New("x509: invalid RDNSequence: invalid attribute value")
+ }
+ var err error
+ attr.Value, err = parseASN1String(valueTag, rawValue)
+ if err != nil {
+ return nil, fmt.Errorf("x509: invalid RDNSequence: invalid attribute value: %s", err)
+ }
+ rdnSet = append(rdnSet, attr)
+ }
+
+ rdnSeq = append(rdnSeq, rdnSet)
+ }
+
+ return &rdnSeq, nil
+}
+
+func parseAI(der cryptobyte.String) (pkix.AlgorithmIdentifier, error) {
+ ai := pkix.AlgorithmIdentifier{}
+ if !der.ReadASN1ObjectIdentifier(&ai.Algorithm) {
+ return ai, errors.New("x509: malformed OID")
+ }
+ if der.Empty() {
+ return ai, nil
+ }
+ var params cryptobyte.String
+ var tag cryptobyte_asn1.Tag
+ if !der.ReadAnyASN1Element(&params, &tag) {
+ return ai, errors.New("x509: malformed parameters")
+ }
+ ai.Parameters.Tag = int(tag)
+ ai.Parameters.FullBytes = params
+ return ai, nil
+}
+
+func parseTime(der *cryptobyte.String) (time.Time, error) {
+ var t time.Time
+ switch {
+ case der.PeekASN1Tag(cryptobyte_asn1.UTCTime):
+ if !der.ReadASN1UTCTime(&t) {
+ return t, errors.New("x509: malformed UTCTime")
+ }
+ case der.PeekASN1Tag(cryptobyte_asn1.GeneralizedTime):
+ if !der.ReadASN1GeneralizedTime(&t) {
+ return t, errors.New("x509: malformed GeneralizedTime")
+ }
+ default:
+ return t, errors.New("x509: unsupported time format")
+ }
+ return t, nil
+}
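Editor's note: a minimal runnable illustration of the UTCTime branch, assuming golang.org/x/crypto/cryptobyte is available (this example is not from the diff). ReadASN1UTCTime also applies the RFC 5280 two-digit-year pivot: years below 50 map to 20xx.

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	// DER UTCTime: tag 0x17, length 13, then "330102150405Z".
	der := cryptobyte.String(append([]byte{0x17, 0x0d}, "330102150405Z"...))
	var t time.Time
	if !der.ReadASN1UTCTime(&t) {
		panic("malformed UTCTime")
	}
	fmt.Println(t.Year(), t.Month(), t.Day()) // 2033 January 2
}
```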
+
+func parseValidity(der cryptobyte.String) (time.Time, time.Time, error) {
+ notBefore, err := parseTime(&der)
+ if err != nil {
+ return time.Time{}, time.Time{}, err
+ }
+ notAfter, err := parseTime(&der)
+ if err != nil {
+ return time.Time{}, time.Time{}, err
+ }
+
+ return notBefore, notAfter, nil
+}
+
+func parseExtension(der cryptobyte.String) (pkix.Extension, error) {
+ var ext pkix.Extension
+ if !der.ReadASN1ObjectIdentifier(&ext.Id) {
+ return ext, errors.New("x509: malformed extension OID field")
+ }
+ if der.PeekASN1Tag(cryptobyte_asn1.BOOLEAN) {
+ if !der.ReadASN1Boolean(&ext.Critical) {
+ return ext, errors.New("x509: malformed extension critical field")
+ }
+ }
+ var val cryptobyte.String
+ if !der.ReadASN1(&val, cryptobyte_asn1.OCTET_STRING) {
+ return ext, errors.New("x509: malformed extension value field")
+ }
+ ext.Value = val
+ return ext, nil
+}
+
+func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (any, error) {
+ der := cryptobyte.String(keyData.PublicKey.RightAlign())
+ switch algo {
+ case RSA:
+ // RSA public keys must have a NULL in the parameters.
+ // See RFC 3279, Section 2.3.1.
+ if !bytes.Equal(keyData.Algorithm.Parameters.FullBytes, asn1.NullBytes) {
+ return nil, errors.New("x509: RSA key missing NULL parameters")
+ }
+
+ p := &pkcs1PublicKey{N: new(big.Int)}
+ if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: invalid RSA public key")
+ }
+ if !der.ReadASN1Integer(p.N) {
+ return nil, errors.New("x509: invalid RSA modulus")
+ }
+ if !der.ReadASN1Integer(&p.E) {
+ return nil, errors.New("x509: invalid RSA public exponent")
+ }
+
+ if p.N.Sign() <= 0 {
+ return nil, errors.New("x509: RSA modulus is not a positive number")
+ }
+ if p.E <= 0 {
+ return nil, errors.New("x509: RSA public exponent is not a positive number")
+ }
+
+ pub := &rsa.PublicKey{
+ E: p.E,
+ N: p.N,
+ }
+ return pub, nil
+ case ECDSA:
+ paramsDer := cryptobyte.String(keyData.Algorithm.Parameters.FullBytes)
+ namedCurveOID := new(asn1.ObjectIdentifier)
+ if !paramsDer.ReadASN1ObjectIdentifier(namedCurveOID) {
+ return nil, errors.New("x509: invalid ECDSA parameters")
+ }
+ namedCurve := namedCurveFromOID(*namedCurveOID)
+ if namedCurve == nil {
+ return nil, errors.New("x509: unsupported elliptic curve")
+ }
+ x, y := elliptic.Unmarshal(namedCurve, der)
+ if x == nil {
+ return nil, errors.New("x509: failed to unmarshal elliptic curve point")
+ }
+ pub := &ecdsa.PublicKey{
+ Curve: namedCurve,
+ X: x,
+ Y: y,
+ }
+ return pub, nil
+ case Ed25519:
+ // RFC 8410, Section 3
+ // > For all of the OIDs, the parameters MUST be absent.
+ if len(keyData.Algorithm.Parameters.FullBytes) != 0 {
+ return nil, errors.New("x509: Ed25519 key encoded with illegal parameters")
+ }
+ if len(der) != ed25519.PublicKeySize {
+ return nil, errors.New("x509: wrong Ed25519 public key size")
+ }
+ return ed25519.PublicKey(der), nil
+ case DSA:
+ y := new(big.Int)
+ if !der.ReadASN1Integer(y) {
+ return nil, errors.New("x509: invalid DSA public key")
+ }
+ pub := &dsa.PublicKey{
+ Y: y,
+ Parameters: dsa.Parameters{
+ P: new(big.Int),
+ Q: new(big.Int),
+ G: new(big.Int),
+ },
+ }
+ paramsDer := cryptobyte.String(keyData.Algorithm.Parameters.FullBytes)
+ if !paramsDer.ReadASN1(&paramsDer, cryptobyte_asn1.SEQUENCE) ||
+ !paramsDer.ReadASN1Integer(pub.Parameters.P) ||
+ !paramsDer.ReadASN1Integer(pub.Parameters.Q) ||
+ !paramsDer.ReadASN1Integer(pub.Parameters.G) {
+ return nil, errors.New("x509: invalid DSA parameters")
+ }
+ if pub.Y.Sign() <= 0 || pub.Parameters.P.Sign() <= 0 ||
+ pub.Parameters.Q.Sign() <= 0 || pub.Parameters.G.Sign() <= 0 {
+ return nil, errors.New("x509: zero or negative DSA parameter")
+ }
+ return pub, nil
+ default:
+ return nil, nil
+ }
+}
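Editor's note: every branch above follows the same cryptobyte pattern — unwrap a SEQUENCE into a child String, read typed fields, and treat any false return as malformed DER. A self-contained illustration of that pattern (the example bytes are hand-encoded here, not taken from the diff):

```go
package main

import (
	"fmt"
	"math/big"

	"golang.org/x/crypto/cryptobyte"
	cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
	// DER for SEQUENCE { INTEGER 65537, INTEGER 3 }.
	der := cryptobyte.String([]byte{
		0x30, 0x08,
		0x02, 0x03, 0x01, 0x00, 0x01,
		0x02, 0x01, 0x03,
	})
	var seq cryptobyte.String
	if !der.ReadASN1(&seq, cryptobyte_asn1.SEQUENCE) {
		panic("malformed SEQUENCE")
	}
	n := new(big.Int)
	var e int
	if !seq.ReadASN1Integer(n) || !seq.ReadASN1Integer(&e) {
		panic("malformed INTEGER")
	}
	fmt.Println(n, e) // 65537 3
}
```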
+
+func parseKeyUsageExtension(der cryptobyte.String) (KeyUsage, error) {
+ var usageBits asn1.BitString
+ if !der.ReadASN1BitString(&usageBits) {
+ return 0, errors.New("x509: invalid key usage")
+ }
+
+ var usage int
+ for i := 0; i < 9; i++ {
+ if usageBits.At(i) != 0 {
+ usage |= 1 << uint(i)
+ }
+ }
+ return KeyUsage(usage), nil
+}
+
+func parseBasicConstraintsExtension(der cryptobyte.String) (bool, int, error) {
+ var isCA bool
+ if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) {
+ return false, 0, errors.New("x509: invalid basic constraints")
+ }
+ if der.PeekASN1Tag(cryptobyte_asn1.BOOLEAN) {
+ if !der.ReadASN1Boolean(&isCA) {
+ return false, 0, errors.New("x509: invalid basic constraints cA field")
+ }
+ }
+ maxPathLen := -1
+ if !der.Empty() && der.PeekASN1Tag(cryptobyte_asn1.INTEGER) {
+ if !der.ReadASN1Integer(&maxPathLen) {
+ return false, 0, errors.New("x509: invalid basic constraints pathLenConstraint field")
+ }
+ }
+
+ // TODO: map out.MaxPathLen to 0 if it has the -1 default value? (Issue 19285)
+ return isCA, maxPathLen, nil
+}
+
+func forEachSAN(der cryptobyte.String, callback func(tag int, data []byte) error) error {
+ if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) {
+ return errors.New("x509: invalid subject alternative names")
+ }
+ for !der.Empty() {
+ var san cryptobyte.String
+ var tag cryptobyte_asn1.Tag
+ if !der.ReadAnyASN1(&san, &tag) {
+ return errors.New("x509: invalid subject alternative name")
+ }
+ if err := callback(int(tag^0x80), san); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL, err error) {
+ err = forEachSAN(der, func(tag int, data []byte) error {
+ switch tag {
+ case nameTypeEmail:
+ email := string(data)
+ if err := isIA5String(email); err != nil {
+ return errors.New("x509: SAN rfc822Name is malformed")
+ }
+ emailAddresses = append(emailAddresses, email)
+ case nameTypeDNS:
+ name := string(data)
+ if err := isIA5String(name); err != nil {
+ return errors.New("x509: SAN dNSName is malformed")
+ }
+ dnsNames = append(dnsNames, name)
+ case nameTypeURI:
+ uriStr := string(data)
+ if err := isIA5String(uriStr); err != nil {
+ return errors.New("x509: SAN uniformResourceIdentifier is malformed")
+ }
+ uri, err := url.Parse(uriStr)
+ if err != nil {
+ return fmt.Errorf("x509: cannot parse URI %q: %s", uriStr, err)
+ }
+ if len(uri.Host) > 0 {
+ if _, ok := domainToReverseLabels(uri.Host); !ok {
+ return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr)
+ }
+ }
+ uris = append(uris, uri)
+ case nameTypeIP:
+ switch len(data) {
+ case net.IPv4len, net.IPv6len:
+ ipAddresses = append(ipAddresses, data)
+ default:
+ return errors.New("x509: cannot parse IP address of length " + strconv.Itoa(len(data)))
+ }
+ }
+
+ return nil
+ })
+
+ return
+}
+
+func parseExtKeyUsageExtension(der cryptobyte.String) ([]ExtKeyUsage, []asn1.ObjectIdentifier, error) {
+ var extKeyUsages []ExtKeyUsage
+ var unknownUsages []asn1.ObjectIdentifier
+ if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) {
+ return nil, nil, errors.New("x509: invalid extended key usages")
+ }
+ for !der.Empty() {
+ var eku asn1.ObjectIdentifier
+ if !der.ReadASN1ObjectIdentifier(&eku) {
+ return nil, nil, errors.New("x509: invalid extended key usages")
+ }
+ if extKeyUsage, ok := extKeyUsageFromOID(eku); ok {
+ extKeyUsages = append(extKeyUsages, extKeyUsage)
+ } else {
+ unknownUsages = append(unknownUsages, eku)
+ }
+ }
+ return extKeyUsages, unknownUsages, nil
+}
+
+func parseCertificatePoliciesExtension(der cryptobyte.String) ([]asn1.ObjectIdentifier, error) {
+ var oids []asn1.ObjectIdentifier
+ if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: invalid certificate policies")
+ }
+ for !der.Empty() {
+ var cp cryptobyte.String
+ if !der.ReadASN1(&cp, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: invalid certificate policies")
+ }
+ var oid asn1.ObjectIdentifier
+ if !cp.ReadASN1ObjectIdentifier(&oid) {
+ return nil, errors.New("x509: invalid certificate policies")
+ }
+ oids = append(oids, oid)
+ }
+
+ return oids, nil
+}
+
+// isValidIPMask reports whether mask consists of zero or more 1 bits, followed by zero bits.
+func isValidIPMask(mask []byte) bool {
+ seenZero := false
+
+ for _, b := range mask {
+ if seenZero {
+ if b != 0 {
+ return false
+ }
+
+ continue
+ }
+
+ switch b {
+ case 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe:
+ seenZero = true
+ case 0xff:
+ default:
+ return false
+ }
+ }
+
+ return true
+}
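Editor's note: the switch enumerates the eight possible "ones-then-zeros" byte patterns; once a byte with trailing zero bits is seen, every later byte must be zero. A worked illustration follows (the function body is restated so the snippet runs on its own; it is not additional code from the diff):

```go
package main

import "fmt"

// isValidIPMask restates the diff's helper for demonstration.
func isValidIPMask(mask []byte) bool {
	seenZero := false
	for _, b := range mask {
		if seenZero {
			if b != 0 {
				return false
			}
			continue
		}
		switch b {
		case 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe:
			seenZero = true
		case 0xff:
		default:
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isValidIPMask([]byte{0xff, 0xff, 0xff, 0x00})) // true  (/24)
	fmt.Println(isValidIPMask([]byte{0xff, 0xe0, 0x00, 0x00})) // true  (/11)
	fmt.Println(isValidIPMask([]byte{0xff, 0x00, 0xff, 0x00})) // false (1 bit after a 0)
	fmt.Println(isValidIPMask([]byte{0xff, 0x01, 0x00, 0x00})) // false (0x01 is not ones-then-zeros)
}
```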
+
+func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandled bool, err error) {
+ // RFC 5280, 4.2.1.10
+
+ // NameConstraints ::= SEQUENCE {
+ // permittedSubtrees [0] GeneralSubtrees OPTIONAL,
+ // excludedSubtrees [1] GeneralSubtrees OPTIONAL }
+ //
+ // GeneralSubtrees ::= SEQUENCE SIZE (1..MAX) OF GeneralSubtree
+ //
+ // GeneralSubtree ::= SEQUENCE {
+ // base GeneralName,
+ // minimum [0] BaseDistance DEFAULT 0,
+ // maximum [1] BaseDistance OPTIONAL }
+ //
+ // BaseDistance ::= INTEGER (0..MAX)
+
+ outer := cryptobyte.String(e.Value)
+ var toplevel, permitted, excluded cryptobyte.String
+ var havePermitted, haveExcluded bool
+ if !outer.ReadASN1(&toplevel, cryptobyte_asn1.SEQUENCE) ||
+ !outer.Empty() ||
+ !toplevel.ReadOptionalASN1(&permitted, &havePermitted, cryptobyte_asn1.Tag(0).ContextSpecific().Constructed()) ||
+ !toplevel.ReadOptionalASN1(&excluded, &haveExcluded, cryptobyte_asn1.Tag(1).ContextSpecific().Constructed()) ||
+ !toplevel.Empty() {
+ return false, errors.New("x509: invalid NameConstraints extension")
+ }
+
+ if !havePermitted && !haveExcluded || len(permitted) == 0 && len(excluded) == 0 {
+ // From RFC 5280, Section 4.2.1.10:
+ // “either the permittedSubtrees field
+ // or the excludedSubtrees MUST be
+ // present”
+ return false, errors.New("x509: empty name constraints extension")
+ }
+
+ getValues := func(subtrees cryptobyte.String) (dnsNames []string, ips []*net.IPNet, emails, uriDomains []string, err error) {
+ for !subtrees.Empty() {
+ var seq, value cryptobyte.String
+ var tag cryptobyte_asn1.Tag
+ if !subtrees.ReadASN1(&seq, cryptobyte_asn1.SEQUENCE) ||
+ !seq.ReadAnyASN1(&value, &tag) {
+ return nil, nil, nil, nil, fmt.Errorf("x509: invalid NameConstraints extension")
+ }
+
+ var (
+ dnsTag = cryptobyte_asn1.Tag(2).ContextSpecific()
+ emailTag = cryptobyte_asn1.Tag(1).ContextSpecific()
+ ipTag = cryptobyte_asn1.Tag(7).ContextSpecific()
+ uriTag = cryptobyte_asn1.Tag(6).ContextSpecific()
+ )
+
+ switch tag {
+ case dnsTag:
+ domain := string(value)
+ if err := isIA5String(domain); err != nil {
+ return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error())
+ }
+
+ trimmedDomain := domain
+ if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' {
+ // constraints can have a leading
+ // period to exclude the domain
+ // itself, but that's not valid in a
+ // normal domain name.
+ trimmedDomain = trimmedDomain[1:]
+ }
+ if _, ok := domainToReverseLabels(trimmedDomain); !ok {
+ return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse dnsName constraint %q", domain)
+ }
+ dnsNames = append(dnsNames, domain)
+
+ case ipTag:
+ l := len(value)
+ var ip, mask []byte
+
+ switch l {
+ case 8:
+ ip = value[:4]
+ mask = value[4:]
+
+ case 32:
+ ip = value[:16]
+ mask = value[16:]
+
+ default:
+ return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained value of length %d", l)
+ }
+
+ if !isValidIPMask(mask) {
+ return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained invalid mask %x", mask)
+ }
+
+ ips = append(ips, &net.IPNet{IP: net.IP(ip), Mask: net.IPMask(mask)})
+
+ case emailTag:
+ constraint := string(value)
+ if err := isIA5String(constraint); err != nil {
+ return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error())
+ }
+
+ // If the constraint contains an @ then
+ // it specifies an exact mailbox name.
+ if strings.Contains(constraint, "@") {
+ if _, ok := parseRFC2821Mailbox(constraint); !ok {
+ return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint)
+ }
+ } else {
+ // Otherwise it's a domain name.
+ domain := constraint
+ if len(domain) > 0 && domain[0] == '.' {
+ domain = domain[1:]
+ }
+ if _, ok := domainToReverseLabels(domain); !ok {
+ return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint)
+ }
+ }
+ emails = append(emails, constraint)
+
+ case uriTag:
+ domain := string(value)
+ if err := isIA5String(domain); err != nil {
+ return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error())
+ }
+
+ if net.ParseIP(domain) != nil {
+ return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q: cannot be IP address", domain)
+ }
+
+ trimmedDomain := domain
+ if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' {
+ // constraints can have a leading
+ // period to exclude the domain itself,
+ // but that's not valid in a normal
+ // domain name.
+ trimmedDomain = trimmedDomain[1:]
+ }
+ if _, ok := domainToReverseLabels(trimmedDomain); !ok {
+ return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q", domain)
+ }
+ uriDomains = append(uriDomains, domain)
+
+ default:
+ unhandled = true
+ }
+ }
+
+ return dnsNames, ips, emails, uriDomains, nil
+ }
+
+ if out.PermittedDNSDomains, out.PermittedIPRanges, out.PermittedEmailAddresses, out.PermittedURIDomains, err = getValues(permitted); err != nil {
+ return false, err
+ }
+ if out.ExcludedDNSDomains, out.ExcludedIPRanges, out.ExcludedEmailAddresses, out.ExcludedURIDomains, err = getValues(excluded); err != nil {
+ return false, err
+ }
+ out.PermittedDNSDomainsCritical = e.Critical
+
+ return unhandled, nil
+}
+
+func processExtensions(out *Certificate) error {
+ var err error
+ for _, e := range out.Extensions {
+ unhandled := false
+
+ if len(e.Id) == 4 && e.Id[0] == 2 && e.Id[1] == 5 && e.Id[2] == 29 {
+ switch e.Id[3] {
+ case 15:
+ out.KeyUsage, err = parseKeyUsageExtension(e.Value)
+ if err != nil {
+ return err
+ }
+ case 19:
+ out.IsCA, out.MaxPathLen, err = parseBasicConstraintsExtension(e.Value)
+ if err != nil {
+ return err
+ }
+ out.BasicConstraintsValid = true
+ out.MaxPathLenZero = out.MaxPathLen == 0
+ case 17:
+ out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(e.Value)
+ if err != nil {
+ return err
+ }
+
+ if len(out.DNSNames) == 0 && len(out.EmailAddresses) == 0 && len(out.IPAddresses) == 0 && len(out.URIs) == 0 {
+ // If we didn't parse anything then we do the critical check below.
+ unhandled = true
+ }
+
+ case 30:
+ unhandled, err = parseNameConstraintsExtension(out, e)
+ if err != nil {
+ return err
+ }
+
+ case 31:
+ // RFC 5280, 4.2.1.13
+
+ // CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint
+ //
+ // DistributionPoint ::= SEQUENCE {
+ // distributionPoint [0] DistributionPointName OPTIONAL,
+ // reasons [1] ReasonFlags OPTIONAL,
+ // cRLIssuer [2] GeneralNames OPTIONAL }
+ //
+ // DistributionPointName ::= CHOICE {
+ // fullName [0] GeneralNames,
+ // nameRelativeToCRLIssuer [1] RelativeDistinguishedName }
+ val := cryptobyte.String(e.Value)
+ if !val.ReadASN1(&val, cryptobyte_asn1.SEQUENCE) {
+ return errors.New("x509: invalid CRL distribution points")
+ }
+ for !val.Empty() {
+ var dpDER cryptobyte.String
+ if !val.ReadASN1(&dpDER, cryptobyte_asn1.SEQUENCE) {
+ return errors.New("x509: invalid CRL distribution point")
+ }
+ var dpNameDER cryptobyte.String
+ var dpNamePresent bool
+ if !dpDER.ReadOptionalASN1(&dpNameDER, &dpNamePresent, cryptobyte_asn1.Tag(0).Constructed().ContextSpecific()) {
+ return errors.New("x509: invalid CRL distribution point")
+ }
+ if !dpNamePresent {
+ continue
+ }
+ if !dpNameDER.ReadASN1(&dpNameDER, cryptobyte_asn1.Tag(0).Constructed().ContextSpecific()) {
+ return errors.New("x509: invalid CRL distribution point")
+ }
+ for !dpNameDER.Empty() {
+ if !dpNameDER.PeekASN1Tag(cryptobyte_asn1.Tag(6).ContextSpecific()) {
+ break
+ }
+ var uri cryptobyte.String
+ if !dpNameDER.ReadASN1(&uri, cryptobyte_asn1.Tag(6).ContextSpecific()) {
+ return errors.New("x509: invalid CRL distribution point")
+ }
+ out.CRLDistributionPoints = append(out.CRLDistributionPoints, string(uri))
+ }
+ }
+
+ case 35:
+ // RFC 5280, 4.2.1.1
+ val := cryptobyte.String(e.Value)
+ var akid cryptobyte.String
+ if !val.ReadASN1(&akid, cryptobyte_asn1.SEQUENCE) {
+ return errors.New("x509: invalid authority key identifier")
+ }
+ if akid.PeekASN1Tag(cryptobyte_asn1.Tag(0).ContextSpecific()) {
+ if !akid.ReadASN1(&akid, cryptobyte_asn1.Tag(0).ContextSpecific()) {
+ return errors.New("x509: invalid authority key identifier")
+ }
+ out.AuthorityKeyId = akid
+ }
+ case 37:
+ out.ExtKeyUsage, out.UnknownExtKeyUsage, err = parseExtKeyUsageExtension(e.Value)
+ if err != nil {
+ return err
+ }
+ case 14:
+ // RFC 5280, 4.2.1.2
+ val := cryptobyte.String(e.Value)
+ var skid cryptobyte.String
+ if !val.ReadASN1(&skid, cryptobyte_asn1.OCTET_STRING) {
+ return errors.New("x509: invalid subject key identifier")
+ }
+ out.SubjectKeyId = skid
+ case 32:
+ out.PolicyIdentifiers, err = parseCertificatePoliciesExtension(e.Value)
+ if err != nil {
+ return err
+ }
+ default:
+ // Unknown extensions are recorded if critical.
+ unhandled = true
+ }
+ } else if e.Id.Equal(oidExtensionAuthorityInfoAccess) {
+ // RFC 5280 4.2.2.1: Authority Information Access
+ val := cryptobyte.String(e.Value)
+ if !val.ReadASN1(&val, cryptobyte_asn1.SEQUENCE) {
+ return errors.New("x509: invalid authority info access")
+ }
+ for !val.Empty() {
+ var aiaDER cryptobyte.String
+ if !val.ReadASN1(&aiaDER, cryptobyte_asn1.SEQUENCE) {
+ return errors.New("x509: invalid authority info access")
+ }
+ var method asn1.ObjectIdentifier
+ if !aiaDER.ReadASN1ObjectIdentifier(&method) {
+ return errors.New("x509: invalid authority info access")
+ }
+ if !aiaDER.PeekASN1Tag(cryptobyte_asn1.Tag(6).ContextSpecific()) {
+ continue
+ }
+ if !aiaDER.ReadASN1(&aiaDER, cryptobyte_asn1.Tag(6).ContextSpecific()) {
+ return errors.New("x509: invalid authority info access")
+ }
+ switch {
+ case method.Equal(oidAuthorityInfoAccessOcsp):
+ out.OCSPServer = append(out.OCSPServer, string(aiaDER))
+ case method.Equal(oidAuthorityInfoAccessIssuers):
+ out.IssuingCertificateURL = append(out.IssuingCertificateURL, string(aiaDER))
+ }
+ }
+ } else {
+ // Unknown extensions are recorded if critical.
+ unhandled = true
+ }
+
+ if e.Critical && unhandled {
+ out.UnhandledCriticalExtensions = append(out.UnhandledCriticalExtensions, e.Id)
+ }
+ }
+
+ return nil
+}
+
+func parseCertificate(der []byte) (*Certificate, error) {
+ cert := &Certificate{}
+
+ input := cryptobyte.String(der)
+ // we read the SEQUENCE including length and tag bytes so that
+ // we can populate Certificate.Raw, before unwrapping the
+ // SEQUENCE so it can be operated on
+ if !input.ReadASN1Element(&input, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed certificate")
+ }
+ cert.Raw = input
+ if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed certificate")
+ }
+
+ var tbs cryptobyte.String
+ // do the same trick again as above to extract the raw
+ // bytes for Certificate.RawTBSCertificate
+ if !input.ReadASN1Element(&tbs, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed tbs certificate")
+ }
+ cert.RawTBSCertificate = tbs
+ if !tbs.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed tbs certificate")
+ }
+
+ if !tbs.ReadOptionalASN1Integer(&cert.Version, cryptobyte_asn1.Tag(0).Constructed().ContextSpecific(), 0) {
+ return nil, errors.New("x509: malformed version")
+ }
+ if cert.Version < 0 {
+ return nil, errors.New("x509: malformed version")
+ }
+ // For backwards-compatibility reasons Version is one-indexed,
+ // rather than zero-indexed as defined in RFC 5280.
+ cert.Version++
+ if cert.Version > 3 {
+ return nil, errors.New("x509: invalid version")
+ }
+
+ serial := new(big.Int)
+ if !tbs.ReadASN1Integer(serial) {
+ return nil, errors.New("x509: malformed serial number")
+ }
+ // we ignore the presence of negative serial numbers because
+ // of their prevalence, despite them being invalid
+ // TODO(rolandshoemaker): revisit this decision, there are currently
+ // only 10 trusted certificates with negative serial numbers
+ // according to censys.io.
+ cert.SerialNumber = serial
+
+ var sigAISeq cryptobyte.String
+ if !tbs.ReadASN1(&sigAISeq, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed signature algorithm identifier")
+ }
+ // Before parsing the inner algorithm identifier, extract
+ // the outer algorithm identifier and make sure that they
+ // match.
+ var outerSigAISeq cryptobyte.String
+ if !input.ReadASN1(&outerSigAISeq, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed algorithm identifier")
+ }
+ if !bytes.Equal(outerSigAISeq, sigAISeq) {
+ return nil, errors.New("x509: inner and outer signature algorithm identifiers don't match")
+ }
+ sigAI, err := parseAI(sigAISeq)
+ if err != nil {
+ return nil, err
+ }
+ cert.SignatureAlgorithm = getSignatureAlgorithmFromAI(sigAI)
+
+ var issuerSeq cryptobyte.String
+ if !tbs.ReadASN1Element(&issuerSeq, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed issuer")
+ }
+ cert.RawIssuer = issuerSeq
+ issuerRDNs, err := parseName(issuerSeq)
+ if err != nil {
+ return nil, err
+ }
+ cert.Issuer.FillFromRDNSequence(issuerRDNs)
+
+ var validity cryptobyte.String
+ if !tbs.ReadASN1(&validity, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed validity")
+ }
+ cert.NotBefore, cert.NotAfter, err = parseValidity(validity)
+ if err != nil {
+ return nil, err
+ }
+
+ var subjectSeq cryptobyte.String
+ if !tbs.ReadASN1Element(&subjectSeq, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed subject")
+ }
+ cert.RawSubject = subjectSeq
+ subjectRDNs, err := parseName(subjectSeq)
+ if err != nil {
+ return nil, err
+ }
+ cert.Subject.FillFromRDNSequence(subjectRDNs)
+
+ var spki cryptobyte.String
+ if !tbs.ReadASN1Element(&spki, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed spki")
+ }
+ cert.RawSubjectPublicKeyInfo = spki
+ if !spki.ReadASN1(&spki, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed spki")
+ }
+ var pkAISeq cryptobyte.String
+ if !spki.ReadASN1(&pkAISeq, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed public key algorithm identifier")
+ }
+ pkAI, err := parseAI(pkAISeq)
+ if err != nil {
+ return nil, err
+ }
+ cert.PublicKeyAlgorithm = getPublicKeyAlgorithmFromOID(pkAI.Algorithm)
+ var spk asn1.BitString
+ if !spki.ReadASN1BitString(&spk) {
+ return nil, errors.New("x509: malformed subjectPublicKey")
+ }
+ cert.PublicKey, err = parsePublicKey(cert.PublicKeyAlgorithm, &publicKeyInfo{
+ Algorithm: pkAI,
+ PublicKey: spk,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if cert.Version > 1 {
+ if !tbs.SkipOptionalASN1(cryptobyte_asn1.Tag(1).ContextSpecific()) {
+ return nil, errors.New("x509: malformed issuerUniqueID")
+ }
+ if !tbs.SkipOptionalASN1(cryptobyte_asn1.Tag(2).ContextSpecific()) {
+ return nil, errors.New("x509: malformed subjectUniqueID")
+ }
+ if cert.Version == 3 {
+ var extensions cryptobyte.String
+ var present bool
+ if !tbs.ReadOptionalASN1(&extensions, &present, cryptobyte_asn1.Tag(3).Constructed().ContextSpecific()) {
+ return nil, errors.New("x509: malformed extensions")
+ }
+ if present {
+ seenExts := make(map[string]bool)
+ if !extensions.ReadASN1(&extensions, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed extensions")
+ }
+ for !extensions.Empty() {
+ var extension cryptobyte.String
+ if !extensions.ReadASN1(&extension, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed extension")
+ }
+ ext, err := parseExtension(extension)
+ if err != nil {
+ return nil, err
+ }
+ oidStr := ext.Id.String()
+ if seenExts[oidStr] {
+ return nil, errors.New("x509: certificate contains duplicate extensions")
+ }
+ seenExts[oidStr] = true
+ cert.Extensions = append(cert.Extensions, ext)
+ }
+ err = processExtensions(cert)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+
+ var signature asn1.BitString
+ if !input.ReadASN1BitString(&signature) {
+ return nil, errors.New("x509: malformed signature")
+ }
+ cert.Signature = signature.RightAlign()
+
+ return cert, nil
+}
+
+// ParseCertificate parses a single certificate from the given ASN.1 DER data.
+func ParseCertificate(der []byte) (*Certificate, error) {
+ cert, err := parseCertificate(der)
+ if err != nil {
+ return nil, err
+ }
+ if len(der) != len(cert.Raw) {
+ return nil, errors.New("x509: trailing data")
+ }
+ return cert, err
+}
+
+// ParseCertificates parses one or more certificates from the given ASN.1 DER
+// data. The certificates must be concatenated with no intermediate padding.
+func ParseCertificates(der []byte) ([]*Certificate, error) {
+ var certs []*Certificate
+ for len(der) > 0 {
+ cert, err := parseCertificate(der)
+ if err != nil {
+ return nil, err
+ }
+ certs = append(certs, cert)
+ der = der[len(cert.Raw):]
+ }
+ return certs, nil
+}
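Editor's note: a typical caller pairs these entry points with encoding/pem. A hedged usage sketch (the file name cert.pem is a placeholder, not something referenced by the diff):

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
)

func main() {
	pemBytes, err := os.ReadFile("cert.pem") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil || block.Type != "CERTIFICATE" {
		log.Fatal("no CERTIFICATE block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cert.Subject, cert.NotAfter)
}
```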
+
+// The X.509 standards confusingly 1-indexed the version names, but 0-indexed
+// the actual encoded version, so the version for X.509v2 is 1.
+const x509v2Version = 1
+
+// ParseRevocationList parses an X.509 v2 Certificate Revocation List from the
+// given ASN.1 DER data.
+func ParseRevocationList(der []byte) (*RevocationList, error) {
+ rl := &RevocationList{}
+
+ input := cryptobyte.String(der)
+ // we read the SEQUENCE including length and tag bytes so that
+ // we can populate RevocationList.Raw, before unwrapping the
+ // SEQUENCE so it can be operated on
+ if !input.ReadASN1Element(&input, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed crl")
+ }
+ rl.Raw = input
+ if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed crl")
+ }
+
+ var tbs cryptobyte.String
+ // do the same trick again as above to extract the raw
+ // bytes for RevocationList.RawTBSRevocationList
+ if !input.ReadASN1Element(&tbs, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed tbs crl")
+ }
+ rl.RawTBSRevocationList = tbs
+ if !tbs.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed tbs crl")
+ }
+
+ var version int
+ if !tbs.PeekASN1Tag(cryptobyte_asn1.INTEGER) {
+ return nil, errors.New("x509: unsupported crl version")
+ }
+ if !tbs.ReadASN1Integer(&version) {
+ return nil, errors.New("x509: malformed crl")
+ }
+ if version != x509v2Version {
+ return nil, fmt.Errorf("x509: unsupported crl version: %d", version)
+ }
+
+ var sigAISeq cryptobyte.String
+ if !tbs.ReadASN1(&sigAISeq, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed signature algorithm identifier")
+ }
+ // Before parsing the inner algorithm identifier, extract
+ // the outer algorithm identifier and make sure that they
+ // match.
+ var outerSigAISeq cryptobyte.String
+ if !input.ReadASN1(&outerSigAISeq, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed algorithm identifier")
+ }
+ if !bytes.Equal(outerSigAISeq, sigAISeq) {
+ return nil, errors.New("x509: inner and outer signature algorithm identifiers don't match")
+ }
+ sigAI, err := parseAI(sigAISeq)
+ if err != nil {
+ return nil, err
+ }
+ rl.SignatureAlgorithm = getSignatureAlgorithmFromAI(sigAI)
+
+ var signature asn1.BitString
+ if !input.ReadASN1BitString(&signature) {
+ return nil, errors.New("x509: malformed signature")
+ }
+ rl.Signature = signature.RightAlign()
+
+ var issuerSeq cryptobyte.String
+ if !tbs.ReadASN1Element(&issuerSeq, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed issuer")
+ }
+ rl.RawIssuer = issuerSeq
+ issuerRDNs, err := parseName(issuerSeq)
+ if err != nil {
+ return nil, err
+ }
+ rl.Issuer.FillFromRDNSequence(issuerRDNs)
+
+ rl.ThisUpdate, err = parseTime(&tbs)
+ if err != nil {
+ return nil, err
+ }
+ if tbs.PeekASN1Tag(cryptobyte_asn1.GeneralizedTime) || tbs.PeekASN1Tag(cryptobyte_asn1.UTCTime) {
+ rl.NextUpdate, err = parseTime(&tbs)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if tbs.PeekASN1Tag(cryptobyte_asn1.SEQUENCE) {
+ var revokedSeq cryptobyte.String
+ if !tbs.ReadASN1(&revokedSeq, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed crl")
+ }
+ for !revokedSeq.Empty() {
+ var certSeq cryptobyte.String
+ if !revokedSeq.ReadASN1(&certSeq, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed crl")
+ }
+ rc := pkix.RevokedCertificate{}
+ rc.SerialNumber = new(big.Int)
+ if !certSeq.ReadASN1Integer(rc.SerialNumber) {
+ return nil, errors.New("x509: malformed serial number")
+ }
+ rc.RevocationTime, err = parseTime(&certSeq)
+ if err != nil {
+ return nil, err
+ }
+ var extensions cryptobyte.String
+ var present bool
+ if !certSeq.ReadOptionalASN1(&extensions, &present, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed extensions")
+ }
+ if present {
+ for !extensions.Empty() {
+ var extension cryptobyte.String
+ if !extensions.ReadASN1(&extension, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed extension")
+ }
+ ext, err := parseExtension(extension)
+ if err != nil {
+ return nil, err
+ }
+ rc.Extensions = append(rc.Extensions, ext)
+ }
+ }
+
+ rl.RevokedCertificates = append(rl.RevokedCertificates, rc)
+ }
+ }
+
+ var extensions cryptobyte.String
+ var present bool
+ if !tbs.ReadOptionalASN1(&extensions, &present, cryptobyte_asn1.Tag(0).Constructed().ContextSpecific()) {
+ return nil, errors.New("x509: malformed extensions")
+ }
+ if present {
+ if !extensions.ReadASN1(&extensions, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed extensions")
+ }
+ for !extensions.Empty() {
+ var extension cryptobyte.String
+ if !extensions.ReadASN1(&extension, cryptobyte_asn1.SEQUENCE) {
+ return nil, errors.New("x509: malformed extension")
+ }
+ ext, err := parseExtension(extension)
+ if err != nil {
+ return nil, err
+ }
+ if ext.Id.Equal(oidExtensionAuthorityKeyId) {
+ rl.AuthorityKeyId = ext.Value
+ } else if ext.Id.Equal(oidExtensionCRLNumber) {
+ value := cryptobyte.String(ext.Value)
+ rl.Number = new(big.Int)
+ if !value.ReadASN1Integer(rl.Number) {
+ return nil, errors.New("x509: malformed crl number")
+ }
+ }
+ rl.Extensions = append(rl.Extensions, ext)
+ }
+ }
+
+ return rl, nil
+}
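Editor's note: a short hedged usage sketch for the CRL entry point (the file name crl.der is a placeholder). Number and NextUpdate are only populated when the corresponding extension and optional field are present.

```go
package main

import (
	"crypto/x509"
	"fmt"
	"log"
	"os"
)

func main() {
	der, err := os.ReadFile("crl.der") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	rl, err := x509.ParseRevocationList(der)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("CRL number:", rl.Number, "next update:", rl.NextUpdate)
	for _, rc := range rl.RevokedCertificates {
		fmt.Println("revoked:", rc.SerialNumber, rc.RevocationTime)
	}
}
```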
diff --git a/contrib/go/_std_1.18/src/crypto/x509/pem_decrypt.go b/contrib/go/_std_1.19/src/crypto/x509/pem_decrypt.go
index 682923ac53..682923ac53 100644
--- a/contrib/go/_std_1.18/src/crypto/x509/pem_decrypt.go
+++ b/contrib/go/_std_1.19/src/crypto/x509/pem_decrypt.go
diff --git a/contrib/go/_std_1.18/src/crypto/x509/pkcs1.go b/contrib/go/_std_1.19/src/crypto/x509/pkcs1.go
index f9d384018a..f9d384018a 100644
--- a/contrib/go/_std_1.18/src/crypto/x509/pkcs1.go
+++ b/contrib/go/_std_1.19/src/crypto/x509/pkcs1.go
diff --git a/contrib/go/_std_1.18/src/crypto/x509/pkcs8.go b/contrib/go/_std_1.19/src/crypto/x509/pkcs8.go
index d77efa3156..d77efa3156 100644
--- a/contrib/go/_std_1.18/src/crypto/x509/pkcs8.go
+++ b/contrib/go/_std_1.19/src/crypto/x509/pkcs8.go
diff --git a/contrib/go/_std_1.19/src/crypto/x509/pkix/pkix.go b/contrib/go/_std_1.19/src/crypto/x509/pkix/pkix.go
new file mode 100644
index 0000000000..22a50eef39
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/x509/pkix/pkix.go
@@ -0,0 +1,320 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pkix contains shared, low-level structures used for ASN.1 parsing
+// and serialization of X.509 certificates, CRLs, and OCSP.
+package pkix
+
+import (
+ "encoding/asn1"
+ "encoding/hex"
+ "fmt"
+ "math/big"
+ "time"
+)
+
+// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC
+// 5280, section 4.1.1.2.
+type AlgorithmIdentifier struct {
+ Algorithm asn1.ObjectIdentifier
+ Parameters asn1.RawValue `asn1:"optional"`
+}
+
+type RDNSequence []RelativeDistinguishedNameSET
+
+var attributeTypeNames = map[string]string{
+ "2.5.4.6": "C",
+ "2.5.4.10": "O",
+ "2.5.4.11": "OU",
+ "2.5.4.3": "CN",
+ "2.5.4.5": "SERIALNUMBER",
+ "2.5.4.7": "L",
+ "2.5.4.8": "ST",
+ "2.5.4.9": "STREET",
+ "2.5.4.17": "POSTALCODE",
+}
+
+// String returns a string representation of the sequence r,
+// roughly following the RFC 2253 Distinguished Names syntax.
+func (r RDNSequence) String() string {
+ s := ""
+ for i := 0; i < len(r); i++ {
+ rdn := r[len(r)-1-i]
+ if i > 0 {
+ s += ","
+ }
+ for j, tv := range rdn {
+ if j > 0 {
+ s += "+"
+ }
+
+ oidString := tv.Type.String()
+ typeName, ok := attributeTypeNames[oidString]
+ if !ok {
+ derBytes, err := asn1.Marshal(tv.Value)
+ if err == nil {
+ s += oidString + "=#" + hex.EncodeToString(derBytes)
+ continue // No value escaping necessary.
+ }
+
+ typeName = oidString
+ }
+
+ valueString := fmt.Sprint(tv.Value)
+ escaped := make([]rune, 0, len(valueString))
+
+ for k, c := range valueString {
+ escape := false
+
+ switch c {
+ case ',', '+', '"', '\\', '<', '>', ';':
+ escape = true
+
+ case ' ':
+ escape = k == 0 || k == len(valueString)-1
+
+ case '#':
+ escape = k == 0
+ }
+
+ if escape {
+ escaped = append(escaped, '\\', c)
+ } else {
+ escaped = append(escaped, c)
+ }
+ }
+
+ s += typeName + "=" + string(escaped)
+ }
+ }
+
+ return s
+}
+
+type RelativeDistinguishedNameSET []AttributeTypeAndValue
+
+// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in
+// RFC 5280, Section 4.1.2.4.
+type AttributeTypeAndValue struct {
+ Type asn1.ObjectIdentifier
+ Value any
+}
+
+// AttributeTypeAndValueSET represents a set of ASN.1 sequences of
+// AttributeTypeAndValue sequences from RFC 2986 (PKCS #10).
+type AttributeTypeAndValueSET struct {
+ Type asn1.ObjectIdentifier
+ Value [][]AttributeTypeAndValue `asn1:"set"`
+}
+
+// Extension represents the ASN.1 structure of the same name. See RFC
+// 5280, section 4.2.
+type Extension struct {
+ Id asn1.ObjectIdentifier
+ Critical bool `asn1:"optional"`
+ Value []byte
+}
+
+// Name represents an X.509 distinguished name. This only includes the common
+// elements of a DN. Note that Name is only an approximation of the X.509
+// structure. If an accurate representation is needed, asn1.Unmarshal the raw
+// subject or issuer as an RDNSequence.
+type Name struct {
+ Country, Organization, OrganizationalUnit []string
+ Locality, Province []string
+ StreetAddress, PostalCode []string
+ SerialNumber, CommonName string
+
+ // Names contains all parsed attributes. When parsing distinguished names,
+ // this can be used to extract non-standard attributes that are not parsed
+ // by this package. When marshaling to RDNSequences, the Names field is
+ // ignored, see ExtraNames.
+ Names []AttributeTypeAndValue
+
+ // ExtraNames contains attributes to be copied, raw, into any marshaled
+ // distinguished names. Values override any attributes with the same OID.
+ // The ExtraNames field is not populated when parsing, see Names.
+ ExtraNames []AttributeTypeAndValue
+}
+
+// FillFromRDNSequence populates n from the provided RDNSequence.
+// Multi-entry RDNs are flattened, all entries are added to the
+// relevant n fields, and the grouping is not preserved.
+func (n *Name) FillFromRDNSequence(rdns *RDNSequence) {
+ for _, rdn := range *rdns {
+ if len(rdn) == 0 {
+ continue
+ }
+
+ for _, atv := range rdn {
+ n.Names = append(n.Names, atv)
+ value, ok := atv.Value.(string)
+ if !ok {
+ continue
+ }
+
+ t := atv.Type
+ if len(t) == 4 && t[0] == 2 && t[1] == 5 && t[2] == 4 {
+ switch t[3] {
+ case 3:
+ n.CommonName = value
+ case 5:
+ n.SerialNumber = value
+ case 6:
+ n.Country = append(n.Country, value)
+ case 7:
+ n.Locality = append(n.Locality, value)
+ case 8:
+ n.Province = append(n.Province, value)
+ case 9:
+ n.StreetAddress = append(n.StreetAddress, value)
+ case 10:
+ n.Organization = append(n.Organization, value)
+ case 11:
+ n.OrganizationalUnit = append(n.OrganizationalUnit, value)
+ case 17:
+ n.PostalCode = append(n.PostalCode, value)
+ }
+ }
+ }
+ }
+}
+
+var (
+ oidCountry = []int{2, 5, 4, 6}
+ oidOrganization = []int{2, 5, 4, 10}
+ oidOrganizationalUnit = []int{2, 5, 4, 11}
+ oidCommonName = []int{2, 5, 4, 3}
+ oidSerialNumber = []int{2, 5, 4, 5}
+ oidLocality = []int{2, 5, 4, 7}
+ oidProvince = []int{2, 5, 4, 8}
+ oidStreetAddress = []int{2, 5, 4, 9}
+ oidPostalCode = []int{2, 5, 4, 17}
+)
+
+// appendRDNs appends a relativeDistinguishedNameSET to the given RDNSequence
+// and returns the new value. The relativeDistinguishedNameSET contains an
+// attributeTypeAndValue for each of the given values. See RFC 5280, A.1, and
+// search for AttributeTypeAndValue.
+func (n Name) appendRDNs(in RDNSequence, values []string, oid asn1.ObjectIdentifier) RDNSequence {
+ if len(values) == 0 || oidInAttributeTypeAndValue(oid, n.ExtraNames) {
+ return in
+ }
+
+ s := make([]AttributeTypeAndValue, len(values))
+ for i, value := range values {
+ s[i].Type = oid
+ s[i].Value = value
+ }
+
+ return append(in, s)
+}
+
+// ToRDNSequence converts n into a single RDNSequence. The following
+// attributes are encoded as multi-value RDNs:
+//
+// - Country
+// - Organization
+// - OrganizationalUnit
+// - Locality
+// - Province
+// - StreetAddress
+// - PostalCode
+//
+// Each ExtraNames entry is encoded as an individual RDN.
+func (n Name) ToRDNSequence() (ret RDNSequence) {
+ ret = n.appendRDNs(ret, n.Country, oidCountry)
+ ret = n.appendRDNs(ret, n.Province, oidProvince)
+ ret = n.appendRDNs(ret, n.Locality, oidLocality)
+ ret = n.appendRDNs(ret, n.StreetAddress, oidStreetAddress)
+ ret = n.appendRDNs(ret, n.PostalCode, oidPostalCode)
+ ret = n.appendRDNs(ret, n.Organization, oidOrganization)
+ ret = n.appendRDNs(ret, n.OrganizationalUnit, oidOrganizationalUnit)
+ if len(n.CommonName) > 0 {
+ ret = n.appendRDNs(ret, []string{n.CommonName}, oidCommonName)
+ }
+ if len(n.SerialNumber) > 0 {
+ ret = n.appendRDNs(ret, []string{n.SerialNumber}, oidSerialNumber)
+ }
+ for _, atv := range n.ExtraNames {
+ ret = append(ret, []AttributeTypeAndValue{atv})
+ }
+
+ return ret
+}
+
+// String returns the string form of n, roughly following
+// the RFC 2253 Distinguished Names syntax.
+func (n Name) String() string {
+ var rdns RDNSequence
+ // If there are no ExtraNames, surface the parsed value (all entries in
+ // Names) instead.
+ if n.ExtraNames == nil {
+ for _, atv := range n.Names {
+ t := atv.Type
+ if len(t) == 4 && t[0] == 2 && t[1] == 5 && t[2] == 4 {
+ switch t[3] {
+ case 3, 5, 6, 7, 8, 9, 10, 11, 17:
+ // These attributes were already parsed into named fields.
+ continue
+ }
+ }
+ // Place non-standard parsed values at the beginning of the sequence
+ // so they will be at the end of the string. See Issue 39924.
+ rdns = append(rdns, []AttributeTypeAndValue{atv})
+ }
+ }
+ rdns = append(rdns, n.ToRDNSequence()...)
+ return rdns.String()
+}
+
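+// Illustrative round trip (editor's sketch, not part of the upstream
+// source), relying only on the exported Name API defined above:
+//
+//	name := Name{
+//		Organization: []string{"Acme Co"},
+//		CommonName:   "example.com",
+//	}
+//	rdns := name.ToRDNSequence()
+//	var parsed Name
+//	parsed.FillFromRDNSequence(&rdns)
+//	fmt.Println(name.String()) // e.g. "CN=example.com,O=Acme Co"
+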
+// oidInAttributeTypeAndValue reports whether a type with the given OID exists
+// in atv.
+func oidInAttributeTypeAndValue(oid asn1.ObjectIdentifier, atv []AttributeTypeAndValue) bool {
+ for _, a := range atv {
+ if a.Type.Equal(oid) {
+ return true
+ }
+ }
+ return false
+}
+
+// CertificateList represents the ASN.1 structure of the same name. See RFC
+// 5280, section 5.1. Use Certificate.CheckCRLSignature to verify the
+// signature.
+//
+// Deprecated: x509.RevocationList should be used instead.
+type CertificateList struct {
+ TBSCertList TBSCertificateList
+ SignatureAlgorithm AlgorithmIdentifier
+ SignatureValue asn1.BitString
+}
+
+// HasExpired reports whether certList should have been updated by now.
+func (certList *CertificateList) HasExpired(now time.Time) bool {
+ return !now.Before(certList.TBSCertList.NextUpdate)
+}
+
+// TBSCertificateList represents the ASN.1 structure of the same name. See RFC
+// 5280, section 5.1.
+//
+// Deprecated: x509.RevocationList should be used instead.
+type TBSCertificateList struct {
+ Raw asn1.RawContent
+ Version int `asn1:"optional,default:0"`
+ Signature AlgorithmIdentifier
+ Issuer RDNSequence
+ ThisUpdate time.Time
+ NextUpdate time.Time `asn1:"optional"`
+ RevokedCertificates []RevokedCertificate `asn1:"optional"`
+ Extensions []Extension `asn1:"tag:0,optional,explicit"`
+}
+
+// RevokedCertificate represents the ASN.1 structure of the same name. See RFC
+// 5280, section 5.1.
+type RevokedCertificate struct {
+ SerialNumber *big.Int
+ RevocationTime time.Time
+ Extensions []Extension `asn1:"optional"`
+}
diff --git a/contrib/go/_std_1.19/src/crypto/x509/root.go b/contrib/go/_std_1.19/src/crypto/x509/root.go
new file mode 100644
index 0000000000..91f4d29a1f
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/x509/root.go
@@ -0,0 +1,32 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+// To update the embedded iOS root store, update the -version
+// argument to the latest security_certificates version from
+// https://opensource.apple.com/source/security_certificates/
+// and run "go generate". See https://golang.org/issue/38843.
+//
+//go:generate go run root_ios_gen.go -version 55188.120.1.0.1
+
+import "sync"
+
+var (
+ once sync.Once
+ systemRoots *CertPool
+ systemRootsErr error
+)
+
+func systemRootsPool() *CertPool {
+ once.Do(initSystemRoots)
+ return systemRoots
+}
+
+func initSystemRoots() {
+ systemRoots, systemRootsErr = loadSystemRoots()
+ if systemRootsErr != nil {
+ systemRoots = nil
+ }
+}
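+
+// Typical use of the lazily loaded pool above (editor's sketch, not part
+// of the upstream file): SystemCertPool, defined elsewhere in this
+// package, reaches loadSystemRoots via systemRootsPool:
+//
+//	pool, err := x509.SystemCertPool()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	cfg := &tls.Config{RootCAs: pool}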
diff --git a/contrib/go/_std_1.19/src/crypto/x509/root_darwin.go b/contrib/go/_std_1.19/src/crypto/x509/root_darwin.go
new file mode 100644
index 0000000000..4759462653
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/x509/root_darwin.go
@@ -0,0 +1,113 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ macOS "crypto/x509/internal/macos"
+ "errors"
+)
+
+func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
+ certs := macOS.CFArrayCreateMutable()
+ defer macOS.ReleaseCFArray(certs)
+ leaf, err := macOS.SecCertificateCreateWithData(c.Raw)
+ if err != nil {
+ return nil, errors.New("invalid leaf certificate")
+ }
+ macOS.CFArrayAppendValue(certs, leaf)
+ if opts.Intermediates != nil {
+ for _, lc := range opts.Intermediates.lazyCerts {
+ c, err := lc.getCert()
+ if err != nil {
+ return nil, err
+ }
+ sc, err := macOS.SecCertificateCreateWithData(c.Raw)
+ if err == nil {
+ macOS.CFArrayAppendValue(certs, sc)
+ }
+ }
+ }
+
+ policies := macOS.CFArrayCreateMutable()
+ defer macOS.ReleaseCFArray(policies)
+ sslPolicy := macOS.SecPolicyCreateSSL(opts.DNSName)
+ macOS.CFArrayAppendValue(policies, sslPolicy)
+
+ trustObj, err := macOS.SecTrustCreateWithCertificates(certs, policies)
+ if err != nil {
+ return nil, err
+ }
+ defer macOS.CFRelease(trustObj)
+
+ if !opts.CurrentTime.IsZero() {
+ dateRef := macOS.TimeToCFDateRef(opts.CurrentTime)
+ defer macOS.CFRelease(dateRef)
+ if err := macOS.SecTrustSetVerifyDate(trustObj, dateRef); err != nil {
+ return nil, err
+ }
+ }
+
+ // TODO(roland): we may want to allow passing in SCTs via VerifyOptions and
+ // set them via SecTrustSetSignedCertificateTimestamps, since Apple will
+ // always enforce its SCT requirements, and there are still _some_ people
+ // using TLS or OCSP for that.
+
+ if err := macOS.SecTrustEvaluateWithError(trustObj); err != nil {
+ return nil, err
+ }
+
+ chain := [][]*Certificate{{}}
+ numCerts := macOS.SecTrustGetCertificateCount(trustObj)
+ for i := 0; i < numCerts; i++ {
+ certRef := macOS.SecTrustGetCertificateAtIndex(trustObj, i)
+ cert, err := exportCertificate(certRef)
+ if err != nil {
+ return nil, err
+ }
+ chain[0] = append(chain[0], cert)
+ }
+ if len(chain[0]) == 0 {
+ // This should _never_ happen, but return an error to be safe.

+ return nil, errors.New("x509: macOS certificate verification internal error")
+ }
+
+ if opts.DNSName != "" {
+ // If we have a DNS name, apply our own name verification
+ if err := chain[0][0].VerifyHostname(opts.DNSName); err != nil {
+ return nil, err
+ }
+ }
+
+ keyUsages := opts.KeyUsages
+ if len(keyUsages) == 0 {
+ keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth}
+ }
+
+ // If any key usage is acceptable then we're done.
+ for _, usage := range keyUsages {
+ if usage == ExtKeyUsageAny {
+ return chain, nil
+ }
+ }
+
+ if !checkChainForKeyUsage(chain[0], keyUsages) {
+ return nil, CertificateInvalidError{c, IncompatibleUsage, ""}
+ }
+
+ return chain, nil
+}
+
+// exportCertificate returns a *Certificate for a SecCertificateRef.
+func exportCertificate(cert macOS.CFRef) (*Certificate, error) {
+ data, err := macOS.SecCertificateCopyData(cert)
+ if err != nil {
+ return nil, err
+ }
+ return ParseCertificate(data)
+}
+
+func loadSystemRoots() (*CertPool, error) {
+ return &CertPool{systemPool: true}, nil
+}
diff --git a/contrib/go/_std_1.18/src/crypto/x509/root_linux.go b/contrib/go/_std_1.19/src/crypto/x509/root_linux.go
index e32989b999..e32989b999 100644
--- a/contrib/go/_std_1.18/src/crypto/x509/root_linux.go
+++ b/contrib/go/_std_1.19/src/crypto/x509/root_linux.go
diff --git a/contrib/go/_std_1.18/src/crypto/x509/root_unix.go b/contrib/go/_std_1.19/src/crypto/x509/root_unix.go
index aa54f891ca..aa54f891ca 100644
--- a/contrib/go/_std_1.18/src/crypto/x509/root_unix.go
+++ b/contrib/go/_std_1.19/src/crypto/x509/root_unix.go
diff --git a/contrib/go/_std_1.19/src/crypto/x509/sec1.go b/contrib/go/_std_1.19/src/crypto/x509/sec1.go
new file mode 100644
index 0000000000..ff48e0cc9e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/x509/sec1.go
@@ -0,0 +1,125 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "encoding/asn1"
+ "errors"
+ "fmt"
+ "math/big"
+)
+
+const ecPrivKeyVersion = 1
+
+// ecPrivateKey reflects an ASN.1 Elliptic Curve Private Key Structure.
+// References:
+//
+// RFC 5915
+// SEC1 - http://www.secg.org/sec1-v2.pdf
+//
+// Per RFC 5915 the NamedCurveOID is marked as ASN.1 OPTIONAL, but in
+// practice it is usually present.
+type ecPrivateKey struct {
+ Version int
+ PrivateKey []byte
+ NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"`
+ PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"`
+}
+
+// ParseECPrivateKey parses an EC private key in SEC 1, ASN.1 DER form.
+//
+// This kind of key is commonly encoded in PEM blocks of type "EC PRIVATE KEY".
+func ParseECPrivateKey(der []byte) (*ecdsa.PrivateKey, error) {
+ return parseECPrivateKey(nil, der)
+}
+
+// MarshalECPrivateKey converts an EC private key to SEC 1, ASN.1 DER form.
+//
+// This kind of key is commonly encoded in PEM blocks of type "EC PRIVATE KEY".
+// For a more flexible key format which is not EC specific, use
+// MarshalPKCS8PrivateKey.
+func MarshalECPrivateKey(key *ecdsa.PrivateKey) ([]byte, error) {
+ oid, ok := oidFromNamedCurve(key.Curve)
+ if !ok {
+ return nil, errors.New("x509: unknown elliptic curve")
+ }
+
+ return marshalECPrivateKeyWithOID(key, oid)
+}
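+
+// Illustrative PEM round trip (editor's sketch, not part of the upstream
+// source), using only ParseECPrivateKey and MarshalECPrivateKey from this
+// file plus crypto/ecdsa, crypto/rand, and encoding/pem:
+//
+//	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	der, _ := x509.MarshalECPrivateKey(priv)
+//	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: der})
+//	block, _ := pem.Decode(pemBytes)
+//	key, _ := x509.ParseECPrivateKey(block.Bytes)
+//	_ = key // key.D equals priv.D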
+
+// marshalECPrivateKeyWithOID marshals an EC private key into ASN.1, DER format and
+// sets the curve ID to the given OID, or omits it if OID is nil.
+func marshalECPrivateKeyWithOID(key *ecdsa.PrivateKey, oid asn1.ObjectIdentifier) ([]byte, error) {
+ if !key.Curve.IsOnCurve(key.X, key.Y) {
+ return nil, errors.New("invalid elliptic key public key")
+ }
+ privateKey := make([]byte, (key.Curve.Params().N.BitLen()+7)/8)
+ return asn1.Marshal(ecPrivateKey{
+ Version: 1,
+ PrivateKey: key.D.FillBytes(privateKey),
+ NamedCurveOID: oid,
+ PublicKey: asn1.BitString{Bytes: elliptic.Marshal(key.Curve, key.X, key.Y)},
+ })
+}
+
+// parseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure.
+// The OID for the named curve may be provided from another source (such as
+// the PKCS8 container) - if it is provided then use this instead of the OID
+// that may exist in the EC private key structure.
+func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *ecdsa.PrivateKey, err error) {
+ var privKey ecPrivateKey
+ if _, err := asn1.Unmarshal(der, &privKey); err != nil {
+ if _, err := asn1.Unmarshal(der, &pkcs8{}); err == nil {
+ return nil, errors.New("x509: failed to parse private key (use ParsePKCS8PrivateKey instead for this key format)")
+ }
+ if _, err := asn1.Unmarshal(der, &pkcs1PrivateKey{}); err == nil {
+ return nil, errors.New("x509: failed to parse private key (use ParsePKCS1PrivateKey instead for this key format)")
+ }
+ return nil, errors.New("x509: failed to parse EC private key: " + err.Error())
+ }
+ if privKey.Version != ecPrivKeyVersion {
+ return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version)
+ }
+
+ var curve elliptic.Curve
+ if namedCurveOID != nil {
+ curve = namedCurveFromOID(*namedCurveOID)
+ } else {
+ curve = namedCurveFromOID(privKey.NamedCurveOID)
+ }
+ if curve == nil {
+ return nil, errors.New("x509: unknown elliptic curve")
+ }
+
+ k := new(big.Int).SetBytes(privKey.PrivateKey)
+ curveOrder := curve.Params().N
+ if k.Cmp(curveOrder) >= 0 {
+ return nil, errors.New("x509: invalid elliptic curve private key value")
+ }
+ priv := new(ecdsa.PrivateKey)
+ priv.Curve = curve
+ priv.D = k
+
+ privateKey := make([]byte, (curveOrder.BitLen()+7)/8)
+
+ // Some private keys have leading zero padding. This is invalid
+ // according to [SEC1], but this code will ignore it.
+ for len(privKey.PrivateKey) > len(privateKey) {
+ if privKey.PrivateKey[0] != 0 {
+ return nil, errors.New("x509: invalid private key length")
+ }
+ privKey.PrivateKey = privKey.PrivateKey[1:]
+ }
+
+ // Some private keys remove all leading zeros; this is also invalid
+ // according to [SEC1], but since OpenSSL used to do this, we ignore
+ // it too.
+ copy(privateKey[len(privateKey)-len(privKey.PrivateKey):], privKey.PrivateKey)
+ priv.X, priv.Y = curve.ScalarBaseMult(privateKey)
+
+ return priv, nil
+}
diff --git a/contrib/go/_std_1.19/src/crypto/x509/verify.go b/contrib/go/_std_1.19/src/crypto/x509/verify.go
new file mode 100644
index 0000000000..c49335d225
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/x509/verify.go
@@ -0,0 +1,1170 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/x509/pkix"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+type InvalidReason int
+
+const (
+ // NotAuthorizedToSign results when a certificate is signed by another
+ // which isn't marked as a CA certificate.
+ NotAuthorizedToSign InvalidReason = iota
+ // Expired results when a certificate has expired, based on the time
+ // given in the VerifyOptions.
+ Expired
+ // CANotAuthorizedForThisName results when an intermediate or root
+ // certificate has a name constraint which doesn't permit a DNS or
+ // other name (including IP address) in the leaf certificate.
+ CANotAuthorizedForThisName
+ // TooManyIntermediates results when a path length constraint is
+ // violated.
+ TooManyIntermediates
+ // IncompatibleUsage results when the certificate's key usage indicates
+ // that it may only be used for a different purpose.
+ IncompatibleUsage
+ // NameMismatch results when the subject name of a parent certificate
+ // does not match the issuer name in the child.
+ NameMismatch
+ // NameConstraintsWithoutSANs is a legacy error and is no longer returned.
+ NameConstraintsWithoutSANs
+ // UnconstrainedName results when a CA certificate contains permitted
+ // name constraints, but the leaf certificate contains a name of an
+ // unsupported or unconstrained type.
+ UnconstrainedName
+ // TooManyConstraints results when the number of comparison operations
+ // needed to check a certificate exceeds the limit set by
+ // VerifyOptions.MaxConstraintComparisions. This limit exists to
+ // prevent pathological certificates from consuming excessive amounts
+ // of CPU time to verify.
+ TooManyConstraints
+ // CANotAuthorizedForExtKeyUsage results when an intermediate or root
+ // certificate does not permit a requested extended key usage.
+ CANotAuthorizedForExtKeyUsage
+)
+
+// CertificateInvalidError results when an odd error occurs. Users of this
+// library probably want to handle all these errors uniformly.
+type CertificateInvalidError struct {
+ Cert *Certificate
+ Reason InvalidReason
+ Detail string
+}
+
+func (e CertificateInvalidError) Error() string {
+ switch e.Reason {
+ case NotAuthorizedToSign:
+ return "x509: certificate is not authorized to sign other certificates"
+ case Expired:
+ return "x509: certificate has expired or is not yet valid: " + e.Detail
+ case CANotAuthorizedForThisName:
+ return "x509: a root or intermediate certificate is not authorized to sign for this name: " + e.Detail
+ case CANotAuthorizedForExtKeyUsage:
+ return "x509: a root or intermediate certificate is not authorized for an extended key usage: " + e.Detail
+ case TooManyIntermediates:
+ return "x509: too many intermediates for path length constraint"
+ case IncompatibleUsage:
+ return "x509: certificate specifies an incompatible key usage"
+ case NameMismatch:
+ return "x509: issuer name does not match subject from issuing certificate"
+ case NameConstraintsWithoutSANs:
+ return "x509: issuer has name constraints but leaf doesn't have a SAN extension"
+ case UnconstrainedName:
+ return "x509: issuer has name constraints but leaf contains unknown or unconstrained name: " + e.Detail
+ }
+ return "x509: unknown error"
+}
+
+// HostnameError results when the set of authorized names doesn't match the
+// requested name.
+type HostnameError struct {
+ Certificate *Certificate
+ Host string
+}
+
+func (h HostnameError) Error() string {
+ c := h.Certificate
+
+ if !c.hasSANExtension() && matchHostnames(c.Subject.CommonName, h.Host) {
+ return "x509: certificate relies on legacy Common Name field, use SANs instead"
+ }
+
+ var valid string
+ if ip := net.ParseIP(h.Host); ip != nil {
+ // Trying to validate an IP
+ if len(c.IPAddresses) == 0 {
+ return "x509: cannot validate certificate for " + h.Host + " because it doesn't contain any IP SANs"
+ }
+ for _, san := range c.IPAddresses {
+ if len(valid) > 0 {
+ valid += ", "
+ }
+ valid += san.String()
+ }
+ } else {
+ valid = strings.Join(c.DNSNames, ", ")
+ }
+
+ if len(valid) == 0 {
+ return "x509: certificate is not valid for any names, but wanted to match " + h.Host
+ }
+ return "x509: certificate is valid for " + valid + ", not " + h.Host
+}
+
+// UnknownAuthorityError results when the certificate issuer is unknown.
+type UnknownAuthorityError struct {
+ Cert *Certificate
+ // hintErr contains an error that may be helpful in determining why an
+ // authority wasn't found.
+ hintErr error
+ // hintCert contains a possible authority certificate that was rejected
+ // because of the error in hintErr.
+ hintCert *Certificate
+}
+
+func (e UnknownAuthorityError) Error() string {
+ s := "x509: certificate signed by unknown authority"
+ if e.hintErr != nil {
+ certName := e.hintCert.Subject.CommonName
+ if len(certName) == 0 {
+ if len(e.hintCert.Subject.Organization) > 0 {
+ certName = e.hintCert.Subject.Organization[0]
+ } else {
+ certName = "serial:" + e.hintCert.SerialNumber.String()
+ }
+ }
+ s += fmt.Sprintf(" (possibly because of %q while trying to verify candidate authority certificate %q)", e.hintErr, certName)
+ }
+ return s
+}
+
+// SystemRootsError results when we fail to load the system root certificates.
+type SystemRootsError struct {
+ Err error
+}
+
+func (se SystemRootsError) Error() string {
+ msg := "x509: failed to load system roots and no roots provided"
+ if se.Err != nil {
+ return msg + "; " + se.Err.Error()
+ }
+ return msg
+}
+
+func (se SystemRootsError) Unwrap() error { return se.Err }
+
+// errNotParsed is returned when a certificate without ASN.1 contents is
+// verified. Platform-specific verification needs the ASN.1 contents.
+var errNotParsed = errors.New("x509: missing ASN.1 contents; use ParseCertificate")
+
+// VerifyOptions contains parameters for Certificate.Verify.
+type VerifyOptions struct {
+ // DNSName, if set, is checked against the leaf certificate with
+ // Certificate.VerifyHostname or the platform verifier.
+ DNSName string
+
+ // Intermediates is an optional pool of certificates that are not trust
+ // anchors, but can be used to form a chain from the leaf certificate to a
+ // root certificate.
+ Intermediates *CertPool
+ // Roots is the set of trusted root certificates the leaf certificate needs
+ // to chain up to. If nil, the system roots or the platform verifier are used.
+ Roots *CertPool
+
+ // CurrentTime is used to check the validity of all certificates in the
+ // chain. If zero, the current time is used.
+ CurrentTime time.Time
+
+ // KeyUsages specifies which Extended Key Usage values are acceptable. A
+ // chain is accepted if it allows any of the listed values. An empty list
+ // means ExtKeyUsageServerAuth. To accept any key usage, include ExtKeyUsageAny.
+ KeyUsages []ExtKeyUsage
+
+ // MaxConstraintComparisions is the maximum number of comparisons to
+ // perform when checking a given certificate's name constraints. If
+ // zero, a sensible default is used. This limit prevents pathological
+ // certificates from consuming excessive amounts of CPU time when
+ // validating. It does not apply to the platform verifier.
+ MaxConstraintComparisions int
+}
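+
+// A typical invocation (editor's sketch, not part of the upstream
+// source). rootPEM, interPEM, and leaf are assumed to exist; only the
+// exported API is used:
+//
+//	roots := x509.NewCertPool()
+//	if !roots.AppendCertsFromPEM(rootPEM) {
+//		log.Fatal("failed to parse root certificate")
+//	}
+//	inters := x509.NewCertPool()
+//	inters.AppendCertsFromPEM(interPEM)
+//	opts := x509.VerifyOptions{
+//		DNSName:       "example.com",
+//		Roots:         roots,
+//		Intermediates: inters,
+//	}
+//	chains, err := leaf.Verify(opts)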
+
+const (
+ leafCertificate = iota
+ intermediateCertificate
+ rootCertificate
+)
+
+// rfc2821Mailbox represents a “mailbox” (which is an email address to most
+// people) by breaking it into the “local” (i.e. before the '@') and “domain”
+// parts.
+type rfc2821Mailbox struct {
+ local, domain string
+}
+
+// parseRFC2821Mailbox parses an email address into local and domain parts,
+// based on the ABNF for a “Mailbox” from RFC 2821. According to RFC 5280,
+// Section 4.2.1.6 that's correct for an rfc822Name from a certificate: “The
+// format of an rfc822Name is a "Mailbox" as defined in RFC 2821, Section 4.1.2”.
+func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) {
+ if len(in) == 0 {
+ return mailbox, false
+ }
+
+ localPartBytes := make([]byte, 0, len(in)/2)
+
+ if in[0] == '"' {
+ // Quoted-string = DQUOTE *qcontent DQUOTE
+ // non-whitespace-control = %d1-8 / %d11 / %d12 / %d14-31 / %d127
+ // qcontent = qtext / quoted-pair
+ // qtext = non-whitespace-control /
+ // %d33 / %d35-91 / %d93-126
+ // quoted-pair = ("\" text) / obs-qp
+ // text = %d1-9 / %d11 / %d12 / %d14-127 / obs-text
+ //
+ // (Names beginning with “obs-” are the obsolete syntax from RFC 2822,
+ // Section 4. Since it has been 16 years, we no longer accept that.)
+ in = in[1:]
+ QuotedString:
+ for {
+ if len(in) == 0 {
+ return mailbox, false
+ }
+ c := in[0]
+ in = in[1:]
+
+ switch {
+ case c == '"':
+ break QuotedString
+
+ case c == '\\':
+ // quoted-pair
+ if len(in) == 0 {
+ return mailbox, false
+ }
+ if in[0] == 11 ||
+ in[0] == 12 ||
+ (1 <= in[0] && in[0] <= 9) ||
+ (14 <= in[0] && in[0] <= 127) {
+ localPartBytes = append(localPartBytes, in[0])
+ in = in[1:]
+ } else {
+ return mailbox, false
+ }
+
+ case c == 11 ||
+ c == 12 ||
+ // Space (char 32) is not allowed based on the
+ // BNF, but RFC 3696 gives an example that
+ // assumes that it is. Several “verified”
+ // errata continue to argue about this point.
+ // We choose to accept it.
+ c == 32 ||
+ c == 33 ||
+ c == 127 ||
+ (1 <= c && c <= 8) ||
+ (14 <= c && c <= 31) ||
+ (35 <= c && c <= 91) ||
+ (93 <= c && c <= 126):
+ // qtext
+ localPartBytes = append(localPartBytes, c)
+
+ default:
+ return mailbox, false
+ }
+ }
+ } else {
+ // Atom ("." Atom)*
+ NextChar:
+ for len(in) > 0 {
+ // atext from RFC 2822, Section 3.2.4
+ c := in[0]
+
+ switch {
+ case c == '\\':
+ // Examples given in RFC 3696 suggest that
+ // escaped characters can appear outside of a
+ // quoted string. Several “verified” errata
+ // continue to argue the point. We choose to
+ // accept it.
+ in = in[1:]
+ if len(in) == 0 {
+ return mailbox, false
+ }
+ fallthrough
+
+ case ('0' <= c && c <= '9') ||
+ ('a' <= c && c <= 'z') ||
+ ('A' <= c && c <= 'Z') ||
+ c == '!' || c == '#' || c == '$' || c == '%' ||
+ c == '&' || c == '\'' || c == '*' || c == '+' ||
+ c == '-' || c == '/' || c == '=' || c == '?' ||
+ c == '^' || c == '_' || c == '`' || c == '{' ||
+ c == '|' || c == '}' || c == '~' || c == '.':
+ localPartBytes = append(localPartBytes, in[0])
+ in = in[1:]
+
+ default:
+ break NextChar
+ }
+ }
+
+ if len(localPartBytes) == 0 {
+ return mailbox, false
+ }
+
+ // From RFC 3696, Section 3:
+ // “period (".") may also appear, but may not be used to start
+ // or end the local part, nor may two or more consecutive
+ // periods appear.”
+ twoDots := []byte{'.', '.'}
+ if localPartBytes[0] == '.' ||
+ localPartBytes[len(localPartBytes)-1] == '.' ||
+ bytes.Contains(localPartBytes, twoDots) {
+ return mailbox, false
+ }
+ }
+
+ if len(in) == 0 || in[0] != '@' {
+ return mailbox, false
+ }
+ in = in[1:]
+
+ // The RFC specifies a format for domains, but that's known to be
+ // violated in practice, so we accept anything after an '@' as the
+ // domain part.
+ if _, ok := domainToReverseLabels(in); !ok {
+ return mailbox, false
+ }
+
+ mailbox.local = string(localPartBytes)
+ mailbox.domain = in
+ return mailbox, true
+}
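+
+// Editor's illustration (hypothetical calls within this package):
+//
+//	parseRFC2821Mailbox(`gopher@example.com`)  // {local: "gopher", domain: "example.com"}, true
+//	parseRFC2821Mailbox(`"g o"@example.com`)   // {local: "g o", domain: "example.com"}, true
+//	parseRFC2821Mailbox(`.gopher@example.com`) // rejected: local part starts with '.'
+//	parseRFC2821Mailbox(`gopher`)              // rejected: no '@'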
+
+// domainToReverseLabels converts a textual domain name like foo.example.com to
+// the list of labels in reverse order, e.g. ["com", "example", "foo"].
+func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) {
+ for len(domain) > 0 {
+ if i := strings.LastIndexByte(domain, '.'); i == -1 {
+ reverseLabels = append(reverseLabels, domain)
+ domain = ""
+ } else {
+ reverseLabels = append(reverseLabels, domain[i+1:])
+ domain = domain[:i]
+ }
+ }
+
+ if len(reverseLabels) > 0 && len(reverseLabels[0]) == 0 {
+ // An empty label at the end of the domain (a trailing dot)
+ // indicates an absolute name.
+ return nil, false
+ }
+
+ for _, label := range reverseLabels {
+ if len(label) == 0 {
+ // Empty labels are otherwise invalid.
+ return nil, false
+ }
+
+ for _, c := range label {
+ if c < 33 || c > 126 {
+ // Invalid character.
+ return nil, false
+ }
+ }
+ }
+
+ return reverseLabels, true
+}
+
+func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, error) {
+ // If the constraint contains an @, then it specifies an exact mailbox
+ // name.
+ if strings.Contains(constraint, "@") {
+ constraintMailbox, ok := parseRFC2821Mailbox(constraint)
+ if !ok {
+ return false, fmt.Errorf("x509: internal error: cannot parse constraint %q", constraint)
+ }
+ return mailbox.local == constraintMailbox.local && strings.EqualFold(mailbox.domain, constraintMailbox.domain), nil
+ }
+
+ // Otherwise the constraint is like a DNS constraint of the domain part
+ // of the mailbox.
+ return matchDomainConstraint(mailbox.domain, constraint)
+}
+
+func matchURIConstraint(uri *url.URL, constraint string) (bool, error) {
+ // From RFC 5280, Section 4.2.1.10:
+ // “a uniformResourceIdentifier that does not include an authority
+ // component with a host name specified as a fully qualified domain
+ // name (e.g., if the URI either does not include an authority
+ // component or includes an authority component in which the host name
+ // is specified as an IP address), then the application MUST reject the
+ // certificate.”
+
+ host := uri.Host
+ if len(host) == 0 {
+ return false, fmt.Errorf("URI with empty host (%q) cannot be matched against constraints", uri.String())
+ }
+
+ if strings.Contains(host, ":") && !strings.HasSuffix(host, "]") {
+ var err error
+ host, _, err = net.SplitHostPort(uri.Host)
+ if err != nil {
+ return false, err
+ }
+ }
+
+ if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") ||
+ net.ParseIP(host) != nil {
+ return false, fmt.Errorf("URI with IP (%q) cannot be matched against constraints", uri.String())
+ }
+
+ return matchDomainConstraint(host, constraint)
+}
+
+func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) {
+ if len(ip) != len(constraint.IP) {
+ return false, nil
+ }
+
+ for i := range ip {
+ if mask := constraint.Mask[i]; ip[i]&mask != constraint.IP[i]&mask {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func matchDomainConstraint(domain, constraint string) (bool, error) {
+ // The meaning of zero-length constraints is not specified, but this
+ // code follows NSS and accepts them as matching everything.
+ if len(constraint) == 0 {
+ return true, nil
+ }
+
+ domainLabels, ok := domainToReverseLabels(domain)
+ if !ok {
+ return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain)
+ }
+
+ // RFC 5280 says that a leading period in a domain name means that at
+ // least one label must be prepended, but only for URI and email
+ // constraints, not DNS constraints. The code also supports that
+ // behaviour for DNS constraints.
+
+ mustHaveSubdomains := false
+ if constraint[0] == '.' {
+ mustHaveSubdomains = true
+ constraint = constraint[1:]
+ }
+
+ constraintLabels, ok := domainToReverseLabels(constraint)
+ if !ok {
+ return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint)
+ }
+
+ if len(domainLabels) < len(constraintLabels) ||
+ (mustHaveSubdomains && len(domainLabels) == len(constraintLabels)) {
+ return false, nil
+ }
+
+ for i, constraintLabel := range constraintLabels {
+ if !strings.EqualFold(constraintLabel, domainLabels[i]) {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
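+
+// The leading-period rule above plays out as follows (editor's
+// illustration; hypothetical calls within this package):
+//
+//	matchDomainConstraint("mail.example.com", "example.com")  // true
+//	matchDomainConstraint("mail.example.com", ".example.com") // true
+//	matchDomainConstraint("example.com", ".example.com")      // false: a subdomain is required
+//	matchDomainConstraint("example.com", "")                  // true: empty matches everything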
+
+// checkNameConstraints checks that c permits a child certificate to claim the
+// given name, of type nameType. The argument parsedName contains the parsed
+// form of name, suitable for passing to the match function. The total number
+// of comparisons is tracked in the given count and should not exceed the given
+// limit.
+func (c *Certificate) checkNameConstraints(count *int,
+ maxConstraintComparisons int,
+ nameType string,
+ name string,
+ parsedName any,
+ match func(parsedName, constraint any) (match bool, err error),
+ permitted, excluded any) error {
+
+ excludedValue := reflect.ValueOf(excluded)
+
+ *count += excludedValue.Len()
+ if *count > maxConstraintComparisons {
+ return CertificateInvalidError{c, TooManyConstraints, ""}
+ }
+
+ for i := 0; i < excludedValue.Len(); i++ {
+ constraint := excludedValue.Index(i).Interface()
+ match, err := match(parsedName, constraint)
+ if err != nil {
+ return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()}
+ }
+
+ if match {
+ return CertificateInvalidError{c, CANotAuthorizedForThisName, fmt.Sprintf("%s %q is excluded by constraint %q", nameType, name, constraint)}
+ }
+ }
+
+ permittedValue := reflect.ValueOf(permitted)
+
+ *count += permittedValue.Len()
+ if *count > maxConstraintComparisons {
+ return CertificateInvalidError{c, TooManyConstraints, ""}
+ }
+
+ ok := true
+ for i := 0; i < permittedValue.Len(); i++ {
+ constraint := permittedValue.Index(i).Interface()
+
+ var err error
+ if ok, err = match(parsedName, constraint); err != nil {
+ return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()}
+ }
+
+ if ok {
+ break
+ }
+ }
+
+ if !ok {
+ return CertificateInvalidError{c, CANotAuthorizedForThisName, fmt.Sprintf("%s %q is not permitted by any constraint", nameType, name)}
+ }
+
+ return nil
+}
+
+// isValid performs validity checks on c given that it is a candidate to append
+// to the chain in currentChain.
+func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error {
+ if len(c.UnhandledCriticalExtensions) > 0 {
+ return UnhandledCriticalExtension{}
+ }
+
+ if len(currentChain) > 0 {
+ child := currentChain[len(currentChain)-1]
+ if !bytes.Equal(child.RawIssuer, c.RawSubject) {
+ return CertificateInvalidError{c, NameMismatch, ""}
+ }
+ }
+
+ now := opts.CurrentTime
+ if now.IsZero() {
+ now = time.Now()
+ }
+ if now.Before(c.NotBefore) {
+ return CertificateInvalidError{
+ Cert: c,
+ Reason: Expired,
+ Detail: fmt.Sprintf("current time %s is before %s", now.Format(time.RFC3339), c.NotBefore.Format(time.RFC3339)),
+ }
+ } else if now.After(c.NotAfter) {
+ return CertificateInvalidError{
+ Cert: c,
+ Reason: Expired,
+ Detail: fmt.Sprintf("current time %s is after %s", now.Format(time.RFC3339), c.NotAfter.Format(time.RFC3339)),
+ }
+ }
+
+ maxConstraintComparisons := opts.MaxConstraintComparisions
+ if maxConstraintComparisons == 0 {
+ maxConstraintComparisons = 250000
+ }
+ comparisonCount := 0
+
+ var leaf *Certificate
+ if certType == intermediateCertificate || certType == rootCertificate {
+ if len(currentChain) == 0 {
+ return errors.New("x509: internal error: empty chain when appending CA cert")
+ }
+ leaf = currentChain[0]
+ }
+
+ if (certType == intermediateCertificate || certType == rootCertificate) &&
+ c.hasNameConstraints() {
+ toCheck := []*Certificate{}
+ if leaf.hasSANExtension() {
+ toCheck = append(toCheck, leaf)
+ }
+ if c.hasSANExtension() {
+ toCheck = append(toCheck, c)
+ }
+ for _, sanCert := range toCheck {
+ err := forEachSAN(sanCert.getSANExtension(), func(tag int, data []byte) error {
+ switch tag {
+ case nameTypeEmail:
+ name := string(data)
+ mailbox, ok := parseRFC2821Mailbox(name)
+ if !ok {
+ return fmt.Errorf("x509: cannot parse rfc822Name %q", mailbox)
+ }
+
+ if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox,
+ func(parsedName, constraint any) (bool, error) {
+ return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string))
+ }, c.PermittedEmailAddresses, c.ExcludedEmailAddresses); err != nil {
+ return err
+ }
+
+ case nameTypeDNS:
+ name := string(data)
+ if _, ok := domainToReverseLabels(name); !ok {
+ return fmt.Errorf("x509: cannot parse dnsName %q", name)
+ }
+
+ if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name,
+ func(parsedName, constraint any) (bool, error) {
+ return matchDomainConstraint(parsedName.(string), constraint.(string))
+ }, c.PermittedDNSDomains, c.ExcludedDNSDomains); err != nil {
+ return err
+ }
+
+ case nameTypeURI:
+ name := string(data)
+ uri, err := url.Parse(name)
+ if err != nil {
+ return fmt.Errorf("x509: internal error: URI SAN %q failed to parse", name)
+ }
+
+ if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "URI", name, uri,
+ func(parsedName, constraint any) (bool, error) {
+ return matchURIConstraint(parsedName.(*url.URL), constraint.(string))
+ }, c.PermittedURIDomains, c.ExcludedURIDomains); err != nil {
+ return err
+ }
+
+ case nameTypeIP:
+ ip := net.IP(data)
+ if l := len(ip); l != net.IPv4len && l != net.IPv6len {
+ return fmt.Errorf("x509: internal error: IP SAN %x failed to parse", data)
+ }
+
+ if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "IP address", ip.String(), ip,
+ func(parsedName, constraint any) (bool, error) {
+ return matchIPConstraint(parsedName.(net.IP), constraint.(*net.IPNet))
+ }, c.PermittedIPRanges, c.ExcludedIPRanges); err != nil {
+ return err
+ }
+
+ default:
+ // Unknown SAN types are ignored.
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ // KeyUsage status flags are ignored. From Engineering Security, Peter
+ // Gutmann: A European government CA marked its signing certificates as
+ // being valid for encryption only, but no-one noticed. Another
+ // European CA marked its signature keys as not being valid for
+ // signatures. A different CA marked its own trusted root certificate
+ // as being invalid for certificate signing. Another national CA
+ // distributed a certificate to be used to encrypt data for the
+ // country’s tax authority that was marked as only being usable for
+ // digital signatures but not for encryption. Yet another CA reversed
+ // the order of the bit flags in the keyUsage due to confusion over
+ // encoding endianness, essentially setting a random keyUsage in
+ // certificates that it issued. Another CA created a self-invalidating
+ // certificate by adding a certificate policy statement stipulating
+ // that the certificate had to be used strictly as specified in the
+ // keyUsage, and a keyUsage containing a flag indicating that the RSA
+ // encryption key could only be used for Diffie-Hellman key agreement.
+
+ if certType == intermediateCertificate && (!c.BasicConstraintsValid || !c.IsCA) {
+ return CertificateInvalidError{c, NotAuthorizedToSign, ""}
+ }
+
+ if c.BasicConstraintsValid && c.MaxPathLen >= 0 {
+ numIntermediates := len(currentChain) - 1
+ if numIntermediates > c.MaxPathLen {
+ return CertificateInvalidError{c, TooManyIntermediates, ""}
+ }
+ }
+
+ if !boringAllowCert(c) {
+ // IncompatibleUsage is not quite right here,
+ // but it's also the "no chains found" error
+ // and is close enough.
+ return CertificateInvalidError{c, IncompatibleUsage, ""}
+ }
+
+ return nil
+}
+
+// Verify attempts to verify c by building one or more chains from c to a
+// certificate in opts.Roots, using certificates in opts.Intermediates if
+// needed. If successful, it returns one or more chains where the first
+// element of the chain is c and the last element is from opts.Roots.
+//
+// If opts.Roots is nil, the platform verifier might be used, and
+// verification details might differ from what is described below. If system
+// roots are unavailable the returned error will be of type SystemRootsError.
+//
+// Name constraints in the intermediates will be applied to all names claimed
+// in the chain, not just opts.DNSName. Thus it is invalid for a leaf to claim
+// example.com if an intermediate doesn't permit it, even if example.com is not
+// the name being validated. Note that DirectoryName constraints are not
+// supported.
+//
+// Name constraint validation follows the rules from RFC 5280, with the
+// addition that DNS name constraints may use the leading period format
+// defined for emails and URIs. When a constraint has a leading period
+// it indicates that at least one additional label must be prepended to
+// the constrained name to be considered valid.
+//
+// Extended Key Usage values are enforced nested down a chain, so an intermediate
+// or root that enumerates EKUs prevents a leaf from asserting an EKU not in that
+// list. (While this is not specified, it is common practice in order to limit
+// the types of certificates a CA can issue.)
+//
+// Certificates that use SHA1WithRSA and ECDSAWithSHA1 signatures are not supported,
+// and will not be used to build chains.
+//
+// WARNING: this function doesn't do any revocation checking.
+func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err error) {
+ // Platform-specific verification needs the ASN.1 contents, so
+ // requiring them here keeps the behavior consistent across platforms.
+ if len(c.Raw) == 0 {
+ return nil, errNotParsed
+ }
+ for i := 0; i < opts.Intermediates.len(); i++ {
+ c, err := opts.Intermediates.cert(i)
+ if err != nil {
+ return nil, fmt.Errorf("crypto/x509: error fetching intermediate: %w", err)
+ }
+ if len(c.Raw) == 0 {
+ return nil, errNotParsed
+ }
+ }
+
+ // Use platform verifiers, where available, if Roots is from SystemCertPool.
+ if runtime.GOOS == "windows" || runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
+ if opts.Roots == nil {
+ return c.systemVerify(&opts)
+ }
+ if opts.Roots != nil && opts.Roots.systemPool {
+ platformChains, err := c.systemVerify(&opts)
+ // If the platform verifier succeeded, or there are no additional
+ // roots, return the platform verifier result. Otherwise, continue
+ // with the Go verifier.
+ if err == nil || opts.Roots.len() == 0 {
+ return platformChains, err
+ }
+ }
+ }
+
+ if opts.Roots == nil {
+ opts.Roots = systemRootsPool()
+ if opts.Roots == nil {
+ return nil, SystemRootsError{systemRootsErr}
+ }
+ }
+
+ err = c.isValid(leafCertificate, nil, &opts)
+ if err != nil {
+ return
+ }
+
+ if len(opts.DNSName) > 0 {
+ err = c.VerifyHostname(opts.DNSName)
+ if err != nil {
+ return
+ }
+ }
+
+ var candidateChains [][]*Certificate
+ if opts.Roots.contains(c) {
+ candidateChains = [][]*Certificate{{c}}
+ } else {
+ candidateChains, err = c.buildChains([]*Certificate{c}, nil, &opts)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(opts.KeyUsages) == 0 {
+ opts.KeyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth}
+ }
+
+ for _, eku := range opts.KeyUsages {
+ if eku == ExtKeyUsageAny {
+ // If any key usage is acceptable, no need to check the chain for
+ // key usages.
+ return candidateChains, nil
+ }
+ }
+
+ chains = make([][]*Certificate, 0, len(candidateChains))
+ for _, candidate := range candidateChains {
+ if checkChainForKeyUsage(candidate, opts.KeyUsages) {
+ chains = append(chains, candidate)
+ }
+ }
+
+ if len(chains) == 0 {
+ return nil, CertificateInvalidError{c, IncompatibleUsage, ""}
+ }
+
+ return chains, nil
+}
+
+func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate {
+ n := make([]*Certificate, len(chain)+1)
+ copy(n, chain)
+ n[len(chain)] = cert
+ return n
+}
+
+// alreadyInChain checks whether a candidate certificate is present in a chain.
+// Rather than doing a direct byte for byte equivalency check, we check if the
+// subject, public key, and SAN, if present, are equal. This prevents loops that
+// are created by mutual cross-signatures, or other cross-signature bridge
+// oddities.
+func alreadyInChain(candidate *Certificate, chain []*Certificate) bool {
+ type pubKeyEqual interface {
+ Equal(crypto.PublicKey) bool
+ }
+
+ var candidateSAN *pkix.Extension
+ for _, ext := range candidate.Extensions {
+ if ext.Id.Equal(oidExtensionSubjectAltName) {
+ candidateSAN = &ext
+ break
+ }
+ }
+
+ for _, cert := range chain {
+ if !bytes.Equal(candidate.RawSubject, cert.RawSubject) {
+ continue
+ }
+ if !candidate.PublicKey.(pubKeyEqual).Equal(cert.PublicKey) {
+ continue
+ }
+ var certSAN *pkix.Extension
+ for _, ext := range cert.Extensions {
+ if ext.Id.Equal(oidExtensionSubjectAltName) {
+ certSAN = &ext
+ break
+ }
+ }
+ if candidateSAN == nil && certSAN == nil {
+ return true
+ } else if candidateSAN == nil || certSAN == nil {
+ return false
+ }
+ if bytes.Equal(candidateSAN.Value, certSAN.Value) {
+ return true
+ }
+ }
+ return false
+}
+
+// maxChainSignatureChecks is the maximum number of CheckSignatureFrom calls
+// that an invocation of buildChains will (transitively) make. Most chains are
+// less than 15 certificates long, so this leaves space for multiple chains and
+// for failed checks due to different intermediates having the same Subject.
+const maxChainSignatureChecks = 100
+
+func (c *Certificate) buildChains(currentChain []*Certificate, sigChecks *int, opts *VerifyOptions) (chains [][]*Certificate, err error) {
+ var (
+ hintErr error
+ hintCert *Certificate
+ )
+
+ considerCandidate := func(certType int, candidate *Certificate) {
+ if alreadyInChain(candidate, currentChain) {
+ return
+ }
+
+ if sigChecks == nil {
+ sigChecks = new(int)
+ }
+ *sigChecks++
+ if *sigChecks > maxChainSignatureChecks {
+ err = errors.New("x509: signature check attempts limit reached while verifying certificate chain")
+ return
+ }
+
+ if err := c.CheckSignatureFrom(candidate); err != nil {
+ if hintErr == nil {
+ hintErr = err
+ hintCert = candidate
+ }
+ return
+ }
+
+ err = candidate.isValid(certType, currentChain, opts)
+ if err != nil {
+ return
+ }
+
+ switch certType {
+ case rootCertificate:
+ chains = append(chains, appendToFreshChain(currentChain, candidate))
+ case intermediateCertificate:
+ var childChains [][]*Certificate
+ childChains, err = candidate.buildChains(appendToFreshChain(currentChain, candidate), sigChecks, opts)
+ chains = append(chains, childChains...)
+ }
+ }
+
+ for _, root := range opts.Roots.findPotentialParents(c) {
+ considerCandidate(rootCertificate, root)
+ }
+ for _, intermediate := range opts.Intermediates.findPotentialParents(c) {
+ considerCandidate(intermediateCertificate, intermediate)
+ }
+
+ if len(chains) > 0 {
+ err = nil
+ }
+ if len(chains) == 0 && err == nil {
+ err = UnknownAuthorityError{c, hintErr, hintCert}
+ }
+
+ return
+}
+
+func validHostnamePattern(host string) bool { return validHostname(host, true) }
+func validHostnameInput(host string) bool { return validHostname(host, false) }
+
+// validHostname reports whether host is a valid hostname that can be matched or
+// matched against according to RFC 6125 2.2, with some leniency to accommodate
+// legacy values.
+func validHostname(host string, isPattern bool) bool {
+ if !isPattern {
+ host = strings.TrimSuffix(host, ".")
+ }
+ if len(host) == 0 {
+ return false
+ }
+
+ for i, part := range strings.Split(host, ".") {
+ if part == "" {
+ // Empty label.
+ return false
+ }
+ if isPattern && i == 0 && part == "*" {
+ // Only allow full left-most wildcards, as those are the only ones
+ // we match, and matching literal '*' characters is probably never
+ // the expected behavior.
+ continue
+ }
+ for j, c := range part {
+ if 'a' <= c && c <= 'z' {
+ continue
+ }
+ if '0' <= c && c <= '9' {
+ continue
+ }
+ if 'A' <= c && c <= 'Z' {
+ continue
+ }
+ if c == '-' && j != 0 {
+ continue
+ }
+ if c == '_' {
+ // Not a valid character in hostnames, but commonly
+ // found in deployments outside the WebPKI.
+ continue
+ }
+ return false
+ }
+ }
+
+ return true
+}
+
+func matchExactly(hostA, hostB string) bool {
+ if hostA == "" || hostA == "." || hostB == "" || hostB == "." {
+ return false
+ }
+ return toLowerCaseASCII(hostA) == toLowerCaseASCII(hostB)
+}
+
+func matchHostnames(pattern, host string) bool {
+ pattern = toLowerCaseASCII(pattern)
+ host = toLowerCaseASCII(strings.TrimSuffix(host, "."))
+
+ if len(pattern) == 0 || len(host) == 0 {
+ return false
+ }
+
+ patternParts := strings.Split(pattern, ".")
+ hostParts := strings.Split(host, ".")
+
+ if len(patternParts) != len(hostParts) {
+ return false
+ }
+
+ for i, patternPart := range patternParts {
+ if i == 0 && patternPart == "*" {
+ continue
+ }
+ if patternPart != hostParts[i] {
+ return false
+ }
+ }
+
+ return true
+}
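+
+// Editor's illustration (hypothetical calls within this package):
+//
+//	matchHostnames("*.example.com", "mail.example.com") // true
+//	matchHostnames("*.example.com", "example.com")      // false: label counts differ
+//	matchHostnames("*.example.com", "a.b.example.com")  // false: the wildcard spans one label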
+
+// toLowerCaseASCII returns a lower-case version of in. See RFC 6125 6.4.1. We use
+// an explicitly ASCII function to avoid any sharp corners resulting from
+// performing Unicode operations on DNS labels.
+func toLowerCaseASCII(in string) string {
+ // If the string is already lower-case then there's nothing to do.
+ isAlreadyLowerCase := true
+ for _, c := range in {
+ if c == utf8.RuneError {
+ // If we get a UTF-8 error then there might be
+ // upper-case ASCII bytes in the invalid sequence.
+ isAlreadyLowerCase = false
+ break
+ }
+ if 'A' <= c && c <= 'Z' {
+ isAlreadyLowerCase = false
+ break
+ }
+ }
+
+ if isAlreadyLowerCase {
+ return in
+ }
+
+ out := []byte(in)
+ for i, c := range out {
+ if 'A' <= c && c <= 'Z' {
+ out[i] += 'a' - 'A'
+ }
+ }
+ return string(out)
+}
+
+// VerifyHostname returns nil if c is a valid certificate for the named host.
+// Otherwise it returns an error describing the mismatch.
+//
+// IP addresses can be optionally enclosed in square brackets and are checked
+// against the IPAddresses field. Other names are checked case insensitively
+// against the DNSNames field. If the names are valid hostnames, the certificate
+// fields can have a wildcard as the left-most label.
+//
+// Note that the legacy Common Name field is ignored.
+func (c *Certificate) VerifyHostname(h string) error {
+ // IP addresses may be written in [ ].
+ candidateIP := h
+ if len(h) >= 3 && h[0] == '[' && h[len(h)-1] == ']' {
+ candidateIP = h[1 : len(h)-1]
+ }
+ if ip := net.ParseIP(candidateIP); ip != nil {
+ // We only match IP addresses against IP SANs.
+ // See RFC 6125, Appendix B.2.
+ for _, candidate := range c.IPAddresses {
+ if ip.Equal(candidate) {
+ return nil
+ }
+ }
+ return HostnameError{c, candidateIP}
+ }
+
+ candidateName := toLowerCaseASCII(h) // Save allocations inside the loop.
+ validCandidateName := validHostnameInput(candidateName)
+
+ for _, match := range c.DNSNames {
+ // Ideally, we'd only match valid hostnames according to RFC 6125 like
+ // browsers (more or less) do, but in practice Go is used in a wider
+ // array of contexts and can't even assume DNS resolution. Instead,
+ // always allow perfect matches, and only apply wildcard and trailing
+ // dot processing to valid hostnames.
+ if validCandidateName && validHostnamePattern(match) {
+ if matchHostnames(match, candidateName) {
+ return nil
+ }
+ } else {
+ if matchExactly(match, candidateName) {
+ return nil
+ }
+ }
+ }
+
+ return HostnameError{c, h}
+}
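+
+// Example (editor's sketch, not part of the upstream source), assuming a
+// PEM-encoded certificate in certPEM:
+//
+//	block, _ := pem.Decode(certPEM)
+//	cert, err := x509.ParseCertificate(block.Bytes)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := cert.VerifyHostname("mail.example.com"); err != nil {
+//		log.Fatal(err) // e.g. "x509: certificate is valid for ..., not mail.example.com"
+//	}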
+
+func checkChainForKeyUsage(chain []*Certificate, keyUsages []ExtKeyUsage) bool {
+ usages := make([]ExtKeyUsage, len(keyUsages))
+ copy(usages, keyUsages)
+
+ if len(chain) == 0 {
+ return false
+ }
+
+ usagesRemaining := len(usages)
+
+ // We walk down the list and cross out any usages that aren't supported
+ // by each certificate. If we cross out all the usages, then the chain
+ // is unacceptable.
+
+NextCert:
+ for i := len(chain) - 1; i >= 0; i-- {
+ cert := chain[i]
+ if len(cert.ExtKeyUsage) == 0 && len(cert.UnknownExtKeyUsage) == 0 {
+ // The certificate doesn't have any extended key usage specified.
+ continue
+ }
+
+ for _, usage := range cert.ExtKeyUsage {
+ if usage == ExtKeyUsageAny {
+ // The certificate is explicitly good for any usage.
+ continue NextCert
+ }
+ }
+
+ const invalidUsage ExtKeyUsage = -1
+
+ NextRequestedUsage:
+ for i, requestedUsage := range usages {
+ if requestedUsage == invalidUsage {
+ continue
+ }
+
+ for _, usage := range cert.ExtKeyUsage {
+ if requestedUsage == usage {
+ continue NextRequestedUsage
+ }
+ }
+
+ usages[i] = invalidUsage
+ usagesRemaining--
+ if usagesRemaining == 0 {
+ return false
+ }
+ }
+ }
+
+ return true
+}
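+
+// Editor's illustration (hypothetical call within this package): a chain
+// whose intermediate lists only ExtKeyUsageClientAuth crosses out the
+// single requested usage below, so the call returns false; an
+// intermediate carrying ExtKeyUsageAny short-circuits via NextCert and
+// leaves the chain acceptable:
+//
+//	ok := checkChainForKeyUsage(chain, []ExtKeyUsage{ExtKeyUsageServerAuth})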
diff --git a/contrib/go/_std_1.19/src/crypto/x509/x509.go b/contrib/go/_std_1.19/src/crypto/x509/x509.go
new file mode 100644
index 0000000000..7c64761bd7
--- /dev/null
+++ b/contrib/go/_std_1.19/src/crypto/x509/x509.go
@@ -0,0 +1,2284 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package x509 parses X.509-encoded keys and certificates.
+package x509
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "internal/godebug"
+ "io"
+ "math/big"
+ "net"
+ "net/url"
+ "strconv"
+ "time"
+ "unicode"
+
+ // Explicitly import these for their crypto.RegisterHash init side-effects.
+ // Keep these as blank imports, even if they're imported above.
+ _ "crypto/sha1"
+ _ "crypto/sha256"
+ _ "crypto/sha512"
+
+ "golang.org/x/crypto/cryptobyte"
+ cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
+)
+
+// pkixPublicKey reflects a PKIX public key structure. See SubjectPublicKeyInfo
+// in RFC 3280.
+type pkixPublicKey struct {
+ Algo pkix.AlgorithmIdentifier
+ BitString asn1.BitString
+}
+
+// ParsePKIXPublicKey parses a public key in PKIX, ASN.1 DER form.
+// The encoded public key is a SubjectPublicKeyInfo structure
+// (see RFC 5280, Section 4.1).
+//
+// It returns a *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, or
+// ed25519.PublicKey. More types might be supported in the future.
+//
+// This kind of key is commonly encoded in PEM blocks of type "PUBLIC KEY".
+func ParsePKIXPublicKey(derBytes []byte) (pub any, err error) {
+ var pki publicKeyInfo
+ if rest, err := asn1.Unmarshal(derBytes, &pki); err != nil {
+ if _, err := asn1.Unmarshal(derBytes, &pkcs1PublicKey{}); err == nil {
+ return nil, errors.New("x509: failed to parse public key (use ParsePKCS1PublicKey instead for this key format)")
+ }
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after ASN.1 of public-key")
+ }
+ algo := getPublicKeyAlgorithmFromOID(pki.Algorithm.Algorithm)
+ if algo == UnknownPublicKeyAlgorithm {
+ return nil, errors.New("x509: unknown public key algorithm")
+ }
+ return parsePublicKey(algo, &pki)
+}
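+
+// Typical consumption of the returned value (editor's sketch, not part
+// of the upstream source); pubPEM is an assumed PEM "PUBLIC KEY" block:
+//
+//	block, _ := pem.Decode(pubPEM)
+//	pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	switch pub := pub.(type) {
+//	case *rsa.PublicKey:
+//		fmt.Println("RSA, modulus bits:", pub.N.BitLen())
+//	case *ecdsa.PublicKey:
+//		fmt.Println("ECDSA, curve:", pub.Curve.Params().Name)
+//	case ed25519.PublicKey:
+//		fmt.Println("Ed25519")
+//	}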
+
+func marshalPublicKey(pub any) (publicKeyBytes []byte, publicKeyAlgorithm pkix.AlgorithmIdentifier, err error) {
+ switch pub := pub.(type) {
+ case *rsa.PublicKey:
+ publicKeyBytes, err = asn1.Marshal(pkcs1PublicKey{
+ N: pub.N,
+ E: pub.E,
+ })
+ if err != nil {
+ return nil, pkix.AlgorithmIdentifier{}, err
+ }
+ publicKeyAlgorithm.Algorithm = oidPublicKeyRSA
+ // This is a NULL parameters value which is required by
+ // RFC 3279, Section 2.3.1.
+ publicKeyAlgorithm.Parameters = asn1.NullRawValue
+ case *ecdsa.PublicKey:
+ oid, ok := oidFromNamedCurve(pub.Curve)
+ if !ok {
+ return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: unsupported elliptic curve")
+ }
+ if !pub.Curve.IsOnCurve(pub.X, pub.Y) {
+ return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: invalid elliptic curve public key")
+ }
+ publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
+ publicKeyAlgorithm.Algorithm = oidPublicKeyECDSA
+ var paramBytes []byte
+ paramBytes, err = asn1.Marshal(oid)
+ if err != nil {
+ return
+ }
+ publicKeyAlgorithm.Parameters.FullBytes = paramBytes
+ case ed25519.PublicKey:
+ publicKeyBytes = pub
+ publicKeyAlgorithm.Algorithm = oidPublicKeyEd25519
+ default:
+ return nil, pkix.AlgorithmIdentifier{}, fmt.Errorf("x509: unsupported public key type: %T", pub)
+ }
+
+ return publicKeyBytes, publicKeyAlgorithm, nil
+}
+
+// MarshalPKIXPublicKey converts a public key to PKIX, ASN.1 DER form.
+// The encoded public key is a SubjectPublicKeyInfo structure
+// (see RFC 5280, Section 4.1).
+//
+// The following key types are currently supported: *rsa.PublicKey, *ecdsa.PublicKey
+// and ed25519.PublicKey. Unsupported key types result in an error.
+//
+// This kind of key is commonly encoded in PEM blocks of type "PUBLIC KEY".
+func MarshalPKIXPublicKey(pub any) ([]byte, error) {
+ var publicKeyBytes []byte
+ var publicKeyAlgorithm pkix.AlgorithmIdentifier
+ var err error
+
+ if publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(pub); err != nil {
+ return nil, err
+ }
+
+ pkix := pkixPublicKey{
+ Algo: publicKeyAlgorithm,
+ BitString: asn1.BitString{
+ Bytes: publicKeyBytes,
+ BitLength: 8 * len(publicKeyBytes),
+ },
+ }
+
+ ret, _ := asn1.Marshal(pkix)
+ return ret, nil
+}
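+
+// The inverse of the parsing example above (editor's sketch, not part of
+// the upstream source): serialize an ECDSA public key to a PEM block:
+//
+//	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	der, err := x509.MarshalPKIXPublicKey(&priv.PublicKey)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: der})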
+
+// These structures reflect the ASN.1 structure of X.509 certificates:
+
+type certificate struct {
+ Raw asn1.RawContent
+ TBSCertificate tbsCertificate
+ SignatureAlgorithm pkix.AlgorithmIdentifier
+ SignatureValue asn1.BitString
+}
+
+type tbsCertificate struct {
+ Raw asn1.RawContent
+ Version int `asn1:"optional,explicit,default:0,tag:0"`
+ SerialNumber *big.Int
+ SignatureAlgorithm pkix.AlgorithmIdentifier
+ Issuer asn1.RawValue
+ Validity validity
+ Subject asn1.RawValue
+ PublicKey publicKeyInfo
+ UniqueId asn1.BitString `asn1:"optional,tag:1"`
+ SubjectUniqueId asn1.BitString `asn1:"optional,tag:2"`
+ Extensions []pkix.Extension `asn1:"omitempty,optional,explicit,tag:3"`
+}
+
+type dsaAlgorithmParameters struct {
+ P, Q, G *big.Int
+}
+
+type validity struct {
+ NotBefore, NotAfter time.Time
+}
+
+type publicKeyInfo struct {
+ Raw asn1.RawContent
+ Algorithm pkix.AlgorithmIdentifier
+ PublicKey asn1.BitString
+}
+
+// RFC 5280, 4.2.1.1
+type authKeyId struct {
+ Id []byte `asn1:"optional,tag:0"`
+}
+
+type SignatureAlgorithm int
+
+const (
+ UnknownSignatureAlgorithm SignatureAlgorithm = iota
+
+ MD2WithRSA // Unsupported.
+ MD5WithRSA // Only supported for signing, not verification.
+ SHA1WithRSA // Only supported for signing, and verification of CRLs, CSRs, and OCSP responses.
+ SHA256WithRSA
+ SHA384WithRSA
+ SHA512WithRSA
+ DSAWithSHA1 // Unsupported.
+ DSAWithSHA256 // Unsupported.
+ ECDSAWithSHA1 // Only supported for signing, and verification of CRLs, CSRs, and OCSP responses.
+ ECDSAWithSHA256
+ ECDSAWithSHA384
+ ECDSAWithSHA512
+ SHA256WithRSAPSS
+ SHA384WithRSAPSS
+ SHA512WithRSAPSS
+ PureEd25519
+)
+
+func (algo SignatureAlgorithm) isRSAPSS() bool {
+ switch algo {
+ case SHA256WithRSAPSS, SHA384WithRSAPSS, SHA512WithRSAPSS:
+ return true
+ default:
+ return false
+ }
+}
+
+func (algo SignatureAlgorithm) String() string {
+ for _, details := range signatureAlgorithmDetails {
+ if details.algo == algo {
+ return details.name
+ }
+ }
+ return strconv.Itoa(int(algo))
+}
+
+type PublicKeyAlgorithm int
+
+const (
+ UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota
+ RSA
+ DSA // Unsupported.
+ ECDSA
+ Ed25519
+)
+
+var publicKeyAlgoName = [...]string{
+ RSA: "RSA",
+ DSA: "DSA",
+ ECDSA: "ECDSA",
+ Ed25519: "Ed25519",
+}
+
+func (algo PublicKeyAlgorithm) String() string {
+ if 0 < algo && int(algo) < len(publicKeyAlgoName) {
+ return publicKeyAlgoName[algo]
+ }
+ return strconv.Itoa(int(algo))
+}
+
+// OIDs for signature algorithms
+//
+// pkcs-1 OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 }
+//
+// RFC 3279 2.2.1 RSA Signature Algorithms
+//
+// md2WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 2 }
+//
+// md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 }
+//
+// sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 }
+//
+// dsaWithSha1 OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) x9-57(10040) x9cm(4) 3 }
+//
+// RFC 3279 2.2.3 ECDSA Signature Algorithm
+//
+// ecdsa-with-SHA1 OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) ansi-x962(10045)
+// signatures(4) ecdsa-with-SHA1(1)}
+//
+// RFC 4055 5 PKCS #1 Version 1.5
+//
+// sha256WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 11 }
+//
+// sha384WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 12 }
+//
+// sha512WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 13 }
+//
+// RFC 5758 3.1 DSA Signature Algorithms
+//
+// dsaWithSha256 OBJECT IDENTIFIER ::= {
+// joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101)
+// csor(3) algorithms(4) id-dsa-with-sha2(3) 2}
+//
+// RFC 5758 3.2 ECDSA Signature Algorithm
+//
+// ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
+// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 2 }
+//
+// ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
+// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 3 }
+//
+// ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
+// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 4 }
+//
+// RFC 8410 3 Curve25519 and Curve448 Algorithm Identifiers
+//
+// id-Ed25519 OBJECT IDENTIFIER ::= { 1 3 101 112 }
+var (
+ oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
+ oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
+ oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
+ oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
+ oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
+ oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
+ oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10}
+ oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
+ oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2}
+ oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
+ oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
+ oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
+ oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
+ oidSignatureEd25519 = asn1.ObjectIdentifier{1, 3, 101, 112}
+
+ oidSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1}
+ oidSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2}
+ oidSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3}
+
+ oidMGF1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 8}
+
+ // oidISOSignatureSHA1WithRSA means the same as oidSignatureSHA1WithRSA
+ // but it's specified by ISO. Microsoft's makecert.exe has been known
+ // to produce certificates with this OID.
+ oidISOSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 29}
+)
+
+var signatureAlgorithmDetails = []struct {
+ algo SignatureAlgorithm
+ name string
+ oid asn1.ObjectIdentifier
+ pubKeyAlgo PublicKeyAlgorithm
+ hash crypto.Hash
+}{
+ {MD2WithRSA, "MD2-RSA", oidSignatureMD2WithRSA, RSA, crypto.Hash(0) /* no value for MD2 */},
+ {MD5WithRSA, "MD5-RSA", oidSignatureMD5WithRSA, RSA, crypto.MD5},
+ {SHA1WithRSA, "SHA1-RSA", oidSignatureSHA1WithRSA, RSA, crypto.SHA1},
+ {SHA1WithRSA, "SHA1-RSA", oidISOSignatureSHA1WithRSA, RSA, crypto.SHA1},
+ {SHA256WithRSA, "SHA256-RSA", oidSignatureSHA256WithRSA, RSA, crypto.SHA256},
+ {SHA384WithRSA, "SHA384-RSA", oidSignatureSHA384WithRSA, RSA, crypto.SHA384},
+ {SHA512WithRSA, "SHA512-RSA", oidSignatureSHA512WithRSA, RSA, crypto.SHA512},
+ {SHA256WithRSAPSS, "SHA256-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA256},
+ {SHA384WithRSAPSS, "SHA384-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA384},
+ {SHA512WithRSAPSS, "SHA512-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA512},
+ {DSAWithSHA1, "DSA-SHA1", oidSignatureDSAWithSHA1, DSA, crypto.SHA1},
+ {DSAWithSHA256, "DSA-SHA256", oidSignatureDSAWithSHA256, DSA, crypto.SHA256},
+ {ECDSAWithSHA1, "ECDSA-SHA1", oidSignatureECDSAWithSHA1, ECDSA, crypto.SHA1},
+ {ECDSAWithSHA256, "ECDSA-SHA256", oidSignatureECDSAWithSHA256, ECDSA, crypto.SHA256},
+ {ECDSAWithSHA384, "ECDSA-SHA384", oidSignatureECDSAWithSHA384, ECDSA, crypto.SHA384},
+ {ECDSAWithSHA512, "ECDSA-SHA512", oidSignatureECDSAWithSHA512, ECDSA, crypto.SHA512},
+ {PureEd25519, "Ed25519", oidSignatureEd25519, Ed25519, crypto.Hash(0) /* no pre-hashing */},
+}
+
+// hashToPSSParameters contains the DER encoded RSA PSS parameters for the
+// SHA256, SHA384, and SHA512 hashes as defined in RFC 3447, Appendix A.2.3.
+// The parameters contain the following values:
+// - hashAlgorithm contains the associated hash identifier with NULL parameters
+// - maskGenAlgorithm always contains the default mgf1SHA1 identifier
+// - saltLength contains the length of the associated hash
+// - trailerField always contains the default trailerFieldBC value
+var hashToPSSParameters = map[crypto.Hash]asn1.RawValue{
+ crypto.SHA256: asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 1, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 1, 5, 0, 162, 3, 2, 1, 32}},
+ crypto.SHA384: asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 2, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 2, 5, 0, 162, 3, 2, 1, 48}},
+ crypto.SHA512: asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 3, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 3, 5, 0, 162, 3, 2, 1, 64}},
+}
+
+// pssParameters reflects the parameters in an AlgorithmIdentifier that
+// specifies RSA PSS. See RFC 3447, Appendix A.2.3.
+type pssParameters struct {
+ // The following three fields are not marked as
+ // optional because the default values specify SHA-1,
+ // which is no longer suitable for use in signatures.
+ Hash pkix.AlgorithmIdentifier `asn1:"explicit,tag:0"`
+ MGF pkix.AlgorithmIdentifier `asn1:"explicit,tag:1"`
+ SaltLength int `asn1:"explicit,tag:2"`
+ TrailerField int `asn1:"optional,explicit,tag:3,default:1"`
+}
+
+func getSignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) SignatureAlgorithm {
+ if ai.Algorithm.Equal(oidSignatureEd25519) {
+ // RFC 8410, Section 3
+ // > For all of the OIDs, the parameters MUST be absent.
+ if len(ai.Parameters.FullBytes) != 0 {
+ return UnknownSignatureAlgorithm
+ }
+ }
+
+ if !ai.Algorithm.Equal(oidSignatureRSAPSS) {
+ for _, details := range signatureAlgorithmDetails {
+ if ai.Algorithm.Equal(details.oid) {
+ return details.algo
+ }
+ }
+ return UnknownSignatureAlgorithm
+ }
+
+ // RSA PSS is special because it encodes important parameters
+ // in the Parameters.
+
+ var params pssParameters
+ if _, err := asn1.Unmarshal(ai.Parameters.FullBytes, &params); err != nil {
+ return UnknownSignatureAlgorithm
+ }
+
+ var mgf1HashFunc pkix.AlgorithmIdentifier
+ if _, err := asn1.Unmarshal(params.MGF.Parameters.FullBytes, &mgf1HashFunc); err != nil {
+ return UnknownSignatureAlgorithm
+ }
+
+ // PSS is greatly overburdened with options. This code forces them into
+ // three buckets by requiring that the MGF1 hash function always match the
+ // message hash function (as recommended in RFC 3447, Section 8.1), that the
+ // salt length matches the hash length, and that the trailer field has the
+ // default value.
+ if (len(params.Hash.Parameters.FullBytes) != 0 && !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes)) ||
+ !params.MGF.Algorithm.Equal(oidMGF1) ||
+ !mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) ||
+ (len(mgf1HashFunc.Parameters.FullBytes) != 0 && !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, asn1.NullBytes)) ||
+ params.TrailerField != 1 {
+ return UnknownSignatureAlgorithm
+ }
+
+ switch {
+ case params.Hash.Algorithm.Equal(oidSHA256) && params.SaltLength == 32:
+ return SHA256WithRSAPSS
+ case params.Hash.Algorithm.Equal(oidSHA384) && params.SaltLength == 48:
+ return SHA384WithRSAPSS
+ case params.Hash.Algorithm.Equal(oidSHA512) && params.SaltLength == 64:
+ return SHA512WithRSAPSS
+ }
+
+ return UnknownSignatureAlgorithm
+}
+
+// RFC 3279, 2.3 Public Key Algorithms
+//
+// pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840)
+// rsadsi(113549) pkcs(1) 1 }
+//
+// rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 }
+//
+// id-dsa OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840)
+// x9-57(10040) x9cm(4) 1 }
+//
+// RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters
+//
+// id-ecPublicKey OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 }
+var (
+ oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
+ oidPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1}
+ oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1}
+ oidPublicKeyEd25519 = oidSignatureEd25519
+)
+
+func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm {
+ switch {
+ case oid.Equal(oidPublicKeyRSA):
+ return RSA
+ case oid.Equal(oidPublicKeyDSA):
+ return DSA
+ case oid.Equal(oidPublicKeyECDSA):
+ return ECDSA
+ case oid.Equal(oidPublicKeyEd25519):
+ return Ed25519
+ }
+ return UnknownPublicKeyAlgorithm
+}
+
+// RFC 5480, 2.1.1.1. Named Curve
+//
+// secp224r1 OBJECT IDENTIFIER ::= {
+// iso(1) identified-organization(3) certicom(132) curve(0) 33 }
+//
+// secp256r1 OBJECT IDENTIFIER ::= {
+// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3)
+// prime(1) 7 }
+//
+// secp384r1 OBJECT IDENTIFIER ::= {
+// iso(1) identified-organization(3) certicom(132) curve(0) 34 }
+//
+// secp521r1 OBJECT IDENTIFIER ::= {
+// iso(1) identified-organization(3) certicom(132) curve(0) 35 }
+//
+// NB: secp256r1 is equivalent to prime256v1
+var (
+ oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33}
+ oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}
+ oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34}
+ oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35}
+)
+
+func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve {
+ switch {
+ case oid.Equal(oidNamedCurveP224):
+ return elliptic.P224()
+ case oid.Equal(oidNamedCurveP256):
+ return elliptic.P256()
+ case oid.Equal(oidNamedCurveP384):
+ return elliptic.P384()
+ case oid.Equal(oidNamedCurveP521):
+ return elliptic.P521()
+ }
+ return nil
+}
+
+func oidFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) {
+ switch curve {
+ case elliptic.P224():
+ return oidNamedCurveP224, true
+ case elliptic.P256():
+ return oidNamedCurveP256, true
+ case elliptic.P384():
+ return oidNamedCurveP384, true
+ case elliptic.P521():
+ return oidNamedCurveP521, true
+ }
+
+ return nil, false
+}
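+
+// Example (illustrative sketch, not a doc comment from the original file):
+// namedCurveFromOID and oidFromNamedCurve are inverses for the supported
+// NIST curves.
+//
+//	if oid, ok := oidFromNamedCurve(elliptic.P256()); ok {
+//		curve := namedCurveFromOID(oid) // elliptic.P256() again
+//		_ = curve
+//	}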
+
+// KeyUsage represents the set of actions that are valid for a given key. It's
+// a bitmap of the KeyUsage* constants.
+type KeyUsage int
+
+const (
+ KeyUsageDigitalSignature KeyUsage = 1 << iota
+ KeyUsageContentCommitment
+ KeyUsageKeyEncipherment
+ KeyUsageDataEncipherment
+ KeyUsageKeyAgreement
+ KeyUsageCertSign
+ KeyUsageCRLSign
+ KeyUsageEncipherOnly
+ KeyUsageDecipherOnly
+)
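+
+// Example (illustrative sketch): because KeyUsage is a bitmap, usages are
+// combined with bitwise OR and tested with a mask.
+//
+//	ku := KeyUsageDigitalSignature | KeyUsageKeyEncipherment
+//	canSign := ku&KeyUsageDigitalSignature != 0 // true
+//	_ = canSign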
+
+// RFC 5280, 4.2.1.12 Extended Key Usage
+//
+// anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 }
+//
+// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 }
+//
+// id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 }
+// id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 }
+// id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 }
+// id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 }
+// id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 }
+// id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 }
+var (
+ oidExtKeyUsageAny = asn1.ObjectIdentifier{2, 5, 29, 37, 0}
+ oidExtKeyUsageServerAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1}
+ oidExtKeyUsageClientAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 2}
+ oidExtKeyUsageCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 3}
+ oidExtKeyUsageEmailProtection = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 4}
+ oidExtKeyUsageIPSECEndSystem = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 5}
+ oidExtKeyUsageIPSECTunnel = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 6}
+ oidExtKeyUsageIPSECUser = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 7}
+ oidExtKeyUsageTimeStamping = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8}
+ oidExtKeyUsageOCSPSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 9}
+ oidExtKeyUsageMicrosoftServerGatedCrypto = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 3}
+ oidExtKeyUsageNetscapeServerGatedCrypto = asn1.ObjectIdentifier{2, 16, 840, 1, 113730, 4, 1}
+ oidExtKeyUsageMicrosoftCommercialCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 2, 1, 22}
+ oidExtKeyUsageMicrosoftKernelCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 61, 1, 1}
+)
+
+// ExtKeyUsage represents an extended set of actions that are valid for a given key.
+// Each of the ExtKeyUsage* constants defines a unique action.
+type ExtKeyUsage int
+
+const (
+ ExtKeyUsageAny ExtKeyUsage = iota
+ ExtKeyUsageServerAuth
+ ExtKeyUsageClientAuth
+ ExtKeyUsageCodeSigning
+ ExtKeyUsageEmailProtection
+ ExtKeyUsageIPSECEndSystem
+ ExtKeyUsageIPSECTunnel
+ ExtKeyUsageIPSECUser
+ ExtKeyUsageTimeStamping
+ ExtKeyUsageOCSPSigning
+ ExtKeyUsageMicrosoftServerGatedCrypto
+ ExtKeyUsageNetscapeServerGatedCrypto
+ ExtKeyUsageMicrosoftCommercialCodeSigning
+ ExtKeyUsageMicrosoftKernelCodeSigning
+)
+
+// extKeyUsageOIDs contains the mapping between an ExtKeyUsage and its OID.
+var extKeyUsageOIDs = []struct {
+ extKeyUsage ExtKeyUsage
+ oid asn1.ObjectIdentifier
+}{
+ {ExtKeyUsageAny, oidExtKeyUsageAny},
+ {ExtKeyUsageServerAuth, oidExtKeyUsageServerAuth},
+ {ExtKeyUsageClientAuth, oidExtKeyUsageClientAuth},
+ {ExtKeyUsageCodeSigning, oidExtKeyUsageCodeSigning},
+ {ExtKeyUsageEmailProtection, oidExtKeyUsageEmailProtection},
+ {ExtKeyUsageIPSECEndSystem, oidExtKeyUsageIPSECEndSystem},
+ {ExtKeyUsageIPSECTunnel, oidExtKeyUsageIPSECTunnel},
+ {ExtKeyUsageIPSECUser, oidExtKeyUsageIPSECUser},
+ {ExtKeyUsageTimeStamping, oidExtKeyUsageTimeStamping},
+ {ExtKeyUsageOCSPSigning, oidExtKeyUsageOCSPSigning},
+ {ExtKeyUsageMicrosoftServerGatedCrypto, oidExtKeyUsageMicrosoftServerGatedCrypto},
+ {ExtKeyUsageNetscapeServerGatedCrypto, oidExtKeyUsageNetscapeServerGatedCrypto},
+ {ExtKeyUsageMicrosoftCommercialCodeSigning, oidExtKeyUsageMicrosoftCommercialCodeSigning},
+ {ExtKeyUsageMicrosoftKernelCodeSigning, oidExtKeyUsageMicrosoftKernelCodeSigning},
+}
+
+func extKeyUsageFromOID(oid asn1.ObjectIdentifier) (eku ExtKeyUsage, ok bool) {
+ for _, pair := range extKeyUsageOIDs {
+ if oid.Equal(pair.oid) {
+ return pair.extKeyUsage, true
+ }
+ }
+ return
+}
+
+func oidFromExtKeyUsage(eku ExtKeyUsage) (oid asn1.ObjectIdentifier, ok bool) {
+ for _, pair := range extKeyUsageOIDs {
+ if eku == pair.extKeyUsage {
+ return pair.oid, true
+ }
+ }
+ return
+}
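+
+// Example (illustrative sketch): the two lookups above are inverses for all
+// known extended key usages.
+//
+//	if oid, ok := oidFromExtKeyUsage(ExtKeyUsageServerAuth); ok {
+//		eku, _ := extKeyUsageFromOID(oid) // ExtKeyUsageServerAuth again
+//		_ = eku
+//	}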
+
+// A Certificate represents an X.509 certificate.
+type Certificate struct {
+ Raw []byte // Complete ASN.1 DER content (certificate, signature algorithm and signature).
+ RawTBSCertificate []byte // Certificate part of raw ASN.1 DER content.
+ RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo.
+ RawSubject []byte // DER encoded Subject
+ RawIssuer []byte // DER encoded Issuer
+
+ Signature []byte
+ SignatureAlgorithm SignatureAlgorithm
+
+ PublicKeyAlgorithm PublicKeyAlgorithm
+ PublicKey any
+
+ Version int
+ SerialNumber *big.Int
+ Issuer pkix.Name
+ Subject pkix.Name
+ NotBefore, NotAfter time.Time // Validity bounds.
+ KeyUsage KeyUsage
+
+ // Extensions contains raw X.509 extensions. When parsing certificates,
+ // this can be used to extract non-critical extensions that are not
+ // parsed by this package. When marshaling certificates, the Extensions
+ // field is ignored, see ExtraExtensions.
+ Extensions []pkix.Extension
+
+ // ExtraExtensions contains extensions to be copied, raw, into any
+ // marshaled certificates. Values override any extensions that would
+ // otherwise be produced based on the other fields. The ExtraExtensions
+ // field is not populated when parsing certificates, see Extensions.
+ ExtraExtensions []pkix.Extension
+
+ // UnhandledCriticalExtensions contains a list of extension IDs that
+ // were not (fully) processed when parsing. Verify will fail if this
+ // slice is non-empty, unless verification is delegated to an OS
+ // library which understands all the critical extensions.
+ //
+ // Users can access these extensions using Extensions and can remove
+ // elements from this slice if they believe that they have been
+ // handled.
+ UnhandledCriticalExtensions []asn1.ObjectIdentifier
+
+ ExtKeyUsage []ExtKeyUsage // Sequence of extended key usages.
+ UnknownExtKeyUsage []asn1.ObjectIdentifier // Encountered extended key usages unknown to this package.
+
+ // BasicConstraintsValid indicates whether IsCA, MaxPathLen,
+ // and MaxPathLenZero are valid.
+ BasicConstraintsValid bool
+ IsCA bool
+
+ // MaxPathLen and MaxPathLenZero indicate the presence and
+ // value of the BasicConstraints' "pathLenConstraint".
+ //
+ // When parsing a certificate, a positive non-zero MaxPathLen
+ // means that the field was specified, -1 means it was unset,
+	// and MaxPathLenZero being true means that the field was
+ // explicitly set to zero. The case of MaxPathLen==0 with MaxPathLenZero==false
+ // should be treated equivalent to -1 (unset).
+ //
+	// When generating a certificate, an unset pathLenConstraint
+	// can be requested either with MaxPathLen == -1 or with the
+	// zero value for both MaxPathLen and MaxPathLenZero.
+ MaxPathLen int
+ // MaxPathLenZero indicates that BasicConstraintsValid==true
+ // and MaxPathLen==0 should be interpreted as an actual
+ // maximum path length of zero. Otherwise, that combination is
+ // interpreted as MaxPathLen not being set.
+ MaxPathLenZero bool
+
+ SubjectKeyId []byte
+ AuthorityKeyId []byte
+
+ // RFC 5280, 4.2.2.1 (Authority Information Access)
+ OCSPServer []string
+ IssuingCertificateURL []string
+
+ // Subject Alternate Name values. (Note that these values may not be valid
+ // if invalid values were contained within a parsed certificate. For
+ // example, an element of DNSNames may not be a valid DNS domain name.)
+ DNSNames []string
+ EmailAddresses []string
+ IPAddresses []net.IP
+ URIs []*url.URL
+
+ // Name constraints
+ PermittedDNSDomainsCritical bool // if true then the name constraints are marked critical.
+ PermittedDNSDomains []string
+ ExcludedDNSDomains []string
+ PermittedIPRanges []*net.IPNet
+ ExcludedIPRanges []*net.IPNet
+ PermittedEmailAddresses []string
+ ExcludedEmailAddresses []string
+ PermittedURIDomains []string
+ ExcludedURIDomains []string
+
+ // CRL Distribution Points
+ CRLDistributionPoints []string
+
+ PolicyIdentifiers []asn1.ObjectIdentifier
+}
+
+// ErrUnsupportedAlgorithm results from attempting to perform an operation that
+// involves algorithms that are not currently implemented.
+var ErrUnsupportedAlgorithm = errors.New("x509: cannot verify signature: algorithm unimplemented")
+
+// debugAllowSHA1 allows SHA-1 signatures. See issue 41682.
+var debugAllowSHA1 = godebug.Get("x509sha1") == "1"
+
+// An InsecureAlgorithmError indicates that the SignatureAlgorithm used to
+// generate the signature is not secure, and the signature has been rejected.
+//
+// To temporarily restore support for SHA-1 signatures, include the value
+// "x509sha1=1" in the GODEBUG environment variable. Note that this option will
+// be removed in a future release.
+type InsecureAlgorithmError SignatureAlgorithm
+
+func (e InsecureAlgorithmError) Error() string {
+ var override string
+ if SignatureAlgorithm(e) == SHA1WithRSA || SignatureAlgorithm(e) == ECDSAWithSHA1 {
+ override = " (temporarily override with GODEBUG=x509sha1=1)"
+ }
+ return fmt.Sprintf("x509: cannot verify signature: insecure algorithm %v", SignatureAlgorithm(e)) + override
+}
+
+// ConstraintViolationError results when a requested usage is not permitted by
+// a certificate. For example: checking a signature when the public key isn't a
+// certificate signing key.
+type ConstraintViolationError struct{}
+
+func (ConstraintViolationError) Error() string {
+ return "x509: invalid signature: parent certificate cannot sign this kind of certificate"
+}
+
+func (c *Certificate) Equal(other *Certificate) bool {
+ if c == nil || other == nil {
+ return c == other
+ }
+ return bytes.Equal(c.Raw, other.Raw)
+}
+
+func (c *Certificate) hasSANExtension() bool {
+ return oidInExtensions(oidExtensionSubjectAltName, c.Extensions)
+}
+
+// CheckSignatureFrom verifies that the signature on c is a valid signature
+// from parent. SHA1WithRSA and ECDSAWithSHA1 signatures are not supported.
+func (c *Certificate) CheckSignatureFrom(parent *Certificate) error {
+ // RFC 5280, 4.2.1.9:
+ // "If the basic constraints extension is not present in a version 3
+ // certificate, or the extension is present but the cA boolean is not
+ // asserted, then the certified public key MUST NOT be used to verify
+ // certificate signatures."
+ if parent.Version == 3 && !parent.BasicConstraintsValid ||
+ parent.BasicConstraintsValid && !parent.IsCA {
+ return ConstraintViolationError{}
+ }
+
+ if parent.KeyUsage != 0 && parent.KeyUsage&KeyUsageCertSign == 0 {
+ return ConstraintViolationError{}
+ }
+
+ if parent.PublicKeyAlgorithm == UnknownPublicKeyAlgorithm {
+ return ErrUnsupportedAlgorithm
+ }
+
+ // TODO(agl): don't ignore the path length constraint.
+
+ return checkSignature(c.SignatureAlgorithm, c.RawTBSCertificate, c.Signature, parent.PublicKey, debugAllowSHA1)
+}
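+
+// Example (illustrative sketch; leafDER and caDER are assumed, caller-supplied
+// DER bytes): verifying a parsed certificate against its issuer.
+//
+//	leaf, err := ParseCertificate(leafDER)
+//	if err != nil { /* handle error */ }
+//	ca, err := ParseCertificate(caDER)
+//	if err != nil { /* handle error */ }
+//	if err := leaf.CheckSignatureFrom(ca); err != nil {
+//		// signature invalid, or ca may not sign certificates
+//	}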
+
+// CheckSignature verifies that signature is a valid signature over signed from
+// c's public key.
+func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature []byte) error {
+ return checkSignature(algo, signed, signature, c.PublicKey, true)
+}
+
+func (c *Certificate) hasNameConstraints() bool {
+ return oidInExtensions(oidExtensionNameConstraints, c.Extensions)
+}
+
+func (c *Certificate) getSANExtension() []byte {
+ for _, e := range c.Extensions {
+ if e.Id.Equal(oidExtensionSubjectAltName) {
+ return e.Value
+ }
+ }
+ return nil
+}
+
+func signaturePublicKeyAlgoMismatchError(expectedPubKeyAlgo PublicKeyAlgorithm, pubKey any) error {
+ return fmt.Errorf("x509: signature algorithm specifies an %s public key, but have public key of type %T", expectedPubKeyAlgo.String(), pubKey)
+}
+
+// checkSignature verifies that signature is a valid signature over signed from
+// a crypto.PublicKey.
+func checkSignature(algo SignatureAlgorithm, signed, signature []byte, publicKey crypto.PublicKey, allowSHA1 bool) (err error) {
+ var hashType crypto.Hash
+ var pubKeyAlgo PublicKeyAlgorithm
+
+ for _, details := range signatureAlgorithmDetails {
+ if details.algo == algo {
+ hashType = details.hash
+ pubKeyAlgo = details.pubKeyAlgo
+ }
+ }
+
+ switch hashType {
+ case crypto.Hash(0):
+ if pubKeyAlgo != Ed25519 {
+ return ErrUnsupportedAlgorithm
+ }
+ case crypto.MD5:
+ return InsecureAlgorithmError(algo)
+ case crypto.SHA1:
+ if !allowSHA1 {
+ return InsecureAlgorithmError(algo)
+ }
+ fallthrough
+ default:
+ if !hashType.Available() {
+ return ErrUnsupportedAlgorithm
+ }
+ h := hashType.New()
+ h.Write(signed)
+ signed = h.Sum(nil)
+ }
+
+ switch pub := publicKey.(type) {
+ case *rsa.PublicKey:
+ if pubKeyAlgo != RSA {
+ return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub)
+ }
+ if algo.isRSAPSS() {
+ return rsa.VerifyPSS(pub, hashType, signed, signature, &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash})
+ } else {
+ return rsa.VerifyPKCS1v15(pub, hashType, signed, signature)
+ }
+ case *ecdsa.PublicKey:
+ if pubKeyAlgo != ECDSA {
+ return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub)
+ }
+ if !ecdsa.VerifyASN1(pub, signed, signature) {
+ return errors.New("x509: ECDSA verification failure")
+ }
+ return
+ case ed25519.PublicKey:
+ if pubKeyAlgo != Ed25519 {
+ return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub)
+ }
+ if !ed25519.Verify(pub, signed, signature) {
+ return errors.New("x509: Ed25519 verification failure")
+ }
+ return
+ }
+ return ErrUnsupportedAlgorithm
+}
+
+// CheckCRLSignature checks that the signature in crl is from c.
+//
+// Deprecated: Use RevocationList.CheckSignatureFrom instead.
+func (c *Certificate) CheckCRLSignature(crl *pkix.CertificateList) error {
+ algo := getSignatureAlgorithmFromAI(crl.SignatureAlgorithm)
+ return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign())
+}
+
+type UnhandledCriticalExtension struct{}
+
+func (h UnhandledCriticalExtension) Error() string {
+ return "x509: unhandled critical extension"
+}
+
+type basicConstraints struct {
+ IsCA bool `asn1:"optional"`
+ MaxPathLen int `asn1:"optional,default:-1"`
+}
+
+// RFC 5280 4.2.1.4
+type policyInformation struct {
+ Policy asn1.ObjectIdentifier
+ // policyQualifiers omitted
+}
+
+const (
+ nameTypeEmail = 1
+ nameTypeDNS = 2
+ nameTypeURI = 6
+ nameTypeIP = 7
+)
+
+// RFC 5280, 4.2.2.1
+type authorityInfoAccess struct {
+ Method asn1.ObjectIdentifier
+ Location asn1.RawValue
+}
+
+// RFC 5280, 4.2.1.14
+type distributionPoint struct {
+ DistributionPoint distributionPointName `asn1:"optional,tag:0"`
+ Reason asn1.BitString `asn1:"optional,tag:1"`
+ CRLIssuer asn1.RawValue `asn1:"optional,tag:2"`
+}
+
+type distributionPointName struct {
+ FullName []asn1.RawValue `asn1:"optional,tag:0"`
+ RelativeName pkix.RDNSequence `asn1:"optional,tag:1"`
+}
+
+func reverseBitsInAByte(in byte) byte {
+ b1 := in>>4 | in<<4
+ b2 := b1>>2&0x33 | b1<<2&0xcc
+ b3 := b2>>1&0x55 | b2<<1&0xaa
+ return b3
+}
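+
+// For example, reverseBitsInAByte(0x01) == 0x80, and
+// reverseBitsInAByte(0xb0 /* 1011_0000 */) == 0x0d /* 0000_1101 */.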
+
+// asn1BitLength returns the bit-length of bitString by considering the
+// most-significant bit in a byte to be the "first" bit. This convention
+// matches ASN.1, but differs from almost everything else.
+func asn1BitLength(bitString []byte) int {
+ bitLen := len(bitString) * 8
+
+ for i := range bitString {
+ b := bitString[len(bitString)-i-1]
+
+ for bit := uint(0); bit < 8; bit++ {
+ if (b>>bit)&1 == 1 {
+ return bitLen
+ }
+ bitLen--
+ }
+ }
+
+ return 0
+}
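+
+// For example, asn1BitLength([]byte{0x80}) == 1, since only the
+// most-significant ("first") bit is set, while asn1BitLength([]byte{0x01}) == 8.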
+
+var (
+ oidExtensionSubjectKeyId = []int{2, 5, 29, 14}
+ oidExtensionKeyUsage = []int{2, 5, 29, 15}
+ oidExtensionExtendedKeyUsage = []int{2, 5, 29, 37}
+ oidExtensionAuthorityKeyId = []int{2, 5, 29, 35}
+ oidExtensionBasicConstraints = []int{2, 5, 29, 19}
+ oidExtensionSubjectAltName = []int{2, 5, 29, 17}
+ oidExtensionCertificatePolicies = []int{2, 5, 29, 32}
+ oidExtensionNameConstraints = []int{2, 5, 29, 30}
+ oidExtensionCRLDistributionPoints = []int{2, 5, 29, 31}
+ oidExtensionAuthorityInfoAccess = []int{1, 3, 6, 1, 5, 5, 7, 1, 1}
+ oidExtensionCRLNumber = []int{2, 5, 29, 20}
+)
+
+var (
+ oidAuthorityInfoAccessOcsp = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1}
+ oidAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2}
+)
+
+// oidInExtensions reports whether an extension with the given oid exists in
+// extensions.
+func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool {
+ for _, e := range extensions {
+ if e.Id.Equal(oid) {
+ return true
+ }
+ }
+ return false
+}
+
+// marshalSANs marshals a list of addresses into the contents of an X.509
+// SubjectAlternativeName extension.
+func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) (derBytes []byte, err error) {
+ var rawValues []asn1.RawValue
+ for _, name := range dnsNames {
+ if err := isIA5String(name); err != nil {
+ return nil, err
+ }
+ rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeDNS, Class: 2, Bytes: []byte(name)})
+ }
+ for _, email := range emailAddresses {
+ if err := isIA5String(email); err != nil {
+ return nil, err
+ }
+ rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeEmail, Class: 2, Bytes: []byte(email)})
+ }
+ for _, rawIP := range ipAddresses {
+ // If possible, we always want to encode IPv4 addresses in 4 bytes.
+ ip := rawIP.To4()
+ if ip == nil {
+ ip = rawIP
+ }
+ rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeIP, Class: 2, Bytes: ip})
+ }
+ for _, uri := range uris {
+ uriStr := uri.String()
+ if err := isIA5String(uriStr); err != nil {
+ return nil, err
+ }
+ rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: 2, Bytes: []byte(uriStr)})
+ }
+ return asn1.Marshal(rawValues)
+}
+
+func isIA5String(s string) error {
+ for _, r := range s {
+ // Per RFC5280 "IA5String is limited to the set of ASCII characters"
+ if r > unicode.MaxASCII {
+ return fmt.Errorf("x509: %q cannot be encoded as an IA5String", s)
+ }
+ }
+
+ return nil
+}
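+
+// For example, isIA5String("example.com") returns nil, while
+// isIA5String("bücher.example") returns an error because 'ü' is outside ASCII.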
+
+func buildCertExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId []byte, subjectKeyId []byte) (ret []pkix.Extension, err error) {
+ ret = make([]pkix.Extension, 10 /* maximum number of elements. */)
+ n := 0
+
+ if template.KeyUsage != 0 &&
+ !oidInExtensions(oidExtensionKeyUsage, template.ExtraExtensions) {
+ ret[n], err = marshalKeyUsage(template.KeyUsage)
+ if err != nil {
+ return nil, err
+ }
+ n++
+ }
+
+ if (len(template.ExtKeyUsage) > 0 || len(template.UnknownExtKeyUsage) > 0) &&
+ !oidInExtensions(oidExtensionExtendedKeyUsage, template.ExtraExtensions) {
+ ret[n], err = marshalExtKeyUsage(template.ExtKeyUsage, template.UnknownExtKeyUsage)
+ if err != nil {
+ return nil, err
+ }
+ n++
+ }
+
+ if template.BasicConstraintsValid && !oidInExtensions(oidExtensionBasicConstraints, template.ExtraExtensions) {
+ ret[n], err = marshalBasicConstraints(template.IsCA, template.MaxPathLen, template.MaxPathLenZero)
+ if err != nil {
+ return nil, err
+ }
+ n++
+ }
+
+ if len(subjectKeyId) > 0 && !oidInExtensions(oidExtensionSubjectKeyId, template.ExtraExtensions) {
+ ret[n].Id = oidExtensionSubjectKeyId
+ ret[n].Value, err = asn1.Marshal(subjectKeyId)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if len(authorityKeyId) > 0 && !oidInExtensions(oidExtensionAuthorityKeyId, template.ExtraExtensions) {
+ ret[n].Id = oidExtensionAuthorityKeyId
+ ret[n].Value, err = asn1.Marshal(authKeyId{authorityKeyId})
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if (len(template.OCSPServer) > 0 || len(template.IssuingCertificateURL) > 0) &&
+ !oidInExtensions(oidExtensionAuthorityInfoAccess, template.ExtraExtensions) {
+ ret[n].Id = oidExtensionAuthorityInfoAccess
+ var aiaValues []authorityInfoAccess
+ for _, name := range template.OCSPServer {
+ aiaValues = append(aiaValues, authorityInfoAccess{
+ Method: oidAuthorityInfoAccessOcsp,
+ Location: asn1.RawValue{Tag: 6, Class: 2, Bytes: []byte(name)},
+ })
+ }
+ for _, name := range template.IssuingCertificateURL {
+ aiaValues = append(aiaValues, authorityInfoAccess{
+ Method: oidAuthorityInfoAccessIssuers,
+ Location: asn1.RawValue{Tag: 6, Class: 2, Bytes: []byte(name)},
+ })
+ }
+ ret[n].Value, err = asn1.Marshal(aiaValues)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) &&
+ !oidInExtensions(oidExtensionSubjectAltName, template.ExtraExtensions) {
+ ret[n].Id = oidExtensionSubjectAltName
+ // From RFC 5280, Section 4.2.1.6:
+ // “If the subject field contains an empty sequence ... then
+ // subjectAltName extension ... is marked as critical”
+ ret[n].Critical = subjectIsEmpty
+ ret[n].Value, err = marshalSANs(template.DNSNames, template.EmailAddresses, template.IPAddresses, template.URIs)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if len(template.PolicyIdentifiers) > 0 &&
+ !oidInExtensions(oidExtensionCertificatePolicies, template.ExtraExtensions) {
+ ret[n], err = marshalCertificatePolicies(template.PolicyIdentifiers)
+ if err != nil {
+ return nil, err
+ }
+ n++
+ }
+
+ if (len(template.PermittedDNSDomains) > 0 || len(template.ExcludedDNSDomains) > 0 ||
+ len(template.PermittedIPRanges) > 0 || len(template.ExcludedIPRanges) > 0 ||
+ len(template.PermittedEmailAddresses) > 0 || len(template.ExcludedEmailAddresses) > 0 ||
+ len(template.PermittedURIDomains) > 0 || len(template.ExcludedURIDomains) > 0) &&
+ !oidInExtensions(oidExtensionNameConstraints, template.ExtraExtensions) {
+ ret[n].Id = oidExtensionNameConstraints
+ ret[n].Critical = template.PermittedDNSDomainsCritical
+
+ ipAndMask := func(ipNet *net.IPNet) []byte {
+ maskedIP := ipNet.IP.Mask(ipNet.Mask)
+ ipAndMask := make([]byte, 0, len(maskedIP)+len(ipNet.Mask))
+ ipAndMask = append(ipAndMask, maskedIP...)
+ ipAndMask = append(ipAndMask, ipNet.Mask...)
+ return ipAndMask
+ }
+
+ serialiseConstraints := func(dns []string, ips []*net.IPNet, emails []string, uriDomains []string) (der []byte, err error) {
+ var b cryptobyte.Builder
+
+ for _, name := range dns {
+ if err = isIA5String(name); err != nil {
+ return nil, err
+ }
+
+ b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) {
+ b.AddASN1(cryptobyte_asn1.Tag(2).ContextSpecific(), func(b *cryptobyte.Builder) {
+ b.AddBytes([]byte(name))
+ })
+ })
+ }
+
+ for _, ipNet := range ips {
+ b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) {
+ b.AddASN1(cryptobyte_asn1.Tag(7).ContextSpecific(), func(b *cryptobyte.Builder) {
+ b.AddBytes(ipAndMask(ipNet))
+ })
+ })
+ }
+
+ for _, email := range emails {
+ if err = isIA5String(email); err != nil {
+ return nil, err
+ }
+
+ b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) {
+ b.AddASN1(cryptobyte_asn1.Tag(1).ContextSpecific(), func(b *cryptobyte.Builder) {
+ b.AddBytes([]byte(email))
+ })
+ })
+ }
+
+ for _, uriDomain := range uriDomains {
+ if err = isIA5String(uriDomain); err != nil {
+ return nil, err
+ }
+
+ b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) {
+ b.AddASN1(cryptobyte_asn1.Tag(6).ContextSpecific(), func(b *cryptobyte.Builder) {
+ b.AddBytes([]byte(uriDomain))
+ })
+ })
+ }
+
+ return b.Bytes()
+ }
+
+ permitted, err := serialiseConstraints(template.PermittedDNSDomains, template.PermittedIPRanges, template.PermittedEmailAddresses, template.PermittedURIDomains)
+ if err != nil {
+ return nil, err
+ }
+
+ excluded, err := serialiseConstraints(template.ExcludedDNSDomains, template.ExcludedIPRanges, template.ExcludedEmailAddresses, template.ExcludedURIDomains)
+ if err != nil {
+ return nil, err
+ }
+
+ var b cryptobyte.Builder
+ b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) {
+ if len(permitted) > 0 {
+ b.AddASN1(cryptobyte_asn1.Tag(0).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) {
+ b.AddBytes(permitted)
+ })
+ }
+
+ if len(excluded) > 0 {
+ b.AddASN1(cryptobyte_asn1.Tag(1).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) {
+ b.AddBytes(excluded)
+ })
+ }
+ })
+
+ ret[n].Value, err = b.Bytes()
+ if err != nil {
+ return nil, err
+ }
+ n++
+ }
+
+ if len(template.CRLDistributionPoints) > 0 &&
+ !oidInExtensions(oidExtensionCRLDistributionPoints, template.ExtraExtensions) {
+ ret[n].Id = oidExtensionCRLDistributionPoints
+
+ var crlDp []distributionPoint
+ for _, name := range template.CRLDistributionPoints {
+ dp := distributionPoint{
+ DistributionPoint: distributionPointName{
+ FullName: []asn1.RawValue{
+ {Tag: 6, Class: 2, Bytes: []byte(name)},
+ },
+ },
+ }
+ crlDp = append(crlDp, dp)
+ }
+
+ ret[n].Value, err = asn1.Marshal(crlDp)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ // Adding another extension here? Remember to update the maximum number
+ // of elements in the make() at the top of the function and the list of
+ // template fields used in CreateCertificate documentation.
+
+ return append(ret[:n], template.ExtraExtensions...), nil
+}
+
+func marshalKeyUsage(ku KeyUsage) (pkix.Extension, error) {
+ ext := pkix.Extension{Id: oidExtensionKeyUsage, Critical: true}
+
+ var a [2]byte
+ a[0] = reverseBitsInAByte(byte(ku))
+ a[1] = reverseBitsInAByte(byte(ku >> 8))
+
+ l := 1
+ if a[1] != 0 {
+ l = 2
+ }
+
+ bitString := a[:l]
+ var err error
+ ext.Value, err = asn1.Marshal(asn1.BitString{Bytes: bitString, BitLength: asn1BitLength(bitString)})
+ return ext, err
+}
+
+func marshalExtKeyUsage(extUsages []ExtKeyUsage, unknownUsages []asn1.ObjectIdentifier) (pkix.Extension, error) {
+ ext := pkix.Extension{Id: oidExtensionExtendedKeyUsage}
+
+ oids := make([]asn1.ObjectIdentifier, len(extUsages)+len(unknownUsages))
+ for i, u := range extUsages {
+ if oid, ok := oidFromExtKeyUsage(u); ok {
+ oids[i] = oid
+ } else {
+ return ext, errors.New("x509: unknown extended key usage")
+ }
+ }
+
+ copy(oids[len(extUsages):], unknownUsages)
+
+ var err error
+ ext.Value, err = asn1.Marshal(oids)
+ return ext, err
+}
+
+func marshalBasicConstraints(isCA bool, maxPathLen int, maxPathLenZero bool) (pkix.Extension, error) {
+ ext := pkix.Extension{Id: oidExtensionBasicConstraints, Critical: true}
+ // Leaving MaxPathLen as zero indicates that no maximum path
+ // length is desired, unless MaxPathLenZero is set. A value of
+ // -1 causes encoding/asn1 to omit the value as desired.
+ if maxPathLen == 0 && !maxPathLenZero {
+ maxPathLen = -1
+ }
+ var err error
+ ext.Value, err = asn1.Marshal(basicConstraints{isCA, maxPathLen})
+ return ext, err
+}
+
+func marshalCertificatePolicies(policyIdentifiers []asn1.ObjectIdentifier) (pkix.Extension, error) {
+ ext := pkix.Extension{Id: oidExtensionCertificatePolicies}
+ policies := make([]policyInformation, len(policyIdentifiers))
+ for i, policy := range policyIdentifiers {
+ policies[i].Policy = policy
+ }
+ var err error
+ ext.Value, err = asn1.Marshal(policies)
+ return ext, err
+}
+
+func buildCSRExtensions(template *CertificateRequest) ([]pkix.Extension, error) {
+ var ret []pkix.Extension
+
+ if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) &&
+ !oidInExtensions(oidExtensionSubjectAltName, template.ExtraExtensions) {
+ sanBytes, err := marshalSANs(template.DNSNames, template.EmailAddresses, template.IPAddresses, template.URIs)
+ if err != nil {
+ return nil, err
+ }
+
+ ret = append(ret, pkix.Extension{
+ Id: oidExtensionSubjectAltName,
+ Value: sanBytes,
+ })
+ }
+
+ return append(ret, template.ExtraExtensions...), nil
+}
+
+func subjectBytes(cert *Certificate) ([]byte, error) {
+ if len(cert.RawSubject) > 0 {
+ return cert.RawSubject, nil
+ }
+
+ return asn1.Marshal(cert.Subject.ToRDNSequence())
+}
+
+// signingParamsForPublicKey returns the parameters to use for signing with
+// priv. If requestedSigAlgo is not zero then it overrides the default
+// signature algorithm.
+func signingParamsForPublicKey(pub any, requestedSigAlgo SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) {
+ var pubType PublicKeyAlgorithm
+
+ switch pub := pub.(type) {
+ case *rsa.PublicKey:
+ pubType = RSA
+ hashFunc = crypto.SHA256
+ sigAlgo.Algorithm = oidSignatureSHA256WithRSA
+ sigAlgo.Parameters = asn1.NullRawValue
+
+ case *ecdsa.PublicKey:
+ pubType = ECDSA
+
+ switch pub.Curve {
+ case elliptic.P224(), elliptic.P256():
+ hashFunc = crypto.SHA256
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA256
+ case elliptic.P384():
+ hashFunc = crypto.SHA384
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA384
+ case elliptic.P521():
+ hashFunc = crypto.SHA512
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA512
+ default:
+ err = errors.New("x509: unknown elliptic curve")
+ }
+
+ case ed25519.PublicKey:
+ pubType = Ed25519
+ sigAlgo.Algorithm = oidSignatureEd25519
+
+ default:
+ err = errors.New("x509: only RSA, ECDSA and Ed25519 keys supported")
+ }
+
+ if err != nil {
+ return
+ }
+
+ if requestedSigAlgo == 0 {
+ return
+ }
+
+ found := false
+ for _, details := range signatureAlgorithmDetails {
+ if details.algo == requestedSigAlgo {
+ if details.pubKeyAlgo != pubType {
+ err = errors.New("x509: requested SignatureAlgorithm does not match private key type")
+ return
+ }
+ sigAlgo.Algorithm, hashFunc = details.oid, details.hash
+ if hashFunc == 0 && pubType != Ed25519 {
+ err = errors.New("x509: cannot sign with hash function requested")
+ return
+ }
+ if hashFunc == crypto.MD5 {
+ err = errors.New("x509: signing with MD5 is not supported")
+ return
+ }
+ if requestedSigAlgo.isRSAPSS() {
+ sigAlgo.Parameters = hashToPSSParameters[hashFunc]
+ }
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ err = errors.New("x509: unknown SignatureAlgorithm")
+ }
+
+ return
+}
+
+// emptyASN1Subject is the ASN.1 DER encoding of an empty Subject, which is
+// just an empty SEQUENCE.
+var emptyASN1Subject = []byte{0x30, 0}
+
+// CreateCertificate creates a new X.509 v3 certificate based on a template.
+// The following members of template are currently used:
+//
+// - AuthorityKeyId
+// - BasicConstraintsValid
+// - CRLDistributionPoints
+// - DNSNames
+// - EmailAddresses
+// - ExcludedDNSDomains
+// - ExcludedEmailAddresses
+// - ExcludedIPRanges
+// - ExcludedURIDomains
+// - ExtKeyUsage
+// - ExtraExtensions
+// - IPAddresses
+// - IsCA
+// - IssuingCertificateURL
+// - KeyUsage
+// - MaxPathLen
+// - MaxPathLenZero
+// - NotAfter
+// - NotBefore
+// - OCSPServer
+// - PermittedDNSDomains
+// - PermittedDNSDomainsCritical
+// - PermittedEmailAddresses
+// - PermittedIPRanges
+// - PermittedURIDomains
+// - PolicyIdentifiers
+// - SerialNumber
+// - SignatureAlgorithm
+// - Subject
+// - SubjectKeyId
+// - URIs
+// - UnknownExtKeyUsage
+//
+// The certificate is signed by parent. If parent is equal to template then the
+// certificate is self-signed. The parameter pub is the public key of the
+// certificate to be generated and priv is the private key of the signer.
+//
+// The returned slice is the certificate in DER encoding.
+//
+// The currently supported key types are *rsa.PublicKey, *ecdsa.PublicKey and
+// ed25519.PublicKey. pub must be a supported key type, and priv must be a
+// crypto.Signer with a supported public key.
+//
+// The AuthorityKeyId will be taken from the SubjectKeyId of parent, if any,
+// unless the resulting certificate is self-signed. Otherwise the value from
+// template will be used.
+//
+// If SubjectKeyId from template is empty and the template is a CA, SubjectKeyId
+// will be generated from the hash of the public key.
+func CreateCertificate(rand io.Reader, template, parent *Certificate, pub, priv any) ([]byte, error) {
+ key, ok := priv.(crypto.Signer)
+ if !ok {
+ return nil, errors.New("x509: certificate private key does not implement crypto.Signer")
+ }
+
+ if template.SerialNumber == nil {
+ return nil, errors.New("x509: no SerialNumber given")
+ }
+
+	// RFC 5280 Section 4.1.2.2: serial number must be positive
+ //
+ // We _should_ also restrict serials to <= 20 octets, but it turns out a lot of people
+ // get this wrong, in part because the encoding can itself alter the length of the
+ // serial. For now we accept these non-conformant serials.
+ if template.SerialNumber.Sign() == -1 {
+ return nil, errors.New("x509: serial number must be positive")
+ }
+
+ if template.BasicConstraintsValid && !template.IsCA && template.MaxPathLen != -1 && (template.MaxPathLen != 0 || template.MaxPathLenZero) {
+ return nil, errors.New("x509: only CAs are allowed to specify MaxPathLen")
+ }
+
+ hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(key.Public(), template.SignatureAlgorithm)
+ if err != nil {
+ return nil, err
+ }
+
+ publicKeyBytes, publicKeyAlgorithm, err := marshalPublicKey(pub)
+ if err != nil {
+ return nil, err
+ }
+
+ asn1Issuer, err := subjectBytes(parent)
+ if err != nil {
+ return nil, err
+ }
+
+ asn1Subject, err := subjectBytes(template)
+ if err != nil {
+ return nil, err
+ }
+
+ authorityKeyId := template.AuthorityKeyId
+ if !bytes.Equal(asn1Issuer, asn1Subject) && len(parent.SubjectKeyId) > 0 {
+ authorityKeyId = parent.SubjectKeyId
+ }
+
+ subjectKeyId := template.SubjectKeyId
+ if len(subjectKeyId) == 0 && template.IsCA {
+ // SubjectKeyId generated using method 1 in RFC 5280, Section 4.2.1.2:
+ // (1) The keyIdentifier is composed of the 160-bit SHA-1 hash of the
+ // value of the BIT STRING subjectPublicKey (excluding the tag,
+ // length, and number of unused bits).
+ h := sha1.Sum(publicKeyBytes)
+ subjectKeyId = h[:]
+ }
+
+ // Check that the signer's public key matches the private key, if available.
+ type privateKey interface {
+ Equal(crypto.PublicKey) bool
+ }
+ if privPub, ok := key.Public().(privateKey); !ok {
+ return nil, errors.New("x509: internal error: supported public key does not implement Equal")
+ } else if parent.PublicKey != nil && !privPub.Equal(parent.PublicKey) {
+ return nil, errors.New("x509: provided PrivateKey doesn't match parent's PublicKey")
+ }
+
+ extensions, err := buildCertExtensions(template, bytes.Equal(asn1Subject, emptyASN1Subject), authorityKeyId, subjectKeyId)
+ if err != nil {
+ return nil, err
+ }
+
+ encodedPublicKey := asn1.BitString{BitLength: len(publicKeyBytes) * 8, Bytes: publicKeyBytes}
+ c := tbsCertificate{
+ Version: 2,
+ SerialNumber: template.SerialNumber,
+ SignatureAlgorithm: signatureAlgorithm,
+ Issuer: asn1.RawValue{FullBytes: asn1Issuer},
+ Validity: validity{template.NotBefore.UTC(), template.NotAfter.UTC()},
+ Subject: asn1.RawValue{FullBytes: asn1Subject},
+ PublicKey: publicKeyInfo{nil, publicKeyAlgorithm, encodedPublicKey},
+ Extensions: extensions,
+ }
+
+ tbsCertContents, err := asn1.Marshal(c)
+ if err != nil {
+ return nil, err
+ }
+ c.Raw = tbsCertContents
+
+ signed := tbsCertContents
+ if hashFunc != 0 {
+ h := hashFunc.New()
+ h.Write(signed)
+ signed = h.Sum(nil)
+ }
+
+ var signerOpts crypto.SignerOpts = hashFunc
+ if template.SignatureAlgorithm != 0 && template.SignatureAlgorithm.isRSAPSS() {
+ signerOpts = &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ Hash: hashFunc,
+ }
+ }
+
+ var signature []byte
+ signature, err = key.Sign(rand, signed, signerOpts)
+ if err != nil {
+ return nil, err
+ }
+
+ signedCert, err := asn1.Marshal(certificate{
+ nil,
+ c,
+ signatureAlgorithm,
+ asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Check the signature to ensure the crypto.Signer behaved correctly.
+ if err := checkSignature(getSignatureAlgorithmFromAI(signatureAlgorithm), c.Raw, signature, key.Public(), true); err != nil {
+ return nil, fmt.Errorf("x509: signature over certificate returned by signer is invalid: %w", err)
+ }
+
+ return signedCert, nil
+}
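+
+// Example (illustrative sketch; the key type, subject, and validity window are
+// arbitrary example values): creating a minimal self-signed CA certificate.
+//
+//	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	if err != nil { /* handle error */ }
+//	tmpl := &Certificate{
+//		SerialNumber:          big.NewInt(1),
+//		Subject:               pkix.Name{CommonName: "example"},
+//		NotBefore:             time.Now(),
+//		NotAfter:              time.Now().Add(365 * 24 * time.Hour),
+//		KeyUsage:              KeyUsageDigitalSignature | KeyUsageCertSign,
+//		BasicConstraintsValid: true,
+//		IsCA:                  true,
+//	}
+//	der, err := CreateCertificate(rand.Reader, tmpl, tmpl, &priv.PublicKey, priv)
+//	if err != nil { /* handle error */ }
+//	_ = der // DER-encoded certificate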
+
+// pemCRLPrefix is the magic string that indicates that we have a PEM encoded
+// CRL.
+var pemCRLPrefix = []byte("-----BEGIN X509 CRL")
+
+// pemType is the type of a PEM encoded CRL.
+var pemType = "X509 CRL"
+
+// ParseCRL parses a CRL from the given bytes. It's often the case that PEM
+// encoded CRLs will appear where they should be DER encoded, so this function
+// will transparently handle PEM encoding as long as there isn't any leading
+// garbage.
+//
+// Deprecated: Use ParseRevocationList instead.
+func ParseCRL(crlBytes []byte) (*pkix.CertificateList, error) {
+ if bytes.HasPrefix(crlBytes, pemCRLPrefix) {
+ block, _ := pem.Decode(crlBytes)
+ if block != nil && block.Type == pemType {
+ crlBytes = block.Bytes
+ }
+ }
+ return ParseDERCRL(crlBytes)
+}
+
+// ParseDERCRL parses a DER encoded CRL from the given bytes.
+//
+// Deprecated: Use ParseRevocationList instead.
+func ParseDERCRL(derBytes []byte) (*pkix.CertificateList, error) {
+ certList := new(pkix.CertificateList)
+ if rest, err := asn1.Unmarshal(derBytes, certList); err != nil {
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after CRL")
+ }
+ return certList, nil
+}
+
+// CreateCRL returns a DER encoded CRL, signed by this Certificate, that
+// contains the given list of revoked certificates.
+//
+// Deprecated: this method does not generate an RFC 5280 conformant X.509 v2 CRL.
+// To generate a standards compliant CRL, use CreateRevocationList instead.
+func (c *Certificate) CreateCRL(rand io.Reader, priv any, revokedCerts []pkix.RevokedCertificate, now, expiry time.Time) (crlBytes []byte, err error) {
+ key, ok := priv.(crypto.Signer)
+ if !ok {
+ return nil, errors.New("x509: certificate private key does not implement crypto.Signer")
+ }
+
+ hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(key.Public(), 0)
+ if err != nil {
+ return nil, err
+ }
+
+ // Force revocation times to UTC per RFC 5280.
+ revokedCertsUTC := make([]pkix.RevokedCertificate, len(revokedCerts))
+ for i, rc := range revokedCerts {
+ rc.RevocationTime = rc.RevocationTime.UTC()
+ revokedCertsUTC[i] = rc
+ }
+
+ tbsCertList := pkix.TBSCertificateList{
+ Version: 1,
+ Signature: signatureAlgorithm,
+ Issuer: c.Subject.ToRDNSequence(),
+ ThisUpdate: now.UTC(),
+ NextUpdate: expiry.UTC(),
+ RevokedCertificates: revokedCertsUTC,
+ }
+
+ // Authority Key Id
+ if len(c.SubjectKeyId) > 0 {
+ var aki pkix.Extension
+ aki.Id = oidExtensionAuthorityKeyId
+ aki.Value, err = asn1.Marshal(authKeyId{Id: c.SubjectKeyId})
+ if err != nil {
+ return
+ }
+ tbsCertList.Extensions = append(tbsCertList.Extensions, aki)
+ }
+
+ tbsCertListContents, err := asn1.Marshal(tbsCertList)
+ if err != nil {
+ return
+ }
+
+ signed := tbsCertListContents
+ if hashFunc != 0 {
+ h := hashFunc.New()
+ h.Write(signed)
+ signed = h.Sum(nil)
+ }
+
+ var signature []byte
+ signature, err = key.Sign(rand, signed, hashFunc)
+ if err != nil {
+ return
+ }
+
+ return asn1.Marshal(pkix.CertificateList{
+ TBSCertList: tbsCertList,
+ SignatureAlgorithm: signatureAlgorithm,
+ SignatureValue: asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
+ })
+}
+
+// CertificateRequest represents a PKCS #10 certificate signature request.
+type CertificateRequest struct {
+ Raw []byte // Complete ASN.1 DER content (CSR, signature algorithm and signature).
+ RawTBSCertificateRequest []byte // Certificate request info part of raw ASN.1 DER content.
+ RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo.
+ RawSubject []byte // DER encoded Subject.
+
+ Version int
+ Signature []byte
+ SignatureAlgorithm SignatureAlgorithm
+
+ PublicKeyAlgorithm PublicKeyAlgorithm
+ PublicKey any
+
+ Subject pkix.Name
+
+	// Attributes contains the CSR attributes that can be parsed as
+	// pkix.AttributeTypeAndValueSET.
+ //
+ // Deprecated: Use Extensions and ExtraExtensions instead for parsing and
+ // generating the requestedExtensions attribute.
+ Attributes []pkix.AttributeTypeAndValueSET
+
+ // Extensions contains all requested extensions, in raw form. When parsing
+ // CSRs, this can be used to extract extensions that are not parsed by this
+ // package.
+ Extensions []pkix.Extension
+
+ // ExtraExtensions contains extensions to be copied, raw, into any CSR
+ // marshaled by CreateCertificateRequest. Values override any extensions
+ // that would otherwise be produced based on the other fields but are
+ // overridden by any extensions specified in Attributes.
+ //
+ // The ExtraExtensions field is not populated by ParseCertificateRequest,
+ // see Extensions instead.
+ ExtraExtensions []pkix.Extension
+
+ // Subject Alternate Name values.
+ DNSNames []string
+ EmailAddresses []string
+ IPAddresses []net.IP
+ URIs []*url.URL
+}
+
+// These structures reflect the ASN.1 structure of X.509 certificate
+// signature requests (see RFC 2986):
+
+type tbsCertificateRequest struct {
+ Raw asn1.RawContent
+ Version int
+ Subject asn1.RawValue
+ PublicKey publicKeyInfo
+ RawAttributes []asn1.RawValue `asn1:"tag:0"`
+}
+
+type certificateRequest struct {
+ Raw asn1.RawContent
+ TBSCSR tbsCertificateRequest
+ SignatureAlgorithm pkix.AlgorithmIdentifier
+ SignatureValue asn1.BitString
+}
+
+// oidExtensionRequest is a PKCS #9 OBJECT IDENTIFIER that indicates requested
+// extensions in a CSR.
+var oidExtensionRequest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 14}
+
+// newRawAttributes converts AttributeTypeAndValueSETs from a template
+// CertificateRequest's Attributes into tbsCertificateRequest RawAttributes.
+func newRawAttributes(attributes []pkix.AttributeTypeAndValueSET) ([]asn1.RawValue, error) {
+ var rawAttributes []asn1.RawValue
+ b, err := asn1.Marshal(attributes)
+ if err != nil {
+ return nil, err
+ }
+ rest, err := asn1.Unmarshal(b, &rawAttributes)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) != 0 {
+ return nil, errors.New("x509: failed to unmarshal raw CSR Attributes")
+ }
+ return rawAttributes, nil
+}
+
+// parseRawAttributes unmarshals RawAttributes into AttributeTypeAndValueSETs.
+func parseRawAttributes(rawAttributes []asn1.RawValue) []pkix.AttributeTypeAndValueSET {
+ var attributes []pkix.AttributeTypeAndValueSET
+ for _, rawAttr := range rawAttributes {
+ var attr pkix.AttributeTypeAndValueSET
+ rest, err := asn1.Unmarshal(rawAttr.FullBytes, &attr)
+ // Ignore attributes that don't parse into pkix.AttributeTypeAndValueSET
+ // (i.e.: challengePassword or unstructuredName).
+ if err == nil && len(rest) == 0 {
+ attributes = append(attributes, attr)
+ }
+ }
+ return attributes
+}
+
+// parseCSRExtensions parses the attributes from a CSR and extracts any
+// requested extensions.
+func parseCSRExtensions(rawAttributes []asn1.RawValue) ([]pkix.Extension, error) {
+ // pkcs10Attribute reflects the Attribute structure from RFC 2986, Section 4.1.
+ type pkcs10Attribute struct {
+ Id asn1.ObjectIdentifier
+ Values []asn1.RawValue `asn1:"set"`
+ }
+
+ var ret []pkix.Extension
+ seenExts := make(map[string]bool)
+ for _, rawAttr := range rawAttributes {
+ var attr pkcs10Attribute
+ if rest, err := asn1.Unmarshal(rawAttr.FullBytes, &attr); err != nil || len(rest) != 0 || len(attr.Values) == 0 {
+ // Ignore attributes that don't parse.
+ continue
+ }
+ oidStr := attr.Id.String()
+ if seenExts[oidStr] {
+ return nil, errors.New("x509: certificate request contains duplicate extensions")
+ }
+ seenExts[oidStr] = true
+
+ if !attr.Id.Equal(oidExtensionRequest) {
+ continue
+ }
+
+ var extensions []pkix.Extension
+ if _, err := asn1.Unmarshal(attr.Values[0].FullBytes, &extensions); err != nil {
+ return nil, err
+ }
+ requestedExts := make(map[string]bool)
+ for _, ext := range extensions {
+ oidStr := ext.Id.String()
+ if requestedExts[oidStr] {
+ return nil, errors.New("x509: certificate request contains duplicate requested extensions")
+ }
+ requestedExts[oidStr] = true
+ }
+ ret = append(ret, extensions...)
+ }
+
+ return ret, nil
+}
+
+// CreateCertificateRequest creates a new certificate request based on a
+// template. The following members of template are used:
+//
+// - SignatureAlgorithm
+// - Subject
+// - DNSNames
+// - EmailAddresses
+// - IPAddresses
+// - URIs
+// - ExtraExtensions
+// - Attributes (deprecated)
+//
+// priv is the private key to sign the CSR with, and the corresponding public
+// key will be included in the CSR. It must implement crypto.Signer and its
+// Public() method must return a *rsa.PublicKey or a *ecdsa.PublicKey or an
+// ed25519.PublicKey. (A *rsa.PrivateKey, *ecdsa.PrivateKey or
+// ed25519.PrivateKey satisfies this.)
+//
+// The returned slice is the certificate request in DER encoding.
+func CreateCertificateRequest(rand io.Reader, template *CertificateRequest, priv any) (csr []byte, err error) {
+ key, ok := priv.(crypto.Signer)
+ if !ok {
+ return nil, errors.New("x509: certificate private key does not implement crypto.Signer")
+ }
+
+ var hashFunc crypto.Hash
+ var sigAlgo pkix.AlgorithmIdentifier
+ hashFunc, sigAlgo, err = signingParamsForPublicKey(key.Public(), template.SignatureAlgorithm)
+ if err != nil {
+ return nil, err
+ }
+
+ var publicKeyBytes []byte
+ var publicKeyAlgorithm pkix.AlgorithmIdentifier
+ publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(key.Public())
+ if err != nil {
+ return nil, err
+ }
+
+ extensions, err := buildCSRExtensions(template)
+ if err != nil {
+ return nil, err
+ }
+
+ // Make a copy of template.Attributes because we may alter it below.
+ attributes := make([]pkix.AttributeTypeAndValueSET, 0, len(template.Attributes))
+ for _, attr := range template.Attributes {
+ values := make([][]pkix.AttributeTypeAndValue, len(attr.Value))
+ copy(values, attr.Value)
+ attributes = append(attributes, pkix.AttributeTypeAndValueSET{
+ Type: attr.Type,
+ Value: values,
+ })
+ }
+
+ extensionsAppended := false
+ if len(extensions) > 0 {
+ // Append the extensions to an existing attribute if possible.
+ for _, atvSet := range attributes {
+ if !atvSet.Type.Equal(oidExtensionRequest) || len(atvSet.Value) == 0 {
+ continue
+ }
+
+ // specifiedExtensions contains all the extensions that we
+ // found specified via template.Attributes.
+ specifiedExtensions := make(map[string]bool)
+
+ for _, atvs := range atvSet.Value {
+ for _, atv := range atvs {
+ specifiedExtensions[atv.Type.String()] = true
+ }
+ }
+
+ newValue := make([]pkix.AttributeTypeAndValue, 0, len(atvSet.Value[0])+len(extensions))
+ newValue = append(newValue, atvSet.Value[0]...)
+
+ for _, e := range extensions {
+ if specifiedExtensions[e.Id.String()] {
+ // Attributes already contained a value for
+ // this extension and it takes priority.
+ continue
+ }
+
+ newValue = append(newValue, pkix.AttributeTypeAndValue{
+ // There is no place for the critical
+ // flag in an AttributeTypeAndValue.
+ Type: e.Id,
+ Value: e.Value,
+ })
+ }
+
+ atvSet.Value[0] = newValue
+ extensionsAppended = true
+ break
+ }
+ }
+
+ rawAttributes, err := newRawAttributes(attributes)
+ if err != nil {
+ return
+ }
+
+ // If not included in attributes, add a new attribute for the
+ // extensions.
+ if len(extensions) > 0 && !extensionsAppended {
+ attr := struct {
+ Type asn1.ObjectIdentifier
+ Value [][]pkix.Extension `asn1:"set"`
+ }{
+ Type: oidExtensionRequest,
+ Value: [][]pkix.Extension{extensions},
+ }
+
+ b, err := asn1.Marshal(attr)
+ if err != nil {
+ return nil, errors.New("x509: failed to serialise extensions attribute: " + err.Error())
+ }
+
+ var rawValue asn1.RawValue
+ if _, err := asn1.Unmarshal(b, &rawValue); err != nil {
+ return nil, err
+ }
+
+ rawAttributes = append(rawAttributes, rawValue)
+ }
+
+ asn1Subject := template.RawSubject
+ if len(asn1Subject) == 0 {
+ asn1Subject, err = asn1.Marshal(template.Subject.ToRDNSequence())
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ tbsCSR := tbsCertificateRequest{
+ Version: 0, // PKCS #10, RFC 2986
+ Subject: asn1.RawValue{FullBytes: asn1Subject},
+ PublicKey: publicKeyInfo{
+ Algorithm: publicKeyAlgorithm,
+ PublicKey: asn1.BitString{
+ Bytes: publicKeyBytes,
+ BitLength: len(publicKeyBytes) * 8,
+ },
+ },
+ RawAttributes: rawAttributes,
+ }
+
+ tbsCSRContents, err := asn1.Marshal(tbsCSR)
+ if err != nil {
+ return
+ }
+ tbsCSR.Raw = tbsCSRContents
+
+ signed := tbsCSRContents
+ if hashFunc != 0 {
+ h := hashFunc.New()
+ h.Write(signed)
+ signed = h.Sum(nil)
+ }
+
+ var signature []byte
+ signature, err = key.Sign(rand, signed, hashFunc)
+ if err != nil {
+ return
+ }
+
+ return asn1.Marshal(certificateRequest{
+ TBSCSR: tbsCSR,
+ SignatureAlgorithm: sigAlgo,
+ SignatureValue: asn1.BitString{
+ Bytes: signature,
+ BitLength: len(signature) * 8,
+ },
+ })
+}
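+
+// A minimal caller-side sketch (illustrative values; assumes imports of
+// crypto/ecdsa, crypto/elliptic, crypto/rand, crypto/x509/pkix, encoding/pem,
+// and log):
+//
+//	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	tmpl := &x509.CertificateRequest{
+//		Subject:  pkix.Name{CommonName: "example.com"},
+//		DNSNames: []string{"example.com"},
+//	}
+//	der, err := x509.CreateCertificateRequest(rand.Reader, tmpl, key)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	pemCSR := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: der})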
+
+// ParseCertificateRequest parses a single certificate request from the
+// given ASN.1 DER data.
+func ParseCertificateRequest(asn1Data []byte) (*CertificateRequest, error) {
+ var csr certificateRequest
+
+ rest, err := asn1.Unmarshal(asn1Data, &csr)
+ if err != nil {
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, asn1.SyntaxError{Msg: "trailing data"}
+ }
+
+ return parseCertificateRequest(&csr)
+}
+
+func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error) {
+ out := &CertificateRequest{
+ Raw: in.Raw,
+ RawTBSCertificateRequest: in.TBSCSR.Raw,
+ RawSubjectPublicKeyInfo: in.TBSCSR.PublicKey.Raw,
+ RawSubject: in.TBSCSR.Subject.FullBytes,
+
+ Signature: in.SignatureValue.RightAlign(),
+ SignatureAlgorithm: getSignatureAlgorithmFromAI(in.SignatureAlgorithm),
+
+ PublicKeyAlgorithm: getPublicKeyAlgorithmFromOID(in.TBSCSR.PublicKey.Algorithm.Algorithm),
+
+ Version: in.TBSCSR.Version,
+ Attributes: parseRawAttributes(in.TBSCSR.RawAttributes),
+ }
+
+ var err error
+ out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCSR.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ var subject pkix.RDNSequence
+ if rest, err := asn1.Unmarshal(in.TBSCSR.Subject.FullBytes, &subject); err != nil {
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after X.509 Subject")
+ }
+
+ out.Subject.FillFromRDNSequence(&subject)
+
+ if out.Extensions, err = parseCSRExtensions(in.TBSCSR.RawAttributes); err != nil {
+ return nil, err
+ }
+
+ for _, extension := range out.Extensions {
+ switch {
+ case extension.Id.Equal(oidExtensionSubjectAltName):
+ out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(extension.Value)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return out, nil
+}
+
+// CheckSignature reports whether the signature on c is valid.
+func (c *CertificateRequest) CheckSignature() error {
+ return checkSignature(c.SignatureAlgorithm, c.RawTBSCertificateRequest, c.Signature, c.PublicKey, true)
+}
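+
+// A round-trip sketch (assumes der holds DER produced by
+// CreateCertificateRequest):
+//
+//	csr, err := x509.ParseCertificateRequest(der)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := csr.CheckSignature(); err != nil {
+//		log.Fatal(err)
+//	}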
+
+// RevocationList contains the fields used to create an X.509 v2 Certificate
+// Revocation list with CreateRevocationList.
+type RevocationList struct {
+ // Raw contains the complete ASN.1 DER content of the CRL (tbsCertList,
+ // signatureAlgorithm, and signatureValue.)
+ Raw []byte
+ // RawTBSRevocationList contains just the tbsCertList portion of the ASN.1
+ // DER.
+ RawTBSRevocationList []byte
+ // RawIssuer contains the DER encoded Issuer.
+ RawIssuer []byte
+
+ // Issuer contains the DN of the issuing certificate.
+ Issuer pkix.Name
+ // AuthorityKeyId is used to identify the public key associated with the
+ // issuing certificate. It is populated from the authorityKeyIdentifier
+ // extension when parsing a CRL. It is ignored when creating a CRL; the
+ // extension is populated from the issuing certificate itself.
+ AuthorityKeyId []byte
+
+ Signature []byte
+ // SignatureAlgorithm is used to determine the signature algorithm to be
+ // used when signing the CRL. If 0 the default algorithm for the signing
+ // key will be used.
+ SignatureAlgorithm SignatureAlgorithm
+
+ // RevokedCertificates is used to populate the revokedCertificates
+ // sequence in the CRL; it may be empty. RevokedCertificates may be nil,
+ // in which case an empty CRL will be created.
+ RevokedCertificates []pkix.RevokedCertificate
+
+ // Number is used to populate the X.509 v2 cRLNumber extension in the CRL,
+ // which should be a monotonically increasing sequence number for a given
+ // CRL scope and CRL issuer. It is also populated from the cRLNumber
+ // extension when parsing a CRL.
+ Number *big.Int
+
+ // ThisUpdate is used to populate the thisUpdate field in the CRL, which
+ // indicates the issuance date of the CRL.
+ ThisUpdate time.Time
+ // NextUpdate is used to populate the nextUpdate field in the CRL, which
+ // indicates the date by which the next CRL will be issued. NextUpdate
+ // must be greater than ThisUpdate.
+ NextUpdate time.Time
+
+ // Extensions contains raw X.509 extensions. When creating a CRL,
+ // the Extensions field is ignored, see ExtraExtensions.
+ Extensions []pkix.Extension
+
+ // ExtraExtensions contains any additional extensions to add directly to
+ // the CRL.
+ ExtraExtensions []pkix.Extension
+}
+
+// CreateRevocationList creates a new X.509 v2 Certificate Revocation List,
+// according to RFC 5280, based on template.
+//
+// The CRL is signed by priv which should be the private key associated with
+// the public key in the issuer certificate.
+//
+// The issuer may not be nil, and the crlSign bit must be set in KeyUsage in
+// order to use it as a CRL issuer.
+//
+// The issuer distinguished name CRL field and authority key identifier
+// extension are populated using the issuer certificate. issuer must have
+// SubjectKeyId set.
+func CreateRevocationList(rand io.Reader, template *RevocationList, issuer *Certificate, priv crypto.Signer) ([]byte, error) {
+ if template == nil {
+ return nil, errors.New("x509: template can not be nil")
+ }
+ if issuer == nil {
+ return nil, errors.New("x509: issuer can not be nil")
+ }
+ if (issuer.KeyUsage & KeyUsageCRLSign) == 0 {
+ return nil, errors.New("x509: issuer must have the crlSign key usage bit set")
+ }
+ if len(issuer.SubjectKeyId) == 0 {
+ return nil, errors.New("x509: issuer certificate doesn't contain a subject key identifier")
+ }
+ if template.NextUpdate.Before(template.ThisUpdate) {
+ return nil, errors.New("x509: template.ThisUpdate is after template.NextUpdate")
+ }
+ if template.Number == nil {
+ return nil, errors.New("x509: template contains nil Number field")
+ }
+
+ hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm)
+ if err != nil {
+ return nil, err
+ }
+
+ // Force revocation times to UTC per RFC 5280.
+ revokedCertsUTC := make([]pkix.RevokedCertificate, len(template.RevokedCertificates))
+ for i, rc := range template.RevokedCertificates {
+ rc.RevocationTime = rc.RevocationTime.UTC()
+ revokedCertsUTC[i] = rc
+ }
+
+ aki, err := asn1.Marshal(authKeyId{Id: issuer.SubjectKeyId})
+ if err != nil {
+ return nil, err
+ }
+
+ if numBytes := template.Number.Bytes(); len(numBytes) > 20 || (len(numBytes) == 20 && numBytes[0]&0x80 != 0) {
+ return nil, errors.New("x509: CRL number exceeds 20 octets")
+ }
+ crlNum, err := asn1.Marshal(template.Number)
+ if err != nil {
+ return nil, err
+ }
+
+ tbsCertList := pkix.TBSCertificateList{
+ Version: 1, // v2
+ Signature: signatureAlgorithm,
+ Issuer: issuer.Subject.ToRDNSequence(),
+ ThisUpdate: template.ThisUpdate.UTC(),
+ NextUpdate: template.NextUpdate.UTC(),
+ Extensions: []pkix.Extension{
+ {
+ Id: oidExtensionAuthorityKeyId,
+ Value: aki,
+ },
+ {
+ Id: oidExtensionCRLNumber,
+ Value: crlNum,
+ },
+ },
+ }
+ if len(revokedCertsUTC) > 0 {
+ tbsCertList.RevokedCertificates = revokedCertsUTC
+ }
+
+ if len(template.ExtraExtensions) > 0 {
+ tbsCertList.Extensions = append(tbsCertList.Extensions, template.ExtraExtensions...)
+ }
+
+ tbsCertListContents, err := asn1.Marshal(tbsCertList)
+ if err != nil {
+ return nil, err
+ }
+
+ input := tbsCertListContents
+ if hashFunc != 0 {
+ h := hashFunc.New()
+ h.Write(tbsCertListContents)
+ input = h.Sum(nil)
+ }
+ var signerOpts crypto.SignerOpts = hashFunc
+ if template.SignatureAlgorithm.isRSAPSS() {
+ signerOpts = &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ Hash: hashFunc,
+ }
+ }
+
+ signature, err := priv.Sign(rand, input, signerOpts)
+ if err != nil {
+ return nil, err
+ }
+
+ return asn1.Marshal(pkix.CertificateList{
+ TBSCertList: tbsCertList,
+ SignatureAlgorithm: signatureAlgorithm,
+ SignatureValue: asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
+ })
+}
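+
+// A minimal caller-side sketch (illustrative serial numbers and times;
+// assumes issuerCert is a CA *x509.Certificate with SubjectKeyId set and
+// issuerKey is its crypto.Signer):
+//
+//	tmpl := &x509.RevocationList{
+//		Number:     big.NewInt(1),
+//		ThisUpdate: time.Now(),
+//		NextUpdate: time.Now().Add(24 * time.Hour),
+//		RevokedCertificates: []pkix.RevokedCertificate{
+//			{SerialNumber: big.NewInt(42), RevocationTime: time.Now()},
+//		},
+//	}
+//	crlDER, err := x509.CreateRevocationList(rand.Reader, tmpl, issuerCert, issuerKey)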
+
+// CheckSignatureFrom verifies that the signature on rl is a valid signature
+// from parent.
+func (rl *RevocationList) CheckSignatureFrom(parent *Certificate) error {
+ if parent.Version == 3 && !parent.BasicConstraintsValid ||
+ parent.BasicConstraintsValid && !parent.IsCA {
+ return ConstraintViolationError{}
+ }
+
+ if parent.KeyUsage != 0 && parent.KeyUsage&KeyUsageCRLSign == 0 {
+ return ConstraintViolationError{}
+ }
+
+ if parent.PublicKeyAlgorithm == UnknownPublicKeyAlgorithm {
+ return ErrUnsupportedAlgorithm
+ }
+
+ return parent.CheckSignature(rl.SignatureAlgorithm, rl.RawTBSRevocationList, rl.Signature)
+}
diff --git a/contrib/go/_std_1.19/src/embed/embed.go b/contrib/go/_std_1.19/src/embed/embed.go
new file mode 100644
index 0000000000..c54b961d15
--- /dev/null
+++ b/contrib/go/_std_1.19/src/embed/embed.go
@@ -0,0 +1,432 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package embed provides access to files embedded in the running Go program.
+//
+// Go source files that import "embed" can use the //go:embed directive
+// to initialize a variable of type string, []byte, or FS with the contents of
+// files read from the package directory or subdirectories at compile time.
+//
+// For example, here are three ways to embed a file named hello.txt
+// and then print its contents at run time.
+//
+// Embedding one file into a string:
+//
+// import _ "embed"
+//
+// //go:embed hello.txt
+// var s string
+// print(s)
+//
+// Embedding one file into a slice of bytes:
+//
+// import _ "embed"
+//
+// //go:embed hello.txt
+// var b []byte
+// print(string(b))
+//
+// Embedding one or more files into a file system:
+//
+// import "embed"
+//
+// //go:embed hello.txt
+// var f embed.FS
+// data, _ := f.ReadFile("hello.txt")
+// print(string(data))
+//
+// # Directives
+//
+// A //go:embed directive above a variable declaration specifies which files to embed,
+// using one or more path.Match patterns.
+//
+// The directive must immediately precede a line containing the declaration of a single variable.
+// Only blank lines and ‘//’ line comments are permitted between the directive and the declaration.
+//
+// The type of the variable must be a string type, or a slice of a byte type,
+// or FS (or an alias of FS).
+//
+// For example:
+//
+// package server
+//
+// import "embed"
+//
+// // content holds our static web server content.
+// //go:embed image/* template/*
+// //go:embed html/index.html
+// var content embed.FS
+//
+// The Go build system will recognize the directives and arrange for the declared variable
+// (in the example above, content) to be populated with the matching files from the file system.
+//
+// The //go:embed directive accepts multiple space-separated patterns for
+// brevity, but it can also be repeated, to avoid very long lines when there are
+// many patterns. The patterns are interpreted relative to the package directory
+// containing the source file. The path separator is a forward slash, even on
+// Windows systems. Patterns may not contain ‘.’ or ‘..’ or empty path elements,
+// nor may they begin or end with a slash. To match everything in the current
+// directory, use ‘*’ instead of ‘.’. To allow for naming files with spaces in
+// their names, patterns can be written as Go double-quoted or back-quoted
+// string literals.
+//
+// If a pattern names a directory, all files in the subtree rooted at that directory are
+// embedded (recursively), except that files with names beginning with ‘.’ or ‘_’
+// are excluded. So the variable in the above example is almost equivalent to:
+//
+// // content is our static web server content.
+// //go:embed image template html/index.html
+// var content embed.FS
+//
+// The difference is that ‘image/*’ embeds ‘image/.tempfile’ while ‘image’ does not.
+// Neither embeds ‘image/dir/.tempfile’.
+//
+// If a pattern begins with the prefix ‘all:’, then the rule for walking directories is changed
+// to include those files beginning with ‘.’ or ‘_’. For example, ‘all:image’ embeds
+// both ‘image/.tempfile’ and ‘image/dir/.tempfile’.
+//
+// The //go:embed directive can be used with both exported and unexported variables,
+// depending on whether the package wants to make the data available to other packages.
+// It can only be used with variables at package scope, not with local variables.
+//
+// Patterns must not match files outside the package's module, such as ‘.git/*’ or symbolic links.
+// Patterns must not match files whose names include the special punctuation characters " * < > ? ` ' | / \ and :.
+// Matches for empty directories are ignored. After that, each pattern in a //go:embed line
+// must match at least one file or non-empty directory.
+//
+// If any patterns are invalid or have invalid matches, the build will fail.
+//
+// # Strings and Bytes
+//
+// The //go:embed line for a variable of type string or []byte can have only a single pattern,
+// and that pattern can match only a single file. The string or []byte is initialized with
+// the contents of that file.
+//
+// The //go:embed directive requires importing "embed", even when using a string or []byte.
+// In source files that don't refer to embed.FS, use a blank import (import _ "embed").
+//
+// # File Systems
+//
+// For embedding a single file, a variable of type string or []byte is often best.
+// The FS type enables embedding a tree of files, such as a directory of static
+// web server content, as in the example above.
+//
+// FS implements the io/fs package's FS interface, so it can be used with any package that
+// understands file systems, including net/http, text/template, and html/template.
+//
+// For example, given the content variable in the example above, we can write:
+//
+// http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.FS(content))))
+//
+// template.ParseFS(content, "*.tmpl")
+//
+// # Tools
+//
+// To support tools that analyze Go packages, the patterns found in //go:embed lines
+// are available in “go list” output. See the EmbedPatterns, TestEmbedPatterns,
+// and XTestEmbedPatterns fields in the “go help list” output.
+package embed
+
+import (
+ "errors"
+ "io"
+ "io/fs"
+ "time"
+)
+
+// An FS is a read-only collection of files, usually initialized with a //go:embed directive.
+// When declared without a //go:embed directive, an FS is an empty file system.
+//
+// An FS is a read-only value, so it is safe to use from multiple goroutines
+// simultaneously and also safe to assign values of type FS to each other.
+//
+// FS implements fs.FS, so it can be used with any package that understands
+// file system interfaces, including net/http, text/template, and html/template.
+//
+// See the package documentation for more details about initializing an FS.
+type FS struct {
+ // The compiler knows the layout of this struct.
+ // See cmd/compile/internal/staticdata's WriteEmbed.
+ //
+ // The files list is sorted by name but not by simple string comparison.
+ // Instead, each file's name takes the form "dir/elem" or "dir/elem/".
+ // The optional trailing slash indicates that the file is itself a directory.
+ // The files list is sorted first by dir (if dir is missing, it is taken to be ".")
+ // and then by base, so this list of files:
+ //
+ // p
+ // q/
+ // q/r
+ // q/s/
+ // q/s/t
+ // q/s/u
+ // q/v
+ // w
+ //
+ // is actually sorted as:
+ //
+ // p # dir=. elem=p
+ // q/ # dir=. elem=q
+ // w # dir=. elem=w
+ // q/r # dir=q elem=r
+ // q/s/ # dir=q elem=s
+ // q/v # dir=q elem=v
+ // q/s/t # dir=q/s elem=t
+ // q/s/u # dir=q/s elem=u
+ //
+ // This order brings directory contents together in contiguous sections
+ // of the list, allowing a directory read to use binary search to find
+ // the relevant sequence of entries.
+ files *[]file
+}
+
+// split splits the name into dir and elem as described in the
+// comment in the FS struct above. isDir reports whether the
+// final trailing slash was present, indicating that name is a directory.
+func split(name string) (dir, elem string, isDir bool) {
+ if name[len(name)-1] == '/' {
+ isDir = true
+ name = name[:len(name)-1]
+ }
+ i := len(name) - 1
+ for i >= 0 && name[i] != '/' {
+ i--
+ }
+ if i < 0 {
+ return ".", name, isDir
+ }
+ return name[:i], name[i+1:], isDir
+}
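+
+// For example (a worked sketch of the naming convention above):
+//
+//	split("q/s/u") = ("q/s", "u", false)
+//	split("q/s/")  = ("q", "s", true)
+//	split("p")     = (".", "p", false)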
+
+// trimSlash trims a trailing slash from name, if present,
+// returning the possibly shortened name.
+func trimSlash(name string) string {
+ if len(name) > 0 && name[len(name)-1] == '/' {
+ return name[:len(name)-1]
+ }
+ return name
+}
+
+var (
+ _ fs.ReadDirFS = FS{}
+ _ fs.ReadFileFS = FS{}
+)
+
+// A file is a single file in the FS.
+// It implements fs.FileInfo and fs.DirEntry.
+type file struct {
+ // The compiler knows the layout of this struct.
+ // See cmd/compile/internal/staticdata's WriteEmbed.
+ name string
+ data string
+ hash [16]byte // truncated SHA256 hash
+}
+
+var (
+ _ fs.FileInfo = (*file)(nil)
+ _ fs.DirEntry = (*file)(nil)
+)
+
+func (f *file) Name() string { _, elem, _ := split(f.name); return elem }
+func (f *file) Size() int64 { return int64(len(f.data)) }
+func (f *file) ModTime() time.Time { return time.Time{} }
+func (f *file) IsDir() bool { _, _, isDir := split(f.name); return isDir }
+func (f *file) Sys() any { return nil }
+func (f *file) Type() fs.FileMode { return f.Mode().Type() }
+func (f *file) Info() (fs.FileInfo, error) { return f, nil }
+
+func (f *file) Mode() fs.FileMode {
+ if f.IsDir() {
+ return fs.ModeDir | 0555
+ }
+ return 0444
+}
+
+// dotFile is a file for the root directory,
+// which is omitted from the files list in a FS.
+var dotFile = &file{name: "./"}
+
+// lookup returns the named file, or nil if it is not present.
+func (f FS) lookup(name string) *file {
+ if !fs.ValidPath(name) {
+ // The compiler should never emit a file with an invalid name,
+ // so this check is not strictly necessary (if name is invalid,
+ // we shouldn't find a match below), but it's a good backstop anyway.
+ return nil
+ }
+ if name == "." {
+ return dotFile
+ }
+ if f.files == nil {
+ return nil
+ }
+
+ // Binary search to find where name would be in the list,
+ // and then check if name is at that position.
+ dir, elem, _ := split(name)
+ files := *f.files
+ i := sortSearch(len(files), func(i int) bool {
+ idir, ielem, _ := split(files[i].name)
+ return idir > dir || idir == dir && ielem >= elem
+ })
+ if i < len(files) && trimSlash(files[i].name) == name {
+ return &files[i]
+ }
+ return nil
+}
+
+// readDir returns the list of files corresponding to the directory dir.
+func (f FS) readDir(dir string) []file {
+ if f.files == nil {
+ return nil
+ }
+ // Binary search to find where dir starts and ends in the list
+ // and then return that slice of the list.
+ files := *f.files
+ i := sortSearch(len(files), func(i int) bool {
+ idir, _, _ := split(files[i].name)
+ return idir >= dir
+ })
+ j := sortSearch(len(files), func(j int) bool {
+ jdir, _, _ := split(files[j].name)
+ return jdir > dir
+ })
+ return files[i:j]
+}
+
+// Open opens the named file for reading and returns it as an fs.File.
+//
+// The returned file implements io.Seeker when the file is not a directory.
+func (f FS) Open(name string) (fs.File, error) {
+ file := f.lookup(name)
+ if file == nil {
+ return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
+ }
+ if file.IsDir() {
+ return &openDir{file, f.readDir(name), 0}, nil
+ }
+ return &openFile{file, 0}, nil
+}
+
+// ReadDir reads and returns the entire named directory.
+func (f FS) ReadDir(name string) ([]fs.DirEntry, error) {
+ file, err := f.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ dir, ok := file.(*openDir)
+ if !ok {
+ return nil, &fs.PathError{Op: "read", Path: name, Err: errors.New("not a directory")}
+ }
+ list := make([]fs.DirEntry, len(dir.files))
+ for i := range list {
+ list[i] = &dir.files[i]
+ }
+ return list, nil
+}
+
+// ReadFile reads and returns the content of the named file.
+func (f FS) ReadFile(name string) ([]byte, error) {
+ file, err := f.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ ofile, ok := file.(*openFile)
+ if !ok {
+ return nil, &fs.PathError{Op: "read", Path: name, Err: errors.New("is a directory")}
+ }
+ return []byte(ofile.f.data), nil
+}
+
+// An openFile is a regular file open for reading.
+type openFile struct {
+ f *file // the file itself
+ offset int64 // current read offset
+}
+
+var (
+ _ io.Seeker = (*openFile)(nil)
+)
+
+func (f *openFile) Close() error { return nil }
+func (f *openFile) Stat() (fs.FileInfo, error) { return f.f, nil }
+
+func (f *openFile) Read(b []byte) (int, error) {
+ if f.offset >= int64(len(f.f.data)) {
+ return 0, io.EOF
+ }
+ if f.offset < 0 {
+ return 0, &fs.PathError{Op: "read", Path: f.f.name, Err: fs.ErrInvalid}
+ }
+ n := copy(b, f.f.data[f.offset:])
+ f.offset += int64(n)
+ return n, nil
+}
+
+func (f *openFile) Seek(offset int64, whence int) (int64, error) {
+ switch whence {
+ case 0:
+ // offset += 0
+ case 1:
+ offset += f.offset
+ case 2:
+ offset += int64(len(f.f.data))
+ }
+ if offset < 0 || offset > int64(len(f.f.data)) {
+ return 0, &fs.PathError{Op: "seek", Path: f.f.name, Err: fs.ErrInvalid}
+ }
+ f.offset = offset
+ return offset, nil
+}
+
+// An openDir is a directory open for reading.
+type openDir struct {
+ f *file // the directory file itself
+ files []file // the directory contents
+ offset int // the read offset, an index into the files slice
+}
+
+func (d *openDir) Close() error { return nil }
+func (d *openDir) Stat() (fs.FileInfo, error) { return d.f, nil }
+
+func (d *openDir) Read([]byte) (int, error) {
+ return 0, &fs.PathError{Op: "read", Path: d.f.name, Err: errors.New("is a directory")}
+}
+
+func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
+ n := len(d.files) - d.offset
+ if n == 0 {
+ if count <= 0 {
+ return nil, nil
+ }
+ return nil, io.EOF
+ }
+ if count > 0 && n > count {
+ n = count
+ }
+ list := make([]fs.DirEntry, n)
+ for i := range list {
+ list[i] = &d.files[d.offset+i]
+ }
+ d.offset += n
+ return list, nil
+}
+
+// sortSearch is like sort.Search, avoiding an import.
+func sortSearch(n int, f func(int) bool) int {
+ // Define f(-1) == false and f(n) == true.
+ // Invariant: f(i-1) == false, f(j) == true.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if !f(h) {
+ i = h + 1 // preserves f(i-1) == false
+ } else {
+ j = h // preserves f(j) == true
+ }
+ }
+ // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
+ return i
+}
diff --git a/contrib/go/_std_1.19/src/encoding/asn1/asn1.go b/contrib/go/_std_1.19/src/encoding/asn1/asn1.go
new file mode 100644
index 0000000000..c90bba47dc
--- /dev/null
+++ b/contrib/go/_std_1.19/src/encoding/asn1/asn1.go
@@ -0,0 +1,1122 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package asn1 implements parsing of DER-encoded ASN.1 data structures,
+// as defined in ITU-T Rec X.690.
+//
+// See also “A Layman's Guide to a Subset of ASN.1, BER, and DER,”
+// http://luca.ntop.org/Teaching/Appunti/asn1.html.
+package asn1
+
+// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER, etc.
+// are different encoding formats for those objects. Here, we'll be dealing
+// with DER, the Distinguished Encoding Rules. DER is used in X.509 because
+// it's fast to parse and, unlike BER, has a unique encoding for every object.
+// When calculating hashes over objects, it's important that the resulting
+// bytes be the same at both ends and DER removes this margin of error.
+//
+// ASN.1 is very complex and this package doesn't attempt to implement
+// everything by any means.
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "math/big"
+ "reflect"
+ "strconv"
+ "time"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// A StructuralError suggests that the ASN.1 data is valid, but the Go type
+// which is receiving it doesn't match.
+type StructuralError struct {
+ Msg string
+}
+
+func (e StructuralError) Error() string { return "asn1: structure error: " + e.Msg }
+
+// A SyntaxError suggests that the ASN.1 data is invalid.
+type SyntaxError struct {
+ Msg string
+}
+
+func (e SyntaxError) Error() string { return "asn1: syntax error: " + e.Msg }
+
+// We start by dealing with each of the primitive types in turn.
+
+// BOOLEAN
+
+func parseBool(bytes []byte) (ret bool, err error) {
+ if len(bytes) != 1 {
+ err = SyntaxError{"invalid boolean"}
+ return
+ }
+
+ // DER demands that "If the encoding represents the boolean value TRUE,
+ // its single contents octet shall have all eight bits set to one."
+ // Thus only 0 and 255 are valid encoded values.
+ switch bytes[0] {
+ case 0:
+ ret = false
+ case 0xff:
+ ret = true
+ default:
+ err = SyntaxError{"invalid boolean"}
+ }
+
+ return
+}
+
+// INTEGER
+
+// checkInteger returns nil if the given bytes are a valid DER-encoded
+// INTEGER and an error otherwise.
+func checkInteger(bytes []byte) error {
+ if len(bytes) == 0 {
+ return StructuralError{"empty integer"}
+ }
+ if len(bytes) == 1 {
+ return nil
+ }
+ if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) {
+ return StructuralError{"integer not minimally-encoded"}
+ }
+ return nil
+}
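+
+// For example (a worked sketch): {0x00, 0x80} is a valid minimal encoding of
+// 128, since the leading zero octet is needed to keep the value positive,
+// while {0x00, 0x7f} is rejected: 0x7f alone already encodes 127.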
+
+// parseInt64 treats the given bytes as a big-endian, signed integer and
+// returns the result.
+func parseInt64(bytes []byte) (ret int64, err error) {
+ err = checkInteger(bytes)
+ if err != nil {
+ return
+ }
+ if len(bytes) > 8 {
+ // We'll overflow an int64 in this case.
+ err = StructuralError{"integer too large"}
+ return
+ }
+ for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
+ ret <<= 8
+ ret |= int64(bytes[bytesRead])
+ }
+
+ // Shift up and down in order to sign extend the result.
+ ret <<= 64 - uint8(len(bytes))*8
+ ret >>= 64 - uint8(len(bytes))*8
+ return
+}
+
+// parseInt32 treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseInt32(bytes []byte) (int32, error) {
+ if err := checkInteger(bytes); err != nil {
+ return 0, err
+ }
+ ret64, err := parseInt64(bytes)
+ if err != nil {
+ return 0, err
+ }
+ if ret64 != int64(int32(ret64)) {
+ return 0, StructuralError{"integer too large"}
+ }
+ return int32(ret64), nil
+}
+
+var bigOne = big.NewInt(1)
+
+// parseBigInt treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseBigInt(bytes []byte) (*big.Int, error) {
+ if err := checkInteger(bytes); err != nil {
+ return nil, err
+ }
+ ret := new(big.Int)
+ if len(bytes) > 0 && bytes[0]&0x80 == 0x80 {
+ // This is a negative number.
+ notBytes := make([]byte, len(bytes))
+ for i := range notBytes {
+ notBytes[i] = ^bytes[i]
+ }
+ ret.SetBytes(notBytes)
+ ret.Add(ret, bigOne)
+ ret.Neg(ret)
+ return ret, nil
+ }
+ ret.SetBytes(bytes)
+ return ret, nil
+}
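+
+// For example (a worked sketch): parseBigInt of {0xff, 0x7f} inverts the
+// bytes to {0x00, 0x80} (128), adds one (129), and negates, yielding -129,
+// the two's-complement reading of 0xff7f.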
+
+// BIT STRING
+
+// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
+// bit string is padded up to the nearest byte in memory and the number of
+// valid bits is recorded. Padding bits will be zero.
+type BitString struct {
+ Bytes []byte // bits packed into bytes.
+ BitLength int // length in bits.
+}
+
+// At returns the bit at the given index. If the index is out of range it
+// returns 0.
+func (b BitString) At(i int) int {
+ if i < 0 || i >= b.BitLength {
+ return 0
+ }
+ x := i / 8
+ y := 7 - uint(i%8)
+ return int(b.Bytes[x]>>y) & 1
+}
+
+// RightAlign returns a slice where the padding bits are at the beginning. The
+// slice may share memory with the BitString.
+func (b BitString) RightAlign() []byte {
+ shift := uint(8 - (b.BitLength % 8))
+ if shift == 8 || len(b.Bytes) == 0 {
+ return b.Bytes
+ }
+
+ a := make([]byte, len(b.Bytes))
+ a[0] = b.Bytes[0] >> shift
+ for i := 1; i < len(b.Bytes); i++ {
+ a[i] = b.Bytes[i-1] << (8 - shift)
+ a[i] |= b.Bytes[i] >> shift
+ }
+
+ return a
+}
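+
+// For example (a worked sketch): the three-bit string 101 is stored as
+// BitString{Bytes: []byte{0xa0}, BitLength: 3}, so At(0) == 1, At(1) == 0,
+// At(2) == 1, and RightAlign returns []byte{0x05}.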
+
+// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
+func parseBitString(bytes []byte) (ret BitString, err error) {
+ if len(bytes) == 0 {
+ err = SyntaxError{"zero length BIT STRING"}
+ return
+ }
+ paddingBits := int(bytes[0])
+ if paddingBits > 7 ||
+ len(bytes) == 1 && paddingBits > 0 ||
+ bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {
+ err = SyntaxError{"invalid padding bits in BIT STRING"}
+ return
+ }
+ ret.BitLength = (len(bytes)-1)*8 - paddingBits
+ ret.Bytes = bytes[1:]
+ return
+}
+
+// NULL
+
+// NullRawValue is a RawValue with its Tag set to the ASN.1 NULL type tag (5).
+var NullRawValue = RawValue{Tag: TagNull}
+
+// NullBytes contains bytes representing the DER-encoded ASN.1 NULL type.
+var NullBytes = []byte{TagNull, 0}
+
+// OBJECT IDENTIFIER
+
+// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
+type ObjectIdentifier []int
+
+// Equal reports whether oi and other represent the same identifier.
+func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
+ if len(oi) != len(other) {
+ return false
+ }
+ for i := 0; i < len(oi); i++ {
+ if oi[i] != other[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (oi ObjectIdentifier) String() string {
+ var s string
+
+ for i, v := range oi {
+ if i > 0 {
+ s += "."
+ }
+ s += strconv.Itoa(v)
+ }
+
+ return s
+}
+
+// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
+// returns it. An object identifier is a sequence of variable length integers
+// that are assigned in a hierarchy.
+func parseObjectIdentifier(bytes []byte) (s ObjectIdentifier, err error) {
+ if len(bytes) == 0 {
+ err = SyntaxError{"zero length OBJECT IDENTIFIER"}
+ return
+ }
+
+ // In the worst case, we get two elements from the first byte (which is
+ // encoded differently) and then every varint is a single byte long.
+ s = make([]int, len(bytes)+1)
+
+ // The first varint is 40*value1 + value2:
+ // According to this packing, value1 can take the values 0, 1 and 2 only.
+ // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
+ // then there are no restrictions on value2.
+ v, offset, err := parseBase128Int(bytes, 0)
+ if err != nil {
+ return
+ }
+ if v < 80 {
+ s[0] = v / 40
+ s[1] = v % 40
+ } else {
+ s[0] = 2
+ s[1] = v - 80
+ }
+
+ i := 2
+ for ; offset < len(bytes); i++ {
+ v, offset, err = parseBase128Int(bytes, offset)
+ if err != nil {
+ return
+ }
+ s[i] = v
+ }
+ s = s[0:i]
+ return
+}
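+
+// For example (a worked sketch): the contents bytes {0x2a, 0x86, 0x48} decode
+// as follows. The first octet 0x2a (42) is 40*1 + 2, giving components 1.2;
+// the next two octets form one base-128 varint, (0x86&0x7f)<<7 | 0x48 = 840.
+// The result is the ObjectIdentifier 1.2.840.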
+
+// ENUMERATED
+
+// An Enumerated is represented as a plain int.
+type Enumerated int
+
+// FLAG
+
+// A Flag accepts any data and is set to true if present.
+type Flag bool
+
+// parseBase128Int parses a base-128 encoded int from the given offset in the
+// given byte slice. It returns the value and the new offset.
+func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) {
+ offset = initOffset
+ var ret64 int64
+ for shifted := 0; offset < len(bytes); shifted++ {
+ // 5 * 7 bits per byte == 35 bits of data
+ // Thus the representation is either non-minimal or too large for an int32
+ if shifted == 5 {
+ err = StructuralError{"base 128 integer too large"}
+ return
+ }
+ ret64 <<= 7
+ b := bytes[offset]
+ // integers should be minimally encoded, so the leading octet should
+ // never be 0x80
+ if shifted == 0 && b == 0x80 {
+ err = SyntaxError{"integer is not minimally encoded"}
+ return
+ }
+ ret64 |= int64(b & 0x7f)
+ offset++
+ if b&0x80 == 0 {
+ ret = int(ret64)
+ // Ensure that the returned value fits in an int on all platforms
+ if ret64 > math.MaxInt32 {
+ err = StructuralError{"base 128 integer too large"}
+ }
+ return
+ }
+ }
+ err = SyntaxError{"truncated base 128 integer"}
+ return
+}
+
+// UTCTime
+
+func parseUTCTime(bytes []byte) (ret time.Time, err error) {
+ s := string(bytes)
+
+ formatStr := "0601021504Z0700"
+ ret, err = time.Parse(formatStr, s)
+ if err != nil {
+ formatStr = "060102150405Z0700"
+ ret, err = time.Parse(formatStr, s)
+ }
+ if err != nil {
+ return
+ }
+
+ if serialized := ret.Format(formatStr); serialized != s {
+ err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized)
+ return
+ }
+
+ if ret.Year() >= 2050 {
+ // UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
+ ret = ret.AddDate(-100, 0, 0)
+ }
+
+ return
+}
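+
+// For example (a worked sketch): the UTCTime "9105061645Z" parses as
+// 1991-05-06 16:45 UTC. A value such as "5005061645Z" initially parses as
+// year 2050 and is therefore shifted back a century to 1950, per RFC 5280.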
+
+// parseGeneralizedTime parses the GeneralizedTime from the given byte slice
+// and returns the resulting time.
+func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) {
+ const formatStr = "20060102150405Z0700"
+ s := string(bytes)
+
+ if ret, err = time.Parse(formatStr, s); err != nil {
+ return
+ }
+
+ if serialized := ret.Format(formatStr); serialized != s {
+ err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized)
+ }
+
+ return
+}
+
+// NumericString
+
+// parseNumericString parses an ASN.1 NumericString from the given byte array
+// and returns it.
+func parseNumericString(bytes []byte) (ret string, err error) {
+ for _, b := range bytes {
+ if !isNumeric(b) {
+ return "", SyntaxError{"NumericString contains invalid character"}
+ }
+ }
+ return string(bytes), nil
+}
+
+// isNumeric reports whether the given b is in the ASN.1 NumericString set.
+func isNumeric(b byte) bool {
+ return '0' <= b && b <= '9' ||
+ b == ' '
+}
+
+// PrintableString
+
+// parsePrintableString parses an ASN.1 PrintableString from the given byte
+// array and returns it.
+func parsePrintableString(bytes []byte) (ret string, err error) {
+ for _, b := range bytes {
+ if !isPrintable(b, allowAsterisk, allowAmpersand) {
+ err = SyntaxError{"PrintableString contains invalid character"}
+ return
+ }
+ }
+ ret = string(bytes)
+ return
+}
+
+type asteriskFlag bool
+type ampersandFlag bool
+
+const (
+ allowAsterisk asteriskFlag = true
+ rejectAsterisk asteriskFlag = false
+
+ allowAmpersand ampersandFlag = true
+ rejectAmpersand ampersandFlag = false
+)
+
+// isPrintable reports whether the given b is in the ASN.1 PrintableString set.
+// If asterisk is allowAsterisk then '*' is also allowed, reflecting existing
+// practice. If ampersand is allowAmpersand then '&' is allowed as well.
+func isPrintable(b byte, asterisk asteriskFlag, ampersand ampersandFlag) bool {
+ return 'a' <= b && b <= 'z' ||
+ 'A' <= b && b <= 'Z' ||
+ '0' <= b && b <= '9' ||
+ '\'' <= b && b <= ')' ||
+ '+' <= b && b <= '/' ||
+ b == ' ' ||
+ b == ':' ||
+ b == '=' ||
+ b == '?' ||
+ // This is technically not allowed in a PrintableString.
+ // However, x509 certificates with wildcard strings don't
+ // always use the correct string type so we permit it.
+ (bool(asterisk) && b == '*') ||
+ // This is not technically allowed either. However, not
+ // only is it relatively common, but there are also a
+ // handful of CA certificates that contain it. At least
+ // one of which will not expire until 2027.
+ (bool(ampersand) && b == '&')
+}
+
+// IA5String
+
+// parseIA5String parses an ASN.1 IA5String (ASCII string) from the given
+// byte slice and returns it.
+func parseIA5String(bytes []byte) (ret string, err error) {
+ for _, b := range bytes {
+ if b >= utf8.RuneSelf {
+ err = SyntaxError{"IA5String contains invalid character"}
+ return
+ }
+ }
+ ret = string(bytes)
+ return
+}
+
+// T61String
+
+// parseT61String parses an ASN.1 T61String (8-bit clean string) from the given
+// byte slice and returns it.
+func parseT61String(bytes []byte) (ret string, err error) {
+ return string(bytes), nil
+}
+
+// UTF8String
+
+// parseUTF8String parses an ASN.1 UTF8String (raw UTF-8) from the given byte
+// array and returns it.
+func parseUTF8String(bytes []byte) (ret string, err error) {
+ if !utf8.Valid(bytes) {
+ return "", errors.New("asn1: invalid UTF-8 string")
+ }
+ return string(bytes), nil
+}
+
+// BMPString
+
+// parseBMPString parses an ASN.1 BMPString (Basic Multilingual Plane of
+// ISO/IEC/ITU 10646-1) from the given byte slice and returns it.
+func parseBMPString(bmpString []byte) (string, error) {
+ if len(bmpString)%2 != 0 {
+ return "", errors.New("pkcs12: odd-length BMP string")
+ }
+
+ // Strip terminator if present.
+ if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 {
+ bmpString = bmpString[:l-2]
+ }
+
+ s := make([]uint16, 0, len(bmpString)/2)
+ for len(bmpString) > 0 {
+ s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1]))
+ bmpString = bmpString[2:]
+ }
+
+ return string(utf16.Decode(s)), nil
+}
+
+// A RawValue represents an undecoded ASN.1 object.
+type RawValue struct {
+ Class, Tag int
+ IsCompound bool
+ Bytes []byte
+ FullBytes []byte // includes the tag and length
+}
+
+// RawContent is used to signal that the undecoded DER data needs to be
+// preserved for a struct. To use it, the first field of the struct must have
+// this type. It's an error for any of the other fields to have this type.
+type RawContent []byte
+
+// Tagging
+
+// parseTagAndLength parses an ASN.1 tag and length pair from the given offset
+// into a byte slice. It returns the parsed data and the new offset. SET and
+// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we
+// don't distinguish between ordered and unordered objects in this code.
+func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) {
+ offset = initOffset
+ // parseTagAndLength should not be called without at least a single
+ // byte to read. Thus this check is for robustness:
+ if offset >= len(bytes) {
+ err = errors.New("asn1: internal error in parseTagAndLength")
+ return
+ }
+ b := bytes[offset]
+ offset++
+ ret.class = int(b >> 6)
+ ret.isCompound = b&0x20 == 0x20
+ ret.tag = int(b & 0x1f)
+
+ // If the bottom five bits are set, then the tag number is actually base 128
+ // encoded afterwards
+ if ret.tag == 0x1f {
+ ret.tag, offset, err = parseBase128Int(bytes, offset)
+ if err != nil {
+ return
+ }
+ // Tags should be encoded in minimal form.
+ if ret.tag < 0x1f {
+ err = SyntaxError{"non-minimal tag"}
+ return
+ }
+ }
+ if offset >= len(bytes) {
+ err = SyntaxError{"truncated tag or length"}
+ return
+ }
+ b = bytes[offset]
+ offset++
+ if b&0x80 == 0 {
+ // The length is encoded in the bottom 7 bits.
+ ret.length = int(b & 0x7f)
+ } else {
+ // Bottom 7 bits give the number of length bytes to follow.
+ numBytes := int(b & 0x7f)
+ if numBytes == 0 {
+ err = SyntaxError{"indefinite length found (not DER)"}
+ return
+ }
+ ret.length = 0
+ for i := 0; i < numBytes; i++ {
+ if offset >= len(bytes) {
+ err = SyntaxError{"truncated tag or length"}
+ return
+ }
+ b = bytes[offset]
+ offset++
+ if ret.length >= 1<<23 {
+ // We can't shift ret.length up without
+ // overflowing.
+ err = StructuralError{"length too large"}
+ return
+ }
+ ret.length <<= 8
+ ret.length |= int(b)
+ if ret.length == 0 {
+ // DER requires that lengths be minimal.
+ err = StructuralError{"superfluous leading zeros in length"}
+ return
+ }
+ }
+ // Short lengths must be encoded in short form.
+ if ret.length < 0x80 {
+ err = StructuralError{"non-minimal length"}
+ return
+ }
+ }
+
+ return
+}
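+
+// For example (a worked sketch): the header bytes {0x30, 0x0d} parse as
+// class 0 (ClassUniversal), isCompound true (bit 0x20 set), tag 16
+// (SEQUENCE), and length 13, leaving offset just past the two header bytes.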
+
+// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse
+// a number of ASN.1 values from the given byte slice and returns them as a
+// slice of Go values of the given type.
+func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) {
+ matchAny, expectedTag, compoundType, ok := getUniversalType(elemType)
+ if !ok {
+ err = StructuralError{"unknown Go type for slice"}
+ return
+ }
+
+ // First we iterate over the input and count the number of elements,
+ // checking that the types are correct in each case.
+ numElements := 0
+ for offset := 0; offset < len(bytes); {
+ var t tagAndLength
+ t, offset, err = parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ switch t.tag {
+ case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString, TagBMPString:
+ // We pretend that various other string types are
+ // PRINTABLE STRINGs so that a sequence of them can be
+ // parsed into a []string.
+ t.tag = TagPrintableString
+ case TagGeneralizedTime, TagUTCTime:
+ // Likewise, both time types are treated the same.
+ t.tag = TagUTCTime
+ }
+
+ if !matchAny && (t.class != ClassUniversal || t.isCompound != compoundType || t.tag != expectedTag) {
+ err = StructuralError{"sequence tag mismatch"}
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"truncated sequence"}
+ return
+ }
+ offset += t.length
+ numElements++
+ }
+ ret = reflect.MakeSlice(sliceType, numElements, numElements)
+ params := fieldParameters{}
+ offset := 0
+ for i := 0; i < numElements; i++ {
+ offset, err = parseField(ret.Index(i), bytes, offset, params)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+var (
+ bitStringType = reflect.TypeOf(BitString{})
+ objectIdentifierType = reflect.TypeOf(ObjectIdentifier{})
+ enumeratedType = reflect.TypeOf(Enumerated(0))
+ flagType = reflect.TypeOf(Flag(false))
+ timeType = reflect.TypeOf(time.Time{})
+ rawValueType = reflect.TypeOf(RawValue{})
+ rawContentsType = reflect.TypeOf(RawContent(nil))
+ bigIntType = reflect.TypeOf(new(big.Int))
+)
+
+// invalidLength reports whether offset + length > sliceLength, or if the
+// addition would overflow.
+func invalidLength(offset, length, sliceLength int) bool {
+ return offset+length < offset || offset+length > sliceLength
+}
+
+// parseField is the main parsing function. Given a byte slice and an offset
+// into the array, it will try to parse a suitable ASN.1 value out and store it
+// in the given Value.
+func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) {
+ offset = initOffset
+ fieldType := v.Type()
+
+ // If we have run out of data, it may be that there are optional elements at the end.
+ if offset == len(bytes) {
+ if !setDefaultValue(v, params) {
+ err = SyntaxError{"sequence truncated"}
+ }
+ return
+ }
+
+ // Deal with the ANY type.
+ if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 {
+ var t tagAndLength
+ t, offset, err = parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"data truncated"}
+ return
+ }
+ var result any
+ if !t.isCompound && t.class == ClassUniversal {
+ innerBytes := bytes[offset : offset+t.length]
+ switch t.tag {
+ case TagPrintableString:
+ result, err = parsePrintableString(innerBytes)
+ case TagNumericString:
+ result, err = parseNumericString(innerBytes)
+ case TagIA5String:
+ result, err = parseIA5String(innerBytes)
+ case TagT61String:
+ result, err = parseT61String(innerBytes)
+ case TagUTF8String:
+ result, err = parseUTF8String(innerBytes)
+ case TagInteger:
+ result, err = parseInt64(innerBytes)
+ case TagBitString:
+ result, err = parseBitString(innerBytes)
+ case TagOID:
+ result, err = parseObjectIdentifier(innerBytes)
+ case TagUTCTime:
+ result, err = parseUTCTime(innerBytes)
+ case TagGeneralizedTime:
+ result, err = parseGeneralizedTime(innerBytes)
+ case TagOctetString:
+ result = innerBytes
+ case TagBMPString:
+ result, err = parseBMPString(innerBytes)
+ default:
+ // If we don't know how to handle the type, we just leave Value as nil.
+ }
+ }
+ offset += t.length
+ if err != nil {
+ return
+ }
+ if result != nil {
+ v.Set(reflect.ValueOf(result))
+ }
+ return
+ }
+
+ t, offset, err := parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ if params.explicit {
+ expectedClass := ClassContextSpecific
+ if params.application {
+ expectedClass = ClassApplication
+ }
+ if offset == len(bytes) {
+ err = StructuralError{"explicit tag has no child"}
+ return
+ }
+ if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) {
+ if fieldType == rawValueType {
+ // The inner element should not be parsed for RawValues.
+ } else if t.length > 0 {
+ t, offset, err = parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ } else {
+ if fieldType != flagType {
+ err = StructuralError{"zero length explicit tag was not an asn1.Flag"}
+ return
+ }
+ v.SetBool(true)
+ return
+ }
+ } else {
+ // The tags didn't match, it might be an optional element.
+ ok := setDefaultValue(v, params)
+ if ok {
+ offset = initOffset
+ } else {
+ err = StructuralError{"explicitly tagged member didn't match"}
+ }
+ return
+ }
+ }
+
+ matchAny, universalTag, compoundType, ok1 := getUniversalType(fieldType)
+ if !ok1 {
+ err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)}
+ return
+ }
+
+ // Special case for strings: all the ASN.1 string types map to the Go
+ // type string. getUniversalType returns the tag for PrintableString
+ // when it sees a string, so if we see a different string type on the
+ // wire, we change the universal type to match.
+ if universalTag == TagPrintableString {
+ if t.class == ClassUniversal {
+ switch t.tag {
+ case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString, TagBMPString:
+ universalTag = t.tag
+ }
+ } else if params.stringType != 0 {
+ universalTag = params.stringType
+ }
+ }
+
+ // Special case for time: UTCTime and GeneralizedTime both map to the
+ // Go type time.Time.
+ if universalTag == TagUTCTime && t.tag == TagGeneralizedTime && t.class == ClassUniversal {
+ universalTag = TagGeneralizedTime
+ }
+
+ if params.set {
+ universalTag = TagSet
+ }
+
+ matchAnyClassAndTag := matchAny
+ expectedClass := ClassUniversal
+ expectedTag := universalTag
+
+ if !params.explicit && params.tag != nil {
+ expectedClass = ClassContextSpecific
+ expectedTag = *params.tag
+ matchAnyClassAndTag = false
+ }
+
+ if !params.explicit && params.application && params.tag != nil {
+ expectedClass = ClassApplication
+ expectedTag = *params.tag
+ matchAnyClassAndTag = false
+ }
+
+ if !params.explicit && params.private && params.tag != nil {
+ expectedClass = ClassPrivate
+ expectedTag = *params.tag
+ matchAnyClassAndTag = false
+ }
+
+ // We have unwrapped any explicit tagging at this point.
+ if !matchAnyClassAndTag && (t.class != expectedClass || t.tag != expectedTag) ||
+ (!matchAny && t.isCompound != compoundType) {
+ // Tags don't match. Again, it could be an optional element.
+ ok := setDefaultValue(v, params)
+ if ok {
+ offset = initOffset
+ } else {
+ err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)}
+ }
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"data truncated"}
+ return
+ }
+ innerBytes := bytes[offset : offset+t.length]
+ offset += t.length
+
+ // We deal with the structures defined in this package first.
+ switch v := v.Addr().Interface().(type) {
+ case *RawValue:
+ *v = RawValue{t.class, t.tag, t.isCompound, innerBytes, bytes[initOffset:offset]}
+ return
+ case *ObjectIdentifier:
+ *v, err = parseObjectIdentifier(innerBytes)
+ return
+ case *BitString:
+ *v, err = parseBitString(innerBytes)
+ return
+ case *time.Time:
+ if universalTag == TagUTCTime {
+ *v, err = parseUTCTime(innerBytes)
+ return
+ }
+ *v, err = parseGeneralizedTime(innerBytes)
+ return
+ case *Enumerated:
+ parsedInt, err1 := parseInt32(innerBytes)
+ if err1 == nil {
+ *v = Enumerated(parsedInt)
+ }
+ err = err1
+ return
+ case *Flag:
+ *v = true
+ return
+ case **big.Int:
+ parsedInt, err1 := parseBigInt(innerBytes)
+ if err1 == nil {
+ *v = parsedInt
+ }
+ err = err1
+ return
+ }
+ switch val := v; val.Kind() {
+ case reflect.Bool:
+ parsedBool, err1 := parseBool(innerBytes)
+ if err1 == nil {
+ val.SetBool(parsedBool)
+ }
+ err = err1
+ return
+ case reflect.Int, reflect.Int32, reflect.Int64:
+ if val.Type().Size() == 4 {
+ parsedInt, err1 := parseInt32(innerBytes)
+ if err1 == nil {
+ val.SetInt(int64(parsedInt))
+ }
+ err = err1
+ } else {
+ parsedInt, err1 := parseInt64(innerBytes)
+ if err1 == nil {
+ val.SetInt(parsedInt)
+ }
+ err = err1
+ }
+ return
+ // TODO(dfc) Add support for the remaining integer types
+ case reflect.Struct:
+ structType := fieldType
+
+ for i := 0; i < structType.NumField(); i++ {
+ if !structType.Field(i).IsExported() {
+ err = StructuralError{"struct contains unexported fields"}
+ return
+ }
+ }
+
+ if structType.NumField() > 0 &&
+ structType.Field(0).Type == rawContentsType {
+ bytes := bytes[initOffset:offset]
+ val.Field(0).Set(reflect.ValueOf(RawContent(bytes)))
+ }
+
+ innerOffset := 0
+ for i := 0; i < structType.NumField(); i++ {
+ field := structType.Field(i)
+ if i == 0 && field.Type == rawContentsType {
+ continue
+ }
+ innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, parseFieldParameters(field.Tag.Get("asn1")))
+ if err != nil {
+ return
+ }
+ }
+ // We allow extra bytes at the end of the SEQUENCE because
+ // adding elements to the end has been used in X.509 as the
+ // version numbers have increased.
+ return
+ case reflect.Slice:
+ sliceType := fieldType
+ if sliceType.Elem().Kind() == reflect.Uint8 {
+ val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes)))
+ reflect.Copy(val, reflect.ValueOf(innerBytes))
+ return
+ }
+ newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem())
+ if err1 == nil {
+ val.Set(newSlice)
+ }
+ err = err1
+ return
+ case reflect.String:
+ var v string
+ switch universalTag {
+ case TagPrintableString:
+ v, err = parsePrintableString(innerBytes)
+ case TagNumericString:
+ v, err = parseNumericString(innerBytes)
+ case TagIA5String:
+ v, err = parseIA5String(innerBytes)
+ case TagT61String:
+ v, err = parseT61String(innerBytes)
+ case TagUTF8String:
+ v, err = parseUTF8String(innerBytes)
+ case TagGeneralString:
+ // GeneralString is specified in ISO-2022/ECMA-35,
+ // A brief review suggests that it includes structures
+ // that allow the encoding to change midstring and
+ // such. We give up and pass it as an 8-bit string.
+ v, err = parseT61String(innerBytes)
+ case TagBMPString:
+ v, err = parseBMPString(innerBytes)
+ default:
+ err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)}
+ }
+ if err == nil {
+ val.SetString(v)
+ }
+ return
+ }
+ err = StructuralError{"unsupported: " + v.Type().String()}
+ return
+}
+
+// canHaveDefaultValue reports whether k is a Kind that we will set a default
+// value for. (A signed integer, essentially.)
+func canHaveDefaultValue(k reflect.Kind) bool {
+ switch k {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return true
+ }
+
+ return false
+}
+
+// setDefaultValue is used to install a default value, from a tag string, into
+// a Value. It is successful if the field was optional, even if a default value
+// wasn't provided or it failed to install it into the Value.
+func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) {
+ if !params.optional {
+ return
+ }
+ ok = true
+ if params.defaultValue == nil {
+ return
+ }
+ if canHaveDefaultValue(v.Kind()) {
+ v.SetInt(*params.defaultValue)
+ }
+ return
+}
+
+// Unmarshal parses the DER-encoded ASN.1 data structure b
+// and uses the reflect package to fill in an arbitrary value pointed at by val.
+// Because Unmarshal uses the reflect package, the structs
+// being written to must use upper case field names. If val
+// is nil or not a pointer, Unmarshal returns an error.
+//
+// After parsing b, any bytes that were leftover and not used to fill
+// val will be returned in rest. When parsing a SEQUENCE into a struct,
+// any trailing elements of the SEQUENCE that do not have matching
+// fields in val will not be included in rest, as these are considered
+// valid elements of the SEQUENCE and not trailing data.
+//
+// An ASN.1 INTEGER can be written to an int, int32, int64,
+// or *big.Int (from the math/big package).
+// If the encoded value does not fit in the Go type,
+// Unmarshal returns a parse error.
+//
+// An ASN.1 BIT STRING can be written to a BitString.
+//
+// An ASN.1 OCTET STRING can be written to a []byte.
+//
+// An ASN.1 OBJECT IDENTIFIER can be written to an
+// ObjectIdentifier.
+//
+// An ASN.1 ENUMERATED can be written to an Enumerated.
+//
+// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time.
+//
+// An ASN.1 PrintableString, IA5String, or NumericString can be written to a string.
+//
+// Any of the above ASN.1 values can be written to an interface{}.
+// The value stored in the interface has the corresponding Go type.
+// For integers, that type is int64.
+//
+// An ASN.1 SEQUENCE OF x or SET OF x can be written
+// to a slice if an x can be written to the slice's element type.
+//
+// An ASN.1 SEQUENCE or SET can be written to a struct
+// if each of the elements in the sequence can be
+// written to the corresponding element in the struct.
+//
+// The following tags on struct fields have special meaning to Unmarshal:
+//
+// application specifies that an APPLICATION tag is used
+// private specifies that a PRIVATE tag is used
+// default:x sets the default value for optional integer fields (only used if optional is also present)
+// explicit specifies that an additional, explicit tag wraps the implicit one
+// optional marks the field as ASN.1 OPTIONAL
+// set causes a SET, rather than a SEQUENCE type to be expected
+// tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC
+//
+// When decoding an ASN.1 value with an IMPLICIT tag into a string field,
+// Unmarshal will default to a PrintableString, which doesn't support
+// characters such as '@' and '&'. To force other encodings, use the following
+// tags:
+//
+// ia5 causes strings to be unmarshaled as ASN.1 IA5String values
+// numeric causes strings to be unmarshaled as ASN.1 NumericString values
+// utf8 causes strings to be unmarshaled as ASN.1 UTF8String values
+//
+// If the type of the first field of a structure is RawContent then the raw
+// ASN.1 contents of the struct will be stored in it.
+//
+// If the name of a slice type ends with "SET" then it's treated as if
+// the "set" tag was set on it. This results in interpreting the type as a
+// SET OF x rather than a SEQUENCE OF x. This can be used with nested slices
+// where a struct tag cannot be given.
+//
+// Other ASN.1 types are not supported; if it encounters them,
+// Unmarshal returns a parse error.
+func Unmarshal(b []byte, val any) (rest []byte, err error) {
+ return UnmarshalWithParams(b, val, "")
+}
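+
+// A minimal caller-side sketch: decoding a DER SEQUENCE of an INTEGER and a
+// UTF8String into a struct (der and the field names are illustrative):
+//
+//	type pair struct {
+//		N int
+//		S string `asn1:"utf8"`
+//	}
+//	var p pair
+//	rest, err := asn1.Unmarshal(der, &p)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = rest // any trailing bytes after the SEQUENCE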
+
+// An invalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type invalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+func (e *invalidUnmarshalError) Error() string {
+ if e.Type == nil {
+ return "asn1: Unmarshal recipient value is nil"
+ }
+
+ if e.Type.Kind() != reflect.Pointer {
+ return "asn1: Unmarshal recipient value is non-pointer " + e.Type.String()
+ }
+ return "asn1: Unmarshal recipient value is nil " + e.Type.String()
+}
+
+// UnmarshalWithParams allows field parameters to be specified for the
+// top-level element. The form of the params is the same as the field tags.
+func UnmarshalWithParams(b []byte, val any, params string) (rest []byte, err error) {
+ v := reflect.ValueOf(val)
+ if v.Kind() != reflect.Pointer || v.IsNil() {
+ return nil, &invalidUnmarshalError{reflect.TypeOf(val)}
+ }
+ offset, err := parseField(v.Elem(), b, 0, parseFieldParameters(params))
+ if err != nil {
+ return nil, err
+ }
+ return b[offset:], nil
+}
diff --git a/contrib/go/_std_1.18/src/encoding/asn1/common.go b/contrib/go/_std_1.19/src/encoding/asn1/common.go
index 40115df8b4..40115df8b4 100644
--- a/contrib/go/_std_1.18/src/encoding/asn1/common.go
+++ b/contrib/go/_std_1.19/src/encoding/asn1/common.go
diff --git a/contrib/go/_std_1.18/src/encoding/asn1/marshal.go b/contrib/go/_std_1.19/src/encoding/asn1/marshal.go
index c243349175..c243349175 100644
--- a/contrib/go/_std_1.18/src/encoding/asn1/marshal.go
+++ b/contrib/go/_std_1.19/src/encoding/asn1/marshal.go
diff --git a/contrib/go/_std_1.18/src/encoding/base64/base64.go b/contrib/go/_std_1.19/src/encoding/base64/base64.go
index 4a3e590649..4a3e590649 100644
--- a/contrib/go/_std_1.18/src/encoding/base64/base64.go
+++ b/contrib/go/_std_1.19/src/encoding/base64/base64.go
diff --git a/contrib/go/_std_1.19/src/encoding/binary/binary.go b/contrib/go/_std_1.19/src/encoding/binary/binary.go
new file mode 100644
index 0000000000..0681511fbb
--- /dev/null
+++ b/contrib/go/_std_1.19/src/encoding/binary/binary.go
@@ -0,0 +1,804 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package binary implements simple translation between numbers and byte
+// sequences, as well as encoding and decoding of varints.
+//
+// Numbers are translated by reading and writing fixed-size values.
+// A fixed-size value is either a fixed-size arithmetic
+// type (bool, int8, uint8, int16, float32, complex64, ...)
+// or an array or struct containing only fixed-size values.
+//
+// The varint functions encode and decode single integer values using
+// a variable-length encoding; smaller values require fewer bytes.
+// For a specification, see
+// https://developers.google.com/protocol-buffers/docs/encoding.
+//
+// This package favors simplicity over efficiency. Clients that require
+// high-performance serialization, especially for large data structures,
+// should look at more advanced solutions such as the encoding/gob
+// package or protocol buffers.
+package binary
+
+import (
+ "errors"
+ "io"
+ "math"
+ "reflect"
+ "sync"
+)
+
+// A ByteOrder specifies how to convert byte slices into
+// 16-, 32-, or 64-bit unsigned integers.
+type ByteOrder interface {
+ Uint16([]byte) uint16
+ Uint32([]byte) uint32
+ Uint64([]byte) uint64
+ PutUint16([]byte, uint16)
+ PutUint32([]byte, uint32)
+ PutUint64([]byte, uint64)
+ String() string
+}
+
+// AppendByteOrder specifies how to append 16-, 32-, or 64-bit unsigned integers
+// into a byte slice.
+type AppendByteOrder interface {
+ AppendUint16([]byte, uint16) []byte
+ AppendUint32([]byte, uint32) []byte
+ AppendUint64([]byte, uint64) []byte
+ String() string
+}
+
+// LittleEndian is the little-endian implementation of ByteOrder and AppendByteOrder.
+var LittleEndian littleEndian
+
+// BigEndian is the big-endian implementation of ByteOrder and AppendByteOrder.
+var BigEndian bigEndian
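+
+// A short sketch of the two byte orders in use (caller code, shown only
+// for illustration):
+//
+//	buf := make([]byte, 4)
+//	binary.BigEndian.PutUint32(buf, 0x01020304) // buf == []byte{0x01, 0x02, 0x03, 0x04}
+//	x := binary.LittleEndian.Uint32(buf)        // x == 0x04030201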
+
+type littleEndian struct{}
+
+func (littleEndian) Uint16(b []byte) uint16 {
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint16(b[0]) | uint16(b[1])<<8
+}
+
+func (littleEndian) PutUint16(b []byte, v uint16) {
+ _ = b[1] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+}
+
+func (littleEndian) AppendUint16(b []byte, v uint16) []byte {
+ return append(b,
+ byte(v),
+ byte(v>>8),
+ )
+}
+
+func (littleEndian) Uint32(b []byte) uint32 {
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func (littleEndian) PutUint32(b []byte, v uint32) {
+ _ = b[3] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ b[2] = byte(v >> 16)
+ b[3] = byte(v >> 24)
+}
+
+func (littleEndian) AppendUint32(b []byte, v uint32) []byte {
+ return append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ )
+}
+
+func (littleEndian) Uint64(b []byte) uint64 {
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func (littleEndian) PutUint64(b []byte, v uint64) {
+ _ = b[7] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ b[2] = byte(v >> 16)
+ b[3] = byte(v >> 24)
+ b[4] = byte(v >> 32)
+ b[5] = byte(v >> 40)
+ b[6] = byte(v >> 48)
+ b[7] = byte(v >> 56)
+}
+
+func (littleEndian) AppendUint64(b []byte, v uint64) []byte {
+ return append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ byte(v>>32),
+ byte(v>>40),
+ byte(v>>48),
+ byte(v>>56),
+ )
+}
+
+func (littleEndian) String() string { return "LittleEndian" }
+
+func (littleEndian) GoString() string { return "binary.LittleEndian" }
+
+type bigEndian struct{}
+
+func (bigEndian) Uint16(b []byte) uint16 {
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint16(b[1]) | uint16(b[0])<<8
+}
+
+func (bigEndian) PutUint16(b []byte, v uint16) {
+ _ = b[1] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v >> 8)
+ b[1] = byte(v)
+}
+
+func (bigEndian) AppendUint16(b []byte, v uint16) []byte {
+ return append(b,
+ byte(v>>8),
+ byte(v),
+ )
+}
+
+func (bigEndian) Uint32(b []byte) uint32 {
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+}
+
+func (bigEndian) PutUint32(b []byte, v uint32) {
+ _ = b[3] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v >> 24)
+ b[1] = byte(v >> 16)
+ b[2] = byte(v >> 8)
+ b[3] = byte(v)
+}
+
+func (bigEndian) AppendUint32(b []byte, v uint32) []byte {
+ return append(b,
+ byte(v>>24),
+ byte(v>>16),
+ byte(v>>8),
+ byte(v),
+ )
+}
+
+func (bigEndian) Uint64(b []byte) uint64 {
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+}
+
+func (bigEndian) PutUint64(b []byte, v uint64) {
+ _ = b[7] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v >> 56)
+ b[1] = byte(v >> 48)
+ b[2] = byte(v >> 40)
+ b[3] = byte(v >> 32)
+ b[4] = byte(v >> 24)
+ b[5] = byte(v >> 16)
+ b[6] = byte(v >> 8)
+ b[7] = byte(v)
+}
+
+func (bigEndian) AppendUint64(b []byte, v uint64) []byte {
+ return append(b,
+ byte(v>>56),
+ byte(v>>48),
+ byte(v>>40),
+ byte(v>>32),
+ byte(v>>24),
+ byte(v>>16),
+ byte(v>>8),
+ byte(v),
+ )
+}
+
+func (bigEndian) String() string { return "BigEndian" }
+
+func (bigEndian) GoString() string { return "binary.BigEndian" }
+
+// Read reads structured binary data from r into data.
+// Data must be a pointer to a fixed-size value or a slice
+// of fixed-size values.
+// Bytes read from r are decoded using the specified byte order
+// and written to successive fields of the data.
+// When decoding boolean values, a zero byte is decoded as false, and
+// any other non-zero byte is decoded as true.
+// When reading into structs, the field data for fields with
+// blank (_) field names is skipped; i.e., blank field names
+// may be used for padding.
+// When reading into a struct, all non-blank fields must be exported
+// or Read may panic.
+//
+// The error is EOF only if no bytes were read.
+// If an EOF happens after reading some but not all the bytes,
+// Read returns ErrUnexpectedEOF.
+func Read(r io.Reader, order ByteOrder, data any) error {
+ // Fast path for basic types and slices.
+ if n := intDataSize(data); n != 0 {
+ bs := make([]byte, n)
+ if _, err := io.ReadFull(r, bs); err != nil {
+ return err
+ }
+ switch data := data.(type) {
+ case *bool:
+ *data = bs[0] != 0
+ case *int8:
+ *data = int8(bs[0])
+ case *uint8:
+ *data = bs[0]
+ case *int16:
+ *data = int16(order.Uint16(bs))
+ case *uint16:
+ *data = order.Uint16(bs)
+ case *int32:
+ *data = int32(order.Uint32(bs))
+ case *uint32:
+ *data = order.Uint32(bs)
+ case *int64:
+ *data = int64(order.Uint64(bs))
+ case *uint64:
+ *data = order.Uint64(bs)
+ case *float32:
+ *data = math.Float32frombits(order.Uint32(bs))
+ case *float64:
+ *data = math.Float64frombits(order.Uint64(bs))
+ case []bool:
+ for i, x := range bs { // Easier to loop over the input for 8-bit values.
+ data[i] = x != 0
+ }
+ case []int8:
+ for i, x := range bs {
+ data[i] = int8(x)
+ }
+ case []uint8:
+ copy(data, bs)
+ case []int16:
+ for i := range data {
+ data[i] = int16(order.Uint16(bs[2*i:]))
+ }
+ case []uint16:
+ for i := range data {
+ data[i] = order.Uint16(bs[2*i:])
+ }
+ case []int32:
+ for i := range data {
+ data[i] = int32(order.Uint32(bs[4*i:]))
+ }
+ case []uint32:
+ for i := range data {
+ data[i] = order.Uint32(bs[4*i:])
+ }
+ case []int64:
+ for i := range data {
+ data[i] = int64(order.Uint64(bs[8*i:]))
+ }
+ case []uint64:
+ for i := range data {
+ data[i] = order.Uint64(bs[8*i:])
+ }
+ case []float32:
+ for i := range data {
+ data[i] = math.Float32frombits(order.Uint32(bs[4*i:]))
+ }
+ case []float64:
+ for i := range data {
+ data[i] = math.Float64frombits(order.Uint64(bs[8*i:]))
+ }
+ default:
+ n = 0 // fast path doesn't apply
+ }
+ if n != 0 {
+ return nil
+ }
+ }
+
+ // Fallback to reflect-based decoding.
+ v := reflect.ValueOf(data)
+ size := -1
+ switch v.Kind() {
+ case reflect.Pointer:
+ v = v.Elem()
+ size = dataSize(v)
+ case reflect.Slice:
+ size = dataSize(v)
+ }
+ if size < 0 {
+ return errors.New("binary.Read: invalid type " + reflect.TypeOf(data).String())
+ }
+ d := &decoder{order: order, buf: make([]byte, size)}
+ if _, err := io.ReadFull(r, d.buf); err != nil {
+ return err
+ }
+ d.value(v)
+ return nil
+}
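+
+// A usage sketch for Read (hypothetical caller code; the input bytes are
+// the little-endian IEEE 754 encoding of pi):
+//
+//	b := bytes.NewReader([]byte{0x18, 0x2d, 0x44, 0x54, 0xfb, 0x21, 0x09, 0x40})
+//	var pi float64
+//	err := binary.Read(b, binary.LittleEndian, &pi) // pi == 3.141592653589793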
+
+// Write writes the binary representation of data into w.
+// Data must be a fixed-size value or a slice of fixed-size
+// values, or a pointer to such data.
+// Boolean values encode as one byte: 1 for true, and 0 for false.
+// Bytes written to w are encoded using the specified byte order
+// and read from successive fields of the data.
+// When writing structs, zero values are written for fields
+// with blank (_) field names.
+func Write(w io.Writer, order ByteOrder, data any) error {
+ // Fast path for basic types and slices.
+ if n := intDataSize(data); n != 0 {
+ bs := make([]byte, n)
+ switch v := data.(type) {
+ case *bool:
+ if *v {
+ bs[0] = 1
+ } else {
+ bs[0] = 0
+ }
+ case bool:
+ if v {
+ bs[0] = 1
+ } else {
+ bs[0] = 0
+ }
+ case []bool:
+ for i, x := range v {
+ if x {
+ bs[i] = 1
+ } else {
+ bs[i] = 0
+ }
+ }
+ case *int8:
+ bs[0] = byte(*v)
+ case int8:
+ bs[0] = byte(v)
+ case []int8:
+ for i, x := range v {
+ bs[i] = byte(x)
+ }
+ case *uint8:
+ bs[0] = *v
+ case uint8:
+ bs[0] = v
+ case []uint8:
+ bs = v
+ case *int16:
+ order.PutUint16(bs, uint16(*v))
+ case int16:
+ order.PutUint16(bs, uint16(v))
+ case []int16:
+ for i, x := range v {
+ order.PutUint16(bs[2*i:], uint16(x))
+ }
+ case *uint16:
+ order.PutUint16(bs, *v)
+ case uint16:
+ order.PutUint16(bs, v)
+ case []uint16:
+ for i, x := range v {
+ order.PutUint16(bs[2*i:], x)
+ }
+ case *int32:
+ order.PutUint32(bs, uint32(*v))
+ case int32:
+ order.PutUint32(bs, uint32(v))
+ case []int32:
+ for i, x := range v {
+ order.PutUint32(bs[4*i:], uint32(x))
+ }
+ case *uint32:
+ order.PutUint32(bs, *v)
+ case uint32:
+ order.PutUint32(bs, v)
+ case []uint32:
+ for i, x := range v {
+ order.PutUint32(bs[4*i:], x)
+ }
+ case *int64:
+ order.PutUint64(bs, uint64(*v))
+ case int64:
+ order.PutUint64(bs, uint64(v))
+ case []int64:
+ for i, x := range v {
+ order.PutUint64(bs[8*i:], uint64(x))
+ }
+ case *uint64:
+ order.PutUint64(bs, *v)
+ case uint64:
+ order.PutUint64(bs, v)
+ case []uint64:
+ for i, x := range v {
+ order.PutUint64(bs[8*i:], x)
+ }
+ case *float32:
+ order.PutUint32(bs, math.Float32bits(*v))
+ case float32:
+ order.PutUint32(bs, math.Float32bits(v))
+ case []float32:
+ for i, x := range v {
+ order.PutUint32(bs[4*i:], math.Float32bits(x))
+ }
+ case *float64:
+ order.PutUint64(bs, math.Float64bits(*v))
+ case float64:
+ order.PutUint64(bs, math.Float64bits(v))
+ case []float64:
+ for i, x := range v {
+ order.PutUint64(bs[8*i:], math.Float64bits(x))
+ }
+ }
+ _, err := w.Write(bs)
+ return err
+ }
+
+ // Fallback to reflect-based encoding.
+ v := reflect.Indirect(reflect.ValueOf(data))
+ size := dataSize(v)
+ if size < 0 {
+ return errors.New("binary.Write: invalid type " + reflect.TypeOf(data).String())
+ }
+ buf := make([]byte, size)
+ e := &encoder{order: order, buf: buf}
+ e.value(v)
+ _, err := w.Write(buf)
+ return err
+}
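+
+// A usage sketch for Write (hypothetical caller code):
+//
+//	buf := new(bytes.Buffer)
+//	err := binary.Write(buf, binary.LittleEndian, uint16(61374))
+//	// buf.Bytes() == []byte{0xbe, 0xef}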
+
+// Size returns how many bytes Write would generate to encode the value v, which
+// must be a fixed-size value or a slice of fixed-size values, or a pointer to such data.
+// If v is neither of these, Size returns -1.
+func Size(v any) int {
+ return dataSize(reflect.Indirect(reflect.ValueOf(v)))
+}
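+
+// For example (header is a hypothetical fixed-size struct; field sizes are
+// summed with no alignment padding, per dataSize below):
+//
+//	type header struct {
+//		Magic uint32
+//		Count uint16
+//	}
+//	n := binary.Size(header{}) // n == 6 (4 + 2)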
+
+var structSize sync.Map // map[reflect.Type]int
+
+// dataSize returns the number of bytes the actual data represented by v occupies in memory.
+// For compound structures, it sums the sizes of the elements. Thus, for instance, for a slice
+// it returns the length of the slice times the element size and does not count the memory
+// occupied by the header. If the type of v is not acceptable, dataSize returns -1.
+func dataSize(v reflect.Value) int {
+ switch v.Kind() {
+ case reflect.Slice:
+ if s := sizeof(v.Type().Elem()); s >= 0 {
+ return s * v.Len()
+ }
+ return -1
+
+ case reflect.Struct:
+ t := v.Type()
+ if size, ok := structSize.Load(t); ok {
+ return size.(int)
+ }
+ size := sizeof(t)
+ structSize.Store(t, size)
+ return size
+
+ default:
+ return sizeof(v.Type())
+ }
+}
+
+// sizeof returns the size >= 0 of variables for the given type or -1 if the type is not acceptable.
+func sizeof(t reflect.Type) int {
+ switch t.Kind() {
+ case reflect.Array:
+ if s := sizeof(t.Elem()); s >= 0 {
+ return s * t.Len()
+ }
+
+ case reflect.Struct:
+ sum := 0
+ for i, n := 0, t.NumField(); i < n; i++ {
+ s := sizeof(t.Field(i).Type)
+ if s < 0 {
+ return -1
+ }
+ sum += s
+ }
+ return sum
+
+ case reflect.Bool,
+ reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ return int(t.Size())
+ }
+
+ return -1
+}
+
+type coder struct {
+ order ByteOrder
+ buf []byte
+ offset int
+}
+
+type decoder coder
+type encoder coder
+
+func (d *decoder) bool() bool {
+ x := d.buf[d.offset]
+ d.offset++
+ return x != 0
+}
+
+func (e *encoder) bool(x bool) {
+ if x {
+ e.buf[e.offset] = 1
+ } else {
+ e.buf[e.offset] = 0
+ }
+ e.offset++
+}
+
+func (d *decoder) uint8() uint8 {
+ x := d.buf[d.offset]
+ d.offset++
+ return x
+}
+
+func (e *encoder) uint8(x uint8) {
+ e.buf[e.offset] = x
+ e.offset++
+}
+
+func (d *decoder) uint16() uint16 {
+ x := d.order.Uint16(d.buf[d.offset : d.offset+2])
+ d.offset += 2
+ return x
+}
+
+func (e *encoder) uint16(x uint16) {
+ e.order.PutUint16(e.buf[e.offset:e.offset+2], x)
+ e.offset += 2
+}
+
+func (d *decoder) uint32() uint32 {
+ x := d.order.Uint32(d.buf[d.offset : d.offset+4])
+ d.offset += 4
+ return x
+}
+
+func (e *encoder) uint32(x uint32) {
+ e.order.PutUint32(e.buf[e.offset:e.offset+4], x)
+ e.offset += 4
+}
+
+func (d *decoder) uint64() uint64 {
+ x := d.order.Uint64(d.buf[d.offset : d.offset+8])
+ d.offset += 8
+ return x
+}
+
+func (e *encoder) uint64(x uint64) {
+ e.order.PutUint64(e.buf[e.offset:e.offset+8], x)
+ e.offset += 8
+}
+
+func (d *decoder) int8() int8 { return int8(d.uint8()) }
+
+func (e *encoder) int8(x int8) { e.uint8(uint8(x)) }
+
+func (d *decoder) int16() int16 { return int16(d.uint16()) }
+
+func (e *encoder) int16(x int16) { e.uint16(uint16(x)) }
+
+func (d *decoder) int32() int32 { return int32(d.uint32()) }
+
+func (e *encoder) int32(x int32) { e.uint32(uint32(x)) }
+
+func (d *decoder) int64() int64 { return int64(d.uint64()) }
+
+func (e *encoder) int64(x int64) { e.uint64(uint64(x)) }
+
+func (d *decoder) value(v reflect.Value) {
+ switch v.Kind() {
+ case reflect.Array:
+ l := v.Len()
+ for i := 0; i < l; i++ {
+ d.value(v.Index(i))
+ }
+
+ case reflect.Struct:
+ t := v.Type()
+ l := v.NumField()
+ for i := 0; i < l; i++ {
+ // Note: Calling v.CanSet() below is an optimization.
+ // It would be sufficient to check the field name,
+ // but creating the StructField info for each field is
+ // costly (run "go test -bench=ReadStruct" and compare
+ // results when making changes to this code).
+ if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
+ d.value(v)
+ } else {
+ d.skip(v)
+ }
+ }
+
+ case reflect.Slice:
+ l := v.Len()
+ for i := 0; i < l; i++ {
+ d.value(v.Index(i))
+ }
+
+ case reflect.Bool:
+ v.SetBool(d.bool())
+
+ case reflect.Int8:
+ v.SetInt(int64(d.int8()))
+ case reflect.Int16:
+ v.SetInt(int64(d.int16()))
+ case reflect.Int32:
+ v.SetInt(int64(d.int32()))
+ case reflect.Int64:
+ v.SetInt(d.int64())
+
+ case reflect.Uint8:
+ v.SetUint(uint64(d.uint8()))
+ case reflect.Uint16:
+ v.SetUint(uint64(d.uint16()))
+ case reflect.Uint32:
+ v.SetUint(uint64(d.uint32()))
+ case reflect.Uint64:
+ v.SetUint(d.uint64())
+
+ case reflect.Float32:
+ v.SetFloat(float64(math.Float32frombits(d.uint32())))
+ case reflect.Float64:
+ v.SetFloat(math.Float64frombits(d.uint64()))
+
+ case reflect.Complex64:
+ v.SetComplex(complex(
+ float64(math.Float32frombits(d.uint32())),
+ float64(math.Float32frombits(d.uint32())),
+ ))
+ case reflect.Complex128:
+ v.SetComplex(complex(
+ math.Float64frombits(d.uint64()),
+ math.Float64frombits(d.uint64()),
+ ))
+ }
+}
+
+func (e *encoder) value(v reflect.Value) {
+ switch v.Kind() {
+ case reflect.Array:
+ l := v.Len()
+ for i := 0; i < l; i++ {
+ e.value(v.Index(i))
+ }
+
+ case reflect.Struct:
+ t := v.Type()
+ l := v.NumField()
+ for i := 0; i < l; i++ {
+ // see comment for corresponding code in decoder.value()
+ if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
+ e.value(v)
+ } else {
+ e.skip(v)
+ }
+ }
+
+ case reflect.Slice:
+ l := v.Len()
+ for i := 0; i < l; i++ {
+ e.value(v.Index(i))
+ }
+
+ case reflect.Bool:
+ e.bool(v.Bool())
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch v.Type().Kind() {
+ case reflect.Int8:
+ e.int8(int8(v.Int()))
+ case reflect.Int16:
+ e.int16(int16(v.Int()))
+ case reflect.Int32:
+ e.int32(int32(v.Int()))
+ case reflect.Int64:
+ e.int64(v.Int())
+ }
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch v.Type().Kind() {
+ case reflect.Uint8:
+ e.uint8(uint8(v.Uint()))
+ case reflect.Uint16:
+ e.uint16(uint16(v.Uint()))
+ case reflect.Uint32:
+ e.uint32(uint32(v.Uint()))
+ case reflect.Uint64:
+ e.uint64(v.Uint())
+ }
+
+ case reflect.Float32, reflect.Float64:
+ switch v.Type().Kind() {
+ case reflect.Float32:
+ e.uint32(math.Float32bits(float32(v.Float())))
+ case reflect.Float64:
+ e.uint64(math.Float64bits(v.Float()))
+ }
+
+ case reflect.Complex64, reflect.Complex128:
+ switch v.Type().Kind() {
+ case reflect.Complex64:
+ x := v.Complex()
+ e.uint32(math.Float32bits(float32(real(x))))
+ e.uint32(math.Float32bits(float32(imag(x))))
+ case reflect.Complex128:
+ x := v.Complex()
+ e.uint64(math.Float64bits(real(x)))
+ e.uint64(math.Float64bits(imag(x)))
+ }
+ }
+}
+
+func (d *decoder) skip(v reflect.Value) {
+ d.offset += dataSize(v)
+}
+
+func (e *encoder) skip(v reflect.Value) {
+ n := dataSize(v)
+ zero := e.buf[e.offset : e.offset+n]
+ for i := range zero {
+ zero[i] = 0
+ }
+ e.offset += n
+}
+
+// intDataSize returns the size of the encoded form of data.
+// It returns zero if the type cannot be handled by the fast path in Read or Write.
+func intDataSize(data any) int {
+ switch data := data.(type) {
+ case bool, int8, uint8, *bool, *int8, *uint8:
+ return 1
+ case []bool:
+ return len(data)
+ case []int8:
+ return len(data)
+ case []uint8:
+ return len(data)
+ case int16, uint16, *int16, *uint16:
+ return 2
+ case []int16:
+ return 2 * len(data)
+ case []uint16:
+ return 2 * len(data)
+ case int32, uint32, *int32, *uint32:
+ return 4
+ case []int32:
+ return 4 * len(data)
+ case []uint32:
+ return 4 * len(data)
+ case int64, uint64, *int64, *uint64:
+ return 8
+ case []int64:
+ return 8 * len(data)
+ case []uint64:
+ return 8 * len(data)
+ case float32, *float32:
+ return 4
+ case float64, *float64:
+ return 8
+ case []float32:
+ return 4 * len(data)
+ case []float64:
+ return 8 * len(data)
+ }
+ return 0
+}
diff --git a/contrib/go/_std_1.19/src/encoding/binary/varint.go b/contrib/go/_std_1.19/src/encoding/binary/varint.go
new file mode 100644
index 0000000000..c807d15f44
--- /dev/null
+++ b/contrib/go/_std_1.19/src/encoding/binary/varint.go
@@ -0,0 +1,157 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package binary
+
+// This file implements "varint" encoding of 64-bit integers.
+// The encoding is:
+// - unsigned integers are serialized 7 bits at a time, starting with the
+// least significant bits
+// - the most significant bit (msb) in each output byte indicates if there
+// is a continuation byte (msb = 1)
+// - signed integers are mapped to unsigned integers using "zig-zag"
+// encoding: Positive values x are written as 2*x + 0, negative values
+// are written as 2*(^x) + 1; that is, negative numbers are complemented
+// and whether to complement is encoded in bit 0.
+//
+// Design note:
+// At most 10 bytes are needed for 64-bit values. The encoding could
+// be more dense: a full 64-bit value needs an extra byte just to hold bit 63.
+// Instead, the msb of the previous byte could be used to hold bit 63 since we
+// know there can't be more than 64 bits. This is a trivial improvement and
+// would reduce the maximum encoding length to 9 bytes. However, it breaks the
+// invariant that the msb is always the "continuation bit" and thus makes the
+// format incompatible with a varint encoding for larger numbers (say 128-bit).
+
+import (
+ "errors"
+ "io"
+)
+
+// MaxVarintLenN is the maximum length of a varint-encoded N-bit integer.
+const (
+ MaxVarintLen16 = 3
+ MaxVarintLen32 = 5
+ MaxVarintLen64 = 10
+)
+
+// AppendUvarint appends the varint-encoded form of x,
+// as generated by PutUvarint, to buf and returns the extended buffer.
+func AppendUvarint(buf []byte, x uint64) []byte {
+ for x >= 0x80 {
+ buf = append(buf, byte(x)|0x80)
+ x >>= 7
+ }
+ return append(buf, byte(x))
+}
+
+// PutUvarint encodes a uint64 into buf and returns the number of bytes written.
+// If the buffer is too small, PutUvarint will panic.
+func PutUvarint(buf []byte, x uint64) int {
+ i := 0
+ for x >= 0x80 {
+ buf[i] = byte(x) | 0x80
+ x >>= 7
+ i++
+ }
+ buf[i] = byte(x)
+ return i + 1
+}
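+
+// A round-trip sketch (caller code, for illustration):
+//
+//	buf := make([]byte, binary.MaxVarintLen64)
+//	n := binary.PutUvarint(buf, 300) // n == 2, buf[:n] == []byte{0xac, 0x02}
+//	x, m := binary.Uvarint(buf[:n])  // x == 300, m == 2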
+
+// Uvarint decodes a uint64 from buf and returns that value and the
+// number of bytes read (> 0). If an error occurred, the value is 0
+// and the number of bytes n is <= 0 meaning:
+//
+// n == 0: buf too small
+// n < 0: value larger than 64 bits (overflow)
+// and -n is the number of bytes read
+func Uvarint(buf []byte) (uint64, int) {
+ var x uint64
+ var s uint
+ for i, b := range buf {
+ if i == MaxVarintLen64 {
+ // Catch byte reads past MaxVarintLen64.
+ // See issue https://golang.org/issues/41185
+ return 0, -(i + 1) // overflow
+ }
+ if b < 0x80 {
+ if i == MaxVarintLen64-1 && b > 1 {
+ return 0, -(i + 1) // overflow
+ }
+ return x | uint64(b)<<s, i + 1
+ }
+ x |= uint64(b&0x7f) << s
+ s += 7
+ }
+ return 0, 0
+}
+
+// AppendVarint appends the varint-encoded form of x,
+// as generated by PutVarint, to buf and returns the extended buffer.
+func AppendVarint(buf []byte, x int64) []byte {
+ ux := uint64(x) << 1
+ if x < 0 {
+ ux = ^ux
+ }
+ return AppendUvarint(buf, ux)
+}
+
+// PutVarint encodes an int64 into buf and returns the number of bytes written.
+// If the buffer is too small, PutVarint will panic.
+func PutVarint(buf []byte, x int64) int {
+ ux := uint64(x) << 1
+ if x < 0 {
+ ux = ^ux
+ }
+ return PutUvarint(buf, ux)
+}
+
+// Varint decodes an int64 from buf and returns that value and the
+// number of bytes read (> 0). If an error occurred, the value is 0
+// and the number of bytes n is <= 0 with the following meaning:
+//
+// n == 0: buf too small
+// n < 0: value larger than 64 bits (overflow)
+// and -n is the number of bytes read
+func Varint(buf []byte) (int64, int) {
+ ux, n := Uvarint(buf) // ok to continue in presence of error
+ x := int64(ux >> 1)
+ if ux&1 != 0 {
+ x = ^x
+ }
+ return x, n
+}
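+
+// The zig-zag mapping in action (caller code, for illustration):
+// 0 encodes as 0, -1 as 1, 1 as 2, -2 as 3, and so on.
+//
+//	buf := make([]byte, binary.MaxVarintLen64)
+//	n := binary.PutVarint(buf, -65) // buf[:n] == []byte{0x81, 0x01} (zig-zag value 129)
+//	x, _ := binary.Varint(buf[:n])  // x == -65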
+
+var overflow = errors.New("binary: varint overflows a 64-bit integer")
+
+// ReadUvarint reads an encoded unsigned integer from r and returns it as a uint64.
+func ReadUvarint(r io.ByteReader) (uint64, error) {
+ var x uint64
+ var s uint
+ for i := 0; i < MaxVarintLen64; i++ {
+ b, err := r.ReadByte()
+ if err != nil {
+ return x, err
+ }
+ if b < 0x80 {
+ if i == MaxVarintLen64-1 && b > 1 {
+ return x, overflow
+ }
+ return x | uint64(b)<<s, nil
+ }
+ x |= uint64(b&0x7f) << s
+ s += 7
+ }
+ return x, overflow
+}
+
+// ReadVarint reads an encoded signed integer from r and returns it as an int64.
+func ReadVarint(r io.ByteReader) (int64, error) {
+ ux, err := ReadUvarint(r) // ok to continue in presence of error
+ x := int64(ux >> 1)
+ if ux&1 != 0 {
+ x = ^x
+ }
+ return x, err
+}
diff --git a/contrib/go/_std_1.19/src/encoding/csv/reader.go b/contrib/go/_std_1.19/src/encoding/csv/reader.go
new file mode 100644
index 0000000000..90a37e6074
--- /dev/null
+++ b/contrib/go/_std_1.19/src/encoding/csv/reader.go
@@ -0,0 +1,462 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package csv reads and writes comma-separated values (CSV) files.
+// There are many kinds of CSV files; this package supports the format
+// described in RFC 4180.
+//
+// A csv file contains zero or more records of one or more fields per record.
+// Each record is separated by the newline character. The final record may
+// optionally be followed by a newline character.
+//
+// field1,field2,field3
+//
+// White space is considered part of a field.
+//
+// Carriage returns before newline characters are silently removed.
+//
+// Blank lines are ignored. A line with only whitespace characters (excluding
+// the ending newline character) is not considered a blank line.
+//
+// Fields which start and stop with the quote character " are called
+// quoted-fields. The beginning and ending quote are not part of the
+// field.
+//
+// The source:
+//
+// normal string,"quoted-field"
+//
+// results in the fields
+//
+// {`normal string`, `quoted-field`}
+//
+// Within a quoted-field a quote character followed by a second quote
+// character is considered a single quote.
+//
+// "the ""word"" is true","a ""quoted-field"""
+//
+// results in
+//
+// {`the "word" is true`, `a "quoted-field"`}
+//
+// Newlines and commas may be included in a quoted-field
+//
+// "Multi-line
+// field","comma is ,"
+//
+// results in
+//
+// {`Multi-line
+// field`, `comma is ,`}
+package csv
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "unicode"
+ "unicode/utf8"
+)
+
+// A ParseError is returned for parsing errors.
+// Line and column numbers are 1-indexed.
+type ParseError struct {
+ StartLine int // Line where the record starts
+ Line int // Line where the error occurred
+ Column int // Column (1-based byte index) where the error occurred
+ Err error // The actual error
+}
+
+func (e *ParseError) Error() string {
+ if e.Err == ErrFieldCount {
+ return fmt.Sprintf("record on line %d: %v", e.Line, e.Err)
+ }
+ if e.StartLine != e.Line {
+ return fmt.Sprintf("record on line %d; parse error on line %d, column %d: %v", e.StartLine, e.Line, e.Column, e.Err)
+ }
+ return fmt.Sprintf("parse error on line %d, column %d: %v", e.Line, e.Column, e.Err)
+}
+
+func (e *ParseError) Unwrap() error { return e.Err }
+
+// These are the errors that can be returned in ParseError.Err.
+var (
+ ErrTrailingComma = errors.New("extra delimiter at end of line") // Deprecated: No longer used.
+ ErrBareQuote = errors.New("bare \" in non-quoted-field")
+ ErrQuote = errors.New("extraneous or missing \" in quoted-field")
+ ErrFieldCount = errors.New("wrong number of fields")
+)
+
+var errInvalidDelim = errors.New("csv: invalid field or comment delimiter")
+
+func validDelim(r rune) bool {
+ return r != 0 && r != '"' && r != '\r' && r != '\n' && utf8.ValidRune(r) && r != utf8.RuneError
+}
+
+// A Reader reads records from a CSV-encoded file.
+//
+// As returned by NewReader, a Reader expects input conforming to RFC 4180.
+// The exported fields can be changed to customize the details before the
+// first call to Read or ReadAll.
+//
+// The Reader converts all \r\n sequences in its input to plain \n,
+// including in multiline field values, so that the returned data does
+// not depend on which line-ending convention an input file uses.
+type Reader struct {
+ // Comma is the field delimiter.
+ // It is set to comma (',') by NewReader.
+ // Comma must be a valid rune and must not be \r, \n,
+ // or the Unicode replacement character (0xFFFD).
+ Comma rune
+
+ // Comment, if not 0, is the comment character. Lines beginning with the
+ // Comment character without preceding whitespace are ignored.
+ // With leading whitespace the Comment character becomes part of the
+ // field, even if TrimLeadingSpace is true.
+ // Comment must be a valid rune and must not be \r, \n,
+ // or the Unicode replacement character (0xFFFD).
+ // It must also not be equal to Comma.
+ Comment rune
+
+ // FieldsPerRecord is the number of expected fields per record.
+ // If FieldsPerRecord is positive, Read requires each record to
+ // have the given number of fields. If FieldsPerRecord is 0, Read sets it to
+ // the number of fields in the first record, so that future records must
+ // have the same field count. If FieldsPerRecord is negative, no check is
+ // made and records may have a variable number of fields.
+ FieldsPerRecord int
+
+ // If LazyQuotes is true, a quote may appear in an unquoted field and a
+ // non-doubled quote may appear in a quoted field.
+ LazyQuotes bool
+
+ // If TrimLeadingSpace is true, leading white space in a field is ignored.
+ // This is done even if the field delimiter, Comma, is white space.
+ TrimLeadingSpace bool
+
+ // ReuseRecord controls whether calls to Read may return a slice sharing
+ // the backing array of the previous call's returned slice for performance.
+ // By default, each call to Read returns newly allocated memory owned by the caller.
+ ReuseRecord bool
+
+ TrailingComma bool // Deprecated: No longer used.
+
+ r *bufio.Reader
+
+ // numLine is the current line being read in the CSV file.
+ numLine int
+
+ // offset is the input stream byte offset of the current reader position.
+ offset int64
+
+ // rawBuffer is a line buffer only used by the readLine method.
+ rawBuffer []byte
+
+ // recordBuffer holds the unescaped fields, one after another.
+ // The fields can be accessed by using the indexes in fieldIndexes.
+	// E.g., for the row `a,"b","c""d",e`, recordBuffer will contain `abc"de`
+ // and fieldIndexes will contain the indexes [1, 2, 5, 6].
+ recordBuffer []byte
+
+ // fieldIndexes is an index of fields inside recordBuffer.
+ // The i'th field ends at offset fieldIndexes[i] in recordBuffer.
+ fieldIndexes []int
+
+ // fieldPositions is an index of field positions for the
+ // last record returned by Read.
+ fieldPositions []position
+
+ // lastRecord is a record cache and only used when ReuseRecord == true.
+ lastRecord []string
+}
+
+// NewReader returns a new Reader that reads from r.
+func NewReader(r io.Reader) *Reader {
+ return &Reader{
+ Comma: ',',
+ r: bufio.NewReader(r),
+ }
+}
+
+// Read reads one record (a slice of fields) from r.
+// If the record has an unexpected number of fields,
+// Read returns the record along with the error ErrFieldCount.
+// Except for that case, Read always returns either a non-nil
+// record or a non-nil error, but not both.
+// If there is no data left to be read, Read returns nil, io.EOF.
+// If ReuseRecord is true, the returned slice may be shared
+// between multiple calls to Read.
+func (r *Reader) Read() (record []string, err error) {
+ if r.ReuseRecord {
+ record, err = r.readRecord(r.lastRecord)
+ r.lastRecord = record
+ } else {
+ record, err = r.readRecord(nil)
+ }
+ return record, err
+}
+
+// FieldPos returns the line and column corresponding to
+// the start of the field with the given index in the slice most recently
+// returned by Read. Numbering of lines and columns starts at 1;
+// columns are counted in bytes, not runes.
+//
+// If this is called with an out-of-bounds index, it panics.
+func (r *Reader) FieldPos(field int) (line, column int) {
+ if field < 0 || field >= len(r.fieldPositions) {
+ panic("out of range index passed to FieldPos")
+ }
+ p := &r.fieldPositions[field]
+ return p.line, p.col
+}
+
+// InputOffset returns the input stream byte offset of the current reader
+// position. The offset gives the location of the end of the most recently
+// read row and the beginning of the next row.
+func (r *Reader) InputOffset() int64 {
+ return r.offset
+}
+
+// pos holds the position of a field in the current line.
+type position struct {
+ line, col int
+}
+
+// ReadAll reads all the remaining records from r.
+// Each record is a slice of fields.
+// A successful call returns err == nil, not err == io.EOF. Because ReadAll is
+// defined to read until EOF, it does not treat end of file as an error to be
+// reported.
+func (r *Reader) ReadAll() (records [][]string, err error) {
+ for {
+ record, err := r.readRecord(nil)
+ if err == io.EOF {
+ return records, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ records = append(records, record)
+ }
+}
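+
+// A usage sketch (hypothetical caller code; the input string is an
+// assumption):
+//
+//	r := csv.NewReader(strings.NewReader("a,b,c\nd,e,f\n"))
+//	records, err := r.ReadAll()
+//	// records == [][]string{{"a", "b", "c"}, {"d", "e", "f"}}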
+
+// readLine reads the next line (with the trailing endline).
+// If EOF is hit without a trailing endline, the endline is omitted.
+// If some bytes were read, then the error is never io.EOF.
+// The result is only valid until the next call to readLine.
+func (r *Reader) readLine() ([]byte, error) {
+ line, err := r.r.ReadSlice('\n')
+ if err == bufio.ErrBufferFull {
+ r.rawBuffer = append(r.rawBuffer[:0], line...)
+ for err == bufio.ErrBufferFull {
+ line, err = r.r.ReadSlice('\n')
+ r.rawBuffer = append(r.rawBuffer, line...)
+ }
+ line = r.rawBuffer
+ }
+ readSize := len(line)
+ if readSize > 0 && err == io.EOF {
+ err = nil
+ // For backwards compatibility, drop trailing \r before EOF.
+ if line[readSize-1] == '\r' {
+ line = line[:readSize-1]
+ }
+ }
+ r.numLine++
+ r.offset += int64(readSize)
+ // Normalize \r\n to \n on all input lines.
+ if n := len(line); n >= 2 && line[n-2] == '\r' && line[n-1] == '\n' {
+ line[n-2] = '\n'
+ line = line[:n-1]
+ }
+ return line, err
+}
+
+// lengthNL reports the number of bytes for the trailing \n.
+func lengthNL(b []byte) int {
+ if len(b) > 0 && b[len(b)-1] == '\n' {
+ return 1
+ }
+ return 0
+}
+
+// nextRune returns the next rune in b or utf8.RuneError.
+func nextRune(b []byte) rune {
+ r, _ := utf8.DecodeRune(b)
+ return r
+}
+
+func (r *Reader) readRecord(dst []string) ([]string, error) {
+ if r.Comma == r.Comment || !validDelim(r.Comma) || (r.Comment != 0 && !validDelim(r.Comment)) {
+ return nil, errInvalidDelim
+ }
+
+ // Read line (automatically skipping past empty lines and any comments).
+ var line []byte
+ var errRead error
+ for errRead == nil {
+ line, errRead = r.readLine()
+ if r.Comment != 0 && nextRune(line) == r.Comment {
+ line = nil
+ continue // Skip comment lines
+ }
+ if errRead == nil && len(line) == lengthNL(line) {
+ line = nil
+ continue // Skip empty lines
+ }
+ break
+ }
+ if errRead == io.EOF {
+ return nil, errRead
+ }
+
+ // Parse each field in the record.
+ var err error
+ const quoteLen = len(`"`)
+ commaLen := utf8.RuneLen(r.Comma)
+ recLine := r.numLine // Starting line for record
+ r.recordBuffer = r.recordBuffer[:0]
+ r.fieldIndexes = r.fieldIndexes[:0]
+ r.fieldPositions = r.fieldPositions[:0]
+ pos := position{line: r.numLine, col: 1}
+parseField:
+ for {
+ if r.TrimLeadingSpace {
+ i := bytes.IndexFunc(line, func(r rune) bool {
+ return !unicode.IsSpace(r)
+ })
+ if i < 0 {
+ i = len(line)
+ pos.col -= lengthNL(line)
+ }
+ line = line[i:]
+ pos.col += i
+ }
+ if len(line) == 0 || line[0] != '"' {
+ // Non-quoted string field
+ i := bytes.IndexRune(line, r.Comma)
+ field := line
+ if i >= 0 {
+ field = field[:i]
+ } else {
+ field = field[:len(field)-lengthNL(field)]
+ }
+ // Check to make sure a quote does not appear in field.
+ if !r.LazyQuotes {
+ if j := bytes.IndexByte(field, '"'); j >= 0 {
+ col := pos.col + j
+ err = &ParseError{StartLine: recLine, Line: r.numLine, Column: col, Err: ErrBareQuote}
+ break parseField
+ }
+ }
+ r.recordBuffer = append(r.recordBuffer, field...)
+ r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
+ r.fieldPositions = append(r.fieldPositions, pos)
+ if i >= 0 {
+ line = line[i+commaLen:]
+ pos.col += i + commaLen
+ continue parseField
+ }
+ break parseField
+ } else {
+ // Quoted string field
+ fieldPos := pos
+ line = line[quoteLen:]
+ pos.col += quoteLen
+ for {
+ i := bytes.IndexByte(line, '"')
+ if i >= 0 {
+ // Hit next quote.
+ r.recordBuffer = append(r.recordBuffer, line[:i]...)
+ line = line[i+quoteLen:]
+ pos.col += i + quoteLen
+ switch rn := nextRune(line); {
+ case rn == '"':
+ // `""` sequence (append quote).
+ r.recordBuffer = append(r.recordBuffer, '"')
+ line = line[quoteLen:]
+ pos.col += quoteLen
+ case rn == r.Comma:
+ // `",` sequence (end of field).
+ line = line[commaLen:]
+ pos.col += commaLen
+ r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
+ r.fieldPositions = append(r.fieldPositions, fieldPos)
+ continue parseField
+ case lengthNL(line) == len(line):
+ // `"\n` sequence (end of line).
+ r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
+ r.fieldPositions = append(r.fieldPositions, fieldPos)
+ break parseField
+ case r.LazyQuotes:
+ // `"` sequence (bare quote).
+ r.recordBuffer = append(r.recordBuffer, '"')
+ default:
+ // `"*` sequence (invalid non-escaped quote).
+ err = &ParseError{StartLine: recLine, Line: r.numLine, Column: pos.col - quoteLen, Err: ErrQuote}
+ break parseField
+ }
+ } else if len(line) > 0 {
+ // Hit end of line (copy all data so far).
+ r.recordBuffer = append(r.recordBuffer, line...)
+ if errRead != nil {
+ break parseField
+ }
+ pos.col += len(line)
+ line, errRead = r.readLine()
+ if len(line) > 0 {
+ pos.line++
+ pos.col = 1
+ }
+ if errRead == io.EOF {
+ errRead = nil
+ }
+ } else {
+ // Abrupt end of file (EOF or error).
+ if !r.LazyQuotes && errRead == nil {
+ err = &ParseError{StartLine: recLine, Line: pos.line, Column: pos.col, Err: ErrQuote}
+ break parseField
+ }
+ r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
+ r.fieldPositions = append(r.fieldPositions, fieldPos)
+ break parseField
+ }
+ }
+ }
+ }
+ if err == nil {
+ err = errRead
+ }
+
+ // Create a single string and create slices out of it.
+ // This pins the memory of the fields together, but allocates once.
+ str := string(r.recordBuffer) // Convert to string once to batch allocations
+ dst = dst[:0]
+ if cap(dst) < len(r.fieldIndexes) {
+ dst = make([]string, len(r.fieldIndexes))
+ }
+ dst = dst[:len(r.fieldIndexes)]
+ var preIdx int
+ for i, idx := range r.fieldIndexes {
+ dst[i] = str[preIdx:idx]
+ preIdx = idx
+ }
+
+ // Check or update the expected fields per record.
+ if r.FieldsPerRecord > 0 {
+ if len(dst) != r.FieldsPerRecord && err == nil {
+ err = &ParseError{
+ StartLine: recLine,
+ Line: recLine,
+ Column: 1,
+ Err: ErrFieldCount,
+ }
+ }
+ } else if r.FieldsPerRecord == 0 {
+ r.FieldsPerRecord = len(dst)
+ }
+ return dst, err
+}
diff --git a/contrib/go/_std_1.18/src/encoding/csv/writer.go b/contrib/go/_std_1.19/src/encoding/csv/writer.go
index ac64b4d54c..ac64b4d54c 100644
--- a/contrib/go/_std_1.18/src/encoding/csv/writer.go
+++ b/contrib/go/_std_1.19/src/encoding/csv/writer.go
diff --git a/contrib/go/_std_1.19/src/encoding/encoding.go b/contrib/go/_std_1.19/src/encoding/encoding.go
new file mode 100644
index 0000000000..cc5a536996
--- /dev/null
+++ b/contrib/go/_std_1.19/src/encoding/encoding.go
@@ -0,0 +1,48 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package encoding defines interfaces shared by other packages that
+// convert data to and from byte-level and textual representations.
+// Packages that check for these interfaces include encoding/gob,
+// encoding/json, and encoding/xml. As a result, implementing an
+// interface once can make a type useful in multiple encodings.
+// Standard types that implement these interfaces include time.Time and net.IP.
+// The interfaces come in pairs that produce and consume encoded data.
+package encoding
+
+// BinaryMarshaler is the interface implemented by an object that can
+// marshal itself into a binary form.
+//
+// MarshalBinary encodes the receiver into a binary form and returns the result.
+type BinaryMarshaler interface {
+ MarshalBinary() (data []byte, err error)
+}
+
+// BinaryUnmarshaler is the interface implemented by an object that can
+// unmarshal a binary representation of itself.
+//
+// UnmarshalBinary must be able to decode the form generated by MarshalBinary.
+// UnmarshalBinary must copy the data if it wishes to retain the data
+// after returning.
+type BinaryUnmarshaler interface {
+ UnmarshalBinary(data []byte) error
+}
+
+// TextMarshaler is the interface implemented by an object that can
+// marshal itself into a textual form.
+//
+// MarshalText encodes the receiver into UTF-8-encoded text and returns the result.
+type TextMarshaler interface {
+ MarshalText() (text []byte, err error)
+}
+
+// TextUnmarshaler is the interface implemented by an object that can
+// unmarshal a textual representation of itself.
+//
+// UnmarshalText must be able to decode the form generated by MarshalText.
+// UnmarshalText must copy the text if it wishes to retain the text
+// after returning.
+type TextUnmarshaler interface {
+ UnmarshalText(text []byte) error
+}
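+
+// A minimal sketch of a type satisfying TextMarshaler and TextUnmarshaler
+// (Celsius is hypothetical, not a standard-library type):
+//
+//	type Celsius float64
+//
+//	func (c Celsius) MarshalText() ([]byte, error) {
+//		return []byte(strconv.FormatFloat(float64(c), 'f', -1, 64)), nil
+//	}
+//
+//	func (c *Celsius) UnmarshalText(text []byte) error {
+//		f, err := strconv.ParseFloat(string(text), 64)
+//		*c = Celsius(f)
+//		return err
+//	}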
diff --git a/contrib/go/_std_1.19/src/encoding/hex/hex.go b/contrib/go/_std_1.19/src/encoding/hex/hex.go
new file mode 100644
index 0000000000..375f583170
--- /dev/null
+++ b/contrib/go/_std_1.19/src/encoding/hex/hex.go
@@ -0,0 +1,335 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hex implements hexadecimal encoding and decoding.
+package hex
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+)
+
+const (
+ hextable = "0123456789abcdef"
+ reverseHexTable = "" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" +
+ "\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+)
+
+// EncodedLen returns the length of an encoding of n source bytes.
+// Specifically, it returns n * 2.
+func EncodedLen(n int) int { return n * 2 }
+
+// Encode encodes src into EncodedLen(len(src))
+// bytes of dst. As a convenience, it returns the number
+// of bytes written to dst, but this value is always EncodedLen(len(src)).
+// Encode implements hexadecimal encoding.
+func Encode(dst, src []byte) int {
+ j := 0
+ for _, v := range src {
+ dst[j] = hextable[v>>4]
+ dst[j+1] = hextable[v&0x0f]
+ j += 2
+ }
+ return len(src) * 2
+}
+
+// ErrLength reports an attempt to decode an odd-length input
+// using Decode or DecodeString.
+// The stream-based Decoder returns io.ErrUnexpectedEOF instead of ErrLength.
+var ErrLength = errors.New("encoding/hex: odd length hex string")
+
+// InvalidByteError values describe errors resulting from an invalid byte in a hex string.
+type InvalidByteError byte
+
+func (e InvalidByteError) Error() string {
+ return fmt.Sprintf("encoding/hex: invalid byte: %#U", rune(e))
+}
+
+// DecodedLen returns the length of a decoding of x source bytes.
+// Specifically, it returns x / 2.
+func DecodedLen(x int) int { return x / 2 }
+
+// Decode decodes src into DecodedLen(len(src)) bytes,
+// returning the actual number of bytes written to dst.
+//
+// Decode expects that src contains only hexadecimal
+// characters and that src has even length.
+// If the input is malformed, Decode returns the number
+// of bytes decoded before the error.
+func Decode(dst, src []byte) (int, error) {
+ i, j := 0, 1
+ for ; j < len(src); j += 2 {
+ p := src[j-1]
+ q := src[j]
+
+ a := reverseHexTable[p]
+ b := reverseHexTable[q]
+ if a > 0x0f {
+ return i, InvalidByteError(p)
+ }
+ if b > 0x0f {
+ return i, InvalidByteError(q)
+ }
+ dst[i] = (a << 4) | b
+ i++
+ }
+ if len(src)%2 == 1 {
+ // Check for invalid char before reporting bad length,
+ // since the invalid char (if present) is an earlier problem.
+ if reverseHexTable[src[j-1]] > 0x0f {
+ return i, InvalidByteError(src[j-1])
+ }
+ return i, ErrLength
+ }
+ return i, nil
+}
+
+// EncodeToString returns the hexadecimal encoding of src.
+func EncodeToString(src []byte) string {
+ dst := make([]byte, EncodedLen(len(src)))
+ Encode(dst, src)
+ return string(dst)
+}
+
+// DecodeString returns the bytes represented by the hexadecimal string s.
+//
+// DecodeString expects that s contains only hexadecimal
+// characters and that s has even length.
+// If the input is malformed, DecodeString returns
+// the bytes decoded before the error.
+func DecodeString(s string) ([]byte, error) {
+ src := []byte(s)
+ // We can use the source slice itself as the destination
+ // because the decode loop increments by one and then the 'seen' byte is not used anymore.
+ n, err := Decode(src, src)
+ return src[:n], err
+}
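+
+// A short sketch of the string helpers (caller code, for illustration):
+//
+//	s := hex.EncodeToString([]byte{0xde, 0xad, 0xbe, 0xef}) // "deadbeef"
+//	b, err := hex.DecodeString("48656c6c6f")                // []byte("Hello"), nil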
+
+// Dump returns a string that contains a hex dump of the given data. The format
+// of the hex dump matches the output of `hexdump -C` on the command line.
+func Dump(data []byte) string {
+ if len(data) == 0 {
+ return ""
+ }
+
+ var buf strings.Builder
+ // Dumper will write 79 bytes per complete 16 byte chunk, and at least
+ // 64 bytes for whatever remains. Round the allocation up, since only a
+ // maximum of 15 bytes will be wasted.
+ buf.Grow((1 + ((len(data) - 1) / 16)) * 79)
+
+ dumper := Dumper(&buf)
+ dumper.Write(data)
+ dumper.Close()
+ return buf.String()
+}
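+
+// For illustration, each output line carries an offset, up to 16 hex bytes,
+// and an ASCII column (spacing abbreviated here):
+//
+//	fmt.Print(hex.Dump([]byte("gopher")))
+//	// 00000000  67 6f 70 68 65 72 ... |gopher|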
+
+// bufferSize is the number of hexadecimal characters to buffer in encoder and decoder.
+const bufferSize = 1024
+
+type encoder struct {
+ w io.Writer
+ err error
+ out [bufferSize]byte // output buffer
+}
+
+// NewEncoder returns an io.Writer that writes lowercase hexadecimal characters to w.
+func NewEncoder(w io.Writer) io.Writer {
+ return &encoder{w: w}
+}
+
+func (e *encoder) Write(p []byte) (n int, err error) {
+ for len(p) > 0 && e.err == nil {
+ chunkSize := bufferSize / 2
+ if len(p) < chunkSize {
+ chunkSize = len(p)
+ }
+
+ var written int
+ encoded := Encode(e.out[:], p[:chunkSize])
+ written, e.err = e.w.Write(e.out[:encoded])
+ n += written / 2
+ p = p[chunkSize:]
+ }
+ return n, e.err
+}
+
+type decoder struct {
+ r io.Reader
+ err error
+ in []byte // input buffer (encoded form)
+ arr [bufferSize]byte // backing array for in
+}
+
+// NewDecoder returns an io.Reader that decodes hexadecimal characters from r.
+// NewDecoder expects that r contain only an even number of hexadecimal characters.
+func NewDecoder(r io.Reader) io.Reader {
+ return &decoder{r: r}
+}
+
+func (d *decoder) Read(p []byte) (n int, err error) {
+ // Fill internal buffer with sufficient bytes to decode
+ if len(d.in) < 2 && d.err == nil {
+ var numCopy, numRead int
+ numCopy = copy(d.arr[:], d.in) // Copies either 0 or 1 bytes
+ numRead, d.err = d.r.Read(d.arr[numCopy:])
+ d.in = d.arr[:numCopy+numRead]
+ if d.err == io.EOF && len(d.in)%2 != 0 {
+ if a := reverseHexTable[d.in[len(d.in)-1]]; a > 0x0f {
+ d.err = InvalidByteError(d.in[len(d.in)-1])
+ } else {
+ d.err = io.ErrUnexpectedEOF
+ }
+ }
+ }
+
+ // Decode internal buffer into output buffer
+ if numAvail := len(d.in) / 2; len(p) > numAvail {
+ p = p[:numAvail]
+ }
+ numDec, err := Decode(p, d.in[:len(p)*2])
+ d.in = d.in[2*numDec:]
+ if err != nil {
+ d.in, d.err = nil, err // Decode error; discard input remainder
+ }
+
+ if len(d.in) < 2 {
+ return numDec, d.err // Only expose errors when buffer fully consumed
+ }
+ return numDec, nil
+}
+
+// Dumper returns a WriteCloser that writes a hex dump of all written data to
+// w. The format of the dump matches the output of `hexdump -C` on the command
+// line.
+func Dumper(w io.Writer) io.WriteCloser {
+ return &dumper{w: w}
+}
+
+type dumper struct {
+ w io.Writer
+ rightChars [18]byte
+ buf [14]byte
+ used int // number of bytes in the current line
+ n uint // number of bytes, total
+ closed bool
+}
+
+func toChar(b byte) byte {
+ if b < 32 || b > 126 {
+ return '.'
+ }
+ return b
+}
+
+func (h *dumper) Write(data []byte) (n int, err error) {
+ if h.closed {
+ return 0, errors.New("encoding/hex: dumper closed")
+ }
+
+ // Output lines look like:
+ // 00000010 2e 2f 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d |./0123456789:;<=|
+ // ^ offset ^ extra space ^ ASCII of line.
+ for i := range data {
+ if h.used == 0 {
+ // At the beginning of a line we print the current
+ // offset in hex.
+ h.buf[0] = byte(h.n >> 24)
+ h.buf[1] = byte(h.n >> 16)
+ h.buf[2] = byte(h.n >> 8)
+ h.buf[3] = byte(h.n)
+ Encode(h.buf[4:], h.buf[:4])
+ h.buf[12] = ' '
+ h.buf[13] = ' '
+ _, err = h.w.Write(h.buf[4:])
+ if err != nil {
+ return
+ }
+ }
+ Encode(h.buf[:], data[i:i+1])
+ h.buf[2] = ' '
+ l := 3
+ if h.used == 7 {
+ // There's an additional space after the 8th byte.
+ h.buf[3] = ' '
+ l = 4
+ } else if h.used == 15 {
+ // At the end of the line there's an extra space and
+ // the bar for the right column.
+ h.buf[3] = ' '
+ h.buf[4] = '|'
+ l = 5
+ }
+ _, err = h.w.Write(h.buf[:l])
+ if err != nil {
+ return
+ }
+ n++
+ h.rightChars[h.used] = toChar(data[i])
+ h.used++
+ h.n++
+ if h.used == 16 {
+ h.rightChars[16] = '|'
+ h.rightChars[17] = '\n'
+ _, err = h.w.Write(h.rightChars[:])
+ if err != nil {
+ return
+ }
+ h.used = 0
+ }
+ }
+ return
+}
+
+func (h *dumper) Close() (err error) {
+ // See the comments in Write() for the details of this format.
+ if h.closed {
+ return
+ }
+ h.closed = true
+ if h.used == 0 {
+ return
+ }
+ h.buf[0] = ' '
+ h.buf[1] = ' '
+ h.buf[2] = ' '
+ h.buf[3] = ' '
+ h.buf[4] = '|'
+ nBytes := h.used
+ for h.used < 16 {
+ l := 3
+ if h.used == 7 {
+ l = 4
+ } else if h.used == 15 {
+ l = 5
+ }
+ _, err = h.w.Write(h.buf[:l])
+ if err != nil {
+ return
+ }
+ h.used++
+ }
+ h.rightChars[nBytes] = '|'
+ h.rightChars[nBytes+1] = '\n'
+ _, err = h.w.Write(h.rightChars[:nBytes+2])
+ return
+}
diff --git a/contrib/go/_std_1.19/src/encoding/pem/pem.go b/contrib/go/_std_1.19/src/encoding/pem/pem.go
new file mode 100644
index 0000000000..d26e4c8399
--- /dev/null
+++ b/contrib/go/_std_1.19/src/encoding/pem/pem.go
@@ -0,0 +1,316 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pem implements the PEM data encoding, which originated in Privacy
+// Enhanced Mail. The most common use of PEM encoding today is in TLS keys and
+// certificates. See RFC 1421.
+package pem
+
+import (
+ "bytes"
+ "encoding/base64"
+ "errors"
+ "io"
+ "sort"
+ "strings"
+)
+
+// A Block represents a PEM encoded structure.
+//
+// The encoded form is:
+//
+// -----BEGIN Type-----
+// Headers
+// base64-encoded Bytes
+// -----END Type-----
+//
+// where Headers is a possibly empty sequence of Key: Value lines.
+type Block struct {
+ Type string // The type, taken from the preamble (i.e. "RSA PRIVATE KEY").
+ Headers map[string]string // Optional headers.
+ Bytes []byte // The decoded bytes of the contents. Typically a DER encoded ASN.1 structure.
+}
+
+// getLine returns the first \r\n or \n delimited line from the given byte
+// array. The line does not include trailing whitespace or the trailing new
+// line bytes. The remainder of the byte array (also not including the new line
+// bytes) is also returned and this will always be smaller than the original
+// argument.
+func getLine(data []byte) (line, rest []byte) {
+ i := bytes.IndexByte(data, '\n')
+ var j int
+ if i < 0 {
+ i = len(data)
+ j = i
+ } else {
+ j = i + 1
+ if i > 0 && data[i-1] == '\r' {
+ i--
+ }
+ }
+ return bytes.TrimRight(data[0:i], " \t"), data[j:]
+}
+
+// removeSpacesAndTabs returns a copy of its input with all spaces and tabs
+// removed, if there were any. Otherwise, the input is returned unchanged.
+//
+// The base64 decoder already skips newline characters, so we don't need to
+// filter them out here.
+func removeSpacesAndTabs(data []byte) []byte {
+ if !bytes.ContainsAny(data, " \t") {
+ // Fast path; most base64 data within PEM contains newlines, but
+ // no spaces nor tabs. Skip the extra alloc and work.
+ return data
+ }
+ result := make([]byte, len(data))
+ n := 0
+
+ for _, b := range data {
+ if b == ' ' || b == '\t' {
+ continue
+ }
+ result[n] = b
+ n++
+ }
+
+ return result[0:n]
+}
+
+var pemStart = []byte("\n-----BEGIN ")
+var pemEnd = []byte("\n-----END ")
+var pemEndOfLine = []byte("-----")
+var colon = []byte(":")
+
+// Decode will find the next PEM formatted block (certificate, private key,
+// etc.) in the input. It returns that block and the remainder of the input. If
+// no PEM data is found, p is nil and the whole of the input is returned in
+// rest.
+func Decode(data []byte) (p *Block, rest []byte) {
+ // pemStart begins with a newline. However, at the very beginning of
+ // the byte array, we'll accept the start string without it.
+ rest = data
+ for {
+ if bytes.HasPrefix(rest, pemStart[1:]) {
+ rest = rest[len(pemStart)-1:]
+ } else if _, after, ok := bytes.Cut(rest, pemStart); ok {
+ rest = after
+ } else {
+ return nil, data
+ }
+
+ var typeLine []byte
+ typeLine, rest = getLine(rest)
+ if !bytes.HasSuffix(typeLine, pemEndOfLine) {
+ continue
+ }
+ typeLine = typeLine[0 : len(typeLine)-len(pemEndOfLine)]
+
+ p = &Block{
+ Headers: make(map[string]string),
+ Type: string(typeLine),
+ }
+
+ for {
+ // This loop terminates because getLine's second result is
+ // always smaller than its argument.
+ if len(rest) == 0 {
+ return nil, data
+ }
+ line, next := getLine(rest)
+
+ key, val, ok := bytes.Cut(line, colon)
+ if !ok {
+ break
+ }
+
+ // TODO(agl): need to cope with values that spread across lines.
+ key = bytes.TrimSpace(key)
+ val = bytes.TrimSpace(val)
+ p.Headers[string(key)] = string(val)
+ rest = next
+ }
+
+ var endIndex, endTrailerIndex int
+
+ // If there were no headers, the END line might occur
+ // immediately, without a leading newline.
+ if len(p.Headers) == 0 && bytes.HasPrefix(rest, pemEnd[1:]) {
+ endIndex = 0
+ endTrailerIndex = len(pemEnd) - 1
+ } else {
+ endIndex = bytes.Index(rest, pemEnd)
+ endTrailerIndex = endIndex + len(pemEnd)
+ }
+
+ if endIndex < 0 {
+ continue
+ }
+
+ // After the "-----" of the ending line, there should be the same type
+ // and then a final five dashes.
+ endTrailer := rest[endTrailerIndex:]
+ endTrailerLen := len(typeLine) + len(pemEndOfLine)
+ if len(endTrailer) < endTrailerLen {
+ continue
+ }
+
+ restOfEndLine := endTrailer[endTrailerLen:]
+ endTrailer = endTrailer[:endTrailerLen]
+ if !bytes.HasPrefix(endTrailer, typeLine) ||
+ !bytes.HasSuffix(endTrailer, pemEndOfLine) {
+ continue
+ }
+
+ // The line must end with only whitespace.
+ if s, _ := getLine(restOfEndLine); len(s) != 0 {
+ continue
+ }
+
+ base64Data := removeSpacesAndTabs(rest[:endIndex])
+ p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data)))
+ n, err := base64.StdEncoding.Decode(p.Bytes, base64Data)
+ if err != nil {
+ continue
+ }
+ p.Bytes = p.Bytes[:n]
+
+ // The -1 is because we might have matched only pemEnd, without the
+ // leading newline, if the PEM block was empty.
+ _, rest = getLine(rest[endIndex+len(pemEnd)-1:])
+ return p, rest
+ }
+}
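+
+// A minimal usage sketch (not part of the upstream file): looping over
+// Decode until all blocks are consumed. pemData is a hypothetical
+// []byte holding, say, a certificate bundle read from disk.
+//
+// for {
+// block, rest := pem.Decode(pemData)
+// if block == nil {
+// break // no further PEM data
+// }
+// fmt.Printf("type %q, %d decoded bytes\n", block.Type, len(block.Bytes))
+// pemData = rest
+// }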
+
+const pemLineLength = 64
+
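+// lineBreaker is an io.Writer that folds its input into lines of
+// pemLineLength bytes, writing a '\n' after each full line; Close
+// flushes any partial final line.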
+type lineBreaker struct {
+ line [pemLineLength]byte
+ used int
+ out io.Writer
+}
+
+var nl = []byte{'\n'}
+
+func (l *lineBreaker) Write(b []byte) (n int, err error) {
+ if l.used+len(b) < pemLineLength {
+ copy(l.line[l.used:], b)
+ l.used += len(b)
+ return len(b), nil
+ }
+
+ n, err = l.out.Write(l.line[0:l.used])
+ if err != nil {
+ return
+ }
+ excess := pemLineLength - l.used
+ l.used = 0
+
+ n, err = l.out.Write(b[0:excess])
+ if err != nil {
+ return
+ }
+
+ n, err = l.out.Write(nl)
+ if err != nil {
+ return
+ }
+
+ return l.Write(b[excess:])
+}
+
+func (l *lineBreaker) Close() (err error) {
+ if l.used > 0 {
+ _, err = l.out.Write(l.line[0:l.used])
+ if err != nil {
+ return
+ }
+ _, err = l.out.Write(nl)
+ }
+
+ return
+}
+
+func writeHeader(out io.Writer, k, v string) error {
+ _, err := out.Write([]byte(k + ": " + v + "\n"))
+ return err
+}
+
+// Encode writes the PEM encoding of b to out.
+func Encode(out io.Writer, b *Block) error {
+ // Check for invalid block before writing any output.
+ for k := range b.Headers {
+ if strings.Contains(k, ":") {
+ return errors.New("pem: cannot encode a header key that contains a colon")
+ }
+ }
+
+ // All errors below are relayed from the underlying io.Writer,
+ // so it is now safe to write data.
+
+ if _, err := out.Write(pemStart[1:]); err != nil {
+ return err
+ }
+ if _, err := out.Write([]byte(b.Type + "-----\n")); err != nil {
+ return err
+ }
+
+ if len(b.Headers) > 0 {
+ const procType = "Proc-Type"
+ h := make([]string, 0, len(b.Headers))
+ hasProcType := false
+ for k := range b.Headers {
+ if k == procType {
+ hasProcType = true
+ continue
+ }
+ h = append(h, k)
+ }
+ // The Proc-Type header must be written first.
+ // See RFC 1421, section 4.6.1.1
+ if hasProcType {
+ if err := writeHeader(out, procType, b.Headers[procType]); err != nil {
+ return err
+ }
+ }
+ // For consistency of output, write other headers sorted by key.
+ sort.Strings(h)
+ for _, k := range h {
+ if err := writeHeader(out, k, b.Headers[k]); err != nil {
+ return err
+ }
+ }
+ if _, err := out.Write(nl); err != nil {
+ return err
+ }
+ }
+
+ var breaker lineBreaker
+ breaker.out = out
+
+ b64 := base64.NewEncoder(base64.StdEncoding, &breaker)
+ if _, err := b64.Write(b.Bytes); err != nil {
+ return err
+ }
+ b64.Close()
+ breaker.Close()
+
+ if _, err := out.Write(pemEnd[1:]); err != nil {
+ return err
+ }
+ _, err := out.Write([]byte(b.Type + "-----\n"))
+ return err
+}
+
+// EncodeToMemory returns the PEM encoding of b.
+//
+// If b has invalid headers and cannot be encoded,
+// EncodeToMemory returns nil. If it is important to
+// report details about this error case, use Encode instead.
+func EncodeToMemory(b *Block) []byte {
+ var buf bytes.Buffer
+ if err := Encode(&buf, b); err != nil {
+ return nil
+ }
+ return buf.Bytes()
+}
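+
+// A minimal round-trip sketch (not part of the upstream file); the
+// block contents are hypothetical:
+//
+// block := &pem.Block{Type: "MESSAGE", Bytes: []byte("hello")}
+// if err := pem.Encode(os.Stdout, block); err != nil {
+// log.Fatal(err)
+// }
+// // EncodeToMemory reports invalid headers only as a nil result, so
+// // prefer Encode when the failure reason matters.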
diff --git a/contrib/go/_std_1.18/src/errors/errors.go b/contrib/go/_std_1.19/src/errors/errors.go
index f2fabacd4e..f2fabacd4e 100644
--- a/contrib/go/_std_1.18/src/errors/errors.go
+++ b/contrib/go/_std_1.19/src/errors/errors.go
diff --git a/contrib/go/_std_1.18/src/errors/wrap.go b/contrib/go/_std_1.19/src/errors/wrap.go
index 263ae16b48..263ae16b48 100644
--- a/contrib/go/_std_1.18/src/errors/wrap.go
+++ b/contrib/go/_std_1.19/src/errors/wrap.go
diff --git a/contrib/go/_std_1.19/src/flag/flag.go b/contrib/go/_std_1.19/src/flag/flag.go
new file mode 100644
index 0000000000..9abf8d769e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/flag/flag.go
@@ -0,0 +1,1180 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package flag implements command-line flag parsing.
+
+# Usage
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -n, stored in the pointer nFlag, with type *int:
+
+ import "flag"
+ var nFlag = flag.Int("n", 1234, "help message for flag n")
+
+If you like, you can bind the flag to a variable using the Var() functions.
+
+ var flagvar int
+ func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+ }
+
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+
+ flag.Var(&flagVal, "name", "help message for flagname")
+
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+
+ flag.Parse()
+
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+
+ fmt.Println("ip has value ", *ip)
+ fmt.Println("flagvar has value ", flagvar)
+
+After parsing, the arguments following the flags are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+# Command line flag syntax
+
+The following forms are permitted:
+
+ -flag
+ --flag // double dashes are also permitted
+ -flag=x
+ -flag x // non-boolean flags only
+
+One or two dashes may be used; they are equivalent.
+The last form is not permitted for boolean flags because the
+meaning of the command
+
+ cmd -x *
+
+where * is a Unix shell wildcard, will change if there is a file
+called 0, false, etc. You must use the -flag=false form to turn
+off a boolean flag.
+
+Flag parsing stops just before the first non-flag argument
+("-" is a non-flag argument) or after the terminator "--".
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags may be:
+
+ 1, 0, t, f, T, F, true, false, TRUE, FALSE, True, False
+
+Duration flags accept any input valid for time.ParseDuration.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+*/
+package flag
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// ErrHelp is the error returned if the -help or -h flag is invoked
+// but no such flag is defined.
+var ErrHelp = errors.New("flag: help requested")
+
+// errParse is returned by Set if a flag's value fails to parse, such as with an invalid integer for Int.
+// It then gets wrapped through failf to provide more information.
+var errParse = errors.New("parse error")
+
+// errRange is returned by Set if a flag's value is out of range.
+// It then gets wrapped through failf to provide more information.
+var errRange = errors.New("value out of range")
+
+func numError(err error) error {
+ ne, ok := err.(*strconv.NumError)
+ if !ok {
+ return err
+ }
+ if ne.Err == strconv.ErrSyntax {
+ return errParse
+ }
+ if ne.Err == strconv.ErrRange {
+ return errRange
+ }
+ return err
+}
+
+// -- bool Value
+type boolValue bool
+
+func newBoolValue(val bool, p *bool) *boolValue {
+ *p = val
+ return (*boolValue)(p)
+}
+
+func (b *boolValue) Set(s string) error {
+ v, err := strconv.ParseBool(s)
+ if err != nil {
+ err = errParse
+ }
+ *b = boolValue(v)
+ return err
+}
+
+func (b *boolValue) Get() any { return bool(*b) }
+
+func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+ Value
+ IsBoolFlag() bool
+}
+
+// -- int Value
+type intValue int
+
+func newIntValue(val int, p *int) *intValue {
+ *p = val
+ return (*intValue)(p)
+}
+
+func (i *intValue) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, strconv.IntSize)
+ if err != nil {
+ err = numError(err)
+ }
+ *i = intValue(v)
+ return err
+}
+
+func (i *intValue) Get() any { return int(*i) }
+
+func (i *intValue) String() string { return strconv.Itoa(int(*i)) }
+
+// -- int64 Value
+type int64Value int64
+
+func newInt64Value(val int64, p *int64) *int64Value {
+ *p = val
+ return (*int64Value)(p)
+}
+
+func (i *int64Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ if err != nil {
+ err = numError(err)
+ }
+ *i = int64Value(v)
+ return err
+}
+
+func (i *int64Value) Get() any { return int64(*i) }
+
+func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+// -- uint Value
+type uintValue uint
+
+func newUintValue(val uint, p *uint) *uintValue {
+ *p = val
+ return (*uintValue)(p)
+}
+
+func (i *uintValue) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, strconv.IntSize)
+ if err != nil {
+ err = numError(err)
+ }
+ *i = uintValue(v)
+ return err
+}
+
+func (i *uintValue) Get() any { return uint(*i) }
+
+func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+// -- uint64 Value
+type uint64Value uint64
+
+func newUint64Value(val uint64, p *uint64) *uint64Value {
+ *p = val
+ return (*uint64Value)(p)
+}
+
+func (i *uint64Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ if err != nil {
+ err = numError(err)
+ }
+ *i = uint64Value(v)
+ return err
+}
+
+func (i *uint64Value) Get() any { return uint64(*i) }
+
+func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+// -- string Value
+type stringValue string
+
+func newStringValue(val string, p *string) *stringValue {
+ *p = val
+ return (*stringValue)(p)
+}
+
+func (s *stringValue) Set(val string) error {
+ *s = stringValue(val)
+ return nil
+}
+
+func (s *stringValue) Get() any { return string(*s) }
+
+func (s *stringValue) String() string { return string(*s) }
+
+// -- float64 Value
+type float64Value float64
+
+func newFloat64Value(val float64, p *float64) *float64Value {
+ *p = val
+ return (*float64Value)(p)
+}
+
+func (f *float64Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ err = numError(err)
+ }
+ *f = float64Value(v)
+ return err
+}
+
+func (f *float64Value) Get() any { return float64(*f) }
+
+func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+ *p = val
+ return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+ v, err := time.ParseDuration(s)
+ if err != nil {
+ err = errParse
+ }
+ *d = durationValue(v)
+ return err
+}
+
+func (d *durationValue) Get() any { return time.Duration(*d) }
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+// -- encoding.TextUnmarshaler Value
+type textValue struct{ p encoding.TextUnmarshaler }
+
+func newTextValue(val encoding.TextMarshaler, p encoding.TextUnmarshaler) textValue {
+ ptrVal := reflect.ValueOf(p)
+ if ptrVal.Kind() != reflect.Ptr {
+ panic("variable value type must be a pointer")
+ }
+ defVal := reflect.ValueOf(val)
+ if defVal.Kind() == reflect.Ptr {
+ defVal = defVal.Elem()
+ }
+ if defVal.Type() != ptrVal.Type().Elem() {
+ panic(fmt.Sprintf("default type does not match variable type: %v != %v", defVal.Type(), ptrVal.Type().Elem()))
+ }
+ ptrVal.Elem().Set(defVal)
+ return textValue{p}
+}
+
+func (v textValue) Set(s string) error {
+ return v.p.UnmarshalText([]byte(s))
+}
+
+func (v textValue) Get() any {
+ return v.p
+}
+
+func (v textValue) String() string {
+ if m, ok := v.p.(encoding.TextMarshaler); ok {
+ if b, err := m.MarshalText(); err == nil {
+ return string(b)
+ }
+ }
+ return ""
+}
+
+// -- func Value
+type funcValue func(string) error
+
+func (f funcValue) Set(s string) error { return f(s) }
+
+func (f funcValue) String() string { return "" }
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+//
+// If a Value has an IsBoolFlag() bool method returning true,
+// the command-line parser makes -name equivalent to -name=true
+// rather than using the next command-line argument.
+//
+// Set is called once, in command line order, for each flag present.
+// The flag package may call the String method with a zero-valued receiver,
+// such as a nil pointer.
+type Value interface {
+ String() string
+ Set(string) error
+}
+
+// Getter is an interface that allows the contents of a Value to be retrieved.
+// It wraps the Value interface, rather than being part of it, because it
+// appeared after Go 1 and its compatibility rules. All Value types provided
+// by this package satisfy the Getter interface, except the type used by Func.
+type Getter interface {
+ Value
+ Get() any
+}
+
+// ErrorHandling defines how FlagSet.Parse behaves if the parse fails.
+type ErrorHandling int
+
+// These constants cause FlagSet.Parse to behave as described if the parse fails.
+const (
+ ContinueOnError ErrorHandling = iota // Return a descriptive error.
+ ExitOnError // Call os.Exit(2), or os.Exit(0) for -h/-help.
+ PanicOnError // Call panic with a descriptive error.
+)
+
+// A FlagSet represents a set of defined flags. The zero value of a FlagSet
+// has no name and has ContinueOnError error handling.
+//
+// Flag names must be unique within a FlagSet. An attempt to define a flag whose
+// name is already in use will cause a panic.
+type FlagSet struct {
+ // Usage is the function called when an error occurs while parsing flags.
+ // The field is a function (not a method) that may be changed to point to
+ // a custom error handler. What happens after Usage is called depends
+ // on the ErrorHandling setting; for the command line, this defaults
+ // to ExitOnError, which exits the program after calling Usage.
+ Usage func()
+
+ name string
+ parsed bool
+ actual map[string]*Flag
+ formal map[string]*Flag
+ args []string // arguments after flags
+ errorHandling ErrorHandling
+ output io.Writer // nil means stderr; use Output() accessor
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+ Name string // name as it appears on command line
+ Usage string // help message
+ Value Value // value as set
+ DefValue string // default value (as text); for usage message
+}
+
+// sortFlags returns the flags as a slice in lexicographically sorted order.
+func sortFlags(flags map[string]*Flag) []*Flag {
+ result := make([]*Flag, len(flags))
+ i := 0
+ for _, f := range flags {
+ result[i] = f
+ i++
+ }
+ sort.Slice(result, func(i, j int) bool {
+ return result[i].Name < result[j].Name
+ })
+ return result
+}
+
+// Output returns the destination for usage and error messages. os.Stderr is returned if
+// output was not set or was set to nil.
+func (f *FlagSet) Output() io.Writer {
+ if f.output == nil {
+ return os.Stderr
+ }
+ return f.output
+}
+
+// Name returns the name of the flag set.
+func (f *FlagSet) Name() string {
+ return f.name
+}
+
+// ErrorHandling returns the error handling behavior of the flag set.
+func (f *FlagSet) ErrorHandling() ErrorHandling {
+ return f.errorHandling
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (f *FlagSet) SetOutput(output io.Writer) {
+ f.output = output
+}
+
+// VisitAll visits the flags in lexicographical order, calling fn for each.
+// It visits all flags, even those not set.
+func (f *FlagSet) VisitAll(fn func(*Flag)) {
+ for _, flag := range sortFlags(f.formal) {
+ fn(flag)
+ }
+}
+
+// VisitAll visits the command-line flags in lexicographical order, calling
+// fn for each. It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+ CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order, calling fn for each.
+// It visits only those flags that have been set.
+func (f *FlagSet) Visit(fn func(*Flag)) {
+ for _, flag := range sortFlags(f.actual) {
+ fn(flag)
+ }
+}
+
+// Visit visits the command-line flags in lexicographical order, calling fn
+// for each. It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+ CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) Lookup(name string) *Flag {
+ return f.formal[name]
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+ return CommandLine.formal[name]
+}
+
+// Set sets the value of the named flag.
+func (f *FlagSet) Set(name, value string) error {
+ flag, ok := f.formal[name]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+ err := flag.Value.Set(value)
+ if err != nil {
+ return err
+ }
+ if f.actual == nil {
+ f.actual = make(map[string]*Flag)
+ }
+ f.actual[name] = flag
+ return nil
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+ return CommandLine.Set(name, value)
+}
+
+// isZeroValue determines whether the string represents the zero
+// value for a flag.
+func isZeroValue(flag *Flag, value string) (ok bool, err error) {
+ // Build a zero value of the flag's Value type, and see if the
+ // result of calling its String method equals the value passed in.
+ // This works unless the Value type is itself an interface type.
+ typ := reflect.TypeOf(flag.Value)
+ var z reflect.Value
+ if typ.Kind() == reflect.Pointer {
+ z = reflect.New(typ.Elem())
+ } else {
+ z = reflect.Zero(typ)
+ }
+ // Catch panics from calling the String method; they shouldn't prevent
+ // the usage message from being printed, but should be reported to the
+ // user so that they know to fix their code.
+ defer func() {
+ if e := recover(); e != nil {
+ if typ.Kind() == reflect.Pointer {
+ typ = typ.Elem()
+ }
+ err = fmt.Errorf("panic calling String method on zero %v for flag %s: %v", typ, flag.Name, e)
+ }
+ }()
+ return value == z.Interface().(Value).String(), nil
+}
+
+// UnquoteUsage extracts a back-quoted name from the usage
+// string for a flag and returns it and the un-quoted usage.
+// Given "a `name` to show" it returns ("name", "a name to show").
+// If there are no back quotes, the name is an educated guess of the
+// type of the flag's value, or the empty string if the flag is boolean.
+func UnquoteUsage(flag *Flag) (name string, usage string) {
+ // Look for a back-quoted name, but avoid the strings package.
+ usage = flag.Usage
+ for i := 0; i < len(usage); i++ {
+ if usage[i] == '`' {
+ for j := i + 1; j < len(usage); j++ {
+ if usage[j] == '`' {
+ name = usage[i+1 : j]
+ usage = usage[:i] + name + usage[j+1:]
+ return name, usage
+ }
+ }
+ break // Only one back quote; use type name.
+ }
+ }
+ // No explicit name, so use type if we can find one.
+ name = "value"
+ switch flag.Value.(type) {
+ case boolFlag:
+ name = ""
+ case *durationValue:
+ name = "duration"
+ case *float64Value:
+ name = "float"
+ case *intValue, *int64Value:
+ name = "int"
+ case *stringValue:
+ name = "string"
+ case *uintValue, *uint64Value:
+ name = "uint"
+ }
+ return
+}
+
+// PrintDefaults prints, to standard error unless configured otherwise, the
+// default values of all defined command-line flags in the set. See the
+// documentation for the global function PrintDefaults for more information.
+func (f *FlagSet) PrintDefaults() {
+ var isZeroValueErrs []error
+ f.VisitAll(func(flag *Flag) {
+ var b strings.Builder
+ fmt.Fprintf(&b, " -%s", flag.Name) // Two spaces before -; see next two comments.
+ name, usage := UnquoteUsage(flag)
+ if len(name) > 0 {
+ b.WriteString(" ")
+ b.WriteString(name)
+ }
+ // Boolean flags of one ASCII letter are so common we
+ // treat them specially, putting their usage on the same line.
+ if b.Len() <= 4 { // space, space, '-', 'x'.
+ b.WriteString("\t")
+ } else {
+ // Four spaces before the tab triggers good alignment
+ // for both 4- and 8-space tab stops.
+ b.WriteString("\n \t")
+ }
+ b.WriteString(strings.ReplaceAll(usage, "\n", "\n \t"))
+
+ // Print the default value only if it differs from the zero value
+ // for this flag type.
+ if isZero, err := isZeroValue(flag, flag.DefValue); err != nil {
+ isZeroValueErrs = append(isZeroValueErrs, err)
+ } else if !isZero {
+ if _, ok := flag.Value.(*stringValue); ok {
+ // put quotes on the value
+ fmt.Fprintf(&b, " (default %q)", flag.DefValue)
+ } else {
+ fmt.Fprintf(&b, " (default %v)", flag.DefValue)
+ }
+ }
+ fmt.Fprint(f.Output(), b.String(), "\n")
+ })
+ // If calling String on any zero flag.Values triggered a panic, print
+ // the messages after the full set of defaults so that the programmer
+ // knows to fix the panic.
+ if errs := isZeroValueErrs; len(errs) > 0 {
+ fmt.Fprintln(f.Output())
+ for _, err := range errs {
+ fmt.Fprintln(f.Output(), err)
+ }
+ }
+}
+
+// PrintDefaults prints, to standard error unless configured otherwise,
+// a usage message showing the default settings of all defined
+// command-line flags.
+// For an integer valued flag x, the default output has the form
+//
+// -x int
+// usage-message-for-x (default 7)
+//
+// The usage message will appear on a separate line for anything but
+// a bool flag with a one-byte name. For bool flags, the type is
+// omitted and if the flag name is one byte the usage message appears
+// on the same line. The parenthetical default is omitted if the
+// default is the zero value for the type. The listed type, here int,
+// can be changed by placing a back-quoted name in the flag's usage
+// string; the first such item in the message is taken to be a parameter
+// name to show in the message and the back quotes are stripped from
+// the message when displayed. For instance, given
+//
+// flag.String("I", "", "search `directory` for include files")
+//
+// the output will be
+//
+// -I directory
+// search directory for include files.
+//
+// To change the destination for flag messages, call CommandLine.SetOutput.
+func PrintDefaults() {
+ CommandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func (f *FlagSet) defaultUsage() {
+ if f.name == "" {
+ fmt.Fprintf(f.Output(), "Usage:\n")
+ } else {
+ fmt.Fprintf(f.Output(), "Usage of %s:\n", f.name)
+ }
+ f.PrintDefaults()
+}
+
+// NOTE: Usage is not just defaultUsage(CommandLine)
+// because it serves (via godoc flag Usage) as the example
+// for how to write your own usage function.
+
+// Usage prints a usage message documenting all defined command-line flags
+// to CommandLine's output, which by default is os.Stderr.
+// It is called when an error occurs while parsing flags.
+// The function is a variable that may be changed to point to a custom function.
+// By default it prints a simple header and calls PrintDefaults; for details about the
+// format of the output and how to control it, see the documentation for PrintDefaults.
+// Custom usage functions may choose to exit the program; by default exiting
+// happens anyway as the command line's error handling strategy is set to
+// ExitOnError.
+var Usage = func() {
+ fmt.Fprintf(CommandLine.Output(), "Usage of %s:\n", os.Args[0])
+ PrintDefaults()
+}
+
+// NFlag returns the number of flags that have been set.
+func (f *FlagSet) NFlag() int { return len(f.actual) }
+
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(CommandLine.actual) }
+
+// Arg returns the i'th argument. Arg(0) is the first remaining argument
+// after flags have been processed. Arg returns an empty string if the
+// requested element does not exist.
+func (f *FlagSet) Arg(i int) string {
+ if i < 0 || i >= len(f.args) {
+ return ""
+ }
+ return f.args[i]
+}
+
+// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
+// after flags have been processed. Arg returns an empty string if the
+// requested element does not exist.
+func Arg(i int) string {
+ return CommandLine.Arg(i)
+}
+
+// NArg is the number of arguments remaining after flags have been processed.
+func (f *FlagSet) NArg() int { return len(f.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(CommandLine.args) }
+
+// Args returns the non-flag arguments.
+func (f *FlagSet) Args() []string { return f.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args }
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
+ f.Var(newBoolValue(value, p), name, usage)
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func BoolVar(p *bool, name string, value bool, usage string) {
+ CommandLine.Var(newBoolValue(value, p), name, usage)
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
+ p := new(bool)
+ f.BoolVar(p, name, value, usage)
+ return p
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func Bool(name string, value bool, usage string) *bool {
+ return CommandLine.Bool(name, value, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
+ f.Var(newIntValue(value, p), name, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func IntVar(p *int, name string, value int, usage string) {
+ CommandLine.Var(newIntValue(value, p), name, usage)
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func (f *FlagSet) Int(name string, value int, usage string) *int {
+ p := new(int)
+ f.IntVar(p, name, value, usage)
+ return p
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func Int(name string, value int, usage string) *int {
+ return CommandLine.Int(name, value, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
+ f.Var(newInt64Value(value, p), name, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func Int64Var(p *int64, name string, value int64, usage string) {
+ CommandLine.Var(newInt64Value(value, p), name, usage)
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64Var(p, name, value, usage)
+ return p
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func Int64(name string, value int64, usage string) *int64 {
+ return CommandLine.Int64(name, value, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
+ f.Var(newUintValue(value, p), name, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func UintVar(p *uint, name string, value uint, usage string) {
+ CommandLine.Var(newUintValue(value, p), name, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVar(p, name, value, usage)
+ return p
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(name string, value uint, usage string) *uint {
+ return CommandLine.Uint(name, value, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
+ f.Var(newUint64Value(value, p), name, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func Uint64Var(p *uint64, name string, value uint64, usage string) {
+ CommandLine.Var(newUint64Value(value, p), name, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64Var(p, name, value, usage)
+ return p
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(name string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64(name, value, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
+ f.Var(newStringValue(value, p), name, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func StringVar(p *string, name string, value string, usage string) {
+ CommandLine.Var(newStringValue(value, p), name, usage)
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func (f *FlagSet) String(name string, value string, usage string) *string {
+ p := new(string)
+ f.StringVar(p, name, value, usage)
+ return p
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func String(name string, value string, usage string) *string {
+ return CommandLine.String(name, value, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
+ f.Var(newFloat64Value(value, p), name, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func Float64Var(p *float64, name string, value float64, usage string) {
+ CommandLine.Var(newFloat64Value(value, p), name, usage)
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64Var(p, name, value, usage)
+ return p
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func Float64(name string, value float64, usage string) *float64 {
+ return CommandLine.Float64(name, value, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+// The flag accepts a value acceptable to time.ParseDuration.
+func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ f.Var(newDurationValue(value, p), name, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+// The flag accepts a value acceptable to time.ParseDuration.
+func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ CommandLine.Var(newDurationValue(value, p), name, usage)
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+// The flag accepts a value acceptable to time.ParseDuration.
+func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVar(p, name, value, usage)
+ return p
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+// The flag accepts a value acceptable to time.ParseDuration.
+func Duration(name string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.Duration(name, value, usage)
+}
+
+// TextVar defines a flag with a specified name, default value, and usage string.
+// The argument p must be a pointer to a variable that will hold the value
+// of the flag, and p must implement encoding.TextUnmarshaler.
+// If the flag is used, the flag value will be passed to p's UnmarshalText method.
+// The type of the default value must be the same as the type of p.
+func (f *FlagSet) TextVar(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string) {
+ f.Var(newTextValue(value, p), name, usage)
+}
+
+// TextVar defines a flag with a specified name, default value, and usage string.
+// The argument p must be a pointer to a variable that will hold the value
+// of the flag, and p must implement encoding.TextUnmarshaler.
+// If the flag is used, the flag value will be passed to p's UnmarshalText method.
+// The type of the default value must be the same as the type of p.
+func TextVar(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string) {
+ CommandLine.Var(newTextValue(value, p), name, usage)
+}
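+
+// A minimal sketch (not part of the upstream file): net.IP satisfies
+// both text interfaces, so it can back a TextVar flag.
+//
+// var ip net.IP
+// flag.TextVar(&ip, "ip", net.IPv4(192, 168, 0, 100), "`IP address` to parse")
+// flag.Parse()
+// fmt.Printf("ip = %v\n", ip)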
+
+// Func defines a flag with the specified name and usage string.
+// Each time the flag is seen, fn is called with the value of the flag.
+// If fn returns a non-nil error, it will be treated as a flag value parsing error.
+func (f *FlagSet) Func(name, usage string, fn func(string) error) {
+ f.Var(funcValue(fn), name, usage)
+}
+
+// Func defines a flag with the specified name and usage string.
+// Each time the flag is seen, fn is called with the value of the flag.
+// If fn returns a non-nil error, it will be treated as a flag value parsing error.
+func Func(name, usage string, fn func(string) error) {
+ CommandLine.Func(name, usage, fn)
+}
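+
+// A minimal sketch (not part of the upstream file): a repeatable flag
+// collecting each occurrence into a slice.
+//
+// var ips []net.IP
+// flag.Func("ip", "`IP address` to ping (repeatable)", func(s string) error {
+// ip := net.ParseIP(s)
+// if ip == nil {
+// return errors.New("could not parse IP")
+// }
+// ips = append(ips, ip)
+// return nil
+// })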
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (f *FlagSet) Var(value Value, name string, usage string) {
+ // Flag must not begin with "-" or contain "=".
+ if strings.HasPrefix(name, "-") {
+ panic(f.sprintf("flag %q begins with -", name))
+ } else if strings.Contains(name, "=") {
+ panic(f.sprintf("flag %q contains =", name))
+ }
+
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{name, usage, value, value.String()}
+ _, alreadythere := f.formal[name]
+ if alreadythere {
+ var msg string
+ if f.name == "" {
+ msg = f.sprintf("flag redefined: %s", name)
+ } else {
+ msg = f.sprintf("%s flag redefined: %s", f.name, name)
+ }
+ panic(msg) // Happens only if flags are declared with identical names
+ }
+ if f.formal == nil {
+ f.formal = make(map[string]*Flag)
+ }
+ f.formal[name] = flag
+}
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, name string, usage string) {
+ CommandLine.Var(value, name, usage)
+}
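+
+// A minimal sketch (not part of the upstream file) of the custom Value
+// described above, turning a comma-separated string into a slice:
+//
+// type sliceValue []string
+//
+// func (s *sliceValue) String() string { return strings.Join(*s, ",") }
+//
+// func (s *sliceValue) Set(v string) error {
+// *s = strings.Split(v, ",")
+// return nil
+// }
+//
+// var names sliceValue
+// flag.Var(&names, "names", "comma-separated `list` of names")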
+
+// sprintf formats the message, prints it to output, and returns it.
+func (f *FlagSet) sprintf(format string, a ...any) string {
+ msg := fmt.Sprintf(format, a...)
+ fmt.Fprintln(f.Output(), msg)
+ return msg
+}
+
+// failf prints to standard error a formatted error and usage message and
+// returns the error.
+func (f *FlagSet) failf(format string, a ...any) error {
+ msg := f.sprintf(format, a...)
+ f.usage()
+ return errors.New(msg)
+}
+
+// usage calls the Usage method for the flag set if one is specified,
+// or the appropriate default usage function otherwise.
+func (f *FlagSet) usage() {
+ if f.Usage == nil {
+ f.defaultUsage()
+ } else {
+ f.Usage()
+ }
+}
+
+// parseOne parses one flag. It reports whether a flag was seen.
+func (f *FlagSet) parseOne() (bool, error) {
+ if len(f.args) == 0 {
+ return false, nil
+ }
+ s := f.args[0]
+ if len(s) < 2 || s[0] != '-' {
+ return false, nil
+ }
+ numMinuses := 1
+ if s[1] == '-' {
+ numMinuses++
+ if len(s) == 2 { // "--" terminates the flags
+ f.args = f.args[1:]
+ return false, nil
+ }
+ }
+ name := s[numMinuses:]
+ if len(name) == 0 || name[0] == '-' || name[0] == '=' {
+ return false, f.failf("bad flag syntax: %s", s)
+ }
+
+ // it's a flag. does it have an argument?
+ f.args = f.args[1:]
+ hasValue := false
+ value := ""
+ for i := 1; i < len(name); i++ { // equals cannot be first
+ if name[i] == '=' {
+ value = name[i+1:]
+ hasValue = true
+ name = name[0:i]
+ break
+ }
+ }
+ m := f.formal
+ flag, alreadythere := m[name] // BUG
+ if !alreadythere {
+ if name == "help" || name == "h" { // special case for nice help message.
+ f.usage()
+ return false, ErrHelp
+ }
+ return false, f.failf("flag provided but not defined: -%s", name)
+ }
+
+ if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg
+ if hasValue {
+ if err := fv.Set(value); err != nil {
+ return false, f.failf("invalid boolean value %q for -%s: %v", value, name, err)
+ }
+ } else {
+ if err := fv.Set("true"); err != nil {
+ return false, f.failf("invalid boolean flag %s: %v", name, err)
+ }
+ }
+ } else {
+ // It must have a value, which might be the next argument.
+ if !hasValue && len(f.args) > 0 {
+ // value is the next arg
+ hasValue = true
+ value, f.args = f.args[0], f.args[1:]
+ }
+ if !hasValue {
+ return false, f.failf("flag needs an argument: -%s", name)
+ }
+ if err := flag.Value.Set(value); err != nil {
+ return false, f.failf("invalid value %q for flag -%s: %v", value, name, err)
+ }
+ }
+ if f.actual == nil {
+ f.actual = make(map[string]*Flag)
+ }
+ f.actual[name] = flag
+ return true, nil
+}
+
+// Parse parses flag definitions from the argument list, which should not
+// include the command name. Must be called after all flags in the FlagSet
+// are defined and before flags are accessed by the program.
+// The return value will be ErrHelp if -help or -h were set but not defined.
+func (f *FlagSet) Parse(arguments []string) error {
+ f.parsed = true
+ f.args = arguments
+ for {
+ seen, err := f.parseOne()
+ if seen {
+ continue
+ }
+ if err == nil {
+ break
+ }
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ if err == ErrHelp {
+ os.Exit(0)
+ }
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
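+
+// A minimal sketch (not part of the upstream file): driving Parse on a
+// standalone FlagSet, as a subcommand might.
+//
+// fs := flag.NewFlagSet("example", flag.ContinueOnError)
+// n := fs.Int("n", 1, "`count` of repetitions")
+// verbose := fs.Bool("verbose", false, "enable verbose output")
+// err := fs.Parse([]string{"-n=3", "--verbose", "rest"})
+// // err == nil; *n == 3, *verbose == true, fs.Args() == ["rest"]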
+
+// Parsed reports whether f.Parse has been called.
+func (f *FlagSet) Parsed() bool {
+ return f.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:]. Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.Parse(os.Args[1:])
+}
+
+// Parsed reports whether the command-line flags have been parsed.
+func Parsed() bool {
+ return CommandLine.Parsed()
+}
+
+// CommandLine is the default set of command-line flags, parsed from os.Args.
+// The top-level functions such as BoolVar, Arg, and so on are wrappers for the
+// methods of CommandLine.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+func init() {
+ // Override generic FlagSet default Usage with call to global Usage.
+ // Note: This is not CommandLine.Usage = Usage,
+ // because we want any eventual call to use any updated value of Usage,
+ // not the value it has when this line is run.
+ CommandLine.Usage = commandLineUsage
+}
+
+func commandLineUsage() {
+ Usage()
+}
+
+// NewFlagSet returns a new, empty flag set with the specified name and
+// error handling property. If the name is not empty, it will be printed
+// in the default usage message and in error messages.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+ f := &FlagSet{
+ name: name,
+ errorHandling: errorHandling,
+ }
+ f.Usage = f.defaultUsage
+ return f
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
+ f.name = name
+ f.errorHandling = errorHandling
+}
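+
+// A minimal end-to-end sketch (not part of the upstream file) of the
+// package-level pattern from the doc comment above:
+//
+// var nFlag = flag.Int("n", 1234, "help message for flag n")
+//
+// func main() {
+// flag.Parse()
+// fmt.Println("n =", *nFlag, "args =", flag.Args())
+// }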
diff --git a/contrib/go/_std_1.19/src/fmt/doc.go b/contrib/go/_std_1.19/src/fmt/doc.go
new file mode 100644
index 0000000000..9785ed9526
--- /dev/null
+++ b/contrib/go/_std_1.19/src/fmt/doc.go
@@ -0,0 +1,383 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package fmt implements formatted I/O with functions analogous
+to C's printf and scanf. The format 'verbs' are derived from C's but
+are simpler.
+
+# Printing
+
+The verbs:
+
+General:
+
+ %v the value in a default format
+ when printing structs, the plus flag (%+v) adds field names
+ %#v a Go-syntax representation of the value
+ %T a Go-syntax representation of the type of the value
+ %% a literal percent sign; consumes no value
+
+Boolean:
+
+ %t the word true or false
+
+Integer:
+
+ %b base 2
+ %c the character represented by the corresponding Unicode code point
+ %d base 10
+ %o base 8
+ %O base 8 with 0o prefix
+ %q a single-quoted character literal safely escaped with Go syntax.
+ %x base 16, with lower-case letters for a-f
+ %X base 16, with upper-case letters for A-F
+ %U Unicode format: U+1234; same as "U+%04X"
+
+Floating-point and complex constituents:
+
+ %b decimalless scientific notation with exponent a power of two,
+ in the manner of strconv.FormatFloat with the 'b' format,
+ e.g. -123456p-78
+ %e scientific notation, e.g. -1.234456e+78
+ %E scientific notation, e.g. -1.234456E+78
+ %f decimal point but no exponent, e.g. 123.456
+ %F synonym for %f
+ %g %e for large exponents, %f otherwise. Precision is discussed below.
+ %G %E for large exponents, %F otherwise
+ %x hexadecimal notation (with decimal power of two exponent), e.g. -0x1.23abcp+20
+ %X upper-case hexadecimal notation, e.g. -0X1.23ABCP+20
+
+String and slice of bytes (treated equivalently with these verbs):
+
+ %s the uninterpreted bytes of the string or slice
+ %q a double-quoted string safely escaped with Go syntax
+ %x base 16, lower-case, two characters per byte
+ %X base 16, upper-case, two characters per byte
+
+Slice:
+
+ %p address of 0th element in base 16 notation, with leading 0x
+
+Pointer:
+
+ %p base 16 notation, with leading 0x
+ The %b, %d, %o, %x and %X verbs also work with pointers,
+ formatting the value exactly as if it were an integer.
+
+The default format for %v is:
+
+ bool: %t
+ int, int8 etc.: %d
+ uint, uint8 etc.: %d, %#x if printed with %#v
+ float32, complex64, etc: %g
+ string: %s
+ chan: %p
+ pointer: %p
+
+For compound objects, the elements are printed using these rules, recursively,
+laid out like this:
+
+ struct: {field0 field1 ...}
+ array, slice: [elem0 elem1 ...]
+ maps: map[key1:value1 key2:value2 ...]
+ pointer to above: &{}, &[], &map[]
+
+Width is specified by an optional decimal number immediately preceding the verb.
+If absent, the width is whatever is necessary to represent the value.
+Precision is specified after the (optional) width by a period followed by a
+decimal number. If no period is present, a default precision is used.
+A period with no following number specifies a precision of zero.
+Examples:
+
+ %f default width, default precision
+ %9f width 9, default precision
+ %.2f default width, precision 2
+ %9.2f width 9, precision 2
+ %9.f width 9, precision 0
+
+Width and precision are measured in units of Unicode code points,
+that is, runes. (This differs from C's printf where the
+units are always measured in bytes.) Either or both of the flags
+may be replaced with the character '*', causing their values to be
+obtained from the next operand (preceding the one to format),
+which must be of type int.
+
+For most values, width is the minimum number of runes to output,
+padding the formatted form with spaces if necessary.
+
+For strings, byte slices and byte arrays, however, precision
+limits the length of the input to be formatted (not the size of
+the output), truncating if necessary. Normally it is measured in
+runes, but for these types when formatted with the %x or %X format
+it is measured in bytes.
+
+For floating-point values, width sets the minimum width of the field and
+precision sets the number of places after the decimal, if appropriate,
+except that for %g/%G precision sets the maximum number of significant
+digits (trailing zeros are removed). For example, given 12.345 the format
+%6.3f prints 12.345 while %.3g prints 12.3. The default precision for %e, %f
+and %#g is 6; for %g it is the smallest number of digits necessary to identify
+the value uniquely.
+
+For complex numbers, the width and precision apply to the two
+components independently and the result is parenthesized, so %f applied
+to 1.2+3.4i produces (1.200000+3.400000i).
+
+When formatting a single integer code point or a rune string (type []rune)
+with %q, invalid Unicode code points are changed to the Unicode replacement
+character, U+FFFD, as in strconv.QuoteRune.
+
+Other flags:
+
+ '+' always print a sign for numeric values;
+ guarantee ASCII-only output for %q (%+q)
+ '-' pad with spaces on the right rather than the left (left-justify the field)
+ '#' alternate format: add leading 0b for binary (%#b), 0 for octal (%#o),
+ 0x or 0X for hex (%#x or %#X); suppress 0x for %p (%#p);
+ for %q, print a raw (backquoted) string if strconv.CanBackquote
+ returns true;
+ always print a decimal point for %e, %E, %f, %F, %g and %G;
+ do not remove trailing zeros for %g and %G;
+ write e.g. U+0078 'x' if the character is printable for %U (%#U).
+ ' ' (space) leave a space for elided sign in numbers (% d);
+ put spaces between bytes printing strings or slices in hex (% x, % X)
+ '0' pad with leading zeros rather than spaces;
+ for numbers, this moves the padding after the sign;
+ ignored for strings, byte slices and byte arrays
+
+Flags are ignored by verbs that do not expect them.
+For example there is no alternate decimal format, so %#d and %d
+behave identically.
+
+For each Printf-like function, there is also a Print function
+that takes no format and is equivalent to saying %v for every
+operand. Another variant Println inserts blanks between
+operands and appends a newline.
+
+Regardless of the verb, if an operand is an interface value,
+the internal concrete value is used, not the interface itself.
+Thus:
+
+ var i interface{} = 23
+ fmt.Printf("%v\n", i)
+
+will print 23.
+
+Except when printed using the verbs %T and %p, special
+formatting considerations apply for operands that implement
+certain interfaces. In order of application:
+
+1. If the operand is a reflect.Value, the operand is replaced by the
+concrete value that it holds, and printing continues with the next rule.
+
+2. If an operand implements the Formatter interface, it will
+be invoked. In this case the interpretation of verbs and flags is
+controlled by that implementation.
+
+3. If the %v verb is used with the # flag (%#v) and the operand
+implements the GoStringer interface, that will be invoked.
+
+If the format (which is implicitly %v for Println etc.) is valid
+for a string (%s %q %v %x %X), the following two rules apply:
+
+4. If an operand implements the error interface, the Error method
+will be invoked to convert the object to a string, which will then
+be formatted as required by the verb (if any).
+
+5. If an operand implements method String() string, that method
+will be invoked to convert the object to a string, which will then
+be formatted as required by the verb (if any).
+
+For compound operands such as slices and structs, the format
+applies to the elements of each operand, recursively, not to the
+operand as a whole. Thus %q will quote each element of a slice
+of strings, and %6.2f will control formatting for each element
+of a floating-point array.
+
+However, when printing a byte slice with a string-like verb
+(%s %q %x %X), it is treated identically to a string, as a single item.
+
+To avoid recursion in cases such as
+
+ type X string
+ func (x X) String() string { return Sprintf("<%s>", x) }
+
+convert the value before recurring:
+
+ func (x X) String() string { return Sprintf("<%s>", string(x)) }
+
+Infinite recursion can also be triggered by self-referential data
+structures, such as a slice that contains itself as an element, if
+that type has a String method. Such pathologies are rare, however,
+and the package does not protect against them.
+
+When printing a struct, fmt cannot and therefore does not invoke
+formatting methods such as Error or String on unexported fields.
+
+# Explicit argument indexes
+
+In Printf, Sprintf, and Fprintf, the default behavior is for each
+formatting verb to format successive arguments passed in the call.
+However, the notation [n] immediately before the verb indicates that the
+nth one-indexed argument is to be formatted instead. The same notation
+before a '*' for a width or precision selects the argument index holding
+the value. After processing a bracketed expression [n], subsequent verbs
+will use arguments n+1, n+2, etc. unless otherwise directed.
+
+For example,
+
+ fmt.Sprintf("%[2]d %[1]d\n", 11, 22)
+
+will yield "22 11", while
+
+ fmt.Sprintf("%[3]*.[2]*[1]f", 12.0, 2, 6)
+
+equivalent to
+
+ fmt.Sprintf("%6.2f", 12.0)
+
+will yield " 12.00". Because an explicit index affects subsequent verbs,
+this notation can be used to print the same values multiple times
+by resetting the index for the first argument to be repeated:
+
+ fmt.Sprintf("%d %d %#[1]x %#x", 16, 17)
+
+will yield "16 17 0x10 0x11".
+
+# Format errors
+
+If an invalid argument is given for a verb, such as providing
+a string to %d, the generated string will contain a
+description of the problem, as in these examples:
+
+ Wrong type or unknown verb: %!verb(type=value)
+ Printf("%d", "hi"): %!d(string=hi)
+ Too many arguments: %!(EXTRA type=value)
+ Printf("hi", "guys"): hi%!(EXTRA string=guys)
+ Too few arguments: %!verb(MISSING)
+ Printf("hi%d"): hi%!d(MISSING)
+ Non-int for width or precision: %!(BADWIDTH) or %!(BADPREC)
+ Printf("%*s", 4.5, "hi"): %!(BADWIDTH)hi
+ Printf("%.*s", 4.5, "hi"): %!(BADPREC)hi
+ Invalid or misused argument index: %!(BADINDEX)
+ Printf("%*[2]d", 7): %!d(BADINDEX)
+ Printf("%.[2]d", 7): %!d(BADINDEX)
+
+All errors begin with the string "%!" followed sometimes
+by a single character (the verb) and end with a parenthesized
+description.
+
+If an Error or String method triggers a panic when called by a
+print routine, the fmt package reformats the error message
+from the panic, decorating it with an indication that it came
+through the fmt package. For example, if a String method
+calls panic("bad"), the resulting formatted message will look
+like
+
+ %!s(PANIC=bad)
+
+The %!s just shows the print verb in use when the failure
+occurred. If the panic is caused by a nil receiver to an Error
+or String method, however, the output is the undecorated
+string, "<nil>".
+
+# Scanning
+
+An analogous set of functions scans formatted text to yield
+values. Scan, Scanf and Scanln read from os.Stdin; Fscan,
+Fscanf and Fscanln read from a specified io.Reader; Sscan,
+Sscanf and Sscanln read from an argument string.
+
+Scan, Fscan, Sscan treat newlines in the input as spaces.
+
+Scanln, Fscanln and Sscanln stop scanning at a newline and
+require that the items be followed by a newline or EOF.
+
+Scanf, Fscanf, and Sscanf parse the arguments according to a
+format string, analogous to that of Printf. In the text that
+follows, 'space' means any Unicode whitespace character
+except newline.
+
+In the format string, a verb introduced by the % character
+consumes and parses input; these verbs are described in more
+detail below. A character other than %, space, or newline in
+the format consumes exactly that input character, which must
+be present. A newline with zero or more spaces before it in
+the format string consumes zero or more spaces in the input
+followed by a single newline or the end of the input. A space
+following a newline in the format string consumes zero or more
+spaces in the input. Otherwise, any run of one or more spaces
+in the format string consumes as many spaces as possible in
+the input. Unless the run of spaces in the format string
+appears adjacent to a newline, the run must consume at least
+one space from the input or find the end of the input.
+
+The handling of spaces and newlines differs from that of C's
+scanf family: in C, newlines are treated as any other space,
+and it is never an error when a run of spaces in the format
+string finds no spaces to consume in the input.
+
+The verbs behave analogously to those of Printf.
+For example, %x will scan an integer as a hexadecimal number,
+and %v will scan the default representation format for the value.
+The Printf verbs %p and %T and the flags # and + are not implemented.
+For floating-point and complex values, all valid formatting verbs
+(%b %e %E %f %F %g %G %x %X and %v) are equivalent and accept
+both decimal and hexadecimal notation (for example: "2.3e+7", "0x4.5p-8")
+and digit-separating underscores (for example: "3.14159_26535_89793").
+
+Input processed by verbs is implicitly space-delimited: the
+implementation of every verb except %c starts by discarding
+leading spaces from the remaining input, and the %s verb
+(and %v reading into a string) stops consuming input at the first
+space or newline character.
+
+The familiar base-setting prefixes 0b (binary), 0o and 0 (octal),
+and 0x (hexadecimal) are accepted when scanning integers
+without a format or with the %v verb, as are digit-separating
+underscores.
+
+Width is interpreted in the input text but there is no
+syntax for scanning with a precision (no %5.2f, just %5f).
+If width is provided, it applies after leading spaces are
+trimmed and specifies the maximum number of runes to read
+to satisfy the verb. For example,
+
+ Sscanf(" 1234567 ", "%5s%d", &s, &i)
+
+will set s to "12345" and i to 67 while
+
+ Sscanf(" 12 34 567 ", "%5s%d", &s, &i)
+
+will set s to "12" and i to 34.
+
+In all the scanning functions, a carriage return followed
+immediately by a newline is treated as a plain newline
+(\r\n means the same as \n).
+
+In all the scanning functions, if an operand implements method
+Scan (that is, it implements the Scanner interface) that
+method will be used to scan the text for that operand. Also,
+if the number of arguments scanned is less than the number of
+arguments provided, an error is returned.
+
+All arguments to be scanned must be either pointers to basic
+types or implementations of the Scanner interface.
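+
+A minimal Scanner implementation might look like this (the type
+name and the use of strconv are illustrative only):
+
+	type celsius float64
+
+	func (c *celsius) Scan(state ScanState, verb rune) error {
+		tok, err := state.Token(true, nil)
+		if err != nil {
+			return err
+		}
+		f, err := strconv.ParseFloat(string(tok), 64)
+		if err != nil {
+			return err
+		}
+		*c = celsius(f)
+		return nil
+	}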
+
+Like Scanf and Fscanf, Sscanf need not consume its entire input.
+There is no way to recover how much of the input string Sscanf used.
+
+Note: Fscan etc. can read one character (rune) past the input
+they return, which means that a loop calling a scan routine
+may skip some of the input. This is usually a problem only
+when there is no space between input values. If the reader
+provided to Fscan implements ReadRune, that method will be used
+to read characters. If the reader also implements UnreadRune,
+that method will be used to save the character and successive
+calls will not lose data. To attach ReadRune and UnreadRune
+methods to a reader without that capability, use
+bufio.NewReader.
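+
+For example (src stands for any io.Reader):
+
+	r := bufio.NewReader(src)
+	Fscan(r, &x) // r supplies ReadRune and UnreadRune, so no input is lost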
+*/
+package fmt
diff --git a/contrib/go/_std_1.18/src/fmt/errors.go b/contrib/go/_std_1.19/src/fmt/errors.go
index 4f4daf19e1..4f4daf19e1 100644
--- a/contrib/go/_std_1.18/src/fmt/errors.go
+++ b/contrib/go/_std_1.19/src/fmt/errors.go
diff --git a/contrib/go/_std_1.18/src/fmt/format.go b/contrib/go/_std_1.19/src/fmt/format.go
index bd00e5a5e0..bd00e5a5e0 100644
--- a/contrib/go/_std_1.18/src/fmt/format.go
+++ b/contrib/go/_std_1.19/src/fmt/format.go
diff --git a/contrib/go/_std_1.19/src/fmt/print.go b/contrib/go/_std_1.19/src/fmt/print.go
new file mode 100644
index 0000000000..2af7bd0c42
--- /dev/null
+++ b/contrib/go/_std_1.19/src/fmt/print.go
@@ -0,0 +1,1203 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fmt
+
+import (
+ "internal/fmtsort"
+ "io"
+ "os"
+ "reflect"
+ "sync"
+ "unicode/utf8"
+)
+
+// Strings for use with buffer.WriteString.
+// This is less overhead than using buffer.Write with byte arrays.
+const (
+ commaSpaceString = ", "
+ nilAngleString = "<nil>"
+ nilParenString = "(nil)"
+ nilString = "nil"
+ mapString = "map["
+ percentBangString = "%!"
+ missingString = "(MISSING)"
+ badIndexString = "(BADINDEX)"
+ panicString = "(PANIC="
+ extraString = "%!(EXTRA "
+ badWidthString = "%!(BADWIDTH)"
+ badPrecString = "%!(BADPREC)"
+ noVerbString = "%!(NOVERB)"
+ invReflectString = "<invalid reflect.Value>"
+)
+
+// State represents the printer state passed to custom formatters.
+// It provides access to the io.Writer interface plus information about
+// the flags and options for the operand's format specifier.
+type State interface {
+ // Write is the function to call to emit formatted output to be printed.
+ Write(b []byte) (n int, err error)
+ // Width returns the value of the width option and whether it has been set.
+ Width() (wid int, ok bool)
+ // Precision returns the value of the precision option and whether it has been set.
+ Precision() (prec int, ok bool)
+
+ // Flag reports whether the flag c, a character, has been set.
+ Flag(c int) bool
+}
+
+// Formatter is implemented by any value that has a Format method.
+// The implementation controls how State and rune are interpreted,
+// and may call Sprint(f) or Fprint(f) etc. to generate its output.
+type Formatter interface {
+ Format(f State, verb rune)
+}
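+
+// A minimal illustrative Formatter (the type is hypothetical):
+//
+//	type hexInt int
+//
+//	func (h hexInt) Format(f fmt.State, verb rune) {
+//		// For brevity, print in hexadecimal regardless of verb and flags.
+//		fmt.Fprintf(f, "0x%x", int(h))
+//	}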
+
+// Stringer is implemented by any value that has a String method,
+// which defines the “native” format for that value.
+// The String method is used to print values passed as an operand
+// to any format that accepts a string or to an unformatted printer
+// such as Print.
+type Stringer interface {
+ String() string
+}
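+
+// A minimal illustrative Stringer (the type is hypothetical):
+//
+//	type point struct{ x, y int }
+//
+//	func (p point) String() string { return fmt.Sprintf("(%d, %d)", p.x, p.y) }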
+
+// GoStringer is implemented by any value that has a GoString method,
+// which defines the Go syntax for that value.
+// The GoString method is used to print values passed as an operand
+// to a %#v format.
+type GoStringer interface {
+ GoString() string
+}
+
+// Use simple []byte instead of bytes.Buffer to avoid large dependency.
+type buffer []byte
+
+func (b *buffer) write(p []byte) {
+ *b = append(*b, p...)
+}
+
+func (b *buffer) writeString(s string) {
+ *b = append(*b, s...)
+}
+
+func (b *buffer) writeByte(c byte) {
+ *b = append(*b, c)
+}
+
+func (bp *buffer) writeRune(r rune) {
+ if r < utf8.RuneSelf {
+ *bp = append(*bp, byte(r))
+ return
+ }
+
+ b := *bp
+ n := len(b)
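+	// Grow the slice until it has capacity for a maximal UTF-8 encoding.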
+ for n+utf8.UTFMax > cap(b) {
+ b = append(b, 0)
+ }
+ w := utf8.EncodeRune(b[n:n+utf8.UTFMax], r)
+ *bp = b[:n+w]
+}
+
+// pp is used to store a printer's state and is reused with sync.Pool to avoid allocations.
+type pp struct {
+ buf buffer
+
+ // arg holds the current item, as an interface{}.
+ arg any
+
+ // value is used instead of arg for reflect values.
+ value reflect.Value
+
+ // fmt is used to format basic items such as integers or strings.
+ fmt fmt
+
+ // reordered records whether the format string used argument reordering.
+ reordered bool
+ // goodArgNum records whether the most recent reordering directive was valid.
+ goodArgNum bool
+ // panicking is set by catchPanic to avoid infinite panic, recover, panic, ... recursion.
+ panicking bool
+ // erroring is set when printing an error string to guard against calling handleMethods.
+ erroring bool
+ // wrapErrs is set when the format string may contain a %w verb.
+ wrapErrs bool
+ // wrappedErr records the target of the %w verb.
+ wrappedErr error
+}
+
+var ppFree = sync.Pool{
+ New: func() any { return new(pp) },
+}
+
+// newPrinter allocates a new pp struct or grabs a cached one.
+func newPrinter() *pp {
+ p := ppFree.Get().(*pp)
+ p.panicking = false
+ p.erroring = false
+ p.wrapErrs = false
+ p.fmt.init(&p.buf)
+ return p
+}
+
+// free saves used pp structs in ppFree; avoids an allocation per invocation.
+func (p *pp) free() {
+ // Proper usage of a sync.Pool requires each entry to have approximately
+ // the same memory cost. To obtain this property when the stored type
+ // contains a variably-sized buffer, we add a hard limit on the maximum buffer
+ // to place back in the pool.
+ //
+ // See https://golang.org/issue/23199
+ if cap(p.buf) > 64<<10 {
+ return
+ }
+
+ p.buf = p.buf[:0]
+ p.arg = nil
+ p.value = reflect.Value{}
+ p.wrappedErr = nil
+ ppFree.Put(p)
+}
+
+func (p *pp) Width() (wid int, ok bool) { return p.fmt.wid, p.fmt.widPresent }
+
+func (p *pp) Precision() (prec int, ok bool) { return p.fmt.prec, p.fmt.precPresent }
+
+func (p *pp) Flag(b int) bool {
+ switch b {
+ case '-':
+ return p.fmt.minus
+ case '+':
+ return p.fmt.plus || p.fmt.plusV
+ case '#':
+ return p.fmt.sharp || p.fmt.sharpV
+ case ' ':
+ return p.fmt.space
+ case '0':
+ return p.fmt.zero
+ }
+ return false
+}
+
+// Implement Write so we can call Fprintf on a pp (through State), for
+// recursive use in custom verbs.
+func (p *pp) Write(b []byte) (ret int, err error) {
+ p.buf.write(b)
+ return len(b), nil
+}
+
+// Implement WriteString so that we can call io.WriteString
+// on a pp (through State), for efficiency.
+func (p *pp) WriteString(s string) (ret int, err error) {
+ p.buf.writeString(s)
+ return len(s), nil
+}
+
+// These routines end in 'f' and take a format string.
+
+// Fprintf formats according to a format specifier and writes to w.
+// It returns the number of bytes written and any write error encountered.
+func Fprintf(w io.Writer, format string, a ...any) (n int, err error) {
+ p := newPrinter()
+ p.doPrintf(format, a)
+ n, err = w.Write(p.buf)
+ p.free()
+ return
+}
+
+// Printf formats according to a format specifier and writes to standard output.
+// It returns the number of bytes written and any write error encountered.
+func Printf(format string, a ...any) (n int, err error) {
+ return Fprintf(os.Stdout, format, a...)
+}
+
+// Sprintf formats according to a format specifier and returns the resulting string.
+func Sprintf(format string, a ...any) string {
+ p := newPrinter()
+ p.doPrintf(format, a)
+ s := string(p.buf)
+ p.free()
+ return s
+}
+
+// Appendf formats according to a format specifier, appends the result to the byte
+// slice, and returns the updated slice.
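+// For example (illustrative):
+//
+//	b := fmt.Appendf(nil, "id=%d", 7) // b now holds []byte("id=7")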
+func Appendf(b []byte, format string, a ...any) []byte {
+ p := newPrinter()
+ p.doPrintf(format, a)
+ b = append(b, p.buf...)
+ p.free()
+ return b
+}
+
+// These routines do not take a format string
+
+// Fprint formats using the default formats for its operands and writes to w.
+// Spaces are added between operands when neither is a string.
+// It returns the number of bytes written and any write error encountered.
+func Fprint(w io.Writer, a ...any) (n int, err error) {
+ p := newPrinter()
+ p.doPrint(a)
+ n, err = w.Write(p.buf)
+ p.free()
+ return
+}
+
+// Print formats using the default formats for its operands and writes to standard output.
+// Spaces are added between operands when neither is a string.
+// It returns the number of bytes written and any write error encountered.
+func Print(a ...any) (n int, err error) {
+ return Fprint(os.Stdout, a...)
+}
+
+// Sprint formats using the default formats for its operands and returns the resulting string.
+// Spaces are added between operands when neither is a string.
+func Sprint(a ...any) string {
+ p := newPrinter()
+ p.doPrint(a)
+ s := string(p.buf)
+ p.free()
+ return s
+}
+
+// Append formats using the default formats for its operands, appends the result to
+// the byte slice, and returns the updated slice.
+func Append(b []byte, a ...any) []byte {
+ p := newPrinter()
+ p.doPrint(a)
+ b = append(b, p.buf...)
+ p.free()
+ return b
+}
+
+// These routines end in 'ln', do not take a format string,
+// always add spaces between operands, and add a newline
+// after the last operand.
+
+// Fprintln formats using the default formats for its operands and writes to w.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+func Fprintln(w io.Writer, a ...any) (n int, err error) {
+ p := newPrinter()
+ p.doPrintln(a)
+ n, err = w.Write(p.buf)
+ p.free()
+ return
+}
+
+// Println formats using the default formats for its operands and writes to standard output.
+// Spaces are always added between operands and a newline is appended.
+// It returns the number of bytes written and any write error encountered.
+func Println(a ...any) (n int, err error) {
+ return Fprintln(os.Stdout, a...)
+}
+
+// Sprintln formats using the default formats for its operands and returns the resulting string.
+// Spaces are always added between operands and a newline is appended.
+func Sprintln(a ...any) string {
+ p := newPrinter()
+ p.doPrintln(a)
+ s := string(p.buf)
+ p.free()
+ return s
+}
+
+// Appendln formats using the default formats for its operands, appends the result
+// to the byte slice, and returns the updated slice. Spaces are always added
+// between operands and a newline is appended.
+func Appendln(b []byte, a ...any) []byte {
+ p := newPrinter()
+ p.doPrintln(a)
+ b = append(b, p.buf...)
+ p.free()
+ return b
+}
+
+// getField gets the i'th field of the struct value.
+// If the field is itself an interface, return a value for
+// the thing inside the interface, not the interface itself.
+func getField(v reflect.Value, i int) reflect.Value {
+ val := v.Field(i)
+ if val.Kind() == reflect.Interface && !val.IsNil() {
+ val = val.Elem()
+ }
+ return val
+}
+
+// tooLarge reports whether the magnitude of the integer is
+// too large to be used as a formatting width or precision.
+func tooLarge(x int) bool {
+ const max int = 1e6
+ return x > max || x < -max
+}
+
+// parsenum converts ASCII to integer. num is 0 (and isnum is false) if no number is present.
+func parsenum(s string, start, end int) (num int, isnum bool, newi int) {
+ if start >= end {
+ return 0, false, end
+ }
+ for newi = start; newi < end && '0' <= s[newi] && s[newi] <= '9'; newi++ {
+ if tooLarge(num) {
+ return 0, false, end // Overflow; crazy long number most likely.
+ }
+ num = num*10 + int(s[newi]-'0')
+ isnum = true
+ }
+ return
+}
+
+func (p *pp) unknownType(v reflect.Value) {
+ if !v.IsValid() {
+ p.buf.writeString(nilAngleString)
+ return
+ }
+ p.buf.writeByte('?')
+ p.buf.writeString(v.Type().String())
+ p.buf.writeByte('?')
+}
+
+func (p *pp) badVerb(verb rune) {
+ p.erroring = true
+ p.buf.writeString(percentBangString)
+ p.buf.writeRune(verb)
+ p.buf.writeByte('(')
+ switch {
+ case p.arg != nil:
+ p.buf.writeString(reflect.TypeOf(p.arg).String())
+ p.buf.writeByte('=')
+ p.printArg(p.arg, 'v')
+ case p.value.IsValid():
+ p.buf.writeString(p.value.Type().String())
+ p.buf.writeByte('=')
+ p.printValue(p.value, 'v', 0)
+ default:
+ p.buf.writeString(nilAngleString)
+ }
+ p.buf.writeByte(')')
+ p.erroring = false
+}
+
+func (p *pp) fmtBool(v bool, verb rune) {
+ switch verb {
+ case 't', 'v':
+ p.fmt.fmtBoolean(v)
+ default:
+ p.badVerb(verb)
+ }
+}
+
+// fmt0x64 formats a uint64 in hexadecimal and prefixes it with 0x or
+// not, as requested, by temporarily setting the sharp flag.
+func (p *pp) fmt0x64(v uint64, leading0x bool) {
+ sharp := p.fmt.sharp
+ p.fmt.sharp = leading0x
+ p.fmt.fmtInteger(v, 16, unsigned, 'v', ldigits)
+ p.fmt.sharp = sharp
+}
+
+// fmtInteger formats a signed or unsigned integer.
+func (p *pp) fmtInteger(v uint64, isSigned bool, verb rune) {
+ switch verb {
+ case 'v':
+ if p.fmt.sharpV && !isSigned {
+ p.fmt0x64(v, true)
+ } else {
+ p.fmt.fmtInteger(v, 10, isSigned, verb, ldigits)
+ }
+ case 'd':
+ p.fmt.fmtInteger(v, 10, isSigned, verb, ldigits)
+ case 'b':
+ p.fmt.fmtInteger(v, 2, isSigned, verb, ldigits)
+ case 'o', 'O':
+ p.fmt.fmtInteger(v, 8, isSigned, verb, ldigits)
+ case 'x':
+ p.fmt.fmtInteger(v, 16, isSigned, verb, ldigits)
+ case 'X':
+ p.fmt.fmtInteger(v, 16, isSigned, verb, udigits)
+ case 'c':
+ p.fmt.fmtC(v)
+ case 'q':
+ p.fmt.fmtQc(v)
+ case 'U':
+ p.fmt.fmtUnicode(v)
+ default:
+ p.badVerb(verb)
+ }
+}
+
+// fmtFloat formats a float. The default precision for each verb
+// is specified as the last argument in the call to fmtFloat.
+func (p *pp) fmtFloat(v float64, size int, verb rune) {
+ switch verb {
+ case 'v':
+ p.fmt.fmtFloat(v, size, 'g', -1)
+ case 'b', 'g', 'G', 'x', 'X':
+ p.fmt.fmtFloat(v, size, verb, -1)
+ case 'f', 'e', 'E':
+ p.fmt.fmtFloat(v, size, verb, 6)
+ case 'F':
+ p.fmt.fmtFloat(v, size, 'f', 6)
+ default:
+ p.badVerb(verb)
+ }
+}
+
+// fmtComplex formats a complex number v with
+// r = real(v) and j = imag(v) as (r+ji) using
+// fmtFloat for r and j formatting.
+func (p *pp) fmtComplex(v complex128, size int, verb rune) {
+ // Make sure any unsupported verbs are found before the
+ // calls to fmtFloat to not generate an incorrect error string.
+ switch verb {
+ case 'v', 'b', 'g', 'G', 'x', 'X', 'f', 'F', 'e', 'E':
+ oldPlus := p.fmt.plus
+ p.buf.writeByte('(')
+ p.fmtFloat(real(v), size/2, verb)
+ // Imaginary part always has a sign.
+ p.fmt.plus = true
+ p.fmtFloat(imag(v), size/2, verb)
+ p.buf.writeString("i)")
+ p.fmt.plus = oldPlus
+ default:
+ p.badVerb(verb)
+ }
+}
+
+func (p *pp) fmtString(v string, verb rune) {
+ switch verb {
+ case 'v':
+ if p.fmt.sharpV {
+ p.fmt.fmtQ(v)
+ } else {
+ p.fmt.fmtS(v)
+ }
+ case 's':
+ p.fmt.fmtS(v)
+ case 'x':
+ p.fmt.fmtSx(v, ldigits)
+ case 'X':
+ p.fmt.fmtSx(v, udigits)
+ case 'q':
+ p.fmt.fmtQ(v)
+ default:
+ p.badVerb(verb)
+ }
+}
+
+func (p *pp) fmtBytes(v []byte, verb rune, typeString string) {
+ switch verb {
+ case 'v', 'd':
+ if p.fmt.sharpV {
+ p.buf.writeString(typeString)
+ if v == nil {
+ p.buf.writeString(nilParenString)
+ return
+ }
+ p.buf.writeByte('{')
+ for i, c := range v {
+ if i > 0 {
+ p.buf.writeString(commaSpaceString)
+ }
+ p.fmt0x64(uint64(c), true)
+ }
+ p.buf.writeByte('}')
+ } else {
+ p.buf.writeByte('[')
+ for i, c := range v {
+ if i > 0 {
+ p.buf.writeByte(' ')
+ }
+ p.fmt.fmtInteger(uint64(c), 10, unsigned, verb, ldigits)
+ }
+ p.buf.writeByte(']')
+ }
+ case 's':
+ p.fmt.fmtBs(v)
+ case 'x':
+ p.fmt.fmtBx(v, ldigits)
+ case 'X':
+ p.fmt.fmtBx(v, udigits)
+ case 'q':
+ p.fmt.fmtQ(string(v))
+ default:
+ p.printValue(reflect.ValueOf(v), verb, 0)
+ }
+}
+
+func (p *pp) fmtPointer(value reflect.Value, verb rune) {
+ var u uintptr
+ switch value.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Pointer, reflect.Slice, reflect.UnsafePointer:
+ u = value.Pointer()
+ default:
+ p.badVerb(verb)
+ return
+ }
+
+ switch verb {
+ case 'v':
+ if p.fmt.sharpV {
+ p.buf.writeByte('(')
+ p.buf.writeString(value.Type().String())
+ p.buf.writeString(")(")
+ if u == 0 {
+ p.buf.writeString(nilString)
+ } else {
+ p.fmt0x64(uint64(u), true)
+ }
+ p.buf.writeByte(')')
+ } else {
+ if u == 0 {
+ p.fmt.padString(nilAngleString)
+ } else {
+ p.fmt0x64(uint64(u), !p.fmt.sharp)
+ }
+ }
+ case 'p':
+ p.fmt0x64(uint64(u), !p.fmt.sharp)
+ case 'b', 'o', 'd', 'x', 'X':
+ p.fmtInteger(uint64(u), unsigned, verb)
+ default:
+ p.badVerb(verb)
+ }
+}
+
+func (p *pp) catchPanic(arg any, verb rune, method string) {
+ if err := recover(); err != nil {
+ // If it's a nil pointer, just say "<nil>". The likeliest causes are a
+ // Stringer that fails to guard against nil or a nil pointer for a
+ // value receiver, and in either case, "<nil>" is a nice result.
+ if v := reflect.ValueOf(arg); v.Kind() == reflect.Pointer && v.IsNil() {
+ p.buf.writeString(nilAngleString)
+ return
+ }
+ // Otherwise print a concise panic message. Most of the time the panic
+ // value will print itself nicely.
+ if p.panicking {
+ // Nested panics; the recursion in printArg cannot succeed.
+ panic(err)
+ }
+
+ oldFlags := p.fmt.fmtFlags
+ // For this output we want default behavior.
+ p.fmt.clearflags()
+
+ p.buf.writeString(percentBangString)
+ p.buf.writeRune(verb)
+ p.buf.writeString(panicString)
+ p.buf.writeString(method)
+ p.buf.writeString(" method: ")
+ p.panicking = true
+ p.printArg(err, 'v')
+ p.panicking = false
+ p.buf.writeByte(')')
+
+ p.fmt.fmtFlags = oldFlags
+ }
+}
+
+func (p *pp) handleMethods(verb rune) (handled bool) {
+ if p.erroring {
+ return
+ }
+ if verb == 'w' {
+ // It is invalid to use %w other than with Errorf, more than once,
+ // or with a non-error arg.
+ err, ok := p.arg.(error)
+ if !ok || !p.wrapErrs || p.wrappedErr != nil {
+ p.wrappedErr = nil
+ p.wrapErrs = false
+ p.badVerb(verb)
+ return true
+ }
+ p.wrappedErr = err
+ // If the arg is a Formatter, pass 'v' as the verb to it.
+ verb = 'v'
+ }
+
+ // Is it a Formatter?
+ if formatter, ok := p.arg.(Formatter); ok {
+ handled = true
+ defer p.catchPanic(p.arg, verb, "Format")
+ formatter.Format(p, verb)
+ return
+ }
+
+ // If we're doing Go syntax and the argument knows how to supply it, take care of it now.
+ if p.fmt.sharpV {
+ if stringer, ok := p.arg.(GoStringer); ok {
+ handled = true
+ defer p.catchPanic(p.arg, verb, "GoString")
+ // Print the result of GoString unadorned.
+ p.fmt.fmtS(stringer.GoString())
+ return
+ }
+ } else {
+ // If a string is acceptable according to the format, see if
+ // the value satisfies one of the string-valued interfaces.
+ // Println etc. set verb to %v, which is "stringable".
+ switch verb {
+ case 'v', 's', 'x', 'X', 'q':
+ // Is it an error or Stringer?
+ // The duplication in the bodies is necessary:
+ // setting handled and deferring catchPanic
+ // must happen before calling the method.
+ switch v := p.arg.(type) {
+ case error:
+ handled = true
+ defer p.catchPanic(p.arg, verb, "Error")
+ p.fmtString(v.Error(), verb)
+ return
+
+ case Stringer:
+ handled = true
+ defer p.catchPanic(p.arg, verb, "String")
+ p.fmtString(v.String(), verb)
+ return
+ }
+ }
+ }
+ return false
+}
+
+func (p *pp) printArg(arg any, verb rune) {
+ p.arg = arg
+ p.value = reflect.Value{}
+
+ if arg == nil {
+ switch verb {
+ case 'T', 'v':
+ p.fmt.padString(nilAngleString)
+ default:
+ p.badVerb(verb)
+ }
+ return
+ }
+
+ // Special processing considerations.
+ // %T (the value's type) and %p (its address) are special; we always do them first.
+ switch verb {
+ case 'T':
+ p.fmt.fmtS(reflect.TypeOf(arg).String())
+ return
+ case 'p':
+ p.fmtPointer(reflect.ValueOf(arg), 'p')
+ return
+ }
+
+ // Some types can be done without reflection.
+ switch f := arg.(type) {
+ case bool:
+ p.fmtBool(f, verb)
+ case float32:
+ p.fmtFloat(float64(f), 32, verb)
+ case float64:
+ p.fmtFloat(f, 64, verb)
+ case complex64:
+ p.fmtComplex(complex128(f), 64, verb)
+ case complex128:
+ p.fmtComplex(f, 128, verb)
+ case int:
+ p.fmtInteger(uint64(f), signed, verb)
+ case int8:
+ p.fmtInteger(uint64(f), signed, verb)
+ case int16:
+ p.fmtInteger(uint64(f), signed, verb)
+ case int32:
+ p.fmtInteger(uint64(f), signed, verb)
+ case int64:
+ p.fmtInteger(uint64(f), signed, verb)
+ case uint:
+ p.fmtInteger(uint64(f), unsigned, verb)
+ case uint8:
+ p.fmtInteger(uint64(f), unsigned, verb)
+ case uint16:
+ p.fmtInteger(uint64(f), unsigned, verb)
+ case uint32:
+ p.fmtInteger(uint64(f), unsigned, verb)
+ case uint64:
+ p.fmtInteger(f, unsigned, verb)
+ case uintptr:
+ p.fmtInteger(uint64(f), unsigned, verb)
+ case string:
+ p.fmtString(f, verb)
+ case []byte:
+ p.fmtBytes(f, verb, "[]byte")
+ case reflect.Value:
+ // Handle extractable values with special methods
+ // since printValue does not handle them at depth 0.
+ if f.IsValid() && f.CanInterface() {
+ p.arg = f.Interface()
+ if p.handleMethods(verb) {
+ return
+ }
+ }
+ p.printValue(f, verb, 0)
+ default:
+ // If the type is not simple, it might have methods.
+ if !p.handleMethods(verb) {
+ // Need to use reflection, since the type had no
+ // interface methods that could be used for formatting.
+ p.printValue(reflect.ValueOf(f), verb, 0)
+ }
+ }
+}
+
+// printValue is similar to printArg but starts with a reflect value, not an interface{} value.
+// It does not handle 'p' and 'T' verbs because these should have been already handled by printArg.
+func (p *pp) printValue(value reflect.Value, verb rune, depth int) {
+ // Handle values with special methods if not already handled by printArg (depth == 0).
+ if depth > 0 && value.IsValid() && value.CanInterface() {
+ p.arg = value.Interface()
+ if p.handleMethods(verb) {
+ return
+ }
+ }
+ p.arg = nil
+ p.value = value
+
+ switch f := value; value.Kind() {
+ case reflect.Invalid:
+ if depth == 0 {
+ p.buf.writeString(invReflectString)
+ } else {
+ switch verb {
+ case 'v':
+ p.buf.writeString(nilAngleString)
+ default:
+ p.badVerb(verb)
+ }
+ }
+ case reflect.Bool:
+ p.fmtBool(f.Bool(), verb)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p.fmtInteger(uint64(f.Int()), signed, verb)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p.fmtInteger(f.Uint(), unsigned, verb)
+ case reflect.Float32:
+ p.fmtFloat(f.Float(), 32, verb)
+ case reflect.Float64:
+ p.fmtFloat(f.Float(), 64, verb)
+ case reflect.Complex64:
+ p.fmtComplex(f.Complex(), 64, verb)
+ case reflect.Complex128:
+ p.fmtComplex(f.Complex(), 128, verb)
+ case reflect.String:
+ p.fmtString(f.String(), verb)
+ case reflect.Map:
+ if p.fmt.sharpV {
+ p.buf.writeString(f.Type().String())
+ if f.IsNil() {
+ p.buf.writeString(nilParenString)
+ return
+ }
+ p.buf.writeByte('{')
+ } else {
+ p.buf.writeString(mapString)
+ }
+ sorted := fmtsort.Sort(f)
+ for i, key := range sorted.Key {
+ if i > 0 {
+ if p.fmt.sharpV {
+ p.buf.writeString(commaSpaceString)
+ } else {
+ p.buf.writeByte(' ')
+ }
+ }
+ p.printValue(key, verb, depth+1)
+ p.buf.writeByte(':')
+ p.printValue(sorted.Value[i], verb, depth+1)
+ }
+ if p.fmt.sharpV {
+ p.buf.writeByte('}')
+ } else {
+ p.buf.writeByte(']')
+ }
+ case reflect.Struct:
+ if p.fmt.sharpV {
+ p.buf.writeString(f.Type().String())
+ }
+ p.buf.writeByte('{')
+ for i := 0; i < f.NumField(); i++ {
+ if i > 0 {
+ if p.fmt.sharpV {
+ p.buf.writeString(commaSpaceString)
+ } else {
+ p.buf.writeByte(' ')
+ }
+ }
+ if p.fmt.plusV || p.fmt.sharpV {
+ if name := f.Type().Field(i).Name; name != "" {
+ p.buf.writeString(name)
+ p.buf.writeByte(':')
+ }
+ }
+ p.printValue(getField(f, i), verb, depth+1)
+ }
+ p.buf.writeByte('}')
+ case reflect.Interface:
+ value := f.Elem()
+ if !value.IsValid() {
+ if p.fmt.sharpV {
+ p.buf.writeString(f.Type().String())
+ p.buf.writeString(nilParenString)
+ } else {
+ p.buf.writeString(nilAngleString)
+ }
+ } else {
+ p.printValue(value, verb, depth+1)
+ }
+ case reflect.Array, reflect.Slice:
+ switch verb {
+ case 's', 'q', 'x', 'X':
+			// Handle byte and uint8 slices and arrays specially for the above verbs.
+ t := f.Type()
+ if t.Elem().Kind() == reflect.Uint8 {
+ var bytes []byte
+ if f.Kind() == reflect.Slice {
+ bytes = f.Bytes()
+ } else if f.CanAddr() {
+ bytes = f.Slice(0, f.Len()).Bytes()
+ } else {
+ // We have an array, but we cannot Slice() a non-addressable array,
+ // so we build a slice by hand. This is a rare case but it would be nice
+ // if reflection could help a little more.
+ bytes = make([]byte, f.Len())
+ for i := range bytes {
+ bytes[i] = byte(f.Index(i).Uint())
+ }
+ }
+ p.fmtBytes(bytes, verb, t.String())
+ return
+ }
+ }
+ if p.fmt.sharpV {
+ p.buf.writeString(f.Type().String())
+ if f.Kind() == reflect.Slice && f.IsNil() {
+ p.buf.writeString(nilParenString)
+ return
+ }
+ p.buf.writeByte('{')
+ for i := 0; i < f.Len(); i++ {
+ if i > 0 {
+ p.buf.writeString(commaSpaceString)
+ }
+ p.printValue(f.Index(i), verb, depth+1)
+ }
+ p.buf.writeByte('}')
+ } else {
+ p.buf.writeByte('[')
+ for i := 0; i < f.Len(); i++ {
+ if i > 0 {
+ p.buf.writeByte(' ')
+ }
+ p.printValue(f.Index(i), verb, depth+1)
+ }
+ p.buf.writeByte(']')
+ }
+ case reflect.Pointer:
+ // pointer to array or slice or struct? ok at top level
+ // but not embedded (avoid loops)
+ if depth == 0 && f.Pointer() != 0 {
+ switch a := f.Elem(); a.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Struct, reflect.Map:
+ p.buf.writeByte('&')
+ p.printValue(a, verb, depth+1)
+ return
+ }
+ }
+ fallthrough
+ case reflect.Chan, reflect.Func, reflect.UnsafePointer:
+ p.fmtPointer(f, verb)
+ default:
+ p.unknownType(f)
+ }
+}
+
+// intFromArg gets the argNumth element of a. On return, isInt reports whether the argument has integer type.
+func intFromArg(a []any, argNum int) (num int, isInt bool, newArgNum int) {
+ newArgNum = argNum
+ if argNum < len(a) {
+ num, isInt = a[argNum].(int) // Almost always OK.
+ if !isInt {
+ // Work harder.
+ switch v := reflect.ValueOf(a[argNum]); v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n := v.Int()
+ if int64(int(n)) == n {
+ num = int(n)
+ isInt = true
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n := v.Uint()
+ if int64(n) >= 0 && uint64(int(n)) == n {
+ num = int(n)
+ isInt = true
+ }
+ default:
+ // Already 0, false.
+ }
+ }
+ newArgNum = argNum + 1
+ if tooLarge(num) {
+ num = 0
+ isInt = false
+ }
+ }
+ return
+}
+
+// parseArgNumber returns the value of the bracketed number, minus 1
+// (explicit argument numbers are one-indexed but we want zero-indexed).
+// The opening bracket is known to be present at format[0].
+// The returned values are the index, the number of bytes to consume
+// up to the closing bracket, if present, and whether the number parsed
+// ok. The bytes to consume will be 1 if no closing bracket is present.
+func parseArgNumber(format string) (index int, wid int, ok bool) {
+ // There must be at least 3 bytes: [n].
+ if len(format) < 3 {
+ return 0, 1, false
+ }
+
+ // Find closing bracket.
+ for i := 1; i < len(format); i++ {
+ if format[i] == ']' {
+ width, ok, newi := parsenum(format, 1, i)
+ if !ok || newi != i {
+ return 0, i + 1, false
+ }
+			return width - 1, i + 1, true // arg numbers are one-indexed and skip the bracket.
+ }
+ }
+ return 0, 1, false
+}
+
+// argNumber returns the next argument to evaluate, which is either the value of the passed-in
+// argNum or the value of the bracketed integer that begins format[i:]. It also returns
+// the new value of i, that is, the index of the next byte of the format to process.
+func (p *pp) argNumber(argNum int, format string, i int, numArgs int) (newArgNum, newi int, found bool) {
+ if len(format) <= i || format[i] != '[' {
+ return argNum, i, false
+ }
+ p.reordered = true
+ index, wid, ok := parseArgNumber(format[i:])
+ if ok && 0 <= index && index < numArgs {
+ return index, i + wid, true
+ }
+ p.goodArgNum = false
+ return argNum, i + wid, ok
+}
+
+func (p *pp) badArgNum(verb rune) {
+ p.buf.writeString(percentBangString)
+ p.buf.writeRune(verb)
+ p.buf.writeString(badIndexString)
+}
+
+func (p *pp) missingArg(verb rune) {
+ p.buf.writeString(percentBangString)
+ p.buf.writeRune(verb)
+ p.buf.writeString(missingString)
+}
+
+func (p *pp) doPrintf(format string, a []any) {
+ end := len(format)
+ argNum := 0 // we process one argument per non-trivial format
+ afterIndex := false // previous item in format was an index like [3].
+ p.reordered = false
+formatLoop:
+ for i := 0; i < end; {
+ p.goodArgNum = true
+ lasti := i
+ for i < end && format[i] != '%' {
+ i++
+ }
+ if i > lasti {
+ p.buf.writeString(format[lasti:i])
+ }
+ if i >= end {
+ // done processing format string
+ break
+ }
+
+ // Process one verb
+ i++
+
+ // Do we have flags?
+ p.fmt.clearflags()
+ simpleFormat:
+ for ; i < end; i++ {
+ c := format[i]
+ switch c {
+ case '#':
+ p.fmt.sharp = true
+ case '0':
+ p.fmt.zero = !p.fmt.minus // Only allow zero padding to the left.
+ case '+':
+ p.fmt.plus = true
+ case '-':
+ p.fmt.minus = true
+ p.fmt.zero = false // Do not pad with zeros to the right.
+ case ' ':
+ p.fmt.space = true
+ default:
+				// Fast path for the common case of ASCII lower-case simple verbs
+ // without precision or width or argument indices.
+ if 'a' <= c && c <= 'z' && argNum < len(a) {
+ if c == 'v' {
+ // Go syntax
+ p.fmt.sharpV = p.fmt.sharp
+ p.fmt.sharp = false
+ // Struct-field syntax
+ p.fmt.plusV = p.fmt.plus
+ p.fmt.plus = false
+ }
+ p.printArg(a[argNum], rune(c))
+ argNum++
+ i++
+ continue formatLoop
+ }
+ // Format is more complex than simple flags and a verb or is malformed.
+ break simpleFormat
+ }
+ }
+
+ // Do we have an explicit argument index?
+ argNum, i, afterIndex = p.argNumber(argNum, format, i, len(a))
+
+ // Do we have width?
+ if i < end && format[i] == '*' {
+ i++
+ p.fmt.wid, p.fmt.widPresent, argNum = intFromArg(a, argNum)
+
+ if !p.fmt.widPresent {
+ p.buf.writeString(badWidthString)
+ }
+
+			// If the width is negative, take its absolute value and ensure
+			// that the minus flag is set.
+ if p.fmt.wid < 0 {
+ p.fmt.wid = -p.fmt.wid
+ p.fmt.minus = true
+ p.fmt.zero = false // Do not pad with zeros to the right.
+ }
+ afterIndex = false
+ } else {
+ p.fmt.wid, p.fmt.widPresent, i = parsenum(format, i, end)
+ if afterIndex && p.fmt.widPresent { // "%[3]2d"
+ p.goodArgNum = false
+ }
+ }
+
+ // Do we have precision?
+ if i+1 < end && format[i] == '.' {
+ i++
+ if afterIndex { // "%[3].2d"
+ p.goodArgNum = false
+ }
+ argNum, i, afterIndex = p.argNumber(argNum, format, i, len(a))
+ if i < end && format[i] == '*' {
+ i++
+ p.fmt.prec, p.fmt.precPresent, argNum = intFromArg(a, argNum)
+ // Negative precision arguments don't make sense
+ if p.fmt.prec < 0 {
+ p.fmt.prec = 0
+ p.fmt.precPresent = false
+ }
+ if !p.fmt.precPresent {
+ p.buf.writeString(badPrecString)
+ }
+ afterIndex = false
+ } else {
+ p.fmt.prec, p.fmt.precPresent, i = parsenum(format, i, end)
+ if !p.fmt.precPresent {
+ p.fmt.prec = 0
+ p.fmt.precPresent = true
+ }
+ }
+ }
+
+ if !afterIndex {
+ argNum, i, afterIndex = p.argNumber(argNum, format, i, len(a))
+ }
+
+ if i >= end {
+ p.buf.writeString(noVerbString)
+ break
+ }
+
+ verb, size := rune(format[i]), 1
+ if verb >= utf8.RuneSelf {
+ verb, size = utf8.DecodeRuneInString(format[i:])
+ }
+ i += size
+
+ switch {
+ case verb == '%': // Percent does not absorb operands and ignores f.wid and f.prec.
+ p.buf.writeByte('%')
+ case !p.goodArgNum:
+ p.badArgNum(verb)
+ case argNum >= len(a): // No argument left over to print for the current verb.
+ p.missingArg(verb)
+ case verb == 'v':
+ // Go syntax
+ p.fmt.sharpV = p.fmt.sharp
+ p.fmt.sharp = false
+ // Struct-field syntax
+ p.fmt.plusV = p.fmt.plus
+ p.fmt.plus = false
+ fallthrough
+ default:
+ p.printArg(a[argNum], verb)
+ argNum++
+ }
+ }
+
+ // Check for extra arguments unless the call accessed the arguments
+ // out of order, in which case it's too expensive to detect if they've all
+ // been used and arguably OK if they're not.
+ if !p.reordered && argNum < len(a) {
+ p.fmt.clearflags()
+ p.buf.writeString(extraString)
+ for i, arg := range a[argNum:] {
+ if i > 0 {
+ p.buf.writeString(commaSpaceString)
+ }
+ if arg == nil {
+ p.buf.writeString(nilAngleString)
+ } else {
+ p.buf.writeString(reflect.TypeOf(arg).String())
+ p.buf.writeByte('=')
+ p.printArg(arg, 'v')
+ }
+ }
+ p.buf.writeByte(')')
+ }
+}
+
+func (p *pp) doPrint(a []any) {
+ prevString := false
+ for argNum, arg := range a {
+ isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String
+ // Add a space between two non-string arguments.
+ if argNum > 0 && !isString && !prevString {
+ p.buf.writeByte(' ')
+ }
+ p.printArg(arg, 'v')
+ prevString = isString
+ }
+}
+
+// doPrintln is like doPrint but always adds a space between arguments
+// and a newline after the last argument.
+func (p *pp) doPrintln(a []any) {
+ for argNum, arg := range a {
+ if argNum > 0 {
+ p.buf.writeByte(' ')
+ }
+ p.printArg(arg, 'v')
+ }
+ p.buf.writeByte('\n')
+}
diff --git a/contrib/go/_std_1.18/src/fmt/scan.go b/contrib/go/_std_1.19/src/fmt/scan.go
index d38610df35..d38610df35 100644
--- a/contrib/go/_std_1.18/src/fmt/scan.go
+++ b/contrib/go/_std_1.19/src/fmt/scan.go
diff --git a/contrib/go/_std_1.18/src/hash/crc32/crc32.go b/contrib/go/_std_1.19/src/hash/crc32/crc32.go
index f330fdb77a..f330fdb77a 100644
--- a/contrib/go/_std_1.18/src/hash/crc32/crc32.go
+++ b/contrib/go/_std_1.19/src/hash/crc32/crc32.go
diff --git a/contrib/go/_std_1.19/src/hash/crc32/crc32_amd64.go b/contrib/go/_std_1.19/src/hash/crc32/crc32_amd64.go
new file mode 100644
index 0000000000..6be129f5dd
--- /dev/null
+++ b/contrib/go/_std_1.19/src/hash/crc32/crc32_amd64.go
@@ -0,0 +1,225 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// AMD64-specific hardware-assisted CRC32 algorithms. See crc32.go for a
+// description of the interface that each architecture-specific file
+// implements.
+
+package crc32
+
+import (
+ "internal/cpu"
+ "unsafe"
+)
+
+// This file contains the code to call the SSE 4.2 version of the Castagnoli
+// and IEEE CRC.
+
+// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE 4.2 CRC32
+// instruction.
+//
+//go:noescape
+func castagnoliSSE42(crc uint32, p []byte) uint32
+
+// castagnoliSSE42Triple is defined in crc32_amd64.s and uses the SSE 4.2 CRC32
+// instruction.
+//
+//go:noescape
+func castagnoliSSE42Triple(
+ crcA, crcB, crcC uint32,
+ a, b, c []byte,
+ rounds uint32,
+) (retA uint32, retB uint32, retC uint32)
+
+// ieeeCLMUL is defined in crc32_amd64.s and uses the PCLMULQDQ
+// instruction as well as SSE 4.1.
+//
+//go:noescape
+func ieeeCLMUL(crc uint32, p []byte) uint32
+
+const castagnoliK1 = 168
+const castagnoliK2 = 1344
+
+type sse42Table [4]Table
+
+var castagnoliSSE42TableK1 *sse42Table
+var castagnoliSSE42TableK2 *sse42Table
+
+func archAvailableCastagnoli() bool {
+ return cpu.X86.HasSSE42
+}
+
+func archInitCastagnoli() {
+ if !cpu.X86.HasSSE42 {
+ panic("arch-specific Castagnoli not available")
+ }
+ castagnoliSSE42TableK1 = new(sse42Table)
+ castagnoliSSE42TableK2 = new(sse42Table)
+ // See description in updateCastagnoli.
+ // t[0][i] = CRC(i000, O)
+ // t[1][i] = CRC(0i00, O)
+ // t[2][i] = CRC(00i0, O)
+ // t[3][i] = CRC(000i, O)
+ // where O is a sequence of K zeros.
+ var tmp [castagnoliK2]byte
+ for b := 0; b < 4; b++ {
+ for i := 0; i < 256; i++ {
+ val := uint32(i) << uint32(b*8)
+ castagnoliSSE42TableK1[b][i] = castagnoliSSE42(val, tmp[:castagnoliK1])
+ castagnoliSSE42TableK2[b][i] = castagnoliSSE42(val, tmp[:])
+ }
+ }
+}
+
+// castagnoliShift computes the CRC32-C of K1 or K2 zeroes (depending on the
+// table given) with the given initial crc value. This corresponds to
+// CRC(crc, O) in the description in updateCastagnoli.
+func castagnoliShift(table *sse42Table, crc uint32) uint32 {
+ return table[3][crc>>24] ^
+ table[2][(crc>>16)&0xFF] ^
+ table[1][(crc>>8)&0xFF] ^
+ table[0][crc&0xFF]
+}
+
+func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
+ if !cpu.X86.HasSSE42 {
+ panic("not available")
+ }
+
+ // This method is inspired from the algorithm in Intel's white paper:
+ // "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction"
+ // The same strategy of splitting the buffer in three is used but the
+ // combining calculation is different; the complete derivation is explained
+ // below.
+ //
+ // -- The basic idea --
+ //
+ // The CRC32 instruction (available in SSE4.2) can process 8 bytes at a
+ // time. In recent Intel architectures the instruction takes 3 cycles;
+ // however the processor can pipeline up to three instructions if they
+ // don't depend on each other.
+ //
+ // Roughly this means that we can process three buffers in about the same
+ // time we can process one buffer.
+ //
+ // The idea is then to split the buffer in three, CRC the three pieces
+ // separately and then combine the results.
+ //
+ // Combining the results requires precomputed tables, so we must choose a
+ // fixed buffer length to optimize. The longer the length, the faster; but
+ // only buffers longer than this length will use the optimization. We choose
+ // two cutoffs and compute tables for both:
+ // - one around 512: 168*3=504
+ // - one around 4KB: 1344*3=4032
+ //
+ // -- The nitty gritty --
+ //
+ // Let CRC(I, X) be the non-inverted CRC32-C of the sequence X (with
+ // initial non-inverted CRC I). This function has the following properties:
+ // (a) CRC(I, AB) = CRC(CRC(I, A), B)
+ // (b) CRC(I, A xor B) = CRC(I, A) xor CRC(0, B)
+ //
+ // Say we want to compute CRC(I, ABC) where A, B, C are three sequences of
+ // K bytes each, where K is a fixed constant. Let O be the sequence of K zero
+ // bytes.
+ //
+ // CRC(I, ABC) = CRC(I, ABO xor C)
+ // = CRC(I, ABO) xor CRC(0, C)
+ // = CRC(CRC(I, AB), O) xor CRC(0, C)
+ // = CRC(CRC(I, AO xor B), O) xor CRC(0, C)
+ // = CRC(CRC(I, AO) xor CRC(0, B), O) xor CRC(0, C)
+ // = CRC(CRC(CRC(I, A), O) xor CRC(0, B), O) xor CRC(0, C)
+ //
+ // The castagnoliSSE42Triple function can compute CRC(I, A), CRC(0, B),
+ // and CRC(0, C) efficiently. We just need to find a way to quickly compute
+ // CRC(uvwx, O) given a 4-byte initial value uvwx. We can precompute these
+ // values; since we can't have a 32-bit table, we break it up into four
+ // 8-bit tables:
+ //
+ // CRC(uvwx, O) = CRC(u000, O) xor
+ // CRC(0v00, O) xor
+ // CRC(00w0, O) xor
+ // CRC(000x, O)
+ //
+ // We can compute tables corresponding to the four terms for all 8-bit
+ // values.
+
+ crc = ^crc
+
+ // If a buffer is long enough to use the optimization, process the first few
+	// bytes to align the buffer to an 8-byte boundary (if necessary).
+ if len(p) >= castagnoliK1*3 {
+ delta := int(uintptr(unsafe.Pointer(&p[0])) & 7)
+ if delta != 0 {
+ delta = 8 - delta
+ crc = castagnoliSSE42(crc, p[:delta])
+ p = p[delta:]
+ }
+ }
+
+ // Process 3*K2 at a time.
+ for len(p) >= castagnoliK2*3 {
+ // Compute CRC(I, A), CRC(0, B), and CRC(0, C).
+ crcA, crcB, crcC := castagnoliSSE42Triple(
+ crc, 0, 0,
+ p, p[castagnoliK2:], p[castagnoliK2*2:],
+ castagnoliK2/24)
+
+ // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B)
+ crcAB := castagnoliShift(castagnoliSSE42TableK2, crcA) ^ crcB
+ // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C)
+ crc = castagnoliShift(castagnoliSSE42TableK2, crcAB) ^ crcC
+ p = p[castagnoliK2*3:]
+ }
+
+ // Process 3*K1 at a time.
+ for len(p) >= castagnoliK1*3 {
+ // Compute CRC(I, A), CRC(0, B), and CRC(0, C).
+ crcA, crcB, crcC := castagnoliSSE42Triple(
+ crc, 0, 0,
+ p, p[castagnoliK1:], p[castagnoliK1*2:],
+ castagnoliK1/24)
+
+ // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B)
+ crcAB := castagnoliShift(castagnoliSSE42TableK1, crcA) ^ crcB
+ // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C)
+ crc = castagnoliShift(castagnoliSSE42TableK1, crcAB) ^ crcC
+ p = p[castagnoliK1*3:]
+ }
+
+ // Use the simple implementation for what's left.
+ crc = castagnoliSSE42(crc, p)
+ return ^crc
+}
+
+func archAvailableIEEE() bool {
+ return cpu.X86.HasPCLMULQDQ && cpu.X86.HasSSE41
+}
+
+var archIeeeTable8 *slicing8Table
+
+func archInitIEEE() {
+ if !cpu.X86.HasPCLMULQDQ || !cpu.X86.HasSSE41 {
+ panic("not available")
+ }
+ // We still use slicing-by-8 for small buffers.
+ archIeeeTable8 = slicingMakeTable(IEEE)
+}
+
+func archUpdateIEEE(crc uint32, p []byte) uint32 {
+ if !cpu.X86.HasPCLMULQDQ || !cpu.X86.HasSSE41 {
+ panic("not available")
+ }
+
+ if len(p) >= 64 {
+ left := len(p) & 15
+ do := len(p) - left
+ crc = ^ieeeCLMUL(^crc, p[:do])
+ p = p[do:]
+ }
+ if len(p) == 0 {
+ return crc
+ }
+ return slicingUpdate(crc, archIeeeTable8, p)
+}
diff --git a/contrib/go/_std_1.18/src/hash/crc32/crc32_amd64.s b/contrib/go/_std_1.19/src/hash/crc32/crc32_amd64.s
index 6af6c253a7..6af6c253a7 100644
--- a/contrib/go/_std_1.18/src/hash/crc32/crc32_amd64.s
+++ b/contrib/go/_std_1.19/src/hash/crc32/crc32_amd64.s
diff --git a/contrib/go/_std_1.18/src/hash/crc32/crc32_generic.go b/contrib/go/_std_1.19/src/hash/crc32/crc32_generic.go
index abacbb663d..abacbb663d 100644
--- a/contrib/go/_std_1.18/src/hash/crc32/crc32_generic.go
+++ b/contrib/go/_std_1.19/src/hash/crc32/crc32_generic.go
diff --git a/contrib/go/_std_1.18/src/hash/hash.go b/contrib/go/_std_1.19/src/hash/hash.go
index 62cf6a4518..62cf6a4518 100644
--- a/contrib/go/_std_1.18/src/hash/hash.go
+++ b/contrib/go/_std_1.19/src/hash/hash.go
diff --git a/contrib/go/_std_1.18/src/internal/abi/abi.go b/contrib/go/_std_1.19/src/internal/abi/abi.go
index 11acac346f..11acac346f 100644
--- a/contrib/go/_std_1.18/src/internal/abi/abi.go
+++ b/contrib/go/_std_1.19/src/internal/abi/abi.go
diff --git a/contrib/go/_std_1.18/src/internal/abi/abi_amd64.go b/contrib/go/_std_1.19/src/internal/abi/abi_amd64.go
index d3c5678223..d3c5678223 100644
--- a/contrib/go/_std_1.18/src/internal/abi/abi_amd64.go
+++ b/contrib/go/_std_1.19/src/internal/abi/abi_amd64.go
diff --git a/contrib/go/_std_1.18/src/internal/abi/abi_test.s b/contrib/go/_std_1.19/src/internal/abi/abi_test.s
index 93ace3ef48..93ace3ef48 100644
--- a/contrib/go/_std_1.18/src/internal/abi/abi_test.s
+++ b/contrib/go/_std_1.19/src/internal/abi/abi_test.s
diff --git a/contrib/go/_std_1.18/src/internal/bytealg/bytealg.go b/contrib/go/_std_1.19/src/internal/bytealg/bytealg.go
index ebebce75fe..ebebce75fe 100644
--- a/contrib/go/_std_1.18/src/internal/bytealg/bytealg.go
+++ b/contrib/go/_std_1.19/src/internal/bytealg/bytealg.go
diff --git a/contrib/go/_std_1.18/src/internal/bytealg/compare_amd64.s b/contrib/go/_std_1.19/src/internal/bytealg/compare_amd64.s
index 4ccaca5e87..4ccaca5e87 100644
--- a/contrib/go/_std_1.18/src/internal/bytealg/compare_amd64.s
+++ b/contrib/go/_std_1.19/src/internal/bytealg/compare_amd64.s
diff --git a/contrib/go/_std_1.19/src/internal/bytealg/compare_native.go b/contrib/go/_std_1.19/src/internal/bytealg/compare_native.go
new file mode 100644
index 0000000000..34964e281c
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/bytealg/compare_native.go
@@ -0,0 +1,19 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64 || s390x || arm || arm64 || loong64 || ppc64 || ppc64le || mips || mipsle || wasm || mips64 || mips64le || riscv64
+
+package bytealg
+
+import _ "unsafe" // For go:linkname
+
+//go:noescape
+func Compare(a, b []byte) int
+
+// The declaration below generates ABI wrappers for functions
+// implemented in assembly in this package but declared in another
+// package.
+
+//go:linkname abigen_runtime_cmpstring runtime.cmpstring
+func abigen_runtime_cmpstring(a, b string) int
diff --git a/contrib/go/_std_1.18/src/internal/bytealg/count_amd64.s b/contrib/go/_std_1.19/src/internal/bytealg/count_amd64.s
index fa864c4c76..fa864c4c76 100644
--- a/contrib/go/_std_1.18/src/internal/bytealg/count_amd64.s
+++ b/contrib/go/_std_1.19/src/internal/bytealg/count_amd64.s
diff --git a/contrib/go/_std_1.18/src/internal/bytealg/count_native.go b/contrib/go/_std_1.19/src/internal/bytealg/count_native.go
index 90189c9fe0..90189c9fe0 100644
--- a/contrib/go/_std_1.18/src/internal/bytealg/count_native.go
+++ b/contrib/go/_std_1.19/src/internal/bytealg/count_native.go
diff --git a/contrib/go/_std_1.18/src/internal/bytealg/equal_amd64.s b/contrib/go/_std_1.19/src/internal/bytealg/equal_amd64.s
index dd46e2e0fd..dd46e2e0fd 100644
--- a/contrib/go/_std_1.18/src/internal/bytealg/equal_amd64.s
+++ b/contrib/go/_std_1.19/src/internal/bytealg/equal_amd64.s
diff --git a/contrib/go/_std_1.18/src/internal/bytealg/equal_generic.go b/contrib/go/_std_1.19/src/internal/bytealg/equal_generic.go
index 59bdf8fdd5..59bdf8fdd5 100644
--- a/contrib/go/_std_1.18/src/internal/bytealg/equal_generic.go
+++ b/contrib/go/_std_1.19/src/internal/bytealg/equal_generic.go
diff --git a/contrib/go/_std_1.18/src/internal/bytealg/equal_native.go b/contrib/go/_std_1.19/src/internal/bytealg/equal_native.go
index cf3a245bc0..cf3a245bc0 100644
--- a/contrib/go/_std_1.18/src/internal/bytealg/equal_native.go
+++ b/contrib/go/_std_1.19/src/internal/bytealg/equal_native.go
diff --git a/contrib/go/_std_1.18/src/internal/bytealg/index_amd64.go b/contrib/go/_std_1.19/src/internal/bytealg/index_amd64.go
index c7a1941e5f..c7a1941e5f 100644
--- a/contrib/go/_std_1.18/src/internal/bytealg/index_amd64.go
+++ b/contrib/go/_std_1.19/src/internal/bytealg/index_amd64.go
diff --git a/contrib/go/_std_1.18/src/internal/bytealg/index_amd64.s b/contrib/go/_std_1.19/src/internal/bytealg/index_amd64.s
index 6193b57239..6193b57239 100644
--- a/contrib/go/_std_1.18/src/internal/bytealg/index_amd64.s
+++ b/contrib/go/_std_1.19/src/internal/bytealg/index_amd64.s
diff --git a/contrib/go/_std_1.18/src/internal/bytealg/index_native.go b/contrib/go/_std_1.19/src/internal/bytealg/index_native.go
index 6e4a2f39e4..6e4a2f39e4 100644
--- a/contrib/go/_std_1.18/src/internal/bytealg/index_native.go
+++ b/contrib/go/_std_1.19/src/internal/bytealg/index_native.go
diff --git a/contrib/go/_std_1.18/src/internal/bytealg/indexbyte_amd64.s b/contrib/go/_std_1.19/src/internal/bytealg/indexbyte_amd64.s
index f78093c539..f78093c539 100644
--- a/contrib/go/_std_1.18/src/internal/bytealg/indexbyte_amd64.s
+++ b/contrib/go/_std_1.19/src/internal/bytealg/indexbyte_amd64.s
diff --git a/contrib/go/_std_1.19/src/internal/bytealg/indexbyte_native.go b/contrib/go/_std_1.19/src/internal/bytealg/indexbyte_native.go
new file mode 100644
index 0000000000..c5bb2df5ea
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/bytealg/indexbyte_native.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64 || s390x || arm || arm64 || loong64 || ppc64 || ppc64le || mips || mipsle || mips64 || mips64le || riscv64 || wasm
+
+package bytealg
+
+//go:noescape
+func IndexByte(b []byte, c byte) int
+
+//go:noescape
+func IndexByteString(s string, c byte) int
diff --git a/contrib/go/_std_1.19/src/internal/cpu/cpu.go b/contrib/go/_std_1.19/src/internal/cpu/cpu.go
new file mode 100644
index 0000000000..ae23b59617
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/cpu/cpu.go
@@ -0,0 +1,221 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cpu implements processor feature detection
+// used by the Go standard library.
+package cpu
+
+// DebugOptions is set to true by the runtime if the OS supports reading
+// GODEBUG early in runtime startup.
+// This should not be changed after it is initialized.
+var DebugOptions bool
+
+// CacheLinePad is used to pad structs to avoid false sharing.
+type CacheLinePad struct{ _ [CacheLinePadSize]byte }
+
+// CacheLineSize is the CPU's assumed cache line size.
+// There is currently no runtime detection of the real cache line size
+// so we use the constant per GOARCH CacheLinePadSize as an approximation.
+var CacheLineSize uintptr = CacheLinePadSize
+
+// The booleans in X86 contain the correspondingly named cpuid feature bit.
+// HasAVX and HasAVX2 are only set if the OS does support XMM and YMM registers
+// in addition to the cpuid feature bit being set.
+// The struct is padded to avoid false sharing.
+var X86 struct {
+ _ CacheLinePad
+ HasAES bool
+ HasADX bool
+ HasAVX bool
+ HasAVX2 bool
+ HasBMI1 bool
+ HasBMI2 bool
+ HasERMS bool
+ HasFMA bool
+ HasOSXSAVE bool
+ HasPCLMULQDQ bool
+ HasPOPCNT bool
+ HasRDTSCP bool
+ HasSSE3 bool
+ HasSSSE3 bool
+ HasSSE41 bool
+ HasSSE42 bool
+ _ CacheLinePad
+}
+
+// The booleans in ARM contain the correspondingly named cpu feature bit.
+// The struct is padded to avoid false sharing.
+var ARM struct {
+ _ CacheLinePad
+ HasVFPv4 bool
+ HasIDIVA bool
+ _ CacheLinePad
+}
+
+// The booleans in ARM64 contain the correspondingly named cpu feature bit.
+// The struct is padded to avoid false sharing.
+var ARM64 struct {
+ _ CacheLinePad
+ HasAES bool
+ HasPMULL bool
+ HasSHA1 bool
+ HasSHA2 bool
+ HasCRC32 bool
+ HasATOMICS bool
+ HasCPUID bool
+ IsNeoverseN1 bool
+ IsZeus bool
+ _ CacheLinePad
+}
+
+var MIPS64X struct {
+ _ CacheLinePad
+ HasMSA bool // MIPS SIMD architecture
+ _ CacheLinePad
+}
+
+// For ppc64(le), it is safe to check only for ISA level starting on ISA v3.00,
+// since there are no optional categories. There are some exceptions that also
+// require kernel support to work (darn, scv), so there are feature bits for
+// those as well. The minimum processor requirement is POWER8 (ISA 2.07).
+// The struct is padded to avoid false sharing.
+var PPC64 struct {
+ _ CacheLinePad
+ HasDARN bool // Hardware random number generator (requires kernel enablement)
+ HasSCV bool // Syscall vectored (requires kernel enablement)
+ IsPOWER8 bool // ISA v2.07 (POWER8)
+ IsPOWER9 bool // ISA v3.00 (POWER9)
+ IsPOWER10 bool // ISA v3.1 (POWER10)
+ _ CacheLinePad
+}
+
+var S390X struct {
+ _ CacheLinePad
+ HasZARCH bool // z architecture mode is active [mandatory]
+ HasSTFLE bool // store facility list extended [mandatory]
+ HasLDISP bool // long (20-bit) displacements [mandatory]
+ HasEIMM bool // 32-bit immediates [mandatory]
+ HasDFP bool // decimal floating point
+ HasETF3EH bool // ETF-3 enhanced
+ HasMSA bool // message security assist (CPACF)
+ HasAES bool // KM-AES{128,192,256} functions
+ HasAESCBC bool // KMC-AES{128,192,256} functions
+ HasAESCTR bool // KMCTR-AES{128,192,256} functions
+ HasAESGCM bool // KMA-GCM-AES{128,192,256} functions
+ HasGHASH bool // KIMD-GHASH function
+ HasSHA1 bool // K{I,L}MD-SHA-1 functions
+ HasSHA256 bool // K{I,L}MD-SHA-256 functions
+ HasSHA512 bool // K{I,L}MD-SHA-512 functions
+ HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions
+ HasVX bool // vector facility. Note: the runtime sets this when it processes auxv records.
+ HasVXE bool // vector-enhancements facility 1
+ HasKDSA bool // elliptic curve functions
+ HasECDSA bool // NIST curves
+ HasEDDSA bool // Edwards curves
+ _ CacheLinePad
+}
+
+// Initialize examines the processor and sets the relevant variables above.
+// This is called by the runtime package early in program initialization,
+// before normal init functions are run. env is set by runtime if the OS supports
+// cpu feature options in GODEBUG.
+func Initialize(env string) {
+ doinit()
+ processOptions(env)
+}
+
+// options contains the cpu debug options that can be used in GODEBUG.
+// Options are arch dependent and are added by the arch specific doinit functions.
+// Features that are mandatory for the specific GOARCH should not be added to options
+// (e.g. SSE2 on amd64).
+var options []option
+
+// Option names should be lower case. e.g. avx instead of AVX.
+type option struct {
+ Name string
+ Feature *bool
+ Specified bool // whether feature value was specified in GODEBUG
+ Enable bool // whether feature should be enabled
+}
+
+// processOptions enables or disables CPU feature values based on the parsed env string.
+// The env string is expected to be of the form cpu.feature1=value1,cpu.feature2=value2...
+// where each feature name is one of the entries in the architecture-specific
+// list stored in the cpu package's options variable, and each value is either 'on' or 'off'.
+// If env contains cpu.all=off then all cpu features referenced through the options
+// variable are disabled. Other feature names and values result in warning messages.
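+// For example (an illustrative value):
+//
+//	GODEBUG=cpu.avx2=off,cpu.erms=off
+//
+// disables the use of AVX2 and ERMS on amd64.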
+func processOptions(env string) {
+field:
+ for env != "" {
+ field := ""
+ i := indexByte(env, ',')
+ if i < 0 {
+ field, env = env, ""
+ } else {
+ field, env = env[:i], env[i+1:]
+ }
+ if len(field) < 4 || field[:4] != "cpu." {
+ continue
+ }
+ i = indexByte(field, '=')
+ if i < 0 {
+ print("GODEBUG: no value specified for \"", field, "\"\n")
+ continue
+ }
+ key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on"
+
+ var enable bool
+ switch value {
+ case "on":
+ enable = true
+ case "off":
+ enable = false
+ default:
+ print("GODEBUG: value \"", value, "\" not supported for cpu option \"", key, "\"\n")
+ continue field
+ }
+
+ if key == "all" {
+ for i := range options {
+ options[i].Specified = true
+ options[i].Enable = enable
+ }
+ continue field
+ }
+
+ for i := range options {
+ if options[i].Name == key {
+ options[i].Specified = true
+ options[i].Enable = enable
+ continue field
+ }
+ }
+
+ print("GODEBUG: unknown cpu feature \"", key, "\"\n")
+ }
+
+ for _, o := range options {
+ if !o.Specified {
+ continue
+ }
+
+ if o.Enable && !*o.Feature {
+ print("GODEBUG: can not enable \"", o.Name, "\", missing CPU support\n")
+ continue
+ }
+
+ *o.Feature = o.Enable
+ }
+}
+
+// indexByte returns the index of the first instance of c in s,
+// or -1 if c is not present in s.
+func indexByte(s string, c byte) int {
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ return i
+ }
+ }
+ return -1
+}
diff --git a/contrib/go/_std_1.18/src/internal/cpu/cpu.s b/contrib/go/_std_1.19/src/internal/cpu/cpu.s
index 3c770c132d..3c770c132d 100644
--- a/contrib/go/_std_1.18/src/internal/cpu/cpu.s
+++ b/contrib/go/_std_1.19/src/internal/cpu/cpu.s
diff --git a/contrib/go/_std_1.19/src/internal/cpu/cpu_x86.go b/contrib/go/_std_1.19/src/internal/cpu/cpu_x86.go
new file mode 100644
index 0000000000..6fd979a747
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/cpu/cpu_x86.go
@@ -0,0 +1,187 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64
+
+package cpu
+
+const CacheLinePadSize = 64
+
+// cpuid is implemented in cpu_x86.s.
+func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
+
+// xgetbv with ecx = 0 is implemented in cpu_x86.s.
+func xgetbv() (eax, edx uint32)
+
+// getGOAMD64level is implemented in cpu_x86.s. Returns number in [1,4].
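+// The value corresponds to the GOAMD64 microarchitecture level
+// (v1 through v4) that the binary was compiled for.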
+func getGOAMD64level() int32
+
+const (
+ // edx bits
+ cpuid_SSE2 = 1 << 26
+
+ // ecx bits
+ cpuid_SSE3 = 1 << 0
+ cpuid_PCLMULQDQ = 1 << 1
+ cpuid_SSSE3 = 1 << 9
+ cpuid_FMA = 1 << 12
+ cpuid_SSE41 = 1 << 19
+ cpuid_SSE42 = 1 << 20
+ cpuid_POPCNT = 1 << 23
+ cpuid_AES = 1 << 25
+ cpuid_OSXSAVE = 1 << 27
+ cpuid_AVX = 1 << 28
+
+ // ebx bits
+ cpuid_BMI1 = 1 << 3
+ cpuid_AVX2 = 1 << 5
+ cpuid_BMI2 = 1 << 8
+ cpuid_ERMS = 1 << 9
+ cpuid_ADX = 1 << 19
+
+ // edx bits for CPUID 0x80000001
+ cpuid_RDTSCP = 1 << 27
+)
+
+var maxExtendedFunctionInformation uint32
+
+func doinit() {
+ options = []option{
+ {Name: "adx", Feature: &X86.HasADX},
+ {Name: "aes", Feature: &X86.HasAES},
+ {Name: "erms", Feature: &X86.HasERMS},
+ {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ},
+ {Name: "rdtscp", Feature: &X86.HasRDTSCP},
+ }
+ level := getGOAMD64level()
+ if level < 2 {
+ // These options are required at level 2. At lower levels
+ // they can be turned off.
+ options = append(options,
+ option{Name: "popcnt", Feature: &X86.HasPOPCNT},
+ option{Name: "sse3", Feature: &X86.HasSSE3},
+ option{Name: "sse41", Feature: &X86.HasSSE41},
+ option{Name: "sse42", Feature: &X86.HasSSE42},
+ option{Name: "ssse3", Feature: &X86.HasSSSE3})
+ }
+ if level < 3 {
+ // These options are required at level 3. At lower levels
+ // they can be turned off.
+ options = append(options,
+ option{Name: "avx", Feature: &X86.HasAVX},
+ option{Name: "avx2", Feature: &X86.HasAVX2},
+ option{Name: "bmi1", Feature: &X86.HasBMI1},
+ option{Name: "bmi2", Feature: &X86.HasBMI2},
+ option{Name: "fma", Feature: &X86.HasFMA})
+ }
+
+ maxID, _, _, _ := cpuid(0, 0)
+
+ if maxID < 1 {
+ return
+ }
+
+ maxExtendedFunctionInformation, _, _, _ = cpuid(0x80000000, 0)
+
+ _, _, ecx1, _ := cpuid(1, 0)
+
+ X86.HasSSE3 = isSet(ecx1, cpuid_SSE3)
+ X86.HasPCLMULQDQ = isSet(ecx1, cpuid_PCLMULQDQ)
+ X86.HasSSSE3 = isSet(ecx1, cpuid_SSSE3)
+ X86.HasSSE41 = isSet(ecx1, cpuid_SSE41)
+ X86.HasSSE42 = isSet(ecx1, cpuid_SSE42)
+ X86.HasPOPCNT = isSet(ecx1, cpuid_POPCNT)
+ X86.HasAES = isSet(ecx1, cpuid_AES)
+
+	// OSXSAVE can be false when using older operating systems
+	// or when explicitly disabled on newer operating systems by
+	// e.g. setting the xsavedisable boot option on Windows 10.
+ X86.HasOSXSAVE = isSet(ecx1, cpuid_OSXSAVE)
+
+ // The FMA instruction set extension only has VEX prefixed instructions.
+ // VEX prefixed instructions require OSXSAVE to be enabled.
+ // See Intel 64 and IA-32 Architecture Software Developer’s Manual Volume 2
+ // Section 2.4 "AVX and SSE Instruction Exception Specification"
+ X86.HasFMA = isSet(ecx1, cpuid_FMA) && X86.HasOSXSAVE
+
+ osSupportsAVX := false
+ // For XGETBV, OSXSAVE bit is required and sufficient.
+ if X86.HasOSXSAVE {
+ eax, _ := xgetbv()
+ // Check if XMM and YMM registers have OS support.
+ osSupportsAVX = isSet(eax, 1<<1) && isSet(eax, 1<<2)
+ }
+
+ X86.HasAVX = isSet(ecx1, cpuid_AVX) && osSupportsAVX
+
+ if maxID < 7 {
+ return
+ }
+
+ _, ebx7, _, _ := cpuid(7, 0)
+ X86.HasBMI1 = isSet(ebx7, cpuid_BMI1)
+ X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX
+ X86.HasBMI2 = isSet(ebx7, cpuid_BMI2)
+ X86.HasERMS = isSet(ebx7, cpuid_ERMS)
+ X86.HasADX = isSet(ebx7, cpuid_ADX)
+
+ var maxExtendedInformation uint32
+ maxExtendedInformation, _, _, _ = cpuid(0x80000000, 0)
+
+ if maxExtendedInformation < 0x80000001 {
+ return
+ }
+
+ _, _, _, edxExt1 := cpuid(0x80000001, 0)
+ X86.HasRDTSCP = isSet(edxExt1, cpuid_RDTSCP)
+}
+
+func isSet(hwc uint32, value uint32) bool {
+ return hwc&value != 0
+}
+
+// Name returns the CPU name given by the vendor.
+// If the CPU name cannot be determined, an
+// empty string is returned.
+func Name() string {
+ if maxExtendedFunctionInformation < 0x80000004 {
+ return ""
+ }
+
+ data := make([]byte, 0, 3*4*4)
+
+ var eax, ebx, ecx, edx uint32
+ eax, ebx, ecx, edx = cpuid(0x80000002, 0)
+ data = appendBytes(data, eax, ebx, ecx, edx)
+ eax, ebx, ecx, edx = cpuid(0x80000003, 0)
+ data = appendBytes(data, eax, ebx, ecx, edx)
+ eax, ebx, ecx, edx = cpuid(0x80000004, 0)
+ data = appendBytes(data, eax, ebx, ecx, edx)
+
+ // Trim leading spaces.
+ for len(data) > 0 && data[0] == ' ' {
+ data = data[1:]
+ }
+
+ // Trim tail after and including the first null byte.
+ for i, c := range data {
+ if c == '\x00' {
+ data = data[:i]
+ break
+ }
+ }
+
+ return string(data)
+}
+
+func appendBytes(b []byte, args ...uint32) []byte {
+ for _, arg := range args {
+ b = append(b,
+ byte((arg >> 0)),
+ byte((arg >> 8)),
+ byte((arg >> 16)),
+ byte((arg >> 24)))
+ }
+ return b
+}
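// Worked example for appendBytes above (hypothetical input): CPUID returns
// string data little-endian, low byte first, so a single 32-bit word such as
// 0x756E6547 unpacks to four ASCII bytes:
//
//	appendBytes(nil, 0x756E6547) // []byte{'G', 'e', 'n', 'u'}
//
// because 'G' = 0x47 occupies bits 0-7, 'e' = 0x65 bits 8-15, 'n' = 0x6E
// bits 16-23, and 'u' = 0x75 bits 24-31. Name concatenates twelve such words
// from leaves 0x80000002-0x80000004 and trims the padding.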
diff --git a/contrib/go/_std_1.19/src/internal/cpu/cpu_x86.s b/contrib/go/_std_1.19/src/internal/cpu/cpu_x86.s
new file mode 100644
index 0000000000..2ee8eca248
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/cpu/cpu_x86.s
@@ -0,0 +1,43 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64
+
+#include "textflag.h"
+
+// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·cpuid(SB), NOSPLIT, $0-24
+ MOVL eaxArg+0(FP), AX
+ MOVL ecxArg+4(FP), CX
+ CPUID
+ MOVL AX, eax+8(FP)
+ MOVL BX, ebx+12(FP)
+ MOVL CX, ecx+16(FP)
+ MOVL DX, edx+20(FP)
+ RET
+
+// func xgetbv() (eax, edx uint32)
+TEXT ·xgetbv(SB),NOSPLIT,$0-8
+ MOVL $0, CX
+ XGETBV
+ MOVL AX, eax+0(FP)
+ MOVL DX, edx+4(FP)
+ RET
+
+// func getGOAMD64level() int32
+TEXT ·getGOAMD64level(SB),NOSPLIT,$0-4
+#ifdef GOAMD64_v4
+ MOVL $4, ret+0(FP)
+#else
+#ifdef GOAMD64_v3
+ MOVL $3, ret+0(FP)
+#else
+#ifdef GOAMD64_v2
+ MOVL $2, ret+0(FP)
+#else
+ MOVL $1, ret+0(FP)
+#endif
+#endif
+#endif
+ RET
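// Note (an assumption based on the macros above): GOAMD64_v2/v3/v4 are
// defined by the toolchain from the GOAMD64 setting at build time, so e.g.
//
//	GOAMD64=v3 go build ./...
//
// makes this stub return 3, which lets doinit in cpu_x86.go omit the feature
// options that the v3 baseline already guarantees.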
diff --git a/contrib/go/_std_1.19/src/internal/fmtsort/sort.go b/contrib/go/_std_1.19/src/internal/fmtsort/sort.go
new file mode 100644
index 0000000000..278a89bd75
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/fmtsort/sort.go
@@ -0,0 +1,219 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fmtsort provides a general stable ordering mechanism
+// for maps, on behalf of the fmt and text/template packages.
+// It is not guaranteed to be efficient and works only for types
+// that are valid map keys.
+package fmtsort
+
+import (
+ "reflect"
+ "sort"
+)
+
+// Note: Throughout this package we avoid calling reflect.Value.Interface as
+// it is not always legal to do so and it's easier to avoid the issue than to face it.
+
+// SortedMap represents a map's keys and values. The keys and values are
+// aligned in index order: Value[i] is the value in the map corresponding to Key[i].
+type SortedMap struct {
+ Key []reflect.Value
+ Value []reflect.Value
+}
+
+func (o *SortedMap) Len() int { return len(o.Key) }
+func (o *SortedMap) Less(i, j int) bool { return compare(o.Key[i], o.Key[j]) < 0 }
+func (o *SortedMap) Swap(i, j int) {
+ o.Key[i], o.Key[j] = o.Key[j], o.Key[i]
+ o.Value[i], o.Value[j] = o.Value[j], o.Value[i]
+}
+
+// Sort accepts a map and returns a SortedMap that has the same keys and
+// values but in a stable sorted order according to the keys, modulo issues
+// raised by unorderable key values such as NaNs.
+//
+// The ordering rules are more general than with Go's < operator:
+//
+// - when applicable, nil compares low
+// - ints, floats, and strings order by <
+// - NaN compares less than non-NaN floats
+// - bool compares false before true
+// - complex compares real, then imag
+// - pointers compare by machine address
+// - channel values compare by machine address
+// - structs compare each field in turn
+// - arrays compare each element in turn.
+// Otherwise identical arrays compare by length.
+// - interface values compare first by reflect.Type describing the concrete type
+// and then by concrete value as described in the previous rules.
+func Sort(mapValue reflect.Value) *SortedMap {
+ if mapValue.Type().Kind() != reflect.Map {
+ return nil
+ }
+ // Note: this code is arranged to not panic even in the presence
+ // of a concurrent map update. The runtime is responsible for
+ // yelling loudly if that happens. See issue 33275.
+ n := mapValue.Len()
+ key := make([]reflect.Value, 0, n)
+ value := make([]reflect.Value, 0, n)
+ iter := mapValue.MapRange()
+ for iter.Next() {
+ key = append(key, iter.Key())
+ value = append(value, iter.Value())
+ }
+ sorted := &SortedMap{
+ Key: key,
+ Value: value,
+ }
+ sort.Stable(sorted)
+ return sorted
+}
+
+// compare compares two values of the same type. It returns -1, 0, 1
+// according to whether a > b (1), a == b (0), or a < b (-1).
+// If the types differ, it returns -1.
+// See the comment on Sort for the comparison rules.
+func compare(aVal, bVal reflect.Value) int {
+ aType, bType := aVal.Type(), bVal.Type()
+ if aType != bType {
+ return -1 // No good answer possible, but don't return 0: they're not equal.
+ }
+ switch aVal.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ a, b := aVal.Int(), bVal.Int()
+ switch {
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ default:
+ return 0
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ a, b := aVal.Uint(), bVal.Uint()
+ switch {
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ default:
+ return 0
+ }
+ case reflect.String:
+ a, b := aVal.String(), bVal.String()
+ switch {
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ default:
+ return 0
+ }
+ case reflect.Float32, reflect.Float64:
+ return floatCompare(aVal.Float(), bVal.Float())
+ case reflect.Complex64, reflect.Complex128:
+ a, b := aVal.Complex(), bVal.Complex()
+ if c := floatCompare(real(a), real(b)); c != 0 {
+ return c
+ }
+ return floatCompare(imag(a), imag(b))
+ case reflect.Bool:
+ a, b := aVal.Bool(), bVal.Bool()
+ switch {
+ case a == b:
+ return 0
+ case a:
+ return 1
+ default:
+ return -1
+ }
+ case reflect.Pointer, reflect.UnsafePointer:
+ a, b := aVal.Pointer(), bVal.Pointer()
+ switch {
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ default:
+ return 0
+ }
+ case reflect.Chan:
+ if c, ok := nilCompare(aVal, bVal); ok {
+ return c
+ }
+ ap, bp := aVal.Pointer(), bVal.Pointer()
+ switch {
+ case ap < bp:
+ return -1
+ case ap > bp:
+ return 1
+ default:
+ return 0
+ }
+ case reflect.Struct:
+ for i := 0; i < aVal.NumField(); i++ {
+ if c := compare(aVal.Field(i), bVal.Field(i)); c != 0 {
+ return c
+ }
+ }
+ return 0
+ case reflect.Array:
+ for i := 0; i < aVal.Len(); i++ {
+ if c := compare(aVal.Index(i), bVal.Index(i)); c != 0 {
+ return c
+ }
+ }
+ return 0
+ case reflect.Interface:
+ if c, ok := nilCompare(aVal, bVal); ok {
+ return c
+ }
+ c := compare(reflect.ValueOf(aVal.Elem().Type()), reflect.ValueOf(bVal.Elem().Type()))
+ if c != 0 {
+ return c
+ }
+ return compare(aVal.Elem(), bVal.Elem())
+ default:
+ // Certain types cannot appear as keys (maps, funcs, slices), but be explicit.
+ panic("bad type in compare: " + aType.String())
+ }
+}
+
+// nilCompare checks whether either value is nil. If not, the boolean is false.
+// If either value is nil, the boolean is true and the integer is the comparison
+// value. The comparison is defined to be 0 if both are nil, otherwise the one
+// nil value compares low. Both arguments must represent a chan, func,
+// interface, map, pointer, or slice.
+func nilCompare(aVal, bVal reflect.Value) (int, bool) {
+ if aVal.IsNil() {
+ if bVal.IsNil() {
+ return 0, true
+ }
+ return -1, true
+ }
+ if bVal.IsNil() {
+ return 1, true
+ }
+ return 0, false
+}
+
+// floatCompare compares two floating-point values. NaNs compare low.
+func floatCompare(a, b float64) int {
+ switch {
+ case isNaN(a):
+ return -1 // No good answer if b is a NaN so don't bother checking.
+ case isNaN(b):
+ return 1
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ }
+ return 0
+}
+
+func isNaN(a float64) bool {
+ return a != a
+}
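// Usage sketch: fmtsort is internal, but its ordering is visible through fmt,
// which uses it to print map keys deterministically. A minimal illustration:
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		m := map[int]string{3: "c", 1: "a", 2: "b"}
//		fmt.Println(m) // map[1:a 2:b 3:c], keys ordered by compare
//	}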
diff --git a/contrib/go/_std_1.19/src/internal/goarch/goarch.go b/contrib/go/_std_1.19/src/internal/goarch/goarch.go
new file mode 100644
index 0000000000..3dda62fadc
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/goarch/goarch.go
@@ -0,0 +1,60 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// package goarch contains GOARCH-specific constants.
+package goarch
+
+// The next line makes 'go generate' write the zgoarch*.go files with
+// per-arch information, including constants named Is$GOARCH for every
+// known GOARCH. The constant is 1 on the current system, 0 otherwise; multiplying
+// by them is useful for defining GOARCH-specific constants.
+//
+//go:generate go run gengoarch.go
+
+type ArchFamilyType int
+
+const (
+ AMD64 ArchFamilyType = iota
+ ARM
+ ARM64
+ I386
+ LOONG64
+ MIPS
+ MIPS64
+ PPC64
+ RISCV64
+ S390X
+ WASM
+)
+
+// PtrSize is the size of a pointer in bytes - unsafe.Sizeof(uintptr(0)) but as an ideal constant.
+// It is also the machine's native word size (that is, 4 on 32-bit systems, 8 on 64-bit).
+const PtrSize = 4 << (^uintptr(0) >> 63)
+
+// ArchFamily is the architecture family (AMD64, ARM, ...)
+const ArchFamily ArchFamilyType = _ArchFamily
+
+// BigEndian reports whether the architecture is big-endian.
+const BigEndian = IsArmbe|IsArm64be|IsMips|IsMips64|IsPpc|IsPpc64|IsS390|IsS390x|IsSparc|IsSparc64 == 1
+
+// DefaultPhysPageSize is the default physical page size.
+const DefaultPhysPageSize = _DefaultPhysPageSize
+
+// PCQuantum is the minimal unit for a program counter (1 on x86, 4 on most other systems).
+// The various PC tables record PC deltas pre-divided by PCQuantum.
+const PCQuantum = _PCQuantum
+
+// Int64Align is the required alignment for a 64-bit integer (4 on 32-bit systems, 8 on 64-bit).
+const Int64Align = PtrSize
+
+// MinFrameSize is the size of the system-reserved words at the bottom
+// of a frame (just above the architectural stack pointer).
+// It is zero on x86 and PtrSize on most non-x86 (LR-based) systems.
+// On PowerPC it is larger, to cover three more reserved words:
+// the compiler word, the link editor word, and the TOC save word.
+const MinFrameSize = _MinFrameSize
+
+// StackAlign is the required alignment of the SP register.
+// The stack must be at least word aligned, but some architectures require more.
+const StackAlign = _StackAlign
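// Worked example for the PtrSize expression above: ^uintptr(0) is all one
// bits, so shifting it right by 63 yields 1 when uintptr is 64 bits wide and
// 0 when it is 32 bits wide:
//
//	64-bit: PtrSize = 4 << 1 = 8
//	32-bit: PtrSize = 4 << 0 = 4
//
// The result is an untyped constant, unlike unsafe.Sizeof(uintptr(0)).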
diff --git a/contrib/go/_std_1.18/src/internal/goarch/goarch_amd64.go b/contrib/go/_std_1.19/src/internal/goarch/goarch_amd64.go
index 911e3e7242..911e3e7242 100644
--- a/contrib/go/_std_1.18/src/internal/goarch/goarch_amd64.go
+++ b/contrib/go/_std_1.19/src/internal/goarch/goarch_amd64.go
diff --git a/contrib/go/_std_1.18/src/internal/goarch/zgoarch_amd64.go b/contrib/go/_std_1.19/src/internal/goarch/zgoarch_amd64.go
index 7926392b77..7926392b77 100644
--- a/contrib/go/_std_1.18/src/internal/goarch/zgoarch_amd64.go
+++ b/contrib/go/_std_1.19/src/internal/goarch/zgoarch_amd64.go
diff --git a/contrib/go/_std_1.18/src/internal/godebug/godebug.go b/contrib/go/_std_1.19/src/internal/godebug/godebug.go
index ac434e5fd8..ac434e5fd8 100644
--- a/contrib/go/_std_1.18/src/internal/godebug/godebug.go
+++ b/contrib/go/_std_1.19/src/internal/godebug/godebug.go
diff --git a/contrib/go/_std_1.19/src/internal/goexperiment/exp_boringcrypto_off.go b/contrib/go/_std_1.19/src/internal/goexperiment/exp_boringcrypto_off.go
new file mode 100644
index 0000000000..020c75bd53
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/goexperiment/exp_boringcrypto_off.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.boringcrypto
+// +build !goexperiment.boringcrypto
+
+package goexperiment
+
+const BoringCrypto = false
+const BoringCryptoInt = 0
diff --git a/contrib/go/_std_1.18/src/internal/goexperiment/exp_fieldtrack_off.go b/contrib/go/_std_1.19/src/internal/goexperiment/exp_fieldtrack_off.go
index e5e132660e..e5e132660e 100644
--- a/contrib/go/_std_1.18/src/internal/goexperiment/exp_fieldtrack_off.go
+++ b/contrib/go/_std_1.19/src/internal/goexperiment/exp_fieldtrack_off.go
diff --git a/contrib/go/_std_1.18/src/internal/goexperiment/exp_heapminimum512kib_off.go b/contrib/go/_std_1.19/src/internal/goexperiment/exp_heapminimum512kib_off.go
index 09da431b40..09da431b40 100644
--- a/contrib/go/_std_1.18/src/internal/goexperiment/exp_heapminimum512kib_off.go
+++ b/contrib/go/_std_1.19/src/internal/goexperiment/exp_heapminimum512kib_off.go
diff --git a/contrib/go/_std_1.18/src/internal/goexperiment/exp_preemptibleloops_off.go b/contrib/go/_std_1.19/src/internal/goexperiment/exp_preemptibleloops_off.go
index 7a26088e80..7a26088e80 100644
--- a/contrib/go/_std_1.18/src/internal/goexperiment/exp_preemptibleloops_off.go
+++ b/contrib/go/_std_1.19/src/internal/goexperiment/exp_preemptibleloops_off.go
diff --git a/contrib/go/_std_1.18/src/internal/goexperiment/exp_regabiargs_on.go b/contrib/go/_std_1.19/src/internal/goexperiment/exp_regabiargs_on.go
index 9b26f3c9cb..9b26f3c9cb 100644
--- a/contrib/go/_std_1.18/src/internal/goexperiment/exp_regabiargs_on.go
+++ b/contrib/go/_std_1.19/src/internal/goexperiment/exp_regabiargs_on.go
diff --git a/contrib/go/_std_1.18/src/internal/goexperiment/exp_regabiwrappers_on.go b/contrib/go/_std_1.19/src/internal/goexperiment/exp_regabiwrappers_on.go
index 11ffffbbff..11ffffbbff 100644
--- a/contrib/go/_std_1.18/src/internal/goexperiment/exp_regabiwrappers_on.go
+++ b/contrib/go/_std_1.19/src/internal/goexperiment/exp_regabiwrappers_on.go
diff --git a/contrib/go/_std_1.18/src/internal/goexperiment/exp_staticlockranking_off.go b/contrib/go/_std_1.19/src/internal/goexperiment/exp_staticlockranking_off.go
index 3d546c04b4..3d546c04b4 100644
--- a/contrib/go/_std_1.18/src/internal/goexperiment/exp_staticlockranking_off.go
+++ b/contrib/go/_std_1.19/src/internal/goexperiment/exp_staticlockranking_off.go
diff --git a/contrib/go/_std_1.18/src/internal/goexperiment/exp_unified_off.go b/contrib/go/_std_1.19/src/internal/goexperiment/exp_unified_off.go
index 4c16fd8562..4c16fd8562 100644
--- a/contrib/go/_std_1.18/src/internal/goexperiment/exp_unified_off.go
+++ b/contrib/go/_std_1.19/src/internal/goexperiment/exp_unified_off.go
diff --git a/contrib/go/_std_1.19/src/internal/goexperiment/flags.go b/contrib/go/_std_1.19/src/internal/goexperiment/flags.go
new file mode 100644
index 0000000000..20d9c2da5d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/goexperiment/flags.go
@@ -0,0 +1,89 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package goexperiment implements support for toolchain experiments.
+//
+// Toolchain experiments are controlled by the GOEXPERIMENT
+// environment variable. GOEXPERIMENT is a comma-separated list of
+// experiment names. GOEXPERIMENT can be set at make.bash time, which
+// sets the default experiments for binaries built with the tool
+// chain; or it can be set at build time. GOEXPERIMENT can also be set
+// to "none", which disables any experiments that were enabled at
+// make.bash time.
+//
+// Experiments are exposed to the build in the following ways:
+//
+// - Build tag goexperiment.x is set if experiment x (lower case) is
+// enabled.
+//
+// - For each experiment x (in camel case), this package contains a
+// boolean constant x and an integer constant xInt.
+//
+// - In runtime assembly, the macro GOEXPERIMENT_x is defined if
+// experiment x (lower case) is enabled.
+//
+// In the toolchain, the set of experiments enabled for the current
+// build should be accessed via objabi.Experiment.
+//
+// The set of experiments is included in the output of runtime.Version()
+// and "go version <binary>" if it differs from the default experiments.
+//
+// For the set of experiments supported by the current toolchain, see
+// "go doc goexperiment.Flags".
+//
+// Note that this package defines the set of experiments (in Flags)
+// and records the experiments that were enabled when the package
+// was compiled (as boolean and integer constants).
+//
+// Note especially that this package does not itself change behavior
+// at run time based on the GOEXPERIMENT variable.
+// The code used in builds to interpret the GOEXPERIMENT variable
+// is in the separate package internal/buildcfg.
+package goexperiment
+
+//go:generate go run mkconsts.go
+
+// Flags is the set of experiments that can be enabled or disabled in
+// the current toolchain.
+//
+// When specified in the GOEXPERIMENT environment variable or as build
+// tags, experiments use the strings.ToLower of their field name.
+//
+// For the baseline experimental configuration, see
+// objabi.experimentBaseline.
+//
+// If you change this struct definition, run "go generate".
+type Flags struct {
+ FieldTrack bool
+ PreemptibleLoops bool
+ StaticLockRanking bool
+ BoringCrypto bool
+
+ // Unified enables the compiler's unified IR construction
+ // experiment.
+ Unified bool
+
+ // Regabi is split into several sub-experiments that can be
+ // enabled individually. Not all combinations work.
+ // The "regabi" GOEXPERIMENT is an alias for all "working"
+ // subexperiments.
+
+ // RegabiWrappers enables ABI wrappers for calling between
+ // ABI0 and ABIInternal functions. Without this, the ABIs are
+ // assumed to be identical so cross-ABI calls are direct.
+ RegabiWrappers bool
+ // RegabiArgs enables register arguments/results in all
+ // compiled Go functions.
+ //
+ // Requires wrappers (to do ABI translation), and reflect (so
+ // reflection calls use registers).
+ RegabiArgs bool
+
+ // HeapMinimum512KiB reduces the minimum heap size to 512 KiB.
+ //
+ // This was originally reduced as part of PacerRedesign, but
+ // has been broken out to its own experiment that is disabled
+ // by default.
+ HeapMinimum512KiB bool
+}
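// Sketch (hypothetical file, following the build-tag convention documented
// above): code can be gated on an experiment like so:
//
//	//go:build goexperiment.boringcrypto
//
//	package mypkg
//
//	const usingBoringCrypto = true
//
// Building with GOEXPERIMENT=boringcrypto sets the tag and compiles the file
// in; otherwise it is skipped.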
diff --git a/contrib/go/_std_1.19/src/internal/goos/goos.go b/contrib/go/_std_1.19/src/internal/goos/goos.go
new file mode 100644
index 0000000000..02dc9688cb
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/goos/goos.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// package goos contains GOOS-specific constants.
+package goos
+
+// The next line makes 'go generate' write the zgoos*.go files with
+// per-OS information, including constants named Is$GOOS for every
+// known GOOS. The constant is 1 on the current system, 0 otherwise;
+// multiplying by them is useful for defining GOOS-specific constants.
+//
+//go:generate go run gengoos.go
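// Sketch of the multiply-by-constant idiom mentioned above, assuming the
// generated constants IsLinux and IsDarwin (1 on that GOOS, 0 elsewhere):
//
//	const someTunable = 64*IsLinux + 16*IsDarwin // hypothetical values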
diff --git a/contrib/go/_std_1.18/src/internal/goos/zgoos_darwin.go b/contrib/go/_std_1.19/src/internal/goos/zgoos_darwin.go
index decdd49642..decdd49642 100644
--- a/contrib/go/_std_1.18/src/internal/goos/zgoos_darwin.go
+++ b/contrib/go/_std_1.19/src/internal/goos/zgoos_darwin.go
diff --git a/contrib/go/_std_1.18/src/internal/goos/zgoos_linux.go b/contrib/go/_std_1.19/src/internal/goos/zgoos_linux.go
index cb9d6e8afa..cb9d6e8afa 100644
--- a/contrib/go/_std_1.18/src/internal/goos/zgoos_linux.go
+++ b/contrib/go/_std_1.19/src/internal/goos/zgoos_linux.go
diff --git a/contrib/go/_std_1.19/src/internal/intern/intern.go b/contrib/go/_std_1.19/src/internal/intern/intern.go
new file mode 100644
index 0000000000..c7639b4668
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/intern/intern.go
@@ -0,0 +1,179 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package intern lets you make smaller comparable values by boxing
+// a larger comparable value (such as a 16 byte string header) down
+// into a globally unique 8 byte pointer.
+//
+// The globally unique pointers are garbage collected with weak
+// references and finalizers. This package hides that.
+package intern
+
+import (
+ "internal/godebug"
+ "runtime"
+ "sync"
+ "unsafe"
+)
+
+// A Value pointer is the handle to an underlying comparable value.
+// See func Get for how Value pointers may be used.
+type Value struct {
+ _ [0]func() // prevent people from accidentally using value type as comparable
+ cmpVal any
+ // resurrected is guarded by mu (for all instances of Value).
+ // It is set true whenever v is synthesized from a uintptr.
+ resurrected bool
+}
+
+// Get returns the comparable value passed to the Get func
+// that returned v.
+func (v *Value) Get() any { return v.cmpVal }
+
+// key is a key in our global value map.
+// It contains type-specialized fields to avoid allocations
+// when converting common types to empty interfaces.
+type key struct {
+ s string
+ cmpVal any
+ // isString reports whether key contains a string.
+ // Without it, the zero value of key is ambiguous.
+ isString bool
+}
+
+// keyFor returns a key to use with cmpVal.
+func keyFor(cmpVal any) key {
+ if s, ok := cmpVal.(string); ok {
+ return key{s: s, isString: true}
+ }
+ return key{cmpVal: cmpVal}
+}
+
+// Value returns a *Value built from k.
+func (k key) Value() *Value {
+ if k.isString {
+ return &Value{cmpVal: k.s}
+ }
+ return &Value{cmpVal: k.cmpVal}
+}
+
+var (
+ // mu guards valMap, a weakref map of *Value by underlying value.
+ // It also guards the resurrected field of all *Values.
+ mu sync.Mutex
+ valMap = map[key]uintptr{} // to uintptr(*Value)
+ valSafe = safeMap() // non-nil in safe+leaky mode
+)
+
+// safeMap returns a non-nil map if we're in safe-but-leaky mode,
+// as controlled by GODEBUG=intern=leaky.
+func safeMap() map[key]*Value {
+ if godebug.Get("intern") == "leaky" {
+ return map[key]*Value{}
+ }
+ return nil
+}
+
+// Get returns a pointer representing the comparable value cmpVal.
+//
+// The returned pointer will be the same for Get(v) and Get(v2)
+// if and only if v == v2, and can be used as a map key.
+func Get(cmpVal any) *Value {
+ return get(keyFor(cmpVal))
+}
+
+// GetByString is identical to Get, except that it is specialized for strings.
+// This avoids an allocation from putting a string into an interface{}
+// to pass as an argument to Get.
+func GetByString(s string) *Value {
+ return get(key{s: s, isString: true})
+}
+
+// We play unsafe games that violate Go's rules (and assume a non-moving
+// collector). So we quiet Go here.
+// See the comment below Get for more implementation details.
+//
+//go:nocheckptr
+func get(k key) *Value {
+ mu.Lock()
+ defer mu.Unlock()
+
+ var v *Value
+ if valSafe != nil {
+ v = valSafe[k]
+ } else if addr, ok := valMap[k]; ok {
+ v = (*Value)(unsafe.Pointer(addr))
+ v.resurrected = true
+ }
+ if v != nil {
+ return v
+ }
+ v = k.Value()
+ if valSafe != nil {
+ valSafe[k] = v
+ } else {
+ // SetFinalizer before uintptr conversion (theoretical concern;
+ // see https://github.com/go4org/intern/issues/13)
+ runtime.SetFinalizer(v, finalize)
+ valMap[k] = uintptr(unsafe.Pointer(v))
+ }
+ return v
+}
+
+func finalize(v *Value) {
+ mu.Lock()
+ defer mu.Unlock()
+ if v.resurrected {
+ // We lost the race. Somebody resurrected it while we
+ // were about to finalize it. Try again next round.
+ v.resurrected = false
+ runtime.SetFinalizer(v, finalize)
+ return
+ }
+ delete(valMap, keyFor(v.cmpVal))
+}
+
+// Interning is simple if you don't require that unused values be
+// garbage collectable. But we do require that; we don't want to be
+// a DoS vector. We do this by using a uintptr to hide the pointer from
+// the garbage collector, and using a finalizer to eliminate the
+// pointer when no other code is using it.
+//
+// The obvious implementation of this is to use a
+// map[interface{}]uintptr-of-*interface{}, and set up a finalizer to
+// delete from the map. Unfortunately, this is racy. Because pointers
+// are being created in violation of Go's unsafety rules, it's
+// possible to create a pointer to a value concurrently with the GC
+// concluding that the value can be collected. There are other races
+// that break the equality invariant as well, but the use-after-free
+// will cause a runtime crash.
+//
+// To make this work, the finalizer needs to know that no references
+// have been unsafely created since the finalizer was set up. To do
+// this, values carry a "resurrected" sentinel, which gets set
+// whenever a pointer is unsafely created. If the finalizer encounters
+// the sentinel, it clears the sentinel and delays collection for one
+// additional GC cycle, by re-installing itself as finalizer. This
+// ensures that the unsafely created pointer is visible to the GC, and
+// will correctly prevent collection.
+//
+// This technique does mean that interned values that get reused take
+// at least 3 GC cycles to fully collect (1 to clear the sentinel, 1
+// to clean up the unsafe map, 1 to be actually deleted).
+//
+// @ianlancetaylor commented in
+// https://github.com/golang/go/issues/41303#issuecomment-717401656
+// that it is possible to implement weak references in terms of
+// finalizers without unsafe. Unfortunately, the approach he outlined
+// does not work here, for two reasons. First, there is no way to
+// construct a strong pointer out of a weak pointer; our map stores
+// weak pointers, but we must return strong pointers to callers.
+// Second, and more fundamentally, we must return not just _a_ strong
+// pointer to callers, but _the same_ strong pointer to callers. In
+// order to return _the same_ strong pointer to callers, we must track
+// it, which is exactly what we cannot do with strong pointers.
+//
+// See https://github.com/inetaf/netaddr/issues/53 for more
+// discussion, and https://github.com/go4org/intern/issues/2 for an
+// illustration of the subtleties at play.
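// Usage sketch (intern is internal to the standard library; shown as if it
// were importable):
//
//	a := intern.GetByString("hello")
//	b := intern.GetByString("hello")
//	fmt.Println(a == b)           // true: one *Value per distinct value
//	fmt.Println(a.Get().(string)) // "hello"
//
// Because the *Value handles are pointer-sized and comparable, they can stand
// in for larger comparable values in maps and equality checks.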
diff --git a/contrib/go/_std_1.18/src/internal/itoa/itoa.go b/contrib/go/_std_1.19/src/internal/itoa/itoa.go
index c6062d9fe1..c6062d9fe1 100644
--- a/contrib/go/_std_1.18/src/internal/itoa/itoa.go
+++ b/contrib/go/_std_1.19/src/internal/itoa/itoa.go
diff --git a/contrib/go/_std_1.19/src/internal/nettrace/nettrace.go b/contrib/go/_std_1.19/src/internal/nettrace/nettrace.go
new file mode 100644
index 0000000000..0a2bf925e9
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/nettrace/nettrace.go
@@ -0,0 +1,46 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package nettrace contains internal hooks for tracing activity in
+// the net package. This package is purely internal for use by the
+// net/http/httptrace package and has no stable API exposed to end
+// users.
+package nettrace
+
+// TraceKey is a context.Context Value key. Its associated value should
+// be a *Trace struct.
+type TraceKey struct{}
+
+// LookupIPAltResolverKey is a context.Context Value key used by tests to
+// specify an alternate resolver func.
+// It is not exposed to outside users. (But see issue 12503.)
+// The value should be the same type as lookupIP:
+//
+// func lookupIP(ctx context.Context, host string) ([]IPAddr, error)
+type LookupIPAltResolverKey struct{}
+
+// Trace contains a set of hooks for tracing events within
+// the net package. Any specific hook may be nil.
+type Trace struct {
+ // DNSStart is called with the hostname of a DNS lookup
+ // before it begins.
+ DNSStart func(name string)
+
+ // DNSDone is called after a DNS lookup completes (or fails).
+	// The coalesced parameter reports whether singleflight de-duped
+	// the call. The addrs are net.IPAddr values, but are passed as
+	// []any because this package cannot import net (circular dependency).
+ DNSDone func(netIPs []any, coalesced bool, err error)
+
+ // ConnectStart is called before a Dial, excluding Dials made
+ // during DNS lookups. In the case of DualStack (Happy Eyeballs)
+ // dialing, this may be called multiple times, from multiple
+ // goroutines.
+ ConnectStart func(network, addr string)
+
+	// ConnectDone is called after a Dial with the results, excluding
+	// Dials made during DNS lookups. It may also be called multiple
+	// times, like ConnectStart.
+ ConnectDone func(network, addr string, err error)
+}
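// Sketch: these hooks are internal, but they back the public
// net/http/httptrace API, which exposes the same events to users:
//
//	package main
//
//	import (
//		"fmt"
//		"net/http"
//		"net/http/httptrace"
//	)
//
//	func main() {
//		req, _ := http.NewRequest("GET", "http://example.com", nil)
//		trace := &httptrace.ClientTrace{
//			DNSStart: func(i httptrace.DNSStartInfo) { fmt.Println("dns:", i.Host) },
//			ConnectDone: func(network, addr string, err error) {
//				fmt.Println("dial:", network, addr, err)
//			},
//		}
//		req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
//		if resp, err := http.DefaultClient.Do(req); err == nil {
//			resp.Body.Close()
//		}
//	}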
diff --git a/contrib/go/_std_1.18/src/internal/oserror/errors.go b/contrib/go/_std_1.19/src/internal/oserror/errors.go
index 28a1ab32d3..28a1ab32d3 100644
--- a/contrib/go/_std_1.18/src/internal/oserror/errors.go
+++ b/contrib/go/_std_1.19/src/internal/oserror/errors.go
diff --git a/contrib/go/_std_1.18/src/internal/poll/copy_file_range_linux.go b/contrib/go/_std_1.19/src/internal/poll/copy_file_range_linux.go
index 5b9e5d4020..5b9e5d4020 100644
--- a/contrib/go/_std_1.18/src/internal/poll/copy_file_range_linux.go
+++ b/contrib/go/_std_1.19/src/internal/poll/copy_file_range_linux.go
diff --git a/contrib/go/_std_1.19/src/internal/poll/errno_unix.go b/contrib/go/_std_1.19/src/internal/poll/errno_unix.go
new file mode 100644
index 0000000000..8eed93a31c
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/poll/errno_unix.go
@@ -0,0 +1,33 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package poll
+
+import "syscall"
+
+// Do the interface allocations only once for common
+// Errno values.
+var (
+ errEAGAIN error = syscall.EAGAIN
+ errEINVAL error = syscall.EINVAL
+ errENOENT error = syscall.ENOENT
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return nil
+ case syscall.EAGAIN:
+ return errEAGAIN
+ case syscall.EINVAL:
+ return errEINVAL
+ case syscall.ENOENT:
+ return errENOENT
+ }
+ return e
+}
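// Why the boxing above helps (per the comments in this file): converting a
// syscall.Errno to the error interface on every failing call may allocate,
// so hot paths return the three prebuilt values instead:
//
//	err := errnoErr(syscall.EAGAIN) // reuses errEAGAIN, no new boxing
//
// Uncommon errno values still take the generic conversion at the end.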
diff --git a/contrib/go/_std_1.19/src/internal/poll/fcntl_libc.go b/contrib/go/_std_1.19/src/internal/poll/fcntl_libc.go
new file mode 100644
index 0000000000..13614dc3e8
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/poll/fcntl_libc.go
@@ -0,0 +1,14 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || solaris
+
+package poll
+
+import _ "unsafe" // for go:linkname
+
+// Implemented in the syscall package.
+//
+//go:linkname fcntl syscall.fcntl
+func fcntl(fd int, cmd int, arg int) (int, error)
diff --git a/contrib/go/_std_1.18/src/internal/poll/fcntl_syscall.go b/contrib/go/_std_1.19/src/internal/poll/fcntl_syscall.go
index accff5e043..accff5e043 100644
--- a/contrib/go/_std_1.18/src/internal/poll/fcntl_syscall.go
+++ b/contrib/go/_std_1.19/src/internal/poll/fcntl_syscall.go
diff --git a/contrib/go/_std_1.19/src/internal/poll/fd.go b/contrib/go/_std_1.19/src/internal/poll/fd.go
new file mode 100644
index 0000000000..ef61d0cb3f
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/poll/fd.go
@@ -0,0 +1,83 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package poll supports non-blocking I/O on file descriptors with polling.
+// This supports I/O operations that block only a goroutine, not a thread.
+// This is used by the net and os packages.
+// It uses a poller built into the runtime, with support from the
+// runtime scheduler.
+package poll
+
+import (
+ "errors"
+)
+
+// errNetClosing is the type of the variable ErrNetClosing.
+// This is used to implement the net.Error interface.
+type errNetClosing struct{}
+
+// Error returns the error message for ErrNetClosing.
+// Keep this string consistent because of issue #4373:
+// since historically programs have not been able to detect
+// this error, they look for the string.
+func (e errNetClosing) Error() string { return "use of closed network connection" }
+
+func (e errNetClosing) Timeout() bool { return false }
+func (e errNetClosing) Temporary() bool { return false }
+
+// ErrNetClosing is returned when a network descriptor is used after
+// it has been closed.
+var ErrNetClosing = errNetClosing{}
+
+// ErrFileClosing is returned when a file descriptor is used after it
+// has been closed.
+var ErrFileClosing = errors.New("use of closed file")
+
+// ErrNoDeadline is returned when a request is made to set a deadline
+// on a file type that does not use the poller.
+var ErrNoDeadline = errors.New("file type does not support deadline")
+
+// Return the appropriate closing error based on isFile.
+func errClosing(isFile bool) error {
+ if isFile {
+ return ErrFileClosing
+ }
+ return ErrNetClosing
+}
+
+// ErrDeadlineExceeded is returned for an expired deadline.
+// This is exported by the os package as os.ErrDeadlineExceeded.
+var ErrDeadlineExceeded error = &DeadlineExceededError{}
+
+// DeadlineExceededError is returned for an expired deadline.
+type DeadlineExceededError struct{}
+
+// Implement the net.Error interface.
+// The string is "i/o timeout" because that is what was returned
+// by earlier Go versions. Changing it may break programs that
+// match on error strings.
+func (e *DeadlineExceededError) Error() string { return "i/o timeout" }
+func (e *DeadlineExceededError) Timeout() bool { return true }
+func (e *DeadlineExceededError) Temporary() bool { return true }
+
+// ErrNotPollable is returned when the file or socket is not suitable
+// for event notification.
+var ErrNotPollable = errors.New("not pollable")
+
+// consume removes data from a slice of byte slices, for writev.
+func consume(v *[][]byte, n int64) {
+ for len(*v) > 0 {
+ ln0 := int64(len((*v)[0]))
+ if ln0 > n {
+ (*v)[0] = (*v)[0][n:]
+ return
+ }
+ n -= ln0
+ (*v)[0] = nil
+ *v = (*v)[1:]
+ }
+}
+
+// TestHookDidWritev is a hook for testing writev.
+var TestHookDidWritev = func(wrote int) {}
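// Worked example for consume above (writev bookkeeping): after a partial
// write of 5 bytes across two pending buffers,
//
//	v := [][]byte{[]byte("abc"), []byte("defg")}
//	consume(&v, 5) // v == [][]byte{[]byte("fg")}
//
// the fully written first buffer is dropped and the second is trimmed to its
// unwritten tail.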
diff --git a/contrib/go/_std_1.18/src/internal/poll/fd_fsync_darwin.go b/contrib/go/_std_1.19/src/internal/poll/fd_fsync_darwin.go
index 48e7596922..48e7596922 100644
--- a/contrib/go/_std_1.18/src/internal/poll/fd_fsync_darwin.go
+++ b/contrib/go/_std_1.19/src/internal/poll/fd_fsync_darwin.go
diff --git a/contrib/go/_std_1.18/src/internal/poll/fd_fsync_posix.go b/contrib/go/_std_1.19/src/internal/poll/fd_fsync_posix.go
index 6f17019e73..6f17019e73 100644
--- a/contrib/go/_std_1.18/src/internal/poll/fd_fsync_posix.go
+++ b/contrib/go/_std_1.19/src/internal/poll/fd_fsync_posix.go
diff --git a/contrib/go/_std_1.18/src/internal/poll/fd_mutex.go b/contrib/go/_std_1.19/src/internal/poll/fd_mutex.go
index 0a8ee6f0d4..0a8ee6f0d4 100644
--- a/contrib/go/_std_1.18/src/internal/poll/fd_mutex.go
+++ b/contrib/go/_std_1.19/src/internal/poll/fd_mutex.go
diff --git a/contrib/go/_std_1.19/src/internal/poll/fd_opendir_darwin.go b/contrib/go/_std_1.19/src/internal/poll/fd_opendir_darwin.go
new file mode 100644
index 0000000000..3ae2dc8448
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/poll/fd_opendir_darwin.go
@@ -0,0 +1,39 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poll
+
+import (
+ "syscall"
+ _ "unsafe" // for go:linkname
+)
+
+// OpenDir returns a pointer to a DIR structure suitable for
+// ReadDir. In case of an error, the name of the failed
+// syscall is returned along with a syscall.Errno.
+func (fd *FD) OpenDir() (uintptr, string, error) {
+ // fdopendir(3) takes control of the file descriptor,
+ // so use a dup.
+ fd2, call, err := fd.Dup()
+ if err != nil {
+ return 0, call, err
+ }
+ var dir uintptr
+ for {
+ dir, err = fdopendir(fd2)
+ if err != syscall.EINTR {
+ break
+ }
+ }
+ if err != nil {
+ syscall.Close(fd2)
+ return 0, "fdopendir", err
+ }
+ return dir, "", nil
+}
+
+// Implemented in syscall/syscall_darwin.go.
+//
+//go:linkname fdopendir syscall.fdopendir
+func fdopendir(fd int) (dir uintptr, err error)
diff --git a/contrib/go/_std_1.19/src/internal/poll/fd_poll_runtime.go b/contrib/go/_std_1.19/src/internal/poll/fd_poll_runtime.go
new file mode 100644
index 0000000000..4d3cc78405
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/poll/fd_poll_runtime.go
@@ -0,0 +1,169 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || windows
+
+package poll
+
+import (
+ "errors"
+ "sync"
+ "syscall"
+ "time"
+ _ "unsafe" // for go:linkname
+)
+
+// runtimeNano returns the current value of the runtime clock in nanoseconds.
+//
+//go:linkname runtimeNano runtime.nanotime
+func runtimeNano() int64
+
+func runtime_pollServerInit()
+func runtime_pollOpen(fd uintptr) (uintptr, int)
+func runtime_pollClose(ctx uintptr)
+func runtime_pollWait(ctx uintptr, mode int) int
+func runtime_pollWaitCanceled(ctx uintptr, mode int) int
+func runtime_pollReset(ctx uintptr, mode int) int
+func runtime_pollSetDeadline(ctx uintptr, d int64, mode int)
+func runtime_pollUnblock(ctx uintptr)
+func runtime_isPollServerDescriptor(fd uintptr) bool
+
+type pollDesc struct {
+ runtimeCtx uintptr
+}
+
+var serverInit sync.Once
+
+func (pd *pollDesc) init(fd *FD) error {
+ serverInit.Do(runtime_pollServerInit)
+ ctx, errno := runtime_pollOpen(uintptr(fd.Sysfd))
+ if errno != 0 {
+ return errnoErr(syscall.Errno(errno))
+ }
+ pd.runtimeCtx = ctx
+ return nil
+}
+
+func (pd *pollDesc) close() {
+ if pd.runtimeCtx == 0 {
+ return
+ }
+ runtime_pollClose(pd.runtimeCtx)
+ pd.runtimeCtx = 0
+}
+
+// evict evicts fd from the pending list, unblocking any I/O running on fd.
+func (pd *pollDesc) evict() {
+ if pd.runtimeCtx == 0 {
+ return
+ }
+ runtime_pollUnblock(pd.runtimeCtx)
+}
+
+func (pd *pollDesc) prepare(mode int, isFile bool) error {
+ if pd.runtimeCtx == 0 {
+ return nil
+ }
+ res := runtime_pollReset(pd.runtimeCtx, mode)
+ return convertErr(res, isFile)
+}
+
+func (pd *pollDesc) prepareRead(isFile bool) error {
+ return pd.prepare('r', isFile)
+}
+
+func (pd *pollDesc) prepareWrite(isFile bool) error {
+ return pd.prepare('w', isFile)
+}
+
+func (pd *pollDesc) wait(mode int, isFile bool) error {
+ if pd.runtimeCtx == 0 {
+ return errors.New("waiting for unsupported file type")
+ }
+ res := runtime_pollWait(pd.runtimeCtx, mode)
+ return convertErr(res, isFile)
+}
+
+func (pd *pollDesc) waitRead(isFile bool) error {
+ return pd.wait('r', isFile)
+}
+
+func (pd *pollDesc) waitWrite(isFile bool) error {
+ return pd.wait('w', isFile)
+}
+
+func (pd *pollDesc) waitCanceled(mode int) {
+ if pd.runtimeCtx == 0 {
+ return
+ }
+ runtime_pollWaitCanceled(pd.runtimeCtx, mode)
+}
+
+func (pd *pollDesc) pollable() bool {
+ return pd.runtimeCtx != 0
+}
+
+// Error values returned by runtime_pollReset and runtime_pollWait.
+// These must match the values in runtime/netpoll.go.
+const (
+ pollNoError = 0
+ pollErrClosing = 1
+ pollErrTimeout = 2
+ pollErrNotPollable = 3
+)
+
+func convertErr(res int, isFile bool) error {
+ switch res {
+ case pollNoError:
+ return nil
+ case pollErrClosing:
+ return errClosing(isFile)
+ case pollErrTimeout:
+ return ErrDeadlineExceeded
+ case pollErrNotPollable:
+ return ErrNotPollable
+ }
+ println("unreachable: ", res)
+ panic("unreachable")
+}
+
+// SetDeadline sets the read and write deadlines associated with fd.
+func (fd *FD) SetDeadline(t time.Time) error {
+ return setDeadlineImpl(fd, t, 'r'+'w')
+}
+
+// SetReadDeadline sets the read deadline associated with fd.
+func (fd *FD) SetReadDeadline(t time.Time) error {
+ return setDeadlineImpl(fd, t, 'r')
+}
+
+// SetWriteDeadline sets the write deadline associated with fd.
+func (fd *FD) SetWriteDeadline(t time.Time) error {
+ return setDeadlineImpl(fd, t, 'w')
+}
+
+func setDeadlineImpl(fd *FD, t time.Time, mode int) error {
+ var d int64
+ if !t.IsZero() {
+ d = int64(time.Until(t))
+ if d == 0 {
+ d = -1 // don't confuse deadline right now with no deadline
+ }
+ }
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ if fd.pd.runtimeCtx == 0 {
+ return ErrNoDeadline
+ }
+ runtime_pollSetDeadline(fd.pd.runtimeCtx, d, mode)
+ return nil
+}
+
+// IsPollDescriptor reports whether fd is the descriptor being used by the poller.
+// This is only used for testing.
+func IsPollDescriptor(fd uintptr) bool {
+ return runtime_isPollServerDescriptor(fd)
+}
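// Usage sketch: the machinery above surfaces through net.Conn deadlines.
// A blocked read whose deadline fires fails with an error matching
// os.ErrDeadlineExceeded (the exported alias of ErrDeadlineExceeded):
//
//	conn.SetReadDeadline(time.Now().Add(5 * time.Second))
//	if _, err := conn.Read(buf); errors.Is(err, os.ErrDeadlineExceeded) {
//		// timed out; the connection remains usable if the deadline is extended
//	}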
diff --git a/contrib/go/_std_1.19/src/internal/poll/fd_posix.go b/contrib/go/_std_1.19/src/internal/poll/fd_posix.go
new file mode 100644
index 0000000000..778fe1e5c1
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/poll/fd_posix.go
@@ -0,0 +1,79 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm) || windows
+
+package poll
+
+import (
+ "io"
+ "syscall"
+)
+
+// eofError returns io.EOF for a zero-byte read with no error on a
+// descriptor where a zero-length read indicates end of file.
+func (fd *FD) eofError(n int, err error) error {
+ if n == 0 && err == nil && fd.ZeroReadIsEOF {
+ return io.EOF
+ }
+ return err
+}
+
+// Shutdown wraps syscall.Shutdown.
+func (fd *FD) Shutdown(how int) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.Shutdown(fd.Sysfd, how)
+}
+
+// Fchown wraps syscall.Fchown.
+func (fd *FD) Fchown(uid, gid int) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return ignoringEINTR(func() error {
+ return syscall.Fchown(fd.Sysfd, uid, gid)
+ })
+}
+
+// Ftruncate wraps syscall.Ftruncate.
+func (fd *FD) Ftruncate(size int64) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return ignoringEINTR(func() error {
+ return syscall.Ftruncate(fd.Sysfd, size)
+ })
+}
+
+// RawControl invokes the user-defined function f for a non-IO
+// operation.
+func (fd *FD) RawControl(f func(uintptr)) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ f(uintptr(fd.Sysfd))
+ return nil
+}
+
+// ignoringEINTR makes a function call and repeats it if it returns
+// an EINTR error. This appears to be required even though we install all
+// signal handlers with SA_RESTART: see #22838, #38033, #38836, #40846.
+// Also #20400 and #36644 are issues in which a signal handler is
+// installed without setting SA_RESTART. None of these are the common case,
+// but there are enough of them that it seems that we can't avoid
+// an EINTR loop.
+func ignoringEINTR(fn func() error) error {
+ for {
+ err := fn()
+ if err != syscall.EINTR {
+ return err
+ }
+ }
+}
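// Sketch of applying ignoringEINTR, mirroring the wrappers above
// (hypothetical call site):
//
//	err := ignoringEINTR(func() error {
//		return syscall.Fchmod(fd.Sysfd, 0o644)
//	})
//
// The closure is retried until it returns anything other than EINTR.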
diff --git a/contrib/go/_std_1.19/src/internal/poll/fd_unix.go b/contrib/go/_std_1.19/src/internal/poll/fd_unix.go
new file mode 100644
index 0000000000..2786064d9f
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/poll/fd_unix.go
@@ -0,0 +1,799 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm)
+
+package poll
+
+import (
+ "internal/syscall/unix"
+ "io"
+ "sync/atomic"
+ "syscall"
+)
+
+// FD is a file descriptor. The net and os packages use this type as a
+// field of a larger type representing a network connection or OS file.
+type FD struct {
+ // Lock sysfd and serialize access to Read and Write methods.
+ fdmu fdMutex
+
+ // System file descriptor. Immutable until Close.
+ Sysfd int
+
+ // I/O poller.
+ pd pollDesc
+
+ // Writev cache.
+ iovecs *[]syscall.Iovec
+
+ // Semaphore signaled when file is closed.
+ csema uint32
+
+ // Non-zero if this file has been set to blocking mode.
+ isBlocking uint32
+
+ // Whether this is a streaming descriptor, as opposed to a
+ // packet-based descriptor like a UDP socket. Immutable.
+ IsStream bool
+
+ // Whether a zero byte read indicates EOF. This is false for a
+ // message based socket connection.
+ ZeroReadIsEOF bool
+
+ // Whether this is a file rather than a network socket.
+ isFile bool
+}
+
+// Init initializes the FD. The Sysfd field should already be set.
+// This can be called multiple times on a single FD.
+// The net argument is a network name from the net package (e.g., "tcp"),
+// or "file".
+// Set pollable to true if fd should be managed by runtime netpoll.
+func (fd *FD) Init(net string, pollable bool) error {
+ // We don't actually care about the various network types.
+ if net == "file" {
+ fd.isFile = true
+ }
+ if !pollable {
+ fd.isBlocking = 1
+ return nil
+ }
+ err := fd.pd.init(fd)
+ if err != nil {
+ // If we could not initialize the runtime poller,
+ // assume we are using blocking mode.
+ fd.isBlocking = 1
+ }
+ return err
+}
+
+// destroy closes the file descriptor. This is called when there are
+// no remaining references.
+func (fd *FD) destroy() error {
+ // Poller may want to unregister fd in readiness notification mechanism,
+ // so this must be executed before CloseFunc.
+ fd.pd.close()
+
+ // We don't use ignoringEINTR here because POSIX does not define
+ // whether the descriptor is closed if close returns EINTR.
+ // If the descriptor is indeed closed, using a loop would race
+ // with some other goroutine opening a new descriptor.
+ // (The Linux kernel guarantees that it is closed on an EINTR error.)
+ err := CloseFunc(fd.Sysfd)
+
+ fd.Sysfd = -1
+ runtime_Semrelease(&fd.csema)
+ return err
+}
+
+// Close closes the FD. The underlying file descriptor is closed by the
+// destroy method when there are no remaining references.
+func (fd *FD) Close() error {
+ if !fd.fdmu.increfAndClose() {
+ return errClosing(fd.isFile)
+ }
+
+ // Unblock any I/O. Once it all unblocks and returns,
+ // so that it cannot be referring to fd.sysfd anymore,
+ // the final decref will close fd.sysfd. This should happen
+ // fairly quickly, since all the I/O is non-blocking, and any
+ // attempts to block in the pollDesc will return errClosing(fd.isFile).
+ fd.pd.evict()
+
+ // The call to decref will call destroy if there are no other
+ // references.
+ err := fd.decref()
+
+ // Wait until the descriptor is closed. If this was the only
+ // reference, it is already closed. Only wait if the file has
+ // not been set to blocking mode, as otherwise any current I/O
+ // may be blocking, and that would block the Close.
+ // No need for an atomic read of isBlocking, increfAndClose means
+ // we have exclusive access to fd.
+ if fd.isBlocking == 0 {
+ runtime_Semacquire(&fd.csema)
+ }
+
+ return err
+}
+
+// SetBlocking puts the file into blocking mode.
+func (fd *FD) SetBlocking() error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ // Atomic store so that concurrent calls to SetBlocking
+ // do not cause a race condition. isBlocking only ever goes
+ // from 0 to 1 so there is no real race here.
+ atomic.StoreUint32(&fd.isBlocking, 1)
+ return syscall.SetNonblock(fd.Sysfd, false)
+}
+
+// Darwin and FreeBSD can't read or write 2GB+ files at a time,
+// even on 64-bit systems.
+// The same is true of socket implementations on many systems.
+// See golang.org/issue/7812 and golang.org/issue/16266.
+// Use 1GB instead of, say, 2GB-1, to keep subsequent reads aligned.
+const maxRW = 1 << 30
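// Worked note: maxRW is 1<<30 bytes (1 GiB). Read below truncates stream
// reads to that size, and Write slices p[nn:max] in 1 GiB chunks, so a
// 3 GiB stream write takes at least three write syscalls even when each
// one succeeds in full.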
+
+// Read implements io.Reader.
+func (fd *FD) Read(p []byte) (int, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, err
+ }
+ defer fd.readUnlock()
+ if len(p) == 0 {
+ // If the caller wanted a zero byte read, return immediately
+ // without trying (but after acquiring the readLock).
+ // Otherwise syscall.Read returns 0, nil which looks like
+ // io.EOF.
+ // TODO(bradfitz): make it wait for readability? (Issue 15735)
+ return 0, nil
+ }
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return 0, err
+ }
+ if fd.IsStream && len(p) > maxRW {
+ p = p[:maxRW]
+ }
+ for {
+ n, err := ignoringEINTRIO(syscall.Read, fd.Sysfd, p)
+ if err != nil {
+ n = 0
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ }
+ err = fd.eofError(n, err)
+ return n, err
+ }
+}
+
+// Pread wraps the pread system call.
+	// Call incref, not readLock: since pread specifies the
+	// offset, it is independent of other reads.
+ // offset it is independent from other reads.
+ // Similarly, using the poller doesn't make sense for pread.
+ if err := fd.incref(); err != nil {
+ return 0, err
+ }
+ if fd.IsStream && len(p) > maxRW {
+ p = p[:maxRW]
+ }
+ var (
+ n int
+ err error
+ )
+ for {
+ n, err = syscall.Pread(fd.Sysfd, p, off)
+ if err != syscall.EINTR {
+ break
+ }
+ }
+ if err != nil {
+ n = 0
+ }
+ fd.decref()
+ err = fd.eofError(n, err)
+ return n, err
+}
+
+// ReadFrom wraps the recvfrom network call.
+func (fd *FD) ReadFrom(p []byte) (int, syscall.Sockaddr, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, nil, err
+ }
+ defer fd.readUnlock()
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return 0, nil, err
+ }
+ for {
+ n, sa, err := syscall.Recvfrom(fd.Sysfd, p, 0)
+ if err != nil {
+ if err == syscall.EINTR {
+ continue
+ }
+ n = 0
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ }
+ err = fd.eofError(n, err)
+ return n, sa, err
+ }
+}
+
+// ReadFromInet4 wraps the recvfrom network call for IPv4.
+func (fd *FD) ReadFromInet4(p []byte, from *syscall.SockaddrInet4) (int, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, err
+ }
+ defer fd.readUnlock()
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return 0, err
+ }
+ for {
+ n, err := unix.RecvfromInet4(fd.Sysfd, p, 0, from)
+ if err != nil {
+ if err == syscall.EINTR {
+ continue
+ }
+ n = 0
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ }
+ err = fd.eofError(n, err)
+ return n, err
+ }
+}
+
+// ReadFromInet6 wraps the recvfrom network call for IPv6.
+func (fd *FD) ReadFromInet6(p []byte, from *syscall.SockaddrInet6) (int, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, err
+ }
+ defer fd.readUnlock()
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return 0, err
+ }
+ for {
+ n, err := unix.RecvfromInet6(fd.Sysfd, p, 0, from)
+ if err != nil {
+ if err == syscall.EINTR {
+ continue
+ }
+ n = 0
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ }
+ err = fd.eofError(n, err)
+ return n, err
+ }
+}
+
+// ReadMsg wraps the recvmsg network call.
+func (fd *FD) ReadMsg(p []byte, oob []byte, flags int) (int, int, int, syscall.Sockaddr, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, 0, 0, nil, err
+ }
+ defer fd.readUnlock()
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return 0, 0, 0, nil, err
+ }
+ for {
+ n, oobn, sysflags, sa, err := syscall.Recvmsg(fd.Sysfd, p, oob, flags)
+ if err != nil {
+ if err == syscall.EINTR {
+ continue
+ }
+ // TODO(dfc) should n and oobn be set to 0
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ }
+ err = fd.eofError(n, err)
+ return n, oobn, sysflags, sa, err
+ }
+}
+
+// ReadMsgInet4 is ReadMsg, but specialized for syscall.SockaddrInet4.
+func (fd *FD) ReadMsgInet4(p []byte, oob []byte, flags int, sa4 *syscall.SockaddrInet4) (int, int, int, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, 0, 0, err
+ }
+ defer fd.readUnlock()
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return 0, 0, 0, err
+ }
+ for {
+ n, oobn, sysflags, err := unix.RecvmsgInet4(fd.Sysfd, p, oob, flags, sa4)
+ if err != nil {
+ if err == syscall.EINTR {
+ continue
+ }
+ // TODO(dfc) should n and oobn be set to 0
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ }
+ err = fd.eofError(n, err)
+ return n, oobn, sysflags, err
+ }
+}
+
+// ReadMsgInet6 is ReadMsg, but specialized for syscall.SockaddrInet6.
+func (fd *FD) ReadMsgInet6(p []byte, oob []byte, flags int, sa6 *syscall.SockaddrInet6) (int, int, int, error) {
+ if err := fd.readLock(); err != nil {
+ return 0, 0, 0, err
+ }
+ defer fd.readUnlock()
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return 0, 0, 0, err
+ }
+ for {
+ n, oobn, sysflags, err := unix.RecvmsgInet6(fd.Sysfd, p, oob, flags, sa6)
+ if err != nil {
+ if err == syscall.EINTR {
+ continue
+ }
+ // TODO(dfc) should n and oobn be set to 0
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ }
+ err = fd.eofError(n, err)
+ return n, oobn, sysflags, err
+ }
+}
+
+// Write implements io.Writer.
+func (fd *FD) Write(p []byte) (int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+ if err := fd.pd.prepareWrite(fd.isFile); err != nil {
+ return 0, err
+ }
+ var nn int
+ for {
+ max := len(p)
+ if fd.IsStream && max-nn > maxRW {
+ max = nn + maxRW
+ }
+ n, err := ignoringEINTRIO(syscall.Write, fd.Sysfd, p[nn:max])
+ if n > 0 {
+ nn += n
+ }
+ if nn == len(p) {
+ return nn, err
+ }
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitWrite(fd.isFile); err == nil {
+ continue
+ }
+ }
+ if err != nil {
+ return nn, err
+ }
+ if n == 0 {
+ return nn, io.ErrUnexpectedEOF
+ }
+ }
+}
+
+// Pwrite wraps the pwrite system call.
+func (fd *FD) Pwrite(p []byte, off int64) (int, error) {
+	// Call incref, not writeLock: since pwrite specifies the
+	// offset, it is independent of other writes.
+ // Similarly, using the poller doesn't make sense for pwrite.
+ if err := fd.incref(); err != nil {
+ return 0, err
+ }
+ defer fd.decref()
+ var nn int
+ for {
+ max := len(p)
+ if fd.IsStream && max-nn > maxRW {
+ max = nn + maxRW
+ }
+ n, err := syscall.Pwrite(fd.Sysfd, p[nn:max], off+int64(nn))
+ if err == syscall.EINTR {
+ continue
+ }
+ if n > 0 {
+ nn += n
+ }
+ if nn == len(p) {
+ return nn, err
+ }
+ if err != nil {
+ return nn, err
+ }
+ if n == 0 {
+ return nn, io.ErrUnexpectedEOF
+ }
+ }
+}
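+
+// NOTE(review): because pwrite carries its own offset, Pwrite only increfs
+// instead of taking the write lock, so positioned writes may run
+// concurrently with each other and with Write, e.g. (sketch):
+//
+//	go fd.Pwrite(hdr, 0)
+//	go fd.Pwrite(body, int64(len(hdr)))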
+
+// WriteToInet4 wraps the sendto network call for IPv4 addresses.
+func (fd *FD) WriteToInet4(p []byte, sa *syscall.SockaddrInet4) (int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+ if err := fd.pd.prepareWrite(fd.isFile); err != nil {
+ return 0, err
+ }
+ for {
+ err := unix.SendtoInet4(fd.Sysfd, p, 0, sa)
+ if err == syscall.EINTR {
+ continue
+ }
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitWrite(fd.isFile); err == nil {
+ continue
+ }
+ }
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+ }
+}
+
+// WriteToInet6 wraps the sendto network call for IPv6 addresses.
+func (fd *FD) WriteToInet6(p []byte, sa *syscall.SockaddrInet6) (int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+ if err := fd.pd.prepareWrite(fd.isFile); err != nil {
+ return 0, err
+ }
+ for {
+ err := unix.SendtoInet6(fd.Sysfd, p, 0, sa)
+ if err == syscall.EINTR {
+ continue
+ }
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitWrite(fd.isFile); err == nil {
+ continue
+ }
+ }
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+ }
+}
+
+// WriteTo wraps the sendto network call.
+func (fd *FD) WriteTo(p []byte, sa syscall.Sockaddr) (int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+ if err := fd.pd.prepareWrite(fd.isFile); err != nil {
+ return 0, err
+ }
+ for {
+ err := syscall.Sendto(fd.Sysfd, p, 0, sa)
+ if err == syscall.EINTR {
+ continue
+ }
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitWrite(fd.isFile); err == nil {
+ continue
+ }
+ }
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+ }
+}
+
+// WriteMsg wraps the sendmsg network call.
+func (fd *FD) WriteMsg(p []byte, oob []byte, sa syscall.Sockaddr) (int, int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, 0, err
+ }
+ defer fd.writeUnlock()
+ if err := fd.pd.prepareWrite(fd.isFile); err != nil {
+ return 0, 0, err
+ }
+ for {
+ n, err := syscall.SendmsgN(fd.Sysfd, p, oob, sa, 0)
+ if err == syscall.EINTR {
+ continue
+ }
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitWrite(fd.isFile); err == nil {
+ continue
+ }
+ }
+ if err != nil {
+ return n, 0, err
+ }
+ return n, len(oob), err
+ }
+}
+
+// WriteMsgInet4 is WriteMsg specialized for syscall.SockaddrInet4.
+func (fd *FD) WriteMsgInet4(p []byte, oob []byte, sa *syscall.SockaddrInet4) (int, int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, 0, err
+ }
+ defer fd.writeUnlock()
+ if err := fd.pd.prepareWrite(fd.isFile); err != nil {
+ return 0, 0, err
+ }
+ for {
+ n, err := unix.SendmsgNInet4(fd.Sysfd, p, oob, sa, 0)
+ if err == syscall.EINTR {
+ continue
+ }
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitWrite(fd.isFile); err == nil {
+ continue
+ }
+ }
+ if err != nil {
+ return n, 0, err
+ }
+ return n, len(oob), err
+ }
+}
+
+// WriteMsgInet6 is WriteMsg specialized for syscall.SockaddrInet6.
+func (fd *FD) WriteMsgInet6(p []byte, oob []byte, sa *syscall.SockaddrInet6) (int, int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, 0, err
+ }
+ defer fd.writeUnlock()
+ if err := fd.pd.prepareWrite(fd.isFile); err != nil {
+ return 0, 0, err
+ }
+ for {
+ n, err := unix.SendmsgNInet6(fd.Sysfd, p, oob, sa, 0)
+ if err == syscall.EINTR {
+ continue
+ }
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitWrite(fd.isFile); err == nil {
+ continue
+ }
+ }
+ if err != nil {
+ return n, 0, err
+ }
+ return n, len(oob), err
+ }
+}
+
+// Accept wraps the accept network call.
+func (fd *FD) Accept() (int, syscall.Sockaddr, string, error) {
+ if err := fd.readLock(); err != nil {
+ return -1, nil, "", err
+ }
+ defer fd.readUnlock()
+
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return -1, nil, "", err
+ }
+ for {
+ s, rsa, errcall, err := accept(fd.Sysfd)
+ if err == nil {
+ return s, rsa, "", err
+ }
+ switch err {
+ case syscall.EINTR:
+ continue
+ case syscall.EAGAIN:
+ if fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ case syscall.ECONNABORTED:
+ // This means that a socket on the listen
+ // queue was closed before we Accept()ed it;
+ // it's a silly error, so try again.
+ continue
+ }
+ return -1, nil, errcall, err
+ }
+}
+
+// Seek wraps syscall.Seek.
+func (fd *FD) Seek(offset int64, whence int) (int64, error) {
+ if err := fd.incref(); err != nil {
+ return 0, err
+ }
+ defer fd.decref()
+ return syscall.Seek(fd.Sysfd, offset, whence)
+}
+
+// ReadDirent wraps syscall.ReadDirent.
+// We treat this like an ordinary system call rather than a call
+// that tries to fill the buffer.
+func (fd *FD) ReadDirent(buf []byte) (int, error) {
+ if err := fd.incref(); err != nil {
+ return 0, err
+ }
+ defer fd.decref()
+ for {
+ n, err := ignoringEINTRIO(syscall.ReadDirent, fd.Sysfd, buf)
+ if err != nil {
+ n = 0
+ if err == syscall.EAGAIN && fd.pd.pollable() {
+ if err = fd.pd.waitRead(fd.isFile); err == nil {
+ continue
+ }
+ }
+ }
+ // Do not call eofError; caller does not expect to see io.EOF.
+ return n, err
+ }
+}
+
+// Fchmod wraps syscall.Fchmod.
+func (fd *FD) Fchmod(mode uint32) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return ignoringEINTR(func() error {
+ return syscall.Fchmod(fd.Sysfd, mode)
+ })
+}
+
+// Fchdir wraps syscall.Fchdir.
+func (fd *FD) Fchdir() error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.Fchdir(fd.Sysfd)
+}
+
+// Fstat wraps syscall.Fstat.
+func (fd *FD) Fstat(s *syscall.Stat_t) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return ignoringEINTR(func() error {
+ return syscall.Fstat(fd.Sysfd, s)
+ })
+}
+
+// tryDupCloexec indicates whether F_DUPFD_CLOEXEC should be used.
+// If the kernel doesn't support it, this is set to 0.
+var tryDupCloexec = int32(1)
+
+// DupCloseOnExec dups fd and marks it close-on-exec.
+func DupCloseOnExec(fd int) (int, string, error) {
+ if syscall.F_DUPFD_CLOEXEC != 0 && atomic.LoadInt32(&tryDupCloexec) == 1 {
+ r0, e1 := fcntl(fd, syscall.F_DUPFD_CLOEXEC, 0)
+ if e1 == nil {
+ return r0, "", nil
+ }
+ switch e1.(syscall.Errno) {
+ case syscall.EINVAL, syscall.ENOSYS:
+ // Old kernel, or js/wasm (which returns
+ // ENOSYS). Fall back to the portable way from
+ // now on.
+ atomic.StoreInt32(&tryDupCloexec, 0)
+ default:
+ return -1, "fcntl", e1
+ }
+ }
+ return dupCloseOnExecOld(fd)
+}
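+
+// NOTE(review): the downgrade is one-way: after the first EINVAL/ENOSYS,
+// tryDupCloexec stays 0 and every later call goes straight to the
+// two-syscall fallback below, trading a single failed fcntl for the
+// lifetime of the process.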
+
+// dupCloseOnExecOld is the traditional way to dup an fd and
+// set its O_CLOEXEC bit, using two system calls.
+func dupCloseOnExecOld(fd int) (int, string, error) {
+ syscall.ForkLock.RLock()
+ defer syscall.ForkLock.RUnlock()
+ newfd, err := syscall.Dup(fd)
+ if err != nil {
+ return -1, "dup", err
+ }
+ syscall.CloseOnExec(newfd)
+ return newfd, "", nil
+}
+
+// Dup duplicates the file descriptor.
+func (fd *FD) Dup() (int, string, error) {
+ if err := fd.incref(); err != nil {
+ return -1, "", err
+ }
+ defer fd.decref()
+ return DupCloseOnExec(fd.Sysfd)
+}
+
+// On Unix variants only, expose the IO event for the net code.
+
+// WaitWrite waits until data can be written to fd.
+func (fd *FD) WaitWrite() error {
+ return fd.pd.waitWrite(fd.isFile)
+}
+
+// WriteOnce is for testing only. It makes a single write call.
+func (fd *FD) WriteOnce(p []byte) (int, error) {
+ if err := fd.writeLock(); err != nil {
+ return 0, err
+ }
+ defer fd.writeUnlock()
+ return ignoringEINTRIO(syscall.Write, fd.Sysfd, p)
+}
+
+// RawRead invokes the user-defined function f for a read operation.
+func (fd *FD) RawRead(f func(uintptr) bool) error {
+ if err := fd.readLock(); err != nil {
+ return err
+ }
+ defer fd.readUnlock()
+ if err := fd.pd.prepareRead(fd.isFile); err != nil {
+ return err
+ }
+ for {
+ if f(uintptr(fd.Sysfd)) {
+ return nil
+ }
+ if err := fd.pd.waitRead(fd.isFile); err != nil {
+ return err
+ }
+ }
+}
+
+// RawWrite invokes the user-defined function f for a write operation.
+func (fd *FD) RawWrite(f func(uintptr) bool) error {
+ if err := fd.writeLock(); err != nil {
+ return err
+ }
+ defer fd.writeUnlock()
+ if err := fd.pd.prepareWrite(fd.isFile); err != nil {
+ return err
+ }
+ for {
+ if f(uintptr(fd.Sysfd)) {
+ return nil
+ }
+ if err := fd.pd.waitWrite(fd.isFile); err != nil {
+ return err
+ }
+ }
+}
+
+// ignoringEINTRIO is like ignoringEINTR, but just for IO calls.
+func ignoringEINTRIO(fn func(fd int, p []byte) (int, error), fd int, p []byte) (int, error) {
+ for {
+ n, err := fn(fd, p)
+ if err != syscall.EINTR {
+ return n, err
+ }
+ }
+}
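+
+// NOTE(review): usage sketch — this is the helper behind Read, Write and
+// ReadDirent above:
+//
+//	n, err := ignoringEINTRIO(syscall.Read, fd.Sysfd, p)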
diff --git a/contrib/go/_std_1.19/src/internal/poll/fd_writev_darwin.go b/contrib/go/_std_1.19/src/internal/poll/fd_writev_darwin.go
new file mode 100644
index 0000000000..b5b8998df8
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/poll/fd_writev_darwin.go
@@ -0,0 +1,17 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin
+
+package poll
+
+import (
+ "syscall"
+ _ "unsafe" // for go:linkname
+)
+
+// Implemented in syscall/syscall_darwin.go.
+//
+//go:linkname writev syscall.writev
+func writev(fd int, iovecs []syscall.Iovec) (uintptr, error)
diff --git a/contrib/go/_std_1.18/src/internal/poll/fd_writev_unix.go b/contrib/go/_std_1.19/src/internal/poll/fd_writev_unix.go
index aa96d104c8..aa96d104c8 100644
--- a/contrib/go/_std_1.18/src/internal/poll/fd_writev_unix.go
+++ b/contrib/go/_std_1.19/src/internal/poll/fd_writev_unix.go
diff --git a/contrib/go/_std_1.19/src/internal/poll/hook_cloexec.go b/contrib/go/_std_1.19/src/internal/poll/hook_cloexec.go
new file mode 100644
index 0000000000..5b3cdcec28
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/poll/hook_cloexec.go
@@ -0,0 +1,12 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build dragonfly || freebsd || linux || netbsd || openbsd || solaris
+
+package poll
+
+import "syscall"
+
+// Accept4Func is used to hook the accept4 call.
+var Accept4Func func(int, int) (int, syscall.Sockaddr, error) = syscall.Accept4
diff --git a/contrib/go/_std_1.19/src/internal/poll/hook_unix.go b/contrib/go/_std_1.19/src/internal/poll/hook_unix.go
new file mode 100644
index 0000000000..1a5035675d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/poll/hook_unix.go
@@ -0,0 +1,15 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm)
+
+package poll
+
+import "syscall"
+
+// CloseFunc is used to hook the close call.
+var CloseFunc func(int) error = syscall.Close
+
+// AcceptFunc is used to hook the accept call.
+var AcceptFunc func(int) (int, syscall.Sockaddr, error) = syscall.Accept
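+
+// NOTE(review): these hooks let tests swap in failing implementations; a
+// hypothetical test could simulate fd exhaustion with:
+//
+//	poll.AcceptFunc = func(int) (int, syscall.Sockaddr, error) {
+//		return -1, nil, syscall.EMFILE
+//	}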
diff --git a/contrib/go/_std_1.18/src/internal/poll/iovec_unix.go b/contrib/go/_std_1.19/src/internal/poll/iovec_unix.go
index c1500840ac..c1500840ac 100644
--- a/contrib/go/_std_1.18/src/internal/poll/iovec_unix.go
+++ b/contrib/go/_std_1.19/src/internal/poll/iovec_unix.go
diff --git a/contrib/go/_std_1.18/src/internal/poll/sendfile_bsd.go b/contrib/go/_std_1.19/src/internal/poll/sendfile_bsd.go
index 89315a8c67..89315a8c67 100644
--- a/contrib/go/_std_1.18/src/internal/poll/sendfile_bsd.go
+++ b/contrib/go/_std_1.19/src/internal/poll/sendfile_bsd.go
diff --git a/contrib/go/_std_1.18/src/internal/poll/sendfile_linux.go b/contrib/go/_std_1.19/src/internal/poll/sendfile_linux.go
index 6e7852347b..6e7852347b 100644
--- a/contrib/go/_std_1.18/src/internal/poll/sendfile_linux.go
+++ b/contrib/go/_std_1.19/src/internal/poll/sendfile_linux.go
diff --git a/contrib/go/_std_1.19/src/internal/poll/sock_cloexec.go b/contrib/go/_std_1.19/src/internal/poll/sock_cloexec.go
new file mode 100644
index 0000000000..e106b28377
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/poll/sock_cloexec.go
@@ -0,0 +1,50 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements accept for platforms that provide a fast path for
+// setting SetNonblock and CloseOnExec.
+
+//go:build dragonfly || freebsd || linux || netbsd || openbsd || solaris
+
+package poll
+
+import "syscall"
+
+// Wrapper around the accept system call that marks the returned file
+// descriptor as nonblocking and close-on-exec.
+func accept(s int) (int, syscall.Sockaddr, string, error) {
+ ns, sa, err := Accept4Func(s, syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC)
+	// On Linux the accept4 system call was introduced in the 2.6.28
+	// kernel, and on FreeBSD in release 10. If we get an ENOSYS
+	// error on either system, or an EINVAL error on Linux, fall
+	// back to using accept.
+ switch err {
+ case nil:
+ return ns, sa, "", nil
+ default: // errors other than the ones listed
+ return -1, sa, "accept4", err
+ case syscall.ENOSYS: // syscall missing
+ case syscall.EINVAL: // some Linux use this instead of ENOSYS
+ case syscall.EACCES: // some Linux use this instead of ENOSYS
+ case syscall.EFAULT: // some Linux use this instead of ENOSYS
+ }
+
+ // See ../syscall/exec_unix.go for description of ForkLock.
+ // It is probably okay to hold the lock across syscall.Accept
+ // because we have put fd.sysfd into non-blocking mode.
+ // However, a call to the File method will put it back into
+ // blocking mode. We can't take that risk, so no use of ForkLock here.
+ ns, sa, err = AcceptFunc(s)
+ if err == nil {
+ syscall.CloseOnExec(ns)
+ }
+ if err != nil {
+ return -1, nil, "accept", err
+ }
+ if err = syscall.SetNonblock(ns, true); err != nil {
+ CloseFunc(ns)
+ return -1, nil, "setnonblock", err
+ }
+ return ns, sa, "", nil
+}
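+
+// NOTE(review): in the fallback path there is a window between AcceptFunc
+// returning and CloseOnExec running in which a concurrently forked child
+// could inherit the new fd; the comment above explains why ForkLock is
+// deliberately not used to close that window.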
diff --git a/contrib/go/_std_1.19/src/internal/poll/sockopt.go b/contrib/go/_std_1.19/src/internal/poll/sockopt.go
new file mode 100644
index 0000000000..a7c9d115b4
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/poll/sockopt.go
@@ -0,0 +1,36 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || windows
+
+package poll
+
+import "syscall"
+
+// SetsockoptInt wraps the setsockopt network call with an int argument.
+func (fd *FD) SetsockoptInt(level, name, arg int) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.SetsockoptInt(fd.Sysfd, level, name, arg)
+}
+
+// SetsockoptInet4Addr wraps the setsockopt network call with an IPv4 address.
+func (fd *FD) SetsockoptInet4Addr(level, name int, arg [4]byte) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.SetsockoptInet4Addr(fd.Sysfd, level, name, arg)
+}
+
+// SetsockoptLinger wraps the setsockopt network call with a Linger argument.
+func (fd *FD) SetsockoptLinger(level, name int, l *syscall.Linger) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.SetsockoptLinger(fd.Sysfd, level, name, l)
+}
diff --git a/contrib/go/_std_1.18/src/internal/poll/sockopt_linux.go b/contrib/go/_std_1.19/src/internal/poll/sockopt_linux.go
index bc79c350ac..bc79c350ac 100644
--- a/contrib/go/_std_1.18/src/internal/poll/sockopt_linux.go
+++ b/contrib/go/_std_1.19/src/internal/poll/sockopt_linux.go
diff --git a/contrib/go/_std_1.19/src/internal/poll/sockopt_unix.go b/contrib/go/_std_1.19/src/internal/poll/sockopt_unix.go
new file mode 100644
index 0000000000..9cba44da9d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/poll/sockopt_unix.go
@@ -0,0 +1,18 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package poll
+
+import "syscall"
+
+// SetsockoptByte wraps the setsockopt network call with a byte argument.
+func (fd *FD) SetsockoptByte(level, name int, arg byte) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.SetsockoptByte(fd.Sysfd, level, name, arg)
+}
diff --git a/contrib/go/_std_1.19/src/internal/poll/sockoptip.go b/contrib/go/_std_1.19/src/internal/poll/sockoptip.go
new file mode 100644
index 0000000000..41955e1fda
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/poll/sockoptip.go
@@ -0,0 +1,27 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || windows
+
+package poll
+
+import "syscall"
+
+// SetsockoptIPMreq wraps the setsockopt network call with an IPMreq argument.
+func (fd *FD) SetsockoptIPMreq(level, name int, mreq *syscall.IPMreq) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.SetsockoptIPMreq(fd.Sysfd, level, name, mreq)
+}
+
+// SetsockoptIPv6Mreq wraps the setsockopt network call with an IPv6Mreq argument.
+func (fd *FD) SetsockoptIPv6Mreq(level, name int, mreq *syscall.IPv6Mreq) error {
+ if err := fd.incref(); err != nil {
+ return err
+ }
+ defer fd.decref()
+ return syscall.SetsockoptIPv6Mreq(fd.Sysfd, level, name, mreq)
+}
diff --git a/contrib/go/_std_1.18/src/internal/poll/splice_linux.go b/contrib/go/_std_1.19/src/internal/poll/splice_linux.go
index 43eec04a71..43eec04a71 100644
--- a/contrib/go/_std_1.18/src/internal/poll/splice_linux.go
+++ b/contrib/go/_std_1.19/src/internal/poll/splice_linux.go
diff --git a/contrib/go/_std_1.19/src/internal/poll/sys_cloexec.go b/contrib/go/_std_1.19/src/internal/poll/sys_cloexec.go
new file mode 100644
index 0000000000..7cd80019f4
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/poll/sys_cloexec.go
@@ -0,0 +1,36 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements accept for platforms that do not provide a fast path for
+// setting SetNonblock and CloseOnExec.
+
+//go:build aix || darwin || (js && wasm)
+
+package poll
+
+import (
+ "syscall"
+)
+
+// Wrapper around the accept system call that marks the returned file
+// descriptor as nonblocking and close-on-exec.
+func accept(s int) (int, syscall.Sockaddr, string, error) {
+ // See ../syscall/exec_unix.go for description of ForkLock.
+ // It is probably okay to hold the lock across syscall.Accept
+ // because we have put fd.sysfd into non-blocking mode.
+ // However, a call to the File method will put it back into
+ // blocking mode. We can't take that risk, so no use of ForkLock here.
+ ns, sa, err := AcceptFunc(s)
+ if err == nil {
+ syscall.CloseOnExec(ns)
+ }
+ if err != nil {
+ return -1, nil, "accept", err
+ }
+ if err = syscall.SetNonblock(ns, true); err != nil {
+ CloseFunc(ns)
+ return -1, nil, "setnonblock", err
+ }
+ return ns, sa, "", nil
+}
diff --git a/contrib/go/_std_1.18/src/internal/poll/writev.go b/contrib/go/_std_1.19/src/internal/poll/writev.go
index cd600b63d7..cd600b63d7 100644
--- a/contrib/go/_std_1.18/src/internal/poll/writev.go
+++ b/contrib/go/_std_1.19/src/internal/poll/writev.go
diff --git a/contrib/go/_std_1.18/src/internal/race/doc.go b/contrib/go/_std_1.19/src/internal/race/doc.go
index 8fa44ce6f1..8fa44ce6f1 100644
--- a/contrib/go/_std_1.18/src/internal/race/doc.go
+++ b/contrib/go/_std_1.19/src/internal/race/doc.go
diff --git a/contrib/go/_std_1.18/src/internal/race/norace.go b/contrib/go/_std_1.19/src/internal/race/norace.go
index 67b1305713..67b1305713 100644
--- a/contrib/go/_std_1.18/src/internal/race/norace.go
+++ b/contrib/go/_std_1.19/src/internal/race/norace.go
diff --git a/contrib/go/_std_1.18/src/internal/reflectlite/asm.s b/contrib/go/_std_1.19/src/internal/reflectlite/asm.s
index a7b69b65ba..a7b69b65ba 100644
--- a/contrib/go/_std_1.18/src/internal/reflectlite/asm.s
+++ b/contrib/go/_std_1.19/src/internal/reflectlite/asm.s
diff --git a/contrib/go/_std_1.18/src/internal/reflectlite/swapper.go b/contrib/go/_std_1.19/src/internal/reflectlite/swapper.go
index fc402bb38a..fc402bb38a 100644
--- a/contrib/go/_std_1.18/src/internal/reflectlite/swapper.go
+++ b/contrib/go/_std_1.19/src/internal/reflectlite/swapper.go
diff --git a/contrib/go/_std_1.19/src/internal/reflectlite/type.go b/contrib/go/_std_1.19/src/internal/reflectlite/type.go
new file mode 100644
index 0000000000..21e3c1278d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/reflectlite/type.go
@@ -0,0 +1,983 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package reflectlite implements a lightweight version of reflect, not using
+// any package except for "runtime" and "unsafe".
+package reflectlite
+
+import (
+ "internal/unsafeheader"
+ "unsafe"
+)
+
+// Type is the representation of a Go type.
+//
+// Not all methods apply to all kinds of types. Restrictions,
+// if any, are noted in the documentation for each method.
+// Use the Kind method to find out the kind of type before
+// calling kind-specific methods. Calling a method
+// inappropriate to the kind of type causes a run-time panic.
+//
+// Type values are comparable, such as with the == operator,
+// so they can be used as map keys.
+// Two Type values are equal if they represent identical types.
+type Type interface {
+ // Methods applicable to all types.
+
+ // Name returns the type's name within its package for a defined type.
+ // For other (non-defined) types it returns the empty string.
+ Name() string
+
+ // PkgPath returns a defined type's package path, that is, the import path
+ // that uniquely identifies the package, such as "encoding/base64".
+ // If the type was predeclared (string, error) or not defined (*T, struct{},
+ // []int, or A where A is an alias for a non-defined type), the package path
+ // will be the empty string.
+ PkgPath() string
+
+ // Size returns the number of bytes needed to store
+ // a value of the given type; it is analogous to unsafe.Sizeof.
+ Size() uintptr
+
+ // Kind returns the specific kind of this type.
+ Kind() Kind
+
+ // Implements reports whether the type implements the interface type u.
+ Implements(u Type) bool
+
+ // AssignableTo reports whether a value of the type is assignable to type u.
+ AssignableTo(u Type) bool
+
+ // Comparable reports whether values of this type are comparable.
+ Comparable() bool
+
+ // String returns a string representation of the type.
+ // The string representation may use shortened package names
+ // (e.g., base64 instead of "encoding/base64") and is not
+ // guaranteed to be unique among types. To test for type identity,
+ // compare the Types directly.
+ String() string
+
+ // Elem returns a type's element type.
+ // It panics if the type's Kind is not Ptr.
+ Elem() Type
+
+ common() *rtype
+ uncommon() *uncommonType
+}
+
+/*
+ * These data structures are known to the compiler (../../cmd/compile/internal/reflectdata/reflect.go).
+ * They are also known to ../runtime/type.go; a few of them are used
+ * to convey information to debuggers.
+ */
+
+// A Kind represents the specific kind of type that a Type represents.
+// The zero Kind is not a valid kind.
+type Kind uint
+
+const (
+ Invalid Kind = iota
+ Bool
+ Int
+ Int8
+ Int16
+ Int32
+ Int64
+ Uint
+ Uint8
+ Uint16
+ Uint32
+ Uint64
+ Uintptr
+ Float32
+ Float64
+ Complex64
+ Complex128
+ Array
+ Chan
+ Func
+ Interface
+ Map
+ Pointer
+ Slice
+ String
+ Struct
+ UnsafePointer
+)
+
+const Ptr = Pointer
+
+// tflag is used by an rtype to signal what extra type information is
+// available in the memory directly following the rtype value.
+//
+// tflag values must be kept in sync with copies in:
+//
+// cmd/compile/internal/reflectdata/reflect.go
+// cmd/link/internal/ld/decodesym.go
+// runtime/type.go
+type tflag uint8
+
+const (
+ // tflagUncommon means that there is a pointer, *uncommonType,
+ // just beyond the outer type structure.
+ //
+ // For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0,
+ // then t has uncommonType data and it can be accessed as:
+ //
+ // type tUncommon struct {
+ // structType
+ // u uncommonType
+ // }
+ // u := &(*tUncommon)(unsafe.Pointer(t)).u
+ tflagUncommon tflag = 1 << 0
+
+ // tflagExtraStar means the name in the str field has an
+ // extraneous '*' prefix. This is because for most types T in
+ // a program, the type *T also exists and reusing the str data
+ // saves binary size.
+ tflagExtraStar tflag = 1 << 1
+
+ // tflagNamed means the type has a name.
+ tflagNamed tflag = 1 << 2
+
+ // tflagRegularMemory means that equal and hash functions can treat
+ // this type as a single region of t.size bytes.
+ tflagRegularMemory tflag = 1 << 3
+)
+
+// rtype is the common implementation of most values.
+// It is embedded in other struct types.
+//
+// rtype must be kept in sync with ../runtime/type.go:/^type._type.
+type rtype struct {
+ size uintptr
+ ptrdata uintptr // number of bytes in the type that can contain pointers
+ hash uint32 // hash of type; avoids computation in hash tables
+ tflag tflag // extra type information flags
+ align uint8 // alignment of variable with this type
+ fieldAlign uint8 // alignment of struct field with this type
+ kind uint8 // enumeration for C
+ // function for comparing objects of this type
+ // (ptr to object A, ptr to object B) -> ==?
+ equal func(unsafe.Pointer, unsafe.Pointer) bool
+ gcdata *byte // garbage collection data
+ str nameOff // string form
+ ptrToThis typeOff // type for pointer to this type, may be zero
+}
+
+// Method on non-interface type
+type method struct {
+ name nameOff // name of method
+ mtyp typeOff // method type (without receiver)
+ ifn textOff // fn used in interface call (one-word receiver)
+ tfn textOff // fn used for normal method call
+}
+
+// uncommonType is present only for defined types or types with methods
+// (if T is a defined type, the uncommonTypes for T and *T have methods).
+// Using a pointer to this struct reduces the overall size required
+// to describe a non-defined type with no methods.
+type uncommonType struct {
+ pkgPath nameOff // import path; empty for built-in types like int, string
+ mcount uint16 // number of methods
+ xcount uint16 // number of exported methods
+ moff uint32 // offset from this uncommontype to [mcount]method
+ _ uint32 // unused
+}
+
+// chanDir represents a channel type's direction.
+type chanDir int
+
+const (
+ recvDir chanDir = 1 << iota // <-chan
+ sendDir // chan<-
+ bothDir = recvDir | sendDir // chan
+)
+
+// arrayType represents a fixed array type.
+type arrayType struct {
+ rtype
+ elem *rtype // array element type
+ slice *rtype // slice type
+ len uintptr
+}
+
+// chanType represents a channel type.
+type chanType struct {
+ rtype
+ elem *rtype // channel element type
+ dir uintptr // channel direction (chanDir)
+}
+
+// funcType represents a function type.
+//
+// A *rtype for each in and out parameter is stored in an array that
+// directly follows the funcType (and possibly its uncommonType). So
+// a function type with one method, one input, and one output is:
+//
+// struct {
+// funcType
+// uncommonType
+// [2]*rtype // [0] is in, [1] is out
+// }
+type funcType struct {
+ rtype
+ inCount uint16
+ outCount uint16 // top bit is set if last input parameter is ...
+}
+
+// imethod represents a method on an interface type
+type imethod struct {
+ name nameOff // name of method
+ typ typeOff // .(*FuncType) underneath
+}
+
+// interfaceType represents an interface type.
+type interfaceType struct {
+ rtype
+ pkgPath name // import path
+ methods []imethod // sorted by hash
+}
+
+// mapType represents a map type.
+type mapType struct {
+ rtype
+ key *rtype // map key type
+ elem *rtype // map element (value) type
+ bucket *rtype // internal bucket structure
+ // function for hashing keys (ptr to key, seed) -> hash
+ hasher func(unsafe.Pointer, uintptr) uintptr
+ keysize uint8 // size of key slot
+ valuesize uint8 // size of value slot
+ bucketsize uint16 // size of bucket
+ flags uint32
+}
+
+// ptrType represents a pointer type.
+type ptrType struct {
+ rtype
+ elem *rtype // pointer element (pointed at) type
+}
+
+// sliceType represents a slice type.
+type sliceType struct {
+ rtype
+ elem *rtype // slice element type
+}
+
+// Struct field
+type structField struct {
+ name name // name is always non-empty
+ typ *rtype // type of field
+ offset uintptr // byte offset of field
+}
+
+func (f *structField) embedded() bool {
+ return f.name.embedded()
+}
+
+// structType represents a struct type.
+type structType struct {
+ rtype
+ pkgPath name
+ fields []structField // sorted by offset
+}
+
+// name is an encoded type name with optional extra data.
+//
+// The first byte is a bit field containing:
+//
+// 1<<0 the name is exported
+// 1<<1 tag data follows the name
+// 1<<2 pkgPath nameOff follows the name and tag
+//
+// Following that is a varint-encoded length of the name,
+// followed by the name data itself (see readVarint below).
+//
+// If tag data follows, it likewise consists of a varint-encoded
+// length followed by the tag data.
+//
+// If the import path follows, then 4 bytes at the end of
+// the data form a nameOff. The import path is only set for concrete
+// methods that are defined in a different package than their type.
+//
+// If a name starts with "*", then the exported bit represents
+// whether the pointed to type is exported.
+type name struct {
+ bytes *byte
+}
+
+func (n name) data(off int, whySafe string) *byte {
+ return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off), whySafe))
+}
+
+func (n name) isExported() bool {
+ return (*n.bytes)&(1<<0) != 0
+}
+
+func (n name) hasTag() bool {
+ return (*n.bytes)&(1<<1) != 0
+}
+
+func (n name) embedded() bool {
+ return (*n.bytes)&(1<<3) != 0
+}
+
+// readVarint parses a varint as encoded by encoding/binary.
+// It returns the number of encoded bytes and the encoded value.
+func (n name) readVarint(off int) (int, int) {
+ v := 0
+ for i := 0; ; i++ {
+ x := *n.data(off+i, "read varint")
+ v += int(x&0x7f) << (7 * i)
+ if x&0x80 == 0 {
+ return i + 1, v
+ }
+ }
+}
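+
+// NOTE(review): worked example — the bytes 0xC8 0x01 decode as
+// (0xC8 & 0x7f) + (0x01 << 7) = 72 + 128 = 200, so readVarint returns
+// (2, 200): two bytes consumed, value 200.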
+
+func (n name) name() (s string) {
+ if n.bytes == nil {
+ return
+ }
+ i, l := n.readVarint(1)
+ hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
+ hdr.Data = unsafe.Pointer(n.data(1+i, "non-empty string"))
+ hdr.Len = l
+ return
+}
+
+func (n name) tag() (s string) {
+ if !n.hasTag() {
+ return ""
+ }
+ i, l := n.readVarint(1)
+ i2, l2 := n.readVarint(1 + i + l)
+ hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
+ hdr.Data = unsafe.Pointer(n.data(1+i+l+i2, "non-empty string"))
+ hdr.Len = l2
+ return
+}
+
+func (n name) pkgPath() string {
+ if n.bytes == nil || *n.data(0, "name flag field")&(1<<2) == 0 {
+ return ""
+ }
+ i, l := n.readVarint(1)
+ off := 1 + i + l
+ if n.hasTag() {
+ i2, l2 := n.readVarint(off)
+ off += i2 + l2
+ }
+ var nameOff int32
+ // Note that this field may not be aligned in memory,
+ // so we cannot use a direct int32 assignment here.
+ copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off, "name offset field")))[:])
+ pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))}
+ return pkgPathName.name()
+}
+
+/*
+ * The compiler knows the exact layout of all the data structures above.
+ * The compiler does not know about the data structures and methods below.
+ */
+
+const (
+ kindDirectIface = 1 << 5
+ kindGCProg = 1 << 6 // Type.gc points to GC program
+ kindMask = (1 << 5) - 1
+)
+
+// String returns the name of k.
+func (k Kind) String() string {
+ if int(k) < len(kindNames) {
+ return kindNames[k]
+ }
+ return kindNames[0]
+}
+
+var kindNames = []string{
+ Invalid: "invalid",
+ Bool: "bool",
+ Int: "int",
+ Int8: "int8",
+ Int16: "int16",
+ Int32: "int32",
+ Int64: "int64",
+ Uint: "uint",
+ Uint8: "uint8",
+ Uint16: "uint16",
+ Uint32: "uint32",
+ Uint64: "uint64",
+ Uintptr: "uintptr",
+ Float32: "float32",
+ Float64: "float64",
+ Complex64: "complex64",
+ Complex128: "complex128",
+ Array: "array",
+ Chan: "chan",
+ Func: "func",
+ Interface: "interface",
+ Map: "map",
+ Ptr: "ptr",
+ Slice: "slice",
+ String: "string",
+ Struct: "struct",
+ UnsafePointer: "unsafe.Pointer",
+}
+
+func (t *uncommonType) methods() []method {
+ if t.mcount == 0 {
+ return nil
+ }
+ return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.mcount > 0"))[:t.mcount:t.mcount]
+}
+
+func (t *uncommonType) exportedMethods() []method {
+ if t.xcount == 0 {
+ return nil
+ }
+ return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.xcount > 0"))[:t.xcount:t.xcount]
+}
+
+// resolveNameOff resolves a name offset from a base pointer.
+// The (*rtype).nameOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
+
+// resolveTypeOff resolves an *rtype offset from a base type.
+// The (*rtype).typeOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
+
+type nameOff int32 // offset to a name
+type typeOff int32 // offset to an *rtype
+type textOff int32 // offset from top of text section
+
+func (t *rtype) nameOff(off nameOff) name {
+ return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))}
+}
+
+func (t *rtype) typeOff(off typeOff) *rtype {
+ return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
+}
+
+func (t *rtype) uncommon() *uncommonType {
+ if t.tflag&tflagUncommon == 0 {
+ return nil
+ }
+ switch t.Kind() {
+ case Struct:
+ return &(*structTypeUncommon)(unsafe.Pointer(t)).u
+ case Ptr:
+ type u struct {
+ ptrType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Func:
+ type u struct {
+ funcType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Slice:
+ type u struct {
+ sliceType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Array:
+ type u struct {
+ arrayType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Chan:
+ type u struct {
+ chanType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Map:
+ type u struct {
+ mapType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Interface:
+ type u struct {
+ interfaceType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ default:
+ type u struct {
+ rtype
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ }
+}
+
+func (t *rtype) String() string {
+ s := t.nameOff(t.str).name()
+ if t.tflag&tflagExtraStar != 0 {
+ return s[1:]
+ }
+ return s
+}
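+
+// NOTE(review): tflagExtraStar in action — if a type shares its str data
+// with its pointer type and stores "*main.T", String above returns
+// "main.T" by slicing off the leading '*'.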
+
+func (t *rtype) Size() uintptr { return t.size }
+
+func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
+
+func (t *rtype) pointers() bool { return t.ptrdata != 0 }
+
+func (t *rtype) common() *rtype { return t }
+
+func (t *rtype) exportedMethods() []method {
+ ut := t.uncommon()
+ if ut == nil {
+ return nil
+ }
+ return ut.exportedMethods()
+}
+
+func (t *rtype) NumMethod() int {
+ if t.Kind() == Interface {
+ tt := (*interfaceType)(unsafe.Pointer(t))
+ return tt.NumMethod()
+ }
+ return len(t.exportedMethods())
+}
+
+func (t *rtype) PkgPath() string {
+ if t.tflag&tflagNamed == 0 {
+ return ""
+ }
+ ut := t.uncommon()
+ if ut == nil {
+ return ""
+ }
+ return t.nameOff(ut.pkgPath).name()
+}
+
+func (t *rtype) hasName() bool {
+ return t.tflag&tflagNamed != 0
+}
+
+func (t *rtype) Name() string {
+ if !t.hasName() {
+ return ""
+ }
+ s := t.String()
+ i := len(s) - 1
+ sqBrackets := 0
+ for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
+ switch s[i] {
+ case ']':
+ sqBrackets++
+ case '[':
+ sqBrackets--
+ }
+ i--
+ }
+ return s[i+1:]
+}
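+
+// NOTE(review): the bracket counting keeps dots inside type parameters
+// from being mistaken for the package qualifier, e.g. for the string
+// "main.Pair[main.A,main.B]" Name returns "Pair[main.A,main.B]".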
+
+func (t *rtype) chanDir() chanDir {
+ if t.Kind() != Chan {
+ panic("reflect: chanDir of non-chan type")
+ }
+ tt := (*chanType)(unsafe.Pointer(t))
+ return chanDir(tt.dir)
+}
+
+func (t *rtype) Elem() Type {
+ switch t.Kind() {
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ case Chan:
+ tt := (*chanType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ case Map:
+ tt := (*mapType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ case Ptr:
+ tt := (*ptrType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ case Slice:
+ tt := (*sliceType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ }
+ panic("reflect: Elem of invalid type")
+}
+
+func (t *rtype) In(i int) Type {
+ if t.Kind() != Func {
+ panic("reflect: In of non-func type")
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return toType(tt.in()[i])
+}
+
+func (t *rtype) Key() Type {
+ if t.Kind() != Map {
+ panic("reflect: Key of non-map type")
+ }
+ tt := (*mapType)(unsafe.Pointer(t))
+ return toType(tt.key)
+}
+
+func (t *rtype) Len() int {
+ if t.Kind() != Array {
+ panic("reflect: Len of non-array type")
+ }
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return int(tt.len)
+}
+
+func (t *rtype) NumField() int {
+ if t.Kind() != Struct {
+ panic("reflect: NumField of non-struct type")
+ }
+ tt := (*structType)(unsafe.Pointer(t))
+ return len(tt.fields)
+}
+
+func (t *rtype) NumIn() int {
+ if t.Kind() != Func {
+ panic("reflect: NumIn of non-func type")
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return int(tt.inCount)
+}
+
+func (t *rtype) NumOut() int {
+ if t.Kind() != Func {
+ panic("reflect: NumOut of non-func type")
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return len(tt.out())
+}
+
+func (t *rtype) Out(i int) Type {
+ if t.Kind() != Func {
+ panic("reflect: Out of non-func type")
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return toType(tt.out()[i])
+}
+
+func (t *funcType) in() []*rtype {
+ uadd := unsafe.Sizeof(*t)
+ if t.tflag&tflagUncommon != 0 {
+ uadd += unsafe.Sizeof(uncommonType{})
+ }
+ if t.inCount == 0 {
+ return nil
+ }
+ return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.inCount:t.inCount]
+}
+
+func (t *funcType) out() []*rtype {
+ uadd := unsafe.Sizeof(*t)
+ if t.tflag&tflagUncommon != 0 {
+ uadd += unsafe.Sizeof(uncommonType{})
+ }
+ outCount := t.outCount & (1<<15 - 1)
+ if outCount == 0 {
+ return nil
+ }
+ return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "outCount > 0"))[t.inCount : t.inCount+outCount : t.inCount+outCount]
+}
+
+// add returns p+x.
+//
+// The whySafe string is ignored, so that the function still inlines
+// as efficiently as p+x, but all call sites should use the string to
+// record why the addition is safe, which is to say why the addition
+// does not cause x to advance to the very end of p's allocation
+// and therefore point incorrectly at the next block in memory.
+func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(p) + x)
+}
+
+// NumMethod returns the number of interface methods in the type's method set.
+func (t *interfaceType) NumMethod() int { return len(t.methods) }
+
+// TypeOf returns the reflection Type that represents the dynamic type of i.
+// If i is a nil interface value, TypeOf returns nil.
+func TypeOf(i any) Type {
+ eface := *(*emptyInterface)(unsafe.Pointer(&i))
+ return toType(eface.typ)
+}
+
+func (t *rtype) Implements(u Type) bool {
+ if u == nil {
+ panic("reflect: nil type passed to Type.Implements")
+ }
+ if u.Kind() != Interface {
+ panic("reflect: non-interface type passed to Type.Implements")
+ }
+ return implements(u.(*rtype), t)
+}
+
+func (t *rtype) AssignableTo(u Type) bool {
+ if u == nil {
+ panic("reflect: nil type passed to Type.AssignableTo")
+ }
+ uu := u.(*rtype)
+ return directlyAssignable(uu, t) || implements(uu, t)
+}
+
+func (t *rtype) Comparable() bool {
+ return t.equal != nil
+}
+
+// implements reports whether the type V implements the interface type T.
+func implements(T, V *rtype) bool {
+ if T.Kind() != Interface {
+ return false
+ }
+ t := (*interfaceType)(unsafe.Pointer(T))
+ if len(t.methods) == 0 {
+ return true
+ }
+
+ // The same algorithm applies in both cases, but the
+ // method tables for an interface type and a concrete type
+ // are different, so the code is duplicated.
+ // In both cases the algorithm is a linear scan over the two
+ // lists - T's methods and V's methods - simultaneously.
+ // Since method tables are stored in a unique sorted order
+ // (alphabetical, with no duplicate method names), the scan
+ // through V's methods must hit a match for each of T's
+ // methods along the way, or else V does not implement T.
+ // This lets us run the scan in overall linear time instead of
+ // the quadratic time a naive search would require.
+ // See also ../runtime/iface.go.
+ if V.Kind() == Interface {
+ v := (*interfaceType)(unsafe.Pointer(V))
+ i := 0
+ for j := 0; j < len(v.methods); j++ {
+ tm := &t.methods[i]
+ tmName := t.nameOff(tm.name)
+ vm := &v.methods[j]
+ vmName := V.nameOff(vm.name)
+ if vmName.name() == tmName.name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) {
+ if !tmName.isExported() {
+ tmPkgPath := tmName.pkgPath()
+ if tmPkgPath == "" {
+ tmPkgPath = t.pkgPath.name()
+ }
+ vmPkgPath := vmName.pkgPath()
+ if vmPkgPath == "" {
+ vmPkgPath = v.pkgPath.name()
+ }
+ if tmPkgPath != vmPkgPath {
+ continue
+ }
+ }
+ if i++; i >= len(t.methods) {
+ return true
+ }
+ }
+ }
+ return false
+ }
+
+ v := V.uncommon()
+ if v == nil {
+ return false
+ }
+ i := 0
+ vmethods := v.methods()
+ for j := 0; j < int(v.mcount); j++ {
+ tm := &t.methods[i]
+ tmName := t.nameOff(tm.name)
+ vm := vmethods[j]
+ vmName := V.nameOff(vm.name)
+ if vmName.name() == tmName.name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) {
+ if !tmName.isExported() {
+ tmPkgPath := tmName.pkgPath()
+ if tmPkgPath == "" {
+ tmPkgPath = t.pkgPath.name()
+ }
+ vmPkgPath := vmName.pkgPath()
+ if vmPkgPath == "" {
+ vmPkgPath = V.nameOff(v.pkgPath).name()
+ }
+ if tmPkgPath != vmPkgPath {
+ continue
+ }
+ }
+ if i++; i >= len(t.methods) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// directlyAssignable reports whether a value x of type V can be directly
+// assigned (using memmove) to a value of type T.
+// https://golang.org/doc/go_spec.html#Assignability
+// Ignoring the interface rules (implemented elsewhere)
+// and the ideal constant rules (no ideal constants at run time).
+func directlyAssignable(T, V *rtype) bool {
+ // x's type V is identical to T?
+ if T == V {
+ return true
+ }
+
+ // Otherwise at least one of T and V must not be defined
+ // and they must have the same kind.
+ if T.hasName() && V.hasName() || T.Kind() != V.Kind() {
+ return false
+ }
+
+ // x's type T and V must have identical underlying types.
+ return haveIdenticalUnderlyingType(T, V, true)
+}
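+
+// NOTE(review): for example, with type MyInt int, MyInt and int are
+// directly assignable to each other (only one side is defined), while two
+// distinct defined types MyInt and MyInt2 are not, despite identical
+// underlying types.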
+
+func haveIdenticalType(T, V Type, cmpTags bool) bool {
+ if cmpTags {
+ return T == V
+ }
+
+ if T.Name() != V.Name() || T.Kind() != V.Kind() {
+ return false
+ }
+
+ return haveIdenticalUnderlyingType(T.common(), V.common(), false)
+}
+
+func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
+ if T == V {
+ return true
+ }
+
+ kind := T.Kind()
+ if kind != V.Kind() {
+ return false
+ }
+
+	// Non-composite types of equal kind have the same underlying type
+ // (the predefined instance of the type).
+ if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
+ return true
+ }
+
+ // Composite types.
+ switch kind {
+ case Array:
+ return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case Chan:
+ // Special case:
+ // x is a bidirectional channel value, T is a channel type,
+ // and x's type V and T have identical element types.
+ if V.chanDir() == bothDir && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) {
+ return true
+ }
+
+ // Otherwise continue test for identical underlying type.
+ return V.chanDir() == T.chanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case Func:
+ t := (*funcType)(unsafe.Pointer(T))
+ v := (*funcType)(unsafe.Pointer(V))
+ if t.outCount != v.outCount || t.inCount != v.inCount {
+ return false
+ }
+ for i := 0; i < t.NumIn(); i++ {
+ if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
+ return false
+ }
+ }
+ for i := 0; i < t.NumOut(); i++ {
+ if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
+ return false
+ }
+ }
+ return true
+
+ case Interface:
+ t := (*interfaceType)(unsafe.Pointer(T))
+ v := (*interfaceType)(unsafe.Pointer(V))
+ if len(t.methods) == 0 && len(v.methods) == 0 {
+ return true
+ }
+ // Might have the same methods but still
+ // need a run time conversion.
+ return false
+
+ case Map:
+ return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case Ptr, Slice:
+ return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case Struct:
+ t := (*structType)(unsafe.Pointer(T))
+ v := (*structType)(unsafe.Pointer(V))
+ if len(t.fields) != len(v.fields) {
+ return false
+ }
+ if t.pkgPath.name() != v.pkgPath.name() {
+ return false
+ }
+ for i := range t.fields {
+ tf := &t.fields[i]
+ vf := &v.fields[i]
+ if tf.name.name() != vf.name.name() {
+ return false
+ }
+ if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
+ return false
+ }
+ if cmpTags && tf.name.tag() != vf.name.tag() {
+ return false
+ }
+ if tf.offset != vf.offset {
+ return false
+ }
+ if tf.embedded() != vf.embedded() {
+ return false
+ }
+ }
+ return true
+ }
+
+ return false
+}
+
+type structTypeUncommon struct {
+ structType
+ u uncommonType
+}
+
+// toType converts from a *rtype to a Type that can be returned
+// to the client of package reflect. In gc, the only concern is that
+// a nil *rtype must be replaced by a nil Type, but in gccgo this
+// function takes care of ensuring that multiple *rtype for the same
+// type are coalesced into a single Type.
+func toType(t *rtype) Type {
+ if t == nil {
+ return nil
+ }
+ return t
+}
+
+// ifaceIndir reports whether t is stored indirectly in an interface value.
+func ifaceIndir(t *rtype) bool {
+ return t.kind&kindDirectIface == 0
+}
diff --git a/contrib/go/_std_1.19/src/internal/reflectlite/value.go b/contrib/go/_std_1.19/src/internal/reflectlite/value.go
new file mode 100644
index 0000000000..b9bca3ab44
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/reflectlite/value.go
@@ -0,0 +1,477 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflectlite
+
+import (
+ "internal/goarch"
+ "internal/unsafeheader"
+ "runtime"
+ "unsafe"
+)
+
+// Value is the reflection interface to a Go value.
+//
+// Not all methods apply to all kinds of values. Restrictions,
+// if any, are noted in the documentation for each method.
+// Use the Kind method to find out the kind of value before
+// calling kind-specific methods. Calling a method
+// inappropriate to the kind of value causes a run-time panic.
+//
+// The zero Value represents no value.
+// Its IsValid method returns false, its Kind method returns Invalid,
+// its String method returns "<invalid Value>", and all other methods panic.
+// Most functions and methods never return an invalid value.
+// If one does, its documentation states the conditions explicitly.
+//
+// A Value can be used concurrently by multiple goroutines provided that
+// the underlying Go value can be used concurrently for the equivalent
+// direct operations.
+//
+// To compare two Values, compare the results of the Interface method.
+// Using == on two Values does not compare the underlying values
+// they represent.
+type Value struct {
+ // typ holds the type of the value represented by a Value.
+ typ *rtype
+
+ // Pointer-valued data or, if flagIndir is set, pointer to data.
+ // Valid when either flagIndir is set or typ.pointers() is true.
+ ptr unsafe.Pointer
+
+ // flag holds metadata about the value.
+ // The lowest bits are flag bits:
+	// - flagStickyRO: obtained via an unexported, non-embedded field, so read-only
+ // - flagEmbedRO: obtained via unexported embedded field, so read-only
+ // - flagIndir: val holds a pointer to the data
+ // - flagAddr: v.CanAddr is true (implies flagIndir)
+ // Value cannot represent method values.
+ // The next five bits give the Kind of the value.
+ // This repeats typ.Kind() except for method values.
+ // The remaining 23+ bits give a method number for method values.
+ // If flag.kind() != Func, code can assume that flagMethod is unset.
+ // If ifaceIndir(typ), code can assume that flagIndir is set.
+ flag
+
+ // A method value represents a curried method invocation
+ // like r.Read for some receiver r. The typ+val+flag bits describe
+ // the receiver r, but the flag's Kind bits say Func (methods are
+ // functions), and the top bits of the flag give the method number
+ // in r's type's method table.
+}
+
+type flag uintptr
+
+const (
+ flagKindWidth = 5 // there are 27 kinds
+ flagKindMask flag = 1<<flagKindWidth - 1
+ flagStickyRO flag = 1 << 5
+ flagEmbedRO flag = 1 << 6
+ flagIndir flag = 1 << 7
+ flagAddr flag = 1 << 8
+ flagMethod flag = 1 << 9
+ flagMethodShift = 10
+ flagRO flag = flagStickyRO | flagEmbedRO
+)
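+
+// NOTE(review): as an illustration, the flag word for an addressable
+// struct field reached through a pointer is
+//
+//	flag(Struct) | flagIndir | flagAddr
+//
+// and kind() below recovers the Kind by masking off the low five bits.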
+
+func (f flag) kind() Kind {
+ return Kind(f & flagKindMask)
+}
+
+func (f flag) ro() flag {
+ if f&flagRO != 0 {
+ return flagStickyRO
+ }
+ return 0
+}
+
+// pointer returns the underlying pointer represented by v.
+// v.Kind() must be Pointer, Map, Chan, Func, or UnsafePointer.
+func (v Value) pointer() unsafe.Pointer {
+ if v.typ.size != goarch.PtrSize || !v.typ.pointers() {
+ panic("can't call pointer on a non-pointer Value")
+ }
+ if v.flag&flagIndir != 0 {
+ return *(*unsafe.Pointer)(v.ptr)
+ }
+ return v.ptr
+}
+
+// packEface converts v to the empty interface.
+func packEface(v Value) any {
+ t := v.typ
+ var i any
+ e := (*emptyInterface)(unsafe.Pointer(&i))
+ // First, fill in the data portion of the interface.
+ switch {
+ case ifaceIndir(t):
+ if v.flag&flagIndir == 0 {
+ panic("bad indir")
+ }
+ // Value is indirect, and so is the interface we're making.
+ ptr := v.ptr
+ if v.flag&flagAddr != 0 {
+ // TODO: pass safe boolean from valueInterface so
+ // we don't need to copy if safe==true?
+ c := unsafe_New(t)
+ typedmemmove(t, c, ptr)
+ ptr = c
+ }
+ e.word = ptr
+ case v.flag&flagIndir != 0:
+ // Value is indirect, but interface is direct. We need
+ // to load the data at v.ptr into the interface data word.
+ e.word = *(*unsafe.Pointer)(v.ptr)
+ default:
+ // Value is direct, and so is the interface.
+ e.word = v.ptr
+ }
+ // Now, fill in the type portion. We're very careful here not
+ // to have any operation between the e.word and e.typ assignments
+ // that would let the garbage collector observe the partially-built
+ // interface value.
+ e.typ = t
+ return i
+}
+
+// unpackEface converts the empty interface i to a Value.
+func unpackEface(i any) Value {
+ e := (*emptyInterface)(unsafe.Pointer(&i))
+ // NOTE: don't read e.word until we know whether it is really a pointer or not.
+ t := e.typ
+ if t == nil {
+ return Value{}
+ }
+ f := flag(t.Kind())
+ if ifaceIndir(t) {
+ f |= flagIndir
+ }
+ return Value{t, e.word, f}
+}
+
+// A ValueError occurs when a Value method is invoked on
+// a Value that does not support it. Such cases are documented
+// in the description of each method.
+type ValueError struct {
+ Method string
+ Kind Kind
+}
+
+func (e *ValueError) Error() string {
+ if e.Kind == 0 {
+ return "reflect: call of " + e.Method + " on zero Value"
+ }
+ return "reflect: call of " + e.Method + " on " + e.Kind.String() + " Value"
+}
+
+// methodName returns the name of the calling method,
+// assumed to be two stack frames above.
+func methodName() string {
+ pc, _, _, _ := runtime.Caller(2)
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ return "unknown method"
+ }
+ return f.Name()
+}
+
+// emptyInterface is the header for an interface{} value.
+type emptyInterface struct {
+ typ *rtype
+ word unsafe.Pointer
+}
+
+// mustBeExported panics if f records that the value was obtained using
+// an unexported field.
+func (f flag) mustBeExported() {
+ if f == 0 {
+ panic(&ValueError{methodName(), 0})
+ }
+ if f&flagRO != 0 {
+ panic("reflect: " + methodName() + " using value obtained using unexported field")
+ }
+}
+
+// mustBeAssignable panics if f records that the value is not assignable,
+// which is to say that either it was obtained using an unexported field
+// or it is not addressable.
+func (f flag) mustBeAssignable() {
+ if f == 0 {
+ panic(&ValueError{methodName(), Invalid})
+ }
+ // Assignable if addressable and not read-only.
+ if f&flagRO != 0 {
+ panic("reflect: " + methodName() + " using value obtained using unexported field")
+ }
+ if f&flagAddr == 0 {
+ panic("reflect: " + methodName() + " using unaddressable value")
+ }
+}
+
+// CanSet reports whether the value of v can be changed.
+// A Value can be changed only if it is addressable and was not
+// obtained by the use of unexported struct fields.
+// If CanSet returns false, calling Set or any type-specific
+// setter (e.g., SetBool, SetInt) will panic.
+func (v Value) CanSet() bool {
+ return v.flag&(flagAddr|flagRO) == flagAddr
+}
+
+// Elem returns the value that the interface v contains
+// or that the pointer v points to.
+// It panics if v's Kind is not Interface or Pointer.
+// It returns the zero Value if v is nil.
+func (v Value) Elem() Value {
+ k := v.kind()
+ switch k {
+ case Interface:
+ var eface any
+ if v.typ.NumMethod() == 0 {
+ eface = *(*any)(v.ptr)
+ } else {
+ eface = (any)(*(*interface {
+ M()
+ })(v.ptr))
+ }
+ x := unpackEface(eface)
+ if x.flag != 0 {
+ x.flag |= v.flag.ro()
+ }
+ return x
+ case Pointer:
+ ptr := v.ptr
+ if v.flag&flagIndir != 0 {
+ ptr = *(*unsafe.Pointer)(ptr)
+ }
+ // The returned value's address is v's value.
+ if ptr == nil {
+ return Value{}
+ }
+ tt := (*ptrType)(unsafe.Pointer(v.typ))
+ typ := tt.elem
+ fl := v.flag&flagRO | flagIndir | flagAddr
+ fl |= flag(typ.Kind())
+ return Value{typ, ptr, fl}
+ }
+ panic(&ValueError{"reflectlite.Value.Elem", v.kind()})
+}
+
+func valueInterface(v Value) any {
+ if v.flag == 0 {
+ panic(&ValueError{"reflectlite.Value.Interface", 0})
+ }
+
+ if v.kind() == Interface {
+ // Special case: return the element inside the interface.
+ // Empty interface has one layout, all interfaces with
+ // methods have a second layout.
+ if v.numMethod() == 0 {
+ return *(*any)(v.ptr)
+ }
+ return *(*interface {
+ M()
+ })(v.ptr)
+ }
+
+ // TODO: pass safe to packEface so we don't need to copy if safe==true?
+ return packEface(v)
+}
+
+// IsNil reports whether its argument v is nil. The argument must be
+// a chan, func, interface, map, pointer, or slice value; if it is
+// not, IsNil panics. Note that IsNil is not always equivalent to a
+// regular comparison with nil in Go. For example, if v was created
+// by calling ValueOf with an uninitialized interface variable i,
+// i==nil will be true but v.IsNil will panic as v will be the zero
+// Value.
+func (v Value) IsNil() bool {
+ k := v.kind()
+ switch k {
+ case Chan, Func, Map, Pointer, UnsafePointer:
+ // if v.flag&flagMethod != 0 {
+ // return false
+ // }
+ ptr := v.ptr
+ if v.flag&flagIndir != 0 {
+ ptr = *(*unsafe.Pointer)(ptr)
+ }
+ return ptr == nil
+ case Interface, Slice:
+ // Both interface and slice are nil if first word is 0.
+ // Both are always bigger than a word; assume flagIndir.
+ return *(*unsafe.Pointer)(v.ptr) == nil
+ }
+ panic(&ValueError{"reflectlite.Value.IsNil", v.kind()})
+}
+
+// IsValid reports whether v represents a value.
+// It returns false if v is the zero Value.
+// If IsValid returns false, all other methods except String panic.
+// Most functions and methods never return an invalid Value.
+// If one does, its documentation states the conditions explicitly.
+func (v Value) IsValid() bool {
+ return v.flag != 0
+}
+
+// Kind returns v's Kind.
+// If v is the zero Value (IsValid returns false), Kind returns Invalid.
+func (v Value) Kind() Kind {
+ return v.kind()
+}
+
+// implemented in runtime:
+func chanlen(unsafe.Pointer) int
+func maplen(unsafe.Pointer) int
+
+// Len returns v's length.
+// It panics if v's Kind is not Array, Chan, Map, Slice, or String.
+func (v Value) Len() int {
+ k := v.kind()
+ switch k {
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(v.typ))
+ return int(tt.len)
+ case Chan:
+ return chanlen(v.pointer())
+ case Map:
+ return maplen(v.pointer())
+ case Slice:
+ // Slice is bigger than a word; assume flagIndir.
+ return (*unsafeheader.Slice)(v.ptr).Len
+ case String:
+ // String is bigger than a word; assume flagIndir.
+ return (*unsafeheader.String)(v.ptr).Len
+ }
+ panic(&ValueError{"reflect.Value.Len", v.kind()})
+}
+
+// numMethod returns the number of exported methods in the value's method set.
+func (v Value) numMethod() int {
+ if v.typ == nil {
+ panic(&ValueError{"reflectlite.Value.NumMethod", Invalid})
+ }
+ return v.typ.NumMethod()
+}
+
+// Set assigns x to the value v.
+// It panics if CanSet returns false.
+// As in Go, x's value must be assignable to v's type.
+func (v Value) Set(x Value) {
+ v.mustBeAssignable()
+ x.mustBeExported() // do not let unexported x leak
+ var target unsafe.Pointer
+ if v.kind() == Interface {
+ target = v.ptr
+ }
+ x = x.assignTo("reflectlite.Set", v.typ, target)
+ if x.flag&flagIndir != 0 {
+ typedmemmove(v.typ, v.ptr, x.ptr)
+ } else {
+ *(*unsafe.Pointer)(v.ptr) = x.ptr
+ }
+}
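+
+// Illustrative sketch (not part of the upstream source): CanSet and Set
+// as exposed through the exported reflect package:
+//
+//    x := 1
+//    reflect.ValueOf(x).CanSet()     // false: a copy, not addressable
+//    v := reflect.ValueOf(&x).Elem() // addressable via the pointer
+//    v.CanSet()                      // true
+//    v.Set(reflect.ValueOf(2))       // x is now 2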
+
+// Type returns v's type.
+func (v Value) Type() Type {
+ f := v.flag
+ if f == 0 {
+ panic(&ValueError{"reflectlite.Value.Type", Invalid})
+ }
+ // Method values not supported.
+ return v.typ
+}
+
+/*
+ * constructors
+ */
+
+// implemented in package runtime
+func unsafe_New(*rtype) unsafe.Pointer
+
+// ValueOf returns a new Value initialized to the concrete value
+// stored in the interface i. ValueOf(nil) returns the zero Value.
+func ValueOf(i any) Value {
+ if i == nil {
+ return Value{}
+ }
+
+ // TODO: Maybe allow contents of a Value to live on the stack.
+ // For now we make the contents always escape to the heap. It
+ // makes life easier in a few places (see chanrecv/mapassign
+ // comment below).
+ escapes(i)
+
+ return unpackEface(i)
+}
+
+// assignTo returns a value v that can be assigned directly to typ.
+// It panics if v is not assignable to typ.
+// For a conversion to an interface type, target is a suggested scratch space to use.
+func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value {
+ // if v.flag&flagMethod != 0 {
+ // v = makeMethodValue(context, v)
+ // }
+
+ switch {
+ case directlyAssignable(dst, v.typ):
+ // Overwrite type so that they match.
+ // Same memory layout, so no harm done.
+ fl := v.flag&(flagAddr|flagIndir) | v.flag.ro()
+ fl |= flag(dst.Kind())
+ return Value{dst, v.ptr, fl}
+
+ case implements(dst, v.typ):
+ if target == nil {
+ target = unsafe_New(dst)
+ }
+ if v.Kind() == Interface && v.IsNil() {
+ // A nil ReadWriter passed to nil Reader is OK,
+ // but using ifaceE2I below will panic.
+ // Avoid the panic by returning a nil dst (e.g., Reader) explicitly.
+ return Value{dst, nil, flag(Interface)}
+ }
+ x := valueInterface(v)
+ if dst.NumMethod() == 0 {
+ *(*any)(target) = x
+ } else {
+ ifaceE2I(dst, x, target)
+ }
+ return Value{dst, target, flagIndir | flag(Interface)}
+ }
+
+ // Failed.
+ panic(context + ": value of type " + v.typ.String() + " is not assignable to type " + dst.String())
+}
+
+// arrayAt returns the i-th element of p,
+// an array whose elements are eltSize bytes wide.
+// The array pointed at by p must have at least i+1 elements:
+// it is invalid (but impossible to check here) to pass i >= len,
+// because then the result will point outside the array.
+// whySafe must explain why i < len. (Passing "i < len" is fine;
+// the benefit is to surface this assumption at the call site.)
+func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer {
+ return add(p, uintptr(i)*eltSize, "i < len")
+}
+
+func ifaceE2I(t *rtype, src any, dst unsafe.Pointer)
+
+// typedmemmove copies a value of type t to dst from src.
+//
+//go:noescape
+func typedmemmove(t *rtype, dst, src unsafe.Pointer)
+
+// Dummy annotation marking that the value x escapes,
+// for use in cases where the reflect code is so clever that
+// the compiler cannot follow.
+func escapes(x any) {
+ if dummy.b {
+ dummy.x = x
+ }
+}
+
+var dummy struct {
+ b bool
+ x any
+}
diff --git a/contrib/go/_std_1.18/src/internal/singleflight/singleflight.go b/contrib/go/_std_1.19/src/internal/singleflight/singleflight.go
index 07b3f40ec0..07b3f40ec0 100644
--- a/contrib/go/_std_1.18/src/internal/singleflight/singleflight.go
+++ b/contrib/go/_std_1.19/src/internal/singleflight/singleflight.go
diff --git a/contrib/go/_std_1.18/src/internal/syscall/execenv/execenv_default.go b/contrib/go/_std_1.19/src/internal/syscall/execenv/execenv_default.go
index 335647c638..335647c638 100644
--- a/contrib/go/_std_1.18/src/internal/syscall/execenv/execenv_default.go
+++ b/contrib/go/_std_1.19/src/internal/syscall/execenv/execenv_default.go
diff --git a/contrib/go/_std_1.18/src/internal/syscall/unix/asm_darwin.s b/contrib/go/_std_1.19/src/internal/syscall/unix/asm_darwin.s
index 8fbdc1d866..8fbdc1d866 100644
--- a/contrib/go/_std_1.18/src/internal/syscall/unix/asm_darwin.s
+++ b/contrib/go/_std_1.19/src/internal/syscall/unix/asm_darwin.s
diff --git a/contrib/go/_std_1.19/src/internal/syscall/unix/at.go b/contrib/go/_std_1.19/src/internal/syscall/unix/at.go
new file mode 100644
index 0000000000..965162e3d2
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/syscall/unix/at.go
@@ -0,0 +1,42 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux || openbsd || netbsd || dragonfly
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Unlinkat(dirfd int, path string, flags int) error {
+ var p *byte
+ p, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ _, _, errno := syscall.Syscall(unlinkatTrap, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(flags))
+ if errno != 0 {
+ return errno
+ }
+
+ return nil
+}
+
+func Openat(dirfd int, path string, flags int, perm uint32) (int, error) {
+ var p *byte
+ p, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return 0, err
+ }
+
+ fd, _, errno := syscall.Syscall6(openatTrap, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(flags), uintptr(perm), 0, 0)
+ if errno != 0 {
+ return 0, errno
+ }
+
+ return int(fd), nil
+}
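+
+// For illustration (not part of the upstream source): the *at calls
+// resolve path relative to an open directory descriptor, e.g.
+// (error handling elided):
+//
+//    dirfd, _ := syscall.Open("/tmp", syscall.O_RDONLY, 0)
+//    fd, _ := Openat(dirfd, "scratch.txt", syscall.O_CREAT|syscall.O_RDWR, 0o600)
+//    _ = Unlinkat(dirfd, "scratch.txt", 0)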
diff --git a/contrib/go/_std_1.18/src/internal/syscall/unix/at_darwin.go b/contrib/go/_std_1.19/src/internal/syscall/unix/at_darwin.go
index a88a27e0c6..a88a27e0c6 100644
--- a/contrib/go/_std_1.18/src/internal/syscall/unix/at_darwin.go
+++ b/contrib/go/_std_1.19/src/internal/syscall/unix/at_darwin.go
diff --git a/contrib/go/_std_1.19/src/internal/syscall/unix/at_fstatat.go b/contrib/go/_std_1.19/src/internal/syscall/unix/at_fstatat.go
new file mode 100644
index 0000000000..25318d2014
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/syscall/unix/at_fstatat.go
@@ -0,0 +1,28 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (linux && !loong64) || openbsd || netbsd || dragonfly
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Fstatat(dirfd int, path string, stat *syscall.Stat_t, flags int) error {
+ var p *byte
+ p, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ _, _, errno := syscall.Syscall6(fstatatTrap, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+ if errno != 0 {
+ return errno
+ }
+
+ return nil
+}
diff --git a/contrib/go/_std_1.18/src/internal/syscall/unix/at_sysnum_darwin.go b/contrib/go/_std_1.19/src/internal/syscall/unix/at_sysnum_darwin.go
index aaaaa4751c..aaaaa4751c 100644
--- a/contrib/go/_std_1.18/src/internal/syscall/unix/at_sysnum_darwin.go
+++ b/contrib/go/_std_1.19/src/internal/syscall/unix/at_sysnum_darwin.go
diff --git a/contrib/go/_std_1.18/src/internal/syscall/unix/at_sysnum_linux.go b/contrib/go/_std_1.19/src/internal/syscall/unix/at_sysnum_linux.go
index fa7cd75d42..fa7cd75d42 100644
--- a/contrib/go/_std_1.18/src/internal/syscall/unix/at_sysnum_linux.go
+++ b/contrib/go/_std_1.19/src/internal/syscall/unix/at_sysnum_linux.go
diff --git a/contrib/go/_std_1.18/src/internal/syscall/unix/at_sysnum_newfstatat_linux.go b/contrib/go/_std_1.19/src/internal/syscall/unix/at_sysnum_newfstatat_linux.go
index 76edf67522..76edf67522 100644
--- a/contrib/go/_std_1.18/src/internal/syscall/unix/at_sysnum_newfstatat_linux.go
+++ b/contrib/go/_std_1.19/src/internal/syscall/unix/at_sysnum_newfstatat_linux.go
diff --git a/contrib/go/_std_1.18/src/internal/syscall/unix/copy_file_range_linux.go b/contrib/go/_std_1.19/src/internal/syscall/unix/copy_file_range_linux.go
index cf0a279a7a..cf0a279a7a 100644
--- a/contrib/go/_std_1.18/src/internal/syscall/unix/copy_file_range_linux.go
+++ b/contrib/go/_std_1.19/src/internal/syscall/unix/copy_file_range_linux.go
diff --git a/contrib/go/_std_1.18/src/internal/syscall/unix/getentropy_darwin.go b/contrib/go/_std_1.19/src/internal/syscall/unix/getentropy_darwin.go
index 7bab1f27b0..7bab1f27b0 100644
--- a/contrib/go/_std_1.18/src/internal/syscall/unix/getentropy_darwin.go
+++ b/contrib/go/_std_1.19/src/internal/syscall/unix/getentropy_darwin.go
diff --git a/contrib/go/_std_1.18/src/internal/syscall/unix/getrandom.go b/contrib/go/_std_1.19/src/internal/syscall/unix/getrandom.go
index a6659331e4..a6659331e4 100644
--- a/contrib/go/_std_1.18/src/internal/syscall/unix/getrandom.go
+++ b/contrib/go/_std_1.19/src/internal/syscall/unix/getrandom.go
diff --git a/contrib/go/_std_1.18/src/internal/syscall/unix/getrandom_linux.go b/contrib/go/_std_1.19/src/internal/syscall/unix/getrandom_linux.go
index 8ccd8d328a..8ccd8d328a 100644
--- a/contrib/go/_std_1.18/src/internal/syscall/unix/getrandom_linux.go
+++ b/contrib/go/_std_1.19/src/internal/syscall/unix/getrandom_linux.go
diff --git a/contrib/go/_std_1.19/src/internal/syscall/unix/net.go b/contrib/go/_std_1.19/src/internal/syscall/unix/net.go
new file mode 100644
index 0000000000..5618d40ae0
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/syscall/unix/net.go
@@ -0,0 +1,44 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package unix
+
+import (
+ "syscall"
+ _ "unsafe"
+)
+
+//go:linkname RecvfromInet4 syscall.recvfromInet4
+//go:noescape
+func RecvfromInet4(fd int, p []byte, flags int, from *syscall.SockaddrInet4) (int, error)
+
+//go:linkname RecvfromInet6 syscall.recvfromInet6
+//go:noescape
+func RecvfromInet6(fd int, p []byte, flags int, from *syscall.SockaddrInet6) (n int, err error)
+
+//go:linkname SendtoInet4 syscall.sendtoInet4
+//go:noescape
+func SendtoInet4(fd int, p []byte, flags int, to *syscall.SockaddrInet4) (err error)
+
+//go:linkname SendtoInet6 syscall.sendtoInet6
+//go:noescape
+func SendtoInet6(fd int, p []byte, flags int, to *syscall.SockaddrInet6) (err error)
+
+//go:linkname SendmsgNInet4 syscall.sendmsgNInet4
+//go:noescape
+func SendmsgNInet4(fd int, p, oob []byte, to *syscall.SockaddrInet4, flags int) (n int, err error)
+
+//go:linkname SendmsgNInet6 syscall.sendmsgNInet6
+//go:noescape
+func SendmsgNInet6(fd int, p, oob []byte, to *syscall.SockaddrInet6, flags int) (n int, err error)
+
+//go:linkname RecvmsgInet4 syscall.recvmsgInet4
+//go:noescape
+func RecvmsgInet4(fd int, p, oob []byte, flags int, from *syscall.SockaddrInet4) (n, oobn int, recvflags int, err error)
+
+//go:linkname RecvmsgInet6 syscall.recvmsgInet6
+//go:noescape
+func RecvmsgInet6(fd int, p, oob []byte, flags int, from *syscall.SockaddrInet6) (n, oobn int, recvflags int, err error)
diff --git a/contrib/go/_std_1.18/src/internal/syscall/unix/nonblocking.go b/contrib/go/_std_1.19/src/internal/syscall/unix/nonblocking.go
index 9e5f0fb4a2..9e5f0fb4a2 100644
--- a/contrib/go/_std_1.18/src/internal/syscall/unix/nonblocking.go
+++ b/contrib/go/_std_1.19/src/internal/syscall/unix/nonblocking.go
diff --git a/contrib/go/_std_1.19/src/internal/syscall/unix/nonblocking_libc.go b/contrib/go/_std_1.19/src/internal/syscall/unix/nonblocking_libc.go
new file mode 100644
index 0000000000..84940714c3
--- /dev/null
+++ b/contrib/go/_std_1.19/src/internal/syscall/unix/nonblocking_libc.go
@@ -0,0 +1,25 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || solaris
+
+package unix
+
+import (
+ "syscall"
+ _ "unsafe" // for go:linkname
+)
+
+func IsNonblock(fd int) (nonblocking bool, err error) {
+ flag, e1 := fcntl(fd, syscall.F_GETFL, 0)
+ if e1 != nil {
+ return false, e1
+ }
+ return flag&syscall.O_NONBLOCK != 0, nil
+}
+
+// Implemented in the syscall package.
+//
+//go:linkname fcntl syscall.fcntl
+func fcntl(fd int, cmd int, arg int) (int, error)
diff --git a/contrib/go/_std_1.18/src/internal/syscall/unix/sysnum_linux_amd64.go b/contrib/go/_std_1.19/src/internal/syscall/unix/sysnum_linux_amd64.go
index ae5239ebfb..ae5239ebfb 100644
--- a/contrib/go/_std_1.18/src/internal/syscall/unix/sysnum_linux_amd64.go
+++ b/contrib/go/_std_1.19/src/internal/syscall/unix/sysnum_linux_amd64.go
diff --git a/contrib/go/_std_1.18/src/internal/testlog/exit.go b/contrib/go/_std_1.19/src/internal/testlog/exit.go
index e15defdb5b..e15defdb5b 100644
--- a/contrib/go/_std_1.18/src/internal/testlog/exit.go
+++ b/contrib/go/_std_1.19/src/internal/testlog/exit.go
diff --git a/contrib/go/_std_1.18/src/internal/testlog/log.go b/contrib/go/_std_1.19/src/internal/testlog/log.go
index 3c5f780ac4..3c5f780ac4 100644
--- a/contrib/go/_std_1.18/src/internal/testlog/log.go
+++ b/contrib/go/_std_1.19/src/internal/testlog/log.go
diff --git a/contrib/go/_std_1.18/src/internal/unsafeheader/unsafeheader.go b/contrib/go/_std_1.19/src/internal/unsafeheader/unsafeheader.go
index 6d092c629a..6d092c629a 100644
--- a/contrib/go/_std_1.18/src/internal/unsafeheader/unsafeheader.go
+++ b/contrib/go/_std_1.19/src/internal/unsafeheader/unsafeheader.go
diff --git a/contrib/go/_std_1.19/src/io/fs/fs.go b/contrib/go/_std_1.19/src/io/fs/fs.go
new file mode 100644
index 0000000000..4ce4d1a528
--- /dev/null
+++ b/contrib/go/_std_1.19/src/io/fs/fs.go
@@ -0,0 +1,258 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fs defines basic interfaces to a file system.
+// A file system can be provided by the host operating system
+// but also by other packages.
+package fs
+
+import (
+ "internal/oserror"
+ "time"
+ "unicode/utf8"
+)
+
+// An FS provides access to a hierarchical file system.
+//
+// The FS interface is the minimum implementation required of the file system.
+// A file system may implement additional interfaces,
+// such as ReadFileFS, to provide additional or optimized functionality.
+type FS interface {
+ // Open opens the named file.
+ //
+ // When Open returns an error, it should be of type *PathError
+ // with the Op field set to "open", the Path field set to name,
+ // and the Err field describing the problem.
+ //
+ // Open should reject attempts to open names that do not satisfy
+ // ValidPath(name), returning a *PathError with Err set to
+ // ErrInvalid or ErrNotExist.
+ Open(name string) (File, error)
+}
+
+// ValidPath reports whether the given path name
+// is valid for use in a call to Open.
+//
+// Path names passed to open are UTF-8-encoded,
+// unrooted, slash-separated sequences of path elements, like “x/y/z”.
+// Path names must not contain an element that is “.” or “..” or the empty string,
+// except for the special case that the root directory is named “.”.
+// Paths must not start or end with a slash: “/x” and “x/” are invalid.
+//
+// Note that paths are slash-separated on all systems, even Windows.
+// Paths containing other characters such as backslash and colon
+// are accepted as valid, but those characters must never be
+// interpreted by an FS implementation as path element separators.
+func ValidPath(name string) bool {
+ if !utf8.ValidString(name) {
+ return false
+ }
+
+ if name == "." {
+ // special case
+ return true
+ }
+
+ // Iterate over elements in name, checking each.
+ for {
+ i := 0
+ for i < len(name) && name[i] != '/' {
+ i++
+ }
+ elem := name[:i]
+ if elem == "" || elem == "." || elem == ".." {
+ return false
+ }
+ if i == len(name) {
+ return true // reached clean ending
+ }
+ name = name[i+1:]
+ }
+}
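+
+// For illustration (not part of the upstream source), a few results that
+// follow from the rules above:
+//
+//    ValidPath(".")      // true: the root directory
+//    ValidPath("x/y/z")  // true
+//    ValidPath("/x")     // false: rooted
+//    ValidPath("x/")     // false: trailing slash
+//    ValidPath("x/../y") // false: ".." element
+//    ValidPath("")       // false: empty element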
+
+// A File provides access to a single file.
+// The File interface is the minimum implementation required of the file.
+// Directory files should also implement ReadDirFile.
+// A file may implement io.ReaderAt or io.Seeker as optimizations.
+type File interface {
+ Stat() (FileInfo, error)
+ Read([]byte) (int, error)
+ Close() error
+}
+
+// A DirEntry is an entry read from a directory
+// (using the ReadDir function or a ReadDirFile's ReadDir method).
+type DirEntry interface {
+ // Name returns the name of the file (or subdirectory) described by the entry.
+ // This name is only the final element of the path (the base name), not the entire path.
+ // For example, Name would return "hello.go" not "home/gopher/hello.go".
+ Name() string
+
+ // IsDir reports whether the entry describes a directory.
+ IsDir() bool
+
+ // Type returns the type bits for the entry.
+ // The type bits are a subset of the usual FileMode bits, those returned by the FileMode.Type method.
+ Type() FileMode
+
+ // Info returns the FileInfo for the file or subdirectory described by the entry.
+ // The returned FileInfo may be from the time of the original directory read
+ // or from the time of the call to Info. If the file has been removed or renamed
+ // since the directory read, Info may return an error satisfying errors.Is(err, ErrNotExist).
+ // If the entry denotes a symbolic link, Info reports the information about the link itself,
+ // not the link's target.
+ Info() (FileInfo, error)
+}
+
+// A ReadDirFile is a directory file whose entries can be read with the ReadDir method.
+// Every directory file should implement this interface.
+// (It is permissible for any file to implement this interface,
+// but if so ReadDir should return an error for non-directories.)
+type ReadDirFile interface {
+ File
+
+ // ReadDir reads the contents of the directory and returns
+ // a slice of up to n DirEntry values in directory order.
+ // Subsequent calls on the same file will yield further DirEntry values.
+ //
+ // If n > 0, ReadDir returns at most n DirEntry structures.
+ // In this case, if ReadDir returns an empty slice, it will return
+ // a non-nil error explaining why.
+ // At the end of a directory, the error is io.EOF.
+ // (ReadDir must return io.EOF itself, not an error wrapping io.EOF.)
+ //
+ // If n <= 0, ReadDir returns all the DirEntry values from the directory
+ // in a single slice. In this case, if ReadDir succeeds (reads all the way
+ // to the end of the directory), it returns the slice and a nil error.
+ // If it encounters an error before the end of the directory,
+ // ReadDir returns the DirEntry list read until that point and a non-nil error.
+ ReadDir(n int) ([]DirEntry, error)
+}
+
+// Generic file system errors.
+// Errors returned by file systems can be tested against these errors
+// using errors.Is.
+var (
+ ErrInvalid = errInvalid() // "invalid argument"
+ ErrPermission = errPermission() // "permission denied"
+ ErrExist = errExist() // "file already exists"
+ ErrNotExist = errNotExist() // "file does not exist"
+ ErrClosed = errClosed() // "file already closed"
+)
+
+func errInvalid() error { return oserror.ErrInvalid }
+func errPermission() error { return oserror.ErrPermission }
+func errExist() error { return oserror.ErrExist }
+func errNotExist() error { return oserror.ErrNotExist }
+func errClosed() error { return oserror.ErrClosed }
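+
+// For illustration (not part of the upstream source): these sentinels are
+// matched through wrapping, so a *PathError from Open can be tested with
+// errors.Is (fsys is any FS implementation; hypothetical here):
+//
+//    _, err := fsys.Open("no/such/file")
+//    if errors.Is(err, ErrNotExist) {
+//        // handle the missing file
+//    }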
+
+// A FileInfo describes a file and is returned by Stat.
+type FileInfo interface {
+ Name() string // base name of the file
+ Size() int64 // length in bytes for regular files; system-dependent for others
+ Mode() FileMode // file mode bits
+ ModTime() time.Time // modification time
+ IsDir() bool // abbreviation for Mode().IsDir()
+ Sys() any // underlying data source (can return nil)
+}
+
+// A FileMode represents a file's mode and permission bits.
+// The bits have the same definition on all systems, so that
+// information about files can be moved from one system
+// to another portably. Not all bits apply to all systems.
+// The only required bit is ModeDir for directories.
+type FileMode uint32
+
+// The defined file mode bits are the most significant bits of the FileMode.
+// The nine least-significant bits are the standard Unix rwxrwxrwx permissions.
+// The values of these bits should be considered part of the public API and
+// may be used in wire protocols or disk representations: they must not be
+// changed, although new bits might be added.
+const (
+ // The single letters are the abbreviations
+ // used by the String method's formatting.
+ ModeDir FileMode = 1 << (32 - 1 - iota) // d: is a directory
+ ModeAppend // a: append-only
+ ModeExclusive // l: exclusive use
+ ModeTemporary // T: temporary file; Plan 9 only
+ ModeSymlink // L: symbolic link
+ ModeDevice // D: device file
+ ModeNamedPipe // p: named pipe (FIFO)
+ ModeSocket // S: Unix domain socket
+ ModeSetuid // u: setuid
+ ModeSetgid // g: setgid
+ ModeCharDevice // c: Unix character device, when ModeDevice is set
+ ModeSticky // t: sticky
+ ModeIrregular // ?: non-regular file; nothing else is known about this file
+
+ // Mask for the type bits. For regular files, none will be set.
+ ModeType = ModeDir | ModeSymlink | ModeNamedPipe | ModeSocket | ModeDevice | ModeCharDevice | ModeIrregular
+
+ ModePerm FileMode = 0777 // Unix permission bits
+)
+
+func (m FileMode) String() string {
+ const str = "dalTLDpSugct?"
+ var buf [32]byte // Mode is uint32.
+ w := 0
+ for i, c := range str {
+ if m&(1<<uint(32-1-i)) != 0 {
+ buf[w] = byte(c)
+ w++
+ }
+ }
+ if w == 0 {
+ buf[w] = '-'
+ w++
+ }
+ const rwx = "rwxrwxrwx"
+ for i, c := range rwx {
+ if m&(1<<uint(9-1-i)) != 0 {
+ buf[w] = byte(c)
+ } else {
+ buf[w] = '-'
+ }
+ w++
+ }
+ return string(buf[:w])
+}
+
+// IsDir reports whether m describes a directory.
+// That is, it tests for the ModeDir bit being set in m.
+func (m FileMode) IsDir() bool {
+ return m&ModeDir != 0
+}
+
+// IsRegular reports whether m describes a regular file.
+// That is, it tests that no mode type bits are set.
+func (m FileMode) IsRegular() bool {
+ return m&ModeType == 0
+}
+
+// Perm returns the Unix permission bits in m (m & ModePerm).
+func (m FileMode) Perm() FileMode {
+ return m & ModePerm
+}
+
+// Type returns type bits in m (m & ModeType).
+func (m FileMode) Type() FileMode {
+ return m & ModeType
+}
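+
+// For illustration (not part of the upstream source): how the bits combine
+// under the String form defined above:
+//
+//    m := ModeDir | 0755
+//    m.String()    // "drwxr-xr-x"
+//    m.IsDir()     // true
+//    m.IsRegular() // false
+//    m.Perm()      // 0755
+//    m.Type()      // ModeDir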
+
+// PathError records an error and the operation and file path that caused it.
+type PathError struct {
+ Op string
+ Path string
+ Err error
+}
+
+func (e *PathError) Error() string { return e.Op + " " + e.Path + ": " + e.Err.Error() }
+
+func (e *PathError) Unwrap() error { return e.Err }
+
+// Timeout reports whether this error represents a timeout.
+func (e *PathError) Timeout() bool {
+ t, ok := e.Err.(interface{ Timeout() bool })
+ return ok && t.Timeout()
+}
diff --git a/contrib/go/_std_1.18/src/io/fs/glob.go b/contrib/go/_std_1.19/src/io/fs/glob.go
index 0e529cd05d..0e529cd05d 100644
--- a/contrib/go/_std_1.18/src/io/fs/glob.go
+++ b/contrib/go/_std_1.19/src/io/fs/glob.go
diff --git a/contrib/go/_std_1.18/src/io/fs/readdir.go b/contrib/go/_std_1.19/src/io/fs/readdir.go
index 2b10ddb0a3..2b10ddb0a3 100644
--- a/contrib/go/_std_1.18/src/io/fs/readdir.go
+++ b/contrib/go/_std_1.19/src/io/fs/readdir.go
diff --git a/contrib/go/_std_1.18/src/io/fs/readfile.go b/contrib/go/_std_1.19/src/io/fs/readfile.go
index d3c181c0a9..d3c181c0a9 100644
--- a/contrib/go/_std_1.18/src/io/fs/readfile.go
+++ b/contrib/go/_std_1.19/src/io/fs/readfile.go
diff --git a/contrib/go/_std_1.18/src/io/fs/stat.go b/contrib/go/_std_1.19/src/io/fs/stat.go
index 735a6e3281..735a6e3281 100644
--- a/contrib/go/_std_1.18/src/io/fs/stat.go
+++ b/contrib/go/_std_1.19/src/io/fs/stat.go
diff --git a/contrib/go/_std_1.18/src/io/fs/sub.go b/contrib/go/_std_1.19/src/io/fs/sub.go
index ae20e030a9..ae20e030a9 100644
--- a/contrib/go/_std_1.18/src/io/fs/sub.go
+++ b/contrib/go/_std_1.19/src/io/fs/sub.go
diff --git a/contrib/go/_std_1.19/src/io/fs/walk.go b/contrib/go/_std_1.19/src/io/fs/walk.go
new file mode 100644
index 0000000000..37800794a2
--- /dev/null
+++ b/contrib/go/_std_1.19/src/io/fs/walk.go
@@ -0,0 +1,129 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fs
+
+import (
+ "errors"
+ "path"
+)
+
+// SkipDir is used as a return value from WalkDirFuncs to indicate that
+// the directory named in the call is to be skipped. It is not returned
+// as an error by any function.
+var SkipDir = errors.New("skip this directory")
+
+// WalkDirFunc is the type of the function called by WalkDir to visit
+// each file or directory.
+//
+// The path argument contains the argument to WalkDir as a prefix.
+// That is, if WalkDir is called with root argument "dir" and finds a file
+// named "a" in that directory, the walk function will be called with
+// argument "dir/a".
+//
+// The d argument is the fs.DirEntry for the named path.
+//
+// The error result returned by the function controls how WalkDir
+// continues. If the function returns the special value SkipDir, WalkDir
+// skips the current directory (path if d.IsDir() is true, otherwise
+// path's parent directory). Otherwise, if the function returns a non-nil
+// error, WalkDir stops entirely and returns that error.
+//
+// The err argument reports an error related to path, signaling that
+// WalkDir will not walk into that directory. The function can decide how
+// to handle that error; as described earlier, returning the error will
+// cause WalkDir to stop walking the entire tree.
+//
+// WalkDir calls the function with a non-nil err argument in two cases.
+//
+// First, if the initial fs.Stat on the root directory fails, WalkDir
+// calls the function with path set to root, d set to nil, and err set to
+// the error from fs.Stat.
+//
+// Second, if a directory's ReadDir method fails, WalkDir calls the
+// function with path set to the directory's path, d set to an
+// fs.DirEntry describing the directory, and err set to the error from
+// ReadDir. In this second case, the function is called twice with the
+// path of the directory: the first call is before the directory read is
+// attempted and has err set to nil, giving the function a chance to
+// return SkipDir and avoid the ReadDir entirely. The second call is
+// after a failed ReadDir and reports the error from ReadDir.
+// (If ReadDir succeeds, there is no second call.)
+//
+// The differences between WalkDirFunc and filepath.WalkFunc are:
+//
+// - The second argument has type fs.DirEntry instead of fs.FileInfo.
+// - The function is called before reading a directory, to allow SkipDir
+// to bypass the directory read entirely.
+// - If a directory read fails, the function is called a second time
+// for that directory to report the error.
+type WalkDirFunc func(path string, d DirEntry, err error) error
+
+// walkDir recursively descends path, calling walkDirFn.
+func walkDir(fsys FS, name string, d DirEntry, walkDirFn WalkDirFunc) error {
+ if err := walkDirFn(name, d, nil); err != nil || !d.IsDir() {
+ if err == SkipDir && d.IsDir() {
+ // Successfully skipped directory.
+ err = nil
+ }
+ return err
+ }
+
+ dirs, err := ReadDir(fsys, name)
+ if err != nil {
+ // Second call, to report ReadDir error.
+ err = walkDirFn(name, d, err)
+ if err != nil {
+ if err == SkipDir && d.IsDir() {
+ err = nil
+ }
+ return err
+ }
+ }
+
+ for _, d1 := range dirs {
+ name1 := path.Join(name, d1.Name())
+ if err := walkDir(fsys, name1, d1, walkDirFn); err != nil {
+ if err == SkipDir {
+ break
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+// WalkDir walks the file tree rooted at root, calling fn for each file or
+// directory in the tree, including root.
+//
+// All errors that arise visiting files and directories are filtered by fn:
+// see the fs.WalkDirFunc documentation for details.
+//
+// The files are walked in lexical order, which makes the output deterministic
+// but requires WalkDir to read an entire directory into memory before proceeding
+// to walk that directory.
+//
+// WalkDir does not follow symbolic links found in directories,
+// but if root itself is a symbolic link, its target will be walked.
+func WalkDir(fsys FS, root string, fn WalkDirFunc) error {
+ info, err := Stat(fsys, root)
+ if err != nil {
+ err = fn(root, nil, err)
+ } else {
+ err = walkDir(fsys, root, &statDirEntry{info}, fn)
+ }
+ if err == SkipDir {
+ return nil
+ }
+ return err
+}
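+
+// For illustration (not part of the upstream source): a typical WalkDir
+// call that prints every path and skips directories named "testdata"
+// (fsys is any FS implementation; hypothetical here):
+//
+//    err := WalkDir(fsys, ".", func(p string, d DirEntry, err error) error {
+//        if err != nil {
+//            return err
+//        }
+//        if d.IsDir() && d.Name() == "testdata" {
+//            return SkipDir
+//        }
+//        fmt.Println(p)
+//        return nil
+//    })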
+
+type statDirEntry struct {
+ info FileInfo
+}
+
+func (d *statDirEntry) Name() string { return d.info.Name() }
+func (d *statDirEntry) IsDir() bool { return d.info.IsDir() }
+func (d *statDirEntry) Type() FileMode { return d.info.Mode().Type() }
+func (d *statDirEntry) Info() (FileInfo, error) { return d.info, nil }
diff --git a/contrib/go/_std_1.19/src/io/io.go b/contrib/go/_std_1.19/src/io/io.go
new file mode 100644
index 0000000000..9d4c0d2506
--- /dev/null
+++ b/contrib/go/_std_1.19/src/io/io.go
@@ -0,0 +1,670 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package io provides basic interfaces to I/O primitives.
+// Its primary job is to wrap existing implementations of such primitives,
+// such as those in package os, into shared public interfaces that
+// abstract the functionality, plus some other related primitives.
+//
+// Because these interfaces and primitives wrap lower-level operations with
+// various implementations, unless otherwise informed clients should not
+// assume they are safe for parallel execution.
+package io
+
+import (
+ "errors"
+ "sync"
+)
+
+// Seek whence values.
+const (
+ SeekStart = 0 // seek relative to the origin of the file
+ SeekCurrent = 1 // seek relative to the current offset
+ SeekEnd = 2 // seek relative to the end
+)
+
+// ErrShortWrite means that a write accepted fewer bytes than requested
+// but failed to return an explicit error.
+var ErrShortWrite = errors.New("short write")
+
+// errInvalidWrite means that a write returned an impossible count.
+var errInvalidWrite = errors.New("invalid write result")
+
+// ErrShortBuffer means that a read required a longer buffer than was provided.
+var ErrShortBuffer = errors.New("short buffer")
+
+// EOF is the error returned by Read when no more input is available.
+// (Read must return EOF itself, not an error wrapping EOF,
+// because callers will test for EOF using ==.)
+// Functions should return EOF only to signal a graceful end of input.
+// If the EOF occurs unexpectedly in a structured data stream,
+// the appropriate error is either ErrUnexpectedEOF or some other error
+// giving more detail.
+var EOF = errors.New("EOF")
+
+// ErrUnexpectedEOF means that EOF was encountered in the
+// middle of reading a fixed-size block or data structure.
+var ErrUnexpectedEOF = errors.New("unexpected EOF")
+
+// ErrNoProgress is returned by some clients of a Reader when
+// many calls to Read have failed to return any data or error,
+// usually the sign of a broken Reader implementation.
+var ErrNoProgress = errors.New("multiple Read calls return no data or error")
+
+// Reader is the interface that wraps the basic Read method.
+//
+// Read reads up to len(p) bytes into p. It returns the number of bytes
+// read (0 <= n <= len(p)) and any error encountered. Even if Read
+// returns n < len(p), it may use all of p as scratch space during the call.
+// If some data is available but not len(p) bytes, Read conventionally
+// returns what is available instead of waiting for more.
+//
+// When Read encounters an error or end-of-file condition after
+// successfully reading n > 0 bytes, it returns the number of
+// bytes read. It may return the (non-nil) error from the same call
+// or return the error (and n == 0) from a subsequent call.
+// An instance of this general case is that a Reader returning
+// a non-zero number of bytes at the end of the input stream may
+// return either err == EOF or err == nil. The next Read should
+// return 0, EOF.
+//
+// Callers should always process the n > 0 bytes returned before
+// considering the error err. Doing so correctly handles I/O errors
+// that happen after reading some bytes and also both of the
+// allowed EOF behaviors.
+//
+// Implementations of Read are discouraged from returning a
+// zero byte count with a nil error, except when len(p) == 0.
+// Callers should treat a return of 0 and nil as indicating that
+// nothing happened; in particular it does not indicate EOF.
+//
+// Implementations must not retain p.
+type Reader interface {
+ Read(p []byte) (n int, err error)
+}
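+
+// For illustration (not part of the upstream source): a read loop that
+// honors the contract above, consuming the n > 0 bytes before looking at
+// err (r is any Reader and process is a hypothetical consumer):
+//
+//    buf := make([]byte, 4096)
+//    for {
+//        n, err := r.Read(buf)
+//        if n > 0 {
+//            process(buf[:n])
+//        }
+//        if err == EOF {
+//            break // normal end of stream
+//        }
+//        if err != nil {
+//            return err
+//        }
+//    }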
+
+// Writer is the interface that wraps the basic Write method.
+//
+// Write writes len(p) bytes from p to the underlying data stream.
+// It returns the number of bytes written from p (0 <= n <= len(p))
+// and any error encountered that caused the write to stop early.
+// Write must return a non-nil error if it returns n < len(p).
+// Write must not modify the slice data, even temporarily.
+//
+// Implementations must not retain p.
+type Writer interface {
+ Write(p []byte) (n int, err error)
+}
+
+// Closer is the interface that wraps the basic Close method.
+//
+// The behavior of Close after the first call is undefined.
+// Specific implementations may document their own behavior.
+type Closer interface {
+ Close() error
+}
+
+// Seeker is the interface that wraps the basic Seek method.
+//
+// Seek sets the offset for the next Read or Write to offset,
+// interpreted according to whence:
+// SeekStart means relative to the start of the file,
+// SeekCurrent means relative to the current offset, and
+// SeekEnd means relative to the end
+// (for example, offset = -2 specifies the penultimate byte of the file).
+// Seek returns the new offset relative to the start of the
+// file or an error, if any.
+//
+// Seeking to an offset before the start of the file is an error.
+// Seeking to any positive offset may be allowed, but if the new offset exceeds
+// the size of the underlying object the behavior of subsequent I/O operations
+// is implementation-dependent.
+type Seeker interface {
+ Seek(offset int64, whence int) (int64, error)
+}
+
+// ReadWriter is the interface that groups the basic Read and Write methods.
+type ReadWriter interface {
+ Reader
+ Writer
+}
+
+// ReadCloser is the interface that groups the basic Read and Close methods.
+type ReadCloser interface {
+ Reader
+ Closer
+}
+
+// WriteCloser is the interface that groups the basic Write and Close methods.
+type WriteCloser interface {
+ Writer
+ Closer
+}
+
+// ReadWriteCloser is the interface that groups the basic Read, Write and Close methods.
+type ReadWriteCloser interface {
+ Reader
+ Writer
+ Closer
+}
+
+// ReadSeeker is the interface that groups the basic Read and Seek methods.
+type ReadSeeker interface {
+ Reader
+ Seeker
+}
+
+// ReadSeekCloser is the interface that groups the basic Read, Seek and Close
+// methods.
+type ReadSeekCloser interface {
+ Reader
+ Seeker
+ Closer
+}
+
+// WriteSeeker is the interface that groups the basic Write and Seek methods.
+type WriteSeeker interface {
+ Writer
+ Seeker
+}
+
+// ReadWriteSeeker is the interface that groups the basic Read, Write and Seek methods.
+type ReadWriteSeeker interface {
+ Reader
+ Writer
+ Seeker
+}
+
+// ReaderFrom is the interface that wraps the ReadFrom method.
+//
+// ReadFrom reads data from r until EOF or error.
+// The return value n is the number of bytes read.
+// Any error except EOF encountered during the read is also returned.
+//
+// The Copy function uses ReaderFrom if available.
+type ReaderFrom interface {
+ ReadFrom(r Reader) (n int64, err error)
+}
+
+// WriterTo is the interface that wraps the WriteTo method.
+//
+// WriteTo writes data to w until there's no more data to write or
+// when an error occurs. The return value n is the number of bytes
+// written. Any error encountered during the write is also returned.
+//
+// The Copy function uses WriterTo if available.
+type WriterTo interface {
+ WriteTo(w Writer) (n int64, err error)
+}
+
+// ReaderAt is the interface that wraps the basic ReadAt method.
+//
+// ReadAt reads len(p) bytes into p starting at offset off in the
+// underlying input source. It returns the number of bytes
+// read (0 <= n <= len(p)) and any error encountered.
+//
+// When ReadAt returns n < len(p), it returns a non-nil error
+// explaining why more bytes were not returned. In this respect,
+// ReadAt is stricter than Read.
+//
+// Even if ReadAt returns n < len(p), it may use all of p as scratch
+// space during the call. If some data is available but not len(p) bytes,
+// ReadAt blocks until either all the data is available or an error occurs.
+// In this respect ReadAt is different from Read.
+//
+// If the n = len(p) bytes returned by ReadAt are at the end of the
+// input source, ReadAt may return either err == EOF or err == nil.
+//
+// If ReadAt is reading from an input source with a seek offset,
+// ReadAt should not affect nor be affected by the underlying
+// seek offset.
+//
+// Clients of ReadAt can execute parallel ReadAt calls on the
+// same input source.
+//
+// Implementations must not retain p.
+type ReaderAt interface {
+ ReadAt(p []byte, off int64) (n int, err error)
+}
+
+// WriterAt is the interface that wraps the basic WriteAt method.
+//
+// WriteAt writes len(p) bytes from p to the underlying data stream
+// at offset off. It returns the number of bytes written from p (0 <= n <= len(p))
+// and any error encountered that caused the write to stop early.
+// WriteAt must return a non-nil error if it returns n < len(p).
+//
+// If WriteAt is writing to a destination with a seek offset,
+// WriteAt should not affect nor be affected by the underlying
+// seek offset.
+//
+// Clients of WriteAt can execute parallel WriteAt calls on the same
+// destination if the ranges do not overlap.
+//
+// Implementations must not retain p.
+type WriterAt interface {
+ WriteAt(p []byte, off int64) (n int, err error)
+}
+
+// ByteReader is the interface that wraps the ReadByte method.
+//
+// ReadByte reads and returns the next byte from the input or
+// any error encountered. If ReadByte returns an error, no input
+// byte was consumed, and the returned byte value is undefined.
+//
+// ReadByte provides an efficient interface for byte-at-a-time
+// processing. A Reader that does not implement ByteReader
+// can be wrapped using bufio.NewReader to add this method.
+type ByteReader interface {
+ ReadByte() (byte, error)
+}
+
+// ByteScanner is the interface that adds the UnreadByte method to the
+// basic ReadByte method.
+//
+// UnreadByte causes the next call to ReadByte to return the last byte read.
+// If the last operation was not a successful call to ReadByte, UnreadByte may
+// return an error, unread the last byte read (or the byte prior to the
+// last-unread byte), or (in implementations that support the Seeker interface)
+// seek to one byte before the current offset.
+type ByteScanner interface {
+ ByteReader
+ UnreadByte() error
+}
+
+// ByteWriter is the interface that wraps the WriteByte method.
+type ByteWriter interface {
+ WriteByte(c byte) error
+}
+
+// RuneReader is the interface that wraps the ReadRune method.
+//
+// ReadRune reads a single encoded Unicode character
+// and returns the rune and its size in bytes. If no character is
+// available, err will be set.
+type RuneReader interface {
+ ReadRune() (r rune, size int, err error)
+}
+
+// RuneScanner is the interface that adds the UnreadRune method to the
+// basic ReadRune method.
+//
+// UnreadRune causes the next call to ReadRune to return the last rune read.
+// If the last operation was not a successful call to ReadRune, UnreadRune may
+// return an error, unread the last rune read (or the rune prior to the
+// last-unread rune), or (in implementations that support the Seeker interface)
+// seek to the start of the rune before the current offset.
+type RuneScanner interface {
+ RuneReader
+ UnreadRune() error
+}
+
+// StringWriter is the interface that wraps the WriteString method.
+type StringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+// WriteString writes the contents of the string s to w, which accepts a slice of bytes.
+// If w implements StringWriter, its WriteString method is invoked directly.
+// Otherwise, w.Write is called exactly once.
+func WriteString(w Writer, s string) (n int, err error) {
+ if sw, ok := w.(StringWriter); ok {
+ return sw.WriteString(s)
+ }
+ return w.Write([]byte(s))
+}
+
+// ReadAtLeast reads from r into buf until it has read at least min bytes.
+// It returns the number of bytes copied and an error if fewer bytes were read.
+// The error is EOF only if no bytes were read.
+// If an EOF happens after reading fewer than min bytes,
+// ReadAtLeast returns ErrUnexpectedEOF.
+// If min is greater than the length of buf, ReadAtLeast returns ErrShortBuffer.
+// On return, n >= min if and only if err == nil.
+// If r returns an error having read at least min bytes, the error is dropped.
+func ReadAtLeast(r Reader, buf []byte, min int) (n int, err error) {
+ if len(buf) < min {
+ return 0, ErrShortBuffer
+ }
+ for n < min && err == nil {
+ var nn int
+ nn, err = r.Read(buf[n:])
+ n += nn
+ }
+ if n >= min {
+ err = nil
+ } else if n > 0 && err == EOF {
+ err = ErrUnexpectedEOF
+ }
+ return
+}
+
+// ReadFull reads exactly len(buf) bytes from r into buf.
+// It returns the number of bytes copied and an error if fewer bytes were read.
+// The error is EOF only if no bytes were read.
+// If an EOF happens after reading some but not all the bytes,
+// ReadFull returns ErrUnexpectedEOF.
+// On return, n == len(buf) if and only if err == nil.
+// If r returns an error having read at least len(buf) bytes, the error is dropped.
+func ReadFull(r Reader, buf []byte) (n int, err error) {
+ return ReadAtLeast(r, buf, len(buf))
+}
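+
+// For illustration (not part of the upstream source), assuming r is a
+// strings.Reader over "12345678":
+//
+//    buf := make([]byte, 4)
+//    ReadFull(r, buf) // n == 4, err == nil, buf holds "1234"
+//    ReadFull(r, buf) // n == 4, err == nil, buf holds "5678"
+//    ReadFull(r, buf) // n == 0, err == EOF
+//
+// Had the source held two more bytes, the third call would instead return
+// n == 2 and err == ErrUnexpectedEOF.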
+
+// CopyN copies n bytes (or until an error) from src to dst.
+// It returns the number of bytes copied and the earliest
+// error encountered while copying.
+// On return, written == n if and only if err == nil.
+//
+// If dst implements the ReaderFrom interface,
+// the copy is implemented using it.
+func CopyN(dst Writer, src Reader, n int64) (written int64, err error) {
+ written, err = Copy(dst, LimitReader(src, n))
+ if written == n {
+ return n, nil
+ }
+ if written < n && err == nil {
+ // src stopped early; must have been EOF.
+ err = EOF
+ }
+ return
+}
+
+// Copy copies from src to dst until either EOF is reached
+// on src or an error occurs. It returns the number of bytes
+// copied and the first error encountered while copying, if any.
+//
+// A successful Copy returns err == nil, not err == EOF.
+// Because Copy is defined to read from src until EOF, it does
+// not treat an EOF from Read as an error to be reported.
+//
+// If src implements the WriterTo interface,
+// the copy is implemented by calling src.WriteTo(dst).
+// Otherwise, if dst implements the ReaderFrom interface,
+// the copy is implemented by calling dst.ReadFrom(src).
+func Copy(dst Writer, src Reader) (written int64, err error) {
+ return copyBuffer(dst, src, nil)
+}
+
+// CopyBuffer is identical to Copy except that it stages through the
+// provided buffer (if one is required) rather than allocating a
+// temporary one. If buf is nil, one is allocated; otherwise if it has
+// zero length, CopyBuffer panics.
+//
+// If either src implements WriterTo or dst implements ReaderFrom,
+// buf will not be used to perform the copy.
+func CopyBuffer(dst Writer, src Reader, buf []byte) (written int64, err error) {
+ if buf != nil && len(buf) == 0 {
+ panic("empty buffer in CopyBuffer")
+ }
+ return copyBuffer(dst, src, buf)
+}
+
+// copyBuffer is the actual implementation of Copy and CopyBuffer.
+// If buf is nil, one is allocated.
+func copyBuffer(dst Writer, src Reader, buf []byte) (written int64, err error) {
+ // If the reader has a WriteTo method, use it to do the copy.
+ // Avoids an allocation and a copy.
+ if wt, ok := src.(WriterTo); ok {
+ return wt.WriteTo(dst)
+ }
+ // Similarly, if the writer has a ReadFrom method, use it to do the copy.
+ if rt, ok := dst.(ReaderFrom); ok {
+ return rt.ReadFrom(src)
+ }
+ if buf == nil {
+ size := 32 * 1024
+ if l, ok := src.(*LimitedReader); ok && int64(size) > l.N {
+ if l.N < 1 {
+ size = 1
+ } else {
+ size = int(l.N)
+ }
+ }
+ buf = make([]byte, size)
+ }
+ for {
+ nr, er := src.Read(buf)
+ if nr > 0 {
+ nw, ew := dst.Write(buf[0:nr])
+ if nw < 0 || nr < nw {
+ nw = 0
+ if ew == nil {
+ ew = errInvalidWrite
+ }
+ }
+ written += int64(nw)
+ if ew != nil {
+ err = ew
+ break
+ }
+ if nr != nw {
+ err = ErrShortWrite
+ break
+ }
+ }
+ if er != nil {
+ if er != EOF {
+ err = er
+ }
+ break
+ }
+ }
+ return written, err
+}
+
+// LimitReader returns a Reader that reads from r
+// but stops with EOF after n bytes.
+// The underlying implementation is a *LimitedReader.
+func LimitReader(r Reader, n int64) Reader { return &LimitedReader{r, n} }
+
+// A LimitedReader reads from R but limits the amount of
+// data returned to just N bytes. Each call to Read
+// updates N to reflect the new amount remaining.
+// Read returns EOF when N <= 0 or when the underlying R returns EOF.
+type LimitedReader struct {
+ R Reader // underlying reader
+ N int64 // max bytes remaining
+}
+
+func (l *LimitedReader) Read(p []byte) (n int, err error) {
+ if l.N <= 0 {
+ return 0, EOF
+ }
+ if int64(len(p)) > l.N {
+ p = p[0:l.N]
+ }
+ n, err = l.R.Read(p)
+ l.N -= int64(n)
+ return
+}
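+
+// For illustration (not part of the upstream source): capping a copy at
+// n bytes (assuming a strings.Reader as the source):
+//
+//    r := strings.NewReader("some long input")
+//    n, err := Copy(Discard, LimitReader(r, 4)) // n == 4, err == nil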
+
+// NewSectionReader returns a SectionReader that reads from r
+// starting at offset off and stops with EOF after n bytes.
+func NewSectionReader(r ReaderAt, off int64, n int64) *SectionReader {
+ var remaining int64
+ const maxint64 = 1<<63 - 1
+ if off <= maxint64-n {
+ remaining = n + off
+ } else {
+ // Overflow, with no way to return error.
+ // Assume we can read up to an offset of 1<<63 - 1.
+ remaining = maxint64
+ }
+ return &SectionReader{r, off, off, remaining}
+}
+
+// SectionReader implements Read, Seek, and ReadAt on a section
+// of an underlying ReaderAt.
+type SectionReader struct {
+ r ReaderAt
+ base int64
+ off int64
+ limit int64
+}
+
+func (s *SectionReader) Read(p []byte) (n int, err error) {
+ if s.off >= s.limit {
+ return 0, EOF
+ }
+ if max := s.limit - s.off; int64(len(p)) > max {
+ p = p[0:max]
+ }
+ n, err = s.r.ReadAt(p, s.off)
+ s.off += int64(n)
+ return
+}
+
+var errWhence = errors.New("Seek: invalid whence")
+var errOffset = errors.New("Seek: invalid offset")
+
+func (s *SectionReader) Seek(offset int64, whence int) (int64, error) {
+ switch whence {
+ default:
+ return 0, errWhence
+ case SeekStart:
+ offset += s.base
+ case SeekCurrent:
+ offset += s.off
+ case SeekEnd:
+ offset += s.limit
+ }
+ if offset < s.base {
+ return 0, errOffset
+ }
+ s.off = offset
+ return offset - s.base, nil
+}
+
+func (s *SectionReader) ReadAt(p []byte, off int64) (n int, err error) {
+ if off < 0 || off >= s.limit-s.base {
+ return 0, EOF
+ }
+ off += s.base
+ if max := s.limit - off; int64(len(p)) > max {
+ p = p[0:max]
+ n, err = s.r.ReadAt(p, off)
+ if err == nil {
+ err = EOF
+ }
+ return n, err
+ }
+ return s.r.ReadAt(p, off)
+}
+
+// Size returns the size of the section in bytes.
+func (s *SectionReader) Size() int64 { return s.limit - s.base }
+
+// TeeReader returns a Reader that writes to w what it reads from r.
+// All reads from r performed through it are matched with
+// corresponding writes to w. There is no internal buffering -
+// the write must complete before the read completes.
+// Any error encountered while writing is reported as a read error.
+func TeeReader(r Reader, w Writer) Reader {
+ return &teeReader{r, w}
+}
+
+type teeReader struct {
+ r Reader
+ w Writer
+}
+
+func (t *teeReader) Read(p []byte) (n int, err error) {
+ n, err = t.r.Read(p)
+ if n > 0 {
+ if n, err := t.w.Write(p[:n]); err != nil {
+ return n, err
+ }
+ }
+ return
+}
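+
+// For illustration (not part of the upstream source): hashing a stream
+// while it is being consumed (src is any Reader; assumes crypto/sha256):
+//
+//    h := sha256.New()
+//    body, err := ReadAll(TeeReader(src, h))
+//    // body holds the data; h.Sum(nil) is its SHA-256 digest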
+
+// Discard is a Writer on which all Write calls succeed
+// without doing anything.
+var Discard Writer = discard{}
+
+type discard struct{}
+
+// discard implements ReaderFrom as an optimization so Copy to
+// io.Discard can avoid doing unnecessary work.
+var _ ReaderFrom = discard{}
+
+func (discard) Write(p []byte) (int, error) {
+ return len(p), nil
+}
+
+func (discard) WriteString(s string) (int, error) {
+ return len(s), nil
+}
+
+var blackHolePool = sync.Pool{
+ New: func() any {
+ b := make([]byte, 8192)
+ return &b
+ },
+}
+
+func (discard) ReadFrom(r Reader) (n int64, err error) {
+ bufp := blackHolePool.Get().(*[]byte)
+ readSize := 0
+ for {
+ readSize, err = r.Read(*bufp)
+ n += int64(readSize)
+ if err != nil {
+ blackHolePool.Put(bufp)
+ if err == EOF {
+ return n, nil
+ }
+ return
+ }
+ }
+}
+
+// NopCloser returns a ReadCloser with a no-op Close method wrapping
+// the provided Reader r.
+// If r implements WriterTo, the returned ReadCloser will implement WriterTo
+// by forwarding calls to r.
+func NopCloser(r Reader) ReadCloser {
+ if _, ok := r.(WriterTo); ok {
+ return nopCloserWriterTo{r}
+ }
+ return nopCloser{r}
+}
+
+type nopCloser struct {
+ Reader
+}
+
+func (nopCloser) Close() error { return nil }
+
+type nopCloserWriterTo struct {
+ Reader
+}
+
+func (nopCloserWriterTo) Close() error { return nil }
+
+func (c nopCloserWriterTo) WriteTo(w Writer) (n int64, err error) {
+ return c.Reader.(WriterTo).WriteTo(w)
+}
+
+// ReadAll reads from r until an error or EOF and returns the data it read.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read from src until EOF, it does not treat an EOF from Read
+// as an error to be reported.
+func ReadAll(r Reader) ([]byte, error) {
+ b := make([]byte, 0, 512)
+ for {
+ if len(b) == cap(b) {
+ // Add more capacity (let append pick how much).
+ b = append(b, 0)[:len(b)]
+ }
+ n, err := r.Read(b[len(b):cap(b)])
+ b = b[:len(b)+n]
+ if err != nil {
+ if err == EOF {
+ err = nil
+ }
+ return b, err
+ }
+ }
+}
diff --git a/contrib/go/_std_1.19/src/io/ioutil/ioutil.go b/contrib/go/_std_1.19/src/io/ioutil/ioutil.go
new file mode 100644
index 0000000000..6a1d69172c
--- /dev/null
+++ b/contrib/go/_std_1.19/src/io/ioutil/ioutil.go
@@ -0,0 +1,95 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ioutil implements some I/O utility functions.
+//
+// Deprecated: As of Go 1.16, the same functionality is now provided
+// by package io or package os, and those implementations
+// should be preferred in new code.
+// See the specific function documentation for details.
+package ioutil
+
+import (
+ "io"
+ "io/fs"
+ "os"
+ "sort"
+)
+
+// ReadAll reads from r until an error or EOF and returns the data it read.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read from src until EOF, it does not treat an EOF from Read
+// as an error to be reported.
+//
+// Deprecated: As of Go 1.16, this function simply calls io.ReadAll.
+func ReadAll(r io.Reader) ([]byte, error) {
+ return io.ReadAll(r)
+}
+
+// ReadFile reads the file named by filename and returns the contents.
+// A successful call returns err == nil, not err == EOF. Because ReadFile
+// reads the whole file, it does not treat an EOF from Read as an error
+// to be reported.
+//
+// Deprecated: As of Go 1.16, this function simply calls os.ReadFile.
+func ReadFile(filename string) ([]byte, error) {
+ return os.ReadFile(filename)
+}
+
+// WriteFile writes data to a file named by filename.
+// If the file does not exist, WriteFile creates it with permissions perm
+// (before umask); otherwise WriteFile truncates it before writing, without changing permissions.
+//
+// Deprecated: As of Go 1.16, this function simply calls os.WriteFile.
+func WriteFile(filename string, data []byte, perm fs.FileMode) error {
+ return os.WriteFile(filename, data, perm)
+}
+
+// ReadDir reads the directory named by dirname and returns
+// a list of fs.FileInfo for the directory's contents,
+// sorted by filename. If an error occurs reading the directory,
+// ReadDir returns no directory entries along with the error.
+//
+// Deprecated: As of Go 1.16, os.ReadDir is a more efficient and correct choice:
+// it returns a list of fs.DirEntry instead of fs.FileInfo,
+// and it returns partial results in the case of an error
+// midway through reading a directory.
+//
+// If you must continue obtaining a list of fs.FileInfo, you still can:
+//
+// entries, err := os.ReadDir(dirname)
+// if err != nil { ... }
+// infos := make([]fs.FileInfo, 0, len(entries))
+// for _, entry := range entries {
+// info, err := entry.Info()
+// if err != nil { ... }
+// infos = append(infos, info)
+// }
+func ReadDir(dirname string) ([]fs.FileInfo, error) {
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ list, err := f.Readdir(-1)
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ sort.Slice(list, func(i, j int) bool { return list[i].Name() < list[j].Name() })
+ return list, nil
+}
+
+// NopCloser returns a ReadCloser with a no-op Close method wrapping
+// the provided Reader r.
+//
+// Deprecated: As of Go 1.16, this function simply calls io.NopCloser.
+func NopCloser(r io.Reader) io.ReadCloser {
+ return io.NopCloser(r)
+}
+
+// Discard is an io.Writer on which all Write calls succeed
+// without doing anything.
+//
+// Deprecated: As of Go 1.16, this value is simply io.Discard.
+var Discard io.Writer = io.Discard
diff --git a/contrib/go/_std_1.19/src/io/ioutil/tempfile.go b/contrib/go/_std_1.19/src/io/ioutil/tempfile.go
new file mode 100644
index 0000000000..0561ad5a27
--- /dev/null
+++ b/contrib/go/_std_1.19/src/io/ioutil/tempfile.go
@@ -0,0 +1,41 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ioutil
+
+import (
+ "os"
+)
+
+// TempFile creates a new temporary file in the directory dir,
+// opens the file for reading and writing, and returns the resulting *os.File.
+// The filename is generated by taking pattern and adding a random
+// string to the end. If pattern includes a "*", the random string
+// replaces the last "*".
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
+// to remove the file when no longer needed.
+//
+// Deprecated: As of Go 1.17, this function simply calls os.CreateTemp.
+func TempFile(dir, pattern string) (f *os.File, err error) {
+ return os.CreateTemp(dir, pattern)
+}
+
+// TempDir creates a new temporary directory in the directory dir.
+// The directory name is generated by taking pattern and applying a
+// random string to the end. If pattern includes a "*", the random string
+// replaces the last "*". TempDir returns the name of the new directory.
+// If dir is the empty string, TempDir uses the
+// default directory for temporary files (see os.TempDir).
+// Multiple programs calling TempDir simultaneously
+// will not choose the same directory. It is the caller's responsibility
+// to remove the directory when no longer needed.
+//
+// Deprecated: As of Go 1.17, this function simply calls os.MkdirTemp.
+func TempDir(dir, pattern string) (name string, err error) {
+ return os.MkdirTemp(dir, pattern)
+}
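+
+// For illustration (not part of the upstream source): the "*" placeholder
+// in the pattern, per the docs above (error handling elided):
+//
+//    f, _ := TempFile("", "example-*.txt") // e.g. /tmp/example-123456789.txt
+//    defer os.Remove(f.Name())             // caller's responsibility
+//    dir, _ := TempDir("", "cache-*")      // e.g. /tmp/cache-987654321
+//    defer os.RemoveAll(dir)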
diff --git a/contrib/go/_std_1.19/src/io/multi.go b/contrib/go/_std_1.19/src/io/multi.go
new file mode 100644
index 0000000000..07a9afffda
--- /dev/null
+++ b/contrib/go/_std_1.19/src/io/multi.go
@@ -0,0 +1,137 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package io
+
+type eofReader struct{}
+
+func (eofReader) Read([]byte) (int, error) {
+ return 0, EOF
+}
+
+type multiReader struct {
+ readers []Reader
+}
+
+func (mr *multiReader) Read(p []byte) (n int, err error) {
+ for len(mr.readers) > 0 {
+ // Optimization to flatten nested multiReaders (Issue 13558).
+ if len(mr.readers) == 1 {
+ if r, ok := mr.readers[0].(*multiReader); ok {
+ mr.readers = r.readers
+ continue
+ }
+ }
+ n, err = mr.readers[0].Read(p)
+ if err == EOF {
+ // Use eofReader instead of nil to avoid nil panic
+ // after performing flatten (Issue 18232).
+ mr.readers[0] = eofReader{} // permit earlier GC
+ mr.readers = mr.readers[1:]
+ }
+ if n > 0 || err != EOF {
+ if err == EOF && len(mr.readers) > 0 {
+ // Don't return EOF yet. More readers remain.
+ err = nil
+ }
+ return
+ }
+ }
+ return 0, EOF
+}
+
+func (mr *multiReader) WriteTo(w Writer) (sum int64, err error) {
+ return mr.writeToWithBuffer(w, make([]byte, 1024*32))
+}
+
+func (mr *multiReader) writeToWithBuffer(w Writer, buf []byte) (sum int64, err error) {
+ for i, r := range mr.readers {
+ var n int64
+ if subMr, ok := r.(*multiReader); ok { // reuse buffer with nested multiReaders
+ n, err = subMr.writeToWithBuffer(w, buf)
+ } else {
+ n, err = copyBuffer(w, r, buf)
+ }
+ sum += n
+ if err != nil {
+ mr.readers = mr.readers[i:] // permit resume / retry after error
+ return sum, err
+ }
+ mr.readers[i] = nil // permit early GC
+ }
+ mr.readers = nil
+ return sum, nil
+}
+
+var _ WriterTo = (*multiReader)(nil)
+
+// MultiReader returns a Reader that's the logical concatenation of
+// the provided input readers. They're read sequentially. Once all
+// inputs have returned EOF, Read will return EOF. If any of the readers
+// return a non-nil, non-EOF error, Read will return that error.
+func MultiReader(readers ...Reader) Reader {
+ r := make([]Reader, len(readers))
+ copy(r, readers)
+ return &multiReader{r}
+}
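
A minimal sketch of the concatenation semantics documented above: the readers drain in order, and a single EOF surfaces only after the last one:

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    func main() {
        r := io.MultiReader(
            strings.NewReader("first "),
            strings.NewReader("second "),
            strings.NewReader("third"),
        )
        b, err := io.ReadAll(r)       // drains all three readers
        fmt.Printf("%q %v\n", b, err) // "first second third" <nil>
    }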
+
+type multiWriter struct {
+ writers []Writer
+}
+
+func (t *multiWriter) Write(p []byte) (n int, err error) {
+ for _, w := range t.writers {
+ n, err = w.Write(p)
+ if err != nil {
+ return
+ }
+ if n != len(p) {
+ err = ErrShortWrite
+ return
+ }
+ }
+ return len(p), nil
+}
+
+var _ StringWriter = (*multiWriter)(nil)
+
+func (t *multiWriter) WriteString(s string) (n int, err error) {
+ var p []byte // lazily initialized if/when needed
+ for _, w := range t.writers {
+ if sw, ok := w.(StringWriter); ok {
+ n, err = sw.WriteString(s)
+ } else {
+ if p == nil {
+ p = []byte(s)
+ }
+ n, err = w.Write(p)
+ }
+ if err != nil {
+ return
+ }
+ if n != len(s) {
+ err = ErrShortWrite
+ return
+ }
+ }
+ return len(s), nil
+}
+
+// MultiWriter creates a writer that duplicates its writes to all the
+// provided writers, similar to the Unix tee(1) command.
+//
+// Each write is written to each listed writer, one at a time.
+// If a listed writer returns an error, that overall write operation
+// stops and returns the error; it does not continue down the list.
+func MultiWriter(writers ...Writer) Writer {
+ allWriters := make([]Writer, 0, len(writers))
+ for _, w := range writers {
+ if mw, ok := w.(*multiWriter); ok {
+ allWriters = append(allWriters, mw.writers...)
+ } else {
+ allWriters = append(allWriters, w)
+ }
+ }
+ return &multiWriter{allWriters}
+}
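
And the tee-style fan-out for writers, sketched with stdout plus a buffer; an error or short write on any destination aborts the whole Write:

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "os"
    )

    func main() {
        var buf bytes.Buffer
        w := io.MultiWriter(os.Stdout, &buf) // like tee(1)
        fmt.Fprintln(w, "hello")             // reaches both destinations
        fmt.Printf("buffered: %q\n", buf.String())
    }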
diff --git a/contrib/go/_std_1.18/src/io/pipe.go b/contrib/go/_std_1.19/src/io/pipe.go
index 2724e3f7ab..2724e3f7ab 100644
--- a/contrib/go/_std_1.18/src/io/pipe.go
+++ b/contrib/go/_std_1.19/src/io/pipe.go
diff --git a/contrib/go/_std_1.19/src/log/log.go b/contrib/go/_std_1.19/src/log/log.go
new file mode 100644
index 0000000000..f7e48d5599
--- /dev/null
+++ b/contrib/go/_std_1.19/src/log/log.go
@@ -0,0 +1,414 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package log implements a simple logging package. It defines a type, Logger,
+// with methods for formatting output. It also has a predefined 'standard'
+// Logger accessible through helper functions Print[f|ln], Fatal[f|ln], and
+// Panic[f|ln], which are easier to use than creating a Logger manually.
+// That logger writes to standard error and prints the date and time
+// of each logged message.
+// Every log message is output on a separate line: if the message being
+// printed does not end in a newline, the logger will add one.
+// The Fatal functions call os.Exit(1) after writing the log message.
+// The Panic functions call panic after writing the log message.
+package log
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// These flags define which text to prefix to each log entry generated by the Logger.
+// Bits are or'ed together to control what's printed.
+// With the exception of the Lmsgprefix flag, there is no
+// control over the order they appear (the order listed here)
+// or the format they present (as described in the comments).
+// The prefix is followed by a colon only when Llongfile or Lshortfile
+// is specified.
+// For example, flags Ldate | Ltime (or LstdFlags) produce,
+//
+// 2009/01/23 01:23:23 message
+//
+// while flags Ldate | Ltime | Lmicroseconds | Llongfile produce,
+//
+// 2009/01/23 01:23:23.123123 /a/b/c/d.go:23: message
+const (
+ Ldate = 1 << iota // the date in the local time zone: 2009/01/23
+ Ltime // the time in the local time zone: 01:23:23
+ Lmicroseconds // microsecond resolution: 01:23:23.123123. assumes Ltime.
+ Llongfile // full file name and line number: /a/b/c/d.go:23
+ Lshortfile // final file name element and line number: d.go:23. overrides Llongfile
+ LUTC // if Ldate or Ltime is set, use UTC rather than the local time zone
+ Lmsgprefix // move the "prefix" from the beginning of the line to before the message
+ LstdFlags = Ldate | Ltime // initial values for the standard logger
+)
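
A sketch of how the flag bits compose; the headers in the comments follow the documented format above rather than captured output:

    package main

    import (
        "log"
        "os"
    )

    func main() {
        // "2009/01/23 01:23:23 starting"
        l := log.New(os.Stderr, "", log.LstdFlags)
        l.Println("starting")

        // Lmsgprefix moves the prefix from the line start to just
        // before the message: "2009/01/23 01:23:23 worker: starting"
        p := log.New(os.Stderr, "worker: ", log.LstdFlags|log.Lmsgprefix)
        p.Println("starting")

        // Lshortfile adds "file.go:NN: " after the date and time.
        s := log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)
        s.Println("starting")
    }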
+
+// A Logger represents an active logging object that generates lines of
+// output to an io.Writer. Each logging operation makes a single call to
+// the Writer's Write method. A Logger can be used simultaneously from
+// multiple goroutines; it guarantees to serialize access to the Writer.
+type Logger struct {
+ mu sync.Mutex // ensures atomic writes; protects the following fields
+ prefix string // prefix on each line to identify the logger (but see Lmsgprefix)
+ flag int // properties
+ out io.Writer // destination for output
+ buf []byte // for accumulating text to write
+ isDiscard int32 // atomic boolean: whether out == io.Discard
+}
+
+// New creates a new Logger. The out variable sets the
+// destination to which log data will be written.
+// The prefix appears at the beginning of each generated log line, or
+// after the log header if the Lmsgprefix flag is provided.
+// The flag argument defines the logging properties.
+func New(out io.Writer, prefix string, flag int) *Logger {
+ l := &Logger{out: out, prefix: prefix, flag: flag}
+ if out == io.Discard {
+ l.isDiscard = 1
+ }
+ return l
+}
+
+// SetOutput sets the output destination for the logger.
+func (l *Logger) SetOutput(w io.Writer) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.out = w
+ isDiscard := int32(0)
+ if w == io.Discard {
+ isDiscard = 1
+ }
+ atomic.StoreInt32(&l.isDiscard, isDiscard)
+}
+
+var std = New(os.Stderr, "", LstdFlags)
+
+// Default returns the standard logger used by the package-level output functions.
+func Default() *Logger { return std }
+
+// Cheap integer to fixed-width decimal ASCII. Give a negative width to avoid zero-padding.
+func itoa(buf *[]byte, i int, wid int) {
+ // Assemble decimal in reverse order.
+ var b [20]byte
+ bp := len(b) - 1
+ for i >= 10 || wid > 1 {
+ wid--
+ q := i / 10
+ b[bp] = byte('0' + i - q*10)
+ bp--
+ i = q
+ }
+ // i < 10
+ b[bp] = byte('0' + i)
+ *buf = append(*buf, b[bp:]...)
+}
+
+// formatHeader writes the log header to buf in the following order:
+// - l.prefix (if it's not blank and Lmsgprefix is unset),
+// - date and/or time (if corresponding flags are provided),
+// - file and line number (if corresponding flags are provided),
+// - l.prefix (if it's not blank and Lmsgprefix is set).
+func (l *Logger) formatHeader(buf *[]byte, t time.Time, file string, line int) {
+ if l.flag&Lmsgprefix == 0 {
+ *buf = append(*buf, l.prefix...)
+ }
+ if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 {
+ if l.flag&LUTC != 0 {
+ t = t.UTC()
+ }
+ if l.flag&Ldate != 0 {
+ year, month, day := t.Date()
+ itoa(buf, year, 4)
+ *buf = append(*buf, '/')
+ itoa(buf, int(month), 2)
+ *buf = append(*buf, '/')
+ itoa(buf, day, 2)
+ *buf = append(*buf, ' ')
+ }
+ if l.flag&(Ltime|Lmicroseconds) != 0 {
+ hour, min, sec := t.Clock()
+ itoa(buf, hour, 2)
+ *buf = append(*buf, ':')
+ itoa(buf, min, 2)
+ *buf = append(*buf, ':')
+ itoa(buf, sec, 2)
+ if l.flag&Lmicroseconds != 0 {
+ *buf = append(*buf, '.')
+ itoa(buf, t.Nanosecond()/1e3, 6)
+ }
+ *buf = append(*buf, ' ')
+ }
+ }
+ if l.flag&(Lshortfile|Llongfile) != 0 {
+ if l.flag&Lshortfile != 0 {
+ short := file
+ for i := len(file) - 1; i > 0; i-- {
+ if file[i] == '/' {
+ short = file[i+1:]
+ break
+ }
+ }
+ file = short
+ }
+ *buf = append(*buf, file...)
+ *buf = append(*buf, ':')
+ itoa(buf, line, -1)
+ *buf = append(*buf, ": "...)
+ }
+ if l.flag&Lmsgprefix != 0 {
+ *buf = append(*buf, l.prefix...)
+ }
+}
+
+// Output writes the output for a logging event. The string s contains
+// the text to print after the prefix specified by the flags of the
+// Logger. A newline is appended if the last character of s is not
+// already a newline. Calldepth is used to recover the PC and is
+// provided for generality, although at the moment on all pre-defined
+// paths it will be 2.
+func (l *Logger) Output(calldepth int, s string) error {
+ now := time.Now() // get this early.
+ var file string
+ var line int
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ if l.flag&(Lshortfile|Llongfile) != 0 {
+ // Release lock while getting caller info - it's expensive.
+ l.mu.Unlock()
+ var ok bool
+ _, file, line, ok = runtime.Caller(calldepth)
+ if !ok {
+ file = "???"
+ line = 0
+ }
+ l.mu.Lock()
+ }
+ l.buf = l.buf[:0]
+ l.formatHeader(&l.buf, now, file, line)
+ l.buf = append(l.buf, s...)
+ if len(s) == 0 || s[len(s)-1] != '\n' {
+ l.buf = append(l.buf, '\n')
+ }
+ _, err := l.out.Write(l.buf)
+ return err
+}
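
The calldepth argument is easiest to see with a wrapper; in this sketch the helper name warn is invented for illustration:

    package main

    import (
        "log"
        "os"
    )

    var l = log.New(os.Stderr, "", log.Lshortfile)

    // warn passes calldepth 2: inside Output, runtime.Caller(1) would
    // report warn itself, while 2 reports warn's caller.
    func warn(msg string) {
        l.Output(2, "WARN "+msg)
    }

    func main() {
        warn("disk almost full") // header shows this line, not warn's
    }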
+
+// Printf calls l.Output to print to the logger.
+// Arguments are handled in the manner of fmt.Printf.
+func (l *Logger) Printf(format string, v ...any) {
+ if atomic.LoadInt32(&l.isDiscard) != 0 {
+ return
+ }
+ l.Output(2, fmt.Sprintf(format, v...))
+}
+
+// Print calls l.Output to print to the logger.
+// Arguments are handled in the manner of fmt.Print.
+func (l *Logger) Print(v ...any) {
+ if atomic.LoadInt32(&l.isDiscard) != 0 {
+ return
+ }
+ l.Output(2, fmt.Sprint(v...))
+}
+
+// Println calls l.Output to print to the logger.
+// Arguments are handled in the manner of fmt.Println.
+func (l *Logger) Println(v ...any) {
+ if atomic.LoadInt32(&l.isDiscard) != 0 {
+ return
+ }
+ l.Output(2, fmt.Sprintln(v...))
+}
+
+// Fatal is equivalent to l.Print() followed by a call to os.Exit(1).
+func (l *Logger) Fatal(v ...any) {
+ l.Output(2, fmt.Sprint(v...))
+ os.Exit(1)
+}
+
+// Fatalf is equivalent to l.Printf() followed by a call to os.Exit(1).
+func (l *Logger) Fatalf(format string, v ...any) {
+ l.Output(2, fmt.Sprintf(format, v...))
+ os.Exit(1)
+}
+
+// Fatalln is equivalent to l.Println() followed by a call to os.Exit(1).
+func (l *Logger) Fatalln(v ...any) {
+ l.Output(2, fmt.Sprintln(v...))
+ os.Exit(1)
+}
+
+// Panic is equivalent to l.Print() followed by a call to panic().
+func (l *Logger) Panic(v ...any) {
+ s := fmt.Sprint(v...)
+ l.Output(2, s)
+ panic(s)
+}
+
+// Panicf is equivalent to l.Printf() followed by a call to panic().
+func (l *Logger) Panicf(format string, v ...any) {
+ s := fmt.Sprintf(format, v...)
+ l.Output(2, s)
+ panic(s)
+}
+
+// Panicln is equivalent to l.Println() followed by a call to panic().
+func (l *Logger) Panicln(v ...any) {
+ s := fmt.Sprintln(v...)
+ l.Output(2, s)
+ panic(s)
+}
+
+// Flags returns the output flags for the logger.
+// The flag bits are Ldate, Ltime, and so on.
+func (l *Logger) Flags() int {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ return l.flag
+}
+
+// SetFlags sets the output flags for the logger.
+// The flag bits are Ldate, Ltime, and so on.
+func (l *Logger) SetFlags(flag int) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.flag = flag
+}
+
+// Prefix returns the output prefix for the logger.
+func (l *Logger) Prefix() string {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ return l.prefix
+}
+
+// SetPrefix sets the output prefix for the logger.
+func (l *Logger) SetPrefix(prefix string) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.prefix = prefix
+}
+
+// Writer returns the output destination for the logger.
+func (l *Logger) Writer() io.Writer {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ return l.out
+}
+
+// SetOutput sets the output destination for the standard logger.
+func SetOutput(w io.Writer) {
+ std.SetOutput(w)
+}
+
+// Flags returns the output flags for the standard logger.
+// The flag bits are Ldate, Ltime, and so on.
+func Flags() int {
+ return std.Flags()
+}
+
+// SetFlags sets the output flags for the standard logger.
+// The flag bits are Ldate, Ltime, and so on.
+func SetFlags(flag int) {
+ std.SetFlags(flag)
+}
+
+// Prefix returns the output prefix for the standard logger.
+func Prefix() string {
+ return std.Prefix()
+}
+
+// SetPrefix sets the output prefix for the standard logger.
+func SetPrefix(prefix string) {
+ std.SetPrefix(prefix)
+}
+
+// Writer returns the output destination for the standard logger.
+func Writer() io.Writer {
+ return std.Writer()
+}
+
+// These functions write to the standard logger.
+
+// Print calls Output to print to the standard logger.
+// Arguments are handled in the manner of fmt.Print.
+func Print(v ...any) {
+ if atomic.LoadInt32(&std.isDiscard) != 0 {
+ return
+ }
+ std.Output(2, fmt.Sprint(v...))
+}
+
+// Printf calls Output to print to the standard logger.
+// Arguments are handled in the manner of fmt.Printf.
+func Printf(format string, v ...any) {
+ if atomic.LoadInt32(&std.isDiscard) != 0 {
+ return
+ }
+ std.Output(2, fmt.Sprintf(format, v...))
+}
+
+// Println calls Output to print to the standard logger.
+// Arguments are handled in the manner of fmt.Println.
+func Println(v ...any) {
+ if atomic.LoadInt32(&std.isDiscard) != 0 {
+ return
+ }
+ std.Output(2, fmt.Sprintln(v...))
+}
+
+// Fatal is equivalent to Print() followed by a call to os.Exit(1).
+func Fatal(v ...any) {
+ std.Output(2, fmt.Sprint(v...))
+ os.Exit(1)
+}
+
+// Fatalf is equivalent to Printf() followed by a call to os.Exit(1).
+func Fatalf(format string, v ...any) {
+ std.Output(2, fmt.Sprintf(format, v...))
+ os.Exit(1)
+}
+
+// Fatalln is equivalent to Println() followed by a call to os.Exit(1).
+func Fatalln(v ...any) {
+ std.Output(2, fmt.Sprintln(v...))
+ os.Exit(1)
+}
+
+// Panic is equivalent to Print() followed by a call to panic().
+func Panic(v ...any) {
+ s := fmt.Sprint(v...)
+ std.Output(2, s)
+ panic(s)
+}
+
+// Panicf is equivalent to Printf() followed by a call to panic().
+func Panicf(format string, v ...any) {
+ s := fmt.Sprintf(format, v...)
+ std.Output(2, s)
+ panic(s)
+}
+
+// Panicln is equivalent to Println() followed by a call to panic().
+func Panicln(v ...any) {
+ s := fmt.Sprintln(v...)
+ std.Output(2, s)
+ panic(s)
+}
+
+// Output writes the output for a logging event. The string s contains
+// the text to print after the prefix specified by the flags of the
+// Logger. A newline is appended if the last character of s is not
+// already a newline. Calldepth is the count of the number of
+// frames to skip when computing the file name and line number
+// if Llongfile or Lshortfile is set; a value of 1 will print the details
+// for the caller of Output.
+func Output(calldepth int, s string) error {
+ return std.Output(calldepth+1, s) // +1 for this frame.
+}
diff --git a/contrib/go/_std_1.19/src/math/abs.go b/contrib/go/_std_1.19/src/math/abs.go
new file mode 100644
index 0000000000..08be14548d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/abs.go
@@ -0,0 +1,15 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// Abs returns the absolute value of x.
+//
+// Special cases are:
+//
+// Abs(±Inf) = +Inf
+// Abs(NaN) = NaN
+func Abs(x float64) float64 {
+ return Float64frombits(Float64bits(x) &^ (1 << 63))
+}
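
The expression clears only the IEEE-754 sign bit (bit 63), which is also why Abs(NaN) stays NaN; a one-line sketch of the same identity:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        x := -1.5
        // Masking off bit 63 flips the sign and nothing else.
        y := math.Float64frombits(math.Float64bits(x) &^ (1 << 63))
        fmt.Println(y, math.Abs(x))                   // 1.5 1.5
        fmt.Println(math.IsNaN(math.Abs(math.NaN()))) // true
    }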
diff --git a/contrib/go/_std_1.19/src/math/acosh.go b/contrib/go/_std_1.19/src/math/acosh.go
new file mode 100644
index 0000000000..a85d003d3e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/acosh.go
@@ -0,0 +1,65 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// The original C code, the long comment, and the constants
+// below are from FreeBSD's /usr/src/lib/msun/src/e_acosh.c
+// and came with this notice. The go code is a simplified
+// version of the original C.
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+//
+// __ieee754_acosh(x)
+// Method :
+// Based on
+// acosh(x) = log [ x + sqrt(x*x-1) ]
+// we have
+// acosh(x) := log(x)+ln2, if x is large; else
+// acosh(x) := log(2x-1/(sqrt(x*x-1)+x)) if x>2; else
+// acosh(x) := log1p(t+sqrt(2.0*t+t*t)); where t=x-1.
+//
+// Special cases:
+// acosh(x) is NaN with signal if x<1.
+// acosh(NaN) is NaN without signal.
+//
+
+// Acosh returns the inverse hyperbolic cosine of x.
+//
+// Special cases are:
+//
+// Acosh(+Inf) = +Inf
+// Acosh(x) = NaN if x < 1
+// Acosh(NaN) = NaN
+func Acosh(x float64) float64 {
+ if haveArchAcosh {
+ return archAcosh(x)
+ }
+ return acosh(x)
+}
+
+func acosh(x float64) float64 {
+ const Large = 1 << 28 // 2**28
+ // first case is special case
+ switch {
+ case x < 1 || IsNaN(x):
+ return NaN()
+ case x == 1:
+ return 0
+ case x >= Large:
+ return Log(x) + Ln2 // x > 2**28
+ case x > 2:
+ return Log(2*x - 1/(x+Sqrt(x*x-1))) // 2**28 > x > 2
+ }
+ t := x - 1
+ return Log1p(t + Sqrt(2*t+t*t)) // 2 >= x > 1
+}
diff --git a/contrib/go/_std_1.19/src/math/asin.go b/contrib/go/_std_1.19/src/math/asin.go
new file mode 100644
index 0000000000..8e1b2ab491
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/asin.go
@@ -0,0 +1,67 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+/*
+ Floating-point arcsine and arccosine.
+
+ They are implemented by computing the arctangent
+ after appropriate range reduction.
+*/
+
+// Asin returns the arcsine, in radians, of x.
+//
+// Special cases are:
+//
+// Asin(±0) = ±0
+// Asin(x) = NaN if x < -1 or x > 1
+func Asin(x float64) float64 {
+ if haveArchAsin {
+ return archAsin(x)
+ }
+ return asin(x)
+}
+
+func asin(x float64) float64 {
+ if x == 0 {
+ return x // special case
+ }
+ sign := false
+ if x < 0 {
+ x = -x
+ sign = true
+ }
+ if x > 1 {
+ return NaN() // special case
+ }
+
+ temp := Sqrt(1 - x*x)
+ if x > 0.7 {
+ temp = Pi/2 - satan(temp/x)
+ } else {
+ temp = satan(x / temp)
+ }
+
+ if sign {
+ temp = -temp
+ }
+ return temp
+}
+
+// Acos returns the arccosine, in radians, of x.
+//
+// Special case is:
+//
+// Acos(x) = NaN if x < -1 or x > 1
+func Acos(x float64) float64 {
+ if haveArchAcos {
+ return archAcos(x)
+ }
+ return acos(x)
+}
+
+func acos(x float64) float64 {
+ return Pi/2 - Asin(x)
+}
diff --git a/contrib/go/_std_1.19/src/math/asinh.go b/contrib/go/_std_1.19/src/math/asinh.go
new file mode 100644
index 0000000000..6f6e9e4608
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/asinh.go
@@ -0,0 +1,77 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// The original C code, the long comment, and the constants
+// below are from FreeBSD's /usr/src/lib/msun/src/s_asinh.c
+// and came with this notice. The go code is a simplified
+// version of the original C.
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+//
+// asinh(x)
+// Method :
+// Based on
+// asinh(x) = sign(x) * log [ |x| + sqrt(x*x+1) ]
+// we have
+// asinh(x) := x if 1+x*x=1,
+// := sign(x)*(log(x)+ln2)) for large |x|, else
+// := sign(x)*log(2|x|+1/(|x|+sqrt(x*x+1))) if|x|>2, else
+// := sign(x)*log1p(|x| + x**2/(1 + sqrt(1+x**2)))
+//
+
+// Asinh returns the inverse hyperbolic sine of x.
+//
+// Special cases are:
+//
+// Asinh(±0) = ±0
+// Asinh(±Inf) = ±Inf
+// Asinh(NaN) = NaN
+func Asinh(x float64) float64 {
+ if haveArchAsinh {
+ return archAsinh(x)
+ }
+ return asinh(x)
+}
+
+func asinh(x float64) float64 {
+ const (
+ Ln2 = 6.93147180559945286227e-01 // 0x3FE62E42FEFA39EF
+ NearZero = 1.0 / (1 << 28) // 2**-28
+ Large = 1 << 28 // 2**28
+ )
+ // special cases
+ if IsNaN(x) || IsInf(x, 0) {
+ return x
+ }
+ sign := false
+ if x < 0 {
+ x = -x
+ sign = true
+ }
+ var temp float64
+ switch {
+ case x > Large:
+ temp = Log(x) + Ln2 // |x| > 2**28
+ case x > 2:
+ temp = Log(2*x + 1/(Sqrt(x*x+1)+x)) // 2**28 > |x| > 2.0
+ case x < NearZero:
+ temp = x // |x| < 2**-28
+ default:
+ temp = Log1p(x + x*x/(1+Sqrt(1+x*x))) // 2.0 > |x| > 2**-28
+ }
+ if sign {
+ temp = -temp
+ }
+ return temp
+}
diff --git a/contrib/go/_std_1.19/src/math/atan.go b/contrib/go/_std_1.19/src/math/atan.go
new file mode 100644
index 0000000000..e722e99757
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/atan.go
@@ -0,0 +1,111 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+/*
+ Floating-point arctangent.
+*/
+
+// The original C code, the long comment, and the constants below were
+// from http://netlib.sandia.gov/cephes/cmath/atan.c, available from
+// http://www.netlib.org/cephes/cmath.tgz.
+// The go code is a version of the original C.
+//
+// atan.c
+// Inverse circular tangent (arctangent)
+//
+// SYNOPSIS:
+// double x, y, atan();
+// y = atan( x );
+//
+// DESCRIPTION:
+// Returns radian angle between -pi/2 and +pi/2 whose tangent is x.
+//
+// Range reduction is from three intervals into the interval from zero to 0.66.
+// The approximant uses a rational function of degree 4/5 of the form
+// x + x**3 P(x)/Q(x).
+//
+// ACCURACY:
+// Relative error:
+// arithmetic domain # trials peak rms
+// DEC -10, 10 50000 2.4e-17 8.3e-18
+// IEEE -10, 10 10^6 1.8e-16 5.0e-17
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+//
+// The readme file at http://netlib.sandia.gov/cephes/ says:
+// Some software in this archive may be from the book _Methods and
+// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+// International, 1989) or from the Cephes Mathematical Library, a
+// commercial product. In either event, it is copyrighted by the author.
+// What you see here may be used freely but it comes with no support or
+// guarantee.
+//
+// The two known misprints in the book are repaired here in the
+// source listings for the gamma function and the incomplete beta
+// integral.
+//
+// Stephen L. Moshier
+// moshier@na-net.ornl.gov
+
+// xatan evaluates a series valid in the range [0, 0.66].
+func xatan(x float64) float64 {
+ const (
+ P0 = -8.750608600031904122785e-01
+ P1 = -1.615753718733365076637e+01
+ P2 = -7.500855792314704667340e+01
+ P3 = -1.228866684490136173410e+02
+ P4 = -6.485021904942025371773e+01
+ Q0 = +2.485846490142306297962e+01
+ Q1 = +1.650270098316988542046e+02
+ Q2 = +4.328810604912902668951e+02
+ Q3 = +4.853903996359136964868e+02
+ Q4 = +1.945506571482613964425e+02
+ )
+ z := x * x
+ z = z * ((((P0*z+P1)*z+P2)*z+P3)*z + P4) / (((((z+Q0)*z+Q1)*z+Q2)*z+Q3)*z + Q4)
+ z = x*z + x
+ return z
+}
+
+// satan reduces its argument (known to be positive)
+// to the range [0, 0.66] and calls xatan.
+func satan(x float64) float64 {
+ const (
+ Morebits = 6.123233995736765886130e-17 // pi/2 = PIO2 + Morebits
+ Tan3pio8 = 2.41421356237309504880 // tan(3*pi/8)
+ )
+ if x <= 0.66 {
+ return xatan(x)
+ }
+ if x > Tan3pio8 {
+ return Pi/2 - xatan(1/x) + Morebits
+ }
+ return Pi/4 + xatan((x-1)/(x+1)) + 0.5*Morebits
+}
+
+// Atan returns the arctangent, in radians, of x.
+//
+// Special cases are:
+//
+// Atan(±0) = ±0
+// Atan(±Inf) = ±Pi/2
+func Atan(x float64) float64 {
+ if haveArchAtan {
+ return archAtan(x)
+ }
+ return atan(x)
+}
+
+func atan(x float64) float64 {
+ if x == 0 {
+ return x
+ }
+ if x > 0 {
+ return satan(x)
+ }
+ return -satan(-x)
+}
diff --git a/contrib/go/_std_1.19/src/math/atan2.go b/contrib/go/_std_1.19/src/math/atan2.go
new file mode 100644
index 0000000000..c324ed0a15
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/atan2.go
@@ -0,0 +1,77 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// Atan2 returns the arc tangent of y/x, using
+// the signs of the two to determine the quadrant
+// of the return value.
+//
+// Special cases are (in order):
+//
+// Atan2(y, NaN) = NaN
+// Atan2(NaN, x) = NaN
+// Atan2(+0, x>=0) = +0
+// Atan2(-0, x>=0) = -0
+// Atan2(+0, x<=-0) = +Pi
+// Atan2(-0, x<=-0) = -Pi
+// Atan2(y>0, 0) = +Pi/2
+// Atan2(y<0, 0) = -Pi/2
+// Atan2(+Inf, +Inf) = +Pi/4
+// Atan2(-Inf, +Inf) = -Pi/4
+// Atan2(+Inf, -Inf) = 3Pi/4
+// Atan2(-Inf, -Inf) = -3Pi/4
+// Atan2(y, +Inf) = 0
+// Atan2(y>0, -Inf) = +Pi
+// Atan2(y<0, -Inf) = -Pi
+// Atan2(+Inf, x) = +Pi/2
+// Atan2(-Inf, x) = -Pi/2
+func Atan2(y, x float64) float64 {
+ if haveArchAtan2 {
+ return archAtan2(y, x)
+ }
+ return atan2(y, x)
+}
+
+func atan2(y, x float64) float64 {
+ // special cases
+ switch {
+ case IsNaN(y) || IsNaN(x):
+ return NaN()
+ case y == 0:
+ if x >= 0 && !Signbit(x) {
+ return Copysign(0, y)
+ }
+ return Copysign(Pi, y)
+ case x == 0:
+ return Copysign(Pi/2, y)
+ case IsInf(x, 0):
+ if IsInf(x, 1) {
+ switch {
+ case IsInf(y, 0):
+ return Copysign(Pi/4, y)
+ default:
+ return Copysign(0, y)
+ }
+ }
+ switch {
+ case IsInf(y, 0):
+ return Copysign(3*Pi/4, y)
+ default:
+ return Copysign(Pi, y)
+ }
+ case IsInf(y, 0):
+ return Copysign(Pi/2, y)
+ }
+
+ // Call atan and determine the quadrant.
+ q := Atan(y / x)
+ if x < 0 {
+ if q <= 0 {
+ return q + Pi
+ }
+ return q - Pi
+ }
+ return q
+}
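
A quick sketch exercising a few of the documented special cases:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        fmt.Println(math.Atan2(0, -1))                     // +Pi
        fmt.Println(math.Atan2(math.Inf(1), math.Inf(1)))  // +Pi/4
        fmt.Println(math.Atan2(1, 0) == math.Pi/2)         // true
        fmt.Println(math.IsNaN(math.Atan2(math.NaN(), 1))) // true
    }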
diff --git a/contrib/go/_std_1.19/src/math/atanh.go b/contrib/go/_std_1.19/src/math/atanh.go
new file mode 100644
index 0000000000..9d594625a5
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/atanh.go
@@ -0,0 +1,85 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// The original C code, the long comment, and the constants
+// below are from FreeBSD's /usr/src/lib/msun/src/e_atanh.c
+// and came with this notice. The go code is a simplified
+// version of the original C.
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+//
+// __ieee754_atanh(x)
+// Method :
+// 1. Reduce x to positive by atanh(-x) = -atanh(x)
+// 2. For x>=0.5
+// 1 2x x
+// atanh(x) = --- * log(1 + -------) = 0.5 * log1p(2 * --------)
+// 2 1 - x 1 - x
+//
+// For x<0.5
+// atanh(x) = 0.5*log1p(2x+2x*x/(1-x))
+//
+// Special cases:
+// atanh(x) is NaN if |x| > 1 with signal;
+// atanh(NaN) is that NaN with no signal;
+// atanh(+-1) is +-INF with signal.
+//
+
+// Atanh returns the inverse hyperbolic tangent of x.
+//
+// Special cases are:
+//
+// Atanh(1) = +Inf
+// Atanh(±0) = ±0
+// Atanh(-1) = -Inf
+// Atanh(x) = NaN if x < -1 or x > 1
+// Atanh(NaN) = NaN
+func Atanh(x float64) float64 {
+ if haveArchAtanh {
+ return archAtanh(x)
+ }
+ return atanh(x)
+}
+
+func atanh(x float64) float64 {
+ const NearZero = 1.0 / (1 << 28) // 2**-28
+ // special cases
+ switch {
+ case x < -1 || x > 1 || IsNaN(x):
+ return NaN()
+ case x == 1:
+ return Inf(1)
+ case x == -1:
+ return Inf(-1)
+ }
+ sign := false
+ if x < 0 {
+ x = -x
+ sign = true
+ }
+ var temp float64
+ switch {
+ case x < NearZero:
+ temp = x
+ case x < 0.5:
+ temp = x + x
+ temp = 0.5 * Log1p(temp+temp*x/(1-x))
+ default:
+ temp = 0.5 * Log1p((x+x)/(1-x))
+ }
+ if sign {
+ temp = -temp
+ }
+ return temp
+}
diff --git a/contrib/go/_std_1.18/src/math/big/accuracy_string.go b/contrib/go/_std_1.19/src/math/big/accuracy_string.go
index 1501ace00d..1501ace00d 100644
--- a/contrib/go/_std_1.18/src/math/big/accuracy_string.go
+++ b/contrib/go/_std_1.19/src/math/big/accuracy_string.go
diff --git a/contrib/go/_std_1.19/src/math/big/arith.go b/contrib/go/_std_1.19/src/math/big/arith.go
new file mode 100644
index 0000000000..06e63e2574
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/big/arith.go
@@ -0,0 +1,277 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides Go implementations of elementary multi-precision
+// arithmetic operations on word vectors. These have the suffix _g.
+// These are needed for platforms without assembly implementations of these routines.
+// This file also contains elementary operations that can be implemented
+// sufficiently efficiently in Go.
+
+package big
+
+import "math/bits"
+
+// A Word represents a single digit of a multi-precision unsigned integer.
+type Word uint
+
+const (
+ _S = _W / 8 // word size in bytes
+
+ _W = bits.UintSize // word size in bits
+ _B = 1 << _W // digit base
+ _M = _B - 1 // digit mask
+)
+
+// Many of the loops in this file are of the form
+// for i := 0; i < len(z) && i < len(x) && i < len(y); i++
+// i < len(z) is the real condition.
+// However, checking i < len(x) && i < len(y) as well is faster than
+// having the compiler do a bounds check in the body of the loop;
+// remarkably it is even faster than hoisting the bounds check
+// out of the loop, by doing something like
+// _, _ = x[len(z)-1], y[len(z)-1]
+// There are other ways to hoist the bounds check out of the loop,
+// but the compiler's BCE isn't powerful enough for them (yet?).
+// See the discussion in CL 164966.
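
A sketch of the loop shape that comment describes, under a hypothetical name outside the package; the redundant length comparisons are what let the compiler drop per-iteration bounds checks:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // addVecs mirrors the addVV_g loop shape (name invented here).
    func addVecs(z, x, y []uint) (c uint) {
        // i < len(z) is the real bound; also testing i against len(x)
        // and len(y) proves x[i] and y[i] are in range, so the body
        // compiles without bounds checks.
        for i := 0; i < len(z) && i < len(x) && i < len(y); i++ {
            z[i], c = bits.Add(x[i], y[i], c)
        }
        return c
    }

    func main() {
        z := make([]uint, 2)
        c := addVecs(z, []uint{^uint(0), 1}, []uint{1, 1})
        fmt.Println(z, c) // [0 3] 0 - the carry rippled into word 1
    }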
+
+// ----------------------------------------------------------------------------
+// Elementary operations on words
+//
+// These operations are used by the vector operations below.
+
+// z1<<_W + z0 = x*y
+func mulWW(x, y Word) (z1, z0 Word) {
+ hi, lo := bits.Mul(uint(x), uint(y))
+ return Word(hi), Word(lo)
+}
+
+// z1<<_W + z0 = x*y + c
+func mulAddWWW_g(x, y, c Word) (z1, z0 Word) {
+ hi, lo := bits.Mul(uint(x), uint(y))
+ var cc uint
+ lo, cc = bits.Add(lo, uint(c), 0)
+ return Word(hi + cc), Word(lo)
+}
+
+// nlz returns the number of leading zeros in x.
+// Wraps bits.LeadingZeros call for convenience.
+func nlz(x Word) uint {
+ return uint(bits.LeadingZeros(uint(x)))
+}
+
+// The resulting carry c is either 0 or 1.
+func addVV_g(z, x, y []Word) (c Word) {
+ // The comment near the top of this file discusses this for loop condition.
+ for i := 0; i < len(z) && i < len(x) && i < len(y); i++ {
+ zi, cc := bits.Add(uint(x[i]), uint(y[i]), uint(c))
+ z[i] = Word(zi)
+ c = Word(cc)
+ }
+ return
+}
+
+// The resulting carry c is either 0 or 1.
+func subVV_g(z, x, y []Word) (c Word) {
+ // The comment near the top of this file discusses this for loop condition.
+ for i := 0; i < len(z) && i < len(x) && i < len(y); i++ {
+ zi, cc := bits.Sub(uint(x[i]), uint(y[i]), uint(c))
+ z[i] = Word(zi)
+ c = Word(cc)
+ }
+ return
+}
+
+// The resulting carry c is either 0 or 1.
+func addVW_g(z, x []Word, y Word) (c Word) {
+ c = y
+ // The comment near the top of this file discusses this for loop condition.
+ for i := 0; i < len(z) && i < len(x); i++ {
+ zi, cc := bits.Add(uint(x[i]), uint(c), 0)
+ z[i] = Word(zi)
+ c = Word(cc)
+ }
+ return
+}
+
+// addVWlarge is addVW, but intended for large z.
+// The only difference is that we check on every iteration
+// whether we are done with carries,
+// and if so, switch to a much faster copy instead.
+// This is only a good idea for large z,
+// because the overhead of the check and the function call
+// outweigh the benefits when z is small.
+func addVWlarge(z, x []Word, y Word) (c Word) {
+ c = y
+ // The comment near the top of this file discusses this for loop condition.
+ for i := 0; i < len(z) && i < len(x); i++ {
+ if c == 0 {
+ copy(z[i:], x[i:])
+ return
+ }
+ zi, cc := bits.Add(uint(x[i]), uint(c), 0)
+ z[i] = Word(zi)
+ c = Word(cc)
+ }
+ return
+}
+
+func subVW_g(z, x []Word, y Word) (c Word) {
+ c = y
+ // The comment near the top of this file discusses this for loop condition.
+ for i := 0; i < len(z) && i < len(x); i++ {
+ zi, cc := bits.Sub(uint(x[i]), uint(c), 0)
+ z[i] = Word(zi)
+ c = Word(cc)
+ }
+ return
+}
+
+// subVWlarge is to subVW as addVWlarge is to addVW.
+func subVWlarge(z, x []Word, y Word) (c Word) {
+ c = y
+ // The comment near the top of this file discusses this for loop condition.
+ for i := 0; i < len(z) && i < len(x); i++ {
+ if c == 0 {
+ copy(z[i:], x[i:])
+ return
+ }
+ zi, cc := bits.Sub(uint(x[i]), uint(c), 0)
+ z[i] = Word(zi)
+ c = Word(cc)
+ }
+ return
+}
+
+func shlVU_g(z, x []Word, s uint) (c Word) {
+ if s == 0 {
+ copy(z, x)
+ return
+ }
+ if len(z) == 0 {
+ return
+ }
+ s &= _W - 1 // hint to the compiler that shifts by s don't need guard code
+ ŝ := _W - s
+ ŝ &= _W - 1 // ditto
+ c = x[len(z)-1] >> ŝ
+ for i := len(z) - 1; i > 0; i-- {
+ z[i] = x[i]<<s | x[i-1]>>ŝ
+ }
+ z[0] = x[0] << s
+ return
+}
+
+func shrVU_g(z, x []Word, s uint) (c Word) {
+ if s == 0 {
+ copy(z, x)
+ return
+ }
+ if len(z) == 0 {
+ return
+ }
+ if len(x) != len(z) {
+ // This is an invariant guaranteed by the caller.
+ panic("len(x) != len(z)")
+ }
+ s &= _W - 1 // hint to the compiler that shifts by s don't need guard code
+ ŝ := _W - s
+ ŝ &= _W - 1 // ditto
+ c = x[0] << ŝ
+ for i := 1; i < len(z); i++ {
+ z[i-1] = x[i-1]>>s | x[i]<<ŝ
+ }
+ z[len(z)-1] = x[len(z)-1] >> s
+ return
+}
+
+func mulAddVWW_g(z, x []Word, y, r Word) (c Word) {
+ c = r
+ // The comment near the top of this file discusses this for loop condition.
+ for i := 0; i < len(z) && i < len(x); i++ {
+ c, z[i] = mulAddWWW_g(x[i], y, c)
+ }
+ return
+}
+
+func addMulVVW_g(z, x []Word, y Word) (c Word) {
+ // The comment near the top of this file discusses this for loop condition.
+ for i := 0; i < len(z) && i < len(x); i++ {
+ z1, z0 := mulAddWWW_g(x[i], y, z[i])
+ lo, cc := bits.Add(uint(z0), uint(c), 0)
+ c, z[i] = Word(cc), Word(lo)
+ c += z1
+ }
+ return
+}
+
+// divWW returns q, r with q = (x1<<_W + x0 - r)/y, requiring x1 < y.
+// m = floor((_B^2 - 1)/d - _B) is an approximate reciprocal, computed as in
+// "Improved Division by Invariant Integers" (IEEE Transactions on Computers, 11 Jun. 2010).
+func divWW(x1, x0, y, m Word) (q, r Word) {
+ s := nlz(y)
+ if s != 0 {
+ x1 = x1<<s | x0>>(_W-s)
+ x0 <<= s
+ y <<= s
+ }
+ d := uint(y)
+ // We know that
+ // m = ⎣(B^2-1)/d⎦-B
+ // ⎣(B^2-1)/d⎦ = m+B
+ // (B^2-1)/d = m+B+delta1 0 <= delta1 <= (d-1)/d
+ // B^2/d = m+B+delta2 0 <= delta2 <= 1
+ // The quotient we're trying to compute is
+ // quotient = ⎣(x1*B+x0)/d⎦
+ // = ⎣(x1*B*(B^2/d)+x0*(B^2/d))/B^2⎦
+ // = ⎣(x1*B*(m+B+delta2)+x0*(m+B+delta2))/B^2⎦
+ // = ⎣(x1*m+x1*B+x0)/B + x0*m/B^2 + delta2*(x1*B+x0)/B^2⎦
+ // The latter two terms of this three-term sum are between 0 and 1.
+ // So we can compute just the first term, and we will be low by at most 2.
+ t1, t0 := bits.Mul(uint(m), uint(x1))
+ _, c := bits.Add(t0, uint(x0), 0)
+ t1, _ = bits.Add(t1, uint(x1), c)
+ // The quotient is either t1, t1+1, or t1+2.
+ // We'll try t1 and adjust if needed.
+ qq := t1
+ // compute remainder r=x-d*q.
+ dq1, dq0 := bits.Mul(d, qq)
+ r0, b := bits.Sub(uint(x0), dq0, 0)
+ r1, _ := bits.Sub(uint(x1), dq1, b)
+ // The remainder we just computed is bounded above by B+d:
+ // r = x1*B + x0 - d*q.
+ // = x1*B + x0 - d*⎣(x1*m+x1*B+x0)/B⎦
+ // = x1*B + x0 - d*((x1*m+x1*B+x0)/B-alpha) 0 <= alpha < 1
+ // = x1*B + x0 - x1*d/B*m - x1*d - x0*d/B + d*alpha
+ // = x1*B + x0 - x1*d/B*⎣(B^2-1)/d-B⎦ - x1*d - x0*d/B + d*alpha
+ // = x1*B + x0 - x1*d/B*⎣(B^2-1)/d-B⎦ - x1*d - x0*d/B + d*alpha
+ // = x1*B + x0 - x1*d/B*((B^2-1)/d-B-beta) - x1*d - x0*d/B + d*alpha 0 <= beta < 1
+ // = x1*B + x0 - x1*B + x1/B + x1*d + x1*d/B*beta - x1*d - x0*d/B + d*alpha
+ // = x0 + x1/B + x1*d/B*beta - x0*d/B + d*alpha
+ // = x0*(1-d/B) + x1*(1+d*beta)/B + d*alpha
+ // < B*(1-d/B) + d*B/B + d because x0<B (and 1-d/B>0), x1<d, 1+d*beta<=B, alpha<1
+ // = B - d + d + d
+ // = B+d
+ // So r1 can only be 0 or 1. If r1 is 1, then we know q was too small.
+ // Add 1 to q and subtract d from r. That guarantees that r is <B, so
+ // we no longer need to keep track of r1.
+ if r1 != 0 {
+ qq++
+ r0 -= d
+ }
+ // If the remainder is still too large, increment q one more time.
+ if r0 >= d {
+ qq++
+ r0 -= d
+ }
+ return Word(qq), Word(r0 >> s)
+}
+
+// reciprocalWord returns the reciprocal of the divisor. rec = floor(( _B^2 - 1 ) / u - _B). u = d1 << nlz(d1).
+func reciprocalWord(d1 Word) Word {
+ u := uint(d1 << nlz(d1))
+ x1 := ^u
+ x0 := uint(_M)
+ rec, _ := bits.Div(x1, x0, u) // (_B^2-1)/U-_B = (_B*(_M-C)+_M)/U
+ return Word(rec)
+}
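
A sketch cross-checking the reciprocal-based divide against the hardware 128-by-64 division, written as if it lived inside the package (test name hypothetical, 64-bit Word assumed):

    package big

    import (
        "math/bits"
        "testing"
    )

    // TestDivWWMatchesBitsDiv is a hypothetical spot check: for x1 < y,
    // divWW with the precomputed reciprocal must agree with bits.Div.
    func TestDivWWMatchesBitsDiv(t *testing.T) {
        y := Word(0x9e3779b97f4a7c15) // arbitrary divisor
        m := reciprocalWord(y)
        x1, x0 := Word(0x0123456789abcdef), Word(0xfedcba9876543210)
        q, r := divWW(x1, x0, y, m)
        wq, wr := bits.Div(uint(x1), uint(x0), uint(y))
        if uint(q) != wq || uint(r) != wr {
            t.Fatalf("divWW = %#x, %#x; want %#x, %#x", q, r, wq, wr)
        }
    }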
diff --git a/contrib/go/_std_1.18/src/math/big/arith_amd64.go b/contrib/go/_std_1.19/src/math/big/arith_amd64.go
index 89108fe149..89108fe149 100644
--- a/contrib/go/_std_1.18/src/math/big/arith_amd64.go
+++ b/contrib/go/_std_1.19/src/math/big/arith_amd64.go
diff --git a/contrib/go/_std_1.19/src/math/big/arith_amd64.s b/contrib/go/_std_1.19/src/math/big/arith_amd64.s
new file mode 100644
index 0000000000..b1e914c2bd
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/big/arith_amd64.s
@@ -0,0 +1,516 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !math_big_pure_go
+// +build !math_big_pure_go
+
+#include "textflag.h"
+
+// This file provides fast assembly versions for the elementary
+// arithmetic operations on vectors implemented in arith.go.
+
+// The carry bit is saved with SBBQ Rx, Rx: if the carry was set, Rx is -1, otherwise it is 0.
+// It is restored with ADDQ Rx, Rx: if Rx was -1 the carry is set, otherwise it is cleared.
+// This is faster than using rotate instructions.
+
+// func addVV(z, x, y []Word) (c Word)
+TEXT ·addVV(SB),NOSPLIT,$0
+ MOVQ z_len+8(FP), DI
+ MOVQ x+24(FP), R8
+ MOVQ y+48(FP), R9
+ MOVQ z+0(FP), R10
+
+ MOVQ $0, CX // c = 0
+ MOVQ $0, SI // i = 0
+
+ // s/JL/JMP/ below to disable the unrolled loop
+ SUBQ $4, DI // n -= 4
+ JL V1 // if n < 0 goto V1
+
+U1: // n >= 0
+ // regular loop body unrolled 4x
+ ADDQ CX, CX // restore CF
+ MOVQ 0(R8)(SI*8), R11
+ MOVQ 8(R8)(SI*8), R12
+ MOVQ 16(R8)(SI*8), R13
+ MOVQ 24(R8)(SI*8), R14
+ ADCQ 0(R9)(SI*8), R11
+ ADCQ 8(R9)(SI*8), R12
+ ADCQ 16(R9)(SI*8), R13
+ ADCQ 24(R9)(SI*8), R14
+ MOVQ R11, 0(R10)(SI*8)
+ MOVQ R12, 8(R10)(SI*8)
+ MOVQ R13, 16(R10)(SI*8)
+ MOVQ R14, 24(R10)(SI*8)
+ SBBQ CX, CX // save CF
+
+ ADDQ $4, SI // i += 4
+ SUBQ $4, DI // n -= 4
+ JGE U1 // if n >= 0 goto U1
+
+V1: ADDQ $4, DI // n += 4
+ JLE E1 // if n <= 0 goto E1
+
+L1: // n > 0
+ ADDQ CX, CX // restore CF
+ MOVQ 0(R8)(SI*8), R11
+ ADCQ 0(R9)(SI*8), R11
+ MOVQ R11, 0(R10)(SI*8)
+ SBBQ CX, CX // save CF
+
+ ADDQ $1, SI // i++
+ SUBQ $1, DI // n--
+ JG L1 // if n > 0 goto L1
+
+E1: NEGQ CX
+ MOVQ CX, c+72(FP) // return c
+ RET
+
+
+// func subVV(z, x, y []Word) (c Word)
+// (same as addVV except for SBBQ instead of ADCQ and label names)
+TEXT ·subVV(SB),NOSPLIT,$0
+ MOVQ z_len+8(FP), DI
+ MOVQ x+24(FP), R8
+ MOVQ y+48(FP), R9
+ MOVQ z+0(FP), R10
+
+ MOVQ $0, CX // c = 0
+ MOVQ $0, SI // i = 0
+
+ // s/JL/JMP/ below to disable the unrolled loop
+ SUBQ $4, DI // n -= 4
+ JL V2 // if n < 0 goto V2
+
+U2: // n >= 0
+ // regular loop body unrolled 4x
+ ADDQ CX, CX // restore CF
+ MOVQ 0(R8)(SI*8), R11
+ MOVQ 8(R8)(SI*8), R12
+ MOVQ 16(R8)(SI*8), R13
+ MOVQ 24(R8)(SI*8), R14
+ SBBQ 0(R9)(SI*8), R11
+ SBBQ 8(R9)(SI*8), R12
+ SBBQ 16(R9)(SI*8), R13
+ SBBQ 24(R9)(SI*8), R14
+ MOVQ R11, 0(R10)(SI*8)
+ MOVQ R12, 8(R10)(SI*8)
+ MOVQ R13, 16(R10)(SI*8)
+ MOVQ R14, 24(R10)(SI*8)
+ SBBQ CX, CX // save CF
+
+ ADDQ $4, SI // i += 4
+ SUBQ $4, DI // n -= 4
+ JGE U2 // if n >= 0 goto U2
+
+V2: ADDQ $4, DI // n += 4
+ JLE E2 // if n <= 0 goto E2
+
+L2: // n > 0
+ ADDQ CX, CX // restore CF
+ MOVQ 0(R8)(SI*8), R11
+ SBBQ 0(R9)(SI*8), R11
+ MOVQ R11, 0(R10)(SI*8)
+ SBBQ CX, CX // save CF
+
+ ADDQ $1, SI // i++
+ SUBQ $1, DI // n--
+ JG L2 // if n > 0 goto L2
+
+E2: NEGQ CX
+ MOVQ CX, c+72(FP) // return c
+ RET
+
+
+// func addVW(z, x []Word, y Word) (c Word)
+TEXT ·addVW(SB),NOSPLIT,$0
+ MOVQ z_len+8(FP), DI
+ CMPQ DI, $32
+ JG large
+ MOVQ x+24(FP), R8
+ MOVQ y+48(FP), CX // c = y
+ MOVQ z+0(FP), R10
+
+ MOVQ $0, SI // i = 0
+
+ // s/JL/JMP/ below to disable the unrolled loop
+ SUBQ $4, DI // n -= 4
+ JL V3 // if n < 4 goto V3
+
+U3: // n >= 0
+ // regular loop body unrolled 4x
+ MOVQ 0(R8)(SI*8), R11
+ MOVQ 8(R8)(SI*8), R12
+ MOVQ 16(R8)(SI*8), R13
+ MOVQ 24(R8)(SI*8), R14
+ ADDQ CX, R11
+ ADCQ $0, R12
+ ADCQ $0, R13
+ ADCQ $0, R14
+ SBBQ CX, CX // save CF
+ NEGQ CX
+ MOVQ R11, 0(R10)(SI*8)
+ MOVQ R12, 8(R10)(SI*8)
+ MOVQ R13, 16(R10)(SI*8)
+ MOVQ R14, 24(R10)(SI*8)
+
+ ADDQ $4, SI // i += 4
+ SUBQ $4, DI // n -= 4
+ JGE U3 // if n >= 0 goto U3
+
+V3: ADDQ $4, DI // n += 4
+ JLE E3 // if n <= 0 goto E3
+
+L3: // n > 0
+ ADDQ 0(R8)(SI*8), CX
+ MOVQ CX, 0(R10)(SI*8)
+ SBBQ CX, CX // save CF
+ NEGQ CX
+
+ ADDQ $1, SI // i++
+ SUBQ $1, DI // n--
+ JG L3 // if n > 0 goto L3
+
+E3: MOVQ CX, c+56(FP) // return c
+ RET
+large:
+ JMP ·addVWlarge(SB)
+
+
+// func subVW(z, x []Word, y Word) (c Word)
+// (same as addVW except for SUBQ/SBBQ instead of ADDQ/ADCQ and label names)
+TEXT ·subVW(SB),NOSPLIT,$0
+ MOVQ z_len+8(FP), DI
+ CMPQ DI, $32
+ JG large
+ MOVQ x+24(FP), R8
+ MOVQ y+48(FP), CX // c = y
+ MOVQ z+0(FP), R10
+
+ MOVQ $0, SI // i = 0
+
+ // s/JL/JMP/ below to disable the unrolled loop
+ SUBQ $4, DI // n -= 4
+ JL V4 // if n < 4 goto V4
+
+U4: // n >= 0
+ // regular loop body unrolled 4x
+ MOVQ 0(R8)(SI*8), R11
+ MOVQ 8(R8)(SI*8), R12
+ MOVQ 16(R8)(SI*8), R13
+ MOVQ 24(R8)(SI*8), R14
+ SUBQ CX, R11
+ SBBQ $0, R12
+ SBBQ $0, R13
+ SBBQ $0, R14
+ SBBQ CX, CX // save CF
+ NEGQ CX
+ MOVQ R11, 0(R10)(SI*8)
+ MOVQ R12, 8(R10)(SI*8)
+ MOVQ R13, 16(R10)(SI*8)
+ MOVQ R14, 24(R10)(SI*8)
+
+ ADDQ $4, SI // i += 4
+ SUBQ $4, DI // n -= 4
+ JGE U4 // if n >= 0 goto U4
+
+V4: ADDQ $4, DI // n += 4
+ JLE E4 // if n <= 0 goto E4
+
+L4: // n > 0
+ MOVQ 0(R8)(SI*8), R11
+ SUBQ CX, R11
+ MOVQ R11, 0(R10)(SI*8)
+ SBBQ CX, CX // save CF
+ NEGQ CX
+
+ ADDQ $1, SI // i++
+ SUBQ $1, DI // n--
+ JG L4 // if n > 0 goto L4
+
+E4: MOVQ CX, c+56(FP) // return c
+ RET
+large:
+ JMP ·subVWlarge(SB)
+
+
+// func shlVU(z, x []Word, s uint) (c Word)
+TEXT ·shlVU(SB),NOSPLIT,$0
+ MOVQ z_len+8(FP), BX // i = z
+ SUBQ $1, BX // i--
+ JL X8b // i < 0 (n <= 0)
+
+ // n > 0
+ MOVQ z+0(FP), R10
+ MOVQ x+24(FP), R8
+ MOVQ s+48(FP), CX
+ MOVQ (R8)(BX*8), AX // w1 = x[n-1]
+ MOVQ $0, DX
+ SHLQ CX, AX, DX // w1>>ŝ
+ MOVQ DX, c+56(FP)
+
+ CMPQ BX, $0
+ JLE X8a // i <= 0
+
+ // i > 0
+L8: MOVQ AX, DX // w = w1
+ MOVQ -8(R8)(BX*8), AX // w1 = x[i-1]
+ SHLQ CX, AX, DX // w<<s | w1>>ŝ
+ MOVQ DX, (R10)(BX*8) // z[i] = w<<s | w1>>ŝ
+ SUBQ $1, BX // i--
+ JG L8 // i > 0
+
+ // i <= 0
+X8a: SHLQ CX, AX // w1<<s
+ MOVQ AX, (R10) // z[0] = w1<<s
+ RET
+
+X8b: MOVQ $0, c+56(FP)
+ RET
+
+
+// func shrVU(z, x []Word, s uint) (c Word)
+TEXT ·shrVU(SB),NOSPLIT,$0
+ MOVQ z_len+8(FP), R11
+ SUBQ $1, R11 // n--
+ JL X9b // n < 0 (n <= 0)
+
+ // n > 0
+ MOVQ z+0(FP), R10
+ MOVQ x+24(FP), R8
+ MOVQ s+48(FP), CX
+ MOVQ (R8), AX // w1 = x[0]
+ MOVQ $0, DX
+ SHRQ CX, AX, DX // w1<<ŝ
+ MOVQ DX, c+56(FP)
+
+ MOVQ $0, BX // i = 0
+ JMP E9
+
+ // i < n-1
+L9: MOVQ AX, DX // w = w1
+ MOVQ 8(R8)(BX*8), AX // w1 = x[i+1]
+ SHRQ CX, AX, DX // w>>s | w1<<ŝ
+ MOVQ DX, (R10)(BX*8) // z[i] = w>>s | w1<<ŝ
+ ADDQ $1, BX // i++
+
+E9: CMPQ BX, R11
+ JL L9 // i < n-1
+
+ // i >= n-1
+X9a: SHRQ CX, AX // w1>>s
+ MOVQ AX, (R10)(R11*8) // z[n-1] = w1>>s
+ RET
+
+X9b: MOVQ $0, c+56(FP)
+ RET
+
+
+// func mulAddVWW(z, x []Word, y, r Word) (c Word)
+TEXT ·mulAddVWW(SB),NOSPLIT,$0
+ MOVQ z+0(FP), R10
+ MOVQ x+24(FP), R8
+ MOVQ y+48(FP), R9
+ MOVQ r+56(FP), CX // c = r
+ MOVQ z_len+8(FP), R11
+ MOVQ $0, BX // i = 0
+
+ CMPQ R11, $4
+ JL E5
+
+U5: // i+4 <= n
+ // regular loop body unrolled 4x
+ MOVQ (0*8)(R8)(BX*8), AX
+ MULQ R9
+ ADDQ CX, AX
+ ADCQ $0, DX
+ MOVQ AX, (0*8)(R10)(BX*8)
+ MOVQ DX, CX
+ MOVQ (1*8)(R8)(BX*8), AX
+ MULQ R9
+ ADDQ CX, AX
+ ADCQ $0, DX
+ MOVQ AX, (1*8)(R10)(BX*8)
+ MOVQ DX, CX
+ MOVQ (2*8)(R8)(BX*8), AX
+ MULQ R9
+ ADDQ CX, AX
+ ADCQ $0, DX
+ MOVQ AX, (2*8)(R10)(BX*8)
+ MOVQ DX, CX
+ MOVQ (3*8)(R8)(BX*8), AX
+ MULQ R9
+ ADDQ CX, AX
+ ADCQ $0, DX
+ MOVQ AX, (3*8)(R10)(BX*8)
+ MOVQ DX, CX
+ ADDQ $4, BX // i += 4
+
+ LEAQ 4(BX), DX
+ CMPQ DX, R11
+ JLE U5
+ JMP E5
+
+L5: MOVQ (R8)(BX*8), AX
+ MULQ R9
+ ADDQ CX, AX
+ ADCQ $0, DX
+ MOVQ AX, (R10)(BX*8)
+ MOVQ DX, CX
+ ADDQ $1, BX // i++
+
+E5: CMPQ BX, R11 // i < n
+ JL L5
+
+ MOVQ CX, c+64(FP)
+ RET
+
+
+// func addMulVVW(z, x []Word, y Word) (c Word)
+TEXT ·addMulVVW(SB),NOSPLIT,$0
+ CMPB ·support_adx(SB), $1
+ JEQ adx
+ MOVQ z+0(FP), R10
+ MOVQ x+24(FP), R8
+ MOVQ y+48(FP), R9
+ MOVQ z_len+8(FP), R11
+ MOVQ $0, BX // i = 0
+ MOVQ $0, CX // c = 0
+ MOVQ R11, R12
+ ANDQ $-2, R12
+ CMPQ R11, $2
+ JAE A6
+ JMP E6
+
+A6:
+ MOVQ (R8)(BX*8), AX
+ MULQ R9
+ ADDQ (R10)(BX*8), AX
+ ADCQ $0, DX
+ ADDQ CX, AX
+ ADCQ $0, DX
+ MOVQ DX, CX
+ MOVQ AX, (R10)(BX*8)
+
+ MOVQ (8)(R8)(BX*8), AX
+ MULQ R9
+ ADDQ (8)(R10)(BX*8), AX
+ ADCQ $0, DX
+ ADDQ CX, AX
+ ADCQ $0, DX
+ MOVQ DX, CX
+ MOVQ AX, (8)(R10)(BX*8)
+
+ ADDQ $2, BX
+ CMPQ BX, R12
+ JL A6
+ JMP E6
+
+L6: MOVQ (R8)(BX*8), AX
+ MULQ R9
+ ADDQ CX, AX
+ ADCQ $0, DX
+ ADDQ AX, (R10)(BX*8)
+ ADCQ $0, DX
+ MOVQ DX, CX
+ ADDQ $1, BX // i++
+
+E6: CMPQ BX, R11 // i < n
+ JL L6
+
+ MOVQ CX, c+56(FP)
+ RET
+
+adx:
+ MOVQ z_len+8(FP), R11
+ MOVQ z+0(FP), R10
+ MOVQ x+24(FP), R8
+ MOVQ y+48(FP), DX
+ MOVQ $0, BX // i = 0
+ MOVQ $0, CX // carry
+ CMPQ R11, $8
+ JAE adx_loop_header
+ CMPQ BX, R11
+ JL adx_short
+ MOVQ CX, c+56(FP)
+ RET
+
+adx_loop_header:
+ MOVQ R11, R13
+ ANDQ $-8, R13
+adx_loop:
+ XORQ R9, R9 // unset flags
+ MULXQ (R8), SI, DI
+ ADCXQ CX,SI
+ ADOXQ (R10), SI
+ MOVQ SI,(R10)
+
+ MULXQ 8(R8), AX, CX
+ ADCXQ DI, AX
+ ADOXQ 8(R10), AX
+ MOVQ AX, 8(R10)
+
+ MULXQ 16(R8), SI, DI
+ ADCXQ CX, SI
+ ADOXQ 16(R10), SI
+ MOVQ SI, 16(R10)
+
+ MULXQ 24(R8), AX, CX
+ ADCXQ DI, AX
+ ADOXQ 24(R10), AX
+ MOVQ AX, 24(R10)
+
+ MULXQ 32(R8), SI, DI
+ ADCXQ CX, SI
+ ADOXQ 32(R10), SI
+ MOVQ SI, 32(R10)
+
+ MULXQ 40(R8), AX, CX
+ ADCXQ DI, AX
+ ADOXQ 40(R10), AX
+ MOVQ AX, 40(R10)
+
+ MULXQ 48(R8), SI, DI
+ ADCXQ CX, SI
+ ADOXQ 48(R10), SI
+ MOVQ SI, 48(R10)
+
+ MULXQ 56(R8), AX, CX
+ ADCXQ DI, AX
+ ADOXQ 56(R10), AX
+ MOVQ AX, 56(R10)
+
+ ADCXQ R9, CX
+ ADOXQ R9, CX
+
+ ADDQ $64, R8
+ ADDQ $64, R10
+ ADDQ $8, BX
+
+ CMPQ BX, R13
+ JL adx_loop
+ MOVQ z+0(FP), R10
+ MOVQ x+24(FP), R8
+ CMPQ BX, R11
+ JL adx_short
+ MOVQ CX, c+56(FP)
+ RET
+
+adx_short:
+ MULXQ (R8)(BX*8), SI, DI
+ ADDQ CX, SI
+ ADCQ $0, DI
+ ADDQ SI, (R10)(BX*8)
+ ADCQ $0, DI
+ MOVQ DI, CX
+ ADDQ $1, BX // i++
+
+ CMPQ BX, R11
+ JL adx_short
+
+ MOVQ CX, c+56(FP)
+ RET
+
+
+
diff --git a/contrib/go/_std_1.19/src/math/big/arith_decl.go b/contrib/go/_std_1.19/src/math/big/arith_decl.go
new file mode 100644
index 0000000000..301aa55f1a
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/big/arith_decl.go
@@ -0,0 +1,18 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !math_big_pure_go
+// +build !math_big_pure_go
+
+package big
+
+// implemented in arith_$GOARCH.s
+func addVV(z, x, y []Word) (c Word)
+func subVV(z, x, y []Word) (c Word)
+func addVW(z, x []Word, y Word) (c Word)
+func subVW(z, x []Word, y Word) (c Word)
+func shlVU(z, x []Word, s uint) (c Word)
+func shrVU(z, x []Word, s uint) (c Word)
+func mulAddVWW(z, x []Word, y, r Word) (c Word)
+func addMulVVW(z, x []Word, y Word) (c Word)
diff --git a/contrib/go/_std_1.18/src/math/big/decimal.go b/contrib/go/_std_1.19/src/math/big/decimal.go
index 716f03bfa4..716f03bfa4 100644
--- a/contrib/go/_std_1.18/src/math/big/decimal.go
+++ b/contrib/go/_std_1.19/src/math/big/decimal.go
diff --git a/contrib/go/_std_1.18/src/math/big/doc.go b/contrib/go/_std_1.19/src/math/big/doc.go
index 65ed019b74..65ed019b74 100644
--- a/contrib/go/_std_1.18/src/math/big/doc.go
+++ b/contrib/go/_std_1.19/src/math/big/doc.go
diff --git a/contrib/go/_std_1.19/src/math/big/float.go b/contrib/go/_std_1.19/src/math/big/float.go
new file mode 100644
index 0000000000..84666d817b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/big/float.go
@@ -0,0 +1,1729 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements multi-precision floating-point numbers.
+// Like in the GNU MPFR library (https://www.mpfr.org/), operands
+// can be of mixed precision. Unlike MPFR, the rounding mode is
+// not specified with each operation, but with each operand. The
+// rounding mode of the result operand determines the rounding
+// mode of an operation. This is a from-scratch implementation.
+
+package big
+
+import (
+ "fmt"
+ "math"
+ "math/bits"
+)
+
+const debugFloat = false // enable for debugging
+
+// A nonzero finite Float represents a multi-precision floating point number
+//
+// sign × mantissa × 2**exponent
+//
+// with 0.5 <= mantissa < 1.0, and MinExp <= exponent <= MaxExp.
+// A Float may also be zero (+0, -0) or infinite (+Inf, -Inf).
+// All Floats are ordered, and the ordering of two Floats x and y
+// is defined by x.Cmp(y).
+//
+// Each Float value also has a precision, rounding mode, and accuracy.
+// The precision is the maximum number of mantissa bits available to
+// represent the value. The rounding mode specifies how a result should
+// be rounded to fit into the mantissa bits, and accuracy describes the
+// rounding error with respect to the exact result.
+//
+// Unless specified otherwise, all operations (including setters) that
+// specify a *Float variable for the result (usually via the receiver
+// with the exception of MantExp), round the numeric result according
+// to the precision and rounding mode of the result variable.
+//
+// If the provided result precision is 0 (see below), it is set to the
+// precision of the argument with the largest precision value before any
+// rounding takes place, and the rounding mode remains unchanged. Thus,
+// uninitialized Floats provided as result arguments will have their
+// precision set to a reasonable value determined by the operands, and
+// their mode is the zero value for RoundingMode (ToNearestEven).
+//
+// By setting the desired precision to 24 or 53 and using matching rounding
+// mode (typically ToNearestEven), Float operations produce the same results
+// as the corresponding float32 or float64 IEEE-754 arithmetic for operands
+// that correspond to normal (i.e., not denormal) float32 or float64 numbers.
+// Exponent underflow and overflow lead to a 0 or an Infinity for different
+// values than IEEE-754 because Float exponents have a much larger range.
+//
+// The zero (uninitialized) value for a Float is ready to use and represents
+// the number +0.0 exactly, with precision 0 and rounding mode ToNearestEven.
+//
+// Operations always take pointer arguments (*Float) rather
+// than Float values, and each unique Float value requires
+// its own unique *Float pointer. To "copy" a Float value,
+// an existing (or newly allocated) Float must be set to
+// a new value using the Float.Set method; shallow copies
+// of Floats are not supported and may lead to errors.
+type Float struct {
+ prec uint32
+ mode RoundingMode
+ acc Accuracy
+ form form
+ neg bool
+ mant nat
+ exp int32
+}
+
+// An ErrNaN panic is raised by a Float operation that would lead to
+// a NaN under IEEE-754 rules. An ErrNaN implements the error interface.
+type ErrNaN struct {
+ msg string
+}
+
+func (err ErrNaN) Error() string {
+ return err.msg
+}
+
+// NewFloat allocates and returns a new Float set to x,
+// with precision 53 and rounding mode ToNearestEven.
+// NewFloat panics with ErrNaN if x is a NaN.
+func NewFloat(x float64) *Float {
+ if math.IsNaN(x) {
+ panic(ErrNaN{"NewFloat(NaN)"})
+ }
+ return new(Float).SetFloat64(x)
+}
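
A small sketch of the precision model described above, running the same computation at 53 and 200 mantissa bits:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        // NewFloat: precision 53, ToNearestEven - float64-like results.
        x := big.NewFloat(2)
        x.Sqrt(x)

        // The same value carried to 200 bits before taking the root.
        y := big.NewFloat(2).SetPrec(200)
        y.Sqrt(y)

        fmt.Println(x.Text('g', 17))
        fmt.Println(y.Text('g', 60))
        fmt.Println(x.Acc(), y.Acc()) // inexact results: Below or Above
    }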
+
+// Exponent and precision limits.
+const (
+ MaxExp = math.MaxInt32 // largest supported exponent
+ MinExp = math.MinInt32 // smallest supported exponent
+ MaxPrec = math.MaxUint32 // largest (theoretically) supported precision; likely memory-limited
+)
+
+// Internal representation: The mantissa bits x.mant of a nonzero finite
+// Float x are stored in a nat slice long enough to hold up to x.prec bits;
+// the slice may (but doesn't have to) be shorter if the mantissa contains
+// trailing 0 bits. x.mant is normalized if the msb of x.mant == 1 (i.e.,
+// the msb is shifted all the way "to the left"). Thus, if the mantissa has
+// trailing 0 bits or x.prec is not a multiple of the Word size _W,
+// x.mant[0] has trailing zero bits. The msb of the mantissa corresponds
+// to the value 0.5; the exponent x.exp shifts the binary point as needed.
+//
+// A zero or non-finite Float x ignores x.mant and x.exp.
+//
+// x form neg mant exp
+// ----------------------------------------------------------
+// ±0 zero sign - -
+// 0 < |x| < +Inf finite sign mantissa exponent
+// ±Inf inf sign - -
+
+// A form value describes the internal representation.
+type form byte
+
+// The form value order is relevant - do not change!
+const (
+ zero form = iota
+ finite
+ inf
+)
+
+// RoundingMode determines how a Float value is rounded to the
+// desired precision. Rounding may change the Float value; the
+// rounding error is described by the Float's Accuracy.
+type RoundingMode byte
+
+// These constants define supported rounding modes.
+const (
+ ToNearestEven RoundingMode = iota // == IEEE 754-2008 roundTiesToEven
+ ToNearestAway // == IEEE 754-2008 roundTiesToAway
+ ToZero // == IEEE 754-2008 roundTowardZero
+ AwayFromZero // no IEEE 754-2008 equivalent
+ ToNegativeInf // == IEEE 754-2008 roundTowardNegative
+ ToPositiveInf // == IEEE 754-2008 roundTowardPositive
+)
+
+//go:generate stringer -type=RoundingMode
+
+// Accuracy describes the rounding error produced by the most recent
+// operation that generated a Float value, relative to the exact value.
+type Accuracy int8
+
+// Constants describing the Accuracy of a Float.
+const (
+ Below Accuracy = -1
+ Exact Accuracy = 0
+ Above Accuracy = +1
+)
+
+//go:generate stringer -type=Accuracy
+
+// SetPrec sets z's precision to prec and returns the (possibly) rounded
+// value of z. Rounding occurs according to z's rounding mode if the mantissa
+// cannot be represented in prec bits without loss of precision.
+// SetPrec(0) maps all finite values to ±0; infinite values remain unchanged.
+// If prec > MaxPrec, it is set to MaxPrec.
+func (z *Float) SetPrec(prec uint) *Float {
+ z.acc = Exact // optimistically assume no rounding is needed
+
+ // special case
+ if prec == 0 {
+ z.prec = 0
+ if z.form == finite {
+ // truncate z to 0
+ z.acc = makeAcc(z.neg)
+ z.form = zero
+ }
+ return z
+ }
+
+ // general case
+ if prec > MaxPrec {
+ prec = MaxPrec
+ }
+ old := z.prec
+ z.prec = uint32(prec)
+ if z.prec < old {
+ z.round(0)
+ }
+ return z
+}
+
+func makeAcc(above bool) Accuracy {
+ if above {
+ return Above
+ }
+ return Below
+}
+
+// SetMode sets z's rounding mode to mode and returns an exact z.
+// z remains unchanged otherwise.
+// z.SetMode(z.Mode()) is a cheap way to set z's accuracy to Exact.
+func (z *Float) SetMode(mode RoundingMode) *Float {
+ z.mode = mode
+ z.acc = Exact
+ return z
+}
+
+// Prec returns the mantissa precision of x in bits.
+// The result may be 0 for |x| == 0 and |x| == Inf.
+func (x *Float) Prec() uint {
+ return uint(x.prec)
+}
+
+// MinPrec returns the minimum precision required to represent x exactly
+// (i.e., the smallest prec before x.SetPrec(prec) would start rounding x).
+// The result is 0 for |x| == 0 and |x| == Inf.
+func (x *Float) MinPrec() uint {
+ if x.form != finite {
+ return 0
+ }
+ return uint(len(x.mant))*_W - x.mant.trailingZeroBits()
+}
+
+// Mode returns the rounding mode of x.
+func (x *Float) Mode() RoundingMode {
+ return x.mode
+}
+
+// Acc returns the accuracy of x produced by the most recent
+// operation, unless explicitly documented otherwise by that
+// operation.
+func (x *Float) Acc() Accuracy {
+ return x.acc
+}
+
+// Sign returns:
+//
+// -1 if x < 0
+// 0 if x is ±0
+// +1 if x > 0
+func (x *Float) Sign() int {
+ if debugFloat {
+ x.validate()
+ }
+ if x.form == zero {
+ return 0
+ }
+ if x.neg {
+ return -1
+ }
+ return 1
+}
+
+// MantExp breaks x into its mantissa and exponent components
+// and returns the exponent. If a non-nil mant argument is
+// provided its value is set to the mantissa of x, with the
+// same precision and rounding mode as x. The components
+// satisfy x == mant × 2**exp, with 0.5 <= |mant| < 1.0.
+// Calling MantExp with a nil argument is an efficient way to
+// get the exponent of the receiver.
+//
+// Special cases are:
+//
+// ( ±0).MantExp(mant) = 0, with mant set to ±0
+// (±Inf).MantExp(mant) = 0, with mant set to ±Inf
+//
+// x and mant may be the same in which case x is set to its
+// mantissa value.
+func (x *Float) MantExp(mant *Float) (exp int) {
+ if debugFloat {
+ x.validate()
+ }
+ if x.form == finite {
+ exp = int(x.exp)
+ }
+ if mant != nil {
+ mant.Copy(x)
+ if mant.form == finite {
+ mant.exp = 0
+ }
+ }
+ return
+}
+
+func (z *Float) setExpAndRound(exp int64, sbit uint) {
+ if exp < MinExp {
+ // underflow
+ z.acc = makeAcc(z.neg)
+ z.form = zero
+ return
+ }
+
+ if exp > MaxExp {
+ // overflow
+ z.acc = makeAcc(!z.neg)
+ z.form = inf
+ return
+ }
+
+ z.form = finite
+ z.exp = int32(exp)
+ z.round(sbit)
+}
+
+// SetMantExp sets z to mant × 2**exp and returns z.
+// The result z has the same precision and rounding mode
+// as mant. SetMantExp is an inverse of MantExp but does
+// not require 0.5 <= |mant| < 1.0. Specifically, for a
+// given x of type *Float, SetMantExp relates to MantExp
+// as follows:
+//
+// mant := new(Float)
+// new(Float).SetMantExp(mant, x.MantExp(mant)).Cmp(x) == 0
+//
+// Special cases are:
+//
+// z.SetMantExp( ±0, exp) = ±0
+// z.SetMantExp(±Inf, exp) = ±Inf
+//
+// z and mant may be the same in which case z's exponent
+// is set to exp.
+func (z *Float) SetMantExp(mant *Float, exp int) *Float {
+ if debugFloat {
+ z.validate()
+ mant.validate()
+ }
+ z.Copy(mant)
+
+ if z.form == finite {
+ // 0 < |mant| < +Inf
+ z.setExpAndRound(int64(z.exp)+int64(exp), 0)
+ }
+ return z
+}
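+
+// A round-trip sketch (illustrative, not from the upstream source):
+//
+//	x := NewFloat(12.5)
+//	mant := new(Float)
+//	exp := x.MantExp(mant)                // mant == 0.78125, exp == 4
+//	y := new(Float).SetMantExp(mant, exp) // y.Cmp(x) == 0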
+
+// Signbit reports whether x is negative or negative zero.
+func (x *Float) Signbit() bool {
+ return x.neg
+}
+
+// IsInf reports whether x is +Inf or -Inf.
+func (x *Float) IsInf() bool {
+ return x.form == inf
+}
+
+// IsInt reports whether x is an integer.
+// ±Inf values are not integers.
+func (x *Float) IsInt() bool {
+ if debugFloat {
+ x.validate()
+ }
+ // special cases
+ if x.form != finite {
+ return x.form == zero
+ }
+ // x.form == finite
+ if x.exp <= 0 {
+ return false
+ }
+ // x.exp > 0
+ return x.prec <= uint32(x.exp) || x.MinPrec() <= uint(x.exp) // not enough bits for fractional mantissa
+}
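+
+// Illustrative uses (not part of the upstream source):
+//
+//	NewFloat(42).IsInt()             // true
+//	NewFloat(1.5).IsInt()            // false
+//	new(Float).SetInf(false).IsInt() // false: ±Inf is never an integer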
+
+// debugging support
+func (x *Float) validate() {
+ if !debugFloat {
+ // avoid performance bugs
+ panic("validate called but debugFloat is not set")
+ }
+ if x.form != finite {
+ return
+ }
+ m := len(x.mant)
+ if m == 0 {
+ panic("nonzero finite number with empty mantissa")
+ }
+ const msb = 1 << (_W - 1)
+ if x.mant[m-1]&msb == 0 {
+ panic(fmt.Sprintf("msb not set in last word %#x of %s", x.mant[m-1], x.Text('p', 0)))
+ }
+ if x.prec == 0 {
+ panic("zero precision finite number")
+ }
+}
+
+// round rounds z according to z.mode to z.prec bits and sets z.acc accordingly.
+// sbit must be 0 or 1 and summarizes any "sticky bit" information one might
+// have before calling round. z's mantissa must be normalized (with the msb set)
+// or empty.
+//
+// CAUTION: The rounding modes ToNegativeInf, ToPositiveInf are affected by the
+// sign of z. For correct rounding, the sign of z must be set correctly before
+// calling round.
+func (z *Float) round(sbit uint) {
+ if debugFloat {
+ z.validate()
+ }
+
+ z.acc = Exact
+ if z.form != finite {
+ // ±0 or ±Inf => nothing left to do
+ return
+ }
+ // z.form == finite && len(z.mant) > 0
+ // m > 0 implies z.prec > 0 (checked by validate)
+
+ m := uint32(len(z.mant)) // present mantissa length in words
+ bits := m * _W // present mantissa bits; bits > 0
+ if bits <= z.prec {
+ // mantissa fits => nothing to do
+ return
+ }
+ // bits > z.prec
+
+ // Rounding is based on two bits: the rounding bit (rbit) and the
+ // sticky bit (sbit). The rbit is the bit immediately before the
+ // z.prec leading mantissa bits (the "0.5"). The sbit is set if any
+ // of the bits before the rbit are set (the "0.25", "0.125", etc.):
+ //
+ //   rbit  sbit  => "fractional part"
+ //
+ //   0     0        == 0
+ //   0     1        > 0  , < 0.5
+ //   1     0        == 0.5
+ //   1     1        > 0.5, < 1.0
+
+ // bits > z.prec: mantissa too large => round
+ r := uint(bits - z.prec - 1) // rounding bit position; r >= 0
+ rbit := z.mant.bit(r) & 1 // rounding bit; be safe and ensure it's a single bit
+ // The sticky bit is only needed for rounding ToNearestEven
+ // or when the rounding bit is zero. Avoid computation otherwise.
+ if sbit == 0 && (rbit == 0 || z.mode == ToNearestEven) {
+ sbit = z.mant.sticky(r)
+ }
+ sbit &= 1 // be safe and ensure it's a single bit
+
+ // cut off extra words
+ n := (z.prec + (_W - 1)) / _W // mantissa length in words for desired precision
+ if m > n {
+ copy(z.mant, z.mant[m-n:]) // move n last words to front
+ z.mant = z.mant[:n]
+ }
+
+ // determine number of trailing zero bits (ntz) and compute lsb mask of mantissa's least-significant word
+ ntz := n*_W - z.prec // 0 <= ntz < _W
+ lsb := Word(1) << ntz
+
+ // round if result is inexact
+ if rbit|sbit != 0 {
+ // Make rounding decision: The result mantissa is truncated ("rounded down")
+ // by default. Decide if we need to increment, or "round up", the (unsigned)
+ // mantissa.
+ inc := false
+ switch z.mode {
+ case ToNegativeInf:
+ inc = z.neg
+ case ToZero:
+ // nothing to do
+ case ToNearestEven:
+ inc = rbit != 0 && (sbit != 0 || z.mant[0]&lsb != 0)
+ case ToNearestAway:
+ inc = rbit != 0
+ case AwayFromZero:
+ inc = true
+ case ToPositiveInf:
+ inc = !z.neg
+ default:
+ panic("unreachable")
+ }
+
+ // A positive result (!z.neg) is Above the exact result if we increment,
+ // and it's Below if we truncate (Exact results require no rounding).
+ // For a negative result (z.neg) it is exactly the opposite.
+ z.acc = makeAcc(inc != z.neg)
+
+ if inc {
+ // add 1 to mantissa
+ if addVW(z.mant, z.mant, lsb) != 0 {
+ // mantissa overflow => adjust exponent
+ if z.exp >= MaxExp {
+ // exponent overflow
+ z.form = inf
+ return
+ }
+ z.exp++
+ // adjust mantissa: divide by 2 to compensate for exponent adjustment
+ shrVU(z.mant, z.mant, 1)
+ // set msb == carry == 1 from the mantissa overflow above
+ const msb = 1 << (_W - 1)
+ z.mant[n-1] |= msb
+ }
+ }
+ }
+
+ // zero out trailing bits in least-significant word
+ z.mant[0] &^= lsb - 1
+
+ if debugFloat {
+ z.validate()
+ }
+}
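+
+// A worked example of round (illustrative, not from the upstream source):
+// 11 == 1011b needs 4 mantissa bits. At prec 3, rbit == 1 and sbit == 0
+// (an exact tie), so ToNearestEven rounds up to the even mantissa 110b:
+//
+//	x := new(Float).SetPrec(3).SetInt64(11)                  // 12 (1100b), x.Acc() == Above
+//	y := new(Float).SetPrec(3).SetMode(ToZero).SetInt64(11)  // 10 (1010b), y.Acc() == Below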
+
+func (z *Float) setBits64(neg bool, x uint64) *Float {
+ if z.prec == 0 {
+ z.prec = 64
+ }
+ z.acc = Exact
+ z.neg = neg
+ if x == 0 {
+ z.form = zero
+ return z
+ }
+ // x != 0
+ z.form = finite
+ s := bits.LeadingZeros64(x)
+ z.mant = z.mant.setUint64(x << uint(s))
+ z.exp = int32(64 - s) // always fits
+ if z.prec < 64 {
+ z.round(0)
+ }
+ return z
+}
+
+// SetUint64 sets z to the (possibly rounded) value of x and returns z.
+// If z's precision is 0, it is changed to 64 (and rounding will have
+// no effect).
+func (z *Float) SetUint64(x uint64) *Float {
+ return z.setBits64(false, x)
+}
+
+// SetInt64 sets z to the (possibly rounded) value of x and returns z.
+// If z's precision is 0, it is changed to 64 (and rounding will have
+// no effect).
+func (z *Float) SetInt64(x int64) *Float {
+ u := x
+ if u < 0 {
+ u = -u
+ }
+ // We cannot simply call z.SetUint64(uint64(u)) and change
+ // the sign afterwards because the sign affects rounding.
+ return z.setBits64(x < 0, uint64(u))
+}
+
+// SetFloat64 sets z to the (possibly rounded) value of x and returns z.
+// If z's precision is 0, it is changed to 53 (and rounding will have
+// no effect). SetFloat64 panics with ErrNaN if x is a NaN.
+func (z *Float) SetFloat64(x float64) *Float {
+ if z.prec == 0 {
+ z.prec = 53
+ }
+ if math.IsNaN(x) {
+ panic(ErrNaN{"Float.SetFloat64(NaN)"})
+ }
+ z.acc = Exact
+ z.neg = math.Signbit(x) // handle -0, -Inf correctly
+ if x == 0 {
+ z.form = zero
+ return z
+ }
+ if math.IsInf(x, 0) {
+ z.form = inf
+ return z
+ }
+ // normalized x != 0
+ z.form = finite
+ fmant, exp := math.Frexp(x) // get normalized mantissa
+ z.mant = z.mant.setUint64(1<<63 | math.Float64bits(fmant)<<11)
+ z.exp = int32(exp) // always fits
+ if z.prec < 53 {
+ z.round(0)
+ }
+ return z
+}
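+
+// Illustrative sketch (not from the upstream source). Note that the
+// argument is a float64, so 0.1 below is already float64(0.1), not 1/10:
+//
+//	x := new(Float).SetFloat64(0.1)             // prec becomes 53; holds float64(0.1) exactly
+//	y := new(Float).SetPrec(10).SetFloat64(0.1) // rounded to 10 bits; y.Acc() != Exact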
+
+// fnorm normalizes mantissa m by shifting it to the left
+// such that the msb of the most-significant word (msw) is 1.
+// It returns the shift amount. It assumes that len(m) != 0.
+func fnorm(m nat) int64 {
+ if debugFloat && (len(m) == 0 || m[len(m)-1] == 0) {
+ panic("msw of mantissa is 0")
+ }
+ s := nlz(m[len(m)-1])
+ if s > 0 {
+ c := shlVU(m, m, s)
+ if debugFloat && c != 0 {
+ panic("nlz or shlVU incorrect")
+ }
+ }
+ return int64(s)
+}
+
+// SetInt sets z to the (possibly rounded) value of x and returns z.
+// If z's precision is 0, it is changed to the larger of x.BitLen()
+// or 64 (and rounding will have no effect).
+func (z *Float) SetInt(x *Int) *Float {
+ // TODO(gri) can be more efficient if z.prec > 0
+ // but small compared to the size of x, or if there
+ // are many trailing 0's.
+ bits := uint32(x.BitLen())
+ if z.prec == 0 {
+ z.prec = umax32(bits, 64)
+ }
+ z.acc = Exact
+ z.neg = x.neg
+ if len(x.abs) == 0 {
+ z.form = zero
+ return z
+ }
+ // x != 0
+ z.mant = z.mant.set(x.abs)
+ fnorm(z.mant)
+ z.setExpAndRound(int64(bits), 0)
+ return z
+}
+
+// SetRat sets z to the (possibly rounded) value of x and returns z.
+// If z's precision is 0, it is changed to the largest of a.BitLen(),
+// b.BitLen(), or 64; with x = a/b.
+func (z *Float) SetRat(x *Rat) *Float {
+ if x.IsInt() {
+ return z.SetInt(x.Num())
+ }
+ var a, b Float
+ a.SetInt(x.Num())
+ b.SetInt(x.Denom())
+ if z.prec == 0 {
+ z.prec = umax32(a.prec, b.prec)
+ }
+ return z.Quo(&a, &b)
+}
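+
+// Illustrative sketch (not from the upstream source): 1/3 has an infinite
+// binary expansion, so the result is always rounded:
+//
+//	f := new(Float).SetPrec(100).SetRat(NewRat(1, 3)) // f.Acc() != Exact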
+
+// SetInf sets z to the infinite Float -Inf if signbit is
+// set, or +Inf if signbit is not set, and returns z. The
+// precision of z is unchanged and the result is always
+// Exact.
+func (z *Float) SetInf(signbit bool) *Float {
+ z.acc = Exact
+ z.form = inf
+ z.neg = signbit
+ return z
+}
+
+// Set sets z to the (possibly rounded) value of x and returns z.
+// If z's precision is 0, it is changed to the precision of x
+// before setting z (and rounding will have no effect).
+// Rounding is performed according to z's precision and rounding
+// mode; and z's accuracy reports the result error relative to the
+// exact (not rounded) result.
+func (z *Float) Set(x *Float) *Float {
+ if debugFloat {
+ x.validate()
+ }
+ z.acc = Exact
+ if z != x {
+ z.form = x.form
+ z.neg = x.neg
+ if x.form == finite {
+ z.exp = x.exp
+ z.mant = z.mant.set(x.mant)
+ }
+ if z.prec == 0 {
+ z.prec = x.prec
+ } else if z.prec < x.prec {
+ z.round(0)
+ }
+ }
+ return z
+}
+
+// Copy sets z to x, with the same precision, rounding mode, and
+// accuracy as x, and returns z. x is not changed even if z and
+// x are the same.
+func (z *Float) Copy(x *Float) *Float {
+ if debugFloat {
+ x.validate()
+ }
+ if z != x {
+ z.prec = x.prec
+ z.mode = x.mode
+ z.acc = x.acc
+ z.form = x.form
+ z.neg = x.neg
+ if z.form == finite {
+ z.mant = z.mant.set(x.mant)
+ z.exp = x.exp
+ }
+ }
+ return z
+}
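+
+// A sketch of the Set/Copy distinction (illustrative, not from the
+// upstream source):
+//
+//	x := NewFloat(1.0 / 3.0)    // prec 53
+//	z := new(Float).SetPrec(10)
+//	z.Set(x)  // z keeps prec 10; the value is rounded to 10 bits
+//	z.Copy(x) // z adopts x's prec (53), mode, and accuracy; values now match exactly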
+
+// msb32 returns the 32 most significant bits of x.
+func msb32(x nat) uint32 {
+ i := len(x) - 1
+ if i < 0 {
+ return 0
+ }
+ if debugFloat && x[i]&(1<<(_W-1)) == 0 {
+ panic("x not normalized")
+ }
+ switch _W {
+ case 32:
+ return uint32(x[i])
+ case 64:
+ return uint32(x[i] >> 32)
+ }
+ panic("unreachable")
+}
+
+// msb64 returns the 64 most significant bits of x.
+func msb64(x nat) uint64 {
+ i := len(x) - 1
+ if i < 0 {
+ return 0
+ }
+ if debugFloat && x[i]&(1<<(_W-1)) == 0 {
+ panic("x not normalized")
+ }
+ switch _W {
+ case 32:
+ v := uint64(x[i]) << 32
+ if i > 0 {
+ v |= uint64(x[i-1])
+ }
+ return v
+ case 64:
+ return uint64(x[i])
+ }
+ panic("unreachable")
+}
+
+// Uint64 returns the unsigned integer resulting from truncating x
+// towards zero. If 0 <= x <= math.MaxUint64, the result is Exact
+// if x is an integer and Below otherwise.
+// The result is (0, Above) for x < 0, and (math.MaxUint64, Below)
+// for x > math.MaxUint64.
+func (x *Float) Uint64() (uint64, Accuracy) {
+ if debugFloat {
+ x.validate()
+ }
+
+ switch x.form {
+ case finite:
+ if x.neg {
+ return 0, Above
+ }
+ // 0 < x < +Inf
+ if x.exp <= 0 {
+ // 0 < x < 1
+ return 0, Below
+ }
+ // 1 <= x < Inf
+ if x.exp <= 64 {
+ // u = trunc(x) fits into a uint64
+ u := msb64(x.mant) >> (64 - uint32(x.exp))
+ if x.MinPrec() <= 64 {
+ return u, Exact
+ }
+ return u, Below // x truncated
+ }
+ // x too large
+ return math.MaxUint64, Below
+
+ case zero:
+ return 0, Exact
+
+ case inf:
+ if x.neg {
+ return 0, Above
+ }
+ return math.MaxUint64, Below
+ }
+
+ panic("unreachable")
+}
+
+// Int64 returns the integer resulting from truncating x towards zero.
+// If math.MinInt64 <= x <= math.MaxInt64, the result is Exact if x is
+// an integer, and Above (x < 0) or Below (x > 0) otherwise.
+// The result is (math.MinInt64, Above) for x < math.MinInt64,
+// and (math.MaxInt64, Below) for x > math.MaxInt64.
+func (x *Float) Int64() (int64, Accuracy) {
+ if debugFloat {
+ x.validate()
+ }
+
+ switch x.form {
+ case finite:
+ // 0 < |x| < +Inf
+ acc := makeAcc(x.neg)
+ if x.exp <= 0 {
+ // 0 < |x| < 1
+ return 0, acc
+ }
+ // x.exp > 0
+
+ // 1 <= |x| < +Inf
+ if x.exp <= 63 {
+ // i = trunc(x) fits into an int64 (excluding math.MinInt64)
+ i := int64(msb64(x.mant) >> (64 - uint32(x.exp)))
+ if x.neg {
+ i = -i
+ }
+ if x.MinPrec() <= uint(x.exp) {
+ return i, Exact
+ }
+ return i, acc // x truncated
+ }
+ if x.neg {
+ // check for special case x == math.MinInt64 (i.e., x == -(0.5 << 64))
+ if x.exp == 64 && x.MinPrec() == 1 {
+ acc = Exact
+ }
+ return math.MinInt64, acc
+ }
+ // x too large
+ return math.MaxInt64, Below
+
+ case zero:
+ return 0, Exact
+
+ case inf:
+ if x.neg {
+ return math.MinInt64, Above
+ }
+ return math.MaxInt64, Below
+ }
+
+ panic("unreachable")
+}
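+
+// Truncation sketches (illustrative, not from the upstream source):
+//
+//	i, acc1 := NewFloat(-2.7).Int64() // i == -2, acc1 == Above (truncation toward zero)
+//	u, acc2 := NewFloat(3.0).Uint64() // u == 3, acc2 == Exact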
+
+// Float32 returns the float32 value nearest to x. If x is too small to be
+// represented by a float32 (|x| < math.SmallestNonzeroFloat32), the result
+// is (0, Below) or (-0, Above), respectively, depending on the sign of x.
+// If x is too large to be represented by a float32 (|x| > math.MaxFloat32),
+// the result is (+Inf, Above) or (-Inf, Below), depending on the sign of x.
+func (x *Float) Float32() (float32, Accuracy) {
+ if debugFloat {
+ x.validate()
+ }
+
+ switch x.form {
+ case finite:
+ // 0 < |x| < +Inf
+
+ const (
+ fbits = 32 // float size
+ mbits = 23 // mantissa size (excluding implicit msb)
+ ebits = fbits - mbits - 1 // 8 exponent size
+ bias = 1<<(ebits-1) - 1 // 127 exponent bias
+ dmin = 1 - bias - mbits // -149 smallest unbiased exponent (denormal)
+ emin = 1 - bias // -126 smallest unbiased exponent (normal)
+ emax = bias // 127 largest unbiased exponent (normal)
+ )
+
+ // Float mantissa m is 0.5 <= m < 1.0; compute exponent e for float32 mantissa.
+ e := x.exp - 1 // exponent for normal mantissa m with 1.0 <= m < 2.0
+
+ // Compute precision p for float32 mantissa.
+ // If the exponent is too small, we have a denormal number before
+ // rounding and fewer than p mantissa bits of precision available
+ // (the exponent remains fixed but the mantissa gets shifted right).
+ p := mbits + 1 // precision of normal float
+ if e < emin {
+ // recompute precision
+ p = mbits + 1 - emin + int(e)
+ // If p == 0, the mantissa of x is shifted so much to the right
+ // that its msb falls immediately to the right of the float32
+ // mantissa space. In other words, if the smallest denormal is
+ // considered "1.0", for p == 0, the mantissa value m is >= 0.5.
+ // If m > 0.5, it is rounded up to 1.0; i.e., the smallest denormal.
+ // If m == 0.5, it is rounded down to even, i.e., 0.0.
+ // If p < 0, the mantissa value m is <= "0.25" which is never rounded up.
+ if p < 0 /* m <= 0.25 */ || p == 0 && x.mant.sticky(uint(len(x.mant))*_W-1) == 0 /* m == 0.5 */ {
+ // underflow to ±0
+ if x.neg {
+ var z float32
+ return -z, Above
+ }
+ return 0.0, Below
+ }
+ // otherwise, round up
+ // We handle p == 0 explicitly because it's easy and because
+ // Float.round doesn't support rounding to 0 bits of precision.
+ if p == 0 {
+ if x.neg {
+ return -math.SmallestNonzeroFloat32, Below
+ }
+ return math.SmallestNonzeroFloat32, Above
+ }
+ }
+ // p > 0
+
+ // round
+ var r Float
+ r.prec = uint32(p)
+ r.Set(x)
+ e = r.exp - 1
+
+ // Rounding may have caused r to overflow to ±Inf
+ // (rounding never causes underflows to 0).
+ // If the exponent is too large, also overflow to ±Inf.
+ if r.form == inf || e > emax {
+ // overflow
+ if x.neg {
+ return float32(math.Inf(-1)), Below
+ }
+ return float32(math.Inf(+1)), Above
+ }
+ // e <= emax
+
+ // Determine sign, biased exponent, and mantissa.
+ var sign, bexp, mant uint32
+ if x.neg {
+ sign = 1 << (fbits - 1)
+ }
+
+ // Rounding may have caused a denormal number to
+ // become normal. Check again.
+ if e < emin {
+ // denormal number: recompute precision
+ // Since rounding may have at best increased precision
+ // and we have eliminated p <= 0 early, we know p > 0.
+ // bexp == 0 for denormals
+ p = mbits + 1 - emin + int(e)
+ mant = msb32(r.mant) >> uint(fbits-p)
+ } else {
+ // normal number: emin <= e <= emax
+ bexp = uint32(e+bias) << mbits
+ mant = msb32(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
+ }
+
+ return math.Float32frombits(sign | bexp | mant), r.acc
+
+ case zero:
+ if x.neg {
+ var z float32
+ return -z, Exact
+ }
+ return 0.0, Exact
+
+ case inf:
+ if x.neg {
+ return float32(math.Inf(-1)), Exact
+ }
+ return float32(math.Inf(+1)), Exact
+ }
+
+ panic("unreachable")
+}
+
+// Float64 returns the float64 value nearest to x. If x is too small to be
+// represented by a float64 (|x| < math.SmallestNonzeroFloat64), the result
+// is (0, Below) or (-0, Above), respectively, depending on the sign of x.
+// If x is too large to be represented by a float64 (|x| > math.MaxFloat64),
+// the result is (+Inf, Above) or (-Inf, Below), depending on the sign of x.
+func (x *Float) Float64() (float64, Accuracy) {
+ if debugFloat {
+ x.validate()
+ }
+
+ switch x.form {
+ case finite:
+ // 0 < |x| < +Inf
+
+ const (
+ fbits = 64 // float size
+ mbits = 52 // mantissa size (excluding implicit msb)
+ ebits = fbits - mbits - 1 // 11 exponent size
+ bias = 1<<(ebits-1) - 1 // 1023 exponent bias
+ dmin = 1 - bias - mbits // -1074 smallest unbiased exponent (denormal)
+ emin = 1 - bias // -1022 smallest unbiased exponent (normal)
+ emax = bias // 1023 largest unbiased exponent (normal)
+ )
+
+ // Float mantissa m is 0.5 <= m < 1.0; compute exponent e for float64 mantissa.
+ e := x.exp - 1 // exponent for normal mantissa m with 1.0 <= m < 2.0
+
+ // Compute precision p for float64 mantissa.
+ // If the exponent is too small, we have a denormal number before
+ // rounding and fewer than p mantissa bits of precision available
+ // (the exponent remains fixed but the mantissa gets shifted right).
+ p := mbits + 1 // precision of normal float
+ if e < emin {
+ // recompute precision
+ p = mbits + 1 - emin + int(e)
+ // If p == 0, the mantissa of x is shifted so much to the right
+ // that its msb falls immediately to the right of the float64
+ // mantissa space. In other words, if the smallest denormal is
+ // considered "1.0", for p == 0, the mantissa value m is >= 0.5.
+ // If m > 0.5, it is rounded up to 1.0; i.e., the smallest denormal.
+ // If m == 0.5, it is rounded down to even, i.e., 0.0.
+ // If p < 0, the mantissa value m is <= "0.25" which is never rounded up.
+ if p < 0 /* m <= 0.25 */ || p == 0 && x.mant.sticky(uint(len(x.mant))*_W-1) == 0 /* m == 0.5 */ {
+ // underflow to ±0
+ if x.neg {
+ var z float64
+ return -z, Above
+ }
+ return 0.0, Below
+ }
+ // otherwise, round up
+ // We handle p == 0 explicitly because it's easy and because
+ // Float.round doesn't support rounding to 0 bits of precision.
+ if p == 0 {
+ if x.neg {
+ return -math.SmallestNonzeroFloat64, Below
+ }
+ return math.SmallestNonzeroFloat64, Above
+ }
+ }
+ // p > 0
+
+ // round
+ var r Float
+ r.prec = uint32(p)
+ r.Set(x)
+ e = r.exp - 1
+
+ // Rounding may have caused r to overflow to ±Inf
+ // (rounding never causes underflows to 0).
+ // If the exponent is too large, also overflow to ±Inf.
+ if r.form == inf || e > emax {
+ // overflow
+ if x.neg {
+ return math.Inf(-1), Below
+ }
+ return math.Inf(+1), Above
+ }
+ // e <= emax
+
+ // Determine sign, biased exponent, and mantissa.
+ var sign, bexp, mant uint64
+ if x.neg {
+ sign = 1 << (fbits - 1)
+ }
+
+ // Rounding may have caused a denormal number to
+ // become normal. Check again.
+ if e < emin {
+ // denormal number: recompute precision
+ // Since rounding may have at best increased precision
+ // and we have eliminated p <= 0 early, we know p > 0.
+ // bexp == 0 for denormals
+ p = mbits + 1 - emin + int(e)
+ mant = msb64(r.mant) >> uint(fbits-p)
+ } else {
+ // normal number: emin <= e <= emax
+ bexp = uint64(e+bias) << mbits
+ mant = msb64(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
+ }
+
+ return math.Float64frombits(sign | bexp | mant), r.acc
+
+ case zero:
+ if x.neg {
+ var z float64
+ return -z, Exact
+ }
+ return 0.0, Exact
+
+ case inf:
+ if x.neg {
+ return math.Inf(-1), Exact
+ }
+ return math.Inf(+1), Exact
+ }
+
+ panic("unreachable")
+}
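+
+// Conversion sketches (illustrative, not from the upstream source):
+//
+//	f, acc1 := NewFloat(0.3).Float64()              // f == float64(0.3), acc1 == Exact (53-bit value round-trips)
+//	x := new(Float).SetMantExp(NewFloat(0.5), 5000) // 2**4999, far above MaxFloat64
+//	g, acc2 := x.Float64()                          // g == +Inf, acc2 == Above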
+
+// Int returns the result of truncating x towards zero;
+// or nil if x is an infinity.
+// The result is Exact if x.IsInt(); otherwise it is Below
+// for x > 0, and Above for x < 0.
+// If a non-nil *Int argument z is provided, Int stores
+// the result in z instead of allocating a new Int.
+func (x *Float) Int(z *Int) (*Int, Accuracy) {
+ if debugFloat {
+ x.validate()
+ }
+
+ if z == nil && x.form <= finite {
+ z = new(Int)
+ }
+
+ switch x.form {
+ case finite:
+ // 0 < |x| < +Inf
+ acc := makeAcc(x.neg)
+ if x.exp <= 0 {
+ // 0 < |x| < 1
+ return z.SetInt64(0), acc
+ }
+ // x.exp > 0
+
+ // 1 <= |x| < +Inf
+ // determine minimum required precision for x
+ allBits := uint(len(x.mant)) * _W
+ exp := uint(x.exp)
+ if x.MinPrec() <= exp {
+ acc = Exact
+ }
+ // shift mantissa as needed
+ if z == nil {
+ z = new(Int)
+ }
+ z.neg = x.neg
+ switch {
+ case exp > allBits:
+ z.abs = z.abs.shl(x.mant, exp-allBits)
+ default:
+ z.abs = z.abs.set(x.mant)
+ case exp < allBits:
+ z.abs = z.abs.shr(x.mant, allBits-exp)
+ }
+ return z, acc
+
+ case zero:
+ return z.SetInt64(0), Exact
+
+ case inf:
+ return nil, makeAcc(x.neg)
+ }
+
+ panic("unreachable")
+}
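+
+// Illustrative use (not from the upstream source):
+//
+//	n, acc := NewFloat(3.9).Int(nil) // n == 3, acc == Below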
+
+// Rat returns the rational number corresponding to x;
+// or nil if x is an infinity.
+// The result is Exact if x is not an Inf.
+// If a non-nil *Rat argument z is provided, Rat stores
+// the result in z instead of allocating a new Rat.
+func (x *Float) Rat(z *Rat) (*Rat, Accuracy) {
+ if debugFloat {
+ x.validate()
+ }
+
+ if z == nil && x.form <= finite {
+ z = new(Rat)
+ }
+
+ switch x.form {
+ case finite:
+ // 0 < |x| < +Inf
+ allBits := int32(len(x.mant)) * _W
+ // build up numerator and denominator
+ z.a.neg = x.neg
+ switch {
+ case x.exp > allBits:
+ z.a.abs = z.a.abs.shl(x.mant, uint(x.exp-allBits))
+ z.b.abs = z.b.abs[:0] // == 1 (see Rat)
+ // z already in normal form
+ default:
+ z.a.abs = z.a.abs.set(x.mant)
+ z.b.abs = z.b.abs[:0] // == 1 (see Rat)
+ // z already in normal form
+ case x.exp < allBits:
+ z.a.abs = z.a.abs.set(x.mant)
+ t := z.b.abs.setUint64(1)
+ z.b.abs = t.shl(t, uint(allBits-x.exp))
+ z.norm()
+ }
+ return z, Exact
+
+ case zero:
+ return z.SetInt64(0), Exact
+
+ case inf:
+ return nil, makeAcc(x.neg)
+ }
+
+ panic("unreachable")
+}
+
+// Abs sets z to the (possibly rounded) value |x| (the absolute value of x)
+// and returns z.
+func (z *Float) Abs(x *Float) *Float {
+ z.Set(x)
+ z.neg = false
+ return z
+}
+
+// Neg sets z to the (possibly rounded) value of x with its sign negated,
+// and returns z.
+func (z *Float) Neg(x *Float) *Float {
+ z.Set(x)
+ z.neg = !z.neg
+ return z
+}
+
+func validateBinaryOperands(x, y *Float) {
+ if !debugFloat {
+ // avoid performance bugs
+ panic("validateBinaryOperands called but debugFloat is not set")
+ }
+ if len(x.mant) == 0 {
+ panic("empty mantissa for x")
+ }
+ if len(y.mant) == 0 {
+ panic("empty mantissa for y")
+ }
+}
+
+// z = x + y, ignoring signs of x and y for the addition
+// but using the sign of z for rounding the result.
+// x and y must have a non-empty mantissa and valid exponent.
+func (z *Float) uadd(x, y *Float) {
+ // Note: This implementation requires 2 shifts most of the
+ // time. It is also inefficient if exponents or precisions
+ // differ by wide margins. The following article describes
+ // an efficient (but much more complicated) implementation
+ // compatible with the internal representation used here:
+ //
+ // Vincent Lefèvre: "The Generic Multiple-Precision Floating-
+ // Point Addition With Exact Rounding (as in the MPFR Library)"
+ // http://www.vinc17.net/research/papers/rnc6.pdf
+
+ if debugFloat {
+ validateBinaryOperands(x, y)
+ }
+
+ // compute exponents ex, ey for mantissa with "binary point"
+ // on the right (mantissa.0) - use int64 to avoid overflow
+ ex := int64(x.exp) - int64(len(x.mant))*_W
+ ey := int64(y.exp) - int64(len(y.mant))*_W
+
+ al := alias(z.mant, x.mant) || alias(z.mant, y.mant)
+
+ // TODO(gri) having a combined add-and-shift primitive
+ // could make this code significantly faster
+ switch {
+ case ex < ey:
+ if al {
+ t := nat(nil).shl(y.mant, uint(ey-ex))
+ z.mant = z.mant.add(x.mant, t)
+ } else {
+ z.mant = z.mant.shl(y.mant, uint(ey-ex))
+ z.mant = z.mant.add(x.mant, z.mant)
+ }
+ default:
+ // ex == ey, no shift needed
+ z.mant = z.mant.add(x.mant, y.mant)
+ case ex > ey:
+ if al {
+ t := nat(nil).shl(x.mant, uint(ex-ey))
+ z.mant = z.mant.add(t, y.mant)
+ } else {
+ z.mant = z.mant.shl(x.mant, uint(ex-ey))
+ z.mant = z.mant.add(z.mant, y.mant)
+ }
+ ex = ey
+ }
+ // len(z.mant) > 0
+
+ z.setExpAndRound(ex+int64(len(z.mant))*_W-fnorm(z.mant), 0)
+}
+
+// z = x - y for |x| > |y|, ignoring signs of x and y for the subtraction
+// but using the sign of z for rounding the result.
+// x and y must have a non-empty mantissa and valid exponent.
+func (z *Float) usub(x, y *Float) {
+ // This code is symmetric to uadd.
+ // We have not factored the common code out because
+ // eventually uadd (and usub) should be optimized
+ // by special-casing, and the code will diverge.
+
+ if debugFloat {
+ validateBinaryOperands(x, y)
+ }
+
+ ex := int64(x.exp) - int64(len(x.mant))*_W
+ ey := int64(y.exp) - int64(len(y.mant))*_W
+
+ al := alias(z.mant, x.mant) || alias(z.mant, y.mant)
+
+ switch {
+ case ex < ey:
+ if al {
+ t := nat(nil).shl(y.mant, uint(ey-ex))
+ z.mant = t.sub(x.mant, t)
+ } else {
+ z.mant = z.mant.shl(y.mant, uint(ey-ex))
+ z.mant = z.mant.sub(x.mant, z.mant)
+ }
+ default:
+ // ex == ey, no shift needed
+ z.mant = z.mant.sub(x.mant, y.mant)
+ case ex > ey:
+ if al {
+ t := nat(nil).shl(x.mant, uint(ex-ey))
+ z.mant = t.sub(t, y.mant)
+ } else {
+ z.mant = z.mant.shl(x.mant, uint(ex-ey))
+ z.mant = z.mant.sub(z.mant, y.mant)
+ }
+ ex = ey
+ }
+
+ // operands may have canceled each other out
+ if len(z.mant) == 0 {
+ z.acc = Exact
+ z.form = zero
+ z.neg = false
+ return
+ }
+ // len(z.mant) > 0
+
+ z.setExpAndRound(ex+int64(len(z.mant))*_W-fnorm(z.mant), 0)
+}
+
+// z = x * y, ignoring signs of x and y for the multiplication
+// but using the sign of z for rounding the result.
+// x and y must have a non-empty mantissa and valid exponent.
+func (z *Float) umul(x, y *Float) {
+ if debugFloat {
+ validateBinaryOperands(x, y)
+ }
+
+ // Note: This is doing too much work if the precision
+ // of z is less than the sum of the precisions of x
+ // and y which is often the case (e.g., if all floats
+ // have the same precision).
+ // TODO(gri) Optimize this for the common case.
+
+ e := int64(x.exp) + int64(y.exp)
+ if x == y {
+ z.mant = z.mant.sqr(x.mant)
+ } else {
+ z.mant = z.mant.mul(x.mant, y.mant)
+ }
+ z.setExpAndRound(e-fnorm(z.mant), 0)
+}
+
+// z = x / y, ignoring signs of x and y for the division
+// but using the sign of z for rounding the result.
+// x and y must have a non-empty mantissa and valid exponent.
+func (z *Float) uquo(x, y *Float) {
+ if debugFloat {
+ validateBinaryOperands(x, y)
+ }
+
+ // mantissa length in words for desired result precision + 1
+ // (at least one extra bit so we get the rounding bit after
+ // the division)
+ n := int(z.prec/_W) + 1
+
+ // compute adjusted x.mant such that we get enough result precision
+ xadj := x.mant
+ if d := n - len(x.mant) + len(y.mant); d > 0 {
+ // d extra words needed => add d "0 digits" to x
+ xadj = make(nat, len(x.mant)+d)
+ copy(xadj[d:], x.mant)
+ }
+ // TODO(gri): If we have too many digits (d < 0), we should be able
+ // to shorten x for faster division. But we must be extra careful
+ // with rounding in that case.
+
+ // Compute d before division since there may be aliasing of x.mant
+ // (via xadj) or y.mant with z.mant.
+ d := len(xadj) - len(y.mant)
+
+ // divide
+ var r nat
+ z.mant, r = z.mant.div(nil, xadj, y.mant)
+ e := int64(x.exp) - int64(y.exp) - int64(d-len(z.mant))*_W
+
+ // The result is long enough to include (at least) the rounding bit.
+ // If there's a non-zero remainder, the corresponding fractional part
+ // (if it were computed), would have a non-zero sticky bit (if it were
+ // zero, it couldn't have a non-zero remainder).
+ var sbit uint
+ if len(r) > 0 {
+ sbit = 1
+ }
+
+ z.setExpAndRound(e-fnorm(z.mant), sbit)
+}
+
+// ucmp returns -1, 0, or +1, depending on whether
+// |x| < |y|, |x| == |y|, or |x| > |y|.
+// x and y must have a non-empty mantissa and valid exponent.
+func (x *Float) ucmp(y *Float) int {
+ if debugFloat {
+ validateBinaryOperands(x, y)
+ }
+
+ switch {
+ case x.exp < y.exp:
+ return -1
+ case x.exp > y.exp:
+ return +1
+ }
+ // x.exp == y.exp
+
+ // compare mantissas
+ i := len(x.mant)
+ j := len(y.mant)
+ for i > 0 || j > 0 {
+ var xm, ym Word
+ if i > 0 {
+ i--
+ xm = x.mant[i]
+ }
+ if j > 0 {
+ j--
+ ym = y.mant[j]
+ }
+ switch {
+ case xm < ym:
+ return -1
+ case xm > ym:
+ return +1
+ }
+ }
+
+ return 0
+}
+
+// Handling of sign bit as defined by IEEE 754-2008, section 6.3:
+//
+// When neither the inputs nor result are NaN, the sign of a product or
+// quotient is the exclusive OR of the operands’ signs; the sign of a sum,
+// or of a difference x−y regarded as a sum x+(−y), differs from at most
+// one of the addends’ signs; and the sign of the result of conversions,
+// the quantize operation, the roundToIntegral operations, and the
+// roundToIntegralExact (see 5.3.1) is the sign of the first or only operand.
+// These rules shall apply even when operands or results are zero or infinite.
+//
+// When the sum of two operands with opposite signs (or the difference of
+// two operands with like signs) is exactly zero, the sign of that sum (or
+// difference) shall be +0 in all rounding-direction attributes except
+// roundTowardNegative; under that attribute, the sign of an exact zero
+// sum (or difference) shall be −0. However, x+x = x−(−x) retains the same
+// sign as x even when x is zero.
+//
+// See also: https://play.golang.org/p/RtH3UCt5IH
+
+// Add sets z to the rounded sum x+y and returns z. If z's precision is 0,
+// it is changed to the larger of x's or y's precision before the operation.
+// Rounding is performed according to z's precision and rounding mode; and
+// z's accuracy reports the result error relative to the exact (not rounded)
+// result. Add panics with ErrNaN if x and y are infinities with opposite
+// signs. The value of z is undefined in that case.
+func (z *Float) Add(x, y *Float) *Float {
+ if debugFloat {
+ x.validate()
+ y.validate()
+ }
+
+ if z.prec == 0 {
+ z.prec = umax32(x.prec, y.prec)
+ }
+
+ if x.form == finite && y.form == finite {
+ // x + y (common case)
+
+ // Below we set z.neg = x.neg, and when z aliases y this will
+ // change the y operand's sign. This is fine, because if an
+ // operand aliases the receiver it'll be overwritten, but we still
+ // want the original x.neg and y.neg values when we evaluate
+ // x.neg != y.neg, so we need to save y.neg before setting z.neg.
+ yneg := y.neg
+
+ z.neg = x.neg
+ if x.neg == yneg {
+ // x + y == x + y
+ // (-x) + (-y) == -(x + y)
+ z.uadd(x, y)
+ } else {
+ // x + (-y) == x - y == -(y - x)
+ // (-x) + y == y - x == -(x - y)
+ if x.ucmp(y) > 0 {
+ z.usub(x, y)
+ } else {
+ z.neg = !z.neg
+ z.usub(y, x)
+ }
+ }
+ if z.form == zero && z.mode == ToNegativeInf && z.acc == Exact {
+ z.neg = true
+ }
+ return z
+ }
+
+ if x.form == inf && y.form == inf && x.neg != y.neg {
+ // +Inf + -Inf
+ // -Inf + +Inf
+ // value of z is undefined but make sure it's valid
+ z.acc = Exact
+ z.form = zero
+ z.neg = false
+ panic(ErrNaN{"addition of infinities with opposite signs"})
+ }
+
+ if x.form == zero && y.form == zero {
+ // ±0 + ±0
+ z.acc = Exact
+ z.form = zero
+ z.neg = x.neg && y.neg // -0 + -0 == -0
+ return z
+ }
+
+ if x.form == inf || y.form == zero {
+ // ±Inf + y
+ // x + ±0
+ return z.Set(x)
+ }
+
+ // ±0 + y
+ // x + ±Inf
+ return z.Set(y)
+}
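+
+// A precision-inheritance sketch (illustrative, not from the upstream
+// source):
+//
+//	x := new(Float).SetPrec(100).SetFloat64(1)
+//	y := NewFloat(2)          // prec 53
+//	z := new(Float).Add(x, y) // z.prec == 100 (the larger), z == 3, z.Acc() == Exact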
+
+// Sub sets z to the rounded difference x-y and returns z.
+// Precision, rounding, and accuracy reporting are as for Add.
+// Sub panics with ErrNaN if x and y are infinities with equal
+// signs. The value of z is undefined in that case.
+func (z *Float) Sub(x, y *Float) *Float {
+ if debugFloat {
+ x.validate()
+ y.validate()
+ }
+
+ if z.prec == 0 {
+ z.prec = umax32(x.prec, y.prec)
+ }
+
+ if x.form == finite && y.form == finite {
+ // x - y (common case)
+ yneg := y.neg
+ z.neg = x.neg
+ if x.neg != yneg {
+ // x - (-y) == x + y
+ // (-x) - y == -(x + y)
+ z.uadd(x, y)
+ } else {
+ // x - y == x - y == -(y - x)
+ // (-x) - (-y) == y - x == -(x - y)
+ if x.ucmp(y) > 0 {
+ z.usub(x, y)
+ } else {
+ z.neg = !z.neg
+ z.usub(y, x)
+ }
+ }
+ if z.form == zero && z.mode == ToNegativeInf && z.acc == Exact {
+ z.neg = true
+ }
+ return z
+ }
+
+ if x.form == inf && y.form == inf && x.neg == y.neg {
+ // +Inf - +Inf
+ // -Inf - -Inf
+ // value of z is undefined but make sure it's valid
+ z.acc = Exact
+ z.form = zero
+ z.neg = false
+ panic(ErrNaN{"subtraction of infinities with equal signs"})
+ }
+
+ if x.form == zero && y.form == zero {
+ // ±0 - ±0
+ z.acc = Exact
+ z.form = zero
+ z.neg = x.neg && !y.neg // -0 - +0 == -0
+ return z
+ }
+
+ if x.form == inf || y.form == zero {
+ // ±Inf - y
+ // x - ±0
+ return z.Set(x)
+ }
+
+ // ±0 - y
+ // x - ±Inf
+ return z.Neg(y)
+}
+
+// Mul sets z to the rounded product x*y and returns z.
+// Precision, rounding, and accuracy reporting are as for Add.
+// Mul panics with ErrNaN if one operand is zero and the other
+// operand an infinity. The value of z is undefined in that case.
+func (z *Float) Mul(x, y *Float) *Float {
+ if debugFloat {
+ x.validate()
+ y.validate()
+ }
+
+ if z.prec == 0 {
+ z.prec = umax32(x.prec, y.prec)
+ }
+
+ z.neg = x.neg != y.neg
+
+ if x.form == finite && y.form == finite {
+ // x * y (common case)
+ z.umul(x, y)
+ return z
+ }
+
+ z.acc = Exact
+ if x.form == zero && y.form == inf || x.form == inf && y.form == zero {
+ // ±0 * ±Inf
+ // ±Inf * ±0
+ // value of z is undefined but make sure it's valid
+ z.form = zero
+ z.neg = false
+ panic(ErrNaN{"multiplication of zero with infinity"})
+ }
+
+ if x.form == inf || y.form == inf {
+ // ±Inf * y
+ // x * ±Inf
+ z.form = inf
+ return z
+ }
+
+ // ±0 * y
+ // x * ±0
+ z.form = zero
+ return z
+}
+
+// Quo sets z to the rounded quotient x/y and returns z.
+// Precision, rounding, and accuracy reporting are as for Add.
+// Quo panics with ErrNaN if both operands are zero or infinities.
+// The value of z is undefined in that case.
+func (z *Float) Quo(x, y *Float) *Float {
+ if debugFloat {
+ x.validate()
+ y.validate()
+ }
+
+ if z.prec == 0 {
+ z.prec = umax32(x.prec, y.prec)
+ }
+
+ z.neg = x.neg != y.neg
+
+ if x.form == finite && y.form == finite {
+ // x / y (common case)
+ z.uquo(x, y)
+ return z
+ }
+
+ z.acc = Exact
+ if x.form == zero && y.form == zero || x.form == inf && y.form == inf {
+ // ±0 / ±0
+ // ±Inf / ±Inf
+ // value of z is undefined but make sure it's valid
+ z.form = zero
+ z.neg = false
+ panic(ErrNaN{"division of zero by zero or infinity by infinity"})
+ }
+
+ if x.form == zero || y.form == inf {
+ // ±0 / y
+ // x / ±Inf
+ z.form = zero
+ return z
+ }
+
+ // x / ±0
+ // ±Inf / y
+ z.form = inf
+ return z
+}
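+
+// Illustrative use (not from the upstream source):
+//
+//	z := new(Float).SetPrec(50).Quo(NewFloat(1), NewFloat(3)) // 1/3 at 50 bits; z.Acc() reports the rounding direction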
+
+// Cmp compares x and y and returns:
+//
+// -1 if x < y
+// 0 if x == y (incl. -0 == 0, -Inf == -Inf, and +Inf == +Inf)
+// +1 if x > y
+func (x *Float) Cmp(y *Float) int {
+ if debugFloat {
+ x.validate()
+ y.validate()
+ }
+
+ mx := x.ord()
+ my := y.ord()
+ switch {
+ case mx < my:
+ return -1
+ case mx > my:
+ return +1
+ }
+ // mx == my
+
+ // only if |mx| == 1 we have to compare the mantissae
+ switch mx {
+ case -1:
+ return y.ucmp(x)
+ case +1:
+ return x.ucmp(y)
+ }
+
+ return 0
+}
+
+// ord classifies x and returns:
+//
+// -2 if -Inf == x
+// -1 if -Inf < x < 0
+// 0 if x == 0 (signed or unsigned)
+// +1 if 0 < x < +Inf
+// +2 if x == +Inf
+func (x *Float) ord() int {
+ var m int
+ switch x.form {
+ case finite:
+ m = 1
+ case zero:
+ return 0
+ case inf:
+ m = 2
+ }
+ if x.neg {
+ m = -m
+ }
+ return m
+}
+
+func umax32(x, y uint32) uint32 {
+ if x > y {
+ return x
+ }
+ return y
+}
diff --git a/contrib/go/_std_1.19/src/math/big/floatconv.go b/contrib/go/_std_1.19/src/math/big/floatconv.go
new file mode 100644
index 0000000000..3bb51c7dea
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/big/floatconv.go
@@ -0,0 +1,302 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements string-to-Float conversion functions.
+
+package big
+
+import (
+ "fmt"
+ "io"
+ "strings"
+)
+
+var floatZero Float
+
+// SetString sets z to the value of s and returns z and a boolean indicating
+// success. s must be a floating-point number of the same format as accepted
+// by Parse, with base argument 0. The entire string (not just a prefix) must
+// be valid for success. If the operation failed, the value of z is undefined
+// but the returned value is nil.
+func (z *Float) SetString(s string) (*Float, bool) {
+ if f, _, err := z.Parse(s, 0); err == nil {
+ return f, true
+ }
+ return nil, false
+}
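+
+// Illustrative uses (not from the upstream source):
+//
+//	f, ok := new(Float).SetString("2.5e-3") // ok == true; f has the default prec 64
+//	_, ok = new(Float).SetString("1.2.3")   // ok == false: the trailing ".3" is not consumed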
+
+// scan is like Parse but reads the longest possible prefix representing a valid
+// floating point number from an io.ByteScanner rather than a string. It serves
+// as the implementation of Parse. It does not recognize ±Inf and does not expect
+// EOF at the end.
+func (z *Float) scan(r io.ByteScanner, base int) (f *Float, b int, err error) {
+ prec := z.prec
+ if prec == 0 {
+ prec = 64
+ }
+
+ // A reasonable value in case of an error.
+ z.form = zero
+
+ // sign
+ z.neg, err = scanSign(r)
+ if err != nil {
+ return
+ }
+
+ // mantissa
+ var fcount int // fractional digit count; valid if <= 0
+ z.mant, b, fcount, err = z.mant.scan(r, base, true)
+ if err != nil {
+ return
+ }
+
+ // exponent
+ var exp int64
+ var ebase int
+ exp, ebase, err = scanExponent(r, true, base == 0)
+ if err != nil {
+ return
+ }
+
+ // special-case 0
+ if len(z.mant) == 0 {
+ z.prec = prec
+ z.acc = Exact
+ z.form = zero
+ f = z
+ return
+ }
+ // len(z.mant) > 0
+
+ // The mantissa may have a radix point (fcount <= 0) and there
+ // may be a nonzero exponent exp. The radix point amounts to a
+ // division by b**(-fcount). An exponent means multiplication by
+ // ebase**exp. Finally, mantissa normalization (shift left) requires
+ // a correcting multiplication by 2**(-shiftcount). Multiplications
+ // are commutative, so we can apply them in any order as long as there
+ // is no loss of precision. We only have powers of 2 and 10, and
+ // we split powers of 10 into the product of the same powers of
+ // 2 and 5. This reduces the size of the multiplication factor
+ // needed for base-10 exponents.
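+ //
+ // For example (illustrative): "1.25e-1" scans as the integer mantissa
+ // 125 with fcount == -2 and decimal exponent -1, i.e. 125 · 10**-3 ==
+ // 125 · 5**-3 · 2**-3, so exp5 == -3 and exp2 is reduced by 3 (before
+ // the normalization correction).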
+
+ // normalize mantissa and determine initial exponent contributions
+ exp2 := int64(len(z.mant))*_W - fnorm(z.mant)
+ exp5 := int64(0)
+
+ // determine binary or decimal exponent contribution of radix point
+ if fcount < 0 {
+ // The mantissa has a radix point ddd.dddd; and
+ // -fcount is the number of digits to the right
+ // of '.'. Adjust relevant exponent accordingly.
+ d := int64(fcount)
+ switch b {
+ case 10:
+ exp5 = d
+ fallthrough // 10**e == 5**e * 2**e
+ case 2:
+ exp2 += d
+ case 8:
+ exp2 += d * 3 // octal digits are 3 bits each
+ case 16:
+ exp2 += d * 4 // hexadecimal digits are 4 bits each
+ default:
+ panic("unexpected mantissa base")
+ }
+ // fcount consumed - not needed anymore
+ }
+
+ // take actual exponent into account
+ switch ebase {
+ case 10:
+ exp5 += exp
+ fallthrough // see fallthrough above
+ case 2:
+ exp2 += exp
+ default:
+ panic("unexpected exponent base")
+ }
+ // exp consumed - not needed anymore
+
+ // apply 2**exp2
+ if MinExp <= exp2 && exp2 <= MaxExp {
+ z.prec = prec
+ z.form = finite
+ z.exp = int32(exp2)
+ f = z
+ } else {
+ err = fmt.Errorf("exponent overflow")
+ return
+ }
+
+ if exp5 == 0 {
+ // no decimal exponent contribution
+ z.round(0)
+ return
+ }
+ // exp5 != 0
+
+ // apply 5**exp5
+ p := new(Float).SetPrec(z.Prec() + 64) // use more bits for p -- TODO(gri) what is the right number?
+ if exp5 < 0 {
+ z.Quo(z, p.pow5(uint64(-exp5)))
+ } else {
+ z.Mul(z, p.pow5(uint64(exp5)))
+ }
+
+ return
+}
+
+// These powers of 5 fit into a uint64.
+//
+//	for p, q := uint64(0), uint64(1); p < q; p, q = q, q*5 {
+//		fmt.Println(q)
+//	}
+var pow5tab = [...]uint64{
+ 1,
+ 5,
+ 25,
+ 125,
+ 625,
+ 3125,
+ 15625,
+ 78125,
+ 390625,
+ 1953125,
+ 9765625,
+ 48828125,
+ 244140625,
+ 1220703125,
+ 6103515625,
+ 30517578125,
+ 152587890625,
+ 762939453125,
+ 3814697265625,
+ 19073486328125,
+ 95367431640625,
+ 476837158203125,
+ 2384185791015625,
+ 11920928955078125,
+ 59604644775390625,
+ 298023223876953125,
+ 1490116119384765625,
+ 7450580596923828125,
+}
+
+// pow5 sets z to 5**n and returns z.
+// n must not be negative.
+func (z *Float) pow5(n uint64) *Float {
+ const m = uint64(len(pow5tab) - 1)
+ if n <= m {
+ return z.SetUint64(pow5tab[n])
+ }
+ // n > m
+
+ z.SetUint64(pow5tab[m])
+ n -= m
+
+ // use more bits for f than for z
+ // TODO(gri) what is the right number?
+ f := new(Float).SetPrec(z.Prec() + 64).SetUint64(5)
+
+ for n > 0 {
+ if n&1 != 0 {
+ z.Mul(z, f)
+ }
+ f.Mul(f, f)
+ n >>= 1
+ }
+
+ return z
+}
+
+// Parse parses s which must contain a text representation of a floating-
+// point number with a mantissa in the given conversion base (the exponent
+// is always a decimal number), or a string representing an infinite value.
+//
+// For base 0, an underscore character “_” may appear between a base
+// prefix and an adjacent digit, and between successive digits; such
+// underscores do not change the value of the number, or the returned
+// digit count. Incorrect placement of underscores is reported as an
+// error if there are no other errors. If base != 0, underscores are
+// not recognized and thus terminate scanning like any other character
+// that is not a valid radix point or digit.
+//
+// It sets z to the (possibly rounded) value of the corresponding floating-
+// point value, and returns z, the actual base b, and an error err, if any.
+// The entire string (not just a prefix) must be consumed for success.
+// If z's precision is 0, it is changed to 64 before rounding takes effect.
+// The number must be of the form:
+//
+// number = [ sign ] ( float | "inf" | "Inf" ) .
+// sign = "+" | "-" .
+// float = ( mantissa | prefix pmantissa ) [ exponent ] .
+// prefix = "0" [ "b" | "B" | "o" | "O" | "x" | "X" ] .
+// mantissa = digits "." [ digits ] | digits | "." digits .
+// pmantissa = [ "_" ] digits "." [ digits ] | [ "_" ] digits | "." digits .
+// exponent = ( "e" | "E" | "p" | "P" ) [ sign ] digits .
+// digits = digit { [ "_" ] digit } .
+// digit = "0" ... "9" | "a" ... "z" | "A" ... "Z" .
+//
+// The base argument must be 0, 2, 8, 10, or 16. Providing an invalid base
+// argument will lead to a run-time panic.
+//
+// For base 0, the number prefix determines the actual base: A prefix of
+// “0b” or “0B” selects base 2, “0o” or “0O” selects base 8, and
+// “0x” or “0X” selects base 16. Otherwise, the actual base is 10 and
+// no prefix is accepted. The octal prefix "0" is not supported (a leading
+// "0" is simply considered a "0").
+//
+// A "p" or "P" exponent indicates a base 2 (rather then base 10) exponent;
+// for instance, "0x1.fffffffffffffp1023" (using base 0) represents the
+// maximum float64 value. For hexadecimal mantissae, the exponent character
+// must be one of 'p' or 'P', if present (an "e" or "E" exponent indicator
+// cannot be distinguished from a mantissa digit).
+//
+// The returned *Float f is nil and the value of z is valid but not
+// defined if an error is reported.
+func (z *Float) Parse(s string, base int) (f *Float, b int, err error) {
+ // scan doesn't handle ±Inf
+ if len(s) == 3 && (s == "Inf" || s == "inf") {
+ f = z.SetInf(false)
+ return
+ }
+ if len(s) == 4 && (s[0] == '+' || s[0] == '-') && (s[1:] == "Inf" || s[1:] == "inf") {
+ f = z.SetInf(s[0] == '-')
+ return
+ }
+
+ r := strings.NewReader(s)
+ if f, b, err = z.scan(r, base); err != nil {
+ return
+ }
+
+ // entire string must have been consumed
+ if ch, err2 := r.ReadByte(); err2 == nil {
+ err = fmt.Errorf("expected end of string, found %q", ch)
+ } else if err2 != io.EOF {
+ err = err2
+ }
+
+ return
+}
+
+// ParseFloat is like f.Parse(s, base) with f set to the given precision
+// and rounding mode.
+func ParseFloat(s string, base int, prec uint, mode RoundingMode) (f *Float, b int, err error) {
+ return new(Float).SetPrec(prec).SetMode(mode).Parse(s, base)
+}
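+
+// Illustrative use (not from the upstream source):
+//
+//	f, b, err := ParseFloat("0x1.8p3", 0, 64, ToNearestEven) // f == 12 (1.5 · 2**3), b == 16, err == nil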
+
+var _ fmt.Scanner = (*Float)(nil) // *Float must implement fmt.Scanner
+
+// Scan is a support routine for fmt.Scanner; it sets z to the value of
+// the scanned number. It accepts formats whose verbs are supported by
+// fmt.Scan for floating point values, which are:
+// 'b' (binary), 'e', 'E', 'f', 'F', 'g' and 'G'.
+// Scan doesn't handle ±Inf.
+func (z *Float) Scan(s fmt.ScanState, ch rune) error {
+ s.SkipSpace()
+ _, _, err := z.scan(byteReader{s}, 0)
+ return err
+}
diff --git a/contrib/go/_std_1.19/src/math/big/floatmarsh.go b/contrib/go/_std_1.19/src/math/big/floatmarsh.go
new file mode 100644
index 0000000000..990e085abe
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/big/floatmarsh.go
@@ -0,0 +1,127 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements encoding/decoding of Floats.
+
+package big
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+)
+
+// Gob codec version. Permits backward-compatible changes to the encoding.
+const floatGobVersion byte = 1
+
+// GobEncode implements the gob.GobEncoder interface.
+// The Float value and all its attributes (precision,
+// rounding mode, accuracy) are marshaled.
+func (x *Float) GobEncode() ([]byte, error) {
+ if x == nil {
+ return nil, nil
+ }
+
+ // determine max. space (bytes) required for encoding
+ sz := 1 + 1 + 4 // version + mode|acc|form|neg (3+2+2+1bit) + prec
+ n := 0 // number of mantissa words
+ if x.form == finite {
+ // add space for mantissa and exponent
+ n = int((x.prec + (_W - 1)) / _W) // required mantissa length in words for given precision
+ // actual mantissa slice could be shorter (trailing 0's) or longer (unused bits):
+ // - if shorter, only encode the words present
+ // - if longer, cut off unused words when encoding in bytes
+ // (in practice, this should never happen since rounding
+ // takes care of it, but be safe and do it always)
+ if len(x.mant) < n {
+ n = len(x.mant)
+ }
+ // len(x.mant) >= n
+ sz += 4 + n*_S // exp + mant
+ }
+ buf := make([]byte, sz)
+
+ buf[0] = floatGobVersion
+ b := byte(x.mode&7)<<5 | byte((x.acc+1)&3)<<3 | byte(x.form&3)<<1
+ if x.neg {
+ b |= 1
+ }
+ buf[1] = b
+ binary.BigEndian.PutUint32(buf[2:], x.prec)
+
+ if x.form == finite {
+ binary.BigEndian.PutUint32(buf[6:], uint32(x.exp))
+ x.mant[len(x.mant)-n:].bytes(buf[10:]) // cut off unused trailing words
+ }
+
+ return buf, nil
+}
+
+// GobDecode implements the gob.GobDecoder interface.
+// The result is rounded per the precision and rounding mode of
+// z unless z's precision is 0, in which case z is set exactly
+// to the decoded value.
+func (z *Float) GobDecode(buf []byte) error {
+ if len(buf) == 0 {
+ // Other side sent a nil or default value.
+ *z = Float{}
+ return nil
+ }
+ if len(buf) < 6 {
+ return errors.New("Float.GobDecode: buffer too small")
+ }
+
+ if buf[0] != floatGobVersion {
+ return fmt.Errorf("Float.GobDecode: encoding version %d not supported", buf[0])
+ }
+
+ oldPrec := z.prec
+ oldMode := z.mode
+
+ b := buf[1]
+ z.mode = RoundingMode((b >> 5) & 7)
+ z.acc = Accuracy((b>>3)&3) - 1
+ z.form = form((b >> 1) & 3)
+ z.neg = b&1 != 0
+ z.prec = binary.BigEndian.Uint32(buf[2:])
+
+ if z.form == finite {
+ if len(buf) < 10 {
+ return errors.New("Float.GobDecode: buffer too small for finite form float")
+ }
+ z.exp = int32(binary.BigEndian.Uint32(buf[6:]))
+ z.mant = z.mant.setBytes(buf[10:])
+ }
+
+ if oldPrec != 0 {
+ z.mode = oldMode
+ z.SetPrec(uint(oldPrec))
+ }
+
+ return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// Only the Float value is marshaled (in full precision), other
+// attributes such as precision or accuracy are ignored.
+func (x *Float) MarshalText() (text []byte, err error) {
+ if x == nil {
+ return []byte("<nil>"), nil
+ }
+ var buf []byte
+ return x.Append(buf, 'g', -1), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The result is rounded per the precision and rounding mode of z.
+// If z's precision is 0, it is changed to 64 before rounding takes
+// effect.
+func (z *Float) UnmarshalText(text []byte) error {
+ // TODO(gri): get rid of the []byte/string conversion
+ _, _, err := z.Parse(string(text), 0)
+ if err != nil {
+ err = fmt.Errorf("math/big: cannot unmarshal %q into a *big.Float (%v)", text, err)
+ }
+ return err
+}
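+
+// A text round-trip sketch (illustrative, not from the upstream source):
+//
+//	x := NewFloat(1.5)
+//	text, _ := x.MarshalText() // "1.5"
+//	y := new(Float)
+//	_ = y.UnmarshalText(text)  // y == 1.5; y's prec was 0, so it becomes 64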
diff --git a/contrib/go/_std_1.18/src/math/big/ftoa.go b/contrib/go/_std_1.19/src/math/big/ftoa.go
index 5506e6e425..5506e6e425 100644
--- a/contrib/go/_std_1.18/src/math/big/ftoa.go
+++ b/contrib/go/_std_1.19/src/math/big/ftoa.go
diff --git a/contrib/go/_std_1.19/src/math/big/int.go b/contrib/go/_std_1.19/src/math/big/int.go
new file mode 100644
index 0000000000..ec168f8ffe
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/big/int.go
@@ -0,0 +1,1225 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements signed multi-precision integers.
+
+package big
+
+import (
+ "fmt"
+ "io"
+ "math/rand"
+ "strings"
+)
+
+// An Int represents a signed multi-precision integer.
+// The zero value for an Int represents the value 0.
+//
+// Operations always take pointer arguments (*Int) rather
+// than Int values, and each unique Int value requires
+// its own unique *Int pointer. To "copy" an Int value,
+// an existing (or newly allocated) Int must be set to
+// a new value using the Int.Set method; shallow copies
+// of Ints are not supported and may lead to errors.
+type Int struct {
+ neg bool // sign
+ abs nat // absolute value of the integer
+}
+
+var intOne = &Int{false, natOne}
+
+// Sign returns:
+//
+// -1 if x < 0
+// 0 if x == 0
+// +1 if x > 0
+func (x *Int) Sign() int {
+ if len(x.abs) == 0 {
+ return 0
+ }
+ if x.neg {
+ return -1
+ }
+ return 1
+}
+
+// SetInt64 sets z to x and returns z.
+func (z *Int) SetInt64(x int64) *Int {
+ neg := false
+ if x < 0 {
+ neg = true
+ x = -x
+ }
+ z.abs = z.abs.setUint64(uint64(x))
+ z.neg = neg
+ return z
+}
+
+// SetUint64 sets z to x and returns z.
+func (z *Int) SetUint64(x uint64) *Int {
+ z.abs = z.abs.setUint64(x)
+ z.neg = false
+ return z
+}
+
+// NewInt allocates and returns a new Int set to x.
+func NewInt(x int64) *Int {
+ return new(Int).SetInt64(x)
+}
+
+// Set sets z to x and returns z.
+func (z *Int) Set(x *Int) *Int {
+ if z != x {
+ z.abs = z.abs.set(x.abs)
+ z.neg = x.neg
+ }
+ return z
+}
+
+// Bits provides raw (unchecked but fast) access to x by returning its
+// absolute value as a little-endian Word slice. The result and x share
+// the same underlying array.
+// Bits is intended to support implementation of missing low-level Int
+// functionality outside this package; it should be avoided otherwise.
+func (x *Int) Bits() []Word {
+ return x.abs
+}
+
+// SetBits provides raw (unchecked but fast) access to z by setting its
+// value to abs, interpreted as a little-endian Word slice, and returning
+// z. The result and abs share the same underlying array.
+// SetBits is intended to support implementation of missing low-level Int
+// functionality outside this package; it should be avoided otherwise.
+func (z *Int) SetBits(abs []Word) *Int {
+ z.abs = nat(abs).norm()
+ z.neg = false
+ return z
+}
+
+// Abs sets z to |x| (the absolute value of x) and returns z.
+func (z *Int) Abs(x *Int) *Int {
+ z.Set(x)
+ z.neg = false
+ return z
+}
+
+// Neg sets z to -x and returns z.
+func (z *Int) Neg(x *Int) *Int {
+ z.Set(x)
+ z.neg = len(z.abs) > 0 && !z.neg // 0 has no sign
+ return z
+}
+
+// Add sets z to the sum x+y and returns z.
+func (z *Int) Add(x, y *Int) *Int {
+ neg := x.neg
+ if x.neg == y.neg {
+ // x + y == x + y
+ // (-x) + (-y) == -(x + y)
+ z.abs = z.abs.add(x.abs, y.abs)
+ } else {
+ // x + (-y) == x - y == -(y - x)
+ // (-x) + y == y - x == -(x - y)
+ if x.abs.cmp(y.abs) >= 0 {
+ z.abs = z.abs.sub(x.abs, y.abs)
+ } else {
+ neg = !neg
+ z.abs = z.abs.sub(y.abs, x.abs)
+ }
+ }
+ z.neg = len(z.abs) > 0 && neg // 0 has no sign
+ return z
+}
+
+// Sub sets z to the difference x-y and returns z.
+func (z *Int) Sub(x, y *Int) *Int {
+ neg := x.neg
+ if x.neg != y.neg {
+ // x - (-y) == x + y
+ // (-x) - y == -(x + y)
+ z.abs = z.abs.add(x.abs, y.abs)
+ } else {
+ // x - y == x - y == -(y - x)
+ // (-x) - (-y) == y - x == -(x - y)
+ if x.abs.cmp(y.abs) >= 0 {
+ z.abs = z.abs.sub(x.abs, y.abs)
+ } else {
+ neg = !neg
+ z.abs = z.abs.sub(y.abs, x.abs)
+ }
+ }
+ z.neg = len(z.abs) > 0 && neg // 0 has no sign
+ return z
+}
+
+// Mul sets z to the product x*y and returns z.
+func (z *Int) Mul(x, y *Int) *Int {
+ // x * y == x * y
+ // x * (-y) == -(x * y)
+ // (-x) * y == -(x * y)
+ // (-x) * (-y) == x * y
+ if x == y {
+ z.abs = z.abs.sqr(x.abs)
+ z.neg = false
+ return z
+ }
+ z.abs = z.abs.mul(x.abs, y.abs)
+ z.neg = len(z.abs) > 0 && x.neg != y.neg // 0 has no sign
+ return z
+}
+
+// MulRange sets z to the product of all integers
+// in the range [a, b] inclusively and returns z.
+// If a > b (empty range), the result is 1.
+func (z *Int) MulRange(a, b int64) *Int {
+ switch {
+ case a > b:
+ return z.SetInt64(1) // empty range
+ case a <= 0 && b >= 0:
+ return z.SetInt64(0) // range includes 0
+ }
+ // a <= b && (b < 0 || a > 0)
+
+ neg := false
+ if a < 0 {
+ neg = (b-a)&1 == 0
+ a, b = -b, -a
+ }
+
+ z.abs = z.abs.mulRange(uint64(a), uint64(b))
+ z.neg = neg
+ return z
+}
+
+// Binomial sets z to the binomial coefficient of (n, k) and returns z.
+func (z *Int) Binomial(n, k int64) *Int {
+ // reduce the number of multiplications by reducing k
+ if n/2 < k && k <= n {
+ k = n - k // Binomial(n, k) == Binomial(n, n-k)
+ }
+ var a, b Int
+ a.MulRange(n-k+1, n)
+ b.MulRange(1, k)
+ return z.Quo(&a, &b)
+}
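+
+// Illustrative use (not from the upstream source):
+//
+//	z := new(Int).Binomial(5, 2) // C(5,2) == 10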
+
+// Quo sets z to the quotient x/y for y != 0 and returns z.
+// If y == 0, a division-by-zero run-time panic occurs.
+// Quo implements truncated division (like Go); see QuoRem for more details.
+func (z *Int) Quo(x, y *Int) *Int {
+ z.abs, _ = z.abs.div(nil, x.abs, y.abs)
+ z.neg = len(z.abs) > 0 && x.neg != y.neg // 0 has no sign
+ return z
+}
+
+// Rem sets z to the remainder x%y for y != 0 and returns z.
+// If y == 0, a division-by-zero run-time panic occurs.
+// Rem implements truncated modulus (like Go); see QuoRem for more details.
+func (z *Int) Rem(x, y *Int) *Int {
+ _, z.abs = nat(nil).div(z.abs, x.abs, y.abs)
+ z.neg = len(z.abs) > 0 && x.neg // 0 has no sign
+ return z
+}
+
+// QuoRem sets z to the quotient x/y and r to the remainder x%y
+// and returns the pair (z, r) for y != 0.
+// If y == 0, a division-by-zero run-time panic occurs.
+//
+// QuoRem implements T-division and modulus (like Go):
+//
+//	q = x/y with the result truncated toward zero
+// r = x - y*q
+//
+// (See Daan Leijen, “Division and Modulus for Computer Scientists”.)
+// See DivMod for Euclidean division and modulus (unlike Go).
+func (z *Int) QuoRem(x, y, r *Int) (*Int, *Int) {
+ z.abs, r.abs = z.abs.div(r.abs, x.abs, y.abs)
+ z.neg, r.neg = len(z.abs) > 0 && x.neg != y.neg, len(r.abs) > 0 && x.neg // 0 has no sign
+ return z, r
+}
+
+// Div sets z to the quotient x/y for y != 0 and returns z.
+// If y == 0, a division-by-zero run-time panic occurs.
+// Div implements Euclidean division (unlike Go); see DivMod for more details.
+func (z *Int) Div(x, y *Int) *Int {
+ y_neg := y.neg // z may be an alias for y
+ var r Int
+ z.QuoRem(x, y, &r)
+ if r.neg {
+ if y_neg {
+ z.Add(z, intOne)
+ } else {
+ z.Sub(z, intOne)
+ }
+ }
+ return z
+}
+
+// Mod sets z to the modulus x%y for y != 0 and returns z.
+// If y == 0, a division-by-zero run-time panic occurs.
+// Mod implements Euclidean modulus (unlike Go); see DivMod for more details.
+func (z *Int) Mod(x, y *Int) *Int {
+ y0 := y // save y
+ if z == y || alias(z.abs, y.abs) {
+ y0 = new(Int).Set(y)
+ }
+ var q Int
+ q.QuoRem(x, y, z)
+ if z.neg {
+ if y0.neg {
+ z.Sub(z, y0)
+ } else {
+ z.Add(z, y0)
+ }
+ }
+ return z
+}
+
+// DivMod sets z to the quotient x div y and m to the modulus x mod y
+// and returns the pair (z, m) for y != 0.
+// If y == 0, a division-by-zero run-time panic occurs.
+//
+// DivMod implements Euclidean division and modulus (unlike Go):
+//
+// q = x div y such that
+// m = x - y*q with 0 <= m < |y|
+//
+// (See Raymond T. Boute, “The Euclidean definition of the functions
+// div and mod”. ACM Transactions on Programming Languages and
+// Systems (TOPLAS), 14(2):127-144, New York, NY, USA, 4/1992.
+// ACM press.)
+// See QuoRem for T-division and modulus (like Go).
+func (z *Int) DivMod(x, y, m *Int) (*Int, *Int) {
+ y0 := y // save y
+ if z == y || alias(z.abs, y.abs) {
+ y0 = new(Int).Set(y)
+ }
+ z.QuoRem(x, y, m)
+ if m.neg {
+ if y0.neg {
+ z.Add(z, intOne)
+ m.Sub(m, y0)
+ } else {
+ z.Sub(z, intOne)
+ m.Add(m, y0)
+ }
+ }
+ return z, m
+}
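+
+// A hypothetical client-side sketch (not part of this file), assuming an
+// importing package with "math/big": truncated vs. Euclidean division of
+// x = -7 by y = 3.
+//
+//	x, y := big.NewInt(-7), big.NewInt(3)
+//	q := new(big.Int).Quo(x, y) // q == -2 (truncated, like Go's /)
+//	r := new(big.Int).Rem(x, y) // r == -1 (truncated, like Go's %)
+//	d := new(big.Int).Div(x, y) // d == -3 (Euclidean)
+//	m := new(big.Int).Mod(x, y) // m == 2, satisfying 0 <= m < |y|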
+
+// Cmp compares x and y and returns:
+//
+// -1 if x < y
+// 0 if x == y
+// +1 if x > y
+func (x *Int) Cmp(y *Int) (r int) {
+	// x cmp y == x cmp y
+	// x cmp (-y) == +1 for x, y > 0
+	// (-x) cmp y == -1 for x, y > 0
+	// (-x) cmp (-y) == -(x cmp y)
+ switch {
+ case x == y:
+ // nothing to do
+ case x.neg == y.neg:
+ r = x.abs.cmp(y.abs)
+ if x.neg {
+ r = -r
+ }
+ case x.neg:
+ r = -1
+ default:
+ r = 1
+ }
+ return
+}
+
+// CmpAbs compares the absolute values of x and y and returns:
+//
+// -1 if |x| < |y|
+// 0 if |x| == |y|
+// +1 if |x| > |y|
+func (x *Int) CmpAbs(y *Int) int {
+ return x.abs.cmp(y.abs)
+}
+
+// low32 returns the least significant 32 bits of x.
+func low32(x nat) uint32 {
+ if len(x) == 0 {
+ return 0
+ }
+ return uint32(x[0])
+}
+
+// low64 returns the least significant 64 bits of x.
+func low64(x nat) uint64 {
+ if len(x) == 0 {
+ return 0
+ }
+ v := uint64(x[0])
+ if _W == 32 && len(x) > 1 {
+ return uint64(x[1])<<32 | v
+ }
+ return v
+}
+
+// Int64 returns the int64 representation of x.
+// If x cannot be represented in an int64, the result is undefined.
+func (x *Int) Int64() int64 {
+ v := int64(low64(x.abs))
+ if x.neg {
+ v = -v
+ }
+ return v
+}
+
+// Uint64 returns the uint64 representation of x.
+// If x cannot be represented in a uint64, the result is undefined.
+func (x *Int) Uint64() uint64 {
+ return low64(x.abs)
+}
+
+// IsInt64 reports whether x can be represented as an int64.
+func (x *Int) IsInt64() bool {
+ if len(x.abs) <= 64/_W {
+ w := int64(low64(x.abs))
+ return w >= 0 || x.neg && w == -w
+ }
+ return false
+}
+
+// IsUint64 reports whether x can be represented as a uint64.
+func (x *Int) IsUint64() bool {
+ return !x.neg && len(x.abs) <= 64/_W
+}
+
+// SetString sets z to the value of s, interpreted in the given base,
+// and returns z and a boolean indicating success. The entire string
+// (not just a prefix) must be valid for success. If SetString fails,
+// the value of z is undefined but the returned value is nil.
+//
+// The base argument must be 0 or a value between 2 and MaxBase.
+// For base 0, the number prefix determines the actual base: A prefix of
+// “0b” or “0B” selects base 2; “0”, “0o”, or “0O” selects base 8;
+// and “0x” or “0X” selects base 16. Otherwise, the selected base is 10
+// and no prefix is accepted.
+//
+// For bases <= 36, lower and upper case letters are considered the same:
+// The letters 'a' to 'z' and 'A' to 'Z' represent digit values 10 to 35.
+// For bases > 36, the upper case letters 'A' to 'Z' represent the digit
+// values 36 to 61.
+//
+// For base 0, an underscore character “_” may appear between a base
+// prefix and an adjacent digit, and between successive digits; such
+// underscores do not change the value of the number.
+// Incorrect placement of underscores is reported as an error if there
+// are no other errors. If base != 0, underscores are not recognized
+// and act like any other character that is not a valid digit.
+func (z *Int) SetString(s string, base int) (*Int, bool) {
+ return z.setFromScanner(strings.NewReader(s), base)
+}
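+
+// A hypothetical client-side sketch (not part of this file), assuming an
+// importing package with "math/big": base-0 parsing with prefix and underscore.
+//
+//	a, ok := new(big.Int).SetString("0x_1F", 0)  // a == 31, ok == true
+//	b, ok2 := new(big.Int).SetString("0x1F", 16) // b == nil, ok2 == false:
+//	                                             // with base != 0 the prefix
+//	                                             // is not recognized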
+
+// setFromScanner implements SetString given an io.ByteScanner.
+// For documentation see comments of SetString.
+func (z *Int) setFromScanner(r io.ByteScanner, base int) (*Int, bool) {
+ if _, _, err := z.scan(r, base); err != nil {
+ return nil, false
+ }
+ // entire content must have been consumed
+ if _, err := r.ReadByte(); err != io.EOF {
+ return nil, false
+ }
+ return z, true // err == io.EOF => scan consumed all content of r
+}
+
+// SetBytes interprets buf as the bytes of a big-endian unsigned
+// integer, sets z to that value, and returns z.
+func (z *Int) SetBytes(buf []byte) *Int {
+ z.abs = z.abs.setBytes(buf)
+ z.neg = false
+ return z
+}
+
+// Bytes returns the absolute value of x as a big-endian byte slice.
+//
+// To use a fixed length slice, or a preallocated one, use FillBytes.
+func (x *Int) Bytes() []byte {
+ buf := make([]byte, len(x.abs)*_S)
+ return buf[x.abs.bytes(buf):]
+}
+
+// FillBytes sets buf to the absolute value of x, storing it as a zero-extended
+// big-endian byte slice, and returns buf.
+//
+// If the absolute value of x doesn't fit in buf, FillBytes will panic.
+func (x *Int) FillBytes(buf []byte) []byte {
+ // Clear whole buffer. (This gets optimized into a memclr.)
+ for i := range buf {
+ buf[i] = 0
+ }
+ x.abs.bytes(buf)
+ return buf
+}
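+
+// A hypothetical client-side sketch (not part of this file), assuming an
+// importing package with "math/big": minimal vs. fixed-width serialization.
+//
+//	x := big.NewInt(0x1234)
+//	_ = x.Bytes()                    // [0x12 0x34] (minimal length)
+//	_ = x.FillBytes(make([]byte, 4)) // [0x00 0x00 0x12 0x34] (zero-extended)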
+
+// BitLen returns the length of the absolute value of x in bits.
+// The bit length of 0 is 0.
+func (x *Int) BitLen() int {
+ return x.abs.bitLen()
+}
+
+// TrailingZeroBits returns the number of consecutive least significant zero
+// bits of |x|.
+func (x *Int) TrailingZeroBits() uint {
+ return x.abs.trailingZeroBits()
+}
+
+// Exp sets z = x**y mod |m| (i.e. the sign of m is ignored), and returns z.
+// If m == nil or m == 0, z = x**y; if additionally y <= 0, then z = 1.
+// If m != 0, y < 0, and x and m are not relatively prime, z is unchanged
+// and nil is returned.
+//
+// Modular exponentiation of inputs of a particular size is not a
+// cryptographically constant-time operation.
+func (z *Int) Exp(x, y, m *Int) *Int {
+ // See Knuth, volume 2, section 4.6.3.
+ xWords := x.abs
+ if y.neg {
+ if m == nil || len(m.abs) == 0 {
+ return z.SetInt64(1)
+ }
+ // for y < 0: x**y mod m == (x**(-1))**|y| mod m
+ inverse := new(Int).ModInverse(x, m)
+ if inverse == nil {
+ return nil
+ }
+ xWords = inverse.abs
+ }
+ yWords := y.abs
+
+ var mWords nat
+ if m != nil {
+ if z == m || alias(z.abs, m.abs) {
+ m = new(Int).Set(m)
+ }
+ mWords = m.abs // m.abs may be nil for m == 0
+ }
+
+ z.abs = z.abs.expNN(xWords, yWords, mWords)
+ z.neg = len(z.abs) > 0 && x.neg && len(yWords) > 0 && yWords[0]&1 == 1 // 0 has no sign
+ if z.neg && len(mWords) > 0 {
+ // make modulus result positive
+ z.abs = z.abs.sub(mWords, z.abs) // z == x**y mod |m| && 0 <= z < |m|
+ z.neg = false
+ }
+
+ return z
+}
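+
+// A hypothetical client-side sketch (not part of this file), assuming an
+// importing package with "math/big":
+//
+//	z := new(big.Int).Exp(big.NewInt(4), big.NewInt(13), big.NewInt(497))
+//	// z == 445, since 4^13 == 67108864 == 445 (mod 497)
+//	i := new(big.Int).Exp(big.NewInt(3), big.NewInt(-1), big.NewInt(7))
+//	// i == 5, the inverse of 3 mod 7, since 3*5 == 15 == 1 (mod 7)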
+
+// GCD sets z to the greatest common divisor of a and b and returns z.
+// If x or y are not nil, GCD sets their value such that z = a*x + b*y.
+//
+// a and b may be positive, zero or negative. (Before Go 1.14 both had
+// to be > 0.) Regardless of the signs of a and b, z is always >= 0.
+//
+// If a == b == 0, GCD sets z = x = y = 0.
+//
+// If a == 0 and b != 0, GCD sets z = |b|, x = 0, y = sign(b) * 1.
+//
+// If a != 0 and b == 0, GCD sets z = |a|, x = sign(a) * 1, y = 0.
+func (z *Int) GCD(x, y, a, b *Int) *Int {
+ if len(a.abs) == 0 || len(b.abs) == 0 {
+ lenA, lenB, negA, negB := len(a.abs), len(b.abs), a.neg, b.neg
+ if lenA == 0 {
+ z.Set(b)
+ } else {
+ z.Set(a)
+ }
+ z.neg = false
+ if x != nil {
+ if lenA == 0 {
+ x.SetUint64(0)
+ } else {
+ x.SetUint64(1)
+ x.neg = negA
+ }
+ }
+ if y != nil {
+ if lenB == 0 {
+ y.SetUint64(0)
+ } else {
+ y.SetUint64(1)
+ y.neg = negB
+ }
+ }
+ return z
+ }
+
+ return z.lehmerGCD(x, y, a, b)
+}
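+
+// A hypothetical client-side sketch (not part of this file), assuming an
+// importing package with "math/big":
+//
+//	x, y := new(big.Int), new(big.Int)
+//	z := new(big.Int).GCD(x, y, big.NewInt(6), big.NewInt(15))
+//	// z == 3 and 6*x + 15*y == 3; here x == -2, y == 1
+//	// (one valid Bézout pair)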
+
+// lehmerSimulate attempts to simulate several Euclidean update steps
+// using the leading digits of A and B. It returns u0, u1, v0, v1
+// such that A and B can be updated as:
+//
+// A = u0*A + v0*B
+// B = u1*A + v1*B
+//
+// Requirements: A >= B and len(B.abs) >= 2
+// Since we are calculating with full words to avoid overflow,
+// we use 'even' to track the sign of the cosequences.
+// For even iterations: u0, v1 >= 0 && u1, v0 <= 0
+// For odd iterations: u0, v1 <= 0 && u1, v0 >= 0
+func lehmerSimulate(A, B *Int) (u0, u1, v0, v1 Word, even bool) {
+ // initialize the digits
+ var a1, a2, u2, v2 Word
+
+ m := len(B.abs) // m >= 2
+ n := len(A.abs) // n >= m >= 2
+
+ // extract the top Word of bits from A and B
+ h := nlz(A.abs[n-1])
+ a1 = A.abs[n-1]<<h | A.abs[n-2]>>(_W-h)
+ // B may have implicit zero words in the high bits if the lengths differ
+ switch {
+ case n == m:
+ a2 = B.abs[n-1]<<h | B.abs[n-2]>>(_W-h)
+ case n == m+1:
+ a2 = B.abs[n-2] >> (_W - h)
+ default:
+ a2 = 0
+ }
+
+ // Since we are calculating with full words to avoid overflow,
+ // we use 'even' to track the sign of the cosequences.
+ // For even iterations: u0, v1 >= 0 && u1, v0 <= 0
+ // For odd iterations: u0, v1 <= 0 && u1, v0 >= 0
+ // The first iteration starts with k=1 (odd).
+ even = false
+ // variables to track the cosequences
+ u0, u1, u2 = 0, 1, 0
+ v0, v1, v2 = 0, 0, 1
+
+ // Calculate the quotient and cosequences using Collins' stopping condition.
+ // Note that overflow of a Word is not possible when computing the remainder
+ // sequence and cosequences since the cosequence size is bounded by the input size.
+ // See section 4.2 of Jebelean for details.
+ for a2 >= v2 && a1-a2 >= v1+v2 {
+ q, r := a1/a2, a1%a2
+ a1, a2 = a2, r
+ u0, u1, u2 = u1, u2, u1+q*u2
+ v0, v1, v2 = v1, v2, v1+q*v2
+ even = !even
+ }
+ return
+}
+
+// lehmerUpdate updates the inputs A and B such that:
+//
+// A = u0*A + v0*B
+// B = u1*A + v1*B
+//
+// where the signs of u0, u1, v0, v1 are given by even:
+// for even == true: u0, v1 >= 0 && u1, v0 <= 0;
+// for even == false: u0, v1 <= 0 && u1, v0 >= 0.
+// q, r, s, t are temporary variables used to avoid allocations in the multiplications.
+func lehmerUpdate(A, B, q, r, s, t *Int, u0, u1, v0, v1 Word, even bool) {
+
+ t.abs = t.abs.setWord(u0)
+ s.abs = s.abs.setWord(v0)
+ t.neg = !even
+ s.neg = even
+
+ t.Mul(A, t)
+ s.Mul(B, s)
+
+ r.abs = r.abs.setWord(u1)
+ q.abs = q.abs.setWord(v1)
+ r.neg = even
+ q.neg = !even
+
+ r.Mul(A, r)
+ q.Mul(B, q)
+
+ A.Add(t, s)
+ B.Add(r, q)
+}
+
+// euclidUpdate performs a single step of the Euclidean GCD algorithm.
+// If extended is true, it also updates the cosequences Ua, Ub.
+func euclidUpdate(A, B, Ua, Ub, q, r, s, t *Int, extended bool) {
+ q, r = q.QuoRem(A, B, r)
+
+ *A, *B, *r = *B, *r, *A
+
+ if extended {
+ // Ua, Ub = Ub, Ua - q*Ub
+ t.Set(Ub)
+ s.Mul(Ub, q)
+ Ub.Sub(Ua, s)
+ Ua.Set(t)
+ }
+}
+
+// lehmerGCD sets z to the greatest common divisor of a and b,
+// which both must be != 0, and returns z.
+// If x or y are not nil, their values are set such that z = a*x + b*y.
+// See Knuth, The Art of Computer Programming, Vol. 2, Section 4.5.2, Algorithm L.
+// This implementation uses the improved condition by Collins requiring only one
+// quotient and avoiding the possibility of single Word overflow.
+// See Jebelean, "Improving the multiprecision Euclidean algorithm",
+// Design and Implementation of Symbolic Computation Systems, pp 45-58.
+// The cosequences are updated according to Algorithm 10.45 from
+// Cohen et al. "Handbook of Elliptic and Hyperelliptic Curve Cryptography" pp 192.
+func (z *Int) lehmerGCD(x, y, a, b *Int) *Int {
+ var A, B, Ua, Ub *Int
+
+ A = new(Int).Abs(a)
+ B = new(Int).Abs(b)
+
+ extended := x != nil || y != nil
+
+ if extended {
+ // Ua (Ub) tracks how many times input a has been accumulated into A (B).
+ Ua = new(Int).SetInt64(1)
+ Ub = new(Int)
+ }
+
+ // temp variables for multiprecision update
+ q := new(Int)
+ r := new(Int)
+ s := new(Int)
+ t := new(Int)
+
+ // ensure A >= B
+ if A.abs.cmp(B.abs) < 0 {
+ A, B = B, A
+ Ub, Ua = Ua, Ub
+ }
+
+ // loop invariant A >= B
+ for len(B.abs) > 1 {
+ // Attempt to calculate in single-precision using leading words of A and B.
+ u0, u1, v0, v1, even := lehmerSimulate(A, B)
+
+ // multiprecision Step
+ if v0 != 0 {
+ // Simulate the effect of the single-precision steps using the cosequences.
+ // A = u0*A + v0*B
+ // B = u1*A + v1*B
+ lehmerUpdate(A, B, q, r, s, t, u0, u1, v0, v1, even)
+
+ if extended {
+ // Ua = u0*Ua + v0*Ub
+ // Ub = u1*Ua + v1*Ub
+ lehmerUpdate(Ua, Ub, q, r, s, t, u0, u1, v0, v1, even)
+ }
+
+ } else {
+ // Single-digit calculations failed to simulate any quotients.
+ // Do a standard Euclidean step.
+ euclidUpdate(A, B, Ua, Ub, q, r, s, t, extended)
+ }
+ }
+
+ if len(B.abs) > 0 {
+ // extended Euclidean algorithm base case if B is a single Word
+ if len(A.abs) > 1 {
+ // A is longer than a single Word, so one update is needed.
+ euclidUpdate(A, B, Ua, Ub, q, r, s, t, extended)
+ }
+ if len(B.abs) > 0 {
+ // A and B are both a single Word.
+ aWord, bWord := A.abs[0], B.abs[0]
+ if extended {
+ var ua, ub, va, vb Word
+ ua, ub = 1, 0
+ va, vb = 0, 1
+ even := true
+ for bWord != 0 {
+ q, r := aWord/bWord, aWord%bWord
+ aWord, bWord = bWord, r
+ ua, ub = ub, ua+q*ub
+ va, vb = vb, va+q*vb
+ even = !even
+ }
+
+ t.abs = t.abs.setWord(ua)
+ s.abs = s.abs.setWord(va)
+ t.neg = !even
+ s.neg = even
+
+ t.Mul(Ua, t)
+ s.Mul(Ub, s)
+
+ Ua.Add(t, s)
+ } else {
+ for bWord != 0 {
+ aWord, bWord = bWord, aWord%bWord
+ }
+ }
+ A.abs[0] = aWord
+ }
+ }
+ negA := a.neg
+ if y != nil {
+		// avoid aliasing b, which is needed in the division below
+ if y == b {
+ B.Set(b)
+ } else {
+ B = b
+ }
+ // y = (z - a*x)/b
+ y.Mul(a, Ua) // y can safely alias a
+ if negA {
+ y.neg = !y.neg
+ }
+ y.Sub(A, y)
+ y.Div(y, B)
+ }
+
+ if x != nil {
+ *x = *Ua
+ if negA {
+ x.neg = !x.neg
+ }
+ }
+
+ *z = *A
+
+ return z
+}
+
+// Rand sets z to a pseudo-random number in [0, n) and returns z.
+//
+// As this uses the math/rand package, it must not be used for
+// security-sensitive work. Use crypto/rand.Int instead.
+func (z *Int) Rand(rnd *rand.Rand, n *Int) *Int {
+ // z.neg is not modified before the if check, because z and n might alias.
+ if n.neg || len(n.abs) == 0 {
+ z.neg = false
+ z.abs = nil
+ return z
+ }
+ z.neg = false
+ z.abs = z.abs.random(rnd, n.abs, n.abs.bitLen())
+ return z
+}
+
+// ModInverse sets z to the multiplicative inverse of g in the ring ℤ/nℤ
+// and returns z. If g and n are not relatively prime, g has no multiplicative
+// inverse in the ring ℤ/nℤ. In this case, z is unchanged and the return value
+// is nil. If n == 0, a division-by-zero run-time panic occurs.
+func (z *Int) ModInverse(g, n *Int) *Int {
+ // GCD expects parameters a and b to be > 0.
+ if n.neg {
+ var n2 Int
+ n = n2.Neg(n)
+ }
+ if g.neg {
+ var g2 Int
+ g = g2.Mod(g, n)
+ }
+ var d, x Int
+ d.GCD(&x, nil, g, n)
+
+ // if and only if d==1, g and n are relatively prime
+ if d.Cmp(intOne) != 0 {
+ return nil
+ }
+
+ // x and y are such that g*x + n*y = 1, therefore x is the inverse element,
+ // but it may be negative, so convert to the range 0 <= z < |n|
+ if x.neg {
+ z.Add(&x, n)
+ } else {
+ z.Set(&x)
+ }
+ return z
+}
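+
+// A hypothetical client-side sketch (not part of this file), assuming an
+// importing package with "math/big":
+//
+//	i := new(big.Int).ModInverse(big.NewInt(3), big.NewInt(11))
+//	// i == 4, since 3*4 == 12 == 1 (mod 11)
+//	j := new(big.Int).ModInverse(big.NewInt(4), big.NewInt(8))
+//	// j == nil: gcd(4, 8) == 4 != 1, so no inverse exists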
+
+// Jacobi returns the Jacobi symbol (x/y), either +1, -1, or 0.
+// The y argument must be an odd integer.
+func Jacobi(x, y *Int) int {
+ if len(y.abs) == 0 || y.abs[0]&1 == 0 {
+ panic(fmt.Sprintf("big: invalid 2nd argument to Int.Jacobi: need odd integer but got %s", y.String()))
+ }
+
+ // We use the formulation described in chapter 2, section 2.4,
+ // "The Yacas Book of Algorithms":
+ // http://yacas.sourceforge.net/Algo.book.pdf
+
+ var a, b, c Int
+ a.Set(x)
+ b.Set(y)
+ j := 1
+
+ if b.neg {
+ if a.neg {
+ j = -1
+ }
+ b.neg = false
+ }
+
+ for {
+ if b.Cmp(intOne) == 0 {
+ return j
+ }
+ if len(a.abs) == 0 {
+ return 0
+ }
+ a.Mod(&a, &b)
+ if len(a.abs) == 0 {
+ return 0
+ }
+ // a > 0
+
+ // handle factors of 2 in 'a'
+ s := a.abs.trailingZeroBits()
+ if s&1 != 0 {
+ bmod8 := b.abs[0] & 7
+ if bmod8 == 3 || bmod8 == 5 {
+ j = -j
+ }
+ }
+ c.Rsh(&a, s) // a = 2^s*c
+
+ // swap numerator and denominator
+ if b.abs[0]&3 == 3 && c.abs[0]&3 == 3 {
+ j = -j
+ }
+ a.Set(&b)
+ b.Set(&c)
+ }
+}
+
+// modSqrt3Mod4Prime uses the identity, for a quadratic residue x == u^2 mod p,
+//
+//	(x^((p+1)/4))^2 mod p
+//	== u^(p+1) mod p
+//	== u^2 mod p
+//
+// to calculate the square root of x quickly for primes p == 3 mod 4.
+func (z *Int) modSqrt3Mod4Prime(x, p *Int) *Int {
+ e := new(Int).Add(p, intOne) // e = p + 1
+ e.Rsh(e, 2) // e = (p + 1) / 4
+ z.Exp(x, e, p) // z = x^e mod p
+ return z
+}
+
+// modSqrt5Mod8Prime uses Atkin's observation that 2 is not a square mod p
+//
+// alpha == (2*a)^((p-5)/8) mod p
+// beta == 2*a*alpha^2 mod p is a square root of -1
+// b == a*alpha*(beta-1) mod p is a square root of a
+//
+// to calculate the square root of any quadratic residue mod p quickly for 5
+// mod 8 primes.
+func (z *Int) modSqrt5Mod8Prime(x, p *Int) *Int {
+ // p == 5 mod 8 implies p = e*8 + 5
+ // e is the quotient and 5 the remainder on division by 8
+ e := new(Int).Rsh(p, 3) // e = (p - 5) / 8
+ tx := new(Int).Lsh(x, 1) // tx = 2*x
+ alpha := new(Int).Exp(tx, e, p)
+ beta := new(Int).Mul(alpha, alpha)
+ beta.Mod(beta, p)
+ beta.Mul(beta, tx)
+ beta.Mod(beta, p)
+ beta.Sub(beta, intOne)
+ beta.Mul(beta, x)
+ beta.Mod(beta, p)
+ beta.Mul(beta, alpha)
+ z.Mod(beta, p)
+ return z
+}
+
+// modSqrtTonelliShanks uses the Tonelli-Shanks algorithm to find the square
+// root of a quadratic residue modulo any prime.
+func (z *Int) modSqrtTonelliShanks(x, p *Int) *Int {
+ // Break p-1 into s*2^e such that s is odd.
+ var s Int
+ s.Sub(p, intOne)
+ e := s.abs.trailingZeroBits()
+ s.Rsh(&s, e)
+
+ // find some non-square n
+ var n Int
+ n.SetInt64(2)
+ for Jacobi(&n, p) != -1 {
+ n.Add(&n, intOne)
+ }
+
+ // Core of the Tonelli-Shanks algorithm. Follows the description in
+ // section 6 of "Square roots from 1; 24, 51, 10 to Dan Shanks" by Ezra
+ // Brown:
+ // https://www.maa.org/sites/default/files/pdf/upload_library/22/Polya/07468342.di020786.02p0470a.pdf
+ var y, b, g, t Int
+ y.Add(&s, intOne)
+ y.Rsh(&y, 1)
+ y.Exp(x, &y, p) // y = x^((s+1)/2)
+ b.Exp(x, &s, p) // b = x^s
+ g.Exp(&n, &s, p) // g = n^s
+ r := e
+ for {
+ // find the least m such that ord_p(b) = 2^m
+ var m uint
+ t.Set(&b)
+ for t.Cmp(intOne) != 0 {
+ t.Mul(&t, &t).Mod(&t, p)
+ m++
+ }
+
+ if m == 0 {
+ return z.Set(&y)
+ }
+
+ t.SetInt64(0).SetBit(&t, int(r-m-1), 1).Exp(&g, &t, p)
+ // t = g^(2^(r-m-1)) mod p
+ g.Mul(&t, &t).Mod(&g, p) // g = g^(2^(r-m)) mod p
+ y.Mul(&y, &t).Mod(&y, p)
+ b.Mul(&b, &g).Mod(&b, p)
+ r = m
+ }
+}
+
+// ModSqrt sets z to a square root of x mod p if such a square root exists, and
+// returns z. The modulus p must be an odd prime. If x is not a square mod p,
+// ModSqrt leaves z unchanged and returns nil. This function panics if p is
+// not an odd integer; its behavior is undefined if p is odd but not prime.
+func (z *Int) ModSqrt(x, p *Int) *Int {
+ switch Jacobi(x, p) {
+ case -1:
+ return nil // x is not a square mod p
+ case 0:
+ return z.SetInt64(0) // sqrt(0) mod p = 0
+ case 1:
+ break
+ }
+ if x.neg || x.Cmp(p) >= 0 { // ensure 0 <= x < p
+ x = new(Int).Mod(x, p)
+ }
+
+ switch {
+ case p.abs[0]%4 == 3:
+ // Check whether p is 3 mod 4, and if so, use the faster algorithm.
+ return z.modSqrt3Mod4Prime(x, p)
+ case p.abs[0]%8 == 5:
+ // Check whether p is 5 mod 8, use Atkin's algorithm.
+ return z.modSqrt5Mod8Prime(x, p)
+ default:
+ // Otherwise, use Tonelli-Shanks.
+ return z.modSqrtTonelliShanks(x, p)
+ }
+}
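+
+// A hypothetical client-side sketch (not part of this file), assuming an
+// importing package with "math/big":
+//
+//	p := big.NewInt(7)
+//	_ = big.Jacobi(big.NewInt(2), p)            // 1: 2 is a residue mod 7
+//	r := new(big.Int).ModSqrt(big.NewInt(2), p) // r == 4 (p == 3 mod 4 path),
+//	                                            // since 4*4 == 16 == 2 (mod 7)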
+
+// Lsh sets z = x << n and returns z.
+func (z *Int) Lsh(x *Int, n uint) *Int {
+ z.abs = z.abs.shl(x.abs, n)
+ z.neg = x.neg
+ return z
+}
+
+// Rsh sets z = x >> n and returns z.
+func (z *Int) Rsh(x *Int, n uint) *Int {
+ if x.neg {
+ // (-x) >> s == ^(x-1) >> s == ^((x-1) >> s) == -(((x-1) >> s) + 1)
+ t := z.abs.sub(x.abs, natOne) // no underflow because |x| > 0
+ t = t.shr(t, n)
+ z.abs = t.add(t, natOne)
+ z.neg = true // z cannot be zero if x is negative
+ return z
+ }
+
+ z.abs = z.abs.shr(x.abs, n)
+ z.neg = false
+ return z
+}
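+
+// A hypothetical client-side sketch (not part of this file), assuming an
+// importing package with "math/big": shifts act on the infinite
+// two's-complement view, so Rsh rounds toward negative infinity.
+//
+//	_ = new(big.Int).Lsh(big.NewInt(3), 4)  // 48
+//	_ = new(big.Int).Rsh(big.NewInt(-5), 1) // -3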
+
+// Bit returns the value of the i'th bit of x. That is, it
+// returns (x>>i)&1. The bit index i must be >= 0.
+func (x *Int) Bit(i int) uint {
+ if i == 0 {
+ // optimization for common case: odd/even test of x
+ if len(x.abs) > 0 {
+ return uint(x.abs[0] & 1) // bit 0 is same for -x
+ }
+ return 0
+ }
+ if i < 0 {
+ panic("negative bit index")
+ }
+ if x.neg {
+ t := nat(nil).sub(x.abs, natOne)
+ return t.bit(uint(i)) ^ 1
+ }
+
+ return x.abs.bit(uint(i))
+}
+
+// SetBit sets z to x, with x's i'th bit set to b (0 or 1).
+// That is, if b is 1 SetBit sets z = x | (1 << i);
+// if b is 0 SetBit sets z = x &^ (1 << i). If b is not 0 or 1,
+// SetBit will panic.
+func (z *Int) SetBit(x *Int, i int, b uint) *Int {
+ if i < 0 {
+ panic("negative bit index")
+ }
+ if x.neg {
+ t := z.abs.sub(x.abs, natOne)
+ t = t.setBit(t, uint(i), b^1)
+ z.abs = t.add(t, natOne)
+ z.neg = len(z.abs) > 0
+ return z
+ }
+ z.abs = z.abs.setBit(x.abs, uint(i), b)
+ z.neg = false
+ return z
+}
+
+// And sets z = x & y and returns z.
+func (z *Int) And(x, y *Int) *Int {
+ if x.neg == y.neg {
+ if x.neg {
+ // (-x) & (-y) == ^(x-1) & ^(y-1) == ^((x-1) | (y-1)) == -(((x-1) | (y-1)) + 1)
+ x1 := nat(nil).sub(x.abs, natOne)
+ y1 := nat(nil).sub(y.abs, natOne)
+ z.abs = z.abs.add(z.abs.or(x1, y1), natOne)
+ z.neg = true // z cannot be zero if x and y are negative
+ return z
+ }
+
+ // x & y == x & y
+ z.abs = z.abs.and(x.abs, y.abs)
+ z.neg = false
+ return z
+ }
+
+ // x.neg != y.neg
+ if x.neg {
+ x, y = y, x // & is symmetric
+ }
+
+ // x & (-y) == x & ^(y-1) == x &^ (y-1)
+ y1 := nat(nil).sub(y.abs, natOne)
+ z.abs = z.abs.andNot(x.abs, y1)
+ z.neg = false
+ return z
+}
+
+// AndNot sets z = x &^ y and returns z.
+func (z *Int) AndNot(x, y *Int) *Int {
+ if x.neg == y.neg {
+ if x.neg {
+ // (-x) &^ (-y) == ^(x-1) &^ ^(y-1) == ^(x-1) & (y-1) == (y-1) &^ (x-1)
+ x1 := nat(nil).sub(x.abs, natOne)
+ y1 := nat(nil).sub(y.abs, natOne)
+ z.abs = z.abs.andNot(y1, x1)
+ z.neg = false
+ return z
+ }
+
+ // x &^ y == x &^ y
+ z.abs = z.abs.andNot(x.abs, y.abs)
+ z.neg = false
+ return z
+ }
+
+ if x.neg {
+ // (-x) &^ y == ^(x-1) &^ y == ^(x-1) & ^y == ^((x-1) | y) == -(((x-1) | y) + 1)
+ x1 := nat(nil).sub(x.abs, natOne)
+ z.abs = z.abs.add(z.abs.or(x1, y.abs), natOne)
+ z.neg = true // z cannot be zero if x is negative and y is positive
+ return z
+ }
+
+ // x &^ (-y) == x &^ ^(y-1) == x & (y-1)
+ y1 := nat(nil).sub(y.abs, natOne)
+ z.abs = z.abs.and(x.abs, y1)
+ z.neg = false
+ return z
+}
+
+// Or sets z = x | y and returns z.
+func (z *Int) Or(x, y *Int) *Int {
+ if x.neg == y.neg {
+ if x.neg {
+ // (-x) | (-y) == ^(x-1) | ^(y-1) == ^((x-1) & (y-1)) == -(((x-1) & (y-1)) + 1)
+ x1 := nat(nil).sub(x.abs, natOne)
+ y1 := nat(nil).sub(y.abs, natOne)
+ z.abs = z.abs.add(z.abs.and(x1, y1), natOne)
+ z.neg = true // z cannot be zero if x and y are negative
+ return z
+ }
+
+ // x | y == x | y
+ z.abs = z.abs.or(x.abs, y.abs)
+ z.neg = false
+ return z
+ }
+
+ // x.neg != y.neg
+ if x.neg {
+ x, y = y, x // | is symmetric
+ }
+
+ // x | (-y) == x | ^(y-1) == ^((y-1) &^ x) == -(^((y-1) &^ x) + 1)
+ y1 := nat(nil).sub(y.abs, natOne)
+ z.abs = z.abs.add(z.abs.andNot(y1, x.abs), natOne)
+ z.neg = true // z cannot be zero if one of x or y is negative
+ return z
+}
+
+// Xor sets z = x ^ y and returns z.
+func (z *Int) Xor(x, y *Int) *Int {
+ if x.neg == y.neg {
+ if x.neg {
+ // (-x) ^ (-y) == ^(x-1) ^ ^(y-1) == (x-1) ^ (y-1)
+ x1 := nat(nil).sub(x.abs, natOne)
+ y1 := nat(nil).sub(y.abs, natOne)
+ z.abs = z.abs.xor(x1, y1)
+ z.neg = false
+ return z
+ }
+
+ // x ^ y == x ^ y
+ z.abs = z.abs.xor(x.abs, y.abs)
+ z.neg = false
+ return z
+ }
+
+ // x.neg != y.neg
+ if x.neg {
+ x, y = y, x // ^ is symmetric
+ }
+
+ // x ^ (-y) == x ^ ^(y-1) == ^(x ^ (y-1)) == -((x ^ (y-1)) + 1)
+ y1 := nat(nil).sub(y.abs, natOne)
+ z.abs = z.abs.add(z.abs.xor(x.abs, y1), natOne)
+ z.neg = true // z cannot be zero if only one of x or y is negative
+ return z
+}
+
+// Not sets z = ^x and returns z.
+func (z *Int) Not(x *Int) *Int {
+ if x.neg {
+ // ^(-x) == ^(^(x-1)) == x-1
+ z.abs = z.abs.sub(x.abs, natOne)
+ z.neg = false
+ return z
+ }
+
+ // ^x == -x-1 == -(x+1)
+ z.abs = z.abs.add(x.abs, natOne)
+ z.neg = true // z cannot be zero if x is positive
+ return z
+}
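+
+// A hypothetical client-side sketch (not part of this file), assuming an
+// importing package with "math/big": negative operands behave as infinite
+// two's-complement bit strings (-5 == ...11011, 3 == ...00011).
+//
+//	x, y := big.NewInt(-5), big.NewInt(3)
+//	_ = new(big.Int).And(x, y) // 3  (...00011)
+//	_ = new(big.Int).Or(x, y)  // -5 (...11011)
+//	_ = new(big.Int).Xor(x, y) // -8 (...11000)
+//	_ = new(big.Int).Not(y)    // -4 (...11100)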
+
+// Sqrt sets z to ⌊√x⌋, the largest integer such that z² ≤ x, and returns z.
+// It panics if x is negative.
+func (z *Int) Sqrt(x *Int) *Int {
+ if x.neg {
+ panic("square root of negative number")
+ }
+ z.neg = false
+ z.abs = z.abs.sqrt(x.abs)
+ return z
+}
diff --git a/contrib/go/_std_1.19/src/math/big/intconv.go b/contrib/go/_std_1.19/src/math/big/intconv.go
new file mode 100644
index 0000000000..a3a4023caa
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/big/intconv.go
@@ -0,0 +1,255 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements int-to-string conversion functions.
+
+package big
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Text returns the string representation of x in the given base.
+// Base must be between 2 and 62, inclusive. The result uses the
+// lower-case letters 'a' to 'z' for digit values 10 to 35, and
+// the upper-case letters 'A' to 'Z' for digit values 36 to 61.
+// No prefix (such as "0x") is added to the string. If x is a nil
+// pointer it returns "<nil>".
+func (x *Int) Text(base int) string {
+ if x == nil {
+ return "<nil>"
+ }
+ return string(x.abs.itoa(x.neg, base))
+}
+
+// Append appends the string representation of x, as generated by
+// x.Text(base), to buf and returns the extended buffer.
+func (x *Int) Append(buf []byte, base int) []byte {
+ if x == nil {
+ return append(buf, "<nil>"...)
+ }
+ return append(buf, x.abs.itoa(x.neg, base)...)
+}
+
+// String returns the decimal representation of x as generated by
+// x.Text(10).
+func (x *Int) String() string {
+ return x.Text(10)
+}
+
+// write count copies of text to s
+func writeMultiple(s fmt.State, text string, count int) {
+ if len(text) > 0 {
+ b := []byte(text)
+ for ; count > 0; count-- {
+ s.Write(b)
+ }
+ }
+}
+
+var _ fmt.Formatter = intOne // *Int must implement fmt.Formatter
+
+// Format implements fmt.Formatter. It accepts the formats
+// 'b' (binary), 'o' (octal with 0 prefix), 'O' (octal with 0o prefix),
+// 'd' (decimal), 'x' (lowercase hexadecimal), and
+// 'X' (uppercase hexadecimal).
+// Also supported are the full suite of package fmt's format
+// flags for integral types, including '+' and ' ' for sign
+// control, '#' for leading zero in octal and for hexadecimal,
+// a leading "0x" or "0X" for "%#x" and "%#X" respectively,
+// specification of minimum digits precision, output field
+// width, space or zero padding, and '-' for left or right
+// justification.
+func (x *Int) Format(s fmt.State, ch rune) {
+ // determine base
+ var base int
+ switch ch {
+ case 'b':
+ base = 2
+ case 'o', 'O':
+ base = 8
+ case 'd', 's', 'v':
+ base = 10
+ case 'x', 'X':
+ base = 16
+ default:
+ // unknown format
+ fmt.Fprintf(s, "%%!%c(big.Int=%s)", ch, x.String())
+ return
+ }
+
+ if x == nil {
+ fmt.Fprint(s, "<nil>")
+ return
+ }
+
+ // determine sign character
+ sign := ""
+ switch {
+ case x.neg:
+ sign = "-"
+ case s.Flag('+'): // supersedes ' ' when both specified
+ sign = "+"
+ case s.Flag(' '):
+ sign = " "
+ }
+
+ // determine prefix characters for indicating output base
+ prefix := ""
+ if s.Flag('#') {
+ switch ch {
+ case 'b': // binary
+ prefix = "0b"
+ case 'o': // octal
+ prefix = "0"
+ case 'x': // hexadecimal
+ prefix = "0x"
+ case 'X':
+ prefix = "0X"
+ }
+ }
+ if ch == 'O' {
+ prefix = "0o"
+ }
+
+ digits := x.abs.utoa(base)
+ if ch == 'X' {
+ // faster than bytes.ToUpper
+ for i, d := range digits {
+ if 'a' <= d && d <= 'z' {
+ digits[i] = 'A' + (d - 'a')
+ }
+ }
+ }
+
+ // number of characters for the three classes of number padding
+ var left int // space characters to left of digits for right justification ("%8d")
+	var zeros int // zero characters as left-most digits ("%.8d")
+ var right int // space characters to right of digits for left justification ("%-8d")
+
+ // determine number padding from precision: the least number of digits to output
+ precision, precisionSet := s.Precision()
+ if precisionSet {
+ switch {
+ case len(digits) < precision:
+ zeros = precision - len(digits) // count of zero padding
+ case len(digits) == 1 && digits[0] == '0' && precision == 0:
+ return // print nothing if zero value (x == 0) and zero precision ("." or ".0")
+ }
+ }
+
+ // determine field pad from width: the least number of characters to output
+ length := len(sign) + len(prefix) + zeros + len(digits)
+ if width, widthSet := s.Width(); widthSet && length < width { // pad as specified
+ switch d := width - length; {
+ case s.Flag('-'):
+ // pad on the right with spaces; supersedes '0' when both specified
+ right = d
+ case s.Flag('0') && !precisionSet:
+ // pad with zeros unless precision also specified
+ zeros = d
+ default:
+ // pad on the left with spaces
+ left = d
+ }
+ }
+
+ // print number as [left pad][sign][prefix][zero pad][digits][right pad]
+ writeMultiple(s, " ", left)
+ writeMultiple(s, sign, 1)
+ writeMultiple(s, prefix, 1)
+ writeMultiple(s, "0", zeros)
+ s.Write(digits)
+ writeMultiple(s, " ", right)
+}
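+
+// A hypothetical client-side sketch (not part of this file), assuming an
+// importing package with "math/big" and "fmt":
+//
+//	x := big.NewInt(255)
+//	fmt.Printf("%d %b %o %#x %#X\n", x, x, x, x, x) // 255 11111111 377 0xff 0XFF
+//	fmt.Printf("%08d|%-8d|\n", x, x)                // 00000255|255     |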
+
+// scan sets z to the integer value corresponding to the longest possible prefix
+// read from r representing a signed integer number in a given conversion base.
+// It returns z, the actual conversion base used, and an error, if any. In the
+// error case, the value of z is undefined but the returned value is nil. The
+// syntax follows the syntax of integer literals in Go.
+//
+// The base argument must be 0 or a value from 2 through MaxBase. If the base
+// is 0, the string prefix determines the actual conversion base. A prefix of
+// “0b” or “0B” selects base 2; a “0”, “0o”, or “0O” prefix selects
+// base 8, and a “0x” or “0X” prefix selects base 16. Otherwise the selected
+// base is 10.
+func (z *Int) scan(r io.ByteScanner, base int) (*Int, int, error) {
+ // determine sign
+ neg, err := scanSign(r)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ // determine mantissa
+ z.abs, base, _, err = z.abs.scan(r, base, false)
+ if err != nil {
+ return nil, base, err
+ }
+ z.neg = len(z.abs) > 0 && neg // 0 has no sign
+
+ return z, base, nil
+}
+
+func scanSign(r io.ByteScanner) (neg bool, err error) {
+ var ch byte
+ if ch, err = r.ReadByte(); err != nil {
+ return false, err
+ }
+ switch ch {
+ case '-':
+ neg = true
+ case '+':
+ // nothing to do
+ default:
+ r.UnreadByte()
+ }
+ return
+}
+
+// byteReader is a local wrapper around fmt.ScanState;
+// it implements the ByteReader interface.
+type byteReader struct {
+ fmt.ScanState
+}
+
+func (r byteReader) ReadByte() (byte, error) {
+ ch, size, err := r.ReadRune()
+ if size != 1 && err == nil {
+ err = fmt.Errorf("invalid rune %#U", ch)
+ }
+ return byte(ch), err
+}
+
+func (r byteReader) UnreadByte() error {
+ return r.UnreadRune()
+}
+
+var _ fmt.Scanner = intOne // *Int must implement fmt.Scanner
+
+// Scan is a support routine for fmt.Scanner; it sets z to the value of
+// the scanned number. It accepts the formats 'b' (binary), 'o' (octal),
+// 'd' (decimal), 'x' (lowercase hexadecimal), and 'X' (uppercase hexadecimal).
+func (z *Int) Scan(s fmt.ScanState, ch rune) error {
+ s.SkipSpace() // skip leading space characters
+ base := 0
+ switch ch {
+ case 'b':
+ base = 2
+ case 'o':
+ base = 8
+ case 'd':
+ base = 10
+ case 'x', 'X':
+ base = 16
+ case 's', 'v':
+ // let scan determine the base
+ default:
+ return errors.New("Int.Scan: invalid verb")
+ }
+ _, _, err := z.scan(byteReader{s}, base)
+ return err
+}
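+
+// A hypothetical client-side sketch (not part of this file), assuming an
+// importing package with "math/big" and "fmt":
+//
+//	z := new(big.Int)
+//	if _, err := fmt.Sscan("18446744073709551617", z); err == nil {
+//		// z == 2^64 + 1
+//	}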
diff --git a/contrib/go/_std_1.19/src/math/big/intmarsh.go b/contrib/go/_std_1.19/src/math/big/intmarsh.go
new file mode 100644
index 0000000000..ce429ffc11
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/big/intmarsh.go
@@ -0,0 +1,83 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements encoding/decoding of Ints.
+
+package big
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Gob codec version. Permits backward-compatible changes to the encoding.
+const intGobVersion byte = 1
+
+// GobEncode implements the gob.GobEncoder interface.
+func (x *Int) GobEncode() ([]byte, error) {
+ if x == nil {
+ return nil, nil
+ }
+ buf := make([]byte, 1+len(x.abs)*_S) // extra byte for version and sign bit
+ i := x.abs.bytes(buf) - 1 // i >= 0
+ b := intGobVersion << 1 // make space for sign bit
+ if x.neg {
+ b |= 1
+ }
+ buf[i] = b
+ return buf[i:], nil
+}
+
+// GobDecode implements the gob.GobDecoder interface.
+func (z *Int) GobDecode(buf []byte) error {
+ if len(buf) == 0 {
+ // Other side sent a nil or default value.
+ *z = Int{}
+ return nil
+ }
+ b := buf[0]
+ if b>>1 != intGobVersion {
+ return fmt.Errorf("Int.GobDecode: encoding version %d not supported", b>>1)
+ }
+ z.neg = b&1 != 0
+ z.abs = z.abs.setBytes(buf[1:])
+ return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (x *Int) MarshalText() (text []byte, err error) {
+ if x == nil {
+ return []byte("<nil>"), nil
+ }
+ return x.abs.itoa(x.neg, 10), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (z *Int) UnmarshalText(text []byte) error {
+ if _, ok := z.setFromScanner(bytes.NewReader(text), 0); !ok {
+ return fmt.Errorf("math/big: cannot unmarshal %q into a *big.Int", text)
+ }
+ return nil
+}
+
+// The JSON marshalers are only here for API backward compatibility
+// (programs that explicitly look for these two methods). JSON works
+// fine with the TextMarshaler only.
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *Int) MarshalJSON() ([]byte, error) {
+ if x == nil {
+ return []byte("null"), nil
+ }
+ return x.abs.itoa(x.neg, 10), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (z *Int) UnmarshalJSON(text []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(text) == "null" {
+ return nil
+ }
+ return z.UnmarshalText(text)
+}
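+
+// A hypothetical client-side sketch (not part of this file), assuming an
+// importing package with "math/big" and "encoding/json": Ints marshal as
+// bare JSON numbers.
+//
+//	b, _ := json.Marshal(big.NewInt(42)) // b == []byte("42")
+//	var z big.Int
+//	_ = json.Unmarshal(b, &z)            // z == 42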
diff --git a/contrib/go/_std_1.19/src/math/big/nat.go b/contrib/go/_std_1.19/src/math/big/nat.go
new file mode 100644
index 0000000000..5cc42b80dc
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/big/nat.go
@@ -0,0 +1,1244 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements unsigned multi-precision integers (natural
+// numbers). They are the building blocks for the implementation
+// of signed integers, rationals, and floating-point numbers.
+//
+// Caution: This implementation relies on the function "alias"
+// which assumes that (nat) slice capacities are never
+// changed (no 3-operand slice expressions). If that
+// changes, alias needs to be updated for correctness.
+
+package big
+
+import (
+ "encoding/binary"
+ "math/bits"
+ "math/rand"
+ "sync"
+)
+
+// An unsigned integer x of the form
+//
+// x = x[n-1]*_B^(n-1) + x[n-2]*_B^(n-2) + ... + x[1]*_B + x[0]
+//
+// with 0 <= x[i] < _B and 0 <= i < n is stored in a slice of length n,
+// with the digits x[i] as the slice elements.
+//
+// A number is normalized if the slice contains no leading 0 digits.
+// During arithmetic operations, denormalized values may occur but are
+// always normalized before returning the final result. The normalized
+// representation of 0 is the empty or nil slice (length = 0).
+type nat []Word
+
+var (
+ natOne = nat{1}
+ natTwo = nat{2}
+ natFive = nat{5}
+ natTen = nat{10}
+)
+
+func (z nat) clear() {
+ for i := range z {
+ z[i] = 0
+ }
+}
+
+func (z nat) norm() nat {
+ i := len(z)
+ for i > 0 && z[i-1] == 0 {
+ i--
+ }
+ return z[0:i]
+}
+
+func (z nat) make(n int) nat {
+ if n <= cap(z) {
+ return z[:n] // reuse z
+ }
+ if n == 1 {
+ // Most nats start small and stay that way; don't over-allocate.
+ return make(nat, 1)
+ }
+ // Choosing a good value for e has significant performance impact
+ // because it increases the chance that a value can be reused.
+ const e = 4 // extra capacity
+ return make(nat, n, n+e)
+}
+
+func (z nat) setWord(x Word) nat {
+ if x == 0 {
+ return z[:0]
+ }
+ z = z.make(1)
+ z[0] = x
+ return z
+}
+
+func (z nat) setUint64(x uint64) nat {
+ // single-word value
+ if w := Word(x); uint64(w) == x {
+ return z.setWord(w)
+ }
+ // 2-word value
+ z = z.make(2)
+ z[1] = Word(x >> 32)
+ z[0] = Word(x)
+ return z
+}
+
+func (z nat) set(x nat) nat {
+ z = z.make(len(x))
+ copy(z, x)
+ return z
+}
+
+func (z nat) add(x, y nat) nat {
+ m := len(x)
+ n := len(y)
+
+ switch {
+ case m < n:
+ return z.add(y, x)
+ case m == 0:
+ // n == 0 because m >= n; result is 0
+ return z[:0]
+ case n == 0:
+ // result is x
+ return z.set(x)
+ }
+ // m > 0
+
+ z = z.make(m + 1)
+ c := addVV(z[0:n], x, y)
+ if m > n {
+ c = addVW(z[n:m], x[n:], c)
+ }
+ z[m] = c
+
+ return z.norm()
+}
+
+func (z nat) sub(x, y nat) nat {
+ m := len(x)
+ n := len(y)
+
+ switch {
+ case m < n:
+ panic("underflow")
+ case m == 0:
+ // n == 0 because m >= n; result is 0
+ return z[:0]
+ case n == 0:
+ // result is x
+ return z.set(x)
+ }
+ // m > 0
+
+ z = z.make(m)
+ c := subVV(z[0:n], x, y)
+ if m > n {
+ c = subVW(z[n:], x[n:], c)
+ }
+ if c != 0 {
+ panic("underflow")
+ }
+
+ return z.norm()
+}
+
+func (x nat) cmp(y nat) (r int) {
+ m := len(x)
+ n := len(y)
+ if m != n || m == 0 {
+ switch {
+ case m < n:
+ r = -1
+ case m > n:
+ r = 1
+ }
+ return
+ }
+
+ i := m - 1
+ for i > 0 && x[i] == y[i] {
+ i--
+ }
+
+ switch {
+ case x[i] < y[i]:
+ r = -1
+ case x[i] > y[i]:
+ r = 1
+ }
+ return
+}
+
+func (z nat) mulAddWW(x nat, y, r Word) nat {
+ m := len(x)
+ if m == 0 || y == 0 {
+ return z.setWord(r) // result is r
+ }
+ // m > 0
+
+ z = z.make(m + 1)
+ z[m] = mulAddVWW(z[0:m], x, y, r)
+
+ return z.norm()
+}
+
+// basicMul multiplies x and y and leaves the result in z.
+// The (non-normalized) result is placed in z[0 : len(x) + len(y)].
+func basicMul(z, x, y nat) {
+ z[0 : len(x)+len(y)].clear() // initialize z
+ for i, d := range y {
+ if d != 0 {
+ z[len(x)+i] = addMulVVW(z[i:i+len(x)], x, d)
+ }
+ }
+}
+
+// montgomery computes z mod m = x*y*2**(-n*_W) mod m,
+// assuming k = -1/m mod 2**_W.
+// z is used for storing the result which is returned;
+// z must not alias x, y or m.
+// See Gueron, "Efficient Software Implementations of Modular Exponentiation".
+// https://eprint.iacr.org/2011/239.pdf
+// In the terminology of that paper, this is an "Almost Montgomery Multiplication":
+// x and y are required to satisfy 0 <= z < 2**(n*_W) and then the result
+// z is guaranteed to satisfy 0 <= z < 2**(n*_W), but it may not be < m.
+func (z nat) montgomery(x, y, m nat, k Word, n int) nat {
+ // This code assumes x, y, m are all the same length, n.
+ // (required by addMulVVW and the for loop).
+ // It also assumes that x, y are already reduced mod m,
+ // or else the result will not be properly reduced.
+ if len(x) != n || len(y) != n || len(m) != n {
+ panic("math/big: mismatched montgomery number lengths")
+ }
+ z = z.make(n * 2)
+ z.clear()
+ var c Word
+ for i := 0; i < n; i++ {
+ d := y[i]
+ c2 := addMulVVW(z[i:n+i], x, d)
+ t := z[i] * k
+ c3 := addMulVVW(z[i:n+i], m, t)
+ cx := c + c2
+ cy := cx + c3
+ z[n+i] = cy
+ if cx < c2 || cy < c3 {
+ c = 1
+ } else {
+ c = 0
+ }
+ }
+ if c != 0 {
+ subVV(z[:n], z[n:], m)
+ } else {
+ copy(z[:n], z[n:])
+ }
+ return z[:n]
+}
+
+// Fast version of z[0:n+n>>1].add(z[0:n+n>>1], x[0:n]) w/o bounds checks.
+// Factored out for readability - do not use outside karatsuba.
+func karatsubaAdd(z, x nat, n int) {
+ if c := addVV(z[0:n], z, x); c != 0 {
+ addVW(z[n:n+n>>1], z[n:], c)
+ }
+}
+
+// Like karatsubaAdd, but subtracts.
+func karatsubaSub(z, x nat, n int) {
+ if c := subVV(z[0:n], z, x); c != 0 {
+ subVW(z[n:n+n>>1], z[n:], c)
+ }
+}
+
+// Operands that are shorter than karatsubaThreshold are multiplied using
+// "grade school" multiplication; for longer operands the Karatsuba algorithm
+// is used.
+var karatsubaThreshold = 40 // computed by calibrate_test.go
+
+// karatsuba multiplies x and y and leaves the result in z.
+// Both x and y must have the same length n and n must be a
+// power of 2. The result vector z must have len(z) >= 6*n.
+// The (non-normalized) result is placed in z[0 : 2*n].
+func karatsuba(z, x, y nat) {
+ n := len(y)
+
+ // Switch to basic multiplication if numbers are odd or small.
+ // (n is always even if karatsubaThreshold is even, but be
+ // conservative)
+ if n&1 != 0 || n < karatsubaThreshold || n < 2 {
+ basicMul(z, x, y)
+ return
+ }
+ // n&1 == 0 && n >= karatsubaThreshold && n >= 2
+
+ // Karatsuba multiplication is based on the observation that
+ // for two numbers x and y with:
+ //
+ // x = x1*b + x0
+ // y = y1*b + y0
+ //
+ // the product x*y can be obtained with 3 products z2, z1, z0
+ // instead of 4:
+ //
+ // x*y = x1*y1*b*b + (x1*y0 + x0*y1)*b + x0*y0
+ // = z2*b*b + z1*b + z0
+ //
+ // with:
+ //
+ // xd = x1 - x0
+ // yd = y0 - y1
+ //
+ // z1 = xd*yd + z2 + z0
+ // = (x1-x0)*(y0 - y1) + z2 + z0
+ // = x1*y0 - x1*y1 - x0*y0 + x0*y1 + z2 + z0
+ // = x1*y0 - z2 - z0 + x0*y1 + z2 + z0
+ // = x1*y0 + x0*y1
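+	//
+	// A concrete check with b = 100, x = 1234, y = 5678:
+	// x1, x0 = 12, 34 and y1, y0 = 56, 78, so z2 = 12*56 = 672,
+	// z0 = 34*78 = 2652, z1 = (12-34)*(78-56) + z2 + z0 = -484 + 3324 = 2840,
+	// and x*y = 672*100^2 + 2840*100 + 2652 = 7006652 = 1234*5678.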
+
+ // split x, y into "digits"
+ n2 := n >> 1 // n2 >= 1
+	x1, x0 := x[n2:], x[0:n2] // x = x1*b + x0
+ y1, y0 := y[n2:], y[0:n2] // y = y1*b + y0
+
+ // z is used for the result and temporary storage:
+ //
+ // 6*n 5*n 4*n 3*n 2*n 1*n 0*n
+ // z = [z2 copy|z0 copy| xd*yd | yd:xd | x1*y1 | x0*y0 ]
+ //
+ // For each recursive call of karatsuba, an unused slice of
+ // z is passed in that has (at least) half the length of the
+ // caller's z.
+
+ // compute z0 and z2 with the result "in place" in z
+ karatsuba(z, x0, y0) // z0 = x0*y0
+ karatsuba(z[n:], x1, y1) // z2 = x1*y1
+
+ // compute xd (or the negative value if underflow occurs)
+ s := 1 // sign of product xd*yd
+ xd := z[2*n : 2*n+n2]
+ if subVV(xd, x1, x0) != 0 { // x1-x0
+ s = -s
+ subVV(xd, x0, x1) // x0-x1
+ }
+
+ // compute yd (or the negative value if underflow occurs)
+ yd := z[2*n+n2 : 3*n]
+ if subVV(yd, y0, y1) != 0 { // y0-y1
+ s = -s
+ subVV(yd, y1, y0) // y1-y0
+ }
+
+ // p = (x1-x0)*(y0-y1) == x1*y0 - x1*y1 - x0*y0 + x0*y1 for s > 0
+ // p = (x0-x1)*(y0-y1) == x0*y0 - x0*y1 - x1*y0 + x1*y1 for s < 0
+ p := z[n*3:]
+ karatsuba(p, xd, yd)
+
+ // save original z2:z0
+ // (ok to use upper half of z since we're done recurring)
+ r := z[n*4:]
+ copy(r, z[:n*2])
+
+ // add up all partial products
+ //
+ // 2*n n 0
+ // z = [ z2 | z0 ]
+ // + [ z0 ]
+ // + [ z2 ]
+ // + [ p ]
+ //
+ karatsubaAdd(z[n2:], r, n)
+ karatsubaAdd(z[n2:], r[n:], n)
+ if s > 0 {
+ karatsubaAdd(z[n2:], p, n)
+ } else {
+ karatsubaSub(z[n2:], p, n)
+ }
+}
+
+// alias reports whether x and y share the same base array.
+//
+// Note: alias assumes that the capacity of underlying arrays
+// is never changed for nat values; i.e. that there are
+// no 3-operand slice expressions in this code (or worse,
+// reflect-based operations to the same effect).
+func alias(x, y nat) bool {
+ return cap(x) > 0 && cap(y) > 0 && &x[0:cap(x)][cap(x)-1] == &y[0:cap(y)][cap(y)-1]
+}
+
+// addAt implements z += x<<(_W*i); z must be long enough.
+// (we don't use nat.add because we need z to stay the same
+// slice, and we don't need to normalize z after each addition)
+func addAt(z, x nat, i int) {
+ if n := len(x); n > 0 {
+ if c := addVV(z[i:i+n], z[i:], x); c != 0 {
+ j := i + n
+ if j < len(z) {
+ addVW(z[j:], z[j:], c)
+ }
+ }
+ }
+}
+
+func max(x, y int) int {
+ if x > y {
+ return x
+ }
+ return y
+}
+
+// karatsubaLen computes an approximation to the maximum k <= n such that
+// k = p<<i for a number p <= threshold and an i >= 0. Thus, the
+// result is the largest number that can be divided repeatedly by 2 before
+// becoming about the value of threshold.
+func karatsubaLen(n, threshold int) int {
+ i := uint(0)
+ for n > threshold {
+ n >>= 1
+ i++
+ }
+ return n << i
+}
+
+func (z nat) mul(x, y nat) nat {
+ m := len(x)
+ n := len(y)
+
+ switch {
+ case m < n:
+ return z.mul(y, x)
+ case m == 0 || n == 0:
+ return z[:0]
+ case n == 1:
+ return z.mulAddWW(x, y[0], 0)
+ }
+ // m >= n > 1
+
+ // determine if z can be reused
+ if alias(z, x) || alias(z, y) {
+ z = nil // z is an alias for x or y - cannot reuse
+ }
+
+ // use basic multiplication if the numbers are small
+ if n < karatsubaThreshold {
+ z = z.make(m + n)
+ basicMul(z, x, y)
+ return z.norm()
+ }
+ // m >= n && n >= karatsubaThreshold && n >= 2
+
+ // determine Karatsuba length k such that
+ //
+ // x = xh*b + x0 (0 <= x0 < b)
+ // y = yh*b + y0 (0 <= y0 < b)
+ // b = 1<<(_W*k) ("base" of digits xi, yi)
+ //
+ k := karatsubaLen(n, karatsubaThreshold)
+ // k <= n
+
+ // multiply x0 and y0 via Karatsuba
+ x0 := x[0:k] // x0 is not normalized
+ y0 := y[0:k] // y0 is not normalized
+ z = z.make(max(6*k, m+n)) // enough space for karatsuba of x0*y0 and full result of x*y
+ karatsuba(z, x0, y0)
+ z = z[0 : m+n] // z has final length but may be incomplete
+ z[2*k:].clear() // upper portion of z is garbage (and 2*k <= m+n since k <= n <= m)
+
+ // If xh != 0 or yh != 0, add the missing terms to z. For
+ //
+ // xh = xi*b^i + ... + x2*b^2 + x1*b (0 <= xi < b)
+ // yh = y1*b (0 <= y1 < b)
+ //
+ // the missing terms are
+ //
+ // x0*y1*b and xi*y0*b^i, xi*y1*b^(i+1) for i > 0
+ //
+ // since all the yi for i > 1 are 0 by choice of k: If any of them
+ // were > 0, then yh >= b^2 and thus y >= b^2. Then k' = k*2 would
+ // be a larger valid threshold contradicting the assumption about k.
+ //
+ if k < n || m != n {
+ tp := getNat(3 * k)
+ t := *tp
+
+ // add x0*y1*b
+ x0 := x0.norm()
+ y1 := y[k:] // y1 is normalized because y is
+ t = t.mul(x0, y1) // update t so we don't lose t's underlying array
+ addAt(z, t, k)
+
+ // add xi*y0<<i, xi*y1*b<<(i+k)
+ y0 := y0.norm()
+ for i := k; i < len(x); i += k {
+ xi := x[i:]
+ if len(xi) > k {
+ xi = xi[:k]
+ }
+ xi = xi.norm()
+ t = t.mul(xi, y0)
+ addAt(z, t, i)
+ t = t.mul(xi, y1)
+ addAt(z, t, i+k)
+ }
+
+ putNat(tp)
+ }
+
+ return z.norm()
+}
+
+// basicSqr sets z = x*x and is asymptotically faster than basicMul
+// by about a factor of 2, but slower for small arguments due to overhead.
+// Requirements: len(x) > 0, len(z) == 2*len(x)
+// The (non-normalized) result is placed in z.
+func basicSqr(z, x nat) {
+ n := len(x)
+ tp := getNat(2 * n)
+ t := *tp // temporary variable to hold the products
+ t.clear()
+ z[1], z[0] = mulWW(x[0], x[0]) // the initial square
+ for i := 1; i < n; i++ {
+ d := x[i]
+ // z collects the squares x[i] * x[i]
+ z[2*i+1], z[2*i] = mulWW(d, d)
+ // t collects the products x[i] * x[j] where j < i
+ t[2*i] = addMulVVW(t[i:2*i], x[0:i], d)
+ }
+ t[2*n-1] = shlVU(t[1:2*n-1], t[1:2*n-1], 1) // double the j < i products
+ addVV(z, z, t) // combine the result
+ putNat(tp)
+}
+
+// karatsubaSqr squares x and leaves the result in z.
+// len(x) must be a power of 2 and len(z) >= 6*len(x).
+// The (non-normalized) result is placed in z[0 : 2*len(x)].
+//
+// The algorithm and the layout of z are the same as for karatsuba.
+func karatsubaSqr(z, x nat) {
+ n := len(x)
+
+ if n&1 != 0 || n < karatsubaSqrThreshold || n < 2 {
+ basicSqr(z[:2*n], x)
+ return
+ }
+
+ n2 := n >> 1
+ x1, x0 := x[n2:], x[0:n2]
+
+ karatsubaSqr(z, x0)
+ karatsubaSqr(z[n:], x1)
+
+ // s = sign(xd*yd) == -1 for xd != 0; s == 1 for xd == 0
+ xd := z[2*n : 2*n+n2]
+ if subVV(xd, x1, x0) != 0 {
+ subVV(xd, x0, x1)
+ }
+
+ p := z[n*3:]
+ karatsubaSqr(p, xd)
+
+ r := z[n*4:]
+ copy(r, z[:n*2])
+
+ karatsubaAdd(z[n2:], r, n)
+ karatsubaAdd(z[n2:], r[n:], n)
+ karatsubaSub(z[n2:], p, n) // s == -1 for p != 0; s == 1 for p == 0
+}
+
+// Operands that are shorter than basicSqrThreshold are squared using
+// "grade school" multiplication; for operands longer than karatsubaSqrThreshold
+// we use the Karatsuba algorithm optimized for x == y.
+var basicSqrThreshold = 20 // computed by calibrate_test.go
+var karatsubaSqrThreshold = 260 // computed by calibrate_test.go
+
+// z = x*x
+func (z nat) sqr(x nat) nat {
+ n := len(x)
+ switch {
+ case n == 0:
+ return z[:0]
+ case n == 1:
+ d := x[0]
+ z = z.make(2)
+ z[1], z[0] = mulWW(d, d)
+ return z.norm()
+ }
+
+ if alias(z, x) {
+ z = nil // z is an alias for x - cannot reuse
+ }
+
+ if n < basicSqrThreshold {
+ z = z.make(2 * n)
+ basicMul(z, x, x)
+ return z.norm()
+ }
+ if n < karatsubaSqrThreshold {
+ z = z.make(2 * n)
+ basicSqr(z, x)
+ return z.norm()
+ }
+
+ // Use Karatsuba multiplication optimized for x == y.
+ // The algorithm and layout of z are the same as for mul.
+
+ // z = (x1*b + x0)^2 = x1^2*b^2 + 2*x1*x0*b + x0^2
+
+ k := karatsubaLen(n, karatsubaSqrThreshold)
+
+ x0 := x[0:k]
+ z = z.make(max(6*k, 2*n))
+ karatsubaSqr(z, x0) // z = x0^2
+ z = z[0 : 2*n]
+ z[2*k:].clear()
+
+ if k < n {
+ tp := getNat(2 * k)
+ t := *tp
+ x0 := x0.norm()
+ x1 := x[k:]
+ t = t.mul(x0, x1)
+ addAt(z, t, k)
+ addAt(z, t, k) // z = 2*x1*x0*b + x0^2
+ t = t.sqr(x1)
+ addAt(z, t, 2*k) // z = x1^2*b^2 + 2*x1*x0*b + x0^2
+ putNat(tp)
+ }
+
+ return z.norm()
+}
+
+// mulRange computes the product of all the unsigned integers in the
+// range [a, b] inclusively. If a > b (empty range), the result is 1.
+func (z nat) mulRange(a, b uint64) nat {
+ switch {
+ case a == 0:
+ // cut long ranges short (optimization)
+ return z.setUint64(0)
+ case a > b:
+ return z.setUint64(1)
+ case a == b:
+ return z.setUint64(a)
+ case a+1 == b:
+ return z.mul(nat(nil).setUint64(a), nat(nil).setUint64(b))
+ }
+ m := (a + b) / 2
+ return z.mul(nat(nil).mulRange(a, m), nat(nil).mulRange(m+1, b))
+}
+
+// getNat returns a *nat of len n. The contents are not guaranteed to be zero.
+// The pool holds *nat to avoid allocation when converting to interface{}.
+func getNat(n int) *nat {
+ var z *nat
+ if v := natPool.Get(); v != nil {
+ z = v.(*nat)
+ }
+ if z == nil {
+ z = new(nat)
+ }
+ *z = z.make(n)
+ return z
+}
+
+func putNat(x *nat) {
+ natPool.Put(x)
+}
+
+var natPool sync.Pool
+
+// Length of x in bits. x must be normalized.
+func (x nat) bitLen() int {
+ if i := len(x) - 1; i >= 0 {
+ return i*_W + bits.Len(uint(x[i]))
+ }
+ return 0
+}
+
+// trailingZeroBits returns the number of consecutive least significant zero
+// bits of x.
+func (x nat) trailingZeroBits() uint {
+ if len(x) == 0 {
+ return 0
+ }
+ var i uint
+ for x[i] == 0 {
+ i++
+ }
+ // x[i] != 0
+ return i*_W + uint(bits.TrailingZeros(uint(x[i])))
+}
+
+func same(x, y nat) bool {
+ return len(x) == len(y) && len(x) > 0 && &x[0] == &y[0]
+}
+
+// z = x << s
+func (z nat) shl(x nat, s uint) nat {
+ if s == 0 {
+ if same(z, x) {
+ return z
+ }
+ if !alias(z, x) {
+ return z.set(x)
+ }
+ }
+
+ m := len(x)
+ if m == 0 {
+ return z[:0]
+ }
+ // m > 0
+
+ n := m + int(s/_W)
+ z = z.make(n + 1)
+ z[n] = shlVU(z[n-m:n], x, s%_W)
+ z[0 : n-m].clear()
+
+ return z.norm()
+}
+
+// z = x >> s
+func (z nat) shr(x nat, s uint) nat {
+ if s == 0 {
+ if same(z, x) {
+ return z
+ }
+ if !alias(z, x) {
+ return z.set(x)
+ }
+ }
+
+ m := len(x)
+ n := m - int(s/_W)
+ if n <= 0 {
+ return z[:0]
+ }
+ // n > 0
+
+ z = z.make(n)
+ shrVU(z, x[m-n:], s%_W)
+
+ return z.norm()
+}
+
+func (z nat) setBit(x nat, i uint, b uint) nat {
+ j := int(i / _W)
+ m := Word(1) << (i % _W)
+ n := len(x)
+ switch b {
+ case 0:
+ z = z.make(n)
+ copy(z, x)
+ if j >= n {
+ // no need to grow
+ return z
+ }
+ z[j] &^= m
+ return z.norm()
+ case 1:
+ if j >= n {
+ z = z.make(j + 1)
+ z[n:].clear()
+ } else {
+ z = z.make(n)
+ }
+ copy(z, x)
+ z[j] |= m
+ // no need to normalize
+ return z
+ }
+ panic("set bit is not 0 or 1")
+}
+
+// bit returns the value of the i'th bit, with lsb == bit 0.
+func (x nat) bit(i uint) uint {
+ j := i / _W
+ if j >= uint(len(x)) {
+ return 0
+ }
+ // 0 <= j < len(x)
+ return uint(x[j] >> (i % _W) & 1)
+}
+
+// sticky returns 1 if there's a 1 bit within the
+// i least significant bits, otherwise it returns 0.
+func (x nat) sticky(i uint) uint {
+ j := i / _W
+ if j >= uint(len(x)) {
+ if len(x) == 0 {
+ return 0
+ }
+ return 1
+ }
+ // 0 <= j < len(x)
+ for _, x := range x[:j] {
+ if x != 0 {
+ return 1
+ }
+ }
+ if x[j]<<(_W-i%_W) != 0 {
+ return 1
+ }
+ return 0
+}
+
+func (z nat) and(x, y nat) nat {
+ m := len(x)
+ n := len(y)
+ if m > n {
+ m = n
+ }
+ // m <= n
+
+ z = z.make(m)
+ for i := 0; i < m; i++ {
+ z[i] = x[i] & y[i]
+ }
+
+ return z.norm()
+}
+
+func (z nat) andNot(x, y nat) nat {
+ m := len(x)
+ n := len(y)
+ if n > m {
+ n = m
+ }
+ // m >= n
+
+ z = z.make(m)
+ for i := 0; i < n; i++ {
+ z[i] = x[i] &^ y[i]
+ }
+ copy(z[n:m], x[n:m])
+
+ return z.norm()
+}
+
+func (z nat) or(x, y nat) nat {
+ m := len(x)
+ n := len(y)
+ s := x
+ if m < n {
+ n, m = m, n
+ s = y
+ }
+ // m >= n
+
+ z = z.make(m)
+ for i := 0; i < n; i++ {
+ z[i] = x[i] | y[i]
+ }
+ copy(z[n:m], s[n:m])
+
+ return z.norm()
+}
+
+func (z nat) xor(x, y nat) nat {
+ m := len(x)
+ n := len(y)
+ s := x
+ if m < n {
+ n, m = m, n
+ s = y
+ }
+ // m >= n
+
+ z = z.make(m)
+ for i := 0; i < n; i++ {
+ z[i] = x[i] ^ y[i]
+ }
+ copy(z[n:m], s[n:m])
+
+ return z.norm()
+}
+
+// random creates a random integer in [0..limit), using the space in z if
+// possible. n is the bit length of limit.
+func (z nat) random(rand *rand.Rand, limit nat, n int) nat {
+ if alias(z, limit) {
+ z = nil // z is an alias for limit - cannot reuse
+ }
+ z = z.make(len(limit))
+
+ bitLengthOfMSW := uint(n % _W)
+ if bitLengthOfMSW == 0 {
+ bitLengthOfMSW = _W
+ }
+ mask := Word((1 << bitLengthOfMSW) - 1)
+
+ for {
+ switch _W {
+ case 32:
+ for i := range z {
+ z[i] = Word(rand.Uint32())
+ }
+ case 64:
+ for i := range z {
+ z[i] = Word(rand.Uint32()) | Word(rand.Uint32())<<32
+ }
+ default:
+ panic("unknown word size")
+ }
+ z[len(limit)-1] &= mask
+ if z.cmp(limit) < 0 {
+ break
+ }
+ }
+
+ return z.norm()
+}
+
+// If m != 0 (i.e., len(m) != 0), expNN sets z to x**y mod m;
+// otherwise it sets z to x**y. The result is the value of z.
+func (z nat) expNN(x, y, m nat) nat {
+ if alias(z, x) || alias(z, y) {
+ // We cannot allow in-place modification of x or y.
+ z = nil
+ }
+
+ // x**y mod 1 == 0
+ if len(m) == 1 && m[0] == 1 {
+ return z.setWord(0)
+ }
+ // m == 0 || m > 1
+
+ // x**0 == 1
+ if len(y) == 0 {
+ return z.setWord(1)
+ }
+ // y > 0
+
+ // x**1 mod m == x mod m
+ if len(y) == 1 && y[0] == 1 && len(m) != 0 {
+ _, z = nat(nil).div(z, x, m)
+ return z
+ }
+ // y > 1
+
+ if len(m) != 0 {
+ // We likely end up being as long as the modulus.
+ z = z.make(len(m))
+ }
+ z = z.set(x)
+
+ // If the base is non-trivial and the exponent is large, we use
+ // 4-bit, windowed exponentiation. This involves precomputing 14 values
+ // (x^2...x^15) but then reduces the number of multiply-reduces by a
+ // third. Even for a 32-bit exponent, this reduces the number of
+ // operations. Uses Montgomery method for odd moduli.
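+	//
+	// (Rough count: per _W-bit exponent word, the square-and-multiply loop
+	// below performs _W squarings and up to _W multiplies, while a 4-bit
+	// window performs _W squarings and _W/4 multiplies, in exchange for
+	// the 14 precomputed powers x^2..x^15.)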
+ if x.cmp(natOne) > 0 && len(y) > 1 && len(m) > 0 {
+ if m[0]&1 == 1 {
+ return z.expNNMontgomery(x, y, m)
+ }
+ return z.expNNWindowed(x, y, m)
+ }
+
+ v := y[len(y)-1] // v > 0 because y is normalized and y > 0
+ shift := nlz(v) + 1
+ v <<= shift
+ var q nat
+
+ const mask = 1 << (_W - 1)
+
+ // We walk through the bits of the exponent one by one. Each time we
+ // see a bit, we square, thus doubling the power. If the bit is a one,
+ // we also multiply by x, thus adding one to the power.
+
+ w := _W - int(shift)
+ // zz and r are used to avoid allocating in mul and div as
+ // otherwise the arguments would alias.
+ var zz, r nat
+ for j := 0; j < w; j++ {
+ zz = zz.sqr(z)
+ zz, z = z, zz
+
+ if v&mask != 0 {
+ zz = zz.mul(z, x)
+ zz, z = z, zz
+ }
+
+ if len(m) != 0 {
+ zz, r = zz.div(r, z, m)
+ zz, r, q, z = q, z, zz, r
+ }
+
+ v <<= 1
+ }
+
+ for i := len(y) - 2; i >= 0; i-- {
+ v = y[i]
+
+ for j := 0; j < _W; j++ {
+ zz = zz.sqr(z)
+ zz, z = z, zz
+
+ if v&mask != 0 {
+ zz = zz.mul(z, x)
+ zz, z = z, zz
+ }
+
+ if len(m) != 0 {
+ zz, r = zz.div(r, z, m)
+ zz, r, q, z = q, z, zz, r
+ }
+
+ v <<= 1
+ }
+ }
+
+ return z.norm()
+}
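+
+// For illustration, expNN is reachable through the exported Int.Exp;
+// a minimal sketch:
+//
+//	z := new(big.Int).Exp(big.NewInt(4), big.NewInt(13), big.NewInt(497))
+//	// z.Int64() == 445, i.e. 4**13 mod 497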
+
+// expNNWindowed calculates x**y mod m using a fixed, 4-bit window.
+func (z nat) expNNWindowed(x, y, m nat) nat {
+ // zz and r are used to avoid allocating in mul and div as otherwise
+ // the arguments would alias.
+ var zz, r nat
+
+ const n = 4
+ // powers[i] contains x^i.
+ var powers [1 << n]nat
+ powers[0] = natOne
+ powers[1] = x
+ for i := 2; i < 1<<n; i += 2 {
+ p2, p, p1 := &powers[i/2], &powers[i], &powers[i+1]
+ *p = p.sqr(*p2)
+ zz, r = zz.div(r, *p, m)
+ *p, r = r, *p
+ *p1 = p1.mul(*p, x)
+ zz, r = zz.div(r, *p1, m)
+ *p1, r = r, *p1
+ }
+
+ z = z.setWord(1)
+
+ for i := len(y) - 1; i >= 0; i-- {
+ yi := y[i]
+ for j := 0; j < _W; j += n {
+ if i != len(y)-1 || j != 0 {
+ // Unrolled loop for significant performance
+ // gain. Use go test -bench=".*" in crypto/rsa
+ // to check performance before making changes.
+ zz = zz.sqr(z)
+ zz, z = z, zz
+ zz, r = zz.div(r, z, m)
+ z, r = r, z
+
+ zz = zz.sqr(z)
+ zz, z = z, zz
+ zz, r = zz.div(r, z, m)
+ z, r = r, z
+
+ zz = zz.sqr(z)
+ zz, z = z, zz
+ zz, r = zz.div(r, z, m)
+ z, r = r, z
+
+ zz = zz.sqr(z)
+ zz, z = z, zz
+ zz, r = zz.div(r, z, m)
+ z, r = r, z
+ }
+
+ zz = zz.mul(z, powers[yi>>(_W-n)])
+ zz, z = z, zz
+ zz, r = zz.div(r, z, m)
+ z, r = r, z
+
+ yi <<= n
+ }
+ }
+
+ return z.norm()
+}
+
+// expNNMontgomery calculates x**y mod m using a fixed, 4-bit window.
+// Uses Montgomery representation.
+func (z nat) expNNMontgomery(x, y, m nat) nat {
+ numWords := len(m)
+
+ // We want the lengths of x and m to be equal.
+ // It is OK if x >= m as long as len(x) == len(m).
+ if len(x) > numWords {
+ _, x = nat(nil).div(nil, x, m)
+ // Note: now len(x) <= numWords, not guaranteed ==.
+ }
+ if len(x) < numWords {
+ rr := make(nat, numWords)
+ copy(rr, x)
+ x = rr
+ }
+
+ // Ideally the precomputations would be performed outside, and reused
+ // k0 = -m**-1 mod 2**_W. Algorithm from: Dumas, J.G. "On Newton–Raphson
+ // Iteration for Multiplicative Inverses Modulo Prime Powers".
+ k0 := 2 - m[0]
+ t := m[0] - 1
+ for i := 1; i < _W; i <<= 1 {
+ t *= t
+ k0 *= (t + 1)
+ }
+ k0 = -k0
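+	// Sanity-check sketch (hypothetical assertion, not part of this code):
+	// for odd m[0], the loop above yields k0 == -m[0]**-1 mod 2**_W, so
+	// wrap-around Word multiplication satisfies
+	//
+	//	m[0]*k0 + 1 == 0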
+
+ // RR = 2**(2*_W*len(m)) mod m
+ RR := nat(nil).setWord(1)
+ zz := nat(nil).shl(RR, uint(2*numWords*_W))
+ _, RR = nat(nil).div(RR, zz, m)
+ if len(RR) < numWords {
+ zz = zz.make(numWords)
+ copy(zz, RR)
+ RR = zz
+ }
+ // one = 1, with equal length to that of m
+ one := make(nat, numWords)
+ one[0] = 1
+
+ const n = 4
+ // powers[i] contains x^i
+ var powers [1 << n]nat
+ powers[0] = powers[0].montgomery(one, RR, m, k0, numWords)
+ powers[1] = powers[1].montgomery(x, RR, m, k0, numWords)
+ for i := 2; i < 1<<n; i++ {
+ powers[i] = powers[i].montgomery(powers[i-1], powers[1], m, k0, numWords)
+ }
+
+ // initialize z = 1 (Montgomery 1)
+ z = z.make(numWords)
+ copy(z, powers[0])
+
+ zz = zz.make(numWords)
+
+ // same windowed exponent, but with Montgomery multiplications
+ for i := len(y) - 1; i >= 0; i-- {
+ yi := y[i]
+ for j := 0; j < _W; j += n {
+ if i != len(y)-1 || j != 0 {
+ zz = zz.montgomery(z, z, m, k0, numWords)
+ z = z.montgomery(zz, zz, m, k0, numWords)
+ zz = zz.montgomery(z, z, m, k0, numWords)
+ z = z.montgomery(zz, zz, m, k0, numWords)
+ }
+ zz = zz.montgomery(z, powers[yi>>(_W-n)], m, k0, numWords)
+ z, zz = zz, z
+ yi <<= n
+ }
+ }
+ // convert to regular number
+ zz = zz.montgomery(z, one, m, k0, numWords)
+
+ // One last reduction, just in case.
+ // See golang.org/issue/13907.
+ if zz.cmp(m) >= 0 {
+ // Common case is m has high bit set; in that case,
+ // since zz is the same length as m, there can be just
+ // one multiple of m to remove. Just subtract.
+ // We think that the subtract should be sufficient in general,
+ // so do that unconditionally, but double-check,
+ // in case our beliefs are wrong.
+ // The div is not expected to be reached.
+ zz = zz.sub(zz, m)
+ if zz.cmp(m) >= 0 {
+ _, zz = nat(nil).div(nil, zz, m)
+ }
+ }
+
+ return zz.norm()
+}
+
+// bytes writes the value of z into buf using big-endian encoding.
+// The value of z is encoded in the slice buf[i:]. If the value of z
+// cannot be represented in buf, bytes panics. The number i of unused
+// bytes at the beginning of buf is returned as the result.
+func (z nat) bytes(buf []byte) (i int) {
+ i = len(buf)
+ for _, d := range z {
+ for j := 0; j < _S; j++ {
+ i--
+ if i >= 0 {
+ buf[i] = byte(d)
+ } else if byte(d) != 0 {
+ panic("math/big: buffer too small to fit value")
+ }
+ d >>= 8
+ }
+ }
+
+ if i < 0 {
+ i = 0
+ }
+ for i < len(buf) && buf[i] == 0 {
+ i++
+ }
+
+ return
+}
+
+// bigEndianWord returns the contents of buf interpreted as a big-endian encoded Word value.
+func bigEndianWord(buf []byte) Word {
+ if _W == 64 {
+ return Word(binary.BigEndian.Uint64(buf))
+ }
+ return Word(binary.BigEndian.Uint32(buf))
+}
+
+// setBytes interprets buf as the bytes of a big-endian unsigned
+// integer, sets z to that value, and returns z.
+func (z nat) setBytes(buf []byte) nat {
+ z = z.make((len(buf) + _S - 1) / _S)
+
+ i := len(buf)
+ for k := 0; i >= _S; k++ {
+ z[k] = bigEndianWord(buf[i-_S : i])
+ i -= _S
+ }
+ if i > 0 {
+ var d Word
+ for s := uint(0); i > 0; s += 8 {
+ d |= Word(buf[i-1]) << s
+ i--
+ }
+ z[len(z)-1] = d
+ }
+
+ return z.norm()
+}
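+
+// For illustration, bytes and setBytes back the exported Int.Bytes and
+// Int.SetBytes; a minimal round-trip sketch:
+//
+//	b := big.NewInt(0x010203).Bytes() // []byte{0x01, 0x02, 0x03}
+//	x := new(big.Int).SetBytes(b)     // x.Int64() == 0x010203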
+
+// sqrt sets z = ⌊√x⌋
+func (z nat) sqrt(x nat) nat {
+ if x.cmp(natOne) <= 0 {
+ return z.set(x)
+ }
+ if alias(z, x) {
+ z = nil
+ }
+
+ // Start with value known to be too large and repeat "z = ⌊(z + ⌊x/z⌋)/2⌋" until it stops getting smaller.
+ // See Brent and Zimmermann, Modern Computer Arithmetic, Algorithm 1.13 (SqrtInt).
+ // https://members.loria.fr/PZimmermann/mca/pub226.html
+ // If x is one less than a perfect square, the sequence oscillates between the correct z and z+1;
+ // otherwise it converges to the correct z and stays there.
+ var z1, z2 nat
+ z1 = z
+ z1 = z1.setUint64(1)
+ z1 = z1.shl(z1, uint(x.bitLen()+1)/2) // must be ≥ √x
+ for n := 0; ; n++ {
+ z2, _ = z2.div(nil, x, z1)
+ z2 = z2.add(z2, z1)
+ z2 = z2.shr(z2, 1)
+ if z2.cmp(z1) >= 0 {
+ // z1 is answer.
+ // Figure out whether z1 or z2 is currently aliased to z by looking at loop count.
+ if n&1 == 0 {
+ return z1
+ }
+ return z.set(z1)
+ }
+ z1, z2 = z2, z1
+ }
+}
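+
+// For illustration, sqrt backs the exported Int.Sqrt; a minimal sketch:
+//
+//	z := new(big.Int).Sqrt(big.NewInt(10)) // z.Int64() == 3, i.e. ⌊√10⌋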
diff --git a/contrib/go/_std_1.19/src/math/big/natconv.go b/contrib/go/_std_1.19/src/math/big/natconv.go
new file mode 100644
index 0000000000..21fdab53fd
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/big/natconv.go
@@ -0,0 +1,511 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements nat-to-string conversion functions.
+
+package big
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/bits"
+ "sync"
+)
+
+const digits = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+// Note: MaxBase = len(digits), but it must remain an untyped rune constant
+// for API compatibility.
+
+// MaxBase is the largest number base accepted for string conversions.
+const MaxBase = 10 + ('z' - 'a' + 1) + ('Z' - 'A' + 1)
+const maxBaseSmall = 10 + ('z' - 'a' + 1)
+
+// maxPow returns (b**n, n) such that b**n is the largest power b**n <= _M.
+// For instance maxPow(10) == (1e19, 19) for 19 decimal digits in a 64-bit Word.
+// In other words, at most n digits in base b fit into a Word.
+// TODO(gri) replace this with a table, generated at build time.
+func maxPow(b Word) (p Word, n int) {
+ p, n = b, 1 // assuming b <= _M
+ for max := _M / b; p <= max; {
+ // p == b**n && p <= max
+ p *= b
+ n++
+ }
+ // p == b**n && p <= _M
+ return
+}
+
+// pow returns x**n for n > 0, and 1 otherwise.
+func pow(x Word, n int) (p Word) {
+ // n == sum of bi * 2**i, for 0 <= i < imax, and bi is 0 or 1
+ // thus x**n == product of x**(2**i) for all i where bi == 1
+ // (Russian Peasant Method for exponentiation)
+ p = 1
+ for n > 0 {
+ if n&1 != 0 {
+ p *= x
+ }
+ x *= x
+ n >>= 1
+ }
+ return
+}
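+
+// For illustration, pow(3, 4) proceeds as follows (bits of n consumed
+// right to left):
+//
+//	n=4: n even -> p=1,  x=9
+//	n=2: n even -> p=1,  x=81
+//	n=1: n odd  -> p=81, done (3**4 == 81)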
+
+// scan errors
+var (
+ errNoDigits = errors.New("number has no digits")
+ errInvalSep = errors.New("'_' must separate successive digits")
+)
+
+// scan scans the number corresponding to the longest possible prefix
+// from r representing an unsigned number in a given conversion base.
+// scan returns the corresponding natural number res, the actual base b,
+// a digit count, and a read or syntax error err, if any.
+//
+// For base 0, an underscore character “_” may appear between a base
+// prefix and an adjacent digit, and between successive digits; such
+// underscores do not change the value of the number, or the returned
+// digit count. Incorrect placement of underscores is reported as an
+// error if there are no other errors. If base != 0, underscores are
+// not recognized and thus terminate scanning like any other character
+// that is not a valid radix point or digit.
+//
+// number = mantissa | prefix pmantissa .
+// prefix = "0" [ "b" | "B" | "o" | "O" | "x" | "X" ] .
+// mantissa = digits "." [ digits ] | digits | "." digits .
+// pmantissa = [ "_" ] digits "." [ digits ] | [ "_" ] digits | "." digits .
+// digits = digit { [ "_" ] digit } .
+// digit = "0" ... "9" | "a" ... "z" | "A" ... "Z" .
+//
+// Unless fracOk is set, the base argument must be 0 or a value between
+// 2 and MaxBase. If fracOk is set, the base argument must be one of
+// 0, 2, 8, 10, or 16. Providing an invalid base argument leads to a run-
+// time panic.
+//
+// For base 0, the number prefix determines the actual base: A prefix of
+// “0b” or “0B” selects base 2, “0o” or “0O” selects base 8, and
+// “0x” or “0X” selects base 16. If fracOk is false, a “0” prefix
+// (immediately followed by digits) selects base 8 as well. Otherwise,
+// the selected base is 10 and no prefix is accepted.
+//
+// If fracOk is set, a period followed by a fractional part is permitted.
+// The result value is computed as if there were no period present; and
+// the count value is used to determine the fractional part.
+//
+// For bases <= 36, lower and upper case letters are considered the same:
+// The letters 'a' to 'z' and 'A' to 'Z' represent digit values 10 to 35.
+// For bases > 36, the upper case letters 'A' to 'Z' represent the digit
+// values 36 to 61.
+//
+// A result digit count > 0 corresponds to the number of (non-prefix) digits
+// parsed. A digit count <= 0 indicates the presence of a period (if fracOk
+// is set, only), and -count is the number of fractional digits found.
+// In this case, the actual value of the scanned number is res * b**count.
+func (z nat) scan(r io.ByteScanner, base int, fracOk bool) (res nat, b, count int, err error) {
+ // reject invalid bases
+ baseOk := base == 0 ||
+ !fracOk && 2 <= base && base <= MaxBase ||
+ fracOk && (base == 2 || base == 8 || base == 10 || base == 16)
+ if !baseOk {
+ panic(fmt.Sprintf("invalid number base %d", base))
+ }
+
+ // prev encodes the previously seen char: it is one
+ // of '_', '0' (a digit), or '.' (anything else). A
+ // valid separator '_' may only occur after a digit
+ // and if base == 0.
+ prev := '.'
+ invalSep := false
+
+ // one char look-ahead
+ ch, err := r.ReadByte()
+
+ // determine actual base
+ b, prefix := base, 0
+ if base == 0 {
+ // actual base is 10 unless there's a base prefix
+ b = 10
+ if err == nil && ch == '0' {
+ prev = '0'
+ count = 1
+ ch, err = r.ReadByte()
+ if err == nil {
+ // possibly one of 0b, 0B, 0o, 0O, 0x, 0X
+ switch ch {
+ case 'b', 'B':
+ b, prefix = 2, 'b'
+ case 'o', 'O':
+ b, prefix = 8, 'o'
+ case 'x', 'X':
+ b, prefix = 16, 'x'
+ default:
+ if !fracOk {
+ b, prefix = 8, '0'
+ }
+ }
+ if prefix != 0 {
+ count = 0 // prefix is not counted
+ if prefix != '0' {
+ ch, err = r.ReadByte()
+ }
+ }
+ }
+ }
+ }
+
+ // convert string
+ // Algorithm: Collect digits in groups of at most n digits in di
+ // and then use mulAddWW for every such group to add them to the
+ // result.
+ z = z[:0]
+ b1 := Word(b)
+ bn, n := maxPow(b1) // at most n digits in base b1 fit into Word
+ di := Word(0) // 0 <= di < b1**i < bn
+ i := 0 // 0 <= i < n
+ dp := -1 // position of decimal point
+ for err == nil {
+ if ch == '.' && fracOk {
+ fracOk = false
+ if prev == '_' {
+ invalSep = true
+ }
+ prev = '.'
+ dp = count
+ } else if ch == '_' && base == 0 {
+ if prev != '0' {
+ invalSep = true
+ }
+ prev = '_'
+ } else {
+ // convert rune into digit value d1
+ var d1 Word
+ switch {
+ case '0' <= ch && ch <= '9':
+ d1 = Word(ch - '0')
+ case 'a' <= ch && ch <= 'z':
+ d1 = Word(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'Z':
+ if b <= maxBaseSmall {
+ d1 = Word(ch - 'A' + 10)
+ } else {
+ d1 = Word(ch - 'A' + maxBaseSmall)
+ }
+ default:
+ d1 = MaxBase + 1
+ }
+ if d1 >= b1 {
+ r.UnreadByte() // ch does not belong to number anymore
+ break
+ }
+ prev = '0'
+ count++
+
+ // collect d1 in di
+ di = di*b1 + d1
+ i++
+
+ // if di is "full", add it to the result
+ if i == n {
+ z = z.mulAddWW(z, bn, di)
+ di = 0
+ i = 0
+ }
+ }
+
+ ch, err = r.ReadByte()
+ }
+
+ if err == io.EOF {
+ err = nil
+ }
+
+ // other errors take precedence over invalid separators
+ if err == nil && (invalSep || prev == '_') {
+ err = errInvalSep
+ }
+
+ if count == 0 {
+ // no digits found
+ if prefix == '0' {
+ // there was only the octal prefix 0 (possibly followed by separators and digits > 7);
+ // interpret as decimal 0
+ return z[:0], 10, 1, err
+ }
+ err = errNoDigits // fall through; result will be 0
+ }
+
+ // add remaining digits to result
+ if i > 0 {
+ z = z.mulAddWW(z, pow(b1, i), di)
+ }
+ res = z.norm()
+
+ // adjust count for fraction, if any
+ if dp >= 0 {
+ // 0 <= dp <= count
+ count = dp - count
+ }
+
+ return
+}
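+
+// For illustration, scan is reachable through the exported Int.SetString;
+// with base 0, prefixes and separators are honored:
+//
+//	new(big.Int).SetString("0x_ff", 0) // 255, true
+//	new(big.Int).SetString("1_000", 0) // 1000, true
+//	new(big.Int).SetString("1__0", 0)  // nil, false ('_' must separate digits)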
+
+// utoa converts x to an ASCII representation in the given base;
+// base must be between 2 and MaxBase, inclusive.
+func (x nat) utoa(base int) []byte {
+ return x.itoa(false, base)
+}
+
+// itoa is like utoa but it prepends a '-' if neg && x != 0.
+func (x nat) itoa(neg bool, base int) []byte {
+ if base < 2 || base > MaxBase {
+ panic("invalid base")
+ }
+
+ // x == 0
+ if len(x) == 0 {
+ return []byte("0")
+ }
+ // len(x) > 0
+
+ // allocate buffer for conversion
+ i := int(float64(x.bitLen())/math.Log2(float64(base))) + 1 // off by 1 at most
+ if neg {
+ i++
+ }
+ s := make([]byte, i)
+
+ // convert power of two and non power of two bases separately
+ if b := Word(base); b == b&-b {
+ // shift is base b digit size in bits
+ shift := uint(bits.TrailingZeros(uint(b))) // shift > 0 because b >= 2
+ mask := Word(1<<shift - 1)
+ w := x[0] // current word
+ nbits := uint(_W) // number of unprocessed bits in w
+
+ // convert less-significant words (include leading zeros)
+ for k := 1; k < len(x); k++ {
+ // convert full digits
+ for nbits >= shift {
+ i--
+ s[i] = digits[w&mask]
+ w >>= shift
+ nbits -= shift
+ }
+
+ // convert any partial leading digit and advance to next word
+ if nbits == 0 {
+ // no partial digit remaining, just advance
+ w = x[k]
+ nbits = _W
+ } else {
+ // partial digit in current word w (== x[k-1]) and next word x[k]
+ w |= x[k] << nbits
+ i--
+ s[i] = digits[w&mask]
+
+ // advance
+ w = x[k] >> (shift - nbits)
+ nbits = _W - (shift - nbits)
+ }
+ }
+
+ // convert digits of most-significant word w (omit leading zeros)
+ for w != 0 {
+ i--
+ s[i] = digits[w&mask]
+ w >>= shift
+ }
+
+ } else {
+ bb, ndigits := maxPow(b)
+
+		// construct table of successive squares of bb**leafSize to use in subdivisions
+ // result (table != nil) <=> (len(x) > leafSize > 0)
+ table := divisors(len(x), b, ndigits, bb)
+
+ // preserve x, create local copy for use by convertWords
+ q := nat(nil).set(x)
+
+ // convert q to string s in base b
+ q.convertWords(s, b, ndigits, bb, table)
+
+ // strip leading zeros
+ // (x != 0; thus s must contain at least one non-zero digit
+ // and the loop will terminate)
+ i = 0
+ for s[i] == '0' {
+ i++
+ }
+ }
+
+ if neg {
+ i--
+ s[i] = '-'
+ }
+
+ return s[i:]
+}
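+
+// For illustration, utoa and itoa back the exported conversions, e.g.:
+//
+//	big.NewInt(255).Text(16) // "ff"
+//	big.NewInt(-10).Text(2)  // "-1010"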
+
+// Convert words of q to base b digits in s. If q is large, it is recursively "split in half"
+// by nat/nat division using tabulated divisors. Otherwise, it is converted iteratively using
+// repeated nat/Word division.
+//
+// The iterative method processes n Words by n divW() calls, each of which visits every Word in the
+// incrementally shortened q for a total of n + (n-1) + (n-2) ... + 2 + 1, or n(n+1)/2 divW()'s.
+// Recursive conversion divides q by its approximate square root, yielding two parts, each half
+// the size of q. Using the iterative method on both halves means 2 * (n/2)(n/2 + 1)/2 divW()'s
+// plus the expensive long div(). Asymptotically, the ratio is favorable at 1/2 the divW()'s, and
+// is made better by splitting the subblocks recursively. Best is to split blocks until one more
+// split would take longer (because of the nat/nat div()) than the twice as many divW()'s of the
+// iterative approach. This threshold is represented by leafSize. Benchmarking of leafSize in the
+// range 2..64 shows that values of 8 and 16 work well, with a 4x speedup at medium lengths and
+// ~30x for 20000 digits. Use nat_test.go's BenchmarkLeafSize tests to optimize leafSize for
+// specific hardware.
+func (q nat) convertWords(s []byte, b Word, ndigits int, bb Word, table []divisor) {
+ // split larger blocks recursively
+ if table != nil {
+ // len(q) > leafSize > 0
+ var r nat
+ index := len(table) - 1
+ for len(q) > leafSize {
+ // find divisor close to sqrt(q) if possible, but in any case < q
+			maxLength := q.bitLen() // ~= log2 q, or at least log2 of the largest possible q of this bit length
+ minLength := maxLength >> 1 // ~= log2 sqrt(q)
+ for index > 0 && table[index-1].nbits > minLength {
+ index-- // desired
+ }
+ if table[index].nbits >= maxLength && table[index].bbb.cmp(q) >= 0 {
+ index--
+ if index < 0 {
+ panic("internal inconsistency")
+ }
+ }
+
+ // split q into the two digit number (q'*bbb + r) to form independent subblocks
+ q, r = q.div(r, q, table[index].bbb)
+
+ // convert subblocks and collect results in s[:h] and s[h:]
+ h := len(s) - table[index].ndigits
+ r.convertWords(s[h:], b, ndigits, bb, table[0:index])
+ s = s[:h] // == q.convertWords(s, b, ndigits, bb, table[0:index+1])
+ }
+ }
+
+ // having split any large blocks now process the remaining (small) block iteratively
+ i := len(s)
+ var r Word
+ if b == 10 {
+ // hard-coding for 10 here speeds this up by 1.25x (allows for / and % by constants)
+ for len(q) > 0 {
+ // extract least significant, base bb "digit"
+ q, r = q.divW(q, bb)
+ for j := 0; j < ndigits && i > 0; j++ {
+ i--
+ // avoid % computation since r%10 == r - int(r/10)*10;
+ // this appears to be faster for BenchmarkString10000Base10
+ // and smaller strings (but a bit slower for larger ones)
+ t := r / 10
+ s[i] = '0' + byte(r-t*10)
+ r = t
+ }
+ }
+ } else {
+ for len(q) > 0 {
+ // extract least significant, base bb "digit"
+ q, r = q.divW(q, bb)
+ for j := 0; j < ndigits && i > 0; j++ {
+ i--
+ s[i] = digits[r%b]
+ r /= b
+ }
+ }
+ }
+
+ // prepend high-order zeros
+ for i > 0 { // while need more leading zeros
+ i--
+ s[i] = '0'
+ }
+}
+
+// Split blocks greater than leafSize Words (or set to 0 to disable recursive conversion)
+// Benchmark and configure leafSize using: go test -bench="Leaf"
+//
+// 8 and 16 effective on 3.0 GHz Xeon "Clovertown" CPU (128 byte cache lines)
+// 8 and 16 effective on 2.66 GHz Core 2 Duo "Penryn" CPU
+var leafSize int = 8 // number of Word-size binary values treated as a monolithic block
+
+type divisor struct {
+ bbb nat // divisor
+ nbits int // bit length of divisor (discounting leading zeros) ~= log2(bbb)
+ ndigits int // digit length of divisor in terms of output base digits
+}
+
+var cacheBase10 struct {
+ sync.Mutex
+ table [64]divisor // cached divisors for base 10
+}
+
+// expWW computes x**y
+func (z nat) expWW(x, y Word) nat {
+ return z.expNN(nat(nil).setWord(x), nat(nil).setWord(y), nil)
+}
+
+// construct table of powers of bb**leafSize to use in subdivisions
+func divisors(m int, b Word, ndigits int, bb Word) []divisor {
+ // only compute table when recursive conversion is enabled and x is large
+ if leafSize == 0 || m <= leafSize {
+ return nil
+ }
+
+ // determine k where (bb**leafSize)**(2**k) >= sqrt(x)
+ k := 1
+ for words := leafSize; words < m>>1 && k < len(cacheBase10.table); words <<= 1 {
+ k++
+ }
+
+ // reuse and extend existing table of divisors or create new table as appropriate
+ var table []divisor // for b == 10, table overlaps with cacheBase10.table
+ if b == 10 {
+ cacheBase10.Lock()
+ table = cacheBase10.table[0:k] // reuse old table for this conversion
+ } else {
+ table = make([]divisor, k) // create new table for this conversion
+ }
+
+ // extend table
+ if table[k-1].ndigits == 0 {
+ // add new entries as needed
+ var larger nat
+ for i := 0; i < k; i++ {
+ if table[i].ndigits == 0 {
+ if i == 0 {
+ table[0].bbb = nat(nil).expWW(bb, Word(leafSize))
+ table[0].ndigits = ndigits * leafSize
+ } else {
+ table[i].bbb = nat(nil).sqr(table[i-1].bbb)
+ table[i].ndigits = 2 * table[i-1].ndigits
+ }
+
+ // optimization: exploit aggregated extra bits in macro blocks
+ larger = nat(nil).set(table[i].bbb)
+ for mulAddVWW(larger, larger, b, 0) == 0 {
+ table[i].bbb = table[i].bbb.set(larger)
+ table[i].ndigits++
+ }
+
+ table[i].nbits = table[i].bbb.bitLen()
+ }
+ }
+ }
+
+ if b == 10 {
+ cacheBase10.Unlock()
+ }
+
+ return table
+}
diff --git a/contrib/go/_std_1.18/src/math/big/natdiv.go b/contrib/go/_std_1.19/src/math/big/natdiv.go
index 882bb6d3ba..882bb6d3ba 100644
--- a/contrib/go/_std_1.18/src/math/big/natdiv.go
+++ b/contrib/go/_std_1.19/src/math/big/natdiv.go
diff --git a/contrib/go/_std_1.18/src/math/big/prime.go b/contrib/go/_std_1.19/src/math/big/prime.go
index d9a5f1ec96..d9a5f1ec96 100644
--- a/contrib/go/_std_1.18/src/math/big/prime.go
+++ b/contrib/go/_std_1.19/src/math/big/prime.go
diff --git a/contrib/go/_std_1.19/src/math/big/rat.go b/contrib/go/_std_1.19/src/math/big/rat.go
new file mode 100644
index 0000000000..700a643265
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/big/rat.go
@@ -0,0 +1,542 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements multi-precision rational numbers.
+
+package big
+
+import (
+ "fmt"
+ "math"
+)
+
+// A Rat represents a quotient a/b of arbitrary precision.
+// The zero value for a Rat represents the value 0.
+//
+// Operations always take pointer arguments (*Rat) rather
+// than Rat values, and each unique Rat value requires
+// its own unique *Rat pointer. To "copy" a Rat value,
+// an existing (or newly allocated) Rat must be set to
+// a new value using the Rat.Set method; shallow copies
+// of Rats are not supported and may lead to errors.
+type Rat struct {
+ // To make zero values for Rat work w/o initialization,
+ // a zero value of b (len(b) == 0) acts like b == 1. At
+ // the earliest opportunity (when an assignment to the Rat
+ // is made), such uninitialized denominators are set to 1.
+ // a.neg determines the sign of the Rat, b.neg is ignored.
+ a, b Int
+}
+
+// NewRat creates a new Rat with numerator a and denominator b.
+func NewRat(a, b int64) *Rat {
+ return new(Rat).SetFrac64(a, b)
+}
+
+// SetFloat64 sets z to exactly f and returns z.
+// If f is not finite, SetFloat64 returns nil.
+func (z *Rat) SetFloat64(f float64) *Rat {
+ const expMask = 1<<11 - 1
+ bits := math.Float64bits(f)
+ mantissa := bits & (1<<52 - 1)
+ exp := int((bits >> 52) & expMask)
+ switch exp {
+ case expMask: // non-finite
+ return nil
+ case 0: // denormal
+ exp -= 1022
+ default: // normal
+ mantissa |= 1 << 52
+ exp -= 1023
+ }
+
+ shift := 52 - exp
+
+	// Optimization (?): partially pre-normalize.
+ for mantissa&1 == 0 && shift > 0 {
+ mantissa >>= 1
+ shift--
+ }
+
+ z.a.SetUint64(mantissa)
+ z.a.neg = f < 0
+ z.b.Set(intOne)
+ if shift > 0 {
+ z.b.Lsh(&z.b, uint(shift))
+ } else {
+ z.a.Lsh(&z.a, uint(-shift))
+ }
+ return z.norm()
+}
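+
+// For illustration, the binary64 value nearest to 0.1 is recovered exactly:
+//
+//	r := new(big.Rat).SetFloat64(0.1)
+//	// r.String() == "3602879701896397/36028797018963968" (denominator 2**55)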
+
+// quotToFloat32 returns the non-negative float32 value
+// nearest to the quotient a/b, using round-to-even in
+// halfway cases. It does not mutate its arguments.
+// Preconditions: b is non-zero; a and b have no common factors.
+func quotToFloat32(a, b nat) (f float32, exact bool) {
+ const (
+ // float size in bits
+ Fsize = 32
+
+ // mantissa
+ Msize = 23
+ Msize1 = Msize + 1 // incl. implicit 1
+ Msize2 = Msize1 + 1
+
+ // exponent
+ Esize = Fsize - Msize1
+ Ebias = 1<<(Esize-1) - 1
+ Emin = 1 - Ebias
+ Emax = Ebias
+ )
+
+ // TODO(adonovan): specialize common degenerate cases: 1.0, integers.
+ alen := a.bitLen()
+ if alen == 0 {
+ return 0, true
+ }
+ blen := b.bitLen()
+ if blen == 0 {
+ panic("division by zero")
+ }
+
+	// 1. Left-shift A or B such that quotient A/B is in [1<<Msize1, 1<<(Msize2+1))
+ // (Msize2 bits if A < B when they are left-aligned, Msize2+1 bits if A >= B).
+ // This is 2 or 3 more than the float32 mantissa field width of Msize:
+ // - the optional extra bit is shifted away in step 3 below.
+ // - the high-order 1 is omitted in "normal" representation;
+ // - the low-order 1 will be used during rounding then discarded.
+ exp := alen - blen
+ var a2, b2 nat
+ a2 = a2.set(a)
+ b2 = b2.set(b)
+ if shift := Msize2 - exp; shift > 0 {
+ a2 = a2.shl(a2, uint(shift))
+ } else if shift < 0 {
+ b2 = b2.shl(b2, uint(-shift))
+ }
+
+ // 2. Compute quotient and remainder (q, r). NB: due to the
+ // extra shift, the low-order bit of q is logically the
+ // high-order bit of r.
+ var q nat
+ q, r := q.div(a2, a2, b2) // (recycle a2)
+ mantissa := low32(q)
+ haveRem := len(r) > 0 // mantissa&1 && !haveRem => remainder is exactly half
+
+ // 3. If quotient didn't fit in Msize2 bits, redo division by b2<<1
+ // (in effect---we accomplish this incrementally).
+ if mantissa>>Msize2 == 1 {
+ if mantissa&1 == 1 {
+ haveRem = true
+ }
+ mantissa >>= 1
+ exp++
+ }
+ if mantissa>>Msize1 != 1 {
+ panic(fmt.Sprintf("expected exactly %d bits of result", Msize2))
+ }
+
+ // 4. Rounding.
+ if Emin-Msize <= exp && exp <= Emin {
+ // Denormal case; lose 'shift' bits of precision.
+ shift := uint(Emin - (exp - 1)) // [1..Esize1)
+ lostbits := mantissa & (1<<shift - 1)
+ haveRem = haveRem || lostbits != 0
+ mantissa >>= shift
+ exp = 2 - Ebias // == exp + shift
+ }
+ // Round q using round-half-to-even.
+ exact = !haveRem
+ if mantissa&1 != 0 {
+ exact = false
+ if haveRem || mantissa&2 != 0 {
+ if mantissa++; mantissa >= 1<<Msize2 {
+ // Complete rollover 11...1 => 100...0, so shift is safe
+ mantissa >>= 1
+ exp++
+ }
+ }
+ }
+ mantissa >>= 1 // discard rounding bit. Mantissa now scaled by 1<<Msize1.
+
+ f = float32(math.Ldexp(float64(mantissa), exp-Msize1))
+ if math.IsInf(float64(f), 0) {
+ exact = false
+ }
+ return
+}
+
+// quotToFloat64 returns the non-negative float64 value
+// nearest to the quotient a/b, using round-to-even in
+// halfway cases. It does not mutate its arguments.
+// Preconditions: b is non-zero; a and b have no common factors.
+func quotToFloat64(a, b nat) (f float64, exact bool) {
+ const (
+ // float size in bits
+ Fsize = 64
+
+ // mantissa
+ Msize = 52
+ Msize1 = Msize + 1 // incl. implicit 1
+ Msize2 = Msize1 + 1
+
+ // exponent
+ Esize = Fsize - Msize1
+ Ebias = 1<<(Esize-1) - 1
+ Emin = 1 - Ebias
+ Emax = Ebias
+ )
+
+ // TODO(adonovan): specialize common degenerate cases: 1.0, integers.
+ alen := a.bitLen()
+ if alen == 0 {
+ return 0, true
+ }
+ blen := b.bitLen()
+ if blen == 0 {
+ panic("division by zero")
+ }
+
+	// 1. Left-shift A or B such that quotient A/B is in [1<<Msize1, 1<<(Msize2+1))
+ // (Msize2 bits if A < B when they are left-aligned, Msize2+1 bits if A >= B).
+ // This is 2 or 3 more than the float64 mantissa field width of Msize:
+ // - the optional extra bit is shifted away in step 3 below.
+ // - the high-order 1 is omitted in "normal" representation;
+ // - the low-order 1 will be used during rounding then discarded.
+ exp := alen - blen
+ var a2, b2 nat
+ a2 = a2.set(a)
+ b2 = b2.set(b)
+ if shift := Msize2 - exp; shift > 0 {
+ a2 = a2.shl(a2, uint(shift))
+ } else if shift < 0 {
+ b2 = b2.shl(b2, uint(-shift))
+ }
+
+ // 2. Compute quotient and remainder (q, r). NB: due to the
+ // extra shift, the low-order bit of q is logically the
+ // high-order bit of r.
+ var q nat
+ q, r := q.div(a2, a2, b2) // (recycle a2)
+ mantissa := low64(q)
+ haveRem := len(r) > 0 // mantissa&1 && !haveRem => remainder is exactly half
+
+ // 3. If quotient didn't fit in Msize2 bits, redo division by b2<<1
+ // (in effect---we accomplish this incrementally).
+ if mantissa>>Msize2 == 1 {
+ if mantissa&1 == 1 {
+ haveRem = true
+ }
+ mantissa >>= 1
+ exp++
+ }
+ if mantissa>>Msize1 != 1 {
+ panic(fmt.Sprintf("expected exactly %d bits of result", Msize2))
+ }
+
+ // 4. Rounding.
+ if Emin-Msize <= exp && exp <= Emin {
+ // Denormal case; lose 'shift' bits of precision.
+ shift := uint(Emin - (exp - 1)) // [1..Esize1)
+ lostbits := mantissa & (1<<shift - 1)
+ haveRem = haveRem || lostbits != 0
+ mantissa >>= shift
+ exp = 2 - Ebias // == exp + shift
+ }
+ // Round q using round-half-to-even.
+ exact = !haveRem
+ if mantissa&1 != 0 {
+ exact = false
+ if haveRem || mantissa&2 != 0 {
+ if mantissa++; mantissa >= 1<<Msize2 {
+ // Complete rollover 11...1 => 100...0, so shift is safe
+ mantissa >>= 1
+ exp++
+ }
+ }
+ }
+ mantissa >>= 1 // discard rounding bit. Mantissa now scaled by 1<<Msize1.
+
+ f = math.Ldexp(float64(mantissa), exp-Msize1)
+ if math.IsInf(f, 0) {
+ exact = false
+ }
+ return
+}
+
+// Float32 returns the nearest float32 value for x and a bool indicating
+// whether f represents x exactly. If the magnitude of x is too large to
+// be represented by a float32, f is an infinity and exact is false.
+// The sign of f always matches the sign of x, even if f == 0.
+func (x *Rat) Float32() (f float32, exact bool) {
+ b := x.b.abs
+ if len(b) == 0 {
+ b = natOne
+ }
+ f, exact = quotToFloat32(x.a.abs, b)
+ if x.a.neg {
+ f = -f
+ }
+ return
+}
+
+// Float64 returns the nearest float64 value for x and a bool indicating
+// whether f represents x exactly. If the magnitude of x is too large to
+// be represented by a float64, f is an infinity and exact is false.
+// The sign of f always matches the sign of x, even if f == 0.
+func (x *Rat) Float64() (f float64, exact bool) {
+ b := x.b.abs
+ if len(b) == 0 {
+ b = natOne
+ }
+ f, exact = quotToFloat64(x.a.abs, b)
+ if x.a.neg {
+ f = -f
+ }
+ return
+}
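+
+// For illustration:
+//
+//	f, exact := big.NewRat(1, 3).Float64() // f == 0.3333333333333333, exact == false
+//	f, exact = big.NewRat(1, 4).Float64()  // f == 0.25, exact == true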
+
+// SetFrac sets z to a/b and returns z.
+// If b == 0, SetFrac panics.
+func (z *Rat) SetFrac(a, b *Int) *Rat {
+ z.a.neg = a.neg != b.neg
+ babs := b.abs
+ if len(babs) == 0 {
+ panic("division by zero")
+ }
+ if &z.a == b || alias(z.a.abs, babs) {
+ babs = nat(nil).set(babs) // make a copy
+ }
+ z.a.abs = z.a.abs.set(a.abs)
+ z.b.abs = z.b.abs.set(babs)
+ return z.norm()
+}
+
+// SetFrac64 sets z to a/b and returns z.
+// If b == 0, SetFrac64 panics.
+func (z *Rat) SetFrac64(a, b int64) *Rat {
+ if b == 0 {
+ panic("division by zero")
+ }
+ z.a.SetInt64(a)
+ if b < 0 {
+ b = -b
+ z.a.neg = !z.a.neg
+ }
+ z.b.abs = z.b.abs.setUint64(uint64(b))
+ return z.norm()
+}
+
+// SetInt sets z to x (by making a copy of x) and returns z.
+func (z *Rat) SetInt(x *Int) *Rat {
+ z.a.Set(x)
+ z.b.abs = z.b.abs.setWord(1)
+ return z
+}
+
+// SetInt64 sets z to x and returns z.
+func (z *Rat) SetInt64(x int64) *Rat {
+ z.a.SetInt64(x)
+ z.b.abs = z.b.abs.setWord(1)
+ return z
+}
+
+// SetUint64 sets z to x and returns z.
+func (z *Rat) SetUint64(x uint64) *Rat {
+ z.a.SetUint64(x)
+ z.b.abs = z.b.abs.setWord(1)
+ return z
+}
+
+// Set sets z to x (by making a copy of x) and returns z.
+func (z *Rat) Set(x *Rat) *Rat {
+ if z != x {
+ z.a.Set(&x.a)
+ z.b.Set(&x.b)
+ }
+ if len(z.b.abs) == 0 {
+ z.b.abs = z.b.abs.setWord(1)
+ }
+ return z
+}
+
+// Abs sets z to |x| (the absolute value of x) and returns z.
+func (z *Rat) Abs(x *Rat) *Rat {
+ z.Set(x)
+ z.a.neg = false
+ return z
+}
+
+// Neg sets z to -x and returns z.
+func (z *Rat) Neg(x *Rat) *Rat {
+ z.Set(x)
+ z.a.neg = len(z.a.abs) > 0 && !z.a.neg // 0 has no sign
+ return z
+}
+
+// Inv sets z to 1/x and returns z.
+// If x == 0, Inv panics.
+func (z *Rat) Inv(x *Rat) *Rat {
+ if len(x.a.abs) == 0 {
+ panic("division by zero")
+ }
+ z.Set(x)
+ z.a.abs, z.b.abs = z.b.abs, z.a.abs
+ return z
+}
+
+// Sign returns:
+//
+// -1 if x < 0
+// 0 if x == 0
+// +1 if x > 0
+func (x *Rat) Sign() int {
+ return x.a.Sign()
+}
+
+// IsInt reports whether the denominator of x is 1.
+func (x *Rat) IsInt() bool {
+ return len(x.b.abs) == 0 || x.b.abs.cmp(natOne) == 0
+}
+
+// Num returns the numerator of x; it may be <= 0.
+// The result is a reference to x's numerator; it
+// may change if a new value is assigned to x, and vice versa.
+// The sign of the numerator corresponds to the sign of x.
+func (x *Rat) Num() *Int {
+ return &x.a
+}
+
+// Denom returns the denominator of x; it is always > 0.
+// The result is a reference to x's denominator, unless
+// x is an uninitialized (zero value) Rat, in which case
+// the result is a new Int of value 1. (To initialize x,
+// any operation that sets x will do, including x.Set(x).)
+// If the result is a reference to x's denominator it
+// may change if a new value is assigned to x, and vice versa.
+func (x *Rat) Denom() *Int {
+ // Note that x.b.neg is guaranteed false.
+ if len(x.b.abs) == 0 {
+ // Note: If this proves problematic, we could
+ // panic instead and require the Rat to
+ // be explicitly initialized.
+ return &Int{abs: nat{1}}
+ }
+ return &x.b
+}
+
+func (z *Rat) norm() *Rat {
+ switch {
+ case len(z.a.abs) == 0:
+ // z == 0; normalize sign and denominator
+ z.a.neg = false
+ fallthrough
+ case len(z.b.abs) == 0:
+ // z is integer; normalize denominator
+ z.b.abs = z.b.abs.setWord(1)
+ default:
+ // z is fraction; normalize numerator and denominator
+ neg := z.a.neg
+ z.a.neg = false
+ z.b.neg = false
+ if f := NewInt(0).lehmerGCD(nil, nil, &z.a, &z.b); f.Cmp(intOne) != 0 {
+ z.a.abs, _ = z.a.abs.div(nil, z.a.abs, f.abs)
+ z.b.abs, _ = z.b.abs.div(nil, z.b.abs, f.abs)
+ }
+ z.a.neg = neg
+ }
+ return z
+}
+
+// mulDenom sets z to the denominator product x*y (by taking into
+// account that 0 values for x or y must be interpreted as 1) and
+// returns z.
+func mulDenom(z, x, y nat) nat {
+ switch {
+ case len(x) == 0 && len(y) == 0:
+ return z.setWord(1)
+ case len(x) == 0:
+ return z.set(y)
+ case len(y) == 0:
+ return z.set(x)
+ }
+ return z.mul(x, y)
+}
+
+// scaleDenom sets z to the product x*f.
+// If f == 0 (zero value of denominator), z is set to (a copy of) x.
+func (z *Int) scaleDenom(x *Int, f nat) {
+ if len(f) == 0 {
+ z.Set(x)
+ return
+ }
+ z.abs = z.abs.mul(x.abs, f)
+ z.neg = x.neg
+}
+
+// Cmp compares x and y and returns:
+//
+// -1 if x < y
+// 0 if x == y
+// +1 if x > y
+func (x *Rat) Cmp(y *Rat) int {
+ var a, b Int
+ a.scaleDenom(&x.a, y.b.abs)
+ b.scaleDenom(&y.a, x.b.abs)
+ return a.Cmp(&b)
+}
+
+// Add sets z to the sum x+y and returns z.
+func (z *Rat) Add(x, y *Rat) *Rat {
+ var a1, a2 Int
+ a1.scaleDenom(&x.a, y.b.abs)
+ a2.scaleDenom(&y.a, x.b.abs)
+ z.a.Add(&a1, &a2)
+ z.b.abs = mulDenom(z.b.abs, x.b.abs, y.b.abs)
+ return z.norm()
+}
+
+// Sub sets z to the difference x-y and returns z.
+func (z *Rat) Sub(x, y *Rat) *Rat {
+ var a1, a2 Int
+ a1.scaleDenom(&x.a, y.b.abs)
+ a2.scaleDenom(&y.a, x.b.abs)
+ z.a.Sub(&a1, &a2)
+ z.b.abs = mulDenom(z.b.abs, x.b.abs, y.b.abs)
+ return z.norm()
+}
+
+// Mul sets z to the product x*y and returns z.
+func (z *Rat) Mul(x, y *Rat) *Rat {
+ if x == y {
+ // a squared Rat is positive and can't be reduced (no need to call norm())
+ z.a.neg = false
+ z.a.abs = z.a.abs.sqr(x.a.abs)
+ if len(x.b.abs) == 0 {
+ z.b.abs = z.b.abs.setWord(1)
+ } else {
+ z.b.abs = z.b.abs.sqr(x.b.abs)
+ }
+ return z
+ }
+ z.a.Mul(&x.a, &y.a)
+ z.b.abs = mulDenom(z.b.abs, x.b.abs, y.b.abs)
+ return z.norm()
+}
+
+// Quo sets z to the quotient x/y and returns z.
+// If y == 0, Quo panics.
+func (z *Rat) Quo(x, y *Rat) *Rat {
+ if len(y.a.abs) == 0 {
+ panic("division by zero")
+ }
+ var a, b Int
+ a.scaleDenom(&x.a, y.b.abs)
+ b.scaleDenom(&y.a, x.b.abs)
+ z.a.abs = a.abs
+ z.b.abs = b.abs
+ z.a.neg = a.neg != b.neg
+ return z.norm()
+}
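+
+// For illustration, results of the arithmetic methods are always
+// normalized; a minimal sketch:
+//
+//	z := new(big.Rat).Add(big.NewRat(1, 3), big.NewRat(1, 6))
+//	// z.String() == "1/2"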
diff --git a/contrib/go/_std_1.19/src/math/big/ratconv.go b/contrib/go/_std_1.19/src/math/big/ratconv.go
new file mode 100644
index 0000000000..794a51d007
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/big/ratconv.go
@@ -0,0 +1,380 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements rat-to-string conversion functions.
+
+package big
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+func ratTok(ch rune) bool {
+ return strings.ContainsRune("+-/0123456789.eE", ch)
+}
+
+var ratZero Rat
+var _ fmt.Scanner = &ratZero // *Rat must implement fmt.Scanner
+
+// Scan is a support routine for fmt.Scanner. It accepts the formats
+// 'e', 'E', 'f', 'F', 'g', 'G', and 'v'. All formats are equivalent.
+func (z *Rat) Scan(s fmt.ScanState, ch rune) error {
+ tok, err := s.Token(true, ratTok)
+ if err != nil {
+ return err
+ }
+ if !strings.ContainsRune("efgEFGv", ch) {
+ return errors.New("Rat.Scan: invalid verb")
+ }
+ if _, ok := z.SetString(string(tok)); !ok {
+ return errors.New("Rat.Scan: invalid syntax")
+ }
+ return nil
+}
+
+// SetString sets z to the value of s and returns z and a boolean indicating
+// success. s can be given as a (possibly signed) fraction "a/b", or as a
+// floating-point number optionally followed by an exponent.
+// If a fraction is provided, both the dividend and the divisor may be a
+// decimal integer or independently use a prefix of “0b”, “0” or “0o”,
+// or “0x” (or their upper-case variants) to denote a binary, octal, or
+// hexadecimal integer, respectively. The divisor may not be signed.
+// If a floating-point number is provided, it may be in decimal form or
+// use any of the same prefixes as above but for “0” to denote a non-decimal
+// mantissa. A leading “0” is considered a decimal leading 0; it does not
+// indicate octal representation in this case.
+// An optional base-10 “e” or base-2 “p” (or their upper-case variants)
+// exponent may be provided as well, except for hexadecimal floats which
+// only accept an (optional) “p” exponent (because an “e” or “E” cannot
+// be distinguished from a mantissa digit). If the exponent's absolute value
+// is too large, the operation may fail.
+// The entire string, not just a prefix, must be valid for success. If the
+// operation failed, the value of z is undefined but the returned value is nil.
+func (z *Rat) SetString(s string) (*Rat, bool) {
+ if len(s) == 0 {
+ return nil, false
+ }
+ // len(s) > 0
+
+ // parse fraction a/b, if any
+ if sep := strings.Index(s, "/"); sep >= 0 {
+ if _, ok := z.a.SetString(s[:sep], 0); !ok {
+ return nil, false
+ }
+ r := strings.NewReader(s[sep+1:])
+ var err error
+ if z.b.abs, _, _, err = z.b.abs.scan(r, 0, false); err != nil {
+ return nil, false
+ }
+ // entire string must have been consumed
+ if _, err = r.ReadByte(); err != io.EOF {
+ return nil, false
+ }
+ if len(z.b.abs) == 0 {
+ return nil, false
+ }
+ return z.norm(), true
+ }
+
+ // parse floating-point number
+ r := strings.NewReader(s)
+
+ // sign
+ neg, err := scanSign(r)
+ if err != nil {
+ return nil, false
+ }
+
+ // mantissa
+ var base int
+ var fcount int // fractional digit count; valid if <= 0
+ z.a.abs, base, fcount, err = z.a.abs.scan(r, 0, true)
+ if err != nil {
+ return nil, false
+ }
+
+ // exponent
+ var exp int64
+ var ebase int
+ exp, ebase, err = scanExponent(r, true, true)
+ if err != nil {
+ return nil, false
+ }
+
+ // there should be no unread characters left
+ if _, err = r.ReadByte(); err != io.EOF {
+ return nil, false
+ }
+
+ // special-case 0 (see also issue #16176)
+ if len(z.a.abs) == 0 {
+ return z.norm(), true
+ }
+ // len(z.a.abs) > 0
+
+ // The mantissa may have a radix point (fcount <= 0) and there
+ // may be a nonzero exponent exp. The radix point amounts to a
+ // division by base**(-fcount), which equals a multiplication by
+ // base**fcount. An exponent means multiplication by ebase**exp.
+ // Multiplications are commutative, so we can apply them in any
+ // order. We only have powers of 2 and 10, and we split powers
+ // of 10 into the product of the same powers of 2 and 5. This
+ // may reduce the size of shift/multiplication factors or
+ // divisors required to create the final fraction, depending
+ // on the actual floating-point value.
+
+ // determine binary or decimal exponent contribution of radix point
+ var exp2, exp5 int64
+ if fcount < 0 {
+ // The mantissa has a radix point ddd.dddd; and
+ // -fcount is the number of digits to the right
+ // of '.'. Adjust relevant exponent accordingly.
+ d := int64(fcount)
+ switch base {
+ case 10:
+ exp5 = d
+ fallthrough // 10**e == 5**e * 2**e
+ case 2:
+ exp2 = d
+ case 8:
+ exp2 = d * 3 // octal digits are 3 bits each
+ case 16:
+ exp2 = d * 4 // hexadecimal digits are 4 bits each
+ default:
+ panic("unexpected mantissa base")
+ }
+ // fcount consumed - not needed anymore
+ }
+
+ // take actual exponent into account
+ switch ebase {
+ case 10:
+ exp5 += exp
+ fallthrough // see fallthrough above
+ case 2:
+ exp2 += exp
+ default:
+ panic("unexpected exponent base")
+ }
+ // exp consumed - not needed anymore
+
+ // apply exp5 contributions
+ // (start with exp5 so the numbers to multiply are smaller)
+ if exp5 != 0 {
+ n := exp5
+ if n < 0 {
+ n = -n
+ if n < 0 {
+ // This can occur if -n overflows. -(-1 << 63) would become
+ // -1 << 63, which is still negative.
+ return nil, false
+ }
+ }
+ if n > 1e6 {
+ return nil, false // avoid excessively large exponents
+ }
+ pow5 := z.b.abs.expNN(natFive, nat(nil).setWord(Word(n)), nil) // use underlying array of z.b.abs
+ if exp5 > 0 {
+ z.a.abs = z.a.abs.mul(z.a.abs, pow5)
+ z.b.abs = z.b.abs.setWord(1)
+ } else {
+ z.b.abs = pow5
+ }
+ } else {
+ z.b.abs = z.b.abs.setWord(1)
+ }
+
+ // apply exp2 contributions
+ if exp2 < -1e7 || exp2 > 1e7 {
+ return nil, false // avoid excessively large exponents
+ }
+ if exp2 > 0 {
+ z.a.abs = z.a.abs.shl(z.a.abs, uint(exp2))
+ } else if exp2 < 0 {
+ z.b.abs = z.b.abs.shl(z.b.abs, uint(-exp2))
+ }
+
+ z.a.neg = neg && len(z.a.abs) > 0 // 0 has no sign
+
+ return z.norm(), true
+}
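+
+// For illustration, each of these inputs parses to the value shown:
+//
+//	new(big.Rat).SetString("3/2")    // 3/2
+//	new(big.Rat).SetString("1.5")    // 3/2
+//	new(big.Rat).SetString("15e-1")  // 3/2
+//	new(big.Rat).SetString("0x1p-2") // 1/4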
+
+// scanExponent scans the longest possible prefix of r representing a base 10
+// (“e”, “E”) or a base 2 (“p”, “P”) exponent, if any. It returns the
+// exponent, the exponent base (10 or 2), and a read or syntax error, if any.
+//
+// If sepOk is set, an underscore character “_” may appear between successive
+// exponent digits; such underscores do not change the value of the exponent.
+// Incorrect placement of underscores is reported as an error if there are no
+// other errors. If sepOk is not set, underscores are not recognized and thus
+// terminate scanning like any other character that is not a valid digit.
+//
+// exponent = ( "e" | "E" | "p" | "P" ) [ sign ] digits .
+// sign = "+" | "-" .
+// digits = digit { [ '_' ] digit } .
+// digit = "0" ... "9" .
+//
+// A base 2 exponent is only permitted if base2ok is set.
+func scanExponent(r io.ByteScanner, base2ok, sepOk bool) (exp int64, base int, err error) {
+ // one char look-ahead
+ ch, err := r.ReadByte()
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ }
+ return 0, 10, err
+ }
+
+ // exponent char
+ switch ch {
+ case 'e', 'E':
+ base = 10
+ case 'p', 'P':
+ if base2ok {
+ base = 2
+ break // ok
+ }
+ fallthrough // binary exponent not permitted
+ default:
+ r.UnreadByte() // ch does not belong to exponent anymore
+ return 0, 10, nil
+ }
+
+ // sign
+ var digits []byte
+ ch, err = r.ReadByte()
+ if err == nil && (ch == '+' || ch == '-') {
+ if ch == '-' {
+ digits = append(digits, '-')
+ }
+ ch, err = r.ReadByte()
+ }
+
+ // prev encodes the previously seen char: it is one
+ // of '_', '0' (a digit), or '.' (anything else). A
+ // valid separator '_' may only occur after a digit.
+ prev := '.'
+ invalSep := false
+
+ // exponent value
+ hasDigits := false
+ for err == nil {
+ if '0' <= ch && ch <= '9' {
+ digits = append(digits, ch)
+ prev = '0'
+ hasDigits = true
+ } else if ch == '_' && sepOk {
+ if prev != '0' {
+ invalSep = true
+ }
+ prev = '_'
+ } else {
+ r.UnreadByte() // ch does not belong to number anymore
+ break
+ }
+ ch, err = r.ReadByte()
+ }
+
+ if err == io.EOF {
+ err = nil
+ }
+ if err == nil && !hasDigits {
+ err = errNoDigits
+ }
+ if err == nil {
+ exp, err = strconv.ParseInt(string(digits), 10, 64)
+ }
+ // other errors take precedence over invalid separators
+ if err == nil && (invalSep || prev == '_') {
+ err = errInvalSep
+ }
+
+ return
+}
+
+// String returns a string representation of x in the form "a/b" (even if b == 1).
+func (x *Rat) String() string {
+ return string(x.marshal())
+}
+
+// marshal implements String, returning a slice of bytes instead of a string.
+func (x *Rat) marshal() []byte {
+ var buf []byte
+ buf = x.a.Append(buf, 10)
+ buf = append(buf, '/')
+ if len(x.b.abs) != 0 {
+ buf = x.b.Append(buf, 10)
+ } else {
+ buf = append(buf, '1')
+ }
+ return buf
+}
+
+// RatString returns a string representation of x in the form "a/b" if b != 1,
+// and in the form "a" if b == 1.
+func (x *Rat) RatString() string {
+ if x.IsInt() {
+ return x.a.String()
+ }
+ return x.String()
+}
+
+// FloatString returns a string representation of x in decimal form with prec
+// digits of precision after the radix point. The last digit is rounded to
+// nearest, with halves rounded away from zero.
+func (x *Rat) FloatString(prec int) string {
+ var buf []byte
+
+ if x.IsInt() {
+ buf = x.a.Append(buf, 10)
+ if prec > 0 {
+ buf = append(buf, '.')
+ for i := prec; i > 0; i-- {
+ buf = append(buf, '0')
+ }
+ }
+ return string(buf)
+ }
+ // x.b.abs != 0
+
+ q, r := nat(nil).div(nat(nil), x.a.abs, x.b.abs)
+
+ p := natOne
+ if prec > 0 {
+ p = nat(nil).expNN(natTen, nat(nil).setUint64(uint64(prec)), nil)
+ }
+
+ r = r.mul(r, p)
+ r, r2 := r.div(nat(nil), r, x.b.abs)
+
+ // see if we need to round up
+ r2 = r2.add(r2, r2)
+ if x.b.abs.cmp(r2) <= 0 {
+ r = r.add(r, natOne)
+ if r.cmp(p) >= 0 {
+ q = nat(nil).add(q, natOne)
+ r = nat(nil).sub(r, p)
+ }
+ }
+
+ if x.a.neg {
+ buf = append(buf, '-')
+ }
+ buf = append(buf, q.utoa(10)...) // itoa ignores sign if q == 0
+
+ if prec > 0 {
+ buf = append(buf, '.')
+ rs := r.utoa(10)
+ for i := prec - len(rs); i > 0; i-- {
+ buf = append(buf, '0')
+ }
+ buf = append(buf, rs...)
+ }
+
+ return string(buf)
+}
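+
+// For illustration, the digit after the requested precision rounds the
+// result, with halves away from zero:
+//
+//	big.NewRat(2, 3).FloatString(3) // "0.667"
+//	big.NewRat(1, 2).FloatString(0) // "1"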
diff --git a/contrib/go/_std_1.19/src/math/big/ratmarsh.go b/contrib/go/_std_1.19/src/math/big/ratmarsh.go
new file mode 100644
index 0000000000..56102e845b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/big/ratmarsh.go
@@ -0,0 +1,81 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements encoding/decoding of Rats.
+
+package big
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+)
+
+// Gob codec version. Permits backward-compatible changes to the encoding.
+const ratGobVersion byte = 1
+
+// GobEncode implements the gob.GobEncoder interface.
+func (x *Rat) GobEncode() ([]byte, error) {
+ if x == nil {
+ return nil, nil
+ }
+ buf := make([]byte, 1+4+(len(x.a.abs)+len(x.b.abs))*_S) // extra bytes for version and sign bit (1), and numerator length (4)
+ i := x.b.abs.bytes(buf)
+ j := x.a.abs.bytes(buf[:i])
+ n := i - j
+ if int(uint32(n)) != n {
+ // this should never happen
+ return nil, errors.New("Rat.GobEncode: numerator too large")
+ }
+ binary.BigEndian.PutUint32(buf[j-4:j], uint32(n))
+ j -= 1 + 4
+ b := ratGobVersion << 1 // make space for sign bit
+ if x.a.neg {
+ b |= 1
+ }
+ buf[j] = b
+ return buf[j:], nil
+}
+
+// GobDecode implements the gob.GobDecoder interface.
+func (z *Rat) GobDecode(buf []byte) error {
+ if len(buf) == 0 {
+ // Other side sent a nil or default value.
+ *z = Rat{}
+ return nil
+ }
+ if len(buf) < 5 {
+ return errors.New("Rat.GobDecode: buffer too small")
+ }
+ b := buf[0]
+ if b>>1 != ratGobVersion {
+ return fmt.Errorf("Rat.GobDecode: encoding version %d not supported", b>>1)
+ }
+ const j = 1 + 4
+ i := j + binary.BigEndian.Uint32(buf[j-4:j])
+ if len(buf) < int(i) {
+ return errors.New("Rat.GobDecode: buffer too small")
+ }
+ z.a.neg = b&1 != 0
+ z.a.abs = z.a.abs.setBytes(buf[j:i])
+ z.b.abs = z.b.abs.setBytes(buf[i:])
+ return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (x *Rat) MarshalText() (text []byte, err error) {
+ if x.IsInt() {
+ return x.a.MarshalText()
+ }
+ return x.marshal(), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (z *Rat) UnmarshalText(text []byte) error {
+ // TODO(gri): get rid of the []byte/string conversion
+ if _, ok := z.SetString(string(text)); !ok {
+ return fmt.Errorf("math/big: cannot unmarshal %q into a *big.Rat", text)
+ }
+ return nil
+}
diff --git a/contrib/go/_std_1.18/src/math/big/roundingmode_string.go b/contrib/go/_std_1.19/src/math/big/roundingmode_string.go
index c7629eb98b..c7629eb98b 100644
--- a/contrib/go/_std_1.18/src/math/big/roundingmode_string.go
+++ b/contrib/go/_std_1.19/src/math/big/roundingmode_string.go
diff --git a/contrib/go/_std_1.19/src/math/big/sqrt.go b/contrib/go/_std_1.19/src/math/big/sqrt.go
new file mode 100644
index 0000000000..b4b03743f4
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/big/sqrt.go
@@ -0,0 +1,130 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big
+
+import (
+ "math"
+ "sync"
+)
+
+var threeOnce struct {
+ sync.Once
+ v *Float
+}
+
+func three() *Float {
+ threeOnce.Do(func() {
+ threeOnce.v = NewFloat(3.0)
+ })
+ return threeOnce.v
+}
+
+// Sqrt sets z to the rounded square root of x, and returns it.
+//
+// If z's precision is 0, it is changed to x's precision before the
+// operation. Rounding is performed according to z's precision and
+// rounding mode, but z's accuracy is not computed. Specifically, the
+// result of z.Acc() is undefined.
+//
+// The function panics if x < 0. The value of z is undefined in that
+// case.
+func (z *Float) Sqrt(x *Float) *Float {
+ if debugFloat {
+ x.validate()
+ }
+
+ if z.prec == 0 {
+ z.prec = x.prec
+ }
+
+ if x.Sign() == -1 {
+ // following IEEE754-2008 (section 7.2)
+ panic(ErrNaN{"square root of negative operand"})
+ }
+
+ // handle ±0 and +∞
+ if x.form != finite {
+ z.acc = Exact
+ z.form = x.form
+ z.neg = x.neg // IEEE754-2008 requires √±0 = ±0
+ return z
+ }
+
+ // MantExp sets the argument's precision to the receiver's, and
+ // when z.prec > x.prec this will lower z.prec. Restore it after
+ // the MantExp call.
+ prec := z.prec
+ b := x.MantExp(z)
+ z.prec = prec
+
+ // Compute √(z·2**b) as
+ // √( z)·2**(½b) if b is even
+ // √(2z)·2**(⌊½b⌋) if b > 0 is odd
+ // √(½z)·2**(⌈½b⌉) if b < 0 is odd
+ switch b % 2 {
+ case 0:
+ // nothing to do
+ case 1:
+ z.exp++
+ case -1:
+ z.exp--
+ }
+ // 0.25 <= z < 2.0
+
+ // Solving 1/x² - z = 0 avoids Quo calls and is faster, especially
+ // for high precisions.
+ z.sqrtInverse(z)
+
+ // re-attach halved exponent
+ return z.SetMantExp(z, b/2)
+}
+
+// Compute √x (to z.prec precision) by solving
+//
+// 1/t² - x = 0
+//
+// for t (using Newton's method), and then inverting.
+func (z *Float) sqrtInverse(x *Float) {
+ // let
+ // f(t) = 1/t² - x
+ // then
+ // g(t) = f(t)/f'(t) = -½t(1 - xt²)
+ // and the next guess is given by
+ // t2 = t - g(t) = ½t(3 - xt²)
+ u := newFloat(z.prec)
+ v := newFloat(z.prec)
+ three := three()
+ ng := func(t *Float) *Float {
+ u.prec = t.prec
+ v.prec = t.prec
+ u.Mul(t, t) // u = t²
+ u.Mul(x, u) // = xt²
+ v.Sub(three, u) // v = 3 - xt²
+ u.Mul(t, v) // u = t(3 - xt²)
+ u.exp-- // = ½t(3 - xt²)
+ return t.Set(u)
+ }
+
+ xf, _ := x.Float64()
+ sqi := newFloat(z.prec)
+ sqi.SetFloat64(1 / math.Sqrt(xf))
+ for prec := z.prec + 32; sqi.prec < prec; {
+ sqi.prec *= 2
+ sqi = ng(sqi)
+ }
+ // sqi = 1/√x
+
+ // x/√x = √x
+ z.Mul(x, sqi)
+}
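+
+// For illustration, Sqrt (and thus sqrtInverse) via the exported API:
+//
+//	z := new(big.Float).SetPrec(100).Sqrt(big.NewFloat(2))
+//	// z ≈ 1.4142135623730950488, carrying 100 bits of precision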
+
+// newFloat returns a new *Float with space for twice the given
+// precision.
+func newFloat(prec2 uint32) *Float {
+ z := new(Float)
+ // nat.make ensures the slice length is > 0
+ z.mant = z.mant.make(int(prec2/_W) * 2)
+ return z
+}
diff --git a/contrib/go/_std_1.19/src/math/bits.go b/contrib/go/_std_1.19/src/math/bits.go
new file mode 100644
index 0000000000..c5cb93b159
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/bits.go
@@ -0,0 +1,62 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+const (
+ uvnan = 0x7FF8000000000001
+ uvinf = 0x7FF0000000000000
+ uvneginf = 0xFFF0000000000000
+ uvone = 0x3FF0000000000000
+ mask = 0x7FF
+ shift = 64 - 11 - 1
+ bias = 1023
+ signMask = 1 << 63
+ fracMask = 1<<shift - 1
+)
+
+// Inf returns positive infinity if sign >= 0, negative infinity if sign < 0.
+func Inf(sign int) float64 {
+ var v uint64
+ if sign >= 0 {
+ v = uvinf
+ } else {
+ v = uvneginf
+ }
+ return Float64frombits(v)
+}
+
+// NaN returns an IEEE 754 “not-a-number” value.
+func NaN() float64 { return Float64frombits(uvnan) }
+
+// IsNaN reports whether f is an IEEE 754 “not-a-number” value.
+func IsNaN(f float64) (is bool) {
+ // IEEE 754 says that only NaNs satisfy f != f.
+ // To avoid the floating-point hardware, could use:
+ // x := Float64bits(f);
+ // return uint32(x>>shift)&mask == mask && x != uvinf && x != uvneginf
+ return f != f
+}
+
+// IsInf reports whether f is an infinity, according to sign.
+// If sign > 0, IsInf reports whether f is positive infinity.
+// If sign < 0, IsInf reports whether f is negative infinity.
+// If sign == 0, IsInf reports whether f is either infinity.
+func IsInf(f float64, sign int) bool {
+ // Test for infinity by comparing against maximum float.
+ // To avoid the floating-point hardware, could use:
+ // x := Float64bits(f);
+ // return sign >= 0 && x == uvinf || sign <= 0 && x == uvneginf;
+ return sign >= 0 && f > MaxFloat64 || sign <= 0 && f < -MaxFloat64
+}
+
+// normalize returns a normal number y and exponent exp
+// satisfying x == y × 2**exp. It assumes x is finite and non-zero.
+func normalize(x float64) (y float64, exp int) {
+ const SmallestNormal = 2.2250738585072014e-308 // 2**-1022
+ if Abs(x) < SmallestNormal {
+ return x * (1 << 52), -52
+ }
+ return x, 0
+}
diff --git a/contrib/go/_std_1.18/src/math/bits/bits.go b/contrib/go/_std_1.19/src/math/bits/bits.go
index 65452feda2..65452feda2 100644
--- a/contrib/go/_std_1.18/src/math/bits/bits.go
+++ b/contrib/go/_std_1.19/src/math/bits/bits.go
diff --git a/contrib/go/_std_1.18/src/math/bits/bits_errors.go b/contrib/go/_std_1.19/src/math/bits/bits_errors.go
index 61cb5c9457..61cb5c9457 100644
--- a/contrib/go/_std_1.18/src/math/bits/bits_errors.go
+++ b/contrib/go/_std_1.19/src/math/bits/bits_errors.go
diff --git a/contrib/go/_std_1.18/src/math/bits/bits_tables.go b/contrib/go/_std_1.19/src/math/bits/bits_tables.go
index f869b8d5c3..f869b8d5c3 100644
--- a/contrib/go/_std_1.18/src/math/bits/bits_tables.go
+++ b/contrib/go/_std_1.19/src/math/bits/bits_tables.go
diff --git a/contrib/go/_std_1.19/src/math/cbrt.go b/contrib/go/_std_1.19/src/math/cbrt.go
new file mode 100644
index 0000000000..e5e9548cb1
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/cbrt.go
@@ -0,0 +1,85 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// The Go code is a modified version of the original C code from
+// http://www.netlib.org/fdlibm/s_cbrt.c and came with this notice.
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunSoft, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+
+// Cbrt returns the cube root of x.
+//
+// Special cases are:
+//
+// Cbrt(±0) = ±0
+// Cbrt(±Inf) = ±Inf
+// Cbrt(NaN) = NaN
+func Cbrt(x float64) float64 {
+ if haveArchCbrt {
+ return archCbrt(x)
+ }
+ return cbrt(x)
+}
+
+func cbrt(x float64) float64 {
+ const (
+ B1 = 715094163 // (682-0.03306235651)*2**20
+ B2 = 696219795 // (664-0.03306235651)*2**20
+ C = 5.42857142857142815906e-01 // 19/35 = 0x3FE15F15F15F15F1
+ D = -7.05306122448979611050e-01 // -864/1225 = 0xBFE691DE2532C834
+ E = 1.41428571428571436819e+00 // 99/70 = 0x3FF6A0EA0EA0EA0F
+ F = 1.60714285714285720630e+00 // 45/28 = 0x3FF9B6DB6DB6DB6E
+ G = 3.57142857142857150787e-01 // 5/14 = 0x3FD6DB6DB6DB6DB7
+ SmallestNormal = 2.22507385850720138309e-308 // 2**-1022 = 0x0010000000000000
+ )
+ // special cases
+ switch {
+ case x == 0 || IsNaN(x) || IsInf(x, 0):
+ return x
+ }
+
+ sign := false
+ if x < 0 {
+ x = -x
+ sign = true
+ }
+
+ // rough cbrt to 5 bits
+ t := Float64frombits(Float64bits(x)/3 + B1<<32)
+ if x < SmallestNormal {
+ // subnormal number
+ t = float64(1 << 54) // set t = 2**54
+ t *= x
+ t = Float64frombits(Float64bits(t)/3 + B2<<32)
+ }
+
+ // new cbrt to 23 bits
+ r := t * t / x
+ s := C + r*t
+ t *= G + F/(s+E+D/s)
+
+ // chop to 22 bits, make larger than cbrt(x)
+ t = Float64frombits(Float64bits(t)&(0xFFFFFFFFC<<28) + 1<<30)
+
+ // one step of Newton iteration to 53 bits with error less than 0.667 ulps
+ s = t * t // t*t is exact
+ r = x / s
+ w := t + t
+ r = (r - t) / (w + r) // r-t is exact
+ t = t + t*r
+
+ // restore the sign bit
+ if sign {
+ t = -t
+ }
+ return t
+}
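
A short usage sketch (hedged, not part of the imported file): unlike the naive Pow form, Cbrt handles negative arguments, because the sign is peeled off before the bit-level initial guess and restored at the end:

package main

import (
    "fmt"
    "math"
)

func main() {
    fmt.Println(math.Cbrt(-27)) // -3

    // Pow(x, y) is NaN for x < 0 with non-integer y, so this fails:
    fmt.Println(math.Pow(-27, 1.0/3)) // NaN

    // Large magnitudes are fine; the Newton refinement does not overflow.
    fmt.Println(math.Cbrt(1e300)) // 1e+100
}
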
diff --git a/contrib/go/_std_1.18/src/math/const.go b/contrib/go/_std_1.19/src/math/const.go
index 5ea935fb42..5ea935fb42 100644
--- a/contrib/go/_std_1.18/src/math/const.go
+++ b/contrib/go/_std_1.19/src/math/const.go
diff --git a/contrib/go/_std_1.19/src/math/copysign.go b/contrib/go/_std_1.19/src/math/copysign.go
new file mode 100644
index 0000000000..3a30afb413
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/copysign.go
@@ -0,0 +1,12 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// Copysign returns a value with the magnitude of f
+// and the sign of sign.
+func Copysign(f, sign float64) float64 {
+ const signBit = 1 << 63
+ return Float64frombits(Float64bits(f)&^signBit | Float64bits(sign)&signBit)
+}
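
Because Copysign only moves the sign bit, it also distinguishes the negative zero that ordinary comparison treats as equal to +0; a small illustrative sketch:

package main

import (
    "fmt"
    "math"
)

func main() {
    fmt.Println(math.Copysign(3, -0.5)) // -3: magnitude from f, sign from sign

    negZero := math.Copysign(0, -1) // -0: equal to 0, but with the sign bit set
    fmt.Println(negZero == 0, math.Signbit(negZero)) // true true
    fmt.Println(math.Copysign(7, negZero))           // -7
}
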
diff --git a/contrib/go/_std_1.19/src/math/dim.go b/contrib/go/_std_1.19/src/math/dim.go
new file mode 100644
index 0000000000..6a286cdc75
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/dim.go
@@ -0,0 +1,94 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// Dim returns the maximum of x-y or 0.
+//
+// Special cases are:
+//
+// Dim(+Inf, +Inf) = NaN
+// Dim(-Inf, -Inf) = NaN
+// Dim(x, NaN) = Dim(NaN, x) = NaN
+func Dim(x, y float64) float64 {
+ // The special cases result in NaN after the subtraction:
+ // +Inf - +Inf = NaN
+ // -Inf - -Inf = NaN
+ // NaN - y = NaN
+ // x - NaN = NaN
+ v := x - y
+ if v <= 0 {
+ // v is negative or 0
+ return 0
+ }
+ // v is positive or NaN
+ return v
+}
+
+// Max returns the larger of x or y.
+//
+// Special cases are:
+//
+// Max(x, +Inf) = Max(+Inf, x) = +Inf
+// Max(x, NaN) = Max(NaN, x) = NaN
+// Max(+0, ±0) = Max(±0, +0) = +0
+// Max(-0, -0) = -0
+func Max(x, y float64) float64 {
+ if haveArchMax {
+ return archMax(x, y)
+ }
+ return max(x, y)
+}
+
+func max(x, y float64) float64 {
+ // special cases
+ switch {
+ case IsInf(x, 1) || IsInf(y, 1):
+ return Inf(1)
+ case IsNaN(x) || IsNaN(y):
+ return NaN()
+ case x == 0 && x == y:
+ if Signbit(x) {
+ return y
+ }
+ return x
+ }
+ if x > y {
+ return x
+ }
+ return y
+}
+
+// Min returns the smaller of x or y.
+//
+// Special cases are:
+//
+// Min(x, -Inf) = Min(-Inf, x) = -Inf
+// Min(x, NaN) = Min(NaN, x) = NaN
+// Min(-0, ±0) = Min(±0, -0) = -0
+func Min(x, y float64) float64 {
+ if haveArchMin {
+ return archMin(x, y)
+ }
+ return min(x, y)
+}
+
+func min(x, y float64) float64 {
+ // special cases
+ switch {
+ case IsInf(x, -1) || IsInf(y, -1):
+ return Inf(-1)
+ case IsNaN(x) || IsNaN(y):
+ return NaN()
+ case x == 0 && x == y:
+ if Signbit(x) {
+ return x
+ }
+ return y
+ }
+ if x < y {
+ return x
+ }
+ return y
+}
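
A sketch of the special cases above (illustrative only): Max and Min propagate NaN and order signed zeros, which a bare x > y comparison does not do:

package main

import (
    "fmt"
    "math"
)

func main() {
    fmt.Println(math.Dim(5, 3), math.Dim(3, 5)) // 2 0

    // A bare comparison would answer differently depending on argument
    // order; Max makes NaN contagious in either position.
    fmt.Println(math.Max(math.NaN(), 1)) // NaN

    // Signed zeros are ordered: Max(-0, +0) is +0.
    negZero := math.Copysign(0, -1)
    fmt.Println(math.Signbit(math.Max(negZero, 0))) // false
}
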
diff --git a/contrib/go/_std_1.18/src/math/dim_amd64.s b/contrib/go/_std_1.19/src/math/dim_amd64.s
index 253f03b97e..253f03b97e 100644
--- a/contrib/go/_std_1.18/src/math/dim_amd64.s
+++ b/contrib/go/_std_1.19/src/math/dim_amd64.s
diff --git a/contrib/go/_std_1.18/src/math/dim_asm.go b/contrib/go/_std_1.19/src/math/dim_asm.go
index f4adbd0ae5..f4adbd0ae5 100644
--- a/contrib/go/_std_1.18/src/math/dim_asm.go
+++ b/contrib/go/_std_1.19/src/math/dim_asm.go
diff --git a/contrib/go/_std_1.19/src/math/erf.go b/contrib/go/_std_1.19/src/math/erf.go
new file mode 100644
index 0000000000..ba00c7d03e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/erf.go
@@ -0,0 +1,351 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+/*
+ Floating-point error function and complementary error function.
+*/
+
+// The original C code and the long comment below are
+// from FreeBSD's /usr/src/lib/msun/src/s_erf.c and
+// came with this notice. The Go code is a simplified
+// version of the original C.
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+//
+// double erf(double x)
+// double erfc(double x)
+// x
+// 2 |\
+// erf(x) = --------- | exp(-t*t)dt
+// sqrt(pi) \|
+// 0
+//
+// erfc(x) = 1-erf(x)
+// Note that
+// erf(-x) = -erf(x)
+// erfc(-x) = 2 - erfc(x)
+//
+// Method:
+// 1. For |x| in [0, 0.84375]
+// erf(x) = x + x*R(x**2)
+// erfc(x) = 1 - erf(x) if x in [-.84375,0.25]
+// = 0.5 + ((0.5-x)-x*R) if x in [0.25,0.84375]
+// where R = P/Q where P is an odd poly of degree 8 and
+// Q is an odd poly of degree 10.
+// -57.90
+// | R - (erf(x)-x)/x | <= 2
+//
+//
+// Remark. The formula is derived by noting
+// erf(x) = (2/sqrt(pi))*(x - x**3/3 + x**5/10 - x**7/42 + ....)
+// and that
+// 2/sqrt(pi) = 1.128379167095512573896158903121545171688
+// is close to one. The interval is chosen because the fix
+// point of erf(x) is near 0.6174 (i.e., erf(x)=x when x is
+// near 0.6174), and by some experiment, 0.84375 is chosen to
+// guarantee the error is less than one ulp for erf.
+//
+// 2. For |x| in [0.84375,1.25], let s = |x| - 1, and
+// c = 0.84506291151 rounded to single (24 bits)
+// erf(x) = sign(x) * (c + P1(s)/Q1(s))
+// erfc(x) = (1-c) - P1(s)/Q1(s) if x > 0
+// 1+(c+P1(s)/Q1(s)) if x < 0
+// |P1/Q1 - (erf(|x|)-c)| <= 2**-59.06
+// Remark: here we use the Taylor series expansion at x=1.
+// erf(1+s) = erf(1) + s*Poly(s)
+// = 0.845.. + P1(s)/Q1(s)
+// That is, we use rational approximation to approximate
+// erf(1+s) - (c = (single)0.84506291151)
+// Note that |P1/Q1|< 0.078 for x in [0.84375,1.25]
+// where
+// P1(s) = degree 6 poly in s
+// Q1(s) = degree 6 poly in s
+//
+// 3. For x in [1.25,1/0.35(~2.857143)],
+// erfc(x) = (1/x)*exp(-x*x-0.5625+R1/S1)
+// erf(x) = 1 - erfc(x)
+// where
+// R1(z) = degree 7 poly in z, (z=1/x**2)
+// S1(z) = degree 8 poly in z
+//
+// 4. For x in [1/0.35,28]
+// erfc(x) = (1/x)*exp(-x*x-0.5625+R2/S2) if x > 0
+// = 2.0 - (1/x)*exp(-x*x-0.5625+R2/S2) if -6<x<0
+// = 2.0 - tiny (if x <= -6)
+// erf(x) = sign(x)*(1.0 - erfc(x)) if x < 6, else
+// erf(x) = sign(x)*(1.0 - tiny)
+// where
+// R2(z) = degree 6 poly in z, (z=1/x**2)
+// S2(z) = degree 7 poly in z
+//
+// Note1:
+// To compute exp(-x*x-0.5625+R/S), let s be a single
+// precision number and s := x; then
+// -x*x = -s*s + (s-x)*(s+x)
+// exp(-x*x-0.5625+R/S) =
+// exp(-s*s-0.5625)*exp((s-x)*(s+x)+R/S);
+// Note2:
+// Here 3 and 4 make use of the asymptotic series
+// exp(-x*x)
+// erfc(x) ~ ---------- * ( 1 + Poly(1/x**2) )
+// x*sqrt(pi)
+// We use rational approximation to approximate
+// g(s)=f(1/x**2) = log(erfc(x)*x) - x*x + 0.5625
+// Here is the error bound for R1/S1 and R2/S2
+// |R1/S1 - f(x)| < 2**(-62.57)
+// |R2/S2 - f(x)| < 2**(-61.52)
+//
+// 5. For inf > x >= 28
+// erf(x) = sign(x) *(1 - tiny) (raise inexact)
+// erfc(x) = tiny*tiny (raise underflow) if x > 0
+// = 2 - tiny if x<0
+//
+// 6. Special case:
+// erf(0) = 0, erf(inf) = 1, erf(-inf) = -1,
+// erfc(0) = 1, erfc(inf) = 0, erfc(-inf) = 2,
+// erfc/erf(NaN) is NaN
+
+const (
+ erx = 8.45062911510467529297e-01 // 0x3FEB0AC160000000
+ // Coefficients for approximation to erf in [0, 0.84375]
+ efx = 1.28379167095512586316e-01 // 0x3FC06EBA8214DB69
+ efx8 = 1.02703333676410069053e+00 // 0x3FF06EBA8214DB69
+ pp0 = 1.28379167095512558561e-01 // 0x3FC06EBA8214DB68
+ pp1 = -3.25042107247001499370e-01 // 0xBFD4CD7D691CB913
+ pp2 = -2.84817495755985104766e-02 // 0xBF9D2A51DBD7194F
+ pp3 = -5.77027029648944159157e-03 // 0xBF77A291236668E4
+ pp4 = -2.37630166566501626084e-05 // 0xBEF8EAD6120016AC
+ qq1 = 3.97917223959155352819e-01 // 0x3FD97779CDDADC09
+ qq2 = 6.50222499887672944485e-02 // 0x3FB0A54C5536CEBA
+ qq3 = 5.08130628187576562776e-03 // 0x3F74D022C4D36B0F
+ qq4 = 1.32494738004321644526e-04 // 0x3F215DC9221C1A10
+ qq5 = -3.96022827877536812320e-06 // 0xBED09C4342A26120
+ // Coefficients for approximation to erf in [0.84375, 1.25]
+ pa0 = -2.36211856075265944077e-03 // 0xBF6359B8BEF77538
+ pa1 = 4.14856118683748331666e-01 // 0x3FDA8D00AD92B34D
+ pa2 = -3.72207876035701323847e-01 // 0xBFD7D240FBB8C3F1
+ pa3 = 3.18346619901161753674e-01 // 0x3FD45FCA805120E4
+ pa4 = -1.10894694282396677476e-01 // 0xBFBC63983D3E28EC
+ pa5 = 3.54783043256182359371e-02 // 0x3FA22A36599795EB
+ pa6 = -2.16637559486879084300e-03 // 0xBF61BF380A96073F
+ qa1 = 1.06420880400844228286e-01 // 0x3FBB3E6618EEE323
+ qa2 = 5.40397917702171048937e-01 // 0x3FE14AF092EB6F33
+ qa3 = 7.18286544141962662868e-02 // 0x3FB2635CD99FE9A7
+ qa4 = 1.26171219808761642112e-01 // 0x3FC02660E763351F
+ qa5 = 1.36370839120290507362e-02 // 0x3F8BEDC26B51DD1C
+ qa6 = 1.19844998467991074170e-02 // 0x3F888B545735151D
+ // Coefficients for approximation to erfc in [1.25, 1/0.35]
+ ra0 = -9.86494403484714822705e-03 // 0xBF843412600D6435
+ ra1 = -6.93858572707181764372e-01 // 0xBFE63416E4BA7360
+ ra2 = -1.05586262253232909814e+01 // 0xC0251E0441B0E726
+ ra3 = -6.23753324503260060396e+01 // 0xC04F300AE4CBA38D
+ ra4 = -1.62396669462573470355e+02 // 0xC0644CB184282266
+ ra5 = -1.84605092906711035994e+02 // 0xC067135CEBCCABB2
+ ra6 = -8.12874355063065934246e+01 // 0xC054526557E4D2F2
+ ra7 = -9.81432934416914548592e+00 // 0xC023A0EFC69AC25C
+ sa1 = 1.96512716674392571292e+01 // 0x4033A6B9BD707687
+ sa2 = 1.37657754143519042600e+02 // 0x4061350C526AE721
+ sa3 = 4.34565877475229228821e+02 // 0x407B290DD58A1A71
+ sa4 = 6.45387271733267880336e+02 // 0x40842B1921EC2868
+ sa5 = 4.29008140027567833386e+02 // 0x407AD02157700314
+ sa6 = 1.08635005541779435134e+02 // 0x405B28A3EE48AE2C
+ sa7 = 6.57024977031928170135e+00 // 0x401A47EF8E484A93
+ sa8 = -6.04244152148580987438e-02 // 0xBFAEEFF2EE749A62
+ // Coefficients for approximation to erfc in [1/.35, 28]
+ rb0 = -9.86494292470009928597e-03 // 0xBF84341239E86F4A
+ rb1 = -7.99283237680523006574e-01 // 0xBFE993BA70C285DE
+ rb2 = -1.77579549177547519889e+01 // 0xC031C209555F995A
+ rb3 = -1.60636384855821916062e+02 // 0xC064145D43C5ED98
+ rb4 = -6.37566443368389627722e+02 // 0xC083EC881375F228
+ rb5 = -1.02509513161107724954e+03 // 0xC09004616A2E5992
+ rb6 = -4.83519191608651397019e+02 // 0xC07E384E9BDC383F
+ sb1 = 3.03380607434824582924e+01 // 0x403E568B261D5190
+ sb2 = 3.25792512996573918826e+02 // 0x40745CAE221B9F0A
+ sb3 = 1.53672958608443695994e+03 // 0x409802EB189D5118
+ sb4 = 3.19985821950859553908e+03 // 0x40A8FFB7688C246A
+ sb5 = 2.55305040643316442583e+03 // 0x40A3F219CEDF3BE6
+ sb6 = 4.74528541206955367215e+02 // 0x407DA874E79FE763
+ sb7 = -2.24409524465858183362e+01 // 0xC03670E242712D62
+)
+
+// Erf returns the error function of x.
+//
+// Special cases are:
+//
+// Erf(+Inf) = 1
+// Erf(-Inf) = -1
+// Erf(NaN) = NaN
+func Erf(x float64) float64 {
+ if haveArchErf {
+ return archErf(x)
+ }
+ return erf(x)
+}
+
+func erf(x float64) float64 {
+ const (
+ VeryTiny = 2.848094538889218e-306 // 0x0080000000000000
+ Small = 1.0 / (1 << 28) // 2**-28
+ )
+ // special cases
+ switch {
+ case IsNaN(x):
+ return NaN()
+ case IsInf(x, 1):
+ return 1
+ case IsInf(x, -1):
+ return -1
+ }
+ sign := false
+ if x < 0 {
+ x = -x
+ sign = true
+ }
+ if x < 0.84375 { // |x| < 0.84375
+ var temp float64
+ if x < Small { // |x| < 2**-28
+ if x < VeryTiny {
+ temp = 0.125 * (8.0*x + efx8*x) // avoid underflow
+ } else {
+ temp = x + efx*x
+ }
+ } else {
+ z := x * x
+ r := pp0 + z*(pp1+z*(pp2+z*(pp3+z*pp4)))
+ s := 1 + z*(qq1+z*(qq2+z*(qq3+z*(qq4+z*qq5))))
+ y := r / s
+ temp = x + x*y
+ }
+ if sign {
+ return -temp
+ }
+ return temp
+ }
+ if x < 1.25 { // 0.84375 <= |x| < 1.25
+ s := x - 1
+ P := pa0 + s*(pa1+s*(pa2+s*(pa3+s*(pa4+s*(pa5+s*pa6)))))
+ Q := 1 + s*(qa1+s*(qa2+s*(qa3+s*(qa4+s*(qa5+s*qa6)))))
+ if sign {
+ return -erx - P/Q
+ }
+ return erx + P/Q
+ }
+ if x >= 6 { // inf > |x| >= 6
+ if sign {
+ return -1
+ }
+ return 1
+ }
+ s := 1 / (x * x)
+ var R, S float64
+ if x < 1/0.35 { // |x| < 1 / 0.35 ~ 2.857143
+ R = ra0 + s*(ra1+s*(ra2+s*(ra3+s*(ra4+s*(ra5+s*(ra6+s*ra7))))))
+ S = 1 + s*(sa1+s*(sa2+s*(sa3+s*(sa4+s*(sa5+s*(sa6+s*(sa7+s*sa8)))))))
+ } else { // |x| >= 1 / 0.35 ~ 2.857143
+ R = rb0 + s*(rb1+s*(rb2+s*(rb3+s*(rb4+s*(rb5+s*rb6)))))
+ S = 1 + s*(sb1+s*(sb2+s*(sb3+s*(sb4+s*(sb5+s*(sb6+s*sb7))))))
+ }
+ z := Float64frombits(Float64bits(x) & 0xffffffff00000000) // pseudo-single (20-bit) precision x
+ r := Exp(-z*z-0.5625) * Exp((z-x)*(z+x)+R/S)
+ if sign {
+ return r/x - 1
+ }
+ return 1 - r/x
+}
+
+// Erfc returns the complementary error function of x.
+//
+// Special cases are:
+//
+// Erfc(+Inf) = 0
+// Erfc(-Inf) = 2
+// Erfc(NaN) = NaN
+func Erfc(x float64) float64 {
+ if haveArchErfc {
+ return archErfc(x)
+ }
+ return erfc(x)
+}
+
+func erfc(x float64) float64 {
+ const Tiny = 1.0 / (1 << 56) // 2**-56
+ // special cases
+ switch {
+ case IsNaN(x):
+ return NaN()
+ case IsInf(x, 1):
+ return 0
+ case IsInf(x, -1):
+ return 2
+ }
+ sign := false
+ if x < 0 {
+ x = -x
+ sign = true
+ }
+ if x < 0.84375 { // |x| < 0.84375
+ var temp float64
+ if x < Tiny { // |x| < 2**-56
+ temp = x
+ } else {
+ z := x * x
+ r := pp0 + z*(pp1+z*(pp2+z*(pp3+z*pp4)))
+ s := 1 + z*(qq1+z*(qq2+z*(qq3+z*(qq4+z*qq5))))
+ y := r / s
+ if x < 0.25 { // |x| < 1/4
+ temp = x + x*y
+ } else {
+ temp = 0.5 + (x*y + (x - 0.5))
+ }
+ }
+ if sign {
+ return 1 + temp
+ }
+ return 1 - temp
+ }
+ if x < 1.25 { // 0.84375 <= |x| < 1.25
+ s := x - 1
+ P := pa0 + s*(pa1+s*(pa2+s*(pa3+s*(pa4+s*(pa5+s*pa6)))))
+ Q := 1 + s*(qa1+s*(qa2+s*(qa3+s*(qa4+s*(qa5+s*qa6)))))
+ if sign {
+ return 1 + erx + P/Q
+ }
+ return 1 - erx - P/Q
+
+ }
+ if x < 28 { // |x| < 28
+ s := 1 / (x * x)
+ var R, S float64
+ if x < 1/0.35 { // |x| < 1 / 0.35 ~ 2.857143
+ R = ra0 + s*(ra1+s*(ra2+s*(ra3+s*(ra4+s*(ra5+s*(ra6+s*ra7))))))
+ S = 1 + s*(sa1+s*(sa2+s*(sa3+s*(sa4+s*(sa5+s*(sa6+s*(sa7+s*sa8)))))))
+ } else { // |x| >= 1 / 0.35 ~ 2.857143
+ if sign && x > 6 {
+ return 2 // x < -6
+ }
+ R = rb0 + s*(rb1+s*(rb2+s*(rb3+s*(rb4+s*(rb5+s*rb6)))))
+ S = 1 + s*(sb1+s*(sb2+s*(sb3+s*(sb4+s*(sb5+s*(sb6+s*sb7))))))
+ }
+ z := Float64frombits(Float64bits(x) & 0xffffffff00000000) // pseudo-single (20-bit) precision x
+ r := Exp(-z*z-0.5625) * Exp((z-x)*(z+x)+R/S)
+ if sign {
+ return 2 - r/x
+ }
+ return r / x
+ }
+ if sign {
+ return 2
+ }
+ return 0
+}
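
A common application of these functions is the standard normal distribution, via the identity Φ(x) = (1 + erf(x/√2))/2; a hedged sketch of that use (normalCDF is a hypothetical helper name, not part of this patch), which also shows why Erfc beats 1-Erf in the far tail:

package main

import (
    "fmt"
    "math"
)

// normalCDF is a hypothetical helper: Φ(x) = (1 + erf(x/√2)) / 2.
func normalCDF(x float64) float64 {
    return 0.5 * (1 + math.Erf(x/math.Sqrt2))
}

func main() {
    fmt.Println(normalCDF(0), normalCDF(1.96)) // 0.5 ≈0.975

    // For x >= 6, Erf returns exactly 1, so 1-Erf(10) cancels to 0,
    // while Erfc(10) still carries the ≈2.1e-45 tail mass.
    fmt.Println(1-math.Erf(10), math.Erfc(10)) // 0 ≈2.1e-45
}
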
diff --git a/contrib/go/_std_1.19/src/math/erfinv.go b/contrib/go/_std_1.19/src/math/erfinv.go
new file mode 100644
index 0000000000..eed0feb42d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/erfinv.go
@@ -0,0 +1,129 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+/*
+ Inverse of the floating-point error function.
+*/
+
+// This implementation is based on the rational approximation
+// of percentage points of normal distribution available from
+// https://www.jstor.org/stable/2347330.
+
+const (
+ // Coefficients for approximation to erfinv in |x| <= 0.85
+ a0 = 1.1975323115670912564578e0
+ a1 = 4.7072688112383978012285e1
+ a2 = 6.9706266534389598238465e2
+ a3 = 4.8548868893843886794648e3
+ a4 = 1.6235862515167575384252e4
+ a5 = 2.3782041382114385731252e4
+ a6 = 1.1819493347062294404278e4
+ a7 = 8.8709406962545514830200e2
+ b0 = 1.0000000000000000000e0
+ b1 = 4.2313330701600911252e1
+ b2 = 6.8718700749205790830e2
+ b3 = 5.3941960214247511077e3
+ b4 = 2.1213794301586595867e4
+ b5 = 3.9307895800092710610e4
+ b6 = 2.8729085735721942674e4
+ b7 = 5.2264952788528545610e3
+ // Coefficients for approximation to erfinv in 0.85 < |x| <= 1-2*exp(-25)
+ c0 = 1.42343711074968357734e0
+ c1 = 4.63033784615654529590e0
+ c2 = 5.76949722146069140550e0
+ c3 = 3.64784832476320460504e0
+ c4 = 1.27045825245236838258e0
+ c5 = 2.41780725177450611770e-1
+ c6 = 2.27238449892691845833e-2
+ c7 = 7.74545014278341407640e-4
+ d0 = 1.4142135623730950488016887e0
+ d1 = 2.9036514445419946173133295e0
+ d2 = 2.3707661626024532365971225e0
+ d3 = 9.7547832001787427186894837e-1
+ d4 = 2.0945065210512749128288442e-1
+ d5 = 2.1494160384252876777097297e-2
+ d6 = 7.7441459065157709165577218e-4
+ d7 = 1.4859850019840355905497876e-9
+ // Coefficients for approximation to erfinv in 1-2*exp(-25) < |x| < 1
+ e0 = 6.65790464350110377720e0
+ e1 = 5.46378491116411436990e0
+ e2 = 1.78482653991729133580e0
+ e3 = 2.96560571828504891230e-1
+ e4 = 2.65321895265761230930e-2
+ e5 = 1.24266094738807843860e-3
+ e6 = 2.71155556874348757815e-5
+ e7 = 2.01033439929228813265e-7
+ f0 = 1.414213562373095048801689e0
+ f1 = 8.482908416595164588112026e-1
+ f2 = 1.936480946950659106176712e-1
+ f3 = 2.103693768272068968719679e-2
+ f4 = 1.112800997078859844711555e-3
+ f5 = 2.611088405080593625138020e-5
+ f6 = 2.010321207683943062279931e-7
+ f7 = 2.891024605872965461538222e-15
+)
+
+// Erfinv returns the inverse error function of x.
+//
+// Special cases are:
+//
+// Erfinv(1) = +Inf
+// Erfinv(-1) = -Inf
+// Erfinv(x) = NaN if x < -1 or x > 1
+// Erfinv(NaN) = NaN
+func Erfinv(x float64) float64 {
+ // special cases
+ if IsNaN(x) || x <= -1 || x >= 1 {
+ if x == -1 || x == 1 {
+ return Inf(int(x))
+ }
+ return NaN()
+ }
+
+ sign := false
+ if x < 0 {
+ x = -x
+ sign = true
+ }
+
+ var ans float64
+ if x <= 0.85 { // |x| <= 0.85
+ r := 0.180625 - 0.25*x*x
+ z1 := ((((((a7*r+a6)*r+a5)*r+a4)*r+a3)*r+a2)*r+a1)*r + a0
+ z2 := ((((((b7*r+b6)*r+b5)*r+b4)*r+b3)*r+b2)*r+b1)*r + b0
+ ans = (x * z1) / z2
+ } else {
+ var z1, z2 float64
+ r := Sqrt(Ln2 - Log(1.0-x))
+ if r <= 5.0 {
+ r -= 1.6
+ z1 = ((((((c7*r+c6)*r+c5)*r+c4)*r+c3)*r+c2)*r+c1)*r + c0
+ z2 = ((((((d7*r+d6)*r+d5)*r+d4)*r+d3)*r+d2)*r+d1)*r + d0
+ } else {
+ r -= 5.0
+ z1 = ((((((e7*r+e6)*r+e5)*r+e4)*r+e3)*r+e2)*r+e1)*r + e0
+ z2 = ((((((f7*r+f6)*r+f5)*r+f4)*r+f3)*r+f2)*r+f1)*r + f0
+ }
+ ans = z1 / z2
+ }
+
+ if sign {
+ return -ans
+ }
+ return ans
+}
+
+// Erfcinv returns the inverse of Erfc(x).
+//
+// Special cases are:
+//
+// Erfcinv(0) = +Inf
+// Erfcinv(2) = -Inf
+// Erfcinv(x) = NaN if x < 0 or x > 2
+// Erfcinv(NaN) = NaN
+func Erfcinv(x float64) float64 {
+ return Erfinv(1 - x)
+}
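
By the same identity, Erfinv yields the normal quantile function, Φ⁻¹(p) = √2·erfinv(2p−1); a sketch under that assumption (normalQuantile is a hypothetical helper name, not part of this patch):

package main

import (
    "fmt"
    "math"
)

// normalQuantile is a hypothetical helper: Φ⁻¹(p) = √2 · erfinv(2p − 1).
func normalQuantile(p float64) float64 {
    return math.Sqrt2 * math.Erfinv(2*p-1)
}

func main() {
    fmt.Println(normalQuantile(0.5), normalQuantile(0.975)) // 0 ≈1.96

    // The round trip through the forward function recovers the input.
    fmt.Println(math.Erf(math.Erfinv(0.3))) // ≈0.3
}
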
diff --git a/contrib/go/_std_1.19/src/math/exp.go b/contrib/go/_std_1.19/src/math/exp.go
new file mode 100644
index 0000000000..760795f46f
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/exp.go
@@ -0,0 +1,203 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// Exp returns e**x, the base-e exponential of x.
+//
+// Special cases are:
+//
+// Exp(+Inf) = +Inf
+// Exp(NaN) = NaN
+//
+// Very large values overflow to 0 or +Inf.
+// Very small values underflow to 1.
+func Exp(x float64) float64 {
+ if haveArchExp {
+ return archExp(x)
+ }
+ return exp(x)
+}
+
+// The original C code, the long comment, and the constants
+// below are from FreeBSD's /usr/src/lib/msun/src/e_exp.c
+// and came with this notice. The Go code is a simplified
+// version of the original C.
+//
+// ====================================================
+// Copyright (C) 2004 by Sun Microsystems, Inc. All rights reserved.
+//
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+//
+// exp(x)
+// Returns the exponential of x.
+//
+// Method
+// 1. Argument reduction:
+// Reduce x to an r so that |r| <= 0.5*ln2 ~ 0.34658.
+// Given x, find r and integer k such that
+//
+// x = k*ln2 + r, |r| <= 0.5*ln2.
+//
+// Here r will be represented as r = hi-lo for better
+// accuracy.
+//
+// 2. Approximation of exp(r) by a special rational function on
+// the interval [0,0.34658]:
+// Write
+// R(r**2) = r*(exp(r)+1)/(exp(r)-1) = 2 + r*r/6 - r**4/360 + ...
+// We use a special Remez algorithm on [0,0.34658] to generate
+// a polynomial of degree 5 to approximate R. The maximum error
+// of this polynomial approximation is bounded by 2**-59. In
+// other words,
+// R(z) ~ 2.0 + P1*z + P2*z**2 + P3*z**3 + P4*z**4 + P5*z**5
+// (where z=r*r, and the values of P1 to P5 are listed below)
+// and
+// | 5 | -59
+// | 2.0+P1*z+...+P5*z - R(z) | <= 2
+// | |
+// The computation of exp(r) thus becomes
+// 2*r
+// exp(r) = 1 + -------
+// R - r
+// r*R1(r)
+// = 1 + r + ----------- (for better accuracy)
+// 2 - R1(r)
+// where
+// 2 4 10
+// R1(r) = r - (P1*r + P2*r + ... + P5*r ).
+//
+// 3. Scale back to obtain exp(x):
+// From step 1, we have
+// exp(x) = 2**k * exp(r)
+//
+// Special cases:
+// exp(INF) is INF, exp(NaN) is NaN;
+// exp(-INF) is 0, and
+// for finite argument, only exp(0)=1 is exact.
+//
+// Accuracy:
+// according to an error analysis, the error is always less than
+// 1 ulp (unit in the last place).
+//
+// Misc. info.
+// For IEEE double
+// if x > 7.09782712893383973096e+02 then exp(x) overflow
+// if x < -7.45133219101941108420e+02 then exp(x) underflow
+//
+// Constants:
+// The hexadecimal values are the intended ones for the following
+// constants. The decimal values may be used, provided that the
+// compiler will convert from decimal to binary accurately enough
+// to produce the hexadecimal values shown.
+
+func exp(x float64) float64 {
+ const (
+ Ln2Hi = 6.93147180369123816490e-01
+ Ln2Lo = 1.90821492927058770002e-10
+ Log2e = 1.44269504088896338700e+00
+
+ Overflow = 7.09782712893383973096e+02
+ Underflow = -7.45133219101941108420e+02
+ NearZero = 1.0 / (1 << 28) // 2**-28
+ )
+
+ // special cases
+ switch {
+ case IsNaN(x) || IsInf(x, 1):
+ return x
+ case IsInf(x, -1):
+ return 0
+ case x > Overflow:
+ return Inf(1)
+ case x < Underflow:
+ return 0
+ case -NearZero < x && x < NearZero:
+ return 1 + x
+ }
+
+ // reduce; computed as r = hi - lo for extra precision.
+ var k int
+ switch {
+ case x < 0:
+ k = int(Log2e*x - 0.5)
+ case x > 0:
+ k = int(Log2e*x + 0.5)
+ }
+ hi := x - float64(k)*Ln2Hi
+ lo := float64(k) * Ln2Lo
+
+ // compute
+ return expmulti(hi, lo, k)
+}
+
+// Exp2 returns 2**x, the base-2 exponential of x.
+//
+// Special cases are the same as Exp.
+func Exp2(x float64) float64 {
+ if haveArchExp2 {
+ return archExp2(x)
+ }
+ return exp2(x)
+}
+
+func exp2(x float64) float64 {
+ const (
+ Ln2Hi = 6.93147180369123816490e-01
+ Ln2Lo = 1.90821492927058770002e-10
+
+ Overflow = 1.0239999999999999e+03
+ Underflow = -1.0740e+03
+ )
+
+ // special cases
+ switch {
+ case IsNaN(x) || IsInf(x, 1):
+ return x
+ case IsInf(x, -1):
+ return 0
+ case x > Overflow:
+ return Inf(1)
+ case x < Underflow:
+ return 0
+ }
+
+ // argument reduction; x = r×lg(e) + k with |r| ≤ ln(2)/2.
+ // computed as r = hi - lo for extra precision.
+ var k int
+ switch {
+ case x > 0:
+ k = int(x + 0.5)
+ case x < 0:
+ k = int(x - 0.5)
+ }
+ t := x - float64(k)
+ hi := t * Ln2Hi
+ lo := -t * Ln2Lo
+
+ // compute
+ return expmulti(hi, lo, k)
+}
+
+// expmulti returns e**r × 2**k where r = hi - lo and |r| ≤ ln(2)/2.
+func expmulti(hi, lo float64, k int) float64 {
+ const (
+ P1 = 1.66666666666666657415e-01 /* 0x3FC55555; 0x55555555 */
+ P2 = -2.77777777770155933842e-03 /* 0xBF66C16C; 0x16BEBD93 */
+ P3 = 6.61375632143793436117e-05 /* 0x3F11566A; 0xAF25DE2C */
+ P4 = -1.65339022054652515390e-06 /* 0xBEBBBD41; 0xC5D26BF1 */
+ P5 = 4.13813679705723846039e-08 /* 0x3E663769; 0x72BEA4D0 */
+ )
+
+ r := hi - lo
+ t := r * r
+ c := r - t*(P1+t*(P2+t*(P3+t*(P4+t*P5))))
+ y := 1 - ((lo - (r*c)/(2-c)) - hi)
+ // TODO(rsc): make sure Ldexp can handle boundary k
+ return Ldexp(y, k)
+}
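
A sketch exercising the documented thresholds (illustrative only): the Overflow and Underflow constants above mark exactly where Exp saturates:

package main

import (
    "fmt"
    "math"
)

func main() {
    // Exp and Exp2 share the e**r × 2**k kernel; Exp2(10) is exact.
    fmt.Println(math.Exp(1), math.Exp2(10)) // 2.718281828459045 1024

    fmt.Println(math.Exp(710))  // +Inf: 710 > Overflow ≈ 709.78
    fmt.Println(math.Exp(-746)) // 0: -746 < Underflow ≈ -745.13
}
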
diff --git a/contrib/go/_std_1.18/src/math/exp2_noasm.go b/contrib/go/_std_1.19/src/math/exp2_noasm.go
index c2b409329f..c2b409329f 100644
--- a/contrib/go/_std_1.18/src/math/exp2_noasm.go
+++ b/contrib/go/_std_1.19/src/math/exp2_noasm.go
diff --git a/contrib/go/_std_1.18/src/math/exp_amd64.go b/contrib/go/_std_1.19/src/math/exp_amd64.go
index 0f701b1d6d..0f701b1d6d 100644
--- a/contrib/go/_std_1.18/src/math/exp_amd64.go
+++ b/contrib/go/_std_1.19/src/math/exp_amd64.go
diff --git a/contrib/go/_std_1.18/src/math/exp_amd64.s b/contrib/go/_std_1.19/src/math/exp_amd64.s
index 02b71c81eb..02b71c81eb 100644
--- a/contrib/go/_std_1.18/src/math/exp_amd64.s
+++ b/contrib/go/_std_1.19/src/math/exp_amd64.s
diff --git a/contrib/go/_std_1.18/src/math/exp_asm.go b/contrib/go/_std_1.19/src/math/exp_asm.go
index 424442845b..424442845b 100644
--- a/contrib/go/_std_1.18/src/math/exp_asm.go
+++ b/contrib/go/_std_1.19/src/math/exp_asm.go
diff --git a/contrib/go/_std_1.19/src/math/expm1.go b/contrib/go/_std_1.19/src/math/expm1.go
new file mode 100644
index 0000000000..ff1c82f524
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/expm1.go
@@ -0,0 +1,244 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// The original C code, the long comment, and the constants
+// below are from FreeBSD's /usr/src/lib/msun/src/s_expm1.c
+// and came with this notice. The Go code is a simplified
+// version of the original C.
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+// expm1(x)
+// Returns exp(x)-1, the exponential of x minus 1.
+//
+// Method
+// 1. Argument reduction:
+// Given x, find r and integer k such that
+//
+// x = k*ln2 + r, |r| <= 0.5*ln2 ~ 0.34658
+//
+// Here a correction term c will be computed to compensate
+// the error in r when rounded to a floating-point number.
+//
+// 2. Approximating expm1(r) by a special rational function on
+// the interval [0,0.34658]:
+// Since
+// r*(exp(r)+1)/(exp(r)-1) = 2+ r**2/6 - r**4/360 + ...
+// we define R1(r*r) by
+// r*(exp(r)+1)/(exp(r)-1) = 2+ r**2/6 * R1(r*r)
+// That is,
+// R1(r**2) = 6/r *((exp(r)+1)/(exp(r)-1) - 2/r)
+// = 6/r * ( 1 + 2.0*(1/(exp(r)-1) - 1/r))
+// = 1 - r**2/60 + r**4/2520 - r**6/100800 + ...
+// We use a special Remez algorithm on [0,0.347] to generate
+// a polynomial of degree 5 in r*r to approximate R1. The
+// maximum error of this polynomial approximation is bounded
+// by 2**-61. In other words,
+// R1(z) ~ 1.0 + Q1*z + Q2*z**2 + Q3*z**3 + Q4*z**4 + Q5*z**5
+// where Q1 = -1.6666666666666567384E-2,
+// Q2 = 3.9682539681370365873E-4,
+// Q3 = -9.9206344733435987357E-6,
+// Q4 = 2.5051361420808517002E-7,
+// Q5 = -6.2843505682382617102E-9;
+// (where z=r*r, and the values of Q1 to Q5 are listed below)
+// with error bounded by
+// | 5 | -61
+// | 1.0+Q1*z+...+Q5*z - R1(z) | <= 2
+// | |
+//
+// expm1(r) = exp(r)-1 is then computed in the following
+// specific way, which minimizes the accumulated rounding error:
+// 2 3
+// r r [ 3 - (R1 + R1*r/2) ]
+// expm1(r) = r + --- + --- * [--------------------]
+// 2 2 [ 6 - r*(3 - R1*r/2) ]
+//
+// To compensate the error in the argument reduction, we use
+// expm1(r+c) = expm1(r) + c + expm1(r)*c
+// ~ expm1(r) + c + r*c
+// Thus c+r*c will be added in as the correction terms for
+// expm1(r+c). Now rearrange the terms to avoid an optimization
+// screw-up:
+// ( 2 2 )
+// ({ ( r [ R1 - (3 - R1*r/2) ] ) } r )
+// expm1(r+c)~r - ({r*(--- * [--------------------]-c)-c} - --- )
+// ({ ( 2 [ 6 - r*(3 - R1*r/2) ] ) } 2 )
+// ( )
+//
+// = r - E
+// 3. Scale back to obtain expm1(x):
+// From step 1, we have
+// expm1(x) = either 2**k*[expm1(r)+1] - 1
+// = or 2**k*[expm1(r) + (1-2**-k)]
+// 4. Implementation notes:
+// (A). To save one multiplication, we scale the coefficient Qi
+// to Qi*2**i, and replace z by (x**2)/2.
+// (B). To achieve maximum accuracy, we compute expm1(x) by
+// (i) if x < -56*ln2, return -1.0, (raise inexact if x!=inf)
+// (ii) if k=0, return r-E
+// (iii) if k=-1, return 0.5*(r-E)-0.5
+// (iv) if k=1 if r < -0.25, return 2*((r+0.5)- E)
+// else return 1.0+2.0*(r-E);
+// (v) if (k<-2||k>56) return 2**k(1-(E-r)) - 1 (or exp(x)-1)
+// (vi) if k <= 20, return 2**k((1-2**-k)-(E-r)), else
+// (vii) return 2**k(1-((E+2**-k)-r))
+//
+// Special cases:
+// expm1(INF) is INF, expm1(NaN) is NaN;
+// expm1(-INF) is -1, and
+// for finite argument, only expm1(0)=0 is exact.
+//
+// Accuracy:
+// according to an error analysis, the error is always less than
+// 1 ulp (unit in the last place).
+//
+// Misc. info.
+// For IEEE double
+// if x > 7.09782712893383973096e+02 then expm1(x) overflow
+//
+// Constants:
+// The hexadecimal values are the intended ones for the following
+// constants. The decimal values may be used, provided that the
+// compiler will convert from decimal to binary accurately enough
+// to produce the hexadecimal values shown.
+//
+
+// Expm1 returns e**x - 1, the base-e exponential of x minus 1.
+// It is more accurate than Exp(x) - 1 when x is near zero.
+//
+// Special cases are:
+//
+// Expm1(+Inf) = +Inf
+// Expm1(-Inf) = -1
+// Expm1(NaN) = NaN
+//
+// Very large values overflow to -1 or +Inf.
+func Expm1(x float64) float64 {
+ if haveArchExpm1 {
+ return archExpm1(x)
+ }
+ return expm1(x)
+}
+
+func expm1(x float64) float64 {
+ const (
+ Othreshold = 7.09782712893383973096e+02 // 0x40862E42FEFA39EF
+ Ln2X56 = 3.88162421113569373274e+01 // 0x4043687a9f1af2b1
+ Ln2HalfX3 = 1.03972077083991796413e+00 // 0x3ff0a2b23f3bab73
+ Ln2Half = 3.46573590279972654709e-01 // 0x3fd62e42fefa39ef
+ Ln2Hi = 6.93147180369123816490e-01 // 0x3fe62e42fee00000
+ Ln2Lo = 1.90821492927058770002e-10 // 0x3dea39ef35793c76
+ InvLn2 = 1.44269504088896338700e+00 // 0x3ff71547652b82fe
+ Tiny = 1.0 / (1 << 54) // 2**-54 = 0x3c90000000000000
+ // scaled coefficients related to expm1
+ Q1 = -3.33333333333331316428e-02 // 0xBFA11111111110F4
+ Q2 = 1.58730158725481460165e-03 // 0x3F5A01A019FE5585
+ Q3 = -7.93650757867487942473e-05 // 0xBF14CE199EAADBB7
+ Q4 = 4.00821782732936239552e-06 // 0x3ED0CFCA86E65239
+ Q5 = -2.01099218183624371326e-07 // 0xBE8AFDB76E09C32D
+ )
+
+ // special cases
+ switch {
+ case IsInf(x, 1) || IsNaN(x):
+ return x
+ case IsInf(x, -1):
+ return -1
+ }
+
+ absx := x
+ sign := false
+ if x < 0 {
+ absx = -absx
+ sign = true
+ }
+
+ // filter out huge argument
+ if absx >= Ln2X56 { // if |x| >= 56 * ln2
+ if sign {
+ return -1 // x < -56*ln2, return -1
+ }
+ if absx >= Othreshold { // if |x| >= 709.78...
+ return Inf(1)
+ }
+ }
+
+ // argument reduction
+ var c float64
+ var k int
+ if absx > Ln2Half { // if |x| > 0.5 * ln2
+ var hi, lo float64
+ if absx < Ln2HalfX3 { // and |x| < 1.5 * ln2
+ if !sign {
+ hi = x - Ln2Hi
+ lo = Ln2Lo
+ k = 1
+ } else {
+ hi = x + Ln2Hi
+ lo = -Ln2Lo
+ k = -1
+ }
+ } else {
+ if !sign {
+ k = int(InvLn2*x + 0.5)
+ } else {
+ k = int(InvLn2*x - 0.5)
+ }
+ t := float64(k)
+ hi = x - t*Ln2Hi // t * Ln2Hi is exact here
+ lo = t * Ln2Lo
+ }
+ x = hi - lo
+ c = (hi - x) - lo
+ } else if absx < Tiny { // when |x| < 2**-54, return x
+ return x
+ } else {
+ k = 0
+ }
+
+ // x is now in primary range
+ hfx := 0.5 * x
+ hxs := x * hfx
+ r1 := 1 + hxs*(Q1+hxs*(Q2+hxs*(Q3+hxs*(Q4+hxs*Q5))))
+ t := 3 - r1*hfx
+ e := hxs * ((r1 - t) / (6.0 - x*t))
+ if k == 0 {
+ return x - (x*e - hxs) // c is 0
+ }
+ e = (x*(e-c) - c)
+ e -= hxs
+ switch {
+ case k == -1:
+ return 0.5*(x-e) - 0.5
+ case k == 1:
+ if x < -0.25 {
+ return -2 * (e - (x + 0.5))
+ }
+ return 1 + 2*(x-e)
+ case k <= -2 || k > 56: // suffice to return exp(x)-1
+ y := 1 - (e - x)
+ y = Float64frombits(Float64bits(y) + uint64(k)<<52) // add k to y's exponent
+ return y - 1
+ }
+ if k < 20 {
+ t := Float64frombits(0x3ff0000000000000 - (0x20000000000000 >> uint(k))) // t=1-2**-k
+ y := t - (e - x)
+ y = Float64frombits(Float64bits(y) + uint64(k)<<52) // add k to y's exponent
+ return y
+ }
+ t = Float64frombits(uint64(0x3ff-k) << 52) // 2**-k
+ y := x - (e + t)
+ y++
+ y = Float64frombits(Float64bits(y) + uint64(k)<<52) // add k to y's exponent
+ return y
+}
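
A sketch of the cancellation problem Expm1 exists to solve (illustrative only): for tiny x, Exp(x) rounds to 1 plus a few ulps, so subtracting 1 destroys most of the significant digits:

package main

import (
    "fmt"
    "math"
)

func main() {
    x := 1e-15
    fmt.Println(math.Expm1(x))   // ≈1e-15, accurate to full precision
    fmt.Println(math.Exp(x) - 1) // ≈1.11e-15, leading digits already wrong
}
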
diff --git a/contrib/go/_std_1.19/src/math/floor.go b/contrib/go/_std_1.19/src/math/floor.go
new file mode 100644
index 0000000000..cb5856424b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/floor.go
@@ -0,0 +1,151 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// Floor returns the greatest integer value less than or equal to x.
+//
+// Special cases are:
+//
+// Floor(±0) = ±0
+// Floor(±Inf) = ±Inf
+// Floor(NaN) = NaN
+func Floor(x float64) float64 {
+ if haveArchFloor {
+ return archFloor(x)
+ }
+ return floor(x)
+}
+
+func floor(x float64) float64 {
+ if x == 0 || IsNaN(x) || IsInf(x, 0) {
+ return x
+ }
+ if x < 0 {
+ d, fract := Modf(-x)
+ if fract != 0.0 {
+ d = d + 1
+ }
+ return -d
+ }
+ d, _ := Modf(x)
+ return d
+}
+
+// Ceil returns the least integer value greater than or equal to x.
+//
+// Special cases are:
+//
+// Ceil(±0) = ±0
+// Ceil(±Inf) = ±Inf
+// Ceil(NaN) = NaN
+func Ceil(x float64) float64 {
+ if haveArchCeil {
+ return archCeil(x)
+ }
+ return ceil(x)
+}
+
+func ceil(x float64) float64 {
+ return -Floor(-x)
+}
+
+// Trunc returns the integer value of x.
+//
+// Special cases are:
+//
+// Trunc(±0) = ±0
+// Trunc(±Inf) = ±Inf
+// Trunc(NaN) = NaN
+func Trunc(x float64) float64 {
+ if haveArchTrunc {
+ return archTrunc(x)
+ }
+ return trunc(x)
+}
+
+func trunc(x float64) float64 {
+ if x == 0 || IsNaN(x) || IsInf(x, 0) {
+ return x
+ }
+ d, _ := Modf(x)
+ return d
+}
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+//
+// Round(±0) = ±0
+// Round(±Inf) = ±Inf
+// Round(NaN) = NaN
+func Round(x float64) float64 {
+ // Round is a faster implementation of:
+ //
+ // func Round(x float64) float64 {
+ // t := Trunc(x)
+ // if Abs(x-t) >= 0.5 {
+ // return t + Copysign(1, x)
+ // }
+ // return t
+ // }
+ bits := Float64bits(x)
+ e := uint(bits>>shift) & mask
+ if e < bias {
+ // Round abs(x) < 1 including denormals.
+ bits &= signMask // +-0
+ if e == bias-1 {
+ bits |= uvone // +-1
+ }
+ } else if e < bias+shift {
+ // Round any abs(x) >= 1 containing a fractional component [0,1).
+ //
+ // Numbers with larger exponents are returned unchanged since they
+ // must be either an integer, infinity, or NaN.
+ const half = 1 << (shift - 1)
+ e -= bias
+ bits += half >> e
+ bits &^= fracMask >> e
+ }
+ return Float64frombits(bits)
+}
+
+// RoundToEven returns the nearest integer, rounding ties to even.
+//
+// Special cases are:
+//
+// RoundToEven(±0) = ±0
+// RoundToEven(±Inf) = ±Inf
+// RoundToEven(NaN) = NaN
+func RoundToEven(x float64) float64 {
+ // RoundToEven is a faster implementation of:
+ //
+ // func RoundToEven(x float64) float64 {
+ // t := math.Trunc(x)
+ // odd := math.Remainder(t, 2) != 0
+ // if d := math.Abs(x - t); d > 0.5 || (d == 0.5 && odd) {
+ // return t + math.Copysign(1, x)
+ // }
+ // return t
+ // }
+ bits := Float64bits(x)
+ e := uint(bits>>shift) & mask
+ if e >= bias {
+ // Round abs(x) >= 1.
+ // - Large numbers without fractional components, infinity, and NaN are unchanged.
+ // - Add 0.499.. or 0.5 before truncating depending on whether the truncated
+ // number is even or odd (respectively).
+ const halfMinusULP = (1 << (shift - 1)) - 1
+ e -= bias
+ bits += (halfMinusULP + (bits>>(shift-e))&1) >> e
+ bits &^= fracMask >> e
+ } else if e == bias-1 && bits&fracMask != 0 {
+ // Round 0.5 < abs(x) < 1.
+ bits = bits&signMask | uvone // +-1
+ } else {
+ // Round abs(x) <= 0.5 including denormals.
+ bits &= signMask // +-0
+ }
+ return Float64frombits(bits)
+}
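
The two rounding functions differ only at exact halfway cases; a sketch (illustrative only) of Round's ties-away-from-zero against RoundToEven's IEEE 754 round-half-to-even convention:

package main

import (
    "fmt"
    "math"
)

func main() {
    for _, x := range []float64{0.5, 1.5, 2.5, -2.5} {
        fmt.Println(x, math.Round(x), math.RoundToEven(x))
    }
    // 0.5   1  0
    // 1.5   2  2
    // 2.5   3  2
    // -2.5 -3 -2
}
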
diff --git a/contrib/go/_std_1.18/src/math/floor_amd64.s b/contrib/go/_std_1.19/src/math/floor_amd64.s
index 088049958a..088049958a 100644
--- a/contrib/go/_std_1.18/src/math/floor_amd64.s
+++ b/contrib/go/_std_1.19/src/math/floor_amd64.s
diff --git a/contrib/go/_std_1.18/src/math/floor_asm.go b/contrib/go/_std_1.19/src/math/floor_asm.go
index fb419d6da2..fb419d6da2 100644
--- a/contrib/go/_std_1.18/src/math/floor_asm.go
+++ b/contrib/go/_std_1.19/src/math/floor_asm.go
diff --git a/contrib/go/_std_1.18/src/math/fma.go b/contrib/go/_std_1.19/src/math/fma.go
index ca0bf99f21..ca0bf99f21 100644
--- a/contrib/go/_std_1.18/src/math/fma.go
+++ b/contrib/go/_std_1.19/src/math/fma.go
diff --git a/contrib/go/_std_1.19/src/math/frexp.go b/contrib/go/_std_1.19/src/math/frexp.go
new file mode 100644
index 0000000000..e194947e64
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/frexp.go
@@ -0,0 +1,39 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// Frexp breaks f into a normalized fraction
+// and an integral power of two.
+// It returns frac and exp satisfying f == frac × 2**exp,
+// with the absolute value of frac in the interval [½, 1).
+//
+// Special cases are:
+//
+// Frexp(±0) = ±0, 0
+// Frexp(±Inf) = ±Inf, 0
+// Frexp(NaN) = NaN, 0
+func Frexp(f float64) (frac float64, exp int) {
+ if haveArchFrexp {
+ return archFrexp(f)
+ }
+ return frexp(f)
+}
+
+func frexp(f float64) (frac float64, exp int) {
+ // special cases
+ switch {
+ case f == 0:
+ return f, 0 // correctly return -0
+ case IsInf(f, 0) || IsNaN(f):
+ return f, 0
+ }
+ f, exp = normalize(f)
+ x := Float64bits(f)
+ exp += int((x>>shift)&mask) - bias + 1
+ x &^= mask << shift
+ x |= (-1 + bias) << shift
+ frac = Float64frombits(x)
+ return
+}
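
A sketch of the Frexp/Ldexp round trip (illustrative only), including a subnormal input that is first scaled up by the normalize step in bits.go:

package main

import (
    "fmt"
    "math"
)

func main() {
    frac, exp := math.Frexp(96)
    fmt.Println(frac, exp)             // 0.75 7, since 96 = 0.75 × 2**7
    fmt.Println(math.Ldexp(frac, exp)) // 96

    frac, exp = math.Frexp(5e-324) // smallest positive subnormal
    fmt.Println(frac, exp)         // 0.5 -1073
}
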
diff --git a/contrib/go/_std_1.19/src/math/gamma.go b/contrib/go/_std_1.19/src/math/gamma.go
new file mode 100644
index 0000000000..86c6723258
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/gamma.go
@@ -0,0 +1,222 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// The original C code, the long comment, and the constants
+// below are from http://netlib.sandia.gov/cephes/cprob/gamma.c.
+// The Go code is a simplified version of the original C.
+//
+// tgamma.c
+//
+// Gamma function
+//
+// SYNOPSIS:
+//
+// double x, y, tgamma();
+// extern int signgam;
+//
+// y = tgamma( x );
+//
+// DESCRIPTION:
+//
+// Returns gamma function of the argument. The result is
+// correctly signed, and the sign (+1 or -1) is also
+// returned in a global (extern) variable named signgam.
+// This variable is also filled in by the logarithmic gamma
+// function lgamma().
+//
+// Arguments |x| <= 34 are reduced by recurrence and the function
+// approximated by a rational function of degree 6/7 in the
+// interval (2,3). Large arguments are handled by Stirling's
+// formula. Large negative arguments are made positive using
+// a reflection formula.
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// DEC -34, 34 10000 1.3e-16 2.5e-17
+// IEEE -170,-33 20000 2.3e-15 3.3e-16
+// IEEE -33, 33 20000 9.4e-16 2.2e-16
+// IEEE 33, 171.6 20000 2.3e-15 3.2e-16
+//
+// Error for arguments outside the test range will be larger
+// owing to error amplification by the exponential function.
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+//
+// The readme file at http://netlib.sandia.gov/cephes/ says:
+// Some software in this archive may be from the book _Methods and
+// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+// International, 1989) or from the Cephes Mathematical Library, a
+// commercial product. In either event, it is copyrighted by the author.
+// What you see here may be used freely but it comes with no support or
+// guarantee.
+//
+// The two known misprints in the book are repaired here in the
+// source listings for the gamma function and the incomplete beta
+// integral.
+//
+// Stephen L. Moshier
+// moshier@na-net.ornl.gov
+
+var _gamP = [...]float64{
+ 1.60119522476751861407e-04,
+ 1.19135147006586384913e-03,
+ 1.04213797561761569935e-02,
+ 4.76367800457137231464e-02,
+ 2.07448227648435975150e-01,
+ 4.94214826801497100753e-01,
+ 9.99999999999999996796e-01,
+}
+var _gamQ = [...]float64{
+ -2.31581873324120129819e-05,
+ 5.39605580493303397842e-04,
+ -4.45641913851797240494e-03,
+ 1.18139785222060435552e-02,
+ 3.58236398605498653373e-02,
+ -2.34591795718243348568e-01,
+ 7.14304917030273074085e-02,
+ 1.00000000000000000320e+00,
+}
+var _gamS = [...]float64{
+ 7.87311395793093628397e-04,
+ -2.29549961613378126380e-04,
+ -2.68132617805781232825e-03,
+ 3.47222221605458667310e-03,
+ 8.33333333333482257126e-02,
+}
+
+// Gamma function computed by Stirling's formula.
+// The pair of results must be multiplied together to get the actual answer.
+// The multiplication is left to the caller so that, if careful, the caller can avoid
+// infinity for 172 <= x <= 180.
+// The polynomial is valid for 33 <= x <= 172; larger values are only used
+// in reciprocal and produce denormalized floats. The lower precision there
+// masks any imprecision in the polynomial.
+func stirling(x float64) (float64, float64) {
+ if x > 200 {
+ return Inf(1), 1
+ }
+ const (
+ SqrtTwoPi = 2.506628274631000502417
+ MaxStirling = 143.01608
+ )
+ w := 1 / x
+ w = 1 + w*((((_gamS[0]*w+_gamS[1])*w+_gamS[2])*w+_gamS[3])*w+_gamS[4])
+ y1 := Exp(x)
+ y2 := 1.0
+ if x > MaxStirling { // avoid Pow() overflow
+ v := Pow(x, 0.5*x-0.25)
+ y1, y2 = v, v/y1
+ } else {
+ y1 = Pow(x, x-0.5) / y1
+ }
+ return y1, SqrtTwoPi * w * y2
+}
+
+// Gamma returns the Gamma function of x.
+//
+// Special cases are:
+//
+// Gamma(+Inf) = +Inf
+// Gamma(+0) = +Inf
+// Gamma(-0) = -Inf
+// Gamma(x) = NaN for integer x < 0
+// Gamma(-Inf) = NaN
+// Gamma(NaN) = NaN
+func Gamma(x float64) float64 {
+ const Euler = 0.57721566490153286060651209008240243104215933593992 // A001620
+ // special cases
+ switch {
+ case isNegInt(x) || IsInf(x, -1) || IsNaN(x):
+ return NaN()
+ case IsInf(x, 1):
+ return Inf(1)
+ case x == 0:
+ if Signbit(x) {
+ return Inf(-1)
+ }
+ return Inf(1)
+ }
+ q := Abs(x)
+ p := Floor(q)
+ if q > 33 {
+ if x >= 0 {
+ y1, y2 := stirling(x)
+ return y1 * y2
+ }
+ // Note: x is negative but (checked above) not a negative integer,
+ // so x must be small enough to be in range for conversion to int64.
+ // If |x| were >= 2⁶³ it would have to be an integer.
+ signgam := 1
+ if ip := int64(p); ip&1 == 0 {
+ signgam = -1
+ }
+ z := q - p
+ if z > 0.5 {
+ p = p + 1
+ z = q - p
+ }
+ z = q * Sin(Pi*z)
+ if z == 0 {
+ return Inf(signgam)
+ }
+ sq1, sq2 := stirling(q)
+ absz := Abs(z)
+ d := absz * sq1 * sq2
+ if IsInf(d, 0) {
+ z = Pi / absz / sq1 / sq2
+ } else {
+ z = Pi / d
+ }
+ return float64(signgam) * z
+ }
+
+ // Reduce argument
+ z := 1.0
+ for x >= 3 {
+ x = x - 1
+ z = z * x
+ }
+ for x < 0 {
+ if x > -1e-09 {
+ goto small
+ }
+ z = z / x
+ x = x + 1
+ }
+ for x < 2 {
+ if x < 1e-09 {
+ goto small
+ }
+ z = z / x
+ x = x + 1
+ }
+
+ if x == 2 {
+ return z
+ }
+
+ x = x - 2
+ p = (((((x*_gamP[0]+_gamP[1])*x+_gamP[2])*x+_gamP[3])*x+_gamP[4])*x+_gamP[5])*x + _gamP[6]
+ q = ((((((x*_gamQ[0]+_gamQ[1])*x+_gamQ[2])*x+_gamQ[3])*x+_gamQ[4])*x+_gamQ[5])*x+_gamQ[6])*x + _gamQ[7]
+ return z * p / q
+
+small:
+ if x == 0 {
+ return Inf(1)
+ }
+ return z / ((1 + Euler*x) * x)
+}
+
+func isNegInt(x float64) bool {
+ if x < 0 {
+ _, xf := Modf(x)
+ return xf == 0
+ }
+ return false
+}
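
A sketch tying Gamma back to factorials and to the documented special cases (illustrative only); float64 overflows at x = 172 because 171! ≈ 1.24e309 exceeds MaxFloat64:

package main

import (
    "fmt"
    "math"
)

func main() {
    // For positive integers, Gamma(n+1) = n!.
    for n := 1.0; n <= 5; n++ {
        fmt.Print(math.Gamma(n+1), " ") // 1 2 6 24 120
    }
    fmt.Println()

    fmt.Println(math.Gamma(-2))  // NaN: Gamma(x) = NaN for integer x < 0
    fmt.Println(math.Gamma(172)) // +Inf: overflow
}
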
diff --git a/contrib/go/_std_1.19/src/math/hypot.go b/contrib/go/_std_1.19/src/math/hypot.go
new file mode 100644
index 0000000000..4e79de0e9b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/hypot.go
@@ -0,0 +1,44 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+/*
+ Hypot -- sqrt(p*p + q*q), but overflows only if the result does.
+*/
+
+// Hypot returns Sqrt(p*p + q*q), taking care to avoid
+// unnecessary overflow and underflow.
+//
+// Special cases are:
+//
+// Hypot(±Inf, q) = +Inf
+// Hypot(p, ±Inf) = +Inf
+// Hypot(NaN, q) = NaN
+// Hypot(p, NaN) = NaN
+func Hypot(p, q float64) float64 {
+ if haveArchHypot {
+ return archHypot(p, q)
+ }
+ return hypot(p, q)
+}
+
+func hypot(p, q float64) float64 {
+ // special cases
+ switch {
+ case IsInf(p, 0) || IsInf(q, 0):
+ return Inf(1)
+ case IsNaN(p) || IsNaN(q):
+ return NaN()
+ }
+ p, q = Abs(p), Abs(q)
+ if p < q {
+ p, q = q, p
+ }
+ if p == 0 {
+ return 0
+ }
+ q = q / p
+ return p * Sqrt(1+q*q)
+}
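
A sketch of why the rescaling matters (illustrative only): squaring 3e300 already overflows float64, and squaring 3e-300 underflows to zero, so the naive formula fails at both ends while Hypot does not:

package main

import (
    "fmt"
    "math"
)

func main() {
    p, q := 3e300, 4e300
    fmt.Println(math.Sqrt(p*p + q*q)) // +Inf: p*p overflowed
    fmt.Println(math.Hypot(p, q))     // 5e+300

    t, u := 3e-300, 4e-300
    fmt.Println(math.Sqrt(t*t + u*u)) // 0: t*t underflowed
    fmt.Println(math.Hypot(t, u))     // 5e-300
}
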
diff --git a/contrib/go/_std_1.18/src/math/hypot_amd64.s b/contrib/go/_std_1.19/src/math/hypot_amd64.s
index fe326c9281..fe326c9281 100644
--- a/contrib/go/_std_1.18/src/math/hypot_amd64.s
+++ b/contrib/go/_std_1.19/src/math/hypot_amd64.s
diff --git a/contrib/go/_std_1.18/src/math/hypot_asm.go b/contrib/go/_std_1.19/src/math/hypot_asm.go
index 852691037f..852691037f 100644
--- a/contrib/go/_std_1.18/src/math/hypot_asm.go
+++ b/contrib/go/_std_1.19/src/math/hypot_asm.go
diff --git a/contrib/go/_std_1.19/src/math/j0.go b/contrib/go/_std_1.19/src/math/j0.go
new file mode 100644
index 0000000000..a311e18d62
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/j0.go
@@ -0,0 +1,429 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+/*
+ Bessel function of the first and second kinds of order zero.
+*/
+
+// The original C code and the long comment below are
+// from FreeBSD's /usr/src/lib/msun/src/e_j0.c and
+// came with this notice. The Go code is a simplified
+// version of the original C.
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+// __ieee754_j0(x), __ieee754_y0(x)
+// Bessel function of the first and second kinds of order zero.
+// Method -- j0(x):
+// 1. For tiny x, we use j0(x) = 1 - x**2/4 + x**4/64 - ...
+// 2. Reduce x to |x| since j0(x)=j0(-x), and
+// for x in (0,2)
+// j0(x) = 1-z/4+ z**2*R0/S0, where z = x*x;
+// (precision: |j0-1+z/4-z**2R0/S0 |<2**-63.67 )
+// for x in (2,inf)
+// j0(x) = sqrt(2/(pi*x))*(p0(x)*cos(x0)-q0(x)*sin(x0))
+// where x0 = x-pi/4. It is better to compute sin(x0),cos(x0)
+// as follows:
+// cos(x0) = cos(x)cos(pi/4)+sin(x)sin(pi/4)
+// = 1/sqrt(2) * (cos(x) + sin(x))
+// sin(x0) = sin(x)cos(pi/4)-cos(x)sin(pi/4)
+// = 1/sqrt(2) * (sin(x) - cos(x))
+// (To avoid cancellation, use
+// sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
+// to compute the worse one.)
+//
+// 3 Special cases
+// j0(nan)= nan
+// j0(0) = 1
+// j0(inf) = 0
+//
+// Method -- y0(x):
+// 1. For x<2.
+// Since
+// y0(x) = 2/pi*(j0(x)*(ln(x/2)+Euler) + x**2/4 - ...)
+// therefore y0(x)-2/pi*j0(x)*ln(x) is an even function.
+// We use the following function to approximate y0,
+// y0(x) = U(z)/V(z) + (2/pi)*(j0(x)*ln(x)), z= x**2
+// where
+// U(z) = u00 + u01*z + ... + u06*z**6
+// V(z) = 1 + v01*z + ... + v04*z**4
+// with absolute approximation error bounded by 2**-72.
+// Note: For tiny x, U/V = u0 and j0(x)~1, hence
+// y0(tiny) = u0 + (2/pi)*ln(tiny), (choose tiny<2**-27)
+// 2. For x>=2.
+// y0(x) = sqrt(2/(pi*x))*(p0(x)*cos(x0)+q0(x)*sin(x0))
+// where x0 = x-pi/4. It is better to compute sin(x0),cos(x0)
+// by the method mentioned above.
+// 3. Special cases: y0(0)=-inf, y0(x<0)=NaN, y0(inf)=0.
+//
+
+// J0 returns the order-zero Bessel function of the first kind.
+//
+// Special cases are:
+//
+// J0(±Inf) = 0
+// J0(0) = 1
+// J0(NaN) = NaN
+func J0(x float64) float64 {
+ const (
+ Huge = 1e300
+ TwoM27 = 1.0 / (1 << 27) // 2**-27 0x3e40000000000000
+ TwoM13 = 1.0 / (1 << 13) // 2**-13 0x3f20000000000000
+ Two129 = 1 << 129 // 2**129 0x4800000000000000
+ // R0/S0 on [0, 2]
+ R02 = 1.56249999999999947958e-02 // 0x3F8FFFFFFFFFFFFD
+ R03 = -1.89979294238854721751e-04 // 0xBF28E6A5B61AC6E9
+ R04 = 1.82954049532700665670e-06 // 0x3EBEB1D10C503919
+ R05 = -4.61832688532103189199e-09 // 0xBE33D5E773D63FCE
+ S01 = 1.56191029464890010492e-02 // 0x3F8FFCE882C8C2A4
+ S02 = 1.16926784663337450260e-04 // 0x3F1EA6D2DD57DBF4
+ S03 = 5.13546550207318111446e-07 // 0x3EA13B54CE84D5A9
+ S04 = 1.16614003333790000205e-09 // 0x3E1408BCF4745D8F
+ )
+ // special cases
+ switch {
+ case IsNaN(x):
+ return x
+ case IsInf(x, 0):
+ return 0
+ case x == 0:
+ return 1
+ }
+
+ x = Abs(x)
+ if x >= 2 {
+ s, c := Sincos(x)
+ ss := s - c
+ cc := s + c
+
+ // make sure x+x does not overflow
+ if x < MaxFloat64/2 {
+ z := -Cos(x + x)
+ if s*c < 0 {
+ cc = z / ss
+ } else {
+ ss = z / cc
+ }
+ }
+
+ // j0(x) = 1/sqrt(pi) * (P(0,x)*cc - Q(0,x)*ss) / sqrt(x)
+ // y0(x) = 1/sqrt(pi) * (P(0,x)*ss + Q(0,x)*cc) / sqrt(x)
+
+ var z float64
+ if x > Two129 { // |x| > ~6.8056e+38
+ z = (1 / SqrtPi) * cc / Sqrt(x)
+ } else {
+ u := pzero(x)
+ v := qzero(x)
+ z = (1 / SqrtPi) * (u*cc - v*ss) / Sqrt(x)
+ }
+ return z // |x| >= 2.0
+ }
+ if x < TwoM13 { // |x| < ~1.2207e-4
+ if x < TwoM27 {
+ return 1 // |x| < ~7.4506e-9
+ }
+ return 1 - 0.25*x*x // ~7.4506e-9 < |x| < ~1.2207e-4
+ }
+ z := x * x
+ r := z * (R02 + z*(R03+z*(R04+z*R05)))
+ s := 1 + z*(S01+z*(S02+z*(S03+z*S04)))
+ if x < 1 {
+ return 1 + z*(-0.25+(r/s)) // |x| < 1.00
+ }
+ u := 0.5 * x
+ return (1+u)*(1-u) + z*(r/s) // 1.0 < |x| < 2.0
+}
+
+// Y0 returns the order-zero Bessel function of the second kind.
+//
+// Special cases are:
+//
+// Y0(+Inf) = 0
+// Y0(0) = -Inf
+// Y0(x < 0) = NaN
+// Y0(NaN) = NaN
+func Y0(x float64) float64 {
+ const (
+ TwoM27 = 1.0 / (1 << 27) // 2**-27 0x3e40000000000000
+ Two129 = 1 << 129 // 2**129 0x4800000000000000
+ U00 = -7.38042951086872317523e-02 // 0xBFB2E4D699CBD01F
+ U01 = 1.76666452509181115538e-01 // 0x3FC69D019DE9E3FC
+ U02 = -1.38185671945596898896e-02 // 0xBF8C4CE8B16CFA97
+ U03 = 3.47453432093683650238e-04 // 0x3F36C54D20B29B6B
+ U04 = -3.81407053724364161125e-06 // 0xBECFFEA773D25CAD
+ U05 = 1.95590137035022920206e-08 // 0x3E5500573B4EABD4
+ U06 = -3.98205194132103398453e-11 // 0xBDC5E43D693FB3C8
+ V01 = 1.27304834834123699328e-02 // 0x3F8A127091C9C71A
+ V02 = 7.60068627350353253702e-05 // 0x3F13ECBBF578C6C1
+ V03 = 2.59150851840457805467e-07 // 0x3E91642D7FF202FD
+ V04 = 4.41110311332675467403e-10 // 0x3DFE50183BD6D9EF
+ )
+ // special cases
+ switch {
+ case x < 0 || IsNaN(x):
+ return NaN()
+ case IsInf(x, 1):
+ return 0
+ case x == 0:
+ return Inf(-1)
+ }
+
+ if x >= 2 { // |x| >= 2.0
+
+ // y0(x) = sqrt(2/(pi*x))*(p0(x)*sin(x0)+q0(x)*cos(x0))
+ // where x0 = x-pi/4
+ // Better formula:
+ // cos(x0) = cos(x)cos(pi/4)+sin(x)sin(pi/4)
+ // = 1/sqrt(2) * (sin(x) + cos(x))
+ // sin(x0) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
+ // = 1/sqrt(2) * (sin(x) - cos(x))
+ // To avoid cancellation, use
+ // sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
+ // to compute the worse one.
+
+ s, c := Sincos(x)
+ ss := s - c
+ cc := s + c
+
+ // j0(x) = 1/sqrt(pi) * (P(0,x)*cc - Q(0,x)*ss) / sqrt(x)
+ // y0(x) = 1/sqrt(pi) * (P(0,x)*ss + Q(0,x)*cc) / sqrt(x)
+
+ // make sure x+x does not overflow
+ if x < MaxFloat64/2 {
+ z := -Cos(x + x)
+ if s*c < 0 {
+ cc = z / ss
+ } else {
+ ss = z / cc
+ }
+ }
+ var z float64
+ if x > Two129 { // |x| > ~6.8056e+38
+ z = (1 / SqrtPi) * ss / Sqrt(x)
+ } else {
+ u := pzero(x)
+ v := qzero(x)
+ z = (1 / SqrtPi) * (u*ss + v*cc) / Sqrt(x)
+ }
+ return z // |x| >= 2.0
+ }
+ if x <= TwoM27 {
+ return U00 + (2/Pi)*Log(x) // |x| < ~7.4506e-9
+ }
+ z := x * x
+ u := U00 + z*(U01+z*(U02+z*(U03+z*(U04+z*(U05+z*U06)))))
+ v := 1 + z*(V01+z*(V02+z*(V03+z*V04)))
+ return u/v + (2/Pi)*J0(x)*Log(x) // ~7.4506e-9 < |x| < 2.0
+}
+
+// The asymptotic expansion of pzero is
+// 1 - 9/128 s**2 + 11025/98304 s**4 - ..., where s = 1/x.
+// For x >= 2, we approximate pzero by
+// pzero(x) = 1 + (R/S)
+// where R = pR0 + pR1*s**2 + pR2*s**4 + ... + pR5*s**10
+// S = 1 + pS0*s**2 + ... + pS4*s**10
+// and
+// | pzero(x)-1-R/S | <= 2 ** ( -60.26)
+
+// for x in [inf, 8]=1/[0,0.125]
+var p0R8 = [6]float64{
+ 0.00000000000000000000e+00, // 0x0000000000000000
+ -7.03124999999900357484e-02, // 0xBFB1FFFFFFFFFD32
+ -8.08167041275349795626e+00, // 0xC02029D0B44FA779
+ -2.57063105679704847262e+02, // 0xC07011027B19E863
+ -2.48521641009428822144e+03, // 0xC0A36A6ECD4DCAFC
+ -5.25304380490729545272e+03, // 0xC0B4850B36CC643D
+}
+var p0S8 = [5]float64{
+ 1.16534364619668181717e+02, // 0x405D223307A96751
+ 3.83374475364121826715e+03, // 0x40ADF37D50596938
+ 4.05978572648472545552e+04, // 0x40E3D2BB6EB6B05F
+ 1.16752972564375915681e+05, // 0x40FC810F8F9FA9BD
+ 4.76277284146730962675e+04, // 0x40E741774F2C49DC
+}
+
+// for x in [8,4.5454]=1/[0.125,0.22001]
+var p0R5 = [6]float64{
+ -1.14125464691894502584e-11, // 0xBDA918B147E495CC
+ -7.03124940873599280078e-02, // 0xBFB1FFFFE69AFBC6
+ -4.15961064470587782438e+00, // 0xC010A370F90C6BBF
+ -6.76747652265167261021e+01, // 0xC050EB2F5A7D1783
+ -3.31231299649172967747e+02, // 0xC074B3B36742CC63
+ -3.46433388365604912451e+02, // 0xC075A6EF28A38BD7
+}
+var p0S5 = [5]float64{
+ 6.07539382692300335975e+01, // 0x404E60810C98C5DE
+ 1.05125230595704579173e+03, // 0x40906D025C7E2864
+ 5.97897094333855784498e+03, // 0x40B75AF88FBE1D60
+ 9.62544514357774460223e+03, // 0x40C2CCB8FA76FA38
+ 2.40605815922939109441e+03, // 0x40A2CC1DC70BE864
+}
+
+// for x in [4.547,2.8571]=1/[0.2199,0.35001]
+var p0R3 = [6]float64{
+ -2.54704601771951915620e-09, // 0xBE25E1036FE1AA86
+ -7.03119616381481654654e-02, // 0xBFB1FFF6F7C0E24B
+ -2.40903221549529611423e+00, // 0xC00345B2AEA48074
+ -2.19659774734883086467e+01, // 0xC035F74A4CB94E14
+ -5.80791704701737572236e+01, // 0xC04D0A22420A1A45
+ -3.14479470594888503854e+01, // 0xC03F72ACA892D80F
+}
+var p0S3 = [5]float64{
+ 3.58560338055209726349e+01, // 0x4041ED9284077DD3
+ 3.61513983050303863820e+02, // 0x40769839464A7C0E
+ 1.19360783792111533330e+03, // 0x4092A66E6D1061D6
+ 1.12799679856907414432e+03, // 0x40919FFCB8C39B7E
+ 1.73580930813335754692e+02, // 0x4065B296FC379081
+}
+
+// for x in [2.8570,2]=1/[0.3499,0.5]
+var p0R2 = [6]float64{
+ -8.87534333032526411254e-08, // 0xBE77D316E927026D
+ -7.03030995483624743247e-02, // 0xBFB1FF62495E1E42
+ -1.45073846780952986357e+00, // 0xBFF736398A24A843
+ -7.63569613823527770791e+00, // 0xC01E8AF3EDAFA7F3
+ -1.11931668860356747786e+01, // 0xC02662E6C5246303
+ -3.23364579351335335033e+00, // 0xC009DE81AF8FE70F
+}
+var p0S2 = [5]float64{
+ 2.22202997532088808441e+01, // 0x40363865908B5959
+ 1.36206794218215208048e+02, // 0x4061069E0EE8878F
+ 2.70470278658083486789e+02, // 0x4070E78642EA079B
+ 1.53875394208320329881e+02, // 0x40633C033AB6FAFF
+ 1.46576176948256193810e+01, // 0x402D50B344391809
+}
+
+func pzero(x float64) float64 {
+ var p *[6]float64
+ var q *[5]float64
+ if x >= 8 {
+ p = &p0R8
+ q = &p0S8
+ } else if x >= 4.5454 {
+ p = &p0R5
+ q = &p0S5
+ } else if x >= 2.8571 {
+ p = &p0R3
+ q = &p0S3
+ } else if x >= 2 {
+ p = &p0R2
+ q = &p0S2
+ }
+ z := 1 / (x * x)
+ r := p[0] + z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))))
+ s := 1 + z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*q[4]))))
+ return 1 + r/s
+}
+
+// For x >= 8, the asymptotic expansion of qzero is
+// -1/8 s + 75/1024 s**3 - ..., where s = 1/x.
+// We approximate qzero by
+// qzero(x) = s*(-1.25 + (R/S))
+// where R = qR0 + qR1*s**2 + qR2*s**4 + ... + qR5*s**10
+// S = 1 + qS0*s**2 + ... + qS5*s**12
+// and
+// | qzero(x)/s +1.25-R/S | <= 2**(-61.22)
+
+// for x in [inf, 8]=1/[0,0.125]
+var q0R8 = [6]float64{
+ 0.00000000000000000000e+00, // 0x0000000000000000
+ 7.32421874999935051953e-02, // 0x3FB2BFFFFFFFFE2C
+ 1.17682064682252693899e+01, // 0x402789525BB334D6
+ 5.57673380256401856059e+02, // 0x40816D6315301825
+ 8.85919720756468632317e+03, // 0x40C14D993E18F46D
+ 3.70146267776887834771e+04, // 0x40E212D40E901566
+}
+var q0S8 = [6]float64{
+ 1.63776026895689824414e+02, // 0x406478D5365B39BC
+ 8.09834494656449805916e+03, // 0x40BFA2584E6B0563
+ 1.42538291419120476348e+05, // 0x4101665254D38C3F
+ 8.03309257119514397345e+05, // 0x412883DA83A52B43
+ 8.40501579819060512818e+05, // 0x4129A66B28DE0B3D
+ -3.43899293537866615225e+05, // 0xC114FD6D2C9530C5
+}
+
+// for x in [8,4.5454]=1/[0.125,0.22001]
+var q0R5 = [6]float64{
+ 1.84085963594515531381e-11, // 0x3DB43D8F29CC8CD9
+ 7.32421766612684765896e-02, // 0x3FB2BFFFD172B04C
+ 5.83563508962056953777e+00, // 0x401757B0B9953DD3
+ 1.35111577286449829671e+02, // 0x4060E3920A8788E9
+ 1.02724376596164097464e+03, // 0x40900CF99DC8C481
+ 1.98997785864605384631e+03, // 0x409F17E953C6E3A6
+}
+var q0S5 = [6]float64{
+ 8.27766102236537761883e+01, // 0x4054B1B3FB5E1543
+ 2.07781416421392987104e+03, // 0x40A03BA0DA21C0CE
+ 1.88472887785718085070e+04, // 0x40D267D27B591E6D
+ 5.67511122894947329769e+04, // 0x40EBB5E397E02372
+ 3.59767538425114471465e+04, // 0x40E191181F7A54A0
+ -5.35434275601944773371e+03, // 0xC0B4EA57BEDBC609
+}
+
+// for x in [4.547,2.8571]=1/[0.2199,0.35001]
+var q0R3 = [6]float64{
+ 4.37741014089738620906e-09, // 0x3E32CD036ADECB82
+ 7.32411180042911447163e-02, // 0x3FB2BFEE0E8D0842
+ 3.34423137516170720929e+00, // 0x400AC0FC61149CF5
+ 4.26218440745412650017e+01, // 0x40454F98962DAEDD
+ 1.70808091340565596283e+02, // 0x406559DBE25EFD1F
+ 1.66733948696651168575e+02, // 0x4064D77C81FA21E0
+}
+var q0S3 = [6]float64{
+ 4.87588729724587182091e+01, // 0x40486122BFE343A6
+ 7.09689221056606015736e+02, // 0x40862D8386544EB3
+ 3.70414822620111362994e+03, // 0x40ACF04BE44DFC63
+ 6.46042516752568917582e+03, // 0x40B93C6CD7C76A28
+ 2.51633368920368957333e+03, // 0x40A3A8AAD94FB1C0
+ -1.49247451836156386662e+02, // 0xC062A7EB201CF40F
+}
+
+// for x in [2.8570,2]=1/[0.3499,0.5]
+var q0R2 = [6]float64{
+ 1.50444444886983272379e-07, // 0x3E84313B54F76BDB
+ 7.32234265963079278272e-02, // 0x3FB2BEC53E883E34
+ 1.99819174093815998816e+00, // 0x3FFFF897E727779C
+ 1.44956029347885735348e+01, // 0x402CFDBFAAF96FE5
+ 3.16662317504781540833e+01, // 0x403FAA8E29FBDC4A
+ 1.62527075710929267416e+01, // 0x403040B171814BB4
+}
+var q0S2 = [6]float64{
+ 3.03655848355219184498e+01, // 0x403E5D96F7C07AED
+ 2.69348118608049844624e+02, // 0x4070D591E4D14B40
+ 8.44783757595320139444e+02, // 0x408A664522B3BF22
+ 8.82935845112488550512e+02, // 0x408B977C9C5CC214
+ 2.12666388511798828631e+02, // 0x406A95530E001365
+ -5.31095493882666946917e+00, // 0xC0153E6AF8B32931
+}
+
+func qzero(x float64) float64 {
+ var p, q *[6]float64
+ if x >= 8 {
+ p = &q0R8
+ q = &q0S8
+ } else if x >= 4.5454 {
+ p = &q0R5
+ q = &q0S5
+ } else if x >= 2.8571 {
+ p = &q0R3
+ q = &q0S3
+ } else if x >= 2 {
+ p = &q0R2
+ q = &q0S2
+ }
+ z := 1 / (x * x)
+ r := p[0] + z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))))
+ s := 1 + z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*(q[4]+z*q[5])))))
+ return (-0.125 + r/s) / x
+}
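+
+// Editorial sketch (not part of the upstream source): for large x the
+// leading terms of the expansions above dominate; at x = 100,
+//
+//	pzero(100) ≈ 1 - 9/(128*100*100) ≈ 0.99999296875
+//	qzero(100) ≈ -0.125/100          = -0.00125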
diff --git a/contrib/go/_std_1.19/src/math/j1.go b/contrib/go/_std_1.19/src/math/j1.go
new file mode 100644
index 0000000000..cc19e75b95
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/j1.go
@@ -0,0 +1,424 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+/*
+ Bessel function of the first and second kinds of order one.
+*/
+
+// The original C code and the long comment below are
+// from FreeBSD's /usr/src/lib/msun/src/e_j1.c and
+// came with this notice. The Go code is a simplified
+// version of the original C.
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+// __ieee754_j1(x), __ieee754_y1(x)
+// Bessel function of the first and second kinds of order one.
+// Method -- j1(x):
+// 1. For tiny x, we use j1(x) = x/2 - x**3/16 + x**5/384 - ...
+// 2. Reduce x to |x| since j1(x)=-j1(-x), and
+// for x in (0,2)
+// j1(x) = x/2 + x*z*R0/S0, where z = x*x;
+// (precision: |j1/x - 1/2 - R0/S0 |<2**-61.51 )
+// for x in (2,inf)
+// j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
+// y1(x) = sqrt(2/(pi*x))*(p1(x)*sin(x1)+q1(x)*cos(x1))
+// where x1 = x-3*pi/4. It is better to compute sin(x1),cos(x1)
+// as follows:
+// cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
+// = 1/sqrt(2) * (sin(x) - cos(x))
+// sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
+// = -1/sqrt(2) * (sin(x) + cos(x))
+// (To avoid cancellation, use
+// sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
+// to compute the worse one.)
+//
+// 3 Special cases
+// j1(nan)= nan
+// j1(0) = 0
+// j1(inf) = 0
+//
+// Method -- y1(x):
+// 1. screen out x<=0 cases: y1(0)=-inf, y1(x<0)=NaN
+// 2. For x<2.
+// Since
+// y1(x) = 2/pi*(j1(x)*(ln(x/2)+Euler)-1/x-x/2+5/64*x**3-...)
+// therefore y1(x)-2/pi*j1(x)*ln(x)-1/x is an odd function.
+// We use the following function to approximate y1,
+// y1(x) = x*U(z)/V(z) + (2/pi)*(j1(x)*ln(x)-1/x), z= x**2
+// where for x in [0,2] (abs err less than 2**-65.89)
+// U(z) = U0[0] + U0[1]*z + ... + U0[4]*z**4
+// V(z) = 1 + v0[0]*z + ... + v0[4]*z**5
+// Note: For tiny x, 1/x dominates y1 and hence
+// y1(tiny) = -2/pi/tiny, (choose tiny<2**-54)
+// 3. For x>=2.
+// y1(x) = sqrt(2/(pi*x))*(p1(x)*sin(x1)+q1(x)*cos(x1))
+// where x1 = x-3*pi/4. It is better to compute sin(x1),cos(x1)
+// by the method mentioned above.
+
+// J1 returns the order-one Bessel function of the first kind.
+//
+// Special cases are:
+//
+// J1(±Inf) = 0
+// J1(NaN) = NaN
+func J1(x float64) float64 {
+ const (
+ TwoM27 = 1.0 / (1 << 27) // 2**-27 0x3e40000000000000
+ Two129 = 1 << 129 // 2**129 0x4800000000000000
+ // R0/S0 on [0, 2]
+ R00 = -6.25000000000000000000e-02 // 0xBFB0000000000000
+ R01 = 1.40705666955189706048e-03 // 0x3F570D9F98472C61
+ R02 = -1.59955631084035597520e-05 // 0xBEF0C5C6BA169668
+ R03 = 4.96727999609584448412e-08 // 0x3E6AAAFA46CA0BD9
+ S01 = 1.91537599538363460805e-02 // 0x3F939D0B12637E53
+ S02 = 1.85946785588630915560e-04 // 0x3F285F56B9CDF664
+ S03 = 1.17718464042623683263e-06 // 0x3EB3BFF8333F8498
+ S04 = 5.04636257076217042715e-09 // 0x3E35AC88C97DFF2C
+ S05 = 1.23542274426137913908e-11 // 0x3DAB2ACFCFB97ED8
+ )
+ // special cases
+ switch {
+ case IsNaN(x):
+ return x
+ case IsInf(x, 0) || x == 0:
+ return 0
+ }
+
+ sign := false
+ if x < 0 {
+ x = -x
+ sign = true
+ }
+ if x >= 2 {
+ s, c := Sincos(x)
+ ss := -s - c
+ cc := s - c
+
+ // make sure x+x does not overflow
+ if x < MaxFloat64/2 {
+ z := Cos(x + x)
+ if s*c > 0 {
+ cc = z / ss
+ } else {
+ ss = z / cc
+ }
+ }
+
+ // j1(x) = 1/sqrt(pi) * (P(1,x)*cc - Q(1,x)*ss) / sqrt(x)
+ // y1(x) = 1/sqrt(pi) * (P(1,x)*ss + Q(1,x)*cc) / sqrt(x)
+
+ var z float64
+ if x > Two129 {
+ z = (1 / SqrtPi) * cc / Sqrt(x)
+ } else {
+ u := pone(x)
+ v := qone(x)
+ z = (1 / SqrtPi) * (u*cc - v*ss) / Sqrt(x)
+ }
+ if sign {
+ return -z
+ }
+ return z
+ }
+ if x < TwoM27 { // |x|<2**-27
+ return 0.5 * x // raising inexact is necessary if x != 0
+ }
+ z := x * x
+ r := z * (R00 + z*(R01+z*(R02+z*R03)))
+ s := 1.0 + z*(S01+z*(S02+z*(S03+z*(S04+z*S05))))
+ r *= x
+ z = 0.5*x + r/s
+ if sign {
+ return -z
+ }
+ return z
+}
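+
+// Editorial sketch (not part of the upstream source): the sign reduction in
+// J1 above relies on J1 being odd, J1(-x) = -J1(x). Assuming the exported
+// math API:
+//
+//	math.J1(3)  // ≈  0.3390589585259365
+//	math.J1(-3) // ≈ -0.3390589585259365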
+
+// Y1 returns the order-one Bessel function of the second kind.
+//
+// Special cases are:
+//
+// Y1(+Inf) = 0
+// Y1(0) = -Inf
+// Y1(x < 0) = NaN
+// Y1(NaN) = NaN
+func Y1(x float64) float64 {
+ const (
+ TwoM54 = 1.0 / (1 << 54) // 2**-54 0x3c90000000000000
+ Two129 = 1 << 129 // 2**129 0x4800000000000000
+ U00 = -1.96057090646238940668e-01 // 0xBFC91866143CBC8A
+ U01 = 5.04438716639811282616e-02 // 0x3FA9D3C776292CD1
+ U02 = -1.91256895875763547298e-03 // 0xBF5F55E54844F50F
+ U03 = 2.35252600561610495928e-05 // 0x3EF8AB038FA6B88E
+ U04 = -9.19099158039878874504e-08 // 0xBE78AC00569105B8
+ V00 = 1.99167318236649903973e-02 // 0x3F94650D3F4DA9F0
+ V01 = 2.02552581025135171496e-04 // 0x3F2A8C896C257764
+ V02 = 1.35608801097516229404e-06 // 0x3EB6C05A894E8CA6
+ V03 = 6.22741452364621501295e-09 // 0x3E3ABF1D5BA69A86
+ V04 = 1.66559246207992079114e-11 // 0x3DB25039DACA772A
+ )
+ // special cases
+ switch {
+ case x < 0 || IsNaN(x):
+ return NaN()
+ case IsInf(x, 1):
+ return 0
+ case x == 0:
+ return Inf(-1)
+ }
+
+ if x >= 2 {
+ s, c := Sincos(x)
+ ss := -s - c
+ cc := s - c
+
+ // make sure x+x does not overflow
+ if x < MaxFloat64/2 {
+ z := Cos(x + x)
+ if s*c > 0 {
+ cc = z / ss
+ } else {
+ ss = z / cc
+ }
+ }
+ // y1(x) = sqrt(2/(pi*x))*(p1(x)*sin(x0)+q1(x)*cos(x0))
+ // where x0 = x-3pi/4
+ // Better formula:
+ // cos(x0) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
+ // = 1/sqrt(2) * (sin(x) - cos(x))
+ // sin(x0) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
+ // = -1/sqrt(2) * (cos(x) + sin(x))
+ // To avoid cancellation, use
+ // sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
+ // to compute the worse one.
+
+ var z float64
+ if x > Two129 {
+ z = (1 / SqrtPi) * ss / Sqrt(x)
+ } else {
+ u := pone(x)
+ v := qone(x)
+ z = (1 / SqrtPi) * (u*ss + v*cc) / Sqrt(x)
+ }
+ return z
+ }
+ if x <= TwoM54 { // x <= 2**-54
+ return -(2 / Pi) / x
+ }
+ z := x * x
+ u := U00 + z*(U01+z*(U02+z*(U03+z*U04)))
+ v := 1 + z*(V00+z*(V01+z*(V02+z*(V03+z*V04))))
+ return x*(u/v) + (2/Pi)*(J1(x)*Log(x)-1/x)
+}
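+
+// Editorial sketch (not part of the upstream source): for large x the
+// leading asymptotic term above already tracks Y1 well; the neglected
+// q1 contribution is O(1/x). Assuming the exported math API:
+//
+//	x := 50.0
+//	approx := math.Sqrt(2/(math.Pi*x)) * math.Sin(x-3*math.Pi/4)
+//	// approx differs from math.Y1(x) only by terms of order 1/x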
+
+// For x >= 8, the asymptotic expansion of pone is
+// 1 + 15/128 s**2 - 4725/2**15 s**4 - ..., where s = 1/x.
+// We approximate pone by
+// pone(x) = 1 + (R/S)
+// where R = pr0 + pr1*s**2 + pr2*s**4 + ... + pr5*s**10
+// S = 1 + ps0*s**2 + ... + ps4*s**10
+// and
+// | pone(x)-1-R/S | <= 2**(-60.06)
+
+// for x in [inf, 8]=1/[0,0.125]
+var p1R8 = [6]float64{
+ 0.00000000000000000000e+00, // 0x0000000000000000
+ 1.17187499999988647970e-01, // 0x3FBDFFFFFFFFFCCE
+ 1.32394806593073575129e+01, // 0x402A7A9D357F7FCE
+ 4.12051854307378562225e+02, // 0x4079C0D4652EA590
+ 3.87474538913960532227e+03, // 0x40AE457DA3A532CC
+ 7.91447954031891731574e+03, // 0x40BEEA7AC32782DD
+}
+var p1S8 = [5]float64{
+ 1.14207370375678408436e+02, // 0x405C8D458E656CAC
+ 3.65093083420853463394e+03, // 0x40AC85DC964D274F
+ 3.69562060269033463555e+04, // 0x40E20B8697C5BB7F
+ 9.76027935934950801311e+04, // 0x40F7D42CB28F17BB
+ 3.08042720627888811578e+04, // 0x40DE1511697A0B2D
+}
+
+// for x in [8,4.5454] = 1/[0.125,0.22001]
+var p1R5 = [6]float64{
+ 1.31990519556243522749e-11, // 0x3DAD0667DAE1CA7D
+ 1.17187493190614097638e-01, // 0x3FBDFFFFE2C10043
+ 6.80275127868432871736e+00, // 0x401B36046E6315E3
+ 1.08308182990189109773e+02, // 0x405B13B9452602ED
+ 5.17636139533199752805e+02, // 0x40802D16D052D649
+ 5.28715201363337541807e+02, // 0x408085B8BB7E0CB7
+}
+var p1S5 = [5]float64{
+ 5.92805987221131331921e+01, // 0x404DA3EAA8AF633D
+ 9.91401418733614377743e+02, // 0x408EFB361B066701
+ 5.35326695291487976647e+03, // 0x40B4E9445706B6FB
+ 7.84469031749551231769e+03, // 0x40BEA4B0B8A5BB15
+ 1.50404688810361062679e+03, // 0x40978030036F5E51
+}
+
+// for x in[4.5453,2.8571] = 1/[0.2199,0.35001]
+var p1R3 = [6]float64{
+ 3.02503916137373618024e-09, // 0x3E29FC21A7AD9EDD
+ 1.17186865567253592491e-01, // 0x3FBDFFF55B21D17B
+ 3.93297750033315640650e+00, // 0x400F76BCE85EAD8A
+ 3.51194035591636932736e+01, // 0x40418F489DA6D129
+ 9.10550110750781271918e+01, // 0x4056C3854D2C1837
+ 4.85590685197364919645e+01, // 0x4048478F8EA83EE5
+}
+var p1S3 = [5]float64{
+ 3.47913095001251519989e+01, // 0x40416549A134069C
+ 3.36762458747825746741e+02, // 0x40750C3307F1A75F
+ 1.04687139975775130551e+03, // 0x40905B7C5037D523
+ 8.90811346398256432622e+02, // 0x408BD67DA32E31E9
+ 1.03787932439639277504e+02, // 0x4059F26D7C2EED53
+}
+
+// for x in [2.8570,2] = 1/[0.3499,0.5]
+var p1R2 = [6]float64{
+ 1.07710830106873743082e-07, // 0x3E7CE9D4F65544F4
+ 1.17176219462683348094e-01, // 0x3FBDFF42BE760D83
+ 2.36851496667608785174e+00, // 0x4002F2B7F98FAEC0
+ 1.22426109148261232917e+01, // 0x40287C377F71A964
+ 1.76939711271687727390e+01, // 0x4031B1A8177F8EE2
+ 5.07352312588818499250e+00, // 0x40144B49A574C1FE
+}
+var p1S2 = [5]float64{
+ 2.14364859363821409488e+01, // 0x40356FBD8AD5ECDC
+ 1.25290227168402751090e+02, // 0x405F529314F92CD5
+ 2.32276469057162813669e+02, // 0x406D08D8D5A2DBD9
+ 1.17679373287147100768e+02, // 0x405D6B7ADA1884A9
+ 8.36463893371618283368e+00, // 0x4020BAB1F44E5192
+}
+
+func pone(x float64) float64 {
+ var p *[6]float64
+ var q *[5]float64
+ if x >= 8 {
+ p = &p1R8
+ q = &p1S8
+ } else if x >= 4.5454 {
+ p = &p1R5
+ q = &p1S5
+ } else if x >= 2.8571 {
+ p = &p1R3
+ q = &p1S3
+ } else if x >= 2 {
+ p = &p1R2
+ q = &p1S2
+ }
+ z := 1 / (x * x)
+ r := p[0] + z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))))
+ s := 1.0 + z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*q[4]))))
+ return 1 + r/s
+}
+
+// For x >= 8, the asymptotic expansion of qone is
+// 3/8 s - 105/1024 s**3 - ..., where s = 1/x.
+// We approximate qone by
+// qone(x) = s*(0.375 + (R/S))
+// where R = qr1*s**2 + qr2*s**4 + ... + qr5*s**10
+// S = 1 + qs1*s**2 + ... + qs6*s**12
+// and
+// | qone(x)/s -0.375-R/S | <= 2**(-61.13)
+
+// for x in [inf, 8] = 1/[0,0.125]
+var q1R8 = [6]float64{
+ 0.00000000000000000000e+00, // 0x0000000000000000
+ -1.02539062499992714161e-01, // 0xBFBA3FFFFFFFFDF3
+ -1.62717534544589987888e+01, // 0xC0304591A26779F7
+ -7.59601722513950107896e+02, // 0xC087BCD053E4B576
+ -1.18498066702429587167e+04, // 0xC0C724E740F87415
+ -4.84385124285750353010e+04, // 0xC0E7A6D065D09C6A
+}
+var q1S8 = [6]float64{
+ 1.61395369700722909556e+02, // 0x40642CA6DE5BCDE5
+ 7.82538599923348465381e+03, // 0x40BE9162D0D88419
+ 1.33875336287249578163e+05, // 0x4100579AB0B75E98
+ 7.19657723683240939863e+05, // 0x4125F65372869C19
+ 6.66601232617776375264e+05, // 0x412457D27719AD5C
+ -2.94490264303834643215e+05, // 0xC111F9690EA5AA18
+}
+
+// for x in [8,4.5454] = 1/[0.125,0.22001]
+var q1R5 = [6]float64{
+ -2.08979931141764104297e-11, // 0xBDB6FA431AA1A098
+ -1.02539050241375426231e-01, // 0xBFBA3FFFCB597FEF
+ -8.05644828123936029840e+00, // 0xC0201CE6CA03AD4B
+ -1.83669607474888380239e+02, // 0xC066F56D6CA7B9B0
+ -1.37319376065508163265e+03, // 0xC09574C66931734F
+ -2.61244440453215656817e+03, // 0xC0A468E388FDA79D
+}
+var q1S5 = [6]float64{
+ 8.12765501384335777857e+01, // 0x405451B2FF5A11B2
+ 1.99179873460485964642e+03, // 0x409F1F31E77BF839
+ 1.74684851924908907677e+04, // 0x40D10F1F0D64CE29
+ 4.98514270910352279316e+04, // 0x40E8576DAABAD197
+ 2.79480751638918118260e+04, // 0x40DB4B04CF7C364B
+ -4.71918354795128470869e+03, // 0xC0B26F2EFCFFA004
+}
+
+// for x in [4.5454,2.8571] = 1/[0.2199,0.35001] ???
+var q1R3 = [6]float64{
+ -5.07831226461766561369e-09, // 0xBE35CFA9D38FC84F
+ -1.02537829820837089745e-01, // 0xBFBA3FEB51AEED54
+ -4.61011581139473403113e+00, // 0xC01270C23302D9FF
+ -5.78472216562783643212e+01, // 0xC04CEC71C25D16DA
+ -2.28244540737631695038e+02, // 0xC06C87D34718D55F
+ -2.19210128478909325622e+02, // 0xC06B66B95F5C1BF6
+}
+var q1S3 = [6]float64{
+ 4.76651550323729509273e+01, // 0x4047D523CCD367E4
+ 6.73865112676699709482e+02, // 0x40850EEBC031EE3E
+ 3.38015286679526343505e+03, // 0x40AA684E448E7C9A
+ 5.54772909720722782367e+03, // 0x40B5ABBAA61D54A6
+ 1.90311919338810798763e+03, // 0x409DBC7A0DD4DF4B
+ -1.35201191444307340817e+02, // 0xC060E670290A311F
+}
+
+// for x in [2.8570,2] = 1/[0.3499,0.5]
+var q1R2 = [6]float64{
+ -1.78381727510958865572e-07, // 0xBE87F12644C626D2
+ -1.02517042607985553460e-01, // 0xBFBA3E8E9148B010
+ -2.75220568278187460720e+00, // 0xC006048469BB4EDA
+ -1.96636162643703720221e+01, // 0xC033A9E2C168907F
+ -4.23253133372830490089e+01, // 0xC04529A3DE104AAA
+ -2.13719211703704061733e+01, // 0xC0355F3639CF6E52
+}
+var q1S2 = [6]float64{
+ 2.95333629060523854548e+01, // 0x403D888A78AE64FF
+ 2.52981549982190529136e+02, // 0x406F9F68DB821CBA
+ 7.57502834868645436472e+02, // 0x4087AC05CE49A0F7
+ 7.39393205320467245656e+02, // 0x40871B2548D4C029
+ 1.55949003336666123687e+02, // 0x40637E5E3C3ED8D4
+ -4.95949898822628210127e+00, // 0xC013D686E71BE86B
+}
+
+func qone(x float64) float64 {
+ var p, q *[6]float64
+ if x >= 8 {
+ p = &q1R8
+ q = &q1S8
+ } else if x >= 4.5454 {
+ p = &q1R5
+ q = &q1S5
+ } else if x >= 2.8571 {
+ p = &q1R3
+ q = &q1S3
+ } else if x >= 2 {
+ p = &q1R2
+ q = &q1S2
+ }
+ z := 1 / (x * x)
+ r := p[0] + z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))))
+ s := 1 + z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*(q[4]+z*q[5])))))
+ return (0.375 + r/s) / x
+}
diff --git a/contrib/go/_std_1.19/src/math/jn.go b/contrib/go/_std_1.19/src/math/jn.go
new file mode 100644
index 0000000000..3491692a96
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/jn.go
@@ -0,0 +1,306 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+/*
+ Bessel function of the first and second kinds of order n.
+*/
+
+// The original C code and the long comment below are
+// from FreeBSD's /usr/src/lib/msun/src/e_jn.c and
+// came with this notice. The Go code is a simplified
+// version of the original C.
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+// __ieee754_jn(n, x), __ieee754_yn(n, x)
+// floating point Bessel's function of the 1st and 2nd kind
+// of order n
+//
+// Special cases:
+// y0(0)=y1(0)=yn(n,0) = -inf with division by zero signal;
+// y0(-ve)=y1(-ve)=yn(n,-ve) are NaN with invalid signal.
+// Note about jn(n,x), yn(n,x):
+// For n=0, j0(x) is called,
+// for n=1, j1(x) is called,
+// for n<x, forward recursion is used starting
+// from values of j0(x) and j1(x).
+// for n>x, a continued fraction approximation to
+// j(n,x)/j(n-1,x) is evaluated and then backward
+// recursion is used starting from a supposed value
+// for j(n,x). The resulting value of j(0,x) is
+// compared with the actual value to correct the
+// supposed value of j(n,x).
+//
+// yn(n,x) is similar in all respects, except
+// that forward recursion is used for all
+// values of n>1.
+
+// Jn returns the order-n Bessel function of the first kind.
+//
+// Special cases are:
+//
+// Jn(n, ±Inf) = 0
+// Jn(n, NaN) = NaN
+func Jn(n int, x float64) float64 {
+ const (
+ TwoM29 = 1.0 / (1 << 29) // 2**-29 0x3e10000000000000
+ Two302 = 1 << 302 // 2**302 0x52D0000000000000
+ )
+ // special cases
+ switch {
+ case IsNaN(x):
+ return x
+ case IsInf(x, 0):
+ return 0
+ }
+ // J(-n, x) = (-1)**n * J(n, x), J(n, -x) = (-1)**n * J(n, x)
+ // Thus, J(-n, x) = J(n, -x)
+
+ if n == 0 {
+ return J0(x)
+ }
+ if x == 0 {
+ return 0
+ }
+ if n < 0 {
+ n, x = -n, -x
+ }
+ if n == 1 {
+ return J1(x)
+ }
+ sign := false
+ if x < 0 {
+ x = -x
+ if n&1 == 1 {
+ sign = true // odd n and negative x
+ }
+ }
+ var b float64
+ if float64(n) <= x {
+ // Safe to use J(n+1,x)=2n/x *J(n,x)-J(n-1,x)
+ if x >= Two302 { // x >= 2**302
+
+ // (x >> n**2)
+ // Jn(x) = cos(x-(2n+1)*pi/4)*sqrt(2/x*pi)
+ // Yn(x) = sin(x-(2n+1)*pi/4)*sqrt(2/x*pi)
+ // Let s=sin(x), c=cos(x),
+ // xn=x-(2n+1)*pi/4, sqt2 = sqrt(2),then
+ //
+ // n sin(xn)*sqt2 cos(xn)*sqt2
+ // ----------------------------------
+ // 0 s-c c+s
+ // 1 -s-c -c+s
+ // 2 -s+c -c-s
+ // 3 s+c c-s
+
+ var temp float64
+ switch s, c := Sincos(x); n & 3 {
+ case 0:
+ temp = c + s
+ case 1:
+ temp = -c + s
+ case 2:
+ temp = -c - s
+ case 3:
+ temp = c - s
+ }
+ b = (1 / SqrtPi) * temp / Sqrt(x)
+ } else {
+ b = J1(x)
+ for i, a := 1, J0(x); i < n; i++ {
+ a, b = b, b*(float64(i+i)/x)-a // avoid underflow
+ }
+ }
+ } else {
+ if x < TwoM29 { // x < 2**-29
+ // x is tiny, return the first Taylor expansion of J(n,x)
+ // J(n,x) = 1/n!*(x/2)**n - ...
+
+ if n > 33 { // underflow
+ b = 0
+ } else {
+ temp := x * 0.5
+ b = temp
+ a := 1.0
+ for i := 2; i <= n; i++ {
+ a *= float64(i) // a = n!
+ b *= temp // b = (x/2)**n
+ }
+ b /= a
+ }
+ } else {
+ // use backward recurrence
+ // J(n,x)/J(n-1,x) = x/(2n - x**2/(2(n+1) - x**2/(2(n+2) - ...)))
+ //
+ // (for large x) = 1/(2n/x - 1/(2(n+1)/x - 1/(2(n+2)/x - ...)))
+ //
+ // Let w = 2n/x and h = 2/x, then the above quotient
+ // is equal to the continued fraction:
+ // 1/(w - 1/(w+h - 1/(w+2h - ...)))
+ //
+ // To determine how many terms needed, let
+ // Q(0) = w, Q(1) = w(w+h) - 1,
+ // Q(k) = (w+k*h)*Q(k-1) - Q(k-2),
+ // When Q(k) > 1e4 good for single
+ // When Q(k) > 1e9 good for double
+ // When Q(k) > 1e17 good for quadruple
+
+ // determine k
+ w := float64(n+n) / x
+ h := 2 / x
+ q0 := w
+ z := w + h
+ q1 := w*z - 1
+ k := 1
+ for q1 < 1e9 {
+ k++
+ z += h
+ q0, q1 = q1, z*q1-q0
+ }
+ m := n + n
+ t := 0.0
+ for i := 2 * (n + k); i >= m; i -= 2 {
+ t = 1 / (float64(i)/x - t)
+ }
+ a := t
+ b = 1
+ // estimate log((2/x)**n*n!) = n*log(2/x)+n*ln(n)
+ // Hence, if n*(log(2n/x)) > ...
+ // single 8.8722839355e+01
+ // double 7.09782712893383973096e+02
+ // long double 1.1356523406294143949491931077970765006170e+04
+ // then the recurrence may overflow and the result is
+ // likely to underflow to zero
+
+ tmp := float64(n)
+ v := 2 / x
+ tmp = tmp * Log(Abs(v*tmp))
+ if tmp < 7.09782712893383973096e+02 {
+ for i := n - 1; i > 0; i-- {
+ di := float64(i + i)
+ a, b = b, b*di/x-a
+ }
+ } else {
+ for i := n - 1; i > 0; i-- {
+ di := float64(i + i)
+ a, b = b, b*di/x-a
+ // scale b to avoid spurious overflow
+ if b > 1e100 {
+ a /= b
+ t /= b
+ b = 1
+ }
+ }
+ }
+ b = t * J0(x) / b
+ }
+ }
+ if sign {
+ return -b
+ }
+ return b
+}
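+
+// Editorial sketch (not part of the upstream source): for small n the
+// forward recurrence above, J(n+1,x) = (2n/x)*J(n,x) - J(n-1,x), can be
+// checked directly. Assuming the exported math API:
+//
+//	x := 5.0
+//	j2 := (2/x)*math.J1(x) - math.J0(x)
+//	// j2 agrees with math.Jn(2, x) up to rounding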
+
+// Yn returns the order-n Bessel function of the second kind.
+//
+// Special cases are:
+//
+// Yn(n, +Inf) = 0
+// Yn(n ≥ 0, 0) = -Inf
+// Yn(n < 0, 0) = +Inf if n is odd, -Inf if n is even
+// Yn(n, x < 0) = NaN
+// Yn(n, NaN) = NaN
+func Yn(n int, x float64) float64 {
+ const Two302 = 1 << 302 // 2**302 0x52D0000000000000
+ // special cases
+ switch {
+ case x < 0 || IsNaN(x):
+ return NaN()
+ case IsInf(x, 1):
+ return 0
+ }
+
+ if n == 0 {
+ return Y0(x)
+ }
+ if x == 0 {
+ if n < 0 && n&1 == 1 {
+ return Inf(1)
+ }
+ return Inf(-1)
+ }
+ sign := false
+ if n < 0 {
+ n = -n
+ if n&1 == 1 {
+ sign = true // sign true if n < 0 && |n| odd
+ }
+ }
+ if n == 1 {
+ if sign {
+ return -Y1(x)
+ }
+ return Y1(x)
+ }
+ var b float64
+ if x >= Two302 { // x >= 2**302
+ // (x >> n**2)
+ // Jn(x) = cos(x-(2n+1)*pi/4)*sqrt(2/x*pi)
+ // Yn(x) = sin(x-(2n+1)*pi/4)*sqrt(2/x*pi)
+ // Let s=sin(x), c=cos(x),
+ // xn=x-(2n+1)*pi/4, sqt2 = sqrt(2),then
+ //
+ // n sin(xn)*sqt2 cos(xn)*sqt2
+ // ----------------------------------
+ // 0 s-c c+s
+ // 1 -s-c -c+s
+ // 2 -s+c -c-s
+ // 3 s+c c-s
+
+ var temp float64
+ switch s, c := Sincos(x); n & 3 {
+ case 0:
+ temp = s - c
+ case 1:
+ temp = -s - c
+ case 2:
+ temp = -s + c
+ case 3:
+ temp = s + c
+ }
+ b = (1 / SqrtPi) * temp / Sqrt(x)
+ } else {
+ a := Y0(x)
+ b = Y1(x)
+ // quit if b is -inf
+ for i := 1; i < n && !IsInf(b, -1); i++ {
+ a, b = b, (float64(i+i)/x)*b-a
+ }
+ }
+ if sign {
+ return -b
+ }
+ return b
+}
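+
+// Editorial sketch (not part of the upstream source): the sign handling in
+// Yn above encodes Y(-n,x) = (-1)**n * Y(n,x). Assuming the exported math API:
+//
+//	math.Yn(-1, 2) // == -math.Yn(1, 2)
+//	math.Yn(-2, 2) // ==  math.Yn(2, 2)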
diff --git a/contrib/go/_std_1.19/src/math/ldexp.go b/contrib/go/_std_1.19/src/math/ldexp.go
new file mode 100644
index 0000000000..df365c0b1a
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/ldexp.go
@@ -0,0 +1,51 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// Ldexp is the inverse of Frexp.
+// It returns frac × 2**exp.
+//
+// Special cases are:
+//
+// Ldexp(±0, exp) = ±0
+// Ldexp(±Inf, exp) = ±Inf
+// Ldexp(NaN, exp) = NaN
+func Ldexp(frac float64, exp int) float64 {
+ if haveArchLdexp {
+ return archLdexp(frac, exp)
+ }
+ return ldexp(frac, exp)
+}
+
+func ldexp(frac float64, exp int) float64 {
+ // special cases
+ switch {
+ case frac == 0:
+ return frac // correctly return -0
+ case IsInf(frac, 0) || IsNaN(frac):
+ return frac
+ }
+ frac, e := normalize(frac)
+ exp += e
+ x := Float64bits(frac)
+ exp += int(x>>shift)&mask - bias
+ if exp < -1075 {
+ return Copysign(0, frac) // underflow
+ }
+ if exp > 1023 { // overflow
+ if frac < 0 {
+ return Inf(-1)
+ }
+ return Inf(1)
+ }
+ var m float64 = 1
+ if exp < -1022 { // denormal
+ exp += 53
+ m = 1.0 / (1 << 53) // 2**-53
+ }
+ x &^= mask << shift
+ x |= uint64(exp+bias) << shift
+ return m * Float64frombits(x)
+}
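+
+// Editorial sketch (not part of the upstream source): Ldexp inverts Frexp,
+// so any finite non-zero value round-trips. Assuming the exported math API:
+//
+//	frac, exp := math.Frexp(6.5)  // frac = 0.8125, exp = 3
+//	v := math.Ldexp(frac, exp)    // 0.8125 * 2**3 = 6.5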
diff --git a/contrib/go/_std_1.19/src/math/lgamma.go b/contrib/go/_std_1.19/src/math/lgamma.go
new file mode 100644
index 0000000000..4058ad6631
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/lgamma.go
@@ -0,0 +1,366 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+/*
+ Floating-point logarithm of the Gamma function.
+*/
+
+// The original C code and the long comment below are
+// from FreeBSD's /usr/src/lib/msun/src/e_lgamma_r.c and
+// came with this notice. The Go code is a simplified
+// version of the original C.
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+// __ieee754_lgamma_r(x, signgamp)
+// Reentrant version of the logarithm of the Gamma function
+// with user provided pointer for the sign of Gamma(x).
+//
+// Method:
+// 1. Argument Reduction for 0 < x <= 8
+// Since gamma(1+s)=s*gamma(s), for x in [0,8], we may
+// reduce x to a number in [1.5,2.5] by
+// lgamma(1+s) = log(s) + lgamma(s)
+// for example,
+// lgamma(7.3) = log(6.3) + lgamma(6.3)
+// = log(6.3*5.3) + lgamma(5.3)
+// = log(6.3*5.3*4.3*3.3*2.3) + lgamma(2.3)
+// 2. Polynomial approximation of lgamma around its
+// minimum (ymin=1.461632144968362245) to maintain monotonicity.
+// On [ymin-0.23, ymin+0.27] (i.e., [1.23164,1.73163]), use
+// Let z = x-ymin;
+// lgamma(x) = -1.214862905358496078218 + z**2*poly(z)
+// poly(z) is a polynomial of degree 14.
+// 3. Rational approximation in the primary interval [2,3]
+// We use the following approximation:
+// s = x-2.0;
+// lgamma(x) = 0.5*s + s*P(s)/Q(s)
+// with accuracy
+// |P/Q - (lgamma(x)-0.5s)| < 2**-61.71
+// Our algorithms are based on the following observation
+//
+// lgamma(2+s) = s*(1-Euler) + ((zeta(2)-1)/2)*s**2 - ((zeta(3)-1)/3)*s**3 + ...
+//
+// where Euler = 0.5772156649... is the Euler constant, which
+// is very close to 0.5.
+//
+// 4. For x>=8, we have
+// lgamma(x)~(x-0.5)log(x)-x+0.5*log(2pi)+1/(12x)-1/(360x**3)+....
+// (better formula:
+// lgamma(x)~(x-0.5)*(log(x)-1)-.5*(log(2pi)-1) + ...)
+// Let z = 1/x, then we approximate
+// f(z) = lgamma(x) - (x-0.5)(log(x)-1)
+// by
+// w = w0 + w1*z + w2*z**3 + w3*z**5 + ... + w6*z**11
+// where
+// |w - f(z)| < 2**-58.74
+//
+// 5. For negative x, since (G is gamma function)
+// -x*G(-x)*G(x) = pi/sin(pi*x),
+// we have
+// G(x) = pi/(sin(pi*x)*(-x)*G(-x))
+// since G(-x) is positive, sign(G(x)) = sign(sin(pi*x)) for x<0
+// Hence, for x<0, signgam = sign(sin(pi*x)) and
+// lgamma(x) = log(|Gamma(x)|)
+// = log(pi/(|x*sin(pi*x)|)) - lgamma(-x);
+// Note: one should avoid computing pi*(-x) directly in the
+// computation of sin(pi*(-x)).
+//
+// 6. Special Cases
+// lgamma(2+s) ~ s*(1-Euler) for tiny s
+// lgamma(1)=lgamma(2)=0
+// lgamma(x) ~ -log(x) for tiny x
+// lgamma(0) = lgamma(inf) = inf
+// lgamma(-integer) = +-inf
+//
+//
+
+var _lgamA = [...]float64{
+ 7.72156649015328655494e-02, // 0x3FB3C467E37DB0C8
+ 3.22467033424113591611e-01, // 0x3FD4A34CC4A60FAD
+ 6.73523010531292681824e-02, // 0x3FB13E001A5562A7
+ 2.05808084325167332806e-02, // 0x3F951322AC92547B
+ 7.38555086081402883957e-03, // 0x3F7E404FB68FEFE8
+ 2.89051383673415629091e-03, // 0x3F67ADD8CCB7926B
+ 1.19270763183362067845e-03, // 0x3F538A94116F3F5D
+ 5.10069792153511336608e-04, // 0x3F40B6C689B99C00
+ 2.20862790713908385557e-04, // 0x3F2CF2ECED10E54D
+ 1.08011567247583939954e-04, // 0x3F1C5088987DFB07
+ 2.52144565451257326939e-05, // 0x3EFA7074428CFA52
+ 4.48640949618915160150e-05, // 0x3F07858E90A45837
+}
+var _lgamR = [...]float64{
+ 1.0, // placeholder
+ 1.39200533467621045958e+00, // 0x3FF645A762C4AB74
+ 7.21935547567138069525e-01, // 0x3FE71A1893D3DCDC
+ 1.71933865632803078993e-01, // 0x3FC601EDCCFBDF27
+ 1.86459191715652901344e-02, // 0x3F9317EA742ED475
+ 7.77942496381893596434e-04, // 0x3F497DDACA41A95B
+ 7.32668430744625636189e-06, // 0x3EDEBAF7A5B38140
+}
+var _lgamS = [...]float64{
+ -7.72156649015328655494e-02, // 0xBFB3C467E37DB0C8
+ 2.14982415960608852501e-01, // 0x3FCB848B36E20878
+ 3.25778796408930981787e-01, // 0x3FD4D98F4F139F59
+ 1.46350472652464452805e-01, // 0x3FC2BB9CBEE5F2F7
+ 2.66422703033638609560e-02, // 0x3F9B481C7E939961
+ 1.84028451407337715652e-03, // 0x3F5E26B67368F239
+ 3.19475326584100867617e-05, // 0x3F00BFECDD17E945
+}
+var _lgamT = [...]float64{
+ 4.83836122723810047042e-01, // 0x3FDEF72BC8EE38A2
+ -1.47587722994593911752e-01, // 0xBFC2E4278DC6C509
+ 6.46249402391333854778e-02, // 0x3FB08B4294D5419B
+ -3.27885410759859649565e-02, // 0xBFA0C9A8DF35B713
+ 1.79706750811820387126e-02, // 0x3F9266E7970AF9EC
+ -1.03142241298341437450e-02, // 0xBF851F9FBA91EC6A
+ 6.10053870246291332635e-03, // 0x3F78FCE0E370E344
+ -3.68452016781138256760e-03, // 0xBF6E2EFFB3E914D7
+ 2.25964780900612472250e-03, // 0x3F6282D32E15C915
+ -1.40346469989232843813e-03, // 0xBF56FE8EBF2D1AF1
+ 8.81081882437654011382e-04, // 0x3F4CDF0CEF61A8E9
+ -5.38595305356740546715e-04, // 0xBF41A6109C73E0EC
+ 3.15632070903625950361e-04, // 0x3F34AF6D6C0EBBF7
+ -3.12754168375120860518e-04, // 0xBF347F24ECC38C38
+ 3.35529192635519073543e-04, // 0x3F35FD3EE8C2D3F4
+}
+var _lgamU = [...]float64{
+ -7.72156649015328655494e-02, // 0xBFB3C467E37DB0C8
+ 6.32827064025093366517e-01, // 0x3FE4401E8B005DFF
+ 1.45492250137234768737e+00, // 0x3FF7475CD119BD6F
+ 9.77717527963372745603e-01, // 0x3FEF497644EA8450
+ 2.28963728064692451092e-01, // 0x3FCD4EAEF6010924
+ 1.33810918536787660377e-02, // 0x3F8B678BBF2BAB09
+}
+var _lgamV = [...]float64{
+ 1.0,
+ 2.45597793713041134822e+00, // 0x4003A5D7C2BD619C
+ 2.12848976379893395361e+00, // 0x40010725A42B18F5
+ 7.69285150456672783825e-01, // 0x3FE89DFBE45050AF
+ 1.04222645593369134254e-01, // 0x3FBAAE55D6537C88
+ 3.21709242282423911810e-03, // 0x3F6A5ABB57D0CF61
+}
+var _lgamW = [...]float64{
+ 4.18938533204672725052e-01, // 0x3FDACFE390C97D69
+ 8.33333333333329678849e-02, // 0x3FB555555555553B
+ -2.77777777728775536470e-03, // 0xBF66C16C16B02E5C
+ 7.93650558643019558500e-04, // 0x3F4A019F98CF38B6
+ -5.95187557450339963135e-04, // 0xBF4380CB8C0FE741
+ 8.36339918996282139126e-04, // 0x3F4B67BA4CDAD5D1
+ -1.63092934096575273989e-03, // 0xBF5AB89D0B9E43E4
+}
+
+// Lgamma returns the natural logarithm and sign (-1 or +1) of Gamma(x).
+//
+// Special cases are:
+//
+// Lgamma(+Inf) = +Inf
+// Lgamma(0) = +Inf
+// Lgamma(-integer) = +Inf
+// Lgamma(-Inf) = -Inf
+// Lgamma(NaN) = NaN
+func Lgamma(x float64) (lgamma float64, sign int) {
+ const (
+ Ymin = 1.461632144968362245
+ Two52 = 1 << 52 // 0x4330000000000000 ~4.5036e+15
+ Two53 = 1 << 53 // 0x4340000000000000 ~9.0072e+15
+ Two58 = 1 << 58 // 0x4390000000000000 ~2.8823e+17
+ Tiny = 1.0 / (1 << 70) // 0x3b90000000000000 ~8.47033e-22
+ Tc = 1.46163214496836224576e+00 // 0x3FF762D86356BE3F
+ Tf = -1.21486290535849611461e-01 // 0xBFBF19B9BCC38A42
+ // Tt = -(tail of Tf)
+ Tt = -3.63867699703950536541e-18 // 0xBC50C7CAA48A971F
+ )
+ // special cases
+ sign = 1
+ switch {
+ case IsNaN(x):
+ lgamma = x
+ return
+ case IsInf(x, 0):
+ lgamma = x
+ return
+ case x == 0:
+ lgamma = Inf(1)
+ return
+ }
+
+ neg := false
+ if x < 0 {
+ x = -x
+ neg = true
+ }
+
+ if x < Tiny { // if |x| < 2**-70, return -log(|x|)
+ if neg {
+ sign = -1
+ }
+ lgamma = -Log(x)
+ return
+ }
+ var nadj float64
+ if neg {
+ if x >= Two52 { // |x| >= 2**52, must be -integer
+ lgamma = Inf(1)
+ return
+ }
+ t := sinPi(x)
+ if t == 0 {
+ lgamma = Inf(1) // -integer
+ return
+ }
+ nadj = Log(Pi / Abs(t*x))
+ if t < 0 {
+ sign = -1
+ }
+ }
+
+ switch {
+ case x == 1 || x == 2: // purge off 1 and 2
+ lgamma = 0
+ return
+ case x < 2: // use lgamma(x) = lgamma(x+1) - log(x)
+ var y float64
+ var i int
+ if x <= 0.9 {
+ lgamma = -Log(x)
+ switch {
+ case x >= (Ymin - 1 + 0.27): // 0.7316 <= x <= 0.9
+ y = 1 - x
+ i = 0
+ case x >= (Ymin - 1 - 0.27): // 0.2316 <= x < 0.7316
+ y = x - (Tc - 1)
+ i = 1
+ default: // 0 < x < 0.2316
+ y = x
+ i = 2
+ }
+ } else {
+ lgamma = 0
+ switch {
+ case x >= (Ymin + 0.27): // 1.7316 <= x < 2
+ y = 2 - x
+ i = 0
+ case x >= (Ymin - 0.27): // 1.2316 <= x < 1.7316
+ y = x - Tc
+ i = 1
+ default: // 0.9 < x < 1.2316
+ y = x - 1
+ i = 2
+ }
+ }
+ switch i {
+ case 0:
+ z := y * y
+ p1 := _lgamA[0] + z*(_lgamA[2]+z*(_lgamA[4]+z*(_lgamA[6]+z*(_lgamA[8]+z*_lgamA[10]))))
+ p2 := z * (_lgamA[1] + z*(+_lgamA[3]+z*(_lgamA[5]+z*(_lgamA[7]+z*(_lgamA[9]+z*_lgamA[11])))))
+ p := y*p1 + p2
+ lgamma += (p - 0.5*y)
+ case 1:
+ z := y * y
+ w := z * y
+ p1 := _lgamT[0] + w*(_lgamT[3]+w*(_lgamT[6]+w*(_lgamT[9]+w*_lgamT[12]))) // parallel comp
+ p2 := _lgamT[1] + w*(_lgamT[4]+w*(_lgamT[7]+w*(_lgamT[10]+w*_lgamT[13])))
+ p3 := _lgamT[2] + w*(_lgamT[5]+w*(_lgamT[8]+w*(_lgamT[11]+w*_lgamT[14])))
+ p := z*p1 - (Tt - w*(p2+y*p3))
+ lgamma += (Tf + p)
+ case 2:
+ p1 := y * (_lgamU[0] + y*(_lgamU[1]+y*(_lgamU[2]+y*(_lgamU[3]+y*(_lgamU[4]+y*_lgamU[5])))))
+ p2 := 1 + y*(_lgamV[1]+y*(_lgamV[2]+y*(_lgamV[3]+y*(_lgamV[4]+y*_lgamV[5]))))
+ lgamma += (-0.5*y + p1/p2)
+ }
+ case x < 8: // 2 <= x < 8
+ i := int(x)
+ y := x - float64(i)
+ p := y * (_lgamS[0] + y*(_lgamS[1]+y*(_lgamS[2]+y*(_lgamS[3]+y*(_lgamS[4]+y*(_lgamS[5]+y*_lgamS[6]))))))
+ q := 1 + y*(_lgamR[1]+y*(_lgamR[2]+y*(_lgamR[3]+y*(_lgamR[4]+y*(_lgamR[5]+y*_lgamR[6])))))
+ lgamma = 0.5*y + p/q
+ z := 1.0 // Lgamma(1+s) = Log(s) + Lgamma(s)
+ switch i {
+ case 7:
+ z *= (y + 6)
+ fallthrough
+ case 6:
+ z *= (y + 5)
+ fallthrough
+ case 5:
+ z *= (y + 4)
+ fallthrough
+ case 4:
+ z *= (y + 3)
+ fallthrough
+ case 3:
+ z *= (y + 2)
+ lgamma += Log(z)
+ }
+ case x < Two58: // 8 <= x < 2**58
+ t := Log(x)
+ z := 1 / x
+ y := z * z
+ w := _lgamW[0] + z*(_lgamW[1]+y*(_lgamW[2]+y*(_lgamW[3]+y*(_lgamW[4]+y*(_lgamW[5]+y*_lgamW[6])))))
+ lgamma = (x-0.5)*(t-1) + w
+ default: // 2**58 <= x <= Inf
+ lgamma = x * (Log(x) - 1)
+ }
+ if neg {
+ lgamma = nadj - lgamma
+ }
+ return
+}
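+
+// Editorial sketch (not part of the upstream source): Gamma(x) can be
+// recovered from the returned pair as sign * Exp(lgamma). Assuming the
+// exported math API:
+//
+//	lg, sign := math.Lgamma(-0.5)     // Gamma(-0.5) = -2*Sqrt(Pi)
+//	g := float64(sign) * math.Exp(lg) // g ≈ -3.5449077018110318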
+
+// sinPi(x) is a helper function for negative x
+func sinPi(x float64) float64 {
+ const (
+ Two52 = 1 << 52 // 0x4330000000000000 ~4.5036e+15
+ Two53 = 1 << 53 // 0x4340000000000000 ~9.0072e+15
+ )
+ if x < 0.25 {
+ return -Sin(Pi * x)
+ }
+
+ // argument reduction
+ z := Floor(x)
+ var n int
+ if z != x { // inexact
+ x = Mod(x, 2)
+ n = int(x * 4)
+ } else {
+ if x >= Two53 { // x must be even
+ x = 0
+ n = 0
+ } else {
+ if x < Two52 {
+ z = x + Two52 // exact
+ }
+ n = int(1 & Float64bits(z))
+ x = float64(n)
+ n <<= 2
+ }
+ }
+ switch n {
+ case 0:
+ x = Sin(Pi * x)
+ case 1, 2:
+ x = Cos(Pi * (0.5 - x))
+ case 3, 4:
+ x = Sin(Pi * (1 - x))
+ case 5, 6:
+ x = -Cos(Pi * (x - 1.5))
+ default:
+ x = Sin(Pi * (x - 2))
+ }
+ return -x
+}
diff --git a/contrib/go/_std_1.19/src/math/log.go b/contrib/go/_std_1.19/src/math/log.go
new file mode 100644
index 0000000000..695a545e7f
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/log.go
@@ -0,0 +1,129 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+/*
+ Floating-point logarithm.
+*/
+
+// The original C code, the long comment, and the constants
+// below are from FreeBSD's /usr/src/lib/msun/src/e_log.c
+// and came with this notice. The Go code is a simpler
+// version of the original C.
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+// __ieee754_log(x)
+// Return the logarithm of x
+//
+// Method :
+// 1. Argument Reduction: find k and f such that
+// x = 2**k * (1+f),
+// where sqrt(2)/2 < 1+f < sqrt(2) .
+//
+// 2. Approximation of log(1+f).
+// Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
+// = 2s + 2/3 s**3 + 2/5 s**5 + .....,
+// = 2s + s*R
+// We use a special Remez algorithm on [0,0.1716] to generate
+// a polynomial of degree 14 to approximate R. The maximum error
+// of this polynomial approximation is bounded by 2**-58.45. In
+// other words,
+// R(z) ~ L1*s**2 + L2*s**4 + L3*s**6 + L4*s**8 + L5*s**10 + L6*s**12 + L7*s**14
+// (the values of L1 to L7 are listed in the program) and
+// |L1*s**2 + ... + L7*s**14 - R(z)| <= 2**-58.45
+// Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
+// In order to guarantee error in log below 1ulp, we compute log by
+// log(1+f) = f - s*(f - R) (if f is not too large)
+// log(1+f) = f - (hfsq - s*(hfsq+R)). (better accuracy)
+//
+// 3. Finally, log(x) = k*Ln2 + log(1+f).
+// = k*Ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*Ln2_lo)))
+// Here Ln2 is split into two floating point number:
+// Ln2_hi + Ln2_lo,
+// where n*Ln2_hi is always exact for |n| < 2000.
+//
+// Special cases:
+// log(x) is NaN with signal if x < 0 (including -INF) ;
+// log(+INF) is +INF; log(0) is -INF with signal;
+// log(NaN) is that NaN with no signal.
+//
+// Accuracy:
+// according to an error analysis, the error is always less than
+// 1 ulp (unit in the last place).
+//
+// Constants:
+// The hexadecimal values are the intended ones for the following
+// constants. The decimal values may be used, provided that the
+// compiler will convert from decimal to binary accurately enough
+// to produce the hexadecimal values shown.
+
+// Log returns the natural logarithm of x.
+//
+// Special cases are:
+//
+// Log(+Inf) = +Inf
+// Log(0) = -Inf
+// Log(x < 0) = NaN
+// Log(NaN) = NaN
+func Log(x float64) float64 {
+ if haveArchLog {
+ return archLog(x)
+ }
+ return log(x)
+}
+
+func log(x float64) float64 {
+ const (
+ Ln2Hi = 6.93147180369123816490e-01 /* 3fe62e42 fee00000 */
+ Ln2Lo = 1.90821492927058770002e-10 /* 3dea39ef 35793c76 */
+ L1 = 6.666666666666735130e-01 /* 3FE55555 55555593 */
+ L2 = 3.999999999940941908e-01 /* 3FD99999 9997FA04 */
+ L3 = 2.857142874366239149e-01 /* 3FD24924 94229359 */
+ L4 = 2.222219843214978396e-01 /* 3FCC71C5 1D8E78AF */
+ L5 = 1.818357216161805012e-01 /* 3FC74664 96CB03DE */
+ L6 = 1.531383769920937332e-01 /* 3FC39A09 D078C69F */
+ L7 = 1.479819860511658591e-01 /* 3FC2F112 DF3E5244 */
+ )
+
+ // special cases
+ switch {
+ case IsNaN(x) || IsInf(x, 1):
+ return x
+ case x < 0:
+ return NaN()
+ case x == 0:
+ return Inf(-1)
+ }
+
+ // reduce
+ f1, ki := Frexp(x)
+ if f1 < Sqrt2/2 {
+ f1 *= 2
+ ki--
+ }
+ f := f1 - 1
+ k := float64(ki)
+
+ // compute
+ s := f / (2 + f)
+ s2 := s * s
+ s4 := s2 * s2
+ t1 := s2 * (L1 + s4*(L3+s4*(L5+s4*L7)))
+ t2 := s4 * (L2 + s4*(L4+s4*L6))
+ R := t1 + t2
+ hfsq := 0.5 * f * f
+ return k*Ln2Hi - ((hfsq - (s*(hfsq+R) + k*Ln2Lo)) - f)
+}
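+
+// Editorial worked example (not part of the upstream source), tracing the
+// reduction above for x = 10: Frexp gives 0.625 * 2**4, and since
+// 0.625 < sqrt(2)/2 the code rescales to f1 = 1.25, ki = 3, so f = 0.25 and
+//
+//	log(10) = 3*Ln2 + log(1.25)
+//	        ≈ 2.0794415416798357 + 0.22314355131420976
+//	        ≈ 2.302585092994046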
diff --git a/contrib/go/_std_1.18/src/math/log10.go b/contrib/go/_std_1.19/src/math/log10.go
index e6916a53b6..e6916a53b6 100644
--- a/contrib/go/_std_1.18/src/math/log10.go
+++ b/contrib/go/_std_1.19/src/math/log10.go
diff --git a/contrib/go/_std_1.19/src/math/log1p.go b/contrib/go/_std_1.19/src/math/log1p.go
new file mode 100644
index 0000000000..3a7b3854a8
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/log1p.go
@@ -0,0 +1,203 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// The original C code, the long comment, and the constants
+// below are from FreeBSD's /usr/src/lib/msun/src/s_log1p.c
+// and came with this notice. The Go code is a simplified
+// version of the original C.
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+//
+// double log1p(double x)
+//
+// Method :
+// 1. Argument Reduction: find k and f such that
+// 1+x = 2**k * (1+f),
+// where sqrt(2)/2 < 1+f < sqrt(2) .
+//
+// Note. If k=0, then f=x is exact. However, if k!=0, then f
+// may not be representable exactly. In that case, a correction
+// term is needed. Let u=1+x rounded. Let c = (1+x)-u, then
+// log(1+x) - log(u) ~ c/u. Thus, we proceed to compute log(u),
+// and add back the correction term c/u.
+// (Note: when x > 2**53, one can simply return log(x))
+//
+// 2. Approximation of log1p(f).
+// Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
+// = 2s + 2/3 s**3 + 2/5 s**5 + .....,
+// = 2s + s*R
+// We use a special Remez algorithm on [0,0.1716] to generate
+// a polynomial of degree 14 to approximate R. The maximum error
+// of this polynomial approximation is bounded by 2**-58.45. In
+// other words,
+// R(z) ~ Lp1*s**2 + Lp2*s**4 + Lp3*s**6 + Lp4*s**8 + Lp5*s**10 + Lp6*s**12 + Lp7*s**14
+// (the values of Lp1 to Lp7 are listed in the program) and
+// |Lp1*s**2 + ... + Lp7*s**14 - R(z)| <= 2**-58.45
+// Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
+// In order to guarantee error in log below 1ulp, we compute log
+// by
+// log1p(f) = f - (hfsq - s*(hfsq+R)).
+//
+// 3. Finally, log1p(x) = k*ln2 + log1p(f).
+// = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo)))
+// Here ln2 is split into two floating point number:
+// ln2_hi + ln2_lo,
+// where n*ln2_hi is always exact for |n| < 2000.
+//
+// Special cases:
+// log1p(x) is NaN with signal if x < -1 (including -INF) ;
+// log1p(+INF) is +INF; log1p(-1) is -INF with signal;
+// log1p(NaN) is that NaN with no signal.
+//
+// Accuracy:
+// according to an error analysis, the error is always less than
+// 1 ulp (unit in the last place).
+//
+// Constants:
+// The hexadecimal values are the intended ones for the following
+// constants. The decimal values may be used, provided that the
+// compiler will convert from decimal to binary accurately enough
+// to produce the hexadecimal values shown.
+//
+// Note: Assuming log() returns an accurate answer, the following
+// algorithm can be used to compute log1p(x) to within a few ULP:
+//
+// u = 1+x;
+// if(u==1.0) return x ; else
+// return log(u)*(x/(u-1.0));
+//
+// See HP-15C Advanced Functions Handbook, p.193.
+
+// Log1p returns the natural logarithm of 1 plus its argument x.
+// It is more accurate than Log(1 + x) when x is near zero.
+//
+// Special cases are:
+//
+// Log1p(+Inf) = +Inf
+// Log1p(±0) = ±0
+// Log1p(-1) = -Inf
+// Log1p(x < -1) = NaN
+// Log1p(NaN) = NaN
+func Log1p(x float64) float64 {
+ if haveArchLog1p {
+ return archLog1p(x)
+ }
+ return log1p(x)
+}
+
+func log1p(x float64) float64 {
+ const (
+ Sqrt2M1 = 4.142135623730950488017e-01 // Sqrt(2)-1 = 0x3fda827999fcef34
+ Sqrt2HalfM1 = -2.928932188134524755992e-01 // Sqrt(2)/2-1 = 0xbfd2bec333018866
+ Small = 1.0 / (1 << 29) // 2**-29 = 0x3e20000000000000
+ Tiny = 1.0 / (1 << 54) // 2**-54
+ Two53 = 1 << 53 // 2**53
+ Ln2Hi = 6.93147180369123816490e-01 // 3fe62e42fee00000
+ Ln2Lo = 1.90821492927058770002e-10 // 3dea39ef35793c76
+ Lp1 = 6.666666666666735130e-01 // 3FE5555555555593
+ Lp2 = 3.999999999940941908e-01 // 3FD999999997FA04
+ Lp3 = 2.857142874366239149e-01 // 3FD2492494229359
+ Lp4 = 2.222219843214978396e-01 // 3FCC71C51D8E78AF
+ Lp5 = 1.818357216161805012e-01 // 3FC7466496CB03DE
+ Lp6 = 1.531383769920937332e-01 // 3FC39A09D078C69F
+ Lp7 = 1.479819860511658591e-01 // 3FC2F112DF3E5244
+ )
+
+ // special cases
+ switch {
+ case x < -1 || IsNaN(x): // includes -Inf
+ return NaN()
+ case x == -1:
+ return Inf(-1)
+ case IsInf(x, 1):
+ return Inf(1)
+ }
+
+ absx := Abs(x)
+
+ var f float64
+ var iu uint64
+ k := 1
+ if absx < Sqrt2M1 { // |x| < Sqrt(2)-1
+ if absx < Small { // |x| < 2**-29
+ if absx < Tiny { // |x| < 2**-54
+ return x
+ }
+ return x - x*x*0.5
+ }
+ if x > Sqrt2HalfM1 { // Sqrt(2)/2-1 < x
+ // (Sqrt(2)/2-1) < x < (Sqrt(2)-1)
+ k = 0
+ f = x
+ iu = 1
+ }
+ }
+ var c float64
+ if k != 0 {
+ var u float64
+ if absx < Two53 { // 1<<53
+ u = 1.0 + x
+ iu = Float64bits(u)
+ k = int((iu >> 52) - 1023)
+ // correction term
+ if k > 0 {
+ c = 1.0 - (u - x)
+ } else {
+ c = x - (u - 1.0)
+ }
+ c /= u
+ } else {
+ u = x
+ iu = Float64bits(u)
+ k = int((iu >> 52) - 1023)
+ c = 0
+ }
+ iu &= 0x000fffffffffffff
+ if iu < 0x0006a09e667f3bcd { // mantissa of Sqrt(2)
+ u = Float64frombits(iu | 0x3ff0000000000000) // normalize u
+ } else {
+ k++
+ u = Float64frombits(iu | 0x3fe0000000000000) // normalize u/2
+ iu = (0x0010000000000000 - iu) >> 2
+ }
+ f = u - 1.0 // Sqrt(2)/2 < u < Sqrt(2)
+ }
+ hfsq := 0.5 * f * f
+ var s, R, z float64
+ if iu == 0 { // |f| < 2**-20
+ if f == 0 {
+ if k == 0 {
+ return 0
+ }
+ c += float64(k) * Ln2Lo
+ return float64(k)*Ln2Hi + c
+ }
+ R = hfsq * (1.0 - 0.66666666666666666*f) // avoid division
+ if k == 0 {
+ return f - R
+ }
+ return float64(k)*Ln2Hi - ((R - (float64(k)*Ln2Lo + c)) - f)
+ }
+ s = f / (2.0 + f)
+ z = s * s
+ R = z * (Lp1 + z*(Lp2+z*(Lp3+z*(Lp4+z*(Lp5+z*(Lp6+z*Lp7))))))
+ if k == 0 {
+ return f - (hfsq - s*(hfsq+R))
+ }
+ return float64(k)*Ln2Hi - ((hfsq - (s*(hfsq+R) + (float64(k)*Ln2Lo + c))) - f)
+}
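+
+// Editorial sketch (not part of the upstream source): the accuracy claim in
+// the Log1p doc comment is easiest to see near zero, where 1+x rounds to
+// exactly 1 and the naive form loses everything. Assuming the exported
+// math API:
+//
+//	x := 1e-16
+//	math.Log(1 + x) // 0, because 1+1e-16 rounds to 1
+//	math.Log1p(x)   // ≈ 1e-16, correct to full precision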
diff --git a/contrib/go/_std_1.18/src/math/log_amd64.s b/contrib/go/_std_1.19/src/math/log_amd64.s
index d84091f23a..d84091f23a 100644
--- a/contrib/go/_std_1.18/src/math/log_amd64.s
+++ b/contrib/go/_std_1.19/src/math/log_amd64.s
diff --git a/contrib/go/_std_1.18/src/math/log_asm.go b/contrib/go/_std_1.19/src/math/log_asm.go
index 848cce13b2..848cce13b2 100644
--- a/contrib/go/_std_1.18/src/math/log_asm.go
+++ b/contrib/go/_std_1.19/src/math/log_asm.go
diff --git a/contrib/go/_std_1.19/src/math/logb.go b/contrib/go/_std_1.19/src/math/logb.go
new file mode 100644
index 0000000000..04ba3e968e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/logb.go
@@ -0,0 +1,52 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// Logb returns the binary exponent of x.
+//
+// Special cases are:
+//
+// Logb(±Inf) = +Inf
+// Logb(0) = -Inf
+// Logb(NaN) = NaN
+func Logb(x float64) float64 {
+ // special cases
+ switch {
+ case x == 0:
+ return Inf(-1)
+ case IsInf(x, 0):
+ return Inf(1)
+ case IsNaN(x):
+ return x
+ }
+ return float64(ilogb(x))
+}
+
+// Ilogb returns the binary exponent of x as an integer.
+//
+// Special cases are:
+//
+// Ilogb(±Inf) = MaxInt32
+// Ilogb(0) = MinInt32
+// Ilogb(NaN) = MaxInt32
+func Ilogb(x float64) int {
+ // special cases
+ switch {
+ case x == 0:
+ return MinInt32
+ case IsNaN(x):
+ return MaxInt32
+ case IsInf(x, 0):
+ return MaxInt32
+ }
+ return ilogb(x)
+}
+
+// ilogb returns the binary exponent of x. It assumes x is finite and
+// non-zero.
+func ilogb(x float64) int {
+ x, exp := normalize(x)
+ return int((Float64bits(x)>>shift)&mask) - bias + exp
+}
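+
+// Editorial sketch (not part of the upstream source): the exponent is the
+// unbiased power of two of the normalized value. Assuming the exported
+// math API:
+//
+//	math.Logb(8)    // 3:  8 = 1.0  * 2**3
+//	math.Ilogb(10)  // 3: 10 = 1.25 * 2**3
+//	math.Ilogb(0.5) // -1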
diff --git a/contrib/go/_std_1.19/src/math/mod.go b/contrib/go/_std_1.19/src/math/mod.go
new file mode 100644
index 0000000000..6f24250cfb
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/mod.go
@@ -0,0 +1,52 @@
+// Copyright 2009-2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+/*
+ Floating-point mod function.
+*/
+
+// Mod returns the floating-point remainder of x/y.
+// The magnitude of the result is less than y and its
+// sign agrees with that of x.
+//
+// Special cases are:
+//
+// Mod(±Inf, y) = NaN
+// Mod(NaN, y) = NaN
+// Mod(x, 0) = NaN
+// Mod(x, ±Inf) = x
+// Mod(x, NaN) = NaN
+func Mod(x, y float64) float64 {
+ if haveArchMod {
+ return archMod(x, y)
+ }
+ return mod(x, y)
+}
+
+func mod(x, y float64) float64 {
+ if y == 0 || IsInf(x, 0) || IsNaN(x) || IsNaN(y) {
+ return NaN()
+ }
+ y = Abs(y)
+
+ yfr, yexp := Frexp(y)
+ r := x
+ if x < 0 {
+ r = -x
+ }
+
+ for r >= y {
+ rfr, rexp := Frexp(r)
+ if rfr < yfr {
+ rexp = rexp - 1
+ }
+ r = r - Ldexp(y, rexp-yexp)
+ }
+ if x < 0 {
+ r = -r
+ }
+ return r
+}
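+
+// Editorial sketch (not part of the upstream source): as the Mod doc
+// comment says, the result takes the sign of x, and the sign of y is
+// ignored. Assuming the exported math API:
+//
+//	math.Mod(-7, 3) // -1
+//	math.Mod(7, -3) //  1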
diff --git a/contrib/go/_std_1.19/src/math/modf.go b/contrib/go/_std_1.19/src/math/modf.go
new file mode 100644
index 0000000000..613a75fc9a
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/modf.go
@@ -0,0 +1,43 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// Modf returns integer and fractional floating-point numbers
+// that sum to f. Both values have the same sign as f.
+//
+// Special cases are:
+//
+// Modf(±Inf) = ±Inf, NaN
+// Modf(NaN) = NaN, NaN
+func Modf(f float64) (int float64, frac float64) {
+ if haveArchModf {
+ return archModf(f)
+ }
+ return modf(f)
+}
+
+func modf(f float64) (int float64, frac float64) {
+ if f < 1 {
+ switch {
+ case f < 0:
+ int, frac = Modf(-f)
+ return -int, -frac
+ case f == 0:
+ return f, f // Return -0, -0 when f == -0
+ }
+ return 0, f
+ }
+
+ x := Float64bits(f)
+ e := uint(x>>shift)&mask - bias
+
+ // Keep the top 12+e bits, the integer part; clear the rest.
+ if e < 64-12 {
+ x &^= 1<<(64-12-e) - 1
+ }
+ int = Float64frombits(x)
+ frac = f - int
+ return
+}
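+
+// Editorial sketch (not part of the upstream source): both parts carry the
+// sign of f. Assuming the exported math API:
+//
+//	i, frac := math.Modf(-3.7) // i = -3, frac ≈ -0.7
+//	_ = i + frac               // sums back to -3.7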
diff --git a/contrib/go/_std_1.18/src/math/modf_noasm.go b/contrib/go/_std_1.19/src/math/modf_noasm.go
index 55c6a7f6e2..55c6a7f6e2 100644
--- a/contrib/go/_std_1.18/src/math/modf_noasm.go
+++ b/contrib/go/_std_1.19/src/math/modf_noasm.go
diff --git a/contrib/go/_std_1.19/src/math/nextafter.go b/contrib/go/_std_1.19/src/math/nextafter.go
new file mode 100644
index 0000000000..ec18d542d9
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/nextafter.go
@@ -0,0 +1,51 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// Nextafter32 returns the next representable float32 value after x towards y.
+//
+// Special cases are:
+//
+// Nextafter32(x, x) = x
+// Nextafter32(NaN, y) = NaN
+// Nextafter32(x, NaN) = NaN
+func Nextafter32(x, y float32) (r float32) {
+ switch {
+ case IsNaN(float64(x)) || IsNaN(float64(y)): // special case
+ r = float32(NaN())
+ case x == y:
+ r = x
+ case x == 0:
+ r = float32(Copysign(float64(Float32frombits(1)), float64(y)))
+ case (y > x) == (x > 0):
+ r = Float32frombits(Float32bits(x) + 1)
+ default:
+ r = Float32frombits(Float32bits(x) - 1)
+ }
+ return
+}
+
+// Nextafter returns the next representable float64 value after x towards y.
+//
+// Special cases are:
+//
+// Nextafter(x, x) = x
+// Nextafter(NaN, y) = NaN
+// Nextafter(x, NaN) = NaN
+func Nextafter(x, y float64) (r float64) {
+ switch {
+ case IsNaN(x) || IsNaN(y): // special case
+ r = NaN()
+ case x == y:
+ r = x
+ case x == 0:
+ r = Copysign(Float64frombits(1), y)
+ case (y > x) == (x > 0):
+ r = Float64frombits(Float64bits(x) + 1)
+ default:
+ r = Float64frombits(Float64bits(x) - 1)
+ }
+ return
+}
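+
+// Editorial sketch (not part of the upstream source): stepping the bit
+// pattern by one, as above, moves exactly one ULP. Assuming the exported
+// math API:
+//
+//	math.Nextafter(1, 2) // 1 + 2**-52 ≈ 1.0000000000000002
+//	math.Nextafter(1, 0) // 1 - 2**-53 ≈ 0.9999999999999999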
diff --git a/contrib/go/_std_1.19/src/math/pow.go b/contrib/go/_std_1.19/src/math/pow.go
new file mode 100644
index 0000000000..3af8c8b649
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/pow.go
@@ -0,0 +1,157 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+func isOddInt(x float64) bool {
+ xi, xf := Modf(x)
+ return xf == 0 && int64(xi)&1 == 1
+}
+
+// Special cases taken from FreeBSD's /usr/src/lib/msun/src/e_pow.c
+// updated by IEEE Std. 754-2008 "Section 9.2.1 Special values".
+
+// Pow returns x**y, the base-x exponential of y.
+//
+// Special cases are (in order):
+//
+// Pow(x, ±0) = 1 for any x
+// Pow(1, y) = 1 for any y
+// Pow(x, 1) = x for any x
+// Pow(NaN, y) = NaN
+// Pow(x, NaN) = NaN
+// Pow(±0, y) = ±Inf for y an odd integer < 0
+// Pow(±0, -Inf) = +Inf
+// Pow(±0, +Inf) = +0
+// Pow(±0, y) = +Inf for finite y < 0 and not an odd integer
+// Pow(±0, y) = ±0 for y an odd integer > 0
+// Pow(±0, y) = +0 for finite y > 0 and not an odd integer
+// Pow(-1, ±Inf) = 1
+// Pow(x, +Inf) = +Inf for |x| > 1
+// Pow(x, -Inf) = +0 for |x| > 1
+// Pow(x, +Inf) = +0 for |x| < 1
+// Pow(x, -Inf) = +Inf for |x| < 1
+// Pow(+Inf, y) = +Inf for y > 0
+// Pow(+Inf, y) = +0 for y < 0
+// Pow(-Inf, y) = Pow(-0, -y)
+// Pow(x, y) = NaN for finite x < 0 and finite non-integer y
+func Pow(x, y float64) float64 {
+ if haveArchPow {
+ return archPow(x, y)
+ }
+ return pow(x, y)
+}
+
+func pow(x, y float64) float64 {
+ switch {
+ case y == 0 || x == 1:
+ return 1
+ case y == 1:
+ return x
+ case IsNaN(x) || IsNaN(y):
+ return NaN()
+ case x == 0:
+ switch {
+ case y < 0:
+ if isOddInt(y) {
+ return Copysign(Inf(1), x)
+ }
+ return Inf(1)
+ case y > 0:
+ if isOddInt(y) {
+ return x
+ }
+ return 0
+ }
+ case IsInf(y, 0):
+ switch {
+ case x == -1:
+ return 1
+ case (Abs(x) < 1) == IsInf(y, 1):
+ return 0
+ default:
+ return Inf(1)
+ }
+ case IsInf(x, 0):
+ if IsInf(x, -1) {
+ return Pow(1/x, -y) // Pow(-0, -y)
+ }
+ switch {
+ case y < 0:
+ return 0
+ case y > 0:
+ return Inf(1)
+ }
+ case y == 0.5:
+ return Sqrt(x)
+ case y == -0.5:
+ return 1 / Sqrt(x)
+ }
+
+ yi, yf := Modf(Abs(y))
+ if yf != 0 && x < 0 {
+ return NaN()
+ }
+ if yi >= 1<<63 {
+ // yi is a large even int that will lead to overflow (or underflow to 0)
+ // for all x except -1 (x == 1 was handled earlier)
+ switch {
+ case x == -1:
+ return 1
+ case (Abs(x) < 1) == (y > 0):
+ return 0
+ default:
+ return Inf(1)
+ }
+ }
+
+ // ans = a1 * 2**ae (= 1 for now).
+ a1 := 1.0
+ ae := 0
+
+ // ans *= x**yf
+ if yf != 0 {
+ if yf > 0.5 {
+ yf--
+ yi++
+ }
+ a1 = Exp(yf * Log(x))
+ }
+
+ // ans *= x**yi
+ // by multiplying in successive squarings
+ // of x according to bits of yi.
+ // accumulate powers of two into exp.
+ x1, xe := Frexp(x)
+ for i := int64(yi); i != 0; i >>= 1 {
+ if xe < -1<<12 || 1<<12 < xe {
+ // catch xe before it overflows the left shift below
+ // Since i !=0 it has at least one bit still set, so ae will accumulate xe
+ // on at least one more iteration, ae += xe is a lower bound on ae
+ // the lower bound on ae exceeds the size of a float64 exp
+ // so the final call to Ldexp will produce under/overflow (0/Inf)
+ ae += xe
+ break
+ }
+ if i&1 == 1 {
+ a1 *= x1
+ ae += xe
+ }
+ x1 *= x1
+ xe <<= 1
+ if x1 < .5 {
+ x1 += x1
+ xe--
+ }
+ }
+
+ // ans = a1*2**ae
+ // if y < 0 { ans = 1 / ans }
+ // but in the opposite order
+ if y < 0 {
+ a1 = 1 / a1
+ ae = -ae
+ }
+ return Ldexp(a1, ae)
+}
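+
+// Editorial sketch (not part of the upstream source): two entries from the
+// special-case table above, checked against the code paths. Assuming the
+// exported math API:
+//
+//	math.Pow(2, 10)       // 1024, via the bit-by-bit squaring loop
+//	math.Pow(-8, 1.0/3.0) // NaN: finite x < 0 with non-integer y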
diff --git a/contrib/go/_std_1.19/src/math/pow10.go b/contrib/go/_std_1.19/src/math/pow10.go
new file mode 100644
index 0000000000..c31ad8dbc7
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/pow10.go
@@ -0,0 +1,47 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// pow10tab stores the pre-computed values 10**i for i < 32.
+var pow10tab = [...]float64{
+ 1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+ 1e20, 1e21, 1e22, 1e23, 1e24, 1e25, 1e26, 1e27, 1e28, 1e29,
+ 1e30, 1e31,
+}
+
+// pow10postab32 stores the pre-computed value for 10**(i*32) at index i.
+var pow10postab32 = [...]float64{
+ 1e00, 1e32, 1e64, 1e96, 1e128, 1e160, 1e192, 1e224, 1e256, 1e288,
+}
+
+// pow10negtab32 stores the pre-computed value for 10**(-i*32) at index i.
+var pow10negtab32 = [...]float64{
+ 1e-00, 1e-32, 1e-64, 1e-96, 1e-128, 1e-160, 1e-192, 1e-224, 1e-256, 1e-288, 1e-320,
+}
+
+// Pow10 returns 10**n, the base-10 exponential of n.
+//
+// Special cases are:
+//
+// Pow10(n) = 0 for n < -323
+// Pow10(n) = +Inf for n > 308
+func Pow10(n int) float64 {
+ if 0 <= n && n <= 308 {
+ return pow10postab32[uint(n)/32] * pow10tab[uint(n)%32]
+ }
+
+ if -323 <= n && n <= 0 {
+ return pow10negtab32[uint(-n)/32] / pow10tab[uint(-n)%32]
+ }
+
+ // n < -323 || 308 < n
+ if n > 0 {
+ return Inf(1)
+ }
+
+ // n < -323
+ return 0
+}
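+
+// A brief sketch, assumed caller, not part of the upstream file: the two-table
+// decomposition above reconstructs 10**n from n/32 and n%32, e.g.
+// Pow10(64) = pow10postab32[2] * pow10tab[0].
+//
+// fmt.Println(math.Pow10(64)) // 1e+64
+// fmt.Println(math.Pow10(309)) // +Inf: n > 308
+// fmt.Println(math.Pow10(-400)) // 0: n < -323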
diff --git a/contrib/go/_std_1.19/src/math/rand/exp.go b/contrib/go/_std_1.19/src/math/rand/exp.go
new file mode 100644
index 0000000000..c1162c19b6
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/rand/exp.go
@@ -0,0 +1,221 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+import (
+ "math"
+)
+
+/*
+ * Exponential distribution
+ *
+ * See "The Ziggurat Method for Generating Random Variables"
+ * (Marsaglia & Tsang, 2000)
+ * https://www.jstatsoft.org/v05/i08/paper [pdf]
+ */
+
+const (
+ re = 7.69711747013104972
+)
+
+// ExpFloat64 returns an exponentially distributed float64 in the range
+// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter
+// (lambda) is 1 and whose mean is 1/lambda (1).
+// To produce a distribution with a different rate parameter,
+// callers can adjust the output using:
+//
+// sample = ExpFloat64() / desiredRateParameter
+func (r *Rand) ExpFloat64() float64 {
+ for {
+ j := r.Uint32()
+ i := j & 0xFF
+ x := float64(j) * float64(we[i])
+ if j < ke[i] {
+ return x
+ }
+ if i == 0 {
+ return re - math.Log(r.Float64())
+ }
+ if fe[i]+float32(r.Float64())*(fe[i-1]-fe[i]) < float32(math.Exp(-x)) {
+ return x
+ }
+ }
+}
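+
+// A usage sketch, hypothetical caller, not part of the upstream file:
+// rescaling the output for a different rate parameter, as the doc comment
+// above suggests.
+//
+// r := rand.New(rand.NewSource(1))
+// sample := r.ExpFloat64() / 2.0 // exponential with rate 2, mean 0.5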
+
+var ke = [256]uint32{
+ 0xe290a139, 0x0, 0x9beadebc, 0xc377ac71, 0xd4ddb990,
+ 0xde893fb8, 0xe4a8e87c, 0xe8dff16a, 0xebf2deab, 0xee49a6e8,
+ 0xf0204efd, 0xf19bdb8e, 0xf2d458bb, 0xf3da104b, 0xf4b86d78,
+ 0xf577ad8a, 0xf61de83d, 0xf6afb784, 0xf730a573, 0xf7a37651,
+ 0xf80a5bb6, 0xf867189d, 0xf8bb1b4f, 0xf9079062, 0xf94d70ca,
+ 0xf98d8c7d, 0xf9c8928a, 0xf9ff175b, 0xfa319996, 0xfa6085f8,
+ 0xfa8c3a62, 0xfab5084e, 0xfadb36c8, 0xfaff0410, 0xfb20a6ea,
+ 0xfb404fb4, 0xfb5e2951, 0xfb7a59e9, 0xfb95038c, 0xfbae44ba,
+ 0xfbc638d8, 0xfbdcf892, 0xfbf29a30, 0xfc0731df, 0xfc1ad1ed,
+ 0xfc2d8b02, 0xfc3f6c4d, 0xfc5083ac, 0xfc60ddd1, 0xfc708662,
+ 0xfc7f8810, 0xfc8decb4, 0xfc9bbd62, 0xfca9027c, 0xfcb5c3c3,
+ 0xfcc20864, 0xfccdd70a, 0xfcd935e3, 0xfce42ab0, 0xfceebace,
+ 0xfcf8eb3b, 0xfd02c0a0, 0xfd0c3f59, 0xfd156b7b, 0xfd1e48d6,
+ 0xfd26daff, 0xfd2f2552, 0xfd372af7, 0xfd3eeee5, 0xfd4673e7,
+ 0xfd4dbc9e, 0xfd54cb85, 0xfd5ba2f2, 0xfd62451b, 0xfd68b415,
+ 0xfd6ef1da, 0xfd750047, 0xfd7ae120, 0xfd809612, 0xfd8620b4,
+ 0xfd8b8285, 0xfd90bcf5, 0xfd95d15e, 0xfd9ac10b, 0xfd9f8d36,
+ 0xfda43708, 0xfda8bf9e, 0xfdad2806, 0xfdb17141, 0xfdb59c46,
+ 0xfdb9a9fd, 0xfdbd9b46, 0xfdc170f6, 0xfdc52bd8, 0xfdc8ccac,
+ 0xfdcc542d, 0xfdcfc30b, 0xfdd319ef, 0xfdd6597a, 0xfdd98245,
+ 0xfddc94e5, 0xfddf91e6, 0xfde279ce, 0xfde54d1f, 0xfde80c52,
+ 0xfdeab7de, 0xfded5034, 0xfdefd5be, 0xfdf248e3, 0xfdf4aa06,
+ 0xfdf6f984, 0xfdf937b6, 0xfdfb64f4, 0xfdfd818d, 0xfdff8dd0,
+ 0xfe018a08, 0xfe03767a, 0xfe05536c, 0xfe07211c, 0xfe08dfc9,
+ 0xfe0a8fab, 0xfe0c30fb, 0xfe0dc3ec, 0xfe0f48b1, 0xfe10bf76,
+ 0xfe122869, 0xfe1383b4, 0xfe14d17c, 0xfe1611e7, 0xfe174516,
+ 0xfe186b2a, 0xfe19843e, 0xfe1a9070, 0xfe1b8fd6, 0xfe1c8289,
+ 0xfe1d689b, 0xfe1e4220, 0xfe1f0f26, 0xfe1fcfbc, 0xfe2083ed,
+ 0xfe212bc3, 0xfe21c745, 0xfe225678, 0xfe22d95f, 0xfe234ffb,
+ 0xfe23ba4a, 0xfe241849, 0xfe2469f2, 0xfe24af3c, 0xfe24e81e,
+ 0xfe25148b, 0xfe253474, 0xfe2547c7, 0xfe254e70, 0xfe25485a,
+ 0xfe25356a, 0xfe251586, 0xfe24e88f, 0xfe24ae64, 0xfe2466e1,
+ 0xfe2411df, 0xfe23af34, 0xfe233eb4, 0xfe22c02c, 0xfe22336b,
+ 0xfe219838, 0xfe20ee58, 0xfe20358c, 0xfe1f6d92, 0xfe1e9621,
+ 0xfe1daef0, 0xfe1cb7ac, 0xfe1bb002, 0xfe1a9798, 0xfe196e0d,
+ 0xfe1832fd, 0xfe16e5fe, 0xfe15869d, 0xfe141464, 0xfe128ed3,
+ 0xfe10f565, 0xfe0f478c, 0xfe0d84b1, 0xfe0bac36, 0xfe09bd73,
+ 0xfe07b7b5, 0xfe059a40, 0xfe03644c, 0xfe011504, 0xfdfeab88,
+ 0xfdfc26e9, 0xfdf98629, 0xfdf6c83b, 0xfdf3ec01, 0xfdf0f04a,
+ 0xfdedd3d1, 0xfdea953d, 0xfde7331e, 0xfde3abe9, 0xfddffdfb,
+ 0xfddc2791, 0xfdd826cd, 0xfdd3f9a8, 0xfdcf9dfc, 0xfdcb1176,
+ 0xfdc65198, 0xfdc15bb3, 0xfdbc2ce2, 0xfdb6c206, 0xfdb117be,
+ 0xfdab2a63, 0xfda4f5fd, 0xfd9e7640, 0xfd97a67a, 0xfd908192,
+ 0xfd8901f2, 0xfd812182, 0xfd78d98e, 0xfd7022bb, 0xfd66f4ed,
+ 0xfd5d4732, 0xfd530f9c, 0xfd48432b, 0xfd3cd59a, 0xfd30b936,
+ 0xfd23dea4, 0xfd16349e, 0xfd07a7a3, 0xfcf8219b, 0xfce7895b,
+ 0xfcd5c220, 0xfcc2aadb, 0xfcae1d5e, 0xfc97ed4e, 0xfc7fe6d4,
+ 0xfc65ccf3, 0xfc495762, 0xfc2a2fc8, 0xfc07ee19, 0xfbe213c1,
+ 0xfbb8051a, 0xfb890078, 0xfb5411a5, 0xfb180005, 0xfad33482,
+ 0xfa839276, 0xfa263b32, 0xf9b72d1c, 0xf930a1a2, 0xf889f023,
+ 0xf7b577d2, 0xf69c650c, 0xf51530f0, 0xf2cb0e3c, 0xeeefb15d,
+ 0xe6da6ecf,
+}
+var we = [256]float32{
+ 2.0249555e-09, 1.486674e-11, 2.4409617e-11, 3.1968806e-11,
+ 3.844677e-11, 4.4228204e-11, 4.9516443e-11, 5.443359e-11,
+ 5.905944e-11, 6.344942e-11, 6.7643814e-11, 7.1672945e-11,
+ 7.556032e-11, 7.932458e-11, 8.298079e-11, 8.654132e-11,
+ 9.0016515e-11, 9.3415074e-11, 9.674443e-11, 1.0001099e-10,
+ 1.03220314e-10, 1.06377254e-10, 1.09486115e-10, 1.1255068e-10,
+ 1.1557435e-10, 1.1856015e-10, 1.2151083e-10, 1.2442886e-10,
+ 1.2731648e-10, 1.3017575e-10, 1.3300853e-10, 1.3581657e-10,
+ 1.3860142e-10, 1.4136457e-10, 1.4410738e-10, 1.4683108e-10,
+ 1.4953687e-10, 1.5222583e-10, 1.54899e-10, 1.5755733e-10,
+ 1.6020171e-10, 1.6283301e-10, 1.6545203e-10, 1.6805951e-10,
+ 1.7065617e-10, 1.732427e-10, 1.7581973e-10, 1.7838787e-10,
+ 1.8094774e-10, 1.8349985e-10, 1.8604476e-10, 1.8858298e-10,
+ 1.9111498e-10, 1.9364126e-10, 1.9616223e-10, 1.9867835e-10,
+ 2.0119004e-10, 2.0369768e-10, 2.0620168e-10, 2.087024e-10,
+ 2.1120022e-10, 2.136955e-10, 2.1618855e-10, 2.1867974e-10,
+ 2.2116936e-10, 2.2365775e-10, 2.261452e-10, 2.2863202e-10,
+ 2.311185e-10, 2.3360494e-10, 2.360916e-10, 2.3857874e-10,
+ 2.4106667e-10, 2.4355562e-10, 2.4604588e-10, 2.485377e-10,
+ 2.5103128e-10, 2.5352695e-10, 2.560249e-10, 2.585254e-10,
+ 2.6102867e-10, 2.6353494e-10, 2.6604446e-10, 2.6855745e-10,
+ 2.7107416e-10, 2.7359479e-10, 2.761196e-10, 2.7864877e-10,
+ 2.8118255e-10, 2.8372119e-10, 2.8626485e-10, 2.888138e-10,
+ 2.9136826e-10, 2.939284e-10, 2.9649452e-10, 2.9906677e-10,
+ 3.016454e-10, 3.0423064e-10, 3.0682268e-10, 3.0942177e-10,
+ 3.1202813e-10, 3.1464195e-10, 3.1726352e-10, 3.19893e-10,
+ 3.2253064e-10, 3.251767e-10, 3.2783135e-10, 3.3049485e-10,
+ 3.3316744e-10, 3.3584938e-10, 3.3854083e-10, 3.4124212e-10,
+ 3.4395342e-10, 3.46675e-10, 3.4940711e-10, 3.5215003e-10,
+ 3.5490397e-10, 3.5766917e-10, 3.6044595e-10, 3.6323455e-10,
+ 3.660352e-10, 3.6884823e-10, 3.7167386e-10, 3.745124e-10,
+ 3.773641e-10, 3.802293e-10, 3.8310827e-10, 3.860013e-10,
+ 3.8890866e-10, 3.918307e-10, 3.9476775e-10, 3.9772008e-10,
+ 4.0068804e-10, 4.0367196e-10, 4.0667217e-10, 4.09689e-10,
+ 4.1272286e-10, 4.1577405e-10, 4.1884296e-10, 4.2192994e-10,
+ 4.250354e-10, 4.281597e-10, 4.313033e-10, 4.3446652e-10,
+ 4.3764986e-10, 4.408537e-10, 4.4407847e-10, 4.4732465e-10,
+ 4.5059267e-10, 4.5388301e-10, 4.571962e-10, 4.6053267e-10,
+ 4.6389292e-10, 4.6727755e-10, 4.70687e-10, 4.741219e-10,
+ 4.7758275e-10, 4.810702e-10, 4.845848e-10, 4.8812715e-10,
+ 4.9169796e-10, 4.9529775e-10, 4.989273e-10, 5.0258725e-10,
+ 5.0627835e-10, 5.100013e-10, 5.1375687e-10, 5.1754584e-10,
+ 5.21369e-10, 5.2522725e-10, 5.2912136e-10, 5.330522e-10,
+ 5.370208e-10, 5.4102806e-10, 5.45075e-10, 5.491625e-10,
+ 5.532918e-10, 5.5746385e-10, 5.616799e-10, 5.6594107e-10,
+ 5.7024857e-10, 5.746037e-10, 5.7900773e-10, 5.834621e-10,
+ 5.8796823e-10, 5.925276e-10, 5.971417e-10, 6.018122e-10,
+ 6.065408e-10, 6.113292e-10, 6.1617933e-10, 6.2109295e-10,
+ 6.260722e-10, 6.3111916e-10, 6.3623595e-10, 6.4142497e-10,
+ 6.4668854e-10, 6.5202926e-10, 6.5744976e-10, 6.6295286e-10,
+ 6.6854156e-10, 6.742188e-10, 6.79988e-10, 6.858526e-10,
+ 6.9181616e-10, 6.978826e-10, 7.04056e-10, 7.103407e-10,
+ 7.167412e-10, 7.2326256e-10, 7.2990985e-10, 7.366886e-10,
+ 7.4360473e-10, 7.5066453e-10, 7.5787476e-10, 7.6524265e-10,
+ 7.7277595e-10, 7.80483e-10, 7.883728e-10, 7.9645507e-10,
+ 8.047402e-10, 8.1323964e-10, 8.219657e-10, 8.309319e-10,
+ 8.401528e-10, 8.496445e-10, 8.594247e-10, 8.6951274e-10,
+ 8.799301e-10, 8.9070046e-10, 9.018503e-10, 9.134092e-10,
+ 9.254101e-10, 9.378904e-10, 9.508923e-10, 9.644638e-10,
+ 9.786603e-10, 9.935448e-10, 1.0091913e-09, 1.025686e-09,
+ 1.0431306e-09, 1.0616465e-09, 1.08138e-09, 1.1025096e-09,
+ 1.1252564e-09, 1.1498986e-09, 1.1767932e-09, 1.206409e-09,
+ 1.2393786e-09, 1.276585e-09, 1.3193139e-09, 1.3695435e-09,
+ 1.4305498e-09, 1.508365e-09, 1.6160854e-09, 1.7921248e-09,
+}
+var fe = [256]float32{
+ 1, 0.9381437, 0.90046996, 0.87170434, 0.8477855, 0.8269933,
+ 0.8084217, 0.7915276, 0.77595687, 0.7614634, 0.7478686,
+ 0.7350381, 0.72286767, 0.71127474, 0.70019263, 0.6895665,
+ 0.67935055, 0.6695063, 0.66000086, 0.65080583, 0.6418967,
+ 0.63325197, 0.6248527, 0.6166822, 0.60872537, 0.60096896,
+ 0.5934009, 0.58601034, 0.5787874, 0.57172304, 0.5648092,
+ 0.5580383, 0.5514034, 0.5448982, 0.5385169, 0.53225386,
+ 0.5261042, 0.52006316, 0.5141264, 0.50828975, 0.5025495,
+ 0.496902, 0.49134386, 0.485872, 0.48048335, 0.4751752,
+ 0.46994483, 0.46478975, 0.45970762, 0.45469615, 0.44975325,
+ 0.44487688, 0.44006512, 0.43531612, 0.43062815, 0.42599955,
+ 0.42142874, 0.4169142, 0.41245446, 0.40804818, 0.403694,
+ 0.3993907, 0.39513698, 0.39093173, 0.38677382, 0.38266218,
+ 0.37859577, 0.37457356, 0.37059465, 0.3666581, 0.362763,
+ 0.35890847, 0.35509375, 0.351318, 0.3475805, 0.34388044,
+ 0.34021714, 0.3365899, 0.33299807, 0.32944095, 0.32591796,
+ 0.3224285, 0.3189719, 0.31554767, 0.31215525, 0.30879408,
+ 0.3054636, 0.3021634, 0.29889292, 0.2956517, 0.29243928,
+ 0.28925523, 0.28609908, 0.28297043, 0.27986884, 0.27679393,
+ 0.2737453, 0.2707226, 0.2677254, 0.26475343, 0.26180625,
+ 0.25888354, 0.25598502, 0.2531103, 0.25025907, 0.24743107,
+ 0.24462597, 0.24184346, 0.23908329, 0.23634516, 0.23362878,
+ 0.23093392, 0.2282603, 0.22560766, 0.22297576, 0.22036438,
+ 0.21777324, 0.21520215, 0.21265087, 0.21011916, 0.20760682,
+ 0.20511365, 0.20263945, 0.20018397, 0.19774707, 0.19532852,
+ 0.19292815, 0.19054577, 0.1881812, 0.18583426, 0.18350479,
+ 0.1811926, 0.17889754, 0.17661946, 0.17435817, 0.17211354,
+ 0.1698854, 0.16767362, 0.16547804, 0.16329853, 0.16113494,
+ 0.15898713, 0.15685499, 0.15473837, 0.15263714, 0.15055119,
+ 0.14848037, 0.14642459, 0.14438373, 0.14235765, 0.14034624,
+ 0.13834943, 0.13636707, 0.13439907, 0.13244532, 0.13050574,
+ 0.1285802, 0.12666863, 0.12477092, 0.12288698, 0.12101672,
+ 0.119160056, 0.1173169, 0.115487166, 0.11367077, 0.11186763,
+ 0.11007768, 0.10830083, 0.10653701, 0.10478614, 0.10304816,
+ 0.101323, 0.09961058, 0.09791085, 0.09622374, 0.09454919,
+ 0.09288713, 0.091237515, 0.08960028, 0.087975375, 0.08636274,
+ 0.08476233, 0.083174095, 0.081597984, 0.08003395, 0.07848195,
+ 0.076941945, 0.07541389, 0.07389775, 0.072393484, 0.07090106,
+ 0.069420435, 0.06795159, 0.066494495, 0.06504912, 0.063615434,
+ 0.062193416, 0.060783047, 0.059384305, 0.057997175,
+ 0.05662164, 0.05525769, 0.053905312, 0.052564494, 0.051235236,
+ 0.049917534, 0.048611384, 0.047316793, 0.046033762, 0.0447623,
+ 0.043502413, 0.042254124, 0.041017443, 0.039792392,
+ 0.038578995, 0.037377283, 0.036187284, 0.035009038,
+ 0.033842582, 0.032687962, 0.031545233, 0.030414443, 0.02929566,
+ 0.02818895, 0.027094385, 0.026012046, 0.024942026, 0.023884421,
+ 0.022839336, 0.021806888, 0.020787204, 0.019780423, 0.0187867,
+ 0.0178062, 0.016839107, 0.015885621, 0.014945968, 0.014020392,
+ 0.013109165, 0.012212592, 0.011331013, 0.01046481, 0.009614414,
+ 0.008780315, 0.007963077, 0.0071633533, 0.006381906,
+ 0.0056196423, 0.0048776558, 0.004157295, 0.0034602648,
+ 0.0027887989, 0.0021459677, 0.0015362998, 0.0009672693,
+ 0.00045413437,
+}
diff --git a/contrib/go/_std_1.19/src/math/rand/normal.go b/contrib/go/_std_1.19/src/math/rand/normal.go
new file mode 100644
index 0000000000..6654479a00
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/rand/normal.go
@@ -0,0 +1,156 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+import (
+ "math"
+)
+
+/*
+ * Normal distribution
+ *
+ * See "The Ziggurat Method for Generating Random Variables"
+ * (Marsaglia & Tsang, 2000)
+ * http://www.jstatsoft.org/v05/i08/paper [pdf]
+ */
+
+const (
+ rn = 3.442619855899
+)
+
+func absInt32(i int32) uint32 {
+ if i < 0 {
+ return uint32(-i)
+ }
+ return uint32(i)
+}
+
+// NormFloat64 returns a normally distributed float64 in
+// the range -math.MaxFloat64 through +math.MaxFloat64 inclusive,
+// with standard normal distribution (mean = 0, stddev = 1).
+// To produce a different normal distribution, callers can
+// adjust the output using:
+//
+// sample = NormFloat64() * desiredStdDev + desiredMean
+func (r *Rand) NormFloat64() float64 {
+ for {
+ j := int32(r.Uint32()) // Possibly negative
+ i := j & 0x7F
+ x := float64(j) * float64(wn[i])
+ if absInt32(j) < kn[i] {
+ // This case should be hit better than 99% of the time.
+ return x
+ }
+
+ if i == 0 {
+ // This extra work is only required for the base strip.
+ for {
+ x = -math.Log(r.Float64()) * (1.0 / rn)
+ y := -math.Log(r.Float64())
+ if y+y >= x*x {
+ break
+ }
+ }
+ if j > 0 {
+ return rn + x
+ }
+ return -rn - x
+ }
+ if fn[i]+float32(r.Float64())*(fn[i-1]-fn[i]) < float32(math.Exp(-.5*x*x)) {
+ return x
+ }
+ }
+}
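+
+// A usage sketch, hypothetical caller, not part of the upstream file:
+// shifting and scaling the output into a normal distribution with mean 10
+// and standard deviation 3, as the doc comment above suggests.
+//
+// r := rand.New(rand.NewSource(1))
+// sample := r.NormFloat64()*3 + 10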
+
+var kn = [128]uint32{
+ 0x76ad2212, 0x0, 0x600f1b53, 0x6ce447a6, 0x725b46a2,
+ 0x7560051d, 0x774921eb, 0x789a25bd, 0x799045c3, 0x7a4bce5d,
+ 0x7adf629f, 0x7b5682a6, 0x7bb8a8c6, 0x7c0ae722, 0x7c50cce7,
+ 0x7c8cec5b, 0x7cc12cd6, 0x7ceefed2, 0x7d177e0b, 0x7d3b8883,
+ 0x7d5bce6c, 0x7d78dd64, 0x7d932886, 0x7dab0e57, 0x7dc0dd30,
+ 0x7dd4d688, 0x7de73185, 0x7df81cea, 0x7e07c0a3, 0x7e163efa,
+ 0x7e23b587, 0x7e303dfd, 0x7e3beec2, 0x7e46db77, 0x7e51155d,
+ 0x7e5aabb3, 0x7e63abf7, 0x7e6c222c, 0x7e741906, 0x7e7b9a18,
+ 0x7e82adfa, 0x7e895c63, 0x7e8fac4b, 0x7e95a3fb, 0x7e9b4924,
+ 0x7ea0a0ef, 0x7ea5b00d, 0x7eaa7ac3, 0x7eaf04f3, 0x7eb3522a,
+ 0x7eb765a5, 0x7ebb4259, 0x7ebeeafd, 0x7ec2620a, 0x7ec5a9c4,
+ 0x7ec8c441, 0x7ecbb365, 0x7ece78ed, 0x7ed11671, 0x7ed38d62,
+ 0x7ed5df12, 0x7ed80cb4, 0x7eda175c, 0x7edc0005, 0x7eddc78e,
+ 0x7edf6ebf, 0x7ee0f647, 0x7ee25ebe, 0x7ee3a8a9, 0x7ee4d473,
+ 0x7ee5e276, 0x7ee6d2f5, 0x7ee7a620, 0x7ee85c10, 0x7ee8f4cd,
+ 0x7ee97047, 0x7ee9ce59, 0x7eea0eca, 0x7eea3147, 0x7eea3568,
+ 0x7eea1aab, 0x7ee9e071, 0x7ee98602, 0x7ee90a88, 0x7ee86d08,
+ 0x7ee7ac6a, 0x7ee6c769, 0x7ee5bc9c, 0x7ee48a67, 0x7ee32efc,
+ 0x7ee1a857, 0x7edff42f, 0x7ede0ffa, 0x7edbf8d9, 0x7ed9ab94,
+ 0x7ed7248d, 0x7ed45fae, 0x7ed1585c, 0x7ece095f, 0x7eca6ccb,
+ 0x7ec67be2, 0x7ec22eee, 0x7ebd7d1a, 0x7eb85c35, 0x7eb2c075,
+ 0x7eac9c20, 0x7ea5df27, 0x7e9e769f, 0x7e964c16, 0x7e8d44ba,
+ 0x7e834033, 0x7e781728, 0x7e6b9933, 0x7e5d8a1a, 0x7e4d9ded,
+ 0x7e3b737a, 0x7e268c2f, 0x7e0e3ff5, 0x7df1aa5d, 0x7dcf8c72,
+ 0x7da61a1e, 0x7d72a0fb, 0x7d30e097, 0x7cd9b4ab, 0x7c600f1a,
+ 0x7ba90bdc, 0x7a722176, 0x77d664e5,
+}
+var wn = [128]float32{
+ 1.7290405e-09, 1.2680929e-10, 1.6897518e-10, 1.9862688e-10,
+ 2.2232431e-10, 2.4244937e-10, 2.601613e-10, 2.7611988e-10,
+ 2.9073963e-10, 3.042997e-10, 3.1699796e-10, 3.289802e-10,
+ 3.4035738e-10, 3.5121603e-10, 3.616251e-10, 3.7164058e-10,
+ 3.8130857e-10, 3.9066758e-10, 3.9975012e-10, 4.08584e-10,
+ 4.1719309e-10, 4.2559822e-10, 4.338176e-10, 4.418672e-10,
+ 4.497613e-10, 4.5751258e-10, 4.651324e-10, 4.7263105e-10,
+ 4.8001775e-10, 4.87301e-10, 4.944885e-10, 5.015873e-10,
+ 5.0860405e-10, 5.155446e-10, 5.2241467e-10, 5.2921934e-10,
+ 5.359635e-10, 5.426517e-10, 5.4928817e-10, 5.5587696e-10,
+ 5.624219e-10, 5.6892646e-10, 5.753941e-10, 5.818282e-10,
+ 5.882317e-10, 5.946077e-10, 6.00959e-10, 6.072884e-10,
+ 6.135985e-10, 6.19892e-10, 6.2617134e-10, 6.3243905e-10,
+ 6.386974e-10, 6.449488e-10, 6.511956e-10, 6.5744005e-10,
+ 6.6368433e-10, 6.699307e-10, 6.7618144e-10, 6.824387e-10,
+ 6.8870465e-10, 6.949815e-10, 7.012715e-10, 7.075768e-10,
+ 7.1389966e-10, 7.202424e-10, 7.266073e-10, 7.329966e-10,
+ 7.394128e-10, 7.4585826e-10, 7.5233547e-10, 7.58847e-10,
+ 7.653954e-10, 7.719835e-10, 7.7861395e-10, 7.852897e-10,
+ 7.920138e-10, 7.987892e-10, 8.0561924e-10, 8.125073e-10,
+ 8.194569e-10, 8.2647167e-10, 8.3355556e-10, 8.407127e-10,
+ 8.479473e-10, 8.55264e-10, 8.6266755e-10, 8.7016316e-10,
+ 8.777562e-10, 8.8545243e-10, 8.932582e-10, 9.0117996e-10,
+ 9.09225e-10, 9.174008e-10, 9.2571584e-10, 9.341788e-10,
+ 9.427997e-10, 9.515889e-10, 9.605579e-10, 9.697193e-10,
+ 9.790869e-10, 9.88676e-10, 9.985036e-10, 1.0085882e-09,
+ 1.0189509e-09, 1.0296151e-09, 1.0406069e-09, 1.0519566e-09,
+ 1.063698e-09, 1.0758702e-09, 1.0885183e-09, 1.1016947e-09,
+ 1.1154611e-09, 1.1298902e-09, 1.1450696e-09, 1.1611052e-09,
+ 1.1781276e-09, 1.1962995e-09, 1.2158287e-09, 1.2369856e-09,
+ 1.2601323e-09, 1.2857697e-09, 1.3146202e-09, 1.347784e-09,
+ 1.3870636e-09, 1.4357403e-09, 1.5008659e-09, 1.6030948e-09,
+}
+var fn = [128]float32{
+ 1, 0.9635997, 0.9362827, 0.9130436, 0.89228165, 0.87324303,
+ 0.8555006, 0.8387836, 0.8229072, 0.8077383, 0.793177,
+ 0.7791461, 0.7655842, 0.7524416, 0.73967725, 0.7272569,
+ 0.7151515, 0.7033361, 0.69178915, 0.68049186, 0.6694277,
+ 0.658582, 0.6479418, 0.63749546, 0.6272325, 0.6171434,
+ 0.6072195, 0.5974532, 0.58783704, 0.5783647, 0.56903,
+ 0.5598274, 0.5507518, 0.54179835, 0.5329627, 0.52424055,
+ 0.5156282, 0.50712204, 0.49871865, 0.49041483, 0.48220766,
+ 0.4740943, 0.46607214, 0.4581387, 0.45029163, 0.44252872,
+ 0.43484783, 0.427247, 0.41972435, 0.41227803, 0.40490642,
+ 0.39760786, 0.3903808, 0.3832238, 0.37613547, 0.36911446,
+ 0.3621595, 0.35526937, 0.34844297, 0.34167916, 0.33497685,
+ 0.3283351, 0.3217529, 0.3152294, 0.30876362, 0.30235484,
+ 0.29600215, 0.28970486, 0.2834622, 0.2772735, 0.27113807,
+ 0.2650553, 0.25902456, 0.2530453, 0.24711695, 0.241239,
+ 0.23541094, 0.22963232, 0.2239027, 0.21822165, 0.21258877,
+ 0.20700371, 0.20146611, 0.19597565, 0.19053204, 0.18513499,
+ 0.17978427, 0.17447963, 0.1692209, 0.16400786, 0.15884037,
+ 0.15371831, 0.14864157, 0.14361008, 0.13862377, 0.13368265,
+ 0.12878671, 0.12393598, 0.119130544, 0.11437051, 0.10965602,
+ 0.104987256, 0.10036444, 0.095787846, 0.0912578, 0.08677467,
+ 0.0823389, 0.077950984, 0.073611505, 0.06932112, 0.06508058,
+ 0.06089077, 0.056752663, 0.0526674, 0.048636295, 0.044660863,
+ 0.040742867, 0.03688439, 0.033087887, 0.029356318,
+ 0.025693292, 0.022103304, 0.018592102, 0.015167298,
+ 0.011839478, 0.008624485, 0.005548995, 0.0026696292,
+}
diff --git a/contrib/go/_std_1.19/src/math/rand/rand.go b/contrib/go/_std_1.19/src/math/rand/rand.go
new file mode 100644
index 0000000000..4cce3dab64
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/rand/rand.go
@@ -0,0 +1,419 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rand implements pseudo-random number generators unsuitable for
+// security-sensitive work.
+//
+// Random numbers are generated by a Source. Top-level functions, such as
+// Float64 and Int, use a default shared Source that produces a deterministic
+// sequence of values each time a program is run. Use the Seed function to
+// initialize the default Source if different behavior is required for each run.
+// The default Source is safe for concurrent use by multiple goroutines, but
+// Sources created by NewSource are not.
+//
+// This package's outputs might be easily predictable regardless of how it's
+// seeded. For random numbers suitable for security-sensitive work, see the
+// crypto/rand package.
+package rand
+
+import "sync"
+
+// A Source represents a source of uniformly-distributed
+// pseudo-random int64 values in the range [0, 1<<63).
+type Source interface {
+ Int63() int64
+ Seed(seed int64)
+}
+
+// A Source64 is a Source that can also generate
+// uniformly-distributed pseudo-random uint64 values in
+// the range [0, 1<<64) directly.
+// If a Rand r's underlying Source s implements Source64,
+// then r.Uint64 returns the result of one call to s.Uint64
+// instead of making two calls to s.Int63.
+type Source64 interface {
+ Source
+ Uint64() uint64
+}
+
+// NewSource returns a new pseudo-random Source seeded with the given value.
+// Unlike the default Source used by top-level functions, this source is not
+// safe for concurrent use by multiple goroutines.
+func NewSource(seed int64) Source {
+ var rng rngSource
+ rng.Seed(seed)
+ return &rng
+}
+
+// A Rand is a source of random numbers.
+type Rand struct {
+ src Source
+ s64 Source64 // non-nil if src is source64
+
+ // readVal contains remainder of 63-bit integer used for bytes
+ // generation during most recent Read call.
+ // It is saved so next Read call can start where the previous
+ // one finished.
+ readVal int64
+ // readPos indicates the number of low-order bytes of readVal
+ // that are still valid.
+ readPos int8
+}
+
+// New returns a new Rand that uses random values from src
+// to generate other random values.
+func New(src Source) *Rand {
+ s64, _ := src.(Source64)
+ return &Rand{src: src, s64: s64}
+}
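+
+// A usage sketch, hypothetical caller, not part of the upstream file: a
+// deterministic, single-goroutine generator built from NewSource and New.
+//
+// r := rand.New(rand.NewSource(42))
+// fmt.Println(r.Intn(100)) // same value on every run for seed 42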
+
+// Seed uses the provided seed value to initialize the generator to a deterministic state.
+// Seed should not be called concurrently with any other Rand method.
+func (r *Rand) Seed(seed int64) {
+ if lk, ok := r.src.(*lockedSource); ok {
+ lk.seedPos(seed, &r.readPos)
+ return
+ }
+
+ r.src.Seed(seed)
+ r.readPos = 0
+}
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
+func (r *Rand) Int63() int64 { return r.src.Int63() }
+
+// Uint32 returns a pseudo-random 32-bit value as a uint32.
+func (r *Rand) Uint32() uint32 { return uint32(r.Int63() >> 31) }
+
+// Uint64 returns a pseudo-random 64-bit value as a uint64.
+func (r *Rand) Uint64() uint64 {
+ if r.s64 != nil {
+ return r.s64.Uint64()
+ }
+ return uint64(r.Int63())>>31 | uint64(r.Int63())<<32
+}
+
+// Int31 returns a non-negative pseudo-random 31-bit integer as an int32.
+func (r *Rand) Int31() int32 { return int32(r.Int63() >> 32) }
+
+// Int returns a non-negative pseudo-random int.
+func (r *Rand) Int() int {
+ u := uint(r.Int63())
+ return int(u << 1 >> 1) // clear sign bit if int == int32
+}
+
+// Int63n returns, as an int64, a non-negative pseudo-random number in the half-open interval [0,n).
+// It panics if n <= 0.
+func (r *Rand) Int63n(n int64) int64 {
+ if n <= 0 {
+ panic("invalid argument to Int63n")
+ }
+ if n&(n-1) == 0 { // n is power of two, can mask
+ return r.Int63() & (n - 1)
+ }
+ max := int64((1 << 63) - 1 - (1<<63)%uint64(n))
+ v := r.Int63()
+ for v > max {
+ v = r.Int63()
+ }
+ return v % n
+}
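+
+// A worked sketch of the rejection bound above, illustrative only and not
+// part of the upstream file: for n = 10, max is chosen so that [0, max]
+// contains a whole number of residue classes, keeping v % n unbiased.
+//
+// n := int64(10)
+// max := int64((1 << 63) - 1 - (1<<63)%uint64(n))
+// fmt.Println((max + 1) % n) // 0: values above max are redrawn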
+
+// Int31n returns, as an int32, a non-negative pseudo-random number in the half-open interval [0,n).
+// It panics if n <= 0.
+func (r *Rand) Int31n(n int32) int32 {
+ if n <= 0 {
+ panic("invalid argument to Int31n")
+ }
+ if n&(n-1) == 0 { // n is power of two, can mask
+ return r.Int31() & (n - 1)
+ }
+ max := int32((1 << 31) - 1 - (1<<31)%uint32(n))
+ v := r.Int31()
+ for v > max {
+ v = r.Int31()
+ }
+ return v % n
+}
+
+// int31n returns, as an int32, a non-negative pseudo-random number in the half-open interval [0,n).
+// n must be > 0, but int31n does not check this; the caller must ensure it.
+// int31n exists because Int31n is inefficient, but Go 1 compatibility
+// requires that the stream of values produced by math/rand remain unchanged.
+// int31n can thus only be used internally, by newly introduced APIs.
+//
+// For implementation details, see:
+// https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction
+// https://lemire.me/blog/2016/06/30/fast-random-shuffling
+func (r *Rand) int31n(n int32) int32 {
+ v := r.Uint32()
+ prod := uint64(v) * uint64(n)
+ low := uint32(prod)
+ if low < uint32(n) {
+ thresh := uint32(-n) % uint32(n)
+ for low < thresh {
+ v = r.Uint32()
+ prod = uint64(v) * uint64(n)
+ low = uint32(prod)
+ }
+ }
+ return int32(prod >> 32)
+}
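+
+// A worked sketch of the multiply-shift reduction above, illustrative only
+// and not part of the upstream file: the high 32 bits of the 64-bit product
+// v*n select a bucket in [0, n), and the low-word check rejects the few
+// values that would bias the result.
+//
+// v := uint32(1 << 31) // some 32-bit sample
+// prod := uint64(v) * 6
+// fmt.Println(int32(prod >> 32)) // 3: bucket 3 of [0, 6)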
+
+// Intn returns, as an int, a non-negative pseudo-random number in the half-open interval [0,n).
+// It panics if n <= 0.
+func (r *Rand) Intn(n int) int {
+ if n <= 0 {
+ panic("invalid argument to Intn")
+ }
+ if n <= 1<<31-1 {
+ return int(r.Int31n(int32(n)))
+ }
+ return int(r.Int63n(int64(n)))
+}
+
+// Float64 returns, as a float64, a pseudo-random number in the half-open interval [0.0,1.0).
+func (r *Rand) Float64() float64 {
+ // A clearer, simpler implementation would be:
+ // return float64(r.Int63n(1<<53)) / (1<<53)
+ // However, Go 1 shipped with
+ // return float64(r.Int63()) / (1 << 63)
+ // and we want to preserve that value stream.
+ //
+ // There is one bug in the value stream: r.Int63() may be so close
+ // to 1<<63 that the division rounds up to 1.0, and we've guaranteed
+ // that the result is always less than 1.0.
+ //
+ // We tried to fix this by mapping 1.0 back to 0.0, but since float64
+ // values near 0 are much denser than near 1, mapping 1 to 0 caused
+ // a theoretically significant overshoot in the probability of returning 0.
+ // Instead of that, if we round up to 1, just try again.
+ // Getting 1 only happens 1/2⁵³ of the time, so most clients
+ // will not observe it anyway.
+again:
+ f := float64(r.Int63()) / (1 << 63)
+ if f == 1 {
+ goto again // resample; this branch is taken O(never)
+ }
+ return f
+}
+
+// Float32 returns, as a float32, a pseudo-random number in the half-open interval [0.0,1.0).
+func (r *Rand) Float32() float32 {
+ // Same rationale as in Float64: we want to preserve the Go 1 value
+ // stream except we want to fix it not to return 1.0.
+ // This only happens 1/2²⁴ of the time (plus the 1/2⁵³ of the time in Float64).
+again:
+ f := float32(r.Float64())
+ if f == 1 {
+ goto again // resample; this branch is taken O(very rarely)
+ }
+ return f
+}
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers
+// in the half-open interval [0,n).
+func (r *Rand) Perm(n int) []int {
+ m := make([]int, n)
+ // In the following loop, the iteration when i=0 always swaps m[0] with m[0].
+ // A change to remove this useless iteration would be to assign 1 to i in the
+ // init statement. But Perm also affects r: making this change would alter
+ // the final state of r, so it can't be made for Go 1 compatibility reasons.
+ for i := 0; i < n; i++ {
+ j := r.Intn(i + 1)
+ m[i] = m[j]
+ m[j] = i
+ }
+ return m
+}
+
+// Shuffle pseudo-randomizes the order of elements.
+// n is the number of elements. Shuffle panics if n < 0.
+// swap swaps the elements with indexes i and j.
+func (r *Rand) Shuffle(n int, swap func(i, j int)) {
+ if n < 0 {
+ panic("invalid argument to Shuffle")
+ }
+
+ // Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
+ // Shuffle really ought not be called with n that doesn't fit in 32 bits.
+ // Not only will it take a very long time, but with 2³¹! possible permutations,
+ // there's no way that any PRNG can have a big enough internal state to
+ // generate even a minuscule percentage of the possible permutations.
+ // Nevertheless, the right API signature accepts an int n, so handle it as best we can.
+ i := n - 1
+ for ; i > 1<<31-1-1; i-- {
+ j := int(r.Int63n(int64(i + 1)))
+ swap(i, j)
+ }
+ for ; i > 0; i-- {
+ j := int(r.int31n(int32(i + 1)))
+ swap(i, j)
+ }
+}
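+
+// A usage sketch, hypothetical caller, not part of the upstream file:
+// shuffling a slice in place through the swap callback.
+//
+// r := rand.New(rand.NewSource(1))
+// s := []string{"a", "b", "c", "d"}
+// r.Shuffle(len(s), func(i, j int) { s[i], s[j] = s[j], s[i] })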
+
+// Read generates len(p) random bytes and writes them into p. It
+// always returns len(p) and a nil error.
+// Read should not be called concurrently with any other Rand method.
+func (r *Rand) Read(p []byte) (n int, err error) {
+ if lk, ok := r.src.(*lockedSource); ok {
+ return lk.read(p, &r.readVal, &r.readPos)
+ }
+ return read(p, r.src, &r.readVal, &r.readPos)
+}
+
+func read(p []byte, src Source, readVal *int64, readPos *int8) (n int, err error) {
+ pos := *readPos
+ val := *readVal
+ rng, _ := src.(*rngSource)
+ for n = 0; n < len(p); n++ {
+ if pos == 0 {
+ if rng != nil {
+ val = rng.Int63()
+ } else {
+ val = src.Int63()
+ }
+ pos = 7
+ }
+ p[n] = byte(val)
+ val >>= 8
+ pos--
+ }
+ *readPos = pos
+ *readVal = val
+ return
+}
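+
+// A usage sketch, hypothetical caller, not part of the upstream file: Read
+// consumes 7 bytes per underlying Int63 call, carrying the remainder across
+// calls in readVal/readPos.
+//
+// r := rand.New(rand.NewSource(7))
+// buf := make([]byte, 16)
+// n, err := r.Read(buf) // n == 16, err == nil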
+
+/*
+ * Top-level convenience functions
+ */
+
+var globalRand = New(&lockedSource{src: NewSource(1).(*rngSource)})
+
+// Type assert that globalRand's source is a lockedSource whose src is a *rngSource.
+var _ *rngSource = globalRand.src.(*lockedSource).src
+
+// Seed uses the provided seed value to initialize the default Source to a
+// deterministic state. If Seed is not called, the generator behaves as
+// if seeded by Seed(1). Seed values that have the same remainder when
+// divided by 2³¹-1 generate the same pseudo-random sequence.
+// Seed, unlike the Rand.Seed method, is safe for concurrent use.
+func Seed(seed int64) { globalRand.Seed(seed) }
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an int64
+// from the default Source.
+func Int63() int64 { return globalRand.Int63() }
+
+// Uint32 returns a pseudo-random 32-bit value as a uint32
+// from the default Source.
+func Uint32() uint32 { return globalRand.Uint32() }
+
+// Uint64 returns a pseudo-random 64-bit value as a uint64
+// from the default Source.
+func Uint64() uint64 { return globalRand.Uint64() }
+
+// Int31 returns a non-negative pseudo-random 31-bit integer as an int32
+// from the default Source.
+func Int31() int32 { return globalRand.Int31() }
+
+// Int returns a non-negative pseudo-random int from the default Source.
+func Int() int { return globalRand.Int() }
+
+// Int63n returns, as an int64, a non-negative pseudo-random number in the half-open interval [0,n)
+// from the default Source.
+// It panics if n <= 0.
+func Int63n(n int64) int64 { return globalRand.Int63n(n) }
+
+// Int31n returns, as an int32, a non-negative pseudo-random number in the half-open interval [0,n)
+// from the default Source.
+// It panics if n <= 0.
+func Int31n(n int32) int32 { return globalRand.Int31n(n) }
+
+// Intn returns, as an int, a non-negative pseudo-random number in the half-open interval [0,n)
+// from the default Source.
+// It panics if n <= 0.
+func Intn(n int) int { return globalRand.Intn(n) }
+
+// Float64 returns, as a float64, a pseudo-random number in the half-open interval [0.0,1.0)
+// from the default Source.
+func Float64() float64 { return globalRand.Float64() }
+
+// Float32 returns, as a float32, a pseudo-random number in the half-open interval [0.0,1.0)
+// from the default Source.
+func Float32() float32 { return globalRand.Float32() }
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers
+// in the half-open interval [0,n) from the default Source.
+func Perm(n int) []int { return globalRand.Perm(n) }
+
+// Shuffle pseudo-randomizes the order of elements using the default Source.
+// n is the number of elements. Shuffle panics if n < 0.
+// swap swaps the elements with indexes i and j.
+func Shuffle(n int, swap func(i, j int)) { globalRand.Shuffle(n, swap) }
+
+// Read generates len(p) random bytes from the default Source and
+// writes them into p. It always returns len(p) and a nil error.
+// Read, unlike the Rand.Read method, is safe for concurrent use.
+func Read(p []byte) (n int, err error) { return globalRand.Read(p) }
+
+// NormFloat64 returns a normally distributed float64 in the range
+// [-math.MaxFloat64, +math.MaxFloat64] with
+// standard normal distribution (mean = 0, stddev = 1)
+// from the default Source.
+// To produce a different normal distribution, callers can
+// adjust the output using:
+//
+// sample = NormFloat64() * desiredStdDev + desiredMean
+func NormFloat64() float64 { return globalRand.NormFloat64() }
+
+// ExpFloat64 returns an exponentially distributed float64 in the range
+// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter
+// (lambda) is 1 and whose mean is 1/lambda (1) from the default Source.
+// To produce a distribution with a different rate parameter,
+// callers can adjust the output using:
+//
+// sample = ExpFloat64() / desiredRateParameter
+func ExpFloat64() float64 { return globalRand.ExpFloat64() }
+
+type lockedSource struct {
+ lk sync.Mutex
+ src *rngSource
+}
+
+func (r *lockedSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+func (r *lockedSource) Uint64() (n uint64) {
+ r.lk.Lock()
+ n = r.src.Uint64()
+ r.lk.Unlock()
+ return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
+
+// seedPos implements Seed for a lockedSource without a race condition.
+func (r *lockedSource) seedPos(seed int64, readPos *int8) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ *readPos = 0
+ r.lk.Unlock()
+}
+
+// read implements Read for a lockedSource without a race condition.
+func (r *lockedSource) read(p []byte, readVal *int64, readPos *int8) (n int, err error) {
+ r.lk.Lock()
+ n, err = read(p, r.src, readVal, readPos)
+ r.lk.Unlock()
+ return
+}
diff --git a/contrib/go/_std_1.18/src/math/rand/rng.go b/contrib/go/_std_1.19/src/math/rand/rng.go
index f305df1a20..f305df1a20 100644
--- a/contrib/go/_std_1.18/src/math/rand/rng.go
+++ b/contrib/go/_std_1.19/src/math/rand/rng.go
diff --git a/contrib/go/_std_1.18/src/math/rand/zipf.go b/contrib/go/_std_1.19/src/math/rand/zipf.go
index f04c814eb7..f04c814eb7 100644
--- a/contrib/go/_std_1.18/src/math/rand/zipf.go
+++ b/contrib/go/_std_1.19/src/math/rand/zipf.go
diff --git a/contrib/go/_std_1.19/src/math/remainder.go b/contrib/go/_std_1.19/src/math/remainder.go
new file mode 100644
index 0000000000..8e99345c59
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/remainder.go
@@ -0,0 +1,95 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// The original C code and the comment below are from
+// FreeBSD's /usr/src/lib/msun/src/e_remainder.c and came
+// with this notice. The go code is a simplified version of
+// the original C.
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+// __ieee754_remainder(x,y)
+// Return :
+// returns x REM y = x - [x/y]*y as if in infinite
+// precision arithmetic, where [x/y] is the (infinite bit)
+// integer nearest x/y (in half way cases, choose the even one).
+// Method :
+// Based on Mod() returning x - [x/y]chopped * y exactly.
+
+// Remainder returns the IEEE 754 floating-point remainder of x/y.
+//
+// Special cases are:
+//
+// Remainder(±Inf, y) = NaN
+// Remainder(NaN, y) = NaN
+// Remainder(x, 0) = NaN
+// Remainder(x, ±Inf) = x
+// Remainder(x, NaN) = NaN
+func Remainder(x, y float64) float64 {
+ if haveArchRemainder {
+ return archRemainder(x, y)
+ }
+ return remainder(x, y)
+}
+
+func remainder(x, y float64) float64 {
+ const (
+ Tiny = 4.45014771701440276618e-308 // 0x0020000000000000
+ HalfMax = MaxFloat64 / 2
+ )
+ // special cases
+ switch {
+ case IsNaN(x) || IsNaN(y) || IsInf(x, 0) || y == 0:
+ return NaN()
+ case IsInf(y, 0):
+ return x
+ }
+ sign := false
+ if x < 0 {
+ x = -x
+ sign = true
+ }
+ if y < 0 {
+ y = -y
+ }
+ if x == y {
+ if sign {
+ zero := 0.0
+ return -zero
+ }
+ return 0
+ }
+ if y <= HalfMax {
+ x = Mod(x, y+y) // now x < 2y
+ }
+ if y < Tiny {
+ if x+x > y {
+ x -= y
+ if x+x >= y {
+ x -= y
+ }
+ }
+ } else {
+ yHalf := 0.5 * y
+ if x > yHalf {
+ x -= y
+ if x >= yHalf {
+ x -= y
+ }
+ }
+ }
+ if sign {
+ x = -x
+ }
+ return x
+}
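+
+// A brief numeric sketch, assumed caller, not part of the upstream file,
+// contrasting Remainder (x/y rounded to the nearest integer) with Mod
+// (truncated quotient): for x = 7, y = 4, [x/y] is 2, while Mod uses 1.
+//
+// fmt.Println(math.Remainder(7, 4)) // -1: 7 - 2*4
+// fmt.Println(math.Mod(7, 4)) // 3: 7 - 1*4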
diff --git a/contrib/go/_std_1.18/src/math/signbit.go b/contrib/go/_std_1.19/src/math/signbit.go
index f6e61d660e..f6e61d660e 100644
--- a/contrib/go/_std_1.18/src/math/signbit.go
+++ b/contrib/go/_std_1.19/src/math/signbit.go
diff --git a/contrib/go/_std_1.19/src/math/sin.go b/contrib/go/_std_1.19/src/math/sin.go
new file mode 100644
index 0000000000..4793d7e7cd
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/sin.go
@@ -0,0 +1,244 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+/*
+ Floating-point sine and cosine.
+*/
+
+// The original C code, the long comment, and the constants
+// below were from http://netlib.sandia.gov/cephes/cmath/sin.c,
+// available from http://www.netlib.org/cephes/cmath.tgz.
+// The go code is a simplified version of the original C.
+//
+// sin.c
+//
+// Circular sine
+//
+// SYNOPSIS:
+//
+// double x, y, sin();
+// y = sin( x );
+//
+// DESCRIPTION:
+//
+// Range reduction is into intervals of pi/4. The reduction error is nearly
+// eliminated by contriving an extended precision modular arithmetic.
+//
+// Two polynomial approximating functions are employed.
+// Between 0 and pi/4 the sine is approximated by
+// x + x**3 P(x**2).
+// Between pi/4 and pi/2 the cosine is represented as
+// 1 - x**2 Q(x**2).
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// DEC 0, 10 150000 3.0e-17 7.8e-18
+// IEEE -1.07e9,+1.07e9 130000 2.1e-16 5.4e-17
+//
+// Partial loss of accuracy begins to occur at x = 2**30 = 1.074e9. The loss
+// is not gradual, but jumps suddenly to about 1 part in 10e7. Results may
+// be meaningless for x > 2**49 = 5.6e14.
+//
+// cos.c
+//
+// Circular cosine
+//
+// SYNOPSIS:
+//
+// double x, y, cos();
+// y = cos( x );
+//
+// DESCRIPTION:
+//
+// Range reduction is into intervals of pi/4. The reduction error is nearly
+// eliminated by contriving an extended precision modular arithmetic.
+//
+// Two polynomial approximating functions are employed.
+// Between 0 and pi/4 the cosine is approximated by
+// 1 - x**2 Q(x**2).
+// Between pi/4 and pi/2 the sine is represented as
+// x + x**3 P(x**2).
+//
+// ACCURACY:
+//
+// Relative error:
+// arithmetic domain # trials peak rms
+// IEEE -1.07e9,+1.07e9 130000 2.1e-16 5.4e-17
+// DEC 0,+1.07e9 17000 3.0e-17 7.2e-18
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+//
+// The readme file at http://netlib.sandia.gov/cephes/ says:
+// Some software in this archive may be from the book _Methods and
+// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+// International, 1989) or from the Cephes Mathematical Library, a
+// commercial product. In either event, it is copyrighted by the author.
+// What you see here may be used freely but it comes with no support or
+// guarantee.
+//
+// The two known misprints in the book are repaired here in the
+// source listings for the gamma function and the incomplete beta
+// integral.
+//
+// Stephen L. Moshier
+// moshier@na-net.ornl.gov
+
+// sin coefficients
+var _sin = [...]float64{
+ 1.58962301576546568060e-10, // 0x3de5d8fd1fd19ccd
+ -2.50507477628578072866e-8, // 0xbe5ae5e5a9291f5d
+ 2.75573136213857245213e-6, // 0x3ec71de3567d48a1
+ -1.98412698295895385996e-4, // 0xbf2a01a019bfdf03
+ 8.33333333332211858878e-3, // 0x3f8111111110f7d0
+ -1.66666666666666307295e-1, // 0xbfc5555555555548
+}
+
+// cos coefficients
+var _cos = [...]float64{
+ -1.13585365213876817300e-11, // 0xbda8fa49a0861a9b
+ 2.08757008419747316778e-9, // 0x3e21ee9d7b4e3f05
+ -2.75573141792967388112e-7, // 0xbe927e4f7eac4bc6
+ 2.48015872888517045348e-5, // 0x3efa01a019c844f5
+ -1.38888888888730564116e-3, // 0xbf56c16c16c14f91
+ 4.16666666666665929218e-2, // 0x3fa555555555554b
+}
+
+// Cos returns the cosine of the radian argument x.
+//
+// Special cases are:
+//
+// Cos(±Inf) = NaN
+// Cos(NaN) = NaN
+func Cos(x float64) float64 {
+ if haveArchCos {
+ return archCos(x)
+ }
+ return cos(x)
+}
+
+func cos(x float64) float64 {
+ const (
+ PI4A = 7.85398125648498535156e-1 // 0x3fe921fb40000000, Pi/4 split into three parts
+ PI4B = 3.77489470793079817668e-8 // 0x3e64442d00000000,
+ PI4C = 2.69515142907905952645e-15 // 0x3ce8469898cc5170,
+ )
+ // special cases
+ switch {
+ case IsNaN(x) || IsInf(x, 0):
+ return NaN()
+ }
+
+ // make argument positive
+ sign := false
+ x = Abs(x)
+
+ var j uint64
+ var y, z float64
+ if x >= reduceThreshold {
+ j, z = trigReduce(x)
+ } else {
+ j = uint64(x * (4 / Pi)) // integer part of x/(Pi/4), as integer for tests on the phase angle
+ y = float64(j) // integer part of x/(Pi/4), as float
+
+ // map zeros to origin
+ if j&1 == 1 {
+ j++
+ y++
+ }
+ j &= 7 // octant modulo 2Pi radians (360 degrees)
+ z = ((x - y*PI4A) - y*PI4B) - y*PI4C // Extended precision modular arithmetic
+ }
+
+ if j > 3 {
+ j -= 4
+ sign = !sign
+ }
+ if j > 1 {
+ sign = !sign
+ }
+
+ zz := z * z
+ if j == 1 || j == 2 {
+ y = z + z*zz*((((((_sin[0]*zz)+_sin[1])*zz+_sin[2])*zz+_sin[3])*zz+_sin[4])*zz+_sin[5])
+ } else {
+ y = 1.0 - 0.5*zz + zz*zz*((((((_cos[0]*zz)+_cos[1])*zz+_cos[2])*zz+_cos[3])*zz+_cos[4])*zz+_cos[5])
+ }
+ if sign {
+ y = -y
+ }
+ return y
+}
+
+// Sin returns the sine of the radian argument x.
+//
+// Special cases are:
+//
+// Sin(±0) = ±0
+// Sin(±Inf) = NaN
+// Sin(NaN) = NaN
+func Sin(x float64) float64 {
+ if haveArchSin {
+ return archSin(x)
+ }
+ return sin(x)
+}
+
+func sin(x float64) float64 {
+ const (
+ PI4A = 7.85398125648498535156e-1 // 0x3fe921fb40000000, Pi/4 split into three parts
+ PI4B = 3.77489470793079817668e-8 // 0x3e64442d00000000,
+ PI4C = 2.69515142907905952645e-15 // 0x3ce8469898cc5170,
+ )
+ // special cases
+ switch {
+ case x == 0 || IsNaN(x):
+ return x // return ±0 || NaN()
+ case IsInf(x, 0):
+ return NaN()
+ }
+
+ // make argument positive but save the sign
+ sign := false
+ if x < 0 {
+ x = -x
+ sign = true
+ }
+
+ var j uint64
+ var y, z float64
+ if x >= reduceThreshold {
+ j, z = trigReduce(x)
+ } else {
+ j = uint64(x * (4 / Pi)) // integer part of x/(Pi/4), as integer for tests on the phase angle
+ y = float64(j) // integer part of x/(Pi/4), as float
+
+ // map zeros to origin
+ if j&1 == 1 {
+ j++
+ y++
+ }
+ j &= 7 // octant modulo 2Pi radians (360 degrees)
+ z = ((x - y*PI4A) - y*PI4B) - y*PI4C // Extended precision modular arithmetic
+ }
+ // reflect in x axis
+ if j > 3 {
+ sign = !sign
+ j -= 4
+ }
+ zz := z * z
+ if j == 1 || j == 2 {
+ y = 1.0 - 0.5*zz + zz*zz*((((((_cos[0]*zz)+_cos[1])*zz+_cos[2])*zz+_cos[3])*zz+_cos[4])*zz+_cos[5])
+ } else {
+ y = z + z*zz*((((((_sin[0]*zz)+_sin[1])*zz+_sin[2])*zz+_sin[3])*zz+_sin[4])*zz+_sin[5])
+ }
+ if sign {
+ y = -y
+ }
+ return y
+}
diff --git a/contrib/go/_std_1.19/src/math/sincos.go b/contrib/go/_std_1.19/src/math/sincos.go
new file mode 100644
index 0000000000..e3fb96094f
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/sincos.go
@@ -0,0 +1,73 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// Coefficients _sin[] and _cos[] are found in pkg/math/sin.go.
+
+// Sincos returns Sin(x), Cos(x).
+//
+// Special cases are:
+//
+// Sincos(±0) = ±0, 1
+// Sincos(±Inf) = NaN, NaN
+// Sincos(NaN) = NaN, NaN
+func Sincos(x float64) (sin, cos float64) {
+ const (
+ PI4A = 7.85398125648498535156e-1 // 0x3fe921fb40000000, Pi/4 split into three parts
+ PI4B = 3.77489470793079817668e-8 // 0x3e64442d00000000,
+ PI4C = 2.69515142907905952645e-15 // 0x3ce8469898cc5170,
+ )
+ // special cases
+ switch {
+ case x == 0:
+ return x, 1 // return ±0.0, 1.0
+ case IsNaN(x) || IsInf(x, 0):
+ return NaN(), NaN()
+ }
+
+ // make argument positive
+ sinSign, cosSign := false, false
+ if x < 0 {
+ x = -x
+ sinSign = true
+ }
+
+ var j uint64
+ var y, z float64
+ if x >= reduceThreshold {
+ j, z = trigReduce(x)
+ } else {
+ j = uint64(x * (4 / Pi)) // integer part of x/(Pi/4), as integer for tests on the phase angle
+ y = float64(j) // integer part of x/(Pi/4), as float
+
+ if j&1 == 1 { // map zeros to origin
+ j++
+ y++
+ }
+ j &= 7 // octant modulo 2Pi radians (360 degrees)
+ z = ((x - y*PI4A) - y*PI4B) - y*PI4C // Extended precision modular arithmetic
+ }
+ if j > 3 { // reflect in x axis
+ j -= 4
+ sinSign, cosSign = !sinSign, !cosSign
+ }
+ if j > 1 {
+ cosSign = !cosSign
+ }
+
+ zz := z * z
+ cos = 1.0 - 0.5*zz + zz*zz*((((((_cos[0]*zz)+_cos[1])*zz+_cos[2])*zz+_cos[3])*zz+_cos[4])*zz+_cos[5])
+ sin = z + z*zz*((((((_sin[0]*zz)+_sin[1])*zz+_sin[2])*zz+_sin[3])*zz+_sin[4])*zz+_sin[5])
+ if j == 1 || j == 2 {
+ sin, cos = cos, sin
+ }
+ if cosSign {
+ cos = -cos
+ }
+ if sinSign {
+ sin = -sin
+ }
+ return
+}
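+
+// A usage sketch, assumed caller, not part of the upstream file: Sincos
+// shares a single argument reduction between both results.
+//
+// s, c := math.Sincos(math.Pi / 3)
+// fmt.Printf("%.3f %.3f\n", s, c) // 0.866 0.500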
diff --git a/contrib/go/_std_1.19/src/math/sinh.go b/contrib/go/_std_1.19/src/math/sinh.go
new file mode 100644
index 0000000000..78b3c299d6
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/sinh.go
@@ -0,0 +1,93 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+/*
+ Floating-point hyperbolic sine and cosine.
+
+ The exponential func is called for arguments
+ greater in magnitude than 0.5.
+
+ A series is used for arguments smaller in magnitude than 0.5.
+
+ Cosh(x) is computed from the exponential func for
+ all arguments.
+*/
+
+// Sinh returns the hyperbolic sine of x.
+//
+// Special cases are:
+//
+// Sinh(±0) = ±0
+// Sinh(±Inf) = ±Inf
+// Sinh(NaN) = NaN
+func Sinh(x float64) float64 {
+ if haveArchSinh {
+ return archSinh(x)
+ }
+ return sinh(x)
+}
+
+func sinh(x float64) float64 {
+ // The coefficients are #2029 from Hart & Cheney. (20.36D)
+ const (
+ P0 = -0.6307673640497716991184787251e+6
+ P1 = -0.8991272022039509355398013511e+5
+ P2 = -0.2894211355989563807284660366e+4
+ P3 = -0.2630563213397497062819489e+2
+ Q0 = -0.6307673640497716991212077277e+6
+ Q1 = 0.1521517378790019070696485176e+5
+ Q2 = -0.173678953558233699533450911e+3
+ )
+
+ sign := false
+ if x < 0 {
+ x = -x
+ sign = true
+ }
+
+ var temp float64
+ switch {
+ case x > 21:
+ temp = Exp(x) * 0.5
+
+ case x > 0.5:
+ ex := Exp(x)
+ temp = (ex - 1/ex) * 0.5
+
+ default:
+ sq := x * x
+ temp = (((P3*sq+P2)*sq+P1)*sq + P0) * x
+ temp = temp / (((sq+Q2)*sq+Q1)*sq + Q0)
+ }
+
+ if sign {
+ temp = -temp
+ }
+ return temp
+}
+
+// Cosh returns the hyperbolic cosine of x.
+//
+// Special cases are:
+//
+// Cosh(±0) = 1
+// Cosh(±Inf) = +Inf
+// Cosh(NaN) = NaN
+func Cosh(x float64) float64 {
+ if haveArchCosh {
+ return archCosh(x)
+ }
+ return cosh(x)
+}
+
+func cosh(x float64) float64 {
+ x = Abs(x)
+ if x > 21 {
+ return Exp(x) * 0.5
+ }
+ ex := Exp(x)
+ return (ex + 1/ex) * 0.5
+}
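+
+// A quick numeric sketch, assumed caller, not part of the upstream file:
+// checking the identity Cosh(x)**2 - Sinh(x)**2 = 1 at a modest argument.
+//
+// x := 1.5
+// d := math.Cosh(x)*math.Cosh(x) - math.Sinh(x)*math.Sinh(x)
+// fmt.Println(math.Abs(d-1) < 1e-12) // true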
diff --git a/contrib/go/_std_1.19/src/math/sqrt.go b/contrib/go/_std_1.19/src/math/sqrt.go
new file mode 100644
index 0000000000..b6d80c2c6f
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/sqrt.go
@@ -0,0 +1,150 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// The original C code and the long comment below are
+// from FreeBSD's /usr/src/lib/msun/src/e_sqrt.c and
+// came with this notice. The go code is a simplified
+// version of the original C.
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+// __ieee754_sqrt(x)
+// Return correctly rounded sqrt.
+// -----------------------------------------
+// | Use the hardware sqrt if you have one |
+// -----------------------------------------
+// Method:
+// Bit by bit method using integer arithmetic. (Slow, but portable)
+// 1. Normalization
+// Scale x to y in [1,4) with even powers of 2:
+// find an integer k such that 1 <= (y=x*2**(2k)) < 4, then
+// sqrt(x) = 2**k * sqrt(y)
+// 2. Bit by bit computation
+// Let q_i = sqrt(y) truncated to i bit after binary point (q_0 = 1),
+//
+// s_i = 2*q_i, and y_i = 2**(i+1) * (y - q_i**2). (1)
+//
+// To compute q_(i+1) from q_i, one checks whether
+//
+// (q_i + 2**-(i+1))**2 <= y. (2)
+//
+// If (2) is false, then q_(i+1) = q_i; otherwise q_(i+1) = q_i + 2**-(i+1).
+//
+// With some algebraic manipulation, it is not difficult to see
+// that (2) is equivalent to
+//
+// s_i + 2**-(i+1) <= y_i. (3)
+//
+// The advantage of (3) is that s_i and y_i can be computed by
+// the following recurrence formula:
+// if (3) is false
+//
+// s_(i+1) = s_i, y_(i+1) = y_i; (4)
+//
+// otherwise,
+//
+// s_(i+1) = s_i + 2**-i, y_(i+1) = y_i - s_i - 2**-(i+1). (5)
+//
+// One may easily use induction to prove (4) and (5).
+// Note. Since the left hand side of (3) contain only i+2 bits,
+// it is not necessary to do a full (53-bit) comparison
+// in (3).
+// 3. Final rounding
+// After generating the 53 bits result, we compute one more bit.
+// Together with the remainder, we can decide whether the
+// result is exact, bigger than 1/2ulp, or less than 1/2ulp
+// (it will never equal to 1/2ulp).
+// The rounding mode can be detected by checking whether
+// huge + tiny is equal to huge, and whether huge - tiny is
+// equal to huge for some floating point number "huge" and "tiny".
+//
+//
+// Notes: Rounding mode detection omitted. The constants "mask", "shift",
+// and "bias" are found in src/math/bits.go
+
+// Sqrt returns the square root of x.
+//
+// Special cases are:
+//
+// Sqrt(+Inf) = +Inf
+// Sqrt(±0) = ±0
+// Sqrt(x < 0) = NaN
+// Sqrt(NaN) = NaN
+func Sqrt(x float64) float64 {
+ if haveArchSqrt {
+ return archSqrt(x)
+ }
+ return sqrt(x)
+}
+
+// Note: Sqrt is implemented in assembly on some systems.
+// Others have assembly stubs that jump to func sqrt below.
+// On systems where Sqrt is a single instruction, the compiler
+// may turn a direct call into a direct use of that instruction instead.
+
+func sqrt(x float64) float64 {
+ // special cases
+ switch {
+ case x == 0 || IsNaN(x) || IsInf(x, 1):
+ return x
+ case x < 0:
+ return NaN()
+ }
+ ix := Float64bits(x)
+ // normalize x
+ exp := int((ix >> shift) & mask)
+ if exp == 0 { // subnormal x
+ for ix&(1<<shift) == 0 {
+ ix <<= 1
+ exp--
+ }
+ exp++
+ }
+ exp -= bias // unbias exponent
+ ix &^= mask << shift
+ ix |= 1 << shift
+ if exp&1 == 1 { // odd exp, double x to make it even
+ ix <<= 1
+ }
+ exp >>= 1 // exp = exp/2, exponent of square root
+ // generate sqrt(x) bit by bit
+ ix <<= 1
+ var q, s uint64 // q = sqrt(x)
+ r := uint64(1 << (shift + 1)) // r = moving bit from MSB to LSB
+ for r != 0 {
+ t := s + r
+ if t <= ix {
+ s = t + r
+ ix -= t
+ q += r
+ }
+ ix <<= 1
+ r >>= 1
+ }
+ // final rounding
+ if ix != 0 { // remainder, result not exact
+ q += q & 1 // round according to extra bit
+ }
+ ix = q>>1 + uint64(exp-1+bias)<<shift // significand + biased exponent
+ return Float64frombits(ix)
+}
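+
+// A brief numeric sketch, assumed caller, not part of the upstream file, of
+// the normalization in step 1 above: sqrt(x) = 2**k * sqrt(y) with y in [1,4),
+// e.g. 48 = 2**4 * 3, so sqrt(48) = 4 * sqrt(3) exactly, even after rounding,
+// because scaling by a power of two is exact.
+//
+// fmt.Println(math.Sqrt(48) == 4*math.Sqrt(3)) // true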
diff --git a/contrib/go/_std_1.18/src/math/sqrt_amd64.s b/contrib/go/_std_1.19/src/math/sqrt_amd64.s
index c3b110e7c0..c3b110e7c0 100644
--- a/contrib/go/_std_1.18/src/math/sqrt_amd64.s
+++ b/contrib/go/_std_1.19/src/math/sqrt_amd64.s
diff --git a/contrib/go/_std_1.18/src/math/sqrt_asm.go b/contrib/go/_std_1.19/src/math/sqrt_asm.go
index 2cec1a5903..2cec1a5903 100644
--- a/contrib/go/_std_1.18/src/math/sqrt_asm.go
+++ b/contrib/go/_std_1.19/src/math/sqrt_asm.go
diff --git a/contrib/go/_std_1.18/src/math/stubs.go b/contrib/go/_std_1.19/src/math/stubs.go
index c4350d4b87..c4350d4b87 100644
--- a/contrib/go/_std_1.18/src/math/stubs.go
+++ b/contrib/go/_std_1.19/src/math/stubs.go
diff --git a/contrib/go/_std_1.19/src/math/tan.go b/contrib/go/_std_1.19/src/math/tan.go
new file mode 100644
index 0000000000..8f6e71e82b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/tan.go
@@ -0,0 +1,140 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+/*
+ Floating-point tangent.
+*/
+
+// The original C code, the long comment, and the constants
+// below were from http://netlib.sandia.gov/cephes/cmath/sin.c,
+// available from http://www.netlib.org/cephes/cmath.tgz.
+// The go code is a simplified version of the original C.
+//
+// tan.c
+//
+// Circular tangent
+//
+// SYNOPSIS:
+//
+// double x, y, tan();
+// y = tan( x );
+//
+// DESCRIPTION:
+//
+// Returns the circular tangent of the radian argument x.
+//
+// Range reduction is modulo pi/4. A rational function
+// x + x**3 P(x**2)/Q(x**2)
+// is employed in the basic interval [0, pi/4].
+//
+// ACCURACY:
+// Relative error:
+// arithmetic domain # trials peak rms
+// DEC +-1.07e9 44000 4.1e-17 1.0e-17
+// IEEE +-1.07e9 30000 2.9e-16 8.1e-17
+//
+// Partial loss of accuracy begins to occur at x = 2**30 = 1.074e9. The loss
+// is not gradual, but jumps suddenly to about 1 part in 10e7. Results may
+// be meaningless for x > 2**49 = 5.6e14.
+// [Accuracy loss statement from sin.go comments.]
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+//
+// The readme file at http://netlib.sandia.gov/cephes/ says:
+// Some software in this archive may be from the book _Methods and
+// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+// International, 1989) or from the Cephes Mathematical Library, a
+// commercial product. In either event, it is copyrighted by the author.
+// What you see here may be used freely but it comes with no support or
+// guarantee.
+//
+// The two known misprints in the book are repaired here in the
+// source listings for the gamma function and the incomplete beta
+// integral.
+//
+// Stephen L. Moshier
+// moshier@na-net.ornl.gov
+
+// tan coefficients
+var _tanP = [...]float64{
+ -1.30936939181383777646e4, // 0xc0c992d8d24f3f38
+ 1.15351664838587416140e6, // 0x413199eca5fc9ddd
+ -1.79565251976484877988e7, // 0xc1711fead3299176
+}
+var _tanQ = [...]float64{
+ 1.00000000000000000000e0,
+ 1.36812963470692954678e4, // 0x40cab8a5eeb36572
+ -1.32089234440210967447e6, // 0xc13427bc582abc96
+ 2.50083801823357915839e7, // 0x4177d98fc2ead8ef
+ -5.38695755929454629881e7, // 0xc189afe03cbe5a31
+}
+
+// Tan returns the tangent of the radian argument x.
+//
+// Special cases are:
+//
+// Tan(±0) = ±0
+// Tan(±Inf) = NaN
+// Tan(NaN) = NaN
+func Tan(x float64) float64 {
+ if haveArchTan {
+ return archTan(x)
+ }
+ return tan(x)
+}
+
+func tan(x float64) float64 {
+ const (
+ PI4A = 7.85398125648498535156e-1 // 0x3fe921fb40000000, Pi/4 split into three parts
+ PI4B = 3.77489470793079817668e-8 // 0x3e64442d00000000,
+ PI4C = 2.69515142907905952645e-15 // 0x3ce8469898cc5170,
+ )
+ // special cases
+ switch {
+ case x == 0 || IsNaN(x):
+ return x // return ±0 || NaN()
+ case IsInf(x, 0):
+ return NaN()
+ }
+
+ // make argument positive but save the sign
+ sign := false
+ if x < 0 {
+ x = -x
+ sign = true
+ }
+ var j uint64
+ var y, z float64
+ if x >= reduceThreshold {
+ j, z = trigReduce(x)
+ } else {
+ j = uint64(x * (4 / Pi)) // integer part of x/(Pi/4), as integer for tests on the phase angle
+ y = float64(j) // integer part of x/(Pi/4), as float
+
+ /* map zeros and singularities to origin */
+ if j&1 == 1 {
+ j++
+ y++
+ }
+
+ z = ((x - y*PI4A) - y*PI4B) - y*PI4C
+ }
+ zz := z * z
+
+ if zz > 1e-14 {
+ y = z + z*(zz*(((_tanP[0]*zz)+_tanP[1])*zz+_tanP[2])/((((zz+_tanQ[1])*zz+_tanQ[2])*zz+_tanQ[3])*zz+_tanQ[4]))
+ } else {
+ y = z
+ }
+ if j&2 == 2 {
+ y = -1 / y
+ }
+ if sign {
+ y = -y
+ }
+ return y
+}
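+
+// A quick numeric sketch, assumed caller, not part of the upstream file:
+// tan is computed on the reduced interval and extended by the -1/y
+// reflection when bit 1 of the octant index j is set, as in the code above.
+//
+// fmt.Printf("%.6f\n", math.Tan(math.Pi/4)) // 1.000000 (up to rounding in Pi)
+// fmt.Printf("%.6f\n", math.Tan(math.Pi/4+math.Pi)) // 1.000000 as well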
diff --git a/contrib/go/_std_1.19/src/math/tanh.go b/contrib/go/_std_1.19/src/math/tanh.go
new file mode 100644
index 0000000000..94ebc3b651
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/tanh.go
@@ -0,0 +1,105 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+// The original C code, the long comment, and the constants
+// below were from http://netlib.sandia.gov/cephes/cmath/tanh.c,
+// available from http://www.netlib.org/cephes/cmath.tgz.
+// The go code is a simplified version of the original C.
+// tanh.c
+//
+// Hyperbolic tangent
+//
+// SYNOPSIS:
+//
+// double x, y, tanh();
+//
+// y = tanh( x );
+//
+// DESCRIPTION:
+//
+// Returns hyperbolic tangent of argument in the range MINLOG to MAXLOG.
+// MAXLOG = 8.8029691931113054295988e+01 = log(2**127)
+// MINLOG = -8.872283911167299960540e+01 = log(2**-128)
+//
+// A rational function is used for |x| < 0.625. The form
+// x + x**3 P(x)/Q(x) of Cody & Waite is employed.
+// Otherwise,
+// tanh(x) = sinh(x)/cosh(x) = 1 - 2/(exp(2x) + 1).
+//
+// ACCURACY:
+//
+// Relative error:
+//      arithmetic   domain   # trials   peak      rms
+//      IEEE         -2,2     30000      2.5e-16   5.8e-17
+//
+// Cephes Math Library Release 2.8: June, 2000
+// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier
+//
+// The readme file at http://netlib.sandia.gov/cephes/ says:
+// Some software in this archive may be from the book _Methods and
+// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster
+// International, 1989) or from the Cephes Mathematical Library, a
+// commercial product. In either event, it is copyrighted by the author.
+// What you see here may be used freely but it comes with no support or
+// guarantee.
+//
+// The two known misprints in the book are repaired here in the
+// source listings for the gamma function and the incomplete beta
+// integral.
+//
+// Stephen L. Moshier
+// moshier@na-net.ornl.gov
+//
+
+var tanhP = [...]float64{
+ -9.64399179425052238628e-1,
+ -9.92877231001918586564e1,
+ -1.61468768441708447952e3,
+}
+var tanhQ = [...]float64{
+ 1.12811678491632931402e2,
+ 2.23548839060100448583e3,
+ 4.84406305325125486048e3,
+}
+
+// Tanh returns the hyperbolic tangent of x.
+//
+// Special cases are:
+//
+// Tanh(±0) = ±0
+// Tanh(±Inf) = ±1
+// Tanh(NaN) = NaN
+func Tanh(x float64) float64 {
+ if haveArchTanh {
+ return archTanh(x)
+ }
+ return tanh(x)
+}
+
+func tanh(x float64) float64 {
+ const MAXLOG = 8.8029691931113054295988e+01 // log(2**127)
+ z := Abs(x)
+ switch {
+ case z > 0.5*MAXLOG:
+ if x < 0 {
+ return -1
+ }
+ return 1
+ case z >= 0.625:
+ s := Exp(2 * z)
+ z = 1 - 2/(s+1)
+ if x < 0 {
+ z = -z
+ }
+ default:
+ if x == 0 {
+ return x
+ }
+ s := x * x
+ z = x + x*s*((tanhP[0]*s+tanhP[1])*s+tanhP[2])/(((s+tanhQ[0])*s+tanhQ[1])*s+tanhQ[2])
+ }
+ return z
+}
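+
+// A minimal sketch (editorial, not upstream) of the three branches above:
+//
+//	Tanh(100)  // |x| > 0.5*MAXLOG: saturates to ±1
+//	Tanh(1)    // |x| >= 0.625: 1 - 2/(Exp(2)+1) ≈ 0.76159
+//	Tanh(0.1)  // |x| < 0.625: rational approximation, ≈ 0.09967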
diff --git a/contrib/go/_std_1.19/src/math/trig_reduce.go b/contrib/go/_std_1.19/src/math/trig_reduce.go
new file mode 100644
index 0000000000..5ecdd8375e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/math/trig_reduce.go
@@ -0,0 +1,102 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+import (
+ "math/bits"
+)
+
+// reduceThreshold is the maximum value of x where the reduction using Pi/4
+// in 3 float64 parts still gives accurate results. This threshold
+// is set by y*C being representable as a float64 without error
+// where y is given by y = floor(x * (4 / Pi)) and C is the leading partial
+// terms of 4/Pi. Since the leading terms (PI4A and PI4B in sin.go) have 30
+// and 32 trailing zero bits, y should have less than 30 significant bits.
+//
+// y < 1<<30 -> floor(x*4/Pi) < 1<<30 -> x < (1<<30 - 1) * Pi/4
+//
+// So, conservatively we can take x < 1<<29.
+// Above this threshold Payne-Hanek range reduction must be used.
+const reduceThreshold = 1 << 29
+
+// trigReduce implements Payne-Hanek range reduction by Pi/4
+// for x > 0. It returns the integer part mod 8 (j) and
+// the fractional part (z) of x / (Pi/4).
+// The implementation is based on:
+// "ARGUMENT REDUCTION FOR HUGE ARGUMENTS: Good to the Last Bit"
+// K. C. Ng et al, March 24, 1992
+// The simulated multi-precision calculation of x*B uses 64-bit integer arithmetic.
+func trigReduce(x float64) (j uint64, z float64) {
+ const PI4 = Pi / 4
+ if x < PI4 {
+ return 0, x
+ }
+ // Extract out the integer and exponent such that,
+ // x = ix * 2 ** exp.
+ ix := Float64bits(x)
+ exp := int(ix>>shift&mask) - bias - shift
+ ix &^= mask << shift
+ ix |= 1 << shift
+ // Use the exponent to extract the 3 appropriate uint64 digits from mPi4,
+ // B ~ (z0, z1, z2), such that the product leading digit has the exponent -61.
+ // Note, exp >= -53 since x >= PI4 and exp < 971 for maximum float64.
+ digit, bitshift := uint(exp+61)/64, uint(exp+61)%64
+ z0 := (mPi4[digit] << bitshift) | (mPi4[digit+1] >> (64 - bitshift))
+ z1 := (mPi4[digit+1] << bitshift) | (mPi4[digit+2] >> (64 - bitshift))
+ z2 := (mPi4[digit+2] << bitshift) | (mPi4[digit+3] >> (64 - bitshift))
+ // Multiply mantissa by the digits and extract the upper two digits (hi, lo).
+ z2hi, _ := bits.Mul64(z2, ix)
+ z1hi, z1lo := bits.Mul64(z1, ix)
+ z0lo := z0 * ix
+ lo, c := bits.Add64(z1lo, z2hi, 0)
+ hi, _ := bits.Add64(z0lo, z1hi, c)
+ // The top 3 bits are j.
+ j = hi >> 61
+ // Extract the fraction and find its magnitude.
+ hi = hi<<3 | lo>>61
+ lz := uint(bits.LeadingZeros64(hi))
+ e := uint64(bias - (lz + 1))
+ // Clear implicit mantissa bit and shift into place.
+ hi = (hi << (lz + 1)) | (lo >> (64 - (lz + 1)))
+ hi >>= 64 - shift
+ // Include the exponent and convert to a float.
+ hi |= e << shift
+ z = Float64frombits(hi)
+ // Map zeros to origin.
+ if j&1 == 1 {
+ j++
+ j &= 7
+ z--
+ }
+ // Multiply the fractional part by pi/4.
+ return j, z * PI4
+}
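+
+// To illustrate (an editorial note, not upstream code): trigReduce
+// decomposes x as x ≡ j*(Pi/4) + z (mod 2*Pi), with j in [0, 8) and
+// 0 <= z < Pi/4, so callers such as Sin, Cos and Tan use j to select the
+// octant (which polynomial and sign apply) and z as the small argument:
+//
+//	j, z := trigReduce(x) // only when x >= reduceThreshold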
+
+// mPi4 is the binary digits of 4/pi as a uint64 array,
+// that is, 4/pi = Sum mPi4[i]*2^(-64*i)
+// 19 64-bit digits and the leading one bit give 1217 bits
+// of precision to handle the largest possible float64 exponent.
+var mPi4 = [...]uint64{
+ 0x0000000000000001,
+ 0x45f306dc9c882a53,
+ 0xf84eafa3ea69bb81,
+ 0xb6c52b3278872083,
+ 0xfca2c757bd778ac3,
+ 0x6e48dc74849ba5c0,
+ 0x0c925dd413a32439,
+ 0xfc3bd63962534e7d,
+ 0xd1046bea5d768909,
+ 0xd338e04d68befc82,
+ 0x7323ac7306a673e9,
+ 0x3908bf177bf25076,
+ 0x3ff12fffbc0b301f,
+ 0xde5e2316b414da3e,
+ 0xda6cfd9e4f96136e,
+ 0x9e8c7ecd3cbfd45a,
+ 0xea4f758fd7cbe2f6,
+ 0x7a0e73ef14a525d4,
+ 0xd7f6bf623f1aba10,
+ 0xac06608df8f6d757,
+}
diff --git a/contrib/go/_std_1.18/src/math/unsafe.go b/contrib/go/_std_1.19/src/math/unsafe.go
index e59f50ca62..e59f50ca62 100644
--- a/contrib/go/_std_1.18/src/math/unsafe.go
+++ b/contrib/go/_std_1.19/src/math/unsafe.go
diff --git a/contrib/go/_std_1.18/src/mime/encodedword.go b/contrib/go/_std_1.19/src/mime/encodedword.go
index e6b470b1fb..e6b470b1fb 100644
--- a/contrib/go/_std_1.18/src/mime/encodedword.go
+++ b/contrib/go/_std_1.19/src/mime/encodedword.go
diff --git a/contrib/go/_std_1.18/src/mime/grammar.go b/contrib/go/_std_1.19/src/mime/grammar.go
index 6a6f71dbd4..6a6f71dbd4 100644
--- a/contrib/go/_std_1.18/src/mime/grammar.go
+++ b/contrib/go/_std_1.19/src/mime/grammar.go
diff --git a/contrib/go/_std_1.18/src/mime/mediatype.go b/contrib/go/_std_1.19/src/mime/mediatype.go
index 6c1b095065..6c1b095065 100644
--- a/contrib/go/_std_1.18/src/mime/mediatype.go
+++ b/contrib/go/_std_1.19/src/mime/mediatype.go
diff --git a/contrib/go/_std_1.18/src/mime/multipart/formdata.go b/contrib/go/_std_1.19/src/mime/multipart/formdata.go
index fca5f9e15f..fca5f9e15f 100644
--- a/contrib/go/_std_1.18/src/mime/multipart/formdata.go
+++ b/contrib/go/_std_1.19/src/mime/multipart/formdata.go
diff --git a/contrib/go/_std_1.19/src/mime/multipart/multipart.go b/contrib/go/_std_1.19/src/mime/multipart/multipart.go
new file mode 100644
index 0000000000..aa05ac8f9c
--- /dev/null
+++ b/contrib/go/_std_1.19/src/mime/multipart/multipart.go
@@ -0,0 +1,447 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+
+/*
+Package multipart implements MIME multipart parsing, as defined in RFC
+2046.
+
+The implementation is sufficient for HTTP (RFC 2388) and the multipart
+bodies generated by popular browsers.
+*/
+package multipart
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "mime"
+ "mime/quotedprintable"
+ "net/textproto"
+ "path/filepath"
+ "strings"
+)
+
+var emptyParams = make(map[string]string)
+
+// This constant needs to be at least 76 for this package to work correctly.
+// This is because \r\n--separator_of_len_70- would fill the buffer and it
+// wouldn't be safe to consume a single byte from it.
+const peekBufferSize = 4096
+
+// A Part represents a single part in a multipart body.
+type Part struct {
+ // The headers of the body, if any, with the keys canonicalized
+ // in the same fashion that the Go http.Request headers are.
+ // For example, "foo-bar" changes case to "Foo-Bar"
+ Header textproto.MIMEHeader
+
+ mr *Reader
+
+ disposition string
+ dispositionParams map[string]string
+
+ // r is either a reader directly reading from mr, or it's a
+ // wrapper around such a reader, decoding the
+ // Content-Transfer-Encoding
+ r io.Reader
+
+ n int // known data bytes waiting in mr.bufReader
+ total int64 // total data bytes read already
+ err error // error to return when n == 0
+ readErr error // read error observed from mr.bufReader
+}
+
+// FormName returns the name parameter if p has a Content-Disposition
+// of type "form-data". Otherwise it returns the empty string.
+func (p *Part) FormName() string {
+ // See https://tools.ietf.org/html/rfc2183 section 2 for EBNF
+ // of Content-Disposition value format.
+ if p.dispositionParams == nil {
+ p.parseContentDisposition()
+ }
+ if p.disposition != "form-data" {
+ return ""
+ }
+ return p.dispositionParams["name"]
+}
+
+// FileName returns the filename parameter of the Part's Content-Disposition
+// header. If not empty, the filename is passed through filepath.Base (which is
+// platform dependent) before being returned.
+func (p *Part) FileName() string {
+ if p.dispositionParams == nil {
+ p.parseContentDisposition()
+ }
+ filename := p.dispositionParams["filename"]
+ if filename == "" {
+ return ""
+ }
+ // RFC 7578, Section 4.2 requires that if a filename is provided, the
+ // directory path information must not be used.
+ return filepath.Base(filename)
+}
+
+func (p *Part) parseContentDisposition() {
+ v := p.Header.Get("Content-Disposition")
+ var err error
+ p.disposition, p.dispositionParams, err = mime.ParseMediaType(v)
+ if err != nil {
+ p.dispositionParams = emptyParams
+ }
+}
+
+// NewReader creates a new multipart Reader reading from r using the
+// given MIME boundary.
+//
+// The boundary is usually obtained from the "boundary" parameter of
+// the message's "Content-Type" header. Use mime.ParseMediaType to
+// parse such headers.
+func NewReader(r io.Reader, boundary string) *Reader {
+ b := []byte("\r\n--" + boundary + "--")
+ return &Reader{
+ bufReader: bufio.NewReaderSize(&stickyErrorReader{r: r}, peekBufferSize),
+ nl: b[:2],
+ nlDashBoundary: b[:len(b)-2],
+ dashBoundaryDash: b[2:],
+ dashBoundary: b[2 : len(b)-2],
+ }
+}
+
+// stickyErrorReader is an io.Reader which never calls Read on its
+// underlying Reader once an error has been seen. (the io.Reader
+// interface's contract promises nothing about the return values of
+// Read calls after an error, yet this package does do multiple Reads
+// after error)
+type stickyErrorReader struct {
+ r io.Reader
+ err error
+}
+
+func (r *stickyErrorReader) Read(p []byte) (n int, _ error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ n, r.err = r.r.Read(p)
+ return n, r.err
+}
+
+func newPart(mr *Reader, rawPart bool) (*Part, error) {
+ bp := &Part{
+ Header: make(map[string][]string),
+ mr: mr,
+ }
+ if err := bp.populateHeaders(); err != nil {
+ return nil, err
+ }
+ bp.r = partReader{bp}
+
+ // rawPart is used to switch between Part.NextPart and Part.NextRawPart.
+ if !rawPart {
+ const cte = "Content-Transfer-Encoding"
+ if strings.EqualFold(bp.Header.Get(cte), "quoted-printable") {
+ bp.Header.Del(cte)
+ bp.r = quotedprintable.NewReader(bp.r)
+ }
+ }
+ return bp, nil
+}
+
+func (p *Part) populateHeaders() error {
+ r := textproto.NewReader(p.mr.bufReader)
+ header, err := r.ReadMIMEHeader()
+ if err == nil {
+ p.Header = header
+ }
+ return err
+}
+
+// Read reads the body of a part, after its headers and before the
+// next part (if any) begins.
+func (p *Part) Read(d []byte) (n int, err error) {
+ return p.r.Read(d)
+}
+
+// partReader implements io.Reader by reading raw bytes directly from the
+// wrapped *Part, without doing any Transfer-Encoding decoding.
+type partReader struct {
+ p *Part
+}
+
+func (pr partReader) Read(d []byte) (int, error) {
+ p := pr.p
+ br := p.mr.bufReader
+
+ // Read into buffer until we identify some data to return,
+ // or we find a reason to stop (boundary or read error).
+ for p.n == 0 && p.err == nil {
+ peek, _ := br.Peek(br.Buffered())
+ p.n, p.err = scanUntilBoundary(peek, p.mr.dashBoundary, p.mr.nlDashBoundary, p.total, p.readErr)
+ if p.n == 0 && p.err == nil {
+ // Force buffered I/O to read more into buffer.
+ _, p.readErr = br.Peek(len(peek) + 1)
+ if p.readErr == io.EOF {
+ p.readErr = io.ErrUnexpectedEOF
+ }
+ }
+ }
+
+ // Read out from "data to return" part of buffer.
+ if p.n == 0 {
+ return 0, p.err
+ }
+ n := len(d)
+ if n > p.n {
+ n = p.n
+ }
+ n, _ = br.Read(d[:n])
+ p.total += int64(n)
+ p.n -= n
+ if p.n == 0 {
+ return n, p.err
+ }
+ return n, nil
+}
+
+// scanUntilBoundary scans buf to identify how much of it can be safely
+// returned as part of the Part body.
+// dashBoundary is "--boundary".
+// nlDashBoundary is "\r\n--boundary" or "\n--boundary", depending on what mode we are in.
+// The comments below (and the name) assume "\n--boundary", but either is accepted.
+// total is the number of bytes read out so far. If total == 0, then a leading "--boundary" is recognized.
+// readErr is the read error, if any, that followed reading the bytes in buf.
+// scanUntilBoundary returns the number of data bytes from buf that can be
+// returned as part of the Part body and also the error to return (if any)
+// once those data bytes are done.
+func scanUntilBoundary(buf, dashBoundary, nlDashBoundary []byte, total int64, readErr error) (int, error) {
+ if total == 0 {
+ // At beginning of body, allow dashBoundary.
+ if bytes.HasPrefix(buf, dashBoundary) {
+ switch matchAfterPrefix(buf, dashBoundary, readErr) {
+ case -1:
+ return len(dashBoundary), nil
+ case 0:
+ return 0, nil
+ case +1:
+ return 0, io.EOF
+ }
+ }
+ if bytes.HasPrefix(dashBoundary, buf) {
+ return 0, readErr
+ }
+ }
+
+ // Search for "\n--boundary".
+ if i := bytes.Index(buf, nlDashBoundary); i >= 0 {
+ switch matchAfterPrefix(buf[i:], nlDashBoundary, readErr) {
+ case -1:
+ return i + len(nlDashBoundary), nil
+ case 0:
+ return i, nil
+ case +1:
+ return i, io.EOF
+ }
+ }
+ if bytes.HasPrefix(nlDashBoundary, buf) {
+ return 0, readErr
+ }
+
+ // Otherwise, anything up to the final \n is not part of the boundary
+ // and so must be part of the body.
+ // Also if the section from the final \n onward is not a prefix of the boundary,
+ // it too must be part of the body.
+ i := bytes.LastIndexByte(buf, nlDashBoundary[0])
+ if i >= 0 && bytes.HasPrefix(nlDashBoundary, buf[i:]) {
+ return i, nil
+ }
+ return len(buf), readErr
+}
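+
+// A worked example (editorial, not upstream): with boundary "foo", so that
+// nlDashBoundary is "\n--foo" (\n mode), total > 0, and
+// buf = "body\n--foo--\n", the search finds "\n--foo" at index 4 and
+// matchAfterPrefix sees the trailing "--", so scanUntilBoundary returns
+// (4, io.EOF): the four data bytes "body" are returned and the part ends.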
+
+// matchAfterPrefix checks whether buf should be considered to match the boundary.
+// The prefix is "--boundary" or "\r\n--boundary" or "\n--boundary",
+// and the caller has verified already that bytes.HasPrefix(buf, prefix) is true.
+//
+// matchAfterPrefix returns +1 if the buffer does match the boundary,
+// meaning the prefix is followed by a double dash, space, tab, cr, nl,
+// or end of input.
+// It returns -1 if the buffer definitely does NOT match the boundary,
+// meaning the prefix is followed by some other character.
+// For example, "--foobar" does not match "--foo".
+// It returns 0 if more input needs to be read to make the decision,
+// meaning that len(buf) == len(prefix) and readErr == nil.
+func matchAfterPrefix(buf, prefix []byte, readErr error) int {
+ if len(buf) == len(prefix) {
+ if readErr != nil {
+ return +1
+ }
+ return 0
+ }
+ c := buf[len(prefix)]
+
+ if c == ' ' || c == '\t' || c == '\r' || c == '\n' {
+ return +1
+ }
+
+ // Try to detect boundaryDash
+ if c == '-' {
+ if len(buf) == len(prefix)+1 {
+ if readErr != nil {
+ // Prefix + "-" does not match
+ return -1
+ }
+ return 0
+ }
+ if buf[len(prefix)+1] == '-' {
+ return +1
+ }
+ }
+
+ return -1
+}
+
+func (p *Part) Close() error {
+ io.Copy(io.Discard, p)
+ return nil
+}
+
+// Reader is an iterator over parts in a MIME multipart body.
+// Reader's underlying parser consumes its input as needed. Seeking
+// isn't supported.
+type Reader struct {
+ bufReader *bufio.Reader
+
+ currentPart *Part
+ partsRead int
+
+ nl []byte // "\r\n" or "\n" (set after seeing first boundary line)
+ nlDashBoundary []byte // nl + "--boundary"
+ dashBoundaryDash []byte // "--boundary--"
+ dashBoundary []byte // "--boundary"
+}
+
+// NextPart returns the next part in the multipart or an error.
+// When there are no more parts, the error io.EOF is returned.
+//
+// As a special case, if the "Content-Transfer-Encoding" header
+// has a value of "quoted-printable", that header is instead
+// hidden and the body is transparently decoded during Read calls.
+func (r *Reader) NextPart() (*Part, error) {
+ return r.nextPart(false)
+}
+
+// NextRawPart returns the next part in the multipart or an error.
+// When there are no more parts, the error io.EOF is returned.
+//
+// Unlike NextPart, it does not have special handling for
+// "Content-Transfer-Encoding: quoted-printable".
+func (r *Reader) NextRawPart() (*Part, error) {
+ return r.nextPart(true)
+}
+
+func (r *Reader) nextPart(rawPart bool) (*Part, error) {
+ if r.currentPart != nil {
+ r.currentPart.Close()
+ }
+ if string(r.dashBoundary) == "--" {
+ return nil, fmt.Errorf("multipart: boundary is empty")
+ }
+ expectNewPart := false
+ for {
+ line, err := r.bufReader.ReadSlice('\n')
+
+ if err == io.EOF && r.isFinalBoundary(line) {
+ // If the buffer ends in "--boundary--" without the
+ // trailing "\r\n", ReadSlice will return an error
+ // (since it's missing the '\n'), but this is a valid
+ // multipart EOF so we need to return io.EOF instead of
+ // a fmt-wrapped one.
+ return nil, io.EOF
+ }
+ if err != nil {
+ return nil, fmt.Errorf("multipart: NextPart: %v", err)
+ }
+
+ if r.isBoundaryDelimiterLine(line) {
+ r.partsRead++
+ bp, err := newPart(r, rawPart)
+ if err != nil {
+ return nil, err
+ }
+ r.currentPart = bp
+ return bp, nil
+ }
+
+ if r.isFinalBoundary(line) {
+ // Expected EOF
+ return nil, io.EOF
+ }
+
+ if expectNewPart {
+ return nil, fmt.Errorf("multipart: expecting a new Part; got line %q", string(line))
+ }
+
+ if r.partsRead == 0 {
+ // skip line
+ continue
+ }
+
+ // Consume the "\n" or "\r\n" separator between the
+ // body of the previous part and the boundary line we
+ // now expect will follow. (either a new part or the
+ // end boundary)
+ if bytes.Equal(line, r.nl) {
+ expectNewPart = true
+ continue
+ }
+
+ return nil, fmt.Errorf("multipart: unexpected line in Next(): %q", line)
+ }
+}
+
+// isFinalBoundary reports whether line is the final boundary line
+// indicating that all parts are over.
+// It matches `^--boundary--[ \t]*(\r\n)?$`
+func (r *Reader) isFinalBoundary(line []byte) bool {
+ if !bytes.HasPrefix(line, r.dashBoundaryDash) {
+ return false
+ }
+ rest := line[len(r.dashBoundaryDash):]
+ rest = skipLWSPChar(rest)
+ return len(rest) == 0 || bytes.Equal(rest, r.nl)
+}
+
+func (r *Reader) isBoundaryDelimiterLine(line []byte) (ret bool) {
+ // https://tools.ietf.org/html/rfc2046#section-5.1
+ // The boundary delimiter line is then defined as a line
+ // consisting entirely of two hyphen characters ("-",
+ // decimal value 45) followed by the boundary parameter
+ // value from the Content-Type header field, optional linear
+ // whitespace, and a terminating CRLF.
+ if !bytes.HasPrefix(line, r.dashBoundary) {
+ return false
+ }
+ rest := line[len(r.dashBoundary):]
+ rest = skipLWSPChar(rest)
+
+// On the first part, check whether our lines are ending in \n instead of \r\n
+ // and switch into that mode if so. This is a violation of the spec,
+ // but occurs in practice.
+ if r.partsRead == 0 && len(rest) == 1 && rest[0] == '\n' {
+ r.nl = r.nl[1:]
+ r.nlDashBoundary = r.nlDashBoundary[1:]
+ }
+ return bytes.Equal(rest, r.nl)
+}
+
+// skipLWSPChar returns b with leading spaces and tabs removed.
+// RFC 822 defines:
+//
+// LWSP-char = SPACE / HTAB
+func skipLWSPChar(b []byte) []byte {
+ for len(b) > 0 && (b[0] == ' ' || b[0] == '\t') {
+ b = b[1:]
+ }
+ return b
+}
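+
+// A typical use of this package (a hedged sketch, not upstream code):
+// iterate with NextPart until io.EOF, consuming each part before the next.
+//
+//	mr := multipart.NewReader(body, boundary) // body io.Reader, boundary from mime.ParseMediaType
+//	for {
+//		p, err := mr.NextPart()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		slurp, _ := io.ReadAll(p)
+//		fmt.Printf("part %q: %d bytes\n", p.FormName(), len(slurp))
+//	}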
diff --git a/contrib/go/_std_1.18/src/mime/multipart/writer.go b/contrib/go/_std_1.19/src/mime/multipart/writer.go
index d1ff151a7d..d1ff151a7d 100644
--- a/contrib/go/_std_1.18/src/mime/multipart/writer.go
+++ b/contrib/go/_std_1.19/src/mime/multipart/writer.go
diff --git a/contrib/go/_std_1.18/src/mime/quotedprintable/reader.go b/contrib/go/_std_1.19/src/mime/quotedprintable/reader.go
index 4239625402..4239625402 100644
--- a/contrib/go/_std_1.18/src/mime/quotedprintable/reader.go
+++ b/contrib/go/_std_1.19/src/mime/quotedprintable/reader.go
diff --git a/contrib/go/_std_1.18/src/mime/quotedprintable/writer.go b/contrib/go/_std_1.19/src/mime/quotedprintable/writer.go
index 16ea0bf7d6..16ea0bf7d6 100644
--- a/contrib/go/_std_1.18/src/mime/quotedprintable/writer.go
+++ b/contrib/go/_std_1.19/src/mime/quotedprintable/writer.go
diff --git a/contrib/go/_std_1.19/src/mime/type.go b/contrib/go/_std_1.19/src/mime/type.go
new file mode 100644
index 0000000000..465ecf0d59
--- /dev/null
+++ b/contrib/go/_std_1.19/src/mime/type.go
@@ -0,0 +1,202 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package mime implements parts of the MIME spec.
+package mime
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+ "sync"
+)
+
+var (
+ mimeTypes sync.Map // map[string]string; ".Z" => "application/x-compress"
+ mimeTypesLower sync.Map // map[string]string; ".z" => "application/x-compress"
+
+ // extensions maps from MIME type to list of lowercase file
+ // extensions: "image/jpeg" => [".jpg", ".jpeg"]
+ extensionsMu sync.Mutex // Guards stores (but not loads) on extensions.
+ extensions sync.Map // map[string][]string; slice values are append-only.
+)
+
+func clearSyncMap(m *sync.Map) {
+ m.Range(func(k, _ any) bool {
+ m.Delete(k)
+ return true
+ })
+}
+
+// setMimeTypes is used by initMime's non-test path, and by tests.
+func setMimeTypes(lowerExt, mixExt map[string]string) {
+ clearSyncMap(&mimeTypes)
+ clearSyncMap(&mimeTypesLower)
+ clearSyncMap(&extensions)
+
+ for k, v := range lowerExt {
+ mimeTypesLower.Store(k, v)
+ }
+ for k, v := range mixExt {
+ mimeTypes.Store(k, v)
+ }
+
+ extensionsMu.Lock()
+ defer extensionsMu.Unlock()
+ for k, v := range lowerExt {
+ justType, _, err := ParseMediaType(v)
+ if err != nil {
+ panic(err)
+ }
+ var exts []string
+ if ei, ok := extensions.Load(justType); ok {
+ exts = ei.([]string)
+ }
+ extensions.Store(justType, append(exts, k))
+ }
+}
+
+var builtinTypesLower = map[string]string{
+ ".avif": "image/avif",
+ ".css": "text/css; charset=utf-8",
+ ".gif": "image/gif",
+ ".htm": "text/html; charset=utf-8",
+ ".html": "text/html; charset=utf-8",
+ ".jpeg": "image/jpeg",
+ ".jpg": "image/jpeg",
+ ".js": "text/javascript; charset=utf-8",
+ ".json": "application/json",
+ ".mjs": "text/javascript; charset=utf-8",
+ ".pdf": "application/pdf",
+ ".png": "image/png",
+ ".svg": "image/svg+xml",
+ ".wasm": "application/wasm",
+ ".webp": "image/webp",
+ ".xml": "text/xml; charset=utf-8",
+}
+
+var once sync.Once // guards initMime
+
+var testInitMime, osInitMime func()
+
+func initMime() {
+ if fn := testInitMime; fn != nil {
+ fn()
+ } else {
+ setMimeTypes(builtinTypesLower, builtinTypesLower)
+ osInitMime()
+ }
+}
+
+// TypeByExtension returns the MIME type associated with the file extension ext.
+// The extension ext should begin with a leading dot, as in ".html".
+// When ext has no associated type, TypeByExtension returns "".
+//
+// Extensions are looked up first case-sensitively, then case-insensitively.
+//
+// The built-in table is small but on unix it is augmented by the local
+// system's MIME-info database or mime.types file(s) if available under one or
+// more of these names:
+//
+// /usr/local/share/mime/globs2
+// /usr/share/mime/globs2
+// /etc/mime.types
+// /etc/apache2/mime.types
+// /etc/apache/mime.types
+//
+// On Windows, MIME types are extracted from the registry.
+//
+// Text types have the charset parameter set to "utf-8" by default.
+func TypeByExtension(ext string) string {
+ once.Do(initMime)
+
+ // Case-sensitive lookup.
+ if v, ok := mimeTypes.Load(ext); ok {
+ return v.(string)
+ }
+
+ // Case-insensitive lookup.
+ // Optimistically assume a short ASCII extension and be
+ // allocation-free in that case.
+ var buf [10]byte
+ lower := buf[:0]
+ const utf8RuneSelf = 0x80 // from utf8 package, but not importing it.
+ for i := 0; i < len(ext); i++ {
+ c := ext[i]
+ if c >= utf8RuneSelf {
+ // Slow path.
+ si, _ := mimeTypesLower.Load(strings.ToLower(ext))
+ s, _ := si.(string)
+ return s
+ }
+ if 'A' <= c && c <= 'Z' {
+ lower = append(lower, c+('a'-'A'))
+ } else {
+ lower = append(lower, c)
+ }
+ }
+ si, _ := mimeTypesLower.Load(string(lower))
+ s, _ := si.(string)
+ return s
+}
+
+// ExtensionsByType returns the extensions known to be associated with the MIME
+// type typ. The returned extensions will each begin with a leading dot, as in
+// ".html". When typ has no associated extensions, ExtensionsByType returns an
+// nil slice.
+func ExtensionsByType(typ string) ([]string, error) {
+ justType, _, err := ParseMediaType(typ)
+ if err != nil {
+ return nil, err
+ }
+
+ once.Do(initMime)
+ s, ok := extensions.Load(justType)
+ if !ok {
+ return nil, nil
+ }
+ ret := append([]string(nil), s.([]string)...)
+ sort.Strings(ret)
+ return ret, nil
+}
+
+// AddExtensionType sets the MIME type associated with
+// the extension ext to typ. The extension should begin with
+// a leading dot, as in ".html".
+func AddExtensionType(ext, typ string) error {
+ if !strings.HasPrefix(ext, ".") {
+ return fmt.Errorf("mime: extension %q missing leading dot", ext)
+ }
+ once.Do(initMime)
+ return setExtensionType(ext, typ)
+}
+
+func setExtensionType(extension, mimeType string) error {
+ justType, param, err := ParseMediaType(mimeType)
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(mimeType, "text/") && param["charset"] == "" {
+ param["charset"] = "utf-8"
+ mimeType = FormatMediaType(mimeType, param)
+ }
+ extLower := strings.ToLower(extension)
+
+ mimeTypes.Store(extension, mimeType)
+ mimeTypesLower.Store(extLower, mimeType)
+
+ extensionsMu.Lock()
+ defer extensionsMu.Unlock()
+ var exts []string
+ if ei, ok := extensions.Load(justType); ok {
+ exts = ei.([]string)
+ }
+ for _, v := range exts {
+ if v == extLower {
+ return nil
+ }
+ }
+ extensions.Store(justType, append(exts, extLower))
+ return nil
+}
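+
+// A brief usage sketch (editorial; the results shown assume only the
+// built-in table, since the system database may add entries):
+//
+//	mime.TypeByExtension(".HTML")                 // "text/html; charset=utf-8", via the lowercase fallback
+//	mime.ExtensionsByType("image/jpeg")           // [".jpeg", ".jpg"], sorted
+//	mime.AddExtensionType(".md", "text/markdown") // stored as "text/markdown; charset=utf-8"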
diff --git a/contrib/go/_std_1.19/src/mime/type_unix.go b/contrib/go/_std_1.19/src/mime/type_unix.go
new file mode 100644
index 0000000000..649d9001e3
--- /dev/null
+++ b/contrib/go/_std_1.19/src/mime/type_unix.go
@@ -0,0 +1,126 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm)
+
+package mime
+
+import (
+ "bufio"
+ "os"
+ "strings"
+)
+
+func init() {
+ osInitMime = initMimeUnix
+}
+
+// See https://specifications.freedesktop.org/shared-mime-info-spec/shared-mime-info-spec-0.21.html
+// for the FreeDesktop Shared MIME-info Database specification.
+var mimeGlobs = []string{
+ "/usr/local/share/mime/globs2",
+ "/usr/share/mime/globs2",
+}
+
+// Common locations for mime.types files on unix.
+var typeFiles = []string{
+ "/etc/mime.types",
+ "/etc/apache2/mime.types",
+ "/etc/apache/mime.types",
+ "/etc/httpd/conf/mime.types",
+}
+
+func loadMimeGlobsFile(filename string) error {
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ // Each line should be of format: weight:mimetype:glob[:morefields...]
+ fields := strings.Split(scanner.Text(), ":")
+ if len(fields) < 3 || len(fields[0]) < 1 || len(fields[2]) < 3 {
+ continue
+ } else if fields[0][0] == '#' || fields[2][0] != '*' || fields[2][1] != '.' {
+ continue
+ }
+
+ extension := fields[2][1:]
+ if strings.ContainsAny(extension, "?*[") {
+ // Not a bare extension, but a glob. Ignore for now:
+ // - we do not have an implementation for this glob
+ // syntax (translation to path/filepath.Match could
+ // be possible)
+ // - support for globs with weight ordering would have
+ // performance impact to all lookups to support the
+ // rarely seen glob entries
+ // - trying to match glob metacharacters literally is
+ // not useful
+ continue
+ }
+ if _, ok := mimeTypes.Load(extension); ok {
+ // We've already seen this extension.
+ // The file is in weight order, so we keep
+ // the first entry that we see.
+ continue
+ }
+
+ setExtensionType(extension, fields[1])
+ }
+ if err := scanner.Err(); err != nil {
+ panic(err)
+ }
+ return nil
+}
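+
+// For example (an illustrative line, not from a real system file), a globs2
+// entry like
+//
+//	50:image/png:*.png
+//
+// registers ".png" => "image/png"; comment lines and globs that are not a
+// plain "*.ext" pattern (such as "*.[0-9]") are skipped by the checks above.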
+
+func loadMimeFile(filename string) {
+ f, err := os.Open(filename)
+ if err != nil {
+ return
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ fields := strings.Fields(scanner.Text())
+ if len(fields) <= 1 || fields[0][0] == '#' {
+ continue
+ }
+ mimeType := fields[0]
+ for _, ext := range fields[1:] {
+ if ext[0] == '#' {
+ break
+ }
+ setExtensionType("."+ext, mimeType)
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ panic(err)
+ }
+}
+
+func initMimeUnix() {
+ for _, filename := range mimeGlobs {
+ if err := loadMimeGlobsFile(filename); err == nil {
+ return // Stop checking more files if mimetype database is found.
+ }
+ }
+
+ // Fallback if no system-generated mimetype database exists.
+ for _, filename := range typeFiles {
+ loadMimeFile(filename)
+ }
+}
+
+func initMimeForTests() map[string]string {
+ mimeGlobs = []string{""}
+ typeFiles = []string{"testdata/test.types"}
+ return map[string]string{
+ ".T1": "application/test",
+ ".t2": "text/test; charset=utf-8",
+ ".png": "image/png",
+ }
+}
diff --git a/contrib/go/_std_1.19/src/net/addrselect.go b/contrib/go/_std_1.19/src/net/addrselect.go
new file mode 100644
index 0000000000..59380b9486
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/addrselect.go
@@ -0,0 +1,388 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Minimal RFC 6724 address selection.
+
+package net
+
+import "sort"
+
+func sortByRFC6724(addrs []IPAddr) {
+ if len(addrs) < 2 {
+ return
+ }
+ sortByRFC6724withSrcs(addrs, srcAddrs(addrs))
+}
+
+func sortByRFC6724withSrcs(addrs []IPAddr, srcs []IP) {
+ if len(addrs) != len(srcs) {
+ panic("internal error")
+ }
+ addrAttr := make([]ipAttr, len(addrs))
+ srcAttr := make([]ipAttr, len(srcs))
+ for i, v := range addrs {
+ addrAttr[i] = ipAttrOf(v.IP)
+ srcAttr[i] = ipAttrOf(srcs[i])
+ }
+ sort.Stable(&byRFC6724{
+ addrs: addrs,
+ addrAttr: addrAttr,
+ srcs: srcs,
+ srcAttr: srcAttr,
+ })
+}
+
+// srcAddrs tries to UDP-connect to each address to see if it has a
+// route. (This doesn't send any packets.) The destination port
+// number is irrelevant.
+func srcAddrs(addrs []IPAddr) []IP {
+ srcs := make([]IP, len(addrs))
+ dst := UDPAddr{Port: 9}
+ for i := range addrs {
+ dst.IP = addrs[i].IP
+ dst.Zone = addrs[i].Zone
+ c, err := DialUDP("udp", nil, &dst)
+ if err == nil {
+ if src, ok := c.LocalAddr().(*UDPAddr); ok {
+ srcs[i] = src.IP
+ }
+ c.Close()
+ }
+ }
+ return srcs
+}
+
+type ipAttr struct {
+ Scope scope
+ Precedence uint8
+ Label uint8
+}
+
+func ipAttrOf(ip IP) ipAttr {
+ if ip == nil {
+ return ipAttr{}
+ }
+ match := rfc6724policyTable.Classify(ip)
+ return ipAttr{
+ Scope: classifyScope(ip),
+ Precedence: match.Precedence,
+ Label: match.Label,
+ }
+}
+
+type byRFC6724 struct {
+ addrs []IPAddr // addrs to sort
+ addrAttr []ipAttr
+ srcs []IP // or nil if unreachable
+ srcAttr []ipAttr
+}
+
+func (s *byRFC6724) Len() int { return len(s.addrs) }
+
+func (s *byRFC6724) Swap(i, j int) {
+ s.addrs[i], s.addrs[j] = s.addrs[j], s.addrs[i]
+ s.srcs[i], s.srcs[j] = s.srcs[j], s.srcs[i]
+ s.addrAttr[i], s.addrAttr[j] = s.addrAttr[j], s.addrAttr[i]
+ s.srcAttr[i], s.srcAttr[j] = s.srcAttr[j], s.srcAttr[i]
+}
+
+// Less reports whether i is a better destination address for this
+// host than j.
+//
+// The algorithm and variable names come from RFC 6724, section 6.
+func (s *byRFC6724) Less(i, j int) bool {
+ DA := s.addrs[i].IP
+ DB := s.addrs[j].IP
+ SourceDA := s.srcs[i]
+ SourceDB := s.srcs[j]
+ attrDA := &s.addrAttr[i]
+ attrDB := &s.addrAttr[j]
+ attrSourceDA := &s.srcAttr[i]
+ attrSourceDB := &s.srcAttr[j]
+
+ const preferDA = true
+ const preferDB = false
+
+ // Rule 1: Avoid unusable destinations.
+ // If DB is known to be unreachable or if Source(DB) is undefined, then
+ // prefer DA. Similarly, if DA is known to be unreachable or if
+ // Source(DA) is undefined, then prefer DB.
+ if SourceDA == nil && SourceDB == nil {
+ return false // "equal"
+ }
+ if SourceDB == nil {
+ return preferDA
+ }
+ if SourceDA == nil {
+ return preferDB
+ }
+
+ // Rule 2: Prefer matching scope.
+ // If Scope(DA) = Scope(Source(DA)) and Scope(DB) <> Scope(Source(DB)),
+ // then prefer DA. Similarly, if Scope(DA) <> Scope(Source(DA)) and
+ // Scope(DB) = Scope(Source(DB)), then prefer DB.
+ if attrDA.Scope == attrSourceDA.Scope && attrDB.Scope != attrSourceDB.Scope {
+ return preferDA
+ }
+ if attrDA.Scope != attrSourceDA.Scope && attrDB.Scope == attrSourceDB.Scope {
+ return preferDB
+ }
+
+ // Rule 3: Avoid deprecated addresses.
+ // If Source(DA) is deprecated and Source(DB) is not, then prefer DB.
+ // Similarly, if Source(DA) is not deprecated and Source(DB) is
+ // deprecated, then prefer DA.
+
+ // TODO(bradfitz): implement? low priority for now.
+
+ // Rule 4: Prefer home addresses.
+ // If Source(DA) is simultaneously a home address and care-of address
+ // and Source(DB) is not, then prefer DA. Similarly, if Source(DB) is
+ // simultaneously a home address and care-of address and Source(DA) is
+ // not, then prefer DB.
+
+ // TODO(bradfitz): implement? low priority for now.
+
+ // Rule 5: Prefer matching label.
+ // If Label(Source(DA)) = Label(DA) and Label(Source(DB)) <> Label(DB),
+ // then prefer DA. Similarly, if Label(Source(DA)) <> Label(DA) and
+ // Label(Source(DB)) = Label(DB), then prefer DB.
+ if attrSourceDA.Label == attrDA.Label &&
+ attrSourceDB.Label != attrDB.Label {
+ return preferDA
+ }
+ if attrSourceDA.Label != attrDA.Label &&
+ attrSourceDB.Label == attrDB.Label {
+ return preferDB
+ }
+
+ // Rule 6: Prefer higher precedence.
+ // If Precedence(DA) > Precedence(DB), then prefer DA. Similarly, if
+ // Precedence(DA) < Precedence(DB), then prefer DB.
+ if attrDA.Precedence > attrDB.Precedence {
+ return preferDA
+ }
+ if attrDA.Precedence < attrDB.Precedence {
+ return preferDB
+ }
+
+ // Rule 7: Prefer native transport.
+ // If DA is reached via an encapsulating transition mechanism (e.g.,
+ // IPv6 in IPv4) and DB is not, then prefer DB. Similarly, if DB is
+ // reached via encapsulation and DA is not, then prefer DA.
+
+ // TODO(bradfitz): implement? low priority for now.
+
+ // Rule 8: Prefer smaller scope.
+ // If Scope(DA) < Scope(DB), then prefer DA. Similarly, if Scope(DA) >
+ // Scope(DB), then prefer DB.
+ if attrDA.Scope < attrDB.Scope {
+ return preferDA
+ }
+ if attrDA.Scope > attrDB.Scope {
+ return preferDB
+ }
+
+ // Rule 9: Use longest matching prefix.
+ // When DA and DB belong to the same address family (both are IPv6 or
+ // both are IPv4 [but see below]): If CommonPrefixLen(Source(DA), DA) >
+ // CommonPrefixLen(Source(DB), DB), then prefer DA. Similarly, if
+ // CommonPrefixLen(Source(DA), DA) < CommonPrefixLen(Source(DB), DB),
+ // then prefer DB.
+ //
+ // However, applying this rule to IPv4 addresses causes
+ // problems (see issues 13283 and 18518), so limit to IPv6.
+ if DA.To4() == nil && DB.To4() == nil {
+ commonA := commonPrefixLen(SourceDA, DA)
+ commonB := commonPrefixLen(SourceDB, DB)
+
+ if commonA > commonB {
+ return preferDA
+ }
+ if commonA < commonB {
+ return preferDB
+ }
+ }
+
+ // Rule 10: Otherwise, leave the order unchanged.
+ // If DA preceded DB in the original list, prefer DA.
+ // Otherwise, prefer DB.
+ return false // "equal"
+}
+
+type policyTableEntry struct {
+ Prefix *IPNet
+ Precedence uint8
+ Label uint8
+}
+
+type policyTable []policyTableEntry
+
+// RFC 6724 section 2.1.
+var rfc6724policyTable = policyTable{
+ {
+ Prefix: mustCIDR("::1/128"),
+ Precedence: 50,
+ Label: 0,
+ },
+ {
+ Prefix: mustCIDR("::/0"),
+ Precedence: 40,
+ Label: 1,
+ },
+ {
+ // IPv4-compatible, etc.
+ Prefix: mustCIDR("::ffff:0:0/96"),
+ Precedence: 35,
+ Label: 4,
+ },
+ {
+ // 6to4
+ Prefix: mustCIDR("2002::/16"),
+ Precedence: 30,
+ Label: 2,
+ },
+ {
+ // Teredo
+ Prefix: mustCIDR("2001::/32"),
+ Precedence: 5,
+ Label: 5,
+ },
+ {
+ Prefix: mustCIDR("fc00::/7"),
+ Precedence: 3,
+ Label: 13,
+ },
+ {
+ Prefix: mustCIDR("::/96"),
+ Precedence: 1,
+ Label: 3,
+ },
+ {
+ Prefix: mustCIDR("fec0::/10"),
+ Precedence: 1,
+ Label: 11,
+ },
+ {
+ Prefix: mustCIDR("3ffe::/16"),
+ Precedence: 1,
+ Label: 12,
+ },
+}
+
+func init() {
+ sort.Sort(sort.Reverse(byMaskLength(rfc6724policyTable)))
+}
+
+// byMaskLength sorts policyTableEntry by the size of their Prefix.Mask.Size,
+// from smallest mask, to largest.
+type byMaskLength []policyTableEntry
+
+func (s byMaskLength) Len() int { return len(s) }
+func (s byMaskLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s byMaskLength) Less(i, j int) bool {
+ isize, _ := s[i].Prefix.Mask.Size()
+ jsize, _ := s[j].Prefix.Mask.Size()
+ return isize < jsize
+}
+
+// mustCIDR calls ParseCIDR and panics on any error, or if the network
+// is not IPv6.
+func mustCIDR(s string) *IPNet {
+ ip, ipNet, err := ParseCIDR(s)
+ if err != nil {
+ panic(err.Error())
+ }
+ if len(ip) != IPv6len {
+ panic("unexpected IP length")
+ }
+ return ipNet
+}
+
+// Classify returns the policyTableEntry of the entry with the longest
+// matching prefix that contains ip.
+// The table t must be sorted from largest mask size to smallest.
+func (t policyTable) Classify(ip IP) policyTableEntry {
+ for _, ent := range t {
+ if ent.Prefix.Contains(ip) {
+ return ent
+ }
+ }
+ return policyTableEntry{}
+}
+
+// RFC 6724 section 3.1.
+type scope uint8
+
+const (
+ scopeInterfaceLocal scope = 0x1
+ scopeLinkLocal scope = 0x2
+ scopeAdminLocal scope = 0x4
+ scopeSiteLocal scope = 0x5
+ scopeOrgLocal scope = 0x8
+ scopeGlobal scope = 0xe
+)
+
+func classifyScope(ip IP) scope {
+ if ip.IsLoopback() || ip.IsLinkLocalUnicast() {
+ return scopeLinkLocal
+ }
+ ipv6 := len(ip) == IPv6len && ip.To4() == nil
+ if ipv6 && ip.IsMulticast() {
+ return scope(ip[1] & 0xf)
+ }
+ // Site-local addresses are defined in RFC 3513 section 2.5.6
+ // (and deprecated in RFC 3879).
+ if ipv6 && ip[0] == 0xfe && ip[1]&0xc0 == 0xc0 {
+ return scopeSiteLocal
+ }
+ return scopeGlobal
+}
+
+// commonPrefixLen reports the length of the longest prefix (looking
+// at the most significant, or leftmost, bits) that the
+// two addresses have in common, up to the length of a's prefix (i.e.,
+// the portion of the address not including the interface ID).
+//
+// If a or b is an IPv4 address as an IPv6 address, the IPv4 addresses
+// are compared (with max common prefix length of 32).
+// If a and b are different IP versions, 0 is returned.
+//
+// See https://tools.ietf.org/html/rfc6724#section-2.2
+func commonPrefixLen(a, b IP) (cpl int) {
+ if a4 := a.To4(); a4 != nil {
+ a = a4
+ }
+ if b4 := b.To4(); b4 != nil {
+ b = b4
+ }
+ if len(a) != len(b) {
+ return 0
+ }
+ // If IPv6, only up to the prefix (first 64 bits)
+ if len(a) > 8 {
+ a = a[:8]
+ b = b[:8]
+ }
+ for len(a) > 0 {
+ if a[0] == b[0] {
+ cpl += 8
+ a = a[1:]
+ b = b[1:]
+ continue
+ }
+ bits := 8
+ ab, bb := a[0], b[0]
+ for {
+ ab >>= 1
+ bb >>= 1
+ bits--
+ if ab == bb {
+ cpl += bits
+ return
+ }
+ }
+ }
+ return
+}
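+
+// Worked examples (editorial, not upstream): comparison stops at the first
+// differing bit, and IPv6 operands are truncated to their leading 64 bits:
+//
+//	commonPrefixLen(ParseIP("192.168.1.1"), ParseIP("192.168.2.1"))     // 22
+//	commonPrefixLen(ParseIP("2001:db8::1"), ParseIP("2001:db8:0:1::1")) // 63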
diff --git a/contrib/go/_std_1.18/src/net/cgo_bsd.go b/contrib/go/_std_1.19/src/net/cgo_bsd.go
index 1456289b06..1456289b06 100644
--- a/contrib/go/_std_1.18/src/net/cgo_bsd.go
+++ b/contrib/go/_std_1.19/src/net/cgo_bsd.go
diff --git a/contrib/go/_std_1.18/src/net/cgo_linux.go b/contrib/go/_std_1.19/src/net/cgo_linux.go
index de6e87f176..de6e87f176 100644
--- a/contrib/go/_std_1.18/src/net/cgo_linux.go
+++ b/contrib/go/_std_1.19/src/net/cgo_linux.go
diff --git a/contrib/go/_std_1.18/src/net/cgo_resnew.go b/contrib/go/_std_1.19/src/net/cgo_resnew.go
index fa6e68770c..fa6e68770c 100644
--- a/contrib/go/_std_1.18/src/net/cgo_resnew.go
+++ b/contrib/go/_std_1.19/src/net/cgo_resnew.go
diff --git a/contrib/go/_std_1.18/src/net/cgo_socknew.go b/contrib/go/_std_1.19/src/net/cgo_socknew.go
index fbb9e10f34..fbb9e10f34 100644
--- a/contrib/go/_std_1.18/src/net/cgo_socknew.go
+++ b/contrib/go/_std_1.19/src/net/cgo_socknew.go
diff --git a/contrib/go/_std_1.18/src/net/cgo_sockold.go b/contrib/go/_std_1.19/src/net/cgo_sockold.go
index 4d9869de04..4d9869de04 100644
--- a/contrib/go/_std_1.18/src/net/cgo_sockold.go
+++ b/contrib/go/_std_1.19/src/net/cgo_sockold.go
diff --git a/contrib/go/_std_1.19/src/net/cgo_unix.go b/contrib/go/_std_1.19/src/net/cgo_unix.go
new file mode 100644
index 0000000000..71d90560ac
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/cgo_unix.go
@@ -0,0 +1,348 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cgo && !netgo && unix
+
+package net
+
+/*
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netdb.h>
+#include <unistd.h>
+#include <string.h>
+
+// If nothing else defined EAI_OVERFLOW, make sure it has a value.
+#ifndef EAI_OVERFLOW
+#define EAI_OVERFLOW -12
+#endif
+*/
+import "C"
+
+import (
+ "context"
+ "syscall"
+ "unsafe"
+)
+
+// An addrinfoErrno represents a getaddrinfo, getnameinfo-specific
+// error number. It's a signed number and a zero value is a non-error
+// by convention.
+type addrinfoErrno int
+
+func (eai addrinfoErrno) Error() string { return C.GoString(C.gai_strerror(C.int(eai))) }
+func (eai addrinfoErrno) Temporary() bool { return eai == C.EAI_AGAIN }
+func (eai addrinfoErrno) Timeout() bool { return false }
+
+type portLookupResult struct {
+ port int
+ err error
+}
+
+type ipLookupResult struct {
+ addrs []IPAddr
+ cname string
+ err error
+}
+
+type reverseLookupResult struct {
+ names []string
+ err error
+}
+
+func cgoLookupHost(ctx context.Context, name string) (hosts []string, err error, completed bool) {
+ addrs, err, completed := cgoLookupIP(ctx, "ip", name)
+ for _, addr := range addrs {
+ hosts = append(hosts, addr.String())
+ }
+ return
+}
+
+func cgoLookupPort(ctx context.Context, network, service string) (port int, err error, completed bool) {
+ var hints C.struct_addrinfo
+ switch network {
+ case "": // no hints
+ case "tcp", "tcp4", "tcp6":
+ hints.ai_socktype = C.SOCK_STREAM
+ hints.ai_protocol = C.IPPROTO_TCP
+ case "udp", "udp4", "udp6":
+ hints.ai_socktype = C.SOCK_DGRAM
+ hints.ai_protocol = C.IPPROTO_UDP
+ default:
+ return 0, &DNSError{Err: "unknown network", Name: network + "/" + service}, true
+ }
+ switch ipVersion(network) {
+ case '4':
+ hints.ai_family = C.AF_INET
+ case '6':
+ hints.ai_family = C.AF_INET6
+ }
+ if ctx.Done() == nil {
+ port, err := cgoLookupServicePort(&hints, network, service)
+ return port, err, true
+ }
+ result := make(chan portLookupResult, 1)
+ go cgoPortLookup(result, &hints, network, service)
+ select {
+ case r := <-result:
+ return r.port, r.err, true
+ case <-ctx.Done():
+ // Since there isn't a portable way to cancel the lookup,
+ // we just let it finish and write to the buffered channel.
+ return 0, mapErr(ctx.Err()), false
+ }
+}
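+
+// The shape above (a buffered channel of size 1 plus a select on ctx.Done())
+// is the cancelation pattern used throughout this file: getaddrinfo cannot
+// be interrupted, so on cancelation the goroutine is left to finish and its
+// send into the buffered channel never blocks. As a generic sketch, where
+// T, doBlockingCall and zero are placeholders rather than names from this
+// file:
+//
+//	result := make(chan T, 1)
+//	go func() { result <- doBlockingCall() }()
+//	select {
+//	case r := <-result:
+//		return r, nil
+//	case <-ctx.Done():
+//		return zero, mapErr(ctx.Err())
+//	}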
+
+func cgoLookupServicePort(hints *C.struct_addrinfo, network, service string) (port int, err error) {
+ cservice := make([]byte, len(service)+1)
+ copy(cservice, service)
+ // Lowercase the C service name.
+ for i, b := range cservice[:len(service)] {
+ cservice[i] = lowerASCII(b)
+ }
+ var res *C.struct_addrinfo
+ gerrno, err := C.getaddrinfo(nil, (*C.char)(unsafe.Pointer(&cservice[0])), hints, &res)
+ if gerrno != 0 {
+ isTemporary := false
+ switch gerrno {
+ case C.EAI_SYSTEM:
+ if err == nil { // see golang.org/issue/6232
+ err = syscall.EMFILE
+ }
+ default:
+ err = addrinfoErrno(gerrno)
+ isTemporary = addrinfoErrno(gerrno).Temporary()
+ }
+ return 0, &DNSError{Err: err.Error(), Name: network + "/" + service, IsTemporary: isTemporary}
+ }
+ defer C.freeaddrinfo(res)
+
+ for r := res; r != nil; r = r.ai_next {
+ switch r.ai_family {
+ case C.AF_INET:
+ sa := (*syscall.RawSockaddrInet4)(unsafe.Pointer(r.ai_addr))
+ p := (*[2]byte)(unsafe.Pointer(&sa.Port))
+ return int(p[0])<<8 | int(p[1]), nil
+ case C.AF_INET6:
+ sa := (*syscall.RawSockaddrInet6)(unsafe.Pointer(r.ai_addr))
+ p := (*[2]byte)(unsafe.Pointer(&sa.Port))
+ return int(p[0])<<8 | int(p[1]), nil
+ }
+ }
+ return 0, &DNSError{Err: "unknown port", Name: network + "/" + service}
+}
+
+func cgoPortLookup(result chan<- portLookupResult, hints *C.struct_addrinfo, network, service string) {
+ port, err := cgoLookupServicePort(hints, network, service)
+ result <- portLookupResult{port, err}
+}
+
+func cgoLookupIPCNAME(network, name string) (addrs []IPAddr, cname string, err error) {
+ acquireThread()
+ defer releaseThread()
+
+ var hints C.struct_addrinfo
+ hints.ai_flags = cgoAddrInfoFlags
+ hints.ai_socktype = C.SOCK_STREAM
+ hints.ai_family = C.AF_UNSPEC
+ switch ipVersion(network) {
+ case '4':
+ hints.ai_family = C.AF_INET
+ case '6':
+ hints.ai_family = C.AF_INET6
+ }
+
+ h := make([]byte, len(name)+1)
+ copy(h, name)
+ var res *C.struct_addrinfo
+ gerrno, err := C.getaddrinfo((*C.char)(unsafe.Pointer(&h[0])), nil, &hints, &res)
+ if gerrno != 0 {
+ isErrorNoSuchHost := false
+ isTemporary := false
+ switch gerrno {
+ case C.EAI_SYSTEM:
+ if err == nil {
+ // err should not be nil, but sometimes getaddrinfo returns
+ // gerrno == C.EAI_SYSTEM with err == nil on Linux.
+ // The report claims that it happens when we have too many
+ // open files, so use syscall.EMFILE (too many open files in system).
+ // Most system calls would return ENFILE (too many open files),
+ // so at the least EMFILE should be easy to recognize if this
+ // comes up again. golang.org/issue/6232.
+ err = syscall.EMFILE
+ }
+ case C.EAI_NONAME:
+ err = errNoSuchHost
+ isErrorNoSuchHost = true
+ default:
+ err = addrinfoErrno(gerrno)
+ isTemporary = addrinfoErrno(gerrno).Temporary()
+ }
+
+ return nil, "", &DNSError{Err: err.Error(), Name: name, IsNotFound: isErrorNoSuchHost, IsTemporary: isTemporary}
+ }
+ defer C.freeaddrinfo(res)
+
+ if res != nil {
+ cname = C.GoString(res.ai_canonname)
+ if cname == "" {
+ cname = name
+ }
+ if len(cname) > 0 && cname[len(cname)-1] != '.' {
+ cname += "."
+ }
+ }
+ for r := res; r != nil; r = r.ai_next {
+ // We only asked for SOCK_STREAM, but check anyhow.
+ if r.ai_socktype != C.SOCK_STREAM {
+ continue
+ }
+ switch r.ai_family {
+ case C.AF_INET:
+ sa := (*syscall.RawSockaddrInet4)(unsafe.Pointer(r.ai_addr))
+ addr := IPAddr{IP: copyIP(sa.Addr[:])}
+ addrs = append(addrs, addr)
+ case C.AF_INET6:
+ sa := (*syscall.RawSockaddrInet6)(unsafe.Pointer(r.ai_addr))
+ addr := IPAddr{IP: copyIP(sa.Addr[:]), Zone: zoneCache.name(int(sa.Scope_id))}
+ addrs = append(addrs, addr)
+ }
+ }
+ return addrs, cname, nil
+}
+
+func cgoIPLookup(result chan<- ipLookupResult, network, name string) {
+ addrs, cname, err := cgoLookupIPCNAME(network, name)
+ result <- ipLookupResult{addrs, cname, err}
+}
+
+func cgoLookupIP(ctx context.Context, network, name string) (addrs []IPAddr, err error, completed bool) {
+ if ctx.Done() == nil {
+ addrs, _, err = cgoLookupIPCNAME(network, name)
+ return addrs, err, true
+ }
+ result := make(chan ipLookupResult, 1)
+ go cgoIPLookup(result, network, name)
+ select {
+ case r := <-result:
+ return r.addrs, r.err, true
+ case <-ctx.Done():
+ return nil, mapErr(ctx.Err()), false
+ }
+}
+
+func cgoLookupCNAME(ctx context.Context, name string) (cname string, err error, completed bool) {
+ if ctx.Done() == nil {
+ _, cname, err = cgoLookupIPCNAME("ip", name)
+ return cname, err, true
+ }
+ result := make(chan ipLookupResult, 1)
+ go cgoIPLookup(result, "ip", name)
+ select {
+ case r := <-result:
+ return r.cname, r.err, true
+ case <-ctx.Done():
+ return "", mapErr(ctx.Err()), false
+ }
+}
+
+// These are roughly enough for the following:
+//
+//	Source          Encoding                        Maximum length of single name entry
+//	Unicast DNS     ASCII or                        <=253 + a NUL terminator
+//	                Unicode in RFC 5892             252 * total number of labels + delimiters + a NUL terminator
+//	Multicast DNS   UTF-8 in RFC 5198 or            <=253 + a NUL terminator
+//	                the same as unicast DNS ASCII   <=253 + a NUL terminator
+//	Local database  various                         depends on implementation
+const (
+ nameinfoLen = 64
+ maxNameinfoLen = 4096
+)
+
+func cgoLookupPTR(ctx context.Context, addr string) (names []string, err error, completed bool) {
+ var zone string
+ ip := parseIPv4(addr)
+ if ip == nil {
+ ip, zone = parseIPv6Zone(addr)
+ }
+ if ip == nil {
+ return nil, &DNSError{Err: "invalid address", Name: addr}, true
+ }
+ sa, salen := cgoSockaddr(ip, zone)
+ if sa == nil {
+ return nil, &DNSError{Err: "invalid address " + ip.String(), Name: addr}, true
+ }
+ if ctx.Done() == nil {
+ names, err := cgoLookupAddrPTR(addr, sa, salen)
+ return names, err, true
+ }
+ result := make(chan reverseLookupResult, 1)
+ go cgoReverseLookup(result, addr, sa, salen)
+ select {
+ case r := <-result:
+ return r.names, r.err, true
+ case <-ctx.Done():
+ return nil, mapErr(ctx.Err()), false
+ }
+}
+
+func cgoLookupAddrPTR(addr string, sa *C.struct_sockaddr, salen C.socklen_t) (names []string, err error) {
+ acquireThread()
+ defer releaseThread()
+
+ var gerrno int
+ var b []byte
+ for l := nameinfoLen; l <= maxNameinfoLen; l *= 2 {
+ b = make([]byte, l)
+ gerrno, err = cgoNameinfoPTR(b, sa, salen)
+ if gerrno == 0 || gerrno != C.EAI_OVERFLOW {
+ break
+ }
+ }
+ if gerrno != 0 {
+ isTemporary := false
+ switch gerrno {
+ case C.EAI_SYSTEM:
+ if err == nil { // see golang.org/issue/6232
+ err = syscall.EMFILE
+ }
+ default:
+ err = addrinfoErrno(gerrno)
+ isTemporary = addrinfoErrno(gerrno).Temporary()
+ }
+ return nil, &DNSError{Err: err.Error(), Name: addr, IsTemporary: isTemporary}
+ }
+ for i := 0; i < len(b); i++ {
+ if b[i] == 0 {
+ b = b[:i]
+ break
+ }
+ }
+ return []string{absDomainName(string(b))}, nil
+}
+
+func cgoReverseLookup(result chan<- reverseLookupResult, addr string, sa *C.struct_sockaddr, salen C.socklen_t) {
+ names, err := cgoLookupAddrPTR(addr, sa, salen)
+ result <- reverseLookupResult{names, err}
+}
+
+func cgoSockaddr(ip IP, zone string) (*C.struct_sockaddr, C.socklen_t) {
+ if ip4 := ip.To4(); ip4 != nil {
+ return cgoSockaddrInet4(ip4), C.socklen_t(syscall.SizeofSockaddrInet4)
+ }
+ if ip6 := ip.To16(); ip6 != nil {
+ return cgoSockaddrInet6(ip6, zoneCache.index(zone)), C.socklen_t(syscall.SizeofSockaddrInet6)
+ }
+ return nil, 0
+}
+
+func copyIP(x IP) IP {
+ if len(x) < 16 {
+ return x.To16()
+ }
+ y := make(IP, len(x))
+ copy(y, x)
+ return y
+}
diff --git a/contrib/go/_std_1.19/src/net/conf.go b/contrib/go/_std_1.19/src/net/conf.go
new file mode 100644
index 0000000000..b08bbc7d7a
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/conf.go
@@ -0,0 +1,352 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !js
+
+package net
+
+import (
+ "internal/bytealg"
+ "internal/godebug"
+ "os"
+ "runtime"
+ "sync"
+ "syscall"
+)
+
+// conf represents a system's network configuration.
+type conf struct {
+ // forceCgoLookupHost forces CGO to always be used, if available.
+ forceCgoLookupHost bool
+
+ netGo bool // go DNS resolution forced
+ netCgo bool // non-go DNS resolution forced (cgo, or win32)
+
+ // machine has an /etc/mdns.allow file
+ hasMDNSAllow bool
+
+ goos string // the runtime.GOOS, to ease testing
+ dnsDebugLevel int
+
+ nss *nssConf
+ resolv *dnsConfig
+}
+
+var (
+ confOnce sync.Once // guards init of confVal via initConfVal
+ confVal = &conf{goos: runtime.GOOS}
+)
+
+// systemConf returns the machine's network configuration.
+func systemConf() *conf {
+ confOnce.Do(initConfVal)
+ return confVal
+}
+
+func initConfVal() {
+ dnsMode, debugLevel := goDebugNetDNS()
+ confVal.dnsDebugLevel = debugLevel
+ confVal.netGo = netGo || dnsMode == "go"
+ confVal.netCgo = netCgo || dnsMode == "cgo"
+ if !confVal.netGo && !confVal.netCgo && (runtime.GOOS == "windows" || runtime.GOOS == "plan9") {
+ // Neither of these platforms actually use cgo.
+ //
+ // The meaning of "cgo" mode in the net package is
+ // really "the native OS way", which for libc meant
+ // cgo on the original platforms that motivated
+ // PreferGo support before Windows and Plan9 got support,
+ // at which time the GODEBUG=netdns=go and GODEBUG=netdns=cgo
+ // names were already kinda locked in.
+ confVal.netCgo = true
+ }
+
+ if confVal.dnsDebugLevel > 0 {
+ defer func() {
+ if confVal.dnsDebugLevel > 1 {
+ println("go package net: confVal.netCgo =", confVal.netCgo, " netGo =", confVal.netGo)
+ }
+ switch {
+ case confVal.netGo:
+ if netGo {
+ println("go package net: built with netgo build tag; using Go's DNS resolver")
+ } else {
+ println("go package net: GODEBUG setting forcing use of Go's resolver")
+ }
+ case confVal.forceCgoLookupHost:
+ println("go package net: using cgo DNS resolver")
+ default:
+ println("go package net: dynamic selection of DNS resolver")
+ }
+ }()
+ }
+
+ // Darwin pops up annoying dialog boxes if programs try to do
+ // their own DNS requests. So always use cgo instead, which
+ // avoids that.
+ if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
+ confVal.forceCgoLookupHost = true
+ return
+ }
+
+ if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
+ return
+ }
+
+ // If any environment-specified resolver options are specified,
+ // force cgo. Note that LOCALDOMAIN can change behavior merely
+ // by being specified with the empty string.
+ _, localDomainDefined := syscall.Getenv("LOCALDOMAIN")
+ if os.Getenv("RES_OPTIONS") != "" ||
+ os.Getenv("HOSTALIASES") != "" ||
+ confVal.netCgo ||
+ localDomainDefined {
+ confVal.forceCgoLookupHost = true
+ return
+ }
+
+ // OpenBSD apparently lets you override the location of resolv.conf
+ // with ASR_CONFIG. If we notice that, defer to libc.
+ if runtime.GOOS == "openbsd" && os.Getenv("ASR_CONFIG") != "" {
+ confVal.forceCgoLookupHost = true
+ return
+ }
+
+ if runtime.GOOS != "openbsd" {
+ confVal.nss = parseNSSConfFile("/etc/nsswitch.conf")
+ }
+
+ confVal.resolv = dnsReadConfig("/etc/resolv.conf")
+ if confVal.resolv.err != nil && !os.IsNotExist(confVal.resolv.err) &&
+ !os.IsPermission(confVal.resolv.err) {
+ // If we can't read the resolv.conf file, assume it
+ // had something important in it and defer to cgo.
+ // libc's resolver might then fail too, but at least
+ // it wasn't our fault.
+ confVal.forceCgoLookupHost = true
+ }
+
+ if _, err := os.Stat("/etc/mdns.allow"); err == nil {
+ confVal.hasMDNSAllow = true
+ }
+}
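+
+// For reference (a usage sketch; the syntax is the documented GODEBUG
+// netdns setting, and the program name is a placeholder):
+//
+//	GODEBUG=netdns=go    ./prog  // force the pure Go resolver
+//	GODEBUG=netdns=cgo   ./prog  // force the cgo (libc) resolver
+//	GODEBUG=netdns=go+2  ./prog  // Go resolver plus verbose debug output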
+
+// canUseCgo reports whether calling cgo functions is allowed
+// for non-hostname lookups.
+func (c *conf) canUseCgo() bool {
+ return c.hostLookupOrder(nil, "") == hostLookupCgo
+}
+
+// hostLookupOrder determines which strategy to use to resolve hostname.
+// The provided Resolver is optional. nil means to not consider its options.
+func (c *conf) hostLookupOrder(r *Resolver, hostname string) (ret hostLookupOrder) {
+ if c.dnsDebugLevel > 1 {
+ defer func() {
+ print("go package net: hostLookupOrder(", hostname, ") = ", ret.String(), "\n")
+ }()
+ }
+ fallbackOrder := hostLookupCgo
+ if c.netGo || r.preferGo() {
+ switch c.goos {
+ case "windows":
+ // TODO(bradfitz): implement files-based
+ // lookup on Windows too? I guess /etc/hosts
+ // kinda exists on Windows. But for now, only
+ // do DNS.
+ fallbackOrder = hostLookupDNS
+ default:
+ fallbackOrder = hostLookupFilesDNS
+ }
+ }
+ if c.goos == "windows" || c.goos == "plan9" {
+ return fallbackOrder
+ }
+ if c.forceCgoLookupHost || c.resolv.unknownOpt || c.goos == "android" {
+ return fallbackOrder
+ }
+ if bytealg.IndexByteString(hostname, '\\') != -1 || bytealg.IndexByteString(hostname, '%') != -1 {
+ // Don't deal with special form hostnames with backslashes
+ // or '%'.
+ return fallbackOrder
+ }
+
+ // OpenBSD is unique and doesn't use nsswitch.conf.
+ // It also doesn't support mDNS.
+ if c.goos == "openbsd" {
+ // OpenBSD's resolv.conf manpage says that a non-existent
+ // resolv.conf means "lookup" defaults to only "files",
+ // without DNS lookups.
+ if os.IsNotExist(c.resolv.err) {
+ return hostLookupFiles
+ }
+ lookup := c.resolv.lookup
+ if len(lookup) == 0 {
+ // https://www.openbsd.org/cgi-bin/man.cgi/OpenBSD-current/man5/resolv.conf.5
+ // "If the lookup keyword is not used in the
+ // system's resolv.conf file then the assumed
+ // order is 'bind file'"
+ return hostLookupDNSFiles
+ }
+ if len(lookup) < 1 || len(lookup) > 2 {
+ return fallbackOrder
+ }
+ switch lookup[0] {
+ case "bind":
+ if len(lookup) == 2 {
+ if lookup[1] == "file" {
+ return hostLookupDNSFiles
+ }
+ return fallbackOrder
+ }
+ return hostLookupDNS
+ case "file":
+ if len(lookup) == 2 {
+ if lookup[1] == "bind" {
+ return hostLookupFilesDNS
+ }
+ return fallbackOrder
+ }
+ return hostLookupFiles
+ default:
+ return fallbackOrder
+ }
+ }
+
+ // Canonicalize the hostname by removing any trailing dot.
+ if stringsHasSuffix(hostname, ".") {
+ hostname = hostname[:len(hostname)-1]
+ }
+ if stringsHasSuffixFold(hostname, ".local") {
+ // Per RFC 6762, the ".local" TLD is special. And
+ // because Go's native resolver doesn't do mDNS or
+ // similar local resolution mechanisms, assume that
+ // libc might (via Avahi, etc) and use cgo.
+ return fallbackOrder
+ }
+
+ nss := c.nss
+ srcs := nss.sources["hosts"]
+ // If /etc/nsswitch.conf doesn't exist or doesn't specify any
+ // sources for "hosts", assume Go's DNS will work fine.
+ if os.IsNotExist(nss.err) || (nss.err == nil && len(srcs) == 0) {
+ if c.goos == "solaris" {
+ // illumos defaults to "nis [NOTFOUND=return] files"
+ return fallbackOrder
+ }
+ return hostLookupFilesDNS
+ }
+ if nss.err != nil {
+ // We failed to parse or open nsswitch.conf, so
+ // conservatively assume we should use cgo if it's
+ // available.
+ return fallbackOrder
+ }
+
+ var mdnsSource, filesSource, dnsSource bool
+ var first string
+ for _, src := range srcs {
+ if src.source == "myhostname" {
+ if isLocalhost(hostname) || isGateway(hostname) {
+ return fallbackOrder
+ }
+ hn, err := getHostname()
+ if err != nil || stringsEqualFold(hostname, hn) {
+ return fallbackOrder
+ }
+ continue
+ }
+ if src.source == "files" || src.source == "dns" {
+ if !src.standardCriteria() {
+ return fallbackOrder // non-standard; let libc deal with it.
+ }
+ if src.source == "files" {
+ filesSource = true
+ } else if src.source == "dns" {
+ dnsSource = true
+ }
+ if first == "" {
+ first = src.source
+ }
+ continue
+ }
+ if stringsHasPrefix(src.source, "mdns") {
+ // e.g. "mdns4", "mdns4_minimal"
+			// We already returned above if the name was *.local.
+ // libc wouldn't have found a hit on this anyway.
+ mdnsSource = true
+ continue
+ }
+ // Some source we don't know how to deal with.
+ return fallbackOrder
+ }
+
+ // We don't parse mdns.allow files. They're rare. If one
+ // exists, it might list other TLDs (besides .local) or even
+ // '*', so just let libc deal with it.
+ if mdnsSource && c.hasMDNSAllow {
+ return fallbackOrder
+ }
+
+ // Cases where Go can handle it without cgo and C thread
+ // overhead.
+ switch {
+ case filesSource && dnsSource:
+ if first == "files" {
+ return hostLookupFilesDNS
+ } else {
+ return hostLookupDNSFiles
+ }
+ case filesSource:
+ return hostLookupFiles
+ case dnsSource:
+ return hostLookupDNS
+ }
+
+ // Something weird. Let libc deal with it.
+ return fallbackOrder
+}
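+
+// As an illustration (assuming a typical glibc-style nsswitch.conf),
+// the logic above maps "hosts" lines to orders roughly as follows:
+//
+//	hosts: files dns         -> hostLookupFilesDNS
+//	hosts: dns files         -> hostLookupDNSFiles
+//	hosts: files mymachines  -> fallbackOrder (unknown source)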
+
+// goDebugNetDNS parses the GODEBUG "netdns" setting.
+// The netdns value can be of the form:
+//
+// 1 // debug level 1
+// 2 // debug level 2
+// cgo // use cgo for DNS lookups
+// go // use go for DNS lookups
+// cgo+1 // use cgo for DNS lookups + debug level 1
+// 1+cgo // same
+// cgo+2 // same, but debug level 2
+//
+// etc.
+func goDebugNetDNS() (dnsMode string, debugLevel int) {
+ goDebug := godebug.Get("netdns")
+ parsePart := func(s string) {
+ if s == "" {
+ return
+ }
+ if '0' <= s[0] && s[0] <= '9' {
+ debugLevel, _, _ = dtoi(s)
+ } else {
+ dnsMode = s
+ }
+ }
+ if i := bytealg.IndexByteString(goDebug, '+'); i != -1 {
+ parsePart(goDebug[:i])
+ parsePart(goDebug[i+1:])
+ return
+ }
+ parsePart(goDebug)
+ return
+}
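+
+// For instance (illustrative): with GODEBUG=netdns=cgo+1 in the
+// environment, goDebugNetDNS returns ("cgo", 1); with GODEBUG=netdns=2
+// it returns ("", 2).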
+
+// isLocalhost reports whether h should be considered a "localhost"
+// name for the myhostname NSS module.
+func isLocalhost(h string) bool {
+ return stringsEqualFold(h, "localhost") || stringsEqualFold(h, "localhost.localdomain") || stringsHasSuffixFold(h, ".localhost") || stringsHasSuffixFold(h, ".localhost.localdomain")
+}
+
+// isGateway reports whether h should be considered a "gateway"
+// name for the myhostname NSS module.
+func isGateway(h string) bool {
+ return stringsEqualFold(h, "gateway")
+}
diff --git a/contrib/go/_std_1.19/src/net/dial.go b/contrib/go/_std_1.19/src/net/dial.go
new file mode 100644
index 0000000000..c538342566
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/dial.go
@@ -0,0 +1,742 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+import (
+ "context"
+ "internal/nettrace"
+ "syscall"
+ "time"
+)
+
+// defaultTCPKeepAlive is the default interval between TCP keep-alive
+// probes. See golang.org/issue/31510.
+const (
+ defaultTCPKeepAlive = 15 * time.Second
+)
+
+// A Dialer contains options for connecting to an address.
+//
+// The zero value for each field is equivalent to dialing
+// without that option. Dialing with the zero value of Dialer
+// is therefore equivalent to just calling the Dial function.
+//
+// It is safe to call Dialer's methods concurrently.
+type Dialer struct {
+ // Timeout is the maximum amount of time a dial will wait for
+ // a connect to complete. If Deadline is also set, it may fail
+ // earlier.
+ //
+ // The default is no timeout.
+ //
+ // When using TCP and dialing a host name with multiple IP
+ // addresses, the timeout may be divided between them.
+ //
+ // With or without a timeout, the operating system may impose
+ // its own earlier timeout. For instance, TCP timeouts are
+ // often around 3 minutes.
+ Timeout time.Duration
+
+ // Deadline is the absolute point in time after which dials
+ // will fail. If Timeout is set, it may fail earlier.
+	// Zero means no deadline, though the operating system may still
+	// impose its own earlier one, as with the Timeout option.
+ Deadline time.Time
+
+ // LocalAddr is the local address to use when dialing an
+ // address. The address must be of a compatible type for the
+ // network being dialed.
+ // If nil, a local address is automatically chosen.
+ LocalAddr Addr
+
+ // DualStack previously enabled RFC 6555 Fast Fallback
+ // support, also known as "Happy Eyeballs", in which IPv4 is
+ // tried soon if IPv6 appears to be misconfigured and
+ // hanging.
+ //
+ // Deprecated: Fast Fallback is enabled by default. To
+ // disable, set FallbackDelay to a negative value.
+ DualStack bool
+
+ // FallbackDelay specifies the length of time to wait before
+	// spawning an RFC 6555 Fast Fallback connection. That is, this
+ // is the amount of time to wait for IPv6 to succeed before
+ // assuming that IPv6 is misconfigured and falling back to
+ // IPv4.
+ //
+ // If zero, a default delay of 300ms is used.
+ // A negative value disables Fast Fallback support.
+ FallbackDelay time.Duration
+
+ // KeepAlive specifies the interval between keep-alive
+ // probes for an active network connection.
+ // If zero, keep-alive probes are sent with a default value
+ // (currently 15 seconds), if supported by the protocol and operating
+ // system. Network protocols or operating systems that do
+ // not support keep-alives ignore this field.
+ // If negative, keep-alive probes are disabled.
+ KeepAlive time.Duration
+
+ // Resolver optionally specifies an alternate resolver to use.
+ Resolver *Resolver
+
+ // Cancel is an optional channel whose closure indicates that
+ // the dial should be canceled. Not all types of dials support
+ // cancellation.
+ //
+ // Deprecated: Use DialContext instead.
+ Cancel <-chan struct{}
+
+ // If Control is not nil, it is called after creating the network
+ // connection but before actually dialing.
+ //
+ // Network and address parameters passed to Control method are not
+ // necessarily the ones passed to Dial. For example, passing "tcp" to Dial
+ // will cause the Control function to be called with "tcp4" or "tcp6".
+ Control func(network, address string, c syscall.RawConn) error
+}
+
+func (d *Dialer) dualStack() bool { return d.FallbackDelay >= 0 }
+
+func minNonzeroTime(a, b time.Time) time.Time {
+ if a.IsZero() {
+ return b
+ }
+ if b.IsZero() || a.Before(b) {
+ return a
+ }
+ return b
+}
+
+// deadline returns the earliest of:
+// - now+Timeout
+// - d.Deadline
+// - the context's deadline
+//
+// Or zero, if none of Timeout, Deadline, or context's deadline is set.
+func (d *Dialer) deadline(ctx context.Context, now time.Time) (earliest time.Time) {
+ if d.Timeout != 0 { // including negative, for historical reasons
+ earliest = now.Add(d.Timeout)
+ }
+ if d, ok := ctx.Deadline(); ok {
+ earliest = minNonzeroTime(earliest, d)
+ }
+ return minNonzeroTime(earliest, d.Deadline)
+}
+
+func (d *Dialer) resolver() *Resolver {
+ if d.Resolver != nil {
+ return d.Resolver
+ }
+ return DefaultResolver
+}
+
+// partialDeadline returns the deadline to use for a single address,
+// when multiple addresses are pending.
+func partialDeadline(now, deadline time.Time, addrsRemaining int) (time.Time, error) {
+ if deadline.IsZero() {
+ return deadline, nil
+ }
+ timeRemaining := deadline.Sub(now)
+ if timeRemaining <= 0 {
+ return time.Time{}, errTimeout
+ }
+ // Tentatively allocate equal time to each remaining address.
+ timeout := timeRemaining / time.Duration(addrsRemaining)
+ // If the time per address is too short, steal from the end of the list.
+ const saneMinimum = 2 * time.Second
+ if timeout < saneMinimum {
+ if timeRemaining < saneMinimum {
+ timeout = timeRemaining
+ } else {
+ timeout = saneMinimum
+ }
+ }
+ return now.Add(timeout), nil
+}
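+
+// For example (illustrative): with 9s remaining and 3 addresses left,
+// each address is allotted 3s. With only 3s remaining and 3 addresses,
+// the 2s saneMinimum applies, so the first address gets 2s and later
+// ones divide whatever time remains.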
+
+func (d *Dialer) fallbackDelay() time.Duration {
+ if d.FallbackDelay > 0 {
+ return d.FallbackDelay
+ } else {
+ return 300 * time.Millisecond
+ }
+}
+
+func parseNetwork(ctx context.Context, network string, needsProto bool) (afnet string, proto int, err error) {
+ i := last(network, ':')
+ if i < 0 { // no colon
+ switch network {
+ case "tcp", "tcp4", "tcp6":
+ case "udp", "udp4", "udp6":
+ case "ip", "ip4", "ip6":
+ if needsProto {
+ return "", 0, UnknownNetworkError(network)
+ }
+ case "unix", "unixgram", "unixpacket":
+ default:
+ return "", 0, UnknownNetworkError(network)
+ }
+ return network, 0, nil
+ }
+ afnet = network[:i]
+ switch afnet {
+ case "ip", "ip4", "ip6":
+ protostr := network[i+1:]
+ proto, i, ok := dtoi(protostr)
+ if !ok || i != len(protostr) {
+ proto, err = lookupProtocol(ctx, protostr)
+ if err != nil {
+ return "", 0, err
+ }
+ }
+ return afnet, proto, nil
+ }
+ return "", 0, UnknownNetworkError(network)
+}
+
+// resolveAddrList resolves addr using hint and returns a list of
+// addresses. The result contains at least one address when error is
+// nil.
+func (r *Resolver) resolveAddrList(ctx context.Context, op, network, addr string, hint Addr) (addrList, error) {
+ afnet, _, err := parseNetwork(ctx, network, true)
+ if err != nil {
+ return nil, err
+ }
+ if op == "dial" && addr == "" {
+ return nil, errMissingAddress
+ }
+ switch afnet {
+ case "unix", "unixgram", "unixpacket":
+ addr, err := ResolveUnixAddr(afnet, addr)
+ if err != nil {
+ return nil, err
+ }
+ if op == "dial" && hint != nil && addr.Network() != hint.Network() {
+ return nil, &AddrError{Err: "mismatched local address type", Addr: hint.String()}
+ }
+ return addrList{addr}, nil
+ }
+ addrs, err := r.internetAddrList(ctx, afnet, addr)
+ if err != nil || op != "dial" || hint == nil {
+ return addrs, err
+ }
+ var (
+ tcp *TCPAddr
+ udp *UDPAddr
+ ip *IPAddr
+ wildcard bool
+ )
+ switch hint := hint.(type) {
+ case *TCPAddr:
+ tcp = hint
+ wildcard = tcp.isWildcard()
+ case *UDPAddr:
+ udp = hint
+ wildcard = udp.isWildcard()
+ case *IPAddr:
+ ip = hint
+ wildcard = ip.isWildcard()
+ }
+ naddrs := addrs[:0]
+ for _, addr := range addrs {
+ if addr.Network() != hint.Network() {
+ return nil, &AddrError{Err: "mismatched local address type", Addr: hint.String()}
+ }
+ switch addr := addr.(type) {
+ case *TCPAddr:
+ if !wildcard && !addr.isWildcard() && !addr.IP.matchAddrFamily(tcp.IP) {
+ continue
+ }
+ naddrs = append(naddrs, addr)
+ case *UDPAddr:
+ if !wildcard && !addr.isWildcard() && !addr.IP.matchAddrFamily(udp.IP) {
+ continue
+ }
+ naddrs = append(naddrs, addr)
+ case *IPAddr:
+ if !wildcard && !addr.isWildcard() && !addr.IP.matchAddrFamily(ip.IP) {
+ continue
+ }
+ naddrs = append(naddrs, addr)
+ }
+ }
+ if len(naddrs) == 0 {
+ return nil, &AddrError{Err: errNoSuitableAddress.Error(), Addr: hint.String()}
+ }
+ return naddrs, nil
+}
+
+// Dial connects to the address on the named network.
+//
+// Known networks are "tcp", "tcp4" (IPv4-only), "tcp6" (IPv6-only),
+// "udp", "udp4" (IPv4-only), "udp6" (IPv6-only), "ip", "ip4"
+// (IPv4-only), "ip6" (IPv6-only), "unix", "unixgram" and
+// "unixpacket".
+//
+// For TCP and UDP networks, the address has the form "host:port".
+// The host must be a literal IP address, or a host name that can be
+// resolved to IP addresses.
+// The port must be a literal port number or a service name.
+// If the host is a literal IPv6 address it must be enclosed in square
+// brackets, as in "[2001:db8::1]:80" or "[fe80::1%zone]:80".
+// The zone specifies the scope of the literal IPv6 address as defined
+// in RFC 4007.
+// The functions JoinHostPort and SplitHostPort manipulate a pair of
+// host and port in this form.
+// When using TCP, and the host resolves to multiple IP addresses,
+// Dial will try each IP address in order until one succeeds.
+//
+// Examples:
+//
+// Dial("tcp", "golang.org:http")
+// Dial("tcp", "192.0.2.1:http")
+// Dial("tcp", "198.51.100.1:80")
+// Dial("udp", "[2001:db8::1]:domain")
+// Dial("udp", "[fe80::1%lo0]:53")
+// Dial("tcp", ":80")
+//
+// For IP networks, the network must be "ip", "ip4" or "ip6" followed
+// by a colon and a literal protocol number or a protocol name, and
+// the address has the form "host". The host must be a literal IP
+// address or a literal IPv6 address with zone.
+// Behavior with a non-well-known protocol number such as "0" or
+// "255" depends on the operating system.
+//
+// Examples:
+//
+// Dial("ip4:1", "192.0.2.1")
+// Dial("ip6:ipv6-icmp", "2001:db8::1")
+// Dial("ip6:58", "fe80::1%lo0")
+//
+// For TCP, UDP and IP networks, if the host is empty or a literal
+// unspecified IP address, as in ":80", "0.0.0.0:80" or "[::]:80" for
+// TCP and UDP, "", "0.0.0.0" or "::" for IP, the local system is
+// assumed.
+//
+// For Unix networks, the address must be a file system path.
+func Dial(network, address string) (Conn, error) {
+ var d Dialer
+ return d.Dial(network, address)
+}
+
+// DialTimeout acts like Dial but takes a timeout.
+//
+// The timeout includes name resolution, if required.
+// When using TCP, and the host in the address parameter resolves to
+// multiple IP addresses, the timeout is spread over each consecutive
+// dial, such that each is given an appropriate fraction of the time
+// to connect.
+//
+// See func Dial for a description of the network and address
+// parameters.
+func DialTimeout(network, address string, timeout time.Duration) (Conn, error) {
+ d := Dialer{Timeout: timeout}
+ return d.Dial(network, address)
+}
+
+// sysDialer contains a Dial's parameters and configuration.
+type sysDialer struct {
+ Dialer
+ network, address string
+ testHookDialTCP func(ctx context.Context, net string, laddr, raddr *TCPAddr) (*TCPConn, error)
+}
+
+// Dial connects to the address on the named network.
+//
+// See func Dial for a description of the network and address
+// parameters.
+//
+// Dial uses context.Background internally; to specify the context, use
+// DialContext.
+func (d *Dialer) Dial(network, address string) (Conn, error) {
+ return d.DialContext(context.Background(), network, address)
+}
+
+// DialContext connects to the address on the named network using
+// the provided context.
+//
+// The provided Context must be non-nil. If the context expires before
+// the connection is complete, an error is returned. Once successfully
+// connected, any expiration of the context will not affect the
+// connection.
+//
+// When using TCP, and the host in the address parameter resolves to multiple
+// network addresses, any dial timeout (from d.Timeout or ctx) is spread
+// over each consecutive dial, such that each is given an appropriate
+// fraction of the time to connect.
+// For example, if a host has 4 IP addresses and the timeout is 1 minute,
+// the connect to each single address will be given 15 seconds to complete
+// before trying the next one.
+//
+// See func Dial for a description of the network and address
+// parameters.
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (Conn, error) {
+ if ctx == nil {
+ panic("nil context")
+ }
+ deadline := d.deadline(ctx, time.Now())
+ if !deadline.IsZero() {
+ if d, ok := ctx.Deadline(); !ok || deadline.Before(d) {
+ subCtx, cancel := context.WithDeadline(ctx, deadline)
+ defer cancel()
+ ctx = subCtx
+ }
+ }
+ if oldCancel := d.Cancel; oldCancel != nil {
+ subCtx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ go func() {
+ select {
+ case <-oldCancel:
+ cancel()
+ case <-subCtx.Done():
+ }
+ }()
+ ctx = subCtx
+ }
+
+ // Shadow the nettrace (if any) during resolve so Connect events don't fire for DNS lookups.
+ resolveCtx := ctx
+ if trace, _ := ctx.Value(nettrace.TraceKey{}).(*nettrace.Trace); trace != nil {
+ shadow := *trace
+ shadow.ConnectStart = nil
+ shadow.ConnectDone = nil
+ resolveCtx = context.WithValue(resolveCtx, nettrace.TraceKey{}, &shadow)
+ }
+
+ addrs, err := d.resolver().resolveAddrList(resolveCtx, "dial", network, address, d.LocalAddr)
+ if err != nil {
+ return nil, &OpError{Op: "dial", Net: network, Source: nil, Addr: nil, Err: err}
+ }
+
+ sd := &sysDialer{
+ Dialer: *d,
+ network: network,
+ address: address,
+ }
+
+ var primaries, fallbacks addrList
+ if d.dualStack() && network == "tcp" {
+ primaries, fallbacks = addrs.partition(isIPv4)
+ } else {
+ primaries = addrs
+ }
+
+ c, err := sd.dialParallel(ctx, primaries, fallbacks)
+ if err != nil {
+ return nil, err
+ }
+
+ if tc, ok := c.(*TCPConn); ok && d.KeepAlive >= 0 {
+ setKeepAlive(tc.fd, true)
+ ka := d.KeepAlive
+ if d.KeepAlive == 0 {
+ ka = defaultTCPKeepAlive
+ }
+ setKeepAlivePeriod(tc.fd, ka)
+ testHookSetKeepAlive(ka)
+ }
+ return c, nil
+}
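+
+// A typical call (illustrative sketch; the host and timeouts are
+// placeholders):
+//
+//	d := net.Dialer{Timeout: 5 * time.Second}
+//	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+//	defer cancel()
+//	conn, err := d.DialContext(ctx, "tcp", "example.com:443")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer conn.Close()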
+
+// dialParallel races two copies of dialSerial, giving the first a
+// head start. It returns the first established connection and
+// closes the others. Otherwise it returns an error from the first
+// primary address.
+func (sd *sysDialer) dialParallel(ctx context.Context, primaries, fallbacks addrList) (Conn, error) {
+ if len(fallbacks) == 0 {
+ return sd.dialSerial(ctx, primaries)
+ }
+
+ returned := make(chan struct{})
+ defer close(returned)
+
+ type dialResult struct {
+ Conn
+ error
+ primary bool
+ done bool
+ }
+ results := make(chan dialResult) // unbuffered
+
+ startRacer := func(ctx context.Context, primary bool) {
+ ras := primaries
+ if !primary {
+ ras = fallbacks
+ }
+ c, err := sd.dialSerial(ctx, ras)
+ select {
+ case results <- dialResult{Conn: c, error: err, primary: primary, done: true}:
+ case <-returned:
+ if c != nil {
+ c.Close()
+ }
+ }
+ }
+
+ var primary, fallback dialResult
+
+ // Start the main racer.
+ primaryCtx, primaryCancel := context.WithCancel(ctx)
+ defer primaryCancel()
+ go startRacer(primaryCtx, true)
+
+ // Start the timer for the fallback racer.
+ fallbackTimer := time.NewTimer(sd.fallbackDelay())
+ defer fallbackTimer.Stop()
+
+ for {
+ select {
+ case <-fallbackTimer.C:
+ fallbackCtx, fallbackCancel := context.WithCancel(ctx)
+ defer fallbackCancel()
+ go startRacer(fallbackCtx, false)
+
+ case res := <-results:
+ if res.error == nil {
+ return res.Conn, nil
+ }
+ if res.primary {
+ primary = res
+ } else {
+ fallback = res
+ }
+ if primary.done && fallback.done {
+ return nil, primary.error
+ }
+ if res.primary && fallbackTimer.Stop() {
+ // If we were able to stop the timer, that means it
+ // was running (hadn't yet started the fallback), but
+ // we just got an error on the primary path, so start
+ // the fallback immediately (in 0 nanoseconds).
+ fallbackTimer.Reset(0)
+ }
+ }
+ }
+}
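+
+// Timeline sketch (illustrative, with the default 300ms fallbackDelay):
+// at t=0 the primary (IPv6) dial starts; if it has not finished by
+// t=300ms the fallback (IPv4) dial starts; the first success wins and
+// the losing connection, if any, is closed via the returned channel.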
+
+// dialSerial connects to a list of addresses in sequence, returning
+// either the first successful connection, or the first error.
+func (sd *sysDialer) dialSerial(ctx context.Context, ras addrList) (Conn, error) {
+ var firstErr error // The error from the first address is most relevant.
+
+ for i, ra := range ras {
+ select {
+ case <-ctx.Done():
+ return nil, &OpError{Op: "dial", Net: sd.network, Source: sd.LocalAddr, Addr: ra, Err: mapErr(ctx.Err())}
+ default:
+ }
+
+ dialCtx := ctx
+ if deadline, hasDeadline := ctx.Deadline(); hasDeadline {
+ partialDeadline, err := partialDeadline(time.Now(), deadline, len(ras)-i)
+ if err != nil {
+ // Ran out of time.
+ if firstErr == nil {
+ firstErr = &OpError{Op: "dial", Net: sd.network, Source: sd.LocalAddr, Addr: ra, Err: err}
+ }
+ break
+ }
+ if partialDeadline.Before(deadline) {
+ var cancel context.CancelFunc
+ dialCtx, cancel = context.WithDeadline(ctx, partialDeadline)
+ defer cancel()
+ }
+ }
+
+ c, err := sd.dialSingle(dialCtx, ra)
+ if err == nil {
+ return c, nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ if firstErr == nil {
+ firstErr = &OpError{Op: "dial", Net: sd.network, Source: nil, Addr: nil, Err: errMissingAddress}
+ }
+ return nil, firstErr
+}
+
+// dialSingle attempts to establish and returns a single connection to
+// the destination address.
+func (sd *sysDialer) dialSingle(ctx context.Context, ra Addr) (c Conn, err error) {
+ trace, _ := ctx.Value(nettrace.TraceKey{}).(*nettrace.Trace)
+ if trace != nil {
+ raStr := ra.String()
+ if trace.ConnectStart != nil {
+ trace.ConnectStart(sd.network, raStr)
+ }
+ if trace.ConnectDone != nil {
+ defer func() { trace.ConnectDone(sd.network, raStr, err) }()
+ }
+ }
+ la := sd.LocalAddr
+ switch ra := ra.(type) {
+ case *TCPAddr:
+ la, _ := la.(*TCPAddr)
+ c, err = sd.dialTCP(ctx, la, ra)
+ case *UDPAddr:
+ la, _ := la.(*UDPAddr)
+ c, err = sd.dialUDP(ctx, la, ra)
+ case *IPAddr:
+ la, _ := la.(*IPAddr)
+ c, err = sd.dialIP(ctx, la, ra)
+ case *UnixAddr:
+ la, _ := la.(*UnixAddr)
+ c, err = sd.dialUnix(ctx, la, ra)
+ default:
+ return nil, &OpError{Op: "dial", Net: sd.network, Source: la, Addr: ra, Err: &AddrError{Err: "unexpected address type", Addr: sd.address}}
+ }
+ if err != nil {
+ return nil, &OpError{Op: "dial", Net: sd.network, Source: la, Addr: ra, Err: err} // c is non-nil interface containing nil pointer
+ }
+ return c, nil
+}
+
+// ListenConfig contains options for listening to an address.
+type ListenConfig struct {
+ // If Control is not nil, it is called after creating the network
+ // connection but before binding it to the operating system.
+ //
+ // Network and address parameters passed to Control method are not
+ // necessarily the ones passed to Listen. For example, passing "tcp" to
+ // Listen will cause the Control function to be called with "tcp4" or "tcp6".
+ Control func(network, address string, c syscall.RawConn) error
+
+ // KeepAlive specifies the keep-alive period for network
+ // connections accepted by this listener.
+ // If zero, keep-alives are enabled if supported by the protocol
+ // and operating system. Network protocols or operating systems
+ // that do not support keep-alives ignore this field.
+ // If negative, keep-alives are disabled.
+ KeepAlive time.Duration
+}
+
+// Listen announces on the local network address.
+//
+// See func Listen for a description of the network and address
+// parameters.
+func (lc *ListenConfig) Listen(ctx context.Context, network, address string) (Listener, error) {
+ addrs, err := DefaultResolver.resolveAddrList(ctx, "listen", network, address, nil)
+ if err != nil {
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: nil, Err: err}
+ }
+ sl := &sysListener{
+ ListenConfig: *lc,
+ network: network,
+ address: address,
+ }
+ var l Listener
+ la := addrs.first(isIPv4)
+ switch la := la.(type) {
+ case *TCPAddr:
+ l, err = sl.listenTCP(ctx, la)
+ case *UnixAddr:
+ l, err = sl.listenUnix(ctx, la)
+ default:
+ return nil, &OpError{Op: "listen", Net: sl.network, Source: nil, Addr: la, Err: &AddrError{Err: "unexpected address type", Addr: address}}
+ }
+ if err != nil {
+ return nil, &OpError{Op: "listen", Net: sl.network, Source: nil, Addr: la, Err: err} // l is non-nil interface containing nil pointer
+ }
+ return l, nil
+}
+
+// ListenPacket announces on the local network address.
+//
+// See func ListenPacket for a description of the network and address
+// parameters.
+func (lc *ListenConfig) ListenPacket(ctx context.Context, network, address string) (PacketConn, error) {
+ addrs, err := DefaultResolver.resolveAddrList(ctx, "listen", network, address, nil)
+ if err != nil {
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: nil, Err: err}
+ }
+ sl := &sysListener{
+ ListenConfig: *lc,
+ network: network,
+ address: address,
+ }
+ var c PacketConn
+ la := addrs.first(isIPv4)
+ switch la := la.(type) {
+ case *UDPAddr:
+ c, err = sl.listenUDP(ctx, la)
+ case *IPAddr:
+ c, err = sl.listenIP(ctx, la)
+ case *UnixAddr:
+ c, err = sl.listenUnixgram(ctx, la)
+ default:
+ return nil, &OpError{Op: "listen", Net: sl.network, Source: nil, Addr: la, Err: &AddrError{Err: "unexpected address type", Addr: address}}
+ }
+ if err != nil {
+ return nil, &OpError{Op: "listen", Net: sl.network, Source: nil, Addr: la, Err: err} // c is non-nil interface containing nil pointer
+ }
+ return c, nil
+}
+
+// sysListener contains a Listen's parameters and configuration.
+type sysListener struct {
+ ListenConfig
+ network, address string
+}
+
+// Listen announces on the local network address.
+//
+// The network must be "tcp", "tcp4", "tcp6", "unix" or "unixpacket".
+//
+// For TCP networks, if the host in the address parameter is empty or
+// a literal unspecified IP address, Listen listens on all available
+// unicast and anycast IP addresses of the local system.
+// To only use IPv4, use network "tcp4".
+// The address can use a host name, but this is not recommended,
+// because it will create a listener for at most one of the host's IP
+// addresses.
+// If the port in the address parameter is empty or "0", as in
+// "127.0.0.1:" or "[::1]:0", a port number is automatically chosen.
+// The Addr method of Listener can be used to discover the chosen
+// port.
+//
+// See func Dial for a description of the network and address
+// parameters.
+//
+// Listen uses context.Background internally; to specify the context, use
+// ListenConfig.Listen.
+func Listen(network, address string) (Listener, error) {
+ var lc ListenConfig
+ return lc.Listen(context.Background(), network, address)
+}
+
+// ListenPacket announces on the local network address.
+//
+// The network must be "udp", "udp4", "udp6", "unixgram", or an IP
+// transport. The IP transports are "ip", "ip4", or "ip6" followed by
+// a colon and a literal protocol number or a protocol name, as in
+// "ip:1" or "ip:icmp".
+//
+// For UDP and IP networks, if the host in the address parameter is
+// empty or a literal unspecified IP address, ListenPacket listens on
+// all available IP addresses of the local system except multicast IP
+// addresses.
+// To only use IPv4, use network "udp4" or "ip4:proto".
+// The address can use a host name, but this is not recommended,
+// because it will create a listener for at most one of the host's IP
+// addresses.
+// If the port in the address parameter is empty or "0", as in
+// "127.0.0.1:" or "[::1]:0", a port number is automatically chosen.
+// The LocalAddr method of PacketConn can be used to discover the
+// chosen port.
+//
+// See func Dial for a description of the network and address
+// parameters.
+//
+// ListenPacket uses context.Background internally; to specify the context, use
+// ListenConfig.ListenPacket.
+func ListenPacket(network, address string) (PacketConn, error) {
+ var lc ListenConfig
+ return lc.ListenPacket(context.Background(), network, address)
+}
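+
+// For example (illustrative):
+//
+//	pc, err := net.ListenPacket("udp", "127.0.0.1:0")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer pc.Close()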
diff --git a/contrib/go/_std_1.19/src/net/dnsclient.go b/contrib/go/_std_1.19/src/net/dnsclient.go
new file mode 100644
index 0000000000..b609dbd468
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/dnsclient.go
@@ -0,0 +1,228 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+import (
+ "internal/bytealg"
+ "internal/itoa"
+ "sort"
+
+ "golang.org/x/net/dns/dnsmessage"
+)
+
+// fastrandu is provided by the runtime.
+func fastrandu() uint
+
+func randInt() int {
+ return int(fastrandu() >> 1) // clear sign bit
+}
+
+func randIntn(n int) int {
+ return randInt() % n
+}
+
+// reverseaddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
+// address addr suitable for rDNS (PTR) record lookup or an error if it fails
+// to parse the IP address.
+func reverseaddr(addr string) (arpa string, err error) {
+ ip := ParseIP(addr)
+ if ip == nil {
+ return "", &DNSError{Err: "unrecognized address", Name: addr}
+ }
+ if ip.To4() != nil {
+ return itoa.Uitoa(uint(ip[15])) + "." + itoa.Uitoa(uint(ip[14])) + "." + itoa.Uitoa(uint(ip[13])) + "." + itoa.Uitoa(uint(ip[12])) + ".in-addr.arpa.", nil
+ }
+ // Must be IPv6
+ buf := make([]byte, 0, len(ip)*4+len("ip6.arpa."))
+ // Add it, in reverse, to the buffer
+ for i := len(ip) - 1; i >= 0; i-- {
+ v := ip[i]
+ buf = append(buf, hexDigit[v&0xF],
+ '.',
+ hexDigit[v>>4],
+ '.')
+ }
+ // Append "ip6.arpa." and return (buf already has the final .)
+ buf = append(buf, "ip6.arpa."...)
+ return string(buf), nil
+}
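+
+// For example (illustrative):
+//
+//	reverseaddr("192.0.2.1")
+//	// "1.2.0.192.in-addr.arpa."
+//	reverseaddr("2001:db8::1")
+//	// "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa."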
+
+func equalASCIIName(x, y dnsmessage.Name) bool {
+ if x.Length != y.Length {
+ return false
+ }
+ for i := 0; i < int(x.Length); i++ {
+ a := x.Data[i]
+ b := y.Data[i]
+ if 'A' <= a && a <= 'Z' {
+ a += 0x20
+ }
+ if 'A' <= b && b <= 'Z' {
+ b += 0x20
+ }
+ if a != b {
+ return false
+ }
+ }
+ return true
+}
+
+// isDomainName checks if a string is a presentation-format domain name
+// (currently restricted to hostname-compatible "preferred name" LDH labels and
+// SRV-like "underscore labels"; see golang.org/issue/12421).
+func isDomainName(s string) bool {
+ // The root domain name is valid. See golang.org/issue/45715.
+ if s == "." {
+ return true
+ }
+
+ // See RFC 1035, RFC 3696.
+ // Presentation format has dots before every label except the first, and the
+ // terminal empty label is optional here because we assume fully-qualified
+ // (absolute) input. We must therefore reserve space for the first and last
+ // labels' length octets in wire format, where they are necessary and the
+ // maximum total length is 255.
+ // So our _effective_ maximum is 253, but 254 is not rejected if the last
+ // character is a dot.
+ l := len(s)
+ if l == 0 || l > 254 || l == 254 && s[l-1] != '.' {
+ return false
+ }
+
+ last := byte('.')
+ nonNumeric := false // true once we've seen a letter or hyphen
+ partlen := 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ switch {
+ default:
+ return false
+ case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_':
+ nonNumeric = true
+ partlen++
+ case '0' <= c && c <= '9':
+ // fine
+ partlen++
+ case c == '-':
+ // Byte before dash cannot be dot.
+ if last == '.' {
+ return false
+ }
+ partlen++
+ nonNumeric = true
+ case c == '.':
+ // Byte before dot cannot be dot, dash.
+ if last == '.' || last == '-' {
+ return false
+ }
+ if partlen > 63 || partlen == 0 {
+ return false
+ }
+ partlen = 0
+ }
+ last = c
+ }
+ if last == '-' || partlen > 63 {
+ return false
+ }
+
+ return nonNumeric
+}
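+
+// For example (illustrative):
+//
+//	isDomainName("golang.org")            // true
+//	isDomainName("_sip._tcp.example.com") // true (underscore labels)
+//	isDomainName("a..b")                  // false (empty label)
+//	isDomainName("-name.example.com")     // false (label starts with '-')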
+
+// absDomainName returns an absolute domain name which ends with a
+// trailing dot to match pure Go reverse resolver and all other lookup
+// routines.
+// See golang.org/issue/12189.
+// But we don't want to add dots for local names from /etc/hosts.
+// It's hard to tell, so we settle on the heuristic that names without dots
+// (like "localhost" or "myhost") do not get trailing dots, but any other
+// names do.
+func absDomainName(s string) string {
+ if bytealg.IndexByteString(s, '.') != -1 && s[len(s)-1] != '.' {
+ s += "."
+ }
+ return s
+}
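+
+// For example (illustrative): absDomainName("localhost") returns
+// "localhost" unchanged, while absDomainName("example.com") returns
+// "example.com.".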
+
+// An SRV represents a single DNS SRV record.
+type SRV struct {
+ Target string
+ Port uint16
+ Priority uint16
+ Weight uint16
+}
+
+// byPriorityWeight sorts SRV records by ascending priority and weight.
+type byPriorityWeight []*SRV
+
+func (s byPriorityWeight) Len() int { return len(s) }
+func (s byPriorityWeight) Less(i, j int) bool {
+ return s[i].Priority < s[j].Priority || (s[i].Priority == s[j].Priority && s[i].Weight < s[j].Weight)
+}
+func (s byPriorityWeight) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// shuffleByWeight shuffles SRV records by weight using the algorithm
+// described in RFC 2782.
+func (addrs byPriorityWeight) shuffleByWeight() {
+ sum := 0
+ for _, addr := range addrs {
+ sum += int(addr.Weight)
+ }
+ for sum > 0 && len(addrs) > 1 {
+ s := 0
+ n := randIntn(sum)
+ for i := range addrs {
+ s += int(addrs[i].Weight)
+ if s > n {
+ if i > 0 {
+ addrs[0], addrs[i] = addrs[i], addrs[0]
+ }
+ break
+ }
+ }
+ sum -= int(addrs[0].Weight)
+ addrs = addrs[1:]
+ }
+}
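+
+// For example (illustrative): with weights {60, 30, 10}, a record is
+// moved to the front with probability proportional to its weight
+// (60%, 30%, 10%), and the process then repeats on the remaining
+// records, per RFC 2782.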
+
+// sort reorders SRV records as specified in RFC 2782.
+func (addrs byPriorityWeight) sort() {
+ sort.Sort(addrs)
+ i := 0
+ for j := 1; j < len(addrs); j++ {
+ if addrs[i].Priority != addrs[j].Priority {
+ addrs[i:j].shuffleByWeight()
+ i = j
+ }
+ }
+ addrs[i:].shuffleByWeight()
+}
+
+// An MX represents a single DNS MX record.
+type MX struct {
+ Host string
+ Pref uint16
+}
+
+// byPref implements sort.Interface to sort MX records by preference.
+type byPref []*MX
+
+func (s byPref) Len() int { return len(s) }
+func (s byPref) Less(i, j int) bool { return s[i].Pref < s[j].Pref }
+func (s byPref) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// sort reorders MX records as specified in RFC 5321.
+func (s byPref) sort() {
+ for i := range s {
+ j := randIntn(i + 1)
+ s[i], s[j] = s[j], s[i]
+ }
+ sort.Sort(s)
+}
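+
+// The initial Fisher-Yates shuffle randomizes the relative order of
+// records with equal preference, since sort.Sort is not stable.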
+
+// An NS represents a single DNS NS record.
+type NS struct {
+ Host string
+}
diff --git a/contrib/go/_std_1.19/src/net/dnsclient_unix.go b/contrib/go/_std_1.19/src/net/dnsclient_unix.go
new file mode 100644
index 0000000000..088c81adee
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/dnsclient_unix.go
@@ -0,0 +1,826 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !js
+
+// DNS client: see RFC 1035.
+// Has to be linked into package net for Dial.
+
+// TODO(rsc):
+// Could potentially handle many outstanding lookups faster.
+// Random UDP source port (net.Dial should do that for us).
+// Random request IDs.
+
+package net
+
+import (
+ "context"
+ "errors"
+ "internal/itoa"
+ "io"
+ "os"
+ "runtime"
+ "sync"
+ "time"
+
+ "golang.org/x/net/dns/dnsmessage"
+)
+
+const (
+ // to be used as a useTCP parameter to exchange
+ useTCPOnly = true
+ useUDPOrTCP = false
+
+ // Maximum DNS packet size.
+ // Value taken from https://dnsflagday.net/2020/.
+ maxDNSPacketSize = 1232
+)
+
+var (
+ errLameReferral = errors.New("lame referral")
+ errCannotUnmarshalDNSMessage = errors.New("cannot unmarshal DNS message")
+ errCannotMarshalDNSMessage = errors.New("cannot marshal DNS message")
+ errServerMisbehaving = errors.New("server misbehaving")
+ errInvalidDNSResponse = errors.New("invalid DNS response")
+ errNoAnswerFromDNSServer = errors.New("no answer from DNS server")
+
+ // errServerTemporarilyMisbehaving is like errServerMisbehaving, except
+ // that when it gets translated to a DNSError, the IsTemporary field
+ // gets set to true.
+ errServerTemporarilyMisbehaving = errors.New("server misbehaving")
+)
+
+func newRequest(q dnsmessage.Question) (id uint16, udpReq, tcpReq []byte, err error) {
+ id = uint16(randInt())
+ b := dnsmessage.NewBuilder(make([]byte, 2, 514), dnsmessage.Header{ID: id, RecursionDesired: true})
+ b.EnableCompression()
+ if err := b.StartQuestions(); err != nil {
+ return 0, nil, nil, err
+ }
+ if err := b.Question(q); err != nil {
+ return 0, nil, nil, err
+ }
+
+ // Accept packets up to maxDNSPacketSize. RFC 6891.
+ if err := b.StartAdditionals(); err != nil {
+ return 0, nil, nil, err
+ }
+ var rh dnsmessage.ResourceHeader
+ if err := rh.SetEDNS0(maxDNSPacketSize, dnsmessage.RCodeSuccess, false); err != nil {
+ return 0, nil, nil, err
+ }
+ if err := b.OPTResource(rh, dnsmessage.OPTResource{}); err != nil {
+ return 0, nil, nil, err
+ }
+
+ tcpReq, err = b.Finish()
+ if err != nil {
+ return 0, nil, nil, err
+ }
+ udpReq = tcpReq[2:]
+ l := len(tcpReq) - 2
+ tcpReq[0] = byte(l >> 8)
+ tcpReq[1] = byte(l)
+ return id, udpReq, tcpReq, nil
+}
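+
+// The two-byte prefix written into tcpReq[0:2] is the length framing
+// that RFC 1035 section 4.2.2 requires for DNS over TCP; udpReq
+// aliases the same buffer just past that prefix.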
+
+func checkResponse(reqID uint16, reqQues dnsmessage.Question, respHdr dnsmessage.Header, respQues dnsmessage.Question) bool {
+ if !respHdr.Response {
+ return false
+ }
+ if reqID != respHdr.ID {
+ return false
+ }
+ if reqQues.Type != respQues.Type || reqQues.Class != respQues.Class || !equalASCIIName(reqQues.Name, respQues.Name) {
+ return false
+ }
+ return true
+}
+
+func dnsPacketRoundTrip(c Conn, id uint16, query dnsmessage.Question, b []byte) (dnsmessage.Parser, dnsmessage.Header, error) {
+ if _, err := c.Write(b); err != nil {
+ return dnsmessage.Parser{}, dnsmessage.Header{}, err
+ }
+
+ b = make([]byte, maxDNSPacketSize)
+ for {
+ n, err := c.Read(b)
+ if err != nil {
+ return dnsmessage.Parser{}, dnsmessage.Header{}, err
+ }
+ var p dnsmessage.Parser
+ // Ignore invalid responses as they may be malicious
+ // forgery attempts. Instead continue waiting until
+ // timeout. See golang.org/issue/13281.
+ h, err := p.Start(b[:n])
+ if err != nil {
+ continue
+ }
+ q, err := p.Question()
+ if err != nil || !checkResponse(id, query, h, q) {
+ continue
+ }
+ return p, h, nil
+ }
+}
+
+func dnsStreamRoundTrip(c Conn, id uint16, query dnsmessage.Question, b []byte) (dnsmessage.Parser, dnsmessage.Header, error) {
+ if _, err := c.Write(b); err != nil {
+ return dnsmessage.Parser{}, dnsmessage.Header{}, err
+ }
+
+ b = make([]byte, 1280) // 1280 is a reasonable initial size for IP over Ethernet, see RFC 4035
+ if _, err := io.ReadFull(c, b[:2]); err != nil {
+ return dnsmessage.Parser{}, dnsmessage.Header{}, err
+ }
+ l := int(b[0])<<8 | int(b[1])
+ if l > len(b) {
+ b = make([]byte, l)
+ }
+ n, err := io.ReadFull(c, b[:l])
+ if err != nil {
+ return dnsmessage.Parser{}, dnsmessage.Header{}, err
+ }
+ var p dnsmessage.Parser
+ h, err := p.Start(b[:n])
+ if err != nil {
+ return dnsmessage.Parser{}, dnsmessage.Header{}, errCannotUnmarshalDNSMessage
+ }
+ q, err := p.Question()
+ if err != nil {
+ return dnsmessage.Parser{}, dnsmessage.Header{}, errCannotUnmarshalDNSMessage
+ }
+ if !checkResponse(id, query, h, q) {
+ return dnsmessage.Parser{}, dnsmessage.Header{}, errInvalidDNSResponse
+ }
+ return p, h, nil
+}
+
+// exchange sends a query on the connection and hopes for a response.
+func (r *Resolver) exchange(ctx context.Context, server string, q dnsmessage.Question, timeout time.Duration, useTCP bool) (dnsmessage.Parser, dnsmessage.Header, error) {
+ q.Class = dnsmessage.ClassINET
+ id, udpReq, tcpReq, err := newRequest(q)
+ if err != nil {
+ return dnsmessage.Parser{}, dnsmessage.Header{}, errCannotMarshalDNSMessage
+ }
+ var networks []string
+ if useTCP {
+ networks = []string{"tcp"}
+ } else {
+ networks = []string{"udp", "tcp"}
+ }
+ for _, network := range networks {
+ ctx, cancel := context.WithDeadline(ctx, time.Now().Add(timeout))
+ defer cancel()
+
+ c, err := r.dial(ctx, network, server)
+ if err != nil {
+ return dnsmessage.Parser{}, dnsmessage.Header{}, err
+ }
+ if d, ok := ctx.Deadline(); ok && !d.IsZero() {
+ c.SetDeadline(d)
+ }
+ var p dnsmessage.Parser
+ var h dnsmessage.Header
+ if _, ok := c.(PacketConn); ok {
+ p, h, err = dnsPacketRoundTrip(c, id, q, udpReq)
+ } else {
+ p, h, err = dnsStreamRoundTrip(c, id, q, tcpReq)
+ }
+ c.Close()
+ if err != nil {
+ return dnsmessage.Parser{}, dnsmessage.Header{}, mapErr(err)
+ }
+ if err := p.SkipQuestion(); err != dnsmessage.ErrSectionDone {
+ return dnsmessage.Parser{}, dnsmessage.Header{}, errInvalidDNSResponse
+ }
+ if h.Truncated { // see RFC 5966
+ continue
+ }
+ return p, h, nil
+ }
+ return dnsmessage.Parser{}, dnsmessage.Header{}, errNoAnswerFromDNSServer
+}
+
+// checkHeader performs basic sanity checks on the header.
+func checkHeader(p *dnsmessage.Parser, h dnsmessage.Header) error {
+ if h.RCode == dnsmessage.RCodeNameError {
+ return errNoSuchHost
+ }
+
+ _, err := p.AnswerHeader()
+ if err != nil && err != dnsmessage.ErrSectionDone {
+ return errCannotUnmarshalDNSMessage
+ }
+
+ // libresolv continues to the next server when it receives
+ // an invalid referral response. See golang.org/issue/15434.
+ if h.RCode == dnsmessage.RCodeSuccess && !h.Authoritative && !h.RecursionAvailable && err == dnsmessage.ErrSectionDone {
+ return errLameReferral
+ }
+
+ if h.RCode != dnsmessage.RCodeSuccess && h.RCode != dnsmessage.RCodeNameError {
+ // None of the error codes make sense
+ // for the query we sent. If we didn't get
+ // a name error and we didn't get success,
+ // the server is behaving incorrectly or
+ // having temporary trouble.
+ if h.RCode == dnsmessage.RCodeServerFailure {
+ return errServerTemporarilyMisbehaving
+ }
+ return errServerMisbehaving
+ }
+
+ return nil
+}
+
+func skipToAnswer(p *dnsmessage.Parser, qtype dnsmessage.Type) error {
+ for {
+ h, err := p.AnswerHeader()
+ if err == dnsmessage.ErrSectionDone {
+ return errNoSuchHost
+ }
+ if err != nil {
+ return errCannotUnmarshalDNSMessage
+ }
+ if h.Type == qtype {
+ return nil
+ }
+ if err := p.SkipAnswer(); err != nil {
+ return errCannotUnmarshalDNSMessage
+ }
+ }
+}
+
+// tryOneName does a lookup for a single name, which must be rooted
+// (otherwise the answers will not match the query name and will not
+// be found).
+func (r *Resolver) tryOneName(ctx context.Context, cfg *dnsConfig, name string, qtype dnsmessage.Type) (dnsmessage.Parser, string, error) {
+ var lastErr error
+ serverOffset := cfg.serverOffset()
+ sLen := uint32(len(cfg.servers))
+
+ n, err := dnsmessage.NewName(name)
+ if err != nil {
+ return dnsmessage.Parser{}, "", errCannotMarshalDNSMessage
+ }
+ q := dnsmessage.Question{
+ Name: n,
+ Type: qtype,
+ Class: dnsmessage.ClassINET,
+ }
+
+ for i := 0; i < cfg.attempts; i++ {
+ for j := uint32(0); j < sLen; j++ {
+ server := cfg.servers[(serverOffset+j)%sLen]
+
+ p, h, err := r.exchange(ctx, server, q, cfg.timeout, cfg.useTCP)
+ if err != nil {
+ dnsErr := &DNSError{
+ Err: err.Error(),
+ Name: name,
+ Server: server,
+ }
+ if nerr, ok := err.(Error); ok && nerr.Timeout() {
+ dnsErr.IsTimeout = true
+ }
+ // Set IsTemporary for socket-level errors. Note that this flag
+ // may also be used to indicate a SERVFAIL response.
+ if _, ok := err.(*OpError); ok {
+ dnsErr.IsTemporary = true
+ }
+ lastErr = dnsErr
+ continue
+ }
+
+ if err := checkHeader(&p, h); err != nil {
+ dnsErr := &DNSError{
+ Err: err.Error(),
+ Name: name,
+ Server: server,
+ }
+ if err == errServerTemporarilyMisbehaving {
+ dnsErr.IsTemporary = true
+ }
+ if err == errNoSuchHost {
+ // The name does not exist, so trying
+ // another server won't help.
+
+ dnsErr.IsNotFound = true
+ return p, server, dnsErr
+ }
+ lastErr = dnsErr
+ continue
+ }
+
+ err = skipToAnswer(&p, qtype)
+ if err == nil {
+ return p, server, nil
+ }
+ lastErr = &DNSError{
+ Err: err.Error(),
+ Name: name,
+ Server: server,
+ }
+ if err == errNoSuchHost {
+ // The name does not exist, so trying another
+ // server won't help.
+
+ lastErr.(*DNSError).IsNotFound = true
+ return p, server, lastErr
+ }
+ }
+ }
+ return dnsmessage.Parser{}, "", lastErr
+}
+
+// A resolverConfig represents a DNS stub resolver configuration.
+type resolverConfig struct {
+ initOnce sync.Once // guards init of resolverConfig
+
+ // ch is used as a semaphore that only allows one lookup at a
+ // time to recheck resolv.conf.
+ ch chan struct{} // guards lastChecked and modTime
+ lastChecked time.Time // last time resolv.conf was checked
+
+ mu sync.RWMutex // protects dnsConfig
+ dnsConfig *dnsConfig // parsed resolv.conf structure used in lookups
+}
+
+var resolvConf resolverConfig
+
+// init initializes conf and is only called via conf.initOnce.
+func (conf *resolverConfig) init() {
+ // Set dnsConfig and lastChecked so we don't parse
+ // resolv.conf twice the first time.
+ conf.dnsConfig = systemConf().resolv
+ if conf.dnsConfig == nil {
+ conf.dnsConfig = dnsReadConfig("/etc/resolv.conf")
+ }
+ conf.lastChecked = time.Now()
+
+ // Prepare ch so that only one update of resolverConfig may
+ // run at once.
+ conf.ch = make(chan struct{}, 1)
+}
+
+// tryUpdate tries to update conf with the named resolv.conf file.
+// The name variable only exists for testing. It is otherwise always
+// "/etc/resolv.conf".
+func (conf *resolverConfig) tryUpdate(name string) {
+ conf.initOnce.Do(conf.init)
+
+ // Ensure only one update at a time checks resolv.conf.
+ if !conf.tryAcquireSema() {
+ return
+ }
+ defer conf.releaseSema()
+
+ now := time.Now()
+ if conf.lastChecked.After(now.Add(-5 * time.Second)) {
+ return
+ }
+ conf.lastChecked = now
+
+ switch runtime.GOOS {
+ case "windows":
+ // There's no file on disk, so don't bother checking
+ // and failing.
+ //
+ // The Windows implementation of dnsReadConfig (called
+ // below) ignores the name.
+ default:
+ var mtime time.Time
+ if fi, err := os.Stat(name); err == nil {
+ mtime = fi.ModTime()
+ }
+ if mtime.Equal(conf.dnsConfig.mtime) {
+ return
+ }
+ }
+
+ dnsConf := dnsReadConfig(name)
+ conf.mu.Lock()
+ conf.dnsConfig = dnsConf
+ conf.mu.Unlock()
+}
+
+func (conf *resolverConfig) tryAcquireSema() bool {
+ select {
+ case conf.ch <- struct{}{}:
+ return true
+ default:
+ return false
+ }
+}
+
+func (conf *resolverConfig) releaseSema() {
+ <-conf.ch
+}
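+
+// The buffered channel of capacity 1 acts as a try-lock: a successful
+// non-blocking send acquires the semaphore and a receive releases it.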
+
+func (r *Resolver) lookup(ctx context.Context, name string, qtype dnsmessage.Type) (dnsmessage.Parser, string, error) {
+ if !isDomainName(name) {
+ // We used to use "invalid domain name" as the error,
+ // but that is a detail of the specific lookup mechanism.
+ // Other lookups might allow broader name syntax
+ // (for example Multicast DNS allows UTF-8; see RFC 6762).
+ // For consistency with libc resolvers, report no such host.
+ return dnsmessage.Parser{}, "", &DNSError{Err: errNoSuchHost.Error(), Name: name, IsNotFound: true}
+ }
+ resolvConf.tryUpdate("/etc/resolv.conf")
+ resolvConf.mu.RLock()
+ conf := resolvConf.dnsConfig
+ resolvConf.mu.RUnlock()
+ var (
+ p dnsmessage.Parser
+ server string
+ err error
+ )
+ for _, fqdn := range conf.nameList(name) {
+ p, server, err = r.tryOneName(ctx, conf, fqdn, qtype)
+ if err == nil {
+ break
+ }
+ if nerr, ok := err.(Error); ok && nerr.Temporary() && r.strictErrors() {
+ // If we hit a temporary error with StrictErrors enabled,
+ // stop immediately instead of trying more names.
+ break
+ }
+ }
+ if err == nil {
+ return p, server, nil
+ }
+ if err, ok := err.(*DNSError); ok {
+ // Show original name passed to lookup, not suffixed one.
+ // In general we might have tried many suffixes; showing
+ // just one is misleading. See also golang.org/issue/6324.
+ err.Name = name
+ }
+ return dnsmessage.Parser{}, "", err
+}
+
+// avoidDNS reports whether this is a hostname for which we should not
+// use DNS. Currently this includes only .onion, per RFC 7686. See
+// golang.org/issue/13705. Does not cover .local names (RFC 6762),
+// see golang.org/issue/16739.
+func avoidDNS(name string) bool {
+ if name == "" {
+ return true
+ }
+ if name[len(name)-1] == '.' {
+ name = name[:len(name)-1]
+ }
+ return stringsHasSuffixFold(name, ".onion")
+}
+
+// nameList returns a list of names for sequential DNS queries.
+func (conf *dnsConfig) nameList(name string) []string {
+ if avoidDNS(name) {
+ return nil
+ }
+
+ // Check name length (see isDomainName).
+ l := len(name)
+ rooted := l > 0 && name[l-1] == '.'
+ if l > 254 || l == 254 && rooted {
+ return nil
+ }
+
+ // If name is rooted (trailing dot), try only that name.
+ if rooted {
+ return []string{name}
+ }
+
+ hasNdots := count(name, '.') >= conf.ndots
+ name += "."
+ l++
+
+ // Build list of search choices.
+ names := make([]string, 0, 1+len(conf.search))
+ // If name has enough dots, try unsuffixed first.
+ if hasNdots {
+ names = append(names, name)
+ }
+ // Try suffixes that are not too long (see isDomainName).
+ for _, suffix := range conf.search {
+ if l+len(suffix) <= 254 {
+ names = append(names, name+suffix)
+ }
+ }
+ // Try unsuffixed, if not tried first above.
+ if !hasNdots {
+ names = append(names, name)
+ }
+ return names
+}
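+
+// For example (illustrative): with "search corp.example.com" and
+// ndots:1, nameList("db") yields
+//
+//	db.corp.example.com.
+//	db.
+//
+// while nameList("db.corp") tries "db.corp." first because the name
+// already contains at least ndots dots.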
+
+// hostLookupOrder specifies the order of LookupHost lookup strategies.
+// It is basically a simplified representation of nsswitch.conf.
+// "files" means /etc/hosts.
+type hostLookupOrder int
+
+const (
+ // hostLookupCgo means defer to cgo.
+ hostLookupCgo hostLookupOrder = iota
+ hostLookupFilesDNS // files first
+ hostLookupDNSFiles // dns first
+ hostLookupFiles // only files
+ hostLookupDNS // only DNS
+)
+
+var lookupOrderName = map[hostLookupOrder]string{
+ hostLookupCgo: "cgo",
+ hostLookupFilesDNS: "files,dns",
+ hostLookupDNSFiles: "dns,files",
+ hostLookupFiles: "files",
+ hostLookupDNS: "dns",
+}
+
+func (o hostLookupOrder) String() string {
+ if s, ok := lookupOrderName[o]; ok {
+ return s
+ }
+ return "hostLookupOrder=" + itoa.Itoa(int(o)) + "??"
+}
+
+// goLookupHost is the native Go implementation of LookupHost.
+// Used only if cgoLookupHost refuses to handle the request
+// (that is, only if cgoLookupHost is the stub in cgo_stub.go).
+// Normally we let cgo use the C library resolver instead of
+// depending on our lookup code, so that Go and C get the same
+// answers.
+func (r *Resolver) goLookupHost(ctx context.Context, name string) (addrs []string, err error) {
+ return r.goLookupHostOrder(ctx, name, hostLookupFilesDNS)
+}
+
+func (r *Resolver) goLookupHostOrder(ctx context.Context, name string, order hostLookupOrder) (addrs []string, err error) {
+ if order == hostLookupFilesDNS || order == hostLookupFiles {
+ // Use entries from /etc/hosts if they match.
+ addrs = lookupStaticHost(name)
+ if len(addrs) > 0 || order == hostLookupFiles {
+ return
+ }
+ }
+ ips, _, err := r.goLookupIPCNAMEOrder(ctx, "ip", name, order)
+ if err != nil {
+ return
+ }
+ addrs = make([]string, 0, len(ips))
+ for _, ip := range ips {
+ addrs = append(addrs, ip.String())
+ }
+ return
+}
+
+// lookup entries from /etc/hosts
+func goLookupIPFiles(name string) (addrs []IPAddr) {
+ for _, haddr := range lookupStaticHost(name) {
+ haddr, zone := splitHostZone(haddr)
+ if ip := ParseIP(haddr); ip != nil {
+ addr := IPAddr{IP: ip, Zone: zone}
+ addrs = append(addrs, addr)
+ }
+ }
+ sortByRFC6724(addrs)
+ return
+}
+
+// goLookupIP is the native Go implementation of LookupIP.
+// The libc versions are in cgo_*.go.
+func (r *Resolver) goLookupIP(ctx context.Context, network, host string) (addrs []IPAddr, err error) {
+ order := systemConf().hostLookupOrder(r, host)
+ addrs, _, err = r.goLookupIPCNAMEOrder(ctx, network, host, order)
+ return
+}
+
+func (r *Resolver) goLookupIPCNAMEOrder(ctx context.Context, network, name string, order hostLookupOrder) (addrs []IPAddr, cname dnsmessage.Name, err error) {
+ if order == hostLookupFilesDNS || order == hostLookupFiles {
+ addrs = goLookupIPFiles(name)
+ if len(addrs) > 0 || order == hostLookupFiles {
+ return addrs, dnsmessage.Name{}, nil
+ }
+ }
+ if !isDomainName(name) {
+ // See comment in func lookup above about use of errNoSuchHost.
+ return nil, dnsmessage.Name{}, &DNSError{Err: errNoSuchHost.Error(), Name: name, IsNotFound: true}
+ }
+ resolvConf.tryUpdate("/etc/resolv.conf")
+ resolvConf.mu.RLock()
+ conf := resolvConf.dnsConfig
+ resolvConf.mu.RUnlock()
+ type result struct {
+ p dnsmessage.Parser
+ server string
+ error
+ }
+ lane := make(chan result, 1)
+ qtypes := []dnsmessage.Type{dnsmessage.TypeA, dnsmessage.TypeAAAA}
+ switch ipVersion(network) {
+ case '4':
+ qtypes = []dnsmessage.Type{dnsmessage.TypeA}
+ case '6':
+ qtypes = []dnsmessage.Type{dnsmessage.TypeAAAA}
+ }
+ var queryFn func(fqdn string, qtype dnsmessage.Type)
+ var responseFn func(fqdn string, qtype dnsmessage.Type) result
+ if conf.singleRequest {
+ queryFn = func(fqdn string, qtype dnsmessage.Type) {}
+ responseFn = func(fqdn string, qtype dnsmessage.Type) result {
+ dnsWaitGroup.Add(1)
+ defer dnsWaitGroup.Done()
+ p, server, err := r.tryOneName(ctx, conf, fqdn, qtype)
+ return result{p, server, err}
+ }
+ } else {
+ queryFn = func(fqdn string, qtype dnsmessage.Type) {
+ dnsWaitGroup.Add(1)
+ go func(qtype dnsmessage.Type) {
+ p, server, err := r.tryOneName(ctx, conf, fqdn, qtype)
+ lane <- result{p, server, err}
+ dnsWaitGroup.Done()
+ }(qtype)
+ }
+ responseFn = func(fqdn string, qtype dnsmessage.Type) result {
+ return <-lane
+ }
+ }
+ var lastErr error
+ for _, fqdn := range conf.nameList(name) {
+ for _, qtype := range qtypes {
+ queryFn(fqdn, qtype)
+ }
+ hitStrictError := false
+ for _, qtype := range qtypes {
+ result := responseFn(fqdn, qtype)
+ if result.error != nil {
+ if nerr, ok := result.error.(Error); ok && nerr.Temporary() && r.strictErrors() {
+ // This error will abort the nameList loop.
+ hitStrictError = true
+ lastErr = result.error
+ } else if lastErr == nil || fqdn == name+"." {
+ // Prefer error for original name.
+ lastErr = result.error
+ }
+ continue
+ }
+
+ // Presotto says it's okay to assume that servers listed in
+ // /etc/resolv.conf are recursive resolvers.
+ //
+ // We asked for recursion, so it should have included all the
+ // answers we need in this one packet.
+ //
+ // Further, RFC 1035 section 4.3.1 says that "the recursive
+ // response to a query will be... The answer to the query,
+			// possibly preface[d] by one or more CNAME RRs that specify
+ // aliases encountered on the way to an answer."
+ //
+ // Therefore, we should be able to assume that we can ignore
+ // CNAMEs and that the A and AAAA records we requested are
+ // for the canonical name.
+
+ loop:
+ for {
+ h, err := result.p.AnswerHeader()
+ if err != nil && err != dnsmessage.ErrSectionDone {
+ lastErr = &DNSError{
+ Err: "cannot marshal DNS message",
+ Name: name,
+ Server: result.server,
+ }
+ }
+ if err != nil {
+ break
+ }
+ switch h.Type {
+ case dnsmessage.TypeA:
+ a, err := result.p.AResource()
+ if err != nil {
+ lastErr = &DNSError{
+ Err: "cannot marshal DNS message",
+ Name: name,
+ Server: result.server,
+ }
+ break loop
+ }
+ addrs = append(addrs, IPAddr{IP: IP(a.A[:])})
+
+ case dnsmessage.TypeAAAA:
+ aaaa, err := result.p.AAAAResource()
+ if err != nil {
+ lastErr = &DNSError{
+ Err: "cannot marshal DNS message",
+ Name: name,
+ Server: result.server,
+ }
+ break loop
+ }
+ addrs = append(addrs, IPAddr{IP: IP(aaaa.AAAA[:])})
+
+ default:
+ if err := result.p.SkipAnswer(); err != nil {
+ lastErr = &DNSError{
+ Err: "cannot marshal DNS message",
+ Name: name,
+ Server: result.server,
+ }
+ break loop
+ }
+ continue
+ }
+ if cname.Length == 0 && h.Name.Length != 0 {
+ cname = h.Name
+ }
+ }
+ }
+ if hitStrictError {
+ // If either family hit an error with StrictErrors enabled,
+			// discard all addresses. This ensures that network flakiness
+			// cannot make a dualstack hostname appear IPv4- or IPv6-only.
+ addrs = nil
+ break
+ }
+ if len(addrs) > 0 {
+ break
+ }
+ }
+ if lastErr, ok := lastErr.(*DNSError); ok {
+ // Show original name passed to lookup, not suffixed one.
+ // In general we might have tried many suffixes; showing
+ // just one is misleading. See also golang.org/issue/6324.
+ lastErr.Name = name
+ }
+ sortByRFC6724(addrs)
+ if len(addrs) == 0 {
+ if order == hostLookupDNSFiles {
+ addrs = goLookupIPFiles(name)
+ }
+ if len(addrs) == 0 && lastErr != nil {
+ return nil, dnsmessage.Name{}, lastErr
+ }
+ }
+ return addrs, cname, nil
+}
+
+// goLookupCNAME is the native Go (non-cgo) implementation of LookupCNAME.
+func (r *Resolver) goLookupCNAME(ctx context.Context, host string) (string, error) {
+ order := systemConf().hostLookupOrder(r, host)
+ _, cname, err := r.goLookupIPCNAMEOrder(ctx, "ip", host, order)
+ return cname.String(), err
+}
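
For callers outside the package, this path is reached through the public net.Resolver API. A minimal sketch, assuming a placeholder host name; PreferGo selects the pure-Go resolver implemented above:

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	// PreferGo forces the pure-Go resolver, i.e. the goLookup* path above.
	r := &net.Resolver{PreferGo: true}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	cname, err := r.LookupCNAME(ctx, "www.example.com") // placeholder host
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("canonical name:", cname)
}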
+
+// goLookupPTR is the native Go implementation of LookupAddr.
+// Used only if cgoLookupPTR refuses to handle the request (that is,
+// only if cgoLookupPTR is the stub in cgo_stub.go).
+// Normally we let cgo use the C library resolver instead of depending
+// on our lookup code, so that Go and C get the same answers.
+func (r *Resolver) goLookupPTR(ctx context.Context, addr string) ([]string, error) {
+ names := lookupStaticAddr(addr)
+ if len(names) > 0 {
+ return names, nil
+ }
+ arpa, err := reverseaddr(addr)
+ if err != nil {
+ return nil, err
+ }
+ p, server, err := r.lookup(ctx, arpa, dnsmessage.TypePTR)
+ if err != nil {
+ return nil, err
+ }
+ var ptrs []string
+ for {
+ h, err := p.AnswerHeader()
+ if err == dnsmessage.ErrSectionDone {
+ break
+ }
+ if err != nil {
+ return nil, &DNSError{
+ Err: "cannot marshal DNS message",
+ Name: addr,
+ Server: server,
+ }
+ }
+ if h.Type != dnsmessage.TypePTR {
+ err := p.SkipAnswer()
+ if err != nil {
+ return nil, &DNSError{
+ Err: "cannot marshal DNS message",
+ Name: addr,
+ Server: server,
+ }
+ }
+ continue
+ }
+ ptr, err := p.PTRResource()
+ if err != nil {
+ return nil, &DNSError{
+ Err: "cannot marshal DNS message",
+ Name: addr,
+ Server: server,
+ }
+ }
+ ptrs = append(ptrs, ptr.PTR.String())
+
+ }
+ return ptrs, nil
+}
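
The corresponding public entry point is Resolver.LookupAddr. A small sketch, with a placeholder IP address:

package main

import (
	"context"
	"fmt"
	"log"
	"net"
)

func main() {
	r := &net.Resolver{PreferGo: true} // exercises the goLookupPTR path above
	names, err := r.LookupAddr(context.Background(), "8.8.8.8") // placeholder IP
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range names {
		fmt.Println(name) // PTR names come back rooted, e.g. "dns.google."
	}
}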
diff --git a/contrib/go/_std_1.19/src/net/dnsconfig.go b/contrib/go/_std_1.19/src/net/dnsconfig.go
new file mode 100644
index 0000000000..091b548301
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/dnsconfig.go
@@ -0,0 +1,43 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+import (
+ "os"
+ "sync/atomic"
+ "time"
+)
+
+var (
+ defaultNS = []string{"127.0.0.1:53", "[::1]:53"}
+ getHostname = os.Hostname // variable for testing
+)
+
+type dnsConfig struct {
+ servers []string // server addresses (in host:port form) to use
+ search []string // rooted suffixes to append to local name
+ ndots int // number of dots in name to trigger absolute lookup
+ timeout time.Duration // wait before giving up on a query, including retries
+ attempts int // lost packets before giving up on server
+ rotate bool // round robin among servers
+ unknownOpt bool // anything unknown was encountered
+ lookup []string // OpenBSD top-level database "lookup" order
+ err error // any error that occurs during open of resolv.conf
+ mtime time.Time // time of resolv.conf modification
+ soffset uint32 // used by serverOffset
+ singleRequest bool // use sequential A and AAAA queries instead of parallel queries
+ useTCP bool // force usage of TCP for DNS resolutions
+}
+
+// serverOffset returns an offset that can be used to determine
+// indices of servers in c.servers when making queries.
+// When the rotate option is enabled, this offset increases.
+// Otherwise it is always 0.
+func (c *dnsConfig) serverOffset() uint32 {
+ if c.rotate {
+ return atomic.AddUint32(&c.soffset, 1) - 1 // return 0 to start
+ }
+ return 0
+}
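
Callers combine this offset with modular indexing to rotate through the server list. A standalone mirror of the logic (not the net package itself), for illustration only:

package main

import (
	"fmt"
	"sync/atomic"
)

// Standalone mirror of the rotation logic above.
type conf struct {
	servers []string
	rotate  bool
	soffset uint32
}

func (c *conf) serverOffset() uint32 {
	if c.rotate {
		return atomic.AddUint32(&c.soffset, 1) - 1
	}
	return 0
}

func main() {
	c := &conf{servers: []string{"10.0.0.1:53", "10.0.0.2:53"}, rotate: true}
	for q := 0; q < 4; q++ {
		idx := int(c.serverOffset()) % len(c.servers)
		fmt.Printf("query %d -> %s\n", q, c.servers[idx]) // alternates between the servers
	}
}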
diff --git a/contrib/go/_std_1.19/src/net/dnsconfig_unix.go b/contrib/go/_std_1.19/src/net/dnsconfig_unix.go
new file mode 100644
index 0000000000..94cd09ec71
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/dnsconfig_unix.go
@@ -0,0 +1,157 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !js && !windows
+
+// Read system DNS config from /etc/resolv.conf
+
+package net
+
+import (
+ "internal/bytealg"
+ "time"
+)
+
+// See resolv.conf(5) on a Linux machine.
+func dnsReadConfig(filename string) *dnsConfig {
+ conf := &dnsConfig{
+ ndots: 1,
+ timeout: 5 * time.Second,
+ attempts: 2,
+ }
+ file, err := open(filename)
+ if err != nil {
+ conf.servers = defaultNS
+ conf.search = dnsDefaultSearch()
+ conf.err = err
+ return conf
+ }
+ defer file.close()
+ if fi, err := file.file.Stat(); err == nil {
+ conf.mtime = fi.ModTime()
+ } else {
+ conf.servers = defaultNS
+ conf.search = dnsDefaultSearch()
+ conf.err = err
+ return conf
+ }
+ for line, ok := file.readLine(); ok; line, ok = file.readLine() {
+ if len(line) > 0 && (line[0] == ';' || line[0] == '#') {
+ // comment.
+ continue
+ }
+ f := getFields(line)
+ if len(f) < 1 {
+ continue
+ }
+ switch f[0] {
+ case "nameserver": // add one name server
+ if len(f) > 1 && len(conf.servers) < 3 { // small, but the standard limit
+ // One more check: make sure server name is
+ // just an IP address. Otherwise we need DNS
+ // to look it up.
+ if parseIPv4(f[1]) != nil {
+ conf.servers = append(conf.servers, JoinHostPort(f[1], "53"))
+ } else if ip, _ := parseIPv6Zone(f[1]); ip != nil {
+ conf.servers = append(conf.servers, JoinHostPort(f[1], "53"))
+ }
+ }
+
+ case "domain": // set search path to just this domain
+ if len(f) > 1 {
+ conf.search = []string{ensureRooted(f[1])}
+ }
+
+ case "search": // set search path to given servers
+ conf.search = make([]string, len(f)-1)
+ for i := 0; i < len(conf.search); i++ {
+ conf.search[i] = ensureRooted(f[i+1])
+ }
+
+ case "options": // magic options
+ for _, s := range f[1:] {
+ switch {
+ case hasPrefix(s, "ndots:"):
+ n, _, _ := dtoi(s[6:])
+ if n < 0 {
+ n = 0
+ } else if n > 15 {
+ n = 15
+ }
+ conf.ndots = n
+ case hasPrefix(s, "timeout:"):
+ n, _, _ := dtoi(s[8:])
+ if n < 1 {
+ n = 1
+ }
+ conf.timeout = time.Duration(n) * time.Second
+ case hasPrefix(s, "attempts:"):
+ n, _, _ := dtoi(s[9:])
+ if n < 1 {
+ n = 1
+ }
+ conf.attempts = n
+ case s == "rotate":
+ conf.rotate = true
+ case s == "single-request" || s == "single-request-reopen":
+ // Linux option:
+ // http://man7.org/linux/man-pages/man5/resolv.conf.5.html
+ // "By default, glibc performs IPv4 and IPv6 lookups in parallel [...]
+ // This option disables the behavior and makes glibc
+ // perform the IPv6 and IPv4 requests sequentially."
+ conf.singleRequest = true
+ case s == "use-vc" || s == "usevc" || s == "tcp":
+ // Linux (use-vc), FreeBSD (usevc) and OpenBSD (tcp) option:
+ // http://man7.org/linux/man-pages/man5/resolv.conf.5.html
+ // "Sets RES_USEVC in _res.options.
+ // This option forces the use of TCP for DNS resolutions."
+ // https://www.freebsd.org/cgi/man.cgi?query=resolv.conf&sektion=5&manpath=freebsd-release-ports
+ // https://man.openbsd.org/resolv.conf.5
+ conf.useTCP = true
+ default:
+ conf.unknownOpt = true
+ }
+ }
+
+ case "lookup":
+ // OpenBSD option:
+ // https://www.openbsd.org/cgi-bin/man.cgi/OpenBSD-current/man5/resolv.conf.5
+ // "the legal space-separated values are: bind, file, yp"
+ conf.lookup = f[1:]
+
+ default:
+ conf.unknownOpt = true
+ }
+ }
+ if len(conf.servers) == 0 {
+ conf.servers = defaultNS
+ }
+ if len(conf.search) == 0 {
+ conf.search = dnsDefaultSearch()
+ }
+ return conf
+}
+
+func dnsDefaultSearch() []string {
+ hn, err := getHostname()
+ if err != nil {
+ // best effort
+ return nil
+ }
+ if i := bytealg.IndexByteString(hn, '.'); i >= 0 && i < len(hn)-1 {
+ return []string{ensureRooted(hn[i+1:])}
+ }
+ return nil
+}
+
+func hasPrefix(s, prefix string) bool {
+ return len(s) >= len(prefix) && s[:len(prefix)] == prefix
+}
+
+func ensureRooted(s string) string {
+ if len(s) > 0 && s[len(s)-1] == '.' {
+ return s
+ }
+ return s + "."
+}
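
As a rough illustration of the parser's behavior (hypothetical file contents), a resolv.conf such as

    nameserver 10.0.0.1
    search corp.example internal.example
    options ndots:2 timeout:3 attempts:4 rotate use-vc

would yield, approximately: servers=["10.0.0.1:53"], search=["corp.example.", "internal.example."], ndots=2, timeout=3s, attempts=4, rotate=true, useTCP=true. Out-of-range ndots values are clamped to [0, 15], and timeout and attempts are floored at 1.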
diff --git a/contrib/go/_std_1.19/src/net/error_posix.go b/contrib/go/_std_1.19/src/net/error_posix.go
new file mode 100644
index 0000000000..8fc7d0bb73
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/error_posix.go
@@ -0,0 +1,21 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm) || windows
+
+package net
+
+import (
+ "os"
+ "syscall"
+)
+
+// wrapSyscallError takes an error and a syscall name. If the error is
+ // a syscall.Errno, it wraps it in an os.SyscallError using the syscall name.
+func wrapSyscallError(name string, err error) error {
+ if _, ok := err.(syscall.Errno); ok {
+ err = os.NewSyscallError(name, err)
+ }
+ return err
+}
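
A standalone mirror of this helper, for illustration, shows the wrapping and that the underlying Errno stays reachable through errors.Is:

package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

// Standalone mirror of the helper above.
func wrapSyscallError(name string, err error) error {
	if _, ok := err.(syscall.Errno); ok {
		err = os.NewSyscallError(name, err)
	}
	return err
}

func main() {
	err := wrapSyscallError("shutdown", syscall.ENOTCONN)
	fmt.Println(err)                              // e.g. "shutdown: transport endpoint is not connected"
	fmt.Println(errors.Is(err, syscall.ENOTCONN)) // true: Unwrap still exposes the Errno
}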
diff --git a/contrib/go/_std_1.19/src/net/error_unix.go b/contrib/go/_std_1.19/src/net/error_unix.go
new file mode 100644
index 0000000000..1f9b6eb78c
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/error_unix.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || js
+
+package net
+
+import "syscall"
+
+func isConnError(err error) bool {
+ if se, ok := err.(syscall.Errno); ok {
+ return se == syscall.ECONNRESET || se == syscall.ECONNABORTED
+ }
+ return false
+}
diff --git a/contrib/go/_std_1.19/src/net/fd_posix.go b/contrib/go/_std_1.19/src/net/fd_posix.go
new file mode 100644
index 0000000000..ffb9bcf8b9
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/fd_posix.go
@@ -0,0 +1,147 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || windows
+
+package net
+
+import (
+ "internal/poll"
+ "runtime"
+ "syscall"
+ "time"
+)
+
+// Network file descriptor.
+type netFD struct {
+ pfd poll.FD
+
+ // immutable until Close
+ family int
+ sotype int
+ isConnected bool // handshake completed or use of association with peer
+ net string
+ laddr Addr
+ raddr Addr
+}
+
+func (fd *netFD) setAddr(laddr, raddr Addr) {
+ fd.laddr = laddr
+ fd.raddr = raddr
+ runtime.SetFinalizer(fd, (*netFD).Close)
+}
+
+func (fd *netFD) Close() error {
+ runtime.SetFinalizer(fd, nil)
+ return fd.pfd.Close()
+}
+
+func (fd *netFD) shutdown(how int) error {
+ err := fd.pfd.Shutdown(how)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("shutdown", err)
+}
+
+func (fd *netFD) closeRead() error {
+ return fd.shutdown(syscall.SHUT_RD)
+}
+
+func (fd *netFD) closeWrite() error {
+ return fd.shutdown(syscall.SHUT_WR)
+}
+
+func (fd *netFD) Read(p []byte) (n int, err error) {
+ n, err = fd.pfd.Read(p)
+ runtime.KeepAlive(fd)
+ return n, wrapSyscallError(readSyscallName, err)
+}
+
+func (fd *netFD) readFrom(p []byte) (n int, sa syscall.Sockaddr, err error) {
+ n, sa, err = fd.pfd.ReadFrom(p)
+ runtime.KeepAlive(fd)
+ return n, sa, wrapSyscallError(readFromSyscallName, err)
+}
+
+func (fd *netFD) readFromInet4(p []byte, from *syscall.SockaddrInet4) (n int, err error) {
+ n, err = fd.pfd.ReadFromInet4(p, from)
+ runtime.KeepAlive(fd)
+ return n, wrapSyscallError(readFromSyscallName, err)
+}
+
+func (fd *netFD) readFromInet6(p []byte, from *syscall.SockaddrInet6) (n int, err error) {
+ n, err = fd.pfd.ReadFromInet6(p, from)
+ runtime.KeepAlive(fd)
+ return n, wrapSyscallError(readFromSyscallName, err)
+}
+
+func (fd *netFD) readMsg(p []byte, oob []byte, flags int) (n, oobn, retflags int, sa syscall.Sockaddr, err error) {
+ n, oobn, retflags, sa, err = fd.pfd.ReadMsg(p, oob, flags)
+ runtime.KeepAlive(fd)
+ return n, oobn, retflags, sa, wrapSyscallError(readMsgSyscallName, err)
+}
+
+func (fd *netFD) readMsgInet4(p []byte, oob []byte, flags int, sa *syscall.SockaddrInet4) (n, oobn, retflags int, err error) {
+ n, oobn, retflags, err = fd.pfd.ReadMsgInet4(p, oob, flags, sa)
+ runtime.KeepAlive(fd)
+ return n, oobn, retflags, wrapSyscallError(readMsgSyscallName, err)
+}
+
+func (fd *netFD) readMsgInet6(p []byte, oob []byte, flags int, sa *syscall.SockaddrInet6) (n, oobn, retflags int, err error) {
+ n, oobn, retflags, err = fd.pfd.ReadMsgInet6(p, oob, flags, sa)
+ runtime.KeepAlive(fd)
+ return n, oobn, retflags, wrapSyscallError(readMsgSyscallName, err)
+}
+
+func (fd *netFD) Write(p []byte) (nn int, err error) {
+ nn, err = fd.pfd.Write(p)
+ runtime.KeepAlive(fd)
+ return nn, wrapSyscallError(writeSyscallName, err)
+}
+
+func (fd *netFD) writeTo(p []byte, sa syscall.Sockaddr) (n int, err error) {
+ n, err = fd.pfd.WriteTo(p, sa)
+ runtime.KeepAlive(fd)
+ return n, wrapSyscallError(writeToSyscallName, err)
+}
+
+func (fd *netFD) writeToInet4(p []byte, sa *syscall.SockaddrInet4) (n int, err error) {
+ n, err = fd.pfd.WriteToInet4(p, sa)
+ runtime.KeepAlive(fd)
+ return n, wrapSyscallError(writeToSyscallName, err)
+}
+
+func (fd *netFD) writeToInet6(p []byte, sa *syscall.SockaddrInet6) (n int, err error) {
+ n, err = fd.pfd.WriteToInet6(p, sa)
+ runtime.KeepAlive(fd)
+ return n, wrapSyscallError(writeToSyscallName, err)
+}
+
+func (fd *netFD) writeMsg(p []byte, oob []byte, sa syscall.Sockaddr) (n int, oobn int, err error) {
+ n, oobn, err = fd.pfd.WriteMsg(p, oob, sa)
+ runtime.KeepAlive(fd)
+ return n, oobn, wrapSyscallError(writeMsgSyscallName, err)
+}
+
+func (fd *netFD) writeMsgInet4(p []byte, oob []byte, sa *syscall.SockaddrInet4) (n int, oobn int, err error) {
+ n, oobn, err = fd.pfd.WriteMsgInet4(p, oob, sa)
+ runtime.KeepAlive(fd)
+ return n, oobn, wrapSyscallError(writeMsgSyscallName, err)
+}
+
+func (fd *netFD) writeMsgInet6(p []byte, oob []byte, sa *syscall.SockaddrInet6) (n int, oobn int, err error) {
+ n, oobn, err = fd.pfd.WriteMsgInet6(p, oob, sa)
+ runtime.KeepAlive(fd)
+ return n, oobn, wrapSyscallError(writeMsgSyscallName, err)
+}
+
+func (fd *netFD) SetDeadline(t time.Time) error {
+ return fd.pfd.SetDeadline(t)
+}
+
+func (fd *netFD) SetReadDeadline(t time.Time) error {
+ return fd.pfd.SetReadDeadline(t)
+}
+
+func (fd *netFD) SetWriteDeadline(t time.Time) error {
+ return fd.pfd.SetWriteDeadline(t)
+}
diff --git a/contrib/go/_std_1.19/src/net/fd_unix.go b/contrib/go/_std_1.19/src/net/fd_unix.go
new file mode 100644
index 0000000000..a400c6075e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/fd_unix.go
@@ -0,0 +1,203 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package net
+
+import (
+ "context"
+ "internal/poll"
+ "os"
+ "runtime"
+ "syscall"
+)
+
+const (
+ readSyscallName = "read"
+ readFromSyscallName = "recvfrom"
+ readMsgSyscallName = "recvmsg"
+ writeSyscallName = "write"
+ writeToSyscallName = "sendto"
+ writeMsgSyscallName = "sendmsg"
+)
+
+func newFD(sysfd, family, sotype int, net string) (*netFD, error) {
+ ret := &netFD{
+ pfd: poll.FD{
+ Sysfd: sysfd,
+ IsStream: sotype == syscall.SOCK_STREAM,
+ ZeroReadIsEOF: sotype != syscall.SOCK_DGRAM && sotype != syscall.SOCK_RAW,
+ },
+ family: family,
+ sotype: sotype,
+ net: net,
+ }
+ return ret, nil
+}
+
+func (fd *netFD) init() error {
+ return fd.pfd.Init(fd.net, true)
+}
+
+func (fd *netFD) name() string {
+ var ls, rs string
+ if fd.laddr != nil {
+ ls = fd.laddr.String()
+ }
+ if fd.raddr != nil {
+ rs = fd.raddr.String()
+ }
+ return fd.net + ":" + ls + "->" + rs
+}
+
+func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) (rsa syscall.Sockaddr, ret error) {
+ // Do not need to call fd.writeLock here,
+ // because fd is not yet accessible to user,
+ // so no concurrent operations are possible.
+ switch err := connectFunc(fd.pfd.Sysfd, ra); err {
+ case syscall.EINPROGRESS, syscall.EALREADY, syscall.EINTR:
+ case nil, syscall.EISCONN:
+ select {
+ case <-ctx.Done():
+ return nil, mapErr(ctx.Err())
+ default:
+ }
+ if err := fd.pfd.Init(fd.net, true); err != nil {
+ return nil, err
+ }
+ runtime.KeepAlive(fd)
+ return nil, nil
+ case syscall.EINVAL:
+ // On Solaris and illumos we can see EINVAL if the socket has
+ // already been accepted and closed by the server. Treat this
+ // as a successful connection--writes to the socket will see
+ // EOF. For details and a test case in C see
+ // https://golang.org/issue/6828.
+ if runtime.GOOS == "solaris" || runtime.GOOS == "illumos" {
+ return nil, nil
+ }
+ fallthrough
+ default:
+ return nil, os.NewSyscallError("connect", err)
+ }
+ if err := fd.pfd.Init(fd.net, true); err != nil {
+ return nil, err
+ }
+ if deadline, hasDeadline := ctx.Deadline(); hasDeadline {
+ fd.pfd.SetWriteDeadline(deadline)
+ defer fd.pfd.SetWriteDeadline(noDeadline)
+ }
+
+ // Start the "interrupter" goroutine, if this context might be canceled.
+ //
+ // The interrupter goroutine waits for the context to be done and
+ // interrupts the dial (by altering the fd's write deadline, which
+ // wakes up waitWrite).
+ ctxDone := ctx.Done()
+ if ctxDone != nil {
+ // Wait for the interrupter goroutine to exit before returning
+ // from connect.
+ done := make(chan struct{})
+ interruptRes := make(chan error)
+ defer func() {
+ close(done)
+ if ctxErr := <-interruptRes; ctxErr != nil && ret == nil {
+ // The interrupter goroutine called SetWriteDeadline,
+ // but the connect code below had returned from
+ // waitWrite already and did a successful connect (ret
+ // == nil). Because we've now poisoned the connection
+ // by making it unwritable, don't return a successful
+ // dial. This was issue 16523.
+ ret = mapErr(ctxErr)
+ fd.Close() // prevent a leak
+ }
+ }()
+ go func() {
+ select {
+ case <-ctxDone:
+ // Force the runtime's poller to immediately give up
+ // waiting for writability, unblocking waitWrite
+ // below.
+ fd.pfd.SetWriteDeadline(aLongTimeAgo)
+ testHookCanceledDial()
+ interruptRes <- ctx.Err()
+ case <-done:
+ interruptRes <- nil
+ }
+ }()
+ }
+
+ for {
+ // Performing multiple connect system calls on a
+ // non-blocking socket under Unix variants does not
+ // necessarily result in earlier errors being
+ // returned. Instead, once runtime-integrated network
+ // poller tells us that the socket is ready, get the
+ // SO_ERROR socket option to see if the connection
+ // succeeded or failed. See issue 7474 for further
+ // details.
+ if err := fd.pfd.WaitWrite(); err != nil {
+ select {
+ case <-ctxDone:
+ return nil, mapErr(ctx.Err())
+ default:
+ }
+ return nil, err
+ }
+ nerr, err := getsockoptIntFunc(fd.pfd.Sysfd, syscall.SOL_SOCKET, syscall.SO_ERROR)
+ if err != nil {
+ return nil, os.NewSyscallError("getsockopt", err)
+ }
+ switch err := syscall.Errno(nerr); err {
+ case syscall.EINPROGRESS, syscall.EALREADY, syscall.EINTR:
+ case syscall.EISCONN:
+ return nil, nil
+ case syscall.Errno(0):
+ // The runtime poller can wake us up spuriously;
+ // see issues 14548 and 19289. Check that we are
+ // really connected; if not, wait again.
+ if rsa, err := syscall.Getpeername(fd.pfd.Sysfd); err == nil {
+ return rsa, nil
+ }
+ default:
+ return nil, os.NewSyscallError("connect", err)
+ }
+ runtime.KeepAlive(fd)
+ }
+}
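
From user code, this connect path is driven through Dialer.DialContext; cancelling the context is what triggers the interrupter goroutine above. A sketch with a placeholder address:

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	d := &net.Dialer{}
	// Cancel the dial after one second; the interrupter goroutine above
	// wakes the connect loop by moving the write deadline into the past.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	conn, err := d.DialContext(ctx, "tcp", "example.com:80") // placeholder address
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}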
+
+func (fd *netFD) accept() (netfd *netFD, err error) {
+ d, rsa, errcall, err := fd.pfd.Accept()
+ if err != nil {
+ if errcall != "" {
+ err = wrapSyscallError(errcall, err)
+ }
+ return nil, err
+ }
+
+ if netfd, err = newFD(d, fd.family, fd.sotype, fd.net); err != nil {
+ poll.CloseFunc(d)
+ return nil, err
+ }
+ if err = netfd.init(); err != nil {
+ netfd.Close()
+ return nil, err
+ }
+ lsa, _ := syscall.Getsockname(netfd.pfd.Sysfd)
+ netfd.setAddr(netfd.addrFunc()(lsa), netfd.addrFunc()(rsa))
+ return netfd, nil
+}
+
+func (fd *netFD) dup() (f *os.File, err error) {
+ ns, call, err := fd.pfd.Dup()
+ if err != nil {
+ if call != "" {
+ err = os.NewSyscallError(call, err)
+ }
+ return nil, err
+ }
+
+ return os.NewFile(uintptr(ns), fd.name()), nil
+}
diff --git a/contrib/go/_std_1.18/src/net/file.go b/contrib/go/_std_1.19/src/net/file.go
index c13332c188..c13332c188 100644
--- a/contrib/go/_std_1.18/src/net/file.go
+++ b/contrib/go/_std_1.19/src/net/file.go
diff --git a/contrib/go/_std_1.19/src/net/file_unix.go b/contrib/go/_std_1.19/src/net/file_unix.go
new file mode 100644
index 0000000000..0df67db501
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/file_unix.go
@@ -0,0 +1,119 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package net
+
+import (
+ "internal/poll"
+ "os"
+ "syscall"
+)
+
+func dupSocket(f *os.File) (int, error) {
+ s, call, err := poll.DupCloseOnExec(int(f.Fd()))
+ if err != nil {
+ if call != "" {
+ err = os.NewSyscallError(call, err)
+ }
+ return -1, err
+ }
+ if err := syscall.SetNonblock(s, true); err != nil {
+ poll.CloseFunc(s)
+ return -1, os.NewSyscallError("setnonblock", err)
+ }
+ return s, nil
+}
+
+func newFileFD(f *os.File) (*netFD, error) {
+ s, err := dupSocket(f)
+ if err != nil {
+ return nil, err
+ }
+ family := syscall.AF_UNSPEC
+ sotype, err := syscall.GetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_TYPE)
+ if err != nil {
+ poll.CloseFunc(s)
+ return nil, os.NewSyscallError("getsockopt", err)
+ }
+ lsa, _ := syscall.Getsockname(s)
+ rsa, _ := syscall.Getpeername(s)
+ switch lsa.(type) {
+ case *syscall.SockaddrInet4:
+ family = syscall.AF_INET
+ case *syscall.SockaddrInet6:
+ family = syscall.AF_INET6
+ case *syscall.SockaddrUnix:
+ family = syscall.AF_UNIX
+ default:
+ poll.CloseFunc(s)
+ return nil, syscall.EPROTONOSUPPORT
+ }
+ fd, err := newFD(s, family, sotype, "")
+ if err != nil {
+ poll.CloseFunc(s)
+ return nil, err
+ }
+ laddr := fd.addrFunc()(lsa)
+ raddr := fd.addrFunc()(rsa)
+ fd.net = laddr.Network()
+ if err := fd.init(); err != nil {
+ fd.Close()
+ return nil, err
+ }
+ fd.setAddr(laddr, raddr)
+ return fd, nil
+}
+
+func fileConn(f *os.File) (Conn, error) {
+ fd, err := newFileFD(f)
+ if err != nil {
+ return nil, err
+ }
+ switch fd.laddr.(type) {
+ case *TCPAddr:
+ return newTCPConn(fd), nil
+ case *UDPAddr:
+ return newUDPConn(fd), nil
+ case *IPAddr:
+ return newIPConn(fd), nil
+ case *UnixAddr:
+ return newUnixConn(fd), nil
+ }
+ fd.Close()
+ return nil, syscall.EINVAL
+}
+
+func fileListener(f *os.File) (Listener, error) {
+ fd, err := newFileFD(f)
+ if err != nil {
+ return nil, err
+ }
+ switch laddr := fd.laddr.(type) {
+ case *TCPAddr:
+ return &TCPListener{fd: fd}, nil
+ case *UnixAddr:
+ return &UnixListener{fd: fd, path: laddr.Name, unlink: false}, nil
+ }
+ fd.Close()
+ return nil, syscall.EINVAL
+}
+
+func filePacketConn(f *os.File) (PacketConn, error) {
+ fd, err := newFileFD(f)
+ if err != nil {
+ return nil, err
+ }
+ switch fd.laddr.(type) {
+ case *UDPAddr:
+ return newUDPConn(fd), nil
+ case *IPAddr:
+ return newIPConn(fd), nil
+ case *UnixAddr:
+ return newUnixConn(fd), nil
+ }
+ fd.Close()
+ return nil, syscall.EINVAL
+}
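
These helpers back the public net.FileConn, net.FileListener, and net.FilePacketConn. A sketch of adopting an inherited socket, assuming (hypothetically) that fd 3 is a listening TCP socket passed in by a parent process:

package main

import (
	"fmt"
	"log"
	"net"
	"os"
)

func main() {
	// Hypothetical: fd 3 is a listening TCP socket inherited from a parent process.
	f := os.NewFile(3, "inherited-listener")
	ln, err := net.FileListener(f) // goes through newFileFD above
	if err != nil {
		log.Fatal(err)
	}
	f.Close() // the listener owns a duplicate of the descriptor
	fmt.Println("listening on", ln.Addr())
}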
diff --git a/contrib/go/_std_1.18/src/net/hook.go b/contrib/go/_std_1.19/src/net/hook.go
index ea71803e22..ea71803e22 100644
--- a/contrib/go/_std_1.18/src/net/hook.go
+++ b/contrib/go/_std_1.19/src/net/hook.go
diff --git a/contrib/go/_std_1.19/src/net/hook_unix.go b/contrib/go/_std_1.19/src/net/hook_unix.go
new file mode 100644
index 0000000000..fa82c7e52b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/hook_unix.go
@@ -0,0 +1,20 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm)
+
+package net
+
+import "syscall"
+
+var (
+ testHookDialChannel = func() {} // for golang.org/issue/5349
+ testHookCanceledDial = func() {} // for golang.org/issue/16523
+
+ // Placeholders for socket system calls.
+ socketFunc func(int, int, int) (int, error) = syscall.Socket
+ connectFunc func(int, syscall.Sockaddr) error = syscall.Connect
+ listenFunc func(int, int) error = syscall.Listen
+ getsockoptIntFunc func(int, int, int) (int, error) = syscall.GetsockoptInt
+)
diff --git a/contrib/go/_std_1.18/src/net/hosts.go b/contrib/go/_std_1.19/src/net/hosts.go
index e604031920..e604031920 100644
--- a/contrib/go/_std_1.18/src/net/hosts.go
+++ b/contrib/go/_std_1.19/src/net/hosts.go
diff --git a/contrib/go/_std_1.19/src/net/http/client.go b/contrib/go/_std_1.19/src/net/http/client.go
new file mode 100644
index 0000000000..992817c0f5
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/http/client.go
@@ -0,0 +1,1023 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP client. See RFC 7230 through 7235.
+//
+// This is the high-level Client interface.
+// The low-level implementation is in transport.go.
+
+package http
+
+import (
+ "context"
+ "crypto/tls"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net/http/internal/ascii"
+ "net/url"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+// A Client is an HTTP client. Its zero value (DefaultClient) is a
+// usable client that uses DefaultTransport.
+//
+// The Client's Transport typically has internal state (cached TCP
+// connections), so Clients should be reused instead of created as
+// needed. Clients are safe for concurrent use by multiple goroutines.
+//
+// A Client is higher-level than a RoundTripper (such as Transport)
+// and additionally handles HTTP details such as cookies and
+// redirects.
+//
+// When following redirects, the Client will forward all headers set on the
+// initial Request except:
+//
+// • when forwarding sensitive headers like "Authorization",
+// "WWW-Authenticate", and "Cookie" to untrusted targets.
+// These headers will be ignored when following a redirect to a domain
+// that is not a subdomain match or exact match of the initial domain.
+// For example, a redirect from "foo.com" to either "foo.com" or "sub.foo.com"
+// will forward the sensitive headers, but a redirect to "bar.com" will not.
+//
+// • when forwarding the "Cookie" header with a non-nil cookie Jar.
+// Since each redirect may mutate the state of the cookie jar,
+// a redirect may possibly alter a cookie set in the initial request.
+// When forwarding the "Cookie" header, any mutated cookies will be omitted,
+// with the expectation that the Jar will insert those mutated cookies
+// with the updated values (assuming the origin matches).
+// If Jar is nil, the initial cookies are forwarded without change.
+type Client struct {
+ // Transport specifies the mechanism by which individual
+ // HTTP requests are made.
+ // If nil, DefaultTransport is used.
+ Transport RoundTripper
+
+ // CheckRedirect specifies the policy for handling redirects.
+ // If CheckRedirect is not nil, the client calls it before
+ // following an HTTP redirect. The arguments req and via are
+ // the upcoming request and the requests made already, oldest
+ // first. If CheckRedirect returns an error, the Client's Get
+ // method returns both the previous Response (with its Body
+ // closed) and CheckRedirect's error (wrapped in a url.Error)
+ // instead of issuing the Request req.
+ // As a special case, if CheckRedirect returns ErrUseLastResponse,
+ // then the most recent response is returned with its body
+ // unclosed, along with a nil error.
+ //
+ // If CheckRedirect is nil, the Client uses its default policy,
+ // which is to stop after 10 consecutive requests.
+ CheckRedirect func(req *Request, via []*Request) error
+
+ // Jar specifies the cookie jar.
+ //
+ // The Jar is used to insert relevant cookies into every
+ // outbound Request and is updated with the cookie values
+ // of every inbound Response. The Jar is consulted for every
+ // redirect that the Client follows.
+ //
+ // If Jar is nil, cookies are only sent if they are explicitly
+ // set on the Request.
+ Jar CookieJar
+
+ // Timeout specifies a time limit for requests made by this
+ // Client. The timeout includes connection time, any
+ // redirects, and reading the response body. The timer remains
+ // running after Get, Head, Post, or Do return and will
+ // interrupt reading of the Response.Body.
+ //
+ // A Timeout of zero means no timeout.
+ //
+ // The Client cancels requests to the underlying Transport
+ // as if the Request's Context ended.
+ //
+ // For compatibility, the Client will also use the deprecated
+ // CancelRequest method on Transport if found. New
+ // RoundTripper implementations should use the Request's Context
+ // for cancellation instead of implementing CancelRequest.
+ Timeout time.Duration
+}
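
A short sketch of configuring these fields, with a placeholder URL and an illustrative redirect limit tighter than the default of 10:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 10 * time.Second, // covers dialing, redirects, and reading the body
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			if len(via) >= 3 { // illustrative limit, tighter than the default of 10
				return fmt.Errorf("stopped after %d redirects", len(via))
			}
			return nil
		},
	}
	resp, err := client.Get("https://example.com/") // placeholder URL
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}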
+
+// DefaultClient is the default Client and is used by Get, Head, and Post.
+var DefaultClient = &Client{}
+
+// RoundTripper is an interface representing the ability to execute a
+// single HTTP transaction, obtaining the Response for a given Request.
+//
+// A RoundTripper must be safe for concurrent use by multiple
+// goroutines.
+type RoundTripper interface {
+ // RoundTrip executes a single HTTP transaction, returning
+ // a Response for the provided Request.
+ //
+ // RoundTrip should not attempt to interpret the response. In
+ // particular, RoundTrip must return err == nil if it obtained
+ // a response, regardless of the response's HTTP status code.
+ // A non-nil err should be reserved for failure to obtain a
+ // response. Similarly, RoundTrip should not attempt to
+ // handle higher-level protocol details such as redirects,
+ // authentication, or cookies.
+ //
+ // RoundTrip should not modify the request, except for
+ // consuming and closing the Request's Body. RoundTrip may
+ // read fields of the request in a separate goroutine. Callers
+ // should not mutate or reuse the request until the Response's
+ // Body has been closed.
+ //
+ // RoundTrip must always close the body, including on errors,
+ // but depending on the implementation may do so in a separate
+ // goroutine even after RoundTrip returns. This means that
+ // callers wanting to reuse the body for subsequent requests
+ // must arrange to wait for the Close call before doing so.
+ //
+ // The Request's URL and Header fields must be initialized.
+ RoundTrip(*Request) (*Response, error)
+}
+
+// refererForURL returns a referer without any authentication info or
+// an empty string if lastReq scheme is https and newReq scheme is http.
+func refererForURL(lastReq, newReq *url.URL) string {
+ // https://tools.ietf.org/html/rfc7231#section-5.5.2
+ // "Clients SHOULD NOT include a Referer header field in a
+ // (non-secure) HTTP request if the referring page was
+ // transferred with a secure protocol."
+ if lastReq.Scheme == "https" && newReq.Scheme == "http" {
+ return ""
+ }
+ referer := lastReq.String()
+ if lastReq.User != nil {
+ // This is not very efficient, but is the best we can
+ // do without:
+ // - introducing a new method on URL
+ // - creating a race condition
+ // - copying the URL struct manually, which would cause
+ // maintenance problems down the line
+ auth := lastReq.User.String() + "@"
+ referer = strings.Replace(referer, auth, "", 1)
+ }
+ return referer
+}
+
+// didTimeout is non-nil only if err != nil.
+func (c *Client) send(req *Request, deadline time.Time) (resp *Response, didTimeout func() bool, err error) {
+ if c.Jar != nil {
+ for _, cookie := range c.Jar.Cookies(req.URL) {
+ req.AddCookie(cookie)
+ }
+ }
+ resp, didTimeout, err = send(req, c.transport(), deadline)
+ if err != nil {
+ return nil, didTimeout, err
+ }
+ if c.Jar != nil {
+ if rc := resp.Cookies(); len(rc) > 0 {
+ c.Jar.SetCookies(req.URL, rc)
+ }
+ }
+ return resp, nil, nil
+}
+
+func (c *Client) deadline() time.Time {
+ if c.Timeout > 0 {
+ return time.Now().Add(c.Timeout)
+ }
+ return time.Time{}
+}
+
+func (c *Client) transport() RoundTripper {
+ if c.Transport != nil {
+ return c.Transport
+ }
+ return DefaultTransport
+}
+
+// send issues an HTTP request.
+// Caller should close resp.Body when done reading from it.
+func send(ireq *Request, rt RoundTripper, deadline time.Time) (resp *Response, didTimeout func() bool, err error) {
+ req := ireq // req is either the original request, or a modified fork
+
+ if rt == nil {
+ req.closeBody()
+ return nil, alwaysFalse, errors.New("http: no Client.Transport or DefaultTransport")
+ }
+
+ if req.URL == nil {
+ req.closeBody()
+ return nil, alwaysFalse, errors.New("http: nil Request.URL")
+ }
+
+ if req.RequestURI != "" {
+ req.closeBody()
+ return nil, alwaysFalse, errors.New("http: Request.RequestURI can't be set in client requests")
+ }
+
+ // forkReq forks req into a shallow clone of ireq the first
+ // time it's called.
+ forkReq := func() {
+ if ireq == req {
+ req = new(Request)
+ *req = *ireq // shallow clone
+ }
+ }
+
+ // Most of the callers of send (Get, Post, et al) don't need
+ // Headers, leaving it uninitialized. We guarantee to the
+ // Transport that this has been initialized, though.
+ if req.Header == nil {
+ forkReq()
+ req.Header = make(Header)
+ }
+
+ if u := req.URL.User; u != nil && req.Header.Get("Authorization") == "" {
+ username := u.Username()
+ password, _ := u.Password()
+ forkReq()
+ req.Header = cloneOrMakeHeader(ireq.Header)
+ req.Header.Set("Authorization", "Basic "+basicAuth(username, password))
+ }
+
+ if !deadline.IsZero() {
+ forkReq()
+ }
+ stopTimer, didTimeout := setRequestCancel(req, rt, deadline)
+
+ resp, err = rt.RoundTrip(req)
+ if err != nil {
+ stopTimer()
+ if resp != nil {
+ log.Printf("RoundTripper returned a response & error; ignoring response")
+ }
+ if tlsErr, ok := err.(tls.RecordHeaderError); ok {
+ // If we get a bad TLS record header, check to see if the
+ // response looks like HTTP and give a more helpful error.
+ // See golang.org/issue/11111.
+ if string(tlsErr.RecordHeader[:]) == "HTTP/" {
+ err = errors.New("http: server gave HTTP response to HTTPS client")
+ }
+ }
+ return nil, didTimeout, err
+ }
+ if resp == nil {
+ return nil, didTimeout, fmt.Errorf("http: RoundTripper implementation (%T) returned a nil *Response with a nil error", rt)
+ }
+ if resp.Body == nil {
+ // The documentation on the Body field says “The http Client and Transport
+ // guarantee that Body is always non-nil, even on responses without a body
+ // or responses with a zero-length body.” Unfortunately, we didn't document
+ // that same constraint for arbitrary RoundTripper implementations, and
+ // RoundTripper implementations in the wild (mostly in tests) assume that
+ // they can use a nil Body to mean an empty one (similar to Request.Body).
+ // (See https://golang.org/issue/38095.)
+ //
+ // If the ContentLength allows the Body to be empty, fill in an empty one
+ // here to ensure that it is non-nil.
+ if resp.ContentLength > 0 && req.Method != "HEAD" {
+ return nil, didTimeout, fmt.Errorf("http: RoundTripper implementation (%T) returned a *Response with content length %d but a nil Body", rt, resp.ContentLength)
+ }
+ resp.Body = io.NopCloser(strings.NewReader(""))
+ }
+ if !deadline.IsZero() {
+ resp.Body = &cancelTimerBody{
+ stop: stopTimer,
+ rc: resp.Body,
+ reqDidTimeout: didTimeout,
+ }
+ }
+ return resp, nil, nil
+}
+
+// timeBeforeContextDeadline reports whether the non-zero Time t is
+// before ctx's deadline, if any. If ctx does not have a deadline, it
+// always reports true (the deadline is considered infinite).
+func timeBeforeContextDeadline(t time.Time, ctx context.Context) bool {
+ d, ok := ctx.Deadline()
+ if !ok {
+ return true
+ }
+ return t.Before(d)
+}
+
+// knownRoundTripperImpl reports whether rt is a RoundTripper that's
+// maintained by the Go team and known to implement the latest
+// optional semantics (notably contexts). The Request is used
+// to check whether this particular request is using an alternate protocol,
+// in which case we need to check the RoundTripper for that protocol.
+func knownRoundTripperImpl(rt RoundTripper, req *Request) bool {
+ switch t := rt.(type) {
+ case *Transport:
+ if altRT := t.alternateRoundTripper(req); altRT != nil {
+ return knownRoundTripperImpl(altRT, req)
+ }
+ return true
+ case *http2Transport, http2noDialH2RoundTripper:
+ return true
+ }
+ // There's a very minor chance of a false positive with this.
+ // Instead of detecting our golang.org/x/net/http2.Transport,
+ // it might detect a Transport type in a different http2
+ // package. But I know of none, and the only problem would be
+ // some temporarily leaked goroutines if the transport didn't
+ // support contexts. So this is a good enough heuristic:
+ if reflect.TypeOf(rt).String() == "*http2.Transport" {
+ return true
+ }
+ return false
+}
+
+// setRequestCancel sets req.Cancel and adds a deadline context to req
+// if deadline is non-zero. The RoundTripper's type is used to
+// determine whether the legacy CancelRequest behavior should be used.
+//
+// As background, there are three ways to cancel a request:
+// First was Transport.CancelRequest. (deprecated)
+// Second was Request.Cancel.
+// Third was Request.Context.
+// This function populates the second and third, and uses the first if it really needs to.
+func setRequestCancel(req *Request, rt RoundTripper, deadline time.Time) (stopTimer func(), didTimeout func() bool) {
+ if deadline.IsZero() {
+ return nop, alwaysFalse
+ }
+ knownTransport := knownRoundTripperImpl(rt, req)
+ oldCtx := req.Context()
+
+ if req.Cancel == nil && knownTransport {
+ // If they already had a Request.Context that's
+ // expiring sooner, do nothing:
+ if !timeBeforeContextDeadline(deadline, oldCtx) {
+ return nop, alwaysFalse
+ }
+
+ var cancelCtx func()
+ req.ctx, cancelCtx = context.WithDeadline(oldCtx, deadline)
+ return cancelCtx, func() bool { return time.Now().After(deadline) }
+ }
+ initialReqCancel := req.Cancel // the user's original Request.Cancel, if any
+
+ var cancelCtx func()
+ if oldCtx := req.Context(); timeBeforeContextDeadline(deadline, oldCtx) {
+ req.ctx, cancelCtx = context.WithDeadline(oldCtx, deadline)
+ }
+
+ cancel := make(chan struct{})
+ req.Cancel = cancel
+
+ doCancel := func() {
+ // The second way in the func comment above:
+ close(cancel)
+ // The first way, used only for RoundTripper
+ // implementations written before Go 1.5 or Go 1.6.
+ type canceler interface{ CancelRequest(*Request) }
+ if v, ok := rt.(canceler); ok {
+ v.CancelRequest(req)
+ }
+ }
+
+ stopTimerCh := make(chan struct{})
+ var once sync.Once
+ stopTimer = func() {
+ once.Do(func() {
+ close(stopTimerCh)
+ if cancelCtx != nil {
+ cancelCtx()
+ }
+ })
+ }
+
+ timer := time.NewTimer(time.Until(deadline))
+ var timedOut atomicBool
+
+ go func() {
+ select {
+ case <-initialReqCancel:
+ doCancel()
+ timer.Stop()
+ case <-timer.C:
+ timedOut.setTrue()
+ doCancel()
+ case <-stopTimerCh:
+ timer.Stop()
+ }
+ }()
+
+ return stopTimer, timedOut.isSet
+}
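
Of the three mechanisms, the request context is the one new code should use. A sketch, with a placeholder URL:

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// The third (preferred) mechanism: carry the deadline in the request context.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	req, err := http.NewRequestWithContext(ctx, "GET", "https://example.com/", nil) // placeholder URL
	if err != nil {
		fmt.Println(err)
		return
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println(err) // a *url.Error whose Timeout() reports true on deadline
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}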
+
+// See 2 (end of page 4) https://www.ietf.org/rfc/rfc2617.txt
+// "To receive authorization, the client sends the userid and password,
+// separated by a single colon (":") character, within a base64
+// encoded string in the credentials."
+// It is not meant to be urlencoded.
+func basicAuth(username, password string) string {
+ auth := username + ":" + password
+ return base64.StdEncoding.EncodeToString([]byte(auth))
+}
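
Callers normally do not invoke this directly; Request.SetBasicAuth applies the same user:password base64 scheme. For example (placeholder URL and credentials):

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/", nil) // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	req.SetBasicAuth("user", "pass")
	fmt.Println(req.Header.Get("Authorization")) // Basic dXNlcjpwYXNz
}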
+
+// Get issues a GET to the specified URL. If the response is one of
+// the following redirect codes, Get follows the redirect, up to a
+// maximum of 10 redirects:
+//
+// 301 (Moved Permanently)
+// 302 (Found)
+// 303 (See Other)
+// 307 (Temporary Redirect)
+// 308 (Permanent Redirect)
+//
+// An error is returned if there were too many redirects or if there
+// was an HTTP protocol error. A non-2xx response doesn't cause an
+// error. Any returned error will be of type *url.Error. The url.Error
+// value's Timeout method will report true if the request timed out.
+//
+// When err is nil, resp always contains a non-nil resp.Body.
+// Caller should close resp.Body when done reading from it.
+//
+// Get is a wrapper around DefaultClient.Get.
+//
+// To make a request with custom headers, use NewRequest and
+// DefaultClient.Do.
+//
+// To make a request with a specified context.Context, use NewRequestWithContext
+// and DefaultClient.Do.
+func Get(url string) (resp *Response, err error) {
+ return DefaultClient.Get(url)
+}
+
+// Get issues a GET to the specified URL. If the response is one of the
+// following redirect codes, Get follows the redirect after calling the
+// Client's CheckRedirect function:
+//
+// 301 (Moved Permanently)
+// 302 (Found)
+// 303 (See Other)
+// 307 (Temporary Redirect)
+// 308 (Permanent Redirect)
+//
+// An error is returned if the Client's CheckRedirect function fails
+// or if there was an HTTP protocol error. A non-2xx response doesn't
+// cause an error. Any returned error will be of type *url.Error. The
+// url.Error value's Timeout method will report true if the request
+// timed out.
+//
+// When err is nil, resp always contains a non-nil resp.Body.
+// Caller should close resp.Body when done reading from it.
+//
+// To make a request with custom headers, use NewRequest and Client.Do.
+//
+// To make a request with a specified context.Context, use NewRequestWithContext
+// and Client.Do.
+func (c *Client) Get(url string) (resp *Response, err error) {
+ req, err := NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return c.Do(req)
+}
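
Typical usage, with a placeholder URL; the body must be read and closed so the underlying connection can be reused:

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	resp, err := http.Get("https://example.com/") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close() // read and close the body to allow connection reuse
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.StatusCode, len(body))
}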
+
+func alwaysFalse() bool { return false }
+
+// ErrUseLastResponse can be returned by Client.CheckRedirect hooks to
+// control how redirects are processed. If returned, the next request
+// is not sent and the most recent response is returned with its body
+// unclosed.
+var ErrUseLastResponse = errors.New("net/http: use last response")
+
+// checkRedirect calls either the user's configured CheckRedirect
+// function, or the default.
+func (c *Client) checkRedirect(req *Request, via []*Request) error {
+ fn := c.CheckRedirect
+ if fn == nil {
+ fn = defaultCheckRedirect
+ }
+ return fn(req, via)
+}
+
+// redirectBehavior describes what should happen when the
+// client encounters a 3xx status code from the server
+func redirectBehavior(reqMethod string, resp *Response, ireq *Request) (redirectMethod string, shouldRedirect, includeBody bool) {
+ switch resp.StatusCode {
+ case 301, 302, 303:
+ redirectMethod = reqMethod
+ shouldRedirect = true
+ includeBody = false
+
+ // RFC 2616 allowed automatic redirection only with GET and
+ // HEAD requests. RFC 7231 lifts this restriction, but we still
+ // restrict other methods to GET to maintain compatibility.
+ // See Issue 18570.
+ if reqMethod != "GET" && reqMethod != "HEAD" {
+ redirectMethod = "GET"
+ }
+ case 307, 308:
+ redirectMethod = reqMethod
+ shouldRedirect = true
+ includeBody = true
+
+ if ireq.GetBody == nil && ireq.outgoingLength() != 0 {
+ // We had a request body, and 307/308 require
+ // re-sending it, but GetBody is not defined. So just
+ // return this response to the user instead of an
+ // error, like we did in Go 1.7 and earlier.
+ shouldRedirect = false
+ }
+ }
+ return redirectMethod, shouldRedirect, includeBody
+}
+
+// urlErrorOp returns the (*url.Error).Op value to use for the
+// provided (*Request).Method value.
+func urlErrorOp(method string) string {
+ if method == "" {
+ return "Get"
+ }
+ if lowerMethod, ok := ascii.ToLower(method); ok {
+ return method[:1] + lowerMethod[1:]
+ }
+ return method
+}
+
+// Do sends an HTTP request and returns an HTTP response, following
+// policy (such as redirects, cookies, auth) as configured on the
+// client.
+//
+// An error is returned if caused by client policy (such as
+// CheckRedirect), or failure to speak HTTP (such as a network
+// connectivity problem). A non-2xx status code doesn't cause an
+// error.
+//
+// If the returned error is nil, the Response will contain a non-nil
+// Body which the user is expected to close. If the Body is not both
+// read to EOF and closed, the Client's underlying RoundTripper
+// (typically Transport) may not be able to re-use a persistent TCP
+// connection to the server for a subsequent "keep-alive" request.
+//
+// The request Body, if non-nil, will be closed by the underlying
+// Transport, even on errors.
+//
+// On error, any Response can be ignored. A non-nil Response with a
+// non-nil error only occurs when CheckRedirect fails, and even then
+// the returned Response.Body is already closed.
+//
+// Generally Get, Post, or PostForm will be used instead of Do.
+//
+// If the server replies with a redirect, the Client first uses the
+// CheckRedirect function to determine whether the redirect should be
+// followed. If permitted, a 301, 302, or 303 redirect causes
+// subsequent requests to use HTTP method GET
+// (or HEAD if the original request was HEAD), with no body.
+// A 307 or 308 redirect preserves the original HTTP method and body,
+// provided that the Request.GetBody function is defined.
+// The NewRequest function automatically sets GetBody for common
+// standard library body types.
+//
+// Any returned error will be of type *url.Error. The url.Error
+// value's Timeout method will report true if the request timed out.
+func (c *Client) Do(req *Request) (*Response, error) {
+ return c.do(req)
+}
+
+var testHookClientDoResult func(retres *Response, reterr error)
+
+func (c *Client) do(req *Request) (retres *Response, reterr error) {
+ if testHookClientDoResult != nil {
+ defer func() { testHookClientDoResult(retres, reterr) }()
+ }
+ if req.URL == nil {
+ req.closeBody()
+ return nil, &url.Error{
+ Op: urlErrorOp(req.Method),
+ Err: errors.New("http: nil Request.URL"),
+ }
+ }
+
+ var (
+ deadline = c.deadline()
+ reqs []*Request
+ resp *Response
+ copyHeaders = c.makeHeadersCopier(req)
+ reqBodyClosed = false // have we closed the current req.Body?
+
+ // Redirect behavior:
+ redirectMethod string
+ includeBody bool
+ )
+ uerr := func(err error) error {
+ // the body may have been closed already by c.send()
+ if !reqBodyClosed {
+ req.closeBody()
+ }
+ var urlStr string
+ if resp != nil && resp.Request != nil {
+ urlStr = stripPassword(resp.Request.URL)
+ } else {
+ urlStr = stripPassword(req.URL)
+ }
+ return &url.Error{
+ Op: urlErrorOp(reqs[0].Method),
+ URL: urlStr,
+ Err: err,
+ }
+ }
+ for {
+ // For all but the first request, create the next
+ // request hop and replace req.
+ if len(reqs) > 0 {
+ loc := resp.Header.Get("Location")
+ if loc == "" {
+ // While most 3xx responses include a Location, it is not
+ // required and 3xx responses without a Location have been
+ // observed in the wild. See issues #17773 and #49281.
+ return resp, nil
+ }
+ u, err := req.URL.Parse(loc)
+ if err != nil {
+ resp.closeBody()
+ return nil, uerr(fmt.Errorf("failed to parse Location header %q: %v", loc, err))
+ }
+ host := ""
+ if req.Host != "" && req.Host != req.URL.Host {
+ // If the caller specified a custom Host header and the
+ // redirect location is relative, preserve the Host header
+ // through the redirect. See issue #22233.
+ if u, _ := url.Parse(loc); u != nil && !u.IsAbs() {
+ host = req.Host
+ }
+ }
+ ireq := reqs[0]
+ req = &Request{
+ Method: redirectMethod,
+ Response: resp,
+ URL: u,
+ Header: make(Header),
+ Host: host,
+ Cancel: ireq.Cancel,
+ ctx: ireq.ctx,
+ }
+ if includeBody && ireq.GetBody != nil {
+ req.Body, err = ireq.GetBody()
+ if err != nil {
+ resp.closeBody()
+ return nil, uerr(err)
+ }
+ req.ContentLength = ireq.ContentLength
+ }
+
+ // Copy original headers before setting the Referer,
+ // in case the user set Referer on their first request.
+ // If they really want to override, they can do it in
+ // their CheckRedirect func.
+ copyHeaders(req)
+
+ // Add the Referer header from the most recent
+ // request URL to the new one, if it's not https->http:
+ if ref := refererForURL(reqs[len(reqs)-1].URL, req.URL); ref != "" {
+ req.Header.Set("Referer", ref)
+ }
+ err = c.checkRedirect(req, reqs)
+
+ // Sentinel error to let users select the
+ // previous response, without closing its
+ // body. See Issue 10069.
+ if err == ErrUseLastResponse {
+ return resp, nil
+ }
+
+ // Close the previous response's body. But
+ // read at least some of the body so if it's
+ // small the underlying TCP connection will be
+ // re-used. No need to check for errors: if it
+ // fails, the Transport won't reuse it anyway.
+ const maxBodySlurpSize = 2 << 10
+ if resp.ContentLength == -1 || resp.ContentLength <= maxBodySlurpSize {
+ io.CopyN(io.Discard, resp.Body, maxBodySlurpSize)
+ }
+ resp.Body.Close()
+
+ if err != nil {
+ // Special case for Go 1 compatibility: return both the response
+ // and an error if the CheckRedirect function failed.
+ // See https://golang.org/issue/3795
+ // The resp.Body has already been closed.
+ ue := uerr(err)
+ ue.(*url.Error).URL = loc
+ return resp, ue
+ }
+ }
+
+ reqs = append(reqs, req)
+ var err error
+ var didTimeout func() bool
+ if resp, didTimeout, err = c.send(req, deadline); err != nil {
+ // c.send() always closes req.Body
+ reqBodyClosed = true
+ if !deadline.IsZero() && didTimeout() {
+ err = &httpError{
+ err: err.Error() + " (Client.Timeout exceeded while awaiting headers)",
+ timeout: true,
+ }
+ }
+ return nil, uerr(err)
+ }
+
+ var shouldRedirect bool
+ redirectMethod, shouldRedirect, includeBody = redirectBehavior(req.Method, resp, reqs[0])
+ if !shouldRedirect {
+ return resp, nil
+ }
+
+ req.closeBody()
+ }
+}
+
+// makeHeadersCopier makes a function that copies headers from the
+// initial Request, ireq. For every redirect, this function must be called
+// so that it can copy headers into the upcoming Request.
+func (c *Client) makeHeadersCopier(ireq *Request) func(*Request) {
+ // The headers to copy are from the very initial request.
+ // We use a closured callback to keep a reference to these original headers.
+ var (
+ ireqhdr = cloneOrMakeHeader(ireq.Header)
+ icookies map[string][]*Cookie
+ )
+ if c.Jar != nil && ireq.Header.Get("Cookie") != "" {
+ icookies = make(map[string][]*Cookie)
+ for _, c := range ireq.Cookies() {
+ icookies[c.Name] = append(icookies[c.Name], c)
+ }
+ }
+
+ preq := ireq // The previous request
+ return func(req *Request) {
+ // If Jar is present and there was some initial cookies provided
+ // via the request header, then we may need to alter the initial
+ // cookies as we follow redirects since each redirect may end up
+ // modifying a pre-existing cookie.
+ //
+ // Since cookies already set in the request header do not contain
+ // information about the original domain and path, the logic below
+ // assumes any new set cookies override the original cookie
+ // regardless of domain or path.
+ //
+ // See https://golang.org/issue/17494
+ if c.Jar != nil && icookies != nil {
+ var changed bool
+ resp := req.Response // The response that caused the upcoming redirect
+ for _, c := range resp.Cookies() {
+ if _, ok := icookies[c.Name]; ok {
+ delete(icookies, c.Name)
+ changed = true
+ }
+ }
+ if changed {
+ ireqhdr.Del("Cookie")
+ var ss []string
+ for _, cs := range icookies {
+ for _, c := range cs {
+ ss = append(ss, c.Name+"="+c.Value)
+ }
+ }
+ sort.Strings(ss) // Ensure deterministic headers
+ ireqhdr.Set("Cookie", strings.Join(ss, "; "))
+ }
+ }
+
+ // Copy the initial request's Header values
+ // (at least the safe ones).
+ for k, vv := range ireqhdr {
+ if shouldCopyHeaderOnRedirect(k, preq.URL, req.URL) {
+ req.Header[k] = vv
+ }
+ }
+
+ preq = req // Update previous Request with the current request
+ }
+}
+
+func defaultCheckRedirect(req *Request, via []*Request) error {
+ if len(via) >= 10 {
+ return errors.New("stopped after 10 redirects")
+ }
+ return nil
+}
+
+// Post issues a POST to the specified URL.
+//
+// Caller should close resp.Body when done reading from it.
+//
+// If the provided body is an io.Closer, it is closed after the
+// request.
+//
+// Post is a wrapper around DefaultClient.Post.
+//
+// To set custom headers, use NewRequest and DefaultClient.Do.
+//
+// See the Client.Do method documentation for details on how redirects
+// are handled.
+//
+// To make a request with a specified context.Context, use NewRequestWithContext
+// and DefaultClient.Do.
+func Post(url, contentType string, body io.Reader) (resp *Response, err error) {
+ return DefaultClient.Post(url, contentType, body)
+}
+
+// Post issues a POST to the specified URL.
+//
+// Caller should close resp.Body when done reading from it.
+//
+// If the provided body is an io.Closer, it is closed after the
+// request.
+//
+// To set custom headers, use NewRequest and Client.Do.
+//
+// To make a request with a specified context.Context, use NewRequestWithContext
+// and Client.Do.
+//
+// See the Client.Do method documentation for details on how redirects
+// are handled.
+func (c *Client) Post(url, contentType string, body io.Reader) (resp *Response, err error) {
+ req, err := NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", contentType)
+ return c.Do(req)
+}
+
+// PostForm issues a POST to the specified URL, with data's keys and
+// values URL-encoded as the request body.
+//
+// The Content-Type header is set to application/x-www-form-urlencoded.
+// To set other headers, use NewRequest and DefaultClient.Do.
+//
+// When err is nil, resp always contains a non-nil resp.Body.
+// Caller should close resp.Body when done reading from it.
+//
+// PostForm is a wrapper around DefaultClient.PostForm.
+//
+// See the Client.Do method documentation for details on how redirects
+// are handled.
+//
+// To make a request with a specified context.Context, use NewRequestWithContext
+// and DefaultClient.Do.
+func PostForm(url string, data url.Values) (resp *Response, err error) {
+ return DefaultClient.PostForm(url, data)
+}
+
+// PostForm issues a POST to the specified URL,
+// with data's keys and values URL-encoded as the request body.
+//
+// The Content-Type header is set to application/x-www-form-urlencoded.
+// To set other headers, use NewRequest and Client.Do.
+//
+// When err is nil, resp always contains a non-nil resp.Body.
+// Caller should close resp.Body when done reading from it.
+//
+// See the Client.Do method documentation for details on how redirects
+// are handled.
+//
+// To make a request with a specified context.Context, use NewRequestWithContext
+// and Client.Do.
+func (c *Client) PostForm(url string, data url.Values) (resp *Response, err error) {
+ return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
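
Typical usage, with a placeholder endpoint and illustrative form fields:

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url"
)

func main() {
	// Placeholder endpoint; data is sent as application/x-www-form-urlencoded.
	resp, err := http.PostForm("https://example.com/login",
		url.Values{"user": {"alice"}, "lang": {"go"}})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}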
+
+// Head issues a HEAD to the specified URL. If the response is one of
+// the following redirect codes, Head follows the redirect, up to a
+// maximum of 10 redirects:
+//
+// 301 (Moved Permanently)
+// 302 (Found)
+// 303 (See Other)
+// 307 (Temporary Redirect)
+// 308 (Permanent Redirect)
+//
+// Head is a wrapper around DefaultClient.Head.
+//
+// To make a request with a specified context.Context, use NewRequestWithContext
+// and DefaultClient.Do.
+func Head(url string) (resp *Response, err error) {
+ return DefaultClient.Head(url)
+}
+
+// Head issues a HEAD to the specified URL. If the response is one of the
+// following redirect codes, Head follows the redirect after calling the
+// Client's CheckRedirect function:
+//
+// 301 (Moved Permanently)
+// 302 (Found)
+// 303 (See Other)
+// 307 (Temporary Redirect)
+// 308 (Permanent Redirect)
+//
+// To make a request with a specified context.Context, use NewRequestWithContext
+// and Client.Do.
+func (c *Client) Head(url string) (resp *Response, err error) {
+ req, err := NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return c.Do(req)
+}
+
+// CloseIdleConnections closes any connections on its Transport which
+// were previously connected from previous requests but are now
+// sitting idle in a "keep-alive" state. It does not interrupt any
+// connections currently in use.
+//
+// If the Client's Transport does not have a CloseIdleConnections method
+// then this method does nothing.
+func (c *Client) CloseIdleConnections() {
+ type closeIdler interface {
+ CloseIdleConnections()
+ }
+ if tr, ok := c.transport().(closeIdler); ok {
+ tr.CloseIdleConnections()
+ }
+}
+
+// cancelTimerBody is an io.ReadCloser that wraps rc with two features:
+// 1. On Read error or close, the stop func is called.
+// 2. On Read failure, if reqDidTimeout is true, the error is wrapped and
+// marked as net.Error that hit its timeout.
+type cancelTimerBody struct {
+ stop func() // stops the time.Timer waiting to cancel the request
+ rc io.ReadCloser
+ reqDidTimeout func() bool
+}
+
+func (b *cancelTimerBody) Read(p []byte) (n int, err error) {
+ n, err = b.rc.Read(p)
+ if err == nil {
+ return n, nil
+ }
+ if err == io.EOF {
+ return n, err
+ }
+ if b.reqDidTimeout() {
+ err = &httpError{
+ err: err.Error() + " (Client.Timeout or context cancellation while reading body)",
+ timeout: true,
+ }
+ }
+ return n, err
+}
+
+func (b *cancelTimerBody) Close() error {
+ err := b.rc.Close()
+ b.stop()
+ return err
+}
+
+func shouldCopyHeaderOnRedirect(headerKey string, initial, dest *url.URL) bool {
+ switch CanonicalHeaderKey(headerKey) {
+ case "Authorization", "Www-Authenticate", "Cookie", "Cookie2":
+ // Permit sending auth/cookie headers from "foo.com"
+ // to "sub.foo.com".
+
+ // Note that we don't send all cookies to subdomains
+ // automatically. This function is only used for
+ // Cookies set explicitly on the initial outgoing
+ // client request. Cookies automatically added via the
+ // CookieJar mechanism continue to follow each
+ // cookie's scope as set by Set-Cookie. But for
+ // outgoing requests with the Cookie header set
+ // directly, we don't know their scope, so we assume
+ // it's for *.domain.com.
+
+ ihost := canonicalAddr(initial)
+ dhost := canonicalAddr(dest)
+ return isDomainOrSubdomain(dhost, ihost)
+ }
+ // All other headers are copied:
+ return true
+}
+
+// isDomainOrSubdomain reports whether sub is a subdomain (or exact
+// match) of the parent domain.
+//
+// Both domains must already be in canonical form.
+func isDomainOrSubdomain(sub, parent string) bool {
+ if sub == parent {
+ return true
+ }
+ // If sub is "foo.example.com" and parent is "example.com",
+ // that means sub must end in "."+parent.
+ // Do it without allocating.
+ if !strings.HasSuffix(sub, parent) {
+ return false
+ }
+ return sub[len(sub)-len(parent)-1] == '.'
+}
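+
+// For example (inputs assumed already in canonical form):
+//
+// isDomainOrSubdomain("foo.example.com", "example.com") // true
+// isDomainOrSubdomain("example.com", "example.com") // true
+// isDomainOrSubdomain("fooexample.com", "example.com") // false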
+
+func stripPassword(u *url.URL) string {
+ _, passSet := u.User.Password()
+ if passSet {
+ return strings.Replace(u.String(), u.User.String()+"@", u.User.Username()+":***@", 1)
+ }
+ return u.String()
+}
diff --git a/contrib/go/_std_1.18/src/net/http/clone.go b/contrib/go/_std_1.19/src/net/http/clone.go
index 3a3375bff7..3a3375bff7 100644
--- a/contrib/go/_std_1.18/src/net/http/clone.go
+++ b/contrib/go/_std_1.19/src/net/http/clone.go
diff --git a/contrib/go/_std_1.19/src/net/http/cookie.go b/contrib/go/_std_1.19/src/net/http/cookie.go
new file mode 100644
index 0000000000..9cb0804f8f
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/http/cookie.go
@@ -0,0 +1,466 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "net"
+ "net/http/internal/ascii"
+ "net/textproto"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an
+// HTTP response or the Cookie header of an HTTP request.
+//
+// See https://tools.ietf.org/html/rfc6265 for details.
+type Cookie struct {
+ Name string
+ Value string
+
+ Path string // optional
+ Domain string // optional
+ Expires time.Time // optional
+ RawExpires string // for reading cookies only
+
+ // MaxAge=0 means no 'Max-Age' attribute specified.
+	// MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0'.
+	// MaxAge>0 means Max-Age attribute present and given in seconds.
+ MaxAge int
+ Secure bool
+ HttpOnly bool
+ SameSite SameSite
+ Raw string
+ Unparsed []string // Raw text of unparsed attribute-value pairs
+}
+
+// SameSite allows a server to define a cookie attribute making it impossible for
+// the browser to send this cookie along with cross-site requests. The main
+// goal is to mitigate the risk of cross-origin information leakage, and provide
+// some protection against cross-site request forgery attacks.
+//
+// See https://tools.ietf.org/html/draft-ietf-httpbis-cookie-same-site-00 for details.
+type SameSite int
+
+const (
+ SameSiteDefaultMode SameSite = iota + 1
+ SameSiteLaxMode
+ SameSiteStrictMode
+ SameSiteNoneMode
+)
+
+// readSetCookies parses all "Set-Cookie" values from
+// the header h and returns the successfully parsed Cookies.
+func readSetCookies(h Header) []*Cookie {
+ cookieCount := len(h["Set-Cookie"])
+ if cookieCount == 0 {
+ return []*Cookie{}
+ }
+ cookies := make([]*Cookie, 0, cookieCount)
+ for _, line := range h["Set-Cookie"] {
+ parts := strings.Split(textproto.TrimString(line), ";")
+ if len(parts) == 1 && parts[0] == "" {
+ continue
+ }
+ parts[0] = textproto.TrimString(parts[0])
+ name, value, ok := strings.Cut(parts[0], "=")
+ if !ok {
+ continue
+ }
+ if !isCookieNameValid(name) {
+ continue
+ }
+ value, ok = parseCookieValue(value, true)
+ if !ok {
+ continue
+ }
+ c := &Cookie{
+ Name: name,
+ Value: value,
+ Raw: line,
+ }
+ for i := 1; i < len(parts); i++ {
+ parts[i] = textproto.TrimString(parts[i])
+ if len(parts[i]) == 0 {
+ continue
+ }
+
+ attr, val, _ := strings.Cut(parts[i], "=")
+ lowerAttr, isASCII := ascii.ToLower(attr)
+ if !isASCII {
+ continue
+ }
+ val, ok = parseCookieValue(val, false)
+ if !ok {
+ c.Unparsed = append(c.Unparsed, parts[i])
+ continue
+ }
+
+ switch lowerAttr {
+ case "samesite":
+ lowerVal, ascii := ascii.ToLower(val)
+ if !ascii {
+ c.SameSite = SameSiteDefaultMode
+ continue
+ }
+ switch lowerVal {
+ case "lax":
+ c.SameSite = SameSiteLaxMode
+ case "strict":
+ c.SameSite = SameSiteStrictMode
+ case "none":
+ c.SameSite = SameSiteNoneMode
+ default:
+ c.SameSite = SameSiteDefaultMode
+ }
+ continue
+ case "secure":
+ c.Secure = true
+ continue
+ case "httponly":
+ c.HttpOnly = true
+ continue
+ case "domain":
+ c.Domain = val
+ continue
+ case "max-age":
+ secs, err := strconv.Atoi(val)
+ if err != nil || secs != 0 && val[0] == '0' {
+ break
+ }
+ if secs <= 0 {
+ secs = -1
+ }
+ c.MaxAge = secs
+ continue
+ case "expires":
+ c.RawExpires = val
+ exptime, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", val)
+ if err != nil {
+ c.Expires = time.Time{}
+ break
+ }
+ }
+ c.Expires = exptime.UTC()
+ continue
+ case "path":
+ c.Path = val
+ continue
+ }
+ c.Unparsed = append(c.Unparsed, parts[i])
+ }
+ cookies = append(cookies, c)
+ }
+ return cookies
+}
+
+// SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers.
+// The provided cookie must have a valid Name. Invalid cookies may be
+// silently dropped.
+func SetCookie(w ResponseWriter, cookie *Cookie) {
+ if v := cookie.String(); v != "" {
+ w.Header().Add("Set-Cookie", v)
+ }
+}
+
+// String returns the serialization of the cookie for use in a Cookie
+// header (if only Name and Value are set) or a Set-Cookie response
+// header (if other fields are set).
+// If c is nil or c.Name is invalid, the empty string is returned.
+func (c *Cookie) String() string {
+ if c == nil || !isCookieNameValid(c.Name) {
+ return ""
+ }
+ // extraCookieLength derived from typical length of cookie attributes
+ // see RFC 6265 Sec 4.1.
+ const extraCookieLength = 110
+ var b strings.Builder
+ b.Grow(len(c.Name) + len(c.Value) + len(c.Domain) + len(c.Path) + extraCookieLength)
+ b.WriteString(c.Name)
+ b.WriteRune('=')
+ b.WriteString(sanitizeCookieValue(c.Value))
+
+ if len(c.Path) > 0 {
+ b.WriteString("; Path=")
+ b.WriteString(sanitizeCookiePath(c.Path))
+ }
+ if len(c.Domain) > 0 {
+ if validCookieDomain(c.Domain) {
+ // A c.Domain containing illegal characters is not
+ // sanitized but simply dropped which turns the cookie
+ // into a host-only cookie. A leading dot is okay
+ // but won't be sent.
+ d := c.Domain
+ if d[0] == '.' {
+ d = d[1:]
+ }
+ b.WriteString("; Domain=")
+ b.WriteString(d)
+ } else {
+ log.Printf("net/http: invalid Cookie.Domain %q; dropping domain attribute", c.Domain)
+ }
+ }
+ var buf [len(TimeFormat)]byte
+ if validCookieExpires(c.Expires) {
+ b.WriteString("; Expires=")
+ b.Write(c.Expires.UTC().AppendFormat(buf[:0], TimeFormat))
+ }
+ if c.MaxAge > 0 {
+ b.WriteString("; Max-Age=")
+ b.Write(strconv.AppendInt(buf[:0], int64(c.MaxAge), 10))
+ } else if c.MaxAge < 0 {
+ b.WriteString("; Max-Age=0")
+ }
+ if c.HttpOnly {
+ b.WriteString("; HttpOnly")
+ }
+ if c.Secure {
+ b.WriteString("; Secure")
+ }
+ switch c.SameSite {
+ case SameSiteDefaultMode:
+ // Skip, default mode is obtained by not emitting the attribute.
+ case SameSiteNoneMode:
+ b.WriteString("; SameSite=None")
+ case SameSiteLaxMode:
+ b.WriteString("; SameSite=Lax")
+ case SameSiteStrictMode:
+ b.WriteString("; SameSite=Strict")
+ }
+ return b.String()
+}
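+
+// A short serialization sketch (field values are illustrative):
+//
+// c := &Cookie{Name: "session", Value: "abc123", Path: "/", MaxAge: 3600,
+// 	Secure: true, HttpOnly: true, SameSite: SameSiteLaxMode}
+// c.String()
+// // "session=abc123; Path=/; Max-Age=3600; HttpOnly; Secure; SameSite=Lax"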
+
+// Valid reports whether the cookie is valid.
+func (c *Cookie) Valid() error {
+ if c == nil {
+ return errors.New("http: nil Cookie")
+ }
+ if !isCookieNameValid(c.Name) {
+ return errors.New("http: invalid Cookie.Name")
+ }
+ if !validCookieExpires(c.Expires) {
+ return errors.New("http: invalid Cookie.Expires")
+ }
+ for i := 0; i < len(c.Value); i++ {
+ if !validCookieValueByte(c.Value[i]) {
+ return fmt.Errorf("http: invalid byte %q in Cookie.Value", c.Value[i])
+ }
+ }
+ if len(c.Path) > 0 {
+ for i := 0; i < len(c.Path); i++ {
+ if !validCookiePathByte(c.Path[i]) {
+ return fmt.Errorf("http: invalid byte %q in Cookie.Path", c.Path[i])
+ }
+ }
+ }
+ if len(c.Domain) > 0 {
+ if !validCookieDomain(c.Domain) {
+ return errors.New("http: invalid Cookie.Domain")
+ }
+ }
+ return nil
+}
+
+// readCookies parses all "Cookie" values from the header h and
+// returns the successfully parsed Cookies.
+//
+// If filter isn't empty, only cookies of that name are returned.
+func readCookies(h Header, filter string) []*Cookie {
+ lines := h["Cookie"]
+ if len(lines) == 0 {
+ return []*Cookie{}
+ }
+
+ cookies := make([]*Cookie, 0, len(lines)+strings.Count(lines[0], ";"))
+ for _, line := range lines {
+ line = textproto.TrimString(line)
+
+ var part string
+		for len(line) > 0 { // continue while we still have the rest of the line
+ part, line, _ = strings.Cut(line, ";")
+ part = textproto.TrimString(part)
+ if part == "" {
+ continue
+ }
+ name, val, _ := strings.Cut(part, "=")
+ if !isCookieNameValid(name) {
+ continue
+ }
+ if filter != "" && filter != name {
+ continue
+ }
+ val, ok := parseCookieValue(val, true)
+ if !ok {
+ continue
+ }
+ cookies = append(cookies, &Cookie{Name: name, Value: val})
+ }
+ }
+ return cookies
+}
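+
+// For example, a header carrying one Cookie line (values illustrative):
+//
+// h := Header{"Cookie": {"session=abc123; theme=dark"}}
+// readCookies(h, "") // → [{session abc123} {theme dark}]
+// readCookies(h, "theme") // → [{theme dark}]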
+
+// validCookieDomain reports whether v is a valid cookie domain-value.
+func validCookieDomain(v string) bool {
+ if isCookieDomainName(v) {
+ return true
+ }
+ if net.ParseIP(v) != nil && !strings.Contains(v, ":") {
+ return true
+ }
+ return false
+}
+
+// validCookieExpires reports whether v is a valid cookie expires-value.
+func validCookieExpires(t time.Time) bool {
+	// Per IETF RFC 6265 Section 5.1.1.5, the year must not be less than 1601.
+ return t.Year() >= 1601
+}
+
+// isCookieDomainName reports whether s is a valid domain name or a valid
+// domain name with a leading dot '.'. It is almost a direct copy of
+// package net's isDomainName.
+func isCookieDomainName(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+ if len(s) > 255 {
+ return false
+ }
+
+ if s[0] == '.' {
+		// A cookie domain attribute may start with a leading dot.
+ s = s[1:]
+ }
+ last := byte('.')
+ ok := false // Ok once we've seen a letter.
+ partlen := 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ switch {
+ default:
+ return false
+ case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
+ // No '_' allowed here (in contrast to package net).
+ ok = true
+ partlen++
+ case '0' <= c && c <= '9':
+ // fine
+ partlen++
+ case c == '-':
+ // Byte before dash cannot be dot.
+ if last == '.' {
+ return false
+ }
+ partlen++
+ case c == '.':
+ // Byte before dot cannot be dot, dash.
+ if last == '.' || last == '-' {
+ return false
+ }
+ if partlen > 63 || partlen == 0 {
+ return false
+ }
+ partlen = 0
+ }
+ last = c
+ }
+ if last == '-' || partlen > 63 {
+ return false
+ }
+
+ return ok
+}
+
+var cookieNameSanitizer = strings.NewReplacer("\n", "-", "\r", "-")
+
+func sanitizeCookieName(n string) string {
+ return cookieNameSanitizer.Replace(n)
+}
+
+// sanitizeCookieValue produces a suitable cookie-value from v.
+// https://tools.ietf.org/html/rfc6265#section-4.1.1
+//
+// cookie-value = *cookie-octet / ( DQUOTE *cookie-octet DQUOTE )
+// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
+// ; US-ASCII characters excluding CTLs,
+// ; whitespace, DQUOTE, comma, semicolon,
+// ; and backslash
+//
+// We loosen this as spaces and commas are common in cookie values,
+// but we produce a quoted cookie-value if and only if v contains
+// commas or spaces.
+// See https://golang.org/issue/7243 for the discussion.
+func sanitizeCookieValue(v string) string {
+ v = sanitizeOrWarn("Cookie.Value", validCookieValueByte, v)
+ if len(v) == 0 {
+ return v
+ }
+ if strings.ContainsAny(v, " ,") {
+ return `"` + v + `"`
+ }
+ return v
+}
+
+func validCookieValueByte(b byte) bool {
+ return 0x20 <= b && b < 0x7f && b != '"' && b != ';' && b != '\\'
+}
+
+// path-av = "Path=" path-value
+// path-value = <any CHAR except CTLs or ";">
+func sanitizeCookiePath(v string) string {
+ return sanitizeOrWarn("Cookie.Path", validCookiePathByte, v)
+}
+
+func validCookiePathByte(b byte) bool {
+ return 0x20 <= b && b < 0x7f && b != ';'
+}
+
+func sanitizeOrWarn(fieldName string, valid func(byte) bool, v string) string {
+ ok := true
+ for i := 0; i < len(v); i++ {
+ if valid(v[i]) {
+ continue
+ }
+ log.Printf("net/http: invalid byte %q in %s; dropping invalid bytes", v[i], fieldName)
+ ok = false
+ break
+ }
+ if ok {
+ return v
+ }
+ buf := make([]byte, 0, len(v))
+ for i := 0; i < len(v); i++ {
+ if b := v[i]; valid(b) {
+ buf = append(buf, b)
+ }
+ }
+ return string(buf)
+}
+
+func parseCookieValue(raw string, allowDoubleQuote bool) (string, bool) {
+ // Strip the quotes, if present.
+ if allowDoubleQuote && len(raw) > 1 && raw[0] == '"' && raw[len(raw)-1] == '"' {
+ raw = raw[1 : len(raw)-1]
+ }
+ for i := 0; i < len(raw); i++ {
+ if !validCookieValueByte(raw[i]) {
+ return "", false
+ }
+ }
+ return raw, true
+}
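+
+// For example (sketch):
+//
+// parseCookieValue(`"hello"`, true) // → "hello", true (quotes stripped)
+// parseCookieValue(`"hello"`, false) // → "", false ('"' is not a valid value byte)
+// parseCookieValue("a;b", true) // → "", false (';' is not a valid value byte)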
+
+func isCookieNameValid(raw string) bool {
+ if raw == "" {
+ return false
+ }
+ return strings.IndexFunc(raw, isNotToken) < 0
+}
diff --git a/contrib/go/_std_1.19/src/net/http/doc.go b/contrib/go/_std_1.19/src/net/http/doc.go
new file mode 100644
index 0000000000..67c4246c60
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/http/doc.go
@@ -0,0 +1,106 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package http provides HTTP client and server implementations.
+
+Get, Head, Post, and PostForm make HTTP (or HTTPS) requests:
+
+ resp, err := http.Get("http://example.com/")
+ ...
+ resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf)
+ ...
+ resp, err := http.PostForm("http://example.com/form",
+ url.Values{"key": {"Value"}, "id": {"123"}})
+
+The client must close the response body when finished with it:
+
+ resp, err := http.Get("http://example.com/")
+ if err != nil {
+ // handle error
+ }
+ defer resp.Body.Close()
+ body, err := io.ReadAll(resp.Body)
+ // ...
+
+For control over HTTP client headers, redirect policy, and other
+settings, create a Client:
+
+ client := &http.Client{
+ CheckRedirect: redirectPolicyFunc,
+ }
+
+ resp, err := client.Get("http://example.com")
+ // ...
+
+ req, err := http.NewRequest("GET", "http://example.com", nil)
+ // ...
+ req.Header.Add("If-None-Match", `W/"wyzzy"`)
+ resp, err := client.Do(req)
+ // ...
+
+For control over proxies, TLS configuration, keep-alives,
+compression, and other settings, create a Transport:
+
+ tr := &http.Transport{
+ MaxIdleConns: 10,
+ IdleConnTimeout: 30 * time.Second,
+ DisableCompression: true,
+ }
+ client := &http.Client{Transport: tr}
+ resp, err := client.Get("https://example.com")
+
+Clients and Transports are safe for concurrent use by multiple
+goroutines and for efficiency should only be created once and re-used.
+
+ListenAndServe starts an HTTP server with a given address and handler.
+The handler is usually nil, which means to use DefaultServeMux.
+Handle and HandleFunc add handlers to DefaultServeMux:
+
+ http.Handle("/foo", fooHandler)
+
+ http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path))
+ })
+
+ log.Fatal(http.ListenAndServe(":8080", nil))
+
+More control over the server's behavior is available by creating a
+custom Server:
+
+ s := &http.Server{
+ Addr: ":8080",
+ Handler: myHandler,
+ ReadTimeout: 10 * time.Second,
+ WriteTimeout: 10 * time.Second,
+ MaxHeaderBytes: 1 << 20,
+ }
+ log.Fatal(s.ListenAndServe())
+
+Starting with Go 1.6, the http package has transparent support for the
+HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2
+can do so by setting Transport.TLSNextProto (for clients) or
+Server.TLSNextProto (for servers) to a non-nil, empty
+map. Alternatively, the following GODEBUG environment variables are
+currently supported:
+
+ GODEBUG=http2client=0 # disable HTTP/2 client support
+ GODEBUG=http2server=0 # disable HTTP/2 server support
+ GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs
+ GODEBUG=http2debug=2 # ... even more verbose, with frame dumps
+
+The GODEBUG variables are not covered by Go's API compatibility
+promise. Please report any issues before disabling HTTP/2
+support: https://golang.org/s/http2bug
+
+The http package's Transport and Server both automatically enable
+HTTP/2 support for simple configurations. To enable HTTP/2 for more
+complex configurations, to use lower-level HTTP/2 features, or to use
+a newer version of Go's http2 package, import "golang.org/x/net/http2"
+directly and use its ConfigureTransport and/or ConfigureServer
+functions. Manually configuring HTTP/2 via the golang.org/x/net/http2
+package takes precedence over the net/http package's built-in HTTP/2
+support.
+*/
+package http
diff --git a/contrib/go/_std_1.19/src/net/http/filetransport.go b/contrib/go/_std_1.19/src/net/http/filetransport.go
new file mode 100644
index 0000000000..94684b07a1
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/http/filetransport.go
@@ -0,0 +1,123 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "fmt"
+ "io"
+)
+
+// fileTransport implements RoundTripper for the 'file' protocol.
+type fileTransport struct {
+ fh fileHandler
+}
+
+// NewFileTransport returns a new RoundTripper, serving the provided
+// FileSystem. The returned RoundTripper ignores the URL host in its
+// incoming requests, as well as most other properties of the
+// request.
+//
+// The typical use case for NewFileTransport is to register the "file"
+// protocol with a Transport, as in:
+//
+// t := &http.Transport{}
+// t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
+// c := &http.Client{Transport: t}
+// res, err := c.Get("file:///etc/passwd")
+// ...
+func NewFileTransport(fs FileSystem) RoundTripper {
+ return fileTransport{fileHandler{fs}}
+}
+
+func (t fileTransport) RoundTrip(req *Request) (resp *Response, err error) {
+ // We start ServeHTTP in a goroutine, which may take a long
+	// time if the file is large. The newPopulateResponseWriter
+	// call returns a channel on which either ServeHTTP or finish()
+	// sends our *Response, once the *Response itself has been
+	// populated (even if the body is still being written to
+	// res.Body, a pipe).
+ rw, resc := newPopulateResponseWriter()
+ go func() {
+ t.fh.ServeHTTP(rw, req)
+ rw.finish()
+ }()
+ return <-resc, nil
+}
+
+func newPopulateResponseWriter() (*populateResponse, <-chan *Response) {
+ pr, pw := io.Pipe()
+ rw := &populateResponse{
+ ch: make(chan *Response),
+ pw: pw,
+ res: &Response{
+ Proto: "HTTP/1.0",
+ ProtoMajor: 1,
+ Header: make(Header),
+ Close: true,
+ Body: pr,
+ },
+ }
+ return rw, rw.ch
+}
+
+// populateResponse is a ResponseWriter that populates the *Response
+// in res, and writes its body to a pipe connected to the response
+// body. Once writes begin or finish() is called, the response is sent
+// on ch.
+type populateResponse struct {
+ res *Response
+ ch chan *Response
+ wroteHeader bool
+ hasContent bool
+ sentResponse bool
+ pw *io.PipeWriter
+}
+
+func (pr *populateResponse) finish() {
+ if !pr.wroteHeader {
+ pr.WriteHeader(500)
+ }
+ if !pr.sentResponse {
+ pr.sendResponse()
+ }
+ pr.pw.Close()
+}
+
+func (pr *populateResponse) sendResponse() {
+ if pr.sentResponse {
+ return
+ }
+ pr.sentResponse = true
+
+ if pr.hasContent {
+ pr.res.ContentLength = -1
+ }
+ pr.ch <- pr.res
+}
+
+func (pr *populateResponse) Header() Header {
+ return pr.res.Header
+}
+
+func (pr *populateResponse) WriteHeader(code int) {
+ if pr.wroteHeader {
+ return
+ }
+ pr.wroteHeader = true
+
+ pr.res.StatusCode = code
+ pr.res.Status = fmt.Sprintf("%d %s", code, StatusText(code))
+}
+
+func (pr *populateResponse) Write(p []byte) (n int, err error) {
+ if !pr.wroteHeader {
+ pr.WriteHeader(StatusOK)
+ }
+ pr.hasContent = true
+ if !pr.sentResponse {
+ pr.sendResponse()
+ }
+ return pr.pw.Write(p)
+}
diff --git a/contrib/go/_std_1.19/src/net/http/fs.go b/contrib/go/_std_1.19/src/net/http/fs.go
new file mode 100644
index 0000000000..4f144ebad2
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/http/fs.go
@@ -0,0 +1,972 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP file system request handler
+
+package http
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "mime"
+ "mime/multipart"
+ "net/textproto"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// A Dir implements FileSystem using the native file system restricted to a
+// specific directory tree.
+//
+// While the FileSystem.Open method takes '/'-separated paths, a Dir's string
+// value is a filename on the native file system, not a URL, so it is separated
+// by filepath.Separator, which isn't necessarily '/'.
+//
+// Note that Dir could expose sensitive files and directories. Dir will follow
+// symlinks pointing out of the directory tree, which can be especially dangerous
+// if serving from a directory in which users are able to create arbitrary symlinks.
+// Dir will also allow access to files and directories starting with a period,
+// which could expose sensitive directories like .git or sensitive files like
+// .htpasswd. To exclude files with a leading period, remove the files/directories
+// from the server or create a custom FileSystem implementation.
+//
+// An empty Dir is treated as ".".
+type Dir string
+
+// mapOpenError maps the provided non-nil error from opening name
+// to a possibly better non-nil error. In particular, it turns OS-specific errors
+// about opening files in non-directories into fs.ErrNotExist. See Issues 18984 and 49552.
+func mapOpenError(originalErr error, name string, sep rune, stat func(string) (fs.FileInfo, error)) error {
+ if errors.Is(originalErr, fs.ErrNotExist) || errors.Is(originalErr, fs.ErrPermission) {
+ return originalErr
+ }
+
+ parts := strings.Split(name, string(sep))
+ for i := range parts {
+ if parts[i] == "" {
+ continue
+ }
+ fi, err := stat(strings.Join(parts[:i+1], string(sep)))
+ if err != nil {
+ return originalErr
+ }
+ if !fi.IsDir() {
+ return fs.ErrNotExist
+ }
+ }
+ return originalErr
+}
+
+// Open implements FileSystem using os.Open, opening files for reading rooted
+// and relative to the directory d.
+func (d Dir) Open(name string) (File, error) {
+ if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) {
+ return nil, errors.New("http: invalid character in file path")
+ }
+ dir := string(d)
+ if dir == "" {
+ dir = "."
+ }
+ fullName := filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))
+ f, err := os.Open(fullName)
+ if err != nil {
+ return nil, mapOpenError(err, fullName, filepath.Separator, os.Stat)
+ }
+ return f, nil
+}
+
+// A FileSystem implements access to a collection of named files.
+// The elements in a file path are separated by slash ('/', U+002F)
+// characters, regardless of host operating system convention.
+// See the FileServer function to convert a FileSystem to a Handler.
+//
+// This interface predates the fs.FS interface, which can be used instead:
+// the FS adapter function converts an fs.FS to a FileSystem.
+type FileSystem interface {
+ Open(name string) (File, error)
+}
+
+// A File is returned by a FileSystem's Open method and can be
+// served by the FileServer implementation.
+//
+// The methods should behave the same as those on an *os.File.
+type File interface {
+ io.Closer
+ io.Reader
+ io.Seeker
+ Readdir(count int) ([]fs.FileInfo, error)
+ Stat() (fs.FileInfo, error)
+}
+
+type anyDirs interface {
+ len() int
+ name(i int) string
+ isDir(i int) bool
+}
+
+type fileInfoDirs []fs.FileInfo
+
+func (d fileInfoDirs) len() int { return len(d) }
+func (d fileInfoDirs) isDir(i int) bool { return d[i].IsDir() }
+func (d fileInfoDirs) name(i int) string { return d[i].Name() }
+
+type dirEntryDirs []fs.DirEntry
+
+func (d dirEntryDirs) len() int { return len(d) }
+func (d dirEntryDirs) isDir(i int) bool { return d[i].IsDir() }
+func (d dirEntryDirs) name(i int) string { return d[i].Name() }
+
+func dirList(w ResponseWriter, r *Request, f File) {
+ // Prefer to use ReadDir instead of Readdir,
+ // because the former doesn't require calling
+ // Stat on every entry of a directory on Unix.
+ var dirs anyDirs
+ var err error
+ if d, ok := f.(fs.ReadDirFile); ok {
+ var list dirEntryDirs
+ list, err = d.ReadDir(-1)
+ dirs = list
+ } else {
+ var list fileInfoDirs
+ list, err = f.Readdir(-1)
+ dirs = list
+ }
+
+ if err != nil {
+ logf(r, "http: error reading directory: %v", err)
+ Error(w, "Error reading directory", StatusInternalServerError)
+ return
+ }
+ sort.Slice(dirs, func(i, j int) bool { return dirs.name(i) < dirs.name(j) })
+
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ fmt.Fprintf(w, "<pre>\n")
+ for i, n := 0, dirs.len(); i < n; i++ {
+ name := dirs.name(i)
+ if dirs.isDir(i) {
+ name += "/"
+ }
+ // name may contain '?' or '#', which must be escaped to remain
+ // part of the URL path, and not indicate the start of a query
+ // string or fragment.
+ url := url.URL{Path: name}
+ fmt.Fprintf(w, "<a href=\"%s\">%s</a>\n", url.String(), htmlReplacer.Replace(name))
+ }
+ fmt.Fprintf(w, "</pre>\n")
+}
+
+// ServeContent replies to the request using the content in the
+// provided ReadSeeker. The main benefit of ServeContent over io.Copy
+// is that it handles Range requests properly, sets the MIME type, and
+// handles If-Match, If-Unmodified-Since, If-None-Match, If-Modified-Since,
+// and If-Range requests.
+//
+// If the response's Content-Type header is not set, ServeContent
+// first tries to deduce the type from name's file extension and,
+// if that fails, falls back to reading the first block of the content
+// and passing it to DetectContentType.
+// The name is otherwise unused; in particular it can be empty and is
+// never sent in the response.
+//
+// If modtime is not the zero time or Unix epoch, ServeContent
+// includes it in a Last-Modified header in the response. If the
+// request includes an If-Modified-Since header, ServeContent uses
+// modtime to decide whether the content needs to be sent at all.
+//
+// The content's Seek method must work: ServeContent uses
+// a seek to the end of the content to determine its size.
+//
+// If the caller has set w's ETag header formatted per RFC 7232, section 2.3,
+// ServeContent uses it to handle requests using If-Match, If-None-Match, or If-Range.
+//
+// Note that *os.File implements the io.ReadSeeker interface.
+func ServeContent(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker) {
+ sizeFunc := func() (int64, error) {
+ size, err := content.Seek(0, io.SeekEnd)
+ if err != nil {
+ return 0, errSeeker
+ }
+ _, err = content.Seek(0, io.SeekStart)
+ if err != nil {
+ return 0, errSeeker
+ }
+ return size, nil
+ }
+ serveContent(w, req, name, modtime, sizeFunc, content)
+}
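+
+// A minimal handler sketch (the file path below is illustrative):
+//
+// func serveReport(w ResponseWriter, r *Request) {
+// 	f, err := os.Open("/tmp/report.pdf")
+// 	if err != nil {
+// 		NotFound(w, r)
+// 		return
+// 	}
+// 	defer f.Close()
+// 	fi, err := f.Stat()
+// 	if err != nil {
+// 		Error(w, "500 Internal Server Error", StatusInternalServerError)
+// 		return
+// 	}
+// 	ServeContent(w, r, fi.Name(), fi.ModTime(), f)
+// }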
+
+// errSeeker is returned by ServeContent's sizeFunc when the content
+// doesn't seek properly. The underlying Seeker's error text isn't
+// included in the sizeFunc reply so it's not sent over HTTP to end
+// users.
+var errSeeker = errors.New("seeker can't seek")
+
+// errNoOverlap is returned by serveContent's parseRange if first-byte-pos of
+// all of the byte-range-spec values is greater than the content size.
+var errNoOverlap = errors.New("invalid range: failed to overlap")
+
+// If name is empty, the filename is unknown (it is used for the MIME type, before sniffing).
+// If modtime.IsZero(), the modification time is unknown.
+// content must be positioned at the beginning of the file.
+// The sizeFunc is called at most once. Its error, if any, is sent in the HTTP response.
+func serveContent(w ResponseWriter, r *Request, name string, modtime time.Time, sizeFunc func() (int64, error), content io.ReadSeeker) {
+ setLastModified(w, modtime)
+ done, rangeReq := checkPreconditions(w, r, modtime)
+ if done {
+ return
+ }
+
+ code := StatusOK
+
+ // If Content-Type isn't set, use the file's extension to find it, but
+ // if the Content-Type is unset explicitly, do not sniff the type.
+ ctypes, haveType := w.Header()["Content-Type"]
+ var ctype string
+ if !haveType {
+ ctype = mime.TypeByExtension(filepath.Ext(name))
+ if ctype == "" {
+ // read a chunk to decide between utf-8 text and binary
+ var buf [sniffLen]byte
+ n, _ := io.ReadFull(content, buf[:])
+ ctype = DetectContentType(buf[:n])
+ _, err := content.Seek(0, io.SeekStart) // rewind to output whole file
+ if err != nil {
+ Error(w, "seeker can't seek", StatusInternalServerError)
+ return
+ }
+ }
+ w.Header().Set("Content-Type", ctype)
+ } else if len(ctypes) > 0 {
+ ctype = ctypes[0]
+ }
+
+ size, err := sizeFunc()
+ if err != nil {
+ Error(w, err.Error(), StatusInternalServerError)
+ return
+ }
+
+ // handle Content-Range header.
+ sendSize := size
+ var sendContent io.Reader = content
+ if size >= 0 {
+ ranges, err := parseRange(rangeReq, size)
+ if err != nil {
+ if err == errNoOverlap {
+ w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", size))
+ }
+ Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
+ return
+ }
+ if sumRangesSize(ranges) > size {
+ // The total number of bytes in all the ranges
+ // is larger than the size of the file by
+ // itself, so this is probably an attack, or a
+ // dumb client. Ignore the range request.
+ ranges = nil
+ }
+ switch {
+ case len(ranges) == 1:
+ // RFC 7233, Section 4.1:
+ // "If a single part is being transferred, the server
+ // generating the 206 response MUST generate a
+ // Content-Range header field, describing what range
+ // of the selected representation is enclosed, and a
+ // payload consisting of the range.
+ // ...
+ // A server MUST NOT generate a multipart response to
+ // a request for a single range, since a client that
+ // does not request multiple parts might not support
+ // multipart responses."
+ ra := ranges[0]
+ if _, err := content.Seek(ra.start, io.SeekStart); err != nil {
+ Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
+ return
+ }
+ sendSize = ra.length
+ code = StatusPartialContent
+ w.Header().Set("Content-Range", ra.contentRange(size))
+ case len(ranges) > 1:
+ sendSize = rangesMIMESize(ranges, ctype, size)
+ code = StatusPartialContent
+
+ pr, pw := io.Pipe()
+ mw := multipart.NewWriter(pw)
+ w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
+ sendContent = pr
+ defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
+ go func() {
+ for _, ra := range ranges {
+ part, err := mw.CreatePart(ra.mimeHeader(ctype, size))
+ if err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ if _, err := content.Seek(ra.start, io.SeekStart); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ if _, err := io.CopyN(part, content, ra.length); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ }
+ mw.Close()
+ pw.Close()
+ }()
+ }
+
+ w.Header().Set("Accept-Ranges", "bytes")
+ if w.Header().Get("Content-Encoding") == "" {
+ w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
+ }
+ }
+
+ w.WriteHeader(code)
+
+ if r.Method != "HEAD" {
+ io.CopyN(w, sendContent, sendSize)
+ }
+}
+
+// scanETag determines if a syntactically valid ETag is present at s. If so,
+// the ETag and the remaining text after consuming it are returned. Otherwise,
+// it returns "", "".
+func scanETag(s string) (etag string, remain string) {
+ s = textproto.TrimString(s)
+ start := 0
+ if strings.HasPrefix(s, "W/") {
+ start = 2
+ }
+ if len(s[start:]) < 2 || s[start] != '"' {
+ return "", ""
+ }
+ // ETag is either W/"text" or "text".
+ // See RFC 7232 2.3.
+ for i := start + 1; i < len(s); i++ {
+ c := s[i]
+ switch {
+ // Character values allowed in ETags.
+ case c == 0x21 || c >= 0x23 && c <= 0x7E || c >= 0x80:
+ case c == '"':
+ return s[:i+1], s[i+1:]
+ default:
+ return "", ""
+ }
+ }
+ return "", ""
+}
+
+// etagStrongMatch reports whether a and b match using strong ETag comparison.
+// Assumes a and b are valid ETags.
+func etagStrongMatch(a, b string) bool {
+ return a == b && a != "" && a[0] == '"'
+}
+
+// etagWeakMatch reports whether a and b match using weak ETag comparison.
+// Assumes a and b are valid ETags.
+func etagWeakMatch(a, b string) bool {
+ return strings.TrimPrefix(a, "W/") == strings.TrimPrefix(b, "W/")
+}
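+
+// For example (sketch):
+//
+// etagStrongMatch(`"v1"`, `"v1"`) // true
+// etagStrongMatch(`W/"v1"`, `W/"v1"`) // false: weak tags never match strongly
+// etagWeakMatch(`W/"v1"`, `"v1"`) // true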
+
+// condResult is the result of an HTTP request precondition check.
+// See https://tools.ietf.org/html/rfc7232 section 3.
+type condResult int
+
+const (
+ condNone condResult = iota
+ condTrue
+ condFalse
+)
+
+func checkIfMatch(w ResponseWriter, r *Request) condResult {
+ im := r.Header.Get("If-Match")
+ if im == "" {
+ return condNone
+ }
+ for {
+ im = textproto.TrimString(im)
+ if len(im) == 0 {
+ break
+ }
+ if im[0] == ',' {
+ im = im[1:]
+ continue
+ }
+ if im[0] == '*' {
+ return condTrue
+ }
+ etag, remain := scanETag(im)
+ if etag == "" {
+ break
+ }
+ if etagStrongMatch(etag, w.Header().get("Etag")) {
+ return condTrue
+ }
+ im = remain
+ }
+
+ return condFalse
+}
+
+func checkIfUnmodifiedSince(r *Request, modtime time.Time) condResult {
+ ius := r.Header.Get("If-Unmodified-Since")
+ if ius == "" || isZeroTime(modtime) {
+ return condNone
+ }
+ t, err := ParseTime(ius)
+ if err != nil {
+ return condNone
+ }
+
+ // The Last-Modified header truncates sub-second precision so
+ // the modtime needs to be truncated too.
+ modtime = modtime.Truncate(time.Second)
+ if modtime.Before(t) || modtime.Equal(t) {
+ return condTrue
+ }
+ return condFalse
+}
+
+func checkIfNoneMatch(w ResponseWriter, r *Request) condResult {
+ inm := r.Header.get("If-None-Match")
+ if inm == "" {
+ return condNone
+ }
+ buf := inm
+ for {
+ buf = textproto.TrimString(buf)
+ if len(buf) == 0 {
+ break
+ }
+ if buf[0] == ',' {
+ buf = buf[1:]
+ continue
+ }
+ if buf[0] == '*' {
+ return condFalse
+ }
+ etag, remain := scanETag(buf)
+ if etag == "" {
+ break
+ }
+ if etagWeakMatch(etag, w.Header().get("Etag")) {
+ return condFalse
+ }
+ buf = remain
+ }
+ return condTrue
+}
+
+func checkIfModifiedSince(r *Request, modtime time.Time) condResult {
+ if r.Method != "GET" && r.Method != "HEAD" {
+ return condNone
+ }
+ ims := r.Header.Get("If-Modified-Since")
+ if ims == "" || isZeroTime(modtime) {
+ return condNone
+ }
+ t, err := ParseTime(ims)
+ if err != nil {
+ return condNone
+ }
+ // The Last-Modified header truncates sub-second precision so
+ // the modtime needs to be truncated too.
+ modtime = modtime.Truncate(time.Second)
+ if modtime.Before(t) || modtime.Equal(t) {
+ return condFalse
+ }
+ return condTrue
+}
+
+func checkIfRange(w ResponseWriter, r *Request, modtime time.Time) condResult {
+ if r.Method != "GET" && r.Method != "HEAD" {
+ return condNone
+ }
+ ir := r.Header.get("If-Range")
+ if ir == "" {
+ return condNone
+ }
+ etag, _ := scanETag(ir)
+ if etag != "" {
+ if etagStrongMatch(etag, w.Header().Get("Etag")) {
+ return condTrue
+ } else {
+ return condFalse
+ }
+ }
+ // The If-Range value is typically the ETag value, but it may also be
+ // the modtime date. See golang.org/issue/8367.
+ if modtime.IsZero() {
+ return condFalse
+ }
+ t, err := ParseTime(ir)
+ if err != nil {
+ return condFalse
+ }
+ if t.Unix() == modtime.Unix() {
+ return condTrue
+ }
+ return condFalse
+}
+
+var unixEpochTime = time.Unix(0, 0)
+
+// isZeroTime reports whether t is obviously unspecified (either zero or Unix()=0).
+func isZeroTime(t time.Time) bool {
+ return t.IsZero() || t.Equal(unixEpochTime)
+}
+
+func setLastModified(w ResponseWriter, modtime time.Time) {
+ if !isZeroTime(modtime) {
+ w.Header().Set("Last-Modified", modtime.UTC().Format(TimeFormat))
+ }
+}
+
+func writeNotModified(w ResponseWriter) {
+ // RFC 7232 section 4.1:
+ // a sender SHOULD NOT generate representation metadata other than the
+ // above listed fields unless said metadata exists for the purpose of
+ // guiding cache updates (e.g., Last-Modified might be useful if the
+ // response does not have an ETag field).
+ h := w.Header()
+ delete(h, "Content-Type")
+ delete(h, "Content-Length")
+ delete(h, "Content-Encoding")
+ if h.Get("Etag") != "" {
+ delete(h, "Last-Modified")
+ }
+ w.WriteHeader(StatusNotModified)
+}
+
+// checkPreconditions evaluates request preconditions and reports whether a precondition
+// resulted in sending StatusNotModified or StatusPreconditionFailed.
+func checkPreconditions(w ResponseWriter, r *Request, modtime time.Time) (done bool, rangeHeader string) {
+ // This function carefully follows RFC 7232 section 6.
+ ch := checkIfMatch(w, r)
+ if ch == condNone {
+ ch = checkIfUnmodifiedSince(r, modtime)
+ }
+ if ch == condFalse {
+ w.WriteHeader(StatusPreconditionFailed)
+ return true, ""
+ }
+ switch checkIfNoneMatch(w, r) {
+ case condFalse:
+ if r.Method == "GET" || r.Method == "HEAD" {
+ writeNotModified(w)
+ return true, ""
+ } else {
+ w.WriteHeader(StatusPreconditionFailed)
+ return true, ""
+ }
+ case condNone:
+ if checkIfModifiedSince(r, modtime) == condFalse {
+ writeNotModified(w)
+ return true, ""
+ }
+ }
+
+ rangeHeader = r.Header.get("Range")
+ if rangeHeader != "" && checkIfRange(w, r, modtime) == condFalse {
+ rangeHeader = ""
+ }
+ return false, rangeHeader
+}
+
+// name is '/'-separated, not filepath.Separator.
+func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirect bool) {
+ const indexPage = "/index.html"
+
+ // redirect .../index.html to .../
+ // can't use Redirect() because that would make the path absolute,
+ // which would be a problem running under StripPrefix
+ if strings.HasSuffix(r.URL.Path, indexPage) {
+ localRedirect(w, r, "./")
+ return
+ }
+
+ f, err := fs.Open(name)
+ if err != nil {
+ msg, code := toHTTPError(err)
+ Error(w, msg, code)
+ return
+ }
+ defer f.Close()
+
+ d, err := f.Stat()
+ if err != nil {
+ msg, code := toHTTPError(err)
+ Error(w, msg, code)
+ return
+ }
+
+ if redirect {
+ // redirect to canonical path: / at end of directory url
+ // r.URL.Path always begins with /
+ url := r.URL.Path
+ if d.IsDir() {
+ if url[len(url)-1] != '/' {
+ localRedirect(w, r, path.Base(url)+"/")
+ return
+ }
+ } else {
+ if url[len(url)-1] == '/' {
+ localRedirect(w, r, "../"+path.Base(url))
+ return
+ }
+ }
+ }
+
+ if d.IsDir() {
+ url := r.URL.Path
+ // redirect if the directory name doesn't end in a slash
+ if url == "" || url[len(url)-1] != '/' {
+ localRedirect(w, r, path.Base(url)+"/")
+ return
+ }
+
+ // use contents of index.html for directory, if present
+ index := strings.TrimSuffix(name, "/") + indexPage
+ ff, err := fs.Open(index)
+ if err == nil {
+ defer ff.Close()
+ dd, err := ff.Stat()
+ if err == nil {
+ name = index
+ d = dd
+ f = ff
+ }
+ }
+ }
+
+ // Still a directory? (we didn't find an index.html file)
+ if d.IsDir() {
+ if checkIfModifiedSince(r, d.ModTime()) == condFalse {
+ writeNotModified(w)
+ return
+ }
+ setLastModified(w, d.ModTime())
+ dirList(w, r, f)
+ return
+ }
+
+ // serveContent will check modification time
+ sizeFunc := func() (int64, error) { return d.Size(), nil }
+ serveContent(w, r, d.Name(), d.ModTime(), sizeFunc, f)
+}
+
+// toHTTPError returns a non-specific HTTP error message and status code
+// for a given non-nil error value. It's important that toHTTPError does not
+// actually return err.Error(), since msg and httpStatus are returned to users,
+// and historically Go's ServeContent always returned just "404 Not Found" for
+// all errors. We don't want to start leaking information in error messages.
+func toHTTPError(err error) (msg string, httpStatus int) {
+ if errors.Is(err, fs.ErrNotExist) {
+ return "404 page not found", StatusNotFound
+ }
+ if errors.Is(err, fs.ErrPermission) {
+ return "403 Forbidden", StatusForbidden
+ }
+ // Default:
+ return "500 Internal Server Error", StatusInternalServerError
+}
+
+// localRedirect gives a Moved Permanently response.
+// It does not convert relative paths to absolute paths like Redirect does.
+func localRedirect(w ResponseWriter, r *Request, newPath string) {
+ if q := r.URL.RawQuery; q != "" {
+ newPath += "?" + q
+ }
+ w.Header().Set("Location", newPath)
+ w.WriteHeader(StatusMovedPermanently)
+}
+
+// ServeFile replies to the request with the contents of the named
+// file or directory.
+//
+// If the provided file or directory name is a relative path, it is
+// interpreted relative to the current directory and may ascend to
+// parent directories. If the provided name is constructed from user
+// input, it should be sanitized before calling ServeFile.
+//
+// As a precaution, ServeFile will reject requests where r.URL.Path
+// contains a ".." path element; this protects against callers who
+// might unsafely use filepath.Join on r.URL.Path without sanitizing
+// it and then use that filepath.Join result as the name argument.
+//
+// As another special case, ServeFile redirects any request where r.URL.Path
+// ends in "/index.html" to the same path, without the final
+// "index.html". To avoid such redirects either modify the path or
+// use ServeContent.
+//
+// Outside of those two special cases, ServeFile does not use
+// r.URL.Path for selecting the file or directory to serve; only the
+// file or directory provided in the name argument is used.
+func ServeFile(w ResponseWriter, r *Request, name string) {
+ if containsDotDot(r.URL.Path) {
+ // Too many programs use r.URL.Path to construct the argument to
+		// serveFile. Reject the request under the assumption that this
+		// happened here and ".." may not be wanted.
+ // Note that name might not contain "..", for example if code (still
+ // incorrectly) used filepath.Join(myDir, r.URL.Path).
+ Error(w, "invalid URL path", StatusBadRequest)
+ return
+ }
+ dir, file := filepath.Split(name)
+ serveFile(w, r, Dir(dir), file, false)
+}
+
+func containsDotDot(v string) bool {
+ if !strings.Contains(v, "..") {
+ return false
+ }
+ for _, ent := range strings.FieldsFunc(v, isSlashRune) {
+ if ent == ".." {
+ return true
+ }
+ }
+ return false
+}
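+
+// For example (sketch):
+//
+// containsDotDot("/a/../b") // true
+// containsDotDot("/a..b/c") // false: ".." must be a complete path element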
+
+func isSlashRune(r rune) bool { return r == '/' || r == '\\' }
+
+type fileHandler struct {
+ root FileSystem
+}
+
+type ioFS struct {
+ fsys fs.FS
+}
+
+type ioFile struct {
+ file fs.File
+}
+
+func (f ioFS) Open(name string) (File, error) {
+ if name == "/" {
+ name = "."
+ } else {
+ name = strings.TrimPrefix(name, "/")
+ }
+ file, err := f.fsys.Open(name)
+ if err != nil {
+ return nil, mapOpenError(err, name, '/', func(path string) (fs.FileInfo, error) {
+ return fs.Stat(f.fsys, path)
+ })
+ }
+ return ioFile{file}, nil
+}
+
+func (f ioFile) Close() error { return f.file.Close() }
+func (f ioFile) Read(b []byte) (int, error) { return f.file.Read(b) }
+func (f ioFile) Stat() (fs.FileInfo, error) { return f.file.Stat() }
+
+var errMissingSeek = errors.New("io.File missing Seek method")
+var errMissingReadDir = errors.New("io.File directory missing ReadDir method")
+
+func (f ioFile) Seek(offset int64, whence int) (int64, error) {
+ s, ok := f.file.(io.Seeker)
+ if !ok {
+ return 0, errMissingSeek
+ }
+ return s.Seek(offset, whence)
+}
+
+func (f ioFile) ReadDir(count int) ([]fs.DirEntry, error) {
+ d, ok := f.file.(fs.ReadDirFile)
+ if !ok {
+ return nil, errMissingReadDir
+ }
+ return d.ReadDir(count)
+}
+
+func (f ioFile) Readdir(count int) ([]fs.FileInfo, error) {
+ d, ok := f.file.(fs.ReadDirFile)
+ if !ok {
+ return nil, errMissingReadDir
+ }
+ var list []fs.FileInfo
+ for {
+ dirs, err := d.ReadDir(count - len(list))
+ for _, dir := range dirs {
+ info, err := dir.Info()
+ if err != nil {
+ // Pretend it doesn't exist, like (*os.File).Readdir does.
+ continue
+ }
+ list = append(list, info)
+ }
+ if err != nil {
+ return list, err
+ }
+ if count < 0 || len(list) >= count {
+ break
+ }
+ }
+ return list, nil
+}
+
+// FS converts fsys to a FileSystem implementation,
+// for use with FileServer and NewFileTransport.
+func FS(fsys fs.FS) FileSystem {
+ return ioFS{fsys}
+}
+
+// FileServer returns a handler that serves HTTP requests
+// with the contents of the file system rooted at root.
+//
+// As a special case, the returned file server redirects any request
+// ending in "/index.html" to the same path, without the final
+// "index.html".
+//
+// To use the operating system's file system implementation,
+// use http.Dir:
+//
+// http.Handle("/", http.FileServer(http.Dir("/tmp")))
+//
+// To use an fs.FS implementation, use http.FS to convert it:
+//
+// http.Handle("/", http.FileServer(http.FS(fsys)))
+func FileServer(root FileSystem) Handler {
+ return &fileHandler{root}
+}
+
+func (f *fileHandler) ServeHTTP(w ResponseWriter, r *Request) {
+ upath := r.URL.Path
+ if !strings.HasPrefix(upath, "/") {
+ upath = "/" + upath
+ r.URL.Path = upath
+ }
+ serveFile(w, r, f.root, path.Clean(upath), true)
+}
+
+// httpRange specifies the byte range to be sent to the client.
+type httpRange struct {
+ start, length int64
+}
+
+func (r httpRange) contentRange(size int64) string {
+ return fmt.Sprintf("bytes %d-%d/%d", r.start, r.start+r.length-1, size)
+}
+
+func (r httpRange) mimeHeader(contentType string, size int64) textproto.MIMEHeader {
+ return textproto.MIMEHeader{
+ "Content-Range": {r.contentRange(size)},
+ "Content-Type": {contentType},
+ }
+}
+
+// parseRange parses a Range header string as per RFC 7233.
+// errNoOverlap is returned if none of the ranges overlap.
+func parseRange(s string, size int64) ([]httpRange, error) {
+ if s == "" {
+ return nil, nil // header not present
+ }
+ const b = "bytes="
+ if !strings.HasPrefix(s, b) {
+ return nil, errors.New("invalid range")
+ }
+ var ranges []httpRange
+ noOverlap := false
+ for _, ra := range strings.Split(s[len(b):], ",") {
+ ra = textproto.TrimString(ra)
+ if ra == "" {
+ continue
+ }
+ start, end, ok := strings.Cut(ra, "-")
+ if !ok {
+ return nil, errors.New("invalid range")
+ }
+ start, end = textproto.TrimString(start), textproto.TrimString(end)
+ var r httpRange
+ if start == "" {
+ // If no start is specified, end specifies the
+ // range start relative to the end of the file,
+ // and we are dealing with <suffix-length>
+ // which has to be a non-negative integer as per
+ // RFC 7233 Section 2.1 "Byte-Ranges".
+ if end == "" || end[0] == '-' {
+ return nil, errors.New("invalid range")
+ }
+ i, err := strconv.ParseInt(end, 10, 64)
+ if i < 0 || err != nil {
+ return nil, errors.New("invalid range")
+ }
+ if i > size {
+ i = size
+ }
+ r.start = size - i
+ r.length = size - r.start
+ } else {
+ i, err := strconv.ParseInt(start, 10, 64)
+ if err != nil || i < 0 {
+ return nil, errors.New("invalid range")
+ }
+ if i >= size {
+ // If the range begins after the size of the content,
+ // then it does not overlap.
+ noOverlap = true
+ continue
+ }
+ r.start = i
+ if end == "" {
+ // If no end is specified, range extends to end of the file.
+ r.length = size - r.start
+ } else {
+ i, err := strconv.ParseInt(end, 10, 64)
+ if err != nil || r.start > i {
+ return nil, errors.New("invalid range")
+ }
+ if i >= size {
+ i = size - 1
+ }
+ r.length = i - r.start + 1
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ if noOverlap && len(ranges) == 0 {
+ // The specified ranges did not overlap with the content.
+ return nil, errNoOverlap
+ }
+ return ranges, nil
+}
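+
+// For example, against a 1000-byte resource (sketch):
+//
+// parseRange("bytes=0-99,-100", 1000)
+// // → []httpRange{{start: 0, length: 100}, {start: 900, length: 100}}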
+
+// countingWriter counts how many bytes have been written to it.
+type countingWriter int64
+
+func (w *countingWriter) Write(p []byte) (n int, err error) {
+ *w += countingWriter(len(p))
+ return len(p), nil
+}
+
+// rangesMIMESize returns the number of bytes it takes to encode the
+// provided ranges as a multipart response.
+func rangesMIMESize(ranges []httpRange, contentType string, contentSize int64) (encSize int64) {
+ var w countingWriter
+ mw := multipart.NewWriter(&w)
+ for _, ra := range ranges {
+ mw.CreatePart(ra.mimeHeader(contentType, contentSize))
+ encSize += ra.length
+ }
+ mw.Close()
+ encSize += int64(w)
+ return
+}
+
+func sumRangesSize(ranges []httpRange) (size int64) {
+ for _, ra := range ranges {
+ size += ra.length
+ }
+ return
+}
diff --git a/contrib/go/_std_1.19/src/net/http/h2_bundle.go b/contrib/go/_std_1.19/src/net/http/h2_bundle.go
new file mode 100644
index 0000000000..0e5fa6712e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/http/h2_bundle.go
@@ -0,0 +1,10924 @@
+//go:build !nethttpomithttp2
+// +build !nethttpomithttp2
+
+// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
+// $ bundle -o=h2_bundle.go -prefix=http2 -tags=!nethttpomithttp2 golang.org/x/net/http2
+
+// Package http2 implements the HTTP/2 protocol.
+//
+// This package is low-level and intended to be used directly by very
+// few people. Most users will use it indirectly through the automatic
+// use by the net/http package (from Go 1.6 and later).
+// For use in earlier Go versions see ConfigureServer. (Transport support
+// requires Go 1.6 or later)
+//
+// See https://http2.github.io/ for more information on HTTP/2.
+//
+// See https://http2.golang.org/ for a test server running this code.
+//
+
+package http
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "context"
+ "crypto/rand"
+ "crypto/tls"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ mathrand "math/rand"
+ "net"
+ "net/http/httptrace"
+ "net/textproto"
+ "net/url"
+ "os"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/http/httpguts"
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/idna"
+)
+
+// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
+// contains helper functions which may use Unicode-aware functions that would
+// otherwise be unsafe and could introduce vulnerabilities if used improperly.
+
+// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
+// are equal, ASCII-case-insensitively.
+func http2asciiEqualFold(s, t string) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i := 0; i < len(s); i++ {
+ if http2lower(s[i]) != http2lower(t[i]) {
+ return false
+ }
+ }
+ return true
+}
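+
+// For example (sketch):
+//
+// http2asciiEqualFold("Host", "hOST") // true
+// http2asciiEqualFold("Host", "Hosts") // false: lengths differ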
+
+// lower returns the ASCII lowercase version of b.
+func http2lower(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+}
+
+// isASCIIPrint returns whether s is ASCII and printable according to
+// https://tools.ietf.org/html/rfc20#section-4.2.
+func http2isASCIIPrint(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] < ' ' || s[i] > '~' {
+ return false
+ }
+ }
+ return true
+}
+
+// asciiToLower returns the lowercase version of s if s is ASCII and printable,
+// and whether or not it was.
+func http2asciiToLower(s string) (lower string, ok bool) {
+ if !http2isASCIIPrint(s) {
+ return "", false
+ }
+ return strings.ToLower(s), true
+}
+
+// A list of the possible cipher suite ids. Taken from
+// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt
+
+const (
+ http2cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
+ http2cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
+ http2cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
+ http2cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
+ http2cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
+ http2cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
+ http2cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
+ http2cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
+ http2cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
+ http2cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
+ http2cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
+ http2cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
+ http2cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
+ http2cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
+ http2cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
+ http2cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
+ http2cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
+ http2cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
+ http2cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
+ http2cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
+ http2cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
+ http2cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
+ http2cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
+ http2cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
+ http2cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
+ http2cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
+ http2cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
+ http2cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
+ // Reserved uint16 = 0x001C-1D
+ http2cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
+ http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
+ http2cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
+ http2cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
+ http2cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
+ http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
+ http2cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
+ http2cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
+ http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
+ http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
+ http2cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
+ http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
+ http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
+ http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
+ http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
+ http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
+ http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
+ http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
+ http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
+ http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
+ http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
+ http2cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
+ http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
+ http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
+ http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
+ http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
+ http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
+ // Reserved uint16 = 0x0047-4F
+ // Reserved uint16 = 0x0050-58
+ // Reserved uint16 = 0x0059-5C
+ // Unassigned uint16 = 0x005D-5F
+ // Reserved uint16 = 0x0060-66
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
+ http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
+ http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
+ http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
+ http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
+ http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
+ // Unassigned uint16 = 0x006E-83
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
+ http2cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
+ http2cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
+ http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
+ http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
+ http2cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
+ http2cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
+ http2cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
+ http2cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
+ http2cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
+ http2cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
+ http2cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
+ http2cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
+ http2cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
+ http2cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
+ http2cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
+ http2cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
+ http2cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
+ http2cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
+ http2cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
+ http2cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
+ http2cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
+ http2cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
+ http2cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
+ http2cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
+ http2cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
+ http2cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
+ http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
+ http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
+ http2cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
+ http2cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
+ // Unassigned uint16 = 0x00C6-FE
+ http2cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
+ // Unassigned uint16 = 0x01-55,*
+ http2cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
+ // Unassigned uint16 = 0x5601 - 0xC000
+ http2cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
+ http2cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
+ http2cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
+ http2cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
+ http2cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
+ http2cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
+ http2cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
+ http2cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
+ http2cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
+ http2cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
+ http2cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
+ http2cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
+ http2cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
+ http2cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
+ http2cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
+ http2cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
+ http2cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
+ http2cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
+ http2cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
+ http2cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
+ http2cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
+ http2cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
+ http2cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
+ http2cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
+ http2cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
+ http2cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
+ http2cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
+ http2cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
+ http2cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
+ http2cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
+ http2cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
+ http2cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
+ http2cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
+ http2cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
+ http2cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
+ http2cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
+ http2cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
+ http2cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
+ http2cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
+ http2cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
+ http2cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
+ http2cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
+ http2cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
+ http2cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
+ http2cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
+ http2cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
+ http2cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
+ http2cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
+ http2cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
+ http2cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
+ http2cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
+ http2cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
+ http2cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
+ http2cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
+ http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
+ http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
+ http2cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
+ http2cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
+ http2cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
+ http2cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
+ http2cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
+ http2cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
+ http2cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
+ http2cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
+ http2cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
+ http2cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
+ // Unassigned uint16 = 0xC0B0-FF
+ // Unassigned uint16 = 0xC1-CB,*
+ // Unassigned uint16 = 0xCC00-A7
+ http2cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
+ http2cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
+ http2cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
+ http2cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
+ http2cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
+ http2cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
+)
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2
+// spec, which rejects the cipher suites listed in RFC 7540 Appendix A
+// (https://tools.ietf.org/html/rfc7540#appendix-A):
+// "This list includes those cipher suites that do not
+// offer an ephemeral key exchange and those that are
+// based on the TLS null, stream or block cipher type."
+func http2isBadCipher(cipher uint16) bool {
+ switch cipher {
+ case http2cipher_TLS_NULL_WITH_NULL_NULL,
+ http2cipher_TLS_RSA_WITH_NULL_MD5,
+ http2cipher_TLS_RSA_WITH_NULL_SHA,
+ http2cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
+ http2cipher_TLS_RSA_WITH_RC4_128_MD5,
+ http2cipher_TLS_RSA_WITH_RC4_128_SHA,
+ http2cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
+ http2cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
+ http2cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_DES_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
+ http2cipher_TLS_DH_anon_WITH_RC4_128_MD5,
+ http2cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_KRB5_WITH_DES_CBC_SHA,
+ http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_KRB5_WITH_RC4_128_SHA,
+ http2cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
+ http2cipher_TLS_KRB5_WITH_DES_CBC_MD5,
+ http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
+ http2cipher_TLS_KRB5_WITH_RC4_128_MD5,
+ http2cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
+ http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
+ http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
+ http2cipher_TLS_PSK_WITH_NULL_SHA,
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA,
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA,
+ http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_NULL_SHA256,
+ http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_PSK_WITH_RC4_128_SHA,
+ http2cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
+ http2cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
+ http2cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_PSK_WITH_NULL_SHA256,
+ http2cipher_TLS_PSK_WITH_NULL_SHA384,
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
+ http2cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
+ http2cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
+ http2cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
+ http2cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDH_anon_WITH_NULL_SHA,
+ http2cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
+ http2cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_RSA_WITH_AES_128_CCM,
+ http2cipher_TLS_RSA_WITH_AES_256_CCM,
+ http2cipher_TLS_RSA_WITH_AES_128_CCM_8,
+ http2cipher_TLS_RSA_WITH_AES_256_CCM_8,
+ http2cipher_TLS_PSK_WITH_AES_128_CCM,
+ http2cipher_TLS_PSK_WITH_AES_256_CCM,
+ http2cipher_TLS_PSK_WITH_AES_128_CCM_8,
+ http2cipher_TLS_PSK_WITH_AES_256_CCM_8:
+ return true
+ default:
+ return false
+ }
+}
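+
+// Illustrative sketch, not part of the upstream bundle: a server could
+// use http2isBadCipher to build a TLS cipher list that excludes
+// everything RFC 7540 Appendix A rejects. The name
+// http2exampleGoodCiphers is hypothetical; tls.CipherSuites is the
+// standard crypto/tls helper.
+func http2exampleGoodCiphers() []uint16 {
+	var good []uint16
+	for _, cs := range tls.CipherSuites() {
+		if !http2isBadCipher(cs.ID) {
+			good = append(good, cs.ID) // safe to offer for HTTP/2
+		}
+	}
+	return good
+}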
+
+// ClientConnPool manages a pool of HTTP/2 client connections.
+type http2ClientConnPool interface {
+ // GetClientConn returns a specific HTTP/2 connection (usually
+ // a TLS-TCP connection) to an HTTP/2 server. On success, the
+ // returned ClientConn accounts for the upcoming RoundTrip
+ // call, so the caller should not omit it. If the caller needs
+ // to, ClientConn.RoundTrip can be called with a bogus
+ // new(http.Request) to release the stream reservation.
+ GetClientConn(req *Request, addr string) (*http2ClientConn, error)
+ MarkDead(*http2ClientConn)
+}
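+
+// Illustrative sketch of the contract documented above: a caller that
+// reserves a conn via GetClientConn but decides not to use it can
+// release the stream reservation by round-tripping a bogus request,
+// as the interface comment describes. The name
+// http2exampleReleaseReservation is hypothetical.
+func http2exampleReleaseReservation(p http2ClientConnPool, req *Request, addr string) {
+	cc, err := p.GetClientConn(req, addr)
+	if err != nil {
+		return
+	}
+	cc.RoundTrip(new(Request)) // releases the reservation taken above
+}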
+
+// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
+// implementations which can close their idle connections.
+type http2clientConnPoolIdleCloser interface {
+ http2ClientConnPool
+ closeIdleConnections()
+}
+
+var (
+ _ http2clientConnPoolIdleCloser = (*http2clientConnPool)(nil)
+ _ http2clientConnPoolIdleCloser = http2noDialClientConnPool{}
+)
+
+// TODO: use singleflight for dialing and addConnCalls?
+type http2clientConnPool struct {
+ t *http2Transport
+
+ mu sync.Mutex // TODO: maybe switch to RWMutex
+ // TODO: add support for sharing conns based on cert names
+ // (e.g. share conn for googleapis.com and appspot.com)
+ conns map[string][]*http2ClientConn // key is host:port
+ dialing map[string]*http2dialCall // currently in-flight dials
+ keys map[*http2ClientConn][]string
+ addConnCalls map[string]*http2addConnCall // in-flight addConnIfNeeded calls
+}
+
+func (p *http2clientConnPool) GetClientConn(req *Request, addr string) (*http2ClientConn, error) {
+ return p.getClientConn(req, addr, http2dialOnMiss)
+}
+
+const (
+ http2dialOnMiss = true
+ http2noDialOnMiss = false
+)
+
+func (p *http2clientConnPool) getClientConn(req *Request, addr string, dialOnMiss bool) (*http2ClientConn, error) {
+ // TODO(dneil): Dial a new connection when t.DisableKeepAlives is set?
+ if http2isConnectionCloseRequest(req) && dialOnMiss {
+ // It gets its own connection.
+ http2traceGetConn(req, addr)
+ const singleUse = true
+ cc, err := p.t.dialClientConn(req.Context(), addr, singleUse)
+ if err != nil {
+ return nil, err
+ }
+ return cc, nil
+ }
+ for {
+ p.mu.Lock()
+ for _, cc := range p.conns[addr] {
+ if cc.ReserveNewRequest() {
+ // When a connection is presented to us by the net/http package,
+ // the GetConn hook has already been called.
+ // Don't call it a second time here.
+ if !cc.getConnCalled {
+ http2traceGetConn(req, addr)
+ }
+ cc.getConnCalled = false
+ p.mu.Unlock()
+ return cc, nil
+ }
+ }
+ if !dialOnMiss {
+ p.mu.Unlock()
+ return nil, http2ErrNoCachedConn
+ }
+ http2traceGetConn(req, addr)
+ call := p.getStartDialLocked(req.Context(), addr)
+ p.mu.Unlock()
+ <-call.done
+ if http2shouldRetryDial(call, req) {
+ continue
+ }
+ cc, err := call.res, call.err
+ if err != nil {
+ return nil, err
+ }
+ if cc.ReserveNewRequest() {
+ return cc, nil
+ }
+ }
+}
+
+// dialCall is an in-flight Transport dial call to a host.
+type http2dialCall struct {
+ _ http2incomparable
+ p *http2clientConnPool
+ // the context associated with the request
+ // that created this dialCall
+ ctx context.Context
+ done chan struct{} // closed when done
+ res *http2ClientConn // valid after done is closed
+ err error // valid after done is closed
+}
+
+// requires that p.mu is held.
+func (p *http2clientConnPool) getStartDialLocked(ctx context.Context, addr string) *http2dialCall {
+ if call, ok := p.dialing[addr]; ok {
+ // A dial is already in-flight. Don't start another.
+ return call
+ }
+ call := &http2dialCall{p: p, done: make(chan struct{}), ctx: ctx}
+ if p.dialing == nil {
+ p.dialing = make(map[string]*http2dialCall)
+ }
+ p.dialing[addr] = call
+ go call.dial(call.ctx, addr)
+ return call
+}
+
+// run in its own goroutine.
+func (c *http2dialCall) dial(ctx context.Context, addr string) {
+ const singleUse = false // shared conn
+ c.res, c.err = c.p.t.dialClientConn(ctx, addr, singleUse)
+ close(c.done)
+
+ c.p.mu.Lock()
+ delete(c.p.dialing, addr)
+ if c.err == nil {
+ c.p.addConnLocked(addr, c.res)
+ }
+ c.p.mu.Unlock()
+}
+
+// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
+// already exist. It coalesces concurrent calls with the same key.
+// This is used by the http1 Transport code when it creates a new connection. Because
+// the http1 Transport doesn't de-dup TCP dials to outbound hosts (it doesn't know
+// the protocol), it can get into a situation where it has multiple TLS connections.
+// This code decides which ones live or die.
+// The return value reports whether c was used.
+// c is never closed.
+func (p *http2clientConnPool) addConnIfNeeded(key string, t *http2Transport, c *tls.Conn) (used bool, err error) {
+ p.mu.Lock()
+ for _, cc := range p.conns[key] {
+ if cc.CanTakeNewRequest() {
+ p.mu.Unlock()
+ return false, nil
+ }
+ }
+ call, dup := p.addConnCalls[key]
+ if !dup {
+ if p.addConnCalls == nil {
+ p.addConnCalls = make(map[string]*http2addConnCall)
+ }
+ call = &http2addConnCall{
+ p: p,
+ done: make(chan struct{}),
+ }
+ p.addConnCalls[key] = call
+ go call.run(t, key, c)
+ }
+ p.mu.Unlock()
+
+ <-call.done
+ if call.err != nil {
+ return false, call.err
+ }
+ return !dup, nil
+}
+
+type http2addConnCall struct {
+ _ http2incomparable
+ p *http2clientConnPool
+ done chan struct{} // closed when done
+ err error
+}
+
+func (c *http2addConnCall) run(t *http2Transport, key string, tc *tls.Conn) {
+ cc, err := t.NewClientConn(tc)
+
+ p := c.p
+ p.mu.Lock()
+ if err != nil {
+ c.err = err
+ } else {
+ cc.getConnCalled = true // already called by the net/http package
+ p.addConnLocked(key, cc)
+ }
+ delete(p.addConnCalls, key)
+ p.mu.Unlock()
+ close(c.done)
+}
+
+// p.mu must be held
+func (p *http2clientConnPool) addConnLocked(key string, cc *http2ClientConn) {
+ for _, v := range p.conns[key] {
+ if v == cc {
+ return
+ }
+ }
+ if p.conns == nil {
+ p.conns = make(map[string][]*http2ClientConn)
+ }
+ if p.keys == nil {
+ p.keys = make(map[*http2ClientConn][]string)
+ }
+ p.conns[key] = append(p.conns[key], cc)
+ p.keys[cc] = append(p.keys[cc], key)
+}
+
+func (p *http2clientConnPool) MarkDead(cc *http2ClientConn) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ for _, key := range p.keys[cc] {
+ vv, ok := p.conns[key]
+ if !ok {
+ continue
+ }
+ newList := http2filterOutClientConn(vv, cc)
+ if len(newList) > 0 {
+ p.conns[key] = newList
+ } else {
+ delete(p.conns, key)
+ }
+ }
+ delete(p.keys, cc)
+}
+
+func (p *http2clientConnPool) closeIdleConnections() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // TODO: don't close a cc if it was just added to the pool
+ // milliseconds ago and has never been used. There's currently
+ // a small race window with the HTTP/1 Transport's integration
+ // where it can add an idle conn just before using it, and
+ // somebody else can concurrently call CloseIdleConns and
+ // break some caller's RoundTrip.
+ for _, vv := range p.conns {
+ for _, cc := range vv {
+ cc.closeIfIdle()
+ }
+ }
+}
+
+func http2filterOutClientConn(in []*http2ClientConn, exclude *http2ClientConn) []*http2ClientConn {
+ out := in[:0]
+ for _, v := range in {
+ if v != exclude {
+ out = append(out, v)
+ }
+ }
+	// If we filtered an item out, zero the now-unused tail slot so the
+	// backing array no longer pins the removed conn and the GC can
+	// reclaim it.
+ if len(in) != len(out) {
+ in[len(in)-1] = nil
+ }
+ return out
+}
+
+// noDialClientConnPool is an implementation of http2.ClientConnPool
+// which never dials. We let the HTTP/1.1 client dial and use its TLS
+// connection instead.
+type http2noDialClientConnPool struct{ *http2clientConnPool }
+
+func (p http2noDialClientConnPool) GetClientConn(req *Request, addr string) (*http2ClientConn, error) {
+ return p.getClientConn(req, addr, http2noDialOnMiss)
+}
+
+// shouldRetryDial reports whether the current request should
+// retry dialing after the call finished unsuccessfully, for example
+// if the dial was canceled because of a context cancellation or
+// deadline expiry.
+func http2shouldRetryDial(call *http2dialCall, req *Request) bool {
+ if call.err == nil {
+ // No error, no need to retry
+ return false
+ }
+ if call.ctx == req.Context() {
+ // If the call has the same context as the request, the dial
+ // should not be retried, since any cancellation will have come
+ // from this request.
+ return false
+ }
+ if !errors.Is(call.err, context.Canceled) && !errors.Is(call.err, context.DeadlineExceeded) {
+ // If the call error is not because of a context cancellation or a deadline expiry,
+ // the dial should not be retried.
+ return false
+ }
+ // Only retry if the error is a context cancellation error or deadline expiry
+ // and the context associated with the call was canceled or expired.
+ return call.ctx.Err() != nil
+}
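+
+// Illustrative sketch of the decision above: a coalesced dial that was
+// canceled by an earlier request's context is retried on behalf of the
+// current request, since the cancellation did not come from it. The
+// name http2exampleShouldRetry is hypothetical.
+func http2exampleShouldRetry(req *Request) bool {
+	ctx, cancel := context.WithCancel(context.Background())
+	cancel() // stands in for the earlier request's canceled context
+	call := &http2dialCall{ctx: ctx, err: context.Canceled}
+	return http2shouldRetryDial(call, req) // true: the dial's own context was canceled
+}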
+
+// Buffer chunks are allocated from a pool to reduce pressure on GC.
+// The maximum wasted space per dataBuffer is 2x the largest size class,
+// which happens when the dataBuffer has multiple chunks and there is
+// one unread byte in both the first and last chunks. We use a few size
+// classes to minimize overheads for servers that typically receive very
+// small request bodies.
+//
+// TODO: Benchmark to determine if the pools are necessary. The GC may have
+// improved enough that we can instead allocate chunks like this:
+// make([]byte, max(16<<10, expectedBytesRemaining))
+var (
+ http2dataChunkSizeClasses = []int{
+ 1 << 10,
+ 2 << 10,
+ 4 << 10,
+ 8 << 10,
+ 16 << 10,
+ }
+ http2dataChunkPools = [...]sync.Pool{
+ {New: func() interface{} { return make([]byte, 1<<10) }},
+ {New: func() interface{} { return make([]byte, 2<<10) }},
+ {New: func() interface{} { return make([]byte, 4<<10) }},
+ {New: func() interface{} { return make([]byte, 8<<10) }},
+ {New: func() interface{} { return make([]byte, 16<<10) }},
+ }
+)
+
+func http2getDataBufferChunk(size int64) []byte {
+ i := 0
+ for ; i < len(http2dataChunkSizeClasses)-1; i++ {
+ if size <= int64(http2dataChunkSizeClasses[i]) {
+ break
+ }
+ }
+ return http2dataChunkPools[i].Get().([]byte)
+}
+
+func http2putDataBufferChunk(p []byte) {
+ for i, n := range http2dataChunkSizeClasses {
+ if len(p) == n {
+ http2dataChunkPools[i].Put(p)
+ return
+ }
+ }
+ panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
+}
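+
+// Illustrative sketch: http2getDataBufferChunk rounds a request up to
+// the next size class, so asking for 3 KiB yields a 4 KiB chunk, and
+// http2putDataBufferChunk files it back under the matching pool. The
+// name http2exampleChunkRoundTrip is hypothetical.
+func http2exampleChunkRoundTrip() int {
+	buf := http2getDataBufferChunk(3 << 10) // len(buf) == 4<<10
+	defer http2putDataBufferChunk(buf)      // returns it to the 4 KiB pool
+	return len(buf)
+}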
+
+// dataBuffer is an io.ReadWriter backed by a list of data chunks.
+// Each dataBuffer is used to read DATA frames on a single stream.
+// The buffer is divided into chunks so the server can limit the
+// total memory used by a single connection without limiting the
+// request body size on any single stream.
+type http2dataBuffer struct {
+ chunks [][]byte
+ r int // next byte to read is chunks[0][r]
+ w int // next byte to write is chunks[len(chunks)-1][w]
+ size int // total buffered bytes
+ expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
+}
+
+var http2errReadEmpty = errors.New("read from empty dataBuffer")
+
+// Read copies bytes from the buffer into p.
+// It is an error to read when no data is available.
+func (b *http2dataBuffer) Read(p []byte) (int, error) {
+ if b.size == 0 {
+ return 0, http2errReadEmpty
+ }
+ var ntotal int
+ for len(p) > 0 && b.size > 0 {
+ readFrom := b.bytesFromFirstChunk()
+ n := copy(p, readFrom)
+ p = p[n:]
+ ntotal += n
+ b.r += n
+ b.size -= n
+ // If the first chunk has been consumed, advance to the next chunk.
+ if b.r == len(b.chunks[0]) {
+ http2putDataBufferChunk(b.chunks[0])
+ end := len(b.chunks) - 1
+ copy(b.chunks[:end], b.chunks[1:])
+ b.chunks[end] = nil
+ b.chunks = b.chunks[:end]
+ b.r = 0
+ }
+ }
+ return ntotal, nil
+}
+
+func (b *http2dataBuffer) bytesFromFirstChunk() []byte {
+ if len(b.chunks) == 1 {
+ return b.chunks[0][b.r:b.w]
+ }
+ return b.chunks[0][b.r:]
+}
+
+// Len returns the number of bytes of the unread portion of the buffer.
+func (b *http2dataBuffer) Len() int {
+ return b.size
+}
+
+// Write appends p to the buffer.
+func (b *http2dataBuffer) Write(p []byte) (int, error) {
+ ntotal := len(p)
+ for len(p) > 0 {
+		// If the last chunk is full (or there is none), allocate a new
+		// chunk. Try to allocate enough to fully copy p plus any
+		// additional bytes we expect to receive. However, this may
+		// allocate less than len(p).
+ want := int64(len(p))
+ if b.expected > want {
+ want = b.expected
+ }
+ chunk := b.lastChunkOrAlloc(want)
+ n := copy(chunk[b.w:], p)
+ p = p[n:]
+ b.w += n
+ b.size += n
+ b.expected -= int64(n)
+ }
+ return ntotal, nil
+}
+
+func (b *http2dataBuffer) lastChunkOrAlloc(want int64) []byte {
+ if len(b.chunks) != 0 {
+ last := b.chunks[len(b.chunks)-1]
+ if b.w < len(last) {
+ return last
+ }
+ }
+ chunk := http2getDataBufferChunk(want)
+ b.chunks = append(b.chunks, chunk)
+ b.w = 0
+ return chunk
+}
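+
+// Illustrative sketch of the buffer in isolation: Write copies into
+// pooled chunks sized by the classes above, and Read drains the bytes
+// in order, recycling any chunk once its full length has been
+// consumed. The name http2exampleDataBuffer is hypothetical.
+func http2exampleDataBuffer() int {
+	var b http2dataBuffer
+	b.Write(bytes.Repeat([]byte{'x'}, 3<<10)) // one 4 KiB chunk from the pool
+	total := 0
+	p := make([]byte, 512)
+	for b.Len() > 0 {
+		n, _ := b.Read(p) // errors only when the buffer is empty
+		total += n
+	}
+	return total // 3<<10
+}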
+
+// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
+type http2ErrCode uint32
+
+const (
+ http2ErrCodeNo http2ErrCode = 0x0
+ http2ErrCodeProtocol http2ErrCode = 0x1
+ http2ErrCodeInternal http2ErrCode = 0x2
+ http2ErrCodeFlowControl http2ErrCode = 0x3
+ http2ErrCodeSettingsTimeout http2ErrCode = 0x4
+ http2ErrCodeStreamClosed http2ErrCode = 0x5
+ http2ErrCodeFrameSize http2ErrCode = 0x6
+ http2ErrCodeRefusedStream http2ErrCode = 0x7
+ http2ErrCodeCancel http2ErrCode = 0x8
+ http2ErrCodeCompression http2ErrCode = 0x9
+ http2ErrCodeConnect http2ErrCode = 0xa
+ http2ErrCodeEnhanceYourCalm http2ErrCode = 0xb
+ http2ErrCodeInadequateSecurity http2ErrCode = 0xc
+ http2ErrCodeHTTP11Required http2ErrCode = 0xd
+)
+
+var http2errCodeName = map[http2ErrCode]string{
+ http2ErrCodeNo: "NO_ERROR",
+ http2ErrCodeProtocol: "PROTOCOL_ERROR",
+ http2ErrCodeInternal: "INTERNAL_ERROR",
+ http2ErrCodeFlowControl: "FLOW_CONTROL_ERROR",
+ http2ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT",
+ http2ErrCodeStreamClosed: "STREAM_CLOSED",
+ http2ErrCodeFrameSize: "FRAME_SIZE_ERROR",
+ http2ErrCodeRefusedStream: "REFUSED_STREAM",
+ http2ErrCodeCancel: "CANCEL",
+ http2ErrCodeCompression: "COMPRESSION_ERROR",
+ http2ErrCodeConnect: "CONNECT_ERROR",
+ http2ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM",
+ http2ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
+ http2ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED",
+}
+
+func (e http2ErrCode) String() string {
+ if s, ok := http2errCodeName[e]; ok {
+ return s
+ }
+ return fmt.Sprintf("unknown error code 0x%x", uint32(e))
+}
+
+func (e http2ErrCode) stringToken() string {
+ if s, ok := http2errCodeName[e]; ok {
+ return s
+ }
+ return fmt.Sprintf("ERR_UNKNOWN_%d", uint32(e))
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection.
+type http2ConnectionError http2ErrCode
+
+func (e http2ConnectionError) Error() string {
+ return fmt.Sprintf("connection error: %s", http2ErrCode(e))
+}
+
+// StreamError is an error that only affects one stream within an
+// HTTP/2 connection.
+type http2StreamError struct {
+ StreamID uint32
+ Code http2ErrCode
+ Cause error // optional additional detail
+}
+
+// errFromPeer is a sentinel error value for StreamError.Cause to
+// indicate that the StreamError was sent from the peer over the wire
+// and wasn't locally generated in the Transport.
+var http2errFromPeer = errors.New("received from peer")
+
+func http2streamError(id uint32, code http2ErrCode) http2StreamError {
+ return http2StreamError{StreamID: id, Code: code}
+}
+
+func (e http2StreamError) Error() string {
+ if e.Cause != nil {
+ return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
+ }
+ return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
+}
+
+// 6.9.1 The Flow Control Window
+// "If a sender receives a WINDOW_UPDATE that causes a flow control
+// window to exceed this maximum it MUST terminate either the stream
+// or the connection, as appropriate. For streams, [...]; for the
+// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
+type http2goAwayFlowError struct{}
+
+func (http2goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
+
+// connError represents an HTTP/2 ConnectionError error code, along
+// with a string (for debugging) explaining why.
+//
+// Errors of this type are only returned by the frame parser functions
+// and converted into ConnectionError(Code), after stashing away
+// the Reason into the Framer's errDetail field, accessible via
+// the (*Framer).ErrorDetail method.
+type http2connError struct {
+ Code http2ErrCode // the ConnectionError error code
+ Reason string // additional reason
+}
+
+func (e http2connError) Error() string {
+ return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
+}
+
+type http2pseudoHeaderError string
+
+func (e http2pseudoHeaderError) Error() string {
+ return fmt.Sprintf("invalid pseudo-header %q", string(e))
+}
+
+type http2duplicatePseudoHeaderError string
+
+func (e http2duplicatePseudoHeaderError) Error() string {
+ return fmt.Sprintf("duplicate pseudo-header %q", string(e))
+}
+
+type http2headerFieldNameError string
+
+func (e http2headerFieldNameError) Error() string {
+ return fmt.Sprintf("invalid header field name %q", string(e))
+}
+
+type http2headerFieldValueError string
+
+func (e http2headerFieldValueError) Error() string {
+ return fmt.Sprintf("invalid header field value for %q", string(e))
+}
+
+var (
+ http2errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
+ http2errPseudoAfterRegular = errors.New("pseudo header field after regular")
+)
+
+// flow is the flow control window's size.
+type http2flow struct {
+ _ http2incomparable
+
+ // n is the number of DATA bytes we're allowed to send.
+	// A flow is kept both at the conn level and per stream.
+ n int32
+
+	// conn points to the connection-level flow that is shared by
+	// all streams on that conn. It is nil for the flow that's on
+	// the conn directly.
+ conn *http2flow
+}
+
+func (f *http2flow) setConnFlow(cf *http2flow) { f.conn = cf }
+
+func (f *http2flow) available() int32 {
+ n := f.n
+ if f.conn != nil && f.conn.n < n {
+ n = f.conn.n
+ }
+ return n
+}
+
+func (f *http2flow) take(n int32) {
+ if n > f.available() {
+ panic("internal error: took too much")
+ }
+ f.n -= n
+ if f.conn != nil {
+ f.conn.n -= n
+ }
+}
+
+// add adds n bytes (positive or negative) to the flow control window.
+// It returns false if the sum would exceed 2^31-1.
+func (f *http2flow) add(n int32) bool {
+ sum := f.n + n
+ if (sum > n) == (f.n > 0) {
+ f.n = sum
+ return true
+ }
+ return false
+}
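+
+// Illustrative sketch: the sign test in add catches int32 overflow
+// without widening. At the window maximum of 2^31-1, adding even one
+// byte wraps the sum negative, the comparison fails, and add reports
+// false with f.n unchanged. The name http2exampleFlowOverflow is
+// hypothetical.
+func http2exampleFlowOverflow() bool {
+	f := &http2flow{n: 1<<31 - 1} // window already at 2^31-1
+	return f.add(1)               // false: the increment must be refused
+}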
+
+const http2frameHeaderLen = 9
+
+var http2padZeros = make([]byte, 255) // zeros for padding
+
+// A FrameType is a registered frame type as defined in
+// http://http2.github.io/http2-spec/#rfc.section.11.2
+type http2FrameType uint8
+
+const (
+ http2FrameData http2FrameType = 0x0
+ http2FrameHeaders http2FrameType = 0x1
+ http2FramePriority http2FrameType = 0x2
+ http2FrameRSTStream http2FrameType = 0x3
+ http2FrameSettings http2FrameType = 0x4
+ http2FramePushPromise http2FrameType = 0x5
+ http2FramePing http2FrameType = 0x6
+ http2FrameGoAway http2FrameType = 0x7
+ http2FrameWindowUpdate http2FrameType = 0x8
+ http2FrameContinuation http2FrameType = 0x9
+)
+
+var http2frameName = map[http2FrameType]string{
+ http2FrameData: "DATA",
+ http2FrameHeaders: "HEADERS",
+ http2FramePriority: "PRIORITY",
+ http2FrameRSTStream: "RST_STREAM",
+ http2FrameSettings: "SETTINGS",
+ http2FramePushPromise: "PUSH_PROMISE",
+ http2FramePing: "PING",
+ http2FrameGoAway: "GOAWAY",
+ http2FrameWindowUpdate: "WINDOW_UPDATE",
+ http2FrameContinuation: "CONTINUATION",
+}
+
+func (t http2FrameType) String() string {
+ if s, ok := http2frameName[t]; ok {
+ return s
+ }
+ return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
+}
+
+// Flags is a bitmask of HTTP/2 flags.
+// The meaning of flags varies depending on the frame type.
+type http2Flags uint8
+
+// Has reports whether f contains all (0 or more) flags in v.
+func (f http2Flags) Has(v http2Flags) bool {
+ return (f & v) == v
+}
+
+// Frame-specific FrameHeader flag bits.
+const (
+ // Data Frame
+ http2FlagDataEndStream http2Flags = 0x1
+ http2FlagDataPadded http2Flags = 0x8
+
+ // Headers Frame
+ http2FlagHeadersEndStream http2Flags = 0x1
+ http2FlagHeadersEndHeaders http2Flags = 0x4
+ http2FlagHeadersPadded http2Flags = 0x8
+ http2FlagHeadersPriority http2Flags = 0x20
+
+ // Settings Frame
+ http2FlagSettingsAck http2Flags = 0x1
+
+ // Ping Frame
+ http2FlagPingAck http2Flags = 0x1
+
+ // Continuation Frame
+ http2FlagContinuationEndHeaders http2Flags = 0x4
+
+ http2FlagPushPromiseEndHeaders http2Flags = 0x4
+ http2FlagPushPromisePadded http2Flags = 0x8
+)
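+
+// Illustrative sketch: Has requires every bit of its argument, so a
+// HEADERS frame carrying both END_STREAM and END_HEADERS satisfies
+// the single-flag queries as well as the combined one. The name
+// http2exampleFlagCheck is hypothetical.
+func http2exampleFlagCheck() bool {
+	f := http2FlagHeadersEndStream | http2FlagHeadersEndHeaders
+	return f.Has(http2FlagHeadersEndStream) && // 0x1 is set
+		f.Has(http2FlagHeadersEndStream|http2FlagHeadersEndHeaders) // both bits set
+}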
+
+var http2flagName = map[http2FrameType]map[http2Flags]string{
+ http2FrameData: {
+ http2FlagDataEndStream: "END_STREAM",
+ http2FlagDataPadded: "PADDED",
+ },
+ http2FrameHeaders: {
+ http2FlagHeadersEndStream: "END_STREAM",
+ http2FlagHeadersEndHeaders: "END_HEADERS",
+ http2FlagHeadersPadded: "PADDED",
+ http2FlagHeadersPriority: "PRIORITY",
+ },
+ http2FrameSettings: {
+ http2FlagSettingsAck: "ACK",
+ },
+ http2FramePing: {
+ http2FlagPingAck: "ACK",
+ },
+ http2FrameContinuation: {
+ http2FlagContinuationEndHeaders: "END_HEADERS",
+ },
+ http2FramePushPromise: {
+ http2FlagPushPromiseEndHeaders: "END_HEADERS",
+ http2FlagPushPromisePadded: "PADDED",
+ },
+}
+
+// a frameParser parses a frame given its FrameHeader and payload
+// bytes. The length of payload will always equal fh.Length (which
+// might be 0).
+type http2frameParser func(fc *http2frameCache, fh http2FrameHeader, countError func(string), payload []byte) (http2Frame, error)
+
+var http2frameParsers = map[http2FrameType]http2frameParser{
+ http2FrameData: http2parseDataFrame,
+ http2FrameHeaders: http2parseHeadersFrame,
+ http2FramePriority: http2parsePriorityFrame,
+ http2FrameRSTStream: http2parseRSTStreamFrame,
+ http2FrameSettings: http2parseSettingsFrame,
+ http2FramePushPromise: http2parsePushPromise,
+ http2FramePing: http2parsePingFrame,
+ http2FrameGoAway: http2parseGoAwayFrame,
+ http2FrameWindowUpdate: http2parseWindowUpdateFrame,
+ http2FrameContinuation: http2parseContinuationFrame,
+}
+
+func http2typeFrameParser(t http2FrameType) http2frameParser {
+ if f := http2frameParsers[t]; f != nil {
+ return f
+ }
+ return http2parseUnknownFrame
+}
+
+// A FrameHeader is the 9 byte header of all HTTP/2 frames.
+//
+// See http://http2.github.io/http2-spec/#FrameHeader
+type http2FrameHeader struct {
+ valid bool // caller can access []byte fields in the Frame
+
+ // Type is the 1 byte frame type. There are ten standard frame
+ // types, but extension frame types may be written by WriteRawFrame
+ // and will be returned by ReadFrame (as UnknownFrame).
+ Type http2FrameType
+
+ // Flags are the 1 byte of 8 potential bit flags per frame.
+ // They are specific to the frame type.
+ Flags http2Flags
+
+ // Length is the length of the frame, not including the 9 byte header.
+ // The maximum size is one byte less than 16MB (uint24), but only
+ // frames up to 16KB are allowed without peer agreement.
+ Length uint32
+
+ // StreamID is which stream this frame is for. Certain frames
+ // are not stream-specific, in which case this field is 0.
+ StreamID uint32
+}
+
+// Header returns h. It exists so FrameHeaders can be embedded in other
+// specific frame types and implement the Frame interface.
+func (h http2FrameHeader) Header() http2FrameHeader { return h }
+
+func (h http2FrameHeader) String() string {
+ var buf bytes.Buffer
+ buf.WriteString("[FrameHeader ")
+ h.writeDebug(&buf)
+ buf.WriteByte(']')
+ return buf.String()
+}
+
+func (h http2FrameHeader) writeDebug(buf *bytes.Buffer) {
+ buf.WriteString(h.Type.String())
+ if h.Flags != 0 {
+ buf.WriteString(" flags=")
+ set := 0
+ for i := uint8(0); i < 8; i++ {
+ if h.Flags&(1<<i) == 0 {
+ continue
+ }
+ set++
+ if set > 1 {
+ buf.WriteByte('|')
+ }
+ name := http2flagName[h.Type][http2Flags(1<<i)]
+ if name != "" {
+ buf.WriteString(name)
+ } else {
+ fmt.Fprintf(buf, "0x%x", 1<<i)
+ }
+ }
+ }
+ if h.StreamID != 0 {
+ fmt.Fprintf(buf, " stream=%d", h.StreamID)
+ }
+ fmt.Fprintf(buf, " len=%d", h.Length)
+}
+
+func (h *http2FrameHeader) checkValid() {
+ if !h.valid {
+ panic("Frame accessor called on non-owned Frame")
+ }
+}
+
+func (h *http2FrameHeader) invalidate() { h.valid = false }
+
+// frame header bytes.
+// Used only by ReadFrameHeader.
+var http2fhBytes = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, http2frameHeaderLen)
+ return &buf
+ },
+}
+
+// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
+// Most users should use Framer.ReadFrame instead.
+func http2ReadFrameHeader(r io.Reader) (http2FrameHeader, error) {
+ bufp := http2fhBytes.Get().(*[]byte)
+ defer http2fhBytes.Put(bufp)
+ return http2readFrameHeader(*bufp, r)
+}
+
+func http2readFrameHeader(buf []byte, r io.Reader) (http2FrameHeader, error) {
+ _, err := io.ReadFull(r, buf[:http2frameHeaderLen])
+ if err != nil {
+ return http2FrameHeader{}, err
+ }
+ return http2FrameHeader{
+ Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])),
+ Type: http2FrameType(buf[3]),
+ Flags: http2Flags(buf[4]),
+ StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),
+ valid: true,
+ }, nil
+}
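+
+// Illustrative sketch: decoding the fixed 9-byte header of a SETTINGS
+// ACK frame by hand (24-bit length 0, type 0x4, flags 0x1, stream 0).
+// The name http2exampleDecodeHeader is hypothetical.
+func http2exampleDecodeHeader() (http2FrameHeader, error) {
+	raw := []byte{0x00, 0x00, 0x00, 0x04, 0x01, 0x00, 0x00, 0x00, 0x00}
+	buf := make([]byte, http2frameHeaderLen)
+	return http2readFrameHeader(buf, bytes.NewReader(raw))
+}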
+
+// A Frame is the base interface implemented by all frame types.
+// Callers will generally type-assert the specific frame type:
+// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.
+//
+// Frames are only valid until the next call to Framer.ReadFrame.
+type http2Frame interface {
+ Header() http2FrameHeader
+
+	// invalidate is called by Framer.ReadFrame to mark this
+	// frame's buffers as invalid, since the subsequent
+	// frame will reuse them.
+ invalidate()
+}
+
+// A Framer reads and writes Frames.
+type http2Framer struct {
+ r io.Reader
+ lastFrame http2Frame
+ errDetail error
+
+ // countError is a non-nil func that's called on a frame parse
+ // error with some unique error path token. It's initialized
+ // from Transport.CountError or Server.CountError.
+ countError func(errToken string)
+
+ // lastHeaderStream is non-zero if the last frame was an
+ // unfinished HEADERS/CONTINUATION.
+ lastHeaderStream uint32
+
+ maxReadSize uint32
+ headerBuf [http2frameHeaderLen]byte
+
+ // TODO: let getReadBuf be configurable, and use a less memory-pinning
+ // allocator in server.go to minimize memory pinned for many idle conns.
+ // Will probably also need to make frame invalidation have a hook too.
+ getReadBuf func(size uint32) []byte
+ readBuf []byte // cache for default getReadBuf
+
+ maxWriteSize uint32 // zero means unlimited; TODO: implement
+
+ w io.Writer
+ wbuf []byte
+
+ // AllowIllegalWrites permits the Framer's Write methods to
+ // write frames that do not conform to the HTTP/2 spec. This
+ // permits using the Framer to test other HTTP/2
+ // implementations' conformance to the spec.
+	// If false, the Write methods will return an error rather
+	// than write a non-conforming frame.
+ AllowIllegalWrites bool
+
+ // AllowIllegalReads permits the Framer's ReadFrame method
+ // to return non-compliant frames or frame orders.
+ // This is for testing and permits using the Framer to test
+ // other HTTP/2 implementations' conformance to the spec.
+ // It is not compatible with ReadMetaHeaders.
+ AllowIllegalReads bool
+
+	// ReadMetaHeaders, if non-nil, causes ReadFrame to merge
+	// HEADERS and CONTINUATION frames together and return
+	// MetaHeadersFrame instead.
+ ReadMetaHeaders *hpack.Decoder
+
+ // MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE.
+ // It's used only if ReadMetaHeaders is set; 0 means a sane default
+	// (currently 16MB).
+ // If the limit is hit, MetaHeadersFrame.Truncated is set true.
+ MaxHeaderListSize uint32
+
+ // TODO: track which type of frame & with which flags was sent
+ // last. Then return an error (unless AllowIllegalWrites) if
+ // we're in the middle of a header block and a
+ // non-Continuation or Continuation on a different stream is
+ // attempted to be written.
+
+ logReads, logWrites bool
+
+ debugFramer *http2Framer // only use for logging written writes
+ debugFramerBuf *bytes.Buffer
+ debugReadLoggerf func(string, ...interface{})
+ debugWriteLoggerf func(string, ...interface{})
+
+ frameCache *http2frameCache // nil if frames aren't reused (default)
+}
+
+func (fr *http2Framer) maxHeaderListSize() uint32 {
+ if fr.MaxHeaderListSize == 0 {
+ return 16 << 20 // sane default, per docs
+ }
+ return fr.MaxHeaderListSize
+}
+
+func (f *http2Framer) startWrite(ftype http2FrameType, flags http2Flags, streamID uint32) {
+ // Write the FrameHeader.
+ f.wbuf = append(f.wbuf[:0],
+		0, // 3 bytes of length, filled in by endWrite
+ 0,
+ 0,
+ byte(ftype),
+ byte(flags),
+ byte(streamID>>24),
+ byte(streamID>>16),
+ byte(streamID>>8),
+ byte(streamID))
+}
+
+func (f *http2Framer) endWrite() error {
+ // Now that we know the final size, fill in the FrameHeader in
+ // the space previously reserved for it. Abuse append.
+ length := len(f.wbuf) - http2frameHeaderLen
+ if length >= (1 << 24) {
+ return http2ErrFrameTooLarge
+ }
+ _ = append(f.wbuf[:0],
+ byte(length>>16),
+ byte(length>>8),
+ byte(length))
+ if f.logWrites {
+ f.logWrite()
+ }
+
+ n, err := f.w.Write(f.wbuf)
+ if err == nil && n != len(f.wbuf) {
+ err = io.ErrShortWrite
+ }
+ return err
+}
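+
+// Worked example (illustrative): for a PING write, startWrite reserves
+// 9 zeroed header bytes and the payload adds 8 more, so endWrite
+// computes length = 17 - 9 = 8 and back-fills wbuf[0:3] with
+// 0x00 0x00 0x08 via the append-to-wbuf[:0] trick before issuing the
+// single Write.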
+
+func (f *http2Framer) logWrite() {
+ if f.debugFramer == nil {
+ f.debugFramerBuf = new(bytes.Buffer)
+ f.debugFramer = http2NewFramer(nil, f.debugFramerBuf)
+ f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
+ // Let us read anything, even if we accidentally wrote it
+ // in the wrong order:
+ f.debugFramer.AllowIllegalReads = true
+ }
+ f.debugFramerBuf.Write(f.wbuf)
+ fr, err := f.debugFramer.ReadFrame()
+ if err != nil {
+ f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f)
+ return
+ }
+ f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, http2summarizeFrame(fr))
+}
+
+func (f *http2Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
+
+func (f *http2Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) }
+
+func (f *http2Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
+
+func (f *http2Framer) writeUint32(v uint32) {
+ f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+const (
+ http2minMaxFrameSize = 1 << 14
+ http2maxFrameSize = 1<<24 - 1
+)
+
+// SetReuseFrames allows the Framer to reuse Frames.
+// If called on a Framer, Frames returned by calls to ReadFrame are only
+// valid until the next call to ReadFrame.
+func (fr *http2Framer) SetReuseFrames() {
+ if fr.frameCache != nil {
+ return
+ }
+ fr.frameCache = &http2frameCache{}
+}
+
+type http2frameCache struct {
+ dataFrame http2DataFrame
+}
+
+func (fc *http2frameCache) getDataFrame() *http2DataFrame {
+ if fc == nil {
+ return &http2DataFrame{}
+ }
+ return &fc.dataFrame
+}
+
+// NewFramer returns a Framer that writes frames to w and reads them from r.
+func http2NewFramer(w io.Writer, r io.Reader) *http2Framer {
+ fr := &http2Framer{
+ w: w,
+ r: r,
+ countError: func(string) {},
+ logReads: http2logFrameReads,
+ logWrites: http2logFrameWrites,
+ debugReadLoggerf: log.Printf,
+ debugWriteLoggerf: log.Printf,
+ }
+ fr.getReadBuf = func(size uint32) []byte {
+ if cap(fr.readBuf) >= int(size) {
+ return fr.readBuf[:size]
+ }
+ fr.readBuf = make([]byte, size)
+ return fr.readBuf
+ }
+ fr.SetMaxReadFrameSize(http2maxFrameSize)
+ return fr
+}
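+
+// Usage sketch (illustrative; conn is an assumed io.ReadWriter, and in
+// this bundled file the x/net/http2 names carry an http2 prefix):
+//
+//	fr := http2NewFramer(conn, conn)
+//	if err := fr.WriteSettings(); err != nil {
+//		log.Fatal(err)
+//	}
+//	for {
+//		f, err := fr.ReadFrame()
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		log.Println(http2summarizeFrame(f))
+//	}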
+
+// SetMaxReadFrameSize sets the maximum size of a frame
+// that will be read by a subsequent call to ReadFrame.
+// It is the caller's responsibility to advertise this
+// limit with a SETTINGS frame.
+func (fr *http2Framer) SetMaxReadFrameSize(v uint32) {
+ if v > http2maxFrameSize {
+ v = http2maxFrameSize
+ }
+ fr.maxReadSize = v
+}
+
+// ErrorDetail returns a more detailed error of the last error
+// returned by Framer.ReadFrame. For instance, if ReadFrame
+// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail
+// will say exactly what was invalid. ErrorDetail is not guaranteed
+// to return a non-nil value and, like the rest of the http2 package,
+// its return value is not protected by an API compatibility promise.
+// ErrorDetail is reset after the next call to ReadFrame.
+func (fr *http2Framer) ErrorDetail() error {
+ return fr.errDetail
+}
+
+// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
+// sends a frame that is larger than declared with SetMaxReadFrameSize.
+var http2ErrFrameTooLarge = errors.New("http2: frame too large")
+
+// terminalReadFrameError reports whether err is an unrecoverable
+// error from ReadFrame and no other frames should be read.
+func http2terminalReadFrameError(err error) bool {
+ if _, ok := err.(http2StreamError); ok {
+ return false
+ }
+ return err != nil
+}
+
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
+func (fr *http2Framer) ReadFrame() (http2Frame, error) {
+ fr.errDetail = nil
+ if fr.lastFrame != nil {
+ fr.lastFrame.invalidate()
+ }
+ fh, err := http2readFrameHeader(fr.headerBuf[:], fr.r)
+ if err != nil {
+ return nil, err
+ }
+ if fh.Length > fr.maxReadSize {
+ return nil, http2ErrFrameTooLarge
+ }
+ payload := fr.getReadBuf(fh.Length)
+ if _, err := io.ReadFull(fr.r, payload); err != nil {
+ return nil, err
+ }
+ f, err := http2typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload)
+ if err != nil {
+ if ce, ok := err.(http2connError); ok {
+ return nil, fr.connError(ce.Code, ce.Reason)
+ }
+ return nil, err
+ }
+ if err := fr.checkFrameOrder(f); err != nil {
+ return nil, err
+ }
+ if fr.logReads {
+ fr.debugReadLoggerf("http2: Framer %p: read %v", fr, http2summarizeFrame(f))
+ }
+ if fh.Type == http2FrameHeaders && fr.ReadMetaHeaders != nil {
+ return fr.readMetaFrame(f.(*http2HeadersFrame))
+ }
+ return f, nil
+}
+
+// connError returns ConnectionError(code) but first
+// stashes away a public reason so the caller can optionally relay it
+// to the peer before hanging up on them. This might help others debug
+// their implementations.
+func (fr *http2Framer) connError(code http2ErrCode, reason string) error {
+ fr.errDetail = errors.New(reason)
+ return http2ConnectionError(code)
+}
+
+// checkFrameOrder reports an error if f is an invalid frame to return
+// next from ReadFrame. Mostly it checks whether HEADERS and
+// CONTINUATION frames are contiguous.
+func (fr *http2Framer) checkFrameOrder(f http2Frame) error {
+ last := fr.lastFrame
+ fr.lastFrame = f
+ if fr.AllowIllegalReads {
+ return nil
+ }
+
+ fh := f.Header()
+ if fr.lastHeaderStream != 0 {
+ if fh.Type != http2FrameContinuation {
+ return fr.connError(http2ErrCodeProtocol,
+ fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
+ fh.Type, fh.StreamID,
+ last.Header().Type, fr.lastHeaderStream))
+ }
+ if fh.StreamID != fr.lastHeaderStream {
+ return fr.connError(http2ErrCodeProtocol,
+ fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d",
+ fh.StreamID, fr.lastHeaderStream))
+ }
+ } else if fh.Type == http2FrameContinuation {
+ return fr.connError(http2ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID))
+ }
+
+ switch fh.Type {
+ case http2FrameHeaders, http2FrameContinuation:
+ if fh.Flags.Has(http2FlagHeadersEndHeaders) {
+ fr.lastHeaderStream = 0
+ } else {
+ fr.lastHeaderStream = fh.StreamID
+ }
+ }
+
+ return nil
+}
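+
+// For example (illustrative): after a HEADERS frame on stream 3 without
+// END_HEADERS, lastHeaderStream is 3 and only CONTINUATION frames for
+// stream 3 are legal; any other frame arriving next is a
+// connection-level PROTOCOL_ERROR.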
+
+// A DataFrame conveys arbitrary, variable-length sequences of octets
+// associated with a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.1
+type http2DataFrame struct {
+ http2FrameHeader
+ data []byte
+}
+
+func (f *http2DataFrame) StreamEnded() bool {
+ return f.http2FrameHeader.Flags.Has(http2FlagDataEndStream)
+}
+
+// Data returns the frame's data octets, not including any padding
+// size byte or padding suffix bytes.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *http2DataFrame) Data() []byte {
+ f.checkValid()
+ return f.data
+}
+
+func http2parseDataFrame(fc *http2frameCache, fh http2FrameHeader, countError func(string), payload []byte) (http2Frame, error) {
+ if fh.StreamID == 0 {
+ // DATA frames MUST be associated with a stream. If a
+ // DATA frame is received whose stream identifier
+ // field is 0x0, the recipient MUST respond with a
+ // connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ countError("frame_data_stream_0")
+ return nil, http2connError{http2ErrCodeProtocol, "DATA frame with stream ID 0"}
+ }
+ f := fc.getDataFrame()
+ f.http2FrameHeader = fh
+
+ var padSize byte
+ if fh.Flags.Has(http2FlagDataPadded) {
+ var err error
+ payload, padSize, err = http2readByte(payload)
+ if err != nil {
+ countError("frame_data_pad_byte_short")
+ return nil, err
+ }
+ }
+ if int(padSize) > len(payload) {
+ // If the length of the padding is greater than the
+ // length of the frame payload, the recipient MUST
+ // treat this as a connection error.
+ // Filed: https://github.com/http2/http2-spec/issues/610
+ countError("frame_data_pad_too_big")
+ return nil, http2connError{http2ErrCodeProtocol, "pad size larger than data payload"}
+ }
+ f.data = payload[:len(payload)-int(padSize)]
+ return f, nil
+}
+
+var (
+ http2errStreamID = errors.New("invalid stream ID")
+ http2errDepStreamID = errors.New("invalid dependent stream ID")
+ http2errPadLength = errors.New("pad length too large")
+ http2errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled")
+)
+
+func http2validStreamIDOrZero(streamID uint32) bool {
+ return streamID&(1<<31) == 0
+}
+
+func http2validStreamID(streamID uint32) bool {
+ return streamID != 0 && streamID&(1<<31) == 0
+}
+
+// WriteData writes a DATA frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
+func (f *http2Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
+ return f.WriteDataPadded(streamID, endStream, data, nil)
+}
+
+// WriteDataPadded writes a DATA frame with optional padding.
+//
+// If pad is nil, the padding bit is not sent.
+// The length of pad must not exceed 255 bytes.
+// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
+func (f *http2Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
+ if !http2validStreamID(streamID) && !f.AllowIllegalWrites {
+ return http2errStreamID
+ }
+ if len(pad) > 0 {
+ if len(pad) > 255 {
+ return http2errPadLength
+ }
+ if !f.AllowIllegalWrites {
+ for _, b := range pad {
+ if b != 0 {
+ // "Padding octets MUST be set to zero when sending."
+ return http2errPadBytes
+ }
+ }
+ }
+ }
+ var flags http2Flags
+ if endStream {
+ flags |= http2FlagDataEndStream
+ }
+ if pad != nil {
+ flags |= http2FlagDataPadded
+ }
+ f.startWrite(http2FrameData, flags, streamID)
+ if pad != nil {
+ f.wbuf = append(f.wbuf, byte(len(pad)))
+ }
+ f.wbuf = append(f.wbuf, data...)
+ f.wbuf = append(f.wbuf, pad...)
+ return f.endWrite()
+}
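+
+// For example (illustrative):
+//
+//	fr.WriteDataPadded(1, false, []byte("hi"), make([]byte, 2))
+//
+// emits a DATA frame with the PADDED flag, Length=5, and payload
+// 0x02 'h' 'i' 0x00 0x00 (pad-length byte, data, then zero padding).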
+
+// A SettingsFrame conveys configuration parameters that affect how
+// endpoints communicate, such as preferences and constraints on peer
+// behavior.
+//
+// See http://http2.github.io/http2-spec/#SETTINGS
+type http2SettingsFrame struct {
+ http2FrameHeader
+ p []byte
+}
+
+func http2parseSettingsFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
+ if fh.Flags.Has(http2FlagSettingsAck) && fh.Length > 0 {
+ // When this (ACK 0x1) bit is set, the payload of the
+ // SETTINGS frame MUST be empty. Receipt of a
+ // SETTINGS frame with the ACK flag set and a length
+ // field value other than 0 MUST be treated as a
+ // connection error (Section 5.4.1) of type
+ // FRAME_SIZE_ERROR.
+ countError("frame_settings_ack_with_length")
+ return nil, http2ConnectionError(http2ErrCodeFrameSize)
+ }
+ if fh.StreamID != 0 {
+ // SETTINGS frames always apply to a connection,
+ // never a single stream. The stream identifier for a
+ // SETTINGS frame MUST be zero (0x0). If an endpoint
+ // receives a SETTINGS frame whose stream identifier
+ // field is anything other than 0x0, the endpoint MUST
+ // respond with a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR.
+ countError("frame_settings_has_stream")
+ return nil, http2ConnectionError(http2ErrCodeProtocol)
+ }
+ if len(p)%6 != 0 {
+ countError("frame_settings_mod_6")
+		// Expecting a whole number of 6-byte settings.
+ return nil, http2ConnectionError(http2ErrCodeFrameSize)
+ }
+ f := &http2SettingsFrame{http2FrameHeader: fh, p: p}
+ if v, ok := f.Value(http2SettingInitialWindowSize); ok && v > (1<<31)-1 {
+ countError("frame_settings_window_size_too_big")
+ // Values above the maximum flow control window size of 2^31 - 1 MUST
+ // be treated as a connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR.
+ return nil, http2ConnectionError(http2ErrCodeFlowControl)
+ }
+ return f, nil
+}
+
+func (f *http2SettingsFrame) IsAck() bool {
+ return f.http2FrameHeader.Flags.Has(http2FlagSettingsAck)
+}
+
+func (f *http2SettingsFrame) Value(id http2SettingID) (v uint32, ok bool) {
+ f.checkValid()
+ for i := 0; i < f.NumSettings(); i++ {
+ if s := f.Setting(i); s.ID == id {
+ return s.Val, true
+ }
+ }
+ return 0, false
+}
+
+// Setting returns the setting from the frame at the given 0-based index.
+// The index must be >= 0 and less than f.NumSettings().
+func (f *http2SettingsFrame) Setting(i int) http2Setting {
+ buf := f.p
+ return http2Setting{
+ ID: http2SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])),
+ Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]),
+ }
+}
+
+func (f *http2SettingsFrame) NumSettings() int { return len(f.p) / 6 }
+
+// HasDuplicates reports whether f contains any duplicate setting IDs.
+func (f *http2SettingsFrame) HasDuplicates() bool {
+ num := f.NumSettings()
+ if num == 0 {
+ return false
+ }
+ // If it's small enough (the common case), just do the n^2
+ // thing and avoid a map allocation.
+ if num < 10 {
+ for i := 0; i < num; i++ {
+ idi := f.Setting(i).ID
+ for j := i + 1; j < num; j++ {
+ idj := f.Setting(j).ID
+ if idi == idj {
+ return true
+ }
+ }
+ }
+ return false
+ }
+ seen := map[http2SettingID]bool{}
+ for i := 0; i < num; i++ {
+ id := f.Setting(i).ID
+ if seen[id] {
+ return true
+ }
+ seen[id] = true
+ }
+ return false
+}
+
+// ForeachSetting runs fn for each setting.
+// It stops and returns the first error.
+func (f *http2SettingsFrame) ForeachSetting(fn func(http2Setting) error) error {
+ f.checkValid()
+ for i := 0; i < f.NumSettings(); i++ {
+ if err := fn(f.Setting(i)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// WriteSettings writes a SETTINGS frame with zero or more settings
+// specified and the ACK bit not set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *http2Framer) WriteSettings(settings ...http2Setting) error {
+ f.startWrite(http2FrameSettings, 0, 0)
+ for _, s := range settings {
+ f.writeUint16(uint16(s.ID))
+ f.writeUint32(s.Val)
+ }
+ return f.endWrite()
+}
+
+// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *http2Framer) WriteSettingsAck() error {
+ f.startWrite(http2FrameSettings, http2FlagSettingsAck, 0)
+ return f.endWrite()
+}
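+
+// Sketch of the usual exchange (illustrative): each endpoint opens by
+// sending its own SETTINGS frame and must acknowledge the peer's:
+//
+//	fr.WriteSettings(http2Setting{ID: http2SettingMaxFrameSize, Val: 1 << 20})
+//	// ...later, after reading the peer's non-ACK SettingsFrame:
+//	fr.WriteSettingsAck()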
+
+// A PingFrame is a mechanism for measuring a minimal round trip time
+// from the sender, as well as determining whether an idle connection
+// is still functional.
+// See http://http2.github.io/http2-spec/#rfc.section.6.7
+type http2PingFrame struct {
+ http2FrameHeader
+ Data [8]byte
+}
+
+func (f *http2PingFrame) IsAck() bool { return f.Flags.Has(http2FlagPingAck) }
+
+func http2parsePingFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), payload []byte) (http2Frame, error) {
+ if len(payload) != 8 {
+ countError("frame_ping_length")
+ return nil, http2ConnectionError(http2ErrCodeFrameSize)
+ }
+ if fh.StreamID != 0 {
+ countError("frame_ping_has_stream")
+ return nil, http2ConnectionError(http2ErrCodeProtocol)
+ }
+ f := &http2PingFrame{http2FrameHeader: fh}
+ copy(f.Data[:], payload)
+ return f, nil
+}
+
+func (f *http2Framer) WritePing(ack bool, data [8]byte) error {
+ var flags http2Flags
+ if ack {
+ flags = http2FlagPingAck
+ }
+ f.startWrite(http2FramePing, flags, 0)
+ f.writeBytes(data[:])
+ return f.endWrite()
+}
+
+// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
+// See http://http2.github.io/http2-spec/#rfc.section.6.8
+type http2GoAwayFrame struct {
+ http2FrameHeader
+ LastStreamID uint32
+ ErrCode http2ErrCode
+ debugData []byte
+}
+
+// DebugData returns any debug data in the GOAWAY frame. Its contents
+// are not defined.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *http2GoAwayFrame) DebugData() []byte {
+ f.checkValid()
+ return f.debugData
+}
+
+func http2parseGoAwayFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
+ if fh.StreamID != 0 {
+ countError("frame_goaway_has_stream")
+ return nil, http2ConnectionError(http2ErrCodeProtocol)
+ }
+ if len(p) < 8 {
+ countError("frame_goaway_short")
+ return nil, http2ConnectionError(http2ErrCodeFrameSize)
+ }
+ return &http2GoAwayFrame{
+ http2FrameHeader: fh,
+ LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1),
+ ErrCode: http2ErrCode(binary.BigEndian.Uint32(p[4:8])),
+ debugData: p[8:],
+ }, nil
+}
+
+func (f *http2Framer) WriteGoAway(maxStreamID uint32, code http2ErrCode, debugData []byte) error {
+ f.startWrite(http2FrameGoAway, 0, 0)
+ f.writeUint32(maxStreamID & (1<<31 - 1))
+ f.writeUint32(uint32(code))
+ f.writeBytes(debugData)
+ return f.endWrite()
+}
+
+// An UnknownFrame is the frame type returned when the frame type is unknown
+// or no specific frame type parser exists.
+type http2UnknownFrame struct {
+ http2FrameHeader
+ p []byte
+}
+
+// Payload returns the frame's payload (after the header). It is not
+// valid to call this method after a subsequent call to
+// Framer.ReadFrame, nor is it valid to retain the returned slice.
+// The memory is owned by the Framer and is invalidated when the next
+// frame is read.
+func (f *http2UnknownFrame) Payload() []byte {
+ f.checkValid()
+ return f.p
+}
+
+func http2parseUnknownFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
+ return &http2UnknownFrame{fh, p}, nil
+}
+
+// A WindowUpdateFrame is used to implement flow control.
+// See http://http2.github.io/http2-spec/#rfc.section.6.9
+type http2WindowUpdateFrame struct {
+ http2FrameHeader
+ Increment uint32 // never read with high bit set
+}
+
+func http2parseWindowUpdateFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
+ if len(p) != 4 {
+ countError("frame_windowupdate_bad_len")
+ return nil, http2ConnectionError(http2ErrCodeFrameSize)
+ }
+ inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
+ if inc == 0 {
+ // A receiver MUST treat the receipt of a
+		// WINDOW_UPDATE frame with a flow control window
+ // increment of 0 as a stream error (Section 5.4.2) of
+ // type PROTOCOL_ERROR; errors on the connection flow
+ // control window MUST be treated as a connection
+ // error (Section 5.4.1).
+ if fh.StreamID == 0 {
+ countError("frame_windowupdate_zero_inc_conn")
+ return nil, http2ConnectionError(http2ErrCodeProtocol)
+ }
+ countError("frame_windowupdate_zero_inc_stream")
+ return nil, http2streamError(fh.StreamID, http2ErrCodeProtocol)
+ }
+ return &http2WindowUpdateFrame{
+ http2FrameHeader: fh,
+ Increment: inc,
+ }, nil
+}
+
+// WriteWindowUpdate writes a WINDOW_UPDATE frame.
+// The increment value must be between 1 and 2,147,483,647, inclusive.
+// If the Stream ID is zero, the window update applies to the
+// connection as a whole.
+func (f *http2Framer) WriteWindowUpdate(streamID, incr uint32) error {
+ // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
+ if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
+ return errors.New("illegal window increment value")
+ }
+ f.startWrite(http2FrameWindowUpdate, 0, streamID)
+ f.writeUint32(incr)
+ return f.endWrite()
+}
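+
+// For example (illustrative): fr.WriteWindowUpdate(0, 1<<16) grants the
+// peer 65536 additional connection-level flow-control octets, while a
+// non-zero streamID grants credit to that stream only.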
+
+// A HeadersFrame is used to open a stream and additionally carries a
+// header block fragment.
+type http2HeadersFrame struct {
+ http2FrameHeader
+
+ // Priority is set if FlagHeadersPriority is set in the FrameHeader.
+ Priority http2PriorityParam
+
+ headerFragBuf []byte // not owned
+}
+
+func (f *http2HeadersFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *http2HeadersFrame) HeadersEnded() bool {
+ return f.http2FrameHeader.Flags.Has(http2FlagHeadersEndHeaders)
+}
+
+func (f *http2HeadersFrame) StreamEnded() bool {
+ return f.http2FrameHeader.Flags.Has(http2FlagHeadersEndStream)
+}
+
+func (f *http2HeadersFrame) HasPriority() bool {
+ return f.http2FrameHeader.Flags.Has(http2FlagHeadersPriority)
+}
+
+func http2parseHeadersFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (_ http2Frame, err error) {
+ hf := &http2HeadersFrame{
+ http2FrameHeader: fh,
+ }
+ if fh.StreamID == 0 {
+ // HEADERS frames MUST be associated with a stream. If a HEADERS frame
+ // is received whose stream identifier field is 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ countError("frame_headers_zero_stream")
+ return nil, http2connError{http2ErrCodeProtocol, "HEADERS frame with stream ID 0"}
+ }
+ var padLength uint8
+ if fh.Flags.Has(http2FlagHeadersPadded) {
+ if p, padLength, err = http2readByte(p); err != nil {
+ countError("frame_headers_pad_short")
+ return
+ }
+ }
+ if fh.Flags.Has(http2FlagHeadersPriority) {
+ var v uint32
+ p, v, err = http2readUint32(p)
+ if err != nil {
+ countError("frame_headers_prio_short")
+ return nil, err
+ }
+ hf.Priority.StreamDep = v & 0x7fffffff
+ hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
+ p, hf.Priority.Weight, err = http2readByte(p)
+ if err != nil {
+ countError("frame_headers_prio_weight_short")
+ return nil, err
+ }
+ }
+ if len(p)-int(padLength) < 0 {
+ countError("frame_headers_pad_too_big")
+ return nil, http2streamError(fh.StreamID, http2ErrCodeProtocol)
+ }
+ hf.headerFragBuf = p[:len(p)-int(padLength)]
+ return hf, nil
+}
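+
+// For example (illustrative): a priority word v = 0x80000005 parses to
+// StreamDep = 5 and Exclusive = true, since masking off the reserved
+// high bit changed the value.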
+
+// HeadersFrameParam are the parameters for writing a HEADERS frame.
+type http2HeadersFrameParam struct {
+ // StreamID is the required Stream ID to initiate.
+ StreamID uint32
+ // BlockFragment is part (or all) of a Header Block.
+ BlockFragment []byte
+
+ // EndStream indicates that the header block is the last that
+ // the endpoint will send for the identified stream. Setting
+ // this flag causes the stream to enter one of "half closed"
+ // states.
+ EndStream bool
+
+ // EndHeaders indicates that this frame contains an entire
+ // header block and is not followed by any
+ // CONTINUATION frames.
+ EndHeaders bool
+
+ // PadLength is the optional number of bytes of zeros to add
+ // to this frame.
+ PadLength uint8
+
+ // Priority, if non-zero, includes stream priority information
+	// in the HEADERS frame.
+ Priority http2PriorityParam
+}
+
+// WriteHeaders writes a single HEADERS frame.
+//
+// This is a low-level header writing method. Encoding headers and
+// splitting them into any necessary CONTINUATION frames is handled
+// elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *http2Framer) WriteHeaders(p http2HeadersFrameParam) error {
+ if !http2validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return http2errStreamID
+ }
+ var flags http2Flags
+ if p.PadLength != 0 {
+ flags |= http2FlagHeadersPadded
+ }
+ if p.EndStream {
+ flags |= http2FlagHeadersEndStream
+ }
+ if p.EndHeaders {
+ flags |= http2FlagHeadersEndHeaders
+ }
+ if !p.Priority.IsZero() {
+ flags |= http2FlagHeadersPriority
+ }
+ f.startWrite(http2FrameHeaders, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !p.Priority.IsZero() {
+ v := p.Priority.StreamDep
+ if !http2validStreamIDOrZero(v) && !f.AllowIllegalWrites {
+ return http2errDepStreamID
+ }
+ if p.Priority.Exclusive {
+ v |= 1 << 31
+ }
+ f.writeUint32(v)
+ f.writeByte(p.Priority.Weight)
+ }
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, http2padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
+
+// A PriorityFrame specifies the sender-advised priority of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.3
+type http2PriorityFrame struct {
+ http2FrameHeader
+ http2PriorityParam
+}
+
+// PriorityParam are the stream prioritization parameters.
+type http2PriorityParam struct {
+ // StreamDep is a 31-bit stream identifier for the
+ // stream that this stream depends on. Zero means no
+ // dependency.
+ StreamDep uint32
+
+ // Exclusive is whether the dependency is exclusive.
+ Exclusive bool
+
+ // Weight is the stream's zero-indexed weight. It should be
+ // set together with StreamDep, or neither should be set. Per
+ // the spec, "Add one to the value to obtain a weight between
+ // 1 and 256."
+ Weight uint8
+}
+
+func (p http2PriorityParam) IsZero() bool {
+ return p == http2PriorityParam{}
+}
+
+func http2parsePriorityFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), payload []byte) (http2Frame, error) {
+ if fh.StreamID == 0 {
+ countError("frame_priority_zero_stream")
+ return nil, http2connError{http2ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
+ }
+ if len(payload) != 5 {
+ countError("frame_priority_bad_length")
+ return nil, http2connError{http2ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
+ }
+ v := binary.BigEndian.Uint32(payload[:4])
+ streamID := v & 0x7fffffff // mask off high bit
+ return &http2PriorityFrame{
+ http2FrameHeader: fh,
+ http2PriorityParam: http2PriorityParam{
+ Weight: payload[4],
+ StreamDep: streamID,
+ Exclusive: streamID != v, // was high bit set?
+ },
+ }, nil
+}
+
+// WritePriority writes a PRIORITY frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *http2Framer) WritePriority(streamID uint32, p http2PriorityParam) error {
+ if !http2validStreamID(streamID) && !f.AllowIllegalWrites {
+ return http2errStreamID
+ }
+ if !http2validStreamIDOrZero(p.StreamDep) {
+ return http2errDepStreamID
+ }
+ f.startWrite(http2FramePriority, 0, streamID)
+ v := p.StreamDep
+ if p.Exclusive {
+ v |= 1 << 31
+ }
+ f.writeUint32(v)
+ f.writeByte(p.Weight)
+ return f.endWrite()
+}
+
+// A RSTStreamFrame allows for abnormal termination of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.4
+type http2RSTStreamFrame struct {
+ http2FrameHeader
+ ErrCode http2ErrCode
+}
+
+func http2parseRSTStreamFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
+ if len(p) != 4 {
+ countError("frame_rststream_bad_len")
+ return nil, http2ConnectionError(http2ErrCodeFrameSize)
+ }
+ if fh.StreamID == 0 {
+ countError("frame_rststream_zero_stream")
+ return nil, http2ConnectionError(http2ErrCodeProtocol)
+ }
+ return &http2RSTStreamFrame{fh, http2ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
+}
+
+// WriteRSTStream writes a RST_STREAM frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *http2Framer) WriteRSTStream(streamID uint32, code http2ErrCode) error {
+ if !http2validStreamID(streamID) && !f.AllowIllegalWrites {
+ return http2errStreamID
+ }
+ f.startWrite(http2FrameRSTStream, 0, streamID)
+ f.writeUint32(uint32(code))
+ return f.endWrite()
+}
+
+// A ContinuationFrame is used to continue a sequence of header block fragments.
+// See http://http2.github.io/http2-spec/#rfc.section.6.10
+type http2ContinuationFrame struct {
+ http2FrameHeader
+ headerFragBuf []byte
+}
+
+func http2parseContinuationFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
+ if fh.StreamID == 0 {
+ countError("frame_continuation_zero_stream")
+ return nil, http2connError{http2ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
+ }
+ return &http2ContinuationFrame{fh, p}, nil
+}
+
+func (f *http2ContinuationFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *http2ContinuationFrame) HeadersEnded() bool {
+ return f.http2FrameHeader.Flags.Has(http2FlagContinuationEndHeaders)
+}
+
+// WriteContinuation writes a CONTINUATION frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *http2Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
+ if !http2validStreamID(streamID) && !f.AllowIllegalWrites {
+ return http2errStreamID
+ }
+ var flags http2Flags
+ if endHeaders {
+ flags |= http2FlagContinuationEndHeaders
+ }
+ f.startWrite(http2FrameContinuation, flags, streamID)
+ f.wbuf = append(f.wbuf, headerBlockFragment...)
+ return f.endWrite()
+}
+
+// A PushPromiseFrame is used to initiate a server stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.6
+type http2PushPromiseFrame struct {
+ http2FrameHeader
+ PromiseID uint32
+ headerFragBuf []byte // not owned
+}
+
+func (f *http2PushPromiseFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *http2PushPromiseFrame) HeadersEnded() bool {
+ return f.http2FrameHeader.Flags.Has(http2FlagPushPromiseEndHeaders)
+}
+
+func http2parsePushPromise(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (_ http2Frame, err error) {
+ pp := &http2PushPromiseFrame{
+ http2FrameHeader: fh,
+ }
+ if pp.StreamID == 0 {
+ // PUSH_PROMISE frames MUST be associated with an existing,
+ // peer-initiated stream. The stream identifier of a
+ // PUSH_PROMISE frame indicates the stream it is associated
+ // with. If the stream identifier field specifies the value
+ // 0x0, a recipient MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ countError("frame_pushpromise_zero_stream")
+ return nil, http2ConnectionError(http2ErrCodeProtocol)
+ }
+ // The PUSH_PROMISE frame includes optional padding.
+	// Padding fields and flags are identical to those defined for DATA frames.
+ var padLength uint8
+ if fh.Flags.Has(http2FlagPushPromisePadded) {
+ if p, padLength, err = http2readByte(p); err != nil {
+ countError("frame_pushpromise_pad_short")
+ return
+ }
+ }
+
+ p, pp.PromiseID, err = http2readUint32(p)
+ if err != nil {
+ countError("frame_pushpromise_promiseid_short")
+ return
+ }
+ pp.PromiseID = pp.PromiseID & (1<<31 - 1)
+
+ if int(padLength) > len(p) {
+ // like the DATA frame, error out if padding is longer than the body.
+ countError("frame_pushpromise_pad_too_big")
+ return nil, http2ConnectionError(http2ErrCodeProtocol)
+ }
+ pp.headerFragBuf = p[:len(p)-int(padLength)]
+ return pp, nil
+}
+
+// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
+type http2PushPromiseParam struct {
+ // StreamID is the required Stream ID to initiate.
+ StreamID uint32
+
+	// PromiseID is the required Stream ID that this
+	// PUSH_PROMISE frame reserves.
+ PromiseID uint32
+
+ // BlockFragment is part (or all) of a Header Block.
+ BlockFragment []byte
+
+ // EndHeaders indicates that this frame contains an entire
+ // header block and is not followed by any
+ // CONTINUATION frames.
+ EndHeaders bool
+
+ // PadLength is the optional number of bytes of zeros to add
+ // to this frame.
+ PadLength uint8
+}
+
+// WritePushPromise writes a single PushPromise Frame.
+//
+// As with HEADERS frames, this is the low-level call for writing
+// individual frames. CONTINUATION frames are handled elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *http2Framer) WritePushPromise(p http2PushPromiseParam) error {
+ if !http2validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return http2errStreamID
+ }
+ var flags http2Flags
+ if p.PadLength != 0 {
+ flags |= http2FlagPushPromisePadded
+ }
+ if p.EndHeaders {
+ flags |= http2FlagPushPromiseEndHeaders
+ }
+ f.startWrite(http2FramePushPromise, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !http2validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
+ return http2errStreamID
+ }
+ f.writeUint32(p.PromiseID)
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, http2padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
+
+// WriteRawFrame writes a raw frame. This can be used to write
+// extension frames unknown to this package.
+func (f *http2Framer) WriteRawFrame(t http2FrameType, flags http2Flags, streamID uint32, payload []byte) error {
+ f.startWrite(t, flags, streamID)
+ f.writeBytes(payload)
+ return f.endWrite()
+}
+
+func http2readByte(p []byte) (remain []byte, b byte, err error) {
+ if len(p) == 0 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[1:], p[0], nil
+}
+
+func http2readUint32(p []byte) (remain []byte, v uint32, err error) {
+ if len(p) < 4 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[4:], binary.BigEndian.Uint32(p[:4]), nil
+}
+
+type http2streamEnder interface {
+ StreamEnded() bool
+}
+
+type http2headersEnder interface {
+ HeadersEnded() bool
+}
+
+type http2headersOrContinuation interface {
+ http2headersEnder
+ HeaderBlockFragment() []byte
+}
+
+// A MetaHeadersFrame is the representation of one HEADERS frame and
+// zero or more contiguous CONTINUATION frames and the decoding of
+// their HPACK-encoded contents.
+//
+// This type of frame does not appear on the wire and is only returned
+// by the Framer when Framer.ReadMetaHeaders is set.
+type http2MetaHeadersFrame struct {
+ *http2HeadersFrame
+
+ // Fields are the fields contained in the HEADERS and
+ // CONTINUATION frames. The underlying slice is owned by the
+ // Framer and must not be retained after the next call to
+ // ReadFrame.
+ //
+ // Fields are guaranteed to be in the correct http2 order and
+ // not have unknown pseudo header fields or invalid header
+ // field names or values. Required pseudo header fields may be
+ // missing, however. Use the MetaHeadersFrame.Pseudo accessor
+	// method to access pseudo headers.
+ Fields []hpack.HeaderField
+
+ // Truncated is whether the max header list size limit was hit
+ // and Fields is incomplete. The hpack decoder state is still
+ // valid, however.
+ Truncated bool
+}
+
+// PseudoValue returns the given pseudo header field's value.
+// The provided pseudo field should not contain the leading colon.
+func (mh *http2MetaHeadersFrame) PseudoValue(pseudo string) string {
+ for _, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return ""
+ }
+ if hf.Name[1:] == pseudo {
+ return hf.Value
+ }
+ }
+ return ""
+}
+
+// RegularFields returns the regular (non-pseudo) header fields of mh.
+// The caller does not own the returned slice.
+func (mh *http2MetaHeadersFrame) RegularFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[i:]
+ }
+ }
+ return nil
+}
+
+// PseudoFields returns the pseudo header fields of mh.
+// The caller does not own the returned slice.
+func (mh *http2MetaHeadersFrame) PseudoFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[:i]
+ }
+ }
+ return mh.Fields
+}
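+
+// For example (illustrative): with Fields named
+// [":method", ":path", "accept"], PseudoFields returns the first two
+// entries and RegularFields returns the tail starting at "accept";
+// both accessors rely on the invariant that pseudo header fields
+// precede regular ones.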
+
+func (mh *http2MetaHeadersFrame) checkPseudos() error {
+ var isRequest, isResponse bool
+ pf := mh.PseudoFields()
+ for i, hf := range pf {
+ switch hf.Name {
+ case ":method", ":path", ":scheme", ":authority":
+ isRequest = true
+ case ":status":
+ isResponse = true
+ default:
+ return http2pseudoHeaderError(hf.Name)
+ }
+ // Check for duplicates.
+ // This would be a bad algorithm, but N is 4.
+ // And this doesn't allocate.
+ for _, hf2 := range pf[:i] {
+ if hf.Name == hf2.Name {
+ return http2duplicatePseudoHeaderError(hf.Name)
+ }
+ }
+ }
+ if isRequest && isResponse {
+ return http2errMixPseudoHeaderTypes
+ }
+ return nil
+}
+
+func (fr *http2Framer) maxHeaderStringLen() int {
+ v := fr.maxHeaderListSize()
+ if uint32(int(v)) == v {
+ return int(v)
+ }
+ // They had a crazy big number for MaxHeaderBytes anyway,
+ // so give them unlimited header lengths:
+ return 0
+}
+
+// readMetaFrame reads zero or more CONTINUATION frames from fr,
+// merges them into the provided hf, and returns a MetaHeadersFrame
+// with the decoded hpack values.
+func (fr *http2Framer) readMetaFrame(hf *http2HeadersFrame) (*http2MetaHeadersFrame, error) {
+ if fr.AllowIllegalReads {
+ return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
+ }
+ mh := &http2MetaHeadersFrame{
+ http2HeadersFrame: hf,
+ }
+ var remainSize = fr.maxHeaderListSize()
+ var sawRegular bool
+
+ var invalid error // pseudo header field errors
+ hdec := fr.ReadMetaHeaders
+ hdec.SetEmitEnabled(true)
+ hdec.SetMaxStringLength(fr.maxHeaderStringLen())
+ hdec.SetEmitFunc(func(hf hpack.HeaderField) {
+ if http2VerboseLogs && fr.logReads {
+ fr.debugReadLoggerf("http2: decoded hpack field %+v", hf)
+ }
+ if !httpguts.ValidHeaderFieldValue(hf.Value) {
+ // Don't include the value in the error, because it may be sensitive.
+ invalid = http2headerFieldValueError(hf.Name)
+ }
+ isPseudo := strings.HasPrefix(hf.Name, ":")
+ if isPseudo {
+ if sawRegular {
+ invalid = http2errPseudoAfterRegular
+ }
+ } else {
+ sawRegular = true
+ if !http2validWireHeaderFieldName(hf.Name) {
+ invalid = http2headerFieldNameError(hf.Name)
+ }
+ }
+
+ if invalid != nil {
+ hdec.SetEmitEnabled(false)
+ return
+ }
+
+ size := hf.Size()
+ if size > remainSize {
+ hdec.SetEmitEnabled(false)
+ mh.Truncated = true
+ return
+ }
+ remainSize -= size
+
+ mh.Fields = append(mh.Fields, hf)
+ })
+ // Lose reference to MetaHeadersFrame:
+ defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {})
+
+ var hc http2headersOrContinuation = hf
+ for {
+ frag := hc.HeaderBlockFragment()
+ if _, err := hdec.Write(frag); err != nil {
+ return nil, http2ConnectionError(http2ErrCodeCompression)
+ }
+
+ if hc.HeadersEnded() {
+ break
+ }
+ if f, err := fr.ReadFrame(); err != nil {
+ return nil, err
+ } else {
+ hc = f.(*http2ContinuationFrame) // guaranteed by checkFrameOrder
+ }
+ }
+
+ mh.http2HeadersFrame.headerFragBuf = nil
+ mh.http2HeadersFrame.invalidate()
+
+ if err := hdec.Close(); err != nil {
+ return nil, http2ConnectionError(http2ErrCodeCompression)
+ }
+ if invalid != nil {
+ fr.errDetail = invalid
+ if http2VerboseLogs {
+ log.Printf("http2: invalid header: %v", invalid)
+ }
+ return nil, http2StreamError{mh.StreamID, http2ErrCodeProtocol, invalid}
+ }
+ if err := mh.checkPseudos(); err != nil {
+ fr.errDetail = err
+ if http2VerboseLogs {
+ log.Printf("http2: invalid pseudo headers: %v", err)
+ }
+ return nil, http2StreamError{mh.StreamID, http2ErrCodeProtocol, err}
+ }
+ return mh, nil
+}
+
+func http2summarizeFrame(f http2Frame) string {
+ var buf bytes.Buffer
+ f.Header().writeDebug(&buf)
+ switch f := f.(type) {
+ case *http2SettingsFrame:
+ n := 0
+ f.ForeachSetting(func(s http2Setting) error {
+ n++
+ if n == 1 {
+ buf.WriteString(", settings:")
+ }
+ fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val)
+ return nil
+ })
+ if n > 0 {
+ buf.Truncate(buf.Len() - 1) // remove trailing comma
+ }
+ case *http2DataFrame:
+ data := f.Data()
+ const max = 256
+ if len(data) > max {
+ data = data[:max]
+ }
+ fmt.Fprintf(&buf, " data=%q", data)
+ if len(f.Data()) > max {
+ fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max)
+ }
+ case *http2WindowUpdateFrame:
+ if f.StreamID == 0 {
+ buf.WriteString(" (conn)")
+ }
+ fmt.Fprintf(&buf, " incr=%v", f.Increment)
+ case *http2PingFrame:
+ fmt.Fprintf(&buf, " ping=%q", f.Data[:])
+ case *http2GoAwayFrame:
+ fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q",
+ f.LastStreamID, f.ErrCode, f.debugData)
+ case *http2RSTStreamFrame:
+ fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode)
+ }
+ return buf.String()
+}
+
+func http2traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
+ return trace != nil && trace.WroteHeaderField != nil
+}
+
+func http2traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField(k, []string{v})
+ }
+}
+
+func http2traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
+ if trace != nil {
+ return trace.Got1xxResponse
+ }
+ return nil
+}
+
+// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
+// connection.
+func (t *http2Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
+ dialer := &tls.Dialer{
+ Config: cfg,
+ }
+ cn, err := dialer.DialContext(ctx, network, addr)
+ if err != nil {
+ return nil, err
+ }
+ tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
+ return tlsCn, nil
+}
+
+func http2tlsUnderlyingConn(tc *tls.Conn) net.Conn {
+ return tc.NetConn()
+}
+
+var http2DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+
+type http2goroutineLock uint64
+
+func http2newGoroutineLock() http2goroutineLock {
+ if !http2DebugGoroutines {
+ return 0
+ }
+ return http2goroutineLock(http2curGoroutineID())
+}
+
+func (g http2goroutineLock) check() {
+ if !http2DebugGoroutines {
+ return
+ }
+ if http2curGoroutineID() != uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+func (g http2goroutineLock) checkNotOn() {
+ if !http2DebugGoroutines {
+ return
+ }
+ if http2curGoroutineID() == uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+var http2goroutineSpace = []byte("goroutine ")
+
+func http2curGoroutineID() uint64 {
+ bp := http2littleBuf.Get().(*[]byte)
+ defer http2littleBuf.Put(bp)
+ b := *bp
+ b = b[:runtime.Stack(b, false)]
+ // Parse the 4707 out of "goroutine 4707 ["
+ b = bytes.TrimPrefix(b, http2goroutineSpace)
+ i := bytes.IndexByte(b, ' ')
+ if i < 0 {
+ panic(fmt.Sprintf("No space found in %q", b))
+ }
+ b = b[:i]
+ n, err := http2parseUintBytes(b, 10, 64)
+ if err != nil {
+ panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
+ }
+ return n
+}
+
+var http2littleBuf = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, 64)
+ return &buf
+ },
+}
+
+// parseUintBytes is like strconv.ParseUint, but using a []byte.
+func http2parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
+ var cutoff, maxVal uint64
+
+ if bitSize == 0 {
+ bitSize = int(strconv.IntSize)
+ }
+
+ s0 := s
+ switch {
+ case len(s) < 1:
+ err = strconv.ErrSyntax
+ goto Error
+
+ case 2 <= base && base <= 36:
+ // valid base; nothing to do
+
+ case base == 0:
+ // Look for octal, hex prefix.
+ switch {
+ case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+ base = 16
+ s = s[2:]
+ if len(s) < 1 {
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ case s[0] == '0':
+ base = 8
+ default:
+ base = 10
+ }
+
+ default:
+ err = errors.New("invalid base " + strconv.Itoa(base))
+ goto Error
+ }
+
+ n = 0
+ cutoff = http2cutoff64(base)
+ maxVal = 1<<uint(bitSize) - 1
+
+ for i := 0; i < len(s); i++ {
+ var v byte
+ d := s[i]
+ switch {
+ case '0' <= d && d <= '9':
+ v = d - '0'
+ case 'a' <= d && d <= 'z':
+ v = d - 'a' + 10
+ case 'A' <= d && d <= 'Z':
+ v = d - 'A' + 10
+ default:
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ if int(v) >= base {
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+
+ if n >= cutoff {
+ // n*base overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n *= uint64(base)
+
+ n1 := n + uint64(v)
+ if n1 < n || n1 > maxVal {
+ // n+v overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n = n1
+ }
+
+ return n, nil
+
+Error:
+ return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// Return the first number n such that n*base >= 1<<64.
+func http2cutoff64(base int) uint64 {
+ if base < 2 {
+ return 0
+ }
+ return (1<<64-1)/uint64(base) + 1
+}
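+
+// For base 10 (illustrative) this is (1<<64-1)/10 + 1 =
+// 1844674407370955162, the smallest n with n*10 >= 1<<64, which is
+// exactly the overflow threshold parseUintBytes checks against.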
+
+var (
+ http2commonBuildOnce sync.Once
+ http2commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case
+ http2commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case
+)
+
+func http2buildCommonHeaderMapsOnce() {
+ http2commonBuildOnce.Do(http2buildCommonHeaderMaps)
+}
+
+func http2buildCommonHeaderMaps() {
+ common := []string{
+ "accept",
+ "accept-charset",
+ "accept-encoding",
+ "accept-language",
+ "accept-ranges",
+ "age",
+ "access-control-allow-origin",
+ "allow",
+ "authorization",
+ "cache-control",
+ "content-disposition",
+ "content-encoding",
+ "content-language",
+ "content-length",
+ "content-location",
+ "content-range",
+ "content-type",
+ "cookie",
+ "date",
+ "etag",
+ "expect",
+ "expires",
+ "from",
+ "host",
+ "if-match",
+ "if-modified-since",
+ "if-none-match",
+ "if-unmodified-since",
+ "last-modified",
+ "link",
+ "location",
+ "max-forwards",
+ "proxy-authenticate",
+ "proxy-authorization",
+ "range",
+ "referer",
+ "refresh",
+ "retry-after",
+ "server",
+ "set-cookie",
+ "strict-transport-security",
+ "trailer",
+ "transfer-encoding",
+ "user-agent",
+ "vary",
+ "via",
+ "www-authenticate",
+ }
+ http2commonLowerHeader = make(map[string]string, len(common))
+ http2commonCanonHeader = make(map[string]string, len(common))
+ for _, v := range common {
+ chk := CanonicalHeaderKey(v)
+ http2commonLowerHeader[chk] = v
+ http2commonCanonHeader[v] = chk
+ }
+}
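+
+// For example (illustrative): once built,
+// http2commonLowerHeader["Content-Type"] == "content-type" and
+// http2commonCanonHeader["content-type"] == "Content-Type", so the hot
+// paths avoid re-lowercasing or re-canonicalizing common header names.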
+
+func http2lowerHeader(v string) (lower string, ascii bool) {
+ http2buildCommonHeaderMapsOnce()
+ if s, ok := http2commonLowerHeader[v]; ok {
+ return s, true
+ }
+ return http2asciiToLower(v)
+}
+
+var (
+ http2VerboseLogs bool
+ http2logFrameWrites bool
+ http2logFrameReads bool
+ http2inTests bool
+)
+
+func init() {
+ e := os.Getenv("GODEBUG")
+ if strings.Contains(e, "http2debug=1") {
+ http2VerboseLogs = true
+ }
+ if strings.Contains(e, "http2debug=2") {
+ http2VerboseLogs = true
+ http2logFrameWrites = true
+ http2logFrameReads = true
+ }
+}
+
+const (
+ // ClientPreface is the string that must be sent by new
+ // connections from clients.
+ http2ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
+
+ // SETTINGS_MAX_FRAME_SIZE default
+ // http://http2.github.io/http2-spec/#rfc.section.6.5.2
+ http2initialMaxFrameSize = 16384
+
+ // NextProtoTLS is the NPN/ALPN protocol negotiated during
+ // HTTP/2's TLS setup.
+ http2NextProtoTLS = "h2"
+
+ // http://http2.github.io/http2-spec/#SettingValues
+ http2initialHeaderTableSize = 4096
+
+ http2initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
+
+ http2defaultMaxReadFrameSize = 1 << 20
+)
+
+var (
+ http2clientPreface = []byte(http2ClientPreface)
+)
+
+type http2streamState int
+
+// HTTP/2 stream states.
+//
+// See http://tools.ietf.org/html/rfc7540#section-5.1.
+//
+// For simplicity, the server code merges "reserved (local)" into
+// "half-closed (remote)". This is one less state transition to track.
+// The only downside is that we send PUSH_PROMISEs slightly less
+// liberally than allowable. More discussion here:
+// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
+//
+// "reserved (remote)" is omitted since the client code does not
+// support server push.
+const (
+ http2stateIdle http2streamState = iota
+ http2stateOpen
+ http2stateHalfClosedLocal
+ http2stateHalfClosedRemote
+ http2stateClosed
+)
+
+var http2stateName = [...]string{
+ http2stateIdle: "Idle",
+ http2stateOpen: "Open",
+ http2stateHalfClosedLocal: "HalfClosedLocal",
+ http2stateHalfClosedRemote: "HalfClosedRemote",
+ http2stateClosed: "Closed",
+}
+
+func (st http2streamState) String() string {
+ return http2stateName[st]
+}
+
+// Setting is a setting parameter: which setting it is, and its value.
+type http2Setting struct {
+ // ID is which setting is being set.
+ // See http://http2.github.io/http2-spec/#SettingValues
+ ID http2SettingID
+
+ // Val is the value.
+ Val uint32
+}
+
+func (s http2Setting) String() string {
+ return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
+}
+
+// Valid reports whether the setting is valid.
+func (s http2Setting) Valid() error {
+ // Limits and error codes from 6.5.2 Defined SETTINGS Parameters
+ switch s.ID {
+ case http2SettingEnablePush:
+ if s.Val != 1 && s.Val != 0 {
+ return http2ConnectionError(http2ErrCodeProtocol)
+ }
+ case http2SettingInitialWindowSize:
+ if s.Val > 1<<31-1 {
+ return http2ConnectionError(http2ErrCodeFlowControl)
+ }
+ case http2SettingMaxFrameSize:
+ if s.Val < 16384 || s.Val > 1<<24-1 {
+ return http2ConnectionError(http2ErrCodeProtocol)
+ }
+ }
+ return nil
+}
+
+// A SettingID is an HTTP/2 setting as defined in
+// http://http2.github.io/http2-spec/#iana-settings
+type http2SettingID uint16
+
+const (
+ http2SettingHeaderTableSize http2SettingID = 0x1
+ http2SettingEnablePush http2SettingID = 0x2
+ http2SettingMaxConcurrentStreams http2SettingID = 0x3
+ http2SettingInitialWindowSize http2SettingID = 0x4
+ http2SettingMaxFrameSize http2SettingID = 0x5
+ http2SettingMaxHeaderListSize http2SettingID = 0x6
+)
+
+var http2settingName = map[http2SettingID]string{
+ http2SettingHeaderTableSize: "HEADER_TABLE_SIZE",
+ http2SettingEnablePush: "ENABLE_PUSH",
+ http2SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
+ http2SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
+ http2SettingMaxFrameSize: "MAX_FRAME_SIZE",
+ http2SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
+}
+
+func (s http2SettingID) String() string {
+ if v, ok := http2settingName[s]; ok {
+ return v
+ }
+ return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
+}
+
+// validWireHeaderFieldName reports whether v is a valid header field
+// name (key). See httpguts.ValidHeaderName for the base rules.
+//
+// Further, http2 says:
+//
+// "Just as in HTTP/1.x, header field names are strings of ASCII
+// characters that are compared in a case-insensitive
+// fashion. However, header field names MUST be converted to
+// lowercase prior to their encoding in HTTP/2. "
+func http2validWireHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+ }
+ for _, r := range v {
+ if !httpguts.IsTokenRune(r) {
+ return false
+ }
+ if 'A' <= r && r <= 'Z' {
+ return false
+ }
+ }
+ return true
+}
+
+func http2httpCodeString(code int) string {
+ switch code {
+ case 200:
+ return "200"
+ case 404:
+ return "404"
+ }
+ return strconv.Itoa(code)
+}
+
+// from pkg io
+type http2stringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+// A gate lets two goroutines coordinate their activities.
+type http2gate chan struct{}
+
+func (g http2gate) Done() { g <- struct{}{} }
+
+func (g http2gate) Wait() { <-g }
+
+// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
+type http2closeWaiter chan struct{}
+
+// Init makes a closeWaiter usable.
+// It exists so that a closeWaiter value can be placed inside a
+// larger struct and have the Mutex and Cond's memory in the same
+// allocation.
+func (cw *http2closeWaiter) Init() {
+ *cw = make(chan struct{})
+}
+
+// Close marks the closeWaiter as closed and unblocks any waiters.
+func (cw http2closeWaiter) Close() {
+ close(cw)
+}
+
+// Wait waits for the closeWaiter to become closed.
+func (cw http2closeWaiter) Wait() {
+ <-cw
+}
+
+// bufferedWriter is a buffered writer that writes to w.
+// Its buffered writer is lazily allocated as needed, to minimize
+// idle memory usage with many connections.
+type http2bufferedWriter struct {
+ _ http2incomparable
+ w io.Writer // immutable
+ bw *bufio.Writer // non-nil when data is buffered
+}
+
+func http2newBufferedWriter(w io.Writer) *http2bufferedWriter {
+ return &http2bufferedWriter{w: w}
+}
+
+// bufWriterPoolBufferSize is the size of bufio.Writer's
+// buffers created using bufWriterPool.
+//
+// TODO: pick a less arbitrary value? this is a bit under
+// (3 x typical 1500 byte MTU) at least. Other than that,
+// not much thought went into it.
+const http2bufWriterPoolBufferSize = 4 << 10
+
+var http2bufWriterPool = sync.Pool{
+ New: func() interface{} {
+ return bufio.NewWriterSize(nil, http2bufWriterPoolBufferSize)
+ },
+}
+
+func (w *http2bufferedWriter) Available() int {
+ if w.bw == nil {
+ return http2bufWriterPoolBufferSize
+ }
+ return w.bw.Available()
+}
+
+func (w *http2bufferedWriter) Write(p []byte) (n int, err error) {
+ if w.bw == nil {
+ bw := http2bufWriterPool.Get().(*bufio.Writer)
+ bw.Reset(w.w)
+ w.bw = bw
+ }
+ return w.bw.Write(p)
+}
+
+func (w *http2bufferedWriter) Flush() error {
+ bw := w.bw
+ if bw == nil {
+ return nil
+ }
+ err := bw.Flush()
+ bw.Reset(nil)
+ http2bufWriterPool.Put(bw)
+ w.bw = nil
+ return err
+}
+
+func http2mustUint31(v int32) uint32 {
+ if v < 0 || v > 2147483647 {
+ panic("out of range")
+ }
+ return uint32(v)
+}
+
+// bodyAllowedForStatus reports whether a given response status code
+// permits a body. See RFC 7230, section 3.3.
+func http2bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+type http2httpError struct {
+ _ http2incomparable
+ msg string
+ timeout bool
+}
+
+func (e *http2httpError) Error() string { return e.msg }
+
+func (e *http2httpError) Timeout() bool { return e.timeout }
+
+func (e *http2httpError) Temporary() bool { return true }
+
+var http2errTimeout error = &http2httpError{msg: "http2: timeout awaiting response headers", timeout: true}
+
+type http2connectionStater interface {
+ ConnectionState() tls.ConnectionState
+}
+
+var http2sorterPool = sync.Pool{New: func() interface{} { return new(http2sorter) }}
+
+type http2sorter struct {
+ v []string // owned by sorter
+}
+
+func (s *http2sorter) Len() int { return len(s.v) }
+
+func (s *http2sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
+
+func (s *http2sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
+
+// Keys returns the sorted keys of h.
+//
+// The returned slice is only valid until s is used again or returned to
+// its pool.
+func (s *http2sorter) Keys(h Header) []string {
+ keys := s.v[:0]
+ for k := range h {
+ keys = append(keys, k)
+ }
+ s.v = keys
+ sort.Sort(s)
+ return keys
+}
+
+func (s *http2sorter) SortStrings(ss []string) {
+	// Our sorter works on s.v, which the sorter owns, so
+ // stash it away while we sort the user's buffer.
+ save := s.v
+ s.v = ss
+ sort.Sort(s)
+ s.v = save
+}
+
+// validPseudoPath reports whether v is a valid :path pseudo-header
+// value. It must be either:
+//
+// - a non-empty string starting with '/'
+// - the string '*', for OPTIONS requests.
+//
+// For now this is only used as a quick check for deciding when to clean
+// up Opaque URLs before sending requests from the Transport.
+// See golang.org/issue/16847
+//
+// We used to enforce that the path also didn't start with "//", but
+// Google's GFE accepts such paths and Chrome sends them, so ignore
+// that part of the spec. See golang.org/issue/19103.
+func http2validPseudoPath(v string) bool {
+ return (len(v) > 0 && v[0] == '/') || v == "*"
+}
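+
+// Editor's note (illustrative): sample inputs and results:
+//
+//	http2validPseudoPath("/")        // true
+//	http2validPseudoPath("/a/b?q=1") // true
+//	http2validPseudoPath("*")        // true (OPTIONS)
+//	http2validPseudoPath("")         // false
+//	http2validPseudoPath("foo")      // false (no leading '/')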
+
+// incomparable is a zero-width, non-comparable type. Adding it to a struct
+// makes that struct also non-comparable, and generally doesn't add
+// any size (as long as it's first).
+type http2incomparable [0]func()
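+
+// Editor's note (illustrative): functions are not comparable in Go, so
+// embedding a zero-length array of func() turns an accidental == on
+// the enclosing struct into a compile-time error instead of a value
+// comparison:
+//
+//	type t struct {
+//		_ http2incomparable
+//		n int
+//	}
+//	// var a, b t
+//	// _ = a == b // compile error: struct containing [0]func() cannot be compared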
+
+// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
+// io.Pipe except there are no PipeReader/PipeWriter halves, and the
+// underlying buffer is an interface. (io.Pipe is always unbuffered)
+type http2pipe struct {
+ mu sync.Mutex
+ c sync.Cond // c.L lazily initialized to &p.mu
+ b http2pipeBuffer // nil when done reading
+ unread int // bytes unread when done
+ err error // read error once empty. non-nil means closed.
+ breakErr error // immediate read error (caller doesn't see rest of b)
+ donec chan struct{} // closed on error
+ readFn func() // optional code to run in Read before error
+}
+
+type http2pipeBuffer interface {
+ Len() int
+ io.Writer
+ io.Reader
+}
+
+// setBuffer initializes the pipe buffer.
+// It has no effect if the pipe is already closed.
+func (p *http2pipe) setBuffer(b http2pipeBuffer) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.err != nil || p.breakErr != nil {
+ return
+ }
+ p.b = b
+}
+
+func (p *http2pipe) Len() int {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.b == nil {
+ return p.unread
+ }
+ return p.b.Len()
+}
+
+// Read waits until data is available and copies bytes
+// from the buffer into d.
+func (p *http2pipe) Read(d []byte) (n int, err error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ for {
+ if p.breakErr != nil {
+ return 0, p.breakErr
+ }
+ if p.b != nil && p.b.Len() > 0 {
+ return p.b.Read(d)
+ }
+ if p.err != nil {
+ if p.readFn != nil {
+ p.readFn() // e.g. copy trailers
+ p.readFn = nil // not sticky like p.err
+ }
+ p.b = nil
+ return 0, p.err
+ }
+ p.c.Wait()
+ }
+}
+
+var http2errClosedPipeWrite = errors.New("write on closed buffer")
+
+// Write copies bytes from d into the buffer and wakes a reader.
+// It is an error to write more data than the buffer can hold.
+func (p *http2pipe) Write(d []byte) (n int, err error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ defer p.c.Signal()
+ if p.err != nil {
+ return 0, http2errClosedPipeWrite
+ }
+ if p.breakErr != nil {
+ p.unread += len(d)
+ return len(d), nil // discard when there is no reader
+ }
+ return p.b.Write(d)
+}
+
+// CloseWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err after all data has been
+// read.
+//
+// The error must be non-nil.
+func (p *http2pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
+
+// BreakWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err immediately, without
+// waiting for unread data.
+func (p *http2pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
+
+// closeWithErrorAndCode is like CloseWithError but also sets some code to run
+// in the caller's goroutine before returning the error.
+func (p *http2pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
+
+func (p *http2pipe) closeWithError(dst *error, err error, fn func()) {
+ if err == nil {
+ panic("err must be non-nil")
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ defer p.c.Signal()
+ if *dst != nil {
+ // Already been done.
+ return
+ }
+ p.readFn = fn
+ if dst == &p.breakErr {
+ if p.b != nil {
+ p.unread += p.b.Len()
+ }
+ p.b = nil
+ }
+ *dst = err
+ p.closeDoneLocked()
+}
+
+// requires p.mu be held.
+func (p *http2pipe) closeDoneLocked() {
+ if p.donec == nil {
+ return
+ }
+ // Close if unclosed. This isn't racy since we always
+ // hold p.mu while closing.
+ select {
+ case <-p.donec:
+ default:
+ close(p.donec)
+ }
+}
+
+// Err returns the error (if any) first set by BreakWithError or CloseWithError.
+func (p *http2pipe) Err() error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.breakErr != nil {
+ return p.breakErr
+ }
+ return p.err
+}
+
+// Done returns a channel which is closed if and when this pipe is closed
+// with CloseWithError.
+func (p *http2pipe) Done() <-chan struct{} {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.donec == nil {
+ p.donec = make(chan struct{})
+ if p.err != nil || p.breakErr != nil {
+ // Already hit an error.
+ p.closeDoneLocked()
+ }
+ }
+ return p.donec
+}
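+
+// Editor's sketch (not upstream code): a minimal write/read/close
+// cycle, assuming a bytes.Buffer as the pipeBuffer implementation:
+//
+//	var p http2pipe
+//	p.setBuffer(new(bytes.Buffer))
+//	p.Write([]byte("hi"))    // buffers the bytes, wakes a blocked Read
+//	buf := make([]byte, 2)
+//	p.Read(buf)              // returns 2, nil
+//	p.CloseWithError(io.EOF) // later Reads return io.EOF once drained
+//	<-p.Done()               // closed, since CloseWithError was called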
+
+const (
+ http2prefaceTimeout = 10 * time.Second
+ http2firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
+ http2handlerChunkWriteSize = 4 << 10
+ http2defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+ http2maxQueuedControlFrames = 10000
+)
+
+var (
+ http2errClientDisconnected = errors.New("client disconnected")
+ http2errClosedBody = errors.New("body closed by handler")
+ http2errHandlerComplete = errors.New("http2: request body closed due to handler exiting")
+ http2errStreamClosed = errors.New("http2: stream closed")
+)
+
+var http2responseWriterStatePool = sync.Pool{
+ New: func() interface{} {
+ rws := &http2responseWriterState{}
+ rws.bw = bufio.NewWriterSize(http2chunkWriter{rws}, http2handlerChunkWriteSize)
+ return rws
+ },
+}
+
+// Test hooks.
+var (
+ http2testHookOnConn func()
+ http2testHookGetServerConn func(*http2serverConn)
+ http2testHookOnPanicMu *sync.Mutex // nil except in tests
+ http2testHookOnPanic func(sc *http2serverConn, panicVal interface{}) (rePanic bool)
+)
+
+// Server is an HTTP/2 server.
+type http2Server struct {
+ // MaxHandlers limits the number of http.Handler ServeHTTP goroutines
+ // which may run at a time over all connections.
+ // Negative or zero means no limit.
+ // TODO: implement
+ MaxHandlers int
+
+ // MaxConcurrentStreams optionally specifies the number of
+ // concurrent streams that each client may have open at a
+ // time. This is unrelated to the number of http.Handler goroutines
+ // which may be active globally, which is MaxHandlers.
+ // If zero, MaxConcurrentStreams defaults to at least 100, per
+ // the HTTP/2 spec's recommendations.
+ MaxConcurrentStreams uint32
+
+ // MaxReadFrameSize optionally specifies the largest frame
+ // this server is willing to read. A valid value is between
+ // 16k and 16M, inclusive. If zero or otherwise invalid, a
+ // default value is used.
+ MaxReadFrameSize uint32
+
+ // PermitProhibitedCipherSuites, if true, permits the use of
+ // cipher suites prohibited by the HTTP/2 spec.
+ PermitProhibitedCipherSuites bool
+
+ // IdleTimeout specifies how long until idle clients should be
+ // closed with a GOAWAY frame. PING frames are not considered
+ // activity for the purposes of IdleTimeout.
+ IdleTimeout time.Duration
+
+ // MaxUploadBufferPerConnection is the size of the initial flow
+ // control window for each connection. The HTTP/2 spec does not
+ // allow this to be smaller than 65535 or larger than 2^32-1.
+ // If the value is outside this range, a default value will be
+ // used instead.
+ MaxUploadBufferPerConnection int32
+
+ // MaxUploadBufferPerStream is the size of the initial flow control
+ // window for each stream. The HTTP/2 spec does not allow this to
+ // be larger than 2^32-1. If the value is zero or larger than the
+ // maximum, a default value will be used instead.
+ MaxUploadBufferPerStream int32
+
+ // NewWriteScheduler constructs a write scheduler for a connection.
+ // If nil, a default scheduler is chosen.
+ NewWriteScheduler func() http2WriteScheduler
+
+ // CountError, if non-nil, is called on HTTP/2 server errors.
+ // It's intended to increment a metric for monitoring, such
+ // as an expvar or Prometheus metric.
+ // The errType consists of only ASCII word characters.
+ CountError func(errType string)
+
+ // Internal state. This is a pointer (rather than embedded directly)
+ // so that we don't embed a Mutex in this struct, which would make
+ // the struct non-copyable and might break some callers.
+ state *http2serverInternalState
+}
+
+func (s *http2Server) initialConnRecvWindowSize() int32 {
+ if s.MaxUploadBufferPerConnection > http2initialWindowSize {
+ return s.MaxUploadBufferPerConnection
+ }
+ return 1 << 20
+}
+
+func (s *http2Server) initialStreamRecvWindowSize() int32 {
+ if s.MaxUploadBufferPerStream > 0 {
+ return s.MaxUploadBufferPerStream
+ }
+ return 1 << 20
+}
+
+func (s *http2Server) maxReadFrameSize() uint32 {
+ if v := s.MaxReadFrameSize; v >= http2minMaxFrameSize && v <= http2maxFrameSize {
+ return v
+ }
+ return http2defaultMaxReadFrameSize
+}
+
+func (s *http2Server) maxConcurrentStreams() uint32 {
+ if v := s.MaxConcurrentStreams; v > 0 {
+ return v
+ }
+ return http2defaultMaxStreams
+}
+
+// maxQueuedControlFrames is the maximum number of control frames like
+// SETTINGS, PING and RST_STREAM that will be queued for writing before
+// the connection is closed to prevent memory exhaustion attacks.
+func (s *http2Server) maxQueuedControlFrames() int {
+ // TODO: if anybody asks, add a Server field, and remember to define the
+ // behavior of negative values.
+ return http2maxQueuedControlFrames
+}
+
+type http2serverInternalState struct {
+ mu sync.Mutex
+ activeConns map[*http2serverConn]struct{}
+}
+
+func (s *http2serverInternalState) registerConn(sc *http2serverConn) {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ s.activeConns[sc] = struct{}{}
+ s.mu.Unlock()
+}
+
+func (s *http2serverInternalState) unregisterConn(sc *http2serverConn) {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ delete(s.activeConns, sc)
+ s.mu.Unlock()
+}
+
+func (s *http2serverInternalState) startGracefulShutdown() {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ for sc := range s.activeConns {
+ sc.startGracefulShutdown()
+ }
+ s.mu.Unlock()
+}
+
+// ConfigureServer adds HTTP/2 support to a net/http Server.
+//
+// The configuration conf may be nil.
+//
+// ConfigureServer must be called before s begins serving.
+func http2ConfigureServer(s *Server, conf *http2Server) error {
+ if s == nil {
+ panic("nil *http.Server")
+ }
+ if conf == nil {
+ conf = new(http2Server)
+ }
+ conf.state = &http2serverInternalState{activeConns: make(map[*http2serverConn]struct{})}
+ if h1, h2 := s, conf; h2.IdleTimeout == 0 {
+ if h1.IdleTimeout != 0 {
+ h2.IdleTimeout = h1.IdleTimeout
+ } else {
+ h2.IdleTimeout = h1.ReadTimeout
+ }
+ }
+ s.RegisterOnShutdown(conf.state.startGracefulShutdown)
+
+ if s.TLSConfig == nil {
+ s.TLSConfig = new(tls.Config)
+ } else if s.TLSConfig.CipherSuites != nil && s.TLSConfig.MinVersion < tls.VersionTLS13 {
+ // If they already provided a TLS 1.0–1.2 CipherSuite list, return an
+ // error if it is missing ECDHE_RSA_WITH_AES_128_GCM_SHA256 or
+ // ECDHE_ECDSA_WITH_AES_128_GCM_SHA256.
+ haveRequired := false
+ for _, cs := range s.TLSConfig.CipherSuites {
+ switch cs {
+ case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ // Alternative MTI cipher to not discourage ECDSA-only servers.
+ // See http://golang.org/cl/30721 for further information.
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
+ haveRequired = true
+ }
+ }
+ if !haveRequired {
+ return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256)")
+ }
+ }
+
+ // Note: not setting MinVersion to tls.VersionTLS12,
+ // as we don't want to interfere with HTTP/1.1 traffic
+ // on the user's server. We enforce TLS 1.2 later once
+ // we accept a connection. Ideally this should be done
+ // during next-proto selection, but using TLS <1.2 with
+ // HTTP/2 is still the client's bug.
+
+ s.TLSConfig.PreferServerCipherSuites = true
+
+ if !http2strSliceContains(s.TLSConfig.NextProtos, http2NextProtoTLS) {
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, http2NextProtoTLS)
+ }
+ if !http2strSliceContains(s.TLSConfig.NextProtos, "http/1.1") {
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "http/1.1")
+ }
+
+ if s.TLSNextProto == nil {
+ s.TLSNextProto = map[string]func(*Server, *tls.Conn, Handler){}
+ }
+ protoHandler := func(hs *Server, c *tls.Conn, h Handler) {
+ if http2testHookOnConn != nil {
+ http2testHookOnConn()
+ }
+ // The TLSNextProto interface predates contexts, so
+ // the net/http package passes down its per-connection
+ // base context via an exported but unadvertised
+ // method on the Handler. This is for internal
+ // net/http<=>http2 use only.
+ var ctx context.Context
+ type baseContexter interface {
+ BaseContext() context.Context
+ }
+ if bc, ok := h.(baseContexter); ok {
+ ctx = bc.BaseContext()
+ }
+ conf.ServeConn(c, &http2ServeConnOpts{
+ Context: ctx,
+ Handler: h,
+ BaseConfig: hs,
+ })
+ }
+ s.TLSNextProto[http2NextProtoTLS] = protoHandler
+ return nil
+}
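+
+// Editor's sketch (not upstream code): enabling HTTP/2 on an existing
+// net/http server through this bundled API, assuming mux is an
+// existing *ServeMux and using placeholder certificate paths (in the
+// standalone golang.org/x/net/http2 package the same call is
+// http2.ConfigureServer):
+//
+//	srv := &Server{Addr: ":8443", Handler: mux}
+//	if err := http2ConfigureServer(srv, &http2Server{
+//		MaxConcurrentStreams: 250,
+//	}); err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))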
+
+// ServeConnOpts are options for the Server.ServeConn method.
+type http2ServeConnOpts struct {
+ // Context is the base context to use.
+ // If nil, context.Background is used.
+ Context context.Context
+
+ // BaseConfig optionally sets the base configuration
+ // values. If nil, defaults are used.
+ BaseConfig *Server
+
+ // Handler specifies which handler to use for processing
+ // requests. If nil, BaseConfig.Handler is used. If BaseConfig
+ // or BaseConfig.Handler is nil, http.DefaultServeMux is used.
+ Handler Handler
+}
+
+func (o *http2ServeConnOpts) context() context.Context {
+ if o != nil && o.Context != nil {
+ return o.Context
+ }
+ return context.Background()
+}
+
+func (o *http2ServeConnOpts) baseConfig() *Server {
+ if o != nil && o.BaseConfig != nil {
+ return o.BaseConfig
+ }
+ return new(Server)
+}
+
+func (o *http2ServeConnOpts) handler() Handler {
+ if o != nil {
+ if o.Handler != nil {
+ return o.Handler
+ }
+ if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
+ return o.BaseConfig.Handler
+ }
+ }
+ return DefaultServeMux
+}
+
+// ServeConn serves HTTP/2 requests on the provided connection and
+// blocks until the connection is no longer readable.
+//
+// ServeConn starts speaking HTTP/2 assuming that c has not had any
+// reads or writes. It writes its initial settings frame and expects
+// to be able to read the preface and settings frame from the
+// client. If c has a ConnectionState method like a *tls.Conn, the
+// ConnectionState is used to verify the TLS ciphersuite and to set
+// the Request.TLS field in Handlers.
+//
+// ServeConn does not support h2c by itself. Any h2c support must be
+// implemented in terms of providing a suitably-behaving net.Conn.
+//
+// The opts parameter is optional. If nil, default values are used.
+func (s *http2Server) ServeConn(c net.Conn, opts *http2ServeConnOpts) {
+ baseCtx, cancel := http2serverConnBaseContext(c, opts)
+ defer cancel()
+
+ sc := &http2serverConn{
+ srv: s,
+ hs: opts.baseConfig(),
+ conn: c,
+ baseCtx: baseCtx,
+ remoteAddrStr: c.RemoteAddr().String(),
+ bw: http2newBufferedWriter(c),
+ handler: opts.handler(),
+ streams: make(map[uint32]*http2stream),
+ readFrameCh: make(chan http2readFrameResult),
+ wantWriteFrameCh: make(chan http2FrameWriteRequest, 8),
+ serveMsgCh: make(chan interface{}, 8),
+ wroteFrameCh: make(chan http2frameWriteResult, 1), // buffered; one send in writeFrameAsync
+ bodyReadCh: make(chan http2bodyReadMsg), // buffering doesn't matter either way
+ doneServing: make(chan struct{}),
+ clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
+ advMaxStreams: s.maxConcurrentStreams(),
+ initialStreamSendWindowSize: http2initialWindowSize,
+ maxFrameSize: http2initialMaxFrameSize,
+ headerTableSize: http2initialHeaderTableSize,
+ serveG: http2newGoroutineLock(),
+ pushEnabled: true,
+ }
+
+ s.state.registerConn(sc)
+ defer s.state.unregisterConn(sc)
+
+ // The net/http package sets the write deadline from the
+ // http.Server.WriteTimeout during the TLS handshake, but then
+ // passes the connection off to us with the deadline already set.
+ // Write deadlines are set per stream in serverConn.newStream.
+ // Disarm the net.Conn write deadline here.
+ if sc.hs.WriteTimeout != 0 {
+ sc.conn.SetWriteDeadline(time.Time{})
+ }
+
+ if s.NewWriteScheduler != nil {
+ sc.writeSched = s.NewWriteScheduler()
+ } else {
+ sc.writeSched = http2NewPriorityWriteScheduler(nil)
+ }
+
+ // These start at the RFC-specified defaults. If there is a higher
+ // configured value for inflow, that will be updated when we send a
+ // WINDOW_UPDATE shortly after sending SETTINGS.
+ sc.flow.add(http2initialWindowSize)
+ sc.inflow.add(http2initialWindowSize)
+ sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
+
+ fr := http2NewFramer(sc.bw, c)
+ if s.CountError != nil {
+ fr.countError = s.CountError
+ }
+ fr.ReadMetaHeaders = hpack.NewDecoder(http2initialHeaderTableSize, nil)
+ fr.MaxHeaderListSize = sc.maxHeaderListSize()
+ fr.SetMaxReadFrameSize(s.maxReadFrameSize())
+ sc.framer = fr
+
+ if tc, ok := c.(http2connectionStater); ok {
+ sc.tlsState = new(tls.ConnectionState)
+ *sc.tlsState = tc.ConnectionState()
+ // 9.2 Use of TLS Features
+ // An implementation of HTTP/2 over TLS MUST use TLS
+ // 1.2 or higher with the restrictions on feature set
+ // and cipher suite described in this section. Due to
+ // implementation limitations, it might not be
+ // possible to fail TLS negotiation. An endpoint MUST
+ // immediately terminate an HTTP/2 connection that
+ // does not meet the TLS requirements described in
+ // this section with a connection error (Section
+ // 5.4.1) of type INADEQUATE_SECURITY.
+ if sc.tlsState.Version < tls.VersionTLS12 {
+ sc.rejectConn(http2ErrCodeInadequateSecurity, "TLS version too low")
+ return
+ }
+
+ if sc.tlsState.ServerName == "" {
+ // Client must use SNI, but we don't enforce that anymore,
+ // since it was causing problems when connecting to bare IP
+ // addresses during development.
+ //
+ // TODO: optionally enforce? Or enforce at the time we receive
+ // a new request, and verify the ServerName matches the :authority?
+ // But that precludes proxy situations, perhaps.
+ //
+ // So for now, do nothing here.
+ }
+
+ if !s.PermitProhibitedCipherSuites && http2isBadCipher(sc.tlsState.CipherSuite) {
+ // "Endpoints MAY choose to generate a connection error
+ // (Section 5.4.1) of type INADEQUATE_SECURITY if one of
+ // the prohibited cipher suites are negotiated."
+ //
+ // We choose that. In my opinion, the spec is weak
+ // here. It also says both parties must support at least
+ // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, so there's no
+ // excuse here. If we really must, we could allow an
+ // "AllowInsecureWeakCiphers" option on the server later.
+ // Let's see how it plays out first.
+ sc.rejectConn(http2ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
+ return
+ }
+ }
+
+ if hook := http2testHookGetServerConn; hook != nil {
+ hook(sc)
+ }
+ sc.serve()
+}
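+
+// Editor's sketch (not upstream code): serving a single accepted
+// connection directly, assuming tlsConn is a *tls.Conn that has
+// already negotiated the "h2" protocol:
+//
+//	conf := new(http2Server)
+//	conf.ServeConn(tlsConn, &http2ServeConnOpts{
+//		Handler: HandlerFunc(func(w ResponseWriter, r *Request) {
+//			w.Write([]byte("hello over h2"))
+//		}),
+//	})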
+
+func http2serverConnBaseContext(c net.Conn, opts *http2ServeConnOpts) (ctx context.Context, cancel func()) {
+ ctx, cancel = context.WithCancel(opts.context())
+ ctx = context.WithValue(ctx, LocalAddrContextKey, c.LocalAddr())
+ if hs := opts.baseConfig(); hs != nil {
+ ctx = context.WithValue(ctx, ServerContextKey, hs)
+ }
+ return
+}
+
+func (sc *http2serverConn) rejectConn(err http2ErrCode, debug string) {
+ sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
+ // ignoring errors. hanging up anyway.
+ sc.framer.WriteGoAway(0, err, []byte(debug))
+ sc.bw.Flush()
+ sc.conn.Close()
+}
+
+type http2serverConn struct {
+ // Immutable:
+ srv *http2Server
+ hs *Server
+ conn net.Conn
+ bw *http2bufferedWriter // writing to conn
+ handler Handler
+ baseCtx context.Context
+ framer *http2Framer
+ doneServing chan struct{} // closed when serverConn.serve ends
+ readFrameCh chan http2readFrameResult // written by serverConn.readFrames
+ wantWriteFrameCh chan http2FrameWriteRequest // from handlers -> serve
+ wroteFrameCh chan http2frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
+ bodyReadCh chan http2bodyReadMsg // from handlers -> serve
+ serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop
+ flow http2flow // conn-wide (not stream-specific) outbound flow control
+ inflow http2flow // conn-wide inbound flow control
+ tlsState *tls.ConnectionState // shared by all handlers, like net/http
+ remoteAddrStr string
+ writeSched http2WriteScheduler
+
+ // Everything following is owned by the serve loop; use serveG.check():
+ serveG http2goroutineLock // used to verify funcs are on serve()
+ pushEnabled bool
+ sawFirstSettings bool // got the initial SETTINGS frame after the preface
+ needToSendSettingsAck bool
+ unackedSettings int // how many SETTINGS have we sent without ACKs?
+ queuedControlFrames int // control frames in the writeSched queue
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
+ advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
+ curClientStreams uint32 // number of open streams initiated by the client
+ curPushedStreams uint32 // number of open streams initiated by server push
+ maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
+ maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
+ streams map[uint32]*http2stream
+ initialStreamSendWindowSize int32
+ maxFrameSize int32
+ headerTableSize uint32
+ peerMaxHeaderListSize uint32 // zero means unknown (default)
+ canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
+ writingFrame bool // started writing a frame (on serve goroutine or separate)
+ writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
+ needsFrameFlush bool // last frame write wasn't a flush
+ inGoAway bool // we've started to or sent GOAWAY
+ inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
+ needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ goAwayCode http2ErrCode
+ shutdownTimer *time.Timer // nil until used
+ idleTimer *time.Timer // nil if unused
+
+ // Owned by the writeFrameAsync goroutine:
+ headerWriteBuf bytes.Buffer
+ hpackEncoder *hpack.Encoder
+
+ // Used by startGracefulShutdown.
+ shutdownOnce sync.Once
+}
+
+func (sc *http2serverConn) maxHeaderListSize() uint32 {
+ n := sc.hs.MaxHeaderBytes
+ if n <= 0 {
+ n = DefaultMaxHeaderBytes
+ }
+ // http2's count is in a slightly different unit and includes 32 bytes per pair.
+ // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
+ const perFieldOverhead = 32 // per http2 spec
+ const typicalHeaders = 10 // conservative
+ return uint32(n + typicalHeaders*perFieldOverhead)
+}
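+
+// Editor's note (worked example): with the net/http default of
+// DefaultMaxHeaderBytes = 1 << 20 bytes, the advertised
+// SETTINGS_MAX_HEADER_LIST_SIZE is 1048576 + 10*32 = 1048896.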
+
+func (sc *http2serverConn) curOpenStreams() uint32 {
+ sc.serveG.check()
+ return sc.curClientStreams + sc.curPushedStreams
+}
+
+// stream represents a stream. This is the minimal metadata needed by
+// the serve goroutine. Most of the actual stream state is owned by
+// the http.Handler's goroutine in the responseWriter. Because the
+// responseWriter's responseWriterState is recycled at the end of a
+// handler, this struct intentionally has no pointer to the
+// *responseWriter{,State} itself, as the Handler ending nils out the
+// responseWriter's state field.
+type http2stream struct {
+ // immutable:
+ sc *http2serverConn
+ id uint32
+ body *http2pipe // non-nil if expecting DATA frames
+ cw http2closeWaiter // closed when the stream transitions to the closed state
+ ctx context.Context
+ cancelCtx func()
+
+ // owned by serverConn's serve loop:
+ bodyBytes int64 // body bytes seen so far
+ declBodyBytes int64 // or -1 if undeclared
+ flow http2flow // limits writing from Handler to client
+ inflow http2flow // what the client is allowed to POST/etc to us
+ state http2streamState
+ resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ writeDeadline *time.Timer // nil if unused
+
+ trailer Header // accumulated trailers
+ reqTrailer Header // handler's Request.Trailer
+}
+
+func (sc *http2serverConn) Framer() *http2Framer { return sc.framer }
+
+func (sc *http2serverConn) CloseConn() error { return sc.conn.Close() }
+
+func (sc *http2serverConn) Flush() error { return sc.bw.Flush() }
+
+func (sc *http2serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
+ return sc.hpackEncoder, &sc.headerWriteBuf
+}
+
+func (sc *http2serverConn) state(streamID uint32) (http2streamState, *http2stream) {
+ sc.serveG.check()
+ // http://tools.ietf.org/html/rfc7540#section-5.1
+ if st, ok := sc.streams[streamID]; ok {
+ return st.state, st
+ }
+ // "The first use of a new stream identifier implicitly closes all
+ // streams in the "idle" state that might have been initiated by
+ // that peer with a lower-valued stream identifier. For example, if
+ // a client sends a HEADERS frame on stream 7 without ever sending a
+ // frame on stream 5, then stream 5 transitions to the "closed"
+ // state when the first frame for stream 7 is sent or received."
+ if streamID%2 == 1 {
+ if streamID <= sc.maxClientStreamID {
+ return http2stateClosed, nil
+ }
+ } else {
+ if streamID <= sc.maxPushPromiseID {
+ return http2stateClosed, nil
+ }
+ }
+ return http2stateIdle, nil
+}
+
+// setConnState calls the net/http ConnState hook for this connection, if configured.
+// Note that the net/http package does StateNew and StateClosed for us.
+// There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
+func (sc *http2serverConn) setConnState(state ConnState) {
+ if sc.hs.ConnState != nil {
+ sc.hs.ConnState(sc.conn, state)
+ }
+}
+
+func (sc *http2serverConn) vlogf(format string, args ...interface{}) {
+ if http2VerboseLogs {
+ sc.logf(format, args...)
+ }
+}
+
+func (sc *http2serverConn) logf(format string, args ...interface{}) {
+ if lg := sc.hs.ErrorLog; lg != nil {
+ lg.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+// errno returns v's underlying uintptr, else 0.
+//
+// TODO: remove this helper function once http2 can use build
+// tags. See comment in isClosedConnError.
+func http2errno(v error) uintptr {
+ if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
+ return uintptr(rv.Uint())
+ }
+ return 0
+}
+
+// isClosedConnError reports whether err is an error from use of a closed
+// network connection.
+func http2isClosedConnError(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ // TODO: remove this string search and be more like the Windows
+ // case below. That might involve modifying the standard library
+ // to return better error types.
+ str := err.Error()
+ if strings.Contains(str, "use of closed network connection") {
+ return true
+ }
+
+ // TODO(bradfitz): x/tools/cmd/bundle doesn't really support
+ // build tags, so I can't make an http2_windows.go file with
+ // Windows-specific stuff. Fix that and move this, once we
+ // have a way to bundle this into std's net/http somehow.
+ if runtime.GOOS == "windows" {
+ if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
+ if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
+ const WSAECONNABORTED = 10053
+ const WSAECONNRESET = 10054
+ if n := http2errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func (sc *http2serverConn) condlogf(err error, format string, args ...interface{}) {
+ if err == nil {
+ return
+ }
+ if err == io.EOF || err == io.ErrUnexpectedEOF || http2isClosedConnError(err) || err == http2errPrefaceTimeout {
+ // Boring, expected errors.
+ sc.vlogf(format, args...)
+ } else {
+ sc.logf(format, args...)
+ }
+}
+
+func (sc *http2serverConn) canonicalHeader(v string) string {
+ sc.serveG.check()
+ http2buildCommonHeaderMapsOnce()
+ cv, ok := http2commonCanonHeader[v]
+ if ok {
+ return cv
+ }
+ cv, ok = sc.canonHeader[v]
+ if ok {
+ return cv
+ }
+ if sc.canonHeader == nil {
+ sc.canonHeader = make(map[string]string)
+ }
+ cv = CanonicalHeaderKey(v)
+ // maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of
+ // entries in the canonHeader cache. This should be larger than the number
+ // of unique, uncommon header keys likely to be sent by the peer, while not
+ // so high as to permit unreasonable memory usage if the peer sends an unbounded
+ // number of unique header keys.
+ const maxCachedCanonicalHeaders = 32
+ if len(sc.canonHeader) < maxCachedCanonicalHeaders {
+ sc.canonHeader[v] = cv
+ }
+ return cv
+}
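+
+// Editor's note (illustrative): a common wire key such as
+// "content-type" is resolved to "Content-Type" via the precomputed
+// common-header table; an uncommon key like "x-my-key" falls through
+// to CanonicalHeaderKey ("X-My-Key") and is cached per connection, up
+// to maxCachedCanonicalHeaders entries.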
+
+type http2readFrameResult struct {
+ f http2Frame // valid until readMore is called
+ err error
+
+ // readMore should be called once the consumer no longer needs or
+ // retains f. After readMore, f is invalid and more frames can be
+ // read.
+ readMore func()
+}
+
+// readFrames is the loop that reads incoming frames.
+// It takes care to only read one frame at a time, blocking until the
+// consumer is done with the frame.
+// It's run on its own goroutine.
+func (sc *http2serverConn) readFrames() {
+ gate := make(http2gate)
+ gateDone := gate.Done
+ for {
+ f, err := sc.framer.ReadFrame()
+ select {
+ case sc.readFrameCh <- http2readFrameResult{f, err, gateDone}:
+ case <-sc.doneServing:
+ return
+ }
+ select {
+ case <-gate:
+ case <-sc.doneServing:
+ return
+ }
+ if http2terminalReadFrameError(err) {
+ return
+ }
+ }
+}
+
+// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
+type http2frameWriteResult struct {
+ _ http2incomparable
+ wr http2FrameWriteRequest // what was written (or attempted)
+ err error // result of the writeFrame call
+}
+
+// writeFrameAsync runs in its own goroutine and writes a single frame
+// and then reports when it's done.
+// At most one goroutine can be running writeFrameAsync at a time per
+// serverConn.
+func (sc *http2serverConn) writeFrameAsync(wr http2FrameWriteRequest) {
+ err := wr.write.writeFrame(sc)
+ sc.wroteFrameCh <- http2frameWriteResult{wr: wr, err: err}
+}
+
+func (sc *http2serverConn) closeAllStreamsOnConnClose() {
+ sc.serveG.check()
+ for _, st := range sc.streams {
+ sc.closeStream(st, http2errClientDisconnected)
+ }
+}
+
+func (sc *http2serverConn) stopShutdownTimer() {
+ sc.serveG.check()
+ if t := sc.shutdownTimer; t != nil {
+ t.Stop()
+ }
+}
+
+func (sc *http2serverConn) notePanic() {
+ // Note: this is for serverConn.serve panicking, not http.Handler code.
+ if http2testHookOnPanicMu != nil {
+ http2testHookOnPanicMu.Lock()
+ defer http2testHookOnPanicMu.Unlock()
+ }
+ if http2testHookOnPanic != nil {
+ if e := recover(); e != nil {
+ if http2testHookOnPanic(sc, e) {
+ panic(e)
+ }
+ }
+ }
+}
+
+func (sc *http2serverConn) serve() {
+ sc.serveG.check()
+ defer sc.notePanic()
+ defer sc.conn.Close()
+ defer sc.closeAllStreamsOnConnClose()
+ defer sc.stopShutdownTimer()
+ defer close(sc.doneServing) // unblocks handlers trying to send
+
+ if http2VerboseLogs {
+ sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
+ }
+
+ sc.writeFrame(http2FrameWriteRequest{
+ write: http2writeSettings{
+ {http2SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
+ {http2SettingMaxConcurrentStreams, sc.advMaxStreams},
+ {http2SettingMaxHeaderListSize, sc.maxHeaderListSize()},
+ {http2SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
+ },
+ })
+ sc.unackedSettings++
+
+ // Each connection starts with initialWindowSize inflow tokens.
+ // If a higher value is configured, we add more tokens.
+ if diff := sc.srv.initialConnRecvWindowSize() - http2initialWindowSize; diff > 0 {
+ sc.sendWindowUpdate(nil, int(diff))
+ }
+
+ if err := sc.readPreface(); err != nil {
+ sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
+ return
+ }
+ // Now that we've got the preface, get us out of the
+ // "StateNew" state. We can't go directly to idle, though.
+ // Active means we read some data and anticipate a request. We'll
+ // do another Active when we get a HEADERS frame.
+ sc.setConnState(StateActive)
+ sc.setConnState(StateIdle)
+
+ if sc.srv.IdleTimeout != 0 {
+ sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
+ defer sc.idleTimer.Stop()
+ }
+
+ go sc.readFrames() // closed by defer sc.conn.Close above
+
+ settingsTimer := time.AfterFunc(http2firstSettingsTimeout, sc.onSettingsTimer)
+ defer settingsTimer.Stop()
+
+ loopNum := 0
+ for {
+ loopNum++
+ select {
+ case wr := <-sc.wantWriteFrameCh:
+ if se, ok := wr.write.(http2StreamError); ok {
+ sc.resetStream(se)
+ break
+ }
+ sc.writeFrame(wr)
+ case res := <-sc.wroteFrameCh:
+ sc.wroteFrame(res)
+ case res := <-sc.readFrameCh:
+ // Process any written frames before reading new frames from the client since a
+ // written frame could have triggered a new stream to be started.
+ if sc.writingFrameAsync {
+ select {
+ case wroteRes := <-sc.wroteFrameCh:
+ sc.wroteFrame(wroteRes)
+ default:
+ }
+ }
+ if !sc.processFrameFromReader(res) {
+ return
+ }
+ res.readMore()
+ if settingsTimer != nil {
+ settingsTimer.Stop()
+ settingsTimer = nil
+ }
+ case m := <-sc.bodyReadCh:
+ sc.noteBodyRead(m.st, m.n)
+ case msg := <-sc.serveMsgCh:
+ switch v := msg.(type) {
+ case func(int):
+ v(loopNum) // for testing
+ case *http2serverMessage:
+ switch v {
+ case http2settingsTimerMsg:
+ sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
+ return
+ case http2idleTimerMsg:
+ sc.vlogf("connection is idle")
+ sc.goAway(http2ErrCodeNo)
+ case http2shutdownTimerMsg:
+ sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
+ return
+ case http2gracefulShutdownMsg:
+ sc.startGracefulShutdownInternal()
+ default:
+ panic("unknown timer")
+ }
+ case *http2startPushRequest:
+ sc.startPush(v)
+ default:
+ panic(fmt.Sprintf("unexpected type %T", v))
+ }
+ }
+
+ // If the peer is causing us to generate a lot of control frames,
+ // but not reading them from us, assume they are trying to make us
+ // run out of memory.
+ if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
+ sc.vlogf("http2: too many control frames in send queue, closing connection")
+ return
+ }
+
+ // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
+ // with no error code (graceful shutdown), don't start the timer until
+ // all open streams have been completed.
+ sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame
+ gracefulShutdownComplete := sc.goAwayCode == http2ErrCodeNo && sc.curOpenStreams() == 0
+ if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != http2ErrCodeNo || gracefulShutdownComplete) {
+ sc.shutDownIn(http2goAwayTimeout)
+ }
+ }
+}
+
+func (sc *http2serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
+ select {
+ case <-sc.doneServing:
+ case <-sharedCh:
+ close(privateCh)
+ }
+}
+
+type http2serverMessage int
+
+// Message values sent to serveMsgCh.
+var (
+ http2settingsTimerMsg = new(http2serverMessage)
+ http2idleTimerMsg = new(http2serverMessage)
+ http2shutdownTimerMsg = new(http2serverMessage)
+ http2gracefulShutdownMsg = new(http2serverMessage)
+)
+
+func (sc *http2serverConn) onSettingsTimer() { sc.sendServeMsg(http2settingsTimerMsg) }
+
+func (sc *http2serverConn) onIdleTimer() { sc.sendServeMsg(http2idleTimerMsg) }
+
+func (sc *http2serverConn) onShutdownTimer() { sc.sendServeMsg(http2shutdownTimerMsg) }
+
+func (sc *http2serverConn) sendServeMsg(msg interface{}) {
+ sc.serveG.checkNotOn() // NOT
+ select {
+ case sc.serveMsgCh <- msg:
+ case <-sc.doneServing:
+ }
+}
+
+var http2errPrefaceTimeout = errors.New("timeout waiting for client preface")
+
+// readPreface reads the ClientPreface greeting from the peer or
+// returns errPrefaceTimeout on timeout, or an error if the greeting
+// is invalid.
+func (sc *http2serverConn) readPreface() error {
+ errc := make(chan error, 1)
+ go func() {
+ // Read the client preface
+ buf := make([]byte, len(http2ClientPreface))
+ if _, err := io.ReadFull(sc.conn, buf); err != nil {
+ errc <- err
+ } else if !bytes.Equal(buf, http2clientPreface) {
+ errc <- fmt.Errorf("bogus greeting %q", buf)
+ } else {
+ errc <- nil
+ }
+ }()
+ timer := time.NewTimer(http2prefaceTimeout) // TODO: configurable on *Server?
+ defer timer.Stop()
+ select {
+ case <-timer.C:
+ return http2errPrefaceTimeout
+ case err := <-errc:
+ if err == nil {
+ if http2VerboseLogs {
+ sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
+ }
+ }
+ return err
+ }
+}
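+
+// Editor's note: the expected greeting is the fixed 24-octet client
+// connection preface from RFC 7540 section 3.5:
+//
+//	"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"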
+
+var http2errChanPool = sync.Pool{
+ New: func() interface{} { return make(chan error, 1) },
+}
+
+var http2writeDataPool = sync.Pool{
+ New: func() interface{} { return new(http2writeData) },
+}
+
+// writeDataFromHandler writes DATA response frames from a handler on
+// the given stream.
+func (sc *http2serverConn) writeDataFromHandler(stream *http2stream, data []byte, endStream bool) error {
+ ch := http2errChanPool.Get().(chan error)
+ writeArg := http2writeDataPool.Get().(*http2writeData)
+ *writeArg = http2writeData{stream.id, data, endStream}
+ err := sc.writeFrameFromHandler(http2FrameWriteRequest{
+ write: writeArg,
+ stream: stream,
+ done: ch,
+ })
+ if err != nil {
+ return err
+ }
+ var frameWriteDone bool // the frame write is done (successfully or not)
+ select {
+ case err = <-ch:
+ frameWriteDone = true
+ case <-sc.doneServing:
+ return http2errClientDisconnected
+ case <-stream.cw:
+ // If both ch and stream.cw were ready (as might
+ // happen on the final Write after an http.Handler
+ // ends), prefer the write result. Otherwise this
+ // might just be us successfully closing the stream.
+ // The writeFrameAsync and serve goroutines guarantee
+ // that the ch send will happen before the stream.cw
+ // close.
+ select {
+ case err = <-ch:
+ frameWriteDone = true
+ default:
+ return http2errStreamClosed
+ }
+ }
+ http2errChanPool.Put(ch)
+ if frameWriteDone {
+ http2writeDataPool.Put(writeArg)
+ }
+ return err
+}
+
+// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
+// if the connection has gone away.
+//
+// This must not be run from the serve goroutine itself, else it might
+// deadlock writing to sc.wantWriteFrameCh (which is only mildly
+// buffered and is read by serve itself). If you're on the serve
+// goroutine, call writeFrame instead.
+func (sc *http2serverConn) writeFrameFromHandler(wr http2FrameWriteRequest) error {
+ sc.serveG.checkNotOn() // NOT
+ select {
+ case sc.wantWriteFrameCh <- wr:
+ return nil
+ case <-sc.doneServing:
+ // Serve loop is gone.
+ // Client has closed their connection to the server.
+ return http2errClientDisconnected
+ }
+}
+
+// writeFrame schedules a frame to write and sends it if there's nothing
+// already being written.
+//
+// There is no pushback here (the serve goroutine never blocks). It's
+// the http.Handlers that block, waiting for their previous frames to
+// make it onto the wire.
+//
+// If you're not on the serve goroutine, use writeFrameFromHandler instead.
+func (sc *http2serverConn) writeFrame(wr http2FrameWriteRequest) {
+ sc.serveG.check()
+
+ // If true, wr will not be written and wr.done will not be signaled.
+ var ignoreWrite bool
+
+ // We are not allowed to write frames on closed streams. RFC 7540 Section
+ // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on
+ // a closed stream." Our server never sends PRIORITY, so that exception
+ // does not apply.
+ //
+ // The serverConn might close an open stream while the stream's handler
+ // is still running. For example, the server might close a stream when it
+ // receives bad data from the client. If this happens, the handler might
+ // attempt to write a frame after the stream has been closed (since the
+ // handler hasn't yet been notified of the close). In this case, we simply
+ // ignore the frame. The handler will notice that the stream is closed when
+ // it waits for the frame to be written.
+ //
+ // As an exception to this rule, we allow sending RST_STREAM after close.
+ // This allows us to immediately reject new streams without tracking any
+ // state for those streams (except for the queued RST_STREAM frame). This
+ // may result in duplicate RST_STREAMs in some cases, but the client should
+ // ignore those.
+ if wr.StreamID() != 0 {
+ _, isReset := wr.write.(http2StreamError)
+ if state, _ := sc.state(wr.StreamID()); state == http2stateClosed && !isReset {
+ ignoreWrite = true
+ }
+ }
+
+ // Don't send a 100-continue response if we've already sent headers.
+ // See golang.org/issue/14030.
+ switch wr.write.(type) {
+ case *http2writeResHeaders:
+ wr.stream.wroteHeaders = true
+ case http2write100ContinueHeadersFrame:
+ if wr.stream.wroteHeaders {
+ // We do not need to notify wr.done because this frame is
+ // never written with wr.done != nil.
+ if wr.done != nil {
+ panic("wr.done != nil for write100ContinueHeadersFrame")
+ }
+ ignoreWrite = true
+ }
+ }
+
+ if !ignoreWrite {
+ if wr.isControl() {
+ sc.queuedControlFrames++
+ // For extra safety, detect wraparounds, which should not happen,
+ // and pull the plug.
+ if sc.queuedControlFrames < 0 {
+ sc.conn.Close()
+ }
+ }
+ sc.writeSched.Push(wr)
+ }
+ sc.scheduleFrameWrite()
+}
+
+// startFrameWrite begins writing wr, on a separate goroutine when the
+// write might block on the network, and updates the serve goroutine's
+// state of the world from the info in wr.
+func (sc *http2serverConn) startFrameWrite(wr http2FrameWriteRequest) {
+ sc.serveG.check()
+ if sc.writingFrame {
+ panic("internal error: can only be writing one frame at a time")
+ }
+
+ st := wr.stream
+ if st != nil {
+ switch st.state {
+ case http2stateHalfClosedLocal:
+ switch wr.write.(type) {
+ case http2StreamError, http2handlerPanicRST, http2writeWindowUpdate:
+ // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE
+ // in this state. (We never send PRIORITY from the server, so that is not checked.)
+ default:
+ panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
+ }
+ case http2stateClosed:
+ panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
+ }
+ }
+ if wpp, ok := wr.write.(*http2writePushPromise); ok {
+ var err error
+ wpp.promisedID, err = wpp.allocatePromisedID()
+ if err != nil {
+ sc.writingFrameAsync = false
+ wr.replyToWriter(err)
+ return
+ }
+ }
+
+ sc.writingFrame = true
+ sc.needsFrameFlush = true
+ if wr.write.staysWithinBuffer(sc.bw.Available()) {
+ sc.writingFrameAsync = false
+ err := wr.write.writeFrame(sc)
+ sc.wroteFrame(http2frameWriteResult{wr: wr, err: err})
+ } else {
+ sc.writingFrameAsync = true
+ go sc.writeFrameAsync(wr)
+ }
+}
+
+// errHandlerPanicked is the error given to any callers blocked in a read from
+// Request.Body when the main goroutine panics. Since most handlers read in the
+// main ServeHTTP goroutine, this will show up rarely.
+var http2errHandlerPanicked = errors.New("http2: handler panicked")
+
+// wroteFrame is called on the serve goroutine with the result of
+// whatever happened on writeFrameAsync.
+func (sc *http2serverConn) wroteFrame(res http2frameWriteResult) {
+ sc.serveG.check()
+ if !sc.writingFrame {
+ panic("internal error: expected to be already writing a frame")
+ }
+ sc.writingFrame = false
+ sc.writingFrameAsync = false
+
+ wr := res.wr
+
+ if http2writeEndsStream(wr.write) {
+ st := wr.stream
+ if st == nil {
+ panic("internal error: expecting non-nil stream")
+ }
+ switch st.state {
+ case http2stateOpen:
+ // Here we would go to stateHalfClosedLocal in
+ // theory, but since our handler is done and
+ // the net/http package provides no mechanism
+ // for closing a ResponseWriter while still
+ // reading data (see possible TODO at top of
+ // this file), we go into closed state here
+ // anyway, after telling the peer we're
+ // hanging up on them. We'll transition to
+ // stateClosed after the RST_STREAM frame is
+ // written.
+ st.state = http2stateHalfClosedLocal
+ // Section 8.1: a server MAY request that the client abort
+ // transmission of a request without error by sending a
+ // RST_STREAM with an error code of NO_ERROR after sending
+ // a complete response.
+ sc.resetStream(http2streamError(st.id, http2ErrCodeNo))
+ case http2stateHalfClosedRemote:
+ sc.closeStream(st, http2errHandlerComplete)
+ }
+ } else {
+ switch v := wr.write.(type) {
+ case http2StreamError:
+ // st may be unknown if the RST_STREAM was generated to reject bad input.
+ if st, ok := sc.streams[v.StreamID]; ok {
+ sc.closeStream(st, v)
+ }
+ case http2handlerPanicRST:
+ sc.closeStream(wr.stream, http2errHandlerPanicked)
+ }
+ }
+
+ // Reply (if requested) to unblock the ServeHTTP goroutine.
+ wr.replyToWriter(res.err)
+
+ sc.scheduleFrameWrite()
+}
+
+// scheduleFrameWrite tickles the frame writing scheduler.
+//
+// If a frame is already being written, nothing happens. This will be called again
+// when the frame is done being written.
+//
+// If a frame isn't being written and we need to send one, the best frame
+// to send is selected by writeSched.
+//
+// If a frame isn't being written and there's nothing else to send, we
+// flush the write buffer.
+func (sc *http2serverConn) scheduleFrameWrite() {
+ sc.serveG.check()
+ if sc.writingFrame || sc.inFrameScheduleLoop {
+ return
+ }
+ sc.inFrameScheduleLoop = true
+ for !sc.writingFrameAsync {
+ if sc.needToSendGoAway {
+ sc.needToSendGoAway = false
+ sc.startFrameWrite(http2FrameWriteRequest{
+ write: &http2writeGoAway{
+ maxStreamID: sc.maxClientStreamID,
+ code: sc.goAwayCode,
+ },
+ })
+ continue
+ }
+ if sc.needToSendSettingsAck {
+ sc.needToSendSettingsAck = false
+ sc.startFrameWrite(http2FrameWriteRequest{write: http2writeSettingsAck{}})
+ continue
+ }
+ if !sc.inGoAway || sc.goAwayCode == http2ErrCodeNo {
+ if wr, ok := sc.writeSched.Pop(); ok {
+ if wr.isControl() {
+ sc.queuedControlFrames--
+ }
+ sc.startFrameWrite(wr)
+ continue
+ }
+ }
+ if sc.needsFrameFlush {
+ sc.startFrameWrite(http2FrameWriteRequest{write: http2flushFrameWriter{}})
+ sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
+ continue
+ }
+ break
+ }
+ sc.inFrameScheduleLoop = false
+}
+
+// startGracefulShutdown gracefully shuts down a connection. This
+// sends GOAWAY with ErrCodeNo to tell the client we're gracefully
+// shutting down. The connection isn't closed until all current
+// streams are done.
+//
+// startGracefulShutdown returns immediately; it does not wait until
+// the connection has shut down.
+func (sc *http2serverConn) startGracefulShutdown() {
+ sc.serveG.checkNotOn() // NOT
+ sc.shutdownOnce.Do(func() { sc.sendServeMsg(http2gracefulShutdownMsg) })
+}
+
+// After sending GOAWAY with an error code (non-graceful shutdown), the
+// connection will close after goAwayTimeout.
+//
+// If we close the connection immediately after sending GOAWAY, there may
+// be unsent data in our kernel receive buffer, which will cause the kernel
+// to send a TCP RST on close() instead of a FIN. This RST will abort the
+// connection immediately, whether or not the client had received the GOAWAY.
+//
+// Ideally we should delay for at least 1 RTT + epsilon so the client has
+// a chance to read the GOAWAY and stop sending messages. Measuring RTT
+// is hard, so we approximate with 1 second. See golang.org/issue/18701.
+//
+// This is a var so it can be shorter in tests, where all requests use the
+// loopback interface, making the expected RTT very small.
+//
+// TODO: configurable?
+var http2goAwayTimeout = 1 * time.Second
+
+func (sc *http2serverConn) startGracefulShutdownInternal() {
+ sc.goAway(http2ErrCodeNo)
+}
+
+func (sc *http2serverConn) goAway(code http2ErrCode) {
+ sc.serveG.check()
+ if sc.inGoAway {
+ if sc.goAwayCode == http2ErrCodeNo {
+ sc.goAwayCode = code
+ }
+ return
+ }
+ sc.inGoAway = true
+ sc.needToSendGoAway = true
+ sc.goAwayCode = code
+ sc.scheduleFrameWrite()
+}
+
+func (sc *http2serverConn) shutDownIn(d time.Duration) {
+ sc.serveG.check()
+ sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
+}
+
+func (sc *http2serverConn) resetStream(se http2StreamError) {
+ sc.serveG.check()
+ sc.writeFrame(http2FrameWriteRequest{write: se})
+ if st, ok := sc.streams[se.StreamID]; ok {
+ st.resetQueued = true
+ }
+}
+
+// processFrameFromReader processes the serve loop's read from readFrameCh from the
+// frame-reading goroutine.
+// processFrameFromReader returns whether the connection should be kept open.
+func (sc *http2serverConn) processFrameFromReader(res http2readFrameResult) bool {
+ sc.serveG.check()
+ err := res.err
+ if err != nil {
+ if err == http2ErrFrameTooLarge {
+ sc.goAway(http2ErrCodeFrameSize)
+ return true // goAway will close the loop
+ }
+ clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || http2isClosedConnError(err)
+ if clientGone {
+ // TODO: could we also get into this state if
+ // the peer does a half close
+ // (e.g. CloseWrite) because they're done
+ // sending frames but still want
+ // our open replies? Investigate.
+ // TODO: add CloseWrite to crypto/tls.Conn first
+ // so we have a way to test this? I suppose
+ // just for testing we could have a non-TLS mode.
+ return false
+ }
+ } else {
+ f := res.f
+ if http2VerboseLogs {
+ sc.vlogf("http2: server read frame %v", http2summarizeFrame(f))
+ }
+ err = sc.processFrame(f)
+ if err == nil {
+ return true
+ }
+ }
+
+ switch ev := err.(type) {
+ case http2StreamError:
+ sc.resetStream(ev)
+ return true
+ case http2goAwayFlowError:
+ sc.goAway(http2ErrCodeFlowControl)
+ return true
+ case http2ConnectionError:
+ sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
+ sc.goAway(http2ErrCode(ev))
+ return true // goAway will handle shutdown
+ default:
+ if res.err != nil {
+ sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
+ } else {
+ sc.logf("http2: server closing client connection: %v", err)
+ }
+ return false
+ }
+}
+
+func (sc *http2serverConn) processFrame(f http2Frame) error {
+ sc.serveG.check()
+
+ // First frame received must be SETTINGS.
+ if !sc.sawFirstSettings {
+ if _, ok := f.(*http2SettingsFrame); !ok {
+ return sc.countError("first_settings", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ sc.sawFirstSettings = true
+ }
+
+ switch f := f.(type) {
+ case *http2SettingsFrame:
+ return sc.processSettings(f)
+ case *http2MetaHeadersFrame:
+ return sc.processHeaders(f)
+ case *http2WindowUpdateFrame:
+ return sc.processWindowUpdate(f)
+ case *http2PingFrame:
+ return sc.processPing(f)
+ case *http2DataFrame:
+ return sc.processData(f)
+ case *http2RSTStreamFrame:
+ return sc.processResetStream(f)
+ case *http2PriorityFrame:
+ return sc.processPriority(f)
+ case *http2GoAwayFrame:
+ return sc.processGoAway(f)
+ case *http2PushPromiseFrame:
+ // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
+ // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+ return sc.countError("push_promise", http2ConnectionError(http2ErrCodeProtocol))
+ default:
+ sc.vlogf("http2: server ignoring frame: %v", f.Header())
+ return nil
+ }
+}
+
+func (sc *http2serverConn) processPing(f *http2PingFrame) error {
+ sc.serveG.check()
+ if f.IsAck() {
+ // 6.7 PING: "An endpoint MUST NOT respond to PING frames
+ // containing this flag."
+ return nil
+ }
+ if f.StreamID != 0 {
+ // "PING frames are not associated with any individual
+ // stream. If a PING frame is received with a stream
+ // identifier field value other than 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR."
+ return sc.countError("ping_on_stream", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ if sc.inGoAway && sc.goAwayCode != http2ErrCodeNo {
+ return nil
+ }
+ sc.writeFrame(http2FrameWriteRequest{write: http2writePingAck{f}})
+ return nil
+}
+
+func (sc *http2serverConn) processWindowUpdate(f *http2WindowUpdateFrame) error {
+ sc.serveG.check()
+ switch {
+ case f.StreamID != 0: // stream-level flow control
+ state, st := sc.state(f.StreamID)
+ if state == http2stateIdle {
+ // Section 5.1: "Receiving any frame other than HEADERS
+ // or PRIORITY on a stream in this state MUST be
+ // treated as a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR."
+ return sc.countError("stream_idle", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ if st == nil {
+ // "WINDOW_UPDATE can be sent by a peer that has sent a
+ // frame bearing the END_STREAM flag. This means that a
+ // receiver could receive a WINDOW_UPDATE frame on a "half
+ // closed (remote)" or "closed" stream. A receiver MUST
+ // NOT treat this as an error, see Section 5.1."
+ return nil
+ }
+ if !st.flow.add(int32(f.Increment)) {
+ return sc.countError("bad_flow", http2streamError(f.StreamID, http2ErrCodeFlowControl))
+ }
+ default: // connection-level flow control
+ if !sc.flow.add(int32(f.Increment)) {
+ return http2goAwayFlowError{}
+ }
+ }
+ sc.scheduleFrameWrite()
+ return nil
+}
+
+func (sc *http2serverConn) processResetStream(f *http2RSTStreamFrame) error {
+ sc.serveG.check()
+
+ state, st := sc.state(f.StreamID)
+ if state == http2stateIdle {
+ // 6.4 "RST_STREAM frames MUST NOT be sent for a
+ // stream in the "idle" state. If a RST_STREAM frame
+ // identifying an idle stream is received, the
+ // recipient MUST treat this as a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR."
+ return sc.countError("reset_idle_stream", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ if st != nil {
+ st.cancelCtx()
+ sc.closeStream(st, http2streamError(f.StreamID, f.ErrCode))
+ }
+ return nil
+}
+
+func (sc *http2serverConn) closeStream(st *http2stream, err error) {
+ sc.serveG.check()
+ if st.state == http2stateIdle || st.state == http2stateClosed {
+ panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
+ }
+ st.state = http2stateClosed
+ if st.writeDeadline != nil {
+ st.writeDeadline.Stop()
+ }
+ if st.isPushed() {
+ sc.curPushedStreams--
+ } else {
+ sc.curClientStreams--
+ }
+ delete(sc.streams, st.id)
+ if len(sc.streams) == 0 {
+ sc.setConnState(StateIdle)
+ if sc.srv.IdleTimeout != 0 {
+ sc.idleTimer.Reset(sc.srv.IdleTimeout)
+ }
+ if http2h1ServerKeepAlivesDisabled(sc.hs) {
+ sc.startGracefulShutdownInternal()
+ }
+ }
+ if p := st.body; p != nil {
+ // Return any buffered unread bytes worth of conn-level flow control.
+ // See golang.org/issue/16481
+ sc.sendWindowUpdate(nil, p.Len())
+
+ p.CloseWithError(err)
+ }
+ st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
+ sc.writeSched.CloseStream(st.id)
+}
+
+func (sc *http2serverConn) processSettings(f *http2SettingsFrame) error {
+ sc.serveG.check()
+ if f.IsAck() {
+ sc.unackedSettings--
+ if sc.unackedSettings < 0 {
+ // Why is the peer ACKing settings we never sent?
+ // The spec doesn't mention this case, but
+ // hang up on them anyway.
+ return sc.countError("ack_mystery", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ return nil
+ }
+ if f.NumSettings() > 100 || f.HasDuplicates() {
+ // This isn't actually in the spec, but hang up on
+ // suspiciously large settings frames or those with
+ // duplicate entries.
+ return sc.countError("settings_big_or_dups", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ if err := f.ForeachSetting(sc.processSetting); err != nil {
+ return err
+ }
+ // TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be
+ // acknowledged individually, even if multiple are received before the ACK.
+ sc.needToSendSettingsAck = true
+ sc.scheduleFrameWrite()
+ return nil
+}
+
+func (sc *http2serverConn) processSetting(s http2Setting) error {
+ sc.serveG.check()
+ if err := s.Valid(); err != nil {
+ return err
+ }
+ if http2VerboseLogs {
+ sc.vlogf("http2: server processing setting %v", s)
+ }
+ switch s.ID {
+ case http2SettingHeaderTableSize:
+ sc.headerTableSize = s.Val
+ sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
+ case http2SettingEnablePush:
+ sc.pushEnabled = s.Val != 0
+ case http2SettingMaxConcurrentStreams:
+ sc.clientMaxStreams = s.Val
+ case http2SettingInitialWindowSize:
+ return sc.processSettingInitialWindowSize(s.Val)
+ case http2SettingMaxFrameSize:
+ sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
+ case http2SettingMaxHeaderListSize:
+ sc.peerMaxHeaderListSize = s.Val
+ default:
+ // Unknown setting: "An endpoint that receives a SETTINGS
+ // frame with any unknown or unsupported identifier MUST
+ // ignore that setting."
+ if http2VerboseLogs {
+ sc.vlogf("http2: server ignoring unknown setting %v", s)
+ }
+ }
+ return nil
+}
+
+func (sc *http2serverConn) processSettingInitialWindowSize(val uint32) error {
+ sc.serveG.check()
+ // Note: val already validated to be within range by
+ // processSetting's Valid call.
+
+ // "A SETTINGS frame can alter the initial flow control window
+ // size for all current streams. When the value of
+ // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
+ // adjust the size of all stream flow control windows that it
+ // maintains by the difference between the new value and the
+ // old value."
+ old := sc.initialStreamSendWindowSize
+ sc.initialStreamSendWindowSize = int32(val)
+ growth := int32(val) - old // may be negative
+ for _, st := range sc.streams {
+ if !st.flow.add(growth) {
+ // 6.9.2 Initial Flow Control Window Size
+ // "An endpoint MUST treat a change to
+ // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
+ // control window to exceed the maximum size as a
+ // connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR."
+ return sc.countError("setting_win_size", http2ConnectionError(http2ErrCodeFlowControl))
+ }
+ }
+ return nil
+}
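+
+// A worked example of the adjustment above (illustrative): if the peer
+// raises SETTINGS_INITIAL_WINDOW_SIZE from the default 65535 to 131070,
+// growth is +65535 and every open stream's send window grows by that
+// amount; lowering the setting yields a negative growth, which flow.add
+// applies the same way.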
+
+func (sc *http2serverConn) processData(f *http2DataFrame) error {
+ sc.serveG.check()
+ id := f.Header().StreamID
+ if sc.inGoAway && (sc.goAwayCode != http2ErrCodeNo || id > sc.maxClientStreamID) {
+ // Discard all DATA frames if the GOAWAY is due to an
+ // error, or:
+ //
+ // Section 6.8: After sending a GOAWAY frame, the sender
+ // can discard frames for streams initiated by the
+ // receiver with identifiers higher than the identified
+ // last stream.
+ return nil
+ }
+
+ data := f.Data()
+ state, st := sc.state(id)
+ if id == 0 || state == http2stateIdle {
+ // Section 6.1: "DATA frames MUST be associated with a
+ // stream. If a DATA frame is received whose stream
+ // identifier field is 0x0, the recipient MUST respond
+ // with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR."
+ //
+ // Section 5.1: "Receiving any frame other than HEADERS
+ // or PRIORITY on a stream in this state MUST be
+ // treated as a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR."
+ return sc.countError("data_on_idle", http2ConnectionError(http2ErrCodeProtocol))
+ }
+
+ // "If a DATA frame is received whose stream is not in "open"
+ // or "half closed (local)" state, the recipient MUST respond
+ // with a stream error (Section 5.4.2) of type STREAM_CLOSED."
+ if st == nil || state != http2stateOpen || st.gotTrailerHeader || st.resetQueued {
+ // This includes sending a RST_STREAM if the stream is
+ // in stateHalfClosedLocal (which currently means that
+ // the http.Handler returned, so it's done reading &
+ // done writing). Try to stop the client from sending
+ // more DATA.
+
+ // But still enforce their connection-level flow control,
+ // and return any flow control bytes since we're not going
+ // to consume them.
+ if sc.inflow.available() < int32(f.Length) {
+ return sc.countError("data_flow", http2streamError(id, http2ErrCodeFlowControl))
+ }
+ // Deduct the flow control from inflow, since we're
+ // going to immediately add it back in
+ // sendWindowUpdate, which also schedules sending the
+ // frames.
+ sc.inflow.take(int32(f.Length))
+ sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+
+ if st != nil && st.resetQueued {
+ // Already have a stream error in flight. Don't send another.
+ return nil
+ }
+ return sc.countError("closed", http2streamError(id, http2ErrCodeStreamClosed))
+ }
+ if st.body == nil {
+ panic("internal error: should have a body in this state")
+ }
+
+ // Sender sending more than they'd declared?
+ if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
+ st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
+ // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
+ // value of a content-length header field does not equal the sum of the
+ // DATA frame payload lengths that form the body.
+ return sc.countError("send_too_much", http2streamError(id, http2ErrCodeProtocol))
+ }
+ if f.Length > 0 {
+ // Check whether the client has flow control quota.
+ if st.inflow.available() < int32(f.Length) {
+ return sc.countError("flow_on_data_length", http2streamError(id, http2ErrCodeFlowControl))
+ }
+ st.inflow.take(int32(f.Length))
+
+ if len(data) > 0 {
+ wrote, err := st.body.Write(data)
+ if err != nil {
+ sc.sendWindowUpdate(nil, int(f.Length)-wrote)
+ return sc.countError("body_write_err", http2streamError(id, http2ErrCodeStreamClosed))
+ }
+ if wrote != len(data) {
+ panic("internal error: bad Writer")
+ }
+ st.bodyBytes += int64(len(data))
+ }
+
+ // Return any padded flow control now, since we won't
+ // refund it later on body reads.
+ if pad := int32(f.Length) - int32(len(data)); pad > 0 {
+ sc.sendWindowUpdate32(nil, pad)
+ sc.sendWindowUpdate32(st, pad)
+ }
+ }
+ if f.StreamEnded() {
+ st.endStream()
+ }
+ return nil
+}
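+
+// A worked example of the padding refund above (illustrative): a DATA
+// frame with Length 1005 carrying 1000 bytes of data has pad == 5, so
+// five bytes of conn-level and stream-level flow control are returned
+// immediately rather than when the handler reads the body.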
+
+func (sc *http2serverConn) processGoAway(f *http2GoAwayFrame) error {
+ sc.serveG.check()
+ if f.ErrCode != http2ErrCodeNo {
+ sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f)
+ } else {
+ sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
+ }
+ sc.startGracefulShutdownInternal()
+ // http://tools.ietf.org/html/rfc7540#section-6.8
+ // We should not create any new streams, which means we should disable push.
+ sc.pushEnabled = false
+ return nil
+}
+
+// isPushed reports whether the stream is server-initiated.
+func (st *http2stream) isPushed() bool {
+ return st.id%2 == 0
+}
+
+// endStream closes a Request.Body's pipe. It is called when a DATA
+// frame says a request body is over (or after trailers).
+func (st *http2stream) endStream() {
+ sc := st.sc
+ sc.serveG.check()
+
+ if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
+ st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
+ st.declBodyBytes, st.bodyBytes))
+ } else {
+ st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
+ st.body.CloseWithError(io.EOF)
+ }
+ st.state = http2stateHalfClosedRemote
+}
+
+// copyTrailersToHandlerRequest is run in the Handler's goroutine in
+// its Request.Body.Read just before it gets io.EOF.
+func (st *http2stream) copyTrailersToHandlerRequest() {
+ for k, vv := range st.trailer {
+ if _, ok := st.reqTrailer[k]; ok {
+ // Only copy it over if it was pre-declared.
+ st.reqTrailer[k] = vv
+ }
+ }
+}
+
+// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
+// when the stream's WriteTimeout has fired.
+func (st *http2stream) onWriteTimeout() {
+ st.sc.writeFrameFromHandler(http2FrameWriteRequest{write: http2streamError(st.id, http2ErrCodeInternal)})
+}
+
+func (sc *http2serverConn) processHeaders(f *http2MetaHeadersFrame) error {
+ sc.serveG.check()
+ id := f.StreamID
+ if sc.inGoAway {
+ // Ignore.
+ return nil
+ }
+ // http://tools.ietf.org/html/rfc7540#section-5.1.1
+ // Streams initiated by a client MUST use odd-numbered stream
+ // identifiers. [...] An endpoint that receives an unexpected
+ // stream identifier MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id%2 != 1 {
+ return sc.countError("headers_even", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ // A HEADERS frame can be used to create a new stream or
+ // send a trailer for an open one. If we already have a stream
+ // open, let it process its own HEADERS frame (trailers at this
+ // point, if it's valid).
+ if st := sc.streams[f.StreamID]; st != nil {
+ if st.resetQueued {
+ // We're sending RST_STREAM to close the stream, so don't bother
+ // processing this frame.
+ return nil
+ }
+ // RFC 7540, sec 5.1: If an endpoint receives additional frames, other than
+ // WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in
+ // this state, it MUST respond with a stream error (Section 5.4.2) of
+ // type STREAM_CLOSED.
+ if st.state == http2stateHalfClosedRemote {
+ return sc.countError("headers_half_closed", http2streamError(id, http2ErrCodeStreamClosed))
+ }
+ return st.processTrailerHeaders(f)
+ }
+
+ // [...] The identifier of a newly established stream MUST be
+ // numerically greater than all streams that the initiating
+ // endpoint has opened or reserved. [...] An endpoint that
+ // receives an unexpected stream identifier MUST respond with
+ // a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id <= sc.maxClientStreamID {
+ return sc.countError("stream_went_down", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ sc.maxClientStreamID = id
+
+ if sc.idleTimer != nil {
+ sc.idleTimer.Stop()
+ }
+
+ // http://tools.ietf.org/html/rfc7540#section-5.1.2
+ // [...] Endpoints MUST NOT exceed the limit set by their peer. An
+ // endpoint that receives a HEADERS frame that causes their
+ // advertised concurrent stream limit to be exceeded MUST treat
+ // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
+ // or REFUSED_STREAM.
+ if sc.curClientStreams+1 > sc.advMaxStreams {
+ if sc.unackedSettings == 0 {
+ // They should know better.
+ return sc.countError("over_max_streams", http2streamError(id, http2ErrCodeProtocol))
+ }
+ // Assume it's a network race, where they just haven't
+ // received our last SETTINGS update. But actually
+ // this can't happen yet, because we don't yet provide
+ // a way for users to adjust server parameters at
+ // runtime.
+ return sc.countError("over_max_streams_race", http2streamError(id, http2ErrCodeRefusedStream))
+ }
+
+ initialState := http2stateOpen
+ if f.StreamEnded() {
+ initialState = http2stateHalfClosedRemote
+ }
+ st := sc.newStream(id, 0, initialState)
+
+ if f.HasPriority() {
+ if err := sc.checkPriority(f.StreamID, f.Priority); err != nil {
+ return err
+ }
+ sc.writeSched.AdjustStream(st.id, f.Priority)
+ }
+
+ rw, req, err := sc.newWriterAndRequest(st, f)
+ if err != nil {
+ return err
+ }
+ st.reqTrailer = req.Trailer
+ if st.reqTrailer != nil {
+ st.trailer = make(Header)
+ }
+ st.body = req.Body.(*http2requestBody).pipe // may be nil
+ st.declBodyBytes = req.ContentLength
+
+ handler := sc.handler.ServeHTTP
+ if f.Truncated {
+ // Their header list was too long. Send a 431 error.
+ handler = http2handleHeaderListTooLong
+ } else if err := http2checkValidHTTP2RequestHeaders(req.Header); err != nil {
+ handler = http2new400Handler(err)
+ }
+
+ // The net/http package sets the read deadline from the
+ // http.Server.ReadTimeout during the TLS handshake, but then
+ // passes the connection off to us with the deadline already
+ // set. Disarm it here after the request headers are read,
+ // similar to how the http1 server works. Here it's
+ // technically more like the http1 Server's ReadHeaderTimeout
+ // (in Go 1.8), though. That's a more sane option anyway.
+ if sc.hs.ReadTimeout != 0 {
+ sc.conn.SetReadDeadline(time.Time{})
+ }
+
+ go sc.runHandler(rw, req, handler)
+ return nil
+}
+
+func (st *http2stream) processTrailerHeaders(f *http2MetaHeadersFrame) error {
+ sc := st.sc
+ sc.serveG.check()
+ if st.gotTrailerHeader {
+ return sc.countError("dup_trailers", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ st.gotTrailerHeader = true
+ if !f.StreamEnded() {
+ return sc.countError("trailers_not_ended", http2streamError(st.id, http2ErrCodeProtocol))
+ }
+
+ if len(f.PseudoFields()) > 0 {
+ return sc.countError("trailers_pseudo", http2streamError(st.id, http2ErrCodeProtocol))
+ }
+ if st.trailer != nil {
+ for _, hf := range f.RegularFields() {
+ key := sc.canonicalHeader(hf.Name)
+ if !httpguts.ValidTrailerHeader(key) {
+ // TODO: send more details to the peer somehow. But http2 has
+ // no way to send debug data at a stream level. Discuss with
+ // HTTP folk.
+ return sc.countError("trailers_bogus", http2streamError(st.id, http2ErrCodeProtocol))
+ }
+ st.trailer[key] = append(st.trailer[key], hf.Value)
+ }
+ }
+ st.endStream()
+ return nil
+}
+
+func (sc *http2serverConn) checkPriority(streamID uint32, p http2PriorityParam) error {
+ if streamID == p.StreamDep {
+ // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
+ // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
+ // Section 5.3.3 says that a stream can depend on one of its dependencies,
+ // so it's only self-dependencies that are forbidden.
+ return sc.countError("priority", http2streamError(streamID, http2ErrCodeProtocol))
+ }
+ return nil
+}
+
+func (sc *http2serverConn) processPriority(f *http2PriorityFrame) error {
+ if sc.inGoAway {
+ return nil
+ }
+ if err := sc.checkPriority(f.StreamID, f.http2PriorityParam); err != nil {
+ return err
+ }
+ sc.writeSched.AdjustStream(f.StreamID, f.http2PriorityParam)
+ return nil
+}
+
+func (sc *http2serverConn) newStream(id, pusherID uint32, state http2streamState) *http2stream {
+ sc.serveG.check()
+ if id == 0 {
+ panic("internal error: cannot create stream with id 0")
+ }
+
+ ctx, cancelCtx := context.WithCancel(sc.baseCtx)
+ st := &http2stream{
+ sc: sc,
+ id: id,
+ state: state,
+ ctx: ctx,
+ cancelCtx: cancelCtx,
+ }
+ st.cw.Init()
+ st.flow.conn = &sc.flow // link to conn-level counter
+ st.flow.add(sc.initialStreamSendWindowSize)
+ st.inflow.conn = &sc.inflow // link to conn-level counter
+ st.inflow.add(sc.srv.initialStreamRecvWindowSize())
+ if sc.hs.WriteTimeout != 0 {
+ st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
+ }
+
+ sc.streams[id] = st
+ sc.writeSched.OpenStream(st.id, http2OpenStreamOptions{PusherID: pusherID})
+ if st.isPushed() {
+ sc.curPushedStreams++
+ } else {
+ sc.curClientStreams++
+ }
+ if sc.curOpenStreams() == 1 {
+ sc.setConnState(StateActive)
+ }
+
+ return st
+}
+
+func (sc *http2serverConn) newWriterAndRequest(st *http2stream, f *http2MetaHeadersFrame) (*http2responseWriter, *Request, error) {
+ sc.serveG.check()
+
+ rp := http2requestParam{
+ method: f.PseudoValue("method"),
+ scheme: f.PseudoValue("scheme"),
+ authority: f.PseudoValue("authority"),
+ path: f.PseudoValue("path"),
+ }
+
+ isConnect := rp.method == "CONNECT"
+ if isConnect {
+ if rp.path != "" || rp.scheme != "" || rp.authority == "" {
+ return nil, nil, sc.countError("bad_connect", http2streamError(f.StreamID, http2ErrCodeProtocol))
+ }
+ } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
+ // See 8.1.2.6 Malformed Requests and Responses:
+ //
+ // Malformed requests or responses that are detected
+ // MUST be treated as a stream error (Section 5.4.2)
+ // of type PROTOCOL_ERROR."
+ //
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // "All HTTP/2 requests MUST include exactly one valid
+ // value for the :method, :scheme, and :path
+ // pseudo-header fields"
+ return nil, nil, sc.countError("bad_path_method", http2streamError(f.StreamID, http2ErrCodeProtocol))
+ }
+
+ bodyOpen := !f.StreamEnded()
+ if rp.method == "HEAD" && bodyOpen {
+ // HEAD requests can't have bodies
+ return nil, nil, sc.countError("head_body", http2streamError(f.StreamID, http2ErrCodeProtocol))
+ }
+
+ rp.header = make(Header)
+ for _, hf := range f.RegularFields() {
+ rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+ }
+ if rp.authority == "" {
+ rp.authority = rp.header.Get("Host")
+ }
+
+ rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
+ if err != nil {
+ return nil, nil, err
+ }
+ if bodyOpen {
+ if vv, ok := rp.header["Content-Length"]; ok {
+ if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
+ req.ContentLength = int64(cl)
+ } else {
+ req.ContentLength = 0
+ }
+ } else {
+ req.ContentLength = -1
+ }
+ req.Body.(*http2requestBody).pipe = &http2pipe{
+ b: &http2dataBuffer{expected: req.ContentLength},
+ }
+ }
+ return rw, req, nil
+}
+
+type http2requestParam struct {
+ method string
+ scheme, authority, path string
+ header Header
+}
+
+func (sc *http2serverConn) newWriterAndRequestNoBody(st *http2stream, rp http2requestParam) (*http2responseWriter, *Request, error) {
+ sc.serveG.check()
+
+ var tlsState *tls.ConnectionState // nil if not scheme https
+ if rp.scheme == "https" {
+ tlsState = sc.tlsState
+ }
+
+ needsContinue := rp.header.Get("Expect") == "100-continue"
+ if needsContinue {
+ rp.header.Del("Expect")
+ }
+ // Merge Cookie headers into one "; "-delimited value.
+ if cookies := rp.header["Cookie"]; len(cookies) > 1 {
+ rp.header.Set("Cookie", strings.Join(cookies, "; "))
+ }
+
+ // Setup Trailers
+ var trailer Header
+ for _, v := range rp.header["Trailer"] {
+ for _, key := range strings.Split(v, ",") {
+ key = CanonicalHeaderKey(textproto.TrimString(key))
+ switch key {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ // Bogus. (copy of http1 rules)
+ // Ignore.
+ default:
+ if trailer == nil {
+ trailer = make(Header)
+ }
+ trailer[key] = nil
+ }
+ }
+ }
+ delete(rp.header, "Trailer")
+
+ var url_ *url.URL
+ var requestURI string
+ if rp.method == "CONNECT" {
+ url_ = &url.URL{Host: rp.authority}
+ requestURI = rp.authority // mimic HTTP/1 server behavior
+ } else {
+ var err error
+ url_, err = url.ParseRequestURI(rp.path)
+ if err != nil {
+ return nil, nil, sc.countError("bad_path", http2streamError(st.id, http2ErrCodeProtocol))
+ }
+ requestURI = rp.path
+ }
+
+ body := &http2requestBody{
+ conn: sc,
+ stream: st,
+ needsContinue: needsContinue,
+ }
+ req := &Request{
+ Method: rp.method,
+ URL: url_,
+ RemoteAddr: sc.remoteAddrStr,
+ Header: rp.header,
+ RequestURI: requestURI,
+ Proto: "HTTP/2.0",
+ ProtoMajor: 2,
+ ProtoMinor: 0,
+ TLS: tlsState,
+ Host: rp.authority,
+ Body: body,
+ Trailer: trailer,
+ }
+ req = req.WithContext(st.ctx)
+
+ rws := http2responseWriterStatePool.Get().(*http2responseWriterState)
+ bwSave := rws.bw
+ *rws = http2responseWriterState{} // zero all the fields
+ rws.conn = sc
+ rws.bw = bwSave
+ rws.bw.Reset(http2chunkWriter{rws})
+ rws.stream = st
+ rws.req = req
+ rws.body = body
+
+ rw := &http2responseWriter{rws: rws}
+ return rw, req, nil
+}
+
+// Run on its own goroutine.
+func (sc *http2serverConn) runHandler(rw *http2responseWriter, req *Request, handler func(ResponseWriter, *Request)) {
+ didPanic := true
+ defer func() {
+ rw.rws.stream.cancelCtx()
+ if didPanic {
+ e := recover()
+ sc.writeFrameFromHandler(http2FrameWriteRequest{
+ write: http2handlerPanicRST{rw.rws.stream.id},
+ stream: rw.rws.stream,
+ })
+ // Same as net/http:
+ if e != nil && e != ErrAbortHandler {
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
+ }
+ return
+ }
+ rw.handlerDone()
+ }()
+ handler(rw, req)
+ didPanic = false
+}
+
+func http2handleHeaderListTooLong(w ResponseWriter, r *Request) {
+ // 10.5.1 Limits on Header Block Size:
+ // .. "A server that receives a larger header block than it is
+ // willing to handle can send an HTTP 431 (Request Header Fields Too
+ // Large) status code"
+ const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
+ w.WriteHeader(statusRequestHeaderFieldsTooLarge)
+ io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
+}
+
+// called from handler goroutines.
+// h may be nil.
+func (sc *http2serverConn) writeHeaders(st *http2stream, headerData *http2writeResHeaders) error {
+ sc.serveG.checkNotOn() // NOT on
+ var errc chan error
+ if headerData.h != nil {
+ // If there's a header map (which we don't own), we have to block on
+ // waiting for this frame to be written, so that an http.Flush mid-handler
+ // writes out the correct value of keys before a handler later potentially
+ // mutates it.
+ errc = http2errChanPool.Get().(chan error)
+ }
+ if err := sc.writeFrameFromHandler(http2FrameWriteRequest{
+ write: headerData,
+ stream: st,
+ done: errc,
+ }); err != nil {
+ return err
+ }
+ if errc != nil {
+ select {
+ case err := <-errc:
+ http2errChanPool.Put(errc)
+ return err
+ case <-sc.doneServing:
+ return http2errClientDisconnected
+ case <-st.cw:
+ return http2errStreamClosed
+ }
+ }
+ return nil
+}
+
+// called from handler goroutines.
+func (sc *http2serverConn) write100ContinueHeaders(st *http2stream) {
+ sc.writeFrameFromHandler(http2FrameWriteRequest{
+ write: http2write100ContinueHeadersFrame{st.id},
+ stream: st,
+ })
+}
+
+// A bodyReadMsg tells the server loop that the http.Handler read n
+// bytes of the DATA from the client on the given stream.
+type http2bodyReadMsg struct {
+ st *http2stream
+ n int
+}
+
+// called from handler goroutines.
+// Notes that the handler for the given stream ID read n bytes of its body
+// and schedules flow control tokens to be sent.
+func (sc *http2serverConn) noteBodyReadFromHandler(st *http2stream, n int, err error) {
+ sc.serveG.checkNotOn() // NOT on
+ if n > 0 {
+ select {
+ case sc.bodyReadCh <- http2bodyReadMsg{st, n}:
+ case <-sc.doneServing:
+ }
+ }
+}
+
+func (sc *http2serverConn) noteBodyRead(st *http2stream, n int) {
+ sc.serveG.check()
+ sc.sendWindowUpdate(nil, n) // conn-level
+ if st.state != http2stateHalfClosedRemote && st.state != http2stateClosed {
+ // Don't send this WINDOW_UPDATE if the stream is closed
+ // remotely.
+ sc.sendWindowUpdate(st, n)
+ }
+}
+
+// st may be nil for conn-level
+func (sc *http2serverConn) sendWindowUpdate(st *http2stream, n int) {
+ sc.serveG.check()
+ // "The legal range for the increment to the flow control
+ // window is 1 to 2^31-1 (2,147,483,647) octets."
+ // A Go Read call on 64-bit machines could in theory read
+ // more than this. Very unlikely, but we handle it here
+ // rather than elsewhere for now.
+ const maxUint31 = 1<<31 - 1
+ for n >= maxUint31 {
+ sc.sendWindowUpdate32(st, maxUint31)
+ n -= maxUint31
+ }
+ sc.sendWindowUpdate32(st, int32(n))
+}
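+
+// For instance (illustrative), a single 3 GiB read (n = 3<<30) becomes
+// one WINDOW_UPDATE of 2^31-1 octets followed by one of the remaining
+// 1073741825 octets, keeping every increment within the legal range.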
+
+// st may be nil for conn-level
+func (sc *http2serverConn) sendWindowUpdate32(st *http2stream, n int32) {
+ sc.serveG.check()
+ if n == 0 {
+ return
+ }
+ if n < 0 {
+ panic("negative update")
+ }
+ var streamID uint32
+ if st != nil {
+ streamID = st.id
+ }
+ sc.writeFrame(http2FrameWriteRequest{
+ write: http2writeWindowUpdate{streamID: streamID, n: uint32(n)},
+ stream: st,
+ })
+ var ok bool
+ if st == nil {
+ ok = sc.inflow.add(n)
+ } else {
+ ok = st.inflow.add(n)
+ }
+ if !ok {
+ panic("internal error; sent too many window updates without decrements?")
+ }
+}
+
+// requestBody is the Handler's Request.Body type.
+// Read and Close may be called concurrently.
+type http2requestBody struct {
+ _ http2incomparable
+ stream *http2stream
+ conn *http2serverConn
+ closeOnce sync.Once // for use by Close only
+ sawEOF bool // for use by Read only
+ pipe *http2pipe // non-nil if we have an HTTP entity message body
+ needsContinue bool // need to send a 100-continue
+}
+
+func (b *http2requestBody) Close() error {
+ b.closeOnce.Do(func() {
+ if b.pipe != nil {
+ b.pipe.BreakWithError(http2errClosedBody)
+ }
+ })
+ return nil
+}
+
+func (b *http2requestBody) Read(p []byte) (n int, err error) {
+ if b.needsContinue {
+ b.needsContinue = false
+ b.conn.write100ContinueHeaders(b.stream)
+ }
+ if b.pipe == nil || b.sawEOF {
+ return 0, io.EOF
+ }
+ n, err = b.pipe.Read(p)
+ if err == io.EOF {
+ b.sawEOF = true
+ }
+ if b.conn == nil && http2inTests {
+ return
+ }
+ b.conn.noteBodyReadFromHandler(b.stream, n, err)
+ return
+}
+
+// responseWriter is the http.ResponseWriter implementation. It's
+// intentionally small (1 pointer wide) to minimize garbage. The
+// responseWriterState pointer inside is zeroed at the end of a
+// request (in handlerDone) and calls on the responseWriter thereafter
+// simply crash (caller's mistake), but the much larger responseWriterState
+// and buffers are reused between multiple requests.
+type http2responseWriter struct {
+ rws *http2responseWriterState
+}
+
+// Optional http.ResponseWriter interfaces implemented.
+var (
+ _ CloseNotifier = (*http2responseWriter)(nil)
+ _ Flusher = (*http2responseWriter)(nil)
+ _ http2stringWriter = (*http2responseWriter)(nil)
+)
+
+type http2responseWriterState struct {
+ // immutable within a request:
+ stream *http2stream
+ req *Request
+ body *http2requestBody // to close at end of request, if DATA frames didn't
+ conn *http2serverConn
+
+ // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
+ bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
+
+ // mutated by http.Handler goroutine:
+ handlerHeader Header // nil until called
+ snapHeader Header // snapshot of handlerHeader at WriteHeader time
+ trailers []string // set in writeChunk
+ status int // status code passed to WriteHeader
+ wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
+ sentHeader bool // have we sent the header frame?
+ handlerDone bool // handler has finished
+ dirty bool // a Write failed; don't reuse this responseWriterState
+
+ sentContentLen int64 // non-zero if handler set a Content-Length header
+ wroteBytes int64
+
+ closeNotifierMu sync.Mutex // guards closeNotifierCh
+ closeNotifierCh chan bool // nil until first used
+}
+
+type http2chunkWriter struct{ rws *http2responseWriterState }
+
+func (cw http2chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
+
+func (rws *http2responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 }
+
+func (rws *http2responseWriterState) hasNonemptyTrailers() bool {
+ for _, trailer := range rws.trailers {
+ if _, ok := rws.handlerHeader[trailer]; ok {
+ return true
+ }
+ }
+ return false
+}
+
+// declareTrailer is called for each Trailer header when the
+// response header is written. It notes that a header will need to be
+// written in the trailers at the end of the response.
+func (rws *http2responseWriterState) declareTrailer(k string) {
+ k = CanonicalHeaderKey(k)
+ if !httpguts.ValidTrailerHeader(k) {
+ // Forbidden by RFC 7230, section 4.1.2.
+ rws.conn.logf("ignoring invalid trailer %q", k)
+ return
+ }
+ if !http2strSliceContains(rws.trailers, k) {
+ rws.trailers = append(rws.trailers, k)
+ }
+}
+
+// writeChunk writes chunks from the bufio.Writer. But because
+// bufio.Writer may bypass its chunking, sometimes p may be
+// arbitrarily large.
+//
+// writeChunk is also responsible (on the first chunk) for sending the
+// HEADER response.
+func (rws *http2responseWriterState) writeChunk(p []byte) (n int, err error) {
+ if !rws.wroteHeader {
+ rws.writeHeader(200)
+ }
+
+ isHeadResp := rws.req.Method == "HEAD"
+ if !rws.sentHeader {
+ rws.sentHeader = true
+ var ctype, clen string
+ if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
+ rws.snapHeader.Del("Content-Length")
+ if cl, err := strconv.ParseUint(clen, 10, 63); err == nil {
+ rws.sentContentLen = int64(cl)
+ } else {
+ clen = ""
+ }
+ }
+ if clen == "" && rws.handlerDone && http2bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
+ clen = strconv.Itoa(len(p))
+ }
+ _, hasContentType := rws.snapHeader["Content-Type"]
+ // If the Content-Encoding is non-blank, we shouldn't
+ // sniff the body. See Issue golang.org/issue/31753.
+ ce := rws.snapHeader.Get("Content-Encoding")
+ hasCE := len(ce) > 0
+ if !hasCE && !hasContentType && http2bodyAllowedForStatus(rws.status) && len(p) > 0 {
+ ctype = DetectContentType(p)
+ }
+ var date string
+ if _, ok := rws.snapHeader["Date"]; !ok {
+ // TODO(bradfitz): be faster here, like net/http? measure.
+ date = time.Now().UTC().Format(TimeFormat)
+ }
+
+ for _, v := range rws.snapHeader["Trailer"] {
+ http2foreachHeaderElement(v, rws.declareTrailer)
+ }
+
+ // "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2),
+ // but respect "Connection" == "close" to mean sending a GOAWAY and tearing
+ // down the TCP connection when idle, like we do for HTTP/1.
+ // TODO: remove more Connection-specific header fields here, in addition
+ // to "Connection".
+ if _, ok := rws.snapHeader["Connection"]; ok {
+ v := rws.snapHeader.Get("Connection")
+ delete(rws.snapHeader, "Connection")
+ if v == "close" {
+ rws.conn.startGracefulShutdown()
+ }
+ }
+
+ endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
+ err = rws.conn.writeHeaders(rws.stream, &http2writeResHeaders{
+ streamID: rws.stream.id,
+ httpResCode: rws.status,
+ h: rws.snapHeader,
+ endStream: endStream,
+ contentType: ctype,
+ contentLength: clen,
+ date: date,
+ })
+ if err != nil {
+ rws.dirty = true
+ return 0, err
+ }
+ if endStream {
+ return 0, nil
+ }
+ }
+ if isHeadResp {
+ return len(p), nil
+ }
+ if len(p) == 0 && !rws.handlerDone {
+ return 0, nil
+ }
+
+ if rws.handlerDone {
+ rws.promoteUndeclaredTrailers()
+ }
+
+ // only send trailers if they have actually been defined by the
+ // server handler.
+ hasNonemptyTrailers := rws.hasNonemptyTrailers()
+ endStream := rws.handlerDone && !hasNonemptyTrailers
+ if len(p) > 0 || endStream {
+ // only send a 0 byte DATA frame if we're ending the stream.
+ if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
+ rws.dirty = true
+ return 0, err
+ }
+ }
+
+ if rws.handlerDone && hasNonemptyTrailers {
+ err = rws.conn.writeHeaders(rws.stream, &http2writeResHeaders{
+ streamID: rws.stream.id,
+ h: rws.handlerHeader,
+ trailers: rws.trailers,
+ endStream: true,
+ })
+ if err != nil {
+ rws.dirty = true
+ }
+ return len(p), err
+ }
+ return len(p), nil
+}
+
+// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
+// that, if present, signals that the map entry is actually for
+// the response trailers, and not the response headers. The prefix
+// is stripped after the ServeHTTP call finishes and the values are
+// sent in the trailers.
+//
+// This mechanism is intended only for trailers that are not known
+// prior to the headers being written. If the set of trailers is fixed
+// or known before the header is written, the normal Go trailers mechanism
+// is preferred:
+//
+// https://golang.org/pkg/net/http/#ResponseWriter
+// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
+const http2TrailerPrefix = "Trailer:"
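+
+// An illustrative handler using this mechanism (not part of this file;
+// it assumes only the standard net/http API):
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		w.WriteHeader(http.StatusOK)
+//		io.WriteString(w, "body")
+//		// Promoted to a "Checksum" trailer after the handler returns.
+//		w.Header().Set("Trailer:Checksum", "abc123")
+//	}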
+
+// promoteUndeclaredTrailers permits http.Handlers to set trailers
+// after the header has already been flushed. Because the Go
+// ResponseWriter interface has no way to set Trailers (only the
+// Header), and because we didn't want to expand the ResponseWriter
+// interface, and because nobody used trailers, and because RFC 7230
+// says you SHOULD (but not must) predeclare any trailers in the
+// header, the official ResponseWriter rules said trailers in Go must
+// be predeclared, and then we reuse the same ResponseWriter.Header()
+// map to mean both Headers and Trailers. When it's time to write the
+// Trailers, we pick out the fields of Headers that were declared as
+// trailers. That worked for a while, until we found the first major
+// user of Trailers in the wild: gRPC (using them only over http2),
+// and gRPC libraries permit setting trailers mid-stream without
+// predeclaring them. So: change of plans. We still permit the old
+// way, but we also permit this hack: if a Header() key begins with
+// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
+// invalid token byte anyway, there is no ambiguity. (And it's already
+// filtered out.) It's mildly hacky, but not terrible.
+//
+// This method runs after the Handler is done and promotes any Header
+// fields to be trailers.
+func (rws *http2responseWriterState) promoteUndeclaredTrailers() {
+ for k, vv := range rws.handlerHeader {
+ if !strings.HasPrefix(k, http2TrailerPrefix) {
+ continue
+ }
+ trailerKey := strings.TrimPrefix(k, http2TrailerPrefix)
+ rws.declareTrailer(trailerKey)
+ rws.handlerHeader[CanonicalHeaderKey(trailerKey)] = vv
+ }
+
+ if len(rws.trailers) > 1 {
+ sorter := http2sorterPool.Get().(*http2sorter)
+ sorter.SortStrings(rws.trailers)
+ http2sorterPool.Put(sorter)
+ }
+}
+
+func (w *http2responseWriter) Flush() {
+ rws := w.rws
+ if rws == nil {
+ panic("Header called after Handler finished")
+ }
+ if rws.bw.Buffered() > 0 {
+ if err := rws.bw.Flush(); err != nil {
+ // Ignore the error. The frame writer already knows.
+ return
+ }
+ } else {
+ // The bufio.Writer won't call chunkWriter.Write
+ // (writeChunk) with zero bytes, so we have to do it
+ // ourselves to force the HTTP response header and/or
+ // final DATA frame (with END_STREAM) to be sent.
+ rws.writeChunk(nil)
+ }
+}
+
+func (w *http2responseWriter) CloseNotify() <-chan bool {
+ rws := w.rws
+ if rws == nil {
+ panic("CloseNotify called after Handler finished")
+ }
+ rws.closeNotifierMu.Lock()
+ ch := rws.closeNotifierCh
+ if ch == nil {
+ ch = make(chan bool, 1)
+ rws.closeNotifierCh = ch
+ cw := rws.stream.cw
+ go func() {
+ cw.Wait() // wait for close
+ ch <- true
+ }()
+ }
+ rws.closeNotifierMu.Unlock()
+ return ch
+}
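+
+// Note (illustrative): handlers today would more typically select on
+// r.Context().Done(), e.g.
+//
+//	<-r.Context().Done() // fires when the client disconnects or the request ends
+//
+// the channel returned here exists to satisfy the legacy
+// http.CloseNotifier interface.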
+
+func (w *http2responseWriter) Header() Header {
+ rws := w.rws
+ if rws == nil {
+ panic("Header called after Handler finished")
+ }
+ if rws.handlerHeader == nil {
+ rws.handlerHeader = make(Header)
+ }
+ return rws.handlerHeader
+}
+
+// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode.
+func http2checkWriteHeaderCode(code int) {
+ // Issue 22880: require valid WriteHeader status codes.
+ // For now we only enforce that it's three digits.
+ // In the future we might block things over 599 (600 and above aren't defined
+ // at http://httpwg.org/specs/rfc7231.html#status.codes).
+ // But for now any three digits.
+ //
+ // We used to send "HTTP/1.1 000 0" on the wire in responses but there's
+ // no equivalent bogus thing we can realistically send in HTTP/2,
+ // so we'll consistently panic instead and help people find their bugs
+ // early. (We can't return an error from WriteHeader even if we wanted to.)
+ if code < 100 || code > 999 {
+ panic(fmt.Sprintf("invalid WriteHeader code %v", code))
+ }
+}
+
+func (w *http2responseWriter) WriteHeader(code int) {
+ rws := w.rws
+ if rws == nil {
+ panic("WriteHeader called after Handler finished")
+ }
+ rws.writeHeader(code)
+}
+
+func (rws *http2responseWriterState) writeHeader(code int) {
+ if rws.wroteHeader {
+ return
+ }
+
+ http2checkWriteHeaderCode(code)
+
+ // Handle informational headers
+ if code >= 100 && code <= 199 {
+ // Per RFC 8297 we must not clear the current header map
+ h := rws.handlerHeader
+
+ _, cl := h["Content-Length"]
+ _, te := h["Transfer-Encoding"]
+ if cl || te {
+ h = h.Clone()
+ h.Del("Content-Length")
+ h.Del("Transfer-Encoding")
+ }
+
+ if rws.conn.writeHeaders(rws.stream, &http2writeResHeaders{
+ streamID: rws.stream.id,
+ httpResCode: code,
+ h: h,
+ endStream: rws.handlerDone && !rws.hasTrailers(),
+ }) != nil {
+ rws.dirty = true
+ }
+
+ return
+ }
+
+ rws.wroteHeader = true
+ rws.status = code
+ if len(rws.handlerHeader) > 0 {
+ rws.snapHeader = http2cloneHeader(rws.handlerHeader)
+ }
+}
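+
+// The informational branch above is what lets a handler send 103 Early
+// Hints (RFC 8297); an illustrative handler (assuming only the standard
+// net/http API):
+//
+//	func hints(w http.ResponseWriter, r *http.Request) {
+//		w.Header().Set("Link", "</style.css>; rel=preload; as=style")
+//		w.WriteHeader(http.StatusEarlyHints) // 103; the header map is not cleared
+//		w.WriteHeader(http.StatusOK)
+//	}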
+
+func http2cloneHeader(h Header) Header {
+ h2 := make(Header, len(h))
+ for k, vv := range h {
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ h2[k] = vv2
+ }
+ return h2
+}
+
+// The Life Of A Write is like this:
+//
+// * Handler calls w.Write or w.WriteString ->
+// * -> rws.bw (*bufio.Writer) ->
+// * (Handler might call Flush)
+// * -> chunkWriter{rws}
+// * -> responseWriterState.writeChunk(p []byte)
+// * -> responseWriterState.writeChunk (most of the magic; see comment there)
+func (w *http2responseWriter) Write(p []byte) (n int, err error) {
+ return w.write(len(p), p, "")
+}
+
+func (w *http2responseWriter) WriteString(s string) (n int, err error) {
+ return w.write(len(s), nil, s)
+}
+
+// At most one of dataB and dataS is non-empty.
+func (w *http2responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
+ rws := w.rws
+ if rws == nil {
+ panic("Write called after Handler finished")
+ }
+ if !rws.wroteHeader {
+ w.WriteHeader(200)
+ }
+ if !http2bodyAllowedForStatus(rws.status) {
+ return 0, ErrBodyNotAllowed
+ }
+ rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
+ if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
+ // TODO: send a RST_STREAM
+ return 0, errors.New("http2: handler wrote more than declared Content-Length")
+ }
+
+ if dataB != nil {
+ return rws.bw.Write(dataB)
+ } else {
+ return rws.bw.WriteString(dataS)
+ }
+}
+
+func (w *http2responseWriter) handlerDone() {
+ rws := w.rws
+ dirty := rws.dirty
+ rws.handlerDone = true
+ w.Flush()
+ w.rws = nil
+ if !dirty {
+ // Only recycle the pool if all prior Write calls to
+ // the serverConn goroutine completed successfully. If
+ // they returned earlier due to resets from the peer
+ // there might still be write goroutines outstanding
+ // from the serverConn referencing the rws memory. See
+ // issue 20704.
+ http2responseWriterStatePool.Put(rws)
+ }
+}
+
+// Push errors.
+var (
+ http2ErrRecursivePush = errors.New("http2: recursive push not allowed")
+ http2ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
+)
+
+var _ Pusher = (*http2responseWriter)(nil)
+
+func (w *http2responseWriter) Push(target string, opts *PushOptions) error {
+ st := w.rws.stream
+ sc := st.sc
+ sc.serveG.checkNotOn()
+
+ // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
+ // http://tools.ietf.org/html/rfc7540#section-6.6
+ if st.isPushed() {
+ return http2ErrRecursivePush
+ }
+
+ if opts == nil {
+ opts = new(PushOptions)
+ }
+
+ // Default options.
+ if opts.Method == "" {
+ opts.Method = "GET"
+ }
+ if opts.Header == nil {
+ opts.Header = Header{}
+ }
+ wantScheme := "http"
+ if w.rws.req.TLS != nil {
+ wantScheme = "https"
+ }
+
+ // Validate the request.
+ u, err := url.Parse(target)
+ if err != nil {
+ return err
+ }
+ if u.Scheme == "" {
+ if !strings.HasPrefix(target, "/") {
+ return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
+ }
+ u.Scheme = wantScheme
+ u.Host = w.rws.req.Host
+ } else {
+ if u.Scheme != wantScheme {
+ return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
+ }
+ if u.Host == "" {
+ return errors.New("URL must have a host")
+ }
+ }
+ for k := range opts.Header {
+ if strings.HasPrefix(k, ":") {
+ return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
+ }
+ // These headers are meaningful only if the request has a body,
+ // but PUSH_PROMISE requests cannot have a body.
+ // http://tools.ietf.org/html/rfc7540#section-8.2
+ // Also disallow Host, since the promised URL must be absolute.
+ if http2asciiEqualFold(k, "content-length") ||
+ http2asciiEqualFold(k, "content-encoding") ||
+ http2asciiEqualFold(k, "trailer") ||
+ http2asciiEqualFold(k, "te") ||
+ http2asciiEqualFold(k, "expect") ||
+ http2asciiEqualFold(k, "host") {
+ return fmt.Errorf("promised request headers cannot include %q", k)
+ }
+ }
+ if err := http2checkValidHTTP2RequestHeaders(opts.Header); err != nil {
+ return err
+ }
+
+ // The RFC effectively limits promised requests to GET and HEAD:
+ // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
+ // http://tools.ietf.org/html/rfc7540#section-8.2
+ if opts.Method != "GET" && opts.Method != "HEAD" {
+ return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
+ }
+
+ msg := &http2startPushRequest{
+ parent: st,
+ method: opts.Method,
+ url: u,
+ header: http2cloneHeader(opts.Header),
+ done: http2errChanPool.Get().(chan error),
+ }
+
+ select {
+ case <-sc.doneServing:
+ return http2errClientDisconnected
+ case <-st.cw:
+ return http2errStreamClosed
+ case sc.serveMsgCh <- msg:
+ }
+
+ select {
+ case <-sc.doneServing:
+ return http2errClientDisconnected
+ case <-st.cw:
+ return http2errStreamClosed
+ case err := <-msg.done:
+ http2errChanPool.Put(msg.done)
+ return err
+ }
+}
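+
+// An illustrative use of server push from a handler (assuming only the
+// standard net/http API):
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		if p, ok := w.(http.Pusher); ok {
+//			// Best effort: Push fails over HTTP/1 or when the peer disables push.
+//			_ = p.Push("/static/app.css", nil)
+//		}
+//		io.WriteString(w, "<html>...</html>")
+//	}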
+
+type http2startPushRequest struct {
+ parent *http2stream
+ method string
+ url *url.URL
+ header Header
+ done chan error
+}
+
+func (sc *http2serverConn) startPush(msg *http2startPushRequest) {
+ sc.serveG.check()
+
+ // http://tools.ietf.org/html/rfc7540#section-6.6.
+ // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
+ // is in either the "open" or "half-closed (remote)" state.
+ if msg.parent.state != http2stateOpen && msg.parent.state != http2stateHalfClosedRemote {
+ // responseWriter.Push checks that the stream is peer-initiated.
+ msg.done <- http2errStreamClosed
+ return
+ }
+
+ // http://tools.ietf.org/html/rfc7540#section-6.6.
+ if !sc.pushEnabled {
+ msg.done <- ErrNotSupported
+ return
+ }
+
+ // PUSH_PROMISE frames must be sent in increasing order by stream ID, so
+ // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
+ // is written. Once the ID is allocated, we start the request handler.
+ allocatePromisedID := func() (uint32, error) {
+ sc.serveG.check()
+
+ // Check this again, just in case. Technically, we might have received
+ // an updated SETTINGS by the time we got around to writing this frame.
+ if !sc.pushEnabled {
+ return 0, ErrNotSupported
+ }
+ // http://tools.ietf.org/html/rfc7540#section-6.5.2.
+ if sc.curPushedStreams+1 > sc.clientMaxStreams {
+ return 0, http2ErrPushLimitReached
+ }
+
+ // http://tools.ietf.org/html/rfc7540#section-5.1.1.
+ // Streams initiated by the server MUST use even-numbered identifiers.
+ // A server that is unable to establish a new stream identifier can send a GOAWAY
+ // frame so that the client is forced to open a new connection for new streams.
+ if sc.maxPushPromiseID+2 >= 1<<31 {
+ sc.startGracefulShutdownInternal()
+ return 0, http2ErrPushLimitReached
+ }
+ sc.maxPushPromiseID += 2
+ promisedID := sc.maxPushPromiseID
+
+ // http://tools.ietf.org/html/rfc7540#section-8.2.
+ // Strictly speaking, the new stream should start in "reserved (local)", then
+ // transition to "half closed (remote)" after sending the initial HEADERS, but
+ // we start in "half closed (remote)" for simplicity.
+ // See further comments at the definition of stateHalfClosedRemote.
+ promised := sc.newStream(promisedID, msg.parent.id, http2stateHalfClosedRemote)
+ rw, req, err := sc.newWriterAndRequestNoBody(promised, http2requestParam{
+ method: msg.method,
+ scheme: msg.url.Scheme,
+ authority: msg.url.Host,
+ path: msg.url.RequestURI(),
+ header: http2cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
+ })
+ if err != nil {
+ // Should not happen, since we've already validated msg.url.
+ panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
+ }
+
+ go sc.runHandler(rw, req, sc.handler.ServeHTTP)
+ return promisedID, nil
+ }
+
+ sc.writeFrame(http2FrameWriteRequest{
+ write: &http2writePushPromise{
+ streamID: msg.parent.id,
+ method: msg.method,
+ url: msg.url,
+ h: msg.header,
+ allocatePromisedID: allocatePromisedID,
+ },
+ stream: msg.parent,
+ done: msg.done,
+ })
+}
+
+// foreachHeaderElement splits v according to the "#rule" construction
+// in RFC 7230 section 7 and calls fn for each non-empty element.
+func http2foreachHeaderElement(v string, fn func(string)) {
+ v = textproto.TrimString(v)
+ if v == "" {
+ return
+ }
+ if !strings.Contains(v, ",") {
+ fn(v)
+ return
+ }
+ for _, f := range strings.Split(v, ",") {
+ if f = textproto.TrimString(f); f != "" {
+ fn(f)
+ }
+ }
+}
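+
+// For example (illustrative), the value " gzip, , chunked " produces the
+// calls fn("gzip") and fn("chunked"): elements are comma-split, trimmed,
+// and empty elements are dropped.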
+
+// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
+var http2connHeaders = []string{
+ "Connection",
+ "Keep-Alive",
+ "Proxy-Connection",
+ "Transfer-Encoding",
+ "Upgrade",
+}
+
+// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request,
+// per RFC 7540 Section 8.1.2.2.
+// The returned error is reported to users.
+func http2checkValidHTTP2RequestHeaders(h Header) error {
+ for _, k := range http2connHeaders {
+ if _, ok := h[k]; ok {
+ return fmt.Errorf("request header %q is not valid in HTTP/2", k)
+ }
+ }
+ te := h["Te"]
+ if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
+ return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
+ }
+ return nil
+}
+
+func http2new400Handler(err error) HandlerFunc {
+ return func(w ResponseWriter, r *Request) {
+ Error(w, err.Error(), StatusBadRequest)
+ }
+}
+
+// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
+// disabled. See comments on h1ServerShutdownChan above for why
+// the code is written this way.
+func http2h1ServerKeepAlivesDisabled(hs *Server) bool {
+ var x interface{} = hs
+ type I interface {
+ doKeepAlives() bool
+ }
+ if hs, ok := x.(I); ok {
+ return !hs.doKeepAlives()
+ }
+ return false
+}
+
+func (sc *http2serverConn) countError(name string, err error) error {
+ if sc == nil || sc.srv == nil {
+ return err
+ }
+ f := sc.srv.CountError
+ if f == nil {
+ return err
+ }
+ var typ string
+ var code http2ErrCode
+ switch e := err.(type) {
+ case http2ConnectionError:
+ typ = "conn"
+ code = http2ErrCode(e)
+ case http2StreamError:
+ typ = "stream"
+ code = http2ErrCode(e.Code)
+ default:
+ return err
+ }
+ codeStr := http2errCodeName[code]
+ if codeStr == "" {
+ codeStr = strconv.Itoa(int(code))
+ }
+ f(fmt.Sprintf("%s_%s_%s", typ, codeStr, name))
+ return err
+}
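+
+// An illustrative CountError hook (assuming the exported
+// golang.org/x/net/http2 API, where the callback is the
+// http2.Server.CountError field):
+//
+//	errs := expvar.NewMap("http2_errors")
+//	s := &http2.Server{
+//		CountError: func(errType string) { errs.Add(errType, 1) },
+//	}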
+
+const (
+ // transportDefaultConnFlow is how many connection-level flow control
+ // tokens we give the server at start-up, past the default 64k.
+ http2transportDefaultConnFlow = 1 << 30
+
+ // transportDefaultStreamFlow is how many stream-level flow
+ // control tokens we announce to the peer, and how many bytes
+ // we buffer per stream.
+ http2transportDefaultStreamFlow = 4 << 20
+
+ // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
+ // a stream-level WINDOW_UPDATE for at a time.
+ http2transportDefaultStreamMinRefresh = 4 << 10
+
+ http2defaultUserAgent = "Go-http-client/2.0"
+
+ // initialMaxConcurrentStreams is a connection's maxConcurrentStreams until
+ // it has received the server's initial SETTINGS frame, and corresponds with
+ // the spec's minimum recommended value.
+ http2initialMaxConcurrentStreams = 100
+
+ // defaultMaxConcurrentStreams is a connection's default maxConcurrentStreams
+ // if the server doesn't include one in its initial SETTINGS frame.
+ http2defaultMaxConcurrentStreams = 1000
+)
+
+// Transport is an HTTP/2 Transport.
+//
+// A Transport internally caches connections to servers. It is safe
+// for concurrent use by multiple goroutines.
+type http2Transport struct {
+ // DialTLS specifies an optional dial function for creating
+ // TLS connections for requests.
+ //
+ // If DialTLS is nil, tls.Dial is used.
+ //
+ // If the returned net.Conn has a ConnectionState method like tls.Conn,
+ // it will be used to set http.Response.TLS.
+ DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with
+ // tls.Client. If nil, the default configuration is used.
+ TLSClientConfig *tls.Config
+
+ // ConnPool optionally specifies an alternate connection pool to use.
+ // If nil, the default is used.
+ ConnPool http2ClientConnPool
+
+ // DisableCompression, if true, prevents the Transport from
+ // requesting compression with an "Accept-Encoding: gzip"
+ // request header when the Request contains no existing
+ // Accept-Encoding value. If the Transport requests gzip on
+ // its own and gets a gzipped response, it's transparently
+ // decoded in the Response.Body. However, if the user
+ // explicitly requested gzip it is not automatically
+ // uncompressed.
+ DisableCompression bool
+
+ // AllowHTTP, if true, permits HTTP/2 requests using the insecure,
+ // plain-text "http" scheme. Note that this does not enable h2c support.
+ AllowHTTP bool
+
+ // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
+ // send in the initial settings frame. It is how many bytes
+ // of response headers are allowed. Unlike the http2 spec, zero here
+ // means to use a default limit (currently 10MB). If you actually
+ // want to advertise an unlimited value to the peer, Transport
+ // interprets the highest possible value here (0xffffffff or 1<<32-1)
+ // to mean no limit.
+ MaxHeaderListSize uint32
+
+ // StrictMaxConcurrentStreams controls whether the server's
+ // SETTINGS_MAX_CONCURRENT_STREAMS should be respected
+ // globally. If false, new TCP connections are created to the
+ // server as needed to keep each under the per-connection
+ // SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the
+ // server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as
+ // a global limit and callers of RoundTrip block when needed,
+ // waiting for their turn.
+ StrictMaxConcurrentStreams bool
+
+ // ReadIdleTimeout is the timeout after which a health check using a ping
+ // frame will be carried out if no frame is received on the connection.
+ // Note that a ping response is considered a received frame, so if
+ // there is no other traffic on the connection, the health check will
+ // be performed every ReadIdleTimeout interval.
+ // If zero, no health check is performed.
+ ReadIdleTimeout time.Duration
+
+ // PingTimeout is the timeout after which the connection will be closed
+ // if a response to Ping is not received.
+ // Defaults to 15s.
+ PingTimeout time.Duration
+
+ // WriteByteTimeout is the timeout after which the connection will be
+ // closed if no data can be written to it. The timeout begins when data is
+ // available to write, and is extended whenever any bytes are written.
+ WriteByteTimeout time.Duration
+
+ // CountError, if non-nil, is called on HTTP/2 transport errors.
+ // It's intended to increment a metric for monitoring, such
+ // as an expvar or Prometheus metric.
+ // The errType consists of only ASCII word characters.
+ CountError func(errType string)
+
+ // t1, if non-nil, is the standard library Transport using
+ // this transport. Its settings are used (but not its
+ // RoundTrip method, etc).
+ t1 *Transport
+
+ connPoolOnce sync.Once
+ connPoolOrDef http2ClientConnPool // non-nil version of ConnPool
+}
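+
+// An illustrative standalone setup (assuming the exported
+// golang.org/x/net/http2 API, where this type is http2.Transport):
+//
+//	t := &http2.Transport{
+//		ReadIdleTimeout: 30 * time.Second, // health-check pings on a quiet conn
+//		PingTimeout:     10 * time.Second, // close the conn if a ping goes unanswered
+//	}
+//	client := &http.Client{Transport: t}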
+
+func (t *http2Transport) maxHeaderListSize() uint32 {
+ if t.MaxHeaderListSize == 0 {
+ return 10 << 20
+ }
+ if t.MaxHeaderListSize == 0xffffffff {
+ return 0
+ }
+ return t.MaxHeaderListSize
+}
+
+func (t *http2Transport) disableCompression() bool {
+ return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
+}
+
+func (t *http2Transport) pingTimeout() time.Duration {
+ if t.PingTimeout == 0 {
+ return 15 * time.Second
+ }
+ return t.PingTimeout
+}
+
+// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
+// It returns an error if t1 has already been HTTP/2-enabled.
+//
+// Use ConfigureTransports instead to configure the HTTP/2 Transport.
+func http2ConfigureTransport(t1 *Transport) error {
+ _, err := http2ConfigureTransports(t1)
+ return err
+}
+
+// ConfigureTransports configures a net/http HTTP/1 Transport to use HTTP/2.
+// It returns a new HTTP/2 Transport for further configuration.
+// It returns an error if t1 has already been HTTP/2-enabled.
+func http2ConfigureTransports(t1 *Transport) (*http2Transport, error) {
+ return http2configureTransports(t1)
+}
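+
+// Illustrative usage (assuming the exported golang.org/x/net/http2 API):
+//
+//	t1 := &http.Transport{}
+//	t2, err := http2.ConfigureTransports(t1)
+//	if err != nil {
+//		log.Fatal(err) // t1 was already HTTP/2-enabled
+//	}
+//	t2.ReadIdleTimeout = 30 * time.Second // tune HTTP/2 behavior via t2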
+
+func http2configureTransports(t1 *Transport) (*http2Transport, error) {
+ connPool := new(http2clientConnPool)
+ t2 := &http2Transport{
+ ConnPool: http2noDialClientConnPool{connPool},
+ t1: t1,
+ }
+ connPool.t = t2
+ if err := http2registerHTTPSProtocol(t1, http2noDialH2RoundTripper{t2}); err != nil {
+ return nil, err
+ }
+ if t1.TLSClientConfig == nil {
+ t1.TLSClientConfig = new(tls.Config)
+ }
+ if !http2strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
+ t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
+ }
+ if !http2strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
+ t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
+ }
+ upgradeFn := func(authority string, c *tls.Conn) RoundTripper {
+ addr := http2authorityAddr("https", authority)
+ if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
+ go c.Close()
+ return http2erringRoundTripper{err}
+ } else if !used {
+ // Turns out we don't need this c.
+ // For example, two goroutines made requests to the same host
+ // at the same time, both kicking off TCP dials (since the
+ // protocol was unknown).
+ go c.Close()
+ }
+ return t2
+ }
+ if m := t1.TLSNextProto; len(m) == 0 {
+ t1.TLSNextProto = map[string]func(string, *tls.Conn) RoundTripper{
+ "h2": upgradeFn,
+ }
+ } else {
+ m["h2"] = upgradeFn
+ }
+ return t2, nil
+}
+
+func (t *http2Transport) connPool() http2ClientConnPool {
+ t.connPoolOnce.Do(t.initConnPool)
+ return t.connPoolOrDef
+}
+
+func (t *http2Transport) initConnPool() {
+ if t.ConnPool != nil {
+ t.connPoolOrDef = t.ConnPool
+ } else {
+ t.connPoolOrDef = &http2clientConnPool{t: t}
+ }
+}
+
+// ClientConn is the state of a single HTTP/2 client connection to an
+// HTTP/2 server.
+type http2ClientConn struct {
+ t *http2Transport
+ tconn net.Conn // usually *tls.Conn, except specialized impls
+ tlsState *tls.ConnectionState // nil only for specialized impls
+ reused uint32 // whether conn is being reused; atomic
+ singleUse bool // whether being used for a single http.Request
+ getConnCalled bool // used by clientConnPool
+
+ // readLoop goroutine fields:
+ readerDone chan struct{} // closed on error
+ readerErr error // set before readerDone is closed
+
+ idleTimeout time.Duration // or 0 for never
+ idleTimer *time.Timer
+
+ mu sync.Mutex // guards following
+ cond *sync.Cond // hold mu; broadcast on flow/closed changes
+ flow http2flow // our conn-level flow control quota (cs.flow is per stream)
+ inflow http2flow // peer's conn-level flow control
+ doNotReuse bool // whether conn is marked to not be reused for any future requests
+ closing bool
+ closed bool
+ seenSettings bool // true if we've seen a settings frame, false otherwise
+ wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
+ goAway *http2GoAwayFrame // if non-nil, the GoAwayFrame we received
+ goAwayDebug string // goAway frame's debug data, retained as a string
+ streams map[uint32]*http2clientStream // client-initiated
+ streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip
+ nextStreamID uint32
+ pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
+ pings map[[8]byte]chan struct{} // in flight ping data to notification channel
+ br *bufio.Reader
+ lastActive time.Time
+ lastIdle time.Time // time last idle
+ // Settings from peer: (also guarded by wmu)
+ maxFrameSize uint32
+ maxConcurrentStreams uint32
+ peerMaxHeaderListSize uint64
+ initialWindowSize uint32
+
+ // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
+ // Write to reqHeaderMu to lock it, read from it to unlock.
+ // Lock reqHeaderMu BEFORE mu or wmu.
+ reqHeaderMu chan struct{}
+
+ // wmu is held while writing.
+ // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes.
+ // Only acquire both at the same time when changing peer settings.
+ wmu sync.Mutex
+ bw *bufio.Writer
+ fr *http2Framer
+ werr error // first write error that has occurred
+ hbuf bytes.Buffer // HPACK encoder writes into this
+ henc *hpack.Encoder
+}
+
+// clientStream is the state for a single HTTP/2 stream. One of these
+// is created for each Transport.RoundTrip call.
+type http2clientStream struct {
+ cc *http2ClientConn
+
+ // Fields of Request that we may access even after the response body is closed.
+ ctx context.Context
+ reqCancel <-chan struct{}
+
+ trace *httptrace.ClientTrace // or nil
+ ID uint32
+ bufPipe http2pipe // buffered pipe with the flow-controlled response payload
+ requestedGzip bool
+ isHead bool
+
+ abortOnce sync.Once
+ abort chan struct{} // closed to signal stream should end immediately
+ abortErr error // set if abort is closed
+
+ peerClosed chan struct{} // closed when the peer sends an END_STREAM flag
+ donec chan struct{} // closed after the stream is in the closed state
+ on100 chan struct{} // buffered; written to if a 100 is received
+
+ respHeaderRecv chan struct{} // closed when headers are received
+ res *Response // set if respHeaderRecv is closed
+
+ flow http2flow // guarded by cc.mu
+ inflow http2flow // guarded by cc.mu
+ bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
+ readErr error // sticky read error; owned by transportResponseBody.Read
+
+ reqBody io.ReadCloser
+ reqBodyContentLength int64 // -1 means unknown
+ reqBodyClosed bool // body has been closed; guarded by cc.mu
+
+ // owned by writeRequest:
+ sentEndStream bool // sent an END_STREAM flag to the peer
+ sentHeaders bool
+
+ // owned by clientConnReadLoop:
+ firstByte bool // got the first response byte
+ pastHeaders bool // got first MetaHeadersFrame (actual headers)
+ pastTrailers bool // got optional second MetaHeadersFrame (trailers)
+ num1xx uint8 // number of 1xx responses seen
+ readClosed bool // peer sent an END_STREAM flag
+ readAborted bool // read loop reset the stream
+
+ trailer Header // accumulated trailers
+ resTrailer *Header // client's Response.Trailer
+}
+
+var http2got1xxFuncForTests func(int, textproto.MIMEHeader) error
+
+// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func,
+// if any. It returns nil if not set or if the Go version is too old.
+func (cs *http2clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error {
+ if fn := http2got1xxFuncForTests; fn != nil {
+ return fn
+ }
+ return http2traceGot1xxResponseFunc(cs.trace)
+}
+
+func (cs *http2clientStream) abortStream(err error) {
+ cs.cc.mu.Lock()
+ defer cs.cc.mu.Unlock()
+ cs.abortStreamLocked(err)
+}
+
+func (cs *http2clientStream) abortStreamLocked(err error) {
+ cs.abortOnce.Do(func() {
+ cs.abortErr = err
+ close(cs.abort)
+ })
+ if cs.reqBody != nil && !cs.reqBodyClosed {
+ cs.reqBody.Close()
+ cs.reqBodyClosed = true
+ }
+ // TODO(dneil): Clean up tests where cs.cc.cond is nil.
+ if cs.cc.cond != nil {
+ // Wake up writeRequestBody if it is waiting on flow control.
+ cs.cc.cond.Broadcast()
+ }
+}
+
+func (cs *http2clientStream) abortRequestBodyWrite() {
+ cc := cs.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if cs.reqBody != nil && !cs.reqBodyClosed {
+ cs.reqBody.Close()
+ cs.reqBodyClosed = true
+ cc.cond.Broadcast()
+ }
+}
+
+type http2stickyErrWriter struct {
+ conn net.Conn
+ timeout time.Duration
+ err *error
+}
+
+func (sew http2stickyErrWriter) Write(p []byte) (n int, err error) {
+ if *sew.err != nil {
+ return 0, *sew.err
+ }
+ for {
+ if sew.timeout != 0 {
+ sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout))
+ }
+ nn, err := sew.conn.Write(p[n:])
+ n += nn
+ if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) {
+ // Keep extending the deadline so long as we're making progress.
+ continue
+ }
+ if sew.timeout != 0 {
+ sew.conn.SetWriteDeadline(time.Time{})
+ }
+ *sew.err = err
+ return n, err
+ }
+}
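+
+// Editor's note (illustrative, mirroring newClientConn below): the sticky
+// writer sits under a bufio.Writer so the first network write error is
+// latched into *err and returned by all later writes:
+//
+//    var werr error
+//    bw := bufio.NewWriter(http2stickyErrWriter{
+//        conn:    c,
+//        timeout: t.WriteByteTimeout,
+//        err:     &werr,
+//    })
+//
+// After a Flush, checking werr once is enough to see whether any write in
+// the batch failed.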
+
+// noCachedConnError is the concrete type of ErrNoCachedConn, which
+// needs to be detected by net/http regardless of whether it's its
+// bundled version (in h2_bundle.go with a rewritten type name) or
+// from a user's x/net/http2. As such, it has a unique method name
+// (IsHTTP2NoCachedConnError) that net/http sniffs for via func
+// isNoCachedConnError.
+type http2noCachedConnError struct{}
+
+func (http2noCachedConnError) IsHTTP2NoCachedConnError() {}
+
+func (http2noCachedConnError) Error() string { return "http2: no cached connection was available" }
+
+// isNoCachedConnError reports whether err is of type noCachedConnError
+// or its equivalent renamed type in net/http2's h2_bundle.go. Both types
+// may coexist in the same running program.
+func http2isNoCachedConnError(err error) bool {
+ _, ok := err.(interface{ IsHTTP2NoCachedConnError() })
+ return ok
+}
+
+var http2ErrNoCachedConn error = http2noCachedConnError{}
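+
+// Editor's note (illustrative sketch, not upstream code): a caller that set
+// RoundTripOpt.OnlyCachedConn can distinguish "no cached connection" from
+// other failures without depending on the concrete error type:
+//
+//    res, err := t.RoundTripOpt(req, http2RoundTripOpt{OnlyCachedConn: true})
+//    if http2isNoCachedConnError(err) {
+//        // no idle conn was available; dial a new one or fall back
+//    }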
+
+// RoundTripOpt are options for the Transport.RoundTripOpt method.
+type http2RoundTripOpt struct {
+ // OnlyCachedConn controls whether RoundTripOpt may
+ // create a new TCP connection. If set true and
+ // no cached connection is available, RoundTripOpt
+ // will return ErrNoCachedConn.
+ OnlyCachedConn bool
+}
+
+func (t *http2Transport) RoundTrip(req *Request) (*Response, error) {
+ return t.RoundTripOpt(req, http2RoundTripOpt{})
+}
+
+// authorityAddr takes a given authority (a host/IP, or host:port / ip:port)
+// and returns a host:port. The port 443 is added if needed.
+func http2authorityAddr(scheme string, authority string) (addr string) {
+ host, port, err := net.SplitHostPort(authority)
+ if err != nil { // authority didn't have a port
+ port = "443"
+ if scheme == "http" {
+ port = "80"
+ }
+ host = authority
+ }
+ if a, err := idna.ToASCII(host); err == nil {
+ host = a
+ }
+ // IPv6 address literal, without a port:
+ if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
+ return host + ":" + port
+ }
+ return net.JoinHostPort(host, port)
+}
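+
+// For example (editor's note):
+//
+//    http2authorityAddr("https", "example.com")      == "example.com:443"
+//    http2authorityAddr("http", "example.com")       == "example.com:80"
+//    http2authorityAddr("https", "[::1]")            == "[::1]:443"
+//    http2authorityAddr("https", "example.com:8443") == "example.com:8443"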
+
+// RoundTripOpt is like RoundTrip, but takes options.
+func (t *http2Transport) RoundTripOpt(req *Request, opt http2RoundTripOpt) (*Response, error) {
+ if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
+ return nil, errors.New("http2: unsupported scheme")
+ }
+
+ addr := http2authorityAddr(req.URL.Scheme, req.URL.Host)
+ for retry := 0; ; retry++ {
+ cc, err := t.connPool().GetClientConn(req, addr)
+ if err != nil {
+ t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
+ return nil, err
+ }
+ reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1)
+ http2traceGotConn(req, cc, reused)
+ res, err := cc.RoundTrip(req)
+ if err != nil && retry <= 6 {
+ if req, err = http2shouldRetryRequest(req, err); err == nil {
+ // After the first retry, do exponential backoff with 10% jitter.
+ if retry == 0 {
+ t.vlogf("RoundTrip retrying after failure: %v", err)
+ continue
+ }
+ backoff := float64(uint(1) << (uint(retry) - 1))
+ backoff += backoff * (0.1 * mathrand.Float64())
+ select {
+ case <-time.After(time.Second * time.Duration(backoff)):
+ t.vlogf("RoundTrip retrying after failure: %v", err)
+ continue
+ case <-req.Context().Done():
+ err = req.Context().Err()
+ }
+ }
+ }
+ if err != nil {
+ t.vlogf("RoundTrip failure: %v", err)
+ return nil, err
+ }
+ return res, nil
+ }
+}
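+
+// Editor's note: the nominal backoff above is 2^(retry-1) seconds, i.e. 1s,
+// 2s, 4s, ... for retries 1, 2, 3, ... Note that time.Duration(backoff)
+// truncates the float64 toward zero, so the 10% jitter only changes the
+// sleep once it can exceed a whole second (base backoff >= 16s).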
+
+// CloseIdleConnections closes any connections that were opened for
+// previous requests but are now sitting idle.
+// It does not interrupt any connections currently in use.
+func (t *http2Transport) CloseIdleConnections() {
+ if cp, ok := t.connPool().(http2clientConnPoolIdleCloser); ok {
+ cp.closeIdleConnections()
+ }
+}
+
+var (
+ http2errClientConnClosed = errors.New("http2: client conn is closed")
+ http2errClientConnUnusable = errors.New("http2: client conn not usable")
+ http2errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+)
+
+// shouldRetryRequest is called by RoundTrip when a request fails to get
+// response headers. It is always called with a non-nil error.
+// It returns either a request to retry (either the same request, or a
+// modified clone), or an error if the request can't be replayed.
+func http2shouldRetryRequest(req *Request, err error) (*Request, error) {
+ if !http2canRetryError(err) {
+ return nil, err
+ }
+ // If the Body is nil (or http.NoBody), it's safe to reuse
+ // this request and its Body.
+ if req.Body == nil || req.Body == NoBody {
+ return req, nil
+ }
+
+ // If the request body can be reset back to its original
+ // state via the optional req.GetBody, do that.
+ if req.GetBody != nil {
+ body, err := req.GetBody()
+ if err != nil {
+ return nil, err
+ }
+ newReq := *req
+ newReq.Body = body
+ return &newReq, nil
+ }
+
+ // The Request.Body can't reset back to the beginning, but we
+ // don't seem to have started to read from it yet, so reuse
+ // the request directly.
+ if err == http2errClientConnUnusable {
+ return req, nil
+ }
+
+ return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err)
+}
+
+func http2canRetryError(err error) bool {
+ if err == http2errClientConnUnusable || err == http2errClientConnGotGoAway {
+ return true
+ }
+ if se, ok := err.(http2StreamError); ok {
+ if se.Code == http2ErrCodeProtocol && se.Cause == http2errFromPeer {
+ // See golang/go#47635, golang/go#42777
+ return true
+ }
+ return se.Code == http2ErrCodeRefusedStream
+ }
+ return false
+}
+
+func (t *http2Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*http2ClientConn, error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ tconn, err := t.dialTLS(ctx)("tcp", addr, t.newTLSConfig(host))
+ if err != nil {
+ return nil, err
+ }
+ return t.newClientConn(tconn, singleUse)
+}
+
+func (t *http2Transport) newTLSConfig(host string) *tls.Config {
+ cfg := new(tls.Config)
+ if t.TLSClientConfig != nil {
+ *cfg = *t.TLSClientConfig.Clone()
+ }
+ if !http2strSliceContains(cfg.NextProtos, http2NextProtoTLS) {
+ cfg.NextProtos = append([]string{http2NextProtoTLS}, cfg.NextProtos...)
+ }
+ if cfg.ServerName == "" {
+ cfg.ServerName = host
+ }
+ return cfg
+}
+
+func (t *http2Transport) dialTLS(ctx context.Context) func(string, string, *tls.Config) (net.Conn, error) {
+ if t.DialTLS != nil {
+ return t.DialTLS
+ }
+ return func(network, addr string, cfg *tls.Config) (net.Conn, error) {
+ tlsCn, err := t.dialTLSWithContext(ctx, network, addr, cfg)
+ if err != nil {
+ return nil, err
+ }
+ state := tlsCn.ConnectionState()
+ if p := state.NegotiatedProtocol; p != http2NextProtoTLS {
+ return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, http2NextProtoTLS)
+ }
+ if !state.NegotiatedProtocolIsMutual {
+ return nil, errors.New("http2: could not negotiate protocol mutually")
+ }
+ return tlsCn, nil
+ }
+}
+
+// disableKeepAlives reports whether connections should be closed as
+// soon as possible after handling the first request.
+func (t *http2Transport) disableKeepAlives() bool {
+ return t.t1 != nil && t.t1.DisableKeepAlives
+}
+
+func (t *http2Transport) expectContinueTimeout() time.Duration {
+ if t.t1 == nil {
+ return 0
+ }
+ return t.t1.ExpectContinueTimeout
+}
+
+func (t *http2Transport) NewClientConn(c net.Conn) (*http2ClientConn, error) {
+ return t.newClientConn(c, t.disableKeepAlives())
+}
+
+func (t *http2Transport) newClientConn(c net.Conn, singleUse bool) (*http2ClientConn, error) {
+ cc := &http2ClientConn{
+ t: t,
+ tconn: c,
+ readerDone: make(chan struct{}),
+ nextStreamID: 1,
+ maxFrameSize: 16 << 10, // spec default
+ initialWindowSize: 65535, // spec default
+ maxConcurrentStreams: http2initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
+ peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
+ streams: make(map[uint32]*http2clientStream),
+ singleUse: singleUse,
+ wantSettingsAck: true,
+ pings: make(map[[8]byte]chan struct{}),
+ reqHeaderMu: make(chan struct{}, 1),
+ }
+ if d := t.idleConnTimeout(); d != 0 {
+ cc.idleTimeout = d
+ cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
+ }
+ if http2VerboseLogs {
+ t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
+ }
+
+ cc.cond = sync.NewCond(&cc.mu)
+ cc.flow.add(int32(http2initialWindowSize))
+
+ // TODO: adjust this writer size to account for frame size +
+ // MTU + crypto/tls record padding.
+ cc.bw = bufio.NewWriter(http2stickyErrWriter{
+ conn: c,
+ timeout: t.WriteByteTimeout,
+ err: &cc.werr,
+ })
+ cc.br = bufio.NewReader(c)
+ cc.fr = http2NewFramer(cc.bw, cc.br)
+ if t.CountError != nil {
+ cc.fr.countError = t.CountError
+ }
+ cc.fr.ReadMetaHeaders = hpack.NewDecoder(http2initialHeaderTableSize, nil)
+ cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
+
+ // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
+ // henc in response to SETTINGS frames?
+ cc.henc = hpack.NewEncoder(&cc.hbuf)
+
+ if t.AllowHTTP {
+ cc.nextStreamID = 3
+ }
+
+ if cs, ok := c.(http2connectionStater); ok {
+ state := cs.ConnectionState()
+ cc.tlsState = &state
+ }
+
+ initialSettings := []http2Setting{
+ {ID: http2SettingEnablePush, Val: 0},
+ {ID: http2SettingInitialWindowSize, Val: http2transportDefaultStreamFlow},
+ }
+ if max := t.maxHeaderListSize(); max != 0 {
+ initialSettings = append(initialSettings, http2Setting{ID: http2SettingMaxHeaderListSize, Val: max})
+ }
+
+ cc.bw.Write(http2clientPreface)
+ cc.fr.WriteSettings(initialSettings...)
+ cc.fr.WriteWindowUpdate(0, http2transportDefaultConnFlow)
+ cc.inflow.add(http2transportDefaultConnFlow + http2initialWindowSize)
+ cc.bw.Flush()
+ if cc.werr != nil {
+ cc.Close()
+ return nil, cc.werr
+ }
+
+ go cc.readLoop()
+ return cc, nil
+}
+
+func (cc *http2ClientConn) healthCheck() {
+ pingTimeout := cc.t.pingTimeout()
+ // We don't need to ping periodically from the health check, because the
+ // ClientConn's readLoop will trigger healthCheck again if no frame is received.
+ ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
+ defer cancel()
+ cc.vlogf("http2: Transport sending health check")
+ err := cc.Ping(ctx)
+ if err != nil {
+ cc.vlogf("http2: Transport health check failure: %v", err)
+ cc.closeForLostPing()
+ } else {
+ cc.vlogf("http2: Transport health check success")
+ }
+}
+
+// SetDoNotReuse marks cc as not reusable for future HTTP requests.
+func (cc *http2ClientConn) SetDoNotReuse() {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cc.doNotReuse = true
+}
+
+func (cc *http2ClientConn) setGoAway(f *http2GoAwayFrame) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ old := cc.goAway
+ cc.goAway = f
+
+ // Merge the previous and current GoAway error frames.
+ if cc.goAwayDebug == "" {
+ cc.goAwayDebug = string(f.DebugData())
+ }
+ if old != nil && old.ErrCode != http2ErrCodeNo {
+ cc.goAway.ErrCode = old.ErrCode
+ }
+ last := f.LastStreamID
+ for streamID, cs := range cc.streams {
+ if streamID > last {
+ cs.abortStreamLocked(http2errClientConnGotGoAway)
+ }
+ }
+}
+
+// CanTakeNewRequest reports whether the connection can take a new request,
+// meaning it has not been closed and has neither received nor sent a GOAWAY.
+//
+// If the caller is going to immediately make a new request on this
+// connection, use ReserveNewRequest instead.
+func (cc *http2ClientConn) CanTakeNewRequest() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.canTakeNewRequestLocked()
+}
+
+// ReserveNewRequest is like CanTakeNewRequest but also reserves a
+// concurrent stream in cc. The reservation is decremented on the
+// next call to RoundTrip.
+func (cc *http2ClientConn) ReserveNewRequest() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if st := cc.idleStateLocked(); !st.canTakeNewRequest {
+ return false
+ }
+ cc.streamsReserved++
+ return true
+}
+
+// ClientConnState describes the state of a ClientConn.
+type http2ClientConnState struct {
+ // Closed is whether the connection is closed.
+ Closed bool
+
+ // Closing is whether the connection is in the process of
+ // closing. It may be closing due to shutdown, being a
+ // single-use connection, being marked as DoNotReuse, or
+ // having received a GOAWAY frame.
+ Closing bool
+
+ // StreamsActive is how many streams are active.
+ StreamsActive int
+
+ // StreamsReserved is how many streams have been reserved via
+ // ClientConn.ReserveNewRequest.
+ StreamsReserved int
+
+ // StreamsPending is how many requests have been sent in excess
+ // of the peer's advertised MaxConcurrentStreams setting and
+ // are waiting for other streams to complete.
+ StreamsPending int
+
+ // MaxConcurrentStreams is how many concurrent streams the
+ // peer advertised as acceptable. Zero means no SETTINGS
+ // frame has been received yet.
+ MaxConcurrentStreams uint32
+
+ // LastIdle, if non-zero, is when the connection last
+ // transitioned to idle state.
+ LastIdle time.Time
+}
+
+// State returns a snapshot of cc's state.
+func (cc *http2ClientConn) State() http2ClientConnState {
+ cc.wmu.Lock()
+ maxConcurrent := cc.maxConcurrentStreams
+ if !cc.seenSettings {
+ maxConcurrent = 0
+ }
+ cc.wmu.Unlock()
+
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return http2ClientConnState{
+ Closed: cc.closed,
+ Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil,
+ StreamsActive: len(cc.streams),
+ StreamsReserved: cc.streamsReserved,
+ StreamsPending: cc.pendingRequests,
+ LastIdle: cc.lastIdle,
+ MaxConcurrentStreams: maxConcurrent,
+ }
+}
+
+// clientConnIdleState describes the suitability of a client
+// connection to initiate a new RoundTrip request.
+type http2clientConnIdleState struct {
+ canTakeNewRequest bool
+}
+
+func (cc *http2ClientConn) idleState() http2clientConnIdleState {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.idleStateLocked()
+}
+
+func (cc *http2ClientConn) idleStateLocked() (st http2clientConnIdleState) {
+ if cc.singleUse && cc.nextStreamID > 1 {
+ return
+ }
+ var maxConcurrentOkay bool
+ if cc.t.StrictMaxConcurrentStreams {
+ // We'll tell the caller we can take a new request to
+ // prevent the caller from dialing a new TCP
+ // connection, but then we'll block later before
+ // writing it.
+ maxConcurrentOkay = true
+ } else {
+ maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams)
+ }
+
+ st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
+ !cc.doNotReuse &&
+ int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
+ !cc.tooIdleLocked()
+ return
+}
+
+func (cc *http2ClientConn) canTakeNewRequestLocked() bool {
+ st := cc.idleStateLocked()
+ return st.canTakeNewRequest
+}
+
+// tooIdleLocked reports whether this connection has been sitting idle
+// for too much wall time.
+func (cc *http2ClientConn) tooIdleLocked() bool {
+ // The Round(0) strips the monotonic clock reading so the
+ // times are compared based on their wall time. We don't want
+ // to reuse a connection that's been sitting idle during
+ // VM/laptop suspend if monotonic time was also frozen.
+ return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
+}
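+
+// Editor's note (illustrative): Round(0) returns the time with its
+// monotonic clock reading stripped, so the comparison above uses wall time:
+//
+//    t := time.Now()  // carries a monotonic clock reading
+//    w := t.Round(0)  // same wall time, monotonic reading stripped
+//    time.Since(w)    // measures wall time, including time spent suspended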
+
+// onIdleTimeout is called from a time.AfterFunc goroutine. It will
+// only be called when we're idle, but because we're coming from a new
+// goroutine, there could be a new request coming in at the same time,
+// so this simply calls the synchronized closeIfIdle to shut down this
+// connection. The timer could just call closeIfIdle, but this is more
+// clear.
+func (cc *http2ClientConn) onIdleTimeout() {
+ cc.closeIfIdle()
+}
+
+func (cc *http2ClientConn) closeConn() error {
+ t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn)
+ defer t.Stop()
+ return cc.tconn.Close()
+}
+
+// A tls.Conn.Close can hang for a long time if the peer is unresponsive.
+// Try to shut it down more aggressively.
+func (cc *http2ClientConn) forceCloseConn() {
+ tc, ok := cc.tconn.(*tls.Conn)
+ if !ok {
+ return
+ }
+ if nc := http2tlsUnderlyingConn(tc); nc != nil {
+ nc.Close()
+ }
+}
+
+func (cc *http2ClientConn) closeIfIdle() {
+ cc.mu.Lock()
+ if len(cc.streams) > 0 || cc.streamsReserved > 0 {
+ cc.mu.Unlock()
+ return
+ }
+ cc.closed = true
+ nextID := cc.nextStreamID
+ // TODO: do clients send GOAWAY too? maybe? Just Close:
+ cc.mu.Unlock()
+
+ if http2VerboseLogs {
+ cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
+ }
+ cc.closeConn()
+}
+
+func (cc *http2ClientConn) isDoNotReuseAndIdle() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.doNotReuse && len(cc.streams) == 0
+}
+
+var http2shutdownEnterWaitStateHook = func() {}
+
+// Shutdown gracefully closes the client connection, waiting for running streams to complete.
+func (cc *http2ClientConn) Shutdown(ctx context.Context) error {
+ if err := cc.sendGoAway(); err != nil {
+ return err
+ }
+ // Wait for all in-flight streams to complete or connection to close
+ done := make(chan struct{})
+ cancelled := false // guarded by cc.mu
+ go func() {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ for {
+ if len(cc.streams) == 0 || cc.closed {
+ cc.closed = true
+ close(done)
+ break
+ }
+ if cancelled {
+ break
+ }
+ cc.cond.Wait()
+ }
+ }()
+ http2shutdownEnterWaitStateHook()
+ select {
+ case <-done:
+ return cc.closeConn()
+ case <-ctx.Done():
+ cc.mu.Lock()
+ // Free the goroutine above
+ cancelled = true
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+ return ctx.Err()
+ }
+}
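+
+// Editor's note (illustrative sketch, not upstream code): callers typically
+// bound Shutdown with a deadline so an unresponsive peer cannot stall it
+// forever, falling back to a hard close:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//    defer cancel()
+//    if err := cc.Shutdown(ctx); err != nil {
+//        cc.Close() // interrupt any remaining in-flight requests
+//    }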
+
+func (cc *http2ClientConn) sendGoAway() error {
+ cc.mu.Lock()
+ closing := cc.closing
+ cc.closing = true
+ maxStreamID := cc.nextStreamID
+ cc.mu.Unlock()
+ if closing {
+ // GOAWAY sent already
+ return nil
+ }
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ // Send a graceful shutdown frame to server
+ if err := cc.fr.WriteGoAway(maxStreamID, http2ErrCodeNo, nil); err != nil {
+ return err
+ }
+ if err := cc.bw.Flush(); err != nil {
+ return err
+ }
+ // Prevent new requests
+ return nil
+}
+
+// closeForError closes the client connection immediately. In-flight
+// requests are interrupted, and err is reported to their streams.
+func (cc *http2ClientConn) closeForError(err error) error {
+ cc.mu.Lock()
+ cc.closed = true
+ for _, cs := range cc.streams {
+ cs.abortStreamLocked(err)
+ }
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+ return cc.closeConn()
+}
+
+// Close closes the client connection immediately.
+//
+// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
+func (cc *http2ClientConn) Close() error {
+ err := errors.New("http2: client connection force closed via ClientConn.Close")
+ return cc.closeForError(err)
+}
+
+// closeForLostPing closes the client connection immediately. In-flight requests are interrupted.
+func (cc *http2ClientConn) closeForLostPing() error {
+ err := errors.New("http2: client connection lost")
+ if f := cc.t.CountError; f != nil {
+ f("conn_close_lost_ping")
+ }
+ return cc.closeForError(err)
+}
+
+// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
+// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
+var http2errRequestCanceled = errors.New("net/http: request canceled")
+
+func http2commaSeparatedTrailers(req *Request) (string, error) {
+ keys := make([]string, 0, len(req.Trailer))
+ for k := range req.Trailer {
+ k = CanonicalHeaderKey(k)
+ switch k {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ return "", fmt.Errorf("invalid Trailer key %q", k)
+ }
+ keys = append(keys, k)
+ }
+ if len(keys) > 0 {
+ sort.Strings(keys)
+ return strings.Join(keys, ","), nil
+ }
+ return "", nil
+}
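+
+// For example (editor's note), req.Trailer with keys "x-checksum" and
+// "Expires" yields the header value "Expires,X-Checksum" (keys are
+// canonicalized, then sorted), while a "Content-Length" trailer key is
+// rejected with an error.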
+
+func (cc *http2ClientConn) responseHeaderTimeout() time.Duration {
+ if cc.t.t1 != nil {
+ return cc.t.t1.ResponseHeaderTimeout
+ }
+ // No way to do this (yet?) with just an http2.Transport. Probably
+ // no need; Request.Cancel is the newer way. We only need to support
+ // this for compatibility with the old http.Transport fields when
+ // we're doing transparent http2.
+ return 0
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level
+// headers, per RFC 7540 Section 8.1.2.2 (Connection-Specific Header Fields).
+// Certain headers are special-cased as okay but not transmitted later.
+func http2checkConnHeaders(req *Request) error {
+ if v := req.Header.Get("Upgrade"); v != "" {
+ return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
+ }
+ if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
+ return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
+ }
+ if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !http2asciiEqualFold(vv[0], "close") && !http2asciiEqualFold(vv[0], "keep-alive")) {
+ return fmt.Errorf("http2: invalid Connection request header: %q", vv)
+ }
+ return nil
+}
+
+// actualContentLength returns a sanitized version of
+// req.ContentLength, where 0 actually means zero (not unknown) and -1
+// means unknown.
+func http2actualContentLength(req *Request) int64 {
+ if req.Body == nil || req.Body == NoBody {
+ return 0
+ }
+ if req.ContentLength != 0 {
+ return req.ContentLength
+ }
+ return -1
+}
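+
+// For example (editor's note):
+//
+//    Body == nil or NoBody               -> 0  (known empty)
+//    Body != nil and ContentLength == 5  -> 5
+//    Body != nil and ContentLength == 0  -> -1 (unknown length)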
+
+func (cc *http2ClientConn) decrStreamReservations() {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cc.decrStreamReservationsLocked()
+}
+
+func (cc *http2ClientConn) decrStreamReservationsLocked() {
+ if cc.streamsReserved > 0 {
+ cc.streamsReserved--
+ }
+}
+
+func (cc *http2ClientConn) RoundTrip(req *Request) (*Response, error) {
+ ctx := req.Context()
+ cs := &http2clientStream{
+ cc: cc,
+ ctx: ctx,
+ reqCancel: req.Cancel,
+ isHead: req.Method == "HEAD",
+ reqBody: req.Body,
+ reqBodyContentLength: http2actualContentLength(req),
+ trace: httptrace.ContextClientTrace(ctx),
+ peerClosed: make(chan struct{}),
+ abort: make(chan struct{}),
+ respHeaderRecv: make(chan struct{}),
+ donec: make(chan struct{}),
+ }
+ go cs.doRequest(req)
+
+ waitDone := func() error {
+ select {
+ case <-cs.donec:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cs.reqCancel:
+ return http2errRequestCanceled
+ }
+ }
+
+ handleResponseHeaders := func() (*Response, error) {
+ res := cs.res
+ if res.StatusCode > 299 {
+ // On error or status code 3xx, 4xx, 5xx, etc abort any
+ // ongoing write, assuming that the server doesn't care
+ // about our request body. If the server replied with 1xx or
+ // 2xx, however, then assume the server DOES potentially
+ // want our body (e.g. full-duplex streaming:
+ // golang.org/issue/13444). If it turns out the server
+ // doesn't, they'll RST_STREAM us soon enough. This is a
+ // heuristic to avoid adding knobs to Transport. Hopefully
+ // we can keep it.
+ cs.abortRequestBodyWrite()
+ }
+ res.Request = req
+ res.TLS = cc.tlsState
+ if res.Body == http2noBody && http2actualContentLength(req) == 0 {
+ // If there isn't a request or response body still being
+ // written, then wait for the stream to be closed before
+ // RoundTrip returns.
+ if err := waitDone(); err != nil {
+ return nil, err
+ }
+ }
+ return res, nil
+ }
+
+ for {
+ select {
+ case <-cs.respHeaderRecv:
+ return handleResponseHeaders()
+ case <-cs.abort:
+ select {
+ case <-cs.respHeaderRecv:
+ // If both cs.respHeaderRecv and cs.abort are signaling,
+ // pick respHeaderRecv. The server probably wrote the
+ // response and immediately reset the stream.
+ // golang.org/issue/49645
+ return handleResponseHeaders()
+ default:
+ waitDone()
+ return nil, cs.abortErr
+ }
+ case <-ctx.Done():
+ err := ctx.Err()
+ cs.abortStream(err)
+ return nil, err
+ case <-cs.reqCancel:
+ cs.abortStream(http2errRequestCanceled)
+ return nil, http2errRequestCanceled
+ }
+ }
+}
+
+// doRequest runs for the duration of the request lifetime.
+//
+// It sends the request and performs post-request cleanup (closing Request.Body, etc.).
+func (cs *http2clientStream) doRequest(req *Request) {
+ err := cs.writeRequest(req)
+ cs.cleanupWriteRequest(err)
+}
+
+// writeRequest sends a request.
+//
+// It returns nil once the request has been written, the response has been
+// read, and the request stream has been half-closed by the peer.
+//
+// It returns a non-nil error if the request ends otherwise. If the returned
+// error is a StreamError, its Code may be used when resetting the stream.
+func (cs *http2clientStream) writeRequest(req *Request) (err error) {
+ cc := cs.cc
+ ctx := cs.ctx
+
+ if err := http2checkConnHeaders(req); err != nil {
+ return err
+ }
+
+ // Acquire the new-request lock by writing to reqHeaderMu.
+ // This lock guards the critical section covering allocating a new stream ID
+ // (requires mu) and creating the stream (requires wmu).
+ if cc.reqHeaderMu == nil {
+ panic("RoundTrip on uninitialized ClientConn") // for tests
+ }
+ select {
+ case cc.reqHeaderMu <- struct{}{}:
+ case <-cs.reqCancel:
+ return http2errRequestCanceled
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ cc.mu.Lock()
+ if cc.idleTimer != nil {
+ cc.idleTimer.Stop()
+ }
+ cc.decrStreamReservationsLocked()
+ if err := cc.awaitOpenSlotForStreamLocked(cs); err != nil {
+ cc.mu.Unlock()
+ <-cc.reqHeaderMu
+ return err
+ }
+ cc.addStreamLocked(cs) // assigns stream ID
+ if http2isConnectionCloseRequest(req) {
+ cc.doNotReuse = true
+ }
+ cc.mu.Unlock()
+
+ // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
+ if !cc.t.disableCompression() &&
+ req.Header.Get("Accept-Encoding") == "" &&
+ req.Header.Get("Range") == "" &&
+ !cs.isHead {
+ // Request gzip only, not deflate. Deflate is ambiguous and
+ // not as universally supported anyway.
+ // See: https://zlib.net/zlib_faq.html#faq39
+ //
+ // Note that we don't request this for HEAD requests,
+ // due to a bug in nginx:
+ // http://trac.nginx.org/nginx/ticket/358
+ // https://golang.org/issue/5522
+ //
+ // We don't request gzip if the request is for a range, since
+ // auto-decoding a portion of a gzipped document will just fail
+ // anyway. See https://golang.org/issue/8923
+ cs.requestedGzip = true
+ }
+
+ continueTimeout := cc.t.expectContinueTimeout()
+ if continueTimeout != 0 {
+ if !httpguts.HeaderValuesContainsToken(req.Header["Expect"], "100-continue") {
+ continueTimeout = 0
+ } else {
+ cs.on100 = make(chan struct{}, 1)
+ }
+ }
+
+ // Past this point (where we send request headers), it is possible for
+ // RoundTrip to return successfully. Since the RoundTrip contract permits
+ // the caller to "mutate or reuse" the Request after closing the Response's Body,
+ // we must take care when referencing the Request from here on.
+ err = cs.encodeAndWriteHeaders(req)
+ <-cc.reqHeaderMu
+ if err != nil {
+ return err
+ }
+
+ hasBody := cs.reqBodyContentLength != 0
+ if !hasBody {
+ cs.sentEndStream = true
+ } else {
+ if continueTimeout != 0 {
+ http2traceWait100Continue(cs.trace)
+ timer := time.NewTimer(continueTimeout)
+ select {
+ case <-timer.C:
+ err = nil
+ case <-cs.on100:
+ err = nil
+ case <-cs.abort:
+ err = cs.abortErr
+ case <-ctx.Done():
+ err = ctx.Err()
+ case <-cs.reqCancel:
+ err = http2errRequestCanceled
+ }
+ timer.Stop()
+ if err != nil {
+ http2traceWroteRequest(cs.trace, err)
+ return err
+ }
+ }
+
+ if err = cs.writeRequestBody(req); err != nil {
+ if err != http2errStopReqBodyWrite {
+ http2traceWroteRequest(cs.trace, err)
+ return err
+ }
+ } else {
+ cs.sentEndStream = true
+ }
+ }
+
+ http2traceWroteRequest(cs.trace, err)
+
+ var respHeaderTimer <-chan time.Time
+ var respHeaderRecv chan struct{}
+ if d := cc.responseHeaderTimeout(); d != 0 {
+ timer := time.NewTimer(d)
+ defer timer.Stop()
+ respHeaderTimer = timer.C
+ respHeaderRecv = cs.respHeaderRecv
+ }
+ // Wait until the peer half-closes its end of the stream,
+ // or until the request is aborted (via context, error, or otherwise),
+ // whichever comes first.
+ for {
+ select {
+ case <-cs.peerClosed:
+ return nil
+ case <-respHeaderTimer:
+ return http2errTimeout
+ case <-respHeaderRecv:
+ respHeaderRecv = nil
+ respHeaderTimer = nil // keep waiting for END_STREAM
+ case <-cs.abort:
+ return cs.abortErr
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cs.reqCancel:
+ return http2errRequestCanceled
+ }
+ }
+}
+
+func (cs *http2clientStream) encodeAndWriteHeaders(req *Request) error {
+ cc := cs.cc
+ ctx := cs.ctx
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+
+ // If the request was canceled while waiting for cc.mu, just quit.
+ select {
+ case <-cs.abort:
+ return cs.abortErr
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cs.reqCancel:
+ return http2errRequestCanceled
+ default:
+ }
+
+ // Encode headers.
+ //
+ // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
+ // sent by writeRequestBody below, along with any Trailers,
+ // again in form HEADERS{1}, CONTINUATION{0,})
+ trailers, err := http2commaSeparatedTrailers(req)
+ if err != nil {
+ return err
+ }
+ hasTrailers := trailers != ""
+ contentLen := http2actualContentLength(req)
+ hasBody := contentLen != 0
+ hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen)
+ if err != nil {
+ return err
+ }
+
+ // Write the request.
+ endStream := !hasBody && !hasTrailers
+ cs.sentHeaders = true
+ err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
+ http2traceWroteHeaders(cs.trace)
+ return err
+}
+
+// cleanupWriteRequest performs post-request tasks.
+//
+// If err (the result of writeRequest) is non-nil and the stream is not closed,
+// cleanupWriteRequest will send a reset to the peer.
+func (cs *http2clientStream) cleanupWriteRequest(err error) {
+ cc := cs.cc
+
+ if cs.ID == 0 {
+ // We were canceled before creating the stream, so return our reservation.
+ cc.decrStreamReservations()
+ }
+
+ // TODO: write h12Compare test showing whether
+ // Request.Body is closed by the Transport,
+ // and in multiple cases: server replies <=299 and >299
+ // while still writing request body
+ cc.mu.Lock()
+ bodyClosed := cs.reqBodyClosed
+ cs.reqBodyClosed = true
+ cc.mu.Unlock()
+ if !bodyClosed && cs.reqBody != nil {
+ cs.reqBody.Close()
+ }
+
+ if err != nil && cs.sentEndStream {
+ // If the connection is closed immediately after the response is read,
+ // we may be aborted before finishing up here. If the stream was closed
+ // cleanly on both sides, there is no error.
+ select {
+ case <-cs.peerClosed:
+ err = nil
+ default:
+ }
+ }
+ if err != nil {
+ cs.abortStream(err) // possibly redundant, but harmless
+ if cs.sentHeaders {
+ if se, ok := err.(http2StreamError); ok {
+ if se.Cause != http2errFromPeer {
+ cc.writeStreamReset(cs.ID, se.Code, err)
+ }
+ } else {
+ cc.writeStreamReset(cs.ID, http2ErrCodeCancel, err)
+ }
+ }
+ cs.bufPipe.CloseWithError(err) // no-op if already closed
+ } else {
+ if cs.sentHeaders && !cs.sentEndStream {
+ cc.writeStreamReset(cs.ID, http2ErrCodeNo, nil)
+ }
+ cs.bufPipe.CloseWithError(http2errRequestCanceled)
+ }
+ if cs.ID != 0 {
+ cc.forgetStreamID(cs.ID)
+ }
+
+ cc.wmu.Lock()
+ werr := cc.werr
+ cc.wmu.Unlock()
+ if werr != nil {
+ cc.Close()
+ }
+
+ close(cs.donec)
+}
+
+// awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams.
+// Must hold cc.mu.
+func (cc *http2ClientConn) awaitOpenSlotForStreamLocked(cs *http2clientStream) error {
+ for {
+ cc.lastActive = time.Now()
+ if cc.closed || !cc.canTakeNewRequestLocked() {
+ return http2errClientConnUnusable
+ }
+ cc.lastIdle = time.Time{}
+ if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) {
+ return nil
+ }
+ cc.pendingRequests++
+ cc.cond.Wait()
+ cc.pendingRequests--
+ select {
+ case <-cs.abort:
+ return cs.abortErr
+ default:
+ }
+ }
+}
+
+// requires cc.wmu be held
+func (cc *http2ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error {
+ first := true // first frame written (HEADERS is first, then CONTINUATION)
+ for len(hdrs) > 0 && cc.werr == nil {
+ chunk := hdrs
+ if len(chunk) > maxFrameSize {
+ chunk = chunk[:maxFrameSize]
+ }
+ hdrs = hdrs[len(chunk):]
+ endHeaders := len(hdrs) == 0
+ if first {
+ cc.fr.WriteHeaders(http2HeadersFrameParam{
+ StreamID: streamID,
+ BlockFragment: chunk,
+ EndStream: endStream,
+ EndHeaders: endHeaders,
+ })
+ first = false
+ } else {
+ cc.fr.WriteContinuation(streamID, endHeaders, chunk)
+ }
+ }
+ cc.bw.Flush()
+ return cc.werr
+}
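+
+// For example (editor's note), a 40000-byte encoded header block with a
+// 16384-byte max frame size is written as HEADERS(16384) followed by
+// CONTINUATION(16384) and CONTINUATION(7232), with END_HEADERS set on the
+// last frame.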
+
+// internal error values; they don't escape to callers
+var (
+ // abort request body write; don't send cancel
+ http2errStopReqBodyWrite = errors.New("http2: aborting request body write")
+
+ // abort request body write, but send a stream reset (CANCEL).
+ http2errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+
+ http2errReqBodyTooLong = errors.New("http2: request body larger than specified content length")
+)
+
+// frameScratchBufferLen returns the length of the scratch buffer used for
+// reading from the outgoing request body and writing DATA frames.
+//
+// It returns max(1, min(peer's advertised max frame size,
+// Request.ContentLength+1, 512KB)).
+func (cs *http2clientStream) frameScratchBufferLen(maxFrameSize int) int {
+ const max = 512 << 10
+ n := int64(maxFrameSize)
+ if n > max {
+ n = max
+ }
+ if cl := cs.reqBodyContentLength; cl != -1 && cl+1 < n {
+ // Add an extra byte past the declared content-length to
+ // give the caller's Request.Body io.Reader a chance to
+ // give us more bytes than they declared, so we can catch it
+ // early.
+ n = cl + 1
+ }
+ if n < 1 {
+ return 1
+ }
+ return int(n) // doesn't truncate; max is 512K
+}
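+
+// For example (editor's note), with a peer max frame size of 16384:
+//
+//    reqBodyContentLength == -1 -> 16384 (a full frame; length unknown)
+//    reqBodyContentLength == 10 -> 11    (declared length plus one sentinel byte)
+//    reqBodyContentLength == 0  -> 1     (the minimum)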
+
+var http2bufPool sync.Pool // of *[]byte
+
+func (cs *http2clientStream) writeRequestBody(req *Request) (err error) {
+ cc := cs.cc
+ body := cs.reqBody
+ sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
+
+ hasTrailers := req.Trailer != nil
+ remainLen := cs.reqBodyContentLength
+ hasContentLen := remainLen != -1
+
+ cc.mu.Lock()
+ maxFrameSize := int(cc.maxFrameSize)
+ cc.mu.Unlock()
+
+ // Scratch buffer for reading into & writing from.
+ scratchLen := cs.frameScratchBufferLen(maxFrameSize)
+ var buf []byte
+ if bp, ok := http2bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen {
+ defer http2bufPool.Put(bp)
+ buf = *bp
+ } else {
+ buf = make([]byte, scratchLen)
+ defer http2bufPool.Put(&buf)
+ }
+
+ var sawEOF bool
+ for !sawEOF {
+ n, err := body.Read(buf)
+ if hasContentLen {
+ remainLen -= int64(n)
+ if remainLen == 0 && err == nil {
+ // The request body's Content-Length was predeclared and
+ // we just finished reading it all, but the underlying io.Reader
+ // returned the final chunk with a nil error (which is one of
+ // the two valid things a Reader can do at EOF). Because we'd prefer
+ // to send the END_STREAM bit early, double-check that we're actually
+ // at EOF. Subsequent reads should return (0, EOF) at this point.
+ // If either value is different, we return an error in one of two ways below.
+ var scratch [1]byte
+ var n1 int
+ n1, err = body.Read(scratch[:])
+ remainLen -= int64(n1)
+ }
+ if remainLen < 0 {
+ err = http2errReqBodyTooLong
+ return err
+ }
+ }
+ if err != nil {
+ cc.mu.Lock()
+ bodyClosed := cs.reqBodyClosed
+ cc.mu.Unlock()
+ switch {
+ case bodyClosed:
+ return http2errStopReqBodyWrite
+ case err == io.EOF:
+ sawEOF = true
+ err = nil
+ default:
+ return err
+ }
+ }
+
+ remain := buf[:n]
+ for len(remain) > 0 && err == nil {
+ var allowed int32
+ allowed, err = cs.awaitFlowControl(len(remain))
+ if err != nil {
+ return err
+ }
+ cc.wmu.Lock()
+ data := remain[:allowed]
+ remain = remain[allowed:]
+ sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
+ err = cc.fr.WriteData(cs.ID, sentEnd, data)
+ if err == nil {
+ // TODO(bradfitz): this flush is for latency, not bandwidth.
+ // Most requests won't need this. Make this opt-in or
+ // opt-out? Use some heuristic on the body type? Nagle-like
+ // timers? Based on 'n'? Only last chunk of this for loop,
+ // unless flow control tokens are low? For now, always.
+ // If we change this, see comment below.
+ err = cc.bw.Flush()
+ }
+ cc.wmu.Unlock()
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ if sentEnd {
+ // Already sent END_STREAM (which implies we have no
+ // trailers) and flushed, because currently all
+ // WriteData frames above get a flush. So we're done.
+ return nil
+ }
+
+ // Since the RoundTrip contract permits the caller to "mutate or reuse"
+ // a request after the Response's Body is closed, verify that this hasn't
+ // happened before accessing the trailers.
+ cc.mu.Lock()
+ trailer := req.Trailer
+ err = cs.abortErr
+ cc.mu.Unlock()
+ if err != nil {
+ return err
+ }
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ var trls []byte
+ if len(trailer) > 0 {
+ trls, err = cc.encodeTrailers(trailer)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Two ways to send END_STREAM: either with trailers, or
+ // with an empty DATA frame.
+ if len(trls) > 0 {
+ err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls)
+ } else {
+ err = cc.fr.WriteData(cs.ID, true, nil)
+ }
+ if ferr := cc.bw.Flush(); ferr != nil && err == nil {
+ err = ferr
+ }
+ return err
+}
+
+// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow
+// control tokens from the server.
+// It returns either the non-zero number of tokens taken or an error
+// if the stream is dead.
+func (cs *http2clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
+ cc := cs.cc
+ ctx := cs.ctx
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ for {
+ if cc.closed {
+ return 0, http2errClientConnClosed
+ }
+ if cs.reqBodyClosed {
+ return 0, http2errStopReqBodyWrite
+ }
+ select {
+ case <-cs.abort:
+ return 0, cs.abortErr
+ case <-ctx.Done():
+ return 0, ctx.Err()
+ case <-cs.reqCancel:
+ return 0, http2errRequestCanceled
+ default:
+ }
+ if a := cs.flow.available(); a > 0 {
+ take := a
+ if int(take) > maxBytes {
+ take = int32(maxBytes) // can't truncate int; take is int32
+ }
+ if take > int32(cc.maxFrameSize) {
+ take = int32(cc.maxFrameSize)
+ }
+ cs.flow.take(take)
+ return take, nil
+ }
+ cc.cond.Wait()
+ }
+}
+
+var http2errNilRequestURL = errors.New("http2: Request.URL is nil")
+
+// requires cc.wmu be held.
+func (cc *http2ClientConn) encodeHeaders(req *Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
+ cc.hbuf.Reset()
+ if req.URL == nil {
+ return nil, http2errNilRequestURL
+ }
+
+ host := req.Host
+ if host == "" {
+ host = req.URL.Host
+ }
+ host, err := httpguts.PunycodeHostPort(host)
+ if err != nil {
+ return nil, err
+ }
+
+ var path string
+ if req.Method != "CONNECT" {
+ path = req.URL.RequestURI()
+ if !http2validPseudoPath(path) {
+ orig := path
+ path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
+ if !http2validPseudoPath(path) {
+ if req.URL.Opaque != "" {
+ return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
+ } else {
+ return nil, fmt.Errorf("invalid request :path %q", orig)
+ }
+ }
+ }
+ }
+
+ // Check for any invalid headers and return an error before we
+ // potentially pollute our hpack state. (We want to be able to
+ // continue to reuse the hpack encoder for future requests)
+ for k, vv := range req.Header {
+ if !httpguts.ValidHeaderFieldName(k) {
+ return nil, fmt.Errorf("invalid HTTP header name %q", k)
+ }
+ for _, v := range vv {
+ if !httpguts.ValidHeaderFieldValue(v) {
+ // Don't include the value in the error, because it may be sensitive.
+ return nil, fmt.Errorf("invalid HTTP header value for header %q", k)
+ }
+ }
+ }
+
+ enumerateHeaders := func(f func(name, value string)) {
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // The :path pseudo-header field includes the path and query parts of the
+ // target URI (the path-absolute production and optionally a '?' character
+ // followed by the query production (see Sections 3.3 and 3.4 of
+ // [RFC3986]).
+ f(":authority", host)
+ m := req.Method
+ if m == "" {
+ m = MethodGet
+ }
+ f(":method", m)
+ if req.Method != "CONNECT" {
+ f(":path", path)
+ f(":scheme", req.URL.Scheme)
+ }
+ if trailers != "" {
+ f("trailer", trailers)
+ }
+
+ var didUA bool
+ for k, vv := range req.Header {
+ if http2asciiEqualFold(k, "host") || http2asciiEqualFold(k, "content-length") {
+ // Host is :authority, already sent.
+ // Content-Length is automatic, set below.
+ continue
+ } else if http2asciiEqualFold(k, "connection") ||
+ http2asciiEqualFold(k, "proxy-connection") ||
+ http2asciiEqualFold(k, "transfer-encoding") ||
+ http2asciiEqualFold(k, "upgrade") ||
+ http2asciiEqualFold(k, "keep-alive") {
+ // Per 8.1.2.2 Connection-Specific Header
+ // Fields, don't send connection-specific
+ // fields. We have already checked if any
+ // are error-worthy so just ignore the rest.
+ continue
+ } else if http2asciiEqualFold(k, "user-agent") {
+ // Match Go's http1 behavior: at most one
+ // User-Agent. If set to nil or empty string,
+ // then omit it. Otherwise if not mentioned,
+ // include the default (below).
+ didUA = true
+ if len(vv) < 1 {
+ continue
+ }
+ vv = vv[:1]
+ if vv[0] == "" {
+ continue
+ }
+ } else if http2asciiEqualFold(k, "cookie") {
+ // Per 8.1.2.5 To allow for better compression efficiency, the
+ // Cookie header field MAY be split into separate header fields,
+ // each with one or more cookie-pairs.
+ for _, v := range vv {
+ for {
+ p := strings.IndexByte(v, ';')
+ if p < 0 {
+ break
+ }
+ f("cookie", v[:p])
+ p++
+ // strip space after semicolon if any.
+ for p+1 <= len(v) && v[p] == ' ' {
+ p++
+ }
+ v = v[p:]
+ }
+ if len(v) > 0 {
+ f("cookie", v)
+ }
+ }
+ continue
+ }
+
+ for _, v := range vv {
+ f(k, v)
+ }
+ }
+ if http2shouldSendReqContentLength(req.Method, contentLength) {
+ f("content-length", strconv.FormatInt(contentLength, 10))
+ }
+ if addGzipHeader {
+ f("accept-encoding", "gzip")
+ }
+ if !didUA {
+ f("user-agent", http2defaultUserAgent)
+ }
+ }
+
+ // Do a first pass over the headers counting bytes to ensure
+ // we don't exceed cc.peerMaxHeaderListSize. This is done as a
+ // separate pass before encoding the headers to prevent
+ // modifying the hpack state.
+ hlSize := uint64(0)
+ enumerateHeaders(func(name, value string) {
+ hf := hpack.HeaderField{Name: name, Value: value}
+ hlSize += uint64(hf.Size())
+ })
+
+ if hlSize > cc.peerMaxHeaderListSize {
+ return nil, http2errRequestHeaderListSize
+ }
+
+ trace := httptrace.ContextClientTrace(req.Context())
+ traceHeaders := http2traceHasWroteHeaderField(trace)
+
+ // Header list size is ok. Write the headers.
+ enumerateHeaders(func(name, value string) {
+ name, ascii := http2asciiToLower(name)
+ if !ascii {
+ // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
+ // field names have to be ASCII characters (just as in HTTP/1.x).
+ return
+ }
+ cc.writeHeader(name, value)
+ if traceHeaders {
+ http2traceWroteHeaderField(trace, name, value)
+ }
+ })
+
+ return cc.hbuf.Bytes(), nil
+}
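+
+// For example (editor's note), the cookie-splitting pass above turns a
+// single "Cookie: a=b; c=d; e=f" header into three separate "cookie"
+// fields ("a=b", "c=d", "e=f"), which HPACK can index and compress
+// individually per RFC 7540 section 8.1.2.5.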
+
+// shouldSendReqContentLength reports whether the http2.Transport should send
+// a "content-length" request header. This logic is basically a copy of the net/http
+// transferWriter.shouldSendContentLength.
+// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
+// -1 means unknown.
+func http2shouldSendReqContentLength(method string, contentLength int64) bool {
+ if contentLength > 0 {
+ return true
+ }
+ if contentLength < 0 {
+ return false
+ }
+ // For zero bodies, whether we send a content-length depends on the method.
+ // It also kinda doesn't matter for http2 either way, with END_STREAM.
+ switch method {
+ case "POST", "PUT", "PATCH":
+ return true
+ default:
+ return false
+ }
+}
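+
+// For example (editor's note): ("POST", 0) and ("PUT", 0) send
+// "content-length: 0", ("GET", 0) sends no content-length at all, and any
+// method with an unknown length (-1) sends nothing.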
+
+// requires cc.wmu be held.
+func (cc *http2ClientConn) encodeTrailers(trailer Header) ([]byte, error) {
+ cc.hbuf.Reset()
+
+ hlSize := uint64(0)
+ for k, vv := range trailer {
+ for _, v := range vv {
+ hf := hpack.HeaderField{Name: k, Value: v}
+ hlSize += uint64(hf.Size())
+ }
+ }
+ if hlSize > cc.peerMaxHeaderListSize {
+ return nil, http2errRequestHeaderListSize
+ }
+
+ for k, vv := range trailer {
+ lowKey, ascii := http2asciiToLower(k)
+ if !ascii {
+ // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
+ // field names have to be ASCII characters (just as in HTTP/1.x).
+ continue
+ }
+ // Transfer-Encoding, etc.. have already been filtered at the
+ // start of RoundTrip
+ for _, v := range vv {
+ cc.writeHeader(lowKey, v)
+ }
+ }
+ return cc.hbuf.Bytes(), nil
+}
+
+func (cc *http2ClientConn) writeHeader(name, value string) {
+ if http2VerboseLogs {
+ log.Printf("http2: Transport encoding header %q = %q", name, value)
+ }
+ cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
+}
+
+type http2resAndError struct {
+ _ http2incomparable
+ res *Response
+ err error
+}
+
+// requires cc.mu be held.
+func (cc *http2ClientConn) addStreamLocked(cs *http2clientStream) {
+ cs.flow.add(int32(cc.initialWindowSize))
+ cs.flow.setConnFlow(&cc.flow)
+ cs.inflow.add(http2transportDefaultStreamFlow)
+ cs.inflow.setConnFlow(&cc.inflow)
+ cs.ID = cc.nextStreamID
+ cc.nextStreamID += 2
+ cc.streams[cs.ID] = cs
+ if cs.ID == 0 {
+ panic("assigned stream ID 0")
+ }
+}
+
+func (cc *http2ClientConn) forgetStreamID(id uint32) {
+ cc.mu.Lock()
+ slen := len(cc.streams)
+ delete(cc.streams, id)
+ if len(cc.streams) != slen-1 {
+ panic("forgetting unknown stream id")
+ }
+ cc.lastActive = time.Now()
+ if len(cc.streams) == 0 && cc.idleTimer != nil {
+ cc.idleTimer.Reset(cc.idleTimeout)
+ cc.lastIdle = time.Now()
+ }
+ // Wake up writeRequestBody via clientStream.awaitFlowControl and
+ // wake up RoundTrip if there is a pending request.
+ cc.cond.Broadcast()
+
+ closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives()
+ if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
+ if http2VerboseLogs {
+ cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2)
+ }
+ cc.closed = true
+ defer cc.closeConn()
+ }
+
+ cc.mu.Unlock()
+}
+
+// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
+type http2clientConnReadLoop struct {
+ _ http2incomparable
+ cc *http2ClientConn
+}
+
+// readLoop runs in its own goroutine and reads and dispatches frames.
+func (cc *http2ClientConn) readLoop() {
+ rl := &http2clientConnReadLoop{cc: cc}
+ defer rl.cleanup()
+ cc.readerErr = rl.run()
+ if ce, ok := cc.readerErr.(http2ConnectionError); ok {
+ cc.wmu.Lock()
+ cc.fr.WriteGoAway(0, http2ErrCode(ce), nil)
+ cc.wmu.Unlock()
+ }
+}
+
+// GoAwayError is returned by the Transport when the server closes the
+// TCP connection after sending a GOAWAY frame.
+type http2GoAwayError struct {
+ LastStreamID uint32
+ ErrCode http2ErrCode
+ DebugData string
+}
+
+func (e http2GoAwayError) Error() string {
+ return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q",
+ e.LastStreamID, e.ErrCode, e.DebugData)
+}
+
+func http2isEOFOrNetReadError(err error) bool {
+ if err == io.EOF {
+ return true
+ }
+ ne, ok := err.(*net.OpError)
+ return ok && ne.Op == "read"
+}
+
+func (rl *http2clientConnReadLoop) cleanup() {
+ cc := rl.cc
+ cc.t.connPool().MarkDead(cc)
+ defer cc.closeConn()
+ defer close(cc.readerDone)
+
+ if cc.idleTimer != nil {
+ cc.idleTimer.Stop()
+ }
+
+ // Close any response bodies if the server closes prematurely.
+ // TODO: also do this if we've written the headers but not
+ // gotten a response yet.
+ err := cc.readerErr
+ cc.mu.Lock()
+ if cc.goAway != nil && http2isEOFOrNetReadError(err) {
+ err = http2GoAwayError{
+ LastStreamID: cc.goAway.LastStreamID,
+ ErrCode: cc.goAway.ErrCode,
+ DebugData: cc.goAwayDebug,
+ }
+ } else if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ cc.closed = true
+ for _, cs := range cc.streams {
+ select {
+ case <-cs.peerClosed:
+ // The server closed the stream before closing the conn,
+ // so no need to interrupt it.
+ default:
+ cs.abortStreamLocked(err)
+ }
+ }
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+}
+
+// countReadFrameError calls Transport.CountError with a string
+// representing err.
+func (cc *http2ClientConn) countReadFrameError(err error) {
+ f := cc.t.CountError
+ if f == nil || err == nil {
+ return
+ }
+ if ce, ok := err.(http2ConnectionError); ok {
+ errCode := http2ErrCode(ce)
+ f(fmt.Sprintf("read_frame_conn_error_%s", errCode.stringToken()))
+ return
+ }
+ if errors.Is(err, io.EOF) {
+ f("read_frame_eof")
+ return
+ }
+ if errors.Is(err, io.ErrUnexpectedEOF) {
+ f("read_frame_unexpected_eof")
+ return
+ }
+ if errors.Is(err, http2ErrFrameTooLarge) {
+ f("read_frame_too_large")
+ return
+ }
+ f("read_frame_other")
+}
+
+func (rl *http2clientConnReadLoop) run() error {
+ cc := rl.cc
+ gotSettings := false
+ readIdleTimeout := cc.t.ReadIdleTimeout
+ var t *time.Timer
+ if readIdleTimeout != 0 {
+ t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
+ defer t.Stop()
+ }
+ for {
+ f, err := cc.fr.ReadFrame()
+ if t != nil {
+ t.Reset(readIdleTimeout)
+ }
+ if err != nil {
+ cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
+ }
+ if se, ok := err.(http2StreamError); ok {
+ if cs := rl.streamByID(se.StreamID); cs != nil {
+ if se.Cause == nil {
+ se.Cause = cc.fr.errDetail
+ }
+ rl.endStreamError(cs, se)
+ }
+ continue
+ } else if err != nil {
+ cc.countReadFrameError(err)
+ return err
+ }
+ if http2VerboseLogs {
+ cc.vlogf("http2: Transport received %s", http2summarizeFrame(f))
+ }
+ if !gotSettings {
+ if _, ok := f.(*http2SettingsFrame); !ok {
+ cc.logf("protocol error: received %T before a SETTINGS frame", f)
+ return http2ConnectionError(http2ErrCodeProtocol)
+ }
+ gotSettings = true
+ }
+
+ switch f := f.(type) {
+ case *http2MetaHeadersFrame:
+ err = rl.processHeaders(f)
+ case *http2DataFrame:
+ err = rl.processData(f)
+ case *http2GoAwayFrame:
+ err = rl.processGoAway(f)
+ case *http2RSTStreamFrame:
+ err = rl.processResetStream(f)
+ case *http2SettingsFrame:
+ err = rl.processSettings(f)
+ case *http2PushPromiseFrame:
+ err = rl.processPushPromise(f)
+ case *http2WindowUpdateFrame:
+ err = rl.processWindowUpdate(f)
+ case *http2PingFrame:
+ err = rl.processPing(f)
+ default:
+ cc.logf("Transport: unhandled response frame type %T", f)
+ }
+ if err != nil {
+ if http2VerboseLogs {
+ cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, http2summarizeFrame(f), err)
+ }
+ return err
+ }
+ }
+}
+
+func (rl *http2clientConnReadLoop) processHeaders(f *http2MetaHeadersFrame) error {
+ cs := rl.streamByID(f.StreamID)
+ if cs == nil {
+ // We'd get here if we canceled a request while the
+ // server had its response still in flight. So if this
+ // was just something we canceled, ignore it.
+ return nil
+ }
+ if cs.readClosed {
+ rl.endStreamError(cs, http2StreamError{
+ StreamID: f.StreamID,
+ Code: http2ErrCodeProtocol,
+ Cause: errors.New("protocol error: headers after END_STREAM"),
+ })
+ return nil
+ }
+ if !cs.firstByte {
+ if cs.trace != nil {
+ // TODO(bradfitz): move first response byte earlier,
+ // when we first read the 9 byte header, not waiting
+ // until all the HEADERS+CONTINUATION frames have been
+ // merged. This works for now.
+ http2traceFirstResponseByte(cs.trace)
+ }
+ cs.firstByte = true
+ }
+ if !cs.pastHeaders {
+ cs.pastHeaders = true
+ } else {
+ return rl.processTrailers(cs, f)
+ }
+
+ res, err := rl.handleResponse(cs, f)
+ if err != nil {
+ if _, ok := err.(http2ConnectionError); ok {
+ return err
+ }
+ // Any other error type is a stream error.
+ rl.endStreamError(cs, http2StreamError{
+ StreamID: f.StreamID,
+ Code: http2ErrCodeProtocol,
+ Cause: err,
+ })
+ return nil // return nil from process* funcs to keep conn alive
+ }
+ if res == nil {
+ // (nil, nil) special case. See handleResponse docs.
+ return nil
+ }
+ cs.resTrailer = &res.Trailer
+ cs.res = res
+ close(cs.respHeaderRecv)
+ if f.StreamEnded() {
+ rl.endStream(cs)
+ }
+ return nil
+}
+
+// handleResponse may return a nil error or a ConnectionError. Any other
+// error value is treated as a StreamError of code ErrCodeProtocol, with
+// the returned error as its detail.
+//
+// As a special case, handleResponse may return (nil, nil) to skip the
+// frame (currently only used for 1xx responses).
+func (rl *http2clientConnReadLoop) handleResponse(cs *http2clientStream, f *http2MetaHeadersFrame) (*Response, error) {
+ if f.Truncated {
+ return nil, http2errResponseHeaderListSize
+ }
+
+ status := f.PseudoValue("status")
+ if status == "" {
+ return nil, errors.New("malformed response from server: missing status pseudo header")
+ }
+ statusCode, err := strconv.Atoi(status)
+ if err != nil {
+ return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header")
+ }
+
+ regularFields := f.RegularFields()
+ strs := make([]string, len(regularFields))
+ header := make(Header, len(regularFields))
+ res := &Response{
+ Proto: "HTTP/2.0",
+ ProtoMajor: 2,
+ Header: header,
+ StatusCode: statusCode,
+ Status: status + " " + StatusText(statusCode),
+ }
+ for _, hf := range regularFields {
+ key := CanonicalHeaderKey(hf.Name)
+ if key == "Trailer" {
+ t := res.Trailer
+ if t == nil {
+ t = make(Header)
+ res.Trailer = t
+ }
+ http2foreachHeaderElement(hf.Value, func(v string) {
+ t[CanonicalHeaderKey(v)] = nil
+ })
+ } else {
+ vv := header[key]
+ if vv == nil && len(strs) > 0 {
+ // More than likely this will be a single-element key.
+ // Most headers aren't multi-valued.
+ // Set the capacity on strs[0] to 1, so any future append
+ // won't extend the slice into the other strings.
+ vv, strs = strs[:1:1], strs[1:]
+ vv[0] = hf.Value
+ header[key] = vv
+ } else {
+ header[key] = append(vv, hf.Value)
+ }
+ }
+ }
+
+ if statusCode >= 100 && statusCode <= 199 {
+ if f.StreamEnded() {
+ return nil, errors.New("1xx informational response with END_STREAM flag")
+ }
+ cs.num1xx++
+ const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http
+ if cs.num1xx > max1xxResponses {
+ return nil, errors.New("http2: too many 1xx informational responses")
+ }
+ if fn := cs.get1xxTraceFunc(); fn != nil {
+ if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil {
+ return nil, err
+ }
+ }
+ if statusCode == 100 {
+ http2traceGot100Continue(cs.trace)
+ select {
+ case cs.on100 <- struct{}{}:
+ default:
+ }
+ }
+ cs.pastHeaders = false // do it all again
+ return nil, nil
+ }
+
+ res.ContentLength = -1
+ if clens := res.Header["Content-Length"]; len(clens) == 1 {
+ if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil {
+ res.ContentLength = int64(cl)
+ } else {
+ // TODO: care? Unlike http/1, it won't mess up our framing, so
+ // it's safer, smuggling-wise, to ignore it.
+ }
+ } else if len(clens) > 1 {
+ // TODO: care? Unlike http/1, it won't mess up our framing, so
+ // it's safer, smuggling-wise, to ignore it.
+ } else if f.StreamEnded() && !cs.isHead {
+ res.ContentLength = 0
+ }
+
+ if cs.isHead {
+ res.Body = http2noBody
+ return res, nil
+ }
+
+ if f.StreamEnded() {
+ if res.ContentLength > 0 {
+ res.Body = http2missingBody{}
+ } else {
+ res.Body = http2noBody
+ }
+ return res, nil
+ }
+
+ cs.bufPipe.setBuffer(&http2dataBuffer{expected: res.ContentLength})
+ cs.bytesRemain = res.ContentLength
+ res.Body = http2transportResponseBody{cs}
+
+ if cs.requestedGzip && http2asciiEqualFold(res.Header.Get("Content-Encoding"), "gzip") {
+ res.Header.Del("Content-Encoding")
+ res.Header.Del("Content-Length")
+ res.ContentLength = -1
+ res.Body = &http2gzipReader{body: res.Body}
+ res.Uncompressed = true
+ }
+ return res, nil
+}
+
+func (rl *http2clientConnReadLoop) processTrailers(cs *http2clientStream, f *http2MetaHeadersFrame) error {
+ if cs.pastTrailers {
+ // Too many HEADERS frames for this stream.
+ return http2ConnectionError(http2ErrCodeProtocol)
+ }
+ cs.pastTrailers = true
+ if !f.StreamEnded() {
+ // We expect that any HEADERS frame carrying trailers
+ // also has END_STREAM set.
+ return http2ConnectionError(http2ErrCodeProtocol)
+ }
+ if len(f.PseudoFields()) > 0 {
+ // No pseudo header fields are defined for trailers.
+ // TODO: ConnectionError might be overly harsh? Check.
+ return http2ConnectionError(http2ErrCodeProtocol)
+ }
+
+ trailer := make(Header)
+ for _, hf := range f.RegularFields() {
+ key := CanonicalHeaderKey(hf.Name)
+ trailer[key] = append(trailer[key], hf.Value)
+ }
+ cs.trailer = trailer
+
+ rl.endStream(cs)
+ return nil
+}
+
+// transportResponseBody is the concrete type of Transport.RoundTrip's
+// Response.Body. It is an io.ReadCloser.
+type http2transportResponseBody struct {
+ cs *http2clientStream
+}
+
+func (b http2transportResponseBody) Read(p []byte) (n int, err error) {
+ cs := b.cs
+ cc := cs.cc
+
+ if cs.readErr != nil {
+ return 0, cs.readErr
+ }
+ n, err = b.cs.bufPipe.Read(p)
+ if cs.bytesRemain != -1 {
+ if int64(n) > cs.bytesRemain {
+ n = int(cs.bytesRemain)
+ if err == nil {
+ err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
+ cs.abortStream(err)
+ }
+ cs.readErr = err
+ return int(cs.bytesRemain), err
+ }
+ cs.bytesRemain -= int64(n)
+ if err == io.EOF && cs.bytesRemain > 0 {
+ err = io.ErrUnexpectedEOF
+ cs.readErr = err
+ return n, err
+ }
+ }
+ if n == 0 {
+ // No flow control tokens to send back.
+ return
+ }
+
+ cc.mu.Lock()
+ var connAdd, streamAdd int32
+ // Check the conn-level first, before the stream-level.
+ if v := cc.inflow.available(); v < http2transportDefaultConnFlow/2 {
+ connAdd = http2transportDefaultConnFlow - v
+ cc.inflow.add(connAdd)
+ }
+ if err == nil { // No need to refresh if the stream is over or failed.
+ // Consider any buffered body data (read from the conn but not
+ // consumed by the client) when computing flow control for this
+ // stream.
+ v := int(cs.inflow.available()) + cs.bufPipe.Len()
+ if v < http2transportDefaultStreamFlow-http2transportDefaultStreamMinRefresh {
+ streamAdd = int32(http2transportDefaultStreamFlow - v)
+ cs.inflow.add(streamAdd)
+ }
+ }
+ cc.mu.Unlock()
+
+ if connAdd != 0 || streamAdd != 0 {
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if connAdd != 0 {
+ cc.fr.WriteWindowUpdate(0, http2mustUint31(connAdd))
+ }
+ if streamAdd != 0 {
+ cc.fr.WriteWindowUpdate(cs.ID, http2mustUint31(streamAdd))
+ }
+ cc.bw.Flush()
+ }
+ return
+}
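+
+// A worked example of the refill logic above (illustrative, not part of
+// the original source; the constants named here are assumptions based on
+// this bundle's defaults, e.g. http2transportDefaultConnFlow = 1<<30 and
+// http2transportDefaultStreamFlow = 4<<20): once a Read drops the
+// stream-level window (available plus buffered) below
+// http2transportDefaultStreamFlow-http2transportDefaultStreamMinRefresh,
+// streamAdd tops it back up to 4 MB; independently, once the conn-level
+// window falls below half of http2transportDefaultConnFlow, connAdd tops
+// it back up to 1 GB. At most one WINDOW_UPDATE per level is written per
+// Read, under a single wmu hold.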
+
+var http2errClosedResponseBody = errors.New("http2: response body closed")
+
+func (b http2transportResponseBody) Close() error {
+ cs := b.cs
+ cc := cs.cc
+
+ unread := cs.bufPipe.Len()
+ if unread > 0 {
+ cc.mu.Lock()
+ // Return connection-level flow control.
+ if unread > 0 {
+ cc.inflow.add(int32(unread))
+ }
+ cc.mu.Unlock()
+
+ // TODO(dneil): Acquiring this mutex can block indefinitely.
+ // Move flow control return to a goroutine?
+ cc.wmu.Lock()
+ // Return connection-level flow control.
+ if unread > 0 {
+ cc.fr.WriteWindowUpdate(0, uint32(unread))
+ }
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+ }
+
+ cs.bufPipe.BreakWithError(http2errClosedResponseBody)
+ cs.abortStream(http2errClosedResponseBody)
+
+ select {
+ case <-cs.donec:
+ case <-cs.ctx.Done():
+ // See golang/go#49366: The net/http package can cancel the
+ // request context after the response body is fully read.
+ // Don't treat this as an error.
+ return nil
+ case <-cs.reqCancel:
+ return http2errRequestCanceled
+ }
+ return nil
+}
+
+func (rl *http2clientConnReadLoop) processData(f *http2DataFrame) error {
+ cc := rl.cc
+ cs := rl.streamByID(f.StreamID)
+ data := f.Data()
+ if cs == nil {
+ cc.mu.Lock()
+ neverSent := cc.nextStreamID
+ cc.mu.Unlock()
+ if f.StreamID >= neverSent {
+ // We never asked for this.
+ cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
+ return http2ConnectionError(http2ErrCodeProtocol)
+ }
+ // We probably did ask for this, but canceled. Just ignore it.
+ // TODO: be stricter here? only silently ignore things which
+ // we canceled, but not things which were closed normally
+ // by the peer? Tough without accumulating too much state.
+
+ // But at least return their flow control:
+ if f.Length > 0 {
+ cc.mu.Lock()
+ cc.inflow.add(int32(f.Length))
+ cc.mu.Unlock()
+
+ cc.wmu.Lock()
+ cc.fr.WriteWindowUpdate(0, uint32(f.Length))
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+ }
+ return nil
+ }
+ if cs.readClosed {
+ cc.logf("protocol error: received DATA after END_STREAM")
+ rl.endStreamError(cs, http2StreamError{
+ StreamID: f.StreamID,
+ Code: http2ErrCodeProtocol,
+ })
+ return nil
+ }
+ if !cs.firstByte {
+ cc.logf("protocol error: received DATA before a HEADERS frame")
+ rl.endStreamError(cs, http2StreamError{
+ StreamID: f.StreamID,
+ Code: http2ErrCodeProtocol,
+ })
+ return nil
+ }
+ if f.Length > 0 {
+ if cs.isHead && len(data) > 0 {
+ cc.logf("protocol error: received DATA on a HEAD request")
+ rl.endStreamError(cs, http2StreamError{
+ StreamID: f.StreamID,
+ Code: http2ErrCodeProtocol,
+ })
+ return nil
+ }
+ // Check connection-level flow control.
+ cc.mu.Lock()
+ if cs.inflow.available() >= int32(f.Length) {
+ cs.inflow.take(int32(f.Length))
+ } else {
+ cc.mu.Unlock()
+ return http2ConnectionError(http2ErrCodeFlowControl)
+ }
+ // Return any padded flow control now, since we won't
+ // refund it later on body reads.
+ var refund int
+ if pad := int(f.Length) - len(data); pad > 0 {
+ refund += pad
+ }
+
+ didReset := false
+ var err error
+ if len(data) > 0 {
+ if _, err = cs.bufPipe.Write(data); err != nil {
+ // Return len(data) now if the stream is already closed,
+ // since data will never be read.
+ didReset = true
+ refund += len(data)
+ }
+ }
+
+ if refund > 0 {
+ cc.inflow.add(int32(refund))
+ if !didReset {
+ cs.inflow.add(int32(refund))
+ }
+ }
+ cc.mu.Unlock()
+
+ if refund > 0 {
+ cc.wmu.Lock()
+ cc.fr.WriteWindowUpdate(0, uint32(refund))
+ if !didReset {
+ cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
+ }
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+ }
+
+ if err != nil {
+ rl.endStreamError(cs, err)
+ return nil
+ }
+ }
+
+ if f.StreamEnded() {
+ rl.endStream(cs)
+ }
+ return nil
+}
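+
+// A padding example for the refund logic above (illustrative numbers, not
+// from the original source): a DATA frame with f.Length == 1000 but
+// len(data) == 900 consumes 1000 flow-control bytes while delivering only
+// 900 to the pipe, so the 100 padding bytes are refunded immediately via
+// WINDOW_UPDATE on the connection, and on the stream as well unless the
+// pipe write failed (didReset).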
+
+func (rl *http2clientConnReadLoop) endStream(cs *http2clientStream) {
+ // TODO: check that any declared content-length matches, like
+ // server.go's (*stream).endStream method.
+ if !cs.readClosed {
+ cs.readClosed = true
+ // Close cs.bufPipe and cs.peerClosed with cc.mu held to avoid a
+ // race condition: The caller can read io.EOF from Response.Body
+ // and close the body before we close cs.peerClosed, causing
+ // cleanupWriteRequest to send a RST_STREAM.
+ rl.cc.mu.Lock()
+ defer rl.cc.mu.Unlock()
+ cs.bufPipe.closeWithErrorAndCode(io.EOF, cs.copyTrailers)
+ close(cs.peerClosed)
+ }
+}
+
+func (rl *http2clientConnReadLoop) endStreamError(cs *http2clientStream, err error) {
+ cs.readAborted = true
+ cs.abortStream(err)
+}
+
+func (rl *http2clientConnReadLoop) streamByID(id uint32) *http2clientStream {
+ rl.cc.mu.Lock()
+ defer rl.cc.mu.Unlock()
+ cs := rl.cc.streams[id]
+ if cs != nil && !cs.readAborted {
+ return cs
+ }
+ return nil
+}
+
+func (cs *http2clientStream) copyTrailers() {
+ for k, vv := range cs.trailer {
+ t := cs.resTrailer
+ if *t == nil {
+ *t = make(Header)
+ }
+ (*t)[k] = vv
+ }
+}
+
+func (rl *http2clientConnReadLoop) processGoAway(f *http2GoAwayFrame) error {
+ cc := rl.cc
+ cc.t.connPool().MarkDead(cc)
+ if f.ErrCode != 0 {
+ // TODO: deal with GOAWAY more. particularly the error code
+ cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
+ if fn := cc.t.CountError; fn != nil {
+ fn("recv_goaway_" + f.ErrCode.stringToken())
+ }
+
+ }
+ cc.setGoAway(f)
+ return nil
+}
+
+func (rl *http2clientConnReadLoop) processSettings(f *http2SettingsFrame) error {
+ cc := rl.cc
+ // Locking both mu and wmu here allows frame encoding to read settings with only wmu held.
+ // Acquiring wmu when f.IsAck() is unnecessary, but convenient and mostly harmless.
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+
+ if err := rl.processSettingsNoWrite(f); err != nil {
+ return err
+ }
+ if !f.IsAck() {
+ cc.fr.WriteSettingsAck()
+ cc.bw.Flush()
+ }
+ return nil
+}
+
+func (rl *http2clientConnReadLoop) processSettingsNoWrite(f *http2SettingsFrame) error {
+ cc := rl.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ if f.IsAck() {
+ if cc.wantSettingsAck {
+ cc.wantSettingsAck = false
+ return nil
+ }
+ return http2ConnectionError(http2ErrCodeProtocol)
+ }
+
+ var seenMaxConcurrentStreams bool
+ err := f.ForeachSetting(func(s http2Setting) error {
+ switch s.ID {
+ case http2SettingMaxFrameSize:
+ cc.maxFrameSize = s.Val
+ case http2SettingMaxConcurrentStreams:
+ cc.maxConcurrentStreams = s.Val
+ seenMaxConcurrentStreams = true
+ case http2SettingMaxHeaderListSize:
+ cc.peerMaxHeaderListSize = uint64(s.Val)
+ case http2SettingInitialWindowSize:
+ // Values above the maximum flow-control
+ // window size of 2^31-1 MUST be treated as a
+ // connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR.
+ if s.Val > math.MaxInt32 {
+ return http2ConnectionError(http2ErrCodeFlowControl)
+ }
+
+ // Adjust flow control of currently-open
+ // frames by the difference of the old initial
+ // window size and this one.
+ delta := int32(s.Val) - int32(cc.initialWindowSize)
+ for _, cs := range cc.streams {
+ cs.flow.add(delta)
+ }
+ cc.cond.Broadcast()
+
+ cc.initialWindowSize = s.Val
+ default:
+ // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
+ cc.vlogf("Unhandled Setting: %v", s)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ if !cc.seenSettings {
+ if !seenMaxConcurrentStreams {
+ // This was the server's initial SETTINGS frame and it
+ // didn't contain a MAX_CONCURRENT_STREAMS field, so
+ // increase the number of concurrent streams this
+ // connection can establish to our default.
+ cc.maxConcurrentStreams = http2defaultMaxConcurrentStreams
+ }
+ cc.seenSettings = true
+ }
+
+ return nil
+}
+
+func (rl *http2clientConnReadLoop) processWindowUpdate(f *http2WindowUpdateFrame) error {
+ cc := rl.cc
+ cs := rl.streamByID(f.StreamID)
+ if f.StreamID != 0 && cs == nil {
+ return nil
+ }
+
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ fl := &cc.flow
+ if cs != nil {
+ fl = &cs.flow
+ }
+ if !fl.add(int32(f.Increment)) {
+ return http2ConnectionError(http2ErrCodeFlowControl)
+ }
+ cc.cond.Broadcast()
+ return nil
+}
+
+func (rl *http2clientConnReadLoop) processResetStream(f *http2RSTStreamFrame) error {
+ cs := rl.streamByID(f.StreamID)
+ if cs == nil {
+ // TODO: return error if server tries to RST_STREAM an idle stream
+ return nil
+ }
+ serr := http2streamError(cs.ID, f.ErrCode)
+ serr.Cause = http2errFromPeer
+ if f.ErrCode == http2ErrCodeProtocol {
+ rl.cc.SetDoNotReuse()
+ }
+ if fn := cs.cc.t.CountError; fn != nil {
+ fn("recv_rststream_" + f.ErrCode.stringToken())
+ }
+ cs.abortStream(serr)
+
+ cs.bufPipe.CloseWithError(serr)
+ return nil
+}
+
+// Ping sends a PING frame to the server and waits for the ack.
+func (cc *http2ClientConn) Ping(ctx context.Context) error {
+ c := make(chan struct{})
+ // Generate a random payload
+ var p [8]byte
+ for {
+ if _, err := rand.Read(p[:]); err != nil {
+ return err
+ }
+ cc.mu.Lock()
+ // check for dup before insert
+ if _, found := cc.pings[p]; !found {
+ cc.pings[p] = c
+ cc.mu.Unlock()
+ break
+ }
+ cc.mu.Unlock()
+ }
+ errc := make(chan error, 1)
+ go func() {
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if err := cc.fr.WritePing(false, p); err != nil {
+ errc <- err
+ return
+ }
+ if err := cc.bw.Flush(); err != nil {
+ errc <- err
+ return
+ }
+ }()
+ select {
+ case <-c:
+ return nil
+ case err := <-errc:
+ return err
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cc.readerDone:
+ // connection closed
+ return cc.readerErr
+ }
+}
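+
+// A minimal usage sketch (illustrative, not part of the original source;
+// cc is assumed to be an established *http2ClientConn):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	if err := cc.Ping(ctx); err != nil {
+//		// No PING ack in time: treat the connection as unhealthy.
+//	}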
+
+func (rl *http2clientConnReadLoop) processPing(f *http2PingFrame) error {
+ if f.IsAck() {
+ cc := rl.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ // If ack, notify listener if any
+ if c, ok := cc.pings[f.Data]; ok {
+ close(c)
+ delete(cc.pings, f.Data)
+ }
+ return nil
+ }
+ cc := rl.cc
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if err := cc.fr.WritePing(true, f.Data); err != nil {
+ return err
+ }
+ return cc.bw.Flush()
+}
+
+func (rl *http2clientConnReadLoop) processPushPromise(f *http2PushPromiseFrame) error {
+ // We told the peer we don't want them.
+ // Spec says:
+ // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
+ // setting of the peer endpoint is set to 0. An endpoint that
+ // has set this setting and has received acknowledgement MUST
+ // treat the receipt of a PUSH_PROMISE frame as a connection
+ // error (Section 5.4.1) of type PROTOCOL_ERROR."
+ return http2ConnectionError(http2ErrCodeProtocol)
+}
+
+func (cc *http2ClientConn) writeStreamReset(streamID uint32, code http2ErrCode, err error) {
+ // TODO: map err to more interesting error codes, once the
+ // HTTP community comes up with some. But currently for
+ // RST_STREAM there's no equivalent to GOAWAY frame's debug
+ // data, and the error codes are all pretty vague ("cancel").
+ cc.wmu.Lock()
+ cc.fr.WriteRSTStream(streamID, code)
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+}
+
+var (
+ http2errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
+ http2errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit")
+)
+
+func (cc *http2ClientConn) logf(format string, args ...interface{}) {
+ cc.t.logf(format, args...)
+}
+
+func (cc *http2ClientConn) vlogf(format string, args ...interface{}) {
+ cc.t.vlogf(format, args...)
+}
+
+func (t *http2Transport) vlogf(format string, args ...interface{}) {
+ if http2VerboseLogs {
+ t.logf(format, args...)
+ }
+}
+
+func (t *http2Transport) logf(format string, args ...interface{}) {
+ log.Printf(format, args...)
+}
+
+var http2noBody io.ReadCloser = http2noBodyReader{}
+
+type http2noBodyReader struct{}
+
+func (http2noBodyReader) Close() error { return nil }
+
+func (http2noBodyReader) Read([]byte) (int, error) { return 0, io.EOF }
+
+type http2missingBody struct{}
+
+func (http2missingBody) Close() error { return nil }
+
+func (http2missingBody) Read([]byte) (int, error) { return 0, io.ErrUnexpectedEOF }
+
+func http2strSliceContains(ss []string, s string) bool {
+ for _, v := range ss {
+ if v == s {
+ return true
+ }
+ }
+ return false
+}
+
+type http2erringRoundTripper struct{ err error }
+
+func (rt http2erringRoundTripper) RoundTripErr() error { return rt.err }
+
+func (rt http2erringRoundTripper) RoundTrip(*Request) (*Response, error) { return nil, rt.err }
+
+// gzipReader wraps a response body so it can lazily
+// call gzip.NewReader on the first call to Read.
+type http2gzipReader struct {
+ _ http2incomparable
+ body io.ReadCloser // underlying Response.Body
+ zr *gzip.Reader // lazily-initialized gzip reader
+ zerr error // sticky error
+}
+
+func (gz *http2gzipReader) Read(p []byte) (n int, err error) {
+ if gz.zerr != nil {
+ return 0, gz.zerr
+ }
+ if gz.zr == nil {
+ gz.zr, err = gzip.NewReader(gz.body)
+ if err != nil {
+ gz.zerr = err
+ return 0, err
+ }
+ }
+ return gz.zr.Read(p)
+}
+
+func (gz *http2gzipReader) Close() error {
+ return gz.body.Close()
+}
+
+type http2errorReader struct{ err error }
+
+func (r http2errorReader) Read(p []byte) (int, error) { return 0, r.err }
+
+// isConnectionCloseRequest reports whether req should use its own
+// connection for a single request and then close the connection.
+func http2isConnectionCloseRequest(req *Request) bool {
+ return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close")
+}
+
+// registerHTTPSProtocol calls Transport.RegisterProtocol,
+// converting panics into errors.
+func http2registerHTTPSProtocol(t *Transport, rt http2noDialH2RoundTripper) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = fmt.Errorf("%v", e)
+ }
+ }()
+ t.RegisterProtocol("https", rt)
+ return nil
+}
+
+// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
+// if there's already a cached connection to the host.
+// (The field is exported so it can be accessed via reflect from net/http; tested
+// by TestNoDialH2RoundTripperType)
+type http2noDialH2RoundTripper struct{ *http2Transport }
+
+func (rt http2noDialH2RoundTripper) RoundTrip(req *Request) (*Response, error) {
+ res, err := rt.http2Transport.RoundTrip(req)
+ if http2isNoCachedConnError(err) {
+ return nil, ErrSkipAltProtocol
+ }
+ return res, err
+}
+
+func (t *http2Transport) idleConnTimeout() time.Duration {
+ if t.t1 != nil {
+ return t.t1.IdleConnTimeout
+ }
+ return 0
+}
+
+func http2traceGetConn(req *Request, hostPort string) {
+ trace := httptrace.ContextClientTrace(req.Context())
+ if trace == nil || trace.GetConn == nil {
+ return
+ }
+ trace.GetConn(hostPort)
+}
+
+func http2traceGotConn(req *Request, cc *http2ClientConn, reused bool) {
+ trace := httptrace.ContextClientTrace(req.Context())
+ if trace == nil || trace.GotConn == nil {
+ return
+ }
+ ci := httptrace.GotConnInfo{Conn: cc.tconn}
+ ci.Reused = reused
+ cc.mu.Lock()
+ ci.WasIdle = len(cc.streams) == 0 && reused
+ if ci.WasIdle && !cc.lastActive.IsZero() {
+ ci.IdleTime = time.Since(cc.lastActive)
+ }
+ cc.mu.Unlock()
+
+ trace.GotConn(ci)
+}
+
+func http2traceWroteHeaders(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.WroteHeaders != nil {
+ trace.WroteHeaders()
+ }
+}
+
+func http2traceGot100Continue(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.Got100Continue != nil {
+ trace.Got100Continue()
+ }
+}
+
+func http2traceWait100Continue(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.Wait100Continue != nil {
+ trace.Wait100Continue()
+ }
+}
+
+func http2traceWroteRequest(trace *httptrace.ClientTrace, err error) {
+ if trace != nil && trace.WroteRequest != nil {
+ trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
+ }
+}
+
+func http2traceFirstResponseByte(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.GotFirstResponseByte != nil {
+ trace.GotFirstResponseByte()
+ }
+}
+
+// writeFramer is implemented by any type that is used to write frames.
+type http2writeFramer interface {
+ writeFrame(http2writeContext) error
+
+ // staysWithinBuffer reports whether this writer promises that
+ // it will write no more than size bytes, and that it
+ // won't Flush the write context.
+ staysWithinBuffer(size int) bool
+}
+
+// writeContext is the interface needed by the various frame writer
+// types below. All the writeFrame methods below are scheduled via the
+// frame writing scheduler (see writeScheduler in writesched.go).
+//
+// This interface is implemented by *serverConn.
+//
+// TODO: decide whether to a) use this in the client code (which didn't
+// end up using this yet, because it has a simpler design, not
+// currently implementing priorities), or b) delete this and
+// make the server code a bit more concrete.
+type http2writeContext interface {
+ Framer() *http2Framer
+ Flush() error
+ CloseConn() error
+ // HeaderEncoder returns an HPACK encoder that writes to the
+ // returned buffer.
+ HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
+}
+
+// writeEndsStream reports whether w writes a frame that will transition
+// the stream to a half-closed local state. This returns false for RST_STREAM,
+// which closes the entire stream (not just the local half).
+func http2writeEndsStream(w http2writeFramer) bool {
+ switch v := w.(type) {
+ case *http2writeData:
+ return v.endStream
+ case *http2writeResHeaders:
+ return v.endStream
+ case nil:
+ // This can only happen if the caller reuses w after it's
+ // been intentionally nil'ed out to prevent use. Keep this
+ // here to catch future refactoring breaking it.
+ panic("writeEndsStream called on nil writeFramer")
+ }
+ return false
+}
+
+type http2flushFrameWriter struct{}
+
+func (http2flushFrameWriter) writeFrame(ctx http2writeContext) error {
+ return ctx.Flush()
+}
+
+func (http2flushFrameWriter) staysWithinBuffer(max int) bool { return false }
+
+type http2writeSettings []http2Setting
+
+func (s http2writeSettings) staysWithinBuffer(max int) bool {
+ const settingSize = 6 // uint16 + uint32
+ return http2frameHeaderLen+settingSize*len(s) <= max
+}
+
+func (s http2writeSettings) writeFrame(ctx http2writeContext) error {
+ return ctx.Framer().WriteSettings([]http2Setting(s)...)
+}
+
+type http2writeGoAway struct {
+ maxStreamID uint32
+ code http2ErrCode
+}
+
+func (p *http2writeGoAway) writeFrame(ctx http2writeContext) error {
+ err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
+ ctx.Flush() // ignore error: we're hanging up on them anyway
+ return err
+}
+
+func (*http2writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
+
+type http2writeData struct {
+ streamID uint32
+ p []byte
+ endStream bool
+}
+
+func (w *http2writeData) String() string {
+ return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
+}
+
+func (w *http2writeData) writeFrame(ctx http2writeContext) error {
+ return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
+}
+
+func (w *http2writeData) staysWithinBuffer(max int) bool {
+ return http2frameHeaderLen+len(w.p) <= max
+}
+
+// handlerPanicRST is the message sent from handler goroutines when
+// the handler panics.
+type http2handlerPanicRST struct {
+ StreamID uint32
+}
+
+func (hp http2handlerPanicRST) writeFrame(ctx http2writeContext) error {
+ return ctx.Framer().WriteRSTStream(hp.StreamID, http2ErrCodeInternal)
+}
+
+func (hp http2handlerPanicRST) staysWithinBuffer(max int) bool { return http2frameHeaderLen+4 <= max }
+
+func (se http2StreamError) writeFrame(ctx http2writeContext) error {
+ return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
+}
+
+func (se http2StreamError) staysWithinBuffer(max int) bool { return http2frameHeaderLen+4 <= max }
+
+type http2writePingAck struct{ pf *http2PingFrame }
+
+func (w http2writePingAck) writeFrame(ctx http2writeContext) error {
+ return ctx.Framer().WritePing(true, w.pf.Data)
+}
+
+func (w http2writePingAck) staysWithinBuffer(max int) bool {
+ return http2frameHeaderLen+len(w.pf.Data) <= max
+}
+
+type http2writeSettingsAck struct{}
+
+func (http2writeSettingsAck) writeFrame(ctx http2writeContext) error {
+ return ctx.Framer().WriteSettingsAck()
+}
+
+func (http2writeSettingsAck) staysWithinBuffer(max int) bool { return http2frameHeaderLen <= max }
+
+// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
+// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
+// for the first/last fragment, respectively.
+func http2splitHeaderBlock(ctx http2writeContext, headerBlock []byte, fn func(ctx http2writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
+ // For now we're lazy and just pick the minimum MAX_FRAME_SIZE
+ // that all peers must support (16KB). Later we could care
+ // more and send larger frames if the peer advertised it, but
+ // there's little point. Most headers are small anyway (so we
+ // generally won't have CONTINUATION frames), and each extra
+ // frame only wastes 9 bytes.
+ const maxFrameSize = 16384
+
+ first := true
+ for len(headerBlock) > 0 {
+ frag := headerBlock
+ if len(frag) > maxFrameSize {
+ frag = frag[:maxFrameSize]
+ }
+ headerBlock = headerBlock[len(frag):]
+ if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
+ return err
+ }
+ first = false
+ }
+ return nil
+}
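+
+// For example (illustrative numbers, not from the original source): a
+// 40,000-byte header block yields fragments of 16384, 16384, and 7232
+// bytes, with fn called as (firstFrag, lastFrag) = (true, false),
+// (false, false), (false, true); the callers below map that to one
+// HEADERS frame followed by two CONTINUATION frames.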
+
+// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
+// for HTTP response headers or trailers from a server handler.
+type http2writeResHeaders struct {
+ streamID uint32
+ httpResCode int // 0 means no ":status" line
+ h Header // may be nil
+ trailers []string // if non-nil, which keys of h to write. nil means all.
+ endStream bool
+
+ date string
+ contentType string
+ contentLength string
+}
+
+func http2encKV(enc *hpack.Encoder, k, v string) {
+ if http2VerboseLogs {
+ log.Printf("http2: server encoding header %q = %q", k, v)
+ }
+ enc.WriteField(hpack.HeaderField{Name: k, Value: v})
+}
+
+func (w *http2writeResHeaders) staysWithinBuffer(max int) bool {
+ // TODO: this is a common one. It'd be nice to return true
+ // here and get into the fast path if we could be clever and
+ // calculate the size fast enough, or at least a conservative
+ // upper bound that usually fires. (Maybe if w.h and
+ // w.trailers are nil, so we don't need to enumerate it.)
+ // Otherwise I'm afraid that just calculating the length to
+ // answer this question would be slower than the ~2µs benefit.
+ return false
+}
+
+func (w *http2writeResHeaders) writeFrame(ctx http2writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+
+ if w.httpResCode != 0 {
+ http2encKV(enc, ":status", http2httpCodeString(w.httpResCode))
+ }
+
+ http2encodeHeaders(enc, w.h, w.trailers)
+
+ if w.contentType != "" {
+ http2encKV(enc, "content-type", w.contentType)
+ }
+ if w.contentLength != "" {
+ http2encKV(enc, "content-length", w.contentLength)
+ }
+ if w.date != "" {
+ http2encKV(enc, "date", w.date)
+ }
+
+ headerBlock := buf.Bytes()
+ if len(headerBlock) == 0 && w.trailers == nil {
+ panic("unexpected empty hpack")
+ }
+
+ return http2splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
+
+func (w *http2writeResHeaders) writeHeaderBlock(ctx http2writeContext, frag []byte, firstFrag, lastFrag bool) error {
+ if firstFrag {
+ return ctx.Framer().WriteHeaders(http2HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: frag,
+ EndStream: w.endStream,
+ EndHeaders: lastFrag,
+ })
+ } else {
+ return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
+ }
+}
+
+// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
+type http2writePushPromise struct {
+ streamID uint32 // pusher stream
+ method string // for :method
+ url *url.URL // for :scheme, :authority, :path
+ h Header
+
+ // Creates an ID for a pushed stream. This runs on serveG just before
+ // the frame is written. The returned ID is copied to promisedID.
+ allocatePromisedID func() (uint32, error)
+ promisedID uint32
+}
+
+func (w *http2writePushPromise) staysWithinBuffer(max int) bool {
+ // TODO: see writeResHeaders.staysWithinBuffer
+ return false
+}
+
+func (w *http2writePushPromise) writeFrame(ctx http2writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+
+ http2encKV(enc, ":method", w.method)
+ http2encKV(enc, ":scheme", w.url.Scheme)
+ http2encKV(enc, ":authority", w.url.Host)
+ http2encKV(enc, ":path", w.url.RequestURI())
+ http2encodeHeaders(enc, w.h, nil)
+
+ headerBlock := buf.Bytes()
+ if len(headerBlock) == 0 {
+ panic("unexpected empty hpack")
+ }
+
+ return http2splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
+
+func (w *http2writePushPromise) writeHeaderBlock(ctx http2writeContext, frag []byte, firstFrag, lastFrag bool) error {
+ if firstFrag {
+ return ctx.Framer().WritePushPromise(http2PushPromiseParam{
+ StreamID: w.streamID,
+ PromiseID: w.promisedID,
+ BlockFragment: frag,
+ EndHeaders: lastFrag,
+ })
+ } else {
+ return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
+ }
+}
+
+type http2write100ContinueHeadersFrame struct {
+ streamID uint32
+}
+
+func (w http2write100ContinueHeadersFrame) writeFrame(ctx http2writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+ http2encKV(enc, ":status", "100")
+ return ctx.Framer().WriteHeaders(http2HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: buf.Bytes(),
+ EndStream: false,
+ EndHeaders: true,
+ })
+}
+
+func (w http2write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
+ // Sloppy but conservative:
+ return 9+2*(len(":status")+len("100")) <= max
+}
+
+type http2writeWindowUpdate struct {
+ streamID uint32 // or 0 for conn-level
+ n uint32
+}
+
+func (wu http2writeWindowUpdate) staysWithinBuffer(max int) bool { return http2frameHeaderLen+4 <= max }
+
+func (wu http2writeWindowUpdate) writeFrame(ctx http2writeContext) error {
+ return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
+}
+
+// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
+// is encoded only if k is in keys.
+func http2encodeHeaders(enc *hpack.Encoder, h Header, keys []string) {
+ if keys == nil {
+ sorter := http2sorterPool.Get().(*http2sorter)
+ // Using defer here, since the keys returned by the
+ // sorter.Keys method are only valid until the sorter
+ // is returned:
+ defer http2sorterPool.Put(sorter)
+ keys = sorter.Keys(h)
+ }
+ for _, k := range keys {
+ vv := h[k]
+ k, ascii := http2lowerHeader(k)
+ if !ascii {
+ // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
+ // field names have to be ASCII characters (just as in HTTP/1.x).
+ continue
+ }
+ if !http2validWireHeaderFieldName(k) {
+ // Skip it as backup paranoia. Per
+ // golang.org/issue/14048, these should
+ // already be rejected at a higher level.
+ continue
+ }
+ isTE := k == "transfer-encoding"
+ for _, v := range vv {
+ if !httpguts.ValidHeaderFieldValue(v) {
+ // TODO: return an error? golang.org/issue/14048
+ // For now just omit it.
+ continue
+ }
+ // TODO: more of "8.1.2.2 Connection-Specific Header Fields"
+ if isTE && v != "trailers" {
+ continue
+ }
+ http2encKV(enc, k, v)
+ }
+ }
+}
+
+// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
+// Methods are never called concurrently.
+type http2WriteScheduler interface {
+ // OpenStream opens a new stream in the write scheduler.
+ // It is illegal to call this with streamID=0 or with a streamID that is
+ // already open -- the call may panic.
+ OpenStream(streamID uint32, options http2OpenStreamOptions)
+
+ // CloseStream closes a stream in the write scheduler. Any frames queued on
+ // this stream should be discarded. It is illegal to call this on a stream
+ // that is not open -- the call may panic.
+ CloseStream(streamID uint32)
+
+ // AdjustStream adjusts the priority of the given stream. This may be called
+ // on a stream that has not yet been opened or has been closed. Note that
+ // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
+ // https://tools.ietf.org/html/rfc7540#section-5.1
+ AdjustStream(streamID uint32, priority http2PriorityParam)
+
+ // Push queues a frame in the scheduler. In most cases, this will not be
+ // called with wr.StreamID()!=0 unless that stream is currently open. The one
+ // exception is RST_STREAM frames, which may be sent on idle or closed streams.
+ Push(wr http2FrameWriteRequest)
+
+ // Pop dequeues the next frame to write. Returns false if no frames can
+ // be written. Frames with a given wr.StreamID() are Pop'd in the same
+ // order they are Push'd, except RST_STREAM frames. No frames should be
+ // discarded except by CloseStream.
+ Pop() (wr http2FrameWriteRequest, ok bool)
+}
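+
+// A typical call sequence, sketched (illustrative, not part of the
+// original source):
+//
+//	ws.OpenStream(1, http2OpenStreamOptions{})
+//	ws.Push(wr) // wr.StreamID() == 1
+//	for {
+//		wr, ok := ws.Pop()
+//		if !ok {
+//			break // nothing writable right now
+//		}
+//		// write wr's frame to the connection
+//	}
+//	ws.CloseStream(1)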
+
+// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
+type http2OpenStreamOptions struct {
+ // PusherID is zero if the stream was initiated by the client. Otherwise,
+ // PusherID names the stream that pushed the newly opened stream.
+ PusherID uint32
+}
+
+// FrameWriteRequest is a request to write a frame.
+type http2FrameWriteRequest struct {
+ // write is the interface value that does the writing, once the
+ // WriteScheduler has selected this frame to write. The write
+ // functions are all defined in write.go.
+ write http2writeFramer
+
+ // stream is the stream on which this frame will be written.
+ // nil for non-stream frames like PING and SETTINGS.
+ // nil for RST_STREAM streams, which use the StreamError.StreamID field instead.
+ stream *http2stream
+
+ // done, if non-nil, must be a buffered channel with space for
+ // 1 message and is sent the return value from write (or an
+ // earlier error) when the frame has been written.
+ done chan error
+}
+
+// StreamID returns the id of the stream this frame will be written to.
+// 0 is used for non-stream frames such as PING and SETTINGS.
+func (wr http2FrameWriteRequest) StreamID() uint32 {
+ if wr.stream == nil {
+ if se, ok := wr.write.(http2StreamError); ok {
+ // (*serverConn).resetStream doesn't set
+ // stream because it doesn't necessarily have
+ // one. So special case this type of write
+ // message.
+ return se.StreamID
+ }
+ return 0
+ }
+ return wr.stream.id
+}
+
+// isControl reports whether wr is a control frame for MaxQueuedControlFrames
+// purposes. That includes non-stream frames and RST_STREAM frames.
+func (wr http2FrameWriteRequest) isControl() bool {
+ return wr.stream == nil
+}
+
+// DataSize returns the number of flow control bytes that must be consumed
+// to write this entire frame. This is 0 for non-DATA frames.
+func (wr http2FrameWriteRequest) DataSize() int {
+ if wd, ok := wr.write.(*http2writeData); ok {
+ return len(wd.p)
+ }
+ return 0
+}
+
+// Consume consumes min(n, available) bytes from this frame, where available
+// is the number of flow control bytes available on the stream. Consume returns
+// 0, 1, or 2 frames, where the integer return value gives the number of frames
+// returned.
+//
+// If flow control prevents consuming any bytes, this returns (_, _, 0). If
+// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
+// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
+// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
+// underlying stream's flow control budget.
+func (wr http2FrameWriteRequest) Consume(n int32) (http2FrameWriteRequest, http2FrameWriteRequest, int) {
+ var empty http2FrameWriteRequest
+
+ // Non-DATA frames are always consumed whole.
+ wd, ok := wr.write.(*http2writeData)
+ if !ok || len(wd.p) == 0 {
+ return wr, empty, 1
+ }
+
+ // Might need to split after applying limits.
+ allowed := wr.stream.flow.available()
+ if n < allowed {
+ allowed = n
+ }
+ if wr.stream.sc.maxFrameSize < allowed {
+ allowed = wr.stream.sc.maxFrameSize
+ }
+ if allowed <= 0 {
+ return empty, empty, 0
+ }
+ if len(wd.p) > int(allowed) {
+ wr.stream.flow.take(allowed)
+ consumed := http2FrameWriteRequest{
+ stream: wr.stream,
+ write: &http2writeData{
+ streamID: wd.streamID,
+ p: wd.p[:allowed],
+ // Even if the original had endStream set, there
+ // are bytes remaining because len(wd.p) > allowed,
+ // so we know endStream is false.
+ endStream: false,
+ },
+ // Our caller is blocking on the final DATA frame, not
+ // this intermediate frame, so no need to wait.
+ done: nil,
+ }
+ rest := http2FrameWriteRequest{
+ stream: wr.stream,
+ write: &http2writeData{
+ streamID: wd.streamID,
+ p: wd.p[allowed:],
+ endStream: wd.endStream,
+ },
+ done: wr.done,
+ }
+ return consumed, rest, 2
+ }
+
+ // The frame is consumed whole.
+ // NB: This cast cannot overflow because allowed is <= math.MaxInt32.
+ wr.stream.flow.take(int32(len(wd.p)))
+ return wr, empty, 1
+}
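+
+// For example (illustrative numbers, not from the original source): with a
+// queued DATA frame of len(wd.p) == 20000, 30000 bytes of stream flow
+// available, a maxFrameSize of 16384, and n == 16384, Consume returns
+// (consumed, rest, 2): consumed carries wd.p[:16384] with endStream forced
+// to false, rest carries wd.p[16384:] with the original endStream and done
+// channel, and 16384 bytes are taken from the stream's flow budget.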
+
+// String is for debugging only.
+func (wr http2FrameWriteRequest) String() string {
+ var des string
+ if s, ok := wr.write.(fmt.Stringer); ok {
+ des = s.String()
+ } else {
+ des = fmt.Sprintf("%T", wr.write)
+ }
+ return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des)
+}
+
+// replyToWriter sends err to wr.done and panics if the send must block.
+// This does nothing if wr.done is nil.
+func (wr *http2FrameWriteRequest) replyToWriter(err error) {
+ if wr.done == nil {
+ return
+ }
+ select {
+ case wr.done <- err:
+ default:
+ panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
+ }
+ wr.write = nil // prevent use (assume it's tainted after wr.done send)
+}
+
+// writeQueue is used by implementations of WriteScheduler.
+type http2writeQueue struct {
+ s []http2FrameWriteRequest
+}
+
+func (q *http2writeQueue) empty() bool { return len(q.s) == 0 }
+
+func (q *http2writeQueue) push(wr http2FrameWriteRequest) {
+ q.s = append(q.s, wr)
+}
+
+func (q *http2writeQueue) shift() http2FrameWriteRequest {
+ if len(q.s) == 0 {
+ panic("invalid use of queue")
+ }
+ wr := q.s[0]
+ // TODO: less copy-happy queue.
+ copy(q.s, q.s[1:])
+ q.s[len(q.s)-1] = http2FrameWriteRequest{}
+ q.s = q.s[:len(q.s)-1]
+ return wr
+}
+
+// consume consumes up to n bytes from q.s[0]. If the frame is
+// entirely consumed, it is removed from the queue. If the frame
+// is partially consumed, the frame is kept with the consumed
+// bytes removed. Returns true iff any bytes were consumed.
+func (q *http2writeQueue) consume(n int32) (http2FrameWriteRequest, bool) {
+ if len(q.s) == 0 {
+ return http2FrameWriteRequest{}, false
+ }
+ consumed, rest, numresult := q.s[0].Consume(n)
+ switch numresult {
+ case 0:
+ return http2FrameWriteRequest{}, false
+ case 1:
+ q.shift()
+ case 2:
+ q.s[0] = rest
+ }
+ return consumed, true
+}
+
+type http2writeQueuePool []*http2writeQueue
+
+// put inserts an unused writeQueue into the pool.
+func (p *http2writeQueuePool) put(q *http2writeQueue) {
+ for i := range q.s {
+ q.s[i] = http2FrameWriteRequest{}
+ }
+ q.s = q.s[:0]
+ *p = append(*p, q)
+}
+
+// get returns an empty writeQueue.
+func (p *http2writeQueuePool) get() *http2writeQueue {
+ ln := len(*p)
+ if ln == 0 {
+ return new(http2writeQueue)
+ }
+ x := ln - 1
+ q := (*p)[x]
+ (*p)[x] = nil
+ *p = (*p)[:x]
+ return q
+}
+
+// RFC 7540, Section 5.3.5: the default weight is 16.
+const http2priorityDefaultWeight = 15 // the stored value is the actual weight minus 1, so 15 encodes 16
+
+// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
+type http2PriorityWriteSchedulerConfig struct {
+ // MaxClosedNodesInTree controls the maximum number of closed streams to
+ // retain in the priority tree. Setting this to zero saves a small amount
+ // of memory at the cost of performance.
+ //
+ // See RFC 7540, Section 5.3.4:
+ // "It is possible for a stream to become closed while prioritization
+ // information ... is in transit. ... This potentially creates suboptimal
+ // prioritization, since the stream could be given a priority that is
+ // different from what is intended. To avoid these problems, an endpoint
+ // SHOULD retain stream prioritization state for a period after streams
+ // become closed. The longer state is retained, the lower the chance that
+ // streams are assigned incorrect or default priority values."
+ MaxClosedNodesInTree int
+
+ // MaxIdleNodesInTree controls the maximum number of idle streams to
+ // retain in the priority tree. Setting this to zero saves a small amount
+ // of memory at the cost of performance.
+ //
+ // See RFC 7540, Section 5.3.4:
+ // Similarly, streams that are in the "idle" state can be assigned
+ // priority or become a parent of other streams. This allows for the
+ // creation of a grouping node in the dependency tree, which enables
+ // more flexible expressions of priority. Idle streams begin with a
+ // default priority (Section 5.3.5).
+ MaxIdleNodesInTree int
+
+ // ThrottleOutOfOrderWrites enables write throttling to help ensure that
+ // data is delivered in priority order. This works around a race where
+ // stream B depends on stream A and both streams are about to call Write
+ // to queue DATA frames. If B wins the race, a naive scheduler would eagerly
+ // write as much data from B as possible, but this is suboptimal because A
+ // is a higher-priority stream. With throttling enabled, we write a small
+ // amount of data from B to minimize the amount of bandwidth that B can
+ // steal from A.
+ ThrottleOutOfOrderWrites bool
+}
+
+// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
+// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
+// If cfg is nil, default options are used.
+func http2NewPriorityWriteScheduler(cfg *http2PriorityWriteSchedulerConfig) http2WriteScheduler {
+ if cfg == nil {
+ // For justification of these defaults, see:
+ // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
+ cfg = &http2PriorityWriteSchedulerConfig{
+ MaxClosedNodesInTree: 10,
+ MaxIdleNodesInTree: 10,
+ ThrottleOutOfOrderWrites: false,
+ }
+ }
+
+ ws := &http2priorityWriteScheduler{
+ nodes: make(map[uint32]*http2priorityNode),
+ maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
+ maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
+ enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
+ }
+ ws.nodes[0] = &ws.root
+ if cfg.ThrottleOutOfOrderWrites {
+ ws.writeThrottleLimit = 1024
+ } else {
+ ws.writeThrottleLimit = math.MaxInt32
+ }
+ return ws
+}
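+
+// A construction sketch (illustrative, not part of the original source):
+//
+//	ws := http2NewPriorityWriteScheduler(&http2PriorityWriteSchedulerConfig{
+//		MaxClosedNodesInTree:     10,
+//		MaxIdleNodesInTree:       10,
+//		ThrottleOutOfOrderWrites: true,
+//	})
+//
+// With throttling enabled, Pop initially limits such out-of-order writes
+// to 1024 bytes and gradually raises the limit while a higher-priority
+// ancestor stream remains open.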
+
+type http2priorityNodeState int
+
+const (
+ http2priorityNodeOpen http2priorityNodeState = iota
+ http2priorityNodeClosed
+ http2priorityNodeIdle
+)
+
+// priorityNode is a node in an HTTP/2 priority tree.
+// Each node is associated with a single stream ID.
+// See RFC 7540, Section 5.3.
+type http2priorityNode struct {
+ q http2writeQueue // queue of pending frames to write
+ id uint32 // id of the stream, or 0 for the root of the tree
+ weight uint8 // the actual weight is weight+1, so the value is in [1,256]
+ state http2priorityNodeState // open | closed | idle
+ bytes int64 // number of bytes written by this node, or 0 if closed
+ subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
+
+ // These links form the priority tree.
+ parent *http2priorityNode
+ kids *http2priorityNode // start of the kids list
+ prev, next *http2priorityNode // doubly-linked list of siblings
+}
+
+func (n *http2priorityNode) setParent(parent *http2priorityNode) {
+ if n == parent {
+ panic("setParent to self")
+ }
+ if n.parent == parent {
+ return
+ }
+ // Unlink from current parent.
+ if parent := n.parent; parent != nil {
+ if n.prev == nil {
+ parent.kids = n.next
+ } else {
+ n.prev.next = n.next
+ }
+ if n.next != nil {
+ n.next.prev = n.prev
+ }
+ }
+ // Link to new parent.
+ // If parent=nil, remove n from the tree.
+ // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
+ n.parent = parent
+ if parent == nil {
+ n.next = nil
+ n.prev = nil
+ } else {
+ n.next = parent.kids
+ n.prev = nil
+ if n.next != nil {
+ n.next.prev = n
+ }
+ parent.kids = n
+ }
+}
+
+func (n *http2priorityNode) addBytes(b int64) {
+ n.bytes += b
+ for ; n != nil; n = n.parent {
+ n.subtreeBytes += b
+ }
+}
+
+// walkReadyInOrder iterates over the tree in priority order, calling f for each node
+// with a non-empty write queue. When f returns true, this function returns true and the
+// walk halts. tmp is used as scratch space for sorting.
+//
+// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
+// if any ancestor p of n is still open (ignoring the root node).
+func (n *http2priorityNode) walkReadyInOrder(openParent bool, tmp *[]*http2priorityNode, f func(*http2priorityNode, bool) bool) bool {
+ if !n.q.empty() && f(n, openParent) {
+ return true
+ }
+ if n.kids == nil {
+ return false
+ }
+
+ // Don't consider the root "open" when updating openParent since
+ // we can't send data frames on the root stream (only control frames).
+ if n.id != 0 {
+ openParent = openParent || (n.state == http2priorityNodeOpen)
+ }
+
+ // Common case: only one kid or all kids have the same weight.
+ // Some clients don't use weights; other clients (like web browsers)
+ // use mostly-linear priority trees.
+ w := n.kids.weight
+ needSort := false
+ for k := n.kids.next; k != nil; k = k.next {
+ if k.weight != w {
+ needSort = true
+ break
+ }
+ }
+ if !needSort {
+ for k := n.kids; k != nil; k = k.next {
+ if k.walkReadyInOrder(openParent, tmp, f) {
+ return true
+ }
+ }
+ return false
+ }
+
+ // Uncommon case: sort the child nodes. We remove the kids from the parent,
+ // then re-insert after sorting so we can reuse tmp for future sort calls.
+ *tmp = (*tmp)[:0]
+ for n.kids != nil {
+ *tmp = append(*tmp, n.kids)
+ n.kids.setParent(nil)
+ }
+ sort.Sort(http2sortPriorityNodeSiblings(*tmp))
+ for i := len(*tmp) - 1; i >= 0; i-- {
+ (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
+ }
+ for k := n.kids; k != nil; k = k.next {
+ if k.walkReadyInOrder(openParent, tmp, f) {
+ return true
+ }
+ }
+ return false
+}
+
+type http2sortPriorityNodeSiblings []*http2priorityNode
+
+func (z http2sortPriorityNodeSiblings) Len() int { return len(z) }
+
+func (z http2sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
+
+func (z http2sortPriorityNodeSiblings) Less(i, k int) bool {
+ // Prefer the subtree that has sent fewer bytes relative to its weight.
+ // See sections 5.3.2 and 5.3.4.
+ wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
+ wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
+ if bi == 0 && bk == 0 {
+ return wi >= wk
+ }
+ if bk == 0 {
+ return false
+ }
+ return bi/bk <= wi/wk
+}
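+
+// A worked example of this ordering (illustrative numbers): sibling i has
+// weight+1 = 8 and subtreeBytes = 100; sibling k has weight+1 = 2 and
+// subtreeBytes = 50. Then bi/bk = 2.0 <= wi/wk = 4.0, so i sorts first:
+// it has sent only twice as many bytes as k despite being entitled to
+// four times k's share.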
+
+type http2priorityWriteScheduler struct {
+ // root is the root of the priority tree, where root.id = 0.
+ // The root queues control frames that are not associated with any stream.
+ root http2priorityNode
+
+ // nodes maps stream ids to priority tree nodes.
+ nodes map[uint32]*http2priorityNode
+
+ // maxID is the maximum stream id in nodes.
+ maxID uint32
+
+ // lists of nodes that have been closed or are idle, but are kept in
+ // the tree for improved prioritization. When the lengths exceed either
+ // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
+ closedNodes, idleNodes []*http2priorityNode
+
+ // From the config.
+ maxClosedNodesInTree int
+ maxIdleNodesInTree int
+ writeThrottleLimit int32
+ enableWriteThrottle bool
+
+ // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
+ tmp []*http2priorityNode
+
+ // pool of empty queues for reuse.
+ queuePool http2writeQueuePool
+}
+
+func (ws *http2priorityWriteScheduler) OpenStream(streamID uint32, options http2OpenStreamOptions) {
+ // The stream may currently be idle, but it must not already be open or closed.
+ if curr := ws.nodes[streamID]; curr != nil {
+ if curr.state != http2priorityNodeIdle {
+ panic(fmt.Sprintf("stream %d already opened", streamID))
+ }
+ curr.state = http2priorityNodeOpen
+ return
+ }
+
+ // RFC 7540, Section 5.3.5:
+ // "All streams are initially assigned a non-exclusive dependency on stream 0x0.
+ // Pushed streams initially depend on their associated stream. In both cases,
+ // streams are assigned a default weight of 16."
+ parent := ws.nodes[options.PusherID]
+ if parent == nil {
+ parent = &ws.root
+ }
+ n := &http2priorityNode{
+ q: *ws.queuePool.get(),
+ id: streamID,
+ weight: http2priorityDefaultWeight,
+ state: http2priorityNodeOpen,
+ }
+ n.setParent(parent)
+ ws.nodes[streamID] = n
+ if streamID > ws.maxID {
+ ws.maxID = streamID
+ }
+}
+
+func (ws *http2priorityWriteScheduler) CloseStream(streamID uint32) {
+ if streamID == 0 {
+ panic("violation of WriteScheduler interface: cannot close stream 0")
+ }
+ if ws.nodes[streamID] == nil {
+ panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
+ }
+ if ws.nodes[streamID].state != http2priorityNodeOpen {
+ panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
+ }
+
+ n := ws.nodes[streamID]
+ n.state = http2priorityNodeClosed
+ n.addBytes(-n.bytes)
+
+ q := n.q
+ ws.queuePool.put(&q)
+ n.q.s = nil
+ if ws.maxClosedNodesInTree > 0 {
+ ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
+ } else {
+ ws.removeNode(n)
+ }
+}
+
+func (ws *http2priorityWriteScheduler) AdjustStream(streamID uint32, priority http2PriorityParam) {
+ if streamID == 0 {
+ panic("adjustPriority on root")
+ }
+
+ // If streamID does not exist, there are two cases:
+ // - A closed stream that has been removed (this will have ID <= maxID)
+ // - An idle stream that is being used for "grouping" (this will have ID > maxID)
+ n := ws.nodes[streamID]
+ if n == nil {
+ if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
+ return
+ }
+ ws.maxID = streamID
+ n = &http2priorityNode{
+ q: *ws.queuePool.get(),
+ id: streamID,
+ weight: http2priorityDefaultWeight,
+ state: http2priorityNodeIdle,
+ }
+ n.setParent(&ws.root)
+ ws.nodes[streamID] = n
+ ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
+ }
+
+ // Section 5.3.1: A dependency on a stream that is not currently in the tree
+ // results in that stream being given a default priority (Section 5.3.5).
+ parent := ws.nodes[priority.StreamDep]
+ if parent == nil {
+ n.setParent(&ws.root)
+ n.weight = http2priorityDefaultWeight
+ return
+ }
+
+ // Ignore if the client tries to make a node its own parent.
+ if n == parent {
+ return
+ }
+
+ // Section 5.3.3:
+ // "If a stream is made dependent on one of its own dependencies, the
+ // formerly dependent stream is first moved to be dependent on the
+ // reprioritized stream's previous parent. The moved dependency retains
+ // its weight."
+ //
+ // That is: if parent depends on n, move parent to depend on n.parent.
+ for x := parent.parent; x != nil; x = x.parent {
+ if x == n {
+ parent.setParent(n.parent)
+ break
+ }
+ }
+
+ // Section 5.3.3: The exclusive flag causes the stream to become the sole
+ // dependency of its parent stream, causing other dependencies to become
+ // dependent on the exclusive stream.
+ if priority.Exclusive {
+ k := parent.kids
+ for k != nil {
+ next := k.next
+ if k != n {
+ k.setParent(n)
+ }
+ k = next
+ }
+ }
+
+ n.setParent(parent)
+ n.weight = priority.Weight
+}
+
+func (ws *http2priorityWriteScheduler) Push(wr http2FrameWriteRequest) {
+ var n *http2priorityNode
+ if id := wr.StreamID(); id == 0 {
+ n = &ws.root
+ } else {
+ n = ws.nodes[id]
+ if n == nil {
+ // id is an idle or closed stream. wr should not be a HEADERS or
+ // DATA frame. However, wr can be a RST_STREAM. In this case, we
+ // push wr onto the root, rather than creating a new priorityNode,
+ // since RST_STREAM is tiny and the stream's priority is unknown
+ // anyway. See issue #17919.
+ if wr.DataSize() > 0 {
+ panic("add DATA on non-open stream")
+ }
+ n = &ws.root
+ }
+ }
+ n.q.push(wr)
+}
+
+func (ws *http2priorityWriteScheduler) Pop() (wr http2FrameWriteRequest, ok bool) {
+ ws.root.walkReadyInOrder(false, &ws.tmp, func(n *http2priorityNode, openParent bool) bool {
+ limit := int32(math.MaxInt32)
+ if openParent {
+ limit = ws.writeThrottleLimit
+ }
+ wr, ok = n.q.consume(limit)
+ if !ok {
+ return false
+ }
+ n.addBytes(int64(wr.DataSize()))
+ // If B depends on A and B continuously has data available but A
+ // does not, gradually increase the throttling limit to allow B to
+ // steal more and more bandwidth from A.
+ if openParent {
+ ws.writeThrottleLimit += 1024
+ if ws.writeThrottleLimit < 0 {
+ ws.writeThrottleLimit = math.MaxInt32
+ }
+ } else if ws.enableWriteThrottle {
+ ws.writeThrottleLimit = 1024
+ }
+ return true
+ })
+ return wr, ok
+}
+
+func (ws *http2priorityWriteScheduler) addClosedOrIdleNode(list *[]*http2priorityNode, maxSize int, n *http2priorityNode) {
+ if maxSize == 0 {
+ return
+ }
+ if len(*list) == maxSize {
+ // Remove the oldest node, then shift left.
+ ws.removeNode((*list)[0])
+ x := (*list)[1:]
+ copy(*list, x)
+ *list = (*list)[:len(x)]
+ }
+ *list = append(*list, n)
+}
+
+func (ws *http2priorityWriteScheduler) removeNode(n *http2priorityNode) {
+ for k := n.kids; k != nil; k = k.next {
+ k.setParent(n.parent)
+ }
+ n.setParent(nil)
+ delete(ws.nodes, n.id)
+}
+
+// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
+// priorities. Control frames like SETTINGS and PING are written before DATA
+// frames, but if no control frames are queued and multiple streams have queued
+// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
+func http2NewRandomWriteScheduler() http2WriteScheduler {
+ return &http2randomWriteScheduler{sq: make(map[uint32]*http2writeQueue)}
+}
+
+type http2randomWriteScheduler struct {
+ // zero queues frames not associated with a specific stream.
+ zero http2writeQueue
+
+ // sq contains the stream-specific queues, keyed by stream ID.
+ // When a stream is idle, closed, or emptied, it's deleted
+ // from the map.
+ sq map[uint32]*http2writeQueue
+
+ // pool of empty queues for reuse.
+ queuePool http2writeQueuePool
+}
+
+func (ws *http2randomWriteScheduler) OpenStream(streamID uint32, options http2OpenStreamOptions) {
+ // no-op: idle streams are not tracked
+}
+
+func (ws *http2randomWriteScheduler) CloseStream(streamID uint32) {
+ q, ok := ws.sq[streamID]
+ if !ok {
+ return
+ }
+ delete(ws.sq, streamID)
+ ws.queuePool.put(q)
+}
+
+func (ws *http2randomWriteScheduler) AdjustStream(streamID uint32, priority http2PriorityParam) {
+ // no-op: priorities are ignored
+}
+
+func (ws *http2randomWriteScheduler) Push(wr http2FrameWriteRequest) {
+ if wr.isControl() {
+ ws.zero.push(wr)
+ return
+ }
+ id := wr.StreamID()
+ q, ok := ws.sq[id]
+ if !ok {
+ q = ws.queuePool.get()
+ ws.sq[id] = q
+ }
+ q.push(wr)
+}
+
+func (ws *http2randomWriteScheduler) Pop() (http2FrameWriteRequest, bool) {
+ // Control and RST_STREAM frames first.
+ if !ws.zero.empty() {
+ return ws.zero.shift(), true
+ }
+ // Iterate over all non-idle streams until finding one that can be consumed.
+ for streamID, q := range ws.sq {
+ if wr, ok := q.consume(math.MaxInt32); ok {
+ if q.empty() {
+ delete(ws.sq, streamID)
+ ws.queuePool.put(q)
+ }
+ return wr, true
+ }
+ }
+ return http2FrameWriteRequest{}, false
+}
diff --git a/contrib/go/_std_1.19/src/net/http/header.go b/contrib/go/_std_1.19/src/net/http/header.go
new file mode 100644
index 0000000000..e0b342c63c
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/http/header.go
@@ -0,0 +1,280 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "io"
+ "net/http/httptrace"
+ "net/http/internal/ascii"
+ "net/textproto"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http/httpguts"
+)
+
+// A Header represents the key-value pairs in an HTTP header.
+//
+// The keys should be in canonical form, as returned by
+// CanonicalHeaderKey.
+type Header map[string][]string
+
+// Add adds the key, value pair to the header.
+// It appends to any existing values associated with key.
+// The key is case insensitive; it is canonicalized by
+// CanonicalHeaderKey.
+func (h Header) Add(key, value string) {
+ textproto.MIMEHeader(h).Add(key, value)
+}
+
+// Set sets the header entries associated with key to the
+// single element value. It replaces any existing values
+// associated with key. The key is case insensitive; it is
+// canonicalized by textproto.CanonicalMIMEHeaderKey.
+// To use non-canonical keys, assign to the map directly.
+func (h Header) Set(key, value string) {
+ textproto.MIMEHeader(h).Set(key, value)
+}
+
+// Get gets the first value associated with the given key. If
+// there are no values associated with the key, Get returns "".
+// It is case insensitive; textproto.CanonicalMIMEHeaderKey is
+// used to canonicalize the provided key. Get assumes that all
+// keys are stored in canonical form. To use non-canonical keys,
+// access the map directly.
+func (h Header) Get(key string) string {
+ return textproto.MIMEHeader(h).Get(key)
+}
+
+// Values returns all values associated with the given key.
+// It is case insensitive; textproto.CanonicalMIMEHeaderKey is
+// used to canonicalize the provided key. To use non-canonical
+// keys, access the map directly.
+// The returned slice is not a copy.
+func (h Header) Values(key string) []string {
+ return textproto.MIMEHeader(h).Values(key)
+}
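+
+// Because the returned slice is not a copy, mutating it mutates the
+// underlying Header. A sketch (illustrative, not part of the original
+// source):
+//
+//	h := Header{}
+//	h.Add("Accept", "text/html")
+//	vs := h.Values("Accept")
+//	vs[0] = "application/json" // h.Get("Accept") now returns the new value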
+
+// get is like Get, but key must already be in CanonicalHeaderKey form.
+func (h Header) get(key string) string {
+ if v := h[key]; len(v) > 0 {
+ return v[0]
+ }
+ return ""
+}
+
+// has reports whether h has the provided key defined, even if it's
+// set to a 0-length slice.
+func (h Header) has(key string) bool {
+ _, ok := h[key]
+ return ok
+}
+
+// Del deletes the values associated with key.
+// The key is case insensitive; it is canonicalized by
+// CanonicalHeaderKey.
+func (h Header) Del(key string) {
+ textproto.MIMEHeader(h).Del(key)
+}
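+
+// Illustrative usage (editor's sketch, not part of the upstream source):
+// keys are canonicalized both when stored and when looked up.
+//
+//	h := Header{}
+//	h.Add("accept-encoding", "gzip") // stored under "Accept-Encoding"
+//	h.Add("Accept-Encoding", "br")
+//	h.Get("accept-encoding")    // "gzip" (first value)
+//	h.Values("Accept-Encoding") // []string{"gzip", "br"}
+//	h.Del("ACCEPT-ENCODING")    // removes the entry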
+
+// Write writes a header in wire format.
+func (h Header) Write(w io.Writer) error {
+ return h.write(w, nil)
+}
+
+func (h Header) write(w io.Writer, trace *httptrace.ClientTrace) error {
+ return h.writeSubset(w, nil, trace)
+}
+
+// Clone returns a copy of h or nil if h is nil.
+func (h Header) Clone() Header {
+ if h == nil {
+ return nil
+ }
+
+ // Find total number of values.
+ nv := 0
+ for _, vv := range h {
+ nv += len(vv)
+ }
+ sv := make([]string, nv) // shared backing array for headers' values
+ h2 := make(Header, len(h))
+ for k, vv := range h {
+ if vv == nil {
+ // Preserve nil values. ReverseProxy distinguishes
+ // between nil and zero-length header values.
+ h2[k] = nil
+ continue
+ }
+ n := copy(sv, vv)
+ h2[k] = sv[:n:n]
+ sv = sv[n:]
+ }
+ return h2
+}
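+
+// Illustrative semantics (editor's sketch): the clone's value slices are
+// backed by a fresh array, so mutating them leaves the original intact.
+//
+//	h := Header{"Etag": {"v1"}}
+//	h2 := h.Clone()
+//	h2["Etag"][0] = "v2" // h["Etag"][0] is still "v1"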
+
+var timeFormats = []string{
+ TimeFormat,
+ time.RFC850,
+ time.ANSIC,
+}
+
+// ParseTime parses a time header (such as the Date: header),
+// trying each of the three formats allowed by HTTP/1.1:
+// TimeFormat, time.RFC850, and time.ANSIC.
+func ParseTime(text string) (t time.Time, err error) {
+ for _, layout := range timeFormats {
+ t, err = time.Parse(layout, text)
+ if err == nil {
+ return
+ }
+ }
+ return
+}
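+
+// Illustrative usage (editor's sketch):
+//
+//	t, err := ParseTime("Mon, 02 Jan 2006 15:04:05 GMT") // TimeFormat layout
+//	// err == nil; t is the parsed time in UTC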
+
+var headerNewlineToSpace = strings.NewReplacer("\n", " ", "\r", " ")
+
+// stringWriter implements WriteString on a Writer.
+type stringWriter struct {
+ w io.Writer
+}
+
+func (w stringWriter) WriteString(s string) (n int, err error) {
+ return w.w.Write([]byte(s))
+}
+
+type keyValues struct {
+ key string
+ values []string
+}
+
+// A headerSorter implements sort.Interface by sorting a []keyValues
+// by key. It's used as a pointer, so it can fit in a sort.Interface
+// interface value without allocation.
+type headerSorter struct {
+ kvs []keyValues
+}
+
+func (s *headerSorter) Len() int { return len(s.kvs) }
+func (s *headerSorter) Swap(i, j int) { s.kvs[i], s.kvs[j] = s.kvs[j], s.kvs[i] }
+func (s *headerSorter) Less(i, j int) bool { return s.kvs[i].key < s.kvs[j].key }
+
+var headerSorterPool = sync.Pool{
+ New: func() any { return new(headerSorter) },
+}
+
+// sortedKeyValues returns h's keys sorted in the returned kvs
+// slice. The headerSorter used to sort is also returned, for possible
+// return to headerSorterPool.
+func (h Header) sortedKeyValues(exclude map[string]bool) (kvs []keyValues, hs *headerSorter) {
+ hs = headerSorterPool.Get().(*headerSorter)
+ if cap(hs.kvs) < len(h) {
+ hs.kvs = make([]keyValues, 0, len(h))
+ }
+ kvs = hs.kvs[:0]
+ for k, vv := range h {
+ if !exclude[k] {
+ kvs = append(kvs, keyValues{k, vv})
+ }
+ }
+ hs.kvs = kvs
+ sort.Sort(hs)
+ return kvs, hs
+}
+
+// WriteSubset writes a header in wire format.
+// If exclude is not nil, keys where exclude[key] == true are not written.
+// Keys are not canonicalized before checking the exclude map.
+func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error {
+ return h.writeSubset(w, exclude, nil)
+}
+
+func (h Header) writeSubset(w io.Writer, exclude map[string]bool, trace *httptrace.ClientTrace) error {
+ ws, ok := w.(io.StringWriter)
+ if !ok {
+ ws = stringWriter{w}
+ }
+ kvs, sorter := h.sortedKeyValues(exclude)
+ var formattedVals []string
+ for _, kv := range kvs {
+ if !httpguts.ValidHeaderFieldName(kv.key) {
+ // This could be an error. In the common case of
+ // writing response headers, however, we have no good
+ // way to provide the error back to the server
+ // handler, so just drop invalid headers instead.
+ continue
+ }
+ for _, v := range kv.values {
+ v = headerNewlineToSpace.Replace(v)
+ v = textproto.TrimString(v)
+ for _, s := range []string{kv.key, ": ", v, "\r\n"} {
+ if _, err := ws.WriteString(s); err != nil {
+ headerSorterPool.Put(sorter)
+ return err
+ }
+ }
+ if trace != nil && trace.WroteHeaderField != nil {
+ formattedVals = append(formattedVals, v)
+ }
+ }
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField(kv.key, formattedVals)
+ formattedVals = nil
+ }
+ }
+ headerSorterPool.Put(sorter)
+ return nil
+}
+
+// CanonicalHeaderKey returns the canonical format of the
+// header key s. The canonicalization converts the first
+// letter and any letter following a hyphen to upper case;
+// the rest are converted to lowercase. For example, the
+// canonical key for "accept-encoding" is "Accept-Encoding".
+// If s contains a space or invalid header field bytes, it is
+// returned without modifications.
+func CanonicalHeaderKey(s string) string { return textproto.CanonicalMIMEHeaderKey(s) }
+
+// hasToken reports whether token appears within v, ASCII
+// case-insensitive, with space or comma boundaries.
+// token must be all lowercase.
+// v may contain mixed case.
+func hasToken(v, token string) bool {
+ if len(token) > len(v) || token == "" {
+ return false
+ }
+ if v == token {
+ return true
+ }
+ for sp := 0; sp <= len(v)-len(token); sp++ {
+ // Check that first character is good.
+ // The token is ASCII, so checking only a single byte
+ // is sufficient. We skip this potential starting
+ // position if both the first byte and its potential
+ // ASCII uppercase equivalent (b|0x20) don't match.
+ // False positives ('^' => '~') are caught by EqualFold.
+ if b := v[sp]; b != token[0] && b|0x20 != token[0] {
+ continue
+ }
+ // Check that start pos is on a valid token boundary.
+ if sp > 0 && !isTokenBoundary(v[sp-1]) {
+ continue
+ }
+ // Check that end pos is on a valid token boundary.
+ if endPos := sp + len(token); endPos != len(v) && !isTokenBoundary(v[endPos]) {
+ continue
+ }
+ if ascii.EqualFold(v[sp:sp+len(token)], token) {
+ return true
+ }
+ }
+ return false
+}
+
+func isTokenBoundary(b byte) bool {
+ return b == ' ' || b == ',' || b == '\t'
+}
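+
+// Illustrative behavior of hasToken (editor's sketch):
+//
+//	hasToken("gzip, deflate", "gzip") // true
+//	hasToken("GZIP", "gzip")          // true (ASCII case-insensitive)
+//	hasToken("gzipped", "gzip")       // false: 'p' is not a token boundary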
diff --git a/contrib/go/_std_1.18/src/net/http/http.go b/contrib/go/_std_1.19/src/net/http/http.go
index 101799f574..101799f574 100644
--- a/contrib/go/_std_1.18/src/net/http/http.go
+++ b/contrib/go/_std_1.19/src/net/http/http.go
diff --git a/contrib/go/_std_1.18/src/net/http/httptrace/trace.go b/contrib/go/_std_1.19/src/net/http/httptrace/trace.go
index 6af30f78d1..6af30f78d1 100644
--- a/contrib/go/_std_1.18/src/net/http/httptrace/trace.go
+++ b/contrib/go/_std_1.19/src/net/http/httptrace/trace.go
diff --git a/contrib/go/_std_1.18/src/net/http/internal/ascii/print.go b/contrib/go/_std_1.19/src/net/http/internal/ascii/print.go
index 585e5baba4..585e5baba4 100644
--- a/contrib/go/_std_1.18/src/net/http/internal/ascii/print.go
+++ b/contrib/go/_std_1.19/src/net/http/internal/ascii/print.go
diff --git a/contrib/go/_std_1.19/src/net/http/internal/chunked.go b/contrib/go/_std_1.19/src/net/http/internal/chunked.go
new file mode 100644
index 0000000000..5a174415dc
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/http/internal/chunked.go
@@ -0,0 +1,262 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The wire protocol for HTTP's "chunked" Transfer-Encoding.
+
+// Package internal contains HTTP internals shared by net/http and
+// net/http/httputil.
+package internal
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+)
+
+const maxLineLength = 4096 // assumed <= bufio.defaultBufSize
+
+var ErrLineTooLong = errors.New("header line too long")
+
+// NewChunkedReader returns a new chunkedReader that translates the data read from r
+// out of HTTP "chunked" format before returning it.
+// The chunkedReader returns io.EOF when the final 0-length chunk is read.
+//
+// NewChunkedReader is not needed by normal applications. The http package
+// automatically decodes chunking when reading response bodies.
+func NewChunkedReader(r io.Reader) io.Reader {
+ br, ok := r.(*bufio.Reader)
+ if !ok {
+ br = bufio.NewReader(r)
+ }
+ return &chunkedReader{r: br}
+}
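+
+// Illustrative wire format (editor's sketch): the body "hello" in chunked
+// encoding is "5\r\nhello\r\n0\r\n\r\n", i.e. hex length, CRLF, data, CRLF,
+// terminated by a zero-length chunk. For example:
+//
+//	r := NewChunkedReader(strings.NewReader("5\r\nhello\r\n0\r\n\r\n"))
+//	b, err := io.ReadAll(r) // b == []byte("hello"), err == nil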
+
+type chunkedReader struct {
+ r *bufio.Reader
+ n uint64 // unread bytes in chunk
+ err error
+ buf [2]byte
+ checkEnd bool // whether we need to check for the \r\n chunk footer
+}
+
+func (cr *chunkedReader) beginChunk() {
+ // chunk-size CRLF
+ var line []byte
+ line, cr.err = readChunkLine(cr.r)
+ if cr.err != nil {
+ return
+ }
+ cr.n, cr.err = parseHexUint(line)
+ if cr.err != nil {
+ return
+ }
+ if cr.n == 0 {
+ cr.err = io.EOF
+ }
+}
+
+func (cr *chunkedReader) chunkHeaderAvailable() bool {
+ n := cr.r.Buffered()
+ if n > 0 {
+ peek, _ := cr.r.Peek(n)
+ return bytes.IndexByte(peek, '\n') >= 0
+ }
+ return false
+}
+
+func (cr *chunkedReader) Read(b []uint8) (n int, err error) {
+ for cr.err == nil {
+ if cr.checkEnd {
+ if n > 0 && cr.r.Buffered() < 2 {
+ // We have some data. Return early (per the io.Reader
+ // contract) instead of potentially blocking while
+ // reading more.
+ break
+ }
+ if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil {
+ if string(cr.buf[:]) != "\r\n" {
+ cr.err = errors.New("malformed chunked encoding")
+ break
+ }
+ } else {
+ if cr.err == io.EOF {
+ cr.err = io.ErrUnexpectedEOF
+ }
+ break
+ }
+ cr.checkEnd = false
+ }
+ if cr.n == 0 {
+ if n > 0 && !cr.chunkHeaderAvailable() {
+ // We've read enough. Don't potentially block
+ // reading a new chunk header.
+ break
+ }
+ cr.beginChunk()
+ continue
+ }
+ if len(b) == 0 {
+ break
+ }
+ rbuf := b
+ if uint64(len(rbuf)) > cr.n {
+ rbuf = rbuf[:cr.n]
+ }
+ var n0 int
+ n0, cr.err = cr.r.Read(rbuf)
+ n += n0
+ b = b[n0:]
+ cr.n -= uint64(n0)
+ // If we're at the end of a chunk, read the next two
+ // bytes to verify they are "\r\n".
+ if cr.n == 0 && cr.err == nil {
+ cr.checkEnd = true
+ } else if cr.err == io.EOF {
+ cr.err = io.ErrUnexpectedEOF
+ }
+ }
+ return n, cr.err
+}
+
+// Read a line of bytes (up to \n) from b.
+// Give up if the line exceeds maxLineLength.
+// The returned bytes are owned by the bufio.Reader
+// so they are only valid until the next bufio read.
+func readChunkLine(b *bufio.Reader) ([]byte, error) {
+ p, err := b.ReadSlice('\n')
+ if err != nil {
+ // We always know when EOF is coming.
+ // If the caller asked for a line, there should be a line.
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ } else if err == bufio.ErrBufferFull {
+ err = ErrLineTooLong
+ }
+ return nil, err
+ }
+ if len(p) >= maxLineLength {
+ return nil, ErrLineTooLong
+ }
+ p = trimTrailingWhitespace(p)
+ p, err = removeChunkExtension(p)
+ if err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+func trimTrailingWhitespace(b []byte) []byte {
+ for len(b) > 0 && isASCIISpace(b[len(b)-1]) {
+ b = b[:len(b)-1]
+ }
+ return b
+}
+
+func isASCIISpace(b byte) bool {
+ return b == ' ' || b == '\t' || b == '\n' || b == '\r'
+}
+
+var semi = []byte(";")
+
+// removeChunkExtension removes any chunk-extension from p.
+// For example,
+//
+// "0" => "0"
+// "0;token" => "0"
+// "0;token=val" => "0"
+// `0;token="quoted string"` => "0"
+func removeChunkExtension(p []byte) ([]byte, error) {
+ p, _, _ = bytes.Cut(p, semi)
+ // TODO: care about exact syntax of chunk extensions? We're
+ // ignoring and stripping them anyway. For now just never
+ // return an error.
+ return p, nil
+}
+
+// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP
+// "chunked" format before writing them to w. Closing the returned chunkedWriter
+// sends the final 0-length chunk that marks the end of the stream but does
+// not send the final CRLF that appears after trailers; trailers and the last
+// CRLF must be written separately.
+//
+// NewChunkedWriter is not needed by normal applications. The http
+// package adds chunking automatically if handlers don't set a
+// Content-Length header. Using NewChunkedWriter inside a handler
+// would result in double chunking or chunking with a Content-Length
+// header, both of which are wrong.
+func NewChunkedWriter(w io.Writer) io.WriteCloser {
+ return &chunkedWriter{w}
+}
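+
+// Illustrative usage (editor's sketch):
+//
+//	var buf bytes.Buffer
+//	w := NewChunkedWriter(&buf)
+//	io.WriteString(w, "hello")
+//	w.Close()
+//	// buf now holds "5\r\nhello\r\n0\r\n" (no trailing CRLF; see above)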
+
+// Writing to chunkedWriter translates to writing in HTTP chunked Transfer
+// Encoding wire format to the underlying Wire writer.
+type chunkedWriter struct {
+ Wire io.Writer
+}
+
+// Write writes the contents of data as one chunk to Wire.
+// NOTE: the corresponding chunk-writing procedure in Conn.Write has
+// a bug since it does not check for the success of io.WriteString.
+func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
+
+ // Don't send 0-length data. It looks like EOF for chunked encoding.
+ if len(data) == 0 {
+ return 0, nil
+ }
+
+ if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil {
+ return 0, err
+ }
+ if n, err = cw.Wire.Write(data); err != nil {
+ return
+ }
+ if n != len(data) {
+ err = io.ErrShortWrite
+ return
+ }
+ if _, err = io.WriteString(cw.Wire, "\r\n"); err != nil {
+ return
+ }
+ if bw, ok := cw.Wire.(*FlushAfterChunkWriter); ok {
+ err = bw.Flush()
+ }
+ return
+}
+
+func (cw *chunkedWriter) Close() error {
+ _, err := io.WriteString(cw.Wire, "0\r\n")
+ return err
+}
+
+// FlushAfterChunkWriter signals from the caller of NewChunkedWriter
+// that each chunk should be followed by a flush. It is used by the
+// http.Transport code to keep the buffering behavior for headers and
+// trailers, but flush out chunks aggressively in the middle for
+// request bodies which may be generated slowly. See Issue 6574.
+type FlushAfterChunkWriter struct {
+ *bufio.Writer
+}
+
+func parseHexUint(v []byte) (n uint64, err error) {
+ for i, b := range v {
+ switch {
+ case '0' <= b && b <= '9':
+ b = b - '0'
+ case 'a' <= b && b <= 'f':
+ b = b - 'a' + 10
+ case 'A' <= b && b <= 'F':
+ b = b - 'A' + 10
+ default:
+ return 0, errors.New("invalid byte in chunk length")
+ }
+ if i == 16 {
+ return 0, errors.New("http chunk length too large")
+ }
+ n <<= 4
+ n |= uint64(b)
+ }
+ return
+}
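+
+// Illustrative behavior (editor's sketch):
+//
+//	parseHexUint([]byte("1a")) // 26, nil
+//	parseHexUint([]byte("zz")) // 0, "invalid byte in chunk length"
+//
+// More than 16 hex digits is rejected as "http chunk length too large".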
diff --git a/contrib/go/_std_1.18/src/net/http/jar.go b/contrib/go/_std_1.19/src/net/http/jar.go
index 5c3de0dad2..5c3de0dad2 100644
--- a/contrib/go/_std_1.18/src/net/http/jar.go
+++ b/contrib/go/_std_1.19/src/net/http/jar.go
diff --git a/contrib/go/_std_1.18/src/net/http/method.go b/contrib/go/_std_1.19/src/net/http/method.go
index 6f46155069..6f46155069 100644
--- a/contrib/go/_std_1.18/src/net/http/method.go
+++ b/contrib/go/_std_1.19/src/net/http/method.go
diff --git a/contrib/go/_std_1.19/src/net/http/request.go b/contrib/go/_std_1.19/src/net/http/request.go
new file mode 100644
index 0000000000..cead91d3d4
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/http/request.go
@@ -0,0 +1,1483 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP Request reading and parsing.
+
+package http
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "crypto/tls"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "mime"
+ "mime/multipart"
+ "net"
+ "net/http/httptrace"
+ "net/http/internal/ascii"
+ "net/textproto"
+ "net/url"
+ urlpkg "net/url"
+ "strconv"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/idna"
+)
+
+const (
+ defaultMaxMemory = 32 << 20 // 32 MB
+)
+
+// ErrMissingFile is returned by FormFile when the provided file field name
+// is either not present in the request or not a file field.
+var ErrMissingFile = errors.New("http: no such file")
+
+// ProtocolError represents an HTTP protocol error.
+//
+// Deprecated: Not all errors in the http package related to protocol errors
+// are of type ProtocolError.
+type ProtocolError struct {
+ ErrorString string
+}
+
+func (pe *ProtocolError) Error() string { return pe.ErrorString }
+
+var (
+ // ErrNotSupported is returned by the Push method of Pusher
+ // implementations to indicate that HTTP/2 Push support is not
+ // available.
+ ErrNotSupported = &ProtocolError{"feature not supported"}
+
+ // Deprecated: ErrUnexpectedTrailer is no longer returned by
+ // anything in the net/http package. Callers should not
+ // compare errors against this variable.
+ ErrUnexpectedTrailer = &ProtocolError{"trailer header without chunked transfer encoding"}
+
+ // ErrMissingBoundary is returned by Request.MultipartReader when the
+ // request's Content-Type does not include a "boundary" parameter.
+ ErrMissingBoundary = &ProtocolError{"no multipart boundary param in Content-Type"}
+
+ // ErrNotMultipart is returned by Request.MultipartReader when the
+ // request's Content-Type is not multipart/form-data.
+ ErrNotMultipart = &ProtocolError{"request Content-Type isn't multipart/form-data"}
+
+ // Deprecated: ErrHeaderTooLong is no longer returned by
+ // anything in the net/http package. Callers should not
+ // compare errors against this variable.
+ ErrHeaderTooLong = &ProtocolError{"header too long"}
+
+ // Deprecated: ErrShortBody is no longer returned by
+ // anything in the net/http package. Callers should not
+ // compare errors against this variable.
+ ErrShortBody = &ProtocolError{"entity body too short"}
+
+ // Deprecated: ErrMissingContentLength is no longer returned by
+ // anything in the net/http package. Callers should not
+ // compare errors against this variable.
+ ErrMissingContentLength = &ProtocolError{"missing ContentLength in HEAD response"}
+)
+
+func badStringError(what, val string) error { return fmt.Errorf("%s %q", what, val) }
+
+// Headers that Request.Write handles itself and should be skipped.
+var reqWriteExcludeHeader = map[string]bool{
+ "Host": true, // not in Header map anyway
+ "User-Agent": true,
+ "Content-Length": true,
+ "Transfer-Encoding": true,
+ "Trailer": true,
+}
+
+// A Request represents an HTTP request received by a server
+// or to be sent by a client.
+//
+// The field semantics differ slightly between client and server
+// usage. In addition to the notes on the fields below, see the
+// documentation for Request.Write and RoundTripper.
+type Request struct {
+ // Method specifies the HTTP method (GET, POST, PUT, etc.).
+ // For client requests, an empty string means GET.
+ //
+ // Go's HTTP client does not support sending a request with
+ // the CONNECT method. See the documentation on Transport for
+ // details.
+ Method string
+
+ // URL specifies either the URI being requested (for server
+ // requests) or the URL to access (for client requests).
+ //
+ // For server requests, the URL is parsed from the URI
+ // supplied on the Request-Line as stored in RequestURI. For
+ // most requests, fields other than Path and RawQuery will be
+ // empty. (See RFC 7230, Section 5.3)
+ //
+ // For client requests, the URL's Host specifies the server to
+ // connect to, while the Request's Host field optionally
+ // specifies the Host header value to send in the HTTP
+ // request.
+ URL *url.URL
+
+ // The protocol version for incoming server requests.
+ //
+ // For client requests, these fields are ignored. The HTTP
+ // client code always uses either HTTP/1.1 or HTTP/2.
+ // See the docs on Transport for details.
+ Proto string // "HTTP/1.0"
+ ProtoMajor int // 1
+ ProtoMinor int // 0
+
+ // Header contains the request header fields either received
+ // by the server or to be sent by the client.
+ //
+ // If a server received a request with header lines,
+ //
+ // Host: example.com
+ // accept-encoding: gzip, deflate
+ // Accept-Language: en-us
+ // fOO: Bar
+ // foo: two
+ //
+ // then
+ //
+ // Header = map[string][]string{
+ // "Accept-Encoding": {"gzip, deflate"},
+ // "Accept-Language": {"en-us"},
+ // "Foo": {"Bar", "two"},
+ // }
+ //
+ // For incoming requests, the Host header is promoted to the
+ // Request.Host field and removed from the Header map.
+ //
+ // HTTP defines that header names are case-insensitive. The
+ // request parser implements this by using CanonicalHeaderKey,
+ // making the first character and any characters following a
+ // hyphen uppercase and the rest lowercase.
+ //
+ // For client requests, certain headers such as Content-Length
+ // and Connection are automatically written when needed and
+ // values in Header may be ignored. See the documentation
+ // for the Request.Write method.
+ Header Header
+
+ // Body is the request's body.
+ //
+ // For client requests, a nil body means the request has no
+ // body, such as a GET request. The HTTP Client's Transport
+ // is responsible for calling the Close method.
+ //
+ // For server requests, the Request Body is always non-nil
+ // but will return EOF immediately when no body is present.
+ // The Server will close the request body. The ServeHTTP
+ // Handler does not need to.
+ //
+ // Body must allow Read to be called concurrently with Close.
+ // In particular, calling Close should unblock a Read waiting
+ // for input.
+ Body io.ReadCloser
+
+ // GetBody defines an optional func to return a new copy of
+ // Body. It is used for client requests when a redirect requires
+ // reading the body more than once. Use of GetBody still
+ // requires setting Body.
+ //
+ // For server requests, it is unused.
+ GetBody func() (io.ReadCloser, error)
+
+ // ContentLength records the length of the associated content.
+ // The value -1 indicates that the length is unknown.
+ // Values >= 0 indicate that the given number of bytes may
+ // be read from Body.
+ //
+ // For client requests, a value of 0 with a non-nil Body is
+ // also treated as unknown.
+ ContentLength int64
+
+ // TransferEncoding lists the transfer encodings from outermost to
+ // innermost. An empty list denotes the "identity" encoding.
+ // TransferEncoding can usually be ignored; chunked encoding is
+ // automatically added and removed as necessary when sending and
+ // receiving requests.
+ TransferEncoding []string
+
+ // Close indicates whether to close the connection after
+ // replying to this request (for servers) or after sending this
+ // request and reading its response (for clients).
+ //
+ // For server requests, the HTTP server handles this automatically
+ // and this field is not needed by Handlers.
+ //
+ // For client requests, setting this field prevents re-use of
+ // TCP connections between requests to the same hosts, as if
+ // Transport.DisableKeepAlives were set.
+ Close bool
+
+ // For server requests, Host specifies the host on which the
+ // URL is sought. For HTTP/1 (per RFC 7230, section 5.4), this
+ // is either the value of the "Host" header or the host name
+ // given in the URL itself. For HTTP/2, it is the value of the
+ // ":authority" pseudo-header field.
+ // It may be of the form "host:port". For international domain
+ // names, Host may be in Punycode or Unicode form. Use
+ // golang.org/x/net/idna to convert it to either format if
+ // needed.
+ // To prevent DNS rebinding attacks, server Handlers should
+ // validate that the Host header has a value for which the
+ // Handler considers itself authoritative. The included
+ // ServeMux supports patterns registered to particular host
+ // names and thus protects its registered Handlers.
+ //
+ // For client requests, Host optionally overrides the Host
+ // header to send. If empty, the Request.Write method uses
+ // the value of URL.Host. Host may contain an international
+ // domain name.
+ Host string
+
+ // Form contains the parsed form data, including both the URL
+ // field's query parameters and the PATCH, POST, or PUT form data.
+ // This field is only available after ParseForm is called.
+ // The HTTP client ignores Form and uses Body instead.
+ Form url.Values
+
+ // PostForm contains the parsed form data from PATCH, POST
+ // or PUT body parameters.
+ //
+ // This field is only available after ParseForm is called.
+ // The HTTP client ignores PostForm and uses Body instead.
+ PostForm url.Values
+
+ // MultipartForm is the parsed multipart form, including file uploads.
+ // This field is only available after ParseMultipartForm is called.
+ // The HTTP client ignores MultipartForm and uses Body instead.
+ MultipartForm *multipart.Form
+
+ // Trailer specifies additional headers that are sent after the request
+ // body.
+ //
+ // For server requests, the Trailer map initially contains only the
+ // trailer keys, with nil values. (The client declares which trailers it
+ // will later send.) While the handler is reading from Body, it must
+ // not reference Trailer. After reading from Body returns EOF, Trailer
+ // can be read again and will contain non-nil values, if they were sent
+ // by the client.
+ //
+ // For client requests, Trailer must be initialized to a map containing
+ // the trailer keys to later send. The values may be nil or their final
+ // values. The ContentLength must be 0 or -1, to send a chunked request.
+ // After the HTTP request is sent the map values can be updated while
+ // the request body is read. Once the body returns EOF, the caller must
+ // not mutate Trailer.
+ //
+ // Few HTTP clients, servers, or proxies support HTTP trailers.
+ Trailer Header
+
+ // RemoteAddr allows HTTP servers and other software to record
+ // the network address that sent the request, usually for
+ // logging. This field is not filled in by ReadRequest and
+ // has no defined format. The HTTP server in this package
+ // sets RemoteAddr to an "IP:port" address before invoking a
+ // handler.
+ // This field is ignored by the HTTP client.
+ RemoteAddr string
+
+ // RequestURI is the unmodified request-target of the
+ // Request-Line (RFC 7230, Section 3.1.1) as sent by the client
+ // to a server. Usually the URL field should be used instead.
+ // It is an error to set this field in an HTTP client request.
+ RequestURI string
+
+ // TLS allows HTTP servers and other software to record
+ // information about the TLS connection on which the request
+ // was received. This field is not filled in by ReadRequest.
+ // The HTTP server in this package sets the field for
+ // TLS-enabled connections before invoking a handler;
+ // otherwise it leaves the field nil.
+ // This field is ignored by the HTTP client.
+ TLS *tls.ConnectionState
+
+ // Cancel is an optional channel whose closure indicates that the client
+ // request should be regarded as canceled. Not all implementations of
+ // RoundTripper may support Cancel.
+ //
+ // For server requests, this field is not applicable.
+ //
+ // Deprecated: Set the Request's context with NewRequestWithContext
+ // instead. If a Request's Cancel field and context are both
+ // set, it is undefined whether Cancel is respected.
+ Cancel <-chan struct{}
+
+ // Response is the redirect response which caused this request
+ // to be created. This field is only populated during client
+ // redirects.
+ Response *Response
+
+ // ctx is either the client or server context. It should only
+ // be modified via copying the whole Request using WithContext.
+ // It is unexported to prevent people from using Context wrong
+ // and mutating the contexts held by callers of the same request.
+ ctx context.Context
+}
+
+// Context returns the request's context. To change the context, use
+// WithContext.
+//
+// The returned context is always non-nil; it defaults to the
+// background context.
+//
+// For outgoing client requests, the context controls cancellation.
+//
+// For incoming server requests, the context is canceled when the
+// client's connection closes, the request is canceled (with HTTP/2),
+// or when the ServeHTTP method returns.
+func (r *Request) Context() context.Context {
+ if r.ctx != nil {
+ return r.ctx
+ }
+ return context.Background()
+}
+
+// WithContext returns a shallow copy of r with its context changed
+// to ctx. The provided ctx must be non-nil.
+//
+// For outgoing client request, the context controls the entire
+// lifetime of a request and its response: obtaining a connection,
+// sending the request, and reading the response headers and body.
+//
+// To create a new request with a context, use NewRequestWithContext.
+// To change the context of a request, such as an incoming request you
+// want to modify before sending back out, use Request.Clone. Between
+// those two uses, it's rare to need WithContext.
+func (r *Request) WithContext(ctx context.Context) *Request {
+ if ctx == nil {
+ panic("nil context")
+ }
+ r2 := new(Request)
+ *r2 = *r
+ r2.ctx = ctx
+ return r2
+}
+
+// Clone returns a deep copy of r with its context changed to ctx.
+// The provided ctx must be non-nil.
+//
+// For an outgoing client request, the context controls the entire
+// lifetime of a request and its response: obtaining a connection,
+// sending the request, and reading the response headers and body.
+func (r *Request) Clone(ctx context.Context) *Request {
+ if ctx == nil {
+ panic("nil context")
+ }
+ r2 := new(Request)
+ *r2 = *r
+ r2.ctx = ctx
+ r2.URL = cloneURL(r.URL)
+ if r.Header != nil {
+ r2.Header = r.Header.Clone()
+ }
+ if r.Trailer != nil {
+ r2.Trailer = r.Trailer.Clone()
+ }
+ if s := r.TransferEncoding; s != nil {
+ s2 := make([]string, len(s))
+ copy(s2, s)
+ r2.TransferEncoding = s2
+ }
+ r2.Form = cloneURLValues(r.Form)
+ r2.PostForm = cloneURLValues(r.PostForm)
+ r2.MultipartForm = cloneMultipartForm(r.MultipartForm)
+ return r2
+}
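+
+// Illustrative difference from WithContext (editor's sketch): WithContext
+// returns a shallow copy that shares maps with r, while Clone deep-copies
+// them.
+//
+//	r2 := r.WithContext(ctx) // r2.Header is the same map as r.Header
+//	r3 := r.Clone(ctx)       // r3.Header is an independent copy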
+
+// ProtoAtLeast reports whether the HTTP protocol used
+// in the request is at least major.minor.
+func (r *Request) ProtoAtLeast(major, minor int) bool {
+ return r.ProtoMajor > major ||
+ r.ProtoMajor == major && r.ProtoMinor >= minor
+}
+
+// UserAgent returns the client's User-Agent, if sent in the request.
+func (r *Request) UserAgent() string {
+ return r.Header.Get("User-Agent")
+}
+
+// Cookies parses and returns the HTTP cookies sent with the request.
+func (r *Request) Cookies() []*Cookie {
+ return readCookies(r.Header, "")
+}
+
+// ErrNoCookie is returned by Request's Cookie method when a cookie is not found.
+var ErrNoCookie = errors.New("http: named cookie not present")
+
+// Cookie returns the named cookie provided in the request or
+// ErrNoCookie if not found.
+// If multiple cookies match the given name, only one cookie will
+// be returned.
+func (r *Request) Cookie(name string) (*Cookie, error) {
+ for _, c := range readCookies(r.Header, name) {
+ return c, nil
+ }
+ return nil, ErrNoCookie
+}
+
+// AddCookie adds a cookie to the request. Per RFC 6265 section 5.4,
+// AddCookie does not attach more than one Cookie header field. That
+// means all cookies, if any, are written into the same line,
+// separated by semicolon.
+// AddCookie only sanitizes c's name and value, and does not sanitize
+// a Cookie header already present in the request.
+func (r *Request) AddCookie(c *Cookie) {
+ s := fmt.Sprintf("%s=%s", sanitizeCookieName(c.Name), sanitizeCookieValue(c.Value))
+ if c := r.Header.Get("Cookie"); c != "" {
+ r.Header.Set("Cookie", c+"; "+s)
+ } else {
+ r.Header.Set("Cookie", s)
+ }
+}
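+
+// Illustrative behavior (editor's sketch): successive cookies end up on the
+// single Cookie header line.
+//
+//	r.AddCookie(&Cookie{Name: "a", Value: "1"})
+//	r.AddCookie(&Cookie{Name: "b", Value: "2"})
+//	r.Header.Get("Cookie") // "a=1; b=2"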
+
+// Referer returns the referring URL, if sent in the request.
+//
+// Referer is misspelled as in the request itself, a mistake from the
+// earliest days of HTTP. This value can also be fetched from the
+// Header map as Header["Referer"]; the benefit of making it available
+// as a method is that the compiler can diagnose programs that use the
+// alternate (correct English) spelling req.Referrer() but cannot
+// diagnose programs that use Header["Referrer"].
+func (r *Request) Referer() string {
+ return r.Header.Get("Referer")
+}
+
+// multipartByReader is a sentinel value.
+// Its presence in Request.MultipartForm indicates that parsing of the request
+// body has been handed off to a MultipartReader instead of ParseMultipartForm.
+var multipartByReader = &multipart.Form{
+ Value: make(map[string][]string),
+ File: make(map[string][]*multipart.FileHeader),
+}
+
+// MultipartReader returns a MIME multipart reader if this is a
+// multipart/form-data or a multipart/mixed POST request, else returns nil and an error.
+// Use this function instead of ParseMultipartForm to
+// process the request body as a stream.
+func (r *Request) MultipartReader() (*multipart.Reader, error) {
+ if r.MultipartForm == multipartByReader {
+ return nil, errors.New("http: MultipartReader called twice")
+ }
+ if r.MultipartForm != nil {
+ return nil, errors.New("http: multipart handled by ParseMultipartForm")
+ }
+ r.MultipartForm = multipartByReader
+ return r.multipartReader(true)
+}
+
+func (r *Request) multipartReader(allowMixed bool) (*multipart.Reader, error) {
+ v := r.Header.Get("Content-Type")
+ if v == "" {
+ return nil, ErrNotMultipart
+ }
+ if r.Body == nil {
+ return nil, errors.New("missing form body")
+ }
+ d, params, err := mime.ParseMediaType(v)
+ if err != nil || !(d == "multipart/form-data" || allowMixed && d == "multipart/mixed") {
+ return nil, ErrNotMultipart
+ }
+ boundary, ok := params["boundary"]
+ if !ok {
+ return nil, ErrMissingBoundary
+ }
+ return multipart.NewReader(r.Body, boundary), nil
+}
+
+// isH2Upgrade reports whether r represents the http2 "client preface"
+// magic string.
+func (r *Request) isH2Upgrade() bool {
+ return r.Method == "PRI" && len(r.Header) == 0 && r.URL.Path == "*" && r.Proto == "HTTP/2.0"
+}
+
+// valueOrDefault returns value if nonempty, def otherwise.
+func valueOrDefault(value, def string) string {
+ if value != "" {
+ return value
+ }
+ return def
+}
+
+// NOTE: This is not intended to reflect the actual Go version being used.
+// It was changed at the time of Go 1.1 release because the former User-Agent
+// had ended up blocked by some intrusion detection systems.
+// See https://codereview.appspot.com/7532043.
+const defaultUserAgent = "Go-http-client/1.1"
+
+// Write writes an HTTP/1.1 request, which is the header and body, in wire format.
+// This method consults the following fields of the request:
+//
+// Host
+// URL
+// Method (defaults to "GET")
+// Header
+// ContentLength
+// TransferEncoding
+// Body
+//
+// If Body is present, Content-Length is <= 0 and TransferEncoding
+// hasn't been set to "identity", Write adds "Transfer-Encoding:
+// chunked" to the header. Body is closed after it is sent.
+func (r *Request) Write(w io.Writer) error {
+ return r.write(w, false, nil, nil)
+}
+
+// WriteProxy is like Write but writes the request in the form
+// expected by an HTTP proxy. In particular, WriteProxy writes the
+// initial Request-URI line of the request with an absolute URI, per
+// section 5.3 of RFC 7230, including the scheme and host.
+// In either case, WriteProxy also writes a Host header, using
+// either r.Host or r.URL.Host.
+func (r *Request) WriteProxy(w io.Writer) error {
+ return r.write(w, true, nil, nil)
+}
+
+// errMissingHost is returned by Write when there is no Host or URL present in
+// the Request.
+var errMissingHost = errors.New("http: Request.Write on Request with no Host or URL set")
+
+// extraHeaders may be nil
+// waitForContinue may be nil
+// always closes body
+func (r *Request) write(w io.Writer, usingProxy bool, extraHeaders Header, waitForContinue func() bool) (err error) {
+ trace := httptrace.ContextClientTrace(r.Context())
+ if trace != nil && trace.WroteRequest != nil {
+ defer func() {
+ trace.WroteRequest(httptrace.WroteRequestInfo{
+ Err: err,
+ })
+ }()
+ }
+ closed := false
+ defer func() {
+ if closed {
+ return
+ }
+ if closeErr := r.closeBody(); closeErr != nil && err == nil {
+ err = closeErr
+ }
+ }()
+
+ // Find the target host. Prefer the Host: header, but if that
+ // is not given, use the host from the request URL.
+ //
+ // Clean the host, in case it arrives with unexpected stuff in it.
+ host := cleanHost(r.Host)
+ if host == "" {
+ if r.URL == nil {
+ return errMissingHost
+ }
+ host = cleanHost(r.URL.Host)
+ }
+
+ // According to RFC 6874, an HTTP client, proxy, or other
+ // intermediary must remove any IPv6 zone identifier attached
+ // to an outgoing URI.
+ host = removeZone(host)
+
+ ruri := r.URL.RequestURI()
+ if usingProxy && r.URL.Scheme != "" && r.URL.Opaque == "" {
+ ruri = r.URL.Scheme + "://" + host + ruri
+ } else if r.Method == "CONNECT" && r.URL.Path == "" {
+ // CONNECT requests normally give just the host and port, not a full URL.
+ ruri = host
+ if r.URL.Opaque != "" {
+ ruri = r.URL.Opaque
+ }
+ }
+ if stringContainsCTLByte(ruri) {
+ return errors.New("net/http: can't write control character in Request.URL")
+ }
+ // TODO: validate r.Method too? At least it's less likely to
+ // come from an attacker (more likely to be a constant in
+ // code).
+
+ // Wrap the writer in a bufio Writer if it's not already buffered.
+ // Don't always call NewWriter, as that forces a bytes.Buffer
+ // and other small bufio Writers to have a minimum 4k buffer
+ // size.
+ var bw *bufio.Writer
+ if _, ok := w.(io.ByteWriter); !ok {
+ bw = bufio.NewWriter(w)
+ w = bw
+ }
+
+ _, err = fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(r.Method, "GET"), ruri)
+ if err != nil {
+ return err
+ }
+
+ // Header lines
+ _, err = fmt.Fprintf(w, "Host: %s\r\n", host)
+ if err != nil {
+ return err
+ }
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField("Host", []string{host})
+ }
+
+ // Use the defaultUserAgent unless the Header contains one, which
+ // may be blank to not send the header.
+ userAgent := defaultUserAgent
+ if r.Header.has("User-Agent") {
+ userAgent = r.Header.Get("User-Agent")
+ }
+ if userAgent != "" {
+ _, err = fmt.Fprintf(w, "User-Agent: %s\r\n", userAgent)
+ if err != nil {
+ return err
+ }
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField("User-Agent", []string{userAgent})
+ }
+ }
+
+ // Process Body,ContentLength,Close,Trailer
+ tw, err := newTransferWriter(r)
+ if err != nil {
+ return err
+ }
+ err = tw.writeHeader(w, trace)
+ if err != nil {
+ return err
+ }
+
+ err = r.Header.writeSubset(w, reqWriteExcludeHeader, trace)
+ if err != nil {
+ return err
+ }
+
+ if extraHeaders != nil {
+ err = extraHeaders.write(w, trace)
+ if err != nil {
+ return err
+ }
+ }
+
+ _, err = io.WriteString(w, "\r\n")
+ if err != nil {
+ return err
+ }
+
+ if trace != nil && trace.WroteHeaders != nil {
+ trace.WroteHeaders()
+ }
+
+ // Flush and wait for 100-continue if expected.
+ if waitForContinue != nil {
+ if bw, ok := w.(*bufio.Writer); ok {
+ err = bw.Flush()
+ if err != nil {
+ return err
+ }
+ }
+ if trace != nil && trace.Wait100Continue != nil {
+ trace.Wait100Continue()
+ }
+ if !waitForContinue() {
+ closed = true
+ r.closeBody()
+ return nil
+ }
+ }
+
+ if bw, ok := w.(*bufio.Writer); ok && tw.FlushHeaders {
+ if err := bw.Flush(); err != nil {
+ return err
+ }
+ }
+
+ // Write body and trailer
+ closed = true
+ err = tw.writeBody(w)
+ if err != nil {
+ if tw.bodyReadError == err {
+ err = requestBodyReadError{err}
+ }
+ return err
+ }
+
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// requestBodyReadError wraps an error from (*Request).write to indicate
+// that the error came from a Read call on the Request.Body.
+// This error type should not escape the net/http package to users.
+type requestBodyReadError struct{ error }
+
+func idnaASCII(v string) (string, error) {
+ // TODO: Consider removing this check after verifying performance is okay.
+ // Right now punycode verification, length checks, context checks, and the
+ // permissible character tests are all omitted. It also prevents the ToASCII
+ // call from salvaging an invalid IDN, when possible. As a result it may be
+ // possible to have two IDNs that appear identical to the user where the
+ // ASCII-only version causes an error downstream whereas the non-ASCII
+ // version does not.
+ // Note that for correct ASCII IDNs ToASCII will do considerably more
+ // work, but it will not cause an allocation.
+ if ascii.Is(v) {
+ return v, nil
+ }
+ return idna.Lookup.ToASCII(v)
+}
+
+// cleanHost cleans up the host sent in request's Host header.
+//
+// It both strips anything after '/' or ' ', and puts the value
+// into Punycode form, if necessary.
+//
+// Ideally we'd clean the Host header according to the spec:
+//
+// https://tools.ietf.org/html/rfc7230#section-5.4 (Host = uri-host [ ":" port ]")
+// https://tools.ietf.org/html/rfc7230#section-2.7 (uri-host -> rfc3986's host)
+// https://tools.ietf.org/html/rfc3986#section-3.2.2 (definition of host)
+//
+// But practically, what we are trying to avoid is the situation in
+// issue 11206, where a malformed Host header used in the proxy context
+// would create a bad request. So it is enough to just truncate at the
+// first offending character.
+func cleanHost(in string) string {
+ if i := strings.IndexAny(in, " /"); i != -1 {
+ in = in[:i]
+ }
+ host, port, err := net.SplitHostPort(in)
+ if err != nil { // input was just a host
+ a, err := idnaASCII(in)
+ if err != nil {
+ return in // garbage in, garbage out
+ }
+ return a
+ }
+ a, err := idnaASCII(host)
+ if err != nil {
+ return in // garbage in, garbage out
+ }
+ return net.JoinHostPort(a, port)
+}
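+
+// Illustrative behavior (editor's sketch; the Punycode form below assumes
+// the usual IDNA mapping):
+//
+//	cleanHost("example.com/path") // "example.com"
+//	cleanHost("bücher.de")        // "xn--bcher-kva.de"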
+
+// removeZone removes IPv6 zone identifier from host.
+// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
+func removeZone(host string) string {
+ if !strings.HasPrefix(host, "[") {
+ return host
+ }
+ i := strings.LastIndex(host, "]")
+ if i < 0 {
+ return host
+ }
+ j := strings.LastIndex(host[:i], "%")
+ if j < 0 {
+ return host
+ }
+ return host[:j] + host[i:]
+}
+
+// ParseHTTPVersion parses an HTTP version string according to RFC 7230, section 2.6.
+// "HTTP/1.0" returns (1, 0, true). Note that strings without
+// a minor version, such as "HTTP/2", are not valid.
+func ParseHTTPVersion(vers string) (major, minor int, ok bool) {
+ switch vers {
+ case "HTTP/1.1":
+ return 1, 1, true
+ case "HTTP/1.0":
+ return 1, 0, true
+ }
+ if !strings.HasPrefix(vers, "HTTP/") {
+ return 0, 0, false
+ }
+ if len(vers) != len("HTTP/X.Y") {
+ return 0, 0, false
+ }
+ if vers[6] != '.' {
+ return 0, 0, false
+ }
+ maj, err := strconv.ParseUint(vers[5:6], 10, 0)
+ if err != nil {
+ return 0, 0, false
+ }
+ min, err := strconv.ParseUint(vers[7:8], 10, 0)
+ if err != nil {
+ return 0, 0, false
+ }
+ return int(maj), int(min), true
+}
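+
+// Illustrative behavior (editor's sketch):
+//
+//	ParseHTTPVersion("HTTP/1.0") // 1, 0, true
+//	ParseHTTPVersion("HTTP/2")   // 0, 0, false (missing minor version)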
+
+func validMethod(method string) bool {
+ /*
+ Method = "OPTIONS" ; Section 9.2
+ | "GET" ; Section 9.3
+ | "HEAD" ; Section 9.4
+ | "POST" ; Section 9.5
+ | "PUT" ; Section 9.6
+ | "DELETE" ; Section 9.7
+ | "TRACE" ; Section 9.8
+ | "CONNECT" ; Section 9.9
+ | extension-method
+ extension-method = token
+ token = 1*<any CHAR except CTLs or separators>
+ */
+ return len(method) > 0 && strings.IndexFunc(method, isNotToken) == -1
+}
+
+// NewRequest wraps NewRequestWithContext using context.Background.
+func NewRequest(method, url string, body io.Reader) (*Request, error) {
+ return NewRequestWithContext(context.Background(), method, url, body)
+}
+
+// NewRequestWithContext returns a new Request given a method, URL, and
+// optional body.
+//
+// If the provided body is also an io.Closer, the returned
+// Request.Body is set to body and will be closed by the Client
+// methods Do, Post, and PostForm, and Transport.RoundTrip.
+//
+// NewRequestWithContext returns a Request suitable for use with
+// Client.Do or Transport.RoundTrip. To create a request for use with
+// testing a Server Handler, either use the NewRequest function in the
+// net/http/httptest package, use ReadRequest, or manually update the
+// Request fields. For an outgoing client request, the context
+// controls the entire lifetime of a request and its response:
+// obtaining a connection, sending the request, and reading the
+// response headers and body. See the Request type's documentation for
+// the difference between inbound and outbound request fields.
+//
+// If body is of type *bytes.Buffer, *bytes.Reader, or
+// *strings.Reader, the returned request's ContentLength is set to its
+// exact value (instead of -1), GetBody is populated (so 307 and 308
+// redirects can replay the body), and Body is set to NoBody if the
+// ContentLength is 0.
+func NewRequestWithContext(ctx context.Context, method, url string, body io.Reader) (*Request, error) {
+ if method == "" {
+ // We document that "" means "GET" for Request.Method, and people have
+ // relied on that from NewRequest, so keep that working.
+ // We still enforce validMethod for non-empty methods.
+ method = "GET"
+ }
+ if !validMethod(method) {
+ return nil, fmt.Errorf("net/http: invalid method %q", method)
+ }
+ if ctx == nil {
+ return nil, errors.New("net/http: nil Context")
+ }
+ u, err := urlpkg.Parse(url)
+ if err != nil {
+ return nil, err
+ }
+ rc, ok := body.(io.ReadCloser)
+ if !ok && body != nil {
+ rc = io.NopCloser(body)
+ }
+ // The host's colon:port should be normalized. See Issue 14836.
+ u.Host = removeEmptyPort(u.Host)
+ req := &Request{
+ ctx: ctx,
+ Method: method,
+ URL: u,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: make(Header),
+ Body: rc,
+ Host: u.Host,
+ }
+ if body != nil {
+ switch v := body.(type) {
+ case *bytes.Buffer:
+ req.ContentLength = int64(v.Len())
+ buf := v.Bytes()
+ req.GetBody = func() (io.ReadCloser, error) {
+ r := bytes.NewReader(buf)
+ return io.NopCloser(r), nil
+ }
+ case *bytes.Reader:
+ req.ContentLength = int64(v.Len())
+ snapshot := *v
+ req.GetBody = func() (io.ReadCloser, error) {
+ r := snapshot
+ return io.NopCloser(&r), nil
+ }
+ case *strings.Reader:
+ req.ContentLength = int64(v.Len())
+ snapshot := *v
+ req.GetBody = func() (io.ReadCloser, error) {
+ r := snapshot
+ return io.NopCloser(&r), nil
+ }
+ default:
+ // This is where we'd set it to -1 (at least
+ // if body != NoBody) to mean unknown, but
+ // that broke people during the Go 1.8 testing
+ // period. People depend on it being 0 I
+ // guess. Maybe retry later. See Issue 18117.
+ }
+ // For client requests, Request.ContentLength of 0
+ // means either actually 0, or unknown. The only way
+ // to explicitly say that the ContentLength is zero is
+ // to set the Body to nil. But it turns out too much code
+ // depends on NewRequest returning a non-nil Body,
+ // so we use a well-known ReadCloser variable instead
+ // and have the http package also treat that sentinel
+ // variable to mean explicitly zero.
+ if req.GetBody != nil && req.ContentLength == 0 {
+ req.Body = NoBody
+ req.GetBody = func() (io.ReadCloser, error) { return NoBody, nil }
+ }
+ }
+
+ return req, nil
+}
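+
+// Illustrative behavior (editor's sketch): a *strings.Reader body populates
+// ContentLength and GetBody, so redirects can replay the body.
+//
+//	req, _ := NewRequest("POST", "http://example.com", strings.NewReader("hi"))
+//	// req.ContentLength == 2
+//	body, _ := req.GetBody() // a fresh reader over the same two bytes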
+
+// BasicAuth returns the username and password provided in the request's
+// Authorization header, if the request uses HTTP Basic Authentication.
+// See RFC 2617, Section 2.
+func (r *Request) BasicAuth() (username, password string, ok bool) {
+ auth := r.Header.Get("Authorization")
+ if auth == "" {
+ return "", "", false
+ }
+ return parseBasicAuth(auth)
+}
+
+// parseBasicAuth parses an HTTP Basic Authentication string.
+// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true).
+func parseBasicAuth(auth string) (username, password string, ok bool) {
+ const prefix = "Basic "
+ // Case insensitive prefix match. See Issue 22736.
+ if len(auth) < len(prefix) || !ascii.EqualFold(auth[:len(prefix)], prefix) {
+ return "", "", false
+ }
+ c, err := base64.StdEncoding.DecodeString(auth[len(prefix):])
+ if err != nil {
+ return "", "", false
+ }
+ cs := string(c)
+ username, password, ok = strings.Cut(cs, ":")
+ if !ok {
+ return "", "", false
+ }
+ return username, password, true
+}
+
+// SetBasicAuth sets the request's Authorization header to use HTTP
+// Basic Authentication with the provided username and password.
+//
+// With HTTP Basic Authentication the provided username and password
+// are not encrypted. It should generally only be used in an HTTPS
+// request.
+//
+// The username may not contain a colon. Some protocols may impose
+// additional requirements on pre-escaping the username and
+// password. For instance, when used with OAuth2, both arguments must
+// be URL encoded first with url.QueryEscape.
+func (r *Request) SetBasicAuth(username, password string) {
+ r.Header.Set("Authorization", "Basic "+basicAuth(username, password))
+}
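+
+// Illustrative round trip (editor's sketch, using the RFC 2617 example):
+//
+//	r.SetBasicAuth("Aladdin", "open sesame")
+//	r.Header.Get("Authorization")   // "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="
+//	user, pass, ok := r.BasicAuth() // "Aladdin", "open sesame", true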
+
+// parseRequestLine parses "GET /foo HTTP/1.1" into its three parts.
+func parseRequestLine(line string) (method, requestURI, proto string, ok bool) {
+ method, rest, ok1 := strings.Cut(line, " ")
+ requestURI, proto, ok2 := strings.Cut(rest, " ")
+ if !ok1 || !ok2 {
+ return "", "", "", false
+ }
+ return method, requestURI, proto, true
+}
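+
+// Illustrative behavior (editor's sketch):
+//
+//	parseRequestLine("GET /index.html HTTP/1.1")
+//	// "GET", "/index.html", "HTTP/1.1", true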
+
+var textprotoReaderPool sync.Pool
+
+func newTextprotoReader(br *bufio.Reader) *textproto.Reader {
+ if v := textprotoReaderPool.Get(); v != nil {
+ tr := v.(*textproto.Reader)
+ tr.R = br
+ return tr
+ }
+ return textproto.NewReader(br)
+}
+
+func putTextprotoReader(r *textproto.Reader) {
+ r.R = nil
+ textprotoReaderPool.Put(r)
+}
+
+// ReadRequest reads and parses an incoming request from b.
+//
+// ReadRequest is a low-level function and should only be used for
+// specialized applications; most code should use the Server to read
+// requests and handle them via the Handler interface. ReadRequest
+// only supports HTTP/1.x requests. For HTTP/2, use golang.org/x/net/http2.
+func ReadRequest(b *bufio.Reader) (*Request, error) {
+ req, err := readRequest(b)
+ if err != nil {
+ return nil, err
+ }
+
+ delete(req.Header, "Host")
+ return req, err
+}
+
+func readRequest(b *bufio.Reader) (req *Request, err error) {
+ tp := newTextprotoReader(b)
+ req = new(Request)
+
+ // First line: GET /index.html HTTP/1.0
+ var s string
+ if s, err = tp.ReadLine(); err != nil {
+ return nil, err
+ }
+ defer func() {
+ putTextprotoReader(tp)
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ }()
+
+ var ok bool
+ req.Method, req.RequestURI, req.Proto, ok = parseRequestLine(s)
+ if !ok {
+ return nil, badStringError("malformed HTTP request", s)
+ }
+ if !validMethod(req.Method) {
+ return nil, badStringError("invalid method", req.Method)
+ }
+ rawurl := req.RequestURI
+ if req.ProtoMajor, req.ProtoMinor, ok = ParseHTTPVersion(req.Proto); !ok {
+ return nil, badStringError("malformed HTTP version", req.Proto)
+ }
+
+ // CONNECT requests are used two different ways, and neither uses a full URL:
+ // The standard use is to tunnel HTTPS through an HTTP proxy.
+ // It looks like "CONNECT www.google.com:443 HTTP/1.1", and the parameter is
+ // just the authority section of a URL. This information should go in req.URL.Host.
+ //
+ // The net/rpc package also uses CONNECT, but there the parameter is a path
+ // that starts with a slash. It can be parsed with the regular URL parser,
+ // and the path will end up in req.URL.Path, where it needs to be in order for
+ // RPC to work.
+ justAuthority := req.Method == "CONNECT" && !strings.HasPrefix(rawurl, "/")
+ if justAuthority {
+ rawurl = "http://" + rawurl
+ }
+
+ if req.URL, err = url.ParseRequestURI(rawurl); err != nil {
+ return nil, err
+ }
+
+ if justAuthority {
+ // Strip the bogus "http://" back off.
+ req.URL.Scheme = ""
+ }
+
+ // Subsequent lines: Key: value.
+ mimeHeader, err := tp.ReadMIMEHeader()
+ if err != nil {
+ return nil, err
+ }
+ req.Header = Header(mimeHeader)
+ if len(req.Header["Host"]) > 1 {
+ return nil, fmt.Errorf("too many Host headers")
+ }
+
+ // RFC 7230, section 5.3: Must treat
+ // GET /index.html HTTP/1.1
+ // Host: www.google.com
+ // and
+ // GET http://www.google.com/index.html HTTP/1.1
+ // Host: doesntmatter
+ // the same. In the second case, any Host line is ignored.
+ req.Host = req.URL.Host
+ if req.Host == "" {
+ req.Host = req.Header.get("Host")
+ }
+
+ fixPragmaCacheControl(req.Header)
+
+ req.Close = shouldClose(req.ProtoMajor, req.ProtoMinor, req.Header, false)
+
+ err = readTransfer(req, b)
+ if err != nil {
+ return nil, err
+ }
+
+ if req.isH2Upgrade() {
+ // Because it's neither chunked, nor declared:
+ req.ContentLength = -1
+
+ // We want to give handlers a chance to hijack the
+ // connection, but we need to prevent the Server from
+ // dealing with the connection further if it's not
+ // hijacked. Set Close to ensure that:
+ req.Close = true
+ }
+ return req, nil
+}
+
+// MaxBytesReader is similar to io.LimitReader but is intended for
+// limiting the size of incoming request bodies. In contrast to
+// io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a
+// non-nil error of type *MaxBytesError for a Read beyond the limit,
+// and closes the underlying reader when its Close method is called.
+//
+// MaxBytesReader prevents clients from accidentally or maliciously
+// sending a large request and wasting server resources. If possible,
+// it tells the ResponseWriter to close the connection after the limit
+// has been reached.
+func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser {
+ if n < 0 { // Treat negative limits as equivalent to 0.
+ n = 0
+ }
+ return &maxBytesReader{w: w, r: r, i: n, n: n}
+}
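+
+// Illustrative usage in a handler (editor's sketch):
+//
+//	r.Body = MaxBytesReader(w, r.Body, 1<<20) // cap the body at 1 MiB
+//	body, err := io.ReadAll(r.Body)
+//	// err is a *MaxBytesError if the client sent more than 1 MiB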
+
+// MaxBytesError is returned by MaxBytesReader when its read limit is exceeded.
+type MaxBytesError struct {
+ Limit int64
+}
+
+func (e *MaxBytesError) Error() string {
+ // Due to Hyrum's law, this text cannot be changed.
+ return "http: request body too large"
+}
+
+type maxBytesReader struct {
+ w ResponseWriter
+ r io.ReadCloser // underlying reader
+ i int64 // max bytes initially, for MaxBytesError
+ n int64 // max bytes remaining
+ err error // sticky error
+}
+
+func (l *maxBytesReader) Read(p []byte) (n int, err error) {
+ if l.err != nil {
+ return 0, l.err
+ }
+ if len(p) == 0 {
+ return 0, nil
+ }
+ // If they asked for a 32KB read but only 5 bytes are
+ // remaining, no need to read 32KB. 6 bytes will answer the
+ // question of whether we hit the limit or go past it.
+ if int64(len(p)) > l.n+1 {
+ p = p[:l.n+1]
+ }
+ n, err = l.r.Read(p)
+
+ if int64(n) <= l.n {
+ l.n -= int64(n)
+ l.err = err
+ return n, err
+ }
+
+ n = int(l.n)
+ l.n = 0
+
+ // The server code and client code both use
+ // maxBytesReader. This "requestTooLarge" check is
+ // only used by the server code. To prevent binaries
+ // which only use the HTTP Client code (such as
+ // cmd/go) from also linking in the HTTP server, don't
+ // use a static type assertion to the server
+ // "*response" type. Check this interface instead:
+ type requestTooLarger interface {
+ requestTooLarge()
+ }
+ if res, ok := l.w.(requestTooLarger); ok {
+ res.requestTooLarge()
+ }
+ l.err = &MaxBytesError{l.i}
+ return n, l.err
+}
+
+func (l *maxBytesReader) Close() error {
+ return l.r.Close()
+}
+
+func copyValues(dst, src url.Values) {
+ for k, vs := range src {
+ dst[k] = append(dst[k], vs...)
+ }
+}
+
+func parsePostForm(r *Request) (vs url.Values, err error) {
+ if r.Body == nil {
+ err = errors.New("missing form body")
+ return
+ }
+ ct := r.Header.Get("Content-Type")
+ // RFC 7231, section 3.1.1.5 - empty type
+ // MAY be treated as application/octet-stream
+ if ct == "" {
+ ct = "application/octet-stream"
+ }
+ ct, _, err = mime.ParseMediaType(ct)
+ switch {
+ case ct == "application/x-www-form-urlencoded":
+ var reader io.Reader = r.Body
+ maxFormSize := int64(1<<63 - 1)
+ if _, ok := r.Body.(*maxBytesReader); !ok {
+ maxFormSize = int64(10 << 20) // 10 MB is a lot of text.
+ reader = io.LimitReader(r.Body, maxFormSize+1)
+ }
+ b, e := io.ReadAll(reader)
+ if e != nil {
+ if err == nil {
+ err = e
+ }
+ break
+ }
+ if int64(len(b)) > maxFormSize {
+ err = errors.New("http: POST too large")
+ return
+ }
+ vs, e = url.ParseQuery(string(b))
+ if err == nil {
+ err = e
+ }
+ case ct == "multipart/form-data":
+ // handled by ParseMultipartForm (which is calling us, or should be)
+ // TODO(bradfitz): there are too many possible
+ // orders to call too many functions here.
+ // Clean this up and write more tests.
+ // request_test.go contains the start of this,
+ // in TestParseMultipartFormOrder and others.
+ }
+ return
+}
+
+// ParseForm populates r.Form and r.PostForm.
+//
+// For all requests, ParseForm parses the raw query from the URL and updates
+// r.Form.
+//
+// For POST, PUT, and PATCH requests, it also reads the request body, parses it
+// as a form and puts the results into both r.PostForm and r.Form. Request body
+// parameters take precedence over URL query string values in r.Form.
+//
+// If the request Body's size has not already been limited by MaxBytesReader,
+// the size is capped at 10MB.
+//
+// For other HTTP methods, or when the Content-Type is not
+// application/x-www-form-urlencoded, the request Body is not read, and
+// r.PostForm is initialized to a non-nil, empty value.
+//
+// ParseMultipartForm calls ParseForm automatically.
+// ParseForm is idempotent.
+func (r *Request) ParseForm() error {
+ var err error
+ if r.PostForm == nil {
+ if r.Method == "POST" || r.Method == "PUT" || r.Method == "PATCH" {
+ r.PostForm, err = parsePostForm(r)
+ }
+ if r.PostForm == nil {
+ r.PostForm = make(url.Values)
+ }
+ }
+ if r.Form == nil {
+ if len(r.PostForm) > 0 {
+ r.Form = make(url.Values)
+ copyValues(r.Form, r.PostForm)
+ }
+ var newValues url.Values
+ if r.URL != nil {
+ var e error
+ newValues, e = url.ParseQuery(r.URL.RawQuery)
+ if err == nil {
+ err = e
+ }
+ }
+ if newValues == nil {
+ newValues = make(url.Values)
+ }
+ if r.Form == nil {
+ r.Form = newValues
+ } else {
+ copyValues(r.Form, newValues)
+ }
+ }
+ return err
+}
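A short usage sketch of the precedence rules ParseForm documents; the "name" field and the handler are invented for illustration:

package main

import (
	"fmt"
	"net/http"
)

func greet(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseForm(); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// For POST/PUT/PATCH, body values sort first in r.Form;
	// r.PostForm never contains URL query parameters.
	fmt.Fprintf(w, "hello %s\n", r.Form.Get("name"))
}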
+
+// ParseMultipartForm parses a request body as multipart/form-data.
+// The whole request body is parsed and up to a total of maxMemory bytes of
+// its file parts are stored in memory, with the remainder stored on
+// disk in temporary files.
+// ParseMultipartForm calls ParseForm if necessary.
+// If ParseForm returns an error, ParseMultipartForm returns it but also
+// continues parsing the request body.
+// After one call to ParseMultipartForm, subsequent calls have no effect.
+func (r *Request) ParseMultipartForm(maxMemory int64) error {
+ if r.MultipartForm == multipartByReader {
+ return errors.New("http: multipart handled by MultipartReader")
+ }
+ var parseFormErr error
+ if r.Form == nil {
+ // Let errors in ParseForm fall through, and just
+ // return the error at the end.
+ parseFormErr = r.ParseForm()
+ }
+ if r.MultipartForm != nil {
+ return nil
+ }
+
+ mr, err := r.multipartReader(false)
+ if err != nil {
+ return err
+ }
+
+ f, err := mr.ReadForm(maxMemory)
+ if err != nil {
+ return err
+ }
+
+ if r.PostForm == nil {
+ r.PostForm = make(url.Values)
+ }
+ for k, v := range f.Value {
+ r.Form[k] = append(r.Form[k], v...)
+ // r.PostForm should also be populated. See Issue 9305.
+ r.PostForm[k] = append(r.PostForm[k], v...)
+ }
+
+ r.MultipartForm = f
+
+ return parseFormErr
+}
+
+// FormValue returns the first value for the named component of the query.
+// POST, PUT, and PATCH body parameters take precedence over URL query string values.
+// FormValue calls ParseMultipartForm and ParseForm if necessary and ignores
+// any errors returned by these functions.
+// If key is not present, FormValue returns the empty string.
+// To access multiple values of the same key, call ParseForm and
+// then inspect Request.Form directly.
+func (r *Request) FormValue(key string) string {
+ if r.Form == nil {
+ r.ParseMultipartForm(defaultMaxMemory)
+ }
+ if vs := r.Form[key]; len(vs) > 0 {
+ return vs[0]
+ }
+ return ""
+}
+
+// PostFormValue returns the first value for the named component of the POST,
+// PATCH, or PUT request body. URL query parameters are ignored.
+// PostFormValue calls ParseMultipartForm and ParseForm if necessary and ignores
+// any errors returned by these functions.
+// If key is not present, PostFormValue returns the empty string.
+func (r *Request) PostFormValue(key string) string {
+ if r.PostForm == nil {
+ r.ParseMultipartForm(defaultMaxMemory)
+ }
+ if vs := r.PostForm[key]; len(vs) > 0 {
+ return vs[0]
+ }
+ return ""
+}
+
+// FormFile returns the first file for the provided form key.
+// FormFile calls ParseMultipartForm and ParseForm if necessary.
+func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, error) {
+ if r.MultipartForm == multipartByReader {
+ return nil, nil, errors.New("http: multipart handled by MultipartReader")
+ }
+ if r.MultipartForm == nil {
+ err := r.ParseMultipartForm(defaultMaxMemory)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ if r.MultipartForm != nil && r.MultipartForm.File != nil {
+ if fhs := r.MultipartForm.File[key]; len(fhs) > 0 {
+ f, err := fhs[0].Open()
+ return f, fhs[0], err
+ }
+ }
+ return nil, nil, ErrMissingFile
+}
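A hedged sketch of receiving an upload through FormFile, which triggers ParseMultipartForm with the default memory cap as described above; the "upload" field name is invented:

package main

import (
	"fmt"
	"net/http"
)

func uploadHandler(w http.ResponseWriter, r *http.Request) {
	file, fh, err := r.FormFile("upload") // parses the multipart form if needed
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	defer file.Close()
	fmt.Fprintf(w, "got %s (%d bytes)\n", fh.Filename, fh.Size)
}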
+
+func (r *Request) expectsContinue() bool {
+ return hasToken(r.Header.get("Expect"), "100-continue")
+}
+
+func (r *Request) wantsHttp10KeepAlive() bool {
+ if r.ProtoMajor != 1 || r.ProtoMinor != 0 {
+ return false
+ }
+ return hasToken(r.Header.get("Connection"), "keep-alive")
+}
+
+func (r *Request) wantsClose() bool {
+ if r.Close {
+ return true
+ }
+ return hasToken(r.Header.get("Connection"), "close")
+}
+
+func (r *Request) closeBody() error {
+ if r.Body == nil {
+ return nil
+ }
+ return r.Body.Close()
+}
+
+func (r *Request) isReplayable() bool {
+ if r.Body == nil || r.Body == NoBody || r.GetBody != nil {
+ switch valueOrDefault(r.Method, "GET") {
+ case "GET", "HEAD", "OPTIONS", "TRACE":
+ return true
+ }
+ // The Idempotency-Key, while non-standard, is widely used to
+ // mean a POST or other request is idempotent. See
+ // https://golang.org/issue/19943#issuecomment-421092421
+ if r.Header.has("Idempotency-Key") || r.Header.has("X-Idempotency-Key") {
+ return true
+ }
+ }
+ return false
+}
+
+// outgoingLength reports the Content-Length of this outgoing (Client) request.
+// It maps 0 into -1 (unknown) when the Body is non-nil.
+func (r *Request) outgoingLength() int64 {
+ if r.Body == nil || r.Body == NoBody {
+ return 0
+ }
+ if r.ContentLength != 0 {
+ return r.ContentLength
+ }
+ return -1
+}
+
+// requestMethodUsuallyLacksBody reports whether the given request
+// method is one that typically does not involve a request body.
+// This is used by the Transport (via
+// transferWriter.shouldSendChunkedRequestBody) to determine whether
+// we try to test-read a byte from a non-nil Request.Body when
+// Request.outgoingLength() returns -1. See the comments in
+// shouldSendChunkedRequestBody.
+func requestMethodUsuallyLacksBody(method string) bool {
+ switch method {
+ case "GET", "HEAD", "DELETE", "OPTIONS", "PROPFIND", "SEARCH":
+ return true
+ }
+ return false
+}
+
+// requiresHTTP1 reports whether this request requires being sent on
+// an HTTP/1 connection.
+func (r *Request) requiresHTTP1() bool {
+ return hasToken(r.Header.Get("Connection"), "upgrade") &&
+ ascii.EqualFold(r.Header.Get("Upgrade"), "websocket")
+}
diff --git a/contrib/go/_std_1.19/src/net/http/response.go b/contrib/go/_std_1.19/src/net/http/response.go
new file mode 100644
index 0000000000..755c696557
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/http/response.go
@@ -0,0 +1,371 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP Response reading and parsing.
+
+package http
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net/textproto"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "golang.org/x/net/http/httpguts"
+)
+
+var respExcludeHeader = map[string]bool{
+ "Content-Length": true,
+ "Transfer-Encoding": true,
+ "Trailer": true,
+}
+
+// Response represents the response from an HTTP request.
+//
+// The Client and Transport return Responses from servers once
+// the response headers have been received. The response body
+// is streamed on demand as the Body field is read.
+type Response struct {
+ Status string // e.g. "200 OK"
+ StatusCode int // e.g. 200
+ Proto string // e.g. "HTTP/1.0"
+ ProtoMajor int // e.g. 1
+ ProtoMinor int // e.g. 0
+
+ // Header maps header keys to values. If the response had multiple
+ // headers with the same key, they may be concatenated, with comma
+ // delimiters. (RFC 7230, section 3.2.2 requires that multiple headers
+ // be semantically equivalent to a comma-delimited sequence.) When
+ // Header values are duplicated by other fields in this struct (e.g.,
+ // ContentLength, TransferEncoding, Trailer), the field values are
+ // authoritative.
+ //
+ // Keys in the map are canonicalized (see CanonicalHeaderKey).
+ Header Header
+
+ // Body represents the response body.
+ //
+ // The response body is streamed on demand as the Body field
+ // is read. If the network connection fails or the server
+ // terminates the response, Body.Read calls return an error.
+ //
+ // The http Client and Transport guarantee that Body is always
+ // non-nil, even on responses without a body or responses with
+ // a zero-length body. It is the caller's responsibility to
+ // close Body. The default HTTP client's Transport may not
+ // reuse HTTP/1.x "keep-alive" TCP connections if the Body is
+ // not read to completion and closed.
+ //
+ // The Body is automatically dechunked if the server replied
+ // with a "chunked" Transfer-Encoding.
+ //
+ // As of Go 1.12, the Body will also implement io.Writer
+ // on a successful "101 Switching Protocols" response,
+ // as used by WebSockets and HTTP/2's "h2c" mode.
+ Body io.ReadCloser
+
+ // ContentLength records the length of the associated content. The
+ // value -1 indicates that the length is unknown. Unless Request.Method
+ // is "HEAD", values >= 0 indicate that the given number of bytes may
+ // be read from Body.
+ ContentLength int64
+
+ // TransferEncoding contains the transfer encodings from outer-most to
+ // inner-most. A nil value means that the "identity" encoding is used.
+ TransferEncoding []string
+
+ // Close records whether the header directed that the connection be
+ // closed after reading Body. The value is advice for clients: neither
+ // ReadResponse nor Response.Write ever closes a connection.
+ Close bool
+
+ // Uncompressed reports whether the response was sent compressed but
+ // was decompressed by the http package. When true, reading from
+ // Body yields the uncompressed content instead of the compressed
+ // content actually sent by the server, ContentLength is set to -1,
+ // and the "Content-Length" and "Content-Encoding" fields are deleted
+ // from the response Header. To get the original response from
+ // the server, set Transport.DisableCompression to true.
+ Uncompressed bool
+
+ // Trailer maps trailer keys to values in the same
+ // format as Header.
+ //
+ // The Trailer initially contains only nil values, one for
+ // each key specified in the server's "Trailer" header
+ // value. Those values are not added to Header.
+ //
+ // Trailer must not be accessed concurrently with Read calls
+ // on the Body.
+ //
+ // After Body.Read has returned io.EOF, Trailer will contain
+ // any trailer values sent by the server.
+ Trailer Header
+
+ // Request is the request that was sent to obtain this Response.
+ // Request's Body is nil (having already been consumed).
+ // This is only populated for Client requests.
+ Request *Request
+
+ // TLS contains information about the TLS connection on which the
+ // response was received. It is nil for unencrypted responses.
+ // The pointer is shared between responses and should not be
+ // modified.
+ TLS *tls.ConnectionState
+}
+
+// Cookies parses and returns the cookies set in the Set-Cookie headers.
+func (r *Response) Cookies() []*Cookie {
+ return readSetCookies(r.Header)
+}
+
+// ErrNoLocation is returned by Response's Location method
+// when no Location header is present.
+var ErrNoLocation = errors.New("http: no Location header in response")
+
+// Location returns the URL of the response's "Location" header,
+// if present. Relative redirects are resolved relative to
+// the Response's Request. ErrNoLocation is returned if no
+// Location header is present.
+func (r *Response) Location() (*url.URL, error) {
+ lv := r.Header.Get("Location")
+ if lv == "" {
+ return nil, ErrNoLocation
+ }
+ if r.Request != nil && r.Request.URL != nil {
+ return r.Request.URL.Parse(lv)
+ }
+ return url.Parse(lv)
+}
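To observe the Location header at all, a client has to stop following redirects; a minimal sketch using the public CheckRedirect hook, with a hypothetical URL:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	client := &http.Client{
		// Return the redirect response itself instead of following it.
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
	resp, err := client.Get("http://example.com/old") // hypothetical URL
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	if loc, err := resp.Location(); err == nil {
		fmt.Println("redirects to", loc) // resolved against the request URL
	} else if err == http.ErrNoLocation {
		fmt.Println("no Location header")
	}
}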
+
+// ReadResponse reads and returns an HTTP response from r.
+// The req parameter optionally specifies the Request that corresponds
+// to this Response. If nil, a GET request is assumed.
+// Clients must call resp.Body.Close when finished reading resp.Body.
+// After that call, clients can inspect resp.Trailer to find key/value
+// pairs included in the response trailer.
+func ReadResponse(r *bufio.Reader, req *Request) (*Response, error) {
+ tp := textproto.NewReader(r)
+ resp := &Response{
+ Request: req,
+ }
+
+ // Parse the first line of the response.
+ line, err := tp.ReadLine()
+ if err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return nil, err
+ }
+ proto, status, ok := strings.Cut(line, " ")
+ if !ok {
+ return nil, badStringError("malformed HTTP response", line)
+ }
+ resp.Proto = proto
+ resp.Status = strings.TrimLeft(status, " ")
+
+ statusCode, _, _ := strings.Cut(resp.Status, " ")
+ if len(statusCode) != 3 {
+ return nil, badStringError("malformed HTTP status code", statusCode)
+ }
+ resp.StatusCode, err = strconv.Atoi(statusCode)
+ if err != nil || resp.StatusCode < 0 {
+ return nil, badStringError("malformed HTTP status code", statusCode)
+ }
+ if resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok {
+ return nil, badStringError("malformed HTTP version", resp.Proto)
+ }
+
+ // Parse the response headers.
+ mimeHeader, err := tp.ReadMIMEHeader()
+ if err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return nil, err
+ }
+ resp.Header = Header(mimeHeader)
+
+ fixPragmaCacheControl(resp.Header)
+
+ err = readTransfer(resp, r)
+ if err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
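A self-contained sketch of ReadResponse on a canned wire response; passing nil for the request makes it assume GET, per the doc comment above:

package main

import (
	"bufio"
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	raw := "HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nhi"
	resp, err := http.ReadResponse(bufio.NewReader(strings.NewReader(raw)), nil)
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(resp.StatusCode, string(body)) // 200 hi
}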
+
+// RFC 7234, section 5.4: Should treat
+//
+// Pragma: no-cache
+//
+// like
+//
+// Cache-Control: no-cache
+func fixPragmaCacheControl(header Header) {
+ if hp, ok := header["Pragma"]; ok && len(hp) > 0 && hp[0] == "no-cache" {
+ if _, presentcc := header["Cache-Control"]; !presentcc {
+ header["Cache-Control"] = []string{"no-cache"}
+ }
+ }
+}
+
+// ProtoAtLeast reports whether the HTTP protocol used
+// in the response is at least major.minor.
+func (r *Response) ProtoAtLeast(major, minor int) bool {
+ return r.ProtoMajor > major ||
+ r.ProtoMajor == major && r.ProtoMinor >= minor
+}
+
+// Write writes r to w in the HTTP/1.x server response format,
+// including the status line, headers, body, and optional trailer.
+//
+// This method consults the following fields of the response r:
+//
+// StatusCode
+// ProtoMajor
+// ProtoMinor
+// Request.Method
+// TransferEncoding
+// Trailer
+// Body
+// ContentLength
+// Header (values for non-canonical keys will have unpredictable behavior)
+//
+// The Response Body is closed after it is sent.
+func (r *Response) Write(w io.Writer) error {
+ // Status line
+ text := r.Status
+ if text == "" {
+ text = StatusText(r.StatusCode)
+ if text == "" {
+ text = "status code " + strconv.Itoa(r.StatusCode)
+ }
+ } else {
+ // Just to reduce stutter, if the user set r.Status to "200 OK" and StatusCode to 200.
+ // Not important.
+ text = strings.TrimPrefix(text, strconv.Itoa(r.StatusCode)+" ")
+ }
+
+ if _, err := fmt.Fprintf(w, "HTTP/%d.%d %03d %s\r\n", r.ProtoMajor, r.ProtoMinor, r.StatusCode, text); err != nil {
+ return err
+ }
+
+ // Clone it, so we can modify r1 as needed.
+ r1 := new(Response)
+ *r1 = *r
+ if r1.ContentLength == 0 && r1.Body != nil {
+ // Is it actually 0 length? Or just unknown?
+ var buf [1]byte
+ n, err := r1.Body.Read(buf[:])
+ if err != nil && err != io.EOF {
+ return err
+ }
+ if n == 0 {
+ // Reset it to a known zero reader, in case underlying one
+ // is unhappy being read repeatedly.
+ r1.Body = NoBody
+ } else {
+ r1.ContentLength = -1
+ r1.Body = struct {
+ io.Reader
+ io.Closer
+ }{
+ io.MultiReader(bytes.NewReader(buf[:1]), r.Body),
+ r.Body,
+ }
+ }
+ }
+ // If we're sending a non-chunked HTTP/1.1 response without a
+ // content-length, the only way to do that is the old HTTP/1.0
+ // way, by noting the EOF with a connection close, so we need
+ // to set Close.
+ if r1.ContentLength == -1 && !r1.Close && r1.ProtoAtLeast(1, 1) && !chunked(r1.TransferEncoding) && !r1.Uncompressed {
+ r1.Close = true
+ }
+
+ // Process Body,ContentLength,Close,Trailer
+ tw, err := newTransferWriter(r1)
+ if err != nil {
+ return err
+ }
+ err = tw.writeHeader(w, nil)
+ if err != nil {
+ return err
+ }
+
+ // Rest of header
+ err = r.Header.WriteSubset(w, respExcludeHeader)
+ if err != nil {
+ return err
+ }
+
+ // A Content-Length header may already have been sent for
+ // POST/PUT requests, even if zero length. See Issue 8180.
+ contentLengthAlreadySent := tw.shouldSendContentLength()
+ if r1.ContentLength == 0 && !chunked(r1.TransferEncoding) && !contentLengthAlreadySent && bodyAllowedForStatus(r.StatusCode) {
+ if _, err := io.WriteString(w, "Content-Length: 0\r\n"); err != nil {
+ return err
+ }
+ }
+
+ // End-of-header
+ if _, err := io.WriteString(w, "\r\n"); err != nil {
+ return err
+ }
+
+ // Write body and trailer
+ err = tw.writeBody(w)
+ if err != nil {
+ return err
+ }
+
+ // Success
+ return nil
+}
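The inverse of ReadResponse: a hedged sketch that serializes a hand-built Response into a buffer with the Write method above; all field values are illustrative:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	resp := &http.Response{
		StatusCode:    http.StatusOK,
		ProtoMajor:    1,
		ProtoMinor:    1,
		Header:        http.Header{"Content-Type": {"text/plain"}},
		Body:          io.NopCloser(strings.NewReader("ok")),
		ContentLength: 2,
	}
	var buf bytes.Buffer
	if err := resp.Write(&buf); err != nil { // Write closes the Body
		panic(err)
	}
	fmt.Print(buf.String()) // status line, headers, blank line, "ok"
}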
+
+func (r *Response) closeBody() {
+ if r.Body != nil {
+ r.Body.Close()
+ }
+}
+
+// bodyIsWritable reports whether the Body supports writing. The
+// Transport returns Writable bodies for 101 Switching Protocols
+// responses.
+// The Transport uses this method to determine whether a persistent
+// connection is done being managed from its perspective. Once we
+// return a writable response body to a user, the net/http package is
+// done managing that connection.
+func (r *Response) bodyIsWritable() bool {
+ _, ok := r.Body.(io.Writer)
+ return ok
+}
+
+// isProtocolSwitch reports whether the response code and header
+// indicate a successful protocol upgrade response.
+func (r *Response) isProtocolSwitch() bool {
+ return isProtocolSwitchResponse(r.StatusCode, r.Header)
+}
+
+// isProtocolSwitchResponse reports whether the response code and
+// response header indicate a successful protocol upgrade response.
+func isProtocolSwitchResponse(code int, h Header) bool {
+ return code == StatusSwitchingProtocols && isProtocolSwitchHeader(h)
+}
+
+// isProtocolSwitchHeader reports whether the request or response header
+// is for a protocol switch.
+func isProtocolSwitchHeader(h Header) bool {
+ return h.Get("Upgrade") != "" &&
+ httpguts.HeaderValuesContainsToken(h["Connection"], "Upgrade")
+}
diff --git a/contrib/go/_std_1.18/src/net/http/roundtrip.go b/contrib/go/_std_1.19/src/net/http/roundtrip.go
index c4c5d3b6eb..c4c5d3b6eb 100644
--- a/contrib/go/_std_1.18/src/net/http/roundtrip.go
+++ b/contrib/go/_std_1.19/src/net/http/roundtrip.go
diff --git a/contrib/go/_std_1.19/src/net/http/server.go b/contrib/go/_std_1.19/src/net/http/server.go
new file mode 100644
index 0000000000..87dd412984
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/http/server.go
@@ -0,0 +1,3655 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP server. See RFC 7230 through 7235.
+
+package http
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "internal/godebug"
+ "io"
+ "log"
+ "math/rand"
+ "net"
+ "net/textproto"
+ "net/url"
+ urlpkg "net/url"
+ "path"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/http/httpguts"
+)
+
+// Errors used by the HTTP server.
+var (
+ // ErrBodyNotAllowed is returned by ResponseWriter.Write calls
+ // when the HTTP method or response code does not permit a
+ // body.
+ ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")
+
+ // ErrHijacked is returned by ResponseWriter.Write calls when
+ // the underlying connection has been hijacked using the
+ // Hijacker interface. A zero-byte write on a hijacked
+ // connection will return ErrHijacked without any other side
+ // effects.
+ ErrHijacked = errors.New("http: connection has been hijacked")
+
+ // ErrContentLength is returned by ResponseWriter.Write calls
+ // when a Handler set a Content-Length response header with a
+ // declared size and then attempted to write more bytes than
+ // declared.
+ ErrContentLength = errors.New("http: wrote more than the declared Content-Length")
+
+ // Deprecated: ErrWriteAfterFlush is no longer returned by
+ // anything in the net/http package. Callers should not
+ // compare errors against this variable.
+ ErrWriteAfterFlush = errors.New("unused")
+)
+
+// A Handler responds to an HTTP request.
+//
+// ServeHTTP should write reply headers and data to the ResponseWriter
+// and then return. Returning signals that the request is finished; it
+// is not valid to use the ResponseWriter or read from the
+// Request.Body after or concurrently with the completion of the
+// ServeHTTP call.
+//
+// Depending on the HTTP client software, HTTP protocol version, and
+// any intermediaries between the client and the Go server, it may not
+// be possible to read from the Request.Body after writing to the
+// ResponseWriter. Cautious handlers should read the Request.Body
+// first, and then reply.
+//
+// Except for reading the body, handlers should not modify the
+// provided Request.
+//
+// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
+// that the effect of the panic was isolated to the active request.
+// It recovers the panic, logs a stack trace to the server error log,
+// and either closes the network connection or sends an HTTP/2
+// RST_STREAM, depending on the HTTP protocol. To abort a handler so
+// the client sees an interrupted response but the server doesn't log
+// an error, panic with the value ErrAbortHandler.
+type Handler interface {
+ ServeHTTP(ResponseWriter, *Request)
+}
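A minimal Handler implementation matching the contract above; the counter is an invented example, and the mutex guards against the concurrent ServeHTTP calls the server makes:

package main

import (
	"fmt"
	"net/http"
	"sync"
)

type countHandler struct {
	mu sync.Mutex
	n  int
}

func (h *countHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.n++
	fmt.Fprintf(w, "count is %d\n", h.n)
}

func main() {
	http.Handle("/count", &countHandler{})
	http.ListenAndServe(":8080", nil) // hypothetical listen address
}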
+
+// A ResponseWriter interface is used by an HTTP handler to
+// construct an HTTP response.
+//
+// A ResponseWriter may not be used after the Handler.ServeHTTP method
+// has returned.
+type ResponseWriter interface {
+ // Header returns the header map that will be sent by
+ // WriteHeader. The Header map also is the mechanism with which
+ // Handlers can set HTTP trailers.
+ //
+ // Changing the header map after a call to WriteHeader (or
+ // Write) has no effect unless the HTTP status code was of the
+ // 1xx class or the modified headers are trailers.
+ //
+ // There are two ways to set Trailers. The preferred way is to
+ // predeclare in the headers which trailers you will later
+ // send by setting the "Trailer" header to the names of the
+ // trailer keys which will come later. In this case, those
+ // keys of the Header map are treated as if they were
+ // trailers. See the example. The second way, for trailer
+ // keys not known to the Handler until after the first Write,
+ // is to prefix the Header map keys with the TrailerPrefix
+ // constant value. See TrailerPrefix.
+ //
+ // To suppress automatic response headers (such as "Date"), set
+ // their value to nil.
+ Header() Header
+
+ // Write writes the data to the connection as part of an HTTP reply.
+ //
+ // If WriteHeader has not yet been called, Write calls
+ // WriteHeader(http.StatusOK) before writing the data. If the Header
+ // does not contain a Content-Type line, Write adds a Content-Type set
+ // to the result of passing the initial 512 bytes of written data to
+ // DetectContentType. Additionally, if the total size of all written
+ // data is under a few KB and there are no Flush calls, the
+ // Content-Length header is added automatically.
+ //
+ // Depending on the HTTP protocol version and the client, calling
+ // Write or WriteHeader may prevent future reads on the
+ // Request.Body. For HTTP/1.x requests, handlers should read any
+ // needed request body data before writing the response. Once the
+ // headers have been flushed (due to either an explicit Flusher.Flush
+ // call or writing enough data to trigger a flush), the request body
+ // may be unavailable. For HTTP/2 requests, the Go HTTP server permits
+ // handlers to continue to read the request body while concurrently
+ // writing the response. However, such behavior may not be supported
+ // by all HTTP/2 clients. Handlers should read before writing if
+ // possible to maximize compatibility.
+ Write([]byte) (int, error)
+
+ // WriteHeader sends an HTTP response header with the provided
+ // status code.
+ //
+ // If WriteHeader is not called explicitly, the first call to Write
+ // will trigger an implicit WriteHeader(http.StatusOK).
+ // Thus explicit calls to WriteHeader are mainly used to
+ // send error codes or 1xx informational responses.
+ //
+ // The provided code must be a valid HTTP 1xx-5xx status code.
+ // Any number of 1xx headers may be written, followed by at most
+ // one 2xx-5xx header. 1xx headers are sent immediately, but 2xx-5xx
+ // headers may be buffered. Use the Flusher interface to send
+ // buffered data. The header map is cleared when 2xx-5xx headers are
+ // sent, but not with 1xx headers.
+ //
+ // The server will automatically send a 100 (Continue) header
+ // on the first read from the request body if the request has
+ // an "Expect: 100-continue" header.
+ WriteHeader(statusCode int)
+}
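A sketch of the preferred trailer mechanism just described: predeclare the key in the "Trailer" header, write the body, then set the value. "X-Checksum" and its value are invented:

package main

import "net/http"

func trailerHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Trailer", "X-Checksum") // declare before WriteHeader
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("payload"))
	// Set after the body; sent as a trailer because it was predeclared.
	w.Header().Set("X-Checksum", "ab12")
}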
+
+// The Flusher interface is implemented by ResponseWriters that allow
+// an HTTP handler to flush buffered data to the client.
+//
+// The default HTTP/1.x and HTTP/2 ResponseWriter implementations
+// support Flusher, but ResponseWriter wrappers may not. Handlers
+// should always test for this ability at runtime.
+//
+// Note that even for ResponseWriters that support Flush,
+// if the client is connected through an HTTP proxy,
+// the buffered data may not reach the client until the response
+// completes.
+type Flusher interface {
+ // Flush sends any buffered data to the client.
+ Flush()
+}
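A hedged streaming sketch using the runtime test the Flusher docs recommend; the tick loop is illustrative:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func stream(w http.ResponseWriter, r *http.Request) {
	f, ok := w.(http.Flusher) // wrappers may not implement Flusher
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "tick %d\n", i)
		f.Flush() // push buffered data to the client now
		time.Sleep(time.Second)
	}
}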
+
+// The Hijacker interface is implemented by ResponseWriters that allow
+// an HTTP handler to take over the connection.
+//
+// The default ResponseWriter for HTTP/1.x connections supports
+// Hijacker, but HTTP/2 connections intentionally do not.
+// ResponseWriter wrappers may also not support Hijacker. Handlers
+// should always test for this ability at runtime.
+type Hijacker interface {
+ // Hijack lets the caller take over the connection.
+ // After a call to Hijack the HTTP server library
+ // will not do anything else with the connection.
+ //
+ // It becomes the caller's responsibility to manage
+ // and close the connection.
+ //
+ // The returned net.Conn may have read or write deadlines
+ // already set, depending on the configuration of the
+ // Server. It is the caller's responsibility to set
+ // or clear those deadlines as needed.
+ //
+ // The returned bufio.Reader may contain unprocessed buffered
+ // data from the client.
+ //
+ // After a call to Hijack, the original Request.Body must not
+ // be used. The original Request's Context remains valid and
+ // is not canceled until the Request's ServeHTTP method
+ // returns.
+ Hijack() (net.Conn, *bufio.ReadWriter, error)
+}
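A minimal hijack sketch under the same runtime-test caveat; after Hijack returns, the raw bytes written here are the caller's own responsibility:

package main

import "net/http"

func rawHandler(w http.ResponseWriter, r *http.Request) {
	hj, ok := w.(http.Hijacker) // HTTP/2 ResponseWriters won't implement this
	if !ok {
		http.Error(w, "hijacking unsupported", http.StatusInternalServerError)
		return
	}
	conn, bufrw, err := hj.Hijack()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer conn.Close()
	// The server no longer touches this connection; speak HTTP/1.x by hand.
	bufrw.WriteString("HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nhi")
	bufrw.Flush()
}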
+
+// The CloseNotifier interface is implemented by ResponseWriters which
+// allow detecting when the underlying connection has gone away.
+//
+// This mechanism can be used to cancel long operations on the server
+// if the client has disconnected before the response is ready.
+//
+// Deprecated: the CloseNotifier interface predates Go's context package.
+// New code should use Request.Context instead.
+type CloseNotifier interface {
+ // CloseNotify returns a channel that receives at most a
+ // single value (true) when the client connection has gone
+ // away.
+ //
+ // CloseNotify may wait to notify until Request.Body has been
+ // fully read.
+ //
+ // After the Handler has returned, there is no guarantee
+ // that the channel receives a value.
+ //
+ // If the protocol is HTTP/1.1 and CloseNotify is called while
+ // processing an idempotent request (such as a GET) while
+ // HTTP/1.1 pipelining is in use, the arrival of a subsequent
+ // pipelined request may cause a value to be sent on the
+ // returned channel. In practice HTTP/1.1 pipelining is not
+ // enabled in browsers and not seen often in the wild. If this
+ // is a problem, use HTTP/2 or only use CloseNotify on methods
+ // such as POST.
+ CloseNotify() <-chan bool
+}
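As the deprecation note suggests, Request.Context covers the same need; a sketch of abandoning work when the client goes away (the five-second job is invented):

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func slow(w http.ResponseWriter, r *http.Request) {
	select {
	case <-time.After(5 * time.Second):
		fmt.Fprintln(w, "done")
	case <-r.Context().Done():
		// Client disconnected; stop early.
		log.Println("abandoned:", r.Context().Err())
	}
}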
+
+var (
+ // ServerContextKey is a context key. It can be used in HTTP
+ // handlers with Context.Value to access the server that
+ // started the handler. The associated value will be of
+ // type *Server.
+ ServerContextKey = &contextKey{"http-server"}
+
+ // LocalAddrContextKey is a context key. It can be used in
+ // HTTP handlers with Context.Value to access the local
+ // address the connection arrived on.
+ // The associated value will be of type net.Addr.
+ LocalAddrContextKey = &contextKey{"local-addr"}
+)
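A sketch of reading both context keys from inside a handler; the type assertions mirror the documented value types:

package main

import (
	"fmt"
	"net"
	"net/http"
)

func info(w http.ResponseWriter, r *http.Request) {
	srv, _ := r.Context().Value(http.ServerContextKey).(*http.Server)
	addr, _ := r.Context().Value(http.LocalAddrContextKey).(net.Addr)
	if srv != nil {
		fmt.Fprintf(w, "server configured addr: %q\n", srv.Addr)
	}
	fmt.Fprintf(w, "connection arrived on: %v\n", addr)
}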
+
+// A conn represents the server side of an HTTP connection.
+type conn struct {
+ // server is the server on which the connection arrived.
+ // Immutable; never nil.
+ server *Server
+
+ // cancelCtx cancels the connection-level context.
+ cancelCtx context.CancelFunc
+
+ // rwc is the underlying network connection.
+ // This is never wrapped by other types and is the value given out
+ // to CloseNotifier callers. It is usually of type *net.TCPConn or
+ // *tls.Conn.
+ rwc net.Conn
+
+ // remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously
+ // inside the Listener's Accept goroutine, as some implementations block.
+ // It is populated immediately inside the (*conn).serve goroutine.
+ // This is the value of a Handler's (*Request).RemoteAddr.
+ remoteAddr string
+
+ // tlsState is the TLS connection state when using TLS.
+ // nil means not TLS.
+ tlsState *tls.ConnectionState
+
+ // werr is set to the first write error to rwc.
+ // It is set via checkConnErrorWriter{w}, where bufw writes.
+ werr error
+
+ // r is bufr's read source. It's a wrapper around rwc that provides
+ // io.LimitedReader-style limiting (while reading request headers)
+ // and functionality to support CloseNotifier. See *connReader docs.
+ r *connReader
+
+ // bufr reads from r.
+ bufr *bufio.Reader
+
+ // bufw writes to checkConnErrorWriter{c}, which populates werr on error.
+ bufw *bufio.Writer
+
+ // lastMethod is the method of the most recent request
+ // on this connection, if any.
+ lastMethod string
+
+ curReq atomic.Value // of *response (which has a Request in it)
+
+ curState struct{ atomic uint64 } // packed (unixtime<<8|uint8(ConnState))
+
+ // mu guards hijackedv
+ mu sync.Mutex
+
+ // hijackedv is whether this connection has been hijacked
+ // by a Handler with the Hijacker interface.
+ // It is guarded by mu.
+ hijackedv bool
+}
+
+func (c *conn) hijacked() bool {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.hijackedv
+}
+
+// c.mu must be held.
+func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
+ if c.hijackedv {
+ return nil, nil, ErrHijacked
+ }
+ c.r.abortPendingRead()
+
+ c.hijackedv = true
+ rwc = c.rwc
+ rwc.SetDeadline(time.Time{})
+
+ buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc))
+ if c.r.hasByte {
+ if _, err := c.bufr.Peek(c.bufr.Buffered() + 1); err != nil {
+ return nil, nil, fmt.Errorf("unexpected Peek failure reading buffered byte: %v", err)
+ }
+ }
+ c.setState(rwc, StateHijacked, runHooks)
+ return
+}
+
+// This should be >= 512 bytes for DetectContentType,
+// but otherwise it's somewhat arbitrary.
+const bufferBeforeChunkingSize = 2048
+
+// chunkWriter writes to a response's conn buffer, and is the writer
+// wrapped by the response.w buffered writer.
+//
+// chunkWriter also is responsible for finalizing the Header, including
+// conditionally setting the Content-Type and setting a Content-Length
+// in cases where the handler's final output is smaller than the buffer
+// size. It also conditionally adds chunk headers, when in chunking mode.
+//
+// See the comment above (*response).Write for the entire write flow.
+type chunkWriter struct {
+ res *response
+
+ // header is either nil or a deep clone of res.handlerHeader
+ // at the time of res.writeHeader, if res.writeHeader is
+ // called and extra buffering is being done to calculate
+ // Content-Type and/or Content-Length.
+ header Header
+
+ // wroteHeader tells whether the header's been written to "the
+ // wire" (or rather: w.conn.buf). this is unlike
+ // (*response).wroteHeader, which tells only whether it was
+ // logically written.
+ wroteHeader bool
+
+ // set by the writeHeader method:
+ chunking bool // using chunked transfer encoding for reply body
+}
+
+var (
+ crlf = []byte("\r\n")
+ colonSpace = []byte(": ")
+)
+
+func (cw *chunkWriter) Write(p []byte) (n int, err error) {
+ if !cw.wroteHeader {
+ cw.writeHeader(p)
+ }
+ if cw.res.req.Method == "HEAD" {
+ // Eat writes.
+ return len(p), nil
+ }
+ if cw.chunking {
+ _, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p))
+ if err != nil {
+ cw.res.conn.rwc.Close()
+ return
+ }
+ }
+ n, err = cw.res.conn.bufw.Write(p)
+ if cw.chunking && err == nil {
+ _, err = cw.res.conn.bufw.Write(crlf)
+ }
+ if err != nil {
+ cw.res.conn.rwc.Close()
+ }
+ return
+}
+
+func (cw *chunkWriter) flush() {
+ if !cw.wroteHeader {
+ cw.writeHeader(nil)
+ }
+ cw.res.conn.bufw.Flush()
+}
+
+func (cw *chunkWriter) close() {
+ if !cw.wroteHeader {
+ cw.writeHeader(nil)
+ }
+ if cw.chunking {
+ bw := cw.res.conn.bufw // conn's bufio writer
+ // zero chunk to mark EOF
+ bw.WriteString("0\r\n")
+ if trailers := cw.res.finalTrailers(); trailers != nil {
+ trailers.Write(bw) // the writer handles noting errors
+ }
+ // final blank line after the trailers (whether
+ // present or not)
+ bw.WriteString("\r\n")
+ }
+}
+
+// A response represents the server side of an HTTP response.
+type response struct {
+ conn *conn
+ req *Request // request for this response
+ reqBody io.ReadCloser
+ cancelCtx context.CancelFunc // when ServeHTTP exits
+ wroteHeader bool // a non-1xx header has been (logically) written
+ wroteContinue bool // 100 Continue response was written
+ wants10KeepAlive bool // HTTP/1.0 w/ Connection "keep-alive"
+ wantsClose bool // HTTP request has Connection "close"
+
+ // canWriteContinue is a boolean value accessed as an atomic int32
+ // that says whether or not a 100 Continue header can be written
+ // to the connection.
+ // writeContinueMu must be held while writing the header.
+ // These two fields together synchronize the body reader
+ // (the expectContinueReader, which wants to write 100 Continue)
+ // against the main writer.
+ canWriteContinue atomicBool
+ writeContinueMu sync.Mutex
+
+ w *bufio.Writer // buffers output in chunks to chunkWriter
+ cw chunkWriter
+
+ // handlerHeader is the Header that Handlers get access to,
+ // which may be retained and mutated even after WriteHeader.
+ // handlerHeader is copied into cw.header at WriteHeader
+ // time, and privately mutated thereafter.
+ handlerHeader Header
+ calledHeader bool // handler accessed handlerHeader via Header
+
+ written int64 // number of bytes written in body
+ contentLength int64 // explicitly-declared Content-Length; or -1
+ status int // status code passed to WriteHeader
+
+ // close connection after this reply. set on request and
+ // updated after response from handler if there's a
+ // "Connection: keep-alive" response header and a
+ // Content-Length.
+ closeAfterReply bool
+
+ // requestBodyLimitHit is set by requestTooLarge when
+ // maxBytesReader hits its max size. It is checked in
+ // WriteHeader, to make sure we don't consume the
+ // remaining request body to try to advance to the next HTTP
+ // request. Instead, when this is set, we stop reading
+ // subsequent requests on this connection and stop reading
+ // input from it.
+ requestBodyLimitHit bool
+
+ // trailers are the headers to be sent after the handler
+ // finishes writing the body. This field is initialized from
+ // the Trailer response header when the response header is
+ // written.
+ trailers []string
+
+ handlerDone atomicBool // set true when the handler exits
+
+ // Buffers for Date, Content-Length, and status code
+ dateBuf [len(TimeFormat)]byte
+ clenBuf [10]byte
+ statusBuf [3]byte
+
+ // closeNotifyCh is the channel returned by CloseNotify.
+ // TODO(bradfitz): this is currently (for Go 1.8) always
+ // non-nil. Make this lazily-created again as it used to be?
+ closeNotifyCh chan bool
+ didCloseNotify int32 // atomic (only 0->1 winner should send)
+}
+
+// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
+// that, if present, signals that the map entry is actually for
+// the response trailers, and not the response headers. The prefix
+// is stripped after the ServeHTTP call finishes and the values are
+// sent in the trailers.
+//
+// This mechanism is intended only for trailers that are not known
+// prior to the headers being written. If the set of trailers is fixed
+// or known before the header is written, the normal Go trailers mechanism
+// is preferred:
+//
+// https://pkg.go.dev/net/http#ResponseWriter
+// https://pkg.go.dev/net/http#example-ResponseWriter-Trailers
+const TrailerPrefix = "Trailer:"
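For trailers not known until after the first Write, a hedged sketch of the prefix form; "X-Late" is invented, and the explicit Flush is one way to keep the response chunked so a trailer can still be sent:

package main

import "net/http"

func lateTrailer(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("body first"))
	if f, ok := w.(http.Flusher); ok {
		f.Flush() // avoid an automatic Content-Length; trailers need chunking
	}
	// The prefix is stripped and "X-Late" is sent after the body.
	w.Header().Set(http.TrailerPrefix+"X-Late", "value")
}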
+
+// finalTrailers is called after the Handler exits and returns a non-nil
+// value if the Handler set any trailers.
+func (w *response) finalTrailers() Header {
+ var t Header
+ for k, vv := range w.handlerHeader {
+ if strings.HasPrefix(k, TrailerPrefix) {
+ if t == nil {
+ t = make(Header)
+ }
+ t[strings.TrimPrefix(k, TrailerPrefix)] = vv
+ }
+ }
+ for _, k := range w.trailers {
+ if t == nil {
+ t = make(Header)
+ }
+ for _, v := range w.handlerHeader[k] {
+ t.Add(k, v)
+ }
+ }
+ return t
+}
+
+type atomicBool int32
+
+func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
+func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
+func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
+
+// declareTrailer is called for each Trailer header when the
+// response header is written. It notes that a header will need to be
+// written in the trailers at the end of the response.
+func (w *response) declareTrailer(k string) {
+ k = CanonicalHeaderKey(k)
+ if !httpguts.ValidTrailerHeader(k) {
+ // Forbidden by RFC 7230, section 4.1.2
+ return
+ }
+ w.trailers = append(w.trailers, k)
+}
+
+// requestTooLarge is called by maxBytesReader when too much input has
+// been read from the client.
+func (w *response) requestTooLarge() {
+ w.closeAfterReply = true
+ w.requestBodyLimitHit = true
+ if !w.wroteHeader {
+ w.Header().Set("Connection", "close")
+ }
+}
+
+// needsSniff reports whether a Content-Type still needs to be sniffed.
+func (w *response) needsSniff() bool {
+ _, haveType := w.handlerHeader["Content-Type"]
+ return !w.cw.wroteHeader && !haveType && w.written < sniffLen
+}
+
+// writerOnly hides an io.Writer value's optional ReadFrom method
+// from io.Copy.
+type writerOnly struct {
+ io.Writer
+}
+
+// ReadFrom is here to optimize copying from an *os.File regular file
+// to a *net.TCPConn with sendfile, or from a supported src type such
+// as a *net.TCPConn on Linux with splice.
+func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
+ bufp := copyBufPool.Get().(*[]byte)
+ buf := *bufp
+ defer copyBufPool.Put(bufp)
+
+ // Our underlying w.conn.rwc is usually a *TCPConn (with its
+ // own ReadFrom method). If not, just fall back to the normal
+ // copy method.
+ rf, ok := w.conn.rwc.(io.ReaderFrom)
+ if !ok {
+ return io.CopyBuffer(writerOnly{w}, src, buf)
+ }
+
+ // Copy the first sniffLen bytes before switching to ReadFrom.
+ // This ensures we don't start writing the response before the
+ // source is available (see golang.org/issue/5660) and provides
+ // enough bytes to perform Content-Type sniffing when required.
+ if !w.cw.wroteHeader {
+ n0, err := io.CopyBuffer(writerOnly{w}, io.LimitReader(src, sniffLen), buf)
+ n += n0
+ if err != nil || n0 < sniffLen {
+ return n, err
+ }
+ }
+
+ w.w.Flush() // get rid of any previous writes
+ w.cw.flush() // make sure Header is written; flush data to rwc
+
+ // Now that cw has been flushed, its chunking field is guaranteed initialized.
+ if !w.cw.chunking && w.bodyAllowed() {
+ n0, err := rf.ReadFrom(src)
+ n += n0
+ w.written += n0
+ return n, err
+ }
+
+ n0, err := io.CopyBuffer(writerOnly{w}, src, buf)
+ n += n0
+ return n, err
+}
+
+// debugServerConnections controls whether all server connections are wrapped
+// with a verbose logging wrapper.
+const debugServerConnections = false
+
+// Create a new connection from rwc.
+func (srv *Server) newConn(rwc net.Conn) *conn {
+ c := &conn{
+ server: srv,
+ rwc: rwc,
+ }
+ if debugServerConnections {
+ c.rwc = newLoggingConn("server", c.rwc)
+ }
+ return c
+}
+
+type readResult struct {
+ _ incomparable
+ n int
+ err error
+ b byte // byte read, if n == 1
+}
+
+// connReader is the io.Reader wrapper used by *conn. It combines a
+// selectively-activated io.LimitedReader (to bound request header
+// read sizes) with support for selectively keeping an io.Reader.Read
+// call blocked in a background goroutine to wait for activity and
+// trigger a CloseNotifier channel.
+type connReader struct {
+ conn *conn
+
+ mu sync.Mutex // guards following
+ hasByte bool
+ byteBuf [1]byte
+ cond *sync.Cond
+ inRead bool
+ aborted bool // set true before conn.rwc deadline is set to past
+ remain int64 // bytes remaining
+}
+
+func (cr *connReader) lock() {
+ cr.mu.Lock()
+ if cr.cond == nil {
+ cr.cond = sync.NewCond(&cr.mu)
+ }
+}
+
+func (cr *connReader) unlock() { cr.mu.Unlock() }
+
+func (cr *connReader) startBackgroundRead() {
+ cr.lock()
+ defer cr.unlock()
+ if cr.inRead {
+ panic("invalid concurrent Body.Read call")
+ }
+ if cr.hasByte {
+ return
+ }
+ cr.inRead = true
+ cr.conn.rwc.SetReadDeadline(time.Time{})
+ go cr.backgroundRead()
+}
+
+func (cr *connReader) backgroundRead() {
+ n, err := cr.conn.rwc.Read(cr.byteBuf[:])
+ cr.lock()
+ if n == 1 {
+ cr.hasByte = true
+ // We were past the end of the previous request's body already
+ // (since we wouldn't be in a background read otherwise), so
+ // this is a pipelined HTTP request. Prior to Go 1.11 we used to
+ // send on the CloseNotify channel and cancel the context here,
+ // but the behavior was documented as only "may", and we only
+ // did that because that's how CloseNotify accidentally behaved
+ // in very early Go releases prior to context support. Once we
+ // added context support, people used a Handler's
+ // Request.Context() and passed it along. Having that context
+ // cancel on pipelined HTTP requests caused problems.
+ // Fortunately, almost nothing uses HTTP/1.x pipelining.
+ // Unfortunately, apt-get does, or sometimes does.
+ // New Go 1.11 behavior: don't fire CloseNotify or cancel
+ // contexts on pipelined requests. Shouldn't affect people, but
+ // fixes cases like Issue 23921. This does mean that a client
+ // closing their TCP connection after sending a pipelined
+ // request won't cancel the context, but we'll catch that on any
+ // write failure (in checkConnErrorWriter.Write).
+ // If the server never writes, yes, there are still contrived
+ // server & client behaviors where this fails to ever cancel the
+ // context, but that's kinda why HTTP/1.x pipelining died
+ // anyway.
+ }
+ if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() {
+ // Ignore this error. It's the expected error from
+ // another goroutine calling abortPendingRead.
+ } else if err != nil {
+ cr.handleReadError(err)
+ }
+ cr.aborted = false
+ cr.inRead = false
+ cr.unlock()
+ cr.cond.Broadcast()
+}
+
+func (cr *connReader) abortPendingRead() {
+ cr.lock()
+ defer cr.unlock()
+ if !cr.inRead {
+ return
+ }
+ cr.aborted = true
+ cr.conn.rwc.SetReadDeadline(aLongTimeAgo)
+ for cr.inRead {
+ cr.cond.Wait()
+ }
+ cr.conn.rwc.SetReadDeadline(time.Time{})
+}
+
+func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain }
+func (cr *connReader) setInfiniteReadLimit() { cr.remain = maxInt64 }
+func (cr *connReader) hitReadLimit() bool { return cr.remain <= 0 }
+
+// handleReadError is called whenever a Read from the client returns a
+// non-nil error.
+//
+// The provided non-nil err is almost always io.EOF or a "use of
+// closed network connection". In any case, the error is not
+// particularly interesting, except perhaps for debugging during
+// development. Any error means the connection is dead and we should
+// cancel its context.
+//
+// It may be called from multiple goroutines.
+func (cr *connReader) handleReadError(_ error) {
+ cr.conn.cancelCtx()
+ cr.closeNotify()
+}
+
+// may be called from multiple goroutines.
+func (cr *connReader) closeNotify() {
+ res, _ := cr.conn.curReq.Load().(*response)
+ if res != nil && atomic.CompareAndSwapInt32(&res.didCloseNotify, 0, 1) {
+ res.closeNotifyCh <- true
+ }
+}
+
+func (cr *connReader) Read(p []byte) (n int, err error) {
+ cr.lock()
+ if cr.inRead {
+ cr.unlock()
+ if cr.conn.hijacked() {
+ panic("invalid Body.Read call. After hijacked, the original Request must not be used")
+ }
+ panic("invalid concurrent Body.Read call")
+ }
+ if cr.hitReadLimit() {
+ cr.unlock()
+ return 0, io.EOF
+ }
+ if len(p) == 0 {
+ cr.unlock()
+ return 0, nil
+ }
+ if int64(len(p)) > cr.remain {
+ p = p[:cr.remain]
+ }
+ if cr.hasByte {
+ p[0] = cr.byteBuf[0]
+ cr.hasByte = false
+ cr.unlock()
+ return 1, nil
+ }
+ cr.inRead = true
+ cr.unlock()
+ n, err = cr.conn.rwc.Read(p)
+
+ cr.lock()
+ cr.inRead = false
+ if err != nil {
+ cr.handleReadError(err)
+ }
+ cr.remain -= int64(n)
+ cr.unlock()
+
+ cr.cond.Broadcast()
+ return n, err
+}
+
+var (
+ bufioReaderPool sync.Pool
+ bufioWriter2kPool sync.Pool
+ bufioWriter4kPool sync.Pool
+)
+
+var copyBufPool = sync.Pool{
+ New: func() any {
+ b := make([]byte, 32*1024)
+ return &b
+ },
+}
+
+func bufioWriterPool(size int) *sync.Pool {
+ switch size {
+ case 2 << 10:
+ return &bufioWriter2kPool
+ case 4 << 10:
+ return &bufioWriter4kPool
+ }
+ return nil
+}
+
+func newBufioReader(r io.Reader) *bufio.Reader {
+ if v := bufioReaderPool.Get(); v != nil {
+ br := v.(*bufio.Reader)
+ br.Reset(r)
+ return br
+ }
+ // Note: if this reader size is ever changed, update
+ // TestHandlerBodyClose's assumptions.
+ return bufio.NewReader(r)
+}
+
+func putBufioReader(br *bufio.Reader) {
+ br.Reset(nil)
+ bufioReaderPool.Put(br)
+}
+
+func newBufioWriterSize(w io.Writer, size int) *bufio.Writer {
+ pool := bufioWriterPool(size)
+ if pool != nil {
+ if v := pool.Get(); v != nil {
+ bw := v.(*bufio.Writer)
+ bw.Reset(w)
+ return bw
+ }
+ }
+ return bufio.NewWriterSize(w, size)
+}
+
+func putBufioWriter(bw *bufio.Writer) {
+ bw.Reset(nil)
+ if pool := bufioWriterPool(bw.Available()); pool != nil {
+ pool.Put(bw)
+ }
+}
+
+// DefaultMaxHeaderBytes is the maximum permitted size of the headers
+// in an HTTP request.
+// This can be overridden by setting Server.MaxHeaderBytes.
+const DefaultMaxHeaderBytes = 1 << 20 // 1 MB
+
+func (srv *Server) maxHeaderBytes() int {
+ if srv.MaxHeaderBytes > 0 {
+ return srv.MaxHeaderBytes
+ }
+ return DefaultMaxHeaderBytes
+}
+
+func (srv *Server) initialReadLimitSize() int64 {
+ return int64(srv.maxHeaderBytes()) + 4096 // bufio slop
+}
+
+// tlsHandshakeTimeout returns the time limit permitted for the TLS
+// handshake, or zero for unlimited.
+//
+// It returns the minimum of any positive ReadHeaderTimeout,
+// ReadTimeout, or WriteTimeout.
+func (srv *Server) tlsHandshakeTimeout() time.Duration {
+ var ret time.Duration
+ for _, v := range [...]time.Duration{
+ srv.ReadHeaderTimeout,
+ srv.ReadTimeout,
+ srv.WriteTimeout,
+ } {
+ if v <= 0 {
+ continue
+ }
+ if ret == 0 || v < ret {
+ ret = v
+ }
+ }
+ return ret
+}
+
+// expectContinueReader is a wrapper around io.ReadCloser which, on the
+// first read, sends an HTTP/1.1 100 Continue header.
+type expectContinueReader struct {
+ resp *response
+ readCloser io.ReadCloser
+ closed atomicBool
+ sawEOF atomicBool
+}
+
+func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
+ if ecr.closed.isSet() {
+ return 0, ErrBodyReadAfterClose
+ }
+ w := ecr.resp
+ if !w.wroteContinue && w.canWriteContinue.isSet() && !w.conn.hijacked() {
+ w.wroteContinue = true
+ w.writeContinueMu.Lock()
+ if w.canWriteContinue.isSet() {
+ w.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
+ w.conn.bufw.Flush()
+ w.canWriteContinue.setFalse()
+ }
+ w.writeContinueMu.Unlock()
+ }
+ n, err = ecr.readCloser.Read(p)
+ if err == io.EOF {
+ ecr.sawEOF.setTrue()
+ }
+ return
+}
+
+func (ecr *expectContinueReader) Close() error {
+ ecr.closed.setTrue()
+ return ecr.readCloser.Close()
+}
+
+// TimeFormat is the time format to use when generating times in HTTP
+// headers. It is like time.RFC1123 but hard-codes GMT as the time
+// zone. The time being formatted must be in UTC for Format to
+// generate the correct format.
+//
+// For parsing this time format, see ParseTime.
+const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
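A small sketch of stamping an HTTP date header by hand with TimeFormat; the handler and header choice are illustrative, and note the UTC conversion the doc comment requires:

package main

import (
	"net/http"
	"time"
)

func stamp(w http.ResponseWriter, r *http.Request) {
	// Format in UTC to produce a valid HTTP date.
	w.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat))
}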
+
+// appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat))
+func appendTime(b []byte, t time.Time) []byte {
+ const days = "SunMonTueWedThuFriSat"
+ const months = "JanFebMarAprMayJunJulAugSepOctNovDec"
+
+ t = t.UTC()
+ yy, mm, dd := t.Date()
+ hh, mn, ss := t.Clock()
+ day := days[3*t.Weekday():]
+ mon := months[3*(mm-1):]
+
+ return append(b,
+ day[0], day[1], day[2], ',', ' ',
+ byte('0'+dd/10), byte('0'+dd%10), ' ',
+ mon[0], mon[1], mon[2], ' ',
+ byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ',
+ byte('0'+hh/10), byte('0'+hh%10), ':',
+ byte('0'+mn/10), byte('0'+mn%10), ':',
+ byte('0'+ss/10), byte('0'+ss%10), ' ',
+ 'G', 'M', 'T')
+}
+
+var errTooLarge = errors.New("http: request too large")
+
+// Read next request from connection.
+func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
+ if c.hijacked() {
+ return nil, ErrHijacked
+ }
+
+ var (
+ wholeReqDeadline time.Time // or zero if none
+ hdrDeadline time.Time // or zero if none
+ )
+ t0 := time.Now()
+ if d := c.server.readHeaderTimeout(); d > 0 {
+ hdrDeadline = t0.Add(d)
+ }
+ if d := c.server.ReadTimeout; d > 0 {
+ wholeReqDeadline = t0.Add(d)
+ }
+ c.rwc.SetReadDeadline(hdrDeadline)
+ if d := c.server.WriteTimeout; d > 0 {
+ defer func() {
+ c.rwc.SetWriteDeadline(time.Now().Add(d))
+ }()
+ }
+
+ c.r.setReadLimit(c.server.initialReadLimitSize())
+ if c.lastMethod == "POST" {
+ // RFC 7230 section 3 tolerance for old buggy clients.
+ peek, _ := c.bufr.Peek(4) // ReadRequest will get err below
+ c.bufr.Discard(numLeadingCRorLF(peek))
+ }
+ req, err := readRequest(c.bufr)
+ if err != nil {
+ if c.r.hitReadLimit() {
+ return nil, errTooLarge
+ }
+ return nil, err
+ }
+
+ if !http1ServerSupportsRequest(req) {
+ return nil, statusError{StatusHTTPVersionNotSupported, "unsupported protocol version"}
+ }
+
+ c.lastMethod = req.Method
+ c.r.setInfiniteReadLimit()
+
+ hosts, haveHost := req.Header["Host"]
+ isH2Upgrade := req.isH2Upgrade()
+ if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" {
+ return nil, badRequestError("missing required Host header")
+ }
+ if len(hosts) == 1 && !httpguts.ValidHostHeader(hosts[0]) {
+ return nil, badRequestError("malformed Host header")
+ }
+ for k, vv := range req.Header {
+ if !httpguts.ValidHeaderFieldName(k) {
+ return nil, badRequestError("invalid header name")
+ }
+ for _, v := range vv {
+ if !httpguts.ValidHeaderFieldValue(v) {
+ return nil, badRequestError("invalid header value")
+ }
+ }
+ }
+ delete(req.Header, "Host")
+
+ ctx, cancelCtx := context.WithCancel(ctx)
+ req.ctx = ctx
+ req.RemoteAddr = c.remoteAddr
+ req.TLS = c.tlsState
+ if body, ok := req.Body.(*body); ok {
+ body.doEarlyClose = true
+ }
+
+ // Adjust the read deadline if necessary.
+ if !hdrDeadline.Equal(wholeReqDeadline) {
+ c.rwc.SetReadDeadline(wholeReqDeadline)
+ }
+
+ w = &response{
+ conn: c,
+ cancelCtx: cancelCtx,
+ req: req,
+ reqBody: req.Body,
+ handlerHeader: make(Header),
+ contentLength: -1,
+ closeNotifyCh: make(chan bool, 1),
+
+ // We populate these ahead of time so we're not
+ // reading from req.Header after their Handler starts
+ // and maybe mutates it (Issue 14940)
+ wants10KeepAlive: req.wantsHttp10KeepAlive(),
+ wantsClose: req.wantsClose(),
+ }
+ if isH2Upgrade {
+ w.closeAfterReply = true
+ }
+ w.cw.res = w
+ w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize)
+ return w, nil
+}
+
+// http1ServerSupportsRequest reports whether Go's HTTP/1.x server
+// supports the given request.
+func http1ServerSupportsRequest(req *Request) bool {
+ if req.ProtoMajor == 1 {
+ return true
+ }
+ // Accept "PRI * HTTP/2.0" upgrade requests, so Handlers can
+ // wire up their own HTTP/2 upgrades.
+ if req.ProtoMajor == 2 && req.ProtoMinor == 0 &&
+ req.Method == "PRI" && req.RequestURI == "*" {
+ return true
+ }
+ // Reject HTTP/0.x, and all other HTTP/2+ requests (which
+ // aren't encoded in ASCII anyway).
+ return false
+}
+
+func (w *response) Header() Header {
+ if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader {
+ // Accessing the header between logically writing it
+ // and physically writing it means we need to allocate
+ // a clone to snapshot the logically written state.
+ w.cw.header = w.handlerHeader.Clone()
+ }
+ w.calledHeader = true
+ return w.handlerHeader
+}
+
+// maxPostHandlerReadBytes is the max number of Request.Body bytes not
+// consumed by a handler that the server will read from the client
+// in order to keep a connection alive. If there are more bytes than
+// this then the server to be paranoid instead sends a "Connection:
+// close" response.
+//
+// This number is approximately what a typical machine's TCP buffer
+// size is anyway. (if we have the bytes on the machine, we might as
+// well read them)
+const maxPostHandlerReadBytes = 256 << 10
+
+func checkWriteHeaderCode(code int) {
+ // Issue 22880: require valid WriteHeader status codes.
+ // For now we only enforce that it's three digits.
+ // In the future we might block things over 599 (600 and above aren't defined
+ // at https://httpwg.org/specs/rfc7231.html#status.codes).
+ // But for now any three digits.
+ //
+ // We used to send "HTTP/1.1 000 0" on the wire in responses but there's
+ // no equivalent bogus thing we can realistically send in HTTP/2,
+ // so we'll consistently panic instead and help people find their bugs
+ // early. (We can't return an error from WriteHeader even if we wanted to.)
+ if code < 100 || code > 999 {
+ panic(fmt.Sprintf("invalid WriteHeader code %v", code))
+ }
+}
+
+// relevantCaller searches the call stack for the first function outside of net/http.
+// The purpose of this function is to provide more helpful error messages.
+func relevantCaller() runtime.Frame {
+ pc := make([]uintptr, 16)
+ n := runtime.Callers(1, pc)
+ frames := runtime.CallersFrames(pc[:n])
+ var frame runtime.Frame
+ for {
+ frame, more := frames.Next()
+ if !strings.HasPrefix(frame.Function, "net/http.") {
+ return frame
+ }
+ if !more {
+ break
+ }
+ }
+ return frame
+}
+
+func (w *response) WriteHeader(code int) {
+ if w.conn.hijacked() {
+ caller := relevantCaller()
+ w.conn.server.logf("http: response.WriteHeader on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
+ return
+ }
+ if w.wroteHeader {
+ caller := relevantCaller()
+ w.conn.server.logf("http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
+ return
+ }
+ checkWriteHeaderCode(code)
+
+ // Handle informational headers
+ if code >= 100 && code <= 199 {
+ // Prevent a potential race with an automatically-sent 100 Continue triggered by Request.Body.Read()
+ if code == 100 && w.canWriteContinue.isSet() {
+ w.writeContinueMu.Lock()
+ w.canWriteContinue.setFalse()
+ w.writeContinueMu.Unlock()
+ }
+
+ writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
+
+ // Per RFC 8297 we must not clear the current header map
+ w.handlerHeader.WriteSubset(w.conn.bufw, excludedHeadersNoBody)
+ w.conn.bufw.Write(crlf)
+ w.conn.bufw.Flush()
+
+ return
+ }
+
+ w.wroteHeader = true
+ w.status = code
+
+ if w.calledHeader && w.cw.header == nil {
+ w.cw.header = w.handlerHeader.Clone()
+ }
+
+ if cl := w.handlerHeader.get("Content-Length"); cl != "" {
+ v, err := strconv.ParseInt(cl, 10, 64)
+ if err == nil && v >= 0 {
+ w.contentLength = v
+ } else {
+ w.conn.server.logf("http: invalid Content-Length of %q", cl)
+ w.handlerHeader.Del("Content-Length")
+ }
+ }
+}
+
+// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
+// This type is used to avoid extra allocations from cloning and/or populating
+// the response Header map and all its 1-element slices.
+type extraHeader struct {
+ contentType string
+ connection string
+ transferEncoding string
+ date []byte // written if not nil
+ contentLength []byte // written if not nil
+}
+
+// Sorted the same as extraHeader.Write's loop.
+var extraHeaderKeys = [][]byte{
+ []byte("Content-Type"),
+ []byte("Connection"),
+ []byte("Transfer-Encoding"),
+}
+
+var (
+ headerContentLength = []byte("Content-Length: ")
+ headerDate = []byte("Date: ")
+)
+
+// Write writes the headers described in h to w.
+//
+// This method has a value receiver, despite the somewhat large size
+// of h, because it prevents an allocation. The escape analysis isn't
+// smart enough to realize this function doesn't mutate h.
+func (h extraHeader) Write(w *bufio.Writer) {
+ if h.date != nil {
+ w.Write(headerDate)
+ w.Write(h.date)
+ w.Write(crlf)
+ }
+ if h.contentLength != nil {
+ w.Write(headerContentLength)
+ w.Write(h.contentLength)
+ w.Write(crlf)
+ }
+ for i, v := range []string{h.contentType, h.connection, h.transferEncoding} {
+ if v != "" {
+ w.Write(extraHeaderKeys[i])
+ w.Write(colonSpace)
+ w.WriteString(v)
+ w.Write(crlf)
+ }
+ }
+}
+
+// writeHeader finalizes the header sent to the client and writes it
+// to cw.res.conn.bufw.
+//
+// p is not written by writeHeader, but is the first chunk of the body
+// that will be written. It is sniffed for a Content-Type if none is
+// set explicitly. It's also used to set the Content-Length, if the
+// total body size was small and the handler has already finished
+// running.
+func (cw *chunkWriter) writeHeader(p []byte) {
+ if cw.wroteHeader {
+ return
+ }
+ cw.wroteHeader = true
+
+ w := cw.res
+ keepAlivesEnabled := w.conn.server.doKeepAlives()
+ isHEAD := w.req.Method == "HEAD"
+
+ // header is written out to w.conn.buf below. Depending on the
+ // state of the handler, we either own the map or not. If we
+ // don't own it, the exclude map is created lazily for
+ // WriteSubset to remove headers. The setHeader struct holds
+ // headers we need to add.
+ header := cw.header
+ owned := header != nil
+ if !owned {
+ header = w.handlerHeader
+ }
+ var excludeHeader map[string]bool
+ delHeader := func(key string) {
+ if owned {
+ header.Del(key)
+ return
+ }
+ if _, ok := header[key]; !ok {
+ return
+ }
+ if excludeHeader == nil {
+ excludeHeader = make(map[string]bool)
+ }
+ excludeHeader[key] = true
+ }
+ var setHeader extraHeader
+
+ // Don't write out the fake "Trailer:foo" keys. See TrailerPrefix.
+ trailers := false
+ for k := range cw.header {
+ if strings.HasPrefix(k, TrailerPrefix) {
+ if excludeHeader == nil {
+ excludeHeader = make(map[string]bool)
+ }
+ excludeHeader[k] = true
+ trailers = true
+ }
+ }
+ for _, v := range cw.header["Trailer"] {
+ trailers = true
+ foreachHeaderElement(v, cw.res.declareTrailer)
+ }
+
+ te := header.get("Transfer-Encoding")
+ hasTE := te != ""
+
+ // If the handler is done but never sent a Content-Length
+ // response header and this is our first (and last) write, set
+ // it, even to zero. This helps HTTP/1.0 clients keep their
+ // "keep-alive" connections alive.
+ // Exceptions: 304/204/1xx responses never get Content-Length, and if
+ // it was a HEAD request, we don't know the difference between
+ // 0 actual bytes and 0 bytes because the handler noticed it
+ // was a HEAD request and chose not to write anything. So for
+ // HEAD, the handler should either write the Content-Length or
+ // write non-zero bytes. If it's actually 0 bytes and the
+ // handler never looked at the Request.Method, we just don't
+ // send a Content-Length header.
+ // Further, we don't send an automatic Content-Length if they
+ // set a Transfer-Encoding, because they're generally incompatible.
+ if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
+ w.contentLength = int64(len(p))
+ setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
+ }
+
+ // If this was an HTTP/1.0 request with keep-alive and we sent a
+ // Content-Length back, we can make this a keep-alive response ...
+ if w.wants10KeepAlive && keepAlivesEnabled {
+ sentLength := header.get("Content-Length") != ""
+ if sentLength && header.get("Connection") == "keep-alive" {
+ w.closeAfterReply = false
+ }
+ }
+
+ // Check for an explicit (and valid) Content-Length header.
+ hasCL := w.contentLength != -1
+
+ if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) {
+ _, connectionHeaderSet := header["Connection"]
+ if !connectionHeaderSet {
+ setHeader.connection = "keep-alive"
+ }
+ } else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose {
+ w.closeAfterReply = true
+ }
+
+ if header.get("Connection") == "close" || !keepAlivesEnabled {
+ w.closeAfterReply = true
+ }
+
+ // If the client wanted a 100-continue but we never sent it to
+ // them (or, more strictly: we never finished reading their
+ // request body), don't reuse this connection because it's now
+ // in an unknown state: we might be sending this response at
+ // the same time the client is now sending its request body
+	// after a timeout. (Some HTTP clients send Expect: 100-continue
+	// but, knowing that some servers don't support it, set a timer
+	// and send the body later anyway.)
+ // If we haven't seen EOF, we can't skip over the unread body
+ // because we don't know if the next bytes on the wire will be
+ // the body-following-the-timer or the subsequent request.
+ // See Issue 11549.
+ if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF.isSet() {
+ w.closeAfterReply = true
+ }
+
+ // Per RFC 2616, we should consume the request body before
+ // replying, if the handler hasn't already done so. But we
+ // don't want to do an unbounded amount of reading here for
+ // DoS reasons, so we only try up to a threshold.
+ // TODO(bradfitz): where does RFC 2616 say that? See Issue 15527
+ // about HTTP/1.x Handlers concurrently reading and writing, like
+ // HTTP/2 handlers can do. Maybe this code should be relaxed?
+ if w.req.ContentLength != 0 && !w.closeAfterReply {
+ var discard, tooBig bool
+
+ switch bdy := w.req.Body.(type) {
+ case *expectContinueReader:
+ if bdy.resp.wroteContinue {
+ discard = true
+ }
+ case *body:
+ bdy.mu.Lock()
+ switch {
+ case bdy.closed:
+ if !bdy.sawEOF {
+ // Body was closed in handler with non-EOF error.
+ w.closeAfterReply = true
+ }
+ case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
+ tooBig = true
+ default:
+ discard = true
+ }
+ bdy.mu.Unlock()
+ default:
+ discard = true
+ }
+
+ if discard {
+ _, err := io.CopyN(io.Discard, w.reqBody, maxPostHandlerReadBytes+1)
+ switch err {
+ case nil:
+ // There must be even more data left over.
+ tooBig = true
+ case ErrBodyReadAfterClose:
+ // Body was already consumed and closed.
+ case io.EOF:
+ // The remaining body was just consumed, close it.
+ err = w.reqBody.Close()
+ if err != nil {
+ w.closeAfterReply = true
+ }
+ default:
+ // Some other kind of error occurred, like a read timeout, or
+ // corrupt chunked encoding. In any case, whatever remains
+ // on the wire must not be parsed as another HTTP request.
+ w.closeAfterReply = true
+ }
+ }
+
+ if tooBig {
+ w.requestTooLarge()
+ delHeader("Connection")
+ setHeader.connection = "close"
+ }
+ }
+
+ code := w.status
+ if bodyAllowedForStatus(code) {
+ // If no content type, apply sniffing algorithm to body.
+ _, haveType := header["Content-Type"]
+
+ // If the Content-Encoding was set and is non-blank,
+ // we shouldn't sniff the body. See Issue 31753.
+ ce := header.Get("Content-Encoding")
+ hasCE := len(ce) > 0
+ if !hasCE && !haveType && !hasTE && len(p) > 0 {
+ setHeader.contentType = DetectContentType(p)
+ }
+ } else {
+ for _, k := range suppressedHeaders(code) {
+ delHeader(k)
+ }
+ }
+
+ if !header.has("Date") {
+ setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
+ }
+
+ if hasCL && hasTE && te != "identity" {
+ // TODO: return an error if WriteHeader gets a return parameter
+ // For now just ignore the Content-Length.
+ w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
+ te, w.contentLength)
+ delHeader("Content-Length")
+ hasCL = false
+ }
+
+ if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) || code == StatusNoContent {
+ // Response has no body.
+ delHeader("Transfer-Encoding")
+ } else if hasCL {
+ // Content-Length has been provided, so no chunking is to be done.
+ delHeader("Transfer-Encoding")
+ } else if w.req.ProtoAtLeast(1, 1) {
+ // HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no
+ // content-length has been provided. The connection must be closed after the
+ // reply is written, and no chunking is to be done. This is the setup
+ // recommended in the Server-Sent Events candidate recommendation 11,
+ // section 8.
+ if hasTE && te == "identity" {
+ cw.chunking = false
+ w.closeAfterReply = true
+ delHeader("Transfer-Encoding")
+ } else {
+ // HTTP/1.1 or greater: use chunked transfer encoding
+ // to avoid closing the connection at EOF.
+ cw.chunking = true
+ setHeader.transferEncoding = "chunked"
+ if hasTE && te == "chunked" {
+ // We will send the chunked Transfer-Encoding header later.
+ delHeader("Transfer-Encoding")
+ }
+ }
+ } else {
+ // HTTP version < 1.1: cannot do chunked transfer
+ // encoding and we don't know the Content-Length so
+ // signal EOF by closing connection.
+ w.closeAfterReply = true
+ delHeader("Transfer-Encoding") // in case already set
+ }
+
+ // Cannot use Content-Length with non-identity Transfer-Encoding.
+ if cw.chunking {
+ delHeader("Content-Length")
+ }
+ if !w.req.ProtoAtLeast(1, 0) {
+ return
+ }
+
+ // Only override the Connection header if it is not a successful
+ // protocol switch response and if KeepAlives are not enabled.
+ // See https://golang.org/issue/36381.
+ delConnectionHeader := w.closeAfterReply &&
+ (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) &&
+ !isProtocolSwitchResponse(w.status, header)
+ if delConnectionHeader {
+ delHeader("Connection")
+ if w.req.ProtoAtLeast(1, 1) {
+ setHeader.connection = "close"
+ }
+ }
+
+ writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
+ cw.header.WriteSubset(w.conn.bufw, excludeHeader)
+ setHeader.Write(w.conn.bufw)
+ w.conn.bufw.Write(crlf)
+}
+
+// foreachHeaderElement splits v according to the "#rule" construction
+// in RFC 7230 section 7 and calls fn for each non-empty element.
+func foreachHeaderElement(v string, fn func(string)) {
+ v = textproto.TrimString(v)
+ if v == "" {
+ return
+ }
+ if !strings.Contains(v, ",") {
+ fn(v)
+ return
+ }
+ for _, f := range strings.Split(v, ",") {
+ if f = textproto.TrimString(f); f != "" {
+ fn(f)
+ }
+ }
+}
+
+// writeStatusLine writes an HTTP/1.x Status-Line (RFC 7230 Section 3.1.2)
+// to bw. is11 is whether the HTTP request is HTTP/1.1. false means HTTP/1.0.
+// code is the response status code.
+// scratch is an optional scratch buffer. If it has at least capacity 3, it's used.
+func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) {
+ if is11 {
+ bw.WriteString("HTTP/1.1 ")
+ } else {
+ bw.WriteString("HTTP/1.0 ")
+ }
+ if text := StatusText(code); text != "" {
+ bw.Write(strconv.AppendInt(scratch[:0], int64(code), 10))
+ bw.WriteByte(' ')
+ bw.WriteString(text)
+ bw.WriteString("\r\n")
+ } else {
+ // don't worry about performance
+ fmt.Fprintf(bw, "%03d status code %d\r\n", code, code)
+ }
+}
+
+// bodyAllowed reports whether a Write is allowed for this response type.
+// It's illegal to call this before the header has been flushed.
+func (w *response) bodyAllowed() bool {
+ if !w.wroteHeader {
+		panic("http: bodyAllowed called before the response header was written")
+ }
+ return bodyAllowedForStatus(w.status)
+}
+
+// The Life Of A Write is like this:
+//
+// Handler starts. No header has been sent. The handler can either
+// write a header, or just start writing. Writing before sending a header
+// sends an implicitly empty 200 OK header.
+//
+// If the handler didn't declare a Content-Length up front, we either
+// go into chunking mode or, if the handler finishes running before
+// the chunking buffer size, we compute a Content-Length and send that
+// in the header instead.
+//
+// Likewise, if the handler didn't set a Content-Type, we sniff that
+// from the initial chunk of output.
+//
+// The Writers are wired together like:
+//
+// 1. *response (the ResponseWriter) ->
+// 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes ->
+// 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type)
+// and which writes the chunk headers, if needed ->
+// 4. conn.bufw, a *bufio.Writer of default (4kB) bytes, writing to ->
+// 5. checkConnErrorWriter{c}, which notes any non-nil error on Write
+// and populates c.werr with it if so, but otherwise writes to ->
+// 6. the rwc, the net.Conn.
+//
+// TODO(bradfitz): short-circuit some of the buffering when the
+// initial header contains both a Content-Type and Content-Length.
+// Also short-circuit in (1) when the header's been sent and not in
+// chunking mode, writing directly to (4) instead, if (2) has no
+// buffered data. More generally, we could short-circuit from (1) to
+// (3) even in chunking mode if the write size from (1) is over some
+// threshold and nothing is in (2). The answer might be mostly making
+// bufferBeforeChunkingSize smaller and having bufio's fast-paths deal
+// with this instead.
+func (w *response) Write(data []byte) (n int, err error) {
+ return w.write(len(data), data, "")
+}
+
+func (w *response) WriteString(data string) (n int, err error) {
+ return w.write(len(data), nil, data)
+}
+
+// At most one of dataB or dataS is non-empty; the other is unused.
+func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) {
+ if w.conn.hijacked() {
+ if lenData > 0 {
+ caller := relevantCaller()
+ w.conn.server.logf("http: response.Write on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
+ }
+ return 0, ErrHijacked
+ }
+
+ if w.canWriteContinue.isSet() {
+ // Body reader wants to write 100 Continue but hasn't yet.
+ // Tell it not to. The store must be done while holding the lock
+ // because the lock makes sure that there is not an active write
+ // this very moment.
+ w.writeContinueMu.Lock()
+ w.canWriteContinue.setFalse()
+ w.writeContinueMu.Unlock()
+ }
+
+ if !w.wroteHeader {
+ w.WriteHeader(StatusOK)
+ }
+ if lenData == 0 {
+ return 0, nil
+ }
+ if !w.bodyAllowed() {
+ return 0, ErrBodyNotAllowed
+ }
+
+ w.written += int64(lenData) // ignoring errors, for errorKludge
+ if w.contentLength != -1 && w.written > w.contentLength {
+ return 0, ErrContentLength
+ }
+ if dataB != nil {
+ return w.w.Write(dataB)
+ } else {
+ return w.w.WriteString(dataS)
+ }
+}
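+
+// Editor's illustrative sketch, not part of the upstream source: a handler
+// that never calls WriteHeader. Per "The Life Of A Write" above, the first
+// Write sends an implicit 200 OK, and because the handler finishes before
+// filling the bufferBeforeChunkingSize buffer, the server computes a
+// Content-Length for the response instead of using chunked encoding.
+var _ Handler = HandlerFunc(func(w ResponseWriter, r *Request) {
+	io.WriteString(w, "small body") // implicit "HTTP/1.1 200 OK", Content-Length: 10
+})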
+
+func (w *response) finishRequest() {
+ w.handlerDone.setTrue()
+
+ if !w.wroteHeader {
+ w.WriteHeader(StatusOK)
+ }
+
+ w.w.Flush()
+ putBufioWriter(w.w)
+ w.cw.close()
+ w.conn.bufw.Flush()
+
+ w.conn.r.abortPendingRead()
+
+ // Close the body (regardless of w.closeAfterReply) so we can
+ // re-use its bufio.Reader later safely.
+ w.reqBody.Close()
+
+ if w.req.MultipartForm != nil {
+ w.req.MultipartForm.RemoveAll()
+ }
+}
+
+// shouldReuseConnection reports whether the underlying TCP connection can be reused.
+// It must only be called after the handler is done executing.
+func (w *response) shouldReuseConnection() bool {
+ if w.closeAfterReply {
+ // The request or something set while executing the
+ // handler indicated we shouldn't reuse this
+ // connection.
+ return false
+ }
+
+ if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
+ // Did not write enough. Avoid getting out of sync.
+ return false
+ }
+
+ // There was some error writing to the underlying connection
+ // during the request, so don't re-use this conn.
+ if w.conn.werr != nil {
+ return false
+ }
+
+ if w.closedRequestBodyEarly() {
+ return false
+ }
+
+ return true
+}
+
+func (w *response) closedRequestBodyEarly() bool {
+ body, ok := w.req.Body.(*body)
+ return ok && body.didEarlyClose()
+}
+
+func (w *response) Flush() {
+ if !w.wroteHeader {
+ w.WriteHeader(StatusOK)
+ }
+ w.w.Flush()
+ w.cw.flush()
+}
+
+func (c *conn) finalFlush() {
+ if c.bufr != nil {
+ // Steal the bufio.Reader (~4KB worth of memory) and its associated
+ // reader for a future connection.
+ putBufioReader(c.bufr)
+ c.bufr = nil
+ }
+
+ if c.bufw != nil {
+ c.bufw.Flush()
+ // Steal the bufio.Writer (~4KB worth of memory) and its associated
+ // writer for a future connection.
+ putBufioWriter(c.bufw)
+ c.bufw = nil
+ }
+}
+
+// Close the connection.
+func (c *conn) close() {
+ c.finalFlush()
+ c.rwc.Close()
+}
+
+// rstAvoidanceDelay is the amount of time we sleep after closing the
+// write side of a TCP connection before closing the entire socket.
+// By sleeping, we increase the chances that the client sees our FIN
+// and processes its final data before they process the subsequent RST
+// from closing a connection with known unread data.
+// This RST seems to occur mostly on BSD systems. (And Windows?)
+// This timeout is somewhat arbitrary (~latency around the planet).
+const rstAvoidanceDelay = 500 * time.Millisecond
+
+type closeWriter interface {
+ CloseWrite() error
+}
+
+var _ closeWriter = (*net.TCPConn)(nil)
+
+// closeWrite flushes any outstanding data and sends a FIN packet (if
+// client is connected via TCP), signaling that we're done. We then
+// pause for a bit, hoping the client processes it before any
+// subsequent RST.
+//
+// See https://golang.org/issue/3595
+func (c *conn) closeWriteAndWait() {
+ c.finalFlush()
+ if tcp, ok := c.rwc.(closeWriter); ok {
+ tcp.CloseWrite()
+ }
+ time.Sleep(rstAvoidanceDelay)
+}
+
+// validNextProto reports whether the proto is a valid ALPN protocol name.
+// Everything is valid except the empty string and built-in protocol types,
+// so that those can't be overridden with alternate implementations.
+func validNextProto(proto string) bool {
+ switch proto {
+ case "", "http/1.1", "http/1.0":
+ return false
+ }
+ return true
+}
+
+const (
+ runHooks = true
+ skipHooks = false
+)
+
+func (c *conn) setState(nc net.Conn, state ConnState, runHook bool) {
+ srv := c.server
+ switch state {
+ case StateNew:
+ srv.trackConn(c, true)
+ case StateHijacked, StateClosed:
+ srv.trackConn(c, false)
+ }
+ if state > 0xff || state < 0 {
+ panic("internal error")
+ }
+ packedState := uint64(time.Now().Unix()<<8) | uint64(state)
+ atomic.StoreUint64(&c.curState.atomic, packedState)
+ if !runHook {
+ return
+ }
+ if hook := srv.ConnState; hook != nil {
+ hook(nc, state)
+ }
+}
+
+func (c *conn) getState() (state ConnState, unixSec int64) {
+ packedState := atomic.LoadUint64(&c.curState.atomic)
+ return ConnState(packedState & 0xff), int64(packedState >> 8)
+}
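+
+// Editor's note, an illustrative worked example: the packed representation
+// above keeps the Unix time in the high 56 bits and the ConnState in the
+// low 8 bits so both survive a single atomic load or store. For StateIdle
+// (2) at unixSec 1000, packedState = 1000<<8 | 2 = 256002; unpacking
+// recovers ConnState(256002&0xff) == StateIdle and int64(256002>>8) == 1000.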
+
+// badRequestError is a literal string (used by the server in HTML,
+// unescaped) to tell the user why their request was bad. It should
+// be plain text without user info or other embedded errors.
+func badRequestError(e string) error { return statusError{StatusBadRequest, e} }
+
+// statusError is an error used to respond to a request with an HTTP status.
+// The text should be plain text without user info or other embedded errors.
+type statusError struct {
+ code int
+ text string
+}
+
+func (e statusError) Error() string { return StatusText(e.code) + ": " + e.text }
+
+// ErrAbortHandler is a sentinel panic value to abort a handler.
+// While any panic from ServeHTTP aborts the response to the client,
+// panicking with ErrAbortHandler also suppresses logging of a stack
+// trace to the server's error log.
+var ErrAbortHandler = errors.New("net/http: abort Handler")
+
+// isCommonNetReadError reports whether err is a common error
+// encountered during reading a request off the network when the
+// client has gone away or had its read fail somehow. This is used to
+// determine which logs are interesting enough to log about.
+func isCommonNetReadError(err error) bool {
+ if err == io.EOF {
+ return true
+ }
+ if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
+ return true
+ }
+ if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
+ return true
+ }
+ return false
+}
+
+// Serve a new connection.
+func (c *conn) serve(ctx context.Context) {
+ c.remoteAddr = c.rwc.RemoteAddr().String()
+ ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr())
+ var inFlightResponse *response
+ defer func() {
+ if err := recover(); err != nil && err != ErrAbortHandler {
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
+ }
+ if inFlightResponse != nil {
+ inFlightResponse.cancelCtx()
+ }
+ if !c.hijacked() {
+ if inFlightResponse != nil {
+ inFlightResponse.conn.r.abortPendingRead()
+ inFlightResponse.reqBody.Close()
+ }
+ c.close()
+ c.setState(c.rwc, StateClosed, runHooks)
+ }
+ }()
+
+ if tlsConn, ok := c.rwc.(*tls.Conn); ok {
+ tlsTO := c.server.tlsHandshakeTimeout()
+ if tlsTO > 0 {
+ dl := time.Now().Add(tlsTO)
+ c.rwc.SetReadDeadline(dl)
+ c.rwc.SetWriteDeadline(dl)
+ }
+ if err := tlsConn.HandshakeContext(ctx); err != nil {
+ // If the handshake failed due to the client not speaking
+ // TLS, assume they're speaking plaintext HTTP and write a
+ // 400 response on the TLS conn's underlying net.Conn.
+ if re, ok := err.(tls.RecordHeaderError); ok && re.Conn != nil && tlsRecordHeaderLooksLikeHTTP(re.RecordHeader) {
+ io.WriteString(re.Conn, "HTTP/1.0 400 Bad Request\r\n\r\nClient sent an HTTP request to an HTTPS server.\n")
+ re.Conn.Close()
+ return
+ }
+ c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err)
+ return
+ }
+ // Restore Conn-level deadlines.
+ if tlsTO > 0 {
+ c.rwc.SetReadDeadline(time.Time{})
+ c.rwc.SetWriteDeadline(time.Time{})
+ }
+ c.tlsState = new(tls.ConnectionState)
+ *c.tlsState = tlsConn.ConnectionState()
+ if proto := c.tlsState.NegotiatedProtocol; validNextProto(proto) {
+ if fn := c.server.TLSNextProto[proto]; fn != nil {
+ h := initALPNRequest{ctx, tlsConn, serverHandler{c.server}}
+ // Mark freshly created HTTP/2 as active and prevent any server state hooks
+ // from being run on these connections. This prevents closeIdleConns from
+ // closing such connections. See issue https://golang.org/issue/39776.
+ c.setState(c.rwc, StateActive, skipHooks)
+ fn(c.server, tlsConn, h)
+ }
+ return
+ }
+ }
+
+ // HTTP/1.x from here on.
+
+ ctx, cancelCtx := context.WithCancel(ctx)
+ c.cancelCtx = cancelCtx
+ defer cancelCtx()
+
+ c.r = &connReader{conn: c}
+ c.bufr = newBufioReader(c.r)
+ c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)
+
+ for {
+ w, err := c.readRequest(ctx)
+ if c.r.remain != c.server.initialReadLimitSize() {
+ // If we read any bytes off the wire, we're active.
+ c.setState(c.rwc, StateActive, runHooks)
+ }
+ if err != nil {
+ const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n"
+
+ switch {
+ case err == errTooLarge:
+ // Their HTTP client may or may not be
+ // able to read this if we're
+ // responding to them and hanging up
+ // while they're still writing their
+ // request. Undefined behavior.
+ const publicErr = "431 Request Header Fields Too Large"
+ fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
+ c.closeWriteAndWait()
+ return
+
+ case isUnsupportedTEError(err):
+ // Respond as per RFC 7230 Section 3.3.1 which says,
+ // A server that receives a request message with a
+ // transfer coding it does not understand SHOULD
+				// respond with 501 (Not Implemented).
+ code := StatusNotImplemented
+
+ // We purposefully aren't echoing back the transfer-encoding's value,
+				// so as to mitigate the risk of cross-site scripting by an attacker.
+ fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s%sUnsupported transfer encoding", code, StatusText(code), errorHeaders)
+ return
+
+ case isCommonNetReadError(err):
+ return // don't reply
+
+ default:
+ if v, ok := err.(statusError); ok {
+ fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s: %s%s%d %s: %s", v.code, StatusText(v.code), v.text, errorHeaders, v.code, StatusText(v.code), v.text)
+ return
+ }
+ publicErr := "400 Bad Request"
+ fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
+ return
+ }
+ }
+
+ // Expect 100 Continue support
+ req := w.req
+ if req.expectsContinue() {
+ if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
+ // Wrap the Body reader with one that replies on the connection
+ req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
+ w.canWriteContinue.setTrue()
+ }
+ } else if req.Header.get("Expect") != "" {
+ w.sendExpectationFailed()
+ return
+ }
+
+ c.curReq.Store(w)
+
+ if requestBodyRemains(req.Body) {
+ registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead)
+ } else {
+ w.conn.r.startBackgroundRead()
+ }
+
+ // HTTP cannot have multiple simultaneous active requests.[*]
+ // Until the server replies to this request, it can't read another,
+ // so we might as well run the handler in this goroutine.
+ // [*] Not strictly true: HTTP pipelining. We could let them all process
+ // in parallel even if their responses need to be serialized.
+ // But we're not going to implement HTTP pipelining because it
+ // was never deployed in the wild and the answer is HTTP/2.
+ inFlightResponse = w
+ serverHandler{c.server}.ServeHTTP(w, w.req)
+ inFlightResponse = nil
+ w.cancelCtx()
+ if c.hijacked() {
+ return
+ }
+ w.finishRequest()
+ if !w.shouldReuseConnection() {
+ if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
+ c.closeWriteAndWait()
+ }
+ return
+ }
+ c.setState(c.rwc, StateIdle, runHooks)
+ c.curReq.Store((*response)(nil))
+
+ if !w.conn.server.doKeepAlives() {
+ // We're in shutdown mode. We might've replied
+ // to the user without "Connection: close" and
+ // they might think they can send another
+ // request, but such is life with HTTP/1.1.
+ return
+ }
+
+ if d := c.server.idleTimeout(); d != 0 {
+ c.rwc.SetReadDeadline(time.Now().Add(d))
+ if _, err := c.bufr.Peek(4); err != nil {
+ return
+ }
+ }
+ c.rwc.SetReadDeadline(time.Time{})
+ }
+}
+
+func (w *response) sendExpectationFailed() {
+ // TODO(bradfitz): let ServeHTTP handlers handle
+ // requests with non-standard expectation[s]? Seems
+ // theoretical at best, and doesn't fit into the
+ // current ServeHTTP model anyway. We'd need to
+ // make the ResponseWriter an optional
+ // "ExpectReplier" interface or something.
+ //
+ // For now we'll just obey RFC 7231 5.1.1 which says
+ // "A server that receives an Expect field-value other
+ // than 100-continue MAY respond with a 417 (Expectation
+ // Failed) status code to indicate that the unexpected
+ // expectation cannot be met."
+ w.Header().Set("Connection", "close")
+ w.WriteHeader(StatusExpectationFailed)
+ w.finishRequest()
+}
+
+// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter
+// and a Hijacker.
+func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
+ if w.handlerDone.isSet() {
+ panic("net/http: Hijack called after ServeHTTP finished")
+ }
+ if w.wroteHeader {
+ w.cw.flush()
+ }
+
+ c := w.conn
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ // Release the bufioWriter that writes to the chunk writer, it is not
+ // used after a connection has been hijacked.
+ rwc, buf, err = c.hijackLocked()
+ if err == nil {
+ putBufioWriter(w.w)
+ w.w = nil
+ }
+ return rwc, buf, err
+}
+
+func (w *response) CloseNotify() <-chan bool {
+ if w.handlerDone.isSet() {
+ panic("net/http: CloseNotify called after ServeHTTP finished")
+ }
+ return w.closeNotifyCh
+}
+
+func registerOnHitEOF(rc io.ReadCloser, fn func()) {
+ switch v := rc.(type) {
+ case *expectContinueReader:
+ registerOnHitEOF(v.readCloser, fn)
+ case *body:
+ v.registerOnHitEOF(fn)
+ default:
+ panic("unexpected type " + fmt.Sprintf("%T", rc))
+ }
+}
+
+// requestBodyRemains reports whether future calls to Read
+// on rc might yield more data.
+func requestBodyRemains(rc io.ReadCloser) bool {
+ if rc == NoBody {
+ return false
+ }
+ switch v := rc.(type) {
+ case *expectContinueReader:
+ return requestBodyRemains(v.readCloser)
+ case *body:
+ return v.bodyRemains()
+ default:
+ panic("unexpected type " + fmt.Sprintf("%T", rc))
+ }
+}
+
+// The HandlerFunc type is an adapter to allow the use of
+// ordinary functions as HTTP handlers. If f is a function
+// with the appropriate signature, HandlerFunc(f) is a
+// Handler that calls f.
+type HandlerFunc func(ResponseWriter, *Request)
+
+// ServeHTTP calls f(w, r).
+func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) {
+ f(w, r)
+}
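+
+// Editor's illustrative sketch, not part of the upstream source; exampleGreet
+// is a hypothetical name. An ordinary function with this signature becomes a
+// Handler via a HandlerFunc conversion (a conversion, not a call).
+func exampleGreet(w ResponseWriter, r *Request) {
+	fmt.Fprintf(w, "Hello, %s!\n", r.URL.Query().Get("name"))
+}
+
+var _ Handler = HandlerFunc(exampleGreet)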
+
+// Helper handlers
+
+// Error replies to the request with the specified error message and HTTP code.
+// It does not otherwise end the request; the caller should ensure no further
+// writes are done to w.
+// The error message should be plain text.
+func Error(w ResponseWriter, error string, code int) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.Header().Set("X-Content-Type-Options", "nosniff")
+ w.WriteHeader(code)
+ fmt.Fprintln(w, error)
+}
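+
+// Editor's illustrative sketch, not part of the upstream source: using Error
+// to reject a request early. Error sets the Content-Type and nosniff headers
+// itself, so the handler only supplies the message and status code.
+var _ Handler = HandlerFunc(func(w ResponseWriter, r *Request) {
+	if r.Header.Get("Authorization") == "" {
+		Error(w, "401 unauthorized", StatusUnauthorized)
+		return
+	}
+	io.WriteString(w, "welcome\n")
+})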
+
+// NotFound replies to the request with an HTTP 404 not found error.
+func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) }
+
+// NotFoundHandler returns a simple request handler
+// that replies to each request with a “404 page not found” reply.
+func NotFoundHandler() Handler { return HandlerFunc(NotFound) }
+
+// StripPrefix returns a handler that serves HTTP requests by removing the
+// given prefix from the request URL's Path (and RawPath if set) and invoking
+// the handler h. StripPrefix handles a request for a path that doesn't begin
+// with prefix by replying with an HTTP 404 not found error. The prefix must
+// match exactly: if the prefix in the request contains escaped characters
+// the reply is also an HTTP 404 not found error.
+func StripPrefix(prefix string, h Handler) Handler {
+ if prefix == "" {
+ return h
+ }
+ return HandlerFunc(func(w ResponseWriter, r *Request) {
+ p := strings.TrimPrefix(r.URL.Path, prefix)
+ rp := strings.TrimPrefix(r.URL.RawPath, prefix)
+ if len(p) < len(r.URL.Path) && (r.URL.RawPath == "" || len(rp) < len(r.URL.RawPath)) {
+ r2 := new(Request)
+ *r2 = *r
+ r2.URL = new(url.URL)
+ *r2.URL = *r.URL
+ r2.URL.Path = p
+ r2.URL.RawPath = rp
+ h.ServeHTTP(w, r2)
+ } else {
+ NotFound(w, r)
+ }
+ })
+}
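+
+// Editor's illustrative sketch, not part of the upstream source: with the
+// handler below, a request for /static/css/site.css arrives with URL.Path
+// rewritten to /css/site.css, while a path not beginning with /static/ (or
+// one whose prefix only matches after unescaping) is answered with a 404.
+var _ Handler = StripPrefix("/static/", HandlerFunc(func(w ResponseWriter, r *Request) {
+	fmt.Fprintf(w, "asset path: %s\n", r.URL.Path) // e.g. "/css/site.css"
+}))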
+
+// Redirect replies to the request with a redirect to url,
+// which may be a path relative to the request path.
+//
+// The provided code should be in the 3xx range and is usually
+// StatusMovedPermanently, StatusFound or StatusSeeOther.
+//
+// If the Content-Type header has not been set, Redirect sets it
+// to "text/html; charset=utf-8" and writes a small HTML body.
+// Setting the Content-Type header to any value, including nil,
+// disables that behavior.
+func Redirect(w ResponseWriter, r *Request, url string, code int) {
+ if u, err := urlpkg.Parse(url); err == nil {
+ // If url was relative, make its path absolute by
+ // combining with request path.
+ // The client would probably do this for us,
+ // but doing it ourselves is more reliable.
+ // See RFC 7231, section 7.1.2
+ if u.Scheme == "" && u.Host == "" {
+ oldpath := r.URL.Path
+ if oldpath == "" { // should not happen, but avoid a crash if it does
+ oldpath = "/"
+ }
+
+ // no leading http://server
+ if url == "" || url[0] != '/' {
+ // make relative path absolute
+ olddir, _ := path.Split(oldpath)
+ url = olddir + url
+ }
+
+ var query string
+ if i := strings.Index(url, "?"); i != -1 {
+ url, query = url[:i], url[i:]
+ }
+
+ // clean up but preserve trailing slash
+ trailing := strings.HasSuffix(url, "/")
+ url = path.Clean(url)
+ if trailing && !strings.HasSuffix(url, "/") {
+ url += "/"
+ }
+ url += query
+ }
+ }
+
+ h := w.Header()
+
+ // RFC 7231 notes that a short HTML body is usually included in
+ // the response because older user agents may not understand 301/307.
+ // Do it only if the request didn't already have a Content-Type header.
+ _, hadCT := h["Content-Type"]
+
+ h.Set("Location", hexEscapeNonASCII(url))
+ if !hadCT && (r.Method == "GET" || r.Method == "HEAD") {
+ h.Set("Content-Type", "text/html; charset=utf-8")
+ }
+ w.WriteHeader(code)
+
+ // Shouldn't send the body for POST or HEAD; that leaves GET.
+ if !hadCT && r.Method == "GET" {
+ body := "<a href=\"" + htmlEscape(url) + "\">" + StatusText(code) + "</a>.\n"
+ fmt.Fprintln(w, body)
+ }
+}
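+
+// Editor's illustrative sketch, not part of the upstream source: redirecting
+// an old path to a fixed new one. A relative target such as "new" would
+// instead be resolved against the request path by the logic above.
+var _ Handler = HandlerFunc(func(w ResponseWriter, r *Request) {
+	Redirect(w, r, "/new-location", StatusMovedPermanently)
+})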
+
+var htmlReplacer = strings.NewReplacer(
+ "&", "&amp;",
+ "<", "&lt;",
+ ">", "&gt;",
+ // "&#34;" is shorter than "&quot;".
+ `"`, "&#34;",
+ // "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
+ "'", "&#39;",
+)
+
+func htmlEscape(s string) string {
+ return htmlReplacer.Replace(s)
+}
+
+// redirectHandler replies to each request with a redirect to a fixed URL.
+type redirectHandler struct {
+ url string
+ code int
+}
+
+func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
+ Redirect(w, r, rh.url, rh.code)
+}
+
+// RedirectHandler returns a request handler that redirects
+// each request it receives to the given url using the given
+// status code.
+//
+// The provided code should be in the 3xx range and is usually
+// StatusMovedPermanently, StatusFound or StatusSeeOther.
+func RedirectHandler(url string, code int) Handler {
+ return &redirectHandler{url, code}
+}
+
+// ServeMux is an HTTP request multiplexer.
+// It matches the URL of each incoming request against a list of registered
+// patterns and calls the handler for the pattern that
+// most closely matches the URL.
+//
+// Patterns name fixed, rooted paths, like "/favicon.ico",
+// or rooted subtrees, like "/images/" (note the trailing slash).
+// Longer patterns take precedence over shorter ones, so that
+// if there are handlers registered for both "/images/"
+// and "/images/thumbnails/", the latter handler will be
+// called for paths beginning "/images/thumbnails/" and the
+// former will receive requests for any other paths in the
+// "/images/" subtree.
+//
+// Note that since a pattern ending in a slash names a rooted subtree,
+// the pattern "/" matches all paths not matched by other registered
+// patterns, not just the URL with Path == "/".
+//
+// If a subtree has been registered and a request is received naming the
+// subtree root without its trailing slash, ServeMux redirects that
+// request to the subtree root (adding the trailing slash). This behavior can
+// be overridden with a separate registration for the path without
+// the trailing slash. For example, registering "/images/" causes ServeMux
+// to redirect a request for "/images" to "/images/", unless "/images" has
+// been registered separately.
+//
+// Patterns may optionally begin with a host name, restricting matches to
+// URLs on that host only. Host-specific patterns take precedence over
+// general patterns, so that a handler might register for the two patterns
+// "/codesearch" and "codesearch.google.com/" without also taking over
+// requests for "http://www.google.com/".
+//
+// ServeMux also takes care of sanitizing the URL request path and the Host
+// header, stripping the port number and redirecting any request containing . or
+// .. elements or repeated slashes to an equivalent, cleaner URL.
+type ServeMux struct {
+ mu sync.RWMutex
+ m map[string]muxEntry
+ es []muxEntry // slice of entries sorted from longest to shortest.
+ hosts bool // whether any patterns contain hostnames
+}
+
+type muxEntry struct {
+ h Handler
+ pattern string
+}
+
+// NewServeMux allocates and returns a new ServeMux.
+func NewServeMux() *ServeMux { return new(ServeMux) }
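+
+// Editor's illustrative sketch, not part of the upstream source; exampleMux
+// is a hypothetical name. It shows the precedence rules described above: the
+// longer "/images/thumbnails/" pattern wins for paths under it, and the "/"
+// pattern catches every path no other pattern matches.
+func exampleMux() *ServeMux {
+	mux := NewServeMux()
+	mux.HandleFunc("/images/", func(w ResponseWriter, r *Request) {
+		fmt.Fprintln(w, "images subtree")
+	})
+	mux.HandleFunc("/images/thumbnails/", func(w ResponseWriter, r *Request) {
+		fmt.Fprintln(w, "longer pattern wins under /images/thumbnails/")
+	})
+	mux.HandleFunc("/", func(w ResponseWriter, r *Request) {
+		fmt.Fprintln(w, "fallback for all other paths")
+	})
+	return mux
+}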
+
+// DefaultServeMux is the default ServeMux used by Serve.
+var DefaultServeMux = &defaultServeMux
+
+var defaultServeMux ServeMux
+
+// cleanPath returns the canonical path for p, eliminating . and .. elements.
+func cleanPath(p string) string {
+ if p == "" {
+ return "/"
+ }
+ if p[0] != '/' {
+ p = "/" + p
+ }
+ np := path.Clean(p)
+ // path.Clean removes trailing slash except for root;
+ // put the trailing slash back if necessary.
+ if p[len(p)-1] == '/' && np != "/" {
+ // Fast path for common case of p being the string we want:
+ if len(p) == len(np)+1 && strings.HasPrefix(p, np) {
+ np = p
+ } else {
+ np += "/"
+ }
+ }
+ return np
+}
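+
+// Editor's note, illustrative examples of the above: cleanPath("") yields
+// "/", cleanPath("a/b/../c") yields "/a/c", and cleanPath("/a/b/") keeps
+// the trailing slash, yielding "/a/b/".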
+
+// stripHostPort returns h without any trailing ":<port>".
+func stripHostPort(h string) string {
+ // If no port on host, return unchanged
+ if !strings.Contains(h, ":") {
+ return h
+ }
+ host, _, err := net.SplitHostPort(h)
+ if err != nil {
+ return h // on error, return unchanged
+ }
+ return host
+}
+
+// Find a handler on a handler map given a path string.
+// Most-specific (longest) pattern wins.
+func (mux *ServeMux) match(path string) (h Handler, pattern string) {
+ // Check for exact match first.
+ v, ok := mux.m[path]
+ if ok {
+ return v.h, v.pattern
+ }
+
+ // Check for longest valid match. mux.es contains all patterns
+ // that end in / sorted from longest to shortest.
+ for _, e := range mux.es {
+ if strings.HasPrefix(path, e.pattern) {
+ return e.h, e.pattern
+ }
+ }
+ return nil, ""
+}
+
+// redirectToPathSlash reports whether the given path needs "/" appended to it.
+// This occurs when a handler for path + "/" was already registered, but
+// not for path itself. If the path needs the slash, it returns a new URL
+// with the path set to path + "/" and true; otherwise it returns u and false.
+func (mux *ServeMux) redirectToPathSlash(host, path string, u *url.URL) (*url.URL, bool) {
+ mux.mu.RLock()
+ shouldRedirect := mux.shouldRedirectRLocked(host, path)
+ mux.mu.RUnlock()
+ if !shouldRedirect {
+ return u, false
+ }
+ path = path + "/"
+ u = &url.URL{Path: path, RawQuery: u.RawQuery}
+ return u, true
+}
+
+// shouldRedirectRLocked reports whether the given path and host should be redirected to
+// path+"/". This should happen if a handler is registered for path+"/" but
+// not path -- see comments at ServeMux.
+func (mux *ServeMux) shouldRedirectRLocked(host, path string) bool {
+ p := []string{path, host + path}
+
+ for _, c := range p {
+ if _, exist := mux.m[c]; exist {
+ return false
+ }
+ }
+
+ n := len(path)
+ if n == 0 {
+ return false
+ }
+ for _, c := range p {
+ if _, exist := mux.m[c+"/"]; exist {
+ return path[n-1] != '/'
+ }
+ }
+
+ return false
+}
+
+// Handler returns the handler to use for the given request,
+// consulting r.Method, r.Host, and r.URL.Path. It always returns
+// a non-nil handler. If the path is not in its canonical form, the
+// handler will be an internally-generated handler that redirects
+// to the canonical path. If the host contains a port, it is ignored
+// when matching handlers.
+//
+// The path and host are used unchanged for CONNECT requests.
+//
+// Handler also returns the registered pattern that matches the
+// request or, in the case of internally-generated redirects,
+// the pattern that will match after following the redirect.
+//
+// If there is no registered handler that applies to the request,
+// Handler returns a “page not found” handler and an empty pattern.
+func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) {
+
+ // CONNECT requests are not canonicalized.
+ if r.Method == "CONNECT" {
+ // If r.URL.Path is /tree and its handler is not registered,
+ // the /tree -> /tree/ redirect applies to CONNECT requests
+ // but the path canonicalization does not.
+ if u, ok := mux.redirectToPathSlash(r.URL.Host, r.URL.Path, r.URL); ok {
+ return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
+ }
+
+ return mux.handler(r.Host, r.URL.Path)
+ }
+
+ // All other requests have any port stripped and path cleaned
+ // before passing to mux.handler.
+ host := stripHostPort(r.Host)
+ path := cleanPath(r.URL.Path)
+
+ // If the given path is /tree and its handler is not registered,
+ // redirect for /tree/.
+ if u, ok := mux.redirectToPathSlash(host, path, r.URL); ok {
+ return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
+ }
+
+ if path != r.URL.Path {
+ _, pattern = mux.handler(host, path)
+ u := &url.URL{Path: path, RawQuery: r.URL.RawQuery}
+ return RedirectHandler(u.String(), StatusMovedPermanently), pattern
+ }
+
+ return mux.handler(host, r.URL.Path)
+}
+
+// handler is the main implementation of Handler.
+// The path is known to be in canonical form, except for CONNECT methods.
+func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) {
+ mux.mu.RLock()
+ defer mux.mu.RUnlock()
+
+ // Host-specific pattern takes precedence over generic ones
+ if mux.hosts {
+ h, pattern = mux.match(host + path)
+ }
+ if h == nil {
+ h, pattern = mux.match(path)
+ }
+ if h == nil {
+ h, pattern = NotFoundHandler(), ""
+ }
+ return
+}
+
+// ServeHTTP dispatches the request to the handler whose
+// pattern most closely matches the request URL.
+func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) {
+ if r.RequestURI == "*" {
+ if r.ProtoAtLeast(1, 1) {
+ w.Header().Set("Connection", "close")
+ }
+ w.WriteHeader(StatusBadRequest)
+ return
+ }
+ h, _ := mux.Handler(r)
+ h.ServeHTTP(w, r)
+}
+
+// Handle registers the handler for the given pattern.
+// If a handler already exists for pattern, Handle panics.
+func (mux *ServeMux) Handle(pattern string, handler Handler) {
+ mux.mu.Lock()
+ defer mux.mu.Unlock()
+
+ if pattern == "" {
+ panic("http: invalid pattern")
+ }
+ if handler == nil {
+ panic("http: nil handler")
+ }
+ if _, exist := mux.m[pattern]; exist {
+ panic("http: multiple registrations for " + pattern)
+ }
+
+ if mux.m == nil {
+ mux.m = make(map[string]muxEntry)
+ }
+ e := muxEntry{h: handler, pattern: pattern}
+ mux.m[pattern] = e
+ if pattern[len(pattern)-1] == '/' {
+ mux.es = appendSorted(mux.es, e)
+ }
+
+ if pattern[0] != '/' {
+ mux.hosts = true
+ }
+}
+
+func appendSorted(es []muxEntry, e muxEntry) []muxEntry {
+ n := len(es)
+ i := sort.Search(n, func(i int) bool {
+ return len(es[i].pattern) < len(e.pattern)
+ })
+ if i == n {
+ return append(es, e)
+ }
+ // we now know that i points at where we want to insert
+ es = append(es, muxEntry{}) // try to grow the slice in place, any entry works.
+ copy(es[i+1:], es[i:]) // Move shorter entries down
+ es[i] = e
+ return es
+}
+
+// HandleFunc registers the handler function for the given pattern.
+func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
+ if handler == nil {
+ panic("http: nil handler")
+ }
+ mux.Handle(pattern, HandlerFunc(handler))
+}
+
+// Handle registers the handler for the given pattern
+// in the DefaultServeMux.
+// The documentation for ServeMux explains how patterns are matched.
+func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
+
+// HandleFunc registers the handler function for the given pattern
+// in the DefaultServeMux.
+// The documentation for ServeMux explains how patterns are matched.
+func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
+ DefaultServeMux.HandleFunc(pattern, handler)
+}
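+
+// Editor's illustrative sketch, not part of the upstream source;
+// exampleRegister is a hypothetical name showing the package-level
+// registration flow against DefaultServeMux.
+func exampleRegister() {
+	HandleFunc("/healthz", func(w ResponseWriter, r *Request) {
+		io.WriteString(w, "ok\n")
+	})
+	// Serving would then be, in user code:
+	//	err := http.ListenAndServe(":8080", nil) // nil handler selects DefaultServeMux
+}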
+
+// Serve accepts incoming HTTP connections on the listener l,
+// creating a new service goroutine for each. The service goroutines
+// read requests and then call handler to reply to them.
+//
+// The handler is typically nil, in which case the DefaultServeMux is used.
+//
+// HTTP/2 support is only enabled if the Listener returns *tls.Conn
+// connections and they were configured with "h2" in the TLS
+// Config.NextProtos.
+//
+// Serve always returns a non-nil error.
+func Serve(l net.Listener, handler Handler) error {
+ srv := &Server{Handler: handler}
+ return srv.Serve(l)
+}
+
+// ServeTLS accepts incoming HTTPS connections on the listener l,
+// creating a new service goroutine for each. The service goroutines
+// read requests and then call handler to reply to them.
+//
+// The handler is typically nil, in which case the DefaultServeMux is used.
+//
+// Additionally, files containing a certificate and matching private key
+// for the server must be provided. If the certificate is signed by a
+// certificate authority, the certFile should be the concatenation
+// of the server's certificate, any intermediates, and the CA's certificate.
+//
+// ServeTLS always returns a non-nil error.
+func ServeTLS(l net.Listener, handler Handler, certFile, keyFile string) error {
+ srv := &Server{Handler: handler}
+ return srv.ServeTLS(l, certFile, keyFile)
+}
+
+// A Server defines parameters for running an HTTP server.
+// The zero value for Server is a valid configuration.
+type Server struct {
+ // Addr optionally specifies the TCP address for the server to listen on,
+ // in the form "host:port". If empty, ":http" (port 80) is used.
+ // The service names are defined in RFC 6335 and assigned by IANA.
+ // See net.Dial for details of the address format.
+ Addr string
+
+ Handler Handler // handler to invoke, http.DefaultServeMux if nil
+
+ // TLSConfig optionally provides a TLS configuration for use
+ // by ServeTLS and ListenAndServeTLS. Note that this value is
+ // cloned by ServeTLS and ListenAndServeTLS, so it's not
+ // possible to modify the configuration with methods like
+ // tls.Config.SetSessionTicketKeys. To use
+ // SetSessionTicketKeys, use Server.Serve with a TLS Listener
+ // instead.
+ TLSConfig *tls.Config
+
+ // ReadTimeout is the maximum duration for reading the entire
+ // request, including the body. A zero or negative value means
+ // there will be no timeout.
+ //
+ // Because ReadTimeout does not let Handlers make per-request
+ // decisions on each request body's acceptable deadline or
+ // upload rate, most users will prefer to use
+ // ReadHeaderTimeout. It is valid to use them both.
+ ReadTimeout time.Duration
+
+ // ReadHeaderTimeout is the amount of time allowed to read
+ // request headers. The connection's read deadline is reset
+ // after reading the headers and the Handler can decide what
+ // is considered too slow for the body. If ReadHeaderTimeout
+ // is zero, the value of ReadTimeout is used. If both are
+ // zero, there is no timeout.
+ ReadHeaderTimeout time.Duration
+
+ // WriteTimeout is the maximum duration before timing out
+ // writes of the response. It is reset whenever a new
+ // request's header is read. Like ReadTimeout, it does not
+ // let Handlers make decisions on a per-request basis.
+ // A zero or negative value means there will be no timeout.
+ WriteTimeout time.Duration
+
+ // IdleTimeout is the maximum amount of time to wait for the
+ // next request when keep-alives are enabled. If IdleTimeout
+ // is zero, the value of ReadTimeout is used. If both are
+ // zero, there is no timeout.
+ IdleTimeout time.Duration
+
+ // MaxHeaderBytes controls the maximum number of bytes the
+ // server will read parsing the request header's keys and
+ // values, including the request line. It does not limit the
+ // size of the request body.
+ // If zero, DefaultMaxHeaderBytes is used.
+ MaxHeaderBytes int
+
+ // TLSNextProto optionally specifies a function to take over
+ // ownership of the provided TLS connection when an ALPN
+ // protocol upgrade has occurred. The map key is the protocol
+ // name negotiated. The Handler argument should be used to
+ // handle HTTP requests and will initialize the Request's TLS
+ // and RemoteAddr if not already set. The connection is
+ // automatically closed when the function returns.
+ // If TLSNextProto is not nil, HTTP/2 support is not enabled
+ // automatically.
+ TLSNextProto map[string]func(*Server, *tls.Conn, Handler)
+
+ // ConnState specifies an optional callback function that is
+ // called when a client connection changes state. See the
+ // ConnState type and associated constants for details.
+ ConnState func(net.Conn, ConnState)
+
+ // ErrorLog specifies an optional logger for errors accepting
+ // connections, unexpected behavior from handlers, and
+ // underlying FileSystem errors.
+ // If nil, logging is done via the log package's standard logger.
+ ErrorLog *log.Logger
+
+ // BaseContext optionally specifies a function that returns
+ // the base context for incoming requests on this server.
+ // The provided Listener is the specific Listener that's
+ // about to start accepting requests.
+ // If BaseContext is nil, the default is context.Background().
+ // If non-nil, it must return a non-nil context.
+ BaseContext func(net.Listener) context.Context
+
+ // ConnContext optionally specifies a function that modifies
+ // the context used for a new connection c. The provided ctx
+ // is derived from the base context and has a ServerContextKey
+ // value.
+ ConnContext func(ctx context.Context, c net.Conn) context.Context
+
+ inShutdown atomicBool // true when server is in shutdown
+
+ disableKeepAlives int32 // accessed atomically.
+ nextProtoOnce sync.Once // guards setupHTTP2_* init
+ nextProtoErr error // result of http2.ConfigureServer if used
+
+ mu sync.Mutex
+ listeners map[*net.Listener]struct{}
+ activeConn map[*conn]struct{}
+ doneChan chan struct{}
+ onShutdown []func()
+
+ listenerGroup sync.WaitGroup
+}
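+
+// Editor's illustrative sketch, not part of the upstream source; exampleServer
+// is a hypothetical name and the durations are arbitrary placeholders, not
+// recommendations from this source.
+func exampleServer(h Handler) *Server {
+	return &Server{
+		Addr:              ":8443",
+		Handler:           h,
+		ReadHeaderTimeout: 5 * time.Second,   // bound header reads; body policy is left to handlers
+		WriteTimeout:      10 * time.Second,  // reset as each new request's header is read
+		IdleTimeout:       120 * time.Second, // keep-alive wait between requests
+		MaxHeaderBytes:    1 << 20,
+	}
+}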
+
+func (s *Server) getDoneChan() <-chan struct{} {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.getDoneChanLocked()
+}
+
+func (s *Server) getDoneChanLocked() chan struct{} {
+ if s.doneChan == nil {
+ s.doneChan = make(chan struct{})
+ }
+ return s.doneChan
+}
+
+func (s *Server) closeDoneChanLocked() {
+ ch := s.getDoneChanLocked()
+ select {
+ case <-ch:
+ // Already closed. Don't close again.
+ default:
+ // Safe to close here. We're the only closer, guarded
+ // by s.mu.
+ close(ch)
+ }
+}
+
+// Close immediately closes all active net.Listeners and any
+// connections in state StateNew, StateActive, or StateIdle. For a
+// graceful shutdown, use Shutdown.
+//
+// Close does not attempt to close (and does not even know about)
+// any hijacked connections, such as WebSockets.
+//
+// Close returns any error returned from closing the Server's
+// underlying Listener(s).
+func (srv *Server) Close() error {
+ srv.inShutdown.setTrue()
+ srv.mu.Lock()
+ defer srv.mu.Unlock()
+ srv.closeDoneChanLocked()
+ err := srv.closeListenersLocked()
+
+ // Unlock srv.mu while waiting for listenerGroup.
+ // The group Add and Done calls are made with srv.mu held,
+ // to avoid adding a new listener in the window between
+ // us setting inShutdown above and waiting here.
+ srv.mu.Unlock()
+ srv.listenerGroup.Wait()
+ srv.mu.Lock()
+
+ for c := range srv.activeConn {
+ c.rwc.Close()
+ delete(srv.activeConn, c)
+ }
+ return err
+}
+
+// shutdownPollIntervalMax is the max polling interval when checking
+// quiescence during Server.Shutdown. Polling starts with a small
+// interval and backs off to the max.
+// Ideally we could find a solution that doesn't involve polling,
+// one that also doesn't have a high runtime cost (and doesn't
+// involve any contentious mutexes), but that is left as an
+// exercise for the reader.
+const shutdownPollIntervalMax = 500 * time.Millisecond
+
+// Shutdown gracefully shuts down the server without interrupting any
+// active connections. Shutdown works by first closing all open
+// listeners, then closing all idle connections, and then waiting
+// indefinitely for connections to return to idle and then shut down.
+// If the provided context expires before the shutdown is complete,
+// Shutdown returns the context's error, otherwise it returns any
+// error returned from closing the Server's underlying Listener(s).
+//
+// When Shutdown is called, Serve, ListenAndServe, and
+// ListenAndServeTLS immediately return ErrServerClosed. Make sure the
+// program doesn't exit and waits instead for Shutdown to return.
+//
+// Shutdown does not attempt to close nor wait for hijacked
+// connections such as WebSockets. The caller of Shutdown should
+// separately notify such long-lived connections of shutdown and wait
+// for them to close, if desired. See RegisterOnShutdown for a way to
+// register shutdown notification functions.
+//
+// Once Shutdown has been called on a server, it may not be reused;
+// future calls to methods such as Serve will return ErrServerClosed.
+func (srv *Server) Shutdown(ctx context.Context) error {
+ srv.inShutdown.setTrue()
+
+ srv.mu.Lock()
+ lnerr := srv.closeListenersLocked()
+ srv.closeDoneChanLocked()
+ for _, f := range srv.onShutdown {
+ go f()
+ }
+ srv.mu.Unlock()
+ srv.listenerGroup.Wait()
+
+ pollIntervalBase := time.Millisecond
+ nextPollInterval := func() time.Duration {
+ // Add 10% jitter.
+ interval := pollIntervalBase + time.Duration(rand.Intn(int(pollIntervalBase/10)))
+ // Double and clamp for next time.
+ pollIntervalBase *= 2
+ if pollIntervalBase > shutdownPollIntervalMax {
+ pollIntervalBase = shutdownPollIntervalMax
+ }
+ return interval
+ }
+
+ timer := time.NewTimer(nextPollInterval())
+ defer timer.Stop()
+ for {
+ if srv.closeIdleConns() {
+ return lnerr
+ }
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-timer.C:
+ timer.Reset(nextPollInterval())
+ }
+ }
+}
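+
+// Editor's illustrative sketch, not part of the upstream source;
+// exampleGracefulShutdown is a hypothetical name. Serve returns
+// ErrServerClosed as soon as Shutdown is called, so the caller waits on a
+// channel for Shutdown itself to finish, per the documentation above.
+func exampleGracefulShutdown(srv *Server, l net.Listener, stop <-chan struct{}) error {
+	shutdownDone := make(chan error, 1)
+	go func() {
+		<-stop
+		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+		defer cancel()
+		shutdownDone <- srv.Shutdown(ctx)
+	}()
+	if err := srv.Serve(l); err != ErrServerClosed {
+		return err // unexpected Serve failure
+	}
+	return <-shutdownDone // don't exit before Shutdown returns
+}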
+
+// RegisterOnShutdown registers a function to call on Shutdown.
+// This can be used to gracefully shutdown connections that have
+// undergone ALPN protocol upgrade or that have been hijacked.
+// This function should start protocol-specific graceful shutdown,
+// but should not wait for shutdown to complete.
+func (srv *Server) RegisterOnShutdown(f func()) {
+ srv.mu.Lock()
+ srv.onShutdown = append(srv.onShutdown, f)
+ srv.mu.Unlock()
+}
+
+// closeIdleConns closes all idle connections and reports whether the
+// server is quiescent.
+func (s *Server) closeIdleConns() bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ quiescent := true
+ for c := range s.activeConn {
+ st, unixSec := c.getState()
+ // Issue 22682: treat StateNew connections as if
+ // they're idle if we haven't read the first request's
+ // header in over 5 seconds.
+ if st == StateNew && unixSec < time.Now().Unix()-5 {
+ st = StateIdle
+ }
+ if st != StateIdle || unixSec == 0 {
+ // Assume unixSec == 0 means it's a very new
+ // connection, without state set yet.
+ quiescent = false
+ continue
+ }
+ c.rwc.Close()
+ delete(s.activeConn, c)
+ }
+ return quiescent
+}
+
+func (s *Server) closeListenersLocked() error {
+ var err error
+ for ln := range s.listeners {
+ if cerr := (*ln).Close(); cerr != nil && err == nil {
+ err = cerr
+ }
+ }
+ return err
+}
+
+// A ConnState represents the state of a client connection to a server.
+// It's used by the optional Server.ConnState hook.
+type ConnState int
+
+const (
+ // StateNew represents a new connection that is expected to
+ // send a request immediately. Connections begin at this
+ // state and then transition to either StateActive or
+ // StateClosed.
+ StateNew ConnState = iota
+
+ // StateActive represents a connection that has read 1 or more
+ // bytes of a request. The Server.ConnState hook for
+ // StateActive fires before the request has entered a handler
+ // and doesn't fire again until the request has been
+ // handled. After the request is handled, the state
+ // transitions to StateClosed, StateHijacked, or StateIdle.
+ // For HTTP/2, StateActive fires on the transition from zero
+ // to one active request, and only transitions away once all
+ // active requests are complete. That means that ConnState
+ // cannot be used to do per-request work; ConnState only notes
+ // the overall state of the connection.
+ StateActive
+
+ // StateIdle represents a connection that has finished
+ // handling a request and is in the keep-alive state, waiting
+ // for a new request. Connections transition from StateIdle
+ // to either StateActive or StateClosed.
+ StateIdle
+
+ // StateHijacked represents a hijacked connection.
+ // This is a terminal state. It does not transition to StateClosed.
+ StateHijacked
+
+ // StateClosed represents a closed connection.
+ // This is a terminal state. Hijacked connections do not
+ // transition to StateClosed.
+ StateClosed
+)
+
+var stateName = map[ConnState]string{
+ StateNew: "new",
+ StateActive: "active",
+ StateIdle: "idle",
+ StateHijacked: "hijacked",
+ StateClosed: "closed",
+}
+
+func (c ConnState) String() string {
+ return stateName[c]
+}
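+
+// Editor's illustrative sketch, not part of the upstream source;
+// exampleConnStateHook is a hypothetical name. The returned function can be
+// assigned to Server.ConnState to count state transitions, e.g. for metrics.
+func exampleConnStateHook() func(net.Conn, ConnState) {
+	var mu sync.Mutex
+	counts := make(map[ConnState]int)
+	return func(c net.Conn, st ConnState) {
+		mu.Lock()
+		counts[st]++ // st.String() gives "new", "active", "idle", etc.
+		mu.Unlock()
+	}
+}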
+
+// serverHandler delegates to either the server's Handler or
+// DefaultServeMux and also handles "OPTIONS *" requests.
+type serverHandler struct {
+ srv *Server
+}
+
+func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) {
+ handler := sh.srv.Handler
+ if handler == nil {
+ handler = DefaultServeMux
+ }
+ if req.RequestURI == "*" && req.Method == "OPTIONS" {
+ handler = globalOptionsHandler{}
+ }
+
+ if req.URL != nil && strings.Contains(req.URL.RawQuery, ";") {
+ var allowQuerySemicolonsInUse int32
+ req = req.WithContext(context.WithValue(req.Context(), silenceSemWarnContextKey, func() {
+ atomic.StoreInt32(&allowQuerySemicolonsInUse, 1)
+ }))
+ defer func() {
+ if atomic.LoadInt32(&allowQuerySemicolonsInUse) == 0 {
+ sh.srv.logf("http: URL query contains semicolon, which is no longer a supported separator; parts of the query may be stripped when parsed; see golang.org/issue/25192")
+ }
+ }()
+ }
+
+ handler.ServeHTTP(rw, req)
+}
+
+var silenceSemWarnContextKey = &contextKey{"silence-semicolons"}
+
+// AllowQuerySemicolons returns a handler that serves requests by converting any
+// unescaped semicolons in the URL query to ampersands, and invoking the handler h.
+//
+// This restores the pre-Go 1.17 behavior of splitting query parameters on both
+// semicolons and ampersands. (See golang.org/issue/25192). Note that this
+// behavior doesn't match that of many proxies, and the mismatch can lead to
+// security issues.
+//
+// AllowQuerySemicolons should be invoked before Request.ParseForm is called.
+func AllowQuerySemicolons(h Handler) Handler {
+ return HandlerFunc(func(w ResponseWriter, r *Request) {
+ if silenceSemicolonsWarning, ok := r.Context().Value(silenceSemWarnContextKey).(func()); ok {
+ silenceSemicolonsWarning()
+ }
+ if strings.Contains(r.URL.RawQuery, ";") {
+ r2 := new(Request)
+ *r2 = *r
+ r2.URL = new(url.URL)
+ *r2.URL = *r.URL
+ r2.URL.RawQuery = strings.ReplaceAll(r.URL.RawQuery, ";", "&")
+ h.ServeHTTP(w, r2)
+ } else {
+ h.ServeHTTP(w, r)
+ }
+ })
+}
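+
+// A minimal usage sketch (illustrative, not part of the upstream file),
+// assuming a main package importing "fmt", "log", and "net/http": wrapping
+// the top-level handler makes a query such as "a=1;b=2" split on ';' as
+// well as '&'.
+//
+//	mux := http.NewServeMux()
+//	mux.HandleFunc("/search", func(w http.ResponseWriter, r *http.Request) {
+//		fmt.Fprintln(w, r.URL.Query()) // sees both "a" and "b"
+//	})
+//	log.Fatal(http.ListenAndServe(":8080", http.AllowQuerySemicolons(mux)))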
+
+// ListenAndServe listens on the TCP network address srv.Addr and then
+// calls Serve to handle requests on incoming connections.
+// Accepted connections are configured to enable TCP keep-alives.
+//
+// If srv.Addr is blank, ":http" is used.
+//
+// ListenAndServe always returns a non-nil error. After Shutdown or Close,
+// the returned error is ErrServerClosed.
+func (srv *Server) ListenAndServe() error {
+ if srv.shuttingDown() {
+ return ErrServerClosed
+ }
+ addr := srv.Addr
+ if addr == "" {
+ addr = ":http"
+ }
+ ln, err := net.Listen("tcp", addr)
+ if err != nil {
+ return err
+ }
+ return srv.Serve(ln)
+}
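+
+// A hedged sketch (field values are assumptions, not upstream defaults):
+// constructing a Server explicitly allows timeouts to be set before
+// ListenAndServe is called.
+//
+//	srv := &http.Server{
+//		Addr:              ":8080",
+//		Handler:           mux, // any http.Handler
+//		ReadHeaderTimeout: 5 * time.Second,
+//		IdleTimeout:       60 * time.Second,
+//	}
+//	log.Fatal(srv.ListenAndServe()) // always returns a non-nil error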
+
+var testHookServerServe func(*Server, net.Listener) // used if non-nil
+
+// shouldConfigureHTTP2ForServe reports whether Server.Serve should configure
+// automatic HTTP/2 (which sets up the srv.TLSNextProto map).
+func (srv *Server) shouldConfigureHTTP2ForServe() bool {
+ if srv.TLSConfig == nil {
+ // Compatibility with Go 1.6:
+ // If there's no TLSConfig, it's possible that the user just
+ // didn't set it on the http.Server, but did pass it to
+ // tls.NewListener and passed that listener to Serve.
+ // So we should configure HTTP/2 (to set up srv.TLSNextProto)
+ // in case the listener returns an "h2" *tls.Conn.
+ return true
+ }
+ // The user specified a TLSConfig on their http.Server.
+ // In this case, only configure HTTP/2 if their tls.Config
+ // explicitly mentions "h2". Otherwise http2.ConfigureServer
+ // would modify the tls.Config to add it, but they probably already
+ // passed this tls.Config to tls.NewListener. And if they did,
+ // it's too late anyway to fix it. It would only be potentially racy.
+ // See Issue 15908.
+ return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS)
+}
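+
+// An illustrative sketch of the case this check looks for (cert and ln are
+// assumed to exist): a caller with its own tls.Config opts in to HTTP/2 by
+// listing "h2" in NextProtos before wrapping the listener itself.
+//
+//	srv.TLSConfig = &tls.Config{
+//		Certificates: []tls.Certificate{cert},
+//		NextProtos:   []string{"h2", "http/1.1"},
+//	}
+//	log.Fatal(srv.Serve(tls.NewListener(ln, srv.TLSConfig)))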
+
+// ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe,
+// and ListenAndServeTLS methods after a call to Shutdown or Close.
+var ErrServerClosed = errors.New("http: Server closed")
+
+// Serve accepts incoming connections on the Listener l, creating a
+// new service goroutine for each. The service goroutines read requests and
+// then call srv.Handler to reply to them.
+//
+// HTTP/2 support is only enabled if the Listener returns *tls.Conn
+// connections and they were configured with "h2" in the TLS
+// Config.NextProtos.
+//
+// Serve always returns a non-nil error and closes l.
+// After Shutdown or Close, the returned error is ErrServerClosed.
+func (srv *Server) Serve(l net.Listener) error {
+ if fn := testHookServerServe; fn != nil {
+ fn(srv, l) // call hook with unwrapped listener
+ }
+
+ origListener := l
+ l = &onceCloseListener{Listener: l}
+ defer l.Close()
+
+ if err := srv.setupHTTP2_Serve(); err != nil {
+ return err
+ }
+
+ if !srv.trackListener(&l, true) {
+ return ErrServerClosed
+ }
+ defer srv.trackListener(&l, false)
+
+ baseCtx := context.Background()
+ if srv.BaseContext != nil {
+ baseCtx = srv.BaseContext(origListener)
+ if baseCtx == nil {
+ panic("BaseContext returned a nil context")
+ }
+ }
+
+ var tempDelay time.Duration // how long to sleep on accept failure
+
+ ctx := context.WithValue(baseCtx, ServerContextKey, srv)
+ for {
+ rw, err := l.Accept()
+ if err != nil {
+ select {
+ case <-srv.getDoneChan():
+ return ErrServerClosed
+ default:
+ }
+ if ne, ok := err.(net.Error); ok && ne.Temporary() {
+ if tempDelay == 0 {
+ tempDelay = 5 * time.Millisecond
+ } else {
+ tempDelay *= 2
+ }
+ if max := 1 * time.Second; tempDelay > max {
+ tempDelay = max
+ }
+ srv.logf("http: Accept error: %v; retrying in %v", err, tempDelay)
+ time.Sleep(tempDelay)
+ continue
+ }
+ return err
+ }
+ connCtx := ctx
+ if cc := srv.ConnContext; cc != nil {
+ connCtx = cc(connCtx, rw)
+ if connCtx == nil {
+ panic("ConnContext returned nil")
+ }
+ }
+ tempDelay = 0
+ c := srv.newConn(rw)
+ c.setState(c.rwc, StateNew, runHooks) // before Serve can return
+ go c.serve(connCtx)
+ }
+}
+
+// ServeTLS accepts incoming connections on the Listener l, creating a
+// new service goroutine for each. The service goroutines perform TLS
+// setup and then read requests, calling srv.Handler to reply to them.
+//
+// Files containing a certificate and matching private key for the
+// server must be provided if neither the Server's
+// TLSConfig.Certificates nor TLSConfig.GetCertificate are populated.
+// If the certificate is signed by a certificate authority, the
+// certFile should be the concatenation of the server's certificate,
+// any intermediates, and the CA's certificate.
+//
+// ServeTLS always returns a non-nil error. After Shutdown or Close, the
+// returned error is ErrServerClosed.
+func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error {
+ // Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig
+ // before we clone it and create the TLS Listener.
+ if err := srv.setupHTTP2_ServeTLS(); err != nil {
+ return err
+ }
+
+ config := cloneTLSConfig(srv.TLSConfig)
+ if !strSliceContains(config.NextProtos, "http/1.1") {
+ config.NextProtos = append(config.NextProtos, "http/1.1")
+ }
+
+ configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil
+ if !configHasCert || certFile != "" || keyFile != "" {
+ var err error
+ config.Certificates = make([]tls.Certificate, 1)
+ config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return err
+ }
+ }
+
+ tlsListener := tls.NewListener(l, config)
+ return srv.Serve(tlsListener)
+}
+
+// trackListener adds or removes a net.Listener to the set of tracked
+// listeners.
+//
+// We store a pointer to the interface in the map set, in case the
+// net.Listener is not comparable. This is safe because we only call
+// trackListener via Serve and can track+defer untrack the same
+// pointer to a local variable there. We never need to compare a
+// Listener from another caller.
+//
+// It reports whether the server is still up (not Shutdown or Closed).
+func (s *Server) trackListener(ln *net.Listener, add bool) bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.listeners == nil {
+ s.listeners = make(map[*net.Listener]struct{})
+ }
+ if add {
+ if s.shuttingDown() {
+ return false
+ }
+ s.listeners[ln] = struct{}{}
+ s.listenerGroup.Add(1)
+ } else {
+ delete(s.listeners, ln)
+ s.listenerGroup.Done()
+ }
+ return true
+}
+
+func (s *Server) trackConn(c *conn, add bool) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.activeConn == nil {
+ s.activeConn = make(map[*conn]struct{})
+ }
+ if add {
+ s.activeConn[c] = struct{}{}
+ } else {
+ delete(s.activeConn, c)
+ }
+}
+
+func (s *Server) idleTimeout() time.Duration {
+ if s.IdleTimeout != 0 {
+ return s.IdleTimeout
+ }
+ return s.ReadTimeout
+}
+
+func (s *Server) readHeaderTimeout() time.Duration {
+ if s.ReadHeaderTimeout != 0 {
+ return s.ReadHeaderTimeout
+ }
+ return s.ReadTimeout
+}
+
+func (s *Server) doKeepAlives() bool {
+ return atomic.LoadInt32(&s.disableKeepAlives) == 0 && !s.shuttingDown()
+}
+
+func (s *Server) shuttingDown() bool {
+ return s.inShutdown.isSet()
+}
+
+// SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled.
+// By default, keep-alives are always enabled. Only very
+// resource-constrained environments or servers in the process of
+// shutting down should disable them.
+func (srv *Server) SetKeepAlivesEnabled(v bool) {
+ if v {
+ atomic.StoreInt32(&srv.disableKeepAlives, 0)
+ return
+ }
+ atomic.StoreInt32(&srv.disableKeepAlives, 1)
+
+ // Close idle HTTP/1 conns:
+ srv.closeIdleConns()
+
+ // TODO: Issue 26303: close HTTP/2 conns as soon as they become idle.
+}
+
+func (s *Server) logf(format string, args ...any) {
+ if s.ErrorLog != nil {
+ s.ErrorLog.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+// logf prints to the ErrorLog of the *Server associated with request r
+// via ServerContextKey. If there's no associated server, or if ErrorLog
+// is nil, logging is done via the log package's standard logger.
+func logf(r *Request, format string, args ...any) {
+ s, _ := r.Context().Value(ServerContextKey).(*Server)
+ if s != nil && s.ErrorLog != nil {
+ s.ErrorLog.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+// ListenAndServe listens on the TCP network address addr and then calls
+// Serve with handler to handle requests on incoming connections.
+// Accepted connections are configured to enable TCP keep-alives.
+//
+// The handler is typically nil, in which case the DefaultServeMux is used.
+//
+// ListenAndServe always returns a non-nil error.
+func ListenAndServe(addr string, handler Handler) error {
+ server := &Server{Addr: addr, Handler: handler}
+ return server.ListenAndServe()
+}
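+
+// The canonical sketch for this helper (assumed imports: "fmt", "log",
+// "net/http"); passing a nil handler selects DefaultServeMux.
+//
+//	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+//		fmt.Fprintf(w, "hello, %s\n", r.URL.Path)
+//	})
+//	log.Fatal(http.ListenAndServe(":8080", nil))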
+
+// ListenAndServeTLS acts identically to ListenAndServe, except that it
+// expects HTTPS connections. Additionally, files containing a certificate and
+// matching private key for the server must be provided. If the certificate
+// is signed by a certificate authority, the certFile should be the concatenation
+// of the server's certificate, any intermediates, and the CA's certificate.
+func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
+ server := &Server{Addr: addr, Handler: handler}
+ return server.ListenAndServeTLS(certFile, keyFile)
+}
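+
+// A usage sketch; "cert.pem" and "key.pem" are placeholder paths, not files
+// this package provides.
+//
+//	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))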
+
+// ListenAndServeTLS listens on the TCP network address srv.Addr and
+// then calls ServeTLS to handle requests on incoming TLS connections.
+// Accepted connections are configured to enable TCP keep-alives.
+//
+// Filenames containing a certificate and matching private key for the
+// server must be provided if neither the Server's TLSConfig.Certificates
+// nor TLSConfig.GetCertificate are populated. If the certificate is
+// signed by a certificate authority, the certFile should be the
+// concatenation of the server's certificate, any intermediates, and
+// the CA's certificate.
+//
+// If srv.Addr is blank, ":https" is used.
+//
+// ListenAndServeTLS always returns a non-nil error. After Shutdown or
+// Close, the returned error is ErrServerClosed.
+func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
+ if srv.shuttingDown() {
+ return ErrServerClosed
+ }
+ addr := srv.Addr
+ if addr == "" {
+ addr = ":https"
+ }
+
+ ln, err := net.Listen("tcp", addr)
+ if err != nil {
+ return err
+ }
+
+ defer ln.Close()
+
+ return srv.ServeTLS(ln, certFile, keyFile)
+}
+
+// setupHTTP2_ServeTLS conditionally configures HTTP/2 on
+// srv and reports whether there was an error setting it up. If it is
+// not configured for policy reasons, nil is returned.
+func (srv *Server) setupHTTP2_ServeTLS() error {
+ srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults)
+ return srv.nextProtoErr
+}
+
+// setupHTTP2_Serve is called from (*Server).Serve and conditionally
+// configures HTTP/2 on srv using a more conservative policy than
+// setupHTTP2_ServeTLS because Serve is called after tls.Listen,
+// and may be called concurrently. See shouldConfigureHTTP2ForServe.
+//
+// The tests named TestTransportAutomaticHTTP2* and
+// TestConcurrentServerServe in server_test.go demonstrate some
+// of the supported use cases and motivations.
+func (srv *Server) setupHTTP2_Serve() error {
+ srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve)
+ return srv.nextProtoErr
+}
+
+func (srv *Server) onceSetNextProtoDefaults_Serve() {
+ if srv.shouldConfigureHTTP2ForServe() {
+ srv.onceSetNextProtoDefaults()
+ }
+}
+
+// onceSetNextProtoDefaults configures HTTP/2 if the user hasn't
+// configured otherwise (by setting srv.TLSNextProto non-nil).
+// It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*).
+func (srv *Server) onceSetNextProtoDefaults() {
+ if omitBundledHTTP2 || godebug.Get("http2server") == "0" {
+ return
+ }
+ // Enable HTTP/2 by default if the user hasn't otherwise
+ // configured their TLSNextProto map.
+ if srv.TLSNextProto == nil {
+ conf := &http2Server{
+ NewWriteScheduler: func() http2WriteScheduler { return http2NewPriorityWriteScheduler(nil) },
+ }
+ srv.nextProtoErr = http2ConfigureServer(srv, conf)
+ }
+}
+
+// TimeoutHandler returns a Handler that runs h with the given time limit.
+//
+// The new Handler calls h.ServeHTTP to handle each request, but if a
+// call runs for longer than its time limit, the handler responds with
+// a 503 Service Unavailable error and the given message in its body.
+// (If msg is empty, a suitable default message will be sent.)
+// After such a timeout, writes by h to its ResponseWriter will return
+// ErrHandlerTimeout.
+//
+// TimeoutHandler supports the Pusher interface but does not support
+// the Hijacker or Flusher interfaces.
+func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
+ return &timeoutHandler{
+ handler: h,
+ body: msg,
+ dt: dt,
+ }
+}
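+
+// A hedged sketch (the handler body and the one-second limit are
+// illustrative assumptions): after the deadline, the client receives a 503
+// with the given message, and later writes by the wrapped handler return
+// ErrHandlerTimeout.
+//
+//	slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//		time.Sleep(2 * time.Second) // outlives the limit below
+//		io.WriteString(w, "done")   // returns http.ErrHandlerTimeout
+//	})
+//	mux.Handle("/slow", http.TimeoutHandler(slow, time.Second, "request timed out"))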
+
+// ErrHandlerTimeout is returned on ResponseWriter Write calls
+// in handlers which have timed out.
+var ErrHandlerTimeout = errors.New("http: Handler timeout")
+
+type timeoutHandler struct {
+ handler Handler
+ body string
+ dt time.Duration
+
+ // When set, no context will be created and this context will
+ // be used instead.
+ testContext context.Context
+}
+
+func (h *timeoutHandler) errorBody() string {
+ if h.body != "" {
+ return h.body
+ }
+ return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>"
+}
+
+func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) {
+ ctx := h.testContext
+ if ctx == nil {
+ var cancelCtx context.CancelFunc
+ ctx, cancelCtx = context.WithTimeout(r.Context(), h.dt)
+ defer cancelCtx()
+ }
+ r = r.WithContext(ctx)
+ done := make(chan struct{})
+ tw := &timeoutWriter{
+ w: w,
+ h: make(Header),
+ req: r,
+ }
+ panicChan := make(chan any, 1)
+ go func() {
+ defer func() {
+ if p := recover(); p != nil {
+ panicChan <- p
+ }
+ }()
+ h.handler.ServeHTTP(tw, r)
+ close(done)
+ }()
+ select {
+ case p := <-panicChan:
+ panic(p)
+ case <-done:
+ tw.mu.Lock()
+ defer tw.mu.Unlock()
+ dst := w.Header()
+ for k, vv := range tw.h {
+ dst[k] = vv
+ }
+ if !tw.wroteHeader {
+ tw.code = StatusOK
+ }
+ w.WriteHeader(tw.code)
+ w.Write(tw.wbuf.Bytes())
+ case <-ctx.Done():
+ tw.mu.Lock()
+ defer tw.mu.Unlock()
+ switch err := ctx.Err(); err {
+ case context.DeadlineExceeded:
+ w.WriteHeader(StatusServiceUnavailable)
+ io.WriteString(w, h.errorBody())
+ tw.err = ErrHandlerTimeout
+ default:
+ w.WriteHeader(StatusServiceUnavailable)
+ tw.err = err
+ }
+ }
+}
+
+type timeoutWriter struct {
+ w ResponseWriter
+ h Header
+ wbuf bytes.Buffer
+ req *Request
+
+ mu sync.Mutex
+ err error
+ wroteHeader bool
+ code int
+}
+
+var _ Pusher = (*timeoutWriter)(nil)
+
+// Push implements the Pusher interface.
+func (tw *timeoutWriter) Push(target string, opts *PushOptions) error {
+ if pusher, ok := tw.w.(Pusher); ok {
+ return pusher.Push(target, opts)
+ }
+ return ErrNotSupported
+}
+
+func (tw *timeoutWriter) Header() Header { return tw.h }
+
+func (tw *timeoutWriter) Write(p []byte) (int, error) {
+ tw.mu.Lock()
+ defer tw.mu.Unlock()
+ if tw.err != nil {
+ return 0, tw.err
+ }
+ if !tw.wroteHeader {
+ tw.writeHeaderLocked(StatusOK)
+ }
+ return tw.wbuf.Write(p)
+}
+
+func (tw *timeoutWriter) writeHeaderLocked(code int) {
+ checkWriteHeaderCode(code)
+
+ switch {
+ case tw.err != nil:
+ return
+ case tw.wroteHeader:
+ if tw.req != nil {
+ caller := relevantCaller()
+ logf(tw.req, "http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
+ }
+ default:
+ tw.wroteHeader = true
+ tw.code = code
+ }
+}
+
+func (tw *timeoutWriter) WriteHeader(code int) {
+ tw.mu.Lock()
+ defer tw.mu.Unlock()
+ tw.writeHeaderLocked(code)
+}
+
+// onceCloseListener wraps a net.Listener, protecting it from
+// multiple Close calls.
+type onceCloseListener struct {
+ net.Listener
+ once sync.Once
+ closeErr error
+}
+
+func (oc *onceCloseListener) Close() error {
+ oc.once.Do(oc.close)
+ return oc.closeErr
+}
+
+func (oc *onceCloseListener) close() { oc.closeErr = oc.Listener.Close() }
+
+// globalOptionsHandler responds to "OPTIONS *" requests.
+type globalOptionsHandler struct{}
+
+func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) {
+ w.Header().Set("Content-Length", "0")
+ if r.ContentLength != 0 {
+ // Read up to 4KB of OPTIONS body (as mentioned in the
+ // spec as being reserved for future use), but anything
+ // over that is considered a waste of server resources
+ // (or an attack) and we abort and close the connection,
+ // courtesy of MaxBytesReader's EOF behavior.
+ mb := MaxBytesReader(w, r.Body, 4<<10)
+ io.Copy(io.Discard, mb)
+ }
+}
+
+// initALPNRequest is an HTTP handler that initializes certain
+// uninitialized fields in its *Request. Such partially-initialized
+// Requests come from ALPN protocol handlers.
+type initALPNRequest struct {
+ ctx context.Context
+ c *tls.Conn
+ h serverHandler
+}
+
+// BaseContext is an exported but unadvertised http.Handler method
+// recognized by x/net/http2 to pass down a context; the TLSNextProto
+// API predates context support so we shoehorn through the only
+// interface we have available.
+func (h initALPNRequest) BaseContext() context.Context { return h.ctx }
+
+func (h initALPNRequest) ServeHTTP(rw ResponseWriter, req *Request) {
+ if req.TLS == nil {
+ req.TLS = &tls.ConnectionState{}
+ *req.TLS = h.c.ConnectionState()
+ }
+ if req.Body == nil {
+ req.Body = NoBody
+ }
+ if req.RemoteAddr == "" {
+ req.RemoteAddr = h.c.RemoteAddr().String()
+ }
+ h.h.ServeHTTP(rw, req)
+}
+
+// loggingConn is used for debugging.
+type loggingConn struct {
+ name string
+ net.Conn
+}
+
+var (
+ uniqNameMu sync.Mutex
+ uniqNameNext = make(map[string]int)
+)
+
+func newLoggingConn(baseName string, c net.Conn) net.Conn {
+ uniqNameMu.Lock()
+ defer uniqNameMu.Unlock()
+ uniqNameNext[baseName]++
+ return &loggingConn{
+ name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]),
+ Conn: c,
+ }
+}
+
+func (c *loggingConn) Write(p []byte) (n int, err error) {
+ log.Printf("%s.Write(%d) = ....", c.name, len(p))
+ n, err = c.Conn.Write(p)
+ log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err)
+ return
+}
+
+func (c *loggingConn) Read(p []byte) (n int, err error) {
+ log.Printf("%s.Read(%d) = ....", c.name, len(p))
+ n, err = c.Conn.Read(p)
+ log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err)
+ return
+}
+
+func (c *loggingConn) Close() (err error) {
+ log.Printf("%s.Close() = ...", c.name)
+ err = c.Conn.Close()
+ log.Printf("%s.Close() = %v", c.name, err)
+ return
+}
+
+// checkConnErrorWriter writes to c.rwc and records any write errors to c.werr.
+// It only contains one field (and a pointer field at that), so it
+// fits in an interface value without an extra allocation.
+type checkConnErrorWriter struct {
+ c *conn
+}
+
+func (w checkConnErrorWriter) Write(p []byte) (n int, err error) {
+ n, err = w.c.rwc.Write(p)
+ if err != nil && w.c.werr == nil {
+ w.c.werr = err
+ w.c.cancelCtx()
+ }
+ return
+}
+
+func numLeadingCRorLF(v []byte) (n int) {
+ for _, b := range v {
+ if b == '\r' || b == '\n' {
+ n++
+ continue
+ }
+ break
+ }
+ return
+}
+
+func strSliceContains(ss []string, s string) bool {
+ for _, v := range ss {
+ if v == s {
+ return true
+ }
+ }
+ return false
+}
+
+// tlsRecordHeaderLooksLikeHTTP reports whether a TLS record header
+// looks like it might've been a misdirected plaintext HTTP request.
+func tlsRecordHeaderLooksLikeHTTP(hdr [5]byte) bool {
+ switch string(hdr[:]) {
+ case "GET /", "HEAD ", "POST ", "PUT /", "OPTIO":
+ return true
+ }
+ return false
+}
+
+// MaxBytesHandler returns a Handler that runs h with its ResponseWriter and Request.Body wrapped by a MaxBytesReader.
+func MaxBytesHandler(h Handler, n int64) Handler {
+ return HandlerFunc(func(w ResponseWriter, r *Request) {
+ r2 := *r
+ r2.Body = MaxBytesReader(w, r.Body, n)
+ h.ServeHTTP(w, &r2)
+ })
+}
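+
+// An illustrative sketch (the 1 MiB limit and handler are assumptions):
+// wrapping a handler caps each request body, so reads past the limit fail
+// with MaxBytesReader's error instead of consuming unbounded memory.
+//
+//	srv := &http.Server{
+//		Addr:    ":8080",
+//		Handler: http.MaxBytesHandler(apiHandler, 1<<20), // apiHandler: any http.Handler
+//	}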
diff --git a/contrib/go/_std_1.19/src/net/http/sniff.go b/contrib/go/_std_1.19/src/net/http/sniff.go
new file mode 100644
index 0000000000..ac18ab979d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/http/sniff.go
@@ -0,0 +1,304 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+// The algorithm uses at most sniffLen bytes to make its decision.
+const sniffLen = 512
+
+// DetectContentType implements the algorithm described
+// at https://mimesniff.spec.whatwg.org/ to determine the
+// Content-Type of the given data. It considers at most the
+// first 512 bytes of data. DetectContentType always returns
+// a valid MIME type: if it cannot determine a more specific one, it
+// returns "application/octet-stream".
+func DetectContentType(data []byte) string {
+ if len(data) > sniffLen {
+ data = data[:sniffLen]
+ }
+
+ // Index of the first non-whitespace byte in data.
+ firstNonWS := 0
+ for ; firstNonWS < len(data) && isWS(data[firstNonWS]); firstNonWS++ {
+ }
+
+ for _, sig := range sniffSignatures {
+ if ct := sig.match(data, firstNonWS); ct != "" {
+ return ct
+ }
+ }
+
+ return "application/octet-stream" // fallback
+}
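+
+// A small sketch of the API (inputs chosen to hit specific signatures
+// below):
+//
+//	http.DetectContentType([]byte("\x89PNG\x0d\x0a\x1a\x0a...")) // "image/png"
+//	http.DetectContentType([]byte("<!DOCTYPE HTML><html>"))      // "text/html; charset=utf-8"
+//	http.DetectContentType([]byte{0x01, 0x02})                   // "application/octet-stream"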
+
+// isWS reports whether the provided byte is a whitespace byte (0xWS)
+// as defined in https://mimesniff.spec.whatwg.org/#terminology.
+func isWS(b byte) bool {
+ switch b {
+ case '\t', '\n', '\x0c', '\r', ' ':
+ return true
+ }
+ return false
+}
+
+// isTT reports whether the provided byte is a tag-terminating byte (0xTT)
+// as defined in https://mimesniff.spec.whatwg.org/#terminology.
+func isTT(b byte) bool {
+ switch b {
+ case ' ', '>':
+ return true
+ }
+ return false
+}
+
+type sniffSig interface {
+ // match returns the MIME type of the data, or "" if unknown.
+ match(data []byte, firstNonWS int) string
+}
+
+// Data matching the table in section 6.
+var sniffSignatures = []sniffSig{
+ htmlSig("<!DOCTYPE HTML"),
+ htmlSig("<HTML"),
+ htmlSig("<HEAD"),
+ htmlSig("<SCRIPT"),
+ htmlSig("<IFRAME"),
+ htmlSig("<H1"),
+ htmlSig("<DIV"),
+ htmlSig("<FONT"),
+ htmlSig("<TABLE"),
+ htmlSig("<A"),
+ htmlSig("<STYLE"),
+ htmlSig("<TITLE"),
+ htmlSig("<B"),
+ htmlSig("<BODY"),
+ htmlSig("<BR"),
+ htmlSig("<P"),
+ htmlSig("<!--"),
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\xFF\xFF"),
+ pat: []byte("<?xml"),
+ skipWS: true,
+ ct: "text/xml; charset=utf-8"},
+ &exactSig{[]byte("%PDF-"), "application/pdf"},
+ &exactSig{[]byte("%!PS-Adobe-"), "application/postscript"},
+
+ // UTF BOMs.
+ &maskedSig{
+ mask: []byte("\xFF\xFF\x00\x00"),
+ pat: []byte("\xFE\xFF\x00\x00"),
+ ct: "text/plain; charset=utf-16be",
+ },
+ &maskedSig{
+ mask: []byte("\xFF\xFF\x00\x00"),
+ pat: []byte("\xFF\xFE\x00\x00"),
+ ct: "text/plain; charset=utf-16le",
+ },
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\x00"),
+ pat: []byte("\xEF\xBB\xBF\x00"),
+ ct: "text/plain; charset=utf-8",
+ },
+
+ // Image types
+ // For posterity, we originally returned "image/vnd.microsoft.icon" from
+ // https://tools.ietf.org/html/draft-ietf-websec-mime-sniff-03#section-7
+ // https://codereview.appspot.com/4746042
+ // but that has since been replaced with "image/x-icon" in Section 6.2
+ // of https://mimesniff.spec.whatwg.org/#matching-an-image-type-pattern
+ &exactSig{[]byte("\x00\x00\x01\x00"), "image/x-icon"},
+ &exactSig{[]byte("\x00\x00\x02\x00"), "image/x-icon"},
+ &exactSig{[]byte("BM"), "image/bmp"},
+ &exactSig{[]byte("GIF87a"), "image/gif"},
+ &exactSig{[]byte("GIF89a"), "image/gif"},
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF"),
+ pat: []byte("RIFF\x00\x00\x00\x00WEBPVP"),
+ ct: "image/webp",
+ },
+ &exactSig{[]byte("\x89PNG\x0D\x0A\x1A\x0A"), "image/png"},
+ &exactSig{[]byte("\xFF\xD8\xFF"), "image/jpeg"},
+
+ // Audio and Video types
+ // Enforce the pattern match ordering as prescribed in
+ // https://mimesniff.spec.whatwg.org/#matching-an-audio-or-video-type-pattern
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF"),
+ pat: []byte("FORM\x00\x00\x00\x00AIFF"),
+ ct: "audio/aiff",
+ },
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF"),
+ pat: []byte("ID3"),
+ ct: "audio/mpeg",
+ },
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\xFF\xFF"),
+ pat: []byte("OggS\x00"),
+ ct: "application/ogg",
+ },
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"),
+ pat: []byte("MThd\x00\x00\x00\x06"),
+ ct: "audio/midi",
+ },
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF"),
+ pat: []byte("RIFF\x00\x00\x00\x00AVI "),
+ ct: "video/avi",
+ },
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF"),
+ pat: []byte("RIFF\x00\x00\x00\x00WAVE"),
+ ct: "audio/wave",
+ },
+ // 6.2.0.2. video/mp4
+ mp4Sig{},
+ // 6.2.0.3. video/webm
+ &exactSig{[]byte("\x1A\x45\xDF\xA3"), "video/webm"},
+
+ // Font types
+ &maskedSig{
+ // 34 NULL bytes followed by the string "LP"
+ pat: []byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00LP"),
+ // 34 NULL bytes followed by \xFF\xFF
+ mask: []byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF"),
+ ct: "application/vnd.ms-fontobject",
+ },
+ &exactSig{[]byte("\x00\x01\x00\x00"), "font/ttf"},
+ &exactSig{[]byte("OTTO"), "font/otf"},
+ &exactSig{[]byte("ttcf"), "font/collection"},
+ &exactSig{[]byte("wOFF"), "font/woff"},
+ &exactSig{[]byte("wOF2"), "font/woff2"},
+
+ // Archive types
+ &exactSig{[]byte("\x1F\x8B\x08"), "application/x-gzip"},
+ &exactSig{[]byte("PK\x03\x04"), "application/zip"},
+ // RAR's signatures are incorrectly defined by the MIME spec as per
+ // https://github.com/whatwg/mimesniff/issues/63
+ // However, RAR Labs correctly defines it at:
+ // https://www.rarlab.com/technote.htm#rarsign
+ // so we use the definition from RAR Labs.
+ // TODO: do whatever the spec ends up doing.
+ &exactSig{[]byte("Rar!\x1A\x07\x00"), "application/x-rar-compressed"}, // RAR v1.5-v4.0
+ &exactSig{[]byte("Rar!\x1A\x07\x01\x00"), "application/x-rar-compressed"}, // RAR v5+
+
+ &exactSig{[]byte("\x00\x61\x73\x6D"), "application/wasm"},
+
+ textSig{}, // should be last
+}
+
+type exactSig struct {
+ sig []byte
+ ct string
+}
+
+func (e *exactSig) match(data []byte, firstNonWS int) string {
+ if bytes.HasPrefix(data, e.sig) {
+ return e.ct
+ }
+ return ""
+}
+
+type maskedSig struct {
+ mask, pat []byte
+ skipWS bool
+ ct string
+}
+
+func (m *maskedSig) match(data []byte, firstNonWS int) string {
+ // pattern matching algorithm section 6
+ // https://mimesniff.spec.whatwg.org/#pattern-matching-algorithm
+
+ if m.skipWS {
+ data = data[firstNonWS:]
+ }
+ if len(m.pat) != len(m.mask) {
+ return ""
+ }
+ if len(data) < len(m.pat) {
+ return ""
+ }
+ for i, pb := range m.pat {
+ maskedData := data[i] & m.mask[i]
+ if maskedData != pb {
+ return ""
+ }
+ }
+ return m.ct
+}
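+
+// A worked example of the mask step, using the WEBP signature from the
+// table above: each data byte is ANDed with its mask byte before the
+// comparison, so the four chunk-size bytes after "RIFF" are zeroed and
+// match the \x00 pattern bytes whatever their actual value.
+//
+//	data: 'R' 'I' 'F' 'F' 24  08  00  00  'W' 'E' 'B' 'P' 'V' 'P'
+//	mask: FF  FF  FF  FF  00  00  00  00  FF  FF  FF  FF  FF  FF
+//	AND:  'R' 'I' 'F' 'F' 00  00  00  00  'W' 'E' 'B' 'P' 'V' 'P' -> "image/webp"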
+
+type htmlSig []byte
+
+func (h htmlSig) match(data []byte, firstNonWS int) string {
+ data = data[firstNonWS:]
+ if len(data) < len(h)+1 {
+ return ""
+ }
+ for i, b := range h {
+ db := data[i]
+ if 'A' <= b && b <= 'Z' {
+ db &= 0xDF
+ }
+ if b != db {
+ return ""
+ }
+ }
+ // Next byte must be a tag-terminating byte (0xTT).
+ if !isTT(data[len(h)]) {
+ return ""
+ }
+ return "text/html; charset=utf-8"
+}
+
+var mp4ftype = []byte("ftyp")
+var mp4 = []byte("mp4")
+
+type mp4Sig struct{}
+
+func (mp4Sig) match(data []byte, firstNonWS int) string {
+ // https://mimesniff.spec.whatwg.org/#signature-for-mp4
+ // cf. section 6.2.1
+ if len(data) < 12 {
+ return ""
+ }
+ boxSize := int(binary.BigEndian.Uint32(data[:4]))
+ if len(data) < boxSize || boxSize%4 != 0 {
+ return ""
+ }
+ if !bytes.Equal(data[4:8], mp4ftype) {
+ return ""
+ }
+ for st := 8; st < boxSize; st += 4 {
+ if st == 12 {
+ // Ignores the four bytes that correspond to the version number of the "major brand".
+ continue
+ }
+ if bytes.Equal(data[st:st+3], mp4) {
+ return "video/mp4"
+ }
+ }
+ return ""
+}
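+
+// A worked example (header bytes assumed for illustration): given the
+// 16-byte prefix
+//
+//	00 00 00 10 'f' 't' 'y' 'p' 'm' 'p' '4' '2' 00 00 00 00
+//
+// boxSize is 0x10 (16), bytes 4-7 spell "ftyp", and the scan finds "mp4"
+// at offset 8 (the start of the "mp42" brand), so the result is "video/mp4".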
+
+type textSig struct{}
+
+func (textSig) match(data []byte, firstNonWS int) string {
+ // cf. section 5, step 4.
+ for _, b := range data[firstNonWS:] {
+ switch {
+ case b <= 0x08,
+ b == 0x0B,
+ 0x0E <= b && b <= 0x1A,
+ 0x1C <= b && b <= 0x1F:
+ return ""
+ }
+ }
+ return "text/plain; charset=utf-8"
+}
diff --git a/contrib/go/_std_1.18/src/net/http/socks_bundle.go b/contrib/go/_std_1.19/src/net/http/socks_bundle.go
index e446669589..e446669589 100644
--- a/contrib/go/_std_1.18/src/net/http/socks_bundle.go
+++ b/contrib/go/_std_1.19/src/net/http/socks_bundle.go
diff --git a/contrib/go/_std_1.19/src/net/http/status.go b/contrib/go/_std_1.19/src/net/http/status.go
new file mode 100644
index 0000000000..cd90877ef0
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/http/status.go
@@ -0,0 +1,210 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+// HTTP status codes as registered with IANA.
+// See: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
+const (
+ StatusContinue = 100 // RFC 9110, 15.2.1
+ StatusSwitchingProtocols = 101 // RFC 9110, 15.2.2
+ StatusProcessing = 102 // RFC 2518, 10.1
+ StatusEarlyHints = 103 // RFC 8297
+
+ StatusOK = 200 // RFC 9110, 15.3.1
+ StatusCreated = 201 // RFC 9110, 15.3.2
+ StatusAccepted = 202 // RFC 9110, 15.3.3
+ StatusNonAuthoritativeInfo = 203 // RFC 9110, 15.3.4
+ StatusNoContent = 204 // RFC 9110, 15.3.5
+ StatusResetContent = 205 // RFC 9110, 15.3.6
+ StatusPartialContent = 206 // RFC 9110, 15.3.7
+ StatusMultiStatus = 207 // RFC 4918, 11.1
+ StatusAlreadyReported = 208 // RFC 5842, 7.1
+ StatusIMUsed = 226 // RFC 3229, 10.4.1
+
+ StatusMultipleChoices = 300 // RFC 9110, 15.4.1
+ StatusMovedPermanently = 301 // RFC 9110, 15.4.2
+ StatusFound = 302 // RFC 9110, 15.4.3
+ StatusSeeOther = 303 // RFC 9110, 15.4.4
+ StatusNotModified = 304 // RFC 9110, 15.4.5
+ StatusUseProxy = 305 // RFC 9110, 15.4.6
+ _ = 306 // RFC 9110, 15.4.7 (Unused)
+ StatusTemporaryRedirect = 307 // RFC 9110, 15.4.8
+ StatusPermanentRedirect = 308 // RFC 9110, 15.4.9
+
+ StatusBadRequest = 400 // RFC 9110, 15.5.1
+ StatusUnauthorized = 401 // RFC 9110, 15.5.2
+ StatusPaymentRequired = 402 // RFC 9110, 15.5.3
+ StatusForbidden = 403 // RFC 9110, 15.5.4
+ StatusNotFound = 404 // RFC 9110, 15.5.5
+ StatusMethodNotAllowed = 405 // RFC 9110, 15.5.6
+ StatusNotAcceptable = 406 // RFC 9110, 15.5.7
+ StatusProxyAuthRequired = 407 // RFC 9110, 15.5.8
+ StatusRequestTimeout = 408 // RFC 9110, 15.5.9
+ StatusConflict = 409 // RFC 9110, 15.5.10
+ StatusGone = 410 // RFC 9110, 15.5.11
+ StatusLengthRequired = 411 // RFC 9110, 15.5.12
+ StatusPreconditionFailed = 412 // RFC 9110, 15.5.13
+ StatusRequestEntityTooLarge = 413 // RFC 9110, 15.5.14
+ StatusRequestURITooLong = 414 // RFC 9110, 15.5.15
+ StatusUnsupportedMediaType = 415 // RFC 9110, 15.5.16
+ StatusRequestedRangeNotSatisfiable = 416 // RFC 9110, 15.5.17
+ StatusExpectationFailed = 417 // RFC 9110, 15.5.18
+ StatusTeapot = 418 // RFC 9110, 15.5.19 (Unused)
+ StatusMisdirectedRequest = 421 // RFC 9110, 15.5.20
+ StatusUnprocessableEntity = 422 // RFC 9110, 15.5.21
+ StatusLocked = 423 // RFC 4918, 11.3
+ StatusFailedDependency = 424 // RFC 4918, 11.4
+ StatusTooEarly = 425 // RFC 8470, 5.2
+ StatusUpgradeRequired = 426 // RFC 9110, 15.5.22
+ StatusPreconditionRequired = 428 // RFC 6585, 3
+ StatusTooManyRequests = 429 // RFC 6585, 4
+ StatusRequestHeaderFieldsTooLarge = 431 // RFC 6585, 5
+ StatusUnavailableForLegalReasons = 451 // RFC 7725, 3
+
+ StatusInternalServerError = 500 // RFC 9110, 15.6.1
+ StatusNotImplemented = 501 // RFC 9110, 15.6.2
+ StatusBadGateway = 502 // RFC 9110, 15.6.3
+ StatusServiceUnavailable = 503 // RFC 9110, 15.6.4
+ StatusGatewayTimeout = 504 // RFC 9110, 15.6.5
+ StatusHTTPVersionNotSupported = 505 // RFC 9110, 15.6.6
+ StatusVariantAlsoNegotiates = 506 // RFC 2295, 8.1
+ StatusInsufficientStorage = 507 // RFC 4918, 11.5
+ StatusLoopDetected = 508 // RFC 5842, 7.2
+ StatusNotExtended = 510 // RFC 2774, 7
+ StatusNetworkAuthenticationRequired = 511 // RFC 6585, 6
+)
+
+// StatusText returns a text for the HTTP status code. It returns the empty
+// string if the code is unknown.
+func StatusText(code int) string {
+ switch code {
+ case StatusContinue:
+ return "Continue"
+ case StatusSwitchingProtocols:
+ return "Switching Protocols"
+ case StatusProcessing:
+ return "Processing"
+ case StatusEarlyHints:
+ return "Early Hints"
+ case StatusOK:
+ return "OK"
+ case StatusCreated:
+ return "Created"
+ case StatusAccepted:
+ return "Accepted"
+ case StatusNonAuthoritativeInfo:
+ return "Non-Authoritative Information"
+ case StatusNoContent:
+ return "No Content"
+ case StatusResetContent:
+ return "Reset Content"
+ case StatusPartialContent:
+ return "Partial Content"
+ case StatusMultiStatus:
+ return "Multi-Status"
+ case StatusAlreadyReported:
+ return "Already Reported"
+ case StatusIMUsed:
+ return "IM Used"
+ case StatusMultipleChoices:
+ return "Multiple Choices"
+ case StatusMovedPermanently:
+ return "Moved Permanently"
+ case StatusFound:
+ return "Found"
+ case StatusSeeOther:
+ return "See Other"
+ case StatusNotModified:
+ return "Not Modified"
+ case StatusUseProxy:
+ return "Use Proxy"
+ case StatusTemporaryRedirect:
+ return "Temporary Redirect"
+ case StatusPermanentRedirect:
+ return "Permanent Redirect"
+ case StatusBadRequest:
+ return "Bad Request"
+ case StatusUnauthorized:
+ return "Unauthorized"
+ case StatusPaymentRequired:
+ return "Payment Required"
+ case StatusForbidden:
+ return "Forbidden"
+ case StatusNotFound:
+ return "Not Found"
+ case StatusMethodNotAllowed:
+ return "Method Not Allowed"
+ case StatusNotAcceptable:
+ return "Not Acceptable"
+ case StatusProxyAuthRequired:
+ return "Proxy Authentication Required"
+ case StatusRequestTimeout:
+ return "Request Timeout"
+ case StatusConflict:
+ return "Conflict"
+ case StatusGone:
+ return "Gone"
+ case StatusLengthRequired:
+ return "Length Required"
+ case StatusPreconditionFailed:
+ return "Precondition Failed"
+ case StatusRequestEntityTooLarge:
+ return "Request Entity Too Large"
+ case StatusRequestURITooLong:
+ return "Request URI Too Long"
+ case StatusUnsupportedMediaType:
+ return "Unsupported Media Type"
+ case StatusRequestedRangeNotSatisfiable:
+ return "Requested Range Not Satisfiable"
+ case StatusExpectationFailed:
+ return "Expectation Failed"
+ case StatusTeapot:
+ return "I'm a teapot"
+ case StatusMisdirectedRequest:
+ return "Misdirected Request"
+ case StatusUnprocessableEntity:
+ return "Unprocessable Entity"
+ case StatusLocked:
+ return "Locked"
+ case StatusFailedDependency:
+ return "Failed Dependency"
+ case StatusTooEarly:
+ return "Too Early"
+ case StatusUpgradeRequired:
+ return "Upgrade Required"
+ case StatusPreconditionRequired:
+ return "Precondition Required"
+ case StatusTooManyRequests:
+ return "Too Many Requests"
+ case StatusRequestHeaderFieldsTooLarge:
+ return "Request Header Fields Too Large"
+ case StatusUnavailableForLegalReasons:
+ return "Unavailable For Legal Reasons"
+ case StatusInternalServerError:
+ return "Internal Server Error"
+ case StatusNotImplemented:
+ return "Not Implemented"
+ case StatusBadGateway:
+ return "Bad Gateway"
+ case StatusServiceUnavailable:
+ return "Service Unavailable"
+ case StatusGatewayTimeout:
+ return "Gateway Timeout"
+ case StatusHTTPVersionNotSupported:
+ return "HTTP Version Not Supported"
+ case StatusVariantAlsoNegotiates:
+ return "Variant Also Negotiates"
+ case StatusInsufficientStorage:
+ return "Insufficient Storage"
+ case StatusLoopDetected:
+ return "Loop Detected"
+ case StatusNotExtended:
+ return "Not Extended"
+ case StatusNetworkAuthenticationRequired:
+ return "Network Authentication Required"
+ default:
+ return ""
+ }
+}
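+
+// A brief sketch of the mapping (return values follow directly from the
+// switch above):
+//
+//	http.StatusText(http.StatusNotFound) // "Not Found"
+//	http.StatusText(http.StatusTeapot)   // "I'm a teapot"
+//	http.StatusText(999)                 // "" (unknown code)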
diff --git a/contrib/go/_std_1.19/src/net/http/transfer.go b/contrib/go/_std_1.19/src/net/http/transfer.go
new file mode 100644
index 0000000000..4583c6b453
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/http/transfer.go
@@ -0,0 +1,1131 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net/http/httptrace"
+ "net/http/internal"
+ "net/http/internal/ascii"
+ "net/textproto"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http/httpguts"
+)
+
+// ErrLineTooLong is returned when reading request or response bodies
+// with malformed chunked encoding.
+var ErrLineTooLong = internal.ErrLineTooLong
+
+type errorReader struct {
+ err error
+}
+
+func (r errorReader) Read(p []byte) (n int, err error) {
+ return 0, r.err
+}
+
+type byteReader struct {
+ b byte
+ done bool
+}
+
+func (br *byteReader) Read(p []byte) (n int, err error) {
+ if br.done {
+ return 0, io.EOF
+ }
+ if len(p) == 0 {
+ return 0, nil
+ }
+ br.done = true
+ p[0] = br.b
+ return 1, io.EOF
+}
+
+// transferWriter inspects the fields of a user-supplied Request or Response,
+// sanitizes them without changing the user object and provides methods for
+// writing the respective header, body and trailer in wire format.
+type transferWriter struct {
+ Method string
+ Body io.Reader
+ BodyCloser io.Closer
+ ResponseToHEAD bool
+ ContentLength int64 // -1 means unknown, 0 means exactly none
+ Close bool
+ TransferEncoding []string
+ Header Header
+ Trailer Header
+ IsResponse bool
+ bodyReadError error // any non-EOF error from reading Body
+
+ FlushHeaders bool // flush headers to network before body
+ ByteReadCh chan readResult // non-nil if probeRequestBody called
+}
+
+func newTransferWriter(r any) (t *transferWriter, err error) {
+ t = &transferWriter{}
+
+ // Extract relevant fields
+ atLeastHTTP11 := false
+ switch rr := r.(type) {
+ case *Request:
+ if rr.ContentLength != 0 && rr.Body == nil {
+ return nil, fmt.Errorf("http: Request.ContentLength=%d with nil Body", rr.ContentLength)
+ }
+ t.Method = valueOrDefault(rr.Method, "GET")
+ t.Close = rr.Close
+ t.TransferEncoding = rr.TransferEncoding
+ t.Header = rr.Header
+ t.Trailer = rr.Trailer
+ t.Body = rr.Body
+ t.BodyCloser = rr.Body
+ t.ContentLength = rr.outgoingLength()
+ if t.ContentLength < 0 && len(t.TransferEncoding) == 0 && t.shouldSendChunkedRequestBody() {
+ t.TransferEncoding = []string{"chunked"}
+ }
+ // If there's a body, conservatively flush the headers
+ // to any bufio.Writer we're writing to, just in case
+ // the server needs the headers early, before we copy
+ // the body and possibly block. We make an exception
+ // for the common standard library in-memory types,
+ // though, to avoid unnecessary TCP packets on the
+ // wire. (Issue 22088.)
+ if t.ContentLength != 0 && !isKnownInMemoryReader(t.Body) {
+ t.FlushHeaders = true
+ }
+
+ atLeastHTTP11 = true // Transport requests are always 1.1 or 2.0
+ case *Response:
+ t.IsResponse = true
+ if rr.Request != nil {
+ t.Method = rr.Request.Method
+ }
+ t.Body = rr.Body
+ t.BodyCloser = rr.Body
+ t.ContentLength = rr.ContentLength
+ t.Close = rr.Close
+ t.TransferEncoding = rr.TransferEncoding
+ t.Header = rr.Header
+ t.Trailer = rr.Trailer
+ atLeastHTTP11 = rr.ProtoAtLeast(1, 1)
+ t.ResponseToHEAD = noResponseBodyExpected(t.Method)
+ }
+
+ // Sanitize Body,ContentLength,TransferEncoding
+ if t.ResponseToHEAD {
+ t.Body = nil
+ if chunked(t.TransferEncoding) {
+ t.ContentLength = -1
+ }
+ } else {
+ if !atLeastHTTP11 || t.Body == nil {
+ t.TransferEncoding = nil
+ }
+ if chunked(t.TransferEncoding) {
+ t.ContentLength = -1
+ } else if t.Body == nil { // no chunking, no body
+ t.ContentLength = 0
+ }
+ }
+
+ // Sanitize Trailer
+ if !chunked(t.TransferEncoding) {
+ t.Trailer = nil
+ }
+
+ return t, nil
+}
+
+// shouldSendChunkedRequestBody reports whether we should try to send a
+// chunked request body to the server. In particular, the case we really
+// want to prevent is sending a GET or other typically-bodyless request to a
+// server with a chunked body when the body has zero bytes, since GETs with
+// bodies (while acceptable according to specs), even zero-byte chunked
+// bodies, are approximately never seen in the wild and confuse most
+// servers. See Issue 18257, as one example.
+//
+// The only reason we'd send such a request is if the user set the Body to a
+// non-nil value (say, io.NopCloser(bytes.NewReader(nil))) and didn't
+// set ContentLength, or NewRequest set it to -1 (unknown), so then we assume
+// there are bytes to send.
+//
+// This code tries to read a byte from the Request.Body in such cases to see
+// whether the body actually has content (super rare) or is actually just
+// a non-nil content-less ReadCloser (the more common case). In that more
+// common case, we act as if their Body were nil instead, and don't send
+// a body.
+func (t *transferWriter) shouldSendChunkedRequestBody() bool {
+ // Note that t.ContentLength is the corrected content length
+ // from rr.outgoingLength, so 0 actually means zero, not unknown.
+ if t.ContentLength >= 0 || t.Body == nil { // redundant checks; caller did them
+ return false
+ }
+ if t.Method == "CONNECT" {
+ return false
+ }
+ if requestMethodUsuallyLacksBody(t.Method) {
+ // Only probe the Request.Body for GET/HEAD/DELETE/etc
+ // requests, because it's only those types of requests
+ // that confuse servers.
+ t.probeRequestBody() // adjusts t.Body, t.ContentLength
+ return t.Body != nil
+ }
+ // For all other request types (PUT, POST, PATCH, or anything
+ // made-up we've never heard of), assume it's normal and the server
+ // can deal with a chunked request body. Maybe we'll adjust this
+ // later.
+ return true
+}
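+
+// An illustrative consequence on the client side (request construction
+// assumed): a GET whose Body is a non-nil but empty ReadCloser has unknown
+// length, so probeRequestBody reads one byte, sees io.EOF, and the request
+// goes out with no body and no chunked encoding.
+//
+//	body := io.NopCloser(bytes.NewReader(nil)) // hides the length from NewRequest
+//	req, _ := http.NewRequest("GET", "https://example.com/", body)
+//	// outgoingLength reports -1, the probe finds EOF, and t.Body is cleared.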
+
+// probeRequestBody reads a byte from t.Body to see whether it's empty
+// (returns io.EOF right away).
+//
+// But because we've had problems with this blocking users in the past
+// (issue 17480) when the body is a pipe (perhaps waiting on the response
+// headers before the pipe is fed data), we need to be careful and bound how
+// long we wait for it. This delay will only affect users if all the following
+// are true:
+// - the request body blocks
+// - the content length is not set (or set to -1)
+// - the method doesn't usually have a body (GET, HEAD, DELETE, ...)
+// - there is no transfer-encoding=chunked already set.
+//
+// In other words, this delay will not normally affect anybody, and there
+// are workarounds if it does.
+func (t *transferWriter) probeRequestBody() {
+ t.ByteReadCh = make(chan readResult, 1)
+ go func(body io.Reader) {
+ var buf [1]byte
+ var rres readResult
+ rres.n, rres.err = body.Read(buf[:])
+ if rres.n == 1 {
+ rres.b = buf[0]
+ }
+ t.ByteReadCh <- rres
+ close(t.ByteReadCh)
+ }(t.Body)
+ timer := time.NewTimer(200 * time.Millisecond)
+ select {
+ case rres := <-t.ByteReadCh:
+ timer.Stop()
+ if rres.n == 0 && rres.err == io.EOF {
+ // It was empty.
+ t.Body = nil
+ t.ContentLength = 0
+ } else if rres.n == 1 {
+ if rres.err != nil {
+ t.Body = io.MultiReader(&byteReader{b: rres.b}, errorReader{rres.err})
+ } else {
+ t.Body = io.MultiReader(&byteReader{b: rres.b}, t.Body)
+ }
+ } else if rres.err != nil {
+ t.Body = errorReader{rres.err}
+ }
+ case <-timer.C:
+ // Too slow. Don't wait. Read it later, and keep
+ // assuming that this is ContentLength == -1
+ // (unknown), which means we'll send a
+ // "Transfer-Encoding: chunked" header.
+ t.Body = io.MultiReader(finishAsyncByteRead{t}, t.Body)
+ // Request that Request.Write flush the headers to the
+ // network before writing the body, since our body may not
+ // become readable until it's seen the response headers.
+ t.FlushHeaders = true
+ }
+}
+
+func noResponseBodyExpected(requestMethod string) bool {
+ return requestMethod == "HEAD"
+}
+
+func (t *transferWriter) shouldSendContentLength() bool {
+ if chunked(t.TransferEncoding) {
+ return false
+ }
+ if t.ContentLength > 0 {
+ return true
+ }
+ if t.ContentLength < 0 {
+ return false
+ }
+ // Many servers expect a Content-Length for these methods
+ if t.Method == "POST" || t.Method == "PUT" || t.Method == "PATCH" {
+ return true
+ }
+ if t.ContentLength == 0 && isIdentity(t.TransferEncoding) {
+ if t.Method == "GET" || t.Method == "HEAD" {
+ return false
+ }
+ return true
+ }
+
+ return false
+}
+
+func (t *transferWriter) writeHeader(w io.Writer, trace *httptrace.ClientTrace) error {
+ if t.Close && !hasToken(t.Header.get("Connection"), "close") {
+ if _, err := io.WriteString(w, "Connection: close\r\n"); err != nil {
+ return err
+ }
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField("Connection", []string{"close"})
+ }
+ }
+
+ // Write Content-Length and/or Transfer-Encoding whose values are a
+ // function of the sanitized field triple (Body, ContentLength,
+ // TransferEncoding)
+ if t.shouldSendContentLength() {
+ if _, err := io.WriteString(w, "Content-Length: "); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w, strconv.FormatInt(t.ContentLength, 10)+"\r\n"); err != nil {
+ return err
+ }
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField("Content-Length", []string{strconv.FormatInt(t.ContentLength, 10)})
+ }
+ } else if chunked(t.TransferEncoding) {
+ if _, err := io.WriteString(w, "Transfer-Encoding: chunked\r\n"); err != nil {
+ return err
+ }
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField("Transfer-Encoding", []string{"chunked"})
+ }
+ }
+
+ // Write Trailer header
+ if t.Trailer != nil {
+ keys := make([]string, 0, len(t.Trailer))
+ for k := range t.Trailer {
+ k = CanonicalHeaderKey(k)
+ switch k {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ return badStringError("invalid Trailer key", k)
+ }
+ keys = append(keys, k)
+ }
+ if len(keys) > 0 {
+ sort.Strings(keys)
+ // TODO: could do better allocation-wise here, but trailers are rare,
+ // so being lazy for now.
+ if _, err := io.WriteString(w, "Trailer: "+strings.Join(keys, ",")+"\r\n"); err != nil {
+ return err
+ }
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField("Trailer", keys)
+ }
+ }
+ }
+
+ return nil
+}
+
+// writeBody writes the body, always closing t.BodyCloser.
+func (t *transferWriter) writeBody(w io.Writer) (err error) {
+ var ncopy int64
+ closed := false
+ defer func() {
+ if closed || t.BodyCloser == nil {
+ return
+ }
+ if closeErr := t.BodyCloser.Close(); closeErr != nil && err == nil {
+ err = closeErr
+ }
+ }()
+
+ // Write body. We "unwrap" the body first if it was wrapped in a
+ // nopCloser or readTrackingBody. This is to ensure that we can take advantage of
+ // OS-level optimizations in the event that the body is an
+ // *os.File.
+ if t.Body != nil {
+ var body = t.unwrapBody()
+ if chunked(t.TransferEncoding) {
+ if bw, ok := w.(*bufio.Writer); ok && !t.IsResponse {
+ w = &internal.FlushAfterChunkWriter{Writer: bw}
+ }
+ cw := internal.NewChunkedWriter(w)
+ _, err = t.doBodyCopy(cw, body)
+ if err == nil {
+ err = cw.Close()
+ }
+ } else if t.ContentLength == -1 {
+ dst := w
+ if t.Method == "CONNECT" {
+ dst = bufioFlushWriter{dst}
+ }
+ ncopy, err = t.doBodyCopy(dst, body)
+ } else {
+ ncopy, err = t.doBodyCopy(w, io.LimitReader(body, t.ContentLength))
+ if err != nil {
+ return err
+ }
+ var nextra int64
+ nextra, err = t.doBodyCopy(io.Discard, body)
+ ncopy += nextra
+ }
+ if err != nil {
+ return err
+ }
+ }
+ if t.BodyCloser != nil {
+ closed = true
+ if err := t.BodyCloser.Close(); err != nil {
+ return err
+ }
+ }
+
+ if !t.ResponseToHEAD && t.ContentLength != -1 && t.ContentLength != ncopy {
+ return fmt.Errorf("http: ContentLength=%d with Body length %d",
+ t.ContentLength, ncopy)
+ }
+
+ if chunked(t.TransferEncoding) {
+ // Write Trailer header
+ if t.Trailer != nil {
+ if err := t.Trailer.Write(w); err != nil {
+ return err
+ }
+ }
+ // Last chunk, empty trailer
+ _, err = io.WriteString(w, "\r\n")
+ }
+ return err
+}
+
+// doBodyCopy wraps a copy operation, with any resulting error also
+// being saved in bodyReadError.
+//
+// This function is only intended for use in writeBody.
+func (t *transferWriter) doBodyCopy(dst io.Writer, src io.Reader) (n int64, err error) {
+ n, err = io.Copy(dst, src)
+ if err != nil && err != io.EOF {
+ t.bodyReadError = err
+ }
+ return
+}
+
+// unwrapBody unwraps the body's inner reader if it's a
+// nopCloser. This is to ensure that body writes sourced from local
+// files (*os.File types) are properly optimized.
+//
+// This function is only intended for use in writeBody.
+func (t *transferWriter) unwrapBody() io.Reader {
+ if r, ok := unwrapNopCloser(t.Body); ok {
+ return r
+ }
+ if r, ok := t.Body.(*readTrackingBody); ok {
+ r.didRead = true
+ return r.ReadCloser
+ }
+ return t.Body
+}
+
+type transferReader struct {
+ // Input
+ Header Header
+ StatusCode int
+ RequestMethod string
+ ProtoMajor int
+ ProtoMinor int
+ // Output
+ Body io.ReadCloser
+ ContentLength int64
+ Chunked bool
+ Close bool
+ Trailer Header
+}
+
+func (t *transferReader) protoAtLeast(m, n int) bool {
+ return t.ProtoMajor > m || (t.ProtoMajor == m && t.ProtoMinor >= n)
+}
+
+// bodyAllowedForStatus reports whether a given response status code
+// permits a body. See RFC 7230, section 3.3.
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+var (
+ suppressedHeaders304 = []string{"Content-Type", "Content-Length", "Transfer-Encoding"}
+ suppressedHeadersNoBody = []string{"Content-Length", "Transfer-Encoding"}
+ excludedHeadersNoBody = map[string]bool{"Content-Length": true, "Transfer-Encoding": true}
+)
+
+func suppressedHeaders(status int) []string {
+ switch {
+ case status == 304:
+ // RFC 7232 section 4.1
+ return suppressedHeaders304
+ case !bodyAllowedForStatus(status):
+ return suppressedHeadersNoBody
+ }
+ return nil
+}
+
+// msg is *Request or *Response.
+func readTransfer(msg any, r *bufio.Reader) (err error) {
+ t := &transferReader{RequestMethod: "GET"}
+
+ // Unify input
+ isResponse := false
+ switch rr := msg.(type) {
+ case *Response:
+ t.Header = rr.Header
+ t.StatusCode = rr.StatusCode
+ t.ProtoMajor = rr.ProtoMajor
+ t.ProtoMinor = rr.ProtoMinor
+ t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header, true)
+ isResponse = true
+ if rr.Request != nil {
+ t.RequestMethod = rr.Request.Method
+ }
+ case *Request:
+ t.Header = rr.Header
+ t.RequestMethod = rr.Method
+ t.ProtoMajor = rr.ProtoMajor
+ t.ProtoMinor = rr.ProtoMinor
+ // Transfer semantics for Requests are exactly like those for
+ // Responses with status code 200, responding to a GET method
+ t.StatusCode = 200
+ t.Close = rr.Close
+ default:
+ panic("unexpected type")
+ }
+
+ // Default to HTTP/1.1
+ if t.ProtoMajor == 0 && t.ProtoMinor == 0 {
+ t.ProtoMajor, t.ProtoMinor = 1, 1
+ }
+
+ // Transfer-Encoding: chunked, and overriding Content-Length.
+ if err := t.parseTransferEncoding(); err != nil {
+ return err
+ }
+
+ realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.Chunked)
+ if err != nil {
+ return err
+ }
+ if isResponse && t.RequestMethod == "HEAD" {
+ if n, err := parseContentLength(t.Header.get("Content-Length")); err != nil {
+ return err
+ } else {
+ t.ContentLength = n
+ }
+ } else {
+ t.ContentLength = realLength
+ }
+
+ // Trailer
+ t.Trailer, err = fixTrailer(t.Header, t.Chunked)
+ if err != nil {
+ return err
+ }
+
+ // If there is no Content-Length or chunked Transfer-Encoding on a *Response
+ // and the status is not 1xx, 204 or 304, then the body is unbounded.
+ // See RFC 7230, section 3.3.
+ switch msg.(type) {
+ case *Response:
+ if realLength == -1 && !t.Chunked && bodyAllowedForStatus(t.StatusCode) {
+ // Unbounded body.
+ t.Close = true
+ }
+ }
+
+ // Prepare body reader. ContentLength < 0 means chunked encoding
+ // or close connection when finished, since multipart is not supported yet
+ switch {
+ case t.Chunked:
+ if noResponseBodyExpected(t.RequestMethod) || !bodyAllowedForStatus(t.StatusCode) {
+ t.Body = NoBody
+ } else {
+ t.Body = &body{src: internal.NewChunkedReader(r), hdr: msg, r: r, closing: t.Close}
+ }
+ case realLength == 0:
+ t.Body = NoBody
+ case realLength > 0:
+ t.Body = &body{src: io.LimitReader(r, realLength), closing: t.Close}
+ default:
+ // realLength < 0, i.e. "Content-Length" not mentioned in header
+ if t.Close {
+ // Close semantics (i.e. HTTP/1.0)
+ t.Body = &body{src: r, closing: t.Close}
+ } else {
+ // Persistent connection (i.e. HTTP/1.1)
+ t.Body = NoBody
+ }
+ }
+
+ // Unify output
+ switch rr := msg.(type) {
+ case *Request:
+ rr.Body = t.Body
+ rr.ContentLength = t.ContentLength
+ if t.Chunked {
+ rr.TransferEncoding = []string{"chunked"}
+ }
+ rr.Close = t.Close
+ rr.Trailer = t.Trailer
+ case *Response:
+ rr.Body = t.Body
+ rr.ContentLength = t.ContentLength
+ if t.Chunked {
+ rr.TransferEncoding = []string{"chunked"}
+ }
+ rr.Close = t.Close
+ rr.Trailer = t.Trailer
+ }
+
+ return nil
+}
+
+// chunked reports whether "chunked" is part of the encodings stack.
+func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" }
+
+// isIdentity reports whether the encoding is explicitly "identity".
+func isIdentity(te []string) bool { return len(te) == 1 && te[0] == "identity" }
+
+// unsupportedTEError reports unsupported transfer-encodings.
+type unsupportedTEError struct {
+ err string
+}
+
+func (uste *unsupportedTEError) Error() string {
+ return uste.err
+}
+
+// isUnsupportedTEError reports whether the error is of type
+// unsupportedTEError. It is usually invoked with a non-nil err.
+func isUnsupportedTEError(err error) bool {
+ _, ok := err.(*unsupportedTEError)
+ return ok
+}
+
+// parseTransferEncoding sets t.Chunked based on the Transfer-Encoding header.
+func (t *transferReader) parseTransferEncoding() error {
+ raw, present := t.Header["Transfer-Encoding"]
+ if !present {
+ return nil
+ }
+ delete(t.Header, "Transfer-Encoding")
+
+ // Issue 12785; ignore Transfer-Encoding on HTTP/1.0 requests.
+ if !t.protoAtLeast(1, 1) {
+ return nil
+ }
+
+ // Like nginx, we only support a single Transfer-Encoding header field, and
+ // only if set to "chunked". This is one of the most security sensitive
+ // surfaces in HTTP/1.1 due to the risk of request smuggling, so we keep it
+ // strict and simple.
+ if len(raw) != 1 {
+ return &unsupportedTEError{fmt.Sprintf("too many transfer encodings: %q", raw)}
+ }
+ if !ascii.EqualFold(raw[0], "chunked") {
+ return &unsupportedTEError{fmt.Sprintf("unsupported transfer encoding: %q", raw[0])}
+ }
+
+ // RFC 7230 3.3.2 says "A sender MUST NOT send a Content-Length header field
+ // in any message that contains a Transfer-Encoding header field."
+ //
+ // but also: "If a message is received with both a Transfer-Encoding and a
+ // Content-Length header field, the Transfer-Encoding overrides the
+ // Content-Length. Such a message might indicate an attempt to perform
+ // request smuggling (Section 9.5) or response splitting (Section 9.4) and
+ // ought to be handled as an error. A sender MUST remove the received
+ // Content-Length field prior to forwarding such a message downstream."
+ //
+ // Reportedly, these appear in the wild.
+ delete(t.Header, "Content-Length")
+
+ t.Chunked = true
+ return nil
+}
+
+// Determine the expected body length, using RFC 7230 Section 3.3. This
+// function is not a method, because ultimately it should be shared by
+// ReadResponse and ReadRequest.
+func fixLength(isResponse bool, status int, requestMethod string, header Header, chunked bool) (int64, error) {
+ isRequest := !isResponse
+ contentLens := header["Content-Length"]
+
+ // Hardening against HTTP request smuggling
+ if len(contentLens) > 1 {
+ // Per RFC 7230 Section 3.3.2, prevent multiple
+ // Content-Length headers if they differ in value.
+ // If there are dups of the value, remove the dups.
+ // See Issue 16490.
+ first := textproto.TrimString(contentLens[0])
+ for _, ct := range contentLens[1:] {
+ if first != textproto.TrimString(ct) {
+ return 0, fmt.Errorf("http: message cannot contain multiple Content-Length headers; got %q", contentLens)
+ }
+ }
+
+ // deduplicate Content-Length
+ header.Del("Content-Length")
+ header.Add("Content-Length", first)
+
+ contentLens = header["Content-Length"]
+ }
+
+ // Logic based on response type or status
+ if noResponseBodyExpected(requestMethod) {
+ // For HTTP requests, as part of hardening against request
+ // smuggling (RFC 7230), don't allow a Content-Length header for
+ // methods which don't permit bodies. As an exception, allow
+ // exactly one Content-Length header if its value is "0".
+ if isRequest && len(contentLens) > 0 && !(len(contentLens) == 1 && contentLens[0] == "0") {
+ return 0, fmt.Errorf("http: method cannot contain a Content-Length; got %q", contentLens)
+ }
+ return 0, nil
+ }
+ if status/100 == 1 {
+ return 0, nil
+ }
+ switch status {
+ case 204, 304:
+ return 0, nil
+ }
+
+ // Logic based on Transfer-Encoding
+ if chunked {
+ return -1, nil
+ }
+
+ // Logic based on Content-Length
+ var cl string
+ if len(contentLens) == 1 {
+ cl = textproto.TrimString(contentLens[0])
+ }
+ if cl != "" {
+ n, err := parseContentLength(cl)
+ if err != nil {
+ return -1, err
+ }
+ return n, nil
+ }
+ header.Del("Content-Length")
+
+ if isRequest {
+ // RFC 7230 neither explicitly permits nor forbids an
+ // entity-body on a GET request so we permit one if
+ // declared, but we default to 0 here (not -1 below)
+ // if there's no mention of a body.
+ // Likewise, all other request methods are assumed to have
+ // no body if neither Transfer-Encoding chunked nor a
+ // Content-Length are set.
+ return 0, nil
+ }
+
+ // Body-EOF logic based on other methods (like closing, or chunked coding)
+ return -1, nil
+}
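+
+// A quick reference for fixLength's result (illustrative, not part of the
+// original source): n >= 0 is an exact byte count, -1 means the body length
+// is unknown (chunked, or bounded only by connection close).
+//
+//	HEAD request, or 1xx/204/304 response        -> 0
+//	Transfer-Encoding: chunked                   -> -1
+//	Content-Length: 42                           -> 42
+//	request with neither chunked nor a length    -> 0 (no body assumed)
+//	response with neither chunked nor a length   -> -1 (read until close)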
+
+// Determine whether to hang up after sending a request and body, or
+// receiving a response and body.
+// 'header' is the request headers.
+func shouldClose(major, minor int, header Header, removeCloseHeader bool) bool {
+ if major < 1 {
+ return true
+ }
+
+ conv := header["Connection"]
+ hasClose := httpguts.HeaderValuesContainsToken(conv, "close")
+ if major == 1 && minor == 0 {
+ return hasClose || !httpguts.HeaderValuesContainsToken(conv, "keep-alive")
+ }
+
+ if hasClose && removeCloseHeader {
+ header.Del("Connection")
+ }
+
+ return hasClose
+}
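+
+// Illustrative truth table (not part of the original source):
+//
+//	HTTP/0.9                            -> true
+//	HTTP/1.0, no Connection header      -> true (keep-alive is opt-in)
+//	HTTP/1.0, Connection: keep-alive    -> false
+//	HTTP/1.1, no Connection header      -> false (persistent by default)
+//	HTTP/1.1, Connection: close         -> true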
+
+// Parse the trailer header
+func fixTrailer(header Header, chunked bool) (Header, error) {
+ vv, ok := header["Trailer"]
+ if !ok {
+ return nil, nil
+ }
+ if !chunked {
+ // Trailer and no chunking:
+ // this is an invalid use case for trailer header.
+ // Nevertheless, no error will be returned and we
+ // let users decide if this is a valid HTTP message.
+ // The Trailer header will be kept in Response.Header
+ // but will not populate Response.Trailer.
+ // See issue #27197.
+ return nil, nil
+ }
+ header.Del("Trailer")
+
+ trailer := make(Header)
+ var err error
+ for _, v := range vv {
+ foreachHeaderElement(v, func(key string) {
+ key = CanonicalHeaderKey(key)
+ switch key {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ if err == nil {
+ err = badStringError("bad trailer key", key)
+ return
+ }
+ }
+ trailer[key] = nil
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ if len(trailer) == 0 {
+ return nil, nil
+ }
+ return trailer, nil
+}
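+
+// For example (illustrative, not part of the original source; "X-Checksum" is
+// a made-up header name), a chunked message with "Trailer: Expires, X-Checksum"
+// yields a Trailer map with the canonical keys "Expires" and "X-Checksum" and
+// nil values (filled in only when the trailer is actually read), while
+// "Trailer: Content-Length" is rejected with a "bad trailer key" error.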
+
+// body turns a Reader into a ReadCloser.
+// Close ensures that the body has been fully read
+// and then reads the trailer if necessary.
+type body struct {
+ src io.Reader
+ hdr any // non-nil (Response or Request) value means read trailer
+ r *bufio.Reader // underlying wire-format reader for the trailer
+ closing bool // is the connection to be closed after reading body?
+ doEarlyClose bool // whether Close should stop early
+
+ mu sync.Mutex // guards following, and calls to Read and Close
+ sawEOF bool
+ closed bool
+ earlyClose bool // Close called and we didn't read to the end of src
+ onHitEOF func() // if non-nil, func to call when EOF is Read
+}
+
+// ErrBodyReadAfterClose is returned when reading a Request or Response
+// Body after the body has been closed. This typically happens when the body is
+// read after an HTTP Handler calls WriteHeader or Write on its
+// ResponseWriter.
+var ErrBodyReadAfterClose = errors.New("http: invalid Read on closed Body")
+
+func (b *body) Read(p []byte) (n int, err error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.closed {
+ return 0, ErrBodyReadAfterClose
+ }
+ return b.readLocked(p)
+}
+
+// Must hold b.mu.
+func (b *body) readLocked(p []byte) (n int, err error) {
+ if b.sawEOF {
+ return 0, io.EOF
+ }
+ n, err = b.src.Read(p)
+
+ if err == io.EOF {
+ b.sawEOF = true
+ // Chunked case. Read the trailer.
+ if b.hdr != nil {
+ if e := b.readTrailer(); e != nil {
+ err = e
+ // Something went wrong in the trailer, we must not allow any
+ // further reads of any kind to succeed from body, nor any
+ // subsequent requests on the server connection. See
+ // golang.org/issue/12027
+ b.sawEOF = false
+ b.closed = true
+ }
+ b.hdr = nil
+ } else {
+ // If the server declared the Content-Length, our body is a LimitedReader
+ // and we need to check whether this EOF arrived early.
+ if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > 0 {
+ err = io.ErrUnexpectedEOF
+ }
+ }
+ }
+
+ // If we can return an EOF here along with the read data, do
+ // so. This is optional per the io.Reader contract, but doing
+ // so helps the HTTP transport code recycle its connection
+ // earlier (since it will see this EOF itself), even if the
+ // client doesn't do future reads or Close.
+ if err == nil && n > 0 {
+ if lr, ok := b.src.(*io.LimitedReader); ok && lr.N == 0 {
+ err = io.EOF
+ b.sawEOF = true
+ }
+ }
+
+ if b.sawEOF && b.onHitEOF != nil {
+ b.onHitEOF()
+ }
+
+ return n, err
+}
+
+var (
+ singleCRLF = []byte("\r\n")
+ doubleCRLF = []byte("\r\n\r\n")
+)
+
+func seeUpcomingDoubleCRLF(r *bufio.Reader) bool {
+ for peekSize := 4; ; peekSize++ {
+ // This loop stops when Peek returns an error,
+ // which it does when r's buffer has been filled.
+ buf, err := r.Peek(peekSize)
+ if bytes.HasSuffix(buf, doubleCRLF) {
+ return true
+ }
+ if err != nil {
+ break
+ }
+ }
+ return false
+}
+
+var errTrailerEOF = errors.New("http: unexpected EOF reading trailer")
+
+func (b *body) readTrailer() error {
+ // The common case, since nobody uses trailers.
+ buf, err := b.r.Peek(2)
+ if bytes.Equal(buf, singleCRLF) {
+ b.r.Discard(2)
+ return nil
+ }
+ if len(buf) < 2 {
+ return errTrailerEOF
+ }
+ if err != nil {
+ return err
+ }
+
+ // Make sure there's a header terminator coming up, to prevent
+ // a DoS with an unbounded size Trailer. It's not easy to
+ // slip in a LimitReader here, as textproto.NewReader requires
+ // a concrete *bufio.Reader. Also, we can't get all the way
+ // back up to our conn's LimitedReader that *might* be backing
+ // this bufio.Reader. Instead, a hack: we iteratively Peek up
+ // to the bufio.Reader's max size, looking for a double CRLF.
+ // This limits the trailer to the underlying buffer size, typically 4kB.
+ if !seeUpcomingDoubleCRLF(b.r) {
+ return errors.New("http: suspiciously long trailer after chunked body")
+ }
+
+ hdr, err := textproto.NewReader(b.r).ReadMIMEHeader()
+ if err != nil {
+ if err == io.EOF {
+ return errTrailerEOF
+ }
+ return err
+ }
+ switch rr := b.hdr.(type) {
+ case *Request:
+ mergeSetHeader(&rr.Trailer, Header(hdr))
+ case *Response:
+ mergeSetHeader(&rr.Trailer, Header(hdr))
+ }
+ return nil
+}
+
+func mergeSetHeader(dst *Header, src Header) {
+ if *dst == nil {
+ *dst = src
+ return
+ }
+ for k, vv := range src {
+ (*dst)[k] = vv
+ }
+}
+
+// unreadDataSizeLocked returns the number of bytes of unread input.
+// It returns -1 if unknown.
+// b.mu must be held.
+func (b *body) unreadDataSizeLocked() int64 {
+ if lr, ok := b.src.(*io.LimitedReader); ok {
+ return lr.N
+ }
+ return -1
+}
+
+func (b *body) Close() error {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.closed {
+ return nil
+ }
+ var err error
+ switch {
+ case b.sawEOF:
+ // Already saw EOF, so no need to go looking for it.
+ case b.hdr == nil && b.closing:
+ // no trailer and closing the connection next.
+ // no point in reading to EOF.
+ case b.doEarlyClose:
+ // Read up to maxPostHandlerReadBytes bytes of the body, looking
+ // for EOF (and trailers), so we can re-use this connection.
+ if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > maxPostHandlerReadBytes {
+ // There was a declared Content-Length, and we have more bytes remaining
+ // than our maxPostHandlerReadBytes tolerance. So, give up.
+ b.earlyClose = true
+ } else {
+ var n int64
+ // Consume the body, which will also lead to us reading
+ // the trailer headers after the body, if present.
+ n, err = io.CopyN(io.Discard, bodyLocked{b}, maxPostHandlerReadBytes)
+ if err == io.EOF {
+ err = nil
+ }
+ if n == maxPostHandlerReadBytes {
+ b.earlyClose = true
+ }
+ }
+ default:
+ // Fully consume the body, which will also lead to us reading
+ // the trailer headers after the body, if present.
+ _, err = io.Copy(io.Discard, bodyLocked{b})
+ }
+ b.closed = true
+ return err
+}
+
+func (b *body) didEarlyClose() bool {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ return b.earlyClose
+}
+
+// bodyRemains reports whether future Read calls might
+// yield data.
+func (b *body) bodyRemains() bool {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ return !b.sawEOF
+}
+
+func (b *body) registerOnHitEOF(fn func()) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.onHitEOF = fn
+}
+
+// bodyLocked is an io.Reader reading from a *body when its mutex is
+// already held.
+type bodyLocked struct {
+ b *body
+}
+
+func (bl bodyLocked) Read(p []byte) (n int, err error) {
+ if bl.b.closed {
+ return 0, ErrBodyReadAfterClose
+ }
+ return bl.b.readLocked(p)
+}
+
+// parseContentLength trims whitespace from cl and returns -1 if no value
+// is set, or the value if it's >= 0.
+func parseContentLength(cl string) (int64, error) {
+ cl = textproto.TrimString(cl)
+ if cl == "" {
+ return -1, nil
+ }
+ n, err := strconv.ParseUint(cl, 10, 63)
+ if err != nil {
+ return 0, badStringError("bad Content-Length", cl)
+ }
+ return int64(n), nil
+}
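+
+// Illustrative inputs and results (not part of the original source):
+//
+//	parseContentLength("")       -> (-1, nil)  // no value set
+//	parseContentLength("  42 ")  -> (42, nil)
+//	parseContentLength("-1")     -> error      // ParseUint rejects the sign
+//	parseContentLength("10e3")   -> error      // not a decimal integer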
+
+// finishAsyncByteRead finishes reading the 1-byte sniff
+// from the ContentLength==0, Body!=nil case.
+type finishAsyncByteRead struct {
+ tw *transferWriter
+}
+
+func (fr finishAsyncByteRead) Read(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ return
+ }
+ rres := <-fr.tw.ByteReadCh
+ n, err = rres.n, rres.err
+ if n == 1 {
+ p[0] = rres.b
+ }
+ if err == nil {
+ err = io.EOF
+ }
+ return
+}
+
+var nopCloserType = reflect.TypeOf(io.NopCloser(nil))
+var nopCloserWriterToType = reflect.TypeOf(io.NopCloser(struct {
+ io.Reader
+ io.WriterTo
+}{}))
+
+// unwrapNopCloser returns the underlying reader and true if r is a NopCloser;
+// otherwise it returns nil and false.
+func unwrapNopCloser(r io.Reader) (underlyingReader io.Reader, isNopCloser bool) {
+ switch reflect.TypeOf(r) {
+ case nopCloserType, nopCloserWriterToType:
+ return reflect.ValueOf(r).Field(0).Interface().(io.Reader), true
+ default:
+ return nil, false
+ }
+}
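+
+// For illustration (not part of the original source):
+//
+//	r, ok := unwrapNopCloser(io.NopCloser(strings.NewReader("x")))
+//	// ok == true, r is the wrapped *strings.Reader
+//	r, ok = unwrapNopCloser(strings.NewReader("x"))
+//	// ok == false; the reader was not created by io.NopCloser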
+
+// isKnownInMemoryReader reports whether r is a type known to not
+// block on Read. Its caller uses this as an optional optimization to
+// send fewer TCP packets.
+func isKnownInMemoryReader(r io.Reader) bool {
+ switch r.(type) {
+ case *bytes.Reader, *bytes.Buffer, *strings.Reader:
+ return true
+ }
+ if r, ok := unwrapNopCloser(r); ok {
+ return isKnownInMemoryReader(r)
+ }
+ if r, ok := r.(*readTrackingBody); ok {
+ return isKnownInMemoryReader(r.ReadCloser)
+ }
+ return false
+}
+
+// bufioFlushWriter is an io.Writer wrapper that flushes all writes
+// on its wrapped writer if it's a *bufio.Writer.
+type bufioFlushWriter struct{ w io.Writer }
+
+func (fw bufioFlushWriter) Write(p []byte) (n int, err error) {
+ n, err = fw.w.Write(p)
+ if bw, ok := fw.w.(*bufio.Writer); n > 0 && ok {
+ ferr := bw.Flush()
+ if ferr != nil && err == nil {
+ err = ferr
+ }
+ }
+ return
+}
diff --git a/contrib/go/_std_1.19/src/net/http/transport.go b/contrib/go/_std_1.19/src/net/http/transport.go
new file mode 100644
index 0000000000..e470a6c080
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/http/transport.go
@@ -0,0 +1,2906 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP client implementation. See RFC 7230 through 7235.
+//
+// This is the low-level Transport implementation of RoundTripper.
+// The high-level interface is in client.go.
+
+package http
+
+import (
+ "bufio"
+ "compress/gzip"
+ "container/list"
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "internal/godebug"
+ "io"
+ "log"
+ "net"
+ "net/http/httptrace"
+ "net/http/internal/ascii"
+ "net/textproto"
+ "net/url"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/http/httpguts"
+ "golang.org/x/net/http/httpproxy"
+)
+
+// DefaultTransport is the default implementation of Transport and is
+// used by DefaultClient. It establishes network connections as needed
+// and caches them for reuse by subsequent calls. It uses HTTP proxies
+// as directed by the $HTTP_PROXY and $NO_PROXY (or $http_proxy and
+// $no_proxy) environment variables.
+var DefaultTransport RoundTripper = &Transport{
+ Proxy: ProxyFromEnvironment,
+ DialContext: defaultTransportDialContext(&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }),
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+}
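+
+// A minimal sketch (not part of the original source) of an importing package
+// building its own Transport instead of mutating DefaultTransport; the field
+// values are arbitrary examples:
+//
+//	t := &http.Transport{
+//		Proxy:               http.ProxyFromEnvironment,
+//		MaxIdleConns:        50,
+//		IdleConnTimeout:     30 * time.Second,
+//		TLSHandshakeTimeout: 5 * time.Second,
+//	}
+//	client := &http.Client{Transport: t}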
+
+// DefaultMaxIdleConnsPerHost is the default value of Transport's
+// MaxIdleConnsPerHost.
+const DefaultMaxIdleConnsPerHost = 2
+
+// Transport is an implementation of RoundTripper that supports HTTP,
+// HTTPS, and HTTP proxies (for either HTTP or HTTPS with CONNECT).
+//
+// By default, Transport caches connections for future re-use.
+// This may leave many open connections when accessing many hosts.
+// This behavior can be managed using Transport's CloseIdleConnections method
+// and the MaxIdleConnsPerHost and DisableKeepAlives fields.
+//
+// Transports should be reused instead of created as needed.
+// Transports are safe for concurrent use by multiple goroutines.
+//
+// A Transport is a low-level primitive for making HTTP and HTTPS requests.
+// For high-level functionality, such as cookies and redirects, see Client.
+//
+// Transport uses HTTP/1.1 for HTTP URLs and either HTTP/1.1 or HTTP/2
+// for HTTPS URLs, depending on whether the server supports HTTP/2,
+// and how the Transport is configured. The DefaultTransport supports HTTP/2.
+// To explicitly enable HTTP/2 on a transport, use golang.org/x/net/http2
+// and call ConfigureTransport. See the package docs for more about HTTP/2.
+//
+// Responses with status codes in the 1xx range are either handled
+// automatically (100 expect-continue) or ignored. The one
+// exception is HTTP status code 101 (Switching Protocols), which is
+// considered a terminal status and returned by RoundTrip. To see the
+// ignored 1xx responses, use the httptrace trace package's
+// ClientTrace.Got1xxResponse.
+//
+// Transport only retries a request upon encountering a network error
+// if the request is idempotent and either has no body or has its
+// Request.GetBody defined. HTTP requests are considered idempotent if
+// they have HTTP methods GET, HEAD, OPTIONS, or TRACE; or if their
+// Header map contains an "Idempotency-Key" or "X-Idempotency-Key"
+// entry. If the idempotency key value is a zero-length slice, the
+// request is treated as idempotent but the header is not sent on the
+// wire.
+type Transport struct {
+ idleMu sync.Mutex
+ closeIdle bool // user has requested to close all idle conns
+ idleConn map[connectMethodKey][]*persistConn // most recently used at end
+ idleConnWait map[connectMethodKey]wantConnQueue // waiting getConns
+ idleLRU connLRU
+
+ reqMu sync.Mutex
+ reqCanceler map[cancelKey]func(error)
+
+ altMu sync.Mutex // guards changing altProto only
+ altProto atomic.Value // of nil or map[string]RoundTripper, key is URI scheme
+
+ connsPerHostMu sync.Mutex
+ connsPerHost map[connectMethodKey]int
+ connsPerHostWait map[connectMethodKey]wantConnQueue // waiting getConns
+
+ // Proxy specifies a function to return a proxy for a given
+ // Request. If the function returns a non-nil error, the
+ // request is aborted with the provided error.
+ //
+ // The proxy type is determined by the URL scheme. "http",
+ // "https", and "socks5" are supported. If the scheme is empty,
+ // "http" is assumed.
+ //
+ // If Proxy is nil or returns a nil *URL, no proxy is used.
+ Proxy func(*Request) (*url.URL, error)
+
+ // DialContext specifies the dial function for creating unencrypted TCP connections.
+ // If DialContext is nil (and the deprecated Dial below is also nil),
+ // then the transport dials using package net.
+ //
+ // DialContext runs concurrently with calls to RoundTrip.
+ // A RoundTrip call that initiates a dial may end up using
+ // a connection dialed previously when the earlier connection
+ // becomes idle before the later DialContext completes.
+ DialContext func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // Dial specifies the dial function for creating unencrypted TCP connections.
+ //
+ // Dial runs concurrently with calls to RoundTrip.
+ // A RoundTrip call that initiates a dial may end up using
+ // a connection dialed previously when the earlier connection
+ // becomes idle before the later Dial completes.
+ //
+ // Deprecated: Use DialContext instead, which allows the transport
+ // to cancel dials as soon as they are no longer needed.
+ // If both are set, DialContext takes priority.
+ Dial func(network, addr string) (net.Conn, error)
+
+ // DialTLSContext specifies an optional dial function for creating
+ // TLS connections for non-proxied HTTPS requests.
+ //
+ // If DialTLSContext is nil (and the deprecated DialTLS below is also nil),
+ // DialContext and TLSClientConfig are used.
+ //
+ // If DialTLSContext is set, the Dial and DialContext hooks are not used for HTTPS
+ // requests and the TLSClientConfig and TLSHandshakeTimeout
+ // are ignored. The returned net.Conn is assumed to already be
+ // past the TLS handshake.
+ DialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // DialTLS specifies an optional dial function for creating
+ // TLS connections for non-proxied HTTPS requests.
+ //
+ // Deprecated: Use DialTLSContext instead, which allows the transport
+ // to cancel dials as soon as they are no longer needed.
+ // If both are set, DialTLSContext takes priority.
+ DialTLS func(network, addr string) (net.Conn, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with
+ // tls.Client.
+ // If nil, the default configuration is used.
+ // If non-nil, HTTP/2 support may not be enabled by default.
+ TLSClientConfig *tls.Config
+
+ // TLSHandshakeTimeout specifies the maximum amount of time to
+ // wait for a TLS handshake. Zero means no timeout.
+ TLSHandshakeTimeout time.Duration
+
+ // DisableKeepAlives, if true, disables HTTP keep-alives and
+ // will only use the connection to the server for a single
+ // HTTP request.
+ //
+ // This is unrelated to the similarly named TCP keep-alives.
+ DisableKeepAlives bool
+
+ // DisableCompression, if true, prevents the Transport from
+ // requesting compression with an "Accept-Encoding: gzip"
+ // request header when the Request contains no existing
+ // Accept-Encoding value. If the Transport requests gzip on
+ // its own and gets a gzipped response, it's transparently
+ // decoded in the Response.Body. However, if the user
+ // explicitly requested gzip it is not automatically
+ // uncompressed.
+ DisableCompression bool
+
+ // MaxIdleConns controls the maximum number of idle (keep-alive)
+ // connections across all hosts. Zero means no limit.
+ MaxIdleConns int
+
+ // MaxIdleConnsPerHost, if non-zero, controls the maximum idle
+ // (keep-alive) connections to keep per-host. If zero,
+ // DefaultMaxIdleConnsPerHost is used.
+ MaxIdleConnsPerHost int
+
+ // MaxConnsPerHost optionally limits the total number of
+ // connections per host, including connections in the dialing,
+ // active, and idle states. On limit violation, dials will block.
+ //
+ // Zero means no limit.
+ MaxConnsPerHost int
+
+ // IdleConnTimeout is the maximum amount of time an idle
+ // (keep-alive) connection will remain idle before closing
+ // itself.
+ // Zero means no limit.
+ IdleConnTimeout time.Duration
+
+ // ResponseHeaderTimeout, if non-zero, specifies the amount of
+ // time to wait for a server's response headers after fully
+ // writing the request (including its body, if any). This
+ // time does not include the time to read the response body.
+ ResponseHeaderTimeout time.Duration
+
+ // ExpectContinueTimeout, if non-zero, specifies the amount of
+ // time to wait for a server's first response headers after fully
+ // writing the request headers if the request has an
+ // "Expect: 100-continue" header. Zero means no timeout and
+ // causes the body to be sent immediately, without
+ // waiting for the server to approve.
+ // This time does not include the time to send the request header.
+ ExpectContinueTimeout time.Duration
+
+ // TLSNextProto specifies how the Transport switches to an
+ // alternate protocol (such as HTTP/2) after a TLS ALPN
+ // protocol negotiation. If Transport dials a TLS connection
+ // with a non-empty protocol name and TLSNextProto contains a
+ // map entry for that key (such as "h2"), then the func is
+ // called with the request's authority (such as "example.com"
+ // or "example.com:1234") and the TLS connection. The function
+ // must return a RoundTripper that then handles the request.
+ // If TLSNextProto is not nil, HTTP/2 support is not enabled
+ // automatically.
+ TLSNextProto map[string]func(authority string, c *tls.Conn) RoundTripper
+
+ // ProxyConnectHeader optionally specifies headers to send to
+ // proxies during CONNECT requests.
+ // To set the header dynamically, see GetProxyConnectHeader.
+ ProxyConnectHeader Header
+
+ // GetProxyConnectHeader optionally specifies a func to return
+ // headers to send to proxyURL during a CONNECT request to the
+ // ip:port target.
+ // If it returns an error, the Transport's RoundTrip fails with
+ // that error. It can return (nil, nil) to not add headers.
+ // If GetProxyConnectHeader is non-nil, ProxyConnectHeader is
+ // ignored.
+ GetProxyConnectHeader func(ctx context.Context, proxyURL *url.URL, target string) (Header, error)
+
+ // MaxResponseHeaderBytes specifies a limit on how many
+ // response bytes are allowed in the server's response
+ // header.
+ //
+ // Zero means to use a default limit.
+ MaxResponseHeaderBytes int64
+
+ // WriteBufferSize specifies the size of the write buffer used
+ // when writing to the transport.
+ // If zero, a default (currently 4KB) is used.
+ WriteBufferSize int
+
+ // ReadBufferSize specifies the size of the read buffer used
+ // when reading from the transport.
+ // If zero, a default (currently 4KB) is used.
+ ReadBufferSize int
+
+ // nextProtoOnce guards initialization of TLSNextProto and
+ // h2transport (via onceSetNextProtoDefaults)
+ nextProtoOnce sync.Once
+ h2transport h2Transport // non-nil if http2 wired up
+ tlsNextProtoWasNil bool // whether TLSNextProto was nil when the Once fired
+
+ // ForceAttemptHTTP2 controls whether HTTP/2 is enabled when a non-zero
+ // Dial, DialTLS, or DialContext func or TLSClientConfig is provided.
+ // By default, use of any of those fields conservatively disables HTTP/2.
+ // To use a custom dialer or TLS config and still attempt HTTP/2
+ // upgrades, set this to true.
+ ForceAttemptHTTP2 bool
+}
+
+// A cancelKey is the key of the reqCanceler map.
+// We wrap the *Request in this type since we want to use the original request,
+// not any transient one created by roundTrip.
+type cancelKey struct {
+ req *Request
+}
+
+func (t *Transport) writeBufferSize() int {
+ if t.WriteBufferSize > 0 {
+ return t.WriteBufferSize
+ }
+ return 4 << 10
+}
+
+func (t *Transport) readBufferSize() int {
+ if t.ReadBufferSize > 0 {
+ return t.ReadBufferSize
+ }
+ return 4 << 10
+}
+
+// Clone returns a deep copy of t's exported fields.
+func (t *Transport) Clone() *Transport {
+ t.nextProtoOnce.Do(t.onceSetNextProtoDefaults)
+ t2 := &Transport{
+ Proxy: t.Proxy,
+ DialContext: t.DialContext,
+ Dial: t.Dial,
+ DialTLS: t.DialTLS,
+ DialTLSContext: t.DialTLSContext,
+ TLSHandshakeTimeout: t.TLSHandshakeTimeout,
+ DisableKeepAlives: t.DisableKeepAlives,
+ DisableCompression: t.DisableCompression,
+ MaxIdleConns: t.MaxIdleConns,
+ MaxIdleConnsPerHost: t.MaxIdleConnsPerHost,
+ MaxConnsPerHost: t.MaxConnsPerHost,
+ IdleConnTimeout: t.IdleConnTimeout,
+ ResponseHeaderTimeout: t.ResponseHeaderTimeout,
+ ExpectContinueTimeout: t.ExpectContinueTimeout,
+ ProxyConnectHeader: t.ProxyConnectHeader.Clone(),
+ GetProxyConnectHeader: t.GetProxyConnectHeader,
+ MaxResponseHeaderBytes: t.MaxResponseHeaderBytes,
+ ForceAttemptHTTP2: t.ForceAttemptHTTP2,
+ WriteBufferSize: t.WriteBufferSize,
+ ReadBufferSize: t.ReadBufferSize,
+ }
+ if t.TLSClientConfig != nil {
+ t2.TLSClientConfig = t.TLSClientConfig.Clone()
+ }
+ if !t.tlsNextProtoWasNil {
+ npm := map[string]func(authority string, c *tls.Conn) RoundTripper{}
+ for k, v := range t.TLSNextProto {
+ npm[k] = v
+ }
+ t2.TLSNextProto = npm
+ }
+ return t2
+}
+
+// h2Transport is the interface we expect to be able to call from
+// net/http against an *http2.Transport that's either bundled into
+// h2_bundle.go or supplied by the user via x/net/http2.
+//
+// We name it with the "h2" prefix to stay out of the "http2" prefix
+// namespace used by x/tools/cmd/bundle for h2_bundle.go.
+type h2Transport interface {
+ CloseIdleConnections()
+}
+
+func (t *Transport) hasCustomTLSDialer() bool {
+ return t.DialTLS != nil || t.DialTLSContext != nil
+}
+
+// onceSetNextProtoDefaults initializes TLSNextProto.
+// It must be called via t.nextProtoOnce.Do.
+func (t *Transport) onceSetNextProtoDefaults() {
+ t.tlsNextProtoWasNil = (t.TLSNextProto == nil)
+ if godebug.Get("http2client") == "0" {
+ return
+ }
+
+ // If they've already configured http2 with
+ // golang.org/x/net/http2 instead of the bundled copy, try to
+ // get at its http2.Transport value (via the "https"
+ // altproto map) so we can call CloseIdleConnections on it if
+ // requested. (Issue 22891)
+ altProto, _ := t.altProto.Load().(map[string]RoundTripper)
+ if rv := reflect.ValueOf(altProto["https"]); rv.IsValid() && rv.Type().Kind() == reflect.Struct && rv.Type().NumField() == 1 {
+ if v := rv.Field(0); v.CanInterface() {
+ if h2i, ok := v.Interface().(h2Transport); ok {
+ t.h2transport = h2i
+ return
+ }
+ }
+ }
+
+ if t.TLSNextProto != nil {
+ // This is the documented way to disable http2 on a
+ // Transport.
+ return
+ }
+ if !t.ForceAttemptHTTP2 && (t.TLSClientConfig != nil || t.Dial != nil || t.DialContext != nil || t.hasCustomTLSDialer()) {
+ // Be conservative and don't automatically enable
+ // http2 if they've specified a custom TLS config or
+ // custom dialers. Let them opt-in themselves via
+ // http2.ConfigureTransport so we don't surprise them
+ // by modifying their tls.Config. Issue 14275.
+ // However, if ForceAttemptHTTP2 is true, it overrides the above checks.
+ return
+ }
+ if omitBundledHTTP2 {
+ return
+ }
+ t2, err := http2configureTransports(t)
+ if err != nil {
+ log.Printf("Error enabling Transport HTTP/2 support: %v", err)
+ return
+ }
+ t.h2transport = t2
+
+ // Auto-configure the http2.Transport's MaxHeaderListSize from
+ // the http.Transport's MaxResponseHeaderBytes. They don't
+ // exactly mean the same thing, but they're close.
+ //
+ // TODO: also add this to x/net/http2.ConfigureTransport, behind
+ // a +build go1.7 build tag:
+ if limit1 := t.MaxResponseHeaderBytes; limit1 != 0 && t2.MaxHeaderListSize == 0 {
+ const h2max = 1<<32 - 1
+ if limit1 >= h2max {
+ t2.MaxHeaderListSize = h2max
+ } else {
+ t2.MaxHeaderListSize = uint32(limit1)
+ }
+ }
+}
+
+// ProxyFromEnvironment returns the URL of the proxy to use for a
+// given request, as indicated by the environment variables
+// HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions
+// thereof). HTTPS_PROXY takes precedence over HTTP_PROXY for https
+// requests.
+//
+// The environment values may be either a complete URL or a
+// "host[:port]", in which case the "http" scheme is assumed.
+// The schemes "http", "https", and "socks5" are supported.
+// An error is returned if the value is a different form.
+//
+// A nil URL and nil error are returned if no proxy is defined in the
+// environment, or a proxy should not be used for the given request,
+// as defined by NO_PROXY.
+//
+// As a special case, if req.URL.Host is "localhost" (with or without
+// a port number), then a nil URL and nil error will be returned.
+func ProxyFromEnvironment(req *Request) (*url.URL, error) {
+ return envProxyFunc()(req.URL)
+}
+
+// ProxyURL returns a proxy function (for use in a Transport)
+// that always returns the same URL.
+func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) {
+ return func(*Request) (*url.URL, error) {
+ return fixedURL, nil
+ }
+}
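+
+// Example usage (illustrative, not part of the original source); the proxy
+// address is a made-up placeholder:
+//
+//	proxyURL, _ := url.Parse("http://proxy.example:3128")
+//	t := &http.Transport{Proxy: http.ProxyURL(proxyURL)}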
+
+// transportRequest is a wrapper around a *Request that adds
+// optional extra headers to write and stores any error to return
+// from roundTrip.
+type transportRequest struct {
+ *Request // original request, not to be mutated
+ extra Header // extra headers to write, or nil
+ trace *httptrace.ClientTrace // optional
+ cancelKey cancelKey
+
+ mu sync.Mutex // guards err
+ err error // first setError value for mapRoundTripError to consider
+}
+
+func (tr *transportRequest) extraHeaders() Header {
+ if tr.extra == nil {
+ tr.extra = make(Header)
+ }
+ return tr.extra
+}
+
+func (tr *transportRequest) setError(err error) {
+ tr.mu.Lock()
+ if tr.err == nil {
+ tr.err = err
+ }
+ tr.mu.Unlock()
+}
+
+// useRegisteredProtocol reports whether an alternate protocol (as registered
+// with Transport.RegisterProtocol) should be respected for this request.
+func (t *Transport) useRegisteredProtocol(req *Request) bool {
+ if req.URL.Scheme == "https" && req.requiresHTTP1() {
+ // If this request requires HTTP/1, don't use the
+ // "https" alternate protocol, which is used by the
+ // HTTP/2 code to take over requests if there's an
+ // existing cached HTTP/2 connection.
+ return false
+ }
+ return true
+}
+
+// alternateRoundTripper returns the alternate RoundTripper to use
+// for this request if the Request's URL scheme requires one,
+// or nil for the normal case of using the Transport.
+func (t *Transport) alternateRoundTripper(req *Request) RoundTripper {
+ if !t.useRegisteredProtocol(req) {
+ return nil
+ }
+ altProto, _ := t.altProto.Load().(map[string]RoundTripper)
+ return altProto[req.URL.Scheme]
+}
+
+// roundTrip implements a RoundTripper over HTTP.
+func (t *Transport) roundTrip(req *Request) (*Response, error) {
+ t.nextProtoOnce.Do(t.onceSetNextProtoDefaults)
+ ctx := req.Context()
+ trace := httptrace.ContextClientTrace(ctx)
+
+ if req.URL == nil {
+ req.closeBody()
+ return nil, errors.New("http: nil Request.URL")
+ }
+ if req.Header == nil {
+ req.closeBody()
+ return nil, errors.New("http: nil Request.Header")
+ }
+ scheme := req.URL.Scheme
+ isHTTP := scheme == "http" || scheme == "https"
+ if isHTTP {
+ for k, vv := range req.Header {
+ if !httpguts.ValidHeaderFieldName(k) {
+ req.closeBody()
+ return nil, fmt.Errorf("net/http: invalid header field name %q", k)
+ }
+ for _, v := range vv {
+ if !httpguts.ValidHeaderFieldValue(v) {
+ req.closeBody()
+ // Don't include the value in the error, because it may be sensitive.
+ return nil, fmt.Errorf("net/http: invalid header field value for %q", k)
+ }
+ }
+ }
+ }
+
+ origReq := req
+ cancelKey := cancelKey{origReq}
+ req = setupRewindBody(req)
+
+ if altRT := t.alternateRoundTripper(req); altRT != nil {
+ if resp, err := altRT.RoundTrip(req); err != ErrSkipAltProtocol {
+ return resp, err
+ }
+ var err error
+ req, err = rewindBody(req)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if !isHTTP {
+ req.closeBody()
+ return nil, badStringError("unsupported protocol scheme", scheme)
+ }
+ if req.Method != "" && !validMethod(req.Method) {
+ req.closeBody()
+ return nil, fmt.Errorf("net/http: invalid method %q", req.Method)
+ }
+ if req.URL.Host == "" {
+ req.closeBody()
+ return nil, errors.New("http: no Host in request URL")
+ }
+
+ for {
+ select {
+ case <-ctx.Done():
+ req.closeBody()
+ return nil, ctx.Err()
+ default:
+ }
+
+ // treq gets modified by roundTrip, so we need to recreate for each retry.
+ treq := &transportRequest{Request: req, trace: trace, cancelKey: cancelKey}
+ cm, err := t.connectMethodForRequest(treq)
+ if err != nil {
+ req.closeBody()
+ return nil, err
+ }
+
+ // Get the cached or newly-created connection to either the
+ // host (for http or https), the http proxy, or the http proxy
+ // pre-CONNECTed to https server. In any case, we'll be ready
+ // to send it requests.
+ pconn, err := t.getConn(treq, cm)
+ if err != nil {
+ t.setReqCanceler(cancelKey, nil)
+ req.closeBody()
+ return nil, err
+ }
+
+ var resp *Response
+ if pconn.alt != nil {
+ // HTTP/2 path.
+ t.setReqCanceler(cancelKey, nil) // not cancelable with CancelRequest
+ resp, err = pconn.alt.RoundTrip(req)
+ } else {
+ resp, err = pconn.roundTrip(treq)
+ }
+ if err == nil {
+ resp.Request = origReq
+ return resp, nil
+ }
+
+ // Failed. Clean up and determine whether to retry.
+ if http2isNoCachedConnError(err) {
+ if t.removeIdleConn(pconn) {
+ t.decConnsPerHost(pconn.cacheKey)
+ }
+ } else if !pconn.shouldRetryRequest(req, err) {
+ // Issue 16465: return underlying net.Conn.Read error from peek,
+ // as we've historically done.
+ if e, ok := err.(nothingWrittenError); ok {
+ err = e.error
+ }
+ if e, ok := err.(transportReadFromServerError); ok {
+ err = e.err
+ }
+ return nil, err
+ }
+ testHookRoundTripRetried()
+
+ // Rewind the body if we're able to.
+ req, err = rewindBody(req)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
+
+var errCannotRewind = errors.New("net/http: cannot rewind body after connection loss")
+
+type readTrackingBody struct {
+ io.ReadCloser
+ didRead bool
+ didClose bool
+}
+
+func (r *readTrackingBody) Read(data []byte) (int, error) {
+ r.didRead = true
+ return r.ReadCloser.Read(data)
+}
+
+func (r *readTrackingBody) Close() error {
+ r.didClose = true
+ return r.ReadCloser.Close()
+}
+
+// setupRewindBody returns a new request with a custom body wrapper
+// that can report whether the body needs rewinding.
+// This lets rewindBody avoid an error result when the request
+// does not have GetBody but the body hasn't been read at all yet.
+func setupRewindBody(req *Request) *Request {
+ if req.Body == nil || req.Body == NoBody {
+ return req
+ }
+ newReq := *req
+ newReq.Body = &readTrackingBody{ReadCloser: req.Body}
+ return &newReq
+}
+
+// rewindBody returns a new request with the body rewound.
+// It returns req unmodified if the body does not need rewinding.
+// rewindBody takes care of closing req.Body when appropriate
+// (in all cases except when rewindBody returns req unmodified).
+func rewindBody(req *Request) (rewound *Request, err error) {
+ if req.Body == nil || req.Body == NoBody || (!req.Body.(*readTrackingBody).didRead && !req.Body.(*readTrackingBody).didClose) {
+ return req, nil // nothing to rewind
+ }
+ if !req.Body.(*readTrackingBody).didClose {
+ req.closeBody()
+ }
+ if req.GetBody == nil {
+ return nil, errCannotRewind
+ }
+ body, err := req.GetBody()
+ if err != nil {
+ return nil, err
+ }
+ newReq := *req
+ newReq.Body = &readTrackingBody{ReadCloser: body}
+ return &newReq, nil
+}
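+
+// For context (illustrative, not part of the original source): NewRequest
+// sets GetBody automatically when the body is a *bytes.Buffer, *bytes.Reader
+// or *strings.Reader, so such requests can be rewound and retried. A request
+// whose body is a one-shot stream has GetBody == nil, and rewindBody fails
+// with errCannotRewind once any of the body has been consumed.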
+
+// shouldRetryRequest reports whether we should retry sending a failed
+// HTTP request on a new connection. The non-nil input error is the
+// error from roundTrip.
+func (pc *persistConn) shouldRetryRequest(req *Request, err error) bool {
+ if http2isNoCachedConnError(err) {
+ // Issue 16582: if the user started a bunch of
+ // requests at once, they can all pick the same conn
+ // and violate the server's max concurrent streams.
+ // Instead, match the HTTP/1 behavior for now and dial
+ // again to get a new TCP connection, rather than failing
+ // this request.
+ return true
+ }
+ if err == errMissingHost {
+ // User error.
+ return false
+ }
+ if !pc.isReused() {
+ // This was a fresh connection. There's no reason the server
+ // should've hung up on us.
+ //
+ // Also, if we retried now, we could loop forever
+ // creating new connections and retrying if the server
+ // is just hanging up on us because it doesn't like
+ // our request (as opposed to sending an error).
+ return false
+ }
+ if _, ok := err.(nothingWrittenError); ok {
+ // We never wrote anything, so it's safe to retry, if there's no body or we
+ // can "rewind" the body with GetBody.
+ return req.outgoingLength() == 0 || req.GetBody != nil
+ }
+ if !req.isReplayable() {
+ // Don't retry non-idempotent requests.
+ return false
+ }
+ if _, ok := err.(transportReadFromServerError); ok {
+ // We got some non-EOF net.Conn.Read failure reading
+ // the 1st response byte from the server.
+ return true
+ }
+ if err == errServerClosedIdle {
+ // The server replied with io.EOF while we were trying to
+ // read the response. Probably an unfortunately keep-alive
+ // timeout, just as the client was writing a request.
+ return true
+ }
+ return false // conservatively
+}
+
+// ErrSkipAltProtocol is a sentinel error value defined by Transport.RegisterProtocol.
+var ErrSkipAltProtocol = errors.New("net/http: skip alternate protocol")
+
+// RegisterProtocol registers a new protocol with scheme.
+// The Transport will pass requests using the given scheme to rt.
+// It is rt's responsibility to simulate HTTP request semantics.
+//
+// RegisterProtocol can be used by other packages to provide
+// implementations of protocol schemes like "ftp" or "file".
+//
+// If rt.RoundTrip returns ErrSkipAltProtocol, the Transport will
+// handle the RoundTrip itself for that one request, as if the
+// protocol were not registered.
+func (t *Transport) RegisterProtocol(scheme string, rt RoundTripper) {
+ t.altMu.Lock()
+ defer t.altMu.Unlock()
+ oldMap, _ := t.altProto.Load().(map[string]RoundTripper)
+ if _, exists := oldMap[scheme]; exists {
+ panic("protocol " + scheme + " already registered")
+ }
+ newMap := make(map[string]RoundTripper)
+ for k, v := range oldMap {
+ newMap[k] = v
+ }
+ newMap[scheme] = rt
+ t.altProto.Store(newMap)
+}
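+
+// A minimal sketch (not part of the original source); newFileRoundTripper is
+// a hypothetical constructor for a caller-supplied RoundTripper:
+//
+//	var fileRT http.RoundTripper = newFileRoundTripper() // hypothetical
+//	t := &http.Transport{}
+//	t.RegisterProtocol("file", fileRT)
+//	// file:// requests made through t are now handled by fileRT.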
+
+// CloseIdleConnections closes any connections which were previously
+// connected from previous requests but are now sitting idle in
+// a "keep-alive" state. It does not interrupt any connections currently
+// in use.
+func (t *Transport) CloseIdleConnections() {
+ t.nextProtoOnce.Do(t.onceSetNextProtoDefaults)
+ t.idleMu.Lock()
+ m := t.idleConn
+ t.idleConn = nil
+ t.closeIdle = true // close newly idle connections
+ t.idleLRU = connLRU{}
+ t.idleMu.Unlock()
+ for _, conns := range m {
+ for _, pconn := range conns {
+ pconn.close(errCloseIdleConns)
+ }
+ }
+ if t2 := t.h2transport; t2 != nil {
+ t2.CloseIdleConnections()
+ }
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+// CancelRequest should only be called after RoundTrip has returned.
+//
+// Deprecated: Use Request.WithContext to create a request with a
+// cancelable context instead. CancelRequest cannot cancel HTTP/2
+// requests.
+func (t *Transport) CancelRequest(req *Request) {
+ t.cancelRequest(cancelKey{req}, errRequestCanceled)
+}
+
+// Cancel an in-flight request, recording the error value.
+// Returns whether the request was canceled.
+func (t *Transport) cancelRequest(key cancelKey, err error) bool {
+ // This function must not return until the cancel func has completed.
+ // See: https://golang.org/issue/34658
+ t.reqMu.Lock()
+ defer t.reqMu.Unlock()
+ cancel := t.reqCanceler[key]
+ delete(t.reqCanceler, key)
+ if cancel != nil {
+ cancel(err)
+ }
+
+ return cancel != nil
+}
+
+//
+// Private implementation past this point.
+//
+
+var (
+ // envProxyOnce guards envProxyFuncValue
+ envProxyOnce sync.Once
+ envProxyFuncValue func(*url.URL) (*url.URL, error)
+)
+
+// envProxyFunc returns a proxy-selection function looked up
+// once from the environment. This mitigates expensive lookups
+// on some platforms (e.g. Windows).
+func envProxyFunc() func(*url.URL) (*url.URL, error) {
+ envProxyOnce.Do(func() {
+ envProxyFuncValue = httpproxy.FromEnvironment().ProxyFunc()
+ })
+ return envProxyFuncValue
+}
+
+// resetProxyConfig is used by tests.
+func resetProxyConfig() {
+ envProxyOnce = sync.Once{}
+ envProxyFuncValue = nil
+}
+
+func (t *Transport) connectMethodForRequest(treq *transportRequest) (cm connectMethod, err error) {
+ cm.targetScheme = treq.URL.Scheme
+ cm.targetAddr = canonicalAddr(treq.URL)
+ if t.Proxy != nil {
+ cm.proxyURL, err = t.Proxy(treq.Request)
+ }
+ cm.onlyH1 = treq.requiresHTTP1()
+ return cm, err
+}
+
+// proxyAuth returns the Proxy-Authorization header to set
+// on requests, if applicable.
+func (cm *connectMethod) proxyAuth() string {
+ if cm.proxyURL == nil {
+ return ""
+ }
+ if u := cm.proxyURL.User; u != nil {
+ username := u.Username()
+ password, _ := u.Password()
+ return "Basic " + basicAuth(username, password)
+ }
+ return ""
+}
+
+// error values for debugging and testing, not seen by users.
+var (
+ errKeepAlivesDisabled = errors.New("http: putIdleConn: keep alives disabled")
+ errConnBroken = errors.New("http: putIdleConn: connection is in bad state")
+ errCloseIdle = errors.New("http: putIdleConn: CloseIdleConnections was called")
+ errTooManyIdle = errors.New("http: putIdleConn: too many idle connections")
+ errTooManyIdleHost = errors.New("http: putIdleConn: too many idle connections for host")
+ errCloseIdleConns = errors.New("http: CloseIdleConnections called")
+ errReadLoopExiting = errors.New("http: persistConn.readLoop exiting")
+ errIdleConnTimeout = errors.New("http: idle connection timeout")
+
+ // errServerClosedIdle is not seen by users for idempotent requests, but may be
+ // seen by a user if the server shuts down an idle connection and sends its FIN
+ // in flight with already-written POST body bytes from the client.
+ // See https://github.com/golang/go/issues/19943#issuecomment-355607646
+ errServerClosedIdle = errors.New("http: server closed idle connection")
+)
+
+// transportReadFromServerError is used by Transport.readLoop when the
+// 1 byte peek read fails and we're actually anticipating a response.
+// Usually this is just due to the inherent keep-alive shut down race,
+// where the server closed the connection at the same time the client
+// wrote. The underlying err field is usually io.EOF or some
+// ECONNRESET sort of thing which varies by platform. But it might be
+// the user's custom net.Conn.Read error too, so we carry it along for
+// them to return from Transport.RoundTrip.
+type transportReadFromServerError struct {
+ err error
+}
+
+func (e transportReadFromServerError) Unwrap() error { return e.err }
+
+func (e transportReadFromServerError) Error() string {
+ return fmt.Sprintf("net/http: Transport failed to read from server: %v", e.err)
+}
+
+func (t *Transport) putOrCloseIdleConn(pconn *persistConn) {
+ if err := t.tryPutIdleConn(pconn); err != nil {
+ pconn.close(err)
+ }
+}
+
+func (t *Transport) maxIdleConnsPerHost() int {
+ if v := t.MaxIdleConnsPerHost; v != 0 {
+ return v
+ }
+ return DefaultMaxIdleConnsPerHost
+}
+
+// tryPutIdleConn adds pconn to the list of idle persistent connections awaiting
+// a new request.
+// If pconn is no longer needed or not in a good state, tryPutIdleConn returns
+// an error explaining why it wasn't registered.
+// tryPutIdleConn does not close pconn. Use putOrCloseIdleConn instead for that.
+func (t *Transport) tryPutIdleConn(pconn *persistConn) error {
+ if t.DisableKeepAlives || t.MaxIdleConnsPerHost < 0 {
+ return errKeepAlivesDisabled
+ }
+ if pconn.isBroken() {
+ return errConnBroken
+ }
+ pconn.markReused()
+
+ t.idleMu.Lock()
+ defer t.idleMu.Unlock()
+
+ // HTTP/2 (pconn.alt != nil) connections do not come out of the idle list,
+ // because multiple goroutines can use them simultaneously.
+ // If this is an HTTP/2 connection being “returned,” we're done.
+ if pconn.alt != nil && t.idleLRU.m[pconn] != nil {
+ return nil
+ }
+
+ // Deliver pconn to goroutine waiting for idle connection, if any.
+ // (They may be actively dialing, but this conn is ready first.
+ // Chrome calls this socket late binding.
+ // See https://www.chromium.org/developers/design-documents/network-stack#TOC-Connection-Management.)
+ key := pconn.cacheKey
+ if q, ok := t.idleConnWait[key]; ok {
+ done := false
+ if pconn.alt == nil {
+ // HTTP/1.
+ // Loop over the waiting list until we find a w that isn't done already, and hand it pconn.
+ for q.len() > 0 {
+ w := q.popFront()
+ if w.tryDeliver(pconn, nil) {
+ done = true
+ break
+ }
+ }
+ } else {
+ // HTTP/2.
+ // Can hand the same pconn to everyone in the waiting list,
+ // and we still won't be done: we want to put it in the idle
+ // list unconditionally, for any future clients too.
+ for q.len() > 0 {
+ w := q.popFront()
+ w.tryDeliver(pconn, nil)
+ }
+ }
+ if q.len() == 0 {
+ delete(t.idleConnWait, key)
+ } else {
+ t.idleConnWait[key] = q
+ }
+ if done {
+ return nil
+ }
+ }
+
+ if t.closeIdle {
+ return errCloseIdle
+ }
+ if t.idleConn == nil {
+ t.idleConn = make(map[connectMethodKey][]*persistConn)
+ }
+ idles := t.idleConn[key]
+ if len(idles) >= t.maxIdleConnsPerHost() {
+ return errTooManyIdleHost
+ }
+ for _, exist := range idles {
+ if exist == pconn {
+ log.Fatalf("dup idle pconn %p in freelist", pconn)
+ }
+ }
+ t.idleConn[key] = append(idles, pconn)
+ t.idleLRU.add(pconn)
+ if t.MaxIdleConns != 0 && t.idleLRU.len() > t.MaxIdleConns {
+ oldest := t.idleLRU.removeOldest()
+ oldest.close(errTooManyIdle)
+ t.removeIdleConnLocked(oldest)
+ }
+
+ // Set idle timer, but only for HTTP/1 (pconn.alt == nil).
+ // The HTTP/2 implementation manages the idle timer itself
+ // (see idleConnTimeout in h2_bundle.go).
+ if t.IdleConnTimeout > 0 && pconn.alt == nil {
+ if pconn.idleTimer != nil {
+ pconn.idleTimer.Reset(t.IdleConnTimeout)
+ } else {
+ pconn.idleTimer = time.AfterFunc(t.IdleConnTimeout, pconn.closeConnIfStillIdle)
+ }
+ }
+ pconn.idleAt = time.Now()
+ return nil
+}
+
+// queueForIdleConn queues w to receive the next idle connection for w.cm.
+// As an optimization hint to the caller, queueForIdleConn reports whether
+// it successfully delivered an already-idle connection.
+func (t *Transport) queueForIdleConn(w *wantConn) (delivered bool) {
+ if t.DisableKeepAlives {
+ return false
+ }
+
+ t.idleMu.Lock()
+ defer t.idleMu.Unlock()
+
+ // Stop closing connections that become idle - we might want one.
+ // (That is, undo the effect of t.CloseIdleConnections.)
+ t.closeIdle = false
+
+ if w == nil {
+ // Happens in test hook.
+ return false
+ }
+
+ // If IdleConnTimeout is set, calculate the oldest
+ // persistConn.idleAt time we're willing to use a cached idle
+ // conn.
+ var oldTime time.Time
+ if t.IdleConnTimeout > 0 {
+ oldTime = time.Now().Add(-t.IdleConnTimeout)
+ }
+
+ // Look for most recently-used idle connection.
+ if list, ok := t.idleConn[w.key]; ok {
+ stop := false
+ delivered := false
+ for len(list) > 0 && !stop {
+ pconn := list[len(list)-1]
+
+ // See whether this connection has been idle too long, considering
+ // only the wall time (the Round(0)), in case this is a laptop or VM
+ // coming out of suspend with previously cached idle connections.
+ tooOld := !oldTime.IsZero() && pconn.idleAt.Round(0).Before(oldTime)
+ if tooOld {
+ // Async cleanup. Launch in its own goroutine (as if a
+ // time.AfterFunc called it); it acquires idleMu, which we're
+ // holding, and does a synchronous net.Conn.Close.
+ go pconn.closeConnIfStillIdle()
+ }
+ if pconn.isBroken() || tooOld {
+ // If either persistConn.readLoop has marked the connection
+ // broken, but Transport.removeIdleConn has not yet removed it
+ // from the idle list, or if this persistConn is too old (it was
+ // idle too long), then ignore it and look for another. In both
+ // cases it's already in the process of being closed.
+ list = list[:len(list)-1]
+ continue
+ }
+ delivered = w.tryDeliver(pconn, nil)
+ if delivered {
+ if pconn.alt != nil {
+ // HTTP/2: multiple clients can share pconn.
+ // Leave it in the list.
+ } else {
+ // HTTP/1: only one client can use pconn.
+ // Remove it from the list.
+ t.idleLRU.remove(pconn)
+ list = list[:len(list)-1]
+ }
+ }
+ stop = true
+ }
+ if len(list) > 0 {
+ t.idleConn[w.key] = list
+ } else {
+ delete(t.idleConn, w.key)
+ }
+ if stop {
+ return delivered
+ }
+ }
+
+ // Register to receive next connection that becomes idle.
+ if t.idleConnWait == nil {
+ t.idleConnWait = make(map[connectMethodKey]wantConnQueue)
+ }
+ q := t.idleConnWait[w.key]
+ q.cleanFront()
+ q.pushBack(w)
+ t.idleConnWait[w.key] = q
+ return false
+}
+
+// removeIdleConn marks pconn as dead.
+func (t *Transport) removeIdleConn(pconn *persistConn) bool {
+ t.idleMu.Lock()
+ defer t.idleMu.Unlock()
+ return t.removeIdleConnLocked(pconn)
+}
+
+// t.idleMu must be held.
+func (t *Transport) removeIdleConnLocked(pconn *persistConn) bool {
+ if pconn.idleTimer != nil {
+ pconn.idleTimer.Stop()
+ }
+ t.idleLRU.remove(pconn)
+ key := pconn.cacheKey
+ pconns := t.idleConn[key]
+ var removed bool
+ switch len(pconns) {
+ case 0:
+ // Nothing
+ case 1:
+ if pconns[0] == pconn {
+ delete(t.idleConn, key)
+ removed = true
+ }
+ default:
+ for i, v := range pconns {
+ if v != pconn {
+ continue
+ }
+ // Slide down, keeping most recently-used
+ // conns at the end.
+ copy(pconns[i:], pconns[i+1:])
+ t.idleConn[key] = pconns[:len(pconns)-1]
+ removed = true
+ break
+ }
+ }
+ return removed
+}
+
+func (t *Transport) setReqCanceler(key cancelKey, fn func(error)) {
+ t.reqMu.Lock()
+ defer t.reqMu.Unlock()
+ if t.reqCanceler == nil {
+ t.reqCanceler = make(map[cancelKey]func(error))
+ }
+ if fn != nil {
+ t.reqCanceler[key] = fn
+ } else {
+ delete(t.reqCanceler, key)
+ }
+}
+
+// replaceReqCanceler replaces an existing cancel function. If there is no cancel function
+// for the request, we don't set the function and return false.
+// Since CancelRequest will clear the canceler, we can use the return value to detect if
+// the request was canceled since the last setReqCanceler call.
+func (t *Transport) replaceReqCanceler(key cancelKey, fn func(error)) bool {
+ t.reqMu.Lock()
+ defer t.reqMu.Unlock()
+ _, ok := t.reqCanceler[key]
+ if !ok {
+ return false
+ }
+ if fn != nil {
+ t.reqCanceler[key] = fn
+ } else {
+ delete(t.reqCanceler, key)
+ }
+ return true
+}
+
+var zeroDialer net.Dialer
+
+func (t *Transport) dial(ctx context.Context, network, addr string) (net.Conn, error) {
+ if t.DialContext != nil {
+ return t.DialContext(ctx, network, addr)
+ }
+ if t.Dial != nil {
+ c, err := t.Dial(network, addr)
+ if c == nil && err == nil {
+ err = errors.New("net/http: Transport.Dial hook returned (nil, nil)")
+ }
+ return c, err
+ }
+ return zeroDialer.DialContext(ctx, network, addr)
+}
+
+// A wantConn records state about a wanted connection
+// (that is, an active call to getConn).
+// The conn may be gotten by dialing or by finding an idle connection,
+// or a cancellation may make the conn no longer wanted.
+// These three options are racing against each other and use
+// wantConn to coordinate and agree about the winning outcome.
+type wantConn struct {
+ cm connectMethod
+ key connectMethodKey // cm.key()
+ ctx context.Context // context for dial
+ ready chan struct{} // closed when pc, err pair is delivered
+
+ // hooks for testing to know when dials are done
+ // beforeDial is called in the getConn goroutine when the dial is queued.
+ // afterDial is called when the dial is completed or canceled.
+ beforeDial func()
+ afterDial func()
+
+ mu sync.Mutex // protects pc, err, close(ready)
+ pc *persistConn
+ err error
+}
+
+// waiting reports whether w is still waiting for an answer (connection or error).
+func (w *wantConn) waiting() bool {
+ select {
+ case <-w.ready:
+ return false
+ default:
+ return true
+ }
+}
+
+// tryDeliver attempts to deliver pc, err to w and reports whether it succeeded.
+func (w *wantConn) tryDeliver(pc *persistConn, err error) bool {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if w.pc != nil || w.err != nil {
+ return false
+ }
+
+ w.pc = pc
+ w.err = err
+ if w.pc == nil && w.err == nil {
+ panic("net/http: internal error: misuse of tryDeliver")
+ }
+ close(w.ready)
+ return true
+}
+
+// cancel marks w as no longer wanting a result (for example, due to cancellation).
+// If a connection has been delivered already, cancel returns it with t.putOrCloseIdleConn.
+func (w *wantConn) cancel(t *Transport, err error) {
+ w.mu.Lock()
+ if w.pc == nil && w.err == nil {
+ close(w.ready) // catch misbehavior in future delivery
+ }
+ pc := w.pc
+ w.pc = nil
+ w.err = err
+ w.mu.Unlock()
+
+ if pc != nil {
+ t.putOrCloseIdleConn(pc)
+ }
+}
+
+// A wantConnQueue is a queue of wantConns.
+type wantConnQueue struct {
+ // This is a queue, not a deque.
+ // It is split into two stages - head[headPos:] and tail.
+ // popFront is trivial (headPos++) on the first stage, and
+ // pushBack is trivial (append) on the second stage.
+ // If the first stage is empty, popFront can swap the
+ // first and second stages to remedy the situation.
+ //
+ // This two-stage split is analogous to the use of two lists
+ // in Okasaki's purely functional queue but without the
+ // overhead of reversing the list when swapping stages.
+ head []*wantConn
+ headPos int
+ tail []*wantConn
+}
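+
+// Illustrative behavior (not part of the original source): after pushBack(a)
+// and pushBack(b), popFront returns a by advancing headPos; only when
+// head[headPos:] is exhausted does popFront swap tail in as the new head, so
+// both operations run in amortized O(1) without per-element copying.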
+
+// len returns the number of items in the queue.
+func (q *wantConnQueue) len() int {
+ return len(q.head) - q.headPos + len(q.tail)
+}
+
+// pushBack adds w to the back of the queue.
+func (q *wantConnQueue) pushBack(w *wantConn) {
+ q.tail = append(q.tail, w)
+}
+
+// popFront removes and returns the wantConn at the front of the queue.
+func (q *wantConnQueue) popFront() *wantConn {
+ if q.headPos >= len(q.head) {
+ if len(q.tail) == 0 {
+ return nil
+ }
+ // Pick up tail as new head, clear tail.
+ q.head, q.headPos, q.tail = q.tail, 0, q.head[:0]
+ }
+ w := q.head[q.headPos]
+ q.head[q.headPos] = nil
+ q.headPos++
+ return w
+}
+
+// peekFront returns the wantConn at the front of the queue without removing it.
+func (q *wantConnQueue) peekFront() *wantConn {
+ if q.headPos < len(q.head) {
+ return q.head[q.headPos]
+ }
+ if len(q.tail) > 0 {
+ return q.tail[0]
+ }
+ return nil
+}
+
+// cleanFront pops any wantConns that are no longer waiting from the head of the
+// queue, reporting whether any were popped.
+func (q *wantConnQueue) cleanFront() (cleaned bool) {
+ for {
+ w := q.peekFront()
+ if w == nil || w.waiting() {
+ return cleaned
+ }
+ q.popFront()
+ cleaned = true
+ }
+}
+
+func (t *Transport) customDialTLS(ctx context.Context, network, addr string) (conn net.Conn, err error) {
+ if t.DialTLSContext != nil {
+ conn, err = t.DialTLSContext(ctx, network, addr)
+ } else {
+ conn, err = t.DialTLS(network, addr)
+ }
+ if conn == nil && err == nil {
+ err = errors.New("net/http: Transport.DialTLS or DialTLSContext returned (nil, nil)")
+ }
+ return
+}
+
+// getConn dials and creates a new persistConn to the target as
+// specified in the connectMethod. This includes doing a proxy CONNECT
+// and/or setting up TLS. If this doesn't return an error, the persistConn
+// is ready to write requests to.
+func (t *Transport) getConn(treq *transportRequest, cm connectMethod) (pc *persistConn, err error) {
+ req := treq.Request
+ trace := treq.trace
+ ctx := req.Context()
+ if trace != nil && trace.GetConn != nil {
+ trace.GetConn(cm.addr())
+ }
+
+ w := &wantConn{
+ cm: cm,
+ key: cm.key(),
+ ctx: ctx,
+ ready: make(chan struct{}, 1),
+ beforeDial: testHookPrePendingDial,
+ afterDial: testHookPostPendingDial,
+ }
+ defer func() {
+ if err != nil {
+ w.cancel(t, err)
+ }
+ }()
+
+ // Queue for idle connection.
+ if delivered := t.queueForIdleConn(w); delivered {
+ pc := w.pc
+ // Trace only for HTTP/1.
+ // HTTP/2 calls trace.GotConn itself.
+ if pc.alt == nil && trace != nil && trace.GotConn != nil {
+ trace.GotConn(pc.gotIdleConnTrace(pc.idleAt))
+ }
+ // set request canceler to some non-nil function so we
+ // can detect whether it was cleared between now and when
+ // we enter roundTrip
+ t.setReqCanceler(treq.cancelKey, func(error) {})
+ return pc, nil
+ }
+
+ cancelc := make(chan error, 1)
+ t.setReqCanceler(treq.cancelKey, func(err error) { cancelc <- err })
+
+ // Queue for permission to dial.
+ t.queueForDial(w)
+
+ // Wait for completion or cancellation.
+ select {
+ case <-w.ready:
+ // Trace success but only for HTTP/1.
+ // HTTP/2 calls trace.GotConn itself.
+ if w.pc != nil && w.pc.alt == nil && trace != nil && trace.GotConn != nil {
+ trace.GotConn(httptrace.GotConnInfo{Conn: w.pc.conn, Reused: w.pc.isReused()})
+ }
+ if w.err != nil {
+ // If the request has been canceled, that's probably
+ // what caused w.err; if so, prefer to return the
+ // cancellation error (see golang.org/issue/16049).
+ select {
+ case <-req.Cancel:
+ return nil, errRequestCanceledConn
+ case <-req.Context().Done():
+ return nil, req.Context().Err()
+ case err := <-cancelc:
+ if err == errRequestCanceled {
+ err = errRequestCanceledConn
+ }
+ return nil, err
+ default:
+ // return below
+ }
+ }
+ return w.pc, w.err
+ case <-req.Cancel:
+ return nil, errRequestCanceledConn
+ case <-req.Context().Done():
+ return nil, req.Context().Err()
+ case err := <-cancelc:
+ if err == errRequestCanceled {
+ err = errRequestCanceledConn
+ }
+ return nil, err
+ }
+}
+
+// queueForDial queues w to wait for permission to begin dialing.
+// Once w receives permission to dial, it will do so in a separate goroutine.
+func (t *Transport) queueForDial(w *wantConn) {
+ w.beforeDial()
+ if t.MaxConnsPerHost <= 0 {
+ go t.dialConnFor(w)
+ return
+ }
+
+ t.connsPerHostMu.Lock()
+ defer t.connsPerHostMu.Unlock()
+
+ if n := t.connsPerHost[w.key]; n < t.MaxConnsPerHost {
+ if t.connsPerHost == nil {
+ t.connsPerHost = make(map[connectMethodKey]int)
+ }
+ t.connsPerHost[w.key] = n + 1
+ go t.dialConnFor(w)
+ return
+ }
+
+ if t.connsPerHostWait == nil {
+ t.connsPerHostWait = make(map[connectMethodKey]wantConnQueue)
+ }
+ q := t.connsPerHostWait[w.key]
+ q.cleanFront()
+ q.pushBack(w)
+ t.connsPerHostWait[w.key] = q
+}
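+
+// Illustrative sketch (not part of the original file): the per-host dial limit
+// enforced by queueForDial is configured from caller code:
+//
+//	t := &http.Transport{MaxConnsPerHost: 4} // at most 4 connections per host key
+//	client := &http.Client{Transport: t}
+//	// A fifth concurrent request to the same host parks in connsPerHostWait
+//	// until decConnsPerHost hands its slot to the waiting wantConn.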
+
+// dialConnFor dials on behalf of w and delivers the result to w.
+// dialConnFor has received permission to dial w.cm and is counted in t.connsPerHost[w.key].
+// If the dial is canceled or unsuccessful, dialConnFor decrements t.connsPerHost[w.key].
+func (t *Transport) dialConnFor(w *wantConn) {
+ defer w.afterDial()
+
+ pc, err := t.dialConn(w.ctx, w.cm)
+ delivered := w.tryDeliver(pc, err)
+ if err == nil && (!delivered || pc.alt != nil) {
+ // pconn was not passed to w,
+ // or it is HTTP/2 and can be shared.
+ // Add to the idle connection pool.
+ t.putOrCloseIdleConn(pc)
+ }
+ if err != nil {
+ t.decConnsPerHost(w.key)
+ }
+}
+
+// decConnsPerHost decrements the per-host connection count for key,
+// which may in turn give a different waiting goroutine permission to dial.
+func (t *Transport) decConnsPerHost(key connectMethodKey) {
+ if t.MaxConnsPerHost <= 0 {
+ return
+ }
+
+ t.connsPerHostMu.Lock()
+ defer t.connsPerHostMu.Unlock()
+ n := t.connsPerHost[key]
+ if n == 0 {
+ // Shouldn't happen, but if it does, the counting is buggy and could
+ // easily lead to a silent deadlock, so report the problem loudly.
+ panic("net/http: internal error: connCount underflow")
+ }
+
+ // Can we hand this count to a goroutine still waiting to dial?
+ // (Some goroutines on the wait list may have timed out or
+ // gotten a connection another way. If they're all gone,
+ // we don't want to kick off any spurious dial operations.)
+ if q := t.connsPerHostWait[key]; q.len() > 0 {
+ done := false
+ for q.len() > 0 {
+ w := q.popFront()
+ if w.waiting() {
+ go t.dialConnFor(w)
+ done = true
+ break
+ }
+ }
+ if q.len() == 0 {
+ delete(t.connsPerHostWait, key)
+ } else {
+ // q is a value (like a slice), so we have to store
+ // the updated q back into the map.
+ t.connsPerHostWait[key] = q
+ }
+ if done {
+ return
+ }
+ }
+
+ // Otherwise, decrement the recorded count.
+ if n--; n == 0 {
+ delete(t.connsPerHost, key)
+ } else {
+ t.connsPerHost[key] = n
+ }
+}
+
+// Add TLS to a persistent connection, i.e. negotiate a TLS session. If pconn is already a TLS
+// tunnel, this function establishes a nested TLS session inside the encrypted channel.
+// The remote endpoint's name may be overridden by TLSClientConfig.ServerName.
+func (pconn *persistConn) addTLS(ctx context.Context, name string, trace *httptrace.ClientTrace) error {
+ // Initiate TLS and check remote host name against certificate.
+ cfg := cloneTLSConfig(pconn.t.TLSClientConfig)
+ if cfg.ServerName == "" {
+ cfg.ServerName = name
+ }
+ if pconn.cacheKey.onlyH1 {
+ cfg.NextProtos = nil
+ }
+ plainConn := pconn.conn
+ tlsConn := tls.Client(plainConn, cfg)
+ errc := make(chan error, 2)
+ var timer *time.Timer // for canceling TLS handshake
+ if d := pconn.t.TLSHandshakeTimeout; d != 0 {
+ timer = time.AfterFunc(d, func() {
+ errc <- tlsHandshakeTimeoutError{}
+ })
+ }
+ go func() {
+ if trace != nil && trace.TLSHandshakeStart != nil {
+ trace.TLSHandshakeStart()
+ }
+ err := tlsConn.HandshakeContext(ctx)
+ if timer != nil {
+ timer.Stop()
+ }
+ errc <- err
+ }()
+ if err := <-errc; err != nil {
+ plainConn.Close()
+ if trace != nil && trace.TLSHandshakeDone != nil {
+ trace.TLSHandshakeDone(tls.ConnectionState{}, err)
+ }
+ return err
+ }
+ cs := tlsConn.ConnectionState()
+ if trace != nil && trace.TLSHandshakeDone != nil {
+ trace.TLSHandshakeDone(cs, nil)
+ }
+ pconn.tlsState = &cs
+ pconn.conn = tlsConn
+ return nil
+}
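+
+// Illustrative sketch (not part of the original file): the knobs addTLS reads
+// are set on the Transport by caller code (values hypothetical):
+//
+//	t := &http.Transport{
+//		TLSHandshakeTimeout: 10 * time.Second,
+//		TLSClientConfig:     &tls.Config{ServerName: "front.example"}, // overrides the dialed host name
+//	}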
+
+type erringRoundTripper interface {
+ RoundTripErr() error
+}
+
+func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (pconn *persistConn, err error) {
+ pconn = &persistConn{
+ t: t,
+ cacheKey: cm.key(),
+ reqch: make(chan requestAndChan, 1),
+ writech: make(chan writeRequest, 1),
+ closech: make(chan struct{}),
+ writeErrCh: make(chan error, 1),
+ writeLoopDone: make(chan struct{}),
+ }
+ trace := httptrace.ContextClientTrace(ctx)
+ wrapErr := func(err error) error {
+ if cm.proxyURL != nil {
+ // Return a typed error, per Issue 16997
+ return &net.OpError{Op: "proxyconnect", Net: "tcp", Err: err}
+ }
+ return err
+ }
+ if cm.scheme() == "https" && t.hasCustomTLSDialer() {
+ var err error
+ pconn.conn, err = t.customDialTLS(ctx, "tcp", cm.addr())
+ if err != nil {
+ return nil, wrapErr(err)
+ }
+ if tc, ok := pconn.conn.(*tls.Conn); ok {
+ // Handshake here, in case DialTLS didn't. TLSNextProto below
+ // depends on it for knowing the connection state.
+ if trace != nil && trace.TLSHandshakeStart != nil {
+ trace.TLSHandshakeStart()
+ }
+ if err := tc.HandshakeContext(ctx); err != nil {
+ go pconn.conn.Close()
+ if trace != nil && trace.TLSHandshakeDone != nil {
+ trace.TLSHandshakeDone(tls.ConnectionState{}, err)
+ }
+ return nil, err
+ }
+ cs := tc.ConnectionState()
+ if trace != nil && trace.TLSHandshakeDone != nil {
+ trace.TLSHandshakeDone(cs, nil)
+ }
+ pconn.tlsState = &cs
+ }
+ } else {
+ conn, err := t.dial(ctx, "tcp", cm.addr())
+ if err != nil {
+ return nil, wrapErr(err)
+ }
+ pconn.conn = conn
+ if cm.scheme() == "https" {
+ var firstTLSHost string
+ if firstTLSHost, _, err = net.SplitHostPort(cm.addr()); err != nil {
+ return nil, wrapErr(err)
+ }
+ if err = pconn.addTLS(ctx, firstTLSHost, trace); err != nil {
+ return nil, wrapErr(err)
+ }
+ }
+ }
+
+ // Proxy setup.
+ switch {
+ case cm.proxyURL == nil:
+ // Do nothing. Not using a proxy.
+ case cm.proxyURL.Scheme == "socks5":
+ conn := pconn.conn
+ d := socksNewDialer("tcp", conn.RemoteAddr().String())
+ if u := cm.proxyURL.User; u != nil {
+ auth := &socksUsernamePassword{
+ Username: u.Username(),
+ }
+ auth.Password, _ = u.Password()
+ d.AuthMethods = []socksAuthMethod{
+ socksAuthMethodNotRequired,
+ socksAuthMethodUsernamePassword,
+ }
+ d.Authenticate = auth.Authenticate
+ }
+ if _, err := d.DialWithConn(ctx, conn, "tcp", cm.targetAddr); err != nil {
+ conn.Close()
+ return nil, err
+ }
+ case cm.targetScheme == "http":
+ pconn.isProxy = true
+ if pa := cm.proxyAuth(); pa != "" {
+ pconn.mutateHeaderFunc = func(h Header) {
+ h.Set("Proxy-Authorization", pa)
+ }
+ }
+ case cm.targetScheme == "https":
+ conn := pconn.conn
+ var hdr Header
+ if t.GetProxyConnectHeader != nil {
+ var err error
+ hdr, err = t.GetProxyConnectHeader(ctx, cm.proxyURL, cm.targetAddr)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+ } else {
+ hdr = t.ProxyConnectHeader
+ }
+ if hdr == nil {
+ hdr = make(Header)
+ }
+ if pa := cm.proxyAuth(); pa != "" {
+ hdr = hdr.Clone()
+ hdr.Set("Proxy-Authorization", pa)
+ }
+ connectReq := &Request{
+ Method: "CONNECT",
+ URL: &url.URL{Opaque: cm.targetAddr},
+ Host: cm.targetAddr,
+ Header: hdr,
+ }
+
+ // If there's no done channel (no deadline or cancellation
+ // from the caller possible), at least set some (long)
+ // timeout here. This will make sure we don't block forever
+ // and leak a goroutine if the connection stops replying
+ // after the TCP connect.
+ connectCtx := ctx
+ if ctx.Done() == nil {
+ newCtx, cancel := context.WithTimeout(ctx, 1*time.Minute)
+ defer cancel()
+ connectCtx = newCtx
+ }
+
+ didReadResponse := make(chan struct{}) // closed after CONNECT write+read is done or fails
+ var (
+ resp *Response
+ err error // write or read error
+ )
+ // Write the CONNECT request & read the response.
+ go func() {
+ defer close(didReadResponse)
+ err = connectReq.Write(conn)
+ if err != nil {
+ return
+ }
+ // Okay to use and discard buffered reader here, because
+ // TLS server will not speak until spoken to.
+ br := bufio.NewReader(conn)
+ resp, err = ReadResponse(br, connectReq)
+ }()
+ select {
+ case <-connectCtx.Done():
+ conn.Close()
+ <-didReadResponse
+ return nil, connectCtx.Err()
+ case <-didReadResponse:
+ // resp or err now set
+ }
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+ if resp.StatusCode != 200 {
+ _, text, ok := strings.Cut(resp.Status, " ")
+ conn.Close()
+ if !ok {
+ return nil, errors.New("unknown status code")
+ }
+ return nil, errors.New(text)
+ }
+ }
+
+ if cm.proxyURL != nil && cm.targetScheme == "https" {
+ if err := pconn.addTLS(ctx, cm.tlsHost(), trace); err != nil {
+ return nil, err
+ }
+ }
+
+ if s := pconn.tlsState; s != nil && s.NegotiatedProtocolIsMutual && s.NegotiatedProtocol != "" {
+ if next, ok := t.TLSNextProto[s.NegotiatedProtocol]; ok {
+ alt := next(cm.targetAddr, pconn.conn.(*tls.Conn))
+ if e, ok := alt.(erringRoundTripper); ok {
+ // pconn.conn was closed by next (http2configureTransports.upgradeFn).
+ return nil, e.RoundTripErr()
+ }
+ return &persistConn{t: t, cacheKey: pconn.cacheKey, alt: alt}, nil
+ }
+ }
+
+ pconn.br = bufio.NewReaderSize(pconn, t.readBufferSize())
+ pconn.bw = bufio.NewWriterSize(persistConnWriter{pconn}, t.writeBufferSize())
+
+ go pconn.readLoop()
+ go pconn.writeLoop()
+ return pconn, nil
+}
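+
+// Illustrative sketch (not part of the original file): the proxy branches in
+// dialConn are selected by the Transport's Proxy func; for example (proxy URL
+// hypothetical):
+//
+//	proxyURL, _ := url.Parse("http://proxy.internal:3128")
+//	t := &http.Transport{
+//		Proxy:              http.ProxyURL(proxyURL),
+//		ProxyConnectHeader: http.Header{"X-Team": {"infra"}}, // extra headers on CONNECT to https targets
+//	}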
+
+// persistConnWriter is the io.Writer written to by pc.bw.
+// It accumulates the number of bytes written to the underlying conn,
+// so the retry logic can determine whether any bytes made it across
+// the wire.
+// This is exactly 1 pointer field wide so it can go into an interface
+// without allocation.
+type persistConnWriter struct {
+ pc *persistConn
+}
+
+func (w persistConnWriter) Write(p []byte) (n int, err error) {
+ n, err = w.pc.conn.Write(p)
+ w.pc.nwrite += int64(n)
+ return
+}
+
+// ReadFrom exposes persistConnWriter's underlying Conn to io.Copy and if
+// the Conn implements io.ReaderFrom, it can take advantage of optimizations
+// such as sendfile.
+func (w persistConnWriter) ReadFrom(r io.Reader) (n int64, err error) {
+ n, err = io.Copy(w.pc.conn, r)
+ w.pc.nwrite += n
+ return
+}
+
+var _ io.ReaderFrom = (*persistConnWriter)(nil)
+
+// connectMethod is the map key (in its String form) for keeping persistent
+// TCP connections alive for subsequent HTTP requests.
+//
+// A connect method may be of the following types:
+//
+// connectMethod.key().String() Description
+// ------------------------------ -------------------------
+// |http|foo.com http directly to server, no proxy
+// |https|foo.com https directly to server, no proxy
+// |https,h1|foo.com https directly to server w/o HTTP/2, no proxy
+// http://proxy.com|https|foo.com http to proxy, then CONNECT to foo.com
+// http://proxy.com|http http to proxy, http to anywhere after that
+// socks5://proxy.com|http|foo.com socks5 to proxy, then http to foo.com
+// socks5://proxy.com|https|foo.com socks5 to proxy, then https to foo.com
+// https://proxy.com|https|foo.com https to proxy, then CONNECT to foo.com
+// https://proxy.com|http https to proxy, http to anywhere after that
+type connectMethod struct {
+ _ incomparable
+ proxyURL *url.URL // nil for no proxy, else full proxy URL
+ targetScheme string // "http" or "https"
+ // If proxyURL specifies an http or https proxy, and targetScheme is http (not https),
+ // then targetAddr is not included in the connect method key, because the socket can
+ // be reused for different targetAddr values.
+ targetAddr string
+ onlyH1 bool // whether to disable HTTP/2 and force HTTP/1
+}
+
+func (cm *connectMethod) key() connectMethodKey {
+ proxyStr := ""
+ targetAddr := cm.targetAddr
+ if cm.proxyURL != nil {
+ proxyStr = cm.proxyURL.String()
+ if (cm.proxyURL.Scheme == "http" || cm.proxyURL.Scheme == "https") && cm.targetScheme == "http" {
+ targetAddr = ""
+ }
+ }
+ return connectMethodKey{
+ proxy: proxyStr,
+ scheme: cm.targetScheme,
+ addr: targetAddr,
+ onlyH1: cm.onlyH1,
+ }
+}
+
+// scheme returns the first hop scheme: http, https, or socks5
+func (cm *connectMethod) scheme() string {
+ if cm.proxyURL != nil {
+ return cm.proxyURL.Scheme
+ }
+ return cm.targetScheme
+}
+
+// addr returns the first hop "host:port" to which we need to TCP connect.
+func (cm *connectMethod) addr() string {
+ if cm.proxyURL != nil {
+ return canonicalAddr(cm.proxyURL)
+ }
+ return cm.targetAddr
+}
+
+// tlsHost returns the host name to match against the peer's
+// TLS certificate.
+func (cm *connectMethod) tlsHost() string {
+ h := cm.targetAddr
+ if hasPort(h) {
+ h = h[:strings.LastIndex(h, ":")]
+ }
+ return h
+}
+
+// connectMethodKey is the map key version of connectMethod, with a
+// stringified proxy URL (or the empty string) instead of a pointer to
+// a URL.
+type connectMethodKey struct {
+ proxy, scheme, addr string
+ onlyH1 bool
+}
+
+func (k connectMethodKey) String() string {
+ // Only used by tests.
+ var h1 string
+ if k.onlyH1 {
+ h1 = ",h1"
+ }
+ return fmt.Sprintf("%s|%s%s|%s", k.proxy, k.scheme, h1, k.addr)
+}
+
+// persistConn wraps a connection, usually a persistent one
+// (but may be used for non-keep-alive requests as well)
+type persistConn struct {
+ // alt optionally specifies the TLS NextProto RoundTripper.
+ // This is used for HTTP/2 today and future protocols later.
+ // If it's non-nil, the rest of the fields are unused.
+ alt RoundTripper
+
+ t *Transport
+ cacheKey connectMethodKey
+ conn net.Conn
+ tlsState *tls.ConnectionState
+ br *bufio.Reader // from conn
+ bw *bufio.Writer // to conn
+ nwrite int64 // bytes written
+ reqch chan requestAndChan // written by roundTrip; read by readLoop
+ writech chan writeRequest // written by roundTrip; read by writeLoop
+ closech chan struct{} // closed when conn closed
+ isProxy bool
+ sawEOF bool // whether we've seen EOF from conn; owned by readLoop
+ readLimit int64 // bytes allowed to be read; owned by readLoop
+ // writeErrCh passes the request write error (usually nil)
+ // from the writeLoop goroutine to the readLoop which passes
+ // it off to the res.Body reader, which then uses it to decide
+ // whether or not a connection can be reused. Issue 7569.
+ writeErrCh chan error
+
+ writeLoopDone chan struct{} // closed when write loop ends
+
+ // Both guarded by Transport.idleMu:
+ idleAt time.Time // time it last became idle
+ idleTimer *time.Timer // holding an AfterFunc to close it
+
+ mu sync.Mutex // guards following fields
+ numExpectedResponses int
+ closed error // set non-nil when conn is closed, before closech is closed
+ canceledErr error // set non-nil if conn is canceled
+ broken bool // an error has happened on this connection; marked broken so it's not reused.
+ reused bool // whether conn has had successful request/response and is being reused.
+ // mutateHeaderFunc is an optional func to modify extra
+ // headers on each outbound request before it's written. (the
+ // original Request given to RoundTrip is not modified)
+ mutateHeaderFunc func(Header)
+}
+
+func (pc *persistConn) maxHeaderResponseSize() int64 {
+ if v := pc.t.MaxResponseHeaderBytes; v != 0 {
+ return v
+ }
+ return 10 << 20 // conservative default; same as http2
+}
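+
+// Illustrative sketch (not part of the original file): callers override the
+// 10 MiB default with the exported field:
+//
+//	t := &http.Transport{MaxResponseHeaderBytes: 1 << 20} // cap response headers at 1 MiB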
+
+func (pc *persistConn) Read(p []byte) (n int, err error) {
+ if pc.readLimit <= 0 {
+ return 0, fmt.Errorf("read limit of %d bytes exhausted", pc.maxHeaderResponseSize())
+ }
+ if int64(len(p)) > pc.readLimit {
+ p = p[:pc.readLimit]
+ }
+ n, err = pc.conn.Read(p)
+ if err == io.EOF {
+ pc.sawEOF = true
+ }
+ pc.readLimit -= int64(n)
+ return
+}
+
+// isBroken reports whether this connection is in a known broken state.
+func (pc *persistConn) isBroken() bool {
+ pc.mu.Lock()
+ b := pc.closed != nil
+ pc.mu.Unlock()
+ return b
+}
+
+// canceled returns non-nil if the connection was closed due to
+// CancelRequest or due to context cancellation.
+func (pc *persistConn) canceled() error {
+ pc.mu.Lock()
+ defer pc.mu.Unlock()
+ return pc.canceledErr
+}
+
+// isReused reports whether this connection has been used before.
+func (pc *persistConn) isReused() bool {
+ pc.mu.Lock()
+ r := pc.reused
+ pc.mu.Unlock()
+ return r
+}
+
+func (pc *persistConn) gotIdleConnTrace(idleAt time.Time) (t httptrace.GotConnInfo) {
+ pc.mu.Lock()
+ defer pc.mu.Unlock()
+ t.Reused = pc.reused
+ t.Conn = pc.conn
+ t.WasIdle = true
+ if !idleAt.IsZero() {
+ t.IdleTime = time.Since(idleAt)
+ }
+ return
+}
+
+func (pc *persistConn) cancelRequest(err error) {
+ pc.mu.Lock()
+ defer pc.mu.Unlock()
+ pc.canceledErr = err
+ pc.closeLocked(errRequestCanceled)
+}
+
+// closeConnIfStillIdle closes the connection if it's still sitting idle.
+// This is what's called by the persistConn's idleTimer, and is run in its
+// own goroutine.
+func (pc *persistConn) closeConnIfStillIdle() {
+ t := pc.t
+ t.idleMu.Lock()
+ defer t.idleMu.Unlock()
+ if _, ok := t.idleLRU.m[pc]; !ok {
+ // Not idle.
+ return
+ }
+ t.removeIdleConnLocked(pc)
+ pc.close(errIdleConnTimeout)
+}
+
+// mapRoundTripError returns the appropriate error value for
+// persistConn.roundTrip.
+//
+// The provided err is the first error that (*persistConn).roundTrip
+// happened to receive from its select statement.
+//
+// The startBytesWritten value should be the value of pc.nwrite before the roundTrip
+// started writing the request.
+func (pc *persistConn) mapRoundTripError(req *transportRequest, startBytesWritten int64, err error) error {
+ if err == nil {
+ return nil
+ }
+
+ // Wait for the writeLoop goroutine to terminate to avoid data
+ // races on callers who mutate the request on failure.
+ //
+ // When resc in pc.roundTrip and hence rc.ch receives a responseAndError
+ // with a non-nil error it implies that the persistConn is either closed
+ // or closing. Waiting on pc.writeLoopDone is hence safe as all callers
+ // close closech which in turn ensures writeLoop returns.
+ <-pc.writeLoopDone
+
+ // If the request was canceled, that's better than network
+ // failures that were likely the result of tearing down the
+ // connection.
+ if cerr := pc.canceled(); cerr != nil {
+ return cerr
+ }
+
+ // See if an error was set explicitly.
+ req.mu.Lock()
+ reqErr := req.err
+ req.mu.Unlock()
+ if reqErr != nil {
+ return reqErr
+ }
+
+ if err == errServerClosedIdle {
+ // Don't decorate
+ return err
+ }
+
+ if _, ok := err.(transportReadFromServerError); ok {
+ if pc.nwrite == startBytesWritten {
+ return nothingWrittenError{err}
+ }
+ // Don't decorate
+ return err
+ }
+ if pc.isBroken() {
+ if pc.nwrite == startBytesWritten {
+ return nothingWrittenError{err}
+ }
+ return fmt.Errorf("net/http: HTTP/1.x transport connection broken: %v", err)
+ }
+ return err
+}
+
+// errCallerOwnsConn is an internal sentinel error used when we hand
+// off a writable response.Body to the caller. We use this to prevent
+// closing a net.Conn that is now owned by the caller.
+var errCallerOwnsConn = errors.New("read loop ending; caller owns writable underlying conn")
+
+func (pc *persistConn) readLoop() {
+ closeErr := errReadLoopExiting // default value, if not changed below
+ defer func() {
+ pc.close(closeErr)
+ pc.t.removeIdleConn(pc)
+ }()
+
+ tryPutIdleConn := func(trace *httptrace.ClientTrace) bool {
+ if err := pc.t.tryPutIdleConn(pc); err != nil {
+ closeErr = err
+ if trace != nil && trace.PutIdleConn != nil && err != errKeepAlivesDisabled {
+ trace.PutIdleConn(err)
+ }
+ return false
+ }
+ if trace != nil && trace.PutIdleConn != nil {
+ trace.PutIdleConn(nil)
+ }
+ return true
+ }
+
+ // eofc is used to block caller goroutines reading from Response.Body
+ // at EOF until this goroutine has (potentially) added the connection
+ // back to the idle pool.
+ eofc := make(chan struct{})
+ defer close(eofc) // unblock reader on errors
+
+ // Read this once, before loop starts. (to avoid races in tests)
+ testHookMu.Lock()
+ testHookReadLoopBeforeNextRead := testHookReadLoopBeforeNextRead
+ testHookMu.Unlock()
+
+ alive := true
+ for alive {
+ pc.readLimit = pc.maxHeaderResponseSize()
+ _, err := pc.br.Peek(1)
+
+ pc.mu.Lock()
+ if pc.numExpectedResponses == 0 {
+ pc.readLoopPeekFailLocked(err)
+ pc.mu.Unlock()
+ return
+ }
+ pc.mu.Unlock()
+
+ rc := <-pc.reqch
+ trace := httptrace.ContextClientTrace(rc.req.Context())
+
+ var resp *Response
+ if err == nil {
+ resp, err = pc.readResponse(rc, trace)
+ } else {
+ err = transportReadFromServerError{err}
+ closeErr = err
+ }
+
+ if err != nil {
+ if pc.readLimit <= 0 {
+ err = fmt.Errorf("net/http: server response headers exceeded %d bytes; aborted", pc.maxHeaderResponseSize())
+ }
+
+ select {
+ case rc.ch <- responseAndError{err: err}:
+ case <-rc.callerGone:
+ return
+ }
+ return
+ }
+ pc.readLimit = maxInt64 // effectively no limit for response bodies
+
+ pc.mu.Lock()
+ pc.numExpectedResponses--
+ pc.mu.Unlock()
+
+ bodyWritable := resp.bodyIsWritable()
+ hasBody := rc.req.Method != "HEAD" && resp.ContentLength != 0
+
+ if resp.Close || rc.req.Close || resp.StatusCode <= 199 || bodyWritable {
+ // Don't do keep-alive on error if either party requested a close
+ // or we get an unexpected informational (1xx) response.
+ // StatusCode 100 is already handled above.
+ alive = false
+ }
+
+ if !hasBody || bodyWritable {
+ replaced := pc.t.replaceReqCanceler(rc.cancelKey, nil)
+
+ // Put the idle conn back into the pool before we send the response
+ // so if they process it quickly and make another request, they'll
+ // get this same conn. But we use the unbuffered channel 'rc'
+ // to guarantee that persistConn.roundTrip got out of its select
+ // potentially waiting for this persistConn to close.
+ alive = alive &&
+ !pc.sawEOF &&
+ pc.wroteRequest() &&
+ replaced && tryPutIdleConn(trace)
+
+ if bodyWritable {
+ closeErr = errCallerOwnsConn
+ }
+
+ select {
+ case rc.ch <- responseAndError{res: resp}:
+ case <-rc.callerGone:
+ return
+ }
+
+ // Now that they've read from the unbuffered channel, they're safely
+ // out of the select that also waits on this goroutine to die, so
+ // we're allowed to exit now if needed (if alive is false)
+ testHookReadLoopBeforeNextRead()
+ continue
+ }
+
+ waitForBodyRead := make(chan bool, 2)
+ body := &bodyEOFSignal{
+ body: resp.Body,
+ earlyCloseFn: func() error {
+ waitForBodyRead <- false
+ <-eofc // will be closed by deferred call at the end of the function
+ return nil
+
+ },
+ fn: func(err error) error {
+ isEOF := err == io.EOF
+ waitForBodyRead <- isEOF
+ if isEOF {
+ <-eofc // see comment above eofc declaration
+ } else if err != nil {
+ if cerr := pc.canceled(); cerr != nil {
+ return cerr
+ }
+ }
+ return err
+ },
+ }
+
+ resp.Body = body
+ if rc.addedGzip && ascii.EqualFold(resp.Header.Get("Content-Encoding"), "gzip") {
+ resp.Body = &gzipReader{body: body}
+ resp.Header.Del("Content-Encoding")
+ resp.Header.Del("Content-Length")
+ resp.ContentLength = -1
+ resp.Uncompressed = true
+ }
+
+ select {
+ case rc.ch <- responseAndError{res: resp}:
+ case <-rc.callerGone:
+ return
+ }
+
+ // Before looping back to the top of this function and peeking on
+ // the bufio.Reader, wait for the caller goroutine to finish
+ // reading the response body. (or for cancellation or death)
+ select {
+ case bodyEOF := <-waitForBodyRead:
+ replaced := pc.t.replaceReqCanceler(rc.cancelKey, nil) // before pc might return to idle pool
+ alive = alive &&
+ bodyEOF &&
+ !pc.sawEOF &&
+ pc.wroteRequest() &&
+ replaced && tryPutIdleConn(trace)
+ if bodyEOF {
+ eofc <- struct{}{}
+ }
+ case <-rc.req.Cancel:
+ alive = false
+ pc.t.CancelRequest(rc.req)
+ case <-rc.req.Context().Done():
+ alive = false
+ pc.t.cancelRequest(rc.cancelKey, rc.req.Context().Err())
+ case <-pc.closech:
+ alive = false
+ }
+
+ testHookReadLoopBeforeNextRead()
+ }
+}
+
+func (pc *persistConn) readLoopPeekFailLocked(peekErr error) {
+ if pc.closed != nil {
+ return
+ }
+ if n := pc.br.Buffered(); n > 0 {
+ buf, _ := pc.br.Peek(n)
+ if is408Message(buf) {
+ pc.closeLocked(errServerClosedIdle)
+ return
+ }
+ log.Printf("Unsolicited response received on idle HTTP channel starting with %q; err=%v", buf, peekErr)
+ }
+ if peekErr == io.EOF {
+ // common case.
+ pc.closeLocked(errServerClosedIdle)
+ } else {
+ pc.closeLocked(fmt.Errorf("readLoopPeekFailLocked: %v", peekErr))
+ }
+}
+
+// is408Message reports whether buf has the prefix of an
+// HTTP 408 Request Timeout response.
+// See golang.org/issue/32310.
+func is408Message(buf []byte) bool {
+ if len(buf) < len("HTTP/1.x 408") {
+ return false
+ }
+ if string(buf[:7]) != "HTTP/1." {
+ return false
+ }
+ return string(buf[8:12]) == " 408"
+}
+
+// readResponse reads an HTTP response (or two, in the case of "Expect:
+// 100-continue") from the server. It returns the final non-100 one.
+// trace is optional.
+func (pc *persistConn) readResponse(rc requestAndChan, trace *httptrace.ClientTrace) (resp *Response, err error) {
+ if trace != nil && trace.GotFirstResponseByte != nil {
+ if peek, err := pc.br.Peek(1); err == nil && len(peek) == 1 {
+ trace.GotFirstResponseByte()
+ }
+ }
+ num1xx := 0 // number of informational 1xx headers received
+ const max1xxResponses = 5 // arbitrary bound on number of informational responses
+
+ continueCh := rc.continueCh
+ for {
+ resp, err = ReadResponse(pc.br, rc.req)
+ if err != nil {
+ return
+ }
+ resCode := resp.StatusCode
+ if continueCh != nil {
+ if resCode == 100 {
+ if trace != nil && trace.Got100Continue != nil {
+ trace.Got100Continue()
+ }
+ continueCh <- struct{}{}
+ continueCh = nil
+ } else if resCode >= 200 {
+ close(continueCh)
+ continueCh = nil
+ }
+ }
+ is1xx := 100 <= resCode && resCode <= 199
+ // treat 101 as a terminal status, see issue 26161
+ is1xxNonTerminal := is1xx && resCode != StatusSwitchingProtocols
+ if is1xxNonTerminal {
+ num1xx++
+ if num1xx > max1xxResponses {
+ return nil, errors.New("net/http: too many 1xx informational responses")
+ }
+ pc.readLimit = pc.maxHeaderResponseSize() // reset the limit
+ if trace != nil && trace.Got1xxResponse != nil {
+ if err := trace.Got1xxResponse(resCode, textproto.MIMEHeader(resp.Header)); err != nil {
+ return nil, err
+ }
+ }
+ continue
+ }
+ break
+ }
+ if resp.isProtocolSwitch() {
+ resp.Body = newReadWriteCloserBody(pc.br, pc.conn)
+ }
+
+ resp.TLS = pc.tlsState
+ return
+}
+
+// waitForContinue returns a function that blocks until any response,
+// timeout, or connection close occurs, and then reports whether the
+// request body should be sent.
+func (pc *persistConn) waitForContinue(continueCh <-chan struct{}) func() bool {
+ if continueCh == nil {
+ return nil
+ }
+ return func() bool {
+ timer := time.NewTimer(pc.t.ExpectContinueTimeout)
+ defer timer.Stop()
+
+ select {
+ case _, ok := <-continueCh:
+ return ok
+ case <-timer.C:
+ return true
+ case <-pc.closech:
+ return false
+ }
+ }
+}
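+
+// Illustrative sketch (not part of the original file): the timer above is set
+// from ExpectContinueTimeout, and the 100-continue exchange is requested per
+// request (URL and body hypothetical):
+//
+//	t := &http.Transport{ExpectContinueTimeout: time.Second}
+//	req, _ := http.NewRequest("PUT", "https://example.com/upload", body) // body: some io.Reader
+//	req.Header.Set("Expect", "100-continue")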
+
+func newReadWriteCloserBody(br *bufio.Reader, rwc io.ReadWriteCloser) io.ReadWriteCloser {
+ body := &readWriteCloserBody{ReadWriteCloser: rwc}
+ if br.Buffered() != 0 {
+ body.br = br
+ }
+ return body
+}
+
+// readWriteCloserBody is the Response.Body type used when we want to
+// give users write access to the Body through the underlying
+// connection (TCP, unless using custom dialers). This is then
+// the concrete type for a Response.Body on the 101 Switching
+// Protocols response, as used by WebSockets, h2c, etc.
+type readWriteCloserBody struct {
+ _ incomparable
+ br *bufio.Reader // used until empty
+ io.ReadWriteCloser
+}
+
+func (b *readWriteCloserBody) Read(p []byte) (n int, err error) {
+ if b.br != nil {
+ if n := b.br.Buffered(); len(p) > n {
+ p = p[:n]
+ }
+ n, err = b.br.Read(p)
+ if b.br.Buffered() == 0 {
+ b.br = nil
+ }
+ return n, err
+ }
+ return b.ReadWriteCloser.Read(p)
+}
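+
+// Illustrative sketch (not part of the original file): on a 101 Switching
+// Protocols response the Body is this read-write type, so callers can take
+// over the connection by type-asserting it:
+//
+//	resp, err := client.Do(req) // req carries the Upgrade handshake headers
+//	if err == nil && resp.StatusCode == http.StatusSwitchingProtocols {
+//		if rw, ok := resp.Body.(io.ReadWriteCloser); ok {
+//			// Speak the upgraded protocol over rw.
+//		}
+//	}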
+
+// nothingWrittenError wraps a write error that ended up writing zero bytes.
+type nothingWrittenError struct {
+ error
+}
+
+func (pc *persistConn) writeLoop() {
+ defer close(pc.writeLoopDone)
+ for {
+ select {
+ case wr := <-pc.writech:
+ startBytesWritten := pc.nwrite
+ err := wr.req.Request.write(pc.bw, pc.isProxy, wr.req.extra, pc.waitForContinue(wr.continueCh))
+ if bre, ok := err.(requestBodyReadError); ok {
+ err = bre.error
+ // Errors reading from the user's
+ // Request.Body are high priority.
+ // Set it here before sending on the
+ // channels below or calling
+ // pc.close() which tears down
+ // connections and causes other
+ // errors.
+ wr.req.setError(err)
+ }
+ if err == nil {
+ err = pc.bw.Flush()
+ }
+ if err != nil {
+ if pc.nwrite == startBytesWritten {
+ err = nothingWrittenError{err}
+ }
+ }
+ pc.writeErrCh <- err // to the body reader, which might recycle us
+ wr.ch <- err // to the roundTrip function
+ if err != nil {
+ pc.close(err)
+ return
+ }
+ case <-pc.closech:
+ return
+ }
+ }
+}
+
+// maxWriteWaitBeforeConnReuse is how long a Transport RoundTrip
+// will wait to see the Request's Body.Write result after getting a
+// response from the server. See comments in (*persistConn).wroteRequest.
+const maxWriteWaitBeforeConnReuse = 50 * time.Millisecond
+
+// wroteRequest is a check before recycling a connection that the previous write
+// (from writeLoop above) happened and was successful.
+func (pc *persistConn) wroteRequest() bool {
+ select {
+ case err := <-pc.writeErrCh:
+ // Common case: the write happened well before the response, so
+ // avoid creating a timer.
+ return err == nil
+ default:
+ // Rare case: the request was written in writeLoop above but
+ // before it could send to pc.writeErrCh, the reader read it
+ // all, processed it, and called us here. In this case, give the
+ // write goroutine a bit of time to finish its send.
+ //
+ // Less rare case: We also get here in the legitimate case of
+ // Issue 7569, where the writer is still writing (or stalled),
+ // but the server has already replied. In this case, we don't
+ // want to wait too long, and we want to return false so this
+ // connection isn't re-used.
+ t := time.NewTimer(maxWriteWaitBeforeConnReuse)
+ defer t.Stop()
+ select {
+ case err := <-pc.writeErrCh:
+ return err == nil
+ case <-t.C:
+ return false
+ }
+ }
+}
+
+// responseAndError is how the goroutine reading from an HTTP/1 server
+// communicates with the goroutine doing the RoundTrip.
+type responseAndError struct {
+ _ incomparable
+ res *Response // the response to use; set only when err is nil
+ err error
+}
+
+type requestAndChan struct {
+ _ incomparable
+ req *Request
+ cancelKey cancelKey
+ ch chan responseAndError // unbuffered; always send in select on callerGone
+
+ // whether the Transport (as opposed to the user client code)
+ // added the Accept-Encoding gzip header. If the Transport
+ // set it, only then do we transparently decode the gzip.
+ addedGzip bool
+
+ // Optional blocking chan for Expect: 100-continue (for send).
+ // If the request has an "Expect: 100-continue" header and
+ // the server responds 100 Continue, readLoop sends a value
+ // to writeLoop via this chan.
+ continueCh chan<- struct{}
+
+ callerGone <-chan struct{} // closed when roundTrip caller has returned
+}
+
+// A writeRequest is sent by the caller's goroutine to the
+// writeLoop's goroutine to write a request while the read loop
+// concurrently waits on both the write response and the server's
+// reply.
+type writeRequest struct {
+ req *transportRequest
+ ch chan<- error
+
+ // Optional blocking chan for Expect: 100-continue (for receive).
+ // If not nil, writeLoop blocks sending request body until
+ // it receives from this chan.
+ continueCh <-chan struct{}
+}
+
+type httpError struct {
+ err string
+ timeout bool
+}
+
+func (e *httpError) Error() string { return e.err }
+func (e *httpError) Timeout() bool { return e.timeout }
+func (e *httpError) Temporary() bool { return true }
+
+var errTimeout error = &httpError{err: "net/http: timeout awaiting response headers", timeout: true}
+
+// errRequestCanceled is set to be identical to the one from h2 to facilitate
+// testing.
+var errRequestCanceled = http2errRequestCanceled
+var errRequestCanceledConn = errors.New("net/http: request canceled while waiting for connection") // TODO: unify?
+
+func nop() {}
+
+// testHooks. Always non-nil.
+var (
+ testHookEnterRoundTrip = nop
+ testHookWaitResLoop = nop
+ testHookRoundTripRetried = nop
+ testHookPrePendingDial = nop
+ testHookPostPendingDial = nop
+
+ testHookMu sync.Locker = fakeLocker{} // guards following
+ testHookReadLoopBeforeNextRead = nop
+)
+
+func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err error) {
+ testHookEnterRoundTrip()
+ if !pc.t.replaceReqCanceler(req.cancelKey, pc.cancelRequest) {
+ pc.t.putOrCloseIdleConn(pc)
+ return nil, errRequestCanceled
+ }
+ pc.mu.Lock()
+ pc.numExpectedResponses++
+ headerFn := pc.mutateHeaderFunc
+ pc.mu.Unlock()
+
+ if headerFn != nil {
+ headerFn(req.extraHeaders())
+ }
+
+ // Ask for a compressed version if the caller didn't set their
+ // own value for Accept-Encoding. We only attempt to
+ // uncompress the gzip stream if we were the layer that
+ // requested it.
+ requestedGzip := false
+ if !pc.t.DisableCompression &&
+ req.Header.Get("Accept-Encoding") == "" &&
+ req.Header.Get("Range") == "" &&
+ req.Method != "HEAD" {
+ // Request gzip only, not deflate. Deflate is ambiguous and
+ // not as universally supported anyway.
+ // See: https://zlib.net/zlib_faq.html#faq39
+ //
+ // Note that we don't request this for HEAD requests,
+ // due to a bug in nginx:
+ // https://trac.nginx.org/nginx/ticket/358
+ // https://golang.org/issue/5522
+ //
+ // We don't request gzip if the request is for a range, since
+ // auto-decoding a portion of a gzipped document will just fail
+ // anyway. See https://golang.org/issue/8923
+ requestedGzip = true
+ req.extraHeaders().Set("Accept-Encoding", "gzip")
+ }
+
+ var continueCh chan struct{}
+ if req.ProtoAtLeast(1, 1) && req.Body != nil && req.expectsContinue() {
+ continueCh = make(chan struct{}, 1)
+ }
+
+ if pc.t.DisableKeepAlives &&
+ !req.wantsClose() &&
+ !isProtocolSwitchHeader(req.Header) {
+ req.extraHeaders().Set("Connection", "close")
+ }
+
+ gone := make(chan struct{})
+ defer close(gone)
+
+ defer func() {
+ if err != nil {
+ pc.t.setReqCanceler(req.cancelKey, nil)
+ }
+ }()
+
+ const debugRoundTrip = false
+
+ // Write the request concurrently with waiting for a response,
+ // in case the server decides to reply before reading our full
+ // request body.
+ startBytesWritten := pc.nwrite
+ writeErrCh := make(chan error, 1)
+ pc.writech <- writeRequest{req, writeErrCh, continueCh}
+
+ resc := make(chan responseAndError)
+ pc.reqch <- requestAndChan{
+ req: req.Request,
+ cancelKey: req.cancelKey,
+ ch: resc,
+ addedGzip: requestedGzip,
+ continueCh: continueCh,
+ callerGone: gone,
+ }
+
+ var respHeaderTimer <-chan time.Time
+ cancelChan := req.Request.Cancel
+ ctxDoneChan := req.Context().Done()
+ pcClosed := pc.closech
+ canceled := false
+ for {
+ testHookWaitResLoop()
+ select {
+ case err := <-writeErrCh:
+ if debugRoundTrip {
+ req.logf("writeErrCh resv: %T/%#v", err, err)
+ }
+ if err != nil {
+ pc.close(fmt.Errorf("write error: %v", err))
+ return nil, pc.mapRoundTripError(req, startBytesWritten, err)
+ }
+ if d := pc.t.ResponseHeaderTimeout; d > 0 {
+ if debugRoundTrip {
+ req.logf("starting timer for %v", d)
+ }
+ timer := time.NewTimer(d)
+ defer timer.Stop() // prevent leaks
+ respHeaderTimer = timer.C
+ }
+ case <-pcClosed:
+ pcClosed = nil
+ if canceled || pc.t.replaceReqCanceler(req.cancelKey, nil) {
+ if debugRoundTrip {
+ req.logf("closech recv: %T %#v", pc.closed, pc.closed)
+ }
+ return nil, pc.mapRoundTripError(req, startBytesWritten, pc.closed)
+ }
+ case <-respHeaderTimer:
+ if debugRoundTrip {
+ req.logf("timeout waiting for response headers.")
+ }
+ pc.close(errTimeout)
+ return nil, errTimeout
+ case re := <-resc:
+ if (re.res == nil) == (re.err == nil) {
+ panic(fmt.Sprintf("internal error: exactly one of res or err should be set; nil=%v", re.res == nil))
+ }
+ if debugRoundTrip {
+ req.logf("resc recv: %p, %T/%#v", re.res, re.err, re.err)
+ }
+ if re.err != nil {
+ return nil, pc.mapRoundTripError(req, startBytesWritten, re.err)
+ }
+ return re.res, nil
+ case <-cancelChan:
+ canceled = pc.t.cancelRequest(req.cancelKey, errRequestCanceled)
+ cancelChan = nil
+ case <-ctxDoneChan:
+ canceled = pc.t.cancelRequest(req.cancelKey, req.Context().Err())
+ cancelChan = nil
+ ctxDoneChan = nil
+ }
+ }
+}
+
+// tLogKey is a context WithValue key for test debugging contexts containing
+// a t.Logf func. See export_test.go's Request.WithT method.
+type tLogKey struct{}
+
+func (tr *transportRequest) logf(format string, args ...any) {
+ if logf, ok := tr.Request.Context().Value(tLogKey{}).(func(string, ...any)); ok {
+ logf(time.Now().Format(time.RFC3339Nano)+": "+format, args...)
+ }
+}
+
+// markReused marks this connection as having been successfully used for a
+// request and response.
+func (pc *persistConn) markReused() {
+ pc.mu.Lock()
+ pc.reused = true
+ pc.mu.Unlock()
+}
+
+// close closes the underlying TCP connection and closes
+// the pc.closech channel.
+//
+// The provided err is only for testing and debugging; in normal
+// circumstances it should never be seen by users.
+func (pc *persistConn) close(err error) {
+ pc.mu.Lock()
+ defer pc.mu.Unlock()
+ pc.closeLocked(err)
+}
+
+func (pc *persistConn) closeLocked(err error) {
+ if err == nil {
+ panic("nil error")
+ }
+ pc.broken = true
+ if pc.closed == nil {
+ pc.closed = err
+ pc.t.decConnsPerHost(pc.cacheKey)
+ // Close HTTP/1 (pc.alt == nil) connection.
+ // HTTP/2 closes its connection itself.
+ if pc.alt == nil {
+ if err != errCallerOwnsConn {
+ pc.conn.Close()
+ }
+ close(pc.closech)
+ }
+ }
+ pc.mutateHeaderFunc = nil
+}
+
+var portMap = map[string]string{
+ "http": "80",
+ "https": "443",
+ "socks5": "1080",
+}
+
+// canonicalAddr returns url.Host but always with a ":port" suffix
+func canonicalAddr(url *url.URL) string {
+ addr := url.Hostname()
+ if v, err := idnaASCII(addr); err == nil {
+ addr = v
+ }
+ port := url.Port()
+ if port == "" {
+ port = portMap[url.Scheme]
+ }
+ return net.JoinHostPort(addr, port)
+}
+
+// bodyEOFSignal is used by the HTTP/1 transport when reading response
+// bodies to make sure we see the end of a response body before
+// proceeding and reading on the connection again.
+//
+// It wraps a ReadCloser but runs fn (if non-nil) at most
+// once, right before its final (error-producing) Read or Close call
+// returns. fn should return the new error to return from Read or Close.
+//
+// If earlyCloseFn is non-nil and Close is called before io.EOF is
+// seen, earlyCloseFn is called instead of fn, and its return value is
+// the return value from Close.
+type bodyEOFSignal struct {
+ body io.ReadCloser
+ mu sync.Mutex // guards following 4 fields
+ closed bool // whether Close has been called
+ rerr error // sticky Read error
+ fn func(error) error // err will be nil on Read io.EOF
+ earlyCloseFn func() error // optional alt Close func used if io.EOF not seen
+}
+
+var errReadOnClosedResBody = errors.New("http: read on closed response body")
+
+func (es *bodyEOFSignal) Read(p []byte) (n int, err error) {
+ es.mu.Lock()
+ closed, rerr := es.closed, es.rerr
+ es.mu.Unlock()
+ if closed {
+ return 0, errReadOnClosedResBody
+ }
+ if rerr != nil {
+ return 0, rerr
+ }
+
+ n, err = es.body.Read(p)
+ if err != nil {
+ es.mu.Lock()
+ defer es.mu.Unlock()
+ if es.rerr == nil {
+ es.rerr = err
+ }
+ err = es.condfn(err)
+ }
+ return
+}
+
+func (es *bodyEOFSignal) Close() error {
+ es.mu.Lock()
+ defer es.mu.Unlock()
+ if es.closed {
+ return nil
+ }
+ es.closed = true
+ if es.earlyCloseFn != nil && es.rerr != io.EOF {
+ return es.earlyCloseFn()
+ }
+ err := es.body.Close()
+ return es.condfn(err)
+}
+
+// caller must hold es.mu.
+func (es *bodyEOFSignal) condfn(err error) error {
+ if es.fn == nil {
+ return err
+ }
+ err = es.fn(err)
+ es.fn = nil
+ return err
+}
+
+// gzipReader wraps a response body so it can lazily
+// call gzip.NewReader on the first call to Read
+type gzipReader struct {
+ _ incomparable
+ body *bodyEOFSignal // underlying HTTP/1 response body framing
+ zr *gzip.Reader // lazily-initialized gzip reader
+ zerr error // any error from gzip.NewReader; sticky
+}
+
+func (gz *gzipReader) Read(p []byte) (n int, err error) {
+ if gz.zr == nil {
+ if gz.zerr == nil {
+ gz.zr, gz.zerr = gzip.NewReader(gz.body)
+ }
+ if gz.zerr != nil {
+ return 0, gz.zerr
+ }
+ }
+
+ gz.body.mu.Lock()
+ if gz.body.closed {
+ err = errReadOnClosedResBody
+ }
+ gz.body.mu.Unlock()
+
+ if err != nil {
+ return 0, err
+ }
+ return gz.zr.Read(p)
+}
+
+func (gz *gzipReader) Close() error {
+ return gz.body.Close()
+}
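+
+// Illustrative sketch (not part of the original file): transparent decoding
+// only happens when the Transport itself added Accept-Encoding; callers can
+// detect it via Response.Uncompressed or opt out entirely (URL hypothetical):
+//
+//	resp, _ := client.Get("https://example.com/data")
+//	if resp.Uncompressed {
+//		// Body was gzip-decoded by the transport; ContentLength is -1.
+//	}
+//	t := &http.Transport{DisableCompression: true} // never request gzip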
+
+type tlsHandshakeTimeoutError struct{}
+
+func (tlsHandshakeTimeoutError) Timeout() bool { return true }
+func (tlsHandshakeTimeoutError) Temporary() bool { return true }
+func (tlsHandshakeTimeoutError) Error() string { return "net/http: TLS handshake timeout" }
+
+// fakeLocker is a sync.Locker which does nothing. It's used to guard
+// test-only fields when not under test, to avoid runtime atomic
+// overhead.
+type fakeLocker struct{}
+
+func (fakeLocker) Lock() {}
+func (fakeLocker) Unlock() {}
+
+// cloneTLSConfig returns a shallow clone of cfg, or a new zero tls.Config if
+// cfg is nil. This is safe to call even if cfg is in active use by a TLS
+// client or server.
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+ return cfg.Clone()
+}
+
+type connLRU struct {
+ ll *list.List // list.Element.Value type is of *persistConn
+ m map[*persistConn]*list.Element
+}
+
+// add adds pc to the head of the linked list.
+func (cl *connLRU) add(pc *persistConn) {
+ if cl.ll == nil {
+ cl.ll = list.New()
+ cl.m = make(map[*persistConn]*list.Element)
+ }
+ ele := cl.ll.PushFront(pc)
+ if _, ok := cl.m[pc]; ok {
+ panic("persistConn was already in LRU")
+ }
+ cl.m[pc] = ele
+}
+
+func (cl *connLRU) removeOldest() *persistConn {
+ ele := cl.ll.Back()
+ pc := ele.Value.(*persistConn)
+ cl.ll.Remove(ele)
+ delete(cl.m, pc)
+ return pc
+}
+
+// remove removes pc from cl.
+func (cl *connLRU) remove(pc *persistConn) {
+ if ele, ok := cl.m[pc]; ok {
+ cl.ll.Remove(ele)
+ delete(cl.m, pc)
+ }
+}
+
+// len returns the number of items in the cache.
+func (cl *connLRU) len() int {
+ return len(cl.m)
+}
diff --git a/contrib/go/_std_1.18/src/net/http/transport_default_other.go b/contrib/go/_std_1.19/src/net/http/transport_default_other.go
index 8a2f1cc42b..8a2f1cc42b 100644
--- a/contrib/go/_std_1.18/src/net/http/transport_default_other.go
+++ b/contrib/go/_std_1.19/src/net/http/transport_default_other.go
diff --git a/contrib/go/_std_1.18/src/net/interface.go b/contrib/go/_std_1.19/src/net/interface.go
index 0e5d3202c9..0e5d3202c9 100644
--- a/contrib/go/_std_1.18/src/net/interface.go
+++ b/contrib/go/_std_1.19/src/net/interface.go
diff --git a/contrib/go/_std_1.18/src/net/interface_bsd.go b/contrib/go/_std_1.19/src/net/interface_bsd.go
index db7bc756d8..db7bc756d8 100644
--- a/contrib/go/_std_1.18/src/net/interface_bsd.go
+++ b/contrib/go/_std_1.19/src/net/interface_bsd.go
diff --git a/contrib/go/_std_1.18/src/net/interface_darwin.go b/contrib/go/_std_1.19/src/net/interface_darwin.go
index bb4fd73a98..bb4fd73a98 100644
--- a/contrib/go/_std_1.18/src/net/interface_darwin.go
+++ b/contrib/go/_std_1.19/src/net/interface_darwin.go
diff --git a/contrib/go/_std_1.18/src/net/interface_linux.go b/contrib/go/_std_1.19/src/net/interface_linux.go
index 441ab2f880..441ab2f880 100644
--- a/contrib/go/_std_1.18/src/net/interface_linux.go
+++ b/contrib/go/_std_1.19/src/net/interface_linux.go
diff --git a/contrib/go/_std_1.18/src/net/ip.go b/contrib/go/_std_1.19/src/net/ip.go
index 54c52881cf..54c52881cf 100644
--- a/contrib/go/_std_1.18/src/net/ip.go
+++ b/contrib/go/_std_1.19/src/net/ip.go
diff --git a/contrib/go/_std_1.18/src/net/iprawsock.go b/contrib/go/_std_1.19/src/net/iprawsock.go
index f18331a1fd..f18331a1fd 100644
--- a/contrib/go/_std_1.18/src/net/iprawsock.go
+++ b/contrib/go/_std_1.19/src/net/iprawsock.go
diff --git a/contrib/go/_std_1.19/src/net/iprawsock_posix.go b/contrib/go/_std_1.19/src/net/iprawsock_posix.go
new file mode 100644
index 0000000000..64112b08dd
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/iprawsock_posix.go
@@ -0,0 +1,147 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm) || windows
+
+package net
+
+import (
+ "context"
+ "syscall"
+)
+
+func sockaddrToIP(sa syscall.Sockaddr) Addr {
+ switch sa := sa.(type) {
+ case *syscall.SockaddrInet4:
+ return &IPAddr{IP: sa.Addr[0:]}
+ case *syscall.SockaddrInet6:
+ return &IPAddr{IP: sa.Addr[0:], Zone: zoneCache.name(int(sa.ZoneId))}
+ }
+ return nil
+}
+
+func (a *IPAddr) family() int {
+ if a == nil || len(a.IP) <= IPv4len {
+ return syscall.AF_INET
+ }
+ if a.IP.To4() != nil {
+ return syscall.AF_INET
+ }
+ return syscall.AF_INET6
+}
+
+func (a *IPAddr) sockaddr(family int) (syscall.Sockaddr, error) {
+ if a == nil {
+ return nil, nil
+ }
+ return ipToSockaddr(family, a.IP, 0, a.Zone)
+}
+
+func (a *IPAddr) toLocal(net string) sockaddr {
+ return &IPAddr{loopbackIP(net), a.Zone}
+}
+
+func (c *IPConn) readFrom(b []byte) (int, *IPAddr, error) {
+ // TODO(cw,rsc): consider using readv if we know the family
+ // type to avoid the header trim/copy
+ var addr *IPAddr
+ n, sa, err := c.fd.readFrom(b)
+ switch sa := sa.(type) {
+ case *syscall.SockaddrInet4:
+ addr = &IPAddr{IP: sa.Addr[0:]}
+ n = stripIPv4Header(n, b)
+ case *syscall.SockaddrInet6:
+ addr = &IPAddr{IP: sa.Addr[0:], Zone: zoneCache.name(int(sa.ZoneId))}
+ }
+ return n, addr, err
+}
+
+func stripIPv4Header(n int, b []byte) int {
+ if len(b) < 20 {
+ return n
+ }
+ l := int(b[0]&0x0f) << 2
+ if 20 > l || l > len(b) {
+ return n
+ }
+ if b[0]>>4 != 4 {
+ return n
+ }
+ copy(b, b[l:])
+ return n - l
+}
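+
+// Worked example (not part of the original file): for a standard 20-byte IPv4
+// header the first byte is 0x45 (version 4, IHL 5), so l = (0x45&0x0f)<<2 = 20
+// and the payload is shifted down by 20 bytes.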
+
+func (c *IPConn) readMsg(b, oob []byte) (n, oobn, flags int, addr *IPAddr, err error) {
+ var sa syscall.Sockaddr
+ n, oobn, flags, sa, err = c.fd.readMsg(b, oob, 0)
+ switch sa := sa.(type) {
+ case *syscall.SockaddrInet4:
+ addr = &IPAddr{IP: sa.Addr[0:]}
+ case *syscall.SockaddrInet6:
+ addr = &IPAddr{IP: sa.Addr[0:], Zone: zoneCache.name(int(sa.ZoneId))}
+ }
+ return
+}
+
+func (c *IPConn) writeTo(b []byte, addr *IPAddr) (int, error) {
+ if c.fd.isConnected {
+ return 0, ErrWriteToConnected
+ }
+ if addr == nil {
+ return 0, errMissingAddress
+ }
+ sa, err := addr.sockaddr(c.fd.family)
+ if err != nil {
+ return 0, err
+ }
+ return c.fd.writeTo(b, sa)
+}
+
+func (c *IPConn) writeMsg(b, oob []byte, addr *IPAddr) (n, oobn int, err error) {
+ if c.fd.isConnected {
+ return 0, 0, ErrWriteToConnected
+ }
+ if addr == nil {
+ return 0, 0, errMissingAddress
+ }
+ sa, err := addr.sockaddr(c.fd.family)
+ if err != nil {
+ return 0, 0, err
+ }
+ return c.fd.writeMsg(b, oob, sa)
+}
+
+func (sd *sysDialer) dialIP(ctx context.Context, laddr, raddr *IPAddr) (*IPConn, error) {
+ network, proto, err := parseNetwork(ctx, sd.network, true)
+ if err != nil {
+ return nil, err
+ }
+ switch network {
+ case "ip", "ip4", "ip6":
+ default:
+ return nil, UnknownNetworkError(sd.network)
+ }
+ fd, err := internetSocket(ctx, network, laddr, raddr, syscall.SOCK_RAW, proto, "dial", sd.Dialer.Control)
+ if err != nil {
+ return nil, err
+ }
+ return newIPConn(fd), nil
+}
+
+func (sl *sysListener) listenIP(ctx context.Context, laddr *IPAddr) (*IPConn, error) {
+ network, proto, err := parseNetwork(ctx, sl.network, true)
+ if err != nil {
+ return nil, err
+ }
+ switch network {
+ case "ip", "ip4", "ip6":
+ default:
+ return nil, UnknownNetworkError(sl.network)
+ }
+ fd, err := internetSocket(ctx, network, laddr, nil, syscall.SOCK_RAW, proto, "listen", sl.ListenConfig.Control)
+ if err != nil {
+ return nil, err
+ }
+ return newIPConn(fd), nil
+}
diff --git a/contrib/go/_std_1.18/src/net/ipsock.go b/contrib/go/_std_1.19/src/net/ipsock.go
index 0f5da2577c..0f5da2577c 100644
--- a/contrib/go/_std_1.18/src/net/ipsock.go
+++ b/contrib/go/_std_1.19/src/net/ipsock.go
diff --git a/contrib/go/_std_1.19/src/net/ipsock_posix.go b/contrib/go/_std_1.19/src/net/ipsock_posix.go
new file mode 100644
index 0000000000..7bb66f2d6c
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/ipsock_posix.go
@@ -0,0 +1,232 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm) || windows
+
+package net
+
+import (
+ "context"
+ "internal/poll"
+ "net/netip"
+ "runtime"
+ "syscall"
+)
+
+// probe probes IPv4, IPv6 and IPv4-mapped IPv6 communication
+// capabilities which are controlled by the IPV6_V6ONLY socket option
+// and kernel configuration.
+//
+// Should we try to use the IPv4 socket interface if we're only
+// dealing with IPv4 sockets? As long as the host system understands
+// IPv4-mapped IPv6, it's okay to pass IPv4-mapped IPv6 addresses to
+// the IPv6 interface. That simplifies our code and is most
+// general. Unfortunately, we need to run on kernels built without
+// IPv6 support too. So probe the kernel to figure it out.
+func (p *ipStackCapabilities) probe() {
+ s, err := sysSocket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
+ switch err {
+ case syscall.EAFNOSUPPORT, syscall.EPROTONOSUPPORT:
+ case nil:
+ poll.CloseFunc(s)
+ p.ipv4Enabled = true
+ }
+ var probes = []struct {
+ laddr TCPAddr
+ value int
+ }{
+ // IPv6 communication capability
+ {laddr: TCPAddr{IP: ParseIP("::1")}, value: 1},
+ // IPv4-mapped IPv6 address communication capability
+ {laddr: TCPAddr{IP: IPv4(127, 0, 0, 1)}, value: 0},
+ }
+ switch runtime.GOOS {
+ case "dragonfly", "openbsd":
+ // The latest DragonFly BSD and OpenBSD kernels don't
+ // support IPV6_V6ONLY=0. They always return an error
+ // and we don't need to probe the capability.
+ probes = probes[:1]
+ }
+ for i := range probes {
+ s, err := sysSocket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
+ if err != nil {
+ continue
+ }
+ defer poll.CloseFunc(s)
+ syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, probes[i].value)
+ sa, err := probes[i].laddr.sockaddr(syscall.AF_INET6)
+ if err != nil {
+ continue
+ }
+ if err := syscall.Bind(s, sa); err != nil {
+ continue
+ }
+ if i == 0 {
+ p.ipv6Enabled = true
+ } else {
+ p.ipv4MappedIPv6Enabled = true
+ }
+ }
+}
+
+// favoriteAddrFamily returns the appropriate address family for the
+// given network, laddr, raddr and mode.
+//
+// If mode indicates "listen" and laddr is a wildcard, we assume that
+// the user wants to make a passive-open connection with a wildcard
+// address family, both AF_INET and AF_INET6, and a wildcard address
+// like the following:
+//
+// - A listen for a wildcard communication domain, "tcp" or
+// "udp", with a wildcard address: If the platform supports
+// both IPv6 and IPv4-mapped IPv6 communication capabilities,
+// or does not support IPv4, we use a dual stack, AF_INET6 and
+// IPV6_V6ONLY=0, wildcard address listen. The dual stack
+// wildcard address listen may fall back to an IPv6-only,
+// AF_INET6 and IPV6_V6ONLY=1, wildcard address listen.
+// Otherwise we prefer an IPv4-only, AF_INET, wildcard address
+// listen.
+//
+// - A listen for a wildcard communication domain, "tcp" or
+// "udp", with an IPv4 wildcard address: same as above.
+//
+// - A listen for a wildcard communication domain, "tcp" or
+// "udp", with an IPv6 wildcard address: same as above.
+//
+// - A listen for an IPv4 communication domain, "tcp4" or "udp4",
+// with an IPv4 wildcard address: We use an IPv4-only, AF_INET,
+// wildcard address listen.
+//
+// - A listen for an IPv6 communication domain, "tcp6" or "udp6",
+// with an IPv6 wildcard address: We use an IPv6-only, AF_INET6
+// and IPV6_V6ONLY=1, wildcard address listen.
+//
+// Otherwise guess: if the addresses are IPv4 then return AF_INET,
+// or else return AF_INET6. It also reports whether the IPV6_V6ONLY
+// option should be set.
+//
+// Note that the latest DragonFly BSD and OpenBSD kernels allow
+// neither "net.inet6.ip6.v6only=1" change nor IPPROTO_IPV6 level
+// IPV6_V6ONLY socket option setting.
+func favoriteAddrFamily(network string, laddr, raddr sockaddr, mode string) (family int, ipv6only bool) {
+ switch network[len(network)-1] {
+ case '4':
+ return syscall.AF_INET, false
+ case '6':
+ return syscall.AF_INET6, true
+ }
+
+ if mode == "listen" && (laddr == nil || laddr.isWildcard()) {
+ if supportsIPv4map() || !supportsIPv4() {
+ return syscall.AF_INET6, false
+ }
+ if laddr == nil {
+ return syscall.AF_INET, false
+ }
+ return laddr.family(), false
+ }
+
+ if (laddr == nil || laddr.family() == syscall.AF_INET) &&
+ (raddr == nil || raddr.family() == syscall.AF_INET) {
+ return syscall.AF_INET, false
+ }
+ return syscall.AF_INET6, false
+}
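+
+// Illustrative worked examples (not part of the original file), assuming a
+// dual-stack kernel where supportsIPv4map() reports true:
+//
+//	favoriteAddrFamily("tcp4", nil, nil, "dial")  // AF_INET,  ipv6only=false
+//	favoriteAddrFamily("tcp6", nil, nil, "dial")  // AF_INET6, ipv6only=true
+//	favoriteAddrFamily("tcp", nil, nil, "listen") // AF_INET6, ipv6only=false (dual-stack wildcard)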
+
+func internetSocket(ctx context.Context, net string, laddr, raddr sockaddr, sotype, proto int, mode string, ctrlFn func(string, string, syscall.RawConn) error) (fd *netFD, err error) {
+ if (runtime.GOOS == "aix" || runtime.GOOS == "windows" || runtime.GOOS == "openbsd") && mode == "dial" && raddr.isWildcard() {
+ raddr = raddr.toLocal(net)
+ }
+ family, ipv6only := favoriteAddrFamily(net, laddr, raddr, mode)
+ return socket(ctx, net, family, sotype, proto, ipv6only, laddr, raddr, ctrlFn)
+}
+
+func ipToSockaddrInet4(ip IP, port int) (syscall.SockaddrInet4, error) {
+ if len(ip) == 0 {
+ ip = IPv4zero
+ }
+ ip4 := ip.To4()
+ if ip4 == nil {
+ return syscall.SockaddrInet4{}, &AddrError{Err: "non-IPv4 address", Addr: ip.String()}
+ }
+ sa := syscall.SockaddrInet4{Port: port}
+ copy(sa.Addr[:], ip4)
+ return sa, nil
+}
+
+func ipToSockaddrInet6(ip IP, port int, zone string) (syscall.SockaddrInet6, error) {
+ // In general, an IP wildcard address, which is either
+ // "0.0.0.0" or "::", means the entire IP addressing
+ // space. For some historical reason, it is used to
+ // specify "any available address" on some operations
+ // of an IP node.
+ //
+ // When the IP node supports IPv4-mapped IPv6 address,
+ // we allow a listener to listen to the wildcard
+ // address of both IP addressing spaces by specifying
+ // IPv6 wildcard address.
+ if len(ip) == 0 || ip.Equal(IPv4zero) {
+ ip = IPv6zero
+ }
+ // We accept any IPv6 address including IPv4-mapped
+ // IPv6 address.
+ ip6 := ip.To16()
+ if ip6 == nil {
+ return syscall.SockaddrInet6{}, &AddrError{Err: "non-IPv6 address", Addr: ip.String()}
+ }
+ sa := syscall.SockaddrInet6{Port: port, ZoneId: uint32(zoneCache.index(zone))}
+ copy(sa.Addr[:], ip6)
+ return sa, nil
+}
+
+func ipToSockaddr(family int, ip IP, port int, zone string) (syscall.Sockaddr, error) {
+ switch family {
+ case syscall.AF_INET:
+ sa, err := ipToSockaddrInet4(ip, port)
+ if err != nil {
+ return nil, err
+ }
+ return &sa, nil
+ case syscall.AF_INET6:
+ sa, err := ipToSockaddrInet6(ip, port, zone)
+ if err != nil {
+ return nil, err
+ }
+ return &sa, nil
+ }
+ return nil, &AddrError{Err: "invalid address family", Addr: ip.String()}
+}
+
+func addrPortToSockaddrInet4(ap netip.AddrPort) (syscall.SockaddrInet4, error) {
+ // ipToSockaddrInet4 has special handling here for zero length slices.
+ // We do not, because netip has no concept of a generic zero IP address.
+ addr := ap.Addr()
+ if !addr.Is4() {
+ return syscall.SockaddrInet4{}, &AddrError{Err: "non-IPv4 address", Addr: addr.String()}
+ }
+ sa := syscall.SockaddrInet4{
+ Addr: addr.As4(),
+ Port: int(ap.Port()),
+ }
+ return sa, nil
+}
+
+func addrPortToSockaddrInet6(ap netip.AddrPort) (syscall.SockaddrInet6, error) {
+ // ipToSockaddrInet6 has special handling here for zero length slices.
+ // We do not, because netip has no concept of a generic zero IP address.
+ //
+ // addr is allowed to be an IPv4 address, because As16 will convert it
+ // to an IPv4-mapped IPv6 address.
+ // The error message is kept consistent with ipToSockaddrInet6.
+ addr := ap.Addr()
+ if !addr.IsValid() {
+ return syscall.SockaddrInet6{}, &AddrError{Err: "non-IPv6 address", Addr: addr.String()}
+ }
+ sa := syscall.SockaddrInet6{
+ Addr: addr.As16(),
+ Port: int(ap.Port()),
+ ZoneId: uint32(zoneCache.index(addr.Zone())),
+ }
+ return sa, nil
+}
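+
+// Illustrative sketch (not part of the original file): the netip-based
+// converters take an AddrPort such as (address hypothetical):
+//
+//	ap := netip.MustParseAddrPort("[2001:db8::1]:443")
+//	sa, err := addrPortToSockaddrInet6(ap)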
diff --git a/contrib/go/_std_1.19/src/net/lookup.go b/contrib/go/_std_1.19/src/net/lookup.go
new file mode 100644
index 0000000000..7f3d20126c
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/lookup.go
@@ -0,0 +1,893 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+import (
+ "context"
+ "internal/nettrace"
+ "internal/singleflight"
+ "net/netip"
+ "sync"
+
+ "golang.org/x/net/dns/dnsmessage"
+)
+
+// protocols contains minimal mappings between internet protocol
+// names and numbers for platforms that don't have a complete list of
+// protocol numbers.
+//
+// See https://www.iana.org/assignments/protocol-numbers
+//
+// On Unix, this map is augmented by readProtocols via lookupProtocol.
+var protocols = map[string]int{
+ "icmp": 1,
+ "igmp": 2,
+ "tcp": 6,
+ "udp": 17,
+ "ipv6-icmp": 58,
+}
+
+// services contains minimal mappings between service names and port
+// numbers for platforms that don't have a complete list of port numbers.
+//
+// See https://www.iana.org/assignments/service-names-port-numbers
+//
+// On Unix, this map is augmented by readServices via goLookupPort.
+var services = map[string]map[string]int{
+ "udp": {
+ "domain": 53,
+ },
+ "tcp": {
+ "ftp": 21,
+ "ftps": 990,
+ "gopher": 70, // ʕ◔ϖ◔ʔ
+ "http": 80,
+ "https": 443,
+ "imap2": 143,
+ "imap3": 220,
+ "imaps": 993,
+ "pop3": 110,
+ "pop3s": 995,
+ "smtp": 25,
+ "ssh": 22,
+ "telnet": 23,
+ },
+}
+
+// dnsWaitGroup can be used by tests to wait for all DNS goroutines to
+// complete. This avoids races on the test hooks.
+var dnsWaitGroup sync.WaitGroup
+
+const maxProtoLength = len("RSVP-E2E-IGNORE") + 10 // with room to grow
+
+func lookupProtocolMap(name string) (int, error) {
+ var lowerProtocol [maxProtoLength]byte
+ n := copy(lowerProtocol[:], name)
+ lowerASCIIBytes(lowerProtocol[:n])
+ proto, found := protocols[string(lowerProtocol[:n])]
+ if !found || n != len(name) {
+ return 0, &AddrError{Err: "unknown IP protocol specified", Addr: name}
+ }
+ return proto, nil
+}
+
+// maxPortBufSize is the length of the longest reasonable name of a
+// service (non-numeric port).
+// Currently the longest known IANA-unregistered name is
+// "mobility-header", so we use that length, plus some slop in case
+// something longer is added in the future.
+const maxPortBufSize = len("mobility-header") + 10
+
+func lookupPortMap(network, service string) (port int, error error) {
+ switch network {
+ case "tcp4", "tcp6":
+ network = "tcp"
+ case "udp4", "udp6":
+ network = "udp"
+ }
+
+ if m, ok := services[network]; ok {
+ var lowerService [maxPortBufSize]byte
+ n := copy(lowerService[:], service)
+ lowerASCIIBytes(lowerService[:n])
+ if port, ok := m[string(lowerService[:n])]; ok && n == len(service) {
+ return port, nil
+ }
+ }
+ return 0, &AddrError{Err: "unknown port", Addr: network + "/" + service}
+}
+
+// ipVersion returns the provided network's IP version: '4', '6' or 0
+// if network does not end in a '4' or '6' byte.
+func ipVersion(network string) byte {
+ if network == "" {
+ return 0
+ }
+ n := network[len(network)-1]
+ if n != '4' && n != '6' {
+ n = 0
+ }
+ return n
+}
+
+// DefaultResolver is the resolver used by the package-level Lookup
+// functions and by Dialers without a specified Resolver.
+var DefaultResolver = &Resolver{}
+
+// A Resolver looks up names and numbers.
+//
+// A nil *Resolver is equivalent to a zero Resolver.
+type Resolver struct {
+ // PreferGo controls whether Go's built-in DNS resolver is preferred
+ // on platforms where it's available. It is equivalent to setting
+ // GODEBUG=netdns=go, but scoped to just this resolver.
+ PreferGo bool
+
+ // StrictErrors controls the behavior of temporary errors
+ // (including timeout, socket errors, and SERVFAIL) when using
+ // Go's built-in resolver. For a query composed of multiple
+ // sub-queries (such as an A+AAAA address lookup, or walking the
+ // DNS search list), this option causes such errors to abort the
+ // whole query instead of returning a partial result. This is
+ // not enabled by default because it may affect compatibility
+ // with resolvers that process AAAA queries incorrectly.
+ StrictErrors bool
+
+ // Dial optionally specifies an alternate dialer for use by
+ // Go's built-in DNS resolver to make TCP and UDP connections
+ // to DNS services. The host in the address parameter will
+ // always be a literal IP address and not a host name, and the
+ // port in the address parameter will be a literal port number
+ // and not a service name.
+ // If the Conn returned is also a PacketConn, sent and received DNS
+ // messages must adhere to RFC 1035 section 4.2.1, "UDP usage".
+ // Otherwise, DNS messages transmitted over Conn must adhere
+ // to RFC 7766 section 5, "Transport Protocol Selection".
+ // If nil, the default dialer is used.
+ Dial func(ctx context.Context, network, address string) (Conn, error)
+
+ // lookupGroup merges LookupIPAddr calls together for lookups for the same
+ // host. The lookupGroup key is the LookupIPAddr.host argument.
+ // The return values are ([]IPAddr, error).
+ lookupGroup singleflight.Group
+
+ // TODO(bradfitz): optional interface impl override hook
+ // TODO(bradfitz): Timeout time.Duration?
+}
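+
+// A minimal caller-side sketch of a custom Resolver that forces the Go
+// resolver and dials a fixed DNS server (the server address below is a
+// placeholder):
+//
+//	r := &net.Resolver{
+//		PreferGo: true,
+//		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
+//			var d net.Dialer
+//			return d.DialContext(ctx, network, "192.0.2.53:53")
+//		},
+//	}
+//	addrs, err := r.LookupHost(context.Background(), "example.com")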
+
+func (r *Resolver) preferGo() bool { return r != nil && r.PreferGo }
+func (r *Resolver) strictErrors() bool { return r != nil && r.StrictErrors }
+
+func (r *Resolver) getLookupGroup() *singleflight.Group {
+ if r == nil {
+ return &DefaultResolver.lookupGroup
+ }
+ return &r.lookupGroup
+}
+
+// LookupHost looks up the given host using the local resolver.
+// It returns a slice of that host's addresses.
+//
+// LookupHost uses context.Background internally; to specify the context, use
+// Resolver.LookupHost.
+func LookupHost(host string) (addrs []string, err error) {
+ return DefaultResolver.LookupHost(context.Background(), host)
+}
+
+// LookupHost looks up the given host using the local resolver.
+// It returns a slice of that host's addresses.
+func (r *Resolver) LookupHost(ctx context.Context, host string) (addrs []string, err error) {
+ // Make sure that no matter what we do later, host=="" is rejected.
+ // parseIP, for example, does accept empty strings.
+ if host == "" {
+ return nil, &DNSError{Err: errNoSuchHost.Error(), Name: host, IsNotFound: true}
+ }
+ if ip, _ := parseIPZone(host); ip != nil {
+ return []string{host}, nil
+ }
+ return r.lookupHost(ctx, host)
+}
+
+// LookupIP looks up host using the local resolver.
+// It returns a slice of that host's IPv4 and IPv6 addresses.
+func LookupIP(host string) ([]IP, error) {
+ addrs, err := DefaultResolver.LookupIPAddr(context.Background(), host)
+ if err != nil {
+ return nil, err
+ }
+ ips := make([]IP, len(addrs))
+ for i, ia := range addrs {
+ ips[i] = ia.IP
+ }
+ return ips, nil
+}
+
+// LookupIPAddr looks up host using the local resolver.
+// It returns a slice of that host's IPv4 and IPv6 addresses.
+func (r *Resolver) LookupIPAddr(ctx context.Context, host string) ([]IPAddr, error) {
+ return r.lookupIPAddr(ctx, "ip", host)
+}
+
+// LookupIP looks up host for the given network using the local resolver.
+// It returns a slice of that host's IP addresses of the type specified by
+// network.
+// network must be one of "ip", "ip4" or "ip6".
+func (r *Resolver) LookupIP(ctx context.Context, network, host string) ([]IP, error) {
+ afnet, _, err := parseNetwork(ctx, network, false)
+ if err != nil {
+ return nil, err
+ }
+ switch afnet {
+ case "ip", "ip4", "ip6":
+ default:
+ return nil, UnknownNetworkError(network)
+ }
+ addrs, err := r.internetAddrList(ctx, afnet, host)
+ if err != nil {
+ return nil, err
+ }
+ ips := make([]IP, 0, len(addrs))
+ for _, addr := range addrs {
+ ips = append(ips, addr.(*IPAddr).IP)
+ }
+ return ips, nil
+}
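+
+// A caller-side sketch restricting a lookup to IPv4 addresses (the host
+// is a placeholder):
+//
+//	ips, err := net.DefaultResolver.LookupIP(context.Background(), "ip4", "example.com")
+//	if err != nil {
+//		// handle error
+//	}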
+
+// LookupNetIP looks up host using the local resolver.
+// It returns a slice of that host's IP addresses of the type specified by
+// network.
+// The network must be one of "ip", "ip4" or "ip6".
+func (r *Resolver) LookupNetIP(ctx context.Context, network, host string) ([]netip.Addr, error) {
+ // TODO(bradfitz): make this efficient, making the internal net package
+ // type throughout be netip.Addr and only converting to the net.IP slice
+ // version at the edge. But for now (2021-10-20), this is a wrapper around
+ // the old way.
+ ips, err := r.LookupIP(ctx, network, host)
+ if err != nil {
+ return nil, err
+ }
+ ret := make([]netip.Addr, 0, len(ips))
+ for _, ip := range ips {
+ if a, ok := netip.AddrFromSlice(ip); ok {
+ ret = append(ret, a)
+ }
+ }
+ return ret, nil
+}
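+
+// A caller-side sketch obtaining netip.Addr values directly (the host is
+// a placeholder):
+//
+//	addrs, err := net.DefaultResolver.LookupNetIP(context.Background(), "ip", "example.com")
+//	for _, a := range addrs {
+//		if a.Is4() {
+//			// IPv4 result
+//		}
+//	}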
+
+// onlyValuesCtx is a context that uses an underlying context
+// for value lookup if the underlying context hasn't yet expired.
+type onlyValuesCtx struct {
+ context.Context
+ lookupValues context.Context
+}
+
+var _ context.Context = (*onlyValuesCtx)(nil)
+
+// Value performs a lookup if the original context hasn't expired.
+func (ovc *onlyValuesCtx) Value(key any) any {
+ select {
+ case <-ovc.lookupValues.Done():
+ return nil
+ default:
+ return ovc.lookupValues.Value(key)
+ }
+}
+
+// withUnexpiredValuesPreserved returns a context.Context that only uses lookupCtx
+// for its values; it is never canceled and has no deadline.
+// If the lookup context expires, any looked up values will return nil.
+// See Issue 28600.
+func withUnexpiredValuesPreserved(lookupCtx context.Context) context.Context {
+ return &onlyValuesCtx{Context: context.Background(), lookupValues: lookupCtx}
+}
+
+// lookupIPAddr looks up host using the local resolver and particular network.
+// It returns a slice of that host's IPv4 and IPv6 addresses.
+func (r *Resolver) lookupIPAddr(ctx context.Context, network, host string) ([]IPAddr, error) {
+ // Make sure that no matter what we do later, host=="" is rejected.
+ // parseIPZone, for example, does accept empty strings.
+ if host == "" {
+ return nil, &DNSError{Err: errNoSuchHost.Error(), Name: host, IsNotFound: true}
+ }
+ if ip, zone := parseIPZone(host); ip != nil {
+ return []IPAddr{{IP: ip, Zone: zone}}, nil
+ }
+ trace, _ := ctx.Value(nettrace.TraceKey{}).(*nettrace.Trace)
+ if trace != nil && trace.DNSStart != nil {
+ trace.DNSStart(host)
+ }
+ // The underlying resolver func is lookupIP by default but it
+ // can be overridden by tests. This is needed by net/http, so it
+ // uses a context key instead of unexported variables.
+ resolverFunc := r.lookupIP
+ if alt, _ := ctx.Value(nettrace.LookupIPAltResolverKey{}).(func(context.Context, string, string) ([]IPAddr, error)); alt != nil {
+ resolverFunc = alt
+ }
+
+ // We don't want a cancellation of ctx to affect the
+ // lookupGroup operation. Otherwise if our context gets
+ // canceled it might cause an error to be returned to a lookup
+ // using a completely different context. However we need to preserve
+ // only the values in context. See Issue 28600.
+ lookupGroupCtx, lookupGroupCancel := context.WithCancel(withUnexpiredValuesPreserved(ctx))
+
+ lookupKey := network + "\000" + host
+ dnsWaitGroup.Add(1)
+ ch, called := r.getLookupGroup().DoChan(lookupKey, func() (any, error) {
+ defer dnsWaitGroup.Done()
+ return testHookLookupIP(lookupGroupCtx, resolverFunc, network, host)
+ })
+ if !called {
+ dnsWaitGroup.Done()
+ }
+
+ select {
+ case <-ctx.Done():
+ // Our context was canceled. If we are the only
+ // goroutine looking up this key, then drop the key
+ // from the lookupGroup and cancel the lookup.
+ // If there are other goroutines looking up this key,
+ // let the lookup continue uncanceled, and let later
+ // lookups with the same key share the result.
+ // See issues 8602, 20703, 22724.
+ if r.getLookupGroup().ForgetUnshared(lookupKey) {
+ lookupGroupCancel()
+ } else {
+ go func() {
+ <-ch
+ lookupGroupCancel()
+ }()
+ }
+ ctxErr := ctx.Err()
+ err := &DNSError{
+ Err: mapErr(ctxErr).Error(),
+ Name: host,
+ IsTimeout: ctxErr == context.DeadlineExceeded,
+ }
+ if trace != nil && trace.DNSDone != nil {
+ trace.DNSDone(nil, false, err)
+ }
+ return nil, err
+ case r := <-ch:
+ lookupGroupCancel()
+ err := r.Err
+ if err != nil {
+ if _, ok := err.(*DNSError); !ok {
+ isTimeout := false
+ if err == context.DeadlineExceeded {
+ isTimeout = true
+ } else if terr, ok := err.(timeout); ok {
+ isTimeout = terr.Timeout()
+ }
+ err = &DNSError{
+ Err: err.Error(),
+ Name: host,
+ IsTimeout: isTimeout,
+ }
+ }
+ }
+ if trace != nil && trace.DNSDone != nil {
+ addrs, _ := r.Val.([]IPAddr)
+ trace.DNSDone(ipAddrsEface(addrs), r.Shared, err)
+ }
+ return lookupIPReturn(r.Val, err, r.Shared)
+ }
+}
+
+// lookupIPReturn turns the return values from singleflight.Do into
+// the return values from LookupIP.
+func lookupIPReturn(addrsi any, err error, shared bool) ([]IPAddr, error) {
+ if err != nil {
+ return nil, err
+ }
+ addrs := addrsi.([]IPAddr)
+ if shared {
+ clone := make([]IPAddr, len(addrs))
+ copy(clone, addrs)
+ addrs = clone
+ }
+ return addrs, nil
+}
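+
+// The deduplication above is the general singleflight pattern; a
+// standalone sketch of the same idea using the public
+// golang.org/x/sync/singleflight package (not the internal copy used
+// here, and with a hypothetical lookup function) looks like:
+//
+//	var g singleflight.Group
+//	v, err, shared := g.Do(network+"\000"+host, func() (any, error) {
+//		return lookup(network, host)
+//	})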
+
+// ipAddrsEface returns an empty interface slice of addrs.
+func ipAddrsEface(addrs []IPAddr) []any {
+ s := make([]any, len(addrs))
+ for i, v := range addrs {
+ s[i] = v
+ }
+ return s
+}
+
+// LookupPort looks up the port for the given network and service.
+//
+// LookupPort uses context.Background internally; to specify the context, use
+// Resolver.LookupPort.
+func LookupPort(network, service string) (port int, err error) {
+ return DefaultResolver.LookupPort(context.Background(), network, service)
+}
+
+// LookupPort looks up the port for the given network and service.
+func (r *Resolver) LookupPort(ctx context.Context, network, service string) (port int, err error) {
+ port, needsLookup := parsePort(service)
+ if needsLookup {
+ switch network {
+ case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6":
+ case "": // a hint wildcard for Go 1.0 undocumented behavior
+ network = "ip"
+ default:
+ return 0, &AddrError{Err: "unknown network", Addr: network}
+ }
+ port, err = r.lookupPort(ctx, network, service)
+ if err != nil {
+ return 0, err
+ }
+ }
+ if 0 > port || port > 65535 {
+ return 0, &AddrError{Err: "invalid port", Addr: service}
+ }
+ return port, nil
+}
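+
+// A caller-side sketch (network and service chosen for illustration):
+// numeric strings parse directly, while well-known names fall back to
+// the OS services database or the baked-in table above:
+//
+//	port, err := net.LookupPort("tcp", "https") // 443 on most systems
+//	if err != nil {
+//		// handle error
+//	}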
+
+// LookupCNAME returns the canonical name for the given host.
+// Callers that do not care about the canonical name can call
+// LookupHost or LookupIP directly; both take care of resolving
+// the canonical name as part of the lookup.
+//
+// A canonical name is the final name after following zero
+// or more CNAME records.
+// LookupCNAME does not return an error if host does not
+// contain DNS "CNAME" records, as long as host resolves to
+// address records.
+//
+// The returned canonical name is validated to be a properly
+// formatted presentation-format domain name.
+//
+// LookupCNAME uses context.Background internally; to specify the context, use
+// Resolver.LookupCNAME.
+func LookupCNAME(host string) (cname string, err error) {
+ return DefaultResolver.LookupCNAME(context.Background(), host)
+}
+
+// LookupCNAME returns the canonical name for the given host.
+// Callers that do not care about the canonical name can call
+// LookupHost or LookupIP directly; both take care of resolving
+// the canonical name as part of the lookup.
+//
+// A canonical name is the final name after following zero
+// or more CNAME records.
+// LookupCNAME does not return an error if host does not
+// contain DNS "CNAME" records, as long as host resolves to
+// address records.
+//
+// The returned canonical name is validated to be a properly
+// formatted presentation-format domain name.
+func (r *Resolver) LookupCNAME(ctx context.Context, host string) (string, error) {
+ cname, err := r.lookupCNAME(ctx, host)
+ if err != nil {
+ return "", err
+ }
+ if !isDomainName(cname) {
+ return "", &DNSError{Err: errMalformedDNSRecordsDetail, Name: host}
+ }
+ return cname, nil
+}
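+
+// A caller-side sketch (the host is a placeholder); a host with no
+// CNAME chain resolves to itself:
+//
+//	cname, err := net.LookupCNAME("www.example.com")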
+
+// LookupSRV tries to resolve an SRV query of the given service,
+// protocol, and domain name. The proto is "tcp" or "udp".
+// The returned records are sorted by priority and randomized
+// by weight within a priority.
+//
+// LookupSRV constructs the DNS name to look up following RFC 2782.
+// That is, it looks up _service._proto.name. To accommodate services
+// publishing SRV records under non-standard names, if both service
+// and proto are empty strings, LookupSRV looks up name directly.
+//
+// The returned service names are validated to be properly
+// formatted presentation-format domain names. If the response contains
+// invalid names, those records are filtered out and an error
+// will be returned alongside the remaining results, if any.
+func LookupSRV(service, proto, name string) (cname string, addrs []*SRV, err error) {
+ return DefaultResolver.LookupSRV(context.Background(), service, proto, name)
+}
+
+// LookupSRV tries to resolve an SRV query of the given service,
+// protocol, and domain name. The proto is "tcp" or "udp".
+// The returned records are sorted by priority and randomized
+// by weight within a priority.
+//
+// LookupSRV constructs the DNS name to look up following RFC 2782.
+// That is, it looks up _service._proto.name. To accommodate services
+// publishing SRV records under non-standard names, if both service
+// and proto are empty strings, LookupSRV looks up name directly.
+//
+// The returned service names are validated to be properly
+// formatted presentation-format domain names. If the response contains
+// invalid names, those records are filtered out and an error
+// will be returned alongside the remaining results, if any.
+func (r *Resolver) LookupSRV(ctx context.Context, service, proto, name string) (string, []*SRV, error) {
+ cname, addrs, err := r.lookupSRV(ctx, service, proto, name)
+ if err != nil {
+ return "", nil, err
+ }
+ if cname != "" && !isDomainName(cname) {
+ return "", nil, &DNSError{Err: "SRV header name is invalid", Name: name}
+ }
+ filteredAddrs := make([]*SRV, 0, len(addrs))
+ for _, addr := range addrs {
+ if addr == nil {
+ continue
+ }
+ if !isDomainName(addr.Target) {
+ continue
+ }
+ filteredAddrs = append(filteredAddrs, addr)
+ }
+ if len(addrs) != len(filteredAddrs) {
+ return cname, filteredAddrs, &DNSError{Err: errMalformedDNSRecordsDetail, Name: name}
+ }
+ return cname, filteredAddrs, nil
+}
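+
+// A caller-side sketch, looking up _sip._tcp.example.com (all names are
+// placeholders); the records arrive sorted by priority and randomized
+// by weight:
+//
+//	cname, srvs, err := net.LookupSRV("sip", "tcp", "example.com")
+//	for _, srv := range srvs {
+//		// srv.Target, srv.Port, srv.Priority, srv.Weight
+//	}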
+
+// LookupMX returns the DNS MX records for the given domain name sorted by preference.
+//
+// The returned mail server names are validated to be properly
+// formatted presentation-format domain names. If the response contains
+// invalid names, those records are filtered out and an error
+// will be returned alongside the remaining results, if any.
+//
+// LookupMX uses context.Background internally; to specify the context, use
+// Resolver.LookupMX.
+func LookupMX(name string) ([]*MX, error) {
+ return DefaultResolver.LookupMX(context.Background(), name)
+}
+
+// LookupMX returns the DNS MX records for the given domain name sorted by preference.
+//
+// The returned mail server names are validated to be properly
+// formatted presentation-format domain names. If the response contains
+// invalid names, those records are filtered out and an error
+// will be returned alongside the remaining results, if any.
+func (r *Resolver) LookupMX(ctx context.Context, name string) ([]*MX, error) {
+ records, err := r.lookupMX(ctx, name)
+ if err != nil {
+ return nil, err
+ }
+ filteredMX := make([]*MX, 0, len(records))
+ for _, mx := range records {
+ if mx == nil {
+ continue
+ }
+ if !isDomainName(mx.Host) {
+ continue
+ }
+ filteredMX = append(filteredMX, mx)
+ }
+ if len(records) != len(filteredMX) {
+ return filteredMX, &DNSError{Err: errMalformedDNSRecordsDetail, Name: name}
+ }
+ return filteredMX, nil
+}
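+
+// A caller-side sketch (the domain is a placeholder); the records come
+// back sorted by preference, lowest first:
+//
+//	mxs, err := net.LookupMX("example.com")
+//	for _, mx := range mxs {
+//		// mx.Host, mx.Pref
+//	}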
+
+// LookupNS returns the DNS NS records for the given domain name.
+//
+// The returned name server names are validated to be properly
+// formatted presentation-format domain names. If the response contains
+// invalid names, those records are filtered out and an error
+// will be returned alongside the remaining results, if any.
+//
+// LookupNS uses context.Background internally; to specify the context, use
+// Resolver.LookupNS.
+func LookupNS(name string) ([]*NS, error) {
+ return DefaultResolver.LookupNS(context.Background(), name)
+}
+
+// LookupNS returns the DNS NS records for the given domain name.
+//
+// The returned name server names are validated to be properly
+// formatted presentation-format domain names. If the response contains
+// invalid names, those records are filtered out and an error
+// will be returned alongside the remaining results, if any.
+func (r *Resolver) LookupNS(ctx context.Context, name string) ([]*NS, error) {
+ records, err := r.lookupNS(ctx, name)
+ if err != nil {
+ return nil, err
+ }
+ filteredNS := make([]*NS, 0, len(records))
+ for _, ns := range records {
+ if ns == nil {
+ continue
+ }
+ if !isDomainName(ns.Host) {
+ continue
+ }
+ filteredNS = append(filteredNS, ns)
+ }
+ if len(records) != len(filteredNS) {
+ return filteredNS, &DNSError{Err: errMalformedDNSRecordsDetail, Name: name}
+ }
+ return filteredNS, nil
+}
+
+// LookupTXT returns the DNS TXT records for the given domain name.
+//
+// LookupTXT uses context.Background internally; to specify the context, use
+// Resolver.LookupTXT.
+func LookupTXT(name string) ([]string, error) {
+ return DefaultResolver.lookupTXT(context.Background(), name)
+}
+
+// LookupTXT returns the DNS TXT records for the given domain name.
+func (r *Resolver) LookupTXT(ctx context.Context, name string) ([]string, error) {
+ return r.lookupTXT(ctx, name)
+}
+
+// LookupAddr performs a reverse lookup for the given address, returning a list
+// of names mapping to that address.
+//
+// The returned names are validated to be properly formatted presentation-format
+// domain names. If the response contains invalid names, those records are filtered
+// out and an error will be returned alongside the remaining results, if any.
+//
+// When using the host C library resolver, at most one result will be
+// returned. To bypass the host resolver, use a custom Resolver.
+//
+// LookupAddr uses context.Background internally; to specify the context, use
+// Resolver.LookupAddr.
+func LookupAddr(addr string) (names []string, err error) {
+ return DefaultResolver.LookupAddr(context.Background(), addr)
+}
+
+// LookupAddr performs a reverse lookup for the given address, returning a list
+// of names mapping to that address.
+//
+// The returned names are validated to be properly formatted presentation-format
+// domain names. If the response contains invalid names, those records are filtered
+// out and an error will be returned alongside the remaining results, if any.
+func (r *Resolver) LookupAddr(ctx context.Context, addr string) ([]string, error) {
+ names, err := r.lookupAddr(ctx, addr)
+ if err != nil {
+ return nil, err
+ }
+ filteredNames := make([]string, 0, len(names))
+ for _, name := range names {
+ if isDomainName(name) {
+ filteredNames = append(filteredNames, name)
+ }
+ }
+ if len(names) != len(filteredNames) {
+ return filteredNames, &DNSError{Err: errMalformedDNSRecordsDetail, Name: addr}
+ }
+ return filteredNames, nil
+}
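+
+// A caller-side sketch of a reverse lookup (the address is a
+// documentation-range placeholder):
+//
+//	names, err := net.LookupAddr("192.0.2.1")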
+
+// errMalformedDNSRecordsDetail is the DNSError detail which is returned when a Resolver.Lookup...
+// method receives DNS records which contain invalid DNS names. This may be returned alongside
+// results which have had the malformed records filtered out.
+var errMalformedDNSRecordsDetail = "DNS response contained records which contain invalid names"
+
+// dial makes a new connection to the provided server (which must be
+// an IP address) with the provided network type, using either r.Dial
+// (if both r and r.Dial are non-nil) or else Dialer.DialContext.
+func (r *Resolver) dial(ctx context.Context, network, server string) (Conn, error) {
+ // Calling Dial here is scary -- we have to be sure not to
+ // dial a name that will require a DNS lookup, or Dial will
+ // call back here to translate it. The DNS config parser has
+ // already checked that all the cfg.servers are IP
+ // addresses, which Dial will use without a DNS lookup.
+ var c Conn
+ var err error
+ if r != nil && r.Dial != nil {
+ c, err = r.Dial(ctx, network, server)
+ } else {
+ var d Dialer
+ c, err = d.DialContext(ctx, network, server)
+ }
+ if err != nil {
+ return nil, mapErr(err)
+ }
+ return c, nil
+}
+
+// goLookupSRV returns the SRV records for a target name, built either
+// from its component service ("sip"), protocol ("tcp"), and name
+// ("example.com."), or from name directly (if service and proto are
+// both empty).
+//
+// In either case, the returned target name ("_sip._tcp.example.com.")
+// is also returned on success.
+//
+// The records are sorted by priority and randomized by weight
+// within a priority.
+func (r *Resolver) goLookupSRV(ctx context.Context, service, proto, name string) (target string, srvs []*SRV, err error) {
+ if service == "" && proto == "" {
+ target = name
+ } else {
+ target = "_" + service + "._" + proto + "." + name
+ }
+ p, server, err := r.lookup(ctx, target, dnsmessage.TypeSRV)
+ if err != nil {
+ return "", nil, err
+ }
+ var cname dnsmessage.Name
+ for {
+ h, err := p.AnswerHeader()
+ if err == dnsmessage.ErrSectionDone {
+ break
+ }
+ if err != nil {
+ return "", nil, &DNSError{
+ Err: "cannot unmarshal DNS message",
+ Name: name,
+ Server: server,
+ }
+ }
+ if h.Type != dnsmessage.TypeSRV {
+ if err := p.SkipAnswer(); err != nil {
+ return "", nil, &DNSError{
+ Err: "cannot unmarshal DNS message",
+ Name: name,
+ Server: server,
+ }
+ }
+ continue
+ }
+ if cname.Length == 0 && h.Name.Length != 0 {
+ cname = h.Name
+ }
+ srv, err := p.SRVResource()
+ if err != nil {
+ return "", nil, &DNSError{
+ Err: "cannot unmarshal DNS message",
+ Name: name,
+ Server: server,
+ }
+ }
+ srvs = append(srvs, &SRV{Target: srv.Target.String(), Port: srv.Port, Priority: srv.Priority, Weight: srv.Weight})
+ }
+ byPriorityWeight(srvs).sort()
+ return cname.String(), srvs, nil
+}
+
+// goLookupMX returns the MX records for name.
+func (r *Resolver) goLookupMX(ctx context.Context, name string) ([]*MX, error) {
+ p, server, err := r.lookup(ctx, name, dnsmessage.TypeMX)
+ if err != nil {
+ return nil, err
+ }
+ var mxs []*MX
+ for {
+ h, err := p.AnswerHeader()
+ if err == dnsmessage.ErrSectionDone {
+ break
+ }
+ if err != nil {
+ return nil, &DNSError{
+ Err: "cannot unmarshal DNS message",
+ Name: name,
+ Server: server,
+ }
+ }
+ if h.Type != dnsmessage.TypeMX {
+ if err := p.SkipAnswer(); err != nil {
+ return nil, &DNSError{
+ Err: "cannot unmarshal DNS message",
+ Name: name,
+ Server: server,
+ }
+ }
+ continue
+ }
+ mx, err := p.MXResource()
+ if err != nil {
+ return nil, &DNSError{
+ Err: "cannot unmarshal DNS message",
+ Name: name,
+ Server: server,
+ }
+ }
+ mxs = append(mxs, &MX{Host: mx.MX.String(), Pref: mx.Pref})
+ }
+ byPref(mxs).sort()
+ return mxs, nil
+}
+
+// goLookupNS returns the NS records for name.
+func (r *Resolver) goLookupNS(ctx context.Context, name string) ([]*NS, error) {
+ p, server, err := r.lookup(ctx, name, dnsmessage.TypeNS)
+ if err != nil {
+ return nil, err
+ }
+ var nss []*NS
+ for {
+ h, err := p.AnswerHeader()
+ if err == dnsmessage.ErrSectionDone {
+ break
+ }
+ if err != nil {
+ return nil, &DNSError{
+ Err: "cannot unmarshal DNS message",
+ Name: name,
+ Server: server,
+ }
+ }
+ if h.Type != dnsmessage.TypeNS {
+ if err := p.SkipAnswer(); err != nil {
+ return nil, &DNSError{
+ Err: "cannot unmarshal DNS message",
+ Name: name,
+ Server: server,
+ }
+ }
+ continue
+ }
+ ns, err := p.NSResource()
+ if err != nil {
+ return nil, &DNSError{
+ Err: "cannot unmarshal DNS message",
+ Name: name,
+ Server: server,
+ }
+ }
+ nss = append(nss, &NS{Host: ns.NS.String()})
+ }
+ return nss, nil
+}
+
+// goLookupTXT returns the TXT records for name.
+func (r *Resolver) goLookupTXT(ctx context.Context, name string) ([]string, error) {
+ p, server, err := r.lookup(ctx, name, dnsmessage.TypeTXT)
+ if err != nil {
+ return nil, err
+ }
+ var txts []string
+ for {
+ h, err := p.AnswerHeader()
+ if err == dnsmessage.ErrSectionDone {
+ break
+ }
+ if err != nil {
+ return nil, &DNSError{
+ Err: "cannot unmarshal DNS message",
+ Name: name,
+ Server: server,
+ }
+ }
+ if h.Type != dnsmessage.TypeTXT {
+ if err := p.SkipAnswer(); err != nil {
+ return nil, &DNSError{
+ Err: "cannot unmarshal DNS message",
+ Name: name,
+ Server: server,
+ }
+ }
+ continue
+ }
+ txt, err := p.TXTResource()
+ if err != nil {
+ return nil, &DNSError{
+ Err: "cannot unmarshal DNS message",
+ Name: name,
+ Server: server,
+ }
+ }
+ // Multiple strings in one TXT record need to be
+ // concatenated without a separator to be consistent
+ // with the previous Go resolver.
+ n := 0
+ for _, s := range txt.TXT {
+ n += len(s)
+ }
+ txtJoin := make([]byte, 0, n)
+ for _, s := range txt.TXT {
+ txtJoin = append(txtJoin, s...)
+ }
+ if len(txts) == 0 {
+ txts = make([]string, 0, 1)
+ }
+ txts = append(txts, string(txtJoin))
+ }
+ return txts, nil
+}
diff --git a/contrib/go/_std_1.19/src/net/lookup_unix.go b/contrib/go/_std_1.19/src/net/lookup_unix.go
new file mode 100644
index 0000000000..4b885e938a
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/lookup_unix.go
@@ -0,0 +1,156 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package net
+
+import (
+ "context"
+ "internal/bytealg"
+ "sync"
+ "syscall"
+)
+
+var onceReadProtocols sync.Once
+
+// readProtocols loads contents of /etc/protocols into protocols map
+// for quick access.
+func readProtocols() {
+ file, err := open("/etc/protocols")
+ if err != nil {
+ return
+ }
+ defer file.close()
+
+ for line, ok := file.readLine(); ok; line, ok = file.readLine() {
+ // tcp 6 TCP # transmission control protocol
+ if i := bytealg.IndexByteString(line, '#'); i >= 0 {
+ line = line[0:i]
+ }
+ f := getFields(line)
+ if len(f) < 2 {
+ continue
+ }
+ if proto, _, ok := dtoi(f[1]); ok {
+ if _, ok := protocols[f[0]]; !ok {
+ protocols[f[0]] = proto
+ }
+ for _, alias := range f[2:] {
+ if _, ok := protocols[alias]; !ok {
+ protocols[alias] = proto
+ }
+ }
+ }
+ }
+}
+
+// lookupProtocol looks up an IP protocol name in /etc/protocols and
+// returns the corresponding protocol number.
+func lookupProtocol(_ context.Context, name string) (int, error) {
+ onceReadProtocols.Do(readProtocols)
+ return lookupProtocolMap(name)
+}
+
+func (r *Resolver) lookupHost(ctx context.Context, host string) (addrs []string, err error) {
+ order := systemConf().hostLookupOrder(r, host)
+ if !r.preferGo() && order == hostLookupCgo {
+ if addrs, err, ok := cgoLookupHost(ctx, host); ok {
+ return addrs, err
+ }
+ // cgo not available (or netgo); fall back to Go's DNS resolver
+ order = hostLookupFilesDNS
+ }
+ return r.goLookupHostOrder(ctx, host, order)
+}
+
+func (r *Resolver) lookupIP(ctx context.Context, network, host string) (addrs []IPAddr, err error) {
+ if r.preferGo() {
+ return r.goLookupIP(ctx, network, host)
+ }
+ order := systemConf().hostLookupOrder(r, host)
+ if order == hostLookupCgo {
+ if addrs, err, ok := cgoLookupIP(ctx, network, host); ok {
+ return addrs, err
+ }
+ // cgo not available (or netgo); fall back to Go's DNS resolver
+ order = hostLookupFilesDNS
+ }
+ ips, _, err := r.goLookupIPCNAMEOrder(ctx, network, host, order)
+ return ips, err
+}
+
+func (r *Resolver) lookupPort(ctx context.Context, network, service string) (int, error) {
+ if !r.preferGo() && systemConf().canUseCgo() {
+ if port, err, ok := cgoLookupPort(ctx, network, service); ok {
+ if err != nil {
+ // Issue 18213: if cgo fails, first check to see whether we
+ // have the answer baked-in to the net package.
+ if port, err := goLookupPort(network, service); err == nil {
+ return port, nil
+ }
+ }
+ return port, err
+ }
+ }
+ return goLookupPort(network, service)
+}
+
+func (r *Resolver) lookupCNAME(ctx context.Context, name string) (string, error) {
+ if !r.preferGo() && systemConf().canUseCgo() {
+ if cname, err, ok := cgoLookupCNAME(ctx, name); ok {
+ return cname, err
+ }
+ }
+ return r.goLookupCNAME(ctx, name)
+}
+
+func (r *Resolver) lookupSRV(ctx context.Context, service, proto, name string) (string, []*SRV, error) {
+ return r.goLookupSRV(ctx, service, proto, name)
+}
+
+func (r *Resolver) lookupMX(ctx context.Context, name string) ([]*MX, error) {
+ return r.goLookupMX(ctx, name)
+}
+
+func (r *Resolver) lookupNS(ctx context.Context, name string) ([]*NS, error) {
+ return r.goLookupNS(ctx, name)
+}
+
+func (r *Resolver) lookupTXT(ctx context.Context, name string) ([]string, error) {
+ return r.goLookupTXT(ctx, name)
+}
+
+func (r *Resolver) lookupAddr(ctx context.Context, addr string) ([]string, error) {
+ if !r.preferGo() && systemConf().canUseCgo() {
+ if ptrs, err, ok := cgoLookupPTR(ctx, addr); ok {
+ return ptrs, err
+ }
+ }
+ return r.goLookupPTR(ctx, addr)
+}
+
+// concurrentThreadsLimit returns the number of threads we permit to
+// run concurrently doing DNS lookups via cgo. A DNS lookup may use a
+// file descriptor so we limit this to less than the number of
+// permitted open files. On some systems, notably Darwin, if
+// getaddrinfo is unable to open a file descriptor it simply returns
+// EAI_NONAME rather than a useful error. Limiting the number of
+// concurrent getaddrinfo calls to less than the permitted number of
+// file descriptors makes that error less likely. We don't bother to
+// apply the same limit to DNS lookups run directly from Go, because
+// there we will return a meaningful "too many open files" error.
+func concurrentThreadsLimit() int {
+ var rlim syscall.Rlimit
+ if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil {
+ return 500
+ }
+ r := int(rlim.Cur)
+ if r > 500 {
+ r = 500
+ } else if r > 30 {
+ r -= 30
+ }
+ return r
+}
diff --git a/contrib/go/_std_1.19/src/net/mac.go b/contrib/go/_std_1.19/src/net/mac.go
new file mode 100644
index 0000000000..53d5b2dbf5
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/mac.go
@@ -0,0 +1,86 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+const hexDigit = "0123456789abcdef"
+
+// A HardwareAddr represents a physical hardware address.
+type HardwareAddr []byte
+
+func (a HardwareAddr) String() string {
+ if len(a) == 0 {
+ return ""
+ }
+ buf := make([]byte, 0, len(a)*3-1)
+ for i, b := range a {
+ if i > 0 {
+ buf = append(buf, ':')
+ }
+ buf = append(buf, hexDigit[b>>4])
+ buf = append(buf, hexDigit[b&0xF])
+ }
+ return string(buf)
+}
+
+// ParseMAC parses s as an IEEE 802 MAC-48, EUI-48, EUI-64, or a 20-octet
+// IP over InfiniBand link-layer address using one of the following formats:
+//
+// 00:00:5e:00:53:01
+// 02:00:5e:10:00:00:00:01
+// 00:00:00:00:fe:80:00:00:00:00:00:00:02:00:5e:10:00:00:00:01
+// 00-00-5e-00-53-01
+// 02-00-5e-10-00-00-00-01
+// 00-00-00-00-fe-80-00-00-00-00-00-00-02-00-5e-10-00-00-00-01
+// 0000.5e00.5301
+// 0200.5e10.0000.0001
+// 0000.0000.fe80.0000.0000.0000.0200.5e10.0000.0001
+func ParseMAC(s string) (hw HardwareAddr, err error) {
+ if len(s) < 14 {
+ goto error
+ }
+
+ if s[2] == ':' || s[2] == '-' {
+ if (len(s)+1)%3 != 0 {
+ goto error
+ }
+ n := (len(s) + 1) / 3
+ if n != 6 && n != 8 && n != 20 {
+ goto error
+ }
+ hw = make(HardwareAddr, n)
+ for x, i := 0, 0; i < n; i++ {
+ var ok bool
+ if hw[i], ok = xtoi2(s[x:], s[2]); !ok {
+ goto error
+ }
+ x += 3
+ }
+ } else if s[4] == '.' {
+ if (len(s)+1)%5 != 0 {
+ goto error
+ }
+ n := 2 * (len(s) + 1) / 5
+ if n != 6 && n != 8 && n != 20 {
+ goto error
+ }
+ hw = make(HardwareAddr, n)
+ for x, i := 0, 0; i < n; i += 2 {
+ var ok bool
+ if hw[i], ok = xtoi2(s[x:x+2], 0); !ok {
+ goto error
+ }
+ if hw[i+1], ok = xtoi2(s[x+2:], s[4]); !ok {
+ goto error
+ }
+ x += 5
+ }
+ } else {
+ goto error
+ }
+ return hw, nil
+
+error:
+ return nil, &AddrError{Err: "invalid MAC address", Addr: s}
+}
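+
+// A caller-side sketch using one of the accepted formats above:
+//
+//	hw, err := net.ParseMAC("00:00:5e:00:53:01")
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Println(hw) // prints 00:00:5e:00:53:01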
diff --git a/contrib/go/_std_1.19/src/net/net.go b/contrib/go/_std_1.19/src/net/net.go
new file mode 100644
index 0000000000..ff56c31c56
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/net.go
@@ -0,0 +1,771 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package net provides a portable interface for network I/O, including
+TCP/IP, UDP, domain name resolution, and Unix domain sockets.
+
+Although the package provides access to low-level networking
+primitives, most clients will need only the basic interface provided
+by the Dial, Listen, and Accept functions and the associated
+Conn and Listener interfaces. The crypto/tls package uses
+the same interfaces and similar Dial and Listen functions.
+
+The Dial function connects to a server:
+
+ conn, err := net.Dial("tcp", "golang.org:80")
+ if err != nil {
+ // handle error
+ }
+ fmt.Fprintf(conn, "GET / HTTP/1.0\r\n\r\n")
+ status, err := bufio.NewReader(conn).ReadString('\n')
+ // ...
+
+The Listen function creates servers:
+
+ ln, err := net.Listen("tcp", ":8080")
+ if err != nil {
+ // handle error
+ }
+ for {
+ conn, err := ln.Accept()
+ if err != nil {
+ // handle error
+ }
+ go handleConnection(conn)
+ }
+
+# Name Resolution
+
+The method for resolving domain names, whether indirectly with functions like Dial
+or directly with functions like LookupHost and LookupAddr, varies by operating system.
+
+On Unix systems, the resolver has two options for resolving names.
+It can use a pure Go resolver that sends DNS requests directly to the servers
+listed in /etc/resolv.conf, or it can use a cgo-based resolver that calls C
+library routines such as getaddrinfo and getnameinfo.
+
+By default the pure Go resolver is used, because a blocked DNS request consumes
+only a goroutine, while a blocked C call consumes an operating system thread.
+When cgo is available, the cgo-based resolver is used instead under a variety of
+conditions: on systems that do not let programs make direct DNS requests (OS X),
+when the LOCALDOMAIN environment variable is present (even if empty),
+when the RES_OPTIONS or HOSTALIASES environment variable is non-empty,
+when the ASR_CONFIG environment variable is non-empty (OpenBSD only),
+when /etc/resolv.conf or /etc/nsswitch.conf specify the use of features that the
+Go resolver does not implement, and when the name being looked up ends in .local
+or is an mDNS name.
+
+The resolver decision can be overridden by setting the netdns value of the
+GODEBUG environment variable (see package runtime) to go or cgo, as in:
+
+ export GODEBUG=netdns=go # force pure Go resolver
+ export GODEBUG=netdns=cgo # force native resolver (cgo, win32)
+
+The decision can also be forced while building the Go source tree
+by setting the netgo or netcgo build tag.
+
+A numeric netdns setting, as in GODEBUG=netdns=1, causes the resolver
+to print debugging information about its decisions.
+To force a particular resolver while also printing debugging information,
+join the two settings by a plus sign, as in GODEBUG=netdns=go+1.
+
+On Plan 9, the resolver always accesses /net/cs and /net/dns.
+
+On Windows, in Go 1.18.x and earlier, the resolver always used C
+library functions, such as GetAddrInfo and DnsQuery.
+*/
+package net
+
+import (
+ "context"
+ "errors"
+ "internal/poll"
+ "io"
+ "os"
+ "sync"
+ "syscall"
+ "time"
+)
+
+// netGo and netCgo contain the state of the build tags used
+// to build this binary, and whether cgo is available.
+// conf.go mirrors these into conf for easier testing.
+var (
+ netGo bool // set true in cgo_stub.go for build tag "netgo" (or no cgo)
+ netCgo bool // set true in conf_netcgo.go for build tag "netcgo"
+)
+
+// Addr represents a network end point address.
+//
+// The two methods Network and String conventionally return strings
+// that can be passed as the arguments to Dial, but the exact form
+// and meaning of the strings is up to the implementation.
+type Addr interface {
+ Network() string // name of the network (for example, "tcp", "udp")
+ String() string // string form of address (for example, "192.0.2.1:25", "[2001:db8::1]:80")
+}
+
+// Conn is a generic stream-oriented network connection.
+//
+// Multiple goroutines may invoke methods on a Conn simultaneously.
+type Conn interface {
+ // Read reads data from the connection.
+ // Read can be made to time out and return an error after a fixed
+ // time limit; see SetDeadline and SetReadDeadline.
+ Read(b []byte) (n int, err error)
+
+ // Write writes data to the connection.
+ // Write can be made to time out and return an error after a fixed
+ // time limit; see SetDeadline and SetWriteDeadline.
+ Write(b []byte) (n int, err error)
+
+ // Close closes the connection.
+ // Any blocked Read or Write operations will be unblocked and return errors.
+ Close() error
+
+ // LocalAddr returns the local network address, if known.
+ LocalAddr() Addr
+
+ // RemoteAddr returns the remote network address, if known.
+ RemoteAddr() Addr
+
+ // SetDeadline sets the read and write deadlines associated
+ // with the connection. It is equivalent to calling both
+ // SetReadDeadline and SetWriteDeadline.
+ //
+ // A deadline is an absolute time after which I/O operations
+ // fail instead of blocking. The deadline applies to all future
+ // and pending I/O, not just the immediately following call to
+ // Read or Write. After a deadline has been exceeded, the
+ // connection can be refreshed by setting a deadline in the future.
+ //
+ // If the deadline is exceeded a call to Read or Write or to other
+ // I/O methods will return an error that wraps os.ErrDeadlineExceeded.
+ // This can be tested using errors.Is(err, os.ErrDeadlineExceeded).
+ // The error's Timeout method will return true, but note that there
+ // are other possible errors for which the Timeout method will
+ // return true even if the deadline has not been exceeded.
+ //
+ // An idle timeout can be implemented by repeatedly extending
+ // the deadline after successful Read or Write calls.
+ //
+ // A zero value for t means I/O operations will not time out.
+ SetDeadline(t time.Time) error
+
+ // SetReadDeadline sets the deadline for future Read calls
+ // and any currently-blocked Read call.
+ // A zero value for t means Read will not time out.
+ SetReadDeadline(t time.Time) error
+
+ // SetWriteDeadline sets the deadline for future Write calls
+ // and any currently-blocked Write call.
+ // Even if write times out, it may return n > 0, indicating that
+ // some of the data was successfully written.
+ // A zero value for t means Write will not time out.
+ SetWriteDeadline(t time.Time) error
+}
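+
+// A sketch of the idle-timeout pattern described above (the duration
+// and buffer size are placeholders): extend the read deadline after
+// every successful Read on an established Conn:
+//
+//	buf := make([]byte, 4096)
+//	for {
+//		conn.SetReadDeadline(time.Now().Add(30 * time.Second))
+//		n, err := conn.Read(buf)
+//		if err != nil {
+//			break // includes errors wrapping os.ErrDeadlineExceeded
+//		}
+//		process(buf[:n]) // hypothetical handler
+//	}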
+
+type conn struct {
+ fd *netFD
+}
+
+func (c *conn) ok() bool { return c != nil && c.fd != nil }
+
+// Implementation of the Conn interface.
+
+// Read implements the Conn Read method.
+func (c *conn) Read(b []byte) (int, error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ n, err := c.fd.Read(b)
+ if err != nil && err != io.EOF {
+ err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+ }
+ return n, err
+}
+
+// Write implements the Conn Write method.
+func (c *conn) Write(b []byte) (int, error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ n, err := c.fd.Write(b)
+ if err != nil {
+ err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+ }
+ return n, err
+}
+
+// Close closes the connection.
+func (c *conn) Close() error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ err := c.fd.Close()
+ if err != nil {
+ err = &OpError{Op: "close", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+ }
+ return err
+}
+
+// LocalAddr returns the local network address.
+// The Addr returned is shared by all invocations of LocalAddr, so
+// do not modify it.
+func (c *conn) LocalAddr() Addr {
+ if !c.ok() {
+ return nil
+ }
+ return c.fd.laddr
+}
+
+// RemoteAddr returns the remote network address.
+// The Addr returned is shared by all invocations of RemoteAddr, so
+// do not modify it.
+func (c *conn) RemoteAddr() Addr {
+ if !c.ok() {
+ return nil
+ }
+ return c.fd.raddr
+}
+
+// SetDeadline implements the Conn SetDeadline method.
+func (c *conn) SetDeadline(t time.Time) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ if err := c.fd.SetDeadline(t); err != nil {
+ return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err}
+ }
+ return nil
+}
+
+// SetReadDeadline implements the Conn SetReadDeadline method.
+func (c *conn) SetReadDeadline(t time.Time) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ if err := c.fd.SetReadDeadline(t); err != nil {
+ return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err}
+ }
+ return nil
+}
+
+// SetWriteDeadline implements the Conn SetWriteDeadline method.
+func (c *conn) SetWriteDeadline(t time.Time) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ if err := c.fd.SetWriteDeadline(t); err != nil {
+ return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err}
+ }
+ return nil
+}
+
+// SetReadBuffer sets the size of the operating system's
+// receive buffer associated with the connection.
+func (c *conn) SetReadBuffer(bytes int) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ if err := setReadBuffer(c.fd, bytes); err != nil {
+ return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err}
+ }
+ return nil
+}
+
+// SetWriteBuffer sets the size of the operating system's
+// transmit buffer associated with the connection.
+func (c *conn) SetWriteBuffer(bytes int) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ if err := setWriteBuffer(c.fd, bytes); err != nil {
+ return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err}
+ }
+ return nil
+}
+
+// File returns a copy of the underlying os.File.
+// It is the caller's responsibility to close f when finished.
+// Closing c does not affect f, and closing f does not affect c.
+//
+// The returned os.File's file descriptor is different from the connection's.
+// Attempting to change properties of the original using this duplicate
+// may or may not have the desired effect.
+func (c *conn) File() (f *os.File, err error) {
+ f, err = c.fd.dup()
+ if err != nil {
+ err = &OpError{Op: "file", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+ }
+ return
+}
+
+// PacketConn is a generic packet-oriented network connection.
+//
+// Multiple goroutines may invoke methods on a PacketConn simultaneously.
+type PacketConn interface {
+ // ReadFrom reads a packet from the connection,
+ // copying the payload into p. It returns the number of
+ // bytes copied into p (0 <= n <= len(p)), the return
+ // address that was on the packet, and any error
+ // encountered. Callers should always process
+ // the n > 0 bytes returned before considering the error err.
+ // ReadFrom can be made to time out and return an error after a
+ // fixed time limit; see SetDeadline and SetReadDeadline.
+ ReadFrom(p []byte) (n int, addr Addr, err error)
+
+ // WriteTo writes a packet with payload p to addr.
+ // WriteTo can be made to time out and return an Error after a
+ // fixed time limit; see SetDeadline and SetWriteDeadline.
+ // On packet-oriented connections, write timeouts are rare.
+ WriteTo(p []byte, addr Addr) (n int, err error)
+
+ // Close closes the connection.
+ // Any blocked ReadFrom or WriteTo operations will be unblocked and return errors.
+ Close() error
+
+ // LocalAddr returns the local network address, if known.
+ LocalAddr() Addr
+
+ // SetDeadline sets the read and write deadlines associated
+ // with the connection. It is equivalent to calling both
+ // SetReadDeadline and SetWriteDeadline.
+ //
+ // A deadline is an absolute time after which I/O operations
+ // fail instead of blocking. The deadline applies to all future
+ // and pending I/O, not just the immediately following call to
+ // Read or Write. After a deadline has been exceeded, the
+ // connection can be refreshed by setting a deadline in the future.
+ //
+ // If the deadline is exceeded a call to Read or Write or to other
+ // I/O methods will return an error that wraps os.ErrDeadlineExceeded.
+ // This can be tested using errors.Is(err, os.ErrDeadlineExceeded).
+ // The error's Timeout method will return true, but note that there
+ // are other possible errors for which the Timeout method will
+ // return true even if the deadline has not been exceeded.
+ //
+ // An idle timeout can be implemented by repeatedly extending
+ // the deadline after successful ReadFrom or WriteTo calls.
+ //
+ // A zero value for t means I/O operations will not time out.
+ SetDeadline(t time.Time) error
+
+ // SetReadDeadline sets the deadline for future ReadFrom calls
+ // and any currently-blocked ReadFrom call.
+ // A zero value for t means ReadFrom will not time out.
+ SetReadDeadline(t time.Time) error
+
+ // SetWriteDeadline sets the deadline for future WriteTo calls
+ // and any currently-blocked WriteTo call.
+ // Even if write times out, it may return n > 0, indicating that
+ // some of the data was successfully written.
+ // A zero value for t means WriteTo will not time out.
+ SetWriteDeadline(t time.Time) error
+}
+
+var listenerBacklogCache struct {
+ sync.Once
+ val int
+}
+
+// listenerBacklog is a caching wrapper around maxListenerBacklog.
+func listenerBacklog() int {
+ listenerBacklogCache.Do(func() { listenerBacklogCache.val = maxListenerBacklog() })
+ return listenerBacklogCache.val
+}
+
+// A Listener is a generic network listener for stream-oriented protocols.
+//
+// Multiple goroutines may invoke methods on a Listener simultaneously.
+type Listener interface {
+ // Accept waits for and returns the next connection to the listener.
+ Accept() (Conn, error)
+
+ // Close closes the listener.
+ // Any blocked Accept operations will be unblocked and return errors.
+ Close() error
+
+ // Addr returns the listener's network address.
+ Addr() Addr
+}
+
+// An Error represents a network error.
+type Error interface {
+ error
+ Timeout() bool // Is the error a timeout?
+
+ // Deprecated: Temporary errors are not well-defined.
+ // Most "temporary" errors are timeouts, and the few exceptions are surprising.
+ // Do not use this method.
+ Temporary() bool
+}
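+
+// A caller-side sketch distinguishing timeouts from other failures:
+//
+//	if ne, ok := err.(net.Error); ok && ne.Timeout() {
+//		// retry, or report the timeout
+//	}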
+
+// Various errors contained in OpError.
+var (
+ // For connection setup operations.
+ errNoSuitableAddress = errors.New("no suitable address found")
+
+ // For connection setup and write operations.
+ errMissingAddress = errors.New("missing address")
+
+ // For both read and write operations.
+ errCanceled = canceledError{}
+ ErrWriteToConnected = errors.New("use of WriteTo with pre-connected connection")
+)
+
+// canceledError lets us return the same error string we have always
+// returned, while still satisfying errors.Is(err, context.Canceled).
+type canceledError struct{}
+
+func (canceledError) Error() string { return "operation was canceled" }
+
+func (canceledError) Is(err error) bool { return err == context.Canceled }
+
+// mapErr maps from the context errors to the historical internal net
+// error values.
+func mapErr(err error) error {
+ switch err {
+ case context.Canceled:
+ return errCanceled
+ case context.DeadlineExceeded:
+ return errTimeout
+ default:
+ return err
+ }
+}
+
+// OpError is the error type usually returned by functions in the net
+// package. It describes the operation, network type, and address of
+// an error.
+type OpError struct {
+ // Op is the operation which caused the error, such as
+ // "read" or "write".
+ Op string
+
+ // Net is the network type on which this error occurred,
+ // such as "tcp" or "udp6".
+ Net string
+
+ // For operations involving a remote network connection, like
+ // Dial, Read, or Write, Source is the corresponding local
+ // network address.
+ Source Addr
+
+ // Addr is the network address for which this error occurred.
+ // For local operations, like Listen or SetDeadline, Addr is
+ // the address of the local endpoint being manipulated.
+ // For operations involving a remote network connection, like
+ // Dial, Read, or Write, Addr is the remote address of that
+ // connection.
+ Addr Addr
+
+ // Err is the error that occurred during the operation.
+ // The Error method panics if the error is nil.
+ Err error
+}
+
+func (e *OpError) Unwrap() error { return e.Err }
+
+func (e *OpError) Error() string {
+ if e == nil {
+ return "<nil>"
+ }
+ s := e.Op
+ if e.Net != "" {
+ s += " " + e.Net
+ }
+ if e.Source != nil {
+ s += " " + e.Source.String()
+ }
+ if e.Addr != nil {
+ if e.Source != nil {
+ s += "->"
+ } else {
+ s += " "
+ }
+ s += e.Addr.String()
+ }
+ s += ": " + e.Err.Error()
+ return s
+}
+
+var (
+ // aLongTimeAgo is a non-zero time, far in the past, used for
+ // immediate cancellation of dials.
+ aLongTimeAgo = time.Unix(1, 0)
+
+ // noDeadline and noCancel are just zero values for
+ // readability with functions taking too many parameters.
+ noDeadline = time.Time{}
+ noCancel = (chan struct{})(nil)
+)
+
+type timeout interface {
+ Timeout() bool
+}
+
+func (e *OpError) Timeout() bool {
+ if ne, ok := e.Err.(*os.SyscallError); ok {
+ t, ok := ne.Err.(timeout)
+ return ok && t.Timeout()
+ }
+ t, ok := e.Err.(timeout)
+ return ok && t.Timeout()
+}
+
+type temporary interface {
+ Temporary() bool
+}
+
+func (e *OpError) Temporary() bool {
+ // Treat ECONNRESET and ECONNABORTED as temporary errors when
+ // they come from calling accept. See issue 6163.
+ if e.Op == "accept" && isConnError(e.Err) {
+ return true
+ }
+
+ if ne, ok := e.Err.(*os.SyscallError); ok {
+ t, ok := ne.Err.(temporary)
+ return ok && t.Temporary()
+ }
+ t, ok := e.Err.(temporary)
+ return ok && t.Temporary()
+}
+
+// A ParseError is the error type of literal network address parsers.
+type ParseError struct {
+ // Type is the type of string that was expected, such as
+ // "IP address", "CIDR address".
+ Type string
+
+ // Text is the malformed text string.
+ Text string
+}
+
+func (e *ParseError) Error() string { return "invalid " + e.Type + ": " + e.Text }
+
+func (e *ParseError) Timeout() bool { return false }
+func (e *ParseError) Temporary() bool { return false }
+
+type AddrError struct {
+ Err string
+ Addr string
+}
+
+func (e *AddrError) Error() string {
+ if e == nil {
+ return "<nil>"
+ }
+ s := e.Err
+ if e.Addr != "" {
+ s = "address " + e.Addr + ": " + s
+ }
+ return s
+}
+
+func (e *AddrError) Timeout() bool { return false }
+func (e *AddrError) Temporary() bool { return false }
+
+type UnknownNetworkError string
+
+func (e UnknownNetworkError) Error() string { return "unknown network " + string(e) }
+func (e UnknownNetworkError) Timeout() bool { return false }
+func (e UnknownNetworkError) Temporary() bool { return false }
+
+type InvalidAddrError string
+
+func (e InvalidAddrError) Error() string { return string(e) }
+func (e InvalidAddrError) Timeout() bool { return false }
+func (e InvalidAddrError) Temporary() bool { return false }
+
+// errTimeout exists to return the historical "i/o timeout" string
+// for context.DeadlineExceeded. See mapErr.
+// It is also used when Dialer.Deadline is exceeded.
+// errors.Is(errTimeout, context.DeadlineExceeded) returns true.
+//
+// TODO(iant): We could consider changing this to os.ErrDeadlineExceeded
+// in the future, if we make
+//
+// errors.Is(os.ErrDeadlineExceeded, context.DeadlineExceeded)
+//
+// return true.
+var errTimeout error = &timeoutError{}
+
+type timeoutError struct{}
+
+func (e *timeoutError) Error() string { return "i/o timeout" }
+func (e *timeoutError) Timeout() bool { return true }
+func (e *timeoutError) Temporary() bool { return true }
+
+func (e *timeoutError) Is(err error) bool {
+ return err == context.DeadlineExceeded
+}
+
+// DNSConfigError represents an error reading the machine's DNS configuration.
+// (No longer used; kept for compatibility.)
+type DNSConfigError struct {
+ Err error
+}
+
+func (e *DNSConfigError) Unwrap() error { return e.Err }
+func (e *DNSConfigError) Error() string { return "error reading DNS config: " + e.Err.Error() }
+func (e *DNSConfigError) Timeout() bool { return false }
+func (e *DNSConfigError) Temporary() bool { return false }
+
+// Various errors contained in DNSError.
+var (
+ errNoSuchHost = errors.New("no such host")
+)
+
+// DNSError represents a DNS lookup error.
+type DNSError struct {
+ Err string // description of the error
+ Name string // name looked for
+ Server string // server used
+ IsTimeout bool // if true, timed out; not all timeouts set this
+ IsTemporary bool // if true, error is temporary; not all errors set this
+ IsNotFound bool // if true, host could not be found
+}
+
+func (e *DNSError) Error() string {
+ if e == nil {
+ return "<nil>"
+ }
+ s := "lookup " + e.Name
+ if e.Server != "" {
+ s += " on " + e.Server
+ }
+ s += ": " + e.Err
+ return s
+}
+
+// Timeout reports whether the DNS lookup is known to have timed out.
+// This is not always known; a DNS lookup may fail due to a timeout
+// and return a DNSError for which Timeout returns false.
+func (e *DNSError) Timeout() bool { return e.IsTimeout }
+
+// Temporary reports whether the DNS error is known to be temporary.
+// This is not always known; a DNS lookup may fail due to a temporary
+// error and return a DNSError for which Temporary returns false.
+func (e *DNSError) Temporary() bool { return e.IsTimeout || e.IsTemporary }
+
+// errClosed exists just so that the docs for ErrClosed don't mention
+// the internal package poll.
+var errClosed = poll.ErrNetClosing
+
+// ErrClosed is the error returned by an I/O call on a network
+// connection that has already been closed, or that is closed by
+// another goroutine before the I/O is completed. This may be wrapped
+// in another error, and should normally be tested using
+// errors.Is(err, net.ErrClosed).
+var ErrClosed error = errClosed
+
+type writerOnly struct {
+ io.Writer
+}
+
+// Fallback implementation of io.ReaderFrom's ReadFrom, when sendfile isn't
+// applicable.
+func genericReadFrom(w io.Writer, r io.Reader) (n int64, err error) {
+ // Use wrapper to hide existing r.ReadFrom from io.Copy.
+ return io.Copy(writerOnly{w}, r)
+}
+
+// Limit the number of concurrent cgo-using goroutines, because
+// each will block an entire operating system thread. The usual culprit
+// is resolving many DNS names in separate goroutines but the DNS
+// server is not responding. Then the many lookups each use a different
+// thread, and the system or the program runs out of threads.
+
+var threadLimit chan struct{}
+
+var threadOnce sync.Once
+
+func acquireThread() {
+ threadOnce.Do(func() {
+ threadLimit = make(chan struct{}, concurrentThreadsLimit())
+ })
+ threadLimit <- struct{}{}
+}
+
+func releaseThread() {
+ <-threadLimit
+}
+
+// buffersWriter is the interface implemented by Conns that support a
+// "writev"-like batch write optimization.
+// writeBuffers should fully consume and write all chunks from the
+// provided Buffers, else it should report a non-nil error.
+type buffersWriter interface {
+ writeBuffers(*Buffers) (int64, error)
+}
+
+// Buffers contains zero or more runs of bytes to write.
+//
+// On certain machines, for certain types of connections, this is
+// optimized into an OS-specific batch write operation (such as
+// "writev").
+type Buffers [][]byte
+
+var (
+ _ io.WriterTo = (*Buffers)(nil)
+ _ io.Reader = (*Buffers)(nil)
+)
+
+// WriteTo writes contents of the buffers to w.
+//
+// WriteTo implements io.WriterTo for Buffers.
+//
+// WriteTo modifies the slice v as well as v[i] for 0 <= i < len(v),
+// but does not modify v[i][j] for any i, j.
+func (v *Buffers) WriteTo(w io.Writer) (n int64, err error) {
+ if wv, ok := w.(buffersWriter); ok {
+ return wv.writeBuffers(v)
+ }
+ for _, b := range *v {
+ nb, err := w.Write(b)
+ n += int64(nb)
+ if err != nil {
+ v.consume(n)
+ return n, err
+ }
+ }
+ v.consume(n)
+ return n, nil
+}
+
+// Read from the buffers.
+//
+// Read implements io.Reader for Buffers.
+//
+// Read modifies the slice v as well as v[i] for 0 <= i < len(v),
+// but does not modify v[i][j] for any i, j.
+func (v *Buffers) Read(p []byte) (n int, err error) {
+ for len(p) > 0 && len(*v) > 0 {
+ n0 := copy(p, (*v)[0])
+ v.consume(int64(n0))
+ p = p[n0:]
+ n += n0
+ }
+ if len(*v) == 0 {
+ err = io.EOF
+ }
+ return
+}
+
+func (v *Buffers) consume(n int64) {
+ for len(*v) > 0 {
+ ln0 := int64(len((*v)[0]))
+ if ln0 > n {
+ (*v)[0] = (*v)[0][n:]
+ return
+ }
+ n -= ln0
+ (*v)[0] = nil
+ *v = (*v)[1:]
+ }
+}
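+
+// Illustrative sketch, not part of the upstream file: writing several
+// chunks with a single WriteTo call. When w is a *TCPConn, the
+// buffersWriter fast path above may turn this into one writev. The
+// helper name is made up for the example.
+func writeBuffersExample(w io.Writer) (int64, error) {
+ bufs := Buffers{[]byte("HTTP/1.1 200 OK\r\n"), []byte("\r\n")}
+ return bufs.WriteTo(w) // bufs is consumed as it is written
+}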
diff --git a/contrib/go/_std_1.18/src/net/netip/leaf_alts.go b/contrib/go/_std_1.19/src/net/netip/leaf_alts.go
index 70513abfd9..70513abfd9 100644
--- a/contrib/go/_std_1.18/src/net/netip/leaf_alts.go
+++ b/contrib/go/_std_1.19/src/net/netip/leaf_alts.go
diff --git a/contrib/go/_std_1.19/src/net/netip/netip.go b/contrib/go/_std_1.19/src/net/netip/netip.go
new file mode 100644
index 0000000000..bb83371a55
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/netip/netip.go
@@ -0,0 +1,1504 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package netip defines an IP address type that's a small value type.
+// Building on that Addr type, the package also defines AddrPort (an
+// IP address and a port), and Prefix (an IP address and a bit length
+// prefix).
+//
+// Compared to the net.IP type, this package's Addr type takes less
+// memory, is immutable, and is comparable (supports == and being a
+// map key).
+package netip
+
+import (
+ "errors"
+ "math"
+ "strconv"
+
+ "internal/bytealg"
+ "internal/intern"
+ "internal/itoa"
+)
+
+// Sizes: (64-bit)
+// net.IP: 24 byte slice header + {4, 16} = 28 to 40 bytes
+// net.IPAddr: 40 byte slice header + {4, 16} = 44 to 56 bytes + zone length
+// netip.Addr: 24 bytes (zone is per-name singleton, shared across all users)
+
+// Addr represents an IPv4 or IPv6 address (with or without a scoped
+// addressing zone), similar to net.IP or net.IPAddr.
+//
+// Unlike net.IP or net.IPAddr, Addr is a comparable value
+// type (it supports == and can be a map key) and is immutable.
+//
+// The zero Addr is not a valid IP address.
+// Addr{} is distinct from both 0.0.0.0 and ::.
+type Addr struct {
+ // addr is the hi and lo bits of an IPv6 address. If z==z4,
+ // hi and lo contain the IPv4-mapped IPv6 address.
+ //
+ // hi and lo are constructed by interpreting a 16-byte IPv6
+ // address as a big-endian 128-bit number. The most significant
+ // bits of that number go into hi, the rest into lo.
+ //
+ // For example, 0011:2233:4455:6677:8899:aabb:ccdd:eeff is stored as:
+ // addr.hi = 0x0011223344556677
+ // addr.lo = 0x8899aabbccddeeff
+ //
+ // We store IPs like this, rather than as [16]byte, because it
+ // turns most operations on IPs into arithmetic and bit-twiddling
+ // operations on 64-bit registers, which is much faster than
+ // bytewise processing.
+ addr uint128
+
+ // z is a combination of the address family and the IPv6 zone.
+ //
+ // nil means invalid IP address (for a zero Addr).
+ // z4 means an IPv4 address.
+ // z6noz means an IPv6 address without a zone.
+ //
+ // Otherwise it's the interned zone name string.
+ z *intern.Value
+}
+
+// z0, z4, and z6noz are sentinel Addr.z values.
+// See the Addr type's field docs.
+var (
+ z0 = (*intern.Value)(nil)
+ z4 = new(intern.Value)
+ z6noz = new(intern.Value)
+)
+
+// IPv6LinkLocalAllNodes returns the IPv6 link-local all nodes multicast
+// address ff02::1.
+func IPv6LinkLocalAllNodes() Addr { return AddrFrom16([16]byte{0: 0xff, 1: 0x02, 15: 0x01}) }
+
+// IPv6Unspecified returns the IPv6 unspecified address "::".
+func IPv6Unspecified() Addr { return Addr{z: z6noz} }
+
+// IPv4Unspecified returns the IPv4 unspecified address "0.0.0.0".
+func IPv4Unspecified() Addr { return AddrFrom4([4]byte{}) }
+
+// AddrFrom4 returns the address of the IPv4 address given by the bytes in addr.
+func AddrFrom4(addr [4]byte) Addr {
+ return Addr{
+ addr: uint128{0, 0xffff00000000 | uint64(addr[0])<<24 | uint64(addr[1])<<16 | uint64(addr[2])<<8 | uint64(addr[3])},
+ z: z4,
+ }
+}
+
+// AddrFrom16 returns the IPv6 address given by the bytes in addr.
+// An IPv4-mapped IPv6 address is left as an IPv6 address.
+// (Use Unmap to convert them if needed.)
+func AddrFrom16(addr [16]byte) Addr {
+ return Addr{
+ addr: uint128{
+ beUint64(addr[:8]),
+ beUint64(addr[8:]),
+ },
+ z: z6noz,
+ }
+}
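+
+// Illustrative sketch, not part of the upstream file: constructing an
+// Addr from raw bytes. The helper name is made up for the example.
+func addrFromBytesExample() {
+ v4 := AddrFrom4([4]byte{192, 0, 2, 1}) // 192.0.2.1
+ v6 := AddrFrom16([16]byte{15: 0x01}) // ::1 (kept as IPv6; see Unmap)
+ _, _ = v4, v6
+}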
+
+// ipv6Slice is like AddrFrom16, but operates on a 16-byte slice.
+// It assumes the slice is 16 bytes; the caller must enforce this.
+func ipv6Slice(addr []byte) Addr {
+ return Addr{
+ addr: uint128{
+ beUint64(addr[:8]),
+ beUint64(addr[8:]),
+ },
+ z: z6noz,
+ }
+}
+
+// ParseAddr parses s as an IP address, returning the result. The string
+// s can be in dotted decimal ("192.0.2.1"), IPv6 ("2001:db8::68"),
+// or IPv6 with a scoped addressing zone ("fe80::1cc0:3e8c:119f:c2e1%ens18").
+func ParseAddr(s string) (Addr, error) {
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '.':
+ return parseIPv4(s)
+ case ':':
+ return parseIPv6(s)
+ case '%':
+ // Assume that this was trying to be an IPv6 address with
+ // a zone specifier, but the address is missing.
+ return Addr{}, parseAddrError{in: s, msg: "missing IPv6 address"}
+ }
+ }
+ return Addr{}, parseAddrError{in: s, msg: "unable to parse IP"}
+}
+
+// MustParseAddr calls ParseAddr(s) and panics on error.
+// It is intended for use in tests with hard-coded strings.
+func MustParseAddr(s string) Addr {
+ ip, err := ParseAddr(s)
+ if err != nil {
+ panic(err)
+ }
+ return ip
+}
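+
+// Illustrative sketch, not part of the upstream file: the three literal
+// forms ParseAddr accepts. The helper name is made up for the example.
+func parseAddrExample() {
+ v4, _ := ParseAddr("192.0.2.1") // IPv4 dotted decimal
+ v6, _ := ParseAddr("2001:db8::68") // IPv6
+ zoned, _ := ParseAddr("fe80::1%eth0") // IPv6 with a zone
+ _, _, _ = v4, v6, zoned
+}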
+
+type parseAddrError struct {
+ in string // the string given to ParseAddr
+ msg string // an explanation of the parse failure
+ at string // optionally, the unparsed portion of in at which the error occurred.
+}
+
+func (err parseAddrError) Error() string {
+ q := strconv.Quote
+ if err.at != "" {
+ return "ParseAddr(" + q(err.in) + "): " + err.msg + " (at " + q(err.at) + ")"
+ }
+ return "ParseAddr(" + q(err.in) + "): " + err.msg
+}
+
+// parseIPv4 parses s as an IPv4 address (in form "192.168.0.1").
+func parseIPv4(s string) (ip Addr, err error) {
+ var fields [4]uint8
+ var val, pos int
+ var digLen int // number of digits in current octet
+ for i := 0; i < len(s); i++ {
+ if s[i] >= '0' && s[i] <= '9' {
+ if digLen == 1 && val == 0 {
+ return Addr{}, parseAddrError{in: s, msg: "IPv4 field has octet with leading zero"}
+ }
+ val = val*10 + int(s[i]) - '0'
+ digLen++
+ if val > 255 {
+ return Addr{}, parseAddrError{in: s, msg: "IPv4 field has value >255"}
+ }
+ } else if s[i] == '.' {
+ // .1.2.3
+ // 1.2.3.
+ // 1..2.3
+ if i == 0 || i == len(s)-1 || s[i-1] == '.' {
+ return Addr{}, parseAddrError{in: s, msg: "IPv4 field must have at least one digit", at: s[i:]}
+ }
+ // 1.2.3.4.5
+ if pos == 3 {
+ return Addr{}, parseAddrError{in: s, msg: "IPv4 address too long"}
+ }
+ fields[pos] = uint8(val)
+ pos++
+ val = 0
+ digLen = 0
+ } else {
+ return Addr{}, parseAddrError{in: s, msg: "unexpected character", at: s[i:]}
+ }
+ }
+ if pos < 3 {
+ return Addr{}, parseAddrError{in: s, msg: "IPv4 address too short"}
+ }
+ fields[3] = uint8(val)
+ return AddrFrom4(fields), nil
+}
+
+// parseIPv6 parses s as an IPv6 address (in form "2001:db8::68").
+func parseIPv6(in string) (Addr, error) {
+ s := in
+
+ // Split off the zone right from the start. Yes it's a second scan
+ // of the string, but trying to handle it inline makes a bunch of
+ // other inner loop conditionals more expensive, and it ends up
+ // being slower.
+ zone := ""
+ i := bytealg.IndexByteString(s, '%')
+ if i != -1 {
+ s, zone = s[:i], s[i+1:]
+ if zone == "" {
+ // Not allowed to have an empty zone if explicitly specified.
+ return Addr{}, parseAddrError{in: in, msg: "zone must be a non-empty string"}
+ }
+ }
+
+ var ip [16]byte
+ ellipsis := -1 // position of ellipsis in ip
+
+ // Might have leading ellipsis
+ if len(s) >= 2 && s[0] == ':' && s[1] == ':' {
+ ellipsis = 0
+ s = s[2:]
+ // Might be only ellipsis
+ if len(s) == 0 {
+ return IPv6Unspecified().WithZone(zone), nil
+ }
+ }
+
+ // Loop, parsing hex numbers followed by colon.
+ i = 0
+ for i < 16 {
+ // Hex number. Similar to parseIPv4, inlining the hex number
+ // parsing yields a significant performance increase.
+ off := 0
+ acc := uint32(0)
+ for ; off < len(s); off++ {
+ c := s[off]
+ if c >= '0' && c <= '9' {
+ acc = (acc << 4) + uint32(c-'0')
+ } else if c >= 'a' && c <= 'f' {
+ acc = (acc << 4) + uint32(c-'a'+10)
+ } else if c >= 'A' && c <= 'F' {
+ acc = (acc << 4) + uint32(c-'A'+10)
+ } else {
+ break
+ }
+ if acc > math.MaxUint16 {
+ // Overflow, fail.
+ return Addr{}, parseAddrError{in: in, msg: "IPv6 field has value >=2^16", at: s}
+ }
+ }
+ if off == 0 {
+ // No digits found, fail.
+ return Addr{}, parseAddrError{in: in, msg: "each colon-separated field must have at least one digit", at: s}
+ }
+
+ // If followed by dot, might be in trailing IPv4.
+ if off < len(s) && s[off] == '.' {
+ if ellipsis < 0 && i != 12 {
+ // Not the right place.
+ return Addr{}, parseAddrError{in: in, msg: "embedded IPv4 address must replace the final 2 fields of the address", at: s}
+ }
+ if i+4 > 16 {
+ // Not enough room.
+ return Addr{}, parseAddrError{in: in, msg: "too many hex fields to fit an embedded IPv4 at the end of the address", at: s}
+ }
+ // TODO: could make this a bit faster by having a helper
+ // that parses to a [4]byte, and have both parseIPv4 and
+ // parseIPv6 use it.
+ ip4, err := parseIPv4(s)
+ if err != nil {
+ return Addr{}, parseAddrError{in: in, msg: err.Error(), at: s}
+ }
+ ip[i] = ip4.v4(0)
+ ip[i+1] = ip4.v4(1)
+ ip[i+2] = ip4.v4(2)
+ ip[i+3] = ip4.v4(3)
+ s = ""
+ i += 4
+ break
+ }
+
+ // Save this 16-bit chunk.
+ ip[i] = byte(acc >> 8)
+ ip[i+1] = byte(acc)
+ i += 2
+
+ // Stop at end of string.
+ s = s[off:]
+ if len(s) == 0 {
+ break
+ }
+
+ // Otherwise must be followed by colon and more.
+ if s[0] != ':' {
+ return Addr{}, parseAddrError{in: in, msg: "unexpected character, want colon", at: s}
+ } else if len(s) == 1 {
+ return Addr{}, parseAddrError{in: in, msg: "colon must be followed by more characters", at: s}
+ }
+ s = s[1:]
+
+ // Look for ellipsis.
+ if s[0] == ':' {
+ if ellipsis >= 0 { // already have one
+ return Addr{}, parseAddrError{in: in, msg: "multiple :: in address", at: s}
+ }
+ ellipsis = i
+ s = s[1:]
+ if len(s) == 0 { // can be at end
+ break
+ }
+ }
+ }
+
+ // Must have used entire string.
+ if len(s) != 0 {
+ return Addr{}, parseAddrError{in: in, msg: "trailing garbage after address", at: s}
+ }
+
+ // If didn't parse enough, expand ellipsis.
+ if i < 16 {
+ if ellipsis < 0 {
+ return Addr{}, parseAddrError{in: in, msg: "address string too short"}
+ }
+ n := 16 - i
+ for j := i - 1; j >= ellipsis; j-- {
+ ip[j+n] = ip[j]
+ }
+ for j := ellipsis + n - 1; j >= ellipsis; j-- {
+ ip[j] = 0
+ }
+ } else if ellipsis >= 0 {
+ // Ellipsis must represent at least one 0 group.
+ return Addr{}, parseAddrError{in: in, msg: "the :: must expand to at least one field of zeros"}
+ }
+ return AddrFrom16(ip).WithZone(zone), nil
+}
+
+// AddrFromSlice parses the 4- or 16-byte byte slice as an IPv4 or IPv6 address.
+// Note that a net.IP can be passed directly as the []byte argument.
+// If slice's length is not 4 or 16, AddrFromSlice returns Addr{}, false.
+func AddrFromSlice(slice []byte) (ip Addr, ok bool) {
+ switch len(slice) {
+ case 4:
+ return AddrFrom4(*(*[4]byte)(slice)), true
+ case 16:
+ return ipv6Slice(slice), true
+ }
+ return Addr{}, false
+}
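+
+// Illustrative sketch, not part of the upstream file: AddrFromSlice
+// accepts exactly 4 or 16 bytes. The helper name is made up for the
+// example.
+func addrFromSliceExample() {
+ ip, ok := AddrFromSlice([]byte{192, 0, 2, 1}) // ok == true, IPv4
+ _, _ = ip, ok
+ _, ok = AddrFromSlice([]byte{1, 2, 3}) // ok == false: bad length
+ _ = ok
+}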
+
+// v4 returns the i'th byte of ip. If ip is not an IPv4 address, v4
+// returns unspecified garbage.
+func (ip Addr) v4(i uint8) uint8 {
+ return uint8(ip.addr.lo >> ((3 - i) * 8))
+}
+
+// v6 returns the i'th byte of ip. If ip is an IPv4 address, this
+// accesses the IPv4-mapped IPv6 address form of the IP.
+func (ip Addr) v6(i uint8) uint8 {
+ return uint8(*(ip.addr.halves()[(i/8)%2]) >> ((7 - i%8) * 8))
+}
+
+// v6u16 returns the i'th 16-bit word of ip. If ip is an IPv4 address,
+// this accesses the IPv4-mapped IPv6 address form of the IP.
+func (ip Addr) v6u16(i uint8) uint16 {
+ return uint16(*(ip.addr.halves()[(i/4)%2]) >> ((3 - i%4) * 16))
+}
+
+// isZero reports whether ip is the zero value of the IP type.
+// The zero value is not a valid IP address of any type.
+//
+// Note that "0.0.0.0" and "::" are not the zero value. Use IsUnspecified to
+// check for these values instead.
+func (ip Addr) isZero() bool {
+ // Faster than comparing ip == Addr{}, but effectively equivalent,
+ // as there's no way to make an IP with a nil z from this package.
+ return ip.z == z0
+}
+
+// IsValid reports whether the Addr is an initialized address (not the zero Addr).
+//
+// Note that "0.0.0.0" and "::" are both valid values.
+func (ip Addr) IsValid() bool { return ip.z != z0 }
+
+// BitLen returns the number of bits in the IP address:
+// 128 for IPv6, 32 for IPv4, and 0 for the zero Addr.
+//
+// Note that IPv4-mapped IPv6 addresses are considered IPv6 addresses
+// and therefore have bit length 128.
+func (ip Addr) BitLen() int {
+ switch ip.z {
+ case z0:
+ return 0
+ case z4:
+ return 32
+ }
+ return 128
+}
+
+// Zone returns ip's IPv6 scoped addressing zone, if any.
+func (ip Addr) Zone() string {
+ if ip.z == nil {
+ return ""
+ }
+ zone, _ := ip.z.Get().(string)
+ return zone
+}
+
+// Compare returns an integer comparing two IPs.
+// The result will be 0 if ip == ip2, -1 if ip < ip2, and +1 if ip > ip2.
+// The definition of "less than" is the same as the Less method.
+func (ip Addr) Compare(ip2 Addr) int {
+ f1, f2 := ip.BitLen(), ip2.BitLen()
+ if f1 < f2 {
+ return -1
+ }
+ if f1 > f2 {
+ return 1
+ }
+ hi1, hi2 := ip.addr.hi, ip2.addr.hi
+ if hi1 < hi2 {
+ return -1
+ }
+ if hi1 > hi2 {
+ return 1
+ }
+ lo1, lo2 := ip.addr.lo, ip2.addr.lo
+ if lo1 < lo2 {
+ return -1
+ }
+ if lo1 > lo2 {
+ return 1
+ }
+ if ip.Is6() {
+ za, zb := ip.Zone(), ip2.Zone()
+ if za < zb {
+ return -1
+ }
+ if za > zb {
+ return 1
+ }
+ }
+ return 0
+}
+
+// Less reports whether ip sorts before ip2.
+// IP addresses sort first by length, then their address.
+// IPv6 addresses with zones sort just after the same address without a zone.
+func (ip Addr) Less(ip2 Addr) bool { return ip.Compare(ip2) == -1 }
+
+func (ip Addr) lessOrEq(ip2 Addr) bool { return ip.Compare(ip2) <= 0 }
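+
+// Illustrative sketch, not part of the upstream file: IPv4 addresses
+// compare before IPv6 because addresses sort first by bit length. The
+// helper name is made up for the example.
+func compareExample() int {
+ a := MustParseAddr("10.0.0.1")
+ b := MustParseAddr("::1")
+ return a.Compare(b) // -1
+}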
+
+// Is4 reports whether ip is an IPv4 address.
+//
+// It returns false for IPv4-mapped IPv6 addresses. See Addr.Unmap.
+func (ip Addr) Is4() bool {
+ return ip.z == z4
+}
+
+// Is4In6 reports whether ip is an IPv4-mapped IPv6 address.
+func (ip Addr) Is4In6() bool {
+ return ip.Is6() && ip.addr.hi == 0 && ip.addr.lo>>32 == 0xffff
+}
+
+// Is6 reports whether ip is an IPv6 address, including IPv4-mapped
+// IPv6 addresses.
+func (ip Addr) Is6() bool {
+ return ip.z != z0 && ip.z != z4
+}
+
+// Unmap returns ip with any IPv4-mapped IPv6 address prefix removed.
+//
+// That is, if ip is an IPv6 address wrapping an IPv4 address, it
+// returns the wrapped IPv4 address. Otherwise it returns ip unmodified.
+func (ip Addr) Unmap() Addr {
+ if ip.Is4In6() {
+ ip.z = z4
+ }
+ return ip
+}
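+
+// Illustrative sketch, not part of the upstream file: an IPv4-mapped
+// IPv6 address stays IPv6 until unmapped. The helper name is made up
+// for the example.
+func unmapExample() {
+ m := MustParseAddr("::ffff:192.0.2.1")
+ _ = m.Is4() // false: still an IPv6 value
+ _ = m.Is4In6() // true
+ _ = m.Unmap().Is4() // true
+}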
+
+// WithZone returns an IP that's the same as ip but with the provided
+// zone. If zone is empty, the zone is removed. If ip is an IPv4
+// address, WithZone is a no-op and returns ip unchanged.
+func (ip Addr) WithZone(zone string) Addr {
+ if !ip.Is6() {
+ return ip
+ }
+ if zone == "" {
+ ip.z = z6noz
+ return ip
+ }
+ ip.z = intern.GetByString(zone)
+ return ip
+}
+
+// withoutZone unconditionally strips the zone from ip.
+// It's similar to WithZone, but small enough to be inlinable.
+func (ip Addr) withoutZone() Addr {
+ if !ip.Is6() {
+ return ip
+ }
+ ip.z = z6noz
+ return ip
+}
+
+// hasZone reports whether ip has an IPv6 zone.
+func (ip Addr) hasZone() bool {
+ return ip.z != z0 && ip.z != z4 && ip.z != z6noz
+}
+
+// IsLinkLocalUnicast reports whether ip is a link-local unicast address.
+func (ip Addr) IsLinkLocalUnicast() bool {
+ // Dynamic Configuration of IPv4 Link-Local Addresses
+ // https://datatracker.ietf.org/doc/html/rfc3927#section-2.1
+ if ip.Is4() {
+ return ip.v4(0) == 169 && ip.v4(1) == 254
+ }
+ // IP Version 6 Addressing Architecture (2.4 Address Type Identification)
+ // https://datatracker.ietf.org/doc/html/rfc4291#section-2.4
+ if ip.Is6() {
+ return ip.v6u16(0)&0xffc0 == 0xfe80
+ }
+ return false // zero value
+}
+
+// IsLoopback reports whether ip is a loopback address.
+func (ip Addr) IsLoopback() bool {
+ // Requirements for Internet Hosts -- Communication Layers (3.2.1.3 Addressing)
+ // https://datatracker.ietf.org/doc/html/rfc1122#section-3.2.1.3
+ if ip.Is4() {
+ return ip.v4(0) == 127
+ }
+ // IP Version 6 Addressing Architecture (2.4 Address Type Identification)
+ // https://datatracker.ietf.org/doc/html/rfc4291#section-2.4
+ if ip.Is6() {
+ return ip.addr.hi == 0 && ip.addr.lo == 1
+ }
+ return false // zero value
+}
+
+// IsMulticast reports whether ip is a multicast address.
+func (ip Addr) IsMulticast() bool {
+ // Host Extensions for IP Multicasting (4. HOST GROUP ADDRESSES)
+ // https://datatracker.ietf.org/doc/html/rfc1112#section-4
+ if ip.Is4() {
+ return ip.v4(0)&0xf0 == 0xe0
+ }
+ // IP Version 6 Addressing Architecture (2.4 Address Type Identification)
+ // https://datatracker.ietf.org/doc/html/rfc4291#section-2.4
+ if ip.Is6() {
+ return ip.addr.hi>>(64-8) == 0xff // ip.v6(0) == 0xff
+ }
+ return false // zero value
+}
+
+// IsInterfaceLocalMulticast reports whether ip is an IPv6 interface-local
+// multicast address.
+func (ip Addr) IsInterfaceLocalMulticast() bool {
+ // IPv6 Addressing Architecture (2.7.1. Pre-Defined Multicast Addresses)
+ // https://datatracker.ietf.org/doc/html/rfc4291#section-2.7.1
+ if ip.Is6() {
+ return ip.v6u16(0)&0xff0f == 0xff01
+ }
+ return false // zero value
+}
+
+// IsLinkLocalMulticast reports whether ip is a link-local multicast address.
+func (ip Addr) IsLinkLocalMulticast() bool {
+ // IPv4 Multicast Guidelines (4. Local Network Control Block (224.0.0/24))
+ // https://datatracker.ietf.org/doc/html/rfc5771#section-4
+ if ip.Is4() {
+ return ip.v4(0) == 224 && ip.v4(1) == 0 && ip.v4(2) == 0
+ }
+ // IPv6 Addressing Architecture (2.7.1. Pre-Defined Multicast Addresses)
+ // https://datatracker.ietf.org/doc/html/rfc4291#section-2.7.1
+ if ip.Is6() {
+ return ip.v6u16(0)&0xff0f == 0xff02
+ }
+ return false // zero value
+}
+
+// IsGlobalUnicast reports whether ip is a global unicast address.
+//
+// It returns true for IPv6 addresses which fall outside of the current
+// IANA-allocated 2000::/3 global unicast space, with the exception of the
+// link-local address space. It also returns true even if ip is in the IPv4
+// private address space or IPv6 unique local address space.
+// It returns false for the zero Addr.
+//
+// For reference, see RFC 1122, RFC 4291, and RFC 4632.
+func (ip Addr) IsGlobalUnicast() bool {
+ if ip.z == z0 {
+ // Invalid or zero-value.
+ return false
+ }
+
+ // Match package net's IsGlobalUnicast logic. Notably private IPv4 addresses
+ // and ULA IPv6 addresses are still considered "global unicast".
+ if ip.Is4() && (ip == IPv4Unspecified() || ip == AddrFrom4([4]byte{255, 255, 255, 255})) {
+ return false
+ }
+
+ return ip != IPv6Unspecified() &&
+ !ip.IsLoopback() &&
+ !ip.IsMulticast() &&
+ !ip.IsLinkLocalUnicast()
+}
+
+// IsPrivate reports whether ip is a private address, according to RFC 1918
+// (IPv4 addresses) and RFC 4193 (IPv6 addresses). That is, it reports whether
+// ip is in 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16, or fc00::/7. This is the
+// same as net.IP.IsPrivate.
+func (ip Addr) IsPrivate() bool {
+ // Match the stdlib's IsPrivate logic.
+ if ip.Is4() {
+ // RFC 1918 allocates 10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16 as
+ // private IPv4 address subnets.
+ return ip.v4(0) == 10 ||
+ (ip.v4(0) == 172 && ip.v4(1)&0xf0 == 16) ||
+ (ip.v4(0) == 192 && ip.v4(1) == 168)
+ }
+
+ if ip.Is6() {
+ // RFC 4193 allocates fc00::/7 as the unique local unicast IPv6 address
+ // subnet.
+ return ip.v6(0)&0xfe == 0xfc
+ }
+
+ return false // zero value
+}
+
+// IsUnspecified reports whether ip is an unspecified address, either the IPv4
+// address "0.0.0.0" or the IPv6 address "::".
+//
+// Note that the zero Addr is not an unspecified address.
+func (ip Addr) IsUnspecified() bool {
+ return ip == IPv4Unspecified() || ip == IPv6Unspecified()
+}
+
+// Prefix keeps only the top b bits of IP, producing a Prefix
+// of the specified length.
+// If ip is a zero Addr, Prefix always returns a zero Prefix and a nil error.
+// Otherwise, if bits is less than zero or greater than ip.BitLen(),
+// Prefix returns an error.
+func (ip Addr) Prefix(b int) (Prefix, error) {
+ if b < 0 {
+ return Prefix{}, errors.New("negative Prefix bits")
+ }
+ effectiveBits := b
+ switch ip.z {
+ case z0:
+ return Prefix{}, nil
+ case z4:
+ if b > 32 {
+ return Prefix{}, errors.New("prefix length " + itoa.Itoa(b) + " too large for IPv4")
+ }
+ effectiveBits += 96
+ default:
+ if b > 128 {
+ return Prefix{}, errors.New("prefix length " + itoa.Itoa(b) + " too large for IPv6")
+ }
+ }
+ ip.addr = ip.addr.and(mask6(effectiveBits))
+ return PrefixFrom(ip, b), nil
+}
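+
+// Illustrative sketch, not part of the upstream file: Prefix masks off
+// the host bits. The helper name is made up for the example.
+func addrPrefixExample() {
+ ip := MustParseAddr("192.0.2.77")
+ p, _ := ip.Prefix(24)
+ _ = p.String() // "192.0.2.0/24"
+}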
+
+const (
+ netIPv4len = 4
+ netIPv6len = 16
+)
+
+// As16 returns the IP address in its 16-byte representation.
+// IPv4 addresses are returned as IPv4-mapped IPv6 addresses.
+// IPv6 addresses with zones are returned without their zone (use the
+// Zone method to get it).
+// The ip zero value returns all zeroes.
+func (ip Addr) As16() (a16 [16]byte) {
+ bePutUint64(a16[:8], ip.addr.hi)
+ bePutUint64(a16[8:], ip.addr.lo)
+ return a16
+}
+
+// As4 returns an IPv4 or IPv4-in-IPv6 address in its 4-byte representation.
+// If ip is the zero Addr or an IPv6 address, As4 panics.
+// Note that 0.0.0.0 is not the zero Addr.
+func (ip Addr) As4() (a4 [4]byte) {
+ if ip.z == z4 || ip.Is4In6() {
+ bePutUint32(a4[:], uint32(ip.addr.lo))
+ return a4
+ }
+ if ip.z == z0 {
+ panic("As4 called on IP zero value")
+ }
+ panic("As4 called on IPv6 address")
+}
+
+// AsSlice returns an IPv4 or IPv6 address in its respective 4-byte or 16-byte representation.
+func (ip Addr) AsSlice() []byte {
+ switch ip.z {
+ case z0:
+ return nil
+ case z4:
+ var ret [4]byte
+ bePutUint32(ret[:], uint32(ip.addr.lo))
+ return ret[:]
+ default:
+ var ret [16]byte
+ bePutUint64(ret[:8], ip.addr.hi)
+ bePutUint64(ret[8:], ip.addr.lo)
+ return ret[:]
+ }
+}
+
+// Next returns the address following ip.
+// If there is none, it returns the zero Addr.
+func (ip Addr) Next() Addr {
+ ip.addr = ip.addr.addOne()
+ if ip.Is4() {
+ if uint32(ip.addr.lo) == 0 {
+ // Overflowed.
+ return Addr{}
+ }
+ } else {
+ if ip.addr.isZero() {
+ // Overflowed
+ return Addr{}
+ }
+ }
+ return ip
+}
+
+// Prev returns the IP before ip.
+// If there is none, it returns the IP zero value.
+func (ip Addr) Prev() Addr {
+ if ip.Is4() {
+ if uint32(ip.addr.lo) == 0 {
+ return Addr{}
+ }
+ } else if ip.addr.isZero() {
+ return Addr{}
+ }
+ ip.addr = ip.addr.subOne()
+ return ip
+}
+
+// String returns the string form of the IP address ip.
+// It returns one of 5 forms:
+//
+// - "invalid IP", if ip is the zero Addr
+// - IPv4 dotted decimal ("192.0.2.1")
+// - IPv6 ("2001:db8::1")
+// - "::ffff:1.2.3.4" (if Is4In6)
+// - IPv6 with zone ("fe80:db8::1%eth0")
+//
+// Note that unlike package net's IP.String method,
+// IPv4-mapped IPv6 addresses format with a "::ffff:"
+// prefix before the dotted quad.
+func (ip Addr) String() string {
+ switch ip.z {
+ case z0:
+ return "invalid IP"
+ case z4:
+ return ip.string4()
+ default:
+ if ip.Is4In6() {
+ if z := ip.Zone(); z != "" {
+ return "::ffff:" + ip.Unmap().string4() + "%" + z
+ } else {
+ return "::ffff:" + ip.Unmap().string4()
+ }
+ }
+ return ip.string6()
+ }
+}
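+
+// Illustrative sketch, not part of the upstream file: the forms String
+// produces. The helper name is made up for the example.
+func stringFormsExample() {
+ _ = Addr{}.String() // "invalid IP"
+ _ = MustParseAddr("192.0.2.1").String() // "192.0.2.1"
+ _ = MustParseAddr("::ffff:192.0.2.1").String() // "::ffff:192.0.2.1"
+ _ = MustParseAddr("fe80::1%eth0").String() // "fe80::1%eth0"
+}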
+
+// AppendTo appends a text encoding of ip,
+// as generated by MarshalText,
+// to b and returns the extended buffer.
+func (ip Addr) AppendTo(b []byte) []byte {
+ switch ip.z {
+ case z0:
+ return b
+ case z4:
+ return ip.appendTo4(b)
+ default:
+ if ip.Is4In6() {
+ b = append(b, "::ffff:"...)
+ b = ip.Unmap().appendTo4(b)
+ if z := ip.Zone(); z != "" {
+ b = append(b, '%')
+ b = append(b, z...)
+ }
+ return b
+ }
+ return ip.appendTo6(b)
+ }
+}
+
+// digits is a string of the hex digits from 0 to f. It's used in
+// appendDecimal and appendHex to format IP addresses.
+const digits = "0123456789abcdef"
+
+// appendDecimal appends the decimal string representation of x to b.
+func appendDecimal(b []byte, x uint8) []byte {
+ // Using this function rather than strconv.AppendUint makes IPv4
+ // string building 2x faster.
+
+ if x >= 100 {
+ b = append(b, digits[x/100])
+ }
+ if x >= 10 {
+ b = append(b, digits[x/10%10])
+ }
+ return append(b, digits[x%10])
+}
+
+// appendHex appends the hex string representation of x to b.
+func appendHex(b []byte, x uint16) []byte {
+ // Using this function rather than strconv.AppendUint makes IPv6
+ // string building 2x faster.
+
+ if x >= 0x1000 {
+ b = append(b, digits[x>>12])
+ }
+ if x >= 0x100 {
+ b = append(b, digits[x>>8&0xf])
+ }
+ if x >= 0x10 {
+ b = append(b, digits[x>>4&0xf])
+ }
+ return append(b, digits[x&0xf])
+}
+
+// appendHexPad appends the fully padded hex string representation of x to b.
+func appendHexPad(b []byte, x uint16) []byte {
+ return append(b, digits[x>>12], digits[x>>8&0xf], digits[x>>4&0xf], digits[x&0xf])
+}
+
+func (ip Addr) string4() string {
+ const max = len("255.255.255.255")
+ ret := make([]byte, 0, max)
+ ret = ip.appendTo4(ret)
+ return string(ret)
+}
+
+func (ip Addr) appendTo4(ret []byte) []byte {
+ ret = appendDecimal(ret, ip.v4(0))
+ ret = append(ret, '.')
+ ret = appendDecimal(ret, ip.v4(1))
+ ret = append(ret, '.')
+ ret = appendDecimal(ret, ip.v4(2))
+ ret = append(ret, '.')
+ ret = appendDecimal(ret, ip.v4(3))
+ return ret
+}
+
+// string6 formats ip in IPv6 textual representation. It follows the
+// guidelines in section 4 of RFC 5952
+// (https://tools.ietf.org/html/rfc5952#section-4): no unnecessary
+// zeros, use :: to elide the longest run of zeros, and don't use ::
+// to compact a single zero field.
+func (ip Addr) string6() string {
+ // Use a zone with a "plausibly long" name, so that most zone-ful
+ // IP addresses won't require additional allocation.
+ //
+ // The compiler does a cool optimization here, where ret ends up
+ // stack-allocated and so the only allocation this function does
+ // is to construct the returned string. As such, it's okay to be a
+ // bit greedy here, size-wise.
+ const max = len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%enp5s0")
+ ret := make([]byte, 0, max)
+ ret = ip.appendTo6(ret)
+ return string(ret)
+}
+
+func (ip Addr) appendTo6(ret []byte) []byte {
+ zeroStart, zeroEnd := uint8(255), uint8(255)
+ for i := uint8(0); i < 8; i++ {
+ j := i
+ for j < 8 && ip.v6u16(j) == 0 {
+ j++
+ }
+ if l := j - i; l >= 2 && l > zeroEnd-zeroStart {
+ zeroStart, zeroEnd = i, j
+ }
+ }
+
+ for i := uint8(0); i < 8; i++ {
+ if i == zeroStart {
+ ret = append(ret, ':', ':')
+ i = zeroEnd
+ if i >= 8 {
+ break
+ }
+ } else if i > 0 {
+ ret = append(ret, ':')
+ }
+
+ ret = appendHex(ret, ip.v6u16(i))
+ }
+
+ if ip.z != z6noz {
+ ret = append(ret, '%')
+ ret = append(ret, ip.Zone()...)
+ }
+ return ret
+}
+
+// StringExpanded is like String but IPv6 addresses are expanded with leading
+// zeroes and no "::" compression. For example, "2001:db8::1" becomes
+// "2001:0db8:0000:0000:0000:0000:0000:0001".
+func (ip Addr) StringExpanded() string {
+ switch ip.z {
+ case z0, z4:
+ return ip.String()
+ }
+
+ const size = len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")
+ ret := make([]byte, 0, size)
+ for i := uint8(0); i < 8; i++ {
+ if i > 0 {
+ ret = append(ret, ':')
+ }
+
+ ret = appendHexPad(ret, ip.v6u16(i))
+ }
+
+ if ip.z != z6noz {
+ // The addition of a zone will cause a second allocation, but when there
+ // is no zone the ret slice will be stack allocated.
+ ret = append(ret, '%')
+ ret = append(ret, ip.Zone()...)
+ }
+ return string(ret)
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The encoding is the same as returned by String, with one exception:
+// If ip is the zero Addr, the encoding is the empty string.
+func (ip Addr) MarshalText() ([]byte, error) {
+ switch ip.z {
+ case z0:
+ return []byte(""), nil
+ case z4:
+ max := len("255.255.255.255")
+ b := make([]byte, 0, max)
+ return ip.appendTo4(b), nil
+ default:
+ max := len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%enp5s0")
+ b := make([]byte, 0, max)
+ if ip.Is4In6() {
+ b = append(b, "::ffff:"...)
+ b = ip.Unmap().appendTo4(b)
+ if z := ip.Zone(); z != "" {
+ b = append(b, '%')
+ b = append(b, z...)
+ }
+ return b, nil
+ }
+ return ip.appendTo6(b), nil
+ }
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The IP address is expected in a form accepted by ParseAddr.
+//
+// If text is empty, UnmarshalText sets *ip to the zero Addr and
+// returns no error.
+func (ip *Addr) UnmarshalText(text []byte) error {
+ if len(text) == 0 {
+ *ip = Addr{}
+ return nil
+ }
+ var err error
+ *ip, err = ParseAddr(string(text))
+ return err
+}
+
+func (ip Addr) marshalBinaryWithTrailingBytes(trailingBytes int) []byte {
+ var b []byte
+ switch ip.z {
+ case z0:
+ b = make([]byte, trailingBytes)
+ case z4:
+ b = make([]byte, 4+trailingBytes)
+ bePutUint32(b, uint32(ip.addr.lo))
+ default:
+ z := ip.Zone()
+ b = make([]byte, 16+len(z)+trailingBytes)
+ bePutUint64(b[:8], ip.addr.hi)
+ bePutUint64(b[8:], ip.addr.lo)
+ copy(b[16:], z)
+ }
+ return b
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+// It returns a zero-length slice for the zero Addr,
+// the 4-byte form for an IPv4 address,
+// and the 16-byte form with zone appended for an IPv6 address.
+func (ip Addr) MarshalBinary() ([]byte, error) {
+ return ip.marshalBinaryWithTrailingBytes(0), nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+// It expects data in the form generated by MarshalBinary.
+func (ip *Addr) UnmarshalBinary(b []byte) error {
+ n := len(b)
+ switch {
+ case n == 0:
+ *ip = Addr{}
+ return nil
+ case n == 4:
+ *ip = AddrFrom4(*(*[4]byte)(b))
+ return nil
+ case n == 16:
+ *ip = ipv6Slice(b)
+ return nil
+ case n > 16:
+ *ip = ipv6Slice(b[:16]).WithZone(string(b[16:]))
+ return nil
+ }
+ return errors.New("unexpected slice size")
+}
+
+// AddrPort is an IP and a port number.
+type AddrPort struct {
+ ip Addr
+ port uint16
+}
+
+// AddrPortFrom returns an AddrPort with the provided IP and port.
+// It does not allocate.
+func AddrPortFrom(ip Addr, port uint16) AddrPort { return AddrPort{ip: ip, port: port} }
+
+// Addr returns p's IP address.
+func (p AddrPort) Addr() Addr { return p.ip }
+
+// Port returns p's port.
+func (p AddrPort) Port() uint16 { return p.port }
+
+// splitAddrPort splits s into an IP address string and a port
+// string. It splits strings shaped like "foo:bar" or "[foo]:bar",
+// without further validating the substrings. v6 indicates whether the
+// ip string should parse as an IPv6 address or an IPv4 address, in
+// order for s to be a valid ip:port string.
+func splitAddrPort(s string) (ip, port string, v6 bool, err error) {
+ i := stringsLastIndexByte(s, ':')
+ if i == -1 {
+ return "", "", false, errors.New("not an ip:port")
+ }
+
+ ip, port = s[:i], s[i+1:]
+ if len(ip) == 0 {
+ return "", "", false, errors.New("no IP")
+ }
+ if len(port) == 0 {
+ return "", "", false, errors.New("no port")
+ }
+ if ip[0] == '[' {
+ if len(ip) < 2 || ip[len(ip)-1] != ']' {
+ return "", "", false, errors.New("missing ]")
+ }
+ ip = ip[1 : len(ip)-1]
+ v6 = true
+ }
+
+ return ip, port, v6, nil
+}
+
+// ParseAddrPort parses s as an AddrPort.
+//
+// It doesn't do any name resolution: both the address and the port
+// must be numeric.
+func ParseAddrPort(s string) (AddrPort, error) {
+ var ipp AddrPort
+ ip, port, v6, err := splitAddrPort(s)
+ if err != nil {
+ return ipp, err
+ }
+ port16, err := strconv.ParseUint(port, 10, 16)
+ if err != nil {
+ return ipp, errors.New("invalid port " + strconv.Quote(port) + " parsing " + strconv.Quote(s))
+ }
+ ipp.port = uint16(port16)
+ ipp.ip, err = ParseAddr(ip)
+ if err != nil {
+ return AddrPort{}, err
+ }
+ if v6 && ipp.ip.Is4() {
+ return AddrPort{}, errors.New("invalid ip:port " + strconv.Quote(s) + ", square brackets can only be used with IPv6 addresses")
+ } else if !v6 && ipp.ip.Is6() {
+ return AddrPort{}, errors.New("invalid ip:port " + strconv.Quote(s) + ", IPv6 addresses must be surrounded by square brackets")
+ }
+ return ipp, nil
+}
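+
+// Illustrative sketch, not part of the upstream file: IPv6 addresses
+// must be bracketed in ip:port strings. The helper name is made up for
+// the example.
+func parseAddrPortExample() {
+ pp, _ := ParseAddrPort("[2001:db8::1]:443")
+ _ = pp.Addr() // 2001:db8::1
+ _ = pp.Port() // 443
+ _, err := ParseAddrPort("2001:db8::1:443") // error: needs brackets
+ _ = err
+}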
+
+// MustParseAddrPort calls ParseAddrPort(s) and panics on error.
+// It is intended for use in tests with hard-coded strings.
+func MustParseAddrPort(s string) AddrPort {
+ ip, err := ParseAddrPort(s)
+ if err != nil {
+ panic(err)
+ }
+ return ip
+}
+
+// isZero reports whether p is the zero AddrPort.
+func (p AddrPort) isZero() bool { return p == AddrPort{} }
+
+// IsValid reports whether p.IP() is valid.
+// All ports are valid, including zero.
+func (p AddrPort) IsValid() bool { return p.ip.IsValid() }
+
+func (p AddrPort) String() string {
+ switch p.ip.z {
+ case z0:
+ return "invalid AddrPort"
+ case z4:
+ a := p.ip.As4()
+ buf := make([]byte, 0, 21)
+ for i := range a {
+ buf = strconv.AppendUint(buf, uint64(a[i]), 10)
+ buf = append(buf, "...:"[i]) // '.' after each of the first three octets, ':' after the last
+ }
+ buf = strconv.AppendUint(buf, uint64(p.port), 10)
+ return string(buf)
+ default:
+ // TODO: this could be more efficient allocation-wise:
+ return joinHostPort(p.ip.String(), itoa.Itoa(int(p.port)))
+ }
+}
+
+func joinHostPort(host, port string) string {
+ // We assume that host is a literal IPv6 address if host has
+ // colons.
+ if bytealg.IndexByteString(host, ':') >= 0 {
+ return "[" + host + "]:" + port
+ }
+ return host + ":" + port
+}
+
+// AppendTo appends a text encoding of p,
+// as generated by MarshalText,
+// to b and returns the extended buffer.
+func (p AddrPort) AppendTo(b []byte) []byte {
+ switch p.ip.z {
+ case z0:
+ return b
+ case z4:
+ b = p.ip.appendTo4(b)
+ default:
+ if p.ip.Is4In6() {
+ b = append(b, "[::ffff:"...)
+ b = p.ip.Unmap().appendTo4(b)
+ if z := p.ip.Zone(); z != "" {
+ b = append(b, '%')
+ b = append(b, z...)
+ }
+ } else {
+ b = append(b, '[')
+ b = p.ip.appendTo6(b)
+ }
+ b = append(b, ']')
+ }
+ b = append(b, ':')
+ b = strconv.AppendUint(b, uint64(p.port), 10)
+ return b
+}
+
+// MarshalText implements the encoding.TextMarshaler interface. The
+// encoding is the same as returned by String, with one exception: if
+// p.Addr() is the zero Addr, the encoding is the empty string.
+func (p AddrPort) MarshalText() ([]byte, error) {
+ var max int
+ switch p.ip.z {
+ case z0:
+ case z4:
+ max = len("255.255.255.255:65535")
+ default:
+ max = len("[ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%enp5s0]:65535")
+ }
+ b := make([]byte, 0, max)
+ b = p.AppendTo(b)
+ return b, nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler
+// interface. The AddrPort is expected in a form
+// generated by MarshalText or accepted by ParseAddrPort.
+func (p *AddrPort) UnmarshalText(text []byte) error {
+ if len(text) == 0 {
+ *p = AddrPort{}
+ return nil
+ }
+ var err error
+ *p, err = ParseAddrPort(string(text))
+ return err
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+// It returns Addr.MarshalBinary with an additional two bytes appended
+// containing the port in little-endian.
+func (p AddrPort) MarshalBinary() ([]byte, error) {
+ b := p.Addr().marshalBinaryWithTrailingBytes(2)
+ lePutUint16(b[len(b)-2:], p.Port())
+ return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+// It expects data in the form generated by MarshalBinary.
+func (p *AddrPort) UnmarshalBinary(b []byte) error {
+ if len(b) < 2 {
+ return errors.New("unexpected slice size")
+ }
+ var addr Addr
+ err := addr.UnmarshalBinary(b[:len(b)-2])
+ if err != nil {
+ return err
+ }
+ *p = AddrPortFrom(addr, leUint16(b[len(b)-2:]))
+ return nil
+}
+
+// Prefix is an IP address prefix (CIDR) representing an IP network.
+//
+// The first Bits() of Addr() are specified. The remaining bits match any address.
+// The range of Bits() is [0,32] for IPv4 or [0,128] for IPv6.
+type Prefix struct {
+ ip Addr
+
+ // bits is logically a uint8 (storing [0,128]) but also
+ // encodes an "invalid" bit, currently represented by the
+ // invalidPrefixBits sentinel value. It could be packed into
+ // the uint8 with more complicated expressions in the
+ // accessors, but the extra byte (in padding anyway) doesn't
+ // hurt and simplifies code below.
+ bits int16
+}
+
+// invalidPrefixBits is the Prefix.bits value used when the bits passed
+// to PrefixFrom are out of range for the address. It's returned as the
+// int -1 in the public API.
+const invalidPrefixBits = -1
+
+// PrefixFrom returns a Prefix with the provided IP address and bit
+// prefix length.
+//
+// It does not allocate. Unlike Addr.Prefix, PrefixFrom does not mask
+// off the host bits of ip.
+//
+// If bits is less than zero or greater than ip.BitLen, Prefix.Bits
+// will return an invalid value -1.
+func PrefixFrom(ip Addr, bits int) Prefix {
+ if bits < 0 || bits > ip.BitLen() {
+ bits = invalidPrefixBits
+ }
+ b16 := int16(bits)
+ return Prefix{
+ ip: ip.withoutZone(),
+ bits: b16,
+ }
+}
+
+// Addr returns p's IP address.
+func (p Prefix) Addr() Addr { return p.ip }
+
+// Bits returns p's prefix length.
+//
+// It reports -1 if invalid.
+func (p Prefix) Bits() int { return int(p.bits) }
+
+// IsValid reports whether p.Bits() has a valid range for p.Addr().
+// If p.Addr() is the zero Addr, IsValid returns false.
+// Note that if p is the zero Prefix, then p.IsValid() == false.
+func (p Prefix) IsValid() bool { return !p.ip.isZero() && p.bits >= 0 && int(p.bits) <= p.ip.BitLen() }
+
+func (p Prefix) isZero() bool { return p == Prefix{} }
+
+// IsSingleIP reports whether p contains exactly one IP.
+func (p Prefix) IsSingleIP() bool { return p.bits != 0 && int(p.bits) == p.ip.BitLen() }
+
+// ParsePrefix parses s as an IP address prefix.
+// The string can be in the form "192.168.1.0/24" or "2001:db8::/32",
+// the CIDR notation defined in RFC 4632 and RFC 4291.
+// IPv6 zones are not permitted in prefixes, and an error will be returned if a
+// zone is present.
+//
+// Note that masked address bits are not zeroed. Use Masked for that.
+func ParsePrefix(s string) (Prefix, error) {
+ i := stringsLastIndexByte(s, '/')
+ if i < 0 {
+ return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): no '/'")
+ }
+ ip, err := ParseAddr(s[:i])
+ if err != nil {
+ return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): " + err.Error())
+ }
+ // IPv6 zones are not allowed: https://go.dev/issue/51899
+ if ip.Is6() && ip.z != z6noz {
+ return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): IPv6 zones cannot be present in a prefix")
+ }
+
+ bitsStr := s[i+1:]
+ bits, err := strconv.Atoi(bitsStr)
+ if err != nil {
+ return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): bad bits after slash: " + strconv.Quote(bitsStr))
+ }
+ maxBits := 32
+ if ip.Is6() {
+ maxBits = 128
+ }
+ if bits < 0 || bits > maxBits {
+ return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): prefix length out of range")
+ }
+ return PrefixFrom(ip, bits), nil
+}
+
+// MustParsePrefix calls ParsePrefix(s) and panics on error.
+// It is intended for use in tests with hard-coded strings.
+func MustParsePrefix(s string) Prefix {
+ ip, err := ParsePrefix(s)
+ if err != nil {
+ panic(err)
+ }
+ return ip
+}
+
+// Masked returns p in its canonical form, with all but the high
+// p.Bits() bits of p.Addr() masked off.
+//
+// If p is zero or otherwise invalid, Masked returns the zero Prefix.
+func (p Prefix) Masked() Prefix {
+ if m, err := p.ip.Prefix(int(p.bits)); err == nil {
+ return m
+ }
+ return Prefix{}
+}
+
+// Contains reports whether the network p includes ip.
+//
+// An IPv4 address will not match an IPv6 prefix.
+// An IPv4-mapped IPv6 address will not match an IPv4 prefix.
+// A zero-value IP will not match any prefix.
+// If ip has an IPv6 zone, Contains returns false,
+// because Prefixes strip zones.
+func (p Prefix) Contains(ip Addr) bool {
+ if !p.IsValid() || ip.hasZone() {
+ return false
+ }
+ if f1, f2 := p.ip.BitLen(), ip.BitLen(); f1 == 0 || f2 == 0 || f1 != f2 {
+ return false
+ }
+ if ip.Is4() {
+ // xor the IP addresses together; mismatched bits are now ones.
+ // Shift away the number of bits we don't care about.
+ // Shifts in Go are more efficient if the compiler can prove
+ // that the shift amount is smaller than the width of the shifted type (64 here).
+ // We know that p.bits is in the range 0..32 because p is Valid;
+ // the compiler doesn't know that, so mask with 63 to help it.
+ // Now truncate to 32 bits, because this is IPv4.
+ // If all the bits we care about are equal, the result will be zero.
+ return uint32((ip.addr.lo^p.ip.addr.lo)>>((32-p.bits)&63)) == 0
+ } else {
+ // xor the IP addresses together.
+ // Mask away the bits we don't care about.
+ // If all the bits we care about are equal, the result will be zero.
+ return ip.addr.xor(p.ip.addr).and(mask6(int(p.bits))).isZero()
+ }
+}
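+
+// Illustrative sketch, not part of the upstream file: Contains only
+// matches addresses of the prefix's own family. The helper name is
+// made up for the example.
+func containsExample() {
+ p := MustParsePrefix("192.168.0.0/16")
+ _ = p.Contains(MustParseAddr("192.168.5.5")) // true
+ _ = p.Contains(MustParseAddr("::ffff:192.168.5.5")) // false: 4-in-6 is IPv6
+}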
+
+// Overlaps reports whether p and o contain any IP addresses in common.
+//
+// If p and o are of different address families or either have a zero
+// IP, it reports false. Like the Contains method, a prefix with an
+// IPv4-mapped IPv6 address is still treated as an IPv6 mask.
+func (p Prefix) Overlaps(o Prefix) bool {
+ if !p.IsValid() || !o.IsValid() {
+ return false
+ }
+ if p == o {
+ return true
+ }
+ if p.ip.Is4() != o.ip.Is4() {
+ return false
+ }
+ var minBits int16
+ if p.bits < o.bits {
+ minBits = p.bits
+ } else {
+ minBits = o.bits
+ }
+ if minBits == 0 {
+ return true
+ }
+ // One of these Prefix calls might look redundant, but we don't require
+ // that p and o values are normalized (via Prefix.Masked) first,
+ // so the Prefix call on the one that's already minBits serves to zero
+ // out any remaining bits in IP.
+ var err error
+ if p, err = p.ip.Prefix(int(minBits)); err != nil {
+ return false
+ }
+ if o, err = o.ip.Prefix(int(minBits)); err != nil {
+ return false
+ }
+ return p.ip == o.ip
+}
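+
+// Illustrative sketch, not part of the upstream file: a /8 overlaps any
+// of its /16 subnets even when neither value is Masked first. The
+// helper name is made up for the example.
+func overlapsExample() bool {
+ a := MustParsePrefix("10.0.0.0/8")
+ b := MustParsePrefix("10.1.0.0/16")
+ return a.Overlaps(b) // true
+}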
+
+// AppendTo appends a text encoding of p,
+// as generated by MarshalText,
+// to b and returns the extended buffer.
+func (p Prefix) AppendTo(b []byte) []byte {
+ if p.isZero() {
+ return b
+ }
+ if !p.IsValid() {
+ return append(b, "invalid Prefix"...)
+ }
+
+ // p.ip is non-nil, because p is valid.
+ if p.ip.z == z4 {
+ b = p.ip.appendTo4(b)
+ } else {
+ if p.ip.Is4In6() {
+ b = append(b, "::ffff:"...)
+ b = p.ip.Unmap().appendTo4(b)
+ } else {
+ b = p.ip.appendTo6(b)
+ }
+ }
+
+ b = append(b, '/')
+ b = appendDecimal(b, uint8(p.bits))
+ return b
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The encoding is the same as returned by String, with one exception:
+// If p is the zero value, the encoding is the empty string.
+func (p Prefix) MarshalText() ([]byte, error) {
+ var max int
+ switch p.ip.z {
+ case z0:
+ case z4:
+ max = len("255.255.255.255/32")
+ default:
+ max = len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%enp5s0/128")
+ }
+ b := make([]byte, 0, max)
+ b = p.AppendTo(b)
+ return b, nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The IP address is expected in a form accepted by ParsePrefix
+// or generated by MarshalText.
+func (p *Prefix) UnmarshalText(text []byte) error {
+ if len(text) == 0 {
+ *p = Prefix{}
+ return nil
+ }
+ var err error
+ *p, err = ParsePrefix(string(text))
+ return err
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+// It returns Addr.MarshalBinary with an additional byte appended
+// containing the prefix bits.
+func (p Prefix) MarshalBinary() ([]byte, error) {
+ b := p.Addr().withoutZone().marshalBinaryWithTrailingBytes(1)
+ b[len(b)-1] = uint8(p.Bits())
+ return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+// It expects data in the form generated by MarshalBinary.
+func (p *Prefix) UnmarshalBinary(b []byte) error {
+ if len(b) < 1 {
+ return errors.New("unexpected slice size")
+ }
+ var addr Addr
+ err := addr.UnmarshalBinary(b[:len(b)-1])
+ if err != nil {
+ return err
+ }
+ *p = PrefixFrom(addr, int(b[len(b)-1]))
+ return nil
+}
+
+// String returns the CIDR notation of p: "<ip>/<bits>".
+func (p Prefix) String() string {
+ if !p.IsValid() {
+ return "invalid Prefix"
+ }
+ return p.ip.String() + "/" + itoa.Itoa(int(p.bits))
+}
diff --git a/contrib/go/_std_1.18/src/net/netip/uint128.go b/contrib/go/_std_1.19/src/net/netip/uint128.go
index 738939d7de..738939d7de 100644
--- a/contrib/go/_std_1.18/src/net/netip/uint128.go
+++ b/contrib/go/_std_1.19/src/net/netip/uint128.go
diff --git a/contrib/go/_std_1.19/src/net/nss.go b/contrib/go/_std_1.19/src/net/nss.go
new file mode 100644
index 0000000000..c4c608fb61
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/nss.go
@@ -0,0 +1,158 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+import (
+ "errors"
+ "internal/bytealg"
+ "io"
+ "os"
+)
+
+// nssConf represents the state of the machine's /etc/nsswitch.conf file.
+type nssConf struct {
+ err error // any error encountered opening or parsing the file
+ sources map[string][]nssSource // keyed by database (e.g. "hosts")
+}
+
+type nssSource struct {
+ source string // e.g. "compat", "files", "mdns4_minimal"
+ criteria []nssCriterion
+}
+
+// standardCriteria reports whether all specified criteria have the
+// default status actions.
+func (s nssSource) standardCriteria() bool {
+ for i, crit := range s.criteria {
+ if !crit.standardStatusAction(i == len(s.criteria)-1) {
+ return false
+ }
+ }
+ return true
+}
+
+// nssCriterion is the parsed structure of one of the criteria in brackets
+// after an NSS source name.
+type nssCriterion struct {
+ negate bool // if "!" was present
+ status string // e.g. "success", "unavail" (lowercase)
+ action string // e.g. "return", "continue" (lowercase)
+}
+
+// standardStatusAction reports whether c is equivalent to not
+// specifying the criterion at all. last is whether this criterion is the
+// last in the list.
+func (c nssCriterion) standardStatusAction(last bool) bool {
+ if c.negate {
+ return false
+ }
+ var def string
+ switch c.status {
+ case "success":
+ def = "return"
+ case "notfound", "unavail", "tryagain":
+ def = "continue"
+ default:
+ // Unknown status
+ return false
+ }
+ if last && c.action == "return" {
+ return true
+ }
+ return c.action == def
+}
+
+func parseNSSConfFile(file string) *nssConf {
+ f, err := os.Open(file)
+ if err != nil {
+ return &nssConf{err: err}
+ }
+ defer f.Close()
+ return parseNSSConf(f)
+}
+
+func parseNSSConf(r io.Reader) *nssConf {
+ slurp, err := readFull(r)
+ if err != nil {
+ return &nssConf{err: err}
+ }
+ conf := new(nssConf)
+ conf.err = foreachLine(slurp, func(line []byte) error {
+ line = trimSpace(removeComment(line))
+ if len(line) == 0 {
+ return nil
+ }
+ colon := bytealg.IndexByte(line, ':')
+ if colon == -1 {
+ return errors.New("no colon on line")
+ }
+ db := string(trimSpace(line[:colon]))
+ srcs := line[colon+1:]
+ for {
+ srcs = trimSpace(srcs)
+ if len(srcs) == 0 {
+ break
+ }
+ sp := bytealg.IndexByte(srcs, ' ')
+ var src string
+ if sp == -1 {
+ src = string(srcs)
+ srcs = nil // done
+ } else {
+ src = string(srcs[:sp])
+ srcs = trimSpace(srcs[sp+1:])
+ }
+ var criteria []nssCriterion
+ // See if there's a criteria block in brackets.
+ if len(srcs) > 0 && srcs[0] == '[' {
+ bclose := bytealg.IndexByte(srcs, ']')
+ if bclose == -1 {
+ return errors.New("unclosed criterion bracket")
+ }
+ var err error
+ criteria, err = parseCriteria(srcs[1:bclose])
+ if err != nil {
+ return errors.New("invalid criteria: " + string(srcs[1:bclose]))
+ }
+ srcs = srcs[bclose+1:]
+ }
+ if conf.sources == nil {
+ conf.sources = make(map[string][]nssSource)
+ }
+ conf.sources[db] = append(conf.sources[db], nssSource{
+ source: src,
+ criteria: criteria,
+ })
+ }
+ return nil
+ })
+ return conf
+}
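+
+// Illustrative sketch, not part of the upstream file: an nsswitch.conf
+// line such as
+//
+// hosts: files [!UNAVAIL=return] dns
+//
+// yields two "hosts" sources: "files" with one criterion
+// (negate=true, status="unavail", action="return") and "dns" with none.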
+
+// parseCriteria parses criteria in the form "foo=bar !foo=bar".
+func parseCriteria(x []byte) (c []nssCriterion, err error) {
+ err = foreachField(x, func(f []byte) error {
+ not := false
+ if len(f) > 0 && f[0] == '!' {
+ not = true
+ f = f[1:]
+ }
+ if len(f) < 3 {
+ return errors.New("criterion too short")
+ }
+ eq := bytealg.IndexByte(f, '=')
+ if eq == -1 {
+ return errors.New("criterion lacks equal sign")
+ }
+ lowerASCIIBytes(f)
+ c = append(c, nssCriterion{
+ negate: not,
+ status: string(f[:eq]),
+ action: string(f[eq+1:]),
+ })
+ return nil
+ })
+ return
+}
diff --git a/contrib/go/_std_1.18/src/net/parse.go b/contrib/go/_std_1.19/src/net/parse.go
index ee2890fe2c..ee2890fe2c 100644
--- a/contrib/go/_std_1.18/src/net/parse.go
+++ b/contrib/go/_std_1.19/src/net/parse.go
diff --git a/contrib/go/_std_1.18/src/net/pipe.go b/contrib/go/_std_1.19/src/net/pipe.go
index f1741938b0..f1741938b0 100644
--- a/contrib/go/_std_1.18/src/net/pipe.go
+++ b/contrib/go/_std_1.19/src/net/pipe.go
diff --git a/contrib/go/_std_1.18/src/net/port.go b/contrib/go/_std_1.19/src/net/port.go
index 32e7628619..32e7628619 100644
--- a/contrib/go/_std_1.18/src/net/port.go
+++ b/contrib/go/_std_1.19/src/net/port.go
diff --git a/contrib/go/_std_1.19/src/net/port_unix.go b/contrib/go/_std_1.19/src/net/port_unix.go
new file mode 100644
index 0000000000..b05b588f17
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/port_unix.go
@@ -0,0 +1,57 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm)
+
+// Read system port mappings from /etc/services
+
+package net
+
+import (
+ "internal/bytealg"
+ "sync"
+)
+
+var onceReadServices sync.Once
+
+func readServices() {
+ file, err := open("/etc/services")
+ if err != nil {
+ return
+ }
+ defer file.close()
+
+ for line, ok := file.readLine(); ok; line, ok = file.readLine() {
+ // "http 80/tcp www www-http # World Wide Web HTTP"
+ if i := bytealg.IndexByteString(line, '#'); i >= 0 {
+ line = line[:i]
+ }
+ f := getFields(line)
+ if len(f) < 2 {
+ continue
+ }
+ portnet := f[1] // "80/tcp"
+ port, j, ok := dtoi(portnet)
+ if !ok || port <= 0 || j >= len(portnet) || portnet[j] != '/' {
+ continue
+ }
+ netw := portnet[j+1:] // "tcp"
+ m, ok1 := services[netw]
+ if !ok1 {
+ m = make(map[string]int)
+ services[netw] = m
+ }
+ for i := 0; i < len(f); i++ {
+ if i != 1 { // f[1] was port/net
+ m[f[i]] = port
+ }
+ }
+ }
+}
+
+// goLookupPort is the native Go implementation of LookupPort.
+func goLookupPort(network, service string) (port int, err error) {
+ onceReadServices.Do(readServices)
+ return lookupPortMap(network, service)
+}
diff --git a/contrib/go/_std_1.18/src/net/rawconn.go b/contrib/go/_std_1.19/src/net/rawconn.go
index c786354582..c786354582 100644
--- a/contrib/go/_std_1.18/src/net/rawconn.go
+++ b/contrib/go/_std_1.19/src/net/rawconn.go
diff --git a/contrib/go/_std_1.18/src/net/sendfile_linux.go b/contrib/go/_std_1.19/src/net/sendfile_linux.go
index e5150aa5e8..e5150aa5e8 100644
--- a/contrib/go/_std_1.18/src/net/sendfile_linux.go
+++ b/contrib/go/_std_1.19/src/net/sendfile_linux.go
diff --git a/contrib/go/_std_1.18/src/net/sendfile_unix_alt.go b/contrib/go/_std_1.19/src/net/sendfile_unix_alt.go
index f99af92bc8..f99af92bc8 100644
--- a/contrib/go/_std_1.18/src/net/sendfile_unix_alt.go
+++ b/contrib/go/_std_1.19/src/net/sendfile_unix_alt.go
diff --git a/contrib/go/_std_1.18/src/net/sock_bsd.go b/contrib/go/_std_1.19/src/net/sock_bsd.go
index 27daf722b5..27daf722b5 100644
--- a/contrib/go/_std_1.18/src/net/sock_bsd.go
+++ b/contrib/go/_std_1.19/src/net/sock_bsd.go
diff --git a/contrib/go/_std_1.19/src/net/sock_cloexec.go b/contrib/go/_std_1.19/src/net/sock_cloexec.go
new file mode 100644
index 0000000000..3f1cc9827a
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/sock_cloexec.go
@@ -0,0 +1,25 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements sysSocket for platforms that provide a fast path for
+// setting SetNonblock and CloseOnExec.
+
+//go:build dragonfly || freebsd || linux || netbsd || openbsd || solaris
+
+package net
+
+import (
+ "os"
+ "syscall"
+)
+
+// Wrapper around the socket system call that marks the returned file
+// descriptor as nonblocking and close-on-exec.
+func sysSocket(family, sotype, proto int) (int, error) {
+ s, err := socketFunc(family, sotype|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, proto)
+ if err != nil {
+ return -1, os.NewSyscallError("socket", err)
+ }
+ return s, nil
+}
diff --git a/contrib/go/_std_1.19/src/net/sock_linux.go b/contrib/go/_std_1.19/src/net/sock_linux.go
new file mode 100644
index 0000000000..2513f9ba7b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/sock_linux.go
@@ -0,0 +1,86 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+import "syscall"
+
+func kernelVersion() (major int, minor int) {
+ var uname syscall.Utsname
+ if err := syscall.Uname(&uname); err != nil {
+ return
+ }
+
+ rl := uname.Release
+ var values [2]int
+ vi := 0
+ value := 0
+ for _, c := range rl {
+ if c >= '0' && c <= '9' {
+ value = (value * 10) + int(c-'0')
+ } else {
+ // Note that we're assuming N.N.N here. If we see anything else we are likely to
+ // mis-parse it.
+ values[vi] = value
+ vi++
+ if vi >= len(values) {
+ break
+ }
+ value = 0
+ }
+ }
+ switch vi {
+ case 0:
+ return 0, 0
+ case 1:
+ return values[0], 0
+ case 2:
+ return values[0], values[1]
+ }
+ return
+}
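+
+// exampleKernelVersion is an illustrative sketch (not part of the
+// upstream file): for a release string such as "5.15.0-91-generic" the
+// parser above yields major=5, minor=15, and everything after the
+// second separator is ignored.
+func exampleKernelVersion() bool {
+	major, minor := kernelVersion()
+	return major > 4 || (major == 4 && minor >= 1) // the test maxAckBacklog uses
+}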
+
+// Linux stores the backlog as:
+//
+// - uint16 in kernel version < 4.1,
+// - uint32 in kernel version >= 4.1
+//
+// Truncate number to avoid wrapping.
+//
+// See issue 5030 and 41470.
+func maxAckBacklog(n int) int {
+ major, minor := kernelVersion()
+ size := 16
+ if major > 4 || (major == 4 && minor >= 1) {
+ size = 32
+ }
+
+ var max uint = 1<<size - 1
+ if uint(n) > max {
+ n = int(max)
+ }
+ return n
+}
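+
+// exampleBacklogCap is an illustrative sketch (not part of the upstream
+// file): on a kernel older than 4.1 the backlog field is a uint16, so a
+// large request is capped at 65535 rather than being allowed to wrap.
+func exampleBacklogCap() int {
+	return maxAckBacklog(1 << 20) // 65535 on kernels < 4.1, 1<<20 otherwise
+}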
+
+func maxListenerBacklog() int {
+ fd, err := open("/proc/sys/net/core/somaxconn")
+ if err != nil {
+ return syscall.SOMAXCONN
+ }
+ defer fd.close()
+ l, ok := fd.readLine()
+ if !ok {
+ return syscall.SOMAXCONN
+ }
+ f := getFields(l)
+ n, _, ok := dtoi(f[0])
+ if n == 0 || !ok {
+ return syscall.SOMAXCONN
+ }
+
+ if n > 1<<16-1 {
+ return maxAckBacklog(n)
+ }
+ return n
+}
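+
+// exampleListenBacklog is an illustrative sketch (not part of the
+// upstream file): callers go through listenerBacklog (net.go), which
+// caches the result, so the /proc read above happens at most once per
+// process.
+func exampleListenBacklog() int {
+	return maxListenerBacklog() // e.g. 4096 on a typical Linux system
+}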
diff --git a/contrib/go/_std_1.19/src/net/sock_posix.go b/contrib/go/_std_1.19/src/net/sock_posix.go
new file mode 100644
index 0000000000..4431c3a6b3
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/sock_posix.go
@@ -0,0 +1,254 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || windows
+
+package net
+
+import (
+ "context"
+ "internal/poll"
+ "os"
+ "syscall"
+)
+
+// socket returns a network file descriptor that is ready for
+// asynchronous I/O using the network poller.
+func socket(ctx context.Context, net string, family, sotype, proto int, ipv6only bool, laddr, raddr sockaddr, ctrlFn func(string, string, syscall.RawConn) error) (fd *netFD, err error) {
+ s, err := sysSocket(family, sotype, proto)
+ if err != nil {
+ return nil, err
+ }
+ if err = setDefaultSockopts(s, family, sotype, ipv6only); err != nil {
+ poll.CloseFunc(s)
+ return nil, err
+ }
+ if fd, err = newFD(s, family, sotype, net); err != nil {
+ poll.CloseFunc(s)
+ return nil, err
+ }
+
+ // This function makes a network file descriptor for the
+ // following applications:
+ //
+ // - An endpoint holder that opens a passive stream
+ // connection, known as a stream listener
+ //
+ // - An endpoint holder that opens a destination-unspecific
+ // datagram connection, known as a datagram listener
+ //
+ // - An endpoint holder that opens an active stream or a
+ // destination-specific datagram connection, known as a
+ // dialer
+ //
+ // - An endpoint holder that opens the other connection, such
+ // as talking to the protocol stack inside the kernel
+ //
+	// Stream and datagram listeners require only named sockets,
+	// so we can assume that a request with a non-nil laddr but a
+	// nil raddr comes from a stream or datagram listener.
+	// Otherwise we assume it comes from a dialer or one of the
+	// other connection holders.
+
+ if laddr != nil && raddr == nil {
+ switch sotype {
+ case syscall.SOCK_STREAM, syscall.SOCK_SEQPACKET:
+ if err := fd.listenStream(laddr, listenerBacklog(), ctrlFn); err != nil {
+ fd.Close()
+ return nil, err
+ }
+ return fd, nil
+ case syscall.SOCK_DGRAM:
+ if err := fd.listenDatagram(laddr, ctrlFn); err != nil {
+ fd.Close()
+ return nil, err
+ }
+ return fd, nil
+ }
+ }
+ if err := fd.dial(ctx, laddr, raddr, ctrlFn); err != nil {
+ fd.Close()
+ return nil, err
+ }
+ return fd, nil
+}
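+
+// exampleListenerSocket is an illustrative sketch (not part of the
+// upstream file): a stream-listener request, i.e. laddr non-nil and
+// raddr nil, which takes the SOCK_STREAM branch above.
+func exampleListenerSocket(ctx context.Context, laddr *TCPAddr) (*netFD, error) {
+	return socket(ctx, "tcp", syscall.AF_INET, syscall.SOCK_STREAM, 0, false, laddr, nil, nil)
+}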
+
+func (fd *netFD) ctrlNetwork() string {
+ switch fd.net {
+ case "unix", "unixgram", "unixpacket":
+ return fd.net
+ }
+ switch fd.net[len(fd.net)-1] {
+ case '4', '6':
+ return fd.net
+ }
+ if fd.family == syscall.AF_INET {
+ return fd.net + "4"
+ }
+ return fd.net + "6"
+}
+
+func (fd *netFD) addrFunc() func(syscall.Sockaddr) Addr {
+ switch fd.family {
+ case syscall.AF_INET, syscall.AF_INET6:
+ switch fd.sotype {
+ case syscall.SOCK_STREAM:
+ return sockaddrToTCP
+ case syscall.SOCK_DGRAM:
+ return sockaddrToUDP
+ case syscall.SOCK_RAW:
+ return sockaddrToIP
+ }
+ case syscall.AF_UNIX:
+ switch fd.sotype {
+ case syscall.SOCK_STREAM:
+ return sockaddrToUnix
+ case syscall.SOCK_DGRAM:
+ return sockaddrToUnixgram
+ case syscall.SOCK_SEQPACKET:
+ return sockaddrToUnixpacket
+ }
+ }
+ return func(syscall.Sockaddr) Addr { return nil }
+}
+
+func (fd *netFD) dial(ctx context.Context, laddr, raddr sockaddr, ctrlFn func(string, string, syscall.RawConn) error) error {
+ if ctrlFn != nil {
+ c, err := newRawConn(fd)
+ if err != nil {
+ return err
+ }
+ var ctrlAddr string
+ if raddr != nil {
+ ctrlAddr = raddr.String()
+ } else if laddr != nil {
+ ctrlAddr = laddr.String()
+ }
+ if err := ctrlFn(fd.ctrlNetwork(), ctrlAddr, c); err != nil {
+ return err
+ }
+ }
+ var err error
+ var lsa syscall.Sockaddr
+ if laddr != nil {
+ if lsa, err = laddr.sockaddr(fd.family); err != nil {
+ return err
+ } else if lsa != nil {
+ if err = syscall.Bind(fd.pfd.Sysfd, lsa); err != nil {
+ return os.NewSyscallError("bind", err)
+ }
+ }
+ }
+ var rsa syscall.Sockaddr // remote address from the user
+ var crsa syscall.Sockaddr // remote address we actually connected to
+ if raddr != nil {
+ if rsa, err = raddr.sockaddr(fd.family); err != nil {
+ return err
+ }
+ if crsa, err = fd.connect(ctx, lsa, rsa); err != nil {
+ return err
+ }
+ fd.isConnected = true
+ } else {
+ if err := fd.init(); err != nil {
+ return err
+ }
+ }
+ // Record the local and remote addresses from the actual socket.
+ // Get the local address by calling Getsockname.
+ // For the remote address, use
+ // 1) the one returned by the connect method, if any; or
+ // 2) the one from Getpeername, if it succeeds; or
+ // 3) the one passed to us as the raddr parameter.
+ lsa, _ = syscall.Getsockname(fd.pfd.Sysfd)
+ if crsa != nil {
+ fd.setAddr(fd.addrFunc()(lsa), fd.addrFunc()(crsa))
+ } else if rsa, _ = syscall.Getpeername(fd.pfd.Sysfd); rsa != nil {
+ fd.setAddr(fd.addrFunc()(lsa), fd.addrFunc()(rsa))
+ } else {
+ fd.setAddr(fd.addrFunc()(lsa), raddr)
+ }
+ return nil
+}
+
+func (fd *netFD) listenStream(laddr sockaddr, backlog int, ctrlFn func(string, string, syscall.RawConn) error) error {
+ var err error
+ if err = setDefaultListenerSockopts(fd.pfd.Sysfd); err != nil {
+ return err
+ }
+ var lsa syscall.Sockaddr
+ if lsa, err = laddr.sockaddr(fd.family); err != nil {
+ return err
+ }
+ if ctrlFn != nil {
+ c, err := newRawConn(fd)
+ if err != nil {
+ return err
+ }
+ if err := ctrlFn(fd.ctrlNetwork(), laddr.String(), c); err != nil {
+ return err
+ }
+ }
+ if err = syscall.Bind(fd.pfd.Sysfd, lsa); err != nil {
+ return os.NewSyscallError("bind", err)
+ }
+ if err = listenFunc(fd.pfd.Sysfd, backlog); err != nil {
+ return os.NewSyscallError("listen", err)
+ }
+ if err = fd.init(); err != nil {
+ return err
+ }
+ lsa, _ = syscall.Getsockname(fd.pfd.Sysfd)
+ fd.setAddr(fd.addrFunc()(lsa), nil)
+ return nil
+}
+
+func (fd *netFD) listenDatagram(laddr sockaddr, ctrlFn func(string, string, syscall.RawConn) error) error {
+ switch addr := laddr.(type) {
+ case *UDPAddr:
+ // We provide a socket that listens to a wildcard
+ // address with reusable UDP port when the given laddr
+ // is an appropriate UDP multicast address prefix.
+ // This makes it possible for a single UDP listener to
+		// join multiple different group addresses, and for
+ // multiple UDP listeners that listen on the same UDP
+ // port to join the same group address.
+ if addr.IP != nil && addr.IP.IsMulticast() {
+ if err := setDefaultMulticastSockopts(fd.pfd.Sysfd); err != nil {
+ return err
+ }
+ addr := *addr
+ switch fd.family {
+ case syscall.AF_INET:
+ addr.IP = IPv4zero
+ case syscall.AF_INET6:
+ addr.IP = IPv6unspecified
+ }
+ laddr = &addr
+ }
+ }
+ var err error
+ var lsa syscall.Sockaddr
+ if lsa, err = laddr.sockaddr(fd.family); err != nil {
+ return err
+ }
+ if ctrlFn != nil {
+ c, err := newRawConn(fd)
+ if err != nil {
+ return err
+ }
+ if err := ctrlFn(fd.ctrlNetwork(), laddr.String(), c); err != nil {
+ return err
+ }
+ }
+ if err = syscall.Bind(fd.pfd.Sysfd, lsa); err != nil {
+ return os.NewSyscallError("bind", err)
+ }
+ if err = fd.init(); err != nil {
+ return err
+ }
+ lsa, _ = syscall.Getsockname(fd.pfd.Sysfd)
+ fd.setAddr(fd.addrFunc()(lsa), nil)
+ return nil
+}
diff --git a/contrib/go/_std_1.19/src/net/sockaddr_posix.go b/contrib/go/_std_1.19/src/net/sockaddr_posix.go
new file mode 100644
index 0000000000..76c3233b29
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/sockaddr_posix.go
@@ -0,0 +1,34 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm) || windows
+
+package net
+
+import (
+ "syscall"
+)
+
+// A sockaddr represents a TCP, UDP, IP or Unix network endpoint
+// address that can be converted into a syscall.Sockaddr.
+type sockaddr interface {
+ Addr
+
+ // family returns the platform-dependent address family
+ // identifier.
+ family() int
+
+ // isWildcard reports whether the address is a wildcard
+ // address.
+ isWildcard() bool
+
+ // sockaddr returns the address converted into a syscall
+ // sockaddr type that implements syscall.Sockaddr
+ // interface. It returns a nil interface when the address is
+ // nil.
+ sockaddr(family int) (syscall.Sockaddr, error)
+
+ // toLocal maps the zero address to a local system address (127.0.0.1 or ::1)
+ toLocal(net string) sockaddr
+}
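+
+// An illustrative compile-time assertion (not part of the upstream
+// file): the concrete address types, such as *TCPAddr, satisfy this
+// interface via their *_posix.go method sets.
+var _ sockaddr = (*TCPAddr)(nil)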
diff --git a/contrib/go/_std_1.18/src/net/sockopt_bsd.go b/contrib/go/_std_1.19/src/net/sockopt_bsd.go
index ff99811980..ff99811980 100644
--- a/contrib/go/_std_1.18/src/net/sockopt_bsd.go
+++ b/contrib/go/_std_1.19/src/net/sockopt_bsd.go
diff --git a/contrib/go/_std_1.18/src/net/sockopt_linux.go b/contrib/go/_std_1.19/src/net/sockopt_linux.go
index 3d544299ac..3d544299ac 100644
--- a/contrib/go/_std_1.18/src/net/sockopt_linux.go
+++ b/contrib/go/_std_1.19/src/net/sockopt_linux.go
diff --git a/contrib/go/_std_1.19/src/net/sockopt_posix.go b/contrib/go/_std_1.19/src/net/sockopt_posix.go
new file mode 100644
index 0000000000..32e8fcd505
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/sockopt_posix.go
@@ -0,0 +1,134 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || windows
+
+package net
+
+import (
+ "internal/bytealg"
+ "runtime"
+ "syscall"
+)
+
+// Boolean to int.
+func boolint(b bool) int {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+func ipv4AddrToInterface(ip IP) (*Interface, error) {
+ ift, err := Interfaces()
+ if err != nil {
+ return nil, err
+ }
+ for _, ifi := range ift {
+ ifat, err := ifi.Addrs()
+ if err != nil {
+ return nil, err
+ }
+ for _, ifa := range ifat {
+ switch v := ifa.(type) {
+ case *IPAddr:
+ if ip.Equal(v.IP) {
+ return &ifi, nil
+ }
+ case *IPNet:
+ if ip.Equal(v.IP) {
+ return &ifi, nil
+ }
+ }
+ }
+ }
+ if ip.Equal(IPv4zero) {
+ return nil, nil
+ }
+ return nil, errNoSuchInterface
+}
+
+func interfaceToIPv4Addr(ifi *Interface) (IP, error) {
+ if ifi == nil {
+ return IPv4zero, nil
+ }
+ ifat, err := ifi.Addrs()
+ if err != nil {
+ return nil, err
+ }
+ for _, ifa := range ifat {
+ switch v := ifa.(type) {
+ case *IPAddr:
+ if v.IP.To4() != nil {
+ return v.IP, nil
+ }
+ case *IPNet:
+ if v.IP.To4() != nil {
+ return v.IP, nil
+ }
+ }
+ }
+ return nil, errNoSuchInterface
+}
+
+func setIPv4MreqToInterface(mreq *syscall.IPMreq, ifi *Interface) error {
+ if ifi == nil {
+ return nil
+ }
+ ifat, err := ifi.Addrs()
+ if err != nil {
+ return err
+ }
+ for _, ifa := range ifat {
+ switch v := ifa.(type) {
+ case *IPAddr:
+ if a := v.IP.To4(); a != nil {
+ copy(mreq.Interface[:], a)
+ goto done
+ }
+ case *IPNet:
+ if a := v.IP.To4(); a != nil {
+ copy(mreq.Interface[:], a)
+ goto done
+ }
+ }
+ }
+done:
+ if bytealg.Equal(mreq.Multiaddr[:], IPv4zero.To4()) {
+ return errNoSuchMulticastInterface
+ }
+ return nil
+}
+
+func setReadBuffer(fd *netFD, bytes int) error {
+ err := fd.pfd.SetsockoptInt(syscall.SOL_SOCKET, syscall.SO_RCVBUF, bytes)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
+}
+
+func setWriteBuffer(fd *netFD, bytes int) error {
+ err := fd.pfd.SetsockoptInt(syscall.SOL_SOCKET, syscall.SO_SNDBUF, bytes)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
+}
+
+func setKeepAlive(fd *netFD, keepalive bool) error {
+ err := fd.pfd.SetsockoptInt(syscall.SOL_SOCKET, syscall.SO_KEEPALIVE, boolint(keepalive))
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
+}
+
+func setLinger(fd *netFD, sec int) error {
+ var l syscall.Linger
+ if sec >= 0 {
+ l.Onoff = 1
+ l.Linger = int32(sec)
+ } else {
+ l.Onoff = 0
+ l.Linger = 0
+ }
+ err := fd.pfd.SetsockoptLinger(syscall.SOL_SOCKET, syscall.SO_LINGER, &l)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
+}
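+
+// exampleLinger is an illustrative sketch (not part of the upstream
+// file): sec >= 0 makes Close block for up to sec seconds while unsent
+// data drains; a negative sec restores the default behavior.
+func exampleLinger(fd *netFD) error {
+	if err := setLinger(fd, 5); err != nil { // linger for up to 5 seconds
+		return err
+	}
+	return setLinger(fd, -1) // default again: Close returns immediately
+}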
diff --git a/contrib/go/_std_1.18/src/net/sockoptip_bsdvar.go b/contrib/go/_std_1.19/src/net/sockoptip_bsdvar.go
index 3e9ba1ee78..3e9ba1ee78 100644
--- a/contrib/go/_std_1.18/src/net/sockoptip_bsdvar.go
+++ b/contrib/go/_std_1.19/src/net/sockoptip_bsdvar.go
diff --git a/contrib/go/_std_1.18/src/net/sockoptip_linux.go b/contrib/go/_std_1.19/src/net/sockoptip_linux.go
index bd7d834425..bd7d834425 100644
--- a/contrib/go/_std_1.18/src/net/sockoptip_linux.go
+++ b/contrib/go/_std_1.19/src/net/sockoptip_linux.go
diff --git a/contrib/go/_std_1.19/src/net/sockoptip_posix.go b/contrib/go/_std_1.19/src/net/sockoptip_posix.go
new file mode 100644
index 0000000000..572ea455c0
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/sockoptip_posix.go
@@ -0,0 +1,49 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || windows
+
+package net
+
+import (
+ "runtime"
+ "syscall"
+)
+
+func joinIPv4Group(fd *netFD, ifi *Interface, ip IP) error {
+ mreq := &syscall.IPMreq{Multiaddr: [4]byte{ip[0], ip[1], ip[2], ip[3]}}
+ if err := setIPv4MreqToInterface(mreq, ifi); err != nil {
+ return err
+ }
+ err := fd.pfd.SetsockoptIPMreq(syscall.IPPROTO_IP, syscall.IP_ADD_MEMBERSHIP, mreq)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
+}
+
+func setIPv6MulticastInterface(fd *netFD, ifi *Interface) error {
+ var v int
+ if ifi != nil {
+ v = ifi.Index
+ }
+ err := fd.pfd.SetsockoptInt(syscall.IPPROTO_IPV6, syscall.IPV6_MULTICAST_IF, v)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
+}
+
+func setIPv6MulticastLoopback(fd *netFD, v bool) error {
+ err := fd.pfd.SetsockoptInt(syscall.IPPROTO_IPV6, syscall.IPV6_MULTICAST_LOOP, boolint(v))
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
+}
+
+func joinIPv6Group(fd *netFD, ifi *Interface, ip IP) error {
+ mreq := &syscall.IPv6Mreq{}
+ copy(mreq.Multiaddr[:], ip)
+ if ifi != nil {
+ mreq.Interface = uint32(ifi.Index)
+ }
+ err := fd.pfd.SetsockoptIPv6Mreq(syscall.IPPROTO_IPV6, syscall.IPV6_JOIN_GROUP, mreq)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
+}
diff --git a/contrib/go/_std_1.18/src/net/splice_linux.go b/contrib/go/_std_1.19/src/net/splice_linux.go
index 69c3f65770..69c3f65770 100644
--- a/contrib/go/_std_1.18/src/net/splice_linux.go
+++ b/contrib/go/_std_1.19/src/net/splice_linux.go
diff --git a/contrib/go/_std_1.18/src/net/splice_stub.go b/contrib/go/_std_1.19/src/net/splice_stub.go
index 3cdadb11c5..3cdadb11c5 100644
--- a/contrib/go/_std_1.18/src/net/splice_stub.go
+++ b/contrib/go/_std_1.19/src/net/splice_stub.go
diff --git a/contrib/go/_std_1.19/src/net/sys_cloexec.go b/contrib/go/_std_1.19/src/net/sys_cloexec.go
new file mode 100644
index 0000000000..6e61d40c19
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/sys_cloexec.go
@@ -0,0 +1,36 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements sysSocket for platforms that do not provide a fast
+// path for setting the nonblocking and close-on-exec flags at socket creation.
+
+//go:build aix || darwin
+
+package net
+
+import (
+ "internal/poll"
+ "os"
+ "syscall"
+)
+
+// Wrapper around the socket system call that marks the returned file
+// descriptor as nonblocking and close-on-exec.
+func sysSocket(family, sotype, proto int) (int, error) {
+ // See ../syscall/exec_unix.go for description of ForkLock.
+ syscall.ForkLock.RLock()
+ s, err := socketFunc(family, sotype, proto)
+ if err == nil {
+ syscall.CloseOnExec(s)
+ }
+ syscall.ForkLock.RUnlock()
+ if err != nil {
+ return -1, os.NewSyscallError("socket", err)
+ }
+ if err = syscall.SetNonblock(s, true); err != nil {
+ poll.CloseFunc(s)
+ return -1, os.NewSyscallError("setnonblock", err)
+ }
+ return s, nil
+}
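+
+// An illustrative note (not part of the upstream file): ForkLock is
+// held across socket and CloseOnExec so that a concurrent fork/exec
+// cannot inherit the descriptor during the window in which it exists
+// without FD_CLOEXEC set; SetNonblock is safe to apply afterwards.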
diff --git a/contrib/go/_std_1.18/src/net/tcpsock.go b/contrib/go/_std_1.19/src/net/tcpsock.go
index 6bad0e8f8b..6bad0e8f8b 100644
--- a/contrib/go/_std_1.18/src/net/tcpsock.go
+++ b/contrib/go/_std_1.19/src/net/tcpsock.go
diff --git a/contrib/go/_std_1.19/src/net/tcpsock_posix.go b/contrib/go/_std_1.19/src/net/tcpsock_posix.go
new file mode 100644
index 0000000000..1c91170c50
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/tcpsock_posix.go
@@ -0,0 +1,176 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm) || windows
+
+package net
+
+import (
+ "context"
+ "io"
+ "os"
+ "syscall"
+)
+
+func sockaddrToTCP(sa syscall.Sockaddr) Addr {
+ switch sa := sa.(type) {
+ case *syscall.SockaddrInet4:
+ return &TCPAddr{IP: sa.Addr[0:], Port: sa.Port}
+ case *syscall.SockaddrInet6:
+ return &TCPAddr{IP: sa.Addr[0:], Port: sa.Port, Zone: zoneCache.name(int(sa.ZoneId))}
+ }
+ return nil
+}
+
+func (a *TCPAddr) family() int {
+ if a == nil || len(a.IP) <= IPv4len {
+ return syscall.AF_INET
+ }
+ if a.IP.To4() != nil {
+ return syscall.AF_INET
+ }
+ return syscall.AF_INET6
+}
+
+func (a *TCPAddr) sockaddr(family int) (syscall.Sockaddr, error) {
+ if a == nil {
+ return nil, nil
+ }
+ return ipToSockaddr(family, a.IP, a.Port, a.Zone)
+}
+
+func (a *TCPAddr) toLocal(net string) sockaddr {
+ return &TCPAddr{loopbackIP(net), a.Port, a.Zone}
+}
+
+func (c *TCPConn) readFrom(r io.Reader) (int64, error) {
+ if n, err, handled := splice(c.fd, r); handled {
+ return n, err
+ }
+ if n, err, handled := sendFile(c.fd, r); handled {
+ return n, err
+ }
+ return genericReadFrom(c, r)
+}
+
+func (sd *sysDialer) dialTCP(ctx context.Context, laddr, raddr *TCPAddr) (*TCPConn, error) {
+ if h := sd.testHookDialTCP; h != nil {
+ return h(ctx, sd.network, laddr, raddr)
+ }
+ if h := testHookDialTCP; h != nil {
+ return h(ctx, sd.network, laddr, raddr)
+ }
+ return sd.doDialTCP(ctx, laddr, raddr)
+}
+
+func (sd *sysDialer) doDialTCP(ctx context.Context, laddr, raddr *TCPAddr) (*TCPConn, error) {
+ fd, err := internetSocket(ctx, sd.network, laddr, raddr, syscall.SOCK_STREAM, 0, "dial", sd.Dialer.Control)
+
+	// TCP has a rarely used mechanism called a 'simultaneous connection', in
+ // which Dial("tcp", addr1, addr2) run on the machine at addr1 can
+ // connect to a simultaneous Dial("tcp", addr2, addr1) run on the machine
+ // at addr2, without either machine executing Listen. If laddr == nil,
+ // it means we want the kernel to pick an appropriate originating local
+ // address. Some Linux kernels cycle blindly through a fixed range of
+ // local ports, regardless of destination port. If a kernel happens to
+ // pick local port 50001 as the source for a Dial("tcp", "", "localhost:50001"),
+ // then the Dial will succeed, having simultaneously connected to itself.
+ // This can only happen when we are letting the kernel pick a port (laddr == nil)
+ // and when there is no listener for the destination address.
+ // It's hard to argue this is anything other than a kernel bug. If we
+ // see this happen, rather than expose the buggy effect to users, we
+ // close the fd and try again. If it happens twice more, we relent and
+ // use the result. See also:
+ // https://golang.org/issue/2690
+ // https://stackoverflow.com/questions/4949858/
+ //
+ // The opposite can also happen: if we ask the kernel to pick an appropriate
+ // originating local address, sometimes it picks one that is already in use.
+ // So if the error is EADDRNOTAVAIL, we have to try again too, just for
+ // a different reason.
+ //
+ // The kernel socket code is no doubt enjoying watching us squirm.
+ for i := 0; i < 2 && (laddr == nil || laddr.Port == 0) && (selfConnect(fd, err) || spuriousENOTAVAIL(err)); i++ {
+ if err == nil {
+ fd.Close()
+ }
+ fd, err = internetSocket(ctx, sd.network, laddr, raddr, syscall.SOCK_STREAM, 0, "dial", sd.Dialer.Control)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ return newTCPConn(fd), nil
+}
+
+func selfConnect(fd *netFD, err error) bool {
+ // If the connect failed, we clearly didn't connect to ourselves.
+ if err != nil {
+ return false
+ }
+
+ // The socket constructor can return an fd with raddr nil under certain
+ // unknown conditions. The errors in the calls there to Getpeername
+ // are discarded, but we can't catch the problem there because those
+ // calls are sometimes legally erroneous with a "socket not connected".
+ // Since this code (selfConnect) is already trying to work around
+ // a problem, we make sure if this happens we recognize trouble and
+ // ask the DialTCP routine to try again.
+ // TODO: try to understand what's really going on.
+ if fd.laddr == nil || fd.raddr == nil {
+ return true
+ }
+ l := fd.laddr.(*TCPAddr)
+ r := fd.raddr.(*TCPAddr)
+ return l.Port == r.Port && l.IP.Equal(r.IP)
+}
+
+func spuriousENOTAVAIL(err error) bool {
+ if op, ok := err.(*OpError); ok {
+ err = op.Err
+ }
+ if sys, ok := err.(*os.SyscallError); ok {
+ err = sys.Err
+ }
+ return err == syscall.EADDRNOTAVAIL
+}
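+
+// exampleSpurious is an illustrative sketch (not part of the upstream
+// file): the unwrapping above lets the retry loop in doDialTCP
+// recognize EADDRNOTAVAIL however the dial path has wrapped it.
+func exampleSpurious() bool {
+	err := &OpError{Op: "dial", Err: &os.SyscallError{Syscall: "connect", Err: syscall.EADDRNOTAVAIL}}
+	return spuriousENOTAVAIL(err) // true
+}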
+
+func (ln *TCPListener) ok() bool { return ln != nil && ln.fd != nil }
+
+func (ln *TCPListener) accept() (*TCPConn, error) {
+ fd, err := ln.fd.accept()
+ if err != nil {
+ return nil, err
+ }
+ tc := newTCPConn(fd)
+ if ln.lc.KeepAlive >= 0 {
+ setKeepAlive(fd, true)
+ ka := ln.lc.KeepAlive
+ if ln.lc.KeepAlive == 0 {
+ ka = defaultTCPKeepAlive
+ }
+ setKeepAlivePeriod(fd, ka)
+ }
+ return tc, nil
+}
+
+func (ln *TCPListener) close() error {
+ return ln.fd.Close()
+}
+
+func (ln *TCPListener) file() (*os.File, error) {
+ f, err := ln.fd.dup()
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+func (sl *sysListener) listenTCP(ctx context.Context, laddr *TCPAddr) (*TCPListener, error) {
+ fd, err := internetSocket(ctx, sl.network, laddr, nil, syscall.SOCK_STREAM, 0, "listen", sl.ListenConfig.Control)
+ if err != nil {
+ return nil, err
+ }
+ return &TCPListener{fd: fd, lc: sl.ListenConfig}, nil
+}
diff --git a/contrib/go/_std_1.18/src/net/tcpsockopt_darwin.go b/contrib/go/_std_1.19/src/net/tcpsockopt_darwin.go
index 53c6756e33..53c6756e33 100644
--- a/contrib/go/_std_1.18/src/net/tcpsockopt_darwin.go
+++ b/contrib/go/_std_1.19/src/net/tcpsockopt_darwin.go
diff --git a/contrib/go/_std_1.19/src/net/tcpsockopt_posix.go b/contrib/go/_std_1.19/src/net/tcpsockopt_posix.go
new file mode 100644
index 0000000000..d708f04875
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/tcpsockopt_posix.go
@@ -0,0 +1,18 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || windows
+
+package net
+
+import (
+ "runtime"
+ "syscall"
+)
+
+func setNoDelay(fd *netFD, noDelay bool) error {
+ err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_NODELAY, boolint(noDelay))
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
+}
diff --git a/contrib/go/_std_1.18/src/net/tcpsockopt_unix.go b/contrib/go/_std_1.19/src/net/tcpsockopt_unix.go
index bdcdc40239..bdcdc40239 100644
--- a/contrib/go/_std_1.18/src/net/tcpsockopt_unix.go
+++ b/contrib/go/_std_1.19/src/net/tcpsockopt_unix.go
diff --git a/contrib/go/_std_1.18/src/net/textproto/header.go b/contrib/go/_std_1.19/src/net/textproto/header.go
index a58df7aebc..a58df7aebc 100644
--- a/contrib/go/_std_1.18/src/net/textproto/header.go
+++ b/contrib/go/_std_1.19/src/net/textproto/header.go
diff --git a/contrib/go/_std_1.18/src/net/textproto/pipeline.go b/contrib/go/_std_1.19/src/net/textproto/pipeline.go
index 1928a306bf..1928a306bf 100644
--- a/contrib/go/_std_1.18/src/net/textproto/pipeline.go
+++ b/contrib/go/_std_1.19/src/net/textproto/pipeline.go
diff --git a/contrib/go/_std_1.19/src/net/textproto/reader.go b/contrib/go/_std_1.19/src/net/textproto/reader.go
new file mode 100644
index 0000000000..1f7afc5766
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/textproto/reader.go
@@ -0,0 +1,788 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package textproto
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// A Reader implements convenience methods for reading requests
+// or responses from a text protocol network connection.
+type Reader struct {
+ R *bufio.Reader
+ dot *dotReader
+ buf []byte // a re-usable buffer for readContinuedLineSlice
+}
+
+// NewReader returns a new Reader reading from r.
+//
+// To avoid denial of service attacks, the provided bufio.Reader
+// should be reading from an io.LimitReader or similar Reader to bound
+// the size of responses.
+func NewReader(r *bufio.Reader) *Reader {
+ return &Reader{R: r}
+}
+
+// ReadLine reads a single line from r,
+// eliding the final \n or \r\n from the returned string.
+func (r *Reader) ReadLine() (string, error) {
+ line, err := r.readLineSlice()
+ return string(line), err
+}
+
+// ReadLineBytes is like ReadLine but returns a []byte instead of a string.
+func (r *Reader) ReadLineBytes() ([]byte, error) {
+ line, err := r.readLineSlice()
+ if line != nil {
+ buf := make([]byte, len(line))
+ copy(buf, line)
+ line = buf
+ }
+ return line, err
+}
+
+func (r *Reader) readLineSlice() ([]byte, error) {
+ r.closeDot()
+ var line []byte
+ for {
+ l, more, err := r.R.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ // Avoid the copy if the first call produced a full line.
+ if line == nil && !more {
+ return l, nil
+ }
+ line = append(line, l...)
+ if !more {
+ break
+ }
+ }
+ return line, nil
+}
+
+// ReadContinuedLine reads a possibly continued line from r,
+// eliding the final trailing ASCII white space.
+// Lines after the first are considered continuations if they
+// begin with a space or tab character. In the returned data,
+// continuation lines are separated from the previous line
+// only by a single space: the newline and leading white space
+// are removed.
+//
+// For example, consider this input:
+//
+// Line 1
+// continued...
+// Line 2
+//
+// The first call to ReadContinuedLine will return "Line 1 continued..."
+// and the second will return "Line 2".
+//
+// Empty lines are never continued.
+func (r *Reader) ReadContinuedLine() (string, error) {
+ line, err := r.readContinuedLineSlice(noValidation)
+ return string(line), err
+}
+
+// trim returns s with leading and trailing spaces and tabs removed.
+// It does not assume Unicode or UTF-8.
+func trim(s []byte) []byte {
+ i := 0
+ for i < len(s) && (s[i] == ' ' || s[i] == '\t') {
+ i++
+ }
+ n := len(s)
+ for n > i && (s[n-1] == ' ' || s[n-1] == '\t') {
+ n--
+ }
+ return s[i:n]
+}
+
+// ReadContinuedLineBytes is like ReadContinuedLine but
+// returns a []byte instead of a string.
+func (r *Reader) ReadContinuedLineBytes() ([]byte, error) {
+ line, err := r.readContinuedLineSlice(noValidation)
+ if line != nil {
+ buf := make([]byte, len(line))
+ copy(buf, line)
+ line = buf
+ }
+ return line, err
+}
+
+// readContinuedLineSlice reads continued lines from the reader buffer,
+// returning a byte slice with all lines. The validateFirstLine function
+// is run on the first read line, and if it returns an error then this
+// error is returned from readContinuedLineSlice.
+func (r *Reader) readContinuedLineSlice(validateFirstLine func([]byte) error) ([]byte, error) {
+ if validateFirstLine == nil {
+ return nil, fmt.Errorf("missing validateFirstLine func")
+ }
+
+ // Read the first line.
+ line, err := r.readLineSlice()
+ if err != nil {
+ return nil, err
+ }
+ if len(line) == 0 { // blank line - no continuation
+ return line, nil
+ }
+
+ if err := validateFirstLine(line); err != nil {
+ return nil, err
+ }
+
+ // Optimistically assume that we have started to buffer the next line
+ // and it starts with an ASCII letter (the next header key), or a blank
+ // line, so we can avoid copying that buffered data around in memory
+ // and skipping over non-existent whitespace.
+ if r.R.Buffered() > 1 {
+ peek, _ := r.R.Peek(2)
+ if len(peek) > 0 && (isASCIILetter(peek[0]) || peek[0] == '\n') ||
+ len(peek) == 2 && peek[0] == '\r' && peek[1] == '\n' {
+ return trim(line), nil
+ }
+ }
+
+ // ReadByte or the next readLineSlice will flush the read buffer;
+ // copy the slice into buf.
+ r.buf = append(r.buf[:0], trim(line)...)
+
+ // Read continuation lines.
+ for r.skipSpace() > 0 {
+ line, err := r.readLineSlice()
+ if err != nil {
+ break
+ }
+ r.buf = append(r.buf, ' ')
+ r.buf = append(r.buf, trim(line)...)
+ }
+ return r.buf, nil
+}
+
+// skipSpace skips R over all spaces and returns the number of bytes skipped.
+func (r *Reader) skipSpace() int {
+ n := 0
+ for {
+ c, err := r.R.ReadByte()
+ if err != nil {
+ // Bufio will keep err until next read.
+ break
+ }
+ if c != ' ' && c != '\t' {
+ r.R.UnreadByte()
+ break
+ }
+ n++
+ }
+ return n
+}
+
+func (r *Reader) readCodeLine(expectCode int) (code int, continued bool, message string, err error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return
+ }
+ return parseCodeLine(line, expectCode)
+}
+
+func parseCodeLine(line string, expectCode int) (code int, continued bool, message string, err error) {
+ if len(line) < 4 || line[3] != ' ' && line[3] != '-' {
+ err = ProtocolError("short response: " + line)
+ return
+ }
+ continued = line[3] == '-'
+ code, err = strconv.Atoi(line[0:3])
+ if err != nil || code < 100 {
+ err = ProtocolError("invalid response code: " + line)
+ return
+ }
+ message = line[4:]
+ if 1 <= expectCode && expectCode < 10 && code/100 != expectCode ||
+ 10 <= expectCode && expectCode < 100 && code/10 != expectCode ||
+ 100 <= expectCode && expectCode < 1000 && code != expectCode {
+ err = &Error{code, message}
+ }
+ return
+}
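+
+// exampleParseCode is an illustrative sketch (not part of the upstream
+// file): expectCode 31 accepts any code in [310,319], so a "220" reply
+// parses cleanly but still yields an *Error.
+func exampleParseCode() error {
+	_, _, _, err := parseCodeLine("220 ready", 31) // err == &Error{220, "ready"}
+	return err
+}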
+
+// ReadCodeLine reads a response code line of the form
+//
+// code message
+//
+// where code is a three-digit status code and the message
+// extends to the rest of the line. An example of such a line is:
+//
+// 220 plan9.bell-labs.com ESMTP
+//
+// If the prefix of the status does not match the digits in expectCode,
+// ReadCodeLine returns with err set to &Error{code, message}.
+// For example, if expectCode is 31, an error will be returned if
+// the status is not in the range [310,319].
+//
+// If the response is multi-line, ReadCodeLine returns an error.
+//
+// An expectCode <= 0 disables the check of the status code.
+func (r *Reader) ReadCodeLine(expectCode int) (code int, message string, err error) {
+ code, continued, message, err := r.readCodeLine(expectCode)
+ if err == nil && continued {
+ err = ProtocolError("unexpected multi-line response: " + message)
+ }
+ return
+}
+
+// ReadResponse reads a multi-line response of the form:
+//
+// code-message line 1
+// code-message line 2
+// ...
+// code message line n
+//
+// where code is a three-digit status code. The first line starts with the
+// code and a hyphen. The response is terminated by a line that starts
+// with the same code followed by a space. Each line in message is
+// separated by a newline (\n).
+//
+// See page 36 of RFC 959 (https://www.ietf.org/rfc/rfc959.txt) for
+// details of another form of response accepted:
+//
+// code-message line 1
+// message line 2
+// ...
+// code message line n
+//
+// If the prefix of the status does not match the digits in expectCode,
+// ReadResponse returns with err set to &Error{code, message}.
+// For example, if expectCode is 31, an error will be returned if
+// the status is not in the range [310,319].
+//
+// An expectCode <= 0 disables the check of the status code.
+func (r *Reader) ReadResponse(expectCode int) (code int, message string, err error) {
+ code, continued, message, err := r.readCodeLine(expectCode)
+ multi := continued
+ for continued {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, "", err
+ }
+
+ var code2 int
+ var moreMessage string
+ code2, continued, moreMessage, err = parseCodeLine(line, 0)
+ if err != nil || code2 != code {
+ message += "\n" + strings.TrimRight(line, "\r\n")
+ continued = true
+ continue
+ }
+ message += "\n" + moreMessage
+ }
+ if err != nil && multi && message != "" {
+		// Replace the one-line error message with the full multi-line message.
+ err = &Error{code, message}
+ }
+ return
+}
+
+// DotReader returns a new Reader that satisfies Reads using the
+// decoded text of a dot-encoded block read from r.
+// The returned Reader is only valid until the next call
+// to a method on r.
+//
+// Dot encoding is a common framing used for data blocks
+// in text protocols such as SMTP. The data consists of a sequence
+// of lines, each of which ends in "\r\n". The sequence itself
+// ends at a line containing just a dot: ".\r\n". Lines beginning
+// with a dot are escaped with an additional dot to avoid
+// looking like the end of the sequence.
+//
+// The decoded form returned by the Reader's Read method
+// rewrites the "\r\n" line endings into the simpler "\n",
+// removes leading dot escapes if present, and stops with error io.EOF
+// after consuming (and discarding) the end-of-sequence line.
+func (r *Reader) DotReader() io.Reader {
+ r.closeDot()
+ r.dot = &dotReader{r: r}
+ return r.dot
+}
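+
+// exampleDotReader is an illustrative sketch (not part of the upstream
+// file): decoding the dot-encoded block "abc\r\n..def\r\n.\r\n" yields
+// "abc\n.def\n" and a clean EOF.
+func exampleDotReader() (string, error) {
+	r := NewReader(bufio.NewReader(strings.NewReader("abc\r\n..def\r\n.\r\n")))
+	b, err := io.ReadAll(r.DotReader()) // err == nil; EOF is consumed by ReadAll
+	return string(b), err               // "abc\n.def\n", nil
+}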
+
+type dotReader struct {
+ r *Reader
+ state int
+}
+
+// Read satisfies reads by decoding dot-encoded data read from d.r.
+func (d *dotReader) Read(b []byte) (n int, err error) {
+ // Run data through a simple state machine to
+ // elide leading dots, rewrite trailing \r\n into \n,
+ // and detect ending .\r\n line.
+ const (
+ stateBeginLine = iota // beginning of line; initial state; must be zero
+ stateDot // read . at beginning of line
+ stateDotCR // read .\r at beginning of line
+ stateCR // read \r (possibly at end of line)
+ stateData // reading data in middle of line
+ stateEOF // reached .\r\n end marker line
+ )
+ br := d.r.R
+ for n < len(b) && d.state != stateEOF {
+ var c byte
+ c, err = br.ReadByte()
+ if err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ break
+ }
+ switch d.state {
+ case stateBeginLine:
+ if c == '.' {
+ d.state = stateDot
+ continue
+ }
+ if c == '\r' {
+ d.state = stateCR
+ continue
+ }
+ d.state = stateData
+
+ case stateDot:
+ if c == '\r' {
+ d.state = stateDotCR
+ continue
+ }
+ if c == '\n' {
+ d.state = stateEOF
+ continue
+ }
+ d.state = stateData
+
+ case stateDotCR:
+ if c == '\n' {
+ d.state = stateEOF
+ continue
+ }
+ // Not part of .\r\n.
+ // Consume leading dot and emit saved \r.
+ br.UnreadByte()
+ c = '\r'
+ d.state = stateData
+
+ case stateCR:
+ if c == '\n' {
+ d.state = stateBeginLine
+ break
+ }
+ // Not part of \r\n. Emit saved \r
+ br.UnreadByte()
+ c = '\r'
+ d.state = stateData
+
+ case stateData:
+ if c == '\r' {
+ d.state = stateCR
+ continue
+ }
+ if c == '\n' {
+ d.state = stateBeginLine
+ }
+ }
+ b[n] = c
+ n++
+ }
+ if err == nil && d.state == stateEOF {
+ err = io.EOF
+ }
+ if err != nil && d.r.dot == d {
+ d.r.dot = nil
+ }
+ return
+}
+
+// closeDot drains the current DotReader if any,
+// making sure that it reads until the ending dot line.
+func (r *Reader) closeDot() {
+ if r.dot == nil {
+ return
+ }
+ buf := make([]byte, 128)
+ for r.dot != nil {
+ // When Read reaches EOF or an error,
+ // it will set r.dot == nil.
+ r.dot.Read(buf)
+ }
+}
+
+// ReadDotBytes reads a dot-encoding and returns the decoded data.
+//
+// See the documentation for the DotReader method for details about dot-encoding.
+func (r *Reader) ReadDotBytes() ([]byte, error) {
+ return io.ReadAll(r.DotReader())
+}
+
+// ReadDotLines reads a dot-encoding and returns a slice
+// containing the decoded lines, with the final \r\n or \n elided from each.
+//
+// See the documentation for the DotReader method for details about dot-encoding.
+func (r *Reader) ReadDotLines() ([]string, error) {
+ // We could use ReadDotBytes and then Split it,
+ // but reading a line at a time avoids needing a
+ // large contiguous block of memory and is simpler.
+ var v []string
+ var err error
+ for {
+ var line string
+ line, err = r.ReadLine()
+ if err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ break
+ }
+
+ // Dot by itself marks end; otherwise cut one dot.
+ if len(line) > 0 && line[0] == '.' {
+ if len(line) == 1 {
+ break
+ }
+ line = line[1:]
+ }
+ v = append(v, line)
+ }
+ return v, err
+}
+
+var colon = []byte(":")
+
+// ReadMIMEHeader reads a MIME-style header from r.
+// The header is a sequence of possibly continued Key: Value lines
+// ending in a blank line.
+// The returned map m maps CanonicalMIMEHeaderKey(key) to a
+// sequence of values in the same order encountered in the input.
+//
+// For example, consider this input:
+//
+// My-Key: Value 1
+// Long-Key: Even
+// Longer Value
+// My-Key: Value 2
+//
+// Given that input, ReadMIMEHeader returns the map:
+//
+// map[string][]string{
+// "My-Key": {"Value 1", "Value 2"},
+// "Long-Key": {"Even Longer Value"},
+// }
+func (r *Reader) ReadMIMEHeader() (MIMEHeader, error) {
+ // Avoid lots of small slice allocations later by allocating one
+ // large one ahead of time which we'll cut up into smaller
+ // slices. If this isn't big enough later, we allocate small ones.
+ var strs []string
+ hint := r.upcomingHeaderNewlines()
+ if hint > 0 {
+ strs = make([]string, hint)
+ }
+
+ m := make(MIMEHeader, hint)
+
+ // The first line cannot start with a leading space.
+ if buf, err := r.R.Peek(1); err == nil && (buf[0] == ' ' || buf[0] == '\t') {
+ line, err := r.readLineSlice()
+ if err != nil {
+ return m, err
+ }
+ return m, ProtocolError("malformed MIME header initial line: " + string(line))
+ }
+
+ for {
+ kv, err := r.readContinuedLineSlice(mustHaveFieldNameColon)
+ if len(kv) == 0 {
+ return m, err
+ }
+
+ // Key ends at first colon.
+ k, v, ok := bytes.Cut(kv, colon)
+ if !ok {
+ return m, ProtocolError("malformed MIME header line: " + string(kv))
+ }
+ key := canonicalMIMEHeaderKey(k)
+
+ // As per RFC 7230 field-name is a token, tokens consist of one or more chars.
+ // We could return a ProtocolError here, but better to be liberal in what we
+ // accept, so if we get an empty key, skip it.
+ if key == "" {
+ continue
+ }
+
+ // Skip initial spaces in value.
+ value := strings.TrimLeft(string(v), " \t")
+
+ vv := m[key]
+ if vv == nil && len(strs) > 0 {
+ // More than likely this will be a single-element key.
+ // Most headers aren't multi-valued.
+ // Set the capacity on strs[0] to 1, so any future append
+ // won't extend the slice into the other strings.
+ vv, strs = strs[:1:1], strs[1:]
+ vv[0] = value
+ m[key] = vv
+ } else {
+ m[key] = append(vv, value)
+ }
+
+ if err != nil {
+ return m, err
+ }
+ }
+}
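+
+// exampleMIMEHeader is an illustrative sketch (not part of the upstream
+// file): it parses the two-key header from the doc comment above.
+func exampleMIMEHeader() (MIMEHeader, error) {
+	const h = "My-Key: Value 1\r\nLong-Key: Even\r\n Longer Value\r\nMy-Key: Value 2\r\n\r\n"
+	r := NewReader(bufio.NewReader(strings.NewReader(h)))
+	return r.ReadMIMEHeader() // {"My-Key": {"Value 1", "Value 2"}, "Long-Key": {"Even Longer Value"}}
+}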
+
+// noValidation is a no-op validation func for readContinuedLineSlice
+// that permits any lines.
+func noValidation(_ []byte) error { return nil }
+
+// mustHaveFieldNameColon ensures that, per RFC 7230, the
+// field-name is on a single line, so the first line must
+// contain a colon.
+func mustHaveFieldNameColon(line []byte) error {
+ if bytes.IndexByte(line, ':') < 0 {
+ return ProtocolError(fmt.Sprintf("malformed MIME header: missing colon: %q", line))
+ }
+ return nil
+}
+
+var nl = []byte("\n")
+
+// upcomingHeaderNewlines returns an approximation of the number of newlines
+// that will be in this header. If it gets confused, it returns 0.
+func (r *Reader) upcomingHeaderNewlines() (n int) {
+ // Try to determine the 'hint' size.
+ r.R.Peek(1) // force a buffer load if empty
+ s := r.R.Buffered()
+ if s == 0 {
+ return
+ }
+ peek, _ := r.R.Peek(s)
+ return bytes.Count(peek, nl)
+}
+
+// CanonicalMIMEHeaderKey returns the canonical format of the
+// MIME header key s. The canonicalization converts the first
+// letter and any letter following a hyphen to upper case;
+// the rest are converted to lowercase. For example, the
+// canonical key for "accept-encoding" is "Accept-Encoding".
+// MIME header keys are assumed to be ASCII only.
+// If s contains a space or invalid header field bytes, it is
+// returned without modifications.
+func CanonicalMIMEHeaderKey(s string) string {
+ // Quick check for canonical encoding.
+ upper := true
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if !validHeaderFieldByte(c) {
+ return s
+ }
+ if upper && 'a' <= c && c <= 'z' {
+ return canonicalMIMEHeaderKey([]byte(s))
+ }
+ if !upper && 'A' <= c && c <= 'Z' {
+ return canonicalMIMEHeaderKey([]byte(s))
+ }
+ upper = c == '-'
+ }
+ return s
+}
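+
+// exampleCanonicalKey is an illustrative sketch (not part of the
+// upstream file): a non-canonical key is rewritten, while a key
+// containing an invalid byte (here a space) is returned unchanged.
+func exampleCanonicalKey() (string, string) {
+	return CanonicalMIMEHeaderKey("accept-encoding"), // "Accept-Encoding"
+		CanonicalMIMEHeaderKey("ACCEPT ENCODING") // unchanged: ' ' is not a token byte
+}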
+
+const toLower = 'a' - 'A'
+
+// validHeaderFieldByte reports whether b is a valid byte in a header
+// field name. RFC 7230 says:
+//
+// header-field = field-name ":" OWS field-value OWS
+// field-name = token
+// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
+// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
+// token = 1*tchar
+func validHeaderFieldByte(b byte) bool {
+ return int(b) < len(isTokenTable) && isTokenTable[b]
+}
+
+// canonicalMIMEHeaderKey is like CanonicalMIMEHeaderKey but is
+// allowed to mutate the provided byte slice before returning the
+// string.
+//
+// For invalid inputs (if a contains spaces or non-token bytes), a
+// is unchanged and a string copy is returned.
+func canonicalMIMEHeaderKey(a []byte) string {
+ // See if a looks like a header key. If not, return it unchanged.
+ for _, c := range a {
+ if validHeaderFieldByte(c) {
+ continue
+ }
+ // Don't canonicalize.
+ return string(a)
+ }
+
+ upper := true
+ for i, c := range a {
+ // Canonicalize: first letter upper case
+ // and upper case after each dash.
+ // (Host, User-Agent, If-Modified-Since).
+ // MIME headers are ASCII only, so no Unicode issues.
+ if upper && 'a' <= c && c <= 'z' {
+ c -= toLower
+ } else if !upper && 'A' <= c && c <= 'Z' {
+ c += toLower
+ }
+ a[i] = c
+ upper = c == '-' // for next time
+ }
+ commonHeaderOnce.Do(initCommonHeader)
+ // The compiler recognizes m[string(byteSlice)] as a special
+ // case, so a copy of a's bytes into a new string does not
+ // happen in this map lookup:
+ if v := commonHeader[string(a)]; v != "" {
+ return v
+ }
+ return string(a)
+}
+
+// commonHeader interns common header strings.
+var commonHeader map[string]string
+
+var commonHeaderOnce sync.Once
+
+func initCommonHeader() {
+ commonHeader = make(map[string]string)
+ for _, v := range []string{
+ "Accept",
+ "Accept-Charset",
+ "Accept-Encoding",
+ "Accept-Language",
+ "Accept-Ranges",
+ "Cache-Control",
+ "Cc",
+ "Connection",
+ "Content-Id",
+ "Content-Language",
+ "Content-Length",
+ "Content-Transfer-Encoding",
+ "Content-Type",
+ "Cookie",
+ "Date",
+ "Dkim-Signature",
+ "Etag",
+ "Expires",
+ "From",
+ "Host",
+ "If-Modified-Since",
+ "If-None-Match",
+ "In-Reply-To",
+ "Last-Modified",
+ "Location",
+ "Message-Id",
+ "Mime-Version",
+ "Pragma",
+ "Received",
+ "Return-Path",
+ "Server",
+ "Set-Cookie",
+ "Subject",
+ "To",
+ "User-Agent",
+ "Via",
+ "X-Forwarded-For",
+ "X-Imforwards",
+ "X-Powered-By",
+ } {
+ commonHeader[v] = v
+ }
+}
+
+// isTokenTable is a copy of net/http/lex.go's isTokenTable.
+// See https://httpwg.github.io/specs/rfc7230.html#rule.token.separators
+var isTokenTable = [127]bool{
+ '!': true,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+	'V': true,
+	'W': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '|': true,
+ '~': true,
+}
diff --git a/contrib/go/_std_1.19/src/net/textproto/textproto.go b/contrib/go/_std_1.19/src/net/textproto/textproto.go
new file mode 100644
index 0000000000..70038d5888
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/textproto/textproto.go
@@ -0,0 +1,152 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package textproto implements generic support for text-based request/response
+// protocols in the style of HTTP, NNTP, and SMTP.
+//
+// The package provides:
+//
+// Error, which represents a numeric error response from
+// a server.
+//
+// Pipeline, to manage pipelined requests and responses
+// in a client.
+//
+// Reader, to read numeric response code lines,
+// key: value headers, lines wrapped with leading spaces
+// on continuation lines, and whole text blocks ending
+// with a dot on a line by itself.
+//
+// Writer, to write dot-encoded text blocks.
+//
+// Conn, a convenient packaging of Reader, Writer, and Pipeline for use
+// with a single network connection.
+package textproto
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net"
+)
+
+// An Error represents a numeric error response from a server.
+type Error struct {
+ Code int
+ Msg string
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("%03d %s", e.Code, e.Msg)
+}
+
+// A ProtocolError describes a protocol violation such
+// as an invalid response or a hung-up connection.
+type ProtocolError string
+
+func (p ProtocolError) Error() string {
+ return string(p)
+}
+
+// A Conn represents a textual network protocol connection.
+// It consists of a Reader and Writer to manage I/O
+// and a Pipeline to sequence concurrent requests on the connection.
+// These embedded types carry methods with them;
+// see the documentation of those types for details.
+type Conn struct {
+ Reader
+ Writer
+ Pipeline
+ conn io.ReadWriteCloser
+}
+
+// NewConn returns a new Conn using conn for I/O.
+func NewConn(conn io.ReadWriteCloser) *Conn {
+ return &Conn{
+ Reader: Reader{R: bufio.NewReader(conn)},
+ Writer: Writer{W: bufio.NewWriter(conn)},
+ conn: conn,
+ }
+}
+
+// Close closes the connection.
+func (c *Conn) Close() error {
+ return c.conn.Close()
+}
+
+// Dial connects to the given address on the given network using net.Dial
+// and then returns a new Conn for the connection.
+func Dial(network, addr string) (*Conn, error) {
+ c, err := net.Dial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+ return NewConn(c), nil
+}
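+
+// exampleDial is an illustrative sketch (not part of the upstream
+// file): a minimal exchange against a hypothetical SMTP-style server,
+// using the methods Conn embeds from Reader.
+func exampleDial() error {
+	c, err := Dial("tcp", "mail.example.com:25") // hypothetical host
+	if err != nil {
+		return err
+	}
+	defer c.Close()
+	_, _, err = c.ReadCodeLine(220) // expect the 220 greeting line
+	return err
+}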
+
+// Cmd is a convenience method that sends a command after
+// waiting its turn in the pipeline. The command text is the
+// result of formatting format with args and appending \r\n.
+// Cmd returns the id of the command, for use with StartResponse and EndResponse.
+//
+// For example, a client might run a HELP command that returns a dot-body
+// by using:
+//
+// id, err := c.Cmd("HELP")
+// if err != nil {
+// return nil, err
+// }
+//
+// c.StartResponse(id)
+// defer c.EndResponse(id)
+//
+// if _, _, err = c.ReadCodeLine(110); err != nil {
+// return nil, err
+// }
+// text, err := c.ReadDotBytes()
+// if err != nil {
+// return nil, err
+// }
+// return c.ReadCodeLine(250)
+func (c *Conn) Cmd(format string, args ...any) (id uint, err error) {
+ id = c.Next()
+ c.StartRequest(id)
+ err = c.PrintfLine(format, args...)
+ c.EndRequest(id)
+ if err != nil {
+ return 0, err
+ }
+ return id, nil
+}
+
+// TrimString returns s without leading and trailing ASCII space.
+func TrimString(s string) string {
+ for len(s) > 0 && isASCIISpace(s[0]) {
+ s = s[1:]
+ }
+ for len(s) > 0 && isASCIISpace(s[len(s)-1]) {
+ s = s[:len(s)-1]
+ }
+ return s
+}
+
+// TrimBytes returns b without leading and trailing ASCII space.
+func TrimBytes(b []byte) []byte {
+ for len(b) > 0 && isASCIISpace(b[0]) {
+ b = b[1:]
+ }
+ for len(b) > 0 && isASCIISpace(b[len(b)-1]) {
+ b = b[:len(b)-1]
+ }
+ return b
+}
+
+func isASCIISpace(b byte) bool {
+ return b == ' ' || b == '\t' || b == '\n' || b == '\r'
+}
+
+func isASCIILetter(b byte) bool {
+ b |= 0x20 // make lower case
+ return 'a' <= b && b <= 'z'
+}
diff --git a/contrib/go/_std_1.18/src/net/textproto/writer.go b/contrib/go/_std_1.19/src/net/textproto/writer.go
index 2ece3f511b..2ece3f511b 100644
--- a/contrib/go/_std_1.18/src/net/textproto/writer.go
+++ b/contrib/go/_std_1.19/src/net/textproto/writer.go
diff --git a/contrib/go/_std_1.19/src/net/udpsock.go b/contrib/go/_std_1.19/src/net/udpsock.go
new file mode 100644
index 0000000000..e30624dea5
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/udpsock.go
@@ -0,0 +1,368 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+import (
+ "context"
+ "internal/itoa"
+ "net/netip"
+ "syscall"
+)
+
+// BUG(mikio): On Plan 9, the ReadMsgUDP and
+// WriteMsgUDP methods of UDPConn are not implemented.
+
+// BUG(mikio): On Windows, the File method of UDPConn is not
+// implemented.
+
+// BUG(mikio): On JS, methods and functions related to UDPConn are not
+// implemented.
+
+// UDPAddr represents the address of a UDP end point.
+type UDPAddr struct {
+ IP IP
+ Port int
+ Zone string // IPv6 scoped addressing zone
+}
+
+// AddrPort returns the UDPAddr a as a netip.AddrPort.
+//
+// If a.Port does not fit in a uint16, it's silently truncated.
+//
+// If a is nil, a zero value is returned.
+func (a *UDPAddr) AddrPort() netip.AddrPort {
+ if a == nil {
+ return netip.AddrPort{}
+ }
+ na, _ := netip.AddrFromSlice(a.IP)
+ na = na.WithZone(a.Zone)
+ return netip.AddrPortFrom(na, uint16(a.Port))
+}
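+
+// exampleAddrPort is an illustrative sketch (not part of the upstream
+// file): a round trip between the slice-based UDPAddr and the
+// value-based netip.AddrPort representations.
+func exampleAddrPort() *UDPAddr {
+	a := &UDPAddr{IP: IPv4(127, 0, 0, 1), Port: 53}
+	return UDPAddrFromAddrPort(a.AddrPort()) // an equivalent, freshly allocated address
+}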
+
+// Network returns the address's network name, "udp".
+func (a *UDPAddr) Network() string { return "udp" }
+
+func (a *UDPAddr) String() string {
+ if a == nil {
+ return "<nil>"
+ }
+ ip := ipEmptyString(a.IP)
+ if a.Zone != "" {
+ return JoinHostPort(ip+"%"+a.Zone, itoa.Itoa(a.Port))
+ }
+ return JoinHostPort(ip, itoa.Itoa(a.Port))
+}
+
+func (a *UDPAddr) isWildcard() bool {
+ if a == nil || a.IP == nil {
+ return true
+ }
+ return a.IP.IsUnspecified()
+}
+
+func (a *UDPAddr) opAddr() Addr {
+ if a == nil {
+ return nil
+ }
+ return a
+}
+
+// ResolveUDPAddr returns the address of a UDP end point.
+//
+// The network must be a UDP network name.
+//
+// If the host in the address parameter is not a literal IP address or
+// the port is not a literal port number, ResolveUDPAddr resolves the
+// address to the address of a UDP end point.
+// Otherwise, it parses the address as a pair of literal IP address
+// and port number.
+// The address parameter can use a host name, but this is not
+// recommended, because it will return at most one of the host name's
+// IP addresses.
+//
+// See func Dial for a description of the network and address
+// parameters.
+func ResolveUDPAddr(network, address string) (*UDPAddr, error) {
+ switch network {
+ case "udp", "udp4", "udp6":
+ case "": // a hint wildcard for Go 1.0 undocumented behavior
+ network = "udp"
+ default:
+ return nil, UnknownNetworkError(network)
+ }
+ addrs, err := DefaultResolver.internetAddrList(context.Background(), network, address)
+ if err != nil {
+ return nil, err
+ }
+ return addrs.forResolve(network, address).(*UDPAddr), nil
+}
+
+// UDPAddrFromAddrPort returns addr as a UDPAddr. If addr.IsValid() is false,
+// then the returned UDPAddr will contain a nil IP field, indicating an
+// address family-agnostic unspecified address.
+func UDPAddrFromAddrPort(addr netip.AddrPort) *UDPAddr {
+ return &UDPAddr{
+ IP: addr.Addr().AsSlice(),
+ Zone: addr.Addr().Zone(),
+ Port: int(addr.Port()),
+ }
+}
+
+// An addrPortUDPAddr is a netip.AddrPort-based UDP address that satisfies the Addr interface.
+type addrPortUDPAddr struct {
+ netip.AddrPort
+}
+
+func (addrPortUDPAddr) Network() string { return "udp" }
+
+// UDPConn is the implementation of the Conn and PacketConn interfaces
+// for UDP network connections.
+type UDPConn struct {
+ conn
+}
+
+// SyscallConn returns a raw network connection.
+// This implements the syscall.Conn interface.
+func (c *UDPConn) SyscallConn() (syscall.RawConn, error) {
+ if !c.ok() {
+ return nil, syscall.EINVAL
+ }
+ return newRawConn(c.fd)
+}
+
+// ReadFromUDP acts like ReadFrom but returns a UDPAddr.
+func (c *UDPConn) ReadFromUDP(b []byte) (n int, addr *UDPAddr, err error) {
+ // This function is designed to allow the caller to control the lifetime
+ // of the returned *UDPAddr and thereby prevent an allocation.
+ // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/.
+ // The real work is done by readFromUDP, below.
+ return c.readFromUDP(b, &UDPAddr{})
+}
+
+// readFromUDP implements ReadFromUDP.
+func (c *UDPConn) readFromUDP(b []byte, addr *UDPAddr) (int, *UDPAddr, error) {
+ if !c.ok() {
+ return 0, nil, syscall.EINVAL
+ }
+ n, addr, err := c.readFrom(b, addr)
+ if err != nil {
+ err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+ }
+ return n, addr, err
+}
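+
+// exampleReadLoop is an illustrative sketch (not part of the upstream
+// file): because ReadFromUDP inlines and the &UDPAddr{} it passes down
+// does not escape when the caller ignores it, this loop can read
+// packets without a per-iteration heap allocation.
+func exampleReadLoop(c *UDPConn, buf []byte) error {
+	for {
+		if _, _, err := c.ReadFromUDP(buf); err != nil {
+			return err
+		}
+	}
+}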
+
+// ReadFrom implements the PacketConn ReadFrom method.
+func (c *UDPConn) ReadFrom(b []byte) (int, Addr, error) {
+ n, addr, err := c.readFromUDP(b, &UDPAddr{})
+ if addr == nil {
+		// Return Addr(nil), not Addr(*UDPAddr(nil)).
+ return n, nil, err
+ }
+ return n, addr, err
+}
+
+// ReadFromUDPAddrPort acts like ReadFrom but returns a netip.AddrPort.
+//
+// If c is bound to an unspecified address, the returned
+// netip.AddrPort's address might be an IPv4-mapped IPv6 address.
+// Use netip.Addr.Unmap to get the address without the IPv6 prefix.
+func (c *UDPConn) ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPort, err error) {
+ if !c.ok() {
+ return 0, netip.AddrPort{}, syscall.EINVAL
+ }
+ n, addr, err = c.readFromAddrPort(b)
+ if err != nil {
+ err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+ }
+ return n, addr, err
+}
+
+// ReadMsgUDP reads a message from c, copying the payload into b and
+// the associated out-of-band data into oob. It returns the number of
+// bytes copied into b, the number of bytes copied into oob, the flags
+// that were set on the message and the source address of the message.
+//
+// The packages golang.org/x/net/ipv4 and golang.org/x/net/ipv6 can be
+// used to manipulate IP-level socket options in oob.
+func (c *UDPConn) ReadMsgUDP(b, oob []byte) (n, oobn, flags int, addr *UDPAddr, err error) {
+ var ap netip.AddrPort
+ n, oobn, flags, ap, err = c.ReadMsgUDPAddrPort(b, oob)
+ if ap.IsValid() {
+ addr = UDPAddrFromAddrPort(ap)
+ }
+ return
+}
+
+// ReadMsgUDPAddrPort is like ReadMsgUDP but returns a netip.AddrPort instead of a UDPAddr.
+func (c *UDPConn) ReadMsgUDPAddrPort(b, oob []byte) (n, oobn, flags int, addr netip.AddrPort, err error) {
+ if !c.ok() {
+ return 0, 0, 0, netip.AddrPort{}, syscall.EINVAL
+ }
+ n, oobn, flags, addr, err = c.readMsg(b, oob)
+ if err != nil {
+ err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+ }
+ return
+}
+
+// WriteToUDP acts like WriteTo but takes a UDPAddr.
+func (c *UDPConn) WriteToUDP(b []byte, addr *UDPAddr) (int, error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ n, err := c.writeTo(b, addr)
+ if err != nil {
+ err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addr.opAddr(), Err: err}
+ }
+ return n, err
+}
+
+// WriteToUDPAddrPort acts like WriteTo but takes a netip.AddrPort.
+func (c *UDPConn) WriteToUDPAddrPort(b []byte, addr netip.AddrPort) (int, error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ n, err := c.writeToAddrPort(b, addr)
+ if err != nil {
+ err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addrPortUDPAddr{addr}, Err: err}
+ }
+ return n, err
+}
+
+// WriteTo implements the PacketConn WriteTo method.
+func (c *UDPConn) WriteTo(b []byte, addr Addr) (int, error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ a, ok := addr.(*UDPAddr)
+ if !ok {
+ return 0, &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addr, Err: syscall.EINVAL}
+ }
+ n, err := c.writeTo(b, a)
+ if err != nil {
+ err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: a.opAddr(), Err: err}
+ }
+ return n, err
+}
+
+// WriteMsgUDP writes a message to addr via c if c isn't connected, or
+// to c's remote address if c is connected (in which case addr must be
+// nil). The payload is copied from b and the associated out-of-band
+// data is copied from oob. It returns the number of payload and
+// out-of-band bytes written.
+//
+// The packages golang.org/x/net/ipv4 and golang.org/x/net/ipv6 can be
+// used to manipulate IP-level socket options in oob.
+func (c *UDPConn) WriteMsgUDP(b, oob []byte, addr *UDPAddr) (n, oobn int, err error) {
+ if !c.ok() {
+ return 0, 0, syscall.EINVAL
+ }
+ n, oobn, err = c.writeMsg(b, oob, addr)
+ if err != nil {
+ err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addr.opAddr(), Err: err}
+ }
+ return
+}
+
+// WriteMsgUDPAddrPort is like WriteMsgUDP but takes a netip.AddrPort instead of a UDPAddr.
+func (c *UDPConn) WriteMsgUDPAddrPort(b, oob []byte, addr netip.AddrPort) (n, oobn int, err error) {
+ if !c.ok() {
+ return 0, 0, syscall.EINVAL
+ }
+ n, oobn, err = c.writeMsgAddrPort(b, oob, addr)
+ if err != nil {
+ err = &OpError{Op: "write", Net: c.fd.net, Source: c.fd.laddr, Addr: addrPortUDPAddr{addr}, Err: err}
+ }
+ return
+}
+
+func newUDPConn(fd *netFD) *UDPConn { return &UDPConn{conn{fd}} }
+
+// DialUDP acts like Dial for UDP networks.
+//
+// The network must be a UDP network name; see func Dial for details.
+//
+// If laddr is nil, a local address is automatically chosen.
+// If the IP field of raddr is nil or an unspecified IP address, the
+// local system is assumed.
+func DialUDP(network string, laddr, raddr *UDPAddr) (*UDPConn, error) {
+ switch network {
+ case "udp", "udp4", "udp6":
+ default:
+ return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(network)}
+ }
+ if raddr == nil {
+ return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress}
+ }
+ sd := &sysDialer{network: network, address: raddr.String()}
+ c, err := sd.dialUDP(context.Background(), laddr, raddr)
+ if err != nil {
+ return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
+ }
+ return c, nil
+}
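+
+// A minimal sketch of dialing a UDP peer (127.0.0.1:9000 is a
+// hypothetical address; error handling abbreviated):
+//
+//	raddr := &UDPAddr{IP: IPv4(127, 0, 0, 1), Port: 9000}
+//	c, err := DialUDP("udp", nil, raddr)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer c.Close()
+//	c.Write([]byte("ping")) // connected: Write, not WriteTo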
+
+// ListenUDP acts like ListenPacket for UDP networks.
+//
+// The network must be a UDP network name; see func Dial for details.
+//
+// If the IP field of laddr is nil or an unspecified IP address,
+// ListenUDP listens on all available IP addresses of the local system
+// except multicast IP addresses.
+// If the Port field of laddr is 0, a port number is automatically
+// chosen.
+func ListenUDP(network string, laddr *UDPAddr) (*UDPConn, error) {
+ switch network {
+ case "udp", "udp4", "udp6":
+ default:
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: UnknownNetworkError(network)}
+ }
+ if laddr == nil {
+ laddr = &UDPAddr{}
+ }
+ sl := &sysListener{network: network, address: laddr.String()}
+ c, err := sl.listenUDP(context.Background(), laddr)
+ if err != nil {
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: err}
+ }
+ return c, nil
+}
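+
+// A minimal receive sketch (port 9000 is an arbitrary example):
+//
+//	c, err := ListenUDP("udp", &UDPAddr{Port: 9000})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer c.Close()
+//	buf := make([]byte, 1500)
+//	n, src, err := c.ReadFromUDP(buf)
+//	// buf[:n] is one datagram from src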
+
+// ListenMulticastUDP acts like ListenPacket for UDP networks but
+// takes a group address on a specific network interface.
+//
+// The network must be a UDP network name; see func Dial for details.
+//
+// ListenMulticastUDP listens on all available IP addresses of the
+// local system including the group, multicast IP address.
+// If ifi is nil, ListenMulticastUDP uses the system-assigned
+// multicast interface, although this is not recommended because the
+// assignment is platform-dependent and may require additional
+// routing configuration.
+// If the Port field of gaddr is 0, a port number is automatically
+// chosen.
+//
+// ListenMulticastUDP is provided only as a convenience for simple,
+// small applications. Use the golang.org/x/net/ipv4 and
+// golang.org/x/net/ipv6 packages for general-purpose uses.
+//
+// Note that ListenMulticastUDP will set the IP_MULTICAST_LOOP socket option
+// to 0 under IPPROTO_IP, to disable loopback of multicast packets.
+func ListenMulticastUDP(network string, ifi *Interface, gaddr *UDPAddr) (*UDPConn, error) {
+ switch network {
+ case "udp", "udp4", "udp6":
+ default:
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: gaddr.opAddr(), Err: UnknownNetworkError(network)}
+ }
+ if gaddr == nil || gaddr.IP == nil {
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: gaddr.opAddr(), Err: errMissingAddress}
+ }
+ sl := &sysListener{network: network, address: gaddr.String()}
+ c, err := sl.listenMulticastUDP(context.Background(), ifi, gaddr)
+ if err != nil {
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: gaddr.opAddr(), Err: err}
+ }
+ return c, nil
+}
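+
+// A sketch of joining a group; 224.0.0.251:5353 (mDNS) is shown only
+// as a familiar example group:
+//
+//	gaddr := &UDPAddr{IP: IPv4(224, 0, 0, 251), Port: 5353}
+//	c, err := ListenMulticastUDP("udp4", nil, gaddr)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer c.Close()
+//	buf := make([]byte, 1500)
+//	n, src, _ := c.ReadFromUDP(buf) // buf[:n] from src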
diff --git a/contrib/go/_std_1.19/src/net/udpsock_posix.go b/contrib/go/_std_1.19/src/net/udpsock_posix.go
new file mode 100644
index 0000000000..5b021d24ae
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/udpsock_posix.go
@@ -0,0 +1,269 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm) || windows
+
+package net
+
+import (
+ "context"
+ "net/netip"
+ "syscall"
+)
+
+func sockaddrToUDP(sa syscall.Sockaddr) Addr {
+ switch sa := sa.(type) {
+ case *syscall.SockaddrInet4:
+ return &UDPAddr{IP: sa.Addr[0:], Port: sa.Port}
+ case *syscall.SockaddrInet6:
+ return &UDPAddr{IP: sa.Addr[0:], Port: sa.Port, Zone: zoneCache.name(int(sa.ZoneId))}
+ }
+ return nil
+}
+
+func (a *UDPAddr) family() int {
+ if a == nil || len(a.IP) <= IPv4len {
+ return syscall.AF_INET
+ }
+ if a.IP.To4() != nil {
+ return syscall.AF_INET
+ }
+ return syscall.AF_INET6
+}
+
+func (a *UDPAddr) sockaddr(family int) (syscall.Sockaddr, error) {
+ if a == nil {
+ return nil, nil
+ }
+ return ipToSockaddr(family, a.IP, a.Port, a.Zone)
+}
+
+func (a *UDPAddr) toLocal(net string) sockaddr {
+ return &UDPAddr{loopbackIP(net), a.Port, a.Zone}
+}
+
+func (c *UDPConn) readFrom(b []byte, addr *UDPAddr) (int, *UDPAddr, error) {
+ var n int
+ var err error
+ switch c.fd.family {
+ case syscall.AF_INET:
+ var from syscall.SockaddrInet4
+ n, err = c.fd.readFromInet4(b, &from)
+ if err == nil {
+ ip := from.Addr // copy from.Addr; ip escapes, so this line allocates 4 bytes
+ *addr = UDPAddr{IP: ip[:], Port: from.Port}
+ }
+ case syscall.AF_INET6:
+ var from syscall.SockaddrInet6
+ n, err = c.fd.readFromInet6(b, &from)
+ if err == nil {
+ ip := from.Addr // copy from.Addr; ip escapes, so this line allocates 16 bytes
+ *addr = UDPAddr{IP: ip[:], Port: from.Port, Zone: zoneCache.name(int(from.ZoneId))}
+ }
+ }
+ if err != nil {
+ // No sockaddr, so don't return UDPAddr.
+ addr = nil
+ }
+ return n, addr, err
+}
+
+func (c *UDPConn) readFromAddrPort(b []byte) (n int, addr netip.AddrPort, err error) {
+ var ip netip.Addr
+ var port int
+ switch c.fd.family {
+ case syscall.AF_INET:
+ var from syscall.SockaddrInet4
+ n, err = c.fd.readFromInet4(b, &from)
+ if err == nil {
+ ip = netip.AddrFrom4(from.Addr)
+ port = from.Port
+ }
+ case syscall.AF_INET6:
+ var from syscall.SockaddrInet6
+ n, err = c.fd.readFromInet6(b, &from)
+ if err == nil {
+ ip = netip.AddrFrom16(from.Addr).WithZone(zoneCache.name(int(from.ZoneId)))
+ port = from.Port
+ }
+ }
+ if err == nil {
+ addr = netip.AddrPortFrom(ip, uint16(port))
+ }
+ return n, addr, err
+}
+
+func (c *UDPConn) readMsg(b, oob []byte) (n, oobn, flags int, addr netip.AddrPort, err error) {
+ switch c.fd.family {
+ case syscall.AF_INET:
+ var sa syscall.SockaddrInet4
+ n, oobn, flags, err = c.fd.readMsgInet4(b, oob, 0, &sa)
+ ip := netip.AddrFrom4(sa.Addr)
+ addr = netip.AddrPortFrom(ip, uint16(sa.Port))
+ case syscall.AF_INET6:
+ var sa syscall.SockaddrInet6
+ n, oobn, flags, err = c.fd.readMsgInet6(b, oob, 0, &sa)
+ ip := netip.AddrFrom16(sa.Addr).WithZone(zoneCache.name(int(sa.ZoneId)))
+ addr = netip.AddrPortFrom(ip, uint16(sa.Port))
+ }
+ return
+}
+
+func (c *UDPConn) writeTo(b []byte, addr *UDPAddr) (int, error) {
+ if c.fd.isConnected {
+ return 0, ErrWriteToConnected
+ }
+ if addr == nil {
+ return 0, errMissingAddress
+ }
+
+ switch c.fd.family {
+ case syscall.AF_INET:
+ sa, err := ipToSockaddrInet4(addr.IP, addr.Port)
+ if err != nil {
+ return 0, err
+ }
+ return c.fd.writeToInet4(b, &sa)
+ case syscall.AF_INET6:
+ sa, err := ipToSockaddrInet6(addr.IP, addr.Port, addr.Zone)
+ if err != nil {
+ return 0, err
+ }
+ return c.fd.writeToInet6(b, &sa)
+ default:
+ return 0, &AddrError{Err: "invalid address family", Addr: addr.IP.String()}
+ }
+}
+
+func (c *UDPConn) writeToAddrPort(b []byte, addr netip.AddrPort) (int, error) {
+ if c.fd.isConnected {
+ return 0, ErrWriteToConnected
+ }
+ if !addr.IsValid() {
+ return 0, errMissingAddress
+ }
+
+ switch c.fd.family {
+ case syscall.AF_INET:
+ sa, err := addrPortToSockaddrInet4(addr)
+ if err != nil {
+ return 0, err
+ }
+ return c.fd.writeToInet4(b, &sa)
+ case syscall.AF_INET6:
+ sa, err := addrPortToSockaddrInet6(addr)
+ if err != nil {
+ return 0, err
+ }
+ return c.fd.writeToInet6(b, &sa)
+ default:
+ return 0, &AddrError{Err: "invalid address family", Addr: addr.Addr().String()}
+ }
+}
+
+func (c *UDPConn) writeMsg(b, oob []byte, addr *UDPAddr) (n, oobn int, err error) {
+ if c.fd.isConnected && addr != nil {
+ return 0, 0, ErrWriteToConnected
+ }
+ if !c.fd.isConnected && addr == nil {
+ return 0, 0, errMissingAddress
+ }
+ sa, err := addr.sockaddr(c.fd.family)
+ if err != nil {
+ return 0, 0, err
+ }
+ return c.fd.writeMsg(b, oob, sa)
+}
+
+func (c *UDPConn) writeMsgAddrPort(b, oob []byte, addr netip.AddrPort) (n, oobn int, err error) {
+ if c.fd.isConnected && addr.IsValid() {
+ return 0, 0, ErrWriteToConnected
+ }
+ if !c.fd.isConnected && !addr.IsValid() {
+ return 0, 0, errMissingAddress
+ }
+
+ switch c.fd.family {
+ case syscall.AF_INET:
+ sa, err := addrPortToSockaddrInet4(addr)
+ if err != nil {
+ return 0, 0, err
+ }
+ return c.fd.writeMsgInet4(b, oob, &sa)
+ case syscall.AF_INET6:
+ sa, err := addrPortToSockaddrInet6(addr)
+ if err != nil {
+ return 0, 0, err
+ }
+ return c.fd.writeMsgInet6(b, oob, &sa)
+ default:
+ return 0, 0, &AddrError{Err: "invalid address family", Addr: addr.Addr().String()}
+ }
+}
+
+func (sd *sysDialer) dialUDP(ctx context.Context, laddr, raddr *UDPAddr) (*UDPConn, error) {
+ fd, err := internetSocket(ctx, sd.network, laddr, raddr, syscall.SOCK_DGRAM, 0, "dial", sd.Dialer.Control)
+ if err != nil {
+ return nil, err
+ }
+ return newUDPConn(fd), nil
+}
+
+func (sl *sysListener) listenUDP(ctx context.Context, laddr *UDPAddr) (*UDPConn, error) {
+ fd, err := internetSocket(ctx, sl.network, laddr, nil, syscall.SOCK_DGRAM, 0, "listen", sl.ListenConfig.Control)
+ if err != nil {
+ return nil, err
+ }
+ return newUDPConn(fd), nil
+}
+
+func (sl *sysListener) listenMulticastUDP(ctx context.Context, ifi *Interface, gaddr *UDPAddr) (*UDPConn, error) {
+ fd, err := internetSocket(ctx, sl.network, gaddr, nil, syscall.SOCK_DGRAM, 0, "listen", sl.ListenConfig.Control)
+ if err != nil {
+ return nil, err
+ }
+ c := newUDPConn(fd)
+ if ip4 := gaddr.IP.To4(); ip4 != nil {
+ if err := listenIPv4MulticastUDP(c, ifi, ip4); err != nil {
+ c.Close()
+ return nil, err
+ }
+ } else {
+ if err := listenIPv6MulticastUDP(c, ifi, gaddr.IP); err != nil {
+ c.Close()
+ return nil, err
+ }
+ }
+ return c, nil
+}
+
+func listenIPv4MulticastUDP(c *UDPConn, ifi *Interface, ip IP) error {
+ if ifi != nil {
+ if err := setIPv4MulticastInterface(c.fd, ifi); err != nil {
+ return err
+ }
+ }
+ if err := setIPv4MulticastLoopback(c.fd, false); err != nil {
+ return err
+ }
+ if err := joinIPv4Group(c.fd, ifi, ip); err != nil {
+ return err
+ }
+ return nil
+}
+
+func listenIPv6MulticastUDP(c *UDPConn, ifi *Interface, ip IP) error {
+ if ifi != nil {
+ if err := setIPv6MulticastInterface(c.fd, ifi); err != nil {
+ return err
+ }
+ }
+ if err := setIPv6MulticastLoopback(c.fd, false); err != nil {
+ return err
+ }
+ if err := joinIPv6Group(c.fd, ifi, ip); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/contrib/go/_std_1.18/src/net/unixsock.go b/contrib/go/_std_1.19/src/net/unixsock.go
index b38438c11a..b38438c11a 100644
--- a/contrib/go/_std_1.18/src/net/unixsock.go
+++ b/contrib/go/_std_1.19/src/net/unixsock.go
diff --git a/contrib/go/_std_1.19/src/net/unixsock_posix.go b/contrib/go/_std_1.19/src/net/unixsock_posix.go
new file mode 100644
index 0000000000..b244dbdbbd
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/unixsock_posix.go
@@ -0,0 +1,227 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm) || windows
+
+package net
+
+import (
+ "context"
+ "errors"
+ "os"
+ "syscall"
+)
+
+func unixSocket(ctx context.Context, net string, laddr, raddr sockaddr, mode string, ctrlFn func(string, string, syscall.RawConn) error) (*netFD, error) {
+ var sotype int
+ switch net {
+ case "unix":
+ sotype = syscall.SOCK_STREAM
+ case "unixgram":
+ sotype = syscall.SOCK_DGRAM
+ case "unixpacket":
+ sotype = syscall.SOCK_SEQPACKET
+ default:
+ return nil, UnknownNetworkError(net)
+ }
+
+ switch mode {
+ case "dial":
+ if laddr != nil && laddr.isWildcard() {
+ laddr = nil
+ }
+ if raddr != nil && raddr.isWildcard() {
+ raddr = nil
+ }
+ if raddr == nil && (sotype != syscall.SOCK_DGRAM || laddr == nil) {
+ return nil, errMissingAddress
+ }
+ case "listen":
+ default:
+ return nil, errors.New("unknown mode: " + mode)
+ }
+
+ fd, err := socket(ctx, net, syscall.AF_UNIX, sotype, 0, false, laddr, raddr, ctrlFn)
+ if err != nil {
+ return nil, err
+ }
+ return fd, nil
+}
+
+func sockaddrToUnix(sa syscall.Sockaddr) Addr {
+ if s, ok := sa.(*syscall.SockaddrUnix); ok {
+ return &UnixAddr{Name: s.Name, Net: "unix"}
+ }
+ return nil
+}
+
+func sockaddrToUnixgram(sa syscall.Sockaddr) Addr {
+ if s, ok := sa.(*syscall.SockaddrUnix); ok {
+ return &UnixAddr{Name: s.Name, Net: "unixgram"}
+ }
+ return nil
+}
+
+func sockaddrToUnixpacket(sa syscall.Sockaddr) Addr {
+ if s, ok := sa.(*syscall.SockaddrUnix); ok {
+ return &UnixAddr{Name: s.Name, Net: "unixpacket"}
+ }
+ return nil
+}
+
+func sotypeToNet(sotype int) string {
+ switch sotype {
+ case syscall.SOCK_STREAM:
+ return "unix"
+ case syscall.SOCK_DGRAM:
+ return "unixgram"
+ case syscall.SOCK_SEQPACKET:
+ return "unixpacket"
+ default:
+ panic("sotypeToNet unknown socket type")
+ }
+}
+
+func (a *UnixAddr) family() int {
+ return syscall.AF_UNIX
+}
+
+func (a *UnixAddr) sockaddr(family int) (syscall.Sockaddr, error) {
+ if a == nil {
+ return nil, nil
+ }
+ return &syscall.SockaddrUnix{Name: a.Name}, nil
+}
+
+func (a *UnixAddr) toLocal(net string) sockaddr {
+ return a
+}
+
+func (c *UnixConn) readFrom(b []byte) (int, *UnixAddr, error) {
+ var addr *UnixAddr
+ n, sa, err := c.fd.readFrom(b)
+ switch sa := sa.(type) {
+ case *syscall.SockaddrUnix:
+ if sa.Name != "" {
+ addr = &UnixAddr{Name: sa.Name, Net: sotypeToNet(c.fd.sotype)}
+ }
+ }
+ return n, addr, err
+}
+
+func (c *UnixConn) readMsg(b, oob []byte) (n, oobn, flags int, addr *UnixAddr, err error) {
+ var sa syscall.Sockaddr
+ n, oobn, flags, sa, err = c.fd.readMsg(b, oob, readMsgFlags)
+ if readMsgFlags == 0 && err == nil && oobn > 0 {
+ setReadMsgCloseOnExec(oob[:oobn])
+ }
+
+ switch sa := sa.(type) {
+ case *syscall.SockaddrUnix:
+ if sa.Name != "" {
+ addr = &UnixAddr{Name: sa.Name, Net: sotypeToNet(c.fd.sotype)}
+ }
+ }
+ return
+}
+
+func (c *UnixConn) writeTo(b []byte, addr *UnixAddr) (int, error) {
+ if c.fd.isConnected {
+ return 0, ErrWriteToConnected
+ }
+ if addr == nil {
+ return 0, errMissingAddress
+ }
+ if addr.Net != sotypeToNet(c.fd.sotype) {
+ return 0, syscall.EAFNOSUPPORT
+ }
+ sa := &syscall.SockaddrUnix{Name: addr.Name}
+ return c.fd.writeTo(b, sa)
+}
+
+func (c *UnixConn) writeMsg(b, oob []byte, addr *UnixAddr) (n, oobn int, err error) {
+ if c.fd.sotype == syscall.SOCK_DGRAM && c.fd.isConnected {
+ return 0, 0, ErrWriteToConnected
+ }
+ var sa syscall.Sockaddr
+ if addr != nil {
+ if addr.Net != sotypeToNet(c.fd.sotype) {
+ return 0, 0, syscall.EAFNOSUPPORT
+ }
+ sa = &syscall.SockaddrUnix{Name: addr.Name}
+ }
+ return c.fd.writeMsg(b, oob, sa)
+}
+
+func (sd *sysDialer) dialUnix(ctx context.Context, laddr, raddr *UnixAddr) (*UnixConn, error) {
+ fd, err := unixSocket(ctx, sd.network, laddr, raddr, "dial", sd.Dialer.Control)
+ if err != nil {
+ return nil, err
+ }
+ return newUnixConn(fd), nil
+}
+
+func (ln *UnixListener) accept() (*UnixConn, error) {
+ fd, err := ln.fd.accept()
+ if err != nil {
+ return nil, err
+ }
+ return newUnixConn(fd), nil
+}
+
+func (ln *UnixListener) close() error {
+ // The operating system doesn't clean up
+ // the socket file created when announcing,
+ // so we have to clean it up ourselves.
+ // There's a race here--we can't know for
+ // sure whether someone else has come along
+ // and replaced our socket name already--
+ // but this sequence (remove then close)
+ // is at least compatible with the auto-remove
+ // sequence in ListenUnix. It's only non-Go
+ // programs that can mess us up.
+ // Even if there are racy calls to Close, we want to unlink only for the first one.
+ ln.unlinkOnce.Do(func() {
+ if ln.path[0] != '@' && ln.unlink {
+ syscall.Unlink(ln.path)
+ }
+ })
+ return ln.fd.Close()
+}
+
+func (ln *UnixListener) file() (*os.File, error) {
+ f, err := ln.fd.dup()
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// SetUnlinkOnClose sets whether the underlying socket file should be removed
+// from the file system when the listener is closed.
+//
+// The default behavior is to unlink the socket file only when package net created it.
+// That is, when the listener and the underlying socket file were created by a call to
+// Listen or ListenUnix, then by default closing the listener will remove the socket file.
+// But if the listener was created by a call to FileListener to use an already existing
+// socket file, then by default closing the listener will not remove the socket file.
+func (l *UnixListener) SetUnlinkOnClose(unlink bool) {
+ l.unlink = unlink
+}
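+
+// Sketch ("/tmp/app.sock" is a hypothetical path):
+//
+//	ln, err := ListenUnix("unix", &UnixAddr{Name: "/tmp/app.sock", Net: "unix"})
+//	if err != nil {
+//		// handle error
+//	}
+//	ln.SetUnlinkOnClose(false) // keep /tmp/app.sock after Close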
+
+func (sl *sysListener) listenUnix(ctx context.Context, laddr *UnixAddr) (*UnixListener, error) {
+ fd, err := unixSocket(ctx, sl.network, laddr, nil, "listen", sl.ListenConfig.Control)
+ if err != nil {
+ return nil, err
+ }
+ return &UnixListener{fd: fd, path: fd.laddr.String(), unlink: true}, nil
+}
+
+func (sl *sysListener) listenUnixgram(ctx context.Context, laddr *UnixAddr) (*UnixConn, error) {
+ fd, err := unixSocket(ctx, sl.network, laddr, nil, "listen", sl.ListenConfig.Control)
+ if err != nil {
+ return nil, err
+ }
+ return newUnixConn(fd), nil
+}
diff --git a/contrib/go/_std_1.18/src/net/unixsock_readmsg_cloexec.go b/contrib/go/_std_1.19/src/net/unixsock_readmsg_cloexec.go
index fa4fd7d933..fa4fd7d933 100644
--- a/contrib/go/_std_1.18/src/net/unixsock_readmsg_cloexec.go
+++ b/contrib/go/_std_1.19/src/net/unixsock_readmsg_cloexec.go
diff --git a/contrib/go/_std_1.18/src/net/unixsock_readmsg_cmsg_cloexec.go b/contrib/go/_std_1.19/src/net/unixsock_readmsg_cmsg_cloexec.go
index 6b0de875ad..6b0de875ad 100644
--- a/contrib/go/_std_1.18/src/net/unixsock_readmsg_cmsg_cloexec.go
+++ b/contrib/go/_std_1.19/src/net/unixsock_readmsg_cmsg_cloexec.go
diff --git a/contrib/go/_std_1.19/src/net/url/url.go b/contrib/go/_std_1.19/src/net/url/url.go
new file mode 100644
index 0000000000..d7d2d54a0d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/net/url/url.go
@@ -0,0 +1,1265 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package url parses URLs and implements query escaping.
+package url
+
+// See RFC 3986. This package generally follows RFC 3986, except where
+// it deviates for compatibility reasons. When sending changes, first
+// search old issues for history on decisions. Unit tests should also
+// contain references to issue numbers with details.
+
+import (
+ "errors"
+ "fmt"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Error reports an error and the operation and URL that caused it.
+type Error struct {
+ Op string
+ URL string
+ Err error
+}
+
+func (e *Error) Unwrap() error { return e.Err }
+func (e *Error) Error() string { return fmt.Sprintf("%s %q: %s", e.Op, e.URL, e.Err) }
+
+func (e *Error) Timeout() bool {
+ t, ok := e.Err.(interface {
+ Timeout() bool
+ })
+ return ok && t.Timeout()
+}
+
+func (e *Error) Temporary() bool {
+ t, ok := e.Err.(interface {
+ Temporary() bool
+ })
+ return ok && t.Temporary()
+}
+
+const upperhex = "0123456789ABCDEF"
+
+func ishex(c byte) bool {
+ switch {
+ case '0' <= c && c <= '9':
+ return true
+ case 'a' <= c && c <= 'f':
+ return true
+ case 'A' <= c && c <= 'F':
+ return true
+ }
+ return false
+}
+
+func unhex(c byte) byte {
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0'
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10
+ }
+ return 0
+}
+
+type encoding int
+
+const (
+ encodePath encoding = 1 + iota
+ encodePathSegment
+ encodeHost
+ encodeZone
+ encodeUserPassword
+ encodeQueryComponent
+ encodeFragment
+)
+
+type EscapeError string
+
+func (e EscapeError) Error() string {
+ return "invalid URL escape " + strconv.Quote(string(e))
+}
+
+type InvalidHostError string
+
+func (e InvalidHostError) Error() string {
+ return "invalid character " + strconv.Quote(string(e)) + " in host name"
+}
+
+// Return true if the specified character should be escaped when
+// appearing in a URL string, according to RFC 3986.
+//
+// Note that shouldEscape does not yet check all reserved characters
+// correctly. See golang.org/issue/5684.
+func shouldEscape(c byte, mode encoding) bool {
+ // §2.3 Unreserved characters (alphanum)
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+ return false
+ }
+
+ if mode == encodeHost || mode == encodeZone {
+ // §3.2.2 Host allows
+ // sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "="
+ // as part of reg-name.
+ // We add : because we include :port as part of host.
+ // We add [ ] because we include [ipv6]:port as part of host.
+ // We add < > because they're the only characters left that
+ // we could possibly allow, and Parse will reject them if we
+ // escape them (because hosts can't use %-encoding for
+ // ASCII bytes).
+ switch c {
+ case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=', ':', '[', ']', '<', '>', '"':
+ return false
+ }
+ }
+
+ switch c {
+ case '-', '_', '.', '~': // §2.3 Unreserved characters (mark)
+ return false
+
+ case '$', '&', '+', ',', '/', ':', ';', '=', '?', '@': // §2.2 Reserved characters (reserved)
+ // Different sections of the URL allow a few of
+ // the reserved characters to appear unescaped.
+ switch mode {
+ case encodePath: // §3.3
+ // The RFC allows : @ & = + $ but saves / ; , for assigning
+ // meaning to individual path segments. This package
+ // only manipulates the path as a whole, so we allow those
+ // last three as well. That leaves only ? to escape.
+ return c == '?'
+
+ case encodePathSegment: // §3.3
+ // The RFC allows : @ & = + $ but saves / ; , for assigning
+ // meaning to individual path segments.
+ return c == '/' || c == ';' || c == ',' || c == '?'
+
+ case encodeUserPassword: // §3.2.1
+ // The RFC allows ';', ':', '&', '=', '+', '$', and ',' in
+ // userinfo, so we must escape only '@', '/', and '?'.
+ // The parsing of userinfo treats ':' as special so we must escape
+ // that too.
+ return c == '@' || c == '/' || c == '?' || c == ':'
+
+ case encodeQueryComponent: // §3.4
+ // The RFC reserves (so we must escape) everything.
+ return true
+
+ case encodeFragment: // §4.1
+ // The RFC text is silent but the grammar allows
+ // everything, so escape nothing.
+ return false
+ }
+ }
+
+ if mode == encodeFragment {
+ // RFC 3986 §2.2 allows not escaping sub-delims. A subset of sub-delims are
+ // included in reserved from RFC 2396 §2.2. The remaining sub-delims do not
+ // need to be escaped. To minimize potential breakage, we apply two restrictions:
+ // (1) we always escape sub-delims outside of the fragment, and (2) we always
+ // escape single quote to avoid breaking callers that had previously assumed that
+ // single quotes would be escaped. See issue #19917.
+ switch c {
+ case '!', '(', ')', '*':
+ return false
+ }
+ }
+
+ // Everything else must be escaped.
+ return true
+}
+
+// QueryUnescape does the inverse transformation of QueryEscape,
+// converting each 3-byte encoded substring of the form "%AB" into the
+// hex-decoded byte 0xAB.
+// It returns an error if any % is not followed by two hexadecimal
+// digits.
+func QueryUnescape(s string) (string, error) {
+ return unescape(s, encodeQueryComponent)
+}
+
+// PathUnescape does the inverse transformation of PathEscape,
+// converting each 3-byte encoded substring of the form "%AB" into the
+// hex-decoded byte 0xAB. It returns an error if any % is not followed
+// by two hexadecimal digits.
+//
+// PathUnescape is identical to QueryUnescape except that it does not
+// unescape '+' to ' ' (space).
+func PathUnescape(s string) (string, error) {
+ return unescape(s, encodePathSegment)
+}
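+
+// A sketch of the difference in '+' handling:
+//
+//	q, _ := QueryUnescape("a+b%2Fc") // "a b/c": '+' becomes a space
+//	p, _ := PathUnescape("a+b%2Fc")  // "a+b/c": '+' is left as-is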
+
+// unescape unescapes a string; the mode specifies
+// which section of the URL string is being unescaped.
+func unescape(s string, mode encoding) (string, error) {
+ // Count %, check that they're well-formed.
+ n := 0
+ hasPlus := false
+ for i := 0; i < len(s); {
+ switch s[i] {
+ case '%':
+ n++
+ if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
+ s = s[i:]
+ if len(s) > 3 {
+ s = s[:3]
+ }
+ return "", EscapeError(s)
+ }
+ // Per https://tools.ietf.org/html/rfc3986#page-21
+ // in the host component %-encoding can only be used
+ // for non-ASCII bytes.
+ // But https://tools.ietf.org/html/rfc6874#section-2
+ // introduces %25 being allowed to escape a percent sign
+ // in IPv6 scoped-address literals. Yay.
+ if mode == encodeHost && unhex(s[i+1]) < 8 && s[i:i+3] != "%25" {
+ return "", EscapeError(s[i : i+3])
+ }
+ if mode == encodeZone {
+ // RFC 6874 says basically "anything goes" for zone identifiers
+ // and that even non-ASCII can be redundantly escaped,
+ // but it seems prudent to restrict %-escaped bytes here to those
+ // that are valid host name bytes in their unescaped form.
+ // That is, you can use escaping in the zone identifier but not
+ // to introduce bytes you couldn't just write directly.
+ // But Windows puts spaces here! Yay.
+ v := unhex(s[i+1])<<4 | unhex(s[i+2])
+ if s[i:i+3] != "%25" && v != ' ' && shouldEscape(v, encodeHost) {
+ return "", EscapeError(s[i : i+3])
+ }
+ }
+ i += 3
+ case '+':
+ hasPlus = mode == encodeQueryComponent
+ i++
+ default:
+ if (mode == encodeHost || mode == encodeZone) && s[i] < 0x80 && shouldEscape(s[i], mode) {
+ return "", InvalidHostError(s[i : i+1])
+ }
+ i++
+ }
+ }
+
+ if n == 0 && !hasPlus {
+ return s, nil
+ }
+
+ var t strings.Builder
+ t.Grow(len(s) - 2*n)
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '%':
+ t.WriteByte(unhex(s[i+1])<<4 | unhex(s[i+2]))
+ i += 2
+ case '+':
+ if mode == encodeQueryComponent {
+ t.WriteByte(' ')
+ } else {
+ t.WriteByte('+')
+ }
+ default:
+ t.WriteByte(s[i])
+ }
+ }
+ return t.String(), nil
+}
+
+// QueryEscape escapes the string so it can be safely placed
+// inside a URL query.
+func QueryEscape(s string) string {
+ return escape(s, encodeQueryComponent)
+}
+
+// PathEscape escapes the string so it can be safely placed inside a URL path segment,
+// replacing special characters (including /) with %XX sequences as needed.
+func PathEscape(s string) string {
+ return escape(s, encodePathSegment)
+}
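+
+// The two escapers treat the same input differently:
+//
+//	QueryEscape("a/b c") // "a%2Fb+c": '/' escaped, space becomes '+'
+//	PathEscape("a/b c")  // "a%2Fb%20c": '/' escaped, space becomes %20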
+
+func escape(s string, mode encoding) string {
+ spaceCount, hexCount := 0, 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if shouldEscape(c, mode) {
+ if c == ' ' && mode == encodeQueryComponent {
+ spaceCount++
+ } else {
+ hexCount++
+ }
+ }
+ }
+
+ if spaceCount == 0 && hexCount == 0 {
+ return s
+ }
+
+ var buf [64]byte
+ var t []byte
+
+ required := len(s) + 2*hexCount
+ if required <= len(buf) {
+ t = buf[:required]
+ } else {
+ t = make([]byte, required)
+ }
+
+ if hexCount == 0 {
+ copy(t, s)
+ for i := 0; i < len(s); i++ {
+ if s[i] == ' ' {
+ t[i] = '+'
+ }
+ }
+ return string(t)
+ }
+
+ j := 0
+ for i := 0; i < len(s); i++ {
+ switch c := s[i]; {
+ case c == ' ' && mode == encodeQueryComponent:
+ t[j] = '+'
+ j++
+ case shouldEscape(c, mode):
+ t[j] = '%'
+ t[j+1] = upperhex[c>>4]
+ t[j+2] = upperhex[c&15]
+ j += 3
+ default:
+ t[j] = s[i]
+ j++
+ }
+ }
+ return string(t)
+}
+
+// A URL represents a parsed URL (technically, a URI reference).
+//
+// The general form represented is:
+//
+// [scheme:][//[userinfo@]host][/]path[?query][#fragment]
+//
+// URLs that do not start with a slash after the scheme are interpreted as:
+//
+// scheme:opaque[?query][#fragment]
+//
+// Note that the Path field is stored in decoded form: /%47%6f%2f becomes /Go/.
+// A consequence is that it is impossible to tell which slashes in the Path were
+// slashes in the raw URL and which were %2f. This distinction is rarely important,
+// but when it is, the code should use RawPath, an optional field which only gets
+// set if the default encoding is different from Path.
+//
+// URL's String method uses the EscapedPath method to obtain the path. See the
+// EscapedPath method for more details.
+type URL struct {
+ Scheme string
+ Opaque string // encoded opaque data
+ User *Userinfo // username and password information
+ Host string // host or host:port
+ Path string // path (relative paths may omit leading slash)
+ RawPath string // encoded path hint (see EscapedPath method)
+ OmitHost bool // do not emit empty host (authority)
+ ForceQuery bool // append a query ('?') even if RawQuery is empty
+ RawQuery string // encoded query values, without '?'
+ Fragment string // fragment for references, without '#'
+ RawFragment string // encoded fragment hint (see EscapedFragment method)
+}
+
+// User returns a Userinfo containing the provided username
+// and no password set.
+func User(username string) *Userinfo {
+ return &Userinfo{username, "", false}
+}
+
+// UserPassword returns a Userinfo containing the provided username
+// and password.
+//
+// This functionality should only be used with legacy web sites.
+// RFC 2396 warns that interpreting Userinfo this way
+// “is NOT RECOMMENDED, because the passing of authentication
+// information in clear text (such as URI) has proven to be a
+// security risk in almost every case where it has been used.”
+func UserPassword(username, password string) *Userinfo {
+ return &Userinfo{username, password, true}
+}
+
+// The Userinfo type is an immutable encapsulation of username and
+// password details for a URL. An existing Userinfo value is guaranteed
+// to have a username set (potentially empty, as allowed by RFC 2396),
+// and optionally a password.
+type Userinfo struct {
+ username string
+ password string
+ passwordSet bool
+}
+
+// Username returns the username.
+func (u *Userinfo) Username() string {
+ if u == nil {
+ return ""
+ }
+ return u.username
+}
+
+// Password returns the password in case it is set, and whether it is set.
+func (u *Userinfo) Password() (string, bool) {
+ if u == nil {
+ return "", false
+ }
+ return u.password, u.passwordSet
+}
+
+// String returns the encoded userinfo information in the standard form
+// of "username[:password]".
+func (u *Userinfo) String() string {
+ if u == nil {
+ return ""
+ }
+ s := escape(u.username, encodeUserPassword)
+ if u.passwordSet {
+ s += ":" + escape(u.password, encodeUserPassword)
+ }
+ return s
+}
+
+// Maybe rawURL is of the form scheme:path.
+// (Scheme must be [a-zA-Z][a-zA-Z0-9+.-]*)
+// If so, return scheme, path; else return "", rawURL.
+func getScheme(rawURL string) (scheme, path string, err error) {
+ for i := 0; i < len(rawURL); i++ {
+ c := rawURL[i]
+ switch {
+ case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
+ // do nothing
+ case '0' <= c && c <= '9' || c == '+' || c == '-' || c == '.':
+ if i == 0 {
+ return "", rawURL, nil
+ }
+ case c == ':':
+ if i == 0 {
+ return "", "", errors.New("missing protocol scheme")
+ }
+ return rawURL[:i], rawURL[i+1:], nil
+ default:
+ // we have encountered an invalid character,
+ // so there is no valid scheme
+ return "", rawURL, nil
+ }
+ }
+ return "", rawURL, nil
+}
+
+// Parse parses a raw url into a URL structure.
+//
+// The url may be relative (a path, without a host) or absolute
+// (starting with a scheme). Trying to parse a hostname and path
+// without a scheme is invalid but may not necessarily return an
+// error, due to parsing ambiguities.
+func Parse(rawURL string) (*URL, error) {
+ // Cut off #frag
+ u, frag, _ := strings.Cut(rawURL, "#")
+ url, err := parse(u, false)
+ if err != nil {
+ return nil, &Error{"parse", u, err}
+ }
+ if frag == "" {
+ return url, nil
+ }
+ if err = url.setFragment(frag); err != nil {
+ return nil, &Error{"parse", rawURL, err}
+ }
+ return url, nil
+}
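+
+// A parsing sketch (example.com is a placeholder host):
+//
+//	u, err := Parse("https://user@example.com:8080/p?q=1#frag")
+//	if err != nil {
+//		// handle error
+//	}
+//	// u.Scheme == "https", u.User.Username() == "user",
+//	// u.Host == "example.com:8080", u.Path == "/p",
+//	// u.RawQuery == "q=1", u.Fragment == "frag"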
+
+// ParseRequestURI parses a raw url into a URL structure. It assumes that
+// url was received in an HTTP request, so the url is interpreted
+// only as an absolute URI or an absolute path.
+// The string url is assumed not to have a #fragment suffix.
+// (Web browsers strip #fragment before sending the URL to a web server.)
+func ParseRequestURI(rawURL string) (*URL, error) {
+ url, err := parse(rawURL, true)
+ if err != nil {
+ return nil, &Error{"parse", rawURL, err}
+ }
+ return url, nil
+}
+
+// parse parses a URL from a string in one of two contexts. If
+// viaRequest is true, the URL is assumed to have arrived via an HTTP request,
+// in which case only absolute URLs or path-absolute relative URLs are allowed.
+// If viaRequest is false, all forms of relative URLs are allowed.
+func parse(rawURL string, viaRequest bool) (*URL, error) {
+ var rest string
+ var err error
+
+ if stringContainsCTLByte(rawURL) {
+ return nil, errors.New("net/url: invalid control character in URL")
+ }
+
+ if rawURL == "" && viaRequest {
+ return nil, errors.New("empty url")
+ }
+ url := new(URL)
+
+ if rawURL == "*" {
+ url.Path = "*"
+ return url, nil
+ }
+
+ // Split off possible leading "http:", "mailto:", etc.
+ // Cannot contain escaped characters.
+ if url.Scheme, rest, err = getScheme(rawURL); err != nil {
+ return nil, err
+ }
+ url.Scheme = strings.ToLower(url.Scheme)
+
+ if strings.HasSuffix(rest, "?") && strings.Count(rest, "?") == 1 {
+ url.ForceQuery = true
+ rest = rest[:len(rest)-1]
+ } else {
+ rest, url.RawQuery, _ = strings.Cut(rest, "?")
+ }
+
+ if !strings.HasPrefix(rest, "/") {
+ if url.Scheme != "" {
+ // We consider rootless paths per RFC 3986 as opaque.
+ url.Opaque = rest
+ return url, nil
+ }
+ if viaRequest {
+ return nil, errors.New("invalid URI for request")
+ }
+
+ // Avoid confusion with malformed schemes, like cache_object:foo/bar.
+ // See golang.org/issue/16822.
+ //
+ // RFC 3986, §3.3:
+ // In addition, a URI reference (Section 4.1) may be a relative-path reference,
+ // in which case the first path segment cannot contain a colon (":") character.
+ if segment, _, _ := strings.Cut(rest, "/"); strings.Contains(segment, ":") {
+ // First path segment has colon. Not allowed in relative URL.
+ return nil, errors.New("first path segment in URL cannot contain colon")
+ }
+ }
+
+ if (url.Scheme != "" || !viaRequest && !strings.HasPrefix(rest, "///")) && strings.HasPrefix(rest, "//") {
+ var authority string
+ authority, rest = rest[2:], ""
+ if i := strings.Index(authority, "/"); i >= 0 {
+ authority, rest = authority[:i], authority[i:]
+ }
+ url.User, url.Host, err = parseAuthority(authority)
+ if err != nil {
+ return nil, err
+ }
+ } else if url.Scheme != "" && strings.HasPrefix(rest, "/") {
+ // OmitHost is set to true when rawURL has an empty host (authority).
+ // See golang.org/issue/46059.
+ url.OmitHost = true
+ }
+
+ // Set Path and, optionally, RawPath.
+ // RawPath is a hint of the encoding of Path. We don't want to set it if
+ // the default escaping of Path is equivalent, to help make sure that people
+ // don't rely on it in general.
+ if err := url.setPath(rest); err != nil {
+ return nil, err
+ }
+ return url, nil
+}
+
+func parseAuthority(authority string) (user *Userinfo, host string, err error) {
+ i := strings.LastIndex(authority, "@")
+ if i < 0 {
+ host, err = parseHost(authority)
+ } else {
+ host, err = parseHost(authority[i+1:])
+ }
+ if err != nil {
+ return nil, "", err
+ }
+ if i < 0 {
+ return nil, host, nil
+ }
+ userinfo := authority[:i]
+ if !validUserinfo(userinfo) {
+ return nil, "", errors.New("net/url: invalid userinfo")
+ }
+ if !strings.Contains(userinfo, ":") {
+ if userinfo, err = unescape(userinfo, encodeUserPassword); err != nil {
+ return nil, "", err
+ }
+ user = User(userinfo)
+ } else {
+ username, password, _ := strings.Cut(userinfo, ":")
+ if username, err = unescape(username, encodeUserPassword); err != nil {
+ return nil, "", err
+ }
+ if password, err = unescape(password, encodeUserPassword); err != nil {
+ return nil, "", err
+ }
+ user = UserPassword(username, password)
+ }
+ return user, host, nil
+}
+
+// parseHost parses host as an authority without user
+// information. That is, as host[:port].
+func parseHost(host string) (string, error) {
+ if strings.HasPrefix(host, "[") {
+ // Parse an IP-Literal in RFC 3986 and RFC 6874.
+ // E.g., "[fe80::1]", "[fe80::1%25en0]", "[fe80::1]:80".
+ i := strings.LastIndex(host, "]")
+ if i < 0 {
+ return "", errors.New("missing ']' in host")
+ }
+ colonPort := host[i+1:]
+ if !validOptionalPort(colonPort) {
+ return "", fmt.Errorf("invalid port %q after host", colonPort)
+ }
+
+ // RFC 6874 defines that %25 (%-encoded percent) introduces
+ // the zone identifier, and the zone identifier can use basically
+ // any %-encoding it likes. That's different from the host, which
+ // can only %-encode non-ASCII bytes.
+ // We do impose some restrictions on the zone, to avoid stupidity
+ // like newlines.
+ zone := strings.Index(host[:i], "%25")
+ if zone >= 0 {
+ host1, err := unescape(host[:zone], encodeHost)
+ if err != nil {
+ return "", err
+ }
+ host2, err := unescape(host[zone:i], encodeZone)
+ if err != nil {
+ return "", err
+ }
+ host3, err := unescape(host[i:], encodeHost)
+ if err != nil {
+ return "", err
+ }
+ return host1 + host2 + host3, nil
+ }
+ } else if i := strings.LastIndex(host, ":"); i != -1 {
+ colonPort := host[i:]
+ if !validOptionalPort(colonPort) {
+ return "", fmt.Errorf("invalid port %q after host", colonPort)
+ }
+ }
+
+ var err error
+ if host, err = unescape(host, encodeHost); err != nil {
+ return "", err
+ }
+ return host, nil
+}
+
+// setPath sets the Path and RawPath fields of the URL based on the provided
+// escaped path p. It maintains the invariant that RawPath is only specified
+// when it differs from the default encoding of the path.
+// For example:
+// - setPath("/foo/bar") will set Path="/foo/bar" and RawPath=""
+// - setPath("/foo%2fbar") will set Path="/foo/bar" and RawPath="/foo%2fbar"
+// setPath will return an error only if the provided path contains an invalid
+// escaping.
+func (u *URL) setPath(p string) error {
+ path, err := unescape(p, encodePath)
+ if err != nil {
+ return err
+ }
+ u.Path = path
+ if escp := escape(path, encodePath); p == escp {
+ // Default encoding is fine.
+ u.RawPath = ""
+ } else {
+ u.RawPath = p
+ }
+ return nil
+}
+
+// EscapedPath returns the escaped form of u.Path.
+// In general there are multiple possible escaped forms of any path.
+// EscapedPath returns u.RawPath when it is a valid escaping of u.Path.
+// Otherwise EscapedPath ignores u.RawPath and computes an escaped
+// form on its own.
+// The String and RequestURI methods use EscapedPath to construct
+// their results.
+// In general, code should call EscapedPath instead of
+// reading u.RawPath directly.
+func (u *URL) EscapedPath() string {
+ if u.RawPath != "" && validEncoded(u.RawPath, encodePath) {
+ p, err := unescape(u.RawPath, encodePath)
+ if err == nil && p == u.Path {
+ return u.RawPath
+ }
+ }
+ if u.Path == "*" {
+ return "*" // don't escape (Issue 11202)
+ }
+ return escape(u.Path, encodePath)
+}
+
+// validEncoded reports whether s is a valid encoded path or fragment,
+// according to mode.
+// It must not contain any bytes that require escaping during encoding.
+func validEncoded(s string, mode encoding) bool {
+ for i := 0; i < len(s); i++ {
+ // RFC 3986, Appendix A.
+ // pchar = unreserved / pct-encoded / sub-delims / ":" / "@".
+ // shouldEscape is not quite compliant with the RFC,
+ // so we check the sub-delims ourselves and let
+ // shouldEscape handle the others.
+ switch s[i] {
+ case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=', ':', '@':
+ // ok
+ case '[', ']':
+ // ok - not specified in RFC 3986 but left alone by modern browsers
+ case '%':
+ // ok - percent encoded, will decode
+ default:
+ if shouldEscape(s[i], mode) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// setFragment is like setPath but for Fragment/RawFragment.
+func (u *URL) setFragment(f string) error {
+ frag, err := unescape(f, encodeFragment)
+ if err != nil {
+ return err
+ }
+ u.Fragment = frag
+ if escf := escape(frag, encodeFragment); f == escf {
+ // Default encoding is fine.
+ u.RawFragment = ""
+ } else {
+ u.RawFragment = f
+ }
+ return nil
+}
+
+// EscapedFragment returns the escaped form of u.Fragment.
+// In general there are multiple possible escaped forms of any fragment.
+// EscapedFragment returns u.RawFragment when it is a valid escaping of u.Fragment.
+// Otherwise EscapedFragment ignores u.RawFragment and computes an escaped
+// form on its own.
+// The String method uses EscapedFragment to construct its result.
+// In general, code should call EscapedFragment instead of
+// reading u.RawFragment directly.
+func (u *URL) EscapedFragment() string {
+ if u.RawFragment != "" && validEncoded(u.RawFragment, encodeFragment) {
+ f, err := unescape(u.RawFragment, encodeFragment)
+ if err == nil && f == u.Fragment {
+ return u.RawFragment
+ }
+ }
+ return escape(u.Fragment, encodeFragment)
+}
+
+// validOptionalPort reports whether port is either an empty string
+// or matches /^:\d*$/
+func validOptionalPort(port string) bool {
+ if port == "" {
+ return true
+ }
+ if port[0] != ':' {
+ return false
+ }
+ for _, b := range port[1:] {
+ if b < '0' || b > '9' {
+ return false
+ }
+ }
+ return true
+}
+
+// String reassembles the URL into a valid URL string.
+// The general form of the result is one of:
+//
+// scheme:opaque?query#fragment
+// scheme://userinfo@host/path?query#fragment
+//
+// If u.Opaque is non-empty, String uses the first form;
+// otherwise it uses the second form.
+// Any non-ASCII characters in host are escaped.
+// To obtain the path, String uses u.EscapedPath().
+//
+// In the second form, the following rules apply:
+// - if u.Scheme is empty, scheme: is omitted.
+// - if u.User is nil, userinfo@ is omitted.
+// - if u.Host is empty, host/ is omitted.
+// - if u.Scheme and u.Host are empty and u.User is nil,
+// the entire scheme://userinfo@host/ is omitted.
+// - if u.Host is non-empty and u.Path begins with a /,
+// the form host/path does not add its own /.
+// - if u.RawQuery is empty, ?query is omitted.
+// - if u.Fragment is empty, #fragment is omitted.
+func (u *URL) String() string {
+ var buf strings.Builder
+ if u.Scheme != "" {
+ buf.WriteString(u.Scheme)
+ buf.WriteByte(':')
+ }
+ if u.Opaque != "" {
+ buf.WriteString(u.Opaque)
+ } else {
+ if u.Scheme != "" || u.Host != "" || u.User != nil {
+ if u.OmitHost && u.Host == "" && u.User == nil {
+ // omit empty host
+ } else {
+ if u.Host != "" || u.Path != "" || u.User != nil {
+ buf.WriteString("//")
+ }
+ if ui := u.User; ui != nil {
+ buf.WriteString(ui.String())
+ buf.WriteByte('@')
+ }
+ if h := u.Host; h != "" {
+ buf.WriteString(escape(h, encodeHost))
+ }
+ }
+ }
+ path := u.EscapedPath()
+ if path != "" && path[0] != '/' && u.Host != "" {
+ buf.WriteByte('/')
+ }
+ if buf.Len() == 0 {
+ // RFC 3986 §4.2
+ // A path segment that contains a colon character (e.g., "this:that")
+ // cannot be used as the first segment of a relative-path reference, as
+ // it would be mistaken for a scheme name. Such a segment must be
+ // preceded by a dot-segment (e.g., "./this:that") to make a relative-
+ // path reference.
+ if segment, _, _ := strings.Cut(path, "/"); strings.Contains(segment, ":") {
+ buf.WriteString("./")
+ }
+ }
+ buf.WriteString(path)
+ }
+ if u.ForceQuery || u.RawQuery != "" {
+ buf.WriteByte('?')
+ buf.WriteString(u.RawQuery)
+ }
+ if u.Fragment != "" {
+ buf.WriteByte('#')
+ buf.WriteString(u.EscapedFragment())
+ }
+ return buf.String()
+}
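+
+// A round-trip sketch showing the path escaping rule above:
+//
+//	u := &URL{Scheme: "https", Host: "example.com", Path: "/a b"}
+//	u.String() // "https://example.com/a%20b"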
+
+// Redacted is like String but replaces any password with "xxxxx".
+// Only the password in u.URL is redacted.
+func (u *URL) Redacted() string {
+ if u == nil {
+ return ""
+ }
+
+ ru := *u
+ if _, has := ru.User.Password(); has {
+ ru.User = UserPassword(ru.User.Username(), "xxxxx")
+ }
+ return ru.String()
+}
+
+// Values maps a string key to a list of values.
+// It is typically used for query parameters and form values.
+// Unlike in the http.Header map, the keys in a Values map
+// are case-sensitive.
+type Values map[string][]string
+
+// Get gets the first value associated with the given key.
+// If there are no values associated with the key, Get returns
+// the empty string. To access multiple values, use the map
+// directly.
+func (v Values) Get(key string) string {
+ if v == nil {
+ return ""
+ }
+ vs := v[key]
+ if len(vs) == 0 {
+ return ""
+ }
+ return vs[0]
+}
+
+// Set sets the key to value. It replaces any existing
+// values.
+func (v Values) Set(key, value string) {
+ v[key] = []string{value}
+}
+
+// Add adds the value to key. It appends to any existing
+// values associated with key.
+func (v Values) Add(key, value string) {
+ v[key] = append(v[key], value)
+}
+
+// Del deletes the values associated with key.
+func (v Values) Del(key string) {
+ delete(v, key)
+}
+
+// Has checks whether a given key is set.
+func (v Values) Has(key string) bool {
+ _, ok := v[key]
+ return ok
+}
+
+// ParseQuery parses the URL-encoded query string and returns
+// a map listing the values specified for each key.
+// ParseQuery always returns a non-nil map containing all the
+// valid query parameters found; err describes the first decoding error
+// encountered, if any.
+//
+// Query is expected to be a list of key=value settings separated by ampersands.
+// A setting without an equals sign is interpreted as a key set to an empty
+// value.
+// Settings containing a non-URL-encoded semicolon are considered invalid.
+func ParseQuery(query string) (Values, error) {
+ m := make(Values)
+ err := parseQuery(m, query)
+ return m, err
+}
+
+func parseQuery(m Values, query string) (err error) {
+ for query != "" {
+ var key string
+ key, query, _ = strings.Cut(query, "&")
+ if strings.Contains(key, ";") {
+ err = fmt.Errorf("invalid semicolon separator in query")
+ continue
+ }
+ if key == "" {
+ continue
+ }
+ key, value, _ := strings.Cut(key, "=")
+ key, err1 := QueryUnescape(key)
+ if err1 != nil {
+ if err == nil {
+ err = err1
+ }
+ continue
+ }
+ value, err1 = QueryUnescape(value)
+ if err1 != nil {
+ if err == nil {
+ err = err1
+ }
+ continue
+ }
+ m[key] = append(m[key], value)
+ }
+ return err
+}
+
+// Encode encodes the values into “URL encoded” form
+// ("bar=baz&foo=quux") sorted by key.
+func (v Values) Encode() string {
+ if v == nil {
+ return ""
+ }
+ var buf strings.Builder
+ keys := make([]string, 0, len(v))
+ for k := range v {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ vs := v[k]
+ keyEscaped := QueryEscape(k)
+ for _, v := range vs {
+ if buf.Len() > 0 {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(keyEscaped)
+ buf.WriteByte('=')
+ buf.WriteString(QueryEscape(v))
+ }
+ }
+ return buf.String()
+}
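+
+// A sketch showing that Encode output is deterministic because keys
+// are sorted:
+//
+//	v := Values{}
+//	v.Set("foo", "quux")
+//	v.Add("bar", "baz")
+//	v.Encode() // "bar=baz&foo=quux"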
+
+// resolvePath applies special path segments from refs and applies
+// them to base, per RFC 3986.
+func resolvePath(base, ref string) string {
+ var full string
+ if ref == "" {
+ full = base
+ } else if ref[0] != '/' {
+ i := strings.LastIndex(base, "/")
+ full = base[:i+1] + ref
+ } else {
+ full = ref
+ }
+ if full == "" {
+ return ""
+ }
+
+ var (
+ elem string
+ dst strings.Builder
+ )
+ first := true
+ remaining := full
+ // We want to return a leading '/', so write it now.
+ dst.WriteByte('/')
+ found := true
+ for found {
+ elem, remaining, found = strings.Cut(remaining, "/")
+ if elem == "." {
+ first = false
+ // drop
+ continue
+ }
+
+ if elem == ".." {
+ // Ignore the leading '/' we already wrote.
+ str := dst.String()[1:]
+ index := strings.LastIndexByte(str, '/')
+
+ dst.Reset()
+ dst.WriteByte('/')
+ if index == -1 {
+ first = true
+ } else {
+ dst.WriteString(str[:index])
+ }
+ } else {
+ if !first {
+ dst.WriteByte('/')
+ }
+ dst.WriteString(elem)
+ first = false
+ }
+ }
+
+ if elem == "." || elem == ".." {
+ dst.WriteByte('/')
+ }
+
+ // We wrote an initial '/', but we don't want two.
+ r := dst.String()
+ if len(r) > 1 && r[1] == '/' {
+ r = r[1:]
+ }
+ return r
+}
+
+// IsAbs reports whether the URL is absolute.
+// Absolute means that it has a non-empty scheme.
+func (u *URL) IsAbs() bool {
+ return u.Scheme != ""
+}
+
+// Parse parses a URL in the context of the receiver. The provided URL
+// may be relative or absolute. Parse returns nil, err on parse
+// failure, otherwise its return value is the same as ResolveReference.
+func (u *URL) Parse(ref string) (*URL, error) {
+ refURL, err := Parse(ref)
+ if err != nil {
+ return nil, err
+ }
+ return u.ResolveReference(refURL), nil
+}
+
+// ResolveReference resolves a URI reference to an absolute URI from
+// an absolute base URI u, per RFC 3986 Section 5.2. The URI reference
+// may be relative or absolute. ResolveReference always returns a new
+// URL instance, even if the returned URL is identical to either the
+// base or reference. If ref is an absolute URL, then ResolveReference
+// ignores base and returns a copy of ref.
+func (u *URL) ResolveReference(ref *URL) *URL {
+ url := *ref
+ if ref.Scheme == "" {
+ url.Scheme = u.Scheme
+ }
+ if ref.Scheme != "" || ref.Host != "" || ref.User != nil {
+ // The "absoluteURI" or "net_path" cases.
+ // We can ignore the error from setPath since we know we provided a
+ // validly-escaped path.
+ url.setPath(resolvePath(ref.EscapedPath(), ""))
+ return &url
+ }
+ if ref.Opaque != "" {
+ url.User = nil
+ url.Host = ""
+ url.Path = ""
+ return &url
+ }
+ if ref.Path == "" && !ref.ForceQuery && ref.RawQuery == "" {
+ url.RawQuery = u.RawQuery
+ if ref.Fragment == "" {
+ url.Fragment = u.Fragment
+ url.RawFragment = u.RawFragment
+ }
+ }
+ // The "abs_path" or "rel_path" cases.
+ url.Host = u.Host
+ url.User = u.User
+ url.setPath(resolvePath(u.EscapedPath(), ref.EscapedPath()))
+ return &url
+}
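+
+// A resolution sketch (example.com is a placeholder):
+//
+//	base, _ := Parse("https://example.com/a/b")
+//	ref, _ := Parse("../c")
+//	base.ResolveReference(ref).String() // "https://example.com/c"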
+
+// Query parses RawQuery and returns the corresponding values.
+// It silently discards malformed value pairs.
+// To check errors use ParseQuery.
+func (u *URL) Query() Values {
+ v, _ := ParseQuery(u.RawQuery)
+ return v
+}
+
+// RequestURI returns the encoded path?query or opaque?query
+// string that would be used in an HTTP request for u.
+func (u *URL) RequestURI() string {
+ result := u.Opaque
+ if result == "" {
+ result = u.EscapedPath()
+ if result == "" {
+ result = "/"
+ }
+ } else {
+ if strings.HasPrefix(result, "//") {
+ result = u.Scheme + ":" + result
+ }
+ }
+ if u.ForceQuery || u.RawQuery != "" {
+ result += "?" + u.RawQuery
+ }
+ return result
+}
+
+// Hostname returns u.Host, stripping any valid port number if present.
+//
+// If the result is enclosed in square brackets, as literal IPv6 addresses are,
+// the square brackets are removed from the result.
+func (u *URL) Hostname() string {
+ host, _ := splitHostPort(u.Host)
+ return host
+}
+
+// Port returns the port part of u.Host, without the leading colon.
+//
+// If u.Host doesn't contain a valid numeric port, Port returns an empty string.
+func (u *URL) Port() string {
+ _, port := splitHostPort(u.Host)
+ return port
+}
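+
+// A sketch with a literal IPv6 host:
+//
+//	u, _ := Parse("https://[::1]:8080/")
+//	u.Hostname() // "::1"
+//	u.Port()     // "8080"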
+
+// splitHostPort separates host and port. If the port is not valid, it returns
+// the entire input as host, and it doesn't check the validity of the host.
+// Unlike net.SplitHostPort, it requires ports to be numeric, per RFC 3986.
+func splitHostPort(hostPort string) (host, port string) {
+ host = hostPort
+
+ colon := strings.LastIndexByte(host, ':')
+ if colon != -1 && validOptionalPort(host[colon:]) {
+ host, port = host[:colon], host[colon+1:]
+ }
+
+ if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
+ host = host[1 : len(host)-1]
+ }
+
+ return
+}
+
+// Marshaling interface implementations.
+// Would like to implement MarshalText/UnmarshalText but that will change the JSON representation of URLs.
+
+func (u *URL) MarshalBinary() (text []byte, err error) {
+ return []byte(u.String()), nil
+}
+
+func (u *URL) UnmarshalBinary(text []byte) error {
+ u1, err := Parse(string(text))
+ if err != nil {
+ return err
+ }
+ *u = *u1
+ return nil
+}
+
+// JoinPath returns a new URL with the provided path elements joined to
+// any existing path and the resulting path cleaned of any ./ or ../ elements.
+// Any sequences of multiple / characters will be reduced to a single /.
+func (u *URL) JoinPath(elem ...string) *URL {
+ elem = append([]string{u.EscapedPath()}, elem...)
+ var p string
+ if !strings.HasPrefix(elem[0], "/") {
+ // Return a relative path if u is relative,
+ // but ensure that it contains no ../ elements.
+ elem[0] = "/" + elem[0]
+ p = path.Join(elem...)[1:]
+ } else {
+ p = path.Join(elem...)
+ }
+ // path.Join will remove any trailing slashes.
+ // Preserve at least one.
+ if strings.HasSuffix(elem[len(elem)-1], "/") && !strings.HasSuffix(p, "/") {
+ p += "/"
+ }
+ url := *u
+ url.setPath(p)
+ return &url
+}
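+
+// A sketch of joining and cleaning (example.com is a placeholder):
+//
+//	u, _ := Parse("https://example.com/a/")
+//	u.JoinPath("b", "../c").String() // "https://example.com/a/c"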
+
+// validUserinfo reports whether s is a valid userinfo string per RFC 3986
+// Section 3.2.1:
+//
+// userinfo = *( unreserved / pct-encoded / sub-delims / ":" )
+// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
+// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
+// / "*" / "+" / "," / ";" / "="
+//
+// It doesn't validate pct-encoded. The caller does that via func unescape.
+func validUserinfo(s string) bool {
+ for _, r := range s {
+ if 'A' <= r && r <= 'Z' {
+ continue
+ }
+ if 'a' <= r && r <= 'z' {
+ continue
+ }
+ if '0' <= r && r <= '9' {
+ continue
+ }
+ switch r {
+ case '-', '.', '_', ':', '~', '!', '$', '&', '\'',
+ '(', ')', '*', '+', ',', ';', '=', '%', '@':
+ continue
+ default:
+ return false
+ }
+ }
+ return true
+}
+
+// stringContainsCTLByte reports whether s contains any ASCII control character.
+func stringContainsCTLByte(s string) bool {
+ for i := 0; i < len(s); i++ {
+ b := s[i]
+ if b < ' ' || b == 0x7f {
+ return true
+ }
+ }
+ return false
+}
+
+// JoinPath returns a URL string with the provided path elements joined to
+// the existing path of base and the resulting path cleaned of any ./ or ../ elements.
+func JoinPath(base string, elem ...string) (result string, err error) {
+ url, err := Parse(base)
+ if err != nil {
+ return
+ }
+ result = url.JoinPath(elem...).String()
+ return
+}
diff --git a/contrib/go/_std_1.18/src/net/writev_unix.go b/contrib/go/_std_1.19/src/net/writev_unix.go
index 51ab29dc31..51ab29dc31 100644
--- a/contrib/go/_std_1.18/src/net/writev_unix.go
+++ b/contrib/go/_std_1.19/src/net/writev_unix.go
diff --git a/contrib/go/_std_1.18/src/os/dir.go b/contrib/go/_std_1.19/src/os/dir.go
index 5306bcb3ba..5306bcb3ba 100644
--- a/contrib/go/_std_1.18/src/os/dir.go
+++ b/contrib/go/_std_1.19/src/os/dir.go
diff --git a/contrib/go/_std_1.18/src/os/dir_darwin.go b/contrib/go/_std_1.19/src/os/dir_darwin.go
index deba3eb37f..deba3eb37f 100644
--- a/contrib/go/_std_1.18/src/os/dir_darwin.go
+++ b/contrib/go/_std_1.19/src/os/dir_darwin.go
diff --git a/contrib/go/_std_1.18/src/os/dir_unix.go b/contrib/go/_std_1.19/src/os/dir_unix.go
index 9b3871a3e8..9b3871a3e8 100644
--- a/contrib/go/_std_1.18/src/os/dir_unix.go
+++ b/contrib/go/_std_1.19/src/os/dir_unix.go
diff --git a/contrib/go/_std_1.18/src/os/dirent_linux.go b/contrib/go/_std_1.19/src/os/dirent_linux.go
index 74a3431121..74a3431121 100644
--- a/contrib/go/_std_1.18/src/os/dirent_linux.go
+++ b/contrib/go/_std_1.19/src/os/dirent_linux.go
diff --git a/contrib/go/_std_1.19/src/os/endian_little.go b/contrib/go/_std_1.19/src/os/endian_little.go
new file mode 100644
index 0000000000..a7cf1cdda8
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/endian_little.go
@@ -0,0 +1,9 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+//go:build 386 || amd64 || arm || arm64 || loong64 || ppc64le || mips64le || mipsle || riscv64 || wasm
+
+package os
+
+const isBigEndian = false
diff --git a/contrib/go/_std_1.18/src/os/env.go b/contrib/go/_std_1.19/src/os/env.go
index 330297b36a..330297b36a 100644
--- a/contrib/go/_std_1.18/src/os/env.go
+++ b/contrib/go/_std_1.19/src/os/env.go
diff --git a/contrib/go/_std_1.18/src/os/error.go b/contrib/go/_std_1.19/src/os/error.go
index fe8f2a8446..fe8f2a8446 100644
--- a/contrib/go/_std_1.18/src/os/error.go
+++ b/contrib/go/_std_1.19/src/os/error.go
diff --git a/contrib/go/_std_1.18/src/os/error_errno.go b/contrib/go/_std_1.19/src/os/error_errno.go
index c8140461a4..c8140461a4 100644
--- a/contrib/go/_std_1.18/src/os/error_errno.go
+++ b/contrib/go/_std_1.19/src/os/error_errno.go
diff --git a/contrib/go/_std_1.19/src/os/error_posix.go b/contrib/go/_std_1.19/src/os/error_posix.go
new file mode 100644
index 0000000000..5ca2e60e5b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/error_posix.go
@@ -0,0 +1,18 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm) || windows
+
+package os
+
+import "syscall"
+
+// wrapSyscallError takes an error and a syscall name. If the error is
+// a syscall.Errno, it wraps it in an os.SyscallError using the syscall name.
+func wrapSyscallError(name string, err error) error {
+ if _, ok := err.(syscall.Errno); ok {
+ err = NewSyscallError(name, err)
+ }
+ return err
+}
diff --git a/contrib/go/_std_1.18/src/os/exec.go b/contrib/go/_std_1.19/src/os/exec.go
index 9eb3166ecb..9eb3166ecb 100644
--- a/contrib/go/_std_1.18/src/os/exec.go
+++ b/contrib/go/_std_1.19/src/os/exec.go
diff --git a/contrib/go/_std_1.19/src/os/exec_posix.go b/contrib/go/_std_1.19/src/os/exec_posix.go
new file mode 100644
index 0000000000..e1e7d53a27
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/exec_posix.go
@@ -0,0 +1,136 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm) || windows
+
+package os
+
+import (
+ "internal/itoa"
+ "internal/syscall/execenv"
+ "runtime"
+ "syscall"
+)
+
+// The only signal values guaranteed to be present in the os package on all
+// systems are os.Interrupt (send the process an interrupt) and os.Kill (force
+// the process to exit). On Windows, sending os.Interrupt to a process with
+// os.Process.Signal is not implemented; it will return an error instead of
+// sending a signal.
+var (
+ Interrupt Signal = syscall.SIGINT
+ Kill Signal = syscall.SIGKILL
+)
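
A sketch of the portable signal values in use; pid is assumed to identify a child process the caller owns:

	p, err := os.FindProcess(pid)
	if err != nil {
		log.Fatal(err)
	}
	// Ask the process to stop; unlike os.Kill, os.Interrupt can be handled.
	if err := p.Signal(os.Interrupt); err != nil {
		log.Fatal(err)
	}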
+
+func startProcess(name string, argv []string, attr *ProcAttr) (p *Process, err error) {
+	// If there is no SysProcAttr (i.e., no Chroot or changed
+ // UID/GID), double-check existence of the directory we want
+ // to chdir into. We can make the error clearer this way.
+ if attr != nil && attr.Sys == nil && attr.Dir != "" {
+ if _, err := Stat(attr.Dir); err != nil {
+ pe := err.(*PathError)
+ pe.Op = "chdir"
+ return nil, pe
+ }
+ }
+
+ sysattr := &syscall.ProcAttr{
+ Dir: attr.Dir,
+ Env: attr.Env,
+ Sys: attr.Sys,
+ }
+ if sysattr.Env == nil {
+ sysattr.Env, err = execenv.Default(sysattr.Sys)
+ if err != nil {
+ return nil, err
+ }
+ }
+ sysattr.Files = make([]uintptr, 0, len(attr.Files))
+ for _, f := range attr.Files {
+ sysattr.Files = append(sysattr.Files, f.Fd())
+ }
+
+ pid, h, e := syscall.StartProcess(name, argv, sysattr)
+
+ // Make sure we don't run the finalizers of attr.Files.
+ runtime.KeepAlive(attr)
+
+ if e != nil {
+ return nil, &PathError{Op: "fork/exec", Path: name, Err: e}
+ }
+
+ return newProcess(pid, h), nil
+}
+
+func (p *Process) kill() error {
+ return p.Signal(Kill)
+}
+
+// ProcessState stores information about a process, as reported by Wait.
+type ProcessState struct {
+ pid int // The process's id.
+ status syscall.WaitStatus // System-dependent status info.
+ rusage *syscall.Rusage
+}
+
+// Pid returns the process id of the exited process.
+func (p *ProcessState) Pid() int {
+ return p.pid
+}
+
+func (p *ProcessState) exited() bool {
+ return p.status.Exited()
+}
+
+func (p *ProcessState) success() bool {
+ return p.status.ExitStatus() == 0
+}
+
+func (p *ProcessState) sys() any {
+ return p.status
+}
+
+func (p *ProcessState) sysUsage() any {
+ return p.rusage
+}
+
+func (p *ProcessState) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ status := p.Sys().(syscall.WaitStatus)
+ res := ""
+ switch {
+ case status.Exited():
+ code := status.ExitStatus()
+ if runtime.GOOS == "windows" && uint(code) >= 1<<16 { // windows uses large hex numbers
+ res = "exit status " + uitox(uint(code))
+ } else { // unix systems use small decimal integers
+ res = "exit status " + itoa.Itoa(code) // unix
+ }
+ case status.Signaled():
+ res = "signal: " + status.Signal().String()
+ case status.Stopped():
+ res = "stop signal: " + status.StopSignal().String()
+ if status.StopSignal() == syscall.SIGTRAP && status.TrapCause() != 0 {
+ res += " (trap " + itoa.Itoa(status.TrapCause()) + ")"
+ }
+ case status.Continued():
+ res = "continued"
+ }
+ if status.CoreDump() {
+ res += " (core dumped)"
+ }
+ return res
+}
+
+// ExitCode returns the exit code of the exited process, or -1
+// if the process hasn't exited or was terminated by a signal.
+func (p *ProcessState) ExitCode() int {
+	// Return -1 if p is nil (the process hasn't started).
+ if p == nil {
+ return -1
+ }
+ return p.status.ExitStatus()
+}
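
How a ProcessState is typically obtained and inspected; /bin/true is an assumed Unix binary:

	p, err := os.StartProcess("/bin/true", []string{"true"}, &os.ProcAttr{
		Files: []*os.File{os.Stdin, os.Stdout, os.Stderr},
	})
	if err != nil {
		log.Fatal(err)
	}
	state, err := p.Wait()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(state.ExitCode()) // 0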
diff --git a/contrib/go/_std_1.19/src/os/exec_unix.go b/contrib/go/_std_1.19/src/os/exec_unix.go
new file mode 100644
index 0000000000..90a4a61222
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/exec_unix.go
@@ -0,0 +1,106 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm)
+
+package os
+
+import (
+ "errors"
+ "runtime"
+ "syscall"
+ "time"
+)
+
+func (p *Process) wait() (ps *ProcessState, err error) {
+ if p.Pid == -1 {
+ return nil, syscall.EINVAL
+ }
+
+ // If we can block until Wait4 will succeed immediately, do so.
+ ready, err := p.blockUntilWaitable()
+ if err != nil {
+ return nil, err
+ }
+ if ready {
+ // Mark the process done now, before the call to Wait4,
+ // so that Process.signal will not send a signal.
+ p.setDone()
+ // Acquire a write lock on sigMu to wait for any
+ // active call to the signal method to complete.
+ p.sigMu.Lock()
+ p.sigMu.Unlock()
+ }
+
+ var (
+ status syscall.WaitStatus
+ rusage syscall.Rusage
+ pid1 int
+ e error
+ )
+ for {
+ pid1, e = syscall.Wait4(p.Pid, &status, 0, &rusage)
+ if e != syscall.EINTR {
+ break
+ }
+ }
+ if e != nil {
+ return nil, NewSyscallError("wait", e)
+ }
+ if pid1 != 0 {
+ p.setDone()
+ }
+ ps = &ProcessState{
+ pid: pid1,
+ status: status,
+ rusage: &rusage,
+ }
+ return ps, nil
+}
+
+func (p *Process) signal(sig Signal) error {
+ if p.Pid == -1 {
+ return errors.New("os: process already released")
+ }
+ if p.Pid == 0 {
+ return errors.New("os: process not initialized")
+ }
+ p.sigMu.RLock()
+ defer p.sigMu.RUnlock()
+ if p.done() {
+ return ErrProcessDone
+ }
+ s, ok := sig.(syscall.Signal)
+ if !ok {
+ return errors.New("os: unsupported signal type")
+ }
+ if e := syscall.Kill(p.Pid, s); e != nil {
+ if e == syscall.ESRCH {
+ return ErrProcessDone
+ }
+ return e
+ }
+ return nil
+}
+
+func (p *Process) release() error {
+ // NOOP for unix.
+ p.Pid = -1
+ // no need for a finalizer anymore
+ runtime.SetFinalizer(p, nil)
+ return nil
+}
+
+func findProcess(pid int) (p *Process, err error) {
+ // NOOP for unix.
+ return newProcess(pid, 0), nil
+}
+
+func (p *ProcessState) userTime() time.Duration {
+ return time.Duration(p.rusage.Utime.Nano()) * time.Nanosecond
+}
+
+func (p *ProcessState) systemTime() time.Duration {
+ return time.Duration(p.rusage.Stime.Nano()) * time.Nanosecond
+}
diff --git a/contrib/go/_std_1.18/src/os/executable.go b/contrib/go/_std_1.19/src/os/executable.go
index cc3134af1c..cc3134af1c 100644
--- a/contrib/go/_std_1.18/src/os/executable.go
+++ b/contrib/go/_std_1.19/src/os/executable.go
diff --git a/contrib/go/_std_1.18/src/os/executable_darwin.go b/contrib/go/_std_1.19/src/os/executable_darwin.go
index dae9f4ee18..dae9f4ee18 100644
--- a/contrib/go/_std_1.18/src/os/executable_darwin.go
+++ b/contrib/go/_std_1.19/src/os/executable_darwin.go
diff --git a/contrib/go/_std_1.18/src/os/executable_procfs.go b/contrib/go/_std_1.19/src/os/executable_procfs.go
index 18348eab91..18348eab91 100644
--- a/contrib/go/_std_1.18/src/os/executable_procfs.go
+++ b/contrib/go/_std_1.19/src/os/executable_procfs.go
diff --git a/contrib/go/_std_1.19/src/os/file.go b/contrib/go/_std_1.19/src/os/file.go
new file mode 100644
index 0000000000..9f388921ae
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/file.go
@@ -0,0 +1,726 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package os provides a platform-independent interface to operating system
+// functionality. The design is Unix-like, although the error handling is
+// Go-like; failing calls return values of type error rather than error numbers.
+// Often, more information is available within the error. For example,
+// if a call that takes a file name fails, such as Open or Stat, the error
+// will include the failing file name when printed and will be of type
+// *PathError, which may be unpacked for more information.
+//
+// The os interface is intended to be uniform across all operating systems.
+// Features not generally available appear in the system-specific package syscall.
+//
+// Here is a simple example, opening a file and reading some of it.
+//
+// file, err := os.Open("file.go") // For read access.
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// If the open fails, the error string will be self-explanatory, like
+//
+// open file.go: no such file or directory
+//
+// The file's data can then be read into a slice of bytes. Read and
+// Write take their byte counts from the length of the argument slice.
+//
+// data := make([]byte, 100)
+// count, err := file.Read(data)
+// if err != nil {
+// log.Fatal(err)
+// }
+// fmt.Printf("read %d bytes: %q\n", count, data[:count])
+//
+// Note: The maximum number of concurrent operations on a File may be limited by
+// the OS or the system. The number should be high, but exceeding it may degrade
+// performance or cause other issues.
+package os
+
+import (
+ "errors"
+ "internal/poll"
+ "internal/testlog"
+ "internal/unsafeheader"
+ "io"
+ "io/fs"
+ "runtime"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+// Name returns the name of the file as presented to Open.
+func (f *File) Name() string { return f.name }
+
+// Stdin, Stdout, and Stderr are open Files pointing to the standard input,
+// standard output, and standard error file descriptors.
+//
+// Note that the Go runtime writes to standard error for panics and crashes;
+// closing Stderr may cause those messages to go elsewhere, perhaps
+// to a file opened later.
+var (
+ Stdin = NewFile(uintptr(syscall.Stdin), "/dev/stdin")
+ Stdout = NewFile(uintptr(syscall.Stdout), "/dev/stdout")
+ Stderr = NewFile(uintptr(syscall.Stderr), "/dev/stderr")
+)
+
+// Flags to OpenFile wrapping those of the underlying system. Not all
+// flags may be implemented on a given system.
+const (
+ // Exactly one of O_RDONLY, O_WRONLY, or O_RDWR must be specified.
+ O_RDONLY int = syscall.O_RDONLY // open the file read-only.
+ O_WRONLY int = syscall.O_WRONLY // open the file write-only.
+ O_RDWR int = syscall.O_RDWR // open the file read-write.
+ // The remaining values may be or'ed in to control behavior.
+ O_APPEND int = syscall.O_APPEND // append data to the file when writing.
+ O_CREATE int = syscall.O_CREAT // create a new file if none exists.
+ O_EXCL int = syscall.O_EXCL // used with O_CREATE, file must not exist.
+ O_SYNC int = syscall.O_SYNC // open for synchronous I/O.
+ O_TRUNC int = syscall.O_TRUNC // truncate regular writable file when opened.
+)
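
The flags compose with bitwise OR; a sketch that appends to a log file, creating it on first use (file name assumed):

	f, err := os.OpenFile("app.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if _, err := f.Write([]byte("started\n")); err != nil {
		log.Fatal(err)
	}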
+
+// Seek whence values.
+//
+// Deprecated: Use io.SeekStart, io.SeekCurrent, and io.SeekEnd.
+const (
+ SEEK_SET int = 0 // seek relative to the origin of the file
+ SEEK_CUR int = 1 // seek relative to the current offset
+ SEEK_END int = 2 // seek relative to the end
+)
+
+// LinkError records an error during a link or symlink or rename
+// system call and the paths that caused it.
+type LinkError struct {
+ Op string
+ Old string
+ New string
+ Err error
+}
+
+func (e *LinkError) Error() string {
+ return e.Op + " " + e.Old + " " + e.New + ": " + e.Err.Error()
+}
+
+func (e *LinkError) Unwrap() error {
+ return e.Err
+}
+
+// Read reads up to len(b) bytes from the File and stores them in b.
+// It returns the number of bytes read and any error encountered.
+// At end of file, Read returns 0, io.EOF.
+func (f *File) Read(b []byte) (n int, err error) {
+ if err := f.checkValid("read"); err != nil {
+ return 0, err
+ }
+ n, e := f.read(b)
+ return n, f.wrapErr("read", e)
+}
+
+// ReadAt reads len(b) bytes from the File starting at byte offset off.
+// It returns the number of bytes read and the error, if any.
+// ReadAt always returns a non-nil error when n < len(b).
+// At end of file, that error is io.EOF.
+func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
+ if err := f.checkValid("read"); err != nil {
+ return 0, err
+ }
+
+ if off < 0 {
+ return 0, &PathError{Op: "readat", Path: f.name, Err: errors.New("negative offset")}
+ }
+
+ for len(b) > 0 {
+ m, e := f.pread(b, off)
+ if e != nil {
+ err = f.wrapErr("read", e)
+ break
+ }
+ n += m
+ b = b[m:]
+ off += int64(m)
+ }
+ return
+}
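
Unlike Read, ReadAt keeps reading until the buffer is full or an error occurs; a short sketch (f is an assumed open file):

	header := make([]byte, 8)
	if _, err := f.ReadAt(header, 0); err != nil { // bytes 0..7
		log.Fatal(err)
	}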
+
+// ReadFrom implements io.ReaderFrom.
+func (f *File) ReadFrom(r io.Reader) (n int64, err error) {
+ if err := f.checkValid("write"); err != nil {
+ return 0, err
+ }
+ n, handled, e := f.readFrom(r)
+ if !handled {
+ return genericReadFrom(f, r) // without wrapping
+ }
+ return n, f.wrapErr("write", e)
+}
+
+func genericReadFrom(f *File, r io.Reader) (int64, error) {
+ return io.Copy(onlyWriter{f}, r)
+}
+
+type onlyWriter struct {
+ io.Writer
+}
+
+// Write writes len(b) bytes from b to the File.
+// It returns the number of bytes written and an error, if any.
+// Write returns a non-nil error when n != len(b).
+func (f *File) Write(b []byte) (n int, err error) {
+ if err := f.checkValid("write"); err != nil {
+ return 0, err
+ }
+ n, e := f.write(b)
+ if n < 0 {
+ n = 0
+ }
+ if n != len(b) {
+ err = io.ErrShortWrite
+ }
+
+ epipecheck(f, e)
+
+ if e != nil {
+ err = f.wrapErr("write", e)
+ }
+
+ return n, err
+}
+
+var errWriteAtInAppendMode = errors.New("os: invalid use of WriteAt on file opened with O_APPEND")
+
+// WriteAt writes len(b) bytes to the File starting at byte offset off.
+// It returns the number of bytes written and an error, if any.
+// WriteAt returns a non-nil error when n != len(b).
+//
+// If file was opened with the O_APPEND flag, WriteAt returns an error.
+func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
+ if err := f.checkValid("write"); err != nil {
+ return 0, err
+ }
+ if f.appendMode {
+ return 0, errWriteAtInAppendMode
+ }
+
+ if off < 0 {
+ return 0, &PathError{Op: "writeat", Path: f.name, Err: errors.New("negative offset")}
+ }
+
+ for len(b) > 0 {
+ m, e := f.pwrite(b, off)
+ if e != nil {
+ err = f.wrapErr("write", e)
+ break
+ }
+ n += m
+ b = b[m:]
+ off += int64(m)
+ }
+ return
+}
+
+// Seek sets the offset for the next Read or Write on file to offset, interpreted
+// according to whence: 0 means relative to the origin of the file, 1 means
+// relative to the current offset, and 2 means relative to the end.
+// It returns the new offset and an error, if any.
+// The behavior of Seek on a file opened with O_APPEND is not specified.
+//
+// If f is a directory, the behavior of Seek varies by operating
+// system; you can seek to the beginning of the directory on Unix-like
+// operating systems, but not on Windows.
+func (f *File) Seek(offset int64, whence int) (ret int64, err error) {
+ if err := f.checkValid("seek"); err != nil {
+ return 0, err
+ }
+ r, e := f.seek(offset, whence)
+ if e == nil && f.dirinfo != nil && r != 0 {
+ e = syscall.EISDIR
+ }
+ if e != nil {
+ return 0, f.wrapErr("seek", e)
+ }
+ return r, nil
+}
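
The io package constants are the readable spelling of whence; a sketch that rewinds and then measures the file:

	if _, err := f.Seek(0, io.SeekStart); err != nil { // back to the start
		log.Fatal(err)
	}
	size, err := f.Seek(0, io.SeekEnd) // offset of the end == size in bytes
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(size)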
+
+// WriteString is like Write, but writes the contents of string s rather than
+// a slice of bytes.
+func (f *File) WriteString(s string) (n int, err error) {
+ var b []byte
+ hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b))
+ hdr.Data = (*unsafeheader.String)(unsafe.Pointer(&s)).Data
+ hdr.Cap = len(s)
+ hdr.Len = len(s)
+ return f.Write(b)
+}
+
+// Mkdir creates a new directory with the specified name and permission
+// bits (before umask).
+// If there is an error, it will be of type *PathError.
+func Mkdir(name string, perm FileMode) error {
+ if runtime.GOOS == "windows" && isWindowsNulName(name) {
+ return &PathError{Op: "mkdir", Path: name, Err: syscall.ENOTDIR}
+ }
+ longName := fixLongPath(name)
+ e := ignoringEINTR(func() error {
+ return syscall.Mkdir(longName, syscallMode(perm))
+ })
+
+ if e != nil {
+ return &PathError{Op: "mkdir", Path: name, Err: e}
+ }
+
+ // mkdir(2) itself won't handle the sticky bit on *BSD and Solaris
+ if !supportsCreateWithStickyBit && perm&ModeSticky != 0 {
+ e = setStickyBit(name)
+
+ if e != nil {
+ Remove(name)
+ return e
+ }
+ }
+
+ return nil
+}
+
+// setStickyBit adds ModeSticky to the permission bits of path, non-atomically.
+func setStickyBit(name string) error {
+ fi, err := Stat(name)
+ if err != nil {
+ return err
+ }
+ return Chmod(name, fi.Mode()|ModeSticky)
+}
+
+// Chdir changes the current working directory to the named directory.
+// If there is an error, it will be of type *PathError.
+func Chdir(dir string) error {
+ if e := syscall.Chdir(dir); e != nil {
+ testlog.Open(dir) // observe likely non-existent directory
+ return &PathError{Op: "chdir", Path: dir, Err: e}
+ }
+ if log := testlog.Logger(); log != nil {
+ wd, err := Getwd()
+ if err == nil {
+ log.Chdir(wd)
+ }
+ }
+ return nil
+}
+
+// Open opens the named file for reading. If successful, methods on
+// the returned file can be used for reading; the associated file
+// descriptor has mode O_RDONLY.
+// If there is an error, it will be of type *PathError.
+func Open(name string) (*File, error) {
+ return OpenFile(name, O_RDONLY, 0)
+}
+
+// Create creates or truncates the named file. If the file already exists,
+// it is truncated. If the file does not exist, it is created with mode 0666
+// (before umask). If successful, methods on the returned File can
+// be used for I/O; the associated file descriptor has mode O_RDWR.
+// If there is an error, it will be of type *PathError.
+func Create(name string) (*File, error) {
+ return OpenFile(name, O_RDWR|O_CREATE|O_TRUNC, 0666)
+}
+
+// OpenFile is the generalized open call; most users will use Open
+// or Create instead. It opens the named file with specified flag
+// (O_RDONLY etc.). If the file does not exist, and the O_CREATE flag
+// is passed, it is created with mode perm (before umask). If successful,
+// methods on the returned File can be used for I/O.
+// If there is an error, it will be of type *PathError.
+func OpenFile(name string, flag int, perm FileMode) (*File, error) {
+ testlog.Open(name)
+ f, err := openFileNolog(name, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ f.appendMode = flag&O_APPEND != 0
+
+ return f, nil
+}
+
+// lstat is overridden in tests.
+var lstat = Lstat
+
+// Rename renames (moves) oldpath to newpath.
+// If newpath already exists and is not a directory, Rename replaces it.
+// OS-specific restrictions may apply when oldpath and newpath are in different directories.
+// If there is an error, it will be of type *LinkError.
+func Rename(oldpath, newpath string) error {
+ return rename(oldpath, newpath)
+}
+
+// Many functions in package syscall return a count of -1 instead of 0.
+// Using fixCount(call()) instead of call() corrects the count.
+func fixCount(n int, err error) (int, error) {
+ if n < 0 {
+ n = 0
+ }
+ return n, err
+}
+
+// wrapErr wraps an error that occurred during an operation on an open file.
+// It passes io.EOF through unchanged, otherwise converts
+// poll.ErrFileClosing to ErrClosed and wraps the error in a PathError.
+func (f *File) wrapErr(op string, err error) error {
+ if err == nil || err == io.EOF {
+ return err
+ }
+ if err == poll.ErrFileClosing {
+ err = ErrClosed
+ }
+ return &PathError{Op: op, Path: f.name, Err: err}
+}
+
+// TempDir returns the default directory to use for temporary files.
+//
+// On Unix systems, it returns $TMPDIR if non-empty, else /tmp.
+// On Windows, it uses GetTempPath, returning the first non-empty
+// value from %TMP%, %TEMP%, %USERPROFILE%, or the Windows directory.
+// On Plan 9, it returns /tmp.
+//
+// The directory is neither guaranteed to exist nor have accessible
+// permissions.
+func TempDir() string {
+ return tempDir()
+}
+
+// UserCacheDir returns the default root directory to use for user-specific
+// cached data. Users should create their own application-specific subdirectory
+// within this one and use that.
+//
+// On Unix systems, it returns $XDG_CACHE_HOME as specified by
+// https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html if
+// non-empty, else $HOME/.cache.
+// On Darwin, it returns $HOME/Library/Caches.
+// On Windows, it returns %LocalAppData%.
+// On Plan 9, it returns $home/lib/cache.
+//
+// If the location cannot be determined (for example, $HOME is not defined),
+// then it will return an error.
+func UserCacheDir() (string, error) {
+ var dir string
+
+ switch runtime.GOOS {
+ case "windows":
+ dir = Getenv("LocalAppData")
+ if dir == "" {
+ return "", errors.New("%LocalAppData% is not defined")
+ }
+
+ case "darwin", "ios":
+ dir = Getenv("HOME")
+ if dir == "" {
+ return "", errors.New("$HOME is not defined")
+ }
+ dir += "/Library/Caches"
+
+ case "plan9":
+ dir = Getenv("home")
+ if dir == "" {
+ return "", errors.New("$home is not defined")
+ }
+ dir += "/lib/cache"
+
+ default: // Unix
+ dir = Getenv("XDG_CACHE_HOME")
+ if dir == "" {
+ dir = Getenv("HOME")
+ if dir == "" {
+ return "", errors.New("neither $XDG_CACHE_HOME nor $HOME are defined")
+ }
+ dir += "/.cache"
+ }
+ }
+
+ return dir, nil
+}
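
Per the doc comment, callers create an application-specific subdirectory; a sketch with an assumed name "myapp":

	dir, err := os.UserCacheDir()
	if err != nil {
		log.Fatal(err)
	}
	appCache := filepath.Join(dir, "myapp")
	if err := os.MkdirAll(appCache, 0o700); err != nil {
		log.Fatal(err)
	}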
+
+// UserConfigDir returns the default root directory to use for user-specific
+// configuration data. Users should create their own application-specific
+// subdirectory within this one and use that.
+//
+// On Unix systems, it returns $XDG_CONFIG_HOME as specified by
+// https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html if
+// non-empty, else $HOME/.config.
+// On Darwin, it returns $HOME/Library/Application Support.
+// On Windows, it returns %AppData%.
+// On Plan 9, it returns $home/lib.
+//
+// If the location cannot be determined (for example, $HOME is not defined),
+// then it will return an error.
+func UserConfigDir() (string, error) {
+ var dir string
+
+ switch runtime.GOOS {
+ case "windows":
+ dir = Getenv("AppData")
+ if dir == "" {
+ return "", errors.New("%AppData% is not defined")
+ }
+
+ case "darwin", "ios":
+ dir = Getenv("HOME")
+ if dir == "" {
+ return "", errors.New("$HOME is not defined")
+ }
+ dir += "/Library/Application Support"
+
+ case "plan9":
+ dir = Getenv("home")
+ if dir == "" {
+ return "", errors.New("$home is not defined")
+ }
+ dir += "/lib"
+
+ default: // Unix
+ dir = Getenv("XDG_CONFIG_HOME")
+ if dir == "" {
+ dir = Getenv("HOME")
+ if dir == "" {
+ return "", errors.New("neither $XDG_CONFIG_HOME nor $HOME are defined")
+ }
+ dir += "/.config"
+ }
+ }
+
+ return dir, nil
+}
+
+// UserHomeDir returns the current user's home directory.
+//
+// On Unix, including macOS, it returns the $HOME environment variable.
+// On Windows, it returns %USERPROFILE%.
+// On Plan 9, it returns the $home environment variable.
+func UserHomeDir() (string, error) {
+ env, enverr := "HOME", "$HOME"
+ switch runtime.GOOS {
+ case "windows":
+ env, enverr = "USERPROFILE", "%userprofile%"
+ case "plan9":
+ env, enverr = "home", "$home"
+ }
+ if v := Getenv(env); v != "" {
+ return v, nil
+ }
+	// On some operating systems the home directory is not always defined.
+ switch runtime.GOOS {
+ case "android":
+ return "/sdcard", nil
+ case "ios":
+ return "/", nil
+ }
+ return "", errors.New(enverr + " is not defined")
+}
+
+// Chmod changes the mode of the named file to mode.
+// If the file is a symbolic link, it changes the mode of the link's target.
+// If there is an error, it will be of type *PathError.
+//
+// A different subset of the mode bits is used, depending on the
+// operating system.
+//
+// On Unix, the mode's permission bits, ModeSetuid, ModeSetgid, and
+// ModeSticky are used.
+//
+// On Windows, only the 0200 bit (owner writable) of mode is used; it
+// controls whether the file's read-only attribute is set or cleared.
+// The other bits are currently unused. For compatibility with Go 1.12
+// and earlier, use a non-zero mode. Use mode 0400 for a read-only
+// file and 0600 for a readable+writable file.
+//
+// On Plan 9, the mode's permission bits, ModeAppend, ModeExclusive,
+// and ModeTemporary are used.
+func Chmod(name string, mode FileMode) error { return chmod(name, mode) }
+
+// Chmod changes the mode of the file to mode.
+// If there is an error, it will be of type *PathError.
+func (f *File) Chmod(mode FileMode) error { return f.chmod(mode) }
+
+// SetDeadline sets the read and write deadlines for a File.
+// It is equivalent to calling both SetReadDeadline and SetWriteDeadline.
+//
+// Only some kinds of files support setting a deadline. Calls to SetDeadline
+// for files that do not support deadlines will return ErrNoDeadline.
+// On most systems ordinary files do not support deadlines, but pipes do.
+//
+// A deadline is an absolute time after which I/O operations fail with an
+// error instead of blocking. The deadline applies to all future and pending
+// I/O, not just the immediately following call to Read or Write.
+// After a deadline has been exceeded, the connection can be refreshed
+// by setting a deadline in the future.
+//
+// If the deadline is exceeded a call to Read or Write or to other I/O
+// methods will return an error that wraps ErrDeadlineExceeded.
+// This can be tested using errors.Is(err, os.ErrDeadlineExceeded).
+// That error implements the Timeout method, and calling the Timeout
+// method will return true, but there are other possible errors for which
+// the Timeout will return true even if the deadline has not been exceeded.
+//
+// An idle timeout can be implemented by repeatedly extending
+// the deadline after successful Read or Write calls.
+//
+// A zero value for t means I/O operations will not time out.
+func (f *File) SetDeadline(t time.Time) error {
+ return f.setDeadline(t)
+}
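
Pipes support deadlines on most systems; a sketch of a read that times out (the duration is an assumed example):

	r, w, err := os.Pipe()
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()
	defer w.Close()
	r.SetReadDeadline(time.Now().Add(50 * time.Millisecond))
	if _, err := r.Read(make([]byte, 1)); errors.Is(err, os.ErrDeadlineExceeded) {
		fmt.Println("read timed out") // nothing was written before the deadline
	}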
+
+// SetReadDeadline sets the deadline for future Read calls and any
+// currently-blocked Read call.
+// A zero value for t means Read will not time out.
+// Not all files support setting deadlines; see SetDeadline.
+func (f *File) SetReadDeadline(t time.Time) error {
+ return f.setReadDeadline(t)
+}
+
+// SetWriteDeadline sets the deadline for any future Write calls and any
+// currently-blocked Write call.
+// Even if Write times out, it may return n > 0, indicating that
+// some of the data was successfully written.
+// A zero value for t means Write will not time out.
+// Not all files support setting deadlines; see SetDeadline.
+func (f *File) SetWriteDeadline(t time.Time) error {
+ return f.setWriteDeadline(t)
+}
+
+// SyscallConn returns a raw file.
+// This implements the syscall.Conn interface.
+func (f *File) SyscallConn() (syscall.RawConn, error) {
+ if err := f.checkValid("SyscallConn"); err != nil {
+ return nil, err
+ }
+ return newRawConn(f)
+}
+
+// isWindowsNulName reports whether name is os.DevNull ('NUL') on Windows.
+// True is returned if name is 'NUL', regardless of case.
+func isWindowsNulName(name string) bool {
+ if len(name) != 3 {
+ return false
+ }
+ if name[0] != 'n' && name[0] != 'N' {
+ return false
+ }
+ if name[1] != 'u' && name[1] != 'U' {
+ return false
+ }
+ if name[2] != 'l' && name[2] != 'L' {
+ return false
+ }
+ return true
+}
+
+// DirFS returns a file system (an fs.FS) for the tree of files rooted at the directory dir.
+//
+// Note that DirFS("/prefix") only guarantees that the Open calls it makes to the
+// operating system will begin with "/prefix": DirFS("/prefix").Open("file") is the
+// same as os.Open("/prefix/file"). So if /prefix/file is a symbolic link pointing outside
+// the /prefix tree, then using DirFS does not stop the access any more than using
+// os.Open does. Additionally, the root of the fs.FS returned for a relative path,
+// DirFS("prefix"), will be affected by later calls to Chdir. DirFS is therefore not
+// a general substitute for a chroot-style security mechanism when the directory tree
+// contains arbitrary content.
+//
+// The result implements fs.StatFS.
+func DirFS(dir string) fs.FS {
+ return dirFS(dir)
+}
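
A sketch of the returned fs.FS in use; names passed to it are slash-separated and relative to dir ("/etc" and "hosts" are assumed):

	fsys := os.DirFS("/etc")
	data, err := fs.ReadFile(fsys, "hosts") // reads /etc/hosts
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes\n", len(data))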
+
+func containsAny(s, chars string) bool {
+ for i := 0; i < len(s); i++ {
+ for j := 0; j < len(chars); j++ {
+ if s[i] == chars[j] {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+type dirFS string
+
+func (dir dirFS) Open(name string) (fs.File, error) {
+ if !fs.ValidPath(name) || runtime.GOOS == "windows" && containsAny(name, `\:`) {
+ return nil, &PathError{Op: "open", Path: name, Err: ErrInvalid}
+ }
+ f, err := Open(string(dir) + "/" + name)
+ if err != nil {
+ return nil, err // nil fs.File
+ }
+ return f, nil
+}
+
+func (dir dirFS) Stat(name string) (fs.FileInfo, error) {
+ if !fs.ValidPath(name) || runtime.GOOS == "windows" && containsAny(name, `\:`) {
+ return nil, &PathError{Op: "stat", Path: name, Err: ErrInvalid}
+ }
+ f, err := Stat(string(dir) + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// ReadFile reads the named file and returns the contents.
+// A successful call returns err == nil, not err == EOF.
+// Because ReadFile reads the whole file, it does not treat an EOF from Read
+// as an error to be reported.
+func ReadFile(name string) ([]byte, error) {
+ f, err := Open(name)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ var size int
+ if info, err := f.Stat(); err == nil {
+ size64 := info.Size()
+ if int64(int(size64)) == size64 {
+ size = int(size64)
+ }
+ }
+ size++ // one byte for final read at EOF
+
+ // If a file claims a small size, read at least 512 bytes.
+ // In particular, files in Linux's /proc claim size 0 but
+ // then do not work right if read in small pieces,
+ // so an initial read of 1 byte would not work correctly.
+ if size < 512 {
+ size = 512
+ }
+
+ data := make([]byte, 0, size)
+ for {
+ if len(data) >= cap(data) {
+ d := append(data[:cap(data)], 0)
+ data = d[:len(data)]
+ }
+ n, err := f.Read(data[len(data):cap(data)])
+ data = data[:len(data)+n]
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ }
+ return data, err
+ }
+ }
+}
+
+// WriteFile writes data to the named file, creating it if necessary.
+// If the file does not exist, WriteFile creates it with permissions perm (before umask);
+// otherwise WriteFile truncates it before writing, without changing permissions.
+func WriteFile(name string, data []byte, perm FileMode) error {
+ f, err := OpenFile(name, O_WRONLY|O_CREATE|O_TRUNC, perm)
+ if err != nil {
+ return err
+ }
+ _, err = f.Write(data)
+ if err1 := f.Close(); err1 != nil && err == nil {
+ err = err1
+ }
+ return err
+}
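
The two helpers pair naturally for whole-file I/O; a round-trip sketch with an assumed file name:

	if err := os.WriteFile("note.txt", []byte("hello\n"), 0o644); err != nil {
		log.Fatal(err)
	}
	data, err := os.ReadFile("note.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", data) // hello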
diff --git a/contrib/go/_std_1.19/src/os/file_posix.go b/contrib/go/_std_1.19/src/os/file_posix.go
new file mode 100644
index 0000000000..c6d18ffeb6
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/file_posix.go
@@ -0,0 +1,250 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm) || windows
+
+package os
+
+import (
+ "runtime"
+ "syscall"
+ "time"
+)
+
+func sigpipe() // implemented in package runtime
+
+// Close closes the File, rendering it unusable for I/O.
+// On files that support SetDeadline, any pending I/O operations will
+// be canceled and return immediately with an ErrClosed error.
+// Close will return an error if it has already been called.
+func (f *File) Close() error {
+ if f == nil {
+ return ErrInvalid
+ }
+ return f.file.close()
+}
+
+// read reads up to len(b) bytes from the File.
+// It returns the number of bytes read and an error, if any.
+func (f *File) read(b []byte) (n int, err error) {
+ n, err = f.pfd.Read(b)
+ runtime.KeepAlive(f)
+ return n, err
+}
+
+// pread reads len(b) bytes from the File starting at byte offset off.
+// It returns the number of bytes read and the error, if any.
+// EOF is signaled by a zero count with err set to nil.
+func (f *File) pread(b []byte, off int64) (n int, err error) {
+ n, err = f.pfd.Pread(b, off)
+ runtime.KeepAlive(f)
+ return n, err
+}
+
+// write writes len(b) bytes to the File.
+// It returns the number of bytes written and an error, if any.
+func (f *File) write(b []byte) (n int, err error) {
+ n, err = f.pfd.Write(b)
+ runtime.KeepAlive(f)
+ return n, err
+}
+
+// pwrite writes len(b) bytes to the File starting at byte offset off.
+// It returns the number of bytes written and an error, if any.
+func (f *File) pwrite(b []byte, off int64) (n int, err error) {
+ n, err = f.pfd.Pwrite(b, off)
+ runtime.KeepAlive(f)
+ return n, err
+}
+
+// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
+func syscallMode(i FileMode) (o uint32) {
+ o |= uint32(i.Perm())
+ if i&ModeSetuid != 0 {
+ o |= syscall.S_ISUID
+ }
+ if i&ModeSetgid != 0 {
+ o |= syscall.S_ISGID
+ }
+ if i&ModeSticky != 0 {
+ o |= syscall.S_ISVTX
+ }
+ // No mapping for Go's ModeTemporary (plan9 only).
+ return
+}
+
+// See docs in file.go:Chmod.
+func chmod(name string, mode FileMode) error {
+ longName := fixLongPath(name)
+ e := ignoringEINTR(func() error {
+ return syscall.Chmod(longName, syscallMode(mode))
+ })
+ if e != nil {
+ return &PathError{Op: "chmod", Path: name, Err: e}
+ }
+ return nil
+}
+
+// See docs in file.go:(*File).Chmod.
+func (f *File) chmod(mode FileMode) error {
+ if err := f.checkValid("chmod"); err != nil {
+ return err
+ }
+ if e := f.pfd.Fchmod(syscallMode(mode)); e != nil {
+ return f.wrapErr("chmod", e)
+ }
+ return nil
+}
+
+// Chown changes the numeric uid and gid of the named file.
+// If the file is a symbolic link, it changes the uid and gid of the link's target.
+// A uid or gid of -1 means to not change that value.
+// If there is an error, it will be of type *PathError.
+//
+// On Windows or Plan 9, Chown always returns the syscall.EWINDOWS or
+// EPLAN9 error, wrapped in *PathError.
+func Chown(name string, uid, gid int) error {
+ e := ignoringEINTR(func() error {
+ return syscall.Chown(name, uid, gid)
+ })
+ if e != nil {
+ return &PathError{Op: "chown", Path: name, Err: e}
+ }
+ return nil
+}
+
+// Lchown changes the numeric uid and gid of the named file.
+// If the file is a symbolic link, it changes the uid and gid of the link itself.
+// If there is an error, it will be of type *PathError.
+//
+// On Windows, it always returns the syscall.EWINDOWS error, wrapped
+// in *PathError.
+func Lchown(name string, uid, gid int) error {
+ e := ignoringEINTR(func() error {
+ return syscall.Lchown(name, uid, gid)
+ })
+ if e != nil {
+ return &PathError{Op: "lchown", Path: name, Err: e}
+ }
+ return nil
+}
+
+// Chown changes the numeric uid and gid of the named file.
+// If there is an error, it will be of type *PathError.
+//
+// On Windows, it always returns the syscall.EWINDOWS error, wrapped
+// in *PathError.
+func (f *File) Chown(uid, gid int) error {
+ if err := f.checkValid("chown"); err != nil {
+ return err
+ }
+ if e := f.pfd.Fchown(uid, gid); e != nil {
+ return f.wrapErr("chown", e)
+ }
+ return nil
+}
+
+// Truncate changes the size of the file.
+// It does not change the I/O offset.
+// If there is an error, it will be of type *PathError.
+func (f *File) Truncate(size int64) error {
+ if err := f.checkValid("truncate"); err != nil {
+ return err
+ }
+ if e := f.pfd.Ftruncate(size); e != nil {
+ return f.wrapErr("truncate", e)
+ }
+ return nil
+}
+
+// Sync commits the current contents of the file to stable storage.
+// Typically, this means flushing the file system's in-memory copy
+// of recently written data to disk.
+func (f *File) Sync() error {
+ if err := f.checkValid("sync"); err != nil {
+ return err
+ }
+ if e := f.pfd.Fsync(); e != nil {
+ return f.wrapErr("sync", e)
+ }
+ return nil
+}
+
+// Chtimes changes the access and modification times of the named
+// file, similar to the Unix utime() or utimes() functions.
+//
+// The underlying filesystem may truncate or round the values to a
+// less precise time unit.
+// If there is an error, it will be of type *PathError.
+func Chtimes(name string, atime time.Time, mtime time.Time) error {
+ var utimes [2]syscall.Timespec
+ utimes[0] = syscall.NsecToTimespec(atime.UnixNano())
+ utimes[1] = syscall.NsecToTimespec(mtime.UnixNano())
+ if e := syscall.UtimesNano(fixLongPath(name), utimes[0:]); e != nil {
+ return &PathError{Op: "chtimes", Path: name, Err: e}
+ }
+ return nil
+}
+
+// Chdir changes the current working directory to the file,
+// which must be a directory.
+// If there is an error, it will be of type *PathError.
+func (f *File) Chdir() error {
+ if err := f.checkValid("chdir"); err != nil {
+ return err
+ }
+ if e := f.pfd.Fchdir(); e != nil {
+ return f.wrapErr("chdir", e)
+ }
+ return nil
+}
+
+// setDeadline sets the read and write deadline.
+func (f *File) setDeadline(t time.Time) error {
+ if err := f.checkValid("SetDeadline"); err != nil {
+ return err
+ }
+ return f.pfd.SetDeadline(t)
+}
+
+// setReadDeadline sets the read deadline.
+func (f *File) setReadDeadline(t time.Time) error {
+ if err := f.checkValid("SetReadDeadline"); err != nil {
+ return err
+ }
+ return f.pfd.SetReadDeadline(t)
+}
+
+// setWriteDeadline sets the write deadline.
+func (f *File) setWriteDeadline(t time.Time) error {
+ if err := f.checkValid("SetWriteDeadline"); err != nil {
+ return err
+ }
+ return f.pfd.SetWriteDeadline(t)
+}
+
+// checkValid checks whether f is valid for use.
+// If not, it returns an appropriate error, perhaps incorporating the operation name op.
+func (f *File) checkValid(op string) error {
+ if f == nil {
+ return ErrInvalid
+ }
+ return nil
+}
+
+// ignoringEINTR makes a function call and repeats it if it returns an
+// EINTR error. This appears to be required even though we install all
+// signal handlers with SA_RESTART: see #22838, #38033, #38836, #40846.
+// Also #20400 and #36644 are issues in which a signal handler is
+// installed without setting SA_RESTART. None of these are the common case,
+// but there are enough of them that it seems that we can't avoid
+// an EINTR loop.
+func ignoringEINTR(fn func() error) error {
+ for {
+ err := fn()
+ if err != syscall.EINTR {
+ return err
+ }
+ }
+}
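
The same retry loop, sketched against an arbitrary EINTR-prone call from inside the package (path assumed):

	err := ignoringEINTR(func() error {
		return syscall.Chmod("/tmp/example", 0o644)
	})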
diff --git a/contrib/go/_std_1.19/src/os/file_unix.go b/contrib/go/_std_1.19/src/os/file_unix.go
new file mode 100644
index 0000000000..c30a6890de
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/file_unix.go
@@ -0,0 +1,430 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm)
+
+package os
+
+import (
+ "internal/poll"
+ "internal/syscall/unix"
+ "runtime"
+ "syscall"
+)
+
+// fixLongPath is a noop on non-Windows platforms.
+func fixLongPath(path string) string {
+ return path
+}
+
+func rename(oldname, newname string) error {
+ fi, err := Lstat(newname)
+ if err == nil && fi.IsDir() {
+ // There are two independent errors this function can return:
+ // one for a bad oldname, and one for a bad newname.
+ // At this point we've determined the newname is bad.
+ // But just in case oldname is also bad, prioritize returning
+ // the oldname error because that's what we did historically.
+ // However, if the old name and new name are not the same, yet
+ // they refer to the same file, it implies a case-only
+ // rename on a case-insensitive filesystem, which is ok.
+ if ofi, err := Lstat(oldname); err != nil {
+ if pe, ok := err.(*PathError); ok {
+ err = pe.Err
+ }
+ return &LinkError{"rename", oldname, newname, err}
+ } else if newname == oldname || !SameFile(fi, ofi) {
+ return &LinkError{"rename", oldname, newname, syscall.EEXIST}
+ }
+ }
+ err = ignoringEINTR(func() error {
+ return syscall.Rename(oldname, newname)
+ })
+ if err != nil {
+ return &LinkError{"rename", oldname, newname, err}
+ }
+ return nil
+}
+
+// file is the real representation of *File.
+// The extra level of indirection ensures that no clients of os
+// can overwrite this data, which could cause the finalizer
+// to close the wrong file descriptor.
+type file struct {
+ pfd poll.FD
+ name string
+ dirinfo *dirInfo // nil unless directory being read
+ nonblock bool // whether we set nonblocking mode
+ stdoutOrErr bool // whether this is stdout or stderr
+ appendMode bool // whether file is opened for appending
+}
+
+// Fd returns the integer Unix file descriptor referencing the open file.
+// If f is closed, the file descriptor becomes invalid.
+// If f is garbage collected, a finalizer may close the file descriptor,
+// making it invalid; see runtime.SetFinalizer for more information on when
+// a finalizer might be run. On Unix systems this will cause the SetDeadline
+// methods to stop working.
+// Because file descriptors can be reused, the returned file descriptor may
+// only be closed through the Close method of f, or by its finalizer during
+// garbage collection. Otherwise, during garbage collection the finalizer
+// may close an unrelated file descriptor with the same (reused) number.
+//
+// As an alternative, see the f.SyscallConn method.
+func (f *File) Fd() uintptr {
+ if f == nil {
+ return ^(uintptr(0))
+ }
+
+ // If we put the file descriptor into nonblocking mode,
+ // then set it to blocking mode before we return it,
+ // because historically we have always returned a descriptor
+ // opened in blocking mode. The File will continue to work,
+ // but any blocking operation will tie up a thread.
+ if f.nonblock {
+ f.pfd.SetBlocking()
+ }
+
+ return uintptr(f.pfd.Sysfd)
+}
+
+// NewFile returns a new File with the given file descriptor and
+// name. The returned value will be nil if fd is not a valid file
+// descriptor. On Unix systems, if the file descriptor is in
+// non-blocking mode, NewFile will attempt to return a pollable File
+// (one for which the SetDeadline methods work).
+//
+// After passing it to NewFile, fd may become invalid under the same
+// conditions described in the comments of the Fd method, and the same
+// constraints apply.
+func NewFile(fd uintptr, name string) *File {
+ kind := kindNewFile
+ if nb, err := unix.IsNonblock(int(fd)); err == nil && nb {
+ kind = kindNonBlock
+ }
+ return newFile(fd, name, kind)
+}
+
+// newFileKind describes the kind of file to newFile.
+type newFileKind int
+
+const (
+ kindNewFile newFileKind = iota
+ kindOpenFile
+ kindPipe
+ kindNonBlock
+)
+
+// newFile is like NewFile, but if called from OpenFile or Pipe
+// (as passed in the kind parameter) it tries to add the file to
+// the runtime poller.
+func newFile(fd uintptr, name string, kind newFileKind) *File {
+ fdi := int(fd)
+ if fdi < 0 {
+ return nil
+ }
+ f := &File{&file{
+ pfd: poll.FD{
+ Sysfd: fdi,
+ IsStream: true,
+ ZeroReadIsEOF: true,
+ },
+ name: name,
+ stdoutOrErr: fdi == 1 || fdi == 2,
+ }}
+
+ pollable := kind == kindOpenFile || kind == kindPipe || kind == kindNonBlock
+
+	// If the caller passed a non-blocking file descriptor (kindNonBlock),
+ // we assume they know what they are doing so we allow it to be
+ // used with kqueue.
+ if kind == kindOpenFile {
+ switch runtime.GOOS {
+ case "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd":
+ var st syscall.Stat_t
+ err := ignoringEINTR(func() error {
+ return syscall.Fstat(fdi, &st)
+ })
+ typ := st.Mode & syscall.S_IFMT
+ // Don't try to use kqueue with regular files on *BSDs.
+ // On FreeBSD a regular file is always
+ // reported as ready for writing.
+ // On Dragonfly, NetBSD and OpenBSD the fd is signaled
+ // only once as ready (both read and write).
+ // Issue 19093.
+ // Also don't add directories to the netpoller.
+ if err == nil && (typ == syscall.S_IFREG || typ == syscall.S_IFDIR) {
+ pollable = false
+ }
+
+ // In addition to the behavior described above for regular files,
+ // on Darwin, kqueue does not work properly with fifos:
+ // closing the last writer does not cause a kqueue event
+ // for any readers. See issue #24164.
+ if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && typ == syscall.S_IFIFO {
+ pollable = false
+ }
+ }
+ }
+
+ if err := f.pfd.Init("file", pollable); err != nil {
+ // An error here indicates a failure to register
+ // with the netpoll system. That can happen for
+ // a file descriptor that is not supported by
+ // epoll/kqueue; for example, disk files on
+ // Linux systems. We assume that any real error
+ // will show up in later I/O.
+ } else if pollable {
+ // We successfully registered with netpoll, so put
+ // the file into nonblocking mode.
+ if err := syscall.SetNonblock(fdi, true); err == nil {
+ f.nonblock = true
+ }
+ }
+
+ runtime.SetFinalizer(f.file, (*file).close)
+ return f
+}
+
+// epipecheck raises SIGPIPE if we get an EPIPE error on standard
+// output or standard error. See the SIGPIPE docs in os/signal, and
+// issue 11845.
+func epipecheck(file *File, e error) {
+ if e == syscall.EPIPE && file.stdoutOrErr {
+ sigpipe()
+ }
+}
+
+// DevNull is the name of the operating system's “null device.”
+// On Unix-like systems, it is "/dev/null"; on Windows, "NUL".
+const DevNull = "/dev/null"
+
+// openFileNolog is the Unix implementation of OpenFile.
+// Changes here should be reflected in openFdAt, if relevant.
+func openFileNolog(name string, flag int, perm FileMode) (*File, error) {
+ setSticky := false
+ if !supportsCreateWithStickyBit && flag&O_CREATE != 0 && perm&ModeSticky != 0 {
+ if _, err := Stat(name); IsNotExist(err) {
+ setSticky = true
+ }
+ }
+
+ var r int
+ for {
+ var e error
+ r, e = syscall.Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
+ if e == nil {
+ break
+ }
+
+ // We have to check EINTR here, per issues 11180 and 39237.
+ if e == syscall.EINTR {
+ continue
+ }
+
+ return nil, &PathError{Op: "open", Path: name, Err: e}
+ }
+
+ // open(2) itself won't handle the sticky bit on *BSD and Solaris
+ if setSticky {
+ setStickyBit(name)
+ }
+
+ // There's a race here with fork/exec, which we are
+ // content to live with. See ../syscall/exec_unix.go.
+ if !supportsCloseOnExec {
+ syscall.CloseOnExec(r)
+ }
+
+ return newFile(uintptr(r), name, kindOpenFile), nil
+}
+
+func (file *file) close() error {
+ if file == nil {
+ return syscall.EINVAL
+ }
+ if file.dirinfo != nil {
+ file.dirinfo.close()
+ file.dirinfo = nil
+ }
+ var err error
+ if e := file.pfd.Close(); e != nil {
+ if e == poll.ErrFileClosing {
+ e = ErrClosed
+ }
+ err = &PathError{Op: "close", Path: file.name, Err: e}
+ }
+
+ // no need for a finalizer anymore
+ runtime.SetFinalizer(file, nil)
+ return err
+}
+
+// seek sets the offset for the next Read or Write on file to offset, interpreted
+// according to whence: 0 means relative to the origin of the file, 1 means
+// relative to the current offset, and 2 means relative to the end.
+// It returns the new offset and an error, if any.
+func (f *File) seek(offset int64, whence int) (ret int64, err error) {
+ if f.dirinfo != nil {
+ // Free cached dirinfo, so we allocate a new one if we
+ // access this file as a directory again. See #35767 and #37161.
+ f.dirinfo.close()
+ f.dirinfo = nil
+ }
+ ret, err = f.pfd.Seek(offset, whence)
+ runtime.KeepAlive(f)
+ return ret, err
+}
+
+// Truncate changes the size of the named file.
+// If the file is a symbolic link, it changes the size of the link's target.
+// If there is an error, it will be of type *PathError.
+func Truncate(name string, size int64) error {
+ e := ignoringEINTR(func() error {
+ return syscall.Truncate(name, size)
+ })
+ if e != nil {
+ return &PathError{Op: "truncate", Path: name, Err: e}
+ }
+ return nil
+}
+
+// Remove removes the named file or (empty) directory.
+// If there is an error, it will be of type *PathError.
+func Remove(name string) error {
+ // System call interface forces us to know
+ // whether name is a file or directory.
+ // Try both: it is cheaper on average than
+ // doing a Stat plus the right one.
+ e := ignoringEINTR(func() error {
+ return syscall.Unlink(name)
+ })
+ if e == nil {
+ return nil
+ }
+ e1 := ignoringEINTR(func() error {
+ return syscall.Rmdir(name)
+ })
+ if e1 == nil {
+ return nil
+ }
+
+ // Both failed: figure out which error to return.
+ // OS X and Linux differ on whether unlink(dir)
+ // returns EISDIR, so can't use that. However,
+ // both agree that rmdir(file) returns ENOTDIR,
+ // so we can use that to decide which error is real.
+ // Rmdir might also return ENOTDIR if given a bad
+ // file path, like /etc/passwd/foo, but in that case,
+ // both errors will be ENOTDIR, so it's okay to
+ // use the error from unlink.
+ if e1 != syscall.ENOTDIR {
+ e = e1
+ }
+ return &PathError{Op: "remove", Path: name, Err: e}
+}
+
+func tempDir() string {
+ dir := Getenv("TMPDIR")
+ if dir == "" {
+ if runtime.GOOS == "android" {
+ dir = "/data/local/tmp"
+ } else {
+ dir = "/tmp"
+ }
+ }
+ return dir
+}
+
+// Link creates newname as a hard link to the oldname file.
+// If there is an error, it will be of type *LinkError.
+func Link(oldname, newname string) error {
+ e := ignoringEINTR(func() error {
+ return syscall.Link(oldname, newname)
+ })
+ if e != nil {
+ return &LinkError{"link", oldname, newname, e}
+ }
+ return nil
+}
+
+// Symlink creates newname as a symbolic link to oldname.
+// On Windows, a symlink to a non-existent oldname creates a file symlink;
+// if oldname is later created as a directory the symlink will not work.
+// If there is an error, it will be of type *LinkError.
+func Symlink(oldname, newname string) error {
+ e := ignoringEINTR(func() error {
+ return syscall.Symlink(oldname, newname)
+ })
+ if e != nil {
+ return &LinkError{"symlink", oldname, newname, e}
+ }
+ return nil
+}
+
+// Readlink returns the destination of the named symbolic link.
+// If there is an error, it will be of type *PathError.
+func Readlink(name string) (string, error) {
+ for len := 128; ; len *= 2 {
+ b := make([]byte, len)
+ var (
+ n int
+ e error
+ )
+ for {
+ n, e = fixCount(syscall.Readlink(name, b))
+ if e != syscall.EINTR {
+ break
+ }
+ }
+ // buffer too small
+ if runtime.GOOS == "aix" && e == syscall.ERANGE {
+ continue
+ }
+ if e != nil {
+ return "", &PathError{Op: "readlink", Path: name, Err: e}
+ }
+ if n < len {
+ return string(b[0:n]), nil
+ }
+ }
+}
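
Symlink and Readlink round-trip; both paths are assumed scratch locations:

	if err := os.Symlink("target.txt", "link.txt"); err != nil {
		log.Fatal(err)
	}
	dest, err := os.Readlink("link.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dest) // target.txt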
+
+type unixDirent struct {
+ parent string
+ name string
+ typ FileMode
+ info FileInfo
+}
+
+func (d *unixDirent) Name() string { return d.name }
+func (d *unixDirent) IsDir() bool { return d.typ.IsDir() }
+func (d *unixDirent) Type() FileMode { return d.typ }
+
+func (d *unixDirent) Info() (FileInfo, error) {
+ if d.info != nil {
+ return d.info, nil
+ }
+ return lstat(d.parent + "/" + d.name)
+}
+
+func newUnixDirent(parent, name string, typ FileMode) (DirEntry, error) {
+ ude := &unixDirent{
+ parent: parent,
+ name: name,
+ typ: typ,
+ }
+ if typ != ^FileMode(0) && !testingForceReadDirLstat {
+ return ude, nil
+ }
+
+ info, err := lstat(parent + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+
+ ude.typ = info.Mode().Type()
+ ude.info = info
+ return ude, nil
+}
diff --git a/contrib/go/_std_1.18/src/os/getwd.go b/contrib/go/_std_1.19/src/os/getwd.go
index 90604cf2f4..90604cf2f4 100644
--- a/contrib/go/_std_1.18/src/os/getwd.go
+++ b/contrib/go/_std_1.19/src/os/getwd.go
diff --git a/contrib/go/_std_1.18/src/os/path.go b/contrib/go/_std_1.19/src/os/path.go
index df87887b9b..df87887b9b 100644
--- a/contrib/go/_std_1.18/src/os/path.go
+++ b/contrib/go/_std_1.19/src/os/path.go
diff --git a/contrib/go/_std_1.19/src/os/path_unix.go b/contrib/go/_std_1.19/src/os/path_unix.go
new file mode 100644
index 0000000000..3c6310a4df
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/path_unix.go
@@ -0,0 +1,75 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm)
+
+package os
+
+const (
+ PathSeparator = '/' // OS-specific path separator
+ PathListSeparator = ':' // OS-specific path list separator
+)
+
+// IsPathSeparator reports whether c is a directory separator character.
+func IsPathSeparator(c uint8) bool {
+ return PathSeparator == c
+}
+
+// basename removes trailing slashes and the leading directory name from path name.
+func basename(name string) string {
+ i := len(name) - 1
+ // Remove trailing slashes
+ for ; i > 0 && name[i] == '/'; i-- {
+ name = name[:i]
+ }
+ // Remove leading directory name
+ for i--; i >= 0; i-- {
+ if name[i] == '/' {
+ name = name[i+1:]
+ break
+ }
+ }
+
+ return name
+}
+
+// splitPath returns the base name and parent directory.
+func splitPath(path string) (string, string) {
+ // if no better parent is found, the path is relative from "here"
+ dirname := "."
+
+ // Remove all but one leading slash.
+ for len(path) > 1 && path[0] == '/' && path[1] == '/' {
+ path = path[1:]
+ }
+
+ i := len(path) - 1
+
+ // Remove trailing slashes.
+ for ; i > 0 && path[i] == '/'; i-- {
+ path = path[:i]
+ }
+
+ // if no slashes in path, base is path
+ basename := path
+
+ // Remove leading directory path
+ for i--; i >= 0; i-- {
+ if path[i] == '/' {
+ if i == 0 {
+ dirname = path[:1]
+ } else {
+ dirname = path[:i]
+ }
+ basename = path[i+1:]
+ break
+ }
+ }
+
+ return dirname, basename
+}
+
+func fixRootDirectory(p string) string {
+ return p
+}
diff --git a/contrib/go/_std_1.19/src/os/pipe2_unix.go b/contrib/go/_std_1.19/src/os/pipe2_unix.go
new file mode 100644
index 0000000000..1e2e8ccb67
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/pipe2_unix.go
@@ -0,0 +1,22 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build dragonfly || freebsd || linux || netbsd || openbsd || solaris
+
+package os
+
+import "syscall"
+
+// Pipe returns a connected pair of Files; reads from r return bytes written to w.
+// It returns the files and an error, if any.
+func Pipe() (r *File, w *File, err error) {
+ var p [2]int
+
+ e := syscall.Pipe2(p[0:], syscall.O_CLOEXEC)
+ if e != nil {
+ return nil, nil, NewSyscallError("pipe2", e)
+ }
+
+ return newFile(uintptr(p[0]), "|0", kindPipe), newFile(uintptr(p[1]), "|1", kindPipe), nil
+}
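
A sketch of the pair in use; the writer must be closed for the reader to see EOF:

	r, w, err := os.Pipe()
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		w.Write([]byte("ping"))
		w.Close()
	}()
	msg, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", msg) // ping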
diff --git a/contrib/go/_std_1.19/src/os/pipe_unix.go b/contrib/go/_std_1.19/src/os/pipe_unix.go
new file mode 100644
index 0000000000..710f77670e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/pipe_unix.go
@@ -0,0 +1,28 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || (js && wasm)
+
+package os
+
+import "syscall"
+
+// Pipe returns a connected pair of Files; reads from r return bytes written to w.
+// It returns the files and an error, if any.
+func Pipe() (r *File, w *File, err error) {
+ var p [2]int
+
+ // See ../syscall/exec.go for description of lock.
+ syscall.ForkLock.RLock()
+ e := syscall.Pipe(p[0:])
+ if e != nil {
+ syscall.ForkLock.RUnlock()
+ return nil, nil, NewSyscallError("pipe", e)
+ }
+ syscall.CloseOnExec(p[0])
+ syscall.CloseOnExec(p[1])
+ syscall.ForkLock.RUnlock()
+
+ return newFile(uintptr(p[0]), "|0", kindPipe), newFile(uintptr(p[1]), "|1", kindPipe), nil
+}
diff --git a/contrib/go/_std_1.18/src/os/proc.go b/contrib/go/_std_1.19/src/os/proc.go
index cbd5a6aad9..cbd5a6aad9 100644
--- a/contrib/go/_std_1.18/src/os/proc.go
+++ b/contrib/go/_std_1.19/src/os/proc.go
diff --git a/contrib/go/_std_1.18/src/os/rawconn.go b/contrib/go/_std_1.19/src/os/rawconn.go
index 14a495d9c0..14a495d9c0 100644
--- a/contrib/go/_std_1.18/src/os/rawconn.go
+++ b/contrib/go/_std_1.19/src/os/rawconn.go
diff --git a/contrib/go/_std_1.18/src/os/readfrom_linux.go b/contrib/go/_std_1.19/src/os/readfrom_linux.go
index 63ea45cf65..63ea45cf65 100644
--- a/contrib/go/_std_1.18/src/os/readfrom_linux.go
+++ b/contrib/go/_std_1.19/src/os/readfrom_linux.go
diff --git a/contrib/go/_std_1.18/src/os/readfrom_stub.go b/contrib/go/_std_1.19/src/os/readfrom_stub.go
index 8b7d5fb8f9..8b7d5fb8f9 100644
--- a/contrib/go/_std_1.18/src/os/readfrom_stub.go
+++ b/contrib/go/_std_1.19/src/os/readfrom_stub.go
diff --git a/contrib/go/_std_1.19/src/os/removeall_at.go b/contrib/go/_std_1.19/src/os/removeall_at.go
new file mode 100644
index 0000000000..8b46152a9e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/removeall_at.go
@@ -0,0 +1,192 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package os
+
+import (
+ "internal/syscall/unix"
+ "io"
+ "syscall"
+)
+
+func removeAll(path string) error {
+ if path == "" {
+ // fail silently to retain compatibility with previous behavior
+ // of RemoveAll. See issue 28830.
+ return nil
+ }
+
+ // The rmdir system call does not permit removing ".",
+ // so we don't permit it either.
+ if endsWithDot(path) {
+ return &PathError{Op: "RemoveAll", Path: path, Err: syscall.EINVAL}
+ }
+
+ // Simple case: if Remove works, we're done.
+ err := Remove(path)
+ if err == nil || IsNotExist(err) {
+ return nil
+ }
+
+ // RemoveAll recurses by deleting the path base from
+ // its parent directory
+ parentDir, base := splitPath(path)
+
+ parent, err := Open(parentDir)
+ if IsNotExist(err) {
+ // If parent does not exist, base cannot exist. Fail silently
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ defer parent.Close()
+
+ if err := removeAllFrom(parent, base); err != nil {
+ if pathErr, ok := err.(*PathError); ok {
+ pathErr.Path = parentDir + string(PathSeparator) + pathErr.Path
+ err = pathErr
+ }
+ return err
+ }
+ return nil
+}
+
+func removeAllFrom(parent *File, base string) error {
+ parentFd := int(parent.Fd())
+ // Simple case: if Unlink (aka remove) works, we're done.
+ err := unix.Unlinkat(parentFd, base, 0)
+ if err == nil || IsNotExist(err) {
+ return nil
+ }
+
+ // EISDIR means that we have a directory, and we need to
+ // remove its contents.
+ // EPERM or EACCES means that we don't have write permission on
+ // the parent directory, but this entry might still be a directory
+ // whose contents need to be removed.
+ // Otherwise just return the error.
+ if err != syscall.EISDIR && err != syscall.EPERM && err != syscall.EACCES {
+ return &PathError{Op: "unlinkat", Path: base, Err: err}
+ }
+
+ // Is this a directory we need to recurse into?
+ var statInfo syscall.Stat_t
+ statErr := unix.Fstatat(parentFd, base, &statInfo, unix.AT_SYMLINK_NOFOLLOW)
+ if statErr != nil {
+ if IsNotExist(statErr) {
+ return nil
+ }
+ return &PathError{Op: "fstatat", Path: base, Err: statErr}
+ }
+ if statInfo.Mode&syscall.S_IFMT != syscall.S_IFDIR {
+ // Not a directory; return the error from the unix.Unlinkat.
+ return &PathError{Op: "unlinkat", Path: base, Err: err}
+ }
+
+ // Remove the directory's entries.
+ var recurseErr error
+ for {
+ const reqSize = 1024
+ var respSize int
+
+ // Open the directory to recurse into
+ file, err := openFdAt(parentFd, base)
+ if err != nil {
+ if IsNotExist(err) {
+ return nil
+ }
+ recurseErr = &PathError{Op: "openfdat", Path: base, Err: err}
+ break
+ }
+
+ for {
+ numErr := 0
+
+ names, readErr := file.Readdirnames(reqSize)
+ // Errors other than EOF should stop us from continuing.
+ if readErr != nil && readErr != io.EOF {
+ file.Close()
+ if IsNotExist(readErr) {
+ return nil
+ }
+ return &PathError{Op: "readdirnames", Path: base, Err: readErr}
+ }
+
+ respSize = len(names)
+ for _, name := range names {
+ err := removeAllFrom(file, name)
+ if err != nil {
+ if pathErr, ok := err.(*PathError); ok {
+ pathErr.Path = base + string(PathSeparator) + pathErr.Path
+ }
+ numErr++
+ if recurseErr == nil {
+ recurseErr = err
+ }
+ }
+ }
+
+ // If we can delete any entry, break to start new iteration.
+ // Otherwise, we discard current names, get next entries and try deleting them.
+ if numErr != reqSize {
+ break
+ }
+ }
+
+ // Removing files from the directory may have caused
+ // the OS to reshuffle it. Simply calling Readdirnames
+ // again may skip some entries. The only reliable way
+ // to avoid this is to close and re-open the
+ // directory. See issue 20841.
+ file.Close()
+
+ // Finish when the end of the directory is reached
+ if respSize < reqSize {
+ break
+ }
+ }
+
+ // Remove the directory itself.
+ unlinkError := unix.Unlinkat(parentFd, base, unix.AT_REMOVEDIR)
+ if unlinkError == nil || IsNotExist(unlinkError) {
+ return nil
+ }
+
+ if recurseErr != nil {
+ return recurseErr
+ }
+ return &PathError{Op: "unlinkat", Path: base, Err: unlinkError}
+}
+
+// openFdAt opens path relative to the directory in fd.
+// Other than that this should act like openFileNolog.
+// This acts like openFileNolog rather than OpenFile because
+// we are going to (try to) remove the file.
+// The contents of this file are not relevant for test caching.
+func openFdAt(dirfd int, name string) (*File, error) {
+ var r int
+ for {
+ var e error
+ r, e = unix.Openat(dirfd, name, O_RDONLY|syscall.O_CLOEXEC, 0)
+ if e == nil {
+ break
+ }
+
+ // See comment in openFileNolog.
+ if e == syscall.EINTR {
+ continue
+ }
+
+ return nil, e
+ }
+
+ if !supportsCloseOnExec {
+ syscall.CloseOnExec(r)
+ }
+
+ return newFile(uintptr(r), name, kindOpenFile), nil
+}
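
A short sketch of the exported behavior implemented above: os.RemoveAll deletes an entire tree, treats a missing path as success, and reports failures as *fs.PathError with the path prefix assembled as in removeAllFrom. The temp-dir layout here is arbitrary.

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "removeall-demo")
	if err != nil {
		panic(err)
	}
	if err := os.MkdirAll(filepath.Join(dir, "a", "b"), 0o755); err != nil {
		panic(err)
	}

	// Removes dir and everything beneath it.
	if err := os.RemoveAll(dir); err != nil {
		var pe *fs.PathError
		if errors.As(err, &pe) {
			fmt.Println("op:", pe.Op, "path:", pe.Path, "err:", pe.Err)
		}
		return
	}

	// A second call is a no-op: a missing path is not an error.
	fmt.Println(os.RemoveAll(dir)) // <nil>
}
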
diff --git a/contrib/go/_std_1.19/src/os/rlimit.go b/contrib/go/_std_1.19/src/os/rlimit.go
new file mode 100644
index 0000000000..a89414d098
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/rlimit.go
@@ -0,0 +1,32 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
+
+package os
+
+import "syscall"
+
+// Some systems set an artificially low soft limit on open file count, for compatibility
+// with code that uses select and its hard-coded maximum file descriptor
+// (limited by the size of fd_set).
+//
+// Go does not use select, so it should not be subject to these limits.
+// On some systems the limit is 256, which is very easy to run into,
+// even in simple programs like gofmt when they parallelize walking
+// a file tree.
+//
+// After a long discussion on go.dev/issue/46279, we decided the
+// best approach was for Go to raise the limit unconditionally for itself,
+// and then leave old software to set the limit back as needed.
+// Code that really wants Go to leave the limit alone can set the hard limit,
+// which Go of course has no choice but to respect.
+func init() {
+ var lim syscall.Rlimit
+ if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lim); err == nil && lim.Cur != lim.Max {
+ lim.Cur = lim.Max
+ adjustFileLimit(&lim)
+ syscall.Setrlimit(syscall.RLIMIT_NOFILE, &lim)
+ }
+}
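
A sketch that makes the effect of this init observable on a Unix system: by the time main runs, the soft NOFILE limit has normally been raised to the hard limit (unless the hard limit itself was lowered externally).

package main

import (
	"fmt"
	"os" // importing os (here, for Stdout) triggers the init above
	"syscall"
)

func main() {
	var lim syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lim); err != nil {
		panic(err)
	}
	// Expect soft == hard in a typical environment.
	fmt.Fprintf(os.Stdout, "soft=%d hard=%d\n", lim.Cur, lim.Max)
}
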
diff --git a/contrib/go/_std_1.19/src/os/rlimit_darwin.go b/contrib/go/_std_1.19/src/os/rlimit_darwin.go
new file mode 100644
index 0000000000..b28982a83a
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/rlimit_darwin.go
@@ -0,0 +1,22 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin
+
+package os
+
+import "syscall"
+
+// adjustFileLimit adds per-OS limitations on the Rlimit used for RLIMIT_NOFILE. See rlimit.go.
+func adjustFileLimit(lim *syscall.Rlimit) {
+ // On older macOS, setrlimit(RLIMIT_NOFILE, lim) with lim.Cur = infinity fails.
+ // Set to the value of kern.maxfilesperproc instead.
+ n, err := syscall.SysctlUint32("kern.maxfilesperproc")
+ if err != nil {
+ return
+ }
+ if lim.Cur > uint64(n) {
+ lim.Cur = uint64(n)
+ }
+}
diff --git a/contrib/go/_std_1.19/src/os/rlimit_stub.go b/contrib/go/_std_1.19/src/os/rlimit_stub.go
new file mode 100644
index 0000000000..cbe28400c5
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/rlimit_stub.go
@@ -0,0 +1,12 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris
+
+package os
+
+import "syscall"
+
+// adjustFileLimit adds per-OS limitations on the Rlimit used for RLIMIT_NOFILE. See rlimit.go.
+func adjustFileLimit(lim *syscall.Rlimit) {}
diff --git a/contrib/go/_std_1.18/src/os/stat.go b/contrib/go/_std_1.19/src/os/stat.go
index af66838e3e..af66838e3e 100644
--- a/contrib/go/_std_1.18/src/os/stat.go
+++ b/contrib/go/_std_1.19/src/os/stat.go
diff --git a/contrib/go/_std_1.19/src/os/stat_darwin.go b/contrib/go/_std_1.19/src/os/stat_darwin.go
new file mode 100644
index 0000000000..b92ffd4a0a
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/stat_darwin.go
@@ -0,0 +1,47 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package os
+
+import (
+ "syscall"
+ "time"
+)
+
+func fillFileStatFromSys(fs *fileStat, name string) {
+ fs.name = basename(name)
+ fs.size = fs.sys.Size
+ fs.modTime = time.Unix(fs.sys.Mtimespec.Unix())
+ fs.mode = FileMode(fs.sys.Mode & 0777)
+ switch fs.sys.Mode & syscall.S_IFMT {
+ case syscall.S_IFBLK, syscall.S_IFWHT:
+ fs.mode |= ModeDevice
+ case syscall.S_IFCHR:
+ fs.mode |= ModeDevice | ModeCharDevice
+ case syscall.S_IFDIR:
+ fs.mode |= ModeDir
+ case syscall.S_IFIFO:
+ fs.mode |= ModeNamedPipe
+ case syscall.S_IFLNK:
+ fs.mode |= ModeSymlink
+ case syscall.S_IFREG:
+ // nothing to do
+ case syscall.S_IFSOCK:
+ fs.mode |= ModeSocket
+ }
+ if fs.sys.Mode&syscall.S_ISGID != 0 {
+ fs.mode |= ModeSetgid
+ }
+ if fs.sys.Mode&syscall.S_ISUID != 0 {
+ fs.mode |= ModeSetuid
+ }
+ if fs.sys.Mode&syscall.S_ISVTX != 0 {
+ fs.mode |= ModeSticky
+ }
+}
+
+// For testing.
+func atime(fi FileInfo) time.Time {
+ return time.Unix(fi.Sys().(*syscall.Stat_t).Atimespec.Unix())
+}
diff --git a/contrib/go/_std_1.19/src/os/stat_linux.go b/contrib/go/_std_1.19/src/os/stat_linux.go
new file mode 100644
index 0000000000..316c26c7ca
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/stat_linux.go
@@ -0,0 +1,47 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package os
+
+import (
+ "syscall"
+ "time"
+)
+
+func fillFileStatFromSys(fs *fileStat, name string) {
+ fs.name = basename(name)
+ fs.size = fs.sys.Size
+ fs.modTime = time.Unix(fs.sys.Mtim.Unix())
+ fs.mode = FileMode(fs.sys.Mode & 0777)
+ switch fs.sys.Mode & syscall.S_IFMT {
+ case syscall.S_IFBLK:
+ fs.mode |= ModeDevice
+ case syscall.S_IFCHR:
+ fs.mode |= ModeDevice | ModeCharDevice
+ case syscall.S_IFDIR:
+ fs.mode |= ModeDir
+ case syscall.S_IFIFO:
+ fs.mode |= ModeNamedPipe
+ case syscall.S_IFLNK:
+ fs.mode |= ModeSymlink
+ case syscall.S_IFREG:
+ // nothing to do
+ case syscall.S_IFSOCK:
+ fs.mode |= ModeSocket
+ }
+ if fs.sys.Mode&syscall.S_ISGID != 0 {
+ fs.mode |= ModeSetgid
+ }
+ if fs.sys.Mode&syscall.S_ISUID != 0 {
+ fs.mode |= ModeSetuid
+ }
+ if fs.sys.Mode&syscall.S_ISVTX != 0 {
+ fs.mode |= ModeSticky
+ }
+}
+
+// For testing.
+func atime(fi FileInfo) time.Time {
+ return time.Unix(fi.Sys().(*syscall.Stat_t).Atim.Unix())
+}
diff --git a/contrib/go/_std_1.19/src/os/stat_unix.go b/contrib/go/_std_1.19/src/os/stat_unix.go
new file mode 100644
index 0000000000..437afc02b4
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/stat_unix.go
@@ -0,0 +1,52 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm)
+
+package os
+
+import (
+ "syscall"
+)
+
+// Stat returns the FileInfo structure describing file.
+// If there is an error, it will be of type *PathError.
+func (f *File) Stat() (FileInfo, error) {
+ if f == nil {
+ return nil, ErrInvalid
+ }
+ var fs fileStat
+ err := f.pfd.Fstat(&fs.sys)
+ if err != nil {
+ return nil, &PathError{Op: "stat", Path: f.name, Err: err}
+ }
+ fillFileStatFromSys(&fs, f.name)
+ return &fs, nil
+}
+
+// statNolog stats a file with no test logging.
+func statNolog(name string) (FileInfo, error) {
+ var fs fileStat
+ err := ignoringEINTR(func() error {
+ return syscall.Stat(name, &fs.sys)
+ })
+ if err != nil {
+ return nil, &PathError{Op: "stat", Path: name, Err: err}
+ }
+ fillFileStatFromSys(&fs, name)
+ return &fs, nil
+}
+
+// lstatNolog lstats a file with no test logging.
+func lstatNolog(name string) (FileInfo, error) {
+ var fs fileStat
+ err := ignoringEINTR(func() error {
+ return syscall.Lstat(name, &fs.sys)
+ })
+ if err != nil {
+ return nil, &PathError{Op: "lstat", Path: name, Err: err}
+ }
+ fillFileStatFromSys(&fs, name)
+ return &fs, nil
+}
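
A usage sketch for the Stat path above, reading back a few of the mode bits that fillFileStatFromSys derives from st_mode; /tmp is just a convenient directory that is sticky on most Unix systems.

package main

import (
	"fmt"
	"os"
)

func main() {
	fi, err := os.Stat("/tmp")
	if err != nil {
		fmt.Println(err) // a *PathError with Op == "stat"
		return
	}
	fmt.Println("name:", fi.Name()) // "tmp" (via basename)
	fmt.Println("dir:", fi.IsDir()) // true (ModeDir from S_IFDIR)
	fmt.Println("sticky:", fi.Mode()&os.ModeSticky != 0) // usually true for /tmp
	fmt.Println("mtime:", fi.ModTime())
}
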
diff --git a/contrib/go/_std_1.18/src/os/sticky_bsd.go b/contrib/go/_std_1.19/src/os/sticky_bsd.go
index e71daf7c74..e71daf7c74 100644
--- a/contrib/go/_std_1.18/src/os/sticky_bsd.go
+++ b/contrib/go/_std_1.19/src/os/sticky_bsd.go
diff --git a/contrib/go/_std_1.18/src/os/sticky_notbsd.go b/contrib/go/_std_1.19/src/os/sticky_notbsd.go
index 9a87fbde92..9a87fbde92 100644
--- a/contrib/go/_std_1.18/src/os/sticky_notbsd.go
+++ b/contrib/go/_std_1.19/src/os/sticky_notbsd.go
diff --git a/contrib/go/_std_1.18/src/os/str.go b/contrib/go/_std_1.19/src/os/str.go
index 35643e0d2f..35643e0d2f 100644
--- a/contrib/go/_std_1.18/src/os/str.go
+++ b/contrib/go/_std_1.19/src/os/str.go
diff --git a/contrib/go/_std_1.18/src/os/sys.go b/contrib/go/_std_1.19/src/os/sys.go
index 28b0f6bab0..28b0f6bab0 100644
--- a/contrib/go/_std_1.18/src/os/sys.go
+++ b/contrib/go/_std_1.19/src/os/sys.go
diff --git a/contrib/go/_std_1.18/src/os/sys_bsd.go b/contrib/go/_std_1.19/src/os/sys_bsd.go
index e272c24571..e272c24571 100644
--- a/contrib/go/_std_1.18/src/os/sys_bsd.go
+++ b/contrib/go/_std_1.19/src/os/sys_bsd.go
diff --git a/contrib/go/_std_1.18/src/os/sys_linux.go b/contrib/go/_std_1.19/src/os/sys_linux.go
index 36a8a24455..36a8a24455 100644
--- a/contrib/go/_std_1.18/src/os/sys_linux.go
+++ b/contrib/go/_std_1.19/src/os/sys_linux.go
diff --git a/contrib/go/_std_1.19/src/os/sys_unix.go b/contrib/go/_std_1.19/src/os/sys_unix.go
new file mode 100644
index 0000000000..79005c2cbd
--- /dev/null
+++ b/contrib/go/_std_1.19/src/os/sys_unix.go
@@ -0,0 +1,14 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package os
+
+// supportsCloseOnExec reports whether the platform supports the
+// O_CLOEXEC flag.
+// On Darwin, the O_CLOEXEC flag was introduced in OS X 10.7 (Darwin 11.0.0).
+// See https://support.apple.com/kb/HT1633.
+// On FreeBSD, the O_CLOEXEC flag was introduced in version 8.3.
+const supportsCloseOnExec = true
diff --git a/contrib/go/_std_1.18/src/os/tempfile.go b/contrib/go/_std_1.19/src/os/tempfile.go
index 3be3d13dfb..3be3d13dfb 100644
--- a/contrib/go/_std_1.18/src/os/tempfile.go
+++ b/contrib/go/_std_1.19/src/os/tempfile.go
diff --git a/contrib/go/_std_1.18/src/os/types.go b/contrib/go/_std_1.19/src/os/types.go
index d8edd98b68..d8edd98b68 100644
--- a/contrib/go/_std_1.18/src/os/types.go
+++ b/contrib/go/_std_1.19/src/os/types.go
diff --git a/contrib/go/_std_1.18/src/os/types_unix.go b/contrib/go/_std_1.19/src/os/types_unix.go
index 1b90a5a141..1b90a5a141 100644
--- a/contrib/go/_std_1.18/src/os/types_unix.go
+++ b/contrib/go/_std_1.19/src/os/types_unix.go
diff --git a/contrib/go/_std_1.18/src/os/wait_unimp.go b/contrib/go/_std_1.19/src/os/wait_unimp.go
index 721b9f9f7e..721b9f9f7e 100644
--- a/contrib/go/_std_1.18/src/os/wait_unimp.go
+++ b/contrib/go/_std_1.19/src/os/wait_unimp.go
diff --git a/contrib/go/_std_1.18/src/os/wait_waitid.go b/contrib/go/_std_1.19/src/os/wait_waitid.go
index c0503b209c..c0503b209c 100644
--- a/contrib/go/_std_1.18/src/os/wait_waitid.go
+++ b/contrib/go/_std_1.19/src/os/wait_waitid.go
diff --git a/contrib/go/_std_1.19/src/path/filepath/match.go b/contrib/go/_std_1.19/src/path/filepath/match.go
new file mode 100644
index 0000000000..b5cc4b8cf3
--- /dev/null
+++ b/contrib/go/_std_1.19/src/path/filepath/match.go
@@ -0,0 +1,369 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filepath
+
+import (
+ "errors"
+ "os"
+ "runtime"
+ "sort"
+ "strings"
+ "unicode/utf8"
+)
+
+// ErrBadPattern indicates a pattern was malformed.
+var ErrBadPattern = errors.New("syntax error in pattern")
+
+// Match reports whether name matches the shell file name pattern.
+// The pattern syntax is:
+//
+// pattern:
+// { term }
+// term:
+// '*' matches any sequence of non-Separator characters
+// '?' matches any single non-Separator character
+// '[' [ '^' ] { character-range } ']'
+// character class (must be non-empty)
+// c matches character c (c != '*', '?', '\\', '[')
+// '\\' c matches character c
+//
+// character-range:
+// c matches character c (c != '\\', '-', ']')
+// '\\' c matches character c
+// lo '-' hi matches character c for lo <= c <= hi
+//
+// Match requires pattern to match all of name, not just a substring.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+// On Windows, escaping is disabled. Instead, '\\' is treated as
+// path separator.
+func Match(pattern, name string) (matched bool, err error) {
+Pattern:
+ for len(pattern) > 0 {
+ var star bool
+ var chunk string
+ star, chunk, pattern = scanChunk(pattern)
+ if star && chunk == "" {
+ // Trailing * matches rest of string unless it has a /.
+ return !strings.Contains(name, string(Separator)), nil
+ }
+ // Look for match at current position.
+ t, ok, err := matchChunk(chunk, name)
+ // if we're the last chunk, make sure we've exhausted the name
+ // otherwise we'll give a false result even if we could still match
+ // using the star
+ if ok && (len(t) == 0 || len(pattern) > 0) {
+ name = t
+ continue
+ }
+ if err != nil {
+ return false, err
+ }
+ if star {
+ // Look for match skipping i+1 bytes.
+ // Cannot skip /.
+ for i := 0; i < len(name) && name[i] != Separator; i++ {
+ t, ok, err := matchChunk(chunk, name[i+1:])
+ if ok {
+ // if we're the last chunk, make sure we exhausted the name
+ if len(pattern) == 0 && len(t) > 0 {
+ continue
+ }
+ name = t
+ continue Pattern
+ }
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+ return false, nil
+ }
+ return len(name) == 0, nil
+}
+
+// scanChunk gets the next segment of pattern, which is a non-star string
+// possibly preceded by a star.
+func scanChunk(pattern string) (star bool, chunk, rest string) {
+ for len(pattern) > 0 && pattern[0] == '*' {
+ pattern = pattern[1:]
+ star = true
+ }
+ inrange := false
+ var i int
+Scan:
+ for i = 0; i < len(pattern); i++ {
+ switch pattern[i] {
+ case '\\':
+ if runtime.GOOS != "windows" {
+ // error check handled in matchChunk: bad pattern.
+ if i+1 < len(pattern) {
+ i++
+ }
+ }
+ case '[':
+ inrange = true
+ case ']':
+ inrange = false
+ case '*':
+ if !inrange {
+ break Scan
+ }
+ }
+ }
+ return star, pattern[0:i], pattern[i:]
+}
+
+// matchChunk checks whether chunk matches the beginning of s.
+// If so, it returns the remainder of s (after the match).
+// Chunk is all single-character operators: literals, char classes, and ?.
+func matchChunk(chunk, s string) (rest string, ok bool, err error) {
+ // failed records whether the match has failed.
+ // After the match fails, the loop continues on processing chunk,
+ // checking that the pattern is well-formed but no longer reading s.
+ failed := false
+ for len(chunk) > 0 {
+ if !failed && len(s) == 0 {
+ failed = true
+ }
+ switch chunk[0] {
+ case '[':
+ // character class
+ var r rune
+ if !failed {
+ var n int
+ r, n = utf8.DecodeRuneInString(s)
+ s = s[n:]
+ }
+ chunk = chunk[1:]
+ // possibly negated
+ negated := false
+ if len(chunk) > 0 && chunk[0] == '^' {
+ negated = true
+ chunk = chunk[1:]
+ }
+ // parse all ranges
+ match := false
+ nrange := 0
+ for {
+ if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
+ chunk = chunk[1:]
+ break
+ }
+ var lo, hi rune
+ if lo, chunk, err = getEsc(chunk); err != nil {
+ return "", false, err
+ }
+ hi = lo
+ if chunk[0] == '-' {
+ if hi, chunk, err = getEsc(chunk[1:]); err != nil {
+ return "", false, err
+ }
+ }
+ if lo <= r && r <= hi {
+ match = true
+ }
+ nrange++
+ }
+ if match == negated {
+ failed = true
+ }
+
+ case '?':
+ if !failed {
+ if s[0] == Separator {
+ failed = true
+ }
+ _, n := utf8.DecodeRuneInString(s)
+ s = s[n:]
+ }
+ chunk = chunk[1:]
+
+ case '\\':
+ if runtime.GOOS != "windows" {
+ chunk = chunk[1:]
+ if len(chunk) == 0 {
+ return "", false, ErrBadPattern
+ }
+ }
+ fallthrough
+
+ default:
+ if !failed {
+ if chunk[0] != s[0] {
+ failed = true
+ }
+ s = s[1:]
+ }
+ chunk = chunk[1:]
+ }
+ }
+ if failed {
+ return "", false, nil
+ }
+ return s, true, nil
+}
+
+// getEsc gets a possibly-escaped character from chunk, for a character class.
+func getEsc(chunk string) (r rune, nchunk string, err error) {
+ if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
+ err = ErrBadPattern
+ return
+ }
+ if chunk[0] == '\\' && runtime.GOOS != "windows" {
+ chunk = chunk[1:]
+ if len(chunk) == 0 {
+ err = ErrBadPattern
+ return
+ }
+ }
+ r, n := utf8.DecodeRuneInString(chunk)
+ if r == utf8.RuneError && n == 1 {
+ err = ErrBadPattern
+ }
+ nchunk = chunk[n:]
+ if len(nchunk) == 0 {
+ err = ErrBadPattern
+ }
+ return
+}
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of patterns is the same
+// as in Match. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed (assuming the Separator is '/').
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+func Glob(pattern string) (matches []string, err error) {
+ return globWithLimit(pattern, 0)
+}
+
+func globWithLimit(pattern string, depth int) (matches []string, err error) {
+ // This limit is used to prevent stack exhaustion issues. See CVE-2022-30632.
+ const pathSeparatorsLimit = 10000
+ if depth == pathSeparatorsLimit {
+ return nil, ErrBadPattern
+ }
+
+ // Check pattern is well-formed.
+ if _, err := Match(pattern, ""); err != nil {
+ return nil, err
+ }
+ if !hasMeta(pattern) {
+ if _, err = os.Lstat(pattern); err != nil {
+ return nil, nil
+ }
+ return []string{pattern}, nil
+ }
+
+ dir, file := Split(pattern)
+ volumeLen := 0
+ if runtime.GOOS == "windows" {
+ volumeLen, dir = cleanGlobPathWindows(dir)
+ } else {
+ dir = cleanGlobPath(dir)
+ }
+
+ if !hasMeta(dir[volumeLen:]) {
+ return glob(dir, file, nil)
+ }
+
+ // Prevent infinite recursion. See issue 15879.
+ if dir == pattern {
+ return nil, ErrBadPattern
+ }
+
+ var m []string
+ m, err = globWithLimit(dir, depth+1)
+ if err != nil {
+ return
+ }
+ for _, d := range m {
+ matches, err = glob(d, file, matches)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// cleanGlobPath prepares path for glob matching.
+func cleanGlobPath(path string) string {
+ switch path {
+ case "":
+ return "."
+ case string(Separator):
+ // do nothing to the path
+ return path
+ default:
+ return path[0 : len(path)-1] // chop off trailing separator
+ }
+}
+
+// cleanGlobPathWindows is the Windows version of cleanGlobPath.
+func cleanGlobPathWindows(path string) (prefixLen int, cleaned string) {
+ vollen := volumeNameLen(path)
+ switch {
+ case path == "":
+ return 0, "."
+ case vollen+1 == len(path) && os.IsPathSeparator(path[len(path)-1]): // /, \, C:\ and C:/
+ // do nothing to the path
+ return vollen + 1, path
+ case vollen == len(path) && len(path) == 2: // C:
+ return vollen, path + "." // convert C: into C:.
+ default:
+ if vollen >= len(path) {
+ vollen = len(path) - 1
+ }
+ return vollen, path[0 : len(path)-1] // chop off trailing separator
+ }
+}
+
+// glob searches for files matching pattern in the directory dir
+// and appends them to matches. If the directory cannot be
+// opened, it returns the existing matches. New matches are
+// added in lexicographical order.
+func glob(dir, pattern string, matches []string) (m []string, e error) {
+ m = matches
+ fi, err := os.Stat(dir)
+ if err != nil {
+ return // ignore I/O error
+ }
+ if !fi.IsDir() {
+ return // ignore I/O error
+ }
+ d, err := os.Open(dir)
+ if err != nil {
+ return // ignore I/O error
+ }
+ defer d.Close()
+
+ names, _ := d.Readdirnames(-1)
+ sort.Strings(names)
+
+ for _, n := range names {
+ matched, err := Match(pattern, n)
+ if err != nil {
+ return m, err
+ }
+ if matched {
+ m = append(m, Join(dir, n))
+ }
+ }
+ return
+}
+
+// hasMeta reports whether path contains any of the magic characters
+// recognized by Match.
+func hasMeta(path string) bool {
+ magicChars := `*?[`
+ if runtime.GOOS != "windows" {
+ magicChars = `*?[\`
+ }
+ return strings.ContainsAny(path, magicChars)
+}
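
A short sketch of Match and Glob as specified above; note that '*' never crosses a Separator, and Glob ignores I/O errors such as unreadable directories.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// '*' does not match across the path separator.
	fmt.Println(filepath.Match("src/*.go", "src/main.go"))   // true <nil>
	fmt.Println(filepath.Match("src/*.go", "src/a/main.go")) // false <nil>

	// A malformed character class is the only error case.
	_, err := filepath.Match("[a-", "a")
	fmt.Println(err) // syntax error in pattern

	// Glob expands each metacharacter-bearing element in turn.
	matches, _ := filepath.Glob("/usr/*/go")
	fmt.Println(matches)
}
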
diff --git a/contrib/go/_std_1.19/src/path/filepath/path.go b/contrib/go/_std_1.19/src/path/filepath/path.go
new file mode 100644
index 0000000000..de7a2c758b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/path/filepath/path.go
@@ -0,0 +1,615 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package filepath implements utility routines for manipulating filename paths
+// in a way compatible with the target operating system-defined file paths.
+//
+// The filepath package uses either forward slashes or backslashes,
+// depending on the operating system. To process paths such as URLs
+// that always use forward slashes regardless of the operating
+// system, see the path package.
+package filepath
+
+import (
+ "errors"
+ "io/fs"
+ "os"
+ "sort"
+ "strings"
+)
+
+// A lazybuf is a lazily constructed path buffer.
+// It supports append, reading previously appended bytes,
+// and retrieving the final string. It does not allocate a buffer
+// to hold the output until that output diverges from path.
+type lazybuf struct {
+ path string
+ buf []byte
+ w int
+ volAndPath string
+ volLen int
+}
+
+func (b *lazybuf) index(i int) byte {
+ if b.buf != nil {
+ return b.buf[i]
+ }
+ return b.path[i]
+}
+
+func (b *lazybuf) append(c byte) {
+ if b.buf == nil {
+ if b.w < len(b.path) && b.path[b.w] == c {
+ b.w++
+ return
+ }
+ b.buf = make([]byte, len(b.path))
+ copy(b.buf, b.path[:b.w])
+ }
+ b.buf[b.w] = c
+ b.w++
+}
+
+func (b *lazybuf) string() string {
+ if b.buf == nil {
+ return b.volAndPath[:b.volLen+b.w]
+ }
+ return b.volAndPath[:b.volLen] + string(b.buf[:b.w])
+}
+
+const (
+ Separator = os.PathSeparator
+ ListSeparator = os.PathListSeparator
+)
+
+// Clean returns the shortest path name equivalent to path
+// by purely lexical processing. It applies the following rules
+// iteratively until no further processing can be done:
+//
+// 1. Replace multiple Separator elements with a single one.
+// 2. Eliminate each . path name element (the current directory).
+// 3. Eliminate each inner .. path name element (the parent directory)
+// along with the non-.. element that precedes it.
+// 4. Eliminate .. elements that begin a rooted path:
+// that is, replace "/.." by "/" at the beginning of a path,
+// assuming Separator is '/'.
+//
+// The returned path ends in a slash only if it represents a root directory,
+// such as "/" on Unix or `C:\` on Windows.
+//
+// Finally, any occurrences of slash are replaced by Separator.
+//
+// If the result of this process is an empty string, Clean
+// returns the string ".".
+//
+// See also Rob Pike, “Lexical File Names in Plan 9 or
+// Getting Dot-Dot Right,”
+// https://9p.io/sys/doc/lexnames.html
+func Clean(path string) string {
+ originalPath := path
+ volLen := volumeNameLen(path)
+ path = path[volLen:]
+ if path == "" {
+ if volLen > 1 && originalPath[1] != ':' {
+ // should be UNC
+ return FromSlash(originalPath)
+ }
+ return originalPath + "."
+ }
+ rooted := os.IsPathSeparator(path[0])
+
+ // Invariants:
+ // reading from path; r is index of next byte to process.
+ // writing to buf; w is index of next byte to write.
+ // dotdot is index in buf where .. must stop, either because
+ // it is the leading slash or it is a leading ../../.. prefix.
+ n := len(path)
+ out := lazybuf{path: path, volAndPath: originalPath, volLen: volLen}
+ r, dotdot := 0, 0
+ if rooted {
+ out.append(Separator)
+ r, dotdot = 1, 1
+ }
+
+ for r < n {
+ switch {
+ case os.IsPathSeparator(path[r]):
+ // empty path element
+ r++
+ case path[r] == '.' && r+1 == n:
+ // . element
+ r++
+ case path[r] == '.' && os.IsPathSeparator(path[r+1]):
+ // ./ element
+ r++
+
+ for r < len(path) && os.IsPathSeparator(path[r]) {
+ r++
+ }
+ if out.w == 0 && volumeNameLen(path[r:]) > 0 {
+ // When joining prefix "." and an absolute path on Windows,
+ // the prefix should not be removed.
+ out.append('.')
+ }
+ case path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
+ // .. element: remove to last separator
+ r += 2
+ switch {
+ case out.w > dotdot:
+ // can backtrack
+ out.w--
+ for out.w > dotdot && !os.IsPathSeparator(out.index(out.w)) {
+ out.w--
+ }
+ case !rooted:
+ // cannot backtrack, but not rooted, so append .. element.
+ if out.w > 0 {
+ out.append(Separator)
+ }
+ out.append('.')
+ out.append('.')
+ dotdot = out.w
+ }
+ default:
+ // real path element.
+ // add slash if needed
+ if rooted && out.w != 1 || !rooted && out.w != 0 {
+ out.append(Separator)
+ }
+ // copy element
+ for ; r < n && !os.IsPathSeparator(path[r]); r++ {
+ out.append(path[r])
+ }
+ }
+ }
+
+ // Turn empty string into "."
+ if out.w == 0 {
+ out.append('.')
+ }
+
+ return FromSlash(out.string())
+}
+
+// ToSlash returns the result of replacing each separator character
+// in path with a slash ('/') character. Multiple separators are
+// replaced by multiple slashes.
+func ToSlash(path string) string {
+ if Separator == '/' {
+ return path
+ }
+ return strings.ReplaceAll(path, string(Separator), "/")
+}
+
+// FromSlash returns the result of replacing each slash ('/') character
+// in path with a separator character. Multiple slashes are replaced
+// by multiple separators.
+func FromSlash(path string) string {
+ if Separator == '/' {
+ return path
+ }
+ return strings.ReplaceAll(path, "/", string(Separator))
+}
+
+// SplitList splits a list of paths joined by the OS-specific ListSeparator,
+// usually found in PATH or GOPATH environment variables.
+// Unlike strings.Split, SplitList returns an empty slice when passed an empty
+// string.
+func SplitList(path string) []string {
+ return splitList(path)
+}
+
+// Split splits path immediately following the final Separator,
+// separating it into a directory and file name component.
+// If there is no Separator in path, Split returns an empty dir
+// and file set to path.
+// The returned values have the property that path = dir+file.
+func Split(path string) (dir, file string) {
+ vol := VolumeName(path)
+ i := len(path) - 1
+ for i >= len(vol) && !os.IsPathSeparator(path[i]) {
+ i--
+ }
+ return path[:i+1], path[i+1:]
+}
+
+// Join joins any number of path elements into a single path,
+// separating them with an OS-specific Separator. Empty elements
+// are ignored. The result is Cleaned. However, if the argument
+// list is empty or all its elements are empty, Join returns
+// an empty string.
+// On Windows, the result will only be a UNC path if the first
+// non-empty element is a UNC path.
+func Join(elem ...string) string {
+ return join(elem)
+}
+
+// Ext returns the file name extension used by path.
+// The extension is the suffix beginning at the final dot
+// in the final element of path; it is empty if there is
+// no dot.
+func Ext(path string) string {
+ for i := len(path) - 1; i >= 0 && !os.IsPathSeparator(path[i]); i-- {
+ if path[i] == '.' {
+ return path[i:]
+ }
+ }
+ return ""
+}
+
+// EvalSymlinks returns the path name after the evaluation of any symbolic
+// links.
+// If path is relative the result will be relative to the current directory,
+// unless one of the components is an absolute symbolic link.
+// EvalSymlinks calls Clean on the result.
+func EvalSymlinks(path string) (string, error) {
+ return evalSymlinks(path)
+}
+
+// Abs returns an absolute representation of path.
+// If the path is not absolute it will be joined with the current
+// working directory to turn it into an absolute path. The absolute
+// path name for a given file is not guaranteed to be unique.
+// Abs calls Clean on the result.
+func Abs(path string) (string, error) {
+ return abs(path)
+}
+
+func unixAbs(path string) (string, error) {
+ if IsAbs(path) {
+ return Clean(path), nil
+ }
+ wd, err := os.Getwd()
+ if err != nil {
+ return "", err
+ }
+ return Join(wd, path), nil
+}
+
+// Rel returns a relative path that is lexically equivalent to targpath when
+// joined to basepath with an intervening separator. That is,
+// Join(basepath, Rel(basepath, targpath)) is equivalent to targpath itself.
+// On success, the returned path will always be relative to basepath,
+// even if basepath and targpath share no elements.
+// An error is returned if targpath can't be made relative to basepath or if
+// knowing the current working directory would be necessary to compute it.
+// Rel calls Clean on the result.
+func Rel(basepath, targpath string) (string, error) {
+ baseVol := VolumeName(basepath)
+ targVol := VolumeName(targpath)
+ base := Clean(basepath)
+ targ := Clean(targpath)
+ if sameWord(targ, base) {
+ return ".", nil
+ }
+ base = base[len(baseVol):]
+ targ = targ[len(targVol):]
+ if base == "." {
+ base = ""
+ } else if base == "" && volumeNameLen(baseVol) > 2 /* isUNC */ {
+ // Treat any targetpath matching a `\\host\share` basepath as an absolute path.
+ base = string(Separator)
+ }
+
+ // Can't use IsAbs - `\a` and `a` are both relative in Windows.
+ baseSlashed := len(base) > 0 && base[0] == Separator
+ targSlashed := len(targ) > 0 && targ[0] == Separator
+ if baseSlashed != targSlashed || !sameWord(baseVol, targVol) {
+ return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
+ }
+ // Position base[b0:bi] and targ[t0:ti] at the first differing elements.
+ bl := len(base)
+ tl := len(targ)
+ var b0, bi, t0, ti int
+ for {
+ for bi < bl && base[bi] != Separator {
+ bi++
+ }
+ for ti < tl && targ[ti] != Separator {
+ ti++
+ }
+ if !sameWord(targ[t0:ti], base[b0:bi]) {
+ break
+ }
+ if bi < bl {
+ bi++
+ }
+ if ti < tl {
+ ti++
+ }
+ b0 = bi
+ t0 = ti
+ }
+ if base[b0:bi] == ".." {
+ return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
+ }
+ if b0 != bl {
+ // Base elements left. Must go up before going down.
+ seps := strings.Count(base[b0:bl], string(Separator))
+ size := 2 + seps*3
+ if tl != t0 {
+ size += 1 + tl - t0
+ }
+ buf := make([]byte, size)
+ n := copy(buf, "..")
+ for i := 0; i < seps; i++ {
+ buf[n] = Separator
+ copy(buf[n+1:], "..")
+ n += 3
+ }
+ if t0 != tl {
+ buf[n] = Separator
+ copy(buf[n+1:], targ[t0:])
+ }
+ return string(buf), nil
+ }
+ return targ[t0:], nil
+}
+
+// SkipDir is used as a return value from WalkFuncs to indicate that
+// the directory named in the call is to be skipped. It is not returned
+// as an error by any function.
+var SkipDir error = fs.SkipDir
+
+// WalkFunc is the type of the function called by Walk to visit each
+// file or directory.
+//
+// The path argument contains the argument to Walk as a prefix.
+// That is, if Walk is called with root argument "dir" and finds a file
+// named "a" in that directory, the walk function will be called with
+// argument "dir/a".
+//
+// The directory and file are joined with Join, which may clean the
+// directory name: if Walk is called with the root argument "x/../dir"
+// and finds a file named "a" in that directory, the walk function will
+// be called with argument "dir/a", not "x/../dir/a".
+//
+// The info argument is the fs.FileInfo for the named path.
+//
+// The error result returned by the function controls how Walk continues.
+// If the function returns the special value SkipDir, Walk skips the
+// current directory (path if info.IsDir() is true, otherwise path's
+// parent directory). Otherwise, if the function returns a non-nil error,
+// Walk stops entirely and returns that error.
+//
+// The err argument reports an error related to path, signaling that Walk
+// will not walk into that directory. The function can decide how to
+// handle that error; as described earlier, returning the error will
+// cause Walk to stop walking the entire tree.
+//
+// Walk calls the function with a non-nil err argument in two cases.
+//
+// First, if an os.Lstat on the root directory or any directory or file
+// in the tree fails, Walk calls the function with path set to that
+// directory or file's path, info set to nil, and err set to the error
+// from os.Lstat.
+//
+// Second, if a directory's Readdirnames method fails, Walk calls the
+// function with path set to the directory's path, info set to an
+// fs.FileInfo describing the directory, and err set to the error from
+// Readdirnames.
+type WalkFunc func(path string, info fs.FileInfo, err error) error
+
+var lstat = os.Lstat // for testing
+
+// walkDir recursively descends path, calling walkDirFn.
+func walkDir(path string, d fs.DirEntry, walkDirFn fs.WalkDirFunc) error {
+ if err := walkDirFn(path, d, nil); err != nil || !d.IsDir() {
+ if err == SkipDir && d.IsDir() {
+ // Successfully skipped directory.
+ err = nil
+ }
+ return err
+ }
+
+ dirs, err := readDir(path)
+ if err != nil {
+ // Second call, to report ReadDir error.
+ err = walkDirFn(path, d, err)
+ if err != nil {
+ if err == SkipDir && d.IsDir() {
+ err = nil
+ }
+ return err
+ }
+ }
+
+ for _, d1 := range dirs {
+ path1 := Join(path, d1.Name())
+ if err := walkDir(path1, d1, walkDirFn); err != nil {
+ if err == SkipDir {
+ break
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+// walk recursively descends path, calling walkFn.
+func walk(path string, info fs.FileInfo, walkFn WalkFunc) error {
+ if !info.IsDir() {
+ return walkFn(path, info, nil)
+ }
+
+ names, err := readDirNames(path)
+ err1 := walkFn(path, info, err)
+ // If err != nil, walk can't walk into this directory.
+ // err1 != nil means walkFn wants walk to skip this directory or stop walking.
+ // Therefore, if one of err and err1 isn't nil, walk will return.
+ if err != nil || err1 != nil {
+ // The caller's behavior is controlled by the return value, which is decided
+ // by walkFn. walkFn may ignore err and return nil.
+ // If walkFn returns SkipDir, it will be handled by the caller.
+ // So walk should return whatever walkFn returns.
+ return err1
+ }
+
+ for _, name := range names {
+ filename := Join(path, name)
+ fileInfo, err := lstat(filename)
+ if err != nil {
+ if err := walkFn(filename, fileInfo, err); err != nil && err != SkipDir {
+ return err
+ }
+ } else {
+ err = walk(filename, fileInfo, walkFn)
+ if err != nil {
+ if !fileInfo.IsDir() || err != SkipDir {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// WalkDir walks the file tree rooted at root, calling fn for each file or
+// directory in the tree, including root.
+//
+// All errors that arise visiting files and directories are filtered by fn:
+// see the fs.WalkDirFunc documentation for details.
+//
+// The files are walked in lexical order, which makes the output deterministic
+// but requires WalkDir to read an entire directory into memory before proceeding
+// to walk that directory.
+//
+// WalkDir does not follow symbolic links.
+func WalkDir(root string, fn fs.WalkDirFunc) error {
+ info, err := os.Lstat(root)
+ if err != nil {
+ err = fn(root, nil, err)
+ } else {
+ err = walkDir(root, &statDirEntry{info}, fn)
+ }
+ if err == SkipDir {
+ return nil
+ }
+ return err
+}
+
+type statDirEntry struct {
+ info fs.FileInfo
+}
+
+func (d *statDirEntry) Name() string { return d.info.Name() }
+func (d *statDirEntry) IsDir() bool { return d.info.IsDir() }
+func (d *statDirEntry) Type() fs.FileMode { return d.info.Mode().Type() }
+func (d *statDirEntry) Info() (fs.FileInfo, error) { return d.info, nil }
+
+// Walk walks the file tree rooted at root, calling fn for each file or
+// directory in the tree, including root.
+//
+// All errors that arise visiting files and directories are filtered by fn:
+// see the WalkFunc documentation for details.
+//
+// The files are walked in lexical order, which makes the output deterministic
+// but requires Walk to read an entire directory into memory before proceeding
+// to walk that directory.
+//
+// Walk does not follow symbolic links.
+//
+// Walk is less efficient than WalkDir, introduced in Go 1.16,
+// which avoids calling os.Lstat on every visited file or directory.
+func Walk(root string, fn WalkFunc) error {
+ info, err := os.Lstat(root)
+ if err != nil {
+ err = fn(root, nil, err)
+ } else {
+ err = walk(root, info, fn)
+ }
+ if err == SkipDir {
+ return nil
+ }
+ return err
+}
+
+// readDir reads the directory named by dirname and returns
+// a sorted list of directory entries.
+func readDir(dirname string) ([]fs.DirEntry, error) {
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ dirs, err := f.ReadDir(-1)
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ sort.Slice(dirs, func(i, j int) bool { return dirs[i].Name() < dirs[j].Name() })
+ return dirs, nil
+}
+
+// readDirNames reads the directory named by dirname and returns
+// a sorted list of directory entry names.
+func readDirNames(dirname string) ([]string, error) {
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ names, err := f.Readdirnames(-1)
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(names)
+ return names, nil
+}
+
+// Base returns the last element of path.
+// Trailing path separators are removed before extracting the last element.
+// If the path is empty, Base returns ".".
+// If the path consists entirely of separators, Base returns a single separator.
+func Base(path string) string {
+ if path == "" {
+ return "."
+ }
+ // Strip trailing slashes.
+ for len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) {
+ path = path[0 : len(path)-1]
+ }
+ // Throw away volume name
+ path = path[len(VolumeName(path)):]
+ // Find the last element
+ i := len(path) - 1
+ for i >= 0 && !os.IsPathSeparator(path[i]) {
+ i--
+ }
+ if i >= 0 {
+ path = path[i+1:]
+ }
+ // If empty now, it had only slashes.
+ if path == "" {
+ return string(Separator)
+ }
+ return path
+}
+
+// Dir returns all but the last element of path, typically the path's directory.
+// After dropping the final element, Dir calls Clean on the path and trailing
+// slashes are removed.
+// If the path is empty, Dir returns ".".
+// If the path consists entirely of separators, Dir returns a single separator.
+// The returned path does not end in a separator unless it is the root directory.
+func Dir(path string) string {
+ vol := VolumeName(path)
+ i := len(path) - 1
+ for i >= len(vol) && !os.IsPathSeparator(path[i]) {
+ i--
+ }
+ dir := Clean(path[len(vol) : i+1])
+ if dir == "." && len(vol) > 2 {
+ // must be UNC
+ return vol
+ }
+ return vol + dir
+}
+
+// VolumeName returns leading volume name.
+// Given "C:\foo\bar" it returns "C:" on Windows.
+// Given "\\host\share\foo" it returns "\\host\share".
+// On other platforms it returns "".
+func VolumeName(path string) string {
+ return path[:volumeNameLen(path)]
+}
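
A sketch exercising the lexical rules above; the expected output shown in the comments assumes a Unix Separator.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Clean applies the four rewrite rules purely lexically.
	fmt.Println(filepath.Clean("a//b/../c/./d")) // a/c/d
	fmt.Println(filepath.Clean("/../x"))         // /x
	fmt.Println(filepath.Clean(""))              // .

	// Rel inverts Join: Join(base, Rel(base, targ)) == Clean(targ).
	rel, err := filepath.Rel("/a/b", "/a/c/d")
	fmt.Println(rel, err) // ../c/d <nil>

	// Mixing relative and absolute paths would require the
	// current working directory, so it is an error.
	_, err = filepath.Rel("a", "/b")
	fmt.Println(err != nil) // true
}
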
diff --git a/contrib/go/_std_1.19/src/path/filepath/path_unix.go b/contrib/go/_std_1.19/src/path/filepath/path_unix.go
new file mode 100644
index 0000000000..93fdfdd8a0
--- /dev/null
+++ b/contrib/go/_std_1.19/src/path/filepath/path_unix.go
@@ -0,0 +1,53 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm)
+
+package filepath
+
+import "strings"
+
+// IsAbs reports whether the path is absolute.
+func IsAbs(path string) bool {
+ return strings.HasPrefix(path, "/")
+}
+
+// volumeNameLen returns length of the leading volume name on Windows.
+// It returns 0 elsewhere.
+func volumeNameLen(path string) int {
+ return 0
+}
+
+// HasPrefix exists for historical compatibility and should not be used.
+//
+// Deprecated: HasPrefix does not respect path boundaries and
+// does not ignore case when required.
+func HasPrefix(p, prefix string) bool {
+ return strings.HasPrefix(p, prefix)
+}
+
+func splitList(path string) []string {
+ if path == "" {
+ return []string{}
+ }
+ return strings.Split(path, string(ListSeparator))
+}
+
+func abs(path string) (string, error) {
+ return unixAbs(path)
+}
+
+func join(elem []string) string {
+ // If there's a bug here, fix the logic in ./path_plan9.go too.
+ for i, e := range elem {
+ if e != "" {
+ return Clean(strings.Join(elem[i:], string(Separator)))
+ }
+ }
+ return ""
+}
+
+func sameWord(a, b string) bool {
+ return a == b
+}
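
A sketch of the SplitList edge case documented in path.go above: an empty string yields an empty slice, unlike strings.Split, which yields one empty element.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	fmt.Println(len(filepath.SplitList("")))         // 0
	fmt.Println(len(strings.Split("", ":")))         // 1 (one empty element)
	fmt.Println(filepath.SplitList("/bin:/usr/bin")) // [/bin /usr/bin] on Unix
}
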
diff --git a/contrib/go/_std_1.18/src/path/filepath/symlink.go b/contrib/go/_std_1.19/src/path/filepath/symlink.go
index 6fefd15977..6fefd15977 100644
--- a/contrib/go/_std_1.18/src/path/filepath/symlink.go
+++ b/contrib/go/_std_1.19/src/path/filepath/symlink.go
diff --git a/contrib/go/_std_1.19/src/path/filepath/symlink_unix.go b/contrib/go/_std_1.19/src/path/filepath/symlink_unix.go
new file mode 100644
index 0000000000..f8980d5ad3
--- /dev/null
+++ b/contrib/go/_std_1.19/src/path/filepath/symlink_unix.go
@@ -0,0 +1,7 @@
+//go:build !windows && !plan9
+
+package filepath
+
+func evalSymlinks(path string) (string, error) {
+ return walkSymlinks(path)
+}
diff --git a/contrib/go/_std_1.19/src/path/match.go b/contrib/go/_std_1.19/src/path/match.go
new file mode 100644
index 0000000000..673bbc7ff6
--- /dev/null
+++ b/contrib/go/_std_1.19/src/path/match.go
@@ -0,0 +1,230 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package path
+
+import (
+ "errors"
+ "internal/bytealg"
+ "unicode/utf8"
+)
+
+// ErrBadPattern indicates a pattern was malformed.
+var ErrBadPattern = errors.New("syntax error in pattern")
+
+// Match reports whether name matches the shell pattern.
+// The pattern syntax is:
+//
+// pattern:
+// { term }
+// term:
+// '*' matches any sequence of non-/ characters
+// '?' matches any single non-/ character
+// '[' [ '^' ] { character-range } ']'
+// character class (must be non-empty)
+// c matches character c (c != '*', '?', '\\', '[')
+// '\\' c matches character c
+//
+// character-range:
+// c matches character c (c != '\\', '-', ']')
+// '\\' c matches character c
+// lo '-' hi matches character c for lo <= c <= hi
+//
+// Match requires pattern to match all of name, not just a substring.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+func Match(pattern, name string) (matched bool, err error) {
+Pattern:
+ for len(pattern) > 0 {
+ var star bool
+ var chunk string
+ star, chunk, pattern = scanChunk(pattern)
+ if star && chunk == "" {
+ // Trailing * matches rest of string unless it has a /.
+ return bytealg.IndexByteString(name, '/') < 0, nil
+ }
+ // Look for match at current position.
+ t, ok, err := matchChunk(chunk, name)
+ // if we're the last chunk, make sure we've exhausted the name
+ // otherwise we'll give a false result even if we could still match
+ // using the star
+ if ok && (len(t) == 0 || len(pattern) > 0) {
+ name = t
+ continue
+ }
+ if err != nil {
+ return false, err
+ }
+ if star {
+ // Look for match skipping i+1 bytes.
+ // Cannot skip /.
+ for i := 0; i < len(name) && name[i] != '/'; i++ {
+ t, ok, err := matchChunk(chunk, name[i+1:])
+ if ok {
+ // if we're the last chunk, make sure we exhausted the name
+ if len(pattern) == 0 && len(t) > 0 {
+ continue
+ }
+ name = t
+ continue Pattern
+ }
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+ // Before returning false with no error,
+ // check that the remainder of the pattern is syntactically valid.
+ for len(pattern) > 0 {
+ _, chunk, pattern = scanChunk(pattern)
+ if _, _, err := matchChunk(chunk, ""); err != nil {
+ return false, err
+ }
+ }
+ return false, nil
+ }
+ return len(name) == 0, nil
+}
+
+// scanChunk gets the next segment of pattern, which is a non-star string
+// possibly preceded by a star.
+func scanChunk(pattern string) (star bool, chunk, rest string) {
+ for len(pattern) > 0 && pattern[0] == '*' {
+ pattern = pattern[1:]
+ star = true
+ }
+ inrange := false
+ var i int
+Scan:
+ for i = 0; i < len(pattern); i++ {
+ switch pattern[i] {
+ case '\\':
+ // error check handled in matchChunk: bad pattern.
+ if i+1 < len(pattern) {
+ i++
+ }
+ case '[':
+ inrange = true
+ case ']':
+ inrange = false
+ case '*':
+ if !inrange {
+ break Scan
+ }
+ }
+ }
+ return star, pattern[0:i], pattern[i:]
+}
+
+// matchChunk checks whether chunk matches the beginning of s.
+// If so, it returns the remainder of s (after the match).
+// Chunk is all single-character operators: literals, char classes, and ?.
+func matchChunk(chunk, s string) (rest string, ok bool, err error) {
+ // failed records whether the match has failed.
+ // After the match fails, the loop continues on processing chunk,
+ // checking that the pattern is well-formed but no longer reading s.
+ failed := false
+ for len(chunk) > 0 {
+ if !failed && len(s) == 0 {
+ failed = true
+ }
+ switch chunk[0] {
+ case '[':
+ // character class
+ var r rune
+ if !failed {
+ var n int
+ r, n = utf8.DecodeRuneInString(s)
+ s = s[n:]
+ }
+ chunk = chunk[1:]
+ // possibly negated
+ negated := false
+ if len(chunk) > 0 && chunk[0] == '^' {
+ negated = true
+ chunk = chunk[1:]
+ }
+ // parse all ranges
+ match := false
+ nrange := 0
+ for {
+ if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
+ chunk = chunk[1:]
+ break
+ }
+ var lo, hi rune
+ if lo, chunk, err = getEsc(chunk); err != nil {
+ return "", false, err
+ }
+ hi = lo
+ if chunk[0] == '-' {
+ if hi, chunk, err = getEsc(chunk[1:]); err != nil {
+ return "", false, err
+ }
+ }
+ if lo <= r && r <= hi {
+ match = true
+ }
+ nrange++
+ }
+ if match == negated {
+ failed = true
+ }
+
+ case '?':
+ if !failed {
+ if s[0] == '/' {
+ failed = true
+ }
+ _, n := utf8.DecodeRuneInString(s)
+ s = s[n:]
+ }
+ chunk = chunk[1:]
+
+ case '\\':
+ chunk = chunk[1:]
+ if len(chunk) == 0 {
+ return "", false, ErrBadPattern
+ }
+ fallthrough
+
+ default:
+ if !failed {
+ if chunk[0] != s[0] {
+ failed = true
+ }
+ s = s[1:]
+ }
+ chunk = chunk[1:]
+ }
+ }
+ if failed {
+ return "", false, nil
+ }
+ return s, true, nil
+}
+
+// getEsc gets a possibly-escaped character from chunk, for a character class.
+func getEsc(chunk string) (r rune, nchunk string, err error) {
+ if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
+ err = ErrBadPattern
+ return
+ }
+ if chunk[0] == '\\' {
+ chunk = chunk[1:]
+ if len(chunk) == 0 {
+ err = ErrBadPattern
+ return
+ }
+ }
+ r, n := utf8.DecodeRuneInString(chunk)
+ if r == utf8.RuneError && n == 1 {
+ err = ErrBadPattern
+ }
+ nchunk = chunk[n:]
+ if len(nchunk) == 0 {
+ err = ErrBadPattern
+ }
+ return
+}
diff --git a/contrib/go/_std_1.19/src/path/path.go b/contrib/go/_std_1.19/src/path/path.go
new file mode 100644
index 0000000000..547b9debce
--- /dev/null
+++ b/contrib/go/_std_1.19/src/path/path.go
@@ -0,0 +1,233 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package path implements utility routines for manipulating slash-separated
+// paths.
+//
+// The path package should only be used for paths separated by forward
+// slashes, such as the paths in URLs. This package does not deal with
+// Windows paths with drive letters or backslashes; to manipulate
+// operating system paths, use the path/filepath package.
+package path
+
+// A lazybuf is a lazily constructed path buffer.
+// It supports append, reading previously appended bytes,
+// and retrieving the final string. It does not allocate a buffer
+// to hold the output until that output diverges from s.
+type lazybuf struct {
+ s string
+ buf []byte
+ w int
+}
+
+func (b *lazybuf) index(i int) byte {
+ if b.buf != nil {
+ return b.buf[i]
+ }
+ return b.s[i]
+}
+
+func (b *lazybuf) append(c byte) {
+ if b.buf == nil {
+ if b.w < len(b.s) && b.s[b.w] == c {
+ b.w++
+ return
+ }
+ b.buf = make([]byte, len(b.s))
+ copy(b.buf, b.s[:b.w])
+ }
+ b.buf[b.w] = c
+ b.w++
+}
+
+func (b *lazybuf) string() string {
+ if b.buf == nil {
+ return b.s[:b.w]
+ }
+ return string(b.buf[:b.w])
+}
+
+// Clean returns the shortest path name equivalent to path
+// by purely lexical processing. It applies the following rules
+// iteratively until no further processing can be done:
+//
+// 1. Replace multiple slashes with a single slash.
+// 2. Eliminate each . path name element (the current directory).
+// 3. Eliminate each inner .. path name element (the parent directory)
+// along with the non-.. element that precedes it.
+// 4. Eliminate .. elements that begin a rooted path:
+// that is, replace "/.." by "/" at the beginning of a path.
+//
+// The returned path ends in a slash only if it is the root "/".
+//
+// If the result of this process is an empty string, Clean
+// returns the string ".".
+//
+// See also Rob Pike, “Lexical File Names in Plan 9 or
+// Getting Dot-Dot Right,”
+// https://9p.io/sys/doc/lexnames.html
+func Clean(path string) string {
+ if path == "" {
+ return "."
+ }
+
+ rooted := path[0] == '/'
+ n := len(path)
+
+ // Invariants:
+ // reading from path; r is index of next byte to process.
+ // writing to buf; w is index of next byte to write.
+ // dotdot is index in buf where .. must stop, either because
+ // it is the leading slash or it is a leading ../../.. prefix.
+ out := lazybuf{s: path}
+ r, dotdot := 0, 0
+ if rooted {
+ out.append('/')
+ r, dotdot = 1, 1
+ }
+
+ for r < n {
+ switch {
+ case path[r] == '/':
+ // empty path element
+ r++
+ case path[r] == '.' && (r+1 == n || path[r+1] == '/'):
+ // . element
+ r++
+ case path[r] == '.' && path[r+1] == '.' && (r+2 == n || path[r+2] == '/'):
+ // .. element: remove to last /
+ r += 2
+ switch {
+ case out.w > dotdot:
+ // can backtrack
+ out.w--
+ for out.w > dotdot && out.index(out.w) != '/' {
+ out.w--
+ }
+ case !rooted:
+ // cannot backtrack, but not rooted, so append .. element.
+ if out.w > 0 {
+ out.append('/')
+ }
+ out.append('.')
+ out.append('.')
+ dotdot = out.w
+ }
+ default:
+ // real path element.
+ // add slash if needed
+ if rooted && out.w != 1 || !rooted && out.w != 0 {
+ out.append('/')
+ }
+ // copy element
+ for ; r < n && path[r] != '/'; r++ {
+ out.append(path[r])
+ }
+ }
+ }
+
+ // Turn empty string into "."
+ if out.w == 0 {
+ return "."
+ }
+
+ return out.string()
+}
+
+// lastSlash(s) is strings.LastIndex(s, "/") but we can't import strings.
+func lastSlash(s string) int {
+ i := len(s) - 1
+ for i >= 0 && s[i] != '/' {
+ i--
+ }
+ return i
+}
+
+// Split splits path immediately following the final slash,
+// separating it into a directory and file name component.
+// If there is no slash in path, Split returns an empty dir and
+// file set to path.
+// The returned values have the property that path = dir+file.
+func Split(path string) (dir, file string) {
+ i := lastSlash(path)
+ return path[:i+1], path[i+1:]
+}
+
+// Join joins any number of path elements into a single path,
+// separating them with slashes. Empty elements are ignored.
+// The result is Cleaned. However, if the argument list is
+// empty or all its elements are empty, Join returns
+// an empty string.
+func Join(elem ...string) string {
+ size := 0
+ for _, e := range elem {
+ size += len(e)
+ }
+ if size == 0 {
+ return ""
+ }
+ buf := make([]byte, 0, size+len(elem)-1)
+ for _, e := range elem {
+ if len(buf) > 0 || e != "" {
+ if len(buf) > 0 {
+ buf = append(buf, '/')
+ }
+ buf = append(buf, e...)
+ }
+ }
+ return Clean(string(buf))
+}
+
+// Ext returns the file name extension used by path.
+// The extension is the suffix beginning at the final dot
+// in the final slash-separated element of path;
+// it is empty if there is no dot.
+func Ext(path string) string {
+ for i := len(path) - 1; i >= 0 && path[i] != '/'; i-- {
+ if path[i] == '.' {
+ return path[i:]
+ }
+ }
+ return ""
+}
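+
+// Illustrative sketch:
+//
+//	Ext("a/b/c.go") // ".go"
+//	Ext("a.b/c")    // "" (the final element has no dot)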
+
+// Base returns the last element of path.
+// Trailing slashes are removed before extracting the last element.
+// If the path is empty, Base returns ".".
+// If the path consists entirely of slashes, Base returns "/".
+func Base(path string) string {
+ if path == "" {
+ return "."
+ }
+ // Strip trailing slashes.
+ for len(path) > 0 && path[len(path)-1] == '/' {
+ path = path[0 : len(path)-1]
+ }
+ // Find the last element
+ if i := lastSlash(path); i >= 0 {
+ path = path[i+1:]
+ }
+ // If empty now, it had only slashes.
+ if path == "" {
+ return "/"
+ }
+ return path
+}
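+
+// Illustrative sketch:
+//
+//	Base("/a/b/") // "b"
+//	Base("///")   // "/"
+//	Base("")      // "."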
+
+// IsAbs reports whether the path is absolute.
+func IsAbs(path string) bool {
+ return len(path) > 0 && path[0] == '/'
+}
+
+// Dir returns all but the last element of path, typically the path's directory.
+// After dropping the final element using Split, the path is Cleaned and trailing
+// slashes are removed.
+// If the path is empty, Dir returns ".".
+// If the path consists entirely of slashes followed by non-slash bytes, Dir
+// returns a single slash. In any other case, the returned path does not end in a
+// slash.
+func Dir(path string) string {
+ dir, _ := Split(path)
+ return Clean(dir)
+}
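+
+// Illustrative sketch:
+//
+//	Dir("/a/b/c") // "/a/b"
+//	Dir("a")      // "."
+//	Dir("")       // "."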
diff --git a/contrib/go/_std_1.19/src/reflect/abi.go b/contrib/go/_std_1.19/src/reflect/abi.go
new file mode 100644
index 0000000000..32cb314188
--- /dev/null
+++ b/contrib/go/_std_1.19/src/reflect/abi.go
@@ -0,0 +1,510 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect
+
+import (
+ "internal/abi"
+ "internal/goarch"
+ "unsafe"
+)
+
+// These variables are used by the register assignment
+// algorithm in this file.
+//
+// They should be modified with care (no other reflect code
+// may be executing) and are generally only modified
+// when testing this package.
+//
+// They should never be set higher than their internal/abi
+// constant counterparts, because the system relies on a
+// structure that is at least large enough to hold the
+// registers the system supports.
+//
+// Currently they're set to their internal/abi constant
+// counterparts, so the register ABI is in effect on
+// architectures where those constants report non-zero
+// register counts.
+var (
+ intArgRegs = abi.IntArgRegs
+ floatArgRegs = abi.FloatArgRegs
+ floatRegSize = uintptr(abi.EffectiveFloatRegSize)
+)
+
+// abiStep represents an ABI "instruction." Each instruction
+// describes one part of how to translate between a Go value
+// in memory and a call frame.
+type abiStep struct {
+ kind abiStepKind
+
+ // offset and size together describe a part of a Go value
+ // in memory.
+ offset uintptr
+ size uintptr // size in bytes of the part
+
+ // These fields describe the ABI side of the translation.
+ stkOff uintptr // stack offset, used if kind == abiStepStack
+ ireg int // integer register index, used if kind == abiStepIntReg or kind == abiStepPointer
+ freg int // FP register index, used if kind == abiStepFloatReg
+}
+
+// abiStepKind is the "op-code" for an abiStep instruction.
+type abiStepKind int
+
+const (
+ abiStepBad abiStepKind = iota
+ abiStepStack // copy to/from stack
+ abiStepIntReg // copy to/from integer register
+ abiStepPointer // copy pointer to/from integer register
+ abiStepFloatReg // copy to/from FP register
+)
+
+// abiSeq represents a sequence of ABI instructions for copying
+// from a series of reflect.Values to a call frame (for call arguments)
+// or vice-versa (for call results).
+//
+// An abiSeq should be populated by calling its addArg method.
+type abiSeq struct {
+ // steps is the set of instructions.
+ //
+ // The instructions are grouped together by whole arguments,
+ // with the starting index for the instructions
+ // of the i'th Go value available in valueStart.
+ //
+ // For instance, if this abiSeq represents 3 arguments
+ // passed to a function, then the 2nd argument's steps
+ // begin at steps[valueStart[1]].
+ //
+ // Because reflect accepts Go arguments in distinct
+ // Values and each Value is stored separately, each abiStep
+ // that begins a new argument will have its offset
+ // field == 0.
+ steps []abiStep
+ valueStart []int
+
+ stackBytes uintptr // stack space used
+ iregs, fregs int // registers used
+}
+
+func (a *abiSeq) dump() {
+ for i, p := range a.steps {
+ println("part", i, p.kind, p.offset, p.size, p.stkOff, p.ireg, p.freg)
+ }
+ print("values ")
+ for _, i := range a.valueStart {
+ print(i, " ")
+ }
+ println()
+ println("stack", a.stackBytes)
+ println("iregs", a.iregs)
+ println("fregs", a.fregs)
+}
+
+// stepsForValue returns the ABI instructions for translating
+// the i'th Go argument or return value represented by this
+// abiSeq to the Go ABI.
+func (a *abiSeq) stepsForValue(i int) []abiStep {
+ s := a.valueStart[i]
+ var e int
+ if i == len(a.valueStart)-1 {
+ e = len(a.steps)
+ } else {
+ e = a.valueStart[i+1]
+ }
+ return a.steps[s:e]
+}
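+
+// Illustrative sketch (hypothetical values): with
+// valueStart == []int{0, 2, 3} and len(steps) == 5,
+// stepsForValue(0) is steps[0:2], stepsForValue(1) is steps[2:3],
+// and stepsForValue(2) is steps[3:5].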
+
+// addArg extends the abiSeq with a new Go value of type t.
+//
+// If the value was stack-assigned, returns the single
+// abiStep describing that translation, and nil otherwise.
+func (a *abiSeq) addArg(t *rtype) *abiStep {
+ // We'll always be adding a new value, so do that first.
+ pStart := len(a.steps)
+ a.valueStart = append(a.valueStart, pStart)
+ if t.size == 0 {
+ // If the size of the argument type is zero, then
+ // in order to degrade gracefully into ABI0, we need
+ // to stack-assign this type. The reason is that
+ // although zero-sized types take up no space on the
+ // stack, they do cause the next argument to be aligned.
+ // So just do that here, but don't bother actually
+ // generating a new ABI step for it (there's nothing to
+ // actually copy).
+ //
+ // We cannot handle this in the recursive case of
+ // regAssign because zero-sized *fields* of a
+ // non-zero-sized struct do not cause it to be
+ // stack-assigned. So we need a special case here
+ // at the top.
+ a.stackBytes = align(a.stackBytes, uintptr(t.align))
+ return nil
+ }
+ // Hold a copy of "a" so that we can roll back if
+ // register assignment fails.
+ aOld := *a
+ if !a.regAssign(t, 0) {
+ // Register assignment failed. Roll back any changes
+ // and stack-assign.
+ *a = aOld
+ a.stackAssign(t.size, uintptr(t.align))
+ return &a.steps[len(a.steps)-1]
+ }
+ return nil
+}
+
+// addRcvr extends the abiSeq with a new method call
+// receiver according to the interface calling convention.
+//
+// If the receiver was stack-assigned, returns the single
+// abiStep describing that translation, and nil otherwise.
+// Returns true if the receiver is a pointer.
+func (a *abiSeq) addRcvr(rcvr *rtype) (*abiStep, bool) {
+ // The receiver is always one word.
+ a.valueStart = append(a.valueStart, len(a.steps))
+ var ok, ptr bool
+ if ifaceIndir(rcvr) || rcvr.pointers() {
+ ok = a.assignIntN(0, goarch.PtrSize, 1, 0b1)
+ ptr = true
+ } else {
+ // TODO(mknyszek): Is this case even possible?
+ // The interface data word never contains a non-pointer
+ // value. This case was copied over from older code
+ // in the reflect package which only conditionally added
+ // a pointer bit to the reflect.(Value).Call stack frame's
+ // GC bitmap.
+ ok = a.assignIntN(0, goarch.PtrSize, 1, 0b0)
+ ptr = false
+ }
+ if !ok {
+ a.stackAssign(goarch.PtrSize, goarch.PtrSize)
+ return &a.steps[len(a.steps)-1], ptr
+ }
+ return nil, ptr
+}
+
+// regAssign attempts to reserve argument registers for a value of
+// type t, stored at some offset.
+//
+// It returns whether or not the assignment succeeded, but
+// leaves any changes it made to a.steps behind, so the caller
+// must undo that work by adjusting a.steps if it fails.
+//
+// This method along with the assign* methods represent the
+// complete register-assignment algorithm for the Go ABI.
+func (a *abiSeq) regAssign(t *rtype, offset uintptr) bool {
+ switch t.Kind() {
+ case UnsafePointer, Pointer, Chan, Map, Func:
+ return a.assignIntN(offset, t.size, 1, 0b1)
+ case Bool, Int, Uint, Int8, Uint8, Int16, Uint16, Int32, Uint32, Uintptr:
+ return a.assignIntN(offset, t.size, 1, 0b0)
+ case Int64, Uint64:
+ switch goarch.PtrSize {
+ case 4:
+ return a.assignIntN(offset, 4, 2, 0b0)
+ case 8:
+ return a.assignIntN(offset, 8, 1, 0b0)
+ }
+ case Float32, Float64:
+ return a.assignFloatN(offset, t.size, 1)
+ case Complex64:
+ return a.assignFloatN(offset, 4, 2)
+ case Complex128:
+ return a.assignFloatN(offset, 8, 2)
+ case String:
+ return a.assignIntN(offset, goarch.PtrSize, 2, 0b01)
+ case Interface:
+ return a.assignIntN(offset, goarch.PtrSize, 2, 0b10)
+ case Slice:
+ return a.assignIntN(offset, goarch.PtrSize, 3, 0b001)
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(t))
+ switch tt.len {
+ case 0:
+ // There's nothing to assign, so don't modify
+ // a.steps but succeed so the caller doesn't
+ // try to stack-assign this value.
+ return true
+ case 1:
+ return a.regAssign(tt.elem, offset)
+ default:
+ return false
+ }
+ case Struct:
+ st := (*structType)(unsafe.Pointer(t))
+ for i := range st.fields {
+ f := &st.fields[i]
+ if !a.regAssign(f.typ, offset+f.offset) {
+ return false
+ }
+ }
+ return true
+ default:
+ print("t.Kind == ", t.Kind(), "\n")
+ panic("unknown type kind")
+ }
+ panic("unhandled register assignment path")
+}
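+
+// Illustrative sketch (assuming a 64-bit layout): for a value of type
+//
+//	struct { a uint32; b *byte }
+//
+// regAssign recurses over the fields and calls
+// assignIntN(0, 4, 1, 0b0) for a and assignIntN(8, 8, 1, 0b1) for b,
+// reserving two integer registers with the second marked as a pointer.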
+
+// assignIntN assigns n values to registers, each "size" bytes large,
+// from the data at [offset, offset+n*size) in memory. The value at
+// [offset+i*size, offset+(i+1)*size) for i < n is assigned to the
+// i'th of the next n integer registers.
+//
+// Bit i in ptrMap indicates whether the i'th value is a pointer.
+// n must be <= 8.
+//
+// Returns whether assignment succeeded.
+func (a *abiSeq) assignIntN(offset, size uintptr, n int, ptrMap uint8) bool {
+ if n > 8 || n < 0 {
+ panic("invalid n")
+ }
+ if ptrMap != 0 && size != goarch.PtrSize {
+ panic("non-empty pointer map passed for non-pointer-size values")
+ }
+ if a.iregs+n > intArgRegs {
+ return false
+ }
+ for i := 0; i < n; i++ {
+ kind := abiStepIntReg
+ if ptrMap&(uint8(1)<<i) != 0 {
+ kind = abiStepPointer
+ }
+ a.steps = append(a.steps, abiStep{
+ kind: kind,
+ offset: offset + uintptr(i)*size,
+ size: size,
+ ireg: a.iregs,
+ })
+ a.iregs++
+ }
+ return true
+}
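+
+// Illustrative sketch: per the String case in regAssign above, a Go
+// string on a 64-bit platform is passed as
+// assignIntN(offset, 8, 2, 0b01), which emits an abiStepPointer for
+// the data word followed by an abiStepIntReg for the length, in
+// consecutive integer registers.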
+
+// assignFloatN assigns n values to registers, each "size" bytes large,
+// from the data at [offset, offset+n*size) in memory. The value at
+// [offset+i*size, offset+(i+1)*size) for i < n is assigned to the
+// i'th of the next n floating-point registers.
+//
+// Returns whether assignment succeeded.
+func (a *abiSeq) assignFloatN(offset, size uintptr, n int) bool {
+ if n < 0 {
+ panic("invalid n")
+ }
+ if a.fregs+n > floatArgRegs || floatRegSize < size {
+ return false
+ }
+ for i := 0; i < n; i++ {
+ a.steps = append(a.steps, abiStep{
+ kind: abiStepFloatReg,
+ offset: offset + uintptr(i)*size,
+ size: size,
+ freg: a.fregs,
+ })
+ a.fregs++
+ }
+ return true
+}
+
+// stackAssign reserves stack space for one value that is "size"
+// bytes large with alignment "alignment".
+//
+// Should not be called directly; use addArg instead.
+func (a *abiSeq) stackAssign(size, alignment uintptr) {
+ a.stackBytes = align(a.stackBytes, alignment)
+ a.steps = append(a.steps, abiStep{
+ kind: abiStepStack,
+ offset: 0, // Only used for whole arguments, so the memory offset is 0.
+ size: size,
+ stkOff: a.stackBytes,
+ })
+ a.stackBytes += size
+}
+
+// abiDesc describes the ABI for a function or method.
+type abiDesc struct {
+ // call and ret represent the translation steps for
+ // the call and return paths of a Go function.
+ call, ret abiSeq
+
+ // These fields describe the stack space allocated
+ // for the call. stackCallArgsSize is the amount of space
+ // reserved for arguments but not return values. retOffset
+ // is the offset at which return values begin, and
+ // spill is the size in bytes of additional space reserved
+ // to spill argument registers into in case of preemption in
+ // reflectcall's stack frame.
+ stackCallArgsSize, retOffset, spill uintptr
+
+ // stackPtrs is a bitmap that indicates whether
+ // each word in the ABI stack space (stack-assigned
+ // args + return values) is a pointer. Used
+ // as the heap pointer bitmap for stack space
+ // passed to reflectcall.
+ stackPtrs *bitVector
+
+ // inRegPtrs is a bitmap whose i'th bit indicates
+ // whether the i'th integer argument register contains
+ // a pointer. Used by makeFuncStub and methodValueCall
+ // to make result pointers visible to the GC.
+ //
+ // outRegPtrs is the same, but for result values.
+ // Used by reflectcall to make result pointers visible
+ // to the GC.
+ inRegPtrs, outRegPtrs abi.IntArgRegBitmap
+}
+
+func (a *abiDesc) dump() {
+ println("ABI")
+ println("call")
+ a.call.dump()
+ println("ret")
+ a.ret.dump()
+ println("stackCallArgsSize", a.stackCallArgsSize)
+ println("retOffset", a.retOffset)
+ println("spill", a.spill)
+ print("inRegPtrs:")
+ dumpPtrBitMap(a.inRegPtrs)
+ println()
+ print("outRegPtrs:")
+ dumpPtrBitMap(a.outRegPtrs)
+ println()
+}
+
+func dumpPtrBitMap(b abi.IntArgRegBitmap) {
+ for i := 0; i < intArgRegs; i++ {
+ x := 0
+ if b.Get(i) {
+ x = 1
+ }
+ print(" ", x)
+ }
+}
+
+func newAbiDesc(t *funcType, rcvr *rtype) abiDesc {
+ // We need to add space for this argument to
+ // the frame so that it can spill args into it.
+ //
+ // The size of this space is just the sum of the sizes
+ // of each register-allocated type.
+ //
+ // TODO(mknyszek): Remove this when we no longer have
+ // caller reserved spill space.
+ spill := uintptr(0)
+
+ // Compute gc program & stack bitmap for stack arguments
+ stackPtrs := new(bitVector)
+
+ // Compute the stack frame pointer bitmap and register
+ // pointer bitmap for arguments.
+ inRegPtrs := abi.IntArgRegBitmap{}
+
+ // Compute abiSeq for input parameters.
+ var in abiSeq
+ if rcvr != nil {
+ stkStep, isPtr := in.addRcvr(rcvr)
+ if stkStep != nil {
+ if isPtr {
+ stackPtrs.append(1)
+ } else {
+ stackPtrs.append(0)
+ }
+ } else {
+ spill += goarch.PtrSize
+ }
+ }
+ for i, arg := range t.in() {
+ stkStep := in.addArg(arg)
+ if stkStep != nil {
+ addTypeBits(stackPtrs, stkStep.stkOff, arg)
+ } else {
+ spill = align(spill, uintptr(arg.align))
+ spill += arg.size
+ for _, st := range in.stepsForValue(i) {
+ if st.kind == abiStepPointer {
+ inRegPtrs.Set(st.ireg)
+ }
+ }
+ }
+ }
+ spill = align(spill, goarch.PtrSize)
+
+ // From the input parameters alone, we now know
+ // the stackCallArgsSize and retOffset.
+ stackCallArgsSize := in.stackBytes
+ retOffset := align(in.stackBytes, goarch.PtrSize)
+
+ // Compute the stack frame pointer bitmap and register
+ // pointer bitmap for return values.
+ outRegPtrs := abi.IntArgRegBitmap{}
+
+ // Compute abiSeq for output parameters.
+ var out abiSeq
+ // Stack-assigned return values do not share
+ // space with arguments like they do with registers,
+ // so we need to inject a stack offset here.
+ // Fake it by artificially extending stackBytes by
+ // the return offset.
+ out.stackBytes = retOffset
+ for i, res := range t.out() {
+ stkStep := out.addArg(res)
+ if stkStep != nil {
+ addTypeBits(stackPtrs, stkStep.stkOff, res)
+ } else {
+ for _, st := range out.stepsForValue(i) {
+ if st.kind == abiStepPointer {
+ outRegPtrs.Set(st.ireg)
+ }
+ }
+ }
+ }
+ // Undo the faking from earlier so that stackBytes
+ // is accurate.
+ out.stackBytes -= retOffset
+ return abiDesc{in, out, stackCallArgsSize, retOffset, spill, stackPtrs, inRegPtrs, outRegPtrs}
+}
+
+// intFromReg loads an argSize sized integer from reg and places it at to.
+//
+// argSize must be non-zero, fit in a register, and be a power of two.
+func intFromReg(r *abi.RegArgs, reg int, argSize uintptr, to unsafe.Pointer) {
+ memmove(to, r.IntRegArgAddr(reg, argSize), argSize)
+}
+
+// intToReg loads an argSize sized integer and stores it into reg.
+//
+// argSize must be non-zero, fit in a register, and be a power of two.
+func intToReg(r *abi.RegArgs, reg int, argSize uintptr, from unsafe.Pointer) {
+ memmove(r.IntRegArgAddr(reg, argSize), from, argSize)
+}
+
+// floatFromReg loads a float value from its register representation in r.
+//
+// argSize must be 4 or 8.
+func floatFromReg(r *abi.RegArgs, reg int, argSize uintptr, to unsafe.Pointer) {
+ switch argSize {
+ case 4:
+ *(*float32)(to) = archFloat32FromReg(r.Floats[reg])
+ case 8:
+ *(*float64)(to) = *(*float64)(unsafe.Pointer(&r.Floats[reg]))
+ default:
+ panic("bad argSize")
+ }
+}
+
+// floatToReg stores a float value in its register representation in r.
+//
+// argSize must be either 4 or 8.
+func floatToReg(r *abi.RegArgs, reg int, argSize uintptr, from unsafe.Pointer) {
+ switch argSize {
+ case 4:
+ r.Floats[reg] = archFloat32ToReg(*(*float32)(from))
+ case 8:
+ r.Floats[reg] = *(*uint64)(from)
+ default:
+ panic("bad argSize")
+ }
+}
diff --git a/contrib/go/_std_1.18/src/reflect/asm_amd64.s b/contrib/go/_std_1.19/src/reflect/asm_amd64.s
index d21d498063..d21d498063 100644
--- a/contrib/go/_std_1.18/src/reflect/asm_amd64.s
+++ b/contrib/go/_std_1.19/src/reflect/asm_amd64.s
diff --git a/contrib/go/_std_1.19/src/reflect/deepequal.go b/contrib/go/_std_1.19/src/reflect/deepequal.go
new file mode 100644
index 0000000000..50b436e5f6
--- /dev/null
+++ b/contrib/go/_std_1.19/src/reflect/deepequal.go
@@ -0,0 +1,238 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Deep equality test via reflection
+
+package reflect
+
+import (
+ "internal/bytealg"
+ "unsafe"
+)
+
+// During deepValueEqual, must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visited comparisons are stored in a map indexed by visit.
+type visit struct {
+ a1 unsafe.Pointer
+ a2 unsafe.Pointer
+ typ Type
+}
+
+// Tests for deep equality using reflected types. The map argument tracks
+// comparisons that have already been seen, which allows short circuiting on
+// recursive types.
+func deepValueEqual(v1, v2 Value, visited map[visit]bool) bool {
+ if !v1.IsValid() || !v2.IsValid() {
+ return v1.IsValid() == v2.IsValid()
+ }
+ if v1.Type() != v2.Type() {
+ return false
+ }
+
+ // We want to avoid putting more in the visited map than we need to.
+ // For any possible reference cycle that might be encountered,
+ // hard(v1, v2) needs to return true for at least one of the types in the cycle,
+ // and it's safe and valid to get Value's internal pointer.
+ hard := func(v1, v2 Value) bool {
+ switch v1.Kind() {
+ case Pointer:
+ if v1.typ.ptrdata == 0 {
+ // go:notinheap pointers can't be cyclic.
+ // At least, all of our current uses of go:notinheap have
+ // that property. The runtime ones aren't cyclic (and we don't use
+ // DeepEqual on them anyway), and the cgo-generated ones are
+ // all empty structs.
+ return false
+ }
+ fallthrough
+ case Map, Slice, Interface:
+ // Nil pointers cannot be cyclic. Avoid putting them in the visited map.
+ return !v1.IsNil() && !v2.IsNil()
+ }
+ return false
+ }
+
+ if hard(v1, v2) {
+ // For a Pointer or Map value, we need to check flagIndir,
+ // which we do by calling the pointer method.
+ // For Slice or Interface, flagIndir is always set,
+ // and using v.ptr suffices.
+ ptrval := func(v Value) unsafe.Pointer {
+ switch v.Kind() {
+ case Pointer, Map:
+ return v.pointer()
+ default:
+ return v.ptr
+ }
+ }
+ addr1 := ptrval(v1)
+ addr2 := ptrval(v2)
+ if uintptr(addr1) > uintptr(addr2) {
+ // Canonicalize order to reduce number of entries in visited.
+ // Assumes non-moving garbage collector.
+ addr1, addr2 = addr2, addr1
+ }
+
+ // Short circuit if references are already seen.
+ typ := v1.Type()
+ v := visit{addr1, addr2, typ}
+ if visited[v] {
+ return true
+ }
+
+ // Remember for later.
+ visited[v] = true
+ }
+
+ switch v1.Kind() {
+ case Array:
+ for i := 0; i < v1.Len(); i++ {
+ if !deepValueEqual(v1.Index(i), v2.Index(i), visited) {
+ return false
+ }
+ }
+ return true
+ case Slice:
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ if v1.UnsafePointer() == v2.UnsafePointer() {
+ return true
+ }
+ // Special case for []byte, which is common.
+ if v1.Type().Elem().Kind() == Uint8 {
+ return bytealg.Equal(v1.Bytes(), v2.Bytes())
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !deepValueEqual(v1.Index(i), v2.Index(i), visited) {
+ return false
+ }
+ }
+ return true
+ case Interface:
+ if v1.IsNil() || v2.IsNil() {
+ return v1.IsNil() == v2.IsNil()
+ }
+ return deepValueEqual(v1.Elem(), v2.Elem(), visited)
+ case Pointer:
+ if v1.UnsafePointer() == v2.UnsafePointer() {
+ return true
+ }
+ return deepValueEqual(v1.Elem(), v2.Elem(), visited)
+ case Struct:
+ for i, n := 0, v1.NumField(); i < n; i++ {
+ if !deepValueEqual(v1.Field(i), v2.Field(i), visited) {
+ return false
+ }
+ }
+ return true
+ case Map:
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ if v1.UnsafePointer() == v2.UnsafePointer() {
+ return true
+ }
+ for _, k := range v1.MapKeys() {
+ val1 := v1.MapIndex(k)
+ val2 := v2.MapIndex(k)
+ if !val1.IsValid() || !val2.IsValid() || !deepValueEqual(val1, val2, visited) {
+ return false
+ }
+ }
+ return true
+ case Func:
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ // Can't do better than this:
+ return false
+ case Int, Int8, Int16, Int32, Int64:
+ return v1.Int() == v2.Int()
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return v1.Uint() == v2.Uint()
+ case String:
+ return v1.String() == v2.String()
+ case Bool:
+ return v1.Bool() == v2.Bool()
+ case Float32, Float64:
+ return v1.Float() == v2.Float()
+ case Complex64, Complex128:
+ return v1.Complex() == v2.Complex()
+ default:
+ // Normal equality suffices
+ return valueInterface(v1, false) == valueInterface(v2, false)
+ }
+}
+
+// DeepEqual reports whether x and y are “deeply equal,” defined as follows.
+// Two values of identical type are deeply equal if one of the following cases applies.
+// Values of distinct types are never deeply equal.
+//
+// Array values are deeply equal when their corresponding elements are deeply equal.
+//
+// Struct values are deeply equal if their corresponding fields,
+// both exported and unexported, are deeply equal.
+//
+// Func values are deeply equal if both are nil; otherwise they are not deeply equal.
+//
+// Interface values are deeply equal if they hold deeply equal concrete values.
+//
+// Map values are deeply equal when all of the following are true:
+// they are both nil or both non-nil, they have the same length,
+// and either they are the same map object or their corresponding keys
+// (matched using Go equality) map to deeply equal values.
+//
+// Pointer values are deeply equal if they are equal using Go's == operator
+// or if they point to deeply equal values.
+//
+// Slice values are deeply equal when all of the following are true:
+// they are both nil or both non-nil, they have the same length,
+// and either they point to the same initial entry of the same underlying array
+// (that is, &x[0] == &y[0]) or their corresponding elements (up to length) are deeply equal.
+// Note that a non-nil empty slice and a nil slice (for example, []byte{} and []byte(nil))
+// are not deeply equal.
+//
+// Other values - numbers, bools, strings, and channels - are deeply equal
+// if they are equal using Go's == operator.
+//
+// In general DeepEqual is a recursive relaxation of Go's == operator.
+// However, this idea is impossible to implement without some inconsistency.
+// Specifically, it is possible for a value to be unequal to itself,
+// either because it is of func type (uncomparable in general)
+// or because it is a floating-point NaN value (not equal to itself in floating-point comparison),
+// or because it is an array, struct, or interface containing
+// such a value.
+// On the other hand, pointer values are always equal to themselves,
+// even if they point at or contain such problematic values,
+// because they compare equal using Go's == operator, and that
+// is a sufficient condition to be deeply equal, regardless of content.
+// DeepEqual has been defined so that the same short-cut applies
+// to slices and maps: if x and y are the same slice or the same map,
+// they are deeply equal regardless of content.
+//
+// As DeepEqual traverses the data values it may find a cycle. The
+// second and subsequent times that DeepEqual compares two pointer
+// values that have been compared before, it treats the values as
+// equal rather than examining the values to which they point.
+// This ensures that DeepEqual terminates.
+func DeepEqual(x, y any) bool {
+ if x == nil || y == nil {
+ return x == y
+ }
+ v1 := ValueOf(x)
+ v2 := ValueOf(y)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ return deepValueEqual(v1, v2, make(map[visit]bool))
+}
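+
+// Illustrative sketch of the rules above:
+//
+//	DeepEqual([]int{1, 2}, []int{1, 2})     // true: elementwise equal
+//	DeepEqual([]byte{}, []byte(nil))        // false: empty vs. nil slice
+//	DeepEqual(math.NaN(), math.NaN())       // false: NaN != NaN
+//	DeepEqual(map[int]int{}, map[int]int{}) // true: both empty, same length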
diff --git a/contrib/go/_std_1.19/src/reflect/float32reg_generic.go b/contrib/go/_std_1.19/src/reflect/float32reg_generic.go
new file mode 100644
index 0000000000..23ad4bf285
--- /dev/null
+++ b/contrib/go/_std_1.19/src/reflect/float32reg_generic.go
@@ -0,0 +1,23 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !ppc64 && !ppc64le && !riscv64
+
+package reflect
+
+import "unsafe"
+
+// This file implements a straightforward conversion of a float32
+// value into its representation in a register. This conversion
+// applies on amd64 and arm64. It is also selected when the target
+// has zero argument registers, in which case it is never called.
+
+func archFloat32FromReg(reg uint64) float32 {
+ i := uint32(reg)
+ return *(*float32)(unsafe.Pointer(&i))
+}
+
+func archFloat32ToReg(val float32) uint64 {
+ return uint64(*(*uint32)(unsafe.Pointer(&val)))
+}
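+
+// Illustrative sketch: archFloat32ToReg(1.0) yields 0x3f800000, the
+// IEEE-754 bits of float32(1.0) zero-extended to 64 bits, and
+// archFloat32FromReg(0x3f800000) recovers 1.0.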
diff --git a/contrib/go/_std_1.19/src/reflect/makefunc.go b/contrib/go/_std_1.19/src/reflect/makefunc.go
new file mode 100644
index 0000000000..ee0729903e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/reflect/makefunc.go
@@ -0,0 +1,176 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// MakeFunc implementation.
+
+package reflect
+
+import (
+ "internal/abi"
+ "unsafe"
+)
+
+// makeFuncImpl is the closure value implementing the function
+// returned by MakeFunc.
+// The first three words of this type must be kept in sync with
+// methodValue and runtime.reflectMethodValue.
+// Any changes should be reflected in all three.
+type makeFuncImpl struct {
+ makeFuncCtxt
+ ftyp *funcType
+ fn func([]Value) []Value
+}
+
+// MakeFunc returns a new function of the given Type
+// that wraps the function fn. When called, that new function
+// does the following:
+//
+// - converts its arguments to a slice of Values.
+// - runs results := fn(args).
+// - returns the results as a slice of Values, one per formal result.
+//
+// The implementation fn can assume that the argument Value slice
+// has the number and type of arguments given by typ.
+// If typ describes a variadic function, the final Value is itself
+// a slice representing the variadic arguments, as in the
+// body of a variadic function. The result Value slice returned by fn
+// must have the number and type of results given by typ.
+//
+// The Value.Call method allows the caller to invoke a typed function
+// in terms of Values; in contrast, MakeFunc allows the caller to implement
+// a typed function in terms of Values.
+//
+// The Examples section of the documentation includes an illustration
+// of how to use MakeFunc to build a swap function for different types.
+func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value {
+ if typ.Kind() != Func {
+ panic("reflect: call of MakeFunc with non-Func type")
+ }
+
+ t := typ.common()
+ ftyp := (*funcType)(unsafe.Pointer(t))
+
+ code := abi.FuncPCABI0(makeFuncStub)
+
+ // makeFuncImpl contains a stack map for use by the runtime
+ _, _, abid := funcLayout(ftyp, nil)
+
+ impl := &makeFuncImpl{
+ makeFuncCtxt: makeFuncCtxt{
+ fn: code,
+ stack: abid.stackPtrs,
+ argLen: abid.stackCallArgsSize,
+ regPtrs: abid.inRegPtrs,
+ },
+ ftyp: ftyp,
+ fn: fn,
+ }
+
+ return Value{t, unsafe.Pointer(impl), flag(Func)}
+}
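+
+// Illustrative sketch (not part of the upstream file): using MakeFunc
+// to build the swap function mentioned in the doc comment above.
+//
+//	var intSwap func(int, int) (int, int)
+//	swap := func(in []Value) []Value { return []Value{in[1], in[0]} }
+//	fn := ValueOf(&intSwap).Elem()
+//	fn.Set(MakeFunc(fn.Type(), swap))
+//	// intSwap(1, 2) now returns (2, 1).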
+
+// makeFuncStub is an assembly function that is the code half of
+// the function returned from MakeFunc. It expects a *makeFuncImpl
+// as its context register, and its job is to invoke callReflect(ctxt, frame)
+// where ctxt is the context register and frame is a pointer to the first
+// word in the passed-in argument frame.
+func makeFuncStub()
+
+// The first 3 words of this type must be kept in sync with
+// makeFuncImpl and runtime.reflectMethodValue.
+// Any changes should be reflected in all three.
+type methodValue struct {
+ makeFuncCtxt
+ method int
+ rcvr Value
+}
+
+// makeMethodValue converts v from the rcvr+method index representation
+// of a method value (which is basically the receiver value with a
+// special bit set) into a true func value - a value holding an actual
+// func. The output is semantically equivalent to the input as far as
+// the user of package reflect can tell, but the true func
+// representation can be handled by code like Convert and Interface
+// and Assign.
+func makeMethodValue(op string, v Value) Value {
+ if v.flag&flagMethod == 0 {
+ panic("reflect: internal error: invalid use of makeMethodValue")
+ }
+
+ // Ignoring the flagMethod bit, v describes the receiver, not the method type.
+ fl := v.flag & (flagRO | flagAddr | flagIndir)
+ fl |= flag(v.typ.Kind())
+ rcvr := Value{v.typ, v.ptr, fl}
+
+ // v.Type returns the actual type of the method value.
+ ftyp := (*funcType)(unsafe.Pointer(v.Type().(*rtype)))
+
+ code := methodValueCallCodePtr()
+
+ // methodValue contains a stack map for use by the runtime
+ _, _, abid := funcLayout(ftyp, nil)
+ fv := &methodValue{
+ makeFuncCtxt: makeFuncCtxt{
+ fn: code,
+ stack: abid.stackPtrs,
+ argLen: abid.stackCallArgsSize,
+ regPtrs: abid.inRegPtrs,
+ },
+ method: int(v.flag) >> flagMethodShift,
+ rcvr: rcvr,
+ }
+
+ // Cause panic if method is not appropriate.
+ // The panic would still happen during the call if we omit this,
+ // but we want Interface() and other operations to fail early.
+ methodReceiver(op, fv.rcvr, fv.method)
+
+ return Value{&ftyp.rtype, unsafe.Pointer(fv), v.flag&flagRO | flag(Func)}
+}
+
+func methodValueCallCodePtr() uintptr {
+ return abi.FuncPCABI0(methodValueCall)
+}
+
+// methodValueCall is an assembly function that is the code half of
+// the function returned from makeMethodValue. It expects a *methodValue
+// as its context register, and its job is to invoke callMethod(ctxt, frame)
+// where ctxt is the context register and frame is a pointer to the first
+// word in the passed-in argument frame.
+func methodValueCall()
+
+// This structure must be kept in sync with runtime.reflectMethodValue.
+// Any changes should be reflected in both.
+type makeFuncCtxt struct {
+ fn uintptr
+ stack *bitVector // ptrmap for both stack args and results
+ argLen uintptr // just args
+ regPtrs abi.IntArgRegBitmap
+}
+
+// moveMakeFuncArgPtrs uses ctxt.regPtrs to copy integer pointer arguments
+// in args.Ints to args.Ptrs where the GC can see them.
+//
+// This is similar to what reflectcallmove does in the runtime, except
+// that happens on the return path, whereas this happens on the call path.
+//
+// nosplit because pointers are being held in uintptr slots in args, so
+// having our stack scanned now could lead to accidentally freeing
+// memory.
+//
+//go:nosplit
+func moveMakeFuncArgPtrs(ctxt *makeFuncCtxt, args *abi.RegArgs) {
+ for i, arg := range args.Ints {
+ // Avoid write barriers! Because our write barrier enqueues what
+ // was there before, we might enqueue garbage.
+ if ctxt.regPtrs.Get(i) {
+ *(*uintptr)(unsafe.Pointer(&args.Ptrs[i])) = arg
+ } else {
+ // We *must* zero this space ourselves because it's defined in
+ // assembly code and the GC will scan these pointers. Otherwise,
+ // there will be garbage here.
+ *(*uintptr)(unsafe.Pointer(&args.Ptrs[i])) = 0
+ }
+ }
+}
diff --git a/contrib/go/_std_1.18/src/reflect/swapper.go b/contrib/go/_std_1.19/src/reflect/swapper.go
index 745c7b9f49..745c7b9f49 100644
--- a/contrib/go/_std_1.18/src/reflect/swapper.go
+++ b/contrib/go/_std_1.19/src/reflect/swapper.go
diff --git a/contrib/go/_std_1.19/src/reflect/type.go b/contrib/go/_std_1.19/src/reflect/type.go
new file mode 100644
index 0000000000..a52d3129df
--- /dev/null
+++ b/contrib/go/_std_1.19/src/reflect/type.go
@@ -0,0 +1,3186 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package reflect implements run-time reflection, allowing a program to
+// manipulate objects with arbitrary types. The typical use is to take a value
+// with static type interface{} and extract its dynamic type information by
+// calling TypeOf, which returns a Type.
+//
+// A call to ValueOf returns a Value representing the run-time data.
+// Zero takes a Type and returns a Value representing a zero value
+// for that type.
+//
+// See "The Laws of Reflection" for an introduction to reflection in Go:
+// https://golang.org/doc/articles/laws_of_reflection.html
+package reflect
+
+import (
+ "internal/goarch"
+ "internal/unsafeheader"
+ "strconv"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+ "unsafe"
+)
+
+// Type is the representation of a Go type.
+//
+// Not all methods apply to all kinds of types. Restrictions,
+// if any, are noted in the documentation for each method.
+// Use the Kind method to find out the kind of type before
+// calling kind-specific methods. Calling a method
+// inappropriate to the kind of type causes a run-time panic.
+//
+// Type values are comparable, such as with the == operator,
+// so they can be used as map keys.
+// Two Type values are equal if they represent identical types.
+type Type interface {
+ // Methods applicable to all types.
+
+ // Align returns the alignment in bytes of a value of
+ // this type when allocated in memory.
+ Align() int
+
+ // FieldAlign returns the alignment in bytes of a value of
+ // this type when used as a field in a struct.
+ FieldAlign() int
+
+ // Method returns the i'th method in the type's method set.
+ // It panics if i is not in the range [0, NumMethod()).
+ //
+ // For a non-interface type T or *T, the returned Method's Type and Func
+ // fields describe a function whose first argument is the receiver,
+ // and only exported methods are accessible.
+ //
+ // For an interface type, the returned Method's Type field gives the
+ // method signature, without a receiver, and the Func field is nil.
+ //
+ // Methods are sorted in lexicographic order.
+ Method(int) Method
+
+ // MethodByName returns the method with that name in the type's
+ // method set and a boolean indicating if the method was found.
+ //
+ // For a non-interface type T or *T, the returned Method's Type and Func
+ // fields describe a function whose first argument is the receiver.
+ //
+ // For an interface type, the returned Method's Type field gives the
+ // method signature, without a receiver, and the Func field is nil.
+ MethodByName(string) (Method, bool)
+
+ // NumMethod returns the number of methods accessible using Method.
+ //
+ // For a non-interface type, it returns the number of exported methods.
+ //
+ // For an interface type, it returns the number of exported and unexported methods.
+ NumMethod() int
+
+ // Name returns the type's name within its package for a defined type.
+ // For other (non-defined) types it returns the empty string.
+ Name() string
+
+ // PkgPath returns a defined type's package path, that is, the import path
+ // that uniquely identifies the package, such as "encoding/base64".
+ // If the type was predeclared (string, error) or not defined (*T, struct{},
+ // []int, or A where A is an alias for a non-defined type), the package path
+ // will be the empty string.
+ PkgPath() string
+
+ // Size returns the number of bytes needed to store
+ // a value of the given type; it is analogous to unsafe.Sizeof.
+ Size() uintptr
+
+ // String returns a string representation of the type.
+ // The string representation may use shortened package names
+ // (e.g., base64 instead of "encoding/base64") and is not
+ // guaranteed to be unique among types. To test for type identity,
+ // compare the Types directly.
+ String() string
+
+ // Kind returns the specific kind of this type.
+ Kind() Kind
+
+ // Implements reports whether the type implements the interface type u.
+ Implements(u Type) bool
+
+ // AssignableTo reports whether a value of the type is assignable to type u.
+ AssignableTo(u Type) bool
+
+ // ConvertibleTo reports whether a value of the type is convertible to type u.
+ // Even if ConvertibleTo returns true, the conversion may still panic.
+ // For example, a slice of type []T is convertible to *[N]T,
+ // but the conversion will panic if its length is less than N.
+ ConvertibleTo(u Type) bool
+
+ // Comparable reports whether values of this type are comparable.
+ // Even if Comparable returns true, the comparison may still panic.
+ // For example, values of interface type are comparable,
+ // but the comparison will panic if their dynamic type is not comparable.
+ Comparable() bool
+
+ // Methods applicable only to some types, depending on Kind.
+ // The methods allowed for each kind are:
+ //
+ // Int*, Uint*, Float*, Complex*: Bits
+ // Array: Elem, Len
+ // Chan: ChanDir, Elem
+ // Func: In, NumIn, Out, NumOut, IsVariadic.
+ // Map: Key, Elem
+ // Pointer: Elem
+ // Slice: Elem
+ // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField
+
+ // Bits returns the size of the type in bits.
+ // It panics if the type's Kind is not one of the
+ // sized or unsized Int, Uint, Float, or Complex kinds.
+ Bits() int
+
+ // ChanDir returns a channel type's direction.
+ // It panics if the type's Kind is not Chan.
+ ChanDir() ChanDir
+
+ // IsVariadic reports whether a function type's final input parameter
+ // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
+ // implicit actual type []T.
+ //
+ // For concreteness, if t represents func(x int, y ... float64), then
+ //
+ // t.NumIn() == 2
+ // t.In(0) is the reflect.Type for "int"
+ // t.In(1) is the reflect.Type for "[]float64"
+ // t.IsVariadic() == true
+ //
+ // IsVariadic panics if the type's Kind is not Func.
+ IsVariadic() bool
+
+ // Elem returns a type's element type.
+ // It panics if the type's Kind is not Array, Chan, Map, Pointer, or Slice.
+ Elem() Type
+
+ // Field returns a struct type's i'th field.
+ // It panics if the type's Kind is not Struct.
+ // It panics if i is not in the range [0, NumField()).
+ Field(i int) StructField
+
+ // FieldByIndex returns the nested field corresponding
+ // to the index sequence. It is equivalent to calling Field
+ // successively for each index i.
+ // It panics if the type's Kind is not Struct.
+ FieldByIndex(index []int) StructField
+
+ // FieldByName returns the struct field with the given name
+ // and a boolean indicating if the field was found.
+ FieldByName(name string) (StructField, bool)
+
+ // FieldByNameFunc returns the struct field with a name
+ // that satisfies the match function and a boolean indicating if
+ // the field was found.
+ //
+ // FieldByNameFunc considers the fields in the struct itself
+ // and then the fields in any embedded structs, in breadth first order,
+ // stopping at the shallowest nesting depth containing one or more
+ // fields satisfying the match function. If multiple fields at that depth
+ // satisfy the match function, they cancel each other
+ // and FieldByNameFunc returns no match.
+ // This behavior mirrors Go's handling of name lookup in
+ // structs containing embedded fields.
+ FieldByNameFunc(match func(string) bool) (StructField, bool)
+
+ // In returns the type of a function type's i'th input parameter.
+ // It panics if the type's Kind is not Func.
+ // It panics if i is not in the range [0, NumIn()).
+ In(i int) Type
+
+ // Key returns a map type's key type.
+ // It panics if the type's Kind is not Map.
+ Key() Type
+
+ // Len returns an array type's length.
+ // It panics if the type's Kind is not Array.
+ Len() int
+
+ // NumField returns a struct type's field count.
+ // It panics if the type's Kind is not Struct.
+ NumField() int
+
+ // NumIn returns a function type's input parameter count.
+ // It panics if the type's Kind is not Func.
+ NumIn() int
+
+ // NumOut returns a function type's output parameter count.
+ // It panics if the type's Kind is not Func.
+ NumOut() int
+
+ // Out returns the type of a function type's i'th output parameter.
+ // It panics if the type's Kind is not Func.
+ // It panics if i is not in the range [0, NumOut()).
+ Out(i int) Type
+
+ common() *rtype
+ uncommon() *uncommonType
+}
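+
+// Illustrative sketch of typical Type usage, assuming the methods
+// above:
+//
+//	t := TypeOf(map[string][]int(nil)) // t.Kind() == Map
+//	t.Key().Kind()                     // String
+//	t.Elem().Kind()                    // Slice
+//	t.Elem().Elem().Kind()             // Int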
+
+// BUG(rsc): FieldByName and related functions consider struct field names to be equal
+// if the names are equal, even if they are unexported names originating
+// in different packages. The practical effect of this is that the result of
+// t.FieldByName("x") is not well defined if the struct type t contains
+// multiple fields named x (embedded from different packages).
+// FieldByName may return one of the fields named x or may report that there are none.
+// See https://golang.org/issue/4876 for more details.
+
+/*
+ * These data structures are known to the compiler (../cmd/compile/internal/reflectdata/reflect.go).
+ * A few are known to ../runtime/type.go to convey to debuggers.
+ */
+
+// A Kind represents the specific kind of type that a Type represents.
+// The zero Kind is not a valid kind.
+type Kind uint
+
+const (
+ Invalid Kind = iota
+ Bool
+ Int
+ Int8
+ Int16
+ Int32
+ Int64
+ Uint
+ Uint8
+ Uint16
+ Uint32
+ Uint64
+ Uintptr
+ Float32
+ Float64
+ Complex64
+ Complex128
+ Array
+ Chan
+ Func
+ Interface
+ Map
+ Pointer
+ Slice
+ String
+ Struct
+ UnsafePointer
+)
+
+// Ptr is the old name for the Pointer kind.
+const Ptr = Pointer
+
+// tflag is used by an rtype to signal what extra type information is
+// available in the memory directly following the rtype value.
+//
+// tflag values must be kept in sync with copies in:
+//
+// cmd/compile/internal/reflectdata/reflect.go
+// cmd/link/internal/ld/decodesym.go
+// runtime/type.go
+type tflag uint8
+
+const (
+ // tflagUncommon means that there is a pointer, *uncommonType,
+ // just beyond the outer type structure.
+ //
+ // For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0,
+ // then t has uncommonType data and it can be accessed as:
+ //
+ // type tUncommon struct {
+ // structType
+ // u uncommonType
+ // }
+ // u := &(*tUncommon)(unsafe.Pointer(t)).u
+ tflagUncommon tflag = 1 << 0
+
+ // tflagExtraStar means the name in the str field has an
+ // extraneous '*' prefix. This is because for most types T in
+ // a program, the type *T also exists and reusing the str data
+ // saves binary size.
+ tflagExtraStar tflag = 1 << 1
+
+ // tflagNamed means the type has a name.
+ tflagNamed tflag = 1 << 2
+
+ // tflagRegularMemory means that equal and hash functions can treat
+ // this type as a single region of t.size bytes.
+ tflagRegularMemory tflag = 1 << 3
+)
+
+// rtype is the common implementation of most values.
+// It is embedded in other struct types.
+//
+// rtype must be kept in sync with ../runtime/type.go:/^type._type.
+type rtype struct {
+ size uintptr
+ ptrdata uintptr // number of bytes in the type that can contain pointers
+ hash uint32 // hash of type; avoids computation in hash tables
+ tflag tflag // extra type information flags
+ align uint8 // alignment of variable with this type
+ fieldAlign uint8 // alignment of struct field with this type
+ kind uint8 // enumeration for C
+ // function for comparing objects of this type
+ // (ptr to object A, ptr to object B) -> ==?
+ equal func(unsafe.Pointer, unsafe.Pointer) bool
+ gcdata *byte // garbage collection data
+ str nameOff // string form
+ ptrToThis typeOff // type for pointer to this type, may be zero
+}
+
+// Method on non-interface type
+type method struct {
+ name nameOff // name of method
+ mtyp typeOff // method type (without receiver)
+ ifn textOff // fn used in interface call (one-word receiver)
+ tfn textOff // fn used for normal method call
+}
+
+// uncommonType is present only for defined types or types with methods
+// (if T is a defined type, the uncommonTypes for T and *T have methods).
+// Using a pointer to this struct reduces the overall size required
+// to describe a non-defined type with no methods.
+type uncommonType struct {
+ pkgPath nameOff // import path; empty for built-in types like int, string
+ mcount uint16 // number of methods
+ xcount uint16 // number of exported methods
+ moff uint32 // offset from this uncommontype to [mcount]method
+ _ uint32 // unused
+}
+
+// ChanDir represents a channel type's direction.
+type ChanDir int
+
+const (
+ RecvDir ChanDir = 1 << iota // <-chan
+ SendDir // chan<-
+ BothDir = RecvDir | SendDir // chan
+)
+
+// arrayType represents a fixed array type.
+type arrayType struct {
+ rtype
+ elem *rtype // array element type
+ slice *rtype // slice type
+ len uintptr
+}
+
+// chanType represents a channel type.
+type chanType struct {
+ rtype
+ elem *rtype // channel element type
+ dir uintptr // channel direction (ChanDir)
+}
+
+// funcType represents a function type.
+//
+// A *rtype for each in and out parameter is stored in an array that
+// directly follows the funcType (and possibly its uncommonType). So
+// a function type with one method, one input, and one output is:
+//
+// struct {
+// funcType
+// uncommonType
+// [2]*rtype // [0] is in, [1] is out
+// }
+type funcType struct {
+ rtype
+ inCount uint16
+ outCount uint16 // top bit is set if last input parameter is ...
+}
+
+// imethod represents a method on an interface type
+type imethod struct {
+ name nameOff // name of method
+ typ typeOff // .(*FuncType) underneath
+}
+
+// interfaceType represents an interface type.
+type interfaceType struct {
+ rtype
+ pkgPath name // import path
+ methods []imethod // sorted by hash
+}
+
+// mapType represents a map type.
+type mapType struct {
+ rtype
+ key *rtype // map key type
+ elem *rtype // map element (value) type
+ bucket *rtype // internal bucket structure
+ // function for hashing keys (ptr to key, seed) -> hash
+ hasher func(unsafe.Pointer, uintptr) uintptr
+ keysize uint8 // size of key slot
+ valuesize uint8 // size of value slot
+ bucketsize uint16 // size of bucket
+ flags uint32
+}
+
+// ptrType represents a pointer type.
+type ptrType struct {
+ rtype
+ elem *rtype // pointer element (pointed at) type
+}
+
+// sliceType represents a slice type.
+type sliceType struct {
+ rtype
+ elem *rtype // slice element type
+}
+
+// Struct field
+type structField struct {
+ name name // name is always non-empty
+ typ *rtype // type of field
+ offset uintptr // byte offset of field
+}
+
+func (f *structField) embedded() bool {
+ return f.name.embedded()
+}
+
+// structType represents a struct type.
+type structType struct {
+ rtype
+ pkgPath name
+ fields []structField // sorted by offset
+}
+
+// name is an encoded type name with optional extra data.
+//
+// The first byte is a bit field containing:
+//
+// 1<<0 the name is exported
+// 1<<1 tag data follows the name
+// 1<<2 pkgPath nameOff follows the name and tag
+// 1<<3 the name is of an embedded (a.k.a. anonymous) field
+//
+// Following that, there is a varint-encoded length of the name,
+// followed by the name itself.
+//
+// If tag data is present, it also has a varint-encoded length
+// followed by the tag itself.
+//
+// If the import path follows, then 4 bytes at the end of
+// the data form a nameOff. The import path is only set for concrete
+// methods that are defined in a different package than their type.
+//
+// If a name starts with "*", then the exported bit represents
+// whether the pointed to type is exported.
+//
+// Note: this encoding must match here and in:
+// cmd/compile/internal/reflectdata/reflect.go
+// runtime/type.go
+// internal/reflectlite/type.go
+// cmd/link/internal/ld/decodesym.go
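+//
+// Illustrative sketch: the exported, untagged name "Foo" is encoded
+// as the bytes
+//
+//	0x01          // flags: exported
+//	0x03          // varint length of "Foo"
+//	'F', 'o', 'o' // the name itself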
+
+type name struct {
+ bytes *byte
+}
+
+func (n name) data(off int, whySafe string) *byte {
+ return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off), whySafe))
+}
+
+func (n name) isExported() bool {
+ return (*n.bytes)&(1<<0) != 0
+}
+
+func (n name) hasTag() bool {
+ return (*n.bytes)&(1<<1) != 0
+}
+
+func (n name) embedded() bool {
+ return (*n.bytes)&(1<<3) != 0
+}
+
+// readVarint parses a varint as encoded by encoding/binary.
+// It returns the number of encoded bytes and the encoded value.
+func (n name) readVarint(off int) (int, int) {
+ v := 0
+ for i := 0; ; i++ {
+ x := *n.data(off+i, "read varint")
+ v += int(x&0x7f) << (7 * i)
+ if x&0x80 == 0 {
+ return i + 1, v
+ }
+ }
+}
+
+// writeVarint writes n to buf in varint form. Returns the
+// number of bytes written. n must be nonnegative.
+// Writes at most 10 bytes.
+func writeVarint(buf []byte, n int) int {
+ for i := 0; ; i++ {
+ b := byte(n & 0x7f)
+ n >>= 7
+ if n == 0 {
+ buf[i] = b
+ return i + 1
+ }
+ buf[i] = b | 0x80
+ }
+}
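+
+// Illustrative sketch: writeVarint encodes 300 as the two bytes
+// 0xAC, 0x02 (low seven bits first, high bit set on every byte but
+// the last), and readVarint reverses this, returning (2, 300).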
+
+func (n name) name() (s string) {
+ if n.bytes == nil {
+ return
+ }
+ i, l := n.readVarint(1)
+ hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
+ hdr.Data = unsafe.Pointer(n.data(1+i, "non-empty string"))
+ hdr.Len = l
+ return
+}
+
+func (n name) tag() (s string) {
+ if !n.hasTag() {
+ return ""
+ }
+ i, l := n.readVarint(1)
+ i2, l2 := n.readVarint(1 + i + l)
+ hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
+ hdr.Data = unsafe.Pointer(n.data(1+i+l+i2, "non-empty string"))
+ hdr.Len = l2
+ return
+}
+
+func (n name) pkgPath() string {
+ if n.bytes == nil || *n.data(0, "name flag field")&(1<<2) == 0 {
+ return ""
+ }
+ i, l := n.readVarint(1)
+ off := 1 + i + l
+ if n.hasTag() {
+ i2, l2 := n.readVarint(off)
+ off += i2 + l2
+ }
+ var nameOff int32
+ // Note that this field may not be aligned in memory,
+ // so we cannot use a direct int32 assignment here.
+ copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off, "name offset field")))[:])
+ pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))}
+ return pkgPathName.name()
+}
+
+func newName(n, tag string, exported, embedded bool) name {
+ if len(n) >= 1<<29 {
+ panic("reflect.nameFrom: name too long: " + n[:1024] + "...")
+ }
+ if len(tag) >= 1<<29 {
+ panic("reflect.nameFrom: tag too long: " + tag[:1024] + "...")
+ }
+ var nameLen [10]byte
+ var tagLen [10]byte
+ nameLenLen := writeVarint(nameLen[:], len(n))
+ tagLenLen := writeVarint(tagLen[:], len(tag))
+
+ var bits byte
+ l := 1 + nameLenLen + len(n)
+ if exported {
+ bits |= 1 << 0
+ }
+ if len(tag) > 0 {
+ l += tagLenLen + len(tag)
+ bits |= 1 << 1
+ }
+ if embedded {
+ bits |= 1 << 3
+ }
+
+ b := make([]byte, l)
+ b[0] = bits
+ copy(b[1:], nameLen[:nameLenLen])
+ copy(b[1+nameLenLen:], n)
+ if len(tag) > 0 {
+ tb := b[1+nameLenLen+len(n):]
+ copy(tb, tagLen[:tagLenLen])
+ copy(tb[tagLenLen:], tag)
+ }
+
+ return name{bytes: &b[0]}
+}
+
+/*
+ * The compiler knows the exact layout of all the data structures above.
+ * The compiler does not know about the data structures and methods below.
+ */
+
+// Method represents a single method.
+type Method struct {
+ // Name is the method name.
+ Name string
+
+ // PkgPath is the package path that qualifies a lower case (unexported)
+ // method name. It is empty for upper case (exported) method names.
+ // The combination of PkgPath and Name uniquely identifies a method
+ // in a method set.
+ // See https://golang.org/ref/spec#Uniqueness_of_identifiers
+ PkgPath string
+
+ Type Type // method type
+ Func Value // func with receiver as first argument
+ Index int // index for Type.Method
+}
+
+// IsExported reports whether the method is exported.
+func (m Method) IsExported() bool {
+ return m.PkgPath == ""
+}
+
+const (
+ kindDirectIface = 1 << 5
+ kindGCProg = 1 << 6 // Type.gc points to GC program
+ kindMask = (1 << 5) - 1
+)
+
+// String returns the name of k.
+func (k Kind) String() string {
+ if uint(k) < uint(len(kindNames)) {
+ return kindNames[uint(k)]
+ }
+ return "kind" + strconv.Itoa(int(k))
+}
+
+var kindNames = []string{
+ Invalid: "invalid",
+ Bool: "bool",
+ Int: "int",
+ Int8: "int8",
+ Int16: "int16",
+ Int32: "int32",
+ Int64: "int64",
+ Uint: "uint",
+ Uint8: "uint8",
+ Uint16: "uint16",
+ Uint32: "uint32",
+ Uint64: "uint64",
+ Uintptr: "uintptr",
+ Float32: "float32",
+ Float64: "float64",
+ Complex64: "complex64",
+ Complex128: "complex128",
+ Array: "array",
+ Chan: "chan",
+ Func: "func",
+ Interface: "interface",
+ Map: "map",
+ Pointer: "ptr",
+ Slice: "slice",
+ String: "string",
+ Struct: "struct",
+ UnsafePointer: "unsafe.Pointer",
+}
+
+func (t *uncommonType) methods() []method {
+ if t.mcount == 0 {
+ return nil
+ }
+ return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.mcount > 0"))[:t.mcount:t.mcount]
+}
+
+func (t *uncommonType) exportedMethods() []method {
+ if t.xcount == 0 {
+ return nil
+ }
+ return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.xcount > 0"))[:t.xcount:t.xcount]
+}
+
+// resolveNameOff resolves a name offset from a base pointer.
+// The (*rtype).nameOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
+
+// resolveTypeOff resolves an *rtype offset from a base type.
+// The (*rtype).typeOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
+
+// resolveTextOff resolves a function pointer offset from a base type.
+// The (*rtype).textOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
+
+// addReflectOff adds a pointer to the reflection lookup map in the runtime.
+// It returns a new ID that can be used as a typeOff or textOff, and will
+// be resolved correctly. Implemented in the runtime package.
+func addReflectOff(ptr unsafe.Pointer) int32
+
+// resolveReflectName adds a name to the reflection lookup map in the runtime.
+// It returns a new nameOff that can be used to refer to the pointer.
+func resolveReflectName(n name) nameOff {
+ return nameOff(addReflectOff(unsafe.Pointer(n.bytes)))
+}
+
+// resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
+// It returns a new typeOff that can be used to refer to the pointer.
+func resolveReflectType(t *rtype) typeOff {
+ return typeOff(addReflectOff(unsafe.Pointer(t)))
+}
+
+// resolveReflectText adds a function pointer to the reflection lookup map in
+// the runtime. It returns a new textOff that can be used to refer to the
+// pointer.
+func resolveReflectText(ptr unsafe.Pointer) textOff {
+ return textOff(addReflectOff(ptr))
+}
+
+type nameOff int32 // offset to a name
+type typeOff int32 // offset to an *rtype
+type textOff int32 // offset from top of text section
+
+func (t *rtype) nameOff(off nameOff) name {
+ return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))}
+}
+
+func (t *rtype) typeOff(off typeOff) *rtype {
+ return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
+}
+
+func (t *rtype) textOff(off textOff) unsafe.Pointer {
+ return resolveTextOff(unsafe.Pointer(t), int32(off))
+}
+
+func (t *rtype) uncommon() *uncommonType {
+ if t.tflag&tflagUncommon == 0 {
+ return nil
+ }
+ switch t.Kind() {
+ case Struct:
+ return &(*structTypeUncommon)(unsafe.Pointer(t)).u
+ case Pointer:
+ type u struct {
+ ptrType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Func:
+ type u struct {
+ funcType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Slice:
+ type u struct {
+ sliceType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Array:
+ type u struct {
+ arrayType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Chan:
+ type u struct {
+ chanType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Map:
+ type u struct {
+ mapType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case Interface:
+ type u struct {
+ interfaceType
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ default:
+ type u struct {
+ rtype
+ u uncommonType
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ }
+}
+
+func (t *rtype) String() string {
+ s := t.nameOff(t.str).name()
+ if t.tflag&tflagExtraStar != 0 {
+ return s[1:]
+ }
+ return s
+}
+
+func (t *rtype) Size() uintptr { return t.size }
+
+func (t *rtype) Bits() int {
+ if t == nil {
+ panic("reflect: Bits of nil Type")
+ }
+ k := t.Kind()
+ if k < Int || k > Complex128 {
+ panic("reflect: Bits of non-arithmetic Type " + t.String())
+ }
+ return int(t.size) * 8
+}
+
+func (t *rtype) Align() int { return int(t.align) }
+
+func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
+
+func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
+
+func (t *rtype) pointers() bool { return t.ptrdata != 0 }
+
+func (t *rtype) common() *rtype { return t }
+
+func (t *rtype) exportedMethods() []method {
+ ut := t.uncommon()
+ if ut == nil {
+ return nil
+ }
+ return ut.exportedMethods()
+}
+
+func (t *rtype) NumMethod() int {
+ if t.Kind() == Interface {
+ tt := (*interfaceType)(unsafe.Pointer(t))
+ return tt.NumMethod()
+ }
+ return len(t.exportedMethods())
+}
+
+func (t *rtype) Method(i int) (m Method) {
+ if t.Kind() == Interface {
+ tt := (*interfaceType)(unsafe.Pointer(t))
+ return tt.Method(i)
+ }
+ methods := t.exportedMethods()
+ if i < 0 || i >= len(methods) {
+ panic("reflect: Method index out of range")
+ }
+ p := methods[i]
+ pname := t.nameOff(p.name)
+ m.Name = pname.name()
+ fl := flag(Func)
+ mtyp := t.typeOff(p.mtyp)
+ ft := (*funcType)(unsafe.Pointer(mtyp))
+ in := make([]Type, 0, 1+len(ft.in()))
+ in = append(in, t)
+ for _, arg := range ft.in() {
+ in = append(in, arg)
+ }
+ out := make([]Type, 0, len(ft.out()))
+ for _, ret := range ft.out() {
+ out = append(out, ret)
+ }
+ mt := FuncOf(in, out, ft.IsVariadic())
+ m.Type = mt
+ tfn := t.textOff(p.tfn)
+ fn := unsafe.Pointer(&tfn)
+ m.Func = Value{mt.(*rtype), fn, fl}
+
+ m.Index = i
+ return m
+}
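+
+// Note the receiver convention (illustrative sketch; T is a hypothetical
+// type in package main with one exported method Greet):
+//
+//	// func (T) Greet(s string) string
+//	m := reflect.TypeOf(T{}).Method(0)
+//	m.Type.String() // "func(main.T, string) string": the receiver is arg 0
+//	m.Func.Call([]reflect.Value{reflect.ValueOf(T{}), reflect.ValueOf("hi")})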
+
+func (t *rtype) MethodByName(name string) (m Method, ok bool) {
+ if t.Kind() == Interface {
+ tt := (*interfaceType)(unsafe.Pointer(t))
+ return tt.MethodByName(name)
+ }
+ ut := t.uncommon()
+ if ut == nil {
+ return Method{}, false
+ }
+ // TODO(mdempsky): Binary search.
+ for i, p := range ut.exportedMethods() {
+ if t.nameOff(p.name).name() == name {
+ return t.Method(i), true
+ }
+ }
+ return Method{}, false
+}
+
+func (t *rtype) PkgPath() string {
+ if t.tflag&tflagNamed == 0 {
+ return ""
+ }
+ ut := t.uncommon()
+ if ut == nil {
+ return ""
+ }
+ return t.nameOff(ut.pkgPath).name()
+}
+
+func (t *rtype) hasName() bool {
+ return t.tflag&tflagNamed != 0
+}
+
+func (t *rtype) Name() string {
+ if !t.hasName() {
+ return ""
+ }
+ s := t.String()
+ i := len(s) - 1
+ sqBrackets := 0
+ for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
+ switch s[i] {
+ case ']':
+ sqBrackets++
+ case '[':
+ sqBrackets--
+ }
+ i--
+ }
+ return s[i+1:]
+}
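+
+// For example (illustrative, in a hypothetical package main):
+//
+//	type Pair[K comparable, V any] struct{ K K; V V }
+//	reflect.TypeOf(Pair[int, string]{}).Name() // "Pair[int,string]"
+//
+// The bracket counting above keeps a '.' inside "[...]" (as in
+// "Pair[main.T]") from being mistaken for the package qualifier dot.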
+
+func (t *rtype) ChanDir() ChanDir {
+ if t.Kind() != Chan {
+ panic("reflect: ChanDir of non-chan type " + t.String())
+ }
+ tt := (*chanType)(unsafe.Pointer(t))
+ return ChanDir(tt.dir)
+}
+
+func (t *rtype) IsVariadic() bool {
+ if t.Kind() != Func {
+ panic("reflect: IsVariadic of non-func type " + t.String())
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return tt.outCount&(1<<15) != 0
+}
+
+func (t *rtype) Elem() Type {
+ switch t.Kind() {
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ case Chan:
+ tt := (*chanType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ case Map:
+ tt := (*mapType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ case Pointer:
+ tt := (*ptrType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ case Slice:
+ tt := (*sliceType)(unsafe.Pointer(t))
+ return toType(tt.elem)
+ }
+ panic("reflect: Elem of invalid type " + t.String())
+}
+
+func (t *rtype) Field(i int) StructField {
+ if t.Kind() != Struct {
+ panic("reflect: Field of non-struct type " + t.String())
+ }
+ tt := (*structType)(unsafe.Pointer(t))
+ return tt.Field(i)
+}
+
+func (t *rtype) FieldByIndex(index []int) StructField {
+ if t.Kind() != Struct {
+ panic("reflect: FieldByIndex of non-struct type " + t.String())
+ }
+ tt := (*structType)(unsafe.Pointer(t))
+ return tt.FieldByIndex(index)
+}
+
+func (t *rtype) FieldByName(name string) (StructField, bool) {
+ if t.Kind() != Struct {
+ panic("reflect: FieldByName of non-struct type " + t.String())
+ }
+ tt := (*structType)(unsafe.Pointer(t))
+ return tt.FieldByName(name)
+}
+
+func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
+ if t.Kind() != Struct {
+ panic("reflect: FieldByNameFunc of non-struct type " + t.String())
+ }
+ tt := (*structType)(unsafe.Pointer(t))
+ return tt.FieldByNameFunc(match)
+}
+
+func (t *rtype) In(i int) Type {
+ if t.Kind() != Func {
+ panic("reflect: In of non-func type " + t.String())
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return toType(tt.in()[i])
+}
+
+func (t *rtype) Key() Type {
+ if t.Kind() != Map {
+ panic("reflect: Key of non-map type " + t.String())
+ }
+ tt := (*mapType)(unsafe.Pointer(t))
+ return toType(tt.key)
+}
+
+func (t *rtype) Len() int {
+ if t.Kind() != Array {
+ panic("reflect: Len of non-array type " + t.String())
+ }
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return int(tt.len)
+}
+
+func (t *rtype) NumField() int {
+ if t.Kind() != Struct {
+ panic("reflect: NumField of non-struct type " + t.String())
+ }
+ tt := (*structType)(unsafe.Pointer(t))
+ return len(tt.fields)
+}
+
+func (t *rtype) NumIn() int {
+ if t.Kind() != Func {
+ panic("reflect: NumIn of non-func type " + t.String())
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return int(tt.inCount)
+}
+
+func (t *rtype) NumOut() int {
+ if t.Kind() != Func {
+ panic("reflect: NumOut of non-func type " + t.String())
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return len(tt.out())
+}
+
+func (t *rtype) Out(i int) Type {
+ if t.Kind() != Func {
+ panic("reflect: Out of non-func type " + t.String())
+ }
+ tt := (*funcType)(unsafe.Pointer(t))
+ return toType(tt.out()[i])
+}
+
+func (t *funcType) in() []*rtype {
+ uadd := unsafe.Sizeof(*t)
+ if t.tflag&tflagUncommon != 0 {
+ uadd += unsafe.Sizeof(uncommonType{})
+ }
+ if t.inCount == 0 {
+ return nil
+ }
+ return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.inCount:t.inCount]
+}
+
+func (t *funcType) out() []*rtype {
+ uadd := unsafe.Sizeof(*t)
+ if t.tflag&tflagUncommon != 0 {
+ uadd += unsafe.Sizeof(uncommonType{})
+ }
+ outCount := t.outCount & (1<<15 - 1)
+ if outCount == 0 {
+ return nil
+ }
+ return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "outCount > 0"))[t.inCount : t.inCount+outCount : t.inCount+outCount]
+}
+
+// add returns p+x.
+//
+// The whySafe string is ignored, so that the function still inlines
+// as efficiently as p+x, but all call sites should use the string to
+// record why the addition is safe, which is to say why the addition
+// does not cause x to advance to the very end of p's allocation
+// and therefore point incorrectly at the next block in memory.
+func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(p) + x)
+}
+
+func (d ChanDir) String() string {
+ switch d {
+ case SendDir:
+ return "chan<-"
+ case RecvDir:
+ return "<-chan"
+ case BothDir:
+ return "chan"
+ }
+ return "ChanDir" + strconv.Itoa(int(d))
+}
+
+// Method returns the i'th method in the type's method set.
+func (t *interfaceType) Method(i int) (m Method) {
+ if i < 0 || i >= len(t.methods) {
+ return
+ }
+ p := &t.methods[i]
+ pname := t.nameOff(p.name)
+ m.Name = pname.name()
+ if !pname.isExported() {
+ m.PkgPath = pname.pkgPath()
+ if m.PkgPath == "" {
+ m.PkgPath = t.pkgPath.name()
+ }
+ }
+ m.Type = toType(t.typeOff(p.typ))
+ m.Index = i
+ return
+}
+
+// NumMethod returns the number of interface methods in the type's method set.
+func (t *interfaceType) NumMethod() int { return len(t.methods) }
+
+// MethodByName returns the method with the given name in the type's method set.
+func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
+ if t == nil {
+ return
+ }
+ var p *imethod
+ for i := range t.methods {
+ p = &t.methods[i]
+ if t.nameOff(p.name).name() == name {
+ return t.Method(i), true
+ }
+ }
+ return
+}
+
+// A StructField describes a single field in a struct.
+type StructField struct {
+ // Name is the field name.
+ Name string
+
+ // PkgPath is the package path that qualifies a lower case (unexported)
+ // field name. It is empty for upper case (exported) field names.
+ // See https://golang.org/ref/spec#Uniqueness_of_identifiers
+ PkgPath string
+
+ Type Type // field type
+ Tag StructTag // field tag string
+ Offset uintptr // offset within struct, in bytes
+ Index []int // index sequence for Type.FieldByIndex
+ Anonymous bool // is an embedded field
+}
+
+// IsExported reports whether the field is exported.
+func (f StructField) IsExported() bool {
+ return f.PkgPath == ""
+}
+
+// A StructTag is the tag string in a struct field.
+//
+// By convention, tag strings are a concatenation of
+// optionally space-separated key:"value" pairs.
+// Each key is a non-empty string consisting of non-control
+// characters other than space (U+0020 ' '), quote (U+0022 '"'),
+// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
+// characters and Go string literal syntax.
+type StructTag string
+
+// Get returns the value associated with key in the tag string.
+// If there is no such key in the tag, Get returns the empty string.
+// If the tag does not have the conventional format, the value
+// returned by Get is unspecified. To determine whether a tag is
+// explicitly set to the empty string, use Lookup.
+func (tag StructTag) Get(key string) string {
+ v, _ := tag.Lookup(key)
+ return v
+}
+
+// Lookup returns the value associated with key in the tag string.
+// If the key is present in the tag the value (which may be empty)
+// is returned. Otherwise the returned value will be the empty string.
+// The ok return value reports whether the value was explicitly set in
+// the tag string. If the tag does not have the conventional format,
+// the value returned by Lookup is unspecified.
+func (tag StructTag) Lookup(key string) (value string, ok bool) {
+ // When modifying this code, also update the validateStructTag code
+ // in cmd/vet/structtag.go.
+
+ for tag != "" {
+ // Skip leading space.
+ i := 0
+ for i < len(tag) && tag[i] == ' ' {
+ i++
+ }
+ tag = tag[i:]
+ if tag == "" {
+ break
+ }
+
+ // Scan to colon. A space, a quote or a control character is a syntax error.
+ // Strictly speaking, control chars include the range [0x7f, 0x9f], not just
+ // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
+ // as it is simpler to inspect the tag's bytes than the tag's runes.
+ i = 0
+ for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
+ i++
+ }
+ if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
+ break
+ }
+ name := string(tag[:i])
+ tag = tag[i+1:]
+
+ // Scan quoted string to find value.
+ i = 1
+ for i < len(tag) && tag[i] != '"' {
+ if tag[i] == '\\' {
+ i++
+ }
+ i++
+ }
+ if i >= len(tag) {
+ break
+ }
+ qvalue := string(tag[:i+1])
+ tag = tag[i+1:]
+
+ if key == name {
+ value, err := strconv.Unquote(qvalue)
+ if err != nil {
+ break
+ }
+ return value, true
+ }
+ }
+ return "", false
+}
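+
+// A minimal usage sketch (the values follow from the parsing rules above):
+//
+//	tag := reflect.StructTag(`json:"name,omitempty" xml:"name"`)
+//	v, ok := tag.Lookup("json") // v == "name,omitempty", ok == true
+//	v = tag.Get("xml")          // v == "name"
+//	_, ok = tag.Lookup("yaml")  // ok == false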
+
+// Field returns the i'th struct field.
+func (t *structType) Field(i int) (f StructField) {
+ if i < 0 || i >= len(t.fields) {
+ panic("reflect: Field index out of bounds")
+ }
+ p := &t.fields[i]
+ f.Type = toType(p.typ)
+ f.Name = p.name.name()
+ f.Anonymous = p.embedded()
+ if !p.name.isExported() {
+ f.PkgPath = t.pkgPath.name()
+ }
+ if tag := p.name.tag(); tag != "" {
+ f.Tag = StructTag(tag)
+ }
+ f.Offset = p.offset
+
+ // NOTE(rsc): This is the only allocation in the interface
+ // presented by a reflect.Type. It would be nice to avoid,
+ // at least in the common cases, but we need to make sure
+ // that misbehaving clients of reflect cannot affect other
+ // uses of reflect. One possibility is CL 5371098, but we
+ // postponed that ugliness until there is a demonstrated
+ // need for the performance. This is issue 2320.
+ f.Index = []int{i}
+ return
+}
+
+// TODO(gri): Should there be an error/bool indicator if the index
+// is wrong for FieldByIndex?
+
+// FieldByIndex returns the nested field corresponding to index.
+func (t *structType) FieldByIndex(index []int) (f StructField) {
+ f.Type = toType(&t.rtype)
+ for i, x := range index {
+ if i > 0 {
+ ft := f.Type
+ if ft.Kind() == Pointer && ft.Elem().Kind() == Struct {
+ ft = ft.Elem()
+ }
+ f.Type = ft
+ }
+ f = f.Type.Field(x)
+ }
+ return
+}
+
+// A fieldScan represents an item on the fieldByNameFunc scan work list.
+type fieldScan struct {
+ typ *structType
+ index []int
+}
+
+// FieldByNameFunc returns the struct field with a name that satisfies the
+// match function and a boolean to indicate if the field was found.
+func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
+ // This uses the same condition that the Go language does: there must be a unique instance
+ // of the match at a given depth level. If there are multiple instances of a match at the
+ // same depth, they annihilate each other and inhibit any possible match at a lower level.
+ // The algorithm is breadth first search, one depth level at a time.
+
+ // The current and next slices are work queues:
+ // current lists the fields to visit on this depth level,
+ // and next lists the fields on the next lower level.
+ current := []fieldScan{}
+ next := []fieldScan{{typ: t}}
+
+ // nextCount records the number of times an embedded type has been
+ // encountered and considered for queueing in the 'next' slice.
+ // We only queue the first one, but we increment the count on each.
+ // If a struct type T can be reached more than once at a given depth level,
+ // then it annihilates itself and need not be considered at all when we
+ // process that next depth level.
+ var nextCount map[*structType]int
+
+ // visited records the structs that have been considered already.
+ // Embedded pointer fields can create cycles in the graph of
+ // reachable embedded types; visited avoids following those cycles.
+ // It also avoids duplicated effort: if we didn't find the field in an
+ // embedded type T at level 2, we won't find it in one at level 4 either.
+ visited := map[*structType]bool{}
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count := nextCount
+ nextCount = nil
+
+ // Process all the fields at this depth, now listed in 'current'.
+ // The loop queues embedded fields found in 'next', for processing during the next
+ // iteration. The multiplicity of the 'current' field counts is recorded
+ // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
+ for _, scan := range current {
+ t := scan.typ
+ if visited[t] {
+ // We've looked through this type before, at a higher level.
+ // That higher level would shadow the lower level we're now at,
+ // so this one can't be useful to us. Ignore it.
+ continue
+ }
+ visited[t] = true
+ for i := range t.fields {
+ f := &t.fields[i]
+ // Find name and (for embedded field) type for field f.
+ fname := f.name.name()
+ var ntyp *rtype
+ if f.embedded() {
+ // Embedded field of type T or *T.
+ ntyp = f.typ
+ if ntyp.Kind() == Pointer {
+ ntyp = ntyp.Elem().common()
+ }
+ }
+
+ // Does it match?
+ if match(fname) {
+ // Potential match
+ if count[t] > 1 || ok {
+ // Name appeared multiple times at this level: annihilate.
+ return StructField{}, false
+ }
+ result = t.Field(i)
+ result.Index = nil
+ result.Index = append(result.Index, scan.index...)
+ result.Index = append(result.Index, i)
+ ok = true
+ continue
+ }
+
+ // Queue embedded struct fields for processing with next level,
+ // but only if we haven't seen a match yet at this level and only
+ // if the embedded types haven't already been queued.
+ if ok || ntyp == nil || ntyp.Kind() != Struct {
+ continue
+ }
+ styp := (*structType)(unsafe.Pointer(ntyp))
+ if nextCount[styp] > 0 {
+ nextCount[styp] = 2 // exact multiple doesn't matter
+ continue
+ }
+ if nextCount == nil {
+ nextCount = map[*structType]int{}
+ }
+ nextCount[styp] = 1
+ if count[t] > 1 {
+ nextCount[styp] = 2 // exact multiple doesn't matter
+ }
+ var index []int
+ index = append(index, scan.index...)
+ index = append(index, i)
+ next = append(next, fieldScan{styp, index})
+ }
+ }
+ if ok {
+ break
+ }
+ }
+ return
+}
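+
+// Annihilation in practice (illustrative, hypothetical types):
+//
+//	type A struct{ X int }
+//	type B struct{ X int }
+//	type S struct{ A; B } // X is reachable twice at depth 1
+//	_, ok := reflect.TypeOf(S{}).FieldByName("X") // ok == false: ambiguous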
+
+// FieldByName returns the struct field with the given name
+// and a boolean to indicate if the field was found.
+func (t *structType) FieldByName(name string) (f StructField, present bool) {
+ // Quick check for top-level name, or struct without embedded fields.
+ hasEmbeds := false
+ if name != "" {
+ for i := range t.fields {
+ tf := &t.fields[i]
+ if tf.name.name() == name {
+ return t.Field(i), true
+ }
+ if tf.embedded() {
+ hasEmbeds = true
+ }
+ }
+ }
+ if !hasEmbeds {
+ return
+ }
+ return t.FieldByNameFunc(func(s string) bool { return s == name })
+}
+
+// TypeOf returns the reflection Type that represents the dynamic type of i.
+// If i is a nil interface value, TypeOf returns nil.
+func TypeOf(i any) Type {
+ eface := *(*emptyInterface)(unsafe.Pointer(&i))
+ return toType(eface.typ)
+}
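+
+// Usage sketch:
+//
+//	reflect.TypeOf(42).Kind() // reflect.Int
+//	reflect.TypeOf(nil)       // nil: a nil interface carries no dynamic type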
+
+// ptrMap is the cache for PointerTo.
+var ptrMap sync.Map // map[*rtype]*ptrType
+
+// PtrTo returns the pointer type with element t.
+// For example, if t represents type Foo, PtrTo(t) represents *Foo.
+//
+// PtrTo is the old spelling of PointerTo.
+// The two functions behave identically.
+func PtrTo(t Type) Type { return PointerTo(t) }
+
+// PointerTo returns the pointer type with element t.
+// For example, if t represents type Foo, PointerTo(t) represents *Foo.
+func PointerTo(t Type) Type {
+ return t.(*rtype).ptrTo()
+}
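+
+// For example:
+//
+//	reflect.PointerTo(reflect.TypeOf(0)).String() // "*int"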
+
+func (t *rtype) ptrTo() *rtype {
+ if t.ptrToThis != 0 {
+ return t.typeOff(t.ptrToThis)
+ }
+
+ // Check the cache.
+ if pi, ok := ptrMap.Load(t); ok {
+ return &pi.(*ptrType).rtype
+ }
+
+ // Look in known types.
+ s := "*" + t.String()
+ for _, tt := range typesByString(s) {
+ p := (*ptrType)(unsafe.Pointer(tt))
+ if p.elem != t {
+ continue
+ }
+ pi, _ := ptrMap.LoadOrStore(t, p)
+ return &pi.(*ptrType).rtype
+ }
+
+ // Create a new ptrType starting with the description
+ // of an *unsafe.Pointer.
+ var iptr any = (*unsafe.Pointer)(nil)
+ prototype := *(**ptrType)(unsafe.Pointer(&iptr))
+ pp := *prototype
+
+ pp.str = resolveReflectName(newName(s, "", false, false))
+ pp.ptrToThis = 0
+
+ // For the type structures linked into the binary, the
+ // compiler provides a good hash of the string.
+ // Create a good hash for the new string by using
+ // the FNV-1 hash's mixing function to combine the
+ // old hash and the new "*".
+ pp.hash = fnv1(t.hash, '*')
+
+ pp.elem = t
+
+ pi, _ := ptrMap.LoadOrStore(t, &pp)
+ return &pi.(*ptrType).rtype
+}
+
+// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
+func fnv1(x uint32, list ...byte) uint32 {
+ for _, b := range list {
+ x = x*16777619 ^ uint32(b)
+ }
+ return x
+}
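+
+// Worked example: hashing the extra '*' for a pointer type, as in ptrTo
+// above, is a single FNV-1 round:
+//
+//	fnv1(h, '*') == h*16777619 ^ uint32('*')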
+
+func (t *rtype) Implements(u Type) bool {
+ if u == nil {
+ panic("reflect: nil type passed to Type.Implements")
+ }
+ if u.Kind() != Interface {
+ panic("reflect: non-interface type passed to Type.Implements")
+ }
+ return implements(u.(*rtype), t)
+}
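+
+// The usual idiom for obtaining an interface type to test against
+// (illustrative):
+//
+//	errType := reflect.TypeOf((*error)(nil)).Elem() // the error interface
+//	reflect.TypeOf(errors.New("x")).Implements(errType) // true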
+
+func (t *rtype) AssignableTo(u Type) bool {
+ if u == nil {
+ panic("reflect: nil type passed to Type.AssignableTo")
+ }
+ uu := u.(*rtype)
+ return directlyAssignable(uu, t) || implements(uu, t)
+}
+
+func (t *rtype) ConvertibleTo(u Type) bool {
+ if u == nil {
+ panic("reflect: nil type passed to Type.ConvertibleTo")
+ }
+ uu := u.(*rtype)
+ return convertOp(uu, t) != nil
+}
+
+func (t *rtype) Comparable() bool {
+ return t.equal != nil
+}
+
+// implements reports whether the type V implements the interface type T.
+func implements(T, V *rtype) bool {
+ if T.Kind() != Interface {
+ return false
+ }
+ t := (*interfaceType)(unsafe.Pointer(T))
+ if len(t.methods) == 0 {
+ return true
+ }
+
+ // The same algorithm applies in both cases, but the
+ // method tables for an interface type and a concrete type
+ // are different, so the code is duplicated.
+ // In both cases the algorithm is a linear scan over the two
+ // lists - T's methods and V's methods - simultaneously.
+ // Since method tables are stored in a unique sorted order
+ // (alphabetical, with no duplicate method names), the scan
+ // through V's methods must hit a match for each of T's
+ // methods along the way, or else V does not implement T.
+ // This lets us run the scan in overall linear time instead of
+ // the quadratic time a naive search would require.
+ // See also ../runtime/iface.go.
+ if V.Kind() == Interface {
+ v := (*interfaceType)(unsafe.Pointer(V))
+ i := 0
+ for j := 0; j < len(v.methods); j++ {
+ tm := &t.methods[i]
+ tmName := t.nameOff(tm.name)
+ vm := &v.methods[j]
+ vmName := V.nameOff(vm.name)
+ if vmName.name() == tmName.name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) {
+ if !tmName.isExported() {
+ tmPkgPath := tmName.pkgPath()
+ if tmPkgPath == "" {
+ tmPkgPath = t.pkgPath.name()
+ }
+ vmPkgPath := vmName.pkgPath()
+ if vmPkgPath == "" {
+ vmPkgPath = v.pkgPath.name()
+ }
+ if tmPkgPath != vmPkgPath {
+ continue
+ }
+ }
+ if i++; i >= len(t.methods) {
+ return true
+ }
+ }
+ }
+ return false
+ }
+
+ v := V.uncommon()
+ if v == nil {
+ return false
+ }
+ i := 0
+ vmethods := v.methods()
+ for j := 0; j < int(v.mcount); j++ {
+ tm := &t.methods[i]
+ tmName := t.nameOff(tm.name)
+ vm := vmethods[j]
+ vmName := V.nameOff(vm.name)
+ if vmName.name() == tmName.name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) {
+ if !tmName.isExported() {
+ tmPkgPath := tmName.pkgPath()
+ if tmPkgPath == "" {
+ tmPkgPath = t.pkgPath.name()
+ }
+ vmPkgPath := vmName.pkgPath()
+ if vmPkgPath == "" {
+ vmPkgPath = V.nameOff(v.pkgPath).name()
+ }
+ if tmPkgPath != vmPkgPath {
+ continue
+ }
+ }
+ if i++; i >= len(t.methods) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// specialChannelAssignability reports whether a value x of channel type V
+// can be directly assigned (using memmove) to another channel type T.
+// https://golang.org/doc/go_spec.html#Assignability
+// T and V must be both of Chan kind.
+func specialChannelAssignability(T, V *rtype) bool {
+ // Special case:
+ // x is a bidirectional channel value, T is a channel type,
+ // x's type V and T have identical element types,
+ // and at least one of V or T is not a defined type.
+ return V.ChanDir() == BothDir && (T.Name() == "" || V.Name() == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
+}
+
+// directlyAssignable reports whether a value x of type V can be directly
+// assigned (using memmove) to a value of type T.
+// https://golang.org/doc/go_spec.html#Assignability
+// Ignoring the interface rules (implemented elsewhere)
+// and the ideal constant rules (no ideal constants at run time).
+func directlyAssignable(T, V *rtype) bool {
+ // x's type V is identical to T?
+ if T == V {
+ return true
+ }
+
+ // Otherwise at least one of T and V must not be defined
+ // and they must have the same kind.
+ if T.hasName() && V.hasName() || T.Kind() != V.Kind() {
+ return false
+ }
+
+ if T.Kind() == Chan && specialChannelAssignability(T, V) {
+ return true
+ }
+
+ // x's type T and V must have identical underlying types.
+ return haveIdenticalUnderlyingType(T, V, true)
+}
+
+func haveIdenticalType(T, V Type, cmpTags bool) bool {
+ if cmpTags {
+ return T == V
+ }
+
+ if T.Name() != V.Name() || T.Kind() != V.Kind() || T.PkgPath() != V.PkgPath() {
+ return false
+ }
+
+ return haveIdenticalUnderlyingType(T.common(), V.common(), false)
+}
+
+func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
+ if T == V {
+ return true
+ }
+
+ kind := T.Kind()
+ if kind != V.Kind() {
+ return false
+ }
+
+ // Non-composite types of equal kind have same underlying type
+ // (the predefined instance of the type).
+ if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
+ return true
+ }
+
+ // Composite types.
+ switch kind {
+ case Array:
+ return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case Chan:
+ return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case Func:
+ t := (*funcType)(unsafe.Pointer(T))
+ v := (*funcType)(unsafe.Pointer(V))
+ if t.outCount != v.outCount || t.inCount != v.inCount {
+ return false
+ }
+ for i := 0; i < t.NumIn(); i++ {
+ if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
+ return false
+ }
+ }
+ for i := 0; i < t.NumOut(); i++ {
+ if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
+ return false
+ }
+ }
+ return true
+
+ case Interface:
+ t := (*interfaceType)(unsafe.Pointer(T))
+ v := (*interfaceType)(unsafe.Pointer(V))
+ if len(t.methods) == 0 && len(v.methods) == 0 {
+ return true
+ }
+ // Might have the same methods but still
+ // need a run time conversion.
+ return false
+
+ case Map:
+ return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case Pointer, Slice:
+ return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+ case Struct:
+ t := (*structType)(unsafe.Pointer(T))
+ v := (*structType)(unsafe.Pointer(V))
+ if len(t.fields) != len(v.fields) {
+ return false
+ }
+ if t.pkgPath.name() != v.pkgPath.name() {
+ return false
+ }
+ for i := range t.fields {
+ tf := &t.fields[i]
+ vf := &v.fields[i]
+ if tf.name.name() != vf.name.name() {
+ return false
+ }
+ if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
+ return false
+ }
+ if cmpTags && tf.name.tag() != vf.name.tag() {
+ return false
+ }
+ if tf.offset != vf.offset {
+ return false
+ }
+ if tf.embedded() != vf.embedded() {
+ return false
+ }
+ }
+ return true
+ }
+
+ return false
+}
+
+// typelinks is implemented in package runtime.
+// It returns a slice of the sections in each module,
+// and a slice of *rtype offsets in each module.
+//
+// The types in each module are sorted by string. That is, the first
+// two linked types of the first module are:
+//
+// d0 := sections[0]
+// t1 := (*rtype)(add(d0, offset[0][0]))
+// t2 := (*rtype)(add(d0, offset[0][1]))
+//
+// and
+//
+// t1.String() < t2.String()
+//
+// Note that strings are not unique identifiers for types:
+// there can be more than one with a given string.
+// Only types we might want to look up are included:
+// pointers, channels, maps, slices, and arrays.
+func typelinks() (sections []unsafe.Pointer, offset [][]int32)
+
+func rtypeOff(section unsafe.Pointer, off int32) *rtype {
+ return (*rtype)(add(section, uintptr(off), "sizeof(rtype) > 0"))
+}
+
+// typesByString returns the subslice of typelinks() whose elements have
+// the given string representation.
+// It may be empty (no known types with that string) or may have
+// multiple elements (multiple types with that string).
+func typesByString(s string) []*rtype {
+ sections, offset := typelinks()
+ var ret []*rtype
+
+ for offsI, offs := range offset {
+ section := sections[offsI]
+
+ // We are looking for the first index i where the string becomes >= s.
+ // This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s).
+ i, j := 0, len(offs)
+ for i < j {
+ h := i + (j-i)>>1 // avoid overflow when computing h
+ // i ≤ h < j
+ if !(rtypeOff(section, offs[h]).String() >= s) {
+ i = h + 1 // preserves f(i-1) == false
+ } else {
+ j = h // preserves f(j) == true
+ }
+ }
+ // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
+
+ // Having found the first, linear scan forward to find the last.
+ // We could do a second binary search, but the caller is going
+ // to do a linear scan anyway.
+ for j := i; j < len(offs); j++ {
+ typ := rtypeOff(section, offs[j])
+ if typ.String() != s {
+ break
+ }
+ ret = append(ret, typ)
+ }
+ }
+ return ret
+}
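+
+// The binary search above is an inlined equivalent of:
+//
+//	i := sort.Search(len(offs), func(h int) bool {
+//		return rtypeOff(section, offs[h]).String() >= s
+//	})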
+
+// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
+var lookupCache sync.Map // map[cacheKey]*rtype
+
+// A cacheKey is the key for use in the lookupCache.
+// Four values describe any of the types we are looking for:
+// type kind, one or two subtypes, and an extra integer.
+type cacheKey struct {
+ kind Kind
+ t1 *rtype
+ t2 *rtype
+ extra uintptr
+}
+
+// The funcLookupCache caches FuncOf lookups.
+// FuncOf does not share the common lookupCache since cacheKey is not
+// sufficient to represent functions unambiguously.
+var funcLookupCache struct {
+ sync.Mutex // Guards stores (but not loads) on m.
+
+ // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
+ // Elements of m are append-only and thus safe for concurrent reading.
+ m sync.Map
+}
+
+// ChanOf returns the channel type with the given direction and element type.
+// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
+//
+// The gc runtime imposes a limit of 64 kB on channel element types.
+// If t's size is equal to or exceeds this limit, ChanOf panics.
+func ChanOf(dir ChanDir, t Type) Type {
+ typ := t.(*rtype)
+
+ // Look in cache.
+ ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
+ if ch, ok := lookupCache.Load(ckey); ok {
+ return ch.(*rtype)
+ }
+
+ // This restriction is imposed by the gc compiler and the runtime.
+ if typ.size >= 1<<16 {
+ panic("reflect.ChanOf: element size too large")
+ }
+
+ // Look in known types.
+ var s string
+ switch dir {
+ default:
+ panic("reflect.ChanOf: invalid dir")
+ case SendDir:
+ s = "chan<- " + typ.String()
+ case RecvDir:
+ s = "<-chan " + typ.String()
+ case BothDir:
+ typeStr := typ.String()
+ if typeStr[0] == '<' {
+ // typ is recv chan, need parentheses as "<-" associates with leftmost
+ // chan possible, see:
+ // * https://golang.org/ref/spec#Channel_types
+ // * https://github.com/golang/go/issues/39897
+ s = "chan (" + typeStr + ")"
+ } else {
+ s = "chan " + typeStr
+ }
+ }
+ for _, tt := range typesByString(s) {
+ ch := (*chanType)(unsafe.Pointer(tt))
+ if ch.elem == typ && ch.dir == uintptr(dir) {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
+
+ // Make a channel type.
+ var ichan any = (chan unsafe.Pointer)(nil)
+ prototype := *(**chanType)(unsafe.Pointer(&ichan))
+ ch := *prototype
+ ch.tflag = tflagRegularMemory
+ ch.dir = uintptr(dir)
+ ch.str = resolveReflectName(newName(s, "", false, false))
+ ch.hash = fnv1(typ.hash, 'c', byte(dir))
+ ch.elem = typ
+
+ ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
+ return ti.(Type)
+}
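+
+// String forms produced above (illustrative):
+//
+//	reflect.ChanOf(reflect.RecvDir, reflect.TypeOf(0)).String() // "<-chan int"
+//	// Parenthesized so "<-" does not bind to the outer chan:
+//	reflect.ChanOf(reflect.BothDir,
+//		reflect.ChanOf(reflect.RecvDir, reflect.TypeOf(0))).String() // "chan (<-chan int)"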
+
+// MapOf returns the map type with the given key and element types.
+// For example, if k represents int and e represents string,
+// MapOf(k, e) represents map[int]string.
+//
+// If the key type is not a valid map key type (that is, if it does
+// not implement Go's == operator), MapOf panics.
+func MapOf(key, elem Type) Type {
+ ktyp := key.(*rtype)
+ etyp := elem.(*rtype)
+
+ if ktyp.equal == nil {
+ panic("reflect.MapOf: invalid key type " + ktyp.String())
+ }
+
+ // Look in cache.
+ ckey := cacheKey{Map, ktyp, etyp, 0}
+ if mt, ok := lookupCache.Load(ckey); ok {
+ return mt.(Type)
+ }
+
+ // Look in known types.
+ s := "map[" + ktyp.String() + "]" + etyp.String()
+ for _, tt := range typesByString(s) {
+ mt := (*mapType)(unsafe.Pointer(tt))
+ if mt.key == ktyp && mt.elem == etyp {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
+
+ // Make a map type.
+ // Note: flag values must match those used in the TMAP case
+ // in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
+ var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
+ mt := **(**mapType)(unsafe.Pointer(&imap))
+ mt.str = resolveReflectName(newName(s, "", false, false))
+ mt.tflag = 0
+ mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
+ mt.key = ktyp
+ mt.elem = etyp
+ mt.bucket = bucketOf(ktyp, etyp)
+ mt.hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
+ return typehash(ktyp, p, seed)
+ }
+ mt.flags = 0
+ if ktyp.size > maxKeySize {
+ mt.keysize = uint8(goarch.PtrSize)
+ mt.flags |= 1 // indirect key
+ } else {
+ mt.keysize = uint8(ktyp.size)
+ }
+ if etyp.size > maxValSize {
+ mt.valuesize = uint8(goarch.PtrSize)
+ mt.flags |= 2 // indirect value
+ } else {
+ mt.valuesize = uint8(etyp.size)
+ }
+ mt.bucketsize = uint16(mt.bucket.size)
+ if isReflexive(ktyp) {
+ mt.flags |= 4
+ }
+ if needKeyUpdate(ktyp) {
+ mt.flags |= 8
+ }
+ if hashMightPanic(ktyp) {
+ mt.flags |= 16
+ }
+ mt.ptrToThis = 0
+
+ ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
+ return ti.(Type)
+}
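+
+// Usage sketch:
+//
+//	reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0)).String() // "map[string]int"
+//	// Keys must support ==; func types do not, so this panics:
+//	reflect.MapOf(reflect.TypeOf(func() {}), reflect.TypeOf(0))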
+
+// TODO(crawshaw): as these funcTypeFixedN structs have no methods,
+// they could be defined at runtime using the StructOf function.
+type funcTypeFixed4 struct {
+ funcType
+ args [4]*rtype
+}
+type funcTypeFixed8 struct {
+ funcType
+ args [8]*rtype
+}
+type funcTypeFixed16 struct {
+ funcType
+ args [16]*rtype
+}
+type funcTypeFixed32 struct {
+ funcType
+ args [32]*rtype
+}
+type funcTypeFixed64 struct {
+ funcType
+ args [64]*rtype
+}
+type funcTypeFixed128 struct {
+ funcType
+ args [128]*rtype
+}
+
+// FuncOf returns the function type with the given argument and result types.
+// For example if k represents int and e represents string,
+// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
+//
+// The variadic argument controls whether the function is variadic. FuncOf
+// panics if variadic is true and in[len(in)-1] does not represent a slice.
+func FuncOf(in, out []Type, variadic bool) Type {
+ if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
+ panic("reflect.FuncOf: last arg of variadic func must be slice")
+ }
+
+ // Make a func type.
+ var ifunc any = (func())(nil)
+ prototype := *(**funcType)(unsafe.Pointer(&ifunc))
+ n := len(in) + len(out)
+
+ var ft *funcType
+ var args []*rtype
+ switch {
+ case n <= 4:
+ fixed := new(funcTypeFixed4)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ case n <= 8:
+ fixed := new(funcTypeFixed8)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ case n <= 16:
+ fixed := new(funcTypeFixed16)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ case n <= 32:
+ fixed := new(funcTypeFixed32)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ case n <= 64:
+ fixed := new(funcTypeFixed64)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ case n <= 128:
+ fixed := new(funcTypeFixed128)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ default:
+ panic("reflect.FuncOf: too many arguments")
+ }
+ *ft = *prototype
+
+ // Build a hash and minimally populate ft.
+ var hash uint32
+ for _, in := range in {
+ t := in.(*rtype)
+ args = append(args, t)
+ hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
+ }
+ if variadic {
+ hash = fnv1(hash, 'v')
+ }
+ hash = fnv1(hash, '.')
+ for _, out := range out {
+ t := out.(*rtype)
+ args = append(args, t)
+ hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
+ }
+ if len(args) > 50 {
+ panic("reflect.FuncOf does not support more than 50 arguments")
+ }
+ ft.tflag = 0
+ ft.hash = hash
+ ft.inCount = uint16(len(in))
+ ft.outCount = uint16(len(out))
+ if variadic {
+ ft.outCount |= 1 << 15
+ }
+
+ // Look in cache.
+ if ts, ok := funcLookupCache.m.Load(hash); ok {
+ for _, t := range ts.([]*rtype) {
+ if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
+ return t
+ }
+ }
+ }
+
+ // Not in cache, lock and retry.
+ funcLookupCache.Lock()
+ defer funcLookupCache.Unlock()
+ if ts, ok := funcLookupCache.m.Load(hash); ok {
+ for _, t := range ts.([]*rtype) {
+ if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
+ return t
+ }
+ }
+ }
+
+ addToCache := func(tt *rtype) Type {
+ var rts []*rtype
+ if rti, ok := funcLookupCache.m.Load(hash); ok {
+ rts = rti.([]*rtype)
+ }
+ funcLookupCache.m.Store(hash, append(rts, tt))
+ return tt
+ }
+
+ // Look in known types for the same string representation.
+ str := funcStr(ft)
+ for _, tt := range typesByString(str) {
+ if haveIdenticalUnderlyingType(&ft.rtype, tt, true) {
+ return addToCache(tt)
+ }
+ }
+
+ // Populate the remaining fields of ft and store in cache.
+ ft.str = resolveReflectName(newName(str, "", false, false))
+ ft.ptrToThis = 0
+ return addToCache(&ft.rtype)
+}
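+
+// Usage sketch:
+//
+//	in := []reflect.Type{reflect.TypeOf(0)}
+//	out := []reflect.Type{reflect.TypeOf("")}
+//	reflect.FuncOf(in, out, false).String() // "func(int) string"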
+
+// funcStr builds a string representation of a funcType.
+func funcStr(ft *funcType) string {
+ repr := make([]byte, 0, 64)
+ repr = append(repr, "func("...)
+ for i, t := range ft.in() {
+ if i > 0 {
+ repr = append(repr, ", "...)
+ }
+ if ft.IsVariadic() && i == int(ft.inCount)-1 {
+ repr = append(repr, "..."...)
+ repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.String()...)
+ } else {
+ repr = append(repr, t.String()...)
+ }
+ }
+ repr = append(repr, ')')
+ out := ft.out()
+ if len(out) == 1 {
+ repr = append(repr, ' ')
+ } else if len(out) > 1 {
+ repr = append(repr, " ("...)
+ }
+ for i, t := range out {
+ if i > 0 {
+ repr = append(repr, ", "...)
+ }
+ repr = append(repr, t.String()...)
+ }
+ if len(out) > 1 {
+ repr = append(repr, ')')
+ }
+ return string(repr)
+}
+
+// isReflexive reports whether the == operation on the type is reflexive.
+// That is, x == x for all values x of type t.
+func isReflexive(t *rtype) bool {
+ switch t.Kind() {
+ case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, String, UnsafePointer:
+ return true
+ case Float32, Float64, Complex64, Complex128, Interface:
+ return false
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return isReflexive(tt.elem)
+ case Struct:
+ tt := (*structType)(unsafe.Pointer(t))
+ for _, f := range tt.fields {
+ if !isReflexive(f.typ) {
+ return false
+ }
+ }
+ return true
+ default:
+ // Func, Map, Slice, Invalid
+ panic("isReflexive called on non-key type " + t.String())
+ }
+}
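+
+// Why reflexivity matters (illustrative): NaN != NaN, so a float key is
+// not reflexive and a stored key cannot be assumed to equal itself:
+//
+//	m := map[float64]int{}
+//	m[math.NaN()] = 1
+//	_, ok := m[math.NaN()] // ok == false, because NaN != NaN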
+
+// needKeyUpdate reports whether map overwrites require the key to be copied.
+func needKeyUpdate(t *rtype) bool {
+ switch t.Kind() {
+ case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, UnsafePointer:
+ return false
+ case Float32, Float64, Complex64, Complex128, Interface, String:
+ // Float keys can be updated from +0 to -0.
+ // String keys can be updated to use a smaller backing store.
+	// Interfaces might have floats or strings in them.
+ return true
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return needKeyUpdate(tt.elem)
+ case Struct:
+ tt := (*structType)(unsafe.Pointer(t))
+ for _, f := range tt.fields {
+ if needKeyUpdate(f.typ) {
+ return true
+ }
+ }
+ return false
+ default:
+ // Func, Map, Slice, Invalid
+ panic("needKeyUpdate called on non-key type " + t.String())
+ }
+}
+
+// hashMightPanic reports whether the hash of a map key of type t might panic.
+func hashMightPanic(t *rtype) bool {
+ switch t.Kind() {
+ case Interface:
+ return true
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return hashMightPanic(tt.elem)
+ case Struct:
+ tt := (*structType)(unsafe.Pointer(t))
+ for _, f := range tt.fields {
+ if hashMightPanic(f.typ) {
+ return true
+ }
+ }
+ return false
+ default:
+ return false
+ }
+}
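+
+// For example, an interface key whose dynamic type is not comparable makes
+// the hash panic at insertion time:
+//
+//	m := map[any]int{}
+//	m[[]int{1}] = 1 // panics: hash of unhashable type []int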
+
+// Make sure these routines stay in sync with ../runtime/map.go!
+// These types exist only for GC, so we only fill out GC relevant info.
+// Currently, that's just size and the GC program. We also fill in string
+// for possible debugging use.
+const (
+ bucketSize uintptr = 8
+ maxKeySize uintptr = 128
+ maxValSize uintptr = 128
+)
+
+func bucketOf(ktyp, etyp *rtype) *rtype {
+ if ktyp.size > maxKeySize {
+ ktyp = PointerTo(ktyp).(*rtype)
+ }
+ if etyp.size > maxValSize {
+ etyp = PointerTo(etyp).(*rtype)
+ }
+
+ // Prepare GC data if any.
+ // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes,
+ // or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap.
+ // Note that since the key and value are known to be <= 128 bytes,
+ // they're guaranteed to have bitmaps instead of GC programs.
+ var gcdata *byte
+ var ptrdata uintptr
+
+ size := bucketSize*(1+ktyp.size+etyp.size) + goarch.PtrSize
+ if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
+ panic("reflect: bad size computation in MapOf")
+ }
+
+ if ktyp.ptrdata != 0 || etyp.ptrdata != 0 {
+ nptr := (bucketSize*(1+ktyp.size+etyp.size) + goarch.PtrSize) / goarch.PtrSize
+ mask := make([]byte, (nptr+7)/8)
+ base := bucketSize / goarch.PtrSize
+
+ if ktyp.ptrdata != 0 {
+ emitGCMask(mask, base, ktyp, bucketSize)
+ }
+ base += bucketSize * ktyp.size / goarch.PtrSize
+
+ if etyp.ptrdata != 0 {
+ emitGCMask(mask, base, etyp, bucketSize)
+ }
+ base += bucketSize * etyp.size / goarch.PtrSize
+
+ word := base
+ mask[word/8] |= 1 << (word % 8)
+ gcdata = &mask[0]
+ ptrdata = (word + 1) * goarch.PtrSize
+
+ // overflow word must be last
+ if ptrdata != size {
+ panic("reflect: bad layout computation in MapOf")
+ }
+ }
+
+ b := &rtype{
+ align: goarch.PtrSize,
+ size: size,
+ kind: uint8(Struct),
+ ptrdata: ptrdata,
+ gcdata: gcdata,
+ }
+ s := "bucket(" + ktyp.String() + "," + etyp.String() + ")"
+ b.str = resolveReflectName(newName(s, "", false, false))
+ return b
+}
+
+func (t *rtype) gcSlice(begin, end uintptr) []byte {
+ return (*[1 << 30]byte)(unsafe.Pointer(t.gcdata))[begin:end:end]
+}
+
+// emitGCMask writes the GC mask for [n]typ into out, starting at bit
+// offset base.
+func emitGCMask(out []byte, base uintptr, typ *rtype, n uintptr) {
+ if typ.kind&kindGCProg != 0 {
+ panic("reflect: unexpected GC program")
+ }
+ ptrs := typ.ptrdata / goarch.PtrSize
+ words := typ.size / goarch.PtrSize
+ mask := typ.gcSlice(0, (ptrs+7)/8)
+ for j := uintptr(0); j < ptrs; j++ {
+ if (mask[j/8]>>(j%8))&1 != 0 {
+ for i := uintptr(0); i < n; i++ {
+ k := base + i*words + j
+ out[k/8] |= 1 << (k % 8)
+ }
+ }
+ }
+}
+
+// appendGCProg appends the GC program for the first ptrdata bytes of
+// typ to dst and returns the extended slice.
+func appendGCProg(dst []byte, typ *rtype) []byte {
+ if typ.kind&kindGCProg != 0 {
+ // Element has GC program; emit one element.
+ n := uintptr(*(*uint32)(unsafe.Pointer(typ.gcdata)))
+ prog := typ.gcSlice(4, 4+n-1)
+ return append(dst, prog...)
+ }
+
+ // Element is small with pointer mask; use as literal bits.
+ ptrs := typ.ptrdata / goarch.PtrSize
+ mask := typ.gcSlice(0, (ptrs+7)/8)
+
+ // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
+ for ; ptrs > 120; ptrs -= 120 {
+ dst = append(dst, 120)
+ dst = append(dst, mask[:15]...)
+ mask = mask[15:]
+ }
+
+ dst = append(dst, byte(ptrs))
+ dst = append(dst, mask...)
+ return dst
+}
+
+// SliceOf returns the slice type with element type t.
+// For example, if t represents int, SliceOf(t) represents []int.
+func SliceOf(t Type) Type {
+ typ := t.(*rtype)
+
+ // Look in cache.
+ ckey := cacheKey{Slice, typ, nil, 0}
+ if slice, ok := lookupCache.Load(ckey); ok {
+ return slice.(Type)
+ }
+
+ // Look in known types.
+ s := "[]" + typ.String()
+ for _, tt := range typesByString(s) {
+ slice := (*sliceType)(unsafe.Pointer(tt))
+ if slice.elem == typ {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
+
+ // Make a slice type.
+ var islice any = ([]unsafe.Pointer)(nil)
+ prototype := *(**sliceType)(unsafe.Pointer(&islice))
+ slice := *prototype
+ slice.tflag = 0
+ slice.str = resolveReflectName(newName(s, "", false, false))
+ slice.hash = fnv1(typ.hash, '[')
+ slice.elem = typ
+ slice.ptrToThis = 0
+
+ ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
+ return ti.(Type)
+}
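+
+// For example:
+//
+//	reflect.SliceOf(reflect.TypeOf(0)).String() // "[]int"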
+
+// The structLookupCache caches StructOf lookups.
+// StructOf does not share the common lookupCache since we need to pin
+// the memory associated with *structTypeFixedN.
+var structLookupCache struct {
+ sync.Mutex // Guards stores (but not loads) on m.
+
+ // m is a map[uint32][]Type keyed by the hash calculated in StructOf.
+ // Elements in m are append-only and thus safe for concurrent reading.
+ m sync.Map
+}
+
+type structTypeUncommon struct {
+ structType
+ u uncommonType
+}
+
+// isLetter reports whether a given 'rune' is classified as a Letter.
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
+}
+
+// isValidFieldName checks if a string is a valid (struct) field name or not.
+//
+// According to the language spec, a field name should be an identifier.
+//
+// identifier = letter { letter | unicode_digit } .
+// letter = unicode_letter | "_" .
+func isValidFieldName(fieldName string) bool {
+ for i, c := range fieldName {
+ if i == 0 && !isLetter(c) {
+ return false
+ }
+
+ if !(isLetter(c) || unicode.IsDigit(c)) {
+ return false
+ }
+ }
+
+ return len(fieldName) > 0
+}
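+
+// Per the grammar above (illustrative):
+//
+//	isValidFieldName("Foo") // true
+//	isValidFieldName("_x1") // true
+//	isValidFieldName("日本") // true: Unicode letters are letters
+//	isValidFieldName("1x")  // false: cannot start with a digit
+//	isValidFieldName("")    // false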
+
+// StructOf returns the struct type containing fields.
+// The Offset and Index fields are ignored and computed as they would be
+// by the compiler.
+//
+// StructOf currently does not generate wrapper methods for embedded
+// fields and panics if passed unexported StructFields.
+// These limitations may be lifted in a future version.
+func StructOf(fields []StructField) Type {
+ var (
+ hash = fnv1(0, []byte("struct {")...)
+ size uintptr
+ typalign uint8
+ comparable = true
+ methods []method
+
+ fs = make([]structField, len(fields))
+ repr = make([]byte, 0, 64)
+ fset = map[string]struct{}{} // fields' names
+
+ hasGCProg = false // records whether a struct-field type has a GCProg
+ )
+
+ lastzero := uintptr(0)
+ repr = append(repr, "struct {"...)
+ pkgpath := ""
+ for i, field := range fields {
+ if field.Name == "" {
+ panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
+ }
+ if !isValidFieldName(field.Name) {
+ panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
+ }
+ if field.Type == nil {
+ panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
+ }
+ f, fpkgpath := runtimeStructField(field)
+ ft := f.typ
+ if ft.kind&kindGCProg != 0 {
+ hasGCProg = true
+ }
+ if fpkgpath != "" {
+ if pkgpath == "" {
+ pkgpath = fpkgpath
+ } else if pkgpath != fpkgpath {
+			panic("reflect.StructOf: fields with different PkgPath " + pkgpath + " and " + fpkgpath)
+ }
+ }
+
+ // Update string and hash
+ name := f.name.name()
+ hash = fnv1(hash, []byte(name)...)
+ repr = append(repr, (" " + name)...)
+ if f.embedded() {
+ // Embedded field
+ if f.typ.Kind() == Pointer {
+ // Embedded ** and *interface{} are illegal
+ elem := ft.Elem()
+ if k := elem.Kind(); k == Pointer || k == Interface {
+ panic("reflect.StructOf: illegal embedded field type " + ft.String())
+ }
+ }
+
+ switch f.typ.Kind() {
+ case Interface:
+ ift := (*interfaceType)(unsafe.Pointer(ft))
+ for im, m := range ift.methods {
+ if ift.nameOff(m.name).pkgPath() != "" {
+ // TODO(sbinet). Issue 15924.
+ panic("reflect: embedded interface with unexported method(s) not implemented")
+ }
+
+ var (
+ mtyp = ift.typeOff(m.typ)
+ ifield = i
+ imethod = im
+ ifn Value
+ tfn Value
+ )
+
+ if ft.kind&kindDirectIface != 0 {
+ tfn = MakeFunc(mtyp, func(in []Value) []Value {
+ var args []Value
+ var recv = in[0]
+ if len(in) > 1 {
+ args = in[1:]
+ }
+ return recv.Field(ifield).Method(imethod).Call(args)
+ })
+ ifn = MakeFunc(mtyp, func(in []Value) []Value {
+ var args []Value
+ var recv = in[0]
+ if len(in) > 1 {
+ args = in[1:]
+ }
+ return recv.Field(ifield).Method(imethod).Call(args)
+ })
+ } else {
+ tfn = MakeFunc(mtyp, func(in []Value) []Value {
+ var args []Value
+ var recv = in[0]
+ if len(in) > 1 {
+ args = in[1:]
+ }
+ return recv.Field(ifield).Method(imethod).Call(args)
+ })
+ ifn = MakeFunc(mtyp, func(in []Value) []Value {
+ var args []Value
+ var recv = Indirect(in[0])
+ if len(in) > 1 {
+ args = in[1:]
+ }
+ return recv.Field(ifield).Method(imethod).Call(args)
+ })
+ }
+
+ methods = append(methods, method{
+ name: resolveReflectName(ift.nameOff(m.name)),
+ mtyp: resolveReflectType(mtyp),
+ ifn: resolveReflectText(unsafe.Pointer(&ifn)),
+ tfn: resolveReflectText(unsafe.Pointer(&tfn)),
+ })
+ }
+ case Pointer:
+ ptr := (*ptrType)(unsafe.Pointer(ft))
+ if unt := ptr.uncommon(); unt != nil {
+ if i > 0 && unt.mcount > 0 {
+ // Issue 15924.
+ panic("reflect: embedded type with methods not implemented if type is not first field")
+ }
+ if len(fields) > 1 {
+ panic("reflect: embedded type with methods not implemented if there is more than one field")
+ }
+ for _, m := range unt.methods() {
+ mname := ptr.nameOff(m.name)
+ if mname.pkgPath() != "" {
+ // TODO(sbinet).
+ // Issue 15924.
+ panic("reflect: embedded interface with unexported method(s) not implemented")
+ }
+ methods = append(methods, method{
+ name: resolveReflectName(mname),
+ mtyp: resolveReflectType(ptr.typeOff(m.mtyp)),
+ ifn: resolveReflectText(ptr.textOff(m.ifn)),
+ tfn: resolveReflectText(ptr.textOff(m.tfn)),
+ })
+ }
+ }
+ if unt := ptr.elem.uncommon(); unt != nil {
+ for _, m := range unt.methods() {
+ mname := ptr.nameOff(m.name)
+ if mname.pkgPath() != "" {
+ // TODO(sbinet)
+ // Issue 15924.
+ panic("reflect: embedded interface with unexported method(s) not implemented")
+ }
+ methods = append(methods, method{
+ name: resolveReflectName(mname),
+ mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)),
+ ifn: resolveReflectText(ptr.elem.textOff(m.ifn)),
+ tfn: resolveReflectText(ptr.elem.textOff(m.tfn)),
+ })
+ }
+ }
+ default:
+ if unt := ft.uncommon(); unt != nil {
+ if i > 0 && unt.mcount > 0 {
+ // Issue 15924.
+ panic("reflect: embedded type with methods not implemented if type is not first field")
+ }
+ if len(fields) > 1 && ft.kind&kindDirectIface != 0 {
+ panic("reflect: embedded type with methods not implemented for non-pointer type")
+ }
+ for _, m := range unt.methods() {
+ mname := ft.nameOff(m.name)
+ if mname.pkgPath() != "" {
+ // TODO(sbinet)
+ // Issue 15924.
+ panic("reflect: embedded interface with unexported method(s) not implemented")
+ }
+ methods = append(methods, method{
+ name: resolveReflectName(mname),
+ mtyp: resolveReflectType(ft.typeOff(m.mtyp)),
+ ifn: resolveReflectText(ft.textOff(m.ifn)),
+ tfn: resolveReflectText(ft.textOff(m.tfn)),
+ })
+
+ }
+ }
+ }
+ }
+ if _, dup := fset[name]; dup && name != "_" {
+ panic("reflect.StructOf: duplicate field " + name)
+ }
+ fset[name] = struct{}{}
+
+ hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash))
+
+ repr = append(repr, (" " + ft.String())...)
+ if f.name.hasTag() {
+ hash = fnv1(hash, []byte(f.name.tag())...)
+ repr = append(repr, (" " + strconv.Quote(f.name.tag()))...)
+ }
+ if i < len(fields)-1 {
+ repr = append(repr, ';')
+ }
+
+ comparable = comparable && (ft.equal != nil)
+
+ offset := align(size, uintptr(ft.align))
+ if offset < size {
+ panic("reflect.StructOf: struct size would exceed virtual address space")
+ }
+ if ft.align > typalign {
+ typalign = ft.align
+ }
+ size = offset + ft.size
+ if size < offset {
+ panic("reflect.StructOf: struct size would exceed virtual address space")
+ }
+ f.offset = offset
+
+ if ft.size == 0 {
+ lastzero = size
+ }
+
+ fs[i] = f
+ }
+
+ if size > 0 && lastzero == size {
+ // This is a non-zero sized struct that ends in a
+ // zero-sized field. We add an extra byte of padding,
+ // to ensure that taking the address of the final
+ // zero-sized field can't manufacture a pointer to the
+ // next object in the heap. See issue 9401.
+ size++
+ if size == 0 {
+ panic("reflect.StructOf: struct size would exceed virtual address space")
+ }
+ }
+
+ var typ *structType
+ var ut *uncommonType
+
+ if len(methods) == 0 {
+ t := new(structTypeUncommon)
+ typ = &t.structType
+ ut = &t.u
+ } else {
+ // A *rtype representing a struct is followed directly in memory by an
+ // array of method objects representing the methods attached to the
+ // struct. To get the same layout for a run time generated type, we
+ // need an array directly following the uncommonType memory.
+ // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN.
+ tt := New(StructOf([]StructField{
+ {Name: "S", Type: TypeOf(structType{})},
+ {Name: "U", Type: TypeOf(uncommonType{})},
+ {Name: "M", Type: ArrayOf(len(methods), TypeOf(methods[0]))},
+ }))
+
+ typ = (*structType)(tt.Elem().Field(0).Addr().UnsafePointer())
+ ut = (*uncommonType)(tt.Elem().Field(1).Addr().UnsafePointer())
+
+ copy(tt.Elem().Field(2).Slice(0, len(methods)).Interface().([]method), methods)
+ }
+ // TODO(sbinet): Once we allow embedding multiple types,
+ // methods will need to be sorted like the compiler does.
+ // TODO(sbinet): Once we allow non-exported methods, we will
+ // need to compute xcount as the number of exported methods.
+ ut.mcount = uint16(len(methods))
+ ut.xcount = ut.mcount
+ ut.moff = uint32(unsafe.Sizeof(uncommonType{}))
+
+ if len(fs) > 0 {
+ repr = append(repr, ' ')
+ }
+ repr = append(repr, '}')
+ hash = fnv1(hash, '}')
+ str := string(repr)
+
+ // Round the size up to be a multiple of the alignment.
+ s := align(size, uintptr(typalign))
+ if s < size {
+ panic("reflect.StructOf: struct size would exceed virtual address space")
+ }
+ size = s
+
+ // Make the struct type.
+ var istruct any = struct{}{}
+ prototype := *(**structType)(unsafe.Pointer(&istruct))
+ *typ = *prototype
+ typ.fields = fs
+ if pkgpath != "" {
+ typ.pkgPath = newName(pkgpath, "", false, false)
+ }
+
+ // Look in cache.
+ if ts, ok := structLookupCache.m.Load(hash); ok {
+ for _, st := range ts.([]Type) {
+ t := st.common()
+ if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ return t
+ }
+ }
+ }
+
+ // Not in cache, lock and retry.
+ structLookupCache.Lock()
+ defer structLookupCache.Unlock()
+ if ts, ok := structLookupCache.m.Load(hash); ok {
+ for _, st := range ts.([]Type) {
+ t := st.common()
+ if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ return t
+ }
+ }
+ }
+
+ addToCache := func(t Type) Type {
+ var ts []Type
+ if ti, ok := structLookupCache.m.Load(hash); ok {
+ ts = ti.([]Type)
+ }
+ structLookupCache.m.Store(hash, append(ts, t))
+ return t
+ }
+
+ // Look in known types.
+ for _, t := range typesByString(str) {
+ if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ // even if 't' wasn't a structType with methods, we should be ok
+ // as the 'u uncommonType' field won't be accessed except when
+ // tflag&tflagUncommon is set.
+ return addToCache(t)
+ }
+ }
+
+ typ.str = resolveReflectName(newName(str, "", false, false))
+ typ.tflag = 0 // TODO: set tflagRegularMemory
+ typ.hash = hash
+ typ.size = size
+ typ.ptrdata = typeptrdata(typ.common())
+ typ.align = typalign
+ typ.fieldAlign = typalign
+ typ.ptrToThis = 0
+ if len(methods) > 0 {
+ typ.tflag |= tflagUncommon
+ }
+
+ if hasGCProg {
+ lastPtrField := 0
+ for i, ft := range fs {
+ if ft.typ.pointers() {
+ lastPtrField = i
+ }
+ }
+ prog := []byte{0, 0, 0, 0} // will be length of prog
+ var off uintptr
+ for i, ft := range fs {
+ if i > lastPtrField {
+ // gcprog should not include anything for any field after
+ // the last field that contains pointer data
+ break
+ }
+ if !ft.typ.pointers() {
+ // Ignore pointerless fields.
+ continue
+ }
+ // Pad to start of this field with zeros.
+ if ft.offset > off {
+ n := (ft.offset - off) / goarch.PtrSize
+ prog = append(prog, 0x01, 0x00) // emit a 0 bit
+ if n > 1 {
+ prog = append(prog, 0x81) // repeat previous bit
+ prog = appendVarint(prog, n-1) // n-1 times
+ }
+ off = ft.offset
+ }
+
+ prog = appendGCProg(prog, ft.typ)
+ off += ft.typ.ptrdata
+ }
+ prog = append(prog, 0)
+ *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
+ typ.kind |= kindGCProg
+ typ.gcdata = &prog[0]
+ } else {
+ typ.kind &^= kindGCProg
+ bv := new(bitVector)
+ addTypeBits(bv, 0, typ.common())
+ if len(bv.data) > 0 {
+ typ.gcdata = &bv.data[0]
+ }
+ }
+ typ.equal = nil
+ if comparable {
+ typ.equal = func(p, q unsafe.Pointer) bool {
+ for _, ft := range typ.fields {
+ pi := add(p, ft.offset, "&x.field safe")
+ qi := add(q, ft.offset, "&x.field safe")
+ if !ft.typ.equal(pi, qi) {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ switch {
+ case len(fs) == 1 && !ifaceIndir(fs[0].typ):
+ // structs of 1 direct iface type can be direct
+ typ.kind |= kindDirectIface
+ default:
+ typ.kind &^= kindDirectIface
+ }
+
+ return addToCache(&typ.rtype)
+}
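+
+// Usage sketch (hypothetical field set):
+//
+//	t := reflect.StructOf([]reflect.StructField{
+//		{Name: "A", Type: reflect.TypeOf(0)},
+//		{Name: "B", Type: reflect.TypeOf(""), Tag: `json:"b"`},
+//	})
+//	t.String() // "struct { A int; B string "json:\"b\"" }"
+//	v := reflect.New(t).Elem()
+//	v.Field(0).SetInt(7)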
+
+// runtimeStructField takes a StructField value passed to StructOf and
+// returns both the corresponding internal representation, of type
+// structField, and the pkgpath value to use for this field.
+func runtimeStructField(field StructField) (structField, string) {
+ if field.Anonymous && field.PkgPath != "" {
+ panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set")
+ }
+
+ if field.IsExported() {
+ // Best-effort check for misuse.
+ // Since this field will be treated as exported, not much harm done if Unicode lowercase slips through.
+ c := field.Name[0]
+ if 'a' <= c && c <= 'z' || c == '_' {
+ panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
+ }
+ }
+
+ resolveReflectType(field.Type.common()) // install in runtime
+ f := structField{
+ name: newName(field.Name, string(field.Tag), field.IsExported(), field.Anonymous),
+ typ: field.Type.common(),
+ offset: 0,
+ }
+ return f, field.PkgPath
+}
+
+// typeptrdata returns the length in bytes of the prefix of t
+// containing pointer data. Anything after this offset is scalar data.
+// Keep in sync with ../cmd/compile/internal/reflectdata/reflect.go.
+func typeptrdata(t *rtype) uintptr {
+ switch t.Kind() {
+ case Struct:
+ st := (*structType)(unsafe.Pointer(t))
+ // find the last field that has pointers.
+ field := -1
+ for i := range st.fields {
+ ft := st.fields[i].typ
+ if ft.pointers() {
+ field = i
+ }
+ }
+ if field == -1 {
+ return 0
+ }
+ f := st.fields[field]
+ return f.offset + f.typ.ptrdata
+
+ default:
+ panic("reflect.typeptrdata: unexpected type, " + t.String())
+ }
+}
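+
+// A worked sketch of the rule above (assuming a 64-bit platform): for
+//
+//	struct { a int; b *int; c int }
+//
+// the last pointer-bearing field is b, at offset 8 with ptrdata 8, so
+// typeptrdata returns 16; the trailing scalar field c is never scanned.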
+
+// See cmd/compile/internal/reflectdata/reflect.go for derivation of constant.
+const maxPtrmaskBytes = 2048
+
+// ArrayOf returns the array type with the given length and element type.
+// For example, if t represents int, ArrayOf(5, t) represents [5]int.
+//
+// If the resulting type would be larger than the available address space,
+// ArrayOf panics.
+func ArrayOf(length int, elem Type) Type {
+ if length < 0 {
+ panic("reflect: negative length passed to ArrayOf")
+ }
+
+ typ := elem.(*rtype)
+
+ // Look in cache.
+ ckey := cacheKey{Array, typ, nil, uintptr(length)}
+ if array, ok := lookupCache.Load(ckey); ok {
+ return array.(Type)
+ }
+
+ // Look in known types.
+ s := "[" + strconv.Itoa(length) + "]" + typ.String()
+ for _, tt := range typesByString(s) {
+ array := (*arrayType)(unsafe.Pointer(tt))
+ if array.elem == typ {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
+
+ // Make an array type.
+ var iarray any = [1]unsafe.Pointer{}
+ prototype := *(**arrayType)(unsafe.Pointer(&iarray))
+ array := *prototype
+ array.tflag = typ.tflag & tflagRegularMemory
+ array.str = resolveReflectName(newName(s, "", false, false))
+ array.hash = fnv1(typ.hash, '[')
+ for n := uint32(length); n > 0; n >>= 8 {
+ array.hash = fnv1(array.hash, byte(n))
+ }
+ array.hash = fnv1(array.hash, ']')
+ array.elem = typ
+ array.ptrToThis = 0
+ if typ.size > 0 {
+ max := ^uintptr(0) / typ.size
+ if uintptr(length) > max {
+ panic("reflect.ArrayOf: array size would exceed virtual address space")
+ }
+ }
+ array.size = typ.size * uintptr(length)
+ if length > 0 && typ.ptrdata != 0 {
+ array.ptrdata = typ.size*uintptr(length-1) + typ.ptrdata
+ }
+ array.align = typ.align
+ array.fieldAlign = typ.fieldAlign
+ array.len = uintptr(length)
+ array.slice = SliceOf(elem).(*rtype)
+
+ switch {
+ case typ.ptrdata == 0 || array.size == 0:
+ // No pointers.
+ array.gcdata = nil
+ array.ptrdata = 0
+
+ case length == 1:
+ // In memory, 1-element array looks just like the element.
+ array.kind |= typ.kind & kindGCProg
+ array.gcdata = typ.gcdata
+ array.ptrdata = typ.ptrdata
+
+ case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*goarch.PtrSize:
+ // Element is small with pointer mask; array is still small.
+ // Create direct pointer mask by turning each 1 bit in elem
+ // into length 1 bits in larger mask.
+ mask := make([]byte, (array.ptrdata/goarch.PtrSize+7)/8)
+ emitGCMask(mask, 0, typ, array.len)
+ array.gcdata = &mask[0]
+
+ default:
+ // Create program that emits one element
+ // and then repeats to make the array.
+ prog := []byte{0, 0, 0, 0} // will be length of prog
+ prog = appendGCProg(prog, typ)
+ // Pad from ptrdata to size.
+ elemPtrs := typ.ptrdata / goarch.PtrSize
+ elemWords := typ.size / goarch.PtrSize
+ if elemPtrs < elemWords {
+ // Emit literal 0 bit, then repeat as needed.
+ prog = append(prog, 0x01, 0x00)
+ if elemPtrs+1 < elemWords {
+ prog = append(prog, 0x81)
+ prog = appendVarint(prog, elemWords-elemPtrs-1)
+ }
+ }
+ // Repeat length-1 times.
+ if elemWords < 0x80 {
+ prog = append(prog, byte(elemWords|0x80))
+ } else {
+ prog = append(prog, 0x80)
+ prog = appendVarint(prog, elemWords)
+ }
+ prog = appendVarint(prog, uintptr(length)-1)
+ prog = append(prog, 0)
+ *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
+ array.kind |= kindGCProg
+ array.gcdata = &prog[0]
+ array.ptrdata = array.size // overestimate but ok; must match program
+ }
+
+ etyp := typ.common()
+ esize := etyp.Size()
+
+ array.equal = nil
+ if eequal := etyp.equal; eequal != nil {
+ array.equal = func(p, q unsafe.Pointer) bool {
+ for i := 0; i < length; i++ {
+ pi := arrayAt(p, i, esize, "i < length")
+ qi := arrayAt(q, i, esize, "i < length")
+ if !eequal(pi, qi) {
+ return false
+ }
+
+ }
+ return true
+ }
+ }
+
+ switch {
+ case length == 1 && !ifaceIndir(typ):
+ // array of 1 direct iface type can be direct
+ array.kind |= kindDirectIface
+ default:
+ array.kind &^= kindDirectIface
+ }
+
+ ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
+ return ti.(Type)
+}
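+
+// A minimal usage sketch (assuming a client that imports reflect and fmt):
+//
+//	t := reflect.ArrayOf(5, reflect.TypeOf(0))
+//	fmt.Println(t) // prints [5]int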
+
+func appendVarint(x []byte, v uintptr) []byte {
+ for ; v >= 0x80; v >>= 7 {
+ x = append(x, byte(v|0x80))
+ }
+ x = append(x, byte(v))
+ return x
+}
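+
+// A worked sketch of the encoding: 7 bits are emitted per byte, least
+// significant group first, with the 0x80 continuation bit set on every byte
+// except the last. For example:
+//
+//	appendVarint(nil, 1)   // -> [0x01]
+//	appendVarint(nil, 300) // -> [0xAC, 0x02] (300 = 0b10_0101100)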
+
+// toType converts from a *rtype to a Type that can be returned
+// to the client of package reflect. In gc, the only concern is that
+// a nil *rtype must be replaced by a nil Type, but in gccgo this
+// function takes care of ensuring that multiple *rtype for the same
+// type are coalesced into a single Type.
+func toType(t *rtype) Type {
+ if t == nil {
+ return nil
+ }
+ return t
+}
+
+type layoutKey struct {
+ ftyp *funcType // function signature
+ rcvr *rtype // receiver type, or nil if none
+}
+
+type layoutType struct {
+ t *rtype
+ framePool *sync.Pool
+ abid abiDesc
+}
+
+var layoutCache sync.Map // map[layoutKey]layoutType
+
+// funcLayout computes a struct type representing the layout of the
+// stack-assigned function arguments and return values for the function
+// type t.
+// If rcvr != nil, rcvr specifies the type of the receiver.
+// The returned type exists only for GC, so we only fill out GC relevant info.
+// Currently, that's just size and the GC program. We also fill in
+// the name for possible debugging use.
+func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, framePool *sync.Pool, abid abiDesc) {
+ if t.Kind() != Func {
+ panic("reflect: funcLayout of non-func type " + t.String())
+ }
+ if rcvr != nil && rcvr.Kind() == Interface {
+ panic("reflect: funcLayout with interface receiver " + rcvr.String())
+ }
+ k := layoutKey{t, rcvr}
+ if lti, ok := layoutCache.Load(k); ok {
+ lt := lti.(layoutType)
+ return lt.t, lt.framePool, lt.abid
+ }
+
+ // Compute the ABI layout.
+ abid = newAbiDesc(t, rcvr)
+
+ // build dummy rtype holding gc program
+ x := &rtype{
+ align: goarch.PtrSize,
+ // Don't add spill space here; it's only necessary in
+ // reflectcall's frame, not in the allocated frame.
+ // TODO(mknyszek): Remove this comment when register
+ // spill space in the frame is no longer required.
+ size: align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize),
+ ptrdata: uintptr(abid.stackPtrs.n) * goarch.PtrSize,
+ }
+ if abid.stackPtrs.n > 0 {
+ x.gcdata = &abid.stackPtrs.data[0]
+ }
+
+ var s string
+ if rcvr != nil {
+ s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")"
+ } else {
+ s = "funcargs(" + t.String() + ")"
+ }
+ x.str = resolveReflectName(newName(s, "", false, false))
+
+ // cache result for future callers
+ framePool = &sync.Pool{New: func() any {
+ return unsafe_New(x)
+ }}
+ lti, _ := layoutCache.LoadOrStore(k, layoutType{
+ t: x,
+ framePool: framePool,
+ abid: abid,
+ })
+ lt := lti.(layoutType)
+ return lt.t, lt.framePool, lt.abid
+}
+
+// ifaceIndir reports whether t is stored indirectly in an interface value.
+func ifaceIndir(t *rtype) bool {
+ return t.kind&kindDirectIface == 0
+}
+
+// Note: this type must agree with runtime.bitvector.
+type bitVector struct {
+ n uint32 // number of bits
+ data []byte
+}
+
+// append a bit to the bitmap.
+func (bv *bitVector) append(bit uint8) {
+ if bv.n%8 == 0 {
+ bv.data = append(bv.data, 0)
+ }
+ bv.data[bv.n/8] |= bit << (bv.n % 8)
+ bv.n++
+}
+
+func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
+ if t.ptrdata == 0 {
+ return
+ }
+
+ switch Kind(t.kind & kindMask) {
+ case Chan, Func, Map, Pointer, Slice, String, UnsafePointer:
+ // 1 pointer at start of representation
+ for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
+ bv.append(0)
+ }
+ bv.append(1)
+
+ case Interface:
+ // 2 pointers
+ for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
+ bv.append(0)
+ }
+ bv.append(1)
+ bv.append(1)
+
+ case Array:
+ // repeat inner type
+ tt := (*arrayType)(unsafe.Pointer(t))
+ for i := 0; i < int(tt.len); i++ {
+ addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
+ }
+
+ case Struct:
+ // apply fields
+ tt := (*structType)(unsafe.Pointer(t))
+ for i := range tt.fields {
+ f := &tt.fields[i]
+ addTypeBits(bv, offset+f.offset, f.typ)
+ }
+ }
+}
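+
+// A worked sketch (assuming a 64-bit platform): for struct { i int; s string },
+// the int contributes no bits, the padding loop emits a 0 for word 0, and the
+// string's data pointer adds a 1 for word 1, so bv holds the bits [0, 1] with
+// n == 2; trailing scalar words (such as the string length) are never appended.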
diff --git a/contrib/go/_std_1.19/src/reflect/value.go b/contrib/go/_std_1.19/src/reflect/value.go
new file mode 100644
index 0000000000..74554a3ac8
--- /dev/null
+++ b/contrib/go/_std_1.19/src/reflect/value.go
@@ -0,0 +1,3620 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect
+
+import (
+ "errors"
+ "internal/abi"
+ "internal/goarch"
+ "internal/itoa"
+ "internal/unsafeheader"
+ "math"
+ "runtime"
+ "unsafe"
+)
+
+// Value is the reflection interface to a Go value.
+//
+// Not all methods apply to all kinds of values. Restrictions,
+// if any, are noted in the documentation for each method.
+// Use the Kind method to find out the kind of value before
+// calling kind-specific methods. Calling a method
+// inappropriate to the kind of type causes a run time panic.
+//
+// The zero Value represents no value.
+// Its IsValid method returns false, its Kind method returns Invalid,
+// its String method returns "<invalid Value>", and all other methods panic.
+// Most functions and methods never return an invalid value.
+// If one does, its documentation states the conditions explicitly.
+//
+// A Value can be used concurrently by multiple goroutines provided that
+// the underlying Go value can be used concurrently for the equivalent
+// direct operations.
+//
+// To compare two Values, compare the results of the Interface method.
+// Using == on two Values does not compare the underlying values
+// they represent.
+type Value struct {
+ // typ holds the type of the value represented by a Value.
+ typ *rtype
+
+ // Pointer-valued data or, if flagIndir is set, pointer to data.
+ // Valid when either flagIndir is set or typ.pointers() is true.
+ ptr unsafe.Pointer
+
+ // flag holds metadata about the value.
+ // The lowest bits are flag bits:
+ // - flagStickyRO: obtained via unexported not embedded field, so read-only
+ // - flagEmbedRO: obtained via unexported embedded field, so read-only
+ // - flagIndir: val holds a pointer to the data
+ // - flagAddr: v.CanAddr is true (implies flagIndir)
+ // - flagMethod: v is a method value.
+ // The next five bits give the Kind of the value.
+ // This repeats typ.Kind() except for method values.
+ // The remaining 23+ bits give a method number for method values.
+ // If flag.kind() != Func, code can assume that flagMethod is unset.
+ // If ifaceIndir(typ), code can assume that flagIndir is set.
+ flag
+
+ // A method value represents a curried method invocation
+ // like r.Read for some receiver r. The typ+val+flag bits describe
+ // the receiver r, but the flag's Kind bits say Func (methods are
+ // functions), and the top bits of the flag give the method number
+ // in r's type's method table.
+}
+
+type flag uintptr
+
+const (
+ flagKindWidth = 5 // there are 27 kinds
+ flagKindMask flag = 1<<flagKindWidth - 1
+ flagStickyRO flag = 1 << 5
+ flagEmbedRO flag = 1 << 6
+ flagIndir flag = 1 << 7
+ flagAddr flag = 1 << 8
+ flagMethod flag = 1 << 9
+ flagMethodShift = 10
+ flagRO flag = flagStickyRO | flagEmbedRO
+)
+
+func (f flag) kind() Kind {
+ return Kind(f & flagKindMask)
+}
+
+func (f flag) ro() flag {
+ if f&flagRO != 0 {
+ return flagStickyRO
+ }
+ return 0
+}
+
+// pointer returns the underlying pointer represented by v.
+// v.Kind() must be Pointer, Map, Chan, Func, or UnsafePointer;
+// if v.Kind() == Pointer, the base type must not be go:notinheap.
+func (v Value) pointer() unsafe.Pointer {
+ if v.typ.size != goarch.PtrSize || !v.typ.pointers() {
+ panic("can't call pointer on a non-pointer Value")
+ }
+ if v.flag&flagIndir != 0 {
+ return *(*unsafe.Pointer)(v.ptr)
+ }
+ return v.ptr
+}
+
+// packEface converts v to the empty interface.
+func packEface(v Value) any {
+ t := v.typ
+ var i any
+ e := (*emptyInterface)(unsafe.Pointer(&i))
+ // First, fill in the data portion of the interface.
+ switch {
+ case ifaceIndir(t):
+ if v.flag&flagIndir == 0 {
+ panic("bad indir")
+ }
+ // Value is indirect, and so is the interface we're making.
+ ptr := v.ptr
+ if v.flag&flagAddr != 0 {
+ // TODO: pass safe boolean from valueInterface so
+ // we don't need to copy if safe==true?
+ c := unsafe_New(t)
+ typedmemmove(t, c, ptr)
+ ptr = c
+ }
+ e.word = ptr
+ case v.flag&flagIndir != 0:
+ // Value is indirect, but interface is direct. We need
+ // to load the data at v.ptr into the interface data word.
+ e.word = *(*unsafe.Pointer)(v.ptr)
+ default:
+ // Value is direct, and so is the interface.
+ e.word = v.ptr
+ }
+ // Now, fill in the type portion. We're very careful here not
+ // to have any operation between the e.word and e.typ assignments
+ // that would let the garbage collector observe the partially-built
+ // interface value.
+ e.typ = t
+ return i
+}
+
+// unpackEface converts the empty interface i to a Value.
+func unpackEface(i any) Value {
+ e := (*emptyInterface)(unsafe.Pointer(&i))
+ // NOTE: don't read e.word until we know whether it is really a pointer or not.
+ t := e.typ
+ if t == nil {
+ return Value{}
+ }
+ f := flag(t.Kind())
+ if ifaceIndir(t) {
+ f |= flagIndir
+ }
+ return Value{t, e.word, f}
+}
+
+// A ValueError occurs when a Value method is invoked on
+// a Value that does not support it. Such cases are documented
+// in the description of each method.
+type ValueError struct {
+ Method string
+ Kind Kind
+}
+
+func (e *ValueError) Error() string {
+ if e.Kind == 0 {
+ return "reflect: call of " + e.Method + " on zero Value"
+ }
+ return "reflect: call of " + e.Method + " on " + e.Kind.String() + " Value"
+}
+
+// valueMethodName returns the name of the exported calling method on Value.
+func valueMethodName() string {
+ var pc [5]uintptr
+ n := runtime.Callers(1, pc[:])
+ frames := runtime.CallersFrames(pc[:n])
+ var frame runtime.Frame
+ for more := true; more; {
+ const prefix = "reflect.Value."
+ frame, more = frames.Next()
+ name := frame.Function
+ if len(name) > len(prefix) && name[:len(prefix)] == prefix {
+ methodName := name[len(prefix):]
+ if len(methodName) > 0 && 'A' <= methodName[0] && methodName[0] <= 'Z' {
+ return name
+ }
+ }
+ }
+ return "unknown method"
+}
+
+// emptyInterface is the header for an interface{} value.
+type emptyInterface struct {
+ typ *rtype
+ word unsafe.Pointer
+}
+
+// nonEmptyInterface is the header for an interface value with methods.
+type nonEmptyInterface struct {
+ // see ../runtime/iface.go:/Itab
+ itab *struct {
+ ityp *rtype // static interface type
+ typ *rtype // dynamic concrete type
+ hash uint32 // copy of typ.hash
+ _ [4]byte
+ fun [100000]unsafe.Pointer // method table
+ }
+ word unsafe.Pointer
+}
+
+// mustBe panics if f's kind is not expected.
+// Making this a method on flag instead of on Value
+// (and embedding flag in Value) means that we can write
+// the very clear v.mustBe(Bool) and have it compile into
+// v.flag.mustBe(Bool), which will only bother to copy the
+// single important word for the receiver.
+func (f flag) mustBe(expected Kind) {
+ // TODO(mvdan): use f.kind() again once mid-stack inlining gets better
+ if Kind(f&flagKindMask) != expected {
+ panic(&ValueError{valueMethodName(), f.kind()})
+ }
+}
+
+// mustBeExported panics if f records that the value was obtained using
+// an unexported field.
+func (f flag) mustBeExported() {
+ if f == 0 || f&flagRO != 0 {
+ f.mustBeExportedSlow()
+ }
+}
+
+func (f flag) mustBeExportedSlow() {
+ if f == 0 {
+ panic(&ValueError{valueMethodName(), Invalid})
+ }
+ if f&flagRO != 0 {
+ panic("reflect: " + valueMethodName() + " using value obtained using unexported field")
+ }
+}
+
+// mustBeAssignable panics if f records that the value is not assignable,
+// which is to say that either it was obtained using an unexported field
+// or it is not addressable.
+func (f flag) mustBeAssignable() {
+ if f&flagRO != 0 || f&flagAddr == 0 {
+ f.mustBeAssignableSlow()
+ }
+}
+
+func (f flag) mustBeAssignableSlow() {
+ if f == 0 {
+ panic(&ValueError{valueMethodName(), Invalid})
+ }
+ // Assignable if addressable and not read-only.
+ if f&flagRO != 0 {
+ panic("reflect: " + valueMethodName() + " using value obtained using unexported field")
+ }
+ if f&flagAddr == 0 {
+ panic("reflect: " + valueMethodName() + " using unaddressable value")
+ }
+}
+
+// Addr returns a pointer value representing the address of v.
+// It panics if CanAddr() returns false.
+// Addr is typically used to obtain a pointer to a struct field
+// or slice element in order to call a method that requires a
+// pointer receiver.
+func (v Value) Addr() Value {
+ if v.flag&flagAddr == 0 {
+ panic("reflect.Value.Addr of unaddressable value")
+ }
+ // Preserve flagRO instead of using v.flag.ro() so that
+ // v.Addr().Elem() is equivalent to v (#32772)
+ fl := v.flag & flagRO
+ return Value{v.typ.ptrTo(), v.ptr, fl | flag(Pointer)}
+}
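+
+// A minimal usage sketch (assuming a client that imports reflect):
+//
+//	x := struct{ N int }{N: 1}
+//	f := reflect.ValueOf(&x).Elem().Field(0) // addressable int field
+//	*f.Addr().Interface().(*int) = 42        // x.N is now 42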
+
+// Bool returns v's underlying value.
+// It panics if v's kind is not Bool.
+func (v Value) Bool() bool {
+ // panicNotBool is split out to keep Bool inlineable.
+ if v.kind() != Bool {
+ v.panicNotBool()
+ }
+ return *(*bool)(v.ptr)
+}
+
+func (v Value) panicNotBool() {
+ v.mustBe(Bool)
+}
+
+var bytesType = TypeOf(([]byte)(nil)).(*rtype)
+
+// Bytes returns v's underlying value.
+// It panics if v's underlying value is not a slice of bytes or
+// an addressable array of bytes.
+func (v Value) Bytes() []byte {
+ // bytesSlow is split out to keep Bytes inlineable for unnamed []byte.
+ if v.typ == bytesType {
+ return *(*[]byte)(v.ptr)
+ }
+ return v.bytesSlow()
+}
+
+func (v Value) bytesSlow() []byte {
+ switch v.kind() {
+ case Slice:
+ if v.typ.Elem().Kind() != Uint8 {
+ panic("reflect.Value.Bytes of non-byte slice")
+ }
+ // Slice is always bigger than a word; assume flagIndir.
+ return *(*[]byte)(v.ptr)
+ case Array:
+ if v.typ.Elem().Kind() != Uint8 {
+ panic("reflect.Value.Bytes of non-byte array")
+ }
+ if !v.CanAddr() {
+ panic("reflect.Value.Bytes of unaddressable byte array")
+ }
+ p := (*byte)(v.ptr)
+ n := int((*arrayType)(unsafe.Pointer(v.typ)).len)
+ return unsafe.Slice(p, n)
+ }
+ panic(&ValueError{"reflect.Value.Bytes", v.kind()})
+}
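+
+// A minimal usage sketch (assuming a client that imports reflect); note that
+// the array case requires addressability, as enforced above:
+//
+//	b := reflect.ValueOf([]byte("go")).Bytes() // []byte{'g', 'o'}
+//	a := [2]byte{1, 2}
+//	_ = reflect.ValueOf(&a).Elem().Bytes()     // ok: addressable byte array
+//	// reflect.ValueOf(a).Bytes() would panic: unaddressable byte array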
+
+// runes returns v's underlying value.
+// It panics if v's underlying value is not a slice of runes (int32s).
+func (v Value) runes() []rune {
+ v.mustBe(Slice)
+ if v.typ.Elem().Kind() != Int32 {
+ panic("reflect.Value.Bytes of non-rune slice")
+ }
+ // Slice is always bigger than a word; assume flagIndir.
+ return *(*[]rune)(v.ptr)
+}
+
+// CanAddr reports whether the value's address can be obtained with Addr.
+// Such values are called addressable. A value is addressable if it is
+// an element of a slice, an element of an addressable array,
+// a field of an addressable struct, or the result of dereferencing a pointer.
+// If CanAddr returns false, calling Addr will panic.
+func (v Value) CanAddr() bool {
+ return v.flag&flagAddr != 0
+}
+
+// CanSet reports whether the value of v can be changed.
+// A Value can be changed only if it is addressable and was not
+// obtained by the use of unexported struct fields.
+// If CanSet returns false, calling Set or any type-specific
+// setter (e.g., SetBool, SetInt) will panic.
+func (v Value) CanSet() bool {
+ return v.flag&(flagAddr|flagRO) == flagAddr
+}
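+
+// A minimal usage sketch (assuming a client that imports reflect):
+//
+//	x := 1
+//	reflect.ValueOf(x).CanSet()         // false: the Value holds a copy of x
+//	reflect.ValueOf(&x).Elem().CanSet() // true: addressable, no unexported path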
+
+// Call calls the function v with the input arguments in.
+// For example, if len(in) == 3, v.Call(in) represents the Go call v(in[0], in[1], in[2]).
+// Call panics if v's Kind is not Func.
+// It returns the output results as Values.
+// As in Go, each input argument must be assignable to the
+// type of the function's corresponding input parameter.
+// If v is a variadic function, Call creates the variadic slice parameter
+// itself, copying in the corresponding values.
+func (v Value) Call(in []Value) []Value {
+ v.mustBe(Func)
+ v.mustBeExported()
+ return v.call("Call", in)
+}
+
+// CallSlice calls the variadic function v with the input arguments in,
+// assigning the slice in[len(in)-1] to v's final variadic argument.
+// For example, if len(in) == 3, v.CallSlice(in) represents the Go call v(in[0], in[1], in[2]...).
+// CallSlice panics if v's Kind is not Func or if v is not variadic.
+// It returns the output results as Values.
+// As in Go, each input argument must be assignable to the
+// type of the function's corresponding input parameter.
+func (v Value) CallSlice(in []Value) []Value {
+ v.mustBe(Func)
+ v.mustBeExported()
+ return v.call("CallSlice", in)
+}
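+
+// A minimal usage sketch (assuming a client that imports reflect, strings,
+// and fmt):
+//
+//	repeat := reflect.ValueOf(strings.Repeat)
+//	out := repeat.Call([]reflect.Value{reflect.ValueOf("na"), reflect.ValueOf(2)})
+//	// out[0].String() == "nana"
+//
+//	sprintf := reflect.ValueOf(fmt.Sprintf)
+//	out = sprintf.CallSlice([]reflect.Value{
+//		reflect.ValueOf("%d-%d"),
+//		reflect.ValueOf([]any{1, 2}),
+//	})
+//	// out[0].String() == "1-2"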
+
+var callGC bool // for testing; see TestCallMethodJump and TestCallArgLive
+
+const debugReflectCall = false
+
+func (v Value) call(op string, in []Value) []Value {
+ // Get function pointer, type.
+ t := (*funcType)(unsafe.Pointer(v.typ))
+ var (
+ fn unsafe.Pointer
+ rcvr Value
+ rcvrtype *rtype
+ )
+ if v.flag&flagMethod != 0 {
+ rcvr = v
+ rcvrtype, t, fn = methodReceiver(op, v, int(v.flag)>>flagMethodShift)
+ } else if v.flag&flagIndir != 0 {
+ fn = *(*unsafe.Pointer)(v.ptr)
+ } else {
+ fn = v.ptr
+ }
+
+ if fn == nil {
+ panic("reflect.Value.Call: call of nil function")
+ }
+
+ isSlice := op == "CallSlice"
+ n := t.NumIn()
+ isVariadic := t.IsVariadic()
+ if isSlice {
+ if !isVariadic {
+ panic("reflect: CallSlice of non-variadic function")
+ }
+ if len(in) < n {
+ panic("reflect: CallSlice with too few input arguments")
+ }
+ if len(in) > n {
+ panic("reflect: CallSlice with too many input arguments")
+ }
+ } else {
+ if isVariadic {
+ n--
+ }
+ if len(in) < n {
+ panic("reflect: Call with too few input arguments")
+ }
+ if !isVariadic && len(in) > n {
+ panic("reflect: Call with too many input arguments")
+ }
+ }
+ for _, x := range in {
+ if x.Kind() == Invalid {
+ panic("reflect: " + op + " using zero Value argument")
+ }
+ }
+ for i := 0; i < n; i++ {
+ if xt, targ := in[i].Type(), t.In(i); !xt.AssignableTo(targ) {
+ panic("reflect: " + op + " using " + xt.String() + " as type " + targ.String())
+ }
+ }
+ if !isSlice && isVariadic {
+ // prepare slice for remaining values
+ m := len(in) - n
+ slice := MakeSlice(t.In(n), m, m)
+ elem := t.In(n).Elem()
+ for i := 0; i < m; i++ {
+ x := in[n+i]
+ if xt := x.Type(); !xt.AssignableTo(elem) {
+ panic("reflect: cannot use " + xt.String() + " as type " + elem.String() + " in " + op)
+ }
+ slice.Index(i).Set(x)
+ }
+ origIn := in
+ in = make([]Value, n+1)
+ copy(in[:n], origIn)
+ in[n] = slice
+ }
+
+ nin := len(in)
+ if nin != t.NumIn() {
+ panic("reflect.Value.Call: wrong argument count")
+ }
+ nout := t.NumOut()
+
+ // Register argument space.
+ var regArgs abi.RegArgs
+
+ // Compute frame type.
+ frametype, framePool, abid := funcLayout(t, rcvrtype)
+
+ // Allocate a chunk of memory for frame if needed.
+ var stackArgs unsafe.Pointer
+ if frametype.size != 0 {
+ if nout == 0 {
+ stackArgs = framePool.Get().(unsafe.Pointer)
+ } else {
+ // Can't use pool if the function has return values.
+ // We will leak pointer to args in ret, so its lifetime is not scoped.
+ stackArgs = unsafe_New(frametype)
+ }
+ }
+ frameSize := frametype.size
+
+ if debugReflectCall {
+ println("reflect.call", t.String())
+ abid.dump()
+ }
+
+ // Copy inputs into args.
+
+ // Handle receiver.
+ inStart := 0
+ if rcvrtype != nil {
+ // Guaranteed to only be one word in size,
+ // so it will only take up exactly 1 abiStep (either
+ // in a register or on the stack).
+ switch st := abid.call.steps[0]; st.kind {
+ case abiStepStack:
+ storeRcvr(rcvr, stackArgs)
+ case abiStepPointer:
+ storeRcvr(rcvr, unsafe.Pointer(&regArgs.Ptrs[st.ireg]))
+ fallthrough
+ case abiStepIntReg:
+ storeRcvr(rcvr, unsafe.Pointer(&regArgs.Ints[st.ireg]))
+ case abiStepFloatReg:
+ storeRcvr(rcvr, unsafe.Pointer(&regArgs.Floats[st.freg]))
+ default:
+ panic("unknown ABI parameter kind")
+ }
+ inStart = 1
+ }
+
+ // Handle arguments.
+ for i, v := range in {
+ v.mustBeExported()
+ targ := t.In(i).(*rtype)
+ // TODO(mknyszek): Figure out if it's possible to get some
+ // scratch space for this assignment check. Previously, it
+ // was possible to use space in the argument frame.
+ v = v.assignTo("reflect.Value.Call", targ, nil)
+ stepsLoop:
+ for _, st := range abid.call.stepsForValue(i + inStart) {
+ switch st.kind {
+ case abiStepStack:
+ // Copy values to the "stack."
+ addr := add(stackArgs, st.stkOff, "precomputed stack arg offset")
+ if v.flag&flagIndir != 0 {
+ typedmemmove(targ, addr, v.ptr)
+ } else {
+ *(*unsafe.Pointer)(addr) = v.ptr
+ }
+ // There's only one step for a stack-allocated value.
+ break stepsLoop
+ case abiStepIntReg, abiStepPointer:
+ // Copy values to "integer registers."
+ if v.flag&flagIndir != 0 {
+ offset := add(v.ptr, st.offset, "precomputed value offset")
+ if st.kind == abiStepPointer {
+ // Duplicate this pointer in the pointer area of the
+ // register space. Otherwise, there's the potential for
+ // this to be the last reference to v.ptr.
+ regArgs.Ptrs[st.ireg] = *(*unsafe.Pointer)(offset)
+ }
+ intToReg(&regArgs, st.ireg, st.size, offset)
+ } else {
+ if st.kind == abiStepPointer {
+ // See the comment in abiStepPointer case above.
+ regArgs.Ptrs[st.ireg] = v.ptr
+ }
+ regArgs.Ints[st.ireg] = uintptr(v.ptr)
+ }
+ case abiStepFloatReg:
+ // Copy values to "float registers."
+ if v.flag&flagIndir == 0 {
+ panic("attempted to copy pointer to FP register")
+ }
+ offset := add(v.ptr, st.offset, "precomputed value offset")
+ floatToReg(&regArgs, st.freg, st.size, offset)
+ default:
+ panic("unknown ABI part kind")
+ }
+ }
+ }
+ // TODO(mknyszek): Remove this when we no longer have
+ // caller reserved spill space.
+ frameSize = align(frameSize, goarch.PtrSize)
+ frameSize += abid.spill
+
+ // Mark pointers in registers for the return path.
+ regArgs.ReturnIsPtr = abid.outRegPtrs
+
+ if debugReflectCall {
+ regArgs.Dump()
+ }
+
+ // For testing; see TestCallArgLive.
+ if callGC {
+ runtime.GC()
+ }
+
+ // Call.
+ call(frametype, fn, stackArgs, uint32(frametype.size), uint32(abid.retOffset), uint32(frameSize), &regArgs)
+
+ // For testing; see TestCallMethodJump.
+ if callGC {
+ runtime.GC()
+ }
+
+ var ret []Value
+ if nout == 0 {
+ if stackArgs != nil {
+ typedmemclr(frametype, stackArgs)
+ framePool.Put(stackArgs)
+ }
+ } else {
+ if stackArgs != nil {
+ // Zero the now unused input area of args,
+ // because the Values returned by this function contain pointers to the args object,
+ // and will thus keep the args object alive indefinitely.
+ typedmemclrpartial(frametype, stackArgs, 0, abid.retOffset)
+ }
+
+ // Wrap Values around return values in args.
+ ret = make([]Value, nout)
+ for i := 0; i < nout; i++ {
+ tv := t.Out(i)
+ if tv.Size() == 0 {
+ // For zero-sized return value, args+off may point to the next object.
+ // In this case, return the zero value instead.
+ ret[i] = Zero(tv)
+ continue
+ }
+ steps := abid.ret.stepsForValue(i)
+ if st := steps[0]; st.kind == abiStepStack {
+ // This value is on the stack. If part of a value is stack
+ // allocated, the entire value is according to the ABI. So
+ // just make an indirection into the allocated frame.
+ fl := flagIndir | flag(tv.Kind())
+ ret[i] = Value{tv.common(), add(stackArgs, st.stkOff, "tv.Size() != 0"), fl}
+ // Note: this does introduce false sharing between results -
+ // if any result is live, they are all live.
+ // (And the space for the args is live as well, but as we've
+ // cleared that space it isn't as big a deal.)
+ continue
+ }
+
+ // Handle pointers passed in registers.
+ if !ifaceIndir(tv.common()) {
+ // Pointer-valued data gets put directly
+ // into v.ptr.
+ if steps[0].kind != abiStepPointer {
+ print("kind=", steps[0].kind, ", type=", tv.String(), "\n")
+ panic("mismatch between ABI description and types")
+ }
+ ret[i] = Value{tv.common(), regArgs.Ptrs[steps[0].ireg], flag(tv.Kind())}
+ continue
+ }
+
+ // All that's left is values passed in registers that we need to
+ // create space for and copy values back into.
+ //
+ // TODO(mknyszek): We make a new allocation for each register-allocated
+ // value, but previously we could always point into the heap-allocated
+ // stack frame. This is a regression that could be fixed by adding
+ // additional space to the allocated stack frame and storing the
+ // register-allocated return values into the allocated stack frame and
+ // referring there in the resulting Value.
+ s := unsafe_New(tv.common())
+ for _, st := range steps {
+ switch st.kind {
+ case abiStepIntReg:
+ offset := add(s, st.offset, "precomputed value offset")
+ intFromReg(&regArgs, st.ireg, st.size, offset)
+ case abiStepPointer:
+ s := add(s, st.offset, "precomputed value offset")
+ *((*unsafe.Pointer)(s)) = regArgs.Ptrs[st.ireg]
+ case abiStepFloatReg:
+ offset := add(s, st.offset, "precomputed value offset")
+ floatFromReg(&regArgs, st.freg, st.size, offset)
+ case abiStepStack:
+ panic("register-based return value has stack component")
+ default:
+ panic("unknown ABI part kind")
+ }
+ }
+ ret[i] = Value{tv.common(), s, flagIndir | flag(tv.Kind())}
+ }
+ }
+
+ return ret
+}
+
+// callReflect is the call implementation used by a function
+// returned by MakeFunc. In many ways it is the opposite of the
+// method Value.call above. The method above converts a call using Values
+// into a call of a function with a concrete argument frame, while
+// callReflect converts a call of a function with a concrete argument
+// frame into a call using Values.
+// It is in this file so that it can be next to the call method above.
+// The remainder of the MakeFunc implementation is in makefunc.go.
+//
+// NOTE: This function must be marked as a "wrapper" in the generated code,
+// so that the linker can make it work correctly for panic and recover.
+// The gc compilers know to do that for the name "reflect.callReflect".
+//
+// ctxt is the "closure" generated by MakeFunc.
+// frame is a pointer to the arguments to that closure on the stack.
+// retValid points to a boolean which should be set when the results
+// section of frame is set.
+//
+// regs contains the argument values passed in registers and will contain
+// the values returned from ctxt.fn in registers.
+func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool, regs *abi.RegArgs) {
+ if callGC {
+ // Call GC upon entry during testing.
+ // Getting our stack scanned here is the biggest hazard, because
+ // our caller (makeFuncStub) could have failed to place the last
+ // pointer to a value in regs' pointer space, in which case it
+ // won't be visible to the GC.
+ runtime.GC()
+ }
+ ftyp := ctxt.ftyp
+ f := ctxt.fn
+
+ _, _, abid := funcLayout(ftyp, nil)
+
+ // Copy arguments into Values.
+ ptr := frame
+ in := make([]Value, 0, int(ftyp.inCount))
+ for i, typ := range ftyp.in() {
+ if typ.Size() == 0 {
+ in = append(in, Zero(typ))
+ continue
+ }
+ v := Value{typ, nil, flag(typ.Kind())}
+ steps := abid.call.stepsForValue(i)
+ if st := steps[0]; st.kind == abiStepStack {
+ if ifaceIndir(typ) {
+ // value cannot be inlined in interface data.
+ // Must make a copy, because f might keep a reference to it,
+ // and we cannot let f keep a reference to the stack frame
+ // after this function returns, not even a read-only reference.
+ v.ptr = unsafe_New(typ)
+ if typ.size > 0 {
+ typedmemmove(typ, v.ptr, add(ptr, st.stkOff, "typ.size > 0"))
+ }
+ v.flag |= flagIndir
+ } else {
+ v.ptr = *(*unsafe.Pointer)(add(ptr, st.stkOff, "1-ptr"))
+ }
+ } else {
+ if ifaceIndir(typ) {
+ // All that's left is values passed in registers that we need to
+ // create space for.
+ v.flag |= flagIndir
+ v.ptr = unsafe_New(typ)
+ for _, st := range steps {
+ switch st.kind {
+ case abiStepIntReg:
+ offset := add(v.ptr, st.offset, "precomputed value offset")
+ intFromReg(regs, st.ireg, st.size, offset)
+ case abiStepPointer:
+ s := add(v.ptr, st.offset, "precomputed value offset")
+ *((*unsafe.Pointer)(s)) = regs.Ptrs[st.ireg]
+ case abiStepFloatReg:
+ offset := add(v.ptr, st.offset, "precomputed value offset")
+ floatFromReg(regs, st.freg, st.size, offset)
+ case abiStepStack:
+ panic("register-based return value has stack component")
+ default:
+ panic("unknown ABI part kind")
+ }
+ }
+ } else {
+ // Pointer-valued data gets put directly
+ // into v.ptr.
+ if steps[0].kind != abiStepPointer {
+ print("kind=", steps[0].kind, ", type=", typ.String(), "\n")
+ panic("mismatch between ABI description and types")
+ }
+ v.ptr = regs.Ptrs[steps[0].ireg]
+ }
+ }
+ in = append(in, v)
+ }
+
+ // Call underlying function.
+ out := f(in)
+ numOut := ftyp.NumOut()
+ if len(out) != numOut {
+ panic("reflect: wrong return count from function created by MakeFunc")
+ }
+
+ // Copy results back into argument frame and register space.
+ if numOut > 0 {
+ for i, typ := range ftyp.out() {
+ v := out[i]
+ if v.typ == nil {
+ panic("reflect: function created by MakeFunc using " + funcName(f) +
+ " returned zero Value")
+ }
+ if v.flag&flagRO != 0 {
+ panic("reflect: function created by MakeFunc using " + funcName(f) +
+ " returned value obtained from unexported field")
+ }
+ if typ.size == 0 {
+ continue
+ }
+
+ // Convert v to type typ if v is assignable to a variable
+ // of type t in the language spec.
+ // See issue 28761.
+ //
+ //
+ // TODO(mknyszek): In the switch to the register ABI we lost
+ // the scratch space here for the register cases (and
+ // temporarily for all the cases).
+ //
+ // If/when this happens, take note of the following:
+ //
+ // We must clear the destination before calling assignTo,
+ // in case assignTo writes (with memory barriers) to the
+ // target location used as scratch space. See issue 39541.
+ v = v.assignTo("reflect.MakeFunc", typ, nil)
+ stepsLoop:
+ for _, st := range abid.ret.stepsForValue(i) {
+ switch st.kind {
+ case abiStepStack:
+ // Copy values to the "stack."
+ addr := add(ptr, st.stkOff, "precomputed stack arg offset")
+ // Do not use write barriers. The stack space used
+ // for this call is not adequately zeroed, and we
+ // are careful to keep the arguments alive until we
+ // return to makeFuncStub's caller.
+ if v.flag&flagIndir != 0 {
+ memmove(addr, v.ptr, st.size)
+ } else {
+ // This case must be a pointer type.
+ *(*uintptr)(addr) = uintptr(v.ptr)
+ }
+ // There's only one step for a stack-allocated value.
+ break stepsLoop
+ case abiStepIntReg, abiStepPointer:
+ // Copy values to "integer registers."
+ if v.flag&flagIndir != 0 {
+ offset := add(v.ptr, st.offset, "precomputed value offset")
+ intToReg(regs, st.ireg, st.size, offset)
+ } else {
+ // Only populate the Ints space on the return path.
+ // This is safe because out is kept alive until the
+ // end of this function, and the return path through
+ // makeFuncStub has no preemption, so these pointers
+ // are always visible to the GC.
+ regs.Ints[st.ireg] = uintptr(v.ptr)
+ }
+ case abiStepFloatReg:
+ // Copy values to "float registers."
+ if v.flag&flagIndir == 0 {
+ panic("attempted to copy pointer to FP register")
+ }
+ offset := add(v.ptr, st.offset, "precomputed value offset")
+ floatToReg(regs, st.freg, st.size, offset)
+ default:
+ panic("unknown ABI part kind")
+ }
+ }
+ }
+ }
+
+ // Announce that the return values are valid.
+ // After this point the runtime can depend on the return values being valid.
+ *retValid = true
+
+ // We have to make sure that the out slice lives at least until
+ // the runtime knows the return values are valid. Otherwise, the
+ // return values might not be scanned by anyone during a GC.
+ // (out would be dead, and the return slots not yet alive.)
+ runtime.KeepAlive(out)
+
+ // runtime.getArgInfo expects to be able to find ctxt on the
+ // stack when it finds our caller, makeFuncStub. Make sure it
+ // doesn't get garbage collected.
+ runtime.KeepAlive(ctxt)
+}
+
+// methodReceiver returns information about the receiver
+// described by v. The Value v may or may not have the
+// flagMethod bit set, so the kind cached in v.flag should
+// not be used.
+// The return value rcvrtype gives the method's actual receiver type.
+// The return value t gives the method type signature (without the receiver).
+// The return value fn is a pointer to the method code.
+func methodReceiver(op string, v Value, methodIndex int) (rcvrtype *rtype, t *funcType, fn unsafe.Pointer) {
+ i := methodIndex
+ if v.typ.Kind() == Interface {
+ tt := (*interfaceType)(unsafe.Pointer(v.typ))
+ if uint(i) >= uint(len(tt.methods)) {
+ panic("reflect: internal error: invalid method index")
+ }
+ m := &tt.methods[i]
+ if !tt.nameOff(m.name).isExported() {
+ panic("reflect: " + op + " of unexported method")
+ }
+ iface := (*nonEmptyInterface)(v.ptr)
+ if iface.itab == nil {
+ panic("reflect: " + op + " of method on nil interface value")
+ }
+ rcvrtype = iface.itab.typ
+ fn = unsafe.Pointer(&iface.itab.fun[i])
+ t = (*funcType)(unsafe.Pointer(tt.typeOff(m.typ)))
+ } else {
+ rcvrtype = v.typ
+ ms := v.typ.exportedMethods()
+ if uint(i) >= uint(len(ms)) {
+ panic("reflect: internal error: invalid method index")
+ }
+ m := ms[i]
+ if !v.typ.nameOff(m.name).isExported() {
+ panic("reflect: " + op + " of unexported method")
+ }
+ ifn := v.typ.textOff(m.ifn)
+ fn = unsafe.Pointer(&ifn)
+ t = (*funcType)(unsafe.Pointer(v.typ.typeOff(m.mtyp)))
+ }
+ return
+}
+
+// v is a method receiver. Store at p the word which is used to
+// encode that receiver at the start of the argument list.
+// Reflect uses the "interface" calling convention for
+// methods, which always uses one word to record the receiver.
+func storeRcvr(v Value, p unsafe.Pointer) {
+ t := v.typ
+ if t.Kind() == Interface {
+ // the interface data word becomes the receiver word
+ iface := (*nonEmptyInterface)(v.ptr)
+ *(*unsafe.Pointer)(p) = iface.word
+ } else if v.flag&flagIndir != 0 && !ifaceIndir(t) {
+ *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(v.ptr)
+ } else {
+ *(*unsafe.Pointer)(p) = v.ptr
+ }
+}
+
+// align returns the result of rounding x up to a multiple of n.
+// n must be a power of two.
+func align(x, n uintptr) uintptr {
+ return (x + n - 1) &^ (n - 1)
+}
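+
+// A quick check of the arithmetic: align(13, 8) == 16, align(16, 8) == 16,
+// and align(0, 8) == 0.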
+
+// callMethod is the call implementation used by a function returned
+// by makeMethodValue (used by v.Method(i).Interface()).
+// It is a streamlined version of the usual reflect call: the caller has
+// already laid out the argument frame for us, so we don't have
+// to deal with individual Values for each argument.
+// It is in this file so that it can be next to the two similar functions above.
+// The remainder of the makeMethodValue implementation is in makefunc.go.
+//
+// NOTE: This function must be marked as a "wrapper" in the generated code,
+// so that the linker can make it work correctly for panic and recover.
+// The gc compilers know to do that for the name "reflect.callMethod".
+//
+// ctxt is the "closure" generated by makeVethodValue.
+// frame is a pointer to the arguments to that closure on the stack.
+// retValid points to a boolean which should be set when the results
+// section of frame is set.
+//
+// regs contains the argument values passed in registers and will contain
+// the values returned from ctxt.fn in registers.
+func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool, regs *abi.RegArgs) {
+ rcvr := ctxt.rcvr
+ rcvrType, valueFuncType, methodFn := methodReceiver("call", rcvr, ctxt.method)
+
+ // There are two ABIs at play here.
+ //
+ // methodValueCall was invoked with the ABI assuming there was no
+ // receiver ("value ABI") and that's what frame and regs are holding.
+ //
+ // Meanwhile, we need to actually call the method with a receiver, which
+ // has its own ABI ("method ABI"). Everything that follows is a translation
+ // between the two.
+ _, _, valueABI := funcLayout(valueFuncType, nil)
+ valueFrame, valueRegs := frame, regs
+ methodFrameType, methodFramePool, methodABI := funcLayout(valueFuncType, rcvrType)
+
+ // Make a new frame that is one word bigger so we can store the receiver.
+ // This space is used for both arguments and return values.
+ methodFrame := methodFramePool.Get().(unsafe.Pointer)
+ var methodRegs abi.RegArgs
+
+ // Deal with the receiver. It's guaranteed to only be one word in size.
+ switch st := methodABI.call.steps[0]; st.kind {
+ case abiStepStack:
+ // Only copy the receiver to the stack if the ABI says so.
+ // Otherwise, it'll be in a register already.
+ storeRcvr(rcvr, methodFrame)
+ case abiStepPointer:
+ // Put the receiver in a register.
+ storeRcvr(rcvr, unsafe.Pointer(&methodRegs.Ptrs[st.ireg]))
+ fallthrough
+ case abiStepIntReg:
+ storeRcvr(rcvr, unsafe.Pointer(&methodRegs.Ints[st.ireg]))
+ case abiStepFloatReg:
+ storeRcvr(rcvr, unsafe.Pointer(&methodRegs.Floats[st.freg]))
+ default:
+ panic("unknown ABI parameter kind")
+ }
+
+ // Translate the rest of the arguments.
+ for i, t := range valueFuncType.in() {
+ valueSteps := valueABI.call.stepsForValue(i)
+ methodSteps := methodABI.call.stepsForValue(i + 1)
+
+ // Zero-sized types are trivial: nothing to do.
+ if len(valueSteps) == 0 {
+ if len(methodSteps) != 0 {
+ panic("method ABI and value ABI do not align")
+ }
+ continue
+ }
+
+ // There are four cases to handle in translating each
+ // argument:
+ // 1. Stack -> stack translation.
+ // 2. Stack -> registers translation.
+ // 3. Registers -> stack translation.
+ // 4. Registers -> registers translation.
+
+ // If the value ABI passes the value on the stack,
+ // then the method ABI does too, because it has strictly
+ // fewer arguments. Simply copy between the two.
+ if vStep := valueSteps[0]; vStep.kind == abiStepStack {
+ mStep := methodSteps[0]
+ // Handle stack -> stack translation.
+ if mStep.kind == abiStepStack {
+ if vStep.size != mStep.size {
+ panic("method ABI and value ABI do not align")
+ }
+ typedmemmove(t,
+ add(methodFrame, mStep.stkOff, "precomputed stack offset"),
+ add(valueFrame, vStep.stkOff, "precomputed stack offset"))
+ continue
+ }
+ // Handle stack -> register translation.
+ for _, mStep := range methodSteps {
+ from := add(valueFrame, vStep.stkOff+mStep.offset, "precomputed stack offset")
+ switch mStep.kind {
+ case abiStepPointer:
+ // Do the pointer copy directly so we get a write barrier.
+ methodRegs.Ptrs[mStep.ireg] = *(*unsafe.Pointer)(from)
+ fallthrough // We need to make sure this ends up in Ints, too.
+ case abiStepIntReg:
+ intToReg(&methodRegs, mStep.ireg, mStep.size, from)
+ case abiStepFloatReg:
+ floatToReg(&methodRegs, mStep.freg, mStep.size, from)
+ default:
+ panic("unexpected method step")
+ }
+ }
+ continue
+ }
+ // Handle register -> stack translation.
+ if mStep := methodSteps[0]; mStep.kind == abiStepStack {
+ for _, vStep := range valueSteps {
+ to := add(methodFrame, mStep.stkOff+vStep.offset, "precomputed stack offset")
+ switch vStep.kind {
+ case abiStepPointer:
+ // Do the pointer copy directly so we get a write barrier.
+ *(*unsafe.Pointer)(to) = valueRegs.Ptrs[vStep.ireg]
+ case abiStepIntReg:
+ intFromReg(valueRegs, vStep.ireg, vStep.size, to)
+ case abiStepFloatReg:
+ floatFromReg(valueRegs, vStep.freg, vStep.size, to)
+ default:
+ panic("unexpected value step")
+ }
+ }
+ continue
+ }
+ // Handle register -> register translation.
+ if len(valueSteps) != len(methodSteps) {
+ // Because it's the same type for the value, and it's assigned
+ // to registers both times, it should always take up the same
+ // number of registers for each ABI.
+ panic("method ABI and value ABI don't align")
+ }
+ for i, vStep := range valueSteps {
+ mStep := methodSteps[i]
+ if mStep.kind != vStep.kind {
+ panic("method ABI and value ABI don't align")
+ }
+ switch vStep.kind {
+ case abiStepPointer:
+ // Copy this too, so we get a write barrier.
+ methodRegs.Ptrs[mStep.ireg] = valueRegs.Ptrs[vStep.ireg]
+ fallthrough
+ case abiStepIntReg:
+ methodRegs.Ints[mStep.ireg] = valueRegs.Ints[vStep.ireg]
+ case abiStepFloatReg:
+ methodRegs.Floats[mStep.freg] = valueRegs.Floats[vStep.freg]
+ default:
+ panic("unexpected value step")
+ }
+ }
+ }
+
+ methodFrameSize := methodFrameType.size
+ // TODO(mknyszek): Remove this when we no longer have
+ // caller reserved spill space.
+ methodFrameSize = align(methodFrameSize, goarch.PtrSize)
+ methodFrameSize += methodABI.spill
+
+ // Mark pointers in registers for the return path.
+ methodRegs.ReturnIsPtr = methodABI.outRegPtrs
+
+ // Call.
+ // Call copies the arguments from scratch to the stack, calls fn,
+ // and then copies the results back into scratch.
+ call(methodFrameType, methodFn, methodFrame, uint32(methodFrameType.size), uint32(methodABI.retOffset), uint32(methodFrameSize), &methodRegs)
+
+ // Copy return values.
+ //
+ // This is somewhat simpler because both ABIs have an identical
+ // return value ABI (the types are identical). As a result, register
+ // results can simply be copied over. Stack-allocated values are laid
+ // out the same, but are at different offsets from the start of the frame
+ // because the arguments may be laid out differently.
+ // Ignore any changes to args.
+ // Avoid constructing out-of-bounds pointers if there are no return values.
+ if valueRegs != nil {
+ *valueRegs = methodRegs
+ }
+ if retSize := methodFrameType.size - methodABI.retOffset; retSize > 0 {
+ valueRet := add(valueFrame, valueABI.retOffset, "valueFrame's size > retOffset")
+ methodRet := add(methodFrame, methodABI.retOffset, "methodFrame's size > retOffset")
+ // This copies to the stack. Write barriers are not needed.
+ memmove(valueRet, methodRet, retSize)
+ }
+
+ // Tell the runtime it can now depend on the return values
+ // being properly initialized.
+ *retValid = true
+
+ // Clear the scratch space and put it back in the pool.
+ // This must happen after the statement above, so that the return
+ // values will always be scanned by someone.
+ typedmemclr(methodFrameType, methodFrame)
+ methodFramePool.Put(methodFrame)
+
+ // See the comment in callReflect.
+ runtime.KeepAlive(ctxt)
+
+ // Keep valueRegs alive because it may hold live pointer results.
+ // The caller (methodValueCall) has it as a stack object, which is only
+ // scanned when there is a reference to it.
+ runtime.KeepAlive(valueRegs)
+}
+
+// funcName returns the name of f, for use in error messages.
+func funcName(f func([]Value) []Value) string {
+ pc := *(*uintptr)(unsafe.Pointer(&f))
+ rf := runtime.FuncForPC(pc)
+ if rf != nil {
+ return rf.Name()
+ }
+ return "closure"
+}
+
+// Cap returns v's capacity.
+// It panics if v's Kind is not Array, Chan, Slice, or pointer to Array.
+func (v Value) Cap() int {
+ // capNonSlice is split out to keep Cap inlineable for slice kinds.
+ if v.kind() == Slice {
+ return (*unsafeheader.Slice)(v.ptr).Cap
+ }
+ return v.capNonSlice()
+}
+
+func (v Value) capNonSlice() int {
+ k := v.kind()
+ switch k {
+ case Array:
+ return v.typ.Len()
+ case Chan:
+ return chancap(v.pointer())
+ case Ptr:
+ if v.typ.Elem().Kind() == Array {
+ return v.typ.Elem().Len()
+ }
+ panic("reflect: call of reflect.Value.Cap on ptr to non-array Value")
+ }
+ panic(&ValueError{"reflect.Value.Cap", v.kind()})
+}
+
+// Close closes the channel v.
+// It panics if v's Kind is not Chan.
+func (v Value) Close() {
+ v.mustBe(Chan)
+ v.mustBeExported()
+ chanclose(v.pointer())
+}
+
+// CanComplex reports whether Complex can be used without panicking.
+func (v Value) CanComplex() bool {
+ switch v.kind() {
+ case Complex64, Complex128:
+ return true
+ default:
+ return false
+ }
+}
+
+// Complex returns v's underlying value, as a complex128.
+// It panics if v's Kind is not Complex64 or Complex128.
+func (v Value) Complex() complex128 {
+ k := v.kind()
+ switch k {
+ case Complex64:
+ return complex128(*(*complex64)(v.ptr))
+ case Complex128:
+ return *(*complex128)(v.ptr)
+ }
+ panic(&ValueError{"reflect.Value.Complex", v.kind()})
+}
+
+// Elem returns the value that the interface v contains
+// or that the pointer v points to.
+// It panics if v's Kind is not Interface or Pointer.
+// It returns the zero Value if v is nil.
+func (v Value) Elem() Value {
+ k := v.kind()
+ switch k {
+ case Interface:
+ var eface any
+ if v.typ.NumMethod() == 0 {
+ eface = *(*any)(v.ptr)
+ } else {
+ eface = (any)(*(*interface {
+ M()
+ })(v.ptr))
+ }
+ x := unpackEface(eface)
+ if x.flag != 0 {
+ x.flag |= v.flag.ro()
+ }
+ return x
+ case Pointer:
+ ptr := v.ptr
+ if v.flag&flagIndir != 0 {
+ if ifaceIndir(v.typ) {
+ // This is a pointer to a not-in-heap object. ptr points to a uintptr
+ // in the heap. That uintptr is the address of a not-in-heap object.
+ // In general, pointers to not-in-heap objects can be total junk.
+ // But Elem() is asking to dereference it, so the user has asserted
+ // that at least it is a valid pointer (not just an integer stored in
+ // a pointer slot). So let's check, to make sure that it isn't a pointer
+ // that the runtime will crash on if it sees it during GC or write barriers.
+ // Since it is a not-in-heap pointer, all pointers to the heap are
+ // forbidden! That makes the test pretty easy.
+ // See issue 48399.
+ if !verifyNotInHeapPtr(*(*uintptr)(ptr)) {
+ panic("reflect: reflect.Value.Elem on an invalid notinheap pointer")
+ }
+ }
+ ptr = *(*unsafe.Pointer)(ptr)
+ }
+ // The returned value's address is v's value.
+ if ptr == nil {
+ return Value{}
+ }
+ tt := (*ptrType)(unsafe.Pointer(v.typ))
+ typ := tt.elem
+ fl := v.flag&flagRO | flagIndir | flagAddr
+ fl |= flag(typ.Kind())
+ return Value{typ, ptr, fl}
+ }
+ panic(&ValueError{"reflect.Value.Elem", v.kind()})
+}
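+
+// A minimal usage sketch (assuming a client that imports reflect):
+//
+//	x := 7
+//	e := reflect.ValueOf(&x).Elem() // Kind Int, addressable; e.Int() == 7
+//
+//	var i any = "hi"
+//	s := reflect.ValueOf(&i).Elem().Elem() // Interface -> String; s.String() == "hi"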
+
+// Field returns the i'th field of the struct v.
+// It panics if v's Kind is not Struct or i is out of range.
+func (v Value) Field(i int) Value {
+ if v.kind() != Struct {
+ panic(&ValueError{"reflect.Value.Field", v.kind()})
+ }
+ tt := (*structType)(unsafe.Pointer(v.typ))
+ if uint(i) >= uint(len(tt.fields)) {
+ panic("reflect: Field index out of range")
+ }
+ field := &tt.fields[i]
+ typ := field.typ
+
+ // Inherit permission bits from v, but clear flagEmbedRO.
+ fl := v.flag&(flagStickyRO|flagIndir|flagAddr) | flag(typ.Kind())
+ // Using an unexported field forces flagRO.
+ if !field.name.isExported() {
+ if field.embedded() {
+ fl |= flagEmbedRO
+ } else {
+ fl |= flagStickyRO
+ }
+ }
+ // Either flagIndir is set and v.ptr points at struct,
+ // or flagIndir is not set and v.ptr is the actual struct data.
+ // In the former case, we want v.ptr + offset.
+ // In the latter case, we must have field.offset = 0,
+ // so v.ptr + field.offset is still the correct address.
+ ptr := add(v.ptr, field.offset, "same as non-reflect &v.field")
+ return Value{typ, ptr, fl}
+}
+
+// FieldByIndex returns the nested field corresponding to index.
+// It panics if evaluation requires stepping through a nil
+// pointer or a field that is not a struct.
+func (v Value) FieldByIndex(index []int) Value {
+ if len(index) == 1 {
+ return v.Field(index[0])
+ }
+ v.mustBe(Struct)
+ for i, x := range index {
+ if i > 0 {
+ if v.Kind() == Pointer && v.typ.Elem().Kind() == Struct {
+ if v.IsNil() {
+ panic("reflect: indirection through nil pointer to embedded struct")
+ }
+ v = v.Elem()
+ }
+ }
+ v = v.Field(x)
+ }
+ return v
+}
+
+// FieldByIndexErr returns the nested field corresponding to index.
+// It returns an error if evaluation requires stepping through a nil
+// pointer, but panics if it must step through a field that
+// is not a struct.
+func (v Value) FieldByIndexErr(index []int) (Value, error) {
+ if len(index) == 1 {
+ return v.Field(index[0]), nil
+ }
+ v.mustBe(Struct)
+ for i, x := range index {
+ if i > 0 {
+ if v.Kind() == Ptr && v.typ.Elem().Kind() == Struct {
+ if v.IsNil() {
+ return Value{}, errors.New("reflect: indirection through nil pointer to embedded struct field " + v.typ.Elem().Name())
+ }
+ v = v.Elem()
+ }
+ }
+ v = v.Field(x)
+ }
+ return v, nil
+}
+
+// FieldByName returns the struct field with the given name.
+// It returns the zero Value if no field was found.
+// It panics if v's Kind is not struct.
+func (v Value) FieldByName(name string) Value {
+ v.mustBe(Struct)
+ if f, ok := v.typ.FieldByName(name); ok {
+ return v.FieldByIndex(f.Index)
+ }
+ return Value{}
+}
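+
+// A minimal usage sketch (assuming a client that imports reflect):
+//
+//	u := struct{ Name string }{Name: "gopher"}
+//	reflect.ValueOf(u).FieldByName("Name") // Value whose String() is "gopher"
+//	reflect.ValueOf(u).FieldByName("Nope") // zero Value: no such field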
+
+// FieldByNameFunc returns the struct field with a name
+// that satisfies the match function.
+// It panics if v's Kind is not struct.
+// It returns the zero Value if no field was found.
+func (v Value) FieldByNameFunc(match func(string) bool) Value {
+ if f, ok := v.typ.FieldByNameFunc(match); ok {
+ return v.FieldByIndex(f.Index)
+ }
+ return Value{}
+}
+
+// CanFloat reports whether Float can be used without panicking.
+func (v Value) CanFloat() bool {
+ switch v.kind() {
+ case Float32, Float64:
+ return true
+ default:
+ return false
+ }
+}
+
+// Float returns v's underlying value, as a float64.
+// It panics if v's Kind is not Float32 or Float64.
+func (v Value) Float() float64 {
+ k := v.kind()
+ switch k {
+ case Float32:
+ return float64(*(*float32)(v.ptr))
+ case Float64:
+ return *(*float64)(v.ptr)
+ }
+ panic(&ValueError{"reflect.Value.Float", v.kind()})
+}
+
+var uint8Type = TypeOf(uint8(0)).(*rtype)
+
+// Index returns v's i'th element.
+// It panics if v's Kind is not Array, Slice, or String or i is out of range.
+func (v Value) Index(i int) Value {
+ switch v.kind() {
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(v.typ))
+ if uint(i) >= uint(tt.len) {
+ panic("reflect: array index out of range")
+ }
+ typ := tt.elem
+ offset := uintptr(i) * typ.size
+
+ // Either flagIndir is set and v.ptr points at array,
+ // or flagIndir is not set and v.ptr is the actual array data.
+ // In the former case, we want v.ptr + offset.
+ // In the latter case, we must be doing Index(0), so offset = 0,
+ // so v.ptr + offset is still the correct address.
+ val := add(v.ptr, offset, "same as &v[i], i < tt.len")
+ fl := v.flag&(flagIndir|flagAddr) | v.flag.ro() | flag(typ.Kind()) // bits same as overall array
+ return Value{typ, val, fl}
+
+ case Slice:
+ // Element flag same as Elem of Pointer.
+ // Addressable, indirect, possibly read-only.
+ s := (*unsafeheader.Slice)(v.ptr)
+ if uint(i) >= uint(s.Len) {
+ panic("reflect: slice index out of range")
+ }
+ tt := (*sliceType)(unsafe.Pointer(v.typ))
+ typ := tt.elem
+ val := arrayAt(s.Data, i, typ.size, "i < s.Len")
+ fl := flagAddr | flagIndir | v.flag.ro() | flag(typ.Kind())
+ return Value{typ, val, fl}
+
+ case String:
+ s := (*unsafeheader.String)(v.ptr)
+ if uint(i) >= uint(s.Len) {
+ panic("reflect: string index out of range")
+ }
+ p := arrayAt(s.Data, i, 1, "i < s.Len")
+ fl := v.flag.ro() | flag(Uint8) | flagIndir
+ return Value{uint8Type, p, fl}
+ }
+ panic(&ValueError{"reflect.Value.Index", v.kind()})
+}
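+
+// A minimal usage sketch (assuming a client that imports reflect):
+//
+//	s := []int{10, 20}
+//	reflect.ValueOf(s).Index(1).Int()     // 20
+//	reflect.ValueOf("go").Index(0).Uint() // 103 ('g'), as a Uint8 Value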
+
+// CanInt reports whether Int can be used without panicking.
+func (v Value) CanInt() bool {
+ switch v.kind() {
+ case Int, Int8, Int16, Int32, Int64:
+ return true
+ default:
+ return false
+ }
+}
+
+// Int returns v's underlying value, as an int64.
+// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64.
+func (v Value) Int() int64 {
+ k := v.kind()
+ p := v.ptr
+ switch k {
+ case Int:
+ return int64(*(*int)(p))
+ case Int8:
+ return int64(*(*int8)(p))
+ case Int16:
+ return int64(*(*int16)(p))
+ case Int32:
+ return int64(*(*int32)(p))
+ case Int64:
+ return *(*int64)(p)
+ }
+ panic(&ValueError{"reflect.Value.Int", v.kind()})
+}
+
+// CanInterface reports whether Interface can be used without panicking.
+func (v Value) CanInterface() bool {
+ if v.flag == 0 {
+ panic(&ValueError{"reflect.Value.CanInterface", Invalid})
+ }
+ return v.flag&flagRO == 0
+}
+
+// Interface returns v's current value as an interface{}.
+// It is equivalent to:
+//
+// var i interface{} = (v's underlying value)
+//
+// It panics if the Value was obtained by accessing
+// unexported struct fields.
+func (v Value) Interface() (i any) {
+ return valueInterface(v, true)
+}
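+
+// A minimal usage sketch (assuming a client that imports reflect):
+//
+//	v := reflect.ValueOf(42)
+//	i := v.Interface() // i is an any holding the int 42
+//	n := i.(int)       // n == 42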
+
+func valueInterface(v Value, safe bool) any {
+ if v.flag == 0 {
+ panic(&ValueError{"reflect.Value.Interface", Invalid})
+ }
+ if safe && v.flag&flagRO != 0 {
+ // Do not allow access to unexported values via Interface,
+ // because they might be pointers that should not be
+ // writable or methods or function that should not be callable.
+ panic("reflect.Value.Interface: cannot return value obtained from unexported field or method")
+ }
+ if v.flag&flagMethod != 0 {
+ v = makeMethodValue("Interface", v)
+ }
+
+ if v.kind() == Interface {
+ // Special case: return the element inside the interface.
+ // Empty interface has one layout, all interfaces with
+ // methods have a second layout.
+ if v.NumMethod() == 0 {
+ return *(*any)(v.ptr)
+ }
+ return *(*interface {
+ M()
+ })(v.ptr)
+ }
+
+ // TODO: pass safe to packEface so we don't need to copy if safe==true?
+ return packEface(v)
+}
+
+// InterfaceData returns a pair of unspecified uintptr values.
+// It panics if v's Kind is not Interface.
+//
+// In earlier versions of Go, this function returned the interface's
+// value as a uintptr pair. As of Go 1.4, the implementation of
+// interface values precludes any defined use of InterfaceData.
+//
+// Deprecated: The memory representation of interface values is not
+// compatible with InterfaceData.
+func (v Value) InterfaceData() [2]uintptr {
+ v.mustBe(Interface)
+ // We treat this as a read operation, so we allow
+ // it even for unexported data, because the caller
+ // has to import "unsafe" to turn it into something
+ // that can be abused.
+ // Interface value is always bigger than a word; assume flagIndir.
+ return *(*[2]uintptr)(v.ptr)
+}
+
+// IsNil reports whether its argument v is nil. The argument must be
+// a chan, func, interface, map, pointer, or slice value; if it is
+// not, IsNil panics. Note that IsNil is not always equivalent to a
+// regular comparison with nil in Go. For example, if v was created
+// by calling ValueOf with an uninitialized interface variable i,
+// i==nil will be true but v.IsNil will panic as v will be the zero
+// Value.
+func (v Value) IsNil() bool {
+ k := v.kind()
+ switch k {
+ case Chan, Func, Map, Pointer, UnsafePointer:
+ if v.flag&flagMethod != 0 {
+ return false
+ }
+ ptr := v.ptr
+ if v.flag&flagIndir != 0 {
+ ptr = *(*unsafe.Pointer)(ptr)
+ }
+ return ptr == nil
+ case Interface, Slice:
+ // Both interface and slice are nil if first word is 0.
+ // Both are always bigger than a word; assume flagIndir.
+ return *(*unsafe.Pointer)(v.ptr) == nil
+ }
+ panic(&ValueError{"reflect.Value.IsNil", v.kind()})
+}
+
+// IsValid reports whether v represents a value.
+// It returns false if v is the zero Value.
+// If IsValid returns false, all other methods except String panic.
+// Most functions and methods never return an invalid Value.
+// If one does, its documentation states the conditions explicitly.
+func (v Value) IsValid() bool {
+ return v.flag != 0
+}
+
+// IsZero reports whether v is the zero value for its type.
+// It panics if the argument is invalid.
+func (v Value) IsZero() bool {
+ switch v.kind() {
+ case Bool:
+ return !v.Bool()
+ case Int, Int8, Int16, Int32, Int64:
+ return v.Int() == 0
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return v.Uint() == 0
+ case Float32, Float64:
+ return math.Float64bits(v.Float()) == 0
+ case Complex64, Complex128:
+ c := v.Complex()
+ return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
+ case Array:
+ for i := 0; i < v.Len(); i++ {
+ if !v.Index(i).IsZero() {
+ return false
+ }
+ }
+ return true
+ case Chan, Func, Interface, Map, Pointer, Slice, UnsafePointer:
+ return v.IsNil()
+ case String:
+ return v.Len() == 0
+ case Struct:
+ for i := 0; i < v.NumField(); i++ {
+ if !v.Field(i).IsZero() {
+ return false
+ }
+ }
+ return true
+ default:
+ // This should never happen, but will act as a safeguard for
+ // later, as a default value doesn't make sense here.
+ panic(&ValueError{"reflect.Value.IsZero", v.Kind()})
+ }
+}
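+
+// Illustrative sketch (not part of this file): IsZero recurses through
+// composite kinds, so a struct is zero only if every field is zero:
+//
+//	type point struct{ X, Y int }
+//	reflect.ValueOf(point{}).IsZero()     // true
+//	reflect.ValueOf(point{X: 1}).IsZero() // false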
+
+// Kind returns v's Kind.
+// If v is the zero Value (IsValid returns false), Kind returns Invalid.
+func (v Value) Kind() Kind {
+ return v.kind()
+}
+
+// Len returns v's length.
+// It panics if v's Kind is not Array, Chan, Map, Slice, String, or pointer to Array.
+func (v Value) Len() int {
+ // lenNonSlice is split out to keep Len inlineable for slice kinds.
+ if v.kind() == Slice {
+ return (*unsafeheader.Slice)(v.ptr).Len
+ }
+ return v.lenNonSlice()
+}
+
+func (v Value) lenNonSlice() int {
+ switch k := v.kind(); k {
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(v.typ))
+ return int(tt.len)
+ case Chan:
+ return chanlen(v.pointer())
+ case Map:
+ return maplen(v.pointer())
+ case String:
+ // String is bigger than a word; assume flagIndir.
+ return (*unsafeheader.String)(v.ptr).Len
+ case Ptr:
+ if v.typ.Elem().Kind() == Array {
+ return v.typ.Elem().Len()
+ }
+ panic("reflect: call of reflect.Value.Len on ptr to non-array Value")
+ }
+ panic(&ValueError{"reflect.Value.Len", v.kind()})
+}
+
+var stringType = TypeOf("").(*rtype)
+
+// MapIndex returns the value associated with key in the map v.
+// It panics if v's Kind is not Map.
+// It returns the zero Value if key is not found in the map or if v represents a nil map.
+// As in Go, the key's value must be assignable to the map's key type.
+func (v Value) MapIndex(key Value) Value {
+ v.mustBe(Map)
+ tt := (*mapType)(unsafe.Pointer(v.typ))
+
+ // Do not require key to be exported, so that DeepEqual
+ // and other programs can use all the keys returned by
+ // MapKeys as arguments to MapIndex. If either the map
+ // or the key is unexported, though, the result will be
+ // considered unexported. This is consistent with the
+ // behavior for structs, which allow read but not write
+ // of unexported fields.
+
+ var e unsafe.Pointer
+ if (tt.key == stringType || key.kind() == String) && tt.key == key.typ && tt.elem.size <= maxValSize {
+ k := *(*string)(key.ptr)
+ e = mapaccess_faststr(v.typ, v.pointer(), k)
+ } else {
+ key = key.assignTo("reflect.Value.MapIndex", tt.key, nil)
+ var k unsafe.Pointer
+ if key.flag&flagIndir != 0 {
+ k = key.ptr
+ } else {
+ k = unsafe.Pointer(&key.ptr)
+ }
+ e = mapaccess(v.typ, v.pointer(), k)
+ }
+ if e == nil {
+ return Value{}
+ }
+ typ := tt.elem
+ fl := (v.flag | key.flag).ro()
+ fl |= flag(typ.Kind())
+ return copyVal(typ, fl, e)
+}
+
+// MapKeys returns a slice containing all the keys present in the map,
+// in unspecified order.
+// It panics if v's Kind is not Map.
+// It returns an empty slice if v represents a nil map.
+func (v Value) MapKeys() []Value {
+ v.mustBe(Map)
+ tt := (*mapType)(unsafe.Pointer(v.typ))
+ keyType := tt.key
+
+ fl := v.flag.ro() | flag(keyType.Kind())
+
+ m := v.pointer()
+ mlen := 0
+ if m != nil {
+ mlen = maplen(m)
+ }
+ var it hiter
+ mapiterinit(v.typ, m, &it)
+ a := make([]Value, mlen)
+ var i int
+ for i = 0; i < len(a); i++ {
+ key := mapiterkey(&it)
+ if key == nil {
+ // Someone deleted an entry from the map since we
+ // called maplen above. It's a data race, but nothing
+ // we can do about it.
+ break
+ }
+ a[i] = copyVal(keyType, fl, key)
+ mapiternext(&it)
+ }
+ return a[:i]
+}
+
+// hiter's structure matches runtime.hiter's structure.
+// Having a clone here allows us to embed a map iterator
+// inside type MapIter so that MapIters can be re-used
+// without doing any allocations.
+type hiter struct {
+ key unsafe.Pointer
+ elem unsafe.Pointer
+ t unsafe.Pointer
+ h unsafe.Pointer
+ buckets unsafe.Pointer
+ bptr unsafe.Pointer
+ overflow *[]unsafe.Pointer
+ oldoverflow *[]unsafe.Pointer
+ startBucket uintptr
+ offset uint8
+ wrapped bool
+ B uint8
+ i uint8
+ bucket uintptr
+ checkBucket uintptr
+}
+
+func (h *hiter) initialized() bool {
+ return h.t != nil
+}
+
+// A MapIter is an iterator for ranging over a map.
+// See Value.MapRange.
+type MapIter struct {
+ m Value
+ hiter hiter
+}
+
+// Key returns the key of iter's current map entry.
+func (iter *MapIter) Key() Value {
+ if !iter.hiter.initialized() {
+ panic("MapIter.Key called before Next")
+ }
+ iterkey := mapiterkey(&iter.hiter)
+ if iterkey == nil {
+ panic("MapIter.Key called on exhausted iterator")
+ }
+
+ t := (*mapType)(unsafe.Pointer(iter.m.typ))
+ ktype := t.key
+ return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey)
+}
+
+// SetIterKey assigns to v the key of iter's current map entry.
+// It is equivalent to v.Set(iter.Key()), but it avoids allocating a new Value.
+// As in Go, the key must be assignable to v's type.
+func (v Value) SetIterKey(iter *MapIter) {
+ if !iter.hiter.initialized() {
+ panic("reflect: Value.SetIterKey called before Next")
+ }
+ iterkey := mapiterkey(&iter.hiter)
+ if iterkey == nil {
+ panic("reflect: Value.SetIterKey called on exhausted iterator")
+ }
+
+ v.mustBeAssignable()
+ var target unsafe.Pointer
+ if v.kind() == Interface {
+ target = v.ptr
+ }
+
+ t := (*mapType)(unsafe.Pointer(iter.m.typ))
+ ktype := t.key
+
+ key := Value{ktype, iterkey, iter.m.flag | flag(ktype.Kind()) | flagIndir}
+ key = key.assignTo("reflect.MapIter.SetKey", v.typ, target)
+ typedmemmove(v.typ, v.ptr, key.ptr)
+}
+
+// Value returns the value of iter's current map entry.
+func (iter *MapIter) Value() Value {
+ if !iter.hiter.initialized() {
+ panic("MapIter.Value called before Next")
+ }
+ iterelem := mapiterelem(&iter.hiter)
+ if iterelem == nil {
+ panic("MapIter.Value called on exhausted iterator")
+ }
+
+ t := (*mapType)(unsafe.Pointer(iter.m.typ))
+ vtype := t.elem
+ return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem)
+}
+
+// SetIterValue assigns to v the value of iter's current map entry.
+// It is equivalent to v.Set(iter.Value()), but it avoids allocating a new Value.
+// As in Go, the value must be assignable to v's type.
+func (v Value) SetIterValue(iter *MapIter) {
+ if !iter.hiter.initialized() {
+ panic("reflect: Value.SetIterValue called before Next")
+ }
+ iterelem := mapiterelem(&iter.hiter)
+ if iterelem == nil {
+ panic("reflect: Value.SetIterValue called on exhausted iterator")
+ }
+
+ v.mustBeAssignable()
+ var target unsafe.Pointer
+ if v.kind() == Interface {
+ target = v.ptr
+ }
+
+ t := (*mapType)(unsafe.Pointer(iter.m.typ))
+ vtype := t.elem
+
+ elem := Value{vtype, iterelem, iter.m.flag | flag(vtype.Kind()) | flagIndir}
+ elem = elem.assignTo("reflect.MapIter.SetValue", v.typ, target)
+ typedmemmove(v.typ, v.ptr, elem.ptr)
+}
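+
+// Illustrative sketch (not part of this file): SetIterKey and SetIterValue
+// let a caller reuse two scratch Values while ranging over a map, avoiding
+// the per-entry allocations that iter.Key()/iter.Value() can incur:
+//
+//	m := map[string]int{"a": 1, "b": 2}
+//	v := reflect.ValueOf(m)
+//	k := reflect.New(v.Type().Key()).Elem()
+//	e := reflect.New(v.Type().Elem()).Elem()
+//	iter := v.MapRange()
+//	for iter.Next() {
+//		k.SetIterKey(iter)
+//		e.SetIterValue(iter)
+//		// use k and e here
+//	}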
+
+// Next advances the map iterator and reports whether there is another
+// entry. It returns false when iter is exhausted; subsequent
+// calls to Key, Value, or Next will panic.
+func (iter *MapIter) Next() bool {
+ if !iter.m.IsValid() {
+ panic("MapIter.Next called on an iterator that does not have an associated map Value")
+ }
+ if !iter.hiter.initialized() {
+ mapiterinit(iter.m.typ, iter.m.pointer(), &iter.hiter)
+ } else {
+ if mapiterkey(&iter.hiter) == nil {
+ panic("MapIter.Next called on exhausted iterator")
+ }
+ mapiternext(&iter.hiter)
+ }
+ return mapiterkey(&iter.hiter) != nil
+}
+
+// Reset modifies iter to iterate over v.
+// It panics if v's Kind is not Map and v is not the zero Value.
+// Reset(Value{}) causes iter not to refer to any map,
+// which may allow the previously iterated-over map to be garbage collected.
+func (iter *MapIter) Reset(v Value) {
+ if v.IsValid() {
+ v.mustBe(Map)
+ }
+ iter.m = v
+ iter.hiter = hiter{}
+}
+
+// MapRange returns a range iterator for a map.
+// It panics if v's Kind is not Map.
+//
+// Call Next to advance the iterator, and Key/Value to access each entry.
+// Next returns false when the iterator is exhausted.
+// MapRange follows the same iteration semantics as a range statement.
+//
+// Example:
+//
+// iter := reflect.ValueOf(m).MapRange()
+// for iter.Next() {
+// k := iter.Key()
+// v := iter.Value()
+// ...
+// }
+func (v Value) MapRange() *MapIter {
+ // This is inlinable to take advantage of "function outlining".
+ // The allocation of MapIter can be stack allocated if the caller
+ // does not allow it to escape.
+ // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/
+ if v.kind() != Map {
+ v.panicNotMap()
+ }
+ return &MapIter{m: v}
+}
+
+func (f flag) panicNotMap() {
+ f.mustBe(Map)
+}
+
+// copyVal returns a Value containing the map key or value at ptr,
+// allocating a new variable as needed.
+func copyVal(typ *rtype, fl flag, ptr unsafe.Pointer) Value {
+ if ifaceIndir(typ) {
+ // Copy result so future changes to the map
+ // won't change the underlying value.
+ c := unsafe_New(typ)
+ typedmemmove(typ, c, ptr)
+ return Value{typ, c, fl | flagIndir}
+ }
+ return Value{typ, *(*unsafe.Pointer)(ptr), fl}
+}
+
+// Method returns a function value corresponding to v's i'th method.
+// The arguments to a Call on the returned function should not include
+// a receiver; the returned function will always use v as the receiver.
+// Method panics if i is out of range or if v is a nil interface value.
+func (v Value) Method(i int) Value {
+ if v.typ == nil {
+ panic(&ValueError{"reflect.Value.Method", Invalid})
+ }
+ if v.flag&flagMethod != 0 || uint(i) >= uint(v.typ.NumMethod()) {
+ panic("reflect: Method index out of range")
+ }
+ if v.typ.Kind() == Interface && v.IsNil() {
+ panic("reflect: Method on nil interface value")
+ }
+ fl := v.flag.ro() | (v.flag & flagIndir)
+ fl |= flag(Func)
+ fl |= flag(i)<<flagMethodShift | flagMethod
+ return Value{v.typ, v.ptr, fl}
+}
+
+// NumMethod returns the number of methods in the value's method set.
+//
+// For a non-interface type, it returns the number of exported methods.
+//
+// For an interface type, it returns the number of exported and unexported methods.
+func (v Value) NumMethod() int {
+ if v.typ == nil {
+ panic(&ValueError{"reflect.Value.NumMethod", Invalid})
+ }
+ if v.flag&flagMethod != 0 {
+ return 0
+ }
+ return v.typ.NumMethod()
+}
+
+// MethodByName returns a function value corresponding to the method
+// of v with the given name.
+// The arguments to a Call on the returned function should not include
+// a receiver; the returned function will always use v as the receiver.
+// It returns the zero Value if no method was found.
+func (v Value) MethodByName(name string) Value {
+ if v.typ == nil {
+ panic(&ValueError{"reflect.Value.MethodByName", Invalid})
+ }
+ if v.flag&flagMethod != 0 {
+ return Value{}
+ }
+ m, ok := v.typ.MethodByName(name)
+ if !ok {
+ return Value{}
+ }
+ return v.Method(m.Index)
+}
+
+// NumField returns the number of fields in the struct v.
+// It panics if v's Kind is not Struct.
+func (v Value) NumField() int {
+ v.mustBe(Struct)
+ tt := (*structType)(unsafe.Pointer(v.typ))
+ return len(tt.fields)
+}
+
+// OverflowComplex reports whether the complex128 x cannot be represented by v's type.
+// It panics if v's Kind is not Complex64 or Complex128.
+func (v Value) OverflowComplex(x complex128) bool {
+ k := v.kind()
+ switch k {
+ case Complex64:
+ return overflowFloat32(real(x)) || overflowFloat32(imag(x))
+ case Complex128:
+ return false
+ }
+ panic(&ValueError{"reflect.Value.OverflowComplex", v.kind()})
+}
+
+// OverflowFloat reports whether the float64 x cannot be represented by v's type.
+// It panics if v's Kind is not Float32 or Float64.
+func (v Value) OverflowFloat(x float64) bool {
+ k := v.kind()
+ switch k {
+ case Float32:
+ return overflowFloat32(x)
+ case Float64:
+ return false
+ }
+ panic(&ValueError{"reflect.Value.OverflowFloat", v.kind()})
+}
+
+func overflowFloat32(x float64) bool {
+ if x < 0 {
+ x = -x
+ }
+ return math.MaxFloat32 < x && x <= math.MaxFloat64
+}
+
+// OverflowInt reports whether the int64 x cannot be represented by v's type.
+// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64.
+func (v Value) OverflowInt(x int64) bool {
+ k := v.kind()
+ switch k {
+ case Int, Int8, Int16, Int32, Int64:
+ bitSize := v.typ.size * 8
+ trunc := (x << (64 - bitSize)) >> (64 - bitSize)
+ return x != trunc
+ }
+ panic(&ValueError{"reflect.Value.OverflowInt", v.kind()})
+}
+
+// OverflowUint reports whether the uint64 x cannot be represented by v's type.
+// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
+func (v Value) OverflowUint(x uint64) bool {
+ k := v.kind()
+ switch k {
+ case Uint, Uintptr, Uint8, Uint16, Uint32, Uint64:
+ bitSize := v.typ.size * 8
+ trunc := (x << (64 - bitSize)) >> (64 - bitSize)
+ return x != trunc
+ }
+ panic(&ValueError{"reflect.Value.OverflowUint", v.kind()})
+}
+
+//go:nocheckptr
+// This prevents inlining Value.Pointer when -d=checkptr is enabled,
+// which ensures cmd/compile can recognize unsafe.Pointer(v.Pointer())
+// and make an exception.
+
+// Pointer returns v's value as a uintptr.
+// It returns uintptr instead of unsafe.Pointer so that
+// code using reflect cannot obtain unsafe.Pointers
+// without importing the unsafe package explicitly.
+// It panics if v's Kind is not Chan, Func, Map, Pointer, Slice, or UnsafePointer.
+//
+// If v's Kind is Func, the returned pointer is an underlying
+// code pointer, but not necessarily enough to identify a
+// single function uniquely. The only guarantee is that the
+// result is zero if and only if v is a nil func Value.
+//
+// If v's Kind is Slice, the returned pointer is to the first
+// element of the slice. If the slice is nil the returned value
+// is 0. If the slice is empty but non-nil the return value is non-zero.
+//
+// It's preferred to use uintptr(Value.UnsafePointer()) to get the equivalent result.
+func (v Value) Pointer() uintptr {
+ k := v.kind()
+ switch k {
+ case Pointer:
+ if v.typ.ptrdata == 0 {
+ val := *(*uintptr)(v.ptr)
+ // Since it is a not-in-heap pointer, all pointers to the heap are
+ // forbidden! See comment in Value.Elem and issue #48399.
+ if !verifyNotInHeapPtr(val) {
+ panic("reflect: reflect.Value.Pointer on an invalid notinheap pointer")
+ }
+ return val
+ }
+ fallthrough
+ case Chan, Map, UnsafePointer:
+ return uintptr(v.pointer())
+ case Func:
+ if v.flag&flagMethod != 0 {
+ // As the doc comment says, the returned pointer is an
+ // underlying code pointer but not necessarily enough to
+ // identify a single function uniquely. All method expressions
+ // created via reflect have the same underlying code pointer,
+ // so their Pointers are equal. The function used here must
+ // match the one used in makeMethodValue.
+ return methodValueCallCodePtr()
+ }
+ p := v.pointer()
+ // Non-nil func value points at data block.
+ // First word of data block is actual code.
+ if p != nil {
+ p = *(*unsafe.Pointer)(p)
+ }
+ return uintptr(p)
+
+ case Slice:
+ return (*SliceHeader)(v.ptr).Data
+ }
+ panic(&ValueError{"reflect.Value.Pointer", v.kind()})
+}
+
+// Recv receives and returns a value from the channel v.
+// It panics if v's Kind is not Chan.
+// The receive blocks until a value is ready.
+// The boolean value ok is true if the value x corresponds to a send
+// on the channel, false if it is a zero value received because the channel is closed.
+func (v Value) Recv() (x Value, ok bool) {
+ v.mustBe(Chan)
+ v.mustBeExported()
+ return v.recv(false)
+}
+
+// internal recv, possibly non-blocking (nb).
+// v is known to be a channel.
+func (v Value) recv(nb bool) (val Value, ok bool) {
+ tt := (*chanType)(unsafe.Pointer(v.typ))
+ if ChanDir(tt.dir)&RecvDir == 0 {
+ panic("reflect: recv on send-only channel")
+ }
+ t := tt.elem
+ val = Value{t, nil, flag(t.Kind())}
+ var p unsafe.Pointer
+ if ifaceIndir(t) {
+ p = unsafe_New(t)
+ val.ptr = p
+ val.flag |= flagIndir
+ } else {
+ p = unsafe.Pointer(&val.ptr)
+ }
+ selected, ok := chanrecv(v.pointer(), nb, p)
+ if !selected {
+ val = Value{}
+ }
+ return
+}
+
+// Send sends x on the channel v.
+// It panics if v's Kind is not Chan or if x's type is not the same type as v's element type.
+// As in Go, x's value must be assignable to the channel's element type.
+func (v Value) Send(x Value) {
+ v.mustBe(Chan)
+ v.mustBeExported()
+ v.send(x, false)
+}
+
+// internal send, possibly non-blocking.
+// v is known to be a channel.
+func (v Value) send(x Value, nb bool) (selected bool) {
+ tt := (*chanType)(unsafe.Pointer(v.typ))
+ if ChanDir(tt.dir)&SendDir == 0 {
+ panic("reflect: send on recv-only channel")
+ }
+ x.mustBeExported()
+ x = x.assignTo("reflect.Value.Send", tt.elem, nil)
+ var p unsafe.Pointer
+ if x.flag&flagIndir != 0 {
+ p = x.ptr
+ } else {
+ p = unsafe.Pointer(&x.ptr)
+ }
+ return chansend(v.pointer(), p, nb)
+}
+
+// Set assigns x to the value v.
+// It panics if CanSet returns false.
+// As in Go, x's value must be assignable to v's type.
+func (v Value) Set(x Value) {
+ v.mustBeAssignable()
+ x.mustBeExported() // do not let unexported x leak
+ var target unsafe.Pointer
+ if v.kind() == Interface {
+ target = v.ptr
+ }
+ x = x.assignTo("reflect.Set", v.typ, target)
+ if x.flag&flagIndir != 0 {
+ if x.ptr == unsafe.Pointer(&zeroVal[0]) {
+ typedmemclr(v.typ, v.ptr)
+ } else {
+ typedmemmove(v.typ, v.ptr, x.ptr)
+ }
+ } else {
+ *(*unsafe.Pointer)(v.ptr) = x.ptr
+ }
+}
+
+// SetBool sets v's underlying value.
+// It panics if v's Kind is not Bool or if CanSet() is false.
+func (v Value) SetBool(x bool) {
+ v.mustBeAssignable()
+ v.mustBe(Bool)
+ *(*bool)(v.ptr) = x
+}
+
+// SetBytes sets v's underlying value.
+// It panics if v's underlying value is not a slice of bytes.
+func (v Value) SetBytes(x []byte) {
+ v.mustBeAssignable()
+ v.mustBe(Slice)
+ if v.typ.Elem().Kind() != Uint8 {
+ panic("reflect.Value.SetBytes of non-byte slice")
+ }
+ *(*[]byte)(v.ptr) = x
+}
+
+// setRunes sets v's underlying value.
+// It panics if v's underlying value is not a slice of runes (int32s).
+func (v Value) setRunes(x []rune) {
+ v.mustBeAssignable()
+ v.mustBe(Slice)
+ if v.typ.Elem().Kind() != Int32 {
+ panic("reflect.Value.setRunes of non-rune slice")
+ }
+ *(*[]rune)(v.ptr) = x
+}
+
+// SetComplex sets v's underlying value to x.
+// It panics if v's Kind is not Complex64 or Complex128, or if CanSet() is false.
+func (v Value) SetComplex(x complex128) {
+ v.mustBeAssignable()
+ switch k := v.kind(); k {
+ default:
+ panic(&ValueError{"reflect.Value.SetComplex", v.kind()})
+ case Complex64:
+ *(*complex64)(v.ptr) = complex64(x)
+ case Complex128:
+ *(*complex128)(v.ptr) = x
+ }
+}
+
+// SetFloat sets v's underlying value to x.
+// It panics if v's Kind is not Float32 or Float64, or if CanSet() is false.
+func (v Value) SetFloat(x float64) {
+ v.mustBeAssignable()
+ switch k := v.kind(); k {
+ default:
+ panic(&ValueError{"reflect.Value.SetFloat", v.kind()})
+ case Float32:
+ *(*float32)(v.ptr) = float32(x)
+ case Float64:
+ *(*float64)(v.ptr) = x
+ }
+}
+
+// SetInt sets v's underlying value to x.
+// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64, or if CanSet() is false.
+func (v Value) SetInt(x int64) {
+ v.mustBeAssignable()
+ switch k := v.kind(); k {
+ default:
+ panic(&ValueError{"reflect.Value.SetInt", v.kind()})
+ case Int:
+ *(*int)(v.ptr) = int(x)
+ case Int8:
+ *(*int8)(v.ptr) = int8(x)
+ case Int16:
+ *(*int16)(v.ptr) = int16(x)
+ case Int32:
+ *(*int32)(v.ptr) = int32(x)
+ case Int64:
+ *(*int64)(v.ptr) = x
+ }
+}
+
+// SetLen sets v's length to n.
+// It panics if v's Kind is not Slice or if n is negative or
+// greater than the capacity of the slice.
+func (v Value) SetLen(n int) {
+ v.mustBeAssignable()
+ v.mustBe(Slice)
+ s := (*unsafeheader.Slice)(v.ptr)
+ if uint(n) > uint(s.Cap) {
+ panic("reflect: slice length out of range in SetLen")
+ }
+ s.Len = n
+}
+
+// SetCap sets v's capacity to n.
+// It panics if v's Kind is not Slice or if n is smaller than the length or
+// greater than the capacity of the slice.
+func (v Value) SetCap(n int) {
+ v.mustBeAssignable()
+ v.mustBe(Slice)
+ s := (*unsafeheader.Slice)(v.ptr)
+ if n < s.Len || n > s.Cap {
+ panic("reflect: slice capacity out of range in SetCap")
+ }
+ s.Cap = n
+}
+
+// SetMapIndex sets the element associated with key in the map v to elem.
+// It panics if v's Kind is not Map.
+// If elem is the zero Value, SetMapIndex deletes the key from the map.
+// Otherwise if v holds a nil map, SetMapIndex will panic.
+// As in Go, key's value must be assignable to the map's key type,
+// and elem's value must be assignable to the map's elem type.
+func (v Value) SetMapIndex(key, elem Value) {
+ v.mustBe(Map)
+ v.mustBeExported()
+ key.mustBeExported()
+ tt := (*mapType)(unsafe.Pointer(v.typ))
+
+ if (tt.key == stringType || key.kind() == String) && tt.key == key.typ && tt.elem.size <= maxValSize {
+ k := *(*string)(key.ptr)
+ if elem.typ == nil {
+ mapdelete_faststr(v.typ, v.pointer(), k)
+ return
+ }
+ elem.mustBeExported()
+ elem = elem.assignTo("reflect.Value.SetMapIndex", tt.elem, nil)
+ var e unsafe.Pointer
+ if elem.flag&flagIndir != 0 {
+ e = elem.ptr
+ } else {
+ e = unsafe.Pointer(&elem.ptr)
+ }
+ mapassign_faststr(v.typ, v.pointer(), k, e)
+ return
+ }
+
+ key = key.assignTo("reflect.Value.SetMapIndex", tt.key, nil)
+ var k unsafe.Pointer
+ if key.flag&flagIndir != 0 {
+ k = key.ptr
+ } else {
+ k = unsafe.Pointer(&key.ptr)
+ }
+ if elem.typ == nil {
+ mapdelete(v.typ, v.pointer(), k)
+ return
+ }
+ elem.mustBeExported()
+ elem = elem.assignTo("reflect.Value.SetMapIndex", tt.elem, nil)
+ var e unsafe.Pointer
+ if elem.flag&flagIndir != 0 {
+ e = elem.ptr
+ } else {
+ e = unsafe.Pointer(&elem.ptr)
+ }
+ mapassign(v.typ, v.pointer(), k, e)
+}
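+
+// Illustrative sketch (not part of this file): the zero Value deletes, so
+// SetMapIndex covers both assignment and deletion:
+//
+//	m := reflect.ValueOf(map[string]int{"a": 1})
+//	m.SetMapIndex(reflect.ValueOf("b"), reflect.ValueOf(2)) // m["b"] = 2
+//	m.SetMapIndex(reflect.ValueOf("a"), reflect.Value{})    // delete(m, "a")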
+
+// SetUint sets v's underlying value to x.
+// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64, or if CanSet() is false.
+func (v Value) SetUint(x uint64) {
+ v.mustBeAssignable()
+ switch k := v.kind(); k {
+ default:
+ panic(&ValueError{"reflect.Value.SetUint", v.kind()})
+ case Uint:
+ *(*uint)(v.ptr) = uint(x)
+ case Uint8:
+ *(*uint8)(v.ptr) = uint8(x)
+ case Uint16:
+ *(*uint16)(v.ptr) = uint16(x)
+ case Uint32:
+ *(*uint32)(v.ptr) = uint32(x)
+ case Uint64:
+ *(*uint64)(v.ptr) = x
+ case Uintptr:
+ *(*uintptr)(v.ptr) = uintptr(x)
+ }
+}
+
+// SetPointer sets the unsafe.Pointer value v to x.
+// It panics if v's Kind is not UnsafePointer.
+func (v Value) SetPointer(x unsafe.Pointer) {
+ v.mustBeAssignable()
+ v.mustBe(UnsafePointer)
+ *(*unsafe.Pointer)(v.ptr) = x
+}
+
+// SetString sets v's underlying value to x.
+// It panics if v's Kind is not String or if CanSet() is false.
+func (v Value) SetString(x string) {
+ v.mustBeAssignable()
+ v.mustBe(String)
+ *(*string)(v.ptr) = x
+}
+
+// Slice returns v[i:j].
+// It panics if v's Kind is not Array, Slice, or String, or if v is an unaddressable array,
+// or if the indexes are out of bounds.
+func (v Value) Slice(i, j int) Value {
+ var (
+ cap int
+ typ *sliceType
+ base unsafe.Pointer
+ )
+ switch kind := v.kind(); kind {
+ default:
+ panic(&ValueError{"reflect.Value.Slice", v.kind()})
+
+ case Array:
+ if v.flag&flagAddr == 0 {
+ panic("reflect.Value.Slice: slice of unaddressable array")
+ }
+ tt := (*arrayType)(unsafe.Pointer(v.typ))
+ cap = int(tt.len)
+ typ = (*sliceType)(unsafe.Pointer(tt.slice))
+ base = v.ptr
+
+ case Slice:
+ typ = (*sliceType)(unsafe.Pointer(v.typ))
+ s := (*unsafeheader.Slice)(v.ptr)
+ base = s.Data
+ cap = s.Cap
+
+ case String:
+ s := (*unsafeheader.String)(v.ptr)
+ if i < 0 || j < i || j > s.Len {
+ panic("reflect.Value.Slice: string slice index out of bounds")
+ }
+ var t unsafeheader.String
+ if i < s.Len {
+ t = unsafeheader.String{Data: arrayAt(s.Data, i, 1, "i < s.Len"), Len: j - i}
+ }
+ return Value{v.typ, unsafe.Pointer(&t), v.flag}
+ }
+
+ if i < 0 || j < i || j > cap {
+ panic("reflect.Value.Slice: slice index out of bounds")
+ }
+
+ // Declare slice so that gc can see the base pointer in it.
+ var x []unsafe.Pointer
+
+ // Reinterpret as *unsafeheader.Slice to edit.
+ s := (*unsafeheader.Slice)(unsafe.Pointer(&x))
+ s.Len = j - i
+ s.Cap = cap - i
+ if cap-i > 0 {
+ s.Data = arrayAt(base, i, typ.elem.Size(), "i < cap")
+ } else {
+ // do not advance pointer, to avoid pointing beyond end of slice
+ s.Data = base
+ }
+
+ fl := v.flag.ro() | flagIndir | flag(Slice)
+ return Value{typ.common(), unsafe.Pointer(&x), fl}
+}
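+
+// Illustrative sketch (not part of this file): the resulting capacity follows
+// the two-index slice expression rule, cap-i:
+//
+//	v := reflect.ValueOf([]int{0, 1, 2, 3})
+//	w := v.Slice(1, 3)
+//	_ = w.Len() // 2
+//	_ = w.Cap() // 3, the original cap 4 minus i=1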
+
+// Slice3 is the 3-index form of the slice operation: it returns v[i:j:k].
+// It panics if v's Kind is not Array or Slice, or if v is an unaddressable array,
+// or if the indexes are out of bounds.
+func (v Value) Slice3(i, j, k int) Value {
+ var (
+ cap int
+ typ *sliceType
+ base unsafe.Pointer
+ )
+ switch kind := v.kind(); kind {
+ default:
+ panic(&ValueError{"reflect.Value.Slice3", v.kind()})
+
+ case Array:
+ if v.flag&flagAddr == 0 {
+ panic("reflect.Value.Slice3: slice of unaddressable array")
+ }
+ tt := (*arrayType)(unsafe.Pointer(v.typ))
+ cap = int(tt.len)
+ typ = (*sliceType)(unsafe.Pointer(tt.slice))
+ base = v.ptr
+
+ case Slice:
+ typ = (*sliceType)(unsafe.Pointer(v.typ))
+ s := (*unsafeheader.Slice)(v.ptr)
+ base = s.Data
+ cap = s.Cap
+ }
+
+ if i < 0 || j < i || k < j || k > cap {
+ panic("reflect.Value.Slice3: slice index out of bounds")
+ }
+
+ // Declare slice so that the garbage collector
+ // can see the base pointer in it.
+ var x []unsafe.Pointer
+
+ // Reinterpret as *unsafeheader.Slice to edit.
+ s := (*unsafeheader.Slice)(unsafe.Pointer(&x))
+ s.Len = j - i
+ s.Cap = k - i
+ if k-i > 0 {
+ s.Data = arrayAt(base, i, typ.elem.Size(), "i < k <= cap")
+ } else {
+ // do not advance pointer, to avoid pointing beyond end of slice
+ s.Data = base
+ }
+
+ fl := v.flag.ro() | flagIndir | flag(Slice)
+ return Value{typ.common(), unsafe.Pointer(&x), fl}
+}
+
+// String returns v's underlying value, as a string.
+// String is a special case because of Go's String method convention.
+// Unlike the other getters, it does not panic if v's Kind is not String.
+// Instead, it returns a string of the form "<T value>" where T is v's type.
+// The fmt package treats Values specially. It does not call their String
+// method implicitly but instead prints the concrete values they hold.
+func (v Value) String() string {
+ // stringNonString is split out to keep String inlineable for string kinds.
+ if v.kind() == String {
+ return *(*string)(v.ptr)
+ }
+ return v.stringNonString()
+}
+
+func (v Value) stringNonString() string {
+ if v.kind() == Invalid {
+ return "<invalid Value>"
+ }
+ // If you call String on a reflect.Value of other type, it's better to
+ // print something than to panic. Useful in debugging.
+ return "<" + v.Type().String() + " Value>"
+}
+
+// TryRecv attempts to receive a value from the channel v but will not block.
+// It panics if v's Kind is not Chan.
+// If the receive delivers a value, x is the transferred value and ok is true.
+// If the receive cannot finish without blocking, x is the zero Value and ok is false.
+// If the channel is closed, x is the zero value for the channel's element type and ok is false.
+func (v Value) TryRecv() (x Value, ok bool) {
+ v.mustBe(Chan)
+ v.mustBeExported()
+ return v.recv(true)
+}
+
+// TrySend attempts to send x on the channel v but will not block.
+// It panics if v's Kind is not Chan.
+// It reports whether the value was sent.
+// As in Go, x's value must be assignable to the channel's element type.
+func (v Value) TrySend(x Value) bool {
+ v.mustBe(Chan)
+ v.mustBeExported()
+ return v.send(x, true)
+}
+
+// Type returns v's type.
+func (v Value) Type() Type {
+ if v.flag != 0 && v.flag&flagMethod == 0 {
+ return v.typ
+ }
+ return v.typeSlow()
+}
+
+func (v Value) typeSlow() Type {
+ if v.flag == 0 {
+ panic(&ValueError{"reflect.Value.Type", Invalid})
+ }
+ if v.flag&flagMethod == 0 {
+ return v.typ
+ }
+
+ // Method value.
+ // v.typ describes the receiver, not the method type.
+ i := int(v.flag) >> flagMethodShift
+ if v.typ.Kind() == Interface {
+ // Method on interface.
+ tt := (*interfaceType)(unsafe.Pointer(v.typ))
+ if uint(i) >= uint(len(tt.methods)) {
+ panic("reflect: internal error: invalid method index")
+ }
+ m := &tt.methods[i]
+ return v.typ.typeOff(m.typ)
+ }
+ // Method on concrete type.
+ ms := v.typ.exportedMethods()
+ if uint(i) >= uint(len(ms)) {
+ panic("reflect: internal error: invalid method index")
+ }
+ m := ms[i]
+ return v.typ.typeOff(m.mtyp)
+}
+
+// CanUint reports whether Uint can be used without panicking.
+func (v Value) CanUint() bool {
+ switch v.kind() {
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return true
+ default:
+ return false
+ }
+}
+
+// Uint returns v's underlying value, as a uint64.
+// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
+func (v Value) Uint() uint64 {
+ k := v.kind()
+ p := v.ptr
+ switch k {
+ case Uint:
+ return uint64(*(*uint)(p))
+ case Uint8:
+ return uint64(*(*uint8)(p))
+ case Uint16:
+ return uint64(*(*uint16)(p))
+ case Uint32:
+ return uint64(*(*uint32)(p))
+ case Uint64:
+ return *(*uint64)(p)
+ case Uintptr:
+ return uint64(*(*uintptr)(p))
+ }
+ panic(&ValueError{"reflect.Value.Uint", v.kind()})
+}
+
+//go:nocheckptr
+// This prevents inlining Value.UnsafeAddr when -d=checkptr is enabled,
+// which ensures cmd/compile can recognize unsafe.Pointer(v.UnsafeAddr())
+// and make an exception.
+
+// UnsafeAddr returns a pointer to v's data, as a uintptr.
+// It is for advanced clients that also import the "unsafe" package.
+// It panics if v is not addressable.
+//
+// It's preferred to use uintptr(Value.Addr().UnsafePointer()) to get the equivalent result.
+func (v Value) UnsafeAddr() uintptr {
+ if v.typ == nil {
+ panic(&ValueError{"reflect.Value.UnsafeAddr", Invalid})
+ }
+ if v.flag&flagAddr == 0 {
+ panic("reflect.Value.UnsafeAddr of unaddressable value")
+ }
+ return uintptr(v.ptr)
+}
+
+// UnsafePointer returns v's value as an unsafe.Pointer.
+// It panics if v's Kind is not Chan, Func, Map, Pointer, Slice, or UnsafePointer.
+//
+// If v's Kind is Func, the returned pointer is an underlying
+// code pointer, but not necessarily enough to identify a
+// single function uniquely. The only guarantee is that the
+// result is zero if and only if v is a nil func Value.
+//
+// If v's Kind is Slice, the returned pointer is to the first
+// element of the slice. If the slice is nil the returned value
+// is nil. If the slice is empty but non-nil the return value is non-nil.
+func (v Value) UnsafePointer() unsafe.Pointer {
+ k := v.kind()
+ switch k {
+ case Pointer:
+ if v.typ.ptrdata == 0 {
+ // Since it is a not-in-heap pointer, all pointers to the heap are
+ // forbidden! See comment in Value.Elem and issue #48399.
+ if !verifyNotInHeapPtr(*(*uintptr)(v.ptr)) {
+ panic("reflect: reflect.Value.UnsafePointer on an invalid notinheap pointer")
+ }
+ return *(*unsafe.Pointer)(v.ptr)
+ }
+ fallthrough
+ case Chan, Map, UnsafePointer:
+ return v.pointer()
+ case Func:
+ if v.flag&flagMethod != 0 {
+ // As the doc comment says, the returned pointer is an
+ // underlying code pointer but not necessarily enough to
+ // identify a single function uniquely. All method expressions
+ // created via reflect have the same underlying code pointer,
+ // so their Pointers are equal. The function used here must
+ // match the one used in makeMethodValue.
+ code := methodValueCallCodePtr()
+ return *(*unsafe.Pointer)(unsafe.Pointer(&code))
+ }
+ p := v.pointer()
+ // Non-nil func value points at data block.
+ // First word of data block is actual code.
+ if p != nil {
+ p = *(*unsafe.Pointer)(p)
+ }
+ return p
+
+ case Slice:
+ return (*unsafeheader.Slice)(v.ptr).Data
+ }
+ panic(&ValueError{"reflect.Value.UnsafePointer", v.kind()})
+}
+
+// StringHeader is the runtime representation of a string.
+// It cannot be used safely or portably and its representation may
+// change in a later release.
+// Moreover, the Data field is not sufficient to guarantee the data
+// it references will not be garbage collected, so programs must keep
+// a separate, correctly typed pointer to the underlying data.
+type StringHeader struct {
+ Data uintptr
+ Len int
+}
+
+// SliceHeader is the runtime representation of a slice.
+// It cannot be used safely or portably and its representation may
+// change in a later release.
+// Moreover, the Data field is not sufficient to guarantee the data
+// it references will not be garbage collected, so programs must keep
+// a separate, correctly typed pointer to the underlying data.
+type SliceHeader struct {
+ Data uintptr
+ Len int
+ Cap int
+}
+
+func typesMustMatch(what string, t1, t2 Type) {
+ if t1 != t2 {
+ panic(what + ": " + t1.String() + " != " + t2.String())
+ }
+}
+
+// arrayAt returns the i-th element of p,
+// an array whose elements are eltSize bytes wide.
+// The array pointed at by p must have at least i+1 elements:
+// it is invalid (but impossible to check here) to pass i >= len,
+// because then the result will point outside the array.
+// whySafe must explain why i < len. (Passing "i < len" is fine;
+// the benefit is to surface this assumption at the call site.)
+func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer {
+ return add(p, uintptr(i)*eltSize, "i < len")
+}
+
+// grow grows the slice s so that it can hold extra more values, allocating
+// more capacity if needed. It also returns the old and new slice lengths.
+func grow(s Value, extra int) (Value, int, int) {
+ i0 := s.Len()
+ i1 := i0 + extra
+ if i1 < i0 {
+ panic("reflect.Append: slice overflow")
+ }
+ m := s.Cap()
+ if i1 <= m {
+ return s.Slice(0, i1), i0, i1
+ }
+ if m == 0 {
+ m = extra
+ } else {
+ const threshold = 256
+ for m < i1 {
+ if i0 < threshold {
+ m += m
+ } else {
+ m += (m + 3*threshold) / 4
+ }
+ }
+ }
+ t := MakeSlice(s.Type(), i1, m)
+ Copy(t, s)
+ return t, i0, i1
+}
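+
+// Worked example of the growth policy above (illustrative): while the old
+// length i0 is below the 256-element threshold, capacity doubles each round
+// (m += m); at or above it, each round adds (m + 3*256)/4, so m = 256 still
+// doubles to 512, while m = 1024 grows to 1024 + (1024+768)/4 = 1472, a rate
+// that approaches 25% as m gets large.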
+
+// Append appends the values x to a slice s and returns the resulting slice.
+// As in Go, each x's value must be assignable to the slice's element type.
+func Append(s Value, x ...Value) Value {
+ s.mustBe(Slice)
+ s, i0, i1 := grow(s, len(x))
+ for i, j := i0, 0; i < i1; i, j = i+1, j+1 {
+ s.Index(i).Set(x[j])
+ }
+ return s
+}
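+
+// Illustrative sketch (not part of this file): Append mirrors the built-in
+// append, with every element travelling as a reflect.Value:
+//
+//	s := reflect.ValueOf([]int{1, 2})
+//	s = reflect.Append(s, reflect.ValueOf(3))
+//	out := s.Interface().([]int) // []int{1, 2, 3}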
+
+// AppendSlice appends a slice t to a slice s and returns the resulting slice.
+// The slices s and t must have the same element type.
+func AppendSlice(s, t Value) Value {
+ s.mustBe(Slice)
+ t.mustBe(Slice)
+ typesMustMatch("reflect.AppendSlice", s.Type().Elem(), t.Type().Elem())
+ s, i0, i1 := grow(s, t.Len())
+ Copy(s.Slice(i0, i1), t)
+ return s
+}
+
+// Copy copies the contents of src into dst until either
+// dst has been filled or src has been exhausted.
+// It returns the number of elements copied.
+// Dst and src each must have kind Slice or Array, and
+// dst and src must have the same element type.
+//
+// As a special case, src can have kind String if the element type of dst is kind Uint8.
+func Copy(dst, src Value) int {
+ dk := dst.kind()
+ if dk != Array && dk != Slice {
+ panic(&ValueError{"reflect.Copy", dk})
+ }
+ if dk == Array {
+ dst.mustBeAssignable()
+ }
+ dst.mustBeExported()
+
+ sk := src.kind()
+ var stringCopy bool
+ if sk != Array && sk != Slice {
+ stringCopy = sk == String && dst.typ.Elem().Kind() == Uint8
+ if !stringCopy {
+ panic(&ValueError{"reflect.Copy", sk})
+ }
+ }
+ src.mustBeExported()
+
+ de := dst.typ.Elem()
+ if !stringCopy {
+ se := src.typ.Elem()
+ typesMustMatch("reflect.Copy", de, se)
+ }
+
+ var ds, ss unsafeheader.Slice
+ if dk == Array {
+ ds.Data = dst.ptr
+ ds.Len = dst.Len()
+ ds.Cap = ds.Len
+ } else {
+ ds = *(*unsafeheader.Slice)(dst.ptr)
+ }
+ if sk == Array {
+ ss.Data = src.ptr
+ ss.Len = src.Len()
+ ss.Cap = ss.Len
+ } else if sk == Slice {
+ ss = *(*unsafeheader.Slice)(src.ptr)
+ } else {
+ sh := *(*unsafeheader.String)(src.ptr)
+ ss.Data = sh.Data
+ ss.Len = sh.Len
+ ss.Cap = sh.Len
+ }
+
+ return typedslicecopy(de.common(), ds, ss)
+}
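+
+// Illustrative sketch (not part of this file): the string special case lets
+// Copy fill a byte slice from a string, like the built-in copy:
+//
+//	dst := reflect.ValueOf(make([]byte, 2))
+//	n := reflect.Copy(dst, reflect.ValueOf("go")) // n == 2, dst now holds "go"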
+
+// A runtimeSelect is a single case passed to rselect.
+// This must match ../runtime/select.go:/runtimeSelect
+type runtimeSelect struct {
+ dir SelectDir // SelectSend, SelectRecv or SelectDefault
+ typ *rtype // channel type
+ ch unsafe.Pointer // channel
+ val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
+}
+
+// rselect runs a select. It returns the index of the chosen case.
+// If the case was a receive, val is filled in with the received value.
+// The conventional OK bool indicates whether the receive corresponds
+// to a sent value.
+//
+//go:noescape
+func rselect([]runtimeSelect) (chosen int, recvOK bool)
+
+// A SelectDir describes the communication direction of a select case.
+type SelectDir int
+
+// NOTE: These values must match ../runtime/select.go:/selectDir.
+
+const (
+ _ SelectDir = iota
+ SelectSend // case Chan <- Send
+ SelectRecv // case <-Chan:
+ SelectDefault // default
+)
+
+// A SelectCase describes a single case in a select operation.
+// The kind of case depends on Dir, the communication direction.
+//
+// If Dir is SelectDefault, the case represents a default case.
+// Chan and Send must be zero Values.
+//
+// If Dir is SelectSend, the case represents a send operation.
+// Normally Chan's underlying value must be a channel, and Send's underlying value must be
+// assignable to the channel's element type. As a special case, if Chan is a zero Value,
+// then the case is ignored, and the field Send will also be ignored and may be either zero
+// or non-zero.
+//
+// If Dir is SelectRecv, the case represents a receive operation.
+// Normally Chan's underlying value must be a channel and Send must be a zero Value.
+// If Chan is a zero Value, then the case is ignored, but Send must still be a zero Value.
+// When a receive operation is selected, the received Value is returned by Select.
+type SelectCase struct {
+ Dir SelectDir // direction of case
+ Chan Value // channel to use (for send or receive)
+ Send Value // value to send (for send)
+}
+
+// Select executes a select operation described by the list of cases.
+// Like the Go select statement, it blocks until at least one of the cases
+// can proceed, makes a uniform pseudo-random choice,
+// and then executes that case. It returns the index of the chosen case
+// and, if that case was a receive operation, the value received and a
+// boolean indicating whether the value corresponds to a send on the channel
+// (as opposed to a zero value received because the channel is closed).
+// Select supports a maximum of 65536 cases.
+func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) {
+ if len(cases) > 65536 {
+ panic("reflect.Select: too many cases (max 65536)")
+ }
+ // NOTE: Do not trust that caller is not modifying cases data underfoot.
+ // The range is safe because the caller cannot modify our copy of the len
+ // and each iteration makes its own copy of the value c.
+ var runcases []runtimeSelect
+ if len(cases) > 4 {
+ // Slice is heap allocated due to runtime dependent capacity.
+ runcases = make([]runtimeSelect, len(cases))
+ } else {
+ // Slice can be stack allocated due to constant capacity.
+ runcases = make([]runtimeSelect, len(cases), 4)
+ }
+
+ haveDefault := false
+ for i, c := range cases {
+ rc := &runcases[i]
+ rc.dir = c.Dir
+ switch c.Dir {
+ default:
+ panic("reflect.Select: invalid Dir")
+
+ case SelectDefault: // default
+ if haveDefault {
+ panic("reflect.Select: multiple default cases")
+ }
+ haveDefault = true
+ if c.Chan.IsValid() {
+ panic("reflect.Select: default case has Chan value")
+ }
+ if c.Send.IsValid() {
+ panic("reflect.Select: default case has Send value")
+ }
+
+ case SelectSend:
+ ch := c.Chan
+ if !ch.IsValid() {
+ break
+ }
+ ch.mustBe(Chan)
+ ch.mustBeExported()
+ tt := (*chanType)(unsafe.Pointer(ch.typ))
+ if ChanDir(tt.dir)&SendDir == 0 {
+ panic("reflect.Select: SendDir case using recv-only channel")
+ }
+ rc.ch = ch.pointer()
+ rc.typ = &tt.rtype
+ v := c.Send
+ if !v.IsValid() {
+ panic("reflect.Select: SendDir case missing Send value")
+ }
+ v.mustBeExported()
+ v = v.assignTo("reflect.Select", tt.elem, nil)
+ if v.flag&flagIndir != 0 {
+ rc.val = v.ptr
+ } else {
+ rc.val = unsafe.Pointer(&v.ptr)
+ }
+
+ case SelectRecv:
+ if c.Send.IsValid() {
+ panic("reflect.Select: RecvDir case has Send value")
+ }
+ ch := c.Chan
+ if !ch.IsValid() {
+ break
+ }
+ ch.mustBe(Chan)
+ ch.mustBeExported()
+ tt := (*chanType)(unsafe.Pointer(ch.typ))
+ if ChanDir(tt.dir)&RecvDir == 0 {
+ panic("reflect.Select: RecvDir case using send-only channel")
+ }
+ rc.ch = ch.pointer()
+ rc.typ = &tt.rtype
+ rc.val = unsafe_New(tt.elem)
+ }
+ }
+
+ chosen, recvOK = rselect(runcases)
+ if runcases[chosen].dir == SelectRecv {
+ tt := (*chanType)(unsafe.Pointer(runcases[chosen].typ))
+ t := tt.elem
+ p := runcases[chosen].val
+ fl := flag(t.Kind())
+ if ifaceIndir(t) {
+ recv = Value{t, p, fl | flagIndir}
+ } else {
+ recv = Value{t, *(*unsafe.Pointer)(p), fl}
+ }
+ }
+ return chosen, recv, recvOK
+}
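+
+// Illustrative sketch (not part of this file): a two-case select built at
+// runtime, receiving from a ready channel or falling through to default:
+//
+//	ch := make(chan int, 1)
+//	ch <- 7
+//	cases := []reflect.SelectCase{
+//		{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)},
+//		{Dir: reflect.SelectDefault},
+//	}
+//	chosen, recv, ok := reflect.Select(cases)
+//	// chosen == 0, recv.Int() == 7, ok == true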
+
+/*
+ * constructors
+ */
+
+// implemented in package runtime
+func unsafe_New(*rtype) unsafe.Pointer
+func unsafe_NewArray(*rtype, int) unsafe.Pointer
+
+// MakeSlice creates a new zero-initialized slice value
+// for the specified slice type, length, and capacity.
+func MakeSlice(typ Type, len, cap int) Value {
+ if typ.Kind() != Slice {
+ panic("reflect.MakeSlice of non-slice type")
+ }
+ if len < 0 {
+ panic("reflect.MakeSlice: negative len")
+ }
+ if cap < 0 {
+ panic("reflect.MakeSlice: negative cap")
+ }
+ if len > cap {
+ panic("reflect.MakeSlice: len > cap")
+ }
+
+ s := unsafeheader.Slice{Data: unsafe_NewArray(typ.Elem().(*rtype), cap), Len: len, Cap: cap}
+ return Value{typ.(*rtype), unsafe.Pointer(&s), flagIndir | flag(Slice)}
+}
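+
+// Illustrative sketch (not part of this file): MakeSlice plus Index/Set is
+// the reflect spelling of make plus indexed assignment:
+//
+//	t := reflect.TypeOf([]string(nil))
+//	s := reflect.MakeSlice(t, 1, 4) // like make([]string, 1, 4)
+//	s.Index(0).SetString("hello")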
+
+// MakeChan creates a new channel with the specified type and buffer size.
+func MakeChan(typ Type, buffer int) Value {
+ if typ.Kind() != Chan {
+ panic("reflect.MakeChan of non-chan type")
+ }
+ if buffer < 0 {
+ panic("reflect.MakeChan: negative buffer size")
+ }
+ if typ.ChanDir() != BothDir {
+ panic("reflect.MakeChan: unidirectional channel type")
+ }
+ t := typ.(*rtype)
+ ch := makechan(t, buffer)
+ return Value{t, ch, flag(Chan)}
+}
+
+// MakeMap creates a new map with the specified type.
+func MakeMap(typ Type) Value {
+ return MakeMapWithSize(typ, 0)
+}
+
+// MakeMapWithSize creates a new map with the specified type
+// and initial space for approximately n elements.
+func MakeMapWithSize(typ Type, n int) Value {
+ if typ.Kind() != Map {
+ panic("reflect.MakeMapWithSize of non-map type")
+ }
+ t := typ.(*rtype)
+ m := makemap(t, n)
+ return Value{t, m, flag(Map)}
+}
+
+// Indirect returns the value that v points to.
+// If v is a nil pointer, Indirect returns a zero Value.
+// If v is not a pointer, Indirect returns v.
+func Indirect(v Value) Value {
+ if v.Kind() != Pointer {
+ return v
+ }
+ return v.Elem()
+}
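+
+// Illustrative sketch (not part of this file):
+//
+//	x := 1
+//	reflect.Indirect(reflect.ValueOf(&x)).Int() // 1, follows the pointer
+//	reflect.Indirect(reflect.ValueOf(x)).Int()  // 1, v returned unchanged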
+
+// ValueOf returns a new Value initialized to the concrete value
+// stored in the interface i. ValueOf(nil) returns the zero Value.
+func ValueOf(i any) Value {
+ if i == nil {
+ return Value{}
+ }
+
+ // TODO: Maybe allow contents of a Value to live on the stack.
+ // For now we make the contents always escape to the heap. It
+ // makes life easier in a few places (see chanrecv/mapassign
+ // comment below).
+ escapes(i)
+
+ return unpackEface(i)
+}
+
+// Zero returns a Value representing the zero value for the specified type.
+// The result is different from the zero value of the Value struct,
+// which represents no value at all.
+// For example, Zero(TypeOf(42)) returns a Value with Kind Int and value 0.
+// The returned value is neither addressable nor settable.
+func Zero(typ Type) Value {
+ if typ == nil {
+ panic("reflect: Zero(nil)")
+ }
+ t := typ.(*rtype)
+ fl := flag(t.Kind())
+ if ifaceIndir(t) {
+ var p unsafe.Pointer
+ if t.size <= maxZero {
+ p = unsafe.Pointer(&zeroVal[0])
+ } else {
+ p = unsafe_New(t)
+ }
+ return Value{t, p, fl | flagIndir}
+ }
+ return Value{t, nil, fl}
+}
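+
+// Illustrative sketch (not part of this file): Zero yields a usable but
+// non-settable value; use New(typ).Elem() when a settable one is needed:
+//
+//	z := reflect.Zero(reflect.TypeOf(0))       // Kind Int, value 0, not settable
+//	p := reflect.New(reflect.TypeOf(0)).Elem() // settable int, initially 0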
+
+// must match declarations in runtime/map.go.
+const maxZero = 1024
+
+//go:linkname zeroVal runtime.zeroVal
+var zeroVal [maxZero]byte
+
+// New returns a Value representing a pointer to a new zero value
+// for the specified type. That is, the returned Value's Type is PointerTo(typ).
+func New(typ Type) Value {
+ if typ == nil {
+ panic("reflect: New(nil)")
+ }
+ t := typ.(*rtype)
+ pt := t.ptrTo()
+ if ifaceIndir(pt) {
+ // This is a pointer to a go:notinheap type.
+ panic("reflect: New of type that may not be allocated in heap (possibly undefined cgo C type)")
+ }
+ ptr := unsafe_New(t)
+ fl := flag(Pointer)
+ return Value{pt, ptr, fl}
+}
+
+// NewAt returns a Value representing a pointer to a value of the
+// specified type, using p as that pointer.
+func NewAt(typ Type, p unsafe.Pointer) Value {
+ fl := flag(Pointer)
+ t := typ.(*rtype)
+ return Value{t.ptrTo(), p, fl}
+}
+
+// assignTo returns a value v that can be assigned directly to dst.
+// It panics if v is not assignable to dst.
+// For a conversion to an interface type, target, if not nil,
+// is a suggested scratch space to use.
+// target must be initialized memory (or nil).
+func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value {
+ if v.flag&flagMethod != 0 {
+ v = makeMethodValue(context, v)
+ }
+
+ switch {
+ case directlyAssignable(dst, v.typ):
+ // Overwrite type so that they match.
+ // Same memory layout, so no harm done.
+ fl := v.flag&(flagAddr|flagIndir) | v.flag.ro()
+ fl |= flag(dst.Kind())
+ return Value{dst, v.ptr, fl}
+
+ case implements(dst, v.typ):
+ if v.Kind() == Interface && v.IsNil() {
+ // A nil ReadWriter passed to nil Reader is OK,
+ // but using ifaceE2I below will panic.
+ // Avoid the panic by returning a nil dst (e.g., Reader) explicitly.
+ return Value{dst, nil, flag(Interface)}
+ }
+ x := valueInterface(v, false)
+ if target == nil {
+ target = unsafe_New(dst)
+ }
+ if dst.NumMethod() == 0 {
+ *(*any)(target) = x
+ } else {
+ ifaceE2I(dst, x, target)
+ }
+ return Value{dst, target, flagIndir | flag(Interface)}
+ }
+
+ // Failed.
+ panic(context + ": value of type " + v.typ.String() + " is not assignable to type " + dst.String())
+}
+
+// Convert returns the value v converted to type t.
+// If the usual Go conversion rules do not allow conversion
+// of the value v to type t, or if converting v to type t panics, Convert panics.
+func (v Value) Convert(t Type) Value {
+ if v.flag&flagMethod != 0 {
+ v = makeMethodValue("Convert", v)
+ }
+ op := convertOp(t.common(), v.typ)
+ if op == nil {
+ panic("reflect.Value.Convert: value of type " + v.typ.String() + " cannot be converted to type " + t.String())
+ }
+ return op(v, t)
+}
+
+// CanConvert reports whether the value v can be converted to type t.
+// If v.CanConvert(t) returns true then v.Convert(t) will not panic.
+func (v Value) CanConvert(t Type) bool {
+ vt := v.Type()
+ if !vt.ConvertibleTo(t) {
+ return false
+ }
+ // Currently the only conversion that is OK in terms of type
+ // but that can panic depending on the value is converting
+ // from slice to pointer-to-array.
+ if vt.Kind() == Slice && t.Kind() == Pointer && t.Elem().Kind() == Array {
+ n := t.Elem().Len()
+ if n > v.Len() {
+ return false
+ }
+ }
+ return true
+}
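+
+// Illustrative sketch (not part of this file): the slice-to-array-pointer
+// case is exactly the length check above:
+//
+//	v := reflect.ValueOf([]int{1, 2, 3})
+//	v.CanConvert(reflect.TypeOf((*[2]int)(nil))) // true, 2 <= v.Len()
+//	v.CanConvert(reflect.TypeOf((*[4]int)(nil))) // false, 4 > v.Len()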
+
+// convertOp returns the function to convert a value of type src
+// to a value of type dst. If the conversion is illegal, convertOp returns nil.
+func convertOp(dst, src *rtype) func(Value, Type) Value {
+ switch src.Kind() {
+ case Int, Int8, Int16, Int32, Int64:
+ switch dst.Kind() {
+ case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return cvtInt
+ case Float32, Float64:
+ return cvtIntFloat
+ case String:
+ return cvtIntString
+ }
+
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ switch dst.Kind() {
+ case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return cvtUint
+ case Float32, Float64:
+ return cvtUintFloat
+ case String:
+ return cvtUintString
+ }
+
+ case Float32, Float64:
+ switch dst.Kind() {
+ case Int, Int8, Int16, Int32, Int64:
+ return cvtFloatInt
+ case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
+ return cvtFloatUint
+ case Float32, Float64:
+ return cvtFloat
+ }
+
+ case Complex64, Complex128:
+ switch dst.Kind() {
+ case Complex64, Complex128:
+ return cvtComplex
+ }
+
+ case String:
+ if dst.Kind() == Slice && dst.Elem().PkgPath() == "" {
+ switch dst.Elem().Kind() {
+ case Uint8:
+ return cvtStringBytes
+ case Int32:
+ return cvtStringRunes
+ }
+ }
+
+ case Slice:
+ if dst.Kind() == String && src.Elem().PkgPath() == "" {
+ switch src.Elem().Kind() {
+ case Uint8:
+ return cvtBytesString
+ case Int32:
+ return cvtRunesString
+ }
+ }
+ // "x is a slice, T is a pointer-to-array type,
+ // and the slice and array types have identical element types."
+ if dst.Kind() == Pointer && dst.Elem().Kind() == Array && src.Elem() == dst.Elem().Elem() {
+ return cvtSliceArrayPtr
+ }
+
+ case Chan:
+ if dst.Kind() == Chan && specialChannelAssignability(dst, src) {
+ return cvtDirect
+ }
+ }
+
+ // dst and src have same underlying type.
+ if haveIdenticalUnderlyingType(dst, src, false) {
+ return cvtDirect
+ }
+
+ // dst and src are non-defined pointer types with same underlying base type.
+ if dst.Kind() == Pointer && dst.Name() == "" &&
+ src.Kind() == Pointer && src.Name() == "" &&
+ haveIdenticalUnderlyingType(dst.Elem().common(), src.Elem().common(), false) {
+ return cvtDirect
+ }
+
+ if implements(dst, src) {
+ if src.Kind() == Interface {
+ return cvtI2I
+ }
+ return cvtT2I
+ }
+
+ return nil
+}
+
+// makeInt returns a Value of type t equal to bits (possibly truncated),
+// where t is a signed or unsigned int type.
+func makeInt(f flag, bits uint64, t Type) Value {
+ typ := t.common()
+ ptr := unsafe_New(typ)
+ switch typ.size {
+ case 1:
+ *(*uint8)(ptr) = uint8(bits)
+ case 2:
+ *(*uint16)(ptr) = uint16(bits)
+ case 4:
+ *(*uint32)(ptr) = uint32(bits)
+ case 8:
+ *(*uint64)(ptr) = bits
+ }
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
+}
+
+// makeFloat returns a Value of type t equal to v (possibly truncated to float32),
+// where t is a float32 or float64 type.
+func makeFloat(f flag, v float64, t Type) Value {
+ typ := t.common()
+ ptr := unsafe_New(typ)
+ switch typ.size {
+ case 4:
+ *(*float32)(ptr) = float32(v)
+ case 8:
+ *(*float64)(ptr) = v
+ }
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
+}
+
+// makeFloat32 returns a Value of type t equal to v, where t is a float32 type.
+func makeFloat32(f flag, v float32, t Type) Value {
+ typ := t.common()
+ ptr := unsafe_New(typ)
+ *(*float32)(ptr) = v
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
+}
+
+// makeComplex returns a Value of type t equal to v (possibly truncated to complex64),
+// where t is a complex64 or complex128 type.
+func makeComplex(f flag, v complex128, t Type) Value {
+ typ := t.common()
+ ptr := unsafe_New(typ)
+ switch typ.size {
+ case 8:
+ *(*complex64)(ptr) = complex64(v)
+ case 16:
+ *(*complex128)(ptr) = v
+ }
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
+}
+
+func makeString(f flag, v string, t Type) Value {
+ ret := New(t).Elem()
+ ret.SetString(v)
+ ret.flag = ret.flag&^flagAddr | f
+ return ret
+}
+
+func makeBytes(f flag, v []byte, t Type) Value {
+ ret := New(t).Elem()
+ ret.SetBytes(v)
+ ret.flag = ret.flag&^flagAddr | f
+ return ret
+}
+
+func makeRunes(f flag, v []rune, t Type) Value {
+ ret := New(t).Elem()
+ ret.setRunes(v)
+ ret.flag = ret.flag&^flagAddr | f
+ return ret
+}
+
+// These conversion functions are returned by convertOp
+// for classes of conversions. For example, the first function, cvtInt,
+// takes any value v of signed int type and returns the value converted
+// to type t, where t is any signed or unsigned int type.
+
+// convertOp: intXX -> [u]intXX
+func cvtInt(v Value, t Type) Value {
+ return makeInt(v.flag.ro(), uint64(v.Int()), t)
+}
+
+// convertOp: uintXX -> [u]intXX
+func cvtUint(v Value, t Type) Value {
+ return makeInt(v.flag.ro(), v.Uint(), t)
+}
+
+// convertOp: floatXX -> intXX
+func cvtFloatInt(v Value, t Type) Value {
+ return makeInt(v.flag.ro(), uint64(int64(v.Float())), t)
+}
+
+// convertOp: floatXX -> uintXX
+func cvtFloatUint(v Value, t Type) Value {
+ return makeInt(v.flag.ro(), uint64(v.Float()), t)
+}
+
+// convertOp: intXX -> floatXX
+func cvtIntFloat(v Value, t Type) Value {
+ return makeFloat(v.flag.ro(), float64(v.Int()), t)
+}
+
+// convertOp: uintXX -> floatXX
+func cvtUintFloat(v Value, t Type) Value {
+ return makeFloat(v.flag.ro(), float64(v.Uint()), t)
+}
+
+// convertOp: floatXX -> floatXX
+func cvtFloat(v Value, t Type) Value {
+ if v.Type().Kind() == Float32 && t.Kind() == Float32 {
+ // Don't do any conversion if both types have underlying type float32.
+ // This avoids converting to float64 and back, which will
+ // convert a signaling NaN to a quiet NaN. See issue 36400.
+ return makeFloat32(v.flag.ro(), *(*float32)(v.ptr), t)
+ }
+ return makeFloat(v.flag.ro(), v.Float(), t)
+}
+
+// convertOp: complexXX -> complexXX
+func cvtComplex(v Value, t Type) Value {
+ return makeComplex(v.flag.ro(), v.Complex(), t)
+}
+
+// convertOp: intXX -> string
+func cvtIntString(v Value, t Type) Value {
+ s := "\uFFFD"
+ if x := v.Int(); int64(rune(x)) == x {
+ s = string(rune(x))
+ }
+ return makeString(v.flag.ro(), s, t)
+}
+
+// convertOp: uintXX -> string
+func cvtUintString(v Value, t Type) Value {
+ s := "\uFFFD"
+ if x := v.Uint(); uint64(rune(x)) == x {
+ s = string(rune(x))
+ }
+ return makeString(v.flag.ro(), s, t)
+}
+
+// convertOp: []byte -> string
+func cvtBytesString(v Value, t Type) Value {
+ return makeString(v.flag.ro(), string(v.Bytes()), t)
+}
+
+// convertOp: string -> []byte
+func cvtStringBytes(v Value, t Type) Value {
+ return makeBytes(v.flag.ro(), []byte(v.String()), t)
+}
+
+// convertOp: []rune -> string
+func cvtRunesString(v Value, t Type) Value {
+ return makeString(v.flag.ro(), string(v.runes()), t)
+}
+
+// convertOp: string -> []rune
+func cvtStringRunes(v Value, t Type) Value {
+ return makeRunes(v.flag.ro(), []rune(v.String()), t)
+}
+
+// convertOp: []T -> *[N]T
+func cvtSliceArrayPtr(v Value, t Type) Value {
+ n := t.Elem().Len()
+ if n > v.Len() {
+ panic("reflect: cannot convert slice with length " + itoa.Itoa(v.Len()) + " to pointer to array with length " + itoa.Itoa(n))
+ }
+ h := (*unsafeheader.Slice)(v.ptr)
+ return Value{t.common(), h.Data, v.flag&^(flagIndir|flagAddr|flagKindMask) | flag(Pointer)}
+}
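+
+// For example, at the language level (Go 1.17 and later):
+//
+//	s := []int{1, 2, 3, 4}
+//	p := (*[4]int)(s) // ok: len(s) >= 4; p shares s's backing array
+//	// (*[5]int)(s) would panic: slice length 4 < array length 5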
+
+// convertOp: direct copy
+func cvtDirect(v Value, typ Type) Value {
+ f := v.flag
+ t := typ.common()
+ ptr := v.ptr
+ if f&flagAddr != 0 {
+ // indirect, mutable word - make a copy
+ c := unsafe_New(t)
+ typedmemmove(t, c, ptr)
+ ptr = c
+ f &^= flagAddr
+ }
+ return Value{t, ptr, v.flag.ro() | f} // v.flag.ro()|f == f?
+}
+
+// convertOp: concrete -> interface
+func cvtT2I(v Value, typ Type) Value {
+ target := unsafe_New(typ.common())
+ x := valueInterface(v, false)
+ if typ.NumMethod() == 0 {
+ *(*any)(target) = x
+ } else {
+ ifaceE2I(typ.(*rtype), x, target)
+ }
+ return Value{typ.common(), target, v.flag.ro() | flagIndir | flag(Interface)}
+}
+
+// convertOp: interface -> interface
+func cvtI2I(v Value, typ Type) Value {
+ if v.IsNil() {
+ ret := Zero(typ)
+ ret.flag |= v.flag.ro()
+ return ret
+ }
+ return cvtT2I(v.Elem(), typ)
+}
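+
+// For example (a minimal sketch; os and io refer to the standard packages,
+// which this file does not import):
+//
+//	w := reflect.ValueOf(os.Stdout).Convert(reflect.TypeOf((*io.Writer)(nil)).Elem())
+//	// concrete *os.File -> interface io.Writer, handled by cvtT2I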
+
+// implemented in ../runtime
+func chancap(ch unsafe.Pointer) int
+func chanclose(ch unsafe.Pointer)
+func chanlen(ch unsafe.Pointer) int
+
+// Note: some of the noescape annotations below are technically a lie,
+// but safe in the context of this package. Functions like chansend
+// and mapassign don't escape the referent, but may escape anything
+// the referent points to (they do shallow copies of the referent).
+// It is safe in this package because the referent may only point
+// to something a Value may point to, and that is always in the heap
+// (due to the escapes() call in ValueOf).
+
+//go:noescape
+func chanrecv(ch unsafe.Pointer, nb bool, val unsafe.Pointer) (selected, received bool)
+
+//go:noescape
+func chansend(ch unsafe.Pointer, val unsafe.Pointer, nb bool) bool
+
+func makechan(typ *rtype, size int) (ch unsafe.Pointer)
+func makemap(t *rtype, cap int) (m unsafe.Pointer)
+
+//go:noescape
+func mapaccess(t *rtype, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
+
+//go:noescape
+func mapaccess_faststr(t *rtype, m unsafe.Pointer, key string) (val unsafe.Pointer)
+
+//go:noescape
+func mapassign(t *rtype, m unsafe.Pointer, key, val unsafe.Pointer)
+
+//go:noescape
+func mapassign_faststr(t *rtype, m unsafe.Pointer, key string, val unsafe.Pointer)
+
+//go:noescape
+func mapdelete(t *rtype, m unsafe.Pointer, key unsafe.Pointer)
+
+//go:noescape
+func mapdelete_faststr(t *rtype, m unsafe.Pointer, key string)
+
+//go:noescape
+func mapiterinit(t *rtype, m unsafe.Pointer, it *hiter)
+
+//go:noescape
+func mapiterkey(it *hiter) (key unsafe.Pointer)
+
+//go:noescape
+func mapiterelem(it *hiter) (elem unsafe.Pointer)
+
+//go:noescape
+func mapiternext(it *hiter)
+
+//go:noescape
+func maplen(m unsafe.Pointer) int
+
+// call calls fn with "stackArgsSize" bytes of stack arguments laid out
+// at stackArgs and register arguments laid out in regArgs. frameSize is
+// the total amount of stack space that will be reserved by call, so this
+// should include enough space to spill register arguments to the stack in
+// case of preemption.
+//
+// After fn returns, call copies stackArgsSize-stackRetOffset result bytes
+// back into stackArgs+stackRetOffset before returning, for any return
+// values passed on the stack. Register-based return values will be found
+// in the same regArgs structure.
+//
+// regArgs must also be prepared with an appropriate ReturnIsPtr bitmap
+// indicating which registers will contain pointer-valued return values. The
+// purpose of this bitmap is to keep pointers visible to the GC between
+// returning from reflectcall and actually using them.
+//
+// If copying result bytes back from the stack, the caller must pass the
+// argument frame type as stackArgsType, so that call can execute appropriate
+// write barriers during the copy.
+//
+// Arguments passed through to call do not escape. The type is used only in a
+// very limited callee of call, the stackArgs are copied, and regArgs is only
+// used in the call frame.
+//
+//go:noescape
+//go:linkname call runtime.reflectcall
+func call(stackArgsType *rtype, f, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+
+func ifaceE2I(t *rtype, src any, dst unsafe.Pointer)
+
+// memmove copies size bytes to dst from src. No write barriers are used.
+//
+//go:noescape
+func memmove(dst, src unsafe.Pointer, size uintptr)
+
+// typedmemmove copies a value of type t to dst from src.
+//
+//go:noescape
+func typedmemmove(t *rtype, dst, src unsafe.Pointer)
+
+// typedmemmovepartial is like typedmemmove but assumes that
+// dst and src point off bytes into the value and only copies size bytes.
+//
+//go:noescape
+func typedmemmovepartial(t *rtype, dst, src unsafe.Pointer, off, size uintptr)
+
+// typedmemclr zeros the value at ptr of type t.
+//
+//go:noescape
+func typedmemclr(t *rtype, ptr unsafe.Pointer)
+
+// typedmemclrpartial is like typedmemclr but assumes that
+// dst points off bytes into the value and only clears size bytes.
+//
+//go:noescape
+func typedmemclrpartial(t *rtype, ptr unsafe.Pointer, off, size uintptr)
+
+// typedslicecopy copies a slice of elemType values from src to dst,
+// returning the number of elements copied.
+//
+//go:noescape
+func typedslicecopy(elemType *rtype, dst, src unsafeheader.Slice) int
+
+//go:noescape
+func typehash(t *rtype, p unsafe.Pointer, h uintptr) uintptr
+
+func verifyNotInHeapPtr(p uintptr) bool
+
+// Dummy annotation marking that the value x escapes,
+// for use in cases where the reflect code is so clever that
+// the compiler cannot follow.
+func escapes(x any) {
+ if dummy.b {
+ dummy.x = x
+ }
+}
+
+var dummy struct {
+ b bool
+ x any
+}
diff --git a/contrib/go/_std_1.18/src/reflect/visiblefields.go b/contrib/go/_std_1.19/src/reflect/visiblefields.go
index 9375faa110..9375faa110 100644
--- a/contrib/go/_std_1.18/src/reflect/visiblefields.go
+++ b/contrib/go/_std_1.19/src/reflect/visiblefields.go
diff --git a/contrib/go/_std_1.19/src/runtime/alg.go b/contrib/go/_std_1.19/src/runtime/alg.go
new file mode 100644
index 0000000000..2a413eeef3
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/alg.go
@@ -0,0 +1,353 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/cpu"
+ "internal/goarch"
+ "unsafe"
+)
+
+const (
+ c0 = uintptr((8-goarch.PtrSize)/4*2860486313 + (goarch.PtrSize-4)/4*33054211828000289)
+ c1 = uintptr((8-goarch.PtrSize)/4*3267000013 + (goarch.PtrSize-4)/4*23344194077549503)
+)
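+
+// On a 64-bit target (goarch.PtrSize == 8) the first term of each constant
+// vanishes and the second term is selected, e.g. for c0:
+//
+//	(8-8)/4*2860486313 + (8-4)/4*33054211828000289 == 33054211828000289
+//
+// On 32-bit targets the roles are reversed and the smaller constants apply.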
+
+func memhash0(p unsafe.Pointer, h uintptr) uintptr {
+ return h
+}
+
+func memhash8(p unsafe.Pointer, h uintptr) uintptr {
+ return memhash(p, h, 1)
+}
+
+func memhash16(p unsafe.Pointer, h uintptr) uintptr {
+ return memhash(p, h, 2)
+}
+
+func memhash128(p unsafe.Pointer, h uintptr) uintptr {
+ return memhash(p, h, 16)
+}
+
+//go:nosplit
+func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr {
+ ptr := getclosureptr()
+ size := *(*uintptr)(unsafe.Pointer(ptr + unsafe.Sizeof(h)))
+ return memhash(p, h, size)
+}
+
+// runtime variable to check if the processor we're running on
+// actually supports the instructions used by the AES-based
+// hash implementation.
+var useAeshash bool
+
+// in asm_*.s
+func memhash(p unsafe.Pointer, h, s uintptr) uintptr
+func memhash32(p unsafe.Pointer, h uintptr) uintptr
+func memhash64(p unsafe.Pointer, h uintptr) uintptr
+func strhash(p unsafe.Pointer, h uintptr) uintptr
+
+func strhashFallback(a unsafe.Pointer, h uintptr) uintptr {
+ x := (*stringStruct)(a)
+ return memhashFallback(x.str, h, uintptr(x.len))
+}
+
+// NOTE: Because NaN != NaN, a map can contain any
+// number of (mostly useless) entries keyed with NaNs.
+// To avoid long hash chains, we assign a random number
+// as the hash value for a NaN.
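+//
+// For example:
+//
+//	m := map[float64]int{}
+//	nan := math.NaN()
+//	m[nan] = 1
+//	m[nan] = 2
+//	// len(m) == 2: each NaN key hashes randomly and compares unequal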
+
+func f32hash(p unsafe.Pointer, h uintptr) uintptr {
+ f := *(*float32)(p)
+ switch {
+ case f == 0:
+ return c1 * (c0 ^ h) // +0, -0
+ case f != f:
+ return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN
+ default:
+ return memhash(p, h, 4)
+ }
+}
+
+func f64hash(p unsafe.Pointer, h uintptr) uintptr {
+ f := *(*float64)(p)
+ switch {
+ case f == 0:
+ return c1 * (c0 ^ h) // +0, -0
+ case f != f:
+ return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN
+ default:
+ return memhash(p, h, 8)
+ }
+}
+
+func c64hash(p unsafe.Pointer, h uintptr) uintptr {
+ x := (*[2]float32)(p)
+ return f32hash(unsafe.Pointer(&x[1]), f32hash(unsafe.Pointer(&x[0]), h))
+}
+
+func c128hash(p unsafe.Pointer, h uintptr) uintptr {
+ x := (*[2]float64)(p)
+ return f64hash(unsafe.Pointer(&x[1]), f64hash(unsafe.Pointer(&x[0]), h))
+}
+
+func interhash(p unsafe.Pointer, h uintptr) uintptr {
+ a := (*iface)(p)
+ tab := a.tab
+ if tab == nil {
+ return h
+ }
+ t := tab._type
+ if t.equal == nil {
+ // Check hashability here. We could do this check inside
+ // typehash, but we want to report the topmost type in
+ // the error text (e.g. in a struct with a field of slice type
+ // we want to report the struct, not the slice).
+ panic(errorString("hash of unhashable type " + t.string()))
+ }
+ if isDirectIface(t) {
+ return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
+ } else {
+ return c1 * typehash(t, a.data, h^c0)
+ }
+}
+
+func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
+ a := (*eface)(p)
+ t := a._type
+ if t == nil {
+ return h
+ }
+ if t.equal == nil {
+ // See comment in interhash above.
+ panic(errorString("hash of unhashable type " + t.string()))
+ }
+ if isDirectIface(t) {
+ return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
+ } else {
+ return c1 * typehash(t, a.data, h^c0)
+ }
+}
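+
+// For example, using an unhashable type as a map key through an interface
+// fails at insertion time rather than at compile time:
+//
+//	m := map[any]int{}
+//	m[[]int{1}] = 1 // panics: hash of unhashable type []int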
+
+// typehash computes the hash of the object of type t at address p.
+// h is the seed.
+// This function is seldom used. Most maps use for hashing either
+// fixed functions (e.g. f32hash) or compiler-generated functions
+// (e.g. for a type like struct { x, y string }). This implementation
+// is slower but more general and is used for hashing interface types
+// (called from interhash or nilinterhash, above) or for hashing in
+// maps generated by reflect.MapOf (reflect_typehash, below).
+// Note: this function must match the compiler generated
+// functions exactly. See issue 37716.
+func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
+ if t.tflag&tflagRegularMemory != 0 {
+ // Handle ptr sizes specially, see issue 37086.
+ switch t.size {
+ case 4:
+ return memhash32(p, h)
+ case 8:
+ return memhash64(p, h)
+ default:
+ return memhash(p, h, t.size)
+ }
+ }
+ switch t.kind & kindMask {
+ case kindFloat32:
+ return f32hash(p, h)
+ case kindFloat64:
+ return f64hash(p, h)
+ case kindComplex64:
+ return c64hash(p, h)
+ case kindComplex128:
+ return c128hash(p, h)
+ case kindString:
+ return strhash(p, h)
+ case kindInterface:
+ i := (*interfacetype)(unsafe.Pointer(t))
+ if len(i.mhdr) == 0 {
+ return nilinterhash(p, h)
+ }
+ return interhash(p, h)
+ case kindArray:
+ a := (*arraytype)(unsafe.Pointer(t))
+ for i := uintptr(0); i < a.len; i++ {
+ h = typehash(a.elem, add(p, i*a.elem.size), h)
+ }
+ return h
+ case kindStruct:
+ s := (*structtype)(unsafe.Pointer(t))
+ for _, f := range s.fields {
+ if f.name.isBlank() {
+ continue
+ }
+ h = typehash(f.typ, add(p, f.offset), h)
+ }
+ return h
+ default:
+ // Should never happen, as typehash should only be called
+ // with comparable types.
+ panic(errorString("hash of unhashable type " + t.string()))
+ }
+}
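+
+// Maps built with reflect.MapOf hash their keys through this function; a
+// minimal sketch:
+//
+//	mt := reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0))
+//	m := reflect.MakeMap(mt)
+//	m.SetMapIndex(reflect.ValueOf("k"), reflect.ValueOf(1))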
+
+//go:linkname reflect_typehash reflect.typehash
+func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
+ return typehash(t, p, h)
+}
+
+func memequal0(p, q unsafe.Pointer) bool {
+ return true
+}
+func memequal8(p, q unsafe.Pointer) bool {
+ return *(*int8)(p) == *(*int8)(q)
+}
+func memequal16(p, q unsafe.Pointer) bool {
+ return *(*int16)(p) == *(*int16)(q)
+}
+func memequal32(p, q unsafe.Pointer) bool {
+ return *(*int32)(p) == *(*int32)(q)
+}
+func memequal64(p, q unsafe.Pointer) bool {
+ return *(*int64)(p) == *(*int64)(q)
+}
+func memequal128(p, q unsafe.Pointer) bool {
+ return *(*[2]int64)(p) == *(*[2]int64)(q)
+}
+func f32equal(p, q unsafe.Pointer) bool {
+ return *(*float32)(p) == *(*float32)(q)
+}
+func f64equal(p, q unsafe.Pointer) bool {
+ return *(*float64)(p) == *(*float64)(q)
+}
+func c64equal(p, q unsafe.Pointer) bool {
+ return *(*complex64)(p) == *(*complex64)(q)
+}
+func c128equal(p, q unsafe.Pointer) bool {
+ return *(*complex128)(p) == *(*complex128)(q)
+}
+func strequal(p, q unsafe.Pointer) bool {
+ return *(*string)(p) == *(*string)(q)
+}
+func interequal(p, q unsafe.Pointer) bool {
+ x := *(*iface)(p)
+ y := *(*iface)(q)
+ return x.tab == y.tab && ifaceeq(x.tab, x.data, y.data)
+}
+func nilinterequal(p, q unsafe.Pointer) bool {
+ x := *(*eface)(p)
+ y := *(*eface)(q)
+ return x._type == y._type && efaceeq(x._type, x.data, y.data)
+}
+func efaceeq(t *_type, x, y unsafe.Pointer) bool {
+ if t == nil {
+ return true
+ }
+ eq := t.equal
+ if eq == nil {
+ panic(errorString("comparing uncomparable type " + t.string()))
+ }
+ if isDirectIface(t) {
+ // Direct interface types are ptr, chan, map, func, and single-element structs/arrays thereof.
+ // Maps and funcs are not comparable, so they can't reach here.
+ // Ptrs, chans, and single-element items can be compared directly using ==.
+ return x == y
+ }
+ return eq(x, y)
+}
+func ifaceeq(tab *itab, x, y unsafe.Pointer) bool {
+ if tab == nil {
+ return true
+ }
+ t := tab._type
+ eq := t.equal
+ if eq == nil {
+ panic(errorString("comparing uncomparable type " + t.string()))
+ }
+ if isDirectIface(t) {
+ // See comment in efaceeq.
+ return x == y
+ }
+ return eq(x, y)
+}
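+
+// For example, comparing interfaces that hold uncomparable values panics:
+//
+//	var a, b any = []int{1}, []int{1}
+//	_ = a == b // panics: comparing uncomparable type []int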
+
+// Testing adapters for hash quality tests (see hash_test.go)
+func stringHash(s string, seed uintptr) uintptr {
+ return strhash(noescape(unsafe.Pointer(&s)), seed)
+}
+
+func bytesHash(b []byte, seed uintptr) uintptr {
+ s := (*slice)(unsafe.Pointer(&b))
+ return memhash(s.array, seed, uintptr(s.len))
+}
+
+func int32Hash(i uint32, seed uintptr) uintptr {
+ return memhash32(noescape(unsafe.Pointer(&i)), seed)
+}
+
+func int64Hash(i uint64, seed uintptr) uintptr {
+ return memhash64(noescape(unsafe.Pointer(&i)), seed)
+}
+
+func efaceHash(i any, seed uintptr) uintptr {
+ return nilinterhash(noescape(unsafe.Pointer(&i)), seed)
+}
+
+func ifaceHash(i interface {
+ F()
+}, seed uintptr) uintptr {
+ return interhash(noescape(unsafe.Pointer(&i)), seed)
+}
+
+const hashRandomBytes = goarch.PtrSize / 4 * 64
+
+// used in asm_{386,amd64,arm64}.s to seed the hash function
+var aeskeysched [hashRandomBytes]byte
+
+// used in hash{32,64}.go to seed the hash function
+var hashkey [4]uintptr
+
+func alginit() {
+ // Install AES hash algorithms if the instructions needed are present.
+ if (GOARCH == "386" || GOARCH == "amd64") &&
+ cpu.X86.HasAES && // AESENC
+ cpu.X86.HasSSSE3 && // PSHUFB
+ cpu.X86.HasSSE41 { // PINSR{D,Q}
+ initAlgAES()
+ return
+ }
+ if GOARCH == "arm64" && cpu.ARM64.HasAES {
+ initAlgAES()
+ return
+ }
+ getRandomData((*[len(hashkey) * goarch.PtrSize]byte)(unsafe.Pointer(&hashkey))[:])
+ hashkey[0] |= 1 // make sure these numbers are odd
+ hashkey[1] |= 1
+ hashkey[2] |= 1
+ hashkey[3] |= 1
+}
+
+func initAlgAES() {
+ useAeshash = true
+ // Initialize with random data so hash collisions will be hard to engineer.
+ getRandomData(aeskeysched[:])
+}
+
+// Note: These routines perform the read in native byte order.
+func readUnaligned32(p unsafe.Pointer) uint32 {
+ q := (*[4]byte)(p)
+ if goarch.BigEndian {
+ return uint32(q[3]) | uint32(q[2])<<8 | uint32(q[1])<<16 | uint32(q[0])<<24
+ }
+ return uint32(q[0]) | uint32(q[1])<<8 | uint32(q[2])<<16 | uint32(q[3])<<24
+}
+
+func readUnaligned64(p unsafe.Pointer) uint64 {
+ q := (*[8]byte)(p)
+ if goarch.BigEndian {
+ return uint64(q[7]) | uint64(q[6])<<8 | uint64(q[5])<<16 | uint64(q[4])<<24 |
+ uint64(q[3])<<32 | uint64(q[2])<<40 | uint64(q[1])<<48 | uint64(q[0])<<56
+ }
+ return uint64(q[0]) | uint64(q[1])<<8 | uint64(q[2])<<16 | uint64(q[3])<<24 | uint64(q[4])<<32 | uint64(q[5])<<40 | uint64(q[6])<<48 | uint64(q[7])<<56
+}
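+
+// On a little-endian host these are equivalent to the encoding/binary
+// helpers (a sketch that ignores the unsafe.Pointer plumbing):
+//
+//	u32 := binary.LittleEndian.Uint32(b[:4])
+//	u64 := binary.LittleEndian.Uint64(b[:8])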
diff --git a/contrib/go/_std_1.19/src/runtime/asan0.go b/contrib/go/_std_1.19/src/runtime/asan0.go
new file mode 100644
index 0000000000..0948786200
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/asan0.go
@@ -0,0 +1,23 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !asan
+
+// Dummy ASan support API, used when not built with -asan.
+
+package runtime
+
+import (
+ "unsafe"
+)
+
+const asanenabled = false
+
+// Because asanenabled is false, none of these functions should be called.
+
+func asanread(addr unsafe.Pointer, sz uintptr) { throw("asan") }
+func asanwrite(addr unsafe.Pointer, sz uintptr) { throw("asan") }
+func asanunpoison(addr unsafe.Pointer, sz uintptr) { throw("asan") }
+func asanpoison(addr unsafe.Pointer, sz uintptr) { throw("asan") }
+func asanregisterglobals(addr unsafe.Pointer, sz uintptr) { throw("asan") }
diff --git a/contrib/go/_std_1.18/src/runtime/asm.s b/contrib/go/_std_1.19/src/runtime/asm.s
index 84d56de7dd..84d56de7dd 100644
--- a/contrib/go/_std_1.18/src/runtime/asm.s
+++ b/contrib/go/_std_1.19/src/runtime/asm.s
diff --git a/contrib/go/_std_1.19/src/runtime/asm_amd64.h b/contrib/go/_std_1.19/src/runtime/asm_amd64.h
new file mode 100644
index 0000000000..49e0ee2323
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/asm_amd64.h
@@ -0,0 +1,14 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Define features that are guaranteed to be supported by setting the GOAMD64 variable.
+// If a feature is supported, there's no need to check it at runtime every time.
+
+#ifdef GOAMD64_v3
+#define hasAVX2
+#endif
+
+#ifdef GOAMD64_v4
+#define hasAVX2
+#endif
diff --git a/contrib/go/_std_1.19/src/runtime/asm_amd64.s b/contrib/go/_std_1.19/src/runtime/asm_amd64.s
new file mode 100644
index 0000000000..d2f7984178
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/asm_amd64.s
@@ -0,0 +1,2059 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "funcdata.h"
+#include "textflag.h"
+#include "cgo/abi_amd64.h"
+
+// _rt0_amd64 is common startup code for most amd64 systems when using
+// internal linking. This is the entry point for the program from the
+// kernel for an ordinary -buildmode=exe program. The stack holds the
+// number of arguments and the C-style argv.
+TEXT _rt0_amd64(SB),NOSPLIT,$-8
+ MOVQ 0(SP), DI // argc
+ LEAQ 8(SP), SI // argv
+ JMP runtime·rt0_go(SB)
+
+// main is common startup code for most amd64 systems when using
+// external linking. The C startup code will call the symbol "main"
+// passing argc and argv in the usual C ABI registers DI and SI.
+TEXT main(SB),NOSPLIT,$-8
+ JMP runtime·rt0_go(SB)
+
+// _rt0_amd64_lib is common startup code for most amd64 systems when
+// using -buildmode=c-archive or -buildmode=c-shared. The linker will
+// arrange to invoke this function as a global constructor (for
+// c-archive) or when the shared library is loaded (for c-shared).
+// We expect argc and argv to be passed in the usual C ABI registers
+// DI and SI.
+TEXT _rt0_amd64_lib(SB),NOSPLIT,$0
+ // Transition from C ABI to Go ABI.
+ PUSH_REGS_HOST_TO_ABI0()
+
+ MOVQ DI, _rt0_amd64_lib_argc<>(SB)
+ MOVQ SI, _rt0_amd64_lib_argv<>(SB)
+
+ // Synchronous initialization.
+ CALL runtime·libpreinit(SB)
+
+ // Create a new thread to finish Go runtime initialization.
+ MOVQ _cgo_sys_thread_create(SB), AX
+ TESTQ AX, AX
+ JZ nocgo
+
+ // We're calling back to C.
+ // Align stack per ELF ABI requirements.
+ MOVQ SP, BX // Callee-save in C ABI
+ ANDQ $~15, SP
+ MOVQ $_rt0_amd64_lib_go(SB), DI
+ MOVQ $0, SI
+ CALL AX
+ MOVQ BX, SP
+ JMP restore
+
+nocgo:
+ ADJSP $16
+ MOVQ $0x800000, 0(SP) // stacksize
+ MOVQ $_rt0_amd64_lib_go(SB), AX
+ MOVQ AX, 8(SP) // fn
+ CALL runtime·newosproc0(SB)
+ ADJSP $-16
+
+restore:
+ POP_REGS_HOST_TO_ABI0()
+ RET
+
+// _rt0_amd64_lib_go initializes the Go runtime.
+// This is started in a separate thread by _rt0_amd64_lib.
+TEXT _rt0_amd64_lib_go(SB),NOSPLIT,$0
+ MOVQ _rt0_amd64_lib_argc<>(SB), DI
+ MOVQ _rt0_amd64_lib_argv<>(SB), SI
+ JMP runtime·rt0_go(SB)
+
+DATA _rt0_amd64_lib_argc<>(SB)/8, $0
+GLOBL _rt0_amd64_lib_argc<>(SB),NOPTR, $8
+DATA _rt0_amd64_lib_argv<>(SB)/8, $0
+GLOBL _rt0_amd64_lib_argv<>(SB),NOPTR, $8
+
+#ifdef GOAMD64_v2
+DATA bad_cpu_msg<>+0x00(SB)/84, $"This program can only be run on AMD64 processors with v2 microarchitecture support.\n"
+#endif
+
+#ifdef GOAMD64_v3
+DATA bad_cpu_msg<>+0x00(SB)/84, $"This program can only be run on AMD64 processors with v3 microarchitecture support.\n"
+#endif
+
+#ifdef GOAMD64_v4
+DATA bad_cpu_msg<>+0x00(SB)/84, $"This program can only be run on AMD64 processors with v4 microarchitecture support.\n"
+#endif
+
+GLOBL bad_cpu_msg<>(SB), RODATA, $84
+
+// Define a list of AMD64 microarchitecture level features
+// https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
+
+ // SSE3 SSSE3 CMPXCHG16B SSE4.1 SSE4.2 POPCNT
+#define V2_FEATURES_CX (1 << 0 | 1 << 9 | 1 << 13 | 1 << 19 | 1 << 20 | 1 << 23)
+ // LAHF/SAHF
+#define V2_EXT_FEATURES_CX (1 << 0)
+ // FMA MOVBE OSXSAVE AVX F16C
+#define V3_FEATURES_CX (V2_FEATURES_CX | 1 << 12 | 1 << 22 | 1 << 27 | 1 << 28 | 1 << 29)
+ // ABM (FOR LZCNT)
+#define V3_EXT_FEATURES_CX (V2_EXT_FEATURES_CX | 1 << 5)
+ // BMI1 AVX2 BMI2
+#define V3_EXT_FEATURES_BX (1 << 3 | 1 << 5 | 1 << 8)
+ // XMM YMM
+#define V3_OS_SUPPORT_AX (1 << 1 | 1 << 2)
+
+#define V4_FEATURES_CX V3_FEATURES_CX
+
+#define V4_EXT_FEATURES_CX V3_EXT_FEATURES_CX
+ // AVX512F AVX512DQ AVX512CD AVX512BW AVX512VL
+#define V4_EXT_FEATURES_BX (V3_EXT_FEATURES_BX | 1 << 16 | 1 << 17 | 1 << 28 | 1 << 30 | 1 << 31)
+ // OPMASK ZMM
+#define V4_OS_SUPPORT_AX (V3_OS_SUPPORT_AX | 1 << 5 | (1 << 6 | 1 << 7))
+
+#ifdef GOAMD64_v2
+#define NEED_MAX_CPUID 0x80000001
+#define NEED_FEATURES_CX V2_FEATURES_CX
+#define NEED_EXT_FEATURES_CX V2_EXT_FEATURES_CX
+#endif
+
+#ifdef GOAMD64_v3
+#define NEED_MAX_CPUID 0x80000001
+#define NEED_FEATURES_CX V3_FEATURES_CX
+#define NEED_EXT_FEATURES_CX V3_EXT_FEATURES_CX
+#define NEED_EXT_FEATURES_BX V3_EXT_FEATURES_BX
+#define NEED_OS_SUPPORT_AX V3_OS_SUPPORT_AX
+#endif
+
+#ifdef GOAMD64_v4
+#define NEED_MAX_CPUID 0x80000001
+#define NEED_FEATURES_CX V4_FEATURES_CX
+#define NEED_EXT_FEATURES_CX V4_EXT_FEATURES_CX
+#define NEED_EXT_FEATURES_BX V4_EXT_FEATURES_BX
+
+// Darwin requires a different approach to check AVX512 support; see CL 285572.
+#ifdef GOOS_darwin
+#define NEED_OS_SUPPORT_AX V3_OS_SUPPORT_AX
+// These values are from:
+// https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/osfmk/i386/cpu_capabilities.h
+#define commpage64_base_address 0x00007fffffe00000
+#define commpage64_cpu_capabilities64 (commpage64_base_address+0x010)
+#define commpage64_version (commpage64_base_address+0x01E)
+#define hasAVX512F 0x0000004000000000
+#define hasAVX512CD 0x0000008000000000
+#define hasAVX512DQ 0x0000010000000000
+#define hasAVX512BW 0x0000020000000000
+#define hasAVX512VL 0x0000100000000000
+#define NEED_DARWIN_SUPPORT (hasAVX512F | hasAVX512DQ | hasAVX512CD | hasAVX512BW | hasAVX512VL)
+#else
+#define NEED_OS_SUPPORT_AX V4_OS_SUPPORT_AX
+#endif
+
+#endif
+
+TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0
+ // copy arguments forward on an even stack
+ MOVQ DI, AX // argc
+ MOVQ SI, BX // argv
+ SUBQ $(5*8), SP // 3args 2auto
+ ANDQ $~15, SP
+ MOVQ AX, 24(SP)
+ MOVQ BX, 32(SP)
+
+ // create istack out of the given (operating system) stack.
+ // _cgo_init may update stackguard.
+ MOVQ $runtime·g0(SB), DI
+ LEAQ (-64*1024+104)(SP), BX
+ MOVQ BX, g_stackguard0(DI)
+ MOVQ BX, g_stackguard1(DI)
+ MOVQ BX, (g_stack+stack_lo)(DI)
+ MOVQ SP, (g_stack+stack_hi)(DI)
+
+ // find out information about the processor we're on
+ MOVL $0, AX
+ CPUID
+ CMPL AX, $0
+ JE nocpuinfo
+
+ CMPL BX, $0x756E6547 // "Genu"
+ JNE notintel
+ CMPL DX, $0x49656E69 // "ineI"
+ JNE notintel
+ CMPL CX, $0x6C65746E // "ntel"
+ JNE notintel
+ MOVB $1, runtime·isIntel(SB)
+
+notintel:
+ // Load EAX=1 cpuid flags
+ MOVL $1, AX
+ CPUID
+ MOVL AX, runtime·processorVersionInfo(SB)
+
+nocpuinfo:
+ // if there is an _cgo_init, call it.
+ MOVQ _cgo_init(SB), AX
+ TESTQ AX, AX
+ JZ needtls
+ // arg 1: g0, already in DI
+ MOVQ $setg_gcc<>(SB), SI // arg 2: setg_gcc
+#ifdef GOOS_android
+ MOVQ $runtime·tls_g(SB), DX // arg 3: &tls_g
+ // arg 4: TLS base, stored in slot 0 (Android's TLS_SLOT_SELF).
+ // Compensate for tls_g (+16).
+ MOVQ -16(TLS), CX
+#else
+ MOVQ $0, DX // arg 3, 4: not used when using platform's TLS
+ MOVQ $0, CX
+#endif
+#ifdef GOOS_windows
+ // Adjust for the Win64 calling convention.
+ MOVQ CX, R9 // arg 4
+ MOVQ DX, R8 // arg 3
+ MOVQ SI, DX // arg 2
+ MOVQ DI, CX // arg 1
+#endif
+ CALL AX
+
+ // update stackguard after _cgo_init
+ MOVQ $runtime·g0(SB), CX
+ MOVQ (g_stack+stack_lo)(CX), AX
+ ADDQ $const__StackGuard, AX
+ MOVQ AX, g_stackguard0(CX)
+ MOVQ AX, g_stackguard1(CX)
+
+#ifndef GOOS_windows
+ JMP ok
+#endif
+needtls:
+#ifdef GOOS_plan9
+ // skip TLS setup on Plan 9
+ JMP ok
+#endif
+#ifdef GOOS_solaris
+ // skip TLS setup on Solaris
+ JMP ok
+#endif
+#ifdef GOOS_illumos
+ // skip TLS setup on illumos
+ JMP ok
+#endif
+#ifdef GOOS_darwin
+ // skip TLS setup on Darwin
+ JMP ok
+#endif
+#ifdef GOOS_openbsd
+ // skip TLS setup on OpenBSD
+ JMP ok
+#endif
+
+ LEAQ runtime·m0+m_tls(SB), DI
+ CALL runtime·settls(SB)
+
+ // store through it, to make sure it works
+ get_tls(BX)
+ MOVQ $0x123, g(BX)
+ MOVQ runtime·m0+m_tls(SB), AX
+ CMPQ AX, $0x123
+ JEQ 2(PC)
+ CALL runtime·abort(SB)
+ok:
+ // set the per-goroutine and per-mach "registers"
+ get_tls(BX)
+ LEAQ runtime·g0(SB), CX
+ MOVQ CX, g(BX)
+ LEAQ runtime·m0(SB), AX
+
+ // save m->g0 = g0
+ MOVQ CX, m_g0(AX)
+ // save m0 to g0->m
+ MOVQ AX, g_m(CX)
+
+ CLD // convention is D is always left cleared
+
+ // Check GOAMD64 requirements
+ // We need to do this after setting up TLS, so that
+ // we can report an error if there is a failure. See issue 49586.
+#ifdef NEED_FEATURES_CX
+ MOVL $0, AX
+ CPUID
+ CMPL AX, $0
+ JE bad_cpu
+ MOVL $1, AX
+ CPUID
+ ANDL $NEED_FEATURES_CX, CX
+ CMPL CX, $NEED_FEATURES_CX
+ JNE bad_cpu
+#endif
+
+#ifdef NEED_MAX_CPUID
+ MOVL $0x80000000, AX
+ CPUID
+ CMPL AX, $NEED_MAX_CPUID
+ JL bad_cpu
+#endif
+
+#ifdef NEED_EXT_FEATURES_BX
+ MOVL $7, AX
+ MOVL $0, CX
+ CPUID
+ ANDL $NEED_EXT_FEATURES_BX, BX
+ CMPL BX, $NEED_EXT_FEATURES_BX
+ JNE bad_cpu
+#endif
+
+#ifdef NEED_EXT_FEATURES_CX
+ MOVL $0x80000001, AX
+ CPUID
+ ANDL $NEED_EXT_FEATURES_CX, CX
+ CMPL CX, $NEED_EXT_FEATURES_CX
+ JNE bad_cpu
+#endif
+
+#ifdef NEED_OS_SUPPORT_AX
+ XORL CX, CX
+ XGETBV
+ ANDL $NEED_OS_SUPPORT_AX, AX
+ CMPL AX, $NEED_OS_SUPPORT_AX
+ JNE bad_cpu
+#endif
+
+#ifdef NEED_DARWIN_SUPPORT
+ MOVQ $commpage64_version, BX
+ CMPW (BX), $13 // cpu_capabilities64 undefined in versions < 13
+ JL bad_cpu
+ MOVQ $commpage64_cpu_capabilities64, BX
+ MOVQ (BX), BX
+ MOVQ $NEED_DARWIN_SUPPORT, CX
+ ANDQ CX, BX
+ CMPQ BX, CX
+ JNE bad_cpu
+#endif
+
+ CALL runtime·check(SB)
+
+ MOVL 24(SP), AX // copy argc
+ MOVL AX, 0(SP)
+ MOVQ 32(SP), AX // copy argv
+ MOVQ AX, 8(SP)
+ CALL runtime·args(SB)
+ CALL runtime·osinit(SB)
+ CALL runtime·schedinit(SB)
+
+ // create a new goroutine to start program
+ MOVQ $runtime·mainPC(SB), AX // entry
+ PUSHQ AX
+ CALL runtime·newproc(SB)
+ POPQ AX
+
+ // start this M
+ CALL runtime·mstart(SB)
+
+ CALL runtime·abort(SB) // mstart should never return
+ RET
+
+bad_cpu: // show that the program requires a certain microarchitecture level.
+ MOVQ $2, 0(SP)
+ MOVQ $bad_cpu_msg<>(SB), AX
+ MOVQ AX, 8(SP)
+ MOVQ $84, 16(SP)
+ CALL runtime·write(SB)
+ MOVQ $1, 0(SP)
+ CALL runtime·exit(SB)
+ CALL runtime·abort(SB)
+ RET
+
+ // Prevent dead-code elimination of debugCallV2, which is
+ // intended to be called by debuggers.
+ MOVQ $runtime·debugCallV2<ABIInternal>(SB), AX
+ RET
+
+// mainPC is a function value for runtime.main, to be passed to newproc.
+// The reference to runtime.main is made via ABIInternal, since the
+// actual function (not the ABI0 wrapper) is needed by newproc.
+DATA runtime·mainPC+0(SB)/8,$runtime·main<ABIInternal>(SB)
+GLOBL runtime·mainPC(SB),RODATA,$8
+
+TEXT runtime·breakpoint(SB),NOSPLIT,$0-0
+ BYTE $0xcc
+ RET
+
+TEXT runtime·asminit(SB),NOSPLIT,$0-0
+ // No per-thread init.
+ RET
+
+TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0
+ CALL runtime·mstart0(SB)
+ RET // not reached
+
+/*
+ * go-routine
+ */
+
+// func gogo(buf *gobuf)
+// restore state from Gobuf; longjmp
+TEXT runtime·gogo(SB), NOSPLIT, $0-8
+ MOVQ buf+0(FP), BX // gobuf
+ MOVQ gobuf_g(BX), DX
+ MOVQ 0(DX), CX // make sure g != nil
+ JMP gogo<>(SB)
+
+TEXT gogo<>(SB), NOSPLIT, $0
+ get_tls(CX)
+ MOVQ DX, g(CX)
+ MOVQ DX, R14 // set the g register
+ MOVQ gobuf_sp(BX), SP // restore SP
+ MOVQ gobuf_ret(BX), AX
+ MOVQ gobuf_ctxt(BX), DX
+ MOVQ gobuf_bp(BX), BP
+ MOVQ $0, gobuf_sp(BX) // clear to help garbage collector
+ MOVQ $0, gobuf_ret(BX)
+ MOVQ $0, gobuf_ctxt(BX)
+ MOVQ $0, gobuf_bp(BX)
+ MOVQ gobuf_pc(BX), BX
+ JMP BX
+
+// func mcall(fn func(*g))
+// Switch to m->g0's stack, call fn(g).
+// Fn must never return. It should gogo(&g->sched)
+// to keep running g.
+TEXT runtime·mcall<ABIInternal>(SB), NOSPLIT, $0-8
+ MOVQ AX, DX // DX = fn
+
+ // save state in g->sched
+ MOVQ 0(SP), BX // caller's PC
+ MOVQ BX, (g_sched+gobuf_pc)(R14)
+ LEAQ fn+0(FP), BX // caller's SP
+ MOVQ BX, (g_sched+gobuf_sp)(R14)
+ MOVQ BP, (g_sched+gobuf_bp)(R14)
+
+ // switch to m->g0 & its stack, call fn
+ MOVQ g_m(R14), BX
+ MOVQ m_g0(BX), SI // SI = g.m.g0
+ CMPQ SI, R14 // if g == m->g0 call badmcall
+ JNE goodm
+ JMP runtime·badmcall(SB)
+goodm:
+ MOVQ R14, AX // AX (and arg 0) = g
+ MOVQ SI, R14 // g = g.m.g0
+ get_tls(CX) // Set G in TLS
+ MOVQ R14, g(CX)
+ MOVQ (g_sched+gobuf_sp)(R14), SP // sp = g0.sched.sp
+ PUSHQ AX // open up space for fn's arg spill slot
+ MOVQ 0(DX), R12
+ CALL R12 // fn(g)
+ POPQ AX
+ JMP runtime·badmcall2(SB)
+ RET
+
+// systemstack_switch is a dummy routine that systemstack leaves at the bottom
+// of the G stack. We need to distinguish the routine that
+// lives at the bottom of the G stack from the one that lives
+// at the top of the system stack because the one at the top of
+// the system stack terminates the stack walk (see topofstack()).
+TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
+ RET
+
+// func systemstack(fn func())
+TEXT runtime·systemstack(SB), NOSPLIT, $0-8
+ MOVQ fn+0(FP), DI // DI = fn
+ get_tls(CX)
+ MOVQ g(CX), AX // AX = g
+ MOVQ g_m(AX), BX // BX = m
+
+ CMPQ AX, m_gsignal(BX)
+ JEQ noswitch
+
+ MOVQ m_g0(BX), DX // DX = g0
+ CMPQ AX, DX
+ JEQ noswitch
+
+ CMPQ AX, m_curg(BX)
+ JNE bad
+
+ // switch stacks
+ // save our state in g->sched. Pretend to
+ // be systemstack_switch if the G stack is scanned.
+ CALL gosave_systemstack_switch<>(SB)
+
+ // switch to g0
+ MOVQ DX, g(CX)
+ MOVQ DX, R14 // set the g register
+ MOVQ (g_sched+gobuf_sp)(DX), BX
+ MOVQ BX, SP
+
+ // call target function
+ MOVQ DI, DX
+ MOVQ 0(DI), DI
+ CALL DI
+
+ // switch back to g
+ get_tls(CX)
+ MOVQ g(CX), AX
+ MOVQ g_m(AX), BX
+ MOVQ m_curg(BX), AX
+ MOVQ AX, g(CX)
+ MOVQ (g_sched+gobuf_sp)(AX), SP
+ MOVQ $0, (g_sched+gobuf_sp)(AX)
+ RET
+
+noswitch:
+ // already on m stack; tail call the function
+ // Using a tail call here cleans up tracebacks since we won't stop
+ // at an intermediate systemstack.
+ MOVQ DI, DX
+ MOVQ 0(DI), DI
+ JMP DI
+
+bad:
+ // Bad: g is not gsignal, not g0, not curg. What is it?
+ MOVQ $runtime·badsystemstack(SB), AX
+ CALL AX
+ INT $3
+
+
+/*
+ * support for morestack
+ */
+
+// Called during function prolog when more stack is needed.
+//
+// The traceback routines see morestack on a g0 as being
+// the top of a stack (for example, morestack calling newstack
+// calling the scheduler calling newm calling gc), so we must
+// record an argument size. For that purpose, it has no arguments.
+TEXT runtime·morestack(SB),NOSPLIT,$0-0
+ // Cannot grow scheduler stack (m->g0).
+ get_tls(CX)
+ MOVQ g(CX), BX
+ MOVQ g_m(BX), BX
+ MOVQ m_g0(BX), SI
+ CMPQ g(CX), SI
+ JNE 3(PC)
+ CALL runtime·badmorestackg0(SB)
+ CALL runtime·abort(SB)
+
+ // Cannot grow signal stack (m->gsignal).
+ MOVQ m_gsignal(BX), SI
+ CMPQ g(CX), SI
+ JNE 3(PC)
+ CALL runtime·badmorestackgsignal(SB)
+ CALL runtime·abort(SB)
+
+ // Called from f.
+ // Set m->morebuf to f's caller.
+ NOP SP // tell vet SP changed - stop checking offsets
+ MOVQ 8(SP), AX // f's caller's PC
+ MOVQ AX, (m_morebuf+gobuf_pc)(BX)
+ LEAQ 16(SP), AX // f's caller's SP
+ MOVQ AX, (m_morebuf+gobuf_sp)(BX)
+ get_tls(CX)
+ MOVQ g(CX), SI
+ MOVQ SI, (m_morebuf+gobuf_g)(BX)
+
+ // Set g->sched to context in f.
+ MOVQ 0(SP), AX // f's PC
+ MOVQ AX, (g_sched+gobuf_pc)(SI)
+ LEAQ 8(SP), AX // f's SP
+ MOVQ AX, (g_sched+gobuf_sp)(SI)
+ MOVQ BP, (g_sched+gobuf_bp)(SI)
+ MOVQ DX, (g_sched+gobuf_ctxt)(SI)
+
+ // Call newstack on m->g0's stack.
+ MOVQ m_g0(BX), BX
+ MOVQ BX, g(CX)
+ MOVQ (g_sched+gobuf_sp)(BX), SP
+ CALL runtime·newstack(SB)
+ CALL runtime·abort(SB) // crash if newstack returns
+ RET
+
+// morestack but not preserving ctxt.
+TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0
+ MOVL $0, DX
+ JMP runtime·morestack(SB)
+
+// spillArgs stores return values from registers to a *internal/abi.RegArgs in R12.
+TEXT ·spillArgs(SB),NOSPLIT,$0-0
+ MOVQ AX, 0(R12)
+ MOVQ BX, 8(R12)
+ MOVQ CX, 16(R12)
+ MOVQ DI, 24(R12)
+ MOVQ SI, 32(R12)
+ MOVQ R8, 40(R12)
+ MOVQ R9, 48(R12)
+ MOVQ R10, 56(R12)
+ MOVQ R11, 64(R12)
+ MOVQ X0, 72(R12)
+ MOVQ X1, 80(R12)
+ MOVQ X2, 88(R12)
+ MOVQ X3, 96(R12)
+ MOVQ X4, 104(R12)
+ MOVQ X5, 112(R12)
+ MOVQ X6, 120(R12)
+ MOVQ X7, 128(R12)
+ MOVQ X8, 136(R12)
+ MOVQ X9, 144(R12)
+ MOVQ X10, 152(R12)
+ MOVQ X11, 160(R12)
+ MOVQ X12, 168(R12)
+ MOVQ X13, 176(R12)
+ MOVQ X14, 184(R12)
+ RET
+
+// unspillArgs loads args into registers from a *internal/abi.RegArgs in R12.
+TEXT ·unspillArgs(SB),NOSPLIT,$0-0
+ MOVQ 0(R12), AX
+ MOVQ 8(R12), BX
+ MOVQ 16(R12), CX
+ MOVQ 24(R12), DI
+ MOVQ 32(R12), SI
+ MOVQ 40(R12), R8
+ MOVQ 48(R12), R9
+ MOVQ 56(R12), R10
+ MOVQ 64(R12), R11
+ MOVQ 72(R12), X0
+ MOVQ 80(R12), X1
+ MOVQ 88(R12), X2
+ MOVQ 96(R12), X3
+ MOVQ 104(R12), X4
+ MOVQ 112(R12), X5
+ MOVQ 120(R12), X6
+ MOVQ 128(R12), X7
+ MOVQ 136(R12), X8
+ MOVQ 144(R12), X9
+ MOVQ 152(R12), X10
+ MOVQ 160(R12), X11
+ MOVQ 168(R12), X12
+ MOVQ 176(R12), X13
+ MOVQ 184(R12), X14
+ RET
+
+// reflectcall: call a function with the given argument list
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
+// We don't have variable-sized frames, so we use a small number
+// of constant-sized-frame functions to encode a few bits of size in the pc.
+// Caution: ugly multiline assembly macros in your future!
+
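+// In Go terms, the dispatch below behaves like this sketch (hypothetical
+// helper names, for illustration only):
+//
+//	for _, max := range []uint32{16, 32, 64 /* ... doubling ... */, 1 << 30} {
+//		if frameSize <= max {
+//			jump("runtime.call" + itoa(max)) // tail call the sized variant
+//		}
+//	}
+//	jump("runtime.badreflectcall")
+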
+#define DISPATCH(NAME,MAXSIZE) \
+ CMPQ CX, $MAXSIZE; \
+ JA 3(PC); \
+ MOVQ $NAME(SB), AX; \
+ JMP AX
+// Note: can't just "JMP NAME(SB)" - bad inlining results.
+
+TEXT ·reflectcall(SB), NOSPLIT, $0-48
+ MOVLQZX frameSize+32(FP), CX
+ DISPATCH(runtime·call16, 16)
+ DISPATCH(runtime·call32, 32)
+ DISPATCH(runtime·call64, 64)
+ DISPATCH(runtime·call128, 128)
+ DISPATCH(runtime·call256, 256)
+ DISPATCH(runtime·call512, 512)
+ DISPATCH(runtime·call1024, 1024)
+ DISPATCH(runtime·call2048, 2048)
+ DISPATCH(runtime·call4096, 4096)
+ DISPATCH(runtime·call8192, 8192)
+ DISPATCH(runtime·call16384, 16384)
+ DISPATCH(runtime·call32768, 32768)
+ DISPATCH(runtime·call65536, 65536)
+ DISPATCH(runtime·call131072, 131072)
+ DISPATCH(runtime·call262144, 262144)
+ DISPATCH(runtime·call524288, 524288)
+ DISPATCH(runtime·call1048576, 1048576)
+ DISPATCH(runtime·call2097152, 2097152)
+ DISPATCH(runtime·call4194304, 4194304)
+ DISPATCH(runtime·call8388608, 8388608)
+ DISPATCH(runtime·call16777216, 16777216)
+ DISPATCH(runtime·call33554432, 33554432)
+ DISPATCH(runtime·call67108864, 67108864)
+ DISPATCH(runtime·call134217728, 134217728)
+ DISPATCH(runtime·call268435456, 268435456)
+ DISPATCH(runtime·call536870912, 536870912)
+ DISPATCH(runtime·call1073741824, 1073741824)
+ MOVQ $runtime·badreflectcall(SB), AX
+ JMP AX
+
+#define CALLFN(NAME,MAXSIZE) \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
+ NO_LOCAL_POINTERS; \
+ /* copy arguments to stack */ \
+ MOVQ stackArgs+16(FP), SI; \
+ MOVLQZX stackArgsSize+24(FP), CX; \
+ MOVQ SP, DI; \
+ REP;MOVSB; \
+ /* set up argument registers */ \
+ MOVQ regArgs+40(FP), R12; \
+ CALL ·unspillArgs(SB); \
+ /* call function */ \
+ MOVQ f+8(FP), DX; \
+ PCDATA $PCDATA_StackMapIndex, $0; \
+ MOVQ (DX), R12; \
+ CALL R12; \
+ /* copy register return values back */ \
+ MOVQ regArgs+40(FP), R12; \
+ CALL ·spillArgs(SB); \
+ MOVLQZX stackArgsSize+24(FP), CX; \
+ MOVLQZX stackRetOffset+28(FP), BX; \
+ MOVQ stackArgs+16(FP), DI; \
+ MOVQ stackArgsType+0(FP), DX; \
+ MOVQ SP, SI; \
+ ADDQ BX, DI; \
+ ADDQ BX, SI; \
+ SUBQ BX, CX; \
+ CALL callRet<>(SB); \
+ RET
+
+// callRet copies return values back at the end of call*. This is a
+// separate function so it can allocate stack space for the arguments
+// to reflectcallmove. It does not follow the Go ABI; it expects its
+// arguments in registers.
+TEXT callRet<>(SB), NOSPLIT, $40-0
+ NO_LOCAL_POINTERS
+ MOVQ DX, 0(SP)
+ MOVQ DI, 8(SP)
+ MOVQ SI, 16(SP)
+ MOVQ CX, 24(SP)
+ MOVQ R12, 32(SP)
+ CALL runtime·reflectcallmove(SB)
+ RET
+
+CALLFN(·call16, 16)
+CALLFN(·call32, 32)
+CALLFN(·call64, 64)
+CALLFN(·call128, 128)
+CALLFN(·call256, 256)
+CALLFN(·call512, 512)
+CALLFN(·call1024, 1024)
+CALLFN(·call2048, 2048)
+CALLFN(·call4096, 4096)
+CALLFN(·call8192, 8192)
+CALLFN(·call16384, 16384)
+CALLFN(·call32768, 32768)
+CALLFN(·call65536, 65536)
+CALLFN(·call131072, 131072)
+CALLFN(·call262144, 262144)
+CALLFN(·call524288, 524288)
+CALLFN(·call1048576, 1048576)
+CALLFN(·call2097152, 2097152)
+CALLFN(·call4194304, 4194304)
+CALLFN(·call8388608, 8388608)
+CALLFN(·call16777216, 16777216)
+CALLFN(·call33554432, 33554432)
+CALLFN(·call67108864, 67108864)
+CALLFN(·call134217728, 134217728)
+CALLFN(·call268435456, 268435456)
+CALLFN(·call536870912, 536870912)
+CALLFN(·call1073741824, 1073741824)
+
+TEXT runtime·procyield(SB),NOSPLIT,$0-0
+ MOVL cycles+0(FP), AX
+again:
+ PAUSE
+ SUBL $1, AX
+ JNZ again
+ RET
+
+
+TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
+ // Stores are already ordered on x86, so this is just a
+ // compile barrier.
+ RET
+
+// Save state of caller into g->sched,
+// but using fake PC from systemstack_switch.
+// Must only be called from functions with no locals ($0)
+// or else unwinding from systemstack_switch is incorrect.
+// Smashes R9.
+TEXT gosave_systemstack_switch<>(SB),NOSPLIT,$0
+ MOVQ $runtime·systemstack_switch(SB), R9
+ MOVQ R9, (g_sched+gobuf_pc)(R14)
+ LEAQ 8(SP), R9
+ MOVQ R9, (g_sched+gobuf_sp)(R14)
+ MOVQ $0, (g_sched+gobuf_ret)(R14)
+ MOVQ BP, (g_sched+gobuf_bp)(R14)
+ // Assert ctxt is zero. See func save.
+ MOVQ (g_sched+gobuf_ctxt)(R14), R9
+ TESTQ R9, R9
+ JZ 2(PC)
+ CALL runtime·abort(SB)
+ RET
+
+// func asmcgocall_no_g(fn, arg unsafe.Pointer)
+// Call fn(arg) aligned appropriately for the gcc ABI.
+// Called on a system stack, and there may be no g yet (during needm).
+TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16
+ MOVQ fn+0(FP), AX
+ MOVQ arg+8(FP), BX
+ MOVQ SP, DX
+ SUBQ $32, SP
+ ANDQ $~15, SP // alignment
+ MOVQ DX, 8(SP)
+ MOVQ BX, DI // DI = first argument in AMD64 ABI
+ MOVQ BX, CX // CX = first argument in Win64
+ CALL AX
+ MOVQ 8(SP), DX
+ MOVQ DX, SP
+ RET
+
+// func asmcgocall(fn, arg unsafe.Pointer) int32
+// Call fn(arg) on the scheduler stack,
+// aligned appropriately for the gcc ABI.
+// See cgocall.go for more details.
+TEXT ·asmcgocall(SB),NOSPLIT,$0-20
+ MOVQ fn+0(FP), AX
+ MOVQ arg+8(FP), BX
+
+ MOVQ SP, DX
+
+ // Figure out if we need to switch to m->g0 stack.
+ // We get called to create new OS threads too, and those
+ // come in on the m->g0 stack already. Or we might already
+ // be on the m->gsignal stack.
+ get_tls(CX)
+ MOVQ g(CX), DI
+ CMPQ DI, $0
+ JEQ nosave
+ MOVQ g_m(DI), R8
+ MOVQ m_gsignal(R8), SI
+ CMPQ DI, SI
+ JEQ nosave
+ MOVQ m_g0(R8), SI
+ CMPQ DI, SI
+ JEQ nosave
+
+ // Switch to system stack.
+ CALL gosave_systemstack_switch<>(SB)
+ MOVQ SI, g(CX)
+ MOVQ (g_sched+gobuf_sp)(SI), SP
+
+ // Now on a scheduling stack (a pthread-created stack).
+ // Make sure we have enough room for 4 stack-backed fast-call
+ // registers as per windows amd64 calling convention.
+ SUBQ $64, SP
+ ANDQ $~15, SP // alignment for gcc ABI
+ MOVQ DI, 48(SP) // save g
+ MOVQ (g_stack+stack_hi)(DI), DI
+ SUBQ DX, DI
+ MOVQ DI, 40(SP) // save depth in stack (can't just save SP, as stack might be copied during a callback)
+ MOVQ BX, DI // DI = first argument in AMD64 ABI
+ MOVQ BX, CX // CX = first argument in Win64
+ CALL AX
+
+ // Restore registers, g, stack pointer.
+ get_tls(CX)
+ MOVQ 48(SP), DI
+ MOVQ (g_stack+stack_hi)(DI), SI
+ SUBQ 40(SP), SI
+ MOVQ DI, g(CX)
+ MOVQ SI, SP
+
+ MOVL AX, ret+16(FP)
+ RET
+
+nosave:
+ // Running on a system stack, perhaps even without a g.
+ // Having no g can happen during thread creation or thread teardown
+ // (see needm/dropm on Solaris, for example).
+ // This code is like the above sequence but without saving/restoring g
+ // and without worrying about the stack moving out from under us
+ // (because we're on a system stack, not a goroutine stack).
+ // The above code could be used directly if already on a system stack,
+ // but then the only path through this code would be a rare case on Solaris.
+ // Using this code for all "already on system stack" calls exercises it more,
+ // which should help keep it correct.
+ SUBQ $64, SP
+ ANDQ $~15, SP
+ MOVQ $0, 48(SP) // where above code stores g, in case someone looks during debugging
+ MOVQ DX, 40(SP) // save original stack pointer
+ MOVQ BX, DI // DI = first argument in AMD64 ABI
+ MOVQ BX, CX // CX = first argument in Win64
+ CALL AX
+ MOVQ 40(SP), SI // restore original stack pointer
+ MOVQ SI, SP
+ MOVL AX, ret+16(FP)
+ RET
+
+#ifdef GOOS_windows
+// Dummy TLS that's used on Windows so that we don't crash trying
+// to restore the G register in needm. needm and its callees are
+// very careful never to actually use the G; the TLS just can't be
+// unset since we're in Go code.
+GLOBL zeroTLS<>(SB),RODATA,$const_tlsSize
+#endif
+
+// func cgocallback(fn, frame unsafe.Pointer, ctxt uintptr)
+// See cgocall.go for more details.
+TEXT ·cgocallback(SB),NOSPLIT,$24-24
+ NO_LOCAL_POINTERS
+
+ // If g is nil, Go did not create the current thread.
+ // Call needm to obtain one m for temporary use.
+ // In this case, we're running on the thread stack, so there's
+ // lots of space, but the linker doesn't know. Hide the call from
+ // the linker analysis by using an indirect call through AX.
+ get_tls(CX)
+#ifdef GOOS_windows
+ MOVL $0, BX
+ CMPQ CX, $0
+ JEQ 2(PC)
+#endif
+ MOVQ g(CX), BX
+ CMPQ BX, $0
+ JEQ needm
+ MOVQ g_m(BX), BX
+ MOVQ BX, savedm-8(SP) // saved copy of oldm
+ JMP havem
+needm:
+#ifdef GOOS_windows
+ // Set up a dummy TLS value. needm is careful not to use it,
+ // but it needs to be there to prevent autogenerated code from
+ // crashing when it loads from it.
+ // We don't need to clear it or anything later because needm
+ // will set up TLS properly.
+ MOVQ $zeroTLS<>(SB), DI
+ CALL runtime·settls(SB)
+#endif
+ // On some platforms (Windows) we cannot call needm through
+ // an ABI wrapper because there's no TLS set up, and the ABI
+ // wrapper will try to restore the G register (R14) from TLS.
+ // Clear X15 because Go expects it and we're not calling
+ // through a wrapper, but otherwise avoid setting the G
+ // register in the wrapper and call needm directly. It
+ // takes no arguments and doesn't return any values so
+ // there's no need to handle that. Clear R14 so that there's
+ // a bad value in there, in case needm tries to use it.
+ XORPS X15, X15
+ XORQ R14, R14
+ MOVQ $runtime·needm<ABIInternal>(SB), AX
+ CALL AX
+ MOVQ $0, savedm-8(SP) // dropm on return
+ get_tls(CX)
+ MOVQ g(CX), BX
+ MOVQ g_m(BX), BX
+
+ // Set m->sched.sp = SP, so that if a panic happens
+ // during the function we are about to execute, it will
+ // have a valid SP to run on the g0 stack.
+ // The next few lines (after the havem label)
+ // will save this SP onto the stack and then write
+ // the same SP back to m->sched.sp. That seems redundant,
+ // but if an unrecovered panic happens, unwindm will
+ // restore the g->sched.sp from the stack location
+ // and then systemstack will try to use it. If we don't set it here,
+ // that restored SP will be uninitialized (typically 0) and
+ // will not be usable.
+ MOVQ m_g0(BX), SI
+ MOVQ SP, (g_sched+gobuf_sp)(SI)
+
+havem:
+ // Now there's a valid m, and we're running on its m->g0.
+ // Save current m->g0->sched.sp on stack and then set it to SP.
+ // Save current sp in m->g0->sched.sp in preparation for
+ // switch back to m->curg stack.
+ // NOTE: unwindm knows that the saved g->sched.sp is at 0(SP).
+ MOVQ m_g0(BX), SI
+ MOVQ (g_sched+gobuf_sp)(SI), AX
+ MOVQ AX, 0(SP)
+ MOVQ SP, (g_sched+gobuf_sp)(SI)
+
+ // Switch to m->curg stack and call runtime.cgocallbackg.
+ // Because we are taking over the execution of m->curg
+ // but *not* resuming what had been running, we need to
+ // save that information (m->curg->sched) so we can restore it.
+ // We can restore m->curg->sched.sp easily, because calling
+ // runtime.cgocallbackg leaves SP unchanged upon return.
+ // To save m->curg->sched.pc, we push it onto the curg stack and
+ // open a frame the same size as cgocallback's g0 frame.
+ // Once we switch to the curg stack, the pushed PC will appear
+ // to be the return PC of cgocallback, so that the traceback
+ // will seamlessly trace back into the earlier calls.
+ MOVQ m_curg(BX), SI
+ MOVQ SI, g(CX)
+ MOVQ (g_sched+gobuf_sp)(SI), DI // prepare stack as DI
+ MOVQ (g_sched+gobuf_pc)(SI), BX
+ MOVQ BX, -8(DI) // "push" return PC on the g stack
+ // Gather our arguments into registers.
+ MOVQ fn+0(FP), BX
+ MOVQ frame+8(FP), CX
+ MOVQ ctxt+16(FP), DX
+ // Compute the size of the frame, including return PC and, if
+ // GOEXPERIMENT=framepointer, the saved base pointer
+ LEAQ fn+0(FP), AX
+ SUBQ SP, AX // AX is our actual frame size
+ SUBQ AX, DI // Allocate the same frame size on the g stack
+ MOVQ DI, SP
+
+ MOVQ BX, 0(SP)
+ MOVQ CX, 8(SP)
+ MOVQ DX, 16(SP)
+ MOVQ $runtime·cgocallbackg(SB), AX
+ CALL AX // indirect call to bypass nosplit check. We're on a different stack now.
+
+ // Compute the size of the frame again. FP and SP have
+ // completely different values here than they did above,
+ // but only their difference matters.
+ LEAQ fn+0(FP), AX
+ SUBQ SP, AX
+
+ // Restore g->sched (== m->curg->sched) from saved values.
+ get_tls(CX)
+ MOVQ g(CX), SI
+ MOVQ SP, DI
+ ADDQ AX, DI
+ MOVQ -8(DI), BX
+ MOVQ BX, (g_sched+gobuf_pc)(SI)
+ MOVQ DI, (g_sched+gobuf_sp)(SI)
+
+ // Switch back to m->g0's stack and restore m->g0->sched.sp.
+ // (Unlike m->curg, the g0 goroutine never uses sched.pc,
+ // so we do not have to restore it.)
+ MOVQ g(CX), BX
+ MOVQ g_m(BX), BX
+ MOVQ m_g0(BX), SI
+ MOVQ SI, g(CX)
+ MOVQ (g_sched+gobuf_sp)(SI), SP
+ MOVQ 0(SP), AX
+ MOVQ AX, (g_sched+gobuf_sp)(SI)
+
+ // If the m on entry was nil, we called needm above to borrow an m
+ // for the duration of the call. Since the call is over, return it with dropm.
+ MOVQ savedm-8(SP), BX
+ CMPQ BX, $0
+ JNE done
+ MOVQ $runtime·dropm(SB), AX
+ CALL AX
+#ifdef GOOS_windows
+ // We need to clear the TLS pointer in case the next
+ // thread that comes into Go tries to reuse that space
+ // but uses the same M.
+ XORQ DI, DI
+ CALL runtime·settls(SB)
+#endif
+done:
+
+ // Done!
+ RET
+
+// func setg(gg *g)
+// set g. for use by needm.
+TEXT runtime·setg(SB), NOSPLIT, $0-8
+ MOVQ gg+0(FP), BX
+ get_tls(CX)
+ MOVQ BX, g(CX)
+ RET
+
+// void setg_gcc(G*); set g called from gcc.
+TEXT setg_gcc<>(SB),NOSPLIT,$0
+ get_tls(AX)
+ MOVQ DI, g(AX)
+ MOVQ DI, R14 // set the g register
+ RET
+
+TEXT runtime·abort(SB),NOSPLIT,$0-0
+ INT $3
+loop:
+ JMP loop
+
+// check that SP is in range [g->stack.lo, g->stack.hi)
+TEXT runtime·stackcheck(SB), NOSPLIT, $0-0
+ get_tls(CX)
+ MOVQ g(CX), AX
+ CMPQ (g_stack+stack_hi)(AX), SP
+ JHI 2(PC)
+ CALL runtime·abort(SB)
+ CMPQ SP, (g_stack+stack_lo)(AX)
+ JHI 2(PC)
+ CALL runtime·abort(SB)
+ RET
+
+// func cputicks() int64
+TEXT runtime·cputicks(SB),NOSPLIT,$0-0
+ CMPB internal∕cpu·X86+const_offsetX86HasRDTSCP(SB), $1
+ JNE fences
+ // Instruction stream serializing RDTSCP is supported.
+ // RDTSCP is supported by Intel Nehalem (2008) and
+ // AMD K8 Rev. F (2006) and newer.
+ RDTSCP
+done:
+ SHLQ $32, DX
+ ADDQ DX, AX
+ MOVQ AX, ret+0(FP)
+ RET
+fences:
+ // MFENCE is instruction stream serializing and flushes the
+ // store buffers on AMD. The serialization semantics of LFENCE on AMD
+ // are dependent on MSR C001_1029 and CPU generation.
+ // LFENCE on Intel does wait for all previous instructions to have executed.
+ // Intel recommends MFENCE;LFENCE in its manuals before RDTSC to have all
+ // previous instructions executed and all previous loads and stores to be globally visible.
+ // Using MFENCE;LFENCE here aligns the serializing properties without
+ // runtime detection of CPU manufacturer.
+ MFENCE
+ LFENCE
+ RDTSC
+ JMP done
+
+// func memhash(p unsafe.Pointer, h, s uintptr) uintptr
+// hash function using AES hardware instructions
+TEXT runtime·memhash<ABIInternal>(SB),NOSPLIT,$0-32
+ // AX = ptr to data
+ // BX = seed
+ // CX = size
+ CMPB runtime·useAeshash(SB), $0
+ JEQ noaes
+ JMP aeshashbody<>(SB)
+noaes:
+ JMP runtime·memhashFallback<ABIInternal>(SB)
+
+// func strhash(p unsafe.Pointer, h uintptr) uintptr
+TEXT runtime·strhash<ABIInternal>(SB),NOSPLIT,$0-24
+ // AX = ptr to string struct
+ // BX = seed
+ CMPB runtime·useAeshash(SB), $0
+ JEQ noaes
+ MOVQ 8(AX), CX // length of string
+ MOVQ (AX), AX // string data
+ JMP aeshashbody<>(SB)
+noaes:
+ JMP runtime·strhashFallback<ABIInternal>(SB)
+
+// AX: data
+// BX: hash seed
+// CX: length
+// At return: AX = return value
+TEXT aeshashbody<>(SB),NOSPLIT,$0-0
+ // Fill an SSE register with our seeds.
+ MOVQ BX, X0 // 64 bits of per-table hash seed
+ PINSRW $4, CX, X0 // 16 bits of length
+ PSHUFHW $0, X0, X0 // repeat length 4 times total
+ MOVO X0, X1 // save unscrambled seed
+ PXOR runtime·aeskeysched(SB), X0 // xor in per-process seed
+ AESENC X0, X0 // scramble seed
+
+ CMPQ CX, $16
+ JB aes0to15
+ JE aes16
+ CMPQ CX, $32
+ JBE aes17to32
+ CMPQ CX, $64
+ JBE aes33to64
+ CMPQ CX, $128
+ JBE aes65to128
+ JMP aes129plus
+
+aes0to15:
+ TESTQ CX, CX
+ JE aes0
+
+ ADDQ $16, AX
+ TESTW $0xff0, AX
+ JE endofpage
+
+ // 16 bytes loaded at this address won't cross
+ // a page boundary, so we can load it directly.
+ MOVOU -16(AX), X1
+ ADDQ CX, CX
+ MOVQ $masks<>(SB), AX
+ PAND (AX)(CX*8), X1
+final1:
+ PXOR X0, X1 // xor data with seed
+ AESENC X1, X1 // scramble combo 3 times
+ AESENC X1, X1
+ AESENC X1, X1
+ MOVQ X1, AX // return X1
+ RET
+
+endofpage:
+ // address ends in 1111xxxx. Might be up against
+ // a page boundary, so load ending at last byte.
+ // Then shift bytes down using pshufb.
+ MOVOU -32(AX)(CX*1), X1
+ ADDQ CX, CX
+ MOVQ $shifts<>(SB), AX
+ PSHUFB (AX)(CX*8), X1
+ JMP final1
+
+aes0:
+ // Return scrambled input seed
+ AESENC X0, X0
+ MOVQ X0, AX // return X0
+ RET
+
+aes16:
+ MOVOU (AX), X1
+ JMP final1
+
+aes17to32:
+ // make second starting seed
+ PXOR runtime·aeskeysched+16(SB), X1
+ AESENC X1, X1
+
+ // load data to be hashed
+ MOVOU (AX), X2
+ MOVOU -16(AX)(CX*1), X3
+
+ // xor with seed
+ PXOR X0, X2
+ PXOR X1, X3
+
+ // scramble 3 times
+ AESENC X2, X2
+ AESENC X3, X3
+ AESENC X2, X2
+ AESENC X3, X3
+ AESENC X2, X2
+ AESENC X3, X3
+
+ // combine results
+ PXOR X3, X2
+ MOVQ X2, AX // return X2
+ RET
+
+aes33to64:
+ // make 3 more starting seeds
+ MOVO X1, X2
+ MOVO X1, X3
+ PXOR runtime·aeskeysched+16(SB), X1
+ PXOR runtime·aeskeysched+32(SB), X2
+ PXOR runtime·aeskeysched+48(SB), X3
+ AESENC X1, X1
+ AESENC X2, X2
+ AESENC X3, X3
+
+ MOVOU (AX), X4
+ MOVOU 16(AX), X5
+ MOVOU -32(AX)(CX*1), X6
+ MOVOU -16(AX)(CX*1), X7
+
+ PXOR X0, X4
+ PXOR X1, X5
+ PXOR X2, X6
+ PXOR X3, X7
+
+ AESENC X4, X4
+ AESENC X5, X5
+ AESENC X6, X6
+ AESENC X7, X7
+
+ AESENC X4, X4
+ AESENC X5, X5
+ AESENC X6, X6
+ AESENC X7, X7
+
+ AESENC X4, X4
+ AESENC X5, X5
+ AESENC X6, X6
+ AESENC X7, X7
+
+ PXOR X6, X4
+ PXOR X7, X5
+ PXOR X5, X4
+ MOVQ X4, AX // return X4
+ RET
+
+aes65to128:
+ // make 7 more starting seeds
+ MOVO X1, X2
+ MOVO X1, X3
+ MOVO X1, X4
+ MOVO X1, X5
+ MOVO X1, X6
+ MOVO X1, X7
+ PXOR runtime·aeskeysched+16(SB), X1
+ PXOR runtime·aeskeysched+32(SB), X2
+ PXOR runtime·aeskeysched+48(SB), X3
+ PXOR runtime·aeskeysched+64(SB), X4
+ PXOR runtime·aeskeysched+80(SB), X5
+ PXOR runtime·aeskeysched+96(SB), X6
+ PXOR runtime·aeskeysched+112(SB), X7
+ AESENC X1, X1
+ AESENC X2, X2
+ AESENC X3, X3
+ AESENC X4, X4
+ AESENC X5, X5
+ AESENC X6, X6
+ AESENC X7, X7
+
+ // load data
+ MOVOU (AX), X8
+ MOVOU 16(AX), X9
+ MOVOU 32(AX), X10
+ MOVOU 48(AX), X11
+ MOVOU -64(AX)(CX*1), X12
+ MOVOU -48(AX)(CX*1), X13
+ MOVOU -32(AX)(CX*1), X14
+ MOVOU -16(AX)(CX*1), X15
+
+ // xor with seed
+ PXOR X0, X8
+ PXOR X1, X9
+ PXOR X2, X10
+ PXOR X3, X11
+ PXOR X4, X12
+ PXOR X5, X13
+ PXOR X6, X14
+ PXOR X7, X15
+
+ // scramble 3 times
+ AESENC X8, X8
+ AESENC X9, X9
+ AESENC X10, X10
+ AESENC X11, X11
+ AESENC X12, X12
+ AESENC X13, X13
+ AESENC X14, X14
+ AESENC X15, X15
+
+ AESENC X8, X8
+ AESENC X9, X9
+ AESENC X10, X10
+ AESENC X11, X11
+ AESENC X12, X12
+ AESENC X13, X13
+ AESENC X14, X14
+ AESENC X15, X15
+
+ AESENC X8, X8
+ AESENC X9, X9
+ AESENC X10, X10
+ AESENC X11, X11
+ AESENC X12, X12
+ AESENC X13, X13
+ AESENC X14, X14
+ AESENC X15, X15
+
+ // combine results
+ PXOR X12, X8
+ PXOR X13, X9
+ PXOR X14, X10
+ PXOR X15, X11
+ PXOR X10, X8
+ PXOR X11, X9
+ PXOR X9, X8
+ // X15 must be zero on return
+ PXOR X15, X15
+ MOVQ X8, AX // return X8
+ RET
+
+aes129plus:
+ // make 7 more starting seeds
+ MOVO X1, X2
+ MOVO X1, X3
+ MOVO X1, X4
+ MOVO X1, X5
+ MOVO X1, X6
+ MOVO X1, X7
+ PXOR runtime·aeskeysched+16(SB), X1
+ PXOR runtime·aeskeysched+32(SB), X2
+ PXOR runtime·aeskeysched+48(SB), X3
+ PXOR runtime·aeskeysched+64(SB), X4
+ PXOR runtime·aeskeysched+80(SB), X5
+ PXOR runtime·aeskeysched+96(SB), X6
+ PXOR runtime·aeskeysched+112(SB), X7
+ AESENC X1, X1
+ AESENC X2, X2
+ AESENC X3, X3
+ AESENC X4, X4
+ AESENC X5, X5
+ AESENC X6, X6
+ AESENC X7, X7
+
+ // start with last (possibly overlapping) block
+ MOVOU -128(AX)(CX*1), X8
+ MOVOU -112(AX)(CX*1), X9
+ MOVOU -96(AX)(CX*1), X10
+ MOVOU -80(AX)(CX*1), X11
+ MOVOU -64(AX)(CX*1), X12
+ MOVOU -48(AX)(CX*1), X13
+ MOVOU -32(AX)(CX*1), X14
+ MOVOU -16(AX)(CX*1), X15
+
+ // xor in seed
+ PXOR X0, X8
+ PXOR X1, X9
+ PXOR X2, X10
+ PXOR X3, X11
+ PXOR X4, X12
+ PXOR X5, X13
+ PXOR X6, X14
+ PXOR X7, X15
+
+ // compute number of remaining 128-byte blocks
+ DECQ CX
+ SHRQ $7, CX
+
+aesloop:
+ // scramble state
+ AESENC X8, X8
+ AESENC X9, X9
+ AESENC X10, X10
+ AESENC X11, X11
+ AESENC X12, X12
+ AESENC X13, X13
+ AESENC X14, X14
+ AESENC X15, X15
+
+ // scramble state, xor in a block
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU 32(AX), X2
+ MOVOU 48(AX), X3
+ AESENC X0, X8
+ AESENC X1, X9
+ AESENC X2, X10
+ AESENC X3, X11
+ MOVOU 64(AX), X4
+ MOVOU 80(AX), X5
+ MOVOU 96(AX), X6
+ MOVOU 112(AX), X7
+ AESENC X4, X12
+ AESENC X5, X13
+ AESENC X6, X14
+ AESENC X7, X15
+
+ ADDQ $128, AX
+ DECQ CX
+ JNE aesloop
+
+ // 3 more scrambles to finish
+ AESENC X8, X8
+ AESENC X9, X9
+ AESENC X10, X10
+ AESENC X11, X11
+ AESENC X12, X12
+ AESENC X13, X13
+ AESENC X14, X14
+ AESENC X15, X15
+ AESENC X8, X8
+ AESENC X9, X9
+ AESENC X10, X10
+ AESENC X11, X11
+ AESENC X12, X12
+ AESENC X13, X13
+ AESENC X14, X14
+ AESENC X15, X15
+ AESENC X8, X8
+ AESENC X9, X9
+ AESENC X10, X10
+ AESENC X11, X11
+ AESENC X12, X12
+ AESENC X13, X13
+ AESENC X14, X14
+ AESENC X15, X15
+
+ PXOR X12, X8
+ PXOR X13, X9
+ PXOR X14, X10
+ PXOR X15, X11
+ PXOR X10, X8
+ PXOR X11, X9
+ PXOR X9, X8
+ // X15 must be zero on return
+ PXOR X15, X15
+ MOVQ X8, AX // return X8
+ RET
+
+// func memhash32(p unsafe.Pointer, h uintptr) uintptr
+// ABIInternal for performance.
+TEXT runtime·memhash32<ABIInternal>(SB),NOSPLIT,$0-24
+ // AX = ptr to data
+ // BX = seed
+ CMPB runtime·useAeshash(SB), $0
+ JEQ noaes
+ MOVQ BX, X0 // X0 = seed
+ PINSRD $2, (AX), X0 // data
+ AESENC runtime·aeskeysched+0(SB), X0
+ AESENC runtime·aeskeysched+16(SB), X0
+ AESENC runtime·aeskeysched+32(SB), X0
+ MOVQ X0, AX // return X0
+ RET
+noaes:
+ JMP runtime·memhash32Fallback<ABIInternal>(SB)
+
+// func memhash64(p unsafe.Pointer, h uintptr) uintptr
+// ABIInternal for performance.
+TEXT runtime·memhash64<ABIInternal>(SB),NOSPLIT,$0-24
+ // AX = ptr to data
+ // BX = seed
+ CMPB runtime·useAeshash(SB), $0
+ JEQ noaes
+ MOVQ BX, X0 // X0 = seed
+ PINSRQ $1, (AX), X0 // data
+ AESENC runtime·aeskeysched+0(SB), X0
+ AESENC runtime·aeskeysched+16(SB), X0
+ AESENC runtime·aeskeysched+32(SB), X0
+ MOVQ X0, AX // return X0
+ RET
+noaes:
+ JMP runtime·memhash64Fallback<ABIInternal>(SB)
+
+// Simple masks to clear the data in the high part of the register.
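+// For example, the 16-byte mask at masks<>+0x30 keeps only the low 3
+// bytes of a register (low qword 0x0000000000ffffff, high qword 0); the
+// short-key hash paths (not shown here) use it to discard the bytes
+// past the end of a 3-byte key.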
+DATA masks<>+0x00(SB)/8, $0x0000000000000000
+DATA masks<>+0x08(SB)/8, $0x0000000000000000
+DATA masks<>+0x10(SB)/8, $0x00000000000000ff
+DATA masks<>+0x18(SB)/8, $0x0000000000000000
+DATA masks<>+0x20(SB)/8, $0x000000000000ffff
+DATA masks<>+0x28(SB)/8, $0x0000000000000000
+DATA masks<>+0x30(SB)/8, $0x0000000000ffffff
+DATA masks<>+0x38(SB)/8, $0x0000000000000000
+DATA masks<>+0x40(SB)/8, $0x00000000ffffffff
+DATA masks<>+0x48(SB)/8, $0x0000000000000000
+DATA masks<>+0x50(SB)/8, $0x000000ffffffffff
+DATA masks<>+0x58(SB)/8, $0x0000000000000000
+DATA masks<>+0x60(SB)/8, $0x0000ffffffffffff
+DATA masks<>+0x68(SB)/8, $0x0000000000000000
+DATA masks<>+0x70(SB)/8, $0x00ffffffffffffff
+DATA masks<>+0x78(SB)/8, $0x0000000000000000
+DATA masks<>+0x80(SB)/8, $0xffffffffffffffff
+DATA masks<>+0x88(SB)/8, $0x0000000000000000
+DATA masks<>+0x90(SB)/8, $0xffffffffffffffff
+DATA masks<>+0x98(SB)/8, $0x00000000000000ff
+DATA masks<>+0xa0(SB)/8, $0xffffffffffffffff
+DATA masks<>+0xa8(SB)/8, $0x000000000000ffff
+DATA masks<>+0xb0(SB)/8, $0xffffffffffffffff
+DATA masks<>+0xb8(SB)/8, $0x0000000000ffffff
+DATA masks<>+0xc0(SB)/8, $0xffffffffffffffff
+DATA masks<>+0xc8(SB)/8, $0x00000000ffffffff
+DATA masks<>+0xd0(SB)/8, $0xffffffffffffffff
+DATA masks<>+0xd8(SB)/8, $0x000000ffffffffff
+DATA masks<>+0xe0(SB)/8, $0xffffffffffffffff
+DATA masks<>+0xe8(SB)/8, $0x0000ffffffffffff
+DATA masks<>+0xf0(SB)/8, $0xffffffffffffffff
+DATA masks<>+0xf8(SB)/8, $0x00ffffffffffffff
+GLOBL masks<>(SB),RODATA,$256
+
+// func checkASM() bool
+TEXT ·checkASM(SB),NOSPLIT,$0-1
+	// check that masks<>(SB) and shifts<>(SB) are 16-byte aligned
+ MOVQ $masks<>(SB), AX
+ MOVQ $shifts<>(SB), BX
+ ORQ BX, AX
+ TESTQ $15, AX
+ SETEQ ret+0(FP)
+ RET
+
+// These are arguments to PSHUFB. They move data down from
+// the high bytes of the register to the low bytes of the register.
+// The index is how many bytes to move.
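+// For example, the entry at shifts<>+0x30 ($0xffffffffff0f0e0d with the
+// high qword all 0xff) selects source bytes 13-15 into destination bytes
+// 0-2 and zeroes everything else (PSHUFB zeroes any byte whose selector
+// has its high bit set), moving a 3-byte value from the top of the
+// register down to the bottom.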
+DATA shifts<>+0x00(SB)/8, $0x0000000000000000
+DATA shifts<>+0x08(SB)/8, $0x0000000000000000
+DATA shifts<>+0x10(SB)/8, $0xffffffffffffff0f
+DATA shifts<>+0x18(SB)/8, $0xffffffffffffffff
+DATA shifts<>+0x20(SB)/8, $0xffffffffffff0f0e
+DATA shifts<>+0x28(SB)/8, $0xffffffffffffffff
+DATA shifts<>+0x30(SB)/8, $0xffffffffff0f0e0d
+DATA shifts<>+0x38(SB)/8, $0xffffffffffffffff
+DATA shifts<>+0x40(SB)/8, $0xffffffff0f0e0d0c
+DATA shifts<>+0x48(SB)/8, $0xffffffffffffffff
+DATA shifts<>+0x50(SB)/8, $0xffffff0f0e0d0c0b
+DATA shifts<>+0x58(SB)/8, $0xffffffffffffffff
+DATA shifts<>+0x60(SB)/8, $0xffff0f0e0d0c0b0a
+DATA shifts<>+0x68(SB)/8, $0xffffffffffffffff
+DATA shifts<>+0x70(SB)/8, $0xff0f0e0d0c0b0a09
+DATA shifts<>+0x78(SB)/8, $0xffffffffffffffff
+DATA shifts<>+0x80(SB)/8, $0x0f0e0d0c0b0a0908
+DATA shifts<>+0x88(SB)/8, $0xffffffffffffffff
+DATA shifts<>+0x90(SB)/8, $0x0e0d0c0b0a090807
+DATA shifts<>+0x98(SB)/8, $0xffffffffffffff0f
+DATA shifts<>+0xa0(SB)/8, $0x0d0c0b0a09080706
+DATA shifts<>+0xa8(SB)/8, $0xffffffffffff0f0e
+DATA shifts<>+0xb0(SB)/8, $0x0c0b0a0908070605
+DATA shifts<>+0xb8(SB)/8, $0xffffffffff0f0e0d
+DATA shifts<>+0xc0(SB)/8, $0x0b0a090807060504
+DATA shifts<>+0xc8(SB)/8, $0xffffffff0f0e0d0c
+DATA shifts<>+0xd0(SB)/8, $0x0a09080706050403
+DATA shifts<>+0xd8(SB)/8, $0xffffff0f0e0d0c0b
+DATA shifts<>+0xe0(SB)/8, $0x0908070605040302
+DATA shifts<>+0xe8(SB)/8, $0xffff0f0e0d0c0b0a
+DATA shifts<>+0xf0(SB)/8, $0x0807060504030201
+DATA shifts<>+0xf8(SB)/8, $0xff0f0e0d0c0b0a09
+GLOBL shifts<>(SB),RODATA,$256
+
+TEXT runtime·return0(SB), NOSPLIT, $0
+ MOVL $0, AX
+ RET
+
+// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
+// Must obey the gcc calling convention.
+TEXT _cgo_topofstack(SB),NOSPLIT,$0
+ get_tls(CX)
+ MOVQ g(CX), AX
+ MOVQ g_m(AX), AX
+ MOVQ m_curg(AX), AX
+ MOVQ (g_stack+stack_hi)(AX), AX
+ RET
+
+// The top-most function running on a goroutine
+// returns to goexit+PCQuantum.
+TEXT runtime·goexit(SB),NOSPLIT|TOPFRAME,$0-0
+ BYTE $0x90 // NOP
+ CALL runtime·goexit1(SB) // does not return
+ // traceback from goexit1 must hit code range of goexit
+ BYTE $0x90 // NOP
+
+// This is called from .init_array and follows the platform, not Go, ABI.
+TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0
+ PUSHQ R15 // The access to global variables below implicitly uses R15, which is callee-save
+ MOVQ runtime·lastmoduledatap(SB), AX
+ MOVQ DI, moduledata_next(AX)
+ MOVQ DI, runtime·lastmoduledatap(SB)
+ POPQ R15
+ RET
+
+// Initialize special registers then jump to sigpanic.
+// This function is injected from the signal handler for panicking
+// signals. It is quite painful to set X15 in the signal context,
+// so we do it here.
+TEXT ·sigpanic0(SB),NOSPLIT,$0-0
+ get_tls(R14)
+ MOVQ g(R14), R14
+#ifndef GOOS_plan9
+ XORPS X15, X15
+#endif
+ JMP ·sigpanic<ABIInternal>(SB)
+
+// gcWriteBarrier performs a heap pointer write and informs the GC.
+//
+// gcWriteBarrier does NOT follow the Go ABI. It takes two arguments:
+// - DI is the destination of the write
+// - AX is the value being written at DI
+// It clobbers FLAGS. It does not clobber any general-purpose registers,
+// but may clobber others (e.g., SSE registers).
+// Defined as ABIInternal since it does not use the stack-based Go ABI.
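+//
+// A rough sketch of the sequence the compiler emits for a barriered
+// pointer store *slot = val (the register setup shown is an
+// illustration, not a definitive encoding):
+//	MOVQ	val, AX
+//	LEAQ	slot, DI
+//	CALL	runtime·gcWriteBarrier<ABIInternal>(SB)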
+TEXT runtime·gcWriteBarrier<ABIInternal>(SB),NOSPLIT,$112
+ // Save the registers clobbered by the fast path. This is slightly
+ // faster than having the caller spill these.
+ MOVQ R12, 96(SP)
+ MOVQ R13, 104(SP)
+ // TODO: Consider passing g.m.p in as an argument so they can be shared
+ // across a sequence of write barriers.
+ MOVQ g_m(R14), R13
+ MOVQ m_p(R13), R13
+ MOVQ (p_wbBuf+wbBuf_next)(R13), R12
+ // Increment wbBuf.next position.
+ LEAQ 16(R12), R12
+ MOVQ R12, (p_wbBuf+wbBuf_next)(R13)
+ CMPQ R12, (p_wbBuf+wbBuf_end)(R13)
+ // Record the write.
+ MOVQ AX, -16(R12) // Record value
+ // Note: This turns bad pointer writes into bad
+ // pointer reads, which could be confusing. We could avoid
+ // reading from obviously bad pointers, which would
+ // take care of the vast majority of these. We could
+ // patch this up in the signal handler, or use XCHG to
+ // combine the read and the write.
+ MOVQ (DI), R13
+ MOVQ R13, -8(R12) // Record *slot
+ // Is the buffer full? (flags set in CMPQ above)
+ JEQ flush
+ret:
+ MOVQ 96(SP), R12
+ MOVQ 104(SP), R13
+ // Do the write.
+ MOVQ AX, (DI)
+ RET
+
+flush:
+ // Save all general purpose registers since these could be
+ // clobbered by wbBufFlush and were not saved by the caller.
+ // It is possible for wbBufFlush to clobber other registers
+ // (e.g., SSE registers), but the compiler takes care of saving
+ // those in the caller if necessary. This strikes a balance
+ // with registers that are likely to be used.
+ //
+ // We don't have type information for these, but all code under
+ // here is NOSPLIT, so nothing will observe these.
+ //
+ // TODO: We could strike a different balance; e.g., saving X0
+ // and not saving GP registers that are less likely to be used.
+ MOVQ DI, 0(SP) // Also first argument to wbBufFlush
+ MOVQ AX, 8(SP) // Also second argument to wbBufFlush
+ MOVQ BX, 16(SP)
+ MOVQ CX, 24(SP)
+ MOVQ DX, 32(SP)
+ // DI already saved
+ MOVQ SI, 40(SP)
+ MOVQ BP, 48(SP)
+ MOVQ R8, 56(SP)
+ MOVQ R9, 64(SP)
+ MOVQ R10, 72(SP)
+ MOVQ R11, 80(SP)
+ // R12 already saved
+ // R13 already saved
+ // R14 is g
+ MOVQ R15, 88(SP)
+
+ // This takes arguments DI and AX
+ CALL runtime·wbBufFlush(SB)
+
+ MOVQ 0(SP), DI
+ MOVQ 8(SP), AX
+ MOVQ 16(SP), BX
+ MOVQ 24(SP), CX
+ MOVQ 32(SP), DX
+ MOVQ 40(SP), SI
+ MOVQ 48(SP), BP
+ MOVQ 56(SP), R8
+ MOVQ 64(SP), R9
+ MOVQ 72(SP), R10
+ MOVQ 80(SP), R11
+ MOVQ 88(SP), R15
+ JMP ret
+
+// gcWriteBarrierCX is gcWriteBarrier, but with args in DI and CX.
+// Defined as ABIInternal since it does not use the stable Go ABI.
+TEXT runtime·gcWriteBarrierCX<ABIInternal>(SB),NOSPLIT,$0
+ XCHGQ CX, AX
+ CALL runtime·gcWriteBarrier<ABIInternal>(SB)
+ XCHGQ CX, AX
+ RET
+
+// gcWriteBarrierDX is gcWriteBarrier, but with args in DI and DX.
+// Defined as ABIInternal since it does not use the stable Go ABI.
+TEXT runtime·gcWriteBarrierDX<ABIInternal>(SB),NOSPLIT,$0
+ XCHGQ DX, AX
+ CALL runtime·gcWriteBarrier<ABIInternal>(SB)
+ XCHGQ DX, AX
+ RET
+
+// gcWriteBarrierBX is gcWriteBarrier, but with args in DI and BX.
+// Defined as ABIInternal since it does not use the stable Go ABI.
+TEXT runtime·gcWriteBarrierBX<ABIInternal>(SB),NOSPLIT,$0
+ XCHGQ BX, AX
+ CALL runtime·gcWriteBarrier<ABIInternal>(SB)
+ XCHGQ BX, AX
+ RET
+
+// gcWriteBarrierBP is gcWriteBarrier, but with args in DI and BP.
+// Defined as ABIInternal since it does not use the stable Go ABI.
+TEXT runtime·gcWriteBarrierBP<ABIInternal>(SB),NOSPLIT,$0
+ XCHGQ BP, AX
+ CALL runtime·gcWriteBarrier<ABIInternal>(SB)
+ XCHGQ BP, AX
+ RET
+
+// gcWriteBarrierSI is gcWriteBarrier, but with args in DI and SI.
+// Defined as ABIInternal since it does not use the stable Go ABI.
+TEXT runtime·gcWriteBarrierSI<ABIInternal>(SB),NOSPLIT,$0
+ XCHGQ SI, AX
+ CALL runtime·gcWriteBarrier<ABIInternal>(SB)
+ XCHGQ SI, AX
+ RET
+
+// gcWriteBarrierR8 is gcWriteBarrier, but with args in DI and R8.
+// Defined as ABIInternal since it does not use the stable Go ABI.
+TEXT runtime·gcWriteBarrierR8<ABIInternal>(SB),NOSPLIT,$0
+ XCHGQ R8, AX
+ CALL runtime·gcWriteBarrier<ABIInternal>(SB)
+ XCHGQ R8, AX
+ RET
+
+// gcWriteBarrierR9 is gcWriteBarrier, but with args in DI and R9.
+// Defined as ABIInternal since it does not use the stable Go ABI.
+TEXT runtime·gcWriteBarrierR9<ABIInternal>(SB),NOSPLIT,$0
+ XCHGQ R9, AX
+ CALL runtime·gcWriteBarrier<ABIInternal>(SB)
+ XCHGQ R9, AX
+ RET
+
+DATA debugCallFrameTooLarge<>+0x00(SB)/20, $"call frame too large"
+GLOBL debugCallFrameTooLarge<>(SB), RODATA, $20 // Size duplicated below
+
+// debugCallV2 is the entry point for debugger-injected function
+// calls on running goroutines. It informs the runtime that a
+// debug call has been injected and creates a call frame for the
+// debugger to fill in.
+//
+// To inject a function call, a debugger should:
+// 1. Check that the goroutine is in state _Grunning and that
+// there are at least 256 bytes free on the stack.
+// 2. Push the current PC on the stack (updating SP).
+// 3. Write the desired argument frame size at SP-16 (using the SP
+// after step 2).
+// 4. Save all machine registers (including flags and XMM registers)
+// so they can be restored later by the debugger.
+// 5. Set the PC to debugCallV2 and resume execution.
+//
+// If the goroutine is in state _Grunnable, then it's not generally
+// safe to inject a call because it may return out via other runtime
+// operations. Instead, the debugger should unwind the stack to find
+// the return to non-runtime code, add a temporary breakpoint there,
+// and inject the call once that breakpoint is hit.
+//
+// If the goroutine is in any other state, it's not safe to inject a call.
+//
+// This function communicates back to the debugger by setting R12 and
+// invoking INT3 to raise a breakpoint signal. See the comments in the
+// implementation for the protocol the debugger is expected to
+// follow. InjectDebugCall in the runtime tests demonstrates this protocol.
+//
+// The debugger must ensure that any pointers passed to the function
+// obey escape analysis requirements. Specifically, it must not pass
+// a stack pointer to an escaping argument. debugCallV2 cannot check
+// this invariant.
+//
+// This is ABIInternal because Go code injects its PC directly into new
+// goroutine stacks.
+TEXT runtime·debugCallV2<ABIInternal>(SB),NOSPLIT,$152-0
+ // Save all registers that may contain pointers so they can be
+ // conservatively scanned.
+ //
+ // We can't do anything that might clobber any of these
+ // registers before this.
+ MOVQ R15, r15-(14*8+8)(SP)
+ MOVQ R14, r14-(13*8+8)(SP)
+ MOVQ R13, r13-(12*8+8)(SP)
+ MOVQ R12, r12-(11*8+8)(SP)
+ MOVQ R11, r11-(10*8+8)(SP)
+ MOVQ R10, r10-(9*8+8)(SP)
+ MOVQ R9, r9-(8*8+8)(SP)
+ MOVQ R8, r8-(7*8+8)(SP)
+ MOVQ DI, di-(6*8+8)(SP)
+ MOVQ SI, si-(5*8+8)(SP)
+ MOVQ BP, bp-(4*8+8)(SP)
+ MOVQ BX, bx-(3*8+8)(SP)
+ MOVQ DX, dx-(2*8+8)(SP)
+	// Save the frame size before we clobber it. Either of the last
+	// two saves could clobber this depending on whether there's a saved BP.
+ MOVQ frameSize-24(FP), DX // aka -16(RSP) before prologue
+ MOVQ CX, cx-(1*8+8)(SP)
+ MOVQ AX, ax-(0*8+8)(SP)
+
+ // Save the argument frame size.
+ MOVQ DX, frameSize-128(SP)
+
+ // Perform a safe-point check.
+ MOVQ retpc-8(FP), AX // Caller's PC
+ MOVQ AX, 0(SP)
+ CALL runtime·debugCallCheck(SB)
+ MOVQ 8(SP), AX
+ TESTQ AX, AX
+ JZ good
+ // The safety check failed. Put the reason string at the top
+ // of the stack.
+ MOVQ AX, 0(SP)
+ MOVQ 16(SP), AX
+ MOVQ AX, 8(SP)
+ // Set R12 to 8 and invoke INT3. The debugger should get the
+ // reason a call can't be injected from the top of the stack
+ // and resume execution.
+ MOVQ $8, R12
+ BYTE $0xcc
+ JMP restore
+
+good:
+ // Registers are saved and it's safe to make a call.
+ // Open up a call frame, moving the stack if necessary.
+ //
+ // Once the frame is allocated, this will set R12 to 0 and
+ // invoke INT3. The debugger should write the argument
+ // frame for the call at SP, set up argument registers, push
+ // the trapping PC on the stack, set the PC to the function to
+ // call, set RDX to point to the closure (if a closure call),
+ // and resume execution.
+ //
+ // If the function returns, this will set R12 to 1 and invoke
+ // INT3. The debugger can then inspect any return value saved
+ // on the stack at SP and in registers and resume execution again.
+ //
+ // If the function panics, this will set R12 to 2 and invoke INT3.
+ // The interface{} value of the panic will be at SP. The debugger
+ // can inspect the panic value and resume execution again.
+#define DEBUG_CALL_DISPATCH(NAME,MAXSIZE) \
+ CMPQ AX, $MAXSIZE; \
+ JA 5(PC); \
+ MOVQ $NAME(SB), AX; \
+ MOVQ AX, 0(SP); \
+ CALL runtime·debugCallWrap(SB); \
+ JMP restore
+
+ MOVQ frameSize-128(SP), AX
+ DEBUG_CALL_DISPATCH(debugCall32<>, 32)
+ DEBUG_CALL_DISPATCH(debugCall64<>, 64)
+ DEBUG_CALL_DISPATCH(debugCall128<>, 128)
+ DEBUG_CALL_DISPATCH(debugCall256<>, 256)
+ DEBUG_CALL_DISPATCH(debugCall512<>, 512)
+ DEBUG_CALL_DISPATCH(debugCall1024<>, 1024)
+ DEBUG_CALL_DISPATCH(debugCall2048<>, 2048)
+ DEBUG_CALL_DISPATCH(debugCall4096<>, 4096)
+ DEBUG_CALL_DISPATCH(debugCall8192<>, 8192)
+ DEBUG_CALL_DISPATCH(debugCall16384<>, 16384)
+ DEBUG_CALL_DISPATCH(debugCall32768<>, 32768)
+ DEBUG_CALL_DISPATCH(debugCall65536<>, 65536)
+ // The frame size is too large. Report the error.
+ MOVQ $debugCallFrameTooLarge<>(SB), AX
+ MOVQ AX, 0(SP)
+ MOVQ $20, 8(SP) // length of debugCallFrameTooLarge string
+ MOVQ $8, R12
+ BYTE $0xcc
+ JMP restore
+
+restore:
+ // Calls and failures resume here.
+ //
+ // Set R12 to 16 and invoke INT3. The debugger should restore
+ // all registers except RIP and RSP and resume execution.
+ MOVQ $16, R12
+ BYTE $0xcc
+ // We must not modify flags after this point.
+
+ // Restore pointer-containing registers, which may have been
+ // modified from the debugger's copy by stack copying.
+ MOVQ ax-(0*8+8)(SP), AX
+ MOVQ cx-(1*8+8)(SP), CX
+ MOVQ dx-(2*8+8)(SP), DX
+ MOVQ bx-(3*8+8)(SP), BX
+ MOVQ bp-(4*8+8)(SP), BP
+ MOVQ si-(5*8+8)(SP), SI
+ MOVQ di-(6*8+8)(SP), DI
+ MOVQ r8-(7*8+8)(SP), R8
+ MOVQ r9-(8*8+8)(SP), R9
+ MOVQ r10-(9*8+8)(SP), R10
+ MOVQ r11-(10*8+8)(SP), R11
+ MOVQ r12-(11*8+8)(SP), R12
+ MOVQ r13-(12*8+8)(SP), R13
+ MOVQ r14-(13*8+8)(SP), R14
+ MOVQ r15-(14*8+8)(SP), R15
+
+ RET
+
+// runtime.debugCallCheck assumes that functions defined with the
+// DEBUG_CALL_FN macro are safe points to inject calls.
+#define DEBUG_CALL_FN(NAME,MAXSIZE) \
+TEXT NAME(SB),WRAPPER,$MAXSIZE-0; \
+ NO_LOCAL_POINTERS; \
+ MOVQ $0, R12; \
+ BYTE $0xcc; \
+ MOVQ $1, R12; \
+ BYTE $0xcc; \
+ RET
+DEBUG_CALL_FN(debugCall32<>, 32)
+DEBUG_CALL_FN(debugCall64<>, 64)
+DEBUG_CALL_FN(debugCall128<>, 128)
+DEBUG_CALL_FN(debugCall256<>, 256)
+DEBUG_CALL_FN(debugCall512<>, 512)
+DEBUG_CALL_FN(debugCall1024<>, 1024)
+DEBUG_CALL_FN(debugCall2048<>, 2048)
+DEBUG_CALL_FN(debugCall4096<>, 4096)
+DEBUG_CALL_FN(debugCall8192<>, 8192)
+DEBUG_CALL_FN(debugCall16384<>, 16384)
+DEBUG_CALL_FN(debugCall32768<>, 32768)
+DEBUG_CALL_FN(debugCall65536<>, 65536)
+
+// func debugCallPanicked(val interface{})
+TEXT runtime·debugCallPanicked(SB),NOSPLIT,$16-16
+ // Copy the panic value to the top of stack.
+ MOVQ val_type+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ val_data+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ $2, R12
+ BYTE $0xcc
+ RET
+
+// Note: these functions use a special calling convention to save generated code space.
+// Arguments are passed in registers, but the space for those arguments is allocated
+// in the caller's stack frame. These stubs write the args into that stack space and
+// then tail call to the corresponding runtime handler.
+// The tail call makes these stubs disappear in backtraces.
+// Defined as ABIInternal since they do not use the stack-based Go ABI.
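+//
+// For example, panicIndex below receives the failing index in AX and the
+// length in CX; under the register ABI, goPanicIndex takes its two
+// arguments in AX and BX, so the stub only needs to move CX to BX before
+// the tail call.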
+TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
+ MOVQ CX, BX
+ JMP runtime·goPanicIndex<ABIInternal>(SB)
+TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
+ MOVQ CX, BX
+ JMP runtime·goPanicIndexU<ABIInternal>(SB)
+TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
+ MOVQ CX, AX
+ MOVQ DX, BX
+ JMP runtime·goPanicSliceAlen<ABIInternal>(SB)
+TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
+ MOVQ CX, AX
+ MOVQ DX, BX
+ JMP runtime·goPanicSliceAlenU<ABIInternal>(SB)
+TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
+ MOVQ CX, AX
+ MOVQ DX, BX
+ JMP runtime·goPanicSliceAcap<ABIInternal>(SB)
+TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
+ MOVQ CX, AX
+ MOVQ DX, BX
+ JMP runtime·goPanicSliceAcapU<ABIInternal>(SB)
+TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
+ MOVQ CX, BX
+ JMP runtime·goPanicSliceB<ABIInternal>(SB)
+TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
+ MOVQ CX, BX
+ JMP runtime·goPanicSliceBU<ABIInternal>(SB)
+TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
+ MOVQ DX, AX
+ JMP runtime·goPanicSlice3Alen<ABIInternal>(SB)
+TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
+ MOVQ DX, AX
+ JMP runtime·goPanicSlice3AlenU<ABIInternal>(SB)
+TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
+ MOVQ DX, AX
+ JMP runtime·goPanicSlice3Acap<ABIInternal>(SB)
+TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
+ MOVQ DX, AX
+ JMP runtime·goPanicSlice3AcapU<ABIInternal>(SB)
+TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
+ MOVQ CX, AX
+ MOVQ DX, BX
+ JMP runtime·goPanicSlice3B<ABIInternal>(SB)
+TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
+ MOVQ CX, AX
+ MOVQ DX, BX
+ JMP runtime·goPanicSlice3BU<ABIInternal>(SB)
+TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
+ MOVQ CX, BX
+ JMP runtime·goPanicSlice3C<ABIInternal>(SB)
+TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
+ MOVQ CX, BX
+ JMP runtime·goPanicSlice3CU<ABIInternal>(SB)
+TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
+ MOVQ DX, AX
+ JMP runtime·goPanicSliceConvert<ABIInternal>(SB)
+
+#ifdef GOOS_android
+// Use the free TLS_SLOT_APP slot #2 on Android Q.
+// Earlier androids are set up in gcc_android.c.
+DATA runtime·tls_g+0(SB)/8, $16
+GLOBL runtime·tls_g+0(SB), NOPTR, $8
+#endif
+
+// The compiler and assembler's -spectre=ret mode rewrites
+// all indirect CALL AX / JMP AX instructions to be
+// CALL retpolineAX / JMP retpolineAX.
+// See https://support.google.com/faqs/answer/7625886.
+#define RETPOLINE(reg) \
+ /* CALL setup */ BYTE $0xE8; BYTE $(2+2); BYTE $0; BYTE $0; BYTE $0; \
+ /* nospec: */ \
+ /* PAUSE */ BYTE $0xF3; BYTE $0x90; \
+ /* JMP nospec */ BYTE $0xEB; BYTE $-(2+2); \
+ /* setup: */ \
+ /* MOVQ AX, 0(SP) */ BYTE $0x48|((reg&8)>>1); BYTE $0x89; \
+ BYTE $0x04|((reg&7)<<3); BYTE $0x24; \
+ /* RET */ BYTE $0xC3
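+
+// The net effect: the CALL pushes a return address pointing at the
+// nospec PAUSE/JMP loop, so any mispredicted (speculative) return is
+// trapped there, while the setup code overwrites that return address on
+// the stack with the target register's value. The final RET then
+// transfers control to the intended target without an indirect branch
+// the CPU could mispredict.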
+
+TEXT runtime·retpolineAX(SB),NOSPLIT,$0; RETPOLINE(0)
+TEXT runtime·retpolineCX(SB),NOSPLIT,$0; RETPOLINE(1)
+TEXT runtime·retpolineDX(SB),NOSPLIT,$0; RETPOLINE(2)
+TEXT runtime·retpolineBX(SB),NOSPLIT,$0; RETPOLINE(3)
+/* SP is 4, can't happen / magic encodings */
+TEXT runtime·retpolineBP(SB),NOSPLIT,$0; RETPOLINE(5)
+TEXT runtime·retpolineSI(SB),NOSPLIT,$0; RETPOLINE(6)
+TEXT runtime·retpolineDI(SB),NOSPLIT,$0; RETPOLINE(7)
+TEXT runtime·retpolineR8(SB),NOSPLIT,$0; RETPOLINE(8)
+TEXT runtime·retpolineR9(SB),NOSPLIT,$0; RETPOLINE(9)
+TEXT runtime·retpolineR10(SB),NOSPLIT,$0; RETPOLINE(10)
+TEXT runtime·retpolineR11(SB),NOSPLIT,$0; RETPOLINE(11)
+TEXT runtime·retpolineR12(SB),NOSPLIT,$0; RETPOLINE(12)
+TEXT runtime·retpolineR13(SB),NOSPLIT,$0; RETPOLINE(13)
+TEXT runtime·retpolineR14(SB),NOSPLIT,$0; RETPOLINE(14)
+TEXT runtime·retpolineR15(SB),NOSPLIT,$0; RETPOLINE(15)
diff --git a/contrib/go/_std_1.18/src/runtime/atomic_pointer.go b/contrib/go/_std_1.19/src/runtime/atomic_pointer.go
index b8f0c22c63..b8f0c22c63 100644
--- a/contrib/go/_std_1.18/src/runtime/atomic_pointer.go
+++ b/contrib/go/_std_1.19/src/runtime/atomic_pointer.go
diff --git a/contrib/go/_std_1.18/src/runtime/cgo.go b/contrib/go/_std_1.19/src/runtime/cgo.go
index d90468240d..d90468240d 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo.go
+++ b/contrib/go/_std_1.19/src/runtime/cgo.go
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/abi_amd64.h b/contrib/go/_std_1.19/src/runtime/cgo/abi_amd64.h
index 9949435fe9..9949435fe9 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/abi_amd64.h
+++ b/contrib/go/_std_1.19/src/runtime/cgo/abi_amd64.h
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/asm_amd64.s b/contrib/go/_std_1.19/src/runtime/cgo/asm_amd64.s
index 386299c548..386299c548 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/asm_amd64.s
+++ b/contrib/go/_std_1.19/src/runtime/cgo/asm_amd64.s
diff --git a/contrib/go/_std_1.19/src/runtime/cgo/callbacks.go b/contrib/go/_std_1.19/src/runtime/cgo/callbacks.go
new file mode 100644
index 0000000000..e7c8ef3e07
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/cgo/callbacks.go
@@ -0,0 +1,107 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgo
+
+import "unsafe"
+
+// These utility functions are available to be called from code
+// compiled with gcc via crosscall2.
+
+// The declaration of crosscall2 is:
+// void crosscall2(void (*fn)(void *), void *, int);
+//
+// We need to export the symbol crosscall2 in order to support
+// callbacks from shared libraries. This applies regardless of
+// linking mode.
+//
+// Compatibility note: SWIG uses crosscall2 in exactly one situation:
+// to call _cgo_panic using the pattern shown below. We need to keep
+// that pattern working. In particular, crosscall2 actually takes four
+// arguments, but it works to call it with three arguments when
+// calling _cgo_panic.
+//
+//go:cgo_export_static crosscall2
+//go:cgo_export_dynamic crosscall2
+
+// Panic. The argument is converted into a Go string.
+
+// Call like this in code compiled with gcc:
+// struct { const char *p; } a;
+// a.p = /* string to pass to panic */;
+// crosscall2(_cgo_panic, &a, sizeof a);
+// /* The function call will not return. */
+
+// TODO: We should export a regular C function to panic, change SWIG
+// to use that instead of the above pattern, and then we can drop
+// backwards-compatibility from crosscall2 and stop exporting it.
+
+//go:linkname _runtime_cgo_panic_internal runtime._cgo_panic_internal
+func _runtime_cgo_panic_internal(p *byte)
+
+//go:linkname _cgo_panic _cgo_panic
+//go:cgo_export_static _cgo_panic
+//go:cgo_export_dynamic _cgo_panic
+func _cgo_panic(a *struct{ cstr *byte }) {
+ _runtime_cgo_panic_internal(a.cstr)
+}
+
+//go:cgo_import_static x_cgo_init
+//go:linkname x_cgo_init x_cgo_init
+//go:linkname _cgo_init _cgo_init
+var x_cgo_init byte
+var _cgo_init = &x_cgo_init
+
+//go:cgo_import_static x_cgo_thread_start
+//go:linkname x_cgo_thread_start x_cgo_thread_start
+//go:linkname _cgo_thread_start _cgo_thread_start
+var x_cgo_thread_start byte
+var _cgo_thread_start = &x_cgo_thread_start
+
+// Creates a new system thread without updating any Go state.
+//
+// This method is invoked during shared library loading to create a new OS
+// thread to perform the runtime initialization. This method is similar to
+// _cgo_sys_thread_start except that it doesn't update any Go state.
+
+//go:cgo_import_static x_cgo_sys_thread_create
+//go:linkname x_cgo_sys_thread_create x_cgo_sys_thread_create
+//go:linkname _cgo_sys_thread_create _cgo_sys_thread_create
+var x_cgo_sys_thread_create byte
+var _cgo_sys_thread_create = &x_cgo_sys_thread_create
+
+// Notifies that the runtime has been initialized.
+//
+// We currently block at every CGO entry point (via _cgo_wait_runtime_init_done)
+// to ensure that the runtime has been initialized before the CGO call is
+// executed. This is necessary for shared libraries where we kick off runtime
+// initialization in a separate thread and return without waiting for this
+// thread to complete the init.
+
+//go:cgo_import_static x_cgo_notify_runtime_init_done
+//go:linkname x_cgo_notify_runtime_init_done x_cgo_notify_runtime_init_done
+//go:linkname _cgo_notify_runtime_init_done _cgo_notify_runtime_init_done
+var x_cgo_notify_runtime_init_done byte
+var _cgo_notify_runtime_init_done = &x_cgo_notify_runtime_init_done
+
+// Sets the traceback context function. See runtime.SetCgoTraceback.
+
+//go:cgo_import_static x_cgo_set_context_function
+//go:linkname x_cgo_set_context_function x_cgo_set_context_function
+//go:linkname _cgo_set_context_function _cgo_set_context_function
+var x_cgo_set_context_function byte
+var _cgo_set_context_function = &x_cgo_set_context_function
+
+// Calls a libc function to execute background work injected via libc
+// interceptors, such as processing pending signals under the thread
+// sanitizer.
+//
+// Left as a nil pointer if no libc interceptors are expected.
+
+//go:cgo_import_static _cgo_yield
+//go:linkname _cgo_yield _cgo_yield
+var _cgo_yield unsafe.Pointer
+
+//go:cgo_export_static _cgo_topofstack
+//go:cgo_export_dynamic _cgo_topofstack
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/callbacks_traceback.go b/contrib/go/_std_1.19/src/runtime/cgo/callbacks_traceback.go
index dae31a8fcd..dae31a8fcd 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/callbacks_traceback.go
+++ b/contrib/go/_std_1.19/src/runtime/cgo/callbacks_traceback.go
diff --git a/contrib/go/_std_1.19/src/runtime/cgo/cgo.go b/contrib/go/_std_1.19/src/runtime/cgo/cgo.go
new file mode 100644
index 0000000000..298aa63675
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/cgo/cgo.go
@@ -0,0 +1,31 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package cgo contains runtime support for code generated
+by the cgo tool. See the documentation for the cgo command
+for details on using cgo.
+*/
+package cgo
+
+/*
+
+#cgo darwin,!arm64 LDFLAGS: -lpthread
+#cgo darwin,arm64 LDFLAGS: -framework CoreFoundation
+#cgo dragonfly LDFLAGS: -lpthread
+#cgo freebsd LDFLAGS: -lpthread
+#cgo android LDFLAGS: -llog
+#cgo !android,linux LDFLAGS: -lpthread
+#cgo netbsd LDFLAGS: -lpthread
+#cgo openbsd LDFLAGS: -lpthread
+#cgo aix LDFLAGS: -Wl,-berok
+#cgo solaris LDFLAGS: -lxnet
+#cgo solaris LDFLAGS: -lsocket
+
+#cgo CFLAGS: -Wall -Werror
+
+#cgo solaris CPPFLAGS: -D_POSIX_PTHREAD_SEMANTICS
+
+*/
+import "C"
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/gcc_amd64.S b/contrib/go/_std_1.19/src/runtime/cgo/gcc_amd64.S
index 46699d1d9c..46699d1d9c 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/gcc_amd64.S
+++ b/contrib/go/_std_1.19/src/runtime/cgo/gcc_amd64.S
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/gcc_context.c b/contrib/go/_std_1.19/src/runtime/cgo/gcc_context.c
index 5fc0abb8bc..5fc0abb8bc 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/gcc_context.c
+++ b/contrib/go/_std_1.19/src/runtime/cgo/gcc_context.c
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/gcc_darwin_amd64.c b/contrib/go/_std_1.19/src/runtime/cgo/gcc_darwin_amd64.c
index d5b7fd8fd8..d5b7fd8fd8 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/gcc_darwin_amd64.c
+++ b/contrib/go/_std_1.19/src/runtime/cgo/gcc_darwin_amd64.c
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/gcc_fatalf.c b/contrib/go/_std_1.19/src/runtime/cgo/gcc_fatalf.c
index 597e750f12..597e750f12 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/gcc_fatalf.c
+++ b/contrib/go/_std_1.19/src/runtime/cgo/gcc_fatalf.c
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/gcc_libinit.c b/contrib/go/_std_1.19/src/runtime/cgo/gcc_libinit.c
index 3304d95fdf..3304d95fdf 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/gcc_libinit.c
+++ b/contrib/go/_std_1.19/src/runtime/cgo/gcc_libinit.c
diff --git a/contrib/go/_std_1.19/src/runtime/cgo/gcc_linux_amd64.c b/contrib/go/_std_1.19/src/runtime/cgo/gcc_linux_amd64.c
new file mode 100644
index 0000000000..fb164c1a1d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/cgo/gcc_linux_amd64.c
@@ -0,0 +1,96 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <pthread.h>
+#include <errno.h>
+#include <string.h> // strerror
+#include <signal.h>
+#include <stdlib.h>
+#include "libcgo.h"
+#include "libcgo_unix.h"
+
+static void* threadentry(void*);
+static void (*setg_gcc)(void*);
+
+// This will be set in gcc_android.c for android-specific customization.
+void (*x_cgo_inittls)(void **tlsg, void **tlsbase) __attribute__((common));
+
+void
+x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
+{
+ pthread_attr_t *attr;
+ size_t size;
+
+ /* The memory sanitizer distributed with versions of clang
+ before 3.8 has a bug: if you call mmap before malloc, mmap
+ may return an address that is later overwritten by the msan
+ library. Avoid this problem by forcing a call to malloc
+ here, before we ever call malloc.
+
+ This is only required for the memory sanitizer, so it's
+ unfortunate that we always run it. It should be possible
+ to remove this when we no longer care about versions of
+ clang before 3.8. The test for this is
+ misc/cgo/testsanitizers.
+
+ GCC works hard to eliminate a seemingly unnecessary call to
+ malloc, so we actually use the memory we allocate. */
+
+ setg_gcc = setg;
+ attr = (pthread_attr_t*)malloc(sizeof *attr);
+ if (attr == NULL) {
+ fatalf("malloc failed: %s", strerror(errno));
+ }
+ pthread_attr_init(attr);
+ pthread_attr_getstacksize(attr, &size);
+ g->stacklo = (uintptr)__builtin_frame_address(0) - size + 4096;
+ if (g->stacklo >= g->stackhi)
+ fatalf("bad stack bounds: lo=%p hi=%p\n", g->stacklo, g->stackhi);
+ pthread_attr_destroy(attr);
+ free(attr);
+
+ if (x_cgo_inittls) {
+ x_cgo_inittls(tlsg, tlsbase);
+ }
+}
+
+
+void
+_cgo_sys_thread_start(ThreadStart *ts)
+{
+ pthread_attr_t attr;
+ sigset_t ign, oset;
+ pthread_t p;
+ size_t size;
+ int err;
+
+ sigfillset(&ign);
+ pthread_sigmask(SIG_SETMASK, &ign, &oset);
+
+ pthread_attr_init(&attr);
+ pthread_attr_getstacksize(&attr, &size);
+ // Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+ ts->g->stackhi = size;
+ err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
+
+ pthread_sigmask(SIG_SETMASK, &oset, nil);
+
+ if (err != 0) {
+ fatalf("pthread_create failed: %s", strerror(err));
+ }
+}
+
+static void*
+threadentry(void *v)
+{
+ ThreadStart ts;
+
+ ts = *(ThreadStart*)v;
+ _cgo_tsan_acquire();
+ free(v);
+ _cgo_tsan_release();
+
+ crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
+ return nil;
+}
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/gcc_mmap.c b/contrib/go/_std_1.19/src/runtime/cgo/gcc_mmap.c
index 698a7e3cd2..698a7e3cd2 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/gcc_mmap.c
+++ b/contrib/go/_std_1.19/src/runtime/cgo/gcc_mmap.c
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/gcc_setenv.c b/contrib/go/_std_1.19/src/runtime/cgo/gcc_setenv.c
index d4f798357a..d4f798357a 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/gcc_setenv.c
+++ b/contrib/go/_std_1.19/src/runtime/cgo/gcc_setenv.c
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/gcc_sigaction.c b/contrib/go/_std_1.19/src/runtime/cgo/gcc_sigaction.c
index fcf1e50740..fcf1e50740 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/gcc_sigaction.c
+++ b/contrib/go/_std_1.19/src/runtime/cgo/gcc_sigaction.c
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/gcc_traceback.c b/contrib/go/_std_1.19/src/runtime/cgo/gcc_traceback.c
index 6e9470c43c..6e9470c43c 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/gcc_traceback.c
+++ b/contrib/go/_std_1.19/src/runtime/cgo/gcc_traceback.c
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/gcc_util.c b/contrib/go/_std_1.19/src/runtime/cgo/gcc_util.c
index 3fcb48cc8d..3fcb48cc8d 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/gcc_util.c
+++ b/contrib/go/_std_1.19/src/runtime/cgo/gcc_util.c
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/handle.go b/contrib/go/_std_1.19/src/runtime/cgo/handle.go
index d711900d79..d711900d79 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/handle.go
+++ b/contrib/go/_std_1.19/src/runtime/cgo/handle.go
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/iscgo.go b/contrib/go/_std_1.19/src/runtime/cgo/iscgo.go
index e12d0f4b95..e12d0f4b95 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/iscgo.go
+++ b/contrib/go/_std_1.19/src/runtime/cgo/iscgo.go
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/libcgo.h b/contrib/go/_std_1.19/src/runtime/cgo/libcgo.h
index af4960e7e9..af4960e7e9 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/libcgo.h
+++ b/contrib/go/_std_1.19/src/runtime/cgo/libcgo.h
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/libcgo_unix.h b/contrib/go/_std_1.19/src/runtime/cgo/libcgo_unix.h
index a56a366f23..a56a366f23 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/libcgo_unix.h
+++ b/contrib/go/_std_1.19/src/runtime/cgo/libcgo_unix.h
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/linux.go b/contrib/go/_std_1.19/src/runtime/cgo/linux.go
index 1d6fe03917..1d6fe03917 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/linux.go
+++ b/contrib/go/_std_1.19/src/runtime/cgo/linux.go
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/linux_syscall.c b/contrib/go/_std_1.19/src/runtime/cgo/linux_syscall.c
index 59761c8b40..59761c8b40 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/linux_syscall.c
+++ b/contrib/go/_std_1.19/src/runtime/cgo/linux_syscall.c
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/mmap.go b/contrib/go/_std_1.19/src/runtime/cgo/mmap.go
index eae0a9e7cc..eae0a9e7cc 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/mmap.go
+++ b/contrib/go/_std_1.19/src/runtime/cgo/mmap.go
diff --git a/contrib/go/_std_1.19/src/runtime/cgo/setenv.go b/contrib/go/_std_1.19/src/runtime/cgo/setenv.go
new file mode 100644
index 0000000000..2247cb2b59
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/cgo/setenv.go
@@ -0,0 +1,21 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package cgo
+
+import _ "unsafe" // for go:linkname
+
+//go:cgo_import_static x_cgo_setenv
+//go:linkname x_cgo_setenv x_cgo_setenv
+//go:linkname _cgo_setenv runtime._cgo_setenv
+var x_cgo_setenv byte
+var _cgo_setenv = &x_cgo_setenv
+
+//go:cgo_import_static x_cgo_unsetenv
+//go:linkname x_cgo_unsetenv x_cgo_unsetenv
+//go:linkname _cgo_unsetenv runtime._cgo_unsetenv
+var x_cgo_unsetenv byte
+var _cgo_unsetenv = &x_cgo_unsetenv
diff --git a/contrib/go/_std_1.18/src/runtime/cgo/sigaction.go b/contrib/go/_std_1.19/src/runtime/cgo/sigaction.go
index dc714f7ef4..dc714f7ef4 100644
--- a/contrib/go/_std_1.18/src/runtime/cgo/sigaction.go
+++ b/contrib/go/_std_1.19/src/runtime/cgo/sigaction.go
diff --git a/contrib/go/_std_1.19/src/runtime/cgo_mmap.go b/contrib/go/_std_1.19/src/runtime/cgo_mmap.go
new file mode 100644
index 0000000000..4cb3e65f14
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/cgo_mmap.go
@@ -0,0 +1,70 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Support for memory sanitizer. See runtime/cgo/mmap.go.
+
+//go:build (linux && amd64) || (linux && arm64)
+
+package runtime
+
+import "unsafe"
+
+// _cgo_mmap is filled in by runtime/cgo when it is linked into the
+// program, so it is only non-nil when using cgo.
+//
+//go:linkname _cgo_mmap _cgo_mmap
+var _cgo_mmap unsafe.Pointer
+
+// _cgo_munmap is filled in by runtime/cgo when it is linked into the
+// program, so it is only non-nil when using cgo.
+//
+//go:linkname _cgo_munmap _cgo_munmap
+var _cgo_munmap unsafe.Pointer
+
+// mmap is used to route the mmap system call through C code when using cgo, to
+// support sanitizer interceptors. Don't allow stack splits, since this function
+// (used by sysAlloc) is called in a lot of low-level parts of the runtime and
+// callers often assume it won't acquire any locks.
+//
+//go:nosplit
+func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int) {
+ if _cgo_mmap != nil {
+ // Make ret a uintptr so that writing to it in the
+ // function literal does not trigger a write barrier.
+ // A write barrier here could break because of the way
+ // that mmap uses the same value both as a pointer and
+ // an errno value.
+ var ret uintptr
+ systemstack(func() {
+ ret = callCgoMmap(addr, n, prot, flags, fd, off)
+ })
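+		// On failure the C wrapper returns errno, a small positive
+		// value; no valid mapping can land in the first page.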
+ if ret < 4096 {
+ return nil, int(ret)
+ }
+ return unsafe.Pointer(ret), 0
+ }
+ return sysMmap(addr, n, prot, flags, fd, off)
+}
+
+func munmap(addr unsafe.Pointer, n uintptr) {
+ if _cgo_munmap != nil {
+ systemstack(func() { callCgoMunmap(addr, n) })
+ return
+ }
+ sysMunmap(addr, n)
+}
+
+// sysMmap calls the mmap system call. It is implemented in assembly.
+func sysMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (p unsafe.Pointer, err int)
+
+// callCgoMmap calls the mmap function in the runtime/cgo package
+// using the GCC calling convention. It is implemented in assembly.
+func callCgoMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) uintptr
+
+// sysMunmap calls the munmap system call. It is implemented in assembly.
+func sysMunmap(addr unsafe.Pointer, n uintptr)
+
+// callCgoMunmap calls the munmap function in the runtime/cgo package
+// using the GCC calling convention. It is implemented in assembly.
+func callCgoMunmap(addr unsafe.Pointer, n uintptr)
diff --git a/contrib/go/_std_1.19/src/runtime/cgo_sigaction.go b/contrib/go/_std_1.19/src/runtime/cgo_sigaction.go
new file mode 100644
index 0000000000..9500c52205
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/cgo_sigaction.go
@@ -0,0 +1,94 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Support for sanitizers. See runtime/cgo/sigaction.go.
+
+//go:build (linux && amd64) || (freebsd && amd64) || (linux && arm64) || (linux && ppc64le)
+
+package runtime
+
+import "unsafe"
+
+// _cgo_sigaction is filled in by runtime/cgo when it is linked into the
+// program, so it is only non-nil when using cgo.
+//
+//go:linkname _cgo_sigaction _cgo_sigaction
+var _cgo_sigaction unsafe.Pointer
+
+//go:nosplit
+//go:nowritebarrierrec
+func sigaction(sig uint32, new, old *sigactiont) {
+ // racewalk.go avoids adding sanitizing instrumentation to package runtime,
+ // but we might be calling into instrumented C functions here,
+ // so we need the pointer parameters to be properly marked.
+ //
+ // Mark the input as having been written before the call
+ // and the output as read after.
+ if msanenabled && new != nil {
+ msanwrite(unsafe.Pointer(new), unsafe.Sizeof(*new))
+ }
+ if asanenabled && new != nil {
+ asanwrite(unsafe.Pointer(new), unsafe.Sizeof(*new))
+ }
+ if _cgo_sigaction == nil || inForkedChild {
+ sysSigaction(sig, new, old)
+ } else {
+ // We need to call _cgo_sigaction, which means we need a big enough stack
+ // for C. To complicate matters, we may be in libpreinit (before the
+ // runtime has been initialized) or in an asynchronous signal handler (with
+ // the current thread in transition between goroutines, or with the g0
+ // system stack already in use).
+
+ var ret int32
+
+ var g *g
+ if mainStarted {
+ g = getg()
+ }
+ sp := uintptr(unsafe.Pointer(&sig))
+ switch {
+ case g == nil:
+ // No g: we're on a C stack or a signal stack.
+ ret = callCgoSigaction(uintptr(sig), new, old)
+ case sp < g.stack.lo || sp >= g.stack.hi:
+ // We're no longer on g's stack, so we must be handling a signal. It's
+ // possible that we interrupted the thread during a transition between g
+ // and g0, so we should stay on the current stack to avoid corrupting g0.
+ ret = callCgoSigaction(uintptr(sig), new, old)
+ default:
+ // We're running on g's stack, so either we're not in a signal handler or
+ // the signal handler has set the correct g. If we're on gsignal or g0,
+ // systemstack will make the call directly; otherwise, it will switch to
+ // g0 to ensure we have enough room to call a libc function.
+ //
+ // The function literal that we pass to systemstack is not nosplit, but
+ // that's ok: we'll be running on a fresh, clean system stack so the stack
+ // check will always succeed anyway.
+ systemstack(func() {
+ ret = callCgoSigaction(uintptr(sig), new, old)
+ })
+ }
+
+ const EINVAL = 22
+ if ret == EINVAL {
+ // libc reserves certain signals — normally 32-33 — for pthreads, and
+ // returns EINVAL for sigaction calls on those signals. If we get EINVAL,
+ // fall back to making the syscall directly.
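+			// (In glibc these are typically SIGCANCEL and SIGSETXID,
+			// used internally for thread cancellation and
+			// setuid/setgid synchronization.)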
+ sysSigaction(sig, new, old)
+ }
+ }
+
+ if msanenabled && old != nil {
+ msanread(unsafe.Pointer(old), unsafe.Sizeof(*old))
+ }
+ if asanenabled && old != nil {
+ asanread(unsafe.Pointer(old), unsafe.Sizeof(*old))
+ }
+}
+
+// callCgoSigaction calls the sigaction function in the runtime/cgo package
+// using the GCC calling convention. It is implemented in assembly.
+//
+//go:noescape
+func callCgoSigaction(sig uintptr, new, old *sigactiont) int32
diff --git a/contrib/go/_std_1.19/src/runtime/cgocall.go b/contrib/go/_std_1.19/src/runtime/cgocall.go
new file mode 100644
index 0000000000..892654ed5b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/cgocall.go
@@ -0,0 +1,643 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Cgo call and callback support.
+//
+// To call into the C function f from Go, the cgo-generated code calls
+// runtime.cgocall(_cgo_Cfunc_f, frame), where _cgo_Cfunc_f is a
+// gcc-compiled function written by cgo.
+//
+// runtime.cgocall (below) calls entersyscall so as not to block
+// other goroutines or the garbage collector, and then calls
+// runtime.asmcgocall(_cgo_Cfunc_f, frame).
+//
+// runtime.asmcgocall (in asm_$GOARCH.s) switches to the m->g0 stack
+// (assumed to be an operating system-allocated stack, so safe to run
+// gcc-compiled code on) and calls _cgo_Cfunc_f(frame).
+//
+// _cgo_Cfunc_f invokes the actual C function f with arguments
+// taken from the frame structure, records the results in the frame,
+// and returns to runtime.asmcgocall.
+//
+// After it regains control, runtime.asmcgocall switches back to the
+// original g (m->curg)'s stack and returns to runtime.cgocall.
+//
+// After it regains control, runtime.cgocall calls exitsyscall, which blocks
+// until this m can run Go code without violating the $GOMAXPROCS limit,
+// and then unlocks g from m.
+//
+// The above description skipped over the possibility of the gcc-compiled
+// function f calling back into Go. If that happens, we continue down
+// the rabbit hole during the execution of f.
+//
+// To make it possible for gcc-compiled C code to call a Go function p.GoF,
+// cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't
+// know about packages). The gcc-compiled C function f calls GoF.
+//
+// GoF initializes "frame", a structure containing all of its
+// arguments and slots for p.GoF's results. It calls
+// crosscall2(_cgoexp_GoF, frame, framesize, ctxt) using the gcc ABI.
+//
+// crosscall2 (in cgo/asm_$GOARCH.s) is a four-argument adapter from
+// the gcc function call ABI to the gc function call ABI. At this
+// point we're in the Go runtime, but we're still running on m.g0's
+// stack and outside the $GOMAXPROCS limit. crosscall2 calls
+// runtime.cgocallback(_cgoexp_GoF, frame, ctxt) using the gc ABI.
+// (crosscall2's framesize argument is no longer used, but there's one
+// case where SWIG calls crosscall2 directly and expects to pass this
+// argument. See _cgo_panic.)
+//
+// runtime.cgocallback (in asm_$GOARCH.s) switches from m.g0's stack
+// to the original g (m.curg)'s stack, on which it calls
+// runtime.cgocallbackg(_cgoexp_GoF, frame, ctxt). As part of the
+// stack switch, runtime.cgocallback saves the current SP as
+// m.g0.sched.sp, so that any use of m.g0's stack during the execution
+// of the callback will be done below the existing stack frames.
+// Before overwriting m.g0.sched.sp, it pushes the old value on the
+// m.g0 stack, so that it can be restored later.
+//
+// runtime.cgocallbackg (below) is now running on a real goroutine
+// stack (not an m.g0 stack). First it calls runtime.exitsyscall, which will
+// block until the $GOMAXPROCS limit allows running this goroutine.
+// Once exitsyscall has returned, it is safe to do things like call the memory
+// allocator or invoke the Go callback function. runtime.cgocallbackg
+// first defers a function to unwind m.g0.sched.sp, so that if p.GoF
+// panics, m.g0.sched.sp will be restored to its old value: the m.g0 stack
+// and the m.curg stack will be unwound in lock step.
+// Then it calls _cgoexp_GoF(frame).
+//
+// _cgoexp_GoF, which was generated by cmd/cgo, unpacks the arguments
+// from frame, calls p.GoF, writes the results back to frame, and
+// returns. Now we start unwinding this whole process.
+//
+// runtime.cgocallbackg pops but does not execute the deferred
+// function to unwind m.g0.sched.sp, calls runtime.entersyscall, and
+// returns to runtime.cgocallback.
+//
+// After it regains control, runtime.cgocallback switches back to
+// m.g0's stack (the pointer is still in m.g0.sched.sp), restores the old
+// m.g0.sched.sp value from the stack, and returns to crosscall2.
+//
+// crosscall2 restores the callee-save registers for gcc and returns
+// to GoF, which unpacks any result values and returns to f.
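+//
+// A minimal sketch of this round trip from the user's side (a
+// hypothetical example program, not part of the runtime):
+//
+//	/*
+//	void f(void); // defined in a separate C file; it calls GoCallback()
+//	*/
+//	import "C"
+//
+//	//export GoCallback
+//	func GoCallback() { println("in Go") }
+//
+//	func main() { C.f() } // Go -> C -> Go, via the machinery above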
+
+package runtime
+
+import (
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+// Addresses collected in a cgo backtrace when crashing.
+// Length must match arg.Max in x_cgo_callers in runtime/cgo/gcc_traceback.c.
+type cgoCallers [32]uintptr
+
+// argset matches runtime/cgo/linux_syscall.c:argset_t
+type argset struct {
+ args unsafe.Pointer
+ retval uintptr
+}
+
+// wrapper for syscall package to call cgocall for libc (cgo) calls.
+//
+//go:linkname syscall_cgocaller syscall.cgocaller
+//go:nosplit
+//go:uintptrescapes
+func syscall_cgocaller(fn unsafe.Pointer, args ...uintptr) uintptr {
+ as := argset{args: unsafe.Pointer(&args[0])}
+ cgocall(fn, unsafe.Pointer(&as))
+ return as.retval
+}
+
+var ncgocall uint64 // number of cgo calls in total for dead m
+
+// Call from Go to C.
+//
+// This must be nosplit because it's used for syscalls on some
+// platforms. Syscalls may have untyped arguments on the stack, so
+// it's not safe to grow or scan the stack.
+//
+//go:nosplit
+func cgocall(fn, arg unsafe.Pointer) int32 {
+ if !iscgo && GOOS != "solaris" && GOOS != "illumos" && GOOS != "windows" {
+ throw("cgocall unavailable")
+ }
+
+ if fn == nil {
+ throw("cgocall nil")
+ }
+
+ if raceenabled {
+ racereleasemerge(unsafe.Pointer(&racecgosync))
+ }
+
+ mp := getg().m
+ mp.ncgocall++
+ mp.ncgo++
+
+ // Reset traceback.
+ mp.cgoCallers[0] = 0
+
+ // Announce we are entering a system call
+ // so that the scheduler knows to create another
+ // M to run goroutines while we are in the
+ // foreign code.
+ //
+ // The call to asmcgocall is guaranteed not to
+ // grow the stack and does not allocate memory,
+ // so it is safe to call while "in a system call", outside
+ // the $GOMAXPROCS accounting.
+ //
+ // fn may call back into Go code, in which case we'll exit the
+ // "system call", run the Go code (which may grow the stack),
+ // and then re-enter the "system call" reusing the PC and SP
+ // saved by entersyscall here.
+ entersyscall()
+
+ // Tell asynchronous preemption that we're entering external
+ // code. We do this after entersyscall because this may block
+ // and cause an async preemption to fail, but at this point a
+ // sync preemption will succeed (though this is not a matter
+ // of correctness).
+ osPreemptExtEnter(mp)
+
+ mp.incgo = true
+ errno := asmcgocall(fn, arg)
+
+ // Update accounting before exitsyscall because exitsyscall may
+ // reschedule us on to a different M.
+ mp.incgo = false
+ mp.ncgo--
+
+ osPreemptExtExit(mp)
+
+ exitsyscall()
+
+ // Note that raceacquire must be called only after exitsyscall has
+ // wired this M to a P.
+ if raceenabled {
+ raceacquire(unsafe.Pointer(&racecgosync))
+ }
+
+ // From the garbage collector's perspective, time can move
+ // backwards in the sequence above. If there's a callback into
+ // Go code, GC will see this function at the call to
+ // asmcgocall. When the Go call later returns to C, the
+ // syscall PC/SP is rolled back and the GC sees this function
+ // back at the call to entersyscall. Normally, fn and arg
+ // would be live at entersyscall and dead at asmcgocall, so if
+ // time moved backwards, GC would see these arguments as dead
+ // and then live. Prevent these undead arguments from crashing
+ // GC by forcing them to stay live across this time warp.
+ KeepAlive(fn)
+ KeepAlive(arg)
+ KeepAlive(mp)
+
+ return errno
+}
+
+// Call from C back to Go. fn must point to an ABIInternal Go entry-point.
+//
+//go:nosplit
+func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
+ gp := getg()
+ if gp != gp.m.curg {
+ println("runtime: bad g in cgocallback")
+ exit(2)
+ }
+
+ // The call from C is on gp.m's g0 stack, so we must ensure
+ // that we stay on that M. We have to do this before calling
+ // exitsyscall, since it would otherwise be free to move us to
+ // a different M. The call to unlockOSThread is in unwindm.
+ lockOSThread()
+
+ checkm := gp.m
+
+	// Save the current syscall parameters, so m.syscall can be
+	// used again if the callback decides to make a syscall.
+ syscall := gp.m.syscall
+
+ // entersyscall saves the caller's SP to allow the GC to trace the Go
+ // stack. However, since we're returning to an earlier stack frame and
+ // need to pair with the entersyscall() call made by cgocall, we must
+ // save syscall* and let reentersyscall restore them.
+ savedsp := unsafe.Pointer(gp.syscallsp)
+ savedpc := gp.syscallpc
+ exitsyscall() // coming out of cgo call
+ gp.m.incgo = false
+
+ osPreemptExtExit(gp.m)
+
+ cgocallbackg1(fn, frame, ctxt) // will call unlockOSThread
+
+ // At this point unlockOSThread has been called.
+ // The following code must not change to a different m.
+ // This is enforced by checking incgo in the schedule function.
+
+ gp.m.incgo = true
+
+ if gp.m != checkm {
+ throw("m changed unexpectedly in cgocallbackg")
+ }
+
+ osPreemptExtEnter(gp.m)
+
+ // going back to cgo call
+ reentersyscall(savedpc, uintptr(savedsp))
+
+ gp.m.syscall = syscall
+}
+
+func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) {
+ gp := getg()
+
+ // When we return, undo the call to lockOSThread in cgocallbackg.
+ // We must still stay on the same m.
+ defer unlockOSThread()
+
+ if gp.m.needextram || atomic.Load(&extraMWaiters) > 0 {
+ gp.m.needextram = false
+ systemstack(newextram)
+ }
+
+ if ctxt != 0 {
+ s := append(gp.cgoCtxt, ctxt)
+
+ // Now we need to set gp.cgoCtxt = s, but we could get
+ // a SIGPROF signal while manipulating the slice, and
+ // the SIGPROF handler could pick up gp.cgoCtxt while
+ // tracing up the stack. We need to ensure that the
+ // handler always sees a valid slice, so set the
+ // values in an order such that it always does.
+ p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
+ atomicstorep(unsafe.Pointer(&p.array), unsafe.Pointer(&s[0]))
+ p.cap = cap(s)
+ p.len = len(s)
+
+ defer func(gp *g) {
+ // Decrease the length of the slice by one, safely.
+ p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
+ p.len--
+ }(gp)
+ }
+
+ if gp.m.ncgo == 0 {
+ // The C call to Go came from a thread not currently running
+ // any Go. In the case of -buildmode=c-archive or c-shared,
+ // this call may be coming in before package initialization
+ // is complete. Wait until it is.
+ <-main_init_done
+ }
+
+	// Check whether the profiler needs to be turned on or off; this route to
+	// run Go code does not use runtime.execute, so it bypasses the check there.
+ hz := sched.profilehz
+ if gp.m.profilehz != hz {
+ setThreadCPUProfiler(hz)
+ }
+
+ // Add entry to defer stack in case of panic.
+ restore := true
+ defer unwindm(&restore)
+
+ if raceenabled {
+ raceacquire(unsafe.Pointer(&racecgosync))
+ }
+
+ // Invoke callback. This function is generated by cmd/cgo and
+ // will unpack the argument frame and call the Go function.
+ var cb func(frame unsafe.Pointer)
+ cbFV := funcval{uintptr(fn)}
+ *(*unsafe.Pointer)(unsafe.Pointer(&cb)) = noescape(unsafe.Pointer(&cbFV))
+ cb(frame)
+
+ if raceenabled {
+ racereleasemerge(unsafe.Pointer(&racecgosync))
+ }
+
+ // Do not unwind m->g0->sched.sp.
+ // Our caller, cgocallback, will do that.
+ restore = false
+}
+
+func unwindm(restore *bool) {
+ if *restore {
+ // Restore sp saved by cgocallback during
+ // unwind of g's stack (see comment at top of file).
+ mp := acquirem()
+ sched := &mp.g0.sched
+ sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + alignUp(sys.MinFrameSize, sys.StackAlign)))
+
+ // Do the accounting that cgocall will not have a chance to do
+ // during an unwind.
+ //
+ // In the case where a Go call originates from C, ncgo is 0
+ // and there is no matching cgocall to end.
+ if mp.ncgo > 0 {
+ mp.incgo = false
+ mp.ncgo--
+ osPreemptExtExit(mp)
+ }
+
+ releasem(mp)
+ }
+}
+
+// called from assembly
+func badcgocallback() {
+ throw("misaligned stack in cgocallback")
+}
+
+// called from (incomplete) assembly
+func cgounimpl() {
+ throw("cgo not implemented")
+}
+
+var racecgosync uint64 // represents possible synchronization in C code
+
+// Pointer checking for cgo code.
+
+// We want to detect all cases where a program that does not use
+// unsafe makes a cgo call passing a Go pointer to memory that
+// contains a Go pointer. Here a Go pointer is defined as a pointer
+// to memory allocated by the Go runtime. Programs that use unsafe
+// can evade this restriction easily, so we don't try to catch them.
+// The cgo program will rewrite all possibly bad pointer arguments to
+// call cgoCheckPointer, where we can catch cases of a Go pointer
+// pointing to a Go pointer.
+
+// Complicating matters, taking the address of a slice or array
+// element permits the C program to access all elements of the slice
+// or array. In that case we will see a pointer to a single element,
+// but we need to check the entire data structure.
+
+// The cgoCheckPointer call takes additional arguments indicating that
+// it was called on an address expression. An additional argument of
+// true means that it only needs to check a single element. An
+// additional argument of a slice or array means that it needs to
+// check the entire slice/array, but nothing else. Otherwise, the
+// pointer could be anything, and we check the entire heap object,
+// which is conservative but safe.
+
+// When and if we implement a moving garbage collector,
+// cgoCheckPointer will pin the pointer for the duration of the cgo
+// call. (This is necessary but not sufficient; the cgo program will
+// also have to change to pin Go pointers that cannot point to Go
+// pointers.)
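To make these rules concrete, here is a minimal sketch of a call that the generated cgoCheckPointer guard rejects; the C function take, the node type, and the file layout are illustrative assumptions, not part of this diff:

    package main

    /*
    static void take(void *p) {}
    */
    import "C"

    import "unsafe"

    type node struct{ next *node }

    func main() {
        n := &node{next: &node{}} // Go pointer to memory that holds a Go pointer
        C.take(unsafe.Pointer(n)) // panics: "cgo argument has Go pointer to Go pointer"
    }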
+
+// cgoCheckPointer checks if the argument contains a Go pointer that
+// points to a Go pointer, and panics if it does.
+func cgoCheckPointer(ptr any, arg any) {
+ if debug.cgocheck == 0 {
+ return
+ }
+
+ ep := efaceOf(&ptr)
+ t := ep._type
+
+ top := true
+ if arg != nil && (t.kind&kindMask == kindPtr || t.kind&kindMask == kindUnsafePointer) {
+ p := ep.data
+ if t.kind&kindDirectIface == 0 {
+ p = *(*unsafe.Pointer)(p)
+ }
+ if p == nil || !cgoIsGoPointer(p) {
+ return
+ }
+ aep := efaceOf(&arg)
+ switch aep._type.kind & kindMask {
+ case kindBool:
+ if t.kind&kindMask == kindUnsafePointer {
+ // We don't know the type of the element.
+ break
+ }
+ pt := (*ptrtype)(unsafe.Pointer(t))
+ cgoCheckArg(pt.elem, p, true, false, cgoCheckPointerFail)
+ return
+ case kindSlice:
+ // Check the slice rather than the pointer.
+ ep = aep
+ t = ep._type
+ case kindArray:
+ // Check the array rather than the pointer.
+ // Pass top as false since we have a pointer
+ // to the array.
+ ep = aep
+ t = ep._type
+ top = false
+ default:
+ throw("can't happen")
+ }
+ }
+
+ cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, top, cgoCheckPointerFail)
+}
+
+const cgoCheckPointerFail = "cgo argument has Go pointer to Go pointer"
+const cgoResultFail = "cgo result has Go pointer"
+
+// cgoCheckArg is the real work of cgoCheckPointer. The argument p
+// is either a pointer to the value (of type t), or the value itself,
+// depending on indir. The top parameter is whether we are at the top
+// level, where Go pointers are allowed.
+func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
+ if t.ptrdata == 0 || p == nil {
+ // If the type has no pointers there is nothing to do.
+ return
+ }
+
+ switch t.kind & kindMask {
+ default:
+ throw("can't happen")
+ case kindArray:
+ at := (*arraytype)(unsafe.Pointer(t))
+ if !indir {
+ if at.len != 1 {
+ throw("can't happen")
+ }
+ cgoCheckArg(at.elem, p, at.elem.kind&kindDirectIface == 0, top, msg)
+ return
+ }
+ for i := uintptr(0); i < at.len; i++ {
+ cgoCheckArg(at.elem, p, true, top, msg)
+ p = add(p, at.elem.size)
+ }
+ case kindChan, kindMap:
+ // These types contain internal pointers that will
+ // always be allocated in the Go heap. It's never OK
+ // to pass them to C.
+ panic(errorString(msg))
+ case kindFunc:
+ if indir {
+ p = *(*unsafe.Pointer)(p)
+ }
+ if !cgoIsGoPointer(p) {
+ return
+ }
+ panic(errorString(msg))
+ case kindInterface:
+ it := *(**_type)(p)
+ if it == nil {
+ return
+ }
+ // A type known at compile time is OK since it's
+ // constant. A type not known at compile time will be
+ // in the heap and will not be OK.
+ if inheap(uintptr(unsafe.Pointer(it))) {
+ panic(errorString(msg))
+ }
+ p = *(*unsafe.Pointer)(add(p, goarch.PtrSize))
+ if !cgoIsGoPointer(p) {
+ return
+ }
+ if !top {
+ panic(errorString(msg))
+ }
+ cgoCheckArg(it, p, it.kind&kindDirectIface == 0, false, msg)
+ case kindSlice:
+ st := (*slicetype)(unsafe.Pointer(t))
+ s := (*slice)(p)
+ p = s.array
+ if p == nil || !cgoIsGoPointer(p) {
+ return
+ }
+ if !top {
+ panic(errorString(msg))
+ }
+ if st.elem.ptrdata == 0 {
+ return
+ }
+ for i := 0; i < s.cap; i++ {
+ cgoCheckArg(st.elem, p, true, false, msg)
+ p = add(p, st.elem.size)
+ }
+ case kindString:
+ ss := (*stringStruct)(p)
+ if !cgoIsGoPointer(ss.str) {
+ return
+ }
+ if !top {
+ panic(errorString(msg))
+ }
+ case kindStruct:
+ st := (*structtype)(unsafe.Pointer(t))
+ if !indir {
+ if len(st.fields) != 1 {
+ throw("can't happen")
+ }
+ cgoCheckArg(st.fields[0].typ, p, st.fields[0].typ.kind&kindDirectIface == 0, top, msg)
+ return
+ }
+ for _, f := range st.fields {
+ if f.typ.ptrdata == 0 {
+ continue
+ }
+ cgoCheckArg(f.typ, add(p, f.offset), true, top, msg)
+ }
+ case kindPtr, kindUnsafePointer:
+ if indir {
+ p = *(*unsafe.Pointer)(p)
+ if p == nil {
+ return
+ }
+ }
+
+ if !cgoIsGoPointer(p) {
+ return
+ }
+ if !top {
+ panic(errorString(msg))
+ }
+
+ cgoCheckUnknownPointer(p, msg)
+ }
+}
+
+// cgoCheckUnknownPointer is called for an arbitrary pointer into Go
+// memory. It checks whether that Go memory contains any other
+// pointer into Go memory. If it does, we panic.
+// The return values are unused but useful to see in panic tracebacks.
+func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) {
+ if inheap(uintptr(p)) {
+ b, span, _ := findObject(uintptr(p), 0, 0)
+ base = b
+ if base == 0 {
+ return
+ }
+ hbits := heapBitsForAddr(base)
+ n := span.elemsize
+ for i = uintptr(0); i < n; i += goarch.PtrSize {
+ if !hbits.morePointers() {
+ // No more possible pointers.
+ break
+ }
+ if hbits.isPointer() && cgoIsGoPointer(*(*unsafe.Pointer)(unsafe.Pointer(base + i))) {
+ panic(errorString(msg))
+ }
+ hbits = hbits.next()
+ }
+
+ return
+ }
+
+ for _, datap := range activeModules() {
+ if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
+ // We have no way to know the size of the object.
+ // We have to assume that it might contain a pointer.
+ panic(errorString(msg))
+ }
+ // In the text or noptr sections, we know that the
+ // pointer does not point to a Go pointer.
+ }
+
+ return
+}
+
+// cgoIsGoPointer reports whether the pointer is a Go pointer--a
+// pointer to Go memory. We only care about Go memory that might
+// contain pointers.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func cgoIsGoPointer(p unsafe.Pointer) bool {
+ if p == nil {
+ return false
+ }
+
+ if inHeapOrStack(uintptr(p)) {
+ return true
+ }
+
+ for _, datap := range activeModules() {
+ if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// cgoInRange reports whether p is between start and end.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func cgoInRange(p unsafe.Pointer, start, end uintptr) bool {
+ return start <= uintptr(p) && uintptr(p) < end
+}
+
+// cgoCheckResult is called to check the result parameter of an
+// exported Go function. It panics if the result is or contains a Go
+// pointer.
+func cgoCheckResult(val any) {
+ if debug.cgocheck == 0 {
+ return
+ }
+
+ ep := efaceOf(&val)
+ t := ep._type
+ cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, false, cgoResultFail)
+}
diff --git a/contrib/go/_std_1.18/src/runtime/cgocallback.go b/contrib/go/_std_1.19/src/runtime/cgocallback.go
index 59953f1cee..59953f1cee 100644
--- a/contrib/go/_std_1.18/src/runtime/cgocallback.go
+++ b/contrib/go/_std_1.19/src/runtime/cgocallback.go
diff --git a/contrib/go/_std_1.19/src/runtime/cgocheck.go b/contrib/go/_std_1.19/src/runtime/cgocheck.go
new file mode 100644
index 0000000000..74a2ec09bc
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/cgocheck.go
@@ -0,0 +1,268 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code to check that pointer writes follow the cgo rules.
+// These functions are invoked via the write barrier when debug.cgocheck > 1.
+
+package runtime
+
+import (
+ "internal/goarch"
+ "unsafe"
+)
+
+const cgoWriteBarrierFail = "Go pointer stored into non-Go memory"
+
+// cgoCheckWriteBarrier is called whenever a pointer is stored into memory.
+// It throws if the program is storing a Go pointer into non-Go memory.
+//
+// This is called from the write barrier, so its entire call tree must
+// be nosplit.
+//
+//go:nosplit
+//go:nowritebarrier
+func cgoCheckWriteBarrier(dst *uintptr, src uintptr) {
+ if !cgoIsGoPointer(unsafe.Pointer(src)) {
+ return
+ }
+ if cgoIsGoPointer(unsafe.Pointer(dst)) {
+ return
+ }
+
+ // If we are running on the system stack then dst might be an
+ // address on the stack, which is OK.
+ g := getg()
+ if g == g.m.g0 || g == g.m.gsignal {
+ return
+ }
+
+ // Allocating memory can write to various mfixalloc structs
+ // that look like they are non-Go memory.
+ if g.m.mallocing != 0 {
+ return
+ }
+
+	// It's OK to write to memory allocated by persistentalloc.
+ // Do this check last because it is more expensive and rarely true.
+ // If it is false the expense doesn't matter since we are crashing.
+ if inPersistentAlloc(uintptr(unsafe.Pointer(dst))) {
+ return
+ }
+
+ systemstack(func() {
+ println("write of Go pointer", hex(src), "to non-Go memory", hex(uintptr(unsafe.Pointer(dst))))
+ throw(cgoWriteBarrierFail)
+ })
+}
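These write-barrier checks only run when the program is started with GODEBUG=cgocheck=2. A hedged sketch of the kind of store that trips them; the storeIntoC helper and the use of C.malloc for the destination are illustrative assumptions:

    package main

    /*
    #include <stdlib.h>
    */
    import "C"

    import "unsafe"

    func storeIntoC() {
        // slot lives in C-allocated memory, so it is not a Go pointer.
        slot := (*unsafe.Pointer)(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0)))))
        x := new(int)
        // Under GODEBUG=cgocheck=2 the write barrier calls cgoCheckWriteBarrier,
        // which throws "Go pointer stored into non-Go memory" here.
        *slot = unsafe.Pointer(x)
    }

    func main() { storeIntoC() }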
+
+// cgoCheckMemmove is called when moving a block of memory.
+// dst and src each point off bytes into the value being copied.
+// size is the number of bytes to copy.
+// It throws if the program is copying a block that contains a Go pointer
+// into non-Go memory.
+//
+//go:nosplit
+//go:nowritebarrier
+func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
+ if typ.ptrdata == 0 {
+ return
+ }
+ if !cgoIsGoPointer(src) {
+ return
+ }
+ if cgoIsGoPointer(dst) {
+ return
+ }
+ cgoCheckTypedBlock(typ, src, off, size)
+}
+
+// cgoCheckSliceCopy is called when copying n elements of a slice.
+// src and dst are pointers to the first element of the slice.
+// typ is the element type of the slice.
+// It throws if the program is copying slice elements that contain Go pointers
+// into non-Go memory.
+//
+//go:nosplit
+//go:nowritebarrier
+func cgoCheckSliceCopy(typ *_type, dst, src unsafe.Pointer, n int) {
+ if typ.ptrdata == 0 {
+ return
+ }
+ if !cgoIsGoPointer(src) {
+ return
+ }
+ if cgoIsGoPointer(dst) {
+ return
+ }
+ p := src
+ for i := 0; i < n; i++ {
+ cgoCheckTypedBlock(typ, p, 0, typ.size)
+ p = add(p, typ.size)
+ }
+}
+
+// cgoCheckTypedBlock checks the block of memory at src, for up to size bytes,
+// and throws if it finds a Go pointer. The type of the memory is typ,
+// and src is off bytes into that type.
+//
+//go:nosplit
+//go:nowritebarrier
+func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
+ // Anything past typ.ptrdata is not a pointer.
+ if typ.ptrdata <= off {
+ return
+ }
+ if ptrdataSize := typ.ptrdata - off; size > ptrdataSize {
+ size = ptrdataSize
+ }
+
+ if typ.kind&kindGCProg == 0 {
+ cgoCheckBits(src, typ.gcdata, off, size)
+ return
+ }
+
+ // The type has a GC program. Try to find GC bits somewhere else.
+ for _, datap := range activeModules() {
+ if cgoInRange(src, datap.data, datap.edata) {
+ doff := uintptr(src) - datap.data
+ cgoCheckBits(add(src, -doff), datap.gcdatamask.bytedata, off+doff, size)
+ return
+ }
+ if cgoInRange(src, datap.bss, datap.ebss) {
+ boff := uintptr(src) - datap.bss
+ cgoCheckBits(add(src, -boff), datap.gcbssmask.bytedata, off+boff, size)
+ return
+ }
+ }
+
+ s := spanOfUnchecked(uintptr(src))
+ if s.state.get() == mSpanManual {
+		// There are no heap bits for a value stored on the stack.
+ // For a channel receive src might be on the stack of some
+ // other goroutine, so we can't unwind the stack even if
+ // we wanted to.
+ // We can't expand the GC program without extra storage
+ // space we can't easily get.
+ // Fortunately we have the type information.
+ systemstack(func() {
+ cgoCheckUsingType(typ, src, off, size)
+ })
+ return
+ }
+
+ // src must be in the regular heap.
+
+ hbits := heapBitsForAddr(uintptr(src))
+ for i := uintptr(0); i < off+size; i += goarch.PtrSize {
+ bits := hbits.bits()
+ if i >= off && bits&bitPointer != 0 {
+ v := *(*unsafe.Pointer)(add(src, i))
+ if cgoIsGoPointer(v) {
+ throw(cgoWriteBarrierFail)
+ }
+ }
+ hbits = hbits.next()
+ }
+}
+
+// cgoCheckBits checks the block of memory at src, for up to size
+// bytes, and throws if it finds a Go pointer. The gcbits mark each
+// pointer value. The src pointer is off bytes into the gcbits.
+//
+//go:nosplit
+//go:nowritebarrier
+func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) {
+ skipMask := off / goarch.PtrSize / 8
+ skipBytes := skipMask * goarch.PtrSize * 8
+ ptrmask := addb(gcbits, skipMask)
+ src = add(src, skipBytes)
+ off -= skipBytes
+ size += off
+ var bits uint32
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
+ if i&(goarch.PtrSize*8-1) == 0 {
+ bits = uint32(*ptrmask)
+ ptrmask = addb(ptrmask, 1)
+ } else {
+ bits >>= 1
+ }
+ if off > 0 {
+ off -= goarch.PtrSize
+ } else {
+ if bits&1 != 0 {
+ v := *(*unsafe.Pointer)(add(src, i))
+ if cgoIsGoPointer(v) {
+ throw(cgoWriteBarrierFail)
+ }
+ }
+ }
+ }
+}
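The skip arithmetic above advances in whole bitmap bytes, each covering 8 pointer-sized words. A worked sketch, assuming 8-byte pointers and an arbitrary off of 200:

    package main

    import "fmt"

    func main() {
        const ptrSize = 8 // assuming goarch.PtrSize == 8
        off := uintptr(200)
        skipMask := off / ptrSize / 8       // 3: whole bitmap bytes to skip
        skipBytes := skipMask * ptrSize * 8 // 192: bytes of memory they describe
        fmt.Println(skipMask, skipBytes, off-skipBytes) // 3 192 8
    }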
+
+// cgoCheckUsingType is like cgoCheckTypedBlock, but is a last ditch
+// fall back to look for pointers in src using the type information.
+// We only use this when looking at a value on the stack when the type
+// uses a GC program, because otherwise it's more efficient to use the
+// GC bits. This is called on the system stack.
+//
+//go:nowritebarrier
+//go:systemstack
+func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr) {
+ if typ.ptrdata == 0 {
+ return
+ }
+
+ // Anything past typ.ptrdata is not a pointer.
+ if typ.ptrdata <= off {
+ return
+ }
+ if ptrdataSize := typ.ptrdata - off; size > ptrdataSize {
+ size = ptrdataSize
+ }
+
+ if typ.kind&kindGCProg == 0 {
+ cgoCheckBits(src, typ.gcdata, off, size)
+ return
+ }
+ switch typ.kind & kindMask {
+ default:
+ throw("can't happen")
+ case kindArray:
+ at := (*arraytype)(unsafe.Pointer(typ))
+ for i := uintptr(0); i < at.len; i++ {
+ if off < at.elem.size {
+ cgoCheckUsingType(at.elem, src, off, size)
+ }
+ src = add(src, at.elem.size)
+ skipped := off
+ if skipped > at.elem.size {
+ skipped = at.elem.size
+ }
+ checked := at.elem.size - skipped
+ off -= skipped
+ if size <= checked {
+ return
+ }
+ size -= checked
+ }
+ case kindStruct:
+ st := (*structtype)(unsafe.Pointer(typ))
+ for _, f := range st.fields {
+ if off < f.typ.size {
+ cgoCheckUsingType(f.typ, src, off, size)
+ }
+ src = add(src, f.typ.size)
+ skipped := off
+ if skipped > f.typ.size {
+ skipped = f.typ.size
+ }
+ checked := f.typ.size - skipped
+ off -= skipped
+ if size <= checked {
+ return
+ }
+ size -= checked
+ }
+ }
+}
diff --git a/contrib/go/_std_1.19/src/runtime/chan.go b/contrib/go/_std_1.19/src/runtime/chan.go
new file mode 100644
index 0000000000..ca516ad9e8
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/chan.go
@@ -0,0 +1,851 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// This file contains the implementation of Go channels.
+
+// Invariants:
+// At least one of c.sendq and c.recvq is empty,
+// except for the case of an unbuffered channel with a single goroutine
+// blocked on it for both sending and receiving using a select statement,
+// in which case the length of c.sendq and c.recvq is limited only by the
+// size of the select statement.
+//
+// For buffered channels, also:
+// c.qcount > 0 implies that c.recvq is empty.
+// c.qcount < c.dataqsiz implies that c.sendq is empty.
+
+import (
+ "internal/abi"
+ "runtime/internal/atomic"
+ "runtime/internal/math"
+ "unsafe"
+)
+
+const (
+ maxAlign = 8
+ hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
+ debugChan = false
+)
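The hchanSize expression rounds unsafe.Sizeof(hchan{}) up to the next multiple of maxAlign using a two's-complement trick. A small sketch of the same arithmetic with a made-up size:

    package main

    import "fmt"

    func main() {
        const maxAlign = 8
        size := 90 // pretend unsafe.Sizeof(hchan{}) were 90 bytes
        // -size & (maxAlign-1) is the distance up to the next multiple of maxAlign.
        fmt.Println(size + -size&(maxAlign-1)) // 96; already-aligned sizes are unchanged
    }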
+
+type hchan struct {
+ qcount uint // total data in the queue
+ dataqsiz uint // size of the circular queue
+ buf unsafe.Pointer // points to an array of dataqsiz elements
+ elemsize uint16
+ closed uint32
+ elemtype *_type // element type
+ sendx uint // send index
+ recvx uint // receive index
+ recvq waitq // list of recv waiters
+ sendq waitq // list of send waiters
+
+ // lock protects all fields in hchan, as well as several
+ // fields in sudogs blocked on this channel.
+ //
+ // Do not change another G's status while holding this lock
+ // (in particular, do not ready a G), as this can deadlock
+ // with stack shrinking.
+ lock mutex
+}
+
+type waitq struct {
+ first *sudog
+ last *sudog
+}
+
+//go:linkname reflect_makechan reflect.makechan
+func reflect_makechan(t *chantype, size int) *hchan {
+ return makechan(t, size)
+}
+
+func makechan64(t *chantype, size int64) *hchan {
+ if int64(int(size)) != size {
+ panic(plainError("makechan: size out of range"))
+ }
+
+ return makechan(t, int(size))
+}
+
+func makechan(t *chantype, size int) *hchan {
+ elem := t.elem
+
+ // compiler checks this but be safe.
+ if elem.size >= 1<<16 {
+ throw("makechan: invalid channel element type")
+ }
+ if hchanSize%maxAlign != 0 || elem.align > maxAlign {
+ throw("makechan: bad alignment")
+ }
+
+ mem, overflow := math.MulUintptr(elem.size, uintptr(size))
+ if overflow || mem > maxAlloc-hchanSize || size < 0 {
+ panic(plainError("makechan: size out of range"))
+ }
+
+	// Hchan contains no pointers interesting to the GC when the elements stored in buf contain no pointers.
+ // buf points into the same allocation, elemtype is persistent.
+ // SudoG's are referenced from their owning thread so they can't be collected.
+ // TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
+ var c *hchan
+ switch {
+ case mem == 0:
+ // Queue or element size is zero.
+ c = (*hchan)(mallocgc(hchanSize, nil, true))
+ // Race detector uses this location for synchronization.
+ c.buf = c.raceaddr()
+ case elem.ptrdata == 0:
+ // Elements do not contain pointers.
+ // Allocate hchan and buf in one call.
+ c = (*hchan)(mallocgc(hchanSize+mem, nil, true))
+ c.buf = add(unsafe.Pointer(c), hchanSize)
+ default:
+ // Elements contain pointers.
+ c = new(hchan)
+ c.buf = mallocgc(mem, elem, true)
+ }
+
+ c.elemsize = uint16(elem.size)
+ c.elemtype = elem
+ c.dataqsiz = uint(size)
+ lockInit(&c.lock, lockRankHchan)
+
+ if debugChan {
+ print("makechan: chan=", c, "; elemsize=", elem.size, "; dataqsiz=", size, "\n")
+ }
+ return c
+}
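For orientation, the three allocation cases in the switch above correspond to ordinary make expressions; a sketch:

    _ = make(chan struct{}) // mem == 0: one hchan allocation, buf is just the race address
    _ = make(chan int, 8)   // element type has no pointers: hchan and buf in one allocation
    _ = make(chan *int, 8)  // element type has pointers: buf gets its own typed allocation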
+
+// chanbuf(c, i) is pointer to the i'th slot in the buffer.
+func chanbuf(c *hchan, i uint) unsafe.Pointer {
+ return add(c.buf, uintptr(i)*uintptr(c.elemsize))
+}
+
+// full reports whether a send on c would block (that is, the channel is full).
+// It uses a single word-sized read of mutable state, so although
+// the answer is instantaneously true, the correct answer may have changed
+// by the time the calling function receives the return value.
+func full(c *hchan) bool {
+ // c.dataqsiz is immutable (never written after the channel is created)
+ // so it is safe to read at any time during channel operation.
+ if c.dataqsiz == 0 {
+ // Assumes that a pointer read is relaxed-atomic.
+ return c.recvq.first == nil
+ }
+ // Assumes that a uint read is relaxed-atomic.
+ return c.qcount == c.dataqsiz
+}
+
+// entry point for c <- x from compiled code
+//
+//go:nosplit
+func chansend1(c *hchan, elem unsafe.Pointer) {
+ chansend(c, elem, true, getcallerpc())
+}
+
+/*
+ * generic single channel send/recv
+ * If block is false,
+ * then the protocol will not
+ * sleep but return if it could
+ * not complete.
+ *
+ * sleep can wake up with g.param == nil
+ * when a channel involved in the sleep has
+ * been closed. It is easiest to loop and re-run
+ * the operation; we'll see that it's now closed.
+ */
+func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
+ if c == nil {
+ if !block {
+ return false
+ }
+ gopark(nil, nil, waitReasonChanSendNilChan, traceEvGoStop, 2)
+ throw("unreachable")
+ }
+
+ if debugChan {
+ print("chansend: chan=", c, "\n")
+ }
+
+ if raceenabled {
+ racereadpc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(chansend))
+ }
+
+ // Fast path: check for failed non-blocking operation without acquiring the lock.
+ //
+ // After observing that the channel is not closed, we observe that the channel is
+ // not ready for sending. Each of these observations is a single word-sized read
+ // (first c.closed and second full()).
+ // Because a closed channel cannot transition from 'ready for sending' to
+ // 'not ready for sending', even if the channel is closed between the two observations,
+ // they imply a moment between the two when the channel was both not yet closed
+ // and not ready for sending. We behave as if we observed the channel at that moment,
+ // and report that the send cannot proceed.
+ //
+ // It is okay if the reads are reordered here: if we observe that the channel is not
+ // ready for sending and then observe that it is not closed, that implies that the
+ // channel wasn't closed during the first observation. However, nothing here
+ // guarantees forward progress. We rely on the side effects of lock release in
+ // chanrecv() and closechan() to update this thread's view of c.closed and full().
+ if !block && c.closed == 0 && full(c) {
+ return false
+ }
+
+ var t0 int64
+ if blockprofilerate > 0 {
+ t0 = cputicks()
+ }
+
+ lock(&c.lock)
+
+ if c.closed != 0 {
+ unlock(&c.lock)
+ panic(plainError("send on closed channel"))
+ }
+
+ if sg := c.recvq.dequeue(); sg != nil {
+ // Found a waiting receiver. We pass the value we want to send
+ // directly to the receiver, bypassing the channel buffer (if any).
+ send(c, sg, ep, func() { unlock(&c.lock) }, 3)
+ return true
+ }
+
+ if c.qcount < c.dataqsiz {
+ // Space is available in the channel buffer. Enqueue the element to send.
+ qp := chanbuf(c, c.sendx)
+ if raceenabled {
+ racenotify(c, c.sendx, nil)
+ }
+ typedmemmove(c.elemtype, qp, ep)
+ c.sendx++
+ if c.sendx == c.dataqsiz {
+ c.sendx = 0
+ }
+ c.qcount++
+ unlock(&c.lock)
+ return true
+ }
+
+ if !block {
+ unlock(&c.lock)
+ return false
+ }
+
+ // Block on the channel. Some receiver will complete our operation for us.
+ gp := getg()
+ mysg := acquireSudog()
+ mysg.releasetime = 0
+ if t0 != 0 {
+ mysg.releasetime = -1
+ }
+ // No stack splits between assigning elem and enqueuing mysg
+ // on gp.waiting where copystack can find it.
+ mysg.elem = ep
+ mysg.waitlink = nil
+ mysg.g = gp
+ mysg.isSelect = false
+ mysg.c = c
+ gp.waiting = mysg
+ gp.param = nil
+ c.sendq.enqueue(mysg)
+ // Signal to anyone trying to shrink our stack that we're about
+ // to park on a channel. The window between when this G's status
+ // changes and when we set gp.activeStackChans is not safe for
+ // stack shrinking.
+ atomic.Store8(&gp.parkingOnChan, 1)
+ gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceEvGoBlockSend, 2)
+ // Ensure the value being sent is kept alive until the
+ // receiver copies it out. The sudog has a pointer to the
+ // stack object, but sudogs aren't considered as roots of the
+ // stack tracer.
+ KeepAlive(ep)
+
+ // someone woke us up.
+ if mysg != gp.waiting {
+ throw("G waiting list is corrupted")
+ }
+ gp.waiting = nil
+ gp.activeStackChans = false
+ closed := !mysg.success
+ gp.param = nil
+ if mysg.releasetime > 0 {
+ blockevent(mysg.releasetime-t0, 2)
+ }
+ mysg.c = nil
+ releaseSudog(mysg)
+ if closed {
+ if c.closed == 0 {
+ throw("chansend: spurious wakeup")
+ }
+ panic(plainError("send on closed channel"))
+ }
+ return true
+}
+
+// send processes a send operation on an empty channel c.
+// The value ep sent by the sender is copied to the receiver sg.
+// The receiver is then woken up to go on its merry way.
+// Channel c must be empty and locked. send unlocks c with unlockf.
+// sg must already be dequeued from c.
+// ep must be non-nil and point to the heap or the caller's stack.
+func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
+ if raceenabled {
+ if c.dataqsiz == 0 {
+ racesync(c, sg)
+ } else {
+ // Pretend we go through the buffer, even though
+ // we copy directly. Note that we need to increment
+ // the head/tail locations only when raceenabled.
+ racenotify(c, c.recvx, nil)
+ racenotify(c, c.recvx, sg)
+ c.recvx++
+ if c.recvx == c.dataqsiz {
+ c.recvx = 0
+ }
+ c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
+ }
+ }
+ if sg.elem != nil {
+ sendDirect(c.elemtype, sg, ep)
+ sg.elem = nil
+ }
+ gp := sg.g
+ unlockf()
+ gp.param = unsafe.Pointer(sg)
+ sg.success = true
+ if sg.releasetime != 0 {
+ sg.releasetime = cputicks()
+ }
+ goready(gp, skip+1)
+}
+
+// Sends and receives on unbuffered or empty-buffered channels are the
+// only operations where one running goroutine writes to the stack of
+// another running goroutine. The GC assumes that stack writes only
+// happen when the goroutine is running and are only done by that
+// goroutine. Using a write barrier is sufficient to make up for
+// violating that assumption, but the write barrier has to work.
+// typedmemmove will call bulkBarrierPreWrite, but the target bytes
+// are not in the heap, so that will not help. We arrange to call
+// memmove and typeBitsBulkBarrier instead.
+
+func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
+ // src is on our stack, dst is a slot on another stack.
+
+ // Once we read sg.elem out of sg, it will no longer
+ // be updated if the destination's stack gets copied (shrunk).
+ // So make sure that no preemption points can happen between read & use.
+ dst := sg.elem
+ typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
+ // No need for cgo write barrier checks because dst is always
+ // Go memory.
+ memmove(dst, src, t.size)
+}
+
+func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) {
+ // dst is on our stack or the heap, src is on another stack.
+ // The channel is locked, so src will not move during this
+ // operation.
+ src := sg.elem
+ typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
+ memmove(dst, src, t.size)
+}
+
+func closechan(c *hchan) {
+ if c == nil {
+ panic(plainError("close of nil channel"))
+ }
+
+ lock(&c.lock)
+ if c.closed != 0 {
+ unlock(&c.lock)
+ panic(plainError("close of closed channel"))
+ }
+
+ if raceenabled {
+ callerpc := getcallerpc()
+ racewritepc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(closechan))
+ racerelease(c.raceaddr())
+ }
+
+ c.closed = 1
+
+ var glist gList
+
+ // release all readers
+ for {
+ sg := c.recvq.dequeue()
+ if sg == nil {
+ break
+ }
+ if sg.elem != nil {
+ typedmemclr(c.elemtype, sg.elem)
+ sg.elem = nil
+ }
+ if sg.releasetime != 0 {
+ sg.releasetime = cputicks()
+ }
+ gp := sg.g
+ gp.param = unsafe.Pointer(sg)
+ sg.success = false
+ if raceenabled {
+ raceacquireg(gp, c.raceaddr())
+ }
+ glist.push(gp)
+ }
+
+ // release all writers (they will panic)
+ for {
+ sg := c.sendq.dequeue()
+ if sg == nil {
+ break
+ }
+ sg.elem = nil
+ if sg.releasetime != 0 {
+ sg.releasetime = cputicks()
+ }
+ gp := sg.g
+ gp.param = unsafe.Pointer(sg)
+ sg.success = false
+ if raceenabled {
+ raceacquireg(gp, c.raceaddr())
+ }
+ glist.push(gp)
+ }
+ unlock(&c.lock)
+
+ // Ready all Gs now that we've dropped the channel lock.
+ for !glist.empty() {
+ gp := glist.pop()
+ gp.schedlink = 0
+ goready(gp, 3)
+ }
+}
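The observable effect of closechan releasing readers (with zeroed values) and panicking writers, as a short usage sketch:

    package main

    import "fmt"

    func main() {
        c := make(chan int, 1)
        c <- 1
        close(c)
        fmt.Println(<-c) // 1: a buffered value is still delivered after close
        v, ok := <-c
        fmt.Println(v, ok) // 0 false: drained and closed, receivers get the zero value
        // A further c <- 2 or close(c) would panic, as chansend and closechan enforce.
    }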
+
+// empty reports whether a read from c would block (that is, the channel is
+// empty). It uses a single atomic read of mutable state.
+func empty(c *hchan) bool {
+ // c.dataqsiz is immutable.
+ if c.dataqsiz == 0 {
+ return atomic.Loadp(unsafe.Pointer(&c.sendq.first)) == nil
+ }
+ return atomic.Loaduint(&c.qcount) == 0
+}
+
+// entry points for <- c from compiled code
+//
+//go:nosplit
+func chanrecv1(c *hchan, elem unsafe.Pointer) {
+ chanrecv(c, elem, true)
+}
+
+//go:nosplit
+func chanrecv2(c *hchan, elem unsafe.Pointer) (received bool) {
+ _, received = chanrecv(c, elem, true)
+ return
+}
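A sketch of how the compiler lowers the receive forms onto these entry points (chanrecv1 for the one-result form, chanrecv2 for the comma-ok form):

    v := <-c     // becomes chanrecv1(c, &v)
    v, ok := <-c // becomes chanrecv2(c, &v), with ok reporting received
    <-c          // becomes chanrecv1(c, nil): ep may be nil when the value is discarded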
+
+// chanrecv receives on channel c and writes the received data to ep.
+// ep may be nil, in which case received data is ignored.
+// If block == false and no elements are available, returns (false, false).
+// Otherwise, if c is closed, zeros *ep and returns (true, false).
+// Otherwise, fills in *ep with an element and returns (true, true).
+// A non-nil ep must point to the heap or the caller's stack.
+func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
+ // raceenabled: don't need to check ep, as it is always on the stack
+ // or is new memory allocated by reflect.
+
+ if debugChan {
+ print("chanrecv: chan=", c, "\n")
+ }
+
+ if c == nil {
+ if !block {
+ return
+ }
+ gopark(nil, nil, waitReasonChanReceiveNilChan, traceEvGoStop, 2)
+ throw("unreachable")
+ }
+
+ // Fast path: check for failed non-blocking operation without acquiring the lock.
+ if !block && empty(c) {
+ // After observing that the channel is not ready for receiving, we observe whether the
+ // channel is closed.
+ //
+ // Reordering of these checks could lead to incorrect behavior when racing with a close.
+ // For example, if the channel was open and not empty, was closed, and then drained,
+ // reordered reads could incorrectly indicate "open and empty". To prevent reordering,
+ // we use atomic loads for both checks, and rely on emptying and closing to happen in
+ // separate critical sections under the same lock. This assumption fails when closing
+ // an unbuffered channel with a blocked send, but that is an error condition anyway.
+ if atomic.Load(&c.closed) == 0 {
+ // Because a channel cannot be reopened, the later observation of the channel
+ // being not closed implies that it was also not closed at the moment of the
+ // first observation. We behave as if we observed the channel at that moment
+ // and report that the receive cannot proceed.
+ return
+ }
+ // The channel is irreversibly closed. Re-check whether the channel has any pending data
+ // to receive, which could have arrived between the empty and closed checks above.
+ // Sequential consistency is also required here, when racing with such a send.
+ if empty(c) {
+ // The channel is irreversibly closed and empty.
+ if raceenabled {
+ raceacquire(c.raceaddr())
+ }
+ if ep != nil {
+ typedmemclr(c.elemtype, ep)
+ }
+ return true, false
+ }
+ }
+
+ var t0 int64
+ if blockprofilerate > 0 {
+ t0 = cputicks()
+ }
+
+ lock(&c.lock)
+
+ if c.closed != 0 {
+ if c.qcount == 0 {
+ if raceenabled {
+ raceacquire(c.raceaddr())
+ }
+ unlock(&c.lock)
+ if ep != nil {
+ typedmemclr(c.elemtype, ep)
+ }
+ return true, false
+ }
+		// The channel has been closed, but the channel's buffer still has data.
+	} else {
+		// The channel is not closed: check for a waiting sender.
+ if sg := c.sendq.dequeue(); sg != nil {
+ // Found a waiting sender. If buffer is size 0, receive value
+ // directly from sender. Otherwise, receive from head of queue
+ // and add sender's value to the tail of the queue (both map to
+ // the same buffer slot because the queue is full).
+ recv(c, sg, ep, func() { unlock(&c.lock) }, 3)
+ return true, true
+ }
+ }
+
+ if c.qcount > 0 {
+ // Receive directly from queue
+ qp := chanbuf(c, c.recvx)
+ if raceenabled {
+ racenotify(c, c.recvx, nil)
+ }
+ if ep != nil {
+ typedmemmove(c.elemtype, ep, qp)
+ }
+ typedmemclr(c.elemtype, qp)
+ c.recvx++
+ if c.recvx == c.dataqsiz {
+ c.recvx = 0
+ }
+ c.qcount--
+ unlock(&c.lock)
+ return true, true
+ }
+
+ if !block {
+ unlock(&c.lock)
+ return false, false
+ }
+
+ // no sender available: block on this channel.
+ gp := getg()
+ mysg := acquireSudog()
+ mysg.releasetime = 0
+ if t0 != 0 {
+ mysg.releasetime = -1
+ }
+ // No stack splits between assigning elem and enqueuing mysg
+ // on gp.waiting where copystack can find it.
+ mysg.elem = ep
+ mysg.waitlink = nil
+ gp.waiting = mysg
+ mysg.g = gp
+ mysg.isSelect = false
+ mysg.c = c
+ gp.param = nil
+ c.recvq.enqueue(mysg)
+ // Signal to anyone trying to shrink our stack that we're about
+ // to park on a channel. The window between when this G's status
+ // changes and when we set gp.activeStackChans is not safe for
+ // stack shrinking.
+ atomic.Store8(&gp.parkingOnChan, 1)
+ gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceEvGoBlockRecv, 2)
+
+ // someone woke us up
+ if mysg != gp.waiting {
+ throw("G waiting list is corrupted")
+ }
+ gp.waiting = nil
+ gp.activeStackChans = false
+ if mysg.releasetime > 0 {
+ blockevent(mysg.releasetime-t0, 2)
+ }
+ success := mysg.success
+ gp.param = nil
+ mysg.c = nil
+ releaseSudog(mysg)
+ return true, success
+}
+
+// recv processes a receive operation on a full channel c.
+// There are 2 parts:
+// 1. The value sent by the sender sg is put into the channel
+// and the sender is woken up to go on its merry way.
+// 2. The value received by the receiver (the current G) is
+// written to ep.
+//
+// For synchronous channels, both values are the same.
+// For asynchronous channels, the receiver gets its data from
+// the channel buffer and the sender's data is put in the
+// channel buffer.
+// Channel c must be full and locked. recv unlocks c with unlockf.
+// sg must already be dequeued from c.
+// A non-nil ep must point to the heap or the caller's stack.
+func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
+ if c.dataqsiz == 0 {
+ if raceenabled {
+ racesync(c, sg)
+ }
+ if ep != nil {
+ // copy data from sender
+ recvDirect(c.elemtype, sg, ep)
+ }
+ } else {
+ // Queue is full. Take the item at the
+ // head of the queue. Make the sender enqueue
+ // its item at the tail of the queue. Since the
+ // queue is full, those are both the same slot.
+ qp := chanbuf(c, c.recvx)
+ if raceenabled {
+ racenotify(c, c.recvx, nil)
+ racenotify(c, c.recvx, sg)
+ }
+ // copy data from queue to receiver
+ if ep != nil {
+ typedmemmove(c.elemtype, ep, qp)
+ }
+ // copy data from sender to queue
+ typedmemmove(c.elemtype, qp, sg.elem)
+ c.recvx++
+ if c.recvx == c.dataqsiz {
+ c.recvx = 0
+ }
+ c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
+ }
+ sg.elem = nil
+ gp := sg.g
+ unlockf()
+ gp.param = unsafe.Pointer(sg)
+ sg.success = true
+ if sg.releasetime != 0 {
+ sg.releasetime = cputicks()
+ }
+ goready(gp, skip+1)
+}
+
+func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool {
+ // There are unlocked sudogs that point into gp's stack. Stack
+ // copying must lock the channels of those sudogs.
+ // Set activeStackChans here instead of before we try parking
+ // because we could self-deadlock in stack growth on the
+ // channel lock.
+ gp.activeStackChans = true
+ // Mark that it's safe for stack shrinking to occur now,
+ // because any thread acquiring this G's stack for shrinking
+ // is guaranteed to observe activeStackChans after this store.
+ atomic.Store8(&gp.parkingOnChan, 0)
+ // Make sure we unlock after setting activeStackChans and
+ // unsetting parkingOnChan. The moment we unlock chanLock
+ // we risk gp getting readied by a channel operation and
+ // so gp could continue running before everything before
+ // the unlock is visible (even to gp itself).
+ unlock((*mutex)(chanLock))
+ return true
+}
+
+// compiler implements
+//
+// select {
+// case c <- v:
+// ... foo
+// default:
+// ... bar
+// }
+//
+// as
+//
+// if selectnbsend(c, v) {
+// ... foo
+// } else {
+// ... bar
+// }
+func selectnbsend(c *hchan, elem unsafe.Pointer) (selected bool) {
+ return chansend(c, elem, false, getcallerpc())
+}
+
+// compiler implements
+//
+// select {
+// case v, ok = <-c:
+// ... foo
+// default:
+// ... bar
+// }
+//
+// as
+//
+// if selected, ok = selectnbrecv(&v, c); selected {
+// ... foo
+// } else {
+// ... bar
+// }
+func selectnbrecv(elem unsafe.Pointer, c *hchan) (selected, received bool) {
+ return chanrecv(c, elem, false)
+}
+
+//go:linkname reflect_chansend reflect.chansend
+func reflect_chansend(c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
+ return chansend(c, elem, !nb, getcallerpc())
+}
+
+//go:linkname reflect_chanrecv reflect.chanrecv
+func reflect_chanrecv(c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) {
+ return chanrecv(c, elem, !nb)
+}
+
+//go:linkname reflect_chanlen reflect.chanlen
+func reflect_chanlen(c *hchan) int {
+ if c == nil {
+ return 0
+ }
+ return int(c.qcount)
+}
+
+//go:linkname reflectlite_chanlen internal/reflectlite.chanlen
+func reflectlite_chanlen(c *hchan) int {
+ if c == nil {
+ return 0
+ }
+ return int(c.qcount)
+}
+
+//go:linkname reflect_chancap reflect.chancap
+func reflect_chancap(c *hchan) int {
+ if c == nil {
+ return 0
+ }
+ return int(c.dataqsiz)
+}
+
+//go:linkname reflect_chanclose reflect.chanclose
+func reflect_chanclose(c *hchan) {
+ closechan(c)
+}
+
+func (q *waitq) enqueue(sgp *sudog) {
+ sgp.next = nil
+ x := q.last
+ if x == nil {
+ sgp.prev = nil
+ q.first = sgp
+ q.last = sgp
+ return
+ }
+ sgp.prev = x
+ x.next = sgp
+ q.last = sgp
+}
+
+func (q *waitq) dequeue() *sudog {
+ for {
+ sgp := q.first
+ if sgp == nil {
+ return nil
+ }
+ y := sgp.next
+ if y == nil {
+ q.first = nil
+ q.last = nil
+ } else {
+ y.prev = nil
+ q.first = y
+ sgp.next = nil // mark as removed (see dequeueSudoG)
+ }
+
+ // if a goroutine was put on this queue because of a
+ // select, there is a small window between the goroutine
+ // being woken up by a different case and it grabbing the
+ // channel locks. Once it has the lock
+ // it removes itself from the queue, so we won't see it after that.
+ // We use a flag in the G struct to tell us when someone
+ // else has won the race to signal this goroutine but the goroutine
+ // hasn't removed itself from the queue yet.
+ if sgp.isSelect && !atomic.Cas(&sgp.g.selectDone, 0, 1) {
+ continue
+ }
+
+ return sgp
+ }
+}
+
+func (c *hchan) raceaddr() unsafe.Pointer {
+ // Treat read-like and write-like operations on the channel to
+ // happen at this address. Avoid using the address of qcount
+ // or dataqsiz, because the len() and cap() builtins read
+ // those addresses, and we don't want them racing with
+ // operations like close().
+ return unsafe.Pointer(&c.buf)
+}
+
+func racesync(c *hchan, sg *sudog) {
+ racerelease(chanbuf(c, 0))
+ raceacquireg(sg.g, chanbuf(c, 0))
+ racereleaseg(sg.g, chanbuf(c, 0))
+ raceacquire(chanbuf(c, 0))
+}
+
+// Notify the race detector of a send or receive involving buffer entry idx
+// and a channel c or its communicating partner sg.
+// This function handles the special case of c.elemsize==0.
+func racenotify(c *hchan, idx uint, sg *sudog) {
+ // We could have passed the unsafe.Pointer corresponding to entry idx
+ // instead of idx itself. However, in a future version of this function,
+ // we can use idx to better handle the case of elemsize==0.
+ // A future improvement to the detector is to call TSan with c and idx:
+	// this way, Go can continue not allocating buffer entries for channels
+	// of elemsize==0, yet the race detector can be made to handle multiple
+	// sync objects underneath the hood (one sync object per idx).
+ qp := chanbuf(c, idx)
+ // When elemsize==0, we don't allocate a full buffer for the channel.
+ // Instead of individual buffer entries, the race detector uses the
+ // c.buf as the only buffer entry. This simplification prevents us from
+ // following the memory model's happens-before rules (rules that are
+ // implemented in racereleaseacquire). Instead, we accumulate happens-before
+ // information in the synchronization object associated with c.buf.
+ if c.elemsize == 0 {
+ if sg == nil {
+ raceacquire(qp)
+ racerelease(qp)
+ } else {
+ raceacquireg(sg.g, qp)
+ racereleaseg(sg.g, qp)
+ }
+ } else {
+ if sg == nil {
+ racereleaseacquire(qp)
+ } else {
+ racereleaseacquireg(sg.g, qp)
+ }
+ }
+}
diff --git a/contrib/go/_std_1.18/src/runtime/checkptr.go b/contrib/go/_std_1.19/src/runtime/checkptr.go
index 2d4afd5cf6..2d4afd5cf6 100644
--- a/contrib/go/_std_1.18/src/runtime/checkptr.go
+++ b/contrib/go/_std_1.19/src/runtime/checkptr.go
diff --git a/contrib/go/_std_1.19/src/runtime/compiler.go b/contrib/go/_std_1.19/src/runtime/compiler.go
new file mode 100644
index 0000000000..f430a27719
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/compiler.go
@@ -0,0 +1,12 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// Compiler is the name of the compiler toolchain that built the
+// running binary. Known toolchains are:
+//
+// gc Also known as cmd/compile.
+// gccgo The gccgo front end, part of the GCC compiler suite.
+const Compiler = "gc"
diff --git a/contrib/go/_std_1.18/src/runtime/complex.go b/contrib/go/_std_1.19/src/runtime/complex.go
index 07c596fc0b..07c596fc0b 100644
--- a/contrib/go/_std_1.18/src/runtime/complex.go
+++ b/contrib/go/_std_1.19/src/runtime/complex.go
diff --git a/contrib/go/_std_1.18/src/runtime/cpuflags.go b/contrib/go/_std_1.19/src/runtime/cpuflags.go
index bbe93c5bea..bbe93c5bea 100644
--- a/contrib/go/_std_1.18/src/runtime/cpuflags.go
+++ b/contrib/go/_std_1.19/src/runtime/cpuflags.go
diff --git a/contrib/go/_std_1.18/src/runtime/cpuflags_amd64.go b/contrib/go/_std_1.19/src/runtime/cpuflags_amd64.go
index 8cca4bca8f..8cca4bca8f 100644
--- a/contrib/go/_std_1.18/src/runtime/cpuflags_amd64.go
+++ b/contrib/go/_std_1.19/src/runtime/cpuflags_amd64.go
diff --git a/contrib/go/_std_1.19/src/runtime/cpuprof.go b/contrib/go/_std_1.19/src/runtime/cpuprof.go
new file mode 100644
index 0000000000..2f7f6b4153
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/cpuprof.go
@@ -0,0 +1,238 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// CPU profiling.
+//
+// The signal handler for the profiling clock tick adds a new stack trace
+// to a log of recent traces. The log is read by a user goroutine that
+// turns it into formatted profile data. If the reader does not keep up
+// with the log, those writes will be recorded as a count of lost records.
+// The actual profile buffer is in profbuf.go.
+
+package runtime
+
+import (
+ "internal/abi"
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+const (
+ maxCPUProfStack = 64
+
+ // profBufWordCount is the size of the CPU profile buffer's storage for the
+ // header and stack of each sample, measured in 64-bit words. Every sample
+ // has a required header of two words. With a small additional header (a
+ // word or two) and stacks at the profiler's maximum length of 64 frames,
+ // that capacity can support 1900 samples or 19 thread-seconds at a 100 Hz
+ // sample rate, at a cost of 1 MiB.
+ profBufWordCount = 1 << 17
+ // profBufTagCount is the size of the CPU profile buffer's storage for the
+ // goroutine tags associated with each sample. A capacity of 1<<14 means
+ // room for 16k samples, or 160 thread-seconds at a 100 Hz sample rate.
+ profBufTagCount = 1 << 14
+)
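A rough check of the sizing arithmetic claimed in the comments above (two required header words, a small extra header, 64-frame stacks):

    package main

    import "fmt"

    func main() {
        words := 1 << 17
        fmt.Println(words * 8 >> 20) // 1: the sample buffer costs 1 MiB
        perSample := 2 + 2 + 64      // required header + small header + max stack
        fmt.Println(words / perSample) // 1927 samples, i.e. ~19 s at 100 Hz
    }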
+
+type cpuProfile struct {
+ lock mutex
+ on bool // profiling is on
+ log *profBuf // profile events written here
+
+ // extra holds extra stacks accumulated in addNonGo
+ // corresponding to profiling signals arriving on
+ // non-Go-created threads. Those stacks are written
+ // to log the next time a normal Go thread gets the
+ // signal handler.
+ // Assuming the stacks are 2 words each (we don't get
+ // a full traceback from those threads), plus one word
+ // size for framing, 100 Hz profiling would generate
+ // 300 words per second.
+ // Hopefully a normal Go thread will get the profiling
+ // signal at least once every few seconds.
+ extra [1000]uintptr
+ numExtra int
+ lostExtra uint64 // count of frames lost because extra is full
+ lostAtomic uint64 // count of frames lost because of being in atomic64 on mips/arm; updated racily
+}
+
+var cpuprof cpuProfile
+
+// SetCPUProfileRate sets the CPU profiling rate to hz samples per second.
+// If hz <= 0, SetCPUProfileRate turns off profiling.
+// If the profiler is on, the rate cannot be changed without first turning it off.
+//
+// Most clients should use the runtime/pprof package or
+// the testing package's -test.cpuprofile flag instead of calling
+// SetCPUProfileRate directly.
+func SetCPUProfileRate(hz int) {
+ // Clamp hz to something reasonable.
+ if hz < 0 {
+ hz = 0
+ }
+ if hz > 1000000 {
+ hz = 1000000
+ }
+
+ lock(&cpuprof.lock)
+ if hz > 0 {
+ if cpuprof.on || cpuprof.log != nil {
+ print("runtime: cannot set cpu profile rate until previous profile has finished.\n")
+ unlock(&cpuprof.lock)
+ return
+ }
+
+ cpuprof.on = true
+ cpuprof.log = newProfBuf(1, profBufWordCount, profBufTagCount)
+ hdr := [1]uint64{uint64(hz)}
+ cpuprof.log.write(nil, nanotime(), hdr[:], nil)
+ setcpuprofilerate(int32(hz))
+ } else if cpuprof.on {
+ setcpuprofilerate(0)
+ cpuprof.on = false
+ cpuprof.addExtra()
+ cpuprof.log.close()
+ }
+ unlock(&cpuprof.lock)
+}
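As the doc comment says, most clients should go through runtime/pprof rather than calling SetCPUProfileRate directly; a minimal sketch (StartCPUProfile enables profiling at pprof's default 100 Hz, and the output path is hypothetical):

    package main

    import (
        "os"
        "runtime/pprof"
    )

    func main() {
        f, err := os.Create("cpu.out")
        if err != nil {
            panic(err)
        }
        defer f.Close()
        if err := pprof.StartCPUProfile(f); err != nil {
            panic(err)
        }
        defer pprof.StopCPUProfile()
        // ... workload to profile ...
    }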
+
+// add adds the stack trace to the profile.
+// It is called from signal handlers and other limited environments
+// and cannot allocate memory or acquire locks that might be
+// held at the time of the signal, nor can it use substantial amounts
+// of stack.
+//
+//go:nowritebarrierrec
+func (p *cpuProfile) add(tagPtr *unsafe.Pointer, stk []uintptr) {
+ // Simple cas-lock to coordinate with setcpuprofilerate.
+ for !atomic.Cas(&prof.signalLock, 0, 1) {
+ // TODO: Is it safe to osyield here? https://go.dev/issue/52672
+ osyield()
+ }
+
+ if prof.hz != 0 { // implies cpuprof.log != nil
+ if p.numExtra > 0 || p.lostExtra > 0 || p.lostAtomic > 0 {
+ p.addExtra()
+ }
+ hdr := [1]uint64{1}
+ // Note: write "knows" that the argument is &gp.labels,
+ // because otherwise its write barrier behavior may not
+ // be correct. See the long comment there before
+ // changing the argument here.
+ cpuprof.log.write(tagPtr, nanotime(), hdr[:], stk)
+ }
+
+ atomic.Store(&prof.signalLock, 0)
+}
+
+// addNonGo adds the non-Go stack trace to the profile.
+// It is called from a non-Go thread, so we cannot use much stack at all,
+// nor do anything that needs a g or an m.
+// In particular, we can't call cpuprof.log.write.
+// Instead, we copy the stack into cpuprof.extra,
+// which will be drained the next time a Go thread
+// gets the signal handling event.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func (p *cpuProfile) addNonGo(stk []uintptr) {
+ // Simple cas-lock to coordinate with SetCPUProfileRate.
+ // (Other calls to add or addNonGo should be blocked out
+ // by the fact that only one SIGPROF can be handled by the
+ // process at a time. If not, this lock will serialize those too.
+ // The use of timer_create(2) on Linux to request process-targeted
+ // signals may have changed this.)
+ for !atomic.Cas(&prof.signalLock, 0, 1) {
+ // TODO: Is it safe to osyield here? https://go.dev/issue/52672
+ osyield()
+ }
+
+ if cpuprof.numExtra+1+len(stk) < len(cpuprof.extra) {
+ i := cpuprof.numExtra
+ cpuprof.extra[i] = uintptr(1 + len(stk))
+ copy(cpuprof.extra[i+1:], stk)
+ cpuprof.numExtra += 1 + len(stk)
+ } else {
+ cpuprof.lostExtra++
+ }
+
+ atomic.Store(&prof.signalLock, 0)
+}
+
+// addExtra adds the "extra" profiling events,
+// queued by addNonGo, to the profile log.
+// addExtra is called either from a signal handler on a Go thread
+// or from an ordinary goroutine; either way it can use stack
+// and has a g. The world may be stopped, though.
+func (p *cpuProfile) addExtra() {
+ // Copy accumulated non-Go profile events.
+ hdr := [1]uint64{1}
+ for i := 0; i < p.numExtra; {
+ p.log.write(nil, 0, hdr[:], p.extra[i+1:i+int(p.extra[i])])
+ i += int(p.extra[i])
+ }
+ p.numExtra = 0
+
+ // Report any lost events.
+ if p.lostExtra > 0 {
+ hdr := [1]uint64{p.lostExtra}
+ lostStk := [2]uintptr{
+ abi.FuncPCABIInternal(_LostExternalCode) + sys.PCQuantum,
+ abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum,
+ }
+ p.log.write(nil, 0, hdr[:], lostStk[:])
+ p.lostExtra = 0
+ }
+
+ if p.lostAtomic > 0 {
+ hdr := [1]uint64{p.lostAtomic}
+ lostStk := [2]uintptr{
+ abi.FuncPCABIInternal(_LostSIGPROFDuringAtomic64) + sys.PCQuantum,
+ abi.FuncPCABIInternal(_System) + sys.PCQuantum,
+ }
+ p.log.write(nil, 0, hdr[:], lostStk[:])
+ p.lostAtomic = 0
+ }
+
+}
+
+// CPUProfile panics.
+// It formerly provided raw access to chunks of
+// a pprof-format profile generated by the runtime.
+// The details of generating that format have changed,
+// so this functionality has been removed.
+//
+// Deprecated: Use the runtime/pprof package,
+// or the handlers in the net/http/pprof package,
+// or the testing package's -test.cpuprofile flag instead.
+func CPUProfile() []byte {
+ panic("CPUProfile no longer available")
+}
+
+//go:linkname runtime_pprof_runtime_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond
+func runtime_pprof_runtime_cyclesPerSecond() int64 {
+ return tickspersecond()
+}
+
+// readProfile, provided to runtime/pprof, returns the next chunk of
+// binary CPU profiling stack trace data, blocking until data is available.
+// If profiling is turned off and all the profile data accumulated while it was
+// on has been returned, readProfile returns eof=true.
+// The caller must save the returned data and tags before calling readProfile again.
+// The returned data contains a whole number of records, and tags contains
+// exactly one entry per record.
+//
+//go:linkname runtime_pprof_readProfile runtime/pprof.readProfile
+func runtime_pprof_readProfile() ([]uint64, []unsafe.Pointer, bool) {
+ lock(&cpuprof.lock)
+ log := cpuprof.log
+ unlock(&cpuprof.lock)
+ data, tags, eof := log.read(profBufBlocking)
+ if len(data) == 0 && eof {
+ lock(&cpuprof.lock)
+ cpuprof.log = nil
+ unlock(&cpuprof.lock)
+ }
+ return data, tags, eof
+}
diff --git a/contrib/go/_std_1.19/src/runtime/cputicks.go b/contrib/go/_std_1.19/src/runtime/cputicks.go
new file mode 100644
index 0000000000..91270617fc
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/cputicks.go
@@ -0,0 +1,11 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !arm && !arm64 && !loong64 && !mips64 && !mips64le && !mips && !mipsle && !wasm
+
+package runtime
+
+// careful: cputicks is not guaranteed to be monotonic! In particular, we have
+// noticed drift between cpus on certain os/arch combinations. See issue 8976.
+func cputicks() int64
diff --git a/contrib/go/_std_1.19/src/runtime/debug.go b/contrib/go/_std_1.19/src/runtime/debug.go
new file mode 100644
index 0000000000..0ab23e0eb7
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/debug.go
@@ -0,0 +1,115 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// GOMAXPROCS sets the maximum number of CPUs that can be executing
+// simultaneously and returns the previous setting. It defaults to
+// the value of runtime.NumCPU. If n < 1, it does not change the current setting.
+// This call will go away when the scheduler improves.
+func GOMAXPROCS(n int) int {
+ if GOARCH == "wasm" && n > 1 {
+ n = 1 // WebAssembly has no threads yet, so only one CPU is possible.
+ }
+
+ lock(&sched.lock)
+ ret := int(gomaxprocs)
+ unlock(&sched.lock)
+ if n <= 0 || n == ret {
+ return ret
+ }
+
+ stopTheWorldGC("GOMAXPROCS")
+
+ // newprocs will be processed by startTheWorld
+ newprocs = int32(n)
+
+ startTheWorldGC()
+ return ret
+}
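Because GOMAXPROCS returns the previous setting and ignores n < 1, it supports both a pure query and a save/restore idiom:

    cur := runtime.GOMAXPROCS(0) // n < 1: just report the current setting
    _ = cur

    // Run a section with one P, restoring the old value on return.
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))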
+
+// NumCPU returns the number of logical CPUs usable by the current process.
+//
+// The set of available CPUs is checked by querying the operating system
+// at process startup. Changes to operating system CPU allocation after
+// process startup are not reflected.
+func NumCPU() int {
+ return int(ncpu)
+}
+
+// NumCgoCall returns the number of cgo calls made by the current process.
+func NumCgoCall() int64 {
+ var n = int64(atomic.Load64(&ncgocall))
+ for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
+ n += int64(mp.ncgocall)
+ }
+ return n
+}
+
+// NumGoroutine returns the number of goroutines that currently exist.
+func NumGoroutine() int {
+ return int(gcount())
+}
+
+//go:linkname debug_modinfo runtime/debug.modinfo
+func debug_modinfo() string {
+ return modinfo
+}
+
+// mayMoreStackPreempt is a maymorestack hook that forces a preemption
+// at every possible cooperative preemption point.
+//
+// This is valuable to apply to the runtime, which can be sensitive to
+// preemption points. To apply this to all preemption points in the
+// runtime and runtime-like code, use the following in bash or zsh:
+//
+// X=(-{gc,asm}flags={runtime/...,reflect,sync}=-d=maymorestack=runtime.mayMoreStackPreempt) GOFLAGS=${X[@]}
+//
+// This must be deeply nosplit because it is called from a function
+// prologue before the stack is set up and because the compiler will
+// call it from any splittable prologue (leading to infinite
+// recursion).
+//
+// Ideally it should also use very little stack because the linker
+// doesn't currently account for this in nosplit stack depth checking.
+//
+// Ensure mayMoreStackPreempt can be called for all ABIs.
+//
+//go:nosplit
+//go:linkname mayMoreStackPreempt
+func mayMoreStackPreempt() {
+ // Don't do anything on the g0 or gsignal stack.
+ g := getg()
+ if g == g.m.g0 || g == g.m.gsignal {
+ return
+ }
+ // Force a preemption, unless the stack is already poisoned.
+ if g.stackguard0 < stackPoisonMin {
+ g.stackguard0 = stackPreempt
+ }
+}
+
+// mayMoreStackMove is a maymorestack hook that forces stack movement
+// at every possible point.
+//
+// See mayMoreStackPreempt.
+//
+//go:nosplit
+//go:linkname mayMoreStackMove
+func mayMoreStackMove() {
+ // Don't do anything on the g0 or gsignal stack.
+ g := getg()
+ if g == g.m.g0 || g == g.m.gsignal {
+ return
+ }
+ // Force stack movement, unless the stack is already poisoned.
+ if g.stackguard0 < stackPoisonMin {
+ g.stackguard0 = stackForceMove
+ }
+}
diff --git a/contrib/go/_std_1.18/src/runtime/debugcall.go b/contrib/go/_std_1.19/src/runtime/debugcall.go
index 2f164e7fd7..2f164e7fd7 100644
--- a/contrib/go/_std_1.18/src/runtime/debugcall.go
+++ b/contrib/go/_std_1.19/src/runtime/debugcall.go
diff --git a/contrib/go/_std_1.19/src/runtime/debuglog.go b/contrib/go/_std_1.19/src/runtime/debuglog.go
new file mode 100644
index 0000000000..ca1a791c93
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/debuglog.go
@@ -0,0 +1,825 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides an internal debug logging facility. The debug
+// log is a lightweight, in-memory, per-M ring buffer. By default, the
+// runtime prints the debug log on panic.
+//
+// To print something to the debug log, call dlog to obtain a dlogger
+// and use the methods on that to add values. The values will be
+// space-separated in the output (much like println).
+//
+// This facility can be enabled by passing -tags debuglog when
+// building. Without this tag, dlog calls compile to nothing.
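A hedged sketch of a call site inside the runtime, chaining only methods defined later in this file; without -tags debuglog the whole expression compiles away:

    dlog().i(42).hex(uint64(0x1234)).b(true).end()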
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// debugLogBytes is the size of each per-M ring buffer. This is
+// allocated off-heap to avoid blowing up the M and hence the GC'd
+// heap size.
+const debugLogBytes = 16 << 10
+
+// debugLogStringLimit is the maximum number of bytes in a string.
+// Above this, the string will be truncated with "..(n more bytes).."
+const debugLogStringLimit = debugLogBytes / 8
+
+// dlog returns a debug logger. The caller can use methods on the
+// returned logger to add values, which will be space-separated in the
+// final output, much like println. The caller must call end() to
+// finish the message.
+//
+// dlog can be used from highly-constrained corners of the runtime: it
+// is safe to use in the signal handler, from within the write
+// barrier, from within the stack implementation, and in places that
+// must be recursively nosplit.
+//
+// This will be compiled away if built without the debuglog build tag.
+// However, argument construction may not be. If any of the arguments
+// are not literals or trivial expressions, consider protecting the
+// call with "if dlogEnabled".
+//
+//go:nosplit
+//go:nowritebarrierrec
+func dlog() *dlogger {
+ if !dlogEnabled {
+ return nil
+ }
+
+ // Get the time.
+ tick, nano := uint64(cputicks()), uint64(nanotime())
+
+ // Try to get a cached logger.
+ l := getCachedDlogger()
+
+ // If we couldn't get a cached logger, try to get one from the
+ // global pool.
+ if l == nil {
+ allp := (*uintptr)(unsafe.Pointer(&allDloggers))
+ all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp)))
+ for l1 := all; l1 != nil; l1 = l1.allLink {
+ if atomic.Load(&l1.owned) == 0 && atomic.Cas(&l1.owned, 0, 1) {
+ l = l1
+ break
+ }
+ }
+ }
+
+ // If that failed, allocate a new logger.
+ if l == nil {
+ // Use sysAllocOS instead of sysAlloc because we want to interfere
+ // with the runtime as little as possible, and sysAlloc updates accounting.
+ l = (*dlogger)(sysAllocOS(unsafe.Sizeof(dlogger{})))
+ if l == nil {
+ throw("failed to allocate debug log")
+ }
+ l.w.r.data = &l.w.data
+ l.owned = 1
+
+ // Prepend to allDloggers list.
+ headp := (*uintptr)(unsafe.Pointer(&allDloggers))
+ for {
+ head := atomic.Loaduintptr(headp)
+ l.allLink = (*dlogger)(unsafe.Pointer(head))
+ if atomic.Casuintptr(headp, head, uintptr(unsafe.Pointer(l))) {
+ break
+ }
+ }
+ }
+
+ // If the time delta is getting too high, write a new sync
+ // packet. We set the limit so we don't write more than 6
+ // bytes of delta in the record header.
+ const deltaLimit = 1<<(3*7) - 1 // ~2ms between sync packets
+ if tick-l.w.tick > deltaLimit || nano-l.w.nano > deltaLimit {
+ l.w.writeSync(tick, nano)
+ }
+
+ // Reserve space for framing header.
+ l.w.ensure(debugLogHeaderSize)
+ l.w.write += debugLogHeaderSize
+
+ // Write record header.
+ l.w.uvarint(tick - l.w.tick)
+ l.w.uvarint(nano - l.w.nano)
+ gp := getg()
+ if gp != nil && gp.m != nil && gp.m.p != 0 {
+ l.w.varint(int64(gp.m.p.ptr().id))
+ } else {
+ l.w.varint(-1)
+ }
+
+ return l
+}
+
+// A dlogger writes to the debug log.
+//
+// To obtain a dlogger, call dlog(). When done with the dlogger, call
+// end().
+//
+//go:notinheap
+type dlogger struct {
+ w debugLogWriter
+
+ // allLink is the next dlogger in the allDloggers list.
+ allLink *dlogger
+
+ // owned indicates that this dlogger is owned by an M. This is
+ // accessed atomically.
+ owned uint32
+}
+
+// allDloggers is a list of all dloggers, linked through
+// dlogger.allLink. This is accessed atomically. The list is prepend-only,
+// so it doesn't need to protect against ABA races.
+var allDloggers *dlogger
+
+//go:nosplit
+func (l *dlogger) end() {
+ if !dlogEnabled {
+ return
+ }
+
+ // Fill in framing header.
+ size := l.w.write - l.w.r.end
+ if !l.w.writeFrameAt(l.w.r.end, size) {
+ throw("record too large")
+ }
+
+ // Commit the record.
+ l.w.r.end = l.w.write
+
+ // Attempt to return this logger to the cache.
+ if putCachedDlogger(l) {
+ return
+ }
+
+ // Return the logger to the global pool.
+ atomic.Store(&l.owned, 0)
+}
+
+const (
+ debugLogUnknown = 1 + iota
+ debugLogBoolTrue
+ debugLogBoolFalse
+ debugLogInt
+ debugLogUint
+ debugLogHex
+ debugLogPtr
+ debugLogString
+ debugLogConstString
+ debugLogStringOverflow
+
+ debugLogPC
+ debugLogTraceback
+)
+
+//go:nosplit
+func (l *dlogger) b(x bool) *dlogger {
+ if !dlogEnabled {
+ return l
+ }
+ if x {
+ l.w.byte(debugLogBoolTrue)
+ } else {
+ l.w.byte(debugLogBoolFalse)
+ }
+ return l
+}
+
+//go:nosplit
+func (l *dlogger) i(x int) *dlogger {
+ return l.i64(int64(x))
+}
+
+//go:nosplit
+func (l *dlogger) i8(x int8) *dlogger {
+ return l.i64(int64(x))
+}
+
+//go:nosplit
+func (l *dlogger) i16(x int16) *dlogger {
+ return l.i64(int64(x))
+}
+
+//go:nosplit
+func (l *dlogger) i32(x int32) *dlogger {
+ return l.i64(int64(x))
+}
+
+//go:nosplit
+func (l *dlogger) i64(x int64) *dlogger {
+ if !dlogEnabled {
+ return l
+ }
+ l.w.byte(debugLogInt)
+ l.w.varint(x)
+ return l
+}
+
+//go:nosplit
+func (l *dlogger) u(x uint) *dlogger {
+ return l.u64(uint64(x))
+}
+
+//go:nosplit
+func (l *dlogger) uptr(x uintptr) *dlogger {
+ return l.u64(uint64(x))
+}
+
+//go:nosplit
+func (l *dlogger) u8(x uint8) *dlogger {
+ return l.u64(uint64(x))
+}
+
+//go:nosplit
+func (l *dlogger) u16(x uint16) *dlogger {
+ return l.u64(uint64(x))
+}
+
+//go:nosplit
+func (l *dlogger) u32(x uint32) *dlogger {
+ return l.u64(uint64(x))
+}
+
+//go:nosplit
+func (l *dlogger) u64(x uint64) *dlogger {
+ if !dlogEnabled {
+ return l
+ }
+ l.w.byte(debugLogUint)
+ l.w.uvarint(x)
+ return l
+}
+
+//go:nosplit
+func (l *dlogger) hex(x uint64) *dlogger {
+ if !dlogEnabled {
+ return l
+ }
+ l.w.byte(debugLogHex)
+ l.w.uvarint(x)
+ return l
+}
+
+//go:nosplit
+func (l *dlogger) p(x any) *dlogger {
+ if !dlogEnabled {
+ return l
+ }
+ l.w.byte(debugLogPtr)
+ if x == nil {
+ l.w.uvarint(0)
+ } else {
+ v := efaceOf(&x)
+ switch v._type.kind & kindMask {
+ case kindChan, kindFunc, kindMap, kindPtr, kindUnsafePointer:
+ l.w.uvarint(uint64(uintptr(v.data)))
+ default:
+ throw("not a pointer type")
+ }
+ }
+ return l
+}
+
+//go:nosplit
+func (l *dlogger) s(x string) *dlogger {
+ if !dlogEnabled {
+ return l
+ }
+ str := stringStructOf(&x)
+ datap := &firstmoduledata
+ if len(x) > 4 && datap.etext <= uintptr(str.str) && uintptr(str.str) < datap.end {
+ // String constants are in the rodata section, which
+ // isn't recorded in moduledata. But it has to be
+ // somewhere between etext and end.
+ l.w.byte(debugLogConstString)
+ l.w.uvarint(uint64(str.len))
+ l.w.uvarint(uint64(uintptr(str.str) - datap.etext))
+ } else {
+ l.w.byte(debugLogString)
+ var b []byte
+ bb := (*slice)(unsafe.Pointer(&b))
+ bb.array = str.str
+ bb.len, bb.cap = str.len, str.len
+ if len(b) > debugLogStringLimit {
+ b = b[:debugLogStringLimit]
+ }
+ l.w.uvarint(uint64(len(b)))
+ l.w.bytes(b)
+ if len(b) != len(x) {
+ l.w.byte(debugLogStringOverflow)
+ l.w.uvarint(uint64(len(x) - len(b)))
+ }
+ }
+ return l
+}
+
+//go:nosplit
+func (l *dlogger) pc(x uintptr) *dlogger {
+ if !dlogEnabled {
+ return l
+ }
+ l.w.byte(debugLogPC)
+ l.w.uvarint(uint64(x))
+ return l
+}
+
+//go:nosplit
+func (l *dlogger) traceback(x []uintptr) *dlogger {
+ if !dlogEnabled {
+ return l
+ }
+ l.w.byte(debugLogTraceback)
+ l.w.uvarint(uint64(len(x)))
+ for _, pc := range x {
+ l.w.uvarint(uint64(pc))
+ }
+ return l
+}
+
+// A debugLogWriter is a ring buffer of binary debug log records.
+//
+// A log record consists of a 2-byte framing header and a sequence of
+// fields. The framing header gives the size of the record as a little
+// endian 16-bit value. Each field starts with a byte indicating its
+// type, followed by type-specific data. If the size in the framing
+// header is 0, it's a sync record consisting of two little endian
+// 64-bit values giving a new time base.
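+//
+// Illustratively (a sketch of the layout implemented below):
+//
+//	normal record: | size (2 bytes LE, counts the whole record) | type byte | data | ...
+//	sync record:   | 0x00 0x00 | tick (8 bytes LE) | nano (8 bytes LE) |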
+//
+// Because this is a ring buffer, new records will eventually
+// overwrite old records. Hence, it maintains a reader that consumes
+// the log as it gets overwritten. That reader state is where an
+// actual log reader would start.
+//
+//go:notinheap
+type debugLogWriter struct {
+ write uint64
+ data debugLogBuf
+
+ // tick and nano are the time bases from the most recently
+ // written sync record.
+ tick, nano uint64
+
+ // r is a reader that consumes records as they get overwritten
+ // by the writer. It also acts as the initial reader state
+ // when printing the log.
+ r debugLogReader
+
+ // buf is a scratch buffer for encoding. This is here to
+ // reduce stack usage.
+ buf [10]byte
+}
+
+//go:notinheap
+type debugLogBuf [debugLogBytes]byte
+
+const (
+ // debugLogHeaderSize is the number of bytes in the framing
+ // header of every dlog record.
+ debugLogHeaderSize = 2
+
+ // debugLogSyncSize is the number of bytes in a sync record.
+ debugLogSyncSize = debugLogHeaderSize + 2*8
+)
+
+//go:nosplit
+func (l *debugLogWriter) ensure(n uint64) {
+ for l.write+n >= l.r.begin+uint64(len(l.data)) {
+ // Consume record at begin.
+ if l.r.skip() == ^uint64(0) {
+ // Wrapped around within a record.
+ //
+ // TODO(austin): It would be better to just
+ // eat the whole buffer at this point, but we
+ // have to communicate that to the reader
+ // somehow.
+ throw("record wrapped around")
+ }
+ }
+}
+
+//go:nosplit
+func (l *debugLogWriter) writeFrameAt(pos, size uint64) bool {
+ l.data[pos%uint64(len(l.data))] = uint8(size)
+ l.data[(pos+1)%uint64(len(l.data))] = uint8(size >> 8)
+ return size <= 0xFFFF
+}
+
+//go:nosplit
+func (l *debugLogWriter) writeSync(tick, nano uint64) {
+ l.tick, l.nano = tick, nano
+ l.ensure(debugLogHeaderSize)
+ l.writeFrameAt(l.write, 0)
+ l.write += debugLogHeaderSize
+ l.writeUint64LE(tick)
+ l.writeUint64LE(nano)
+ l.r.end = l.write
+}
+
+//go:nosplit
+func (l *debugLogWriter) writeUint64LE(x uint64) {
+ var b [8]byte
+ b[0] = byte(x)
+ b[1] = byte(x >> 8)
+ b[2] = byte(x >> 16)
+ b[3] = byte(x >> 24)
+ b[4] = byte(x >> 32)
+ b[5] = byte(x >> 40)
+ b[6] = byte(x >> 48)
+ b[7] = byte(x >> 56)
+ l.bytes(b[:])
+}
+
+//go:nosplit
+func (l *debugLogWriter) byte(x byte) {
+ l.ensure(1)
+ pos := l.write
+ l.write++
+ l.data[pos%uint64(len(l.data))] = x
+}
+
+//go:nosplit
+func (l *debugLogWriter) bytes(x []byte) {
+ l.ensure(uint64(len(x)))
+ pos := l.write
+ l.write += uint64(len(x))
+ for len(x) > 0 {
+ n := copy(l.data[pos%uint64(len(l.data)):], x)
+ pos += uint64(n)
+ x = x[n:]
+ }
+}
+
+//go:nosplit
+func (l *debugLogWriter) varint(x int64) {
+ var u uint64
+ if x < 0 {
+ u = (^uint64(x) << 1) | 1 // complement x, bit 0 is 1
+ } else {
+ u = (uint64(x) << 1) // do not complement x, bit 0 is 0
+ }
+ l.uvarint(u)
+}
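+
+// Zig-zag examples, for illustration: varint(0) passes u=0, varint(-1)
+// passes u=1, varint(1) passes u=2, and varint(-2) passes u=3, so small
+// magnitudes of either sign encode to short uvarints.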
+
+//go:nosplit
+func (l *debugLogWriter) uvarint(u uint64) {
+ i := 0
+ for u >= 0x80 {
+ l.buf[i] = byte(u) | 0x80
+ u >>= 7
+ i++
+ }
+ l.buf[i] = byte(u)
+ i++
+ l.bytes(l.buf[:i])
+}
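+
+// For example, uvarint(300) emits 0xAC 0x02: the low 7 bits of 300
+// (0x2C) go first with the continuation bit set, then the remaining
+// bits (2).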
+
+type debugLogReader struct {
+ data *debugLogBuf
+
+ // begin and end are the positions in the log of the beginning
+ // and end of the log data, modulo len(data).
+ begin, end uint64
+
+ // tick and nano are the current time base at begin.
+ tick, nano uint64
+}
+
+//go:nosplit
+func (r *debugLogReader) skip() uint64 {
+ // Read size at pos.
+ if r.begin+debugLogHeaderSize > r.end {
+ return ^uint64(0)
+ }
+ size := uint64(r.readUint16LEAt(r.begin))
+ if size == 0 {
+ // Sync packet.
+ r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
+ r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
+ size = debugLogSyncSize
+ }
+ if r.begin+size > r.end {
+ return ^uint64(0)
+ }
+ r.begin += size
+ return size
+}
+
+//go:nosplit
+func (r *debugLogReader) readUint16LEAt(pos uint64) uint16 {
+ return uint16(r.data[pos%uint64(len(r.data))]) |
+ uint16(r.data[(pos+1)%uint64(len(r.data))])<<8
+}
+
+//go:nosplit
+func (r *debugLogReader) readUint64LEAt(pos uint64) uint64 {
+ var b [8]byte
+ for i := range b {
+ b[i] = r.data[pos%uint64(len(r.data))]
+ pos++
+ }
+ return uint64(b[0]) | uint64(b[1])<<8 |
+ uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 |
+ uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func (r *debugLogReader) peek() (tick uint64) {
+ // Consume any sync records.
+ size := uint64(0)
+ for size == 0 {
+ if r.begin+debugLogHeaderSize > r.end {
+ return ^uint64(0)
+ }
+ size = uint64(r.readUint16LEAt(r.begin))
+ if size != 0 {
+ break
+ }
+ if r.begin+debugLogSyncSize > r.end {
+ return ^uint64(0)
+ }
+ // Sync packet.
+ r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
+ r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
+ r.begin += debugLogSyncSize
+ }
+
+ // Peek tick delta.
+ if r.begin+size > r.end {
+ return ^uint64(0)
+ }
+ pos := r.begin + debugLogHeaderSize
+ var u uint64
+ for i := uint(0); ; i += 7 {
+ b := r.data[pos%uint64(len(r.data))]
+ pos++
+ u |= uint64(b&^0x80) << i
+ if b&0x80 == 0 {
+ break
+ }
+ }
+ if pos > r.begin+size {
+ return ^uint64(0)
+ }
+ return r.tick + u
+}
+
+func (r *debugLogReader) header() (end, tick, nano uint64, p int) {
+ // Read size. We've already skipped sync packets and checked
+ // bounds in peek.
+ size := uint64(r.readUint16LEAt(r.begin))
+ end = r.begin + size
+ r.begin += debugLogHeaderSize
+
+ // Read tick, nano, and p.
+ tick = r.uvarint() + r.tick
+ nano = r.uvarint() + r.nano
+ p = int(r.varint())
+
+ return
+}
+
+func (r *debugLogReader) uvarint() uint64 {
+ var u uint64
+ for i := uint(0); ; i += 7 {
+ b := r.data[r.begin%uint64(len(r.data))]
+ r.begin++
+ u |= uint64(b&^0x80) << i
+ if b&0x80 == 0 {
+ break
+ }
+ }
+ return u
+}
+
+func (r *debugLogReader) varint() int64 {
+ u := r.uvarint()
+ var v int64
+ if u&1 == 0 {
+ v = int64(u >> 1)
+ } else {
+ v = ^int64(u >> 1)
+ }
+ return v
+}
+
+func (r *debugLogReader) printVal() bool {
+ typ := r.data[r.begin%uint64(len(r.data))]
+ r.begin++
+
+ switch typ {
+ default:
+ print("<unknown field type ", hex(typ), " pos ", r.begin-1, " end ", r.end, ">\n")
+ return false
+
+ case debugLogUnknown:
+ print("<unknown kind>")
+
+ case debugLogBoolTrue:
+ print(true)
+
+ case debugLogBoolFalse:
+ print(false)
+
+ case debugLogInt:
+ print(r.varint())
+
+ case debugLogUint:
+ print(r.uvarint())
+
+ case debugLogHex, debugLogPtr:
+ print(hex(r.uvarint()))
+
+ case debugLogString:
+ sl := r.uvarint()
+ if r.begin+sl > r.end {
+ r.begin = r.end
+ print("<string length corrupted>")
+ break
+ }
+ for sl > 0 {
+ b := r.data[r.begin%uint64(len(r.data)):]
+ if uint64(len(b)) > sl {
+ b = b[:sl]
+ }
+ r.begin += uint64(len(b))
+ sl -= uint64(len(b))
+ gwrite(b)
+ }
+
+ case debugLogConstString:
+ len, ptr := int(r.uvarint()), uintptr(r.uvarint())
+ ptr += firstmoduledata.etext
+ str := stringStruct{
+ str: unsafe.Pointer(ptr),
+ len: len,
+ }
+ s := *(*string)(unsafe.Pointer(&str))
+ print(s)
+
+ case debugLogStringOverflow:
+ print("..(", r.uvarint(), " more bytes)..")
+
+ case debugLogPC:
+ printDebugLogPC(uintptr(r.uvarint()), false)
+
+ case debugLogTraceback:
+ n := int(r.uvarint())
+ for i := 0; i < n; i++ {
+ print("\n\t")
+ // gentraceback PCs are always return PCs.
+ // Convert them to call PCs.
+ //
+ // TODO(austin): Expand inlined frames.
+ printDebugLogPC(uintptr(r.uvarint()), true)
+ }
+ }
+
+ return true
+}
+
+// printDebugLog prints the debug log.
+func printDebugLog() {
+ if !dlogEnabled {
+ return
+ }
+
+ // This function should not panic or throw since it is used in
+ // the fatal panic path and this may deadlock.
+
+ printlock()
+
+ // Get the list of all debug logs.
+ allp := (*uintptr)(unsafe.Pointer(&allDloggers))
+ all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp)))
+
+ // Count the logs.
+ n := 0
+ for l := all; l != nil; l = l.allLink {
+ n++
+ }
+ if n == 0 {
+ printunlock()
+ return
+ }
+
+ // Prepare read state for all logs.
+ type readState struct {
+ debugLogReader
+ first bool
+ lost uint64
+ nextTick uint64
+ }
+ // Use sysAllocOS instead of sysAlloc because we want to interfere
+ // with the runtime as little as possible, and sysAlloc updates accounting.
+ state1 := sysAllocOS(unsafe.Sizeof(readState{}) * uintptr(n))
+ if state1 == nil {
+ println("failed to allocate read state for", n, "logs")
+ printunlock()
+ return
+ }
+ state := (*[1 << 20]readState)(state1)[:n]
+ {
+ l := all
+ for i := range state {
+ s := &state[i]
+ s.debugLogReader = l.w.r
+ s.first = true
+ s.lost = l.w.r.begin
+ s.nextTick = s.peek()
+ l = l.allLink
+ }
+ }
+
+ // Print records.
+ for {
+ // Find the next record.
+ var best struct {
+ tick uint64
+ i int
+ }
+ best.tick = ^uint64(0)
+ for i := range state {
+ if state[i].nextTick < best.tick {
+ best.tick = state[i].nextTick
+ best.i = i
+ }
+ }
+ if best.tick == ^uint64(0) {
+ break
+ }
+
+ // Print record.
+ s := &state[best.i]
+ if s.first {
+ print(">> begin log ", best.i)
+ if s.lost != 0 {
+ print("; lost first ", s.lost>>10, "KB")
+ }
+ print(" <<\n")
+ s.first = false
+ }
+
+ end, _, nano, p := s.header()
+ oldEnd := s.end
+ s.end = end
+
+ print("[")
+ var tmpbuf [21]byte
+ pnano := int64(nano) - runtimeInitTime
+ if pnano < 0 {
+ // Logged before runtimeInitTime was set.
+ pnano = 0
+ }
+ pnanoBytes := itoaDiv(tmpbuf[:], uint64(pnano), 9)
+ print(slicebytetostringtmp((*byte)(noescape(unsafe.Pointer(&pnanoBytes[0]))), len(pnanoBytes)))
+ print(" P ", p, "] ")
+
+ for i := 0; s.begin < s.end; i++ {
+ if i > 0 {
+ print(" ")
+ }
+ if !s.printVal() {
+ // Abort this P log.
+ print("<aborting P log>")
+ end = oldEnd
+ break
+ }
+ }
+ println()
+
+ // Move on to the next record.
+ s.begin = end
+ s.end = oldEnd
+ s.nextTick = s.peek()
+ }
+
+ printunlock()
+}
+
+// printDebugLogPC prints a single symbolized PC. If returnPC is true,
+// pc is a return PC that must first be converted to a call PC.
+func printDebugLogPC(pc uintptr, returnPC bool) {
+ fn := findfunc(pc)
+ if returnPC && (!fn.valid() || pc > fn.entry()) {
+ // TODO(austin): Don't back up if the previous frame
+ // was a sigpanic.
+ pc--
+ }
+
+ print(hex(pc))
+ if !fn.valid() {
+ print(" [unknown PC]")
+ } else {
+ name := funcname(fn)
+ file, line := funcline(fn, pc)
+ print(" [", name, "+", hex(pc-fn.entry()),
+ " ", file, ":", line, "]")
+ }
+}
diff --git a/contrib/go/_std_1.18/src/runtime/debuglog_off.go b/contrib/go/_std_1.19/src/runtime/debuglog_off.go
index fa3be39c70..fa3be39c70 100644
--- a/contrib/go/_std_1.18/src/runtime/debuglog_off.go
+++ b/contrib/go/_std_1.19/src/runtime/debuglog_off.go
diff --git a/contrib/go/_std_1.18/src/runtime/defs_darwin_amd64.go b/contrib/go/_std_1.19/src/runtime/defs_darwin_amd64.go
index cbc26bfcff..cbc26bfcff 100644
--- a/contrib/go/_std_1.18/src/runtime/defs_darwin_amd64.go
+++ b/contrib/go/_std_1.19/src/runtime/defs_darwin_amd64.go
diff --git a/contrib/go/_std_1.19/src/runtime/defs_linux_amd64.go b/contrib/go/_std_1.19/src/runtime/defs_linux_amd64.go
new file mode 100644
index 0000000000..da4d357532
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/defs_linux_amd64.go
@@ -0,0 +1,301 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_linux.go defs1_linux.go
+
+package runtime
+
+import "unsafe"
+
+const (
+ _EINTR = 0x4
+ _EAGAIN = 0xb
+ _ENOMEM = 0xc
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x20
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_DONTNEED = 0x4
+ _MADV_FREE = 0x8
+ _MADV_HUGEPAGE = 0xe
+ _MADV_NOHUGEPAGE = 0xf
+
+ _SA_RESTART = 0x10000000
+ _SA_ONSTACK = 0x8000000
+ _SA_RESTORER = 0x4000000
+ _SA_SIGINFO = 0x4
+
+ _SI_KERNEL = 0x80
+ _SI_TIMER = -0x2
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGBUS = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGUSR1 = 0xa
+ _SIGSEGV = 0xb
+ _SIGUSR2 = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGSTKFLT = 0x10
+ _SIGCHLD = 0x11
+ _SIGCONT = 0x12
+ _SIGSTOP = 0x13
+ _SIGTSTP = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGURG = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGIO = 0x1d
+ _SIGPWR = 0x1e
+ _SIGSYS = 0x1f
+
+ _SIGRTMIN = 0x20
+
+ _FPE_INTDIV = 0x1
+ _FPE_INTOVF = 0x2
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _CLOCK_THREAD_CPUTIME_ID = 0x3
+
+ _SIGEV_THREAD_ID = 0x4
+
+ _EPOLLIN = 0x1
+ _EPOLLOUT = 0x4
+ _EPOLLERR = 0x8
+ _EPOLLHUP = 0x10
+ _EPOLLRDHUP = 0x2000
+ _EPOLLET = 0x80000000
+ _EPOLL_CLOEXEC = 0x80000
+ _EPOLL_CTL_ADD = 0x1
+ _EPOLL_CTL_DEL = 0x2
+ _EPOLL_CTL_MOD = 0x3
+
+ _AF_UNIX = 0x1
+ _SOCK_DGRAM = 0x2
+)
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int64
+}
+
+//go:nosplit
+func (ts *timespec) setNsec(ns int64) {
+ ts.tv_sec = ns / 1e9
+ ts.tv_nsec = ns % 1e9
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int64
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = int64(x)
+}
+
+type sigactiont struct {
+ sa_handler uintptr
+ sa_flags uint64
+ sa_restorer uintptr
+ sa_mask uint64
+}
+
+type siginfoFields struct {
+ si_signo int32
+ si_errno int32
+ si_code int32
+ // below here is a union; si_addr is the only field we use
+ si_addr uint64
+}
+
+type siginfo struct {
+ siginfoFields
+
+ // Pad struct to the max size in the kernel.
+ _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte
+}
+
+type itimerspec struct {
+ it_interval timespec
+ it_value timespec
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type sigeventFields struct {
+ value uintptr
+ signo int32
+ notify int32
+ // below here is a union; sigev_notify_thread_id is the only field we use
+ sigev_notify_thread_id int32
+}
+
+type sigevent struct {
+ sigeventFields
+
+ // Pad struct to the max size in the kernel.
+ _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte
+}
+
+type epollevent struct {
+ events uint32
+ data [8]byte // unaligned uintptr
+}
+
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_linux.go defs1_linux.go
+
+const (
+ _O_RDONLY = 0x0
+ _O_NONBLOCK = 0x800
+ _O_CLOEXEC = 0x80000
+)
+
+type usigset struct {
+ __val [16]uint64
+}
+
+type fpxreg struct {
+ significand [4]uint16
+ exponent uint16
+ padding [3]uint16
+}
+
+type xmmreg struct {
+ element [4]uint32
+}
+
+type fpstate struct {
+ cwd uint16
+ swd uint16
+ ftw uint16
+ fop uint16
+ rip uint64
+ rdp uint64
+ mxcsr uint32
+ mxcr_mask uint32
+ _st [8]fpxreg
+ _xmm [16]xmmreg
+ padding [24]uint32
+}
+
+type fpxreg1 struct {
+ significand [4]uint16
+ exponent uint16
+ padding [3]uint16
+}
+
+type xmmreg1 struct {
+ element [4]uint32
+}
+
+type fpstate1 struct {
+ cwd uint16
+ swd uint16
+ ftw uint16
+ fop uint16
+ rip uint64
+ rdp uint64
+ mxcsr uint32
+ mxcr_mask uint32
+ _st [8]fpxreg1
+ _xmm [16]xmmreg1
+ padding [24]uint32
+}
+
+type fpreg1 struct {
+ significand [4]uint16
+ exponent uint16
+}
+
+type stackt struct {
+ ss_sp *byte
+ ss_flags int32
+ pad_cgo_0 [4]byte
+ ss_size uintptr
+}
+
+type mcontext struct {
+ gregs [23]uint64
+ fpregs *fpstate
+ __reserved1 [8]uint64
+}
+
+type ucontext struct {
+ uc_flags uint64
+ uc_link *ucontext
+ uc_stack stackt
+ uc_mcontext mcontext
+ uc_sigmask usigset
+ __fpregs_mem fpstate
+}
+
+type sigcontext struct {
+ r8 uint64
+ r9 uint64
+ r10 uint64
+ r11 uint64
+ r12 uint64
+ r13 uint64
+ r14 uint64
+ r15 uint64
+ rdi uint64
+ rsi uint64
+ rbp uint64
+ rbx uint64
+ rdx uint64
+ rax uint64
+ rcx uint64
+ rsp uint64
+ rip uint64
+ eflags uint64
+ cs uint16
+ gs uint16
+ fs uint16
+ __pad0 uint16
+ err uint64
+ trapno uint64
+ oldmask uint64
+ cr2 uint64
+ fpstate *fpstate1
+ __reserved1 [8]uint64
+}
+
+type sockaddr_un struct {
+ family uint16
+ path [108]byte
+}
diff --git a/contrib/go/_std_1.18/src/runtime/duff_amd64.s b/contrib/go/_std_1.19/src/runtime/duff_amd64.s
index df010f5853..df010f5853 100644
--- a/contrib/go/_std_1.18/src/runtime/duff_amd64.s
+++ b/contrib/go/_std_1.19/src/runtime/duff_amd64.s
diff --git a/contrib/go/_std_1.19/src/runtime/env_posix.go b/contrib/go/_std_1.19/src/runtime/env_posix.go
new file mode 100644
index 0000000000..94a19d80d8
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/env_posix.go
@@ -0,0 +1,78 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm) || windows || plan9
+
+package runtime
+
+import "unsafe"
+
+func gogetenv(key string) string {
+ env := environ()
+ if env == nil {
+ throw("getenv before env init")
+ }
+ for _, s := range env {
+ if len(s) > len(key) && s[len(key)] == '=' && envKeyEqual(s[:len(key)], key) {
+ return s[len(key)+1:]
+ }
+ }
+ return ""
+}
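+
+// For example (sketch): if environ() contains "GOROOT=/usr/local/go",
+// gogetenv("GOROOT") returns "/usr/local/go"; a missing key returns "".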
+
+// envKeyEqual reports whether a == b, with ASCII-only case insensitivity
+// on Windows. The two strings must have the same length.
+func envKeyEqual(a, b string) bool {
+ if GOOS == "windows" { // case insensitive
+ for i := 0; i < len(a); i++ {
+ ca, cb := a[i], b[i]
+ if ca == cb || lowerASCII(ca) == lowerASCII(cb) {
+ continue
+ }
+ return false
+ }
+ return true
+ }
+ return a == b
+}
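+
+// For example, envKeyEqual("Path", "PATH") is true on Windows (ASCII
+// case-insensitive comparison) and false on other platforms.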
+
+func lowerASCII(c byte) byte {
+ if 'A' <= c && c <= 'Z' {
+ return c + ('a' - 'A')
+ }
+ return c
+}
+
+var _cgo_setenv unsafe.Pointer // pointer to C function
+var _cgo_unsetenv unsafe.Pointer // pointer to C function
+
+// Update the C environment if cgo is loaded.
+// Called from syscall.Setenv.
+//
+//go:linkname syscall_setenv_c syscall.setenv_c
+func syscall_setenv_c(k string, v string) {
+ if _cgo_setenv == nil {
+ return
+ }
+ arg := [2]unsafe.Pointer{cstring(k), cstring(v)}
+ asmcgocall(_cgo_setenv, unsafe.Pointer(&arg))
+}
+
+// Update the C environment if cgo is loaded.
+// Called from syscall.unsetenv.
+//
+//go:linkname syscall_unsetenv_c syscall.unsetenv_c
+func syscall_unsetenv_c(k string) {
+ if _cgo_unsetenv == nil {
+ return
+ }
+ arg := [1]unsafe.Pointer{cstring(k)}
+ asmcgocall(_cgo_unsetenv, unsafe.Pointer(&arg))
+}
+
+func cstring(s string) unsafe.Pointer {
+ p := make([]byte, len(s)+1)
+ copy(p, s)
+ return unsafe.Pointer(&p[0])
+}
diff --git a/contrib/go/_std_1.19/src/runtime/error.go b/contrib/go/_std_1.19/src/runtime/error.go
new file mode 100644
index 0000000000..b11473c634
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/error.go
@@ -0,0 +1,330 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "internal/bytealg"
+
+// The Error interface identifies a run time error.
+type Error interface {
+ error
+
+ // RuntimeError is a no-op function but
+ // serves to distinguish types that are run time
+ // errors from ordinary errors: a type is a
+ // run time error if it has a RuntimeError method.
+ RuntimeError()
+}
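+
+// A hypothetical use (sketch): a deferred handler can distinguish
+// runtime errors from other panic values:
+//
+//	defer func() {
+//		if err, ok := recover().(Error); ok {
+//			println(err.Error())
+//		}
+//	}()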
+
+// A TypeAssertionError explains a failed type assertion.
+type TypeAssertionError struct {
+ _interface *_type
+ concrete *_type
+ asserted *_type
+ missingMethod string // one method needed by Interface, missing from Concrete
+}
+
+func (*TypeAssertionError) RuntimeError() {}
+
+func (e *TypeAssertionError) Error() string {
+ inter := "interface"
+ if e._interface != nil {
+ inter = e._interface.string()
+ }
+ as := e.asserted.string()
+ if e.concrete == nil {
+ return "interface conversion: " + inter + " is nil, not " + as
+ }
+ cs := e.concrete.string()
+ if e.missingMethod == "" {
+ msg := "interface conversion: " + inter + " is " + cs + ", not " + as
+ if cs == as {
+ // provide slightly clearer error message
+ if e.concrete.pkgpath() != e.asserted.pkgpath() {
+ msg += " (types from different packages)"
+ } else {
+ msg += " (types from different scopes)"
+ }
+ }
+ return msg
+ }
+ return "interface conversion: " + cs + " is not " + as +
+ ": missing method " + e.missingMethod
+}
+
+// itoa converts val to a decimal representation. The result is
+// written somewhere within buf and the location of the result is returned.
+// buf must be at least 20 bytes.
+//
+//go:nosplit
+func itoa(buf []byte, val uint64) []byte {
+ i := len(buf) - 1
+ for val >= 10 {
+ buf[i] = byte(val%10 + '0')
+ i--
+ val /= 10
+ }
+ buf[i] = byte(val + '0')
+ return buf[i:]
+}
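+
+// For example, with a 20-byte buf, itoa(buf[:], 42) stores '4', '2' at
+// the end of buf and returns the 2-byte subslice holding them.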
+
+// An errorString represents a runtime error described by a single string.
+type errorString string
+
+func (e errorString) RuntimeError() {}
+
+func (e errorString) Error() string {
+ return "runtime error: " + string(e)
+}
+
+type errorAddressString struct {
+ msg string // error message
+ addr uintptr // memory address where the error occurred
+}
+
+func (e errorAddressString) RuntimeError() {}
+
+func (e errorAddressString) Error() string {
+ return "runtime error: " + e.msg
+}
+
+// Addr returns the memory address where a fault occurred.
+// The address provided is best-effort.
+// The veracity of the result may depend on the platform.
+// Errors providing this method will only be returned as
+// a result of using runtime/debug.SetPanicOnFault.
+func (e errorAddressString) Addr() uintptr {
+ return e.addr
+}
+
+// plainError represents a runtime error described by a string, without
+// the "runtime error: " prefix that errorString.Error() adds.
+// See Issue #14965.
+type plainError string
+
+func (e plainError) RuntimeError() {}
+
+func (e plainError) Error() string {
+ return string(e)
+}
+
+// A boundsError represents an indexing or slicing operation gone wrong.
+type boundsError struct {
+ x int64
+ y int
+ // Values in an index or slice expression can be signed or unsigned.
+ // That means we'd need 65 bits to encode all possible indexes, from -2^63 to 2^64-1.
+ // Instead, we keep track of whether x should be interpreted as signed or unsigned.
+ // y is known to be nonnegative and to fit in an int.
+ signed bool
+ code boundsErrorCode
+}
+
+type boundsErrorCode uint8
+
+const (
+ boundsIndex boundsErrorCode = iota // s[x], 0 <= x < len(s) failed
+
+ boundsSliceAlen // s[?:x], 0 <= x <= len(s) failed
+ boundsSliceAcap // s[?:x], 0 <= x <= cap(s) failed
+ boundsSliceB // s[x:y], 0 <= x <= y failed (but boundsSliceA didn't happen)
+
+ boundsSlice3Alen // s[?:?:x], 0 <= x <= len(s) failed
+ boundsSlice3Acap // s[?:?:x], 0 <= x <= cap(s) failed
+ boundsSlice3B // s[?:x:y], 0 <= x <= y failed (but boundsSlice3A didn't happen)
+ boundsSlice3C // s[x:y:?], 0 <= x <= y failed (but boundsSlice3A/B didn't happen)
+
+ boundsConvert // (*[x]T)(s), 0 <= x <= len(s) failed
+ // Note: in the above, len(s) and cap(s) are stored in y
+)
+
+// boundsErrorFmts provides the error text for various out-of-bounds panics.
+// Note: if you change these strings, you should adjust the size of the buffer
+// in boundsError.Error below as well.
+var boundsErrorFmts = [...]string{
+ boundsIndex: "index out of range [%x] with length %y",
+ boundsSliceAlen: "slice bounds out of range [:%x] with length %y",
+ boundsSliceAcap: "slice bounds out of range [:%x] with capacity %y",
+ boundsSliceB: "slice bounds out of range [%x:%y]",
+ boundsSlice3Alen: "slice bounds out of range [::%x] with length %y",
+ boundsSlice3Acap: "slice bounds out of range [::%x] with capacity %y",
+ boundsSlice3B: "slice bounds out of range [:%x:%y]",
+ boundsSlice3C: "slice bounds out of range [%x:%y:]",
+ boundsConvert: "cannot convert slice with length %y to pointer to array with length %x",
+}
+
+// boundsNegErrorFmts are overriding formats if x is negative. In this case there's no need to report y.
+var boundsNegErrorFmts = [...]string{
+ boundsIndex: "index out of range [%x]",
+ boundsSliceAlen: "slice bounds out of range [:%x]",
+ boundsSliceAcap: "slice bounds out of range [:%x]",
+ boundsSliceB: "slice bounds out of range [%x:]",
+ boundsSlice3Alen: "slice bounds out of range [::%x]",
+ boundsSlice3Acap: "slice bounds out of range [::%x]",
+ boundsSlice3B: "slice bounds out of range [:%x:]",
+ boundsSlice3C: "slice bounds out of range [%x::]",
+}
+
+func (e boundsError) RuntimeError() {}
+
+func appendIntStr(b []byte, v int64, signed bool) []byte {
+ if signed && v < 0 {
+ b = append(b, '-')
+ v = -v
+ }
+ var buf [20]byte
+ b = append(b, itoa(buf[:], uint64(v))...)
+ return b
+}
+
+func (e boundsError) Error() string {
+ fmt := boundsErrorFmts[e.code]
+ if e.signed && e.x < 0 {
+ fmt = boundsNegErrorFmts[e.code]
+ }
+ // max message length is 99: "runtime error: slice bounds out of range [::%x] with capacity %y"
+ // x can be at most 20 characters. y can be at most 19.
+ b := make([]byte, 0, 100)
+ b = append(b, "runtime error: "...)
+ for i := 0; i < len(fmt); i++ {
+ c := fmt[i]
+ if c != '%' {
+ b = append(b, c)
+ continue
+ }
+ i++
+ switch fmt[i] {
+ case 'x':
+ b = appendIntStr(b, e.x, e.signed)
+ case 'y':
+ b = appendIntStr(b, int64(e.y), true)
+ }
+ }
+ return string(b)
+}
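+
+// For illustration, indexing s[5] on a slice of length 3 produces
+// a boundsError that renders as:
+//
+//	runtime error: index out of range [5] with length 3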
+
+type stringer interface {
+ String() string
+}
+
+// printany prints an argument passed to panic.
+// If panic is called with a value that has a String or Error method,
+// it has already been converted into a string by preprintpanics.
+func printany(i any) {
+ switch v := i.(type) {
+ case nil:
+ print("nil")
+ case bool:
+ print(v)
+ case int:
+ print(v)
+ case int8:
+ print(v)
+ case int16:
+ print(v)
+ case int32:
+ print(v)
+ case int64:
+ print(v)
+ case uint:
+ print(v)
+ case uint8:
+ print(v)
+ case uint16:
+ print(v)
+ case uint32:
+ print(v)
+ case uint64:
+ print(v)
+ case uintptr:
+ print(v)
+ case float32:
+ print(v)
+ case float64:
+ print(v)
+ case complex64:
+ print(v)
+ case complex128:
+ print(v)
+ case string:
+ print(v)
+ default:
+ printanycustomtype(i)
+ }
+}
+
+func printanycustomtype(i any) {
+ eface := efaceOf(&i)
+ typestring := eface._type.string()
+
+ switch eface._type.kind {
+ case kindString:
+ print(typestring, `("`, *(*string)(eface.data), `")`)
+ case kindBool:
+ print(typestring, "(", *(*bool)(eface.data), ")")
+ case kindInt:
+ print(typestring, "(", *(*int)(eface.data), ")")
+ case kindInt8:
+ print(typestring, "(", *(*int8)(eface.data), ")")
+ case kindInt16:
+ print(typestring, "(", *(*int16)(eface.data), ")")
+ case kindInt32:
+ print(typestring, "(", *(*int32)(eface.data), ")")
+ case kindInt64:
+ print(typestring, "(", *(*int64)(eface.data), ")")
+ case kindUint:
+ print(typestring, "(", *(*uint)(eface.data), ")")
+ case kindUint8:
+ print(typestring, "(", *(*uint8)(eface.data), ")")
+ case kindUint16:
+ print(typestring, "(", *(*uint16)(eface.data), ")")
+ case kindUint32:
+ print(typestring, "(", *(*uint32)(eface.data), ")")
+ case kindUint64:
+ print(typestring, "(", *(*uint64)(eface.data), ")")
+ case kindUintptr:
+ print(typestring, "(", *(*uintptr)(eface.data), ")")
+ case kindFloat32:
+ print(typestring, "(", *(*float32)(eface.data), ")")
+ case kindFloat64:
+ print(typestring, "(", *(*float64)(eface.data), ")")
+ case kindComplex64:
+ print(typestring, *(*complex64)(eface.data))
+ case kindComplex128:
+ print(typestring, *(*complex128)(eface.data))
+ default:
+ print("(", typestring, ") ", eface.data)
+ }
+}
+
+// panicwrap generates a panic for a call to a wrapped value method
+// with a nil pointer receiver.
+//
+// It is called from the generated wrapper code.
+func panicwrap() {
+ pc := getcallerpc()
+ name := funcname(findfunc(pc))
+ // name is something like "main.(*T).F".
+ // We want to extract pkg ("main"), typ ("T"), and meth ("F").
+ // Do it by finding the parens.
+ i := bytealg.IndexByteString(name, '(')
+ if i < 0 {
+ throw("panicwrap: no ( in " + name)
+ }
+ pkg := name[:i-1]
+ if i+2 >= len(name) || name[i-1:i+2] != ".(*" {
+ throw("panicwrap: unexpected string after package name: " + name)
+ }
+ name = name[i+2:]
+ i = bytealg.IndexByteString(name, ')')
+ if i < 0 {
+ throw("panicwrap: no ) in " + name)
+ }
+ if i+2 >= len(name) || name[i:i+2] != ")." {
+ throw("panicwrap: unexpected string after type name: " + name)
+ }
+ typ := name[:i]
+ meth := name[i+2:]
+ panic(plainError("value method " + pkg + "." + typ + "." + meth + " called using nil *" + typ + " pointer"))
+}
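+
+// For illustration: for a wrapper named "main.(*T).F", the resulting
+// panic message is "value method main.T.F called using nil *T pointer".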
diff --git a/contrib/go/_std_1.19/src/runtime/extern.go b/contrib/go/_std_1.19/src/runtime/extern.go
new file mode 100644
index 0000000000..15c519d233
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/extern.go
@@ -0,0 +1,287 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package runtime contains operations that interact with Go's runtime system,
+such as functions to control goroutines. It also includes the low-level type information
+used by the reflect package; see reflect's documentation for the programmable
+interface to the run-time type system.
+
+# Environment Variables
+
+The following environment variables ($name or %name%, depending on the host
+operating system) control the run-time behavior of Go programs. The meanings
+and use may change from release to release.
+
+The GOGC variable sets the initial garbage collection target percentage.
+A collection is triggered when the ratio of freshly allocated data to live data
+remaining after the previous collection reaches this percentage. The default
+is GOGC=100. Setting GOGC=off disables the garbage collector entirely.
+[runtime/debug.SetGCPercent] allows changing this percentage at run time.
+
+The GOMEMLIMIT variable sets a soft memory limit for the runtime. This memory limit
+includes the Go heap and all other memory managed by the runtime, and excludes
+external memory sources such as mappings of the binary itself, memory managed in
+other languages, and memory held by the operating system on behalf of the Go
+program. GOMEMLIMIT is a numeric value in bytes with an optional unit suffix.
+The supported suffixes include B, KiB, MiB, GiB, and TiB. These suffixes
+represent quantities of bytes as defined by the IEC 80000-13 standard. That is,
+they are based on powers of two: KiB means 2^10 bytes, MiB means 2^20 bytes,
+and so on. The default setting is math.MaxInt64, which effectively disables the
+memory limit. [runtime/debug.SetMemoryLimit] allows changing this limit at run
+time.
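+For example, setting GOMEMLIMIT=2GiB limits this combined total to 2^31 bytes.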
+
+The GODEBUG variable controls debugging variables within the runtime.
+It is a comma-separated list of name=val pairs setting these named variables:
+
+ allocfreetrace: setting allocfreetrace=1 causes every allocation to be
+ profiled and a stack trace printed on each object's allocation and free.
+
+ clobberfree: setting clobberfree=1 causes the garbage collector to
+ clobber the memory content of an object with bad content when it frees
+ the object.
+
+ cgocheck: setting cgocheck=0 disables all checks for packages
+ using cgo to incorrectly pass Go pointers to non-Go code.
+ Setting cgocheck=1 (the default) enables relatively cheap
+ checks that may miss some errors. Setting cgocheck=2 enables
+ expensive checks that should not miss any errors, but will
+ cause your program to run slower.
+
+ efence: setting efence=1 causes the allocator to run in a mode
+ where each object is allocated on a unique page and addresses are
+ never recycled.
+
+ gccheckmark: setting gccheckmark=1 enables verification of the
+ garbage collector's concurrent mark phase by performing a
+ second mark pass while the world is stopped. If the second
+ pass finds a reachable object that was not found by concurrent
+ mark, the garbage collector will panic.
+
+ gcpacertrace: setting gcpacertrace=1 causes the garbage collector to
+ print information about the internal state of the concurrent pacer.
+
+ gcshrinkstackoff: setting gcshrinkstackoff=1 disables moving goroutines
+ onto smaller stacks. In this mode, a goroutine's stack can only grow.
+
+ gcstoptheworld: setting gcstoptheworld=1 disables concurrent garbage collection,
+ making every garbage collection a stop-the-world event. Setting gcstoptheworld=2
+ also disables concurrent sweeping after the garbage collection finishes.
+
+ gctrace: setting gctrace=1 causes the garbage collector to emit a single line to standard
+ error at each collection, summarizing the amount of memory collected and the
+ length of the pause. The format of this line is subject to change.
+ Currently, it is:
+ gc # @#s #%: #+#+# ms clock, #+#/#/#+# ms cpu, #->#-># MB, # MB goal, # MB stacks, # MB globals, # P
+ where the fields are as follows:
+ gc # the GC number, incremented at each GC
+ @#s time in seconds since program start
+ #% percentage of time spent in GC since program start
+ #+...+# wall-clock/CPU times for the phases of the GC
+ #->#-># MB heap size at GC start, at GC end, and live heap
+ # MB goal goal heap size
+ # MB stacks estimated scannable stack size
+ # MB globals scannable global size
+ # P number of processors used
+ The phases are stop-the-world (STW) sweep termination, concurrent
+ mark and scan, and STW mark termination. The CPU times
+ for mark/scan are broken down into assist time (GC performed in
+ line with allocation), background GC time, and idle GC time.
+ If the line ends with "(forced)", this GC was forced by a
+ runtime.GC() call.
+
+ harddecommit: setting harddecommit=1 causes memory that is returned to the OS to
+ also have protections removed on it. This is the only mode of operation on Windows,
+ but is helpful in debugging scavenger-related issues on other platforms. Currently,
+ only supported on Linux.
+
+ inittrace: setting inittrace=1 causes the runtime to emit a single line to standard
+ error for each package with init work, summarizing the execution time and memory
+ allocation. No information is printed for inits executed as part of plugin loading
+ and for packages without both user defined and compiler generated init work.
+ The format of this line is subject to change. Currently, it is:
+ init # @#ms, # ms clock, # bytes, # allocs
+ where the fields are as follows:
+ init # the package name
+ @# ms time in milliseconds when the init started since program start
+ # clock wall-clock time for package initialization work
+ # bytes memory allocated on the heap
+ # allocs number of heap allocations
+
+ madvdontneed: setting madvdontneed=0 will use MADV_FREE
+ instead of MADV_DONTNEED on Linux when returning memory to the
+ kernel. This is more efficient, but means RSS numbers will
+ drop only when the OS is under memory pressure.
+
+ memprofilerate: setting memprofilerate=X will update the value of runtime.MemProfileRate.
+ When set to 0 memory profiling is disabled. Refer to the description of
+ MemProfileRate for the default value.
+
+ invalidptr: invalidptr=1 (the default) causes the garbage collector and stack
+ copier to crash the program if an invalid pointer value (for example, 1)
+ is found in a pointer-typed location. Setting invalidptr=0 disables this check.
+ This should only be used as a temporary workaround to diagnose buggy code.
+ The real fix is to not store integers in pointer-typed locations.
+
+ sbrk: setting sbrk=1 replaces the memory allocator and garbage collector
+ with a trivial allocator that obtains memory from the operating system and
+ never reclaims any memory.
+
+ scavtrace: setting scavtrace=1 causes the runtime to emit a single line to standard
+ error, roughly once per GC cycle, summarizing the amount of work done by the
+ scavenger as well as the total amount of memory returned to the operating system
+ and an estimate of physical memory utilization. The format of this line is subject
+ to change, but currently it is:
+ scav # KiB work, # KiB total, #% util
+ where the fields are as follows:
+ # KiB work the amount of memory returned to the OS since the last line
+ # KiB total the total amount of memory returned to the OS
+ #% util the fraction of all unscavenged memory which is in-use
+ If the line ends with "(forced)", then scavenging was forced by a
+ debug.FreeOSMemory() call.
+
+ scheddetail: setting schedtrace=X and scheddetail=1 causes the scheduler to emit
+ detailed multiline info every X milliseconds, describing state of the scheduler,
+ processors, threads and goroutines.
+
+ schedtrace: setting schedtrace=X causes the scheduler to emit a single line to standard
+ error every X milliseconds, summarizing the scheduler state.
+
+ tracebackancestors: setting tracebackancestors=N extends tracebacks with the stacks at
+ which goroutines were created, where N limits the number of ancestor goroutines to
+ report. This also extends the information returned by runtime.Stack. Ancestor's goroutine
+ IDs will refer to the ID of the goroutine at the time of creation; it's possible for this
+ ID to be reused for another goroutine. Setting N to 0 will report no ancestry information.
+
+ asyncpreemptoff: asyncpreemptoff=1 disables signal-based
+ asynchronous goroutine preemption. This makes some loops
+ non-preemptible for long periods, which may delay GC and
+ goroutine scheduling. This is useful for debugging GC issues
+ because it also disables the conservative stack scanning used
+ for asynchronously preempted goroutines.
+
+The net and net/http packages also refer to debugging variables in GODEBUG.
+See the documentation for those packages for details.
+
+The GOMAXPROCS variable limits the number of operating system threads that
+can execute user-level Go code simultaneously. There is no limit to the number of threads
+that can be blocked in system calls on behalf of Go code; those do not count against
+the GOMAXPROCS limit. This package's GOMAXPROCS function queries and changes
+the limit.
+
+The GORACE variable configures the race detector, for programs built using -race.
+See https://golang.org/doc/articles/race_detector.html for details.
+
+The GOTRACEBACK variable controls the amount of output generated when a Go
+program fails due to an unrecovered panic or an unexpected runtime condition.
+By default, a failure prints a stack trace for the current goroutine,
+eliding functions internal to the run-time system, and then exits with exit code 2.
+The failure prints stack traces for all goroutines if there is no current goroutine
+or the failure is internal to the run-time.
+GOTRACEBACK=none omits the goroutine stack traces entirely.
+GOTRACEBACK=single (the default) behaves as described above.
+GOTRACEBACK=all adds stack traces for all user-created goroutines.
+GOTRACEBACK=system is like “all” but adds stack frames for run-time functions
+and shows goroutines created internally by the run-time.
+GOTRACEBACK=crash is like “system” but crashes in an operating system-specific
+manner instead of exiting. For example, on Unix systems, the crash raises
+SIGABRT to trigger a core dump.
+For historical reasons, the GOTRACEBACK settings 0, 1, and 2 are synonyms for
+none, all, and system, respectively.
+The runtime/debug package's SetTraceback function allows increasing the
+amount of output at run time, but it cannot reduce the amount below that
+specified by the environment variable.
+See https://golang.org/pkg/runtime/debug/#SetTraceback.
+
+The GOARCH, GOOS, GOPATH, and GOROOT environment variables complete
+the set of Go environment variables. They influence the building of Go programs
+(see https://golang.org/cmd/go and https://golang.org/pkg/go/build).
+GOARCH, GOOS, and GOROOT are recorded at compile time and made available by
+constants or functions in this package, but they do not influence the execution
+of the run-time system.
+*/
+package runtime
+
+import (
+ "internal/goarch"
+ "internal/goos"
+)
+
+// Caller reports file and line number information about function invocations on
+// the calling goroutine's stack. The argument skip is the number of stack frames
+// to ascend, with 0 identifying the caller of Caller. (For historical reasons the
+// meaning of skip differs between Caller and Callers.) The return values report the
+// program counter, file name, and line number within the file of the corresponding
+// call. The boolean ok is false if it was not possible to recover the information.
+func Caller(skip int) (pc uintptr, file string, line int, ok bool) {
+ rpc := make([]uintptr, 1)
+ n := callers(skip+1, rpc[:])
+ if n < 1 {
+ return
+ }
+ frame, _ := CallersFrames(rpc).Next()
+ return frame.PC, frame.File, frame.Line, frame.PC != 0
+}
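+
+// A hypothetical use (sketch):
+//
+//	_, file, line, ok := runtime.Caller(0)
+//	if ok {
+//		println(file, line) // the source location of this Caller call
+//	}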
+
+// Callers fills the slice pc with the return program counters of function invocations
+// on the calling goroutine's stack. The argument skip is the number of stack frames
+// to skip before recording in pc, with 0 identifying the frame for Callers itself and
+// 1 identifying the caller of Callers.
+// It returns the number of entries written to pc.
+//
+// To translate these PCs into symbolic information such as function
+// names and line numbers, use CallersFrames. CallersFrames accounts
+// for inlined functions and adjusts the return program counters into
+// call program counters. Iterating over the returned slice of PCs
+// directly is discouraged, as is using FuncForPC on any of the
+// returned PCs, since these cannot account for inlining or return
+// program counter adjustment.
+func Callers(skip int, pc []uintptr) int {
+ // runtime.callers uses pc.array==nil as a signal
+ // to print a stack trace. Pick off 0-length pc here
+ // so that we don't let a nil pc slice get to it.
+ if len(pc) == 0 {
+ return 0
+ }
+ return callers(skip, pc)
+}
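+
+// A hypothetical use (sketch):
+//
+//	pc := make([]uintptr, 16)
+//	n := runtime.Callers(1, pc) // skip the Callers frame itself
+//	frames := runtime.CallersFrames(pc[:n])
+//	for {
+//		frame, more := frames.Next()
+//		println(frame.Function)
+//		if !more {
+//			break
+//		}
+//	}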
+
+var defaultGOROOT string // set by cmd/link
+
+// GOROOT returns the root of the Go tree. It uses the
+// GOROOT environment variable, if set at process start,
+// or else the root used during the Go build.
+func GOROOT() string {
+ s := gogetenv("GOROOT")
+ if s != "" {
+ return s
+ }
+ return defaultGOROOT
+}
+
+// buildVersion is the Go tree's version string at build time.
+//
+// If any GOEXPERIMENTs are set to non-default values, it will include
+// "X:<GOEXPERIMENT>".
+//
+// This is set by the linker.
+//
+// This is accessed by "go version <binary>".
+var buildVersion string
+
+// Version returns the Go tree's version string.
+// It is either the commit hash and date at the time of the build or,
+// when possible, a release tag like "go1.3".
+func Version() string {
+ return buildVersion
+}
+
+// GOOS is the running program's operating system target:
+// one of darwin, freebsd, linux, and so on.
+// To view possible combinations of GOOS and GOARCH, run "go tool dist list".
+const GOOS string = goos.GOOS
+
+// GOARCH is the running program's architecture target:
+// one of 386, amd64, arm, s390x, and so on.
+const GOARCH string = goarch.GOARCH
diff --git a/contrib/go/_std_1.18/src/runtime/fastlog2.go b/contrib/go/_std_1.19/src/runtime/fastlog2.go
index 1f251bfaab..1f251bfaab 100644
--- a/contrib/go/_std_1.18/src/runtime/fastlog2.go
+++ b/contrib/go/_std_1.19/src/runtime/fastlog2.go
diff --git a/contrib/go/_std_1.18/src/runtime/fastlog2table.go b/contrib/go/_std_1.19/src/runtime/fastlog2table.go
index 6ba4a7d3f2..6ba4a7d3f2 100644
--- a/contrib/go/_std_1.18/src/runtime/fastlog2table.go
+++ b/contrib/go/_std_1.19/src/runtime/fastlog2table.go
diff --git a/contrib/go/_std_1.19/src/runtime/float.go b/contrib/go/_std_1.19/src/runtime/float.go
new file mode 100644
index 0000000000..c80c8b7abf
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/float.go
@@ -0,0 +1,54 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+var inf = float64frombits(0x7FF0000000000000)
+
+// isNaN reports whether f is an IEEE 754 “not-a-number” value.
+func isNaN(f float64) (is bool) {
+ // IEEE 754 says that only NaNs satisfy f != f.
+ return f != f
+}
+
+// isFinite reports whether f is neither NaN nor an infinity.
+func isFinite(f float64) bool {
+ return !isNaN(f - f)
+}
+
+// isInf reports whether f is an infinity.
+func isInf(f float64) bool {
+ return !isNaN(f) && !isFinite(f)
+}
+
+// abs returns the absolute value of x.
+//
+// Special cases are:
+//
+// Abs(±Inf) = +Inf
+// Abs(NaN) = NaN
+func abs(x float64) float64 {
+ const sign = 1 << 63
+ return float64frombits(float64bits(x) &^ sign)
+}
+
+// copysign returns a value with the magnitude
+// of x and the sign of y.
+func copysign(x, y float64) float64 {
+ const sign = 1 << 63
+ return float64frombits(float64bits(x)&^sign | float64bits(y)&sign)
+}
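+
+// For illustration: abs(-2.5) clears bit 63 of the IEEE 754
+// representation, yielding 2.5, and copysign(3, -1) sets it, yielding -3.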
+
+// float64bits returns the IEEE 754 binary representation of f.
+func float64bits(f float64) uint64 {
+ return *(*uint64)(unsafe.Pointer(&f))
+}
+
+// float64frombits returns the floating-point number corresponding to
+// the IEEE 754 binary representation b.
+func float64frombits(b uint64) float64 {
+ return *(*float64)(unsafe.Pointer(&b))
+}
diff --git a/contrib/go/_std_1.18/src/runtime/funcdata.h b/contrib/go/_std_1.19/src/runtime/funcdata.h
index 2e2bb30446..2e2bb30446 100644
--- a/contrib/go/_std_1.18/src/runtime/funcdata.h
+++ b/contrib/go/_std_1.19/src/runtime/funcdata.h
diff --git a/contrib/go/_std_1.18/src/runtime/go_tls.h b/contrib/go/_std_1.19/src/runtime/go_tls.h
index a47e798d9d..a47e798d9d 100644
--- a/contrib/go/_std_1.18/src/runtime/go_tls.h
+++ b/contrib/go/_std_1.19/src/runtime/go_tls.h
diff --git a/contrib/go/_std_1.19/src/runtime/hash64.go b/contrib/go/_std_1.19/src/runtime/hash64.go
new file mode 100644
index 0000000000..2864a4b963
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/hash64.go
@@ -0,0 +1,92 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Hashing algorithm inspired by
+// wyhash: https://github.com/wangyi-fudan/wyhash
+
+//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm
+
+package runtime
+
+import (
+ "runtime/internal/math"
+ "unsafe"
+)
+
+const (
+ m1 = 0xa0761d6478bd642f
+ m2 = 0xe7037ed1a0b428db
+ m3 = 0x8ebc6af09c88c6e3
+ m4 = 0x589965cc75374cc3
+ m5 = 0x1d8e4e27c47d124f
+)
+
+func memhashFallback(p unsafe.Pointer, seed, s uintptr) uintptr {
+ var a, b uintptr
+ seed ^= hashkey[0] ^ m1
+ switch {
+ case s == 0:
+ return seed
+ case s < 4:
+ a = uintptr(*(*byte)(p))
+ a |= uintptr(*(*byte)(add(p, s>>1))) << 8
+ a |= uintptr(*(*byte)(add(p, s-1))) << 16
+ case s == 4:
+ a = r4(p)
+ b = a
+ case s < 8:
+ a = r4(p)
+ b = r4(add(p, s-4))
+ case s == 8:
+ a = r8(p)
+ b = a
+ case s <= 16:
+ a = r8(p)
+ b = r8(add(p, s-8))
+ default:
+ l := s
+ if l > 48 {
+ seed1 := seed
+ seed2 := seed
+ for ; l > 48; l -= 48 {
+ seed = mix(r8(p)^m2, r8(add(p, 8))^seed)
+ seed1 = mix(r8(add(p, 16))^m3, r8(add(p, 24))^seed1)
+ seed2 = mix(r8(add(p, 32))^m4, r8(add(p, 40))^seed2)
+ p = add(p, 48)
+ }
+ seed ^= seed1 ^ seed2
+ }
+ for ; l > 16; l -= 16 {
+ seed = mix(r8(p)^m2, r8(add(p, 8))^seed)
+ p = add(p, 16)
+ }
+ a = r8(add(p, l-16))
+ b = r8(add(p, l-8))
+ }
+
+ return mix(m5^s, mix(a^m2, b^seed))
+}
+
+func memhash32Fallback(p unsafe.Pointer, seed uintptr) uintptr {
+ a := r4(p)
+ return mix(m5^4, mix(a^m2, a^seed^hashkey[0]^m1))
+}
+
+func memhash64Fallback(p unsafe.Pointer, seed uintptr) uintptr {
+ a := r8(p)
+ return mix(m5^8, mix(a^m2, a^seed^hashkey[0]^m1))
+}
+
+func mix(a, b uintptr) uintptr {
+ hi, lo := math.Mul64(uint64(a), uint64(b))
+ return uintptr(hi ^ lo)
+}
+
+func r4(p unsafe.Pointer) uintptr {
+ return uintptr(readUnaligned32(p))
+}
+
+func r8(p unsafe.Pointer) uintptr {
+ return uintptr(readUnaligned64(p))
+}
diff --git a/contrib/go/_std_1.19/src/runtime/heapdump.go b/contrib/go/_std_1.19/src/runtime/heapdump.go
new file mode 100644
index 0000000000..c7f2b7a443
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/heapdump.go
@@ -0,0 +1,752 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Implementation of runtime/debug.WriteHeapDump. Writes all
+// objects in the heap plus additional info (roots, threads,
+// finalizers, etc.) to a file.
+
+// The format of the dumped file is described at
+// https://golang.org/s/go15heapdump.
+
+package runtime
+
+import (
+ "internal/goarch"
+ "unsafe"
+)
+
+//go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
+func runtime_debug_WriteHeapDump(fd uintptr) {
+ stopTheWorld("write heap dump")
+
+ // Keep m on this G's stack instead of the system stack.
+ // Both readmemstats_m and writeheapdump_m have pretty large
+ // peak stack depths and we risk blowing the system stack.
+ // This is safe because the world is stopped, so we don't
+ // need to worry about anyone shrinking and therefore moving
+ // our stack.
+ var m MemStats
+ systemstack(func() {
+ // Call readmemstats_m here instead of deeper in
+ // writeheapdump_m because we might blow the system stack
+ // otherwise.
+ readmemstats_m(&m)
+ writeheapdump_m(fd, &m)
+ })
+
+ startTheWorld()
+}
+
+const (
+ fieldKindEol = 0
+ fieldKindPtr = 1
+ fieldKindIface = 2
+ fieldKindEface = 3
+ tagEOF = 0
+ tagObject = 1
+ tagOtherRoot = 2
+ tagType = 3
+ tagGoroutine = 4
+ tagStackFrame = 5
+ tagParams = 6
+ tagFinalizer = 7
+ tagItab = 8
+ tagOSThread = 9
+ tagMemStats = 10
+ tagQueuedFinalizer = 11
+ tagData = 12
+ tagBSS = 13
+ tagDefer = 14
+ tagPanic = 15
+ tagMemProf = 16
+ tagAllocSample = 17
+)
+
+var dumpfd uintptr // fd to write the dump to.
+var tmpbuf []byte
+
+// buffer of pending write data
+const (
+ bufSize = 4096
+)
+
+var buf [bufSize]byte
+var nbuf uintptr
+
+func dwrite(data unsafe.Pointer, len uintptr) {
+ if len == 0 {
+ return
+ }
+ if nbuf+len <= bufSize {
+ copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
+ nbuf += len
+ return
+ }
+
+ write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
+ if len >= bufSize {
+ write(dumpfd, data, int32(len))
+ nbuf = 0
+ } else {
+ copy(buf[:], (*[bufSize]byte)(data)[:len])
+ nbuf = len
+ }
+}
+
+func dwritebyte(b byte) {
+ dwrite(unsafe.Pointer(&b), 1)
+}
+
+func flush() {
+ write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
+ nbuf = 0
+}
+
+// Cache of types that have been serialized already.
+// We use a type's hash field to pick a bucket.
+// Inside a bucket, we keep a list of types that
+// have been serialized so far, most recently used first.
+// Note: when a bucket overflows we may end up
+// serializing a type more than once. That's ok.
+const (
+ typeCacheBuckets = 256
+ typeCacheAssoc = 4
+)
+
+type typeCacheBucket struct {
+ t [typeCacheAssoc]*_type
+}
+
+var typecache [typeCacheBuckets]typeCacheBucket
+
+// dump a uint64 in a varint format parseable by encoding/binary
+func dumpint(v uint64) {
+ var buf [10]byte
+ var n int
+ for v >= 0x80 {
+ buf[n] = byte(v | 0x80)
+ n++
+ v >>= 7
+ }
+ buf[n] = byte(v)
+ n++
+ dwrite(unsafe.Pointer(&buf), uintptr(n))
+}
+
+func dumpbool(b bool) {
+ if b {
+ dumpint(1)
+ } else {
+ dumpint(0)
+ }
+}
+
+// dump varint uint64 length followed by memory contents
+func dumpmemrange(data unsafe.Pointer, len uintptr) {
+ dumpint(uint64(len))
+ dwrite(data, len)
+}
+
+func dumpslice(b []byte) {
+ dumpint(uint64(len(b)))
+ if len(b) > 0 {
+ dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
+ }
+}
+
+func dumpstr(s string) {
+ sp := stringStructOf(&s)
+ dumpmemrange(sp.str, uintptr(sp.len))
+}
+
+// dump information for a type
+func dumptype(t *_type) {
+ if t == nil {
+ return
+ }
+
+ // If we've definitely serialized the type before,
+ // no need to do it again.
+ b := &typecache[t.hash&(typeCacheBuckets-1)]
+ if t == b.t[0] {
+ return
+ }
+ for i := 1; i < typeCacheAssoc; i++ {
+ if t == b.t[i] {
+ // Move-to-front
+ for j := i; j > 0; j-- {
+ b.t[j] = b.t[j-1]
+ }
+ b.t[0] = t
+ return
+ }
+ }
+
+ // Might not have been dumped yet. Dump it and
+ // remember we did so.
+ for j := typeCacheAssoc - 1; j > 0; j-- {
+ b.t[j] = b.t[j-1]
+ }
+ b.t[0] = t
+
+ // dump the type
+ dumpint(tagType)
+ dumpint(uint64(uintptr(unsafe.Pointer(t))))
+ dumpint(uint64(t.size))
+ if x := t.uncommon(); x == nil || t.nameOff(x.pkgpath).name() == "" {
+ dumpstr(t.string())
+ } else {
+ pkgpathstr := t.nameOff(x.pkgpath).name()
+ pkgpath := stringStructOf(&pkgpathstr)
+ namestr := t.name()
+ name := stringStructOf(&namestr)
+ dumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len)))
+ dwrite(pkgpath.str, uintptr(pkgpath.len))
+ dwritebyte('.')
+ dwrite(name.str, uintptr(name.len))
+ }
+ dumpbool(t.kind&kindDirectIface == 0 || t.ptrdata != 0)
+}
+
+// dump an object
+func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
+ dumpint(tagObject)
+ dumpint(uint64(uintptr(obj)))
+ dumpmemrange(obj, size)
+ dumpfields(bv)
+}
+
+func dumpotherroot(description string, to unsafe.Pointer) {
+ dumpint(tagOtherRoot)
+ dumpstr(description)
+ dumpint(uint64(uintptr(to)))
+}
+
+func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
+ dumpint(tagFinalizer)
+ dumpint(uint64(uintptr(obj)))
+ dumpint(uint64(uintptr(unsafe.Pointer(fn))))
+ dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
+ dumpint(uint64(uintptr(unsafe.Pointer(fint))))
+ dumpint(uint64(uintptr(unsafe.Pointer(ot))))
+}
+
+type childInfo struct {
+ // Information passed up from the callee frame about
+ // the layout of the outargs region.
+ argoff uintptr // where the arguments start in the frame
+ arglen uintptr // size of args region
+ args bitvector // if args.n >= 0, pointer map of args region
+ sp *uint8 // callee sp
+ depth uintptr // depth in call stack (0 == most recent)
+}
+
+// dump kinds & offsets of interesting fields in bv
+func dumpbv(cbv *bitvector, offset uintptr) {
+ for i := uintptr(0); i < uintptr(cbv.n); i++ {
+ if cbv.ptrbit(i) == 1 {
+ dumpint(fieldKindPtr)
+ dumpint(uint64(offset + i*goarch.PtrSize))
+ }
+ }
+}
+
+func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
+ child := (*childInfo)(arg)
+ f := s.fn
+
+ // Figure out what we can about our stack map
+ pc := s.pc
+ pcdata := int32(-1) // Use the entry map at function entry
+ if pc != f.entry() {
+ pc--
+ pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, pc, nil)
+ }
+ if pcdata == -1 {
+ // We do not have a valid pcdata value but there might be a
+ // stackmap for this function. It is likely that we are looking
+ // at the function prologue; assume so and hope for the best.
+ pcdata = 0
+ }
+ stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
+
+ var bv bitvector
+ if stkmap != nil && stkmap.n > 0 {
+ bv = stackmapdata(stkmap, pcdata)
+ } else {
+ bv.n = -1
+ }
+
+ // Dump main body of stack frame.
+ dumpint(tagStackFrame)
+ dumpint(uint64(s.sp)) // lowest address in frame
+ dumpint(uint64(child.depth)) // # of frames deep on the stack
+ dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
+ dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp) // frame contents
+ dumpint(uint64(f.entry()))
+ dumpint(uint64(s.pc))
+ dumpint(uint64(s.continpc))
+ name := funcname(f)
+ if name == "" {
+ name = "unknown function"
+ }
+ dumpstr(name)
+
+ // Dump fields in the outargs section
+ if child.args.n >= 0 {
+ dumpbv(&child.args, child.argoff)
+ } else {
+ // conservative - everything might be a pointer
+ for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize {
+ dumpint(fieldKindPtr)
+ dumpint(uint64(off))
+ }
+ }
+
+ // Dump fields in the local vars section
+ if stkmap == nil {
+ // No locals information, dump everything.
+ for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize {
+ dumpint(fieldKindPtr)
+ dumpint(uint64(off))
+ }
+ } else if stkmap.n < 0 {
+ // Locals size information, dump just the locals.
+ size := uintptr(-stkmap.n)
+ for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize {
+ dumpint(fieldKindPtr)
+ dumpint(uint64(off))
+ }
+ } else if stkmap.n > 0 {
+ // Locals bitmap information, scan just the pointers in
+ // locals.
+ dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp)
+ }
+ dumpint(fieldKindEol)
+
+ // Record arg info for parent.
+ child.argoff = s.argp - s.fp
+ child.arglen = s.arglen
+ child.sp = (*uint8)(unsafe.Pointer(s.sp))
+ child.depth++
+ stkmap = (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
+ if stkmap != nil {
+ child.args = stackmapdata(stkmap, pcdata)
+ } else {
+ child.args.n = -1
+ }
+ return true
+}
+
+func dumpgoroutine(gp *g) {
+ var sp, pc, lr uintptr
+ if gp.syscallsp != 0 {
+ sp = gp.syscallsp
+ pc = gp.syscallpc
+ lr = 0
+ } else {
+ sp = gp.sched.sp
+ pc = gp.sched.pc
+ lr = gp.sched.lr
+ }
+
+ dumpint(tagGoroutine)
+ dumpint(uint64(uintptr(unsafe.Pointer(gp))))
+ dumpint(uint64(sp))
+ dumpint(uint64(gp.goid))
+ dumpint(uint64(gp.gopc))
+ dumpint(uint64(readgstatus(gp)))
+ dumpbool(isSystemGoroutine(gp, false))
+ dumpbool(false) // isbackground
+ dumpint(uint64(gp.waitsince))
+ dumpstr(gp.waitreason.String())
+ dumpint(uint64(uintptr(gp.sched.ctxt)))
+ dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
+ dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
+ dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))
+
+ // dump stack
+ var child childInfo
+ child.args.n = -1
+ child.arglen = 0
+ child.sp = nil
+ child.depth = 0
+ gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, noescape(unsafe.Pointer(&child)), 0)
+
+ // dump defer & panic records
+ for d := gp._defer; d != nil; d = d.link {
+ dumpint(tagDefer)
+ dumpint(uint64(uintptr(unsafe.Pointer(d))))
+ dumpint(uint64(uintptr(unsafe.Pointer(gp))))
+ dumpint(uint64(d.sp))
+ dumpint(uint64(d.pc))
+ fn := *(**funcval)(unsafe.Pointer(&d.fn))
+ dumpint(uint64(uintptr(unsafe.Pointer(fn))))
+ if d.fn == nil {
+ // d.fn can be nil for open-coded defers
+ dumpint(uint64(0))
+ } else {
+ dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
+ }
+ dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
+ }
+ for p := gp._panic; p != nil; p = p.link {
+ dumpint(tagPanic)
+ dumpint(uint64(uintptr(unsafe.Pointer(p))))
+ dumpint(uint64(uintptr(unsafe.Pointer(gp))))
+ eface := efaceOf(&p.arg)
+ dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
+ dumpint(uint64(uintptr(unsafe.Pointer(eface.data))))
+ dumpint(0) // was p->defer, no longer recorded
+ dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
+ }
+}
+
+func dumpgs() {
+ assertWorldStopped()
+
+ // goroutines & stacks
+ forEachG(func(gp *g) {
+ status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
+ switch status {
+ default:
+ print("runtime: unexpected G.status ", hex(status), "\n")
+ throw("dumpgs in STW - bad status")
+ case _Gdead:
+ // ok
+ case _Grunnable,
+ _Gsyscall,
+ _Gwaiting:
+ dumpgoroutine(gp)
+ }
+ })
+}
+
+func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
+ dumpint(tagQueuedFinalizer)
+ dumpint(uint64(uintptr(obj)))
+ dumpint(uint64(uintptr(unsafe.Pointer(fn))))
+ dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
+ dumpint(uint64(uintptr(unsafe.Pointer(fint))))
+ dumpint(uint64(uintptr(unsafe.Pointer(ot))))
+}
+
+func dumproots() {
+ // To protect mheap_.allspans.
+ assertWorldStopped()
+
+ // TODO(mwhudson): dump datamask etc from all objects
+ // data segment
+ dumpint(tagData)
+ dumpint(uint64(firstmoduledata.data))
+ dumpmemrange(unsafe.Pointer(firstmoduledata.data), firstmoduledata.edata-firstmoduledata.data)
+ dumpfields(firstmoduledata.gcdatamask)
+
+ // bss segment
+ dumpint(tagBSS)
+ dumpint(uint64(firstmoduledata.bss))
+ dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss)
+ dumpfields(firstmoduledata.gcbssmask)
+
+ // mspan.types
+ for _, s := range mheap_.allspans {
+ if s.state.get() == mSpanInUse {
+ // Finalizers
+ for sp := s.specials; sp != nil; sp = sp.next {
+ if sp.kind != _KindSpecialFinalizer {
+ continue
+ }
+ spf := (*specialfinalizer)(unsafe.Pointer(sp))
+ p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
+ dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
+ }
+ }
+ }
+
+ // Finalizer queue
+ iterate_finq(finq_callback)
+}
+
+// Bit vector of free marks.
+// Needs to be as big as the largest number of objects per span.
+var freemark [_PageSize / 8]bool
+
+func dumpobjs() {
+ // To protect mheap_.allspans.
+ assertWorldStopped()
+
+ for _, s := range mheap_.allspans {
+ if s.state.get() != mSpanInUse {
+ continue
+ }
+ p := s.base()
+ size := s.elemsize
+ n := (s.npages << _PageShift) / size
+ if n > uintptr(len(freemark)) {
+ throw("freemark array doesn't have enough entries")
+ }
+
+ for freeIndex := uintptr(0); freeIndex < s.nelems; freeIndex++ {
+ if s.isFree(freeIndex) {
+ freemark[freeIndex] = true
+ }
+ }
+
+ for j := uintptr(0); j < n; j, p = j+1, p+size {
+ if freemark[j] {
+ freemark[j] = false
+ continue
+ }
+ dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
+ }
+ }
+}
+
+func dumpparams() {
+ dumpint(tagParams)
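+ // Endianness probe: the first byte of the value 1 is 1 only on a little-endian machine.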
+ x := uintptr(1)
+ if *(*byte)(unsafe.Pointer(&x)) == 1 {
+ dumpbool(false) // little-endian ptrs
+ } else {
+ dumpbool(true) // big-endian ptrs
+ }
+ dumpint(goarch.PtrSize)
+ var arenaStart, arenaEnd uintptr
+ for i1 := range mheap_.arenas {
+ if mheap_.arenas[i1] == nil {
+ continue
+ }
+ for i, ha := range mheap_.arenas[i1] {
+ if ha == nil {
+ continue
+ }
+ base := arenaBase(arenaIdx(i1)<<arenaL1Shift | arenaIdx(i))
+ if arenaStart == 0 || base < arenaStart {
+ arenaStart = base
+ }
+ if base+heapArenaBytes > arenaEnd {
+ arenaEnd = base + heapArenaBytes
+ }
+ }
+ }
+ dumpint(uint64(arenaStart))
+ dumpint(uint64(arenaEnd))
+ dumpstr(goarch.GOARCH)
+ dumpstr(buildVersion)
+ dumpint(uint64(ncpu))
+}
+
+func itab_callback(tab *itab) {
+ t := tab._type
+ dumptype(t)
+ dumpint(tagItab)
+ dumpint(uint64(uintptr(unsafe.Pointer(tab))))
+ dumpint(uint64(uintptr(unsafe.Pointer(t))))
+}
+
+func dumpitabs() {
+ iterate_itabs(itab_callback)
+}
+
+func dumpms() {
+ for mp := allm; mp != nil; mp = mp.alllink {
+ dumpint(tagOSThread)
+ dumpint(uint64(uintptr(unsafe.Pointer(mp))))
+ dumpint(uint64(mp.id))
+ dumpint(mp.procid)
+ }
+}
+
+//go:systemstack
+func dumpmemstats(m *MemStats) {
+ assertWorldStopped()
+
+ // The values dumped here must match the fields of the
+ // exported MemStats structure and must appear in the
+ // same order.
+ dumpint(tagMemStats)
+ dumpint(m.Alloc)
+ dumpint(m.TotalAlloc)
+ dumpint(m.Sys)
+ dumpint(m.Lookups)
+ dumpint(m.Mallocs)
+ dumpint(m.Frees)
+ dumpint(m.HeapAlloc)
+ dumpint(m.HeapSys)
+ dumpint(m.HeapIdle)
+ dumpint(m.HeapInuse)
+ dumpint(m.HeapReleased)
+ dumpint(m.HeapObjects)
+ dumpint(m.StackInuse)
+ dumpint(m.StackSys)
+ dumpint(m.MSpanInuse)
+ dumpint(m.MSpanSys)
+ dumpint(m.MCacheInuse)
+ dumpint(m.MCacheSys)
+ dumpint(m.BuckHashSys)
+ dumpint(m.GCSys)
+ dumpint(m.OtherSys)
+ dumpint(m.NextGC)
+ dumpint(m.LastGC)
+ dumpint(m.PauseTotalNs)
+ for i := 0; i < 256; i++ {
+ dumpint(m.PauseNs[i])
+ }
+ dumpint(uint64(m.NumGC))
+}
+
+func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
+ stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
+ dumpint(tagMemProf)
+ dumpint(uint64(uintptr(unsafe.Pointer(b))))
+ dumpint(uint64(size))
+ dumpint(uint64(nstk))
+ for i := uintptr(0); i < nstk; i++ {
+ pc := stk[i]
+ f := findfunc(pc)
+ if !f.valid() {
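+ // No symbol information for this PC: synthesize a frame name of the
+ // form "(0x...)", building the hex digits into buf back to front.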
+ var buf [64]byte
+ n := len(buf)
+ n--
+ buf[n] = ')'
+ if pc == 0 {
+ n--
+ buf[n] = '0'
+ } else {
+ for pc > 0 {
+ n--
+ buf[n] = "0123456789abcdef"[pc&15]
+ pc >>= 4
+ }
+ }
+ n--
+ buf[n] = 'x'
+ n--
+ buf[n] = '0'
+ n--
+ buf[n] = '('
+ dumpslice(buf[n:])
+ dumpstr("?")
+ dumpint(0)
+ } else {
+ dumpstr(funcname(f))
+ if i > 0 && pc > f.entry() {
+ pc--
+ }
+ file, line := funcline(f, pc)
+ dumpstr(file)
+ dumpint(uint64(line))
+ }
+ }
+ dumpint(uint64(allocs))
+ dumpint(uint64(frees))
+}
+
+func dumpmemprof() {
+ // To protect mheap_.allspans.
+ assertWorldStopped()
+
+ iterate_memprof(dumpmemprof_callback)
+ for _, s := range mheap_.allspans {
+ if s.state.get() != mSpanInUse {
+ continue
+ }
+ for sp := s.specials; sp != nil; sp = sp.next {
+ if sp.kind != _KindSpecialProfile {
+ continue
+ }
+ spp := (*specialprofile)(unsafe.Pointer(sp))
+ p := s.base() + uintptr(spp.special.offset)
+ dumpint(tagAllocSample)
+ dumpint(uint64(p))
+ dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
+ }
+ }
+}
+
+var dumphdr = []byte("go1.7 heap dump\n")
+
+func mdump(m *MemStats) {
+ assertWorldStopped()
+
+ // make sure we're done sweeping
+ for _, s := range mheap_.allspans {
+ if s.state.get() == mSpanInUse {
+ s.ensureSwept()
+ }
+ }
+ memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
+ dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
+ dumpparams()
+ dumpitabs()
+ dumpobjs()
+ dumpgs()
+ dumpms()
+ dumproots()
+ dumpmemstats(m)
+ dumpmemprof()
+ dumpint(tagEOF)
+ flush()
+}
+
+func writeheapdump_m(fd uintptr, m *MemStats) {
+ assertWorldStopped()
+
+ _g_ := getg()
+ casgstatus(_g_.m.curg, _Grunning, _Gwaiting)
+ _g_.waitreason = waitReasonDumpingHeap
+
+ // Set dump file.
+ dumpfd = fd
+
+ // Call dump routine.
+ mdump(m)
+
+ // Reset dump file.
+ dumpfd = 0
+ if tmpbuf != nil {
+ sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
+ tmpbuf = nil
+ }
+
+ casgstatus(_g_.m.curg, _Gwaiting, _Grunning)
+}
+
+// dumpint() the kind & offset of each field in an object.
+func dumpfields(bv bitvector) {
+ dumpbv(&bv, 0)
+ dumpint(fieldKindEol)
+}
+
+func makeheapobjbv(p uintptr, size uintptr) bitvector {
+ // Extend the temp buffer if necessary.
+ nptr := size / goarch.PtrSize
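+ // For example, a 64-byte object on a 64-bit system has nptr = 8
+ // pointer-sized words, so nptr/8+1 = 2 bytes of bitmap suffice.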
+ if uintptr(len(tmpbuf)) < nptr/8+1 {
+ if tmpbuf != nil {
+ sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
+ }
+ n := nptr/8 + 1
+ p := sysAlloc(n, &memstats.other_sys)
+ if p == nil {
+ throw("heapdump: out of memory")
+ }
+ tmpbuf = (*[1 << 30]byte)(p)[:n]
+ }
+ // Convert heap bitmap to pointer bitmap.
+ for i := uintptr(0); i < nptr/8+1; i++ {
+ tmpbuf[i] = 0
+ }
+ i := uintptr(0)
+ hbits := heapBitsForAddr(p)
+ for ; i < nptr; i++ {
+ if !hbits.morePointers() {
+ break // end of object
+ }
+ if hbits.isPointer() {
+ tmpbuf[i/8] |= 1 << (i % 8)
+ }
+ hbits = hbits.next()
+ }
+ return bitvector{int32(i), &tmpbuf[0]}
+}
diff --git a/contrib/go/_std_1.19/src/runtime/histogram.go b/contrib/go/_std_1.19/src/runtime/histogram.go
new file mode 100644
index 0000000000..eddfbab3bc
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/histogram.go
@@ -0,0 +1,171 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+const (
+ // For the time histogram type, we use an HDR histogram.
+ // Values are placed in super-buckets based solely on the most
+ // significant set bit. Thus, super-buckets are power-of-2 sized.
+ // Values are then placed into sub-buckets based on the value of
+ // the next timeHistSubBucketBits most significant bits. Thus,
+ // sub-buckets are linear within a super-bucket.
+ //
+ // Therefore, the number of sub-buckets (timeHistNumSubBuckets)
+ // defines the error. This error may be computed as
+ // 1/timeHistNumSubBuckets*100%. For example, for 16 sub-buckets
+ // per super-bucket the error is approximately 6%.
+ //
+ // The number of super-buckets (timeHistNumSuperBuckets), on the
+ // other hand, defines the range. To reserve room for sub-buckets,
+ // bit timeHistSubBucketBits is the first bit considered for
+ // super-buckets, so super-bucket indices are adjusted accordingly.
+ //
+ // As an example, consider 45 super-buckets with 16 sub-buckets.
+ //
+ // 00110
+ // ^----
+ // │ ^
+ // │ └---- Lowest 4 bits -> sub-bucket 6
+ // └------- Bit 4 unset -> super-bucket 0
+ //
+ // 10110
+ // ^----
+ // │ ^
+ // │ └---- Next 4 bits -> sub-bucket 6
+ // └------- Bit 4 set -> super-bucket 1
+ //
+ // 100010
+ // ^----^
+ // │ ^ └-- Lower bits ignored
+ // │ └---- Next 4 bits -> sub-bucket 1
+ // └------- Bit 5 set -> super-bucket 2
+ //
+ // Following this pattern, super-bucket 44 will have bit 47 set. We don't
+ // have any buckets for higher values, so the highest sub-bucket will
+ // contain values of 2^48-1 nanoseconds or approx. 3 days. This range is
+ // more than enough to handle durations produced by the runtime.
+ timeHistSubBucketBits = 4
+ timeHistNumSubBuckets = 1 << timeHistSubBucketBits
+ timeHistNumSuperBuckets = 45
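+ // timeHistTotalBuckets counts every sub-bucket plus the underflow bucket: 45*16 + 1 = 721.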
+ timeHistTotalBuckets = timeHistNumSuperBuckets*timeHistNumSubBuckets + 1
+)
+
+// timeHistogram represents a distribution of durations in
+// nanoseconds.
+//
+// The accuracy and range of the histogram is defined by the
+// timeHistSubBucketBits and timeHistNumSuperBuckets constants.
+//
+// It is an HDR histogram with exponentially-distributed
+// buckets and linearly distributed sub-buckets.
+//
+// Counts in the histogram are updated atomically, so it is safe
+// for concurrent use. It is also safe to read all the values
+// atomically.
+type timeHistogram struct {
+ counts [timeHistNumSuperBuckets * timeHistNumSubBuckets]uint64
+
+ // underflow counts all the times we got a negative duration
+ // sample. Because of how time works on some platforms, it's
+ // possible to measure negative durations. We could ignore them,
+ // but we record them anyway because it's better to have some
+ // signal that it's happening than just missing samples.
+ underflow uint64
+}
+
+// record adds the given duration to the distribution.
+//
+// Disallow preemptions and stack growths because this function
+// may run in sensitive locations.
+//
+//go:nosplit
+func (h *timeHistogram) record(duration int64) {
+ if duration < 0 {
+ atomic.Xadd64(&h.underflow, 1)
+ return
+ }
+ // The index of the exponential bucket is just the index
+ // of the highest set bit adjusted for how many bits we
+ // use for the subbucket. Note that it's timeHistSubBucketBits-1
+ // because we use the 0th bucket to hold values < timeHistNumSubBuckets.
+ var superBucket, subBucket uint
+ if duration >= timeHistNumSubBuckets {
+ // At this point, we know the duration value will always be
+ // more than timeHistSubBucketBits bits long.
+ superBucket = uint(sys.Len64(uint64(duration))) - timeHistSubBucketBits
+ if superBucket*timeHistNumSubBuckets >= uint(len(h.counts)) {
+ // The bucket index we got is larger than what we support, so
+ // include this count in the highest bucket, which extends to
+ // infinity.
+ superBucket = timeHistNumSuperBuckets - 1
+ subBucket = timeHistNumSubBuckets - 1
+ } else {
+ // The linear subbucket index is just the timeHistSubBucketBits
+ // bits after the top bit. To extract that value, shift down
+ // the duration such that we leave the top bit and the next bits
+ // intact, then extract the index.
+ subBucket = uint((duration >> (superBucket - 1)) % timeHistNumSubBuckets)
+ }
+ } else {
+ subBucket = uint(duration)
+ }
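+ // Worked example: duration = 100ns = 0b1100100, so Len64 = 7 and
+ // superBucket = 7-4 = 3; subBucket = (100>>2)%16 = 9, and the count
+ // lands in h.counts[3*16+9].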
+ atomic.Xadd64(&h.counts[superBucket*timeHistNumSubBuckets+subBucket], 1)
+}
+
+const (
+ fInf = 0x7FF0000000000000
+ fNegInf = 0xFFF0000000000000
+)
+
+func float64Inf() float64 {
+ inf := uint64(fInf)
+ return *(*float64)(unsafe.Pointer(&inf))
+}
+
+func float64NegInf() float64 {
+ inf := uint64(fNegInf)
+ return *(*float64)(unsafe.Pointer(&inf))
+}
+
+// timeHistogramMetricsBuckets generates a slice of boundaries for
+// the timeHistogram. These boundaries are in seconds, unlike
+// the timeHistogram itself, which counts durations in nanoseconds.
+func timeHistogramMetricsBuckets() []float64 {
+ b := make([]float64, timeHistTotalBuckets+1)
+ b[0] = float64NegInf()
+ // Super-bucket 0 has no bits above timeHistSubBucketBits
+ // set, so its boundaries are simply the integers
+ // 0 through timeHistNumSubBuckets-1, in nanoseconds.
+ for i := 0; i < timeHistNumSubBuckets; i++ {
+ bucketNanos := uint64(i)
+ b[i+1] = float64(bucketNanos) / 1e9
+ }
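+ // At this point b[1] through b[16] hold 0ns..15ns; the first boundary
+ // of super-bucket 1 (16ns) is assigned to b[17] below.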
+ // Generate the rest of the super-buckets. It's easier to reason
+ // about if we cut out the 0'th bucket, so subtract one since
+ // we just handled that bucket.
+ for i := 0; i < timeHistNumSuperBuckets-1; i++ {
+ for j := 0; j < timeHistNumSubBuckets; j++ {
+ // Set the super-bucket bit.
+ bucketNanos := uint64(1) << (i + timeHistSubBucketBits)
+ // Set the sub-bucket bits.
+ bucketNanos |= uint64(j) << i
+ // The index for this bucket is going to be the (i+1)'th super bucket
+ // (note that we're starting from zero, but handled the first super-bucket
+ // earlier, so we need to compensate), and the j'th sub bucket.
+ // Add 1 because we left space for -Inf.
+ bucketIndex := (i+1)*timeHistNumSubBuckets + j + 1
+ // Convert nanoseconds to seconds via a division.
+ // These values will all be exactly representable by a float64.
+ b[bucketIndex] = float64(bucketNanos) / 1e9
+ }
+ }
+ b[len(b)-1] = float64Inf()
+ return b
+}
diff --git a/contrib/go/_std_1.18/src/runtime/iface.go b/contrib/go/_std_1.19/src/runtime/iface.go
index a4d56dd33b..a4d56dd33b 100644
--- a/contrib/go/_std_1.18/src/runtime/iface.go
+++ b/contrib/go/_std_1.19/src/runtime/iface.go
diff --git a/contrib/go/_std_1.19/src/runtime/internal/atomic/atomic_amd64.go b/contrib/go/_std_1.19/src/runtime/internal/atomic/atomic_amd64.go
new file mode 100644
index 0000000000..52a83620c8
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/internal/atomic/atomic_amd64.go
@@ -0,0 +1,117 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+import "unsafe"
+
+// Export some functions via linkname to assembly in sync/atomic.
+//
+//go:linkname Load
+//go:linkname Loadp
+//go:linkname Load64
+
+//go:nosplit
+//go:noinline
+func Load(ptr *uint32) uint32 {
+ return *ptr
+}
+
+//go:nosplit
+//go:noinline
+func Loadp(ptr unsafe.Pointer) unsafe.Pointer {
+ return *(*unsafe.Pointer)(ptr)
+}
+
+//go:nosplit
+//go:noinline
+func Load64(ptr *uint64) uint64 {
+ return *ptr
+}
+
+//go:nosplit
+//go:noinline
+func LoadAcq(ptr *uint32) uint32 {
+ return *ptr
+}
+
+//go:nosplit
+//go:noinline
+func LoadAcq64(ptr *uint64) uint64 {
+ return *ptr
+}
+
+//go:nosplit
+//go:noinline
+func LoadAcquintptr(ptr *uintptr) uintptr {
+ return *ptr
+}
+
+//go:noescape
+func Xadd(ptr *uint32, delta int32) uint32
+
+//go:noescape
+func Xadd64(ptr *uint64, delta int64) uint64
+
+//go:noescape
+func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
+
+//go:noescape
+func Xchg(ptr *uint32, new uint32) uint32
+
+//go:noescape
+func Xchg64(ptr *uint64, new uint64) uint64
+
+//go:noescape
+func Xchguintptr(ptr *uintptr, new uintptr) uintptr
+
+//go:nosplit
+//go:noinline
+func Load8(ptr *uint8) uint8 {
+ return *ptr
+}
+
+//go:noescape
+func And8(ptr *uint8, val uint8)
+
+//go:noescape
+func Or8(ptr *uint8, val uint8)
+
+//go:noescape
+func And(ptr *uint32, val uint32)
+
+//go:noescape
+func Or(ptr *uint32, val uint32)
+
+// NOTE: Do not add atomicxor8 (XOR is not idempotent).
+
+//go:noescape
+func Cas64(ptr *uint64, old, new uint64) bool
+
+//go:noescape
+func CasRel(ptr *uint32, old, new uint32) bool
+
+//go:noescape
+func Store(ptr *uint32, val uint32)
+
+//go:noescape
+func Store8(ptr *uint8, val uint8)
+
+//go:noescape
+func Store64(ptr *uint64, val uint64)
+
+//go:noescape
+func StoreRel(ptr *uint32, val uint32)
+
+//go:noescape
+func StoreRel64(ptr *uint64, val uint64)
+
+//go:noescape
+func StoreReluintptr(ptr *uintptr, val uintptr)
+
+// StorepNoWB performs *ptr = val atomically and without a write
+// barrier.
+//
+// NO go:noescape annotation; see atomic_pointer.go.
+func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
diff --git a/contrib/go/_std_1.18/src/runtime/internal/atomic/atomic_amd64.s b/contrib/go/_std_1.19/src/runtime/internal/atomic/atomic_amd64.s
index d21514b36b..d21514b36b 100644
--- a/contrib/go/_std_1.18/src/runtime/internal/atomic/atomic_amd64.s
+++ b/contrib/go/_std_1.19/src/runtime/internal/atomic/atomic_amd64.s
diff --git a/contrib/go/_std_1.18/src/runtime/internal/atomic/doc.go b/contrib/go/_std_1.19/src/runtime/internal/atomic/doc.go
index 08e6b6ce0b..08e6b6ce0b 100644
--- a/contrib/go/_std_1.18/src/runtime/internal/atomic/doc.go
+++ b/contrib/go/_std_1.19/src/runtime/internal/atomic/doc.go
diff --git a/contrib/go/_std_1.18/src/runtime/internal/atomic/stubs.go b/contrib/go/_std_1.19/src/runtime/internal/atomic/stubs.go
index 7df8d9c863..7df8d9c863 100644
--- a/contrib/go/_std_1.18/src/runtime/internal/atomic/stubs.go
+++ b/contrib/go/_std_1.19/src/runtime/internal/atomic/stubs.go
diff --git a/contrib/go/_std_1.19/src/runtime/internal/atomic/types.go b/contrib/go/_std_1.19/src/runtime/internal/atomic/types.go
new file mode 100644
index 0000000000..d346a76b67
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/internal/atomic/types.go
@@ -0,0 +1,431 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+import "unsafe"
+
+// Int32 is an atomically accessed int32 value.
+//
+// An Int32 must not be copied.
+type Int32 struct {
+ noCopy noCopy
+ value int32
+}
+
+// Load accesses and returns the value atomically.
+func (i *Int32) Load() int32 {
+ return Loadint32(&i.value)
+}
+
+// Store updates the value atomically.
+func (i *Int32) Store(value int32) {
+ Storeint32(&i.value, value)
+}
+
+// CompareAndSwap atomically compares i's value with old,
+// and if they're equal, swaps i's value with new.
+//
+// Returns true if the operation succeeded.
+func (i *Int32) CompareAndSwap(old, new int32) bool {
+ return Casint32(&i.value, old, new)
+}
+
+// Swap replaces i's value with new, returning
+// i's value before the replacement.
+func (i *Int32) Swap(new int32) int32 {
+ return Xchgint32(&i.value, new)
+}
+
+// Add adds delta to i atomically, returning
+// the new updated value.
+//
+// This operation wraps around in the usual
+// two's-complement way.
+func (i *Int32) Add(delta int32) int32 {
+ return Xaddint32(&i.value, delta)
+}
+
+// Int64 is an atomically accessed int64 value.
+//
+// 8-byte aligned on all platforms, unlike a regular int64.
+//
+// An Int64 must not be copied.
+type Int64 struct {
+ noCopy noCopy
+ _ align64
+ value int64
+}
+
+// Load accesses and returns the value atomically.
+func (i *Int64) Load() int64 {
+ return Loadint64(&i.value)
+}
+
+// Store updates the value atomically.
+func (i *Int64) Store(value int64) {
+ Storeint64(&i.value, value)
+}
+
+// CompareAndSwap atomically compares i's value with old,
+// and if they're equal, swaps i's value with new.
+//
+// Returns true if the operation succeeded.
+func (i *Int64) CompareAndSwap(old, new int64) bool {
+ return Casint64(&i.value, old, new)
+}
+
+// Swap replaces i's value with new, returning
+// i's value before the replacement.
+func (i *Int64) Swap(new int64) int64 {
+ return Xchgint64(&i.value, new)
+}
+
+// Add adds delta to i atomically, returning
+// the new updated value.
+//
+// This operation wraps around in the usual
+// two's-complement way.
+func (i *Int64) Add(delta int64) int64 {
+ return Xaddint64(&i.value, delta)
+}
+
+// Uint8 is an atomically accessed uint8 value.
+//
+// A Uint8 must not be copied.
+type Uint8 struct {
+ noCopy noCopy
+ value uint8
+}
+
+// Load accesses and returns the value atomically.
+func (u *Uint8) Load() uint8 {
+ return Load8(&u.value)
+}
+
+// Store updates the value atomically.
+func (u *Uint8) Store(value uint8) {
+ Store8(&u.value, value)
+}
+
+// And takes value and performs a bit-wise
+// "and" operation with the value of u, storing
+// the result into u.
+//
+// The full process is performed atomically.
+func (u *Uint8) And(value uint8) {
+ And8(&u.value, value)
+}
+
+// Or takes value and performs a bit-wise
+// "or" operation with the value of u, storing
+// the result into u.
+//
+// The full process is performed atomically.
+func (u *Uint8) Or(value uint8) {
+ Or8(&u.value, value)
+}
+
+// Bool is an atomically accessed bool value.
+//
+// A Bool must not be copied.
+type Bool struct {
+ // Inherits noCopy from Uint8.
+ u Uint8
+}
+
+// Load accesses and returns the value atomically.
+func (b *Bool) Load() bool {
+ return b.u.Load() != 0
+}
+
+// Store updates the value atomically.
+func (b *Bool) Store(value bool) {
+ s := uint8(0)
+ if value {
+ s = 1
+ }
+ b.u.Store(s)
+}
+
+// Uint32 is an atomically accessed uint32 value.
+//
+// A Uint32 must not be copied.
+type Uint32 struct {
+ noCopy noCopy
+ value uint32
+}
+
+// Load accesses and returns the value atomically.
+func (u *Uint32) Load() uint32 {
+ return Load(&u.value)
+}
+
+// LoadAcquire is a partially unsynchronized version
+// of Load that relaxes ordering constraints. Other threads
+// may observe operations that precede this operation to
+// occur after it, but no operation that occurs after it
+// on this thread can be observed to occur before it.
+//
+// WARNING: Use sparingly and with great care.
+func (u *Uint32) LoadAcquire() uint32 {
+ return LoadAcq(&u.value)
+}
+
+// Store updates the value atomically.
+func (u *Uint32) Store(value uint32) {
+ Store(&u.value, value)
+}
+
+// StoreRelease is a partially unsynchronized version
+// of Store that relaxes ordering constraints. Other threads
+// may observe operations that occur after this operation to
+// precede it, but no operation that precedes it
+// on this thread can be observed to occur after it.
+//
+// WARNING: Use sparingly and with great care.
+func (u *Uint32) StoreRelease(value uint32) {
+ StoreRel(&u.value, value)
+}
+
+// CompareAndSwap atomically compares u's value with old,
+// and if they're equal, swaps u's value with new.
+//
+// Returns true if the operation succeeded.
+func (u *Uint32) CompareAndSwap(old, new uint32) bool {
+ return Cas(&u.value, old, new)
+}
+
+// CompareAndSwapRelease is a partially unsynchronized version
+// of Cas that relaxes ordering constraints. Other threads
+// may observe operations that occur after this operation to
+// precede it, but no operation that precedes it
+// on this thread can be observed to occur after it.
+//
+// Returns true if the operation succeeded.
+//
+// WARNING: Use sparingly and with great care.
+func (u *Uint32) CompareAndSwapRelease(old, new uint32) bool {
+ return CasRel(&u.value, old, new)
+}
+
+// Swap replaces u's value with new, returning
+// u's value before the replacement.
+func (u *Uint32) Swap(value uint32) uint32 {
+ return Xchg(&u.value, value)
+}
+
+// And takes value and performs a bit-wise
+// "and" operation with the value of u, storing
+// the result into u.
+//
+// The full process is performed atomically.
+func (u *Uint32) And(value uint32) {
+ And(&u.value, value)
+}
+
+// Or takes value and performs a bit-wise
+// "or" operation with the value of u, storing
+// the result into u.
+//
+// The full process is performed atomically.
+func (u *Uint32) Or(value uint32) {
+ Or(&u.value, value)
+}
+
+// Add adds delta to u atomically, returning
+// the new updated value.
+//
+// This operation wraps around in the usual
+// two's-complement way.
+func (u *Uint32) Add(delta int32) uint32 {
+ return Xadd(&u.value, delta)
+}
+
+// Uint64 is an atomically accessed uint64 value.
+//
+// 8-byte aligned on all platforms, unlike a regular uint64.
+//
+// A Uint64 must not be copied.
+type Uint64 struct {
+ noCopy noCopy
+ _ align64
+ value uint64
+}
+
+// Load accesses and returns the value atomically.
+func (u *Uint64) Load() uint64 {
+ return Load64(&u.value)
+}
+
+// Store updates the value atomically.
+func (u *Uint64) Store(value uint64) {
+ Store64(&u.value, value)
+}
+
+// CompareAndSwap atomically compares u's value with old,
+// and if they're equal, swaps u's value with new.
+//
+// Returns true if the operation succeeded.
+func (u *Uint64) CompareAndSwap(old, new uint64) bool {
+ return Cas64(&u.value, old, new)
+}
+
+// Swap replaces u's value with new, returning
+// u's value before the replacement.
+func (u *Uint64) Swap(value uint64) uint64 {
+ return Xchg64(&u.value, value)
+}
+
+// Add adds delta to u atomically, returning
+// the new updated value.
+//
+// This operation wraps around in the usual
+// two's-complement way.
+func (u *Uint64) Add(delta int64) uint64 {
+ return Xadd64(&u.value, delta)
+}
+
+// Uintptr is an atomically accessed uintptr value.
+//
+// A Uintptr must not be copied.
+type Uintptr struct {
+ noCopy noCopy
+ value uintptr
+}
+
+// Load accesses and returns the value atomically.
+func (u *Uintptr) Load() uintptr {
+ return Loaduintptr(&u.value)
+}
+
+// LoadAcquire is a partially unsynchronized version
+// of Load that relaxes ordering constraints. Other threads
+// may observe operations that precede this operation to
+// occur after it, but no operation that occurs after it
+// on this thread can be observed to occur before it.
+//
+// WARNING: Use sparingly and with great care.
+func (u *Uintptr) LoadAcquire() uintptr {
+ return LoadAcquintptr(&u.value)
+}
+
+// Store updates the value atomically.
+func (u *Uintptr) Store(value uintptr) {
+ Storeuintptr(&u.value, value)
+}
+
+// StoreRelease is a partially unsynchronized version
+// of Store that relaxes ordering constraints. Other threads
+// may observe operations that occur after this operation to
+// precede it, but no operation that precedes it
+// on this thread can be observed to occur after it.
+//
+// WARNING: Use sparingly and with great care.
+func (u *Uintptr) StoreRelease(value uintptr) {
+ StoreReluintptr(&u.value, value)
+}
+
+// CompareAndSwap atomically compares u's value with old,
+// and if they're equal, swaps u's value with new.
+//
+// Returns true if the operation succeeded.
+func (u *Uintptr) CompareAndSwap(old, new uintptr) bool {
+ return Casuintptr(&u.value, old, new)
+}
+
+// Swap replaces u's value with new, returning
+// u's value before the replacement.
+func (u *Uintptr) Swap(value uintptr) uintptr {
+ return Xchguintptr(&u.value, value)
+}
+
+// Add adds delta to u atomically, returning
+// the new updated value.
+//
+// This operation wraps around in the usual
+// two's-complement way.
+func (u *Uintptr) Add(delta uintptr) uintptr {
+ return Xadduintptr(&u.value, delta)
+}
+
+// Float64 is an atomically accessed float64 value.
+//
+// 8-byte aligned on all platforms, unlike a regular float64.
+//
+// A Float64 must not be copied.
+type Float64 struct {
+ // Inherits noCopy and align64 from Uint64.
+ u Uint64
+}
+
+// Load accesses and returns the value atomically.
+func (f *Float64) Load() float64 {
+ r := f.u.Load()
+ return *(*float64)(unsafe.Pointer(&r))
+}
+
+// Store updates the value atomically.
+func (f *Float64) Store(value float64) {
+ f.u.Store(*(*uint64)(unsafe.Pointer(&value)))
+}
+
+// UnsafePointer is an atomically accessed unsafe.Pointer value.
+//
+// Note that because of the atomicity guarantees, stores to values
+// of this type never trigger a write barrier, and the relevant
+// methods are suffixed with "NoWB" to indicate that explicitly.
+// As a result, this type should be used carefully, and sparingly,
+// mostly with values that do not live in the Go heap anyway.
+//
+// An UnsafePointer must not be copied.
+type UnsafePointer struct {
+ noCopy noCopy
+ value unsafe.Pointer
+}
+
+// Load accesses and returns the value atomically.
+func (u *UnsafePointer) Load() unsafe.Pointer {
+ return Loadp(unsafe.Pointer(&u.value))
+}
+
+// StoreNoWB updates the value atomically.
+//
+// WARNING: As the name implies this operation does *not*
+// perform a write barrier on value, and so this operation may
+// hide pointers from the GC. Use with care and sparingly.
+// It is safe to use with values not found in the Go heap.
+func (u *UnsafePointer) StoreNoWB(value unsafe.Pointer) {
+ StorepNoWB(unsafe.Pointer(&u.value), value)
+}
+
+// CompareAndSwapNoWB atomically (with respect to other methods)
+// compares u's value with old, and if they're equal,
+// swaps u's value with new.
+//
+// Returns true if the operation succeeded.
+//
+// WARNING: As the name implies this operation does *not*
+// perform a write barrier on value, and so this operation may
+// hide pointers from the GC. Use with care and sparingly.
+// It is safe to use with values not found in the Go heap.
+func (u *UnsafePointer) CompareAndSwapNoWB(old, new unsafe.Pointer) bool {
+ return Casp1(&u.value, old, new)
+}
+
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://golang.org/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
+func (*noCopy) Unlock() {}
+
+// align64 may be added to structs that must be 64-bit aligned.
+// This struct is recognized by a special case in the compiler
+// and will not work if copied to any other package.
+type align64 struct{}
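+
+// A minimal usage sketch (illustrative only):
+//
+// type example struct {
+// 	_ align64
+// 	v uint64 // 8-byte aligned even on 32-bit platforms
+// }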
diff --git a/contrib/go/_std_1.18/src/runtime/internal/atomic/types_64bit.go b/contrib/go/_std_1.19/src/runtime/internal/atomic/types_64bit.go
index 43c1ba2709..43c1ba2709 100644
--- a/contrib/go/_std_1.18/src/runtime/internal/atomic/types_64bit.go
+++ b/contrib/go/_std_1.19/src/runtime/internal/atomic/types_64bit.go
diff --git a/contrib/go/_std_1.18/src/runtime/internal/atomic/unaligned.go b/contrib/go/_std_1.19/src/runtime/internal/atomic/unaligned.go
index a859de4144..a859de4144 100644
--- a/contrib/go/_std_1.18/src/runtime/internal/atomic/unaligned.go
+++ b/contrib/go/_std_1.19/src/runtime/internal/atomic/unaligned.go
diff --git a/contrib/go/_std_1.18/src/runtime/internal/math/math.go b/contrib/go/_std_1.19/src/runtime/internal/math/math.go
index c3fac366be..c3fac366be 100644
--- a/contrib/go/_std_1.18/src/runtime/internal/math/math.go
+++ b/contrib/go/_std_1.19/src/runtime/internal/math/math.go
diff --git a/contrib/go/_std_1.18/src/runtime/internal/sys/consts.go b/contrib/go/_std_1.19/src/runtime/internal/sys/consts.go
index fffcf81d1f..fffcf81d1f 100644
--- a/contrib/go/_std_1.18/src/runtime/internal/sys/consts.go
+++ b/contrib/go/_std_1.19/src/runtime/internal/sys/consts.go
diff --git a/contrib/go/_std_1.18/src/runtime/internal/sys/intrinsics.go b/contrib/go/_std_1.19/src/runtime/internal/sys/intrinsics.go
index 5af49011e9..5af49011e9 100644
--- a/contrib/go/_std_1.18/src/runtime/internal/sys/intrinsics.go
+++ b/contrib/go/_std_1.19/src/runtime/internal/sys/intrinsics.go
diff --git a/contrib/go/_std_1.18/src/runtime/internal/sys/intrinsics_common.go b/contrib/go/_std_1.19/src/runtime/internal/sys/intrinsics_common.go
index 48d9759ca9..48d9759ca9 100644
--- a/contrib/go/_std_1.18/src/runtime/internal/sys/intrinsics_common.go
+++ b/contrib/go/_std_1.19/src/runtime/internal/sys/intrinsics_common.go
diff --git a/contrib/go/_std_1.18/src/runtime/internal/sys/sys.go b/contrib/go/_std_1.19/src/runtime/internal/sys/sys.go
index 694101d36f..694101d36f 100644
--- a/contrib/go/_std_1.18/src/runtime/internal/sys/sys.go
+++ b/contrib/go/_std_1.19/src/runtime/internal/sys/sys.go
diff --git a/contrib/go/_std_1.18/src/runtime/internal/sys/zversion.go b/contrib/go/_std_1.19/src/runtime/internal/sys/zversion.go
index b058a3db70..b058a3db70 100644
--- a/contrib/go/_std_1.18/src/runtime/internal/sys/zversion.go
+++ b/contrib/go/_std_1.19/src/runtime/internal/sys/zversion.go
diff --git a/contrib/go/_std_1.19/src/runtime/internal/syscall/asm_linux_amd64.s b/contrib/go/_std_1.19/src/runtime/internal/syscall/asm_linux_amd64.s
new file mode 100644
index 0000000000..3740ef1beb
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/internal/syscall/asm_linux_amd64.s
@@ -0,0 +1,47 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+//
+// We need to convert to the syscall ABI.
+//
+// arg | ABIInternal | Syscall
+// ---------------------------
+// num | AX | AX
+// a1 | BX | DI
+// a2 | CX | SI
+// a3 | DI | DX
+// a4 | SI | R10
+// a5 | R8 | R8
+// a6 | R9 | R9
+//
+// r1 | AX | AX
+// r2 | BX | DX
+// err | CX | part of AX
+//
+// Note that this differs from "standard" ABI convention, which would pass 4th
+// arg in CX, not R10.
+TEXT ·Syscall6<ABIInternal>(SB),NOSPLIT,$0
+ // a6 already in R9.
+ // a5 already in R8.
+ MOVQ SI, R10 // a4
+ MOVQ DI, DX // a3
+ MOVQ CX, SI // a2
+ MOVQ BX, DI // a1
+ // num already in AX.
+ SYSCALL
+ CMPQ AX, $0xfffffffffffff001
+ JLS ok
+ NEGQ AX
+ MOVQ AX, CX // errno
+ MOVQ $-1, AX // r1
+ MOVQ $0, BX // r2
+ RET
+ok:
+ // r1 already in AX.
+ MOVQ DX, BX // r2
+ MOVQ $0, CX // errno
+ RET
diff --git a/contrib/go/_std_1.19/src/runtime/internal/syscall/syscall_linux.go b/contrib/go/_std_1.19/src/runtime/internal/syscall/syscall_linux.go
new file mode 100644
index 0000000000..7f268e8fba
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/internal/syscall/syscall_linux.go
@@ -0,0 +1,39 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package syscall provides the syscall primitives required for the runtime.
+package syscall
+
+import (
+ _ "unsafe" // for go:linkname
+)
+
+// TODO(https://go.dev/issue/51087): This package is incomplete and currently
+// only contains very minimal support for Linux.
+
+// Syscall6 calls system call number 'num' with arguments a1-6.
+func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+
+// syscall_RawSyscall6 is a push linkname to export Syscall6 as
+// syscall.RawSyscall6.
+//
+// //go:uintptrkeepalive because the uintptr arguments may be pointers that were
+// converted to uintptr and need to be kept alive in the caller (this is implied
+// for Syscall6 since it has no body).
+//
+// //go:nosplit because stack copying does not account for uintptrkeepalive, so
+// the stack must not grow. Stack copying cannot blindly assume that all
+// uintptr arguments are pointers, because some values may look like pointers,
+// but not really be pointers, and adjusting their value would break the call.
+//
+// This is a separate wrapper because we can't export one function as two
+// names. The assembly implementations name themselves Syscall6, so they would
+// not be affected by a linkname.
+//
+//go:uintptrkeepalive
+//go:nosplit
+//go:linkname syscall_RawSyscall6 syscall.RawSyscall6
+func syscall_RawSyscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr) {
+ return Syscall6(num, a1, a2, a3, a4, a5, a6)
+}
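+
+// A hypothetical caller on linux/amd64, where syscall number 39 is SYS_getpid:
+//
+// 	pid, _, errno := Syscall6(39, 0, 0, 0, 0, 0, 0)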
diff --git a/contrib/go/_std_1.18/src/runtime/lfstack.go b/contrib/go/_std_1.19/src/runtime/lfstack.go
index 406561a275..406561a275 100644
--- a/contrib/go/_std_1.18/src/runtime/lfstack.go
+++ b/contrib/go/_std_1.19/src/runtime/lfstack.go
diff --git a/contrib/go/_std_1.19/src/runtime/lfstack_64bit.go b/contrib/go/_std_1.19/src/runtime/lfstack_64bit.go
new file mode 100644
index 0000000000..154130cf63
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/lfstack_64bit.go
@@ -0,0 +1,58 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm
+
+package runtime
+
+import "unsafe"
+
+const (
+ // addrBits is the number of bits needed to represent a virtual address.
+ //
+ // See heapAddrBits for a table of address space sizes on
+ // various architectures. 48 bits is enough for all
+ // architectures except s390x.
+ //
+ // On AMD64, virtual addresses are 48-bit (or 57-bit) numbers sign extended to 64.
+ // We shift the address left 16 to eliminate the sign extended part and make
+ // room in the bottom for the count.
+ //
+ // On s390x, virtual addresses are 64-bit. There's not much we
+ // can do about this, so we just hope that the kernel doesn't
+ // get to really high addresses and panic if it does.
+ addrBits = 48
+
+ // In addition to the 16 bits taken from the top, we can take 3 from the
+ // bottom, because node must be pointer-aligned, giving a total of 19 bits
+ // of count.
+ cntBits = 64 - addrBits + 3
+
+ // On AIX, 64-bit addresses are split into 36-bit segment number and 28-bit
+ // offset in segment. Segment numbers in the range 0x0A0000000-0x0AFFFFFFF (LSA)
+ // are available for mmap.
+ // We assume all lfnode addresses are from memory allocated with mmap.
+ // We use one bit to distinguish between the two ranges.
+ aixAddrBits = 57
+ aixCntBits = 64 - aixAddrBits + 3
+)
+
+func lfstackPack(node *lfnode, cnt uintptr) uint64 {
+ if GOARCH == "ppc64" && GOOS == "aix" {
+ return uint64(uintptr(unsafe.Pointer(node)))<<(64-aixAddrBits) | uint64(cnt&(1<<aixCntBits-1))
+ }
+ return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1))
+}
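+
+// For example, with addrBits=48 and cntBits=19, lfstackPack lays the word out
+// as the 48-bit address in bits 63..16 and the 19-bit count in bits 18..0.
+// The overlap at bits 18..16 is safe: lfnode pointers are 8-byte aligned, so
+// the low 3 address bits are zero before the shift.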
+
+func lfstackUnpack(val uint64) *lfnode {
+ if GOARCH == "amd64" {
+ // amd64 systems can place the stack above the VA hole, so we need to sign extend
+ // val before unpacking.
+ return (*lfnode)(unsafe.Pointer(uintptr(int64(val) >> cntBits << 3)))
+ }
+ if GOARCH == "ppc64" && GOOS == "aix" {
+ return (*lfnode)(unsafe.Pointer(uintptr((val >> aixCntBits << 3) | 0xa<<56)))
+ }
+ return (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3)))
+}
diff --git a/contrib/go/_std_1.19/src/runtime/lock_futex.go b/contrib/go/_std_1.19/src/runtime/lock_futex.go
new file mode 100644
index 0000000000..1578984ce2
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/lock_futex.go
@@ -0,0 +1,246 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build dragonfly || freebsd || linux
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// This implementation depends on OS-specific implementations of
+//
+// futexsleep(addr *uint32, val uint32, ns int64)
+// Atomically,
+// if *addr == val { sleep }
+// Might be woken up spuriously; that's allowed.
+// Don't sleep longer than ns; ns < 0 means forever.
+//
+// futexwakeup(addr *uint32, cnt uint32)
+// If any procs are sleeping on addr, wake up at most cnt.
+
+const (
+ mutex_unlocked = 0
+ mutex_locked = 1
+ mutex_sleeping = 2
+
+ active_spin = 4
+ active_spin_cnt = 30
+ passive_spin = 1
+)
+
+// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
+// mutex_sleeping means that there is presumably at least one sleeping thread.
+// Note that there can be spinning threads during all states - they do not
+// affect mutex's state.
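+//
+// A rough sketch of the state transitions (see lock2 and unlock2 below):
+// unlocked -> locked on the uncontended Xchg fast path in lock2;
+// locked -> sleeping when a contending thread marks the key before futexsleep;
+// sleeping -> unlocked in unlock2, which then calls futexwakeup.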
+
+// We use the uintptr mutex.key and note.key as a uint32.
+//
+//go:nosplit
+func key32(p *uintptr) *uint32 {
+ return (*uint32)(unsafe.Pointer(p))
+}
+
+func lock(l *mutex) {
+ lockWithRank(l, getLockRank(l))
+}
+
+func lock2(l *mutex) {
+ gp := getg()
+
+ if gp.m.locks < 0 {
+ throw("runtime·lock: lock count")
+ }
+ gp.m.locks++
+
+ // Speculative grab for lock.
+ v := atomic.Xchg(key32(&l.key), mutex_locked)
+ if v == mutex_unlocked {
+ return
+ }
+
+ // wait is either MUTEX_LOCKED or MUTEX_SLEEPING
+ // depending on whether there is a thread sleeping
+ // on this mutex. If we ever change l->key from
+ // MUTEX_SLEEPING to some other value, we must be
+ // careful to change it back to MUTEX_SLEEPING before
+ // returning, to ensure that the sleeping thread gets
+ // its wakeup call.
+ wait := v
+
+ // On uniprocessors, no point spinning.
+ // On multiprocessors, spin for ACTIVE_SPIN attempts.
+ spin := 0
+ if ncpu > 1 {
+ spin = active_spin
+ }
+ for {
+ // Try for lock, spinning.
+ for i := 0; i < spin; i++ {
+ for l.key == mutex_unlocked {
+ if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
+ return
+ }
+ }
+ procyield(active_spin_cnt)
+ }
+
+ // Try for lock, rescheduling.
+ for i := 0; i < passive_spin; i++ {
+ for l.key == mutex_unlocked {
+ if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
+ return
+ }
+ }
+ osyield()
+ }
+
+ // Sleep.
+ v = atomic.Xchg(key32(&l.key), mutex_sleeping)
+ if v == mutex_unlocked {
+ return
+ }
+ wait = mutex_sleeping
+ futexsleep(key32(&l.key), mutex_sleeping, -1)
+ }
+}
+
+func unlock(l *mutex) {
+ unlockWithRank(l)
+}
+
+func unlock2(l *mutex) {
+ v := atomic.Xchg(key32(&l.key), mutex_unlocked)
+ if v == mutex_unlocked {
+ throw("unlock of unlocked lock")
+ }
+ if v == mutex_sleeping {
+ futexwakeup(key32(&l.key), 1)
+ }
+
+ gp := getg()
+ gp.m.locks--
+ if gp.m.locks < 0 {
+ throw("runtime·unlock: lock count")
+ }
+ if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
+ gp.stackguard0 = stackPreempt
+ }
+}
+
+// One-time notifications.
+func noteclear(n *note) {
+ n.key = 0
+}
+
+func notewakeup(n *note) {
+ old := atomic.Xchg(key32(&n.key), 1)
+ if old != 0 {
+ print("notewakeup - double wakeup (", old, ")\n")
+ throw("notewakeup - double wakeup")
+ }
+ futexwakeup(key32(&n.key), 1)
+}
+
+func notesleep(n *note) {
+ gp := getg()
+ if gp != gp.m.g0 {
+ throw("notesleep not on g0")
+ }
+ ns := int64(-1)
+ if *cgo_yield != nil {
+ // Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
+ ns = 10e6
+ }
+ for atomic.Load(key32(&n.key)) == 0 {
+ gp.m.blocked = true
+ futexsleep(key32(&n.key), 0, ns)
+ if *cgo_yield != nil {
+ asmcgocall(*cgo_yield, nil)
+ }
+ gp.m.blocked = false
+ }
+}
+
+// May run with m.p==nil if called from notetsleep, so write barriers
+// are not allowed.
+//
+//go:nosplit
+//go:nowritebarrier
+func notetsleep_internal(n *note, ns int64) bool {
+ gp := getg()
+
+ if ns < 0 {
+ if *cgo_yield != nil {
+ // Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
+ ns = 10e6
+ }
+ for atomic.Load(key32(&n.key)) == 0 {
+ gp.m.blocked = true
+ futexsleep(key32(&n.key), 0, ns)
+ if *cgo_yield != nil {
+ asmcgocall(*cgo_yield, nil)
+ }
+ gp.m.blocked = false
+ }
+ return true
+ }
+
+ if atomic.Load(key32(&n.key)) != 0 {
+ return true
+ }
+
+ deadline := nanotime() + ns
+ for {
+ if *cgo_yield != nil && ns > 10e6 {
+ ns = 10e6
+ }
+ gp.m.blocked = true
+ futexsleep(key32(&n.key), 0, ns)
+ if *cgo_yield != nil {
+ asmcgocall(*cgo_yield, nil)
+ }
+ gp.m.blocked = false
+ if atomic.Load(key32(&n.key)) != 0 {
+ break
+ }
+ now := nanotime()
+ if now >= deadline {
+ break
+ }
+ ns = deadline - now
+ }
+ return atomic.Load(key32(&n.key)) != 0
+}
+
+func notetsleep(n *note, ns int64) bool {
+ gp := getg()
+ if gp != gp.m.g0 && gp.m.preemptoff != "" {
+ throw("notetsleep not on g0")
+ }
+
+ return notetsleep_internal(n, ns)
+}
+
+// same as runtime·notetsleep, but called on user g (not g0);
+// calls only nosplit functions between entersyscallblock/exitsyscall.
+func notetsleepg(n *note, ns int64) bool {
+ gp := getg()
+ if gp == gp.m.g0 {
+ throw("notetsleepg on g0")
+ }
+
+ entersyscallblock()
+ ok := notetsleep_internal(n, ns)
+ exitsyscall()
+ return ok
+}
+
+func beforeIdle(int64, int64) (*g, bool) {
+ return nil, false
+}
+
+func checkTimeouts() {}
diff --git a/contrib/go/_std_1.19/src/runtime/lock_sema.go b/contrib/go/_std_1.19/src/runtime/lock_sema.go
new file mode 100644
index 0000000000..c5e8cfe24a
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/lock_sema.go
@@ -0,0 +1,304 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || netbsd || openbsd || plan9 || solaris || windows
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// This implementation depends on OS-specific implementations of
+//
+// func semacreate(mp *m)
+// Create a semaphore for mp, if it does not already have one.
+//
+// func semasleep(ns int64) int32
+// If ns < 0, acquire m's semaphore and return 0.
+// If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
+// Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
+//
+// func semawakeup(mp *m)
+// Wake up mp, which is or will soon be sleeping on its semaphore.
+const (
+ locked uintptr = 1
+
+ active_spin = 4
+ active_spin_cnt = 30
+ passive_spin = 1
+)
+
+func lock(l *mutex) {
+ lockWithRank(l, getLockRank(l))
+}
+
+func lock2(l *mutex) {
+ gp := getg()
+ if gp.m.locks < 0 {
+ throw("runtime·lock: lock count")
+ }
+ gp.m.locks++
+
+ // Speculative grab for lock.
+ if atomic.Casuintptr(&l.key, 0, locked) {
+ return
+ }
+ semacreate(gp.m)
+
+ // On uniprocessors, no point spinning.
+ // On multiprocessors, spin for ACTIVE_SPIN attempts.
+ spin := 0
+ if ncpu > 1 {
+ spin = active_spin
+ }
+Loop:
+ for i := 0; ; i++ {
+ v := atomic.Loaduintptr(&l.key)
+ if v&locked == 0 {
+ // Unlocked. Try to lock.
+ if atomic.Casuintptr(&l.key, v, v|locked) {
+ return
+ }
+ i = 0
+ }
+ if i < spin {
+ procyield(active_spin_cnt)
+ } else if i < spin+passive_spin {
+ osyield()
+ } else {
+ // Someone else has it.
+ // l->waitm points to a linked list of M's waiting
+ // for this lock, chained through m->nextwaitm.
+ // Queue this M.
+ for {
+ gp.m.nextwaitm = muintptr(v &^ locked)
+ if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
+ break
+ }
+ v = atomic.Loaduintptr(&l.key)
+ if v&locked == 0 {
+ continue Loop
+ }
+ }
+ if v&locked != 0 {
+ // Queued. Wait.
+ semasleep(-1)
+ i = 0
+ }
+ }
+ }
+}
+
+func unlock(l *mutex) {
+ unlockWithRank(l)
+}
+
+// We might not be holding a p in this code.
+//
+//go:nowritebarrier
+func unlock2(l *mutex) {
+ gp := getg()
+ var mp *m
+ for {
+ v := atomic.Loaduintptr(&l.key)
+ if v == locked {
+ if atomic.Casuintptr(&l.key, locked, 0) {
+ break
+ }
+ } else {
+ // Other M's are waiting for the lock.
+ // Dequeue an M.
+ mp = muintptr(v &^ locked).ptr()
+ if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
+ // Dequeued an M. Wake it.
+ semawakeup(mp)
+ break
+ }
+ }
+ }
+ gp.m.locks--
+ if gp.m.locks < 0 {
+ throw("runtime·unlock: lock count")
+ }
+ if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
+ gp.stackguard0 = stackPreempt
+ }
+}
+
+// One-time notifications.
+func noteclear(n *note) {
+ if GOOS == "aix" {
+ // On AIX, semaphores might not synchronize the memory in some
+ // rare cases. See issue #30189.
+ atomic.Storeuintptr(&n.key, 0)
+ } else {
+ n.key = 0
+ }
+}
+
+func notewakeup(n *note) {
+ var v uintptr
+ for {
+ v = atomic.Loaduintptr(&n.key)
+ if atomic.Casuintptr(&n.key, v, locked) {
+ break
+ }
+ }
+
+ // Successfully set waitm to locked.
+ // What was it before?
+ switch {
+ case v == 0:
+ // Nothing was waiting. Done.
+ case v == locked:
+ // Two notewakeups! Not allowed.
+ throw("notewakeup - double wakeup")
+ default:
+ // Must be the waiting m. Wake it up.
+ semawakeup((*m)(unsafe.Pointer(v)))
+ }
+}
+
+func notesleep(n *note) {
+ gp := getg()
+ if gp != gp.m.g0 {
+ throw("notesleep not on g0")
+ }
+ semacreate(gp.m)
+ if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
+ // Must be locked (got wakeup).
+ if n.key != locked {
+ throw("notesleep - waitm out of sync")
+ }
+ return
+ }
+ // Queued. Sleep.
+ gp.m.blocked = true
+ if *cgo_yield == nil {
+ semasleep(-1)
+ } else {
+ // Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
+ const ns = 10e6
+ for atomic.Loaduintptr(&n.key) == 0 {
+ semasleep(ns)
+ asmcgocall(*cgo_yield, nil)
+ }
+ }
+ gp.m.blocked = false
+}
+
+//go:nosplit
+func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
+ // gp and deadline are logically local variables, but they are written
+ // as parameters so that the stack space they require is charged
+ // to the caller.
+ // This reduces the nosplit footprint of notetsleep_internal.
+ gp = getg()
+
+ // Register for wakeup on n->waitm.
+ if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
+ // Must be locked (got wakeup).
+ if n.key != locked {
+ throw("notetsleep - waitm out of sync")
+ }
+ return true
+ }
+ if ns < 0 {
+ // Queued. Sleep.
+ gp.m.blocked = true
+ if *cgo_yield == nil {
+ semasleep(-1)
+ } else {
+ // Sleep in arbitrary-but-moderate intervals to poll libc interceptors.
+ const ns = 10e6
+ for semasleep(ns) < 0 {
+ asmcgocall(*cgo_yield, nil)
+ }
+ }
+ gp.m.blocked = false
+ return true
+ }
+
+ deadline = nanotime() + ns
+ for {
+ // Registered. Sleep.
+ gp.m.blocked = true
+ if *cgo_yield != nil && ns > 10e6 {
+ ns = 10e6
+ }
+ if semasleep(ns) >= 0 {
+ gp.m.blocked = false
+ // Acquired semaphore, semawakeup unregistered us.
+ // Done.
+ return true
+ }
+ if *cgo_yield != nil {
+ asmcgocall(*cgo_yield, nil)
+ }
+ gp.m.blocked = false
+ // Interrupted or timed out. Still registered. Semaphore not acquired.
+ ns = deadline - nanotime()
+ if ns <= 0 {
+ break
+ }
+ // Deadline hasn't arrived. Keep sleeping.
+ }
+
+ // Deadline arrived. Still registered. Semaphore not acquired.
+ // Want to give up and return, but have to unregister first,
+ // so that any notewakeup racing with the return does not
+ // try to grant us the semaphore when we don't expect it.
+ for {
+ v := atomic.Loaduintptr(&n.key)
+ switch v {
+ case uintptr(unsafe.Pointer(gp.m)):
+ // No wakeup yet; unregister if possible.
+ if atomic.Casuintptr(&n.key, v, 0) {
+ return false
+ }
+ case locked:
+ // Wakeup happened so semaphore is available.
+ // Grab it to avoid getting out of sync.
+ gp.m.blocked = true
+ if semasleep(-1) < 0 {
+ throw("runtime: unable to acquire - semaphore out of sync")
+ }
+ gp.m.blocked = false
+ return true
+ default:
+ throw("runtime: unexpected waitm - semaphore out of sync")
+ }
+ }
+}
+
+func notetsleep(n *note, ns int64) bool {
+ gp := getg()
+ if gp != gp.m.g0 {
+ throw("notetsleep not on g0")
+ }
+ semacreate(gp.m)
+ return notetsleep_internal(n, ns, nil, 0)
+}
+
+// Same as runtime·notetsleep, but called on user g (not g0).
+// Calls only nosplit functions between entersyscallblock/exitsyscall.
+func notetsleepg(n *note, ns int64) bool {
+ gp := getg()
+ if gp == gp.m.g0 {
+ throw("notetsleepg on g0")
+ }
+ semacreate(gp.m)
+ entersyscallblock()
+ ok := notetsleep_internal(n, ns, nil, 0)
+ exitsyscall()
+ return ok
+}
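+
+// Sketch of the timed variant, for illustration: wait up to 100ms (the ns
+// argument is in nanoseconds) for a wakeup on a previously cleared note:
+//
+//	if notetsleepg(&n, 100*1000*1000) {
+//		// woken by notewakeup within the deadline
+//	} else {
+//		// timed out; the note has been unregistered
+//	}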
+
+func beforeIdle(int64, int64) (*g, bool) {
+ return nil, false
+}
+
+func checkTimeouts() {}
diff --git a/contrib/go/_std_1.19/src/runtime/lockrank.go b/contrib/go/_std_1.19/src/runtime/lockrank.go
new file mode 100644
index 0000000000..bb0b189fc7
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/lockrank.go
@@ -0,0 +1,260 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file records the static ranks of the locks in the runtime. If a lock
+// is not given a rank, then it is assumed to be a leaf lock, which means no other
+// lock can be acquired while it is held. Therefore, leaf locks do not need to be
+// given an explicit rank. We list all of the architecture-independent leaf locks
+// for documentation purposes, but don't list any of the architecture-dependent
+// locks (which are all leaf locks). debugLock is ignored for ranking, since it is used
+// when printing out lock ranking errors.
+//
+// lockInit(l *mutex, rank lockRank) is used to set the rank of a lock before it is used.
+// If there is no clear place to initialize a lock, then the rank of a lock can be
+// specified during the lock call itself via lockWithRank(l *mutex, rank lockRank).
+//
+// Besides the static lock ranking (which is a total ordering of the locks), we
+// also represent and enforce the actual partial order among the locks in the
+// arcs[] array below. That is, if it is possible that lock B can be acquired when
+// lock A is the previous acquired lock that is still held, then there should be
+// an entry for A in arcs[B][]. We will currently fail not only if the total order
+// (the lock ranking) is violated, but also if there is a missing entry in the
+// partial order.
+
+package runtime
+
+type lockRank int
+
+// Constants representing the lock rank of the architecture-independent locks in
+// the runtime. Locks with lower rank must be taken before locks with higher
+// rank.
+const (
+ lockRankDummy lockRank = iota
+
+ // Locks held above sched
+ lockRankSysmon
+ lockRankScavenge
+ lockRankForcegc
+ lockRankSweepWaiters
+ lockRankAssistQueue
+ lockRankCpuprof
+ lockRankSweep
+
+ lockRankPollDesc
+ lockRankSched
+ lockRankDeadlock
+ lockRankAllg
+ lockRankAllp
+
+ lockRankTimers // Multiple timers locked simultaneously in destroy()
+ lockRankItab
+ lockRankReflectOffs
+ lockRankHchan // Multiple hchans acquired in lock order in syncadjustsudogs()
+ lockRankTraceBuf
+ lockRankFin
+ lockRankNotifyList
+ lockRankTraceStrings
+ lockRankMspanSpecial
+ lockRankProfInsert
+ lockRankProfBlock
+ lockRankProfMemActive
+ lockRankProfMemFuture
+ lockRankGcBitsArenas
+ lockRankRoot
+ lockRankTrace
+ lockRankTraceStackTab
+ lockRankNetpollInit
+
+ lockRankRwmutexW
+ lockRankRwmutexR
+
+ lockRankSpanSetSpine
+ lockRankGscan
+ lockRankStackpool
+ lockRankStackLarge
+ lockRankDefer
+ lockRankSudog
+
+ // Memory-related non-leaf locks
+ lockRankWbufSpans
+ lockRankMheap
+ lockRankMheapSpecial
+
+ // Memory-related leaf locks
+ lockRankGlobalAlloc
+ lockRankPageAllocScav
+
+ // Other leaf locks
+ lockRankGFree
+ // Generally, hchan must be acquired before gscan. But in one specific
+ // case (in syncadjustsudogs from markroot after the g has been suspended
+ // by suspendG), we allow gscan to be acquired, and then an hchan lock. To
+ // allow this case, we get this lockRankHchanLeaf rank in
+ // syncadjustsudogs(), rather than lockRankHchan. By using this special
+ // rank, we don't allow any further locks to be acquired other than more
+ // hchan locks.
+ lockRankHchanLeaf
+ lockRankPanic
+
+ // Leaf locks with no dependencies, so these constants are not actually used anywhere.
+ // There are other architecture-dependent leaf locks as well.
+ lockRankNewmHandoff
+ lockRankDebugPtrmask
+ lockRankFaketimeState
+ lockRankTicks
+ lockRankRaceFini
+ lockRankPollCache
+ lockRankDebug
+)
+
+// lockRankLeafRank is the rank of a lock that does not have a declared rank,
+// and hence is a leaf lock.
+const lockRankLeafRank lockRank = 1000
+
+// lockNames gives the names associated with each of the above ranks
+var lockNames = []string{
+ lockRankDummy: "",
+
+ lockRankSysmon: "sysmon",
+ lockRankScavenge: "scavenge",
+ lockRankForcegc: "forcegc",
+ lockRankSweepWaiters: "sweepWaiters",
+ lockRankAssistQueue: "assistQueue",
+ lockRankCpuprof: "cpuprof",
+ lockRankSweep: "sweep",
+
+ lockRankPollDesc: "pollDesc",
+ lockRankSched: "sched",
+ lockRankDeadlock: "deadlock",
+ lockRankAllg: "allg",
+ lockRankAllp: "allp",
+
+ lockRankTimers: "timers",
+ lockRankItab: "itab",
+ lockRankReflectOffs: "reflectOffs",
+
+ lockRankHchan: "hchan",
+ lockRankTraceBuf: "traceBuf",
+ lockRankFin: "fin",
+ lockRankNotifyList: "notifyList",
+ lockRankTraceStrings: "traceStrings",
+ lockRankMspanSpecial: "mspanSpecial",
+ lockRankProfInsert: "profInsert",
+ lockRankProfBlock: "profBlock",
+ lockRankProfMemActive: "profMemActive",
+ lockRankProfMemFuture: "profMemFuture",
+ lockRankGcBitsArenas: "gcBitsArenas",
+ lockRankRoot: "root",
+ lockRankTrace: "trace",
+ lockRankTraceStackTab: "traceStackTab",
+ lockRankNetpollInit: "netpollInit",
+
+ lockRankRwmutexW: "rwmutexW",
+ lockRankRwmutexR: "rwmutexR",
+
+ lockRankSpanSetSpine: "spanSetSpine",
+ lockRankGscan: "gscan",
+ lockRankStackpool: "stackpool",
+ lockRankStackLarge: "stackLarge",
+ lockRankDefer: "defer",
+ lockRankSudog: "sudog",
+
+ lockRankWbufSpans: "wbufSpans",
+ lockRankMheap: "mheap",
+ lockRankMheapSpecial: "mheapSpecial",
+
+ lockRankGlobalAlloc: "globalAlloc.mutex",
+ lockRankPageAllocScav: "pageAlloc.scav.lock",
+
+ lockRankGFree: "gFree",
+ lockRankHchanLeaf: "hchanLeaf",
+ lockRankPanic: "panic",
+
+ lockRankNewmHandoff: "newmHandoff.lock",
+ lockRankDebugPtrmask: "debugPtrmask.lock",
+ lockRankFaketimeState: "faketimeState.lock",
+ lockRankTicks: "ticks.lock",
+ lockRankRaceFini: "raceFiniLock",
+ lockRankPollCache: "pollCache.lock",
+ lockRankDebug: "debugLock",
+}
+
+func (rank lockRank) String() string {
+ if rank == 0 {
+ return "UNKNOWN"
+ }
+ if rank == lockRankLeafRank {
+ return "LEAF"
+ }
+ return lockNames[rank]
+}
+
+// lockPartialOrder is a partial order among the various lock types, listing the
+// immediate ordering that has actually been observed in the runtime. Each entry
+// (which corresponds to a particular lock rank) specifies the list of locks
+// that can already be held immediately "above" it.
+//
+// So, for example, the lockRankSched entry shows that all the locks preceding
+// it in rank can actually be held. The allp lock shows that only the sysmon or
+// sched lock can be held immediately above it when it is acquired.
+var lockPartialOrder [][]lockRank = [][]lockRank{
+ lockRankDummy: {},
+ lockRankSysmon: {},
+ lockRankScavenge: {lockRankSysmon},
+ lockRankForcegc: {lockRankSysmon},
+ lockRankSweepWaiters: {},
+ lockRankAssistQueue: {},
+ lockRankCpuprof: {},
+ lockRankSweep: {},
+ lockRankPollDesc: {},
+ lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc},
+ lockRankDeadlock: {lockRankDeadlock},
+ lockRankAllg: {lockRankSysmon, lockRankSched},
+ lockRankAllp: {lockRankSysmon, lockRankSched},
+ lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankPollDesc, lockRankSched, lockRankAllp, lockRankTimers},
+ lockRankItab: {},
+ lockRankReflectOffs: {lockRankItab},
+ lockRankHchan: {lockRankScavenge, lockRankSweep, lockRankHchan},
+ lockRankTraceBuf: {lockRankSysmon, lockRankScavenge},
+ lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankSched, lockRankAllg, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf},
+ lockRankNotifyList: {},
+ lockRankTraceStrings: {lockRankTraceBuf},
+ lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
+ lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
+ lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
+ lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
+ lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings, lockRankProfMemActive},
+ lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
+ lockRankRoot: {},
+ lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankAssistQueue, lockRankSweep, lockRankSched, lockRankHchan, lockRankTraceBuf, lockRankTraceStrings, lockRankRoot},
+ lockRankTraceStackTab: {lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankSched, lockRankAllg, lockRankTimers, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankRoot, lockRankTrace},
+ lockRankNetpollInit: {lockRankTimers},
+
+ lockRankRwmutexW: {},
+ lockRankRwmutexR: {lockRankSysmon, lockRankRwmutexW},
+
+ lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
+ lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGcBitsArenas, lockRankRoot, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankSpanSetSpine},
+ lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGcBitsArenas, lockRankRoot, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankRwmutexR, lockRankSpanSetSpine, lockRankGscan},
+ lockRankStackLarge: {lockRankSysmon, lockRankAssistQueue, lockRankSched, lockRankItab, lockRankHchan, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGcBitsArenas, lockRankRoot, lockRankSpanSetSpine, lockRankGscan},
+ lockRankDefer: {},
+ lockRankSudog: {lockRankHchan, lockRankNotifyList},
+ lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankAllg, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankMspanSpecial, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankRoot, lockRankTrace, lockRankGscan, lockRankDefer, lockRankSudog},
+ lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankMspanSpecial, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGcBitsArenas, lockRankRoot, lockRankTrace, lockRankSpanSetSpine, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankDefer, lockRankSudog, lockRankWbufSpans},
+ lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
+ lockRankGlobalAlloc: {lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankSpanSetSpine, lockRankMheap, lockRankMheapSpecial},
+ lockRankPageAllocScav: {lockRankMheap},
+
+ lockRankGFree: {lockRankSched},
+ lockRankHchanLeaf: {lockRankGscan, lockRankHchanLeaf},
+ lockRankPanic: {lockRankDeadlock}, // plus any other lock held on throw.
+
+ lockRankNewmHandoff: {},
+ lockRankDebugPtrmask: {},
+ lockRankFaketimeState: {},
+ lockRankTicks: {},
+ lockRankRaceFini: {},
+ lockRankPollCache: {},
+ lockRankDebug: {},
+}
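+
+// For illustration (hypothetical helper, not part of this file): whether a
+// lock of rank b may be acquired while a lock of rank a is held reduces to a
+// lookup in the partial order above:
+//
+//	func mayAcquireWhileHolding(a, b lockRank) bool {
+//		for _, prev := range lockPartialOrder[b] {
+//			if prev == a {
+//				return true
+//			}
+//		}
+//		return false
+//	}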
diff --git a/contrib/go/_std_1.19/src/runtime/lockrank_off.go b/contrib/go/_std_1.19/src/runtime/lockrank_off.go
new file mode 100644
index 0000000000..bf046a1041
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/lockrank_off.go
@@ -0,0 +1,66 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !goexperiment.staticlockranking
+
+package runtime
+
+// lockRankStruct is embedded in mutex, but is empty when static lock ranking
+// is disabled (the default).
+type lockRankStruct struct {
+}
+
+func lockInit(l *mutex, rank lockRank) {
+}
+
+func getLockRank(l *mutex) lockRank {
+ return 0
+}
+
+func lockWithRank(l *mutex, rank lockRank) {
+ lock2(l)
+}
+
+// This function may be called in nosplit context and thus must be nosplit.
+//
+//go:nosplit
+func acquireLockRank(rank lockRank) {
+}
+
+func unlockWithRank(l *mutex) {
+ unlock2(l)
+}
+
+// This function may be called in nosplit context and thus must be nosplit.
+//
+//go:nosplit
+func releaseLockRank(rank lockRank) {
+}
+
+func lockWithRankMayAcquire(l *mutex, rank lockRank) {
+}
+
+//go:nosplit
+func assertLockHeld(l *mutex) {
+}
+
+//go:nosplit
+func assertRankHeld(r lockRank) {
+}
+
+//go:nosplit
+func worldStopped() {
+}
+
+//go:nosplit
+func worldStarted() {
+}
+
+//go:nosplit
+func assertWorldStopped() {
+}
+
+//go:nosplit
+func assertWorldStoppedOrLockHeld(l *mutex) {
+}
diff --git a/contrib/go/_std_1.19/src/runtime/malloc.go b/contrib/go/_std_1.19/src/runtime/malloc.go
new file mode 100644
index 0000000000..eb24fdb0e8
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/malloc.go
@@ -0,0 +1,1501 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Memory allocator.
+//
+// This was originally based on tcmalloc, but has diverged quite a bit.
+// http://goog-perftools.sourceforge.net/doc/tcmalloc.html
+
+// The main allocator works in runs of pages.
+// Small allocation sizes (up to and including 32 kB) are
+// rounded to one of about 70 size classes, each of which
+// has its own free set of objects of exactly that size.
+// Any free page of memory can be split into a set of objects
+// of one size class, which are then managed using a free bitmap.
+//
+// The allocator's data structures are:
+//
+// fixalloc: a free-list allocator for fixed-size off-heap objects,
+// used to manage storage used by the allocator.
+// mheap: the malloc heap, managed at page (8192-byte) granularity.
+// mspan: a run of in-use pages managed by the mheap.
+// mcentral: collects all spans of a given size class.
+// mcache: a per-P cache of mspans with free space.
+// mstats: allocation statistics.
+//
+// Allocating a small object proceeds up a hierarchy of caches:
+//
+// 1. Round the size up to one of the small size classes
+// and look in the corresponding mspan in this P's mcache.
+// Scan the mspan's free bitmap to find a free slot.
+// If there is a free slot, allocate it.
+// This can all be done without acquiring a lock.
+//
+// 2. If the mspan has no free slots, obtain a new mspan
+// from the mcentral's list of mspans of the required size
+// class that have free space.
+// Obtaining a whole span amortizes the cost of locking
+// the mcentral.
+//
+// 3. If the mcentral's mspan list is empty, obtain a run
+// of pages from the mheap to use for the mspan.
+//
+// 4. If the mheap is empty or has no page runs large enough,
+// allocate a new group of pages (at least 1MB) from the
+// operating system. Allocating a large run of pages
+// amortizes the cost of talking to the operating system.
+//
+// Sweeping an mspan and freeing objects on it proceeds up a similar
+// hierarchy:
+//
+// 1. If the mspan is being swept in response to allocation, it
+// is returned to the mcache to satisfy the allocation.
+//
+// 2. Otherwise, if the mspan still has allocated objects in it,
+// it is placed on the mcentral free list for the mspan's size
+// class.
+//
+// 3. Otherwise, if all objects in the mspan are free, the mspan's
+// pages are returned to the mheap and the mspan is now dead.
+//
+// Allocating and freeing a large object uses the mheap
+// directly, bypassing the mcache and mcentral.
+//
+// If mspan.needzero is false, then free object slots in the mspan are
+// already zeroed. Otherwise if needzero is true, objects are zeroed as
+// they are allocated. There are various benefits to delaying zeroing
+// this way:
+//
+// 1. Stack frame allocation can avoid zeroing altogether.
+//
+// 2. It exhibits better temporal locality, since the program is
+// probably about to write to the memory.
+//
+// 3. We don't zero pages that never get reused.
+
+// Virtual memory layout
+//
+// The heap consists of a set of arenas, which are 64MB on 64-bit and
+// 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
+// aligned to the arena size.
+//
+// Each arena has an associated heapArena object that stores the
+// metadata for that arena: the heap bitmap for all words in the arena
+// and the span map for all pages in the arena. heapArena objects are
+// themselves allocated off-heap.
+//
+// Since arenas are aligned, the address space can be viewed as a
+// series of arena frames. The arena map (mheap_.arenas) maps from
+// arena frame number to *heapArena, or nil for parts of the address
+// space not backed by the Go heap. The arena map is structured as a
+// two-level array consisting of a "L1" arena map and many "L2" arena
+// maps; however, since arenas are large, on many architectures, the
+// arena map consists of a single, large L2 map.
+//
+// The arena map covers the entire possible address space, allowing
+// the Go heap to use any part of the address space. The allocator
+// attempts to keep arenas contiguous so that large spans (and hence
+// large objects) can cross arenas.
+
+package runtime
+
+import (
+ "internal/goarch"
+ "internal/goos"
+ "runtime/internal/atomic"
+ "runtime/internal/math"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+const (
+ maxTinySize = _TinySize
+ tinySizeClass = _TinySizeClass
+ maxSmallSize = _MaxSmallSize
+
+ pageShift = _PageShift
+ pageSize = _PageSize
+
+ concurrentSweep = _ConcurrentSweep
+
+ _PageSize = 1 << _PageShift
+ _PageMask = _PageSize - 1
+
+ // _64bit = 1 on 64-bit systems, 0 on 32-bit systems
+ _64bit = 1 << (^uintptr(0) >> 63) / 2
+
+ // Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
+ _TinySize = 16
+ _TinySizeClass = int8(2)
+
+ _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc
+
+ // Per-P, per order stack segment cache size.
+ _StackCacheSize = 32 * 1024
+
+ // Number of orders that get caching. Order 0 is FixedStack
+ // and each successive order is twice as large.
+ // We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
+ // will be allocated directly.
+ // Since FixedStack is different on different systems, we
+ // must vary NumStackOrders to keep the same maximum cached size.
+ // OS | FixedStack | NumStackOrders
+ // -----------------+------------+---------------
+ // linux/darwin/bsd | 2KB | 4
+ // windows/32 | 4KB | 3
+ // windows/64 | 8KB | 2
+ // plan9 | 4KB | 3
+ _NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
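+ // For example, on linux/amd64 (goarch.PtrSize = 8, goos.IsWindows = 0,
+ // goos.IsPlan9 = 0) this evaluates to 4 - 2*0 - 1*0 = 4 orders, matching
+ // the table above; on windows/amd64 it evaluates to 4 - 2*1 - 0 = 2.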
+
+ // heapAddrBits is the number of bits in a heap address. On
+ // amd64, addresses are sign-extended beyond heapAddrBits. On
+ // other arches, they are zero-extended.
+ //
+ // On most 64-bit platforms, we limit this to 48 bits based on a
+ // combination of hardware and OS limitations.
+ //
+ // amd64 hardware limits addresses to 48 bits, sign-extended
+ // to 64 bits. Addresses where the top 16 bits are not either
+ // all 0 or all 1 are "non-canonical" and invalid. Because of
+ // these "negative" addresses, we offset addresses by 1<<47
+ // (arenaBaseOffset) on amd64 before computing indexes into
+ // the heap arenas index. In 2017, amd64 hardware added
+ // support for 57 bit addresses; however, currently only Linux
+ // supports this extension and the kernel will never choose an
+ // address above 1<<47 unless mmap is called with a hint
+ // address above 1<<47 (which we never do).
+ //
+ // arm64 hardware (as of ARMv8) limits user addresses to 48
+ // bits, in the range [0, 1<<48).
+ //
+ // ppc64, mips64, and s390x support arbitrary 64 bit addresses
+ // in hardware. On Linux, Go leans on stricter OS limits. Based
+ // on Linux's processor.h, the user address space is limited as
+ // follows on 64-bit architectures:
+ //
+ // Architecture Name Maximum Value (exclusive)
+ // ---------------------------------------------------------------------
+ // amd64 TASK_SIZE_MAX 0x007ffffffff000 (47 bit addresses)
+ // arm64 TASK_SIZE_64 0x01000000000000 (48 bit addresses)
+ // ppc64{,le} TASK_SIZE_USER64 0x00400000000000 (46 bit addresses)
+ // mips64{,le} TASK_SIZE64 0x00010000000000 (40 bit addresses)
+ // s390x TASK_SIZE 1<<64 (64 bit addresses)
+ //
+ // These limits may increase over time, but are currently at
+ // most 48 bits except on s390x. On all architectures, Linux
+ // starts placing mmap'd regions at addresses that are
+ // significantly below 48 bits, so even if it's possible to
+ // exceed Go's 48 bit limit, it's extremely unlikely in
+ // practice.
+ //
+ // On 32-bit platforms, we accept the full 32-bit address
+ // space because doing so is cheap.
+ // mips32 only has access to the low 2GB of virtual memory, so
+ // we further limit it to 31 bits.
+ //
+ // On ios/arm64, although 64-bit pointers are presumably
+ // available, pointers are truncated to 33 bits in iOS <14.
+ // Furthermore, only the top 4 GiB of the address space are
+ // actually available to the application. In iOS >=14, more
+ // of the address space is available, and the OS can now
+ // provide addresses outside of those 33 bits. Pick 40 bits
+ // as a reasonable balance between address space usage by the
+ // page allocator, and flexibility for what mmap'd regions
+ // we'll accept for the heap. We can't just move to the full
+ // 48 bits because this uses too much address space for older
+ // iOS versions.
+ // TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64
+ // to a 48-bit address space like every other arm64 platform.
+ //
+ // WebAssembly currently has a limit of 4GB linear memory.
+ heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 40*goos.IsIos*goarch.IsArm64
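+ // For example, on linux/amd64 (_64bit = 1, goarch.IsWasm = 0,
+ // goos.IsIos*goarch.IsArm64 = 0) only the first term survives and the
+ // expression reduces to 48; on ios/arm64 only the final term survives,
+ // giving 40.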
+
+ // maxAlloc is the maximum size of an allocation. On 64-bit,
+ // it's theoretically possible to allocate 1<<heapAddrBits bytes. On
+ // 32-bit, however, this is one less than 1<<32 because the
+ // number of bytes in the address space doesn't actually fit
+ // in a uintptr.
+ maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1
+
+ // The number of bits in a heap address, the size of heap
+ // arenas, and the L1 and L2 arena map sizes are related by
+ //
+ // (1 << addr bits) = arena size * L1 entries * L2 entries
+ //
+ // Currently, we balance these as follows:
+ //
+ // Platform Addr bits Arena size L1 entries L2 entries
+ // -------------- --------- ---------- ---------- -----------
+ // */64-bit 48 64MB 1 4M (32MB)
+ // windows/64-bit 48 4MB 64 1M (8MB)
+ // ios/arm64 33 4MB 1 2048 (8KB)
+ // */32-bit 32 4MB 1 1024 (4KB)
+ // */mips(le) 31 4MB 1 512 (2KB)
+
+ // heapArenaBytes is the size of a heap arena. The heap
+ // consists of mappings of size heapArenaBytes, aligned to
+ // heapArenaBytes. The initial heap mapping is one arena.
+ //
+ // This is currently 64MB on 64-bit non-Windows and 4MB on
+ // 32-bit and on Windows. We use smaller arenas on Windows
+ // because all committed memory is charged to the process,
+ // even if it's not touched. Hence, for processes with small
+ // heaps, the mapped arena space needs to be commensurate.
+ // This is particularly important with the race detector,
+ // since it significantly amplifies the cost of committed
+ // memory.
+ heapArenaBytes = 1 << logHeapArenaBytes
+
+ // logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
+ // prefer using heapArenaBytes where possible (we need the
+ // constant to compute some other constants).
+ logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64
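+ // For example, on linux/amd64 only the first term is nonzero, so
+ // logHeapArenaBytes = 6+20 = 26 and heapArenaBytes = 1<<26 = 64MB; on
+ // windows/amd64 the second term gives 2+20 = 22, i.e. 4MB arenas.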
+
+ // heapArenaBitmapBytes is the size of each heap arena's bitmap.
+ heapArenaBitmapBytes = heapArenaBytes / (goarch.PtrSize * 8 / 2)
+
+ pagesPerArena = heapArenaBytes / pageSize
+
+ // arenaL1Bits is the number of bits of the arena number
+ // covered by the first level arena map.
+ //
+ // This number should be small, since the first level arena
+ // map requires PtrSize*(1<<arenaL1Bits) of space in the
+ // binary's BSS. It can be zero, in which case the first level
+ // index is effectively unused. There is a performance benefit
+ // to this, since the generated code can be more efficient,
+ // but comes at the cost of having a large L2 mapping.
+ //
+ // We use the L1 map on 64-bit Windows because the arena size
+ // is small, but the address space is still 48 bits, and
+ // there's a high cost to having a large L2.
+ arenaL1Bits = 6 * (_64bit * goos.IsWindows)
+
+ // arenaL2Bits is the number of bits of the arena number
+ // covered by the second level arena index.
+ //
+ // The size of each arena map allocation is proportional to
+ // 1<<arenaL2Bits, so it's important that this not be too
+ // large. 48 bits leads to 32MB arena index allocations, which
+ // is about the practical threshold.
+ arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits
+
+ // arenaL1Shift is the number of bits to shift an arena frame
+ // number by to compute an index into the first level arena map.
+ arenaL1Shift = arenaL2Bits
+
+ // arenaBits is the total bits in a combined arena map index.
+ // This is split between the index into the L1 arena map and
+ // the L2 arena map.
+ arenaBits = arenaL1Bits + arenaL2Bits
+
+ // arenaBaseOffset is the pointer value that corresponds to
+ // index 0 in the heap arena map.
+ //
+ // On amd64, the address space is 48 bits, sign extended to 64
+ // bits. This offset lets us handle "negative" addresses (or
+ // high addresses if viewed as unsigned).
+ //
+ // On aix/ppc64, this offset allows us to keep heapAddrBits at
+ // 48. Otherwise, it would have to be 60 in order to handle mmap addresses
+ // (in the range 0x0a00000000000000 - 0x0affffffffffffff). But in that
+ // case, the memory reserved in (s *pageAlloc).init for chunks
+ // causes significant slowdowns.
+ //
+ // On other platforms, the user address space is contiguous
+ // and starts at 0, so no offset is necessary.
+ arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
+ // A typed version of this constant that will make it into DWARF (for viewcore).
+ arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)
+
+ // Max number of threads to run garbage collection.
+ // 2, 3, and 4 are all plausible maximums depending
+ // on the hardware details of the machine. The garbage
+ // collector scales well to 32 cpus.
+ _MaxGcproc = 32
+
+ // minLegalPointer is the smallest possible legal pointer.
+ // This is the smallest possible architectural page size,
+ // since we assume that the first page is never mapped.
+ //
+ // This should agree with minZeroPage in the compiler.
+ minLegalPointer uintptr = 4096
+)
+
+// physPageSize is the size in bytes of the OS's physical pages.
+// Mapping and unmapping operations must be done at multiples of
+// physPageSize.
+//
+// This must be set by the OS init code (typically in osinit) before
+// mallocinit.
+var physPageSize uintptr
+
+// physHugePageSize is the size in bytes of the OS's default physical huge
+// page size whose allocation is opaque to the application. It is assumed
+// and verified to be a power of two.
+//
+// If set, this must be set by the OS init code (typically in osinit) before
+// mallocinit. However, setting it at all is optional, and leaving the default
+// value is always safe (though potentially less efficient).
+//
+// Since physHugePageSize is always assumed to be a power of two,
+// physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
+// The purpose of physHugePageShift is to avoid doing divisions in
+// performance critical functions.
+var (
+ physHugePageSize uintptr
+ physHugePageShift uint
+)
+
+func mallocinit() {
+ if class_to_size[_TinySizeClass] != _TinySize {
+ throw("bad TinySizeClass")
+ }
+
+ if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
+ // heapBits expects modular arithmetic on bitmap
+ // addresses to work.
+ throw("heapArenaBitmapBytes not a power of 2")
+ }
+
+ // Check physPageSize.
+ if physPageSize == 0 {
+ // The OS init code failed to fetch the physical page size.
+ throw("failed to get system page size")
+ }
+ if physPageSize > maxPhysPageSize {
+ print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
+ throw("bad system page size")
+ }
+ if physPageSize < minPhysPageSize {
+ print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
+ throw("bad system page size")
+ }
+ if physPageSize&(physPageSize-1) != 0 {
+ print("system page size (", physPageSize, ") must be a power of 2\n")
+ throw("bad system page size")
+ }
+ if physHugePageSize&(physHugePageSize-1) != 0 {
+ print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
+ throw("bad system huge page size")
+ }
+ if physHugePageSize > maxPhysHugePageSize {
+ // physHugePageSize is greater than the maximum supported huge page size.
+ // Don't throw here, like in the other cases, since a system configured
+ // in this way isn't wrong, we just don't have the code to support them.
+ // Instead, silently set the huge page size to zero.
+ physHugePageSize = 0
+ }
+ if physHugePageSize != 0 {
+ // Since physHugePageSize is a power of 2, it suffices to increase
+ // physHugePageShift until 1<<physHugePageShift == physHugePageSize.
+ for 1<<physHugePageShift != physHugePageSize {
+ physHugePageShift++
+ }
+ }
+ if pagesPerArena%pagesPerSpanRoot != 0 {
+ print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n")
+ throw("bad pagesPerSpanRoot")
+ }
+ if pagesPerArena%pagesPerReclaimerChunk != 0 {
+ print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
+ throw("bad pagesPerReclaimerChunk")
+ }
+
+ // Initialize the heap.
+ mheap_.init()
+ mcache0 = allocmcache()
+ lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas)
+ lockInit(&profInsertLock, lockRankProfInsert)
+ lockInit(&profBlockLock, lockRankProfBlock)
+ lockInit(&profMemActiveLock, lockRankProfMemActive)
+ for i := range profMemFutureLock {
+ lockInit(&profMemFutureLock[i], lockRankProfMemFuture)
+ }
+ lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
+
+ // Create initial arena growth hints.
+ if goarch.PtrSize == 8 {
+ // On a 64-bit machine, we pick the following hints
+ // because:
+ //
+ // 1. Starting from the middle of the address space
+ // makes it easier to grow out a contiguous range
+ // without running in to some other mapping.
+ //
+ // 2. This makes Go heap addresses more easily
+ // recognizable when debugging.
+ //
+ // 3. Stack scanning in gccgo is still conservative,
+ // so it's important that addresses be distinguishable
+ // from other data.
+ //
+ // Starting at 0x00c0 means that the valid memory addresses
+ // will begin with 0x00c0, 0x00c1, ...
+ // In little-endian, that's c0 00, c1 00, ... None of those are valid
+ // UTF-8 sequences, and they are otherwise as far away from
+ // ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
+ // addresses. An earlier attempt to use 0x11f8 caused out of memory errors
+ // on OS X during thread allocations. 0x00c0 causes conflicts with
+ // AddressSanitizer which reserves all memory up to 0x0100.
+ // These choices reduce the odds of a conservative garbage collector
+ // not collecting memory because some non-pointer block of memory
+ // had a bit pattern that matched a memory address.
+ //
+ // However, on arm64, we ignore all this advice above and slam the
+ // allocation at 0x40 << 32 because when using 4k pages with 3-level
+ // translation buffers, the user address space is limited to 39 bits.
+ // On ios/arm64, the address space is even smaller.
+ //
+ // On AIX, mmap starts at 0x0A00000000000000 for 64-bit
+ // processes.
+ for i := 0x7f; i >= 0; i-- {
+ var p uintptr
+ switch {
+ case raceenabled:
+ // The TSAN runtime requires the heap
+ // to be in the range [0x00c000000000,
+ // 0x00e000000000).
+ p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
+ if p >= uintptrMask&0x00e000000000 {
+ continue
+ }
+ case GOARCH == "arm64" && GOOS == "ios":
+ p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
+ case GOARCH == "arm64":
+ p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
+ case GOOS == "aix":
+ if i == 0 {
+ // We don't use addresses directly after 0x0A00000000000000
+ // to avoid collisions with others mmaps done by non-go programs.
+ continue
+ }
+ p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
+ default:
+ p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
+ }
+ hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
+ hint.addr = p
+ hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
+ }
+ } else {
+ // On a 32-bit machine, we're much more concerned
+ // about keeping the usable heap contiguous.
+ // Hence:
+ //
+ // 1. We reserve space for all heapArenas up front so
+ // they don't get interleaved with the heap. They're
+ // ~258MB, so this isn't too bad. (We could reserve a
+ // smaller amount of space up front if this is a
+ // problem.)
+ //
+ // 2. We hint the heap to start right above the end of
+ // the binary so we have the best chance of keeping it
+ // contiguous.
+ //
+ // 3. We try to stake out a reasonably large initial
+ // heap reservation.
+
+ const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
+ meta := uintptr(sysReserve(nil, arenaMetaSize))
+ if meta != 0 {
+ mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
+ }
+
+ // We want to start the arena low, but if we're linked
+ // against C code, it's possible global constructors
+ // have called malloc and adjusted the process' brk.
+ // Query the brk so we can avoid trying to map the
+ // region over it (which will cause the kernel to put
+ // the region somewhere else, likely at a high
+ // address).
+ procBrk := sbrk0()
+
+ // If we ask for the end of the data segment but the
+ // operating system requires a little more space
+ // before we can start allocating, it will give out a
+ // slightly higher pointer. Except QEMU, which is
+ // buggy, as usual: it won't adjust the pointer
+ // upward. So adjust it upward a little bit ourselves:
+ // 1/4 MB to get away from the running binary image.
+ p := firstmoduledata.end
+ if p < procBrk {
+ p = procBrk
+ }
+ if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
+ p = mheap_.heapArenaAlloc.end
+ }
+ p = alignUp(p+(256<<10), heapArenaBytes)
+ // Because we're worried about fragmentation on
+ // 32-bit, we try to make a large initial reservation.
+ arenaSizes := []uintptr{
+ 512 << 20,
+ 256 << 20,
+ 128 << 20,
+ }
+ for _, arenaSize := range arenaSizes {
+ a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
+ if a != nil {
+ mheap_.arena.init(uintptr(a), size, false)
+ p = mheap_.arena.end // For hint below
+ break
+ }
+ }
+ hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
+ hint.addr = p
+ hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
+ }
+}
+
+// sysAlloc allocates heap arena space for at least n bytes. The
+// returned pointer is always heapArenaBytes-aligned and backed by
+// h.arenas metadata. The returned size is always a multiple of
+// heapArenaBytes. sysAlloc returns nil on failure.
+// There is no corresponding free function.
+//
+// sysAlloc returns a memory region in the Reserved state. This region must
+// be transitioned to Prepared and then Ready before use.
+//
+// h must be locked.
+func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
+ assertLockHeld(&h.lock)
+
+ n = alignUp(n, heapArenaBytes)
+
+ // First, try the arena pre-reservation.
+ // Newly-used mappings are considered released.
+ v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased)
+ if v != nil {
+ size = n
+ goto mapped
+ }
+
+ // Try to grow the heap at a hint address.
+ for h.arenaHints != nil {
+ hint := h.arenaHints
+ p := hint.addr
+ if hint.down {
+ p -= n
+ }
+ if p+n < p {
+ // We can't use this, so don't ask.
+ v = nil
+ } else if arenaIndex(p+n-1) >= 1<<arenaBits {
+ // Outside addressable heap. Can't use.
+ v = nil
+ } else {
+ v = sysReserve(unsafe.Pointer(p), n)
+ }
+ if p == uintptr(v) {
+ // Success. Update the hint.
+ if !hint.down {
+ p += n
+ }
+ hint.addr = p
+ size = n
+ break
+ }
+ // Failed. Discard this hint and try the next.
+ //
+ // TODO: This would be cleaner if sysReserve could be
+ // told to only return the requested address. In
+ // particular, this is already how Windows behaves, so
+ // it would simplify things there.
+ if v != nil {
+ sysFreeOS(v, n)
+ }
+ h.arenaHints = hint.next
+ h.arenaHintAlloc.free(unsafe.Pointer(hint))
+ }
+
+ if size == 0 {
+ if raceenabled {
+ // The race detector assumes the heap lives in
+ // [0x00c000000000, 0x00e000000000), but we
+ // just ran out of hints in this region. Give
+ // a nice failure.
+ throw("too many address space collisions for -race mode")
+ }
+
+ // All of the hints failed, so we'll take any
+ // (sufficiently aligned) address the kernel will give
+ // us.
+ v, size = sysReserveAligned(nil, n, heapArenaBytes)
+ if v == nil {
+ return nil, 0
+ }
+
+ // Create new hints for extending this region.
+ hint := (*arenaHint)(h.arenaHintAlloc.alloc())
+ hint.addr, hint.down = uintptr(v), true
+ hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
+ hint = (*arenaHint)(h.arenaHintAlloc.alloc())
+ hint.addr = uintptr(v) + size
+ hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
+ }
+
+ // Check for bad pointers or pointers we can't use.
+ {
+ var bad string
+ p := uintptr(v)
+ if p+size < p {
+ bad = "region exceeds uintptr range"
+ } else if arenaIndex(p) >= 1<<arenaBits {
+ bad = "base outside usable address space"
+ } else if arenaIndex(p+size-1) >= 1<<arenaBits {
+ bad = "end outside usable address space"
+ }
+ if bad != "" {
+ // This should be impossible on most architectures,
+ // but it would be really confusing to debug.
+ print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
+ throw("memory reservation exceeds address space limit")
+ }
+ }
+
+ if uintptr(v)&(heapArenaBytes-1) != 0 {
+ throw("misrounded allocation in sysAlloc")
+ }
+
+mapped:
+ // Create arena metadata.
+ for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
+ l2 := h.arenas[ri.l1()]
+ if l2 == nil {
+ // Allocate an L2 arena map.
+ //
+ // Use sysAllocOS instead of sysAlloc or persistentalloc because there's no
+ // statistic we can comfortably account for this space in. With this structure,
+ // we rely on demand paging to avoid large overheads, but tracking which memory
+ // is paged in is too expensive. Trying to account for the whole region means
+ // that it will appear like an enormous memory overhead in statistics, even though
+ // it is not.
+ l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2)))
+ if l2 == nil {
+ throw("out of memory allocating heap arena map")
+ }
+ atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
+ }
+
+ if l2[ri.l2()] != nil {
+ throw("arena already initialized")
+ }
+ var r *heapArena
+ r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
+ if r == nil {
+ r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
+ if r == nil {
+ throw("out of memory allocating heap arena metadata")
+ }
+ }
+
+ // Add the arena to the arenas list.
+ if len(h.allArenas) == cap(h.allArenas) {
+ size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
+ if size == 0 {
+ size = physPageSize
+ }
+ newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
+ if newArray == nil {
+ throw("out of memory allocating allArenas")
+ }
+ oldSlice := h.allArenas
+ *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
+ copy(h.allArenas, oldSlice)
+ // Do not free the old backing array because
+ // there may be concurrent readers. Since we
+ // double the array each time, this can lead
+ // to at most 2x waste.
+ }
+ h.allArenas = h.allArenas[:len(h.allArenas)+1]
+ h.allArenas[len(h.allArenas)-1] = ri
+
+ // Store atomically just in case an object from the
+ // new heap arena becomes visible before the heap lock
+ // is released (which shouldn't happen, but there's
+ // little downside to this).
+ atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
+ }
+
+ // Tell the race detector about the new heap memory.
+ if raceenabled {
+ racemapshadow(v, size)
+ }
+
+ return
+}
+
+// sysReserveAligned is like sysReserve, but the returned pointer is
+// aligned to align bytes. It may reserve either n or n+align bytes,
+// so it returns the size that was reserved.
+func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
+ // Since the alignment is rather large in uses of this
+ // function, we're not likely to get it by chance, so we ask
+ // for a larger region and remove the parts we don't need.
+ retries := 0
+retry:
+ p := uintptr(sysReserve(v, size+align))
+ switch {
+ case p == 0:
+ return nil, 0
+ case p&(align-1) == 0:
+ // We got lucky and got an aligned region, so we can
+ // use the whole thing.
+ return unsafe.Pointer(p), size + align
+ case GOOS == "windows":
+ // On Windows we can't release pieces of a
+ // reservation, so we release the whole thing and
+ // re-reserve the aligned sub-region. This may race,
+ // so we may have to try again.
+ sysFreeOS(unsafe.Pointer(p), size+align)
+ p = alignUp(p, align)
+ p2 := sysReserve(unsafe.Pointer(p), size)
+ if p != uintptr(p2) {
+ // Must have raced. Try again.
+ sysFreeOS(p2, size)
+ if retries++; retries == 100 {
+ throw("failed to allocate aligned heap memory; too many retries")
+ }
+ goto retry
+ }
+ // Success.
+ return p2, size
+ default:
+ // Trim off the unaligned parts.
+ pAligned := alignUp(p, align)
+ sysFreeOS(unsafe.Pointer(p), pAligned-p)
+ end := pAligned + size
+ endLen := (p + size + align) - end
+ if endLen > 0 {
+ sysFreeOS(unsafe.Pointer(end), endLen)
+ }
+ return unsafe.Pointer(pAligned), size
+ }
+}
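+
+// A worked example of the default (non-Windows) trimming above, with made-up
+// numbers: if sysReserve returns p = 0x7003000000 for size = 64MB and
+// align = 64MB, then pAligned = 0x7004000000; the [p, pAligned) head and the
+// tail beyond pAligned+size are released, leaving one fully aligned region.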
+
+// base address for all 0-byte allocations
+var zerobase uintptr
+
+// nextFreeFast returns the next free object if one is quickly available.
+// Otherwise it returns 0.
+func nextFreeFast(s *mspan) gclinkptr {
+ theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
+ if theBit < 64 {
+ result := s.freeindex + uintptr(theBit)
+ if result < s.nelems {
+ freeidx := result + 1
+ if freeidx%64 == 0 && freeidx != s.nelems {
+ return 0
+ }
+ s.allocCache >>= uint(theBit + 1)
+ s.freeindex = freeidx
+ s.allocCount++
+ return gclinkptr(result*s.elemsize + s.base())
+ }
+ }
+ return 0
+}
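+
+// For illustration: if the low bits of s.allocCache are ...10110000, then
+// Ctz64 returns 4, the object at s.freeindex+4 is handed out, the cache is
+// shifted right by 5 bits, and s.freeindex advances past the returned slot,
+// so the next call scans only the remaining bits.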
+
+// nextFree returns the next free object from the cached span if one is available.
+// Otherwise it refills the cache with a span with an available object and
+// returns that object along with a flag indicating that this was a heavyweight
+// allocation. If it is a heavyweight allocation, the caller must determine
+// whether a new GC cycle needs to be started or, if the GC is active,
+// whether this goroutine needs to assist the GC.
+//
+// Must run in a non-preemptible context since otherwise the owner of
+// c could change.
+func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
+ s = c.alloc[spc]
+ shouldhelpgc = false
+ freeIndex := s.nextFreeIndex()
+ if freeIndex == s.nelems {
+ // The span is full.
+ if uintptr(s.allocCount) != s.nelems {
+ println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
+ throw("s.allocCount != s.nelems && freeIndex == s.nelems")
+ }
+ c.refill(spc)
+ shouldhelpgc = true
+ s = c.alloc[spc]
+
+ freeIndex = s.nextFreeIndex()
+ }
+
+ if freeIndex >= s.nelems {
+ throw("freeIndex is not valid")
+ }
+
+ v = gclinkptr(freeIndex*s.elemsize + s.base())
+ s.allocCount++
+ if uintptr(s.allocCount) > s.nelems {
+ println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
+ throw("s.allocCount > s.nelems")
+ }
+ return
+}
+
+// Allocate an object of size bytes.
+// Small objects are allocated from the per-P cache's free lists.
+// Large objects (> 32 kB) are allocated straight from the heap.
+func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+
+ if size == 0 {
+ return unsafe.Pointer(&zerobase)
+ }
+ userSize := size
+ if asanenabled {
+ // As in the ASAN runtime library, where malloc() allocates extra memory,
+ // the redzone, around the user-requested memory region and marks the
+ // redzones as unaddressable, we perform the same operations in Go to
+ // detect overflows and underflows.
+ size += computeRZlog(size)
+ }
+
+ if debug.malloc {
+ if debug.sbrk != 0 {
+ align := uintptr(16)
+ if typ != nil {
+ // TODO(austin): This should be just
+ // align = uintptr(typ.align)
+ // but that's only 4 on 32-bit platforms,
+ // even if there's a uint64 field in typ (see #599).
+ // This causes 64-bit atomic accesses to panic.
+ // Hence, we use stricter alignment that matches
+ // the normal allocator better.
+ if size&7 == 0 {
+ align = 8
+ } else if size&3 == 0 {
+ align = 4
+ } else if size&1 == 0 {
+ align = 2
+ } else {
+ align = 1
+ }
+ }
+ return persistentalloc(size, align, &memstats.other_sys)
+ }
+
+ if inittrace.active && inittrace.id == getg().goid {
+ // Init functions are executed sequentially in a single goroutine.
+ inittrace.allocs += 1
+ }
+ }
+
+ // assistG is the G to charge for this allocation, or nil if
+ // GC is not currently active.
+ var assistG *g
+ if gcBlackenEnabled != 0 {
+ // Charge the current user G for this allocation.
+ assistG = getg()
+ if assistG.m.curg != nil {
+ assistG = assistG.m.curg
+ }
+ // Charge the allocation against the G. We'll account
+ // for internal fragmentation at the end of mallocgc.
+ assistG.gcAssistBytes -= int64(size)
+
+ if assistG.gcAssistBytes < 0 {
+ // This G is in debt. Assist the GC to correct
+ // this before allocating. This must happen
+ // before disabling preemption.
+ gcAssistAlloc(assistG)
+ }
+ }
+
+ // Set mp.mallocing to keep from being preempted by GC.
+ mp := acquirem()
+ if mp.mallocing != 0 {
+ throw("malloc deadlock")
+ }
+ if mp.gsignal == getg() {
+ throw("malloc during signal")
+ }
+ mp.mallocing = 1
+
+ shouldhelpgc := false
+ dataSize := userSize
+ c := getMCache(mp)
+ if c == nil {
+ throw("mallocgc called without a P or outside bootstrapping")
+ }
+ var span *mspan
+ var x unsafe.Pointer
+ noscan := typ == nil || typ.ptrdata == 0
+ // In some cases block zeroing can profitably (for latency reduction purposes)
+ // be delayed till preemption is possible; delayedZeroing tracks that state.
+ delayedZeroing := false
+ if size <= maxSmallSize {
+ if noscan && size < maxTinySize {
+ // Tiny allocator.
+ //
+ // The tiny allocator combines several tiny allocation requests
+ // into a single memory block. The resulting memory block
+ // is freed when all subobjects are unreachable. The subobjects
+ // must be noscan (have no pointers); this ensures that
+ // the amount of potentially wasted memory is bounded.
+ //
+ // The size of the memory block used for combining (maxTinySize) is tunable.
+ // The current setting is 16 bytes, which relates to 2x worst-case memory
+ // wastage (when all but one subobject are unreachable).
+ // 8 bytes would result in no wastage at all, but provides fewer
+ // opportunities for combining.
+ // 32 bytes provides more opportunities for combining,
+ // but can lead to 4x worst-case wastage.
+ // The best-case saving is 8x regardless of block size.
+ //
+ // Objects obtained from the tiny allocator must not be freed explicitly.
+ // So when an object will be freed explicitly, we ensure that
+ // its size >= maxTinySize.
+ //
+ // SetFinalizer has a special case for objects potentially coming
+ // from the tiny allocator; in such a case, it allows setting finalizers
+ // for an inner byte of a memory block.
+ //
+ // The main targets of the tiny allocator are small strings and
+ // standalone escaping variables. On a json benchmark
+ // the allocator reduces the number of allocations by ~12% and
+ // reduces heap size by ~20%.
+ off := c.tinyoffset
+ // Align tiny pointer for required (conservative) alignment.
+ if size&7 == 0 {
+ off = alignUp(off, 8)
+ } else if goarch.PtrSize == 4 && size == 12 {
+ // Conservatively align 12-byte objects to 8 bytes on 32-bit
+ // systems so that objects whose first field is a 64-bit
+ // value are aligned to 8 bytes and do not cause a fault on
+ // atomic access. See issue 37262.
+ // TODO(mknyszek): Remove this workaround if/when issue 36606
+ // is resolved.
+ off = alignUp(off, 8)
+ } else if size&3 == 0 {
+ off = alignUp(off, 4)
+ } else if size&1 == 0 {
+ off = alignUp(off, 2)
+ }
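+ // For example, a 12-byte request on a 64-bit system falls through to
+ // the size&3 == 0 branch and rounds off up to a multiple of 4; on a
+ // 32-bit system the size == 12 case above rounds it to 8 instead.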
+ if off+size <= maxTinySize && c.tiny != 0 {
+ // The object fits into existing tiny block.
+ x = unsafe.Pointer(c.tiny + off)
+ c.tinyoffset = off + size
+ c.tinyAllocs++
+ mp.mallocing = 0
+ releasem(mp)
+ return x
+ }
+ // Allocate a new maxTinySize block.
+ span = c.alloc[tinySpanClass]
+ v := nextFreeFast(span)
+ if v == 0 {
+ v, span, shouldhelpgc = c.nextFree(tinySpanClass)
+ }
+ x = unsafe.Pointer(v)
+ (*[2]uint64)(x)[0] = 0
+ (*[2]uint64)(x)[1] = 0
+ // See if we need to replace the existing tiny block with the new one
+ // based on amount of remaining free space.
+ if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
+ // Note: disabled when race detector is on, see comment near end of this function.
+ c.tiny = uintptr(x)
+ c.tinyoffset = size
+ }
+ size = maxTinySize
+ } else {
+ var sizeclass uint8
+ if size <= smallSizeMax-8 {
+ sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
+ } else {
+ sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
+ }
+ size = uintptr(class_to_size[sizeclass])
+ spc := makeSpanClass(sizeclass, noscan)
+ span = c.alloc[spc]
+ v := nextFreeFast(span)
+ if v == 0 {
+ v, span, shouldhelpgc = c.nextFree(spc)
+ }
+ x = unsafe.Pointer(v)
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(unsafe.Pointer(v), size)
+ }
+ }
+ } else {
+ shouldhelpgc = true
+ // For large allocations, keep track of zeroed state so that
+ // bulk zeroing can happen later in a preemptible context.
+ span = c.allocLarge(size, noscan)
+ span.freeindex = 1
+ span.allocCount = 1
+ size = span.elemsize
+ x = unsafe.Pointer(span.base())
+ if needzero && span.needzero != 0 {
+ if noscan {
+ delayedZeroing = true
+ } else {
+ memclrNoHeapPointers(x, size)
+ // We've in theory cleared almost the whole span here,
+ // and could take the extra step of actually clearing
+ // the whole thing. However, don't. Any GC bits for the
+ // uncleared parts will be zero, and it's just going to
+ // be needzero = 1 once freed anyway.
+ }
+ }
+ }
+
+ var scanSize uintptr
+ if !noscan {
+ heapBitsSetType(uintptr(x), size, dataSize, typ)
+ if dataSize > typ.size {
+ // Array allocation. If there are any
+ // pointers, GC has to scan to the last
+ // element.
+ if typ.ptrdata != 0 {
+ scanSize = dataSize - typ.size + typ.ptrdata
+ }
+ } else {
+ scanSize = typ.ptrdata
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Ensure that the stores above that initialize x to
+ // type-safe memory and set the heap bits occur before
+ // the caller can make x observable to the garbage
+ // collector. Otherwise, on weakly ordered machines,
+ // the garbage collector could follow a pointer to x,
+ // but see uninitialized memory or stale heap bits.
+ publicationBarrier()
+
+ // Allocate black during GC.
+ // All slots hold nil so no scanning is needed.
+ // This may be racing with GC so do it atomically if there can be
+ // a race marking the bit.
+ if gcphase != _GCoff {
+ gcmarknewobject(span, uintptr(x), size, scanSize)
+ }
+
+ if raceenabled {
+ racemalloc(x, size)
+ }
+
+ if msanenabled {
+ msanmalloc(x, size)
+ }
+
+ if asanenabled {
+ // We should only read/write the memory with the size asked by the user.
+ // The rest of the allocated memory should be poisoned, so that we can report
+ // errors when accessing poisoned memory.
+ // The allocated memory is larger than the requested userSize; it also
+ // includes the redzone and some other padding bytes.
+ rzBeg := unsafe.Add(x, userSize)
+ asanpoison(rzBeg, size-userSize)
+ asanunpoison(x, userSize)
+ }
+
+ if rate := MemProfileRate; rate > 0 {
+ // Note cache c only valid while m acquired; see #47302
+ if rate != 1 && size < c.nextSample {
+ c.nextSample -= size
+ } else {
+ profilealloc(mp, x, size)
+ }
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+	// Pointer-free data can be zeroed late in a context where preemption can occur.
+ // x will keep the memory alive.
+ if delayedZeroing {
+ if !noscan {
+ throw("delayed zeroing on data that may contain pointers")
+ }
+ memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
+ }
+
+ if debug.malloc {
+ if debug.allocfreetrace != 0 {
+ tracealloc(x, size, typ)
+ }
+
+ if inittrace.active && inittrace.id == getg().goid {
+ // Init functions are executed sequentially in a single goroutine.
+ inittrace.bytes += uint64(size)
+ }
+ }
+
+ if assistG != nil {
+ // Account for internal fragmentation in the assist
+ // debt now that we know it.
+ assistG.gcAssistBytes -= int64(size - dataSize)
+ }
+
+ if shouldhelpgc {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+
+ if raceenabled && noscan && dataSize < maxTinySize {
+ // Pad tinysize allocations so they are aligned with the end
+ // of the tinyalloc region. This ensures that any arithmetic
+ // that goes off the top end of the object will be detectable
+ // by checkptr (issue 38872).
+ // Note that we disable tinyalloc when raceenabled for this to work.
+ // TODO: This padding is only performed when the race detector
+ // is enabled. It would be nice to enable it if any package
+ // was compiled with checkptr, but there's no easy way to
+ // detect that (especially at compile time).
+ // TODO: enable this padding for all allocations, not just
+ // tinyalloc ones. It's tricky because of pointer maps.
+ // Maybe just all noscan objects?
+ x = add(x, size-dataSize)
+ }
+
+ return x
+}
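
The tiny allocator above packs several small noscan objects into one 16-byte block by rounding tinyoffset up to each object's natural alignment and bumping it. A minimal standalone sketch of that offset math (hypothetical names, simplified alignment rules; the real code also special-cases size 12 on 32-bit platforms):

```go
package main

import "fmt"

const maxTinySize = 16

// tinyAlloc mimics the runtime's tiny-allocator bookkeeping: one 16-byte
// block plus an offset that is bumped for every object placed in it.
type tinyAlloc struct {
	offset uintptr
}

// alloc rounds the offset up to the object's natural alignment and reports
// whether the object still fits in the current block. The runtime grabs a
// fresh block when it doesn't.
func (t *tinyAlloc) alloc(size uintptr) (off uintptr, ok bool) {
	o := t.offset
	switch {
	case size&7 == 0:
		o = (o + 7) &^ 7
	case size&3 == 0:
		o = (o + 3) &^ 3
	case size&1 == 0:
		o = (o + 1) &^ 1
	}
	if o+size > maxTinySize {
		return 0, false
	}
	t.offset = o + size
	return o, true
}

func main() {
	var t tinyAlloc
	for _, sz := range []uintptr{1, 8, 4} {
		off, ok := t.alloc(sz)
		fmt.Printf("size %d -> offset %d, fits=%v\n", sz, off, ok)
	}
	// size 1 -> offset 0, size 8 -> offset 8 (rounded up), size 4 doesn't fit.
}
```
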
+
+// memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
+// on chunks of the buffer to be zeroed, with opportunities for preemption
+// along the way. memclrNoHeapPointers contains no safepoints and also
+// cannot be preemptively scheduled, so this provides a still-efficient
+// block clear that can also be preempted at a reasonable granularity.
+//
+// Use this with care; if the data being cleared is tagged to contain
+// pointers, this allows the GC to run before it is all cleared.
+func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
+ v := uintptr(x)
+	// Chunk size chosen by benchmarking: 128k is too small, 512k is too large.
+ const chunkBytes = 256 * 1024
+ vsize := v + size
+ for voff := v; voff < vsize; voff = voff + chunkBytes {
+ if getg().preempt {
+ // may hold locks, e.g., profiling
+ goschedguarded()
+ }
+ // clear min(avail, lump) bytes
+ n := vsize - voff
+ if n > chunkBytes {
+ n = chunkBytes
+ }
+ memclrNoHeapPointers(unsafe.Pointer(voff), n)
+ }
+}
+
+// newobject is the implementation of the new builtin.
+// The compiler (both frontend and SSA backend) knows the signature
+// of this function.
+func newobject(typ *_type) unsafe.Pointer {
+ return mallocgc(typ.size, typ, true)
+}
+
+//go:linkname reflect_unsafe_New reflect.unsafe_New
+func reflect_unsafe_New(typ *_type) unsafe.Pointer {
+ return mallocgc(typ.size, typ, true)
+}
+
+//go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
+func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
+ return mallocgc(typ.size, typ, true)
+}
+
+// newarray allocates an array of n elements of type typ.
+func newarray(typ *_type, n int) unsafe.Pointer {
+ if n == 1 {
+ return mallocgc(typ.size, typ, true)
+ }
+ mem, overflow := math.MulUintptr(typ.size, uintptr(n))
+ if overflow || mem > maxAlloc || n < 0 {
+ panic(plainError("runtime: allocation size out of range"))
+ }
+ return mallocgc(mem, typ, true)
+}
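
newarray rejects element-count/size products that overflow or exceed maxAlloc before touching the allocator. A hedged user-space analogue of that overflow check, assuming a 64-bit platform (math/bits.Mul plays the role of the runtime-internal math.MulUintptr):

```go
package main

import (
	"fmt"
	"math/bits"
)

// mulOverflows mirrors the guard newarray performs via the runtime-internal
// math.MulUintptr: multiply and report whether the product overflowed.
func mulOverflows(a, b uintptr) (uintptr, bool) {
	hi, lo := bits.Mul(uint(a), uint(b))
	return uintptr(lo), hi != 0
}

func main() {
	const elemSize = 1 << 20 // hypothetical 1 MiB element type
	mem, overflow := mulOverflows(elemSize, 1<<45)
	fmt.Println(mem, overflow) // 2^65 doesn't fit in 64 bits: overflow is true
}
```
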
+
+//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
+func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
+ return newarray(typ, n)
+}
+
+func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
+ c := getMCache(mp)
+ if c == nil {
+ throw("profilealloc called without a P or outside bootstrapping")
+ }
+ c.nextSample = nextSample()
+ mProf_Malloc(x, size)
+}
+
+// nextSample returns the next sampling point for heap profiling. The goal is
+// to sample allocations on average every MemProfileRate bytes, but with a
+// completely random distribution over the allocation timeline; this
+// corresponds to a Poisson process with parameter MemProfileRate. In Poisson
+// processes, the distance between two samples follows the exponential
+// distribution (exp(MemProfileRate)), so the best return value is a random
+// number taken from an exponential distribution whose mean is MemProfileRate.
+func nextSample() uintptr {
+ if MemProfileRate == 1 {
+ // Callers assign our return value to
+ // mcache.next_sample, but next_sample is not used
+ // when the rate is 1. So avoid the math below and
+ // just return something.
+ return 0
+ }
+ if GOOS == "plan9" {
+ // Plan 9 doesn't support floating point in note handler.
+ if g := getg(); g == g.m.gsignal {
+ return nextSampleNoFP()
+ }
+ }
+
+ return uintptr(fastexprand(MemProfileRate))
+}
+
+// fastexprand returns a random number from an exponential distribution with
+// the specified mean.
+func fastexprand(mean int) int32 {
+ // Avoid overflow. Maximum possible step is
+ // -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
+ switch {
+ case mean > 0x7000000:
+ mean = 0x7000000
+ case mean == 0:
+ return 0
+ }
+
+ // Take a random sample of the exponential distribution exp(-mean*x).
+ // The probability distribution function is mean*exp(-mean*x), so the CDF is
+ // p = 1 - exp(-mean*x), so
+ // q = 1 - p == exp(-mean*x)
+ // log_e(q) = -mean*x
+ // -log_e(q)/mean = x
+ // x = -log_e(q) * mean
+ // x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency
+ const randomBitCount = 26
+ q := fastrandn(1<<randomBitCount) + 1
+ qlog := fastlog2(float64(q)) - randomBitCount
+ if qlog > 0 {
+ qlog = 0
+ }
+ const minusLog2 = -0.6931471805599453 // -ln(2)
+ return int32(qlog*(minusLog2*float64(mean))) + 1
+}
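
The derivation in the comment is ordinary inverse-CDF sampling; fastexprand merely replaces log_e with a cheap fixed-point log_2. A sketch of the same sampling in plain floating point (512 KiB is the default MemProfileRate; the empirical mean should land near it):

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
)

// expSample draws from an exponential distribution with the given mean by
// inverse-CDF sampling: x = -ln(q) * mean for q uniform in (0, 1]. This is
// the identity fastexprand computes with a fast base-2 logarithm.
func expSample(mean float64) float64 {
	q := 1 - rand.Float64() // uniform in (0, 1], so Log never sees 0
	return -math.Log(q) * mean
}

func main() {
	const mean = 512 * 1024 // the default MemProfileRate
	var sum float64
	const n = 200000
	for i := 0; i < n; i++ {
		sum += expSample(mean)
	}
	fmt.Printf("empirical mean %.0f, want ~%d\n", sum/n, mean)
}
```
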
+
+// nextSampleNoFP is similar to nextSample, but uses older,
+// simpler code to avoid floating point.
+func nextSampleNoFP() uintptr {
+ // Set first allocation sample size.
+ rate := MemProfileRate
+ if rate > 0x3fffffff { // make 2*rate not overflow
+ rate = 0x3fffffff
+ }
+ if rate != 0 {
+ return uintptr(fastrandn(uint32(2 * rate)))
+ }
+ return 0
+}
+
+type persistentAlloc struct {
+ base *notInHeap
+ off uintptr
+}
+
+var globalAlloc struct {
+ mutex
+ persistentAlloc
+}
+
+// persistentChunkSize is the number of bytes we allocate when we grow
+// a persistentAlloc.
+const persistentChunkSize = 256 << 10
+
+// persistentChunks is a list of all the persistent chunks we have
+// allocated. The list is maintained through the first word in the
+// persistent chunk. This is updated atomically.
+var persistentChunks *notInHeap
+
+// Wrapper around sysAlloc that can allocate small chunks.
+// There is no associated free operation.
+// Intended for things like function/type/debug-related persistent data.
+// If align is 0, the default alignment (currently 8) is used.
+// The returned memory will be zeroed.
+// sysStat must be non-nil.
+//
+// Consider marking persistentalloc'd types go:notinheap.
+func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
+ var p *notInHeap
+ systemstack(func() {
+ p = persistentalloc1(size, align, sysStat)
+ })
+ return unsafe.Pointer(p)
+}
+
+// Must run on system stack because stack growth can (re)invoke it.
+// See issue 9174.
+//
+//go:systemstack
+func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
+ const (
+ maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
+ )
+
+ if size == 0 {
+ throw("persistentalloc: size == 0")
+ }
+ if align != 0 {
+ if align&(align-1) != 0 {
+ throw("persistentalloc: align is not a power of 2")
+ }
+ if align > _PageSize {
+ throw("persistentalloc: align is too large")
+ }
+ } else {
+ align = 8
+ }
+
+ if size >= maxBlock {
+ return (*notInHeap)(sysAlloc(size, sysStat))
+ }
+
+ mp := acquirem()
+ var persistent *persistentAlloc
+ if mp != nil && mp.p != 0 {
+ persistent = &mp.p.ptr().palloc
+ } else {
+ lock(&globalAlloc.mutex)
+ persistent = &globalAlloc.persistentAlloc
+ }
+ persistent.off = alignUp(persistent.off, align)
+ if persistent.off+size > persistentChunkSize || persistent.base == nil {
+ persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
+ if persistent.base == nil {
+ if persistent == &globalAlloc.persistentAlloc {
+ unlock(&globalAlloc.mutex)
+ }
+ throw("runtime: cannot allocate memory")
+ }
+
+ // Add the new chunk to the persistentChunks list.
+ for {
+ chunks := uintptr(unsafe.Pointer(persistentChunks))
+ *(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
+ if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
+ break
+ }
+ }
+ persistent.off = alignUp(goarch.PtrSize, align)
+ }
+ p := persistent.base.add(persistent.off)
+ persistent.off += size
+ releasem(mp)
+ if persistent == &globalAlloc.persistentAlloc {
+ unlock(&globalAlloc.mutex)
+ }
+
+ if sysStat != &memstats.other_sys {
+ sysStat.add(int64(size))
+ memstats.other_sys.add(-int64(size))
+ }
+ return p
+}
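
The placement arithmetic in persistentalloc1 relies on alignUp, the standard power-of-two round-up trick. A tiny self-contained version for reference (same semantics as the runtime helper, assuming align is a power of two):

```go
package main

import "fmt"

// alignUp rounds n up to the next multiple of align, which must be a power
// of two: the same bit trick the runtime helper of that name uses.
func alignUp(n, align uintptr) uintptr {
	return (n + align - 1) &^ (align - 1)
}

func main() {
	fmt.Println(alignUp(13, 8))  // 16
	fmt.Println(alignUp(16, 8))  // 16
	fmt.Println(alignUp(17, 16)) // 32
}
```
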
+
+// inPersistentAlloc reports whether p points to memory allocated by
+// persistentalloc. This must be nosplit because it is called by the
+// cgo checker code, which is called by the write barrier code.
+//
+//go:nosplit
+func inPersistentAlloc(p uintptr) bool {
+ chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
+ for chunk != 0 {
+ if p >= chunk && p < chunk+persistentChunkSize {
+ return true
+ }
+ chunk = *(*uintptr)(unsafe.Pointer(chunk))
+ }
+ return false
+}
+
+// linearAlloc is a simple linear allocator that pre-reserves a region
+// of memory and then optionally maps that region into the Ready state
+// as needed.
+//
+// The caller is responsible for locking.
+type linearAlloc struct {
+ next uintptr // next free byte
+ mapped uintptr // one byte past end of mapped space
+ end uintptr // end of reserved space
+
+ mapMemory bool // transition memory from Reserved to Ready if true
+}
+
+func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
+ if base+size < base {
+ // Chop off the last byte. The runtime isn't prepared
+ // to deal with situations where the bounds could overflow.
+ // Leave that memory reserved, though, so we don't map it
+ // later.
+ size -= 1
+ }
+ l.next, l.mapped = base, base
+ l.end = base + size
+ l.mapMemory = mapMemory
+}
+
+func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
+ p := alignUp(l.next, align)
+ if p+size > l.end {
+ return nil
+ }
+ l.next = p + size
+ if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
+ if l.mapMemory {
+ // Transition from Reserved to Prepared to Ready.
+ n := pEnd - l.mapped
+ sysMap(unsafe.Pointer(l.mapped), n, sysStat)
+ sysUsed(unsafe.Pointer(l.mapped), n, n)
+ }
+ l.mapped = pEnd
+ }
+ return unsafe.Pointer(p)
+}
+
+// notInHeap is off-heap memory allocated by a lower-level allocator
+// like sysAlloc or persistentAlloc.
+//
+// In general, it's better to use real types marked as go:notinheap,
+// but this serves as a generic type for situations where that isn't
+// possible (like in the allocators).
+//
+// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
+//
+//go:notinheap
+type notInHeap struct{}
+
+func (p *notInHeap) add(bytes uintptr) *notInHeap {
+ return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
+}
+
+// computeRZlog computes the size of the redzone.
+// Refer to the implementation in compiler-rt.
+func computeRZlog(userSize uintptr) uintptr {
+ switch {
+ case userSize <= (64 - 16):
+ return 16 << 0
+ case userSize <= (128 - 32):
+ return 16 << 1
+ case userSize <= (512 - 64):
+ return 16 << 2
+ case userSize <= (4096 - 128):
+ return 16 << 3
+ case userSize <= (1<<14)-256:
+ return 16 << 4
+ case userSize <= (1<<15)-512:
+ return 16 << 5
+ case userSize <= (1<<16)-1024:
+ return 16 << 6
+ default:
+ return 16 << 7
+ }
+}
diff --git a/contrib/go/_std_1.19/src/runtime/map.go b/contrib/go/_std_1.19/src/runtime/map.go
new file mode 100644
index 0000000000..65be4727fd
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/map.go
@@ -0,0 +1,1418 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// This file contains the implementation of Go's map type.
+//
+// A map is just a hash table. The data is arranged
+// into an array of buckets. Each bucket contains up to
+// 8 key/elem pairs. The low-order bits of the hash are
+// used to select a bucket. Each bucket contains a few
+// high-order bits of each hash to distinguish the entries
+// within a single bucket.
+//
+// If more than 8 keys hash to a bucket, we chain on
+// extra buckets.
+//
+// When the hashtable grows, we allocate a new array
+// of buckets twice as big. Buckets are incrementally
+// copied from the old bucket array to the new bucket array.
+//
+// Map iterators walk through the array of buckets and
+// return the keys in walk order (bucket #, then overflow
+// chain order, then bucket index). To maintain iteration
+// semantics, we never move keys within their bucket (if
+// we did, keys might be returned 0 or 2 times). When
+// growing the table, iterators keep iterating through the
+// old table and must check the new table if the bucket
+// they are iterating through has been moved ("evacuated")
+// to the new table.
+
+// Picking loadFactor: too large and we have lots of overflow
+// buckets, too small and we waste a lot of space. I wrote
+// a simple program to check some stats for different loads:
+// (64-bit, 8 byte keys and elems)
+// loadFactor %overflow bytes/entry hitprobe missprobe
+// 4.00 2.13 20.77 3.00 4.00
+// 4.50 4.05 17.30 3.25 4.50
+// 5.00 6.85 14.77 3.50 5.00
+// 5.50 10.55 12.94 3.75 5.50
+// 6.00 15.27 11.67 4.00 6.00
+// 6.50 20.90 10.79 4.25 6.50
+// 7.00 27.14 10.15 4.50 7.00
+// 7.50 34.03 9.73 4.75 7.50
+// 8.00 41.10 9.40 5.00 8.00
+//
+// %overflow = percentage of buckets which have an overflow bucket
+// bytes/entry = overhead bytes used per key/elem pair
+// hitprobe = # of entries to check when looking up a present key
+// missprobe = # of entries to check when looking up an absent key
+//
+// Keep in mind this data is for maximally loaded tables, i.e. just
+// before the table grows. Typical tables will be somewhat less loaded.
+
+import (
+ "internal/abi"
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "runtime/internal/math"
+ "unsafe"
+)
+
+const (
+ // Maximum number of key/elem pairs a bucket can hold.
+ bucketCntBits = 3
+ bucketCnt = 1 << bucketCntBits
+
+ // Maximum average load of a bucket that triggers growth is 6.5.
+ // Represent as loadFactorNum/loadFactorDen, to allow integer math.
+ loadFactorNum = 13
+ loadFactorDen = 2
+
+ // Maximum key or elem size to keep inline (instead of mallocing per element).
+ // Must fit in a uint8.
+ // Fast versions cannot handle big elems - the cutoff size for
+// fast versions in cmd/compile/internal/gc/walk.go must be at most this elem size.
+ maxKeySize = 128
+ maxElemSize = 128
+
+ // data offset should be the size of the bmap struct, but needs to be
+ // aligned correctly. For amd64p32 this means 64-bit alignment
+ // even though pointers are 32 bit.
+ dataOffset = unsafe.Offsetof(struct {
+ b bmap
+ v int64
+ }{}.v)
+
+ // Possible tophash values. We reserve a few possibilities for special marks.
+ // Each bucket (including its overflow buckets, if any) will have either all or none of its
+ // entries in the evacuated* states (except during the evacuate() method, which only happens
+ // during map writes and thus no one else can observe the map during that time).
+ emptyRest = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
+ emptyOne = 1 // this cell is empty
+ evacuatedX = 2 // key/elem is valid. Entry has been evacuated to first half of larger table.
+ evacuatedY = 3 // same as above, but evacuated to second half of larger table.
+ evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
+ minTopHash = 5 // minimum tophash for a normal filled cell.
+
+ // flags
+ iterator = 1 // there may be an iterator using buckets
+ oldIterator = 2 // there may be an iterator using oldbuckets
+ hashWriting = 4 // a goroutine is writing to the map
+ sameSizeGrow = 8 // the current map growth is to a new map of the same size
+
+ // sentinel bucket ID for iterator checks
+ noCheck = 1<<(8*goarch.PtrSize) - 1
+)
+
+// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
+func isEmpty(x uint8) bool {
+ return x <= emptyOne
+}
+
+// A header for a Go map.
+type hmap struct {
+ // Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
+ // Make sure this stays in sync with the compiler's definition.
+ count int // # live cells == size of map. Must be first (used by len() builtin)
+ flags uint8
+ B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
+ noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
+ hash0 uint32 // hash seed
+
+ buckets unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
+ oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
+ nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated)
+
+ extra *mapextra // optional fields
+}
+
+// mapextra holds fields that are not present on all maps.
+type mapextra struct {
+ // If both key and elem do not contain pointers and are inline, then we mark bucket
+ // type as containing no pointers. This avoids scanning such maps.
+ // However, bmap.overflow is a pointer. In order to keep overflow buckets
+ // alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
+ // overflow and oldoverflow are only used if key and elem do not contain pointers.
+ // overflow contains overflow buckets for hmap.buckets.
+ // oldoverflow contains overflow buckets for hmap.oldbuckets.
+	// The indirection allows us to store a pointer to the slice in hiter.
+ overflow *[]*bmap
+ oldoverflow *[]*bmap
+
+ // nextOverflow holds a pointer to a free overflow bucket.
+ nextOverflow *bmap
+}
+
+// A bucket for a Go map.
+type bmap struct {
+ // tophash generally contains the top byte of the hash value
+ // for each key in this bucket. If tophash[0] < minTopHash,
+ // tophash[0] is a bucket evacuation state instead.
+ tophash [bucketCnt]uint8
+ // Followed by bucketCnt keys and then bucketCnt elems.
+ // NOTE: packing all the keys together and then all the elems together makes the
+ // code a bit more complicated than alternating key/elem/key/elem/... but it allows
+ // us to eliminate padding which would be needed for, e.g., map[int64]int8.
+ // Followed by an overflow pointer.
+}
+
+// A hash iteration structure.
+// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
+// and reflect/value.go to match the layout of this structure.
+type hiter struct {
+ key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
+ elem unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
+ t *maptype
+ h *hmap
+ buckets unsafe.Pointer // bucket ptr at hash_iter initialization time
+ bptr *bmap // current bucket
+ overflow *[]*bmap // keeps overflow buckets of hmap.buckets alive
+ oldoverflow *[]*bmap // keeps overflow buckets of hmap.oldbuckets alive
+ startBucket uintptr // bucket iteration started at
+ offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
+ wrapped bool // already wrapped around from end of bucket array to beginning
+ B uint8
+ i uint8
+ bucket uintptr
+ checkBucket uintptr
+}
+
+// bucketShift returns 1<<b, optimized for code generation.
+func bucketShift(b uint8) uintptr {
+ // Masking the shift amount allows overflow checks to be elided.
+ return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
+}
+
+// bucketMask returns 1<<b - 1, optimized for code generation.
+func bucketMask(b uint8) uintptr {
+ return bucketShift(b) - 1
+}
+
+// tophash calculates the tophash value for hash.
+func tophash(hash uintptr) uint8 {
+ top := uint8(hash >> (goarch.PtrSize*8 - 8))
+ if top < minTopHash {
+ top += minTopHash
+ }
+ return top
+}
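
A worked example of the tophash computation, assuming a 64-bit uintptr: the top byte of the hash is kept, and anything below minTopHash is shifted up so real entries never collide with the evacuation sentinels (values are hypothetical):

```go
package main

import "fmt"

func main() {
	const minTopHash = 5
	hash := uintptr(0x03ab_cdef_0123_4567) // hypothetical hash value
	top := uint8(hash >> (64 - 8))         // keep the top byte: 0x03
	if top < minTopHash {
		top += minTopHash // 0x03 is in the sentinel range, becomes 0x08
	}
	fmt.Printf("tophash %#x\n", top)
}
```
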
+
+func evacuated(b *bmap) bool {
+ h := b.tophash[0]
+ return h > emptyOne && h < minTopHash
+}
+
+func (b *bmap) overflow(t *maptype) *bmap {
+ return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-goarch.PtrSize))
+}
+
+func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
+ *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-goarch.PtrSize)) = ovf
+}
+
+func (b *bmap) keys() unsafe.Pointer {
+ return add(unsafe.Pointer(b), dataOffset)
+}
+
+// incrnoverflow increments h.noverflow.
+// noverflow counts the number of overflow buckets.
+// This is used to trigger same-size map growth.
+// See also tooManyOverflowBuckets.
+// To keep hmap small, noverflow is a uint16.
+// When there are few buckets, noverflow is an exact count.
+// When there are many buckets, noverflow is an approximate count.
+func (h *hmap) incrnoverflow() {
+ // We trigger same-size map growth if there are
+ // as many overflow buckets as buckets.
+ // We need to be able to count to 1<<h.B.
+ if h.B < 16 {
+ h.noverflow++
+ return
+ }
+ // Increment with probability 1/(1<<(h.B-15)).
+ // When we reach 1<<15 - 1, we will have approximately
+ // as many overflow buckets as buckets.
+ mask := uint32(1)<<(h.B-15) - 1
+ // Example: if h.B == 18, then mask == 7,
+ // and fastrand & 7 == 0 with probability 1/8.
+ if fastrand()&mask == 0 {
+ h.noverflow++
+ }
+}
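
The probabilistic increment generalizes to any counter that must stay small: count each event with probability 1/2^k and scale the result back up. A standalone sketch of the estimate (k=3 corresponds to h.B == 18 in the code above):

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// Count each event with probability 1/8 (k=3), then scale back up.
	// A uint16 counter can now represent roughly 8x more events.
	const k = 3
	mask := uint32(1)<<k - 1
	var counter uint16
	const events = 80000
	for i := 0; i < events; i++ {
		if rand.Uint32()&mask == 0 {
			counter++
		}
	}
	fmt.Printf("counter=%d estimate=%d true=%d\n", counter, uint32(counter)<<k, events)
}
```
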
+
+func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
+ var ovf *bmap
+ if h.extra != nil && h.extra.nextOverflow != nil {
+ // We have preallocated overflow buckets available.
+ // See makeBucketArray for more details.
+ ovf = h.extra.nextOverflow
+ if ovf.overflow(t) == nil {
+ // We're not at the end of the preallocated overflow buckets. Bump the pointer.
+ h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
+ } else {
+ // This is the last preallocated overflow bucket.
+ // Reset the overflow pointer on this bucket,
+ // which was set to a non-nil sentinel value.
+ ovf.setoverflow(t, nil)
+ h.extra.nextOverflow = nil
+ }
+ } else {
+ ovf = (*bmap)(newobject(t.bucket))
+ }
+ h.incrnoverflow()
+ if t.bucket.ptrdata == 0 {
+ h.createOverflow()
+ *h.extra.overflow = append(*h.extra.overflow, ovf)
+ }
+ b.setoverflow(t, ovf)
+ return ovf
+}
+
+func (h *hmap) createOverflow() {
+ if h.extra == nil {
+ h.extra = new(mapextra)
+ }
+ if h.extra.overflow == nil {
+ h.extra.overflow = new([]*bmap)
+ }
+}
+
+func makemap64(t *maptype, hint int64, h *hmap) *hmap {
+ if int64(int(hint)) != hint {
+ hint = 0
+ }
+ return makemap(t, int(hint), h)
+}
+
+// makemap_small implements Go map creation for make(map[k]v) and
+// make(map[k]v, hint) when hint is known to be at most bucketCnt
+// at compile time and the map needs to be allocated on the heap.
+func makemap_small() *hmap {
+ h := new(hmap)
+ h.hash0 = fastrand()
+ return h
+}
+
+// makemap implements Go map creation for make(map[k]v, hint).
+// If the compiler has determined that the map or the first bucket
+// can be created on the stack, h and/or bucket may be non-nil.
+// If h != nil, the map can be created directly in h.
+// If h.buckets != nil, bucket pointed to can be used as the first bucket.
+func makemap(t *maptype, hint int, h *hmap) *hmap {
+ mem, overflow := math.MulUintptr(uintptr(hint), t.bucket.size)
+ if overflow || mem > maxAlloc {
+ hint = 0
+ }
+
+ // initialize Hmap
+ if h == nil {
+ h = new(hmap)
+ }
+ h.hash0 = fastrand()
+
+ // Find the size parameter B which will hold the requested # of elements.
+ // For hint < 0 overLoadFactor returns false since hint < bucketCnt.
+ B := uint8(0)
+ for overLoadFactor(hint, B) {
+ B++
+ }
+ h.B = B
+
+ // allocate initial hash table
+ // if B == 0, the buckets field is allocated lazily later (in mapassign)
+ // If hint is large zeroing this memory could take a while.
+ if h.B != 0 {
+ var nextOverflow *bmap
+ h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
+ if nextOverflow != nil {
+ h.extra = new(mapextra)
+ h.extra.nextOverflow = nextOverflow
+ }
+ }
+
+ return h
+}
+
+// makeBucketArray initializes a backing array for map buckets.
+// 1<<b is the minimum number of buckets to allocate.
+// dirtyalloc should either be nil or a bucket array previously
+// allocated by makeBucketArray with the same t and b parameters.
+// If dirtyalloc is nil, a new backing array will be allocated;
+// otherwise dirtyalloc will be cleared and reused as the backing array.
+func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
+ base := bucketShift(b)
+ nbuckets := base
+ // For small b, overflow buckets are unlikely.
+ // Avoid the overhead of the calculation.
+ if b >= 4 {
+ // Add on the estimated number of overflow buckets
+ // required to insert the median number of elements
+ // used with this value of b.
+ nbuckets += bucketShift(b - 4)
+ sz := t.bucket.size * nbuckets
+ up := roundupsize(sz)
+ if up != sz {
+ nbuckets = up / t.bucket.size
+ }
+ }
+
+ if dirtyalloc == nil {
+ buckets = newarray(t.bucket, int(nbuckets))
+ } else {
+ // dirtyalloc was previously generated by
+ // the above newarray(t.bucket, int(nbuckets))
+ // but may not be empty.
+ buckets = dirtyalloc
+ size := t.bucket.size * nbuckets
+ if t.bucket.ptrdata != 0 {
+ memclrHasPointers(buckets, size)
+ } else {
+ memclrNoHeapPointers(buckets, size)
+ }
+ }
+
+ if base != nbuckets {
+ // We preallocated some overflow buckets.
+ // To keep the overhead of tracking these overflow buckets to a minimum,
+ // we use the convention that if a preallocated overflow bucket's overflow
+ // pointer is nil, then there are more available by bumping the pointer.
+ // We need a safe non-nil pointer for the last overflow bucket; just use buckets.
+ nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
+ last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
+ last.setoverflow(t, (*bmap)(buckets))
+ }
+ return buckets, nextOverflow
+}
+
+// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
+// it will return a reference to the zero object for the elem type if
+// the key is not in the map.
+// NOTE: The returned pointer may keep the whole map live, so don't
+// hold onto it for very long.
+func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc()
+ pc := abi.FuncPCABIInternal(mapaccess1)
+ racereadpc(unsafe.Pointer(h), callerpc, pc)
+ raceReadObjectPC(t.key, key, callerpc, pc)
+ }
+ if msanenabled && h != nil {
+ msanread(key, t.key.size)
+ }
+ if asanenabled && h != nil {
+ asanread(key, t.key.size)
+ }
+ if h == nil || h.count == 0 {
+ if t.hashMightPanic() {
+ t.hasher(key, 0) // see issue 23734
+ }
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map read and map write")
+ }
+ hash := t.hasher(key, uintptr(h.hash0))
+ m := bucketMask(h.B)
+ b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ if !h.sameSizeGrow() {
+ // There used to be half as many buckets; mask down one more power of two.
+ m >>= 1
+ }
+ oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ top := tophash(hash)
+bucketloop:
+ for ; b != nil; b = b.overflow(t) {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] != top {
+ if b.tophash[i] == emptyRest {
+ break bucketloop
+ }
+ continue
+ }
+ k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+ if t.indirectkey() {
+ k = *((*unsafe.Pointer)(k))
+ }
+ if t.key.equal(key, k) {
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
+ if t.indirectelem() {
+ e = *((*unsafe.Pointer)(e))
+ }
+ return e
+ }
+ }
+ }
+ return unsafe.Pointer(&zeroVal[0])
+}
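
The oldbuckets fallback works because the old array is exactly half the size for a doubling grow: masking the hash with one fewer bit yields the bucket the key lived in before the grow. A small numeric sketch with hypothetical values:

```go
package main

import "fmt"

func main() {
	const B = 5              // new table: 32 buckets
	hash := uintptr(0b10110) // hypothetical hash, low bits 10110 = 22
	m := uintptr(1)<<B - 1   // 31
	newBucket := hash & m    // 22
	m >>= 1                  // old table had 16 buckets
	oldBucket := hash & m    // 6: where the key lived before the grow
	fmt.Println(newBucket, oldBucket)
}
```
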
+
+func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc()
+ pc := abi.FuncPCABIInternal(mapaccess2)
+ racereadpc(unsafe.Pointer(h), callerpc, pc)
+ raceReadObjectPC(t.key, key, callerpc, pc)
+ }
+ if msanenabled && h != nil {
+ msanread(key, t.key.size)
+ }
+ if asanenabled && h != nil {
+ asanread(key, t.key.size)
+ }
+ if h == nil || h.count == 0 {
+ if t.hashMightPanic() {
+ t.hasher(key, 0) // see issue 23734
+ }
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map read and map write")
+ }
+ hash := t.hasher(key, uintptr(h.hash0))
+ m := bucketMask(h.B)
+ b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ if !h.sameSizeGrow() {
+ // There used to be half as many buckets; mask down one more power of two.
+ m >>= 1
+ }
+ oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ top := tophash(hash)
+bucketloop:
+ for ; b != nil; b = b.overflow(t) {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] != top {
+ if b.tophash[i] == emptyRest {
+ break bucketloop
+ }
+ continue
+ }
+ k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+ if t.indirectkey() {
+ k = *((*unsafe.Pointer)(k))
+ }
+ if t.key.equal(key, k) {
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
+ if t.indirectelem() {
+ e = *((*unsafe.Pointer)(e))
+ }
+ return e, true
+ }
+ }
+ }
+ return unsafe.Pointer(&zeroVal[0]), false
+}
+
+// mapaccessK returns both key and elem. Used by the map iterator.
+func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
+ if h == nil || h.count == 0 {
+ return nil, nil
+ }
+ hash := t.hasher(key, uintptr(h.hash0))
+ m := bucketMask(h.B)
+ b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ if !h.sameSizeGrow() {
+ // There used to be half as many buckets; mask down one more power of two.
+ m >>= 1
+ }
+ oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ top := tophash(hash)
+bucketloop:
+ for ; b != nil; b = b.overflow(t) {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] != top {
+ if b.tophash[i] == emptyRest {
+ break bucketloop
+ }
+ continue
+ }
+ k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+ if t.indirectkey() {
+ k = *((*unsafe.Pointer)(k))
+ }
+ if t.key.equal(key, k) {
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
+ if t.indirectelem() {
+ e = *((*unsafe.Pointer)(e))
+ }
+ return k, e
+ }
+ }
+ }
+ return nil, nil
+}
+
+func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
+ e := mapaccess1(t, h, key)
+ if e == unsafe.Pointer(&zeroVal[0]) {
+ return zero
+ }
+ return e
+}
+
+func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
+ e := mapaccess1(t, h, key)
+ if e == unsafe.Pointer(&zeroVal[0]) {
+ return zero, false
+ }
+ return e, true
+}
+
+// Like mapaccess, but allocates a slot for the key if it is not present in the map.
+func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+ if h == nil {
+ panic(plainError("assignment to entry in nil map"))
+ }
+ if raceenabled {
+ callerpc := getcallerpc()
+ pc := abi.FuncPCABIInternal(mapassign)
+ racewritepc(unsafe.Pointer(h), callerpc, pc)
+ raceReadObjectPC(t.key, key, callerpc, pc)
+ }
+ if msanenabled {
+ msanread(key, t.key.size)
+ }
+ if asanenabled {
+ asanread(key, t.key.size)
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map writes")
+ }
+ hash := t.hasher(key, uintptr(h.hash0))
+
+ // Set hashWriting after calling t.hasher, since t.hasher may panic,
+ // in which case we have not actually done a write.
+ h.flags ^= hashWriting
+
+ if h.buckets == nil {
+ h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+ }
+
+again:
+ bucket := hash & bucketMask(h.B)
+ if h.growing() {
+ growWork(t, h, bucket)
+ }
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ top := tophash(hash)
+
+ var inserti *uint8
+ var insertk unsafe.Pointer
+ var elem unsafe.Pointer
+bucketloop:
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] != top {
+ if isEmpty(b.tophash[i]) && inserti == nil {
+ inserti = &b.tophash[i]
+ insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+ elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
+ }
+ if b.tophash[i] == emptyRest {
+ break bucketloop
+ }
+ continue
+ }
+ k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+ if t.indirectkey() {
+ k = *((*unsafe.Pointer)(k))
+ }
+ if !t.key.equal(key, k) {
+ continue
+ }
+ // already have a mapping for key. Update it.
+ if t.needkeyupdate() {
+ typedmemmove(t.key, k, key)
+ }
+ elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
+ goto done
+ }
+ ovf := b.overflow(t)
+ if ovf == nil {
+ break
+ }
+ b = ovf
+ }
+
+ // Did not find mapping for key. Allocate new cell & add entry.
+
+ // If we hit the max load factor or we have too many overflow buckets,
+ // and we're not already in the middle of growing, start growing.
+ if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+ hashGrow(t, h)
+ goto again // Growing the table invalidates everything, so try again
+ }
+
+ if inserti == nil {
+		// The current bucket and all the overflow buckets connected to it are full; allocate a new one.
+ newb := h.newoverflow(t, b)
+ inserti = &newb.tophash[0]
+ insertk = add(unsafe.Pointer(newb), dataOffset)
+ elem = add(insertk, bucketCnt*uintptr(t.keysize))
+ }
+
+ // store new key/elem at insert position
+ if t.indirectkey() {
+ kmem := newobject(t.key)
+ *(*unsafe.Pointer)(insertk) = kmem
+ insertk = kmem
+ }
+ if t.indirectelem() {
+ vmem := newobject(t.elem)
+ *(*unsafe.Pointer)(elem) = vmem
+ }
+ typedmemmove(t.key, insertk, key)
+ *inserti = top
+ h.count++
+
+done:
+ if h.flags&hashWriting == 0 {
+ fatal("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+ if t.indirectelem() {
+ elem = *((*unsafe.Pointer)(elem))
+ }
+ return elem
+}
+
+func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc()
+ pc := abi.FuncPCABIInternal(mapdelete)
+ racewritepc(unsafe.Pointer(h), callerpc, pc)
+ raceReadObjectPC(t.key, key, callerpc, pc)
+ }
+ if msanenabled && h != nil {
+ msanread(key, t.key.size)
+ }
+ if asanenabled && h != nil {
+ asanread(key, t.key.size)
+ }
+ if h == nil || h.count == 0 {
+ if t.hashMightPanic() {
+ t.hasher(key, 0) // see issue 23734
+ }
+ return
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map writes")
+ }
+
+ hash := t.hasher(key, uintptr(h.hash0))
+
+ // Set hashWriting after calling t.hasher, since t.hasher may panic,
+ // in which case we have not actually done a write (delete).
+ h.flags ^= hashWriting
+
+ bucket := hash & bucketMask(h.B)
+ if h.growing() {
+ growWork(t, h, bucket)
+ }
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ bOrig := b
+ top := tophash(hash)
+search:
+ for ; b != nil; b = b.overflow(t) {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] != top {
+ if b.tophash[i] == emptyRest {
+ break search
+ }
+ continue
+ }
+ k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+ k2 := k
+ if t.indirectkey() {
+ k2 = *((*unsafe.Pointer)(k2))
+ }
+ if !t.key.equal(key, k2) {
+ continue
+ }
+ // Only clear key if there are pointers in it.
+ if t.indirectkey() {
+ *(*unsafe.Pointer)(k) = nil
+ } else if t.key.ptrdata != 0 {
+ memclrHasPointers(k, t.key.size)
+ }
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
+ if t.indirectelem() {
+ *(*unsafe.Pointer)(e) = nil
+ } else if t.elem.ptrdata != 0 {
+ memclrHasPointers(e, t.elem.size)
+ } else {
+ memclrNoHeapPointers(e, t.elem.size)
+ }
+ b.tophash[i] = emptyOne
+ // If the bucket now ends in a bunch of emptyOne states,
+ // change those to emptyRest states.
+ // It would be nice to make this a separate function, but
+ // for loops are not currently inlineable.
+ if i == bucketCnt-1 {
+ if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
+ goto notLast
+ }
+ } else {
+ if b.tophash[i+1] != emptyRest {
+ goto notLast
+ }
+ }
+ for {
+ b.tophash[i] = emptyRest
+ if i == 0 {
+ if b == bOrig {
+ break // beginning of initial bucket, we're done.
+ }
+ // Find previous bucket, continue at its last entry.
+ c := b
+ for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
+ }
+ i = bucketCnt - 1
+ } else {
+ i--
+ }
+ if b.tophash[i] != emptyOne {
+ break
+ }
+ }
+ notLast:
+ h.count--
+ // Reset the hash seed to make it more difficult for attackers to
+ // repeatedly trigger hash collisions. See issue 25237.
+ if h.count == 0 {
+ h.hash0 = fastrand()
+ }
+ break search
+ }
+ }
+
+ if h.flags&hashWriting == 0 {
+ fatal("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+}
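
The emptyOne-to-emptyRest conversion walks backwards from the deleted slot so later lookups can stop at the first emptyRest. A sketch over a single bucket's tophash array, ignoring the overflow-chain walk the real code performs (values hypothetical):

```go
package main

import "fmt"

func main() {
	const emptyRest, emptyOne = 0, 1
	// One bucket's tophash after deleting the entry at index 4; index 5
	// onward is already emptyRest.
	tophash := []uint8{7, emptyOne, 9, emptyOne, emptyOne, emptyRest, emptyRest, emptyRest}
	for i := 4; i >= 0 && tophash[i] == emptyOne; i-- {
		tophash[i] = emptyRest
	}
	fmt.Println(tophash) // [7 1 9 0 0 0 0 0]: indexes 3 and 4 were converted
}
```
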
+
+// mapiterinit initializes the hiter struct used for ranging over maps.
+// The hiter struct pointed to by 'it' is allocated on the stack
+// by the compiler's order pass or on the heap by reflect_mapiterinit.
+// Both need a zeroed hiter since the struct contains pointers.
+func mapiterinit(t *maptype, h *hmap, it *hiter) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc()
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
+ }
+
+ it.t = t
+ if h == nil || h.count == 0 {
+ return
+ }
+
+ if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
+ throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
+ }
+ it.h = h
+
+ // grab snapshot of bucket state
+ it.B = h.B
+ it.buckets = h.buckets
+ if t.bucket.ptrdata == 0 {
+ // Allocate the current slice and remember pointers to both current and old.
+ // This preserves all relevant overflow buckets alive even if
+ // the table grows and/or overflow buckets are added to the table
+ // while we are iterating.
+ h.createOverflow()
+ it.overflow = h.extra.overflow
+ it.oldoverflow = h.extra.oldoverflow
+ }
+
+ // decide where to start
+ var r uintptr
+ if h.B > 31-bucketCntBits {
+ r = uintptr(fastrand64())
+ } else {
+ r = uintptr(fastrand())
+ }
+ it.startBucket = r & bucketMask(h.B)
+ it.offset = uint8(r >> h.B & (bucketCnt - 1))
+
+ // iterator state
+ it.bucket = it.startBucket
+
+ // Remember we have an iterator.
+ // Can run concurrently with another mapiterinit().
+ if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
+ atomic.Or8(&h.flags, iterator|oldIterator)
+ }
+
+ mapiternext(it)
+}
+
+func mapiternext(it *hiter) {
+ h := it.h
+ if raceenabled {
+ callerpc := getcallerpc()
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map iteration and map write")
+ }
+ t := it.t
+ bucket := it.bucket
+ b := it.bptr
+ i := it.i
+ checkBucket := it.checkBucket
+
+next:
+ if b == nil {
+ if bucket == it.startBucket && it.wrapped {
+ // end of iteration
+ it.key = nil
+ it.elem = nil
+ return
+ }
+ if h.growing() && it.B == h.B {
+ // Iterator was started in the middle of a grow, and the grow isn't done yet.
+ // If the bucket we're looking at hasn't been filled in yet (i.e. the old
+ // bucket hasn't been evacuated) then we need to iterate through the old
+ // bucket and only return the ones that will be migrated to this bucket.
+ oldbucket := bucket & it.h.oldbucketmask()
+ b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
+ if !evacuated(b) {
+ checkBucket = bucket
+ } else {
+ b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
+ checkBucket = noCheck
+ }
+ } else {
+ b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
+ checkBucket = noCheck
+ }
+ bucket++
+ if bucket == bucketShift(it.B) {
+ bucket = 0
+ it.wrapped = true
+ }
+ i = 0
+ }
+ for ; i < bucketCnt; i++ {
+ offi := (i + it.offset) & (bucketCnt - 1)
+ if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
+ // TODO: emptyRest is hard to use here, as we start iterating
+ // in the middle of a bucket. It's feasible, just tricky.
+ continue
+ }
+ k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
+ if t.indirectkey() {
+ k = *((*unsafe.Pointer)(k))
+ }
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.elemsize))
+ if checkBucket != noCheck && !h.sameSizeGrow() {
+ // Special case: iterator was started during a grow to a larger size
+ // and the grow is not done yet. We're working on a bucket whose
+ // oldbucket has not been evacuated yet. Or at least, it wasn't
+ // evacuated when we started the bucket. So we're iterating
+ // through the oldbucket, skipping any keys that will go
+ // to the other new bucket (each oldbucket expands to two
+ // buckets during a grow).
+ if t.reflexivekey() || t.key.equal(k, k) {
+ // If the item in the oldbucket is not destined for
+ // the current new bucket in the iteration, skip it.
+ hash := t.hasher(k, uintptr(h.hash0))
+ if hash&bucketMask(it.B) != checkBucket {
+ continue
+ }
+ } else {
+ // Hash isn't repeatable if k != k (NaNs). We need a
+ // repeatable and randomish choice of which direction
+ // to send NaNs during evacuation. We'll use the low
+ // bit of tophash to decide which way NaNs go.
+ // NOTE: this case is why we need two evacuate tophash
+ // values, evacuatedX and evacuatedY, that differ in
+ // their low bit.
+ if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
+ continue
+ }
+ }
+ }
+ if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
+ !(t.reflexivekey() || t.key.equal(k, k)) {
+ // This is the golden data, we can return it.
+ // OR
+ // key!=key, so the entry can't be deleted or updated, so we can just return it.
+ // That's lucky for us because when key!=key we can't look it up successfully.
+ it.key = k
+ if t.indirectelem() {
+ e = *((*unsafe.Pointer)(e))
+ }
+ it.elem = e
+ } else {
+ // The hash table has grown since the iterator was started.
+ // The golden data for this key is now somewhere else.
+ // Check the current hash table for the data.
+ // This code handles the case where the key
+ // has been deleted, updated, or deleted and reinserted.
+ // NOTE: we need to regrab the key as it has potentially been
+ // updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
+ rk, re := mapaccessK(t, h, k)
+ if rk == nil {
+ continue // key has been deleted
+ }
+ it.key = rk
+ it.elem = re
+ }
+ it.bucket = bucket
+ if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
+ it.bptr = b
+ }
+ it.i = i + 1
+ it.checkBucket = checkBucket
+ return
+ }
+ b = b.overflow(t)
+ i = 0
+ goto next
+}
+
+// mapclear deletes all keys from a map.
+func mapclear(t *maptype, h *hmap) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc()
+ pc := abi.FuncPCABIInternal(mapclear)
+ racewritepc(unsafe.Pointer(h), callerpc, pc)
+ }
+
+ if h == nil || h.count == 0 {
+ return
+ }
+
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map writes")
+ }
+
+ h.flags ^= hashWriting
+
+ h.flags &^= sameSizeGrow
+ h.oldbuckets = nil
+ h.nevacuate = 0
+ h.noverflow = 0
+ h.count = 0
+
+ // Reset the hash seed to make it more difficult for attackers to
+ // repeatedly trigger hash collisions. See issue 25237.
+ h.hash0 = fastrand()
+
+ // Keep the mapextra allocation but clear any extra information.
+ if h.extra != nil {
+ *h.extra = mapextra{}
+ }
+
+ // makeBucketArray clears the memory pointed to by h.buckets
+ // and recovers any overflow buckets by generating them
+	// as if h.buckets was newly allocated.
+ _, nextOverflow := makeBucketArray(t, h.B, h.buckets)
+ if nextOverflow != nil {
+ // If overflow buckets are created then h.extra
+ // will have been allocated during initial bucket creation.
+ h.extra.nextOverflow = nextOverflow
+ }
+
+ if h.flags&hashWriting == 0 {
+ fatal("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+}
+
+func hashGrow(t *maptype, h *hmap) {
+ // If we've hit the load factor, get bigger.
+ // Otherwise, there are too many overflow buckets,
+ // so keep the same number of buckets and "grow" laterally.
+ bigger := uint8(1)
+ if !overLoadFactor(h.count+1, h.B) {
+ bigger = 0
+ h.flags |= sameSizeGrow
+ }
+ oldbuckets := h.buckets
+ newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)
+
+ flags := h.flags &^ (iterator | oldIterator)
+ if h.flags&iterator != 0 {
+ flags |= oldIterator
+ }
+ // commit the grow (atomic wrt gc)
+ h.B += bigger
+ h.flags = flags
+ h.oldbuckets = oldbuckets
+ h.buckets = newbuckets
+ h.nevacuate = 0
+ h.noverflow = 0
+
+ if h.extra != nil && h.extra.overflow != nil {
+ // Promote current overflow buckets to the old generation.
+ if h.extra.oldoverflow != nil {
+ throw("oldoverflow is not nil")
+ }
+ h.extra.oldoverflow = h.extra.overflow
+ h.extra.overflow = nil
+ }
+ if nextOverflow != nil {
+ if h.extra == nil {
+ h.extra = new(mapextra)
+ }
+ h.extra.nextOverflow = nextOverflow
+ }
+
+ // the actual copying of the hash table data is done incrementally
+ // by growWork() and evacuate().
+}
+
+// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
+func overLoadFactor(count int, B uint8) bool {
+ return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
+}
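
Two quick numbers for this predicate, reproduced as a standalone sketch: with B=5 the map grows past 208 entries (13 * 32/2), and makemap's loop picks B=4 for a hint of 100 (threshold 104):

```go
package main

import "fmt"

// overLoad reproduces the growth predicate above: more than 6.5 entries per
// bucket on average (13/2), ignoring tables that fit one 8-entry bucket.
func overLoad(count int, B uint8) bool {
	const bucketCnt, num, den = 8, 13, 2
	return count > bucketCnt && uintptr(count) > num*((uintptr(1)<<B)/den)
}

func main() {
	// With B=5 (32 buckets) the threshold is 13*(32/2) = 208 entries.
	fmt.Println(overLoad(208, 5), overLoad(209, 5)) // false true

	// makemap picks the smallest B that is not overloaded; hint=100
	// needs B=4 (16 buckets, threshold 104).
	B := uint8(0)
	for overLoad(100, B) {
		B++
	}
	fmt.Println(B) // 4
}
```
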
+
+// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
+// Note that most of these overflow buckets must be in sparse use;
+// if use was dense, then we'd have already triggered regular map growth.
+func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
+ // If the threshold is too low, we do extraneous work.
+ // If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
+ // "too many" means (approximately) as many overflow buckets as regular buckets.
+ // See incrnoverflow for more details.
+ if B > 15 {
+ B = 15
+ }
+ // The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
+ return noverflow >= uint16(1)<<(B&15)
+}
+
+// growing reports whether h is growing. The growth may be to the same size or bigger.
+func (h *hmap) growing() bool {
+ return h.oldbuckets != nil
+}
+
+// sameSizeGrow reports whether the current growth is to a map of the same size.
+func (h *hmap) sameSizeGrow() bool {
+ return h.flags&sameSizeGrow != 0
+}
+
+// noldbuckets calculates the number of buckets prior to the current map growth.
+func (h *hmap) noldbuckets() uintptr {
+ oldB := h.B
+ if !h.sameSizeGrow() {
+ oldB--
+ }
+ return bucketShift(oldB)
+}
+
+// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
+func (h *hmap) oldbucketmask() uintptr {
+ return h.noldbuckets() - 1
+}
+
+func growWork(t *maptype, h *hmap, bucket uintptr) {
+ // make sure we evacuate the oldbucket corresponding
+ // to the bucket we're about to use
+ evacuate(t, h, bucket&h.oldbucketmask())
+
+ // evacuate one more oldbucket to make progress on growing
+ if h.growing() {
+ evacuate(t, h, h.nevacuate)
+ }
+}
+
+func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
+ b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.bucketsize)))
+ return evacuated(b)
+}
+
+// evacDst is an evacuation destination.
+type evacDst struct {
+ b *bmap // current destination bucket
+ i int // key/elem index into b
+ k unsafe.Pointer // pointer to current key storage
+ e unsafe.Pointer // pointer to current elem storage
+}
+
+func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
+ b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
+ newbit := h.noldbuckets()
+ if !evacuated(b) {
+ // TODO: reuse overflow buckets instead of using new ones, if there
+ // is no iterator using the old buckets. (If !oldIterator.)
+
+ // xy contains the x and y (low and high) evacuation destinations.
+ var xy [2]evacDst
+ x := &xy[0]
+ x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
+ x.k = add(unsafe.Pointer(x.b), dataOffset)
+ x.e = add(x.k, bucketCnt*uintptr(t.keysize))
+
+ if !h.sameSizeGrow() {
+ // Only calculate y pointers if we're growing bigger.
+ // Otherwise GC can see bad pointers.
+ y := &xy[1]
+ y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
+ y.k = add(unsafe.Pointer(y.b), dataOffset)
+ y.e = add(y.k, bucketCnt*uintptr(t.keysize))
+ }
+
+ for ; b != nil; b = b.overflow(t) {
+ k := add(unsafe.Pointer(b), dataOffset)
+ e := add(k, bucketCnt*uintptr(t.keysize))
+ for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.keysize)), add(e, uintptr(t.elemsize)) {
+ top := b.tophash[i]
+ if isEmpty(top) {
+ b.tophash[i] = evacuatedEmpty
+ continue
+ }
+ if top < minTopHash {
+ throw("bad map state")
+ }
+ k2 := k
+ if t.indirectkey() {
+ k2 = *((*unsafe.Pointer)(k2))
+ }
+ var useY uint8
+ if !h.sameSizeGrow() {
+ // Compute hash to make our evacuation decision (whether we need
+ // to send this key/elem to bucket x or bucket y).
+ hash := t.hasher(k2, uintptr(h.hash0))
+ if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) {
+ // If key != key (NaNs), then the hash could be (and probably
+ // will be) entirely different from the old hash. Moreover,
+ // it isn't reproducible. Reproducibility is required in the
+ // presence of iterators, as our evacuation decision must
+ // match whatever decision the iterator made.
+ // Fortunately, we have the freedom to send these keys either
+ // way. Also, tophash is meaningless for these kinds of keys.
+ // We let the low bit of tophash drive the evacuation decision.
+ // We recompute a new random tophash for the next level so
+ // these keys will get evenly distributed across all buckets
+ // after multiple grows.
+ useY = top & 1
+ top = tophash(hash)
+ } else {
+ if hash&newbit != 0 {
+ useY = 1
+ }
+ }
+ }
+
+ if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
+ throw("bad evacuatedN")
+ }
+
+ b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
+ dst := &xy[useY] // evacuation destination
+
+ if dst.i == bucketCnt {
+ dst.b = h.newoverflow(t, dst.b)
+ dst.i = 0
+ dst.k = add(unsafe.Pointer(dst.b), dataOffset)
+ dst.e = add(dst.k, bucketCnt*uintptr(t.keysize))
+ }
+ dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+ if t.indirectkey() {
+ *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
+ } else {
+					typedmemmove(t.key, dst.k, k) // copy key
+ }
+ if t.indirectelem() {
+ *(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
+ } else {
+ typedmemmove(t.elem, dst.e, e)
+ }
+ dst.i++
+ // These updates might push these pointers past the end of the
+ // key or elem arrays. That's ok, as we have the overflow pointer
+ // at the end of the bucket to protect against pointing past the
+ // end of the bucket.
+ dst.k = add(dst.k, uintptr(t.keysize))
+ dst.e = add(dst.e, uintptr(t.elemsize))
+ }
+ }
+ // Unlink the overflow buckets & clear key/elem to help GC.
+ if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
+ b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
+ // Preserve b.tophash because the evacuation
+ // state is maintained there.
+ ptr := add(b, dataOffset)
+ n := uintptr(t.bucketsize) - dataOffset
+ memclrHasPointers(ptr, n)
+ }
+ }
+
+ if oldbucket == h.nevacuate {
+ advanceEvacuationMark(h, t, newbit)
+ }
+}
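
The x/y split means each old bucket's entries land in one of exactly two new buckets, selected by the hash bit that becomes significant after doubling. A sketch with hypothetical hashes whose low bits all select old bucket 3:

```go
package main

import "fmt"

func main() {
	const oldB = 4
	newbit := uintptr(1) << oldB // grow from 16 to 32 buckets
	oldbucket := uintptr(3)
	for _, hash := range []uintptr{0x13, 0x23, 0x33} { // all have low bits 0011
		dst := oldbucket // "x": same index in the new table
		if hash&newbit != 0 {
			dst += newbit // "y": newbit positions higher
		}
		fmt.Printf("hash %#x -> new bucket %d\n", hash, dst)
	}
}
```
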
+
+func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
+ h.nevacuate++
+ // Experiments suggest that 1024 is overkill by at least an order of magnitude.
+ // Put it in there as a safeguard anyway, to ensure O(1) behavior.
+ stop := h.nevacuate + 1024
+ if stop > newbit {
+ stop = newbit
+ }
+ for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
+ h.nevacuate++
+ }
+ if h.nevacuate == newbit { // newbit == # of oldbuckets
+ // Growing is all done. Free old main bucket array.
+ h.oldbuckets = nil
+ // Can discard old overflow buckets as well.
+ // If they are still referenced by an iterator,
+		// then the iterator holds a pointer to the slice.
+ if h.extra != nil {
+ h.extra.oldoverflow = nil
+ }
+ h.flags &^= sameSizeGrow
+ }
+}
+
+// Reflect stubs. Called from ../reflect/asm_*.s
+
+//go:linkname reflect_makemap reflect.makemap
+func reflect_makemap(t *maptype, cap int) *hmap {
+	// Check invariants and reflect's math.
+ if t.key.equal == nil {
+ throw("runtime.reflect_makemap: unsupported map key type")
+ }
+ if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(goarch.PtrSize)) ||
+ t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) {
+ throw("key size wrong")
+ }
+ if t.elem.size > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(goarch.PtrSize)) ||
+ t.elem.size <= maxElemSize && (t.indirectelem() || t.elemsize != uint8(t.elem.size)) {
+ throw("elem size wrong")
+ }
+ if t.key.align > bucketCnt {
+ throw("key align too big")
+ }
+ if t.elem.align > bucketCnt {
+ throw("elem align too big")
+ }
+ if t.key.size%uintptr(t.key.align) != 0 {
+ throw("key size not a multiple of key align")
+ }
+ if t.elem.size%uintptr(t.elem.align) != 0 {
+ throw("elem size not a multiple of elem align")
+ }
+ if bucketCnt < 8 {
+ throw("bucketsize too small for proper alignment")
+ }
+ if dataOffset%uintptr(t.key.align) != 0 {
+ throw("need padding in bucket (key)")
+ }
+ if dataOffset%uintptr(t.elem.align) != 0 {
+ throw("need padding in bucket (elem)")
+ }
+
+ return makemap(t, cap, nil)
+}
+
+//go:linkname reflect_mapaccess reflect.mapaccess
+func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+ elem, ok := mapaccess2(t, h, key)
+ if !ok {
+ // reflect wants nil for a missing element
+ elem = nil
+ }
+ return elem
+}
+
+//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
+func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
+ elem, ok := mapaccess2_faststr(t, h, key)
+ if !ok {
+ // reflect wants nil for a missing element
+ elem = nil
+ }
+ return elem
+}
+
+//go:linkname reflect_mapassign reflect.mapassign
+func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
+ p := mapassign(t, h, key)
+ typedmemmove(t.elem, p, elem)
+}
+
+//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr
+func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
+ p := mapassign_faststr(t, h, key)
+ typedmemmove(t.elem, p, elem)
+}
+
+//go:linkname reflect_mapdelete reflect.mapdelete
+func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
+ mapdelete(t, h, key)
+}
+
+//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
+func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
+ mapdelete_faststr(t, h, key)
+}
+
+//go:linkname reflect_mapiterinit reflect.mapiterinit
+func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
+ mapiterinit(t, h, it)
+}
+
+//go:linkname reflect_mapiternext reflect.mapiternext
+func reflect_mapiternext(it *hiter) {
+ mapiternext(it)
+}
+
+//go:linkname reflect_mapiterkey reflect.mapiterkey
+func reflect_mapiterkey(it *hiter) unsafe.Pointer {
+ return it.key
+}
+
+//go:linkname reflect_mapiterelem reflect.mapiterelem
+func reflect_mapiterelem(it *hiter) unsafe.Pointer {
+ return it.elem
+}
+
+//go:linkname reflect_maplen reflect.maplen
+func reflect_maplen(h *hmap) int {
+ if h == nil {
+ return 0
+ }
+ if raceenabled {
+ callerpc := getcallerpc()
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
+ }
+ return h.count
+}
+
+//go:linkname reflectlite_maplen internal/reflectlite.maplen
+func reflectlite_maplen(h *hmap) int {
+ if h == nil {
+ return 0
+ }
+ if raceenabled {
+ callerpc := getcallerpc()
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
+ }
+ return h.count
+}
+
+const maxZero = 1024 // must match value in reflect/value.go:maxZero and cmd/compile/internal/gc/walk.go:zeroValSize
+var zeroVal [maxZero]byte
diff --git a/contrib/go/_std_1.19/src/runtime/map_fast32.go b/contrib/go/_std_1.19/src/runtime/map_fast32.go
new file mode 100644
index 0000000000..01ea330950
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/map_fast32.go
@@ -0,0 +1,462 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/abi"
+ "internal/goarch"
+ "unsafe"
+)
+
+func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc()
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
+ }
+ if h == nil || h.count == 0 {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map read and map write")
+ }
+ var b *bmap
+ if h.B == 0 {
+ // One-bucket table. No need to hash.
+ b = (*bmap)(h.buckets)
+ } else {
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ m := bucketMask(h.B)
+ b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ if !h.sameSizeGrow() {
+ // There used to be half as many buckets; mask down one more power of two.
+ m >>= 1
+ }
+ oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ }
+ for ; b != nil; b = b.overflow(t) {
+ for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
+ if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
+ }
+ }
+ }
+ return unsafe.Pointer(&zeroVal[0])
+}
+
+func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc()
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
+ }
+ if h == nil || h.count == 0 {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map read and map write")
+ }
+ var b *bmap
+ if h.B == 0 {
+ // One-bucket table. No need to hash.
+ b = (*bmap)(h.buckets)
+ } else {
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ m := bucketMask(h.B)
+ b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ if !h.sameSizeGrow() {
+ // There used to be half as many buckets; mask down one more power of two.
+ m >>= 1
+ }
+ oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ }
+ for ; b != nil; b = b.overflow(t) {
+ for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
+ if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize)), true
+ }
+ }
+ }
+ return unsafe.Pointer(&zeroVal[0]), false
+}
+
+func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
+ if h == nil {
+ panic(plainError("assignment to entry in nil map"))
+ }
+ if raceenabled {
+ callerpc := getcallerpc()
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map writes")
+ }
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
+ h.flags ^= hashWriting
+
+ if h.buckets == nil {
+ h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+ }
+
+again:
+ bucket := hash & bucketMask(h.B)
+ if h.growing() {
+ growWork_fast32(t, h, bucket)
+ }
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+
+ var insertb *bmap
+ var inserti uintptr
+ var insertk unsafe.Pointer
+
+bucketloop:
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if isEmpty(b.tophash[i]) {
+ if insertb == nil {
+ inserti = i
+ insertb = b
+ }
+ if b.tophash[i] == emptyRest {
+ break bucketloop
+ }
+ continue
+ }
+ k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
+ if k != key {
+ continue
+ }
+ inserti = i
+ insertb = b
+ goto done
+ }
+ ovf := b.overflow(t)
+ if ovf == nil {
+ break
+ }
+ b = ovf
+ }
+
+ // Did not find mapping for key. Allocate new cell & add entry.
+
+ // If we hit the max load factor or we have too many overflow buckets,
+ // and we're not already in the middle of growing, start growing.
+ if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+ hashGrow(t, h)
+ goto again // Growing the table invalidates everything, so try again
+ }
+
+ if insertb == nil {
+ // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
+ insertb = h.newoverflow(t, b)
+ inserti = 0 // not necessary, but avoids needlessly spilling inserti
+ }
+ insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+ insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
+ // store new key at insert position
+ *(*uint32)(insertk) = key
+
+ h.count++
+
+done:
+ elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
+ if h.flags&hashWriting == 0 {
+ fatal("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+ return elem
+}
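The hashWriting handshake above is a best-effort race detector, not a lock: the flag is set with XOR after hashing, re-checked before return, and cleared with AND-NOT, so two racing writers flip each other's view of the bit and at least one trips the fatal check. A minimal sketch of the same discipline on a hypothetical standalone type (not the runtime's hmap):

const hashWriting = 4 // a writer is active (mirrors the runtime flag)

type table struct{ flags uint8 }

func (t *table) beginWrite() {
	if t.flags&hashWriting != 0 {
		panic("concurrent map writes") // another writer got here first
	}
	t.flags ^= hashWriting // XOR, so a racing writer can flip it back off
}

func (t *table) endWrite() {
	if t.flags&hashWriting == 0 {
		panic("concurrent map writes") // someone else toggled the flag
	}
	t.flags &^= hashWriting // clear the flag
}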
+
+func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+ if h == nil {
+ panic(plainError("assignment to entry in nil map"))
+ }
+ if raceenabled {
+ callerpc := getcallerpc()
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map writes")
+ }
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
+ h.flags ^= hashWriting
+
+ if h.buckets == nil {
+ h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+ }
+
+again:
+ bucket := hash & bucketMask(h.B)
+ if h.growing() {
+ growWork_fast32(t, h, bucket)
+ }
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+
+ var insertb *bmap
+ var inserti uintptr
+ var insertk unsafe.Pointer
+
+bucketloop:
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if isEmpty(b.tophash[i]) {
+ if insertb == nil {
+ inserti = i
+ insertb = b
+ }
+ if b.tophash[i] == emptyRest {
+ break bucketloop
+ }
+ continue
+ }
+ k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
+ if k != key {
+ continue
+ }
+ inserti = i
+ insertb = b
+ goto done
+ }
+ ovf := b.overflow(t)
+ if ovf == nil {
+ break
+ }
+ b = ovf
+ }
+
+ // Did not find mapping for key. Allocate new cell & add entry.
+
+ // If we hit the max load factor or we have too many overflow buckets,
+ // and we're not already in the middle of growing, start growing.
+ if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+ hashGrow(t, h)
+ goto again // Growing the table invalidates everything, so try again
+ }
+
+ if insertb == nil {
+ // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
+ insertb = h.newoverflow(t, b)
+ inserti = 0 // not necessary, but avoids needlessly spilling inserti
+ }
+ insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+ insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
+ // store new key at insert position
+ *(*unsafe.Pointer)(insertk) = key
+
+ h.count++
+
+done:
+ elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
+ if h.flags&hashWriting == 0 {
+ fatal("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+ return elem
+}
+
+func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc()
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
+ }
+ if h == nil || h.count == 0 {
+ return
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map writes")
+ }
+
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+ // Set hashWriting after calling t.hasher for consistency with mapdelete
+ h.flags ^= hashWriting
+
+ bucket := hash & bucketMask(h.B)
+ if h.growing() {
+ growWork_fast32(t, h, bucket)
+ }
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ bOrig := b
+search:
+ for ; b != nil; b = b.overflow(t) {
+ for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
+ if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
+ continue
+ }
+ // Only clear key if there are pointers in it.
+ // This can only happen if pointers are 32 bit
+ // wide, as 64 bit pointers do not fit into a 32 bit key.
+ if goarch.PtrSize == 4 && t.key.ptrdata != 0 {
+ // The key must be a pointer as we checked pointers are
+ // 32 bits wide and the key is 32 bits wide also.
+ *(*unsafe.Pointer)(k) = nil
+ }
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
+ if t.elem.ptrdata != 0 {
+ memclrHasPointers(e, t.elem.size)
+ } else {
+ memclrNoHeapPointers(e, t.elem.size)
+ }
+ b.tophash[i] = emptyOne
+ // If the bucket now ends in a bunch of emptyOne states,
+ // change those to emptyRest states.
+ if i == bucketCnt-1 {
+ if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
+ goto notLast
+ }
+ } else {
+ if b.tophash[i+1] != emptyRest {
+ goto notLast
+ }
+ }
+ for {
+ b.tophash[i] = emptyRest
+ if i == 0 {
+ if b == bOrig {
+ break // beginning of initial bucket, we're done.
+ }
+ // Find previous bucket, continue at its last entry.
+ c := b
+ for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
+ }
+ i = bucketCnt - 1
+ } else {
+ i--
+ }
+ if b.tophash[i] != emptyOne {
+ break
+ }
+ }
+ notLast:
+ h.count--
+ // Reset the hash seed to make it more difficult for attackers to
+ // repeatedly trigger hash collisions. See issue 25237.
+ if h.count == 0 {
+ h.hash0 = fastrand()
+ }
+ break search
+ }
+ }
+
+ if h.flags&hashWriting == 0 {
+ fatal("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+}
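The back-scan above maintains the emptyRest invariant: once every slot after position i is empty for good, trailing emptyOne markers are upgraded so future lookups can stop at the first emptyRest. A sketch limited to one bucket, with the overflow-chain crossing omitted (hypothetical helper; the constants mirror the runtime's tophash states):

const (
	emptyRest = 0 // this slot and all later ones in the bucket are empty
	emptyOne  = 1 // this slot is empty; later slots may still be in use
)

// coalesce runs after tophash[i] has been set to emptyOne by a delete.
func coalesce(tophash []uint8, i int) {
	if i != len(tophash)-1 && tophash[i+1] != emptyRest {
		return // a later slot may still hold an entry
	}
	for i >= 0 && tophash[i] == emptyOne {
		tophash[i] = emptyRest
		i--
	}
}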
+
+func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
+ // make sure we evacuate the oldbucket corresponding
+ // to the bucket we're about to use
+ evacuate_fast32(t, h, bucket&h.oldbucketmask())
+
+ // evacuate one more oldbucket to make progress on growing
+ if h.growing() {
+ evacuate_fast32(t, h, h.nevacuate)
+ }
+}
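Growth is paid for incrementally: every assign or delete evacuates the old bucket it is about to touch plus at most one more, so all old buckets are drained before another grow can begin. For a doubling grow the correspondence is just a mask, sketched here with a hypothetical helper (same-size grows keep the old bucket count, so there the mask is the full bucket mask):

func oldBucketFor(bucket uintptr, B uint8) uintptr {
	noldbuckets := uintptr(1) << (B - 1) // table doubled: half as many before (B >= 1 here)
	return bucket & (noldbuckets - 1)    // what h.oldbucketmask() computes
}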
+
+func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
+ b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
+ newbit := h.noldbuckets()
+ if !evacuated(b) {
+ // TODO: reuse overflow buckets instead of using new ones, if there
+ // is no iterator using the old buckets. (If !oldIterator.)
+
+ // xy contains the x and y (low and high) evacuation destinations.
+ var xy [2]evacDst
+ x := &xy[0]
+ x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
+ x.k = add(unsafe.Pointer(x.b), dataOffset)
+ x.e = add(x.k, bucketCnt*4)
+
+ if !h.sameSizeGrow() {
+ // Only calculate y pointers if we're growing bigger.
+ // Otherwise GC can see bad pointers.
+ y := &xy[1]
+ y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
+ y.k = add(unsafe.Pointer(y.b), dataOffset)
+ y.e = add(y.k, bucketCnt*4)
+ }
+
+ for ; b != nil; b = b.overflow(t) {
+ k := add(unsafe.Pointer(b), dataOffset)
+ e := add(k, bucketCnt*4)
+ for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.elemsize)) {
+ top := b.tophash[i]
+ if isEmpty(top) {
+ b.tophash[i] = evacuatedEmpty
+ continue
+ }
+ if top < minTopHash {
+ throw("bad map state")
+ }
+ var useY uint8
+ if !h.sameSizeGrow() {
+ // Compute hash to make our evacuation decision (whether we need
+ // to send this key/elem to bucket x or bucket y).
+ hash := t.hasher(k, uintptr(h.hash0))
+ if hash&newbit != 0 {
+ useY = 1
+ }
+ }
+
+ b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
+ dst := &xy[useY] // evacuation destination
+
+ if dst.i == bucketCnt {
+ dst.b = h.newoverflow(t, dst.b)
+ dst.i = 0
+ dst.k = add(unsafe.Pointer(dst.b), dataOffset)
+ dst.e = add(dst.k, bucketCnt*4)
+ }
+ dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+
+ // Copy key.
+ if goarch.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
+ // Write with a write barrier.
+ *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
+ } else {
+ *(*uint32)(dst.k) = *(*uint32)(k)
+ }
+
+ typedmemmove(t.elem, dst.e, e)
+ dst.i++
+ // These updates might push these pointers past the end of the
+ // key or elem arrays. That's ok, as we have the overflow pointer
+ // at the end of the bucket to protect against pointing past the
+ // end of the bucket.
+ dst.k = add(dst.k, 4)
+ dst.e = add(dst.e, uintptr(t.elemsize))
+ }
+ }
+ // Unlink the overflow buckets & clear key/elem to help GC.
+ if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
+ b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
+ // Preserve b.tophash because the evacuation
+ // state is maintained there.
+ ptr := add(b, dataOffset)
+ n := uintptr(t.bucketsize) - dataOffset
+ memclrHasPointers(ptr, n)
+ }
+ }
+
+ if oldbucket == h.nevacuate {
+ advanceEvacuationMark(h, t, newbit)
+ }
+}
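During a doubling grow each old bucket splits in two: entries whose hash has the new bit clear stay at the same index (the x destination), and entries with it set move up by the old bucket count (the y destination). A standalone sketch of that decision (hypothetical helper; newbit is h.noldbuckets()):

func evacDest(hash, oldbucket, newbit uintptr) uintptr {
	if hash&newbit != 0 {
		return oldbucket + newbit // y: upper half of the grown table
	}
	return oldbucket // x: same index as before the grow
}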
diff --git a/contrib/go/_std_1.19/src/runtime/map_fast64.go b/contrib/go/_std_1.19/src/runtime/map_fast64.go
new file mode 100644
index 0000000000..2967360b76
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/map_fast64.go
@@ -0,0 +1,470 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/abi"
+ "internal/goarch"
+ "unsafe"
+)
+
+func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc()
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
+ }
+ if h == nil || h.count == 0 {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map read and map write")
+ }
+ var b *bmap
+ if h.B == 0 {
+ // One-bucket table. No need to hash.
+ b = (*bmap)(h.buckets)
+ } else {
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ m := bucketMask(h.B)
+ b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ if !h.sameSizeGrow() {
+ // There used to be half as many buckets; mask down one more power of two.
+ m >>= 1
+ }
+ oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ }
+ for ; b != nil; b = b.overflow(t) {
+ for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
+ if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
+ }
+ }
+ }
+ return unsafe.Pointer(&zeroVal[0])
+}
+
+func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc()
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
+ }
+ if h == nil || h.count == 0 {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map read and map write")
+ }
+ var b *bmap
+ if h.B == 0 {
+ // One-bucket table. No need to hash.
+ b = (*bmap)(h.buckets)
+ } else {
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ m := bucketMask(h.B)
+ b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ if !h.sameSizeGrow() {
+ // There used to be half as many buckets; mask down one more power of two.
+ m >>= 1
+ }
+ oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ }
+ for ; b != nil; b = b.overflow(t) {
+ for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
+ if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
+ }
+ }
+ }
+ return unsafe.Pointer(&zeroVal[0]), false
+}
+
+func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
+ if h == nil {
+ panic(plainError("assignment to entry in nil map"))
+ }
+ if raceenabled {
+ callerpc := getcallerpc()
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map writes")
+ }
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
+ h.flags ^= hashWriting
+
+ if h.buckets == nil {
+ h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+ }
+
+again:
+ bucket := hash & bucketMask(h.B)
+ if h.growing() {
+ growWork_fast64(t, h, bucket)
+ }
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+
+ var insertb *bmap
+ var inserti uintptr
+ var insertk unsafe.Pointer
+
+bucketloop:
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if isEmpty(b.tophash[i]) {
+ if insertb == nil {
+ insertb = b
+ inserti = i
+ }
+ if b.tophash[i] == emptyRest {
+ break bucketloop
+ }
+ continue
+ }
+ k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
+ if k != key {
+ continue
+ }
+ insertb = b
+ inserti = i
+ goto done
+ }
+ ovf := b.overflow(t)
+ if ovf == nil {
+ break
+ }
+ b = ovf
+ }
+
+ // Did not find mapping for key. Allocate new cell & add entry.
+
+ // If we hit the max load factor or we have too many overflow buckets,
+ // and we're not already in the middle of growing, start growing.
+ if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+ hashGrow(t, h)
+ goto again // Growing the table invalidates everything, so try again
+ }
+
+ if insertb == nil {
+ // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
+ insertb = h.newoverflow(t, b)
+ inserti = 0 // not necessary, but avoids needlessly spilling inserti
+ }
+ insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+ insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
+ // store new key at insert position
+ *(*uint64)(insertk) = key
+
+ h.count++
+
+done:
+ elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
+ if h.flags&hashWriting == 0 {
+ fatal("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+ return elem
+}
+
+func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+ if h == nil {
+ panic(plainError("assignment to entry in nil map"))
+ }
+ if raceenabled {
+ callerpc := getcallerpc()
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map writes")
+ }
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
+ h.flags ^= hashWriting
+
+ if h.buckets == nil {
+ h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+ }
+
+again:
+ bucket := hash & bucketMask(h.B)
+ if h.growing() {
+ growWork_fast64(t, h, bucket)
+ }
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+
+ var insertb *bmap
+ var inserti uintptr
+ var insertk unsafe.Pointer
+
+bucketloop:
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if isEmpty(b.tophash[i]) {
+ if insertb == nil {
+ insertb = b
+ inserti = i
+ }
+ if b.tophash[i] == emptyRest {
+ break bucketloop
+ }
+ continue
+ }
+ k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
+ if k != key {
+ continue
+ }
+ insertb = b
+ inserti = i
+ goto done
+ }
+ ovf := b.overflow(t)
+ if ovf == nil {
+ break
+ }
+ b = ovf
+ }
+
+ // Did not find mapping for key. Allocate new cell & add entry.
+
+ // If we hit the max load factor or we have too many overflow buckets,
+ // and we're not already in the middle of growing, start growing.
+ if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+ hashGrow(t, h)
+ goto again // Growing the table invalidates everything, so try again
+ }
+
+ if insertb == nil {
+ // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
+ insertb = h.newoverflow(t, b)
+ inserti = 0 // not necessary, but avoids needlessly spilling inserti
+ }
+ insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+ insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
+ // store new key at insert position
+ *(*unsafe.Pointer)(insertk) = key
+
+ h.count++
+
+done:
+ elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
+ if h.flags&hashWriting == 0 {
+ fatal("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+ return elem
+}
+
+func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc()
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
+ }
+ if h == nil || h.count == 0 {
+ return
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map writes")
+ }
+
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+ // Set hashWriting after calling t.hasher for consistency with mapdelete
+ h.flags ^= hashWriting
+
+ bucket := hash & bucketMask(h.B)
+ if h.growing() {
+ growWork_fast64(t, h, bucket)
+ }
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ bOrig := b
+search:
+ for ; b != nil; b = b.overflow(t) {
+ for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
+ if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
+ continue
+ }
+ // Only clear key if there are pointers in it.
+ if t.key.ptrdata != 0 {
+ if goarch.PtrSize == 8 {
+ *(*unsafe.Pointer)(k) = nil
+ } else {
+ // There are three ways to squeeze one or more 32 bit pointers into 64 bits.
+ // Just call memclrHasPointers instead of trying to handle all cases here.
+ memclrHasPointers(k, 8)
+ }
+ }
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
+ if t.elem.ptrdata != 0 {
+ memclrHasPointers(e, t.elem.size)
+ } else {
+ memclrNoHeapPointers(e, t.elem.size)
+ }
+ b.tophash[i] = emptyOne
+ // If the bucket now ends in a bunch of emptyOne states,
+ // change those to emptyRest states.
+ if i == bucketCnt-1 {
+ if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
+ goto notLast
+ }
+ } else {
+ if b.tophash[i+1] != emptyRest {
+ goto notLast
+ }
+ }
+ for {
+ b.tophash[i] = emptyRest
+ if i == 0 {
+ if b == bOrig {
+ break // beginning of initial bucket, we're done.
+ }
+ // Find previous bucket, continue at its last entry.
+ c := b
+ for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
+ }
+ i = bucketCnt - 1
+ } else {
+ i--
+ }
+ if b.tophash[i] != emptyOne {
+ break
+ }
+ }
+ notLast:
+ h.count--
+ // Reset the hash seed to make it more difficult for attackers to
+ // repeatedly trigger hash collisions. See issue 25237.
+ if h.count == 0 {
+ h.hash0 = fastrand()
+ }
+ break search
+ }
+ }
+
+ if h.flags&hashWriting == 0 {
+ fatal("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+}
+
+func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
+ // make sure we evacuate the oldbucket corresponding
+ // to the bucket we're about to use
+ evacuate_fast64(t, h, bucket&h.oldbucketmask())
+
+ // evacuate one more oldbucket to make progress on growing
+ if h.growing() {
+ evacuate_fast64(t, h, h.nevacuate)
+ }
+}
+
+func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
+ b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
+ newbit := h.noldbuckets()
+ if !evacuated(b) {
+ // TODO: reuse overflow buckets instead of using new ones, if there
+ // is no iterator using the old buckets. (If !oldIterator.)
+
+ // xy contains the x and y (low and high) evacuation destinations.
+ var xy [2]evacDst
+ x := &xy[0]
+ x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
+ x.k = add(unsafe.Pointer(x.b), dataOffset)
+ x.e = add(x.k, bucketCnt*8)
+
+ if !h.sameSizeGrow() {
+ // Only calculate y pointers if we're growing bigger.
+ // Otherwise GC can see bad pointers.
+ y := &xy[1]
+ y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
+ y.k = add(unsafe.Pointer(y.b), dataOffset)
+ y.e = add(y.k, bucketCnt*8)
+ }
+
+ for ; b != nil; b = b.overflow(t) {
+ k := add(unsafe.Pointer(b), dataOffset)
+ e := add(k, bucketCnt*8)
+ for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
+ top := b.tophash[i]
+ if isEmpty(top) {
+ b.tophash[i] = evacuatedEmpty
+ continue
+ }
+ if top < minTopHash {
+ throw("bad map state")
+ }
+ var useY uint8
+ if !h.sameSizeGrow() {
+ // Compute hash to make our evacuation decision (whether we need
+ // to send this key/elem to bucket x or bucket y).
+ hash := t.hasher(k, uintptr(h.hash0))
+ if hash&newbit != 0 {
+ useY = 1
+ }
+ }
+
+ b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
+ dst := &xy[useY] // evacuation destination
+
+ if dst.i == bucketCnt {
+ dst.b = h.newoverflow(t, dst.b)
+ dst.i = 0
+ dst.k = add(unsafe.Pointer(dst.b), dataOffset)
+ dst.e = add(dst.k, bucketCnt*8)
+ }
+ dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+
+ // Copy key.
+ if t.key.ptrdata != 0 && writeBarrier.enabled {
+ if goarch.PtrSize == 8 {
+ // Write with a write barrier.
+ *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
+ } else {
+ // There are three ways to squeeze at least one 32 bit pointer into 64 bits.
+ // Give up and call typedmemmove.
+ typedmemmove(t.key, dst.k, k)
+ }
+ } else {
+ *(*uint64)(dst.k) = *(*uint64)(k)
+ }
+
+ typedmemmove(t.elem, dst.e, e)
+ dst.i++
+ // These updates might push these pointers past the end of the
+ // key or elem arrays. That's ok, as we have the overflow pointer
+ // at the end of the bucket to protect against pointing past the
+ // end of the bucket.
+ dst.k = add(dst.k, 8)
+ dst.e = add(dst.e, uintptr(t.elemsize))
+ }
+ }
+ // Unlink the overflow buckets & clear key/elem to help GC.
+ if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
+ b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
+ // Preserve b.tophash because the evacuation
+ // state is maintained there.
+ ptr := add(b, dataOffset)
+ n := uintptr(t.bucketsize) - dataOffset
+ memclrHasPointers(ptr, n)
+ }
+ }
+
+ if oldbucket == h.nevacuate {
+ advanceEvacuationMark(h, t, newbit)
+ }
+}
diff --git a/contrib/go/_std_1.19/src/runtime/map_faststr.go b/contrib/go/_std_1.19/src/runtime/map_faststr.go
new file mode 100644
index 0000000000..006c24cee2
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/map_faststr.go
@@ -0,0 +1,485 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/abi"
+ "internal/goarch"
+ "unsafe"
+)
+
+func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc()
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
+ }
+ if h == nil || h.count == 0 {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map read and map write")
+ }
+ key := stringStructOf(&ky)
+ if h.B == 0 {
+ // One-bucket table.
+ b := (*bmap)(h.buckets)
+ if key.len < 32 {
+ // short key, doing lots of comparisons is ok
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+ k := (*stringStruct)(kptr)
+ if k.len != key.len || isEmpty(b.tophash[i]) {
+ if b.tophash[i] == emptyRest {
+ break
+ }
+ continue
+ }
+ if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
+ }
+ }
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ // long key, try not to do more comparisons than necessary
+ keymaybe := uintptr(bucketCnt)
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+ k := (*stringStruct)(kptr)
+ if k.len != key.len || isEmpty(b.tophash[i]) {
+ if b.tophash[i] == emptyRest {
+ break
+ }
+ continue
+ }
+ if k.str == key.str {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
+ }
+ // check first 4 bytes
+ if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
+ continue
+ }
+ // check last 4 bytes
+ if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
+ continue
+ }
+ if keymaybe != bucketCnt {
+ // Two keys are potential matches. Use hash to distinguish them.
+ goto dohash
+ }
+ keymaybe = i
+ }
+ if keymaybe != bucketCnt {
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
+ if memequal(k.str, key.str, uintptr(key.len)) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize))
+ }
+ }
+ return unsafe.Pointer(&zeroVal[0])
+ }
+dohash:
+ hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+ m := bucketMask(h.B)
+ b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ if !h.sameSizeGrow() {
+ // There used to be half as many buckets; mask down one more power of two.
+ m >>= 1
+ }
+ oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ top := tophash(hash)
+ for ; b != nil; b = b.overflow(t) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+ k := (*stringStruct)(kptr)
+ if k.len != key.len || b.tophash[i] != top {
+ continue
+ }
+ if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
+ }
+ }
+ }
+ return unsafe.Pointer(&zeroVal[0])
+}
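For keys of 32 bytes or more, the single-bucket path above avoids full comparisons: it filters on length, on pointer equality of the string data, and on the first and last 4 bytes, calling memequal only if exactly one candidate survives (two survivors fall through to the hashed path). A sketch of the prefilter on plain strings (hypothetical helper; assumes len(key) >= 4, which the long-key path guarantees):

func likelyEqual(k, key string) bool {
	if len(k) != len(key) {
		return false
	}
	n := len(key)
	// Cheap ends check: mismatching strings usually differ here,
	// so the full byte-wise comparison is rarely needed.
	return k[:4] == key[:4] && k[n-4:] == key[n-4:]
}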
+
+func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc()
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
+ }
+ if h == nil || h.count == 0 {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map read and map write")
+ }
+ key := stringStructOf(&ky)
+ if h.B == 0 {
+ // One-bucket table.
+ b := (*bmap)(h.buckets)
+ if key.len < 32 {
+ // short key, doing lots of comparisons is ok
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+ k := (*stringStruct)(kptr)
+ if k.len != key.len || isEmpty(b.tophash[i]) {
+ if b.tophash[i] == emptyRest {
+ break
+ }
+ continue
+ }
+ if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
+ }
+ }
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ // long key, try not to do more comparisons than necessary
+ keymaybe := uintptr(bucketCnt)
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+ k := (*stringStruct)(kptr)
+ if k.len != key.len || isEmpty(b.tophash[i]) {
+ if b.tophash[i] == emptyRest {
+ break
+ }
+ continue
+ }
+ if k.str == key.str {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
+ }
+ // check first 4 bytes
+ if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
+ continue
+ }
+ // check last 4 bytes
+ if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
+ continue
+ }
+ if keymaybe != bucketCnt {
+ // Two keys are potential matches. Use hash to distinguish them.
+ goto dohash
+ }
+ keymaybe = i
+ }
+ if keymaybe != bucketCnt {
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
+ if memequal(k.str, key.str, uintptr(key.len)) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize)), true
+ }
+ }
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+dohash:
+ hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+ m := bucketMask(h.B)
+ b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ if !h.sameSizeGrow() {
+ // There used to be half as many buckets; mask down one more power of two.
+ m >>= 1
+ }
+ oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ top := tophash(hash)
+ for ; b != nil; b = b.overflow(t) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+ k := (*stringStruct)(kptr)
+ if k.len != key.len || b.tophash[i] != top {
+ continue
+ }
+ if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
+ }
+ }
+ }
+ return unsafe.Pointer(&zeroVal[0]), false
+}
+
+func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
+ if h == nil {
+ panic(plainError("assignment to entry in nil map"))
+ }
+ if raceenabled {
+ callerpc := getcallerpc()
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map writes")
+ }
+ key := stringStructOf(&s)
+ hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
+
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
+ h.flags ^= hashWriting
+
+ if h.buckets == nil {
+ h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+ }
+
+again:
+ bucket := hash & bucketMask(h.B)
+ if h.growing() {
+ growWork_faststr(t, h, bucket)
+ }
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ top := tophash(hash)
+
+ var insertb *bmap
+ var inserti uintptr
+ var insertk unsafe.Pointer
+
+bucketloop:
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] != top {
+ if isEmpty(b.tophash[i]) && insertb == nil {
+ insertb = b
+ inserti = i
+ }
+ if b.tophash[i] == emptyRest {
+ break bucketloop
+ }
+ continue
+ }
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
+ if k.len != key.len {
+ continue
+ }
+ if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
+ continue
+ }
+ // already have a mapping for key. Update it.
+ inserti = i
+ insertb = b
+ // Overwrite existing key, so it can be garbage collected.
+ // The size is already guaranteed to be set correctly.
+ k.str = key.str
+ goto done
+ }
+ ovf := b.overflow(t)
+ if ovf == nil {
+ break
+ }
+ b = ovf
+ }
+
+ // Did not find mapping for key. Allocate new cell & add entry.
+
+ // If we hit the max load factor or we have too many overflow buckets,
+ // and we're not already in the middle of growing, start growing.
+ if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+ hashGrow(t, h)
+ goto again // Growing the table invalidates everything, so try again
+ }
+
+ if insertb == nil {
+ // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
+ insertb = h.newoverflow(t, b)
+ inserti = 0 // not necessary, but avoids needlessly spilling inserti
+ }
+ insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks
+
+ insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
+ // store new key at insert position
+ *((*stringStruct)(insertk)) = *key
+ h.count++
+
+done:
+ elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.elemsize))
+ if h.flags&hashWriting == 0 {
+ fatal("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+ return elem
+}
+
+func mapdelete_faststr(t *maptype, h *hmap, ky string) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc()
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
+ }
+ if h == nil || h.count == 0 {
+ return
+ }
+ if h.flags&hashWriting != 0 {
+ fatal("concurrent map writes")
+ }
+
+ key := stringStructOf(&ky)
+ hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+
+ // Set hashWriting after calling t.hasher for consistency with mapdelete
+ h.flags ^= hashWriting
+
+ bucket := hash & bucketMask(h.B)
+ if h.growing() {
+ growWork_faststr(t, h, bucket)
+ }
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ bOrig := b
+ top := tophash(hash)
+search:
+ for ; b != nil; b = b.overflow(t) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+ k := (*stringStruct)(kptr)
+ if k.len != key.len || b.tophash[i] != top {
+ continue
+ }
+ if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
+ continue
+ }
+ // Clear key's pointer.
+ k.str = nil
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
+ if t.elem.ptrdata != 0 {
+ memclrHasPointers(e, t.elem.size)
+ } else {
+ memclrNoHeapPointers(e, t.elem.size)
+ }
+ b.tophash[i] = emptyOne
+ // If the bucket now ends in a bunch of emptyOne states,
+ // change those to emptyRest states.
+ if i == bucketCnt-1 {
+ if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
+ goto notLast
+ }
+ } else {
+ if b.tophash[i+1] != emptyRest {
+ goto notLast
+ }
+ }
+ for {
+ b.tophash[i] = emptyRest
+ if i == 0 {
+ if b == bOrig {
+ break // beginning of initial bucket, we're done.
+ }
+ // Find previous bucket, continue at its last entry.
+ c := b
+ for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
+ }
+ i = bucketCnt - 1
+ } else {
+ i--
+ }
+ if b.tophash[i] != emptyOne {
+ break
+ }
+ }
+ notLast:
+ h.count--
+ // Reset the hash seed to make it more difficult for attackers to
+ // repeatedly trigger hash collisions. See issue 25237.
+ if h.count == 0 {
+ h.hash0 = fastrand()
+ }
+ break search
+ }
+ }
+
+ if h.flags&hashWriting == 0 {
+ fatal("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+}
+
+func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
+ // make sure we evacuate the oldbucket corresponding
+ // to the bucket we're about to use
+ evacuate_faststr(t, h, bucket&h.oldbucketmask())
+
+ // evacuate one more oldbucket to make progress on growing
+ if h.growing() {
+ evacuate_faststr(t, h, h.nevacuate)
+ }
+}
+
+func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
+ b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
+ newbit := h.noldbuckets()
+ if !evacuated(b) {
+ // TODO: reuse overflow buckets instead of using new ones, if there
+ // is no iterator using the old buckets. (If !oldIterator.)
+
+ // xy contains the x and y (low and high) evacuation destinations.
+ var xy [2]evacDst
+ x := &xy[0]
+ x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
+ x.k = add(unsafe.Pointer(x.b), dataOffset)
+ x.e = add(x.k, bucketCnt*2*goarch.PtrSize)
+
+ if !h.sameSizeGrow() {
+ // Only calculate y pointers if we're growing bigger.
+ // Otherwise GC can see bad pointers.
+ y := &xy[1]
+ y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
+ y.k = add(unsafe.Pointer(y.b), dataOffset)
+ y.e = add(y.k, bucketCnt*2*goarch.PtrSize)
+ }
+
+ for ; b != nil; b = b.overflow(t) {
+ k := add(unsafe.Pointer(b), dataOffset)
+ e := add(k, bucketCnt*2*goarch.PtrSize)
+ for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.elemsize)) {
+ top := b.tophash[i]
+ if isEmpty(top) {
+ b.tophash[i] = evacuatedEmpty
+ continue
+ }
+ if top < minTopHash {
+ throw("bad map state")
+ }
+ var useY uint8
+ if !h.sameSizeGrow() {
+ // Compute hash to make our evacuation decision (whether we need
+ // to send this key/elem to bucket x or bucket y).
+ hash := t.hasher(k, uintptr(h.hash0))
+ if hash&newbit != 0 {
+ useY = 1
+ }
+ }
+
+ b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
+ dst := &xy[useY] // evacuation destination
+
+ if dst.i == bucketCnt {
+ dst.b = h.newoverflow(t, dst.b)
+ dst.i = 0
+ dst.k = add(unsafe.Pointer(dst.b), dataOffset)
+ dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize)
+ }
+ dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+
+ // Copy key.
+ *(*string)(dst.k) = *(*string)(k)
+
+ typedmemmove(t.elem, dst.e, e)
+ dst.i++
+ // These updates might push these pointers past the end of the
+ // key or elem arrays. That's ok, as we have the overflow pointer
+ // at the end of the bucket to protect against pointing past the
+ // end of the bucket.
+ dst.k = add(dst.k, 2*goarch.PtrSize)
+ dst.e = add(dst.e, uintptr(t.elemsize))
+ }
+ }
+ // Unlink the overflow buckets & clear key/elem to help GC.
+ if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
+ b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
+ // Preserve b.tophash because the evacuation
+ // state is maintained there.
+ ptr := add(b, dataOffset)
+ n := uintptr(t.bucketsize) - dataOffset
+ memclrHasPointers(ptr, n)
+ }
+ }
+
+ if oldbucket == h.nevacuate {
+ advanceEvacuationMark(h, t, newbit)
+ }
+}
diff --git a/contrib/go/_std_1.19/src/runtime/mbarrier.go b/contrib/go/_std_1.19/src/runtime/mbarrier.go
new file mode 100644
index 0000000000..c3b45415a9
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mbarrier.go
@@ -0,0 +1,344 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector: write barriers.
+//
+// For the concurrent garbage collector, the Go compiler implements
+// updates to pointer-valued fields that may be in heap objects by
+// emitting calls to write barriers. The main write barrier for
+// individual pointer writes is gcWriteBarrier and is implemented in
+// assembly. This file contains write barrier entry points for bulk
+// operations. See also mwbbuf.go.
+
+package runtime
+
+import (
+ "internal/abi"
+ "internal/goarch"
+ "unsafe"
+)
+
+// Go uses a hybrid barrier that combines a Yuasa-style deletion
+// barrier—which shades the object whose reference is being
+// overwritten—with a Dijkstra-style insertion barrier—which shades the object
+// whose reference is being written. The insertion part of the barrier
+// is necessary while the calling goroutine's stack is grey. In
+// pseudocode, the barrier is:
+//
+// writePointer(slot, ptr):
+// shade(*slot)
+// if current stack is grey:
+// shade(ptr)
+// *slot = ptr
+//
+// slot is the destination in Go code.
+// ptr is the value that goes into the slot in Go code.
+//
+// Shade indicates that it has seen a white pointer by adding the referent
+// to wbuf as well as marking it.
+//
+// The two shades and the condition work together to prevent a mutator
+// from hiding an object from the garbage collector:
+//
+// 1. shade(*slot) prevents a mutator from hiding an object by moving
+// the sole pointer to it from the heap to its stack. If it attempts
+// to unlink an object from the heap, this will shade it.
+//
+// 2. shade(ptr) prevents a mutator from hiding an object by moving
+// the sole pointer to it from its stack into a black object in the
+// heap. If it attempts to install the pointer into a black object,
+// this will shade it.
+//
+// 3. Once a goroutine's stack is black, the shade(ptr) becomes
+// unnecessary. shade(ptr) prevents hiding an object by moving it from
+// the stack to the heap, but this requires first having a pointer
+// hidden on the stack. Immediately after a stack is scanned, it only
+// points to shaded objects, so it's not hiding anything, and the
+// shade(*slot) prevents it from hiding any other pointers on its
+// stack.
+//
+// For a detailed description of this barrier and proof of
+// correctness, see https://github.com/golang/proposal/blob/master/design/17503-eliminate-rescan.md
+//
+//
+//
+// Dealing with memory ordering:
+//
+// Both the Yuasa and Dijkstra barriers can be made conditional on the
+// color of the object containing the slot. We chose not to make these
+// conditional because the cost of ensuring that the object holding
+// the slot doesn't concurrently change color without the mutator
+// noticing seems prohibitive.
+//
+// Consider the following example where the mutator writes into
+// a slot and then loads the slot's mark bit while the GC thread
+// writes to the slot's mark bit and then as part of scanning reads
+// the slot.
+//
+// Initially both [slot] and [slotmark] are 0 (nil)
+// Mutator thread GC thread
+// st [slot], ptr st [slotmark], 1
+//
+// ld r1, [slotmark] ld r2, [slot]
+//
+// Without an expensive memory barrier between the st and the ld, the final
+// result on most HW (including 386/amd64) can be r1==r2==0. This is a classic
+// example of what can happen when loads are allowed to be reordered with older
+// stores (avoiding such reorderings lies at the heart of the classic
+// Peterson/Dekker algorithms for mutual exclusion). Rather than require memory
+// barriers, which will slow down both the mutator and the GC, we always grey
+// the ptr object regardless of the slot's color.
+//
+// Another place where we intentionally omit memory barriers is when
+// accessing mheap_.arena_used to check if a pointer points into the
+// heap. On relaxed memory machines, it's possible for a mutator to
+// extend the size of the heap by updating arena_used, allocate an
+// object from this new region, and publish a pointer to that object,
+// but for tracing running on another processor to observe the pointer
+// but use the old value of arena_used. In this case, tracing will not
+// mark the object, even though it's reachable. However, the mutator
+// is guaranteed to execute a write barrier when it publishes the
+// pointer, so it will take care of marking the object. A general
+// consequence of this is that the garbage collector may cache the
+// value of mheap_.arena_used. (See issue #9984.)
+//
+//
+// Stack writes:
+//
+// The compiler omits write barriers for writes to the current frame,
+// but if a stack pointer has been passed down the call stack, the
+// compiler will generate a write barrier for writes through that
+// pointer (because it doesn't know it's not a heap pointer).
+//
+// One might be tempted to ignore the write barrier if slot points
+// into the stack. Don't do it! Mark termination only re-scans
+// frames that have potentially been active since the concurrent scan,
+// so it depends on write barriers to track changes to pointers in
+// stack frames that have not been active.
+//
+//
+// Global writes:
+//
+// The Go garbage collector requires write barriers when heap pointers
+// are stored in globals. Many garbage collectors ignore writes to
+// globals and instead pick up global -> heap pointers during
+// termination. This increases pause time, so we instead rely on write
+// barriers for writes to globals so that we don't have to rescan
+// globals during mark termination.
+//
+//
+// Publication ordering:
+//
+// The write barrier is *pre-publication*, meaning that the write
+// barrier happens prior to the *slot = ptr write that may make ptr
+// reachable by some goroutine that currently cannot reach it.
+//
+//
+// Signal handler pointer writes:
+//
+// In general, the signal handler cannot safely invoke the write
+// barrier because it may run without a P or even during the write
+// barrier.
+//
+// There is exactly one exception: profbuf.go omits a barrier during
+// signal handler profile logging. That's safe only because of the
+// deletion barrier. See profbuf.go for a detailed argument. If we
+// remove the deletion barrier, we'll have to work out a new way to
+// handle the profile logging.
+
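The pseudocode above translates almost line for line into Go. In this sketch, shade and stackIsGrey are hypothetical stand-ins for GC internals, and *int stands in for an arbitrary pointer slot:

// writePointer applies the hybrid barrier before the actual store.
func writePointer(slot **int, ptr *int, stackIsGrey bool, shade func(*int)) {
	shade(*slot) // Yuasa deletion barrier: the reference being overwritten
	if stackIsGrey {
		shade(ptr) // Dijkstra insertion barrier: the reference being written
	}
	*slot = ptr
}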
+// typedmemmove copies a value of type typ to dst from src.
+// Must be nosplit, see #16026.
+//
+// TODO: Perfect for go:nosplitrec since we can't have a safe point
+// anywhere in the bulk barrier or memmove.
+//
+//go:nosplit
+func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
+ if dst == src {
+ return
+ }
+ if writeBarrier.needed && typ.ptrdata != 0 {
+ bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
+ }
+ // There's a race here: if some other goroutine can write to
+ // src, it may change some pointer in src after we've
+ // performed the write barrier but before we perform the
+ // memory copy. This is safe because the write performed by that
+ // other goroutine must also be accompanied by a write
+ // barrier, so at worst we've unnecessarily greyed the old
+ // pointer that was in src.
+ memmove(dst, src, typ.size)
+ if writeBarrier.cgo {
+ cgoCheckMemmove(typ, dst, src, 0, typ.size)
+ }
+}
+
+//go:linkname reflect_typedmemmove reflect.typedmemmove
+func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
+ if raceenabled {
+ raceWriteObjectPC(typ, dst, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
+ raceReadObjectPC(typ, src, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
+ }
+ if msanenabled {
+ msanwrite(dst, typ.size)
+ msanread(src, typ.size)
+ }
+ if asanenabled {
+ asanwrite(dst, typ.size)
+ asanread(src, typ.size)
+ }
+ typedmemmove(typ, dst, src)
+}
+
+//go:linkname reflectlite_typedmemmove internal/reflectlite.typedmemmove
+func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
+ reflect_typedmemmove(typ, dst, src)
+}
+
+// typedmemmovepartial is like typedmemmove but assumes that
+// dst and src point off bytes into the value and only copies size bytes.
+// off must be a multiple of goarch.PtrSize.
+//
+//go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
+func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
+ if writeBarrier.needed && typ.ptrdata > off && size >= goarch.PtrSize {
+ if off&(goarch.PtrSize-1) != 0 {
+ panic("reflect: internal error: misaligned offset")
+ }
+ pwsize := alignDown(size, goarch.PtrSize)
+ if poff := typ.ptrdata - off; pwsize > poff {
+ pwsize = poff
+ }
+ bulkBarrierPreWrite(uintptr(dst), uintptr(src), pwsize)
+ }
+
+ memmove(dst, src, size)
+ if writeBarrier.cgo {
+ cgoCheckMemmove(typ, dst, src, off, size)
+ }
+}
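The clamping above limits the barrier to whole pointer words that actually fall inside the pointer-bearing prefix of the value. A sketch of that arithmetic (hypothetical helper; alignDown is the usual power-of-two rounding):

func barrierSize(size, off, ptrdata, ptrSize uintptr) uintptr {
	pwsize := size &^ (ptrSize - 1) // alignDown(size, ptrSize)
	if poff := ptrdata - off; pwsize > poff {
		pwsize = poff // nothing past typ.ptrdata holds pointers
	}
	return pwsize
}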
+
+// reflectcallmove is invoked by reflectcall to copy the return values
+// out of the stack and into the heap, invoking the necessary write
+// barriers. dst, src, and size describe the return value area to
+// copy. typ describes the entire frame (not just the return values).
+// typ may be nil, which indicates write barriers are not needed.
+//
+// It must be nosplit and must only call nosplit functions because the
+// stack map of reflectcall is wrong.
+//
+//go:nosplit
+func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
+ if writeBarrier.needed && typ != nil && typ.ptrdata != 0 && size >= goarch.PtrSize {
+ bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
+ }
+ memmove(dst, src, size)
+
+ // Move pointers returned in registers to a place where the GC can see them.
+ for i := range regs.Ints {
+ if regs.ReturnIsPtr.Get(i) {
+ regs.Ptrs[i] = unsafe.Pointer(regs.Ints[i])
+ }
+ }
+}
+
+//go:nosplit
+func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int {
+ n := dstLen
+ if n > srcLen {
+ n = srcLen
+ }
+ if n == 0 {
+ return 0
+ }
+
+ // The compiler emits calls to typedslicecopy before
+ // instrumentation runs, so unlike the other copying and
+ // assignment operations, it's not instrumented in the calling
+ // code and needs its own instrumentation.
+ if raceenabled {
+ callerpc := getcallerpc()
+ pc := abi.FuncPCABIInternal(slicecopy)
+ racewriterangepc(dstPtr, uintptr(n)*typ.size, callerpc, pc)
+ racereadrangepc(srcPtr, uintptr(n)*typ.size, callerpc, pc)
+ }
+ if msanenabled {
+ msanwrite(dstPtr, uintptr(n)*typ.size)
+ msanread(srcPtr, uintptr(n)*typ.size)
+ }
+ if asanenabled {
+ asanwrite(dstPtr, uintptr(n)*typ.size)
+ asanread(srcPtr, uintptr(n)*typ.size)
+ }
+
+ if writeBarrier.cgo {
+ cgoCheckSliceCopy(typ, dstPtr, srcPtr, n)
+ }
+
+ if dstPtr == srcPtr {
+ return n
+ }
+
+ // Note: No point in checking typ.ptrdata here:
+ // compiler only emits calls to typedslicecopy for types with pointers,
+ // and growslice and reflect_typedslicecopy check for pointers
+ // before calling typedslicecopy.
+ size := uintptr(n) * typ.size
+ if writeBarrier.needed {
+ pwsize := size - typ.size + typ.ptrdata
+ bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize)
+ }
+ // See typedmemmove for a discussion of the race between the
+ // barrier and memmove.
+ memmove(dstPtr, srcPtr, size)
+ return n
+}
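Broadly, the compiler routes copy through typedslicecopy only when the element type carries pointers; scalar elements take the plain memmove path with no barriers. An ordinary-Go illustration (hypothetical types):

type node struct{ next *node }

func copyNodes(dst, src []node) int {
	return copy(dst, src) // element holds a pointer: barriered bulk copy
}

func copyInts(dst, src []int) int {
	return copy(dst, src) // no pointers: plain memmove, no barriers
}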
+
+//go:linkname reflect_typedslicecopy reflect.typedslicecopy
+func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
+ if elemType.ptrdata == 0 {
+ return slicecopy(dst.array, dst.len, src.array, src.len, elemType.size)
+ }
+ return typedslicecopy(elemType, dst.array, dst.len, src.array, src.len)
+}
+
+// typedmemclr clears the typed memory at ptr with type typ. The
+// memory at ptr must already be initialized (and hence in type-safe
+// state). If the memory is being initialized for the first time, see
+// memclrNoHeapPointers.
+//
+// If the caller knows that typ has pointers, it can alternatively
+// call memclrHasPointers.
+//
+//go:nosplit
+func typedmemclr(typ *_type, ptr unsafe.Pointer) {
+ if writeBarrier.needed && typ.ptrdata != 0 {
+ bulkBarrierPreWrite(uintptr(ptr), 0, typ.ptrdata)
+ }
+ memclrNoHeapPointers(ptr, typ.size)
+}
+
+//go:linkname reflect_typedmemclr reflect.typedmemclr
+func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
+ typedmemclr(typ, ptr)
+}
+
+//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
+func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
+ if writeBarrier.needed && typ.ptrdata != 0 {
+ bulkBarrierPreWrite(uintptr(ptr), 0, size)
+ }
+ memclrNoHeapPointers(ptr, size)
+}
+
+// memclrHasPointers clears n bytes of typed memory starting at ptr.
+// The caller must ensure that the type of the object at ptr has
+// pointers, usually by checking typ.ptrdata. However, ptr
+// does not have to point to the start of the allocation.
+//
+//go:nosplit
+func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
+ bulkBarrierPreWrite(uintptr(ptr), 0, n)
+ memclrNoHeapPointers(ptr, n)
+}
diff --git a/contrib/go/_std_1.19/src/runtime/mbitmap.go b/contrib/go/_std_1.19/src/runtime/mbitmap.go
new file mode 100644
index 0000000000..a3a6590d65
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mbitmap.go
@@ -0,0 +1,2056 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector: type and heap bitmaps.
+//
+// Stack, data, and bss bitmaps
+//
+// Stack frames and global variables in the data and bss sections are
+// described by bitmaps with 1 bit per pointer-sized word. A "1" bit
+// means the word is a live pointer to be visited by the GC (referred to
+// as "pointer"). A "0" bit means the word should be ignored by GC
+// (referred to as "scalar", though it could be a dead pointer value).
+//
+// Heap bitmap
+//
+// The heap bitmap comprises 2 bits for each pointer-sized word in the heap,
+// stored in the heapArena metadata backing each heap arena.
+// That is, if ha is the heapArena for the arena starting at start,
+// then ha.bitmap[0] holds the 2-bit entries for the four words start
+// through start+3*ptrSize, ha.bitmap[1] holds the entries for
+// start+4*ptrSize through start+7*ptrSize, and so on.
+//
+// In each 2-bit entry, the lower bit is a pointer/scalar bit, just
+// like in the stack/data bitmaps described above. The upper bit
+// indicates scan/dead: a "1" value ("scan") indicates that there may
+// be pointers in later words of the allocation, and a "0" value
+// ("dead") indicates there are no more pointers in the allocation. If
+// the upper bit is 0, the lower bit must also be 0, and this
+// indicates scanning can ignore the rest of the allocation.
+//
+// The 2-bit entries are split when written into the byte, so that the top half
+// of the byte contains 4 high (scan) bits and the bottom half contains 4 low
+// (pointer) bits. This form allows a copy from the 1-bit to the 4-bit form to
+// keep the pointer bits contiguous, instead of having to space them out.
+//
+// The code makes use of the fact that the zero value for a heap
+// bitmap means scalar/dead. This property must be preserved when
+// modifying the encoding.
+//
+// The bitmap for noscan spans is not maintained. Code must ensure
+// that an object is scannable before consulting its bitmap by
+// checking either the noscan bit in the span or by consulting its
+// type's information.
+
+package runtime
+
+import (
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+const (
+ bitPointer = 1 << 0
+ bitScan = 1 << 4
+
+ heapBitsShift = 1 // shift offset between successive bitPointer or bitScan entries
+ wordsPerBitmapByte = 8 / 2 // heap words described by one bitmap byte
+
+ // all scan/pointer bits in a byte
+ bitScanAll = bitScan | bitScan<<heapBitsShift | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
+ bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift)
+)
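+
+// Editor's sketch, not part of the upstream file: it shows how one 2-bit
+// entry is read out of a bitmap byte using the constants above, mirroring
+// the bits/isPointer/morePointers accessors below. The helper name is
+// hypothetical.
+func exampleDecodeHeapBits(b uint8, shift uint32) (isPointer, scan bool) {
+ v := b >> (shift & 31) // shift is 0..3, selecting one of four word entries
+ return v&bitPointer != 0, v&bitScan != 0
+}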
+
+// addb returns the byte pointer p+n.
+//
+//go:nowritebarrier
+//go:nosplit
+func addb(p *byte, n uintptr) *byte {
+ // Note: wrote out full expression instead of calling add(p, n)
+ // to reduce the number of temporaries generated by the
+ // compiler for this trivial expression during inlining.
+ return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
+}
+
+// subtractb returns the byte pointer p-n.
+//
+//go:nowritebarrier
+//go:nosplit
+func subtractb(p *byte, n uintptr) *byte {
+ // Note: wrote out full expression instead of calling add(p, -n)
+ // to reduce the number of temporaries generated by the
+ // compiler for this trivial expression during inlining.
+ return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
+}
+
+// add1 returns the byte pointer p+1.
+//
+//go:nowritebarrier
+//go:nosplit
+func add1(p *byte) *byte {
+ // Note: wrote out full expression instead of calling addb(p, 1)
+ // to reduce the number of temporaries generated by the
+ // compiler for this trivial expression during inlining.
+ return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
+}
+
+// subtract1 returns the byte pointer p-1.
+//
+// nosplit because it is used during write barriers and must not be preempted.
+//
+//go:nowritebarrier
+//go:nosplit
+func subtract1(p *byte) *byte {
+ // Note: wrote out full expression instead of calling subtractb(p, 1)
+ // to reduce the number of temporaries generated by the
+ // compiler for this trivial expression during inlining.
+ return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
+}
+
+// heapBits provides access to the bitmap bits for a single heap word.
+// The methods on heapBits take value receivers so that the compiler
+// can more easily inline calls to those methods and registerize the
+// struct fields independently.
+type heapBits struct {
+ bitp *uint8
+ shift uint32
+ arena uint32 // Index of heap arena containing bitp
+ last *uint8 // Last byte of the arena's bitmap
+}
+
+// Make the compiler check that heapBits.arena is large enough to hold
+// the maximum arena frame number.
+var _ = heapBits{arena: (1<<heapAddrBits)/heapArenaBytes - 1}
+
+// markBits provides access to the mark bit for an object in the heap.
+// bytep points to the byte holding the mark bit.
+// mask is a byte with a single bit set that can be &ed with *bytep
+// to see if the bit has been set.
+// *m.bytep&m.mask != 0 indicates the mark bit is set.
+// index can be used along with span information to generate
+// the address of the object in the heap.
+// We maintain one set of mark bits for allocation and one for
+// marking purposes.
+type markBits struct {
+ bytep *uint8
+ mask uint8
+ index uintptr
+}
+
+//go:nosplit
+func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
+ bytep, mask := s.allocBits.bitp(allocBitIndex)
+ return markBits{bytep, mask, allocBitIndex}
+}
+
+// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
+// and negates them so that ctz (count trailing zeros) instructions
+// can be used. It then places these 8 bytes into the cached 64 bit
+// s.allocCache.
+func (s *mspan) refillAllocCache(whichByte uintptr) {
+ bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
+ aCache := uint64(0)
+ aCache |= uint64(bytes[0])
+ aCache |= uint64(bytes[1]) << (1 * 8)
+ aCache |= uint64(bytes[2]) << (2 * 8)
+ aCache |= uint64(bytes[3]) << (3 * 8)
+ aCache |= uint64(bytes[4]) << (4 * 8)
+ aCache |= uint64(bytes[5]) << (5 * 8)
+ aCache |= uint64(bytes[6]) << (6 * 8)
+ aCache |= uint64(bytes[7]) << (7 * 8)
+ s.allocCache = ^aCache
+}
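+
+// Editor's sketch, not upstream code: the negate-then-ctz trick above in
+// isolation. With an allocBits byte of 0b00000111 (objects 0-2 allocated),
+// the negated cache is ...11111000 and Ctz64 returns 3, the index of the
+// first free object; a result of 64 means the cached word has no free slot.
+func exampleFirstFree(allocBits uint64) int {
+ return sys.Ctz64(^allocBits)
+}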
+
+// nextFreeIndex returns the index of the next free object in s at
+// or after s.freeindex.
+// There are hardware instructions that can be used to make this
+// faster if profiling warrants it.
+func (s *mspan) nextFreeIndex() uintptr {
+ sfreeindex := s.freeindex
+ snelems := s.nelems
+ if sfreeindex == snelems {
+ return sfreeindex
+ }
+ if sfreeindex > snelems {
+ throw("s.freeindex > s.nelems")
+ }
+
+ aCache := s.allocCache
+
+ bitIndex := sys.Ctz64(aCache)
+ for bitIndex == 64 {
+ // Move index to start of next cached bits.
+ sfreeindex = (sfreeindex + 64) &^ (64 - 1)
+ if sfreeindex >= snelems {
+ s.freeindex = snelems
+ return snelems
+ }
+ whichByte := sfreeindex / 8
+ // Refill s.allocCache with the next 64 alloc bits.
+ s.refillAllocCache(whichByte)
+ aCache = s.allocCache
+ bitIndex = sys.Ctz64(aCache)
+ // Nothing was available in the cached bits,
+ // so loop to grab the next 8 bytes and try again.
+ }
+ result := sfreeindex + uintptr(bitIndex)
+ if result >= snelems {
+ s.freeindex = snelems
+ return snelems
+ }
+
+ s.allocCache >>= uint(bitIndex + 1)
+ sfreeindex = result + 1
+
+ if sfreeindex%64 == 0 && sfreeindex != snelems {
+ // We just incremented s.freeindex so it isn't 0.
+ // As each 1 in s.allocCache was encountered and used for allocation
+ // it was shifted away. At this point s.allocCache contains all 0s.
+ // Refill s.allocCache so that it corresponds
+ // to the bits at s.allocBits starting at s.freeindex.
+ whichByte := sfreeindex / 8
+ s.refillAllocCache(whichByte)
+ }
+ s.freeindex = sfreeindex
+ return result
+}
+
+// isFree reports whether the index'th object in s is unallocated.
+//
+// The caller must ensure s.state is mSpanInUse, and there must have
+// been no preemption points since ensuring this (which could allow a
+// GC transition, which would allow the state to change).
+func (s *mspan) isFree(index uintptr) bool {
+ if index < s.freeindex {
+ return false
+ }
+ bytep, mask := s.allocBits.bitp(index)
+ return *bytep&mask == 0
+}
+
+// divideByElemSize returns n/s.elemsize.
+// n must be within [0, s.npages*_PageSize),
+// or may be exactly s.npages*_PageSize
+// if s.elemsize is from sizeclasses.go.
+func (s *mspan) divideByElemSize(n uintptr) uintptr {
+ const doubleCheck = false
+
+ // See explanation in mksizeclasses.go's computeDivMagic.
+ q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)
+
+ if doubleCheck && q != n/s.elemsize {
+ println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
+ throw("bad magic division")
+ }
+ return q
+}
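+
+// Editor's sketch, not upstream code: the magic division above spelled out
+// for one concrete element size. Assuming divMul = ^uint32(0)/48 + 1 (the
+// pre-shift constant computeDivMagic derives for a 48-byte class), the
+// multiply-and-shift equals n/48 for every offset that fits in a span.
+func exampleDivide48(n uintptr) uintptr {
+ const divMul = ^uint32(0)/48 + 1
+ return uintptr((uint64(n) * uint64(divMul)) >> 32)
+}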
+
+func (s *mspan) objIndex(p uintptr) uintptr {
+ return s.divideByElemSize(p - s.base())
+}
+
+func markBitsForAddr(p uintptr) markBits {
+ s := spanOf(p)
+ objIndex := s.objIndex(p)
+ return s.markBitsForIndex(objIndex)
+}
+
+func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
+ bytep, mask := s.gcmarkBits.bitp(objIndex)
+ return markBits{bytep, mask, objIndex}
+}
+
+func (s *mspan) markBitsForBase() markBits {
+ return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
+}
+
+// isMarked reports whether mark bit m is set.
+func (m markBits) isMarked() bool {
+ return *m.bytep&m.mask != 0
+}
+
+// setMarked sets the marked bit in the markbits, atomically.
+func (m markBits) setMarked() {
+ // Might be racing with other updates, so use atomic update always.
+ // We used to be clever here and use a non-atomic update in certain
+ // cases, but it's not worth the risk.
+ atomic.Or8(m.bytep, m.mask)
+}
+
+// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
+func (m markBits) setMarkedNonAtomic() {
+ *m.bytep |= m.mask
+}
+
+// clearMarked clears the marked bit in the markbits, atomically.
+func (m markBits) clearMarked() {
+ // Might be racing with other updates, so use atomic update always.
+ // We used to be clever here and use a non-atomic update in certain
+ // cases, but it's not worth the risk.
+ atomic.And8(m.bytep, ^m.mask)
+}
+
+// markBitsForSpan returns the markBits for the span base address base.
+func markBitsForSpan(base uintptr) (mbits markBits) {
+ mbits = markBitsForAddr(base)
+ if mbits.mask != 1 {
+ throw("markBitsForSpan: unaligned start")
+ }
+ return mbits
+}
+
+// advance advances the markBits to the next object in the span.
+func (m *markBits) advance() {
+ if m.mask == 1<<7 {
+ m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
+ m.mask = 1
+ } else {
+ m.mask = m.mask << 1
+ }
+ m.index++
+}
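+
+// Editor's sketch, not upstream code: the typical iteration pattern over a
+// span's mark bits using markBitsForBase and advance. It is equivalent in
+// spirit to countAlloc below, just one bit at a time.
+func exampleCountMarked(s *mspan) int {
+ n := 0
+ mbits := s.markBitsForBase()
+ for i := uintptr(0); i < s.nelems; i++ {
+ if mbits.isMarked() {
+ n++
+ }
+ mbits.advance()
+ }
+ return n
+}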
+
+// heapBitsForAddr returns the heapBits for the address addr.
+// The caller must ensure addr is in an allocated span.
+// In particular, be careful not to point past the end of an object.
+//
+// nosplit because it is used during write barriers and must not be preempted.
+//
+//go:nosplit
+func heapBitsForAddr(addr uintptr) (h heapBits) {
+ // 2 bits per word, 4 pairs per byte, and a mask is hard coded.
+ arena := arenaIndex(addr)
+ ha := mheap_.arenas[arena.l1()][arena.l2()]
+ // The compiler uses a load for nil checking ha, but in this
+ // case we'll almost never hit that cache line again, so it
+ // makes more sense to do a value check.
+ if ha == nil {
+ // addr is not in the heap. Return nil heapBits, which
+ // we expect to crash in the caller.
+ return
+ }
+ h.bitp = &ha.bitmap[(addr/(goarch.PtrSize*4))%heapArenaBitmapBytes]
+ h.shift = uint32((addr / goarch.PtrSize) & 3)
+ h.arena = uint32(arena)
+ h.last = &ha.bitmap[len(ha.bitmap)-1]
+ return
+}
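+
+// Editor's sketch, not upstream code: the address arithmetic above isolated
+// for 64-bit platforms (goarch.PtrSize == 8). Each bitmap byte covers four
+// words, so the byte index within the arena's bitmap is (word number)/4 and
+// the shift selects one of the four 2-bit entries inside that byte.
+func exampleBitmapCoords(addr uintptr) (byteIndex uintptr, shift uint32) {
+ byteIndex = (addr / (8 * 4)) % heapArenaBitmapBytes
+ shift = uint32(addr/8) & 3
+ return
+}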
+
+// clobberdeadPtr is a special value that is used by the compiler to
+// clobber dead stack slots, when -clobberdead flag is set.
+const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))
+
+// badPointer throws bad pointer in heap panic.
+func badPointer(s *mspan, p, refBase, refOff uintptr) {
+ // Typically this indicates an incorrect use
+ // of unsafe or cgo to store a bad pointer in
+ // the Go heap. It may also indicate a runtime
+ // bug.
+ //
+ // TODO(austin): We could be more aggressive
+ // and detect pointers to unallocated objects
+ // in allocated spans.
+ printlock()
+ print("runtime: pointer ", hex(p))
+ if s != nil {
+ state := s.state.get()
+ if state != mSpanInUse {
+ print(" to unallocated span")
+ } else {
+ print(" to unused region of span")
+ }
+ print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
+ }
+ print("\n")
+ if refBase != 0 {
+ print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
+ gcDumpObject("object", refBase, refOff)
+ }
+ getg().m.traceback = 2
+ throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
+}
+
+// findObject returns the base address for the heap object containing
+// the address p, the object's span, and the index of the object in s.
+// If p does not point into a heap object, it returns base == 0.
+//
+// If p is an invalid heap pointer and debug.invalidptr != 0,
+// findObject panics.
+//
+// refBase and refOff optionally give the base address of the object
+// in which the pointer p was found and the byte offset at which it
+// was found. These are used for error reporting.
+//
+// It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
+// Since p is a uintptr, it would not be adjusted if the stack were to move.
+//
+//go:nosplit
+func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
+ s = spanOf(p)
+ // If s is nil, the virtual address has never been part of the heap.
+ // This pointer may be to some mmap'd region, so we allow it.
+ if s == nil {
+ if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
+ // Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
+ // as they are the only platforms where the compiler's clobberdead mode is
+ // implemented. On these platforms clobberdeadPtr cannot be a valid address.
+ badPointer(s, p, refBase, refOff)
+ }
+ return
+ }
+ // If p is a bad pointer, it may not be in s's bounds.
+ //
+ // Check s.state to synchronize with span initialization
+ // before checking other fields. See also spanOfHeap.
+ if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
+ // Pointers into stacks are also ok, the runtime manages these explicitly.
+ if state == mSpanManual {
+ return
+ }
+ // The following ensures that we are rigorous about what data
+ // structures hold valid pointers.
+ if debug.invalidptr != 0 {
+ badPointer(s, p, refBase, refOff)
+ }
+ return
+ }
+
+ objIndex = s.objIndex(p)
+ base = s.base() + objIndex*s.elemsize
+ return
+}
+
+// verifyNotInHeapPtr reports whether converting the not-in-heap pointer into an unsafe.Pointer is ok.
+//
+//go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
+func reflect_verifyNotInHeapPtr(p uintptr) bool {
+ // Conversion to a pointer is ok as long as findObject above does not call badPointer.
+ // Since we're already promised that p doesn't point into the heap, just disallow heap
+ // pointers and the special clobbered pointer.
+ return spanOf(p) == nil && p != clobberdeadPtr
+}
+
+// next returns the heapBits describing the next pointer-sized word in memory.
+// That is, if h describes address p, h.next() describes p+ptrSize.
+// Note that next does not modify h. The caller must record the result.
+//
+// nosplit because it is used during write barriers and must not be preempted.
+//
+//go:nosplit
+func (h heapBits) next() heapBits {
+ if h.shift < 3*heapBitsShift {
+ h.shift += heapBitsShift
+ } else if h.bitp != h.last {
+ h.bitp, h.shift = add1(h.bitp), 0
+ } else {
+ // Move to the next arena.
+ return h.nextArena()
+ }
+ return h
+}
+
+// nextArena advances h to the beginning of the next heap arena.
+//
+// This is a slow-path helper to next. gc's inliner knows that
+// heapBits.next can be inlined even though it calls this. This is
+// marked noinline so it doesn't get inlined into next and cause next
+// to be too big to inline.
+//
+//go:nosplit
+//go:noinline
+func (h heapBits) nextArena() heapBits {
+ h.arena++
+ ai := arenaIdx(h.arena)
+ l2 := mheap_.arenas[ai.l1()]
+ if l2 == nil {
+ // We just passed the end of the object, which
+ // was also the end of the heap. Poison h. It
+ // should never be dereferenced at this point.
+ return heapBits{}
+ }
+ ha := l2[ai.l2()]
+ if ha == nil {
+ return heapBits{}
+ }
+ h.bitp, h.shift = &ha.bitmap[0], 0
+ h.last = &ha.bitmap[len(ha.bitmap)-1]
+ return h
+}
+
+// forward returns the heapBits describing n pointer-sized words ahead of h in memory.
+// That is, if h describes address p, h.forward(n) describes p+n*ptrSize.
+// h.forward(1) is equivalent to h.next(), just slower.
+// Note that forward does not modify h. The caller must record the result.
+//
+//go:nosplit
+func (h heapBits) forward(n uintptr) heapBits {
+ n += uintptr(h.shift) / heapBitsShift
+ nbitp := uintptr(unsafe.Pointer(h.bitp)) + n/4
+ h.shift = uint32(n%4) * heapBitsShift
+ if nbitp <= uintptr(unsafe.Pointer(h.last)) {
+ h.bitp = (*uint8)(unsafe.Pointer(nbitp))
+ return h
+ }
+
+ // We're in a new heap arena.
+ past := nbitp - (uintptr(unsafe.Pointer(h.last)) + 1)
+ h.arena += 1 + uint32(past/heapArenaBitmapBytes)
+ ai := arenaIdx(h.arena)
+ if l2 := mheap_.arenas[ai.l1()]; l2 != nil && l2[ai.l2()] != nil {
+ a := l2[ai.l2()]
+ h.bitp = &a.bitmap[past%heapArenaBitmapBytes]
+ h.last = &a.bitmap[len(a.bitmap)-1]
+ } else {
+ h.bitp, h.last = nil, nil
+ }
+ return h
+}
+
+// forwardOrBoundary is like forward, but stops at boundaries between
+// contiguous sections of the bitmap. It returns the number of words
+// advanced over, which will be <= n.
+func (h heapBits) forwardOrBoundary(n uintptr) (heapBits, uintptr) {
+ maxn := 4 * ((uintptr(unsafe.Pointer(h.last)) + 1) - uintptr(unsafe.Pointer(h.bitp)))
+ if n > maxn {
+ n = maxn
+ }
+ return h.forward(n), n
+}
+
+// bits returns the heap bits for the current word.
+// The caller can test morePointers and isPointer by &-ing with bitScan and bitPointer.
+// The result includes in its higher bits the bits for subsequent words
+// described by the same bitmap byte.
+//
+// nosplit because it is used during write barriers and must not be preempted.
+//
+//go:nosplit
+func (h heapBits) bits() uint32 {
+ // The (shift & 31) eliminates a test and conditional branch
+ // from the generated code.
+ return uint32(*h.bitp) >> (h.shift & 31)
+}
+
+// morePointers reports whether this word or any later word in this object
+// may contain a pointer (that is, whether the scan bit is set).
+// h must not describe the second word of the object.
+func (h heapBits) morePointers() bool {
+ return h.bits()&bitScan != 0
+}
+
+// isPointer reports whether the heap bits describe a pointer word.
+//
+// nosplit because it is used during write barriers and must not be preempted.
+//
+//go:nosplit
+func (h heapBits) isPointer() bool {
+ return h.bits()&bitPointer != 0
+}
+
+// bulkBarrierPreWrite executes a write barrier
+// for every pointer slot in the memory range [src, src+size),
+// using pointer/scalar information from [dst, dst+size).
+// This executes the write barriers necessary before a memmove.
+// src, dst, and size must be pointer-aligned.
+// The range [dst, dst+size) must lie within a single object.
+// It does not perform the actual writes.
+//
+// As a special case, src == 0 indicates that this is being used for a
+// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
+// barrier.
+//
+// Callers should call bulkBarrierPreWrite immediately before
+// calling memmove(dst, src, size). This function is marked nosplit
+// to avoid being preempted; the GC must not stop the goroutine
+// between the memmove and the execution of the barriers.
+// The caller is also responsible for cgo pointer checks if this
+// may be writing Go pointers into non-Go memory.
+//
+// The pointer bitmap is not maintained for allocations containing
+// no pointers at all; any caller of bulkBarrierPreWrite must first
+// make sure the underlying allocation contains pointers, usually
+// by checking typ.ptrdata.
+//
+// Callers must perform cgo checks if writeBarrier.cgo.
+//
+//go:nosplit
+func bulkBarrierPreWrite(dst, src, size uintptr) {
+ if (dst|src|size)&(goarch.PtrSize-1) != 0 {
+ throw("bulkBarrierPreWrite: unaligned arguments")
+ }
+ if !writeBarrier.needed {
+ return
+ }
+ if s := spanOf(dst); s == nil {
+ // If dst is a global, use the data or BSS bitmaps to
+ // execute write barriers.
+ for _, datap := range activeModules() {
+ if datap.data <= dst && dst < datap.edata {
+ bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
+ return
+ }
+ }
+ for _, datap := range activeModules() {
+ if datap.bss <= dst && dst < datap.ebss {
+ bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
+ return
+ }
+ }
+ return
+ } else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
+ // dst was heap memory at some point, but isn't now.
+ // It can't be a global. It must be either our stack,
+ // or in the case of direct channel sends, it could be
+ // another stack. Either way, no need for barriers.
+ // This will also catch if dst is in a freed span,
+ // though that should never happen.
+ return
+ }
+
+ buf := &getg().m.p.ptr().wbBuf
+ h := heapBitsForAddr(dst)
+ if src == 0 {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
+ if h.isPointer() {
+ dstx := (*uintptr)(unsafe.Pointer(dst + i))
+ if !buf.putFast(*dstx, 0) {
+ wbBufFlush(nil, 0)
+ }
+ }
+ h = h.next()
+ }
+ } else {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
+ if h.isPointer() {
+ dstx := (*uintptr)(unsafe.Pointer(dst + i))
+ srcx := (*uintptr)(unsafe.Pointer(src + i))
+ if !buf.putFast(*dstx, *srcx) {
+ wbBufFlush(nil, 0)
+ }
+ }
+ h = h.next()
+ }
+ }
+}
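+
+// Editor's sketch, not upstream code: the call pattern bulkBarrierPreWrite
+// expects, along the lines of the typed memmove helpers: barriers first,
+// then the copy. The real helpers are nosplit so the GC cannot stop the
+// goroutine between the two calls.
+func exampleTypedCopy(typ *_type, dst, src unsafe.Pointer) {
+ if writeBarrier.needed && typ.ptrdata != 0 {
+ bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
+ }
+ memmove(dst, src, typ.size)
+}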
+
+// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
+// does not execute write barriers for [dst, dst+size).
+//
+// In addition to the requirements of bulkBarrierPreWrite
+// callers need to ensure [dst, dst+size) is zeroed.
+//
+// This is used for special cases where e.g. dst was just
+// created and zeroed with malloc.
+//
+//go:nosplit
+func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
+ if (dst|src|size)&(goarch.PtrSize-1) != 0 {
+ throw("bulkBarrierPreWrite: unaligned arguments")
+ }
+ if !writeBarrier.needed {
+ return
+ }
+ buf := &getg().m.p.ptr().wbBuf
+ h := heapBitsForAddr(dst)
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
+ if h.isPointer() {
+ srcx := (*uintptr)(unsafe.Pointer(src + i))
+ if !buf.putFast(0, *srcx) {
+ wbBufFlush(nil, 0)
+ }
+ }
+ h = h.next()
+ }
+}
+
+// bulkBarrierBitmap executes write barriers for copying from [src,
+// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
+// assumed to start maskOffset bytes into the data covered by the
+// bitmap in bits (which may not be a multiple of 8).
+//
+// This is used by bulkBarrierPreWrite for writes to data and BSS.
+//
+//go:nosplit
+func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
+ word := maskOffset / goarch.PtrSize
+ bits = addb(bits, word/8)
+ mask := uint8(1) << (word % 8)
+
+ buf := &getg().m.p.ptr().wbBuf
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
+ if mask == 0 {
+ bits = addb(bits, 1)
+ if *bits == 0 {
+ // Skip 8 words.
+ i += 7 * goarch.PtrSize
+ continue
+ }
+ mask = 1
+ }
+ if *bits&mask != 0 {
+ dstx := (*uintptr)(unsafe.Pointer(dst + i))
+ if src == 0 {
+ if !buf.putFast(*dstx, 0) {
+ wbBufFlush(nil, 0)
+ }
+ } else {
+ srcx := (*uintptr)(unsafe.Pointer(src + i))
+ if !buf.putFast(*dstx, *srcx) {
+ wbBufFlush(nil, 0)
+ }
+ }
+ }
+ mask <<= 1
+ }
+}
+
+// typeBitsBulkBarrier executes a write barrier for every
+// pointer that would be copied from [src, src+size) to [dst,
+// dst+size) by a memmove using the type bitmap to locate those
+// pointer slots.
+//
+// The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
+// dst, src, and size must be pointer-aligned.
+// The type typ must have a plain bitmap, not a GC program.
+// The only use of this function is in channel sends, and the
+// 64 kB channel element limit takes care of this for us.
+//
+// Must not be preempted because it typically runs right before memmove,
+// and the GC must observe them as an atomic action.
+//
+// Callers must perform cgo checks if writeBarrier.cgo.
+//
+//go:nosplit
+func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
+ if typ == nil {
+ throw("runtime: typeBitsBulkBarrier without type")
+ }
+ if typ.size != size {
+ println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size)
+ throw("runtime: invalid typeBitsBulkBarrier")
+ }
+ if typ.kind&kindGCProg != 0 {
+ println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog")
+ throw("runtime: invalid typeBitsBulkBarrier")
+ }
+ if !writeBarrier.needed {
+ return
+ }
+ ptrmask := typ.gcdata
+ buf := &getg().m.p.ptr().wbBuf
+ var bits uint32
+ for i := uintptr(0); i < typ.ptrdata; i += goarch.PtrSize {
+ if i&(goarch.PtrSize*8-1) == 0 {
+ bits = uint32(*ptrmask)
+ ptrmask = addb(ptrmask, 1)
+ } else {
+ bits = bits >> 1
+ }
+ if bits&1 != 0 {
+ dstx := (*uintptr)(unsafe.Pointer(dst + i))
+ srcx := (*uintptr)(unsafe.Pointer(src + i))
+ if !buf.putFast(*dstx, *srcx) {
+ wbBufFlush(nil, 0)
+ }
+ }
+ }
+}
+
+// The methods operating on spans all require that h has been returned
+// by heapBitsForAddr for the span's base address and that size, n, total
+// are the span layout description returned by the mspan's layout method.
+// If total > size*n, it means that there is extra leftover memory in the span,
+// usually due to rounding.
+//
+// TODO(rsc): Perhaps introduce a different heapBitsSpan type.
+
+// initSpan initializes the heap bitmap for a span.
+// If this is a span of pointer-sized objects, it initializes all
+// words to pointer/scan.
+// Otherwise, it initializes all words to scalar/dead.
+func (h heapBits) initSpan(s *mspan) {
+ // Clear bits corresponding to objects.
+ nw := (s.npages << _PageShift) / goarch.PtrSize
+ if nw%wordsPerBitmapByte != 0 {
+ throw("initSpan: unaligned length")
+ }
+ if h.shift != 0 {
+ throw("initSpan: unaligned base")
+ }
+ isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
+ for nw > 0 {
+ hNext, anw := h.forwardOrBoundary(nw)
+ nbyte := anw / wordsPerBitmapByte
+ if isPtrs {
+ bitp := h.bitp
+ for i := uintptr(0); i < nbyte; i++ {
+ *bitp = bitPointerAll | bitScanAll
+ bitp = add1(bitp)
+ }
+ } else {
+ memclrNoHeapPointers(unsafe.Pointer(h.bitp), nbyte)
+ }
+ h = hNext
+ nw -= anw
+ }
+}
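+
+// Editor's note, not upstream code: for a span of pointer-sized objects,
+// every word is the pointer-holding first word of some object, so initSpan
+// fills each bitmap byte with four pointer/scan pairs:
+const exampleAllPointerByte = bitPointerAll | bitScanAll // 0x0f | 0xf0 == 0xff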
+
+// countAlloc returns the number of objects allocated in span s by
+// scanning the allocation bitmap.
+func (s *mspan) countAlloc() int {
+ count := 0
+ bytes := divRoundUp(s.nelems, 8)
+ // Iterate over each 8-byte chunk and count allocations
+ // with an intrinsic. Note that newMarkBits guarantees that
+ // gcmarkBits will be 8-byte aligned, so we don't have to
+ // worry about edge cases, irrelevant bits will simply be zero.
+ for i := uintptr(0); i < bytes; i += 8 {
+ // Extract 64 bits from the byte pointer and get a OnesCount.
+ // Note that the unsafe cast here doesn't preserve endianness,
+ // but that's OK. We only care about how many bits are 1, not
+ // about the order we discover them in.
+ mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
+ count += sys.OnesCount64(mrkBits)
+ }
+ return count
+}
+
+// heapBitsSetType records that the new allocation [x, x+size)
+// holds in [x, x+dataSize) one or more values of type typ.
+// (The number of values is given by dataSize / typ.size.)
+// If dataSize < size, the fragment [x+dataSize, x+size) is
+// recorded as non-pointer data.
+// It is known that the type has pointers somewhere;
+// malloc does not call heapBitsSetType when there are no pointers,
+// because all free objects are marked as noscan during
+// heapBitsSweepSpan.
+//
+// There can only be one allocation from a given span active at a time,
+// and the bitmap for a span always falls on byte boundaries,
+// so there are no write-write races for access to the heap bitmap.
+// Hence, heapBitsSetType can access the bitmap without atomics.
+//
+// There can be read-write races between heapBitsSetType and things
+// that read the heap bitmap like scanobject. However, since
+// heapBitsSetType is only used for objects that have not yet been
+// made reachable, readers will ignore bits being modified by this
+// function. This does mean this function cannot transiently modify
+// bits that belong to neighboring objects. Also, on weakly-ordered
+// machines, callers must execute a store/store (publication) barrier
+// between calling this function and making the object reachable.
+func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
+ const doubleCheck = false // slow but helpful; enable to test modifications to this code
+
+ const (
+ mask1 = bitPointer | bitScan // 00010001
+ mask2 = bitPointer | bitScan | mask1<<heapBitsShift // 00110011
+ mask3 = bitPointer | bitScan | mask2<<heapBitsShift // 01110111
+ )
+
+ // size is always dataSize rounded up to the next malloc size class,
+ // except in the case of allocating a defer block, in which case
+ // size is sizeof(_defer{}) (at least 6 words) and dataSize may be
+ // arbitrarily larger.
+ //
+ // The checks for size == goarch.PtrSize and size == 2*goarch.PtrSize can therefore
+ // assume that dataSize == size without checking it explicitly.
+
+ if goarch.PtrSize == 8 && size == goarch.PtrSize {
+ // It's one word and it has pointers, so it must be a pointer.
+ // Since all allocated one-word objects are pointers
+ // (non-pointers are aggregated into tinySize allocations),
+ // initSpan sets the pointer bits for us. Nothing to do here.
+ if doubleCheck {
+ h := heapBitsForAddr(x)
+ if !h.isPointer() {
+ throw("heapBitsSetType: pointer bit missing")
+ }
+ if !h.morePointers() {
+ throw("heapBitsSetType: scan bit missing")
+ }
+ }
+ return
+ }
+
+ h := heapBitsForAddr(x)
+ ptrmask := typ.gcdata // start of 1-bit pointer mask (or GC program, handled below)
+
+ // 2-word objects only have 4 bitmap bits and 3-word objects only have 6 bitmap bits.
+ // Therefore, these objects share a heap bitmap byte with the objects next to them.
+ // These are called out as a special case primarily so the code below can assume all
+ // objects are at least 4 words long and that their bitmaps start either at the beginning
+ // of a bitmap byte, or half-way in (h.shift of 0 and 2 respectively).
+
+ if size == 2*goarch.PtrSize {
+ if typ.size == goarch.PtrSize {
+ // We're allocating a block big enough to hold two pointers.
+ // On 64-bit, that means the actual object must be two pointers,
+ // or else we'd have used the one-pointer-sized block.
+ // On 32-bit, however, this is the 8-byte block, the smallest one.
+ // So it could be that we're allocating one pointer and this was
+ // just the smallest block available. Distinguish by checking dataSize.
+ // (In general the number of instances of typ being allocated is
+ // dataSize/typ.size.)
+ if goarch.PtrSize == 4 && dataSize == goarch.PtrSize {
+ // 1 pointer object. On 32-bit machines clear the bit for the
+ // unused second word.
+ *h.bitp &^= (bitPointer | bitScan | (bitPointer|bitScan)<<heapBitsShift) << h.shift
+ *h.bitp |= (bitPointer | bitScan) << h.shift
+ } else {
+ // 2-element array of pointer.
+ *h.bitp |= (bitPointer | bitScan | (bitPointer|bitScan)<<heapBitsShift) << h.shift
+ }
+ return
+ }
+ // Otherwise typ.size must be 2*goarch.PtrSize,
+ // and typ.kind&kindGCProg == 0.
+ if doubleCheck {
+ if typ.size != 2*goarch.PtrSize || typ.kind&kindGCProg != 0 {
+ print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
+ throw("heapBitsSetType")
+ }
+ }
+ b := uint32(*ptrmask)
+ hb := b & 3
+ hb |= bitScanAll & ((bitScan << (typ.ptrdata / goarch.PtrSize)) - 1)
+ // Clear the bits for this object so we can set the
+ // appropriate ones.
+ *h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
+ *h.bitp |= uint8(hb << h.shift)
+ return
+ } else if size == 3*goarch.PtrSize {
+ b := uint8(*ptrmask)
+ if doubleCheck {
+ if b == 0 {
+ println("runtime: invalid type ", typ.string())
+ throw("heapBitsSetType: called with non-pointer type")
+ }
+ if goarch.PtrSize != 8 {
+ throw("heapBitsSetType: unexpected 3 pointer wide size class on 32 bit")
+ }
+ if typ.kind&kindGCProg != 0 {
+ throw("heapBitsSetType: unexpected GC prog for 3 pointer wide size class")
+ }
+ if typ.size == 2*goarch.PtrSize {
+ print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, "\n")
+ throw("heapBitsSetType: inconsistent object sizes")
+ }
+ }
+ if typ.size == goarch.PtrSize {
+ // The type contains a pointer otherwise heapBitsSetType wouldn't have been called.
+ // Since the type is only 1 pointer wide and contains a pointer, its gcdata must be exactly 1.
+ if doubleCheck && *typ.gcdata != 1 {
+ print("runtime: heapBitsSetType size=", size, " typ.size=", typ.size, "but *typ.gcdata", *typ.gcdata, "\n")
+ throw("heapBitsSetType: unexpected gcdata for 1 pointer wide type size in 3 pointer wide size class")
+ }
+ // 3 element array of pointers. Unrolling ptrmask 3 times into p yields 00000111.
+ b = 7
+ }
+
+ hb := b & 7
+ // Set bitScan bits for all pointers.
+ hb |= hb << wordsPerBitmapByte
+ // First bitScan bit is always set since the type contains pointers.
+ hb |= bitScan
+ // Second bitScan bit needs to also be set if the third bitScan bit is set.
+ hb |= hb & (bitScan << (2 * heapBitsShift)) >> 1
+
+ // For h.shift > 1 heap bits cross a byte boundary and need to be written part
+ // to h.bitp and part to the next h.bitp.
+ switch h.shift {
+ case 0:
+ *h.bitp &^= mask3 << 0
+ *h.bitp |= hb << 0
+ case 1:
+ *h.bitp &^= mask3 << 1
+ *h.bitp |= hb << 1
+ case 2:
+ *h.bitp &^= mask2 << 2
+ *h.bitp |= (hb & mask2) << 2
+ // Two words written to the first byte.
+ // Advance two words to get to the next byte.
+ h = h.next().next()
+ *h.bitp &^= mask1
+ *h.bitp |= (hb >> 2) & mask1
+ case 3:
+ *h.bitp &^= mask1 << 3
+ *h.bitp |= (hb & mask1) << 3
+ // One word written to the first byte.
+ // Advance one word to get to the next byte.
+ h = h.next()
+ *h.bitp &^= mask2
+ *h.bitp |= (hb >> 1) & mask2
+ }
+ return
+ }
+
+ // Copy from 1-bit ptrmask into 2-bit bitmap.
+ // The basic approach is to use a single uintptr as a bit buffer,
+ // alternating between reloading the buffer and writing bitmap bytes.
+ // In general, one load can supply two bitmap byte writes.
+ // This is a lot of lines of code, but it compiles into relatively few
+ // machine instructions.
+
+ outOfPlace := false
+ if arenaIndex(x+size-1) != arenaIdx(h.arena) || (doubleCheck && fastrandn(2) == 0) {
+ // This object spans heap arenas, so the bitmap may be
+ // discontiguous. Unroll it into the object instead
+ // and then copy it out.
+ //
+ // In doubleCheck mode, we randomly do this anyway to
+ // stress test the bitmap copying path.
+ outOfPlace = true
+ h.bitp = (*uint8)(unsafe.Pointer(x))
+ h.last = nil
+ }
+
+ var (
+ // Ptrmask input.
+ p *byte // last ptrmask byte read
+ b uintptr // ptrmask bits already loaded
+ nb uintptr // number of bits in b at next read
+ endp *byte // final ptrmask byte to read (then repeat)
+ endnb uintptr // number of valid bits in *endp
+ pbits uintptr // alternate source of bits
+
+ // Heap bitmap output.
+ w uintptr // words processed
+ nw uintptr // number of words to process
+ hbitp *byte // next heap bitmap byte to write
+ hb uintptr // bits being prepared for *hbitp
+ )
+
+ hbitp = h.bitp
+
+ // Handle GC program. Delayed until this part of the code
+ // so that we can use the same double-checking mechanism
+ // as the 1-bit case. Nothing above could have encountered
+ // GC programs: the cases were all too small.
+ if typ.kind&kindGCProg != 0 {
+ heapBitsSetTypeGCProg(h, typ.ptrdata, typ.size, dataSize, size, addb(typ.gcdata, 4))
+ if doubleCheck {
+ // Double-check the heap bits written by GC program
+ // by running the GC program to create a 1-bit pointer mask
+ // and then jumping to the double-check code below.
+ // This doesn't catch bugs shared between the 1-bit and 4-bit
+ // GC program execution, but it does catch mistakes specific
+ // to just one of those and bugs in heapBitsSetTypeGCProg's
+ // implementation of arrays.
+ lock(&debugPtrmask.lock)
+ if debugPtrmask.data == nil {
+ debugPtrmask.data = (*byte)(persistentalloc(1<<20, 1, &memstats.other_sys))
+ }
+ ptrmask = debugPtrmask.data
+ runGCProg(addb(typ.gcdata, 4), nil, ptrmask, 1)
+ }
+ goto Phase4
+ }
+
+ // Note about sizes:
+ //
+ // typ.size is the number of bytes in the object,
+ // and typ.ptrdata is the number of bytes in the prefix
+ // of the object that contains pointers. That is, the final
+ // typ.size - typ.ptrdata bytes contain no pointers.
+ // This allows optimization of a common pattern where
+ // an object has a small header followed by a large scalar
+ // buffer. If we know the pointers are over, we don't have
+ // to scan the buffer's heap bitmap at all.
+ // The 1-bit ptrmasks are sized to contain only bits for
+ // the typ.ptrdata prefix, zero padded out to a full byte
+ // of bitmap. This code sets nw (below) so that heap bitmap
+ // bits are only written for the typ.ptrdata prefix; if there is
+ // more room in the allocated object, the next heap bitmap
+ // entry is a 00, indicating that there are no more pointers
+ // to scan. So only the ptrmask for the ptrdata bytes is needed.
+ //
+ // Replicated copies are not as nice: if there is an array of
+ // objects with scalar tails, all but the last tail does have to
+ // be initialized, because there is no way to say "skip forward".
+ // However, because of the possibility of a repeated type with
+ // size not a multiple of 4 pointers (one heap bitmap byte),
+ // the code already must handle the last ptrmask byte specially
+ // by treating it as containing only the bits for endnb pointers,
+ // where endnb <= 4. We represent large scalar tails that must
+ // be expanded in the replication by setting endnb larger than 4.
+ // This will have the effect of reading many bits out of b,
+ // but once the real bits are shifted out, b will supply as many
+ // zero bits as we try to read, which is exactly what we need.
+
+ p = ptrmask
+ if typ.size < dataSize {
+ // Filling in bits for an array of typ.
+ // Set up for repetition of ptrmask during main loop.
+ // Note that ptrmask describes only the typ.ptrdata prefix of typ.
+ const maxBits = goarch.PtrSize*8 - 7
+ if typ.ptrdata/goarch.PtrSize <= maxBits {
+ // Entire ptrmask fits in uintptr with room for a byte fragment.
+ // Load into pbits and never read from ptrmask again.
+ // This is especially important when the ptrmask has
+ // fewer than 8 bits in it; otherwise the reload in the middle
+ // of the Phase 2 loop would itself need to loop to gather
+ // at least 8 bits.
+
+ // Accumulate ptrmask into b.
+ // ptrmask is sized to describe only typ.ptrdata, but we record
+ // it as describing typ.size bytes, since all the high bits are zero.
+ nb = typ.ptrdata / goarch.PtrSize
+ for i := uintptr(0); i < nb; i += 8 {
+ b |= uintptr(*p) << i
+ p = add1(p)
+ }
+ nb = typ.size / goarch.PtrSize
+
+ // Replicate ptrmask to fill entire pbits uintptr.
+ // Doubling and truncating is fewer steps than
+ // iterating by nb each time. (nb could be 1.)
+ // Since we loaded typ.ptrdata/goarch.PtrSize bits
+ // but are pretending to have typ.size/goarch.PtrSize,
+ // there might be no replication necessary/possible.
+ pbits = b
+ endnb = nb
+ if nb+nb <= maxBits {
+ for endnb <= goarch.PtrSize*8 {
+ pbits |= pbits << endnb
+ endnb += endnb
+ }
+ // Truncate to a multiple of original ptrmask.
+ // Because nb+nb <= maxBits, nb fits in a byte.
+ // Byte division is cheaper than uintptr division.
+ endnb = uintptr(maxBits/byte(nb)) * nb
+ pbits &= 1<<endnb - 1
+ b = pbits
+ nb = endnb
+ }
+
+ // Clear p and endp as sentinel for using pbits.
+ // Checked during Phase 2 loop.
+ p = nil
+ endp = nil
+ } else {
+ // Ptrmask is larger. Read it multiple times.
+ n := (typ.ptrdata/goarch.PtrSize+7)/8 - 1
+ endp = addb(ptrmask, n)
+ endnb = typ.size/goarch.PtrSize - n*8
+ }
+ }
+ if p != nil {
+ b = uintptr(*p)
+ p = add1(p)
+ nb = 8
+ }
+
+ if typ.size == dataSize {
+ // Single entry: can stop once we reach the non-pointer data.
+ nw = typ.ptrdata / goarch.PtrSize
+ } else {
+ // Repeated instances of typ in an array.
+ // Have to process first N-1 entries in full, but can stop
+ // once we reach the non-pointer data in the final entry.
+ nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / goarch.PtrSize
+ }
+ if nw == 0 {
+ // No pointers! Caller was supposed to check.
+ println("runtime: invalid type ", typ.string())
+ throw("heapBitsSetType: called with non-pointer type")
+ return
+ }
+
+ // Phase 1: Special case for leading byte (shift==0) or half-byte (shift==2).
+ // The leading byte is special because it contains the bits for word 1,
+ // which does not have the scan bit set.
+ // The leading half-byte is special because it's a half a byte,
+ // so we have to be careful with the bits already there.
+ switch {
+ default:
+ throw("heapBitsSetType: unexpected shift")
+
+ case h.shift == 0:
+ // Ptrmask and heap bitmap are aligned.
+ //
+ // This is a fast path for small objects.
+ //
+ // The first byte we write out covers the first four
+ // words of the object. The scan/dead bit on the first
+ // word must be set to scan since there are pointers
+ // somewhere in the object.
+ // In all following words, we set the scan/dead
+ // appropriately to indicate that the object continues
+ // to the next 2-bit entry in the bitmap.
+ //
+ // We set four bits at a time here, but if the object
+ // is fewer than four words, phase 3 will clear
+ // unnecessary bits.
+ hb = b & bitPointerAll
+ hb |= bitScanAll
+ if w += 4; w >= nw {
+ goto Phase3
+ }
+ *hbitp = uint8(hb)
+ hbitp = add1(hbitp)
+ b >>= 4
+ nb -= 4
+
+ case h.shift == 2:
+ // Ptrmask and heap bitmap are misaligned.
+ //
+ // On 32 bit architectures only the 6-word object that corresponds
+ // to the 24-byte size class can start with an h.shift of 2 here, since
+ // all other non-16-byte-aligned size classes have been handled by
+ // special code paths at the beginning of heapBitsSetType on 32 bit.
+ //
+ // Many size classes are only 16 byte aligned. On 64 bit architectures
+ // this results in a heap bitmap position starting with a h.shift of 2.
+ //
+ // The bits for the first two words are in a byte shared
+ // with another object, so we must be careful with the bits
+ // already there.
+ //
+ // We took care of 1-word, 2-word, and 3-word objects above,
+ // so this is at least a 6-word object.
+ hb = (b & (bitPointer | bitPointer<<heapBitsShift)) << (2 * heapBitsShift)
+ hb |= bitScan << (2 * heapBitsShift)
+ if nw > 1 {
+ hb |= bitScan << (3 * heapBitsShift)
+ }
+ b >>= 2
+ nb -= 2
+ *hbitp &^= uint8((bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << (2 * heapBitsShift))
+ *hbitp |= uint8(hb)
+ hbitp = add1(hbitp)
+ if w += 2; w >= nw {
+ // We know that there is more data, because we handled 2-word and 3-word objects above.
+ // This must be at least a 6-word object. If we're out of pointer words,
+ // mark no scan in next bitmap byte and finish.
+ hb = 0
+ w += 4
+ goto Phase3
+ }
+ }
+
+ // Phase 2: Full bytes in bitmap, up to but not including write to last byte (full or partial) in bitmap.
+ // The loop computes the bits for that last write but does not execute the write;
+ // it leaves the bits in hb for processing by phase 3.
+ // To avoid repeated adjustment of nb, we subtract out the 4 bits we're going to
+ // use in the first half of the loop right now, and then we only adjust nb explicitly
+ // if the 8 bits used by each iteration isn't balanced by 8 bits loaded mid-loop.
+ nb -= 4
+ for {
+ // Emit bitmap byte.
+ // b has at least nb+4 bits, with one exception:
+ // if w+4 >= nw, then b has only nw-w bits,
+ // but we'll stop at the break and then truncate
+ // appropriately in Phase 3.
+ hb = b & bitPointerAll
+ hb |= bitScanAll
+ if w += 4; w >= nw {
+ break
+ }
+ *hbitp = uint8(hb)
+ hbitp = add1(hbitp)
+ b >>= 4
+
+ // Load more bits. b has nb right now.
+ if p != endp {
+ // Fast path: keep reading from ptrmask.
+ // nb unmodified: we just loaded 8 bits,
+ // and the next iteration will consume 8 bits,
+ // leaving us with the same nb the next time we're here.
+ if nb < 8 {
+ b |= uintptr(*p) << nb
+ p = add1(p)
+ } else {
+ // Reduce the number of bits in b.
+ // This is important if we skipped
+ // over a scalar tail, since nb could
+ // be larger than the bit width of b.
+ nb -= 8
+ }
+ } else if p == nil {
+ // Almost as fast path: track bit count and refill from pbits.
+ // For short repetitions.
+ if nb < 8 {
+ b |= pbits << nb
+ nb += endnb
+ }
+ nb -= 8 // for next iteration
+ } else {
+ // Slow path: reached end of ptrmask.
+ // Process final partial byte and rewind to start.
+ b |= uintptr(*p) << nb
+ nb += endnb
+ if nb < 8 {
+ b |= uintptr(*ptrmask) << nb
+ p = add1(ptrmask)
+ } else {
+ nb -= 8
+ p = ptrmask
+ }
+ }
+
+ // Emit bitmap byte.
+ hb = b & bitPointerAll
+ hb |= bitScanAll
+ if w += 4; w >= nw {
+ break
+ }
+ *hbitp = uint8(hb)
+ hbitp = add1(hbitp)
+ b >>= 4
+ }
+
+Phase3:
+ // Phase 3: Write last byte or partial byte and zero the rest of the bitmap entries.
+ if w > nw {
+ // Counting the 4 entries in hb not yet written to memory,
+ // there are more entries than possible pointer slots.
+ // Discard the excess entries (can't be more than 3).
+ mask := uintptr(1)<<(4-(w-nw)) - 1
+ hb &= mask | mask<<4 // apply mask to both pointer bits and scan bits
+ }
+
+ // Change nw from counting possibly-pointer words to total words in allocation.
+ nw = size / goarch.PtrSize
+
+ // Write whole bitmap bytes.
+ // The first is hb, the rest are zero.
+ if w <= nw {
+ *hbitp = uint8(hb)
+ hbitp = add1(hbitp)
+ hb = 0 // for possible final half-byte below
+ for w += 4; w <= nw; w += 4 {
+ *hbitp = 0
+ hbitp = add1(hbitp)
+ }
+ }
+
+ // Write final partial bitmap byte if any.
+ // We know w > nw, or else we'd still be in the loop above.
+ // It can be bigger only due to the 4 entries in hb that it counts.
+ // If w == nw+4 then there's nothing left to do: we wrote all nw entries
+ // and can discard the 4 sitting in hb.
+ // But if w == nw+2, we need to write the first two entries in hb.
+ // The byte is shared with the next object, so be careful with
+ // existing bits.
+ if w == nw+2 {
+ *hbitp = *hbitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | uint8(hb)
+ }
+
+Phase4:
+ // Phase 4: Copy unrolled bitmap to per-arena bitmaps, if necessary.
+ if outOfPlace {
+ // TODO: We could probably make this faster by
+ // handling [x+dataSize, x+size) specially.
+ h := heapBitsForAddr(x)
+ // cnw is the number of heap words, or bit pairs
+ // remaining (like nw above).
+ cnw := size / goarch.PtrSize
+ src := (*uint8)(unsafe.Pointer(x))
+ // We know the first and last byte of the bitmap are
+ // not the same, but it's still possible for small
+ // objects to span arenas, so the object may share bitmap
+ // bytes with neighboring objects.
+ //
+ // Handle the first byte specially if it's shared. See
+ // Phase 1 for why this is the only special case we need.
+ if doubleCheck {
+ if !(h.shift == 0 || h.shift == 2) {
+ print("x=", x, " size=", size, " cnw=", h.shift, "\n")
+ throw("bad start shift")
+ }
+ }
+ if h.shift == 2 {
+ *h.bitp = *h.bitp&^((bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift)<<(2*heapBitsShift)) | *src
+ h = h.next().next()
+ cnw -= 2
+ src = addb(src, 1)
+ }
+ // We're now byte aligned. Copy out to per-arena
+ // bitmaps until the last byte (which may again be
+ // partial).
+ for cnw >= 4 {
+ // This loop processes four words at a time,
+ // so round cnw down accordingly.
+ hNext, words := h.forwardOrBoundary(cnw / 4 * 4)
+
+ // n is the number of bitmap bytes to copy.
+ n := words / 4
+ memmove(unsafe.Pointer(h.bitp), unsafe.Pointer(src), n)
+ cnw -= words
+ h = hNext
+ src = addb(src, n)
+ }
+ if doubleCheck && h.shift != 0 {
+ print("cnw=", cnw, " h.shift=", h.shift, "\n")
+ throw("bad shift after block copy")
+ }
+ // Handle the last byte if it's shared.
+ if cnw == 2 {
+ *h.bitp = *h.bitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | *src
+ src = addb(src, 1)
+ h = h.next().next()
+ }
+ if doubleCheck {
+ if uintptr(unsafe.Pointer(src)) > x+size {
+ throw("copy exceeded object size")
+ }
+ if !(cnw == 0 || cnw == 2) {
+ print("x=", x, " size=", size, " cnw=", cnw, "\n")
+ throw("bad number of remaining words")
+ }
+ // Set up hbitp so doubleCheck code below can check it.
+ hbitp = h.bitp
+ }
+ // Zero the object where we wrote the bitmap.
+ memclrNoHeapPointers(unsafe.Pointer(x), uintptr(unsafe.Pointer(src))-x)
+ }
+
+ // Double check the whole bitmap.
+ if doubleCheck {
+ // x+size may not point to the heap, so back up one
+ // word and then advance it the way we do above.
+ end := heapBitsForAddr(x + size - goarch.PtrSize)
+ if outOfPlace {
+ // In out-of-place copying, we just advance
+ // using next.
+ end = end.next()
+ } else {
+ // Don't use next because that may advance to
+ // the next arena and the in-place logic
+ // doesn't do that.
+ end.shift += heapBitsShift
+ if end.shift == 4*heapBitsShift {
+ end.bitp, end.shift = add1(end.bitp), 0
+ }
+ }
+ if typ.kind&kindGCProg == 0 && (hbitp != end.bitp || (w == nw+2) != (end.shift == 2)) {
+ println("ended at wrong bitmap byte for", typ.string(), "x", dataSize/typ.size)
+ print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
+ print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
+ h0 := heapBitsForAddr(x)
+ print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
+ print("ended at hbitp=", hbitp, " but next starts at bitp=", end.bitp, " shift=", end.shift, "\n")
+ throw("bad heapBitsSetType")
+ }
+
+ // Double-check that bits to be written were written correctly.
+ // Does not check that other bits were not written, unfortunately.
+ h := heapBitsForAddr(x)
+ nptr := typ.ptrdata / goarch.PtrSize
+ ndata := typ.size / goarch.PtrSize
+ count := dataSize / typ.size
+ totalptr := ((count-1)*typ.size + typ.ptrdata) / goarch.PtrSize
+ for i := uintptr(0); i < size/goarch.PtrSize; i++ {
+ j := i % ndata
+ var have, want uint8
+ have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
+ if i >= totalptr {
+ if typ.kind&kindGCProg != 0 && i < (totalptr+3)/4*4 {
+ // heapBitsSetTypeGCProg always fills
+ // in full nibbles of bitScan.
+ want = bitScan
+ }
+ } else {
+ if j < nptr && (*addb(ptrmask, j/8)>>(j%8))&1 != 0 {
+ want |= bitPointer
+ }
+ want |= bitScan
+ }
+ if have != want {
+ println("mismatch writing bits for", typ.string(), "x", dataSize/typ.size)
+ print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
+ print("kindGCProg=", typ.kind&kindGCProg != 0, " outOfPlace=", outOfPlace, "\n")
+ print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
+ h0 := heapBitsForAddr(x)
+ print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
+ print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
+ print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
+ println("at word", i, "offset", i*goarch.PtrSize, "have", hex(have), "want", hex(want))
+ if typ.kind&kindGCProg != 0 {
+ println("GC program:")
+ dumpGCProg(addb(typ.gcdata, 4))
+ }
+ throw("bad heapBitsSetType")
+ }
+ h = h.next()
+ }
+ if ptrmask == debugPtrmask.data {
+ unlock(&debugPtrmask.lock)
+ }
+ }
+}
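+
+// Editor's sketch, not upstream code: how an allocator uses heapBitsSetType
+// per the comment above. The bits are recorded while the object is still
+// unreachable, and a publication barrier orders the bitmap writes before the
+// object can be observed (mallocgc does this with publicationBarrier).
+func exampleRecordType(x, size, dataSize uintptr, typ *_type) unsafe.Pointer {
+ heapBitsSetType(x, size, dataSize, typ)
+ publicationBarrier() // store/store barrier before the object escapes
+ return unsafe.Pointer(x)
+}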
+
+var debugPtrmask struct {
+ lock mutex
+ data *byte
+}
+
+// heapBitsSetTypeGCProg implements heapBitsSetType using a GC program.
+// progSize is the size of the memory described by the program.
+// elemSize is the size of the element whose prefix the GC program describes.
+// dataSize is the total size of the intended data, a multiple of elemSize.
+// allocSize is the total size of the allocated memory.
+//
+// GC programs are only used for large allocations.
+// heapBitsSetType requires that allocSize is a multiple of 4 words,
+// so that the relevant bitmap bytes are not shared with surrounding
+// objects.
+func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
+ if goarch.PtrSize == 8 && allocSize%(4*goarch.PtrSize) != 0 {
+ // Alignment will be wrong.
+ throw("heapBitsSetTypeGCProg: small allocation")
+ }
+ var totalBits uintptr
+ if elemSize == dataSize {
+ totalBits = runGCProg(prog, nil, h.bitp, 2)
+ if totalBits*goarch.PtrSize != progSize {
+ println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
+ throw("heapBitsSetTypeGCProg: unexpected bit count")
+ }
+ } else {
+ count := dataSize / elemSize
+
+ // Piece together program trailer to run after prog that does:
+ // literal(0)
+ // repeat(1, elemSize-progSize-1) // zeros to fill element size
+ // repeat(elemSize, count-1) // repeat that element for count
+ // This zero-pads the data remaining in the first element and then
+ // repeats that first element to fill the array.
+ var trailer [40]byte // 3 varints (max 10 each) + some bytes
+ i := 0
+ if n := elemSize/goarch.PtrSize - progSize/goarch.PtrSize; n > 0 {
+ // literal(0)
+ trailer[i] = 0x01
+ i++
+ trailer[i] = 0
+ i++
+ if n > 1 {
+ // repeat(1, n-1)
+ trailer[i] = 0x81
+ i++
+ n--
+ for ; n >= 0x80; n >>= 7 {
+ trailer[i] = byte(n | 0x80)
+ i++
+ }
+ trailer[i] = byte(n)
+ i++
+ }
+ }
+ // repeat(elemSize/ptrSize, count-1)
+ trailer[i] = 0x80
+ i++
+ n := elemSize / goarch.PtrSize
+ for ; n >= 0x80; n >>= 7 {
+ trailer[i] = byte(n | 0x80)
+ i++
+ }
+ trailer[i] = byte(n)
+ i++
+ n = count - 1
+ for ; n >= 0x80; n >>= 7 {
+ trailer[i] = byte(n | 0x80)
+ i++
+ }
+ trailer[i] = byte(n)
+ i++
+ trailer[i] = 0
+ i++
+
+ runGCProg(prog, &trailer[0], h.bitp, 2)
+
+ // Even though we filled in the full array just now,
+ // record that we only filled in up to the ptrdata of the
+ // last element. This will cause the code below to
+ // memclr the dead section of the final array element,
+ // so that scanobject can stop early in the final element.
+ totalBits = (elemSize*(count-1) + progSize) / goarch.PtrSize
+ }
+ endProg := unsafe.Pointer(addb(h.bitp, (totalBits+3)/4))
+ endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/goarch.PtrSize/wordsPerBitmapByte))
+ memclrNoHeapPointers(endProg, uintptr(endAlloc)-uintptr(endProg))
+}
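+
+// Editor's sketch, not upstream code: the trailer assembled above, written
+// out by hand for a hypothetical element of 10 words whose prog covers 3
+// words, repeated count = 4 times (values chosen so every varint fits in a
+// single byte).
+var exampleTrailer = []byte{
+ 0x01, 0x00, // literal(0): one zero bit pads word 4
+ 0x81, 6, // repeat(1, 6): six more zero bits finish the 10-word element
+ 0x80, 10, 3, // repeat(10, 3): replay the whole element 3 more times
+ 0x00, // stop
+}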
+
+// progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
+// size is the size of the region described by prog, in bytes.
+// The resulting bitvector will have no more than size/goarch.PtrSize bits.
+func progToPointerMask(prog *byte, size uintptr) bitvector {
+ n := (size/goarch.PtrSize + 7) / 8
+ x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
+ x[len(x)-1] = 0xa1 // overflow check sentinel
+ n = runGCProg(prog, nil, &x[0], 1)
+ if x[len(x)-1] != 0xa1 {
+ throw("progToPointerMask: overflow")
+ }
+ return bitvector{int32(n), &x[0]}
+}
+
+// Packed GC pointer bitmaps, aka GC programs.
+//
+// For large types containing arrays, the type information has a
+// natural repetition that can be encoded to save space in the
+// binary and in the memory representation of the type information.
+//
+// The encoding is a simple Lempel-Ziv style bytecode machine
+// with the following instructions:
+//
+// 00000000: stop
+// 0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
+// 10000000 n c: repeat the previous n bits c times; n, c are varints
+// 1nnnnnnn c: repeat the previous n bits c times; c is a varint
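+
+// Editor's sketch, not upstream code: a hand-assembled program in this
+// bytecode for a hypothetical 2-word element (a pointer, then a scalar)
+// repeated 100 times. Literal bits are consumed least-significant bit
+// first, so the data byte 0x01 sets only the pointer bit.
+var exampleGCProg = []byte{
+ 0x02, 0x01, // emit 2 literal bits: 1 (pointer), 0 (scalar)
+ 0x82, 99, // repeat the previous 2 bits 99 more times (198 bits)
+ 0x00, // stop: 200 bits = 100 two-word elements
+}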
+
+// runGCProg executes the GC program prog, and then trailer if non-nil,
+// writing to dst with entries of the given size.
+// If size == 1, dst is a 1-bit pointer mask laid out moving forward from dst.
+// If size == 2, dst is the 2-bit heap bitmap, and writes move backward
+// starting at dst (because the heap bitmap does). In this case, the caller guarantees
+// that only whole bytes in dst need to be written.
+//
+// runGCProg returns the number of 1- or 2-bit entries written to memory.
+func runGCProg(prog, trailer, dst *byte, size int) uintptr {
+ dstStart := dst
+
+ // Bits waiting to be written to memory.
+ var bits uintptr
+ var nbits uintptr
+
+ p := prog
+Run:
+ for {
+ // Flush accumulated full bytes.
+ // The rest of the loop assumes that nbits <= 7.
+ for ; nbits >= 8; nbits -= 8 {
+ if size == 1 {
+ *dst = uint8(bits)
+ dst = add1(dst)
+ bits >>= 8
+ } else {
+ v := bits&bitPointerAll | bitScanAll
+ *dst = uint8(v)
+ dst = add1(dst)
+ bits >>= 4
+ v = bits&bitPointerAll | bitScanAll
+ *dst = uint8(v)
+ dst = add1(dst)
+ bits >>= 4
+ }
+ }
+
+ // Process one instruction.
+ inst := uintptr(*p)
+ p = add1(p)
+ n := inst & 0x7F
+ if inst&0x80 == 0 {
+ // Literal bits; n == 0 means end of program.
+ if n == 0 {
+ // Program is over; continue in trailer if present.
+ if trailer != nil {
+ p = trailer
+ trailer = nil
+ continue
+ }
+ break Run
+ }
+ nbyte := n / 8
+ for i := uintptr(0); i < nbyte; i++ {
+ bits |= uintptr(*p) << nbits
+ p = add1(p)
+ if size == 1 {
+ *dst = uint8(bits)
+ dst = add1(dst)
+ bits >>= 8
+ } else {
+ v := bits&0xf | bitScanAll
+ *dst = uint8(v)
+ dst = add1(dst)
+ bits >>= 4
+ v = bits&0xf | bitScanAll
+ *dst = uint8(v)
+ dst = add1(dst)
+ bits >>= 4
+ }
+ }
+ if n %= 8; n > 0 {
+ bits |= uintptr(*p) << nbits
+ p = add1(p)
+ nbits += n
+ }
+ continue Run
+ }
+
+ // Repeat. If n == 0, it is encoded in a varint in the next bytes.
+ if n == 0 {
+ for off := uint(0); ; off += 7 {
+ x := uintptr(*p)
+ p = add1(p)
+ n |= (x & 0x7F) << off
+ if x&0x80 == 0 {
+ break
+ }
+ }
+ }
+
+ // Count is encoded in a varint in the next bytes.
+ c := uintptr(0)
+ for off := uint(0); ; off += 7 {
+ x := uintptr(*p)
+ p = add1(p)
+ c |= (x & 0x7F) << off
+ if x&0x80 == 0 {
+ break
+ }
+ }
+ c *= n // now total number of bits to copy
+
+ // If the number of bits being repeated is small, load them
+ // into a register and use that register for the entire loop
+ // instead of repeatedly reading from memory.
+ // Handling fewer than 8 bits here makes the general loop simpler.
+ // The cutoff is goarch.PtrSize*8 - 7 to guarantee that when we add
+ // the pattern to a bit buffer holding at most 7 bits (a partial byte)
+ // it will not overflow.
+ src := dst
+ const maxBits = goarch.PtrSize*8 - 7
+ if n <= maxBits {
+ // Start with bits in output buffer.
+ pattern := bits
+ npattern := nbits
+
+ // If we need more bits, fetch them from memory.
+ if size == 1 {
+ src = subtract1(src)
+ for npattern < n {
+ pattern <<= 8
+ pattern |= uintptr(*src)
+ src = subtract1(src)
+ npattern += 8
+ }
+ } else {
+ src = subtract1(src)
+ for npattern < n {
+ pattern <<= 4
+ pattern |= uintptr(*src) & 0xf
+ src = subtract1(src)
+ npattern += 4
+ }
+ }
+
+ // We started with the whole bit output buffer,
+ // and then we loaded bits from whole bytes.
+ // Either way, we might now have too many instead of too few.
+ // Discard the extra.
+ if npattern > n {
+ pattern >>= npattern - n
+ npattern = n
+ }
+
+ // Replicate pattern to at most maxBits.
+ if npattern == 1 {
+ // One bit being repeated.
+ // If the bit is 1, make the pattern all 1s.
+ // If the bit is 0, the pattern is already all 0s,
+ // but we can claim that the number of bits
+ // in the word is equal to the number we need (c),
+ // because right shift of bits will zero fill.
+ if pattern == 1 {
+ pattern = 1<<maxBits - 1
+ npattern = maxBits
+ } else {
+ npattern = c
+ }
+ } else {
+ b := pattern
+ nb := npattern
+ if nb+nb <= maxBits {
+ // Double pattern until the whole uintptr is filled.
+ for nb <= goarch.PtrSize*8 {
+ b |= b << nb
+ nb += nb
+ }
+ // Trim away incomplete copy of original pattern in high bits.
+ // TODO(rsc): Replace with table lookup or loop on systems without divide?
+ nb = maxBits / npattern * npattern
+ b &= 1<<nb - 1
+ pattern = b
+ npattern = nb
+ }
+ }
+
+ // Add pattern to bit buffer and flush bit buffer, c/npattern times.
+ // Since pattern contains >8 bits, there will be full bytes to flush
+ // on each iteration.
+ for ; c >= npattern; c -= npattern {
+ bits |= pattern << nbits
+ nbits += npattern
+ if size == 1 {
+ for nbits >= 8 {
+ *dst = uint8(bits)
+ dst = add1(dst)
+ bits >>= 8
+ nbits -= 8
+ }
+ } else {
+ for nbits >= 4 {
+ *dst = uint8(bits&0xf | bitScanAll)
+ dst = add1(dst)
+ bits >>= 4
+ nbits -= 4
+ }
+ }
+ }
+
+ // Add final fragment to bit buffer.
+ if c > 0 {
+ pattern &= 1<<c - 1
+ bits |= pattern << nbits
+ nbits += c
+ }
+ continue Run
+ }
+
+ // Repeat; n too large to fit in a register.
+ // Since nbits <= 7, we know the first few bytes of repeated data
+ // are already written to memory.
+ off := n - nbits // n > nbits because n > maxBits and nbits <= 7
+ if size == 1 {
+ // Leading src fragment.
+ src = subtractb(src, (off+7)/8)
+ if frag := off & 7; frag != 0 {
+ bits |= uintptr(*src) >> (8 - frag) << nbits
+ src = add1(src)
+ nbits += frag
+ c -= frag
+ }
+ // Main loop: load one byte, write another.
+ // The bits are rotating through the bit buffer.
+ for i := c / 8; i > 0; i-- {
+ bits |= uintptr(*src) << nbits
+ src = add1(src)
+ *dst = uint8(bits)
+ dst = add1(dst)
+ bits >>= 8
+ }
+ // Final src fragment.
+ if c %= 8; c > 0 {
+ bits |= (uintptr(*src) & (1<<c - 1)) << nbits
+ nbits += c
+ }
+ } else {
+ // Leading src fragment.
+ src = subtractb(src, (off+3)/4)
+ if frag := off & 3; frag != 0 {
+ bits |= (uintptr(*src) & 0xf) >> (4 - frag) << nbits
+ src = add1(src)
+ nbits += frag
+ c -= frag
+ }
+ // Main loop: load one byte, write another.
+ // The bits are rotating through the bit buffer.
+ for i := c / 4; i > 0; i-- {
+ bits |= (uintptr(*src) & 0xf) << nbits
+ src = add1(src)
+ *dst = uint8(bits&0xf | bitScanAll)
+ dst = add1(dst)
+ bits >>= 4
+ }
+ // Final src fragment.
+ if c %= 4; c > 0 {
+ bits |= (uintptr(*src) & (1<<c - 1)) << nbits
+ nbits += c
+ }
+ }
+ }
+
+ // Write any final bits out, using full-byte writes, even for the final byte.
+ var totalBits uintptr
+ if size == 1 {
+ totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
+ nbits += -nbits & 7
+ for ; nbits > 0; nbits -= 8 {
+ *dst = uint8(bits)
+ dst = add1(dst)
+ bits >>= 8
+ }
+ } else {
+ totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*4 + nbits
+ nbits += -nbits & 3
+ for ; nbits > 0; nbits -= 4 {
+ v := bits&0xf | bitScanAll
+ *dst = uint8(v)
+ dst = add1(dst)
+ bits >>= 4
+ }
+ }
+ return totalBits
+}
+
+// materializeGCProg allocates space for the (1-bit) pointer bitmask
+// for an object of size ptrdata. Then it fills that space with the
+// pointer bitmask specified by the program prog.
+// The bitmask starts at s.startAddr.
+// The result must be deallocated with dematerializeGCProg.
+func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
+ // Each word of ptrdata needs one bit in the bitmap.
+ bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
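+ // (Illustrative, on 64-bit: each bitmap byte covers 8 words = 64 bytes,
+ // so ptrdata = 1000 needs divRoundUp(1000, 64) = 16 bitmap bytes.)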
+ // Compute the number of pages needed for bitmapBytes.
+ pages := divRoundUp(bitmapBytes, pageSize)
+ s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
+ runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1)
+ return s
+}
+func dematerializeGCProg(s *mspan) {
+ mheap_.freeManual(s, spanAllocPtrScalarBits)
+}
+
+func dumpGCProg(p *byte) {
+ nptr := 0
+ for {
+ x := *p
+ p = add1(p)
+ if x == 0 {
+ print("\t", nptr, " end\n")
+ break
+ }
+ if x&0x80 == 0 {
+ print("\t", nptr, " lit ", x, ":")
+ n := int(x+7) / 8
+ for i := 0; i < n; i++ {
+ print(" ", hex(*p))
+ p = add1(p)
+ }
+ print("\n")
+ nptr += int(x)
+ } else {
+ nbit := int(x &^ 0x80)
+ if nbit == 0 {
+ for nb := uint(0); ; nb += 7 {
+ x := *p
+ p = add1(p)
+ nbit |= int(x&0x7f) << nb
+ if x&0x80 == 0 {
+ break
+ }
+ }
+ }
+ count := 0
+ for nb := uint(0); ; nb += 7 {
+ x := *p
+ p = add1(p)
+ count |= int(x&0x7f) << nb
+ if x&0x80 == 0 {
+ break
+ }
+ }
+ print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
+ nptr += nbit * count
+ }
+ }
+}
+
+// Testing.
+
+func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
+ target := (*stkframe)(ctxt)
+ if frame.sp <= target.sp && target.sp < frame.varp {
+ *target = *frame
+ return false
+ }
+ return true
+}
+
+// gcbits returns the GC type info for x, for testing.
+// The result is the bitmap entries (0 or 1), one entry per byte.
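+// (Illustrative, on 64-bit: struct{ p *int; n int } yields []byte{1},
+// since the trailing scalar word's 0 entry lies beyond ptrdata and is
+// trimmed, while struct{ n int; p *int } yields []byte{0, 1}.)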
+//
+//go:linkname reflect_gcbits reflect.gcbits
+func reflect_gcbits(x any) []byte {
+ ret := getgcmask(x)
+ typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
+ nptr := typ.ptrdata / goarch.PtrSize
+ for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
+ ret = ret[:len(ret)-1]
+ }
+ return ret
+}
+
+// Returns GC type info for the pointer stored in ep for testing.
+// If ep points to the stack, only static live information will be returned
+// (i.e. nothing for objects that are only dynamically live stack objects).
+func getgcmask(ep any) (mask []byte) {
+ e := *efaceOf(&ep)
+ p := e.data
+ t := e._type
+ // data or bss
+ for _, datap := range activeModules() {
+ // data
+ if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
+ bitmap := datap.gcdatamask.bytedata
+ n := (*ptrtype)(unsafe.Pointer(t)).elem.size
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
+ off := (uintptr(p) + i - datap.data) / goarch.PtrSize
+ mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+ }
+ return
+ }
+
+ // bss
+ if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
+ bitmap := datap.gcbssmask.bytedata
+ n := (*ptrtype)(unsafe.Pointer(t)).elem.size
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
+ off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
+ mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+ }
+ return
+ }
+ }
+
+ // heap
+ if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
+ hbits := heapBitsForAddr(base)
+ n := s.elemsize
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
+ if hbits.isPointer() {
+ mask[i/goarch.PtrSize] = 1
+ }
+ if !hbits.morePointers() {
+ mask = mask[:i/goarch.PtrSize]
+ break
+ }
+ hbits = hbits.next()
+ }
+ return
+ }
+
+ // stack
+ if _g_ := getg(); _g_.m.curg.stack.lo <= uintptr(p) && uintptr(p) < _g_.m.curg.stack.hi {
+ var frame stkframe
+ frame.sp = uintptr(p)
+ _g_ := getg()
+ gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
+ if frame.fn.valid() {
+ locals, _, _ := getStackMap(&frame, nil, false)
+ if locals.n == 0 {
+ return
+ }
+ size := uintptr(locals.n) * goarch.PtrSize
+ n := (*ptrtype)(unsafe.Pointer(t)).elem.size
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
+ off := (uintptr(p) + i - frame.varp + size) / goarch.PtrSize
+ mask[i/goarch.PtrSize] = locals.ptrbit(off)
+ }
+ }
+ return
+ }
+
+ // otherwise, not something the GC knows about.
+ // possibly read-only data, like malloc(0).
+ // must not have pointers
+ return
+}
diff --git a/contrib/go/_std_1.19/src/runtime/mcache.go b/contrib/go/_std_1.19/src/runtime/mcache.go
new file mode 100644
index 0000000000..1f484fb9b6
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mcache.go
@@ -0,0 +1,329 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// Per-thread (in Go, per-P) cache for small objects.
+// This includes a small object cache and local allocation stats.
+// No locking needed because it is per-thread (per-P).
+//
+// mcaches are allocated from non-GC'd memory, so any heap pointers
+// must be specially handled.
+//
+//go:notinheap
+type mcache struct {
+ // The following members are accessed on every malloc,
+ // so they are grouped here for better caching.
+ nextSample uintptr // trigger heap sample after allocating this many bytes
+ scanAlloc uintptr // bytes of scannable heap allocated
+
+ // Allocator cache for tiny objects w/o pointers.
+ // See "Tiny allocator" comment in malloc.go.
+
+ // tiny points to the beginning of the current tiny block, or
+ // nil if there is no current tiny block.
+ //
+ // tiny is a heap pointer. Since mcache is in non-GC'd memory,
+ // we handle it by clearing it in releaseAll during mark
+ // termination.
+ //
+ // tinyAllocs is the number of tiny allocations performed
+ // by the P that owns this mcache.
+ tiny uintptr
+ tinyoffset uintptr
+ tinyAllocs uintptr
+
+ // The rest is not accessed on every malloc.
+
+ alloc [numSpanClasses]*mspan // spans to allocate from, indexed by spanClass
+
+ stackcache [_NumStackOrders]stackfreelist
+
+ // flushGen indicates the sweepgen during which this mcache
+ // was last flushed. If flushGen != mheap_.sweepgen, the spans
+ // in this mcache are stale and need to be flushed so they
+ // can be swept. This is done in acquirep.
+ flushGen uint32
+}
+
+// A gclink is a node in a linked list of blocks, like mlink,
+// but it is opaque to the garbage collector.
+// The GC does not trace the pointers during collection,
+// and the compiler does not emit write barriers for assignments
+// of gclinkptr values. Code should store references to gclinks
+// as gclinkptr, not as *gclink.
+type gclink struct {
+ next gclinkptr
+}
+
+// A gclinkptr is a pointer to a gclink, but it is opaque
+// to the garbage collector.
+type gclinkptr uintptr
+
+// ptr returns the *gclink form of p.
+// The result should be used for accessing fields, not stored
+// in other data structures.
+func (p gclinkptr) ptr() *gclink {
+ return (*gclink)(unsafe.Pointer(p))
+}
+
+type stackfreelist struct {
+ list gclinkptr // linked list of free stacks
+ size uintptr // total size of stacks in list
+}
+
+// dummy mspan that contains no free objects.
+var emptymspan mspan
+
+func allocmcache() *mcache {
+ var c *mcache
+ systemstack(func() {
+ lock(&mheap_.lock)
+ c = (*mcache)(mheap_.cachealloc.alloc())
+ c.flushGen = mheap_.sweepgen
+ unlock(&mheap_.lock)
+ })
+ for i := range c.alloc {
+ c.alloc[i] = &emptymspan
+ }
+ c.nextSample = nextSample()
+ return c
+}
+
+// freemcache releases resources associated with this
+// mcache and puts the object onto a free list.
+//
+// In some cases there is no way to simply release
+// resources, such as statistics, so donate them to
+// a different mcache (the recipient).
+func freemcache(c *mcache) {
+ systemstack(func() {
+ c.releaseAll()
+ stackcache_clear(c)
+
+ // NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
+ // with the stealing of gcworkbufs during garbage collection to avoid
+ // a race where the workbuf is double-freed.
+ // gcworkbuffree(c.gcworkbuf)
+
+ lock(&mheap_.lock)
+ mheap_.cachealloc.free(unsafe.Pointer(c))
+ unlock(&mheap_.lock)
+ })
+}
+
+// getMCache is a convenience function which tries to obtain an mcache.
+//
+// Returns nil if we don't have a P and we're not bootstrapping. The caller's
+// P must not change, so we must be in a non-preemptible state.
+func getMCache(mp *m) *mcache {
+ // Grab the mcache, since that's where stats live.
+ pp := mp.p.ptr()
+ var c *mcache
+ if pp == nil {
+ // We will be called without a P while bootstrapping,
+ // in which case we use mcache0, which is set in mallocinit.
+ // mcache0 is cleared when bootstrapping is complete,
+ // by procresize.
+ c = mcache0
+ } else {
+ c = pp.mcache
+ }
+ return c
+}
+
+// refill acquires a new span of span class spc for c. This span will
+// have at least one free object. The current span in c must be full.
+//
+// Must run in a non-preemptible context since otherwise the owner of
+// c could change.
+func (c *mcache) refill(spc spanClass) {
+ // Return the current cached span to the central lists.
+ s := c.alloc[spc]
+
+ if uintptr(s.allocCount) != s.nelems {
+ throw("refill of span with free space remaining")
+ }
+ if s != &emptymspan {
+ // Mark this span as no longer cached.
+ if s.sweepgen != mheap_.sweepgen+3 {
+ throw("bad sweepgen in refill")
+ }
+ mheap_.central[spc].mcentral.uncacheSpan(s)
+
+ // Count up how many slots were used and record it.
+ stats := memstats.heapStats.acquire()
+ slotsUsed := int64(s.allocCount) - int64(s.allocCountBeforeCache)
+ atomic.Xadd64(&stats.smallAllocCount[spc.sizeclass()], slotsUsed)
+
+ // Flush tinyAllocs.
+ if spc == tinySpanClass {
+ atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
+ c.tinyAllocs = 0
+ }
+ memstats.heapStats.release()
+
+ // Count the allocs in inconsistent, internal stats.
+ bytesAllocated := slotsUsed * int64(s.elemsize)
+ gcController.totalAlloc.Add(bytesAllocated)
+
+ // Clear the second allocCount just to be safe.
+ s.allocCountBeforeCache = 0
+ }
+
+ // Get a new cached span from the central lists.
+ s = mheap_.central[spc].mcentral.cacheSpan()
+ if s == nil {
+ throw("out of memory")
+ }
+
+ if uintptr(s.allocCount) == s.nelems {
+ throw("span has no free space")
+ }
+
+ // Indicate that this span is cached and prevent asynchronous
+ // sweeping in the next sweep phase.
+ s.sweepgen = mheap_.sweepgen + 3
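+ // (Per the sweepgen encoding documented on mspan: sweepgen+3 means the
+ // span was swept and then cached, and is still cached.)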
+
+ // Store the current alloc count for accounting later.
+ s.allocCountBeforeCache = s.allocCount
+
+ // Update heapLive and flush scanAlloc.
+ //
+ // We have not yet allocated anything new into the span, but we
+ // assume that all of its slots will get used, so this makes
+ // heapLive an overestimate.
+ //
+ // When the span gets uncached, we'll fix up this overestimate
+ // if necessary (see releaseAll).
+ //
+ // We pick an overestimate here because an underestimate leads
+ // the pacer to believe that it's in better shape than it is,
+ // which appears to lead to more memory used. See #53738 for
+ // more details.
+ usedBytes := uintptr(s.allocCount) * s.elemsize
+ gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
+ c.scanAlloc = 0
+
+ c.alloc[spc] = s
+}
+
+// allocLarge allocates a span for a large object.
+func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan {
+ if size+_PageSize < size {
+ throw("out of memory")
+ }
+ npages := size >> _PageShift
+ if size&_PageMask != 0 {
+ npages++
+ }
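+ // (Illustrative: with the runtime's 8KB pages, size = 10000 gives
+ // npages = 1 from the shift, and the nonzero remainder bumps it to 2.)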
+
+ // Deduct credit for this span allocation and sweep if
+ // necessary. mHeap_Alloc will also sweep npages, so this only
+ // pays the debt down to npage pages.
+ deductSweepCredit(npages*_PageSize, npages)
+
+ spc := makeSpanClass(0, noscan)
+ s := mheap_.alloc(npages, spc)
+ if s == nil {
+ throw("out of memory")
+ }
+
+ // Count the alloc in consistent, external stats.
+ stats := memstats.heapStats.acquire()
+ atomic.Xadd64(&stats.largeAlloc, int64(npages*pageSize))
+ atomic.Xadd64(&stats.largeAllocCount, 1)
+ memstats.heapStats.release()
+
+ // Count the alloc in inconsistent, internal stats.
+ gcController.totalAlloc.Add(int64(npages * pageSize))
+
+ // Update heapLive.
+ gcController.update(int64(s.npages*pageSize), 0)
+
+ // Put the large span in the mcentral swept list so that it's
+ // visible to the background sweeper.
+ mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
+ s.limit = s.base() + size
+ heapBitsForAddr(s.base()).initSpan(s)
+ return s
+}
+
+func (c *mcache) releaseAll() {
+ // Take this opportunity to flush scanAlloc.
+ scanAlloc := int64(c.scanAlloc)
+ c.scanAlloc = 0
+
+ sg := mheap_.sweepgen
+ dHeapLive := int64(0)
+ for i := range c.alloc {
+ s := c.alloc[i]
+ if s != &emptymspan {
+ slotsUsed := int64(s.allocCount) - int64(s.allocCountBeforeCache)
+ s.allocCountBeforeCache = 0
+
+ // Adjust smallAllocCount for whatever was allocated.
+ stats := memstats.heapStats.acquire()
+ atomic.Xadd64(&stats.smallAllocCount[spanClass(i).sizeclass()], slotsUsed)
+ memstats.heapStats.release()
+
+ // Adjust the actual allocs in inconsistent, internal stats.
+ // We assumed earlier that the full span gets allocated.
+ gcController.totalAlloc.Add(slotsUsed * int64(s.elemsize))
+
+ if s.sweepgen != sg+1 {
+ // refill conservatively counted unallocated slots in gcController.heapLive.
+ // Undo this.
+ //
+ // If this span was cached before sweep, then gcController.heapLive was totally
+ // recomputed since caching this span, so we don't do this for stale spans.
+ dHeapLive -= int64(uintptr(s.nelems)-uintptr(s.allocCount)) * int64(s.elemsize)
+ }
+
+ // Release the span to the mcentral.
+ mheap_.central[i].mcentral.uncacheSpan(s)
+ c.alloc[i] = &emptymspan
+ }
+ }
+ // Clear tinyalloc pool.
+ c.tiny = 0
+ c.tinyoffset = 0
+
+ // Flush tinyAllocs.
+ stats := memstats.heapStats.acquire()
+ atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
+ c.tinyAllocs = 0
+ memstats.heapStats.release()
+
+ // Update heapLive and heapScan.
+ gcController.update(dHeapLive, scanAlloc)
+}
+
+// prepareForSweep flushes c if the system has entered a new sweep phase
+// since c was populated. This must happen between the sweep phase
+// starting and the first allocation from c.
+func (c *mcache) prepareForSweep() {
+ // Alternatively, instead of making sure we do this on every P
+ // between starting the world and allocating on that P, we
+ // could leave allocate-black on, allow allocation to continue
+ // as usual, use a ragged barrier at the beginning of sweep to
+ // ensure all cached spans are swept, and then disable
+ // allocate-black. However, with this approach it's difficult
+ // to avoid spilling mark bits into the *next* GC cycle.
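+ //
+ // (Note: sweepgen advances by 2 each sweep cycle, so the non-throwing
+ // c.flushGen == sg-2 case below means c was last flushed exactly one
+ // cycle ago, which is the expected stale case.)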
+ sg := mheap_.sweepgen
+ if c.flushGen == sg {
+ return
+ } else if c.flushGen != sg-2 {
+ println("bad flushGen", c.flushGen, "in prepareForSweep; sweepgen", sg)
+ throw("bad flushGen")
+ }
+ c.releaseAll()
+ stackcache_clear(c)
+ atomic.Store(&c.flushGen, mheap_.sweepgen) // Synchronizes with gcStart
+}
diff --git a/contrib/go/_std_1.18/src/runtime/mcentral.go b/contrib/go/_std_1.19/src/runtime/mcentral.go
index e4bdf35071..e4bdf35071 100644
--- a/contrib/go/_std_1.18/src/runtime/mcentral.go
+++ b/contrib/go/_std_1.19/src/runtime/mcentral.go
diff --git a/contrib/go/_std_1.18/src/runtime/mcheckmark.go b/contrib/go/_std_1.19/src/runtime/mcheckmark.go
index 1dd28585f1..1dd28585f1 100644
--- a/contrib/go/_std_1.18/src/runtime/mcheckmark.go
+++ b/contrib/go/_std_1.19/src/runtime/mcheckmark.go
diff --git a/contrib/go/_std_1.19/src/runtime/mem.go b/contrib/go/_std_1.19/src/runtime/mem.go
new file mode 100644
index 0000000000..0ca933b25b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mem.go
@@ -0,0 +1,143 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// OS memory management abstraction layer
+//
+// Regions of the address space managed by the runtime may be in one of four
+// states at any given time:
+// 1) None - Unreserved and unmapped, the default state of any region.
+// 2) Reserved - Owned by the runtime, but accessing it would cause a fault.
+// Does not count against the process' memory footprint.
+// 3) Prepared - Reserved, intended not to be backed by physical memory (though
+// an OS may implement this lazily). Can transition efficiently to
+// Ready. Accessing memory in such a region is undefined (may
+// fault, may give back unexpected zeroes, etc.).
+// 4) Ready - May be accessed safely.
+//
+// This set of states is more than is strictly necessary to support all the
+// currently supported platforms. One could get by with just None, Reserved, and
+// Ready. However, the Prepared state gives us flexibility for performance
+// purposes. For example, on POSIX-y operating systems, Reserved is usually a
+// private anonymous mmap'd region with PROT_NONE set, and to transition
+// to Ready would require setting PROT_READ|PROT_WRITE. However the
+// underspecification of Prepared lets us use just MADV_FREE to transition from
+// Ready to Prepared. Thus with the Prepared state we can set the permission
+// bits just once early on, and efficiently tell the OS that it's free to
+// take pages away from us when we don't strictly need them.
+//
+// This file defines a cross-OS interface for a common set of helpers
+// that transition memory regions between these states. The helpers call into
+// OS-specific implementations that handle errors, while the interface boundary
+// implements cross-OS functionality, like updating runtime accounting.
+
+// sysAlloc transitions an OS-chosen region of memory from None to Ready.
+// More specifically, it obtains a large chunk of zeroed memory from the
+// operating system, typically on the order of a hundred kilobytes
+// or a megabyte. This memory is always immediately available for use.
+//
+// sysStat must be non-nil.
+//
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
+//
+//go:nosplit
+func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {
+ sysStat.add(int64(n))
+ gcController.mappedReady.Add(int64(n))
+ return sysAllocOS(n)
+}
+
+// sysUnused transitions a memory region from Ready to Prepared. It notifies the
+// operating system that the physical pages backing this memory region are no
+// longer needed and can be reused for other purposes. The contents of a
+// sysUnused memory region are considered forfeit and the region must not be
+// accessed again until sysUsed is called.
+func sysUnused(v unsafe.Pointer, n uintptr) {
+ gcController.mappedReady.Add(-int64(n))
+ sysUnusedOS(v, n)
+}
+
+// sysUsed transitions a memory region from Prepared to Ready. It notifies the
+// operating system that the memory region is needed and ensures that the region
+// may be safely accessed. This is typically a no-op on systems that don't have
+// an explicit commit step and hard over-commit limits, but is critical on
+// Windows, for example.
+//
+// This operation is idempotent for memory already in the Prepared state, so
+// it is safe to refer, with v and n, to a range of memory that includes both
+// Prepared and Ready memory. However, the caller must provide the exact amount
+// of Prepared memory for accounting purposes.
+func sysUsed(v unsafe.Pointer, n, prepared uintptr) {
+ gcController.mappedReady.Add(int64(prepared))
+ sysUsedOS(v, n)
+}
+
+// sysHugePage does not transition memory regions, but instead provides a
+// hint to the OS that it would be more efficient to back this memory region
+// with pages of a larger size transparently.
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+ sysHugePageOS(v, n)
+}
+
+// sysFree transitions a memory region from any state to None. Therefore, it
+// returns memory unconditionally. It is used if an out-of-memory error has been
+// detected midway through an allocation or to carve out an aligned section of
+// the address space. It is okay if sysFree is a no-op only if sysReserve always
+// returns a memory region aligned to the heap allocator's alignment
+// restrictions.
+//
+// sysStat must be non-nil.
+//
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
+//
+//go:nosplit
+func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+ sysStat.add(-int64(n))
+ gcController.mappedReady.Add(-int64(n))
+ sysFreeOS(v, n)
+}
+
+// sysFault transitions a memory region from Ready to Reserved. It
+// marks a region such that it will always fault if accessed. Used only for
+// debugging the runtime.
+//
+// TODO(mknyszek): Currently it's true that all uses of sysFault transition
+// memory from Ready to Reserved, but this may not be true in the future
+// since on every platform the operation is much more general than that.
+// If a transition from Prepared is ever introduced, create a new function
+// that elides the Ready state accounting.
+func sysFault(v unsafe.Pointer, n uintptr) {
+ gcController.mappedReady.Add(-int64(n))
+ sysFaultOS(v, n)
+}
+
+// sysReserve transitions a memory region from None to Reserved. It reserves
+// address space in such a way that it would cause a fatal fault upon access
+// (either via permissions or not committing the memory). Such a reservation is
+// thus never backed by physical memory.
+//
+// If the pointer passed to it is non-nil, the caller wants the
+// reservation there, but sysReserve can still choose another
+// location if that one is unavailable.
+//
+// NOTE: sysReserve returns OS-aligned memory, but the heap allocator
+// may use larger alignment, so the caller must be careful to realign the
+// memory obtained by sysReserve.
+func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+ return sysReserveOS(v, n)
+}
+
+// sysMap transitions a memory region from Reserved to Prepared. It ensures the
+// memory region can be efficiently transitioned to Ready.
+//
+// sysStat must be non-nil.
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
+ sysStat.add(int64(n))
+ sysMapOS(v, n)
+}
diff --git a/contrib/go/_std_1.19/src/runtime/mem_darwin.go b/contrib/go/_std_1.19/src/runtime/mem_darwin.go
new file mode 100644
index 0000000000..25862cf161
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mem_darwin.go
@@ -0,0 +1,70 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "unsafe"
+)
+
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
+//
+//go:nosplit
+func sysAllocOS(n uintptr) unsafe.Pointer {
+ v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
+ if err != 0 {
+ return nil
+ }
+ return v
+}
+
+func sysUnusedOS(v unsafe.Pointer, n uintptr) {
+ // MADV_FREE_REUSABLE is like MADV_FREE except it also propagates
+ // accounting information about the process to task_info.
+ madvise(v, n, _MADV_FREE_REUSABLE)
+}
+
+func sysUsedOS(v unsafe.Pointer, n uintptr) {
+ // MADV_FREE_REUSE is necessary to keep the kernel's accounting
+ // accurate. If called on any memory region that hasn't been
+ // MADV_FREE_REUSABLE'd, it's a no-op.
+ madvise(v, n, _MADV_FREE_REUSE)
+}
+
+func sysHugePageOS(v unsafe.Pointer, n uintptr) {
+}
+
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
+//
+//go:nosplit
+func sysFreeOS(v unsafe.Pointer, n uintptr) {
+ munmap(v, n)
+}
+
+func sysFaultOS(v unsafe.Pointer, n uintptr) {
+ mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
+}
+
+func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+ p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
+ if err != 0 {
+ return nil
+ }
+ return p
+}
+
+const _ENOMEM = 12
+
+func sysMapOS(v unsafe.Pointer, n uintptr) {
+ p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
+ if err == _ENOMEM {
+ throw("runtime: out of memory")
+ }
+ if p != v || err != 0 {
+ print("runtime: mmap(", v, ", ", n, ") returned ", p, ", ", err, "\n")
+ throw("runtime: cannot map pages in arena address space")
+ }
+}
diff --git a/contrib/go/_std_1.19/src/runtime/mem_linux.go b/contrib/go/_std_1.19/src/runtime/mem_linux.go
new file mode 100644
index 0000000000..1630664cff
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mem_linux.go
@@ -0,0 +1,193 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+const (
+ _EACCES = 13
+ _EINVAL = 22
+)
+
+// Don't split the stack as this method may be invoked without a valid G, which
+// prevents us from allocating more stack.
+//
+//go:nosplit
+func sysAllocOS(n uintptr) unsafe.Pointer {
+ p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
+ if err != 0 {
+ if err == _EACCES {
+ print("runtime: mmap: access denied\n")
+ exit(2)
+ }
+ if err == _EAGAIN {
+ print("runtime: mmap: too much locked memory (check 'ulimit -l').\n")
+ exit(2)
+ }
+ return nil
+ }
+ return p
+}
+
+var adviseUnused = uint32(_MADV_FREE)
+
+func sysUnusedOS(v unsafe.Pointer, n uintptr) {
+ // By default, Linux's "transparent huge page" support will
+ // merge pages into a huge page if there's even a single
+ // present regular page, undoing the effects of madvise(adviseUnused)
+ // below. On amd64, that means khugepaged can turn a single
+ // 4KB page to 2MB, bloating the process's RSS by as much as
+ // 512X. (See issue #8832 and Linux kernel bug
+ // https://bugzilla.kernel.org/show_bug.cgi?id=93111)
+ //
+ // To work around this, we explicitly disable transparent huge
+ // pages when we release pages of the heap. However, we have
+ // to do this carefully because changing this flag tends to
+ // split the VMA (memory mapping) containing v into three
+ // VMAs in order to track the different values of the
+ // MADV_NOHUGEPAGE flag in the different regions. There's a
+ // default limit of 65530 VMAs per address space (sysctl
+ // vm.max_map_count), so we must be careful not to create too
+ // many VMAs (see issue #12233).
+ //
+ // Since huge pages are huge, there's little use in adjusting
+ // the MADV_NOHUGEPAGE flag on a fine granularity, so we avoid
+ // exploding the number of VMAs by only adjusting the
+ // MADV_NOHUGEPAGE flag on a large granularity. This still
+ // gets most of the benefit of huge pages while keeping the
+ // number of VMAs under control. With hugePageSize = 2MB, even
+ // a pessimal heap can reach 128GB before running out of VMAs.
+ if physHugePageSize != 0 {
+ // If it's a large allocation, we want to leave huge
+ // pages enabled. Hence, we only adjust the huge page
+ // flag on the huge pages containing v and v+n-1, and
+ // only if those aren't aligned.
+ var head, tail uintptr
+ if uintptr(v)&(physHugePageSize-1) != 0 {
+ // Compute huge page containing v.
+ head = alignDown(uintptr(v), physHugePageSize)
+ }
+ if (uintptr(v)+n)&(physHugePageSize-1) != 0 {
+ // Compute huge page containing v+n-1.
+ tail = alignDown(uintptr(v)+n-1, physHugePageSize)
+ }
+
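+ // Illustrative, with 2MB huge pages: v = 0x5ff000, n = 0x4000 gives
+ // head = 0x400000 and tail = 0x600000; they are adjacent, so the
+ // branch below covers both with a single 4MB madvise call.
+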
+ // Note that madvise will return EINVAL if the flag is
+ // already set, which is quite likely. We ignore
+ // errors.
+ if head != 0 && head+physHugePageSize == tail {
+ // head and tail are different but adjacent,
+ // so do this in one call.
+ madvise(unsafe.Pointer(head), 2*physHugePageSize, _MADV_NOHUGEPAGE)
+ } else {
+ // Advise the huge pages containing v and v+n-1.
+ if head != 0 {
+ madvise(unsafe.Pointer(head), physHugePageSize, _MADV_NOHUGEPAGE)
+ }
+ if tail != 0 && tail != head {
+ madvise(unsafe.Pointer(tail), physHugePageSize, _MADV_NOHUGEPAGE)
+ }
+ }
+ }
+
+ if uintptr(v)&(physPageSize-1) != 0 || n&(physPageSize-1) != 0 {
+ // madvise will round this to any physical page
+ // *covered* by this range, so an unaligned madvise
+ // will release more memory than intended.
+ throw("unaligned sysUnused")
+ }
+
+ var advise uint32
+ if debug.madvdontneed != 0 {
+ advise = _MADV_DONTNEED
+ } else {
+ advise = atomic.Load(&adviseUnused)
+ }
+ if errno := madvise(v, n, int32(advise)); advise == _MADV_FREE && errno != 0 {
+ // MADV_FREE was added in Linux 4.5. Fall back to MADV_DONTNEED if it is
+ // not supported.
+ atomic.Store(&adviseUnused, _MADV_DONTNEED)
+ madvise(v, n, _MADV_DONTNEED)
+ }
+
+ if debug.harddecommit > 0 {
+ p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
+ if p != v || err != 0 {
+ throw("runtime: cannot disable permissions in address space")
+ }
+ }
+}
+
+func sysUsedOS(v unsafe.Pointer, n uintptr) {
+ if debug.harddecommit > 0 {
+ p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
+ if err == _ENOMEM {
+ throw("runtime: out of memory")
+ }
+ if p != v || err != 0 {
+ throw("runtime: cannot remap pages in address space")
+ }
+ // Don't do the sysHugePage optimization in hard decommit mode.
+ // We're breaking up pages everywhere; there's no point.
+ return
+ }
+ // Partially undo the NOHUGEPAGE marks from sysUnused
+ // for whole huge pages between v and v+n. This may
+ // leave huge pages off at the end points v and v+n
+ // even though allocations may cover these entire huge
+ // pages. We could detect this and undo NOHUGEPAGE on
+ // the end points as well, but it's probably not worth
+ // the cost because when neighboring allocations are
+ // freed sysUnused will just set NOHUGEPAGE again.
+ sysHugePageOS(v, n)
+}
+
+func sysHugePageOS(v unsafe.Pointer, n uintptr) {
+ if physHugePageSize != 0 {
+ // Round v up to a huge page boundary.
+ beg := alignUp(uintptr(v), physHugePageSize)
+ // Round v+n down to a huge page boundary.
+ end := alignDown(uintptr(v)+n, physHugePageSize)
+
+ if beg < end {
+ madvise(unsafe.Pointer(beg), end-beg, _MADV_HUGEPAGE)
+ }
+ }
+}
+
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
+//
+//go:nosplit
+func sysFreeOS(v unsafe.Pointer, n uintptr) {
+ munmap(v, n)
+}
+
+func sysFaultOS(v unsafe.Pointer, n uintptr) {
+ mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
+}
+
+func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+ p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
+ if err != 0 {
+ return nil
+ }
+ return p
+}
+
+func sysMapOS(v unsafe.Pointer, n uintptr) {
+ p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
+ if err == _ENOMEM {
+ throw("runtime: out of memory")
+ }
+ if p != v || err != 0 {
+ print("runtime: mmap(", v, ", ", n, ") returned ", p, ", ", err, "\n")
+ throw("runtime: cannot map pages in arena address space")
+ }
+}
diff --git a/contrib/go/_std_1.19/src/runtime/memclr_amd64.s b/contrib/go/_std_1.19/src/runtime/memclr_amd64.s
new file mode 100644
index 0000000000..19bfa6f20d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/memclr_amd64.s
@@ -0,0 +1,218 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !plan9
+
+#include "go_asm.h"
+#include "textflag.h"
+#include "asm_amd64.h"
+
+// See memclrNoHeapPointers Go doc for important implementation constraints.
+
+// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
+// ABIInternal for performance.
+TEXT runtime·memclrNoHeapPointers<ABIInternal>(SB), NOSPLIT, $0-16
+ // AX = ptr
+ // BX = n
+ MOVQ AX, DI // DI = ptr
+ XORQ AX, AX
+
+ // MOVOU seems to always be faster than REP STOSQ when Enhanced REP STOSQ is not available.
+tail:
+ // BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing.
+ TESTQ BX, BX
+ JEQ _0
+ CMPQ BX, $2
+ JBE _1or2
+ CMPQ BX, $4
+ JBE _3or4
+ CMPQ BX, $8
+ JB _5through7
+ JE _8
+ CMPQ BX, $16
+ JBE _9through16
+ CMPQ BX, $32
+ JBE _17through32
+ CMPQ BX, $64
+ JBE _33through64
+ CMPQ BX, $128
+ JBE _65through128
+ CMPQ BX, $256
+ JBE _129through256
+
+ CMPB internal∕cpu·X86+const_offsetX86HasERMS(SB), $1 // enhanced REP MOVSB/STOSB
+ JNE skip_erms
+
+ // If the size is less than 2KB, do not use ERMS as it has a big start-up cost.
+ // Table 3-4. Relative Performance of Memcpy() Using ERMSB Vs. 128-bit AVX
+ // in the Intel Optimization Guide shows better performance for ERMSB starting
+ // from 2KB. Benchmarks show a similar threshold for REP STOS vs AVX.
+ CMPQ BX, $2048
+ JAE loop_preheader_erms
+
+skip_erms:
+#ifndef hasAVX2
+ CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1
+ JE loop_preheader_avx2
+ // TODO: for really big clears, use MOVNTDQ, even without AVX2.
+
+loop:
+ MOVOU X15, 0(DI)
+ MOVOU X15, 16(DI)
+ MOVOU X15, 32(DI)
+ MOVOU X15, 48(DI)
+ MOVOU X15, 64(DI)
+ MOVOU X15, 80(DI)
+ MOVOU X15, 96(DI)
+ MOVOU X15, 112(DI)
+ MOVOU X15, 128(DI)
+ MOVOU X15, 144(DI)
+ MOVOU X15, 160(DI)
+ MOVOU X15, 176(DI)
+ MOVOU X15, 192(DI)
+ MOVOU X15, 208(DI)
+ MOVOU X15, 224(DI)
+ MOVOU X15, 240(DI)
+ SUBQ $256, BX
+ ADDQ $256, DI
+ CMPQ BX, $256
+ JAE loop
+ JMP tail
+#endif
+
+loop_preheader_avx2:
+ VPXOR X0, X0, X0
+ // For smaller sizes MOVNTDQ may be faster or slower depending on hardware.
+ // For larger sizes it is always faster, even on dual Xeons with 30M cache.
+ // TODO: take into account the actual LLC size. E.g., glibc uses LLC size/2.
+ CMPQ BX, $0x2000000
+ JAE loop_preheader_avx2_huge
+
+loop_avx2:
+ VMOVDQU Y0, 0(DI)
+ VMOVDQU Y0, 32(DI)
+ VMOVDQU Y0, 64(DI)
+ VMOVDQU Y0, 96(DI)
+ SUBQ $128, BX
+ ADDQ $128, DI
+ CMPQ BX, $128
+ JAE loop_avx2
+ VMOVDQU Y0, -32(DI)(BX*1)
+ VMOVDQU Y0, -64(DI)(BX*1)
+ VMOVDQU Y0, -96(DI)(BX*1)
+ VMOVDQU Y0, -128(DI)(BX*1)
+ VZEROUPPER
+ RET
+
+loop_preheader_erms:
+#ifndef hasAVX2
+ CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1
+ JNE loop_erms
+#endif
+
+ VPXOR X0, X0, X0
+ // At this point both ERMS and AVX2 are supported. While REP STOS can use a no-RFO
+ // write protocol, ERMS could show the same or slower performance compared to
+ // Non-Temporal Stores when the size is bigger than the LLC, depending on hardware.
+ CMPQ BX, $0x2000000
+ JAE loop_preheader_avx2_huge
+
+loop_erms:
+ // STOSQ is used to guarantee that the whole zeroed pointer-sized word is visible
+ // to the memory subsystem, as the GC requires this.
+ MOVQ BX, CX
+ SHRQ $3, CX
+ ANDQ $7, BX
+ REP; STOSQ
+ JMP tail
+
+loop_preheader_avx2_huge:
+ // Align to 32 byte boundary
+ VMOVDQU Y0, 0(DI)
+ MOVQ DI, SI
+ ADDQ $32, DI
+ ANDQ $~31, DI
+ SUBQ DI, SI
+ ADDQ SI, BX
+loop_avx2_huge:
+ VMOVNTDQ Y0, 0(DI)
+ VMOVNTDQ Y0, 32(DI)
+ VMOVNTDQ Y0, 64(DI)
+ VMOVNTDQ Y0, 96(DI)
+ SUBQ $128, BX
+ ADDQ $128, DI
+ CMPQ BX, $128
+ JAE loop_avx2_huge
+ // In the description of MOVNTDQ in [1]
+ // "... fencing operation implemented with the SFENCE or MFENCE instruction
+ // should be used in conjunction with MOVNTDQ instructions..."
+ // [1] 64-ia-32-architectures-software-developer-manual-325462.pdf
+ SFENCE
+ VMOVDQU Y0, -32(DI)(BX*1)
+ VMOVDQU Y0, -64(DI)(BX*1)
+ VMOVDQU Y0, -96(DI)(BX*1)
+ VMOVDQU Y0, -128(DI)(BX*1)
+ VZEROUPPER
+ RET
+
+_1or2:
+ MOVB AX, (DI)
+ MOVB AX, -1(DI)(BX*1)
+ RET
+_0:
+ RET
+_3or4:
+ MOVW AX, (DI)
+ MOVW AX, -2(DI)(BX*1)
+ RET
+_5through7:
+ MOVL AX, (DI)
+ MOVL AX, -4(DI)(BX*1)
+ RET
+_8:
+ // We need a separate case for 8 to make sure we clear pointers atomically.
+ MOVQ AX, (DI)
+ RET
+_9through16:
+ MOVQ AX, (DI)
+ MOVQ AX, -8(DI)(BX*1)
+ RET
+_17through32:
+ MOVOU X15, (DI)
+ MOVOU X15, -16(DI)(BX*1)
+ RET
+_33through64:
+ MOVOU X15, (DI)
+ MOVOU X15, 16(DI)
+ MOVOU X15, -32(DI)(BX*1)
+ MOVOU X15, -16(DI)(BX*1)
+ RET
+_65through128:
+ MOVOU X15, (DI)
+ MOVOU X15, 16(DI)
+ MOVOU X15, 32(DI)
+ MOVOU X15, 48(DI)
+ MOVOU X15, -64(DI)(BX*1)
+ MOVOU X15, -48(DI)(BX*1)
+ MOVOU X15, -32(DI)(BX*1)
+ MOVOU X15, -16(DI)(BX*1)
+ RET
+_129through256:
+ MOVOU X15, (DI)
+ MOVOU X15, 16(DI)
+ MOVOU X15, 32(DI)
+ MOVOU X15, 48(DI)
+ MOVOU X15, 64(DI)
+ MOVOU X15, 80(DI)
+ MOVOU X15, 96(DI)
+ MOVOU X15, 112(DI)
+ MOVOU X15, -128(DI)(BX*1)
+ MOVOU X15, -112(DI)(BX*1)
+ MOVOU X15, -96(DI)(BX*1)
+ MOVOU X15, -80(DI)(BX*1)
+ MOVOU X15, -64(DI)(BX*1)
+ MOVOU X15, -48(DI)(BX*1)
+ MOVOU X15, -32(DI)(BX*1)
+ MOVOU X15, -16(DI)(BX*1)
+ RET
diff --git a/contrib/go/_std_1.19/src/runtime/memmove_amd64.s b/contrib/go/_std_1.19/src/runtime/memmove_amd64.s
new file mode 100644
index 0000000000..018bb0b19d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/memmove_amd64.s
@@ -0,0 +1,532 @@
+// Derived from Inferno's libkern/memmove-386.s (adapted for amd64)
+// https://bitbucket.org/inferno-os/inferno-os/src/master/libkern/memmove-386.s
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
+// Portions Copyright 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build !plan9
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// See memmove Go doc for important implementation constraints.
+
+// func memmove(to, from unsafe.Pointer, n uintptr)
+// ABIInternal for performance.
+TEXT runtime·memmove<ABIInternal>(SB), NOSPLIT, $0-24
+ // AX = to
+ // BX = from
+ // CX = n
+ MOVQ AX, DI
+ MOVQ BX, SI
+ MOVQ CX, BX
+
+ // REP instructions have a high startup cost, so we handle small sizes
+ // with some straightline code. The REP MOVSQ instruction is really fast
+ // for large sizes. The cutover is approximately 2K.
+tail:
+ // move_129through256 or smaller work whether or not the source and the
+ // destination memory regions overlap because they load all data into
+ // registers before writing it back. move_256through2048 on the other
+ // hand can be used only when the memory regions don't overlap or the copy
+ // direction is forward.
+ //
+ // BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing.
+ TESTQ BX, BX
+ JEQ move_0
+ CMPQ BX, $2
+ JBE move_1or2
+ CMPQ BX, $4
+ JB move_3
+ JBE move_4
+ CMPQ BX, $8
+ JB move_5through7
+ JE move_8
+ CMPQ BX, $16
+ JBE move_9through16
+ CMPQ BX, $32
+ JBE move_17through32
+ CMPQ BX, $64
+ JBE move_33through64
+ CMPQ BX, $128
+ JBE move_65through128
+ CMPQ BX, $256
+ JBE move_129through256
+
+ TESTB $1, runtime·useAVXmemmove(SB)
+ JNZ avxUnaligned
+
+/*
+ * check and set for backwards
+ */
+ CMPQ SI, DI
+ JLS back
+
+/*
+ * forward copy loop
+ */
+forward:
+ CMPQ BX, $2048
+ JLS move_256through2048
+
+ // If REP MOVSB isn't fast, don't use it
+ CMPB internal∕cpu·X86+const_offsetX86HasERMS(SB), $1 // enhanced REP MOVSB/STOSB
+ JNE fwdBy8
+
+ // Check alignment
+ MOVL SI, AX
+ ORL DI, AX
+ TESTL $7, AX
+ JEQ fwdBy8
+
+ // Do 1 byte at a time
+ MOVQ BX, CX
+ REP; MOVSB
+ RET
+
+fwdBy8:
+ // Do 8 bytes at a time
+ MOVQ BX, CX
+ SHRQ $3, CX
+ ANDQ $7, BX
+ REP; MOVSQ
+ JMP tail
+
+back:
+/*
+ * check overlap
+ */
+ MOVQ SI, CX
+ ADDQ BX, CX
+ CMPQ CX, DI
+ JLS forward
+/*
+ * whole thing backwards has
+ * adjusted addresses
+ */
+ ADDQ BX, DI
+ ADDQ BX, SI
+ STD
+
+/*
+ * copy
+ */
+ MOVQ BX, CX
+ SHRQ $3, CX
+ ANDQ $7, BX
+
+ SUBQ $8, DI
+ SUBQ $8, SI
+ REP; MOVSQ
+
+ CLD
+ ADDQ $8, DI
+ ADDQ $8, SI
+ SUBQ BX, DI
+ SUBQ BX, SI
+ JMP tail
+
+move_1or2:
+ MOVB (SI), AX
+ MOVB -1(SI)(BX*1), CX
+ MOVB AX, (DI)
+ MOVB CX, -1(DI)(BX*1)
+ RET
+move_0:
+ RET
+move_4:
+ MOVL (SI), AX
+ MOVL AX, (DI)
+ RET
+move_3:
+ MOVW (SI), AX
+ MOVB 2(SI), CX
+ MOVW AX, (DI)
+ MOVB CX, 2(DI)
+ RET
+move_5through7:
+ MOVL (SI), AX
+ MOVL -4(SI)(BX*1), CX
+ MOVL AX, (DI)
+ MOVL CX, -4(DI)(BX*1)
+ RET
+move_8:
+ // We need a separate case for 8 to make sure we write pointers atomically.
+ MOVQ (SI), AX
+ MOVQ AX, (DI)
+ RET
+move_9through16:
+ MOVQ (SI), AX
+ MOVQ -8(SI)(BX*1), CX
+ MOVQ AX, (DI)
+ MOVQ CX, -8(DI)(BX*1)
+ RET
+move_17through32:
+ MOVOU (SI), X0
+ MOVOU -16(SI)(BX*1), X1
+ MOVOU X0, (DI)
+ MOVOU X1, -16(DI)(BX*1)
+ RET
+move_33through64:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(BX*1), X2
+ MOVOU -16(SI)(BX*1), X3
+ MOVOU X0, (DI)
+ MOVOU X1, 16(DI)
+ MOVOU X2, -32(DI)(BX*1)
+ MOVOU X3, -16(DI)(BX*1)
+ RET
+move_65through128:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU 32(SI), X2
+ MOVOU 48(SI), X3
+ MOVOU -64(SI)(BX*1), X4
+ MOVOU -48(SI)(BX*1), X5
+ MOVOU -32(SI)(BX*1), X6
+ MOVOU -16(SI)(BX*1), X7
+ MOVOU X0, (DI)
+ MOVOU X1, 16(DI)
+ MOVOU X2, 32(DI)
+ MOVOU X3, 48(DI)
+ MOVOU X4, -64(DI)(BX*1)
+ MOVOU X5, -48(DI)(BX*1)
+ MOVOU X6, -32(DI)(BX*1)
+ MOVOU X7, -16(DI)(BX*1)
+ RET
+move_129through256:
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU 32(SI), X2
+ MOVOU 48(SI), X3
+ MOVOU 64(SI), X4
+ MOVOU 80(SI), X5
+ MOVOU 96(SI), X6
+ MOVOU 112(SI), X7
+ MOVOU -128(SI)(BX*1), X8
+ MOVOU -112(SI)(BX*1), X9
+ MOVOU -96(SI)(BX*1), X10
+ MOVOU -80(SI)(BX*1), X11
+ MOVOU -64(SI)(BX*1), X12
+ MOVOU -48(SI)(BX*1), X13
+ MOVOU -32(SI)(BX*1), X14
+ MOVOU -16(SI)(BX*1), X15
+ MOVOU X0, (DI)
+ MOVOU X1, 16(DI)
+ MOVOU X2, 32(DI)
+ MOVOU X3, 48(DI)
+ MOVOU X4, 64(DI)
+ MOVOU X5, 80(DI)
+ MOVOU X6, 96(DI)
+ MOVOU X7, 112(DI)
+ MOVOU X8, -128(DI)(BX*1)
+ MOVOU X9, -112(DI)(BX*1)
+ MOVOU X10, -96(DI)(BX*1)
+ MOVOU X11, -80(DI)(BX*1)
+ MOVOU X12, -64(DI)(BX*1)
+ MOVOU X13, -48(DI)(BX*1)
+ MOVOU X14, -32(DI)(BX*1)
+ MOVOU X15, -16(DI)(BX*1)
+ // X15 must be zero on return
+ PXOR X15, X15
+ RET
+move_256through2048:
+ SUBQ $256, BX
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU 32(SI), X2
+ MOVOU 48(SI), X3
+ MOVOU 64(SI), X4
+ MOVOU 80(SI), X5
+ MOVOU 96(SI), X6
+ MOVOU 112(SI), X7
+ MOVOU 128(SI), X8
+ MOVOU 144(SI), X9
+ MOVOU 160(SI), X10
+ MOVOU 176(SI), X11
+ MOVOU 192(SI), X12
+ MOVOU 208(SI), X13
+ MOVOU 224(SI), X14
+ MOVOU 240(SI), X15
+ MOVOU X0, (DI)
+ MOVOU X1, 16(DI)
+ MOVOU X2, 32(DI)
+ MOVOU X3, 48(DI)
+ MOVOU X4, 64(DI)
+ MOVOU X5, 80(DI)
+ MOVOU X6, 96(DI)
+ MOVOU X7, 112(DI)
+ MOVOU X8, 128(DI)
+ MOVOU X9, 144(DI)
+ MOVOU X10, 160(DI)
+ MOVOU X11, 176(DI)
+ MOVOU X12, 192(DI)
+ MOVOU X13, 208(DI)
+ MOVOU X14, 224(DI)
+ MOVOU X15, 240(DI)
+ CMPQ BX, $256
+ LEAQ 256(SI), SI
+ LEAQ 256(DI), DI
+ JGE move_256through2048
+ // X15 must be zero on return
+ PXOR X15, X15
+ JMP tail
+
+avxUnaligned:
+ // There are two implementations of the move algorithm.
+ // The first one, for non-overlapping memory regions, uses forward copying.
+ // The second one, for overlapping regions, uses backward copying.
+ MOVQ DI, CX
+ SUBQ SI, CX
+ // Now CX contains the distance between SRC and DEST.
+ CMPQ CX, BX
+ // If the distance is less than the region length, the regions overlap.
+ JC copy_backward
+
+ // Non-temporal copy would be better for big sizes.
+ CMPQ BX, $0x100000
+ JAE gobble_big_data_fwd
+
+ // Memory layout on the source side
+ // SI CX
+ // |<---------BX before correction--------->|
+ // | |<--BX corrected-->| |
+ // | | |<--- AX --->|
+ // |<-R11->| |<-128 bytes->|
+ // +----------------------------------------+
+ // | Head | Body | Tail |
+ // +-------+------------------+-------------+
+ // ^ ^ ^
+ // | | |
+ // Save head into Y4 Save tail into X5..X12
+ // |
+ // SI+R11, where R11 = ((DI & -32) + 32) - DI
+ // Algorithm:
+ // 1. Unaligned save of the tail's 128 bytes
+ // 2. Unaligned save of the head's 32 bytes
+ // 3. Destination-aligned copying of body (128 bytes per iteration)
+ // 4. Put the head in its new place
+ // 5. Put the tail in its new place
+ // It can be important to satisfy the processor's pipeline requirements for
+ // small sizes, as the cost of copying the unaligned memory regions is
+ // comparable with the cost of the main loop, so the code is slightly
+ // tangled here. There is a cleaner implementation of this algorithm for
+ // bigger sizes, where the cost of copying the unaligned parts is
+ // negligible; you can see it after the gobble_big_data_fwd label.
+ LEAQ (SI)(BX*1), CX
+ MOVQ DI, R10
+ // CX points to the end of the buffer, so we need to go back slightly. We will use negative offsets there.
+ MOVOU -0x80(CX), X5
+ MOVOU -0x70(CX), X6
+ MOVQ $0x80, AX
+ // Align destination address
+ ANDQ $-32, DI
+ ADDQ $32, DI
+ // Continue tail saving.
+ MOVOU -0x60(CX), X7
+ MOVOU -0x50(CX), X8
+ // Make R11 the delta between the aligned and unaligned destination addresses.
+ MOVQ DI, R11
+ SUBQ R10, R11
+ // Continue tail saving.
+ MOVOU -0x40(CX), X9
+ MOVOU -0x30(CX), X10
+ // Adjust the bytes-to-copy value, as we've prepared the unaligned part for copying.
+ SUBQ R11, BX
+ // Continue tail saving.
+ MOVOU -0x20(CX), X11
+ MOVOU -0x10(CX), X12
+ // The tail will be put in its place after the main body is copied.
+ // It's time for the unaligned head part.
+ VMOVDQU (SI), Y4
+ // Adjust source address to point past head.
+ ADDQ R11, SI
+ SUBQ AX, BX
+ // Aligned memory copying there
+gobble_128_loop:
+ VMOVDQU (SI), Y0
+ VMOVDQU 0x20(SI), Y1
+ VMOVDQU 0x40(SI), Y2
+ VMOVDQU 0x60(SI), Y3
+ ADDQ AX, SI
+ VMOVDQA Y0, (DI)
+ VMOVDQA Y1, 0x20(DI)
+ VMOVDQA Y2, 0x40(DI)
+ VMOVDQA Y3, 0x60(DI)
+ ADDQ AX, DI
+ SUBQ AX, BX
+ JA gobble_128_loop
+ // Now we can store unaligned parts.
+ ADDQ AX, BX
+ ADDQ DI, BX
+ VMOVDQU Y4, (R10)
+ VZEROUPPER
+ MOVOU X5, -0x80(BX)
+ MOVOU X6, -0x70(BX)
+ MOVOU X7, -0x60(BX)
+ MOVOU X8, -0x50(BX)
+ MOVOU X9, -0x40(BX)
+ MOVOU X10, -0x30(BX)
+ MOVOU X11, -0x20(BX)
+ MOVOU X12, -0x10(BX)
+ RET
+
+gobble_big_data_fwd:
+ // This is the forward copy for big regions.
+ // It uses non-temporal mov instructions.
+ // The details of this algorithm are commented above, in the small-size version.
+ LEAQ (SI)(BX*1), CX
+ MOVOU -0x80(SI)(BX*1), X5
+ MOVOU -0x70(CX), X6
+ MOVOU -0x60(CX), X7
+ MOVOU -0x50(CX), X8
+ MOVOU -0x40(CX), X9
+ MOVOU -0x30(CX), X10
+ MOVOU -0x20(CX), X11
+ MOVOU -0x10(CX), X12
+ VMOVDQU (SI), Y4
+ MOVQ DI, R8
+ ANDQ $-32, DI
+ ADDQ $32, DI
+ MOVQ DI, R10
+ SUBQ R8, R10
+ SUBQ R10, BX
+ ADDQ R10, SI
+ LEAQ (DI)(BX*1), CX
+ SUBQ $0x80, BX
+gobble_mem_fwd_loop:
+ PREFETCHNTA 0x1C0(SI)
+ PREFETCHNTA 0x280(SI)
+ // Prefetch values were chosen empirically.
+ // The approach to prefetch usage follows 9.5.6 of [1].
+ // [1] 64-ia-32-architectures-optimization-manual.pdf
+ // https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
+ VMOVDQU (SI), Y0
+ VMOVDQU 0x20(SI), Y1
+ VMOVDQU 0x40(SI), Y2
+ VMOVDQU 0x60(SI), Y3
+ ADDQ $0x80, SI
+ VMOVNTDQ Y0, (DI)
+ VMOVNTDQ Y1, 0x20(DI)
+ VMOVNTDQ Y2, 0x40(DI)
+ VMOVNTDQ Y3, 0x60(DI)
+ ADDQ $0x80, DI
+ SUBQ $0x80, BX
+ JA gobble_mem_fwd_loop
+ // NT instructions don't follow the normal cache-coherency rules.
+ // We need SFENCE here to make the copied data visible in a timely manner.
+ SFENCE
+ VMOVDQU Y4, (R8)
+ VZEROUPPER
+ MOVOU X5, -0x80(CX)
+ MOVOU X6, -0x70(CX)
+ MOVOU X7, -0x60(CX)
+ MOVOU X8, -0x50(CX)
+ MOVOU X9, -0x40(CX)
+ MOVOU X10, -0x30(CX)
+ MOVOU X11, -0x20(CX)
+ MOVOU X12, -0x10(CX)
+ RET
+
+copy_backward:
+ MOVQ DI, AX
+ // Backward copying is much the same as the forward copy.
+ // First we load the unaligned tail at the beginning of the region.
+ MOVOU (SI), X5
+ MOVOU 0x10(SI), X6
+ ADDQ BX, DI
+ MOVOU 0x20(SI), X7
+ MOVOU 0x30(SI), X8
+ LEAQ -0x20(DI), R10
+ MOVQ DI, R11
+ MOVOU 0x40(SI), X9
+ MOVOU 0x50(SI), X10
+ ANDQ $0x1F, R11
+ MOVOU 0x60(SI), X11
+ MOVOU 0x70(SI), X12
+ XORQ R11, DI
+ // Let's point SI to the end of the region
+ ADDQ BX, SI
+ // and load the unaligned head into Y4.
+ VMOVDQU -0x20(SI), Y4
+ SUBQ R11, SI
+ SUBQ R11, BX
+ // If there is enough data for non-temporal moves, go to the special loop.
+ CMPQ BX, $0x100000
+ JA gobble_big_data_bwd
+ SUBQ $0x80, BX
+gobble_mem_bwd_loop:
+ VMOVDQU -0x20(SI), Y0
+ VMOVDQU -0x40(SI), Y1
+ VMOVDQU -0x60(SI), Y2
+ VMOVDQU -0x80(SI), Y3
+ SUBQ $0x80, SI
+ VMOVDQA Y0, -0x20(DI)
+ VMOVDQA Y1, -0x40(DI)
+ VMOVDQA Y2, -0x60(DI)
+ VMOVDQA Y3, -0x80(DI)
+ SUBQ $0x80, DI
+ SUBQ $0x80, BX
+ JA gobble_mem_bwd_loop
+ // Let's store unaligned data
+ VMOVDQU Y4, (R10)
+ VZEROUPPER
+ MOVOU X5, (AX)
+ MOVOU X6, 0x10(AX)
+ MOVOU X7, 0x20(AX)
+ MOVOU X8, 0x30(AX)
+ MOVOU X9, 0x40(AX)
+ MOVOU X10, 0x50(AX)
+ MOVOU X11, 0x60(AX)
+ MOVOU X12, 0x70(AX)
+ RET
+
+gobble_big_data_bwd:
+ SUBQ $0x80, BX
+gobble_big_mem_bwd_loop:
+ PREFETCHNTA -0x1C0(SI)
+ PREFETCHNTA -0x280(SI)
+ VMOVDQU -0x20(SI), Y0
+ VMOVDQU -0x40(SI), Y1
+ VMOVDQU -0x60(SI), Y2
+ VMOVDQU -0x80(SI), Y3
+ SUBQ $0x80, SI
+ VMOVNTDQ Y0, -0x20(DI)
+ VMOVNTDQ Y1, -0x40(DI)
+ VMOVNTDQ Y2, -0x60(DI)
+ VMOVNTDQ Y3, -0x80(DI)
+ SUBQ $0x80, DI
+ SUBQ $0x80, BX
+ JA gobble_big_mem_bwd_loop
+ SFENCE
+ VMOVDQU Y4, (R10)
+ VZEROUPPER
+ MOVOU X5, (AX)
+ MOVOU X6, 0x10(AX)
+ MOVOU X7, 0x20(AX)
+ MOVOU X8, 0x30(AX)
+ MOVOU X9, 0x40(AX)
+ MOVOU X10, 0x50(AX)
+ MOVOU X11, 0x60(AX)
+ MOVOU X12, 0x70(AX)
+ RET
diff --git a/contrib/go/_std_1.19/src/runtime/metrics.go b/contrib/go/_std_1.19/src/runtime/metrics.go
new file mode 100644
index 0000000000..986121b9c2
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/metrics.go
@@ -0,0 +1,618 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// Metrics implementation exported to runtime/metrics.
+
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+var (
+ // metrics is a map of runtime/metrics keys to data used by the runtime
+ // to sample each metric's value. metricsInit indicates it has been
+ // initialized.
+ //
+ // These fields are protected by metricsSema which should be
+ // locked/unlocked with metricsLock() / metricsUnlock().
+ metricsSema uint32 = 1
+ metricsInit bool
+ metrics map[string]metricData
+
+ sizeClassBuckets []float64
+ timeHistBuckets []float64
+)
+
+type metricData struct {
+ // deps is the set of runtime statistics that this metric
+ // depends on. Before compute is called, the statAggregate
+ // which will be passed must ensure() these dependencies.
+ deps statDepSet
+
+ // compute is a function that populates a metricValue
+ // given a populated statAggregate structure.
+ compute func(in *statAggregate, out *metricValue)
+}
+
+func metricsLock() {
+ // Acquire the metricsSema but with handoff. Operations are typically
+ // expensive enough that queueing up goroutines and handing off between
+ // them will be noticeably better-behaved.
+ semacquire1(&metricsSema, true, 0, 0)
+ if raceenabled {
+ raceacquire(unsafe.Pointer(&metricsSema))
+ }
+}
+
+func metricsUnlock() {
+ if raceenabled {
+ racerelease(unsafe.Pointer(&metricsSema))
+ }
+ semrelease(&metricsSema)
+}
+
+// initMetrics initializes the metrics map if it hasn't been yet.
+//
+// metricsSema must be held.
+func initMetrics() {
+ if metricsInit {
+ return
+ }
+
+ sizeClassBuckets = make([]float64, _NumSizeClasses, _NumSizeClasses+1)
+ // Skip size class 0, which is a stand-in for large objects; large
+ // objects are tracked separately (and they actually get placed in
+ // the last bucket, not the first).
+ sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
+ for i := 1; i < _NumSizeClasses; i++ {
+ // Size classes have an inclusive upper bound
+ // and an exclusive lower bound (e.g. the 48-byte size class is
+ // (32, 48]) whereas we want an inclusive lower bound
+ // and an exclusive upper bound (e.g. the 48-byte size class is
+ // [33, 49)). We can achieve this by shifting all bucket
+ // boundaries up by 1.
+ //
+ // Also, a float64 can precisely represent integers with
+ // value up to 2^53 and size classes are relatively small
+ // (nowhere near 2^48 even) so this will give us exact
+ // boundaries.
+ sizeClassBuckets[i] = float64(class_to_size[i] + 1)
+ }
+ sizeClassBuckets = append(sizeClassBuckets, float64Inf())
+
+ timeHistBuckets = timeHistogramMetricsBuckets()
+ metrics = map[string]metricData{
+ "/cgo/go-to-c-calls:calls": {
+ compute: func(_ *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = uint64(NumCgoCall())
+ },
+ },
+ "/gc/cycles/automatic:gc-cycles": {
+ deps: makeStatDepSet(sysStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.sysStats.gcCyclesDone - in.sysStats.gcCyclesForced
+ },
+ },
+ "/gc/cycles/forced:gc-cycles": {
+ deps: makeStatDepSet(sysStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.sysStats.gcCyclesForced
+ },
+ },
+ "/gc/cycles/total:gc-cycles": {
+ deps: makeStatDepSet(sysStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.sysStats.gcCyclesDone
+ },
+ },
+ "/gc/heap/allocs-by-size:bytes": {
+ deps: makeStatDepSet(heapStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ hist := out.float64HistOrInit(sizeClassBuckets)
+ hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeAllocCount)
+ // Cut off the first index which is ostensibly for size class 0,
+ // but large objects are tracked separately so it's actually unused.
+ for i, count := range in.heapStats.smallAllocCount[1:] {
+ hist.counts[i] = uint64(count)
+ }
+ },
+ },
+ "/gc/heap/allocs:bytes": {
+ deps: makeStatDepSet(heapStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.heapStats.totalAllocated
+ },
+ },
+ "/gc/heap/allocs:objects": {
+ deps: makeStatDepSet(heapStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.heapStats.totalAllocs
+ },
+ },
+ "/gc/heap/frees-by-size:bytes": {
+ deps: makeStatDepSet(heapStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ hist := out.float64HistOrInit(sizeClassBuckets)
+ hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeFreeCount)
+ // Cut off the first index which is ostensibly for size class 0,
+ // but large objects are tracked separately so it's actually unused.
+ for i, count := range in.heapStats.smallFreeCount[1:] {
+ hist.counts[i] = uint64(count)
+ }
+ },
+ },
+ "/gc/heap/frees:bytes": {
+ deps: makeStatDepSet(heapStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.heapStats.totalFreed
+ },
+ },
+ "/gc/heap/frees:objects": {
+ deps: makeStatDepSet(heapStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.heapStats.totalFrees
+ },
+ },
+ "/gc/heap/goal:bytes": {
+ deps: makeStatDepSet(sysStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.sysStats.heapGoal
+ },
+ },
+ "/gc/heap/objects:objects": {
+ deps: makeStatDepSet(heapStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.heapStats.numObjects
+ },
+ },
+ "/gc/heap/tiny/allocs:objects": {
+ deps: makeStatDepSet(heapStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = uint64(in.heapStats.tinyAllocCount)
+ },
+ },
+ "/gc/limiter/last-enabled:gc-cycle": {
+ compute: func(_ *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = uint64(gcCPULimiter.lastEnabledCycle.Load())
+ },
+ },
+ "/gc/pauses:seconds": {
+ compute: func(_ *statAggregate, out *metricValue) {
+ hist := out.float64HistOrInit(timeHistBuckets)
+ // The bottom-most bucket, containing negative values, is tracked
+ // separately as underflow, so fill that in manually and then
+ // iterate over the rest.
+ hist.counts[0] = atomic.Load64(&memstats.gcPauseDist.underflow)
+ for i := range memstats.gcPauseDist.counts {
+ hist.counts[i+1] = atomic.Load64(&memstats.gcPauseDist.counts[i])
+ }
+ },
+ },
+ "/gc/stack/starting-size:bytes": {
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = uint64(startingStackSize)
+ },
+ },
+ "/memory/classes/heap/free:bytes": {
+ deps: makeStatDepSet(heapStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = uint64(in.heapStats.committed - in.heapStats.inHeap -
+ in.heapStats.inStacks - in.heapStats.inWorkBufs -
+ in.heapStats.inPtrScalarBits)
+ },
+ },
+ "/memory/classes/heap/objects:bytes": {
+ deps: makeStatDepSet(heapStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.heapStats.inObjects
+ },
+ },
+ "/memory/classes/heap/released:bytes": {
+ deps: makeStatDepSet(heapStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = uint64(in.heapStats.released)
+ },
+ },
+ "/memory/classes/heap/stacks:bytes": {
+ deps: makeStatDepSet(heapStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = uint64(in.heapStats.inStacks)
+ },
+ },
+ "/memory/classes/heap/unused:bytes": {
+ deps: makeStatDepSet(heapStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = uint64(in.heapStats.inHeap) - in.heapStats.inObjects
+ },
+ },
+ "/memory/classes/metadata/mcache/free:bytes": {
+ deps: makeStatDepSet(sysStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.sysStats.mCacheSys - in.sysStats.mCacheInUse
+ },
+ },
+ "/memory/classes/metadata/mcache/inuse:bytes": {
+ deps: makeStatDepSet(sysStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.sysStats.mCacheInUse
+ },
+ },
+ "/memory/classes/metadata/mspan/free:bytes": {
+ deps: makeStatDepSet(sysStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.sysStats.mSpanSys - in.sysStats.mSpanInUse
+ },
+ },
+ "/memory/classes/metadata/mspan/inuse:bytes": {
+ deps: makeStatDepSet(sysStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.sysStats.mSpanInUse
+ },
+ },
+ "/memory/classes/metadata/other:bytes": {
+ deps: makeStatDepSet(heapStatsDep, sysStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = uint64(in.heapStats.inWorkBufs+in.heapStats.inPtrScalarBits) + in.sysStats.gcMiscSys
+ },
+ },
+ "/memory/classes/os-stacks:bytes": {
+ deps: makeStatDepSet(sysStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.sysStats.stacksSys
+ },
+ },
+ "/memory/classes/other:bytes": {
+ deps: makeStatDepSet(sysStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.sysStats.otherSys
+ },
+ },
+ "/memory/classes/profiling/buckets:bytes": {
+ deps: makeStatDepSet(sysStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.sysStats.buckHashSys
+ },
+ },
+ "/memory/classes/total:bytes": {
+ deps: makeStatDepSet(heapStatsDep, sysStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = uint64(in.heapStats.committed+in.heapStats.released) +
+ in.sysStats.stacksSys + in.sysStats.mSpanSys +
+ in.sysStats.mCacheSys + in.sysStats.buckHashSys +
+ in.sysStats.gcMiscSys + in.sysStats.otherSys
+ },
+ },
+ "/sched/gomaxprocs:threads": {
+ compute: func(_ *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = uint64(gomaxprocs)
+ },
+ },
+ "/sched/goroutines:goroutines": {
+ compute: func(_ *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = uint64(gcount())
+ },
+ },
+ "/sched/latencies:seconds": {
+ compute: func(_ *statAggregate, out *metricValue) {
+ hist := out.float64HistOrInit(timeHistBuckets)
+ hist.counts[0] = atomic.Load64(&sched.timeToRun.underflow)
+ for i := range sched.timeToRun.counts {
+ hist.counts[i+1] = atomic.Load64(&sched.timeToRun.counts[i])
+ }
+ },
+ },
+ }
+ metricsInit = true
+}
+
+// statDep is a dependency on a group of statistics
+// that a metric might have.
+type statDep uint
+
+const (
+ heapStatsDep statDep = iota // corresponds to heapStatsAggregate
+ sysStatsDep // corresponds to sysStatsAggregate
+ numStatsDeps
+)
+
+// statDepSet represents a set of statDeps.
+//
+// Under the hood, it's a bitmap.
+type statDepSet [1]uint64
+
+// makeStatDepSet creates a new statDepSet from a list of statDeps.
+func makeStatDepSet(deps ...statDep) statDepSet {
+ var s statDepSet
+ for _, d := range deps {
+ s[d/64] |= 1 << (d % 64)
+ }
+ return s
+}
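+
+// As a small illustration of the bitmap encoding: with the statDep values
+// defined above (heapStatsDep == 0, sysStatsDep == 1),
+//
+//	makeStatDepSet(heapStatsDep, sysStatsDep) // == statDepSet{0b11}
+//
+// so the union, difference, and membership operations below reduce to
+// single-word bit operations.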
+
+// difference returns the set difference of s from b as a new set.
+func (s statDepSet) difference(b statDepSet) statDepSet {
+ var c statDepSet
+ for i := range s {
+ c[i] = s[i] &^ b[i]
+ }
+ return c
+}
+
+// union returns the union of the two sets as a new set.
+func (s statDepSet) union(b statDepSet) statDepSet {
+ var c statDepSet
+ for i := range s {
+ c[i] = s[i] | b[i]
+ }
+ return c
+}
+
+// empty returns true if there are no dependencies in the set.
+func (s *statDepSet) empty() bool {
+ for _, c := range s {
+ if c != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// has returns true if the set contains a given statDep.
+func (s *statDepSet) has(d statDep) bool {
+ return s[d/64]&(1<<(d%64)) != 0
+}
+
+// heapStatsAggregate represents memory stats obtained from the
+// runtime. This set of stats is grouped together because they
+// depend on each other in some way to make sense of the runtime's
+// current heap memory use. They're also sharded across Ps, so it
+// makes sense to grab them all at once.
+type heapStatsAggregate struct {
+ heapStatsDelta
+
+ // Derived from values in heapStatsDelta.
+
+ // inObjects is the bytes of memory occupied by objects.
+ inObjects uint64
+
+ // numObjects is the number of live objects in the heap.
+ numObjects uint64
+
+ // totalAllocated is the total bytes of heap objects allocated
+ // over the lifetime of the program.
+ totalAllocated uint64
+
+ // totalFreed is the total bytes of heap objects freed
+ // over the lifetime of the program.
+ totalFreed uint64
+
+ // totalAllocs is the number of heap objects allocated over
+ // the lifetime of the program.
+ totalAllocs uint64
+
+ // totalFrees is the number of heap objects freed over
+ // the lifetime of the program.
+ totalFrees uint64
+}
+
+// compute populates the heapStatsAggregate with values from the runtime.
+func (a *heapStatsAggregate) compute() {
+ memstats.heapStats.read(&a.heapStatsDelta)
+
+ // Calculate derived stats.
+ a.totalAllocs = a.largeAllocCount
+ a.totalFrees = a.largeFreeCount
+ a.totalAllocated = a.largeAlloc
+ a.totalFreed = a.largeFree
+ for i := range a.smallAllocCount {
+ na := a.smallAllocCount[i]
+ nf := a.smallFreeCount[i]
+ a.totalAllocs += na
+ a.totalFrees += nf
+ a.totalAllocated += na * uint64(class_to_size[i])
+ a.totalFreed += nf * uint64(class_to_size[i])
+ }
+ a.inObjects = a.totalAllocated - a.totalFreed
+ a.numObjects = a.totalAllocs - a.totalFrees
+}
+
+// sysStatsAggregate represents system memory stats obtained
+// from the runtime. This set of stats is grouped together because
+// they're all relatively cheap to acquire and generally independent
+// of one another and other runtime memory stats. The fact that they
+// may be acquired at different times, especially with respect to
+// heapStatsAggregate, means there could be some skew, but because
+// these stats are independent, there's no real consistency issue here.
+type sysStatsAggregate struct {
+ stacksSys uint64
+ mSpanSys uint64
+ mSpanInUse uint64
+ mCacheSys uint64
+ mCacheInUse uint64
+ buckHashSys uint64
+ gcMiscSys uint64
+ otherSys uint64
+ heapGoal uint64
+ gcCyclesDone uint64
+ gcCyclesForced uint64
+}
+
+// compute populates the sysStatsAggregate with values from the runtime.
+func (a *sysStatsAggregate) compute() {
+ a.stacksSys = memstats.stacks_sys.load()
+ a.buckHashSys = memstats.buckhash_sys.load()
+ a.gcMiscSys = memstats.gcMiscSys.load()
+ a.otherSys = memstats.other_sys.load()
+ a.heapGoal = gcController.heapGoal()
+ a.gcCyclesDone = uint64(memstats.numgc)
+ a.gcCyclesForced = uint64(memstats.numforcedgc)
+
+ systemstack(func() {
+ lock(&mheap_.lock)
+ a.mSpanSys = memstats.mspan_sys.load()
+ a.mSpanInUse = uint64(mheap_.spanalloc.inuse)
+ a.mCacheSys = memstats.mcache_sys.load()
+ a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
+ unlock(&mheap_.lock)
+ })
+}
+
+// statAggregate is the main driver of the metrics implementation.
+//
+// It contains multiple aggregates of runtime statistics, as well
+// as a set of these aggregates that it has populated. The aggregates
+// are populated lazily by its ensure method.
+type statAggregate struct {
+ ensured statDepSet
+ heapStats heapStatsAggregate
+ sysStats sysStatsAggregate
+}
+
+// ensure populates statistics aggregates determined by deps if they
+// haven't yet been populated.
+func (a *statAggregate) ensure(deps *statDepSet) {
+ missing := deps.difference(a.ensured)
+ if missing.empty() {
+ return
+ }
+ for i := statDep(0); i < numStatsDeps; i++ {
+ if !missing.has(i) {
+ continue
+ }
+ switch i {
+ case heapStatsDep:
+ a.heapStats.compute()
+ case sysStatsDep:
+ a.sysStats.compute()
+ }
+ }
+ a.ensured = a.ensured.union(missing)
+}
+
+// metricKind is a runtime copy of runtime/metrics.ValueKind and
+// must be kept structurally identical to that type.
+type metricKind int
+
+const (
+ // These values must be kept identical to their corresponding Kind* values
+ // in the runtime/metrics package.
+ metricKindBad metricKind = iota
+ metricKindUint64
+ metricKindFloat64
+ metricKindFloat64Histogram
+)
+
+// metricSample is a runtime copy of runtime/metrics.Sample and
+// must be kept structurally identical to that type.
+type metricSample struct {
+ name string
+ value metricValue
+}
+
+// metricValue is a runtime copy of runtime/metrics.Value and
+// must be kept structurally identical to that type.
+type metricValue struct {
+ kind metricKind
+ scalar uint64 // contains scalar values for scalar Kinds.
+ pointer unsafe.Pointer // contains non-scalar values.
+}
+
+// float64HistOrInit tries to pull out an existing float64Histogram
+// from the value, but if none exists, then it allocates one with
+// the given buckets.
+func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogram {
+ var hist *metricFloat64Histogram
+ if v.kind == metricKindFloat64Histogram && v.pointer != nil {
+ hist = (*metricFloat64Histogram)(v.pointer)
+ } else {
+ v.kind = metricKindFloat64Histogram
+ hist = new(metricFloat64Histogram)
+ v.pointer = unsafe.Pointer(hist)
+ }
+ hist.buckets = buckets
+ if len(hist.counts) != len(hist.buckets)-1 {
+ hist.counts = make([]uint64, len(buckets)-1)
+ }
+ return hist
+}
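+
+// Note that len(buckets) boundaries delimit len(buckets)-1 intervals, which
+// is why counts above is kept one element shorter than buckets.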
+
+// metricFloat64Histogram is a runtime copy of runtime/metrics.Float64Histogram
+// and must be kept structurally identical to that type.
+type metricFloat64Histogram struct {
+ counts []uint64
+ buckets []float64
+}
+
+// agg is used by readMetrics, and is protected by metricsSema.
+//
+// Managed as a global variable because its pointer will be
+// an argument to a dynamically-defined function, and we'd
+// like to avoid it escaping to the heap.
+var agg statAggregate
+
+// readMetrics is the implementation of runtime/metrics.Read.
+//
+//go:linkname readMetrics runtime/metrics.runtime_readMetrics
+func readMetrics(samplesp unsafe.Pointer, len int, cap int) {
+ // Construct a slice from the args.
+ sl := slice{samplesp, len, cap}
+ samples := *(*[]metricSample)(unsafe.Pointer(&sl))
+
+ metricsLock()
+
+ // Ensure the map is initialized.
+ initMetrics()
+
+ // Clear agg defensively.
+ agg = statAggregate{}
+
+ // Sample.
+ for i := range samples {
+ sample := &samples[i]
+ data, ok := metrics[sample.name]
+ if !ok {
+ sample.value.kind = metricKindBad
+ continue
+ }
+ // Ensure we have all the stats we need.
+ // agg is populated lazily.
+ agg.ensure(&data.deps)
+
+ // Compute the value based on the stats we have.
+ data.compute(&agg, &sample.value)
+ }
+
+ metricsUnlock()
+}
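+
+// For context, readMetrics is reached through the public runtime/metrics
+// package via the go:linkname directive above. A rough sketch of a caller:
+//
+//	import "runtime/metrics"
+//
+//	s := []metrics.Sample{{Name: "/sched/goroutines:goroutines"}}
+//	metrics.Read(s)
+//	if s[0].Value.Kind() == metrics.KindUint64 {
+//		println(s[0].Value.Uint64())
+//	}
+//
+// A name missing from the metrics map comes back as metrics.KindBad,
+// matching the metricKindBad branch above.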
diff --git a/contrib/go/_std_1.19/src/runtime/mfinal.go b/contrib/go/_std_1.19/src/runtime/mfinal.go
new file mode 100644
index 0000000000..f3f3a79fa5
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mfinal.go
@@ -0,0 +1,491 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector: finalizers and block profiling.
+
+package runtime
+
+import (
+ "internal/abi"
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// finblock is an array of finalizers to be executed. finblocks are
+// arranged in a linked list for the finalizer queue.
+//
+// finblock is allocated from non-GC'd memory, so any heap pointers
+// must be specially handled. GC currently assumes that the finalizer
+// queue does not grow during marking (but it can shrink).
+//
+//go:notinheap
+type finblock struct {
+ alllink *finblock
+ next *finblock
+ cnt uint32
+ _ int32
+ fin [(_FinBlockSize - 2*goarch.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
+}
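+
+// As a back-of-the-envelope check of the fin array length above on a 64-bit
+// platform: _FinBlockSize is 4096, the header takes 2*8 + 2*4 = 24 bytes, and
+// each finalizer is 5 words = 40 bytes, so a block holds (4096-24)/40 = 101
+// finalizers.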
+
+var finlock mutex // protects the following variables
+var fing *g // goroutine that runs finalizers
+var finq *finblock // list of finalizers that are to be executed
+var finc *finblock // cache of free blocks
+var finptrmask [_FinBlockSize / goarch.PtrSize / 8]byte
+var fingwait bool
+var fingwake bool
+var allfin *finblock // list of all blocks
+
+// NOTE: Layout known to queuefinalizer.
+type finalizer struct {
+ fn *funcval // function to call (may be a heap pointer)
+ arg unsafe.Pointer // ptr to object (may be a heap pointer)
+ nret uintptr // bytes of return values from fn
+ fint *_type // type of first argument of fn
+ ot *ptrtype // type of ptr to object (may be a heap pointer)
+}
+
+var finalizer1 = [...]byte{
+ // Each Finalizer is 5 words, ptr ptr INT ptr ptr (INT = uintptr here)
+ // Each byte describes 8 words.
+ // Need 8 Finalizers described by 5 bytes before pattern repeats:
+ // ptr ptr INT ptr ptr
+ // ptr ptr INT ptr ptr
+ // ptr ptr INT ptr ptr
+ // ptr ptr INT ptr ptr
+ // ptr ptr INT ptr ptr
+ // ptr ptr INT ptr ptr
+ // ptr ptr INT ptr ptr
+ // ptr ptr INT ptr ptr
+ // aka
+ //
+ // ptr ptr INT ptr ptr ptr ptr INT
+ // ptr ptr ptr ptr INT ptr ptr ptr
+ // ptr INT ptr ptr ptr ptr INT ptr
+ // ptr ptr ptr INT ptr ptr ptr ptr
+ // INT ptr ptr ptr ptr INT ptr ptr
+ //
+ // Assumptions about Finalizer layout checked below.
+ 1<<0 | 1<<1 | 0<<2 | 1<<3 | 1<<4 | 1<<5 | 1<<6 | 0<<7,
+ 1<<0 | 1<<1 | 1<<2 | 1<<3 | 0<<4 | 1<<5 | 1<<6 | 1<<7,
+ 1<<0 | 0<<1 | 1<<2 | 1<<3 | 1<<4 | 1<<5 | 0<<6 | 1<<7,
+ 1<<0 | 1<<1 | 1<<2 | 0<<3 | 1<<4 | 1<<5 | 1<<6 | 1<<7,
+ 0<<0 | 1<<1 | 1<<2 | 1<<3 | 1<<4 | 0<<5 | 1<<6 | 1<<7,
+}
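+
+// As a concrete check of the first mask byte above: it covers words 0-7,
+// whose pattern is ptr ptr INT ptr ptr ptr ptr INT, i.e. bits 1,1,0,1,1,1,1,0
+// from the least significant bit up, which is 1+2+8+16+32+64 = 0x7b.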
+
+func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
+ if gcphase != _GCoff {
+ // Currently we assume that the finalizer queue won't
+ // grow during marking so we don't have to rescan it
+ // during mark termination. If we ever need to lift
+ // this assumption, we can do it by adding the
+ // necessary barriers to queuefinalizer (which it may
+ // have automatically).
+ throw("queuefinalizer during GC")
+ }
+
+ lock(&finlock)
+ if finq == nil || finq.cnt == uint32(len(finq.fin)) {
+ if finc == nil {
+ finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gcMiscSys))
+ finc.alllink = allfin
+ allfin = finc
+ if finptrmask[0] == 0 {
+ // Build pointer mask for Finalizer array in block.
+ // Check assumptions made in finalizer1 array above.
+ if (unsafe.Sizeof(finalizer{}) != 5*goarch.PtrSize ||
+ unsafe.Offsetof(finalizer{}.fn) != 0 ||
+ unsafe.Offsetof(finalizer{}.arg) != goarch.PtrSize ||
+ unsafe.Offsetof(finalizer{}.nret) != 2*goarch.PtrSize ||
+ unsafe.Offsetof(finalizer{}.fint) != 3*goarch.PtrSize ||
+ unsafe.Offsetof(finalizer{}.ot) != 4*goarch.PtrSize) {
+ throw("finalizer out of sync")
+ }
+ for i := range finptrmask {
+ finptrmask[i] = finalizer1[i%len(finalizer1)]
+ }
+ }
+ }
+ block := finc
+ finc = block.next
+ block.next = finq
+ finq = block
+ }
+ f := &finq.fin[finq.cnt]
+ atomic.Xadd(&finq.cnt, +1) // Sync with markroots
+ f.fn = fn
+ f.nret = nret
+ f.fint = fint
+ f.ot = ot
+ f.arg = p
+ fingwake = true
+ unlock(&finlock)
+}
+
+//go:nowritebarrier
+func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) {
+ for fb := allfin; fb != nil; fb = fb.alllink {
+ for i := uint32(0); i < fb.cnt; i++ {
+ f := &fb.fin[i]
+ callback(f.fn, f.arg, f.nret, f.fint, f.ot)
+ }
+ }
+}
+
+func wakefing() *g {
+ var res *g
+ lock(&finlock)
+ if fingwait && fingwake {
+ fingwait = false
+ fingwake = false
+ res = fing
+ }
+ unlock(&finlock)
+ return res
+}
+
+var (
+ fingCreate uint32
+ fingRunning bool
+)
+
+func createfing() {
+ // start the finalizer goroutine exactly once
+ if fingCreate == 0 && atomic.Cas(&fingCreate, 0, 1) {
+ go runfinq()
+ }
+}
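+
+// Note that the racy fingCreate == 0 read above is only a fast-path filter;
+// the atomic.Cas is what guarantees the finalizer goroutine is started at
+// most once.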
+
+// This is the goroutine that runs all of the finalizers.
+func runfinq() {
+ var (
+ frame unsafe.Pointer
+ framecap uintptr
+ argRegs int
+ )
+
+ gp := getg()
+ lock(&finlock)
+ fing = gp
+ unlock(&finlock)
+
+ for {
+ lock(&finlock)
+ fb := finq
+ finq = nil
+ if fb == nil {
+ fingwait = true
+ goparkunlock(&finlock, waitReasonFinalizerWait, traceEvGoBlock, 1)
+ continue
+ }
+ argRegs = intArgRegs
+ unlock(&finlock)
+ if raceenabled {
+ racefingo()
+ }
+ for fb != nil {
+ for i := fb.cnt; i > 0; i-- {
+ f := &fb.fin[i-1]
+
+ var regs abi.RegArgs
+ // The args may be passed in registers or on stack. Even for
+ // the register case, we still need the spill slots.
+ // TODO: revisit if we remove spill slots.
+ //
+ // Unfortunately, because we can have an arbitrary
+ // number of return values and it would be complex to try and
+ // figure out how many of those can get passed in registers,
+ // just conservatively assume none of them do.
+ framesz := unsafe.Sizeof((any)(nil)) + f.nret
+ if framecap < framesz {
+ // The frame does not contain pointers interesting for GC,
+ // all not yet finalized objects are stored in finq.
+ // If we do not mark it as FlagNoScan,
+ // the last finalized object is not collected.
+ frame = mallocgc(framesz, nil, true)
+ framecap = framesz
+ }
+
+ if f.fint == nil {
+ throw("missing type in runfinq")
+ }
+ r := frame
+ if argRegs > 0 {
+ r = unsafe.Pointer(&regs.Ints)
+ } else {
+ // frame is effectively uninitialized
+ // memory. That means we have to clear
+ // it before writing to it to avoid
+ // confusing the write barrier.
+ *(*[2]uintptr)(frame) = [2]uintptr{}
+ }
+ switch f.fint.kind & kindMask {
+ case kindPtr:
+ // direct use of pointer
+ *(*unsafe.Pointer)(r) = f.arg
+ case kindInterface:
+ ityp := (*interfacetype)(unsafe.Pointer(f.fint))
+ // set up with empty interface
+ (*eface)(r)._type = &f.ot.typ
+ (*eface)(r).data = f.arg
+ if len(ityp.mhdr) != 0 {
+ // convert to interface with methods
+ // this conversion is guaranteed to succeed - we checked in SetFinalizer
+ (*iface)(r).tab = assertE2I(ityp, (*eface)(r)._type)
+ }
+ default:
+ throw("bad kind in runfinq")
+ }
+ fingRunning = true
+ reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz), uint32(framesz), &regs)
+ fingRunning = false
+
+ // Drop finalizer queue heap references
+ // before hiding them from markroot.
+ // This also ensures these will be
+ // clear if we reuse the finalizer.
+ f.fn = nil
+ f.arg = nil
+ f.ot = nil
+ atomic.Store(&fb.cnt, i-1)
+ }
+ next := fb.next
+ lock(&finlock)
+ fb.next = finc
+ finc = fb
+ unlock(&finlock)
+ fb = next
+ }
+ }
+}
+
+// SetFinalizer sets the finalizer associated with obj to the provided
+// finalizer function. When the garbage collector finds an unreachable block
+// with an associated finalizer, it clears the association and runs
+// finalizer(obj) in a separate goroutine. This makes obj reachable again,
+// but now without an associated finalizer. Assuming that SetFinalizer
+// is not called again, the next time the garbage collector sees
+// that obj is unreachable, it will free obj.
+//
+// SetFinalizer(obj, nil) clears any finalizer associated with obj.
+//
+// The argument obj must be a pointer to an object allocated by calling
+// new, by taking the address of a composite literal, or by taking the
+// address of a local variable.
+// The argument finalizer must be a function that takes a single argument
+// to which obj's type can be assigned, and can have arbitrary ignored return
+// values. If either of these is not true, SetFinalizer may abort the
+// program.
+//
+// Finalizers are run in dependency order: if A points at B, both have
+// finalizers, and they are otherwise unreachable, only the finalizer
+// for A runs; once A is freed, the finalizer for B can run.
+// If a cyclic structure includes a block with a finalizer, that
+// cycle is not guaranteed to be garbage collected and the finalizer
+// is not guaranteed to run, because there is no ordering that
+// respects the dependencies.
+//
+// The finalizer is scheduled to run at some arbitrary time after the
+// program can no longer reach the object to which obj points.
+// There is no guarantee that finalizers will run before a program exits,
+// so typically they are useful only for releasing non-memory resources
+// associated with an object during a long-running program.
+// For example, an os.File object could use a finalizer to close the
+// associated operating system file descriptor when a program discards
+// an os.File without calling Close, but it would be a mistake
+// to depend on a finalizer to flush an in-memory I/O buffer such as a
+// bufio.Writer, because the buffer would not be flushed at program exit.
+//
+// It is not guaranteed that a finalizer will run if the size of *obj is
+// zero bytes.
+//
+// It is not guaranteed that a finalizer will run for objects allocated
+// in initializers for package-level variables. Such objects may be
+// linker-allocated, not heap-allocated.
+//
+// A finalizer may run as soon as an object becomes unreachable.
+// In order to use finalizers correctly, the program must ensure that
+// the object is reachable until it is no longer required.
+// Objects stored in global variables, or that can be found by tracing
+// pointers from a global variable, are reachable. For other objects,
+// pass the object to a call of the KeepAlive function to mark the
+// last point in the function where the object must be reachable.
+//
+// For example, if p points to a struct, such as os.File, that contains
+// a file descriptor d, and p has a finalizer that closes that file
+// descriptor, and if the last use of p in a function is a call to
+// syscall.Write(p.d, buf, size), then p may be unreachable as soon as
+// the program enters syscall.Write. The finalizer may run at that moment,
+// closing p.d, causing syscall.Write to fail because it is writing to
+// a closed file descriptor (or, worse, to an entirely different
+// file descriptor opened by a different goroutine). To avoid this problem,
+// call KeepAlive(p) after the call to syscall.Write.
+//
+// A single goroutine runs all finalizers for a program, sequentially.
+// If a finalizer must run for a long time, it should do so by starting
+// a new goroutine.
+//
+// In the terminology of the Go memory model, a call
+// SetFinalizer(x, f) “synchronizes before” the finalization call f(x).
+// However, there is no guarantee that KeepAlive(x) or any other use of x
+// “synchronizes before” f(x), so in general a finalizer should use a mutex
+// or other synchronization mechanism if it needs to access mutable state in x.
+// For example, consider a finalizer that inspects a mutable field in x
+// that is modified from time to time in the main program before x
+// becomes unreachable and the finalizer is invoked.
+// The modifications in the main program and the inspection in the finalizer
+// need to use appropriate synchronization, such as mutexes or atomic updates,
+// to avoid read-write races.
+func SetFinalizer(obj any, finalizer any) {
+ if debug.sbrk != 0 {
+ // debug.sbrk never frees memory, so no finalizers run
+ // (and we don't have the data structures to record them).
+ return
+ }
+ e := efaceOf(&obj)
+ etyp := e._type
+ if etyp == nil {
+ throw("runtime.SetFinalizer: first argument is nil")
+ }
+ if etyp.kind&kindMask != kindPtr {
+ throw("runtime.SetFinalizer: first argument is " + etyp.string() + ", not pointer")
+ }
+ ot := (*ptrtype)(unsafe.Pointer(etyp))
+ if ot.elem == nil {
+ throw("nil elem type!")
+ }
+
+ // find the containing object
+ base, _, _ := findObject(uintptr(e.data), 0, 0)
+
+ if base == 0 {
+ // 0-length objects are okay.
+ if e.data == unsafe.Pointer(&zerobase) {
+ return
+ }
+
+ // Global initializers might be linker-allocated.
+ // var Foo = &Object{}
+ // func main() {
+ // runtime.SetFinalizer(Foo, nil)
+ // }
+ // The relevant segments are: noptrdata, data, bss, noptrbss.
+ // We cannot assume they are in any order or even contiguous,
+ // due to external linking.
+ for datap := &firstmoduledata; datap != nil; datap = datap.next {
+ if datap.noptrdata <= uintptr(e.data) && uintptr(e.data) < datap.enoptrdata ||
+ datap.data <= uintptr(e.data) && uintptr(e.data) < datap.edata ||
+ datap.bss <= uintptr(e.data) && uintptr(e.data) < datap.ebss ||
+ datap.noptrbss <= uintptr(e.data) && uintptr(e.data) < datap.enoptrbss {
+ return
+ }
+ }
+ throw("runtime.SetFinalizer: pointer not in allocated block")
+ }
+
+ if uintptr(e.data) != base {
+ // As an implementation detail we allow setting finalizers for an inner byte
+ // of an object if it could come from tiny alloc (see mallocgc for details).
+ if ot.elem == nil || ot.elem.ptrdata != 0 || ot.elem.size >= maxTinySize {
+ throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
+ }
+ }
+
+ f := efaceOf(&finalizer)
+ ftyp := f._type
+ if ftyp == nil {
+ // switch to system stack and remove finalizer
+ systemstack(func() {
+ removefinalizer(e.data)
+ })
+ return
+ }
+
+ if ftyp.kind&kindMask != kindFunc {
+ throw("runtime.SetFinalizer: second argument is " + ftyp.string() + ", not a function")
+ }
+ ft := (*functype)(unsafe.Pointer(ftyp))
+ if ft.dotdotdot() {
+ throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string() + " because dotdotdot")
+ }
+ if ft.inCount != 1 {
+ throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string())
+ }
+ fint := ft.in()[0]
+ switch {
+ case fint == etyp:
+ // ok - same type
+ goto okarg
+ case fint.kind&kindMask == kindPtr:
+ if (fint.uncommon() == nil || etyp.uncommon() == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem {
+ // ok - not same type, but both pointers,
+ // one or the other is unnamed, and same element type, so assignable.
+ goto okarg
+ }
+ case fint.kind&kindMask == kindInterface:
+ ityp := (*interfacetype)(unsafe.Pointer(fint))
+ if len(ityp.mhdr) == 0 {
+ // ok - satisfies empty interface
+ goto okarg
+ }
+ if iface := assertE2I2(ityp, *efaceOf(&obj)); iface.tab != nil {
+ goto okarg
+ }
+ }
+ throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string())
+okarg:
+ // compute size needed for return parameters
+ nret := uintptr(0)
+ for _, t := range ft.out() {
+ nret = alignUp(nret, uintptr(t.align)) + uintptr(t.size)
+ }
+ nret = alignUp(nret, goarch.PtrSize)
+
+ // make sure we have a finalizer goroutine
+ createfing()
+
+ systemstack(func() {
+ if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
+ throw("runtime.SetFinalizer: finalizer already set")
+ }
+ })
+}
+
+// Mark KeepAlive as noinline so that it is easily detectable as an intrinsic.
+//
+//go:noinline
+
+// KeepAlive marks its argument as currently reachable.
+// This ensures that the object is not freed, and its finalizer is not run,
+// before the point in the program where KeepAlive is called.
+//
+// A very simplified example showing where KeepAlive is required:
+//
+// type File struct { d int }
+// d, err := syscall.Open("/file/path", syscall.O_RDONLY, 0)
+// // ... do something if err != nil ...
+// p := &File{d}
+// runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) })
+// var buf [10]byte
+// n, err := syscall.Read(p.d, buf[:])
+// // Ensure p is not finalized until Read returns.
+// runtime.KeepAlive(p)
+// // No more uses of p after this point.
+//
+// Without the KeepAlive call, the finalizer could run at the start of
+// syscall.Read, closing the file descriptor before syscall.Read makes
+// the actual system call.
+//
+// Note: KeepAlive should only be used to prevent finalizers from
+// running prematurely. In particular, when used with unsafe.Pointer,
+// the rules for valid uses of unsafe.Pointer still apply.
+func KeepAlive(x any) {
+ // Introduce a use of x that the compiler can't eliminate.
+ // This makes sure x is alive on entry. We need x to be alive
+ // on entry for "defer runtime.KeepAlive(x)"; see issue 21402.
+ if cgoAlwaysFalse {
+ println(x)
+ }
+}
diff --git a/contrib/go/_std_1.18/src/runtime/mfixalloc.go b/contrib/go/_std_1.19/src/runtime/mfixalloc.go
index b701a09b40..b701a09b40 100644
--- a/contrib/go/_std_1.18/src/runtime/mfixalloc.go
+++ b/contrib/go/_std_1.19/src/runtime/mfixalloc.go
diff --git a/contrib/go/_std_1.19/src/runtime/mgc.go b/contrib/go/_std_1.19/src/runtime/mgc.go
new file mode 100644
index 0000000000..63e04636d7
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mgc.go
@@ -0,0 +1,1761 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector (GC).
+//
+// The GC runs concurrently with mutator threads, is type accurate (aka precise), allows multiple
+// GC threads to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
+// non-generational and non-compacting. Allocation is done using size segregated per P allocation
+// areas to minimize fragmentation while eliminating locks in the common case.
+//
+// The algorithm decomposes into several steps.
+// This is a high level description of the algorithm being used. For an overview of GC a good
+// place to start is Richard Jones' gchandbook.org.
+//
+// The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
+// Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
+// On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978),
+// 966-975.
+// For journal quality proofs that these steps are complete, correct, and terminate see
+// Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
+// Concurrency and Computation: Practice and Experience 15(3-5), 2003.
+//
+// 1. GC performs sweep termination.
+//
+// a. Stop the world. This causes all Ps to reach a GC safe-point.
+//
+// b. Sweep any unswept spans. There will only be unswept spans if
+// this GC cycle was forced before the expected time.
+//
+// 2. GC performs the mark phase.
+//
+// a. Prepare for the mark phase by setting gcphase to _GCmark
+// (from _GCoff), enabling the write barrier, enabling mutator
+// assists, and enqueueing root mark jobs. No objects may be
+// scanned until all Ps have enabled the write barrier, which is
+// accomplished using STW.
+//
+// b. Start the world. From this point, GC work is done by mark
+// workers started by the scheduler and by assists performed as
+// part of allocation. The write barrier shades both the
+// overwritten pointer and the new pointer value for any pointer
+// writes (see mbarrier.go for details). Newly allocated objects
+// are immediately marked black.
+//
+// c. GC performs root marking jobs. This includes scanning all
+// stacks, shading all globals, and shading any heap pointers in
+// off-heap runtime data structures. Scanning a stack stops a
+// goroutine, shades any pointers found on its stack, and then
+// resumes the goroutine.
+//
+// d. GC drains the work queue of grey objects, scanning each grey
+// object to black and shading all pointers found in the object
+// (which in turn may add those pointers to the work queue).
+//
+// e. Because GC work is spread across local caches, GC uses a
+// distributed termination algorithm to detect when there are no
+// more root marking jobs or grey objects (see gcMarkDone). At this
+// point, GC transitions to mark termination.
+//
+// 3. GC performs mark termination.
+//
+// a. Stop the world.
+//
+// b. Set gcphase to _GCmarktermination, and disable workers and
+// assists.
+//
+// c. Perform housekeeping like flushing mcaches.
+//
+// 4. GC performs the sweep phase.
+//
+// a. Prepare for the sweep phase by setting gcphase to _GCoff,
+// setting up sweep state and disabling the write barrier.
+//
+// b. Start the world. From this point on, newly allocated objects
+// are white, and allocating sweeps spans before use if necessary.
+//
+// c. GC does concurrent sweeping in the background and in response
+// to allocation. See description below.
+//
+// 5. When sufficient allocation has taken place, replay the sequence
+// starting with 1 above. See discussion of GC rate below.
+
+// Concurrent sweep.
+//
+// The sweep phase proceeds concurrently with normal program execution.
+// The heap is swept span-by-span both lazily (when a goroutine needs another span)
+// and concurrently in a background goroutine (this helps programs that are not CPU bound).
+// At the end of STW mark termination all spans are marked as "needs sweeping".
+//
+// The background sweeper goroutine simply sweeps spans one-by-one.
+//
+// To avoid requesting more OS memory while there are unswept spans, when a
+// goroutine needs another span, it first attempts to reclaim that much memory
+// by sweeping. When a goroutine needs to allocate a new small-object span, it
+// sweeps small-object spans for the same object size until it frees at least
+// one object. When a goroutine needs to allocate a large-object span from the
+// heap, it sweeps spans until it frees at least that many pages into the heap.
+// There is
+// one case where this may not suffice: if a goroutine sweeps and frees two
+// nonadjacent one-page spans to the heap, it will allocate a new two-page
+// span, but there can still be other one-page unswept spans which could be
+// combined into a two-page span.
+//
+// It's critical to ensure that no operations proceed on unswept spans (that would corrupt
+// mark bits in GC bitmap). During GC all mcaches are flushed into the central cache,
+// so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
+// When a goroutine explicitly frees an object or sets a finalizer, it ensures that
+// the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
+// The finalizer goroutine is kicked off only when all spans are swept.
+// When the next GC starts, it sweeps all not-yet-swept spans (if any).
+
+// GC rate.
+// Next GC is after we've allocated an extra amount of memory proportional to
+// the amount already in use. The proportion is controlled by GOGC environment variable
+// (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
+// (this mark is computed by the gcController.heapGoal method). This keeps the GC cost in
+// linear proportion to the allocation cost. Adjusting GOGC just changes the linear constant
+// (and also the amount of extra memory used).
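+//
+// As a worked variation of the example above: with GOGC=200 and 4M in use,
+// the next GC would be triggered at roughly 4M + 4M*200/100 = 12M.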
+
+// Oblets
+//
+// In order to prevent long pauses while scanning large objects and to
+// improve parallelism, the garbage collector breaks up scan jobs for
+// objects larger than maxObletBytes into "oblets" of at most
+// maxObletBytes. When scanning encounters the beginning of a large
+// object, it scans only the first oblet and enqueues the remaining
+// oblets as new scan jobs.
+
+package runtime
+
+import (
+ "internal/cpu"
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+const (
+ _DebugGC = 0
+ _ConcurrentSweep = true
+ _FinBlockSize = 4 * 1024
+
+ // debugScanConservative enables debug logging for stack
+ // frames that are scanned conservatively.
+ debugScanConservative = false
+
+ // sweepMinHeapDistance is a lower bound on the heap distance
+ // (in bytes) reserved for concurrent sweeping between GC
+ // cycles.
+ sweepMinHeapDistance = 1024 * 1024
+)
+
+func gcinit() {
+ if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
+ throw("size of Workbuf is suboptimal")
+ }
+ // No sweep on the first cycle.
+ sweep.active.state.Store(sweepDrainedMask)
+
+ // Initialize GC pacer state.
+ // Use the environment variable GOGC for the initial gcPercent value.
+ // Use the environment variable GOMEMLIMIT for the initial memoryLimit value.
+ gcController.init(readGOGC(), readGOMEMLIMIT())
+
+ work.startSema = 1
+ work.markDoneSema = 1
+ lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters)
+ lockInit(&work.assistQueue.lock, lockRankAssistQueue)
+ lockInit(&work.wbufSpans.lock, lockRankWbufSpans)
+}
+
+// gcenable is called after the bulk of the runtime initialization,
+// just before we're about to start letting user code run.
+// It kicks off the background sweeper goroutine, the background
+// scavenger goroutine, and enables GC.
+func gcenable() {
+ // Kick off sweeping and scavenging.
+ c := make(chan int, 2)
+ go bgsweep(c)
+ go bgscavenge(c)
+ <-c
+ <-c
+ memstats.enablegc = true // now that runtime is initialized, GC is okay
+}
+
+// Garbage collector phase.
+// Indicates to write barrier and synchronization task to perform.
+var gcphase uint32
+
+// The compiler knows about this variable.
+// If you change it, you must change builtin/runtime.go, too.
+// If you change the first four bytes, you must also change the write
+// barrier insertion code.
+var writeBarrier struct {
+ enabled bool // compiler emits a check of this before calling write barrier
+ pad [3]byte // compiler uses 32-bit load for "enabled" field
+ needed bool // whether we need a write barrier for current GC phase
+ cgo bool // whether we need a write barrier for a cgo check
+ alignme uint64 // guarantee alignment so that compiler can use a 32 or 64-bit load
+}
+
+// gcBlackenEnabled is 1 if mutator assists and background mark
+// workers are allowed to blacken objects. This must only be set when
+// gcphase == _GCmark.
+var gcBlackenEnabled uint32
+
+const (
+ _GCoff = iota // GC not running; sweeping in background, write barrier disabled
+ _GCmark // GC marking roots and workbufs: allocate black, write barrier ENABLED
+ _GCmarktermination // GC mark termination: allocate black, P's help GC, write barrier ENABLED
+)
+
+//go:nosplit
+func setGCPhase(x uint32) {
+ atomic.Store(&gcphase, x)
+ writeBarrier.needed = gcphase == _GCmark || gcphase == _GCmarktermination
+ writeBarrier.enabled = writeBarrier.needed || writeBarrier.cgo
+}
+
+// gcMarkWorkerMode represents the mode that a concurrent mark worker
+// should operate in.
+//
+// Concurrent marking happens through four different mechanisms. One
+// is mutator assists, which happen in response to allocations and are
+// not scheduled. The other three are variations in the per-P mark
+// workers and are distinguished by gcMarkWorkerMode.
+type gcMarkWorkerMode int
+
+const (
+ // gcMarkWorkerNotWorker indicates that the next scheduled G is not
+ // starting work and the mode should be ignored.
+ gcMarkWorkerNotWorker gcMarkWorkerMode = iota
+
+ // gcMarkWorkerDedicatedMode indicates that the P of a mark
+ // worker is dedicated to running that mark worker. The mark
+ // worker should run without preemption.
+ gcMarkWorkerDedicatedMode
+
+ // gcMarkWorkerFractionalMode indicates that a P is currently
+ // running the "fractional" mark worker. The fractional worker
+ // is necessary when GOMAXPROCS*gcBackgroundUtilization is not
+ // an integer and using only dedicated workers would result in
+ // utilization too far from the target of gcBackgroundUtilization.
+ // The fractional worker should run until it is preempted and
+ // will be scheduled to pick up the fractional part of
+ // GOMAXPROCS*gcBackgroundUtilization.
+ gcMarkWorkerFractionalMode
+
+ // gcMarkWorkerIdleMode indicates that a P is running the mark
+ // worker because it has nothing else to do. The idle worker
+ // should run until it is preempted and account its time
+ // against gcController.idleMarkTime.
+ gcMarkWorkerIdleMode
+)
+
+// gcMarkWorkerModeStrings are the strings labels of gcMarkWorkerModes
+// to use in execution traces.
+var gcMarkWorkerModeStrings = [...]string{
+ "Not worker",
+ "GC (dedicated)",
+ "GC (fractional)",
+ "GC (idle)",
+}
+
+// pollFractionalWorkerExit reports whether a fractional mark worker
+// should self-preempt. It assumes it is called from the fractional
+// worker.
+func pollFractionalWorkerExit() bool {
+ // This should be kept in sync with the fractional worker
+ // scheduler logic in findRunnableGCWorker.
+ now := nanotime()
+ delta := now - gcController.markStartTime
+ if delta <= 0 {
+ return true
+ }
+ p := getg().m.p.ptr()
+ selfTime := p.gcFractionalMarkTime + (now - p.gcMarkWorkerStartTime)
+ // Add some slack to the utilization goal so that the
+ // fractional worker isn't behind again the instant it exits.
+ return float64(selfTime)/float64(delta) > 1.2*gcController.fractionalUtilizationGoal
+}
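+
+// For a concrete sense of the slack above: if fractionalUtilizationGoal were
+// 0.05 (5% of wall-clock time since mark start), the worker would self-preempt
+// once its own mark time exceeded 1.2*0.05 = 6% of that time. The 0.05 here is
+// only an illustrative value, not one taken from the scheduler.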
+
+var work workType
+
+type workType struct {
+ full lfstack // lock-free list of full blocks workbuf
+ empty lfstack // lock-free list of empty blocks workbuf
+ pad0 cpu.CacheLinePad // prevents false-sharing between full/empty and nproc/nwait
+
+ wbufSpans struct {
+ lock mutex
+ // free is a list of spans dedicated to workbufs, but
+ // that don't currently contain any workbufs.
+ free mSpanList
+ // busy is a list of all spans containing workbufs on
+ // one of the workbuf lists.
+ busy mSpanList
+ }
+
+ // Restore 64-bit alignment on 32-bit.
+ _ uint32
+
+ // bytesMarked is the number of bytes marked this cycle. This
+ // includes bytes blackened in scanned objects, noscan objects
+ // that go straight to black, and permagrey objects scanned by
+ // markroot during the concurrent scan phase. This is updated
+ // atomically during the cycle. Updates may be batched
+ // arbitrarily, since the value is only read at the end of the
+ // cycle.
+ //
+ // Because of benign races during marking, this number may not
+ // be the exact number of marked bytes, but it should be very
+ // close.
+ //
+ // Put this field here because it needs 64-bit atomic access
+ // (and thus 8-byte alignment even on 32-bit architectures).
+ bytesMarked uint64
+
+ markrootNext uint32 // next markroot job
+ markrootJobs uint32 // number of markroot jobs
+
+ nproc uint32
+ tstart int64
+ nwait uint32
+
+ // Number of roots of various root types. Set by gcMarkRootPrepare.
+ //
+ // nStackRoots == len(stackRoots), but we have nStackRoots for
+ // consistency.
+ nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int
+
+ // Base indexes of each root type. Set by gcMarkRootPrepare.
+ baseData, baseBSS, baseSpans, baseStacks, baseEnd uint32
+
+ // stackRoots is a snapshot of all of the Gs that existed
+ // before the beginning of concurrent marking. The backing
+ // store of this must not be modified because it might be
+ // shared with allgs.
+ stackRoots []*g
+
+ // Each type of GC state transition is protected by a lock.
+ // Since multiple threads can simultaneously detect the state
+ // transition condition, any thread that detects a transition
+ // condition must acquire the appropriate transition lock,
+ // re-check the transition condition and return if it no
+ // longer holds or perform the transition if it does.
+ // Likewise, any transition must invalidate the transition
+ // condition before releasing the lock. This ensures that each
+ // transition is performed by exactly one thread and threads
+ // that need the transition to happen block until it has
+ // happened.
+ //
+ // startSema protects the transition from "off" to mark or
+ // mark termination.
+ startSema uint32
+ // markDoneSema protects transitions from mark to mark termination.
+ markDoneSema uint32
+
+ bgMarkReady note // signal background mark worker has started
+ bgMarkDone uint32 // cas to 1 when at a background mark completion point
+ // Background mark completion signaling
+
+ // mode is the concurrency mode of the current GC cycle.
+ mode gcMode
+
+ // userForced indicates the current GC cycle was forced by an
+ // explicit user call.
+ userForced bool
+
+ // totaltime is the CPU nanoseconds spent in GC since the
+ // program started if debug.gctrace > 0.
+ totaltime int64
+
+ // initialHeapLive is the value of gcController.heapLive at the
+ // beginning of this GC cycle.
+ initialHeapLive uint64
+
+ // assistQueue is a queue of assists that are blocked because
+ // there was neither enough credit to steal nor enough work to
+ // do.
+ assistQueue struct {
+ lock mutex
+ q gQueue
+ }
+
+ // sweepWaiters is a list of blocked goroutines to wake when
+ // we transition from mark termination to sweep.
+ sweepWaiters struct {
+ lock mutex
+ list gList
+ }
+
+ // cycles is the number of completed GC cycles, where a GC
+ // cycle is sweep termination, mark, mark termination, and
+ // sweep. This differs from memstats.numgc, which is
+ // incremented at mark termination.
+ cycles uint32
+
+ // Timing/utilization stats for this cycle.
+ stwprocs, maxprocs int32
+ tSweepTerm, tMark, tMarkTerm, tEnd int64 // nanotime() of phase start
+
+ pauseNS int64 // total STW time this cycle
+ pauseStart int64 // nanotime() of last STW
+
+ // debug.gctrace heap sizes for this cycle.
+ heap0, heap1, heap2 uint64
+}
+
+// GC runs a garbage collection and blocks the caller until the
+// garbage collection is complete. It may also block the entire
+// program.
+func GC() {
+ // We consider a cycle to be: sweep termination, mark, mark
+ // termination, and sweep. This function shouldn't return
+ // until a full cycle has been completed, from beginning to
+ // end. Hence, we always want to finish up the current cycle
+ // and start a new one. That means:
+ //
+ // 1. In sweep termination, mark, or mark termination of cycle
+ // N, wait until mark termination N completes and transitions
+ // to sweep N.
+ //
+ // 2. In sweep N, help with sweep N.
+ //
+ // At this point we can begin a full cycle N+1.
+ //
+ // 3. Trigger cycle N+1 by starting sweep termination N+1.
+ //
+ // 4. Wait for mark termination N+1 to complete.
+ //
+ // 5. Help with sweep N+1 until it's done.
+ //
+ // This all has to be written to deal with the fact that the
+ // GC may move ahead on its own. For example, when we block
+ // until mark termination N, we may wake up in cycle N+2.
+
+ // Wait until the current sweep termination, mark, and mark
+ // termination complete.
+ n := atomic.Load(&work.cycles)
+ gcWaitOnMark(n)
+
+ // We're now in sweep N or later. Trigger GC cycle N+1, which
+ // will first finish sweep N if necessary and then enter sweep
+ // termination N+1.
+ gcStart(gcTrigger{kind: gcTriggerCycle, n: n + 1})
+
+ // Wait for mark termination N+1 to complete.
+ gcWaitOnMark(n + 1)
+
+ // Finish sweep N+1 before returning. We do this both to
+ // complete the cycle and because runtime.GC() is often used
+ // as part of tests and benchmarks to get the system into a
+ // relatively stable and isolated state.
+ for atomic.Load(&work.cycles) == n+1 && sweepone() != ^uintptr(0) {
+ sweep.nbgsweep++
+ Gosched()
+ }
+
+ // Callers may assume that the heap profile reflects the
+ // just-completed cycle when this returns (historically this
+ // happened because this was a STW GC), but right now the
+ // profile still reflects mark termination N, not N+1.
+ //
+ // As soon as all of the sweep frees from cycle N+1 are done,
+ // we can go ahead and publish the heap profile.
+ //
+ // First, wait for sweeping to finish. (We know there are no
+ // more spans on the sweep queue, but we may be concurrently
+ // sweeping spans, so we have to wait.)
+ for atomic.Load(&work.cycles) == n+1 && !isSweepDone() {
+ Gosched()
+ }
+
+ // Now we're really done with sweeping, so we can publish the
+ // stable heap profile. Only do this if we haven't already hit
+ // another mark termination.
+ mp := acquirem()
+ cycle := atomic.Load(&work.cycles)
+ if cycle == n+1 || (gcphase == _GCmark && cycle == n+2) {
+ mProf_PostSweep()
+ }
+ releasem(mp)
+}
+
+// gcWaitOnMark blocks until GC finishes the Nth mark phase. If GC has
+// already completed this mark phase, it returns immediately.
+func gcWaitOnMark(n uint32) {
+ for {
+ // Disable phase transitions.
+ lock(&work.sweepWaiters.lock)
+ nMarks := atomic.Load(&work.cycles)
+ if gcphase != _GCmark {
+ // We've already completed this cycle's mark.
+ nMarks++
+ }
+ if nMarks > n {
+ // We're done.
+ unlock(&work.sweepWaiters.lock)
+ return
+ }
+
+ // Wait until sweep termination, mark, and mark
+ // termination of cycle N complete.
+ work.sweepWaiters.list.push(getg())
+ goparkunlock(&work.sweepWaiters.lock, waitReasonWaitForGCCycle, traceEvGoBlock, 1)
+ }
+}
+
+// gcMode indicates how concurrent a GC cycle should be.
+type gcMode int
+
+const (
+ gcBackgroundMode gcMode = iota // concurrent GC and sweep
+ gcForceMode // stop-the-world GC now, concurrent sweep
+ gcForceBlockMode // stop-the-world GC now and STW sweep (forced by user)
+)
+
+// A gcTrigger is a predicate for starting a GC cycle. Specifically,
+// it is an exit condition for the _GCoff phase.
+type gcTrigger struct {
+ kind gcTriggerKind
+ now int64 // gcTriggerTime: current time
+ n uint32 // gcTriggerCycle: cycle number to start
+}
+
+type gcTriggerKind int
+
+const (
+ // gcTriggerHeap indicates that a cycle should be started when
+ // the heap size reaches the trigger heap size computed by the
+ // controller.
+ gcTriggerHeap gcTriggerKind = iota
+
+ // gcTriggerTime indicates that a cycle should be started when
+ // it's been more than forcegcperiod nanoseconds since the
+ // previous GC cycle.
+ gcTriggerTime
+
+ // gcTriggerCycle indicates that a cycle should be started if
+ // we have not yet started cycle number gcTrigger.n (relative
+ // to work.cycles).
+ gcTriggerCycle
+)
+
+// test reports whether the trigger condition is satisfied, meaning
+// that the exit condition for the _GCoff phase has been met. The exit
+// condition should be tested when allocating.
+func (t gcTrigger) test() bool {
+ if !memstats.enablegc || panicking != 0 || gcphase != _GCoff {
+ return false
+ }
+ switch t.kind {
+ case gcTriggerHeap:
+ // Non-atomic access to gcController.heapLive for performance. If
+ // we are going to trigger on this, this thread just
+ // atomically wrote gcController.heapLive anyway and we'll see our
+ // own write.
+ trigger, _ := gcController.trigger()
+ return atomic.Load64(&gcController.heapLive) >= trigger
+ case gcTriggerTime:
+ if gcController.gcPercent.Load() < 0 {
+ return false
+ }
+ lastgc := int64(atomic.Load64(&memstats.last_gc_nanotime))
+ return lastgc != 0 && t.now-lastgc > forcegcperiod
+ case gcTriggerCycle:
+ // t.n > work.cycles, but accounting for wraparound.
+ return int32(t.n-work.cycles) > 0
+ }
+ return true
+}
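+
+// A quick check of the wraparound arithmetic in the gcTriggerCycle case
+// above: the subtraction is performed on uint32s and reinterpreted as int32,
+// so with work.cycles == 1<<32-1 and t.n == 1, t.n-work.cycles wraps to 2 and
+// int32(2) > 0 correctly reports that cycle 1 has not started yet.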
+
+// gcStart starts the GC. It transitions from _GCoff to _GCmark (if
+// debug.gcstoptheworld == 0) or performs all of GC (if
+// debug.gcstoptheworld != 0).
+//
+// This may return without performing this transition in some cases,
+// such as when called on a system stack or with locks held.
+func gcStart(trigger gcTrigger) {
+ // Since this is called from malloc and malloc is called in
+ // the guts of a number of libraries that might be holding
+ // locks, don't attempt to start GC in non-preemptible or
+ // potentially unstable situations.
+ mp := acquirem()
+ if gp := getg(); gp == mp.g0 || mp.locks > 1 || mp.preemptoff != "" {
+ releasem(mp)
+ return
+ }
+ releasem(mp)
+ mp = nil
+
+ // Pick up the remaining unswept/not being swept spans concurrently
+ //
+ // This shouldn't happen if we're being invoked in background
+ // mode since proportional sweep should have just finished
+ // sweeping everything, but rounding errors, etc, may leave a
+ // few spans unswept. In forced mode, this is necessary since
+ // GC can be forced at any point in the sweeping cycle.
+ //
+ // We check the transition condition continuously here in case
+ // this G gets delayed into the next GC cycle.
+ for trigger.test() && sweepone() != ^uintptr(0) {
+ sweep.nbgsweep++
+ }
+
+ // Perform GC initialization and the sweep termination
+ // transition.
+ semacquire(&work.startSema)
+ // Re-check transition condition under transition lock.
+ if !trigger.test() {
+ semrelease(&work.startSema)
+ return
+ }
+
+ // For stats, check if this GC was forced by the user.
+ work.userForced = trigger.kind == gcTriggerCycle
+
+ // In gcstoptheworld debug mode, upgrade the mode accordingly.
+ // We do this after re-checking the transition condition so
+ // that multiple goroutines that detect the heap trigger don't
+ // start multiple STW GCs.
+ mode := gcBackgroundMode
+ if debug.gcstoptheworld == 1 {
+ mode = gcForceMode
+ } else if debug.gcstoptheworld == 2 {
+ mode = gcForceBlockMode
+ }
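+
+ // For example, running with GODEBUG=gcstoptheworld=1 forces every
+ // cycle into STW marking (gcForceMode), while gcstoptheworld=2 also
+ // makes the sweep synchronous (gcForceBlockMode).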
+
+ // Ok, we're doing it! Stop everybody else
+ semacquire(&gcsema)
+ semacquire(&worldsema)
+
+ if trace.enabled {
+ traceGCStart()
+ }
+
+ // Check that all Ps have finished deferred mcache flushes.
+ for _, p := range allp {
+ if fg := atomic.Load(&p.mcache.flushGen); fg != mheap_.sweepgen {
+ println("runtime: p", p.id, "flushGen", fg, "!= sweepgen", mheap_.sweepgen)
+ throw("p mcache not flushed")
+ }
+ }
+
+ gcBgMarkStartWorkers()
+
+ systemstack(gcResetMarkState)
+
+ work.stwprocs, work.maxprocs = gomaxprocs, gomaxprocs
+ if work.stwprocs > ncpu {
+ // This is used to compute CPU time of the STW phases,
+ // so it can't be more than ncpu, even if GOMAXPROCS is.
+ work.stwprocs = ncpu
+ }
+ work.heap0 = atomic.Load64(&gcController.heapLive)
+ work.pauseNS = 0
+ work.mode = mode
+
+ now := nanotime()
+ work.tSweepTerm = now
+ work.pauseStart = now
+ if trace.enabled {
+ traceGCSTWStart(1)
+ }
+ systemstack(stopTheWorldWithSema)
+ // Finish sweep before we start concurrent scan.
+ systemstack(func() {
+ finishsweep_m()
+ })
+
+ // clearpools before we start the GC. If we wait, the memory will not be
+ // reclaimed until the next GC cycle.
+ clearpools()
+
+ work.cycles++
+
+ // Assists and workers can start the moment we start
+ // the world.
+ gcController.startCycle(now, int(gomaxprocs), trigger)
+
+ // Notify the CPU limiter that assists may begin.
+ gcCPULimiter.startGCTransition(true, now)
+
+ // In STW mode, disable scheduling of user Gs. This may also
+ // disable scheduling of this goroutine, so it may block as
+ // soon as we start the world again.
+ if mode != gcBackgroundMode {
+ schedEnableUser(false)
+ }
+
+ // Enter concurrent mark phase and enable
+ // write barriers.
+ //
+ // Because the world is stopped, all Ps will
+ // observe that write barriers are enabled by
+ // the time we start the world and begin
+ // scanning.
+ //
+ // Write barriers must be enabled before assists are
+ // enabled because they must be enabled before
+ // any non-leaf heap objects are marked. Since
+ // allocations are blocked until assists can
+ // happen, we want to enable assists as early as
+ // possible.
+ setGCPhase(_GCmark)
+
+ gcBgMarkPrepare() // Must happen before assist enable.
+ gcMarkRootPrepare()
+
+ // Mark all active tinyalloc blocks. Since we're
+ // allocating from these, they need to be black like
+ // other allocations. The alternative is to blacken
+ // the tiny block on every allocation from it, which
+ // would slow down the tiny allocator.
+ gcMarkTinyAllocs()
+
+ // At this point all Ps have enabled the write
+ // barrier, thus maintaining the no white to
+ // black invariant. Enable mutator assists to
+ // put back-pressure on fast allocating
+ // mutators.
+ atomic.Store(&gcBlackenEnabled, 1)
+
+ // In STW mode, we could block the instant systemstack
+ // returns, so make sure we're not preemptible.
+ mp = acquirem()
+
+ // Concurrent mark.
+ systemstack(func() {
+ now = startTheWorldWithSema(trace.enabled)
+ work.pauseNS += now - work.pauseStart
+ work.tMark = now
+ memstats.gcPauseDist.record(now - work.pauseStart)
+
+ // Release the CPU limiter.
+ gcCPULimiter.finishGCTransition(now)
+ })
+
+ // Release the world sema before Gosched() in STW mode
+ // because the GC will need to reacquire it before this
+ // goroutine becomes runnable again, and holding it across
+ // the Gosched could self-deadlock.
+ semrelease(&worldsema)
+ releasem(mp)
+
+ // Make sure we block instead of returning to user code
+ // in STW mode.
+ if mode != gcBackgroundMode {
+ Gosched()
+ }
+
+ semrelease(&work.startSema)
+}
+
+// gcMarkDoneFlushed counts the number of P's with flushed work.
+//
+// Ideally this would be a captured local in gcMarkDone, but forEachP
+// escapes its callback closure, so it can't capture anything.
+//
+// This is protected by markDoneSema.
+var gcMarkDoneFlushed uint32
+
+// gcMarkDone transitions the GC from mark to mark termination if all
+// reachable objects have been marked (that is, there are no grey
+// objects and none can appear in the future). Otherwise, it flushes
+// all local work to the global queues where it can be discovered by
+// other workers.
+//
+// This should be called when all local mark work has been drained and
+// there are no remaining workers. Specifically, when
+//
+// work.nwait == work.nproc && !gcMarkWorkAvailable(p)
+//
+// The calling context must be preemptible.
+//
+// Flushing local work is important because idle Ps may have local
+// work queued. This is the only way to make that work visible and
+// drive GC to completion.
+//
+// It is explicitly okay to have write barriers in this function. If
+// it does transition to mark termination, then all reachable objects
+// have been marked, so the write barrier cannot shade any more
+// objects.
+func gcMarkDone() {
+ // Ensure only one thread is running the ragged barrier at a
+ // time.
+ semacquire(&work.markDoneSema)
+
+top:
+ // Re-check transition condition under transition lock.
+ //
+ // It's critical that this checks the global work queues are
+ // empty before performing the ragged barrier. Otherwise,
+ // there could be global work that a P could take after the P
+ // has passed the ragged barrier.
+ if !(gcphase == _GCmark && work.nwait == work.nproc && !gcMarkWorkAvailable(nil)) {
+ semrelease(&work.markDoneSema)
+ return
+ }
+
+ // forEachP needs worldsema to execute, and we'll need it to
+ // stop the world later, so acquire worldsema now.
+ semacquire(&worldsema)
+
+ // Flush all local buffers and collect flushedWork flags.
+ gcMarkDoneFlushed = 0
+ systemstack(func() {
+ gp := getg().m.curg
+ // Mark the user stack as preemptible so that it may be scanned.
+ // Otherwise, our attempt to force all P's to a safepoint could
+ // result in a deadlock as we attempt to preempt a worker that's
+ // trying to preempt us (e.g. for a stack scan).
+ casgstatus(gp, _Grunning, _Gwaiting)
+ forEachP(func(_p_ *p) {
+ // Flush the write barrier buffer, since this may add
+ // work to the gcWork.
+ wbBufFlush1(_p_)
+
+ // Flush the gcWork, since this may create global work
+ // and set the flushedWork flag.
+ //
+ // TODO(austin): Break up these workbufs to
+ // better distribute work.
+ _p_.gcw.dispose()
+ // Collect the flushedWork flag.
+ if _p_.gcw.flushedWork {
+ atomic.Xadd(&gcMarkDoneFlushed, 1)
+ _p_.gcw.flushedWork = false
+ }
+ })
+ casgstatus(gp, _Gwaiting, _Grunning)
+ })
+
+ if gcMarkDoneFlushed != 0 {
+ // More grey objects were discovered since the
+ // previous termination check, so there may be more
+ // work to do. Keep going. It's possible the
+ // transition condition became true again during the
+ // ragged barrier, so re-check it.
+ semrelease(&worldsema)
+ goto top
+ }
+
+ // There was no global work, no local work, and no Ps
+ // communicated work since we took markDoneSema. Therefore
+ // there are no grey objects and no more objects can be
+ // shaded. Transition to mark termination.
+ now := nanotime()
+ work.tMarkTerm = now
+ work.pauseStart = now
+ getg().m.preemptoff = "gcing"
+ if trace.enabled {
+ traceGCSTWStart(0)
+ }
+ systemstack(stopTheWorldWithSema)
+ // The gcphase is still _GCmark; it will transition to _GCmarktermination
+ // below. The important thing is that the write barrier remains active until
+ // all marking is complete. This includes writes made by the GC.
+
+ // There is sometimes work left over when we enter mark termination due
+ // to write barriers performed after the completion barrier above.
+ // Detect this and resume concurrent mark. This is obviously
+ // unfortunate.
+ //
+ // See issue #27993 for details.
+ //
+ // Switch to the system stack to call wbBufFlush1, though in this case
+ // it doesn't matter because we're non-preemptible anyway.
+ restart := false
+ systemstack(func() {
+ for _, p := range allp {
+ wbBufFlush1(p)
+ if !p.gcw.empty() {
+ restart = true
+ break
+ }
+ }
+ })
+ if restart {
+ getg().m.preemptoff = ""
+ systemstack(func() {
+ now := startTheWorldWithSema(true)
+ work.pauseNS += now - work.pauseStart
+ memstats.gcPauseDist.record(now - work.pauseStart)
+ })
+ semrelease(&worldsema)
+ goto top
+ }
+
+ gcComputeStartingStackSize()
+
+ // Disable assists and background workers. We must do
+ // this before waking blocked assists.
+ atomic.Store(&gcBlackenEnabled, 0)
+
+ // Notify the CPU limiter that GC assists will now cease.
+ gcCPULimiter.startGCTransition(false, now)
+
+ // Wake all blocked assists. These will run when we
+ // start the world again.
+ gcWakeAllAssists()
+
+ // Likewise, release the transition lock. Blocked
+ // workers and assists will run when we start the
+ // world again.
+ semrelease(&work.markDoneSema)
+
+ // In STW mode, re-enable user goroutines. These will be
+ // queued to run after we start the world.
+ schedEnableUser(true)
+
+ // endCycle depends on all gcWork cache stats being flushed.
+ // The termination algorithm above ensured that all gcWork
+ // caches were flushed, up to allocations made since the ragged barrier.
+ gcController.endCycle(now, int(gomaxprocs), work.userForced)
+
+ // Perform mark termination. This will restart the world.
+ gcMarkTermination()
+}
+
+// World must be stopped and mark assists and background workers must be
+// disabled.
+func gcMarkTermination() {
+ // Start marktermination (write barrier remains enabled for now).
+ setGCPhase(_GCmarktermination)
+
+ work.heap1 = gcController.heapLive
+ startTime := nanotime()
+
+ mp := acquirem()
+ mp.preemptoff = "gcing"
+ _g_ := getg()
+ _g_.m.traceback = 2
+ gp := _g_.m.curg
+ casgstatus(gp, _Grunning, _Gwaiting)
+ gp.waitreason = waitReasonGarbageCollection
+
+ // Run gc on the g0 stack. We do this so that the g stack
+ // we're currently running on will no longer change. This cuts
+ // the root set down a bit (g0 stacks are not scanned, and
+ // we don't need to scan gc's internal state). We also
+ // need to switch to g0 so we can shrink the stack.
+ systemstack(func() {
+ gcMark(startTime)
+ // Must return immediately.
+ // The outer function's stack may have moved
+ // during gcMark (it shrinks stacks, including the
+ // outer function's stack), so we must not refer
+ // to any of its variables. Return back to the
+ // non-system stack to pick up the new addresses
+ // before continuing.
+ })
+
+ systemstack(func() {
+ work.heap2 = work.bytesMarked
+ if debug.gccheckmark > 0 {
+ // Run a full non-parallel, stop-the-world
+ // mark using checkmark bits, to check that we
+ // didn't forget to mark anything during the
+ // concurrent mark process.
+ startCheckmarks()
+ gcResetMarkState()
+ gcw := &getg().m.p.ptr().gcw
+ gcDrain(gcw, 0)
+ wbBufFlush1(getg().m.p.ptr())
+ gcw.dispose()
+ endCheckmarks()
+ }
+
+ // Marking is complete, so we can turn the write barrier off.
+ setGCPhase(_GCoff)
+ gcSweep(work.mode)
+ })
+
+ _g_.m.traceback = 0
+ casgstatus(gp, _Gwaiting, _Grunning)
+
+ if trace.enabled {
+ traceGCDone()
+ }
+
+ // all done
+ mp.preemptoff = ""
+
+ if gcphase != _GCoff {
+ throw("gc done but gcphase != _GCoff")
+ }
+
+ // Record heapInUse for scavenger.
+ memstats.lastHeapInUse = gcController.heapInUse.load()
+
+ // Update GC trigger and pacing, as well as downstream consumers
+ // of this pacing information, for the next cycle.
+ systemstack(gcControllerCommit)
+
+ // Update timing memstats
+ now := nanotime()
+ sec, nsec, _ := time_now()
+ unixNow := sec*1e9 + int64(nsec)
+ work.pauseNS += now - work.pauseStart
+ work.tEnd = now
+ memstats.gcPauseDist.record(now - work.pauseStart)
+ atomic.Store64(&memstats.last_gc_unix, uint64(unixNow)) // must be Unix time to make sense to user
+ atomic.Store64(&memstats.last_gc_nanotime, uint64(now)) // monotonic time for us
+ memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
+ memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(unixNow)
+ memstats.pause_total_ns += uint64(work.pauseNS)
+
+ // Update work.totaltime.
+ sweepTermCpu := int64(work.stwprocs) * (work.tMark - work.tSweepTerm)
+ // We report idle marking time below, but omit it from the
+ // overall utilization here since it's "free".
+ markCpu := gcController.assistTime.Load() + gcController.dedicatedMarkTime + gcController.fractionalMarkTime
+ markTermCpu := int64(work.stwprocs) * (work.tEnd - work.tMarkTerm)
+ cycleCpu := sweepTermCpu + markCpu + markTermCpu
+ work.totaltime += cycleCpu
+
+ // Compute overall GC CPU utilization.
+ totalCpu := sched.totaltime + (now-sched.procresizetime)*int64(gomaxprocs)
+ memstats.gc_cpu_fraction = float64(work.totaltime) / float64(totalCpu)
+
+ // Reset assist time stat.
+ //
+ // Do this now, instead of at the start of the next GC cycle, because
+ // these two may keep accumulating even if the GC is not active.
+ mheap_.pages.scav.assistTime.Store(0)
+
+ // Reset sweep state.
+ sweep.nbgsweep = 0
+ sweep.npausesweep = 0
+
+ if work.userForced {
+ memstats.numforcedgc++
+ }
+
+ // Bump GC cycle count and wake goroutines waiting on sweep.
+ lock(&work.sweepWaiters.lock)
+ memstats.numgc++
+ injectglist(&work.sweepWaiters.list)
+ unlock(&work.sweepWaiters.lock)
+
+ // Release the CPU limiter.
+ gcCPULimiter.finishGCTransition(now)
+
+ // Finish the current heap profiling cycle and start a new
+ // heap profiling cycle. We do this before starting the world
+ // so events don't leak into the wrong cycle.
+ mProf_NextCycle()
+
+ // There may be stale spans in mcaches that need to be swept.
+ // Those aren't tracked in any sweep lists, so we need to
+ // count them against sweep completion until we ensure all
+ // those spans have been forced out.
+ sl := sweep.active.begin()
+ if !sl.valid {
+ throw("failed to set sweep barrier")
+ }
+
+ systemstack(func() { startTheWorldWithSema(true) })
+
+ // Flush the heap profile so we can start a new cycle next GC.
+ // This is relatively expensive, so we don't do it with the
+ // world stopped.
+ mProf_Flush()
+
+ // Prepare workbufs for freeing by the sweeper. We do this
+ // asynchronously because it can take non-trivial time.
+ prepareFreeWorkbufs()
+
+ // Free stack spans. This must be done between GC cycles.
+ systemstack(freeStackSpans)
+
+ // Ensure all mcaches are flushed. Each P will flush its own
+ // mcache before allocating, but idle Ps may not. Since this
+ // is necessary to sweep all spans, we need to ensure all
+ // mcaches are flushed before we start the next GC cycle.
+ systemstack(func() {
+ forEachP(func(_p_ *p) {
+ _p_.mcache.prepareForSweep()
+ })
+ })
+ // Now that we've swept stale spans in mcaches, they don't
+ // count against unswept spans.
+ sweep.active.end(sl)
+
+ // Print gctrace before dropping worldsema. As soon as we drop
+ // worldsema another cycle could start and smash the stats
+ // we're trying to print.
+ if debug.gctrace > 0 {
+ util := int(memstats.gc_cpu_fraction * 100)
+
+ var sbuf [24]byte
+ printlock()
+ print("gc ", memstats.numgc,
+ " @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
+ util, "%: ")
+ prev := work.tSweepTerm
+ for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} {
+ if i != 0 {
+ print("+")
+ }
+ print(string(fmtNSAsMS(sbuf[:], uint64(ns-prev))))
+ prev = ns
+ }
+ print(" ms clock, ")
+ for i, ns := range []int64{
+ sweepTermCpu,
+ gcController.assistTime.Load(),
+ gcController.dedicatedMarkTime + gcController.fractionalMarkTime,
+ gcController.idleMarkTime,
+ markTermCpu,
+ } {
+ if i == 2 || i == 3 {
+ // Separate mark time components with /.
+ print("/")
+ } else if i != 0 {
+ print("+")
+ }
+ print(string(fmtNSAsMS(sbuf[:], uint64(ns))))
+ }
+ print(" ms cpu, ",
+ work.heap0>>20, "->", work.heap1>>20, "->", work.heap2>>20, " MB, ",
+ gcController.lastHeapGoal>>20, " MB goal, ",
+ atomic.Load64(&gcController.maxStackScan)>>20, " MB stacks, ",
+ gcController.globalsScan>>20, " MB globals, ",
+ work.maxprocs, " P")
+ if work.userForced {
+ print(" (forced)")
+ }
+ print("\n")
+ printunlock()
+ }
+
+ semrelease(&worldsema)
+ semrelease(&gcsema)
+ // Careful: another GC cycle may start now.
+
+ releasem(mp)
+ mp = nil
+
+ // now that gc is done, kick off finalizer thread if needed
+ if !concurrentSweep {
+ // give the queued finalizers, if any, a chance to run
+ Gosched()
+ }
+}
+
+// gcBgMarkStartWorkers prepares background mark worker goroutines. These
+// goroutines will not run until the mark phase, but they must be started while
+// the world is not stopped and from a regular G stack. The caller must hold
+// worldsema.
+func gcBgMarkStartWorkers() {
+ // Background marking is performed by per-P G's. Ensure that each P has
+ // a background GC G.
+ //
+ // Worker Gs don't exit if gomaxprocs is reduced. If it is raised
+ // again, we can reuse the old workers; no need to create new workers.
+ for gcBgMarkWorkerCount < gomaxprocs {
+ go gcBgMarkWorker()
+
+ notetsleepg(&work.bgMarkReady, -1)
+ noteclear(&work.bgMarkReady)
+ // The worker is now guaranteed to be added to the pool before
+ // its P's next findRunnableGCWorker.
+
+ gcBgMarkWorkerCount++
+ }
+}
+
+// gcBgMarkPrepare sets up state for background marking.
+// Mutator assists must not yet be enabled.
+func gcBgMarkPrepare() {
+ // Background marking will stop when the work queues are empty
+ // and there are no more workers (note that, since this is
+ // concurrent, this may be a transient state, but mark
+ // termination will clean it up). Between background workers
+ // and assists, we don't really know how many workers there
+ // will be, so we pretend to have an arbitrarily large number
+ // of workers, almost all of which are "waiting". While a
+ // worker is working it decrements nwait. If nproc == nwait,
+ // there are no workers.
+ work.nproc = ^uint32(0)
+ work.nwait = ^uint32(0)
+}
+
+// gcBgMarkWorker is an entry in the gcBgMarkWorkerPool. It points to a single
+// gcBgMarkWorker goroutine.
+type gcBgMarkWorkerNode struct {
+ // Unused workers are managed in a lock-free stack. This field must be first.
+ node lfnode
+
+ // The g of this worker.
+ gp guintptr
+
+ // Release this m on park. This is used to communicate with the unlock
+ // function, which cannot access the G's stack. It is unused outside of
+ // gcBgMarkWorker().
+ m muintptr
+}
+
+func gcBgMarkWorker() {
+ gp := getg()
+
+ // We pass node to a gopark unlock function, so it can't be on
+ // the stack (see gopark). Prevent deadlock from recursively
+ // starting GC by disabling preemption.
+ gp.m.preemptoff = "GC worker init"
+ node := new(gcBgMarkWorkerNode)
+ gp.m.preemptoff = ""
+
+ node.gp.set(gp)
+
+ node.m.set(acquirem())
+ notewakeup(&work.bgMarkReady)
+ // After this point, the background mark worker is generally scheduled
+ // cooperatively by gcController.findRunnableGCWorker. While performing
+ // work on the P, preemption is disabled because we are working on
+ // P-local work buffers. When the preempt flag is set, the worker puts
+ // itself into _Gwaiting to be woken up by gcController.findRunnableGCWorker
+ // at the appropriate time.
+ //
+ // When preemption is enabled (e.g., while in gcMarkDone), this worker
+ // may be preempted and scheduled as a _Grunnable G from a runq. That is
+ // fine; it will eventually gopark again for further scheduling via
+ // findRunnableGCWorker.
+ //
+ // Since we disable preemption before notifying bgMarkReady, we
+ // guarantee that this G will be in the worker pool for the next
+ // findRunnableGCWorker. This isn't strictly necessary, but it reduces
+ // latency between _GCmark starting and the workers starting.
+
+ for {
+ // Go to sleep until woken by
+ // gcController.findRunnableGCWorker.
+ gopark(func(g *g, nodep unsafe.Pointer) bool {
+ node := (*gcBgMarkWorkerNode)(nodep)
+
+ if mp := node.m.ptr(); mp != nil {
+ // The worker G is no longer running; release
+ // the M.
+ //
+ // N.B. it is _safe_ to release the M as soon
+ // as we are no longer performing P-local mark
+ // work.
+ //
+ // However, since we cooperatively stop work
+ // when gp.preempt is set, if we releasem in
+ // the loop then the following call to gopark
+ // would immediately preempt the G. This is
+ // also safe, but inefficient: the G must
+ // schedule again only to enter gopark and park
+ // again. Thus, we defer the release until
+ // after parking the G.
+ releasem(mp)
+ }
+
+ // Release this G to the pool.
+ gcBgMarkWorkerPool.push(&node.node)
+ // Note that at this point, the G may immediately be
+ // rescheduled and may be running.
+ return true
+ }, unsafe.Pointer(node), waitReasonGCWorkerIdle, traceEvGoBlock, 0)
+
+ // Preemption must not occur here, or another G might see
+ // p.gcMarkWorkerMode.
+
+ // Disable preemption so we can use the gcw. If the
+ // scheduler wants to preempt us, we'll stop draining,
+ // dispose the gcw, and then preempt.
+ node.m.set(acquirem())
+ pp := gp.m.p.ptr() // P can't change with preemption disabled.
+
+ if gcBlackenEnabled == 0 {
+ println("worker mode", pp.gcMarkWorkerMode)
+ throw("gcBgMarkWorker: blackening not enabled")
+ }
+
+ if pp.gcMarkWorkerMode == gcMarkWorkerNotWorker {
+ throw("gcBgMarkWorker: mode not set")
+ }
+
+ startTime := nanotime()
+ pp.gcMarkWorkerStartTime = startTime
+ var trackLimiterEvent bool
+ if pp.gcMarkWorkerMode == gcMarkWorkerIdleMode {
+ trackLimiterEvent = pp.limiterEvent.start(limiterEventIdleMarkWork, startTime)
+ }
+
+ decnwait := atomic.Xadd(&work.nwait, -1)
+ if decnwait == work.nproc {
+ println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
+ throw("work.nwait was > work.nproc")
+ }
+
+ systemstack(func() {
+ // Mark our goroutine preemptible so its stack
+ // can be scanned. This lets two mark workers
+ // scan each other (otherwise, they would
+ // deadlock). We must not modify anything on
+ // the G stack. However, stack shrinking is
+ // disabled for mark workers, so it is safe to
+ // read from the G stack.
+ casgstatus(gp, _Grunning, _Gwaiting)
+ switch pp.gcMarkWorkerMode {
+ default:
+ throw("gcBgMarkWorker: unexpected gcMarkWorkerMode")
+ case gcMarkWorkerDedicatedMode:
+ gcDrain(&pp.gcw, gcDrainUntilPreempt|gcDrainFlushBgCredit)
+ if gp.preempt {
+ // We were preempted. This is
+ // a useful signal to kick
+ // everything out of the run
+ // queue so it can run
+ // somewhere else.
+ if drainQ, n := runqdrain(pp); n > 0 {
+ lock(&sched.lock)
+ globrunqputbatch(&drainQ, int32(n))
+ unlock(&sched.lock)
+ }
+ }
+ // Go back to draining, this time
+ // without preemption.
+ gcDrain(&pp.gcw, gcDrainFlushBgCredit)
+ case gcMarkWorkerFractionalMode:
+ gcDrain(&pp.gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit)
+ case gcMarkWorkerIdleMode:
+ gcDrain(&pp.gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit)
+ }
+ casgstatus(gp, _Gwaiting, _Grunning)
+ })
+
+ // Account for time and mark us as stopped.
+ now := nanotime()
+ duration := now - startTime
+ gcController.markWorkerStop(pp.gcMarkWorkerMode, duration)
+ if trackLimiterEvent {
+ pp.limiterEvent.stop(limiterEventIdleMarkWork, now)
+ }
+ if pp.gcMarkWorkerMode == gcMarkWorkerFractionalMode {
+ atomic.Xaddint64(&pp.gcFractionalMarkTime, duration)
+ }
+
+ // Was this the last worker and did we run out
+ // of work?
+ incnwait := atomic.Xadd(&work.nwait, +1)
+ if incnwait > work.nproc {
+ println("runtime: p.gcMarkWorkerMode=", pp.gcMarkWorkerMode,
+ "work.nwait=", incnwait, "work.nproc=", work.nproc)
+ throw("work.nwait > work.nproc")
+ }
+
+ // We'll releasem after this point and thus this P may run
+ // something else. We must clear the worker mode to avoid
+ // attributing the mode to a different (non-worker) G in
+ // traceGoStart.
+ pp.gcMarkWorkerMode = gcMarkWorkerNotWorker
+
+ // If this worker reached a background mark completion
+ // point, signal the main GC goroutine.
+ if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
+ // We don't need the P-local buffers here, allow
+ // preemption because we may schedule like a regular
+ // goroutine in gcMarkDone (block on locks, etc).
+ releasem(node.m.ptr())
+ node.m.set(nil)
+
+ gcMarkDone()
+ }
+ }
+}
+
+// gcMarkWorkAvailable reports whether executing a mark worker
+// on p is potentially useful. p may be nil, in which case it only
+// checks the global sources of work.
+func gcMarkWorkAvailable(p *p) bool {
+ if p != nil && !p.gcw.empty() {
+ return true
+ }
+ if !work.full.empty() {
+ return true // global work available
+ }
+ if work.markrootNext < work.markrootJobs {
+ return true // root scan work available
+ }
+ return false
+}
+
+// gcMark runs the mark (or, for concurrent GC, mark termination).
+// All gcWork caches must be empty.
+// STW is in effect at this point.
+func gcMark(startTime int64) {
+ if debug.allocfreetrace > 0 {
+ tracegc()
+ }
+
+ if gcphase != _GCmarktermination {
+ throw("in gcMark expecting to see gcphase as _GCmarktermination")
+ }
+ work.tstart = startTime
+
+ // Check that there's no marking work remaining.
+ if work.full != 0 || work.markrootNext < work.markrootJobs {
+ print("runtime: full=", hex(work.full), " next=", work.markrootNext, " jobs=", work.markrootJobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n")
+ panic("non-empty mark queue after concurrent mark")
+ }
+
+ if debug.gccheckmark > 0 {
+ // This is expensive when there's a large number of
+ // Gs, so only do it if checkmark is also enabled.
+ gcMarkRootCheck()
+ }
+ if work.full != 0 {
+ throw("work.full != 0")
+ }
+
+ // Drop allg snapshot. allgs may have grown, in which case
+ // this is the only reference to the old backing store and
+ // there's no need to keep it around.
+ work.stackRoots = nil
+
+ // Clear out buffers and double-check that all gcWork caches
+ // are empty. This should be ensured by gcMarkDone before we
+ // enter mark termination.
+ //
+ // TODO: We could clear out buffers just before mark if this
+ // has a non-negligible impact on STW time.
+ for _, p := range allp {
+ // The write barrier may have buffered pointers since
+ // the gcMarkDone barrier. However, since the barrier
+ // ensured all reachable objects were marked, all of
+ // these must be pointers to black objects. Hence we
+ // can just discard the write barrier buffer.
+ if debug.gccheckmark > 0 {
+ // For debugging, flush the buffer and make
+ // sure it really was all marked.
+ wbBufFlush1(p)
+ } else {
+ p.wbBuf.reset()
+ }
+
+ gcw := &p.gcw
+ if !gcw.empty() {
+ printlock()
+ print("runtime: P ", p.id, " flushedWork ", gcw.flushedWork)
+ if gcw.wbuf1 == nil {
+ print(" wbuf1=<nil>")
+ } else {
+ print(" wbuf1.n=", gcw.wbuf1.nobj)
+ }
+ if gcw.wbuf2 == nil {
+ print(" wbuf2=<nil>")
+ } else {
+ print(" wbuf2.n=", gcw.wbuf2.nobj)
+ }
+ print("\n")
+ throw("P has cached GC work at end of mark termination")
+ }
+ // There may still be cached empty buffers, which we
+ // need to flush since we're going to free them. Also,
+ // there may be non-zero stats because we allocated
+ // black after the gcMarkDone barrier.
+ gcw.dispose()
+ }
+
+ // Flush scanAlloc from each mcache since we're about to modify
+ // heapScan directly. If we were to flush this later, then scanAlloc
+ // might have incorrect information.
+ //
+ // Note that it's not important to retain this information; we know
+ // exactly what heapScan is at this point via scanWork.
+ for _, p := range allp {
+ c := p.mcache
+ if c == nil {
+ continue
+ }
+ c.scanAlloc = 0
+ }
+
+ // Reset controller state.
+ gcController.resetLive(work.bytesMarked)
+}
+
+// gcSweep must be called on the system stack because it acquires the heap
+// lock. See mheap for details.
+//
+// The world must be stopped.
+//
+//go:systemstack
+func gcSweep(mode gcMode) {
+ assertWorldStopped()
+
+ if gcphase != _GCoff {
+ throw("gcSweep being done but phase is not GCoff")
+ }
+
+ lock(&mheap_.lock)
+ mheap_.sweepgen += 2
+ sweep.active.reset()
+ mheap_.pagesSwept.Store(0)
+ mheap_.sweepArenas = mheap_.allArenas
+ mheap_.reclaimIndex.Store(0)
+ mheap_.reclaimCredit.Store(0)
+ unlock(&mheap_.lock)
+
+ sweep.centralIndex.clear()
+
+ if !_ConcurrentSweep || mode == gcForceBlockMode {
+ // Special case synchronous sweep.
+ // Record that no proportional sweeping has to happen.
+ lock(&mheap_.lock)
+ mheap_.sweepPagesPerByte = 0
+ unlock(&mheap_.lock)
+ // Sweep all spans eagerly.
+ for sweepone() != ^uintptr(0) {
+ sweep.npausesweep++
+ }
+ // Free workbufs eagerly.
+ prepareFreeWorkbufs()
+ for freeSomeWbufs(false) {
+ }
+ // All "free" events for this mark/sweep cycle have
+ // now happened, so we can make this profile cycle
+ // available immediately.
+ mProf_NextCycle()
+ mProf_Flush()
+ return
+ }
+
+ // Background sweep.
+ lock(&sweep.lock)
+ if sweep.parked {
+ sweep.parked = false
+ ready(sweep.g, 0, true)
+ }
+ unlock(&sweep.lock)
+}
+
+// gcResetMarkState resets global state prior to marking (concurrent
+// or STW) and resets the stack scan state of all Gs.
+//
+// This is safe to do without the world stopped because any Gs created
+// during or after this will start out in the reset state.
+//
+// gcResetMarkState must be called on the system stack because it acquires
+// the heap lock. See mheap for details.
+//
+//go:systemstack
+func gcResetMarkState() {
+ // This may be called during a concurrent phase, so lock to make sure
+ // allgs doesn't change.
+ forEachG(func(gp *g) {
+ gp.gcscandone = false // set to true in gcphasework
+ gp.gcAssistBytes = 0
+ })
+
+ // Clear page marks. This is just 1MB per 64GB of heap, so the
+ // time here is pretty trivial.
+ lock(&mheap_.lock)
+ arenas := mheap_.allArenas
+ unlock(&mheap_.lock)
+ for _, ai := range arenas {
+ ha := mheap_.arenas[ai.l1()][ai.l2()]
+ for i := range ha.pageMarks {
+ ha.pageMarks[i] = 0
+ }
+ }
+
+ work.bytesMarked = 0
+ work.initialHeapLive = atomic.Load64(&gcController.heapLive)
+}
+
+// Hooks for other packages
+
+var poolcleanup func()
+var boringCaches []unsafe.Pointer // for crypto/internal/boring
+
+//go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
+func sync_runtime_registerPoolCleanup(f func()) {
+ poolcleanup = f
+}
+
+//go:linkname boring_registerCache crypto/internal/boring/bcache.registerCache
+func boring_registerCache(p unsafe.Pointer) {
+ boringCaches = append(boringCaches, p)
+}
+
+func clearpools() {
+ // clear sync.Pools
+ if poolcleanup != nil {
+ poolcleanup()
+ }
+
+ // clear boringcrypto caches
+ for _, p := range boringCaches {
+ atomicstorep(p, nil)
+ }
+
+ // Clear central sudog cache.
+ // Leave per-P caches alone, they have strictly bounded size.
+ // Disconnect cached list before dropping it on the floor,
+ // so that a dangling ref to one entry does not pin all of them.
+ lock(&sched.sudoglock)
+ var sg, sgnext *sudog
+ for sg = sched.sudogcache; sg != nil; sg = sgnext {
+ sgnext = sg.next
+ sg.next = nil
+ }
+ sched.sudogcache = nil
+ unlock(&sched.sudoglock)
+
+ // Clear central defer pool.
+ // Leave per-P pools alone, they have strictly bounded size.
+ lock(&sched.deferlock)
+ // Disconnect cached list before dropping it on the floor,
+ // so that a dangling ref to one entry does not pin all of them.
+ var d, dlink *_defer
+ for d = sched.deferpool; d != nil; d = dlink {
+ dlink = d.link
+ d.link = nil
+ }
+ sched.deferpool = nil
+ unlock(&sched.deferlock)
+}
+
+// Timing
+
+// itoaDiv formats val/(10**dec) into buf.
+func itoaDiv(buf []byte, val uint64, dec int) []byte {
+ i := len(buf) - 1
+ idec := i - dec
+ for val >= 10 || i >= idec {
+ buf[i] = byte(val%10 + '0')
+ i--
+ if i == idec {
+ buf[i] = '.'
+ i--
+ }
+ val /= 10
+ }
+ buf[i] = byte(val + '0')
+ return buf[i:]
+}
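+
+// For example, itoaDiv(buf[:], 12345, 3) yields "12.345"; with dec == 0 it
+// degenerates to a plain itoa and yields "12345". The gctrace code above
+// uses it with dec == 3 to print a millisecond count as seconds.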
+
+// fmtNSAsMS nicely formats ns nanoseconds as milliseconds.
+func fmtNSAsMS(buf []byte, ns uint64) []byte {
+ if ns >= 10e6 {
+ // Format as whole milliseconds.
+ return itoaDiv(buf, ns/1e6, 0)
+ }
+ // Format two digits of precision, with at most three decimal places.
+ x := ns / 1e3
+ if x == 0 {
+ buf[0] = '0'
+ return buf[:1]
+ }
+ dec := 3
+ for x >= 100 {
+ x /= 10
+ dec--
+ }
+ return itoaDiv(buf, x, dec)
+}
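+
+// Worked examples: fmtNSAsMS(buf[:], 15e6) yields "15", while
+// fmtNSAsMS(buf[:], 2500000) takes x = 2500, reduces to itoaDiv(buf, 25, 1),
+// and yields "2.5".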
+
+// Helpers for testing GC.
+
+// gcTestMoveStackOnNextCall causes the stack to be moved on a call
+// immediately following the call to this. It may not work correctly
+// if any other work appears after this call (such as returning).
+// Typically the following call should be marked go:noinline so it
+// performs a stack check.
+//
+// In rare cases this may not cause the stack to move, specifically if
+// there's a preemption between this call and the next.
+func gcTestMoveStackOnNextCall() {
+ gp := getg()
+ gp.stackguard0 = stackForceMove
+}
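+
+// Typical use, sketched (checkAddrs is a hypothetical test helper marked
+// //go:noinline so that the call performs a stack check):
+//
+//	gcTestMoveStackOnNextCall()
+//	checkAddrs()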
+
+// gcTestIsReachable performs a GC and returns a bit set where bit i
+// is set if ptrs[i] is reachable.
+func gcTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
+ // This takes the pointers as unsafe.Pointers in order to keep
+ // them live long enough for us to attach specials. After
+ // that, we drop our references to them.
+
+ if len(ptrs) > 64 {
+ panic("too many pointers for uint64 mask")
+ }
+
+ // Block GC while we attach specials and drop our references
+ // to ptrs. Otherwise, if a GC is in progress, it could mark
+ // them reachable via this function before we have a chance to
+ // drop them.
+ semacquire(&gcsema)
+
+ // Create reachability specials for ptrs.
+ specials := make([]*specialReachable, len(ptrs))
+ for i, p := range ptrs {
+ lock(&mheap_.speciallock)
+ s := (*specialReachable)(mheap_.specialReachableAlloc.alloc())
+ unlock(&mheap_.speciallock)
+ s.special.kind = _KindSpecialReachable
+ if !addspecial(p, &s.special) {
+ throw("already have a reachable special (duplicate pointer?)")
+ }
+ specials[i] = s
+ // Make sure we don't retain ptrs.
+ ptrs[i] = nil
+ }
+
+ semrelease(&gcsema)
+
+ // Force a full GC and sweep.
+ GC()
+
+ // Process specials.
+ for i, s := range specials {
+ if !s.done {
+ printlock()
+ println("runtime: object", i, "was not swept")
+ throw("IsReachable failed")
+ }
+ if s.reachable {
+ mask |= 1 << i
+ }
+ lock(&mheap_.speciallock)
+ mheap_.specialReachableAlloc.free(unsafe.Pointer(s))
+ unlock(&mheap_.speciallock)
+ }
+
+ return mask
+}
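+
+// Usage sketch (x and y here are hypothetical live heap pointers):
+//
+//	mask := gcTestIsReachable(unsafe.Pointer(x), unsafe.Pointer(y))
+//	if mask&1 == 0 {
+//		// x was not reachable as of the forced GC.
+//	}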
+
+// gcTestPointerClass returns the category of what p points to, one of:
+// "heap", "stack", "data", "bss", "other". This is useful for checking
+// that a test is doing what it's intended to do.
+//
+// This is nosplit simply to avoid extra pointer shuffling that may
+// complicate a test.
+//
+//go:nosplit
+func gcTestPointerClass(p unsafe.Pointer) string {
+ p2 := uintptr(noescape(p))
+ gp := getg()
+ if gp.stack.lo <= p2 && p2 < gp.stack.hi {
+ return "stack"
+ }
+ if base, _, _ := findObject(p2, 0, 0); base != 0 {
+ return "heap"
+ }
+ for _, datap := range activeModules() {
+ if datap.data <= p2 && p2 < datap.edata || datap.noptrdata <= p2 && p2 < datap.enoptrdata {
+ return "data"
+ }
+ if datap.bss <= p2 && p2 < datap.ebss || datap.noptrbss <= p2 && p2 < datap.enoptrbss {
+ return "bss"
+ }
+ }
+ KeepAlive(p)
+ return "other"
+}
diff --git a/contrib/go/_std_1.19/src/runtime/mgclimit.go b/contrib/go/_std_1.19/src/runtime/mgclimit.go
new file mode 100644
index 0000000000..d94e471643
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mgclimit.go
@@ -0,0 +1,484 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "runtime/internal/atomic"
+
+// gcCPULimiter is a mechanism to limit GC CPU utilization in situations
+// where it might become excessive and inhibit application progress (e.g.
+// a death spiral).
+//
+// The core of the limiter is a leaky bucket mechanism that fills with GC
+// CPU time and drains with mutator time. Because the bucket fills and
+// drains with time directly (i.e. without any weighting), this effectively
+// sets a very conservative limit of 50%. This limit could be enforced directly;
+// however, the purpose of the bucket is to accommodate spikes in GC CPU
+// utilization without hurting throughput.
+//
+// Note that the bucket in the leaky bucket mechanism can never go negative,
+// so the GC never gets credit for a lot of CPU time spent without the GC
+// running. This is intentional: an application that stays idle for, say,
+// an entire day could otherwise build up enough credit that the limiter
+// would fail to prevent a death spiral the following day. The bucket's
+// capacity is the GC's only leeway.
+//
+// The capacity thus also sets the window the limiter considers. For example,
+// if the capacity of the bucket is 1 cpu-second, then the limiter will not
+// kick in until at least 1 full cpu-second in the last 2 cpu-second window
+// is spent on GC CPU time.
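+//
+// For example, with GOMAXPROCS=4 the bucket capacity is 4 cpu-seconds
+// (capacityPerProc per P), so GC CPU time must cumulatively exceed
+// mutator CPU time by 4 cpu-seconds before the limiter engages.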
+var gcCPULimiter gcCPULimiterState
+
+type gcCPULimiterState struct {
+ lock atomic.Uint32
+
+ enabled atomic.Bool
+ bucket struct {
+ // Invariants:
+ // - fill >= 0
+ // - capacity >= 0
+ // - fill <= capacity
+ fill, capacity uint64
+ }
+ // overflow is the cumulative amount of GC CPU time that we tried to fill the
+ // bucket with but exceeded its capacity.
+ overflow uint64
+
+ // gcEnabled is an internal copy of gcBlackenEnabled that determines
+ // whether the limiter tracks total assist time.
+ //
+ // gcBlackenEnabled isn't used directly so as to keep this structure
+ // unit-testable.
+ gcEnabled bool
+
+ // transitioning is true when the GC is in a STW and transitioning between
+ // the mark and sweep phases.
+ transitioning bool
+
+ _ uint32 // Align assistTimePool and lastUpdate on 32-bit platforms.
+
+ // assistTimePool is the accumulated assist time since the last update.
+ assistTimePool atomic.Int64
+
+ // idleMarkTimePool is the accumulated idle mark time since the last update.
+ idleMarkTimePool atomic.Int64
+
+ // idleTimePool is the accumulated time Ps spent on the idle list since the last update.
+ idleTimePool atomic.Int64
+
+ // lastUpdate is the nanotime timestamp of the last time update was called.
+ //
+ // Updated under lock, but may be read concurrently.
+ lastUpdate atomic.Int64
+
+ // lastEnabledCycle is the GC cycle that last had the limiter enabled.
+ lastEnabledCycle atomic.Uint32
+
+ // nprocs is an internal copy of gomaxprocs, used to determine total available
+ // CPU time.
+ //
+ // gomaxprocs isn't used directly so as to keep this structure unit-testable.
+ nprocs int32
+
+ // test indicates whether this instance of the struct was made for testing purposes.
+ test bool
+}
+
+// limiting returns true if the CPU limiter is currently enabled, meaning the Go GC
+// should take action to limit CPU utilization.
+//
+// It is safe to call concurrently with other operations.
+func (l *gcCPULimiterState) limiting() bool {
+ return l.enabled.Load()
+}
+
+// startGCTransition notifies the limiter of a GC transition.
+//
+// This call takes ownership of the limiter and disables all other means of
+// updating the limiter. Release ownership by calling finishGCTransition.
+//
+// It is safe to call concurrently with other operations.
+func (l *gcCPULimiterState) startGCTransition(enableGC bool, now int64) {
+ if !l.tryLock() {
+ // This must happen during a STW, so we can't fail to acquire the lock.
+ // If we did, something went wrong. Throw.
+ throw("failed to acquire lock to start a GC transition")
+ }
+ if l.gcEnabled == enableGC {
+ throw("transitioning GC to the same state as before?")
+ }
+ // Flush whatever was left between the last update and now.
+ l.updateLocked(now)
+ l.gcEnabled = enableGC
+ l.transitioning = true
+ // N.B. finishGCTransition releases the lock.
+ //
+ // We don't release here to increase the chance that if there's a failure
+ // to finish the transition, that we throw on failing to acquire the lock.
+}
+
+// finishGCTransition notifies the limiter that the GC transition is complete
+// and releases ownership of it. It also accumulates STW time in the bucket.
+// now must be the timestamp from the end of the STW pause.
+func (l *gcCPULimiterState) finishGCTransition(now int64) {
+ if !l.transitioning {
+ throw("finishGCTransition called without starting one?")
+ }
+ // Count the full nprocs set of CPU time because the world is stopped
+ // between startGCTransition and finishGCTransition. Even though the GC
+ // isn't running on all CPUs, it is preventing user code from doing so,
+ // so it might as well be.
+ if lastUpdate := l.lastUpdate.Load(); now >= lastUpdate {
+ l.accumulate(0, (now-lastUpdate)*int64(l.nprocs))
+ }
+ l.lastUpdate.Store(now)
+ l.transitioning = false
+ l.unlock()
+}
+
+// gcCPULimiterUpdatePeriod dictates the maximum amount of wall-clock time
+// we can go before updating the limiter.
+const gcCPULimiterUpdatePeriod = 10e6 // 10ms
+
+// needUpdate returns true if the limiter's maximum update period has been
+// exceeded, and so would benefit from an update.
+func (l *gcCPULimiterState) needUpdate(now int64) bool {
+ return now-l.lastUpdate.Load() > gcCPULimiterUpdatePeriod
+}
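+
+// A typical polling pattern, sketched (actual call sites vary):
+//
+//	if now := nanotime(); gcCPULimiter.needUpdate(now) {
+//		gcCPULimiter.update(now)
+//	}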
+
+// addAssistTime notifies the limiter of additional assist time. It will be
+// included in the next update.
+func (l *gcCPULimiterState) addAssistTime(t int64) {
+ l.assistTimePool.Add(t)
+}
+
+// addIdleTime notifies the limiter of additional time a P spent on the idle list. It will be
+// subtracted from the total CPU time in the next update.
+func (l *gcCPULimiterState) addIdleTime(t int64) {
+ l.idleTimePool.Add(t)
+}
+
+// update updates the bucket given runtime-specific information. now is the
+// current monotonic time in nanoseconds.
+//
+// This is safe to call concurrently with other operations, except *GCTransition.
+func (l *gcCPULimiterState) update(now int64) {
+ if !l.tryLock() {
+ // We failed to acquire the lock, which means something else is currently
+ // updating. Just drop our update, the next one to update will include
+ // our total assist time.
+ return
+ }
+ if l.transitioning {
+ throw("update during transition")
+ }
+ l.updateLocked(now)
+ l.unlock()
+}
+
+// updateLocked is the implementation of update. l.lock must be held.
+func (l *gcCPULimiterState) updateLocked(now int64) {
+ lastUpdate := l.lastUpdate.Load()
+ if now < lastUpdate {
+ // Defensively avoid overflow. This isn't even the latest update anyway.
+ return
+ }
+ windowTotalTime := (now - lastUpdate) * int64(l.nprocs)
+ l.lastUpdate.Store(now)
+
+ // Drain the pool of assist time.
+ assistTime := l.assistTimePool.Load()
+ if assistTime != 0 {
+ l.assistTimePool.Add(-assistTime)
+ }
+
+ // Drain the pool of idle time.
+ idleTime := l.idleTimePool.Load()
+ if idleTime != 0 {
+ l.idleTimePool.Add(-idleTime)
+ }
+
+ if !l.test {
+ // Consume time from in-flight events. Make sure we're not preemptible so allp can't change.
+ //
+ // The reason we do this instead of just waiting for those events to finish and push updates
+ // is to ensure that all the time we're accounting for happened sometime between lastUpdate
+ // and now. This dramatically simplifies reasoning about the limiter: we are
+ // never at risk of accounting for more time in this window than actually
+ // elapsed in it, which could lead to all sorts of weird transient behavior.
+ mp := acquirem()
+ for _, pp := range allp {
+ typ, duration := pp.limiterEvent.consume(now)
+ switch typ {
+ case limiterEventIdleMarkWork:
+ fallthrough
+ case limiterEventIdle:
+ idleTime += duration
+ case limiterEventMarkAssist:
+ fallthrough
+ case limiterEventScavengeAssist:
+ assistTime += duration
+ case limiterEventNone:
+ break
+ default:
+ throw("invalid limiter event type found")
+ }
+ }
+ releasem(mp)
+ }
+
+ // Compute total GC time.
+ windowGCTime := assistTime
+ if l.gcEnabled {
+ windowGCTime += int64(float64(windowTotalTime) * gcBackgroundUtilization)
+ }
+
+ // Subtract out all idle time from the total time. Do this after computing
+ // GC time, because the background utilization is dependent on the *real*
+ // total time, not the total time after idle time is subtracted.
+ //
+ // Idle time is counted as any time that a P is on the P idle list plus idle mark
+ // time. Idle mark workers soak up time that the application spends idle.
+ //
+ // On a heavily undersubscribed system, any additional idle time can skew GC CPU
+ // utilization, because the GC might be executing continuously and thrashing,
+ // yet the CPU utilization with respect to GOMAXPROCS will be quite low, so
+ // the limiter fails to turn on. By subtracting idle time, we're removing time that
+ // we know the application was idle giving a more accurate picture of whether
+ // the GC is thrashing.
+ //
+ // Note that this can cause the limiter to turn on even if it's not needed. For
+ // instance, on a system with 32 Ps but only 1 running goroutine, each GC will have
+ // 8 dedicated GC workers. Assuming the GC cycle is half mark phase and half sweep
+ // phase, then the GC CPU utilization over that cycle, with idle time removed, will
+ // be 8/(8+2) = 80%. Even though the limiter turns on, though, assist should be
+ // unnecessary, as the GC has way more CPU time to outpace the 1 goroutine that's
+ // running.
+ windowTotalTime -= idleTime
+
+ l.accumulate(windowTotalTime-windowGCTime, windowGCTime)
+}
+
+// accumulate adds time to the bucket and signals whether the limiter is enabled.
+//
+// This is an internal function that deals just with the bucket. Prefer update.
+// l.lock must be held.
+func (l *gcCPULimiterState) accumulate(mutatorTime, gcTime int64) {
+ headroom := l.bucket.capacity - l.bucket.fill
+ enabled := headroom == 0
+
+ // Let's be careful about three things here:
+ // 1. The addition and subtraction, for the invariants.
+ // 2. Overflow.
+ // 3. Excessive mutation of l.enabled, which is accessed
+ // by all assists, potentially more than once.
+ change := gcTime - mutatorTime
+
+ // Handle limiting case.
+ if change > 0 && headroom <= uint64(change) {
+ l.overflow += uint64(change) - headroom
+ l.bucket.fill = l.bucket.capacity
+ if !enabled {
+ l.enabled.Store(true)
+ l.lastEnabledCycle.Store(memstats.numgc + 1)
+ }
+ return
+ }
+
+ // Handle non-limiting cases.
+ if change < 0 && l.bucket.fill <= uint64(-change) {
+ // Bucket emptied.
+ l.bucket.fill = 0
+ } else {
+ // All other cases.
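+ // Note that if change > 0 here, change is less than headroom (the
+ // limiting case above handled the rest), so uint64(-change) wraps and
+ // the unsigned subtraction below is equivalent to fill += change.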
+ l.bucket.fill -= uint64(-change)
+ }
+ if change != 0 && enabled {
+ l.enabled.Store(false)
+ }
+}
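+
+// A worked example with illustrative numbers: if capacity = 4e9 and
+// fill = 3.8e9 (headroom 0.2e9), then accumulate(5e8, 1e9) computes
+// change = 5e8 >= headroom, so fill saturates at capacity, overflow
+// grows by 3e8, and the limiter flips to enabled.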
+
+// tryLock attempts to lock l. Returns true on success.
+func (l *gcCPULimiterState) tryLock() bool {
+ return l.lock.CompareAndSwap(0, 1)
+}
+
+// unlock releases the lock on l. Must be called if tryLock returns true.
+func (l *gcCPULimiterState) unlock() {
+ old := l.lock.Swap(0)
+ if old != 1 {
+ throw("double unlock")
+ }
+}
+
+// capacityPerProc is the limiter's bucket capacity for each P in GOMAXPROCS.
+const capacityPerProc = 1e9 // 1 second in nanoseconds
+
+// resetCapacity updates the capacity based on GOMAXPROCS. Must not be called
+// while the GC is enabled.
+//
+// It is safe to call concurrently with other operations.
+func (l *gcCPULimiterState) resetCapacity(now int64, nprocs int32) {
+ if !l.tryLock() {
+ // This must happen during a STW, so we can't fail to acquire the lock.
+ // If we did, something went wrong. Throw.
+ throw("failed to acquire lock to reset capacity")
+ }
+ // Flush the rest of the time for this period.
+ l.updateLocked(now)
+ l.nprocs = nprocs
+
+ l.bucket.capacity = uint64(nprocs) * capacityPerProc
+ if l.bucket.fill > l.bucket.capacity {
+ l.bucket.fill = l.bucket.capacity
+ l.enabled.Store(true)
+ l.lastEnabledCycle.Store(memstats.numgc + 1)
+ } else if l.bucket.fill < l.bucket.capacity {
+ l.enabled.Store(false)
+ }
+ l.unlock()
+}
+
+// limiterEventType indicates the type of an event occurring on some P.
+//
+// These events represent the full set of events that the GC CPU limiter tracks
+// to execute its function.
+//
+// This type may use no more than limiterEventBits bits of information.
+type limiterEventType uint8
+
+const (
+ limiterEventNone limiterEventType = iota // None of the following events.
+ limiterEventIdleMarkWork // Refers to an idle mark worker (see gcMarkWorkerMode).
+ limiterEventMarkAssist // Refers to mark assist (see gcAssistAlloc).
+ limiterEventScavengeAssist // Refers to a scavenge assist (see allocSpan).
+ limiterEventIdle // Refers to time a P spent on the idle list.
+
+ limiterEventBits = 3
+)
+
+// limiterEventTypeMask is a mask for the bits in p.limiterEventStart that represent
+// the event type. The rest of the bits of that field represent a timestamp.
+const (
+ limiterEventTypeMask = uint64((1<<limiterEventBits)-1) << (64 - limiterEventBits)
+ limiterEventStampNone = limiterEventStamp(0)
+)
+
+// limiterEventStamp is a nanotime timestamp packed with a limiterEventType.
+type limiterEventStamp uint64
+
+// makeLimiterEventStamp creates a new stamp from the event type and the current timestamp.
+func makeLimiterEventStamp(typ limiterEventType, now int64) limiterEventStamp {
+ return limiterEventStamp(uint64(typ)<<(64-limiterEventBits) | (uint64(now) &^ limiterEventTypeMask))
+}
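+
+// For illustration: with limiterEventBits = 3 the stamp layout is
+//
+//	[ 3 bits: limiterEventType | 61 bits: low bits of now ]
+//
+// so makeLimiterEventStamp(limiterEventIdle, now) stores 4 (0b100) in the
+// top three bits and the low 61 bits of now beneath it.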
+
+// duration computes the difference between now and the start time stored in the stamp.
+//
+// Returns 0 if the difference is negative, which may happen if now is stale or if the
+// before and after timestamps cross a 2^(64-limiterEventBits) boundary.
+func (s limiterEventStamp) duration(now int64) int64 {
+ // The top limiterEventBits bits of the timestamp are derived from the current time
+ // when computing a duration.
+ start := int64((uint64(now) & limiterEventTypeMask) | (uint64(s) &^ limiterEventTypeMask))
+ if now < start {
+ return 0
+ }
+ return now - start
+}
+
+// typ extracts the event type from the stamp.
+func (s limiterEventStamp) typ() limiterEventType {
+ return limiterEventType(s >> (64 - limiterEventBits))
+}
+
+// limiterEvent represents tracking state for an event tracked by the GC CPU limiter.
+type limiterEvent struct {
+ stamp atomic.Uint64 // Stores a limiterEventStamp.
+}
+
+// start begins tracking a new limiter event of type typ. If an event
+// is already in flight, then a new event cannot begin because the current time is
+// already being attributed to that event. In this case, this function returns false.
+// Otherwise, it returns true.
+//
+// The caller must be non-preemptible until at least stop is called or this function
+// returns false. Because this is trying to measure "on-CPU" time of some event, getting
+// scheduled away during it can mean that whatever we're measuring isn't a reflection
+// of "on-CPU" time. The OS could deschedule us at any time, but we want to maintain as
+// close of an approximation as we can.
+func (e *limiterEvent) start(typ limiterEventType, now int64) bool {
+ if limiterEventStamp(e.stamp.Load()).typ() != limiterEventNone {
+ return false
+ }
+ e.stamp.Store(uint64(makeLimiterEventStamp(typ, now)))
+ return true
+}
+
+// consume acquires the partial event CPU time from any in-flight event.
+// It achieves this by storing the current time as the new event time.
+//
+// Returns the type of the in-flight event, as well as how long it's currently been
+// executing for. Returns limiterEventNone if no event is active.
+func (e *limiterEvent) consume(now int64) (typ limiterEventType, duration int64) {
+ // Read the limiter event timestamp and update it to now.
+ for {
+ old := limiterEventStamp(e.stamp.Load())
+ typ = old.typ()
+ if typ == limiterEventNone {
+ // There's no in-flight event, so just push that up.
+ return
+ }
+ duration = old.duration(now)
+ if duration == 0 {
+ // We might have a stale now value, or this crossed the
+ // 2^(64-limiterEventBits) boundary in the clock readings.
+ // Just ignore it.
+ return limiterEventNone, 0
+ }
+ new := makeLimiterEventStamp(typ, now)
+ if e.stamp.CompareAndSwap(uint64(old), uint64(new)) {
+ break
+ }
+ }
+ return
+}
+
+// stop stops the active limiter event. Throws if the event in flight does
+// not match typ.
+//
+// The caller must be non-preemptible across the event. See start as to why.
+func (e *limiterEvent) stop(typ limiterEventType, now int64) {
+ var stamp limiterEventStamp
+ for {
+ stamp = limiterEventStamp(e.stamp.Load())
+ if stamp.typ() != typ {
+ print("runtime: want=", typ, " got=", stamp.typ(), "\n")
+ throw("limiterEvent.stop: found wrong event in p's limiter event slot")
+ }
+ if e.stamp.CompareAndSwap(uint64(stamp), uint64(limiterEventStampNone)) {
+ break
+ }
+ }
+ duration := stamp.duration(now)
+ if duration == 0 {
+ // It's possible that we're missing time because we crossed a
+ // 2^(64-limiterEventBits) boundary between the start and end.
+ // In this case, we're dropping that information. This is OK because
+ // at worst it'll cause a transient hiccup that will quickly resolve
+ // itself as all new timestamps begin on the other side of the boundary.
+ // Such a hiccup should be incredibly rare.
+ return
+ }
+ // Account for the event.
+ switch typ {
+ case limiterEventIdleMarkWork:
+ fallthrough
+ case limiterEventIdle:
+ gcCPULimiter.addIdleTime(duration)
+ case limiterEventMarkAssist:
+ fallthrough
+ case limiterEventScavengeAssist:
+ gcCPULimiter.addAssistTime(duration)
+ default:
+ throw("limiterEvent.stop: invalid limiter event type found")
+ }
+}
diff --git a/contrib/go/_std_1.19/src/runtime/mgcmark.go b/contrib/go/_std_1.19/src/runtime/mgcmark.go
new file mode 100644
index 0000000000..74637072c5
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mgcmark.go
@@ -0,0 +1,1601 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector: marking and scanning
+
+package runtime
+
+import (
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+const (
+ fixedRootFinalizers = iota
+ fixedRootFreeGStacks
+ fixedRootCount
+
+ // rootBlockBytes is the number of bytes to scan per data or
+ // BSS root.
+ rootBlockBytes = 256 << 10
+
+ // maxObletBytes is the maximum bytes of an object to scan at
+ // once. Larger objects will be split up into "oblets" of at
+ // most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
+ // scan preemption at ~100 µs.
+ //
+ // This must be > _MaxSmallSize so that the object base is the
+ // span base.
+ maxObletBytes = 128 << 10
+
+ // drainCheckThreshold specifies how many units of work to do
+ // between self-preemption checks in gcDrain. Assuming a scan
+ // rate of 1 MB/ms, this is ~100 µs. Lower values have higher
+ // overhead in the scan loop (the scheduler check may perform
+ // a syscall, so its overhead is nontrivial). Higher values
+ // make the system less responsive to incoming work.
+ drainCheckThreshold = 100000
+
+ // pagesPerSpanRoot indicates how many pages to scan from a span root
+ // at a time. Used by special root marking.
+ //
+ // Higher values improve throughput by increasing locality, but
+ // increase the minimum latency of a marking operation.
+ //
+ // Must be a multiple of the pageInUse bitmap element size and
+ // must also evenly divide pagesPerArena.
+ pagesPerSpanRoot = 512
+)
+
+// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
+// some miscellany) and initializes scanning-related state.
+//
+// The world must be stopped.
+func gcMarkRootPrepare() {
+ assertWorldStopped()
+
+ // Compute how many data and BSS root blocks there are.
+ nBlocks := func(bytes uintptr) int {
+ return int(divRoundUp(bytes, rootBlockBytes))
+ }
+
+ work.nDataRoots = 0
+ work.nBSSRoots = 0
+
+ // Scan globals.
+ for _, datap := range activeModules() {
+ nDataRoots := nBlocks(datap.edata - datap.data)
+ if nDataRoots > work.nDataRoots {
+ work.nDataRoots = nDataRoots
+ }
+ }
+
+ for _, datap := range activeModules() {
+ nBSSRoots := nBlocks(datap.ebss - datap.bss)
+ if nBSSRoots > work.nBSSRoots {
+ work.nBSSRoots = nBSSRoots
+ }
+ }
+
+ // Scan span roots for finalizer specials.
+ //
+ // We depend on addfinalizer to mark objects that get
+ // finalizers after root marking.
+ //
+ // We're going to scan the whole heap (that was available at the time the
+ // mark phase started, i.e. markArenas) for in-use spans which have specials.
+ //
+ // Break up the work into arenas, and further into chunks.
+ //
+ // Snapshot allArenas as markArenas. This snapshot is safe because allArenas
+ // is append-only.
+ mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
+ work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)
+
+ // Scan stacks.
+ //
+ // Gs may be created after this point, but it's okay that we
+ // ignore them because they begin life without any roots, so
+ // there's nothing to scan, and any roots they create during
+ // the concurrent phase will be caught by the write barrier.
+ work.stackRoots = allGsSnapshot()
+ work.nStackRoots = len(work.stackRoots)
+
+ work.markrootNext = 0
+ work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
+
+ // Calculate base indexes of each root type
+ work.baseData = uint32(fixedRootCount)
+ work.baseBSS = work.baseData + uint32(work.nDataRoots)
+ work.baseSpans = work.baseBSS + uint32(work.nBSSRoots)
+ work.baseStacks = work.baseSpans + uint32(work.nSpanRoots)
+ work.baseEnd = work.baseStacks + uint32(work.nStackRoots)
+}
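+
+// exampleRootJobKind is an illustrative sketch (a hypothetical helper,
+// not part of the original Go source) of how the flat markroot job
+// index space prepared above is decoded: fixed roots come first, then
+// data blocks, BSS blocks, span shards, and finally stacks. markroot
+// performs the same classification with range checks against the
+// work.base* indexes.
+func exampleRootJobKind(i, baseData, baseBSS, baseSpans, baseStacks, baseEnd uint32) string {
+ switch {
+ case i < baseData:
+ return "fixed root (finalizers or free G stacks)"
+ case i < baseBSS:
+ return "data segment block"
+ case i < baseSpans:
+ return "BSS segment block"
+ case i < baseStacks:
+ return "span shard (finalizer specials)"
+ case i < baseEnd:
+ return "goroutine stack"
+ default:
+ return "out of range"
+ }
+}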
+
+// gcMarkRootCheck checks that all roots have been scanned. It is
+// purely for debugging.
+func gcMarkRootCheck() {
+ if work.markrootNext < work.markrootJobs {
+ print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
+ throw("left over markroot jobs")
+ }
+
+ // Check that stacks have been scanned.
+ //
+ // We only check the first nStackRoots Gs that we should have scanned.
+ // Since we don't care about newer Gs (see comment in
+ // gcMarkRootPrepare), no locking is required.
+ i := 0
+ forEachGRace(func(gp *g) {
+ if i >= work.nStackRoots {
+ return
+ }
+
+ if !gp.gcscandone {
+ println("gp", gp, "goid", gp.goid,
+ "status", readgstatus(gp),
+ "gcscandone", gp.gcscandone)
+ throw("scan missed a g")
+ }
+
+ i++
+ })
+}
+
+// ptrmask for an allocation containing a single pointer.
+var oneptrmask = [...]uint8{1}
+
+// markroot scans the i'th root.
+//
+// Preemption must be disabled (because this uses a gcWork).
+//
+// Returns the amount of GC work credit produced by the operation.
+// If flushBgCredit is true, then that credit is also flushed
+// to the background credit pool.
+//
+// nowritebarrier is only advisory here.
+//
+//go:nowritebarrier
+func markroot(gcw *gcWork, i uint32, flushBgCredit bool) int64 {
+ // Note: if you add a case here, please also update heapdump.go:dumproots.
+ var workDone int64
+ var workCounter *atomic.Int64
+ switch {
+ case work.baseData <= i && i < work.baseBSS:
+ workCounter = &gcController.globalsScanWork
+ for _, datap := range activeModules() {
+ workDone += markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-work.baseData))
+ }
+
+ case work.baseBSS <= i && i < work.baseSpans:
+ workCounter = &gcController.globalsScanWork
+ for _, datap := range activeModules() {
+ workDone += markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-work.baseBSS))
+ }
+
+ case i == fixedRootFinalizers:
+ for fb := allfin; fb != nil; fb = fb.alllink {
+ cnt := uintptr(atomic.Load(&fb.cnt))
+ scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
+ }
+
+ case i == fixedRootFreeGStacks:
+ // Switch to the system stack so we can call
+ // stackfree.
+ systemstack(markrootFreeGStacks)
+
+ case work.baseSpans <= i && i < work.baseStacks:
+ // mark mspan.specials
+ markrootSpans(gcw, int(i-work.baseSpans))
+
+ default:
+ // the rest is scanning goroutine stacks
+ workCounter = &gcController.stackScanWork
+ if i < work.baseStacks || work.baseEnd <= i {
+ printlock()
+ print("runtime: markroot index ", i, " not in stack roots range [", work.baseStacks, ", ", work.baseEnd, ")\n")
+ throw("markroot: bad index")
+ }
+ gp := work.stackRoots[i-work.baseStacks]
+
+ // Remember when we first observed the G blocked.
+ // This is needed only for traceback output.
+ status := readgstatus(gp) // We are not in a scan state
+ if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
+ gp.waitsince = work.tstart
+ }
+
+ // scanstack must be done on the system stack in case
+ // we're trying to scan our own stack.
+ systemstack(func() {
+ // If this is a self-scan, put the user G in
+ // _Gwaiting to prevent self-deadlock. It may
+ // already be in _Gwaiting if this is a mark
+ // worker or we're in mark termination.
+ userG := getg().m.curg
+ selfScan := gp == userG && readgstatus(userG) == _Grunning
+ if selfScan {
+ casgstatus(userG, _Grunning, _Gwaiting)
+ userG.waitreason = waitReasonGarbageCollectionScan
+ }
+
+ // TODO: suspendG blocks (and spins) until gp
+ // stops, which may take a while for
+ // running goroutines. Consider doing this in
+ // two phases where the first is non-blocking:
+ // we scan the stacks we can and ask running
+ // goroutines to scan themselves; and the
+ // second blocks.
+ stopped := suspendG(gp)
+ if stopped.dead {
+ gp.gcscandone = true
+ return
+ }
+ if gp.gcscandone {
+ throw("g already scanned")
+ }
+ workDone += scanstack(gp, gcw)
+ gp.gcscandone = true
+ resumeG(stopped)
+
+ if selfScan {
+ casgstatus(userG, _Gwaiting, _Grunning)
+ }
+ })
+ }
+ if workCounter != nil && workDone != 0 {
+ workCounter.Add(workDone)
+ if flushBgCredit {
+ gcFlushBgCredit(workDone)
+ }
+ }
+ return workDone
+}
+
+// markrootBlock scans the shard'th shard of the block of memory [b0,
+// b0+n0), with the given pointer mask.
+//
+// Returns the amount of work done.
+//
+//go:nowritebarrier
+func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) int64 {
+ if rootBlockBytes%(8*goarch.PtrSize) != 0 {
+ // This is necessary to pick byte offsets in ptrmask0.
+ throw("rootBlockBytes must be a multiple of 8*ptrSize")
+ }
+
+ // Note that if b0 is toward the end of the address space,
+ // then b0 + rootBlockBytes might wrap around.
+ // These tests are written to avoid any possible overflow.
+ off := uintptr(shard) * rootBlockBytes
+ if off >= n0 {
+ return 0
+ }
+ b := b0 + off
+ ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*goarch.PtrSize))))
+ n := uintptr(rootBlockBytes)
+ if off+n > n0 {
+ n = n0 - off
+ }
+
+ // Scan this shard.
+ scanblock(b, n, ptrmask, gcw, nil)
+ return int64(n)
+}
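+
+// exampleShardExtent is an illustrative sketch (a hypothetical helper,
+// not part of the original Go source) of the per-shard arithmetic above:
+// shard k covers bytes [k*rootBlockBytes, min((k+1)*rootBlockBytes, n0))
+// of the root block, and shards past the end scan nothing. For a
+// hypothetical 600 KiB segment with 256 KiB blocks, shard 2 covers the
+// final 88 KiB.
+func exampleShardExtent(n0 uintptr, shard int) (off, n uintptr) {
+ off = uintptr(shard) * rootBlockBytes
+ if off >= n0 {
+ return 0, 0 // shard is entirely past the end
+ }
+ n = rootBlockBytes
+ if off+n > n0 {
+ n = n0 - off // clamp the final, partial shard
+ }
+ return off, n
+}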
+
+// markrootFreeGStacks frees stacks of dead Gs.
+//
+// This does not free stacks of dead Gs cached on Ps, but having a few
+// cached stacks around isn't a problem.
+func markrootFreeGStacks() {
+ // Take list of dead Gs with stacks.
+ lock(&sched.gFree.lock)
+ list := sched.gFree.stack
+ sched.gFree.stack = gList{}
+ unlock(&sched.gFree.lock)
+ if list.empty() {
+ return
+ }
+
+ // Free stacks.
+ q := gQueue{list.head, list.head}
+ for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
+ stackfree(gp.stack)
+ gp.stack.lo = 0
+ gp.stack.hi = 0
+ // Manipulate the queue directly since the Gs are
+ // already all linked the right way.
+ q.tail.set(gp)
+ }
+
+ // Put Gs back on the free list.
+ lock(&sched.gFree.lock)
+ sched.gFree.noStack.pushAll(q)
+ unlock(&sched.gFree.lock)
+}
+
+// markrootSpans marks roots for one shard of markArenas.
+//
+//go:nowritebarrier
+func markrootSpans(gcw *gcWork, shard int) {
+ // Objects with finalizers have two GC-related invariants:
+ //
+ // 1) Everything reachable from the object must be marked.
+ // This ensures that when we pass the object to its finalizer,
+ // everything the finalizer can reach will be retained.
+ //
+ // 2) Finalizer specials (which are not in the garbage
+ // collected heap) are roots. In practice, this means the fn
+ // field must be scanned.
+ sg := mheap_.sweepgen
+
+ // Find the arena and page index into that arena for this shard.
+ ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)]
+ ha := mheap_.arenas[ai.l1()][ai.l2()]
+ arenaPage := uint(uintptr(shard) * pagesPerSpanRoot % pagesPerArena)
+
+ // Construct slice of bitmap which we'll iterate over.
+ specialsbits := ha.pageSpecials[arenaPage/8:]
+ specialsbits = specialsbits[:pagesPerSpanRoot/8]
+ for i := range specialsbits {
+ // Find set bits, which correspond to spans with specials.
+ specials := atomic.Load8(&specialsbits[i])
+ if specials == 0 {
+ continue
+ }
+ for j := uint(0); j < 8; j++ {
+ if specials&(1<<j) == 0 {
+ continue
+ }
+ // Find the span for this bit.
+ //
+ // This value is guaranteed to be non-nil because having
+ // specials implies that the span is in-use, and since we're
+ // currently marking we can be sure that we don't have to worry
+ // about the span being freed and re-used.
+ s := ha.spans[arenaPage+uint(i)*8+j]
+
+ // The state must be mSpanInUse if the specials bit is set, so
+ // sanity check that.
+ if state := s.state.get(); state != mSpanInUse {
+ print("s.state = ", state, "\n")
+ throw("non in-use span found with specials bit set")
+ }
+ // Check that this span was swept (it may be cached or uncached).
+ if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
+ // sweepgen was updated (+2) during non-checkmark GC pass
+ print("sweep ", s.sweepgen, " ", sg, "\n")
+ throw("gc: unswept span")
+ }
+
+ // Lock the specials to prevent a special from being
+ // removed from the list while we're traversing it.
+ lock(&s.speciallock)
+ for sp := s.specials; sp != nil; sp = sp.next {
+ if sp.kind != _KindSpecialFinalizer {
+ continue
+ }
+ // don't mark finalized object, but scan it so we
+ // retain everything it points to.
+ spf := (*specialfinalizer)(unsafe.Pointer(sp))
+ // A finalizer can be set for an inner byte of an object; find the object's beginning.
+ p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
+
+ // Mark everything that can be reached from
+ // the object (but *not* the object itself or
+ // we'll never collect it).
+ scanobject(p, gcw)
+
+ // The special itself is a root.
+ scanblock(uintptr(unsafe.Pointer(&spf.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
+ }
+ unlock(&s.speciallock)
+ }
+ }
+}
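+
+// exampleSetBits is an illustrative sketch (a hypothetical helper, not
+// part of the original Go source) of the bitmap walk above: each byte of
+// the specials bitmap covers eight pages, and only pages whose bit is
+// set (i.e. whose spans carry specials) are visited.
+func exampleSetBits(bitmap []uint8, visit func(page uint)) {
+ for i, b := range bitmap {
+ if b == 0 {
+ continue // no specials in these eight pages
+ }
+ for j := uint(0); j < 8; j++ {
+ if b&(1<<j) != 0 {
+ visit(uint(i)*8 + j)
+ }
+ }
+ }
+}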
+
+// gcAssistAlloc performs GC work to make gp's assist debt positive.
+// gp must be the calling user goroutine.
+//
+// This must be called with preemption enabled.
+func gcAssistAlloc(gp *g) {
+ // Don't assist in non-preemptible contexts. These are
+ // generally fragile and won't allow the assist to block.
+ if getg() == gp.m.g0 {
+ return
+ }
+ if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
+ return
+ }
+
+ traced := false
+retry:
+ if go119MemoryLimitSupport && gcCPULimiter.limiting() {
+ // If the CPU limiter is enabled, intentionally don't
+ // assist to reduce the amount of CPU time spent in the GC.
+ if traced {
+ traceGCMarkAssistDone()
+ }
+ return
+ }
+ // Compute the amount of scan work we need to do to make the
+ // balance positive. When the required amount of work is low,
+ // we over-assist to build up credit for future allocations
+ // and amortize the cost of assisting.
+ assistWorkPerByte := gcController.assistWorkPerByte.Load()
+ assistBytesPerWork := gcController.assistBytesPerWork.Load()
+ debtBytes := -gp.gcAssistBytes
+ scanWork := int64(assistWorkPerByte * float64(debtBytes))
+ if scanWork < gcOverAssistWork {
+ scanWork = gcOverAssistWork
+ debtBytes = int64(assistBytesPerWork * float64(scanWork))
+ }
+
+ // Steal as much credit as we can from the background GC's
+ // scan credit. This is racy and may drop the background
+ // credit below 0 if two mutators steal at the same time. This
+ // will just cause steals to fail until credit is accumulated
+ // again, so in the long run it doesn't really matter, but we
+ // do have to handle the negative credit case.
+ bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
+ stolen := int64(0)
+ if bgScanCredit > 0 {
+ if bgScanCredit < scanWork {
+ stolen = bgScanCredit
+ gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(stolen))
+ } else {
+ stolen = scanWork
+ gp.gcAssistBytes += debtBytes
+ }
+ atomic.Xaddint64(&gcController.bgScanCredit, -stolen)
+
+ scanWork -= stolen
+
+ if scanWork == 0 {
+ // We were able to steal all of the credit we
+ // needed.
+ if traced {
+ traceGCMarkAssistDone()
+ }
+ return
+ }
+ }
+
+ if trace.enabled && !traced {
+ traced = true
+ traceGCMarkAssistStart()
+ }
+
+ // Perform assist work
+ systemstack(func() {
+ gcAssistAlloc1(gp, scanWork)
+ // The user stack may have moved, so this can't touch
+ // anything on it until it returns from systemstack.
+ })
+
+ completed := gp.param != nil
+ gp.param = nil
+ if completed {
+ gcMarkDone()
+ }
+
+ if gp.gcAssistBytes < 0 {
+ // We were unable to steal enough credit or perform
+ // enough work to pay off the assist debt. We need to
+ // do one of these before letting the mutator allocate
+ // more to prevent over-allocation.
+ //
+ // If this is because we were preempted, reschedule
+ // and try some more.
+ if gp.preempt {
+ Gosched()
+ goto retry
+ }
+
+ // Add this G to an assist queue and park. When the GC
+ // has more background credit, it will satisfy queued
+ // assists before flushing to the global credit pool.
+ //
+ // Note that this does *not* get woken up when more
+ // work is added to the work list. The theory is that
+ // there wasn't enough work to do anyway, so we might
+ // as well let background marking take care of the
+ // work that is available.
+ if !gcParkAssist() {
+ goto retry
+ }
+
+ // At this point either background GC has satisfied
+ // this G's assist debt, or the GC cycle is over.
+ }
+ if traced {
+ traceGCMarkAssistDone()
+ }
+}
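+
+// exampleAssistWork is an illustrative sketch (a hypothetical helper,
+// not part of the original Go source) of the debt conversion above: the
+// assist ratio turns a byte debt into scan work, and small debts are
+// rounded up to the over-assist floor so the goroutine banks credit for
+// future allocations.
+func exampleAssistWork(debtBytes int64, assistWorkPerByte float64) int64 {
+ scanWork := int64(assistWorkPerByte * float64(debtBytes))
+ if scanWork < gcOverAssistWork {
+ scanWork = gcOverAssistWork // over-assist to amortize assist overhead
+ }
+ return scanWork
+}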
+
+// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
+// stack. This is a separate function to make it easier to see that
+// we're not capturing anything from the user stack, since the user
+// stack may move while we're in this function.
+//
+// gcAssistAlloc1 indicates whether this assist completed the mark
+// phase by setting gp.param to non-nil. This can't be communicated on
+// the stack since it may move.
+//
+//go:systemstack
+func gcAssistAlloc1(gp *g, scanWork int64) {
+ // Clear the flag indicating that this assist completed the
+ // mark phase.
+ gp.param = nil
+
+ if atomic.Load(&gcBlackenEnabled) == 0 {
+ // The gcBlackenEnabled check in malloc races with the
+ // store that clears it but an atomic check in every malloc
+ // would be a performance hit.
+ // Instead we recheck it here on the non-preemptible system
+ // stack to determine if we should perform an assist.
+
+ // GC is done, so ignore any remaining debt.
+ gp.gcAssistBytes = 0
+ return
+ }
+ // Track time spent in this assist. Since we're on the
+ // system stack, this is non-preemptible, so we can
+ // just measure start and end time.
+ //
+ // Limiter event tracking might be disabled if we end up here
+ // while on a mark worker.
+ startTime := nanotime()
+ trackLimiterEvent := gp.m.p.ptr().limiterEvent.start(limiterEventMarkAssist, startTime)
+
+ decnwait := atomic.Xadd(&work.nwait, -1)
+ if decnwait == work.nproc {
+ println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
+ throw("nwait > work.nprocs")
+ }
+
+ // gcDrainN requires the caller to be preemptible.
+ casgstatus(gp, _Grunning, _Gwaiting)
+ gp.waitreason = waitReasonGCAssistMarking
+
+ // drain own cached work first in the hopes that it
+ // will be more cache friendly.
+ gcw := &getg().m.p.ptr().gcw
+ workDone := gcDrainN(gcw, scanWork)
+
+ casgstatus(gp, _Gwaiting, _Grunning)
+
+ // Record that we did this much scan work.
+ //
+ // Back out the number of bytes of assist credit that
+ // this scan work counts for. The "1+" is a poor man's
+ // round-up, to ensure this adds credit even if
+ // assistBytesPerWork is very low.
+ assistBytesPerWork := gcController.assistBytesPerWork.Load()
+ gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(workDone))
+
+ // If this is the last worker and we ran out of work,
+ // signal a completion point.
+ incnwait := atomic.Xadd(&work.nwait, +1)
+ if incnwait > work.nproc {
+ println("runtime: work.nwait=", incnwait,
+ "work.nproc=", work.nproc)
+ throw("work.nwait > work.nproc")
+ }
+
+ if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
+ // This has reached a background completion point. Set
+ // gp.param to a non-nil value to indicate this. It
+ // doesn't matter what we set it to (it just has to be
+ // a valid pointer).
+ gp.param = unsafe.Pointer(gp)
+ }
+ now := nanotime()
+ duration := now - startTime
+ _p_ := gp.m.p.ptr()
+ _p_.gcAssistTime += duration
+ if trackLimiterEvent {
+ _p_.limiterEvent.stop(limiterEventMarkAssist, now)
+ }
+ if _p_.gcAssistTime > gcAssistTimeSlack {
+ gcController.assistTime.Add(_p_.gcAssistTime)
+ gcCPULimiter.update(now)
+ _p_.gcAssistTime = 0
+ }
+}
+
+// gcWakeAllAssists wakes all currently blocked assists. This is used
+// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
+// new assists from going to sleep after this point.
+func gcWakeAllAssists() {
+ lock(&work.assistQueue.lock)
+ list := work.assistQueue.q.popList()
+ injectglist(&list)
+ unlock(&work.assistQueue.lock)
+}
+
+// gcParkAssist puts the current goroutine on the assist queue and parks.
+//
+// gcParkAssist reports whether the assist is now satisfied. If it
+// returns false, the caller must retry the assist.
+func gcParkAssist() bool {
+ lock(&work.assistQueue.lock)
+ // If the GC cycle finished while we were getting the lock,
+ // exit the assist. The cycle can't finish while we hold the
+ // lock.
+ if atomic.Load(&gcBlackenEnabled) == 0 {
+ unlock(&work.assistQueue.lock)
+ return true
+ }
+
+ gp := getg()
+ oldList := work.assistQueue.q
+ work.assistQueue.q.pushBack(gp)
+
+ // Recheck for background credit now that this G is in
+ // the queue, but can still back out. This avoids a
+ // race in case background marking has flushed more
+ // credit since we checked above.
+ if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
+ work.assistQueue.q = oldList
+ if oldList.tail != 0 {
+ oldList.tail.ptr().schedlink.set(nil)
+ }
+ unlock(&work.assistQueue.lock)
+ return false
+ }
+ // Park.
+ goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceEvGoBlockGC, 2)
+ return true
+}
+
+// gcFlushBgCredit flushes scanWork units of background scan work
+// credit. This first satisfies blocked assists on the
+// work.assistQueue and then flushes any remaining credit to
+// gcController.bgScanCredit.
+//
+// Write barriers are disallowed because this is used by gcDrain after
+// it has ensured that all work is drained and this must preserve that
+// condition.
+//
+//go:nowritebarrierrec
+func gcFlushBgCredit(scanWork int64) {
+ if work.assistQueue.q.empty() {
+ // Fast path; there are no blocked assists. There's a
+ // small window here where an assist may add itself to
+ // the blocked queue and park. If that happens, we'll
+ // just get it on the next flush.
+ atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
+ return
+ }
+
+ assistBytesPerWork := gcController.assistBytesPerWork.Load()
+ scanBytes := int64(float64(scanWork) * assistBytesPerWork)
+
+ lock(&work.assistQueue.lock)
+ for !work.assistQueue.q.empty() && scanBytes > 0 {
+ gp := work.assistQueue.q.pop()
+ // Note that gp.gcAssistBytes is negative because gp
+ // is in debt. Think carefully about the signs below.
+ if scanBytes+gp.gcAssistBytes >= 0 {
+ // Satisfy this entire assist debt.
+ scanBytes += gp.gcAssistBytes
+ gp.gcAssistBytes = 0
+ // It's important that we *not* put gp in
+ // runnext. Otherwise, it's possible for user
+ // code to exploit the GC worker's high
+ // scheduler priority to get itself always run
+ // before other goroutines and always in the
+ // fresh quantum started by GC.
+ ready(gp, 0, false)
+ } else {
+ // Partially satisfy this assist.
+ gp.gcAssistBytes += scanBytes
+ scanBytes = 0
+ // As a heuristic, we move this assist to the
+ // back of the queue so that large assists
+ // can't clog up the assist queue and
+ // substantially delay small assists.
+ work.assistQueue.q.pushBack(gp)
+ break
+ }
+ }
+
+ if scanBytes > 0 {
+ // Convert from scan bytes back to work.
+ assistWorkPerByte := gcController.assistWorkPerByte.Load()
+ scanWork = int64(float64(scanBytes) * assistWorkPerByte)
+ atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
+ }
+ unlock(&work.assistQueue.lock)
+}
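+
+// exampleDistributeCredit is an illustrative sketch (a hypothetical
+// helper, not part of the original Go source) of the queue discipline
+// above: each blocked assist's (negative) debt is paid in FIFO order
+// until the credit runs out; a partially paid assist stays queued. The
+// leftover, if any, is what would flush to the global credit pool.
+func exampleDistributeCredit(debts []int64, credit int64) int64 {
+ for i := range debts {
+ if credit <= 0 {
+ break
+ }
+ if credit+debts[i] >= 0 {
+ credit += debts[i] // debt is negative: fully satisfied
+ debts[i] = 0
+ } else {
+ debts[i] += credit // partially satisfied; stays queued
+ credit = 0
+ }
+ }
+ return credit
+}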
+
+// scanstack scans gp's stack, greying all pointers found on the stack.
+//
+// Returns the amount of scan work performed, but doesn't update
+// gcController.stackScanWork or flush any credit. Any background credit produced
+// by this function should be flushed by its caller. scanstack itself can't
+// safely flush because it may result in trying to wake up a goroutine that
+// was just scanned, resulting in a self-deadlock.
+//
+// scanstack will also shrink the stack if it is safe to do so. If it
+// is not, it schedules a stack shrink for the next synchronous safe
+// point.
+//
+// scanstack is marked go:systemstack because it must not be preempted
+// while using a workbuf.
+//
+//go:nowritebarrier
+//go:systemstack
+func scanstack(gp *g, gcw *gcWork) int64 {
+ if readgstatus(gp)&_Gscan == 0 {
+ print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
+ throw("scanstack - bad status")
+ }
+
+ switch readgstatus(gp) &^ _Gscan {
+ default:
+ print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
+ throw("mark - bad status")
+ case _Gdead:
+ return 0
+ case _Grunning:
+ print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
+ throw("scanstack: goroutine not stopped")
+ case _Grunnable, _Gsyscall, _Gwaiting:
+ // ok
+ }
+
+ if gp == getg() {
+ throw("can't scan our own stack")
+ }
+
+ // scannedSize is the amount of work we'll be reporting.
+ //
+ // It is less than the allocated size (which is hi-lo).
+ var sp uintptr
+ if gp.syscallsp != 0 {
+ sp = gp.syscallsp // If in a system call this is the stack pointer (gp.sched.sp can be 0 in this case on Windows).
+ } else {
+ sp = gp.sched.sp
+ }
+ scannedSize := gp.stack.hi - sp
+
+ // Keep statistics for initial stack size calculation.
+ // Note that this accumulates the scanned size, not the allocated size.
+ p := getg().m.p.ptr()
+ p.scannedStackSize += uint64(scannedSize)
+ p.scannedStacks++
+
+ if isShrinkStackSafe(gp) {
+ // Shrink the stack if not much of it is being used.
+ shrinkstack(gp)
+ } else {
+ // Otherwise, shrink the stack at the next sync safe point.
+ gp.preemptShrink = true
+ }
+
+ var state stackScanState
+ state.stack = gp.stack
+
+ if stackTraceDebug {
+ println("stack trace goroutine", gp.goid)
+ }
+
+ if debugScanConservative && gp.asyncSafePoint {
+ print("scanning async preempted goroutine ", gp.goid, " stack [", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
+ }
+
+ // Scan the saved context register. This is effectively a live
+ // register that gets moved back and forth between the
+ // register and sched.ctxt without a write barrier.
+ if gp.sched.ctxt != nil {
+ scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
+ }
+
+ // Scan the stack. Accumulate a list of stack objects.
+ scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
+ scanframeworker(frame, &state, gcw)
+ return true
+ }
+ gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
+
+ // Find additional pointers that point into the stack from the heap.
+ // Currently this includes defers and panics. See also function copystack.
+
+ // Find and trace other pointers in defer records.
+ for d := gp._defer; d != nil; d = d.link {
+ if d.fn != nil {
+ // Scan the func value, which could be a stack allocated closure.
+ // See issue 30453.
+ scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
+ }
+ if d.link != nil {
+ // The link field of a stack-allocated defer record might point
+ // to a heap-allocated defer record. Keep that heap record live.
+ scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
+ }
+ // Retain defer records themselves.
+ // Defer records might not be reachable from the G through regular heap
+ // tracing because the defer linked list might weave between the stack and the heap.
+ if d.heap {
+ scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
+ }
+ }
+ if gp._panic != nil {
+ // Panics are always stack allocated.
+ state.putPtr(uintptr(unsafe.Pointer(gp._panic)), false)
+ }
+
+ // Find and scan all reachable stack objects.
+ //
+ // The state's pointer queue prioritizes precise pointers over
+ // conservative pointers so that we'll prefer scanning stack
+ // objects precisely.
+ state.buildIndex()
+ for {
+ p, conservative := state.getPtr()
+ if p == 0 {
+ break
+ }
+ obj := state.findObject(p)
+ if obj == nil {
+ continue
+ }
+ r := obj.r
+ if r == nil {
+ // We've already scanned this object.
+ continue
+ }
+ obj.setRecord(nil) // Don't scan it again.
+ if stackTraceDebug {
+ printlock()
+ print(" live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of size", obj.size)
+ if conservative {
+ print(" (conservative)")
+ }
+ println()
+ printunlock()
+ }
+ gcdata := r.gcdata()
+ var s *mspan
+ if r.useGCProg() {
+ // This path is pretty unlikely: an object large enough
+ // to have a GC program, allocated on the stack.
+ // We need some space to unpack the program into a straight
+ // bitmask, which we allocate/free here.
+ // TODO: it would be nice if there were a way to run a GC
+ // program without having to store all its bits. We'd have
+ // to change from a Lempel-Ziv style program to something else.
+ // Or we can forbid putting objects on stacks if they require
+ // a gc program (see issue 27447).
+ s = materializeGCProg(r.ptrdata(), gcdata)
+ gcdata = (*byte)(unsafe.Pointer(s.startAddr))
+ }
+
+ b := state.stack.lo + uintptr(obj.off)
+ if conservative {
+ scanConservative(b, r.ptrdata(), gcdata, gcw, &state)
+ } else {
+ scanblock(b, r.ptrdata(), gcdata, gcw, &state)
+ }
+
+ if s != nil {
+ dematerializeGCProg(s)
+ }
+ }
+
+ // Deallocate object buffers.
+ // (Pointer buffers were all deallocated in the loop above.)
+ for state.head != nil {
+ x := state.head
+ state.head = x.next
+ if stackTraceDebug {
+ for i := 0; i < x.nobj; i++ {
+ obj := &x.obj[i]
+ if obj.r == nil { // reachable
+ continue
+ }
+ println(" dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of size", obj.r.size)
+ // Note: not necessarily really dead - only reachable-from-ptr dead.
+ }
+ }
+ x.nobj = 0
+ putempty((*workbuf)(unsafe.Pointer(x)))
+ }
+ if state.buf != nil || state.cbuf != nil || state.freeBuf != nil {
+ throw("remaining pointer buffers")
+ }
+ return int64(scannedSize)
+}
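+
+// exampleScannedSize is an illustrative sketch (a hypothetical helper,
+// not part of the original Go source) of the accounting above: the
+// reported scan work is the in-use portion of the stack, from the
+// current stack pointer up to the high end, not the full allocated size
+// hi-lo.
+func exampleScannedSize(hi, sp, syscallsp uintptr) uintptr {
+ used := sp
+ if syscallsp != 0 {
+ used = syscallsp // in a syscall, use the syscall stack pointer
+ }
+ return hi - used
+}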
+
+// Scan a stack frame: local variables and function arguments/results.
+//
+//go:nowritebarrier
+func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
+ if _DebugGC > 1 && frame.continpc != 0 {
+ print("scanframe ", funcname(frame.fn), "\n")
+ }
+
+ isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == funcID_asyncPreempt
+ isDebugCall := frame.fn.valid() && frame.fn.funcID == funcID_debugCallV2
+ if state.conservative || isAsyncPreempt || isDebugCall {
+ if debugScanConservative {
+ println("conservatively scanning function", funcname(frame.fn), "at PC", hex(frame.continpc))
+ }
+
+ // Conservatively scan the frame. Unlike the precise
+ // case, this includes the outgoing argument space
+ // since we may have stopped while this function was
+ // setting up a call.
+ //
+ // TODO: We could narrow this down if the compiler
+ // produced a single map per function of stack slots
+ // and registers that ever contain a pointer.
+ if frame.varp != 0 {
+ size := frame.varp - frame.sp
+ if size > 0 {
+ scanConservative(frame.sp, size, nil, gcw, state)
+ }
+ }
+
+ // Scan arguments to this frame.
+ if frame.arglen != 0 {
+ // TODO: We could pass the entry argument map
+ // to narrow this down further.
+ scanConservative(frame.argp, frame.arglen, nil, gcw, state)
+ }
+
+ if isAsyncPreempt || isDebugCall {
+ // This function's frame contained the
+ // registers for the asynchronously stopped
+ // parent frame. Scan the parent
+ // conservatively.
+ state.conservative = true
+ } else {
+ // We only wanted to scan those two frames
+ // conservatively. Clear the flag for future
+ // frames.
+ state.conservative = false
+ }
+ return
+ }
+
+ locals, args, objs := getStackMap(frame, &state.cache, false)
+
+ // Scan local variables if stack frame has been allocated.
+ if locals.n > 0 {
+ size := uintptr(locals.n) * goarch.PtrSize
+ scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
+ }
+
+ // Scan arguments.
+ if args.n > 0 {
+ scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state)
+ }
+
+ // Add all stack objects to the stack object list.
+ if frame.varp != 0 {
+ // varp is 0 for defers, where there are no locals.
+ // In that case, there can't be a pointer to its args, either.
+ // (And all args would be scanned above anyway.)
+ for i := range objs {
+ obj := &objs[i]
+ off := obj.off
+ base := frame.varp // locals base pointer
+ if off >= 0 {
+ base = frame.argp // arguments and return values base pointer
+ }
+ ptr := base + uintptr(off)
+ if ptr < frame.sp {
+ // object hasn't been allocated in the frame yet.
+ continue
+ }
+ if stackTraceDebug {
+ println("stkobj at", hex(ptr), "of size", obj.size)
+ }
+ state.addObject(ptr, obj)
+ }
+ }
+}
+
+type gcDrainFlags int
+
+const (
+ gcDrainUntilPreempt gcDrainFlags = 1 << iota
+ gcDrainFlushBgCredit
+ gcDrainIdle
+ gcDrainFractional
+)
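+
+// exampleDrainMode is an illustrative sketch (a hypothetical helper, not
+// part of the original Go source): the drain flags are independent bits,
+// combined with | by callers and tested with &, exactly as gcDrain does
+// below.
+func exampleDrainMode(flags gcDrainFlags) (preemptible, flushBgCredit, idle, fractional bool) {
+ preemptible = flags&gcDrainUntilPreempt != 0
+ flushBgCredit = flags&gcDrainFlushBgCredit != 0
+ idle = flags&gcDrainIdle != 0
+ fractional = flags&gcDrainFractional != 0
+ return
+}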
+
+// gcDrain scans roots and objects in work buffers, blackening grey
+// objects until it is unable to get more work. It may return before
+// GC is done; it's the caller's responsibility to balance work from
+// other Ps.
+//
+// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
+// is set.
+//
+// If flags&gcDrainIdle != 0, gcDrain returns when there is other work
+// to do.
+//
+// If flags&gcDrainFractional != 0, gcDrain self-preempts when
+// pollFractionalWorkerExit() returns true. This implies
+// gcDrainNoBlock.
+//
+// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
+// credit to gcController.bgScanCredit every gcCreditSlack units of
+// scan work.
+//
+// gcDrain will always return if there is a pending STW.
+//
+//go:nowritebarrier
+func gcDrain(gcw *gcWork, flags gcDrainFlags) {
+ if !writeBarrier.needed {
+ throw("gcDrain phase incorrect")
+ }
+
+ gp := getg().m.curg
+ preemptible := flags&gcDrainUntilPreempt != 0
+ flushBgCredit := flags&gcDrainFlushBgCredit != 0
+ idle := flags&gcDrainIdle != 0
+
+ initScanWork := gcw.heapScanWork
+
+ // checkWork is the scan work before performing the next
+ // self-preempt check.
+ checkWork := int64(1<<63 - 1)
+ var check func() bool
+ if flags&(gcDrainIdle|gcDrainFractional) != 0 {
+ checkWork = initScanWork + drainCheckThreshold
+ if idle {
+ check = pollWork
+ } else if flags&gcDrainFractional != 0 {
+ check = pollFractionalWorkerExit
+ }
+ }
+
+ // Drain root marking jobs.
+ if work.markrootNext < work.markrootJobs {
+ // Stop if we're preemptible or if someone wants to STW.
+ for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
+ job := atomic.Xadd(&work.markrootNext, +1) - 1
+ if job >= work.markrootJobs {
+ break
+ }
+ markroot(gcw, job, flushBgCredit)
+ if check != nil && check() {
+ goto done
+ }
+ }
+ }
+
+ // Drain heap marking jobs.
+ // Stop if we're preemptible or if someone wants to STW.
+ for !(gp.preempt && (preemptible || atomic.Load(&sched.gcwaiting) != 0)) {
+ // Try to keep work available on the global queue. We used to
+ // check if there were waiting workers, but it's better to
+ // just keep work available than to make workers wait. In the
+ // worst case, we'll do O(log(_WorkbufSize)) unnecessary
+ // balances.
+ if work.full == 0 {
+ gcw.balance()
+ }
+
+ b := gcw.tryGetFast()
+ if b == 0 {
+ b = gcw.tryGet()
+ if b == 0 {
+ // Flush the write barrier
+ // buffer; this may create
+ // more work.
+ wbBufFlush(nil, 0)
+ b = gcw.tryGet()
+ }
+ }
+ if b == 0 {
+ // Unable to get work.
+ break
+ }
+ scanobject(b, gcw)
+
+ // Flush background scan work credit to the global
+ // account if we've accumulated enough locally so
+ // mutator assists can draw on it.
+ if gcw.heapScanWork >= gcCreditSlack {
+ gcController.heapScanWork.Add(gcw.heapScanWork)
+ if flushBgCredit {
+ gcFlushBgCredit(gcw.heapScanWork - initScanWork)
+ initScanWork = 0
+ }
+ checkWork -= gcw.heapScanWork
+ gcw.heapScanWork = 0
+
+ if checkWork <= 0 {
+ checkWork += drainCheckThreshold
+ if check != nil && check() {
+ break
+ }
+ }
+ }
+ }
+
+done:
+ // Flush remaining scan work credit.
+ if gcw.heapScanWork > 0 {
+ gcController.heapScanWork.Add(gcw.heapScanWork)
+ if flushBgCredit {
+ gcFlushBgCredit(gcw.heapScanWork - initScanWork)
+ }
+ gcw.heapScanWork = 0
+ }
+}
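+
+// exampleCheckCadence is an illustrative sketch (a hypothetical helper,
+// not part of the original Go source) of the self-preemption cadence
+// above: work is drained one unit at a time, and roughly every
+// drainCheckThreshold units the check function is polled to decide
+// whether to stop early.
+func exampleCheckCadence(totalWork int64, check func() bool) int64 {
+ var done int64
+ checkWork := int64(drainCheckThreshold)
+ for done < totalWork {
+ done++ // one unit of scan work
+ checkWork--
+ if checkWork <= 0 {
+ checkWork += drainCheckThreshold
+ if check != nil && check() {
+ break // self-preempt
+ }
+ }
+ }
+ return done
+}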
+
+// gcDrainN blackens grey objects until it has performed roughly
+// scanWork units of scan work or the G is preempted. This is
+// best-effort, so it may perform less work if it fails to get a work
+// buffer. Otherwise, it will perform at least scanWork units of work, but
+// may perform more because scanning is always done in whole object
+// increments. It returns the amount of scan work performed.
+//
+// The caller goroutine must be in a preemptible state (e.g.,
+// _Gwaiting) to prevent deadlocks during stack scanning. As a
+// consequence, this must be called on the system stack.
+//
+//go:nowritebarrier
+//go:systemstack
+func gcDrainN(gcw *gcWork, scanWork int64) int64 {
+ if !writeBarrier.needed {
+ throw("gcDrainN phase incorrect")
+ }
+
+ // There may already be scan work on the gcw, which we don't
+ // want to claim was done by this call.
+ workFlushed := -gcw.heapScanWork
+
+ // In addition to backing out because of a preemption, back out
+ // if the GC CPU limiter is enabled.
+ gp := getg().m.curg
+ for !gp.preempt && !gcCPULimiter.limiting() && workFlushed+gcw.heapScanWork < scanWork {
+ // See gcDrain comment.
+ if work.full == 0 {
+ gcw.balance()
+ }
+
+ b := gcw.tryGetFast()
+ if b == 0 {
+ b = gcw.tryGet()
+ if b == 0 {
+ // Flush the write barrier buffer;
+ // this may create more work.
+ wbBufFlush(nil, 0)
+ b = gcw.tryGet()
+ }
+ }
+
+ if b == 0 {
+ // Try to do a root job.
+ if work.markrootNext < work.markrootJobs {
+ job := atomic.Xadd(&work.markrootNext, +1) - 1
+ if job < work.markrootJobs {
+ workFlushed += markroot(gcw, job, false)
+ continue
+ }
+ }
+ // No heap or root jobs.
+ break
+ }
+
+ scanobject(b, gcw)
+
+ // Flush background scan work credit.
+ if gcw.heapScanWork >= gcCreditSlack {
+ gcController.heapScanWork.Add(gcw.heapScanWork)
+ workFlushed += gcw.heapScanWork
+ gcw.heapScanWork = 0
+ }
+ }
+
+ // Unlike gcDrain, there's no need to flush remaining work
+ // here because this never flushes to bgScanCredit and
+ // gcw.dispose will flush any remaining work to scanWork.
+
+ return workFlushed + gcw.heapScanWork
+}
+
+// scanblock scans b as scanobject would, but using an explicit
+// pointer bitmap instead of the heap bitmap.
+//
+// This is used to scan non-heap roots, so it does not update
+// gcw.bytesMarked or gcw.heapScanWork.
+//
+// If stk != nil, possible stack pointers are also reported to stk.putPtr.
+//
+//go:nowritebarrier
+func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
+ // Use local copies of original parameters, so that a stack trace
+ // due to one of the throws below shows the original block
+ // base and extent.
+ b := b0
+ n := n0
+
+ for i := uintptr(0); i < n; {
+ // Find bits for the next word.
+ bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8)))
+ if bits == 0 {
+ i += goarch.PtrSize * 8
+ continue
+ }
+ for j := 0; j < 8 && i < n; j++ {
+ if bits&1 != 0 {
+ // Same work as in scanobject; see comments there.
+ p := *(*uintptr)(unsafe.Pointer(b + i))
+ if p != 0 {
+ if obj, span, objIndex := findObject(p, b, i); obj != 0 {
+ greyobject(obj, b, i, span, gcw, objIndex)
+ } else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
+ stk.putPtr(p, false)
+ }
+ }
+ }
+ bits >>= 1
+ i += goarch.PtrSize
+ }
+ }
+}
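+
+// examplePtrmaskBit is an illustrative sketch (a hypothetical helper,
+// not part of the original Go source) of the ptrmask addressing used
+// above: one bit per pointer-sized word, so one mask byte covers 8
+// words, and the bit for byte offset i of the block is bit
+// (i/ptrSize)%8 of mask byte (i/ptrSize)/8.
+func examplePtrmaskBit(ptrmask []uint8, i, ptrSize uintptr) bool {
+ word := i / ptrSize
+ return ptrmask[word/8]>>(word%8)&1 != 0
+}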
+
+// scanobject scans the object starting at b, adding pointers to gcw.
+// b must point to the beginning of a heap object or an oblet.
+// scanobject consults the GC bitmap for the pointer mask and the
+// spans for the size of the object.
+//
+//go:nowritebarrier
+func scanobject(b uintptr, gcw *gcWork) {
+ // Prefetch object before we scan it.
+ //
+ // This will overlap fetching the beginning of the object with initial
+ // setup before we start scanning the object.
+ sys.Prefetch(b)
+
+ // Find the bits for b and the size of the object at b.
+ //
+ // b is either the beginning of an object, in which case this
+ // is the size of the object to scan, or it points to an
+ // oblet, in which case we compute the size to scan below.
+ hbits := heapBitsForAddr(b)
+ s := spanOfUnchecked(b)
+ n := s.elemsize
+ if n == 0 {
+ throw("scanobject n == 0")
+ }
+
+ if n > maxObletBytes {
+ // Large object. Break into oblets for better
+ // parallelism and lower latency.
+ if b == s.base() {
+ // It's possible this is a noscan object (not
+ // from greyobject, but from other code
+ // paths), in which case we must *not* enqueue
+ // oblets since their bitmaps will be
+ // uninitialized.
+ if s.spanclass.noscan() {
+ // Bypass the whole scan.
+ gcw.bytesMarked += uint64(n)
+ return
+ }
+
+ // Enqueue the other oblets to scan later.
+ // Some oblets may be in b's scalar tail, but
+ // these will be marked as "no more pointers",
+ // so we'll drop out immediately when we go to
+ // scan those.
+ for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
+ if !gcw.putFast(oblet) {
+ gcw.put(oblet)
+ }
+ }
+ }
+
+ // Compute the size of the oblet. Since this object
+ // must be a large object, s.base() is the beginning
+ // of the object.
+ n = s.base() + s.elemsize - b
+ if n > maxObletBytes {
+ n = maxObletBytes
+ }
+ }
+
+ var i uintptr
+ for i = 0; i < n; i, hbits = i+goarch.PtrSize, hbits.next() {
+ // Load bits once. See CL 22712 and issue 16973 for discussion.
+ bits := hbits.bits()
+ if bits&bitScan == 0 {
+ break // no more pointers in this object
+ }
+ if bits&bitPointer == 0 {
+ continue // not a pointer
+ }
+
+ // Work here is duplicated in scanblock and above.
+ // If you make changes here, make changes there too.
+ obj := *(*uintptr)(unsafe.Pointer(b + i))
+
+ // At this point we have extracted the next potential pointer.
+ // Quickly filter out nil and pointers back to the current object.
+ if obj != 0 && obj-b >= n {
+ // Test if obj points into the Go heap and, if so,
+ // mark the object.
+ //
+ // Note that it's possible for findObject to
+ // fail if obj points to a just-allocated heap
+ // object because of a race with growing the
+ // heap. In this case, we know the object was
+ // just allocated and hence will be marked by
+ // allocation itself.
+ if obj, span, objIndex := findObject(obj, b, i); obj != 0 {
+ greyobject(obj, b, i, span, gcw, objIndex)
+ }
+ }
+ }
+ gcw.bytesMarked += uint64(n)
+ gcw.heapScanWork += int64(i)
+}
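+
+// exampleOblets is an illustrative sketch (a hypothetical helper, not
+// part of the original Go source) of the large-object split above: an
+// object of size n at base is divided into oblets of at most
+// maxObletBytes starting at base+k*maxObletBytes; scanobject scans the
+// first one immediately and queues the rest.
+func exampleOblets(base, n uintptr) []uintptr {
+ var starts []uintptr
+ for oblet := base; oblet < base+n; oblet += maxObletBytes {
+ starts = append(starts, oblet)
+ }
+ return starts
+}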
+
+// scanConservative scans block [b, b+n) conservatively, treating any
+// pointer-like value in the block as a pointer.
+//
+// If ptrmask != nil, only words that are marked in ptrmask are
+// considered as potential pointers.
+//
+// If state != nil, it's assumed that [b, b+n) is a block in the stack
+// and may contain pointers to stack objects.
+func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) {
+ if debugScanConservative {
+ printlock()
+ print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
+ hexdumpWords(b, b+n, func(p uintptr) byte {
+ if ptrmask != nil {
+ word := (p - b) / goarch.PtrSize
+ bits := *addb(ptrmask, word/8)
+ if (bits>>(word%8))&1 == 0 {
+ return '$'
+ }
+ }
+
+ val := *(*uintptr)(unsafe.Pointer(p))
+ if state != nil && state.stack.lo <= val && val < state.stack.hi {
+ return '@'
+ }
+
+ span := spanOfHeap(val)
+ if span == nil {
+ return ' '
+ }
+ idx := span.objIndex(val)
+ if span.isFree(idx) {
+ return ' '
+ }
+ return '*'
+ })
+ printunlock()
+ }
+
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
+ if ptrmask != nil {
+ word := i / goarch.PtrSize
+ bits := *addb(ptrmask, word/8)
+ if bits == 0 {
+ // Skip 8 words (the loop increment will do the 8th)
+ //
+ // This must be the first time we've
+ // seen this word of ptrmask, so i
+ // must be 8-word-aligned, but check
+ // our reasoning just in case.
+ if i%(goarch.PtrSize*8) != 0 {
+ throw("misaligned mask")
+ }
+ i += goarch.PtrSize*8 - goarch.PtrSize
+ continue
+ }
+ if (bits>>(word%8))&1 == 0 {
+ continue
+ }
+ }
+
+ val := *(*uintptr)(unsafe.Pointer(b + i))
+
+ // Check if val points into the stack.
+ if state != nil && state.stack.lo <= val && val < state.stack.hi {
+ // val may point to a stack object. This
+ // object may be dead from last cycle and
+ // hence may contain pointers to unallocated
+ // objects, but unlike heap objects we can't
+ // tell if it's already dead. Hence, if all
+ // pointers to this object are from
+ // conservative scanning, we have to scan it
+ // defensively, too.
+ state.putPtr(val, true)
+ continue
+ }
+
+ // Check if val points to a heap span.
+ span := spanOfHeap(val)
+ if span == nil {
+ continue
+ }
+
+ // Check if val points to an allocated object.
+ idx := span.objIndex(val)
+ if span.isFree(idx) {
+ continue
+ }
+
+ // val points to an allocated object. Mark it.
+ obj := span.base() + idx*span.elemsize
+ greyobject(obj, b, i, span, gcw, idx)
+ }
+}
+
+// Shade the object if it isn't already.
+// The object is not nil and known to be in the heap.
+// Preemption must be disabled.
+//
+//go:nowritebarrier
+func shade(b uintptr) {
+ if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
+ gcw := &getg().m.p.ptr().gcw
+ greyobject(obj, 0, 0, span, gcw, objIndex)
+ }
+}
+
+// obj is the start of an object with mark mbits.
+// If it isn't already marked, mark it and enqueue into gcw.
+// base and off are for debugging only and could be removed.
+//
+// See also wbBufFlush1, which partially duplicates this logic.
+//
+//go:nowritebarrierrec
+func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
+ // obj should be start of allocation, and so must be at least pointer-aligned.
+ if obj&(goarch.PtrSize-1) != 0 {
+ throw("greyobject: obj not pointer-aligned")
+ }
+ mbits := span.markBitsForIndex(objIndex)
+
+ if useCheckmark {
+ if setCheckmark(obj, base, off, mbits) {
+ // Already marked.
+ return
+ }
+ } else {
+ if debug.gccheckmark > 0 && span.isFree(objIndex) {
+ print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
+ gcDumpObject("base", base, off)
+ gcDumpObject("obj", obj, ^uintptr(0))
+ getg().m.traceback = 2
+ throw("marking free object")
+ }
+
+ // If marked we have nothing to do.
+ if mbits.isMarked() {
+ return
+ }
+ mbits.setMarked()
+
+ // Mark span.
+ arena, pageIdx, pageMask := pageIndexOf(span.base())
+ if arena.pageMarks[pageIdx]&pageMask == 0 {
+ atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
+ }
+
+ // If this is a noscan object, fast-track it to black
+ // instead of greying it.
+ if span.spanclass.noscan() {
+ gcw.bytesMarked += uint64(span.elemsize)
+ return
+ }
+ }
+
+ // We're adding obj to P's local workbuf, so it's likely
+ // this object will be processed soon by the same P.
+ // Even if the workbuf gets flushed, there will likely still be
+ // some benefit on platforms with inclusive shared caches.
+ sys.Prefetch(obj)
+ // Queue the obj for scanning.
+ if !gcw.putFast(obj) {
+ gcw.put(obj)
+ }
+}
+
+// gcDumpObject dumps the contents of obj for debugging and marks the
+// field at byte offset off in obj.
+func gcDumpObject(label string, obj, off uintptr) {
+ s := spanOf(obj)
+ print(label, "=", hex(obj))
+ if s == nil {
+ print(" s=nil\n")
+ return
+ }
+ print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
+ if state := s.state.get(); 0 <= state && int(state) < len(mSpanStateNames) {
+ print(mSpanStateNames[state], "\n")
+ } else {
+ print("unknown(", state, ")\n")
+ }
+
+ skipped := false
+ size := s.elemsize
+ if s.state.get() == mSpanManual && size == 0 {
+ // We're printing something from a stack frame. We
+ // don't know how big it is, so just show up to and
+ // including off.
+ size = off + goarch.PtrSize
+ }
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
+ // For big objects, just print the beginning (because
+ // that usually hints at the object's type) and the
+ // fields around off.
+ if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
+ skipped = true
+ continue
+ }
+ if skipped {
+ print(" ...\n")
+ skipped = false
+ }
+ print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
+ if i == off {
+ print(" <==")
+ }
+ print("\n")
+ }
+ if skipped {
+ print(" ...\n")
+ }
+}
+
+// gcmarknewobject marks a newly allocated object black. obj must
+// not contain any non-nil pointers.
+//
+// This is nosplit so it can manipulate a gcWork without preemption.
+//
+//go:nowritebarrier
+//go:nosplit
+func gcmarknewobject(span *mspan, obj, size, scanSize uintptr) {
+ if useCheckmark { // The world should be stopped so this should not happen.
+ throw("gcmarknewobject called while doing checkmark")
+ }
+
+ // Mark object.
+ objIndex := span.objIndex(obj)
+ span.markBitsForIndex(objIndex).setMarked()
+
+ // Mark span.
+ arena, pageIdx, pageMask := pageIndexOf(span.base())
+ if arena.pageMarks[pageIdx]&pageMask == 0 {
+ atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
+ }
+
+ gcw := &getg().m.p.ptr().gcw
+ gcw.bytesMarked += uint64(size)
+}
+
+// gcMarkTinyAllocs greys all active tiny alloc blocks.
+//
+// The world must be stopped.
+func gcMarkTinyAllocs() {
+ assertWorldStopped()
+
+ for _, p := range allp {
+ c := p.mcache
+ if c == nil || c.tiny == 0 {
+ continue
+ }
+ _, span, objIndex := findObject(c.tiny, 0, 0)
+ gcw := &p.gcw
+ greyobject(c.tiny, 0, 0, span, gcw, objIndex)
+ }
+}
diff --git a/contrib/go/_std_1.19/src/runtime/mgcpacer.go b/contrib/go/_std_1.19/src/runtime/mgcpacer.go
new file mode 100644
index 0000000000..2d9fd27748
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mgcpacer.go
@@ -0,0 +1,1579 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/cpu"
+ "internal/goexperiment"
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// go119MemoryLimitSupport is a feature flag for a number of changes
+// related to the memory limit feature (#48409). Disabling this flag
+// disables those features, as well as the memory limit mechanism,
+// which becomes a no-op.
+const go119MemoryLimitSupport = true
+
+const (
+ // gcGoalUtilization is the goal CPU utilization for
+ // marking as a fraction of GOMAXPROCS.
+ //
+ // Increasing the goal utilization will shorten GC cycles as the GC
+ // has more resources behind it, lessening costs from the write barrier,
+ // but comes at the cost of increasing mutator latency.
+ gcGoalUtilization = gcBackgroundUtilization
+
+ // gcBackgroundUtilization is the fixed CPU utilization for background
+ // marking. It must be <= gcGoalUtilization. The difference between
+ // gcGoalUtilization and gcBackgroundUtilization will be made up by
+ // mark assists. The scheduler will aim to use within 50% of this
+ // goal.
+ //
+ // As a general rule, there's little reason to set gcBackgroundUtilization
+ // < gcGoalUtilization. One reason might be in mostly idle applications,
+ // where goroutines are unlikely to assist at all, so the actual
+ // utilization will be lower than the goal. But this is a moot point
+ // because the idle mark workers already soak up idle CPU resources.
+ // These two values are still kept separate however because they are
+ // distinct conceptually, and in previous iterations of the pacer the
+ // distinction was more important.
+ gcBackgroundUtilization = 0.25
+
+ // gcCreditSlack is the amount of scan work credit that can
+ // accumulate locally before updating gcController.heapScanWork and,
+ // optionally, gcController.bgScanCredit. Lower values give a more
+ // accurate assist ratio and make it more likely that assists will
+ // successfully steal background credit. Higher values reduce memory
+ // contention.
+ gcCreditSlack = 2000
+
+ // gcAssistTimeSlack is the nanoseconds of mutator assist time that
+ // can accumulate on a P before updating gcController.assistTime.
+ gcAssistTimeSlack = 5000
+
+ // gcOverAssistWork determines how many extra units of scan work a GC
+ // assist does when an assist happens. This amortizes the cost of an
+ // assist by pre-paying for this many bytes of future allocations.
+ gcOverAssistWork = 64 << 10
+
+ // defaultHeapMinimum is the value of heapMinimum for GOGC==100.
+ defaultHeapMinimum = (goexperiment.HeapMinimum512KiBInt)*(512<<10) +
+ (1-goexperiment.HeapMinimum512KiBInt)*(4<<20)
+
+ // maxStackScanSlack is the bytes of stack space allocated or freed
+ // that can accumulate on a P before updating gcController.stackSize.
+ maxStackScanSlack = 8 << 10
+
+ // memoryLimitHeapGoalHeadroom is the amount of headroom the pacer gives to
+ // the heap goal when operating in the memory-limited regime. That is,
+ // it'll reduce the heap goal by this many extra bytes off of the base
+ // calculation.
+ memoryLimitHeapGoalHeadroom = 1 << 20
+)
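+
+// The defaultHeapMinimum expression above uses a branch-free constant
+// select: with a 0/1 experiment flag f, f*a + (1-f)*b evaluates to a
+// when the experiment is on and b otherwise. An illustrative sketch (a
+// hypothetical helper, not part of the original Go source):
+func exampleConstSelect(f, a, b uint64) uint64 {
+ return f*a + (1-f)*b // f must be 0 or 1
+}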
+
+func init() {
+ if offset := unsafe.Offsetof(gcController.heapLive); offset%8 != 0 {
+ println(offset)
+ throw("gcController.heapLive not aligned to 8 bytes")
+ }
+}
+
+// gcController implements the GC pacing controller that determines
+// when to trigger concurrent garbage collection and how much marking
+// work to do in mutator assists and background marking.
+//
+// It calculates the ratio between the allocation rate (in terms of CPU
+// time) and the GC scan throughput to determine the heap size at which to
+// trigger a GC cycle such that no GC assists are required to finish on time.
+// This algorithm thus optimizes GC CPU utilization to the dedicated background
+// mark utilization of 25% of GOMAXPROCS by minimizing GC assists.
+// The high-level design of this algorithm is documented
+// at https://github.com/golang/proposal/blob/master/design/44167-gc-pacer-redesign.md.
+// See https://golang.org/s/go15gcpacing for additional historical context.
+var gcController gcControllerState
+
+type gcControllerState struct {
+ // Initialized from GOGC. GOGC=off means no GC.
+ gcPercent atomic.Int32
+
+ _ uint32 // padding so following 64-bit values are 8-byte aligned
+
+ // memoryLimit is the soft memory limit in bytes.
+ //
+ // Initialized from GOMEMLIMIT. GOMEMLIMIT=off is equivalent to MaxInt64
+ // which means no soft memory limit in practice.
+ //
+ // This is an int64 instead of a uint64 to more easily maintain parity with
+ // the SetMemoryLimit API, which sets a maximum at MaxInt64. This value
+ // should never be negative.
+ memoryLimit atomic.Int64
+
+ // heapMinimum is the minimum heap size at which to trigger GC.
+ // For small heaps, this overrides the usual GOGC*live set rule.
+ //
+ // When there is a very small live set but a lot of allocation, simply
+ // collecting when the heap reaches GOGC*live results in many GC
+ // cycles and high total per-GC overhead. This minimum amortizes this
+ // per-GC overhead while keeping the heap reasonably small.
+ //
+ // During initialization this is set to 4MB*GOGC/100. In the case of
+ // GOGC==0, this will set heapMinimum to 0, resulting in constant
+ // collection even when the heap size is small, which is useful for
+ // debugging.
+ heapMinimum uint64
+
+ // runway is the amount of runway in heap bytes allocated by the
+ // application that we want to give the GC once it starts.
+ //
+ // This is computed from consMark during mark termination.
+ runway atomic.Uint64
+
+ // consMark is the estimated per-CPU consMark ratio for the application.
+ //
+ // It represents the ratio between the application's allocation
+ // rate, as bytes allocated per CPU-time, and the GC's scan rate,
+ // as bytes scanned per CPU-time.
+ // The units of this ratio are (B / cpu-ns) / (B / cpu-ns).
+ //
+ // At a high level, this value is computed as the bytes of memory
+ // allocated (cons) per unit of scan work completed (mark) in a GC
+ // cycle, divided by the CPU time spent on each activity.
+ //
+ // Updated at the end of each GC cycle, in endCycle.
+ consMark float64
+
+ // consMarkController holds the state for the mark-cons ratio
+ // estimation over time.
+ //
+ // Its purpose is to smooth out noisiness in the computation of
+ // consMark; see consMark for details.
+ consMarkController piController
+
+ _ uint32 // Padding for atomics on 32-bit platforms.
+
+ // gcPercentHeapGoal is the goal heapLive for when next GC ends derived
+ // from gcPercent.
+ //
+ // Set to ^uint64(0) if gcPercent is disabled.
+ gcPercentHeapGoal atomic.Uint64
+
+ // sweepDistMinTrigger is the minimum trigger to ensure a minimum
+ // sweep distance.
+ //
+ // This bound is also special because it applies to both the trigger
+ // *and* the goal (all other trigger bounds must be based *on* the goal).
+ //
+ // It is computed ahead of time, at commit time. The theory is that,
+ // absent a sudden change to a parameter like gcPercent, the trigger
+ // will be chosen to always give the sweeper enough headroom. However,
+ // such a change might dramatically and suddenly move up the trigger,
+ // in which case we need to ensure the sweeper still has enough headroom.
+ sweepDistMinTrigger atomic.Uint64
+
+ // triggered is the point at which the current GC cycle actually triggered.
+ // Only valid during the mark phase of a GC cycle, otherwise set to ^uint64(0).
+ //
+ // Updated while the world is stopped.
+ triggered uint64
+
+ // lastHeapGoal is the value of heapGoal at the moment the last GC
+ // ended. Note that this is distinct from the last value heapGoal had,
+ // because it could change if e.g. gcPercent changes.
+ //
+ // Read and written with the world stopped or with mheap_.lock held.
+ lastHeapGoal uint64
+
+ // heapLive is the number of bytes considered live by the GC.
+ // That is: retained by the most recent GC plus allocated
+ // since then. heapLive ≤ memstats.totalAlloc-memstats.totalFree, since
+ // heapAlloc includes unmarked objects that have not yet been swept (and
+ // hence goes up as we allocate and down as we sweep) while heapLive
+ // excludes these objects (and hence only goes up between GCs).
+ //
+ // This is updated atomically without locking. To reduce
+ // contention, this is updated only when obtaining a span from
+ // an mcentral and at this point it counts all of the
+ // unallocated slots in that span (which will be allocated
+ // before that mcache obtains another span from that
+ // mcentral). Hence, it slightly overestimates the "true" live
+ // heap size. It's better to overestimate than to
+ // underestimate because 1) this triggers the GC earlier than
+ // necessary rather than potentially too late and 2) this
+ // leads to a conservative GC rate rather than a GC rate that
+ // is potentially too low.
+ //
+ // Reads should likewise be atomic (or during STW).
+ //
+ // Whenever this is updated, call traceHeapAlloc() and
+ // this gcControllerState's revise() method.
+ heapLive uint64
+
+ // heapScan is the number of bytes of "scannable" heap. This
+ // is the live heap (as counted by heapLive), but omitting
+ // no-scan objects and no-scan tails of objects.
+ //
+ // This value is fixed at the start of a GC cycle, so during a
+ // GC cycle it is safe to read without atomics, and it represents
+ // the maximum scannable heap.
+ heapScan uint64
+
+ // lastHeapScan is the number of bytes of heap that were scanned
+ // last GC cycle. It is the same as heapMarked, but only
+ // includes the "scannable" parts of objects.
+ //
+ // Updated when the world is stopped.
+ lastHeapScan uint64
+
+ // lastStackScan is the number of bytes of stack that were scanned
+ // last GC cycle.
+ lastStackScan uint64
+
+ // maxStackScan is the amount of allocated goroutine stack space in
+ // use by goroutines.
+ //
+ // This number tracks allocated goroutine stack space rather than used
+ // goroutine stack space (i.e. what is actually scanned) because used
+ // goroutine stack space is much harder to measure cheaply. By using
+ // allocated space, we make an overestimate; this is OK, it's better
+ // to conservatively overcount than undercount.
+ //
+ // Read and updated atomically.
+ maxStackScan uint64
+
+ // globalsScan is the total amount of global variable space
+ // that is scannable.
+ //
+ // Read and updated atomically.
+ globalsScan uint64
+
+ // heapMarked is the number of bytes marked by the previous
+ // GC. After mark termination, heapLive == heapMarked, but
+ // unlike heapLive, heapMarked does not change until the
+ // next mark termination.
+ heapMarked uint64
+
+ // heapScanWork is the total heap scan work performed this cycle.
+ // stackScanWork is the total stack scan work performed this cycle.
+ // globalsScanWork is the total globals scan work performed this cycle.
+ //
+ // These are updated atomically during the cycle. Updates occur in
+ // bounded batches, since they are both written and read
+ // throughout the cycle. At the end of the cycle, heapScanWork is how
+ // much of the retained heap is scannable.
+ //
+ // Currently these are measured in bytes. For most uses, this is an
+ // opaque unit of work, but for estimation the definition is important.
+ //
+ // Note that stackScanWork includes only stack space scanned, not all
+ // of the allocated stack.
+ heapScanWork atomic.Int64
+ stackScanWork atomic.Int64
+ globalsScanWork atomic.Int64
+
+ // bgScanCredit is the scan work credit accumulated by the
+ // concurrent background scan. This credit is accumulated by
+ // the background scan and stolen by mutator assists. This is
+ // updated atomically. Updates occur in bounded batches, since
+ // it is both written and read throughout the cycle.
+ bgScanCredit int64
+
+ // assistTime is the nanoseconds spent in mutator assists
+ // during this cycle. This is updated atomically, and must also
+ // be updated atomically even during a STW, because it is read
+ // by sysmon. Updates occur in bounded batches, since it is both
+ // written and read throughout the cycle.
+ assistTime atomic.Int64
+
+ // dedicatedMarkTime is the nanoseconds spent in dedicated
+ // mark workers during this cycle. This is updated atomically
+ // at the end of the concurrent mark phase.
+ dedicatedMarkTime int64
+
+ // fractionalMarkTime is the nanoseconds spent in the
+ // fractional mark worker during this cycle. This is updated
+ // atomically throughout the cycle and will be up-to-date if
+ // the fractional mark worker is not currently running.
+ fractionalMarkTime int64
+
+ // idleMarkTime is the nanoseconds spent in idle marking
+ // during this cycle. This is updated atomically throughout
+ // the cycle.
+ idleMarkTime int64
+
+ // markStartTime is the absolute start time in nanoseconds
+ // that assists and background mark workers started.
+ markStartTime int64
+
+ // dedicatedMarkWorkersNeeded is the number of dedicated mark
+ // workers that need to be started. This is computed at the
+ // beginning of each cycle and decremented atomically as
+ // dedicated mark workers get started.
+ dedicatedMarkWorkersNeeded int64
+
+ // idleMarkWorkers is two packed int32 values in a single uint64.
+ // These two values are always updated simultaneously.
+ //
+ // The bottom int32 is the current number of idle mark workers executing.
+ //
+ // The top int32 is the maximum number of idle mark workers allowed to
+ // execute concurrently. Normally, this number is just gomaxprocs. However,
+ // during periodic GC cycles it is set to 0 because the system is idle
+ // anyway; there's no need to go full blast on all of GOMAXPROCS.
+ //
+ // The maximum number of idle mark workers is used to prevent new workers
+ // from starting, but it is not a hard maximum. It is possible (but
+ // exceedingly rare) for the current number of idle mark workers to
+ // transiently exceed the maximum. This could happen if the maximum changes
+ // just after a GC ends while an M without a P is still finishing its
+ // idle mark work.
+ //
+ // Note that if we have no dedicated mark workers, we set this value to
+ // 1, because in that case we only have fractional GC workers, which aren't
+ // scheduled strictly enough to ensure GC progress. As a result, idle-priority mark
+ // workers are vital to GC progress in these situations.
+ //
+ // For example, consider a situation in which goroutines block on the GC
+ // (such as via runtime.GOMAXPROCS) and only fractional mark workers are
+ // scheduled (e.g. GOMAXPROCS=1). Without idle-priority mark workers, the
+ // last running M might skip scheduling a fractional mark worker if its
+ // utilization goal is met, such that once it goes to sleep (because there's
+ // nothing to do), there will be nothing else to spin up a new M for the
+ // fractional worker in the future, stalling GC progress and causing a
+ // deadlock. However, idle-priority workers will *always* run when there is
+ // nothing left to do, ensuring the GC makes progress.
+ //
+ // See github.com/golang/go/issues/44163 for more details.
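+ //
+ // A sketch of the packing, as decoded by the helpers near the bottom of
+ // this file (addIdleMarkWorker and friends):
+ //
+ // v := idleMarkWorkers.Load()
+ // n := int32(v & uint64(^uint32(0))) // bottom 32 bits: current count
+ // max := int32(v >> 32) // top 32 bits: allowed maximum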
+ idleMarkWorkers atomic.Uint64
+
+ // assistWorkPerByte is the ratio of scan work to allocated
+ // bytes that should be performed by mutator assists. This is
+ // computed at the beginning of each cycle and updated every
+ // time heapScan is updated.
+ assistWorkPerByte atomic.Float64
+
+ // assistBytesPerWork is 1/assistWorkPerByte.
+ //
+ // Note that because this is read and written independently
+ // from assistWorkPerByte, users may notice a skew between
+ // the two values, and such a state should be safe.
+ assistBytesPerWork atomic.Float64
+
+ // fractionalUtilizationGoal is the fraction of wall clock
+ // time that should be spent in the fractional mark worker on
+ // each P that isn't running a dedicated worker.
+ //
+ // For example, if the utilization goal is 25% and there are
+ // no dedicated workers, this will be 0.25. If the goal is
+ // 25%, there is one dedicated worker, and GOMAXPROCS is 5,
+ // this will be 0.05 to make up the missing 5%.
+ //
+ // If this is zero, no fractional workers are needed.
+ fractionalUtilizationGoal float64
+
+ // These memory stats are effectively duplicates of fields from
+ // memstats.heapStats but are updated atomically or with the world
+ // stopped and don't provide the same consistency guarantees.
+ //
+ // Because the runtime is responsible for managing a memory limit, it's
+ // useful to couple these stats more tightly to the gcController, which
+ // is intimately connected to how that memory limit is maintained.
+ heapInUse sysMemStat // bytes in mSpanInUse spans
+ heapReleased sysMemStat // bytes released to the OS
+ heapFree sysMemStat // bytes not in any span, but not released to the OS
+ totalAlloc atomic.Uint64 // total bytes allocated
+ totalFree atomic.Uint64 // total bytes freed
+ mappedReady atomic.Uint64 // total virtual memory in the Ready state (see mem.go).
+
+ // test indicates that this is a test-only copy of gcControllerState.
+ test bool
+
+ _ cpu.CacheLinePad
+}
+
+func (c *gcControllerState) init(gcPercent int32, memoryLimit int64) {
+ c.heapMinimum = defaultHeapMinimum
+ c.triggered = ^uint64(0)
+
+ c.consMarkController = piController{
+ // Tuned first via the Ziegler-Nichols process in simulation,
+ // then the integral time was manually tuned against real-world
+ // applications to deal with noisiness in the measured cons/mark
+ // ratio.
+ kp: 0.9,
+ ti: 4.0,
+
+ // Set a high reset time in GC cycles.
+ // This is inversely proportional to the rate at which we
+ // accumulate error from clipping. By making this very high
+ // we make the accumulation slow. In general, clipping is
+ // OK in our situation, hence the choice.
+ //
+ // Tune this if we get unintended effects from clipping for
+ // a long time.
+ tt: 1000,
+ min: -1000,
+ max: 1000,
+ }
+
+ c.setGCPercent(gcPercent)
+ c.setMemoryLimit(memoryLimit)
+ c.commit(true) // No sweep phase in the first GC cycle.
+ // N.B. Don't bother calling traceHeapGoal. Tracing is never enabled at
+ // initialization time.
+ // N.B. No need to call revise; there's no GC enabled during
+ // initialization.
+}
+
+// startCycle resets the GC controller's state and computes estimates
+// for a new GC cycle. The caller must hold worldsema and the world
+// must be stopped.
+func (c *gcControllerState) startCycle(markStartTime int64, procs int, trigger gcTrigger) {
+ c.heapScanWork.Store(0)
+ c.stackScanWork.Store(0)
+ c.globalsScanWork.Store(0)
+ c.bgScanCredit = 0
+ c.assistTime.Store(0)
+ c.dedicatedMarkTime = 0
+ c.fractionalMarkTime = 0
+ c.idleMarkTime = 0
+ c.markStartTime = markStartTime
+
+ // TODO(mknyszek): This is supposed to be the actual trigger point for the heap, but
+ // causes regressions in memory use. The cause is that the PI controller used to smooth
+ // the cons/mark ratio measurements tends to flail when using the less accurate precomputed
+ // trigger for the cons/mark calculation, and this results in the controller being more
+ // conservative about steady-states it tries to find in the future.
+ //
+ // This conservatism is transient, but these transient states tend to matter for short-lived
+ // programs, especially because the PI controller is overdamped, partially because it is
+ // configured with a relatively large time constant.
+ //
+ // Ultimately, I think this is just two mistakes piled on one another: the choice of a swingy
+ // smoothing function that recalls a fairly long history (due to its overdamped time constant)
+ // coupled with an inaccurate cons/mark calculation. It just so happens this works better
+ // today, and it makes it harder to change things in the future.
+ //
+ // This is described in #53738. Fix this for #53892 by changing back to the actual trigger
+ // point and simplifying the smoothing function.
+ heapTrigger, heapGoal := c.trigger()
+ c.triggered = heapTrigger
+
+ // Compute the background mark utilization goal. In general,
+ // this may not come out exactly. We round the number of
+ // dedicated workers so that the utilization is closest to
+ // 25%. For small GOMAXPROCS, this would introduce too much
+ // error, so we add fractional workers in that case.
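+ //
+ // For example, with gcBackgroundUtilization = 0.25 and procs = 6,
+ // totalUtilizationGoal = 1.5: dedicatedMarkWorkersNeeded rounds to 2,
+ // giving utilError = 2/1.5 - 1 ≈ 0.33 > maxUtilError, so we drop back
+ // to 1 dedicated worker and set
+ // fractionalUtilizationGoal = (1.5 - 1) / 6 ≈ 0.083.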
+ totalUtilizationGoal := float64(procs) * gcBackgroundUtilization
+ c.dedicatedMarkWorkersNeeded = int64(totalUtilizationGoal + 0.5)
+ utilError := float64(c.dedicatedMarkWorkersNeeded)/totalUtilizationGoal - 1
+ const maxUtilError = 0.3
+ if utilError < -maxUtilError || utilError > maxUtilError {
+ // Rounding put us more than 30% off our goal. With
+ // gcBackgroundUtilization of 25%, this happens for
+ // GOMAXPROCS<=3 or GOMAXPROCS=6. Enable fractional
+ // workers to compensate.
+ if float64(c.dedicatedMarkWorkersNeeded) > totalUtilizationGoal {
+ // Too many dedicated workers.
+ c.dedicatedMarkWorkersNeeded--
+ }
+ c.fractionalUtilizationGoal = (totalUtilizationGoal - float64(c.dedicatedMarkWorkersNeeded)) / float64(procs)
+ } else {
+ c.fractionalUtilizationGoal = 0
+ }
+
+ // In STW mode, we just want dedicated workers.
+ if debug.gcstoptheworld > 0 {
+ c.dedicatedMarkWorkersNeeded = int64(procs)
+ c.fractionalUtilizationGoal = 0
+ }
+
+ // Clear per-P state
+ for _, p := range allp {
+ p.gcAssistTime = 0
+ p.gcFractionalMarkTime = 0
+ }
+
+ if trigger.kind == gcTriggerTime {
+ // During a periodic GC cycle, reduce the number of idle mark workers
+ // required. However, we need at least one dedicated mark worker or
+ // idle GC worker to ensure GC progress in some scenarios (see comment
+ // on maxIdleMarkWorkers).
+ if c.dedicatedMarkWorkersNeeded > 0 {
+ c.setMaxIdleMarkWorkers(0)
+ } else {
+ // TODO(mknyszek): The fundamental reason why we need this is because
+ // we can't count on the fractional mark worker to get scheduled.
+ // Fix that by ensuring it gets scheduled according to its quota even
+ // if the rest of the application is idle.
+ c.setMaxIdleMarkWorkers(1)
+ }
+ } else {
+ // N.B. gomaxprocs and dedicatedMarkWorkersNeeded are guaranteed not to
+ // change during a GC cycle.
+ c.setMaxIdleMarkWorkers(int32(procs) - int32(c.dedicatedMarkWorkersNeeded))
+ }
+
+ // Compute initial values for controls that are updated
+ // throughout the cycle.
+ c.revise()
+
+ if debug.gcpacertrace > 0 {
+ assistRatio := c.assistWorkPerByte.Load()
+ print("pacer: assist ratio=", assistRatio,
+ " (scan ", gcController.heapScan>>20, " MB in ",
+ work.initialHeapLive>>20, "->",
+ heapGoal>>20, " MB)",
+ " workers=", c.dedicatedMarkWorkersNeeded,
+ "+", c.fractionalUtilizationGoal, "\n")
+ }
+}
+
+// revise updates the assist ratio during the GC cycle to account for
+// improved estimates. This should be called whenever gcController.heapScan
+// or gcController.heapLive is updated, or whenever any input to
+// gcController.heapGoal changes.
+// It is safe to call concurrently, but it may race with other
+// calls to revise.
+//
+// The result of this race is that the two assist ratio values may not line
+// up or may be stale. In practice this is OK because the assist ratio
+// moves slowly throughout a GC cycle, and the assist ratio is a best-effort
+// heuristic anyway. Furthermore, no part of the heuristic depends on
+// the two assist ratio values being exact reciprocals of one another, since
+// the two values are used to convert values from different sources.
+//
+// The worst case result of this raciness is that we may miss a larger shift
+// in the ratio (say, if we decide to pace more aggressively against the
+// hard heap goal) but even this "hard goal" is best-effort (see #40460).
+// The dedicated GC should ensure we don't exceed the hard goal by too much
+// in the rare case we do exceed it.
+//
+// It should only be called when gcBlackenEnabled != 0 (because this
+// is when assists are enabled and the necessary statistics are
+// available).
+func (c *gcControllerState) revise() {
+ gcPercent := c.gcPercent.Load()
+ if gcPercent < 0 {
+ // If GC is disabled but we're running a forced GC,
+ // act like GOGC is huge for the below calculations.
+ gcPercent = 100000
+ }
+ live := atomic.Load64(&c.heapLive)
+ scan := atomic.Load64(&c.heapScan)
+ work := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()
+
+ // Assume we're under the soft goal. Pace GC to complete at
+ // heapGoal assuming the heap is in steady-state.
+ heapGoal := int64(c.heapGoal())
+
+ // The expected scan work is computed as the amount of bytes scanned last
+ // GC cycle (both heap and stack), plus our estimate of globals work for this cycle.
+ scanWorkExpected := int64(c.lastHeapScan + c.lastStackScan + c.globalsScan)
+
+ // maxScanWork is a worst-case estimate of the amount of scan work that
+ // needs to be performed in this GC cycle. Specifically, it represents
+ // the case where *all* scannable memory turns out to be live, and
+ // *all* allocated stack space is scannable.
+ maxStackScan := atomic.Load64(&c.maxStackScan)
+ maxScanWork := int64(scan + maxStackScan + c.globalsScan)
+ if work > scanWorkExpected {
+ // We've already done more scan work than expected. Because our expectation
+ // is based on a steady-state scannable heap size, we assume this means our
+ // heap is growing. Compute a new heap goal that takes our existing runway
+ // computed for scanWorkExpected and extrapolates it to maxScanWork, the worst-case
+ // scan work. This keeps our assist ratio stable if the heap continues to grow.
+ //
+ // The effect of this mechanism is that assists stay flat in the face of heap
+ // growths. It's OK to use more memory this cycle to scan all the live heap,
+ // because the next GC cycle is inevitably going to use *at least* that much
+ // memory anyway.
+ extHeapGoal := int64(float64(heapGoal-int64(c.triggered))/float64(scanWorkExpected)*float64(maxScanWork)) + int64(c.triggered)
+ scanWorkExpected = maxScanWork
+
+ // hardGoal is a hard limit on the amount that we're willing to push back the
+ // heap goal, and that's twice the heap goal (i.e. if GOGC=100 and the heap and/or
+ // stacks and/or globals grow to twice their size, this limits the current GC cycle's
+ // growth to 4x the original live heap's size).
+ //
+ // This maintains the invariant that we use no more memory than the next GC cycle
+ // will anyway.
+ hardGoal := int64((1.0 + float64(gcPercent)/100.0) * float64(heapGoal))
+ if extHeapGoal > hardGoal {
+ extHeapGoal = hardGoal
+ }
+ heapGoal = extHeapGoal
+ }
+ if int64(live) > heapGoal {
+ // We're already past our heap goal, even the extrapolated one.
+ // Leave ourselves some extra runway, so in the worst case we
+ // finish by that point.
+ const maxOvershoot = 1.1
+ heapGoal = int64(float64(heapGoal) * maxOvershoot)
+
+ // Compute the upper bound on the scan work remaining.
+ scanWorkExpected = maxScanWork
+ }
+
+ // Compute the remaining scan work estimate.
+ //
+ // Note that we currently count allocations during GC as both
+ // scannable heap (heapScan) and scan work completed
+ // (scanWork), so allocation will change this difference
+ // slowly in the soft regime and not at all in the hard
+ // regime.
+ scanWorkRemaining := scanWorkExpected - work
+ if scanWorkRemaining < 1000 {
+ // We set a somewhat arbitrary lower bound on
+ // remaining scan work since if we aim a little high,
+ // we can miss by a little.
+ //
+ // We *do* need to enforce that this is at least 1,
+ // since marking is racy and double-scanning objects
+ // may legitimately make the remaining scan work
+ // negative, even in the hard goal regime.
+ scanWorkRemaining = 1000
+ }
+
+ // Compute the heap distance remaining.
+ heapRemaining := heapGoal - int64(live)
+ if heapRemaining <= 0 {
+ // This shouldn't happen, but if it does, avoid
+ // dividing by zero or setting the assist negative.
+ heapRemaining = 1
+ }
+
+ // Compute the mutator assist ratio so by the time the mutator
+ // allocates the remaining heap bytes up to heapGoal, it will
+ // have done (or stolen) the remaining amount of scan work.
+ // Note that the assist ratio values are updated atomically
+ // but not together. This means there may be some degree of
+ // skew between the two values. This is generally OK as the
+ // values shift relatively slowly over the course of a GC
+ // cycle.
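+ //
+ // As a hypothetical example: with 10 MiB of scan work remaining and
+ // 40 MiB of runway to the heap goal, assistWorkPerByte = 0.25, i.e.
+ // an assisting mutator must perform one byte of scan work for every
+ // four bytes it allocates.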
+ assistWorkPerByte := float64(scanWorkRemaining) / float64(heapRemaining)
+ assistBytesPerWork := float64(heapRemaining) / float64(scanWorkRemaining)
+ c.assistWorkPerByte.Store(assistWorkPerByte)
+ c.assistBytesPerWork.Store(assistBytesPerWork)
+}
+
+// endCycle computes the consMark estimate for the next cycle.
+// userForced indicates whether the current GC cycle was forced
+// by the application.
+func (c *gcControllerState) endCycle(now int64, procs int, userForced bool) {
+ // Record last heap goal for the scavenger.
+ // We'll be updating the heap goal soon.
+ gcController.lastHeapGoal = c.heapGoal()
+
+ // Compute the duration of time for which assists were turned on.
+ assistDuration := now - c.markStartTime
+
+ // Assume background mark hit its utilization goal.
+ utilization := gcBackgroundUtilization
+ // Add assist utilization; avoid divide by zero.
+ if assistDuration > 0 {
+ utilization += float64(c.assistTime.Load()) / float64(assistDuration*int64(procs))
+ }
+
+ if c.heapLive <= c.triggered {
+ // Shouldn't happen, but let's be very safe about this in case the
+ // GC is somehow extremely short.
+ //
+ // In this case though, the only reasonable value for c.heapLive-c.triggered
+ // would be 0, which isn't really all that useful, i.e. the GC was so short
+ // that it didn't matter.
+ //
+ // Ignore this case and don't update anything.
+ return
+ }
+ idleUtilization := 0.0
+ if assistDuration > 0 {
+ idleUtilization = float64(c.idleMarkTime) / float64(assistDuration*int64(procs))
+ }
+ // Determine the cons/mark ratio.
+ //
+ // The units we want for the numerator and denominator are both B / cpu-ns.
+ // We get this by taking the bytes allocated or scanned, and divide by the amount of
+ // CPU time it took for those operations. For allocations, that CPU time is
+ //
+ // assistDuration * procs * (1 - utilization)
+ //
+ // Where utilization includes just background GC workers and assists. It does *not*
+ // include idle GC work time, because in theory the mutator is free to take that at
+ // any point.
+ //
+ // For scanning, that CPU time is
+ //
+ // assistDuration * procs * (utilization + idleUtilization)
+ //
+ // In this case, we *include* idle utilization, because that is additional
+ // CPU time that the GC had available to it.
+ //
+ // In effect, idle GC time is sort of double-counted here, but it's very weird compared
+ // to other kinds of GC work, because of how fluid it is. Namely, because the mutator is
+ // *always* free to take it.
+ //
+ // So this calculation is really:
+ //
+ // ((heapLive-triggered) / (assistDuration * procs * (1-utilization))) /
+ // ((scanWork) / (assistDuration * procs * (utilization+idleUtilization)))
+ //
+ // Note that because we only care about the ratio, assistDuration and procs cancel out.
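+ //
+ // As a hypothetical example: if 10 MiB were allocated between the
+ // trigger and now, scanWork is 20 MiB, utilization is 0.25, and
+ // idleUtilization is 0, then
+ // currentConsMark = (10 * 0.25) / (20 * 0.75) ≈ 0.17.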
+ scanWork := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()
+ currentConsMark := (float64(c.heapLive-c.triggered) * (utilization + idleUtilization)) /
+ (float64(scanWork) * (1 - utilization))
+
+ // Update cons/mark controller. The time period for this is 1 GC cycle.
+ //
+ // This use of a PI controller might seem strange. So, here's an explanation:
+ //
+ // currentConsMark represents the consMark we *should've* had to be perfectly
+ // on-target for this cycle. Given that we assume the next GC will be like this
+ // one in the steady-state, it stands to reason that we should just pick that
+ // as our next consMark. In practice, however, currentConsMark is too noisy:
+ // we're going to be wildly off-target in each GC cycle if we do that.
+ //
+ // What we do instead is make a long-term assumption: there is some steady-state
+ // consMark value, but it's obscured by noise. By constantly shooting for this
+ // noisy-but-perfect consMark value, the controller will bounce around a bit,
+ // but its average behavior, in aggregate, should be less noisy and closer to
+ // the true long-term consMark value, provided it's tuned to be slightly overdamped.
+ var ok bool
+ oldConsMark := c.consMark
+ c.consMark, ok = c.consMarkController.next(c.consMark, currentConsMark, 1.0)
+ if !ok {
+ // The error spiraled out of control. This is incredibly unlikely seeing
+ // as this controller is essentially just a smoothing function, but it might
+ // mean that something went very wrong with how currentConsMark was calculated.
+ // Just reset consMark and keep going.
+ c.consMark = 0
+ }
+
+ if debug.gcpacertrace > 0 {
+ printlock()
+ goal := gcGoalUtilization * 100
+ print("pacer: ", int(utilization*100), "% CPU (", int(goal), " exp.) for ")
+ print(c.heapScanWork.Load(), "+", c.stackScanWork.Load(), "+", c.globalsScanWork.Load(), " B work (", c.lastHeapScan+c.lastStackScan+c.globalsScan, " B exp.) ")
+ print("in ", c.triggered, " B -> ", c.heapLive, " B (∆goal ", int64(c.heapLive)-int64(c.lastHeapGoal), ", cons/mark ", oldConsMark, ")")
+ if !ok {
+ print("[controller reset]")
+ }
+ println()
+ printunlock()
+ }
+}
+
+// enlistWorker encourages another dedicated mark worker to start on
+// another P if there are spare worker slots. It is used by putfull
+// when more work is made available.
+//
+//go:nowritebarrier
+func (c *gcControllerState) enlistWorker() {
+ // If there are idle Ps, wake one so it will run an idle worker.
+ // NOTE: This is suspected of causing deadlocks. See golang.org/issue/19112.
+ //
+ // if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
+ // wakep()
+ // return
+ // }
+
+ // There are no idle Ps. If we need more dedicated workers,
+ // try to preempt a running P so it will switch to a worker.
+ if c.dedicatedMarkWorkersNeeded <= 0 {
+ return
+ }
+ // Pick a random other P to preempt.
+ if gomaxprocs <= 1 {
+ return
+ }
+ gp := getg()
+ if gp == nil || gp.m == nil || gp.m.p == 0 {
+ return
+ }
+ myID := gp.m.p.ptr().id
+ for tries := 0; tries < 5; tries++ {
+ id := int32(fastrandn(uint32(gomaxprocs - 1)))
+ if id >= myID {
+ id++
+ }
+ p := allp[id]
+ if p.status != _Prunning {
+ continue
+ }
+ if preemptone(p) {
+ return
+ }
+ }
+}
+
+// findRunnableGCWorker returns a background mark worker for _p_ if it
+// should be run. This must only be called when gcBlackenEnabled != 0.
+func (c *gcControllerState) findRunnableGCWorker(_p_ *p, now int64) (*g, int64) {
+ if gcBlackenEnabled == 0 {
+ throw("gcControllerState.findRunnable: blackening not enabled")
+ }
+
+ // Since we have the current time, check if the GC CPU limiter
+ // hasn't had an update in a while. This check is necessary in
+ // case the limiter is on but hasn't been checked in a while and
+ // so may have left sufficient headroom to turn off again.
+ if now == 0 {
+ now = nanotime()
+ }
+ if gcCPULimiter.needUpdate(now) {
+ gcCPULimiter.update(now)
+ }
+
+ if !gcMarkWorkAvailable(_p_) {
+ // No work to be done right now. This can happen at
+ // the end of the mark phase when there are still
+ // assists tapering off. Don't bother running a worker
+ // now because it'll just return immediately.
+ return nil, now
+ }
+
+ // Grab a worker before we commit to running below.
+ node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
+ if node == nil {
+ // There is at least one worker per P, so normally there are
+ // enough workers to run on all Ps, if necessary. However, once
+ // a worker enters gcMarkDone it may park without rejoining the
+ // pool, thus freeing a P with no corresponding worker.
+ // gcMarkDone never depends on another worker doing work, so it
+ // is safe to simply do nothing here.
+ //
+ // If gcMarkDone bails out without completing the mark phase,
+ // it will always do so with queued global work. Thus, that P
+ // will be immediately eligible to re-run the worker G it was
+ // just using, ensuring work can complete.
+ return nil, now
+ }
+
+ decIfPositive := func(ptr *int64) bool {
+ for {
+ v := atomic.Loadint64(ptr)
+ if v <= 0 {
+ return false
+ }
+
+ if atomic.Casint64(ptr, v, v-1) {
+ return true
+ }
+ }
+ }
+
+ if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
+ // This P is now dedicated to marking until the end of
+ // the concurrent mark phase.
+ _p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
+ } else if c.fractionalUtilizationGoal == 0 {
+ // No need for fractional workers.
+ gcBgMarkWorkerPool.push(&node.node)
+ return nil, now
+ } else {
+ // Is this P behind on the fractional utilization
+ // goal?
+ //
+ // This should be kept in sync with pollFractionalWorkerExit.
+ delta := now - c.markStartTime
+ if delta > 0 && float64(_p_.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal {
+ // Nope. No need to run a fractional worker.
+ gcBgMarkWorkerPool.push(&node.node)
+ return nil, now
+ }
+ // Run a fractional worker.
+ _p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
+ }
+
+ // Run the background mark worker.
+ gp := node.gp.ptr()
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ if trace.enabled {
+ traceGoUnpark(gp, 0)
+ }
+ return gp, now
+}
+
+// resetLive sets up the controller state for the next mark phase after the end
+// of the previous one. Must be called after endCycle and before commit, before
+// the world is started.
+//
+// The world must be stopped.
+func (c *gcControllerState) resetLive(bytesMarked uint64) {
+ c.heapMarked = bytesMarked
+ c.heapLive = bytesMarked
+ c.heapScan = uint64(c.heapScanWork.Load())
+ c.lastHeapScan = uint64(c.heapScanWork.Load())
+ c.lastStackScan = uint64(c.stackScanWork.Load())
+ c.triggered = ^uint64(0) // Reset triggered.
+
+ // heapLive was updated, so emit a trace event.
+ if trace.enabled {
+ traceHeapAlloc()
+ }
+}
+
+// markWorkerStop must be called whenever a mark worker stops executing.
+//
+// It updates mark work accounting in the controller by a duration of
+// work in nanoseconds and other bookkeeping.
+//
+// Safe to execute at any time.
+func (c *gcControllerState) markWorkerStop(mode gcMarkWorkerMode, duration int64) {
+ switch mode {
+ case gcMarkWorkerDedicatedMode:
+ atomic.Xaddint64(&c.dedicatedMarkTime, duration)
+ atomic.Xaddint64(&c.dedicatedMarkWorkersNeeded, 1)
+ case gcMarkWorkerFractionalMode:
+ atomic.Xaddint64(&c.fractionalMarkTime, duration)
+ case gcMarkWorkerIdleMode:
+ atomic.Xaddint64(&c.idleMarkTime, duration)
+ c.removeIdleMarkWorker()
+ default:
+ throw("markWorkerStop: unknown mark worker mode")
+ }
+}
+
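+// update adds the deltas dHeapLive and dHeapScan to heapLive and heapScan,
+// respectively. heapScan is only updated while gcBlackenEnabled == 0 (it is
+// fixed during a cycle); otherwise the assist ratio is revised instead.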
+func (c *gcControllerState) update(dHeapLive, dHeapScan int64) {
+ if dHeapLive != 0 {
+ atomic.Xadd64(&gcController.heapLive, dHeapLive)
+ if trace.enabled {
+ // gcController.heapLive changed.
+ traceHeapAlloc()
+ }
+ }
+ if gcBlackenEnabled == 0 {
+ // Update heapScan when we're not in a current GC. It is fixed
+ // at the beginning of a cycle.
+ if dHeapScan != 0 {
+ atomic.Xadd64(&gcController.heapScan, dHeapScan)
+ }
+ } else {
+ // gcController.heapLive changed.
+ c.revise()
+ }
+}
+
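+// addScannableStack adjusts the scannable stack accounting (maxStackScan)
+// by amount. If a P is supplied, the change is batched in that P's
+// maxStackScanDelta and flushed to the global counter only once the delta
+// exceeds maxStackScanSlack in either direction, reducing atomic contention.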
+func (c *gcControllerState) addScannableStack(pp *p, amount int64) {
+ if pp == nil {
+ atomic.Xadd64(&c.maxStackScan, amount)
+ return
+ }
+ pp.maxStackScanDelta += amount
+ if pp.maxStackScanDelta >= maxStackScanSlack || pp.maxStackScanDelta <= -maxStackScanSlack {
+ atomic.Xadd64(&c.maxStackScan, pp.maxStackScanDelta)
+ pp.maxStackScanDelta = 0
+ }
+}
+
+func (c *gcControllerState) addGlobals(amount int64) {
+ atomic.Xadd64(&c.globalsScan, amount)
+}
+
+// heapGoal returns the current heap goal.
+func (c *gcControllerState) heapGoal() uint64 {
+ goal, _ := c.heapGoalInternal()
+ return goal
+}
+
+// heapGoalInternal is the implementation of heapGoal which returns additional
+// information that is necessary for computing the trigger.
+//
+// The returned minTrigger is always <= goal.
+func (c *gcControllerState) heapGoalInternal() (goal, minTrigger uint64) {
+ // Start with the goal calculated for gcPercent.
+ goal = c.gcPercentHeapGoal.Load()
+
+ // Check if the memory-limit-based goal is smaller, and if so, pick that.
+ if newGoal := c.memoryLimitHeapGoal(); go119MemoryLimitSupport && newGoal < goal {
+ goal = newGoal
+ } else {
+ // We're not limited by the memory limit goal, so perform a series of
+ // adjustments that might move the goal forward in a variety of circumstances.
+
+ sweepDistTrigger := c.sweepDistMinTrigger.Load()
+ if sweepDistTrigger > goal {
+ // Set the goal to maintain a minimum sweep distance since
+ // the last call to commit. Note that we never want to do this
+ // if we're in the memory limit regime, because it could push
+ // the goal up.
+ goal = sweepDistTrigger
+ }
+ // Since we ignore the sweep distance trigger in the memory
+ // limit regime, we need to ensure we don't propagate it to
+ // the trigger, because it could cause a violation of the
+ // invariant that the trigger < goal.
+ minTrigger = sweepDistTrigger
+
+ // Ensure that the heap goal is at least a little larger than
+ // the point at which we triggered. This may not be the case if GC
+ // start is delayed or if the allocation that pushed gcController.heapLive
+ // over the trigger is large, or if the trigger is really close to
+ // the GOGC-based goal. Assist is proportional to this distance, so enforce a
+ // minimum distance, even if it means going over the GOGC goal
+ // by a tiny bit.
+ //
+ // Ignore this if we're in the memory limit regime: we'd prefer to
+ // have the GC respond hard about how close we are to the goal than to
+ // push the goal back in such a manner that it could cause us to exceed
+ // the memory limit.
+ const minRunway = 64 << 10
+ if c.triggered != ^uint64(0) && goal < c.triggered+minRunway {
+ goal = c.triggered + minRunway
+ }
+ }
+ return
+}
+
+// memoryLimitHeapGoal returns a heap goal derived from memoryLimit.
+func (c *gcControllerState) memoryLimitHeapGoal() uint64 {
+ // Start by pulling out some values we'll need. Be careful about overflow.
+ var heapFree, heapAlloc, mappedReady uint64
+ for {
+ heapFree = c.heapFree.load() // Free and unscavenged memory.
+ heapAlloc = c.totalAlloc.Load() - c.totalFree.Load() // Heap object bytes in use.
+ mappedReady = c.mappedReady.Load() // Total unreleased mapped memory.
+ if heapFree+heapAlloc <= mappedReady {
+ break
+ }
+ // It is impossible for total unreleased mapped memory to exceed heap memory, but
+ // because these stats are updated independently, we may observe a partial update
+ // including only some values. Thus, we appear to break the invariant. However,
+ // this condition is necessarily transient, so just try again. In the case of a
+ // persistent accounting error, we'll deadlock here.
+ }
+
+ // Below we compute a goal from memoryLimit. There are a few things to be aware of.
+ // Firstly, the memoryLimit does not easily compare to the heap goal: the former
+ // is total mapped memory by the runtime that hasn't been released, while the latter is
+ // only heap object memory. Intuitively, the way we convert from one to the other is to
+ // subtract everything from memoryLimit that both contributes to the memory limit (so,
+ // ignore scavenged memory) and doesn't contain heap objects. This isn't quite what
+ // lines up with reality, but it's a good starting point.
+ //
+ // In practice this computation looks like the following:
+ //
+ // memoryLimit - ((mappedReady - heapFree - heapAlloc) + max(mappedReady - memoryLimit, 0)) - memoryLimitHeapGoalHeadroom
+ // ^1 ^2 ^3
+ //
+ // Let's break this down.
+ //
+ // The first term (marker 1) is everything that contributes to the memory limit and isn't
+ // or couldn't become heap objects. It represents, broadly speaking, non-heap overheads.
+ // One oddity you may have noticed is that we also subtract out heapFree, i.e. unscavenged
+ // memory that may contain heap objects in the future.
+ //
+ // Let's take a step back. In an ideal world, this term would look something like just
+ // the heap goal. That is, we "reserve" enough space for the heap to grow to the heap
+ // goal, and subtract out everything else. This is of course impossible; the definition
+ // is circular! However, this impossible definition contains a key insight: the amount
+ // we're *going* to use matters just as much as whatever we're currently using.
+ //
+ // Consider if the heap shrinks to 1/10th its size, leaving behind lots of free and
+ // unscavenged memory. mappedReady - heapAlloc will be quite large, because of that free
+ // and unscavenged memory, pushing the goal down significantly.
+ //
+ // heapFree is also safe to exclude from the memory limit because in the steady-state, it's
+ // just a pool of memory for future heap allocations, and making new allocations from heapFree
+ // memory doesn't increase overall memory use. In transient states, the scavenger and the
+ // allocator actively manage the pool of heapFree memory to maintain the memory limit.
+ //
+ // The second term (marker 2) is the amount of memory we've exceeded the limit by, and is
+ // intended to help recover from such a situation. By pushing the heap goal down, we also
+ // push the trigger down, triggering and finishing a GC sooner in order to make room for
+ // other memory sources. Note that since we're effectively reducing the heap goal by X bytes,
+ // we're actually giving more than X bytes of headroom back, because the heap goal is in
+ // terms of heap objects, but it takes more than X bytes (e.g. due to fragmentation) to store
+ // X bytes worth of objects.
+ //
+ // The third term (marker 3) subtracts an additional memoryLimitHeapGoalHeadroom bytes from the
+ // heap goal. As the name implies, this is to provide additional headroom in the face of pacing
+ // inaccuracies. This is a fixed number of bytes because these inaccuracies disproportionately
+ // affect small heaps: as heaps get smaller, the pacer's inputs get fuzzier. Shorter GC cycles
+ // and less GC work means noisy external factors like the OS scheduler have a greater impact.
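+ //
+ // As a hypothetical example: with memoryLimit = 1 GiB, mappedReady = 900 MiB,
+ // heapFree = 100 MiB, and heapAlloc = 600 MiB, we get
+ // nonHeapMemory = 900 - 100 - 600 = 200 MiB and overage = 0, so the goal
+ // is 1024 - 200 = 824 MiB, less memoryLimitHeapGoalHeadroom.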
+
+ memoryLimit := uint64(c.memoryLimit.Load())
+
+ // Compute term 1.
+ nonHeapMemory := mappedReady - heapFree - heapAlloc
+
+ // Compute term 2.
+ var overage uint64
+ if mappedReady > memoryLimit {
+ overage = mappedReady - memoryLimit
+ }
+
+ if nonHeapMemory+overage >= memoryLimit {
+ // We're at a point where non-heap memory exceeds the memory limit on its own.
+ // There's honestly not much we can do here but just trigger GCs continuously
+ // and let the CPU limiter rein that in. Something has to give at this point.
+ // Set it to heapMarked, the lowest possible goal.
+ return c.heapMarked
+ }
+
+ // Compute the goal.
+ goal := memoryLimit - (nonHeapMemory + overage)
+
+ // Apply some headroom to the goal to account for pacing inaccuracies.
+ // Be careful about small limits.
+ if goal < memoryLimitHeapGoalHeadroom || goal-memoryLimitHeapGoalHeadroom < memoryLimitHeapGoalHeadroom {
+ goal = memoryLimitHeapGoalHeadroom
+ } else {
+ goal = goal - memoryLimitHeapGoalHeadroom
+ }
+ // Don't let us go below the live heap. A heap goal below the live heap doesn't make sense.
+ if goal < c.heapMarked {
+ goal = c.heapMarked
+ }
+ return goal
+}
+
+const (
+ // These constants determine the bounds on the GC trigger as a fraction
+ // of heap bytes allocated between the start of a GC (heapLive == heapMarked)
+ // and the end of a GC (heapLive == heapGoal).
+ //
+ // The constants are obscured in this way for efficiency. The denominator
+ // of the fraction is always a power-of-two for a quick division, so that
+ // the numerator is a single constant integer multiplication.
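+ //
+ // For example, with heapMarked = 64 MiB and a 128 MiB heap goal, the
+ // resulting trigger bounds are ((128-64)/64)*45 + 64 = 109 MiB and
+ // ((128-64)/64)*61 + 64 = 125 MiB, i.e. roughly 0.7 and 0.95 of the
+ // way from the marked heap to the goal.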
+ triggerRatioDen = 64
+
+ // The minimum trigger constant was chosen empirically: given a sufficiently
+ // fast/scalable allocator with 48 Ps that could drive the trigger ratio
+ // to <0.05, this constant causes applications to retain the same peak
+ // RSS compared to not having this allocator.
+ minTriggerRatioNum = 45 // ~0.7
+
+ // The maximum trigger constant is chosen somewhat arbitrarily, but the
+ // current constant has served us well over the years.
+ maxTriggerRatioNum = 61 // ~0.95
+)
+
+// trigger returns the current point at which a GC should trigger along with
+// the heap goal.
+//
+// The returned value may be compared against heapLive to determine whether
+// the GC should trigger. Thus, the GC trigger condition should be (but may
+// not be, in the case of small movements for efficiency) checked whenever
+// the heap goal may change.
+func (c *gcControllerState) trigger() (uint64, uint64) {
+ goal, minTrigger := c.heapGoalInternal()
+
+ // Invariant: the trigger must always be less than the heap goal.
+ //
+ // Note that the memory limit sets a hard maximum on our heap goal,
+ // but the live heap may grow beyond it.
+
+ if c.heapMarked >= goal {
+ // The goal should never be smaller than heapMarked, but let's be
+ // defensive about it. The only reasonable trigger here is one that
+ // causes a continuous GC cycle at heapMarked, but respect the goal
+ // if it came out as smaller than that.
+ return goal, goal
+ }
+
+ // Below this point, c.heapMarked < goal.
+
+ // heapMarked is our absolute minimum, and it's possible the trigger
+ // bound we get from heapGoalInternal is less than that.
+ if minTrigger < c.heapMarked {
+ minTrigger = c.heapMarked
+ }
+
+ // If we let the trigger go too low, then if the application
+ // is allocating very rapidly we might end up in a situation
+ // where we're allocating black during a nearly always-on GC.
+ // The result of this is a growing heap and ultimately an
+ // increase in RSS. By capping us at a point >0, we're essentially
+ // saying that we're OK using more CPU during the GC to prevent
+ // this growth in RSS.
+ triggerLowerBound := uint64(((goal-c.heapMarked)/triggerRatioDen)*minTriggerRatioNum) + c.heapMarked
+ if minTrigger < triggerLowerBound {
+ minTrigger = triggerLowerBound
+ }
+
+ // For small heaps, set the max trigger point at maxTriggerRatio of the way
+ // from the live heap to the heap goal. This ensures we always have *some*
+ // headroom when the GC actually starts. For larger heaps, set the max trigger
+ // point at the goal, minus the minimum heap size.
+ //
+ // This choice follows from the fact that the minimum heap size is chosen
+ // to reflect the costs of a GC with no work to do. With a large heap but
+ // very little scan work to perform, this gives us exactly as much runway
+ // as we would need, in the worst case.
+ maxTrigger := uint64(((goal-c.heapMarked)/triggerRatioDen)*maxTriggerRatioNum) + c.heapMarked
+ if goal > defaultHeapMinimum && goal-defaultHeapMinimum > maxTrigger {
+ maxTrigger = goal - defaultHeapMinimum
+ }
+ if maxTrigger < minTrigger {
+ maxTrigger = minTrigger
+ }
+
+ // Compute the trigger from our bounds and the runway stored by commit.
+ var trigger uint64
+ runway := c.runway.Load()
+ if runway > goal {
+ trigger = minTrigger
+ } else {
+ trigger = goal - runway
+ }
+ if trigger < minTrigger {
+ trigger = minTrigger
+ }
+ if trigger > maxTrigger {
+ trigger = maxTrigger
+ }
+ if trigger > goal {
+ print("trigger=", trigger, " heapGoal=", goal, "\n")
+ print("minTrigger=", minTrigger, " maxTrigger=", maxTrigger, "\n")
+ throw("produced a trigger greater than the heap goal")
+ }
+ return trigger, goal
+}
+
+// commit recomputes all pacing parameters needed to derive the
+// trigger and the heap goal. Namely, the gcPercent-based heap goal,
+// and the amount of runway we want to give the GC this cycle.
+//
+// This can be called any time. If the GC is in the middle of a
+// concurrent phase, it will adjust the pacing of that phase.
+//
+// isSweepDone should be the result of calling isSweepDone(),
+// unless we're testing or we know we're executing during a GC cycle.
+//
+// This depends on gcPercent, gcController.heapMarked, and
+// gcController.heapLive. These must be up to date.
+//
+// Callers must call gcControllerState.revise after calling this
+// function if the GC is enabled.
+//
+// mheap_.lock must be held or the world must be stopped.
+func (c *gcControllerState) commit(isSweepDone bool) {
+ if !c.test {
+ assertWorldStoppedOrLockHeld(&mheap_.lock)
+ }
+
+ if isSweepDone {
+ // The sweep is done, so there aren't any restrictions on the trigger
+ // we need to think about.
+ c.sweepDistMinTrigger.Store(0)
+ } else {
+ // Concurrent sweep happens in the heap growth
+ // from gcController.heapLive to trigger. Make sure we
+ // give the sweeper some runway if it doesn't have enough.
+ c.sweepDistMinTrigger.Store(atomic.Load64(&c.heapLive) + sweepMinHeapDistance)
+ }
+
+ // Compute the next GC goal, which is when the allocated heap
+ // has grown by GOGC/100 over where it started the last cycle,
+ // plus additional runway for non-heap sources of GC work.
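+ //
+ // For example, with GOGC=100, heapMarked = 50 MiB, lastStackScan = 4 MiB,
+ // and globalsScan = 2 MiB, the gcPercent-based goal below is
+ // 50 + (50+4+2)*100/100 = 106 MiB.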
+ gcPercentHeapGoal := ^uint64(0)
+ if gcPercent := c.gcPercent.Load(); gcPercent >= 0 {
+ gcPercentHeapGoal = c.heapMarked + (c.heapMarked+atomic.Load64(&c.lastStackScan)+atomic.Load64(&c.globalsScan))*uint64(gcPercent)/100
+ }
+ // Apply the minimum heap size here. It's defined in terms of gcPercent
+ // and is only updated by functions that call commit.
+ if gcPercentHeapGoal < c.heapMinimum {
+ gcPercentHeapGoal = c.heapMinimum
+ }
+ c.gcPercentHeapGoal.Store(gcPercentHeapGoal)
+
+ // Compute the amount of runway we want the GC to have by using our
+ // estimate of the cons/mark ratio.
+ //
+ // The idea is to take our expected scan work, and multiply it by
+ // the cons/mark ratio to determine how long it'll take to complete
+ // that scan work in terms of bytes allocated. This gives us our GC's
+ // runway.
+ //
+ // However, the cons/mark ratio is a ratio of rates per CPU-second, but
+ // here we care about the relative rates for some division of CPU
+ // resources among the mutator and the GC.
+ //
+ // To summarize, we have B / cpu-ns, and we want B / ns. We get that
+ // by multiplying by our desired division of CPU resources. We choose
+ // to express CPU resources as GOMAXPROCS*fraction. Note that because
+ // we're working with a ratio here, we can omit the number of CPU cores,
+ // because they'll appear in the numerator and denominator and cancel out.
+ // As a result, this is basically just "weighing" the cons/mark ratio by
+ // our desired division of resources.
+ //
+ // Furthermore, by setting the runway so that CPU resources are divided
+ // this way, assuming that the cons/mark ratio is correct, we make that
+ // division a reality.
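+ //
+ // As an illustrative example (treating 0.25 as a stand-in value for
+ // gcGoalUtilization): with consMark = 1 and 30 MiB of expected scan
+ // work, the runway below works out to 1 * (0.75/0.25) * 30 = 90 MiB.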
+ c.runway.Store(uint64((c.consMark * (1 - gcGoalUtilization) / (gcGoalUtilization)) * float64(c.lastHeapScan+c.lastStackScan+c.globalsScan)))
+}
+
+// setGCPercent updates gcPercent. commit must be called after.
+// Returns the old value of gcPercent.
+//
+// The world must be stopped, or mheap_.lock must be held.
+func (c *gcControllerState) setGCPercent(in int32) int32 {
+ if !c.test {
+ assertWorldStoppedOrLockHeld(&mheap_.lock)
+ }
+
+ out := c.gcPercent.Load()
+ if in < 0 {
+ in = -1
+ }
+ c.heapMinimum = defaultHeapMinimum * uint64(in) / 100
+ c.gcPercent.Store(in)
+
+ return out
+}
+
+//go:linkname setGCPercent runtime/debug.setGCPercent
+func setGCPercent(in int32) (out int32) {
+ // Run on the system stack since we grab the heap lock.
+ systemstack(func() {
+ lock(&mheap_.lock)
+ out = gcController.setGCPercent(in)
+ gcControllerCommit()
+ unlock(&mheap_.lock)
+ })
+
+ // If we just disabled GC, wait for any concurrent GC mark to
+ // finish so we always return with no GC running.
+ if in < 0 {
+ gcWaitOnMark(atomic.Load(&work.cycles))
+ }
+
+ return out
+}
+
+func readGOGC() int32 {
+ p := gogetenv("GOGC")
+ if p == "off" {
+ return -1
+ }
+ if n, ok := atoi32(p); ok {
+ return n
+ }
+ return 100
+}
+
+// setMemoryLimit updates memoryLimit. commit must be called after.
+// Returns the old value of memoryLimit.
+//
+// The world must be stopped, or mheap_.lock must be held.
+func (c *gcControllerState) setMemoryLimit(in int64) int64 {
+ if !c.test {
+ assertWorldStoppedOrLockHeld(&mheap_.lock)
+ }
+
+ out := c.memoryLimit.Load()
+ if in >= 0 {
+ c.memoryLimit.Store(in)
+ }
+
+ return out
+}
+
+//go:linkname setMemoryLimit runtime/debug.setMemoryLimit
+func setMemoryLimit(in int64) (out int64) {
+ // Run on the system stack since we grab the heap lock.
+ systemstack(func() {
+ lock(&mheap_.lock)
+ out = gcController.setMemoryLimit(in)
+ if in < 0 || out == in {
+ // If we're just checking the value or not changing
+ // it, there's no point in doing the rest.
+ unlock(&mheap_.lock)
+ return
+ }
+ gcControllerCommit()
+ unlock(&mheap_.lock)
+ })
+ return out
+}
+
+func readGOMEMLIMIT() int64 {
+ p := gogetenv("GOMEMLIMIT")
+ if p == "" || p == "off" {
+ return maxInt64
+ }
+ n, ok := parseByteCount(p)
+ if !ok {
+ print("GOMEMLIMIT=", p, "\n")
+ throw("malformed GOMEMLIMIT; see `go doc runtime/debug.SetMemoryLimit`")
+ }
+ return n
+}
+
+type piController struct {
+ kp float64 // Proportional constant.
+ ti float64 // Integral time constant.
+ tt float64 // Reset time.
+
+ min, max float64 // Output boundaries.
+
+ // PI controller state.
+
+ errIntegral float64 // Integral of the error from t=0 to now.
+
+ // Error flags.
+ errOverflow bool // Set if errIntegral ever overflowed.
+ inputOverflow bool // Set if an operation with the input overflowed.
+}
+
+// next provides a new sample to the controller.
+//
+// input is the sample, setpoint is the desired point, and period is how much
+// time (in whatever unit makes the most sense) has passed since the last sample.
+//
+// Returns a new value for the variable it's controlling, and whether the operation
+// completed successfully. One reason this might fail is if error has been growing
+// in an unbounded manner, to the point of overflow.
+//
+// In the specific case where an error overflow occurs, the errOverflow field
+// will be set and the rest of the controller's internal state will be fully reset.
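+//
+// The update below is the standard PI form with back-calculation
+// anti-windup:
+//
+// output = clamp(kp*(setpoint-input) + errIntegral, min, max)
+// errIntegral += (kp*period/ti)*(setpoint-input) + (period/tt)*(output-rawOutput)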
+func (c *piController) next(input, setpoint, period float64) (float64, bool) {
+ // Compute the raw output value.
+ prop := c.kp * (setpoint - input)
+ rawOutput := prop + c.errIntegral
+
+ // Clamp rawOutput into output.
+ output := rawOutput
+ if isInf(output) || isNaN(output) {
+ // The input had a large enough magnitude that either it was already
+ // overflowed, or some operation with it overflowed.
+ // Set a flag and reset. That's the safest thing to do.
+ c.reset()
+ c.inputOverflow = true
+ return c.min, false
+ }
+ if output < c.min {
+ output = c.min
+ } else if output > c.max {
+ output = c.max
+ }
+
+ // Update the controller's state.
+ if c.ti != 0 && c.tt != 0 {
+ c.errIntegral += (c.kp*period/c.ti)*(setpoint-input) + (period/c.tt)*(output-rawOutput)
+ if isInf(c.errIntegral) || isNaN(c.errIntegral) {
+ // So much error has accumulated that we managed to overflow.
+ // The assumptions around the controller have likely broken down.
+ // Set a flag and reset. That's the safest thing to do.
+ c.reset()
+ c.errOverflow = true
+ return c.min, false
+ }
+ }
+ return output, true
+}
+
+// reset resets the controller state, except for controller error flags.
+func (c *piController) reset() {
+ c.errIntegral = 0
+}
+
+// addIdleMarkWorker attempts to add a new idle mark worker.
+//
+// If this returns true, the caller must become an idle mark worker unless
+// there are no background mark worker goroutines in the pool. This case is
+// harmless because there are already background mark workers running.
+// If this returns false, the caller must NOT become an idle mark worker.
+//
+// nosplit because it may be called without a P.
+//
+//go:nosplit
+func (c *gcControllerState) addIdleMarkWorker() bool {
+ for {
+ old := c.idleMarkWorkers.Load()
+ n, max := int32(old&uint64(^uint32(0))), int32(old>>32)
+ if n >= max {
+ // See the comment on idleMarkWorkers for why
+ // n > max is tolerated.
+ return false
+ }
+ if n < 0 {
+ print("n=", n, " max=", max, "\n")
+ throw("negative idle mark workers")
+ }
+ new := uint64(uint32(n+1)) | (uint64(max) << 32)
+ if c.idleMarkWorkers.CompareAndSwap(old, new) {
+ return true
+ }
+ }
+}
+
+// needIdleMarkWorker is a hint as to whether another idle mark worker is needed.
+//
+// The caller must still call addIdleMarkWorker to become one. This is mainly
+// useful for a quick check before an expensive operation.
+//
+// nosplit because it may be called without a P.
+//
+//go:nosplit
+func (c *gcControllerState) needIdleMarkWorker() bool {
+ p := c.idleMarkWorkers.Load()
+ n, max := int32(p&uint64(^uint32(0))), int32(p>>32)
+ return n < max
+}
+
+// removeIdleMarkWorker must be called when an idle mark worker stops executing.
+func (c *gcControllerState) removeIdleMarkWorker() {
+ for {
+ old := c.idleMarkWorkers.Load()
+ n, max := int32(old&uint64(^uint32(0))), int32(old>>32)
+ if n-1 < 0 {
+ print("n=", n, " max=", max, "\n")
+ throw("negative idle mark workers")
+ }
+ new := uint64(uint32(n-1)) | (uint64(max) << 32)
+ if c.idleMarkWorkers.CompareAndSwap(old, new) {
+ return
+ }
+ }
+}
+
+// setMaxIdleMarkWorkers sets the maximum number of idle mark workers allowed.
+//
+// This method is optimistic in that it does not wait for the number of
+// idle mark workers to reduce to max before returning; it assumes the workers
+// will deschedule themselves.
+func (c *gcControllerState) setMaxIdleMarkWorkers(max int32) {
+ for {
+ old := c.idleMarkWorkers.Load()
+ n := int32(old & uint64(^uint32(0)))
+ if n < 0 {
+ print("n=", n, " max=", max, "\n")
+ throw("negative idle mark workers")
+ }
+ new := uint64(uint32(n)) | (uint64(max) << 32)
+ if c.idleMarkWorkers.CompareAndSwap(old, new) {
+ return
+ }
+ }
+}
+
+// gcControllerCommit is gcController.commit, but passes arguments from live
+// (non-test) data. It also updates any consumers of the GC pacing, such as
+// sweep pacing and the background scavenger.
+//
+// Calls gcController.commit.
+//
+// The heap lock must be held, so this must be executed on the system stack.
+//
+//go:systemstack
+func gcControllerCommit() {
+ assertWorldStoppedOrLockHeld(&mheap_.lock)
+
+ gcController.commit(isSweepDone())
+
+ // Update mark pacing.
+ if gcphase != _GCoff {
+ gcController.revise()
+ }
+
+ // TODO(mknyszek): This isn't really accurate any longer because the heap
+ // goal is computed dynamically. Still useful to snapshot, but not as useful.
+ if trace.enabled {
+ traceHeapGoal()
+ }
+
+ trigger, heapGoal := gcController.trigger()
+ gcPaceSweeper(trigger)
+ gcPaceScavenger(gcController.memoryLimit.Load(), heapGoal, gcController.lastHeapGoal)
+}
diff --git a/contrib/go/_std_1.19/src/runtime/mgcscavenge.go b/contrib/go/_std_1.19/src/runtime/mgcscavenge.go
new file mode 100644
index 0000000000..bf38f87c77
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mgcscavenge.go
@@ -0,0 +1,1105 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Scavenging free pages.
+//
+// This file implements scavenging (the release of physical pages backing mapped
+// memory) of free and unused pages in the heap as a way to deal with page-level
+// fragmentation and reduce the RSS of Go applications.
+//
+// Scavenging in Go happens on two fronts: there's the background
+// (asynchronous) scavenger and the heap-growth (synchronous) scavenger.
+//
+// The former happens on a goroutine much like the background sweeper which is
+// soft-capped at using scavengePercent of the mutator's time, based on
+// order-of-magnitude estimates of the costs of scavenging. The background
+// scavenger's primary goal is to bring the estimated heap RSS of the
+// application down to a goal.
+//
+// Before we consider what this looks like, we need to split the world into two
+// halves. One in which a memory limit is not set, and one in which it is.
+//
+// For the former, the goal is defined as:
+// (retainExtraPercent+100) / 100 * (heapGoal / lastHeapGoal) * lastHeapInUse
+//
+// Essentially, we wish to have the application's RSS track the heap goal, but
+// the heap goal is defined in terms of bytes of objects, rather than pages like
+// RSS. As a result, we need to account for fragmentation internal to
+// spans. heapGoal / lastHeapGoal defines the ratio between the current heap goal
+// and the last heap goal, which tells us by how much the heap is growing and
+// shrinking. We estimate what the heap will grow to in terms of pages by taking
+// this ratio and multiplying it by heapInUse at the end of the last GC, which
+// allows us to account for this additional fragmentation. Note that this
+// procedure makes the assumption that the degree of fragmentation won't change
+// dramatically over the next GC cycle. Overestimating the amount of
+// fragmentation simply results in higher memory use, which will be accounted
+// for by the next pacing update. Underestimating the fragmentation, however,
+// could lead to performance degradation. Handling this case is not within the
+// scope of the scavenger. Situations where the amount of fragmentation balloons
+// over the course of a single GC cycle should be considered pathologies,
+// flagged as bugs, and fixed appropriately.
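+//
+// As a hypothetical example: if the heap goal grew from 100 MiB to 110 MiB
+// and heapInUse was 120 MiB at the end of the last GC, then with
+// retainExtraPercent = 10 the goal is 1.10 * (110/100) * 120 ≈ 145 MiB
+// of retained memory.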
+//
+// An additional factor of retainExtraPercent is added as a buffer to help ensure
+// that there's more unscavenged memory to allocate out of, since each allocation
+// out of scavenged memory incurs a potentially expensive page fault.
+//
+// If a memory limit is set, then we wish to pick a scavenge goal that maintains
+// that memory limit. For that, we look at total memory that has been committed
+// (memstats.mappedReady) and try to bring that down below the limit. In this case,
+// we want to give buffer space in the *opposite* direction. When the application
+// is close to the limit, we want to make sure we push harder to keep it under, so
+// if we target below the memory limit, we ensure that the background scavenger is
+// giving the situation the urgency it deserves.
+//
+// In this case, the goal is defined as:
+// (100-reduceExtraPercent) / 100 * memoryLimit
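+//
+// For example, with a 1 GiB memory limit and reduceExtraPercent = 5, the
+// background scavenger works to keep total committed memory below
+// 0.95 GiB (about 972 MiB).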
+//
+// We compute both of these goals, and check whether either of them have been met.
+// The background scavenger continues operating as long as either one of the goals
+// has not been met.
+//
+// The goals are updated after each GC.
+//
+// The synchronous heap-growth scavenging happens whenever the heap grows in
+// size, for some definition of heap-growth. The intuition behind this is that
+// the application had to grow the heap because existing fragments were
+// not sufficiently large to satisfy a page-level memory allocation, so we
+// scavenge those fragments eagerly to offset the growth in RSS that results.
+
+package runtime
+
+import (
+ "internal/goos"
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+const (
+ // The background scavenger is paced according to these parameters.
+ //
+ // scavengePercent represents the portion of mutator time we're willing
+ // to spend on scavenging in percent.
+ scavengePercent = 1 // 1%
+
+ // retainExtraPercent represents the amount of memory over the heap goal
+ // that the scavenger should keep as a buffer space for the allocator.
+ // This constant is used when we do not have a memory limit set.
+ //
+ // The purpose of maintaining this overhead is to have a greater pool of
+ // unscavenged memory available for allocation (since using scavenged memory
+ // incurs an additional cost), to account for heap fragmentation and
+ // the ever-changing layout of the heap.
+ retainExtraPercent = 10
+
+ // reduceExtraPercent represents the amount of memory under the limit
+ // that the scavenger should target. For example, 5 means we target 95%
+ // of the limit.
+ //
+ // The purpose of shooting lower than the limit is to ensure that, once
+ // close to the limit, the scavenger is working hard to maintain it. If
+ // we have a memory limit set but are far away from it, there's no harm
+ // in leaving up to 100-retainExtraPercent live, and it's more efficient
+ // anyway, for the same reasons that retainExtraPercent exists.
+ reduceExtraPercent = 5
+
+ // maxPagesPerPhysPage is the maximum number of supported runtime pages per
+ // physical page, based on maxPhysPageSize.
+ maxPagesPerPhysPage = maxPhysPageSize / pageSize
+
+ // scavengeCostRatio is the approximate ratio between the costs of using previously
+ // scavenged memory and scavenging memory.
+ //
+ // For most systems the cost of scavenging greatly outweighs the costs
+ // associated with using scavenged memory, making this constant 0. On other systems
+ // (especially ones where "sysUsed" is not just a no-op) this cost is non-trivial.
+ //
+ // This ratio is used as part of a multiplicative factor to help the scavenger account
+ // for the additional costs of using scavenged memory in its pacing.
+ scavengeCostRatio = 0.7 * (goos.IsDarwin + goos.IsIos)
+)
+
+// heapRetained returns an estimate of the current heap RSS.
+func heapRetained() uint64 {
+ return gcController.heapInUse.load() + gcController.heapFree.load()
+}
+
+// gcPaceScavenger updates the scavenger's pacing, particularly
+// its rate and RSS goal. For this, it requires the current heapGoal,
+// and the heapGoal for the previous GC cycle.
+//
+// The RSS goal is based on the current heap goal with a small overhead
+// to accommodate non-determinism in the allocator.
+//
+// The pacing is based on scavengePercent, the fraction of mutator time the
+// scavenger may consume, which applies to both regular and huge pages. See
+// that constant for more information.
+//
+// Must be called whenever GC pacing is updated.
+//
+// mheap_.lock must be held or the world must be stopped.
+func gcPaceScavenger(memoryLimit int64, heapGoal, lastHeapGoal uint64) {
+ assertWorldStoppedOrLockHeld(&mheap_.lock)
+
+ // As described at the top of this file, there are two scavenge goals here: one
+ // for gcPercent and one for memoryLimit. Let's handle the latter first because
+ // it's simpler.
+
+ // We want to target retaining (100-reduceExtraPercent)% of the memory limit.
+ memoryLimitGoal := uint64(float64(memoryLimit) * (100.0 - reduceExtraPercent) / 100.0)
+
+ // mappedReady is comparable to memoryLimit, and represents how much total memory
+ // the Go runtime has committed now (estimated).
+ mappedReady := gcController.mappedReady.Load()
+
+ // If we're already below the goal, indicate that we don't need the background
+ // scavenger for the memory limit. This may seem worrisome at first, but note
+ // that the allocator will assist the background scavenger in the face of a memory
+ // limit, so we'll be safe even if we stop the scavenger when we shouldn't have.
+ if mappedReady <= memoryLimitGoal {
+ scavenge.memoryLimitGoal.Store(^uint64(0))
+ } else {
+ scavenge.memoryLimitGoal.Store(memoryLimitGoal)
+ }
+
+ // Now handle the gcPercent goal.
+
+ // If we're called before the first GC completed, disable scavenging.
+ // We never scavenge before the 2nd GC cycle anyway (we don't have enough
+ // information about the heap yet) so this is fine, and avoids a fault
+ // or garbage data later.
+ if lastHeapGoal == 0 {
+ scavenge.gcPercentGoal.Store(^uint64(0))
+ return
+ }
+ // Compute our scavenging goal.
+ goalRatio := float64(heapGoal) / float64(lastHeapGoal)
+ gcPercentGoal := uint64(float64(memstats.lastHeapInUse) * goalRatio)
+ // Add retainExtraPercent overhead to retainedGoal. This calculation
+ // looks strange but the purpose is to arrive at an integer division
+ // (e.g. if retainExtraPercent = 12.5, then we get a divisor of 8)
+ // that also avoids the overflow from a multiplication.
+ gcPercentGoal += gcPercentGoal / (1.0 / (retainExtraPercent / 100.0))
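+ // (With retainExtraPercent = 10 the divisor is simply 10, so this is
+ // gcPercentGoal += gcPercentGoal/10: a 10% buffer added without an
+ // overflow-prone multiplication.)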
+ // Align it to a physical page boundary to make the following calculations
+ // a bit more exact.
+ gcPercentGoal = (gcPercentGoal + uint64(physPageSize) - 1) &^ (uint64(physPageSize) - 1)
+
+ // Represents where we are now in the heap's contribution to RSS in bytes.
+ //
+ // Guaranteed to always be a multiple of physPageSize on systems where
+ // physPageSize <= pageSize since we map new heap memory at a size larger than
+ // any physPageSize and release memory in multiples of physPageSize.
+ //
+ // However, certain functions recategorize heap memory as other stats (e.g.
+ // stacks) and this happens in multiples of pageSize, so on systems
+ // where physPageSize > pageSize the calculations below will not be exact.
+ // Generally this is OK since we'll be off by at most one regular
+ // physical page.
+ heapRetainedNow := heapRetained()
+
+ // If we're already below our goal, or within one page of our goal, then indicate
+ // that we don't need the background scavenger for maintaining a memory overhead
+ // proportional to the heap goal.
+ if heapRetainedNow <= gcPercentGoal || heapRetainedNow-gcPercentGoal < uint64(physPageSize) {
+ scavenge.gcPercentGoal.Store(^uint64(0))
+ } else {
+ scavenge.gcPercentGoal.Store(gcPercentGoal)
+ }
+}
+
+var scavenge struct {
+ // gcPercentGoal is the amount of retained heap memory (measured by
+ // heapRetained) that the runtime will try to maintain by returning
+ // memory to the OS. This goal is derived from gcController.gcPercent
+ // by choosing to retain enough memory to allocate heap memory up to
+ // the heap goal.
+ gcPercentGoal atomic.Uint64
+
+ // memoryLimitGoal is the amount of memory retained by the runtime
+ // (measured by gcController.mappedReady) that the runtime will try to
+ // maintain by returning memory to the OS. This goal is derived from
+ // gcController.memoryLimit by choosing to target the memory limit or
+ // some lower target to keep the scavenger working.
+ memoryLimitGoal atomic.Uint64
+}
+
+const (
+ // It doesn't really matter what value we start at, but it can't be zero, because
+ // that'll cause divide-by-zero issues. Pick something conservative which we'll
+ // also use as a fallback.
+ startingScavSleepRatio = 0.001
+
+ // Spend at least 1 ms scavenging, otherwise the corresponding
+ // sleep time to maintain our desired utilization is too low to
+ // be reliable.
+ minScavWorkTime = 1e6
+)
+
+// Sleep/wait state of the background scavenger.
+var scavenger scavengerState
+
+type scavengerState struct {
+ // lock protects all fields below.
+ lock mutex
+
+ // g is the goroutine the scavenger is bound to.
+ g *g
+
+ // parked is whether or not the scavenger is parked.
+ parked bool
+
+ // timer is the timer used for the scavenger to sleep.
+ timer *timer
+
+ // sysmonWake signals to sysmon that it should wake the scavenger.
+ sysmonWake atomic.Uint32
+
+ // targetCPUFraction is the target CPU overhead for the scavenger.
+ targetCPUFraction float64
+
+ // sleepRatio is the ratio of time spent doing scavenging work to
+ // time spent sleeping. This is used to decide how long the scavenger
+ // should sleep for in between batches of work. It is set by the
+ // sleepController in order to maintain a CPU overhead of
+ // targetCPUFraction.
+ //
+ // Lower means more sleep, higher means more aggressive scavenging.
+ sleepRatio float64
+
+ // sleepController controls sleepRatio.
+ //
+ // See sleepRatio for more details.
+ sleepController piController
+
+ // controllerCooldown is the time left in nanoseconds during which we avoid
+ // using the controller and we hold sleepRatio at a conservative
+ // value. Used if the controller's assumptions fail to hold.
+ controllerCooldown int64
+
+ // printControllerReset instructs printScavTrace to signal that
+ // the controller was reset.
+ printControllerReset bool
+
+ // sleepStub is a stub used for testing to avoid actually having
+ // the scavenger sleep.
+ //
+ // Unlike the other stubs, this is not populated if left nil.
+ // Instead, it is called when non-nil because any valid implementation
+ // of this function basically requires closing over this scavenger
+ // state, and allocating a closure is not allowed in the runtime as
+ // a matter of policy.
+ sleepStub func(n int64) int64
+
+ // scavenge is a function that scavenges n bytes of memory.
+ // Returns how many bytes of memory it actually scavenged, as
+ // well as the time it took in nanoseconds. Usually mheap.pages.scavenge
+ // with nanotime called around it, but stubbed out for testing.
+ // Like mheap.pages.scavenge, if it scavenges less than n bytes of
+ // memory, the caller may assume the heap is exhausted of scavengable
+ // memory for now.
+ //
+ // If this is nil, it is populated with the real thing in init.
+ scavenge func(n uintptr) (uintptr, int64)
+
+ // shouldStop is a callback called in the work loop and provides a
+ // point that can force the scavenger to stop early, for example because
+ // the scavenge policy dictates too much has been scavenged already.
+ //
+ // If this is nil, it is populated with the real thing in init.
+ shouldStop func() bool
+
+ // gomaxprocs returns the current value of gomaxprocs. Stub for testing.
+ //
+ // If this is nil, it is populated with the real thing in init.
+ gomaxprocs func() int32
+}
+
+// init initializes a scavenger state and wires to the current G.
+//
+// Must be called from a regular goroutine that can allocate.
+func (s *scavengerState) init() {
+ if s.g != nil {
+ throw("scavenger state is already wired")
+ }
+ lockInit(&s.lock, lockRankScavenge)
+ s.g = getg()
+
+ s.timer = new(timer)
+ s.timer.arg = s
+ s.timer.f = func(s any, _ uintptr) {
+ s.(*scavengerState).wake()
+ }
+
+ // input: fraction of CPU time actually used.
+ // setpoint: ideal CPU fraction.
+ // output: ratio of time worked to time slept (determines sleep time).
+ //
+ // The output of this controller is somewhat indirect to what we actually
+ // want to achieve: how much time to sleep for. The reason for this definition
+ // is to ensure that the controller's outputs have a direct relationship with
+ // its inputs (as opposed to an inverse relationship), making it somewhat
+ // easier to reason about for tuning purposes.
+ s.sleepController = piController{
+ // Tuned loosely via Ziegler-Nichols process.
+ kp: 0.3375,
+ ti: 3.2e6,
+ tt: 1e9, // 1 second reset time.
+
+ // These ranges seem wide, but we want to give the controller plenty of
+ // room to hunt for the optimal value.
+ min: 0.001, // 1:1000
+ max: 1000.0, // 1000:1
+ }
+ s.sleepRatio = startingScavSleepRatio
+
+ // Install real functions if stubs aren't present.
+ if s.scavenge == nil {
+ s.scavenge = func(n uintptr) (uintptr, int64) {
+ start := nanotime()
+ r := mheap_.pages.scavenge(n, nil)
+ end := nanotime()
+ if start >= end {
+ return r, 0
+ }
+ return r, end - start
+ }
+ }
+ if s.shouldStop == nil {
+ s.shouldStop = func() bool {
+ // If background scavenging is disabled or if there's no work to do, just stop.
+ return heapRetained() <= scavenge.gcPercentGoal.Load() &&
+ (!go119MemoryLimitSupport ||
+ gcController.mappedReady.Load() <= scavenge.memoryLimitGoal.Load())
+ }
+ }
+ if s.gomaxprocs == nil {
+ s.gomaxprocs = func() int32 {
+ return gomaxprocs
+ }
+ }
+}
+
+// park parks the scavenger goroutine.
+func (s *scavengerState) park() {
+ lock(&s.lock)
+ if getg() != s.g {
+ throw("tried to park scavenger from another goroutine")
+ }
+ s.parked = true
+ goparkunlock(&s.lock, waitReasonGCScavengeWait, traceEvGoBlock, 2)
+}
+
+// ready signals to sysmon that the scavenger should be awoken.
+func (s *scavengerState) ready() {
+ s.sysmonWake.Store(1)
+}
+
+// wake immediately unparks the scavenger if necessary.
+//
+// Safe to run without a P.
+func (s *scavengerState) wake() {
+ lock(&s.lock)
+ if s.parked {
+ // Unset sysmonWake, since the scavenger is now being awoken.
+ s.sysmonWake.Store(0)
+
+ // s.parked is unset to prevent a double wake-up.
+ s.parked = false
+
+ // Ready the goroutine by injecting it. We use injectglist instead
+ // of ready or goready in order to allow us to run this function
+ // without a P. injectglist also avoids placing the goroutine in
+ // the current P's runnext slot, which is desirable to prevent
+ // the scavenger from interfering with user goroutine scheduling
+ // too much.
+ var list gList
+ list.push(s.g)
+ injectglist(&list)
+ }
+ unlock(&s.lock)
+}
+
+// sleep puts the scavenger to sleep based on the amount of time that it worked
+// in nanoseconds.
+//
+// Note that this function should only be called by the scavenger.
+//
+// The scavenger may be woken up earlier by a pacing change, and it may not go
+// to sleep at all if there's a pending pacing change.
+func (s *scavengerState) sleep(worked float64) {
+ lock(&s.lock)
+ if getg() != s.g {
+ throw("tried to sleep scavenger from another goroutine")
+ }
+
+ if worked < minScavWorkTime {
+ // This means there wasn't enough work to actually fill up minScavWorkTime.
+ // That's fine; we shouldn't try to do anything with this information
+ // because it's going to result in a short enough sleep request that things
+ // will get messy. Just assume we did at least this much work.
+ // All this means is that we'll sleep longer than we otherwise would have.
+ worked = minScavWorkTime
+ }
+
+ // Multiply the critical time by 1 + the ratio of the costs of using
+ // scavenged memory vs. scavenging memory. This forces us to pay down
+ // the cost of reusing this memory eagerly by sleeping for a longer period
+ // of time and scavenging less frequently. More concretely, we avoid situations
+ // where we end up scavenging so often that we hurt allocation performance
+ // because of the additional overheads of using scavenged memory.
+ worked *= 1 + scavengeCostRatio
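+ // (For example: on Darwin/iOS, scavengeCostRatio above is 0.7, so 1ms of
+ // scavenging work is accounted as 1.7ms here; on other platforms the
+ // ratio is 0 and this line is a no-op.)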
+
+ // sleepTime is the amount of time we're going to sleep, based on the amount
+ // of time we worked, and the sleepRatio.
+ sleepTime := int64(worked / s.sleepRatio)
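+ // (For example: at the conservative startingScavSleepRatio of 0.001, the
+ // minimum accounted work of 1e6ns yields a sleep of 1e9ns, i.e. roughly
+ // 1ms of work per second of wall time.)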
+
+ var slept int64
+ if s.sleepStub == nil {
+ // Set the timer.
+ //
+ // This must happen here instead of inside gopark
+ // because we can't close over any variables without
+ // failing escape analysis.
+ start := nanotime()
+ resetTimer(s.timer, start+sleepTime)
+
+ // Mark ourselves as asleep and go to sleep.
+ s.parked = true
+ goparkunlock(&s.lock, waitReasonSleep, traceEvGoSleep, 2)
+
+ // How long we actually slept for.
+ slept = nanotime() - start
+
+ lock(&s.lock)
+ // Stop the timer here because s.wake is unable to do it for us.
+ // We don't really care if we succeed in stopping the timer. One
+ // reason we might fail is that we've already woken up, but the timer
+ // might be in the process of firing on some other P; essentially we're
+ // racing with it. That's totally OK. Double wake-ups are perfectly safe.
+ stopTimer(s.timer)
+ unlock(&s.lock)
+ } else {
+ unlock(&s.lock)
+ slept = s.sleepStub(sleepTime)
+ }
+
+ // Stop here if we're cooling down from the controller.
+ if s.controllerCooldown > 0 {
+ // worked and slept aren't exact measures of time, but it's OK to be a bit
+ // sloppy here. We're just hoping we're avoiding some transient bad behavior.
+ t := slept + int64(worked)
+ if t > s.controllerCooldown {
+ s.controllerCooldown = 0
+ } else {
+ s.controllerCooldown -= t
+ }
+ return
+ }
+
+ // idealFraction is the ideal % of overall application CPU time that we
+ // spend scavenging.
+ idealFraction := float64(scavengePercent) / 100.0
+
+ // Calculate the CPU time spent.
+ //
+ // This may be slightly inaccurate with respect to GOMAXPROCS, but we're
+ // recomputing this often enough relative to GOMAXPROCS changes in general
+ // (it only changes when the world is stopped, and not during a GC) that
+ // that small inaccuracy is in the noise.
+ cpuFraction := worked / ((float64(slept) + worked) * float64(s.gomaxprocs()))
+
+ // Update the sleepRatio, adjusting until we reach our ideal fraction.
+ var ok bool
+ s.sleepRatio, ok = s.sleepController.next(cpuFraction, idealFraction, float64(slept)+worked)
+ if !ok {
+ // The core assumption of the controller, that we can get a proportional
+ // response, broke down. This may be transient, so temporarily switch to
+ // sleeping a fixed, conservative amount.
+ s.sleepRatio = startingScavSleepRatio
+ s.controllerCooldown = 5e9 // 5 seconds.
+
+ // Signal the scav trace printer to output this.
+ s.controllerFailed()
+ }
+}
+
+// controllerFailed indicates that the scavenger's scheduling
+// controller failed.
+func (s *scavengerState) controllerFailed() {
+ lock(&s.lock)
+ s.printControllerReset = true
+ unlock(&s.lock)
+}
+
+// run is the body of the main scavenging loop.
+//
+// Returns the number of bytes released and the estimated time spent
+// releasing those bytes.
+//
+// Must be run on the scavenger goroutine.
+func (s *scavengerState) run() (released uintptr, worked float64) {
+ lock(&s.lock)
+ if getg() != s.g {
+ throw("tried to run scavenger from another goroutine")
+ }
+ unlock(&s.lock)
+
+ for worked < minScavWorkTime {
+ // If something from outside tells us to stop early, stop.
+ if s.shouldStop() {
+ break
+ }
+
+ // scavengeQuantum is the amount of memory we try to scavenge
+ // in one go. A smaller value means the scavenger is more responsive
+ // to the scheduler in case of e.g. preemption. A larger value means
+ // that the overheads of scavenging are better amortized, so better
+ // scavenging throughput.
+ //
+ // The current value is chosen assuming a cost of ~10µs/physical page
+ // (this is somewhat pessimistic), which implies a worst-case latency of
+ // about 160µs for 4 KiB physical pages. The current value is biased
+ // toward latency over throughput.
+ const scavengeQuantum = 64 << 10
+
+ // Accumulate the amount of time spent scavenging.
+ r, duration := s.scavenge(scavengeQuantum)
+
+ // On some platforms we may see start >= end (i.e. a zero duration) if
+ // the time it takes to scavenge memory is less than the minimum
+ // granularity of its clock (e.g. Windows) or due to clock bugs.
+ //
+ // In this case, just assume scavenging takes 10 µs per regular physical page
+ // (determined empirically), and conservatively ignore the impact of huge pages
+ // on timing.
+ const approxWorkedNSPerPhysicalPage = 10e3
+ if duration == 0 {
+ worked += approxWorkedNSPerPhysicalPage * float64(r/physPageSize)
+ } else {
+ // TODO(mknyszek): If duration is small compared to worked, it could be
+ // rounded down to zero. Probably not a problem in practice because the
+ // values are all within a few orders of magnitude of each other but maybe
+ // worth worrying about.
+ worked += float64(duration)
+ }
+ released += r
+
+ // scavenge does not return until it either finds the requisite amount of
+ // memory to scavenge, or exhausts the heap. If we haven't found enough
+ // to scavenge, then the heap must be exhausted.
+ if r < scavengeQuantum {
+ break
+ }
+ // When using fake time just do one loop.
+ if faketime != 0 {
+ break
+ }
+ }
+ if released > 0 && released < physPageSize {
+ // If this happens, it means that we may have attempted to release part
+ // of a physical page, but the likely effect of that is that it released
+ // the whole physical page, some of which may have still been in-use.
+ // This could lead to memory corruption. Throw.
+ throw("released less than one physical page of memory")
+ }
+ return
+}
+
+// Background scavenger.
+//
+// The background scavenger maintains the RSS of the application below
+// the line described by the proportional scavenging statistics in
+// the mheap struct.
+func bgscavenge(c chan int) {
+ scavenger.init()
+
+ c <- 1
+ scavenger.park()
+
+ for {
+ released, workTime := scavenger.run()
+ if released == 0 {
+ scavenger.park()
+ continue
+ }
+ atomic.Xadduintptr(&mheap_.pages.scav.released, released)
+ scavenger.sleep(workTime)
+ }
+}
+
+// scavenge scavenges nbytes worth of free pages, starting with the
+// highest address first. Successive calls continue from where it left
+// off until the heap is exhausted; the scavenge index (p.scav.index)
+// brings the search back toward the top of the heap as freed memory is
+// marked.
+//
+// Returns the amount of memory scavenged in bytes.
+//
+// scavenge always tries to scavenge nbytes worth of memory, and will
+// only fail to do so if the heap is exhausted for now.
+func (p *pageAlloc) scavenge(nbytes uintptr, shouldStop func() bool) uintptr {
+ released := uintptr(0)
+ for released < nbytes {
+ ci, pageIdx := p.scav.index.find()
+ if ci == 0 {
+ break
+ }
+ systemstack(func() {
+ released += p.scavengeOne(ci, pageIdx, nbytes-released)
+ })
+ if shouldStop != nil && shouldStop() {
+ break
+ }
+ }
+ return released
+}
+
+// printScavTrace prints a scavenge trace line to standard error.
+//
+// released should be the amount of memory released since the last time this
+// was called, and forced indicates whether the scavenge was forced by the
+// application.
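+//
+// A trace line looks like (illustrative values):
+//
+//	scav 128 KiB work, 4096 KiB total, 87% util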
+//
+// scavenger.lock must be held.
+func printScavTrace(released uintptr, forced bool) {
+ assertLockHeld(&scavenger.lock)
+
+ printlock()
+ print("scav ",
+ released>>10, " KiB work, ",
+ gcController.heapReleased.load()>>10, " KiB total, ",
+ (gcController.heapInUse.load()*100)/heapRetained(), "% util",
+ )
+ if forced {
+ print(" (forced)")
+ } else if scavenger.printControllerReset {
+ print(" [controller reset]")
+ scavenger.printControllerReset = false
+ }
+ println()
+ printunlock()
+}
+
+// scavengeOne walks over the chunk at chunk index ci and searches for
+// a contiguous run of pages to scavenge. It will try to scavenge
+// at most max bytes at once, but may scavenge more to avoid
+// breaking huge pages. Once it scavenges some memory it returns
+// how much it scavenged in bytes.
+//
+// searchIdx is the page index to start searching from in ci.
+//
+// Returns the number of bytes scavenged.
+//
+// Must run on the systemstack because it acquires p.mheapLock.
+//
+//go:systemstack
+func (p *pageAlloc) scavengeOne(ci chunkIdx, searchIdx uint, max uintptr) uintptr {
+ // Calculate the maximum number of pages to scavenge.
+ //
+ // This should be alignUp(max, pageSize) / pageSize but max can and will
+ // be ^uintptr(0), so we need to be very careful not to overflow here.
+ // Rather than use alignUp, calculate the number of pages rounded down
+ // first, then add back one if necessary.
+ maxPages := max / pageSize
+ if max%pageSize != 0 {
+ maxPages++
+ }
+
+ // Calculate the minimum number of pages we can scavenge.
+ //
+ // Because we can only scavenge whole physical pages, we must
+ // ensure that we scavenge at least minPages each time, aligned
+ // to minPages*pageSize.
+ minPages := physPageSize / pageSize
+ if minPages < 1 {
+ minPages = 1
+ }
+
+ lock(p.mheapLock)
+ if p.summary[len(p.summary)-1][ci].max() >= uint(minPages) {
+ // We only bother looking for a candidate if there are at least
+ // minPages free pages at all.
+ base, npages := p.chunkOf(ci).findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
+
+ // If we found something, scavenge it and return!
+ if npages != 0 {
+ // Compute the full address for the start of the range.
+ addr := chunkBase(ci) + uintptr(base)*pageSize
+
+ // Mark the range we're about to scavenge as allocated, because
+ // we don't want any allocating goroutines to grab it while
+ // the scavenging is in progress.
+ if scav := p.allocRange(addr, uintptr(npages)); scav != 0 {
+ throw("double scavenge")
+ }
+
+ // With that done, it's safe to unlock.
+ unlock(p.mheapLock)
+
+ if !p.test {
+ // Only perform the actual scavenging if we're not in a test.
+ // It's dangerous to do so otherwise.
+ sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)
+
+ // Update global accounting only when not in test, otherwise
+ // the runtime's accounting will be wrong.
+ nbytes := int64(npages) * pageSize
+ gcController.heapReleased.add(nbytes)
+ gcController.heapFree.add(-nbytes)
+
+ stats := memstats.heapStats.acquire()
+ atomic.Xaddint64(&stats.committed, -nbytes)
+ atomic.Xaddint64(&stats.released, nbytes)
+ memstats.heapStats.release()
+ }
+
+ // Relock the heap, because now we need to make these pages
+ // available for allocation. Free them back to the page allocator.
+ lock(p.mheapLock)
+ p.free(addr, uintptr(npages), true)
+
+ // Mark the range as scavenged.
+ p.chunkOf(ci).scavenged.setRange(base, npages)
+ unlock(p.mheapLock)
+
+ return uintptr(npages) * pageSize
+ }
+ }
+ // Mark this chunk as having no free pages.
+ p.scav.index.clear(ci)
+ unlock(p.mheapLock)
+
+ return 0
+}
+
+// fillAligned returns x but with all zeroes in m-aligned
+// groups of m bits set to 1 if any bit in the group is non-zero.
+//
+// For example, fillAligned(0x0100a3, 8) == 0xff00ff.
+//
+// Note that if m == 1, this is a no-op.
+//
+// m must be a power of 2 <= maxPagesPerPhysPage.
+func fillAligned(x uint64, m uint) uint64 {
+ apply := func(x uint64, c uint64) uint64 {
+ // The technique used here is derived from
+ // https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
+ // and extended for more than just bytes (like nibbles
+ // and uint16s) by using an appropriate constant.
+ //
+ // To summarize the technique, quoting from that page:
+ // "[It] works by first zeroing the high bits of the [8]
+ // bytes in the word. Subsequently, it adds a number that
+ // will result in an overflow to the high bit of a byte if
+ // any of the low bits were initially set. Next the high
+ // bits of the original word are ORed with these values;
+ // thus, the high bit of a byte is set iff any bit in the
+ // byte was set. Finally, we determine if any of these high
+ // bits are zero by ORing with ones everywhere except the
+ // high bits and inverting the result."
+ return ^((((x & c) + c) | x) | c)
+ }
+ // Transform x to contain a 1 bit at the top of each m-aligned
+ // group of m zero bits.
+ switch m {
+ case 1:
+ return x
+ case 2:
+ x = apply(x, 0x5555555555555555)
+ case 4:
+ x = apply(x, 0x7777777777777777)
+ case 8:
+ x = apply(x, 0x7f7f7f7f7f7f7f7f)
+ case 16:
+ x = apply(x, 0x7fff7fff7fff7fff)
+ case 32:
+ x = apply(x, 0x7fffffff7fffffff)
+ case 64: // == maxPagesPerPhysPage
+ x = apply(x, 0x7fffffffffffffff)
+ default:
+ throw("bad m value")
+ }
+ // Now, the top bit of each m-aligned group in x is set
+ // iff that group was all zero in the original x.
+
+ // From each group of m bits subtract 1.
+ // Because we know only the top bits of each
+ // m-aligned group are set, we know this will
+ // set each group to have all the bits set except
+ // the top bit, so just OR with the original
+ // result to set all the bits.
+ return ^((x - (x >> (m - 1))) | x)
+}
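+
+// As a quick sanity check of fillAligned (a standalone sketch, not part
+// of the runtime), the example from its doc comment holds:
+//
+//	if fillAligned(0x0100a3, 8) != 0xff00ff {
+//		panic("fillAligned broken")
+//	}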
+
+// findScavengeCandidate returns a start index and a size for this pallocData
+// segment which represents a contiguous region of free and unscavenged memory.
+//
+// searchIdx indicates the page index within this chunk to start the search, but
+// note that findScavengeCandidate searches backwards through the pallocData. As
+// a result, it will return the highest scavenge candidate in address order.
+//
+// min indicates a hard minimum size and alignment for runs of pages. That is,
+// findScavengeCandidate will not return a region smaller than min pages in size,
+// or that is min pages or greater in size but not aligned to min. min must be
+// a non-zero power of 2 <= maxPagesPerPhysPage.
+//
+// max is a hint for how big of a region is desired. If max >= pallocChunkPages, then
+// findScavengeCandidate effectively returns entire free and unscavenged regions.
+// If max < pallocChunkPages, it may truncate the returned region such that size is
+// max. However, findScavengeCandidate may still return a larger region if, for
+// example, it chooses to preserve huge pages, or if max is not aligned to min (it
+// will round up). That is, even if max is small, the returned size is not guaranteed
+// to be equal to max. max is allowed to be less than min, in which case it is as if
+// max == min.
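+//
+// For example (a sketch): with 4 KiB runtime pages and 16 KiB physical
+// pages, min is 4, so any returned run is at least 4 pages long and
+// 4-page aligned; a max of 6 is first rounded up to 8 so truncation
+// cannot produce a misaligned run.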
+func (m *pallocData) findScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
+ if min&(min-1) != 0 || min == 0 {
+ print("runtime: min = ", min, "\n")
+ throw("min must be a non-zero power of 2")
+ } else if min > maxPagesPerPhysPage {
+ print("runtime: min = ", min, "\n")
+ throw("min too large")
+ }
+ // max may not be min-aligned, so we might accidentally truncate to
+ // a max value which causes us to return a non-min-aligned value.
+ // To prevent this, align max up to a multiple of min (which is always
+ // a power of 2). This also prevents max from ever being less than
+ // min, unless it's zero, so handle that explicitly.
+ if max == 0 {
+ max = min
+ } else {
+ max = alignUp(max, min)
+ }
+
+ i := int(searchIdx / 64)
+ // Start by quickly skipping over blocks of non-free or scavenged pages.
+ for ; i >= 0; i-- {
+ // 1s are scavenged OR non-free => 0s are unscavenged AND free
+ x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))
+ if x != ^uint64(0) {
+ break
+ }
+ }
+ if i < 0 {
+ // Failed to find any free/unscavenged pages.
+ return 0, 0
+ }
+ // We have something in the 64-bit chunk at i, but it could
+ // extend further. Loop until we find the extent of it.
+
+ // 1s are scavenged OR non-free => 0s are unscavenged AND free
+ x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))
+ z1 := uint(sys.LeadingZeros64(^x))
+ run, end := uint(0), uint(i)*64+(64-z1)
+ if x<<z1 != 0 {
+ // After shifting out z1 bits, we still have 1s,
+ // so the run ends inside this word.
+ run = uint(sys.LeadingZeros64(x << z1))
+ } else {
+ // After shifting out z1 bits, we have no more 1s.
+ // This means the run extends to the bottom of the
+ // word so it may extend into further words.
+ run = 64 - z1
+ for j := i - 1; j >= 0; j-- {
+ x := fillAligned(m.scavenged[j]|m.pallocBits[j], uint(min))
+ run += uint(sys.LeadingZeros64(x))
+ if x != 0 {
+ // The run stopped in this word.
+ break
+ }
+ }
+ }
+
+ // Split the run we found if it's larger than max but hold on to
+ // our original length, since we may need it later.
+ size := run
+ if size > uint(max) {
+ size = uint(max)
+ }
+ start := end - size
+
+ // Each huge page is guaranteed to fit in a single palloc chunk.
+ //
+ // TODO(mknyszek): Support larger huge page sizes.
+ // TODO(mknyszek): Consider taking pages-per-huge-page as a parameter
+ // so we can write tests for this.
+ if physHugePageSize > pageSize && physHugePageSize > physPageSize {
+ // We have huge pages, so let's ensure we don't break one by scavenging
+ // over a huge page boundary. If the range [start, start+size) overlaps with
+ // a free-and-unscavenged huge page, we want to grow the region we scavenge
+ // to include that huge page.
+
+ // Compute the huge page boundary above our candidate.
+ pagesPerHugePage := uintptr(physHugePageSize / pageSize)
+ hugePageAbove := uint(alignUp(uintptr(start), pagesPerHugePage))
+
+ // If that boundary is within our current candidate, then we may be breaking
+ // a huge page.
+ if hugePageAbove <= end {
+ // Compute the huge page boundary below our candidate.
+ hugePageBelow := uint(alignDown(uintptr(start), pagesPerHugePage))
+
+ if hugePageBelow >= end-run {
+ // We're in danger of breaking apart a huge page since start+size crosses
+ // a huge page boundary and rounding down start to the nearest huge
+ // page boundary is included in the full run we found. Include the entire
+ // huge page in the bound by rounding down to the huge page size.
+ size = size + (start - hugePageBelow)
+ start = hugePageBelow
+ }
+ }
+ }
+ return start, size
+}
+
+// scavengeIndex is a structure for efficiently managing which pageAlloc chunks have
+// memory available to scavenge.
+type scavengeIndex struct {
+ // chunks is a bitmap representing the entire address space. Each bit represents
+ // a single chunk, and a 1 value indicates the presence of pages available for
+ // scavenging. Updates to the bitmap are serialized by the pageAlloc lock.
+ //
+ // The underlying storage of chunks is platform dependent and may not even be
+ // totally mapped read/write. min and max reflect the extent that is safe to access.
+ // min is inclusive, max is exclusive.
+ //
+ // searchAddr is the maximum address (in the offset address space, so we have a linear
+ // view of the address space; see mranges.go:offAddr) containing memory available to
+ // scavenge. It is a hint to the find operation to avoid O(n^2) behavior in repeated lookups.
+ //
+ // searchAddr is always inclusive and should be the base address of the highest runtime
+ // page available for scavenging.
+ //
+ // searchAddr is managed by both find and mark.
+ //
+ // Normally, find monotonically decreases searchAddr as it finds no more free pages to
+ // scavenge. However, mark, when marking a new chunk at an index greater than the current
+ // searchAddr, sets searchAddr to the *negative* index into chunks of that page. The trick here
+ // is that concurrent calls to find will fail to monotonically decrease searchAddr, and so they
+ // won't barge over new memory becoming available to scavenge. Furthermore, this ensures
+ // that some future caller of find *must* observe the new high index. That caller
+ // (or any other racing with it), then makes searchAddr positive before continuing, bringing
+ // us back to our monotonically decreasing steady-state.
+ //
+ // A pageAlloc lock serializes updates between min, max, and searchAddr, so abs(searchAddr)
+ // is always guaranteed to be >= min and < max (converted to heap addresses).
+ //
+ // TODO(mknyszek): Ideally we would use something bigger than a uint8 for faster
+ // iteration like uint32, but we lack the bit twiddling intrinsics. We'd need to either
+ // copy them from math/bits or fix the fact that we can't import math/bits' code from
+ // the runtime due to compiler instrumentation.
+ searchAddr atomicOffAddr
+ chunks []atomic.Uint8
+ minHeapIdx atomic.Int32
+ min, max atomic.Int32
+}
+
+// find returns the highest chunk index that may contain pages available to scavenge.
+// It also returns an offset to start searching in the highest chunk.
+func (s *scavengeIndex) find() (chunkIdx, uint) {
+ searchAddr, marked := s.searchAddr.Load()
+ if searchAddr == minOffAddr.addr() {
+ // We got a cleared search addr.
+ return 0, 0
+ }
+
+ // Starting from searchAddr's chunk, and moving down to minHeapIdx,
+ // iterate until we find a chunk with pages to scavenge.
+ min := s.minHeapIdx.Load()
+ searchChunk := chunkIndex(uintptr(searchAddr))
+ start := int32(searchChunk / 8)
+ for i := start; i >= min; i-- {
+ // Skip over irrelevant address space.
+ chunks := s.chunks[i].Load()
+ if chunks == 0 {
+ continue
+ }
+ // Note that we can't have 8 leading zeroes here because
+ // we necessarily skipped that case. So, what's left is
+ // an index. If there are no zeroes, we want the 7th
+ // index, if 1 zero, the 6th, and so on.
+ n := 7 - sys.LeadingZeros8(chunks)
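+ // For example, chunks == 0b00010100 has 3 leading zeroes, so
+ // n == 4, the index of its highest set bit.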
+ ci := chunkIdx(uint(i)*8 + uint(n))
+ if searchChunk == ci {
+ return ci, chunkPageIndex(uintptr(searchAddr))
+ }
+ // Try to reduce searchAddr to newSearchAddr.
+ newSearchAddr := chunkBase(ci) + pallocChunkBytes - pageSize
+ if marked {
+ // Attempt to be the first one to decrease the searchAddr
+ // after an increase. If we fail, that means there was another
+ // increase, or somebody else got to it before us. Either way,
+ // it doesn't matter. We may lose some performance having an
+ // incorrect search address, but it's far more important that
+ // we don't miss updates.
+ s.searchAddr.StoreUnmark(searchAddr, newSearchAddr)
+ } else {
+ // Decrease searchAddr.
+ s.searchAddr.StoreMin(newSearchAddr)
+ }
+ return ci, pallocChunkPages - 1
+ }
+ // Clear searchAddr, because we've exhausted the heap.
+ s.searchAddr.Clear()
+ return 0, 0
+}
+
+// mark sets the inclusive range of chunks between indices start and end as
+// containing pages available to scavenge.
+//
+// Must be serialized with other mark, markRange, and clear calls.
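+//
+// For example (a sketch): marking chunks 3 through 5, which share index
+// byte 0, takes the second branch below and ORs the mask
+// (1<<(5-3+1)-1)<<3 == 0b00111000 into chunks[0].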
+func (s *scavengeIndex) mark(base, limit uintptr) {
+ start, end := chunkIndex(base), chunkIndex(limit-pageSize)
+ if start == end {
+ // Within a chunk.
+ mask := uint8(1 << (start % 8))
+ s.chunks[start/8].Or(mask)
+ } else if start/8 == end/8 {
+ // Within the same byte in the index.
+ mask := uint8(uint16(1<<(end-start+1))-1) << (start % 8)
+ s.chunks[start/8].Or(mask)
+ } else {
+ // Crosses multiple bytes in the index.
+ startAligned := chunkIdx(alignUp(uintptr(start), 8))
+ endAligned := chunkIdx(alignDown(uintptr(end), 8))
+
+ // Do the end of the first byte first.
+ if width := startAligned - start; width > 0 {
+ mask := uint8(uint16(1<<width)-1) << (start % 8)
+ s.chunks[start/8].Or(mask)
+ }
+ // Do the middle aligned sections that take up a whole
+ // byte.
+ for ci := startAligned; ci < endAligned; ci += 8 {
+ s.chunks[ci/8].Store(^uint8(0))
+ }
+ // Do the end of the last byte.
+ //
+ // This width check doesn't match the one above
+ // for start because aligning down into the endAligned
+ // block means we always have at least one chunk in this
+ // block (note that end is *inclusive*). This also means
+ // that if end == endAligned+n, then what we really want
+ // is to fill n+1 chunks, i.e. width n+1. By induction,
+ // this is true for all n.
+ if width := end - endAligned + 1; width > 0 {
+ mask := uint8(uint16(1<<width) - 1)
+ s.chunks[end/8].Or(mask)
+ }
+ }
+ newSearchAddr := limit - pageSize
+ searchAddr, _ := s.searchAddr.Load()
+ // N.B. Because mark is serialized, it's not necessary to do a
+ // full CAS here. mark only ever increases searchAddr, while
+ // find only ever decreases it. Since we only ever race with
+ // decreases, even if the value we loaded is stale, the actual
+ // value will never be larger.
+ if (offAddr{searchAddr}).lessThan(offAddr{newSearchAddr}) {
+ s.searchAddr.StoreMarked(newSearchAddr)
+ }
+}
+
+// clear sets the chunk at index ci as not containing pages available to scavenge.
+//
+// Must be serialized with other mark, markRange, and clear calls.
+func (s *scavengeIndex) clear(ci chunkIdx) {
+ s.chunks[ci/8].And(^uint8(1 << (ci % 8)))
+}
diff --git a/contrib/go/_std_1.19/src/runtime/mgcstack.go b/contrib/go/_std_1.19/src/runtime/mgcstack.go
new file mode 100644
index 0000000000..472c61a491
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mgcstack.go
@@ -0,0 +1,353 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector: stack objects and stack tracing
+// See the design doc at https://docs.google.com/document/d/1un-Jn47yByHL7I0aVIP_uVCMxjdM5mpelJhiKlIqxkE/edit?usp=sharing
+// Also see issue 22350.
+
+// Stack tracing solves the problem of determining which parts of the
+// stack are live and should be scanned. It runs as part of scanning
+// a single goroutine stack.
+//
+// Normally determining which parts of the stack are live is easy to
+// do statically, as user code has explicit references (reads and
+// writes) to stack variables. The compiler can do a simple dataflow
+// analysis to determine liveness of stack variables at every point in
+// the code. See cmd/compile/internal/gc/plive.go for that analysis.
+//
+// However, when we take the address of a stack variable, determining
+// whether that variable is still live is less clear. We can still
+// look for static accesses, but accesses through a pointer to the
+// variable are difficult in general to track statically. That pointer
+// can be passed among functions on the stack, conditionally retained,
+// etc.
+//
+// Instead, we will track pointers to stack variables dynamically.
+// All pointers to stack-allocated variables will themselves be on the
+// stack somewhere (or in associated locations, like defer records), so
+// we can find them all efficiently.
+//
+// Stack tracing is organized as a mini garbage collection tracing
+// pass. The objects in this garbage collection are all the variables
+// on the stack whose address is taken, and which themselves contain a
+// pointer. We call these variables "stack objects".
+//
+// We begin by determining all the stack objects on the stack and all
+// the statically live pointers that may point into the stack. We then
+// process each pointer to see if it points to a stack object. If it
+// does, we scan that stack object. It may contain pointers into the
+// heap, in which case those pointers are passed to the main garbage
+// collection. It may also contain pointers into the stack, in which
+// case we add them to our set of stack pointers.
+//
+// Once we're done processing all the pointers (including the ones we
+// added during processing), we've found all the stack objects that
+// are live. Any dead stack objects are not scanned and their contents
+// will not keep heap objects live. Unlike the main garbage
+// collection, we can't sweep the dead stack objects; they live on in
+// a moribund state until the stack frame that contains them is
+// popped.
+//
+// A stack can look like this:
+//
+// +----------+
+// | foo() |
+// | +------+ |
+// | | A | | <---\
+// | +------+ | |
+// | | |
+// | +------+ | |
+// | | B | | |
+// | +------+ | |
+// | | |
+// +----------+ |
+// | bar() | |
+// | +------+ | |
+// | | C | | <-\ |
+// | +----|-+ | | |
+// | | | | |
+// | +----v-+ | | |
+// | | D ---------/
+// | +------+ | |
+// | | |
+// +----------+ |
+// | baz() | |
+// | +------+ | |
+// | | E -------/
+// | +------+ |
+// | ^ |
+// | F: --/ |
+// | |
+// +----------+
+//
+// foo() calls bar() calls baz(). Each has a frame on the stack.
+// foo() has stack objects A and B.
+// bar() has stack objects C and D, with C pointing to D and D pointing to A.
+// baz() has a stack object E pointing to C, and a local variable F pointing to E.
+//
+// Starting from the pointer in local variable F, we will eventually
+// scan all of E, C, D, and A (in that order). B is never scanned
+// because there is no live pointer to it. If B is also statically
+// dead (meaning that foo() never accesses B again after it calls
+// bar()), then B's pointers into the heap are not considered live.
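+//
+// The core of that tracing loop, as a hedged sketch (the real driver is
+// scanstack in stack.go; scanStackObject is a hypothetical stand-in for
+// the precise or conservative scan it performs):
+//
+//	state.buildIndex()
+//	for {
+//		p, conservative := state.getPtr()
+//		if p == 0 {
+//			break
+//		}
+//		obj := state.findObject(p)
+//		if obj == nil || obj.r == nil {
+//			continue // not a stack object, or already scanned
+//		}
+//		r := obj.r
+//		obj.setRecord(nil) // mark as scanned
+//		scanStackObject(obj, r, conservative, state) // may call state.putPtr
+//	}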
+
+package runtime
+
+import (
+ "internal/goarch"
+ "unsafe"
+)
+
+const stackTraceDebug = false
+
+// Buffer for pointers found during stack tracing.
+// Must be smaller than or equal to workbuf.
+//
+//go:notinheap
+type stackWorkBuf struct {
+ stackWorkBufHdr
+ obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr
+}
+
+// Header declaration must come after the buf declaration above, because of issue #14620.
+//
+//go:notinheap
+type stackWorkBufHdr struct {
+ workbufhdr
+ next *stackWorkBuf // linked list of workbufs
+ // Note: we could theoretically repurpose lfnode.next as this next pointer.
+ // It would save 1 word, but that probably isn't worth busting open
+ // the lfnode API.
+}
+
+// Buffer for stack objects found on a goroutine stack.
+// Must be smaller than or equal to workbuf.
+//
+//go:notinheap
+type stackObjectBuf struct {
+ stackObjectBufHdr
+ obj [(_WorkbufSize - unsafe.Sizeof(stackObjectBufHdr{})) / unsafe.Sizeof(stackObject{})]stackObject
+}
+
+//go:notinheap
+type stackObjectBufHdr struct {
+ workbufhdr
+ next *stackObjectBuf
+}
+
+func init() {
+ if unsafe.Sizeof(stackWorkBuf{}) > unsafe.Sizeof(workbuf{}) {
+ panic("stackWorkBuf too big")
+ }
+ if unsafe.Sizeof(stackObjectBuf{}) > unsafe.Sizeof(workbuf{}) {
+ panic("stackObjectBuf too big")
+ }
+}
+
+// A stackObject represents a variable on the stack that has had
+// its address taken.
+//
+//go:notinheap
+type stackObject struct {
+ off uint32 // offset above stack.lo
+ size uint32 // size of object
+ r *stackObjectRecord // info of the object (for ptr/nonptr bits). nil if object has been scanned.
+ left *stackObject // objects with lower addresses
+ right *stackObject // objects with higher addresses
+}
+
+// obj.r = r, but with no write barrier.
+//
+//go:nowritebarrier
+func (obj *stackObject) setRecord(r *stackObjectRecord) {
+ // Types of stack objects are always in read-only memory, not the heap.
+ // So not using a write barrier is ok.
+ *(*uintptr)(unsafe.Pointer(&obj.r)) = uintptr(unsafe.Pointer(r))
+}
+
+// A stackScanState keeps track of the state used during the GC walk
+// of a goroutine.
+type stackScanState struct {
+ cache pcvalueCache
+
+ // stack limits
+ stack stack
+
+ // conservative indicates that the next frame must be scanned conservatively.
+ // This applies only to the innermost frame at an async safe-point.
+ conservative bool
+
+ // buf contains the set of possible pointers to stack objects.
+ // Organized as a LIFO linked list of buffers.
+ // All buffers except possibly the head buffer are full.
+ buf *stackWorkBuf
+ freeBuf *stackWorkBuf // keep around one free buffer for allocation hysteresis
+
+ // cbuf contains conservative pointers to stack objects. If
+ // all pointers to a stack object are obtained via
+ // conservative scanning, then the stack object may be dead
+ // and may contain dead pointers, so it must be scanned
+ // defensively.
+ cbuf *stackWorkBuf
+
+ // list of stack objects
+ // Objects are in increasing address order.
+ head *stackObjectBuf
+ tail *stackObjectBuf
+ nobjs int
+
+ // root of binary tree for fast object lookup by address
+ // Initialized by buildIndex.
+ root *stackObject
+}
+
+// Add p as a potential pointer to a stack object.
+// p must be a stack address.
+func (s *stackScanState) putPtr(p uintptr, conservative bool) {
+ if p < s.stack.lo || p >= s.stack.hi {
+ throw("address not a stack address")
+ }
+ head := &s.buf
+ if conservative {
+ head = &s.cbuf
+ }
+ buf := *head
+ if buf == nil {
+ // Initial setup.
+ buf = (*stackWorkBuf)(unsafe.Pointer(getempty()))
+ buf.nobj = 0
+ buf.next = nil
+ *head = buf
+ } else if buf.nobj == len(buf.obj) {
+ if s.freeBuf != nil {
+ buf = s.freeBuf
+ s.freeBuf = nil
+ } else {
+ buf = (*stackWorkBuf)(unsafe.Pointer(getempty()))
+ }
+ buf.nobj = 0
+ buf.next = *head
+ *head = buf
+ }
+ buf.obj[buf.nobj] = p
+ buf.nobj++
+}
+
+// Remove and return a potential pointer to a stack object.
+// Returns 0 if there are no more pointers available.
+//
+// This prefers non-conservative pointers so we scan stack objects
+// precisely if there are any non-conservative pointers to them.
+func (s *stackScanState) getPtr() (p uintptr, conservative bool) {
+ for _, head := range []**stackWorkBuf{&s.buf, &s.cbuf} {
+ buf := *head
+ if buf == nil {
+ // Never had any data.
+ continue
+ }
+ if buf.nobj == 0 {
+ if s.freeBuf != nil {
+ // Free old freeBuf.
+ putempty((*workbuf)(unsafe.Pointer(s.freeBuf)))
+ }
+ // Move buf to the freeBuf.
+ s.freeBuf = buf
+ buf = buf.next
+ *head = buf
+ if buf == nil {
+ // No more data in this list.
+ continue
+ }
+ }
+ buf.nobj--
+ return buf.obj[buf.nobj], head == &s.cbuf
+ }
+ // No more data in either list.
+ if s.freeBuf != nil {
+ putempty((*workbuf)(unsafe.Pointer(s.freeBuf)))
+ s.freeBuf = nil
+ }
+ return 0, false
+}
+
+// addObject adds a stack object at addr of type typ to the set of stack objects.
+func (s *stackScanState) addObject(addr uintptr, r *stackObjectRecord) {
+ x := s.tail
+ if x == nil {
+ // initial setup
+ x = (*stackObjectBuf)(unsafe.Pointer(getempty()))
+ x.next = nil
+ s.head = x
+ s.tail = x
+ }
+ if x.nobj > 0 && uint32(addr-s.stack.lo) < x.obj[x.nobj-1].off+x.obj[x.nobj-1].size {
+ throw("objects added out of order or overlapping")
+ }
+ if x.nobj == len(x.obj) {
+ // full buffer - allocate a new buffer, add to end of linked list
+ y := (*stackObjectBuf)(unsafe.Pointer(getempty()))
+ y.next = nil
+ x.next = y
+ s.tail = y
+ x = y
+ }
+ obj := &x.obj[x.nobj]
+ x.nobj++
+ obj.off = uint32(addr - s.stack.lo)
+ obj.size = uint32(r.size)
+ obj.setRecord(r)
+ // obj.left and obj.right will be initialized by buildIndex before use.
+ s.nobjs++
+}
+
+// buildIndex initializes s.root to a binary search tree.
+// It should be called after all addObject calls but before
+// any call of findObject.
+func (s *stackScanState) buildIndex() {
+ s.root, _, _ = binarySearchTree(s.head, 0, s.nobjs)
+}
+
+// Build a binary search tree with the n objects in the list
+// x.obj[idx], x.obj[idx+1], ..., x.next.obj[0], ...
+// Returns the root of that tree, and the buf+idx of the nth object after x.obj[idx].
+// (The first object that was not included in the binary search tree.)
+// If n == 0, returns nil, x, idx.
+func binarySearchTree(x *stackObjectBuf, idx int, n int) (root *stackObject, restBuf *stackObjectBuf, restIdx int) {
+ if n == 0 {
+ return nil, x, idx
+ }
+ var left, right *stackObject
+ left, x, idx = binarySearchTree(x, idx, n/2)
+ root = &x.obj[idx]
+ idx++
+ if idx == len(x.obj) {
+ x = x.next
+ idx = 0
+ }
+ right, x, idx = binarySearchTree(x, idx, n-n/2-1)
+ root.left = left
+ root.right = right
+ return root, x, idx
+}
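+
+// The same construction over a plain slice, as a hedged standalone sketch
+// (the runtime version above walks linked buffers instead to avoid
+// allocation):
+//
+//	func bst(objs []*stackObject) *stackObject {
+//		if len(objs) == 0 {
+//			return nil
+//		}
+//		mid := len(objs) / 2
+//		root := objs[mid]
+//		root.left = bst(objs[:mid])
+//		root.right = bst(objs[mid+1:])
+//		return root
+//	}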
+
+// findObject returns the stack object containing address a, if any.
+// Must have called buildIndex previously.
+func (s *stackScanState) findObject(a uintptr) *stackObject {
+ off := uint32(a - s.stack.lo)
+ obj := s.root
+ for {
+ if obj == nil {
+ return nil
+ }
+ if off < obj.off {
+ obj = obj.left
+ continue
+ }
+ if off >= obj.off+obj.size {
+ obj = obj.right
+ continue
+ }
+ return obj
+ }
+}
diff --git a/contrib/go/_std_1.19/src/runtime/mgcsweep.go b/contrib/go/_std_1.19/src/runtime/mgcsweep.go
new file mode 100644
index 0000000000..de57f18c4f
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mgcsweep.go
@@ -0,0 +1,889 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector: sweeping
+
+// The sweeper consists of two different algorithms:
+//
+// * The object reclaimer finds and frees unmarked slots in spans. It
+// can free a whole span if none of the objects are marked, but that
+// isn't its goal. This can be driven either synchronously by
+// mcentral.cacheSpan for mcentral spans, or asynchronously by
+// sweepone, which looks at all the mcentral lists.
+//
+// * The span reclaimer looks for spans that contain no marked objects
+// and frees whole spans. This is a separate algorithm because
+// freeing whole spans is the hardest task for the object reclaimer,
+// but is critical when allocating new spans. The entry point for
+// this is mheap_.reclaim and it's driven by a sequential scan of
+// the page marks bitmap in the heap arenas.
+//
+// Both algorithms ultimately call mspan.sweep, which sweeps a single
+// heap span.
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+var sweep sweepdata
+
+// State of background sweep.
+type sweepdata struct {
+ lock mutex
+ g *g
+ parked bool
+ started bool
+
+ nbgsweep uint32
+ npausesweep uint32
+
+ // active tracks outstanding sweepers and the sweep
+ // termination condition.
+ active activeSweep
+
+ // centralIndex is the current unswept span class.
+ // It represents an index into the mcentral span
+ // sets. Accessed and updated via its load and
+ // update methods. Not protected by a lock.
+ //
+ // Reset at mark termination.
+ // Used by mheap.nextSpanForSweep.
+ centralIndex sweepClass
+}
+
+// sweepClass is a spanClass and one bit to represent whether we're currently
+// sweeping partial or full spans.
+type sweepClass uint32
+
+const (
+ numSweepClasses = numSpanClasses * 2
+ sweepClassDone sweepClass = sweepClass(^uint32(0))
+)
+
+func (s *sweepClass) load() sweepClass {
+ return sweepClass(atomic.Load((*uint32)(s)))
+}
+
+func (s *sweepClass) update(sNew sweepClass) {
+ // Only update *s if its current value is less than sNew,
+ // since *s increases monotonically.
+ sOld := s.load()
+ for sOld < sNew && !atomic.Cas((*uint32)(s), uint32(sOld), uint32(sNew)) {
+ sOld = s.load()
+ }
+ // TODO(mknyszek): This isn't the only place we have
+ // an atomic monotonically increasing counter. It would
+ // be nice to have an "atomic max" which is just implemented
+ // as the above on most architectures. Some architectures
+ // like RISC-V however have native support for an atomic max.
+}
+
+func (s *sweepClass) clear() {
+ atomic.Store((*uint32)(s), 0)
+}
+
+// split returns the underlying span class as well as
+// whether we're interested in the full or partial
+// unswept lists for that class, indicated as a boolean
+// (true means "full").
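+//
+// For example (a sketch): sweepClass 5 (0b101) splits into spanClass 2
+// with full == false, i.e. the partial unswept list for span class 2.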
+func (s sweepClass) split() (spc spanClass, full bool) {
+ return spanClass(s >> 1), s&1 == 0
+}
+
+// nextSpanForSweep finds and pops the next span for sweeping from the
+// central sweep buffers. It returns ownership of the span to the caller.
+// Returns nil if no such span exists.
+func (h *mheap) nextSpanForSweep() *mspan {
+ sg := h.sweepgen
+ for sc := sweep.centralIndex.load(); sc < numSweepClasses; sc++ {
+ spc, full := sc.split()
+ c := &h.central[spc].mcentral
+ var s *mspan
+ if full {
+ s = c.fullUnswept(sg).pop()
+ } else {
+ s = c.partialUnswept(sg).pop()
+ }
+ if s != nil {
+ // Write down that we found something so future sweepers
+ // can start from here.
+ sweep.centralIndex.update(sc)
+ return s
+ }
+ }
+ // Write down that we found nothing.
+ sweep.centralIndex.update(sweepClassDone)
+ return nil
+}
+
+const sweepDrainedMask = 1 << 31
+
+// activeSweep is a type that captures whether sweeping
+// is done, and whether there are any outstanding sweepers.
+//
+// Every potential sweeper must call begin() before they look
+// for work, and end() after they've finished sweeping.
+type activeSweep struct {
+ // state is divided into two parts.
+ //
+ // The top bit (masked by sweepDrainedMask) is a boolean
+ // value indicating whether all the sweep work has been
+ // drained from the queue.
+ //
+ // The rest of the bits are a counter, indicating the
+ // number of outstanding concurrent sweepers.
+ state atomic.Uint32
+}
+
+// begin registers a new sweeper. Returns a sweepLocker
+// for acquiring spans for sweeping. Any outstanding sweeper blocks
+// sweep termination.
+//
+// If the sweepLocker is invalid, the caller can be sure that all
+// outstanding sweep work has been drained, so there is nothing left
+// to sweep. Note that there may be sweepers currently running, so
+// this does not indicate that all sweeping has completed.
+//
+// Even if the sweepLocker is invalid, its sweepGen is always valid.
+func (a *activeSweep) begin() sweepLocker {
+ for {
+ state := a.state.Load()
+ if state&sweepDrainedMask != 0 {
+ return sweepLocker{mheap_.sweepgen, false}
+ }
+ if a.state.CompareAndSwap(state, state+1) {
+ return sweepLocker{mheap_.sweepgen, true}
+ }
+ }
+}
+
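+// Typical usage of begin/end, as a sketch (sweepone below follows this
+// pattern):
+//
+//	sl := sweep.active.begin()
+//	if !sl.valid {
+//		return // all sweep work has already been drained
+//	}
+//	// ... acquire spans via sl.tryAcquire and sweep them ...
+//	sweep.active.end(sl)
+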
+// end deregisters a sweeper. Must be called once for each time
+// begin is called if the sweepLocker is valid.
+func (a *activeSweep) end(sl sweepLocker) {
+ if sl.sweepGen != mheap_.sweepgen {
+ throw("sweeper left outstanding across sweep generations")
+ }
+ for {
+ state := a.state.Load()
+ if (state&^sweepDrainedMask)-1 >= sweepDrainedMask {
+ throw("mismatched begin/end of activeSweep")
+ }
+ if a.state.CompareAndSwap(state, state-1) {
+ if state != sweepDrainedMask {
+ return
+ }
+ if debug.gcpacertrace > 0 {
+ print("pacer: sweep done at heap size ", gcController.heapLive>>20, "MB; allocated ", (gcController.heapLive-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept.Load(), " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
+ }
+ return
+ }
+ }
+}
+
+// markDrained marks the active sweep cycle as having drained
+// all remaining work. This is safe to be called concurrently
+// with all other methods of activeSweep, though may race.
+//
+// Returns true if this call was the one that actually performed
+// the mark.
+func (a *activeSweep) markDrained() bool {
+ for {
+ state := a.state.Load()
+ if state&sweepDrainedMask != 0 {
+ return false
+ }
+ if a.state.CompareAndSwap(state, state|sweepDrainedMask) {
+ return true
+ }
+ }
+}
+
+// sweepers returns the current number of active sweepers.
+func (a *activeSweep) sweepers() uint32 {
+ return a.state.Load() &^ sweepDrainedMask
+}
+
+// isDone returns true if all sweep work has been drained and no more
+// outstanding sweepers exist. That is, when the sweep phase is
+// completely done.
+func (a *activeSweep) isDone() bool {
+ return a.state.Load() == sweepDrainedMask
+}
+
+// reset sets up the activeSweep for the next sweep cycle.
+//
+// The world must be stopped.
+func (a *activeSweep) reset() {
+ assertWorldStopped()
+ a.state.Store(0)
+}
+
+// finishsweep_m ensures that all spans are swept.
+//
+// The world must be stopped. This ensures there are no sweeps in
+// progress.
+//
+//go:nowritebarrier
+func finishsweep_m() {
+ assertWorldStopped()
+
+ // Sweeping must be complete before marking commences, so
+ // sweep any unswept spans. If this is a concurrent GC, there
+ // shouldn't be any spans left to sweep, so this should finish
+ // instantly. If GC was forced before the concurrent sweep
+ // finished, there may be spans to sweep.
+ for sweepone() != ^uintptr(0) {
+ sweep.npausesweep++
+ }
+
+ // Make sure there aren't any outstanding sweepers left.
+ // At this point, with the world stopped, it means one of two
+ // things: either we were able to preempt a sweeper, or
+ // a sweeper didn't call sweep.active.end when it should have.
+ // Both cases indicate a bug, so throw.
+ if sweep.active.sweepers() != 0 {
+ throw("active sweepers found at start of mark phase")
+ }
+
+ // Reset all the unswept buffers, which should be empty.
+ // Do this in sweep termination as opposed to mark termination
+ // so that we can catch unswept spans and reclaim blocks as
+ // soon as possible.
+ sg := mheap_.sweepgen
+ for i := range mheap_.central {
+ c := &mheap_.central[i].mcentral
+ c.partialUnswept(sg).reset()
+ c.fullUnswept(sg).reset()
+ }
+
+ // Sweeping is done, so if the scavenger isn't already awake,
+ // wake it up. There's definitely work for it to do at this
+ // point.
+ scavenger.wake()
+
+ nextMarkBitArenaEpoch()
+}
+
+func bgsweep(c chan int) {
+ sweep.g = getg()
+
+ lockInit(&sweep.lock, lockRankSweep)
+ lock(&sweep.lock)
+ sweep.parked = true
+ c <- 1
+ goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
+
+ for {
+ for sweepone() != ^uintptr(0) {
+ sweep.nbgsweep++
+ Gosched()
+ }
+ for freeSomeWbufs(true) {
+ Gosched()
+ }
+ lock(&sweep.lock)
+ if !isSweepDone() {
+ // This can happen if a GC runs between
+ // sweepone returning ^0 above
+ // and the lock being acquired.
+ unlock(&sweep.lock)
+ continue
+ }
+ sweep.parked = true
+ goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
+ }
+}
+
+// sweepLocker acquires sweep ownership of spans.
+type sweepLocker struct {
+ // sweepGen is the sweep generation of the heap.
+ sweepGen uint32
+ valid bool
+}
+
+// sweepLocked represents sweep ownership of a span.
+type sweepLocked struct {
+ *mspan
+}
+
+// tryAcquire attempts to acquire sweep ownership of span s. If it
+// successfully acquires ownership, it blocks sweep completion.
+func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool) {
+ if !l.valid {
+ throw("use of invalid sweepLocker")
+ }
+ // Check before attempting to CAS.
+ if atomic.Load(&s.sweepgen) != l.sweepGen-2 {
+ return sweepLocked{}, false
+ }
+ // Attempt to acquire sweep ownership of s.
+ if !atomic.Cas(&s.sweepgen, l.sweepGen-2, l.sweepGen-1) {
+ return sweepLocked{}, false
+ }
+ return sweepLocked{s}, true
+}
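+
+// For reference, the transitions tryAcquire participates in, mirroring
+// the sweepgen comment on mspan in mheap.go:
+//
+//	s.sweepgen == h.sweepgen-2 // span needs sweeping
+//	s.sweepgen == h.sweepgen-1 // span is being swept (the CAS above)
+//	s.sweepgen == h.sweepgen   // span is swept (set by mspan.sweep)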
+
+// sweepone sweeps some unswept heap span and returns the number of pages returned
+// to the heap, or ^uintptr(0) if there was nothing to sweep.
+func sweepone() uintptr {
+ gp := getg()
+
+ // Increment locks to ensure that the goroutine is not preempted
+ // in the middle of sweep, which would leave the span in an
+ // inconsistent state for the next GC.
+ gp.m.locks++
+
+ // TODO(austin): sweepone is almost always called in a loop;
+ // lift the sweepLocker into its callers.
+ sl := sweep.active.begin()
+ if !sl.valid {
+ gp.m.locks--
+ return ^uintptr(0)
+ }
+
+ // Find a span to sweep.
+ npages := ^uintptr(0)
+ var noMoreWork bool
+ for {
+ s := mheap_.nextSpanForSweep()
+ if s == nil {
+ noMoreWork = sweep.active.markDrained()
+ break
+ }
+ if state := s.state.get(); state != mSpanInUse {
+ // This can happen if direct sweeping already
+ // swept this span, but in that case the sweep
+ // generation should always be up-to-date.
+ if !(s.sweepgen == sl.sweepGen || s.sweepgen == sl.sweepGen+3) {
+ print("runtime: bad span s.state=", state, " s.sweepgen=", s.sweepgen, " sweepgen=", sl.sweepGen, "\n")
+ throw("non in-use span in unswept list")
+ }
+ continue
+ }
+ if s, ok := sl.tryAcquire(s); ok {
+ // Sweep the span we found.
+ npages = s.npages
+ if s.sweep(false) {
+ // Whole span was freed. Count it toward the
+ // page reclaimer credit since these pages can
+ // now be used for span allocation.
+ mheap_.reclaimCredit.Add(npages)
+ } else {
+ // Span is still in-use, so this returned no
+ // pages to the heap and the span needs to
+ // move to the swept in-use list.
+ npages = 0
+ }
+ break
+ }
+ }
+ sweep.active.end(sl)
+
+ if noMoreWork {
+ // The sweep list is empty. There may still be
+ // concurrent sweeps running, but we're at least very
+ // close to done sweeping.
+
+ // Move the scavenge gen forward (signaling
+ // that there's new work to do) and wake the scavenger.
+ //
+ // The scavenger is signaled by the last sweeper because once
+ // sweeping is done, we will definitely have useful work for
+ // the scavenger to do, since the scavenger only runs over the
+ // heap once per GC cycle. This update is not done during sweep
+ // termination because in some cases there may be a long delay
+ // between sweep done and sweep termination (e.g. not enough
+ // allocations to trigger a GC) which would be nice to fill in
+ // with scavenging work.
+ if debug.scavtrace > 0 {
+ systemstack(func() {
+ lock(&mheap_.lock)
+ released := atomic.Loaduintptr(&mheap_.pages.scav.released)
+ printScavTrace(released, false)
+ atomic.Storeuintptr(&mheap_.pages.scav.released, 0)
+ unlock(&mheap_.lock)
+ })
+ }
+ scavenger.ready()
+ }
+
+ gp.m.locks--
+ return npages
+}
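+
+// A minimal sketch of the caller pattern, as used by bgsweep and
+// finishsweep_m above:
+//
+//	for sweepone() != ^uintptr(0) {
+//		// swept one span; keep going until nothing is left
+//	}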
+
+// isSweepDone reports whether all spans are swept.
+//
+// Note that this condition may transition from false to true at any
+// time as the sweeper runs. It may transition from true to false if a
+// GC runs; to prevent that the caller must be non-preemptible or must
+// somehow block GC progress.
+func isSweepDone() bool {
+ return sweep.active.isDone()
+}
+
+// Returns only when span s has been swept.
+//
+//go:nowritebarrier
+func (s *mspan) ensureSwept() {
+ // Caller must disable preemption.
+ // Otherwise when this function returns the span can become unswept again
+ // (if GC is triggered on another goroutine).
+ _g_ := getg()
+ if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
+ throw("mspan.ensureSwept: m is not locked")
+ }
+
+ // If this operation fails, it means there are no more spans
+ // to be swept. In this case, either s has already been swept,
+ // or it is about to be acquired for sweeping and swept.
+ sl := sweep.active.begin()
+ if sl.valid {
+ // The caller must be sure that the span is a mSpanInUse span.
+ if s, ok := sl.tryAcquire(s); ok {
+ s.sweep(false)
+ sweep.active.end(sl)
+ return
+ }
+ sweep.active.end(sl)
+ }
+
+ // Unfortunately we can't sweep the span ourselves. Somebody else
+ // got to it first. We don't have efficient means to wait, but that's
+ // OK, it will be swept fairly soon.
+ for {
+ spangen := atomic.Load(&s.sweepgen)
+ if spangen == sl.sweepGen || spangen == sl.sweepGen+3 {
+ break
+ }
+ osyield()
+ }
+}
+
+// Sweep frees or collects finalizers for blocks not marked in the mark phase.
+// It clears the mark bits in preparation for the next GC round.
+// Returns true if the span was returned to the heap.
+// If preserve=true, don't return it to the heap or relink it in
+// the mcentral lists; the caller takes care of it.
+func (sl *sweepLocked) sweep(preserve bool) bool {
+ // It's critical that we enter this function with preemption disabled,
+ // GC must not start while we are in the middle of this function.
+ _g_ := getg()
+ if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
+ throw("mspan.sweep: m is not locked")
+ }
+
+ s := sl.mspan
+ if !preserve {
+ // We'll release ownership of this span. Nil it out to
+ // prevent the caller from accidentally using it.
+ sl.mspan = nil
+ }
+
+ sweepgen := mheap_.sweepgen
+ if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
+ print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
+ throw("mspan.sweep: bad span state")
+ }
+
+ if trace.enabled {
+ traceGCSweepSpan(s.npages * _PageSize)
+ }
+
+ mheap_.pagesSwept.Add(int64(s.npages))
+
+ spc := s.spanclass
+ size := s.elemsize
+
+ // The allocBits indicate which unmarked objects don't need to be
+ // processed since they were free at the end of the last GC cycle
+ // and were not allocated since then.
+ // If the allocBits index is >= s.freeindex and the bit
+ // is not marked then the object remains unallocated
+ // since the last GC.
+ // This situation is analogous to being on a freelist.
+
+ // Unlink & free special records for any objects we're about to free.
+ // Two complications here:
+ // 1. An object can have both finalizer and profile special records.
+ // In such case we need to queue finalizer for execution,
+ // mark the object as live and preserve the profile special.
+ // 2. A tiny object can have several finalizers set up for different offsets.
+ //    If such an object is not marked, we need to queue all finalizers at once.
+ // Both 1 and 2 are possible at the same time.
+ hadSpecials := s.specials != nil
+ siter := newSpecialsIter(s)
+ for siter.valid() {
+ // A finalizer can be set for an inner byte of an object; find the object's beginning.
+ objIndex := uintptr(siter.s.offset) / size
+ p := s.base() + objIndex*size
+ mbits := s.markBitsForIndex(objIndex)
+ if !mbits.isMarked() {
+ // This object is not marked and has at least one special record.
+ // Pass 1: see if it has at least one finalizer.
+ hasFin := false
+ endOffset := p - s.base() + size
+ for tmp := siter.s; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
+ if tmp.kind == _KindSpecialFinalizer {
+ // Stop freeing of object if it has a finalizer.
+ mbits.setMarkedNonAtomic()
+ hasFin = true
+ break
+ }
+ }
+ // Pass 2: queue all finalizers _or_ handle profile record.
+ for siter.valid() && uintptr(siter.s.offset) < endOffset {
+ // Find the exact byte for which the special was set up
+ // (as opposed to the object beginning).
+ special := siter.s
+ p := s.base() + uintptr(special.offset)
+ if special.kind == _KindSpecialFinalizer || !hasFin {
+ siter.unlinkAndNext()
+ freeSpecial(special, unsafe.Pointer(p), size)
+ } else {
+ // The object has finalizers, so we're keeping it alive.
+ // All other specials only apply when an object is freed,
+ // so just keep the special record.
+ siter.next()
+ }
+ }
+ } else {
+ // object is still live
+ if siter.s.kind == _KindSpecialReachable {
+ special := siter.unlinkAndNext()
+ (*specialReachable)(unsafe.Pointer(special)).reachable = true
+ freeSpecial(special, unsafe.Pointer(p), size)
+ } else {
+ // keep special record
+ siter.next()
+ }
+ }
+ }
+ if hadSpecials && s.specials == nil {
+ spanHasNoSpecials(s)
+ }
+
+ if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled || asanenabled {
+ // Find all newly freed objects. This doesn't have to
+ // be efficient; allocfreetrace has massive overhead.
+ mbits := s.markBitsForBase()
+ abits := s.allocBitsForIndex(0)
+ for i := uintptr(0); i < s.nelems; i++ {
+ if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
+ x := s.base() + i*s.elemsize
+ if debug.allocfreetrace != 0 {
+ tracefree(unsafe.Pointer(x), size)
+ }
+ if debug.clobberfree != 0 {
+ clobberfree(unsafe.Pointer(x), size)
+ }
+ if raceenabled {
+ racefree(unsafe.Pointer(x), size)
+ }
+ if msanenabled {
+ msanfree(unsafe.Pointer(x), size)
+ }
+ if asanenabled {
+ asanpoison(unsafe.Pointer(x), size)
+ }
+ }
+ mbits.advance()
+ abits.advance()
+ }
+ }
+
+ // Check for zombie objects.
+ if s.freeindex < s.nelems {
+ // Everything < freeindex is allocated and hence
+ // cannot be a zombie.
+ //
+ // Check the first bitmap byte, where we have to be
+ // careful with freeindex.
+ obj := s.freeindex
+ if (*s.gcmarkBits.bytep(obj / 8)&^*s.allocBits.bytep(obj / 8))>>(obj%8) != 0 {
+ s.reportZombies()
+ }
+ // Check remaining bytes.
+ for i := obj/8 + 1; i < divRoundUp(s.nelems, 8); i++ {
+ if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 {
+ s.reportZombies()
+ }
+ }
+ }
+
+ // Count the number of free objects in this span.
+ nalloc := uint16(s.countAlloc())
+ nfreed := s.allocCount - nalloc
+ if nalloc > s.allocCount {
+ // The zombie check above should have caught this in
+ // more detail.
+ print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
+ throw("sweep increased allocation count")
+ }
+
+ s.allocCount = nalloc
+ s.freeindex = 0 // reset allocation index to start of span.
+ if trace.enabled {
+ getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
+ }
+
+ // gcmarkBits becomes the allocBits.
+ // get a fresh cleared gcmarkBits in preparation for next GC
+ s.allocBits = s.gcmarkBits
+ s.gcmarkBits = newMarkBits(s.nelems)
+
+ // Initialize alloc bits cache.
+ s.refillAllocCache(0)
+
+ // The span must be in our exclusive ownership until we update sweepgen,
+ // check for potential races.
+ if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
+ print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
+ throw("mspan.sweep: bad span state after sweep")
+ }
+ if s.sweepgen == sweepgen+1 || s.sweepgen == sweepgen+3 {
+ throw("swept cached span")
+ }
+
+ // We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
+ // because of the potential for a concurrent free/SetFinalizer.
+ //
+ // But we need to set it before we make the span available for allocation
+ // (return it to heap or mcentral), because allocation code assumes that a
+ // span is already swept if available for allocation.
+ //
+ // Serialization point.
+ // At this point the mark bits are cleared and allocation ready
+ // to go so release the span.
+ atomic.Store(&s.sweepgen, sweepgen)
+
+ if spc.sizeclass() != 0 {
+ // Handle spans for small objects.
+ if nfreed > 0 {
+ // Only mark the span as needing zeroing if we've freed any
+ // objects, because a fresh span that had been allocated into,
+ // wasn't totally filled, but then swept, still has all of its
+ // free slots zeroed.
+ s.needzero = 1
+ stats := memstats.heapStats.acquire()
+ atomic.Xadd64(&stats.smallFreeCount[spc.sizeclass()], int64(nfreed))
+ memstats.heapStats.release()
+
+ // Count the frees in the inconsistent, internal stats.
+ gcController.totalFree.Add(int64(nfreed) * int64(s.elemsize))
+ }
+ if !preserve {
+ // The caller may not have removed this span from whatever
+ // unswept set it's on, but has taken ownership of the span
+ // for sweeping by updating sweepgen. If this span is still
+ // in an unswept set, then the mcentral will pop it off the
+ // set, check its sweepgen, and ignore it.
+ if nalloc == 0 {
+ // Free totally free span directly back to the heap.
+ mheap_.freeSpan(s)
+ return true
+ }
+ // Return span back to the right mcentral list.
+ if uintptr(nalloc) == s.nelems {
+ mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
+ } else {
+ mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
+ }
+ }
+ } else if !preserve {
+ // Handle spans for large objects.
+ if nfreed != 0 {
+ // Free large object span to heap.
+
+ // NOTE(rsc,dvyukov): The original implementation of efence
+ // in CL 22060046 used sysFree instead of sysFault, so that
+ // the operating system would eventually give the memory
+ // back to us again, so that an efence program could run
+ // longer without running out of memory. Unfortunately,
+ // calling sysFree here without any kind of adjustment of the
+ // heap data structures means that when the memory does
+ // come back to us, we have the wrong metadata for it, either in
+ // the mspan structures or in the garbage collection bitmap.
+ // Using sysFault here means that the program will run out of
+ // memory fairly quickly in efence mode, but at least it won't
+ // have mysterious crashes due to confused memory reuse.
+ // It should be possible to switch back to sysFree if we also
+ // implement and then call some kind of mheap.deleteSpan.
+ if debug.efence > 0 {
+ s.limit = 0 // prevent mlookup from finding this span
+ sysFault(unsafe.Pointer(s.base()), size)
+ } else {
+ mheap_.freeSpan(s)
+ }
+
+ // Count the free in the consistent, external stats.
+ stats := memstats.heapStats.acquire()
+ atomic.Xadd64(&stats.largeFreeCount, 1)
+ atomic.Xadd64(&stats.largeFree, int64(size))
+ memstats.heapStats.release()
+
+ // Count the free in the inconsistent, internal stats.
+ gcController.totalFree.Add(int64(size))
+
+ return true
+ }
+
+ // Add a large span directly onto the full+swept list.
+ mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
+ }
+ return false
+}
+
+// reportZombies reports any marked but free objects in s and throws.
+//
+// This generally means one of the following:
+//
+// 1. User code converted a pointer to a uintptr and then back
+// unsafely, and a GC ran while the uintptr was the only reference to
+// an object.
+//
+// 2. User code (or a compiler bug) constructed a bad pointer that
+// points to a free slot, often a past-the-end pointer.
+//
+// 3. The GC two cycles ago missed a pointer and freed a live object,
+// but it was still live in the last cycle, so this GC cycle found a
+// pointer to that object and marked it.
+func (s *mspan) reportZombies() {
+ printlock()
+ print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer? try -d=checkptr)\n")
+ mbits := s.markBitsForBase()
+ abits := s.allocBitsForIndex(0)
+ for i := uintptr(0); i < s.nelems; i++ {
+ addr := s.base() + i*s.elemsize
+ print(hex(addr))
+ alloc := i < s.freeindex || abits.isMarked()
+ if alloc {
+ print(" alloc")
+ } else {
+ print(" free ")
+ }
+ if mbits.isMarked() {
+ print(" marked ")
+ } else {
+ print(" unmarked")
+ }
+ zombie := mbits.isMarked() && !alloc
+ if zombie {
+ print(" zombie")
+ }
+ print("\n")
+ if zombie {
+ length := s.elemsize
+ if length > 1024 {
+ length = 1024
+ }
+ hexdumpWords(addr, addr+length, nil)
+ }
+ mbits.advance()
+ abits.advance()
+ }
+ throw("found pointer to free object")
+}
+
+// deductSweepCredit deducts sweep credit for allocating a span of
+// size spanBytes. This must be performed *before* the span is
+// allocated to ensure the system has enough credit. If necessary, it
+// performs sweeping to prevent going into debt. If the caller will
+// also sweep pages (e.g., for a large allocation), it can pass a
+// non-zero callerSweepPages to leave that many pages unswept.
+//
+// deductSweepCredit makes a worst-case assumption that all spanBytes
+// bytes of the ultimately allocated span will be available for object
+// allocation.
+//
+// deductSweepCredit is the core of the "proportional sweep" system.
+// It uses statistics gathered by the garbage collector to perform
+// enough sweeping so that all pages are swept during the concurrent
+// sweep phase between GC cycles.
+//
+// mheap_ must NOT be locked.
+func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
+ if mheap_.sweepPagesPerByte == 0 {
+ // Proportional sweep is done or disabled.
+ return
+ }
+
+ if trace.enabled {
+ traceGCSweepStart()
+ }
+
+retry:
+ sweptBasis := mheap_.pagesSweptBasis.Load()
+
+ // Fix debt if necessary.
+ newHeapLive := uintptr(atomic.Load64(&gcController.heapLive)-mheap_.sweepHeapLiveBasis) + spanBytes
+ pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
+ for pagesTarget > int64(mheap_.pagesSwept.Load()-sweptBasis) {
+ if sweepone() == ^uintptr(0) {
+ mheap_.sweepPagesPerByte = 0
+ break
+ }
+ if mheap_.pagesSweptBasis.Load() != sweptBasis {
+ // Sweep pacing changed. Recompute debt.
+ goto retry
+ }
+ }
+
+ if trace.enabled {
+ traceGCSweepDone()
+ }
+}
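+
+// In effect, the loop above restores the pacing invariant (sketch):
+//
+//	sweepPagesPerByte*(heapLive-sweepHeapLiveBasis+spanBytes) - callerSweepPages
+//		<= pagesSwept - pagesSweptBasis
+//
+// i.e. swept pages must be on or above the pacing line before the
+// allocation proceeds.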
+
+// clobberfree sets the memory content at x to bad content, for debugging
+// purposes.
+func clobberfree(x unsafe.Pointer, size uintptr) {
+ // size (span.elemsize) is always a multiple of 4.
+ for i := uintptr(0); i < size; i += 4 {
+ *(*uint32)(add(x, i)) = 0xdeadbeef
+ }
+}
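+
+// Illustratively, after clobberfree a freed 16-byte object reads as
+// four 0xdeadbeef words, so a use-after-free fails loudly:
+//
+//	deadbeef deadbeef deadbeef deadbeef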
+
+// gcPaceSweeper updates the sweeper's pacing parameters.
+//
+// Must be called whenever the GC's pacing is updated.
+//
+// The world must be stopped, or mheap_.lock must be held.
+func gcPaceSweeper(trigger uint64) {
+ assertWorldStoppedOrLockHeld(&mheap_.lock)
+
+ // Update sweep pacing.
+ if isSweepDone() {
+ mheap_.sweepPagesPerByte = 0
+ } else {
+ // Concurrent sweep needs to sweep all of the in-use
+ // pages by the time the allocated heap reaches the GC
+ // trigger. Compute the ratio of in-use pages to sweep
+ // per byte allocated, accounting for the fact that
+ // some might already be swept.
+ heapLiveBasis := atomic.Load64(&gcController.heapLive)
+ heapDistance := int64(trigger) - int64(heapLiveBasis)
+ // Add a little margin so rounding errors and
+ // concurrent sweep are less likely to leave pages
+ // unswept when GC starts.
+ heapDistance -= 1024 * 1024
+ if heapDistance < _PageSize {
+ // Avoid setting the sweep ratio extremely high
+ heapDistance = _PageSize
+ }
+ pagesSwept := mheap_.pagesSwept.Load()
+ pagesInUse := mheap_.pagesInUse.Load()
+ sweepDistancePages := int64(pagesInUse) - int64(pagesSwept)
+ if sweepDistancePages <= 0 {
+ mheap_.sweepPagesPerByte = 0
+ } else {
+ mheap_.sweepPagesPerByte = float64(sweepDistancePages) / float64(heapDistance)
+ mheap_.sweepHeapLiveBasis = heapLiveBasis
+ // Write pagesSweptBasis last, since this
+ // signals concurrent sweeps to recompute
+ // their debt.
+ mheap_.pagesSweptBasis.Store(pagesSwept)
+ }
+ }
+}
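+
+// The resulting ratio is, roughly (sketch; heapDistance is clamped as above):
+//
+//	sweepPagesPerByte = (pagesInUse - pagesSwept) / (trigger - heapLive - 1MiB)
+//
+// so the remaining unswept pages are finished by the time the live
+// heap grows to the trigger.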
diff --git a/contrib/go/_std_1.19/src/runtime/mgcwork.go b/contrib/go/_std_1.19/src/runtime/mgcwork.go
new file mode 100644
index 0000000000..424de2fcca
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mgcwork.go
@@ -0,0 +1,488 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+const (
+ _WorkbufSize = 2048 // in bytes; larger values result in less contention
+
+ // workbufAlloc is the number of bytes to allocate at a time
+ // for new workbufs. This must be a multiple of pageSize and
+ // should be a multiple of _WorkbufSize.
+ //
+ // Larger values reduce workbuf allocation overhead. Smaller
+ // values reduce heap fragmentation.
+ workbufAlloc = 32 << 10
+)
+
+func init() {
+ if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
+ throw("bad workbufAlloc")
+ }
+}
+
+// Garbage collector work pool abstraction.
+//
+// This implements a producer/consumer model for pointers to grey
+// objects. A grey object is one that is marked and on a work
+// queue. A black object is marked and not on a work queue.
+//
+// Write barriers, root discovery, stack scanning, and object scanning
+// produce pointers to grey objects. Scanning consumes pointers to
+// grey objects, thus blackening them, and then scans them,
+// potentially producing new pointers to grey objects.
+
+// A gcWork provides the interface to produce and consume work for the
+// garbage collector.
+//
+// A gcWork can be used on the stack as follows:
+//
+// (preemption must be disabled)
+// gcw := &getg().m.p.ptr().gcw
+// .. call gcw.put() to produce and gcw.tryGet() to consume ..
+//
+// It's important that any use of gcWork during the mark phase prevent
+// the garbage collector from transitioning to mark termination since
+// gcWork may locally hold GC work buffers. This can be done by
+// disabling preemption (systemstack or acquirem).
+type gcWork struct {
+ // wbuf1 and wbuf2 are the primary and secondary work buffers.
+ //
+ // This can be thought of as a stack of both work buffers'
+ // pointers concatenated. When we pop the last pointer, we
+ // shift the stack up by one work buffer by bringing in a new
+ // full buffer and discarding an empty one. When we fill both
+ // buffers, we shift the stack down by one work buffer by
+ // bringing in a new empty buffer and discarding a full one.
+ // This way we have one buffer's worth of hysteresis, which
+ // amortizes the cost of getting or putting a work buffer over
+ // at least one buffer of work and reduces contention on the
+ // global work lists.
+ //
+ // wbuf1 is always the buffer we're currently pushing to and
+ // popping from and wbuf2 is the buffer that will be discarded
+ // next.
+ //
+ // Invariant: Both wbuf1 and wbuf2 are nil or neither are.
+ wbuf1, wbuf2 *workbuf
+
+ // Bytes marked (blackened) on this gcWork. This is aggregated
+ // into work.bytesMarked by dispose.
+ bytesMarked uint64
+
+ // Heap scan work performed on this gcWork. This is aggregated into
+ // gcController by dispose and may also be flushed by callers.
+ // Other types of scan work are flushed immediately.
+ heapScanWork int64
+
+ // flushedWork indicates that a non-empty work buffer was
+ // flushed to the global work list since the last gcMarkDone
+ // termination check. Specifically, this indicates that this
+ // gcWork may have communicated work to another gcWork.
+ flushedWork bool
+}
+
+// Most of the methods of gcWork are go:nowritebarrierrec because the
+// write barrier itself can invoke gcWork methods but the methods are
+// not generally re-entrant. Hence, if a gcWork method invoked the
+// write barrier while the gcWork was in an inconsistent state, and
+// the write barrier in turn invoked a gcWork method, it could
+// permanently corrupt the gcWork.
+
+func (w *gcWork) init() {
+ w.wbuf1 = getempty()
+ wbuf2 := trygetfull()
+ if wbuf2 == nil {
+ wbuf2 = getempty()
+ }
+ w.wbuf2 = wbuf2
+}
+
+// put enqueues a pointer for the garbage collector to trace.
+// obj must point to the beginning of a heap object or an oblet.
+//
+//go:nowritebarrierrec
+func (w *gcWork) put(obj uintptr) {
+ flushed := false
+ wbuf := w.wbuf1
+ // Record that this may acquire the wbufSpans or heap lock to
+ // allocate a workbuf.
+ lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
+ lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
+ if wbuf == nil {
+ w.init()
+ wbuf = w.wbuf1
+ // wbuf is empty at this point.
+ } else if wbuf.nobj == len(wbuf.obj) {
+ w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
+ wbuf = w.wbuf1
+ if wbuf.nobj == len(wbuf.obj) {
+ putfull(wbuf)
+ w.flushedWork = true
+ wbuf = getempty()
+ w.wbuf1 = wbuf
+ flushed = true
+ }
+ }
+
+ wbuf.obj[wbuf.nobj] = obj
+ wbuf.nobj++
+
+ // If we put a buffer on full, let the GC controller know so
+ // it can encourage more workers to run. We delay this until
+ // the end of put so that w is in a consistent state, since
+ // enlistWorker may itself manipulate w.
+ if flushed && gcphase == _GCmark {
+ gcController.enlistWorker()
+ }
+}
+
+// putFast does a put and reports whether it can be done quickly;
+// otherwise it returns false and the caller needs to call put.
+//
+//go:nowritebarrierrec
+func (w *gcWork) putFast(obj uintptr) bool {
+ wbuf := w.wbuf1
+ if wbuf == nil || wbuf.nobj == len(wbuf.obj) {
+ return false
+ }
+
+ wbuf.obj[wbuf.nobj] = obj
+ wbuf.nobj++
+ return true
+}
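+
+// A sketch of the typical caller pattern: try the fast path first,
+// then fall back to the slow path:
+//
+//	if !gcw.putFast(obj) {
+//		gcw.put(obj)
+//	}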
+
+// putBatch performs a put on every pointer in obj. See put for
+// constraints on these pointers.
+//
+//go:nowritebarrierrec
+func (w *gcWork) putBatch(obj []uintptr) {
+ if len(obj) == 0 {
+ return
+ }
+
+ flushed := false
+ wbuf := w.wbuf1
+ if wbuf == nil {
+ w.init()
+ wbuf = w.wbuf1
+ }
+
+ for len(obj) > 0 {
+ for wbuf.nobj == len(wbuf.obj) {
+ putfull(wbuf)
+ w.flushedWork = true
+ w.wbuf1, w.wbuf2 = w.wbuf2, getempty()
+ wbuf = w.wbuf1
+ flushed = true
+ }
+ n := copy(wbuf.obj[wbuf.nobj:], obj)
+ wbuf.nobj += n
+ obj = obj[n:]
+ }
+
+ if flushed && gcphase == _GCmark {
+ gcController.enlistWorker()
+ }
+}
+
+// tryGet dequeues a pointer for the garbage collector to trace.
+//
+// If there are no pointers remaining in this gcWork or in the global
+// queue, tryGet returns 0. Note that there may still be pointers in
+// other gcWork instances or other caches.
+//
+//go:nowritebarrierrec
+func (w *gcWork) tryGet() uintptr {
+ wbuf := w.wbuf1
+ if wbuf == nil {
+ w.init()
+ wbuf = w.wbuf1
+ // wbuf is empty at this point.
+ }
+ if wbuf.nobj == 0 {
+ w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
+ wbuf = w.wbuf1
+ if wbuf.nobj == 0 {
+ owbuf := wbuf
+ wbuf = trygetfull()
+ if wbuf == nil {
+ return 0
+ }
+ putempty(owbuf)
+ w.wbuf1 = wbuf
+ }
+ }
+
+ wbuf.nobj--
+ return wbuf.obj[wbuf.nobj]
+}
+
+// tryGetFast dequeues a pointer for the garbage collector to trace
+// if one is readily available. Otherwise it returns 0 and
+// the caller is expected to call tryGet().
+//
+//go:nowritebarrierrec
+func (w *gcWork) tryGetFast() uintptr {
+ wbuf := w.wbuf1
+ if wbuf == nil || wbuf.nobj == 0 {
+ return 0
+ }
+
+ wbuf.nobj--
+ return wbuf.obj[wbuf.nobj]
+}
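+
+// A sketch of the matching consumer pattern:
+//
+//	obj := gcw.tryGetFast()
+//	if obj == 0 {
+//		obj = gcw.tryGet()
+//	}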
+
+// dispose returns any cached pointers to the global queue.
+// The buffers are being put on the full queue so that the
+// write barriers will not simply reacquire them before the
+// GC can inspect them. This helps reduce the mutator's
+// ability to hide pointers during the concurrent mark phase.
+//
+//go:nowritebarrierrec
+func (w *gcWork) dispose() {
+ if wbuf := w.wbuf1; wbuf != nil {
+ if wbuf.nobj == 0 {
+ putempty(wbuf)
+ } else {
+ putfull(wbuf)
+ w.flushedWork = true
+ }
+ w.wbuf1 = nil
+
+ wbuf = w.wbuf2
+ if wbuf.nobj == 0 {
+ putempty(wbuf)
+ } else {
+ putfull(wbuf)
+ w.flushedWork = true
+ }
+ w.wbuf2 = nil
+ }
+ if w.bytesMarked != 0 {
+ // dispose happens relatively infrequently. If this
+ // atomic becomes a problem, we should first try to
+ // dispose less and if necessary aggregate in a per-P
+ // counter.
+ atomic.Xadd64(&work.bytesMarked, int64(w.bytesMarked))
+ w.bytesMarked = 0
+ }
+ if w.heapScanWork != 0 {
+ gcController.heapScanWork.Add(w.heapScanWork)
+ w.heapScanWork = 0
+ }
+}
+
+// balance moves some work that's cached in this gcWork back on the
+// global queue.
+//
+//go:nowritebarrierrec
+func (w *gcWork) balance() {
+ if w.wbuf1 == nil {
+ return
+ }
+ if wbuf := w.wbuf2; wbuf.nobj != 0 {
+ putfull(wbuf)
+ w.flushedWork = true
+ w.wbuf2 = getempty()
+ } else if wbuf := w.wbuf1; wbuf.nobj > 4 {
+ w.wbuf1 = handoff(wbuf)
+ w.flushedWork = true // handoff did putfull
+ } else {
+ return
+ }
+ // We flushed a buffer to the full list, so wake a worker.
+ if gcphase == _GCmark {
+ gcController.enlistWorker()
+ }
+}
+
+// empty reports whether w has no mark work available.
+//
+//go:nowritebarrierrec
+func (w *gcWork) empty() bool {
+ return w.wbuf1 == nil || (w.wbuf1.nobj == 0 && w.wbuf2.nobj == 0)
+}
+
+// Internally, the GC work pool is kept in arrays in work buffers.
+// The gcWork interface caches a work buffer until full (or empty) to
+// avoid contending on the global work buffer lists.
+
+type workbufhdr struct {
+ node lfnode // must be first
+ nobj int
+}
+
+//go:notinheap
+type workbuf struct {
+ workbufhdr
+ // account for the above fields
+ obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
+}
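+
+// Illustratively, with _WorkbufSize = 2048 and 8-byte pointers this
+// leaves room for (2048 - sizeof(workbufhdr))/8 object pointers per
+// buffer, roughly 253 assuming a 24-byte header.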
+
+// workbuf factory routines. These funcs are used to manage the
+// workbufs.
+// If the GC asks for some work, these are the only routines that
+// make wbufs available to the GC.
+
+func (b *workbuf) checknonempty() {
+ if b.nobj == 0 {
+ throw("workbuf is empty")
+ }
+}
+
+func (b *workbuf) checkempty() {
+ if b.nobj != 0 {
+ throw("workbuf is not empty")
+ }
+}
+
+// getempty pops an empty work buffer off the work.empty list,
+// allocating new buffers if none are available.
+//
+//go:nowritebarrier
+func getempty() *workbuf {
+ var b *workbuf
+ if work.empty != 0 {
+ b = (*workbuf)(work.empty.pop())
+ if b != nil {
+ b.checkempty()
+ }
+ }
+ // Record that this may acquire the wbufSpans or heap lock to
+ // allocate a workbuf.
+ lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
+ lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
+ if b == nil {
+ // Allocate more workbufs.
+ var s *mspan
+ if work.wbufSpans.free.first != nil {
+ lock(&work.wbufSpans.lock)
+ s = work.wbufSpans.free.first
+ if s != nil {
+ work.wbufSpans.free.remove(s)
+ work.wbufSpans.busy.insert(s)
+ }
+ unlock(&work.wbufSpans.lock)
+ }
+ if s == nil {
+ systemstack(func() {
+ s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
+ })
+ if s == nil {
+ throw("out of memory")
+ }
+ // Record the new span in the busy list.
+ lock(&work.wbufSpans.lock)
+ work.wbufSpans.busy.insert(s)
+ unlock(&work.wbufSpans.lock)
+ }
+ // Slice up the span into new workbufs. Return one and
+ // put the rest on the empty list.
+ for i := uintptr(0); i+_WorkbufSize <= workbufAlloc; i += _WorkbufSize {
+ newb := (*workbuf)(unsafe.Pointer(s.base() + i))
+ newb.nobj = 0
+ lfnodeValidate(&newb.node)
+ if i == 0 {
+ b = newb
+ } else {
+ putempty(newb)
+ }
+ }
+ }
+ return b
+}
+
+// putempty puts a workbuf onto the work.empty list.
+// Upon entry this goroutine owns b. The lfstack.push relinquishes ownership.
+//
+//go:nowritebarrier
+func putempty(b *workbuf) {
+ b.checkempty()
+ work.empty.push(&b.node)
+}
+
+// putfull puts the workbuf on the work.full list for the GC.
+// putfull accepts partially full buffers so the GC can avoid competing
+// with the mutators for ownership of partially full buffers.
+//
+//go:nowritebarrier
+func putfull(b *workbuf) {
+ b.checknonempty()
+ work.full.push(&b.node)
+}
+
+// trygetfull tries to get a full or partially empty work buffer.
+// If one is not immediately available, it returns nil.
+//
+//go:nowritebarrier
+func trygetfull() *workbuf {
+ b := (*workbuf)(work.full.pop())
+ if b != nil {
+ b.checknonempty()
+ }
+ return b
+}
+
+//go:nowritebarrier
+func handoff(b *workbuf) *workbuf {
+ // Make new buffer with half of b's pointers.
+ b1 := getempty()
+ n := b.nobj / 2
+ b.nobj -= n
+ b1.nobj = n
+ memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), uintptr(n)*unsafe.Sizeof(b1.obj[0]))
+
+ // Put b on full list - let first half of b get stolen.
+ putfull(b)
+ return b1
+}
+
+// prepareFreeWorkbufs moves busy workbuf spans to free list so they
+// can be freed to the heap. This must only be called when all
+// workbufs are on the empty list.
+func prepareFreeWorkbufs() {
+ lock(&work.wbufSpans.lock)
+ if work.full != 0 {
+ throw("cannot free workbufs when work.full != 0")
+ }
+ // Since all workbufs are on the empty list, we don't care
+ // which ones are in which spans. We can wipe the entire empty
+ // list and move all workbuf spans to the free list.
+ work.empty = 0
+ work.wbufSpans.free.takeAll(&work.wbufSpans.busy)
+ unlock(&work.wbufSpans.lock)
+}
+
+// freeSomeWbufs frees some workbufs back to the heap and returns
+// true if it should be called again to free more.
+func freeSomeWbufs(preemptible bool) bool {
+ const batchSize = 64 // ~1–2 µs per span.
+ lock(&work.wbufSpans.lock)
+ if gcphase != _GCoff || work.wbufSpans.free.isEmpty() {
+ unlock(&work.wbufSpans.lock)
+ return false
+ }
+ systemstack(func() {
+ gp := getg().m.curg
+ for i := 0; i < batchSize && !(preemptible && gp.preempt); i++ {
+ span := work.wbufSpans.free.first
+ if span == nil {
+ break
+ }
+ work.wbufSpans.free.remove(span)
+ mheap_.freeManual(span, spanAllocWorkBuf)
+ }
+ })
+ more := !work.wbufSpans.free.isEmpty()
+ unlock(&work.wbufSpans.lock)
+ return more
+}
diff --git a/contrib/go/_std_1.19/src/runtime/mheap.go b/contrib/go/_std_1.19/src/runtime/mheap.go
new file mode 100644
index 0000000000..b19a2ff408
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mheap.go
@@ -0,0 +1,2156 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Page heap.
+//
+// See malloc.go for overview.
+
+package runtime
+
+import (
+ "internal/cpu"
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+const (
+ // minPhysPageSize is a lower-bound on the physical page size. The
+ // true physical page size may be larger than this. In contrast,
+ // sys.PhysPageSize is an upper-bound on the physical page size.
+ minPhysPageSize = 4096
+
+ // maxPhysPageSize is the maximum page size the runtime supports.
+ maxPhysPageSize = 512 << 10
+
+ // maxPhysHugePageSize sets an upper-bound on the maximum huge page size
+ // that the runtime supports.
+ maxPhysHugePageSize = pallocChunkBytes
+
+ // pagesPerReclaimerChunk indicates how many pages to scan from the
+ // pageInUse bitmap at a time. Used by the page reclaimer.
+ //
+ // Higher values reduce contention on scanning indexes (such as
+ // h.reclaimIndex), but increase the minimum latency of the
+ // operation.
+ //
+ // The time required to scan this many pages can vary a lot depending
+ // on how many spans are actually freed. Experimentally, it can
+ // scan for pages at ~300 GB/ms on a 2.6GHz Core i7, but can only
+ // free spans at ~32 MB/ms. Using 512 pages bounds this at
+ // roughly 100µs.
+ //
+ // Must be a multiple of the pageInUse bitmap element size and
+ // must also evenly divide pagesPerArena.
+ pagesPerReclaimerChunk = 512
+
+ // physPageAlignedStacks indicates whether stack allocations must be
+ // physical page aligned. This is a requirement for MAP_STACK on
+ // OpenBSD.
+ physPageAlignedStacks = GOOS == "openbsd"
+)
+
+// Main malloc heap.
+// The heap itself is the "free" and "scav" treaps,
+// but all the other global data is here too.
+//
+// mheap must not be heap-allocated because it contains mSpanLists,
+// which must not be heap-allocated.
+//
+//go:notinheap
+type mheap struct {
+ // lock must only be acquired on the system stack, otherwise a g
+ // could self-deadlock if its stack grows with the lock held.
+ lock mutex
+
+ _ uint32 // 8-byte align pages so its alignment is consistent with tests.
+
+ pages pageAlloc // page allocation data structure
+
+ sweepgen uint32 // sweep generation, see comment in mspan; written during STW
+
+ // allspans is a slice of all mspans ever created. Each mspan
+ // appears exactly once.
+ //
+ // The memory for allspans is manually managed and can be
+ // reallocated and move as the heap grows.
+ //
+ // In general, allspans is protected by mheap_.lock, which
+ // prevents concurrent access as well as freeing the backing
+ // store. Accesses during STW might not hold the lock, but
+ // must ensure that allocation cannot happen around the
+ // access (since that may free the backing store).
+ allspans []*mspan // all spans out there
+
+ // _ uint32 // align uint64 fields on 32-bit for atomics
+
+ // Proportional sweep
+ //
+ // These parameters represent a linear function from gcController.heapLive
+ // to page sweep count. The proportional sweep system works to
+ // stay in the black by keeping the current page sweep count
+ // above this line at the current gcController.heapLive.
+ //
+ // The line has slope sweepPagesPerByte and passes through a
+ // basis point at (sweepHeapLiveBasis, pagesSweptBasis). At
+ // any given time, the system is at (gcController.heapLive,
+ // pagesSwept) in this space.
+ //
+ // It is important that the line pass through a point we
+ // control rather than simply starting at a 0,0 origin
+ // because that lets us adjust sweep pacing at any time while
+ // accounting for current progress. If we could only adjust
+ // the slope, it would create a discontinuity in debt if any
+ // progress has already been made.
+ pagesInUse atomic.Uint64 // pages of spans in stats mSpanInUse
+ pagesSwept atomic.Uint64 // pages swept this cycle
+ pagesSweptBasis atomic.Uint64 // pagesSwept to use as the origin of the sweep ratio
+ sweepHeapLiveBasis uint64 // value of gcController.heapLive to use as the origin of sweep ratio; written with lock, read without
+ sweepPagesPerByte float64 // proportional sweep ratio; written with lock, read without
+ // TODO(austin): pagesInUse should be a uintptr, but the 386
+ // compiler can't 8-byte align fields.
+
+ // Page reclaimer state
+
+ // reclaimIndex is the page index in allArenas of next page to
+ // reclaim. Specifically, it refers to page (i %
+ // pagesPerArena) of arena allArenas[i / pagesPerArena].
+ //
+ // If this is >= 1<<63, the page reclaimer is done scanning
+ // the page marks.
+ reclaimIndex atomic.Uint64
+
+ // reclaimCredit is spare credit for extra pages swept. Since
+ // the page reclaimer works in large chunks, it may reclaim
+ // more than requested. Any spare pages released go to this
+ // credit pool.
+ reclaimCredit atomic.Uintptr
+
+ // arenas is the heap arena map. It points to the metadata for
+ // the heap for every arena frame of the entire usable virtual
+ // address space.
+ //
+ // Use arenaIndex to compute indexes into this array.
+ //
+ // For regions of the address space that are not backed by the
+ // Go heap, the arena map contains nil.
+ //
+ // Modifications are protected by mheap_.lock. Reads can be
+ // performed without locking; however, a given entry can
+ // transition from nil to non-nil at any time when the lock
+ // isn't held. (Entries never transition back to nil.)
+ //
+ // In general, this is a two-level mapping consisting of an L1
+ // map and possibly many L2 maps. This saves space when there
+ // are a huge number of arena frames. However, on many
+ // platforms (even 64-bit), arenaL1Bits is 0, making this
+ // effectively a single-level map. In this case, arenas[0]
+ // will never be nil.
+ arenas [1 << arenaL1Bits]*[1 << arenaL2Bits]*heapArena
+
+ // heapArenaAlloc is pre-reserved space for allocating heapArena
+ // objects. This is only used on 32-bit, where we pre-reserve
+ // this space to avoid interleaving it with the heap itself.
+ heapArenaAlloc linearAlloc
+
+ // arenaHints is a list of addresses at which to attempt to
+ // add more heap arenas. This is initially populated with a
+ // set of general hint addresses, and grown with the bounds of
+ // actual heap arena ranges.
+ arenaHints *arenaHint
+
+ // arena is a pre-reserved space for allocating heap arenas
+ // (the actual arenas). This is only used on 32-bit.
+ arena linearAlloc
+
+ // allArenas is the arenaIndex of every mapped arena. This can
+ // be used to iterate through the address space.
+ //
+ // Access is protected by mheap_.lock. However, since this is
+ // append-only and old backing arrays are never freed, it is
+ // safe to acquire mheap_.lock, copy the slice header, and
+ // then release mheap_.lock.
+ allArenas []arenaIdx
+
+ // sweepArenas is a snapshot of allArenas taken at the
+ // beginning of the sweep cycle. This can be read safely by
+ // simply blocking GC (by disabling preemption).
+ sweepArenas []arenaIdx
+
+ // markArenas is a snapshot of allArenas taken at the beginning
+ // of the mark cycle. Because allArenas is append-only, neither
+ // this slice nor its contents will change during the mark, so
+ // it can be read safely.
+ markArenas []arenaIdx
+
+ // curArena is the arena that the heap is currently growing
+ // into. This should always be physPageSize-aligned.
+ curArena struct {
+ base, end uintptr
+ }
+
+ _ uint32 // ensure 64-bit alignment of central
+
+ // central free lists for small size classes.
+ // the padding makes sure that the mcentrals are
+ // spaced CacheLinePadSize bytes apart, so that each mcentral.lock
+ // gets its own cache line.
+ // central is indexed by spanClass.
+ central [numSpanClasses]struct {
+ mcentral mcentral
+ pad [cpu.CacheLinePadSize - unsafe.Sizeof(mcentral{})%cpu.CacheLinePadSize]byte
+ }
+
+ spanalloc fixalloc // allocator for span*
+ cachealloc fixalloc // allocator for mcache*
+ specialfinalizeralloc fixalloc // allocator for specialfinalizer*
+ specialprofilealloc fixalloc // allocator for specialprofile*
+ specialReachableAlloc fixalloc // allocator for specialReachable
+ speciallock mutex // lock for special record allocators.
+ arenaHintAlloc fixalloc // allocator for arenaHints
+
+ unused *specialfinalizer // never set, just here to force the specialfinalizer type into DWARF
+}
+
+var mheap_ mheap
+
+// A heapArena stores metadata for a heap arena. heapArenas are stored
+// outside of the Go heap and accessed via the mheap_.arenas index.
+//
+//go:notinheap
+type heapArena struct {
+ // bitmap stores the pointer/scalar bitmap for the words in
+ // this arena. See mbitmap.go for a description. Use the
+ // heapBits type to access this.
+ bitmap [heapArenaBitmapBytes]byte
+
+ // spans maps from virtual address page ID within this arena to *mspan.
+ // For allocated spans, their pages map to the span itself.
+ // For free spans, only the lowest and highest pages map to the span itself.
+ // Internal pages map to an arbitrary span.
+ // For pages that have never been allocated, spans entries are nil.
+ //
+ // Modifications are protected by mheap.lock. Reads can be
+ // performed without locking, but ONLY from indexes that are
+ // known to contain in-use or stack spans. This means there
+ // must not be a safe-point between establishing that an
+ // address is live and looking it up in the spans array.
+ spans [pagesPerArena]*mspan
+
+ // pageInUse is a bitmap that indicates which spans are in
+ // state mSpanInUse. This bitmap is indexed by page number,
+ // but only the bit corresponding to the first page in each
+ // span is used.
+ //
+ // Reads and writes are atomic.
+ pageInUse [pagesPerArena / 8]uint8
+
+ // pageMarks is a bitmap that indicates which spans have any
+ // marked objects on them. Like pageInUse, only the bit
+ // corresponding to the first page in each span is used.
+ //
+ // Writes are done atomically during marking. Reads are
+ // non-atomic and lock-free since they only occur during
+ // sweeping (and hence never race with writes).
+ //
+ // This is used to quickly find whole spans that can be freed.
+ //
+ // TODO(austin): It would be nice if this was uint64 for
+ // faster scanning, but we don't have 64-bit atomic bit
+ // operations.
+ pageMarks [pagesPerArena / 8]uint8
+
+ // pageSpecials is a bitmap that indicates which spans have
+ // specials (finalizers or other). Like pageInUse, only the bit
+ // corresponding to the first page in each span is used.
+ //
+ // Writes are done atomically whenever a special is added to
+ // a span and whenever the last special is removed from a span.
+ // Reads are done atomically to find spans containing specials
+ // during marking.
+ pageSpecials [pagesPerArena / 8]uint8
+
+ // checkmarks stores the debug.gccheckmark state. It is only
+ // used if debug.gccheckmark > 0.
+ checkmarks *checkmarksMap
+
+ // zeroedBase marks the first byte of the first page in this
+ // arena which hasn't been used yet and is therefore already
+ // zero. zeroedBase is relative to the arena base.
+ // Increases monotonically until it hits heapArenaBytes.
+ //
+ // This field is sufficient to determine if an allocation
+ // needs to be zeroed because the page allocator follows an
+ // address-ordered first-fit policy.
+ //
+ // Read atomically and written with an atomic CAS.
+ zeroedBase uintptr
+}
+
+// arenaHint is a hint for where to grow the heap arenas. See
+// mheap_.arenaHints.
+//
+//go:notinheap
+type arenaHint struct {
+ addr uintptr
+ down bool
+ next *arenaHint
+}
+
+// An mspan is a run of pages.
+//
+// When a mspan is in the heap free treap, state == mSpanFree
+// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
+// If the mspan is in the heap scav treap, then in addition to the
+// above scavenged == true. scavenged == false in all other cases.
+//
+// When a mspan is allocated, state == mSpanInUse or mSpanManual
+// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
+
+// Every mspan is in one doubly-linked list, either in the mheap's
+// busy list or one of the mcentral's span lists.
+
+// An mspan representing actual memory has state mSpanInUse,
+// mSpanManual, or mSpanFree. Transitions between these states are
+// constrained as follows:
+//
+// - A span may transition from free to in-use or manual during any GC
+// phase.
+//
+// - During sweeping (gcphase == _GCoff), a span may transition from
+// in-use to free (as a result of sweeping) or manual to free (as a
+// result of stacks being freed).
+//
+// - During GC (gcphase != _GCoff), a span *must not* transition from
+// manual or in-use to free. Because concurrent GC may read a pointer
+// and then look up its span, the span state must be monotonic.
+//
+// Setting mspan.state to mSpanInUse or mSpanManual must be done
+// atomically and only after all other span fields are valid.
+// Likewise, if inspecting a span is contingent on it being
+// mSpanInUse, the state should be loaded atomically and checked
+// before depending on other fields. This allows the garbage collector
+// to safely deal with potentially invalid pointers, since resolving
+// such pointers may race with a span being allocated.
+type mSpanState uint8
+
+const (
+ mSpanDead mSpanState = iota
+ mSpanInUse // allocated for garbage collected heap
+ mSpanManual // allocated for manual management (e.g., stack allocator)
+)
+
+// mSpanStateNames are the names of the span states, indexed by
+// mSpanState.
+var mSpanStateNames = []string{
+ "mSpanDead",
+ "mSpanInUse",
+ "mSpanManual",
+ "mSpanFree",
+}
+
+// mSpanStateBox holds an mSpanState and provides atomic operations on
+// it. This is a separate type to disallow accidental comparison or
+// assignment with mSpanState.
+type mSpanStateBox struct {
+ s mSpanState
+}
+
+func (b *mSpanStateBox) set(s mSpanState) {
+ atomic.Store8((*uint8)(&b.s), uint8(s))
+}
+
+func (b *mSpanStateBox) get() mSpanState {
+ return mSpanState(atomic.Load8((*uint8)(&b.s)))
+}
+
+// mSpanList heads a linked list of spans.
+//
+//go:notinheap
+type mSpanList struct {
+ first *mspan // first span in list, or nil if none
+ last *mspan // last span in list, or nil if none
+}
+
+//go:notinheap
+type mspan struct {
+ next *mspan // next span in list, or nil if none
+ prev *mspan // previous span in list, or nil if none
+ list *mSpanList // For debugging. TODO: Remove.
+
+ startAddr uintptr // address of first byte of span aka s.base()
+ npages uintptr // number of pages in span
+
+ manualFreeList gclinkptr // list of free objects in mSpanManual spans
+
+ // freeindex is the slot index between 0 and nelems at which to begin scanning
+ // for the next free object in this span.
+ // Each allocation scans allocBits starting at freeindex until it encounters a 0
+ // indicating a free object. freeindex is then adjusted so that subsequent scans begin
+ // just past the newly discovered free object.
+ //
+ // If freeindex == nelems, this span has no free objects.
+ //
+ // allocBits is a bitmap of objects in this span.
+ // If n >= freeindex and allocBits[n/8] & (1<<(n%8)) is 0
+ // then object n is free;
+ // otherwise, object n is allocated. Bits starting at nelems are
+ // undefined and should never be referenced.
+ //
+ // Object n starts at address n*elemsize + (start << pageShift).
+ freeindex uintptr
+ // TODO: Look up nelems from sizeclass and remove this field if it
+ // helps performance.
+ nelems uintptr // number of objects in the span.
+
+ // Cache of the allocBits at freeindex. allocCache is shifted
+ // such that the lowest bit corresponds to the bit freeindex.
+ // allocCache holds the complement of allocBits, thus allowing
+ // ctz (count trailing zero) to use it directly.
+ // allocCache may contain bits beyond s.nelems; the caller must ignore
+ // these.
+ allocCache uint64
+
+ // allocBits and gcmarkBits hold pointers to a span's mark and
+ // allocation bits. The pointers are 8 byte aligned.
+ // There are four arenas where this data is held.
+ // free: Dirty arenas that are no longer accessed
+ // and can be reused.
+ // next: Holds information to be used in the next GC cycle.
+ // current: Information being used during this GC cycle.
+ // previous: Information being used during the last GC cycle.
+ // A new GC cycle starts with the call to finishsweep_m.
+ // finishsweep_m moves the previous arena to the free arena,
+ // the current arena to the previous arena, and
+ // the next arena to the current arena.
+ // The next arena is populated as the spans request
+ // memory to hold gcmarkBits for the next GC cycle as well
+ // as allocBits for newly allocated spans.
+ //
+ // The pointer arithmetic is done "by hand" instead of using
+ // arrays to avoid bounds checks along critical performance
+ // paths.
+ // The sweep will free the old allocBits and set allocBits to the
+ // gcmarkBits. The gcmarkBits are replaced with a fresh zeroed
+ // out memory.
+ allocBits *gcBits
+ gcmarkBits *gcBits
+
+ // sweep generation:
+ // if sweepgen == h->sweepgen - 2, the span needs sweeping
+ // if sweepgen == h->sweepgen - 1, the span is currently being swept
+ // if sweepgen == h->sweepgen, the span is swept and ready to use
+ // if sweepgen == h->sweepgen + 1, the span was cached before sweep began and is still cached, and needs sweeping
+ // if sweepgen == h->sweepgen + 3, the span was swept and then cached and is still cached
+ // h->sweepgen is incremented by 2 after every GC
+
+ sweepgen uint32
+ divMul uint32 // for divide by elemsize
+ allocCount uint16 // number of allocated objects
+ spanclass spanClass // size class and noscan (uint8)
+ state mSpanStateBox // mSpanInUse etc; accessed atomically (get/set methods)
+ needzero uint8 // needs to be zeroed before allocation
+ allocCountBeforeCache uint16 // a copy of allocCount that is stored just before this span is cached
+ elemsize uintptr // computed from sizeclass or from npages
+ limit uintptr // end of data in span
+ speciallock mutex // guards specials list
+ specials *special // linked list of special records sorted by offset.
+}
+
+func (s *mspan) base() uintptr {
+ return s.startAddr
+}
+
+func (s *mspan) layout() (size, n, total uintptr) {
+ total = s.npages << _PageShift
+ size = s.elemsize
+ if size > 0 {
+ n = total / size
+ }
+ return
+}
+
+// recordspan adds a newly allocated span to h.allspans.
+//
+// This only happens the first time a span is allocated from
+// mheap.spanalloc (it is not called when a span is reused).
+//
+// Write barriers are disallowed here because it can be called from
+// gcWork when allocating new workbufs. However, because it's an
+// indirect call from the fixalloc initializer, the compiler can't see
+// this.
+//
+// The heap lock must be held.
+//
+//go:nowritebarrierrec
+func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
+ h := (*mheap)(vh)
+ s := (*mspan)(p)
+
+ assertLockHeld(&h.lock)
+
+ if len(h.allspans) >= cap(h.allspans) {
+ n := 64 * 1024 / goarch.PtrSize
+ if n < cap(h.allspans)*3/2 {
+ n = cap(h.allspans) * 3 / 2
+ }
+ var new []*mspan
+ sp := (*slice)(unsafe.Pointer(&new))
+ sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys)
+ if sp.array == nil {
+ throw("runtime: cannot allocate memory")
+ }
+ sp.len = len(h.allspans)
+ sp.cap = n
+ if len(h.allspans) > 0 {
+ copy(new, h.allspans)
+ }
+ oldAllspans := h.allspans
+ *(*notInHeapSlice)(unsafe.Pointer(&h.allspans)) = *(*notInHeapSlice)(unsafe.Pointer(&new))
+ if len(oldAllspans) != 0 {
+ sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys)
+ }
+ }
+ h.allspans = h.allspans[:len(h.allspans)+1]
+ h.allspans[len(h.allspans)-1] = s
+}
+
+// A spanClass represents the size class and noscan-ness of a span.
+//
+// Each size class has a noscan spanClass and a scan spanClass. The
+// noscan spanClass contains only noscan objects, which do not contain
+// pointers and thus do not need to be scanned by the garbage
+// collector.
+type spanClass uint8
+
+const (
+ numSpanClasses = _NumSizeClasses << 1
+ tinySpanClass = spanClass(tinySizeClass<<1 | 1)
+)
+
+func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
+ return spanClass(sizeclass<<1) | spanClass(bool2int(noscan))
+}
+
+func (sc spanClass) sizeclass() int8 {
+ return int8(sc >> 1)
+}
+
+func (sc spanClass) noscan() bool {
+ return sc&1 != 0
+}
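+
+// A small worked example of the encoding (sketch):
+//
+//	sc := makeSpanClass(5, true) // 5<<1 | 1 == spanClass(11)
+//	sc.sizeclass()               // == 5
+//	sc.noscan()                  // == true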
+
+// arenaIndex returns the index into mheap_.arenas of the arena
+// containing metadata for p. This index combines an index into the
+// L1 map and an index into the L2 map and should be used as
+// mheap_.arenas[ai.l1()][ai.l2()].
+//
+// If p is outside the range of valid heap addresses, either l1() or
+// l2() will be out of bounds.
+//
+// It is nosplit because it's called by spanOf and several other
+// nosplit functions.
+//
+//go:nosplit
+func arenaIndex(p uintptr) arenaIdx {
+ return arenaIdx((p - arenaBaseOffset) / heapArenaBytes)
+}
+
+// arenaBase returns the low address of the region covered by heap
+// arena i.
+func arenaBase(i arenaIdx) uintptr {
+ return uintptr(i)*heapArenaBytes + arenaBaseOffset
+}
+
+type arenaIdx uint
+
+func (i arenaIdx) l1() uint {
+ if arenaL1Bits == 0 {
+ // Let the compiler optimize this away if there's no
+ // L1 map.
+ return 0
+ } else {
+ return uint(i) >> arenaL1Shift
+ }
+}
+
+func (i arenaIdx) l2() uint {
+ if arenaL1Bits == 0 {
+ return uint(i)
+ } else {
+ return uint(i) & (1<<arenaL2Bits - 1)
+ }
+}
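+
+// Illustratively, assuming arenaL1Shift == arenaL2Bits, an arenaIdx i
+// with an L1 map splits as
+//
+//	i == i.l1()<<arenaL2Bits | i.l2()
+//
+// and with arenaL1Bits == 0 the whole index is the L2 index.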
+
+// inheap reports whether b is a pointer into a (potentially dead) heap object.
+// It returns false for pointers into mSpanManual spans.
+// Non-preemptible because it is used by write barriers.
+//
+//go:nowritebarrier
+//go:nosplit
+func inheap(b uintptr) bool {
+ return spanOfHeap(b) != nil
+}
+
+// inHeapOrStack is a variant of inheap that returns true for pointers
+// into any allocated heap span.
+//
+//go:nowritebarrier
+//go:nosplit
+func inHeapOrStack(b uintptr) bool {
+ s := spanOf(b)
+ if s == nil || b < s.base() {
+ return false
+ }
+ switch s.state.get() {
+ case mSpanInUse, mSpanManual:
+ return b < s.limit
+ default:
+ return false
+ }
+}
+
+// spanOf returns the span of p. If p does not point into the heap
+// arena or no span has ever contained p, spanOf returns nil.
+//
+// If p does not point to allocated memory, this may return a non-nil
+// span that does *not* contain p. If this is a possibility, the
+// caller should either call spanOfHeap or check the span bounds
+// explicitly.
+//
+// Must be nosplit because it has callers that are nosplit.
+//
+//go:nosplit
+func spanOf(p uintptr) *mspan {
+ // This function looks big, but we use a lot of constant
+ // folding around arenaL1Bits to get it under the inlining
+ // budget. Also, many of the checks here are safety checks
+ // that Go needs to do anyway, so the generated code is quite
+ // short.
+ ri := arenaIndex(p)
+ if arenaL1Bits == 0 {
+ // If there's no L1, then ri.l1() can't be out of bounds but ri.l2() can.
+ if ri.l2() >= uint(len(mheap_.arenas[0])) {
+ return nil
+ }
+ } else {
+ // If there's an L1, then ri.l1() can be out of bounds but ri.l2() can't.
+ if ri.l1() >= uint(len(mheap_.arenas)) {
+ return nil
+ }
+ }
+ l2 := mheap_.arenas[ri.l1()]
+ if arenaL1Bits != 0 && l2 == nil { // Should never happen if there's no L1.
+ return nil
+ }
+ ha := l2[ri.l2()]
+ if ha == nil {
+ return nil
+ }
+ return ha.spans[(p/pageSize)%pagesPerArena]
+}
+
+// spanOfUnchecked is equivalent to spanOf, but the caller must ensure
+// that p points into an allocated heap arena.
+//
+// Must be nosplit because it has callers that are nosplit.
+//
+//go:nosplit
+func spanOfUnchecked(p uintptr) *mspan {
+ ai := arenaIndex(p)
+ return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena]
+}
+
+// spanOfHeap is like spanOf, but returns nil if p does not point to a
+// heap object.
+//
+// Must be nosplit because it has callers that are nosplit.
+//
+//go:nosplit
+func spanOfHeap(p uintptr) *mspan {
+ s := spanOf(p)
+ // s is nil if it's never been allocated. Otherwise, we check
+ // its state first because we don't trust this pointer, so we
+ // have to synchronize with span initialization. Then, it's
+ // still possible we picked up a stale span pointer, so we
+ // have to check the span's bounds.
+ if s == nil || s.state.get() != mSpanInUse || p < s.base() || p >= s.limit {
+ return nil
+ }
+ return s
+}
+
+// pageIndexOf returns the arena, page index, and page mask for pointer p.
+// The caller must ensure p is in the heap.
+func pageIndexOf(p uintptr) (arena *heapArena, pageIdx uintptr, pageMask uint8) {
+ ai := arenaIndex(p)
+ arena = mheap_.arenas[ai.l1()][ai.l2()]
+ pageIdx = ((p / pageSize) / 8) % uintptr(len(arena.pageInUse))
+ pageMask = byte(1 << ((p / pageSize) % 8))
+ return
+}
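pageIndexOf reduces a heap address to a byte index and a one-bit mask in a per-arena byte bitmap; callers such as allocSpan and freeSpanLocked below then set or clear that bit atomically. The same index/mask arithmetic over a plain byte slice, as a quick sketch:

```go
package main

import "fmt"

// bitIndex maps a page number to a byte index and a single-bit mask,
// the same arithmetic pageIndexOf applies to heapArena.pageInUse.
func bitIndex(page uintptr) (idx uintptr, mask uint8) {
	return page / 8, 1 << (page % 8)
}

func main() {
	bitmap := make([]uint8, 4) // room for 32 pages
	idx, mask := bitIndex(13)

	bitmap[idx] |= mask                // mark page 13 in use (allocSpan uses atomic.Or8)
	fmt.Println(bitmap[idx]&mask != 0) // true
	bitmap[idx] &^= mask               // clear it (freeSpanLocked uses atomic.And8 with ^mask)
	fmt.Println(bitmap[idx]&mask != 0) // false
}
```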
+
+// Initialize the heap.
+func (h *mheap) init() {
+ lockInit(&h.lock, lockRankMheap)
+ lockInit(&h.speciallock, lockRankMheapSpecial)
+
+ h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
+ h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
+ h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
+ h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
+ h.specialReachableAlloc.init(unsafe.Sizeof(specialReachable{}), nil, nil, &memstats.other_sys)
+ h.arenaHintAlloc.init(unsafe.Sizeof(arenaHint{}), nil, nil, &memstats.other_sys)
+
+ // Don't zero mspan allocations. Background sweeping can
+ // inspect a span concurrently with allocating it, so it's
+ // important that the span's sweepgen survive across freeing
+ // and re-allocating a span to prevent background sweeping
+ // from improperly cas'ing it from 0.
+ //
+ // This is safe because mspan contains no heap pointers.
+ h.spanalloc.zero = false
+
+ // h->mapcache needs no init
+
+ for i := range h.central {
+ h.central[i].mcentral.init(spanClass(i))
+ }
+
+ h.pages.init(&h.lock, &memstats.gcMiscSys)
+}
+
+// reclaim sweeps and reclaims at least npage pages into the heap.
+// It is called before allocating npage pages to keep growth in check.
+//
+// reclaim implements the page-reclaimer half of the sweeper.
+//
+// h.lock must NOT be held.
+func (h *mheap) reclaim(npage uintptr) {
+ // TODO(austin): Half of the time spent freeing spans is in
+ // locking/unlocking the heap (even with low contention). We
+ // could make the slow path here several times faster by
+ // batching heap frees.
+
+ // Bail early if there's no more reclaim work.
+ if h.reclaimIndex.Load() >= 1<<63 {
+ return
+ }
+
+ // Disable preemption so the GC can't start while we're
+ // sweeping, so we can read h.sweepArenas, and so
+ // traceGCSweepStart/Done pair on the P.
+ mp := acquirem()
+
+ if trace.enabled {
+ traceGCSweepStart()
+ }
+
+ arenas := h.sweepArenas
+ locked := false
+ for npage > 0 {
+ // Pull from accumulated credit first.
+ if credit := h.reclaimCredit.Load(); credit > 0 {
+ take := credit
+ if take > npage {
+ // Take only what we need.
+ take = npage
+ }
+ if h.reclaimCredit.CompareAndSwap(credit, credit-take) {
+ npage -= take
+ }
+ continue
+ }
+
+ // Claim a chunk of work.
+ idx := uintptr(h.reclaimIndex.Add(pagesPerReclaimerChunk) - pagesPerReclaimerChunk)
+ if idx/pagesPerArena >= uintptr(len(arenas)) {
+ // Page reclaiming is done.
+ h.reclaimIndex.Store(1 << 63)
+ break
+ }
+
+ if !locked {
+ // Lock the heap for reclaimChunk.
+ lock(&h.lock)
+ locked = true
+ }
+
+ // Scan this chunk.
+ nfound := h.reclaimChunk(arenas, idx, pagesPerReclaimerChunk)
+ if nfound <= npage {
+ npage -= nfound
+ } else {
+ // Put spare pages toward global credit.
+ h.reclaimCredit.Add(nfound - npage)
+ npage = 0
+ }
+ }
+ if locked {
+ unlock(&h.lock)
+ }
+
+ if trace.enabled {
+ traceGCSweepDone()
+ }
+ releasem(mp)
+}
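Two lock-free devices drive this loop: a shared credit pool drained by compare-and-swap (so pages reclaimed beyond one caller's need benefit the next) and a monotonically advancing claim index that partitions the sweep work among callers. A self-contained sketch of that claiming pattern using sync/atomic; the chunk size and work function are placeholders:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

var (
	credit     atomic.Uintptr // pages reclaimed beyond what earlier callers needed
	claimIndex atomic.Uintptr // next page index to sweep
)

const chunk = 512 // placeholder for pagesPerReclaimerChunk

// reclaim drains credit first, then claims fixed-size chunks of work.
func reclaim(npages uintptr, doChunk func(idx, n uintptr) uintptr, total uintptr) {
	for npages > 0 {
		if c := credit.Load(); c > 0 {
			take := c
			if take > npages {
				take = npages
			}
			if credit.CompareAndSwap(c, c-take) {
				npages -= take
			}
			continue // CAS lost: re-read the credit and retry
		}
		idx := claimIndex.Add(chunk) - chunk
		if idx >= total {
			return // no work left
		}
		if found := doChunk(idx, chunk); found <= npages {
			npages -= found
		} else {
			credit.Add(found - npages) // bank the surplus for other callers
			npages = 0
		}
	}
}

func main() {
	reclaim(1000, func(idx, n uintptr) uintptr { return n }, 1<<20)
	fmt.Println(credit.Load(), claimIndex.Load()) // 24 1024
}
```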
+
+// reclaimChunk sweeps unmarked spans that start at page indexes [pageIdx, pageIdx+n).
+// It returns the number of pages returned to the heap.
+//
+// h.lock must be held and the caller must be non-preemptible. Note: h.lock may be
+// temporarily unlocked and re-locked in order to do sweeping or if tracing is
+// enabled.
+func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
+ // The heap lock must be held because this accesses the
+ // heapArena.spans arrays using potentially non-live pointers.
+ // In particular, if a span were freed and merged concurrently
+ // with this probing heapArena.spans, it would be possible to
+ // observe arbitrary, stale span pointers.
+ assertLockHeld(&h.lock)
+
+ n0 := n
+ var nFreed uintptr
+ sl := sweep.active.begin()
+ if !sl.valid {
+ return 0
+ }
+ for n > 0 {
+ ai := arenas[pageIdx/pagesPerArena]
+ ha := h.arenas[ai.l1()][ai.l2()]
+
+ // Get a chunk of the bitmap to work on.
+ arenaPage := uint(pageIdx % pagesPerArena)
+ inUse := ha.pageInUse[arenaPage/8:]
+ marked := ha.pageMarks[arenaPage/8:]
+ if uintptr(len(inUse)) > n/8 {
+ inUse = inUse[:n/8]
+ marked = marked[:n/8]
+ }
+
+ // Scan this bitmap chunk for spans that are in-use
+ // but have no marked objects on them.
+ for i := range inUse {
+ inUseUnmarked := atomic.Load8(&inUse[i]) &^ marked[i]
+ if inUseUnmarked == 0 {
+ continue
+ }
+
+ for j := uint(0); j < 8; j++ {
+ if inUseUnmarked&(1<<j) != 0 {
+ s := ha.spans[arenaPage+uint(i)*8+j]
+ if s, ok := sl.tryAcquire(s); ok {
+ npages := s.npages
+ unlock(&h.lock)
+ if s.sweep(false) {
+ nFreed += npages
+ }
+ lock(&h.lock)
+ // Reload inUse. It's possible nearby
+ // spans were freed when we dropped the
+ // lock and we don't want to get stale
+ // pointers from the spans array.
+ inUseUnmarked = atomic.Load8(&inUse[i]) &^ marked[i]
+ }
+ }
+ }
+ }
+
+ // Advance.
+ pageIdx += uintptr(len(inUse) * 8)
+ n -= uintptr(len(inUse) * 8)
+ }
+ sweep.active.end(sl)
+ if trace.enabled {
+ unlock(&h.lock)
+ // Account for pages scanned but not reclaimed.
+ traceGCSweepSpan((n0 - nFreed) * pageSize)
+ lock(&h.lock)
+ }
+
+ assertLockHeld(&h.lock) // Must be locked on return.
+ return nFreed
+}
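The central bit trick above is inUse &^ marked: a set bit is a page that is in use but whose span had no marked objects, i.e. a candidate for sweeping. A two-line illustration:

```go
package main

import "fmt"

func main() {
	var inUse, marked uint8 = 0b1011_0110, 0b0011_0010
	candidates := inUse &^ marked    // in use AND NOT marked
	fmt.Printf("%08b\n", candidates) // 10000100: bits 2 and 7 are sweepable
}
```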
+
+// spanAllocType represents the type of allocation to make, or
+// the type of allocation to be freed.
+type spanAllocType uint8
+
+const (
+ spanAllocHeap spanAllocType = iota // heap span
+ spanAllocStack // stack span
+ spanAllocPtrScalarBits // unrolled GC prog bitmap span
+ spanAllocWorkBuf // work buf span
+)
+
+// manual returns true if the span allocation is manually managed.
+func (s spanAllocType) manual() bool {
+ return s != spanAllocHeap
+}
+
+// alloc allocates a new span of npage pages from the GC'd heap.
+//
+// spanclass indicates the span's size class and scannability.
+//
+// Returns a span that has been fully initialized. span.needzero indicates
+// whether the span has been zeroed. Note that it may not be.
+func (h *mheap) alloc(npages uintptr, spanclass spanClass) *mspan {
+ // Don't do any operations that lock the heap on the G stack.
+ // It might trigger stack growth, and the stack growth code needs
+ // to be able to allocate heap.
+ var s *mspan
+ systemstack(func() {
+ // To prevent excessive heap growth, before allocating n pages
+ // we need to sweep and reclaim at least n pages.
+ if !isSweepDone() {
+ h.reclaim(npages)
+ }
+ s = h.allocSpan(npages, spanAllocHeap, spanclass)
+ })
+ return s
+}
+
+// allocManual allocates a manually-managed span of npage pages.
+// allocManual returns nil if allocation fails.
+//
+// allocManual adds the bytes used to *stat, which should be a
+// memstats in-use field. Unlike allocations in the GC'd heap, the
+// allocation does *not* count toward heapInUse.
+//
+// The memory backing the returned span may not be zeroed if
+// span.needzero is set.
+//
+// allocManual must be called on the system stack because it may
+// acquire the heap lock via allocSpan. See mheap for details.
+//
+// If new code is written to call allocManual, do NOT use an
+// existing spanAllocType value and instead declare a new one.
+//
+//go:systemstack
+func (h *mheap) allocManual(npages uintptr, typ spanAllocType) *mspan {
+ if !typ.manual() {
+ throw("manual span allocation called with non-manually-managed type")
+ }
+ return h.allocSpan(npages, typ, 0)
+}
+
+// setSpans modifies the span map so [spanOf(base), spanOf(base+npage*pageSize))
+// is s.
+func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
+ p := base / pageSize
+ ai := arenaIndex(base)
+ ha := h.arenas[ai.l1()][ai.l2()]
+ for n := uintptr(0); n < npage; n++ {
+ i := (p + n) % pagesPerArena
+ if i == 0 {
+ ai = arenaIndex(base + n*pageSize)
+ ha = h.arenas[ai.l1()][ai.l2()]
+ }
+ ha.spans[i] = s
+ }
+}
+
+// allocNeedsZero checks if the region of address space [base, base+npage*pageSize),
+// assumed to be allocated, needs to be zeroed, updating heap arena metadata for
+// future allocations.
+//
+// This must be called each time pages are allocated from the heap, even if the page
+// allocator can otherwise prove the memory it's allocating is already zero because
+// it's fresh from the operating system. It updates heapArena metadata that is
+// critical for future page allocations.
+//
+// There are no locking constraints on this method.
+func (h *mheap) allocNeedsZero(base, npage uintptr) (needZero bool) {
+ for npage > 0 {
+ ai := arenaIndex(base)
+ ha := h.arenas[ai.l1()][ai.l2()]
+
+ zeroedBase := atomic.Loaduintptr(&ha.zeroedBase)
+ arenaBase := base % heapArenaBytes
+ if arenaBase < zeroedBase {
+ // We extended into the non-zeroed part of the
+ // arena, so this region needs to be zeroed before use.
+ //
+ // zeroedBase is monotonically increasing, so if we see this now then
+ // we can be sure we need to zero this memory region.
+ //
+ // We still need to update zeroedBase for this arena, and
+ // potentially more arenas.
+ needZero = true
+ }
+ // We may observe arenaBase > zeroedBase if we're racing with one or more
+ // allocations which are acquiring memory directly before us in the address
+ // space. But, because we know no one else is acquiring *this* memory, it's
+ // still safe to not zero.
+
+ // Compute how far we extend into the arena, capped
+ // at heapArenaBytes.
+ arenaLimit := arenaBase + npage*pageSize
+ if arenaLimit > heapArenaBytes {
+ arenaLimit = heapArenaBytes
+ }
+ // Increase ha.zeroedBase so it's >= arenaLimit.
+ // We may be racing with other updates.
+ for arenaLimit > zeroedBase {
+ if atomic.Casuintptr(&ha.zeroedBase, zeroedBase, arenaLimit) {
+ break
+ }
+ zeroedBase = atomic.Loaduintptr(&ha.zeroedBase)
+ // Double check basic conditions of zeroedBase.
+ if zeroedBase <= arenaLimit && zeroedBase > arenaBase {
+ // The zeroedBase moved into the space we were trying to
+ // claim. That's very bad, and indicates someone allocated
+ // the same region we did.
+ throw("potentially overlapping in-use allocations detected")
+ }
+ }
+
+ // Move base forward and subtract from npage to move into
+ // the next arena, or finish.
+ base += arenaLimit - arenaBase
+ npage -= (arenaLimit - arenaBase) / pageSize
+ }
+ return
+}
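zeroedBase acts as a monotonically increasing watermark per arena: offsets below it may have been handed out before (and so may be dirty), while offsets at or above it are still zero from the OS. A standalone sketch of the claim-and-advance pattern, simplified to a single arena with illustrative names:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// zeroedBase: offsets below it may have been used before (dirty);
// offsets at or above it are still zero, fresh from the OS.
var zeroedBase atomic.Uintptr

// claim records that [base, limit) is now handed out and reports
// whether the caller must zero it.
func claim(base, limit uintptr) (needZero bool) {
	zb := zeroedBase.Load()
	if base < zb {
		needZero = true // overlaps previously used space
	}
	// Advance the watermark so it is >= limit; it only ever grows.
	for limit > zb {
		if zeroedBase.CompareAndSwap(zb, limit) {
			break
		}
		zb = zeroedBase.Load()
	}
	return
}

func main() {
	fmt.Println(claim(0, 4096))    // false: fresh memory
	fmt.Println(claim(2048, 8192)) // true: overlaps the first claim
}
```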
+
+// tryAllocMSpan attempts to allocate an mspan object from
+// the P-local cache, but may fail.
+//
+// h.lock need not be held.
+//
+// The caller must ensure that its P won't change underneath
+// it during this function. Currently we enforce this by
+// requiring that the function run on the system stack, because that's
+// the only place it is used now. In the future, this requirement
+// may be relaxed if its use is necessary elsewhere.
+//
+//go:systemstack
+func (h *mheap) tryAllocMSpan() *mspan {
+ pp := getg().m.p.ptr()
+ // If we don't have a p or the cache is empty, we can't do
+ // anything here.
+ if pp == nil || pp.mspancache.len == 0 {
+ return nil
+ }
+ // Pull off the last entry in the cache.
+ s := pp.mspancache.buf[pp.mspancache.len-1]
+ pp.mspancache.len--
+ return s
+}
+
+// allocMSpanLocked allocates an mspan object.
+//
+// h.lock must be held.
+//
+// allocMSpanLocked must be called on the system stack because
+// its caller holds the heap lock. See mheap for details.
+// Running on the system stack also ensures that we won't
+// switch Ps during this function. See tryAllocMSpan for details.
+//
+//go:systemstack
+func (h *mheap) allocMSpanLocked() *mspan {
+ assertLockHeld(&h.lock)
+
+ pp := getg().m.p.ptr()
+ if pp == nil {
+ // We don't have a p so just do the normal thing.
+ return (*mspan)(h.spanalloc.alloc())
+ }
+ // Refill the cache if necessary.
+ if pp.mspancache.len == 0 {
+ const refillCount = len(pp.mspancache.buf) / 2
+ for i := 0; i < refillCount; i++ {
+ pp.mspancache.buf[i] = (*mspan)(h.spanalloc.alloc())
+ }
+ pp.mspancache.len = refillCount
+ }
+ // Pull off the last entry in the cache.
+ s := pp.mspancache.buf[pp.mspancache.len-1]
+ pp.mspancache.len--
+ return s
+}
+
+// freeMSpanLocked frees an mspan object.
+//
+// h.lock must be held.
+//
+// freeMSpanLocked must be called on the system stack because
+// its caller holds the heap lock. See mheap for details.
+// Running on the system stack also ensures that we won't
+// switch Ps during this function. See tryAllocMSpan for details.
+//
+//go:systemstack
+func (h *mheap) freeMSpanLocked(s *mspan) {
+ assertLockHeld(&h.lock)
+
+ pp := getg().m.p.ptr()
+ // First try to free the mspan directly to the cache.
+ if pp != nil && pp.mspancache.len < len(pp.mspancache.buf) {
+ pp.mspancache.buf[pp.mspancache.len] = s
+ pp.mspancache.len++
+ return
+ }
+ // Failing that (or if we don't have a p), just free it to
+ // the heap.
+ h.spanalloc.free(unsafe.Pointer(s))
+}
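Together, tryAllocMSpan, allocMSpanLocked, and freeMSpanLocked form a fixed-capacity per-P object cache: pop locally when possible, refill half the buffer from the shared allocator when empty, and push frees back locally until the buffer is full. A generic sketch of that shape (no locking shown; in the runtime, the system-stack requirement provides the needed stability):

```go
package main

import "fmt"

// cache is a fixed-capacity LIFO of recycled objects, one per worker.
type cache[T any] struct {
	buf [128]*T
	len int
}

// tryGet pops from the local buffer, or reports a miss with nil.
func (c *cache[T]) tryGet() *T {
	if c.len == 0 {
		return nil
	}
	c.len--
	return c.buf[c.len]
}

// getSlow refills half the buffer from a shared source, then pops.
func (c *cache[T]) getSlow(alloc func() *T) *T {
	if c.len == 0 {
		for i := 0; i < len(c.buf)/2; i++ {
			c.buf[i] = alloc()
		}
		c.len = len(c.buf) / 2
	}
	c.len--
	return c.buf[c.len]
}

// put returns an object to the cache, or reports false if it is full.
func (c *cache[T]) put(p *T) bool {
	if c.len == len(c.buf) {
		return false // caller frees to the shared allocator instead
	}
	c.buf[c.len] = p
	c.len++
	return true
}

func main() {
	var c cache[int]
	fmt.Println(c.tryGet() == nil) // true: cache starts empty
	p := c.getSlow(func() *int { return new(int) })
	fmt.Println(c.put(p)) // true: room in the buffer
}
```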
+
+// allocSpan allocates an mspan which owns npages worth of memory.
+//
+// If typ.manual() == false, allocSpan allocates a heap span of class spanclass
+// and updates heap accounting. If manual == true, allocSpan allocates a
+// manually-managed span (spanclass is ignored), and the caller is
+// responsible for any accounting related to its use of the span. Either
+// way, allocSpan will atomically add the bytes in the newly allocated
+// span to *sysStat.
+//
+// The returned span is fully initialized.
+//
+// h.lock must not be held.
+//
+// allocSpan must be called on the system stack both because it acquires
+// the heap lock and because it must block GC transitions.
+//
+//go:systemstack
+func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass) (s *mspan) {
+ // Function-global state.
+ gp := getg()
+ base, scav := uintptr(0), uintptr(0)
+ growth := uintptr(0)
+
+ // On some platforms we need to provide physical page aligned stack
+ // allocations. Where the page size is less than the physical page
+ // size, we already manage to do this by default.
+ needPhysPageAlign := physPageAlignedStacks && typ == spanAllocStack && pageSize < physPageSize
+
+ // If the allocation is small enough, try the page cache!
+ // The page cache does not support aligned allocations, so we cannot use
+ // it if we need to provide a physical page aligned stack allocation.
+ pp := gp.m.p.ptr()
+ if !needPhysPageAlign && pp != nil && npages < pageCachePages/4 {
+ c := &pp.pcache
+
+ // If the cache is empty, refill it.
+ if c.empty() {
+ lock(&h.lock)
+ *c = h.pages.allocToCache()
+ unlock(&h.lock)
+ }
+
+ // Try to allocate from the cache.
+ base, scav = c.alloc(npages)
+ if base != 0 {
+ s = h.tryAllocMSpan()
+ if s != nil {
+ goto HaveSpan
+ }
+ // We have a base but no mspan, so we need
+ // to lock the heap.
+ }
+ }
+
+ // For one reason or another, we couldn't get the
+ // whole job done without the heap lock.
+ lock(&h.lock)
+
+ if needPhysPageAlign {
+ // Overallocate by a physical page to allow for later alignment.
+ extraPages := physPageSize / pageSize
+
+ // Find a big enough region first, but then only allocate the
+ // aligned portion. We can't just allocate and then free the
+ // edges because we need to account for scavenged memory, and
+ // that's difficult with alloc.
+ //
+ // Note that we skip updates to searchAddr here. It's OK if
+ // it's stale and higher than normal; it'll operate correctly,
+ // just come with a performance cost.
+ base, _ = h.pages.find(npages + extraPages)
+ if base == 0 {
+ var ok bool
+ growth, ok = h.grow(npages + extraPages)
+ if !ok {
+ unlock(&h.lock)
+ return nil
+ }
+ base, _ = h.pages.find(npages + extraPages)
+ if base == 0 {
+ throw("grew heap, but no adequate free space found")
+ }
+ }
+ base = alignUp(base, physPageSize)
+ scav = h.pages.allocRange(base, npages)
+ }
+ if base == 0 {
+ // Try to acquire a base address.
+ base, scav = h.pages.alloc(npages)
+ if base == 0 {
+ var ok bool
+ growth, ok = h.grow(npages)
+ if !ok {
+ unlock(&h.lock)
+ return nil
+ }
+ base, scav = h.pages.alloc(npages)
+ if base == 0 {
+ throw("grew heap, but no adequate free space found")
+ }
+ }
+ }
+ if s == nil {
+ // We failed to get an mspan earlier, so grab
+ // one now that we have the heap lock.
+ s = h.allocMSpanLocked()
+ }
+ unlock(&h.lock)
+
+HaveSpan:
+ // At this point, both s != nil and base != 0, and the heap
+ // lock is no longer held. Initialize the span.
+ s.init(base, npages)
+ if h.allocNeedsZero(base, npages) {
+ s.needzero = 1
+ }
+ nbytes := npages * pageSize
+ if typ.manual() {
+ s.manualFreeList = 0
+ s.nelems = 0
+ s.limit = s.base() + s.npages*pageSize
+ s.state.set(mSpanManual)
+ } else {
+ // We must set span properties before the span is published anywhere
+ // since we're not holding the heap lock.
+ s.spanclass = spanclass
+ if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
+ s.elemsize = nbytes
+ s.nelems = 1
+ s.divMul = 0
+ } else {
+ s.elemsize = uintptr(class_to_size[sizeclass])
+ s.nelems = nbytes / s.elemsize
+ s.divMul = class_to_divmagic[sizeclass]
+ }
+
+ // Initialize mark and allocation structures.
+ s.freeindex = 0
+ s.allocCache = ^uint64(0) // all 1s indicating all free.
+ s.gcmarkBits = newMarkBits(s.nelems)
+ s.allocBits = newAllocBits(s.nelems)
+
+ // It's safe to access h.sweepgen without the heap lock because it's
+ // only ever updated with the world stopped and we run on the
+ // systemstack which blocks a STW transition.
+ atomic.Store(&s.sweepgen, h.sweepgen)
+
+ // Now that the span is filled in, set its state. This
+ // is a publication barrier for the other fields in
+ // the span. While valid pointers into this span
+ // should never be visible until the span is returned,
+ // if the garbage collector finds an invalid pointer,
+ // access to the span may race with initialization of
+ // the span. We resolve this race by atomically
+ // setting the state after the span is fully
+ // initialized, and atomically checking the state in
+ // any situation where a pointer is suspect.
+ s.state.set(mSpanInUse)
+ }
+
+ // Decide if we need to scavenge in response to what we just allocated.
+ // Specifically, we track the maximum amount of memory to scavenge of all
+ // the alternatives below, assuming that the maximum satisfies *all*
+ // conditions we check (e.g. if we need to scavenge X to satisfy the
+ // memory limit and Y to satisfy heap-growth scavenging, and Y > X, then
+ // it's fine to pick Y, because the memory limit is still satisfied).
+ //
+ // It's fine to do this after allocating because we expect any scavenged
+ // pages not to get touched until we return. Simultaneously, it's important
+ // to do this before calling sysUsed because that may commit address space.
+ bytesToScavenge := uintptr(0)
+ if limit := gcController.memoryLimit.Load(); go119MemoryLimitSupport && !gcCPULimiter.limiting() {
+ // Assist with scavenging to maintain the memory limit by the amount
+ // that we expect to page in.
+ inuse := gcController.mappedReady.Load()
+ // Be careful about overflow, especially with uintptrs. Even on 32-bit platforms
+ // someone can set a really big memory limit that isn't maxInt64.
+ if uint64(scav)+inuse > uint64(limit) {
+ bytesToScavenge = uintptr(uint64(scav) + inuse - uint64(limit))
+ }
+ }
+ if goal := scavenge.gcPercentGoal.Load(); goal != ^uint64(0) && growth > 0 {
+ // We just caused a heap growth, so scavenge down what will soon be used.
+ // By scavenging inline we deal with the failure to allocate out of
+ // memory fragments by scavenging the memory fragments that are least
+ // likely to be re-used.
+ //
+ // Only bother with this because we're not using a memory limit. We don't
+ // care about heap growths as long as we're under the memory limit, and the
+ // previous check for scavenging already handles that.
+ if retained := heapRetained(); retained+uint64(growth) > goal {
+ // The scavenging algorithm requires the heap lock to be dropped so it
+ // can acquire it only sparingly. This is a potentially expensive operation
+ // so it frees up other goroutines to allocate in the meanwhile. In fact,
+ // they can make use of the growth we just created.
+ todo := growth
+ if overage := uintptr(retained + uint64(growth) - goal); todo > overage {
+ todo = overage
+ }
+ if todo > bytesToScavenge {
+ bytesToScavenge = todo
+ }
+ }
+ }
+ // There are a few very limited circumstances where we won't have a P here.
+ // It's OK to simply skip scavenging in these cases. Something else will notice
+ // and pick up the tab.
+ if pp != nil && bytesToScavenge > 0 {
+ // Measure how long we spent scavenging and add that measurement to the assist
+ // time so we can track it for the GC CPU limiter.
+ //
+ // Limiter event tracking might be disabled if we end up here
+ // while on a mark worker.
+ start := nanotime()
+ track := pp.limiterEvent.start(limiterEventScavengeAssist, start)
+
+ // Scavenge, but back out if the limiter turns on.
+ h.pages.scavenge(bytesToScavenge, func() bool {
+ return gcCPULimiter.limiting()
+ })
+
+ // Finish up accounting.
+ now := nanotime()
+ if track {
+ pp.limiterEvent.stop(limiterEventScavengeAssist, now)
+ }
+ h.pages.scav.assistTime.Add(now - start)
+ }
+
+ // Commit and account for any scavenged memory that the span now owns.
+ if scav != 0 {
+ // sysUsed all the pages that are actually available
+ // in the span since some of them might be scavenged.
+ sysUsed(unsafe.Pointer(base), nbytes, scav)
+ gcController.heapReleased.add(-int64(scav))
+ }
+ // Update stats.
+ gcController.heapFree.add(-int64(nbytes - scav))
+ if typ == spanAllocHeap {
+ gcController.heapInUse.add(int64(nbytes))
+ }
+ // Update consistent stats.
+ stats := memstats.heapStats.acquire()
+ atomic.Xaddint64(&stats.committed, int64(scav))
+ atomic.Xaddint64(&stats.released, -int64(scav))
+ switch typ {
+ case spanAllocHeap:
+ atomic.Xaddint64(&stats.inHeap, int64(nbytes))
+ case spanAllocStack:
+ atomic.Xaddint64(&stats.inStacks, int64(nbytes))
+ case spanAllocPtrScalarBits:
+ atomic.Xaddint64(&stats.inPtrScalarBits, int64(nbytes))
+ case spanAllocWorkBuf:
+ atomic.Xaddint64(&stats.inWorkBufs, int64(nbytes))
+ }
+ memstats.heapStats.release()
+
+ // Publish the span in various locations.
+
+ // This is safe to call without the lock held because the slots
+ // related to this span will only ever be read or modified by
+ // this thread until pointers into the span are published (and
+ // we execute a publication barrier at the end of this function
+ // before that happens) or pageInUse is updated.
+ h.setSpans(s.base(), npages, s)
+
+ if !typ.manual() {
+ // Mark in-use span in arena page bitmap.
+ //
+ // This publishes the span to the page sweeper, so
+ // it's imperative that the span be completely initialized
+ // prior to this line.
+ arena, pageIdx, pageMask := pageIndexOf(s.base())
+ atomic.Or8(&arena.pageInUse[pageIdx], pageMask)
+
+ // Update related page sweeper stats.
+ h.pagesInUse.Add(int64(npages))
+ }
+
+ // Make sure the newly allocated span will be observed
+ // by the GC before pointers into the span are published.
+ publicationBarrier()
+
+ return s
+}
+
+// Try to add at least npage pages of memory to the heap,
+// returning how much the heap grew by and whether it worked.
+//
+// h.lock must be held.
+func (h *mheap) grow(npage uintptr) (uintptr, bool) {
+ assertLockHeld(&h.lock)
+
+ // We must grow the heap in whole palloc chunks.
+ // We call sysMap below but note that because we
+ // round up to pallocChunkPages which is on the order
+ // of MiB (generally >= to the huge page size) we
+ // won't be calling it too much.
+ ask := alignUp(npage, pallocChunkPages) * pageSize
+
+ totalGrowth := uintptr(0)
+ // This may overflow because ask could be very large
+ // and is otherwise unrelated to h.curArena.base.
+ end := h.curArena.base + ask
+ nBase := alignUp(end, physPageSize)
+ if nBase > h.curArena.end || /* overflow */ end < h.curArena.base {
+ // Not enough room in the current arena. Allocate more
+ // arena space. This may not be contiguous with the
+ // current arena, so we have to request the full ask.
+ av, asize := h.sysAlloc(ask)
+ if av == nil {
+ inUse := gcController.heapFree.load() + gcController.heapReleased.load() + gcController.heapInUse.load()
+ print("runtime: out of memory: cannot allocate ", ask, "-byte block (", inUse, " in use)\n")
+ return 0, false
+ }
+
+ if uintptr(av) == h.curArena.end {
+ // The new space is contiguous with the old
+ // space, so just extend the current space.
+ h.curArena.end = uintptr(av) + asize
+ } else {
+ // The new space is discontiguous. Track what
+ // remains of the current space and switch to
+ // the new space. This should be rare.
+ if size := h.curArena.end - h.curArena.base; size != 0 {
+ // Transition this space from Reserved to Prepared and mark it
+ // as released since we'll be able to start using it after updating
+ // the page allocator and releasing the lock at any time.
+ sysMap(unsafe.Pointer(h.curArena.base), size, &gcController.heapReleased)
+ // Update stats.
+ stats := memstats.heapStats.acquire()
+ atomic.Xaddint64(&stats.released, int64(size))
+ memstats.heapStats.release()
+ // Update the page allocator's structures to make this
+ // space ready for allocation.
+ h.pages.grow(h.curArena.base, size)
+ totalGrowth += size
+ }
+ // Switch to the new space.
+ h.curArena.base = uintptr(av)
+ h.curArena.end = uintptr(av) + asize
+ }
+
+ // Recalculate nBase.
+ // We know this won't overflow, because sysAlloc returned
+ // a valid region starting at h.curArena.base which is at
+ // least ask bytes in size.
+ nBase = alignUp(h.curArena.base+ask, physPageSize)
+ }
+
+ // Grow into the current arena.
+ v := h.curArena.base
+ h.curArena.base = nBase
+
+ // Transition the space we're going to use from Reserved to Prepared.
+ //
+ // The allocation is always aligned to the heap arena
+ // size which is always > physPageSize, so it's safe to
+ // just add directly to heapReleased.
+ sysMap(unsafe.Pointer(v), nBase-v, &gcController.heapReleased)
+
+ // The memory just allocated counts as both released
+ // and idle, even though it's not yet backed by spans.
+ stats := memstats.heapStats.acquire()
+ atomic.Xaddint64(&stats.released, int64(nBase-v))
+ memstats.heapStats.release()
+
+ // Update the page allocator's structures to make this
+ // space ready for allocation.
+ h.pages.grow(v, nBase-v)
+ totalGrowth += nBase - v
+ return totalGrowth, true
+}
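grow rounds twice with power-of-two alignment: the request up to whole palloc chunks, and the new arena break up to a physical page. alignUp and alignDown are the usual mask idioms; a small worked example:

```go
package main

import "fmt"

// alignUp rounds n up to a multiple of a; a must be a power of two.
func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

// alignDown rounds n down to a multiple of a; a must be a power of two.
func alignDown(n, a uintptr) uintptr { return n &^ (a - 1) }

func main() {
	const pallocChunkPages = 512 // matches logPallocChunkPages = 9 below
	fmt.Println(alignUp(3, pallocChunkPages))   // 512: grow in whole chunks
	fmt.Println(alignUp(512, pallocChunkPages)) // 512: already aligned
	fmt.Println(alignDown(777, 512))            // 512
}
```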
+
+// Free the span back into the heap.
+func (h *mheap) freeSpan(s *mspan) {
+ systemstack(func() {
+ lock(&h.lock)
+ if msanenabled {
+ // Tell msan that this entire span is no longer in use.
+ base := unsafe.Pointer(s.base())
+ bytes := s.npages << _PageShift
+ msanfree(base, bytes)
+ }
+ if asanenabled {
+ // Tell asan that this entire span is no longer in use.
+ base := unsafe.Pointer(s.base())
+ bytes := s.npages << _PageShift
+ asanpoison(base, bytes)
+ }
+ h.freeSpanLocked(s, spanAllocHeap)
+ unlock(&h.lock)
+ })
+}
+
+// freeManual frees a manually-managed span returned by allocManual.
+// typ must be the same as the spanAllocType passed to the allocManual that
+// allocated s.
+//
+// This must only be called when gcphase == _GCoff. See mSpanState for
+// an explanation.
+//
+// freeManual must be called on the system stack because it acquires
+// the heap lock. See mheap for details.
+//
+//go:systemstack
+func (h *mheap) freeManual(s *mspan, typ spanAllocType) {
+ s.needzero = 1
+ lock(&h.lock)
+ h.freeSpanLocked(s, typ)
+ unlock(&h.lock)
+}
+
+func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
+ assertLockHeld(&h.lock)
+
+ switch s.state.get() {
+ case mSpanManual:
+ if s.allocCount != 0 {
+ throw("mheap.freeSpanLocked - invalid stack free")
+ }
+ case mSpanInUse:
+ if s.allocCount != 0 || s.sweepgen != h.sweepgen {
+ print("mheap.freeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
+ throw("mheap.freeSpanLocked - invalid free")
+ }
+ h.pagesInUse.Add(-int64(s.npages))
+
+ // Clear in-use bit in arena page bitmap.
+ arena, pageIdx, pageMask := pageIndexOf(s.base())
+ atomic.And8(&arena.pageInUse[pageIdx], ^pageMask)
+ default:
+ throw("mheap.freeSpanLocked - invalid span state")
+ }
+
+ // Update stats.
+ //
+ // Mirrors the code in allocSpan.
+ nbytes := s.npages * pageSize
+ gcController.heapFree.add(int64(nbytes))
+ if typ == spanAllocHeap {
+ gcController.heapInUse.add(-int64(nbytes))
+ }
+ // Update consistent stats.
+ stats := memstats.heapStats.acquire()
+ switch typ {
+ case spanAllocHeap:
+ atomic.Xaddint64(&stats.inHeap, -int64(nbytes))
+ case spanAllocStack:
+ atomic.Xaddint64(&stats.inStacks, -int64(nbytes))
+ case spanAllocPtrScalarBits:
+ atomic.Xaddint64(&stats.inPtrScalarBits, -int64(nbytes))
+ case spanAllocWorkBuf:
+ atomic.Xaddint64(&stats.inWorkBufs, -int64(nbytes))
+ }
+ memstats.heapStats.release()
+
+ // Mark the space as free.
+ h.pages.free(s.base(), s.npages, false)
+
+ // Free the span structure. We no longer have a use for it.
+ s.state.set(mSpanDead)
+ h.freeMSpanLocked(s)
+}
+
+// scavengeAll acquires the heap lock (blocking any additional
+// manipulation of the page allocator) and iterates over the whole
+// heap, scavenging every free page available.
+func (h *mheap) scavengeAll() {
+ // Disallow malloc or panic while holding the heap lock. We do
+ // this here because this is a non-mallocgc entry-point to
+ // the mheap API.
+ gp := getg()
+ gp.m.mallocing++
+
+ released := h.pages.scavenge(^uintptr(0), nil)
+
+ gp.m.mallocing--
+
+ if debug.scavtrace > 0 {
+ printScavTrace(released, true)
+ }
+}
+
+//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
+func runtime_debug_freeOSMemory() {
+ GC()
+ systemstack(func() { mheap_.scavengeAll() })
+}
+
+// Initialize a new span with the given start and npages.
+func (span *mspan) init(base uintptr, npages uintptr) {
+ // span is *not* zeroed.
+ span.next = nil
+ span.prev = nil
+ span.list = nil
+ span.startAddr = base
+ span.npages = npages
+ span.allocCount = 0
+ span.spanclass = 0
+ span.elemsize = 0
+ span.speciallock.key = 0
+ span.specials = nil
+ span.needzero = 0
+ span.freeindex = 0
+ span.allocBits = nil
+ span.gcmarkBits = nil
+ span.state.set(mSpanDead)
+ lockInit(&span.speciallock, lockRankMspanSpecial)
+}
+
+func (span *mspan) inList() bool {
+ return span.list != nil
+}
+
+// Initialize an empty doubly-linked list.
+func (list *mSpanList) init() {
+ list.first = nil
+ list.last = nil
+}
+
+func (list *mSpanList) remove(span *mspan) {
+ if span.list != list {
+ print("runtime: failed mSpanList.remove span.npages=", span.npages,
+ " span=", span, " prev=", span.prev, " span.list=", span.list, " list=", list, "\n")
+ throw("mSpanList.remove")
+ }
+ if list.first == span {
+ list.first = span.next
+ } else {
+ span.prev.next = span.next
+ }
+ if list.last == span {
+ list.last = span.prev
+ } else {
+ span.next.prev = span.prev
+ }
+ span.next = nil
+ span.prev = nil
+ span.list = nil
+}
+
+func (list *mSpanList) isEmpty() bool {
+ return list.first == nil
+}
+
+func (list *mSpanList) insert(span *mspan) {
+ if span.next != nil || span.prev != nil || span.list != nil {
+ println("runtime: failed mSpanList.insert", span, span.next, span.prev, span.list)
+ throw("mSpanList.insert")
+ }
+ span.next = list.first
+ if list.first != nil {
+ // The list contains at least one span; link it in.
+ // The last span in the list doesn't change.
+ list.first.prev = span
+ } else {
+ // The list contains no spans, so this is also the last span.
+ list.last = span
+ }
+ list.first = span
+ span.list = list
+}
+
+func (list *mSpanList) insertBack(span *mspan) {
+ if span.next != nil || span.prev != nil || span.list != nil {
+ println("runtime: failed mSpanList.insertBack", span, span.next, span.prev, span.list)
+ throw("mSpanList.insertBack")
+ }
+ span.prev = list.last
+ if list.last != nil {
+ // The list contains at least one span.
+ list.last.next = span
+ } else {
+ // The list contains no spans, so this is also the first span.
+ list.first = span
+ }
+ list.last = span
+ span.list = list
+}
+
+// takeAll removes all spans from other and inserts them at the front
+// of list.
+func (list *mSpanList) takeAll(other *mSpanList) {
+ if other.isEmpty() {
+ return
+ }
+
+ // Reparent everything in other to list.
+ for s := other.first; s != nil; s = s.next {
+ s.list = list
+ }
+
+ // Concatenate the lists.
+ if list.isEmpty() {
+ *list = *other
+ } else {
+ // Neither list is empty. Put other before list.
+ other.last.next = list.first
+ list.first.prev = other.last
+ list.first = other.first
+ }
+
+ other.first, other.last = nil, nil
+}
+
+const (
+ _KindSpecialFinalizer = 1
+ _KindSpecialProfile = 2
+ // _KindSpecialReachable is a special used for tracking
+ // reachability during testing.
+ _KindSpecialReachable = 3
+ // Note: The finalizer special must be first because if we're freeing
+ // an object, a finalizer special will cause the freeing operation
+ // to abort, and we want to keep the other special records around
+ // if that happens.
+)
+
+//go:notinheap
+type special struct {
+ next *special // linked list in span
+ offset uint16 // span offset of object
+ kind byte // kind of special
+}
+
+// spanHasSpecials marks a span as having specials in the arena bitmap.
+func spanHasSpecials(s *mspan) {
+ arenaPage := (s.base() / pageSize) % pagesPerArena
+ ai := arenaIndex(s.base())
+ ha := mheap_.arenas[ai.l1()][ai.l2()]
+ atomic.Or8(&ha.pageSpecials[arenaPage/8], uint8(1)<<(arenaPage%8))
+}
+
+// spanHasNoSpecials marks a span as having no specials in the arena bitmap.
+func spanHasNoSpecials(s *mspan) {
+ arenaPage := (s.base() / pageSize) % pagesPerArena
+ ai := arenaIndex(s.base())
+ ha := mheap_.arenas[ai.l1()][ai.l2()]
+ atomic.And8(&ha.pageSpecials[arenaPage/8], ^(uint8(1) << (arenaPage % 8)))
+}
+
+// Adds the special record s to the list of special records for
+// the object p. All fields of s should be filled in except for
+// offset & next, which this routine will fill in.
+// Returns true if the special was successfully added, false otherwise.
+// (The add will fail only if a record with the same p and s.kind
+// already exists.)
+func addspecial(p unsafe.Pointer, s *special) bool {
+ span := spanOfHeap(uintptr(p))
+ if span == nil {
+ throw("addspecial on invalid pointer")
+ }
+
+ // Ensure that the span is swept.
+ // Sweeping accesses the specials list w/o locks, so we have
+ // to synchronize with it. And it's just much safer.
+ mp := acquirem()
+ span.ensureSwept()
+
+ offset := uintptr(p) - span.base()
+ kind := s.kind
+
+ lock(&span.speciallock)
+
+ // Find splice point, check for existing record.
+ t := &span.specials
+ for {
+ x := *t
+ if x == nil {
+ break
+ }
+ if offset == uintptr(x.offset) && kind == x.kind {
+ unlock(&span.speciallock)
+ releasem(mp)
+ return false // already exists
+ }
+ if offset < uintptr(x.offset) || (offset == uintptr(x.offset) && kind < x.kind) {
+ break
+ }
+ t = &x.next
+ }
+
+ // Splice in record, fill in offset.
+ s.offset = uint16(offset)
+ s.next = *t
+ *t = s
+ spanHasSpecials(span)
+ unlock(&span.speciallock)
+ releasem(mp)
+
+ return true
+}
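The splice-point search above is the pointer-to-pointer idiom for inserting into a sorted singly-linked list: t always addresses the link to rewrite, so the head needs no special case. The same shape in isolation (illustrative node type, keyed by a single integer):

```go
package main

import "fmt"

type node struct {
	next *node
	key  int
}

// insertSorted splices n into the list at *head, keeping keys ascending
// and rejecting duplicates, the same shape addspecial uses.
func insertSorted(head **node, n *node) bool {
	t := head
	for x := *t; x != nil; x = *t {
		if x.key == n.key {
			return false // already exists
		}
		if n.key < x.key {
			break
		}
		t = &x.next // advance to the next link
	}
	n.next = *t
	*t = n
	return true
}

func main() {
	var head *node
	insertSorted(&head, &node{key: 3})
	insertSorted(&head, &node{key: 1})
	fmt.Println(insertSorted(&head, &node{key: 3})) // false: duplicate
	for x := head; x != nil; x = x.next {
		fmt.Println(x.key) // 1, then 3
	}
}
```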
+
+// Removes the Special record of the given kind for the object p.
+// Returns the record if the record existed, nil otherwise.
+// The caller must FixAlloc_Free the result.
+func removespecial(p unsafe.Pointer, kind uint8) *special {
+ span := spanOfHeap(uintptr(p))
+ if span == nil {
+ throw("removespecial on invalid pointer")
+ }
+
+ // Ensure that the span is swept.
+ // Sweeping accesses the specials list w/o locks, so we have
+ // to synchronize with it. And it's just much safer.
+ mp := acquirem()
+ span.ensureSwept()
+
+ offset := uintptr(p) - span.base()
+
+ var result *special
+ lock(&span.speciallock)
+ t := &span.specials
+ for {
+ s := *t
+ if s == nil {
+ break
+ }
+ // This function is used for finalizers only, so we don't check for
+ // "interior" specials (p must be exactly equal to s->offset).
+ if offset == uintptr(s.offset) && kind == s.kind {
+ *t = s.next
+ result = s
+ break
+ }
+ t = &s.next
+ }
+ if span.specials == nil {
+ spanHasNoSpecials(span)
+ }
+ unlock(&span.speciallock)
+ releasem(mp)
+ return result
+}
+
+// The described object has a finalizer set for it.
+//
+// specialfinalizer is allocated from non-GC'd memory, so any heap
+// pointers must be specially handled.
+//
+//go:notinheap
+type specialfinalizer struct {
+ special special
+ fn *funcval // May be a heap pointer.
+ nret uintptr
+ fint *_type // May be a heap pointer, but always live.
+ ot *ptrtype // May be a heap pointer, but always live.
+}
+
+// Adds a finalizer to the object p. Returns true if it succeeded.
+func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
+ lock(&mheap_.speciallock)
+ s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
+ unlock(&mheap_.speciallock)
+ s.special.kind = _KindSpecialFinalizer
+ s.fn = f
+ s.nret = nret
+ s.fint = fint
+ s.ot = ot
+ if addspecial(p, &s.special) {
+ // This is responsible for maintaining the same
+ // GC-related invariants as markrootSpans in any
+ // situation where it's possible that markrootSpans
+ // has already run but mark termination hasn't yet.
+ if gcphase != _GCoff {
+ base, _, _ := findObject(uintptr(p), 0, 0)
+ mp := acquirem()
+ gcw := &mp.p.ptr().gcw
+ // Mark everything reachable from the object
+ // so it's retained for the finalizer.
+ scanobject(base, gcw)
+ // Mark the finalizer itself, since the
+ // special isn't part of the GC'd heap.
+ scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
+ releasem(mp)
+ }
+ return true
+ }
+
+ // There was an old finalizer
+ lock(&mheap_.speciallock)
+ mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
+ unlock(&mheap_.speciallock)
+ return false
+}
+
+// Removes the finalizer (if any) from the object p.
+func removefinalizer(p unsafe.Pointer) {
+ s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
+ if s == nil {
+ return // there wasn't a finalizer to remove
+ }
+ lock(&mheap_.speciallock)
+ mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
+ unlock(&mheap_.speciallock)
+}
+
+// The described object is being heap profiled.
+//
+//go:notinheap
+type specialprofile struct {
+ special special
+ b *bucket
+}
+
+// Set the heap profile bucket associated with addr to b.
+func setprofilebucket(p unsafe.Pointer, b *bucket) {
+ lock(&mheap_.speciallock)
+ s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
+ unlock(&mheap_.speciallock)
+ s.special.kind = _KindSpecialProfile
+ s.b = b
+ if !addspecial(p, &s.special) {
+ throw("setprofilebucket: profile already set")
+ }
+}
+
+// specialReachable tracks whether an object is reachable on the next
+// GC cycle. This is used by testing.
+type specialReachable struct {
+ special special
+ done bool
+ reachable bool
+}
+
+// specialsIter helps iterate over specials lists.
+type specialsIter struct {
+ pprev **special
+ s *special
+}
+
+func newSpecialsIter(span *mspan) specialsIter {
+ return specialsIter{&span.specials, span.specials}
+}
+
+func (i *specialsIter) valid() bool {
+ return i.s != nil
+}
+
+func (i *specialsIter) next() {
+ i.pprev = &i.s.next
+ i.s = *i.pprev
+}
+
+// unlinkAndNext removes the current special from the list and moves
+// the iterator to the next special. It returns the unlinked special.
+func (i *specialsIter) unlinkAndNext() *special {
+ cur := i.s
+ i.s = cur.next
+ *i.pprev = i.s
+ return cur
+}
+
+// freeSpecial performs any cleanup on special s and deallocates it.
+// s must already be unlinked from the specials list.
+func freeSpecial(s *special, p unsafe.Pointer, size uintptr) {
+ switch s.kind {
+ case _KindSpecialFinalizer:
+ sf := (*specialfinalizer)(unsafe.Pointer(s))
+ queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
+ lock(&mheap_.speciallock)
+ mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
+ unlock(&mheap_.speciallock)
+ case _KindSpecialProfile:
+ sp := (*specialprofile)(unsafe.Pointer(s))
+ mProf_Free(sp.b, size)
+ lock(&mheap_.speciallock)
+ mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
+ unlock(&mheap_.speciallock)
+ case _KindSpecialReachable:
+ sp := (*specialReachable)(unsafe.Pointer(s))
+ sp.done = true
+ // The creator frees these.
+ default:
+ throw("bad special kind")
+ panic("not reached")
+ }
+}
+
+// gcBits is an alloc/mark bitmap. This is always used as *gcBits.
+//
+//go:notinheap
+type gcBits uint8
+
+// bytep returns a pointer to the n'th byte of b.
+func (b *gcBits) bytep(n uintptr) *uint8 {
+ return addb((*uint8)(b), n)
+}
+
+// bitp returns a pointer to the byte containing bit n and a mask for
+// selecting that bit from *bytep.
+func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) {
+ return b.bytep(n / 8), 1 << (n % 8)
+}
+
+const gcBitsChunkBytes = uintptr(64 << 10)
+const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})
+
+type gcBitsHeader struct {
+ free uintptr // free is the index into bits of the next free byte.
+ next uintptr // *gcBits triggers recursive type bug. (issue 14620)
+}
+
+//go:notinheap
+type gcBitsArena struct {
+ // gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand.
+ free uintptr // free is the index into bits of the next free byte; read/write atomically
+ next *gcBitsArena
+ bits [gcBitsChunkBytes - gcBitsHeaderBytes]gcBits
+}
+
+var gcBitsArenas struct {
+ lock mutex
+ free *gcBitsArena
+ next *gcBitsArena // Read atomically. Write atomically under lock.
+ current *gcBitsArena
+ previous *gcBitsArena
+}
+
+// tryAlloc allocates from b or returns nil if b does not have enough room.
+// This is safe to call concurrently.
+func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits {
+ if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) {
+ return nil
+ }
+ // Try to allocate from this block.
+ end := atomic.Xadduintptr(&b.free, bytes)
+ if end > uintptr(len(b.bits)) {
+ return nil
+ }
+ // There was enough room.
+ start := end - bytes
+ return &b.bits[start]
+}
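tryAlloc is a lock-free bump allocator: the preliminary load is a cheap, racy filter, and the atomic add is the real claim. A losing racer simply leaves free pointing past the end and returns nil, wasting a little space in an arena that is about to be replaced anyway. A self-contained sketch:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// bump hands out byte ranges from a fixed block with a single atomic add.
type bump struct {
	free atomic.Uintptr // index of the next free byte
	bits [1 << 16]byte
}

func (b *bump) tryAlloc(n uintptr) []byte {
	if b == nil || b.free.Load()+n > uintptr(len(b.bits)) {
		return nil // cheap but racy fast-path check; re-checked below
	}
	end := b.free.Add(n) // the actual claim
	if end > uintptr(len(b.bits)) {
		return nil // lost the race; the over-claimed space is just wasted
	}
	return b.bits[end-n : end]
}

func main() {
	var b bump
	fmt.Println(len(b.tryAlloc(64)))      // 64
	fmt.Println(b.tryAlloc(1<<17) == nil) // true: too big for the block
}
```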
+
+// newMarkBits returns a pointer to 8 byte aligned bytes
+// to be used for a span's mark bits.
+func newMarkBits(nelems uintptr) *gcBits {
+ blocksNeeded := uintptr((nelems + 63) / 64)
+ bytesNeeded := blocksNeeded * 8
+
+ // Try directly allocating from the current head arena.
+ head := (*gcBitsArena)(atomic.Loadp(unsafe.Pointer(&gcBitsArenas.next)))
+ if p := head.tryAlloc(bytesNeeded); p != nil {
+ return p
+ }
+
+ // There's not enough room in the head arena. We may need to
+ // allocate a new arena.
+ lock(&gcBitsArenas.lock)
+ // Try the head arena again, since it may have changed. Now
+ // that we hold the lock, the list head can't change, but its
+ // free position still can.
+ if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
+ unlock(&gcBitsArenas.lock)
+ return p
+ }
+
+ // Allocate a new arena. This may temporarily drop the lock.
+ fresh := newArenaMayUnlock()
+ // If newArenaMayUnlock dropped the lock, another thread may
+ // have put a fresh arena on the "next" list. Try allocating
+ // from next again.
+ if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
+ // Put fresh back on the free list.
+ // TODO: Mark it "already zeroed"
+ fresh.next = gcBitsArenas.free
+ gcBitsArenas.free = fresh
+ unlock(&gcBitsArenas.lock)
+ return p
+ }
+
+ // Allocate from the fresh arena. We haven't linked it in yet, so
+ // this cannot race and is guaranteed to succeed.
+ p := fresh.tryAlloc(bytesNeeded)
+ if p == nil {
+ throw("markBits overflow")
+ }
+
+ // Add the fresh arena to the "next" list.
+ fresh.next = gcBitsArenas.next
+ atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), unsafe.Pointer(fresh))
+
+ unlock(&gcBitsArenas.lock)
+ return p
+}
+
+// newAllocBits returns a pointer to 8 byte aligned bytes
+// to be used for this span's alloc bits.
+// newAllocBits is used to provide newly initialized spans'
+// allocation bits. For spans that are not being initialized, the
+// mark bits are repurposed as allocation bits when
+// the span is swept.
+func newAllocBits(nelems uintptr) *gcBits {
+ return newMarkBits(nelems)
+}
+
+// nextMarkBitArenaEpoch establishes a new epoch for the arenas
+// holding the mark bits. The arenas are named relative to the
+// current GC cycle which is demarcated by the call to finishweep_m.
+//
+// All current spans have been swept.
+// During that sweep each span allocated room for its gcmarkBits in
+// gcBitsArenas.next block. gcBitsArenas.next becomes the gcBitsArenas.current
+// where the GC will mark objects and after each span is swept these bits
+// will be used to allocate objects.
+// gcBitsArenas.current becomes gcBitsArenas.previous where the span's
+// gcAllocBits live until all the spans have been swept during this GC cycle.
+// The span's sweep extinguishes all the references to gcBitsArenas.previous
+// by pointing gcAllocBits into the gcBitsArenas.current.
+// The gcBitsArenas.previous is released to the gcBitsArenas.free list.
+func nextMarkBitArenaEpoch() {
+ lock(&gcBitsArenas.lock)
+ if gcBitsArenas.previous != nil {
+ if gcBitsArenas.free == nil {
+ gcBitsArenas.free = gcBitsArenas.previous
+ } else {
+ // Find end of previous arenas.
+ last := gcBitsArenas.previous
+ for last = gcBitsArenas.previous; last.next != nil; last = last.next {
+ }
+ last.next = gcBitsArenas.free
+ gcBitsArenas.free = gcBitsArenas.previous
+ }
+ }
+ gcBitsArenas.previous = gcBitsArenas.current
+ gcBitsArenas.current = gcBitsArenas.next
+ atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), nil) // newMarkBits calls newArena when needed
+ unlock(&gcBitsArenas.lock)
+}
+
+// newArenaMayUnlock allocates and zeroes a gcBits arena.
+// The caller must hold gcBitsArena.lock. This may temporarily release it.
+func newArenaMayUnlock() *gcBitsArena {
+ var result *gcBitsArena
+ if gcBitsArenas.free == nil {
+ unlock(&gcBitsArenas.lock)
+ result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys))
+ if result == nil {
+ throw("runtime: cannot allocate memory")
+ }
+ lock(&gcBitsArenas.lock)
+ } else {
+ result = gcBitsArenas.free
+ gcBitsArenas.free = gcBitsArenas.free.next
+ memclrNoHeapPointers(unsafe.Pointer(result), gcBitsChunkBytes)
+ }
+ result.next = nil
+ // If result.bits is not 8 byte aligned adjust index so
+ // that &result.bits[result.free] is 8 byte aligned.
+ if uintptr(unsafe.Offsetof(gcBitsArena{}.bits))&7 == 0 {
+ result.free = 0
+ } else {
+ result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7)
+ }
+ return result
+}
diff --git a/contrib/go/_std_1.19/src/runtime/mpagealloc.go b/contrib/go/_std_1.19/src/runtime/mpagealloc.go
new file mode 100644
index 0000000000..5de25cfe00
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mpagealloc.go
@@ -0,0 +1,1013 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Page allocator.
+//
+// The page allocator manages mapped pages (defined by pageSize, NOT
+// physPageSize) for allocation and re-use. It is embedded into mheap.
+//
+// Pages are managed using a bitmap that is sharded into chunks.
+// In the bitmap, 1 means in-use, and 0 means free. The bitmap spans the
+// process's address space. Chunks are managed in a sparse-array-style structure
+// similar to mheap.arenas, since the bitmap may be large on some systems.
+//
+// The bitmap is efficiently searched by using a radix tree in combination
+// with fast bit-wise intrinsics. Allocation is performed using an address-ordered
+// first-fit approach.
+//
+// Each entry in the radix tree is a summary that describes three properties of
+// a particular region of the address space: the number of contiguous free pages
+// at the start and end of the region it represents, and the maximum number of
+// contiguous free pages found anywhere in that region.
+//
+// Each level of the radix tree is stored as one contiguous array, which represents
+// a different granularity of subdivision of the process's address space. Thus, this
+// radix tree is actually implicit in these large arrays, as opposed to having explicit
+// dynamically-allocated pointer-based node structures. Naturally, these arrays may be
+// quite large for systems with large address spaces, so in these cases they are mapped
+// into memory as needed. The leaf summaries of the tree correspond to a bitmap chunk.
+//
+// The root level (referred to as L0 and index 0 in pageAlloc.summary) has each
+// summary represent the largest section of address space (16 GiB on 64-bit systems),
+// with each subsequent level representing successively smaller subsections until we
+// reach the finest granularity at the leaves, a chunk.
+//
+// More specifically, each summary in each level (except for leaf summaries)
+// represents some number of entries in the following level. For example, each
+// summary in the root level may represent a 16 GiB region of address space,
+// and in the next level there could be 8 corresponding entries which represent 2
+// GiB subsections of that 16 GiB region, each of which could correspond to 8
+// entries in the next level which each represent 256 MiB regions, and so on.
+//
+// Thus, this design scales only up to heaps of a bounded size, but it can always be extended to
+// larger heaps by simply adding levels to the radix tree, which mostly costs
+// additional virtual address space. The choice of managing large arrays also means
+// that a large amount of virtual address space may be reserved by the runtime.
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+const (
+ // The size of a bitmap chunk, i.e. the amount of bits (that is, pages) to consider
+ // in the bitmap at once.
+ pallocChunkPages = 1 << logPallocChunkPages
+ pallocChunkBytes = pallocChunkPages * pageSize
+ logPallocChunkPages = 9
+ logPallocChunkBytes = logPallocChunkPages + pageShift
+
+ // The number of radix bits for each level.
+ //
+ // The value of 3 is chosen such that the block of summaries we need to scan at
+ // each level fits in 64 bytes (2^3 summaries * 8 bytes per summary), which is
+ // close to the L1 cache line width on many systems. Also, a value of 3 fits 4 tree
+ // levels perfectly into the 21-bit pallocBits summary field at the root level.
+ //
+ // The following equation explains how each of the constants relate:
+ // summaryL0Bits + (summaryLevels-1)*summaryLevelBits + logPallocChunkBytes = heapAddrBits
+ //
+ // summaryLevels is an architecture-dependent value defined in mpagealloc_*.go.
+ summaryLevelBits = 3
+ summaryL0Bits = heapAddrBits - logPallocChunkBytes - (summaryLevels-1)*summaryLevelBits
+
+ // pallocChunksL2Bits is the number of bits of the chunk index number
+ // covered by the second level of the chunks map.
+ //
+ // See (*pageAlloc).chunks for more details. Update the documentation
+ // there should this change.
+ pallocChunksL2Bits = heapAddrBits - logPallocChunkBytes - pallocChunksL1Bits
+ pallocChunksL1Shift = pallocChunksL2Bits
+)
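The constant relationships can be sanity-checked numerically. Assuming the common 64-bit values (heapAddrBits = 48 and summaryLevels = 5 from mpagealloc_64bit.go, pageShift = 13), the arithmetic below reproduces the 16 GiB root granularity mentioned earlier; these concrete numbers are an assumption for illustration, not derived from this file:

```go
package main

import "fmt"

func main() {
	// Assumed 64-bit values: heapAddrBits = 48 and summaryLevels = 5
	// come from mpagealloc_64bit.go; pageShift = 13 (8 KiB pages).
	const (
		heapAddrBits        = 48
		summaryLevels       = 5
		summaryLevelBits    = 3
		logPallocChunkBytes = 9 + 13 // logPallocChunkPages + pageShift
	)
	summaryL0Bits := heapAddrBits - logPallocChunkBytes - (summaryLevels-1)*summaryLevelBits
	fmt.Println(summaryL0Bits) // 14: the root level has 1<<14 entries

	// Each root entry covers 2^(heapAddrBits-summaryL0Bits) bytes:
	fmt.Println(uint64(1) << uint(heapAddrBits-summaryL0Bits)) // 17179869184 (16 GiB)
	// Each level down covers 1/8 of its parent:
	// 16 GiB -> 2 GiB -> 256 MiB -> 32 MiB -> 4 MiB (one chunk).
}
```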
+
+// maxSearchAddr returns the maximum searchAddr value, which indicates
+// that the heap has no free space.
+//
+// This function exists just to make it clear that this is the maximum address
+// for the page allocator's search space. See maxOffAddr for details.
+//
+// It's a function (rather than a variable) because it needs to be
+// usable before package runtime's dynamic initialization is complete.
+// See #51913 for details.
+func maxSearchAddr() offAddr { return maxOffAddr }
+
+// Global chunk index.
+//
+// Represents an index into the leaf level of the radix tree.
+// Similar to arenaIndex, except instead of arenas, it divides the address
+// space into chunks.
+type chunkIdx uint
+
+// chunkIndex returns the global index of the palloc chunk containing the
+// pointer p.
+func chunkIndex(p uintptr) chunkIdx {
+ return chunkIdx((p - arenaBaseOffset) / pallocChunkBytes)
+}
+
+// chunkBase returns the base address of the palloc chunk at index ci.
+func chunkBase(ci chunkIdx) uintptr {
+ return uintptr(ci)*pallocChunkBytes + arenaBaseOffset
+}
+
+// chunkPageIndex computes the index of the page that contains p,
+// relative to the chunk which contains p.
+func chunkPageIndex(p uintptr) uint {
+ return uint(p % pallocChunkBytes / pageSize)
+}
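chunkIndex, chunkBase, and chunkPageIndex decompose an address into (chunk number, page within chunk), mirroring arenaIndex/arenaBase at the finer chunk granularity. A quick round-trip check with illustrative constants (arenaBaseOffset taken as 0; the real value is platform-dependent):

```go
package main

import "fmt"

// Illustrative constants; pageSize matches Go's 8 KiB runtime pages.
const (
	pageSize         = 8192
	pallocChunkBytes = 512 * pageSize // pallocChunkPages * pageSize = 4 MiB
	arenaBaseOffset  = 0              // illustrative; nonzero on some platforms
)

func chunkIndex(p uintptr) uint { return uint((p - arenaBaseOffset) / pallocChunkBytes) }
func chunkBase(ci uint) uintptr { return uintptr(ci)*pallocChunkBytes + arenaBaseOffset }
func chunkPageIndex(p uintptr) uint { return uint(p % pallocChunkBytes / pageSize) }

func main() {
	p := uintptr(7*pallocChunkBytes + 3*pageSize + 100)
	ci := chunkIndex(p)
	fmt.Println(ci, chunkPageIndex(p)) // 7 3
	fmt.Println(chunkBase(ci) <= p)    // true: p lies inside chunk ci
}
```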
+
+// l1 returns the index into the first level of (*pageAlloc).chunks.
+func (i chunkIdx) l1() uint {
+ if pallocChunksL1Bits == 0 {
+ // Let the compiler optimize this away if there's no
+ // L1 map.
+ return 0
+ } else {
+ return uint(i) >> pallocChunksL1Shift
+ }
+}
+
+// l2 returns the index into the second level of (*pageAlloc).chunks.
+func (i chunkIdx) l2() uint {
+ if pallocChunksL1Bits == 0 {
+ return uint(i)
+ } else {
+ return uint(i) & (1<<pallocChunksL2Bits - 1)
+ }
+}
+
+// offAddrToLevelIndex converts an address in the offset address space
+// to the index into summary[level] containing addr.
+func offAddrToLevelIndex(level int, addr offAddr) int {
+ return int((addr.a - arenaBaseOffset) >> levelShift[level])
+}
+
+// levelIndexToOffAddr converts an index into summary[level] into
+// the corresponding address in the offset address space.
+func levelIndexToOffAddr(level, idx int) offAddr {
+ return offAddr{(uintptr(idx) << levelShift[level]) + arenaBaseOffset}
+}
+
+// addrsToSummaryRange converts base and limit pointers into a range
+// of entries for the given summary level.
+//
+// The returned range is inclusive on the lower bound and exclusive on
+// the upper bound.
+func addrsToSummaryRange(level int, base, limit uintptr) (lo int, hi int) {
+ // This is slightly more nuanced than just a shift for the exclusive
+ // upper-bound. Note that the exclusive upper bound may be within a
+ // summary at this level, meaning if we just do the obvious computation
+ // hi will end up being an inclusive upper bound. Unfortunately, just
+ // adding 1 to that is too broad since we might be on the very edge
+ // of a summary's max page count boundary for this level
+ // (1 << levelLogPages[level]). So, make limit an inclusive upper bound
+ // then shift, then add 1, so we get an exclusive upper bound at the end.
+ lo = int((base - arenaBaseOffset) >> levelShift[level])
+ hi = int(((limit-1)-arenaBaseOffset)>>levelShift[level]) + 1
+ return
+}
+
+// blockAlignSummaryRange aligns indices into the given level to that
+// level's block width (1 << levelBits[level]). It assumes lo is inclusive
+// and hi is exclusive, and so aligns them down and up respectively.
+func blockAlignSummaryRange(level int, lo, hi int) (int, int) {
+ e := uintptr(1) << levelBits[level]
+ return int(alignDown(uintptr(lo), e)), int(alignUp(uintptr(hi), e))
+}
+
+type pageAlloc struct {
+ // Radix tree of summaries.
+ //
+ // Each slice's cap represents the whole memory reservation.
+ // Each slice's len reflects the allocator's maximum known
+ // mapped heap address for that level.
+ //
+ // The backing store of each summary level is reserved in init
+ // and may or may not be committed in grow (small address spaces
+ // may commit all the memory in init).
+ //
+ // The purpose of keeping len <= cap is to enforce bounds checks
+ // on the top end of the slice so that instead of an unknown
+ // runtime segmentation fault, we get a much friendlier out-of-bounds
+ // error.
+ //
+ // To iterate over a summary level, use inUse to determine which ranges
+ // are currently available. Otherwise one might try to access
+ // memory that is only Reserved, which may result in a hard fault.
+ //
+ // We may still get segmentation faults < len since some of that
+ // memory may not be committed yet.
+ summary [summaryLevels][]pallocSum
+
+ // chunks is a slice of bitmap chunks.
+ //
+ // The total size of chunks is quite large on most 64-bit platforms
+ // (O(GiB) or more) if flattened, so rather than making one large mapping
+ // (which has problems on some platforms, even when PROT_NONE) we use a
+ // two-level sparse array approach similar to the arena index in mheap.
+ //
+ // To find the chunk containing a memory address `a`, do:
+ // chunkOf(chunkIndex(a))
+ //
+ // Below is a table describing the configuration for chunks for various
+ // heapAddrBits supported by the runtime.
+ //
+ // heapAddrBits | L1 Bits | L2 Bits | L2 Entry Size
+ // ------------------------------------------------
+ // 32 | 0 | 10 | 128 KiB
+ // 33 (iOS) | 0 | 11 | 256 KiB
+ // 48 | 13 | 13 | 1 MiB
+ //
+ // There's no reason to use the L1 part of chunks on 32-bit: the
+ // address space is small, so the L2 alone is small. For platforms
+ // with a 48-bit address space, we pick the L1 such that the L2 is
+ // 1 MiB in size, which strikes a good balance between granularity
+ // and the impact on BSS (note the L1 is stored directly in
+ // pageAlloc).
+ //
+ // To iterate over the bitmap, use inUse to determine which ranges
+ // are currently available. Otherwise one might iterate over unused
+ // ranges.
+ //
+ // Protected by mheapLock.
+ //
+ // TODO(mknyszek): Consider changing the definition of the bitmap
+ // such that 1 means free and 0 means in-use so that summaries and
+ // the bitmaps align better on zero-values.
+ chunks [1 << pallocChunksL1Bits]*[1 << pallocChunksL2Bits]pallocData
+
+ // The address to start an allocation search with. It must never
+ // point to any memory that is not contained in inUse, i.e.
+ // inUse.contains(searchAddr.addr()) must always be true. The one
+ // exception to this rule is that it may take on the value of
+ // maxOffAddr to indicate that the heap is exhausted.
+ //
+ // We guarantee that all valid heap addresses below this value
+ // are allocated and not worth searching.
+ searchAddr offAddr
+
+ // start and end represent the chunk indices
+ // which pageAlloc knows about. It assumes
+ // chunks in the range [start, end) are
+ // currently ready to use.
+ start, end chunkIdx
+
+ // inUse is a slice of ranges of address space which are
+ // known by the page allocator to be currently in-use (passed
+ // to grow).
+ //
+ // This field is currently unused on 32-bit architectures but
+ // is harmless to track. We care much more about having a
+ // contiguous heap in these cases and take additional measures
+ // to ensure that, so in nearly all cases this should have just
+ // 1 element.
+ //
+ // All access is protected by the mheapLock.
+ inUse addrRanges
+
+ _ uint32 // Align scav so it's easier to reason about alignment within scav.
+
+ // scav stores the scavenger state.
+ scav struct {
+ // index is an efficient index of chunks that have pages available to
+ // scavenge.
+ index scavengeIndex
+
+ // released is the amount of memory released this generation.
+ //
+ // Updated atomically.
+ released uintptr
+
+ _ uint32 // Align assistTime for atomics on 32-bit platforms.
+
+ // assistTime is the time spent scavenging in the last GC cycle.
+ //
+ // This is reset once a GC cycle ends.
+ assistTime atomic.Int64
+ }
+
+ // mheap_.lock. This level of indirection makes it possible
+ // to test pageAlloc independently of the runtime allocator.
+ mheapLock *mutex
+
+ // sysStat is the runtime memstat to update when new system
+ // memory is committed by the pageAlloc for allocation metadata.
+ sysStat *sysMemStat
+
+ // summaryMappedReady is the number of bytes mapped in the Ready state
+ // in the summary structure. Used only for testing currently.
+ //
+ // Protected by mheapLock.
+ summaryMappedReady uintptr
+
+ // Whether or not this struct is being used in tests.
+ test bool
+}
+
+func (p *pageAlloc) init(mheapLock *mutex, sysStat *sysMemStat) {
+ if levelLogPages[0] > logMaxPackedValue {
+ // We can't represent 1<<levelLogPages[0] pages, the maximum number
+ // of pages we need to represent at the root level, in a summary, which
+ // is a big problem. Throw.
+ print("runtime: root level max pages = ", 1<<levelLogPages[0], "\n")
+ print("runtime: summary max pages = ", maxPackedValue, "\n")
+ throw("root level max pages doesn't fit in summary")
+ }
+ p.sysStat = sysStat
+
+ // Initialize p.inUse.
+ p.inUse.init(sysStat)
+
+ // System-dependent initialization.
+ p.sysInit()
+
+ // Start with the searchAddr in a state indicating there's no free memory.
+ p.searchAddr = maxSearchAddr()
+
+ // Set the mheapLock.
+ p.mheapLock = mheapLock
+}
+
+// tryChunkOf returns the bitmap data for the given chunk.
+//
+// Returns nil if the chunk data has not been mapped.
+func (p *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
+ l2 := p.chunks[ci.l1()]
+ if l2 == nil {
+ return nil
+ }
+ return &l2[ci.l2()]
+}
+
+// chunkOf returns the chunk at the given chunk index.
+//
+// The chunk index must be valid or this method may throw.
+func (p *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
+ return &p.chunks[ci.l1()][ci.l2()]
+}
+
+// grow sets up the metadata for the address range [base, base+size).
+// It may allocate metadata, in which case *p.sysStat will be updated.
+//
+// p.mheapLock must be held.
+func (p *pageAlloc) grow(base, size uintptr) {
+ assertLockHeld(p.mheapLock)
+
+ // Round up to chunks, since we can't deal with increments smaller
+ // than chunks. Also, sysGrow expects aligned values.
+ limit := alignUp(base+size, pallocChunkBytes)
+ base = alignDown(base, pallocChunkBytes)
+
+ // Grow the summary levels in a system-dependent manner.
+ // We just update a bunch of additional metadata here.
+ p.sysGrow(base, limit)
+
+ // Update p.start and p.end.
+ // If no growth happened yet, start == 0. This is generally
+ // safe since the zero page is unmapped.
+ firstGrowth := p.start == 0
+ start, end := chunkIndex(base), chunkIndex(limit)
+ if firstGrowth || start < p.start {
+ p.start = start
+ }
+ if end > p.end {
+ p.end = end
+ }
+ // Note that [base, limit) will never overlap with any existing
+ // range inUse because grow only ever adds never-used memory
+ // regions to the page allocator.
+ p.inUse.add(makeAddrRange(base, limit))
+
+ // A grow operation is a lot like a free operation, so if our
+ // chunk ends up below p.searchAddr, update p.searchAddr to the
+ // new address, just like in free.
+ if b := (offAddr{base}); b.lessThan(p.searchAddr) {
+ p.searchAddr = b
+ }
+
+ // Add entries into chunks, which is sparse, if needed. Then,
+ // initialize the bitmap.
+ //
+ // Newly-grown memory is always considered scavenged.
+ // Set all the bits in the scavenged bitmaps high.
+ for c := chunkIndex(base); c < chunkIndex(limit); c++ {
+ if p.chunks[c.l1()] == nil {
+ // Create the necessary l2 entry.
+ //
+ // Store it atomically to avoid races with readers which
+ // don't acquire the heap lock.
+ r := sysAlloc(unsafe.Sizeof(*p.chunks[0]), p.sysStat)
+ if r == nil {
+ throw("pageAlloc: out of memory")
+ }
+ atomic.StorepNoWB(unsafe.Pointer(&p.chunks[c.l1()]), r)
+ }
+ p.chunkOf(c).scavenged.setRange(0, pallocChunkPages)
+ }
+
+ // Update summaries accordingly. The grow acts like a free, so
+ // we need to ensure this newly-free memory is visible in the
+ // summaries.
+ p.update(base, size/pageSize, true, false)
+}
+
+// update updates heap metadata. It must be called each time the bitmap
+// is updated.
+//
+// If contig is true, update does some optimizations assuming that there was
+// a contiguous allocation or free between addr and addr+npages. alloc indicates
+// whether the operation performed was an allocation or a free.
+//
+// p.mheapLock must be held.
+func (p *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
+ assertLockHeld(p.mheapLock)
+
+ // base, limit, start, and end are inclusive.
+ limit := base + npages*pageSize - 1
+ sc, ec := chunkIndex(base), chunkIndex(limit)
+
+ // Handle updating the lowest level first.
+ if sc == ec {
+ // Fast path: the allocation doesn't span more than one chunk,
+ // so update this one and if the summary didn't change, return.
+ x := p.summary[len(p.summary)-1][sc]
+ y := p.chunkOf(sc).summarize()
+ if x == y {
+ return
+ }
+ p.summary[len(p.summary)-1][sc] = y
+ } else if contig {
+ // Slow contiguous path: the allocation spans more than one chunk
+ // and at least one summary is guaranteed to change.
+ summary := p.summary[len(p.summary)-1]
+
+ // Update the summary for chunk sc.
+ summary[sc] = p.chunkOf(sc).summarize()
+
+ // Update the summaries for chunks in between, which are
+ // either totally allocated or freed.
+ whole := p.summary[len(p.summary)-1][sc+1 : ec]
+ if alloc {
+ // Should optimize into a memclr.
+ for i := range whole {
+ whole[i] = 0
+ }
+ } else {
+ for i := range whole {
+ whole[i] = freeChunkSum
+ }
+ }
+
+ // Update the summary for chunk ec.
+ summary[ec] = p.chunkOf(ec).summarize()
+ } else {
+ // Slow general path: the allocation spans more than one chunk
+ // and at least one summary is guaranteed to change.
+ //
+ // We can't assume a contiguous allocation happened, so walk over
+ // every chunk in the range and manually recompute the summary.
+ summary := p.summary[len(p.summary)-1]
+ for c := sc; c <= ec; c++ {
+ summary[c] = p.chunkOf(c).summarize()
+ }
+ }
+
+ // Walk up the radix tree and update the summaries appropriately.
+ changed := true
+ for l := len(p.summary) - 2; l >= 0 && changed; l-- {
+ // Update summaries at level l from summaries at level l+1.
+ changed = false
+
+ // "Constants" for the previous level which we
+ // need to compute the summary from that level.
+ logEntriesPerBlock := levelBits[l+1]
+ logMaxPages := levelLogPages[l+1]
+
+ // lo and hi describe all the parts of the level we need to look at.
+ lo, hi := addrsToSummaryRange(l, base, limit+1)
+
+ // Iterate over each block, updating the corresponding summary in the less-granular level.
+ for i := lo; i < hi; i++ {
+ children := p.summary[l+1][i<<logEntriesPerBlock : (i+1)<<logEntriesPerBlock]
+ sum := mergeSummaries(children, logMaxPages)
+ old := p.summary[l][i]
+ if old != sum {
+ changed = true
+ p.summary[l][i] = sum
+ }
+ }
+ }
+}
+
+// allocRange marks the range of memory [base, base+npages*pageSize) as
+// allocated. It also updates the summaries to reflect the newly-updated
+// bitmap.
+//
+// Returns the amount of scavenged memory in bytes present in the
+// allocated range.
+//
+// p.mheapLock must be held.
+func (p *pageAlloc) allocRange(base, npages uintptr) uintptr {
+ assertLockHeld(p.mheapLock)
+
+ limit := base + npages*pageSize - 1
+ sc, ec := chunkIndex(base), chunkIndex(limit)
+ si, ei := chunkPageIndex(base), chunkPageIndex(limit)
+
+ scav := uint(0)
+ if sc == ec {
+ // The range doesn't cross any chunk boundaries.
+ chunk := p.chunkOf(sc)
+ scav += chunk.scavenged.popcntRange(si, ei+1-si)
+ chunk.allocRange(si, ei+1-si)
+ } else {
+ // The range crosses at least one chunk boundary.
+ chunk := p.chunkOf(sc)
+ scav += chunk.scavenged.popcntRange(si, pallocChunkPages-si)
+ chunk.allocRange(si, pallocChunkPages-si)
+ for c := sc + 1; c < ec; c++ {
+ chunk := p.chunkOf(c)
+ scav += chunk.scavenged.popcntRange(0, pallocChunkPages)
+ chunk.allocAll()
+ }
+ chunk = p.chunkOf(ec)
+ scav += chunk.scavenged.popcntRange(0, ei+1)
+ chunk.allocRange(0, ei+1)
+ }
+ p.update(base, npages, true, true)
+ return uintptr(scav) * pageSize
+}
+
+// findMappedAddr returns the smallest mapped offAddr that is
+// >= addr. That is, if addr refers to mapped memory, then it is
+// returned. If addr is higher than any mapped region, then
+// it returns maxOffAddr.
+//
+// p.mheapLock must be held.
+func (p *pageAlloc) findMappedAddr(addr offAddr) offAddr {
+ assertLockHeld(p.mheapLock)
+
+ // If we're not in a test, validate first by checking mheap_.arenas.
+ // This is a fast path which is only safe to use outside of testing.
+ ai := arenaIndex(addr.addr())
+ if p.test || mheap_.arenas[ai.l1()] == nil || mheap_.arenas[ai.l1()][ai.l2()] == nil {
+ vAddr, ok := p.inUse.findAddrGreaterEqual(addr.addr())
+ if ok {
+ return offAddr{vAddr}
+ } else {
+ // The candidate search address is greater than any
+ // known address, which means we definitely have no
+ // free memory left.
+ return maxOffAddr
+ }
+ }
+ return addr
+}
+
+// find searches for the first (address-ordered) contiguous free region of
+// npages in size and returns a base address for that region.
+//
+// It uses p.searchAddr to prune its search and assumes that no palloc chunks
+// below chunkIndex(p.searchAddr) contain any free memory at all.
+//
+// find also computes and returns a candidate p.searchAddr, which may or
+// may not prune more of the address space than p.searchAddr already does.
+// This candidate is always a valid p.searchAddr.
+//
+// find represents the slow path and the full radix tree search.
+//
+// Returns a base address of 0 on failure, in which case the candidate
+// searchAddr returned is invalid and must be ignored.
+//
+// p.mheapLock must be held.
+func (p *pageAlloc) find(npages uintptr) (uintptr, offAddr) {
+ assertLockHeld(p.mheapLock)
+
+ // Search algorithm.
+ //
+ // This algorithm walks each level l of the radix tree from the root level
+ // to the leaf level. It iterates over at most 1 << levelBits[l] of entries
+ // in a given level in the radix tree, and uses the summary information to
+ // find either:
+ // 1) That a given subtree contains a large enough contiguous region, at
+ // which point it continues iterating on the next level, or
+ // 2) That there are enough contiguous boundary-crossing bits to satisfy
+ // the allocation, at which point it knows exactly where to start
+ // allocating from.
+ //
+ // i tracks the index into the current level l's structure for the
+ // contiguous 1 << levelBits[l] entries we're actually interested in.
+ //
+ // NOTE: Technically this search could allocate a region which crosses
+ // the arenaBaseOffset boundary, which when arenaBaseOffset != 0, is
+ // a discontinuity. However, the only way this could happen is if the
+ // page at the zero address is mapped, and this is impossible on
+ // every system we support where arenaBaseOffset != 0. So, the
+ // discontinuity is already encoded in the fact that the OS will never
+ // map the zero page for us, and this function doesn't try to handle
+ // this case in any way.
+
+ // i is the beginning of the block of entries we're searching at the
+ // current level.
+ i := 0
+
+ // firstFree is the region of address space within which we are certain
+ // to find the first free page of the heap. base and bound are the inclusive
+ // bounds of this window, and both are addresses in the linearized, contiguous
+ // view of the address space (with arenaBaseOffset pre-added). At each level,
+ // this window is narrowed as we find the memory region containing the
+ // first free page of memory. To begin with, the range reflects the
+ // full process address space.
+ //
+ // firstFree is updated by calling foundFree each time free space in the
+ // heap is discovered.
+ //
+ // At the end of the search, base.addr() is the best new
+ // searchAddr we could deduce in this search.
+ firstFree := struct {
+ base, bound offAddr
+ }{
+ base: minOffAddr,
+ bound: maxOffAddr,
+ }
+ // foundFree takes the given address range [addr, addr+size) and
+ // updates firstFree if it is a narrower range. The input range must
+ // either be fully contained within firstFree or not overlap with it
+ // at all.
+ //
+ // This way, we'll record the first summary we find with any free
+ // pages on the root level and narrow that down if we descend into
+ // that summary. But as soon as we need to iterate beyond that summary
+ // in a level to find a large enough range, we'll stop narrowing.
+ foundFree := func(addr offAddr, size uintptr) {
+ if firstFree.base.lessEqual(addr) && addr.add(size-1).lessEqual(firstFree.bound) {
+ // This range fits within the current firstFree window, so narrow
+ // down the firstFree window to the base and bound of this range.
+ firstFree.base = addr
+ firstFree.bound = addr.add(size - 1)
+ } else if !(addr.add(size-1).lessThan(firstFree.base) || firstFree.bound.lessThan(addr)) {
+ // This range only partially overlaps with the firstFree range,
+ // so throw.
+ print("runtime: addr = ", hex(addr.addr()), ", size = ", size, "\n")
+ print("runtime: base = ", hex(firstFree.base.addr()), ", bound = ", hex(firstFree.bound.addr()), "\n")
+ throw("range partially overlaps")
+ }
+ }
+
+ // lastSum is the summary which we saw on the previous level that made us
+ // move on to the next level. Used to print additional information in the
+ // case of a catastrophic failure.
+ // lastSumIdx is that summary's index in the previous level.
+ lastSum := packPallocSum(0, 0, 0)
+ lastSumIdx := -1
+
+nextLevel:
+ for l := 0; l < len(p.summary); l++ {
+ // For the root level, entriesPerBlock is the whole level.
+ entriesPerBlock := 1 << levelBits[l]
+ logMaxPages := levelLogPages[l]
+
+ // We've moved into a new level, so let's update i to our new
+ // starting index. This is a no-op for level 0.
+ i <<= levelBits[l]
+
+ // Slice out the block of entries we care about.
+ entries := p.summary[l][i : i+entriesPerBlock]
+
+ // Determine j0, the first index we should start iterating from.
+ // The searchAddr may help us eliminate iterations if we followed the
+ // searchAddr on the previous level or we're on the root level, in which
+ // case the searchAddr should be the same as i after levelShift.
+ j0 := 0
+ if searchIdx := offAddrToLevelIndex(l, p.searchAddr); searchIdx&^(entriesPerBlock-1) == i {
+ j0 = searchIdx & (entriesPerBlock - 1)
+ }
+
+ // Run over the level entries looking for
+ // a contiguous run of at least npages either
+ // within an entry or across entries.
+ //
+ // base contains the page index (relative to
+ // the first entry's first page) of the currently
+ // considered run of consecutive pages.
+ //
+ // size contains the size of the currently considered
+ // run of consecutive pages.
+ var base, size uint
+ for j := j0; j < len(entries); j++ {
+ sum := entries[j]
+ if sum == 0 {
+ // A fully allocated entry (sum == 0) means we broke any streak and
+ // that we should skip it altogether.
+ size = 0
+ continue
+ }
+
+ // We've encountered a non-zero summary which means
+ // free memory, so update firstFree.
+ foundFree(levelIndexToOffAddr(l, i+j), (uintptr(1)<<logMaxPages)*pageSize)
+
+ s := sum.start()
+ if size+s >= uint(npages) {
+ // If size == 0 we don't have a run yet,
+ // which means base isn't valid. So, set
+ // base to the first page in this block.
+ if size == 0 {
+ base = uint(j) << logMaxPages
+ }
+ // We hit npages; we're done!
+ size += s
+ break
+ }
+ if sum.max() >= uint(npages) {
+ // The entry itself contains npages contiguous
+ // free pages, so continue on the next level
+ // to find that run.
+ i += j
+ lastSumIdx = i
+ lastSum = sum
+ continue nextLevel
+ }
+ if size == 0 || s < 1<<logMaxPages {
+ // We either don't have a current run started, or this entry
+ // isn't totally free (meaning we can't continue the current
+ // one), so try to begin a new run by setting size and base
+ // based on sum.end.
+ size = sum.end()
+ base = uint(j+1)<<logMaxPages - size
+ continue
+ }
+ // The entry is completely free, so continue the run.
+ size += 1 << logMaxPages
+ }
+ if size >= uint(npages) {
+ // We found a sufficiently large run of free pages straddling
+ // some boundary, so compute the address and return it.
+ addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
+ return addr, p.findMappedAddr(firstFree.base)
+ }
+ if l == 0 {
+ // We're at level zero, so that means we've exhausted our search.
+ return 0, maxSearchAddr()
+ }
+
+ // We're not at level zero, and we exhausted the level we were looking in.
+ // This means that either our calculations were wrong or the level above
+ // lied to us. In either case, dump some useful state and throw.
+ print("runtime: summary[", l-1, "][", lastSumIdx, "] = ", lastSum.start(), ", ", lastSum.max(), ", ", lastSum.end(), "\n")
+ print("runtime: level = ", l, ", npages = ", npages, ", j0 = ", j0, "\n")
+ print("runtime: p.searchAddr = ", hex(p.searchAddr.addr()), ", i = ", i, "\n")
+ print("runtime: levelShift[level] = ", levelShift[l], ", levelBits[level] = ", levelBits[l], "\n")
+ for j := 0; j < len(entries); j++ {
+ sum := entries[j]
+ print("runtime: summary[", l, "][", i+j, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
+ }
+ throw("bad summary data")
+ }
+
+ // Since we've gotten to this point, that means we haven't found a
+ // sufficiently-sized free region straddling some boundary (chunk or larger).
+ // This means the last summary we inspected must have had a large enough "max"
+ // value, so look inside the chunk to find a suitable run.
+ //
+ // After iterating over all levels, i must contain a chunk index which
+ // is what the final level represents.
+ ci := chunkIdx(i)
+ j, searchIdx := p.chunkOf(ci).find(npages, 0)
+ if j == ^uint(0) {
+ // We couldn't find any space in this chunk despite the summaries telling
+ // us it should be there. There's likely a bug, so dump some state and throw.
+ sum := p.summary[len(p.summary)-1][i]
+ print("runtime: summary[", len(p.summary)-1, "][", i, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
+ print("runtime: npages = ", npages, "\n")
+ throw("bad summary data")
+ }
+
+ // Compute the address at which the free space starts.
+ addr := chunkBase(ci) + uintptr(j)*pageSize
+
+ // Since we actually searched the chunk, we may have
+ // found an even narrower free window.
+ searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
+ foundFree(offAddr{searchAddr}, chunkBase(ci+1)-searchAddr)
+ return addr, p.findMappedAddr(firstFree.base)
+}
+
+// alloc allocates npages worth of memory from the page heap, returning the base
+// address for the allocation and the amount of scavenged memory in bytes
+// contained in the region [base address, base address + npages*pageSize).
+//
+// Returns a 0 base address on failure, in which case other returned values
+// should be ignored.
+//
+// p.mheapLock must be held.
+//
+// Must run on the system stack because p.mheapLock must be held.
+//
+//go:systemstack
+func (p *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
+ assertLockHeld(p.mheapLock)
+
+ // If the searchAddr refers to a region which has a higher address than
+ // any known chunk, then we know we're out of memory.
+ if chunkIndex(p.searchAddr.addr()) >= p.end {
+ return 0, 0
+ }
+
+ // If npages has a chance of fitting in the chunk where the searchAddr is,
+ // search it directly.
+ searchAddr := minOffAddr
+ if pallocChunkPages-chunkPageIndex(p.searchAddr.addr()) >= uint(npages) {
+ // npages is guaranteed to be no greater than pallocChunkPages here.
+ i := chunkIndex(p.searchAddr.addr())
+ if max := p.summary[len(p.summary)-1][i].max(); max >= uint(npages) {
+ j, searchIdx := p.chunkOf(i).find(npages, chunkPageIndex(p.searchAddr.addr()))
+ if j == ^uint(0) {
+ print("runtime: max = ", max, ", npages = ", npages, "\n")
+ print("runtime: searchIdx = ", chunkPageIndex(p.searchAddr.addr()), ", p.searchAddr = ", hex(p.searchAddr.addr()), "\n")
+ throw("bad summary data")
+ }
+ addr = chunkBase(i) + uintptr(j)*pageSize
+ searchAddr = offAddr{chunkBase(i) + uintptr(searchIdx)*pageSize}
+ goto Found
+ }
+ }
+ // We failed to use a searchAddr for one reason or another, so try
+ // the slow path.
+ addr, searchAddr = p.find(npages)
+ if addr == 0 {
+ if npages == 1 {
+ // We failed to find a single free page, the smallest unit
+ // of allocation. This means we know the heap is completely
+ // exhausted. Otherwise, the heap still might have free
+ // space in it, just not enough contiguous space to
+ // accommodate npages.
+ p.searchAddr = maxSearchAddr()
+ }
+ return 0, 0
+ }
+Found:
+ // Go ahead and actually mark the bits now that we have an address.
+ scav = p.allocRange(addr, npages)
+
+ // If we found a higher searchAddr, we know that all the
+ // heap memory before that searchAddr in an offset address space is
+ // allocated, so bump p.searchAddr up to the new one.
+ if p.searchAddr.lessThan(searchAddr) {
+ p.searchAddr = searchAddr
+ }
+ return addr, scav
+}
+
+// free returns npages worth of memory starting at base back to the page heap.
+//
+// p.mheapLock must be held.
+//
+// Must run on the system stack because p.mheapLock must be held.
+//
+//go:systemstack
+func (p *pageAlloc) free(base, npages uintptr, scavenged bool) {
+ assertLockHeld(p.mheapLock)
+
+ // If we're freeing pages below the p.searchAddr, update searchAddr.
+ if b := (offAddr{base}); b.lessThan(p.searchAddr) {
+ p.searchAddr = b
+ }
+ limit := base + npages*pageSize - 1
+ if !scavenged {
+ p.scav.index.mark(base, limit+1)
+ }
+ if npages == 1 {
+ // Fast path: we're clearing a single bit, and we know exactly
+ // where it is, so mark it directly.
+ i := chunkIndex(base)
+ p.chunkOf(i).free1(chunkPageIndex(base))
+ } else {
+ // Slow path: we're clearing more bits so we may need to iterate.
+ sc, ec := chunkIndex(base), chunkIndex(limit)
+ si, ei := chunkPageIndex(base), chunkPageIndex(limit)
+
+ if sc == ec {
+ // The range doesn't cross any chunk boundaries.
+ p.chunkOf(sc).free(si, ei+1-si)
+ } else {
+ // The range crosses at least one chunk boundary.
+ p.chunkOf(sc).free(si, pallocChunkPages-si)
+ for c := sc + 1; c < ec; c++ {
+ p.chunkOf(c).freeAll()
+ }
+ p.chunkOf(ec).free(0, ei+1)
+ }
+ }
+ p.update(base, npages, true, false)
+}
+
+const (
+ pallocSumBytes = unsafe.Sizeof(pallocSum(0))
+
+ // maxPackedValue is the maximum value that any of the three fields in
+ // the pallocSum may take on.
+ maxPackedValue = 1 << logMaxPackedValue
+ logMaxPackedValue = logPallocChunkPages + (summaryLevels-1)*summaryLevelBits
+
+ freeChunkSum = pallocSum(uint64(pallocChunkPages) |
+ uint64(pallocChunkPages<<logMaxPackedValue) |
+ uint64(pallocChunkPages<<(2*logMaxPackedValue)))
+)
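+
+// For reference, assuming the 64-bit configuration (logPallocChunkPages = 9,
+// summaryLevels = 5, summaryLevelBits = 3), logMaxPackedValue is
+// 9 + 4*3 = 21, so maxPackedValue is 2^21 pages, or 16 GiB of address
+// space per root-level summary with 8 KiB pages.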
+
+// pallocSum is a packed summary type which packs three numbers, start, max,
+// and end, into a single 8-byte value. Each of these values is a count
+// summarizing a bitmap, and each may have a maximum value of 2^21 - 1,
+// or all three may be equal to 2^21. The latter case is represented by
+// just setting the 64th bit.
+type pallocSum uint64
+
+// packPallocSum takes a start, max, and end value and produces a pallocSum.
+func packPallocSum(start, max, end uint) pallocSum {
+ if max == maxPackedValue {
+ return pallocSum(uint64(1 << 63))
+ }
+ return pallocSum((uint64(start) & (maxPackedValue - 1)) |
+ ((uint64(max) & (maxPackedValue - 1)) << logMaxPackedValue) |
+ ((uint64(end) & (maxPackedValue - 1)) << (2 * logMaxPackedValue)))
+}
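+
+// As a sketch of the layout: packPallocSum(1, 2, 3) produces the 64-bit
+// value 1 | 2<<21 | 3<<42 (assuming logMaxPackedValue = 21), with bit 63
+// clear. The bit-63 fast path above fires only when max saturates at
+// maxPackedValue, which implies the whole region is free and start and
+// end saturate as well.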
+
+// start extracts the start value from a packed sum.
+func (p pallocSum) start() uint {
+ if uint64(p)&uint64(1<<63) != 0 {
+ return maxPackedValue
+ }
+ return uint(uint64(p) & (maxPackedValue - 1))
+}
+
+// max extracts the max value from a packed sum.
+func (p pallocSum) max() uint {
+ if uint64(p)&uint64(1<<63) != 0 {
+ return maxPackedValue
+ }
+ return uint((uint64(p) >> logMaxPackedValue) & (maxPackedValue - 1))
+}
+
+// end extracts the end value from a packed sum.
+func (p pallocSum) end() uint {
+ if uint64(p)&uint64(1<<63) != 0 {
+ return maxPackedValue
+ }
+ return uint((uint64(p) >> (2 * logMaxPackedValue)) & (maxPackedValue - 1))
+}
+
+// unpack unpacks all three values from the summary.
+func (p pallocSum) unpack() (uint, uint, uint) {
+ if uint64(p)&uint64(1<<63) != 0 {
+ return maxPackedValue, maxPackedValue, maxPackedValue
+ }
+ return uint(uint64(p) & (maxPackedValue - 1)),
+ uint((uint64(p) >> logMaxPackedValue) & (maxPackedValue - 1)),
+ uint((uint64(p) >> (2 * logMaxPackedValue)) & (maxPackedValue - 1))
+}
+
+// mergeSummaries merges consecutive summaries, each of which may represent
+// at most 1 << logMaxPagesPerSum pages, into a single summary.
+func mergeSummaries(sums []pallocSum, logMaxPagesPerSum uint) pallocSum {
+ // Merge the summaries in sums into one.
+ //
+ // We do this by keeping a running summary representing the merged
+ // summaries of sums[:i] in start, max, and end.
+ start, max, end := sums[0].unpack()
+ for i := 1; i < len(sums); i++ {
+ // Merge in sums[i].
+ si, mi, ei := sums[i].unpack()
+
+ // Merge in sums[i].start only if the running summary is
+ // completely free, otherwise this summary's start
+ // plays no role in the combined sum.
+ if start == uint(i)<<logMaxPagesPerSum {
+ start += si
+ }
+
+ // Recompute the max value of the running sum by looking
+ // across the boundary between the running sum and sums[i]
+ // and at the max sums[i], taking the greatest of those two
+ // and the max of the running sum.
+ if end+si > max {
+ max = end + si
+ }
+ if mi > max {
+ max = mi
+ }
+
+ // Merge in end by checking if this new summary is totally
+ // free. If it is, then we want to extend the running sum's
+ // end by the new summary. If not, then we have some alloc'd
+ // pages in there and we just want to take the end value in
+ // sums[i].
+ if ei == 1<<logMaxPagesPerSum {
+ end += 1 << logMaxPagesPerSum
+ } else {
+ end = ei
+ }
+ }
+ return packPallocSum(start, max, end)
+}
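+
+// A worked example, assuming logMaxPagesPerSum = 3 so each summary covers
+// 8 pages: merging a fully-free summary (8, 8, 8) with (2, 5, 3) yields
+// start = 8+2 = 10 (the first summary is entirely free, so its run extends
+// into the second), max = 10 (the 8-page tail joined with the 2-page head
+// beats both individual maxes), and end = 3 (the second summary is not
+// fully free), i.e. packPallocSum(10, 10, 3).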
diff --git a/contrib/go/_std_1.19/src/runtime/mpagealloc_64bit.go b/contrib/go/_std_1.19/src/runtime/mpagealloc_64bit.go
new file mode 100644
index 0000000000..371c1fb31c
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mpagealloc_64bit.go
@@ -0,0 +1,257 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+const (
+ // The number of levels in the radix tree.
+ summaryLevels = 5
+
+ // Constants for testing.
+ pageAlloc32Bit = 0
+ pageAlloc64Bit = 1
+
+ // Number of bits needed to represent all indices into the L1 of the
+ // chunks map.
+ //
+ // See (*pageAlloc).chunks for more details. Update the documentation
+ // there should this number change.
+ pallocChunksL1Bits = 13
+)
+
+// levelBits is the number of bits in the radix for a given level in the super summary
+// structure.
+//
+// The sum of all the entries of levelBits should equal heapAddrBits.
+var levelBits = [summaryLevels]uint{
+ summaryL0Bits,
+ summaryLevelBits,
+ summaryLevelBits,
+ summaryLevelBits,
+ summaryLevelBits,
+}
+
+// levelShift is the number of bits to shift to acquire the radix for a given level
+// in the super summary structure.
+//
+// With levelShift, one can compute the index of the summary at level l containing
+// a pointer p by doing:
+//
+// p >> levelShift[l]
+var levelShift = [summaryLevels]uint{
+ heapAddrBits - summaryL0Bits,
+ heapAddrBits - summaryL0Bits - 1*summaryLevelBits,
+ heapAddrBits - summaryL0Bits - 2*summaryLevelBits,
+ heapAddrBits - summaryL0Bits - 3*summaryLevelBits,
+ heapAddrBits - summaryL0Bits - 4*summaryLevelBits,
+}
+
+// levelLogPages is log2 the maximum number of runtime pages in the address space
+// a summary in the given level represents.
+//
+// The leaf level always represents exactly log2 of 1 chunk's worth of pages.
+var levelLogPages = [summaryLevels]uint{
+ logPallocChunkPages + 4*summaryLevelBits,
+ logPallocChunkPages + 3*summaryLevelBits,
+ logPallocChunkPages + 2*summaryLevelBits,
+ logPallocChunkPages + 1*summaryLevelBits,
+ logPallocChunkPages,
+}
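+
+// Putting the three tables together, assuming heapAddrBits = 48 (e.g.
+// amd64), summaryL0Bits = 14, summaryLevelBits = 3, and
+// logPallocChunkPages = 9, the values work out to:
+//
+//	level:          0   1   2   3   4
+//	levelBits:     14   3   3   3   3
+//	levelShift:    34  31  28  25  22
+//	levelLogPages: 21  18  15  12   9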
+
+// sysInit performs architecture-dependent initialization of fields
+// in pageAlloc. pageAlloc should be uninitialized except for sysStat
+// if any runtime statistic should be updated.
+func (p *pageAlloc) sysInit() {
+ // Reserve memory for each level. This will get mapped in
+ // as R/W by setArenas.
+ for l, shift := range levelShift {
+ entries := 1 << (heapAddrBits - shift)
+
+ // Reserve b bytes of memory anywhere in the address space.
+ b := alignUp(uintptr(entries)*pallocSumBytes, physPageSize)
+ r := sysReserve(nil, b)
+ if r == nil {
+ throw("failed to reserve page summary memory")
+ }
+
+ // Put this reservation into a slice.
+ sl := notInHeapSlice{(*notInHeap)(r), 0, entries}
+ p.summary[l] = *(*[]pallocSum)(unsafe.Pointer(&sl))
+ }
+
+ // Set up the scavenge index.
+ nbytes := uintptr(1<<heapAddrBits) / pallocChunkBytes / 8
+ r := sysReserve(nil, nbytes)
+ sl := notInHeapSlice{(*notInHeap)(r), int(nbytes), int(nbytes)}
+ p.scav.index.chunks = *(*[]atomic.Uint8)(unsafe.Pointer(&sl))
+}
+
+// sysGrow performs architecture-dependent operations on heap
+// growth for the page allocator, such as mapping in new memory
+// for summaries. It also updates the length of the slices in
+// p.summary.
+//
+// base is the base of the newly-added heap memory and limit is
+// the first address past the end of the newly-added heap memory.
+// Both must be aligned to pallocChunkBytes.
+//
+// The caller must update p.start and p.end after calling sysGrow.
+func (p *pageAlloc) sysGrow(base, limit uintptr) {
+ if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 {
+ print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
+ throw("sysGrow bounds not aligned to pallocChunkBytes")
+ }
+
+ // addrRangeToSummaryRange converts a range of addresses into a range
+ // of summary indices which must be mapped to support those addresses
+ // in the summary range.
+ addrRangeToSummaryRange := func(level int, r addrRange) (int, int) {
+ sumIdxBase, sumIdxLimit := addrsToSummaryRange(level, r.base.addr(), r.limit.addr())
+ return blockAlignSummaryRange(level, sumIdxBase, sumIdxLimit)
+ }
+
+ // summaryRangeToSumAddrRange converts a range of indices in any
+ // level of p.summary into page-aligned addresses which cover that
+ // range of indices.
+ summaryRangeToSumAddrRange := func(level, sumIdxBase, sumIdxLimit int) addrRange {
+ baseOffset := alignDown(uintptr(sumIdxBase)*pallocSumBytes, physPageSize)
+ limitOffset := alignUp(uintptr(sumIdxLimit)*pallocSumBytes, physPageSize)
+ base := unsafe.Pointer(&p.summary[level][0])
+ return addrRange{
+ offAddr{uintptr(add(base, baseOffset))},
+ offAddr{uintptr(add(base, limitOffset))},
+ }
+ }
+
+ // addrRangeToSumAddrRange is a convenience function that converts
+ // an address range r to the address range of the given summary level
+ // that stores the summaries for r.
+ addrRangeToSumAddrRange := func(level int, r addrRange) addrRange {
+ sumIdxBase, sumIdxLimit := addrRangeToSummaryRange(level, r)
+ return summaryRangeToSumAddrRange(level, sumIdxBase, sumIdxLimit)
+ }
+
+ // Find the first inUse index which is strictly greater than base.
+ //
+ // Because this function will never be asked to remap the same memory
+ // twice, this index is effectively the index at which we would insert
+ // this new growth, and base will never overlap/be contained within
+ // any existing range.
+ //
+ // This will be used to look at what memory in the summary array is already
+ // mapped before and after this new range.
+ inUseIndex := p.inUse.findSucc(base)
+
+ // Walk up the radix tree and map summaries in as needed.
+ for l := range p.summary {
+ // Figure out what part of the summary array this new address space needs.
+ needIdxBase, needIdxLimit := addrRangeToSummaryRange(l, makeAddrRange(base, limit))
+
+ // Update the summary slices with a new upper-bound. This ensures
+ // we get tight bounds checks on at least the top bound.
+ //
+ // We must do this regardless of whether we map new memory.
+ if needIdxLimit > len(p.summary[l]) {
+ p.summary[l] = p.summary[l][:needIdxLimit]
+ }
+
+ // Compute the needed address range in the summary array for level l.
+ need := summaryRangeToSumAddrRange(l, needIdxBase, needIdxLimit)
+
+ // Prune need down to what needs to be newly mapped. Some parts of it may
+ // already be mapped by what inUse describes due to page alignment requirements
+ // for mapping. This pruning is valid because this function will
+ // never be asked to remap the same memory twice.
+ if inUseIndex > 0 {
+ need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex-1]))
+ }
+ if inUseIndex < len(p.inUse.ranges) {
+ need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex]))
+ }
+ // It's possible that after our pruning above, there's nothing new to map.
+ if need.size() == 0 {
+ continue
+ }
+
+ // Map and commit need.
+ sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat)
+ sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
+ p.summaryMappedReady += need.size()
+ }
+
+ // Update the scavenge index.
+ p.summaryMappedReady += p.scav.index.grow(base, limit, p.sysStat)
+}
+
+// grow increases the index's backing store in response to a heap growth.
+//
+// Returns the amount of memory added to sysStat.
+func (s *scavengeIndex) grow(base, limit uintptr, sysStat *sysMemStat) uintptr {
+ if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 {
+ print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
+ throw("sysGrow bounds not aligned to pallocChunkBytes")
+ }
+ // Map and commit the pieces of chunks that we need.
+ //
+ // We always map the full range from the minimum heap address to the
+ // maximum heap address. We don't do this for the summary structure
+ // because it's quite large and a discontiguous heap could cause a
+ // lot of memory to be used. In this situation, the worst case overhead
+ // is in the single-digit MiB if we map the whole thing.
+ //
+ // The base address of the backing store is always page-aligned,
+ // because it comes from the OS, so it's sufficient to align the
+ // index.
+ haveMin := s.min.Load()
+ haveMax := s.max.Load()
+ needMin := int32(alignDown(uintptr(chunkIndex(base)/8), physPageSize))
+ needMax := int32(alignUp(uintptr((chunkIndex(limit)+7)/8), physPageSize))
+ // Extend the range down to what we have, if there's no overlap.
+ if needMax < haveMin {
+ needMax = haveMin
+ }
+ if needMin > haveMax {
+ needMin = haveMax
+ }
+ have := makeAddrRange(
+ // Avoid a panic from indexing one past the last element.
+ uintptr(unsafe.Pointer(&s.chunks[0]))+uintptr(haveMin),
+ uintptr(unsafe.Pointer(&s.chunks[0]))+uintptr(haveMax),
+ )
+ need := makeAddrRange(
+ // Avoid a panic from indexing one past the last element.
+ uintptr(unsafe.Pointer(&s.chunks[0]))+uintptr(needMin),
+ uintptr(unsafe.Pointer(&s.chunks[0]))+uintptr(needMax),
+ )
+ // Subtract any overlap from rounding. We can't re-map memory because
+ // it'll be zeroed.
+ need = need.subtract(have)
+
+ // If we've got something to map, map it, and update the slice bounds.
+ if need.size() != 0 {
+ sysMap(unsafe.Pointer(need.base.addr()), need.size(), sysStat)
+ sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
+ // Update the indices only after the new memory is valid.
+ if haveMin == 0 || needMin < haveMin {
+ s.min.Store(needMin)
+ }
+ if haveMax == 0 || needMax > haveMax {
+ s.max.Store(needMax)
+ }
+ }
+ // Update minHeapIdx. Note that even if there's no mapping work to do,
+ // we may still have a new, lower minimum heap address.
+ minHeapIdx := s.minHeapIdx.Load()
+ if baseIdx := int32(chunkIndex(base) / 8); minHeapIdx == 0 || baseIdx < minHeapIdx {
+ s.minHeapIdx.Store(baseIdx)
+ }
+ return need.size()
+}
diff --git a/contrib/go/_std_1.19/src/runtime/mpagecache.go b/contrib/go/_std_1.19/src/runtime/mpagecache.go
new file mode 100644
index 0000000000..5bad4f789a
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mpagecache.go
@@ -0,0 +1,177 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+const pageCachePages = 8 * unsafe.Sizeof(pageCache{}.cache)
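+
+// With a 64-bit cache bitmap this works out to 64 pages, i.e. 512 KiB of
+// address space per pageCache, assuming 8 KiB runtime pages.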
+
+// pageCache represents a per-p cache of pages the allocator can
+// allocate from without a lock. More specifically, it represents
+// a pageCachePages*pageSize chunk of memory with 0 or more free
+// pages in it.
+type pageCache struct {
+ base uintptr // base address of the chunk
+ cache uint64 // 64-bit bitmap representing free pages (1 means free)
+ scav uint64 // 64-bit bitmap representing scavenged pages (1 means scavenged)
+}
+
+// empty returns true if the pageCache has no free pages, and false
+// otherwise.
+func (c *pageCache) empty() bool {
+ return c.cache == 0
+}
+
+// alloc allocates npages from the page cache and is the main entry
+// point for allocation.
+//
+// Returns a base address and the amount of scavenged memory in the
+// allocated region in bytes.
+//
+// Returns a base address of zero on failure, in which case the
+// amount of scavenged memory should be ignored.
+func (c *pageCache) alloc(npages uintptr) (uintptr, uintptr) {
+ if c.cache == 0 {
+ return 0, 0
+ }
+ if npages == 1 {
+ i := uintptr(sys.TrailingZeros64(c.cache))
+ scav := (c.scav >> i) & 1
+ c.cache &^= 1 << i // set bit to mark in-use
+ c.scav &^= 1 << i // clear bit to mark unscavenged
+ return c.base + i*pageSize, uintptr(scav) * pageSize
+ }
+ return c.allocN(npages)
+}
+
+// allocN is a helper which attempts to allocate npages worth of pages
+// from the cache. It represents the general case for allocating from
+// the page cache.
+//
+// Returns a base address and the amount of scavenged memory in the
+// allocated region in bytes.
+func (c *pageCache) allocN(npages uintptr) (uintptr, uintptr) {
+ i := findBitRange64(c.cache, uint(npages))
+ if i >= 64 {
+ return 0, 0
+ }
+ mask := ((uint64(1) << npages) - 1) << i
+ scav := sys.OnesCount64(c.scav & mask)
+ c.cache &^= mask // mark in-use bits
+ c.scav &^= mask // clear scavenged bits
+ return c.base + uintptr(i*pageSize), uintptr(scav) * pageSize
+}
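+
+// For example, with hypothetical values c.cache = 0b1100 (only pages 2
+// and 3 free) and npages = 2: findBitRange64 returns i = 2,
+// mask = ((1<<2)-1)<<2 = 0b1100, and allocN returns c.base + 2*pageSize
+// after clearing both cache bits.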
+
+// flush empties out unallocated free pages in the given cache
+// into p. Then, it clears the cache, such that empty returns
+// true.
+//
+// p.mheapLock must be held.
+//
+// Must run on the system stack because p.mheapLock must be held.
+//
+//go:systemstack
+func (c *pageCache) flush(p *pageAlloc) {
+ assertLockHeld(p.mheapLock)
+
+ if c.empty() {
+ return
+ }
+ ci := chunkIndex(c.base)
+ pi := chunkPageIndex(c.base)
+
+ // This method is called very infrequently, so just do the
+ // slower, safer thing by iterating over each bit individually.
+ for i := uint(0); i < 64; i++ {
+ if c.cache&(1<<i) != 0 {
+ p.chunkOf(ci).free1(pi + i)
+ }
+ if c.scav&(1<<i) != 0 {
+ p.chunkOf(ci).scavenged.setRange(pi+i, 1)
+ }
+ }
+ // Since this is a lot like a free, we need to make sure
+ // we update the searchAddr just like free does.
+ if b := (offAddr{c.base}); b.lessThan(p.searchAddr) {
+ p.searchAddr = b
+ }
+ p.update(c.base, pageCachePages, false, false)
+ *c = pageCache{}
+}
+
+// allocToCache acquires a pageCachePages-aligned chunk of free pages which
+// may not be contiguous, and returns a pageCache structure which owns the
+// chunk.
+//
+// p.mheapLock must be held.
+//
+// Must run on the system stack because p.mheapLock must be held.
+//
+//go:systemstack
+func (p *pageAlloc) allocToCache() pageCache {
+ assertLockHeld(p.mheapLock)
+
+ // If the searchAddr refers to a region which has a higher address than
+ // any known chunk, then we know we're out of memory.
+ if chunkIndex(p.searchAddr.addr()) >= p.end {
+ return pageCache{}
+ }
+ c := pageCache{}
+ ci := chunkIndex(p.searchAddr.addr()) // chunk index
+ var chunk *pallocData
+ if p.summary[len(p.summary)-1][ci] != 0 {
+ // Fast path: there's free pages at or near the searchAddr address.
+ chunk = p.chunkOf(ci)
+ j, _ := chunk.find(1, chunkPageIndex(p.searchAddr.addr()))
+ if j == ^uint(0) {
+ throw("bad summary data")
+ }
+ c = pageCache{
+ base: chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
+ cache: ^chunk.pages64(j),
+ scav: chunk.scavenged.block64(j),
+ }
+ } else {
+ // Slow path: the searchAddr address had nothing there, so go find
+ // the first free page the slow way.
+ addr, _ := p.find(1)
+ if addr == 0 {
+ // We failed to find adequate free space, so mark the searchAddr as OoM
+ // and return an empty pageCache.
+ p.searchAddr = maxSearchAddr()
+ return pageCache{}
+ }
+ ci := chunkIndex(addr)
+ chunk = p.chunkOf(ci)
+ c = pageCache{
+ base: alignDown(addr, 64*pageSize),
+ cache: ^chunk.pages64(chunkPageIndex(addr)),
+ scav: chunk.scavenged.block64(chunkPageIndex(addr)),
+ }
+ }
+
+ // Set the page bits as allocated and clear the scavenged bits, but
+ // be careful to only set and clear the relevant bits.
+ cpi := chunkPageIndex(c.base)
+ chunk.allocPages64(cpi, c.cache)
+ chunk.scavenged.clearBlock64(cpi, c.cache&c.scav /* free and scavenged */)
+
+ // Update as an allocation, but note that it's not contiguous.
+ p.update(c.base, pageCachePages, false, true)
+
+ // Set the search address to the last page represented by the cache.
+ // Since all of the pages in this block are going to the cache, and we
+ // searched for the first free page, we can confidently start at the
+ // next page.
+ //
+ // However, p.searchAddr is not allowed to point into unmapped heap memory
+ // unless it is maxSearchAddr, so make it the last page as opposed to
+ // the page after.
+ p.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
+ return c
+}
diff --git a/contrib/go/_std_1.18/src/runtime/mpallocbits.go b/contrib/go/_std_1.19/src/runtime/mpallocbits.go
index f63164becd..f63164becd 100644
--- a/contrib/go/_std_1.18/src/runtime/mpallocbits.go
+++ b/contrib/go/_std_1.19/src/runtime/mpallocbits.go
diff --git a/contrib/go/_std_1.19/src/runtime/mprof.go b/contrib/go/_std_1.19/src/runtime/mprof.go
new file mode 100644
index 0000000000..99a67b9a3a
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mprof.go
@@ -0,0 +1,1283 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Malloc profiling.
+// Patterned after tcmalloc's algorithms; shorter code.
+
+package runtime
+
+import (
+ "internal/abi"
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// NOTE(rsc): Everything here could use cas if contention became an issue.
+var (
+ // profInsertLock protects changes to the start of all *bucket linked lists
+ profInsertLock mutex
+ // profBlockLock protects the contents of every blockRecord struct
+ profBlockLock mutex
+ // profMemActiveLock protects the active field of every memRecord struct
+ profMemActiveLock mutex
+ // profMemFutureLock is a set of locks that protect the respective elements
+ // of the future array of every memRecord struct
+ profMemFutureLock [len(memRecord{}.future)]mutex
+)
+
+// All memory allocations are local and do not escape outside of the profiler.
+// The profiler is forbidden from referring to garbage-collected memory.
+
+const (
+ // profile types
+ memProfile bucketType = 1 + iota
+ blockProfile
+ mutexProfile
+
+ // size of bucket hash table
+ buckHashSize = 179999
+
+ // max depth of stack to record in bucket
+ maxStack = 32
+)
+
+type bucketType int
+
+// A bucket holds per-call-stack profiling information.
+// The representation is a bit sleazy, inherited from C.
+// This struct defines the bucket header. It is followed in
+// memory by the stack words and then the actual record
+// data, either a memRecord or a blockRecord.
+//
+// Per-call-stack profiling information.
+// Lookup by hashing call stack into a linked-list hash table.
+//
+// None of the fields in this bucket header are modified after
+// creation, including its next and allnext links.
+//
+// No heap pointers.
+//
+//go:notinheap
+type bucket struct {
+ next *bucket
+ allnext *bucket
+ typ bucketType // memBucket or blockBucket (includes mutexProfile)
+ hash uintptr
+ size uintptr
+ nstk uintptr
+}
+
+// A memRecord is the bucket data for a bucket of type memProfile,
+// part of the memory profile.
+type memRecord struct {
+ // The following complex 3-stage scheme of stats accumulation
+ // is required to obtain a consistent picture of mallocs and frees
+ // for some point in time.
+ // The problem is that mallocs come in real time, while frees
+ // come only after a GC during concurrent sweeping. So if we
+ // counted them naively, we would get a skew toward mallocs.
+ //
+ // Hence, we delay information to get consistent snapshots as
+ // of mark termination. Allocations count toward the next mark
+ // termination's snapshot, while sweep frees count toward the
+ // previous mark termination's snapshot:
+ //
+ //              MT          MT          MT          MT
+ //             .·|         .·|         .·|         .·|
+ //          .·˙  |      .·˙  |      .·˙  |      .·˙  |
+ //       .·˙     |   .·˙     |   .·˙     |   .·˙     |
+ //    .·˙        |.·˙        |.·˙        |.·˙        |
+ //
+ //       alloc → ▲ ← free
+ //               ┠┅┅┅┅┅┅┅┅┅┅┅P
+ //       C+2            →    C+1    →  C
+ //
+ //                   alloc → ▲ ← free
+ //                           ┠┅┅┅┅┅┅┅┅┅┅┅P
+ //                   C+2            →    C+1    →  C
+ //
+ // Since we can't publish a consistent snapshot until all of
+ // the sweep frees are accounted for, we wait until the next
+ // mark termination ("MT" above) to publish the previous mark
+ // termination's snapshot ("P" above). To do this, allocation
+ // and free events are accounted to *future* heap profile
+ // cycles ("C+n" above) and we only publish a cycle once all
+ // of the events from that cycle are done. Specifically:
+ //
+ // Mallocs are accounted to cycle C+2.
+ // Explicit frees are accounted to cycle C+2.
+ // GC frees (done during sweeping) are accounted to cycle C+1.
+ //
+ // After mark termination, we increment the global heap
+ // profile cycle counter and accumulate the stats from cycle C
+ // into the active profile.
+
+ // active is the currently published profile. A profiling
+ // cycle can be accumulated into active once it's complete.
+ active memRecordCycle
+
+ // future records the profile events we're counting for cycles
+ // that have not yet been published. This is a ring buffer
+ // indexed by the global heap profile cycle C and stores
+ // cycles C, C+1, and C+2. Unlike active, these counts are
+ // only for a single cycle; they are not cumulative across
+ // cycles.
+ //
+ // We store cycle C here because there's a window between when
+ // C becomes the active cycle and when we've flushed it to
+ // active.
+ future [3]memRecordCycle
+}
+
+// A memRecordCycle is one heap profile cycle's worth of allocation
+// and free counts.
+type memRecordCycle struct {
+ allocs, frees uintptr
+ alloc_bytes, free_bytes uintptr
+}
+
+// add accumulates b into a. It does not zero b.
+func (a *memRecordCycle) add(b *memRecordCycle) {
+ a.allocs += b.allocs
+ a.frees += b.frees
+ a.alloc_bytes += b.alloc_bytes
+ a.free_bytes += b.free_bytes
+}
+
+// A blockRecord is the bucket data for a bucket of type blockProfile,
+// which is used in blocking and mutex profiles.
+type blockRecord struct {
+ count float64
+ cycles int64
+}
+
+var (
+ mbuckets atomic.UnsafePointer // *bucket, memory profile buckets
+ bbuckets atomic.UnsafePointer // *bucket, blocking profile buckets
+ xbuckets atomic.UnsafePointer // *bucket, mutex profile buckets
+ buckhash atomic.UnsafePointer // *buckhashArray
+
+ mProfCycle mProfCycleHolder
+)
+
+type buckhashArray [buckHashSize]atomic.UnsafePointer // *bucket
+
+const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)
+
+// mProfCycleHolder holds the global heap profile cycle number (wrapped at
+// mProfCycleWrap, stored starting at bit 1), and a flag (stored at bit 0) to
+// indicate whether future[cycle] in all buckets has been queued to flush into
+// the active profile.
+type mProfCycleHolder struct {
+ value atomic.Uint32
+}
+
+// read returns the current cycle count.
+func (c *mProfCycleHolder) read() (cycle uint32) {
+ v := c.value.Load()
+ cycle = v >> 1
+ return cycle
+}
+
+// setFlushed sets the flushed flag. It returns the current cycle count and the
+// previous value of the flushed flag.
+func (c *mProfCycleHolder) setFlushed() (cycle uint32, alreadyFlushed bool) {
+ for {
+ prev := c.value.Load()
+ cycle = prev >> 1
+ alreadyFlushed = (prev & 0x1) != 0
+ next := prev | 0x1
+ if c.value.CompareAndSwap(prev, next) {
+ return cycle, alreadyFlushed
+ }
+ }
+}
+
+// increment increases the cycle count by one, wrapping the value at
+// mProfCycleWrap. It clears the flushed flag.
+func (c *mProfCycleHolder) increment() {
+ // We explicitly wrap mProfCycle rather than depending on
+ // uint wraparound because the memRecord.future ring does not
+ // itself wrap at a power of two.
+ for {
+ prev := c.value.Load()
+ cycle := prev >> 1
+ cycle = (cycle + 1) % mProfCycleWrap
+ next := cycle << 1
+ if c.value.CompareAndSwap(prev, next) {
+ break
+ }
+ }
+}
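+
+// To illustrate the packing: a stored value of 0b1011 decodes as
+// cycle = 0b101 = 5 (bits 1 and up) with the flushed flag set (bit 0);
+// increment would then store 6<<1 = 0b1100, clearing the flag.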
+
+// newBucket allocates a bucket with the given type and number of stack entries.
+func newBucket(typ bucketType, nstk int) *bucket {
+ size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
+ switch typ {
+ default:
+ throw("invalid profile bucket type")
+ case memProfile:
+ size += unsafe.Sizeof(memRecord{})
+ case blockProfile, mutexProfile:
+ size += unsafe.Sizeof(blockRecord{})
+ }
+
+ b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
+ b.typ = typ
+ b.nstk = uintptr(nstk)
+ return b
+}
+
+// stk returns the slice in b holding the stack.
+func (b *bucket) stk() []uintptr {
+ stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
+ return stk[:b.nstk:b.nstk]
+}
+
+// mp returns the memRecord associated with the memProfile bucket b.
+func (b *bucket) mp() *memRecord {
+ if b.typ != memProfile {
+ throw("bad use of bucket.mp")
+ }
+ data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
+ return (*memRecord)(data)
+}
+
+// bp returns the blockRecord associated with the blockProfile bucket b.
+func (b *bucket) bp() *blockRecord {
+ if b.typ != blockProfile && b.typ != mutexProfile {
+ throw("bad use of bucket.bp")
+ }
+ data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
+ return (*blockRecord)(data)
+}
+
+// Return the bucket for stk[0:nstk], allocating a new bucket if needed.
+func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
+ bh := (*buckhashArray)(buckhash.Load())
+ if bh == nil {
+ lock(&profInsertLock)
+ // check again under the lock
+ bh = (*buckhashArray)(buckhash.Load())
+ if bh == nil {
+ bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys))
+ if bh == nil {
+ throw("runtime: cannot allocate memory")
+ }
+ buckhash.StoreNoWB(unsafe.Pointer(bh))
+ }
+ unlock(&profInsertLock)
+ }
+
+ // Hash stack.
+ var h uintptr
+ for _, pc := range stk {
+ h += pc
+ h += h << 10
+ h ^= h >> 6
+ }
+ // hash in size
+ h += size
+ h += h << 10
+ h ^= h >> 6
+ // finalize
+ h += h << 3
+ h ^= h >> 11
+
+ i := int(h % buckHashSize)
+ // first check optimistically, without the lock
+ for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
+ if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
+ return b
+ }
+ }
+
+ if !alloc {
+ return nil
+ }
+
+ lock(&profInsertLock)
+ // check again under the insertion lock
+ for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
+ if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
+ unlock(&profInsertLock)
+ return b
+ }
+ }
+
+ // Create new bucket.
+ b := newBucket(typ, len(stk))
+ copy(b.stk(), stk)
+ b.hash = h
+ b.size = size
+
+ var allnext *atomic.UnsafePointer
+ if typ == memProfile {
+ allnext = &mbuckets
+ } else if typ == mutexProfile {
+ allnext = &xbuckets
+ } else {
+ allnext = &bbuckets
+ }
+
+ b.next = (*bucket)(bh[i].Load())
+ b.allnext = (*bucket)(allnext.Load())
+
+ bh[i].StoreNoWB(unsafe.Pointer(b))
+ allnext.StoreNoWB(unsafe.Pointer(b))
+
+ unlock(&profInsertLock)
+ return b
+}
+
+func eqslice(x, y []uintptr) bool {
+ if len(x) != len(y) {
+ return false
+ }
+ for i, xi := range x {
+ if xi != y[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// mProf_NextCycle publishes the next heap profile cycle and creates a
+// fresh heap profile cycle. This operation is fast and can be done
+// during STW. The caller must call mProf_Flush before calling
+// mProf_NextCycle again.
+//
+// This is called by mark termination during STW so allocations and
+// frees after the world is started again count towards a new heap
+// profiling cycle.
+func mProf_NextCycle() {
+ mProfCycle.increment()
+}
+
+// mProf_Flush flushes the events from the current heap profiling
+// cycle into the active profile. After this it is safe to start a new
+// heap profiling cycle with mProf_NextCycle.
+//
+// This is called by GC after mark termination starts the world. In
+// contrast with mProf_NextCycle, this is somewhat expensive, but safe
+// to do concurrently.
+func mProf_Flush() {
+ cycle, alreadyFlushed := mProfCycle.setFlushed()
+ if alreadyFlushed {
+ return
+ }
+
+ index := cycle % uint32(len(memRecord{}.future))
+ lock(&profMemActiveLock)
+ lock(&profMemFutureLock[index])
+ mProf_FlushLocked(index)
+ unlock(&profMemFutureLock[index])
+ unlock(&profMemActiveLock)
+}
+
+// mProf_FlushLocked flushes the events from the heap profiling cycle at index
+// into the active profile. The caller must hold the lock for the active profile
+// (profMemActiveLock) and for the profiling cycle at index
+// (profMemFutureLock[index]).
+func mProf_FlushLocked(index uint32) {
+ assertLockHeld(&profMemActiveLock)
+ assertLockHeld(&profMemFutureLock[index])
+ head := (*bucket)(mbuckets.Load())
+ for b := head; b != nil; b = b.allnext {
+ mp := b.mp()
+
+ // Flush cycle C into the published profile and clear
+ // it for reuse.
+ mpc := &mp.future[index]
+ mp.active.add(mpc)
+ *mpc = memRecordCycle{}
+ }
+}
+
+// mProf_PostSweep records that all sweep frees for this GC cycle have
+// completed. This has the effect of publishing the heap profile
+// snapshot as of the last mark termination without advancing the heap
+// profile cycle.
+func mProf_PostSweep() {
+ // Flush cycle C+1 to the active profile so everything as of
+ // the last mark termination becomes visible. *Don't* advance
+ // the cycle, since we're still accumulating allocs in cycle
+ // C+2, which have to become C+1 in the next mark termination
+ // and so on.
+ cycle := mProfCycle.read() + 1
+
+ index := cycle % uint32(len(memRecord{}.future))
+ lock(&profMemActiveLock)
+ lock(&profMemFutureLock[index])
+ mProf_FlushLocked(index)
+ unlock(&profMemFutureLock[index])
+ unlock(&profMemActiveLock)
+}
+
+// Called by malloc to record a profiled block.
+func mProf_Malloc(p unsafe.Pointer, size uintptr) {
+ var stk [maxStack]uintptr
+ nstk := callers(4, stk[:])
+
+ index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future))
+
+ b := stkbucket(memProfile, size, stk[:nstk], true)
+ mp := b.mp()
+ mpc := &mp.future[index]
+
+ lock(&profMemFutureLock[index])
+ mpc.allocs++
+ mpc.alloc_bytes += size
+ unlock(&profMemFutureLock[index])
+
+ // Setprofilebucket locks a bunch of other mutexes, so we call it outside of
+ // the profiler locks. This reduces potential contention and chances of
+ // deadlocks. Since the object must be alive during the call to
+ // mProf_Malloc, it's fine to do this non-atomically.
+ systemstack(func() {
+ setprofilebucket(p, b)
+ })
+}
+
+// Called when freeing a profiled block.
+func mProf_Free(b *bucket, size uintptr) {
+ index := (mProfCycle.read() + 1) % uint32(len(memRecord{}.future))
+
+ mp := b.mp()
+ mpc := &mp.future[index]
+
+ lock(&profMemFutureLock[index])
+ mpc.frees++
+ mpc.free_bytes += size
+ unlock(&profMemFutureLock[index])
+}
+
+var blockprofilerate uint64 // in CPU ticks
+
+// SetBlockProfileRate controls the fraction of goroutine blocking events
+// that are reported in the blocking profile. The profiler aims to sample
+// an average of one blocking event per rate nanoseconds spent blocked.
+//
+// To include every blocking event in the profile, pass rate = 1.
+// To turn off profiling entirely, pass rate <= 0.
+func SetBlockProfileRate(rate int) {
+ var r int64
+ if rate <= 0 {
+ r = 0 // disable profiling
+ } else if rate == 1 {
+ r = 1 // profile everything
+ } else {
+ // convert ns to cycles, use float64 to prevent overflow during multiplication
+ r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
+ if r == 0 {
+ r = 1
+ }
+ }
+
+ atomic.Store64(&blockprofilerate, uint64(r))
+}
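+
+// Illustrative usage (a sketch, not part of the upstream file): sample
+// roughly one blocking event per millisecond spent blocked, and disable
+// the profile again when done.
+//
+//	runtime.SetBlockProfileRate(1_000_000) // one sample per 1e6 ns blocked
+//	defer runtime.SetBlockProfileRate(0)   // rate <= 0 turns profiling off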
+
+func blockevent(cycles int64, skip int) {
+ if cycles <= 0 {
+ cycles = 1
+ }
+
+ rate := int64(atomic.Load64(&blockprofilerate))
+ if blocksampled(cycles, rate) {
+ saveblockevent(cycles, rate, skip+1, blockProfile)
+ }
+}
+
+// blocksampled returns true for all events where cycles >= rate. Shorter
+// events have a cycles/rate random chance of returning true.
+func blocksampled(cycles, rate int64) bool {
+ if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
+ return false
+ }
+ return true
+}
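+
+// Worked example (illustrative, not in the upstream source): with
+// rate = 10000 and an event that blocked for cycles = 2500, the event is
+// kept only when fastrand()%rate <= cycles, i.e. with probability roughly
+// cycles/rate = 1/4. saveblockevent below compensates by weighting such an
+// event by rate/cycles = 4, so its expected contribution stays unbiased.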
+
+func saveblockevent(cycles, rate int64, skip int, which bucketType) {
+ gp := getg()
+ var nstk int
+ var stk [maxStack]uintptr
+ if gp.m.curg == nil || gp.m.curg == gp {
+ nstk = callers(skip, stk[:])
+ } else {
+ nstk = gcallers(gp.m.curg, skip, stk[:])
+ }
+ b := stkbucket(which, 0, stk[:nstk], true)
+ bp := b.bp()
+
+ lock(&profBlockLock)
+ if which == blockProfile && cycles < rate {
+ // Remove sampling bias, see discussion on http://golang.org/cl/299991.
+ bp.count += float64(rate) / float64(cycles)
+ bp.cycles += rate
+ } else {
+ bp.count++
+ bp.cycles += cycles
+ }
+ unlock(&profBlockLock)
+}
+
+var mutexprofilerate uint64 // fraction sampled
+
+// SetMutexProfileFraction controls the fraction of mutex contention events
+// that are reported in the mutex profile. On average 1/rate events are
+// reported. The previous rate is returned.
+//
+// To turn off profiling entirely, pass rate 0.
+// To just read the current rate, pass rate < 0.
+// (For rate > 1, the details of sampling may change.)
+func SetMutexProfileFraction(rate int) int {
+ if rate < 0 {
+ return int(mutexprofilerate)
+ }
+ old := mutexprofilerate
+ atomic.Store64(&mutexprofilerate, uint64(rate))
+ return int(old)
+}
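+
+// Illustrative usage (a sketch, not part of the upstream file): report on
+// average one of every five mutex contention events, restoring the
+// previous setting afterwards.
+//
+//	prev := runtime.SetMutexProfileFraction(5)
+//	defer runtime.SetMutexProfileFraction(prev)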
+
+//go:linkname mutexevent sync.event
+func mutexevent(cycles int64, skip int) {
+ if cycles < 0 {
+ cycles = 0
+ }
+ rate := int64(atomic.Load64(&mutexprofilerate))
+ // TODO(pjw): measure impact of always calling fastrand vs using something
+ // like malloc.go:nextSample()
+ if rate > 0 && int64(fastrand())%rate == 0 {
+ saveblockevent(cycles, rate, skip+1, mutexProfile)
+ }
+}
+
+// Go interface to profile data.
+
+// A StackRecord describes a single execution stack.
+type StackRecord struct {
+ Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
+}
+
+// Stack returns the stack trace associated with the record,
+// a prefix of r.Stack0.
+func (r *StackRecord) Stack() []uintptr {
+ for i, v := range r.Stack0 {
+ if v == 0 {
+ return r.Stack0[0:i]
+ }
+ }
+ return r.Stack0[0:]
+}
+
+// MemProfileRate controls the fraction of memory allocations
+// that are recorded and reported in the memory profile.
+// The profiler aims to sample an average of
+// one allocation per MemProfileRate bytes allocated.
+//
+// To include every allocated block in the profile, set MemProfileRate to 1.
+// To turn off profiling entirely, set MemProfileRate to 0.
+//
+// The tools that process the memory profiles assume that the
+// profile rate is constant across the lifetime of the program
+// and equal to the current value. Programs that change the
+// memory profiling rate should do so just once, as early as
+// possible in the execution of the program (for example,
+// at the beginning of main).
+var MemProfileRate int = defaultMemProfileRate(512 * 1024)
+
+// defaultMemProfileRate returns 0 if disableMemoryProfiling is set.
+// It exists primarily for the godoc rendering of MemProfileRate
+// above.
+func defaultMemProfileRate(v int) int {
+ if disableMemoryProfiling {
+ return 0
+ }
+ return v
+}
+
+// disableMemoryProfiling is set by the linker if runtime.MemProfile
+// is not used and the link type guarantees nobody else could use it
+// elsewhere.
+var disableMemoryProfiling bool
+
+// A MemProfileRecord describes the live objects allocated
+// by a particular call sequence (stack trace).
+type MemProfileRecord struct {
+ AllocBytes, FreeBytes int64 // number of bytes allocated, freed
+ AllocObjects, FreeObjects int64 // number of objects allocated, freed
+ Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
+}
+
+// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
+func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
+
+// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
+func (r *MemProfileRecord) InUseObjects() int64 {
+ return r.AllocObjects - r.FreeObjects
+}
+
+// Stack returns the stack trace associated with the record,
+// a prefix of r.Stack0.
+func (r *MemProfileRecord) Stack() []uintptr {
+ for i, v := range r.Stack0 {
+ if v == 0 {
+ return r.Stack0[0:i]
+ }
+ }
+ return r.Stack0[0:]
+}
+
+// MemProfile returns a profile of memory allocated and freed per allocation
+// site.
+//
+// MemProfile returns n, the number of records in the current memory profile.
+// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
+// If len(p) < n, MemProfile does not change p and returns n, false.
+//
+// If inuseZero is true, the profile includes allocation records
+// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
+// These are sites where memory was allocated, but it has all
+// been released back to the runtime.
+//
+// The returned profile may be up to two garbage collection cycles old.
+// This is to avoid skewing the profile toward allocations; because
+// allocations happen in real time but frees are delayed until the garbage
+// collector performs sweeping, the profile only accounts for allocations
+// that have had a chance to be freed by the garbage collector.
+//
+// Most clients should use the runtime/pprof package or
+// the testing package's -test.memprofile flag instead
+// of calling MemProfile directly.
+func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
+ cycle := mProfCycle.read()
+ // If we're between mProf_NextCycle and mProf_Flush, take care
+ // of flushing to the active profile so we only have to look
+ // at the active profile below.
+ index := cycle % uint32(len(memRecord{}.future))
+ lock(&profMemActiveLock)
+ lock(&profMemFutureLock[index])
+ mProf_FlushLocked(index)
+ unlock(&profMemFutureLock[index])
+ clear := true
+ head := (*bucket)(mbuckets.Load())
+ for b := head; b != nil; b = b.allnext {
+ mp := b.mp()
+ if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
+ n++
+ }
+ if mp.active.allocs != 0 || mp.active.frees != 0 {
+ clear = false
+ }
+ }
+ if clear {
+ // Absolutely no data, suggesting that a garbage collection
+ // has not yet happened. In order to allow profiling when
+ // garbage collection is disabled from the beginning of execution,
+ // accumulate all of the cycles, and recount buckets.
+ n = 0
+ for b := head; b != nil; b = b.allnext {
+ mp := b.mp()
+ for c := range mp.future {
+ lock(&profMemFutureLock[c])
+ mp.active.add(&mp.future[c])
+ mp.future[c] = memRecordCycle{}
+ unlock(&profMemFutureLock[c])
+ }
+ if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
+ n++
+ }
+ }
+ }
+ if n <= len(p) {
+ ok = true
+ idx := 0
+ for b := head; b != nil; b = b.allnext {
+ mp := b.mp()
+ if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
+ record(&p[idx], b)
+ idx++
+ }
+ }
+ }
+ unlock(&profMemActiveLock)
+ return
+}
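+
+// Illustrative usage (a sketch, not part of the upstream file): the n/ok
+// contract above is typically consumed with a grow-and-retry loop, since
+// new records may appear between the sizing call and the copying call.
+//
+//	var p []runtime.MemProfileRecord
+//	n, ok := runtime.MemProfile(nil, true)
+//	for !ok {
+//		p = make([]runtime.MemProfileRecord, n+50) // headroom for growth
+//		n, ok = runtime.MemProfile(p, true)
+//	}
+//	p = p[:n] // p now holds a consistent snapshot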
+
+// Write b's data to r.
+func record(r *MemProfileRecord, b *bucket) {
+ mp := b.mp()
+ r.AllocBytes = int64(mp.active.alloc_bytes)
+ r.FreeBytes = int64(mp.active.free_bytes)
+ r.AllocObjects = int64(mp.active.allocs)
+ r.FreeObjects = int64(mp.active.frees)
+ if raceenabled {
+ racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
+ }
+ if msanenabled {
+ msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
+ }
+ if asanenabled {
+ asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
+ }
+ copy(r.Stack0[:], b.stk())
+ for i := int(b.nstk); i < len(r.Stack0); i++ {
+ r.Stack0[i] = 0
+ }
+}
+
+func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
+ lock(&profMemActiveLock)
+ head := (*bucket)(mbuckets.Load())
+ for b := head; b != nil; b = b.allnext {
+ mp := b.mp()
+ fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
+ }
+ unlock(&profMemActiveLock)
+}
+
+// BlockProfileRecord describes blocking events originated
+// at a particular call sequence (stack trace).
+type BlockProfileRecord struct {
+ Count int64
+ Cycles int64
+ StackRecord
+}
+
+// BlockProfile returns n, the number of records in the current blocking profile.
+// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
+// If len(p) < n, BlockProfile does not change p and returns n, false.
+//
+// Most clients should use the runtime/pprof package or
+// the testing package's -test.blockprofile flag instead
+// of calling BlockProfile directly.
+func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
+ lock(&profBlockLock)
+ head := (*bucket)(bbuckets.Load())
+ for b := head; b != nil; b = b.allnext {
+ n++
+ }
+ if n <= len(p) {
+ ok = true
+ for b := head; b != nil; b = b.allnext {
+ bp := b.bp()
+ r := &p[0]
+ r.Count = int64(bp.count)
+ // Prevent callers from having to worry about division by zero errors.
+ // See discussion on http://golang.org/cl/299991.
+ if r.Count == 0 {
+ r.Count = 1
+ }
+ r.Cycles = bp.cycles
+ if raceenabled {
+ racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
+ }
+ if msanenabled {
+ msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
+ }
+ if asanenabled {
+ asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
+ }
+ i := copy(r.Stack0[:], b.stk())
+ for ; i < len(r.Stack0); i++ {
+ r.Stack0[i] = 0
+ }
+ p = p[1:]
+ }
+ }
+ unlock(&profBlockLock)
+ return
+}
+
+// MutexProfile returns n, the number of records in the current mutex profile.
+// If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
+// Otherwise, MutexProfile does not change p, and returns n, false.
+//
+// Most clients should use the runtime/pprof package
+// instead of calling MutexProfile directly.
+func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
+ lock(&profBlockLock)
+ head := (*bucket)(xbuckets.Load())
+ for b := head; b != nil; b = b.allnext {
+ n++
+ }
+ if n <= len(p) {
+ ok = true
+ for b := head; b != nil; b = b.allnext {
+ bp := b.bp()
+ r := &p[0]
+ r.Count = int64(bp.count)
+ r.Cycles = bp.cycles
+ i := copy(r.Stack0[:], b.stk())
+ for ; i < len(r.Stack0); i++ {
+ r.Stack0[i] = 0
+ }
+ p = p[1:]
+ }
+ }
+ unlock(&profBlockLock)
+ return
+}
+
+// ThreadCreateProfile returns n, the number of records in the thread creation profile.
+// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
+// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
+//
+// Most clients should use the runtime/pprof package instead
+// of calling ThreadCreateProfile directly.
+func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
+ first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
+ for mp := first; mp != nil; mp = mp.alllink {
+ n++
+ }
+ if n <= len(p) {
+ ok = true
+ i := 0
+ for mp := first; mp != nil; mp = mp.alllink {
+ p[i].Stack0 = mp.createstack
+ i++
+ }
+ }
+ return
+}
+
+//go:linkname runtime_goroutineProfileWithLabels runtime/pprof.runtime_goroutineProfileWithLabels
+func runtime_goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+ return goroutineProfileWithLabels(p, labels)
+}
+
+const go119ConcurrentGoroutineProfile = true
+
+// labels may be nil. If labels is non-nil, it must have the same length as p.
+func goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+ if labels != nil && len(labels) != len(p) {
+ labels = nil
+ }
+
+ if go119ConcurrentGoroutineProfile {
+ return goroutineProfileWithLabelsConcurrent(p, labels)
+ }
+ return goroutineProfileWithLabelsSync(p, labels)
+}
+
+var goroutineProfile = struct {
+ sema uint32
+ active bool
+ offset atomic.Int64
+ records []StackRecord
+ labels []unsafe.Pointer
+}{
+ sema: 1,
+}
+
+// goroutineProfileState indicates the status of a goroutine's stack for the
+// current in-progress goroutine profile. Goroutines' stacks are initially
+// "Absent" from the profile, and end up "Satisfied" by the time the profile is
+// complete. While a goroutine's stack is being captured, its
+// goroutineProfileState will be "InProgress" and it will not be able to run
+// until the capture completes and the state moves to "Satisfied".
+//
+// Some goroutines move directly to the "Satisfied" state: the finalizer
+// goroutine (which at various times can be either a "system" or a "user"
+// goroutine), the goroutine that is coordinating the profile, and any
+// goroutines created during the profile.
+type goroutineProfileState uint32
+
+const (
+ goroutineProfileAbsent goroutineProfileState = iota
+ goroutineProfileInProgress
+ goroutineProfileSatisfied
+)
+
+type goroutineProfileStateHolder atomic.Uint32
+
+func (p *goroutineProfileStateHolder) Load() goroutineProfileState {
+ return goroutineProfileState((*atomic.Uint32)(p).Load())
+}
+
+func (p *goroutineProfileStateHolder) Store(value goroutineProfileState) {
+ (*atomic.Uint32)(p).Store(uint32(value))
+}
+
+func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileState) bool {
+ return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new))
+}
+
+func goroutineProfileWithLabelsConcurrent(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+ semacquire(&goroutineProfile.sema)
+
+ ourg := getg()
+
+ stopTheWorld("profile")
+ // Using gcount while the world is stopped should give us a consistent view
+ // of the number of live goroutines, minus the number of goroutines that are
+ // alive and permanently marked as "system". But to make this count agree
+ // with what we'd get from isSystemGoroutine, we need special handling for
+ // goroutines that can vary between user and system to ensure that the count
+ // doesn't change during the collection. So, check the finalizer goroutine
+ // in particular.
+ n = int(gcount())
+ if fingRunning {
+ n++
+ }
+
+ if n > len(p) {
+ // There's not enough space in p to store the whole profile, so (per the
+ // contract of runtime.GoroutineProfile) we're not allowed to write to p
+ // at all and must return n, false.
+ startTheWorld()
+ semrelease(&goroutineProfile.sema)
+ return n, false
+ }
+
+ // Save current goroutine.
+ sp := getcallersp()
+ pc := getcallerpc()
+ systemstack(func() {
+ saveg(pc, sp, ourg, &p[0])
+ })
+ ourg.goroutineProfiled.Store(goroutineProfileSatisfied)
+ goroutineProfile.offset.Store(1)
+
+ // Prepare for all other goroutines to enter the profile. Aside from ourg,
+ // every goroutine struct in the allgs list has its goroutineProfiled field
+ // cleared. Any goroutine created from this point on (while
+ // goroutineProfile.active is set) will start with its goroutineProfiled
+ // field set to goroutineProfileSatisfied.
+ goroutineProfile.active = true
+ goroutineProfile.records = p
+ goroutineProfile.labels = labels
+ // The finalizer goroutine needs special handling because it can vary over
+ // time between being a user goroutine (eligible for this profile) and a
+ // system goroutine (to be excluded). Pick one before restarting the world.
+ if fing != nil {
+ fing.goroutineProfiled.Store(goroutineProfileSatisfied)
+ if readgstatus(fing) != _Gdead && !isSystemGoroutine(fing, false) {
+ doRecordGoroutineProfile(fing)
+ }
+ }
+ startTheWorld()
+
+ // Visit each goroutine that existed as of the startTheWorld call above.
+ //
+ // New goroutines may not be in this list, but we didn't want to know about
+ // them anyway. If they do appear in this list (via reusing a dead goroutine
+ // struct, or racing to launch between the world restarting and us getting
+ // the list), they will already have their goroutineProfiled field set to
+ // goroutineProfileSatisfied before their state transitions out of _Gdead.
+ //
+ // Any goroutine that the scheduler tries to execute concurrently with this
+ // call will start by adding itself to the profile (before the act of
+ // executing can cause any changes in its stack).
+ forEachGRace(func(gp1 *g) {
+ tryRecordGoroutineProfile(gp1, Gosched)
+ })
+
+ stopTheWorld("profile cleanup")
+ endOffset := goroutineProfile.offset.Swap(0)
+ goroutineProfile.active = false
+ goroutineProfile.records = nil
+ goroutineProfile.labels = nil
+ startTheWorld()
+
+ // Restore the invariant that every goroutine struct in allgs has its
+ // goroutineProfiled field cleared.
+ forEachGRace(func(gp1 *g) {
+ gp1.goroutineProfiled.Store(goroutineProfileAbsent)
+ })
+
+ if raceenabled {
+ raceacquire(unsafe.Pointer(&labelSync))
+ }
+
+ if n != int(endOffset) {
+ // It's a big surprise that the number of goroutines changed while we
+ // were collecting the profile. But probably better to return a
+ // truncated profile than to crash the whole process.
+ //
+ // For instance, needm moves a goroutine out of the _Gdead state and so
+ // might be able to change the goroutine count without interacting with
+ // the scheduler. For code like that, the race windows are small and the
+ // combination of features is uncommon, so it's hard to be (and remain)
+ // sure we've caught them all.
+ }
+
+ semrelease(&goroutineProfile.sema)
+ return n, true
+}
+
+// tryRecordGoroutineProfileWB asserts that write barriers are allowed and calls
+// tryRecordGoroutineProfile.
+//
+//go:yeswritebarrierrec
+func tryRecordGoroutineProfileWB(gp1 *g) {
+ if getg().m.p.ptr() == nil {
+ throw("no P available, write barriers are forbidden")
+ }
+ tryRecordGoroutineProfile(gp1, osyield)
+}
+
+// tryRecordGoroutineProfile ensures that gp1 has the appropriate representation
+// in the current goroutine profile: either that it should not be profiled, or
+// that a snapshot of its call stack and labels are now in the profile.
+func tryRecordGoroutineProfile(gp1 *g, yield func()) {
+ if readgstatus(gp1) == _Gdead {
+ // Dead goroutines should not appear in the profile. Goroutines that
+ // start while profile collection is active will get goroutineProfiled
+ // set to goroutineProfileSatisfied before transitioning out of _Gdead,
+ // so here we check _Gdead first.
+ return
+ }
+ if isSystemGoroutine(gp1, true) {
+ // System goroutines should not appear in the profile. (The finalizer
+ // goroutine is marked as "already profiled".)
+ return
+ }
+
+ for {
+ prev := gp1.goroutineProfiled.Load()
+ if prev == goroutineProfileSatisfied {
+ // This goroutine is already in the profile (or is new since the
+ // start of collection, so shouldn't appear in the profile).
+ break
+ }
+ if prev == goroutineProfileInProgress {
+ // Something else is adding gp1 to the goroutine profile right now.
+ // Give that a moment to finish.
+ yield()
+ continue
+ }
+
+ // While we have gp1.goroutineProfiled set to
+ // goroutineProfileInProgress, gp1 may appear _Grunnable but will not
+ // actually be able to run. Disable preemption for ourselves, to make
+ // sure we finish profiling gp1 right away instead of leaving it stuck
+ // in this limbo.
+ mp := acquirem()
+ if gp1.goroutineProfiled.CompareAndSwap(goroutineProfileAbsent, goroutineProfileInProgress) {
+ doRecordGoroutineProfile(gp1)
+ gp1.goroutineProfiled.Store(goroutineProfileSatisfied)
+ }
+ releasem(mp)
+ }
+}
+
+// doRecordGoroutineProfile writes gp1's call stack and labels to an in-progress
+// goroutine profile. Preemption is disabled.
+//
+// This may be called via tryRecordGoroutineProfile in two ways: by the
+// goroutine that is coordinating the goroutine profile (running on its own
+// stack), or from the scheduler in preparation to execute gp1 (running on the
+// system stack).
+func doRecordGoroutineProfile(gp1 *g) {
+ if readgstatus(gp1) == _Grunning {
+ print("doRecordGoroutineProfile gp1=", gp1.goid, "\n")
+ throw("cannot read stack of running goroutine")
+ }
+
+ offset := int(goroutineProfile.offset.Add(1)) - 1
+
+ if offset >= len(goroutineProfile.records) {
+ // Should be impossible, but better to return a truncated profile than
+ // to crash the entire process at this point. Instead, deal with it in
+ // goroutineProfileWithLabelsConcurrent where we have more context.
+ return
+ }
+
+ // saveg calls gentraceback, which may call cgo traceback functions. When
+ // called from the scheduler, this is on the system stack already so
+ // traceback.go:cgoContextPCs will avoid calling back into the scheduler.
+ //
+ // When called from the goroutine coordinating the profile, we still have
+ // set gp1.goroutineProfiled to goroutineProfileInProgress and so are still
+ // preventing it from being truly _Grunnable. So we'll use the system stack
+ // to avoid schedule delays.
+ systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &goroutineProfile.records[offset]) })
+
+ if goroutineProfile.labels != nil {
+ goroutineProfile.labels[offset] = gp1.labels
+ }
+}
+
+func goroutineProfileWithLabelsSync(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+ gp := getg()
+
+ isOK := func(gp1 *g) bool {
+ // Checking isSystemGoroutine here makes GoroutineProfile
+ // consistent with both NumGoroutine and Stack.
+ return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false)
+ }
+
+ stopTheWorld("profile")
+
+ // World is stopped, no locking required.
+ n = 1
+ forEachGRace(func(gp1 *g) {
+ if isOK(gp1) {
+ n++
+ }
+ })
+
+ if n <= len(p) {
+ ok = true
+ r, lbl := p, labels
+
+ // Save current goroutine.
+ sp := getcallersp()
+ pc := getcallerpc()
+ systemstack(func() {
+ saveg(pc, sp, gp, &r[0])
+ })
+ r = r[1:]
+
+ // If we have a place to put our goroutine labelmap, insert it there.
+ if labels != nil {
+ lbl[0] = gp.labels
+ lbl = lbl[1:]
+ }
+
+ // Save other goroutines.
+ forEachGRace(func(gp1 *g) {
+ if !isOK(gp1) {
+ return
+ }
+
+ if len(r) == 0 {
+ // Should be impossible, but better to return a
+ // truncated profile than to crash the entire process.
+ return
+ }
+ // saveg calls gentraceback, which may call cgo traceback functions.
+ // The world is stopped, so it cannot use cgocall (which will be
+ // blocked at exitsyscall). Do it on the system stack so it won't
+		// call into the scheduler (see traceback.go:cgoContextPCs).
+ systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &r[0]) })
+ if labels != nil {
+ lbl[0] = gp1.labels
+ lbl = lbl[1:]
+ }
+ r = r[1:]
+ })
+ }
+
+ if raceenabled {
+ raceacquire(unsafe.Pointer(&labelSync))
+ }
+
+ startTheWorld()
+ return n, ok
+}
+
+// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
+// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
+// If len(p) < n, GoroutineProfile does not change p and returns n, false.
+//
+// Most clients should use the runtime/pprof package instead
+// of calling GoroutineProfile directly.
+func GoroutineProfile(p []StackRecord) (n int, ok bool) {
+	return goroutineProfileWithLabels(p, nil)
+}
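+
+// Illustrative usage (a sketch, not part of the upstream file): size the
+// slice from NumGoroutine plus some headroom, retrying if goroutines were
+// created in between.
+//
+//	p := make([]runtime.StackRecord, runtime.NumGoroutine()+10)
+//	n, ok := runtime.GoroutineProfile(p)
+//	for !ok {
+//		p = make([]runtime.StackRecord, n+10)
+//		n, ok = runtime.GoroutineProfile(p)
+//	}
+//	p = p[:n]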
+
+func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
+ n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0)
+ if n < len(r.Stack0) {
+ r.Stack0[n] = 0
+ }
+}
+
+// Stack formats a stack trace of the calling goroutine into buf
+// and returns the number of bytes written to buf.
+// If all is true, Stack formats stack traces of all other goroutines
+// into buf after the trace for the current goroutine.
+func Stack(buf []byte, all bool) int {
+ if all {
+ stopTheWorld("stack trace")
+ }
+
+ n := 0
+ if len(buf) > 0 {
+ gp := getg()
+ sp := getcallersp()
+ pc := getcallerpc()
+ systemstack(func() {
+ g0 := getg()
+ // Force traceback=1 to override GOTRACEBACK setting,
+ // so that Stack's results are consistent.
+ // GOTRACEBACK is only about crash dumps.
+ g0.m.traceback = 1
+ g0.writebuf = buf[0:0:len(buf)]
+ goroutineheader(gp)
+ traceback(pc, sp, 0, gp)
+ if all {
+ tracebackothers(gp)
+ }
+ g0.m.traceback = 0
+ n = len(g0.writebuf)
+ g0.writebuf = nil
+ })
+ }
+
+ if all {
+ startTheWorld()
+ }
+ return n
+}
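+
+// Illustrative usage (a sketch, not part of the upstream file): because
+// Stack truncates to len(buf), callers typically double the buffer until
+// the trace fits.
+//
+//	buf := make([]byte, 1<<16)
+//	for {
+//		n := runtime.Stack(buf, true)
+//		if n < len(buf) {
+//			os.Stderr.Write(buf[:n]) // complete trace
+//			break
+//		}
+//		buf = make([]byte, 2*len(buf))
+//	}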
+
+// Tracing of alloc/free/gc.
+
+var tracelock mutex
+
+func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
+ lock(&tracelock)
+ gp := getg()
+ gp.m.traceback = 2
+ if typ == nil {
+ print("tracealloc(", p, ", ", hex(size), ")\n")
+ } else {
+ print("tracealloc(", p, ", ", hex(size), ", ", typ.string(), ")\n")
+ }
+ if gp.m.curg == nil || gp == gp.m.curg {
+ goroutineheader(gp)
+ pc := getcallerpc()
+ sp := getcallersp()
+ systemstack(func() {
+ traceback(pc, sp, 0, gp)
+ })
+ } else {
+ goroutineheader(gp.m.curg)
+ traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
+ }
+ print("\n")
+ gp.m.traceback = 0
+ unlock(&tracelock)
+}
+
+func tracefree(p unsafe.Pointer, size uintptr) {
+ lock(&tracelock)
+ gp := getg()
+ gp.m.traceback = 2
+ print("tracefree(", p, ", ", hex(size), ")\n")
+ goroutineheader(gp)
+ pc := getcallerpc()
+ sp := getcallersp()
+ systemstack(func() {
+ traceback(pc, sp, 0, gp)
+ })
+ print("\n")
+ gp.m.traceback = 0
+ unlock(&tracelock)
+}
+
+func tracegc() {
+ lock(&tracelock)
+ gp := getg()
+ gp.m.traceback = 2
+ print("tracegc()\n")
+ // running on m->g0 stack; show all non-g0 goroutines
+ tracebackothers(gp)
+ print("end tracegc\n")
+ print("\n")
+ gp.m.traceback = 0
+ unlock(&tracelock)
+}
diff --git a/contrib/go/_std_1.19/src/runtime/mranges.go b/contrib/go/_std_1.19/src/runtime/mranges.go
new file mode 100644
index 0000000000..9cf83cc613
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mranges.go
@@ -0,0 +1,436 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Address range data structure.
+//
+// This file contains an implementation of a data structure which
+// manages ordered address ranges.
+
+package runtime
+
+import (
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// addrRange represents a region of address space.
+//
+// An addrRange must never span a gap in the address space.
+type addrRange struct {
+ // base and limit together represent the region of address space
+ // [base, limit). That is, base is inclusive, limit is exclusive.
+	// These are addresses over an offset view of the address space on
+ // platforms with a segmented address space, that is, on platforms
+ // where arenaBaseOffset != 0.
+ base, limit offAddr
+}
+
+// makeAddrRange creates a new address range from two virtual addresses.
+//
+// Throws if the base and limit are not in the same memory segment.
+func makeAddrRange(base, limit uintptr) addrRange {
+ r := addrRange{offAddr{base}, offAddr{limit}}
+ if (base-arenaBaseOffset >= base) != (limit-arenaBaseOffset >= limit) {
+ throw("addr range base and limit are not in the same memory segment")
+ }
+ return r
+}
+
+// size returns the size of the range represented in bytes.
+func (a addrRange) size() uintptr {
+ if !a.base.lessThan(a.limit) {
+ return 0
+ }
+ // Subtraction is safe because limit and base must be in the same
+ // segment of the address space.
+ return a.limit.diff(a.base)
+}
+
+// contains returns whether or not the range contains a given address.
+func (a addrRange) contains(addr uintptr) bool {
+ return a.base.lessEqual(offAddr{addr}) && (offAddr{addr}).lessThan(a.limit)
+}
+
+// subtract removes any overlap with b from a and returns the resulting
+// range. subtract assumes that a and b either don't overlap at all,
+// only overlap on one side, or are equal; if b contains a (including
+// when they are equal), the result is the empty range. If b is strictly
+// contained in a, thus forcing a split, it will throw.
+func (a addrRange) subtract(b addrRange) addrRange {
+ if b.base.lessEqual(a.base) && a.limit.lessEqual(b.limit) {
+ return addrRange{}
+ } else if a.base.lessThan(b.base) && b.limit.lessThan(a.limit) {
+ throw("bad prune")
+ } else if b.limit.lessThan(a.limit) && a.base.lessThan(b.limit) {
+ a.base = b.limit
+ } else if a.base.lessThan(b.base) && b.base.lessThan(a.limit) {
+ a.limit = b.base
+ }
+ return a
+}
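+
+// Worked example (illustrative, not in the upstream source), writing ranges
+// as [base, limit): for a = [0x1000, 0x3000), a.subtract([0x2000, 0x4000))
+// trims the right side and yields [0x1000, 0x2000); subtracting a from
+// itself yields the empty range; and a.subtract([0x1800, 0x2800)) throws,
+// since it would split a in two.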
+
+// removeGreaterEqual removes all addresses in a greater than or equal
+// to addr and returns the new range.
+func (a addrRange) removeGreaterEqual(addr uintptr) addrRange {
+ if (offAddr{addr}).lessEqual(a.base) {
+ return addrRange{}
+ }
+ if a.limit.lessEqual(offAddr{addr}) {
+ return a
+ }
+ return makeAddrRange(a.base.addr(), addr)
+}
+
+var (
+ // minOffAddr is the minimum address in the offset space, and
+ // it corresponds to the virtual address arenaBaseOffset.
+ minOffAddr = offAddr{arenaBaseOffset}
+
+ // maxOffAddr is the maximum address in the offset address
+ // space. It corresponds to the highest virtual address representable
+ // by the page alloc chunk and heap arena maps.
+ maxOffAddr = offAddr{(((1 << heapAddrBits) - 1) + arenaBaseOffset) & uintptrMask}
+)
+
+// offAddr represents an address in a contiguous view
+// of the address space on systems where the address space is
+// segmented. On other systems, it's just a normal address.
+type offAddr struct {
+ // a is just the virtual address, but should never be used
+ // directly. Call addr() to get this value instead.
+ a uintptr
+}
+
+// add adds a uintptr offset to the offAddr.
+func (l offAddr) add(bytes uintptr) offAddr {
+ return offAddr{a: l.a + bytes}
+}
+
+// sub subtracts a uintptr offset from the offAddr.
+func (l offAddr) sub(bytes uintptr) offAddr {
+ return offAddr{a: l.a - bytes}
+}
+
+// diff returns the amount of bytes in between the
+// two offAddrs.
+func (l1 offAddr) diff(l2 offAddr) uintptr {
+ return l1.a - l2.a
+}
+
+// lessThan returns true if l1 is less than l2 in the offset
+// address space.
+func (l1 offAddr) lessThan(l2 offAddr) bool {
+ return (l1.a - arenaBaseOffset) < (l2.a - arenaBaseOffset)
+}
+
+// lessEqual returns true if l1 is less than or equal to l2 in
+// the offset address space.
+func (l1 offAddr) lessEqual(l2 offAddr) bool {
+ return (l1.a - arenaBaseOffset) <= (l2.a - arenaBaseOffset)
+}
+
+// equal returns true if the two offAddr values are equal.
+func (l1 offAddr) equal(l2 offAddr) bool {
+ // No need to compare in the offset space, it
+ // means the same thing.
+ return l1 == l2
+}
+
+// addr returns the virtual address for this offset address.
+func (l offAddr) addr() uintptr {
+ return l.a
+}
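+
+// Worked example (illustrative, not in the upstream source): on a
+// hypothetical platform with arenaBaseOffset = 0x8000000000000000, lessThan
+// orders the virtual address 0xffffffffffffff00 below 0x10, because both
+// operands are first rebased by subtracting the offset (with wraparound)
+// before the comparison.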
+
+// atomicOffAddr is like offAddr, but operations on it are atomic.
+// It also contains operations to be able to store marked addresses
+// to ensure that they're not overridden until they've been seen.
+type atomicOffAddr struct {
+ // a contains the offset address, unlike offAddr.
+ a atomic.Int64
+}
+
+// Clear attempts to store minOffAddr in atomicOffAddr. It may fail
+// if a marked value is placed in the box in the meantime.
+func (b *atomicOffAddr) Clear() {
+ for {
+ old := b.a.Load()
+ if old < 0 {
+ return
+ }
+ if b.a.CompareAndSwap(old, int64(minOffAddr.addr()-arenaBaseOffset)) {
+ return
+ }
+ }
+}
+
+// StoreMin stores addr if it's less than the current value in the
+// offset address space and the current value is not marked.
+func (b *atomicOffAddr) StoreMin(addr uintptr) {
+ new := int64(addr - arenaBaseOffset)
+ for {
+ old := b.a.Load()
+ if old < new {
+ return
+ }
+ if b.a.CompareAndSwap(old, new) {
+ return
+ }
+ }
+}
+
+// StoreUnmark attempts to unmark the value in atomicOffAddr and
+// replace it with newAddr. markedAddr must be a marked address
+// returned by Load. This function will not store newAddr if the
+// box no longer contains markedAddr.
+func (b *atomicOffAddr) StoreUnmark(markedAddr, newAddr uintptr) {
+ b.a.CompareAndSwap(-int64(markedAddr-arenaBaseOffset), int64(newAddr-arenaBaseOffset))
+}
+
+// StoreMarked stores addr, first converting it to the offset address
+// space and then negating it.
+func (b *atomicOffAddr) StoreMarked(addr uintptr) {
+ b.a.Store(-int64(addr - arenaBaseOffset))
+}
+
+// Load returns the address in the box as a virtual address. It also
+// reports whether the value was marked.
+func (b *atomicOffAddr) Load() (uintptr, bool) {
+ v := b.a.Load()
+ wasMarked := false
+ if v < 0 {
+ wasMarked = true
+ v = -v
+ }
+ return uintptr(v) + arenaBaseOffset, wasMarked
+}
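+
+// Illustrative protocol sketch (not part of the upstream file, and newAddr
+// is a placeholder): a consumer marks the value it observed so concurrent
+// StoreMin calls cannot silently overwrite it while it is being processed.
+//
+//	addr, _ := b.Load()          // read the current value
+//	b.StoreMarked(addr)          // store it negated: now "marked"
+//	// ... work with addr ...
+//	b.StoreUnmark(addr, newAddr) // replace only if still marked with addr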
+
+// addrRanges is a data structure holding a collection of ranges of
+// address space.
+//
+// The ranges are coalesced eagerly to reduce the
+// number of ranges it holds.
+//
+// The slice backing store for the ranges field is persistentalloc'd
+// and thus there is no way to free it.
+//
+// addrRanges is not thread-safe.
+type addrRanges struct {
+ // ranges is a slice of ranges sorted by base.
+ ranges []addrRange
+
+ // totalBytes is the total amount of address space in bytes counted by
+ // this addrRanges.
+ totalBytes uintptr
+
+ // sysStat is the stat to track allocations by this type
+ sysStat *sysMemStat
+}
+
+func (a *addrRanges) init(sysStat *sysMemStat) {
+ ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
+ ranges.len = 0
+ ranges.cap = 16
+ ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, sysStat))
+ a.sysStat = sysStat
+ a.totalBytes = 0
+}
+
+// findSucc returns the first index in a such that addr is
+// less than the base of the addrRange at that index.
+func (a *addrRanges) findSucc(addr uintptr) int {
+ base := offAddr{addr}
+
+ // Narrow down the search space via a binary search
+ // for large addrRanges until we have at most iterMax
+ // candidates left.
+ const iterMax = 8
+ bot, top := 0, len(a.ranges)
+ for top-bot > iterMax {
+ i := ((top - bot) / 2) + bot
+ if a.ranges[i].contains(base.addr()) {
+ // a.ranges[i] contains base, so
+ // its successor is the next index.
+ return i + 1
+ }
+ if base.lessThan(a.ranges[i].base) {
+ // In this case i might actually be
+ // the successor, but we can't be sure
+ // until we check the ones before it.
+ top = i
+ } else {
+ // In this case we know base is
+ // greater than or equal to a.ranges[i].limit-1,
+ // so i is definitely not the successor.
+ // We already checked i, so pick the next
+ // one.
+ bot = i + 1
+ }
+ }
+ // There are top-bot candidates left, so
+ // iterate over them and find the first that
+ // base is strictly less than.
+ for i := bot; i < top; i++ {
+ if base.lessThan(a.ranges[i].base) {
+ return i
+ }
+ }
+ return top
+}
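+
+// Worked example (illustrative, not in the upstream source): with
+// a.ranges = {[0x1000, 0x2000), [0x4000, 0x5000)}, findSucc(0x500) returns
+// 0, findSucc(0x1800) returns 1 (its containing range sits at index 0),
+// and findSucc(0x4800) returns 2.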
+
+// findAddrGreaterEqual returns the smallest address represented by a
+// that is >= addr. Thus, if the address is represented by a,
+// then it returns addr. The second return value indicates whether
+// such an address exists for addr in a. That is, if addr is larger than
+// any address known to a, the second return value will be false.
+func (a *addrRanges) findAddrGreaterEqual(addr uintptr) (uintptr, bool) {
+ i := a.findSucc(addr)
+ if i == 0 {
+ return a.ranges[0].base.addr(), true
+ }
+ if a.ranges[i-1].contains(addr) {
+ return addr, true
+ }
+ if i < len(a.ranges) {
+ return a.ranges[i].base.addr(), true
+ }
+ return 0, false
+}
+
+// contains returns true if a covers the address addr.
+func (a *addrRanges) contains(addr uintptr) bool {
+ i := a.findSucc(addr)
+ if i == 0 {
+ return false
+ }
+ return a.ranges[i-1].contains(addr)
+}
+
+// add inserts a new address range to a.
+//
+// r must not overlap with any address range in a and r.size() must be > 0.
+func (a *addrRanges) add(r addrRange) {
+ // The copies in this function are potentially expensive, but this data
+ // structure is meant to represent the Go heap. At worst, copying this
+ // would take ~160µs assuming a conservative copying rate of 25 GiB/s (the
+ // copy will almost never trigger a page fault) for a 1 TiB heap with 4 MiB
+ // arenas which is completely discontiguous. ~160µs is still a lot, but in
+ // practice most platforms have 64 MiB arenas (which cuts this by a factor
+ // of 16) and Go heaps are usually mostly contiguous, so the chance that
+ // an addrRanges even grows to that size is extremely low.
+
+ // An empty range has no effect on the set of addresses represented
+ // by a, but passing a zero-sized range is almost always a bug.
+ if r.size() == 0 {
+ print("runtime: range = {", hex(r.base.addr()), ", ", hex(r.limit.addr()), "}\n")
+ throw("attempted to add zero-sized address range")
+ }
+ // Because we assume r is not currently represented in a,
+ // findSucc gives us our insertion index.
+ i := a.findSucc(r.base.addr())
+ coalescesDown := i > 0 && a.ranges[i-1].limit.equal(r.base)
+ coalescesUp := i < len(a.ranges) && r.limit.equal(a.ranges[i].base)
+ if coalescesUp && coalescesDown {
+ // We have neighbors and they both border us.
+ // Merge a.ranges[i-1], r, and a.ranges[i] together into a.ranges[i-1].
+ a.ranges[i-1].limit = a.ranges[i].limit
+
+ // Delete a.ranges[i].
+ copy(a.ranges[i:], a.ranges[i+1:])
+ a.ranges = a.ranges[:len(a.ranges)-1]
+ } else if coalescesDown {
+ // We have a neighbor at a lower address only and it borders us.
+ // Merge the new space into a.ranges[i-1].
+ a.ranges[i-1].limit = r.limit
+ } else if coalescesUp {
+ // We have a neighbor at a higher address only and it borders us.
+ // Merge the new space into a.ranges[i].
+ a.ranges[i].base = r.base
+ } else {
+ // We may or may not have neighbors which don't border us.
+ // Add the new range.
+ if len(a.ranges)+1 > cap(a.ranges) {
+ // Grow the array. Note that this leaks the old array, but since
+ // we're doubling we have at most 2x waste. For a 1 TiB heap and
+ // 4 MiB arenas which are all discontiguous (both very conservative
+ // assumptions), this would waste at most 4 MiB of memory.
+ oldRanges := a.ranges
+ ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
+ ranges.len = len(oldRanges) + 1
+ ranges.cap = cap(oldRanges) * 2
+ ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, a.sysStat))
+
+ // Copy in the old array, but make space for the new range.
+ copy(a.ranges[:i], oldRanges[:i])
+ copy(a.ranges[i+1:], oldRanges[i:])
+ } else {
+ a.ranges = a.ranges[:len(a.ranges)+1]
+ copy(a.ranges[i+1:], a.ranges[i:])
+ }
+ a.ranges[i] = r
+ }
+ a.totalBytes += r.size()
+}
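+
+// Worked example (illustrative, not in the upstream source): starting from
+// ranges {[0x1000, 0x2000), [0x3000, 0x4000)}, add([0x2000, 0x3000))
+// coalesces both down and up, leaving the single range [0x1000, 0x4000),
+// while add([0x5000, 0x6000)) borders neither neighbor and is inserted as
+// a new entry.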
+
+// removeLast removes and returns the highest-addressed contiguous range
+// of a, or the last nBytes of that range, whichever is smaller. If a is
+// empty, it returns an empty range.
+func (a *addrRanges) removeLast(nBytes uintptr) addrRange {
+ if len(a.ranges) == 0 {
+ return addrRange{}
+ }
+ r := a.ranges[len(a.ranges)-1]
+ size := r.size()
+ if size > nBytes {
+ newEnd := r.limit.sub(nBytes)
+ a.ranges[len(a.ranges)-1].limit = newEnd
+ a.totalBytes -= nBytes
+ return addrRange{newEnd, r.limit}
+ }
+ a.ranges = a.ranges[:len(a.ranges)-1]
+ a.totalBytes -= size
+ return r
+}
+
+// removeGreaterEqual removes the ranges of a which are above addr, and additionally
+// splits any range containing addr.
+func (a *addrRanges) removeGreaterEqual(addr uintptr) {
+ pivot := a.findSucc(addr)
+ if pivot == 0 {
+ // addr is before all ranges in a.
+ a.totalBytes = 0
+ a.ranges = a.ranges[:0]
+ return
+ }
+ removed := uintptr(0)
+ for _, r := range a.ranges[pivot:] {
+ removed += r.size()
+ }
+ if r := a.ranges[pivot-1]; r.contains(addr) {
+ removed += r.size()
+ r = r.removeGreaterEqual(addr)
+ if r.size() == 0 {
+ pivot--
+ } else {
+ removed -= r.size()
+ a.ranges[pivot-1] = r
+ }
+ }
+ a.ranges = a.ranges[:pivot]
+ a.totalBytes -= removed
+}
+
+// cloneInto makes a deep clone of a's state into b, re-using
+// b's ranges if able.
+func (a *addrRanges) cloneInto(b *addrRanges) {
+ if len(a.ranges) > cap(b.ranges) {
+ // Grow the array.
+ ranges := (*notInHeapSlice)(unsafe.Pointer(&b.ranges))
+ ranges.len = 0
+ ranges.cap = cap(a.ranges)
+ ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, b.sysStat))
+ }
+ b.ranges = b.ranges[:len(a.ranges)]
+ b.totalBytes = a.totalBytes
+ copy(b.ranges, a.ranges)
+}
diff --git a/contrib/go/_std_1.18/src/runtime/msan0.go b/contrib/go/_std_1.19/src/runtime/msan0.go
index 2f5fd2d982..2f5fd2d982 100644
--- a/contrib/go/_std_1.18/src/runtime/msan0.go
+++ b/contrib/go/_std_1.19/src/runtime/msan0.go
diff --git a/contrib/go/_std_1.18/src/runtime/msize.go b/contrib/go/_std_1.19/src/runtime/msize.go
index c56aa5a7b2..c56aa5a7b2 100644
--- a/contrib/go/_std_1.18/src/runtime/msize.go
+++ b/contrib/go/_std_1.19/src/runtime/msize.go
diff --git a/contrib/go/_std_1.19/src/runtime/mspanset.go b/contrib/go/_std_1.19/src/runtime/mspanset.go
new file mode 100644
index 0000000000..4158495ddd
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mspanset.go
@@ -0,0 +1,354 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/cpu"
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// A spanSet is a set of *mspans.
+//
+// spanSet is safe for concurrent push and pop operations.
+type spanSet struct {
+ // A spanSet is a two-level data structure consisting of a
+ // growable spine that points to fixed-sized blocks. The spine
+ // can be accessed without locks, but adding a block or
+ // growing it requires taking the spine lock.
+ //
+ // Because each mspan covers at least 8K of heap and takes at
+ // most 8 bytes in the spanSet, the growth of the spine is
+ // quite limited.
+ //
+ // The spine and all blocks are allocated off-heap, which
+ // allows this to be used in the memory manager and avoids the
+ // need for write barriers on all of these. spanSetBlocks are
+ // managed in a pool, though never freed back to the operating
+ // system. We never release spine memory because there could be
+ // concurrent lock-free access and we're likely to reuse it
+ // anyway. (In principle, we could do this during STW.)
+
+ spineLock mutex
+ spine unsafe.Pointer // *[N]*spanSetBlock, accessed atomically
+ spineLen uintptr // Spine array length, accessed atomically
+ spineCap uintptr // Spine array cap, accessed under lock
+
+ // index is the head and tail of the spanSet in a single field.
+ // The head and the tail both represent an index into the logical
+	// concatenation of all blocks, with the head always behind or equal
+	// to the tail (head == tail indicates an empty set). This field is
+	// always accessed atomically.
+ //
+ // The head and the tail are only 32 bits wide, which means we
+ // can only support up to 2^32 pushes before a reset. If every
+ // span in the heap were stored in this set, and each span were
+ // the minimum size (1 runtime page, 8 KiB), then roughly the
+ // smallest heap which would be unrepresentable is 32 TiB in size.
+ index headTailIndex
+}
+
+const (
+ spanSetBlockEntries = 512 // 4KB on 64-bit
+ spanSetInitSpineCap = 256 // Enough for 1GB heap on 64-bit
+)
+
+type spanSetBlock struct {
+ // Free spanSetBlocks are managed via a lock-free stack.
+ lfnode
+
+ // popped is the number of pop operations that have occurred on
+ // this block. This number is used to help determine when a block
+ // may be safely recycled.
+ popped uint32
+
+ // spans is the set of spans in this block.
+ spans [spanSetBlockEntries]*mspan
+}
+
+// push adds span s to buffer b. push is safe to call concurrently
+// with other push and pop operations.
+func (b *spanSet) push(s *mspan) {
+ // Obtain our slot.
+ cursor := uintptr(b.index.incTail().tail() - 1)
+ top, bottom := cursor/spanSetBlockEntries, cursor%spanSetBlockEntries
+
+ // Do we need to add a block?
+ spineLen := atomic.Loaduintptr(&b.spineLen)
+ var block *spanSetBlock
+retry:
+ if top < spineLen {
+ spine := atomic.Loadp(unsafe.Pointer(&b.spine))
+ blockp := add(spine, goarch.PtrSize*top)
+ block = (*spanSetBlock)(atomic.Loadp(blockp))
+ } else {
+ // Add a new block to the spine, potentially growing
+ // the spine.
+ lock(&b.spineLock)
+ // spineLen cannot change until we release the lock,
+ // but may have changed while we were waiting.
+ spineLen = atomic.Loaduintptr(&b.spineLen)
+ if top < spineLen {
+ unlock(&b.spineLock)
+ goto retry
+ }
+
+ if spineLen == b.spineCap {
+ // Grow the spine.
+ newCap := b.spineCap * 2
+ if newCap == 0 {
+ newCap = spanSetInitSpineCap
+ }
+ newSpine := persistentalloc(newCap*goarch.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
+ if b.spineCap != 0 {
+ // Blocks are allocated off-heap, so
+ // no write barriers.
+ memmove(newSpine, b.spine, b.spineCap*goarch.PtrSize)
+ }
+ // Spine is allocated off-heap, so no write barrier.
+ atomic.StorepNoWB(unsafe.Pointer(&b.spine), newSpine)
+ b.spineCap = newCap
+ // We can't immediately free the old spine
+ // since a concurrent push with a lower index
+ // could still be reading from it. We let it
+ // leak because even a 1TB heap would waste
+ // less than 2MB of memory on old spines. If
+ // this is a problem, we could free old spines
+ // during STW.
+ }
+
+ // Allocate a new block from the pool.
+ block = spanSetBlockPool.alloc()
+
+ // Add it to the spine.
+ blockp := add(b.spine, goarch.PtrSize*top)
+ // Blocks are allocated off-heap, so no write barrier.
+ atomic.StorepNoWB(blockp, unsafe.Pointer(block))
+ atomic.Storeuintptr(&b.spineLen, spineLen+1)
+ unlock(&b.spineLock)
+ }
+
+ // We have a block. Insert the span atomically, since there may be
+ // concurrent readers via the block API.
+ atomic.StorepNoWB(unsafe.Pointer(&block.spans[bottom]), unsafe.Pointer(s))
+}
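+
+// Worked example (illustrative, not in the upstream source): with
+// spanSetBlockEntries = 512, the 1000th push overall claims cursor 999,
+// which lands in block top = 999/512 = 1 at slot bottom = 999%512 = 487.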
+
+// pop removes and returns a span from buffer b, or nil if b is empty.
+// pop is safe to call concurrently with other pop and push operations.
+func (b *spanSet) pop() *mspan {
+ var head, tail uint32
+claimLoop:
+ for {
+ headtail := b.index.load()
+ head, tail = headtail.split()
+ if head >= tail {
+ // The buf is empty, as far as we can tell.
+ return nil
+ }
+ // Check if the head position we want to claim is actually
+ // backed by a block.
+ spineLen := atomic.Loaduintptr(&b.spineLen)
+ if spineLen <= uintptr(head)/spanSetBlockEntries {
+ // We're racing with a spine growth and the allocation of
+ // a new block (and maybe a new spine!), and trying to grab
+ // the span at the index which is currently being pushed.
+ // Instead of spinning, let's just notify the caller that
+ // there's nothing currently here. Spinning on this is
+ // almost definitely not worth it.
+ return nil
+ }
+ // Try to claim the current head by CASing in an updated head.
+ // This may fail transiently due to a push which modifies the
+ // tail, so keep trying while the head isn't changing.
+ want := head
+ for want == head {
+ if b.index.cas(headtail, makeHeadTailIndex(want+1, tail)) {
+ break claimLoop
+ }
+ headtail = b.index.load()
+ head, tail = headtail.split()
+ }
+ // We failed to claim the spot we were after and the head changed,
+ // meaning a popper got ahead of us. Try again from the top because
+ // the buf may not be empty.
+ }
+ top, bottom := head/spanSetBlockEntries, head%spanSetBlockEntries
+
+ // We may be reading a stale spine pointer, but because the length
+ // grows monotonically and we've already verified it, we'll definitely
+ // be reading from a valid block.
+ spine := atomic.Loadp(unsafe.Pointer(&b.spine))
+ blockp := add(spine, goarch.PtrSize*uintptr(top))
+
+ // Given that the spine length is correct, we know we will never
+ // see a nil block here, since the length is always updated after
+ // the block is set.
+ block := (*spanSetBlock)(atomic.Loadp(blockp))
+ s := (*mspan)(atomic.Loadp(unsafe.Pointer(&block.spans[bottom])))
+ for s == nil {
+ // We raced with the span actually being set, but given that we
+ // know a block for this span exists, the race window here is
+ // extremely small. Try again.
+ s = (*mspan)(atomic.Loadp(unsafe.Pointer(&block.spans[bottom])))
+ }
+ // Clear the pointer. This isn't strictly necessary, but defensively
+ // avoids accidentally re-using blocks which could lead to memory
+ // corruption. This way, we'll get a nil pointer access instead.
+ atomic.StorepNoWB(unsafe.Pointer(&block.spans[bottom]), nil)
+
+ // Increase the popped count. If we are the last possible popper
+ // in the block (note that bottom need not equal spanSetBlockEntries-1
+ // due to races) then it's our responsibility to free the block.
+ //
+ // If we increment popped to spanSetBlockEntries, we can be sure that
+ // we're the last popper for this block, and it's thus safe to free it.
+ // Every other popper must have crossed this barrier (and thus finished
+ // popping its corresponding mspan) by the time we get here. Because
+ // we're the last popper, we also don't have to worry about concurrent
+ // pushers (there can't be any). Note that we may not be the popper
+ // which claimed the last slot in the block, we're just the last one
+ // to finish popping.
+ if atomic.Xadd(&block.popped, 1) == spanSetBlockEntries {
+ // Clear the block's pointer.
+ atomic.StorepNoWB(blockp, nil)
+
+ // Return the block to the block pool.
+ spanSetBlockPool.free(block)
+ }
+ return s
+}
+
+// reset resets a spanSet, which must be empty. It also cleans up
+// any leftover blocks.
+//
+// Throws if the buf is not empty.
+//
+// reset may not be called concurrently with any other operations
+// on the span set.
+func (b *spanSet) reset() {
+ head, tail := b.index.load().split()
+ if head < tail {
+ print("head = ", head, ", tail = ", tail, "\n")
+ throw("attempt to clear non-empty span set")
+ }
+ top := head / spanSetBlockEntries
+ if uintptr(top) < b.spineLen {
+ // If the head catches up to the tail and the set is empty,
+ // we may not clean up the block containing the head and tail
+ // since it may be pushed into again. In order to avoid leaking
+ // memory since we're going to reset the head and tail, clean
+ // up such a block now, if it exists.
+ blockp := (**spanSetBlock)(add(b.spine, goarch.PtrSize*uintptr(top)))
+ block := *blockp
+ if block != nil {
+ // Sanity check the popped value.
+ if block.popped == 0 {
+ // popped should never be zero because that means we have
+ // pushed at least one value but not yet popped if this
+ // block pointer is not nil.
+ throw("span set block with unpopped elements found in reset")
+ }
+ if block.popped == spanSetBlockEntries {
+ // popped should also never be equal to spanSetBlockEntries
+ // because the last popper should have made the block pointer
+ // in this slot nil.
+ throw("fully empty unfreed span set block found in reset")
+ }
+
+ // Clear the pointer to the block.
+ atomic.StorepNoWB(unsafe.Pointer(blockp), nil)
+
+ // Return the block to the block pool.
+ spanSetBlockPool.free(block)
+ }
+ }
+ b.index.reset()
+ atomic.Storeuintptr(&b.spineLen, 0)
+}
+
+// spanSetBlockPool is a global pool of spanSetBlocks.
+var spanSetBlockPool spanSetBlockAlloc
+
+// spanSetBlockAlloc represents a concurrent pool of spanSetBlocks.
+type spanSetBlockAlloc struct {
+ stack lfstack
+}
+
+// alloc tries to grab a spanSetBlock out of the pool, and if it fails
+// persistentallocs a new one and returns it.
+func (p *spanSetBlockAlloc) alloc() *spanSetBlock {
+ if s := (*spanSetBlock)(p.stack.pop()); s != nil {
+ return s
+ }
+ return (*spanSetBlock)(persistentalloc(unsafe.Sizeof(spanSetBlock{}), cpu.CacheLineSize, &memstats.gcMiscSys))
+}
+
+// free returns a spanSetBlock back to the pool.
+func (p *spanSetBlockAlloc) free(block *spanSetBlock) {
+ atomic.Store(&block.popped, 0)
+ p.stack.push(&block.lfnode)
+}
+
+// headTailIndex represents a combined 32-bit head and 32-bit tail
+// of a queue, packed into a single 64-bit value.
+type headTailIndex uint64
+
+// makeHeadTailIndex creates a headTailIndex value from a separate
+// head and tail.
+func makeHeadTailIndex(head, tail uint32) headTailIndex {
+ return headTailIndex(uint64(head)<<32 | uint64(tail))
+}
+
+// head returns the head of a headTailIndex value.
+func (h headTailIndex) head() uint32 {
+ return uint32(h >> 32)
+}
+
+// tail returns the tail of a headTailIndex value.
+func (h headTailIndex) tail() uint32 {
+ return uint32(h)
+}
+
+// split splits the headTailIndex value into its parts.
+func (h headTailIndex) split() (head uint32, tail uint32) {
+ return h.head(), h.tail()
+}
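+
+// Worked example (illustrative, not in the upstream source): a pack and
+// unpack round trip.
+//
+//	ht := makeHeadTailIndex(3, 7) // 0x0000_0003_0000_0007
+//	head, tail := ht.split()      // head == 3, tail == 7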
+
+// load atomically reads a headTailIndex value.
+func (h *headTailIndex) load() headTailIndex {
+ return headTailIndex(atomic.Load64((*uint64)(h)))
+}
+
+// cas atomically compares-and-swaps a headTailIndex value.
+func (h *headTailIndex) cas(old, new headTailIndex) bool {
+ return atomic.Cas64((*uint64)(h), uint64(old), uint64(new))
+}
+
+// incHead atomically increments the head of a headTailIndex.
+func (h *headTailIndex) incHead() headTailIndex {
+ return headTailIndex(atomic.Xadd64((*uint64)(h), (1 << 32)))
+}
+
+// decHead atomically decrements the head of a headTailIndex.
+func (h *headTailIndex) decHead() headTailIndex {
+ return headTailIndex(atomic.Xadd64((*uint64)(h), -(1 << 32)))
+}
+
+// incTail atomically increments the tail of a headTailIndex.
+func (h *headTailIndex) incTail() headTailIndex {
+ ht := headTailIndex(atomic.Xadd64((*uint64)(h), +1))
+ // Check for overflow.
+ if ht.tail() == 0 {
+ print("runtime: head = ", ht.head(), ", tail = ", ht.tail(), "\n")
+ throw("headTailIndex overflow")
+ }
+ return ht
+}
+
+// reset clears the headTailIndex to (0, 0).
+func (h *headTailIndex) reset() {
+ atomic.Store64((*uint64)(h), 0)
+}
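+
+// Illustrative sketch (editorial addition, not part of the runtime): the
+// packing above is plain integer arithmetic and can be exercised in any
+// Go program:
+//
+//	h := uint64(3)<<32 | uint64(5) // makeHeadTailIndex(3, 5)
+//	head := uint32(h >> 32)        // 3
+//	tail := uint32(h)              // 5
+//	h++                            // incTail: tail becomes 6, head untouched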
diff --git a/contrib/go/_std_1.19/src/runtime/mstats.go b/contrib/go/_std_1.19/src/runtime/mstats.go
new file mode 100644
index 0000000000..0029ea956c
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mstats.go
@@ -0,0 +1,888 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Memory statistics
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+type mstats struct {
+ // Statistics about malloc heap.
+ heapStats consistentHeapStats
+
+ // Statistics about stacks.
+ stacks_sys sysMemStat // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
+
+ // Statistics about allocation of low-level fixed-size structures.
+ mspan_sys sysMemStat
+ mcache_sys sysMemStat
+ buckhash_sys sysMemStat // profiling bucket hash table
+
+ // Statistics about GC overhead.
+ gcMiscSys sysMemStat // updated atomically or during STW
+
+ // Miscellaneous statistics.
+ other_sys sysMemStat // updated atomically or during STW
+
+ // Statistics about the garbage collector.
+
+ // Protected by mheap or stopping the world during GC.
+ last_gc_unix uint64 // last gc (in unix time)
+ pause_total_ns uint64
+ pause_ns [256]uint64 // circular buffer of recent gc pause lengths
+ pause_end [256]uint64 // circular buffer of recent gc end times (nanoseconds since 1970)
+ numgc uint32
+ numforcedgc uint32 // number of user-forced GCs
+ gc_cpu_fraction float64 // fraction of CPU time used by GC
+
+ last_gc_nanotime uint64 // last gc (monotonic time)
+ lastHeapInUse uint64 // heapInUse at mark termination of the previous GC
+
+ enablegc bool
+
+ _ uint32 // ensure gcPauseDist is aligned.
+
+ // gcPauseDist represents the distribution of all GC-related
+ // application pauses in the runtime.
+ //
+ // Each individual pause is counted separately, unlike pause_ns.
+ gcPauseDist timeHistogram
+}
+
+var memstats mstats
+
+// A MemStats records statistics about the memory allocator.
+type MemStats struct {
+ // General statistics.
+
+ // Alloc is bytes of allocated heap objects.
+ //
+ // This is the same as HeapAlloc (see below).
+ Alloc uint64
+
+ // TotalAlloc is cumulative bytes allocated for heap objects.
+ //
+ // TotalAlloc increases as heap objects are allocated, but
+ // unlike Alloc and HeapAlloc, it does not decrease when
+ // objects are freed.
+ TotalAlloc uint64
+
+ // Sys is the total bytes of memory obtained from the OS.
+ //
+ // Sys is the sum of the XSys fields below. Sys measures the
+ // virtual address space reserved by the Go runtime for the
+ // heap, stacks, and other internal data structures. It's
+ // likely that not all of the virtual address space is backed
+ // by physical memory at any given moment, though in general
+ // it all was at some point.
+ Sys uint64
+
+ // Lookups is the number of pointer lookups performed by the
+ // runtime.
+ //
+ // This is primarily useful for debugging runtime internals.
+ Lookups uint64
+
+ // Mallocs is the cumulative count of heap objects allocated.
+ // The number of live objects is Mallocs - Frees.
+ Mallocs uint64
+
+ // Frees is the cumulative count of heap objects freed.
+ Frees uint64
+
+ // Heap memory statistics.
+ //
+ // Interpreting the heap statistics requires some knowledge of
+ // how Go organizes memory. Go divides the virtual address
+ // space of the heap into "spans", which are contiguous
+ // regions of memory 8K or larger. A span may be in one of
+ // three states:
+ //
+ // An "idle" span contains no objects or other data. The
+ // physical memory backing an idle span can be released back
+ // to the OS (but the virtual address space never is), or it
+ // can be converted into an "in use" or "stack" span.
+ //
+ // An "in use" span contains at least one heap object and may
+ // have free space available to allocate more heap objects.
+ //
+ // A "stack" span is used for goroutine stacks. Stack spans
+ // are not considered part of the heap. A span can change
+ // between heap and stack memory; it is never used for both
+ // simultaneously.
+
+ // HeapAlloc is bytes of allocated heap objects.
+ //
+ // "Allocated" heap objects include all reachable objects, as
+ // well as unreachable objects that the garbage collector has
+ // not yet freed. Specifically, HeapAlloc increases as heap
+ // objects are allocated and decreases as the heap is swept
+ // and unreachable objects are freed. Sweeping occurs
+ // incrementally between GC cycles, so these two processes
+ // occur simultaneously, and as a result HeapAlloc tends to
+ // change smoothly (in contrast with the sawtooth that is
+ // typical of stop-the-world garbage collectors).
+ HeapAlloc uint64
+
+ // HeapSys is bytes of heap memory obtained from the OS.
+ //
+ // HeapSys measures the amount of virtual address space
+ // reserved for the heap. This includes virtual address space
+ // that has been reserved but not yet used, which consumes no
+ // physical memory, but tends to be small, as well as virtual
+ // address space for which the physical memory has been
+ // returned to the OS after it became unused (see HeapReleased
+ // for a measure of the latter).
+ //
+ // HeapSys estimates the largest size the heap has had.
+ HeapSys uint64
+
+ // HeapIdle is bytes in idle (unused) spans.
+ //
+ // Idle spans have no objects in them. These spans could be
+ // (and may already have been) returned to the OS, or they can
+ // be reused for heap allocations, or they can be reused as
+ // stack memory.
+ //
+ // HeapIdle minus HeapReleased estimates the amount of memory
+ // that could be returned to the OS, but is being retained by
+ // the runtime so it can grow the heap without requesting more
+ // memory from the OS. If this difference is significantly
+ // larger than the heap size, it indicates there was a recent
+ // transient spike in live heap size.
+ HeapIdle uint64
+
+ // HeapInuse is bytes in in-use spans.
+ //
+ // In-use spans have at least one object in them. These spans
+ // can only be used for other objects of roughly the same
+ // size.
+ //
+ // HeapInuse minus HeapAlloc estimates the amount of memory
+ // that has been dedicated to particular size classes, but is
+ // not currently being used. This is an upper bound on
+ // fragmentation, but in general this memory can be reused
+ // efficiently.
+ HeapInuse uint64
+
+ // HeapReleased is bytes of physical memory returned to the OS.
+ //
+ // This counts heap memory from idle spans that was returned
+ // to the OS and has not yet been reacquired for the heap.
+ HeapReleased uint64
+
+ // HeapObjects is the number of allocated heap objects.
+ //
+ // Like HeapAlloc, this increases as objects are allocated and
+ // decreases as the heap is swept and unreachable objects are
+ // freed.
+ HeapObjects uint64
+
+ // Stack memory statistics.
+ //
+ // Stacks are not considered part of the heap, but the runtime
+ // can reuse a span of heap memory for stack memory, and
+ // vice-versa.
+
+ // StackInuse is bytes in stack spans.
+ //
+ // In-use stack spans have at least one stack in them. These
+ // spans can only be used for other stacks of the same size.
+ //
+ // There is no StackIdle because unused stack spans are
+ // returned to the heap (and hence counted toward HeapIdle).
+ StackInuse uint64
+
+ // StackSys is bytes of stack memory obtained from the OS.
+ //
+ // StackSys is StackInuse, plus any memory obtained directly
+ // from the OS for OS thread stacks (which should be minimal).
+ StackSys uint64
+
+ // Off-heap memory statistics.
+ //
+ // The following statistics measure runtime-internal
+ // structures that are not allocated from heap memory (usually
+ // because they are part of implementing the heap). Unlike
+ // heap or stack memory, any memory allocated to these
+ // structures is dedicated to these structures.
+ //
+ // These are primarily useful for debugging runtime memory
+ // overheads.
+
+ // MSpanInuse is bytes of allocated mspan structures.
+ MSpanInuse uint64
+
+ // MSpanSys is bytes of memory obtained from the OS for mspan
+ // structures.
+ MSpanSys uint64
+
+ // MCacheInuse is bytes of allocated mcache structures.
+ MCacheInuse uint64
+
+ // MCacheSys is bytes of memory obtained from the OS for
+ // mcache structures.
+ MCacheSys uint64
+
+ // BuckHashSys is bytes of memory in profiling bucket hash tables.
+ BuckHashSys uint64
+
+ // GCSys is bytes of memory in garbage collection metadata.
+ GCSys uint64
+
+ // OtherSys is bytes of memory in miscellaneous off-heap
+ // runtime allocations.
+ OtherSys uint64
+
+ // Garbage collector statistics.
+
+ // NextGC is the target heap size of the next GC cycle.
+ //
+ // The garbage collector's goal is to keep HeapAlloc ≤ NextGC.
+ // At the end of each GC cycle, the target for the next cycle
+ // is computed based on the amount of reachable data and the
+ // value of GOGC.
+ NextGC uint64
+
+ // LastGC is the time the last garbage collection finished, as
+ // nanoseconds since 1970 (the UNIX epoch).
+ LastGC uint64
+
+ // PauseTotalNs is the cumulative nanoseconds in GC
+ // stop-the-world pauses since the program started.
+ //
+ // During a stop-the-world pause, all goroutines are paused
+ // and only the garbage collector can run.
+ PauseTotalNs uint64
+
+ // PauseNs is a circular buffer of recent GC stop-the-world
+ // pause times in nanoseconds.
+ //
+ // The most recent pause is at PauseNs[(NumGC+255)%256]. In
+ // general, PauseNs[N%256] records the time paused in the most
+ // recent N%256th GC cycle. There may be multiple pauses per
+ // GC cycle; this is the sum of all pauses during a cycle.
+ PauseNs [256]uint64
+
+ // PauseEnd is a circular buffer of recent GC pause end times,
+ // as nanoseconds since 1970 (the UNIX epoch).
+ //
+ // This buffer is filled the same way as PauseNs. There may be
+ // multiple pauses per GC cycle; this records the end of the
+ // last pause in a cycle.
+ PauseEnd [256]uint64
+
+ // NumGC is the number of completed GC cycles.
+ NumGC uint32
+
+ // NumForcedGC is the number of GC cycles that were forced by
+ // the application calling the GC function.
+ NumForcedGC uint32
+
+ // GCCPUFraction is the fraction of this program's available
+ // CPU time used by the GC since the program started.
+ //
+ // GCCPUFraction is expressed as a number between 0 and 1,
+ // where 0 means GC has consumed none of this program's CPU. A
+ // program's available CPU time is defined as the integral of
+ // GOMAXPROCS since the program started. That is, if
+ // GOMAXPROCS is 2 and a program has been running for 10
+ // seconds, its "available CPU" is 20 seconds. GCCPUFraction
+ // does not include CPU time used for write barrier activity.
+ //
+ // This is the same as the fraction of CPU reported by
+ // GODEBUG=gctrace=1.
+ GCCPUFraction float64
+
+ // EnableGC indicates that GC is enabled. It is always true,
+ // even if GOGC=off.
+ EnableGC bool
+
+ // DebugGC is currently unused.
+ DebugGC bool
+
+ // BySize reports per-size class allocation statistics.
+ //
+ // BySize[N] gives statistics for allocations of size S where
+ // BySize[N-1].Size < S ≤ BySize[N].Size.
+ //
+ // This does not report allocations larger than BySize[60].Size.
+ BySize [61]struct {
+ // Size is the maximum byte size of an object in this
+ // size class.
+ Size uint32
+
+ // Mallocs is the cumulative count of heap objects
+ // allocated in this size class. The cumulative bytes
+ // of allocation is Size*Mallocs. The number of live
+ // objects in this size class is Mallocs - Frees.
+ Mallocs uint64
+
+ // Frees is the cumulative count of heap objects freed
+ // in this size class.
+ Frees uint64
+ }
+}
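+
+// Editorial sketch (not part of the runtime): given the ordering invariant
+// documented for BySize, the size class covering a hypothetical allocation
+// size S can be located with a binary search (assumes the standard "sort"
+// package):
+//
+//	i := sort.Search(len(m.BySize), func(i int) bool {
+//		return uint32(S) <= m.BySize[i].Size
+//	})
+//	// m.BySize[i] covers S when i < len(m.BySize); larger
+//	// allocations are not reported in BySize.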
+
+func init() {
+ if offset := unsafe.Offsetof(memstats.heapStats); offset%8 != 0 {
+ println(offset)
+ throw("memstats.heapStats not aligned to 8 bytes")
+ }
+ if offset := unsafe.Offsetof(memstats.gcPauseDist); offset%8 != 0 {
+ println(offset)
+ throw("memstats.gcPauseDist not aligned to 8 bytes")
+ }
+ // Ensure the size of heapStatsDelta causes adjacent fields/slots (e.g.
+ // [3]heapStatsDelta) to be 8-byte aligned.
+ if size := unsafe.Sizeof(heapStatsDelta{}); size%8 != 0 {
+ println(size)
+ throw("heapStatsDelta not a multiple of 8 bytes in size")
+ }
+}
+
+// ReadMemStats populates m with memory allocator statistics.
+//
+// The returned memory allocator statistics are up to date as of the
+// call to ReadMemStats. This is in contrast with a heap profile,
+// which is a snapshot as of the most recently completed garbage
+// collection cycle.
+func ReadMemStats(m *MemStats) {
+ stopTheWorld("read mem stats")
+
+ systemstack(func() {
+ readmemstats_m(m)
+ })
+
+ startTheWorld()
+}
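+
+// For example (editorial sketch, not part of the runtime; assumes the
+// standard "fmt" package):
+//
+//	var m runtime.MemStats
+//	runtime.ReadMemStats(&m)
+//	fmt.Printf("heap=%d MiB after %d GCs\n", m.HeapAlloc>>20, m.NumGC)
+//	// Most recent pause, per the PauseNs documentation above:
+//	last := m.PauseNs[(m.NumGC+255)%256]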
+
+// readmemstats_m populates stats for internal runtime values.
+//
+// The world must be stopped.
+func readmemstats_m(stats *MemStats) {
+ assertWorldStopped()
+
+ // Flush mcaches to mcentral before doing anything else.
+ //
+ // Flushing to the mcentral may in general cause stats to
+ // change as mcentral data structures are manipulated.
+ systemstack(flushallmcaches)
+
+ // Calculate memory allocator stats.
+ // During program execution we only count number of frees and amount of freed memory.
+ // Current number of alive objects in the heap and amount of alive heap memory
+ // are calculated by scanning all spans.
+ // Total number of mallocs is calculated as number of frees plus number of alive objects.
+ // Similarly, total amount of allocated memory is calculated as amount of freed memory
+ // plus amount of alive heap memory.
+
+ // Collect consistent stats, which are the source-of-truth in some cases.
+ var consStats heapStatsDelta
+ memstats.heapStats.unsafeRead(&consStats)
+
+ // Collect large allocation stats.
+ totalAlloc := consStats.largeAlloc
+ nMalloc := consStats.largeAllocCount
+ totalFree := consStats.largeFree
+ nFree := consStats.largeFreeCount
+
+ // Collect per-sizeclass stats.
+ var bySize [_NumSizeClasses]struct {
+ Size uint32
+ Mallocs uint64
+ Frees uint64
+ }
+ for i := range bySize {
+ bySize[i].Size = uint32(class_to_size[i])
+
+ // Malloc stats.
+ a := consStats.smallAllocCount[i]
+ totalAlloc += a * uint64(class_to_size[i])
+ nMalloc += a
+ bySize[i].Mallocs = a
+
+ // Free stats.
+ f := consStats.smallFreeCount[i]
+ totalFree += f * uint64(class_to_size[i])
+ nFree += f
+ bySize[i].Frees = f
+ }
+
+ // Account for tiny allocations.
+ // For historical reasons, MemStats includes tiny allocations
+ // in both the total free and total alloc count. This double-counts
+ // memory in some sense because the tiny allocation block backing
+ // them is also counted. Tracking the lifetime of individual tiny
+ // allocations is currently not done because it would be too expensive.
+ nFree += consStats.tinyAllocCount
+ nMalloc += consStats.tinyAllocCount
+
+ // Calculate derived stats.
+
+ stackInUse := uint64(consStats.inStacks)
+ gcWorkBufInUse := uint64(consStats.inWorkBufs)
+ gcProgPtrScalarBitsInUse := uint64(consStats.inPtrScalarBits)
+
+ totalMapped := gcController.heapInUse.load() + gcController.heapFree.load() + gcController.heapReleased.load() +
+ memstats.stacks_sys.load() + memstats.mspan_sys.load() + memstats.mcache_sys.load() +
+ memstats.buckhash_sys.load() + memstats.gcMiscSys.load() + memstats.other_sys.load() +
+ stackInUse + gcWorkBufInUse + gcProgPtrScalarBitsInUse
+
+ heapGoal := gcController.heapGoal()
+
+ // The world is stopped, so the consistent stats (after aggregation)
+ // should be identical to some combination of memstats. In particular:
+ //
+ // * memstats.heapInUse == inHeap
+ // * memstats.heapReleased == released
+ // * memstats.heapInUse + memstats.heapFree == committed - inStacks - inWorkBufs - inPtrScalarBits
+ // * memstats.totalAlloc == totalAlloc
+ // * memstats.totalFree == totalFree
+ //
+ // Check if that's actually true.
+ //
+ // TODO(mknyszek): Maybe don't throw here. It would be bad if a
+ // bug in otherwise benign accounting caused the whole application
+ // to crash.
+ if gcController.heapInUse.load() != uint64(consStats.inHeap) {
+ print("runtime: heapInUse=", gcController.heapInUse.load(), "\n")
+ print("runtime: consistent value=", consStats.inHeap, "\n")
+ throw("heapInUse and consistent stats are not equal")
+ }
+ if gcController.heapReleased.load() != uint64(consStats.released) {
+ print("runtime: heapReleased=", gcController.heapReleased.load(), "\n")
+ print("runtime: consistent value=", consStats.released, "\n")
+ throw("heapReleased and consistent stats are not equal")
+ }
+ heapRetained := gcController.heapInUse.load() + gcController.heapFree.load()
+ consRetained := uint64(consStats.committed - consStats.inStacks - consStats.inWorkBufs - consStats.inPtrScalarBits)
+ if heapRetained != consRetained {
+ print("runtime: global value=", heapRetained, "\n")
+ print("runtime: consistent value=", consRetained, "\n")
+ throw("measures of the retained heap are not equal")
+ }
+ if gcController.totalAlloc.Load() != totalAlloc {
+ print("runtime: totalAlloc=", gcController.totalAlloc.Load(), "\n")
+ print("runtime: consistent value=", totalAlloc, "\n")
+ throw("totalAlloc and consistent stats are not equal")
+ }
+ if gcController.totalFree.Load() != totalFree {
+ print("runtime: totalFree=", gcController.totalFree.Load(), "\n")
+ print("runtime: consistent value=", totalFree, "\n")
+ throw("totalFree and consistent stats are not equal")
+ }
+ // Also check that mappedReady lines up with totalMapped - released.
+ // This isn't really the same type of "make sure consistent stats line up" situation,
+ // but this is an opportune time to check.
+ if gcController.mappedReady.Load() != totalMapped-uint64(consStats.released) {
+ print("runtime: mappedReady=", gcController.mappedReady.Load(), "\n")
+ print("runtime: totalMapped=", totalMapped, "\n")
+ print("runtime: released=", uint64(consStats.released), "\n")
+ print("runtime: totalMapped-released=", totalMapped-uint64(consStats.released), "\n")
+ throw("mappedReady and other memstats are not equal")
+ }
+
+ // We've calculated all the values we need. Now, populate stats.
+
+ stats.Alloc = totalAlloc - totalFree
+ stats.TotalAlloc = totalAlloc
+ stats.Sys = totalMapped
+ stats.Mallocs = nMalloc
+ stats.Frees = nFree
+ stats.HeapAlloc = totalAlloc - totalFree
+ stats.HeapSys = gcController.heapInUse.load() + gcController.heapFree.load() + gcController.heapReleased.load()
+ // By definition, HeapIdle is memory that was mapped
+ // for the heap but is not currently used to hold heap
+ // objects. It also specifically is memory that can be
+ // used for other purposes, like stacks, but this memory
+ // is subtracted out of HeapSys before it makes that
+ // transition. Put another way:
+ //
+ // HeapSys = bytes allocated from the OS for the heap - bytes ultimately used for non-heap purposes
+ // HeapIdle = bytes allocated from the OS for the heap - bytes ultimately used for any purpose
+ //
+ // or
+ //
+ // HeapSys = sys - stacks_inuse - gcWorkBufInUse - gcProgPtrScalarBitsInUse
+ // HeapIdle = sys - stacks_inuse - gcWorkBufInUse - gcProgPtrScalarBitsInUse - heapInUse
+ //
+ // => HeapIdle = HeapSys - heapInUse = heapFree + heapReleased
+ stats.HeapIdle = gcController.heapFree.load() + gcController.heapReleased.load()
+ stats.HeapInuse = gcController.heapInUse.load()
+ stats.HeapReleased = gcController.heapReleased.load()
+ stats.HeapObjects = nMalloc - nFree
+ stats.StackInuse = stackInUse
+ // memstats.stacks_sys is only memory mapped directly for OS stacks.
+ // Add in heap-allocated stack memory for user consumption.
+ stats.StackSys = stackInUse + memstats.stacks_sys.load()
+ stats.MSpanInuse = uint64(mheap_.spanalloc.inuse)
+ stats.MSpanSys = memstats.mspan_sys.load()
+ stats.MCacheInuse = uint64(mheap_.cachealloc.inuse)
+ stats.MCacheSys = memstats.mcache_sys.load()
+ stats.BuckHashSys = memstats.buckhash_sys.load()
+ // MemStats defines GCSys as an aggregate of all memory related
+ // to the memory management system, but we track this memory
+ // at a more granular level in the runtime.
+ stats.GCSys = memstats.gcMiscSys.load() + gcWorkBufInUse + gcProgPtrScalarBitsInUse
+ stats.OtherSys = memstats.other_sys.load()
+ stats.NextGC = heapGoal
+ stats.LastGC = memstats.last_gc_unix
+ stats.PauseTotalNs = memstats.pause_total_ns
+ stats.PauseNs = memstats.pause_ns
+ stats.PauseEnd = memstats.pause_end
+ stats.NumGC = memstats.numgc
+ stats.NumForcedGC = memstats.numforcedgc
+ stats.GCCPUFraction = memstats.gc_cpu_fraction
+ stats.EnableGC = true
+
+ // stats.BySize and bySize might not match in length.
+ // That's OK, stats.BySize cannot change due to backwards
+ // compatibility issues. copy will copy the minimum amount
+ // of values between the two of them.
+ copy(stats.BySize[:], bySize[:])
+}
+
+//go:linkname readGCStats runtime/debug.readGCStats
+func readGCStats(pauses *[]uint64) {
+ systemstack(func() {
+ readGCStats_m(pauses)
+ })
+}
+
+// readGCStats_m must be called on the system stack because it acquires the heap
+// lock. See mheap for details.
+//
+//go:systemstack
+func readGCStats_m(pauses *[]uint64) {
+ p := *pauses
+ // Calling code in runtime/debug should make the slice large enough.
+ if cap(p) < len(memstats.pause_ns)+3 {
+ throw("short slice passed to readGCStats")
+ }
+
+ // Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
+ lock(&mheap_.lock)
+
+ n := memstats.numgc
+ if n > uint32(len(memstats.pause_ns)) {
+ n = uint32(len(memstats.pause_ns))
+ }
+
+ // The pause buffer is circular. The most recent pause is at
+ // pause_ns[(numgc-1)%len(pause_ns)], and then backward
+ // from there to go back farther in time. We deliver the times
+ // most recent first (in p[0]).
+ p = p[:cap(p)]
+ for i := uint32(0); i < n; i++ {
+ j := (memstats.numgc - 1 - i) % uint32(len(memstats.pause_ns))
+ p[i] = memstats.pause_ns[j]
+ p[n+i] = memstats.pause_end[j]
+ }
+
+ p[n+n] = memstats.last_gc_unix
+ p[n+n+1] = uint64(memstats.numgc)
+ p[n+n+2] = memstats.pause_total_ns
+ unlock(&mheap_.lock)
+ *pauses = p[:n+n+3]
+}
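+
+// Worked example (editorial addition): with memstats.numgc == 300 and the
+// 256-entry buffers above, n == 256 and the loop reads indices
+// (300-1-0)%256 == 43, (300-1-1)%256 == 42, and so on, so p[0] holds the
+// most recent pause and p[255] the oldest one still retained.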
+
+// flushmcache flushes the mcache of allp[i].
+//
+// The world must be stopped.
+//
+//go:nowritebarrier
+func flushmcache(i int) {
+ assertWorldStopped()
+
+ p := allp[i]
+ c := p.mcache
+ if c == nil {
+ return
+ }
+ c.releaseAll()
+ stackcache_clear(c)
+}
+
+// flushallmcaches flushes the mcaches of all Ps.
+//
+// The world must be stopped.
+//
+//go:nowritebarrier
+func flushallmcaches() {
+ assertWorldStopped()
+
+ for i := 0; i < int(gomaxprocs); i++ {
+ flushmcache(i)
+ }
+}
+
+// sysMemStat represents a global system statistic that is managed atomically.
+//
+// This type must structurally be a uint64 so that mstats aligns with MemStats.
+type sysMemStat uint64
+
+// load atomically reads the value of the stat.
+//
+// Must be nosplit as it is called in runtime initialization, e.g. newosproc0.
+//
+//go:nosplit
+func (s *sysMemStat) load() uint64 {
+ return atomic.Load64((*uint64)(s))
+}
+
+// add atomically adds the sysMemStat by n.
+//
+// Must be nosplit as it is called in runtime initialization, e.g. newosproc0.
+//
+//go:nosplit
+func (s *sysMemStat) add(n int64) {
+ val := atomic.Xadd64((*uint64)(s), n)
+ if (n > 0 && int64(val) < n) || (n < 0 && int64(val)+n < n) {
+ print("runtime: val=", val, " n=", n, "\n")
+ throw("sysMemStat overflow")
+ }
+}
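+
+// Illustrative analog (editorial addition, not runtime code): outside the
+// runtime, the same pattern can be written with sync/atomic; the
+// hypothetical counter below mirrors load and the overflow check in add:
+//
+//	var stat atomic.Uint64 // "sync/atomic"
+//
+//	func addStat(n int64) {
+//		val := stat.Add(uint64(n)) // two's-complement add handles n < 0
+//		if n > 0 && val < uint64(n) {
+//			panic("stat overflow") // the post-add value wrapped around
+//		}
+//	}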
+
+// heapStatsDelta contains deltas of various runtime memory statistics
+// that need to be updated together in order for them to be kept
+// consistent with one another.
+type heapStatsDelta struct {
+ // Memory stats.
+ committed int64 // byte delta of memory committed
+ released int64 // byte delta of memory released to the OS
+ inHeap int64 // byte delta of memory placed in the heap
+ inStacks int64 // byte delta of memory reserved for stacks
+ inWorkBufs int64 // byte delta of memory reserved for work bufs
+ inPtrScalarBits int64 // byte delta of memory reserved for unrolled GC prog bits
+
+ // Allocator stats.
+ //
+ // These are all uint64 because they're cumulative, and could quickly wrap
+ // around otherwise.
+ tinyAllocCount uint64 // number of tiny allocations
+ largeAlloc uint64 // bytes allocated for large objects
+ largeAllocCount uint64 // number of large object allocations
+ smallAllocCount [_NumSizeClasses]uint64 // number of allocs for small objects
+ largeFree uint64 // bytes freed for large objects (>maxSmallSize)
+ largeFreeCount uint64 // number of frees for large objects (>maxSmallSize)
+ smallFreeCount [_NumSizeClasses]uint64 // number of frees for small objects (<=maxSmallSize)
+
+ // NOTE: This struct must be a multiple of 8 bytes in size because it
+ // is stored in an array. If it's not, atomic accesses to the above
+ // fields may be unaligned and fail on 32-bit platforms.
+}
+
+// merge adds in the deltas from b into a.
+func (a *heapStatsDelta) merge(b *heapStatsDelta) {
+ a.committed += b.committed
+ a.released += b.released
+ a.inHeap += b.inHeap
+ a.inStacks += b.inStacks
+ a.inWorkBufs += b.inWorkBufs
+ a.inPtrScalarBits += b.inPtrScalarBits
+
+ a.tinyAllocCount += b.tinyAllocCount
+ a.largeAlloc += b.largeAlloc
+ a.largeAllocCount += b.largeAllocCount
+ for i := range b.smallAllocCount {
+ a.smallAllocCount[i] += b.smallAllocCount[i]
+ }
+ a.largeFree += b.largeFree
+ a.largeFreeCount += b.largeFreeCount
+ for i := range b.smallFreeCount {
+ a.smallFreeCount[i] += b.smallFreeCount[i]
+ }
+}
+
+// consistentHeapStats represents a set of various memory statistics
+// whose updates must be viewed completely to get a consistent
+// state of the world.
+//
+// To write updates to memory stats use the acquire and release
+// methods. To obtain a consistent global snapshot of these statistics,
+// use read.
+type consistentHeapStats struct {
+ // stats is a ring buffer of heapStatsDelta values.
+ // Writers always atomically update the delta at index gen.
+ //
+ // Readers operate by rotating gen (0 -> 1 -> 2 -> 0 -> ...)
+ // and synchronizing with writers by observing each P's
+ // statsSeq field. If the reader observes a P not writing,
+ // it can be sure that it will pick up the new gen value the
+ // next time it writes.
+ //
+ // The reader then takes responsibility by clearing space
+ // in the ring buffer for the next reader to rotate gen to
+ // that space (i.e. it merges in values from index (gen-2) mod 3
+ // to index (gen-1) mod 3, then clears the former).
+ //
+ // Note that this means only one reader can be reading at a time.
+ // There is no way for readers to synchronize.
+ //
+ // This process is why we need a ring buffer of size 3 instead
+ // of 2: one is for the writers, one contains the most recent
+ // data, and the last one is clear so writers can begin writing
+ // to it the moment gen is updated.
+ stats [3]heapStatsDelta
+
+ // gen represents the current index into which writers
+ // are writing, and can take on the value of 0, 1, or 2.
+ // This value is updated atomically.
+ gen uint32
+
+ // noPLock is intended to provide mutual exclusion for updating
+ // stats when no P is available. It does not block other writers
+ // with a P, only other writers without a P and the reader. Because
+ // stats are usually updated when a P is available, contention on
+ // this lock should be minimal.
+ noPLock mutex
+}
+
+// acquire returns a heapStatsDelta to be updated. In effect,
+// it acquires the shard for writing. release must be called
+// as soon as the relevant deltas are updated.
+//
+// The returned heapStatsDelta must be updated atomically.
+//
+// The caller's P must not change between acquire and
+// release. This also means that the caller should not
+// acquire a P or release its P in between. A P also must
+// not acquire a given consistentHeapStats if it hasn't
+// yet released it.
+//
+// nosplit because a stack growth in this function could
+// lead to a stack allocation that could reenter the
+// function.
+//
+//go:nosplit
+func (m *consistentHeapStats) acquire() *heapStatsDelta {
+ if pp := getg().m.p.ptr(); pp != nil {
+ seq := atomic.Xadd(&pp.statsSeq, 1)
+ if seq%2 == 0 {
+ // Should have been incremented to odd.
+ print("runtime: seq=", seq, "\n")
+ throw("bad sequence number")
+ }
+ } else {
+ lock(&m.noPLock)
+ }
+ gen := atomic.Load(&m.gen) % 3
+ return &m.stats[gen]
+}
+
+// release indicates that the writer is done modifying
+// the delta. The value returned by the corresponding
+// acquire must no longer be accessed or modified after
+// release is called.
+//
+// The caller's P must not change between acquire and
+// release. This also means that the caller should not
+// acquire a P or release its P in between.
+//
+// nosplit because a stack growth in this function could
+// lead to a stack allocation that causes another acquire
+// before this operation has completed.
+//
+//go:nosplit
+func (m *consistentHeapStats) release() {
+ if pp := getg().m.p.ptr(); pp != nil {
+ seq := atomic.Xadd(&pp.statsSeq, 1)
+ if seq%2 != 0 {
+ // Should have been incremented to even.
+ print("runtime: seq=", seq, "\n")
+ throw("bad sequence number")
+ }
+ } else {
+ unlock(&m.noPLock)
+ }
+}
+
+// unsafeRead aggregates the delta for this shard into out.
+//
+// Unsafe because it does so without any synchronization. The
+// world must be stopped.
+func (m *consistentHeapStats) unsafeRead(out *heapStatsDelta) {
+ assertWorldStopped()
+
+ for i := range m.stats {
+ out.merge(&m.stats[i])
+ }
+}
+
+// unsafeClear clears the shard.
+//
+// Unsafe because the world must be stopped and values should
+// be donated elsewhere before clearing.
+func (m *consistentHeapStats) unsafeClear() {
+ assertWorldStopped()
+
+ for i := range m.stats {
+ m.stats[i] = heapStatsDelta{}
+ }
+}
+
+// read takes a globally consistent snapshot of m
+// and puts the aggregated value in out. Even though out is a
+// heapStatsDelta, the resulting values should be complete and
+// valid statistic values.
+//
+// Not safe to call concurrently. The world must be stopped
+// or metricsSema must be held.
+func (m *consistentHeapStats) read(out *heapStatsDelta) {
+ // Getting preempted after this point is not safe because
+ // we read allp. We need to make sure a STW can't happen
+ // so it doesn't change out from under us.
+ mp := acquirem()
+
+ // Get the current generation. We can be confident that this
+ // will not change since read is serialized and is the only
+ // one that modifies currGen.
+ currGen := atomic.Load(&m.gen)
+ prevGen := currGen - 1
+ if currGen == 0 {
+ prevGen = 2
+ }
+
+ // Prevent writers without a P from writing while we update gen.
+ lock(&m.noPLock)
+
+ // Rotate gen, effectively taking a snapshot of the state of
+ // these statistics at the point of the exchange by moving
+ // writers to the next set of deltas.
+ //
+ // This exchange is safe to do because we won't race
+ // with anyone else trying to update this value.
+ atomic.Xchg(&m.gen, (currGen+1)%3)
+
+ // Allow P-less writers to continue. They'll be writing to the
+ // next generation now.
+ unlock(&m.noPLock)
+
+ for _, p := range allp {
+ // Spin until there are no more writers.
+ for atomic.Load(&p.statsSeq)%2 != 0 {
+ }
+ }
+
+ // At this point we've observed that each sequence
+ // number is even, so any future writers will observe
+ // the new gen value. That means it's safe to read from
+ // the other deltas in the stats buffer.
+
+ // Perform our responsibilities and free up
+ // stats[prevGen] for the next time we want to take
+ // a snapshot.
+ m.stats[currGen].merge(&m.stats[prevGen])
+ m.stats[prevGen] = heapStatsDelta{}
+
+ // Finally, copy out the complete delta.
+ *out = m.stats[currGen]
+
+ releasem(mp)
+}
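+
+// Editorial sketch (not runtime code): the reader's bookkeeping above
+// reduces to modular arithmetic over the three slots:
+//
+//	currGen := gen           // writers have been writing here
+//	prevGen := (gen + 2) % 3 // left dirty by the previous read
+//	gen = (gen + 1) % 3      // writers move to a slot known to be clear
+//	stats[currGen].merge(&stats[prevGen])
+//	stats[prevGen] = heapStatsDelta{}
+//	// stats[currGen] now holds the complete aggregate.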
diff --git a/contrib/go/_std_1.19/src/runtime/mwbbuf.go b/contrib/go/_std_1.19/src/runtime/mwbbuf.go
new file mode 100644
index 0000000000..39ce0b46a9
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/mwbbuf.go
@@ -0,0 +1,290 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This implements the write barrier buffer. The write barrier itself
+// is gcWriteBarrier and is implemented in assembly.
+//
+// See mbarrier.go for algorithmic details on the write barrier. This
+// file deals only with the buffer.
+//
+// The write barrier has a fast path and a slow path. The fast path
+// simply enqueues to a per-P write barrier buffer. It's written in
+// assembly and doesn't clobber any general purpose registers, so it
+// doesn't have the usual overheads of a Go call.
+//
+// When the buffer fills up, the write barrier invokes the slow path
+// (wbBufFlush) to flush the buffer to the GC work queues. In this
+// path, since the compiler didn't spill registers, we spill *all*
+// registers and disallow any GC safe points that could observe the
+// stack frame (since we don't know the types of the spilled
+// registers).
+
+package runtime
+
+import (
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// testSmallBuf forces a small write barrier buffer to stress write
+// barrier flushing.
+const testSmallBuf = false
+
+// wbBuf is a per-P buffer of pointers queued by the write barrier.
+// This buffer is flushed to the GC workbufs when it fills up and on
+// various GC transitions.
+//
+// This is closely related to a "sequential store buffer" (SSB),
+// except that SSBs are usually used for maintaining remembered sets,
+// while this is used for marking.
+type wbBuf struct {
+ // next points to the next slot in buf. It must not be a
+ // pointer type because it can point past the end of buf and
+ // must be updated without write barriers.
+ //
+ // This is a pointer rather than an index to optimize the
+ // write barrier assembly.
+ next uintptr
+
+ // end points to just past the end of buf. It must not be a
+ // pointer type because it points past the end of buf and must
+ // be updated without write barriers.
+ end uintptr
+
+ // buf stores a series of pointers to execute write barriers
+ // on. This must be a multiple of wbBufEntryPointers because
+ // the write barrier only checks for overflow once per entry.
+ buf [wbBufEntryPointers * wbBufEntries]uintptr
+}
+
+const (
+ // wbBufEntries is the number of write barriers between
+ // flushes of the write barrier buffer.
+ //
+ // This trades latency for throughput amortization. Higher
+ // values amortize flushing overhead more, but increase the
+ // latency of flushing. Higher values also increase the cache
+ // footprint of the buffer.
+ //
+ // TODO: What is the latency cost of this? Tune this value.
+ wbBufEntries = 256
+
+ // wbBufEntryPointers is the number of pointers added to the
+ // buffer by each write barrier.
+ wbBufEntryPointers = 2
+)
+
+// reset empties b by resetting its next and end pointers.
+func (b *wbBuf) reset() {
+ start := uintptr(unsafe.Pointer(&b.buf[0]))
+ b.next = start
+ if writeBarrier.cgo {
+ // Effectively disable the buffer by forcing a flush
+ // on every barrier.
+ b.end = uintptr(unsafe.Pointer(&b.buf[wbBufEntryPointers]))
+ } else if testSmallBuf {
+ // For testing, allow two barriers in the buffer. If
+ // we only did one, then barriers of non-heap pointers
+ // would be no-ops. This lets us combine a buffered
+ // barrier with a flush at a later time.
+ b.end = uintptr(unsafe.Pointer(&b.buf[2*wbBufEntryPointers]))
+ } else {
+ b.end = start + uintptr(len(b.buf))*unsafe.Sizeof(b.buf[0])
+ }
+
+ if (b.end-b.next)%(wbBufEntryPointers*unsafe.Sizeof(b.buf[0])) != 0 {
+ throw("bad write barrier buffer bounds")
+ }
+}
+
+// discard resets b's next pointer, but not its end pointer.
+//
+// This must be nosplit because it's called by wbBufFlush.
+//
+//go:nosplit
+func (b *wbBuf) discard() {
+ b.next = uintptr(unsafe.Pointer(&b.buf[0]))
+}
+
+// empty reports whether b contains no pointers.
+func (b *wbBuf) empty() bool {
+ return b.next == uintptr(unsafe.Pointer(&b.buf[0]))
+}
+
+// putFast adds old and new to the write barrier buffer and returns
+// false if a flush is necessary. Callers should use this as:
+//
+// buf := &getg().m.p.ptr().wbBuf
+// if !buf.putFast(old, new) {
+// wbBufFlush(...)
+// }
+// ... actual memory write ...
+//
+// The arguments to wbBufFlush depend on whether the caller is doing
+// its own cgo pointer checks. If it is, then this can be
+// wbBufFlush(nil, 0). Otherwise, it must pass the slot address and
+// new.
+//
+// The caller must ensure there are no preemption points during the
+// above sequence. There must be no preemption points while buf is in
+// use because it is a per-P resource. There must be no preemption
+// points between the buffer put and the write to memory because this
+// could allow a GC phase change, which could result in missed write
+// barriers.
+//
+// putFast must be nowritebarrierrec because write barriers here would
+// corrupt the write barrier buffer. It (and everything it calls, if
+// it called anything) has to be nosplit to avoid scheduling on to a
+// different P and a different buffer.
+//
+//go:nowritebarrierrec
+//go:nosplit
+func (b *wbBuf) putFast(old, new uintptr) bool {
+ p := (*[2]uintptr)(unsafe.Pointer(b.next))
+ p[0] = old
+ p[1] = new
+ b.next += 2 * goarch.PtrSize
+ return b.next != b.end
+}
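+
+// Editorial note: buf holds a multiple of wbBufEntryPointers entries and
+// next advances by exactly two pointers per call, so next can only ever
+// land exactly on end; the single b.next != b.end comparison is therefore
+// a complete full-buffer check, with no bounds arithmetic needed.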
+
+// wbBufFlush flushes the current P's write barrier buffer to the GC
+// workbufs. It is passed the slot and value of the write barrier that
+// caused the flush so that it can implement cgocheck.
+//
+// This must not have write barriers because it is part of the write
+// barrier implementation.
+//
+// This and everything it calls must be nosplit because 1) the stack
+// contains untyped slots from gcWriteBarrier and 2) there must not be
+// a GC safe point between the write barrier test in the caller and
+// flushing the buffer.
+//
+// TODO: A "go:nosplitrec" annotation would be perfect for this.
+//
+//go:nowritebarrierrec
+//go:nosplit
+func wbBufFlush(dst *uintptr, src uintptr) {
+ // Note: Every possible return from this function must reset
+ // the buffer's next pointer to prevent buffer overflow.
+
+ // This *must not* modify its arguments because this
+ // function's argument slots do double duty in gcWriteBarrier
+ // as register spill slots. Currently, not modifying the
+ // arguments is sufficient to keep the spill slots unmodified
+ // (which seems unlikely to change since it costs little and
+ // helps with debugging).
+
+ if getg().m.dying > 0 {
+ // We're going down. Not much point in write barriers
+ // and this way we can allow write barriers in the
+ // panic path.
+ getg().m.p.ptr().wbBuf.discard()
+ return
+ }
+
+ if writeBarrier.cgo && dst != nil {
+ // This must be called from the stack that did the
+ // write. It's nosplit all the way down.
+ cgoCheckWriteBarrier(dst, src)
+ if !writeBarrier.needed {
+ // We were only called for cgocheck.
+ getg().m.p.ptr().wbBuf.discard()
+ return
+ }
+ }
+
+ // Switch to the system stack so we don't have to worry about
+ // the untyped stack slots or safe points.
+ systemstack(func() {
+ wbBufFlush1(getg().m.p.ptr())
+ })
+}
+
+// wbBufFlush1 flushes p's write barrier buffer to the GC work queue.
+//
+// This must not have write barriers because it is part of the write
+// barrier implementation; a write barrier here could lead to infinite
+// recursion or buffer corruption.
+//
+// This must be non-preemptible because it uses the P's workbuf.
+//
+//go:nowritebarrierrec
+//go:systemstack
+func wbBufFlush1(_p_ *p) {
+ // Get the buffered pointers.
+ start := uintptr(unsafe.Pointer(&_p_.wbBuf.buf[0]))
+ n := (_p_.wbBuf.next - start) / unsafe.Sizeof(_p_.wbBuf.buf[0])
+ ptrs := _p_.wbBuf.buf[:n]
+
+ // Poison the buffer to make extra sure nothing is enqueued
+ // while we're processing the buffer.
+ _p_.wbBuf.next = 0
+
+ if useCheckmark {
+ // Slow path for checkmark mode.
+ for _, ptr := range ptrs {
+ shade(ptr)
+ }
+ _p_.wbBuf.reset()
+ return
+ }
+
+ // Mark all of the pointers in the buffer and record only the
+ // pointers we greyed. We use the buffer itself to temporarily
+ // record greyed pointers.
+ //
+ // TODO: Should scanobject/scanblock just stuff pointers into
+ // the wbBuf? Then this would become the sole greying path.
+ //
+ // TODO: We could avoid shading any of the "new" pointers in
+ // the buffer if the stack has been shaded, or even avoid
+ // putting them in the buffer at all (which would double its
+ // capacity). This is slightly complicated with the buffer; we
+ // could track whether any un-shaded goroutine has used the
+ // buffer, or just track globally whether there are any
+ // un-shaded stacks and flush after each stack scan.
+ gcw := &_p_.gcw
+ pos := 0
+ for _, ptr := range ptrs {
+ if ptr < minLegalPointer {
+ // nil pointers are very common, especially
+ // for the "old" values. Filter out these and
+ // other "obvious" non-heap pointers ASAP.
+ //
+ // TODO: Should we filter out nils in the fast
+ // path to reduce the rate of flushes?
+ continue
+ }
+ obj, span, objIndex := findObject(ptr, 0, 0)
+ if obj == 0 {
+ continue
+ }
+ // TODO: Consider making two passes where the first
+ // just prefetches the mark bits.
+ mbits := span.markBitsForIndex(objIndex)
+ if mbits.isMarked() {
+ continue
+ }
+ mbits.setMarked()
+
+ // Mark span.
+ arena, pageIdx, pageMask := pageIndexOf(span.base())
+ if arena.pageMarks[pageIdx]&pageMask == 0 {
+ atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
+ }
+
+ if span.spanclass.noscan() {
+ gcw.bytesMarked += uint64(span.elemsize)
+ continue
+ }
+ ptrs[pos] = obj
+ pos++
+ }
+
+ // Enqueue the greyed objects.
+ gcw.putBatch(ptrs[:pos])
+
+ _p_.wbBuf.reset()
+}
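+
+// Editorial sketch: the ptrs[pos] compaction above is the standard Go
+// in-place filter idiom, reusing the input slice as the output:
+//
+//	pos := 0
+//	for _, v := range s {
+//		if keep(v) { // keep is a placeholder predicate
+//			s[pos] = v
+//			pos++
+//		}
+//	}
+//	s = s[:pos]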
diff --git a/contrib/go/_std_1.18/src/runtime/nbpipe_pipe.go b/contrib/go/_std_1.19/src/runtime/nbpipe_pipe.go
index 408e1ec410..408e1ec410 100644
--- a/contrib/go/_std_1.18/src/runtime/nbpipe_pipe.go
+++ b/contrib/go/_std_1.19/src/runtime/nbpipe_pipe.go
diff --git a/contrib/go/_std_1.19/src/runtime/nbpipe_pipe2.go b/contrib/go/_std_1.19/src/runtime/nbpipe_pipe2.go
new file mode 100644
index 0000000000..22d60b4a63
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/nbpipe_pipe2.go
@@ -0,0 +1,11 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build dragonfly || freebsd || linux || netbsd || openbsd || solaris
+
+package runtime
+
+func nonblockingPipe() (r, w int32, errno int32) {
+ return pipe2(_O_NONBLOCK | _O_CLOEXEC)
+}
diff --git a/contrib/go/_std_1.19/src/runtime/netpoll.go b/contrib/go/_std_1.19/src/runtime/netpoll.go
new file mode 100644
index 0000000000..ac6bc89530
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/netpoll.go
@@ -0,0 +1,656 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm) || windows
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// Integrated network poller (platform-independent part).
+// A particular implementation (epoll/kqueue/port/AIX/Windows)
+// must define the following functions:
+//
+// func netpollinit()
+// Initialize the poller. Only called once.
+//
+// func netpollopen(fd uintptr, pd *pollDesc) int32
+// Arm edge-triggered notifications for fd. The pd argument is to pass
+// back to netpollready when fd is ready. Return an errno value.
+//
+// func netpollclose(fd uintptr) int32
+// Disable notifications for fd. Return an errno value.
+//
+// func netpoll(delta int64) gList
+// Poll the network. If delta < 0, block indefinitely. If delta == 0,
+// poll without blocking. If delta > 0, block for up to delta nanoseconds.
+// Return a list of goroutines built by calling netpollready.
+//
+// func netpollBreak()
+// Wake up the network poller, assumed to be blocked in netpoll.
+//
+// func netpollIsPollDescriptor(fd uintptr) bool
+// Reports whether fd is a file descriptor used by the poller.
+
+// Error codes returned by runtime_pollReset and runtime_pollWait.
+// These must match the values in internal/poll/fd_poll_runtime.go.
+const (
+ pollNoError = 0 // no error
+ pollErrClosing = 1 // descriptor is closed
+ pollErrTimeout = 2 // I/O timeout
+ pollErrNotPollable = 3 // general error polling descriptor
+)
+
+// pollDesc contains 2 binary semaphores, rg and wg, to park reader and writer
+// goroutines respectively. The semaphore can be in the following states:
+//
+// pdReady - io readiness notification is pending;
+// a goroutine consumes the notification by changing the state to nil.
+// pdWait - a goroutine prepares to park on the semaphore, but not yet parked;
+// the goroutine commits to park by changing the state to G pointer,
+// or, alternatively, concurrent io notification changes the state to pdReady,
+// or, alternatively, concurrent timeout/close changes the state to nil.
+// G pointer - the goroutine is blocked on the semaphore;
+// io notification or timeout/close changes the state to pdReady or nil respectively
+// and unparks the goroutine.
+// nil - none of the above.
+const (
+ pdReady uintptr = 1
+ pdWait uintptr = 2
+)
+
+const pollBlockSize = 4 * 1024
+
+// Network poller descriptor.
+//
+// No heap pointers.
+//
+//go:notinheap
+type pollDesc struct {
+ link *pollDesc // in pollcache, protected by pollcache.lock
+ fd uintptr // constant for pollDesc usage lifetime
+
+ // atomicInfo holds bits from closing, rd, and wd,
+ // which are only ever written while holding the lock,
+ // summarized for use by netpollcheckerr,
+ // which cannot acquire the lock.
+ // After writing these fields under lock in a way that
+ // might change the summary, code must call publishInfo
+ // before releasing the lock.
+ // Code that changes fields and then calls netpollunblock
+ // (while still holding the lock) must call publishInfo
+ // before calling netpollunblock, because publishInfo is what
+ // stops netpollblock from blocking anew
+ // (by changing the result of netpollcheckerr).
+ // atomicInfo also holds the eventErr bit,
+ // recording whether a poll event on the fd got an error;
+ // atomicInfo is the only source of truth for that bit.
+ atomicInfo atomic.Uint32 // atomic pollInfo
+
+ // rg, wg are accessed atomically and hold g pointers.
+ // (Using atomic.Uintptr here is similar to using guintptr elsewhere.)
+ rg atomic.Uintptr // pdReady, pdWait, G waiting for read or nil
+ wg atomic.Uintptr // pdReady, pdWait, G waiting for write or nil
+
+ lock mutex // protects the following fields
+ closing bool
+ user uint32 // user settable cookie
+ rseq uintptr // protects from stale read timers
+ rt timer // read deadline timer (set if rt.f != nil)
+ rd int64 // read deadline (a nanotime in the future, -1 when expired)
+ wseq uintptr // protects from stale write timers
+ wt timer // write deadline timer
+ wd int64 // write deadline (a nanotime in the future, -1 when expired)
+ self *pollDesc // storage for indirect interface. See (*pollDesc).makeArg.
+}
+
+// pollInfo is the bits needed by netpollcheckerr, stored atomically,
+// mostly duplicating state that is manipulated under lock in pollDesc.
+// The one exception is the pollEventErr bit, which is maintained only
+// in the pollInfo.
+type pollInfo uint32
+
+const (
+ pollClosing = 1 << iota
+ pollEventErr
+ pollExpiredReadDeadline
+ pollExpiredWriteDeadline
+)
+
+func (i pollInfo) closing() bool { return i&pollClosing != 0 }
+func (i pollInfo) eventErr() bool { return i&pollEventErr != 0 }
+func (i pollInfo) expiredReadDeadline() bool { return i&pollExpiredReadDeadline != 0 }
+func (i pollInfo) expiredWriteDeadline() bool { return i&pollExpiredWriteDeadline != 0 }
+
+// info returns the pollInfo corresponding to pd.
+func (pd *pollDesc) info() pollInfo {
+ return pollInfo(pd.atomicInfo.Load())
+}
+
+// publishInfo updates pd.atomicInfo (returned by pd.info)
+// using the other values in pd.
+// It must be called while holding pd.lock,
+// and it must be called after changing anything
+// that might affect the info bits.
+// In practice this means after changing closing
+// or changing rd or wd from < 0 to >= 0.
+func (pd *pollDesc) publishInfo() {
+ var info uint32
+ if pd.closing {
+ info |= pollClosing
+ }
+ if pd.rd < 0 {
+ info |= pollExpiredReadDeadline
+ }
+ if pd.wd < 0 {
+ info |= pollExpiredWriteDeadline
+ }
+
+ // Set all of x except the pollEventErr bit.
+ x := pd.atomicInfo.Load()
+ for !pd.atomicInfo.CompareAndSwap(x, (x&pollEventErr)|info) {
+ x = pd.atomicInfo.Load()
+ }
+}
+
+// setEventErr sets the result of pd.info().eventErr() to b.
+func (pd *pollDesc) setEventErr(b bool) {
+ x := pd.atomicInfo.Load()
+ for (x&pollEventErr != 0) != b && !pd.atomicInfo.CompareAndSwap(x, x^pollEventErr) {
+ x = pd.atomicInfo.Load()
+ }
+}
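+
+// Editorial note: both loops above are the usual lock-free
+// read-modify-write pattern: load the word, compute the desired value,
+// and CompareAndSwap; on failure another writer won the race, so reload
+// and retry. In setEventErr, x^pollEventErr flips exactly that one bit,
+// and the loop exits as soon as the bit already matches b.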
+
+type pollCache struct {
+ lock mutex
+ first *pollDesc
+ // PollDesc objects must be type-stable,
+ // because we can get ready notification from epoll/kqueue
+ // after the descriptor is closed/reused.
+ // Stale notifications are detected using seq variable,
+ // seq is incremented when deadlines are changed or descriptor is reused.
+}
+
+var (
+ netpollInitLock mutex
+ netpollInited uint32
+
+ pollcache pollCache
+ netpollWaiters uint32
+)
+
+//go:linkname poll_runtime_pollServerInit internal/poll.runtime_pollServerInit
+func poll_runtime_pollServerInit() {
+ netpollGenericInit()
+}
+
+func netpollGenericInit() {
+ if atomic.Load(&netpollInited) == 0 {
+ lockInit(&netpollInitLock, lockRankNetpollInit)
+ lock(&netpollInitLock)
+ if netpollInited == 0 {
+ netpollinit()
+ atomic.Store(&netpollInited, 1)
+ }
+ unlock(&netpollInitLock)
+ }
+}
+
+func netpollinited() bool {
+ return atomic.Load(&netpollInited) != 0
+}
+
+//go:linkname poll_runtime_isPollServerDescriptor internal/poll.runtime_isPollServerDescriptor
+
+// poll_runtime_isPollServerDescriptor reports whether fd is a
+// descriptor being used by netpoll.
+func poll_runtime_isPollServerDescriptor(fd uintptr) bool {
+ return netpollIsPollDescriptor(fd)
+}
+
+//go:linkname poll_runtime_pollOpen internal/poll.runtime_pollOpen
+func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
+ pd := pollcache.alloc()
+ lock(&pd.lock)
+ wg := pd.wg.Load()
+ if wg != 0 && wg != pdReady {
+ throw("runtime: blocked write on free polldesc")
+ }
+ rg := pd.rg.Load()
+ if rg != 0 && rg != pdReady {
+ throw("runtime: blocked read on free polldesc")
+ }
+ pd.fd = fd
+ pd.closing = false
+ pd.setEventErr(false)
+ pd.rseq++
+ pd.rg.Store(0)
+ pd.rd = 0
+ pd.wseq++
+ pd.wg.Store(0)
+ pd.wd = 0
+ pd.self = pd
+ pd.publishInfo()
+ unlock(&pd.lock)
+
+ errno := netpollopen(fd, pd)
+ if errno != 0 {
+ pollcache.free(pd)
+ return nil, int(errno)
+ }
+ return pd, 0
+}
+
+//go:linkname poll_runtime_pollClose internal/poll.runtime_pollClose
+func poll_runtime_pollClose(pd *pollDesc) {
+ if !pd.closing {
+ throw("runtime: close polldesc w/o unblock")
+ }
+ wg := pd.wg.Load()
+ if wg != 0 && wg != pdReady {
+ throw("runtime: blocked write on closing polldesc")
+ }
+ rg := pd.rg.Load()
+ if rg != 0 && rg != pdReady {
+ throw("runtime: blocked read on closing polldesc")
+ }
+ netpollclose(pd.fd)
+ pollcache.free(pd)
+}
+
+func (c *pollCache) free(pd *pollDesc) {
+ lock(&c.lock)
+ pd.link = c.first
+ c.first = pd
+ unlock(&c.lock)
+}
+
+// poll_runtime_pollReset, which is internal/poll.runtime_pollReset,
+// prepares a descriptor for polling in mode, which is 'r' or 'w'.
+// This returns an error code; the codes are defined above.
+//
+//go:linkname poll_runtime_pollReset internal/poll.runtime_pollReset
+func poll_runtime_pollReset(pd *pollDesc, mode int) int {
+ errcode := netpollcheckerr(pd, int32(mode))
+ if errcode != pollNoError {
+ return errcode
+ }
+ if mode == 'r' {
+ pd.rg.Store(0)
+ } else if mode == 'w' {
+ pd.wg.Store(0)
+ }
+ return pollNoError
+}
+
+// poll_runtime_pollWait, which is internal/poll.runtime_pollWait,
+// waits for a descriptor to be ready for reading or writing,
+// according to mode, which is 'r' or 'w'.
+// This returns an error code; the codes are defined above.
+//
+//go:linkname poll_runtime_pollWait internal/poll.runtime_pollWait
+func poll_runtime_pollWait(pd *pollDesc, mode int) int {
+ errcode := netpollcheckerr(pd, int32(mode))
+ if errcode != pollNoError {
+ return errcode
+ }
+ // For now, only Solaris, illumos, and AIX use level-triggered IO.
+ if GOOS == "solaris" || GOOS == "illumos" || GOOS == "aix" {
+ netpollarm(pd, mode)
+ }
+ for !netpollblock(pd, int32(mode), false) {
+ errcode = netpollcheckerr(pd, int32(mode))
+ if errcode != pollNoError {
+ return errcode
+ }
+ // This can happen if the timeout fired and unblocked us,
+ // but the timeout was reset before we had a chance to run.
+ // Pretend it did not happen and retry.
+ }
+ return pollNoError
+}
+
+//go:linkname poll_runtime_pollWaitCanceled internal/poll.runtime_pollWaitCanceled
+func poll_runtime_pollWaitCanceled(pd *pollDesc, mode int) {
+ // This function is used only on windows after a failed attempt to cancel
+ // a pending async IO operation. Wait for ioready, ignore closing or timeouts.
+ for !netpollblock(pd, int32(mode), true) {
+ }
+}
+
+//go:linkname poll_runtime_pollSetDeadline internal/poll.runtime_pollSetDeadline
+func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
+ lock(&pd.lock)
+ if pd.closing {
+ unlock(&pd.lock)
+ return
+ }
+ rd0, wd0 := pd.rd, pd.wd
+ combo0 := rd0 > 0 && rd0 == wd0
+ if d > 0 {
+ d += nanotime()
+ if d <= 0 {
+ // If the user has a deadline in the future, but the delay calculation
+ // overflows, then set the deadline to the maximum possible value.
+ d = 1<<63 - 1
+ }
+ }
+ if mode == 'r' || mode == 'r'+'w' {
+ pd.rd = d
+ }
+ if mode == 'w' || mode == 'r'+'w' {
+ pd.wd = d
+ }
+ pd.publishInfo()
+ combo := pd.rd > 0 && pd.rd == pd.wd
+ rtf := netpollReadDeadline
+ if combo {
+ rtf = netpollDeadline
+ }
+ if pd.rt.f == nil {
+ if pd.rd > 0 {
+ pd.rt.f = rtf
+ // Copy current seq into the timer arg.
+ // Timer func will check the seq against current descriptor seq,
+ // if they differ the descriptor was reused or timers were reset.
+ pd.rt.arg = pd.makeArg()
+ pd.rt.seq = pd.rseq
+ resettimer(&pd.rt, pd.rd)
+ }
+ } else if pd.rd != rd0 || combo != combo0 {
+ pd.rseq++ // invalidate current timers
+ if pd.rd > 0 {
+ modtimer(&pd.rt, pd.rd, 0, rtf, pd.makeArg(), pd.rseq)
+ } else {
+ deltimer(&pd.rt)
+ pd.rt.f = nil
+ }
+ }
+ if pd.wt.f == nil {
+ if pd.wd > 0 && !combo {
+ pd.wt.f = netpollWriteDeadline
+ pd.wt.arg = pd.makeArg()
+ pd.wt.seq = pd.wseq
+ resettimer(&pd.wt, pd.wd)
+ }
+ } else if pd.wd != wd0 || combo != combo0 {
+ pd.wseq++ // invalidate current timers
+ if pd.wd > 0 && !combo {
+ modtimer(&pd.wt, pd.wd, 0, netpollWriteDeadline, pd.makeArg(), pd.wseq)
+ } else {
+ deltimer(&pd.wt)
+ pd.wt.f = nil
+ }
+ }
+ // If we set the new deadline in the past, unblock currently pending IO if any.
+ // Note that pd.publishInfo has already been called, above, immediately after modifying rd and wd.
+ var rg, wg *g
+ if pd.rd < 0 {
+ rg = netpollunblock(pd, 'r', false)
+ }
+ if pd.wd < 0 {
+ wg = netpollunblock(pd, 'w', false)
+ }
+ unlock(&pd.lock)
+ if rg != nil {
+ netpollgoready(rg, 3)
+ }
+ if wg != nil {
+ netpollgoready(wg, 3)
+ }
+}
+
+//go:linkname poll_runtime_pollUnblock internal/poll.runtime_pollUnblock
+func poll_runtime_pollUnblock(pd *pollDesc) {
+ lock(&pd.lock)
+ if pd.closing {
+ throw("runtime: unblock on closing polldesc")
+ }
+ pd.closing = true
+ pd.rseq++
+ pd.wseq++
+ var rg, wg *g
+ pd.publishInfo()
+ rg = netpollunblock(pd, 'r', false)
+ wg = netpollunblock(pd, 'w', false)
+ if pd.rt.f != nil {
+ deltimer(&pd.rt)
+ pd.rt.f = nil
+ }
+ if pd.wt.f != nil {
+ deltimer(&pd.wt)
+ pd.wt.f = nil
+ }
+ unlock(&pd.lock)
+ if rg != nil {
+ netpollgoready(rg, 3)
+ }
+ if wg != nil {
+ netpollgoready(wg, 3)
+ }
+}
+
+// netpollready is called by the platform-specific netpoll function.
+// It declares that the fd associated with pd is ready for I/O.
+// The toRun argument is used to build a list of goroutines to return
+// from netpoll. The mode argument is 'r', 'w', or 'r'+'w' to indicate
+// whether the fd is ready for reading or writing or both.
+//
+// This may run while the world is stopped, so write barriers are not allowed.
+//
+//go:nowritebarrier
+func netpollready(toRun *gList, pd *pollDesc, mode int32) {
+ var rg, wg *g
+ if mode == 'r' || mode == 'r'+'w' {
+ rg = netpollunblock(pd, 'r', true)
+ }
+ if mode == 'w' || mode == 'r'+'w' {
+ wg = netpollunblock(pd, 'w', true)
+ }
+ if rg != nil {
+ toRun.push(rg)
+ }
+ if wg != nil {
+ toRun.push(wg)
+ }
+}
+
+func netpollcheckerr(pd *pollDesc, mode int32) int {
+ info := pd.info()
+ if info.closing() {
+ return pollErrClosing
+ }
+ if (mode == 'r' && info.expiredReadDeadline()) || (mode == 'w' && info.expiredWriteDeadline()) {
+ return pollErrTimeout
+ }
+ // Report an event scanning error only on a read event.
+ // An error on a write event will be captured in a subsequent
+ // write call that is able to report a more specific error.
+ if mode == 'r' && info.eventErr() {
+ return pollErrNotPollable
+ }
+ return pollNoError
+}
+
+func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
+ r := atomic.Casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp)))
+ if r {
+ // Bump the count of goroutines waiting for the poller.
+ // The scheduler uses this to decide whether to block
+ // waiting for the poller if there is nothing else to do.
+ atomic.Xadd(&netpollWaiters, 1)
+ }
+ return r
+}
+
+func netpollgoready(gp *g, traceskip int) {
+ atomic.Xadd(&netpollWaiters, -1)
+ goready(gp, traceskip+1)
+}
+
+// returns true if IO is ready, or false if timed out or closed
+// waitio - wait only for completed IO, ignore errors
+// Concurrent calls to netpollblock in the same mode are forbidden, as pollDesc
+// can hold only a single waiting goroutine for each mode.
+func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
+ gpp := &pd.rg
+ if mode == 'w' {
+ gpp = &pd.wg
+ }
+
+ // set the gpp semaphore to pdWait
+ for {
+ // Consume notification if already ready.
+ if gpp.CompareAndSwap(pdReady, 0) {
+ return true
+ }
+ if gpp.CompareAndSwap(0, pdWait) {
+ break
+ }
+
+ // Double check that this isn't corrupt; otherwise we'd loop
+ // forever.
+ if v := gpp.Load(); v != pdReady && v != 0 {
+ throw("runtime: double wait")
+ }
+ }
+
+ // need to recheck error states after setting gpp to pdWait
+ // this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
+ // do the opposite: store to closing/rd/wd, publishInfo, load of rg/wg
+ if waitio || netpollcheckerr(pd, mode) == pollNoError {
+ gopark(netpollblockcommit, unsafe.Pointer(gpp), waitReasonIOWait, traceEvGoBlockNet, 5)
+ }
+ // be careful to not lose concurrent pdReady notification
+ old := gpp.Swap(0)
+ if old > pdWait {
+ throw("runtime: corrupted polldesc")
+ }
+ return old == pdReady
+}
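+
+// The rg/wg word acts as a small state machine; a rough sketch of its
+// transitions (0 means "no waiter, no notification"):
+//
+//	0      -> pdWait   netpollblock prepares to park
+//	pdWait -> *g       netpollblockcommit publishes the parked goroutine
+//	any    -> pdReady  netpollunblock with ioready; the waiter, if any, is returned
+//	any    -> 0        netpollunblock without ioready, or netpollblock consuming pdReady
+//
+// Every transition is a CAS (or the final Swap in netpollblock), so a
+// concurrent pdReady notification is never lost.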
+
+func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
+ gpp := &pd.rg
+ if mode == 'w' {
+ gpp = &pd.wg
+ }
+
+ for {
+ old := gpp.Load()
+ if old == pdReady {
+ return nil
+ }
+ if old == 0 && !ioready {
+ // Only set pdReady for ioready. runtime_pollWait
+ // will check for timeout/cancel before waiting.
+ return nil
+ }
+ var new uintptr
+ if ioready {
+ new = pdReady
+ }
+ if gpp.CompareAndSwap(old, new) {
+ if old == pdWait {
+ old = 0
+ }
+ return (*g)(unsafe.Pointer(old))
+ }
+ }
+}
+
+func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
+ lock(&pd.lock)
+ // Seq arg is seq when the timer was set.
+ // If it's stale, ignore the timer event.
+ currentSeq := pd.rseq
+ if !read {
+ currentSeq = pd.wseq
+ }
+ if seq != currentSeq {
+ // The descriptor was reused or timers were reset.
+ unlock(&pd.lock)
+ return
+ }
+ var rg *g
+ if read {
+ if pd.rd <= 0 || pd.rt.f == nil {
+ throw("runtime: inconsistent read deadline")
+ }
+ pd.rd = -1
+ pd.publishInfo()
+ rg = netpollunblock(pd, 'r', false)
+ }
+ var wg *g
+ if write {
+ if pd.wd <= 0 || pd.wt.f == nil && !read {
+ throw("runtime: inconsistent write deadline")
+ }
+ pd.wd = -1
+ pd.publishInfo()
+ wg = netpollunblock(pd, 'w', false)
+ }
+ unlock(&pd.lock)
+ if rg != nil {
+ netpollgoready(rg, 0)
+ }
+ if wg != nil {
+ netpollgoready(wg, 0)
+ }
+}
+
+func netpollDeadline(arg any, seq uintptr) {
+ netpolldeadlineimpl(arg.(*pollDesc), seq, true, true)
+}
+
+func netpollReadDeadline(arg any, seq uintptr) {
+ netpolldeadlineimpl(arg.(*pollDesc), seq, true, false)
+}
+
+func netpollWriteDeadline(arg any, seq uintptr) {
+ netpolldeadlineimpl(arg.(*pollDesc), seq, false, true)
+}
+
+func (c *pollCache) alloc() *pollDesc {
+ lock(&c.lock)
+ if c.first == nil {
+ const pdSize = unsafe.Sizeof(pollDesc{})
+ n := pollBlockSize / pdSize
+ if n == 0 {
+ n = 1
+ }
+ // Must be in non-GC memory because can be referenced
+ // only from epoll/kqueue internals.
+ mem := persistentalloc(n*pdSize, 0, &memstats.other_sys)
+ for i := uintptr(0); i < n; i++ {
+ pd := (*pollDesc)(add(mem, i*pdSize))
+ pd.link = c.first
+ c.first = pd
+ }
+ }
+ pd := c.first
+ c.first = pd.link
+ lockInit(&pd.lock, lockRankPollDesc)
+ unlock(&c.lock)
+ return pd
+}
+
+// makeArg converts pd to an interface{}.
+// makeArg does not do any allocation. Normally, such
+// a conversion requires an allocation because pointers to
+// go:notinheap types (which pollDesc is) must be stored
+// in interfaces indirectly. See issue 42076.
+func (pd *pollDesc) makeArg() (i any) {
+ x := (*eface)(unsafe.Pointer(&i))
+ x._type = pdType
+ x.data = unsafe.Pointer(&pd.self)
+ return
+}
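+
+// In effect, makeArg hand-assembles the eface that the compiler would build
+// for `var i any = pd`: the type word names *pollDesc and the data word
+// points at pd.self, matching the indirect layout required for go:notinheap
+// types but without allocating the indirection. (A sketch of the idea, which
+// relies on pd.self pointing back at pd; not a general-purpose technique.)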
+
+var (
+ pdEface any = (*pollDesc)(nil)
+ pdType *_type = efaceOf(&pdEface)._type
+)
diff --git a/contrib/go/_std_1.18/src/runtime/netpoll_epoll.go b/contrib/go/_std_1.19/src/runtime/netpoll_epoll.go
index b7d6199965..b7d6199965 100644
--- a/contrib/go/_std_1.18/src/runtime/netpoll_epoll.go
+++ b/contrib/go/_std_1.19/src/runtime/netpoll_epoll.go
diff --git a/contrib/go/_std_1.18/src/runtime/netpoll_kqueue.go b/contrib/go/_std_1.19/src/runtime/netpoll_kqueue.go
index 1694753b6f..1694753b6f 100644
--- a/contrib/go/_std_1.18/src/runtime/netpoll_kqueue.go
+++ b/contrib/go/_std_1.19/src/runtime/netpoll_kqueue.go
diff --git a/contrib/go/_std_1.19/src/runtime/os_darwin.go b/contrib/go/_std_1.19/src/runtime/os_darwin.go
new file mode 100644
index 0000000000..8562d7d906
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/os_darwin.go
@@ -0,0 +1,474 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/abi"
+ "unsafe"
+)
+
+type mOS struct {
+ initialized bool
+ mutex pthreadmutex
+ cond pthreadcond
+ count int
+}
+
+func unimplemented(name string) {
+ println(name, "not implemented")
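+	// Crash by writing to an obviously invalid address, so the failure
+	// is loud rather than silent.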
+ *(*int)(unsafe.Pointer(uintptr(1231))) = 1231
+}
+
+//go:nosplit
+func semacreate(mp *m) {
+ if mp.initialized {
+ return
+ }
+ mp.initialized = true
+ if err := pthread_mutex_init(&mp.mutex, nil); err != 0 {
+ throw("pthread_mutex_init")
+ }
+ if err := pthread_cond_init(&mp.cond, nil); err != 0 {
+ throw("pthread_cond_init")
+ }
+}
+
+//go:nosplit
+func semasleep(ns int64) int32 {
+ var start int64
+ if ns >= 0 {
+ start = nanotime()
+ }
+ mp := getg().m
+ pthread_mutex_lock(&mp.mutex)
+ for {
+ if mp.count > 0 {
+ mp.count--
+ pthread_mutex_unlock(&mp.mutex)
+ return 0
+ }
+ if ns >= 0 {
+ spent := nanotime() - start
+ if spent >= ns {
+ pthread_mutex_unlock(&mp.mutex)
+ return -1
+ }
+ var t timespec
+ t.setNsec(ns - spent)
+ err := pthread_cond_timedwait_relative_np(&mp.cond, &mp.mutex, &t)
+ if err == _ETIMEDOUT {
+ pthread_mutex_unlock(&mp.mutex)
+ return -1
+ }
+ } else {
+ pthread_cond_wait(&mp.cond, &mp.mutex)
+ }
+ }
+}
+
+//go:nosplit
+func semawakeup(mp *m) {
+ pthread_mutex_lock(&mp.mutex)
+ mp.count++
+ if mp.count > 0 {
+ pthread_cond_signal(&mp.cond)
+ }
+ pthread_mutex_unlock(&mp.mutex)
+}
+
+// The read and write file descriptors used by the sigNote functions.
+var sigNoteRead, sigNoteWrite int32
+
+// sigNoteSetup initializes an async-signal-safe note.
+//
+// The current implementation of notes on Darwin is not async-signal-safe,
+// because the functions pthread_mutex_lock, pthread_cond_signal, and
+// pthread_mutex_unlock, called by semawakeup, are not async-signal-safe.
+// There is only one case where we need to wake up a note from a signal
+// handler: the sigsend function. The signal handler code does not require
+// all the features of notes: it does not need to do a timed wait.
+// This is a separate implementation of notes, based on a pipe, that does
+// not support timed waits but is async-signal-safe.
+func sigNoteSetup(*note) {
+ if sigNoteRead != 0 || sigNoteWrite != 0 {
+ throw("duplicate sigNoteSetup")
+ }
+ var errno int32
+ sigNoteRead, sigNoteWrite, errno = pipe()
+ if errno != 0 {
+ throw("pipe failed")
+ }
+ closeonexec(sigNoteRead)
+ closeonexec(sigNoteWrite)
+
+ // Make the write end of the pipe non-blocking, so that if the pipe
+ // buffer is somehow full we will not block in the signal handler.
+ // Leave the read end of the pipe blocking so that we will block
+ // in sigNoteSleep.
+ setNonblock(sigNoteWrite)
+}
+
+// sigNoteWakeup wakes up a thread sleeping on a note created by sigNoteSetup.
+func sigNoteWakeup(*note) {
+ var b byte
+ write(uintptr(sigNoteWrite), unsafe.Pointer(&b), 1)
+}
+
+// sigNoteSleep waits for a note created by sigNoteSetup to be woken.
+func sigNoteSleep(*note) {
+ for {
+ var b byte
+ entersyscallblock()
+ n := read(sigNoteRead, unsafe.Pointer(&b), 1)
+ exitsyscall()
+ if n != -_EINTR {
+ return
+ }
+ }
+}
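+
+// A user-space sketch of the same pipe trick (illustrative only; the
+// runtime cannot use package os):
+//
+//	r, w, _ := os.Pipe()
+//	w.Write([]byte{0}) // wake: a single write is async-signal-safe in C terms
+//	var b [1]byte
+//	r.Read(b[:]) // sleep: blocks until someone writes a byte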
+
+// BSD interface for threading.
+func osinit() {
+ // pthread_create delayed until end of goenvs so that we
+ // can look at the environment first.
+
+ ncpu = getncpu()
+ physPageSize = getPageSize()
+}
+
+func sysctlbynameInt32(name []byte) (int32, int32) {
+ out := int32(0)
+ nout := unsafe.Sizeof(out)
+ ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
+ return ret, out
+}
+
+//go:linkname internal_cpu_getsysctlbyname internal/cpu.getsysctlbyname
+func internal_cpu_getsysctlbyname(name []byte) (int32, int32) {
+ return sysctlbynameInt32(name)
+}
+
+const (
+ _CTL_HW = 6
+ _HW_NCPU = 3
+ _HW_PAGESIZE = 7
+)
+
+func getncpu() int32 {
+ // Use sysctl to fetch hw.ncpu.
+ mib := [2]uint32{_CTL_HW, _HW_NCPU}
+ out := uint32(0)
+ nout := unsafe.Sizeof(out)
+ ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
+ if ret >= 0 && int32(out) > 0 {
+ return int32(out)
+ }
+ return 1
+}
+
+func getPageSize() uintptr {
+ // Use sysctl to fetch hw.pagesize.
+ mib := [2]uint32{_CTL_HW, _HW_PAGESIZE}
+ out := uint32(0)
+ nout := unsafe.Sizeof(out)
+ ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
+ if ret >= 0 && int32(out) > 0 {
+ return uintptr(out)
+ }
+ return 0
+}
+
+var urandom_dev = []byte("/dev/urandom\x00")
+
+//go:nosplit
+func getRandomData(r []byte) {
+ fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
+ n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
+ closefd(fd)
+ extendRandom(r, int(n))
+}
+
+func goenvs() {
+ goenvs_unix()
+}
+
+// May run with m.p==nil, so write barriers are not allowed.
+//
+//go:nowritebarrierrec
+func newosproc(mp *m) {
+ stk := unsafe.Pointer(mp.g0.stack.hi)
+ if false {
+ print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n")
+ }
+
+ // Initialize an attribute object.
+ var attr pthreadattr
+ var err int32
+ err = pthread_attr_init(&attr)
+ if err != 0 {
+ write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
+ exit(1)
+ }
+
+ // Find out OS stack size for our own stack guard.
+ var stacksize uintptr
+ if pthread_attr_getstacksize(&attr, &stacksize) != 0 {
+ write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
+ exit(1)
+ }
+ mp.g0.stack.hi = stacksize // for mstart
+
+ // Tell the pthread library we won't join with this thread.
+ if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
+ write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
+ exit(1)
+ }
+
+ // Finally, create the thread. It starts at mstart_stub, which does some low-level
+ // setup and then calls mstart.
+ var oset sigset
+ sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
+ err = pthread_create(&attr, abi.FuncPCABI0(mstart_stub), unsafe.Pointer(mp))
+ sigprocmask(_SIG_SETMASK, &oset, nil)
+ if err != 0 {
+ write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
+ exit(1)
+ }
+}
+
+// glue code to call mstart from pthread_create.
+func mstart_stub()
+
+// newosproc0 is a version of newosproc that can be called before the runtime
+// is initialized.
+//
+// This function is not safe to use after initialization as it does not pass an M as fnarg.
+//
+//go:nosplit
+func newosproc0(stacksize uintptr, fn uintptr) {
+ // Initialize an attribute object.
+ var attr pthreadattr
+ var err int32
+ err = pthread_attr_init(&attr)
+ if err != 0 {
+ write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
+ exit(1)
+ }
+
+ // The caller passes in a suggested stack size,
+ // from when we allocated the stack and thread ourselves,
+ // without libpthread. Now that we're using libpthread,
+ // we use the OS default stack size instead of the suggestion.
+ // Find out that stack size for our own stack guard.
+ if pthread_attr_getstacksize(&attr, &stacksize) != 0 {
+ write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
+ exit(1)
+ }
+ g0.stack.hi = stacksize // for mstart
+ memstats.stacks_sys.add(int64(stacksize))
+
+ // Tell the pthread library we won't join with this thread.
+ if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
+ write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
+ exit(1)
+ }
+
+ // Finally, create the thread. It starts at mstart_stub, which does some low-level
+ // setup and then calls mstart.
+ var oset sigset
+ sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
+ err = pthread_create(&attr, fn, nil)
+ sigprocmask(_SIG_SETMASK, &oset, nil)
+ if err != 0 {
+ write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
+ exit(1)
+ }
+}
+
+var failallocatestack = []byte("runtime: failed to allocate stack for the new OS thread\n")
+var failthreadcreate = []byte("runtime: failed to create new OS thread\n")
+
+// Called to do synchronous initialization of Go code built with
+// -buildmode=c-archive or -buildmode=c-shared.
+// None of the Go runtime is initialized.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func libpreinit() {
+ initsig(true)
+}
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
+func mpreinit(mp *m) {
+ mp.gsignal = malg(32 * 1024) // OS X wants >= 8K
+ mp.gsignal.m = mp
+ if GOOS == "darwin" && GOARCH == "arm64" {
+ // mlock the signal stack to work around a kernel bug where it may
+ // SIGILL when the signal stack is not faulted in while a signal
+ // arrives. See issue 42774.
+ mlock(unsafe.Pointer(mp.gsignal.stack.hi-physPageSize), physPageSize)
+ }
+}
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the new thread, cannot allocate memory.
+func minit() {
+ // iOS does not support alternate signal stack.
+ // The signal handler handles it directly.
+ if !(GOOS == "ios" && GOARCH == "arm64") {
+ minitSignalStack()
+ }
+ minitSignalMask()
+ getg().m.procid = uint64(pthread_self())
+}
+
+// Called from dropm to undo the effect of an minit.
+//
+//go:nosplit
+func unminit() {
+ // iOS does not support alternate signal stack.
+ // See minit.
+ if !(GOOS == "ios" && GOARCH == "arm64") {
+ unminitSignals()
+ }
+}
+
+// Called from exitm, but not from drop, to undo the effect of thread-owned
+// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
+func mdestroy(mp *m) {
+}
+
+//go:nosplit
+func osyield_no_g() {
+ usleep_no_g(1)
+}
+
+//go:nosplit
+func osyield() {
+ usleep(1)
+}
+
+const (
+ _NSIG = 32
+ _SI_USER = 0 /* empirically true, but not what headers say */
+ _SIG_BLOCK = 1
+ _SIG_UNBLOCK = 2
+ _SIG_SETMASK = 3
+ _SS_DISABLE = 4
+)
+
+//extern SigTabTT runtime·sigtab[];
+
+type sigset uint32
+
+var sigset_all = ^sigset(0)
+
+//go:nosplit
+//go:nowritebarrierrec
+func setsig(i uint32, fn uintptr) {
+ var sa usigactiont
+ sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
+ sa.sa_mask = ^uint32(0)
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
+ if iscgo {
+ fn = abi.FuncPCABI0(cgoSigtramp)
+ } else {
+ fn = abi.FuncPCABI0(sigtramp)
+ }
+ }
+ *(*uintptr)(unsafe.Pointer(&sa.__sigaction_u)) = fn
+ sigaction(i, &sa, nil)
+}
+
+// sigtramp is the callback from libc when a signal is received.
+// It is called with the C calling convention.
+func sigtramp()
+func cgoSigtramp()
+
+//go:nosplit
+//go:nowritebarrierrec
+func setsigstack(i uint32) {
+ var osa usigactiont
+ sigaction(i, nil, &osa)
+ handler := *(*uintptr)(unsafe.Pointer(&osa.__sigaction_u))
+ if osa.sa_flags&_SA_ONSTACK != 0 {
+ return
+ }
+ var sa usigactiont
+ *(*uintptr)(unsafe.Pointer(&sa.__sigaction_u)) = handler
+ sa.sa_mask = osa.sa_mask
+ sa.sa_flags = osa.sa_flags | _SA_ONSTACK
+ sigaction(i, &sa, nil)
+}
+
+//go:nosplit
+//go:nowritebarrierrec
+func getsig(i uint32) uintptr {
+ var sa usigactiont
+ sigaction(i, nil, &sa)
+ return *(*uintptr)(unsafe.Pointer(&sa.__sigaction_u))
+}
+
+// setSignalstackSP sets the ss_sp field of a stackt.
+//
+//go:nosplit
+func setSignalstackSP(s *stackt, sp uintptr) {
+ *(*uintptr)(unsafe.Pointer(&s.ss_sp)) = sp
+}
+
+//go:nosplit
+//go:nowritebarrierrec
+func sigaddset(mask *sigset, i int) {
+ *mask |= 1 << (uint32(i) - 1)
+}
+
+func sigdelset(mask *sigset, i int) {
+ *mask &^= 1 << (uint32(i) - 1)
+}
+
+func setProcessCPUProfiler(hz int32) {
+ setProcessCPUProfilerTimer(hz)
+}
+
+func setThreadCPUProfiler(hz int32) {
+ setThreadCPUProfilerHz(hz)
+}
+
+//go:nosplit
+func validSIGPROF(mp *m, c *sigctxt) bool {
+ return true
+}
+
+//go:linkname executablePath os.executablePath
+var executablePath string
+
+func sysargs(argc int32, argv **byte) {
+	// Skip over argv and envv; the first string after them is the executable path.
+ n := argc + 1
+ for argv_index(argv, n) != nil {
+ n++
+ }
+ executablePath = gostringnocopy(argv_index(argv, n+1))
+
+	// Strip the "executable_path=" prefix if present; it is added on OS X 10.11 and later.
+ const prefix = "executable_path="
+ if len(executablePath) > len(prefix) && executablePath[:len(prefix)] == prefix {
+ executablePath = executablePath[len(prefix):]
+ }
+}
+
+func signalM(mp *m, sig int) {
+ pthread_kill(pthread(mp.procid), uint32(sig))
+}
+
+// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
+// number.
+const sigPerThreadSyscall = 1 << 31
+
+//go:nosplit
+func runPerThreadSyscall() {
+ throw("runPerThreadSyscall only valid on linux")
+}
diff --git a/contrib/go/_std_1.19/src/runtime/os_linux.go b/contrib/go/_std_1.19/src/runtime/os_linux.go
new file mode 100644
index 0000000000..25aea6522d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/os_linux.go
@@ -0,0 +1,888 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/abi"
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "runtime/internal/syscall"
+ "unsafe"
+)
+
+// sigPerThreadSyscall is the same signal (SIGSETXID) used by glibc for
+// per-thread syscalls on Linux. We use it for the same purpose in non-cgo
+// binaries.
+const sigPerThreadSyscall = _SIGRTMIN + 1
+
+type mOS struct {
+ // profileTimer holds the ID of the POSIX interval timer for profiling CPU
+ // usage on this thread.
+ //
+ // It is valid when the profileTimerValid field is non-zero. A thread
+ // creates and manages its own timer, and these fields are read and written
+ // only by this thread. But because some of the reads on profileTimerValid
+ // are in signal handling code, access to that field uses atomic operations.
+ profileTimer int32
+ profileTimerValid uint32
+
+ // needPerThreadSyscall indicates that a per-thread syscall is required
+ // for doAllThreadsSyscall.
+ needPerThreadSyscall atomic.Uint8
+}
+
+//go:noescape
+func futex(addr unsafe.Pointer, op int32, val uint32, ts, addr2 unsafe.Pointer, val3 uint32) int32
+
+// Linux futex.
+//
+// futexsleep(uint32 *addr, uint32 val)
+// futexwakeup(uint32 *addr)
+//
+// Futexsleep atomically checks if *addr == val and if so, sleeps on addr.
+// Futexwakeup wakes up threads sleeping on addr.
+// Futexsleep is allowed to wake up spuriously.
+
+const (
+ _FUTEX_PRIVATE_FLAG = 128
+ _FUTEX_WAIT_PRIVATE = 0 | _FUTEX_PRIVATE_FLAG
+ _FUTEX_WAKE_PRIVATE = 1 | _FUTEX_PRIVATE_FLAG
+)
+
+// Atomically,
+//
+// if(*addr == val) sleep
+//
+// Might be woken up spuriously; that's allowed.
+// Don't sleep longer than ns; ns < 0 means forever.
+//
+//go:nosplit
+func futexsleep(addr *uint32, val uint32, ns int64) {
+ // Some Linux kernels have a bug where futex of
+ // FUTEX_WAIT returns an internal error code
+ // as an errno. Libpthread ignores the return value
+ // here, and so can we: as it says a few lines up,
+ // spurious wakeups are allowed.
+ if ns < 0 {
+ futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, nil, nil, 0)
+ return
+ }
+
+ var ts timespec
+ ts.setNsec(ns)
+ futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, unsafe.Pointer(&ts), nil, 0)
+}
+
+// If any procs are sleeping on addr, wake up at most cnt.
+//
+//go:nosplit
+func futexwakeup(addr *uint32, cnt uint32) {
+ ret := futex(unsafe.Pointer(addr), _FUTEX_WAKE_PRIVATE, cnt, nil, nil, 0)
+ if ret >= 0 {
+ return
+ }
+
+ // I don't know that futex wakeup can return
+ // EAGAIN or EINTR, but if it does, it would be
+ // safe to loop and call futex again.
+ systemstack(func() {
+ print("futexwakeup addr=", addr, " returned ", ret, "\n")
+ })
+
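+	// Deliberately fault by writing to a bad address: futex returned an
+	// unexpected error and the runtime cannot safely continue.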
+ *(*int32)(unsafe.Pointer(uintptr(0x1006))) = 0x1006
+}
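+
+// A rough sketch of the sleep/wake pairing these primitives enable
+// (illustrative, not runtime code). The sleeper runs:
+//
+//	for atomic.Load(&flag) == 0 {
+//		futexsleep(&flag, 0, -1)
+//	}
+//
+// and the waker runs atomic.Store(&flag, 1) followed by
+// futexwakeup(&flag, 1). The kernel re-checks *addr == val under its own
+// lock, so a wakeup racing with the sleeper's check is not lost.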
+
+func getproccount() int32 {
+ // This buffer is huge (8 kB) but we are on the system stack
+ // and there should be plenty of space (64 kB).
+ // Also this is a leaf, so we're not holding up the memory for long.
+ // See golang.org/issue/11823.
+ // The suggested behavior here is to keep trying with ever-larger
+ // buffers, but we don't have a dynamic memory allocator at the
+ // moment, so that's a bit tricky and seems like overkill.
+ const maxCPUs = 64 * 1024
+ var buf [maxCPUs / 8]byte
+ r := sched_getaffinity(0, unsafe.Sizeof(buf), &buf[0])
+ if r < 0 {
+ return 1
+ }
+ n := int32(0)
+ for _, v := range buf[:r] {
+ for v != 0 {
+ n += int32(v & 1)
+ v >>= 1
+ }
+ }
+ if n == 0 {
+ n = 1
+ }
+ return n
+}
+
+// Clone, the Linux rfork.
+const (
+ _CLONE_VM = 0x100
+ _CLONE_FS = 0x200
+ _CLONE_FILES = 0x400
+ _CLONE_SIGHAND = 0x800
+ _CLONE_PTRACE = 0x2000
+ _CLONE_VFORK = 0x4000
+ _CLONE_PARENT = 0x8000
+ _CLONE_THREAD = 0x10000
+ _CLONE_NEWNS = 0x20000
+ _CLONE_SYSVSEM = 0x40000
+ _CLONE_SETTLS = 0x80000
+ _CLONE_PARENT_SETTID = 0x100000
+ _CLONE_CHILD_CLEARTID = 0x200000
+ _CLONE_UNTRACED = 0x800000
+ _CLONE_CHILD_SETTID = 0x1000000
+ _CLONE_STOPPED = 0x2000000
+ _CLONE_NEWUTS = 0x4000000
+ _CLONE_NEWIPC = 0x8000000
+
+ // As of QEMU 2.8.0 (5ea2fc84d), user emulation requires all six of these
+ // flags to be set when creating a thread; attempts to share the other
+ // five but leave SYSVSEM unshared will fail with -EINVAL.
+ //
+ // In non-QEMU environments CLONE_SYSVSEM is inconsequential as we do not
+ // use System V semaphores.
+
+ cloneFlags = _CLONE_VM | /* share memory */
+ _CLONE_FS | /* share cwd, etc */
+ _CLONE_FILES | /* share fd table */
+ _CLONE_SIGHAND | /* share sig handler table */
+ _CLONE_SYSVSEM | /* share SysV semaphore undo lists (see issue #20763) */
+ _CLONE_THREAD /* revisit - okay for now */
+)
+
+//go:noescape
+func clone(flags int32, stk, mp, gp, fn unsafe.Pointer) int32
+
+// May run with m.p==nil, so write barriers are not allowed.
+//
+//go:nowritebarrier
+func newosproc(mp *m) {
+ stk := unsafe.Pointer(mp.g0.stack.hi)
+ /*
+ * note: strace gets confused if we use CLONE_PTRACE here.
+ */
+ if false {
+ print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " clone=", abi.FuncPCABI0(clone), " id=", mp.id, " ostk=", &mp, "\n")
+ }
+
+ // Disable signals during clone, so that the new thread starts
+ // with signals disabled. It will enable them in minit.
+ var oset sigset
+ sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
+ ret := clone(cloneFlags, stk, unsafe.Pointer(mp), unsafe.Pointer(mp.g0), unsafe.Pointer(abi.FuncPCABI0(mstart)))
+ sigprocmask(_SIG_SETMASK, &oset, nil)
+
+ if ret < 0 {
+ print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", -ret, ")\n")
+ if ret == -_EAGAIN {
+ println("runtime: may need to increase max user processes (ulimit -u)")
+ }
+ throw("newosproc")
+ }
+}
+
+// Version of newosproc that doesn't require a valid G.
+//
+//go:nosplit
+func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
+ stack := sysAlloc(stacksize, &memstats.stacks_sys)
+ if stack == nil {
+ write(2, unsafe.Pointer(&failallocatestack[0]), int32(len(failallocatestack)))
+ exit(1)
+ }
+ ret := clone(cloneFlags, unsafe.Pointer(uintptr(stack)+stacksize), nil, nil, fn)
+ if ret < 0 {
+ write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
+ exit(1)
+ }
+}
+
+var failallocatestack = []byte("runtime: failed to allocate stack for the new OS thread\n")
+var failthreadcreate = []byte("runtime: failed to create new OS thread\n")
+
+const (
+ _AT_NULL = 0 // End of vector
+ _AT_PAGESZ = 6 // System physical page size
+ _AT_HWCAP = 16 // hardware capability bit vector
+ _AT_RANDOM = 25 // introduced in 2.6.29
+ _AT_HWCAP2 = 26 // hardware capability bit vector 2
+)
+
+var procAuxv = []byte("/proc/self/auxv\x00")
+
+var addrspace_vec [1]byte
+
+func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32
+
+func sysargs(argc int32, argv **byte) {
+ n := argc + 1
+
+ // skip over argv, envp to get to auxv
+ for argv_index(argv, n) != nil {
+ n++
+ }
+
+ // skip NULL separator
+ n++
+
+ // now argv+n is auxv
+ auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
+ if sysauxv(auxv[:]) != 0 {
+ return
+ }
+ // In some situations we don't get a loader-provided
+ // auxv, such as when loaded as a library on Android.
+ // Fall back to /proc/self/auxv.
+ fd := open(&procAuxv[0], 0 /* O_RDONLY */, 0)
+ if fd < 0 {
+		// On Android, /proc/self/auxv might be unreadable (issue 9229), so we fall back
+		// to using mincore to detect the physical page size.
+ // mincore should return EINVAL when address is not a multiple of system page size.
+ const size = 256 << 10 // size of memory region to allocate
+ p, err := mmap(nil, size, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
+ if err != 0 {
+ return
+ }
+ var n uintptr
+ for n = 4 << 10; n < size; n <<= 1 {
+ err := mincore(unsafe.Pointer(uintptr(p)+n), 1, &addrspace_vec[0])
+ if err == 0 {
+ physPageSize = n
+ break
+ }
+ }
+ if physPageSize == 0 {
+ physPageSize = size
+ }
+ munmap(p, size)
+ return
+ }
+ var buf [128]uintptr
+ n = read(fd, noescape(unsafe.Pointer(&buf[0])), int32(unsafe.Sizeof(buf)))
+ closefd(fd)
+ if n < 0 {
+ return
+ }
+ // Make sure buf is terminated, even if we didn't read
+ // the whole file.
+ buf[len(buf)-2] = _AT_NULL
+ sysauxv(buf[:])
+}
+
+// startupRandomData holds random bytes initialized at startup. These come from
+// the ELF AT_RANDOM auxiliary vector.
+var startupRandomData []byte
+
+func sysauxv(auxv []uintptr) int {
+ var i int
+ for ; auxv[i] != _AT_NULL; i += 2 {
+ tag, val := auxv[i], auxv[i+1]
+ switch tag {
+ case _AT_RANDOM:
+			// The kernel provides a pointer to 16 bytes
+			// of random data.
+ startupRandomData = (*[16]byte)(unsafe.Pointer(val))[:]
+
+ case _AT_PAGESZ:
+ physPageSize = val
+ }
+
+ archauxv(tag, val)
+ vdsoauxv(tag, val)
+ }
+ return i / 2
+}
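+
+// Conceptually, the auxv array is a flat list of (tag, value) pairs
+// terminated by an _AT_NULL tag, e.g. (values illustrative):
+//
+//	[_AT_PAGESZ, 4096, _AT_RANDOM, <ptr>, ..., _AT_NULL, 0]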
+
+var sysTHPSizePath = []byte("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size\x00")
+
+func getHugePageSize() uintptr {
+ var numbuf [20]byte
+ fd := open(&sysTHPSizePath[0], 0 /* O_RDONLY */, 0)
+ if fd < 0 {
+ return 0
+ }
+ ptr := noescape(unsafe.Pointer(&numbuf[0]))
+ n := read(fd, ptr, int32(len(numbuf)))
+ closefd(fd)
+ if n <= 0 {
+ return 0
+ }
+ n-- // remove trailing newline
+ v, ok := atoi(slicebytetostringtmp((*byte)(ptr), int(n)))
+ if !ok || v < 0 {
+ v = 0
+ }
+ if v&(v-1) != 0 {
+ // v is not a power of 2
+ return 0
+ }
+ return uintptr(v)
+}
+
+func osinit() {
+ ncpu = getproccount()
+ physHugePageSize = getHugePageSize()
+ if iscgo {
+ // #42494 glibc and musl reserve some signals for
+ // internal use and require they not be blocked by
+ // the rest of a normal C runtime. When the go runtime
+ // blocks...unblocks signals, temporarily, the blocked
+ // interval of time is generally very short. As such,
+ // these expectations of *libc code are mostly met by
+ // the combined go+cgo system of threads. However,
+ // when go causes a thread to exit, via a return from
+ // mstart(), the combined runtime can deadlock if
+ // these signals are blocked. Thus, don't block these
+ // signals when exiting threads.
+ // - glibc: SIGCANCEL (32), SIGSETXID (33)
+ // - musl: SIGTIMER (32), SIGCANCEL (33), SIGSYNCCALL (34)
+ sigdelset(&sigsetAllExiting, 32)
+ sigdelset(&sigsetAllExiting, 33)
+ sigdelset(&sigsetAllExiting, 34)
+ }
+ osArchInit()
+}
+
+var urandom_dev = []byte("/dev/urandom\x00")
+
+func getRandomData(r []byte) {
+ if startupRandomData != nil {
+ n := copy(r, startupRandomData)
+ extendRandom(r, n)
+ return
+ }
+ fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
+ n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
+ closefd(fd)
+ extendRandom(r, int(n))
+}
+
+func goenvs() {
+ goenvs_unix()
+}
+
+// Called to do synchronous initialization of Go code built with
+// -buildmode=c-archive or -buildmode=c-shared.
+// None of the Go runtime is initialized.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func libpreinit() {
+ initsig(true)
+}
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
+func mpreinit(mp *m) {
+ mp.gsignal = malg(32 * 1024) // Linux wants >= 2K
+ mp.gsignal.m = mp
+}
+
+func gettid() uint32
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the new thread, cannot allocate memory.
+func minit() {
+ minitSignals()
+
+ // Cgo-created threads and the bootstrap m are missing a
+ // procid. We need this for asynchronous preemption and it's
+ // useful in debuggers.
+ getg().m.procid = uint64(gettid())
+}
+
+// Called from dropm to undo the effect of an minit.
+//
+//go:nosplit
+func unminit() {
+ unminitSignals()
+}
+
+// Called from exitm, but not from drop, to undo the effect of thread-owned
+// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
+func mdestroy(mp *m) {
+}
+
+//#ifdef GOARCH_386
+//#define sa_handler k_sa_handler
+//#endif
+
+func sigreturn()
+func sigtramp() // Called via C ABI
+func cgoSigtramp()
+
+//go:noescape
+func sigaltstack(new, old *stackt)
+
+//go:noescape
+func setitimer(mode int32, new, old *itimerval)
+
+//go:noescape
+func timer_create(clockid int32, sevp *sigevent, timerid *int32) int32
+
+//go:noescape
+func timer_settime(timerid int32, flags int32, new, old *itimerspec) int32
+
+//go:noescape
+func timer_delete(timerid int32) int32
+
+//go:noescape
+func rtsigprocmask(how int32, new, old *sigset, size int32)
+
+//go:nosplit
+//go:nowritebarrierrec
+func sigprocmask(how int32, new, old *sigset) {
+ rtsigprocmask(how, new, old, int32(unsafe.Sizeof(*new)))
+}
+
+func raise(sig uint32)
+func raiseproc(sig uint32)
+
+//go:noescape
+func sched_getaffinity(pid, len uintptr, buf *byte) int32
+func osyield()
+
+//go:nosplit
+func osyield_no_g() {
+ osyield()
+}
+
+func pipe2(flags int32) (r, w int32, errno int32)
+
+const (
+ _si_max_size = 128
+ _sigev_max_size = 64
+)
+
+//go:nosplit
+//go:nowritebarrierrec
+func setsig(i uint32, fn uintptr) {
+ var sa sigactiont
+ sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTORER | _SA_RESTART
+ sigfillset(&sa.sa_mask)
+	// Although the Linux manpage says the "sa_restorer element is obsolete and
+	// should not be used", the x86_64 kernel requires it. Only use it on
+	// x86.
+ if GOARCH == "386" || GOARCH == "amd64" {
+ sa.sa_restorer = abi.FuncPCABI0(sigreturn)
+ }
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
+ if iscgo {
+ fn = abi.FuncPCABI0(cgoSigtramp)
+ } else {
+ fn = abi.FuncPCABI0(sigtramp)
+ }
+ }
+ sa.sa_handler = fn
+ sigaction(i, &sa, nil)
+}
+
+//go:nosplit
+//go:nowritebarrierrec
+func setsigstack(i uint32) {
+ var sa sigactiont
+ sigaction(i, nil, &sa)
+ if sa.sa_flags&_SA_ONSTACK != 0 {
+ return
+ }
+ sa.sa_flags |= _SA_ONSTACK
+ sigaction(i, &sa, nil)
+}
+
+//go:nosplit
+//go:nowritebarrierrec
+func getsig(i uint32) uintptr {
+ var sa sigactiont
+ sigaction(i, nil, &sa)
+ return sa.sa_handler
+}
+
+// setSignalstackSP sets the ss_sp field of a stackt.
+//
+//go:nosplit
+func setSignalstackSP(s *stackt, sp uintptr) {
+ *(*uintptr)(unsafe.Pointer(&s.ss_sp)) = sp
+}
+
+//go:nosplit
+func (c *sigctxt) fixsigcode(sig uint32) {
+}
+
+// sysSigaction calls the rt_sigaction system call.
+//
+//go:nosplit
+func sysSigaction(sig uint32, new, old *sigactiont) {
+ if rt_sigaction(uintptr(sig), new, old, unsafe.Sizeof(sigactiont{}.sa_mask)) != 0 {
+ // Workaround for bugs in QEMU user mode emulation.
+ //
+ // QEMU turns calls to the sigaction system call into
+ // calls to the C library sigaction call; the C
+ // library call rejects attempts to call sigaction for
+ // SIGCANCEL (32) or SIGSETXID (33).
+ //
+ // QEMU rejects calling sigaction on SIGRTMAX (64).
+ //
+		// Just ignore the error in these cases. There isn't
+ // anything we can do about it anyhow.
+ if sig != 32 && sig != 33 && sig != 64 {
+ // Use system stack to avoid split stack overflow on ppc64/ppc64le.
+ systemstack(func() {
+ throw("sigaction failed")
+ })
+ }
+ }
+}
+
+// rt_sigaction is implemented in assembly.
+//
+//go:noescape
+func rt_sigaction(sig uintptr, new, old *sigactiont, size uintptr) int32
+
+func getpid() int
+func tgkill(tgid, tid, sig int)
+
+// signalM sends a signal to mp.
+func signalM(mp *m, sig int) {
+ tgkill(getpid(), int(mp.procid), sig)
+}
+
+// go118UseTimerCreateProfiler enables the per-thread CPU profiler.
+const go118UseTimerCreateProfiler = true
+
+// validSIGPROF compares this signal delivery's code against the signal sources
+// that the profiler uses, returning whether the delivery should be processed.
+// To be processed, a signal delivery from a known profiling mechanism should
+// correspond to the best profiling mechanism available to this thread. Signals
+// from other sources are always considered valid.
+//
+//go:nosplit
+func validSIGPROF(mp *m, c *sigctxt) bool {
+ code := int32(c.sigcode())
+ setitimer := code == _SI_KERNEL
+ timer_create := code == _SI_TIMER
+
+ if !(setitimer || timer_create) {
+ // The signal doesn't correspond to a profiling mechanism that the
+ // runtime enables itself. There's no reason to process it, but there's
+ // no reason to ignore it either.
+ return true
+ }
+
+ if mp == nil {
+ // Since we don't have an M, we can't check if there's an active
+ // per-thread timer for this thread. We don't know how long this thread
+ // has been around, and if it happened to interact with the Go scheduler
+ // at a time when profiling was active (causing it to have a per-thread
+ // timer). But it may have never interacted with the Go scheduler, or
+ // never while profiling was active. To avoid double-counting, process
+ // only signals from setitimer.
+ //
+ // When a custom cgo traceback function has been registered (on
+ // platforms that support runtime.SetCgoTraceback), SIGPROF signals
+ // delivered to a thread that cannot find a matching M do this check in
+ // the assembly implementations of runtime.cgoSigtramp.
+ return setitimer
+ }
+
+ // Having an M means the thread interacts with the Go scheduler, and we can
+ // check whether there's an active per-thread timer for this thread.
+ if atomic.Load(&mp.profileTimerValid) != 0 {
+ // If this M has its own per-thread CPU profiling interval timer, we
+ // should track the SIGPROF signals that come from that timer (for
+ // accurate reporting of its CPU usage; see issue 35057) and ignore any
+ // that it gets from the process-wide setitimer (to not over-count its
+ // CPU consumption).
+ return timer_create
+ }
+
+ // No active per-thread timer means the only valid profiler is setitimer.
+ return setitimer
+}
+
+func setProcessCPUProfiler(hz int32) {
+ setProcessCPUProfilerTimer(hz)
+}
+
+func setThreadCPUProfiler(hz int32) {
+ mp := getg().m
+ mp.profilehz = hz
+
+ if !go118UseTimerCreateProfiler {
+ return
+ }
+
+ // destroy any active timer
+ if atomic.Load(&mp.profileTimerValid) != 0 {
+ timerid := mp.profileTimer
+ atomic.Store(&mp.profileTimerValid, 0)
+ mp.profileTimer = 0
+
+ ret := timer_delete(timerid)
+ if ret != 0 {
+ print("runtime: failed to disable profiling timer; timer_delete(", timerid, ") errno=", -ret, "\n")
+ throw("timer_delete")
+ }
+ }
+
+ if hz == 0 {
+ // If the goal was to disable profiling for this thread, then the job's done.
+ return
+ }
+
+ // The period of the timer should be 1/Hz. For every "1/Hz" of additional
+ // work, the user should expect one additional sample in the profile.
+ //
+ // But to scale down to very small amounts of application work, to observe
+ // even CPU usage of "one tenth" of the requested period, set the initial
+	// timing delay in a different way: so that "one tenth" of a period of CPU
+ // spend shows up as a 10% chance of one sample (for an expected value of
+ // 0.1 samples), and so that "two and six tenths" periods of CPU spend show
+ // up as a 60% chance of 3 samples and a 40% chance of 2 samples (for an
+	// expected value of 2.6). Set the initial delay to a value in the uniform
+ // random distribution between 0 and the desired period. And because "0"
+ // means "disable timer", add 1 so the half-open interval [0,period) turns
+ // into (0,period].
+ //
+ // Otherwise, this would show up as a bias away from short-lived threads and
+ // from threads that are only occasionally active: for example, when the
+ // garbage collector runs on a mostly-idle system, the additional threads it
+ // activates may do a couple milliseconds of GC-related work and nothing
+ // else in the few seconds that the profiler observes.
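+	//
+	// For example (illustrative numbers, not from the source): with hz = 100
+	// the period is 1e9/100 ns = 10ms, so it_interval is 10ms and it_value is
+	// drawn uniformly from (0, 10ms]. A thread that then does 1ms of work has
+	// a 10% chance of producing one sample.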
+ spec := new(itimerspec)
+ spec.it_value.setNsec(1 + int64(fastrandn(uint32(1e9/hz))))
+ spec.it_interval.setNsec(1e9 / int64(hz))
+
+ var timerid int32
+ var sevp sigevent
+ sevp.notify = _SIGEV_THREAD_ID
+ sevp.signo = _SIGPROF
+ sevp.sigev_notify_thread_id = int32(mp.procid)
+ ret := timer_create(_CLOCK_THREAD_CPUTIME_ID, &sevp, &timerid)
+ if ret != 0 {
+ // If we cannot create a timer for this M, leave profileTimerValid false
+ // to fall back to the process-wide setitimer profiler.
+ return
+ }
+
+ ret = timer_settime(timerid, 0, spec, nil)
+ if ret != 0 {
+ print("runtime: failed to configure profiling timer; timer_settime(", timerid,
+ ", 0, {interval: {",
+ spec.it_interval.tv_sec, "s + ", spec.it_interval.tv_nsec, "ns} value: {",
+ spec.it_value.tv_sec, "s + ", spec.it_value.tv_nsec, "ns}}, nil) errno=", -ret, "\n")
+ throw("timer_settime")
+ }
+
+ mp.profileTimer = timerid
+ atomic.Store(&mp.profileTimerValid, 1)
+}
+
+// perThreadSyscallArgs contains the system call number, arguments, and
+// expected return values for a system call to be executed on all threads.
+type perThreadSyscallArgs struct {
+ trap uintptr
+ a1 uintptr
+ a2 uintptr
+ a3 uintptr
+ a4 uintptr
+ a5 uintptr
+ a6 uintptr
+ r1 uintptr
+ r2 uintptr
+}
+
+// perThreadSyscall is the system call to execute for the ongoing
+// doAllThreadsSyscall.
+//
+// perThreadSyscall may only be written while mp.needPerThreadSyscall == 0 on
+// all Ms.
+var perThreadSyscall perThreadSyscallArgs
+
+// syscall_runtime_doAllThreadsSyscall executes a specified system call on
+// all Ms.
+//
+// The system call is expected to succeed and return the same value on every
+// thread. If any threads do not match, the runtime throws.
+//
+//go:linkname syscall_runtime_doAllThreadsSyscall syscall.runtime_doAllThreadsSyscall
+//go:uintptrescapes
+func syscall_runtime_doAllThreadsSyscall(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ if iscgo {
+ // In cgo, we are not aware of threads created in C, so this approach will not work.
+ panic("doAllThreadsSyscall not supported with cgo enabled")
+ }
+
+ // STW to guarantee that user goroutines see an atomic change to thread
+ // state. Without STW, goroutines could migrate Ms while change is in
+ // progress and e.g., see state old -> new -> old -> new.
+ //
+ // N.B. Internally, this function does not depend on STW to
+ // successfully change every thread. It is only needed for user
+ // expectations, per above.
+ stopTheWorld("doAllThreadsSyscall")
+
+ // This function depends on several properties:
+ //
+ // 1. All OS threads that already exist are associated with an M in
+ // allm. i.e., we won't miss any pre-existing threads.
+ // 2. All Ms listed in allm will eventually have an OS thread exist.
+ // i.e., they will set procid and be able to receive signals.
+ // 3. OS threads created after we read allm will clone from a thread
+ // that has executed the system call. i.e., they inherit the
+ // modified state.
+ //
+ // We achieve these through different mechanisms:
+ //
+ // 1. Addition of new Ms to allm in allocm happens before clone of its
+ // OS thread later in newm.
+ // 2. newm does acquirem to avoid being preempted, ensuring that new Ms
+ // created in allocm will eventually reach OS thread clone later in
+ // newm.
+ // 3. We take allocmLock for write here to prevent allocation of new Ms
+ // while this function runs. Per (1), this prevents clone of OS
+ // threads that are not yet in allm.
+ allocmLock.lock()
+
+ // Disable preemption, preventing us from changing Ms, as we handle
+ // this M specially.
+ //
+ // N.B. STW and lock() above do this as well, this is added for extra
+ // clarity.
+ acquirem()
+
+ // N.B. allocmLock also prevents concurrent execution of this function,
+ // serializing use of perThreadSyscall, mp.needPerThreadSyscall, and
+ // ensuring all threads execute system calls from multiple calls in the
+ // same order.
+
+ r1, r2, errno := syscall.Syscall6(trap, a1, a2, a3, a4, a5, a6)
+ if GOARCH == "ppc64" || GOARCH == "ppc64le" {
+		// TODO(https://go.dev/issue/51192): ppc64 doesn't use r2.
+ r2 = 0
+ }
+ if errno != 0 {
+ releasem(getg().m)
+ allocmLock.unlock()
+ startTheWorld()
+ return r1, r2, errno
+ }
+
+ perThreadSyscall = perThreadSyscallArgs{
+ trap: trap,
+ a1: a1,
+ a2: a2,
+ a3: a3,
+ a4: a4,
+ a5: a5,
+ a6: a6,
+ r1: r1,
+ r2: r2,
+ }
+
+ // Wait for all threads to start.
+ //
+ // As described above, some Ms have been added to allm prior to
+ // allocmLock, but not yet completed OS clone and set procid.
+ //
+ // At minimum we must wait for a thread to set procid before we can
+ // send it a signal.
+ //
+ // We take this one step further and wait for all threads to start
+ // before sending any signals. This prevents system calls from getting
+ // applied twice: once in the parent and once in the child, like so:
+ //
+ // A B C
+ // add C to allm
+ // doAllThreadsSyscall
+ // allocmLock.lock()
+ // signal B
+ // <receive signal>
+ // execute syscall
+ // <signal return>
+ // clone C
+ // <thread start>
+ // set procid
+ // signal C
+ // <receive signal>
+ // execute syscall
+ // <signal return>
+ //
+ // In this case, thread C inherited the syscall-modified state from
+ // thread B and did not need to execute the syscall, but did anyway
+ // because doAllThreadsSyscall could not be sure whether it was
+ // required.
+ //
+ // Some system calls may not be idempotent, so we ensure each thread
+ // executes the system call exactly once.
+ for mp := allm; mp != nil; mp = mp.alllink {
+ for atomic.Load64(&mp.procid) == 0 {
+ // Thread is starting.
+ osyield()
+ }
+ }
+
+ // Signal every other thread, where they will execute perThreadSyscall
+ // from the signal handler.
+ gp := getg()
+ tid := gp.m.procid
+ for mp := allm; mp != nil; mp = mp.alllink {
+ if atomic.Load64(&mp.procid) == tid {
+ // Our thread already performed the syscall.
+ continue
+ }
+ mp.needPerThreadSyscall.Store(1)
+ signalM(mp, sigPerThreadSyscall)
+ }
+
+ // Wait for all threads to complete.
+ for mp := allm; mp != nil; mp = mp.alllink {
+ if mp.procid == tid {
+ continue
+ }
+ for mp.needPerThreadSyscall.Load() != 0 {
+ osyield()
+ }
+ }
+
+ perThreadSyscall = perThreadSyscallArgs{}
+
+ releasem(getg().m)
+ allocmLock.unlock()
+ startTheWorld()
+
+ return r1, r2, errno
+}
+
+// runPerThreadSyscall runs perThreadSyscall for this M if required.
+//
+// This function throws if the system call returns with anything other than the
+// expected values.
+//
+//go:nosplit
+func runPerThreadSyscall() {
+ gp := getg()
+ if gp.m.needPerThreadSyscall.Load() == 0 {
+ return
+ }
+
+ args := perThreadSyscall
+ r1, r2, errno := syscall.Syscall6(args.trap, args.a1, args.a2, args.a3, args.a4, args.a5, args.a6)
+ if GOARCH == "ppc64" || GOARCH == "ppc64le" {
+		// TODO(https://go.dev/issue/51192): ppc64 doesn't use r2.
+ r2 = 0
+ }
+ if errno != 0 || r1 != args.r1 || r2 != args.r2 {
+ print("trap:", args.trap, ", a123456=[", args.a1, ",", args.a2, ",", args.a3, ",", args.a4, ",", args.a5, ",", args.a6, "]\n")
+		print("results: got {r1=", r1, ",r2=", r2, ",errno=", errno, "}, want {r1=", args.r1, ",r2=", args.r2, ",errno=0}\n")
+ fatal("AllThreadsSyscall6 results differ between threads; runtime corrupted")
+ }
+
+ gp.m.needPerThreadSyscall.Store(0)
+}
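+
+// The public entry points for this machinery are syscall.AllThreadsSyscall
+// and syscall.AllThreadsSyscall6 (linux, non-cgo only). A minimal sketch of
+// a caller, assuming uid holds the target user id:
+//
+//	r1, _, errno := syscall.AllThreadsSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0)
+//	if errno != 0 {
+//		// handle the error; per the early return above, no thread state changed
+//	}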
diff --git a/contrib/go/_std_1.18/src/runtime/os_linux_generic.go b/contrib/go/_std_1.19/src/runtime/os_linux_generic.go
index bed9e66e15..bed9e66e15 100644
--- a/contrib/go/_std_1.18/src/runtime/os_linux_generic.go
+++ b/contrib/go/_std_1.19/src/runtime/os_linux_generic.go
diff --git a/contrib/go/_std_1.19/src/runtime/os_linux_noauxv.go b/contrib/go/_std_1.19/src/runtime/os_linux_noauxv.go
new file mode 100644
index 0000000000..ff377277aa
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/os_linux_noauxv.go
@@ -0,0 +1,10 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && !arm && !arm64 && !loong64 && !mips && !mipsle && !mips64 && !mips64le && !s390x && !ppc64 && !ppc64le
+
+package runtime
+
+func archauxv(tag, val uintptr) {
+}
diff --git a/contrib/go/_std_1.18/src/runtime/os_linux_x86.go b/contrib/go/_std_1.19/src/runtime/os_linux_x86.go
index c88f61fa2e..c88f61fa2e 100644
--- a/contrib/go/_std_1.18/src/runtime/os_linux_x86.go
+++ b/contrib/go/_std_1.19/src/runtime/os_linux_x86.go
diff --git a/contrib/go/_std_1.18/src/runtime/os_nonopenbsd.go b/contrib/go/_std_1.19/src/runtime/os_nonopenbsd.go
index a5775961e8..a5775961e8 100644
--- a/contrib/go/_std_1.18/src/runtime/os_nonopenbsd.go
+++ b/contrib/go/_std_1.19/src/runtime/os_nonopenbsd.go
diff --git a/contrib/go/_std_1.19/src/runtime/panic.go b/contrib/go/_std_1.19/src/runtime/panic.go
new file mode 100644
index 0000000000..121f2022a4
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/panic.go
@@ -0,0 +1,1370 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+// throwType indicates the current type of ongoing throw, which affects the
+// amount of detail printed to stderr. Higher values include more detail.
+type throwType uint32
+
+const (
+ // throwTypeNone means that we are not throwing.
+ throwTypeNone throwType = iota
+
+ // throwTypeUser is a throw due to a problem with the application.
+ //
+ // These throws do not include runtime frames, system goroutines, or
+ // frame metadata.
+ throwTypeUser
+
+ // throwTypeRuntime is a throw due to a problem with Go itself.
+ //
+ // These throws include as much information as possible to aid in
+ // debugging the runtime, including runtime frames, system goroutines,
+ // and frame metadata.
+ throwTypeRuntime
+)
+
+// We have two different ways of doing defers. The older way involves creating a
+// defer record at the time that a defer statement is executing and adding it to a
+// defer chain. This chain is inspected by the deferreturn call at all function
+// exits in order to run the appropriate defer calls. A cheaper way (which we call
+// open-coded defers) is used for functions in which no defer statements occur in
+// loops. In that case, we simply store the defer function/arg information into
+// specific stack slots at the point of each defer statement, as well as setting a
+// bit in a bitmask. At each function exit, we add inline code to directly make
+// the appropriate defer calls based on the bitmask and fn/arg information stored
+// on the stack. During panic/Goexit processing, the appropriate defer calls are
+// made using extra funcdata info that indicates the exact stack slots that
+// contain the bitmask and defer fn/args.
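+//
+// For example (illustrative of the rule above, not code from this file):
+//
+//	func ok() {
+//		f, _ := os.Open("x")
+//		defer f.Close() // no defer in a loop: eligible for open-coded defers
+//	}
+//
+//	func notOK(names []string) {
+//		for _, n := range names {
+//			f, _ := os.Open(n)
+//			defer f.Close() // defer in a loop: falls back to a defer record
+//		}
+//	}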
+
+// Check to make sure we can really generate a panic. If the panic
+// was generated from the runtime, or from inside malloc, then convert
+// to a throw of msg.
+// pc should be the program counter of the compiler-generated code that
+// triggered this panic.
+func panicCheck1(pc uintptr, msg string) {
+ if goarch.IsWasm == 0 && hasPrefix(funcname(findfunc(pc)), "runtime.") {
+ // Note: wasm can't tail call, so we can't get the original caller's pc.
+ throw(msg)
+ }
+ // TODO: is this redundant? How could we be in malloc
+ // but not in the runtime? runtime/internal/*, maybe?
+ gp := getg()
+ if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
+ throw(msg)
+ }
+}
+
+// Same as above, but calling from the runtime is allowed.
+//
+// Using this function is necessary for any panic that may be
+// generated by runtime.sigpanic, since those are always called by the
+// runtime.
+func panicCheck2(err string) {
+ // panic allocates, so to avoid recursive malloc, turn panics
+ // during malloc into throws.
+ gp := getg()
+ if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
+ throw(err)
+ }
+}
+
+// Many of the following panic entry-points turn into throws when they
+// happen in various runtime contexts. These should never happen in
+// the runtime, and if they do, they indicate a serious issue and
+// should not be caught by user code.
+//
+// The panic{Index,Slice,divide,shift} functions are called by
+// code generated by the compiler for out of bounds index expressions,
+// out of bounds slice expressions, division by zero, and shift by negative.
+// The panicdivide (again), panicoverflow, panicfloat, and panicmem
+// functions are called by the signal handler when a signal occurs
+// indicating the respective problem.
+//
+// Since panic{Index,Slice,shift} are never called directly, and
+// since the runtime package should never have an out of bounds slice
+// or array reference or negative shift, if we see those functions called from the
+// runtime package we turn the panic into a throw. That will dump the
+// entire runtime stack for easier debugging.
+//
+// The entry points called by the signal handler will be called from
+// runtime.sigpanic, so we can't disallow calls from the runtime to
+// these (they always look like they're called from the runtime).
+// Hence, for these, we just check for clearly bad runtime conditions.
+//
+// The panic{Index,Slice} functions are implemented in assembly and tail call
+// to the goPanic{Index,Slice} functions below. This is done so we can use
+// a space-minimal register calling convention.
+
+// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
+//
+//go:yeswritebarrierrec
+func goPanicIndex(x int, y int) {
+ panicCheck1(getcallerpc(), "index out of range")
+ panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
+}
+
+//go:yeswritebarrierrec
+func goPanicIndexU(x uint, y int) {
+ panicCheck1(getcallerpc(), "index out of range")
+ panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
+}
+
+// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
+//
+//go:yeswritebarrierrec
+func goPanicSliceAlen(x int, y int) {
+ panicCheck1(getcallerpc(), "slice bounds out of range")
+ panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
+}
+
+//go:yeswritebarrierrec
+func goPanicSliceAlenU(x uint, y int) {
+ panicCheck1(getcallerpc(), "slice bounds out of range")
+ panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
+}
+
+//go:yeswritebarrierrec
+func goPanicSliceAcap(x int, y int) {
+ panicCheck1(getcallerpc(), "slice bounds out of range")
+ panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
+}
+
+//go:yeswritebarrierrec
+func goPanicSliceAcapU(x uint, y int) {
+ panicCheck1(getcallerpc(), "slice bounds out of range")
+ panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
+}
+
+// failures in the comparisons for s[x:y], 0 <= x <= y
+//
+//go:yeswritebarrierrec
+func goPanicSliceB(x int, y int) {
+ panicCheck1(getcallerpc(), "slice bounds out of range")
+ panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
+}
+
+//go:yeswritebarrierrec
+func goPanicSliceBU(x uint, y int) {
+ panicCheck1(getcallerpc(), "slice bounds out of range")
+ panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
+}
+
+// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
+func goPanicSlice3Alen(x int, y int) {
+ panicCheck1(getcallerpc(), "slice bounds out of range")
+ panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
+}
+func goPanicSlice3AlenU(x uint, y int) {
+ panicCheck1(getcallerpc(), "slice bounds out of range")
+ panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
+}
+func goPanicSlice3Acap(x int, y int) {
+ panicCheck1(getcallerpc(), "slice bounds out of range")
+ panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
+}
+func goPanicSlice3AcapU(x uint, y int) {
+ panicCheck1(getcallerpc(), "slice bounds out of range")
+ panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
+}
+
+// failures in the comparisons for s[:x:y], 0 <= x <= y
+func goPanicSlice3B(x int, y int) {
+ panicCheck1(getcallerpc(), "slice bounds out of range")
+ panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
+}
+func goPanicSlice3BU(x uint, y int) {
+ panicCheck1(getcallerpc(), "slice bounds out of range")
+ panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
+}
+
+// failures in the comparisons for s[x:y:], 0 <= x <= y
+func goPanicSlice3C(x int, y int) {
+ panicCheck1(getcallerpc(), "slice bounds out of range")
+ panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
+}
+func goPanicSlice3CU(x uint, y int) {
+ panicCheck1(getcallerpc(), "slice bounds out of range")
+ panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
+}
+
+// failures in the conversion (*[x]T)s, 0 <= x <= y, x == cap(s)
+func goPanicSliceConvert(x int, y int) {
+ panicCheck1(getcallerpc(), "slice length too short to convert to pointer to array")
+ panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
+}
+
+// Implemented in assembly, as they take arguments in registers.
+// Declared here to mark them as ABIInternal.
+func panicIndex(x int, y int)
+func panicIndexU(x uint, y int)
+func panicSliceAlen(x int, y int)
+func panicSliceAlenU(x uint, y int)
+func panicSliceAcap(x int, y int)
+func panicSliceAcapU(x uint, y int)
+func panicSliceB(x int, y int)
+func panicSliceBU(x uint, y int)
+func panicSlice3Alen(x int, y int)
+func panicSlice3AlenU(x uint, y int)
+func panicSlice3Acap(x int, y int)
+func panicSlice3AcapU(x uint, y int)
+func panicSlice3B(x int, y int)
+func panicSlice3BU(x uint, y int)
+func panicSlice3C(x int, y int)
+func panicSlice3CU(x uint, y int)
+func panicSliceConvert(x int, y int)
+
+var shiftError = error(errorString("negative shift amount"))
+
+//go:yeswritebarrierrec
+func panicshift() {
+ panicCheck1(getcallerpc(), "negative shift amount")
+ panic(shiftError)
+}
+
+var divideError = error(errorString("integer divide by zero"))
+
+//go:yeswritebarrierrec
+func panicdivide() {
+ panicCheck2("integer divide by zero")
+ panic(divideError)
+}
+
+var overflowError = error(errorString("integer overflow"))
+
+func panicoverflow() {
+ panicCheck2("integer overflow")
+ panic(overflowError)
+}
+
+var floatError = error(errorString("floating point error"))
+
+func panicfloat() {
+ panicCheck2("floating point error")
+ panic(floatError)
+}
+
+var memoryError = error(errorString("invalid memory address or nil pointer dereference"))
+
+func panicmem() {
+ panicCheck2("invalid memory address or nil pointer dereference")
+ panic(memoryError)
+}
+
+func panicmemAddr(addr uintptr) {
+ panicCheck2("invalid memory address or nil pointer dereference")
+ panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
+}
+
+// Create a new deferred function fn, which has no arguments and results.
+// The compiler turns a defer statement into a call to this.
+func deferproc(fn func()) {
+ gp := getg()
+ if gp.m.curg != gp {
+ // go code on the system stack can't defer
+ throw("defer on system stack")
+ }
+
+ d := newdefer()
+ if d._panic != nil {
+ throw("deferproc: d.panic != nil after newdefer")
+ }
+ d.link = gp._defer
+ gp._defer = d
+ d.fn = fn
+ d.pc = getcallerpc()
+ // We must not be preempted between calling getcallersp and
+ // storing it to d.sp because getcallersp's result is a
+ // uintptr stack pointer.
+ d.sp = getcallersp()
+
+ // deferproc returns 0 normally.
+ // a deferred func that stops a panic
+ // makes the deferproc return 1.
+ // the code the compiler generates always
+ // checks the return value and jumps to the
+ // end of the function if deferproc returns != 0.
+ return0()
+ // No code can go here - the C return register has
+ // been set and must not be clobbered.
+}
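+
+// For illustration, a rough sketch (not actual compiler output) of how a
+// defer statement is lowered when it cannot be open-coded. User code such as
+// (work is a placeholder):
+//
+//	func f() {
+//		defer fmt.Println("done")
+//		work()
+//	}
+//
+// becomes, approximately:
+//
+//	deferproc(func() { fmt.Println("done") }) // returns 0 normally
+//	work()
+//	deferreturn() // runs the queued deferred call
+//	return
+//
+// plus the compiler-generated check described above that jumps straight to
+// the epilogue when deferproc returns 1 (a deferred call recovered a panic).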
+
+// deferprocStack queues a new deferred function with a defer record on the stack.
+// The defer record must have its fn field initialized.
+// All other fields can contain junk.
+// Nosplit because of the uninitialized pointer fields on the stack.
+//
+//go:nosplit
+func deferprocStack(d *_defer) {
+ gp := getg()
+ if gp.m.curg != gp {
+ // go code on the system stack can't defer
+ throw("defer on system stack")
+ }
+ // fn is already set.
+ // The other fields are junk on entry to deferprocStack and
+ // are initialized here.
+ d.started = false
+ d.heap = false
+ d.openDefer = false
+ d.sp = getcallersp()
+ d.pc = getcallerpc()
+ d.framepc = 0
+ d.varp = 0
+ // The lines below implement:
+ // d.panic = nil
+ // d.fd = nil
+ // d.link = gp._defer
+ // gp._defer = d
+ // But without write barriers. The first three are writes to
+ // the stack so they don't need a write barrier, and furthermore
+ // are to uninitialized memory, so they must not use a write barrier.
+ // The fourth write does not require a write barrier because we
+ // explicitly mark all the defer structures, so we don't need to
+ // keep track of pointers to them with a write barrier.
+ *(*uintptr)(unsafe.Pointer(&d._panic)) = 0
+ *(*uintptr)(unsafe.Pointer(&d.fd)) = 0
+ *(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
+ *(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))
+
+ return0()
+ // No code can go here - the C return register has
+ // been set and must not be clobbered.
+}
+
+// Each P holds a pool for defers.
+
+// Allocate a Defer, usually using per-P pool.
+// Each defer must be released with freedefer. The defer is not
+// added to any defer chain yet.
+func newdefer() *_defer {
+ var d *_defer
+ mp := acquirem()
+ pp := mp.p.ptr()
+ if len(pp.deferpool) == 0 && sched.deferpool != nil {
+ lock(&sched.deferlock)
+ for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
+ d := sched.deferpool
+ sched.deferpool = d.link
+ d.link = nil
+ pp.deferpool = append(pp.deferpool, d)
+ }
+ unlock(&sched.deferlock)
+ }
+ if n := len(pp.deferpool); n > 0 {
+ d = pp.deferpool[n-1]
+ pp.deferpool[n-1] = nil
+ pp.deferpool = pp.deferpool[:n-1]
+ }
+ releasem(mp)
+ mp, pp = nil, nil
+
+ if d == nil {
+ // Allocate new defer.
+ d = new(_defer)
+ }
+ d.heap = true
+ return d
+}
+
+// Free the given defer.
+// The defer cannot be used after this call.
+//
+// This is nosplit because the incoming defer is in a perilous state.
+// It's not on any defer list, so stack copying won't adjust stack
+// pointers in it (namely, d.link). Hence, if we were to copy the
+// stack, d could then contain a stale pointer.
+//
+//go:nosplit
+func freedefer(d *_defer) {
+ d.link = nil
+ // After this point we can copy the stack.
+
+ if d._panic != nil {
+ freedeferpanic()
+ }
+ if d.fn != nil {
+ freedeferfn()
+ }
+ if !d.heap {
+ return
+ }
+
+ mp := acquirem()
+ pp := mp.p.ptr()
+ if len(pp.deferpool) == cap(pp.deferpool) {
+ // Transfer half of local cache to the central cache.
+ var first, last *_defer
+ for len(pp.deferpool) > cap(pp.deferpool)/2 {
+ n := len(pp.deferpool)
+ d := pp.deferpool[n-1]
+ pp.deferpool[n-1] = nil
+ pp.deferpool = pp.deferpool[:n-1]
+ if first == nil {
+ first = d
+ } else {
+ last.link = d
+ }
+ last = d
+ }
+ lock(&sched.deferlock)
+ last.link = sched.deferpool
+ sched.deferpool = first
+ unlock(&sched.deferlock)
+ }
+
+ *d = _defer{}
+
+ pp.deferpool = append(pp.deferpool, d)
+
+ releasem(mp)
+ mp, pp = nil, nil
+}
+
+// Separate function so that it can split stack.
+// Windows otherwise runs out of stack space.
+func freedeferpanic() {
+ // _panic must be cleared before d is unlinked from gp.
+ throw("freedefer with d._panic != nil")
+}
+
+func freedeferfn() {
+ // fn must be cleared before d is unlinked from gp.
+ throw("freedefer with d.fn != nil")
+}
+
+// deferreturn runs deferred functions for the caller's frame.
+// The compiler inserts a call to this at the end of any
+// function which calls defer.
+func deferreturn() {
+ gp := getg()
+ for {
+ d := gp._defer
+ if d == nil {
+ return
+ }
+ sp := getcallersp()
+ if d.sp != sp {
+ return
+ }
+ if d.openDefer {
+ done := runOpenDeferFrame(gp, d)
+ if !done {
+ throw("unfinished open-coded defers in deferreturn")
+ }
+ gp._defer = d.link
+ freedefer(d)
+ // If this frame uses open defers, then this
+ // must be the only defer record for the
+ // frame, so we can just return.
+ return
+ }
+
+ fn := d.fn
+ d.fn = nil
+ gp._defer = d.link
+ freedefer(d)
+ fn()
+ }
+}
+
+// Goexit terminates the goroutine that calls it. No other goroutine is affected.
+// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
+// is not a panic, any recover calls in those deferred functions will return nil.
+//
+// Calling Goexit from the main goroutine terminates that goroutine
+// without func main returning. Since func main has not returned,
+// the program continues execution of other goroutines.
+// If all other goroutines exit, the program crashes.
+func Goexit() {
+ // Run all deferred functions for the current goroutine.
+ // This code is similar to gopanic, see that implementation
+ // for detailed comments.
+ gp := getg()
+
+ // Create a panic object for Goexit, so we can recognize when it might be
+ // bypassed by a recover().
+ var p _panic
+ p.goexit = true
+ p.link = gp._panic
+ gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
+
+ addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
+ for {
+ d := gp._defer
+ if d == nil {
+ break
+ }
+ if d.started {
+ if d._panic != nil {
+ d._panic.aborted = true
+ d._panic = nil
+ }
+ if !d.openDefer {
+ d.fn = nil
+ gp._defer = d.link
+ freedefer(d)
+ continue
+ }
+ }
+ d.started = true
+ d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
+ if d.openDefer {
+ done := runOpenDeferFrame(gp, d)
+ if !done {
+ // We should always run all defers in the frame,
+ // since there is no panic associated with this
+ // defer that can be recovered.
+ throw("unfinished open-coded defers in Goexit")
+ }
+ if p.aborted {
+ // Since our current defer caused a panic and may
+ // have been already freed, just restart scanning
+ // for open-coded defers from this frame again.
+ addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
+ } else {
+ addOneOpenDeferFrame(gp, 0, nil)
+ }
+ } else {
+ // Save the pc/sp in deferCallSave(), so we can "recover" back to this
+ // loop if necessary.
+ deferCallSave(&p, d.fn)
+ }
+ if p.aborted {
+ // We had a recursive panic in the defer d we started, and
+ // then did a recover in a defer that was further down the
+ // defer chain than d. In the case of an outstanding Goexit,
+ // we force the recover to return back to this loop. d will
+ // have already been freed if completed, so just continue
+ // immediately to the next defer on the chain.
+ p.aborted = false
+ continue
+ }
+ if gp._defer != d {
+ throw("bad defer entry in Goexit")
+ }
+ d._panic = nil
+ d.fn = nil
+ gp._defer = d.link
+ freedefer(d)
+ // Note: we ignore recovers here because Goexit isn't a panic
+ }
+ goexit1()
+}
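+
+// For illustration, the semantics documented above at user level (a sketch;
+// assumes fmt and runtime are imported):
+//
+//	go func() {
+//		defer func() {
+//			fmt.Println("deferred call still runs; recover() is", recover()) // nil
+//		}()
+//		runtime.Goexit() // runs deferred calls, then ends the goroutine; no panic
+//	}()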
+
+// Call all Error and String methods before freezing the world.
+// Used when crashing with panicking.
+func preprintpanics(p *_panic) {
+ defer func() {
+ text := "panic while printing panic value"
+ switch r := recover().(type) {
+ case nil:
+ // nothing to do
+ case string:
+ throw(text + ": " + r)
+ default:
+ throw(text + ": type " + efaceOf(&r)._type.string())
+ }
+ }()
+ for p != nil {
+ switch v := p.arg.(type) {
+ case error:
+ p.arg = v.Error()
+ case stringer:
+ p.arg = v.String()
+ }
+ p = p.link
+ }
+}
+
+// Print all currently active panics. Used when crashing.
+// Should only be called after preprintpanics.
+func printpanics(p *_panic) {
+ if p.link != nil {
+ printpanics(p.link)
+ if !p.link.goexit {
+ print("\t")
+ }
+ }
+ if p.goexit {
+ return
+ }
+ print("panic: ")
+ printany(p.arg)
+ if p.recovered {
+ print(" [recovered]")
+ }
+ print("\n")
+}
+
+// addOneOpenDeferFrame scans the stack (in gentraceback order, from inner frames to
+// outer frames) for the first frame (if any) with open-coded defers. If it finds
+// one, it adds a single entry to the defer chain for that frame. The entry added
+// represents all the defers in the associated open defer frame, and is sorted in
+// order with respect to any non-open-coded defers.
+//
+// addOneOpenDeferFrame stops (possibly without adding a new entry) if it encounters
+// an in-progress open defer entry. An in-progress open defer entry means there has
+// been a new panic because of a defer in the associated frame. addOneOpenDeferFrame
+// does not add an open defer entry past a started entry, because that started entry
+// still needs to be finished, and addOneOpenDeferFrame will be called when that started
+// entry is completed. The defer removal loop in gopanic() similarly stops at an
+// in-progress defer entry. Together, addOneOpenDeferFrame and the defer removal loop
+// ensure the invariant that there is no open defer entry further up the stack than
+// an in-progress defer, and also that the defer removal loop is guaranteed to remove
+// all not-in-progress open defer entries from the defer chain.
+//
+// If sp is non-nil, addOneOpenDeferFrame starts the stack scan from the frame
+// specified by sp. If sp is nil, it uses the sp from the current defer record (which
+// has just been finished). Hence, it continues the stack scan from the frame of the
+// defer that just finished. It skips any frame that already has a (not-in-progress)
+// open-coded _defer record in the defer chain.
+//
+// Note: All entries of the defer chain (including this new open-coded entry) have
+// their pointers (including sp) adjusted properly if the stack moves while
+// running deferred functions. Also, it is safe to pass in the sp arg (which is
+// the direct result of calling getcallersp()), because all pointer variables
+// (including arguments) are adjusted as needed during stack copies.
+func addOneOpenDeferFrame(gp *g, pc uintptr, sp unsafe.Pointer) {
+ var prevDefer *_defer
+ if sp == nil {
+ prevDefer = gp._defer
+ pc = prevDefer.framepc
+ sp = unsafe.Pointer(prevDefer.sp)
+ }
+ systemstack(func() {
+ gentraceback(pc, uintptr(sp), 0, gp, 0, nil, 0x7fffffff,
+ func(frame *stkframe, unused unsafe.Pointer) bool {
+ if prevDefer != nil && prevDefer.sp == frame.sp {
+ // Skip the frame for the previous defer that
+ // we just finished (and was used to set
+ // where we restarted the stack scan)
+ return true
+ }
+ f := frame.fn
+ fd := funcdata(f, _FUNCDATA_OpenCodedDeferInfo)
+ if fd == nil {
+ return true
+ }
+ // Insert the open defer record in the
+ // chain, in order sorted by sp.
+ d := gp._defer
+ var prev *_defer
+ for d != nil {
+ dsp := d.sp
+ if frame.sp < dsp {
+ break
+ }
+ if frame.sp == dsp {
+ if !d.openDefer {
+ throw("duplicated defer entry")
+ }
+ // Don't add any record past an
+ // in-progress defer entry. We don't
+ // need it, and more importantly, we
+ // want to keep the invariant that
+ // there is no open defer entry
+ // past an in-progress entry (see
+ // header comment).
+ if d.started {
+ return false
+ }
+ return true
+ }
+ prev = d
+ d = d.link
+ }
+ if frame.fn.deferreturn == 0 {
+ throw("missing deferreturn")
+ }
+
+ d1 := newdefer()
+ d1.openDefer = true
+ d1._panic = nil
+ // These are the pc/sp to set after we've
+ // run a defer in this frame that did a
+ // recover. We return to a special
+ // deferreturn that runs any remaining
+ // defers and then returns from the
+ // function.
+ d1.pc = frame.fn.entry() + uintptr(frame.fn.deferreturn)
+ d1.varp = frame.varp
+ d1.fd = fd
+ // Save the SP/PC associated with current frame,
+ // so we can continue stack trace later if needed.
+ d1.framepc = frame.pc
+ d1.sp = frame.sp
+ d1.link = d
+ if prev == nil {
+ gp._defer = d1
+ } else {
+ prev.link = d1
+ }
+ // Stop stack scanning after adding one open defer record
+ return false
+ },
+ nil, 0)
+ })
+}
+
+// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
+// uint32 and a pointer to the byte following the varint.
+//
+// There is a similar function runtime.readvarint, which takes a slice of bytes,
+// rather than an unsafe pointer. These functions are duplicated, because one of
+// the two use cases for the functions would get slower if the functions were
+// combined.
+func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
+ var r uint32
+ var shift int
+ for {
+ b := *(*uint8)((unsafe.Pointer(fd)))
+ fd = add(fd, unsafe.Sizeof(b))
+ if b < 128 {
+ return r + uint32(b)<<shift, fd
+ }
+ r += ((uint32(b) &^ 128) << shift)
+ shift += 7
+ if shift > 28 {
+ panic("Bad varint")
+ }
+ }
+}
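+
+// For illustration, a worked example of the decoding above: the value 300 is
+// encoded as the bytes 0xAC 0x02. The first byte has its continuation bit set
+// (0xAC >= 128) and contributes (0xAC &^ 128) << 0 = 44; the second byte is
+// below 128, terminates the loop, and contributes 2 << 7 = 256, giving
+// 44 + 256 = 300.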
+
+// runOpenDeferFrame runs the active open-coded defers in the frame specified by
+// d. It normally processes all active defers in the frame, but stops immediately
+// if a defer does a successful recover. It returns true if there are no
+// remaining defers to run in the frame.
+func runOpenDeferFrame(gp *g, d *_defer) bool {
+ done := true
+ fd := d.fd
+
+ deferBitsOffset, fd := readvarintUnsafe(fd)
+ nDefers, fd := readvarintUnsafe(fd)
+ deferBits := *(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset)))
+
+ for i := int(nDefers) - 1; i >= 0; i-- {
+ // read the funcdata info for this defer
+ var closureOffset uint32
+ closureOffset, fd = readvarintUnsafe(fd)
+ if deferBits&(1<<i) == 0 {
+ continue
+ }
+ closure := *(*func())(unsafe.Pointer(d.varp - uintptr(closureOffset)))
+ d.fn = closure
+ deferBits = deferBits &^ (1 << i)
+ *(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset))) = deferBits
+ p := d._panic
+ // Call the defer. Note that this can change d.varp if
+ // the stack moves.
+ deferCallSave(p, d.fn)
+ if p != nil && p.aborted {
+ break
+ }
+ d.fn = nil
+ if d._panic != nil && d._panic.recovered {
+ done = deferBits == 0
+ break
+ }
+ }
+
+ return done
+}
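+
+// For illustration, the deferBits bookkeeping above with hypothetical values:
+// in a frame with three conditional defers where defers 0 and 2 were reached,
+//
+//	deferBits == 0b101
+//
+// The loop visits i = 2, then i = 0, clearing each bit before its call:
+//
+//	deferBits &^= 1 << 2 // leaves 0b001
+//	deferBits &^= 1 << 0 // leaves 0b000
+//
+// so if a defer recovers, done reports whether any defers remain unrun.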
+
+// deferCallSave calls fn() after saving the caller's pc and sp in the
+// panic record. This allows the runtime to return to the Goexit defer
+// processing loop, in the unusual case where the Goexit may be
+// bypassed by a successful recover.
+//
+// This is marked as a wrapper by the compiler so it doesn't appear in
+// tracebacks.
+func deferCallSave(p *_panic, fn func()) {
+ if p != nil {
+ p.argp = unsafe.Pointer(getargp())
+ p.pc = getcallerpc()
+ p.sp = unsafe.Pointer(getcallersp())
+ }
+ fn()
+ if p != nil {
+ p.pc = 0
+ p.sp = unsafe.Pointer(nil)
+ }
+}
+
+// The implementation of the predeclared function panic.
+func gopanic(e any) {
+ gp := getg()
+ if gp.m.curg != gp {
+ print("panic: ")
+ printany(e)
+ print("\n")
+ throw("panic on system stack")
+ }
+
+ if gp.m.mallocing != 0 {
+ print("panic: ")
+ printany(e)
+ print("\n")
+ throw("panic during malloc")
+ }
+ if gp.m.preemptoff != "" {
+ print("panic: ")
+ printany(e)
+ print("\n")
+ print("preempt off reason: ")
+ print(gp.m.preemptoff)
+ print("\n")
+ throw("panic during preemptoff")
+ }
+ if gp.m.locks != 0 {
+ print("panic: ")
+ printany(e)
+ print("\n")
+ throw("panic holding locks")
+ }
+
+ var p _panic
+ p.arg = e
+ p.link = gp._panic
+ gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
+
+ atomic.Xadd(&runningPanicDefers, 1)
+
+ // By calculating getcallerpc/getcallersp here, we avoid scanning the
+ // gopanic frame (stack scanning is slow...)
+ addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
+
+ for {
+ d := gp._defer
+ if d == nil {
+ break
+ }
+
+ // If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
+ // take defer off list. An earlier panic will not continue running, but we will make sure below that an
+ // earlier Goexit does continue running.
+ if d.started {
+ if d._panic != nil {
+ d._panic.aborted = true
+ }
+ d._panic = nil
+ if !d.openDefer {
+ // For open-coded defers, we need to process the
+ // defer again, in case there are any other defers
+ // to call in the frame (not including the defer
+ // call that caused the panic).
+ d.fn = nil
+ gp._defer = d.link
+ freedefer(d)
+ continue
+ }
+ }
+
+ // Mark defer as started, but keep on list, so that traceback
+ // can find and update the defer's argument frame if stack growth
+ // or a garbage collection happens before executing d.fn.
+ d.started = true
+
+ // Record the panic that is running the defer.
+ // If there is a new panic during the deferred call, that panic
+ // will find d in the list and will mark d._panic (this panic) aborted.
+ d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
+
+ done := true
+ if d.openDefer {
+ done = runOpenDeferFrame(gp, d)
+ if done && !d._panic.recovered {
+ addOneOpenDeferFrame(gp, 0, nil)
+ }
+ } else {
+ p.argp = unsafe.Pointer(getargp())
+ d.fn()
+ }
+ p.argp = nil
+
+ // Deferred function did not panic. Remove d.
+ if gp._defer != d {
+ throw("bad defer entry in panic")
+ }
+ d._panic = nil
+
+ // trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
+ //GC()
+
+ pc := d.pc
+ sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
+ if done {
+ d.fn = nil
+ gp._defer = d.link
+ freedefer(d)
+ }
+ if p.recovered {
+ gp._panic = p.link
+ if gp._panic != nil && gp._panic.goexit && gp._panic.aborted {
+ // A normal recover would bypass/abort the Goexit. Instead,
+ // we return to the processing loop of the Goexit.
+ gp.sigcode0 = uintptr(gp._panic.sp)
+ gp.sigcode1 = uintptr(gp._panic.pc)
+ mcall(recovery)
+ throw("bypassed recovery failed") // mcall should not return
+ }
+ atomic.Xadd(&runningPanicDefers, -1)
+
+ // After a recover, remove any remaining non-started,
+ // open-coded defer entries, since the corresponding defers
+ // will be executed normally (inline). Any such entry will
+ // become stale once we run the corresponding defers inline
+ // and exit the associated stack frame. We only remove up to
+ // the first started (in-progress) open defer entry, not
+ // including the current frame, since any higher entries will
+ // be from a higher panic in progress, and will still be
+ // needed.
+ d := gp._defer
+ var prev *_defer
+ if !done {
+ // Skip our current frame, if not done. It is
+ // needed to complete any remaining defers in
+ // deferreturn()
+ prev = d
+ d = d.link
+ }
+ for d != nil {
+ if d.started {
+ // This defer is started but we
+ // are in the middle of a
+ // defer-panic-recover inside of
+ // it, so don't remove it or any
+ // further defer entries
+ break
+ }
+ if d.openDefer {
+ if prev == nil {
+ gp._defer = d.link
+ } else {
+ prev.link = d.link
+ }
+ newd := d.link
+ freedefer(d)
+ d = newd
+ } else {
+ prev = d
+ d = d.link
+ }
+ }
+
+ gp._panic = p.link
+ // Aborted panics are marked but remain on the g.panic list.
+ // Remove them from the list.
+ for gp._panic != nil && gp._panic.aborted {
+ gp._panic = gp._panic.link
+ }
+ if gp._panic == nil { // must be done with signal
+ gp.sig = 0
+ }
+ // Pass information about recovering frame to recovery.
+ gp.sigcode0 = uintptr(sp)
+ gp.sigcode1 = pc
+ mcall(recovery)
+ throw("recovery failed") // mcall should not return
+ }
+ }
+
+ // ran out of deferred calls - old-school panic now
+ // Because it is unsafe to call arbitrary user code after freezing
+ // the world, we call preprintpanics to invoke all necessary Error
+ // and String methods to prepare the panic strings before startpanic.
+ preprintpanics(gp._panic)
+
+ fatalpanic(gp._panic) // should not return
+ *(*int)(nil) = 0 // not reached
+}
+
+// getargp returns the location where the caller
+// writes outgoing function call arguments.
+//
+//go:nosplit
+//go:noinline
+func getargp() uintptr {
+ return getcallersp() + sys.MinFrameSize
+}
+
+// The implementation of the predeclared function recover.
+// Cannot split the stack because it needs to reliably
+// find the stack segment of its caller.
+//
+// TODO(rsc): Once we commit to CopyStackAlways,
+// this doesn't need to be nosplit.
+//
+//go:nosplit
+func gorecover(argp uintptr) any {
+ // Must be in a function running as part of a deferred call during the panic.
+ // Must be called from the topmost function of the call
+ // (the function used in the defer statement).
+ // p.argp is the argument pointer of that topmost deferred function call.
+ // Compare against argp reported by caller.
+ // If they match, the caller is the one who can recover.
+ gp := getg()
+ p := gp._panic
+ if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
+ p.recovered = true
+ return p.arg
+ }
+ return nil
+}
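+
+// For illustration, why the argp comparison above matters (user-level sketch;
+// helper is hypothetical):
+//
+//	defer func() { _ = recover() }() // recovers: called directly by the deferred func
+//	defer func() { helper() }()      // helper's recover() sees a different argp,
+//	                                 // returns nil, and does not stop the panic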
+
+//go:linkname sync_throw sync.throw
+func sync_throw(s string) {
+ throw(s)
+}
+
+//go:linkname sync_fatal sync.fatal
+func sync_fatal(s string) {
+ fatal(s)
+}
+
+// throw triggers a fatal error that dumps a stack trace and exits.
+//
+// throw should be used for runtime-internal fatal errors where Go itself,
+// rather than user code, may be at fault for the failure.
+//
+//go:nosplit
+func throw(s string) {
+ // Everything throw does should be recursively nosplit so it
+ // can be called even when it's unsafe to grow the stack.
+ systemstack(func() {
+ print("fatal error: ", s, "\n")
+ })
+
+ fatalthrow(throwTypeRuntime)
+}
+
+// fatal triggers a fatal error that dumps a stack trace and exits.
+//
+// fatal is equivalent to throw, but is used when user code is expected to be
+// at fault for the failure, such as racing map writes.
+//
+// fatal does not include runtime frames, system goroutines, or frame metadata
+// (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher.
+//
+//go:nosplit
+func fatal(s string) {
+ // Everything fatal does should be recursively nosplit so it
+ // can be called even when it's unsafe to grow the stack.
+ systemstack(func() {
+ print("fatal error: ", s, "\n")
+ })
+
+ fatalthrow(throwTypeUser)
+}
+
+// runningPanicDefers is non-zero while running deferred functions for panic.
+// runningPanicDefers is incremented and decremented atomically.
+// This is used to try hard to get a panic stack trace out when exiting.
+var runningPanicDefers uint32
+
+// panicking is non-zero when crashing the program for an unrecovered panic.
+// panicking is incremented and decremented atomically.
+var panicking uint32
+
+// paniclk is held while printing the panic information and stack trace,
+// so that two concurrent panics don't overlap their output.
+var paniclk mutex
+
+// Unwind the stack after a deferred function calls recover
+// after a panic. Then arrange to continue running as though
+// the caller of the deferred function returned normally.
+func recovery(gp *g) {
+ // Info about defer passed in G struct.
+ sp := gp.sigcode0
+ pc := gp.sigcode1
+
+ // d's arguments need to be in the stack.
+ if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
+ print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
+ throw("bad recovery")
+ }
+
+ // Make the deferproc for this d return again,
+ // this time returning 1. The calling function will
+ // jump to the standard return epilogue.
+ gp.sched.sp = sp
+ gp.sched.pc = pc
+ gp.sched.lr = 0
+ gp.sched.ret = 1
+ gogo(&gp.sched)
+}
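+
+// For illustration, the control flow recovery arranges (a sketch, not
+// compiler output): after gogo, the goroutine resumes as if deferproc in the
+// recovering frame had just returned 1, so the compiler-generated check
+//
+//	if deferproc(fn) != 0 {
+//		// jump to the epilogue: run deferreturn, then return normally
+//	}
+//
+// takes its branch and the frame finishes as a normal return.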
+
+// fatalthrow implements an unrecoverable runtime throw. It freezes the
+// system, prints stack traces starting from its caller, and terminates the
+// process.
+//
+//go:nosplit
+func fatalthrow(t throwType) {
+ pc := getcallerpc()
+ sp := getcallersp()
+ gp := getg()
+
+ if gp.m.throwing == throwTypeNone {
+ gp.m.throwing = t
+ }
+
+ // Switch to the system stack to avoid any stack growth, which may make
+ // things worse if the runtime is in a bad state.
+ systemstack(func() {
+ startpanic_m()
+
+ if dopanic_m(gp, pc, sp) {
+ // crash uses a decent amount of nosplit stack and we're already
+ // low on stack in throw, so crash on the system stack (unlike
+ // fatalpanic).
+ crash()
+ }
+
+ exit(2)
+ })
+
+ *(*int)(nil) = 0 // not reached
+}
+
+// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
+// that if msgs != nil, fatalpanic also prints panic messages and decrements
+// runningPanicDefers once main is blocked from exiting.
+//
+//go:nosplit
+func fatalpanic(msgs *_panic) {
+ pc := getcallerpc()
+ sp := getcallersp()
+ gp := getg()
+ var docrash bool
+ // Switch to the system stack to avoid any stack growth, which
+ // may make things worse if the runtime is in a bad state.
+ systemstack(func() {
+ if startpanic_m() && msgs != nil {
+ // There were panic messages and startpanic_m
+ // says it's okay to try to print them.
+
+ // startpanic_m set panicking, which will
+ // block main from exiting, so now OK to
+ // decrement runningPanicDefers.
+ atomic.Xadd(&runningPanicDefers, -1)
+
+ printpanics(msgs)
+ }
+
+ docrash = dopanic_m(gp, pc, sp)
+ })
+
+ if docrash {
+ // By crashing outside the above systemstack call, debuggers
+ // will not be confused when generating a backtrace.
+ // Function crash is marked nosplit to avoid stack growth.
+ crash()
+ }
+
+ systemstack(func() {
+ exit(2)
+ })
+
+ *(*int)(nil) = 0 // not reached
+}
+
+// startpanic_m prepares for an unrecoverable panic.
+//
+// It returns true if panic messages should be printed, or false if
+// the runtime is in bad shape and should just print stacks.
+//
+// It must not have write barriers even though the write barrier
+// explicitly ignores writes once dying > 0. Write barriers still
+// assume that g.m.p != nil, and this function may not have P
+// in some contexts (e.g. a panic in a signal handler for a signal
+// sent to an M with no P).
+//
+//go:nowritebarrierrec
+func startpanic_m() bool {
+ _g_ := getg()
+ if mheap_.cachealloc.size == 0 { // very early
+ print("runtime: panic before malloc heap initialized\n")
+ }
+ // Disallow malloc during an unrecoverable panic. A panic
+ // could happen in a signal handler, or in a throw, or inside
+ // malloc itself. We want to catch if an allocation ever does
+ // happen (even if we're not in one of these situations).
+ _g_.m.mallocing++
+
+ // If we're dying because of a bad lock count, set it to a
+ // good lock count so we don't recursively panic below.
+ if _g_.m.locks < 0 {
+ _g_.m.locks = 1
+ }
+
+ switch _g_.m.dying {
+ case 0:
+ // Setting dying >0 has the side-effect of disabling this G's writebuf.
+ _g_.m.dying = 1
+ atomic.Xadd(&panicking, 1)
+ lock(&paniclk)
+ if debug.schedtrace > 0 || debug.scheddetail > 0 {
+ schedtrace(true)
+ }
+ freezetheworld()
+ return true
+ case 1:
+ // Something failed while panicking.
+ // Just print a stack trace and exit.
+ _g_.m.dying = 2
+ print("panic during panic\n")
+ return false
+ case 2:
+ // This is a genuine bug in the runtime, we couldn't even
+ // print the stack trace successfully.
+ _g_.m.dying = 3
+ print("stack trace unavailable\n")
+ exit(4)
+ fallthrough
+ default:
+ // Can't even print! Just exit.
+ exit(5)
+ return false // Need to return something.
+ }
+}
+
+var didothers bool
+var deadlock mutex
+
+func dopanic_m(gp *g, pc, sp uintptr) bool {
+ if gp.sig != 0 {
+ signame := signame(gp.sig)
+ if signame != "" {
+ print("[signal ", signame)
+ } else {
+ print("[signal ", hex(gp.sig))
+ }
+ print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
+ }
+
+ level, all, docrash := gotraceback()
+ _g_ := getg()
+ if level > 0 {
+ if gp != gp.m.curg {
+ all = true
+ }
+ if gp != gp.m.g0 {
+ print("\n")
+ goroutineheader(gp)
+ traceback(pc, sp, 0, gp)
+ } else if level >= 2 || _g_.m.throwing >= throwTypeRuntime {
+ print("\nruntime stack:\n")
+ traceback(pc, sp, 0, gp)
+ }
+ if !didothers && all {
+ didothers = true
+ tracebackothers(gp)
+ }
+ }
+ unlock(&paniclk)
+
+ if atomic.Xadd(&panicking, -1) != 0 {
+ // Some other m is panicking too.
+ // Let it print what it needs to print.
+ // Wait forever without chewing up cpu.
+ // It will exit when it's done.
+ lock(&deadlock)
+ lock(&deadlock)
+ }
+
+ printDebugLog()
+
+ return docrash
+}
+
+// canpanic returns false if a signal should throw instead of
+// panicking.
+//
+//go:nosplit
+func canpanic(gp *g) bool {
+ // Note that g is m->gsignal, different from gp.
+ // Note also that g->m can change at preemption, so m can go stale
+ // if this function ever makes a function call.
+ _g_ := getg()
+ mp := _g_.m
+
+ // Is it okay for gp to panic instead of crashing the program?
+ // Yes, as long as it is running Go code, not runtime code,
+ // and not stuck in a system call.
+ if gp == nil || gp != mp.curg {
+ return false
+ }
+ if mp.locks != 0 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 {
+ return false
+ }
+ status := readgstatus(gp)
+ if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
+ return false
+ }
+ if GOOS == "windows" && mp.libcallsp != 0 {
+ return false
+ }
+ return true
+}
+
+// shouldPushSigpanic reports whether pc should be used as sigpanic's
+// return PC (pushing a frame for the call). Otherwise, it should be
+// left alone so that LR is used as sigpanic's return PC, effectively
+// replacing the top-most frame with sigpanic. This is used by
+// preparePanic.
+func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
+ if pc == 0 {
+ // Probably a call to a nil func. The old LR is more
+ // useful in the stack trace. Not pushing the frame
+ // will make the trace look like a call to sigpanic
+ // instead. (Otherwise the trace will end at sigpanic
+ // and we won't get to see who faulted.)
+ return false
+ }
+ // If we don't recognize the PC as code, but we do recognize
+ // the link register as code, then this assumes the panic was
+ // caused by a call to non-code. In this case, we want to
+ // ignore this call to make unwinding show the context.
+ //
+ // If we're running C code, we're not going to recognize pc as a
+ // Go function, so just assume it's good. Otherwise, traceback
+ // may try to read a stale LR that looks like a Go code
+ // pointer and wander into the woods.
+ if gp.m.incgo || findfunc(pc).valid() {
+ // This wasn't a bad call, so use PC as sigpanic's
+ // return PC.
+ return true
+ }
+ if findfunc(lr).valid() {
+ // This was a bad call, but the LR is good, so use the
+ // LR as sigpanic's return PC.
+ return false
+ }
+ // Neither the PC nor the LR is good. Hopefully pushing a frame
+ // will work.
+ return true
+}
+
+// isAbortPC reports whether pc is the program counter at which
+// runtime.abort raises a signal.
+//
+// It is nosplit because it's part of the isgoexception
+// implementation.
+//
+//go:nosplit
+func isAbortPC(pc uintptr) bool {
+ f := findfunc(pc)
+ if !f.valid() {
+ return false
+ }
+ return f.funcID == funcID_abort
+}
diff --git a/contrib/go/_std_1.18/src/runtime/plugin.go b/contrib/go/_std_1.19/src/runtime/plugin.go
index a61dcc3b5d..a61dcc3b5d 100644
--- a/contrib/go/_std_1.18/src/runtime/plugin.go
+++ b/contrib/go/_std_1.19/src/runtime/plugin.go
diff --git a/contrib/go/_std_1.18/src/runtime/preempt.go b/contrib/go/_std_1.19/src/runtime/preempt.go
index da24f5042c..da24f5042c 100644
--- a/contrib/go/_std_1.18/src/runtime/preempt.go
+++ b/contrib/go/_std_1.19/src/runtime/preempt.go
diff --git a/contrib/go/_std_1.18/src/runtime/preempt_amd64.s b/contrib/go/_std_1.19/src/runtime/preempt_amd64.s
index 31f7c8b66f..31f7c8b66f 100644
--- a/contrib/go/_std_1.18/src/runtime/preempt_amd64.s
+++ b/contrib/go/_std_1.19/src/runtime/preempt_amd64.s
diff --git a/contrib/go/_std_1.18/src/runtime/preempt_nonwindows.go b/contrib/go/_std_1.19/src/runtime/preempt_nonwindows.go
index d6a2408cb7..d6a2408cb7 100644
--- a/contrib/go/_std_1.18/src/runtime/preempt_nonwindows.go
+++ b/contrib/go/_std_1.19/src/runtime/preempt_nonwindows.go
diff --git a/contrib/go/_std_1.18/src/runtime/print.go b/contrib/go/_std_1.19/src/runtime/print.go
index b2a642bb86..b2a642bb86 100644
--- a/contrib/go/_std_1.18/src/runtime/print.go
+++ b/contrib/go/_std_1.19/src/runtime/print.go
diff --git a/contrib/go/_std_1.19/src/runtime/proc.go b/contrib/go/_std_1.19/src/runtime/proc.go
new file mode 100644
index 0000000000..3991a48b10
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/proc.go
@@ -0,0 +1,6343 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/abi"
+ "internal/cpu"
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+// set using cmd/go/internal/modload.ModInfoProg
+var modinfo string
+
+// Goroutine scheduler
+// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
+//
+// The main concepts are:
+// G - goroutine.
+// M - worker thread, or machine.
+// P - processor, a resource that is required to execute Go code.
+// M must have an associated P to execute Go code, however it can be
+// blocked or in a syscall w/o an associated P.
+//
+// Design doc at https://golang.org/s/go11sched.
+
+// Worker thread parking/unparking.
+// We need to balance between keeping enough running worker threads to utilize
+// available hardware parallelism and parking excessive running worker threads
+// to conserve CPU resources and power. This is not simple for two reasons:
+// (1) scheduler state is intentionally distributed (in particular, per-P work
+// queues), so it is not possible to compute global predicates on fast paths;
+// (2) for optimal thread management we would need to know the future (don't park
+// a worker thread when a new goroutine will be readied in the near future).
+//
+// Three rejected approaches that would work badly:
+// 1. Centralize all scheduler state (would inhibit scalability).
+// 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
+// is a spare P, unpark a thread and hand it the P and the goroutine.
+// This would lead to thread state thrashing, as the thread that readied the
+// goroutine can be out of work the very next moment, and we would then need to park it.
+// Also, it would destroy locality of computation as we want to preserve
+// dependent goroutines on the same thread; and introduce additional latency.
+// 3. Unpark an additional thread whenever we ready a goroutine and there is an
+// idle P, but don't do handoff. This would lead to excessive thread parking/
+// unparking as the additional threads will instantly park without discovering
+// any work to do.
+//
+// The current approach:
+//
+// This approach applies to three primary sources of potential work: readying a
+// goroutine, new/modified-earlier timers, and idle-priority GC. See below for
+// additional details.
+//
+// We unpark an additional thread when we submit work if (this is wakep()):
+// 1. There is an idle P, and
+// 2. There are no "spinning" worker threads.
+//
+// A worker thread is considered spinning if it is out of local work and did
+// not find work in the global run queue or netpoller; the spinning state is
+// denoted in m.spinning and in sched.nmspinning. Threads unparked this way are
+// also considered spinning; we don't do goroutine handoff so such threads are
+// out of work initially. Spinning threads spin looking for work in per-P
+// run queues and timer heaps or from the GC before parking. If a spinning
+// thread finds work it takes itself out of the spinning state and proceeds to
+// execution. If it does not find work it takes itself out of the spinning
+// state and then parks.
+//
+// If there is at least one spinning thread (sched.nmspinning>0), we don't
+// unpark new threads when submitting work. To compensate for that, if the last
+// spinning thread finds work and stops spinning, it must unpark a new spinning
+// thread. This approach smooths out unjustified spikes of thread unparking,
+// but at the same time guarantees eventual maximal CPU parallelism
+// utilization.
+//
+// The main implementation complication is that we need to be very careful
+// during spinning->non-spinning thread transition. This transition can race
+// with submission of new work, and either one part or another needs to unpark
+// another worker thread. If they both fail to do that, we can end up with
+// semi-persistent CPU underutilization.
+//
+// The general pattern for submission is:
+// 1. Submit work to the local run queue, timer heap, or GC state.
+// 2. #StoreLoad-style memory barrier.
+// 3. Check sched.nmspinning.
+//
+// The general pattern for spinning->non-spinning transition is:
+// 1. Decrement nmspinning.
+// 2. #StoreLoad-style memory barrier.
+// 3. Check all per-P work queues and GC for new work.
+//
+// Note that all this complexity does not apply to global run queue as we are
+// not sloppy about thread unparking when submitting to global queue. Also see
+// comments for nmspinning manipulation.
+//
+// How these different sources of work behave varies, though it doesn't affect
+// the synchronization approach:
+// * Ready goroutine: this is an obvious source of work; the goroutine is
+// immediately ready and must run on some thread eventually.
+// * New/modified-earlier timer: The current timer implementation (see time.go)
+// uses netpoll in a thread with no work available to wait for the soonest
+// timer. If there is no thread waiting, we want a new spinning thread to go
+// wait.
+// * Idle-priority GC: The GC wakes a stopped idle thread to contribute to
+// background GC work (note: currently disabled per golang.org/issue/19112).
+// Also see golang.org/issue/44313, as this should be extended to all GC
+// workers.
+
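+// For illustration, the submission and spinning->non-spinning patterns above
+// as a schematic sketch (pseudo-code, heavily simplified; the real logic
+// lives in wakep, runqput, and findrunnable):
+//
+//	// submitting work:
+//	runqput(pp, gp, false)     // 1. publish the work
+//	// StoreLoad-style barrier // 2.
+//	if nmspinning == 0 {       // 3. nobody is currently looking for work,
+//		wakep()            //    so wake (or start) a spinning M
+//	}
+//
+//	// spinning -> non-spinning:
+//	nmspinning--               // 1. leave the spinning state
+//	// StoreLoad-style barrier // 2.
+//	// 3. recheck all per-P queues; if work appears, unpark another spinner
+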
+var (
+ m0 m
+ g0 g
+ mcache0 *mcache
+ raceprocctx0 uintptr
+)
+
+//go:linkname runtime_inittask runtime..inittask
+var runtime_inittask initTask
+
+//go:linkname main_inittask main..inittask
+var main_inittask initTask
+
+// main_init_done is a signal used by cgocallbackg that initialization
+// has been completed. It is made before _cgo_notify_runtime_init_done,
+// so all cgo calls can rely on it existing. When main_init is complete,
+// it is closed, meaning cgocallbackg can reliably receive from it.
+var main_init_done chan bool
+
+//go:linkname main_main main.main
+func main_main()
+
+// mainStarted indicates that the main M has started.
+var mainStarted bool
+
+// runtimeInitTime is the nanotime() at which the runtime started.
+var runtimeInitTime int64
+
+// Value to use for signal mask for newly created M's.
+var initSigmask sigset
+
+// The main goroutine.
+func main() {
+ g := getg()
+
+ // Racectx of m0->g0 is used only as the parent of the main goroutine.
+ // It must not be used for anything else.
+ g.m.g0.racectx = 0
+
+ // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
+ // Using decimal instead of binary GB and MB because
+ // they look nicer in the stack overflow failure message.
+ if goarch.PtrSize == 8 {
+ maxstacksize = 1000000000
+ } else {
+ maxstacksize = 250000000
+ }
+
+ // An upper limit for max stack size. Used to avoid random crashes
+ // after calling SetMaxStack and trying to allocate a stack that is too big,
+ // since stackalloc works with 32-bit sizes.
+ maxstackceiling = 2 * maxstacksize
+
+ // Allow newproc to start new Ms.
+ mainStarted = true
+
+ if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
+ systemstack(func() {
+ newm(sysmon, nil, -1)
+ })
+ }
+
+ // Lock the main goroutine onto this, the main OS thread,
+ // during initialization. Most programs won't care, but a few
+ // do require certain calls to be made by the main thread.
+ // Those can arrange for main.main to run in the main thread
+ // by calling runtime.LockOSThread during initialization
+ // to preserve the lock.
+ lockOSThread()
+
+ if g.m != &m0 {
+ throw("runtime.main not on m0")
+ }
+
+ // Record when the world started.
+ // Must be before doInit for tracing init.
+ runtimeInitTime = nanotime()
+ if runtimeInitTime == 0 {
+ throw("nanotime returning zero")
+ }
+
+ if debug.inittrace != 0 {
+ inittrace.id = getg().goid
+ inittrace.active = true
+ }
+
+ doInit(&runtime_inittask) // Must be before defer.
+
+ // Defer unlock so that runtime.Goexit during init does the unlock too.
+ needUnlock := true
+ defer func() {
+ if needUnlock {
+ unlockOSThread()
+ }
+ }()
+
+ gcenable()
+
+ main_init_done = make(chan bool)
+ if iscgo {
+ if _cgo_thread_start == nil {
+ throw("_cgo_thread_start missing")
+ }
+ if GOOS != "windows" {
+ if _cgo_setenv == nil {
+ throw("_cgo_setenv missing")
+ }
+ if _cgo_unsetenv == nil {
+ throw("_cgo_unsetenv missing")
+ }
+ }
+ if _cgo_notify_runtime_init_done == nil {
+ throw("_cgo_notify_runtime_init_done missing")
+ }
+ // Start the template thread in case we enter Go from
+ // a C-created thread and need to create a new thread.
+ startTemplateThread()
+ cgocall(_cgo_notify_runtime_init_done, nil)
+ }
+
+ doInit(&main_inittask)
+
+ // Disable init tracing after main init done to avoid overhead
+ // of collecting statistics in malloc and newproc
+ inittrace.active = false
+
+ close(main_init_done)
+
+ needUnlock = false
+ unlockOSThread()
+
+ if isarchive || islibrary {
+ // A program compiled with -buildmode=c-archive or c-shared
+ // has a main, but it is not executed.
+ return
+ }
+ fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
+ fn()
+ if raceenabled {
+ racefini()
+ }
+
+ // Make racy client program work: if panicking on
+ // another goroutine at the same time as main returns,
+ // let the other goroutine finish printing the panic trace.
+ // Once it does, it will exit. See issues 3934 and 20018.
+ if atomic.Load(&runningPanicDefers) != 0 {
+ // Running deferred functions should not take long.
+ for c := 0; c < 1000; c++ {
+ if atomic.Load(&runningPanicDefers) == 0 {
+ break
+ }
+ Gosched()
+ }
+ }
+ if atomic.Load(&panicking) != 0 {
+ gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
+ }
+
+ exit(0)
+ for {
+ var x *int32
+ *x = 0
+ }
+}
+
+// os_beforeExit is called from os.Exit(0).
+//
+//go:linkname os_beforeExit os.runtime_beforeExit
+func os_beforeExit() {
+ if raceenabled {
+ racefini()
+ }
+}
+
+// start forcegc helper goroutine
+func init() {
+ go forcegchelper()
+}
+
+func forcegchelper() {
+ forcegc.g = getg()
+ lockInit(&forcegc.lock, lockRankForcegc)
+ for {
+ lock(&forcegc.lock)
+ if forcegc.idle != 0 {
+ throw("forcegc: phase error")
+ }
+ atomic.Store(&forcegc.idle, 1)
+ goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceEvGoBlock, 1)
+ // this goroutine is explicitly resumed by sysmon
+ if debug.gctrace > 0 {
+ println("GC forced")
+ }
+ // Time-triggered, fully concurrent.
+ gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
+ }
+}
+
+//go:nosplit
+
+// Gosched yields the processor, allowing other goroutines to run. It does not
+// suspend the current goroutine, so execution resumes automatically.
+func Gosched() {
+ checkTimeouts()
+ mcall(gosched_m)
+}
+
+// goschedguarded yields the processor like gosched, but also checks
+// for forbidden states and opts out of the yield in those cases.
+//
+//go:nosplit
+func goschedguarded() {
+ mcall(goschedguarded_m)
+}
+
+// Puts the current goroutine into a waiting state and calls unlockf on the
+// system stack.
+//
+// If unlockf returns false, the goroutine is resumed.
+//
+// unlockf must not access this G's stack, as it may be moved between
+// the call to gopark and the call to unlockf.
+//
+// Note that because unlockf is called after putting the G into a waiting
+// state, the G may have already been readied by the time unlockf is called
+// unless there is external synchronization preventing the G from being
+// readied. If unlockf returns false, it must guarantee that the G cannot be
+// externally readied.
+//
+// Reason explains why the goroutine has been parked. It is displayed in stack
+// traces and heap dumps. Reasons should be unique and descriptive. Do not
+// re-use reasons, add new ones.
+func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
+ if reason != waitReasonSleep {
+ checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
+ }
+ mp := acquirem()
+ gp := mp.curg
+ status := readgstatus(gp)
+ if status != _Grunning && status != _Gscanrunning {
+ throw("gopark: bad g status")
+ }
+ mp.waitlock = lock
+ mp.waitunlockf = unlockf
+ gp.waitreason = reason
+ mp.waittraceev = traceEv
+ mp.waittraceskip = traceskip
+ releasem(mp)
+ // can't do anything that might move the G between Ms here.
+ mcall(park_m)
+}
+
+// Puts the current goroutine into a waiting state and unlocks the lock.
+// The goroutine can be made runnable again by calling goready(gp).
+func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
+ gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
+}
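+
+// For illustration, the typical parking pattern built from the helpers above
+// (a sketch; l and the wait list are hypothetical, and reason stands for one
+// of the waitReason* constants):
+//
+//	lock(&l)
+//	// enqueue getg() on a wait list guarded by l
+//	goparkunlock(&l, reason, traceEvGoBlock, 1)
+//	// resumes here once another goroutine calls goready(gp, traceskip)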
+
+func goready(gp *g, traceskip int) {
+ systemstack(func() {
+ ready(gp, traceskip, true)
+ })
+}
+
+//go:nosplit
+func acquireSudog() *sudog {
+ // Delicate dance: the semaphore implementation calls
+ // acquireSudog, acquireSudog calls new(sudog),
+ // new calls malloc, malloc can call the garbage collector,
+ // and the garbage collector calls the semaphore implementation
+ // in stopTheWorld.
+ // Break the cycle by doing acquirem/releasem around new(sudog).
+ // The acquirem/releasem increments m.locks during new(sudog),
+ // which keeps the garbage collector from being invoked.
+ mp := acquirem()
+ pp := mp.p.ptr()
+ if len(pp.sudogcache) == 0 {
+ lock(&sched.sudoglock)
+ // First, try to grab a batch from central cache.
+ for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
+ s := sched.sudogcache
+ sched.sudogcache = s.next
+ s.next = nil
+ pp.sudogcache = append(pp.sudogcache, s)
+ }
+ unlock(&sched.sudoglock)
+ // If the central cache is empty, allocate a new one.
+ if len(pp.sudogcache) == 0 {
+ pp.sudogcache = append(pp.sudogcache, new(sudog))
+ }
+ }
+ n := len(pp.sudogcache)
+ s := pp.sudogcache[n-1]
+ pp.sudogcache[n-1] = nil
+ pp.sudogcache = pp.sudogcache[:n-1]
+ if s.elem != nil {
+ throw("acquireSudog: found s.elem != nil in cache")
+ }
+ releasem(mp)
+ return s
+}
+
+//go:nosplit
+func releaseSudog(s *sudog) {
+ if s.elem != nil {
+ throw("runtime: sudog with non-nil elem")
+ }
+ if s.isSelect {
+ throw("runtime: sudog with non-false isSelect")
+ }
+ if s.next != nil {
+ throw("runtime: sudog with non-nil next")
+ }
+ if s.prev != nil {
+ throw("runtime: sudog with non-nil prev")
+ }
+ if s.waitlink != nil {
+ throw("runtime: sudog with non-nil waitlink")
+ }
+ if s.c != nil {
+ throw("runtime: sudog with non-nil c")
+ }
+ gp := getg()
+ if gp.param != nil {
+ throw("runtime: releaseSudog with non-nil gp.param")
+ }
+ mp := acquirem() // avoid rescheduling to another P
+ pp := mp.p.ptr()
+ if len(pp.sudogcache) == cap(pp.sudogcache) {
+ // Transfer half of local cache to the central cache.
+ var first, last *sudog
+ for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
+ n := len(pp.sudogcache)
+ p := pp.sudogcache[n-1]
+ pp.sudogcache[n-1] = nil
+ pp.sudogcache = pp.sudogcache[:n-1]
+ if first == nil {
+ first = p
+ } else {
+ last.next = p
+ }
+ last = p
+ }
+ lock(&sched.sudoglock)
+ last.next = sched.sudogcache
+ sched.sudogcache = first
+ unlock(&sched.sudoglock)
+ }
+ pp.sudogcache = append(pp.sudogcache, s)
+ releasem(mp)
+}
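+
+// For illustration, the cache sizing these two functions maintain
+// (hypothetical numbers): with cap(pp.sudogcache) == 128, a release into a
+// full local cache first moves 64 sudogs to sched.sudogcache as a linked
+// list, so the local cache oscillates between half full and full instead of
+// taking the central lock on every release; acquireSudog refills by the same
+// half-capacity rule in the opposite direction.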
+
+// called from assembly
+func badmcall(fn func(*g)) {
+ throw("runtime: mcall called on m->g0 stack")
+}
+
+func badmcall2(fn func(*g)) {
+ throw("runtime: mcall function returned")
+}
+
+func badreflectcall() {
+ panic(plainError("arg size to reflect.call more than 1GB"))
+}
+
+var badmorestackg0Msg = "fatal: morestack on g0\n"
+
+//go:nosplit
+//go:nowritebarrierrec
+func badmorestackg0() {
+ sp := stringStructOf(&badmorestackg0Msg)
+ write(2, sp.str, int32(sp.len))
+}
+
+var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"
+
+//go:nosplit
+//go:nowritebarrierrec
+func badmorestackgsignal() {
+ sp := stringStructOf(&badmorestackgsignalMsg)
+ write(2, sp.str, int32(sp.len))
+}
+
+//go:nosplit
+func badctxt() {
+ throw("ctxt != 0")
+}
+
+func lockedOSThread() bool {
+ gp := getg()
+ return gp.lockedm != 0 && gp.m.lockedg != 0
+}
+
+var (
+ // allgs contains all Gs ever created (including dead Gs), and thus
+ // never shrinks.
+ //
+ // Access via the slice is protected by allglock or stop-the-world.
+ // Readers that cannot take the lock may (carefully!) use the atomic
+ // variables below.
+ allglock mutex
+ allgs []*g
+
+ // allglen and allgptr are atomic variables that contain len(allgs) and
+ // &allgs[0] respectively. Proper ordering depends on totally-ordered
+ // loads and stores. Writes are protected by allglock.
+ //
+ // allgptr is updated before allglen. Readers should read allglen
+ // before allgptr to ensure that allglen is always <= len(allgptr). New
+ // Gs appended during the race can be missed. For a consistent view of
+ // all Gs, allglock must be held.
+ //
+ // allgptr copies should always be stored as a concrete type or
+ // unsafe.Pointer, not uintptr, to ensure that GC can still reach it
+ // even if it points to a stale array.
+ allglen uintptr
+ allgptr **g
+)
+
+func allgadd(gp *g) {
+ if readgstatus(gp) == _Gidle {
+ throw("allgadd: bad status Gidle")
+ }
+
+ lock(&allglock)
+ allgs = append(allgs, gp)
+ if &allgs[0] != allgptr {
+ atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
+ }
+ atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
+ unlock(&allglock)
+}
+
+// allGsSnapshot returns a snapshot of the slice of all Gs.
+//
+// The world must be stopped or allglock must be held.
+func allGsSnapshot() []*g {
+ assertWorldStoppedOrLockHeld(&allglock)
+
+ // Because the world is stopped or allglock is held, allgadd
+ // cannot happen concurrently with this. allgs grows
+ // monotonically and existing entries never change, so we can
+ // simply return a copy of the slice header. For added safety,
+ // we trim everything past len because that can still change.
+ return allgs[:len(allgs):len(allgs)]
+}
+
+// atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex.
+func atomicAllG() (**g, uintptr) {
+ length := atomic.Loaduintptr(&allglen)
+ ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
+ return ptr, length
+}
+
+// atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.
+func atomicAllGIndex(ptr **g, i uintptr) *g {
+ return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
+}
+
+// forEachG calls fn on every G from allgs.
+//
+// forEachG takes a lock to exclude concurrent addition of new Gs.
+func forEachG(fn func(gp *g)) {
+ lock(&allglock)
+ for _, gp := range allgs {
+ fn(gp)
+ }
+ unlock(&allglock)
+}
+
+// forEachGRace calls fn on every G from allgs.
+//
+// forEachGRace avoids locking, but does not exclude addition of new Gs during
+// execution, which may be missed.
+func forEachGRace(fn func(gp *g)) {
+ ptr, length := atomicAllG()
+ for i := uintptr(0); i < length; i++ {
+ gp := atomicAllGIndex(ptr, i)
+ fn(gp)
+ }
+ return
+}
+
+const (
+ // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
+ // 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
+ _GoidCacheBatch = 16
+)
+
+// cpuinit extracts the environment variable GODEBUG from the environment on
+// Unix-like operating systems and calls internal/cpu.Initialize.
+func cpuinit() {
+ const prefix = "GODEBUG="
+ var env string
+
+ switch GOOS {
+ case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
+ cpu.DebugOptions = true
+
+ // Similar to goenv_unix but extracts the environment value for
+ // GODEBUG directly.
+ // TODO(moehrmann): remove when general goenvs() can be called before cpuinit()
+ n := int32(0)
+ for argv_index(argv, argc+1+n) != nil {
+ n++
+ }
+
+ for i := int32(0); i < n; i++ {
+ p := argv_index(argv, argc+1+i)
+ s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)}))
+
+ if hasPrefix(s, prefix) {
+ env = gostring(p)[len(prefix):]
+ break
+ }
+ }
+ }
+
+ cpu.Initialize(env)
+
+ // CPU feature support variables are used in code generated by the compiler
+ // to guard execution of instructions that cannot be assumed to be always supported.
+ switch GOARCH {
+ case "386", "amd64":
+ x86HasPOPCNT = cpu.X86.HasPOPCNT
+ x86HasSSE41 = cpu.X86.HasSSE41
+ x86HasFMA = cpu.X86.HasFMA
+
+ case "arm":
+ armHasVFPv4 = cpu.ARM.HasVFPv4
+
+ case "arm64":
+ arm64HasATOMICS = cpu.ARM64.HasATOMICS
+ }
+}
+
+// The bootstrap sequence is:
+//
+// call osinit
+// call schedinit
+// make & queue new G
+// call runtime·mstart
+//
+// The new G calls runtime·main.
+func schedinit() {
+ lockInit(&sched.lock, lockRankSched)
+ lockInit(&sched.sysmonlock, lockRankSysmon)
+ lockInit(&sched.deferlock, lockRankDefer)
+ lockInit(&sched.sudoglock, lockRankSudog)
+ lockInit(&deadlock, lockRankDeadlock)
+ lockInit(&paniclk, lockRankPanic)
+ lockInit(&allglock, lockRankAllg)
+ lockInit(&allpLock, lockRankAllp)
+ lockInit(&reflectOffs.lock, lockRankReflectOffs)
+ lockInit(&finlock, lockRankFin)
+ lockInit(&trace.bufLock, lockRankTraceBuf)
+ lockInit(&trace.stringsLock, lockRankTraceStrings)
+ lockInit(&trace.lock, lockRankTrace)
+ lockInit(&cpuprof.lock, lockRankCpuprof)
+ lockInit(&trace.stackTab.lock, lockRankTraceStackTab)
+ // Enforce that this lock is always a leaf lock.
+ // All of this lock's critical sections should be
+ // extremely short.
+ lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
+
+ // raceinit must be the first call to race detector.
+ // In particular, it must be done before mallocinit below calls racemapshadow.
+ _g_ := getg()
+ if raceenabled {
+ _g_.racectx, raceprocctx0 = raceinit()
+ }
+
+ sched.maxmcount = 10000
+
+ // The world starts stopped.
+ worldStopped()
+
+ moduledataverify()
+ stackinit()
+ mallocinit()
+ cpuinit() // must run before alginit
+ alginit() // maps, hash, fastrand must not be used before this call
+ fastrandinit() // must run before mcommoninit
+ mcommoninit(_g_.m, -1)
+ modulesinit() // provides activeModules
+ typelinksinit() // uses maps, activeModules
+ itabsinit() // uses activeModules
+ stkobjinit() // must run before GC starts
+
+ sigsave(&_g_.m.sigmask)
+ initSigmask = _g_.m.sigmask
+
+ if offset := unsafe.Offsetof(sched.timeToRun); offset%8 != 0 {
+ println(offset)
+ throw("sched.timeToRun not aligned to 8 bytes")
+ }
+
+ goargs()
+ goenvs()
+ parsedebugvars()
+ gcinit()
+
+ lock(&sched.lock)
+ sched.lastpoll = uint64(nanotime())
+ procs := ncpu
+ if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
+ procs = n
+ }
+ if procresize(procs) != nil {
+ throw("unknown runnable goroutine during bootstrap")
+ }
+ unlock(&sched.lock)
+
+ // World is effectively started now, as P's can run.
+ worldStarted()
+
+ // For cgocheck > 1, we turn on the write barrier at all times
+ // and check all pointer writes. We can't do this until after
+ // procresize because the write barrier needs a P.
+ if debug.cgocheck > 1 {
+ writeBarrier.cgo = true
+ writeBarrier.enabled = true
+ for _, p := range allp {
+ p.wbBuf.reset()
+ }
+ }
+
+ if buildVersion == "" {
+ // Condition should never trigger. This code just serves
+ // to ensure runtime·buildVersion is kept in the resulting binary.
+ buildVersion = "unknown"
+ }
+ if len(modinfo) == 1 {
+ // Condition should never trigger. This code just serves
+ // to ensure runtime·modinfo is kept in the resulting binary.
+ modinfo = ""
+ }
+}
+
+func dumpgstatus(gp *g) {
+ _g_ := getg()
+ print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
+ print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
+}
+
+// sched.lock must be held.
+func checkmcount() {
+ assertLockHeld(&sched.lock)
+
+ if mcount() > sched.maxmcount {
+ print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
+ throw("thread exhaustion")
+ }
+}
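+
+// (The default limit of 10000 is set in schedinit above; user code can
+// adjust it via runtime/debug.SetMaxThreads, which updates maxmcount.)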
+
+// mReserveID returns the next ID to use for a new m. This new m is immediately
+// considered 'running' by checkdead.
+//
+// sched.lock must be held.
+func mReserveID() int64 {
+ assertLockHeld(&sched.lock)
+
+ if sched.mnext+1 < sched.mnext {
+ throw("runtime: thread ID overflow")
+ }
+ id := sched.mnext
+ sched.mnext++
+ checkmcount()
+ return id
+}
+
+// Pre-allocated ID may be passed as 'id', or omitted by passing -1.
+func mcommoninit(mp *m, id int64) {
+ _g_ := getg()
+
+ // g0 stack won't make sense for user (and is not necessarily unwindable).
+ if _g_ != _g_.m.g0 {
+ callers(1, mp.createstack[:])
+ }
+
+ lock(&sched.lock)
+
+ if id >= 0 {
+ mp.id = id
+ } else {
+ mp.id = mReserveID()
+ }
+
+ lo := uint32(int64Hash(uint64(mp.id), fastrandseed))
+ hi := uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
+ if lo|hi == 0 {
+ hi = 1
+ }
+ // Same behavior as for 1.17.
+ // TODO: Simplify this.
+ if goarch.BigEndian {
+ mp.fastrand = uint64(lo)<<32 | uint64(hi)
+ } else {
+ mp.fastrand = uint64(hi)<<32 | uint64(lo)
+ }
+
+ mpreinit(mp)
+ if mp.gsignal != nil {
+ mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
+ }
+
+ // Add to allm so garbage collector doesn't free g->m
+ // when it is just in a register or thread-local storage.
+ mp.alllink = allm
+
+ // NumCgoCall() iterates over allm w/o schedlock,
+ // so we need to publish it safely.
+ atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
+ unlock(&sched.lock)
+
+ // Allocate memory to hold a cgo traceback if the cgo call crashes.
+ if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
+ mp.cgoCallers = new(cgoCallers)
+ }
+}
+
+var fastrandseed uintptr
+
+func fastrandinit() {
+ s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
+ getRandomData(s)
+}
+
+// Mark gp ready to run.
+func ready(gp *g, traceskip int, next bool) {
+ if trace.enabled {
+ traceGoUnpark(gp, traceskip)
+ }
+
+ status := readgstatus(gp)
+
+ // Mark runnable.
+ _g_ := getg()
+ mp := acquirem() // disable preemption because it can be holding p in a local var
+ if status&^_Gscan != _Gwaiting {
+ dumpgstatus(gp)
+ throw("bad g->status in ready")
+ }
+
+ // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ runqput(_g_.m.p.ptr(), gp, next)
+ wakep()
+ releasem(mp)
+}
+
+// freezeStopWait is a large value that freezetheworld sets
+// sched.stopwait to in order to request that all Gs permanently stop.
+const freezeStopWait = 0x7fffffff
+
+// freezing is set to non-zero if the runtime is trying to freeze the
+// world.
+var freezing uint32
+
+// Similar to stopTheWorld but best-effort and can be called several times.
+// There is no reverse operation; it is used during crashing.
+// This function must not lock any mutexes.
+func freezetheworld() {
+ atomic.Store(&freezing, 1)
+ // stopwait and preemption requests can be lost
+ // due to races with concurrently executing threads,
+ // so try several times
+ for i := 0; i < 5; i++ {
+ // this should tell the scheduler to not start any new goroutines
+ sched.stopwait = freezeStopWait
+ atomic.Store(&sched.gcwaiting, 1)
+ // this should stop running goroutines
+ if !preemptall() {
+ break // no running goroutines
+ }
+ usleep(1000)
+ }
+ // to be sure
+ usleep(1000)
+ preemptall()
+ usleep(1000)
+}
+
+// All reads and writes of g's status go through readgstatus, casgstatus,
+// castogscanstatus, and casfrom_Gscanstatus.
+//
+//go:nosplit
+func readgstatus(gp *g) uint32 {
+ return atomic.Load(&gp.atomicstatus)
+}
+
+// The Gscanstatuses are acting like locks and this releases them.
+// If it proves to be a performance hit we should be able to make these
+// simple atomic stores but for now we are going to throw if
+// we see an inconsistent state.
+func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
+ success := false
+
+ // Check that transition is valid.
+ switch oldval {
+ default:
+ print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
+ dumpgstatus(gp)
+ throw("casfrom_Gscanstatus:top gp->status is not in scan state")
+ case _Gscanrunnable,
+ _Gscanwaiting,
+ _Gscanrunning,
+ _Gscansyscall,
+ _Gscanpreempted:
+ if newval == oldval&^_Gscan {
+ success = atomic.Cas(&gp.atomicstatus, oldval, newval)
+ }
+ }
+ if !success {
+ print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
+ dumpgstatus(gp)
+ throw("casfrom_Gscanstatus: gp->status is not in scan state")
+ }
+ releaseLockRank(lockRankGscan)
+}
+
+// This will return false if the gp is not in the expected status and the cas fails.
+// This acts like a lock acquire while casfrom_Gscanstatus acts like a lock release.
+func castogscanstatus(gp *g, oldval, newval uint32) bool {
+ switch oldval {
+ case _Grunnable,
+ _Grunning,
+ _Gwaiting,
+ _Gsyscall:
+ if newval == oldval|_Gscan {
+ r := atomic.Cas(&gp.atomicstatus, oldval, newval)
+ if r {
+ acquireLockRank(lockRankGscan)
+ }
+ return r
+
+ }
+ }
+ print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
+ throw("castogscanstatus")
+ panic("not reached")
+}
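+
+// Together, castogscanstatus and casfrom_Gscanstatus implement a
+// lock-like protocol around a G's status. A minimal sketch of the
+// pattern (illustrative only; real call sites live in the GC and
+// preemption paths):
+//
+// if castogscanstatus(gp, _Grunnable, _Gscanrunnable) {
+// // gp cannot change status while the _Gscan bit is held,
+// // so its stack may be examined safely here.
+// casfrom_Gscanstatus(gp, _Gscanrunnable, _Grunnable)
+// }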
+
+// If asked to move to or from a Gscanstatus this will throw. Use castogscanstatus
+// and casfrom_Gscanstatus instead.
+// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
+// put it in the Gscan state is finished.
+//
+//go:nosplit
+func casgstatus(gp *g, oldval, newval uint32) {
+ if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
+ systemstack(func() {
+ print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
+ throw("casgstatus: bad incoming values")
+ })
+ }
+
+ acquireLockRank(lockRankGscan)
+ releaseLockRank(lockRankGscan)
+
+ // See https://golang.org/cl/21503 for justification of the yield delay.
+ const yieldDelay = 5 * 1000
+ var nextYield int64
+
+ // loop if gp->atomicstatus is in a scan state giving
+ // GC time to finish and change the state to oldval.
+ for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
+ if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
+ throw("casgstatus: waiting for Gwaiting but is Grunnable")
+ }
+ if i == 0 {
+ nextYield = nanotime() + yieldDelay
+ }
+ if nanotime() < nextYield {
+ for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
+ procyield(1)
+ }
+ } else {
+ osyield()
+ nextYield = nanotime() + yieldDelay/2
+ }
+ }
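+ // (The loop above backs off in two stages: cheap procyield spins
+ // until yieldDelay has elapsed, then osyield with the delay halved.)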
+
+ // Handle tracking for scheduling latencies.
+ if oldval == _Grunning {
+ // Track every 8th time a goroutine transitions out of running.
+ if gp.trackingSeq%gTrackingPeriod == 0 {
+ gp.tracking = true
+ }
+ gp.trackingSeq++
+ }
+ if gp.tracking {
+ if oldval == _Grunnable {
+ // We transitioned out of runnable, so measure how much
+ // time we spent in this state and add it to
+ // runnableTime.
+ now := nanotime()
+ gp.runnableTime += now - gp.runnableStamp
+ gp.runnableStamp = 0
+ }
+ if newval == _Grunnable {
+ // We just transitioned into runnable, so record what
+ // time that happened.
+ now := nanotime()
+ gp.runnableStamp = now
+ } else if newval == _Grunning {
+ // We're transitioning into running, so turn off
+ // tracking and record how much time we spent in
+ // runnable.
+ gp.tracking = false
+ sched.timeToRun.record(gp.runnableTime)
+ gp.runnableTime = 0
+ }
+ }
+}
+
+// casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
+// Returns old status. Cannot call casgstatus directly, because we are racing with an
+// async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
+// it might have become Grunnable by the time we get to the cas. If we called casgstatus,
+// it would loop waiting for the status to go back to Gwaiting, which it never will.
+//
+//go:nosplit
+func casgcopystack(gp *g) uint32 {
+ for {
+ oldstatus := readgstatus(gp) &^ _Gscan
+ if oldstatus != _Gwaiting && oldstatus != _Grunnable {
+ throw("copystack: bad status, not Gwaiting or Grunnable")
+ }
+ if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
+ return oldstatus
+ }
+ }
+}
+
+// casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted.
+//
+// TODO(austin): This is the only status operation that both changes
+// the status and locks the _Gscan bit. Rethink this.
+func casGToPreemptScan(gp *g, old, new uint32) {
+ if old != _Grunning || new != _Gscan|_Gpreempted {
+ throw("bad g transition")
+ }
+ acquireLockRank(lockRankGscan)
+ for !atomic.Cas(&gp.atomicstatus, _Grunning, _Gscan|_Gpreempted) {
+ }
+}
+
+// casGFromPreempted attempts to transition gp from _Gpreempted to
+// _Gwaiting. If successful, the caller is responsible for
+// re-scheduling gp.
+func casGFromPreempted(gp *g, old, new uint32) bool {
+ if old != _Gpreempted || new != _Gwaiting {
+ throw("bad g transition")
+ }
+ return atomic.Cas(&gp.atomicstatus, _Gpreempted, _Gwaiting)
+}
+
+// stopTheWorld stops all P's from executing goroutines, interrupts
+// all goroutines at GC safe points, and records reason as the reason
+// for the stop. On return, only the current goroutine's P is running.
+// stopTheWorld must not be called from a system stack and the caller
+// must not hold worldsema. The caller must call startTheWorld when
+// other P's should resume execution.
+//
+// stopTheWorld is safe for multiple goroutines to call at the
+// same time. Each will execute its own stop, and the stops will
+// be serialized.
+//
+// This is also used by routines that do stack dumps. If the system is
+// in panic or being exited, this may not reliably stop all
+// goroutines.
+func stopTheWorld(reason string) {
+ semacquire(&worldsema)
+ gp := getg()
+ gp.m.preemptoff = reason
+ systemstack(func() {
+ // Mark the goroutine which called stopTheWorld preemptible so its
+ // stack may be scanned.
+ // This lets a mark worker scan us while we try to stop the world
+ // since otherwise we could get in a mutual preemption deadlock.
+ // We must not modify anything on the G stack because a stack shrink
+ // may occur. A stack shrink is otherwise OK though because in order
+ // to return from this function (and to leave the system stack) we
+ // must have preempted all goroutines, including any attempting
+ // to scan our stack, in which case, any stack shrinking will
+ // have already completed by the time we exit.
+ casgstatus(gp, _Grunning, _Gwaiting)
+ stopTheWorldWithSema()
+ casgstatus(gp, _Gwaiting, _Grunning)
+ })
+}
+
+// startTheWorld undoes the effects of stopTheWorld.
+func startTheWorld() {
+ systemstack(func() { startTheWorldWithSema(false) })
+
+ // worldsema must be held over startTheWorldWithSema to ensure
+ // gomaxprocs cannot change while worldsema is held.
+ //
+ // Release worldsema with direct handoff to the next waiter, but
+ // acquirem so that semrelease1 doesn't try to yield our time.
+ //
+ // Otherwise if e.g. ReadMemStats is being called in a loop,
+ // it might stomp on other attempts to stop the world, such as
+ // for starting or ending GC. The operation this blocks is
+ // so heavy-weight that we should just try to be as fair as
+ // possible here.
+ //
+ // We don't want to just allow us to get preempted between now
+ // and releasing the semaphore because then we keep everyone
+ // (including, for example, GCs) waiting longer.
+ mp := acquirem()
+ mp.preemptoff = ""
+ semrelease1(&worldsema, true, 0)
+ releasem(mp)
+}
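+
+// A sketch of the stop/start pairing as a caller such as ReadMemStats
+// uses it (illustrative only):
+//
+// stopTheWorld("read mem stats")
+// // ... read global state while no other goroutine can run ...
+// startTheWorld()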
+
+// stopTheWorldGC has the same effect as stopTheWorld, but blocks
+// until the GC is not running. It also blocks a GC from starting
+// until startTheWorldGC is called.
+func stopTheWorldGC(reason string) {
+ semacquire(&gcsema)
+ stopTheWorld(reason)
+}
+
+// startTheWorldGC undoes the effects of stopTheWorldGC.
+func startTheWorldGC() {
+ startTheWorld()
+ semrelease(&gcsema)
+}
+
+// Holding worldsema grants an M the right to try to stop the world.
+var worldsema uint32 = 1
+
+// Holding gcsema grants the M the right to block a GC, and blocks
+// until the current GC is done. In particular, it prevents gomaxprocs
+// from changing concurrently.
+//
+// TODO(mknyszek): Once gomaxprocs and the execution tracer can handle
+// being changed/enabled during a GC, remove this.
+var gcsema uint32 = 1
+
+// stopTheWorldWithSema is the core implementation of stopTheWorld.
+// The caller is responsible for acquiring worldsema and disabling
+// preemption first, and then should call stopTheWorldWithSema on the
+// system stack:
+//
+// semacquire(&worldsema, 0)
+// m.preemptoff = "reason"
+// systemstack(stopTheWorldWithSema)
+//
+// When finished, the caller must either call startTheWorld or undo
+// these three operations separately:
+//
+// m.preemptoff = ""
+// systemstack(startTheWorldWithSema)
+// semrelease(&worldsema)
+//
+// It is allowed to acquire worldsema once and then execute multiple
+// startTheWorldWithSema/stopTheWorldWithSema pairs.
+// Other P's are able to execute between successive calls to
+// startTheWorldWithSema and stopTheWorldWithSema.
+// Holding worldsema causes any other goroutines invoking
+// stopTheWorld to block.
+func stopTheWorldWithSema() {
+ _g_ := getg()
+
+ // If we hold a lock, then we won't be able to stop another M
+ // that is blocked trying to acquire the lock.
+ if _g_.m.locks > 0 {
+ throw("stopTheWorld: holding locks")
+ }
+
+ lock(&sched.lock)
+ sched.stopwait = gomaxprocs
+ atomic.Store(&sched.gcwaiting, 1)
+ preemptall()
+ // stop current P
+ _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
+ sched.stopwait--
+ // try to retake all P's in Psyscall status
+ for _, p := range allp {
+ s := p.status
+ if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
+ if trace.enabled {
+ traceGoSysBlock(p)
+ traceProcStop(p)
+ }
+ p.syscalltick++
+ sched.stopwait--
+ }
+ }
+ // stop idle P's
+ now := nanotime()
+ for {
+ p, _ := pidleget(now)
+ if p == nil {
+ break
+ }
+ p.status = _Pgcstop
+ sched.stopwait--
+ }
+ wait := sched.stopwait > 0
+ unlock(&sched.lock)
+
+ // wait for remaining P's to stop voluntarily
+ if wait {
+ for {
+ // wait for 100us, then try to re-preempt in case of any races
+ if notetsleep(&sched.stopnote, 100*1000) {
+ noteclear(&sched.stopnote)
+ break
+ }
+ preemptall()
+ }
+ }
+
+ // sanity checks
+ bad := ""
+ if sched.stopwait != 0 {
+ bad = "stopTheWorld: not stopped (stopwait != 0)"
+ } else {
+ for _, p := range allp {
+ if p.status != _Pgcstop {
+ bad = "stopTheWorld: not stopped (status != _Pgcstop)"
+ }
+ }
+ }
+ if atomic.Load(&freezing) != 0 {
+ // Some other thread is panicking. This can cause the
+ // sanity checks above to fail if the panic happens in
+ // the signal handler on a stopped thread. Either way,
+ // we should halt this thread.
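+ // Locking deadlock twice never succeeds: the second lock
+ // blocks this thread forever.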
+ lock(&deadlock)
+ lock(&deadlock)
+ }
+ if bad != "" {
+ throw(bad)
+ }
+
+ worldStopped()
+}
+
+func startTheWorldWithSema(emitTraceEvent bool) int64 {
+ assertWorldStopped()
+
+ mp := acquirem() // disable preemption because it can be holding p in a local var
+ if netpollinited() {
+ list := netpoll(0) // non-blocking
+ injectglist(&list)
+ }
+ lock(&sched.lock)
+
+ procs := gomaxprocs
+ if newprocs != 0 {
+ procs = newprocs
+ newprocs = 0
+ }
+ p1 := procresize(procs)
+ sched.gcwaiting = 0
+ if sched.sysmonwait != 0 {
+ sched.sysmonwait = 0
+ notewakeup(&sched.sysmonnote)
+ }
+ unlock(&sched.lock)
+
+ worldStarted()
+
+ for p1 != nil {
+ p := p1
+ p1 = p1.link.ptr()
+ if p.m != 0 {
+ mp := p.m.ptr()
+ p.m = 0
+ if mp.nextp != 0 {
+ throw("startTheWorld: inconsistent mp->nextp")
+ }
+ mp.nextp.set(p)
+ notewakeup(&mp.park)
+ } else {
+ // Start M to run P. Do not start another M below.
+ newm(nil, p, -1)
+ }
+ }
+
+ // Capture start-the-world time before doing clean-up tasks.
+ startTime := nanotime()
+ if emitTraceEvent {
+ traceGCSTWDone()
+ }
+
+ // Wake up an additional proc in case we have excess runnable goroutines
+ // in local queues or in the global queue. If we don't, the proc will park itself.
+ // If we have lots of excess work, resetspinning will unpark additional procs as necessary.
+ wakep()
+
+ releasem(mp)
+
+ return startTime
+}
+
+// usesLibcall indicates whether this runtime performs system calls
+// via libcall.
+func usesLibcall() bool {
+ switch GOOS {
+ case "aix", "darwin", "illumos", "ios", "solaris", "windows":
+ return true
+ case "openbsd":
+ return GOARCH == "386" || GOARCH == "amd64" || GOARCH == "arm" || GOARCH == "arm64"
+ }
+ return false
+}
+
+// mStackIsSystemAllocated indicates whether this runtime starts on a
+// system-allocated stack.
+func mStackIsSystemAllocated() bool {
+ switch GOOS {
+ case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
+ return true
+ case "openbsd":
+ switch GOARCH {
+ case "386", "amd64", "arm", "arm64":
+ return true
+ }
+ }
+ return false
+}
+
+// mstart is the entry-point for new Ms.
+// It is written in assembly, uses ABI0, is marked TOPFRAME, and calls mstart0.
+func mstart()
+
+// mstart0 is the Go entry-point for new Ms.
+// This must not split the stack because we may not even have stack
+// bounds set up yet.
+//
+// May run during STW (because it doesn't have a P yet), so write
+// barriers are not allowed.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func mstart0() {
+ _g_ := getg()
+
+ osStack := _g_.stack.lo == 0
+ if osStack {
+ // Initialize stack bounds from system stack.
+ // Cgo may have left stack size in stack.hi.
+ // minit may update the stack bounds.
+ //
+ // Note: these bounds may not be very accurate.
+ // We set hi to &size, but there are things above
+ // it. The 1024 is supposed to compensate for this,
+ // but is somewhat arbitrary.
+ size := _g_.stack.hi
+ if size == 0 {
+ size = 8192 * sys.StackGuardMultiplier
+ }
+ _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
+ _g_.stack.lo = _g_.stack.hi - size + 1024
+ }
+ // Initialize stack guard so that we can start calling regular
+ // Go code.
+ _g_.stackguard0 = _g_.stack.lo + _StackGuard
+ // This is the g0, so we can also call go:systemstack
+ // functions, which check stackguard1.
+ _g_.stackguard1 = _g_.stackguard0
+ mstart1()
+
+ // Exit this thread.
+ if mStackIsSystemAllocated() {
+ // Windows, Solaris, illumos, Darwin, AIX and Plan 9 always system-allocate
+ // the stack, but put it in _g_.stack before mstart,
+ // so the logic above hasn't set osStack yet.
+ osStack = true
+ }
+ mexit(osStack)
+}
+
+// The go:noinline is to guarantee the getcallerpc/getcallersp below are safe,
+// so that we can set up g0.sched to return to the call of mstart1 above.
+//
+//go:noinline
+func mstart1() {
+ _g_ := getg()
+
+ if _g_ != _g_.m.g0 {
+ throw("bad runtime·mstart")
+ }
+
+ // Set up m.g0.sched as a label returning to just
+ // after the mstart1 call in mstart0 above, for use by goexit0 and mcall.
+ // We're never coming back to mstart1 after we call schedule,
+ // so other calls can reuse the current frame.
+ // And goexit0 does a gogo that needs to return from mstart1
+ // and let mstart0 exit the thread.
+ _g_.sched.g = guintptr(unsafe.Pointer(_g_))
+ _g_.sched.pc = getcallerpc()
+ _g_.sched.sp = getcallersp()
+
+ asminit()
+ minit()
+
+ // Install signal handlers; after minit so that minit can
+ // prepare the thread to be able to handle the signals.
+ if _g_.m == &m0 {
+ mstartm0()
+ }
+
+ if fn := _g_.m.mstartfn; fn != nil {
+ fn()
+ }
+
+ if _g_.m != &m0 {
+ acquirep(_g_.m.nextp.ptr())
+ _g_.m.nextp = 0
+ }
+ schedule()
+}
+
+// mstartm0 implements part of mstart1 that only runs on the m0.
+//
+// Write barriers are allowed here because we know the GC can't be
+// running yet, so they'll be no-ops.
+//
+//go:yeswritebarrierrec
+func mstartm0() {
+ // Create an extra M for callbacks on threads not created by Go.
+ // An extra M is also needed on Windows for callbacks created by
+ // syscall.NewCallback. See issue #6751 for details.
+ if (iscgo || GOOS == "windows") && !cgoHasExtraM {
+ cgoHasExtraM = true
+ newextram()
+ }
+ initsig(false)
+}
+
+// mPark causes a thread to park itself, returning once woken.
+//
+//go:nosplit
+func mPark() {
+ gp := getg()
+ notesleep(&gp.m.park)
+ noteclear(&gp.m.park)
+}
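+
+// A waker hands the parked M its work before waking it; a sketch of
+// the pairing (following what startm and startTheWorldWithSema do
+// elsewhere in this file):
+//
+// mp.nextp.set(_p_) // give the M a P to run
+// notewakeup(&mp.park) // mPark returns on the parked thread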
+
+// mexit tears down and exits the current thread.
+//
+// Don't call this directly to exit the thread, since it must run at
+// the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
+// unwind the stack to the point that exits the thread.
+//
+// It is entered with m.p != nil, so write barriers are allowed. It
+// will release the P before exiting.
+//
+//go:yeswritebarrierrec
+func mexit(osStack bool) {
+ g := getg()
+ m := g.m
+
+ if m == &m0 {
+ // This is the main thread. Just wedge it.
+ //
+ // On Linux, exiting the main thread puts the process
+ // into a non-waitable zombie state. On Plan 9,
+ // exiting the main thread unblocks wait even though
+ // other threads are still running. On Solaris we can
+ // neither exitThread nor return from mstart. Other
+ // bad things probably happen on other platforms.
+ //
+ // We could try to clean up this M more before wedging
+ // it, but that complicates signal handling.
+ handoffp(releasep())
+ lock(&sched.lock)
+ sched.nmfreed++
+ checkdead()
+ unlock(&sched.lock)
+ mPark()
+ throw("locked m0 woke up")
+ }
+
+ sigblock(true)
+ unminit()
+
+ // Free the gsignal stack.
+ if m.gsignal != nil {
+ stackfree(m.gsignal.stack)
+ // On some platforms, when calling into VDSO (e.g. nanotime)
+ // we store our g on the gsignal stack, if there is one.
+ // Now the stack is freed, unlink it from the m, so we
+ // won't write to it when calling VDSO code.
+ m.gsignal = nil
+ }
+
+ // Remove m from allm.
+ lock(&sched.lock)
+ for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
+ if *pprev == m {
+ *pprev = m.alllink
+ goto found
+ }
+ }
+ throw("m not found in allm")
+found:
+ if !osStack {
+ // Delay reaping m until it's done with the stack.
+ //
+ // If this is using an OS stack, the OS will free it
+ // so there's no need for reaping.
+ atomic.Store(&m.freeWait, 1)
+ // Put m on the free list, though it will not be reaped until
+ // freeWait is 0. Note that the free list must not be linked
+ // through alllink because some functions walk allm without
+ // locking, so may be using alllink.
+ m.freelink = sched.freem
+ sched.freem = m
+ }
+ unlock(&sched.lock)
+
+ atomic.Xadd64(&ncgocall, int64(m.ncgocall))
+
+ // Release the P.
+ handoffp(releasep())
+ // After this point we must not have write barriers.
+
+ // Invoke the deadlock detector. This must happen after
+ // handoffp because it may have started a new M to take our
+ // P's work.
+ lock(&sched.lock)
+ sched.nmfreed++
+ checkdead()
+ unlock(&sched.lock)
+
+ if GOOS == "darwin" || GOOS == "ios" {
+ // Make sure pendingPreemptSignals is correct when an M exits.
+ // For #41702.
+ if atomic.Load(&m.signalPending) != 0 {
+ atomic.Xadd(&pendingPreemptSignals, -1)
+ }
+ }
+
+ // Destroy all allocated resources. After this is called, we may no
+ // longer take any locks.
+ mdestroy(m)
+
+ if osStack {
+ // Return from mstart and let the system thread
+ // library free the g0 stack and terminate the thread.
+ return
+ }
+
+ // mstart is the thread's entry point, so there's nothing to
+ // return to. Exit the thread directly. exitThread will clear
+ // m.freeWait when it's done with the stack and the m can be
+ // reaped.
+ exitThread(&m.freeWait)
+}
+
+// forEachP calls fn(p) for every P p when p reaches a GC safe point.
+// If a P is currently executing code, this will bring the P to a GC
+// safe point and execute fn on that P. If the P is not executing code
+// (it is idle or in a syscall), this will call fn(p) directly while
+// preventing the P from exiting its state. This does not ensure that
+// fn will run on every CPU executing Go code, but it acts as a global
+// memory barrier. GC uses this as a "ragged barrier."
+//
+// The caller must hold worldsema.
+//
+//go:systemstack
+func forEachP(fn func(*p)) {
+ mp := acquirem()
+ _p_ := getg().m.p.ptr()
+
+ lock(&sched.lock)
+ if sched.safePointWait != 0 {
+ throw("forEachP: sched.safePointWait != 0")
+ }
+ sched.safePointWait = gomaxprocs - 1
+ sched.safePointFn = fn
+
+ // Ask all Ps to run the safe point function.
+ for _, p := range allp {
+ if p != _p_ {
+ atomic.Store(&p.runSafePointFn, 1)
+ }
+ }
+ preemptall()
+
+ // Any P entering _Pidle or _Psyscall from now on will observe
+ // p.runSafePointFn == 1 and will call runSafePointFn when
+ // changing its status to _Pidle/_Psyscall.
+
+ // Run safe point function for all idle Ps. sched.pidle will
+ // not change because we hold sched.lock.
+ for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
+ if atomic.Cas(&p.runSafePointFn, 1, 0) {
+ fn(p)
+ sched.safePointWait--
+ }
+ }
+
+ wait := sched.safePointWait > 0
+ unlock(&sched.lock)
+
+ // Run fn for the current P.
+ fn(_p_)
+
+ // Force Ps currently in _Psyscall into _Pidle and hand them
+ // off to induce safe point function execution.
+ for _, p := range allp {
+ s := p.status
+ if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
+ if trace.enabled {
+ traceGoSysBlock(p)
+ traceProcStop(p)
+ }
+ p.syscalltick++
+ handoffp(p)
+ }
+ }
+
+ // Wait for remaining Ps to run fn.
+ if wait {
+ for {
+ // Wait for 100us, then try to re-preempt in
+ // case of any races.
+ //
+ // Requires system stack.
+ if notetsleep(&sched.safePointNote, 100*1000) {
+ noteclear(&sched.safePointNote)
+ break
+ }
+ preemptall()
+ }
+ }
+ if sched.safePointWait != 0 {
+ throw("forEachP: not done")
+ }
+ for _, p := range allp {
+ if p.runSafePointFn != 0 {
+ throw("forEachP: P did not run fn")
+ }
+ }
+
+ lock(&sched.lock)
+ sched.safePointFn = nil
+ unlock(&sched.lock)
+ releasem(mp)
+}
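+
+// A sketch of forEachP as the GC's "ragged barrier" (illustrative;
+// the real call sites flush per-P GC state such as write-barrier
+// buffers):
+//
+// systemstack(func() {
+// forEachP(func(_p_ *p) {
+// // flush _p_'s buffers at a GC safe point
+// })
+// })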
+
+// runSafePointFn runs the safe point function, if any, for this P.
+// This should be called like
+//
+// if getg().m.p.runSafePointFn != 0 {
+// runSafePointFn()
+// }
+//
+// runSafePointFn must be checked on any transition in to _Pidle or
+// _Psyscall to avoid a race where forEachP sees that the P is running
+// just before the P goes into _Pidle/_Psyscall and neither forEachP
+// nor the P run the safe-point function.
+func runSafePointFn() {
+ p := getg().m.p.ptr()
+ // Resolve the race between forEachP running the safe-point
+ // function on this P's behalf and this P running the
+ // safe-point function directly.
+ if !atomic.Cas(&p.runSafePointFn, 1, 0) {
+ return
+ }
+ sched.safePointFn(p)
+ lock(&sched.lock)
+ sched.safePointWait--
+ if sched.safePointWait == 0 {
+ notewakeup(&sched.safePointNote)
+ }
+ unlock(&sched.lock)
+}
+
+// When running with cgo, we call _cgo_thread_start
+// to start threads for us so that we can play nicely with
+// foreign code.
+var cgoThreadStart unsafe.Pointer
+
+type cgothreadstart struct {
+ g guintptr
+ tls *uint64
+ fn unsafe.Pointer
+}
+
+// Allocate a new m unassociated with any thread.
+// Can use p for allocation context if needed.
+// fn is recorded as the new m's m.mstartfn.
+// id is optional pre-allocated m ID. Omit by passing -1.
+//
+// This function is allowed to have write barriers even if the caller
+// isn't because it borrows _p_.
+//
+//go:yeswritebarrierrec
+func allocm(_p_ *p, fn func(), id int64) *m {
+ allocmLock.rlock()
+
+ // The caller owns _p_, but we may borrow (i.e., acquirep) it. We must
+ // disable preemption to ensure it is not stolen, which would make the
+ // caller lose ownership.
+ acquirem()
+
+ _g_ := getg()
+ if _g_.m.p == 0 {
+ acquirep(_p_) // temporarily borrow p for mallocs in this function
+ }
+
+ // Release the free M list. We need to do this somewhere and
+ // this may free up a stack we can use.
+ if sched.freem != nil {
+ lock(&sched.lock)
+ var newList *m
+ for freem := sched.freem; freem != nil; {
+ if freem.freeWait != 0 {
+ next := freem.freelink
+ freem.freelink = newList
+ newList = freem
+ freem = next
+ continue
+ }
+ // stackfree must be on the system stack, but allocm is
+ // reachable off the system stack transitively from
+ // startm.
+ systemstack(func() {
+ stackfree(freem.g0.stack)
+ })
+ freem = freem.freelink
+ }
+ sched.freem = newList
+ unlock(&sched.lock)
+ }
+
+ mp := new(m)
+ mp.mstartfn = fn
+ mcommoninit(mp, id)
+
+ // With cgo, or on Solaris, illumos, or Darwin, pthread_create will make us a stack.
+ // Windows and Plan 9 will lay out the sched stack on the OS stack.
+ if iscgo || mStackIsSystemAllocated() {
+ mp.g0 = malg(-1)
+ } else {
+ mp.g0 = malg(8192 * sys.StackGuardMultiplier)
+ }
+ mp.g0.m = mp
+
+ if _p_ == _g_.m.p.ptr() {
+ releasep()
+ }
+
+ releasem(_g_.m)
+ allocmLock.runlock()
+ return mp
+}
+
+// needm is called when a cgo callback happens on a
+// thread without an m (a thread not created by Go).
+// In this case, needm is expected to find an m to use
+// and return with m, g initialized correctly.
+// Since m and g are not set now (likely nil, but see below)
+// needm is limited in what routines it can call. In particular
+// it can only call nosplit functions (textflag 7) and cannot
+// do any scheduling that requires an m.
+//
+// In order to avoid needing heavy lifting here, we adopt
+// the following strategy: there is a stack of available m's
+// that can be stolen. Using compare-and-swap
+// to pop from the stack has ABA races, so we simulate
+// a lock by doing an exchange (via Casuintptr) to steal the stack
+// head and replace the top pointer with MLOCKED (1).
+// This serves as a simple spin lock that we can use even
+// without an m. The thread that locks the stack in this way
+// unlocks the stack by storing a valid stack head pointer.
+//
+// In order to make sure that there is always an m structure
+// available to be stolen, we maintain the invariant that there
+// is always one more than needed. At the beginning of the
+// program (if cgo is in use) the list is seeded with a single m.
+// If needm finds that it has taken the last m off the list, its job
+// is - once it has installed its own m so that it can do things like
+// allocate memory - to create a spare m and put it on the list.
+//
+// Each of these extra m's also has a g0 and a curg that are
+// pressed into service as the scheduling stack and current
+// goroutine for the duration of the cgo callback.
+//
+// When the callback is done with the m, it calls dropm to
+// put the m back on the list.
+//
+//go:nosplit
+func needm() {
+ if (iscgo || GOOS == "windows") && !cgoHasExtraM {
+ // Can happen if C/C++ code calls Go from a global ctor.
+ // Can also happen on Windows if a global ctor uses a
+ // callback created by syscall.NewCallback. See issue #6751
+ // for details.
+ //
+ // Can not throw, because scheduler is not initialized yet.
+ write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
+ exit(1)
+ }
+
+ // Save and block signals before getting an M.
+ // The signal handler may call needm itself,
+ // and we must avoid a deadlock. Also, once g is installed,
+ // any incoming signals will try to execute,
+ // but we won't have the sigaltstack settings and other data
+ // set up appropriately until the end of minit, which will
+ // unblock the signals. This is the same dance as when
+ // starting a new m to run Go code via newosproc.
+ var sigmask sigset
+ sigsave(&sigmask)
+ sigblock(false)
+
+ // Lock extra list, take head, unlock popped list.
+ // nilokay=false is safe here because of the invariant above,
+ // that the extra list always contains or will soon contain
+ // at least one m.
+ mp := lockextra(false)
+
+ // Set needextram when we've just emptied the list,
+ // so that the eventual call into cgocallbackg will
+ // allocate a new m for the extra list. We delay the
+ // allocation until then so that it can be done
+ // after exitsyscall makes sure it is okay to be
+ // running at all (that is, there's no garbage collection
+ // running right now).
+ mp.needextram = mp.schedlink == 0
+ extraMCount--
+ unlockextra(mp.schedlink.ptr())
+
+ // Store the original signal mask for use by minit.
+ mp.sigmask = sigmask
+
+ // Install TLS on some platforms (previously setg
+ // would do this if necessary).
+ osSetupTLS(mp)
+
+ // Install g (= m->g0) and set the stack bounds
+ // to match the current stack. We don't actually know
+ // how big the stack is, like we don't know how big any
+ // scheduling stack is, but we assume there's at least 32 kB,
+ // which is more than enough for us.
+ setg(mp.g0)
+ _g_ := getg()
+ _g_.stack.hi = getcallersp() + 1024
+ _g_.stack.lo = getcallersp() - 32*1024
+ _g_.stackguard0 = _g_.stack.lo + _StackGuard
+
+ // Initialize this thread to use the m.
+ asminit()
+ minit()
+
+ // mp.curg is now a real goroutine.
+ casgstatus(mp.curg, _Gdead, _Gsyscall)
+ atomic.Xadd(&sched.ngsys, -1)
+}
+
+var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
+
+// newextram allocates m's and puts them on the extra list.
+// It is called with a working local m, so that it can do things
+// like call schedlock and allocate.
+func newextram() {
+ c := atomic.Xchg(&extraMWaiters, 0)
+ if c > 0 {
+ for i := uint32(0); i < c; i++ {
+ oneNewExtraM()
+ }
+ } else {
+ // Make sure there is at least one extra M.
+ mp := lockextra(true)
+ unlockextra(mp)
+ if mp == nil {
+ oneNewExtraM()
+ }
+ }
+}
+
+// oneNewExtraM allocates an m and puts it on the extra list.
+func oneNewExtraM() {
+ // Create extra goroutine locked to extra m.
+ // The goroutine is the context in which the cgo callback will run.
+ // The sched.pc will never be returned to, but setting it to
+ // goexit makes clear to the traceback routines where
+ // the goroutine stack ends.
+ mp := allocm(nil, nil, -1)
+ gp := malg(4096)
+ gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
+ gp.sched.sp = gp.stack.hi
+ gp.sched.sp -= 4 * goarch.PtrSize // extra space in case of reads slightly beyond frame
+ gp.sched.lr = 0
+ gp.sched.g = guintptr(unsafe.Pointer(gp))
+ gp.syscallpc = gp.sched.pc
+ gp.syscallsp = gp.sched.sp
+ gp.stktopsp = gp.sched.sp
+ // malg returns status as _Gidle. Change to _Gdead before
+ // adding to allg where GC can see it. We use _Gdead to hide
+ // this from tracebacks and stack scans since it isn't a
+ // "real" goroutine until needm grabs it.
+ casgstatus(gp, _Gidle, _Gdead)
+ gp.m = mp
+ mp.curg = gp
+ mp.lockedInt++
+ mp.lockedg.set(gp)
+ gp.lockedm.set(mp)
+ gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
+ if raceenabled {
+ gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
+ }
+ // put on allg for garbage collector
+ allgadd(gp)
+
+ // gp is now on the allg list, but we don't want it to be
+ // counted by gcount. It would be more "proper" to increment
+ // sched.ngfree, but that requires locking. Incrementing ngsys
+ // has the same effect.
+ atomic.Xadd(&sched.ngsys, +1)
+
+ // Add m to the extra list.
+ mnext := lockextra(true)
+ mp.schedlink.set(mnext)
+ extraMCount++
+ unlockextra(mp)
+}
+
+// dropm is called when a cgo callback has called needm but is now
+// done with the callback and returning back into the non-Go thread.
+// It puts the current m back onto the extra list.
+//
+// The main expense here is the call to signalstack to release the
+// m's signal stack, and then the call to needm on the next callback
+// from this thread. It is tempting to try to save the m for next time,
+// which would eliminate both these costs, but there might not be
+// a next time: the current thread (which Go does not control) might exit.
+// If we saved the m for that thread, there would be an m leak each time
+// such a thread exited. Instead, we acquire and release an m on each
+// call. These should typically not be scheduling operations, just a few
+// atomics, so the cost should be small.
+//
+// TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
+// variable using pthread_key_create. Unlike the pthread keys we already use
+// on OS X, this dummy key would never be read by Go code. It would exist
+// only so that we could register a thread-exit-time destructor.
+// That destructor would put the m back onto the extra list.
+// This is purely a performance optimization. The current version,
+// in which dropm happens on each cgo call, is still correct too.
+// We may have to keep the current version on systems with cgo
+// but without pthreads, like Windows.
+func dropm() {
+ // Clear m and g, and return m to the extra list.
+ // After the call to setg we can only call nosplit functions
+ // with no pointer manipulation.
+ mp := getg().m
+
+ // Return mp.curg to dead state.
+ casgstatus(mp.curg, _Gsyscall, _Gdead)
+ mp.curg.preemptStop = false
+ atomic.Xadd(&sched.ngsys, +1)
+
+ // Block signals before unminit.
+ // Unminit unregisters the signal handling stack (but needs g on some systems).
+ // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
+ // It's important not to try to handle a signal between those two steps.
+ sigmask := mp.sigmask
+ sigblock(false)
+ unminit()
+
+ mnext := lockextra(true)
+ extraMCount++
+ mp.schedlink.set(mnext)
+
+ setg(nil)
+
+ // Commit the release of mp.
+ unlockextra(mp)
+
+ msigrestore(sigmask)
+}
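+
+// The needm/dropm lifecycle around a cgo callback, as driven by the
+// cgocallback path (a sketch):
+//
+// needm() // steal an extra M, install g0, set up signals
+// // ... run the Go callback on this foreign thread ...
+// dropm() // return the M to the extra list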
+
+// A helper function for EnsureDropM.
+func getm() uintptr {
+ return uintptr(unsafe.Pointer(getg().m))
+}
+
+var extram uintptr
+var extraMCount uint32 // Protected by lockextra
+var extraMWaiters uint32
+
+// lockextra locks the extra list and returns the list head.
+// The caller must unlock the list by storing a new list head
+// to extram. If nilokay is true, then lockextra will
+// return a nil list head if that's what it finds. If nilokay is false,
+// lockextra will keep waiting until the list head is no longer nil.
+//
+//go:nosplit
+func lockextra(nilokay bool) *m {
+ const locked = 1
+
+ incr := false
+ for {
+ old := atomic.Loaduintptr(&extram)
+ if old == locked {
+ osyield_no_g()
+ continue
+ }
+ if old == 0 && !nilokay {
+ if !incr {
+ // Add 1 to the number of threads
+ // waiting for an M.
+ // This is cleared by newextram.
+ atomic.Xadd(&extraMWaiters, 1)
+ incr = true
+ }
+ usleep_no_g(1)
+ continue
+ }
+ if atomic.Casuintptr(&extram, old, locked) {
+ return (*m)(unsafe.Pointer(old))
+ }
+ osyield_no_g()
+ continue
+ }
+}
+
+//go:nosplit
+func unlockextra(mp *m) {
+ atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
+}
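+
+// The locking discipline for the extra M list, as used by needm,
+// newextram, and dropm above (sketch):
+//
+// mp := lockextra(false) // spin until a non-nil head is claimed
+// // ... extram now holds the 'locked' sentinel; pop mp ...
+// unlockextra(mp.schedlink.ptr()) // publish new head, releasing the lock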
+
+var (
+ // allocmLock is locked for read when creating new Ms in allocm and their
+ // addition to allm. Thus acquiring this lock for write blocks the
+ // creation of new Ms.
+ allocmLock rwmutex
+
+ // execLock serializes exec and clone to avoid bugs or unspecified
+ // behaviour around exec'ing while creating/destroying threads. See
+ // issue #19546.
+ execLock rwmutex
+)
+
+// newmHandoff contains a list of m structures that need new OS threads.
+// This is used by newm in situations where newm itself can't safely
+// start an OS thread.
+var newmHandoff struct {
+ lock mutex
+
+ // newm points to a list of M structures that need new OS
+ // threads. The list is linked through m.schedlink.
+ newm muintptr
+
+ // waiting indicates that wake needs to be notified when an m
+ // is put on the list.
+ waiting bool
+ wake note
+
+ // haveTemplateThread indicates that the templateThread has
+ // been started. This is not protected by lock. Use cas to set
+ // to 1.
+ haveTemplateThread uint32
+}
+
+// Create a new m. It will start off with a call to fn, or else the scheduler.
+// fn needs to be static and not a heap allocated closure.
+// May run with m.p==nil, so write barriers are not allowed.
+//
+// id is optional pre-allocated m ID. Omit by passing -1.
+//
+//go:nowritebarrierrec
+func newm(fn func(), _p_ *p, id int64) {
+ // allocm adds a new M to allm, but they do not start until created by
+ // the OS in newm1 or the template thread.
+ //
+ // doAllThreadsSyscall requires that every M in allm will eventually
+ // start and be signal-able, even with a STW.
+ //
+ // Disable preemption here until we start the thread to ensure that
+ // newm is not preempted between allocm and starting the new thread,
+ // ensuring that anything added to allm is guaranteed to eventually
+ // start.
+ acquirem()
+
+ mp := allocm(_p_, fn, id)
+ mp.nextp.set(_p_)
+ mp.sigmask = initSigmask
+ if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
+ // We're on a locked M or a thread that may have been
+ // started by C. The kernel state of this thread may
+ // be strange (the user may have locked it for that
+ // purpose). We don't want to clone that into another
+ // thread. Instead, ask a known-good thread to create
+ // the thread for us.
+ //
+ // This is disabled on Plan 9. See golang.org/issue/22227.
+ //
+ // TODO: This may be unnecessary on Windows, which
+ // doesn't model thread creation off fork.
+ lock(&newmHandoff.lock)
+ if newmHandoff.haveTemplateThread == 0 {
+ throw("on a locked thread with no template thread")
+ }
+ mp.schedlink = newmHandoff.newm
+ newmHandoff.newm.set(mp)
+ if newmHandoff.waiting {
+ newmHandoff.waiting = false
+ notewakeup(&newmHandoff.wake)
+ }
+ unlock(&newmHandoff.lock)
+ // The M has not started yet, but the template thread does not
+ // participate in STW, so it will always process queued Ms and
+ // it is safe to releasem.
+ releasem(getg().m)
+ return
+ }
+ newm1(mp)
+ releasem(getg().m)
+}
+
+func newm1(mp *m) {
+ if iscgo {
+ var ts cgothreadstart
+ if _cgo_thread_start == nil {
+ throw("_cgo_thread_start missing")
+ }
+ ts.g.set(mp.g0)
+ ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
+ ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
+ if msanenabled {
+ msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
+ }
+ if asanenabled {
+ asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
+ }
+ execLock.rlock() // Prevent process clone.
+ asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
+ execLock.runlock()
+ return
+ }
+ execLock.rlock() // Prevent process clone.
+ newosproc(mp)
+ execLock.runlock()
+}
+
+// startTemplateThread starts the template thread if it is not already
+// running.
+//
+// The calling thread must itself be in a known-good state.
+func startTemplateThread() {
+ if GOARCH == "wasm" { // no threads on wasm yet
+ return
+ }
+
+ // Disable preemption to guarantee that the template thread will be
+ // created before a park once haveTemplateThread is set.
+ mp := acquirem()
+ if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
+ releasem(mp)
+ return
+ }
+ newm(templateThread, nil, -1)
+ releasem(mp)
+}
+
+// templateThread is a thread in a known-good state that exists solely
+// to start new threads in known-good states when the calling thread
+// may not be in a good state.
+//
+// Many programs never need this, so templateThread is started lazily
+// when we first enter a state that might lead to running on a thread
+// in an unknown state.
+//
+// templateThread runs on an M without a P, so it must not have write
+// barriers.
+//
+//go:nowritebarrierrec
+func templateThread() {
+ lock(&sched.lock)
+ sched.nmsys++
+ checkdead()
+ unlock(&sched.lock)
+
+ for {
+ lock(&newmHandoff.lock)
+ for newmHandoff.newm != 0 {
+ newm := newmHandoff.newm.ptr()
+ newmHandoff.newm = 0
+ unlock(&newmHandoff.lock)
+ for newm != nil {
+ next := newm.schedlink.ptr()
+ newm.schedlink = 0
+ newm1(newm)
+ newm = next
+ }
+ lock(&newmHandoff.lock)
+ }
+ newmHandoff.waiting = true
+ noteclear(&newmHandoff.wake)
+ unlock(&newmHandoff.lock)
+ notesleep(&newmHandoff.wake)
+ }
+}
+
+// Stops execution of the current m until new work is available.
+// Returns with acquired P.
+func stopm() {
+ _g_ := getg()
+
+ if _g_.m.locks != 0 {
+ throw("stopm holding locks")
+ }
+ if _g_.m.p != 0 {
+ throw("stopm holding p")
+ }
+ if _g_.m.spinning {
+ throw("stopm spinning")
+ }
+
+ lock(&sched.lock)
+ mput(_g_.m)
+ unlock(&sched.lock)
+ mPark()
+ acquirep(_g_.m.nextp.ptr())
+ _g_.m.nextp = 0
+}
+
+func mspinning() {
+ // startm's caller incremented nmspinning. Set the new M's spinning.
+ getg().m.spinning = true
+}
+
+// Schedules some M to run the p (creates an M if necessary).
+// If p==nil, tries to get an idle P; if there are no idle P's, does nothing.
+// May run with m.p==nil, so write barriers are not allowed.
+// If spinning is set, the caller has incremented nmspinning and startm will
+// either decrement nmspinning or set m.spinning in the newly started M.
+//
+// Callers passing a non-nil P must call from a non-preemptible context. See
+// comment on acquirem below.
+//
+// Must not have write barriers because this may be called without a P.
+//
+//go:nowritebarrierrec
+func startm(_p_ *p, spinning bool) {
+ // Disable preemption.
+ //
+ // Every owned P must have an owner that will eventually stop it in the
+ // event of a GC stop request. startm takes transient ownership of a P
+ // (either from argument or pidleget below) and transfers ownership to
+ // a started M, which will be responsible for performing the stop.
+ //
+ // Preemption must be disabled during this transient ownership,
+ // otherwise the P this is running on may enter GC stop while still
+ // holding the transient P, leaving that P in limbo and deadlocking the
+ // STW.
+ //
+ // Callers passing a non-nil P must already be in non-preemptible
+ // context, otherwise such preemption could occur on function entry to
+ // startm. Callers passing a nil P may be preemptible, so we must
+ // disable preemption before acquiring a P from pidleget below.
+ mp := acquirem()
+ lock(&sched.lock)
+ if _p_ == nil {
+ _p_, _ = pidleget(0)
+ if _p_ == nil {
+ unlock(&sched.lock)
+ if spinning {
+ // The caller incremented nmspinning, but there are no idle Ps,
+ // so it's okay to just undo the increment and give up.
+ if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
+ throw("startm: negative nmspinning")
+ }
+ }
+ releasem(mp)
+ return
+ }
+ }
+ nmp := mget()
+ if nmp == nil {
+ // No M is available, we must drop sched.lock and call newm.
+ // However, we already own a P to assign to the M.
+ //
+ // Once sched.lock is released, another G (e.g., in a syscall),
+ // could find no idle P while checkdead finds a runnable G but
+ // no running M's because this new M hasn't started yet, thus
+ // throwing in an apparent deadlock.
+ //
+ // Avoid this situation by pre-allocating the ID for the new M,
+ // thus marking it as 'running' before we drop sched.lock. This
+ // new M will eventually run the scheduler to execute any
+ // queued G's.
+ id := mReserveID()
+ unlock(&sched.lock)
+
+ var fn func()
+ if spinning {
+ // The caller incremented nmspinning, so set m.spinning in the new M.
+ fn = mspinning
+ }
+ newm(fn, _p_, id)
+ // Ownership transfer of _p_ committed by start in newm.
+ // Preemption is now safe.
+ releasem(mp)
+ return
+ }
+ unlock(&sched.lock)
+ if nmp.spinning {
+ throw("startm: m is spinning")
+ }
+ if nmp.nextp != 0 {
+ throw("startm: m has p")
+ }
+ if spinning && !runqempty(_p_) {
+ throw("startm: p has runnable gs")
+ }
+ // The caller incremented nmspinning, so set m.spinning in the new M.
+ nmp.spinning = spinning
+ nmp.nextp.set(_p_)
+ notewakeup(&nmp.park)
+ // Ownership transfer of _p_ committed by wakeup. Preemption is now
+ // safe.
+ releasem(mp)
+}
+
+// Hands off P from syscall or locked M.
+// Always runs without a P, so write barriers are not allowed.
+//
+//go:nowritebarrierrec
+func handoffp(_p_ *p) {
+ // handoffp must start an M in any situation where
+ // findrunnable would return a G to run on _p_.
+
+ // if it has local work, start it straight away
+ if !runqempty(_p_) || sched.runqsize != 0 {
+ startm(_p_, false)
+ return
+ }
+ // if there's trace work to do, start it straight away
+ if (trace.enabled || trace.shutdown) && traceReaderAvailable() {
+ startm(_p_, false)
+ return
+ }
+ // if it has GC work, start it straight away
+ if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
+ startm(_p_, false)
+ return
+ }
+ // no local work, check that there are no spinning/idle M's,
+ // otherwise our help is not required
+ if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
+ startm(_p_, true)
+ return
+ }
+ lock(&sched.lock)
+ if sched.gcwaiting != 0 {
+ _p_.status = _Pgcstop
+ sched.stopwait--
+ if sched.stopwait == 0 {
+ notewakeup(&sched.stopnote)
+ }
+ unlock(&sched.lock)
+ return
+ }
+ if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
+ sched.safePointFn(_p_)
+ sched.safePointWait--
+ if sched.safePointWait == 0 {
+ notewakeup(&sched.safePointNote)
+ }
+ }
+ if sched.runqsize != 0 {
+ unlock(&sched.lock)
+ startm(_p_, false)
+ return
+ }
+ // If this is the last running P and nobody is polling the network,
+ // we need to wake up another M to poll the network.
+ if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
+ unlock(&sched.lock)
+ startm(_p_, false)
+ return
+ }
+
+ // The scheduler lock cannot be held when calling wakeNetPoller below
+ // because wakeNetPoller may call wakep which may call startm.
+ when := nobarrierWakeTime(_p_)
+ pidleput(_p_, 0)
+ unlock(&sched.lock)
+
+ if when != 0 {
+ wakeNetPoller(when)
+ }
+}
+
+// Tries to add one more P to execute G's.
+// Called when a G is made runnable (newproc, ready).
+func wakep() {
+ if atomic.Load(&sched.npidle) == 0 {
+ return
+ }
+ // be conservative about spinning threads
+ if atomic.Load(&sched.nmspinning) != 0 || !atomic.Cas(&sched.nmspinning, 0, 1) {
+ return
+ }
+ startm(nil, true)
+}
+
+// Stops execution of the current m that is locked to a g until the g is runnable again.
+// Returns with acquired P.
+func stoplockedm() {
+ _g_ := getg()
+
+ if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
+ throw("stoplockedm: inconsistent locking")
+ }
+ if _g_.m.p != 0 {
+ // Schedule another M to run this p.
+ _p_ := releasep()
+ handoffp(_p_)
+ }
+ incidlelocked(1)
+ // Wait until another thread schedules lockedg again.
+ mPark()
+ status := readgstatus(_g_.m.lockedg.ptr())
+ if status&^_Gscan != _Grunnable {
+ print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
+ dumpgstatus(_g_.m.lockedg.ptr())
+ throw("stoplockedm: not runnable")
+ }
+ acquirep(_g_.m.nextp.ptr())
+ _g_.m.nextp = 0
+}
+
+// Schedules the locked m to run the locked gp.
+// May run during STW, so write barriers are not allowed.
+//
+//go:nowritebarrierrec
+func startlockedm(gp *g) {
+ _g_ := getg()
+
+ mp := gp.lockedm.ptr()
+ if mp == _g_.m {
+ throw("startlockedm: locked to me")
+ }
+ if mp.nextp != 0 {
+ throw("startlockedm: m has p")
+ }
+ // directly handoff current P to the locked m
+ incidlelocked(-1)
+ _p_ := releasep()
+ mp.nextp.set(_p_)
+ notewakeup(&mp.park)
+ stopm()
+}
+
+// Stops the current m for stopTheWorld.
+// Returns when the world is restarted.
+func gcstopm() {
+ _g_ := getg()
+
+ if sched.gcwaiting == 0 {
+ throw("gcstopm: not waiting for gc")
+ }
+ if _g_.m.spinning {
+ _g_.m.spinning = false
+ // OK to just drop nmspinning here,
+ // startTheWorld will unpark threads as necessary.
+ if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
+ throw("gcstopm: negative nmspinning")
+ }
+ }
+ _p_ := releasep()
+ lock(&sched.lock)
+ _p_.status = _Pgcstop
+ sched.stopwait--
+ if sched.stopwait == 0 {
+ notewakeup(&sched.stopnote)
+ }
+ unlock(&sched.lock)
+ stopm()
+}
+
+// Schedules gp to run on the current M.
+// If inheritTime is true, gp inherits the remaining time in the
+// current time slice. Otherwise, it starts a new time slice.
+// Never returns.
+//
+// Write barriers are allowed because this is called immediately after
+// acquiring a P in several places.
+//
+//go:yeswritebarrierrec
+func execute(gp *g, inheritTime bool) {
+ _g_ := getg()
+
+ if goroutineProfile.active {
+ // Make sure that gp has had its stack written out to the goroutine
+ // profile, exactly as it was when the goroutine profiler first stopped
+ // the world.
+ tryRecordGoroutineProfile(gp, osyield)
+ }
+
+ // Assign gp.m before entering _Grunning so running Gs have an
+ // M.
+ _g_.m.curg = gp
+ gp.m = _g_.m
+ casgstatus(gp, _Grunnable, _Grunning)
+ gp.waitsince = 0
+ gp.preempt = false
+ gp.stackguard0 = gp.stack.lo + _StackGuard
+ if !inheritTime {
+ _g_.m.p.ptr().schedtick++
+ }
+
+ // Check whether the profiler needs to be turned on or off.
+ hz := sched.profilehz
+ if _g_.m.profilehz != hz {
+ setThreadCPUProfiler(hz)
+ }
+
+ if trace.enabled {
+ // GoSysExit has to happen when we have a P, but before GoStart.
+ // So we emit it here.
+ if gp.syscallsp != 0 && gp.sysblocktraced {
+ traceGoSysExit(gp.sysexitticks)
+ }
+ traceGoStart()
+ }
+
+ gogo(&gp.sched)
+}
+
+// Finds a runnable goroutine to execute.
+// Tries to steal from other P's, get g from local or global queue, poll network.
+// tryWakeP indicates that the returned goroutine is not normal (GC worker, trace
+// reader) so the caller should try to wake a P.
+func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
+ _g_ := getg()
+
+ // The conditions here and in handoffp must agree: if
+ // findrunnable would return a G to run, handoffp must start
+ // an M.
+
+top:
+ _p_ := _g_.m.p.ptr()
+ if sched.gcwaiting != 0 {
+ gcstopm()
+ goto top
+ }
+ if _p_.runSafePointFn != 0 {
+ runSafePointFn()
+ }
+
+ // now and pollUntil are saved for work stealing later,
+ // which may steal timers. It's important that between now
+ // and then, nothing blocks, so these numbers remain mostly
+ // relevant.
+ now, pollUntil, _ := checkTimers(_p_, 0)
+
+ // Try to schedule the trace reader.
+ if trace.enabled || trace.shutdown {
+ gp = traceReader()
+ if gp != nil {
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ traceGoUnpark(gp, 0)
+ return gp, false, true
+ }
+ }
+
+ // Try to schedule a GC worker.
+ if gcBlackenEnabled != 0 {
+ gp, now = gcController.findRunnableGCWorker(_p_, now)
+ if gp != nil {
+ return gp, false, true
+ }
+ }
+
+ // Check the global runnable queue once in a while to ensure fairness.
+ // Otherwise two goroutines can completely occupy the local runqueue
+ // by constantly respawning each other.
+ if _p_.schedtick%61 == 0 && sched.runqsize > 0 {
+ lock(&sched.lock)
+ gp = globrunqget(_p_, 1)
+ unlock(&sched.lock)
+ if gp != nil {
+ return gp, false, false
+ }
+ }
+
+ // Wake up the finalizer G.
+ if fingwait && fingwake {
+ if gp := wakefing(); gp != nil {
+ ready(gp, 0, true)
+ }
+ }
+ if *cgo_yield != nil {
+ asmcgocall(*cgo_yield, nil)
+ }
+
+ // local runq
+ if gp, inheritTime := runqget(_p_); gp != nil {
+ return gp, inheritTime, false
+ }
+
+ // global runq
+ if sched.runqsize != 0 {
+ lock(&sched.lock)
+ gp := globrunqget(_p_, 0)
+ unlock(&sched.lock)
+ if gp != nil {
+ return gp, false, false
+ }
+ }
+
+ // Poll network.
+ // This netpoll is only an optimization before we resort to stealing.
+ // We can safely skip it if there are no waiters or a thread is blocked
+ // in netpoll already. If there is any kind of logical race with that
+ // blocked thread (e.g. it has already returned from netpoll, but does
+ // not set lastpoll yet), this thread will do blocking netpoll below
+ // anyway.
+ if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
+ if list := netpoll(0); !list.empty() { // non-blocking
+ gp := list.pop()
+ injectglist(&list)
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ if trace.enabled {
+ traceGoUnpark(gp, 0)
+ }
+ return gp, false, false
+ }
+ }
+
+ // Spinning Ms: steal work from other Ps.
+ //
+ // Limit the number of spinning Ms to half the number of busy Ps.
+ // This is necessary to prevent excessive CPU consumption when
+ // GOMAXPROCS>>1 but the program parallelism is low.
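+ //
+ // For example (illustrative): with GOMAXPROCS=8 and 2 idle Ps there
+ // are 6 busy Ps, so the check below admits a new spinner only while
+ // 2*nmspinning < 6, i.e. at most 3 Ms spin at once.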
+ procs := uint32(gomaxprocs)
+ if _g_.m.spinning || 2*atomic.Load(&sched.nmspinning) < procs-atomic.Load(&sched.npidle) {
+ if !_g_.m.spinning {
+ _g_.m.spinning = true
+ atomic.Xadd(&sched.nmspinning, 1)
+ }
+
+ gp, inheritTime, tnow, w, newWork := stealWork(now)
+ now = tnow
+ if gp != nil {
+ // Successfully stole.
+ return gp, inheritTime, false
+ }
+ if newWork {
+ // There may be new timer or GC work; restart to
+ // discover.
+ goto top
+ }
+ if w != 0 && (pollUntil == 0 || w < pollUntil) {
+ // Earlier timer to wait for.
+ pollUntil = w
+ }
+ }
+
+ // We have nothing to do.
+ //
+ // If we're in the GC mark phase, can safely scan and blacken objects,
+ // and have work to do, run idle-time marking rather than give up the P.
+ if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) && gcController.addIdleMarkWorker() {
+ node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
+ if node != nil {
+ _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
+ gp := node.gp.ptr()
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ if trace.enabled {
+ traceGoUnpark(gp, 0)
+ }
+ return gp, false, false
+ }
+ gcController.removeIdleMarkWorker()
+ }
+
+ // wasm only:
+	// If a callback returned and no other goroutine is awake,
+	// then wake the event handler goroutine, which pauses execution
+	// until a callback is triggered.
+ gp, otherReady := beforeIdle(now, pollUntil)
+ if gp != nil {
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ if trace.enabled {
+ traceGoUnpark(gp, 0)
+ }
+ return gp, false, false
+ }
+ if otherReady {
+ goto top
+ }
+
+ // Before we drop our P, make a snapshot of the allp slice,
+ // which can change underfoot once we no longer block
+ // safe-points. We don't need to snapshot the contents because
+ // everything up to cap(allp) is immutable.
+ allpSnapshot := allp
+ // Also snapshot masks. Value changes are OK, but we can't allow
+ // len to change out from under us.
+ idlepMaskSnapshot := idlepMask
+ timerpMaskSnapshot := timerpMask
+
+ // return P and block
+ lock(&sched.lock)
+ if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
+ unlock(&sched.lock)
+ goto top
+ }
+ if sched.runqsize != 0 {
+ gp := globrunqget(_p_, 0)
+ unlock(&sched.lock)
+ return gp, false, false
+ }
+ if releasep() != _p_ {
+ throw("findrunnable: wrong p")
+ }
+ now = pidleput(_p_, now)
+ unlock(&sched.lock)
+
+ // Delicate dance: thread transitions from spinning to non-spinning
+ // state, potentially concurrently with submission of new work. We must
+ // drop nmspinning first and then check all sources again (with
+ // #StoreLoad memory barrier in between). If we do it the other way
+ // around, another thread can submit work after we've checked all
+ // sources but before we drop nmspinning; as a result nobody will
+ // unpark a thread to run the work.
+ //
+ // This applies to the following sources of work:
+ //
+ // * Goroutines added to a per-P run queue.
+ // * New/modified-earlier timers on a per-P timer heap.
+ // * Idle-priority GC work (barring golang.org/issue/19112).
+ //
+ // If we discover new work below, we need to restore m.spinning as a signal
+ // for resetspinning to unpark a new worker thread (because there can be more
+ // than one starving goroutine). However, if after discovering new work
+ // we also observe no idle Ps it is OK to skip unparking a new worker
+ // thread: the system is fully loaded so no spinning threads are required.
+ // Also see "Worker thread parking/unparking" comment at the top of the file.
+ wasSpinning := _g_.m.spinning
+ if _g_.m.spinning {
+ _g_.m.spinning = false
+ if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
+ throw("findrunnable: negative nmspinning")
+ }
+
+		// Note that for correctness, only the last M transitioning from
+ // spinning to non-spinning must perform these rechecks to
+ // ensure no missed work. We are performing it on every M that
+ // transitions as a conservative change to monitor effects on
+ // latency. See golang.org/issue/43997.
+
+ // Check all runqueues once again.
+ _p_ = checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
+ if _p_ != nil {
+ acquirep(_p_)
+ _g_.m.spinning = true
+ atomic.Xadd(&sched.nmspinning, 1)
+ goto top
+ }
+
+ // Check for idle-priority GC work again.
+ _p_, gp = checkIdleGCNoP()
+ if _p_ != nil {
+ acquirep(_p_)
+ _g_.m.spinning = true
+ atomic.Xadd(&sched.nmspinning, 1)
+
+ // Run the idle worker.
+ _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ if trace.enabled {
+ traceGoUnpark(gp, 0)
+ }
+ return gp, false, false
+ }
+
+ // Finally, check for timer creation or expiry concurrently with
+ // transitioning from spinning to non-spinning.
+ //
+ // Note that we cannot use checkTimers here because it calls
+ // adjusttimers which may need to allocate memory, and that isn't
+ // allowed when we don't have an active P.
+ pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
+ }
+
+ // Poll network until next timer.
+ if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
+ atomic.Store64(&sched.pollUntil, uint64(pollUntil))
+ if _g_.m.p != 0 {
+ throw("findrunnable: netpoll with p")
+ }
+ if _g_.m.spinning {
+ throw("findrunnable: netpoll with spinning")
+ }
+ // Refresh now.
+ now = nanotime()
+ delay := int64(-1)
+ if pollUntil != 0 {
+ delay = pollUntil - now
+ if delay < 0 {
+ delay = 0
+ }
+ }
+ if faketime != 0 {
+ // When using fake time, just poll.
+ delay = 0
+ }
+ list := netpoll(delay) // block until new work is available
+ atomic.Store64(&sched.pollUntil, 0)
+ atomic.Store64(&sched.lastpoll, uint64(now))
+ if faketime != 0 && list.empty() {
+ // Using fake time and nothing is ready; stop M.
+ // When all M's stop, checkdead will call timejump.
+ stopm()
+ goto top
+ }
+ lock(&sched.lock)
+ _p_, _ = pidleget(now)
+ unlock(&sched.lock)
+ if _p_ == nil {
+ injectglist(&list)
+ } else {
+ acquirep(_p_)
+ if !list.empty() {
+ gp := list.pop()
+ injectglist(&list)
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ if trace.enabled {
+ traceGoUnpark(gp, 0)
+ }
+ return gp, false, false
+ }
+ if wasSpinning {
+ _g_.m.spinning = true
+ atomic.Xadd(&sched.nmspinning, 1)
+ }
+ goto top
+ }
+ } else if pollUntil != 0 && netpollinited() {
+ pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
+ if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
+ netpollBreak()
+ }
+ }
+ stopm()
+ goto top
+}
+
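+// The submit/park protocol findRunnable participates in can be shown in
+// miniature. A sketch (not the runtime's implementation; workQueue, work,
+// unparkOneWorker, and park are hypothetical) of the "drop the spinner
+// count, then recheck" order, using sync/atomic, that prevents lost
+// wakeups:
+//
+//	var spinners atomic.Int32
+//	var queue workQueue
+//
+//	func submit(w work) {
+//		queue.push(w)             // store
+//		if spinners.Load() == 0 { // load: no spinner will see w, so wake one
+//			unparkOneWorker()
+//		}
+//	}
+//
+//	func stopSpinning() {
+//		spinners.Add(-1)    // store
+//		if !queue.empty() { // load: recheck after dropping the count
+//			spinners.Add(1) // work arrived during the transition
+//			return          // keep running
+//		}
+//		park()
+//	}
+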
+// pollWork reports whether there is non-background work this P could
+// be doing. This is a fairly lightweight check to be used for
+// background work loops, like idle GC. It checks a subset of the
+// conditions checked by the actual scheduler.
+func pollWork() bool {
+ if sched.runqsize != 0 {
+ return true
+ }
+ p := getg().m.p.ptr()
+ if !runqempty(p) {
+ return true
+ }
+ if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
+ if list := netpoll(0); !list.empty() {
+ injectglist(&list)
+ return true
+ }
+ }
+ return false
+}
+
+// stealWork attempts to steal a runnable goroutine or timer from any P.
+//
+// If newWork is true, new work may have been readied.
+//
+// If now is not 0 it is the current time. stealWork returns the passed time or
+// the current time if now was passed as 0.
+func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
+ pp := getg().m.p.ptr()
+
+ ranTimer := false
+
+ const stealTries = 4
+ for i := 0; i < stealTries; i++ {
+ stealTimersOrRunNextG := i == stealTries-1
+
+ for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
+ if sched.gcwaiting != 0 {
+ // GC work may be available.
+ return nil, false, now, pollUntil, true
+ }
+ p2 := allp[enum.position()]
+ if pp == p2 {
+ continue
+ }
+
+ // Steal timers from p2. This call to checkTimers is the only place
+ // where we might hold a lock on a different P's timers. We do this
+ // once on the last pass before checking runnext because stealing
+ // from the other P's runnext should be the last resort, so if there
+ // are timers to steal do that first.
+ //
+ // We only check timers on one of the stealing iterations because
+ // the time stored in now doesn't change in this loop and checking
+ // the timers for each P more than once with the same value of now
+ // is probably a waste of time.
+ //
+ // timerpMask tells us whether the P may have timers at all. If it
+ // can't, no need to check at all.
+ if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
+ tnow, w, ran := checkTimers(p2, now)
+ now = tnow
+ if w != 0 && (pollUntil == 0 || w < pollUntil) {
+ pollUntil = w
+ }
+ if ran {
+ // Running the timers may have
+ // made an arbitrary number of G's
+ // ready and added them to this P's
+ // local run queue. That invalidates
+ // the assumption of runqsteal
+ // that it always has room to add
+ // stolen G's. So check now if there
+ // is a local G to run.
+ if gp, inheritTime := runqget(pp); gp != nil {
+ return gp, inheritTime, now, pollUntil, ranTimer
+ }
+ ranTimer = true
+ }
+ }
+
+ // Don't bother to attempt to steal if p2 is idle.
+ if !idlepMask.read(enum.position()) {
+ if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
+ return gp, false, now, pollUntil, ranTimer
+ }
+ }
+ }
+ }
+
+ // No goroutines found to steal. Regardless, running a timer may have
+ // made some goroutine ready that we missed. Indicate the next timer to
+ // wait for.
+ return nil, false, now, pollUntil, ranTimer
+}
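+
+// Victim selection in stealWork is randomized so that concurrent
+// thieves do not all converge on the same P. A minimal sketch of the
+// idea outside the runtime (deque, task, and popTail are hypothetical),
+// visiting every index once from a random starting offset:
+//
+//	func stealFrom(queues []deque, self int) (task, bool) {
+//		n := len(queues)
+//		start := rand.Intn(n) // math/rand
+//		for i := 0; i < n; i++ {
+//			victim := (start + i) % n
+//			if victim == self {
+//				continue
+//			}
+//			if t, ok := queues[victim].popTail(); ok {
+//				return t, true
+//			}
+//		}
+//		var zero task
+//		return zero, false
+//	}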
+
+// Check all Ps for a runnable G to steal.
+//
+// On entry we have no P. If a G is available to steal and a P is available,
+// the P is returned; the caller should acquire it and attempt to steal the
+// work to it.
+func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
+ for id, p2 := range allpSnapshot {
+ if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
+ lock(&sched.lock)
+ pp, _ := pidleget(0)
+ unlock(&sched.lock)
+ if pp != nil {
+ return pp
+ }
+
+ // Can't get a P, don't bother checking remaining Ps.
+ break
+ }
+ }
+
+ return nil
+}
+
+// Check all Ps for a timer expiring sooner than pollUntil.
+//
+// Returns updated pollUntil value.
+func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
+ for id, p2 := range allpSnapshot {
+ if timerpMaskSnapshot.read(uint32(id)) {
+ w := nobarrierWakeTime(p2)
+ if w != 0 && (pollUntil == 0 || w < pollUntil) {
+ pollUntil = w
+ }
+ }
+ }
+
+ return pollUntil
+}
+
+// Check for idle-priority GC, without a P on entry.
+//
+// If some GC work, a P, and a worker G are all available, the P and G will be
+// returned. The returned P has not been wired yet.
+func checkIdleGCNoP() (*p, *g) {
+ // N.B. Since we have no P, gcBlackenEnabled may change at any time; we
+ // must check again after acquiring a P. As an optimization, we also check
+ // if an idle mark worker is needed at all. This is OK here, because if we
+ // observe that one isn't needed, at least one is currently running. Even if
+ // it stops running, its own journey into the scheduler should schedule it
+ // again, if need be (at which point, this check will pass, if relevant).
+ if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
+ return nil, nil
+ }
+ if !gcMarkWorkAvailable(nil) {
+ return nil, nil
+ }
+
+ // Work is available; we can start an idle GC worker only if there is
+ // an available P and available worker G.
+ //
+ // We can attempt to acquire these in either order, though both have
+ // synchronization concerns (see below). Workers are almost always
+ // available (see comment in findRunnableGCWorker for the one case
+ // there may be none). Since we're slightly less likely to find a P,
+ // check for that first.
+ //
+ // Synchronization: note that we must hold sched.lock until we are
+ // committed to keeping it. Otherwise we cannot put the unnecessary P
+ // back in sched.pidle without performing the full set of idle
+ // transition checks.
+ //
+ // If we were to check gcBgMarkWorkerPool first, we must somehow handle
+ // the assumption in gcControllerState.findRunnableGCWorker that an
+ // empty gcBgMarkWorkerPool is only possible if gcMarkDone is running.
+ lock(&sched.lock)
+ pp, now := pidleget(0)
+ if pp == nil {
+ unlock(&sched.lock)
+ return nil, nil
+ }
+
+ // Now that we own a P, gcBlackenEnabled can't change (as it requires STW).
+ if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
+ pidleput(pp, now)
+ unlock(&sched.lock)
+ return nil, nil
+ }
+
+ node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
+ if node == nil {
+ pidleput(pp, now)
+ unlock(&sched.lock)
+ gcController.removeIdleMarkWorker()
+ return nil, nil
+ }
+
+ unlock(&sched.lock)
+
+ return pp, node.gp.ptr()
+}
+
+// wakeNetPoller wakes up the thread sleeping in the network poller if it isn't
+// going to wake up before the when argument; or it wakes an idle P to service
+// timers and the network poller if there isn't one already.
+func wakeNetPoller(when int64) {
+ if atomic.Load64(&sched.lastpoll) == 0 {
+		// In findrunnable we ensure that, when polling, the pollUntil
+		// field is either zero or the time to which the current
+		// poll is expected to run. This can cause a spurious wakeup
+ // but should never miss a wakeup.
+ pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
+ if pollerPollUntil == 0 || pollerPollUntil > when {
+ netpollBreak()
+ }
+ } else {
+ // There are no threads in the network poller, try to get
+ // one there so it can handle new timers.
+ if GOOS != "plan9" { // Temporary workaround - see issue #42303.
+ wakep()
+ }
+ }
+}
+
+func resetspinning() {
+ _g_ := getg()
+ if !_g_.m.spinning {
+ throw("resetspinning: not a spinning m")
+ }
+ _g_.m.spinning = false
+ nmspinning := atomic.Xadd(&sched.nmspinning, -1)
+ if int32(nmspinning) < 0 {
+ throw("findrunnable: negative nmspinning")
+ }
+ // M wakeup policy is deliberately somewhat conservative, so check if we
+ // need to wakeup another P here. See "Worker thread parking/unparking"
+ // comment at the top of the file for details.
+ wakep()
+}
+
+// injectglist adds each runnable G on the list to some run queue,
+// and clears glist. If there is no current P, they are added to the
+// global queue, and up to npidle M's are started to run them.
+// Otherwise, for each idle P, this adds a G to the global queue
+// and starts an M. Any remaining G's are added to the current P's
+// local run queue.
+// This may temporarily acquire sched.lock.
+// Can run concurrently with GC.
+func injectglist(glist *gList) {
+ if glist.empty() {
+ return
+ }
+ if trace.enabled {
+ for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
+ traceGoUnpark(gp, 0)
+ }
+ }
+
+ // Mark all the goroutines as runnable before we put them
+ // on the run queues.
+ head := glist.head.ptr()
+ var tail *g
+ qsize := 0
+ for gp := head; gp != nil; gp = gp.schedlink.ptr() {
+ tail = gp
+ qsize++
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ }
+
+ // Turn the gList into a gQueue.
+ var q gQueue
+ q.head.set(head)
+ q.tail.set(tail)
+ *glist = gList{}
+
+ startIdle := func(n int) {
+ for ; n != 0 && sched.npidle != 0; n-- {
+ startm(nil, false)
+ }
+ }
+
+ pp := getg().m.p.ptr()
+ if pp == nil {
+ lock(&sched.lock)
+ globrunqputbatch(&q, int32(qsize))
+ unlock(&sched.lock)
+ startIdle(qsize)
+ return
+ }
+
+ npidle := int(atomic.Load(&sched.npidle))
+ var globq gQueue
+ var n int
+ for n = 0; n < npidle && !q.empty(); n++ {
+ g := q.pop()
+ globq.pushBack(g)
+ }
+ if n > 0 {
+ lock(&sched.lock)
+ globrunqputbatch(&globq, int32(n))
+ unlock(&sched.lock)
+ startIdle(n)
+ qsize -= n
+ }
+
+ if !q.empty() {
+ runqputbatch(pp, &q, qsize)
+ }
+}
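+
+// injectglist's split rule in isolation: hand one G to each idle P it
+// can wake (via the global queue) and keep the remainder on the local
+// queue. A sketch (names hypothetical):
+//
+//	func splitBatch(gs []runnable, npidle int) (forGlobal, forLocal []runnable) {
+//		n := npidle
+//		if n > len(gs) {
+//			n = len(gs)
+//		}
+//		return gs[:n], gs[n:]
+//	}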
+
+// One round of scheduler: find a runnable goroutine and execute it.
+// Never returns.
+func schedule() {
+ _g_ := getg()
+
+ if _g_.m.locks != 0 {
+ throw("schedule: holding locks")
+ }
+
+ if _g_.m.lockedg != 0 {
+ stoplockedm()
+ execute(_g_.m.lockedg.ptr(), false) // Never returns.
+ }
+
+ // We should not schedule away from a g that is executing a cgo call,
+ // since the cgo call is using the m's g0 stack.
+ if _g_.m.incgo {
+ throw("schedule: in cgo")
+ }
+
+top:
+ pp := _g_.m.p.ptr()
+ pp.preempt = false
+
+ // Safety check: if we are spinning, the run queue should be empty.
+ // Check this before calling checkTimers, as that might call
+ // goready to put a ready goroutine on the local run queue.
+ if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
+ throw("schedule: spinning with local work")
+ }
+
+ gp, inheritTime, tryWakeP := findRunnable() // blocks until work is available
+
+ // This thread is going to run a goroutine and is not spinning anymore,
+ // so if it was marked as spinning we need to reset it now and potentially
+ // start a new spinning M.
+ if _g_.m.spinning {
+ resetspinning()
+ }
+
+ if sched.disable.user && !schedEnabled(gp) {
+ // Scheduling of this goroutine is disabled. Put it on
+ // the list of pending runnable goroutines for when we
+ // re-enable user scheduling and look again.
+ lock(&sched.lock)
+ if schedEnabled(gp) {
+ // Something re-enabled scheduling while we
+ // were acquiring the lock.
+ unlock(&sched.lock)
+ } else {
+ sched.disable.runnable.pushBack(gp)
+ sched.disable.n++
+ unlock(&sched.lock)
+ goto top
+ }
+ }
+
+	// If about to schedule a not-normal goroutine (a GC worker or trace reader),
+ // wake a P if there is one.
+ if tryWakeP {
+ wakep()
+ }
+ if gp.lockedm != 0 {
+ // Hands off own p to the locked m,
+ // then blocks waiting for a new p.
+ startlockedm(gp)
+ goto top
+ }
+
+ execute(gp, inheritTime)
+}
+
+// dropg removes the association between m and the current goroutine m->curg (gp for short).
+// Typically a caller sets gp's status away from Grunning and then
+// immediately calls dropg to finish the job. The caller is also responsible
+// for arranging that gp will be restarted using ready at an
+// appropriate time. After calling dropg and arranging for gp to be
+// readied later, the caller can do other work but eventually should
+// call schedule to restart the scheduling of goroutines on this m.
+func dropg() {
+ _g_ := getg()
+
+ setMNoWB(&_g_.m.curg.m, nil)
+ setGNoWB(&_g_.m.curg, nil)
+}
+
+// checkTimers runs any timers for the P that are ready.
+// If now is not 0 it is the current time.
+// It returns the passed time, or the current time if now was passed as 0;
+// the time when the next timer should run, or 0 if there is no next timer;
+// and reports whether it ran any timers.
+// If the time when the next timer should run is not 0,
+// it is always larger than the returned time.
+// We pass now in and out to avoid extra calls of nanotime.
+//
+//go:yeswritebarrierrec
+func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
+ // If it's not yet time for the first timer, or the first adjusted
+ // timer, then there is nothing to do.
+ next := int64(atomic.Load64(&pp.timer0When))
+ nextAdj := int64(atomic.Load64(&pp.timerModifiedEarliest))
+ if next == 0 || (nextAdj != 0 && nextAdj < next) {
+ next = nextAdj
+ }
+
+ if next == 0 {
+ // No timers to run or adjust.
+ return now, 0, false
+ }
+
+ if now == 0 {
+ now = nanotime()
+ }
+ if now < next {
+ // Next timer is not ready to run, but keep going
+ // if we would clear deleted timers.
+ // This corresponds to the condition below where
+ // we decide whether to call clearDeletedTimers.
+ if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) {
+ return now, next, false
+ }
+ }
+
+ lock(&pp.timersLock)
+
+ if len(pp.timers) > 0 {
+ adjusttimers(pp, now)
+ for len(pp.timers) > 0 {
+ // Note that runtimer may temporarily unlock
+ // pp.timersLock.
+ if tw := runtimer(pp, now); tw != 0 {
+ if tw > 0 {
+ pollUntil = tw
+ }
+ break
+ }
+ ran = true
+ }
+ }
+
+ // If this is the local P, and there are a lot of deleted timers,
+ // clear them out. We only do this for the local P to reduce
+ // lock contention on timersLock.
+ if pp == getg().m.p.ptr() && int(atomic.Load(&pp.deletedTimers)) > len(pp.timers)/4 {
+ clearDeletedTimers(pp)
+ }
+
+ unlock(&pp.timersLock)
+
+ return now, pollUntil, ran
+}
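+
+// The early return above is the lock-free fast path: the earliest
+// deadline is published with atomic stores whenever the timer heap
+// changes, so readers can compare it against the clock without taking
+// timersLock. A sketch of the pattern (names hypothetical):
+//
+//	var nextWhen atomic.Int64 // earliest deadline; 0 means none
+//
+//	func maybeRunTimers(now int64, runExpired func(int64)) {
+//		next := nextWhen.Load()
+//		if next == 0 || now < next {
+//			return // nothing due; no lock taken
+//		}
+//		runExpired(now) // slow path: lock the heap, run what expired
+//	}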
+
+func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
+ unlock((*mutex)(lock))
+ return true
+}
+
+// park continuation on g0.
+func park_m(gp *g) {
+ _g_ := getg()
+
+ if trace.enabled {
+ traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
+ }
+
+ casgstatus(gp, _Grunning, _Gwaiting)
+ dropg()
+
+ if fn := _g_.m.waitunlockf; fn != nil {
+ ok := fn(gp, _g_.m.waitlock)
+ _g_.m.waitunlockf = nil
+ _g_.m.waitlock = nil
+ if !ok {
+ if trace.enabled {
+ traceGoUnpark(gp, 2)
+ }
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ execute(gp, true) // Schedule it back, never returns.
+ }
+ }
+ schedule()
+}
+
+func goschedImpl(gp *g) {
+ status := readgstatus(gp)
+ if status&^_Gscan != _Grunning {
+ dumpgstatus(gp)
+ throw("bad g status")
+ }
+ casgstatus(gp, _Grunning, _Grunnable)
+ dropg()
+ lock(&sched.lock)
+ globrunqput(gp)
+ unlock(&sched.lock)
+
+ schedule()
+}
+
+// Gosched continuation on g0.
+func gosched_m(gp *g) {
+ if trace.enabled {
+ traceGoSched()
+ }
+ goschedImpl(gp)
+}
+
+// goschedguarded_m is like gosched_m, but it bails out (resuming the G)
+// if preemption of this M is currently forbidden.
+func goschedguarded_m(gp *g) {
+
+ if !canPreemptM(gp.m) {
+ gogo(&gp.sched) // never return
+ }
+
+ if trace.enabled {
+ traceGoSched()
+ }
+ goschedImpl(gp)
+}
+
+func gopreempt_m(gp *g) {
+ if trace.enabled {
+ traceGoPreempt()
+ }
+ goschedImpl(gp)
+}
+
+// preemptPark parks gp and puts it in _Gpreempted.
+//
+//go:systemstack
+func preemptPark(gp *g) {
+ if trace.enabled {
+ traceGoPark(traceEvGoBlock, 0)
+ }
+ status := readgstatus(gp)
+ if status&^_Gscan != _Grunning {
+ dumpgstatus(gp)
+ throw("bad g status")
+ }
+ gp.waitreason = waitReasonPreempted
+
+ if gp.asyncSafePoint {
+ // Double-check that async preemption does not
+ // happen in SPWRITE assembly functions.
+ // isAsyncSafePoint must exclude this case.
+ f := findfunc(gp.sched.pc)
+ if !f.valid() {
+ throw("preempt at unknown pc")
+ }
+ if f.flag&funcFlag_SPWRITE != 0 {
+ println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
+ throw("preempt SPWRITE")
+ }
+ }
+
+ // Transition from _Grunning to _Gscan|_Gpreempted. We can't
+ // be in _Grunning when we dropg because then we'd be running
+ // without an M, but the moment we're in _Gpreempted,
+ // something could claim this G before we've fully cleaned it
+ // up. Hence, we set the scan bit to lock down further
+ // transitions until we can dropg.
+ casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
+ dropg()
+ casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
+ schedule()
+}
+
+// goyield is like Gosched, but it:
+// - emits a GoPreempt trace event instead of a GoSched trace event
+// - puts the current G on the runq of the current P instead of the globrunq
+func goyield() {
+ checkTimeouts()
+ mcall(goyield_m)
+}
+
+func goyield_m(gp *g) {
+ if trace.enabled {
+ traceGoPreempt()
+ }
+ pp := gp.m.p.ptr()
+ casgstatus(gp, _Grunning, _Grunnable)
+ dropg()
+ runqput(pp, gp, false)
+ schedule()
+}
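+
+// Of these, only Gosched is public API; goyield is internal to the
+// runtime. A small user-level illustration of cooperative yielding
+// (scheduling order is not guaranteed, so this only makes the worker
+// likely to run first):
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//		"runtime"
+//	)
+//
+//	func main() {
+//		done := make(chan struct{})
+//		go func() {
+//			fmt.Println("worker")
+//			close(done)
+//		}()
+//		runtime.Gosched() // yield the P before blocking
+//		<-done
+//	}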
+
+// Finishes execution of the current goroutine.
+func goexit1() {
+ if raceenabled {
+ racegoend()
+ }
+ if trace.enabled {
+ traceGoEnd()
+ }
+ mcall(goexit0)
+}
+
+// goexit continuation on g0.
+func goexit0(gp *g) {
+ _g_ := getg()
+ _p_ := _g_.m.p.ptr()
+
+ casgstatus(gp, _Grunning, _Gdead)
+ gcController.addScannableStack(_p_, -int64(gp.stack.hi-gp.stack.lo))
+ if isSystemGoroutine(gp, false) {
+ atomic.Xadd(&sched.ngsys, -1)
+ }
+ gp.m = nil
+ locked := gp.lockedm != 0
+ gp.lockedm = 0
+ _g_.m.lockedg = 0
+ gp.preemptStop = false
+ gp.paniconfault = false
+	gp._defer = nil // should be nil already, but just in case.
+ gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
+ gp.writebuf = nil
+ gp.waitreason = 0
+ gp.param = nil
+ gp.labels = nil
+ gp.timer = nil
+
+ if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
+ // Flush assist credit to the global pool. This gives
+ // better information to pacing if the application is
+		// rapidly creating and exiting goroutines.
+ assistWorkPerByte := gcController.assistWorkPerByte.Load()
+ scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
+ atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
+ gp.gcAssistBytes = 0
+ }
+
+ dropg()
+
+ if GOARCH == "wasm" { // no threads yet on wasm
+ gfput(_p_, gp)
+ schedule() // never returns
+ }
+
+ if _g_.m.lockedInt != 0 {
+ print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
+ throw("internal lockOSThread error")
+ }
+ gfput(_p_, gp)
+ if locked {
+ // The goroutine may have locked this thread because
+ // it put it in an unusual kernel state. Kill it
+ // rather than returning it to the thread pool.
+
+ // Return to mstart, which will release the P and exit
+ // the thread.
+ if GOOS != "plan9" { // See golang.org/issue/22227.
+ gogo(&_g_.m.g0.sched)
+ } else {
+ // Clear lockedExt on plan9 since we may end up re-using
+ // this thread.
+ _g_.m.lockedExt = 0
+ }
+ }
+ schedule()
+}
+
+// save updates getg().sched to refer to pc and sp so that a following
+// gogo will restore pc and sp.
+//
+// save must not have write barriers because invoking a write barrier
+// can clobber getg().sched.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func save(pc, sp uintptr) {
+ _g_ := getg()
+
+ if _g_ == _g_.m.g0 || _g_ == _g_.m.gsignal {
+ // m.g0.sched is special and must describe the context
+ // for exiting the thread. mstart1 writes to it directly.
+ // m.gsignal.sched should not be used at all.
+ // This check makes sure save calls do not accidentally
+ // run in contexts where they'd write to system g's.
+ throw("save on system g not allowed")
+ }
+
+ _g_.sched.pc = pc
+ _g_.sched.sp = sp
+ _g_.sched.lr = 0
+ _g_.sched.ret = 0
+ // We need to ensure ctxt is zero, but can't have a write
+ // barrier here. However, it should always already be zero.
+ // Assert that.
+ if _g_.sched.ctxt != nil {
+ badctxt()
+ }
+}
+
+// The goroutine g is about to enter a system call.
+// Record that it's not using the cpu anymore.
+// This is called only from the go syscall library and cgocall,
+// not from the low-level system calls used by the runtime.
+//
+// Entersyscall cannot split the stack: the save must
+// make g->sched refer to the caller's stack segment, because
+// entersyscall is going to return immediately after.
+//
+// Nothing entersyscall calls can split the stack either.
+// We cannot safely move the stack during an active call to syscall,
+// because we do not know which of the uintptr arguments are
+// really pointers (back into the stack).
+// In practice, this means that we make the fast path run through
+// entersyscall doing no-split things, and the slow path has to use systemstack
+// to run bigger things on the system stack.
+//
+// reentersyscall is the entry point used by cgo callbacks, where explicitly
+// saved SP and PC are restored. This is needed when exitsyscall will be called
+// from a function further up in the call stack than the parent, as g->syscallsp
+// must always point to a valid stack frame. entersyscall below is the normal
+// entry point for syscalls, which obtains the SP and PC from the caller.
+//
+// Syscall tracing:
+// At the start of a syscall we emit traceGoSysCall to capture the stack trace.
+// If the syscall does not block, that is it, we do not emit any other events.
+// If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
+// when syscall returns we emit traceGoSysExit and when the goroutine starts running
+// (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
+// To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
+// we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
+// whoever emits traceGoSysBlock increments p.syscalltick afterwards;
+// and we wait for the increment before emitting traceGoSysExit.
+// Note that the increment is done even if tracing is not enabled,
+// because tracing can be enabled in the middle of syscall. We don't want the wait to hang.
+//
+//go:nosplit
+func reentersyscall(pc, sp uintptr) {
+ _g_ := getg()
+
+ // Disable preemption because during this function g is in Gsyscall status,
+ // but can have inconsistent g->sched, do not let GC observe it.
+ _g_.m.locks++
+
+ // Entersyscall must not call any function that might split/grow the stack.
+ // (See details in comment above.)
+ // Catch calls that might, by replacing the stack guard with something that
+ // will trip any stack check and leaving a flag to tell newstack to die.
+ _g_.stackguard0 = stackPreempt
+ _g_.throwsplit = true
+
+ // Leave SP around for GC and traceback.
+ save(pc, sp)
+ _g_.syscallsp = sp
+ _g_.syscallpc = pc
+ casgstatus(_g_, _Grunning, _Gsyscall)
+ if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
+ systemstack(func() {
+ print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
+ throw("entersyscall")
+ })
+ }
+
+ if trace.enabled {
+ systemstack(traceGoSysCall)
+ // systemstack itself clobbers g.sched.{pc,sp} and we might
+ // need them later when the G is genuinely blocked in a
+ // syscall
+ save(pc, sp)
+ }
+
+ if atomic.Load(&sched.sysmonwait) != 0 {
+ systemstack(entersyscall_sysmon)
+ save(pc, sp)
+ }
+
+ if _g_.m.p.ptr().runSafePointFn != 0 {
+ // runSafePointFn may stack split if run on this stack
+ systemstack(runSafePointFn)
+ save(pc, sp)
+ }
+
+ _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
+ _g_.sysblocktraced = true
+ pp := _g_.m.p.ptr()
+ pp.m = 0
+ _g_.m.oldp.set(pp)
+ _g_.m.p = 0
+ atomic.Store(&pp.status, _Psyscall)
+ if sched.gcwaiting != 0 {
+ systemstack(entersyscall_gcwait)
+ save(pc, sp)
+ }
+
+ _g_.m.locks--
+}
+
+// Standard syscall entry used by the go syscall library and normal cgo calls.
+//
+// This is exported via linkname to assembly in the syscall package and x/sys.
+//
+//go:nosplit
+//go:linkname entersyscall
+func entersyscall() {
+ reentersyscall(getcallerpc(), getcallersp())
+}
+
+func entersyscall_sysmon() {
+ lock(&sched.lock)
+ if atomic.Load(&sched.sysmonwait) != 0 {
+ atomic.Store(&sched.sysmonwait, 0)
+ notewakeup(&sched.sysmonnote)
+ }
+ unlock(&sched.lock)
+}
+
+func entersyscall_gcwait() {
+ _g_ := getg()
+ _p_ := _g_.m.oldp.ptr()
+
+ lock(&sched.lock)
+ if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
+ if trace.enabled {
+ traceGoSysBlock(_p_)
+ traceProcStop(_p_)
+ }
+ _p_.syscalltick++
+ if sched.stopwait--; sched.stopwait == 0 {
+ notewakeup(&sched.stopnote)
+ }
+ }
+ unlock(&sched.lock)
+}
+
+// The same as entersyscall(), but with a hint that the syscall is blocking.
+//
+//go:nosplit
+func entersyscallblock() {
+ _g_ := getg()
+
+ _g_.m.locks++ // see comment in entersyscall
+ _g_.throwsplit = true
+ _g_.stackguard0 = stackPreempt // see comment in entersyscall
+ _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
+ _g_.sysblocktraced = true
+ _g_.m.p.ptr().syscalltick++
+
+ // Leave SP around for GC and traceback.
+ pc := getcallerpc()
+ sp := getcallersp()
+ save(pc, sp)
+ _g_.syscallsp = _g_.sched.sp
+ _g_.syscallpc = _g_.sched.pc
+ if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
+ sp1 := sp
+ sp2 := _g_.sched.sp
+ sp3 := _g_.syscallsp
+ systemstack(func() {
+ print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
+ throw("entersyscallblock")
+ })
+ }
+ casgstatus(_g_, _Grunning, _Gsyscall)
+ if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
+ systemstack(func() {
+ print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
+ throw("entersyscallblock")
+ })
+ }
+
+ systemstack(entersyscallblock_handoff)
+
+ // Resave for traceback during blocked call.
+ save(getcallerpc(), getcallersp())
+
+ _g_.m.locks--
+}
+
+func entersyscallblock_handoff() {
+ if trace.enabled {
+ traceGoSysCall()
+ traceGoSysBlock(getg().m.p.ptr())
+ }
+ handoffp(releasep())
+}
+
+// The goroutine g exited its system call.
+// Arrange for it to run on a cpu again.
+// This is called only from the go syscall library, not
+// from the low-level system calls used by the runtime.
+//
+// Write barriers are not allowed because our P may have been stolen.
+//
+// This is exported via linkname to assembly in the syscall package.
+//
+//go:nosplit
+//go:nowritebarrierrec
+//go:linkname exitsyscall
+func exitsyscall() {
+ _g_ := getg()
+
+ _g_.m.locks++ // see comment in entersyscall
+ if getcallersp() > _g_.syscallsp {
+ throw("exitsyscall: syscall frame is no longer valid")
+ }
+
+ _g_.waitsince = 0
+ oldp := _g_.m.oldp.ptr()
+ _g_.m.oldp = 0
+ if exitsyscallfast(oldp) {
+ // When exitsyscallfast returns success, we have a P so can now use
+ // write barriers
+ if goroutineProfile.active {
+ // Make sure that gp has had its stack written out to the goroutine
+ // profile, exactly as it was when the goroutine profiler first
+ // stopped the world.
+ systemstack(func() {
+ tryRecordGoroutineProfileWB(_g_)
+ })
+ }
+ if trace.enabled {
+ if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
+ systemstack(traceGoStart)
+ }
+ }
+ // There's a cpu for us, so we can run.
+ _g_.m.p.ptr().syscalltick++
+ // We need to cas the status and scan before resuming...
+ casgstatus(_g_, _Gsyscall, _Grunning)
+
+ // Garbage collector isn't running (since we are),
+ // so okay to clear syscallsp.
+ _g_.syscallsp = 0
+ _g_.m.locks--
+ if _g_.preempt {
+ // restore the preemption request in case we've cleared it in newstack
+ _g_.stackguard0 = stackPreempt
+ } else {
+ // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock
+ _g_.stackguard0 = _g_.stack.lo + _StackGuard
+ }
+ _g_.throwsplit = false
+
+ if sched.disable.user && !schedEnabled(_g_) {
+ // Scheduling of this goroutine is disabled.
+ Gosched()
+ }
+
+ return
+ }
+
+ _g_.sysexitticks = 0
+ if trace.enabled {
+ // Wait till traceGoSysBlock event is emitted.
+ // This ensures consistency of the trace (the goroutine is started after it is blocked).
+ for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
+ osyield()
+ }
+ // We can't trace syscall exit right now because we don't have a P.
+ // Tracing code can invoke write barriers that cannot run without a P.
+ // So instead we remember the syscall exit time and emit the event
+ // in execute when we have a P.
+ _g_.sysexitticks = cputicks()
+ }
+
+ _g_.m.locks--
+
+ // Call the scheduler.
+ mcall(exitsyscall0)
+
+ // Scheduler returned, so we're allowed to run now.
+ // Delete the syscallsp information that we left for
+ // the garbage collector during the system call.
+ // Must wait until now because until gosched returns
+ // we don't know for sure that the garbage collector
+ // is not running.
+ _g_.syscallsp = 0
+ _g_.m.p.ptr().syscalltick++
+ _g_.throwsplit = false
+}
+
+//go:nosplit
+func exitsyscallfast(oldp *p) bool {
+ _g_ := getg()
+
+ // Freezetheworld sets stopwait but does not retake P's.
+ if sched.stopwait == freezeStopWait {
+ return false
+ }
+
+ // Try to re-acquire the last P.
+ if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
+ // There's a cpu for us, so we can run.
+ wirep(oldp)
+ exitsyscallfast_reacquired()
+ return true
+ }
+
+ // Try to get any other idle P.
+ if sched.pidle != 0 {
+ var ok bool
+ systemstack(func() {
+ ok = exitsyscallfast_pidle()
+ if ok && trace.enabled {
+ if oldp != nil {
+ // Wait till traceGoSysBlock event is emitted.
+ // This ensures consistency of the trace (the goroutine is started after it is blocked).
+ for oldp.syscalltick == _g_.m.syscalltick {
+ osyield()
+ }
+ }
+ traceGoSysExit(0)
+ }
+ })
+ if ok {
+ return true
+ }
+ }
+ return false
+}
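+
+// The fast path hinges on one CAS: if the old P is still parked in
+// _Psyscall, the returning goroutine claims it before sysmon can retake
+// it; exactly one of the two contending CASes wins. The race in
+// isolation (a sketch; stateSyscall and stateIdle are hypothetical):
+//
+//	func tryReacquire(status *atomic.Uint32) bool {
+//		return status.CompareAndSwap(stateSyscall, stateIdle)
+//	}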
+
+// exitsyscallfast_reacquired is the exitsyscall path on which this G
+// has successfully reacquired the P it was running on before the
+// syscall.
+//
+//go:nosplit
+func exitsyscallfast_reacquired() {
+ _g_ := getg()
+ if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
+ if trace.enabled {
+			// The p was retaken and then entered a syscall again (since _g_.m.syscalltick has changed).
+ // traceGoSysBlock for this syscall was already emitted,
+ // but here we effectively retake the p from the new syscall running on the same p.
+ systemstack(func() {
+ // Denote blocking of the new syscall.
+ traceGoSysBlock(_g_.m.p.ptr())
+ // Denote completion of the current syscall.
+ traceGoSysExit(0)
+ })
+ }
+ _g_.m.p.ptr().syscalltick++
+ }
+}
+
+func exitsyscallfast_pidle() bool {
+ lock(&sched.lock)
+ _p_, _ := pidleget(0)
+ if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
+ atomic.Store(&sched.sysmonwait, 0)
+ notewakeup(&sched.sysmonnote)
+ }
+ unlock(&sched.lock)
+ if _p_ != nil {
+ acquirep(_p_)
+ return true
+ }
+ return false
+}
+
+// exitsyscall slow path on g0.
+// Failed to acquire P, enqueue gp as runnable.
+//
+// Called via mcall, so gp is the calling g from this M.
+//
+//go:nowritebarrierrec
+func exitsyscall0(gp *g) {
+ casgstatus(gp, _Gsyscall, _Grunnable)
+ dropg()
+ lock(&sched.lock)
+ var _p_ *p
+ if schedEnabled(gp) {
+ _p_, _ = pidleget(0)
+ }
+ var locked bool
+ if _p_ == nil {
+ globrunqput(gp)
+
+ // Below, we stoplockedm if gp is locked. globrunqput releases
+ // ownership of gp, so we must check if gp is locked prior to
+ // committing the release by unlocking sched.lock, otherwise we
+ // could race with another M transitioning gp from unlocked to
+ // locked.
+ locked = gp.lockedm != 0
+ } else if atomic.Load(&sched.sysmonwait) != 0 {
+ atomic.Store(&sched.sysmonwait, 0)
+ notewakeup(&sched.sysmonnote)
+ }
+ unlock(&sched.lock)
+ if _p_ != nil {
+ acquirep(_p_)
+ execute(gp, false) // Never returns.
+ }
+ if locked {
+ // Wait until another thread schedules gp and so m again.
+ //
+ // N.B. lockedm must be this M, as this g was running on this M
+ // before entersyscall.
+ stoplockedm()
+ execute(gp, false) // Never returns.
+ }
+ stopm()
+ schedule() // Never returns.
+}
+
+// Called from syscall package before fork.
+//
+//go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
+//go:nosplit
+func syscall_runtime_BeforeFork() {
+ gp := getg().m.curg
+
+ // Block signals during a fork, so that the child does not run
+ // a signal handler before exec if a signal is sent to the process
+ // group. See issue #18600.
+ gp.m.locks++
+ sigsave(&gp.m.sigmask)
+ sigblock(false)
+
+	// This function is called before fork in the syscall package.
+	// Code between fork and exec must not allocate memory nor even try to grow the stack.
+	// Here we spoil g->_StackGuard to reliably detect any attempts to grow the stack.
+	// runtime_AfterFork will undo this in the parent process, but not in the child.
+ gp.stackguard0 = stackFork
+}
+
+// Called from syscall package after fork in parent.
+//
+//go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
+//go:nosplit
+func syscall_runtime_AfterFork() {
+ gp := getg().m.curg
+
+ // See the comments in beforefork.
+ gp.stackguard0 = gp.stack.lo + _StackGuard
+
+ msigrestore(gp.m.sigmask)
+
+ gp.m.locks--
+}
+
+// inForkedChild is true while manipulating signals in the child process.
+// This is used to avoid calling libc functions in case we are using vfork.
+var inForkedChild bool
+
+// Called from syscall package after fork in child.
+// It resets non-sigignored signals to the default handler, and
+// restores the signal mask in preparation for the exec.
+//
+// Because this might be called during a vfork, and therefore may be
+// temporarily sharing address space with the parent process, this must
+// not change any global variables or call into C code that may do so.
+//
+//go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
+//go:nosplit
+//go:nowritebarrierrec
+func syscall_runtime_AfterForkInChild() {
+ // It's OK to change the global variable inForkedChild here
+ // because we are going to change it back. There is no race here,
+ // because if we are sharing address space with the parent process,
+ // then the parent process can not be running concurrently.
+ inForkedChild = true
+
+ clearSignalHandlers()
+
+ // When we are the child we are the only thread running,
+ // so we know that nothing else has changed gp.m.sigmask.
+ msigrestore(getg().m.sigmask)
+
+ inForkedChild = false
+}
+
+// pendingPreemptSignals is the number of preemption signals
+// that have been sent but not received. This is only used on Darwin.
+// For #41702.
+var pendingPreemptSignals uint32
+
+// Called from syscall package before Exec.
+//
+//go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec
+func syscall_runtime_BeforeExec() {
+ // Prevent thread creation during exec.
+ execLock.lock()
+
+ // On Darwin, wait for all pending preemption signals to
+ // be received. See issue #41702.
+ if GOOS == "darwin" || GOOS == "ios" {
+ for int32(atomic.Load(&pendingPreemptSignals)) > 0 {
+ osyield()
+ }
+ }
+}
+
+// Called from syscall package after Exec.
+//
+//go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec
+func syscall_runtime_AfterExec() {
+ execLock.unlock()
+}
+
+// Allocate a new g, with a stack big enough for stacksize bytes.
+func malg(stacksize int32) *g {
+ newg := new(g)
+ if stacksize >= 0 {
+ stacksize = round2(_StackSystem + stacksize)
+ systemstack(func() {
+ newg.stack = stackalloc(uint32(stacksize))
+ })
+ newg.stackguard0 = newg.stack.lo + _StackGuard
+ newg.stackguard1 = ^uintptr(0)
+ // Clear the bottom word of the stack. We record g
+ // there on gsignal stack during VDSO on ARM and ARM64.
+ *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
+ }
+ return newg
+}
+
+// Create a new g running fn.
+// Put it on the queue of g's waiting to run.
+// The compiler turns a go statement into a call to this.
+func newproc(fn *funcval) {
+ gp := getg()
+ pc := getcallerpc()
+ systemstack(func() {
+ newg := newproc1(fn, gp, pc)
+
+ _p_ := getg().m.p.ptr()
+ runqput(_p_, newg, true)
+
+ if mainStarted {
+ wakep()
+ }
+ })
+}
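+
+// For reference, every go statement lowers to a call to newproc. A user
+// program containing
+//
+//	go handle(conn)
+//
+// is compiled roughly into a newproc call whose funcval captures handle
+// and conn; the new G then lands in the current P's run queue (as
+// runnext) and an idle P is woken if one exists.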
+
+// Create a new g in state _Grunnable, starting at fn. callerpc is the
+// address of the go statement that created this. The caller is responsible
+// for adding the new g to the scheduler.
+func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
+ _g_ := getg()
+
+ if fn == nil {
+ fatal("go of nil func value")
+ }
+ acquirem() // disable preemption because it can be holding p in a local var
+
+ _p_ := _g_.m.p.ptr()
+ newg := gfget(_p_)
+ if newg == nil {
+ newg = malg(_StackMin)
+ casgstatus(newg, _Gidle, _Gdead)
+ allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
+ }
+ if newg.stack.hi == 0 {
+ throw("newproc1: newg missing stack")
+ }
+
+ if readgstatus(newg) != _Gdead {
+ throw("newproc1: new g is not Gdead")
+ }
+
+ totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
+ totalSize = alignUp(totalSize, sys.StackAlign)
+ sp := newg.stack.hi - totalSize
+ spArg := sp
+ if usesLR {
+ // caller's LR
+ *(*uintptr)(unsafe.Pointer(sp)) = 0
+ prepGoExitFrame(sp)
+ spArg += sys.MinFrameSize
+ }
+
+ memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
+ newg.sched.sp = sp
+ newg.stktopsp = sp
+ newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
+ newg.sched.g = guintptr(unsafe.Pointer(newg))
+ gostartcallfn(&newg.sched, fn)
+ newg.gopc = callerpc
+ newg.ancestors = saveAncestors(callergp)
+ newg.startpc = fn.fn
+ if isSystemGoroutine(newg, false) {
+ atomic.Xadd(&sched.ngsys, +1)
+ } else {
+ // Only user goroutines inherit pprof labels.
+ if _g_.m.curg != nil {
+ newg.labels = _g_.m.curg.labels
+ }
+ if goroutineProfile.active {
+ // A concurrent goroutine profile is running. It should include
+ // exactly the set of goroutines that were alive when the goroutine
+ // profiler first stopped the world. That does not include newg, so
+ // mark it as not needing a profile before transitioning it from
+ // _Gdead.
+ newg.goroutineProfiled.Store(goroutineProfileSatisfied)
+ }
+ }
+ // Track initial transition?
+ newg.trackingSeq = uint8(fastrand())
+ if newg.trackingSeq%gTrackingPeriod == 0 {
+ newg.tracking = true
+ }
+ casgstatus(newg, _Gdead, _Grunnable)
+ gcController.addScannableStack(_p_, int64(newg.stack.hi-newg.stack.lo))
+
+ if _p_.goidcache == _p_.goidcacheend {
+ // Sched.goidgen is the last allocated id,
+ // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
+ // At startup sched.goidgen=0, so main goroutine receives goid=1.
+ _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
+ _p_.goidcache -= _GoidCacheBatch - 1
+ _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
+ }
+ newg.goid = int64(_p_.goidcache)
+ _p_.goidcache++
+ if raceenabled {
+ newg.racectx = racegostart(callerpc)
+ if newg.labels != nil {
+ // See note in proflabel.go on labelSync's role in synchronizing
+ // with the reads in the signal handler.
+ racereleasemergeg(newg, unsafe.Pointer(&labelSync))
+ }
+ }
+ if trace.enabled {
+ traceGoCreate(newg, newg.startpc)
+ }
+ releasem(_g_.m)
+
+ return newg
+}
+
+// saveAncestors copies previous ancestors of the given caller g and
+// includes info for the current caller in a new set of tracebacks for
+// a g being created.
+func saveAncestors(callergp *g) *[]ancestorInfo {
+ // Copy all prior info, except for the root goroutine (goid 0).
+ if debug.tracebackancestors <= 0 || callergp.goid == 0 {
+ return nil
+ }
+ var callerAncestors []ancestorInfo
+ if callergp.ancestors != nil {
+ callerAncestors = *callergp.ancestors
+ }
+ n := int32(len(callerAncestors)) + 1
+ if n > debug.tracebackancestors {
+ n = debug.tracebackancestors
+ }
+ ancestors := make([]ancestorInfo, n)
+ copy(ancestors[1:], callerAncestors)
+
+ var pcs [_TracebackMaxFrames]uintptr
+ npcs := gcallers(callergp, 0, pcs[:])
+ ipcs := make([]uintptr, npcs)
+ copy(ipcs, pcs[:])
+ ancestors[0] = ancestorInfo{
+ pcs: ipcs,
+ goid: callergp.goid,
+ gopc: callergp.gopc,
+ }
+
+ ancestorsp := new([]ancestorInfo)
+ *ancestorsp = ancestors
+ return ancestorsp
+}
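+
+// Ancestor tracking is opt-in. To record up to N generations of
+// creation tracebacks (printed on crash alongside the usual "created
+// by" line), run a program with, for example:
+//
+//	GODEBUG=tracebackancestors=10 ./myprog
+//
+// ./myprog is a placeholder; each goroutine created while the setting
+// is active carries the bounded ancestry chain built here.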
+
+// Put on gfree list.
+// If local list is too long, transfer a batch to the global list.
+func gfput(_p_ *p, gp *g) {
+ if readgstatus(gp) != _Gdead {
+ throw("gfput: bad status (not Gdead)")
+ }
+
+ stksize := gp.stack.hi - gp.stack.lo
+
+ if stksize != uintptr(startingStackSize) {
+ // non-standard stack size - free it.
+ stackfree(gp.stack)
+ gp.stack.lo = 0
+ gp.stack.hi = 0
+ gp.stackguard0 = 0
+ }
+
+ _p_.gFree.push(gp)
+ _p_.gFree.n++
+ if _p_.gFree.n >= 64 {
+ var (
+ inc int32
+ stackQ gQueue
+ noStackQ gQueue
+ )
+ for _p_.gFree.n >= 32 {
+ gp = _p_.gFree.pop()
+ _p_.gFree.n--
+ if gp.stack.lo == 0 {
+ noStackQ.push(gp)
+ } else {
+ stackQ.push(gp)
+ }
+ inc++
+ }
+ lock(&sched.gFree.lock)
+ sched.gFree.noStack.pushAll(noStackQ)
+ sched.gFree.stack.pushAll(stackQ)
+ sched.gFree.n += inc
+ unlock(&sched.gFree.lock)
+ }
+}
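+
+// gfput and gfget form the classic two-level free list: global-lock
+// traffic is amortized by moving items in batches once the local list
+// crosses a high-water mark (64), keeping half (32) behind. The put
+// side in isolation (a sketch; list, lockedList, and node are
+// hypothetical):
+//
+//	func put(local *list, global *lockedList, x *node) {
+//		local.push(x) // push updates local.n
+//		if local.n >= 64 {
+//			var batch list
+//			for local.n >= 32 { // keep half locally
+//				batch.push(local.pop()) // pop updates local.n
+//			}
+//			global.mu.Lock()
+//			global.pushAll(batch)
+//			global.mu.Unlock()
+//		}
+//	}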
+
+// Get from gfree list.
+// If local list is empty, grab a batch from global list.
+func gfget(_p_ *p) *g {
+retry:
+ if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
+ lock(&sched.gFree.lock)
+ // Move a batch of free Gs to the P.
+ for _p_.gFree.n < 32 {
+ // Prefer Gs with stacks.
+ gp := sched.gFree.stack.pop()
+ if gp == nil {
+ gp = sched.gFree.noStack.pop()
+ if gp == nil {
+ break
+ }
+ }
+ sched.gFree.n--
+ _p_.gFree.push(gp)
+ _p_.gFree.n++
+ }
+ unlock(&sched.gFree.lock)
+ goto retry
+ }
+ gp := _p_.gFree.pop()
+ if gp == nil {
+ return nil
+ }
+ _p_.gFree.n--
+ if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
+ // Deallocate old stack. We kept it in gfput because it was the
+ // right size when the goroutine was put on the free list, but
+ // the right size has changed since then.
+ systemstack(func() {
+ stackfree(gp.stack)
+ gp.stack.lo = 0
+ gp.stack.hi = 0
+ gp.stackguard0 = 0
+ })
+ }
+ if gp.stack.lo == 0 {
+ // Stack was deallocated in gfput or just above. Allocate a new one.
+ systemstack(func() {
+ gp.stack = stackalloc(startingStackSize)
+ })
+ gp.stackguard0 = gp.stack.lo + _StackGuard
+ } else {
+ if raceenabled {
+ racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
+ }
+ if msanenabled {
+ msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
+ }
+ if asanenabled {
+ asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
+ }
+ }
+ return gp
+}
+
+// Purge all cached G's from gfree list to the global list.
+func gfpurge(_p_ *p) {
+ var (
+ inc int32
+ stackQ gQueue
+ noStackQ gQueue
+ )
+ for !_p_.gFree.empty() {
+ gp := _p_.gFree.pop()
+ _p_.gFree.n--
+ if gp.stack.lo == 0 {
+ noStackQ.push(gp)
+ } else {
+ stackQ.push(gp)
+ }
+ inc++
+ }
+ lock(&sched.gFree.lock)
+ sched.gFree.noStack.pushAll(noStackQ)
+ sched.gFree.stack.pushAll(stackQ)
+ sched.gFree.n += inc
+ unlock(&sched.gFree.lock)
+}
+
+// Breakpoint executes a breakpoint trap.
+func Breakpoint() {
+ breakpoint()
+}
+
+// dolockOSThread is called by LockOSThread and lockOSThread below
+// after they modify m.locked. Do not allow preemption during this call,
+// or else the m might be different in this function than in the caller.
+//
+//go:nosplit
+func dolockOSThread() {
+ if GOARCH == "wasm" {
+ return // no threads on wasm yet
+ }
+ _g_ := getg()
+ _g_.m.lockedg.set(_g_)
+ _g_.lockedm.set(_g_.m)
+}
+
+//go:nosplit
+
+// LockOSThread wires the calling goroutine to its current operating system thread.
+// The calling goroutine will always execute in that thread,
+// and no other goroutine will execute in it,
+// until the calling goroutine has made as many calls to
+// UnlockOSThread as to LockOSThread.
+// If the calling goroutine exits without unlocking the thread,
+// the thread will be terminated.
+//
+// All init functions are run on the startup thread. Calling LockOSThread
+// from an init function will cause the main function to be invoked on
+// that thread.
+//
+// A goroutine should call LockOSThread before calling OS services or
+// non-Go library functions that depend on per-thread state.
+func LockOSThread() {
+ if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
+ // If we need to start a new thread from the locked
+ // thread, we need the template thread. Start it now
+ // while we're in a known-good state.
+ startTemplateThread()
+ }
+ _g_ := getg()
+ _g_.m.lockedExt++
+ if _g_.m.lockedExt == 0 {
+ _g_.m.lockedExt--
+ panic("LockOSThread nesting overflow")
+ }
+ dolockOSThread()
+}
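+
+// A typical use from user code is pinning the goroutine before touching
+// per-thread OS state (a signal mask, a C library with thread-local
+// storage, a UI event loop). For example:
+//
+//	func doThreadLocalWork() {
+//		runtime.LockOSThread()
+//		defer runtime.UnlockOSThread()
+//		// This goroutine is now the only one that will run on this
+//		// OS thread until the lock count drops to zero.
+//	}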
+
+//go:nosplit
+func lockOSThread() {
+ getg().m.lockedInt++
+ dolockOSThread()
+}
+
+// dounlockOSThread is called by UnlockOSThread and unlockOSThread below
+// after they update m->locked. Do not allow preemption during this call,
+// or else the m might be different in this function than in the caller.
+//
+//go:nosplit
+func dounlockOSThread() {
+ if GOARCH == "wasm" {
+ return // no threads on wasm yet
+ }
+ _g_ := getg()
+ if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
+ return
+ }
+ _g_.m.lockedg = 0
+ _g_.lockedm = 0
+}
+
+//go:nosplit
+
+// UnlockOSThread undoes an earlier call to LockOSThread.
+// If this drops the number of active LockOSThread calls on the
+// calling goroutine to zero, it unwires the calling goroutine from
+// its fixed operating system thread.
+// If there are no active LockOSThread calls, this is a no-op.
+//
+// Before calling UnlockOSThread, the caller must ensure that the OS
+// thread is suitable for running other goroutines. If the caller made
+// any permanent changes to the state of the thread that would affect
+// other goroutines, it should not call this function and thus leave
+// the goroutine locked to the OS thread until the goroutine (and
+// hence the thread) exits.
+func UnlockOSThread() {
+ _g_ := getg()
+ if _g_.m.lockedExt == 0 {
+ return
+ }
+ _g_.m.lockedExt--
+ dounlockOSThread()
+}
+
+//go:nosplit
+func unlockOSThread() {
+ _g_ := getg()
+ if _g_.m.lockedInt == 0 {
+ systemstack(badunlockosthread)
+ }
+ _g_.m.lockedInt--
+ dounlockOSThread()
+}
+
+func badunlockosthread() {
+ throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
+}
+
+func gcount() int32 {
+ n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
+ for _, _p_ := range allp {
+ n -= _p_.gFree.n
+ }
+
+ // All these variables can be changed concurrently, so the result can be inconsistent.
+ // But at least the current goroutine is running.
+ if n < 1 {
+ n = 1
+ }
+ return n
+}
+
+func mcount() int32 {
+ return int32(sched.mnext - sched.nmfreed)
+}
+
+var prof struct {
+ signalLock uint32
+ hz int32
+}
+
+func _System() { _System() }
+func _ExternalCode() { _ExternalCode() }
+func _LostExternalCode() { _LostExternalCode() }
+func _GC() { _GC() }
+func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
+func _VDSO() { _VDSO() }
+
+// Called if we receive a SIGPROF signal.
+// Called by the signal handler, may run during STW.
+//
+//go:nowritebarrierrec
+func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
+ if prof.hz == 0 {
+ return
+ }
+
+ // If mp.profilehz is 0, then profiling is not enabled for this thread.
+ // We must check this to avoid a deadlock between setcpuprofilerate
+ // and the call to cpuprof.add, below.
+ if mp != nil && mp.profilehz == 0 {
+ return
+ }
+
+ // On mips{,le}/arm, 64bit atomics are emulated with spinlocks, in
+ // runtime/internal/atomic. If SIGPROF arrives while the program is inside
+ // the critical section, it creates a deadlock (when writing the sample).
+ // As a workaround, create a counter of SIGPROFs while in critical section
+ // to store the count, and pass it to sigprof.add() later when SIGPROF is
+ // received from somewhere else (with _LostSIGPROFDuringAtomic64 as pc).
+ if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
+ if f := findfunc(pc); f.valid() {
+ if hasPrefix(funcname(f), "runtime/internal/atomic") {
+ cpuprof.lostAtomic++
+ return
+ }
+ }
+ if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
+ // runtime/internal/atomic functions call into kernel
+ // helpers on arm < 7. See
+ // runtime/internal/atomic/sys_linux_arm.s.
+ cpuprof.lostAtomic++
+ return
+ }
+ }
+
+ // Profiling runs concurrently with GC, so it must not allocate.
+ // Set a trap in case the code does allocate.
+ // Note that on windows, one thread takes profiles of all the
+ // other threads, so mp is usually not getg().m.
+ // In fact mp may not even be stopped.
+ // See golang.org/issue/17165.
+ getg().m.mallocing++
+
+ var stk [maxCPUProfStack]uintptr
+ n := 0
+ if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
+ cgoOff := 0
+ // Check cgoCallersUse to make sure that we are not
+ // interrupting other code that is fiddling with
+ // cgoCallers. We are running in a signal handler
+ // with all signals blocked, so we don't have to worry
+ // about any other code interrupting us.
+ if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
+ for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
+ cgoOff++
+ }
+ copy(stk[:], mp.cgoCallers[:cgoOff])
+ mp.cgoCallers[0] = 0
+ }
+
+ // Collect Go stack that leads to the cgo call.
+ n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0)
+ if n > 0 {
+ n += cgoOff
+ }
+ } else {
+ n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
+ }
+
+ if n <= 0 {
+ // Normal traceback is impossible or has failed.
+ // See if it falls into several common cases.
+ n = 0
+ if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
+ // Libcall, i.e. runtime syscall on windows.
+ // Collect Go stack that leads to the call.
+ n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
+ }
+ if n == 0 && mp != nil && mp.vdsoSP != 0 {
+ n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
+ }
+ if n == 0 {
+ // If all of the above has failed, account it against abstract "System" or "GC".
+ n = 2
+ if inVDSOPage(pc) {
+ pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
+ } else if pc > firstmoduledata.etext {
+ // "ExternalCode" is better than "etext".
+ pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
+ }
+ stk[0] = pc
+ if mp.preemptoff != "" {
+ stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
+ } else {
+ stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
+ }
+ }
+ }
+
+ if prof.hz != 0 {
+ // Note: it can happen on Windows that we interrupted a system thread
+	// with no g, so gp could be nil. The other nil checks are done out of
+ // caution, but not expected to be nil in practice.
+ var tagPtr *unsafe.Pointer
+ if gp != nil && gp.m != nil && gp.m.curg != nil {
+ tagPtr = &gp.m.curg.labels
+ }
+ cpuprof.add(tagPtr, stk[:n])
+
+ gprof := gp
+ var pp *p
+ if gp != nil && gp.m != nil {
+ if gp.m.curg != nil {
+ gprof = gp.m.curg
+ }
+ pp = gp.m.p.ptr()
+ }
+ traceCPUSample(gprof, pp, stk[:n])
+ }
+ getg().m.mallocing--
+}
+
+// setcpuprofilerate sets the CPU profiling rate to hz times per second.
+// If hz <= 0, setcpuprofilerate turns off CPU profiling.
+func setcpuprofilerate(hz int32) {
+ // Force sane arguments.
+ if hz < 0 {
+ hz = 0
+ }
+
+ // Disable preemption, otherwise we can be rescheduled to another thread
+ // that has profiling enabled.
+ _g_ := getg()
+ _g_.m.locks++
+
+ // Stop profiler on this thread so that it is safe to lock prof.
+ // if a profiling signal came in while we had prof locked,
+ // it would deadlock.
+ setThreadCPUProfiler(0)
+
+ for !atomic.Cas(&prof.signalLock, 0, 1) {
+ osyield()
+ }
+ if prof.hz != hz {
+ setProcessCPUProfiler(hz)
+ prof.hz = hz
+ }
+ atomic.Store(&prof.signalLock, 0)
+
+ lock(&sched.lock)
+ sched.profilehz = hz
+ unlock(&sched.lock)
+
+ if hz != 0 {
+ setThreadCPUProfiler(hz)
+ }
+
+ _g_.m.locks--
+}
+
+// init initializes pp, which may be a freshly allocated p or a
+// previously destroyed p, and transitions it to status _Pgcstop.
+func (pp *p) init(id int32) {
+ pp.id = id
+ pp.status = _Pgcstop
+ pp.sudogcache = pp.sudogbuf[:0]
+ pp.deferpool = pp.deferpoolbuf[:0]
+ pp.wbBuf.reset()
+ if pp.mcache == nil {
+ if id == 0 {
+ if mcache0 == nil {
+ throw("missing mcache?")
+ }
+ // Use the bootstrap mcache0. Only one P will get
+ // mcache0: the one with ID 0.
+ pp.mcache = mcache0
+ } else {
+ pp.mcache = allocmcache()
+ }
+ }
+ if raceenabled && pp.raceprocctx == 0 {
+ if id == 0 {
+ pp.raceprocctx = raceprocctx0
+ raceprocctx0 = 0 // bootstrap
+ } else {
+ pp.raceprocctx = raceproccreate()
+ }
+ }
+ lockInit(&pp.timersLock, lockRankTimers)
+
+ // This P may get timers when it starts running. Set the mask here
+ // since the P may not go through pidleget (notably P 0 on startup).
+ timerpMask.set(id)
+ // Similarly, we may not go through pidleget before this P starts
+ // running if it is P 0 on startup.
+ idlepMask.clear(id)
+}
+
+// destroy releases all of the resources associated with pp and
+// transitions it to status _Pdead.
+//
+// sched.lock must be held and the world must be stopped.
+func (pp *p) destroy() {
+ assertLockHeld(&sched.lock)
+ assertWorldStopped()
+
+ // Move all runnable goroutines to the global queue
+ for pp.runqhead != pp.runqtail {
+ // Pop from tail of local queue
+ pp.runqtail--
+ gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
+ // Push onto head of global queue
+ globrunqputhead(gp)
+ }
+ if pp.runnext != 0 {
+ globrunqputhead(pp.runnext.ptr())
+ pp.runnext = 0
+ }
+ if len(pp.timers) > 0 {
+ plocal := getg().m.p.ptr()
+ // The world is stopped, but we acquire timersLock to
+ // protect against sysmon calling timeSleepUntil.
+ // This is the only case where we hold the timersLock of
+ // more than one P, so there are no deadlock concerns.
+ lock(&plocal.timersLock)
+ lock(&pp.timersLock)
+ moveTimers(plocal, pp.timers)
+ pp.timers = nil
+ pp.numTimers = 0
+ pp.deletedTimers = 0
+ atomic.Store64(&pp.timer0When, 0)
+ unlock(&pp.timersLock)
+ unlock(&plocal.timersLock)
+ }
+ // Flush p's write barrier buffer.
+ if gcphase != _GCoff {
+ wbBufFlush1(pp)
+ pp.gcw.dispose()
+ }
+ for i := range pp.sudogbuf {
+ pp.sudogbuf[i] = nil
+ }
+ pp.sudogcache = pp.sudogbuf[:0]
+ for j := range pp.deferpoolbuf {
+ pp.deferpoolbuf[j] = nil
+ }
+ pp.deferpool = pp.deferpoolbuf[:0]
+ systemstack(func() {
+ for i := 0; i < pp.mspancache.len; i++ {
+ // Safe to call since the world is stopped.
+ mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
+ }
+ pp.mspancache.len = 0
+ lock(&mheap_.lock)
+ pp.pcache.flush(&mheap_.pages)
+ unlock(&mheap_.lock)
+ })
+ freemcache(pp.mcache)
+ pp.mcache = nil
+ gfpurge(pp)
+ traceProcFree(pp)
+ if raceenabled {
+ if pp.timerRaceCtx != 0 {
+ // The race detector code uses a callback to fetch
+ // the proc context, so arrange for that callback
+ // to see the right thing.
+ // This hack only works because we are the only
+ // thread running.
+ mp := getg().m
+ phold := mp.p.ptr()
+ mp.p.set(pp)
+
+ racectxend(pp.timerRaceCtx)
+ pp.timerRaceCtx = 0
+
+ mp.p.set(phold)
+ }
+ raceprocdestroy(pp.raceprocctx)
+ pp.raceprocctx = 0
+ }
+ pp.gcAssistTime = 0
+ pp.status = _Pdead
+}
+
+// Change number of processors.
+//
+// sched.lock must be held, and the world must be stopped.
+//
+// gcworkbufs must not be being modified by either the GC or the write barrier
+// code, so the GC must not be running if the number of Ps actually changes.
+//
+// Returns the list of Ps with local work; they need to be scheduled by the caller.
+func procresize(nprocs int32) *p {
+ assertLockHeld(&sched.lock)
+ assertWorldStopped()
+
+ old := gomaxprocs
+ if old < 0 || nprocs <= 0 {
+ throw("procresize: invalid arg")
+ }
+ if trace.enabled {
+ traceGomaxprocs(nprocs)
+ }
+
+ // update statistics
+ now := nanotime()
+ if sched.procresizetime != 0 {
+ sched.totaltime += int64(old) * (now - sched.procresizetime)
+ }
+ sched.procresizetime = now
+
+ maskWords := (nprocs + 31) / 32
+
+ // Grow allp if necessary.
+ if nprocs > int32(len(allp)) {
+ // Synchronize with retake, which could be running
+ // concurrently since it doesn't run on a P.
+ lock(&allpLock)
+ if nprocs <= int32(cap(allp)) {
+ allp = allp[:nprocs]
+ } else {
+ nallp := make([]*p, nprocs)
+ // Copy everything up to allp's cap so we
+ // never lose old allocated Ps.
+ copy(nallp, allp[:cap(allp)])
+ allp = nallp
+ }
+
+ if maskWords <= int32(cap(idlepMask)) {
+ idlepMask = idlepMask[:maskWords]
+ timerpMask = timerpMask[:maskWords]
+ } else {
+ nidlepMask := make([]uint32, maskWords)
+ // No need to copy beyond len, old Ps are irrelevant.
+ copy(nidlepMask, idlepMask)
+ idlepMask = nidlepMask
+
+ ntimerpMask := make([]uint32, maskWords)
+ copy(ntimerpMask, timerpMask)
+ timerpMask = ntimerpMask
+ }
+ unlock(&allpLock)
+ }
+
+ // initialize new P's
+ for i := old; i < nprocs; i++ {
+ pp := allp[i]
+ if pp == nil {
+ pp = new(p)
+ }
+ pp.init(i)
+ atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
+ }
+
+ _g_ := getg()
+ if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
+ // continue to use the current P
+ _g_.m.p.ptr().status = _Prunning
+ _g_.m.p.ptr().mcache.prepareForSweep()
+ } else {
+ // release the current P and acquire allp[0].
+ //
+ // We must do this before destroying our current P
+ // because p.destroy itself has write barriers, so we
+ // need to do that from a valid P.
+ if _g_.m.p != 0 {
+ if trace.enabled {
+ // Pretend that we were descheduled
+ // and then scheduled again to keep
+ // the trace sane.
+ traceGoSched()
+ traceProcStop(_g_.m.p.ptr())
+ }
+ _g_.m.p.ptr().m = 0
+ }
+ _g_.m.p = 0
+ p := allp[0]
+ p.m = 0
+ p.status = _Pidle
+ acquirep(p)
+ if trace.enabled {
+ traceGoStart()
+ }
+ }
+
+ // g.m.p is now set, so we no longer need mcache0 for bootstrapping.
+ mcache0 = nil
+
+ // release resources from unused P's
+ for i := nprocs; i < old; i++ {
+ p := allp[i]
+ p.destroy()
+ // can't free P itself because it can be referenced by an M in syscall
+ }
+
+ // Trim allp.
+ if int32(len(allp)) != nprocs {
+ lock(&allpLock)
+ allp = allp[:nprocs]
+ idlepMask = idlepMask[:maskWords]
+ timerpMask = timerpMask[:maskWords]
+ unlock(&allpLock)
+ }
+
+ var runnablePs *p
+ for i := nprocs - 1; i >= 0; i-- {
+ p := allp[i]
+ if _g_.m.p.ptr() == p {
+ continue
+ }
+ p.status = _Pidle
+ if runqempty(p) {
+ pidleput(p, now)
+ } else {
+ p.m.set(mget())
+ p.link.set(runnablePs)
+ runnablePs = p
+ }
+ }
+ stealOrder.reset(uint32(nprocs))
+ var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
+ atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
+ if old != nprocs {
+ // Notify the limiter that the amount of procs has changed.
+ gcCPULimiter.resetCapacity(now, nprocs)
+ }
+ return runnablePs
+}
+
+// Associate p and the current m.
+//
+// This function is allowed to have write barriers even if the caller
+// isn't because it immediately acquires _p_.
+//
+//go:yeswritebarrierrec
+func acquirep(_p_ *p) {
+ // Do the part that isn't allowed to have write barriers.
+ wirep(_p_)
+
+ // Have p; write barriers now allowed.
+
+ // Perform deferred mcache flush before this P can allocate
+ // from a potentially stale mcache.
+ _p_.mcache.prepareForSweep()
+
+ if trace.enabled {
+ traceProcStart()
+ }
+}
+
+// wirep is the first step of acquirep, which actually associates the
+// current M to _p_. This is broken out so we can disallow write
+// barriers for this part, since we don't yet have a P.
+//
+//go:nowritebarrierrec
+//go:nosplit
+func wirep(_p_ *p) {
+ _g_ := getg()
+
+ if _g_.m.p != 0 {
+ throw("wirep: already in go")
+ }
+ if _p_.m != 0 || _p_.status != _Pidle {
+ id := int64(0)
+ if _p_.m != 0 {
+ id = _p_.m.ptr().id
+ }
+ print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
+ throw("wirep: invalid p state")
+ }
+ _g_.m.p.set(_p_)
+ _p_.m.set(_g_.m)
+ _p_.status = _Prunning
+}
+
+// Disassociate p and the current m.
+func releasep() *p {
+ _g_ := getg()
+
+ if _g_.m.p == 0 {
+ throw("releasep: invalid arg")
+ }
+ _p_ := _g_.m.p.ptr()
+ if _p_.m.ptr() != _g_.m || _p_.status != _Prunning {
+ print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " p->status=", _p_.status, "\n")
+ throw("releasep: invalid p state")
+ }
+ if trace.enabled {
+ traceProcStop(_g_.m.p.ptr())
+ }
+ _g_.m.p = 0
+ _p_.m = 0
+ _p_.status = _Pidle
+ return _p_
+}
+
+func incidlelocked(v int32) {
+ lock(&sched.lock)
+ sched.nmidlelocked += v
+ if v > 0 {
+ checkdead()
+ }
+ unlock(&sched.lock)
+}
+
+// Check for deadlock situation.
+// The check is based on the number of running M's; if it is 0, we have a deadlock.
+// sched.lock must be held.
+func checkdead() {
+ assertLockHeld(&sched.lock)
+
+ // For -buildmode=c-shared or -buildmode=c-archive it's OK if
+ // there are no running goroutines. The calling program is
+ // assumed to be running.
+ if islibrary || isarchive {
+ return
+ }
+
+ // If we are dying because of a signal caught on an already idle thread,
+ // freezetheworld will cause all running threads to block.
+ // The runtime will then essentially be in a deadlock state,
+ // except that there is a thread that will call exit soon.
+ if panicking > 0 {
+ return
+ }
+
+ // If we are not running under cgo but have an extra M, then account
+ // for it. (It is possible to have an extra M on Windows without cgo to
+ // accommodate callbacks created by syscall.NewCallback. See issue #6751
+ // for details.)
+ var run0 int32
+ if !iscgo && cgoHasExtraM {
+ mp := lockextra(true)
+ haveExtraM := extraMCount > 0
+ unlockextra(mp)
+ if haveExtraM {
+ run0 = 1
+ }
+ }
+
+ run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
+ if run > run0 {
+ return
+ }
+ if run < 0 {
+ print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
+ throw("checkdead: inconsistent counts")
+ }
+
+ grunning := 0
+ forEachG(func(gp *g) {
+ if isSystemGoroutine(gp, false) {
+ return
+ }
+ s := readgstatus(gp)
+ switch s &^ _Gscan {
+ case _Gwaiting,
+ _Gpreempted:
+ grunning++
+ case _Grunnable,
+ _Grunning,
+ _Gsyscall:
+ print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
+ throw("checkdead: runnable g")
+ }
+ })
+ if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
+ unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang
+ fatal("no goroutines (main called runtime.Goexit) - deadlock!")
+ }
+
+ // Maybe jump time forward for playground.
+ if faketime != 0 {
+ if when := timeSleepUntil(); when < maxWhen {
+ faketime = when
+
+ // Start an M to steal the timer.
+ pp, _ := pidleget(faketime)
+ if pp == nil {
+ // There should always be a free P since
+ // nothing is running.
+ throw("checkdead: no p for timer")
+ }
+ mp := mget()
+ if mp == nil {
+ // There should always be a free M since
+ // nothing is running.
+ throw("checkdead: no m for timer")
+ }
+ // M must be spinning to steal. We set it explicitly
+ // here, but since this is the only M it would
+ // become spinning on its own anyway.
+ atomic.Xadd(&sched.nmspinning, 1)
+ mp.spinning = true
+ mp.nextp.set(pp)
+ notewakeup(&mp.park)
+ return
+ }
+ }
+
+ // There are no goroutines running, so we can look at the P's.
+ for _, _p_ := range allp {
+ if len(_p_.timers) > 0 {
+ return
+ }
+ }
+
+ unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang
+ fatal("all goroutines are asleep - deadlock!")
+}
+
+// forcegcperiod is the maximum time in nanoseconds between garbage
+// collections. If we go this long without a garbage collection, one
+// is forced to run.
+//
+// This is a variable for testing purposes. It normally doesn't change.
+var forcegcperiod int64 = 2 * 60 * 1e9
+
+// needSysmonWorkaround is true if the workaround for
+// golang.org/issue/42515 is needed on NetBSD.
+var needSysmonWorkaround bool = false
+
+// Always runs without a P, so write barriers are not allowed.
+//
+//go:nowritebarrierrec
+func sysmon() {
+ lock(&sched.lock)
+ sched.nmsys++
+ checkdead()
+ unlock(&sched.lock)
+
+ lasttrace := int64(0)
+ idle := 0 // how many cycles in succession we have not woken anybody up
+ delay := uint32(0)
+
+ for {
+ if idle == 0 { // start with 20us sleep...
+ delay = 20
+ } else if idle > 50 { // start doubling the sleep after 1ms...
+ delay *= 2
+ }
+ if delay > 10*1000 { // up to 10ms
+ delay = 10 * 1000
+ }
+ usleep(delay)
+
+ // sysmon should not enter deep sleep if schedtrace is enabled so that
+ // it can print that information at the right time.
+ //
+ // It should also not enter deep sleep if there are any active P's so
+ // that it can retake P's from syscalls, preempt long running G's, and
+ // poll the network if all P's are busy for long stretches.
+ //
+ // It should wake up from deep sleep if any P's become active either due
+ // to exiting a syscall or waking up due to a timer expiring so that it
+ // can resume performing those duties. If it wakes from a syscall it
+ // resets idle and delay as a bet that since it had retaken a P from a
+ // syscall before, it may need to do it again shortly after the
+ // application starts work again. It does not reset idle when waking
+ // from a timer to avoid adding system load to applications that spend
+ // most of their time sleeping.
+ now := nanotime()
+ if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
+ lock(&sched.lock)
+ if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
+ syscallWake := false
+ next := timeSleepUntil()
+ if next > now {
+ atomic.Store(&sched.sysmonwait, 1)
+ unlock(&sched.lock)
+ // Make wake-up period small enough
+ // for the sampling to be correct.
+ sleep := forcegcperiod / 2
+ if next-now < sleep {
+ sleep = next - now
+ }
+ shouldRelax := sleep >= osRelaxMinNS
+ if shouldRelax {
+ osRelax(true)
+ }
+ syscallWake = notetsleep(&sched.sysmonnote, sleep)
+ if shouldRelax {
+ osRelax(false)
+ }
+ lock(&sched.lock)
+ atomic.Store(&sched.sysmonwait, 0)
+ noteclear(&sched.sysmonnote)
+ }
+ if syscallWake {
+ idle = 0
+ delay = 20
+ }
+ }
+ unlock(&sched.lock)
+ }
+
+ lock(&sched.sysmonlock)
+ // Update now in case we blocked on sysmonnote or spent a long time
+ // blocked on schedlock or sysmonlock above.
+ now = nanotime()
+
+ // trigger libc interceptors if needed
+ if *cgo_yield != nil {
+ asmcgocall(*cgo_yield, nil)
+ }
+ // poll network if not polled for more than 10ms
+ lastpoll := int64(atomic.Load64(&sched.lastpoll))
+ if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
+ atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
+ list := netpoll(0) // non-blocking - returns list of goroutines
+ if !list.empty() {
+ // Need to decrement number of idle locked M's
+ // (pretending that one more is running) before injectglist.
+ // Otherwise it can lead to the following situation:
+ // injectglist grabs all P's but before it starts M's to run the P's,
+ // another M returns from syscall, finishes running its G,
+ // observes that there is no work to do and no other running M's
+ // and reports deadlock.
+ incidlelocked(-1)
+ injectglist(&list)
+ incidlelocked(1)
+ }
+ }
+ if GOOS == "netbsd" && needSysmonWorkaround {
+ // netpoll is responsible for waiting for timer
+ // expiration, so we typically don't have to worry
+ // about starting an M to service timers. (Note that
+ // the sleep for timeSleepUntil above simply ensures that
+ // sysmon starts running again when that timer expiration
+ // may cause Go code to run again.)
+ //
+ // However, netbsd has a kernel bug that sometimes
+ // misses netpollBreak wake-ups, which can lead to
+ // unbounded delays servicing timers. If we detect this
+ // overrun, then startm to get something to handle the
+ // timer.
+ //
+ // See issue 42515 and
+ // https://gnats.netbsd.org/cgi-bin/query-pr-single.pl?number=50094.
+ if next := timeSleepUntil(); next < now {
+ startm(nil, false)
+ }
+ }
+ if scavenger.sysmonWake.Load() != 0 {
+ // Kick the scavenger awake if someone requested it.
+ scavenger.wake()
+ }
+ // retake P's blocked in syscalls
+ // and preempt long running G's
+ if retake(now) != 0 {
+ idle = 0
+ } else {
+ idle++
+ }
+ // check if we need to force a GC
+ if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
+ lock(&forcegc.lock)
+ forcegc.idle = 0
+ var list gList
+ list.push(forcegc.g)
+ injectglist(&list)
+ unlock(&forcegc.lock)
+ }
+ if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
+ lasttrace = now
+ schedtrace(debug.scheddetail > 0)
+ }
+ unlock(&sched.sysmonlock)
+ }
+}
+
+type sysmontick struct {
+ schedtick uint32
+ schedwhen int64
+ syscalltick uint32
+ syscallwhen int64
+}
+
+// forcePreemptNS is the time slice given to a G before it is
+// preempted.
+const forcePreemptNS = 10 * 1000 * 1000 // 10ms
+
+func retake(now int64) uint32 {
+ n := 0
+ // Prevent allp slice changes. This lock will be completely
+ // uncontended unless we're already stopping the world.
+ lock(&allpLock)
+ // We can't use a range loop over allp because we may
+ // temporarily drop the allpLock. Hence, we need to re-fetch
+ // allp each time around the loop.
+ for i := 0; i < len(allp); i++ {
+ _p_ := allp[i]
+ if _p_ == nil {
+ // This can happen if procresize has grown
+ // allp but not yet created new Ps.
+ continue
+ }
+ pd := &_p_.sysmontick
+ s := _p_.status
+ sysretake := false
+ if s == _Prunning || s == _Psyscall {
+ // Preempt G if it's running for too long.
+ t := int64(_p_.schedtick)
+ if int64(pd.schedtick) != t {
+ pd.schedtick = uint32(t)
+ pd.schedwhen = now
+ } else if pd.schedwhen+forcePreemptNS <= now {
+ preemptone(_p_)
+ // In the case of a syscall, preemptone() doesn't
+ // work, because there is no M wired to the P.
+ sysretake = true
+ }
+ }
+ if s == _Psyscall {
+ // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
+ t := int64(_p_.syscalltick)
+ if !sysretake && int64(pd.syscalltick) != t {
+ pd.syscalltick = uint32(t)
+ pd.syscallwhen = now
+ continue
+ }
+ // On the one hand we don't want to retake Ps if there is no other work to do,
+ // but on the other hand we want to retake them eventually
+ // because they can prevent the sysmon thread from deep sleep.
+ if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
+ continue
+ }
+ // Drop allpLock so we can take sched.lock.
+ unlock(&allpLock)
+ // Need to decrement number of idle locked M's
+ // (pretending that one more is running) before the CAS.
+ // Otherwise the M from which we retake can exit the syscall,
+ // increment nmidle and report deadlock.
+ incidlelocked(-1)
+ if atomic.Cas(&_p_.status, s, _Pidle) {
+ if trace.enabled {
+ traceGoSysBlock(_p_)
+ traceProcStop(_p_)
+ }
+ n++
+ _p_.syscalltick++
+ handoffp(_p_)
+ }
+ incidlelocked(1)
+ lock(&allpLock)
+ }
+ }
+ unlock(&allpLock)
+ return uint32(n)
+}
+
+// Tell all goroutines that they have been preempted and they should stop.
+// This function is purely best-effort. It can fail to inform a goroutine if a
+// processor just started running it.
+// No locks need to be held.
+// Returns true if preemption request was issued to at least one goroutine.
+func preemptall() bool {
+ res := false
+ for _, _p_ := range allp {
+ if _p_.status != _Prunning {
+ continue
+ }
+ if preemptone(_p_) {
+ res = true
+ }
+ }
+ return res
+}
+
+// Tell the goroutine running on processor P to stop.
+// This function is purely best-effort. It can incorrectly fail to inform the
+// goroutine. It can inform the wrong goroutine. Even if it informs the
+// correct goroutine, that goroutine might ignore the request if it is
+// simultaneously executing newstack.
+// No lock needs to be held.
+// Returns true if preemption request was issued.
+// The actual preemption will happen at some point in the future
+// and will be indicated by gp->status no longer being
+// Grunning.
+func preemptone(_p_ *p) bool {
+ mp := _p_.m.ptr()
+ if mp == nil || mp == getg().m {
+ return false
+ }
+ gp := mp.curg
+ if gp == nil || gp == mp.g0 {
+ return false
+ }
+
+ gp.preempt = true
+
+ // Every call in a goroutine checks for stack overflow by
+ // comparing the current stack pointer to gp->stackguard0.
+ // Setting gp->stackguard0 to StackPreempt folds
+ // preemption into the normal stack overflow check.
+ gp.stackguard0 = stackPreempt
+
+ // Request an async preemption of this P.
+ if preemptMSupported && debug.asyncpreemptoff == 0 {
+ _p_.preempt = true
+ preemptM(mp)
+ }
+
+ return true
+}
+
+var starttime int64
+
+func schedtrace(detailed bool) {
+ now := nanotime()
+ if starttime == 0 {
+ starttime = now
+ }
+
+ lock(&sched.lock)
+ print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
+ if detailed {
+ print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
+ }
+ // We must be careful while reading data from P's, M's and G's.
+ // Even if we hold schedlock, most data can be changed concurrently.
+ // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
+ for i, _p_ := range allp {
+ mp := _p_.m.ptr()
+ h := atomic.Load(&_p_.runqhead)
+ t := atomic.Load(&_p_.runqtail)
+ if detailed {
+ id := int64(-1)
+ if mp != nil {
+ id = mp.id
+ }
+ print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, " timerslen=", len(_p_.timers), "\n")
+ } else {
+ // In non-detailed mode, format the lengths of per-P run queues as:
+ // [len1 len2 len3 len4]
+ print(" ")
+ if i == 0 {
+ print("[")
+ }
+ print(t - h)
+ if i == len(allp)-1 {
+ print("]\n")
+ }
+ }
+ }
+
+ if !detailed {
+ unlock(&sched.lock)
+ return
+ }
+
+ for mp := allm; mp != nil; mp = mp.alllink {
+ _p_ := mp.p.ptr()
+ gp := mp.curg
+ lockedg := mp.lockedg.ptr()
+ id1 := int32(-1)
+ if _p_ != nil {
+ id1 = _p_.id
+ }
+ id2 := int64(-1)
+ if gp != nil {
+ id2 = gp.goid
+ }
+ id3 := int64(-1)
+ if lockedg != nil {
+ id3 = lockedg.goid
+ }
+ print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
+ }
+
+ forEachG(func(gp *g) {
+ mp := gp.m
+ lockedm := gp.lockedm.ptr()
+ id1 := int64(-1)
+ if mp != nil {
+ id1 = mp.id
+ }
+ id2 := int64(-1)
+ if lockedm != nil {
+ id2 = lockedm.id
+ }
+ print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n")
+ })
+ unlock(&sched.lock)
+}
+
+// schedEnableUser enables or disables the scheduling of user
+// goroutines.
+//
+// This does not stop already running user goroutines, so the caller
+// should first stop the world when disabling user goroutines.
+func schedEnableUser(enable bool) {
+ lock(&sched.lock)
+ if sched.disable.user == !enable {
+ unlock(&sched.lock)
+ return
+ }
+ sched.disable.user = !enable
+ if enable {
+ n := sched.disable.n
+ sched.disable.n = 0
+ globrunqputbatch(&sched.disable.runnable, n)
+ unlock(&sched.lock)
+ for ; n != 0 && sched.npidle != 0; n-- {
+ startm(nil, false)
+ }
+ } else {
+ unlock(&sched.lock)
+ }
+}
+
+// schedEnabled reports whether gp should be scheduled. It returns
+// false if scheduling of gp is disabled.
+//
+// sched.lock must be held.
+func schedEnabled(gp *g) bool {
+ assertLockHeld(&sched.lock)
+
+ if sched.disable.user {
+ return isSystemGoroutine(gp, true)
+ }
+ return true
+}
+
+// Put mp on midle list.
+// sched.lock must be held.
+// May run during STW, so write barriers are not allowed.
+//
+//go:nowritebarrierrec
+func mput(mp *m) {
+ assertLockHeld(&sched.lock)
+
+ mp.schedlink = sched.midle
+ sched.midle.set(mp)
+ sched.nmidle++
+ checkdead()
+}
+
+// Try to get an m from midle list.
+// sched.lock must be held.
+// May run during STW, so write barriers are not allowed.
+//
+//go:nowritebarrierrec
+func mget() *m {
+ assertLockHeld(&sched.lock)
+
+ mp := sched.midle.ptr()
+ if mp != nil {
+ sched.midle = mp.schedlink
+ sched.nmidle--
+ }
+ return mp
+}
+
+// Put gp on the global runnable queue.
+// sched.lock must be held.
+// May run during STW, so write barriers are not allowed.
+//
+//go:nowritebarrierrec
+func globrunqput(gp *g) {
+ assertLockHeld(&sched.lock)
+
+ sched.runq.pushBack(gp)
+ sched.runqsize++
+}
+
+// Put gp at the head of the global runnable queue.
+// sched.lock must be held.
+// May run during STW, so write barriers are not allowed.
+//
+//go:nowritebarrierrec
+func globrunqputhead(gp *g) {
+ assertLockHeld(&sched.lock)
+
+ sched.runq.push(gp)
+ sched.runqsize++
+}
+
+// Put a batch of runnable goroutines on the global runnable queue.
+// This clears *batch.
+// sched.lock must be held.
+// May run during STW, so write barriers are not allowed.
+//
+//go:nowritebarrierrec
+func globrunqputbatch(batch *gQueue, n int32) {
+ assertLockHeld(&sched.lock)
+
+ sched.runq.pushBackAll(*batch)
+ sched.runqsize += n
+ *batch = gQueue{}
+}
+
+// Try to get a batch of G's from the global runnable queue.
+// sched.lock must be held.
+func globrunqget(_p_ *p, max int32) *g {
+ assertLockHeld(&sched.lock)
+
+ if sched.runqsize == 0 {
+ return nil
+ }
+
+ n := sched.runqsize/gomaxprocs + 1
+ if n > sched.runqsize {
+ n = sched.runqsize
+ }
+ if max > 0 && n > max {
+ n = max
+ }
+ if n > int32(len(_p_.runq))/2 {
+ n = int32(len(_p_.runq)) / 2
+ }
+
+ sched.runqsize -= n
+
+ gp := sched.runq.pop()
+ n--
+ for ; n > 0; n-- {
+ gp1 := sched.runq.pop()
+ runqput(_p_, gp1, false)
+ }
+ return gp
+}
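+
+// For example (editorial illustration, not part of the upstream source):
+// with sched.runqsize = 100 and gomaxprocs = 4, up to n = 100/4 + 1 = 26
+// G's are claimed, a fair share of the global queue, further capped by
+// the caller's max and by half the local queue's capacity (128 for the
+// 256-entry runq).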
+
+// pMask is an atomic bitstring with one bit per P.
+type pMask []uint32
+
+// read returns true if P id's bit is set.
+func (p pMask) read(id uint32) bool {
+ word := id / 32
+ mask := uint32(1) << (id % 32)
+ return (atomic.Load(&p[word]) & mask) != 0
+}
+
+// set sets P id's bit.
+func (p pMask) set(id int32) {
+ word := id / 32
+ mask := uint32(1) << (id % 32)
+ atomic.Or(&p[word], mask)
+}
+
+// clear clears P id's bit.
+func (p pMask) clear(id int32) {
+ word := id / 32
+ mask := uint32(1) << (id % 32)
+ atomic.And(&p[word], ^mask)
+}
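+
+// As an editorial illustration (not part of the upstream source), the
+// word/mask arithmetic above for a hypothetical id works out as:
+//
+//	mask := pMask(make([]uint32, 4)) // room for 128 Ps
+//	mask.set(70)                     // word 70/32 = 2, bit 70%32 = 6
+//	_ = mask.read(70)                // true: p[2] & (1<<6) != 0
+//	mask.clear(70)                   // p[2] &^= 1<<6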
+
+// updateTimerPMask clears pp's timer mask if it has no timers on its heap.
+//
+// Ideally, the timer mask would be kept immediately consistent on any timer
+// operations. Unfortunately, updating a shared global data structure in the
+// timer hot path adds too much overhead in applications frequently switching
+// between no timers and some timers.
+//
+// As a compromise, the timer mask is updated only on pidleget / pidleput. A
+// running P (returned by pidleget) may add a timer at any time, so its mask
+// must be set. An idle P (passed to pidleput) cannot add new timers while
+// idle, so if it has no timers at that time, its mask may be cleared.
+//
+// Thus, we get the following effects on timer-stealing in findrunnable:
+//
+// - Idle Ps with no timers when they go idle are never checked in findrunnable
+// (for work- or timer-stealing; this is the ideal case).
+// - Running Ps must always be checked.
+// - Idle Ps whose timers are stolen must continue to be checked until they run
+// again, even after timer expiration.
+//
+// When the P starts running again, the mask should be set, as a timer may be
+// added at any time.
+//
+// TODO(prattmic): Additional targeted updates may improve the above cases.
+// e.g., updating the mask when stealing a timer.
+func updateTimerPMask(pp *p) {
+ if atomic.Load(&pp.numTimers) > 0 {
+ return
+ }
+
+ // Looks like there are no timers, however another P may transiently
+ // decrement numTimers when handling a timerModified timer in
+ // checkTimers. We must take timersLock to serialize with these changes.
+ lock(&pp.timersLock)
+ if atomic.Load(&pp.numTimers) == 0 {
+ timerpMask.clear(pp.id)
+ }
+ unlock(&pp.timersLock)
+}
+
+// pidleput puts p on the _Pidle list. now must be the result of a relatively
+// recent call to nanotime, or zero. Returns now, or the current time if now was zero.
+//
+// This releases ownership of p. Once sched.lock is released it is no longer
+// safe to use p.
+//
+// sched.lock must be held.
+//
+// May run during STW, so write barriers are not allowed.
+//
+//go:nowritebarrierrec
+func pidleput(_p_ *p, now int64) int64 {
+ assertLockHeld(&sched.lock)
+
+ if !runqempty(_p_) {
+ throw("pidleput: P has non-empty run queue")
+ }
+ if now == 0 {
+ now = nanotime()
+ }
+ updateTimerPMask(_p_) // clear if there are no timers.
+ idlepMask.set(_p_.id)
+ _p_.link = sched.pidle
+ sched.pidle.set(_p_)
+ atomic.Xadd(&sched.npidle, 1)
+ if !_p_.limiterEvent.start(limiterEventIdle, now) {
+ throw("must be able to track idle limiter event")
+ }
+ return now
+}
+
+// pidleget tries to get a p from the _Pidle list, acquiring ownership.
+//
+// sched.lock must be held.
+//
+// May run during STW, so write barriers are not allowed.
+//
+//go:nowritebarrierrec
+func pidleget(now int64) (*p, int64) {
+ assertLockHeld(&sched.lock)
+
+ _p_ := sched.pidle.ptr()
+ if _p_ != nil {
+ // Timer may get added at any time now.
+ if now == 0 {
+ now = nanotime()
+ }
+ timerpMask.set(_p_.id)
+ idlepMask.clear(_p_.id)
+ sched.pidle = _p_.link
+ atomic.Xadd(&sched.npidle, -1)
+ _p_.limiterEvent.stop(limiterEventIdle, now)
+ }
+ return _p_, now
+}
+
+// runqempty reports whether _p_ has no Gs on its local run queue.
+// It never returns true spuriously.
+func runqempty(_p_ *p) bool {
+ // Defend against a race where 1) _p_ has G1 in runnext but runqhead == runqtail,
+ // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runnext.
+ // Simply observing that runqhead == runqtail and then observing that runqnext == nil
+ // does not mean the queue is empty.
+ for {
+ head := atomic.Load(&_p_.runqhead)
+ tail := atomic.Load(&_p_.runqtail)
+ runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
+ if tail == atomic.Load(&_p_.runqtail) {
+ return head == tail && runnext == 0
+ }
+ }
+}
+
+// To shake out latent assumptions about scheduling order,
+// we introduce some randomness into scheduling decisions
+// when running with the race detector.
+// The need for this was made obvious by changing the
+// (deterministic) scheduling order in Go 1.5 and breaking
+// many poorly-written tests.
+// With the randomness here, as long as the tests pass
+// consistently with -race, they shouldn't have latent scheduling
+// assumptions.
+const randomizeScheduler = raceenabled
+
+// runqput tries to put g on the local runnable queue.
+// If next is false, runqput adds g to the tail of the runnable queue.
+// If next is true, runqput puts g in the _p_.runnext slot.
+// If the run queue is full, runqput puts g on the global queue.
+// Executed only by the owner P.
+func runqput(_p_ *p, gp *g, next bool) {
+ if randomizeScheduler && next && fastrandn(2) == 0 {
+ next = false
+ }
+
+ if next {
+ retryNext:
+ oldnext := _p_.runnext
+ if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
+ goto retryNext
+ }
+ if oldnext == 0 {
+ return
+ }
+ // Kick the old runnext out to the regular run queue.
+ gp = oldnext.ptr()
+ }
+
+retry:
+ h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers
+ t := _p_.runqtail
+ if t-h < uint32(len(_p_.runq)) {
+ _p_.runq[t%uint32(len(_p_.runq))].set(gp)
+ atomic.StoreRel(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
+ return
+ }
+ if runqputslow(_p_, gp, h, t) {
+ return
+ }
+ // the queue is not full, now the put above must succeed
+ goto retry
+}
+
+// Put g and a batch of work from local runnable queue on global queue.
+// Executed only by the owner P.
+func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
+ var batch [len(_p_.runq)/2 + 1]*g
+
+ // First, grab a batch from local queue.
+ n := t - h
+ n = n / 2
+ if n != uint32(len(_p_.runq)/2) {
+ throw("runqputslow: queue is not full")
+ }
+ for i := uint32(0); i < n; i++ {
+ batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
+ }
+ if !atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume
+ return false
+ }
+ batch[n] = gp
+
+ if randomizeScheduler {
+ for i := uint32(1); i <= n; i++ {
+ j := fastrandn(i + 1)
+ batch[i], batch[j] = batch[j], batch[i]
+ }
+ }
+
+ // Link the goroutines.
+ for i := uint32(0); i < n; i++ {
+ batch[i].schedlink.set(batch[i+1])
+ }
+ var q gQueue
+ q.head.set(batch[0])
+ q.tail.set(batch[n])
+
+ // Now put the batch on global queue.
+ lock(&sched.lock)
+ globrunqputbatch(&q, int32(n+1))
+ unlock(&sched.lock)
+ return true
+}
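+
+// For example (editorial illustration, not part of the upstream source):
+// when the 256-entry local queue is full, runqputslow moves 128 G's plus
+// gp itself (129 in total) to the global queue, halving the local queue
+// in one locked operation.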
+
+// runqputbatch tries to put all the G's on q on the local runnable queue.
+// If the queue is full, they are put on the global queue; in that case
+// this will temporarily acquire the scheduler lock.
+// Executed only by the owner P.
+func runqputbatch(pp *p, q *gQueue, qsize int) {
+ h := atomic.LoadAcq(&pp.runqhead)
+ t := pp.runqtail
+ n := uint32(0)
+ for !q.empty() && t-h < uint32(len(pp.runq)) {
+ gp := q.pop()
+ pp.runq[t%uint32(len(pp.runq))].set(gp)
+ t++
+ n++
+ }
+ qsize -= int(n)
+
+ if randomizeScheduler {
+ off := func(o uint32) uint32 {
+ return (pp.runqtail + o) % uint32(len(pp.runq))
+ }
+ for i := uint32(1); i < n; i++ {
+ j := fastrandn(i + 1)
+ pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
+ }
+ }
+
+ atomic.StoreRel(&pp.runqtail, t)
+ if !q.empty() {
+ lock(&sched.lock)
+ globrunqputbatch(q, int32(qsize))
+ unlock(&sched.lock)
+ }
+}
+
+// Get g from local runnable queue.
+// If inheritTime is true, gp should inherit the remaining time in the
+// current time slice. Otherwise, it should start a new time slice.
+// Executed only by the owner P.
+func runqget(_p_ *p) (gp *g, inheritTime bool) {
+ // If there's a runnext, it's the next G to run.
+ next := _p_.runnext
+ // If the runnext is non-0 and the CAS fails, it could only have been stolen by another P,
+ // because other Ps can race to set runnext to 0, but only the current P can set it to non-0.
+ // Hence, there's no need to retry this CAS if it fails.
+ if next != 0 && _p_.runnext.cas(next, 0) {
+ return next.ptr(), true
+ }
+
+ for {
+ h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
+ t := _p_.runqtail
+ if t == h {
+ return nil, false
+ }
+ gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
+ if atomic.CasRel(&_p_.runqhead, h, h+1) { // cas-release, commits consume
+ return gp, false
+ }
+ }
+}
+
+// runqdrain drains the local runnable queue of _p_ and returns all goroutines in it.
+// Executed only by the owner P.
+func runqdrain(_p_ *p) (drainQ gQueue, n uint32) {
+ oldNext := _p_.runnext
+ if oldNext != 0 && _p_.runnext.cas(oldNext, 0) {
+ drainQ.pushBack(oldNext.ptr())
+ n++
+ }
+
+retry:
+ h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
+ t := _p_.runqtail
+ qn := t - h
+ if qn == 0 {
+ return
+ }
+ if qn > uint32(len(_p_.runq)) { // read inconsistent h and t
+ goto retry
+ }
+
+ if !atomic.CasRel(&_p_.runqhead, h, h+qn) { // cas-release, commits consume
+ goto retry
+ }
+
+ // The order here is deliberately inverted: we advance the head pointer
+ // before copying the G's out of the local run queue, so that the
+ // statuses of G's are not disturbed while runqdrain() and runqsteal()
+ // are running in parallel.
+ // Advancing the head pointer first takes full ownership of the G's, so
+ // gp.schedlink is only updated afterwards, when other P's can no longer
+ // reach these G's in the local run queue to steal them.
+ // See https://groups.google.com/g/golang-dev/c/0pTKxEKhHSc/m/6Q85QjdVBQAJ for more details.
+ for i := uint32(0); i < qn; i++ {
+ gp := _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
+ drainQ.pushBack(gp)
+ n++
+ }
+ return
+}
+
+// Grabs a batch of goroutines from _p_'s runnable queue into batch.
+// Batch is a ring buffer starting at batchHead.
+// Returns number of grabbed goroutines.
+// Can be executed by any P.
+func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
+ for {
+ h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
+ t := atomic.LoadAcq(&_p_.runqtail) // load-acquire, synchronize with the producer
+ n := t - h
+ n = n - n/2
+ if n == 0 {
+ if stealRunNextG {
+ // Try to steal from _p_.runnext.
+ if next := _p_.runnext; next != 0 {
+ if _p_.status == _Prunning {
+ // Sleep to ensure that _p_ isn't about to run the g
+ // we are about to steal.
+ // The important use case here is when the g running
+ // on _p_ ready()s another g and then almost
+ // immediately blocks. Instead of stealing runnext
+ // in this window, back off to give _p_ a chance to
+ // schedule runnext. This will avoid thrashing gs
+ // between different Ps.
+ // A sync chan send/recv takes ~50ns as of the time of
+ // writing, so 3us gives ~50x overshoot.
+ if GOOS != "windows" && GOOS != "openbsd" && GOOS != "netbsd" {
+ usleep(3)
+ } else {
+ // On some platforms system timer granularity is
+ // 1-15ms, which is way too much for this
+ // optimization. So just yield.
+ osyield()
+ }
+ }
+ if !_p_.runnext.cas(next, 0) {
+ continue
+ }
+ batch[batchHead%uint32(len(batch))] = next
+ return 1
+ }
+ }
+ return 0
+ }
+ if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
+ continue
+ }
+ for i := uint32(0); i < n; i++ {
+ g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
+ batch[(batchHead+i)%uint32(len(batch))] = g
+ }
+ if atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume
+ return n
+ }
+ }
+}
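+
+// For example (editorial illustration, not part of the upstream source):
+// if the victim has 5 queued G's, runqgrab takes n = 5 - 5/2 = 3 of
+// them, leaving the smaller half behind.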
+
+// Steal half of elements from local runnable queue of p2
+// and put onto local runnable queue of p.
+// Returns one of the stolen elements (or nil if failed).
+func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
+ t := _p_.runqtail
+ n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
+ if n == 0 {
+ return nil
+ }
+ n--
+ gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
+ if n == 0 {
+ return gp
+ }
+ h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers
+ if t-h+n >= uint32(len(_p_.runq)) {
+ throw("runqsteal: runq overflow")
+ }
+ atomic.StoreRel(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
+ return gp
+}
+
+// A gQueue is a deque of Gs linked through g.schedlink. A G can only
+// be on one gQueue or gList at a time.
+type gQueue struct {
+ head guintptr
+ tail guintptr
+}
+
+// empty reports whether q is empty.
+func (q *gQueue) empty() bool {
+ return q.head == 0
+}
+
+// push adds gp to the head of q.
+func (q *gQueue) push(gp *g) {
+ gp.schedlink = q.head
+ q.head.set(gp)
+ if q.tail == 0 {
+ q.tail.set(gp)
+ }
+}
+
+// pushBack adds gp to the tail of q.
+func (q *gQueue) pushBack(gp *g) {
+ gp.schedlink = 0
+ if q.tail != 0 {
+ q.tail.ptr().schedlink.set(gp)
+ } else {
+ q.head.set(gp)
+ }
+ q.tail.set(gp)
+}
+
+// pushBackAll adds all Gs in q2 to the tail of q. After this q2 must
+// not be used.
+func (q *gQueue) pushBackAll(q2 gQueue) {
+ if q2.tail == 0 {
+ return
+ }
+ q2.tail.ptr().schedlink = 0
+ if q.tail != 0 {
+ q.tail.ptr().schedlink = q2.head
+ } else {
+ q.head = q2.head
+ }
+ q.tail = q2.tail
+}
+
+// pop removes and returns the head of queue q. It returns nil if
+// q is empty.
+func (q *gQueue) pop() *g {
+ gp := q.head.ptr()
+ if gp != nil {
+ q.head = gp.schedlink
+ if q.head == 0 {
+ q.tail = 0
+ }
+ }
+ return gp
+}
+
+// popList takes all Gs in q and returns them as a gList.
+func (q *gQueue) popList() gList {
+ stack := gList{q.head}
+ *q = gQueue{}
+ return stack
+}
+
+// A gList is a list of Gs linked through g.schedlink. A G can only be
+// on one gQueue or gList at a time.
+type gList struct {
+ head guintptr
+}
+
+// empty reports whether l is empty.
+func (l *gList) empty() bool {
+ return l.head == 0
+}
+
+// push adds gp to the head of l.
+func (l *gList) push(gp *g) {
+ gp.schedlink = l.head
+ l.head.set(gp)
+}
+
+// pushAll prepends all Gs in q to l.
+func (l *gList) pushAll(q gQueue) {
+ if !q.empty() {
+ q.tail.ptr().schedlink = l.head
+ l.head = q.head
+ }
+}
+
+// pop removes and returns the head of l. If l is empty, it returns nil.
+func (l *gList) pop() *g {
+ gp := l.head.ptr()
+ if gp != nil {
+ l.head = gp.schedlink
+ }
+ return gp
+}
+
+//go:linkname setMaxThreads runtime/debug.setMaxThreads
+func setMaxThreads(in int) (out int) {
+ lock(&sched.lock)
+ out = int(sched.maxmcount)
+ if in > 0x7fffffff { // MaxInt32
+ sched.maxmcount = 0x7fffffff
+ } else {
+ sched.maxmcount = int32(in)
+ }
+ checkmcount()
+ unlock(&sched.lock)
+ return
+}
+
+//go:nosplit
+func procPin() int {
+ _g_ := getg()
+ mp := _g_.m
+
+ mp.locks++
+ return int(mp.p.ptr().id)
+}
+
+//go:nosplit
+func procUnpin() {
+ _g_ := getg()
+ _g_.m.locks--
+}
+
+//go:linkname sync_runtime_procPin sync.runtime_procPin
+//go:nosplit
+func sync_runtime_procPin() int {
+ return procPin()
+}
+
+//go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
+//go:nosplit
+func sync_runtime_procUnpin() {
+ procUnpin()
+}
+
+//go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
+//go:nosplit
+func sync_atomic_runtime_procPin() int {
+ return procPin()
+}
+
+//go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
+//go:nosplit
+func sync_atomic_runtime_procUnpin() {
+ procUnpin()
+}
+
+// Active spinning for sync.Mutex.
+//
+//go:linkname sync_runtime_canSpin sync.runtime_canSpin
+//go:nosplit
+func sync_runtime_canSpin(i int) bool {
+ // sync.Mutex is cooperative, so we are conservative with spinning.
+ // Spin only few times and only if running on a multicore machine and
+ // GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
+ // As opposed to runtime mutex we don't do passive spinning here,
+ // because there can be work on global runq or on other Ps.
+ if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
+ return false
+ }
+ if p := getg().m.p.ptr(); !runqempty(p) {
+ return false
+ }
+ return true
+}
+
+//go:linkname sync_runtime_doSpin sync.runtime_doSpin
+//go:nosplit
+func sync_runtime_doSpin() {
+ procyield(active_spin_cnt)
+}
+
+var stealOrder randomOrder
+
+// randomOrder/randomEnum are helper types for randomized work stealing.
+// They allow enumerating all Ps in different pseudo-random orders without repetitions.
+// The algorithm is based on the fact that if X and GOMAXPROCS
+// are coprime, then the sequence (i + X) % GOMAXPROCS gives the required enumeration.
+type randomOrder struct {
+ count uint32
+ coprimes []uint32
+}
+
+type randomEnum struct {
+ i uint32
+ count uint32
+ pos uint32
+ inc uint32
+}
+
+func (ord *randomOrder) reset(count uint32) {
+ ord.count = count
+ ord.coprimes = ord.coprimes[:0]
+ for i := uint32(1); i <= count; i++ {
+ if gcd(i, count) == 1 {
+ ord.coprimes = append(ord.coprimes, i)
+ }
+ }
+}
+
+func (ord *randomOrder) start(i uint32) randomEnum {
+ return randomEnum{
+ count: ord.count,
+ pos: i % ord.count,
+ inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
+ }
+}
+
+func (enum *randomEnum) done() bool {
+ return enum.i == enum.count
+}
+
+func (enum *randomEnum) next() {
+ enum.i++
+ enum.pos = (enum.pos + enum.inc) % enum.count
+}
+
+func (enum *randomEnum) position() uint32 {
+ return enum.pos
+}
+
+func gcd(a, b uint32) uint32 {
+ for b != 0 {
+ a, b = b, a%b
+ }
+ return a
+}
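+
+// Worked example (editorial illustration, not from the upstream source):
+// for count = 6, reset computes coprimes {1, 5}. An enumeration started
+// at pos = 2 with inc = 5 then visits 2, 1, 0, 5, 4, 3, covering every P
+// exactly once, because gcd(5, 6) == 1.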
+
+// An initTask represents the set of initializations that need to be done for a package.
+// Keep in sync with ../../test/initempty.go:initTask
+type initTask struct {
+ // TODO: pack the first 3 fields more tightly?
+ state uintptr // 0 = uninitialized, 1 = in progress, 2 = done
+ ndeps uintptr
+ nfns uintptr
+ // followed by ndeps instances of an *initTask, one per package depended on
+ // followed by nfns pcs, one per init function to run
+}
+
+// inittrace stores statistics for init functions which are
+// updated by malloc and newproc when active is true.
+var inittrace tracestat
+
+type tracestat struct {
+ active bool // init tracing activation status
+ id int64 // init goroutine id
+ allocs uint64 // heap allocations
+ bytes uint64 // heap allocated bytes
+}
+
+func doInit(t *initTask) {
+ switch t.state {
+ case 2: // fully initialized
+ return
+ case 1: // initialization in progress
+ throw("recursive call during initialization - linker skew")
+ default: // not initialized yet
+ t.state = 1 // initialization in progress
+
+ for i := uintptr(0); i < t.ndeps; i++ {
+ p := add(unsafe.Pointer(t), (3+i)*goarch.PtrSize)
+ t2 := *(**initTask)(p)
+ doInit(t2)
+ }
+
+ if t.nfns == 0 {
+ t.state = 2 // initialization done
+ return
+ }
+
+ var (
+ start int64
+ before tracestat
+ )
+
+ if inittrace.active {
+ start = nanotime()
+ // Load stats non-atomically since inittrace is updated only by this init goroutine.
+ before = inittrace
+ }
+
+ firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*goarch.PtrSize)
+ for i := uintptr(0); i < t.nfns; i++ {
+ p := add(firstFunc, i*goarch.PtrSize)
+ f := *(*func())(unsafe.Pointer(&p))
+ f()
+ }
+
+ if inittrace.active {
+ end := nanotime()
+ // Load stats non-atomically since inittrace is updated only by this init goroutine.
+ after := inittrace
+
+ f := *(*func())(unsafe.Pointer(&firstFunc))
+ pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
+
+ var sbuf [24]byte
+ print("init ", pkg, " @")
+ print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
+ print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
+ print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
+ print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
+ print("\n")
+ }
+
+ t.state = 2 // initialization done
+ }
+}
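+
+// Layout note (editorial illustration, not from the upstream source): an
+// initTask is followed in memory by ndeps *initTask pointers and then
+// nfns PCs, so the (3+i)*goarch.PtrSize offsets above skip the three
+// fixed words state, ndeps and nfns.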
diff --git a/contrib/go/_std_1.19/src/runtime/profbuf.go b/contrib/go/_std_1.19/src/runtime/profbuf.go
new file mode 100644
index 0000000000..3d907d5612
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/profbuf.go
@@ -0,0 +1,560 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// A profBuf is a lock-free buffer for profiling events,
+// safe for concurrent use by one reader and one writer.
+// The writer may be a signal handler running without a user g.
+// The reader is assumed to be a user g.
+//
+// Each logged event corresponds to a fixed size header, a list of
+// uintptrs (typically a stack), and exactly one unsafe.Pointer tag.
+// The header and uintptrs are stored in the circular buffer data and the
+// tag is stored in a circular buffer tags, running in parallel.
+// In the circular buffer data, each event takes 2+hdrsize+len(stk)
+// words: the value 2+hdrsize+len(stk), then the time of the event, then
+// hdrsize words giving the fixed-size header, and then len(stk) words
+// for the stack.
+//
+// The current effective offsets into the tags and data circular buffers
+// for reading and writing are stored in the high 30 and low 32 bits of r and w.
+// The bottom bits of the high 32 are additional flag bits in w, unused in r.
+// "Effective" offsets means the total number of reads or writes, mod 2^length.
+// The offset in the buffer is the effective offset mod the length of the buffer.
+// To make wraparound mod 2^length match wraparound mod length of the buffer,
+// the length of the buffer must be a power of two.
+//
+// If the reader catches up to the writer, a flag passed to read controls
+// whether the read blocks until more data is available. A read returns a
+// pointer to the buffer data itself; the caller is assumed to be done with
+// that data at the next read. The read offset rNext tracks the next offset to
+// be returned by read. By definition, r ≤ rNext ≤ w (before wraparound),
+// and rNext is only used by the reader, so it can be accessed without atomics.
+//
+// If the writer gets ahead of the reader, so that the buffer fills,
+// future writes are discarded and replaced in the output stream by an
+// overflow entry, which has size 2+hdrsize+1, time set to the time of
+// the first discarded write, a header of all zeroed words, and a "stack"
+// containing one word, the number of discarded writes.
+//
+// Between the time the buffer fills and the buffer becomes empty enough
+// to hold more data, the overflow entry is stored as a pending overflow
+// entry in the fields overflow and overflowTime. The pending overflow
+// entry can be turned into a real record by either the writer or the
+// reader. If the writer is called to write a new record and finds that
+// the output buffer has room for both the pending overflow entry and the
+// new record, the writer emits the pending overflow entry and the new
+// record into the buffer. If the reader is called to read data and finds
+// that the output buffer is empty but that there is a pending overflow
+// entry, the reader will return a synthesized record for the pending
+// overflow entry.
+//
+// Only the writer can create or add to a pending overflow entry, but
+// either the reader or the writer can clear the pending overflow entry.
+// A pending overflow entry is indicated by the low 32 bits of 'overflow'
+// holding the number of discarded writes, and overflowTime holding the
+// time of the first discarded write. The high 32 bits of 'overflow'
+// increment each time the low 32 bits transition from zero to non-zero
+// or vice versa. This sequence number avoids ABA problems in the use of
+// compare-and-swap to coordinate between reader and writer.
+// The overflowTime is only written when the low 32 bits of overflow are
+// zero, that is, only when there is no pending overflow entry, in
+// preparation for creating a new one. The reader can therefore fetch and
+// clear the entry atomically using
+//
+// for {
+// overflow = load(&b.overflow)
+// if uint32(overflow) == 0 {
+// // no pending entry
+// break
+// }
+// time = load(&b.overflowTime)
+// if cas(&b.overflow, overflow, ((overflow>>32)+1)<<32) {
+// // pending entry cleared
+// break
+// }
+// }
+// if uint32(overflow) > 0 {
+// emit entry for uint32(overflow), time
+// }
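+//
+// As a concrete illustration (editorial, not part of the upstream
+// comment): with hdrsize = 1, an event with a 3-frame stack occupies
+// 2+1+3 = 6 words of data, laid out as
+//
+//	[6, now, hdr0, pc0, pc1, pc2]
+//
+// plus exactly one slot in tags.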
+type profBuf struct {
+ // accessed atomically
+ r, w profAtomic
+ overflow uint64
+ overflowTime uint64
+ eof uint32
+
+ // immutable (excluding slice content)
+ hdrsize uintptr
+ data []uint64
+ tags []unsafe.Pointer
+
+ // owned by reader
+ rNext profIndex
+ overflowBuf []uint64 // for use by reader to return overflow record
+ wait note
+}
+
+// A profAtomic is the atomically-accessed word holding a profIndex.
+type profAtomic uint64
+
+// A profIndex holds the packed tag and data counts and flag bits, described above.
+type profIndex uint64
+
+const (
+ profReaderSleeping profIndex = 1 << 32 // reader is sleeping and must be woken up
+ profWriteExtra profIndex = 1 << 33 // overflow or eof waiting
+)
+
+func (x *profAtomic) load() profIndex {
+ return profIndex(atomic.Load64((*uint64)(x)))
+}
+
+func (x *profAtomic) store(new profIndex) {
+ atomic.Store64((*uint64)(x), uint64(new))
+}
+
+func (x *profAtomic) cas(old, new profIndex) bool {
+ return atomic.Cas64((*uint64)(x), uint64(old), uint64(new))
+}
+
+func (x profIndex) dataCount() uint32 {
+ return uint32(x)
+}
+
+func (x profIndex) tagCount() uint32 {
+ return uint32(x >> 34)
+}
+
+// countSub subtracts two counts obtained from profIndex.dataCount or profIndex.tagCount,
+// assuming that they are no more than 2^29 apart (guaranteed since they are never more than
+// len(data) or len(tags) apart, respectively).
+// tagCount wraps at 2^30, while dataCount wraps at 2^32.
+// This function works for both.
+func countSub(x, y uint32) int {
+ // x-y is 32-bit signed or 30-bit signed; sign-extend to 32 bits and convert to int.
+ return int(int32(x-y) << 2 >> 2)
+}
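+
+// For instance (editorial illustration, not part of the upstream
+// source): with tag counts, which wrap at 2^30, x = 1 and
+// y = 1<<30 - 1 are logically 2 apart. x - y as a uint32 is 0xC0000002;
+// shifting left then right by 2 discards the two wrapped high bits and
+// sign-extends, yielding 2.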
+
+// addCountsAndClearFlags returns the packed form of "x + (data, tag) - all flags".
+func (x profIndex) addCountsAndClearFlags(data, tag int) profIndex {
+ return profIndex((uint64(x)>>34+uint64(uint32(tag)<<2>>2))<<34 | uint64(uint32(x)+uint32(data)))
+}
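+
+// E.g. (editorial illustration, not part of the upstream source):
+// starting from x = 0, addCountsAndClearFlags(3, 1) yields 1<<34 | 3:
+// tag count 1 in the high 30 bits, data count 3 in the low 32 bits, with
+// the two flag bits (1<<32 and 1<<33) left clear.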
+
+// hasOverflow reports whether b has any overflow records pending.
+func (b *profBuf) hasOverflow() bool {
+ return uint32(atomic.Load64(&b.overflow)) > 0
+}
+
+// takeOverflow consumes the pending overflow records, returning the overflow count
+// and the time of the first overflow.
+// When called by the reader, it is racing against incrementOverflow.
+func (b *profBuf) takeOverflow() (count uint32, time uint64) {
+ overflow := atomic.Load64(&b.overflow)
+ time = atomic.Load64(&b.overflowTime)
+ for {
+ count = uint32(overflow)
+ if count == 0 {
+ time = 0
+ break
+ }
+ // Increment generation, clear overflow count in low bits.
+ if atomic.Cas64(&b.overflow, overflow, ((overflow>>32)+1)<<32) {
+ break
+ }
+ overflow = atomic.Load64(&b.overflow)
+ time = atomic.Load64(&b.overflowTime)
+ }
+ return uint32(overflow), time
+}
+
+// incrementOverflow records a single overflow at time now.
+// It is racing against a possible takeOverflow in the reader.
+func (b *profBuf) incrementOverflow(now int64) {
+ for {
+ overflow := atomic.Load64(&b.overflow)
+
+ // Once we see b.overflow reach 0, it's stable: no one else is changing it underfoot.
+ // We need to set overflowTime if we're incrementing b.overflow from 0.
+ if uint32(overflow) == 0 {
+ // Store overflowTime first so it's always available when overflow != 0.
+ atomic.Store64(&b.overflowTime, uint64(now))
+ atomic.Store64(&b.overflow, (((overflow>>32)+1)<<32)+1)
+ break
+ }
+ // Otherwise we're racing to increment against reader
+ // who wants to set b.overflow to 0.
+ // Out of paranoia, leave 2³²-1 as a sticky overflow value,
+ // to avoid wrapping around. Extremely unlikely.
+ if int32(overflow) == -1 {
+ break
+ }
+ if atomic.Cas64(&b.overflow, overflow, overflow+1) {
+ break
+ }
+ }
+}
+
+// newProfBuf returns a new profiling buffer with room for
+// a header of hdrsize words and a buffer of at least bufwords words.
+func newProfBuf(hdrsize, bufwords, tags int) *profBuf {
+ if min := 2 + hdrsize + 1; bufwords < min {
+ bufwords = min
+ }
+
+ // Buffer sizes must be powers of two, so that we don't have to
+ // worry about uint32 wraparound changing the effective position
+ // within the buffers. We store 30 bits of count; limiting to 28
+ // gives us some room for intermediate calculations.
+ if bufwords >= 1<<28 || tags >= 1<<28 {
+ throw("newProfBuf: buffer too large")
+ }
+ var i int
+ for i = 1; i < bufwords; i <<= 1 {
+ }
+ bufwords = i
+ for i = 1; i < tags; i <<= 1 {
+ }
+ tags = i
+
+ b := new(profBuf)
+ b.hdrsize = uintptr(hdrsize)
+ b.data = make([]uint64, bufwords)
+ b.tags = make([]unsafe.Pointer, tags)
+ b.overflowBuf = make([]uint64, 2+b.hdrsize+1)
+ return b
+}
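+
+// For example (editorial illustration, not part of the upstream source):
+// newProfBuf(1, 100, 10) rounds bufwords up to 128 and tags up to 16,
+// the next powers of two, and allocates a 4-word overflowBuf
+// (2 + hdrsize + 1).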
+
+// canWriteRecord reports whether the buffer has room
+// for a single contiguous record with a stack of length nstk.
+func (b *profBuf) canWriteRecord(nstk int) bool {
+ br := b.r.load()
+ bw := b.w.load()
+
+ // room for tag?
+ if countSub(br.tagCount(), bw.tagCount())+len(b.tags) < 1 {
+ return false
+ }
+
+ // room for data?
+ nd := countSub(br.dataCount(), bw.dataCount()) + len(b.data)
+ want := 2 + int(b.hdrsize) + nstk
+ i := int(bw.dataCount() % uint32(len(b.data)))
+ if i+want > len(b.data) {
+ // Can't fit in trailing fragment of slice.
+ // Skip over that and start over at beginning of slice.
+ nd -= len(b.data) - i
+ }
+ return nd >= want
+}
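+
+// For example (editorial illustration, not part of the upstream source):
+// with len(b.data) = 16, hdrsize = 1 and nstk = 3, a record wants 6
+// words. If the write offset is 12, the 4 trailing words cannot hold it
+// contiguously, so they are charged against nd and the record must fit
+// at the start of the slice.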
+
+// canWriteTwoRecords reports whether the buffer has room
+// for two records with stack lengths nstk1, nstk2, in that order.
+// Each record must be contiguous on its own, but the two
+// records need not be contiguous (one can be at the end of the buffer
+// and the other can wrap around and start at the beginning of the buffer).
+func (b *profBuf) canWriteTwoRecords(nstk1, nstk2 int) bool {
+ br := b.r.load()
+ bw := b.w.load()
+
+ // room for tag?
+ if countSub(br.tagCount(), bw.tagCount())+len(b.tags) < 2 {
+ return false
+ }
+
+ // room for data?
+ nd := countSub(br.dataCount(), bw.dataCount()) + len(b.data)
+
+ // first record
+ want := 2 + int(b.hdrsize) + nstk1
+ i := int(bw.dataCount() % uint32(len(b.data)))
+ if i+want > len(b.data) {
+ // Can't fit in trailing fragment of slice.
+ // Skip over that and start over at beginning of slice.
+ nd -= len(b.data) - i
+ i = 0
+ }
+ i += want
+ nd -= want
+
+ // second record
+ want = 2 + int(b.hdrsize) + nstk2
+ if i+want > len(b.data) {
+ // Can't fit in trailing fragment of slice.
+ // Skip over that and start over at beginning of slice.
+ nd -= len(b.data) - i
+ i = 0
+ }
+ return nd >= want
+}
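Both capacity checks lean on countSub, defined earlier in profbuf.go, which subtracts position counters that wrap at 2^30 (tags) or 2^32 (data). A small demonstration of how free space falls out of that arithmetic; countSub is reproduced from the runtime source, the rest is illustrative:

    package main

    import "fmt"

    // countSub, as in runtime/profbuf.go: sign-extending the 30-bit difference
    // gives the right small signed result whether the counters wrap at 2^30 or 2^32.
    func countSub(x, y uint32) int {
        return int(int32(x-y) << 2 >> 2)
    }

    func main() {
        const capacity = 8 // ring size in words

        // Reader at 3, writer at 9: the writer is 6 ahead, so 2 words are free.
        fmt.Println(countSub(3, 9) + capacity) // 2

        // Same answer after the 30-bit tag counter wraps: reader at 2^30-2,
        // writer 6 ahead at (2^30+4) mod 2^30 = 4.
        fmt.Println(countSub(1<<30-2, 4) + capacity) // 2
    }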
+
+// write writes an entry to the profiling buffer b.
+// The entry begins with a fixed hdr, which must have
+// length b.hdrsize, followed by a variable-sized stack
+// and a single tag pointer *tagPtr (or nil if tagPtr is nil).
+// No write barriers allowed because this might be called from a signal handler.
+func (b *profBuf) write(tagPtr *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
+ if b == nil {
+ return
+ }
+ if len(hdr) > int(b.hdrsize) {
+ throw("misuse of profBuf.write")
+ }
+
+ if hasOverflow := b.hasOverflow(); hasOverflow && b.canWriteTwoRecords(1, len(stk)) {
+ // Room for both an overflow record and the one being written.
+ // Write the overflow record if the reader hasn't gotten to it yet.
+ // Only racing against reader, not other writers.
+ count, time := b.takeOverflow()
+ if count > 0 {
+ var stk [1]uintptr
+ stk[0] = uintptr(count)
+ b.write(nil, int64(time), nil, stk[:])
+ }
+ } else if hasOverflow || !b.canWriteRecord(len(stk)) {
+ // Pending overflow without room to write overflow and new records
+ // or no overflow but also no room for new record.
+ b.incrementOverflow(now)
+ b.wakeupExtra()
+ return
+ }
+
+ // There's room: write the record.
+ br := b.r.load()
+ bw := b.w.load()
+
+ // Profiling tag
+ //
+ // The tag is a pointer, but we can't run a write barrier here.
+ // We have interrupted the OS-level execution of gp, but the
+ // runtime still sees gp as executing. In effect, we are running
+ // in place of the real gp. Since gp is the only goroutine that
+ // can overwrite gp.labels, the value of gp.labels is stable during
+ // this signal handler: it will still be reachable from gp when
+ // we finish executing. If a GC is in progress right now, it must
+ // keep gp.labels alive, because gp.labels is reachable from gp.
+ // If gp were to overwrite gp.labels, the deletion barrier would
+ // still shade that pointer, which would preserve it for the
+ // in-progress GC, so all is well. Any future GC will see the
+ // value we copied when scanning b.tags (heap-allocated).
+ // We arrange that the store here is always overwriting a nil,
+ // so there is no need for a deletion barrier on b.tags[wt].
+ wt := int(bw.tagCount() % uint32(len(b.tags)))
+ if tagPtr != nil {
+ *(*uintptr)(unsafe.Pointer(&b.tags[wt])) = uintptr(unsafe.Pointer(*tagPtr))
+ }
+
+ // Main record.
+ // It has to fit in a contiguous section of the slice, so if it doesn't fit at the end,
+ // leave a rewind marker (0) and start over at the beginning of the slice.
+ wd := int(bw.dataCount() % uint32(len(b.data)))
+ nd := countSub(br.dataCount(), bw.dataCount()) + len(b.data)
+ skip := 0
+ if wd+2+int(b.hdrsize)+len(stk) > len(b.data) {
+ b.data[wd] = 0
+ skip = len(b.data) - wd
+ nd -= skip
+ wd = 0
+ }
+ data := b.data[wd:]
+ data[0] = uint64(2 + b.hdrsize + uintptr(len(stk))) // length
+ data[1] = uint64(now) // time stamp
+ // header, zero-padded
+ i := uintptr(copy(data[2:2+b.hdrsize], hdr))
+ for ; i < b.hdrsize; i++ {
+ data[2+i] = 0
+ }
+ for i, pc := range stk {
+ data[2+b.hdrsize+uintptr(i)] = uint64(pc)
+ }
+
+ for {
+ // Commit write.
+ // Racing with reader setting flag bits in b.w, to avoid lost wakeups.
+ old := b.w.load()
+ new := old.addCountsAndClearFlags(skip+2+len(stk)+int(b.hdrsize), 1)
+ if !b.w.cas(old, new) {
+ continue
+ }
+ // If there was a reader, wake it up.
+ if old&profReaderSleeping != 0 {
+ notewakeup(&b.wait)
+ }
+ break
+ }
+}
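The framing that write lays down is easiest to see flat: word 0 is the record length in words (including itself), word 1 the timestamp, then hdrsize zero-padded header words, then the stack PCs; a length word of 0 is the rewind marker. A sketch of the encoder in isolation (encodeRecord is mine, not a runtime function):

    package main

    import "fmt"

    func encodeRecord(now int64, hdrsize int, hdr []uint64, stk []uintptr) []uint64 {
        rec := make([]uint64, 2+hdrsize+len(stk))
        rec[0] = uint64(len(rec))   // length, including this word
        rec[1] = uint64(now)        // timestamp
        copy(rec[2:2+hdrsize], hdr) // zero padding comes free from make
        for i, pc := range stk {
            rec[2+hdrsize+i] = uint64(pc)
        }
        return rec
    }

    func main() {
        fmt.Println(encodeRecord(12345, 1, []uint64{7}, []uintptr{0x401000, 0x401040}))
        // [5 12345 7 4198400 4198464]
    }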
+
+// close signals that there will be no more writes on the buffer.
+// Once all the data has been read from the buffer, reads will return eof=true.
+func (b *profBuf) close() {
+ if atomic.Load(&b.eof) > 0 {
+ throw("runtime: profBuf already closed")
+ }
+ atomic.Store(&b.eof, 1)
+ b.wakeupExtra()
+}
+
+// wakeupExtra must be called after setting one of the "extra"
+// atomic fields b.overflow or b.eof.
+// It records the change in b.w and wakes up the reader if needed.
+func (b *profBuf) wakeupExtra() {
+ for {
+ old := b.w.load()
+ new := old | profWriteExtra
+ if !b.w.cas(old, new) {
+ continue
+ }
+ if old&profReaderSleeping != 0 {
+ notewakeup(&b.wait)
+ }
+ break
+ }
+}
+
+// profBufReadMode specifies whether to block when no data is available to read.
+type profBufReadMode int
+
+const (
+ profBufBlocking profBufReadMode = iota
+ profBufNonBlocking
+)
+
+var overflowTag [1]unsafe.Pointer // always nil
+
+func (b *profBuf) read(mode profBufReadMode) (data []uint64, tags []unsafe.Pointer, eof bool) {
+ if b == nil {
+ return nil, nil, true
+ }
+
+ br := b.rNext
+
+ // Commit previous read, returning that part of the ring to the writer.
+ // First clear tags that have now been read, both to avoid holding
+ // up the memory they point at for longer than necessary
+ // and so that b.write can assume it is always overwriting
+ // nil tag entries (see comment in b.write).
+ rPrev := b.r.load()
+ if rPrev != br {
+ ntag := countSub(br.tagCount(), rPrev.tagCount())
+ ti := int(rPrev.tagCount() % uint32(len(b.tags)))
+ for i := 0; i < ntag; i++ {
+ b.tags[ti] = nil
+ if ti++; ti == len(b.tags) {
+ ti = 0
+ }
+ }
+ b.r.store(br)
+ }
+
+Read:
+ bw := b.w.load()
+ numData := countSub(bw.dataCount(), br.dataCount())
+ if numData == 0 {
+ if b.hasOverflow() {
+ // No data to read, but there is overflow to report.
+ // Racing with writer flushing b.overflow into a real record.
+ count, time := b.takeOverflow()
+ if count == 0 {
+ // Lost the race, go around again.
+ goto Read
+ }
+ // Won the race, report overflow.
+ dst := b.overflowBuf
+ dst[0] = uint64(2 + b.hdrsize + 1)
+ dst[1] = uint64(time)
+ for i := uintptr(0); i < b.hdrsize; i++ {
+ dst[2+i] = 0
+ }
+ dst[2+b.hdrsize] = uint64(count)
+ return dst[:2+b.hdrsize+1], overflowTag[:1], false
+ }
+ if atomic.Load(&b.eof) > 0 {
+ // No data, no overflow, EOF set: done.
+ return nil, nil, true
+ }
+ if bw&profWriteExtra != 0 {
+ // Writer claims to have published extra information (overflow or eof).
+ // Attempt to clear notification and then check again.
+ // If we fail to clear the notification it means b.w changed,
+ // so we still need to check again.
+ b.w.cas(bw, bw&^profWriteExtra)
+ goto Read
+ }
+
+ // Nothing to read right now.
+ // Return or sleep according to mode.
+ if mode == profBufNonBlocking {
+ return nil, nil, false
+ }
+ if !b.w.cas(bw, bw|profReaderSleeping) {
+ goto Read
+ }
+ // Committed to sleeping.
+ notetsleepg(&b.wait, -1)
+ noteclear(&b.wait)
+ goto Read
+ }
+ data = b.data[br.dataCount()%uint32(len(b.data)):]
+ if len(data) > numData {
+ data = data[:numData]
+ } else {
+ numData -= len(data) // available in case of wraparound
+ }
+ skip := 0
+ if data[0] == 0 {
+ // Wraparound record. Go back to the beginning of the ring.
+ skip = len(data)
+ data = b.data
+ if len(data) > numData {
+ data = data[:numData]
+ }
+ }
+
+ ntag := countSub(bw.tagCount(), br.tagCount())
+ if ntag == 0 {
+ throw("runtime: malformed profBuf buffer - tag and data out of sync")
+ }
+ tags = b.tags[br.tagCount()%uint32(len(b.tags)):]
+ if len(tags) > ntag {
+ tags = tags[:ntag]
+ }
+
+ // Count out whole data records until either data or tags is done.
+ // They are always in sync in the buffer, but due to an end-of-slice
+ // wraparound we might need to stop early and return the rest
+ // in the next call.
+ di := 0
+ ti := 0
+ for di < len(data) && data[di] != 0 && ti < len(tags) {
+ if uintptr(di)+uintptr(data[di]) > uintptr(len(data)) {
+ throw("runtime: malformed profBuf buffer - invalid size")
+ }
+ di += int(data[di])
+ ti++
+ }
+
+ // Remember how much we returned, to commit read on next call.
+ b.rNext = br.addCountsAndClearFlags(skip+di, ti)
+
+ if raceenabled {
+ // Match racereleasemerge in runtime_setProfLabel,
+ // so that the setting of the labels in runtime_setProfLabel
+ // is treated as happening before any use of the labels
+ // by our caller. The synchronization on labelSync itself is a fiction
+ // for the race detector. The actual synchronization is handled
+ // by the fact that the signal handler only reads from the current
+ // goroutine and uses atomics to write the updated queue indices,
+ // and then the read-out from the signal handler buffer uses
+ // atomics to read those queue indices.
+ raceacquire(unsafe.Pointer(&labelSync))
+ }
+
+ return data[:di], tags[:ti], false
+}
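On the consumer side, the slices that read returns are walked the same way the counting loop above does it: every record announces its own length, and tags pairs one-to-one with records. A hedged sketch of such a walk; walkRecords is mine, and in real use both slices come from profBuf.read:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func walkRecords(data []uint64, tags []unsafe.Pointer, hdrsize int) {
        ti := 0
        for di := 0; di < len(data); ti++ {
            n := int(data[di]) // total record length in words
            fmt.Printf("t=%d hdr=%v stack=%v tag=%v\n",
                data[di+1], data[di+2:di+2+hdrsize], data[di+2+hdrsize:di+n], tags[ti])
            di += n
        }
    }

    func main() {
        // Two records with hdrsize=1, each laid out as [len time hdr pc...].
        data := []uint64{4, 100, 7, 0x401000, 5, 200, 8, 0x402000, 0x402040}
        walkRecords(data, make([]unsafe.Pointer, 2), 1)
    }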
diff --git a/contrib/go/_std_1.18/src/runtime/proflabel.go b/contrib/go/_std_1.19/src/runtime/proflabel.go
index b2a161729e..b2a161729e 100644
--- a/contrib/go/_std_1.18/src/runtime/proflabel.go
+++ b/contrib/go/_std_1.19/src/runtime/proflabel.go
diff --git a/contrib/go/_std_1.18/src/runtime/race0.go b/contrib/go/_std_1.19/src/runtime/race0.go
index f36d4387c7..f36d4387c7 100644
--- a/contrib/go/_std_1.18/src/runtime/race0.go
+++ b/contrib/go/_std_1.19/src/runtime/race0.go
diff --git a/contrib/go/_std_1.18/src/runtime/rdebug.go b/contrib/go/_std_1.19/src/runtime/rdebug.go
index 1b213f1934..1b213f1934 100644
--- a/contrib/go/_std_1.18/src/runtime/rdebug.go
+++ b/contrib/go/_std_1.19/src/runtime/rdebug.go
diff --git a/contrib/go/_std_1.18/src/runtime/relax_stub.go b/contrib/go/_std_1.19/src/runtime/relax_stub.go
index e507702fc1..e507702fc1 100644
--- a/contrib/go/_std_1.18/src/runtime/relax_stub.go
+++ b/contrib/go/_std_1.19/src/runtime/relax_stub.go
diff --git a/contrib/go/_std_1.18/src/runtime/rt0_darwin_amd64.s b/contrib/go/_std_1.19/src/runtime/rt0_darwin_amd64.s
index ed804d47c5..ed804d47c5 100644
--- a/contrib/go/_std_1.18/src/runtime/rt0_darwin_amd64.s
+++ b/contrib/go/_std_1.19/src/runtime/rt0_darwin_amd64.s
diff --git a/contrib/go/_std_1.18/src/runtime/rt0_linux_amd64.s b/contrib/go/_std_1.19/src/runtime/rt0_linux_amd64.s
index 94ff7094d6..94ff7094d6 100644
--- a/contrib/go/_std_1.18/src/runtime/rt0_linux_amd64.s
+++ b/contrib/go/_std_1.19/src/runtime/rt0_linux_amd64.s
diff --git a/contrib/go/_std_1.19/src/runtime/runtime.go b/contrib/go/_std_1.19/src/runtime/runtime.go
new file mode 100644
index 0000000000..2cf93abefa
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/runtime.go
@@ -0,0 +1,67 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ _ "unsafe" // for go:linkname
+)
+
+//go:generate go run wincallback.go
+//go:generate go run mkduff.go
+//go:generate go run mkfastlog2table.go
+
+var ticks ticksType
+
+type ticksType struct {
+ lock mutex
+ pad uint32 // ensure 8-byte alignment of val on 386
+ val uint64
+}
+
+// Note: Called by runtime/pprof in addition to runtime code.
+func tickspersecond() int64 {
+ r := int64(atomic.Load64(&ticks.val))
+ if r != 0 {
+ return r
+ }
+ lock(&ticks.lock)
+ r = int64(ticks.val)
+ if r == 0 {
+ t0 := nanotime()
+ c0 := cputicks()
+ usleep(100 * 1000)
+ t1 := nanotime()
+ c1 := cputicks()
+ if t1 == t0 {
+ t1++
+ }
+ r = (c1 - c0) * 1000 * 1000 * 1000 / (t1 - t0)
+ if r == 0 {
+ r++
+ }
+ atomic.Store64(&ticks.val, uint64(r))
+ }
+ unlock(&ticks.lock)
+ return r
+}
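tickspersecond calibrates by sampling both clocks across a ~100ms sleep and scaling the tick delta to one second, caching the result behind a lock. The same idea in user space; ticksPerSecond here is an illustrative analogue, not the runtime function:

    package main

    import (
        "fmt"
        "time"
    )

    func ticksPerSecond(ticks func() int64) int64 {
        t0 := time.Now()
        c0 := ticks()
        time.Sleep(100 * time.Millisecond)
        t1 := time.Now()
        c1 := ticks()
        ns := t1.Sub(t0).Nanoseconds()
        if ns == 0 {
            ns = 1 // guard the division, as the runtime does with t1++
        }
        return (c1 - c0) * 1e9 / ns
    }

    func main() {
        // With the wall clock itself as the tick source, the result should
        // land near 1e9 "ticks" (nanoseconds) per second.
        fmt.Println(ticksPerSecond(func() int64 { return time.Now().UnixNano() }))
    }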
+
+var envs []string
+var argslice []string
+
+//go:linkname syscall_runtime_envs syscall.runtime_envs
+func syscall_runtime_envs() []string { return append([]string{}, envs...) }
+
+//go:linkname syscall_Getpagesize syscall.Getpagesize
+func syscall_Getpagesize() int { return int(physPageSize) }
+
+//go:linkname os_runtime_args os.runtime_args
+func os_runtime_args() []string { return append([]string{}, argslice...) }
+
+//go:linkname syscall_Exit syscall.Exit
+//go:nosplit
+func syscall_Exit(code int) {
+ exit(int32(code))
+}
diff --git a/contrib/go/_std_1.19/src/runtime/runtime1.go b/contrib/go/_std_1.19/src/runtime/runtime1.go
new file mode 100644
index 0000000000..e307901fc2
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/runtime1.go
@@ -0,0 +1,559 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/bytealg"
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// Keep a cached value to make gotraceback fast,
+// since we call it on every call to gentraceback.
+// The cached value is a uint32 in which the low bits
+// are the "crash" and "all" settings and the remaining
+// bits are the traceback value (0 off, 1 on, 2 include system).
+const (
+ tracebackCrash = 1 << iota
+ tracebackAll
+ tracebackShift = iota
+)
+
+var traceback_cache uint32 = 2 << tracebackShift
+var traceback_env uint32
+
+// gotraceback returns the current traceback settings.
+//
+// If level is 0, suppress all tracebacks.
+// If level is 1, show tracebacks, but exclude runtime frames.
+// If level is 2, show tracebacks including runtime frames.
+// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
+// If crash is set, crash (core dump, etc) after tracebacking.
+//
+//go:nosplit
+func gotraceback() (level int32, all, crash bool) {
+ _g_ := getg()
+ t := atomic.Load(&traceback_cache)
+ crash = t&tracebackCrash != 0
+ all = _g_.m.throwing >= throwTypeUser || t&tracebackAll != 0
+ if _g_.m.traceback != 0 {
+ level = int32(_g_.m.traceback)
+ } else if _g_.m.throwing >= throwTypeRuntime {
+ // Always include runtime frames in runtime throws unless
+ // otherwise overridden by m.traceback.
+ level = 2
+ } else {
+ level = int32(t >> tracebackShift)
+ }
+ return
+}
+
+var (
+ argc int32
+ argv **byte
+)
+
+// nosplit for use in linux startup sysargs
+//
+//go:nosplit
+func argv_index(argv **byte, i int32) *byte {
+ return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
+}
+
+func args(c int32, v **byte) {
+ argc = c
+ argv = v
+ sysargs(c, v)
+}
+
+func goargs() {
+ if GOOS == "windows" {
+ return
+ }
+ argslice = make([]string, argc)
+ for i := int32(0); i < argc; i++ {
+ argslice[i] = gostringnocopy(argv_index(argv, i))
+ }
+}
+
+func goenvs_unix() {
+ // TODO(austin): ppc64 in dynamic linking mode doesn't
+ // guarantee env[] will immediately follow argv. Might cause
+ // problems.
+ n := int32(0)
+ for argv_index(argv, argc+1+n) != nil {
+ n++
+ }
+
+ envs = make([]string, n)
+ for i := int32(0); i < n; i++ {
+ envs[i] = gostring(argv_index(argv, argc+1+i))
+ }
+}
+
+func environ() []string {
+ return envs
+}
+
+// TODO: These should be locals in testAtomic64, but we don't 8-byte
+// align stack variables on 386.
+var test_z64, test_x64 uint64
+
+func testAtomic64() {
+ test_z64 = 42
+ test_x64 = 0
+ if atomic.Cas64(&test_z64, test_x64, 1) {
+ throw("cas64 failed")
+ }
+ if test_x64 != 0 {
+ throw("cas64 failed")
+ }
+ test_x64 = 42
+ if !atomic.Cas64(&test_z64, test_x64, 1) {
+ throw("cas64 failed")
+ }
+ if test_x64 != 42 || test_z64 != 1 {
+ throw("cas64 failed")
+ }
+ if atomic.Load64(&test_z64) != 1 {
+ throw("load64 failed")
+ }
+ atomic.Store64(&test_z64, (1<<40)+1)
+ if atomic.Load64(&test_z64) != (1<<40)+1 {
+ throw("store64 failed")
+ }
+ if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
+ throw("xadd64 failed")
+ }
+ if atomic.Load64(&test_z64) != (2<<40)+2 {
+ throw("xadd64 failed")
+ }
+ if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
+ throw("xchg64 failed")
+ }
+ if atomic.Load64(&test_z64) != (3<<40)+3 {
+ throw("xchg64 failed")
+ }
+}
+
+func check() {
+ var (
+ a int8
+ b uint8
+ c int16
+ d uint16
+ e int32
+ f uint32
+ g int64
+ h uint64
+ i, i1 float32
+ j, j1 float64
+ k unsafe.Pointer
+ l *uint16
+ m [4]byte
+ )
+ type x1t struct {
+ x uint8
+ }
+ type y1t struct {
+ x1 x1t
+ y uint8
+ }
+ var x1 x1t
+ var y1 y1t
+
+ if unsafe.Sizeof(a) != 1 {
+ throw("bad a")
+ }
+ if unsafe.Sizeof(b) != 1 {
+ throw("bad b")
+ }
+ if unsafe.Sizeof(c) != 2 {
+ throw("bad c")
+ }
+ if unsafe.Sizeof(d) != 2 {
+ throw("bad d")
+ }
+ if unsafe.Sizeof(e) != 4 {
+ throw("bad e")
+ }
+ if unsafe.Sizeof(f) != 4 {
+ throw("bad f")
+ }
+ if unsafe.Sizeof(g) != 8 {
+ throw("bad g")
+ }
+ if unsafe.Sizeof(h) != 8 {
+ throw("bad h")
+ }
+ if unsafe.Sizeof(i) != 4 {
+ throw("bad i")
+ }
+ if unsafe.Sizeof(j) != 8 {
+ throw("bad j")
+ }
+ if unsafe.Sizeof(k) != goarch.PtrSize {
+ throw("bad k")
+ }
+ if unsafe.Sizeof(l) != goarch.PtrSize {
+ throw("bad l")
+ }
+ if unsafe.Sizeof(x1) != 1 {
+ throw("bad unsafe.Sizeof x1")
+ }
+ if unsafe.Offsetof(y1.y) != 1 {
+ throw("bad offsetof y1.y")
+ }
+ if unsafe.Sizeof(y1) != 2 {
+ throw("bad unsafe.Sizeof y1")
+ }
+
+ if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
+ throw("bad timediv")
+ }
+
+ var z uint32
+ z = 1
+ if !atomic.Cas(&z, 1, 2) {
+ throw("cas1")
+ }
+ if z != 2 {
+ throw("cas2")
+ }
+
+ z = 4
+ if atomic.Cas(&z, 5, 6) {
+ throw("cas3")
+ }
+ if z != 4 {
+ throw("cas4")
+ }
+
+ z = 0xffffffff
+ if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
+ throw("cas5")
+ }
+ if z != 0xfffffffe {
+ throw("cas6")
+ }
+
+ m = [4]byte{1, 1, 1, 1}
+ atomic.Or8(&m[1], 0xf0)
+ if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
+ throw("atomicor8")
+ }
+
+ m = [4]byte{0xff, 0xff, 0xff, 0xff}
+ atomic.And8(&m[1], 0x1)
+ if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
+ throw("atomicand8")
+ }
+
+ *(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
+ if j == j {
+ throw("float64nan")
+ }
+ if !(j != j) {
+ throw("float64nan1")
+ }
+
+ *(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
+ if j == j1 {
+ throw("float64nan2")
+ }
+ if !(j != j1) {
+ throw("float64nan3")
+ }
+
+ *(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
+ if i == i {
+ throw("float32nan")
+ }
+ if i == i {
+ throw("float32nan1")
+ }
+
+ *(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
+ if i == i1 {
+ throw("float32nan2")
+ }
+ if i == i1 {
+ throw("float32nan3")
+ }
+
+ testAtomic64()
+
+ if _FixedStack != round2(_FixedStack) {
+ throw("FixedStack is not power-of-2")
+ }
+
+ if !checkASM() {
+ throw("assembly checks failed")
+ }
+}
+
+type dbgVar struct {
+ name string
+ value *int32
+}
+
+// Holds variables parsed from GODEBUG env var,
+// except for "memprofilerate" since there is an
+// existing int var for that value, which may
+// already have an initial value.
+var debug struct {
+ cgocheck int32
+ clobberfree int32
+ efence int32
+ gccheckmark int32
+ gcpacertrace int32
+ gcshrinkstackoff int32
+ gcstoptheworld int32
+ gctrace int32
+ invalidptr int32
+ madvdontneed int32 // for Linux; issue 28466
+ scavtrace int32
+ scheddetail int32
+ schedtrace int32
+ tracebackancestors int32
+ asyncpreemptoff int32
+ harddecommit int32
+ adaptivestackstart int32
+
+ // debug.malloc is used as a combined debug check
+ // in the malloc function and should be set
+ // if any of the below debug options is != 0.
+ malloc bool
+ allocfreetrace int32
+ inittrace int32
+ sbrk int32
+}
+
+var dbgvars = []dbgVar{
+ {"allocfreetrace", &debug.allocfreetrace},
+ {"clobberfree", &debug.clobberfree},
+ {"cgocheck", &debug.cgocheck},
+ {"efence", &debug.efence},
+ {"gccheckmark", &debug.gccheckmark},
+ {"gcpacertrace", &debug.gcpacertrace},
+ {"gcshrinkstackoff", &debug.gcshrinkstackoff},
+ {"gcstoptheworld", &debug.gcstoptheworld},
+ {"gctrace", &debug.gctrace},
+ {"invalidptr", &debug.invalidptr},
+ {"madvdontneed", &debug.madvdontneed},
+ {"sbrk", &debug.sbrk},
+ {"scavtrace", &debug.scavtrace},
+ {"scheddetail", &debug.scheddetail},
+ {"schedtrace", &debug.schedtrace},
+ {"tracebackancestors", &debug.tracebackancestors},
+ {"asyncpreemptoff", &debug.asyncpreemptoff},
+ {"inittrace", &debug.inittrace},
+ {"harddecommit", &debug.harddecommit},
+ {"adaptivestackstart", &debug.adaptivestackstart},
+}
+
+func parsedebugvars() {
+ // defaults
+ debug.cgocheck = 1
+ debug.invalidptr = 1
+ debug.adaptivestackstart = 1 // go119 - set this to 0 to turn larger initial goroutine stacks off
+ if GOOS == "linux" {
+ // On Linux, MADV_FREE is faster than MADV_DONTNEED,
+ // but doesn't affect many of the statistics that
+ // MADV_DONTNEED does until the memory is actually
+ // reclaimed. This generally leads to poor user
+ // experience, like confusing stats in top and other
+ // monitoring tools; and bad integration with
+ // management systems that respond to memory usage.
+ // Hence, default to MADV_DONTNEED.
+ debug.madvdontneed = 1
+ }
+
+ for p := gogetenv("GODEBUG"); p != ""; {
+ field := ""
+ i := bytealg.IndexByteString(p, ',')
+ if i < 0 {
+ field, p = p, ""
+ } else {
+ field, p = p[:i], p[i+1:]
+ }
+ i = bytealg.IndexByteString(field, '=')
+ if i < 0 {
+ continue
+ }
+ key, value := field[:i], field[i+1:]
+
+ // Update MemProfileRate directly here since it
+ // is int, not int32, and should only be updated
+ // if specified in GODEBUG.
+ if key == "memprofilerate" {
+ if n, ok := atoi(value); ok {
+ MemProfileRate = n
+ }
+ } else {
+ for _, v := range dbgvars {
+ if v.name == key {
+ if n, ok := atoi32(value); ok {
+ *v.value = n
+ }
+ }
+ }
+ }
+ }
+
+ debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0
+
+ setTraceback(gogetenv("GOTRACEBACK"))
+ traceback_env = traceback_cache
+}
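The GODEBUG scan above is a hand-rolled split on ',' and then on the first '='; fields without '=' are skipped silently. The same shape using the strings package, which the runtime itself avoids (parseDebugVars is illustrative):

    package main

    import (
        "fmt"
        "strings"
    )

    func parseDebugVars(s string) map[string]string {
        out := map[string]string{}
        for p := s; p != ""; {
            var field string
            if i := strings.IndexByte(p, ','); i < 0 {
                field, p = p, ""
            } else {
                field, p = p[:i], p[i+1:]
            }
            i := strings.IndexByte(field, '=')
            if i < 0 {
                continue // malformed field: no '=', skip it
            }
            out[field[:i]] = field[i+1:]
        }
        return out
    }

    func main() {
        fmt.Println(parseDebugVars("gctrace=1,bogus,schedtrace=1000"))
        // map[gctrace:1 schedtrace:1000]
    }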
+
+//go:linkname setTraceback runtime/debug.SetTraceback
+func setTraceback(level string) {
+ var t uint32
+ switch level {
+ case "none":
+ t = 0
+ case "single", "":
+ t = 1 << tracebackShift
+ case "all":
+ t = 1<<tracebackShift | tracebackAll
+ case "system":
+ t = 2<<tracebackShift | tracebackAll
+ case "crash":
+ t = 2<<tracebackShift | tracebackAll | tracebackCrash
+ default:
+ t = tracebackAll
+ if n, ok := atoi(level); ok && n == int(uint32(n)) {
+ t |= uint32(n) << tracebackShift
+ }
+ }
+ // When C owns the process, simply exiting the process on fatal errors
+ // and panics is surprising. Be louder and abort instead.
+ if islibrary || isarchive {
+ t |= tracebackCrash
+ }
+
+ t |= traceback_env
+
+ atomic.Store(&traceback_cache, t)
+}
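All of GOTRACEBACK ends up in one packed word: bit 0 is crash, bit 1 is all, and the bits above tracebackShift hold the level. Encoding and decoding that word with the same constants (decode is mine):

    package main

    import "fmt"

    const (
        tracebackCrash = 1 << iota // 1
        tracebackAll               // 2
        tracebackShift = iota      // 2
    )

    func decode(t uint32) (level uint32, all, crash bool) {
        return t >> tracebackShift, t&tracebackAll != 0, t&tracebackCrash != 0
    }

    func main() {
        system := uint32(2<<tracebackShift | tracebackAll)
        fmt.Println(decode(system)) // 2 true false
        crash := uint32(2<<tracebackShift | tracebackAll | tracebackCrash)
        fmt.Println(decode(crash)) // 2 true true
    }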
+
+// Poor man's 64-bit division.
+// This is a very special function, do not use it if you are not sure what you are doing.
+// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
+// Handles overflow in a time-specific manner.
+// This keeps us within no-split stack limits on 32-bit processors.
+//
+//go:nosplit
+func timediv(v int64, div int32, rem *int32) int32 {
+ res := int32(0)
+ for bit := 30; bit >= 0; bit-- {
+ if v >= int64(div)<<uint(bit) {
+ v = v - (int64(div) << uint(bit))
+ // Before this for loop, res was 0, thus all these
+ // power of 2 increments are now just bitsets.
+ res |= 1 << uint(bit)
+ }
+ }
+ if v >= int64(div) {
+ if rem != nil {
+ *rem = 0
+ }
+ return 0x7fffffff
+ }
+ if rem != nil {
+ *rem = int32(v)
+ }
+ return res
+}
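To see the shift-and-subtract division at work outside the runtime, the function can be exercised against the same inputs check() uses for its self-test; the body below is reproduced from above with the commentary trimmed, and only the package scaffolding is added:

    package main

    import "fmt"

    func timediv(v int64, div int32, rem *int32) int32 {
        res := int32(0)
        for bit := 30; bit >= 0; bit-- {
            if v >= int64(div)<<uint(bit) {
                v = v - (int64(div) << uint(bit))
                res |= 1 << uint(bit)
            }
        }
        if v >= int64(div) {
            if rem != nil {
                *rem = 0
            }
            return 0x7fffffff // saturate on overflow
        }
        if rem != nil {
            *rem = int32(v)
        }
        return res
    }

    func main() {
        var rem int32
        fmt.Println(timediv(12345*1000000000+54321, 1000000000, &rem), rem)
        // 12345 54321
    }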
+
+// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
+
+//go:nosplit
+func acquirem() *m {
+ _g_ := getg()
+ _g_.m.locks++
+ return _g_.m
+}
+
+//go:nosplit
+func releasem(mp *m) {
+ _g_ := getg()
+ mp.locks--
+ if mp.locks == 0 && _g_.preempt {
+ // restore the preemption request in case we've cleared it in newstack
+ _g_.stackguard0 = stackPreempt
+ }
+}
+
+//go:linkname reflect_typelinks reflect.typelinks
+func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
+ modules := activeModules()
+ sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
+ ret := [][]int32{modules[0].typelinks}
+ for _, md := range modules[1:] {
+ sections = append(sections, unsafe.Pointer(md.types))
+ ret = append(ret, md.typelinks)
+ }
+ return sections, ret
+}
+
+// reflect_resolveNameOff resolves a name offset from a base pointer.
+//
+//go:linkname reflect_resolveNameOff reflect.resolveNameOff
+func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
+ return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
+}
+
+// reflect_resolveTypeOff resolves an *rtype offset from a base type.
+//
+//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
+func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
+ return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
+}
+
+// reflect_resolveTextOff resolves a function pointer offset from a base type.
+//
+//go:linkname reflect_resolveTextOff reflect.resolveTextOff
+func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
+ return (*_type)(rtype).textOff(textOff(off))
+
+}
+
+// reflectlite_resolveNameOff resolves a name offset from a base pointer.
+//
+//go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
+func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
+ return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
+}
+
+// reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
+//
+//go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
+func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
+ return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
+}
+
+// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
+//
+//go:linkname reflect_addReflectOff reflect.addReflectOff
+func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
+ reflectOffsLock()
+ if reflectOffs.m == nil {
+ reflectOffs.m = make(map[int32]unsafe.Pointer)
+ reflectOffs.minv = make(map[unsafe.Pointer]int32)
+ reflectOffs.next = -1
+ }
+ id, found := reflectOffs.minv[ptr]
+ if !found {
+ id = reflectOffs.next
+ reflectOffs.next-- // use negative offsets as IDs to aid debugging
+ reflectOffs.m[id] = ptr
+ reflectOffs.minv[ptr] = id
+ }
+ reflectOffsUnlock()
+ return id
+}
diff --git a/contrib/go/_std_1.19/src/runtime/runtime2.go b/contrib/go/_std_1.19/src/runtime/runtime2.go
new file mode 100644
index 0000000000..e1788223e7
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/runtime2.go
@@ -0,0 +1,1151 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// defined constants
+const (
+ // G status
+ //
+ // Beyond indicating the general state of a G, the G status
+ // acts like a lock on the goroutine's stack (and hence its
+ // ability to execute user code).
+ //
+ // If you add to this list, add to the list
+ // of "okay during garbage collection" status
+ // in mgcmark.go too.
+ //
+ // TODO(austin): The _Gscan bit could be much lighter-weight.
+ // For example, we could choose not to run _Gscanrunnable
+ // goroutines found in the run queue, rather than CAS-looping
+ // until they become _Grunnable. And transitions like
+ // _Gscanwaiting -> _Gscanrunnable are actually okay because
+ // they don't affect stack ownership.
+
+ // _Gidle means this goroutine was just allocated and has not
+ // yet been initialized.
+ _Gidle = iota // 0
+
+ // _Grunnable means this goroutine is on a run queue. It is
+ // not currently executing user code. The stack is not owned.
+ _Grunnable // 1
+
+ // _Grunning means this goroutine may execute user code. The
+ // stack is owned by this goroutine. It is not on a run queue.
+ // It is assigned an M and a P (g.m and g.m.p are valid).
+ _Grunning // 2
+
+ // _Gsyscall means this goroutine is executing a system call.
+ // It is not executing user code. The stack is owned by this
+ // goroutine. It is not on a run queue. It is assigned an M.
+ _Gsyscall // 3
+
+ // _Gwaiting means this goroutine is blocked in the runtime.
+ // It is not executing user code. It is not on a run queue,
+ // but should be recorded somewhere (e.g., a channel wait
+ // queue) so it can be ready()d when necessary. The stack is
+ // not owned *except* that a channel operation may read or
+ // write parts of the stack under the appropriate channel
+ // lock. Otherwise, it is not safe to access the stack after a
+ // goroutine enters _Gwaiting (e.g., it may get moved).
+ _Gwaiting // 4
+
+ // _Gmoribund_unused is currently unused, but hardcoded in gdb
+ // scripts.
+ _Gmoribund_unused // 5
+
+ // _Gdead means this goroutine is currently unused. It may be
+ // just exited, on a free list, or just being initialized. It
+ // is not executing user code. It may or may not have a stack
+ // allocated. The G and its stack (if any) are owned by the M
+ // that is exiting the G or that obtained the G from the free
+ // list.
+ _Gdead // 6
+
+ // _Genqueue_unused is currently unused.
+ _Genqueue_unused // 7
+
+ // _Gcopystack means this goroutine's stack is being moved. It
+ // is not executing user code and is not on a run queue. The
+ // stack is owned by the goroutine that put it in _Gcopystack.
+ _Gcopystack // 8
+
+ // _Gpreempted means this goroutine stopped itself for a
+ // suspendG preemption. It is like _Gwaiting, but nothing is
+ // yet responsible for ready()ing it. Some suspendG must CAS
+ // the status to _Gwaiting to take responsibility for
+ // ready()ing this G.
+ _Gpreempted // 9
+
+ // _Gscan combined with one of the above states other than
+ // _Grunning indicates that GC is scanning the stack. The
+ // goroutine is not executing user code and the stack is owned
+ // by the goroutine that set the _Gscan bit.
+ //
+ // _Gscanrunning is different: it is used to briefly block
+ // state transitions while GC signals the G to scan its own
+ // stack. This is otherwise like _Grunning.
+ //
+ // atomicstatus&~Gscan gives the state the goroutine will
+ // return to when the scan completes.
+ _Gscan = 0x1000
+ _Gscanrunnable = _Gscan + _Grunnable // 0x1001
+ _Gscanrunning = _Gscan + _Grunning // 0x1002
+ _Gscansyscall = _Gscan + _Gsyscall // 0x1003
+ _Gscanwaiting = _Gscan + _Gwaiting // 0x1004
+ _Gscanpreempted = _Gscan + _Gpreempted // 0x1009
+)
+
+const (
+ // P status
+
+ // _Pidle means a P is not being used to run user code or the
+ // scheduler. Typically, it's on the idle P list and available
+ // to the scheduler, but it may just be transitioning between
+ // other states.
+ //
+ // The P is owned by the idle list or by whatever is
+ // transitioning its state. Its run queue is empty.
+ _Pidle = iota
+
+ // _Prunning means a P is owned by an M and is being used to
+ // run user code or the scheduler. Only the M that owns this P
+ // is allowed to change the P's status from _Prunning. The M
+ // may transition the P to _Pidle (if it has no more work to
+ // do), _Psyscall (when entering a syscall), or _Pgcstop (to
+ // halt for the GC). The M may also hand ownership of the P
+ // off directly to another M (e.g., to schedule a locked G).
+ _Prunning
+
+ // _Psyscall means a P is not running user code. It has
+ // affinity to an M in a syscall but is not owned by it and
+ // may be stolen by another M. This is similar to _Pidle but
+ // uses lightweight transitions and maintains M affinity.
+ //
+ // Leaving _Psyscall must be done with a CAS, either to steal
+ // or retake the P. Note that there's an ABA hazard: even if
+ // an M successfully CASes its original P back to _Prunning
+ // after a syscall, it must understand the P may have been
+ // used by another M in the interim.
+ _Psyscall
+
+ // _Pgcstop means a P is halted for STW and owned by the M
+ // that stopped the world. The M that stopped the world
+ // continues to use its P, even in _Pgcstop. Transitioning
+ // from _Prunning to _Pgcstop causes an M to release its P and
+ // park.
+ //
+ // The P retains its run queue and startTheWorld will restart
+ // the scheduler on Ps with non-empty run queues.
+ _Pgcstop
+
+ // _Pdead means a P is no longer used (GOMAXPROCS shrank). We
+ // reuse Ps if GOMAXPROCS increases. A dead P is mostly
+ // stripped of its resources, though a few things remain
+ // (e.g., trace buffers).
+ _Pdead
+)
+
+// Mutual exclusion locks. In the uncontended case,
+// as fast as spin locks (just a few user-level instructions),
+// but on the contention path they sleep in the kernel.
+// A zeroed Mutex is unlocked (no need to initialize each lock).
+// Initialization is helpful for static lock ranking, but not required.
+type mutex struct {
+ // Empty struct if lock ranking is disabled, otherwise includes the lock rank
+ lockRankStruct
+ // Futex-based impl treats it as uint32 key,
+ // while sema-based impl as M* waitm.
+ // Used to be a union, but unions break precise GC.
+ key uintptr
+}
+
+// sleep and wakeup on one-time events.
+// before any calls to notesleep or notewakeup,
+// must call noteclear to initialize the Note.
+// then, exactly one thread can call notesleep
+// and exactly one thread can call notewakeup (once).
+// once notewakeup has been called, the notesleep
+// will return. future notesleep will return immediately.
+// subsequent noteclear must be called only after
+// previous notesleep has returned, e.g. it's disallowed
+// to call noteclear straight after notewakeup.
+//
+// notetsleep is like notesleep but wakes up after
+// a given number of nanoseconds even if the event
+// has not yet happened. if a goroutine uses notetsleep to
+// wake up early, it must wait to call noteclear until it
+// can be sure that no other goroutine is calling
+// notewakeup.
+//
+// notesleep/notetsleep are generally called on g0,
+// notetsleepg is similar to notetsleep but is called on user g.
+type note struct {
+ // Futex-based impl treats it as uint32 key,
+ // while sema-based impl as M* waitm.
+ // Used to be a union, but unions break precise GC.
+ key uintptr
+}
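The note contract is a one-shot event: exactly one sleeper, exactly one waker, and a wakeup that precedes the sleep must not be lost. Outside the runtime the closest analogue is a one-element buffered channel. A simplified sketch that models a single sleep/wakeup pair (it does not model noteclear reuse):

    package main

    import "fmt"

    type event struct{ ch chan struct{} }

    func newEvent() *event { return &event{ch: make(chan struct{}, 1)} }

    // wakeup may run before or after sleep, but exactly once.
    func (e *event) wakeup() { e.ch <- struct{}{} }

    // sleep returns once wakeup has run; an earlier wakeup is held in the buffer.
    func (e *event) sleep() { <-e.ch }

    func main() {
        e := newEvent()
        go e.wakeup()
        e.sleep()
        fmt.Println("woken")
    }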
+
+type funcval struct {
+ fn uintptr
+ // variable-size, fn-specific data here
+}
+
+type iface struct {
+ tab *itab
+ data unsafe.Pointer
+}
+
+type eface struct {
+ _type *_type
+ data unsafe.Pointer
+}
+
+func efaceOf(ep *any) *eface {
+ return (*eface)(unsafe.Pointer(ep))
+}
+
+// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
+// It is particularly important to avoid write barriers when the current P has
+// been released, because the GC thinks the world is stopped, and an
+// unexpected write barrier would not be synchronized with the GC,
+// which can lead to a half-executed write barrier that has marked the object
+// but not queued it. If the GC skips the object and completes before the
+// queuing can occur, it will incorrectly free the object.
+//
+// We tried using special assignment functions invoked only when not
+// holding a running P, but then some updates to a particular memory
+// word went through write barriers and some did not. This breaks the
+// write barrier shadow checking mode, and it is also scary: better to have
+// a word that is completely ignored by the GC than to have one for which
+// only a few updates are ignored.
+//
+// Gs and Ps are always reachable via true pointers in the
+// allgs and allp lists or (during allocation before they reach those lists)
+// from stack variables.
+//
+// Ms are always reachable via true pointers either from allm or
+// freem. Unlike Gs and Ps we do free Ms, so it's important that
+// nothing ever hold an muintptr across a safe point.
+
+// A guintptr holds a goroutine pointer, but typed as a uintptr
+// to bypass write barriers. It is used in the Gobuf goroutine state
+// and in scheduling lists that are manipulated without a P.
+//
+// The Gobuf.g goroutine pointer is almost always updated by assembly code.
+// In one of the few places it is updated by Go code - func save - it must be
+// treated as a uintptr to avoid a write barrier being emitted at a bad time.
+// Instead of figuring out how to emit the write barriers missing in the
+// assembly manipulation, we change the type of the field to uintptr,
+// so that it does not require write barriers at all.
+//
+// Goroutine structs are published in the allg list and never freed.
+// That will keep the goroutine structs from being collected.
+// There is never a time that Gobuf.g's contain the only references
+// to a goroutine: the publishing of the goroutine in allg comes first.
+// Goroutine pointers are also kept in non-GC-visible places like TLS,
+// so I can't see them ever moving. If we did want to start moving data
+// in the GC, we'd need to allocate the goroutine structs from an
+// alternate arena. Using guintptr doesn't make that problem any worse.
+// Note that pollDesc.rg, pollDesc.wg also store g in uintptr form,
+// so they would need to be updated too if g's start moving.
+type guintptr uintptr
+
+//go:nosplit
+func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }
+
+//go:nosplit
+func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }
+
+//go:nosplit
+func (gp *guintptr) cas(old, new guintptr) bool {
+ return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
+}
+
+// setGNoWB performs *gp = new without a write barrier.
+// For times when it's impractical to use a guintptr.
+//
+//go:nosplit
+//go:nowritebarrier
+func setGNoWB(gp **g, new *g) {
+ (*guintptr)(unsafe.Pointer(gp)).set(new)
+}
+
+type puintptr uintptr
+
+//go:nosplit
+func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }
+
+//go:nosplit
+func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
+
+// muintptr is a *m that is not tracked by the garbage collector.
+//
+// Because we do free Ms, there are some additional constraints on
+// muintptrs:
+//
+// 1. Never hold an muintptr locally across a safe point.
+//
+// 2. Any muintptr in the heap must be owned by the M itself so it can
+// ensure it is not in use when the last true *m is released.
+type muintptr uintptr
+
+//go:nosplit
+func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }
+
+//go:nosplit
+func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }
+
+// setMNoWB performs *mp = new without a write barrier.
+// For times when it's impractical to use an muintptr.
+//
+//go:nosplit
+//go:nowritebarrier
+func setMNoWB(mp **m, new *m) {
+ (*muintptr)(unsafe.Pointer(mp)).set(new)
+}
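What guintptr, puintptr, and muintptr buy is exactly this: a pointer stored as an integer is invisible to the GC, so stores need no write barrier, and the pattern stays sound only because, as the comments above spell out, something else (allgs, allp, allm) keeps the target reachable. An unsafe user-space illustration of the mechanism, not a pattern to copy without such guarantees:

    package main

    import (
        "fmt"
        "unsafe"
    )

    type node struct{ v int }

    // nodeptr plays the role of guintptr: a *node hidden in a uintptr.
    type nodeptr uintptr

    func (np nodeptr) ptr() *node { return (*node)(unsafe.Pointer(np)) }

    // set is a plain integer store: the compiler emits no write barrier for it.
    func (np *nodeptr) set(n *node) { *np = nodeptr(unsafe.Pointer(n)) }

    func main() {
        n := &node{v: 42} // n itself keeps the node reachable
        var np nodeptr
        np.set(n)
        fmt.Println(np.ptr().v) // 42
    }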
+
+type gobuf struct {
+ // The offsets of sp, pc, and g are known to (hard-coded in) libmach.
+ //
+ // ctxt is unusual with respect to GC: it may be a
+ // heap-allocated funcval, so GC needs to track it, but it
+ // needs to be set and cleared from assembly, where it's
+ // difficult to have write barriers. However, ctxt is really a
+ // saved, live register, and we only ever exchange it between
+ // the real register and the gobuf. Hence, we treat it as a
+ // root during stack scanning, which means assembly that saves
+ // and restores it doesn't need write barriers. It's still
+ // typed as a pointer so that any other writes from Go get
+ // write barriers.
+ sp uintptr
+ pc uintptr
+ g guintptr
+ ctxt unsafe.Pointer
+ ret uintptr
+ lr uintptr
+ bp uintptr // for framepointer-enabled architectures
+}
+
+// sudog represents a g in a wait list, such as for sending/receiving
+// on a channel.
+//
+// sudog is necessary because the g ↔ synchronization object relation
+// is many-to-many. A g can be on many wait lists, so there may be
+// many sudogs for one g; and many gs may be waiting on the same
+// synchronization object, so there may be many sudogs for one object.
+//
+// sudogs are allocated from a special pool. Use acquireSudog and
+// releaseSudog to allocate and free them.
+type sudog struct {
+ // The following fields are protected by the hchan.lock of the
+ // channel this sudog is blocking on. shrinkstack depends on
+ // this for sudogs involved in channel ops.
+
+ g *g
+
+ next *sudog
+ prev *sudog
+ elem unsafe.Pointer // data element (may point to stack)
+
+ // The following fields are never accessed concurrently.
+ // For channels, waitlink is only accessed by g.
+ // For semaphores, all fields (including the ones above)
+ // are only accessed when holding a semaRoot lock.
+
+ acquiretime int64
+ releasetime int64
+ ticket uint32
+
+ // isSelect indicates g is participating in a select, so
+ // g.selectDone must be CAS'd to win the wake-up race.
+ isSelect bool
+
+ // success indicates whether communication over channel c
+ // succeeded. It is true if the goroutine was awoken because a
+ // value was delivered over channel c, and false if awoken
+ // because c was closed.
+ success bool
+
+ parent *sudog // semaRoot binary tree
+ waitlink *sudog // g.waiting list or semaRoot
+ waittail *sudog // semaRoot
+ c *hchan // channel
+}
+
+type libcall struct {
+ fn uintptr
+ n uintptr // number of parameters
+ args uintptr // parameters
+ r1 uintptr // return values
+ r2 uintptr
+ err uintptr // error number
+}
+
+// Stack describes a Go execution stack.
+// The bounds of the stack are exactly [lo, hi),
+// with no implicit data structures on either side.
+type stack struct {
+ lo uintptr
+ hi uintptr
+}
+
+// heldLockInfo gives info on a held lock and the rank of that lock
+type heldLockInfo struct {
+ lockAddr uintptr
+ rank lockRank
+}
+
+type g struct {
+ // Stack parameters.
+ // stack describes the actual stack memory: [stack.lo, stack.hi).
+ // stackguard0 is the stack pointer compared in the Go stack growth prologue.
+ // It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
+ // stackguard1 is the stack pointer compared in the C stack growth prologue.
+ // It is stack.lo+StackGuard on g0 and gsignal stacks.
+ // It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
+ stack stack // offset known to runtime/cgo
+ stackguard0 uintptr // offset known to liblink
+ stackguard1 uintptr // offset known to liblink
+
+ _panic *_panic // innermost panic - offset known to liblink
+ _defer *_defer // innermost defer
+ m *m // current m; offset known to arm liblink
+ sched gobuf
+ syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
+ syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
+ stktopsp uintptr // expected sp at top of stack, to check in traceback
+ // param is a generic pointer parameter field used to pass
+ // values in particular contexts where other storage for the
+ // parameter would be difficult to find. It is currently used
+ // in three ways:
+ // 1. When a channel operation wakes up a blocked goroutine, it sets param to
+ // point to the sudog of the completed blocking operation.
+ // 2. By gcAssistAlloc1 to signal back to its caller that the goroutine completed
+ // the GC cycle. It is unsafe to do so in any other way, because the goroutine's
+ // stack may have moved in the meantime.
+ // 3. By debugCallWrap to pass parameters to a new goroutine because allocating a
+ // closure in the runtime is forbidden.
+ param unsafe.Pointer
+ atomicstatus uint32
+ stackLock uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
+ goid int64
+ schedlink guintptr
+ waitsince int64 // approx time when the g became blocked
+ waitreason waitReason // if status==Gwaiting
+
+ preempt bool // preemption signal, duplicates stackguard0 = stackpreempt
+ preemptStop bool // transition to _Gpreempted on preemption; otherwise, just deschedule
+ preemptShrink bool // shrink stack at synchronous safe point
+
+ // asyncSafePoint is set if g is stopped at an asynchronous
+ // safe point. This means there are frames on the stack
+ // without precise pointer information.
+ asyncSafePoint bool
+
+ paniconfault bool // panic (instead of crash) on unexpected fault address
+ gcscandone bool // g has scanned stack; protected by _Gscan bit in status
+ throwsplit bool // must not split stack
+ // activeStackChans indicates that there are unlocked channels
+ // pointing into this goroutine's stack. If true, stack
+ // copying needs to acquire channel locks to protect these
+ // areas of the stack.
+ activeStackChans bool
+ // parkingOnChan indicates that the goroutine is about to
+ // park on a chansend or chanrecv. Used to signal an unsafe point
+ // for stack shrinking. It's a boolean value, but is updated atomically.
+ parkingOnChan uint8
+
+ raceignore int8 // ignore race detection events
+ sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine
+ tracking bool // whether we're tracking this G for sched latency statistics
+ trackingSeq uint8 // used to decide whether to track this G
+ runnableStamp int64 // timestamp of when the G last became runnable, only used when tracking
+ runnableTime int64 // the amount of time spent runnable, cleared when running, only used when tracking
+ sysexitticks int64 // cputicks when syscall has returned (for tracing)
+ traceseq uint64 // trace event sequencer
+ tracelastp puintptr // last P that emitted an event for this goroutine
+ lockedm muintptr
+ sig uint32
+ writebuf []byte
+ sigcode0 uintptr
+ sigcode1 uintptr
+ sigpc uintptr
+ gopc uintptr // pc of go statement that created this goroutine
+ ancestors *[]ancestorInfo // ancestor information for the goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
+ startpc uintptr // pc of goroutine function
+ racectx uintptr
+ waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
+ cgoCtxt []uintptr // cgo traceback context
+ labels unsafe.Pointer // profiler labels
+ timer *timer // cached timer for time.Sleep
+ selectDone uint32 // are we participating in a select and did someone win the race?
+
+ // goroutineProfiled indicates the status of this goroutine's stack for the
+ // current in-progress goroutine profile
+ goroutineProfiled goroutineProfileStateHolder
+
+ // Per-G GC state
+
+ // gcAssistBytes is this G's GC assist credit in terms of
+ // bytes allocated. If this is positive, then the G has credit
+ // to allocate gcAssistBytes bytes without assisting. If this
+ // is negative, then the G must correct this by performing
+ // scan work. We track this in bytes to make it fast to update
+ // and check for debt in the malloc hot path. The assist ratio
+ // determines how this corresponds to scan work debt.
+ gcAssistBytes int64
+}
+
+// gTrackingPeriod is the number of transitions out of _Grunning between
+// latency tracking runs.
+const gTrackingPeriod = 8
+
+const (
+ // tlsSlots is the number of pointer-sized slots reserved for TLS on some platforms,
+ // like Windows.
+ tlsSlots = 6
+ tlsSize = tlsSlots * goarch.PtrSize
+)
+
+type m struct {
+ g0 *g // goroutine with scheduling stack
+ morebuf gobuf // gobuf arg to morestack
+ divmod uint32 // div/mod denominator for arm - known to liblink
+ _ uint32 // align next field to 8 bytes
+
+ // Fields not known to debuggers.
+ procid uint64 // for debuggers, but offset not hard-coded
+ gsignal *g // signal-handling g
+ goSigStack gsignalStack // Go-allocated signal handling stack
+ sigmask sigset // storage for saved signal mask
+ tls [tlsSlots]uintptr // thread-local storage (for x86 extern register)
+ mstartfn func()
+ curg *g // current running goroutine
+ caughtsig guintptr // goroutine running during fatal signal
+ p puintptr // attached p for executing go code (nil if not executing go code)
+ nextp puintptr
+ oldp puintptr // the p that was attached before executing a syscall
+ id int64
+ mallocing int32
+ throwing throwType
+ preemptoff string // if != "", keep curg running on this m
+ locks int32
+ dying int32
+ profilehz int32
+ spinning bool // m is out of work and is actively looking for work
+ blocked bool // m is blocked on a note
+ newSigstack bool // minit on C thread called sigaltstack
+ printlock int8
+ incgo bool // m is executing a cgo call
+ freeWait uint32 // if == 0, safe to free g0 and delete m (atomic)
+ fastrand uint64
+ needextram bool
+ traceback uint8
+ ncgocall uint64 // number of cgo calls in total
+ ncgo int32 // number of cgo calls currently in progress
+ cgoCallersUse uint32 // if non-zero, cgoCallers in use temporarily
+ cgoCallers *cgoCallers // cgo traceback if crashing in cgo call
+ park note
+ alllink *m // on allm
+ schedlink muintptr
+ lockedg guintptr
+ createstack [32]uintptr // stack that created this thread.
+ lockedExt uint32 // tracking for external LockOSThread
+ lockedInt uint32 // tracking for internal lockOSThread
+ nextwaitm muintptr // next m waiting for lock
+ waitunlockf func(*g, unsafe.Pointer) bool
+ waitlock unsafe.Pointer
+ waittraceev byte
+ waittraceskip int
+ startingtrace bool
+ syscalltick uint32
+ freelink *m // on sched.freem
+
+ // these are here because they are too large to be on the stack
+ // of low-level NOSPLIT functions.
+ libcall libcall
+ libcallpc uintptr // for cpu profiler
+ libcallsp uintptr
+ libcallg guintptr
+ syscall libcall // stores syscall parameters on windows
+
+ vdsoSP uintptr // SP for traceback while in VDSO call (0 if not in call)
+ vdsoPC uintptr // PC for traceback while in VDSO call
+
+ // preemptGen counts the number of completed preemption
+ // signals. This is used to detect when a preemption is
+ // requested, but fails. Accessed atomically.
+ preemptGen uint32
+
+ // Whether this is a pending preemption signal on this M.
+ // Accessed atomically.
+ signalPending uint32
+
+ dlogPerM
+
+ mOS
+
+ // Up to 10 locks held by this m, maintained by the lock ranking code.
+ locksHeldLen int
+ locksHeld [10]heldLockInfo
+}
+
+type p struct {
+ id int32
+ status uint32 // one of pidle/prunning/...
+ link puintptr
+ schedtick uint32 // incremented on every scheduler call
+ syscalltick uint32 // incremented on every system call
+ sysmontick sysmontick // last tick observed by sysmon
+ m muintptr // back-link to associated m (nil if idle)
+ mcache *mcache
+ pcache pageCache
+ raceprocctx uintptr
+
+ deferpool []*_defer // pool of available defer structs (see panic.go)
+ deferpoolbuf [32]*_defer
+
+ // Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
+ goidcache uint64
+ goidcacheend uint64
+
+ // Queue of runnable goroutines. Accessed without lock.
+ runqhead uint32
+ runqtail uint32
+ runq [256]guintptr
+ // runnext, if non-nil, is a runnable G that was ready'd by
+ // the current G and should be run next instead of what's in
+ // runq if there's time remaining in the running G's time
+ // slice. It will inherit the time left in the current time
+ // slice. If a set of goroutines is locked in a
+ // communicate-and-wait pattern, this schedules that set as a
+ // unit and eliminates the (potentially large) scheduling
+ // latency that otherwise arises from adding the ready'd
+ // goroutines to the end of the run queue.
+ //
+ // Note that while other P's may atomically CAS this to zero,
+ // only the owner P can CAS it to a valid G.
+ runnext guintptr
+
+ // Available G's (status == Gdead)
+ gFree struct {
+ gList
+ n int32
+ }
+
+ sudogcache []*sudog
+ sudogbuf [128]*sudog
+
+ // Cache of mspan objects from the heap.
+ mspancache struct {
+ // We need an explicit length here because this field is used
+ // in allocation codepaths where write barriers are not allowed,
+ // and eliminating the write barrier/keeping it eliminated from
+ // slice updates is tricky, moreso than just managing the length
+ // ourselves.
+ len int
+ buf [128]*mspan
+ }
+
+ tracebuf traceBufPtr
+
+ // traceSweep indicates the sweep events should be traced.
+ // This is used to defer the sweep start event until a span
+ // has actually been swept.
+ traceSweep bool
+ // traceSwept and traceReclaimed track the number of bytes
+ // swept and reclaimed by sweeping in the current sweep loop.
+ traceSwept, traceReclaimed uintptr
+
+ palloc persistentAlloc // per-P to avoid mutex
+
+ _ uint32 // Alignment for atomic fields below
+
+ // The when field of the first entry on the timer heap.
+ // This is updated using atomic functions.
+ // This is 0 if the timer heap is empty.
+ timer0When uint64
+
+ // The earliest known nextwhen field of a timer with
+ // timerModifiedEarlier status. Because the timer may have been
+ // modified again, there need not be any timer with this value.
+ // This is updated using atomic functions.
+ // This is 0 if there are no timerModifiedEarlier timers.
+ timerModifiedEarliest uint64
+
+ // Per-P GC state
+ gcAssistTime int64 // Nanoseconds in assistAlloc
+ gcFractionalMarkTime int64 // Nanoseconds in fractional mark worker (atomic)
+
+ // limiterEvent tracks events for the GC CPU limiter.
+ limiterEvent limiterEvent
+
+ // gcMarkWorkerMode is the mode for the next mark worker to run in.
+ // That is, this is used to communicate with the worker goroutine
+ // selected for immediate execution by
+ // gcController.findRunnableGCWorker. When scheduling other goroutines,
+ // this field must be set to gcMarkWorkerNotWorker.
+ gcMarkWorkerMode gcMarkWorkerMode
+ // gcMarkWorkerStartTime is the nanotime() at which the most recent
+ // mark worker started.
+ gcMarkWorkerStartTime int64
+
+ // gcw is this P's GC work buffer cache. The work buffer is
+ // filled by write barriers, drained by mutator assists, and
+ // disposed on certain GC state transitions.
+ gcw gcWork
+
+ // wbBuf is this P's GC write barrier buffer.
+ //
+ // TODO: Consider caching this in the running G.
+ wbBuf wbBuf
+
+ runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point
+
+ // statsSeq is a counter indicating whether this P is currently
+ // writing any stats. Its value is even when not, odd when it is.
+ statsSeq uint32
+
+ // Lock for timers. We normally access the timers while running
+ // on this P, but the scheduler can also do it from a different P.
+ timersLock mutex
+
+ // Actions to take at some time. This is used to implement the
+ // standard library's time package.
+ // Must hold timersLock to access.
+ timers []*timer
+
+ // Number of timers in P's heap.
+ // Modified using atomic instructions.
+ numTimers uint32
+
+ // Number of timerDeleted timers in P's heap.
+ // Modified using atomic instructions.
+ deletedTimers uint32
+
+ // Race context used while executing timer functions.
+ timerRaceCtx uintptr
+
+ // maxStackScanDelta accumulates the amount of stack space held by
+ // live goroutines (i.e. those eligible for stack scanning).
+ // Flushed to gcController.maxStackScan once maxStackScanSlack
+ // or -maxStackScanSlack is reached.
+ maxStackScanDelta int64
+
+ // gc-time statistics about current goroutines
+ // Note that this differs from maxStackScan in that this
+ // accumulates the actual stack observed to be used at GC time (hi - sp),
+ // not an instantaneous measure of the total stack size that might need
+ // to be scanned (hi - lo).
+ scannedStackSize uint64 // stack size of goroutines scanned by this P
+ scannedStacks uint64 // number of goroutines scanned by this P
+
+ // preempt is set to indicate that this P should enter the
+ // scheduler ASAP (regardless of what G is running on it).
+ preempt bool
+
+ // Padding is no longer needed. False sharing is now not a worry because p is large enough
+ // that its size class is an integer multiple of the cache line size (for any of our architectures).
+}
+
+type schedt struct {
+ // accessed atomically. keep at top to ensure alignment on 32-bit systems.
+ goidgen uint64
+ lastpoll uint64 // time of last network poll, 0 if currently polling
+ pollUntil uint64 // time to which current poll is sleeping
+
+ lock mutex
+
+ // When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
+ // sure to call checkdead().
+
+ midle muintptr // idle m's waiting for work
+ nmidle int32 // number of idle m's waiting for work
+ nmidlelocked int32 // number of locked m's waiting for work
+ mnext int64 // number of m's that have been created and next M ID
+ maxmcount int32 // maximum number of m's allowed (or die)
+ nmsys int32 // number of system m's not counted for deadlock
+ nmfreed int64 // cumulative number of freed m's
+
+ ngsys uint32 // number of system goroutines; updated atomically
+
+ pidle puintptr // idle p's
+ npidle uint32
+ nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go.
+
+ // Global runnable queue.
+ runq gQueue
+ runqsize int32
+
+ // disable controls selective disabling of the scheduler.
+ //
+ // Use schedEnableUser to control this.
+ //
+ // disable is protected by sched.lock.
+ disable struct {
+ // user disables scheduling of user goroutines.
+ user bool
+ runnable gQueue // pending runnable Gs
+ n int32 // length of runnable
+ }
+
+ // Global cache of dead G's.
+ gFree struct {
+ lock mutex
+ stack gList // Gs with stacks
+ noStack gList // Gs without stacks
+ n int32
+ }
+
+ // Central cache of sudog structs.
+ sudoglock mutex
+ sudogcache *sudog
+
+ // Central pool of available defer structs.
+ deferlock mutex
+ deferpool *_defer
+
+ // freem is the list of m's waiting to be freed when their
+ // m.exited is set. Linked through m.freelink.
+ freem *m
+
+ gcwaiting uint32 // gc is waiting to run
+ stopwait int32
+ stopnote note
+ sysmonwait uint32
+ sysmonnote note
+
+ // safepointFn should be called on each P at the next GC
+ // safepoint if p.runSafePointFn is set.
+ safePointFn func(*p)
+ safePointWait int32
+ safePointNote note
+
+ profilehz int32 // cpu profiling rate
+
+ procresizetime int64 // nanotime() of last change to gomaxprocs
+ totaltime int64 // ∫gomaxprocs dt up to procresizetime
+
+ // sysmonlock protects sysmon's actions on the runtime.
+ //
+ // Acquire and hold this mutex to block sysmon from interacting
+ // with the rest of the runtime.
+ sysmonlock mutex
+
+ // timeToRun is a distribution of scheduling latencies, defined
+ // as the sum of time a G spends in the _Grunnable state before
+ // it transitions to _Grunning.
+ //
+ // timeToRun is protected by sched.lock.
+ timeToRun timeHistogram
+}
+
+// Values for the flags field of a sigTabT.
+const (
+ _SigNotify = 1 << iota // let signal.Notify have signal, even if from kernel
+ _SigKill // if signal.Notify doesn't take it, exit quietly
+ _SigThrow // if signal.Notify doesn't take it, exit loudly
+ _SigPanic // if the signal is from the kernel, panic
+ _SigDefault // if the signal isn't explicitly requested, don't monitor it
+ _SigGoExit // cause all runtime procs to exit (only used on Plan 9).
+ _SigSetStack // Don't explicitly install handler, but add SA_ONSTACK to existing libc handler
+ _SigUnblock // always unblock; see blockableSig
+ _SigIgn // _SIG_DFL action is to ignore the signal
+)
+
+// Layout of in-memory per-function information prepared by the linker.
+// See https://golang.org/s/go12symtab.
+// Keep in sync with linker (../cmd/link/internal/ld/pcln.go:/pclntab)
+// and with package debug/gosym and with symtab.go in package runtime.
+type _func struct {
+ entryoff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart
+ nameoff int32 // function name
+
+ args int32 // in/out args size
+ deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.
+
+ pcsp uint32
+ pcfile uint32
+ pcln uint32
+ npcdata uint32
+ cuOffset uint32 // runtime.cutab offset of this function's CU
+ funcID funcID // set for certain special runtime functions
+ flag funcFlag
+ _ [1]byte // pad
+ nfuncdata uint8 // must be last, must end on a uint32-aligned boundary
+}
+
+// Pseudo-Func that is returned for PCs that occur in inlined code.
+// A *Func can be either a *_func or a *funcinl, and they are distinguished
+// by the first uintptr.
+type funcinl struct {
+ ones uint32 // set to ^0 to distinguish from _func
+ entry uintptr // entry of the real (the "outermost") frame
+ name string
+ file string
+ line int
+}
+
+// Layout of itab known to compilers.
+// Allocated in non-garbage-collected memory.
+// Needs to be in sync with
+// ../cmd/compile/internal/reflectdata/reflect.go:/^func.WriteTabs.
+type itab struct {
+ inter *interfacetype
+ _type *_type
+ hash uint32 // copy of _type.hash. Used for type switches.
+ _ [4]byte
+ fun [1]uintptr // variable sized. fun[0]==0 means _type does not implement inter.
+}
+
+// Lock-free stack node.
+// Also known to export_test.go.
+type lfnode struct {
+ next uint64
+ pushcnt uintptr
+}
+
+type forcegcstate struct {
+ lock mutex
+ g *g
+ idle uint32
+}
+
+// extendRandom extends the random numbers in r[:n] to the whole slice r.
+// Treats n<0 as n==0.
+func extendRandom(r []byte, n int) {
+ if n < 0 {
+ n = 0
+ }
+ for n < len(r) {
+ // Extend random bits using hash function & time seed
+ w := n
+ if w > 16 {
+ w = 16
+ }
+ h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
+ for i := 0; i < goarch.PtrSize && n < len(r); i++ {
+ r[n] = byte(h)
+ n++
+ h >>= 8
+ }
+ }
+}
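
memhash and nanotime are runtime-internal, so extendRandom cannot be called from ordinary code, but its technique ports directly: hash the last few known bytes with a seeded hash and spread the 64-bit result over the next bytes. A hedged, standalone sketch using hash/maphash (maphash.Bytes exists since Go 1.19; extendRandomLike is an illustrative name):

package main

import (
	"fmt"
	"hash/maphash"
)

// extendRandomLike mimics runtime.extendRandom with portable pieces:
// it repeatedly hashes up to the last 16 known bytes and spreads the
// 64-bit result over the next bytes of r.
func extendRandomLike(r []byte, n int) {
	if n < 0 {
		n = 0
	}
	seed := maphash.MakeSeed() // stands in for the runtime's time-based seed
	for n < len(r) {
		w := n
		if w > 16 {
			w = 16
		}
		h := maphash.Bytes(seed, r[n-w:n])
		for i := 0; i < 8 && n < len(r); i++ {
			r[n] = byte(h)
			n++
			h >>= 8
		}
	}
}

func main() {
	buf := make([]byte, 32)
	copy(buf, []byte{1, 2, 3, 4}) // pretend the first 4 bytes are real entropy
	extendRandomLike(buf, 4)
	fmt.Printf("% x\n", buf)
}
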
+
+// A _defer holds an entry on the list of deferred calls.
+// If you add a field here, add code to clear it in deferProcStack.
+// This struct must match the code in cmd/compile/internal/ssagen/ssa.go:deferstruct
+// and cmd/compile/internal/ssagen/ssa.go:(*state).call.
+// Some defers will be allocated on the stack and some on the heap.
+// All defers are logically part of the stack, so write barriers to
+// initialize them are not required. All defers must be manually scanned,
+// and for heap defers, marked.
+type _defer struct {
+ started bool
+ heap bool
+ // openDefer indicates that this _defer is for a frame with open-coded
+ // defers. We have only one defer record for the entire frame (which may
+ // currently have 0, 1, or more defers active).
+ openDefer bool
+ sp uintptr // sp at time of defer
+ pc uintptr // pc at time of defer
+ fn func() // can be nil for open-coded defers
+ _panic *_panic // panic that is running defer
+ link *_defer // next defer on G; can point to either heap or stack!
+
+ // If openDefer is true, the fields below record values about the stack
+ // frame and associated function that has the open-coded defer(s). sp
+ // above will be the sp for the frame, and pc will be address of the
+ // deferreturn call in the function.
+ fd unsafe.Pointer // funcdata for the function associated with the frame
+ varp uintptr // value of varp for the stack frame
+ // framepc is the current pc associated with the stack frame. Together,
+ // with sp above (which is the sp associated with the stack frame),
+ // framepc/sp can be used as pc/sp pair to continue a stack trace via
+ // gentraceback().
+ framepc uintptr
+}
+
+// A _panic holds information about an active panic.
+//
+// A _panic value must only ever live on the stack.
+//
+// The argp and link fields are stack pointers, but don't need special
+// handling during stack growth: because they are pointer-typed and
+// _panic values only live on the stack, regular stack pointer
+// adjustment takes care of them.
+type _panic struct {
+ argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
+ arg any // argument to panic
+ link *_panic // link to earlier panic
+ pc uintptr // where to return to in runtime if this panic is bypassed
+ sp unsafe.Pointer // where to return to in runtime if this panic is bypassed
+ recovered bool // whether this panic is over
+ aborted bool // the panic was aborted
+ goexit bool
+}
+
+// stack traces
+type stkframe struct {
+ fn funcInfo // function being run
+ pc uintptr // program counter within fn
+ continpc uintptr // program counter where execution can continue, or 0 if not
+ lr uintptr // program counter at caller aka link register
+ sp uintptr // stack pointer at pc
+ fp uintptr // stack pointer at caller aka frame pointer
+ varp uintptr // top of local variables
+ argp uintptr // pointer to function arguments
+ arglen uintptr // number of bytes at argp
+ argmap *bitvector // force use of this argmap
+}
+
+// ancestorInfo records details of where a goroutine was started.
+type ancestorInfo struct {
+ pcs []uintptr // pcs from the stack of this goroutine
+ goid int64 // goroutine id of this goroutine; original goroutine possibly dead
+ gopc uintptr // pc of go statement that created this goroutine
+}
+
+const (
+ _TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions.
+ _TraceTrap // the initial PC, SP are from a trap, not a return PC from a call
+ _TraceJumpStack // if traceback is on a systemstack, resume trace at g that called into it
+)
+
+// The maximum number of frames we print for a traceback
+const _TracebackMaxFrames = 100
+
+// A waitReason explains why a goroutine has been stopped.
+// See gopark. Do not re-use waitReasons, add new ones.
+type waitReason uint8
+
+const (
+ waitReasonZero waitReason = iota // ""
+ waitReasonGCAssistMarking // "GC assist marking"
+ waitReasonIOWait // "IO wait"
+ waitReasonChanReceiveNilChan // "chan receive (nil chan)"
+ waitReasonChanSendNilChan // "chan send (nil chan)"
+ waitReasonDumpingHeap // "dumping heap"
+ waitReasonGarbageCollection // "garbage collection"
+ waitReasonGarbageCollectionScan // "garbage collection scan"
+ waitReasonPanicWait // "panicwait"
+ waitReasonSelect // "select"
+ waitReasonSelectNoCases // "select (no cases)"
+ waitReasonGCAssistWait // "GC assist wait"
+ waitReasonGCSweepWait // "GC sweep wait"
+ waitReasonGCScavengeWait // "GC scavenge wait"
+ waitReasonChanReceive // "chan receive"
+ waitReasonChanSend // "chan send"
+ waitReasonFinalizerWait // "finalizer wait"
+ waitReasonForceGCIdle // "force gc (idle)"
+ waitReasonSemacquire // "semacquire"
+ waitReasonSleep // "sleep"
+ waitReasonSyncCondWait // "sync.Cond.Wait"
+ waitReasonTimerGoroutineIdle // "timer goroutine (idle)"
+ waitReasonTraceReaderBlocked // "trace reader (blocked)"
+ waitReasonWaitForGCCycle // "wait for GC cycle"
+ waitReasonGCWorkerIdle // "GC worker (idle)"
+ waitReasonPreempted // "preempted"
+ waitReasonDebugCall // "debug call"
+)
+
+var waitReasonStrings = [...]string{
+ waitReasonZero: "",
+ waitReasonGCAssistMarking: "GC assist marking",
+ waitReasonIOWait: "IO wait",
+ waitReasonChanReceiveNilChan: "chan receive (nil chan)",
+ waitReasonChanSendNilChan: "chan send (nil chan)",
+ waitReasonDumpingHeap: "dumping heap",
+ waitReasonGarbageCollection: "garbage collection",
+ waitReasonGarbageCollectionScan: "garbage collection scan",
+ waitReasonPanicWait: "panicwait",
+ waitReasonSelect: "select",
+ waitReasonSelectNoCases: "select (no cases)",
+ waitReasonGCAssistWait: "GC assist wait",
+ waitReasonGCSweepWait: "GC sweep wait",
+ waitReasonGCScavengeWait: "GC scavenge wait",
+ waitReasonChanReceive: "chan receive",
+ waitReasonChanSend: "chan send",
+ waitReasonFinalizerWait: "finalizer wait",
+ waitReasonForceGCIdle: "force gc (idle)",
+ waitReasonSemacquire: "semacquire",
+ waitReasonSleep: "sleep",
+ waitReasonSyncCondWait: "sync.Cond.Wait",
+ waitReasonTimerGoroutineIdle: "timer goroutine (idle)",
+ waitReasonTraceReaderBlocked: "trace reader (blocked)",
+ waitReasonWaitForGCCycle: "wait for GC cycle",
+ waitReasonGCWorkerIdle: "GC worker (idle)",
+ waitReasonPreempted: "preempted",
+ waitReasonDebugCall: "debug call",
+}
+
+func (w waitReason) String() string {
+ if w < 0 || w >= waitReason(len(waitReasonStrings)) {
+ return "unknown wait reason"
+ }
+ return waitReasonStrings[w]
+}
+
+var (
+ allm *m
+ gomaxprocs int32
+ ncpu int32
+ forcegc forcegcstate
+ sched schedt
+ newprocs int32
+
+ // allpLock protects P-less reads and size changes of allp, idlepMask,
+ // and timerpMask, and all writes to allp.
+ allpLock mutex
+ // len(allp) == gomaxprocs; may change at safe points, otherwise
+ // immutable.
+ allp []*p
+ // Bitmask of Ps in _Pidle list, one bit per P. Reads and writes must
+ // be atomic. Length may change at safe points.
+ //
+ // Each P must update only its own bit. In order to maintain
+ // consistency, a P going idle must update the idle mask simultaneously with
+ // updates to the idle P list under the sched.lock, otherwise a racing
+ // pidleget may clear the mask before pidleput sets the mask,
+ // corrupting the bitmap.
+ //
+ // N.B., procresize takes ownership of all Ps in stopTheWorldWithSema.
+ idlepMask pMask
+ // Bitmask of Ps that may have a timer, one bit per P. Reads and writes
+ // must be atomic. Length may change at safe points.
+ timerpMask pMask
+
+ // Pool of GC parked background workers. Entries are type
+ // *gcBgMarkWorkerNode.
+ gcBgMarkWorkerPool lfstack
+
+ // Total number of gcBgMarkWorker goroutines. Protected by worldsema.
+ gcBgMarkWorkerCount int32
+
+ // Information about what cpu features are available.
+ // Packages outside the runtime should not use these
+ // as they are not an external api.
+ // Set on startup in asm_{386,amd64}.s
+ processorVersionInfo uint32
+ isIntel bool
+
+ goarm uint8 // set by cmd/link on arm systems
+)
+
+// Set by the linker so the runtime can determine the buildmode.
+var (
+ islibrary bool // -buildmode=c-shared
+ isarchive bool // -buildmode=c-archive
+)
+
+// Must agree with internal/buildcfg.FramePointerEnabled.
+const framepointer_enabled = GOARCH == "amd64" || GOARCH == "arm64"
diff --git a/contrib/go/_std_1.19/src/runtime/runtime_boring.go b/contrib/go/_std_1.19/src/runtime/runtime_boring.go
new file mode 100644
index 0000000000..5a98b20253
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/runtime_boring.go
@@ -0,0 +1,19 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import _ "unsafe" // for go:linkname
+
+//go:linkname boring_runtime_arg0 crypto/internal/boring.runtime_arg0
+func boring_runtime_arg0() string {
+ // On Windows, argslice is not set, and it's too much work to find argv0.
+ if len(argslice) == 0 {
+ return ""
+ }
+ return argslice[0]
+}
+
+//go:linkname fipstls_runtime_arg0 crypto/internal/boring/fipstls.runtime_arg0
+func fipstls_runtime_arg0() string { return boring_runtime_arg0() }
diff --git a/contrib/go/_std_1.18/src/runtime/rwmutex.go b/contrib/go/_std_1.19/src/runtime/rwmutex.go
index 7713c3f1cc..7713c3f1cc 100644
--- a/contrib/go/_std_1.18/src/runtime/rwmutex.go
+++ b/contrib/go/_std_1.19/src/runtime/rwmutex.go
diff --git a/contrib/go/_std_1.18/src/runtime/select.go b/contrib/go/_std_1.19/src/runtime/select.go
index e18b2f14c0..e18b2f14c0 100644
--- a/contrib/go/_std_1.18/src/runtime/select.go
+++ b/contrib/go/_std_1.19/src/runtime/select.go
diff --git a/contrib/go/_std_1.19/src/runtime/sema.go b/contrib/go/_std_1.19/src/runtime/sema.go
new file mode 100644
index 0000000000..39935f70a9
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/sema.go
@@ -0,0 +1,623 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Semaphore implementation exposed to Go.
+// Intended use is to provide a sleep and wakeup
+// primitive that can be used in the contended case
+// of other synchronization primitives.
+// Thus it targets the same goal as Linux's futex,
+// but it has much simpler semantics.
+//
+// That is, don't think of these as semaphores.
+// Think of them as a way to implement sleep and wakeup
+// such that every sleep is paired with a single wakeup,
+// even if, due to races, the wakeup happens before the sleep.
+//
+// See Mullender and Cox, ``Semaphores in Plan 9,''
+// https://swtch.com/semaphore.pdf
+
+package runtime
+
+import (
+ "internal/cpu"
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// Asynchronous semaphore for sync.Mutex.
+
+// A semaRoot holds a balanced tree of sudog with distinct addresses (s.elem).
+// Each of those sudog may in turn point (through s.waitlink) to a list
+// of other sudogs waiting on the same address.
+// The operations on the inner lists of sudogs with the same address
+// are all O(1). The scanning of the top-level semaRoot tree is O(log n),
+// where n is the number of distinct addresses with goroutines blocked
+// on them that hash to the given semaRoot.
+// See golang.org/issue/17953 for a program that worked badly
+// before we introduced the second level of list, and
+// BenchmarkSemTable/OneAddrCollision/* for a benchmark that exercises this.
+type semaRoot struct {
+ lock mutex
+ treap *sudog // root of balanced tree of unique waiters.
+ nwait uint32 // Number of waiters. Read w/o the lock.
+}
+
+var semtable semTable
+
+// Prime to not correlate with any user patterns.
+const semTabSize = 251
+
+type semTable [semTabSize]struct {
+ root semaRoot
+ pad [cpu.CacheLinePadSize - unsafe.Sizeof(semaRoot{})]byte
+}
+
+func (t *semTable) rootFor(addr *uint32) *semaRoot {
+ return &t[(uintptr(unsafe.Pointer(addr))>>3)%semTabSize].root
+}
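
The shift by 3 in rootFor discards the low address bits, which carry almost no information for word-aligned semaphore addresses, and the prime table size keeps allocator regularities from funneling many addresses into one bucket. A standalone sketch of the same computation (bucketFor is an illustrative name):

package main

import (
	"fmt"
	"unsafe"
)

const tabSize = 251 // prime, matching the runtime's semTabSize

// bucketFor reproduces the hashing in semTable.rootFor: drop the low
// three address bits and reduce modulo a prime.
func bucketFor(addr *uint32) uintptr {
	return (uintptr(unsafe.Pointer(addr)) >> 3) % tabSize
}

func main() {
	sems := make([]uint32, 4)
	for i := range sems {
		// Print the bucket index chosen for each semaphore address.
		fmt.Println(bucketFor(&sems[i]))
	}
}
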
+
+//go:linkname sync_runtime_Semacquire sync.runtime_Semacquire
+func sync_runtime_Semacquire(addr *uint32) {
+ semacquire1(addr, false, semaBlockProfile, 0)
+}
+
+//go:linkname poll_runtime_Semacquire internal/poll.runtime_Semacquire
+func poll_runtime_Semacquire(addr *uint32) {
+ semacquire1(addr, false, semaBlockProfile, 0)
+}
+
+//go:linkname sync_runtime_Semrelease sync.runtime_Semrelease
+func sync_runtime_Semrelease(addr *uint32, handoff bool, skipframes int) {
+ semrelease1(addr, handoff, skipframes)
+}
+
+//go:linkname sync_runtime_SemacquireMutex sync.runtime_SemacquireMutex
+func sync_runtime_SemacquireMutex(addr *uint32, lifo bool, skipframes int) {
+ semacquire1(addr, lifo, semaBlockProfile|semaMutexProfile, skipframes)
+}
+
+//go:linkname poll_runtime_Semrelease internal/poll.runtime_Semrelease
+func poll_runtime_Semrelease(addr *uint32) {
+ semrelease(addr)
+}
+
+func readyWithTime(s *sudog, traceskip int) {
+ if s.releasetime != 0 {
+ s.releasetime = cputicks()
+ }
+ goready(s.g, traceskip)
+}
+
+type semaProfileFlags int
+
+const (
+ semaBlockProfile semaProfileFlags = 1 << iota
+ semaMutexProfile
+)
+
+// Called from runtime.
+func semacquire(addr *uint32) {
+ semacquire1(addr, false, 0, 0)
+}
+
+func semacquire1(addr *uint32, lifo bool, profile semaProfileFlags, skipframes int) {
+ gp := getg()
+ if gp != gp.m.curg {
+ throw("semacquire not on the G stack")
+ }
+
+ // Easy case.
+ if cansemacquire(addr) {
+ return
+ }
+
+ // Harder case:
+ // increment waiter count
+ // try cansemacquire one more time, return if succeeded
+ // enqueue itself as a waiter
+ // sleep
+ // (waiter descriptor is dequeued by signaler)
+ s := acquireSudog()
+ root := semtable.rootFor(addr)
+ t0 := int64(0)
+ s.releasetime = 0
+ s.acquiretime = 0
+ s.ticket = 0
+ if profile&semaBlockProfile != 0 && blockprofilerate > 0 {
+ t0 = cputicks()
+ s.releasetime = -1
+ }
+ if profile&semaMutexProfile != 0 && mutexprofilerate > 0 {
+ if t0 == 0 {
+ t0 = cputicks()
+ }
+ s.acquiretime = t0
+ }
+ for {
+ lockWithRank(&root.lock, lockRankRoot)
+ // Add ourselves to nwait to disable "easy case" in semrelease.
+ atomic.Xadd(&root.nwait, 1)
+ // Check cansemacquire to avoid missed wakeup.
+ if cansemacquire(addr) {
+ atomic.Xadd(&root.nwait, -1)
+ unlock(&root.lock)
+ break
+ }
+ // Any semrelease after the cansemacquire knows we're waiting
+ // (we set nwait above), so go to sleep.
+ root.queue(addr, s, lifo)
+ goparkunlock(&root.lock, waitReasonSemacquire, traceEvGoBlockSync, 4+skipframes)
+ if s.ticket != 0 || cansemacquire(addr) {
+ break
+ }
+ }
+ if s.releasetime > 0 {
+ blockevent(s.releasetime-t0, 3+skipframes)
+ }
+ releaseSudog(s)
+}
+
+func semrelease(addr *uint32) {
+ semrelease1(addr, false, 0)
+}
+
+func semrelease1(addr *uint32, handoff bool, skipframes int) {
+ root := semtable.rootFor(addr)
+ atomic.Xadd(addr, 1)
+
+ // Easy case: no waiters?
+ // This check must happen after the xadd, to avoid a missed wakeup
+ // (see loop in semacquire).
+ if atomic.Load(&root.nwait) == 0 {
+ return
+ }
+
+ // Harder case: search for a waiter and wake it.
+ lockWithRank(&root.lock, lockRankRoot)
+ if atomic.Load(&root.nwait) == 0 {
+ // The count is already consumed by another goroutine,
+ // so no need to wake up another goroutine.
+ unlock(&root.lock)
+ return
+ }
+ s, t0 := root.dequeue(addr)
+ if s != nil {
+ atomic.Xadd(&root.nwait, -1)
+ }
+ unlock(&root.lock)
+ if s != nil { // May be slow or even yield, so unlock first
+ acquiretime := s.acquiretime
+ if acquiretime != 0 {
+ mutexevent(t0-acquiretime, 3+skipframes)
+ }
+ if s.ticket != 0 {
+ throw("corrupted semaphore ticket")
+ }
+ if handoff && cansemacquire(addr) {
+ s.ticket = 1
+ }
+ readyWithTime(s, 5+skipframes)
+ if s.ticket == 1 && getg().m.locks == 0 {
+ // Direct G handoff
+ // readyWithTime has added the waiter G as runnext in the
+ // current P; we now call the scheduler so that we start running
+ // the waiter G immediately.
+ // Note that waiter inherits our time slice: this is desirable
+ // to avoid having a highly contended semaphore hog the P
+ // indefinitely. goyield is like Gosched, but it emits a
+ // "preempted" trace event instead and, more importantly, puts
+ // the current G on the local runq instead of the global one.
+ // We only do this in the starving regime (handoff=true), as in
+ // the non-starving case it is possible for a different waiter
+ // to acquire the semaphore while we are yielding/scheduling,
+ // and this would be wasteful. We wait instead to enter starving
+ // regime, and then we start to do direct handoffs of ticket and
+ // P.
+ // See issue 33747 for discussion.
+ goyield()
+ }
+ }
+}
+
+func cansemacquire(addr *uint32) bool {
+ for {
+ v := atomic.Load(addr)
+ if v == 0 {
+ return false
+ }
+ if atomic.Cas(addr, v, v-1) {
+ return true
+ }
+ }
+}
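
semacquire1, semrelease1, and cansemacquire together implement a careful handshake: the releaser increments the count first and only takes the lock if nwait announces waiters, while the acquirer announces itself under the lock and then re-checks the count, so a wakeup can never be lost between the check and the sleep. A hedged user-level analog of that handshake, with channels standing in for goparkunlock/goready (sema and its methods are illustrative names):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// sema is a user-level analog of the protocol above: release skips the
// lock entirely unless waiters have announced themselves, and acquire
// re-checks the count after announcing, closing the missed-wakeup window.
type sema struct {
	count uint32 // semaphore value, decremented by CAS as in cansemacquire
	nwait uint32 // waiter count, read without the lock on the release fast path
	mu    sync.Mutex
	queue []chan struct{}
}

// tryAcquire is the cansemacquire loop: CAS the count down if nonzero.
func (s *sema) tryAcquire() bool {
	for {
		v := atomic.LoadUint32(&s.count)
		if v == 0 {
			return false
		}
		if atomic.CompareAndSwapUint32(&s.count, v, v-1) {
			return true
		}
	}
}

func (s *sema) acquire() {
	if s.tryAcquire() { // easy case
		return
	}
	for {
		s.mu.Lock()
		atomic.AddUint32(&s.nwait, 1) // disable release's fast path
		if s.tryAcquire() {           // re-check to avoid a missed wakeup
			atomic.AddUint32(&s.nwait, ^uint32(0)) // nwait--
			s.mu.Unlock()
			return
		}
		ch := make(chan struct{})
		s.queue = append(s.queue, ch)
		s.mu.Unlock()
		<-ch // park until a releaser wakes us
		if s.tryAcquire() {
			return
		}
	}
}

func (s *sema) release() {
	atomic.AddUint32(&s.count, 1)
	if atomic.LoadUint32(&s.nwait) == 0 {
		return // easy case: no announced waiters
	}
	s.mu.Lock()
	if len(s.queue) > 0 {
		ch := s.queue[0]
		s.queue = s.queue[1:]
		atomic.AddUint32(&s.nwait, ^uint32(0)) // nwait--
		close(ch) // wake one waiter; it re-checks the count itself
	}
	s.mu.Unlock()
}

func main() {
	s := &sema{}
	done := make(chan struct{})
	go func() { s.acquire(); close(done) }()
	s.release()
	<-done
	fmt.Println("acquired")
}
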
+
+// queue adds s to the blocked goroutines in semaRoot.
+func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) {
+ s.g = getg()
+ s.elem = unsafe.Pointer(addr)
+ s.next = nil
+ s.prev = nil
+
+ var last *sudog
+ pt := &root.treap
+ for t := *pt; t != nil; t = *pt {
+ if t.elem == unsafe.Pointer(addr) {
+ // Already have addr in list.
+ if lifo {
+ // Substitute s in t's place in treap.
+ *pt = s
+ s.ticket = t.ticket
+ s.acquiretime = t.acquiretime
+ s.parent = t.parent
+ s.prev = t.prev
+ s.next = t.next
+ if s.prev != nil {
+ s.prev.parent = s
+ }
+ if s.next != nil {
+ s.next.parent = s
+ }
+ // Add t first in s's wait list.
+ s.waitlink = t
+ s.waittail = t.waittail
+ if s.waittail == nil {
+ s.waittail = t
+ }
+ t.parent = nil
+ t.prev = nil
+ t.next = nil
+ t.waittail = nil
+ } else {
+ // Add s to end of t's wait list.
+ if t.waittail == nil {
+ t.waitlink = s
+ } else {
+ t.waittail.waitlink = s
+ }
+ t.waittail = s
+ s.waitlink = nil
+ }
+ return
+ }
+ last = t
+ if uintptr(unsafe.Pointer(addr)) < uintptr(t.elem) {
+ pt = &t.prev
+ } else {
+ pt = &t.next
+ }
+ }
+
+ // Add s as new leaf in tree of unique addrs.
+ // The balanced tree is a treap using ticket as the random heap priority.
+ // That is, it is a binary tree ordered according to the elem addresses,
+ // but then among the space of possible binary trees respecting those
+ // addresses, it is kept balanced on average by maintaining a heap ordering
+ // on the ticket: s.ticket <= both s.prev.ticket and s.next.ticket.
+ // https://en.wikipedia.org/wiki/Treap
+ // https://faculty.washington.edu/aragon/pubs/rst89.pdf
+ //
+ // s.ticket is compared with zero in a couple of places, so we set the
+ // lowest bit. It will not noticeably affect the treap's quality.
+ s.ticket = fastrand() | 1
+ s.parent = last
+ *pt = s
+
+ // Rotate up into tree according to ticket (priority).
+ for s.parent != nil && s.parent.ticket > s.ticket {
+ if s.parent.prev == s {
+ root.rotateRight(s.parent)
+ } else {
+ if s.parent.next != s {
+ panic("semaRoot queue")
+ }
+ root.rotateLeft(s.parent)
+ }
+ }
+}
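
queue and the rotations below preserve two properties at once: binary-search-tree order on the elem addresses and min-heap order on the random tickets. A small sketch that checks both invariants over a pared-down node type (hypothetical; the runtime's nodes are sudogs, and 0 is used here as an "unbounded" sentinel):

package main

import "fmt"

// node is a pared-down stand-in for the runtime's sudog treap links.
type node struct {
	elem       uintptr // BST key: the semaphore address
	ticket     uint32  // heap priority: random, lowest at the root
	prev, next *node   // left and right children
}

// validTreap checks the two invariants the queue/rotate code maintains:
// elem values stay strictly between lo and hi (0 meaning unbounded),
// and every child's ticket is >= its parent's.
func validTreap(n *node, lo, hi uintptr) bool {
	if n == nil {
		return true
	}
	if n.elem <= lo || (hi != 0 && n.elem >= hi) {
		return false
	}
	for _, c := range []*node{n.prev, n.next} {
		if c != nil && c.ticket < n.ticket {
			return false
		}
	}
	return validTreap(n.prev, lo, n.elem) && validTreap(n.next, n.elem, hi)
}

func main() {
	root := &node{elem: 100, ticket: 1,
		prev: &node{elem: 50, ticket: 7},
		next: &node{elem: 200, ticket: 3},
	}
	fmt.Println(validTreap(root, 0, 0)) // true
}
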
+
+// dequeue searches for the first goroutine
+// in semaRoot blocked on addr.
+// If the sudog was being profiled, dequeue returns the time
+// at which it was woken up as now. Otherwise now is 0.
+func (root *semaRoot) dequeue(addr *uint32) (found *sudog, now int64) {
+ ps := &root.treap
+ s := *ps
+ for ; s != nil; s = *ps {
+ if s.elem == unsafe.Pointer(addr) {
+ goto Found
+ }
+ if uintptr(unsafe.Pointer(addr)) < uintptr(s.elem) {
+ ps = &s.prev
+ } else {
+ ps = &s.next
+ }
+ }
+ return nil, 0
+
+Found:
+ now = int64(0)
+ if s.acquiretime != 0 {
+ now = cputicks()
+ }
+ if t := s.waitlink; t != nil {
+ // Substitute t, also waiting on addr, for s in root tree of unique addrs.
+ *ps = t
+ t.ticket = s.ticket
+ t.parent = s.parent
+ t.prev = s.prev
+ if t.prev != nil {
+ t.prev.parent = t
+ }
+ t.next = s.next
+ if t.next != nil {
+ t.next.parent = t
+ }
+ if t.waitlink != nil {
+ t.waittail = s.waittail
+ } else {
+ t.waittail = nil
+ }
+ t.acquiretime = now
+ s.waitlink = nil
+ s.waittail = nil
+ } else {
+ // Rotate s down to be leaf of tree for removal, respecting priorities.
+ for s.next != nil || s.prev != nil {
+ if s.next == nil || s.prev != nil && s.prev.ticket < s.next.ticket {
+ root.rotateRight(s)
+ } else {
+ root.rotateLeft(s)
+ }
+ }
+ // Remove s, now a leaf.
+ if s.parent != nil {
+ if s.parent.prev == s {
+ s.parent.prev = nil
+ } else {
+ s.parent.next = nil
+ }
+ } else {
+ root.treap = nil
+ }
+ }
+ s.parent = nil
+ s.elem = nil
+ s.next = nil
+ s.prev = nil
+ s.ticket = 0
+ return s, now
+}
+
+// rotateLeft rotates the tree rooted at node x,
+// turning (x a (y b c)) into (y (x a b) c).
+func (root *semaRoot) rotateLeft(x *sudog) {
+ // p -> (x a (y b c))
+ p := x.parent
+ y := x.next
+ b := y.prev
+
+ y.prev = x
+ x.parent = y
+ x.next = b
+ if b != nil {
+ b.parent = x
+ }
+
+ y.parent = p
+ if p == nil {
+ root.treap = y
+ } else if p.prev == x {
+ p.prev = y
+ } else {
+ if p.next != x {
+ throw("semaRoot rotateLeft")
+ }
+ p.next = y
+ }
+}
+
+// rotateRight rotates the tree rooted at node y,
+// turning (y (x a b) c) into (x a (y b c)).
+func (root *semaRoot) rotateRight(y *sudog) {
+ // p -> (y (x a b) c)
+ p := y.parent
+ x := y.prev
+ b := x.next
+
+ x.next = y
+ y.parent = x
+ y.prev = b
+ if b != nil {
+ b.parent = y
+ }
+
+ x.parent = p
+ if p == nil {
+ root.treap = x
+ } else if p.prev == y {
+ p.prev = x
+ } else {
+ if p.next != y {
+ throw("semaRoot rotateRight")
+ }
+ p.next = x
+ }
+}
+
+// notifyList is a ticket-based notification list used to implement sync.Cond.
+//
+// It must be kept in sync with the sync package.
+type notifyList struct {
+ // wait is the ticket number of the next waiter. It is atomically
+ // incremented outside the lock.
+ wait uint32
+
+ // notify is the ticket number of the next waiter to be notified. It can
+ // be read outside the lock, but is only written to with lock held.
+ //
+ // Both wait & notify can wrap around, and such cases will be correctly
+ // handled as long as their "unwrapped" difference is bounded by 2^31.
+ // For this not to be the case, we'd need to have 2^31+ goroutines
+ // blocked on the same condvar, which is currently not possible.
+ notify uint32
+
+ // List of parked waiters.
+ lock mutex
+ head *sudog
+ tail *sudog
+}
+
+// less checks if a < b, treating a and b as running counts that may overflow
+// the 32-bit range, and whose "unwrapped" difference is always less than 2^31.
+func less(a, b uint32) bool {
+ return int32(a-b) < 0
+}
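
The signed cast makes the comparison robust to wraparound: even after the ticket counter overflows uint32, the difference a-b is a small signed value as long as the true distance stays under 2^31. For example:

package main

import (
	"fmt"
	"math"
)

func less(a, b uint32) bool { return int32(a-b) < 0 }

func main() {
	var a uint32 = math.MaxUint32 // a ticket just before wraparound
	b := a + 5                    // overflows to 4
	// The unsigned comparison gets the order wrong after wraparound;
	// the signed difference still sees b as five tickets ahead of a.
	fmt.Println(less(a, b), a < b) // true false
}
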
+
+// notifyListAdd adds the caller to a notify list such that it can receive
+// notifications. The caller must eventually call notifyListWait to wait for
+// such a notification, passing the returned ticket number.
+//
+//go:linkname notifyListAdd sync.runtime_notifyListAdd
+func notifyListAdd(l *notifyList) uint32 {
+ // This may be called concurrently, for example, when called from
+ // sync.Cond.Wait while holding a RWMutex in read mode.
+ return atomic.Xadd(&l.wait, 1) - 1
+}
+
+// notifyListWait waits for a notification. If one has been sent since
+// notifyListAdd was called, it returns immediately. Otherwise, it blocks.
+//
+//go:linkname notifyListWait sync.runtime_notifyListWait
+func notifyListWait(l *notifyList, t uint32) {
+ lockWithRank(&l.lock, lockRankNotifyList)
+
+ // Return right away if this ticket has already been notified.
+ if less(t, l.notify) {
+ unlock(&l.lock)
+ return
+ }
+
+ // Enqueue itself.
+ s := acquireSudog()
+ s.g = getg()
+ s.ticket = t
+ s.releasetime = 0
+ t0 := int64(0)
+ if blockprofilerate > 0 {
+ t0 = cputicks()
+ s.releasetime = -1
+ }
+ if l.tail == nil {
+ l.head = s
+ } else {
+ l.tail.next = s
+ }
+ l.tail = s
+ goparkunlock(&l.lock, waitReasonSyncCondWait, traceEvGoBlockCond, 3)
+ if t0 != 0 {
+ blockevent(s.releasetime-t0, 2)
+ }
+ releaseSudog(s)
+}
+
+// notifyListNotifyAll notifies all entries in the list.
+//
+//go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll
+func notifyListNotifyAll(l *notifyList) {
+ // Fast-path: if there are no new waiters since the last notification
+ // we don't need to acquire the lock.
+ if atomic.Load(&l.wait) == atomic.Load(&l.notify) {
+ return
+ }
+
+ // Pull the list out into a local variable, waiters will be readied
+ // outside the lock.
+ lockWithRank(&l.lock, lockRankNotifyList)
+ s := l.head
+ l.head = nil
+ l.tail = nil
+
+ // Update the next ticket to be notified. We can set it to the current
+ // value of wait because any previous waiters are already in the list
+ // or will notice that they have already been notified when trying to
+ // add themselves to the list.
+ atomic.Store(&l.notify, atomic.Load(&l.wait))
+ unlock(&l.lock)
+
+ // Go through the local list and ready all waiters.
+ for s != nil {
+ next := s.next
+ s.next = nil
+ readyWithTime(s, 4)
+ s = next
+ }
+}
+
+// notifyListNotifyOne notifies one entry in the list.
+//
+//go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne
+func notifyListNotifyOne(l *notifyList) {
+ // Fast-path: if there are no new waiters since the last notification
+ // we don't need to acquire the lock at all.
+ if atomic.Load(&l.wait) == atomic.Load(&l.notify) {
+ return
+ }
+
+ lockWithRank(&l.lock, lockRankNotifyList)
+
+ // Re-check under the lock if we need to do anything.
+ t := l.notify
+ if t == atomic.Load(&l.wait) {
+ unlock(&l.lock)
+ return
+ }
+
+ // Update the next notify ticket number.
+ atomic.Store(&l.notify, t+1)
+
+ // Try to find the g that needs to be notified.
+ // If it hasn't made it to the list yet we won't find it,
+ // but it won't park itself once it sees the new notify number.
+ //
+ // This scan looks linear but essentially always stops quickly.
+ // Because gs enqueue themselves separately from taking ticket numbers,
+ // there may be minor reorderings in the list, but we
+ // expect the g we're looking for to be near the front.
+ // The g has others in front of it on the list only to the
+ // extent that it lost the race, so the iteration will not
+ // be too long. This applies even when the g is missing:
+ // it hasn't yet gotten to sleep and has lost the race to
+ // the (few) other g's that we find on the list.
+ for p, s := (*sudog)(nil), l.head; s != nil; p, s = s, s.next {
+ if s.ticket == t {
+ n := s.next
+ if p != nil {
+ p.next = n
+ } else {
+ l.head = n
+ }
+ if n == nil {
+ l.tail = p
+ }
+ unlock(&l.lock)
+ s.next = nil
+ readyWithTime(s, 4)
+ return
+ }
+ }
+ unlock(&l.lock)
+}
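
These functions are the runtime half of sync.Cond: Wait takes a ticket with notifyListAdd before releasing the user's lock, then parks in notifyListWait, and Signal advances the notify cursor by exactly one ticket. A hedged miniature of the ticket protocol, with a map and channels in place of the sudog list (ticketList and its methods are illustrative names):

package main

import (
	"fmt"
	"sync"
)

// ticketList is a miniature notifyList: waiters take a ticket first,
// then wait on it; notifyOne advances the cursor and wakes exactly the
// holder of that ticket, even if it has not managed to park yet.
type ticketList struct {
	mu     sync.Mutex
	wait   uint32 // next ticket to hand out
	notify uint32 // next ticket to be notified
	parked map[uint32]chan struct{}
}

func (l *ticketList) add() uint32 {
	l.mu.Lock()
	defer l.mu.Unlock()
	t := l.wait
	l.wait++
	return t
}

func (l *ticketList) waitTicket(t uint32) {
	l.mu.Lock()
	if int32(t-l.notify) < 0 { // ticket already notified: don't park
		l.mu.Unlock()
		return
	}
	ch := make(chan struct{})
	if l.parked == nil {
		l.parked = make(map[uint32]chan struct{})
	}
	l.parked[t] = ch
	l.mu.Unlock()
	<-ch
}

func (l *ticketList) notifyOne() {
	l.mu.Lock()
	if l.notify == l.wait { // no new waiters since the last notification
		l.mu.Unlock()
		return
	}
	t := l.notify
	l.notify++
	ch, ok := l.parked[t]
	delete(l.parked, t)
	l.mu.Unlock()
	if ok {
		close(ch) // absent from the map means the holder never parked
	}
}

func main() {
	var l ticketList
	t := l.add() // take a ticket before doing anything that can block
	done := make(chan struct{})
	go func() { l.waitTicket(t); close(done) }()
	l.notifyOne() // wakes ticket t whether or not it has parked yet
	<-done
	fmt.Println("notified")
}
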
+
+//go:linkname notifyListCheck sync.runtime_notifyListCheck
+func notifyListCheck(sz uintptr) {
+ if sz != unsafe.Sizeof(notifyList{}) {
+ print("runtime: bad notifyList size - sync=", sz, " runtime=", unsafe.Sizeof(notifyList{}), "\n")
+ throw("bad notifyList size")
+ }
+}
+
+//go:linkname sync_nanotime sync.runtime_nanotime
+func sync_nanotime() int64 {
+ return nanotime()
+}
diff --git a/contrib/go/_std_1.18/src/runtime/signal_amd64.go b/contrib/go/_std_1.19/src/runtime/signal_amd64.go
index 8ade208836..8ade208836 100644
--- a/contrib/go/_std_1.18/src/runtime/signal_amd64.go
+++ b/contrib/go/_std_1.19/src/runtime/signal_amd64.go
diff --git a/contrib/go/_std_1.18/src/runtime/signal_darwin.go b/contrib/go/_std_1.19/src/runtime/signal_darwin.go
index 8090fb22a5..8090fb22a5 100644
--- a/contrib/go/_std_1.18/src/runtime/signal_darwin.go
+++ b/contrib/go/_std_1.19/src/runtime/signal_darwin.go
diff --git a/contrib/go/_std_1.19/src/runtime/signal_darwin_amd64.go b/contrib/go/_std_1.19/src/runtime/signal_darwin_amd64.go
new file mode 100644
index 0000000000..20544d8489
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/signal_darwin_amd64.go
@@ -0,0 +1,96 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+//go:nosplit
+//go:nowritebarrierrec
+func (c *sigctxt) regs() *regs64 { return &(*ucontext)(c.ctxt).uc_mcontext.ss }
+
+func (c *sigctxt) rax() uint64 { return c.regs().rax }
+func (c *sigctxt) rbx() uint64 { return c.regs().rbx }
+func (c *sigctxt) rcx() uint64 { return c.regs().rcx }
+func (c *sigctxt) rdx() uint64 { return c.regs().rdx }
+func (c *sigctxt) rdi() uint64 { return c.regs().rdi }
+func (c *sigctxt) rsi() uint64 { return c.regs().rsi }
+func (c *sigctxt) rbp() uint64 { return c.regs().rbp }
+func (c *sigctxt) rsp() uint64 { return c.regs().rsp }
+func (c *sigctxt) r8() uint64 { return c.regs().r8 }
+func (c *sigctxt) r9() uint64 { return c.regs().r9 }
+func (c *sigctxt) r10() uint64 { return c.regs().r10 }
+func (c *sigctxt) r11() uint64 { return c.regs().r11 }
+func (c *sigctxt) r12() uint64 { return c.regs().r12 }
+func (c *sigctxt) r13() uint64 { return c.regs().r13 }
+func (c *sigctxt) r14() uint64 { return c.regs().r14 }
+func (c *sigctxt) r15() uint64 { return c.regs().r15 }
+
+//go:nosplit
+//go:nowritebarrierrec
+func (c *sigctxt) rip() uint64 { return c.regs().rip }
+
+func (c *sigctxt) rflags() uint64 { return c.regs().rflags }
+func (c *sigctxt) cs() uint64 { return c.regs().cs }
+func (c *sigctxt) fs() uint64 { return c.regs().fs }
+func (c *sigctxt) gs() uint64 { return c.regs().gs }
+func (c *sigctxt) sigcode() uint64 { return uint64(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint64 { return c.info.si_addr }
+
+func (c *sigctxt) set_rip(x uint64) { c.regs().rip = x }
+func (c *sigctxt) set_rsp(x uint64) { c.regs().rsp = x }
+func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint64) { c.info.si_addr = x }
+
+//go:nosplit
+func (c *sigctxt) fixsigcode(sig uint32) {
+ switch sig {
+ case _SIGTRAP:
+ // OS X sets c.sigcode() == TRAP_BRKPT unconditionally for all SIGTRAPs,
+ // leaving no way to distinguish a breakpoint-induced SIGTRAP
+ // from an asynchronous signal SIGTRAP.
+ // They all look breakpoint-induced by default.
+ // Try looking at the code to see if it's a breakpoint.
+ // The assumption is that we're very unlikely to get an
+ // asynchronous SIGTRAP at just the moment that the
+ // PC started to point at unmapped memory.
+ pc := uintptr(c.rip())
+ // OS X will leave the pc just after the INT 3 instruction.
+ // INT 3 is usually 1 byte, but there is a 2-byte form.
+ code := (*[2]byte)(unsafe.Pointer(pc - 2))
+ if code[1] != 0xCC && (code[0] != 0xCD || code[1] != 3) {
+ // SIGTRAP on something other than INT 3.
+ c.set_sigcode(_SI_USER)
+ }
+
+ case _SIGSEGV:
+ // x86-64 has 48-bit virtual addresses. The top 16 bits must echo bit 47.
+ // The hardware delivers a different kind of fault for a malformed address
+ // than it does for an attempt to access a valid but unmapped address.
+ // OS X 10.9.2 mishandles the malformed address case, making it look like
+ // a user-generated signal (like someone ran kill -SEGV ourpid).
+ // We pass user-generated signals to os/signal, or else ignore them.
+ // Doing that here - and returning to the faulting code - results in an
+ // infinite loop. It appears the best we can do is rewrite what the kernel
+ // delivers into something more like the truth. The address used below
+ // has very little chance of being the one that caused the fault, but it is
+ // malformed, it is clearly not a real pointer, and if it does get printed
+ // in real life, people will probably search for it and find this code.
+ // There are no Google hits for b01dfacedebac1e or 0xb01dfacedebac1e
+ // as I type this comment.
+ //
+ // Note: if this code is removed, please consider
+ // enabling TestSignalForwardingGo for darwin-amd64 in
+ // misc/cgo/testcarchive/carchive_test.go.
+ if c.sigcode() == _SI_USER {
+ c.set_sigcode(_SI_USER + 1)
+ c.set_sigaddr(0xb01dfacedebac1e)
+ }
+ }
+}
diff --git a/contrib/go/_std_1.18/src/runtime/signal_linux_amd64.go b/contrib/go/_std_1.19/src/runtime/signal_linux_amd64.go
index 573b118397..573b118397 100644
--- a/contrib/go/_std_1.18/src/runtime/signal_linux_amd64.go
+++ b/contrib/go/_std_1.19/src/runtime/signal_linux_amd64.go
diff --git a/contrib/go/_std_1.19/src/runtime/signal_unix.go b/contrib/go/_std_1.19/src/runtime/signal_unix.go
new file mode 100644
index 0000000000..0be499b2e9
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/signal_unix.go
@@ -0,0 +1,1348 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package runtime
+
+import (
+ "internal/abi"
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+// sigTabT is the type of an entry in the global sigtable array.
+// sigtable is inherently system dependent, and appears in OS-specific files,
+// but sigTabT is the same for all Unixy systems.
+// The sigtable array is indexed by a system signal number to get the flags
+// and printable name of each signal.
+type sigTabT struct {
+ flags int32
+ name string
+}
+
+//go:linkname os_sigpipe os.sigpipe
+func os_sigpipe() {
+ systemstack(sigpipe)
+}
+
+func signame(sig uint32) string {
+ if sig >= uint32(len(sigtable)) {
+ return ""
+ }
+ return sigtable[sig].name
+}
+
+const (
+ _SIG_DFL uintptr = 0
+ _SIG_IGN uintptr = 1
+)
+
+// sigPreempt is the signal used for non-cooperative preemption.
+//
+// There's no good way to choose this signal, but there are some
+// heuristics:
+//
+// 1. It should be a signal that's passed-through by debuggers by
+// default. On Linux, this is SIGALRM, SIGURG, SIGCHLD, SIGIO,
+// SIGVTALRM, SIGPROF, and SIGWINCH, plus some glibc-internal signals.
+//
+// 2. It shouldn't be used internally by libc in mixed Go/C binaries
+// because libc may assume it's the only thing that can handle these
+// signals. For example SIGCANCEL or SIGSETXID.
+//
+// 3. It should be a signal that can happen spuriously without
+// consequences. For example, SIGALRM is a bad choice because the
+// signal handler can't tell if it was caused by the real process
+// alarm or not (arguably this means the signal is broken, but I
+// digress). SIGUSR1 and SIGUSR2 are also bad because those are often
+// used in meaningful ways by applications.
+//
+// 4. We need to deal with platforms without real-time signals (like
+// macOS), so those are out.
+//
+// We use SIGURG because it meets all of these criteria, is extremely
+// unlikely to be used by an application for its "real" meaning (both
+// because out-of-band data is basically unused and because SIGURG
+// doesn't report which socket has the condition, making it pretty
+// useless), and even if it is, the application has to be ready for
+// spurious SIGURG. SIGIO wouldn't be a bad choice either, but is more
+// likely to be used for real.
+const sigPreempt = _SIGURG
+
+// Stores the signal handlers registered before Go installed its own.
+// These signal handlers will be invoked in cases where Go doesn't want to
+// handle a particular signal (e.g., signal occurred on a non-Go thread).
+// See sigfwdgo for more information on when the signals are forwarded.
+//
+// This is read by the signal handler; accesses should use
+// atomic.Loaduintptr and atomic.Storeuintptr.
+var fwdSig [_NSIG]uintptr
+
+// handlingSig is indexed by signal number and is non-zero if we are
+// currently handling the signal. Or, to put it another way, whether
+// the signal handler is currently set to the Go signal handler or not.
+// This is uint32 rather than bool so that we can use atomic instructions.
+var handlingSig [_NSIG]uint32
+
+// channels for synchronizing signal mask updates with the signal mask
+// thread
+var (
+ disableSigChan chan uint32
+ enableSigChan chan uint32
+ maskUpdatedChan chan struct{}
+)
+
+func init() {
+ // _NSIG is the number of signals on this operating system.
+ // sigtable should describe what to do for all the possible signals.
+ if len(sigtable) != _NSIG {
+ print("runtime: len(sigtable)=", len(sigtable), " _NSIG=", _NSIG, "\n")
+ throw("bad sigtable len")
+ }
+}
+
+var signalsOK bool
+
+// Initialize signals.
+// Called by libpreinit, so the runtime may not be initialized yet.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func initsig(preinit bool) {
+ if !preinit {
+ // It's now OK for signal handlers to run.
+ signalsOK = true
+ }
+
+ // For c-archive/c-shared this is called by libpreinit with
+ // preinit == true.
+ if (isarchive || islibrary) && !preinit {
+ return
+ }
+
+ for i := uint32(0); i < _NSIG; i++ {
+ t := &sigtable[i]
+ if t.flags == 0 || t.flags&_SigDefault != 0 {
+ continue
+ }
+
+ // We don't need to use atomic operations here because
+ // there shouldn't be any other goroutines running yet.
+ fwdSig[i] = getsig(i)
+
+ if !sigInstallGoHandler(i) {
+ // Even if we are not installing a signal handler,
+ // set SA_ONSTACK if necessary.
+ if fwdSig[i] != _SIG_DFL && fwdSig[i] != _SIG_IGN {
+ setsigstack(i)
+ } else if fwdSig[i] == _SIG_IGN {
+ sigInitIgnored(i)
+ }
+ continue
+ }
+
+ handlingSig[i] = 1
+ setsig(i, abi.FuncPCABIInternal(sighandler))
+ }
+}
+
+//go:nosplit
+//go:nowritebarrierrec
+func sigInstallGoHandler(sig uint32) bool {
+ // For some signals, we respect an inherited SIG_IGN handler
+ // rather than insist on installing our own default handler.
+ // Even these signals can be fetched using the os/signal package.
+ switch sig {
+ case _SIGHUP, _SIGINT:
+ if atomic.Loaduintptr(&fwdSig[sig]) == _SIG_IGN {
+ return false
+ }
+ }
+
+ if (GOOS == "linux" || GOOS == "android") && !iscgo && sig == sigPerThreadSyscall {
+ // sigPerThreadSyscall is the same signal used by glibc for
+ // per-thread syscalls on Linux. We use it for the same purpose
+ // in non-cgo binaries.
+ return true
+ }
+
+ t := &sigtable[sig]
+ if t.flags&_SigSetStack != 0 {
+ return false
+ }
+
+ // When built using c-archive or c-shared, only install signal
+ // handlers for synchronous signals, SIGPIPE, and sigPreempt.
+ if (isarchive || islibrary) && t.flags&_SigPanic == 0 && sig != _SIGPIPE && sig != sigPreempt {
+ return false
+ }
+
+ return true
+}
+
+// sigenable enables the Go signal handler to catch the signal sig.
+// It is only called while holding the os/signal.handlers lock,
+// via os/signal.enableSignal and signal_enable.
+func sigenable(sig uint32) {
+ if sig >= uint32(len(sigtable)) {
+ return
+ }
+
+ // SIGPROF is handled specially for profiling.
+ if sig == _SIGPROF {
+ return
+ }
+
+ t := &sigtable[sig]
+ if t.flags&_SigNotify != 0 {
+ ensureSigM()
+ enableSigChan <- sig
+ <-maskUpdatedChan
+ if atomic.Cas(&handlingSig[sig], 0, 1) {
+ atomic.Storeuintptr(&fwdSig[sig], getsig(sig))
+ setsig(sig, abi.FuncPCABIInternal(sighandler))
+ }
+ }
+}
+
+// sigdisable disables the Go signal handler for the signal sig.
+// It is only called while holding the os/signal.handlers lock,
+// via os/signal.disableSignal and signal_disable.
+func sigdisable(sig uint32) {
+ if sig >= uint32(len(sigtable)) {
+ return
+ }
+
+ // SIGPROF is handled specially for profiling.
+ if sig == _SIGPROF {
+ return
+ }
+
+ t := &sigtable[sig]
+ if t.flags&_SigNotify != 0 {
+ ensureSigM()
+ disableSigChan <- sig
+ <-maskUpdatedChan
+
+ // If initsig does not install a signal handler for a
+ // signal, then to go back to the state before Notify
+ // we should remove the one we installed.
+ if !sigInstallGoHandler(sig) {
+ atomic.Store(&handlingSig[sig], 0)
+ setsig(sig, atomic.Loaduintptr(&fwdSig[sig]))
+ }
+ }
+}
+
+// sigignore ignores the signal sig.
+// It is only called while holding the os/signal.handlers lock,
+// via os/signal.ignoreSignal and signal_ignore.
+func sigignore(sig uint32) {
+ if sig >= uint32(len(sigtable)) {
+ return
+ }
+
+ // SIGPROF is handled specially for profiling.
+ if sig == _SIGPROF {
+ return
+ }
+
+ t := &sigtable[sig]
+ if t.flags&_SigNotify != 0 {
+ atomic.Store(&handlingSig[sig], 0)
+ setsig(sig, _SIG_IGN)
+ }
+}
+
+// clearSignalHandlers clears all signal handlers that are not ignored
+// back to the default. This is called by the child after a fork, so that
+// we can enable the signal mask for the exec without worrying about
+// running a signal handler in the child.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func clearSignalHandlers() {
+ for i := uint32(0); i < _NSIG; i++ {
+ if atomic.Load(&handlingSig[i]) != 0 {
+ setsig(i, _SIG_DFL)
+ }
+ }
+}
+
+// setProcessCPUProfilerTimer is called when the profiling timer changes.
+// It is called with prof.signalLock held. hz is the new timer, and is 0 if
+// profiling is being disabled. Enable or disable the signal as
+// required for -buildmode=c-archive.
+func setProcessCPUProfilerTimer(hz int32) {
+ if hz != 0 {
+ // Enable the Go signal handler if not enabled.
+ if atomic.Cas(&handlingSig[_SIGPROF], 0, 1) {
+ h := getsig(_SIGPROF)
+ // If no signal handler was installed before, then we record
+ // _SIG_IGN here. When we turn off profiling (below) we'll start
+ // ignoring SIGPROF signals. We do this, rather than change
+ // to SIG_DFL, because there may be a pending SIGPROF
+ // signal that has not yet been delivered to some other thread.
+ // If we change to SIG_DFL when turning off profiling, the
+ // program will crash when that SIGPROF is delivered. We assume
+ // that programs that use profiling don't want to crash on a
+ // stray SIGPROF. See issue 19320.
+ // We do the change here instead of when turning off profiling,
+ // because there we may race with a signal handler running
+ // concurrently, in particular, sigfwdgo may observe _SIG_DFL and
+ // die. See issue 43828.
+ if h == _SIG_DFL {
+ h = _SIG_IGN
+ }
+ atomic.Storeuintptr(&fwdSig[_SIGPROF], h)
+ setsig(_SIGPROF, abi.FuncPCABIInternal(sighandler))
+ }
+
+ var it itimerval
+ it.it_interval.tv_sec = 0
+ it.it_interval.set_usec(1000000 / hz)
+ it.it_value = it.it_interval
+ setitimer(_ITIMER_PROF, &it, nil)
+ } else {
+ setitimer(_ITIMER_PROF, &itimerval{}, nil)
+
+ // If the Go signal handler should be disabled by default,
+ // switch back to the signal handler that was installed
+ // when we enabled profiling. We don't try to handle the case
+ // of a program that changes the SIGPROF handler while Go
+ // profiling is enabled.
+ if !sigInstallGoHandler(_SIGPROF) {
+ if atomic.Cas(&handlingSig[_SIGPROF], 1, 0) {
+ h := atomic.Loaduintptr(&fwdSig[_SIGPROF])
+ setsig(_SIGPROF, h)
+ }
+ }
+ }
+}
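
The arithmetic above turns a profiling rate in Hz into a repeating timer interval of 1e6/hz microseconds. A brief illustration (profInterval is a hypothetical helper, not a runtime function):

package main

import (
	"fmt"
	"time"
)

// profInterval mirrors the itimerval setup above: hz samples per second
// means one SIGPROF every 1e6/hz microseconds.
func profInterval(hz int32) time.Duration {
	return time.Duration(1000000/hz) * time.Microsecond
}

func main() {
	fmt.Println(profInterval(100)) // 10ms at the 100 Hz rate runtime/pprof uses
}
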
+
+// setThreadCPUProfilerHz makes any thread-specific changes required to
+// implement profiling at a rate of hz.
+// No changes required on Unix systems when using setitimer.
+func setThreadCPUProfilerHz(hz int32) {
+ getg().m.profilehz = hz
+}
+
+func sigpipe() {
+ if signal_ignored(_SIGPIPE) || sigsend(_SIGPIPE) {
+ return
+ }
+ dieFromSignal(_SIGPIPE)
+}
+
+// doSigPreempt handles a preemption signal on gp.
+func doSigPreempt(gp *g, ctxt *sigctxt) {
+ // Check if this G wants to be preempted and is safe to
+ // preempt.
+ if wantAsyncPreempt(gp) {
+ if ok, newpc := isAsyncSafePoint(gp, ctxt.sigpc(), ctxt.sigsp(), ctxt.siglr()); ok {
+ // Adjust the PC and inject a call to asyncPreempt.
+ ctxt.pushCall(abi.FuncPCABI0(asyncPreempt), newpc)
+ }
+ }
+
+ // Acknowledge the preemption.
+ atomic.Xadd(&gp.m.preemptGen, 1)
+ atomic.Store(&gp.m.signalPending, 0)
+
+ if GOOS == "darwin" || GOOS == "ios" {
+ atomic.Xadd(&pendingPreemptSignals, -1)
+ }
+}
+
+const preemptMSupported = true
+
+// preemptM sends a preemption request to mp. This request may be
+// handled asynchronously and may be coalesced with other requests to
+// the M. When the request is received, if the running G or P are
+// marked for preemption and the goroutine is at an asynchronous
+// safe-point, it will preempt the goroutine. It always atomically
+// increments mp.preemptGen after handling a preemption request.
+func preemptM(mp *m) {
+ // On Darwin, don't try to preempt threads during exec.
+ // Issue #41702.
+ if GOOS == "darwin" || GOOS == "ios" {
+ execLock.rlock()
+ }
+
+ if atomic.Cas(&mp.signalPending, 0, 1) {
+ if GOOS == "darwin" || GOOS == "ios" {
+ atomic.Xadd(&pendingPreemptSignals, 1)
+ }
+
+ // If multiple threads are preempting the same M, it may send many
+ // signals to the same M such that it hardly makes progress, causing
+ // a live-lock problem. Apparently this could happen on darwin. See
+ // issue #37741.
+ // Only send a signal if there isn't already one pending.
+ signalM(mp, sigPreempt)
+ }
+
+ if GOOS == "darwin" || GOOS == "ios" {
+ execLock.runlock()
+ }
+}
+
+// sigFetchG fetches the value of G safely when running in a signal handler.
+// On some architectures, the g value may be clobbered when running in a VDSO.
+// See issue #32912.
+//
+//go:nosplit
+func sigFetchG(c *sigctxt) *g {
+ switch GOARCH {
+ case "arm", "arm64", "ppc64", "ppc64le", "riscv64", "s390x":
+ if !iscgo && inVDSOPage(c.sigpc()) {
+ // When using cgo, we save the g on TLS and load it from there
+ // in sigtramp. Just use that.
+ // Otherwise, before making a VDSO call we save the g to the
+ // bottom of the signal stack. Fetch from there.
+ // TODO: in efence mode, stack is sysAlloc'd, so this wouldn't
+ // work.
+ sp := getcallersp()
+ s := spanOf(sp)
+ if s != nil && s.state.get() == mSpanManual && s.base() < sp && sp < s.limit {
+ gp := *(**g)(unsafe.Pointer(s.base()))
+ return gp
+ }
+ return nil
+ }
+ }
+ return getg()
+}
+
+// sigtrampgo is called from the signal handler function, sigtramp,
+// written in assembly code.
+// This is called by the signal handler, and the world may be stopped.
+//
+// It must be nosplit because getg() is still the G that was running
+// (if any) when the signal was delivered, but it's (usually) called
+// on the gsignal stack. Until this switches the G to gsignal, the
+// stack bounds check won't work.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
+ if sigfwdgo(sig, info, ctx) {
+ return
+ }
+ c := &sigctxt{info, ctx}
+ g := sigFetchG(c)
+ setg(g)
+ if g == nil {
+ if sig == _SIGPROF {
+ // Some platforms (Linux) have per-thread timers, which we use in
+ // combination with the process-wide timer. Avoid double-counting.
+ if validSIGPROF(nil, c) {
+ sigprofNonGoPC(c.sigpc())
+ }
+ return
+ }
+ if sig == sigPreempt && preemptMSupported && debug.asyncpreemptoff == 0 {
+ // This is probably a signal from preemptM sent
+ // while executing Go code but received while
+ // executing non-Go code.
+ // We got past sigfwdgo, so we know that there is
+ // no non-Go signal handler for sigPreempt.
+ // The default behavior for sigPreempt is to ignore
+ // the signal, so badsignal will be a no-op anyway.
+ if GOOS == "darwin" || GOOS == "ios" {
+ atomic.Xadd(&pendingPreemptSignals, -1)
+ }
+ return
+ }
+ c.fixsigcode(sig)
+ badsignal(uintptr(sig), c)
+ return
+ }
+
+ setg(g.m.gsignal)
+
+ // If some non-Go code called sigaltstack, adjust.
+ var gsignalStack gsignalStack
+ setStack := adjustSignalStack(sig, g.m, &gsignalStack)
+ if setStack {
+ g.m.gsignal.stktopsp = getcallersp()
+ }
+
+ if g.stackguard0 == stackFork {
+ signalDuringFork(sig)
+ }
+
+ c.fixsigcode(sig)
+ sighandler(sig, info, ctx, g)
+ setg(g)
+ if setStack {
+ restoreGsignalStack(&gsignalStack)
+ }
+}
+
+// If the signal handler receives a SIGPROF signal on a non-Go thread,
+// it tries to collect a traceback into sigprofCallers.
+// sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback.
+var sigprofCallers cgoCallers
+var sigprofCallersUse uint32
+
+// sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
+// and the signal handler collected a stack trace in sigprofCallers.
+// When this is called, sigprofCallersUse will be non-zero.
+// g is nil, and what we can do is very limited.
+//
+// It is called from the signal handling functions written in assembly code that
+// are active for cgo programs, cgoSigtramp and sigprofNonGoWrapper, which have
+// not verified that the SIGPROF delivery corresponds to the best available
+// profiling source for this thread.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func sigprofNonGo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
+ if prof.hz != 0 {
+ c := &sigctxt{info, ctx}
+ // Some platforms (Linux) have per-thread timers, which we use in
+ // combination with the process-wide timer. Avoid double-counting.
+ if validSIGPROF(nil, c) {
+ n := 0
+ for n < len(sigprofCallers) && sigprofCallers[n] != 0 {
+ n++
+ }
+ cpuprof.addNonGo(sigprofCallers[:n])
+ }
+ }
+
+ atomic.Store(&sigprofCallersUse, 0)
+}
+
+// sigprofNonGoPC is called when a profiling signal arrived on a
+// non-Go thread and we have a single PC value, not a stack trace.
+// g is nil, and what we can do is very limited.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func sigprofNonGoPC(pc uintptr) {
+ if prof.hz != 0 {
+ stk := []uintptr{
+ pc,
+ abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum,
+ }
+ cpuprof.addNonGo(stk)
+ }
+}
+
+// adjustSignalStack adjusts the current stack guard based on the
+// stack pointer that is actually in use while handling a signal.
+// We do this in case some non-Go code called sigaltstack.
+// This reports whether the stack was adjusted, and if so stores the old
+// signal stack in *gsigstack.
+//
+//go:nosplit
+func adjustSignalStack(sig uint32, mp *m, gsigStack *gsignalStack) bool {
+ sp := uintptr(unsafe.Pointer(&sig))
+ if sp >= mp.gsignal.stack.lo && sp < mp.gsignal.stack.hi {
+ return false
+ }
+
+ var st stackt
+ sigaltstack(nil, &st)
+ stsp := uintptr(unsafe.Pointer(st.ss_sp))
+ if st.ss_flags&_SS_DISABLE == 0 && sp >= stsp && sp < stsp+st.ss_size {
+ setGsignalStack(&st, gsigStack)
+ return true
+ }
+
+ if sp >= mp.g0.stack.lo && sp < mp.g0.stack.hi {
+ // The signal was delivered on the g0 stack.
+ // This can happen when linked with C code
+ // using the thread sanitizer, which collects
+ // signals then delivers them itself by calling
+ // the signal handler directly when C code,
+ // including C code called via cgo, calls a
+ // TSAN-intercepted function such as malloc.
+ //
+ // We check this condition last as g0.stack.lo
+ // may not be very accurate (see mstart).
+ st := stackt{ss_size: mp.g0.stack.hi - mp.g0.stack.lo}
+ setSignalstackSP(&st, mp.g0.stack.lo)
+ setGsignalStack(&st, gsigStack)
+ return true
+ }
+
+ // sp is not within gsignal stack, g0 stack, or sigaltstack. Bad.
+ setg(nil)
+ needm()
+ if st.ss_flags&_SS_DISABLE != 0 {
+ noSignalStack(sig)
+ } else {
+ sigNotOnStack(sig)
+ }
+ dropm()
+ return false
+}
+
+// crashing is the number of m's we have waited for when implementing
+// GOTRACEBACK=crash when a signal is received.
+var crashing int32
+
+// testSigtrap and testSigusr1 are used by the runtime tests. If
+// non-nil, they are called on SIGTRAP and SIGUSR1 respectively. If one
+// returns true, the normal behavior on that signal is suppressed.
+var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool
+var testSigusr1 func(gp *g) bool
+
+// sighandler is invoked when a signal occurs. The global g will be
+// set to a gsignal goroutine and we will be running on the alternate
+// signal stack. The parameter g will be the value of the global g
+// when the signal occurred. The sig, info, and ctxt parameters are
+// from the system signal handler: they are the parameters passed when
+// the SA is passed to the sigaction system call.
+//
+// The garbage collector may have stopped the world, so write barriers
+// are not allowed.
+//
+//go:nowritebarrierrec
+func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
+ _g_ := getg()
+ c := &sigctxt{info, ctxt}
+ mp := _g_.m
+
+ // Cgo TSAN (not the Go race detector) intercepts signals and calls the
+ // signal handler at a later time. When the signal handler is called, the
+ // memory may have changed, but the signal context remains old. The
+ // unmatched signal context and memory makes it unsafe to unwind or inspect
+ // the stack. So we ignore delayed non-fatal signals that will cause a stack
+ // inspection (profiling signal and preemption signal).
+ // cgo_yield is only non-nil for TSAN, and is specifically used to trigger
+ // signal delivery. We use that as an indicator of delayed signals.
+ // For delayed signals, the handler is called on the g0 stack (see
+ // adjustSignalStack).
+ delayedSignal := *cgo_yield != nil && mp != nil && _g_.stack == mp.g0.stack
+
+ if sig == _SIGPROF {
+ // Some platforms (Linux) have per-thread timers, which we use in
+ // combination with the process-wide timer. Avoid double-counting.
+ if !delayedSignal && validSIGPROF(mp, c) {
+ sigprof(c.sigpc(), c.sigsp(), c.siglr(), gp, mp)
+ }
+ return
+ }
+
+ if sig == _SIGTRAP && testSigtrap != nil && testSigtrap(info, (*sigctxt)(noescape(unsafe.Pointer(c))), gp) {
+ return
+ }
+
+ if sig == _SIGUSR1 && testSigusr1 != nil && testSigusr1(gp) {
+ return
+ }
+
+ if (GOOS == "linux" || GOOS == "android") && sig == sigPerThreadSyscall {
+ // sigPerThreadSyscall is the same signal used by glibc for
+ // per-thread syscalls on Linux. We use it for the same purpose
+ // in non-cgo binaries. Since this signal is not _SigNotify,
+ // there is nothing more to do once we run the syscall.
+ runPerThreadSyscall()
+ return
+ }
+
+ if sig == sigPreempt && debug.asyncpreemptoff == 0 && !delayedSignal {
+ // Might be a preemption signal.
+ doSigPreempt(gp, c)
+ // Even if this was definitely a preemption signal, it
+ // may have been coalesced with another signal, so we
+ // still let it through to the application.
+ }
+
+ flags := int32(_SigThrow)
+ if sig < uint32(len(sigtable)) {
+ flags = sigtable[sig].flags
+ }
+ if c.sigcode() != _SI_USER && flags&_SigPanic != 0 && gp.throwsplit {
+ // We can't safely sigpanic because it may grow the
+ // stack. Abort in the signal handler instead.
+ flags = _SigThrow
+ }
+ if isAbortPC(c.sigpc()) {
+ // On many architectures, the abort function just
+ // causes a memory fault. Don't turn that into a panic.
+ flags = _SigThrow
+ }
+ if c.sigcode() != _SI_USER && flags&_SigPanic != 0 {
+ // The signal is going to cause a panic.
+ // Arrange the stack so that it looks like the point
+ // where the signal occurred made a call to the
+ // function sigpanic. Then set the PC to sigpanic.
+
+ // Have to pass arguments out of band since
+ // augmenting the stack frame would break
+ // the unwinding code.
+ gp.sig = sig
+ gp.sigcode0 = uintptr(c.sigcode())
+ gp.sigcode1 = uintptr(c.fault())
+ gp.sigpc = c.sigpc()
+
+ c.preparePanic(sig, gp)
+ return
+ }
+
+ if c.sigcode() == _SI_USER || flags&_SigNotify != 0 {
+ if sigsend(sig) {
+ return
+ }
+ }
+
+ if c.sigcode() == _SI_USER && signal_ignored(sig) {
+ return
+ }
+
+ if flags&_SigKill != 0 {
+ dieFromSignal(sig)
+ }
+
+ // _SigThrow means that we should exit now.
+ // If we get here with _SigPanic, it means that the signal
+ // was sent to us by a program (c.sigcode() == _SI_USER);
+ // in that case, if we didn't handle it in sigsend, we exit now.
+ if flags&(_SigThrow|_SigPanic) == 0 {
+ return
+ }
+
+ _g_.m.throwing = throwTypeRuntime
+ _g_.m.caughtsig.set(gp)
+
+ if crashing == 0 {
+ startpanic_m()
+ }
+
+ if sig < uint32(len(sigtable)) {
+ print(sigtable[sig].name, "\n")
+ } else {
+ print("Signal ", sig, "\n")
+ }
+
+ print("PC=", hex(c.sigpc()), " m=", _g_.m.id, " sigcode=", c.sigcode(), "\n")
+ if _g_.m.incgo && gp == _g_.m.g0 && _g_.m.curg != nil {
+ print("signal arrived during cgo execution\n")
+ // Switch to curg so that we get a traceback of the Go code
+ // leading up to the cgocall, which switched from curg to g0.
+ gp = _g_.m.curg
+ }
+ if sig == _SIGILL || sig == _SIGFPE {
+ // It would be nice to know how long the instruction is.
+ // Unfortunately, that's complicated to do in general (mostly for x86
+ // and s390x, but other archs have non-standard instruction lengths too).
+ // Opt to print 16 bytes, which covers most instructions.
+ const maxN = 16
+ n := uintptr(maxN)
+ // We have to be careful, though. If we're near the end of
+ // a page and the following page isn't mapped, we could
+ // segfault. So make sure we don't straddle a page (even though
+ // that could lead to printing an incomplete instruction).
+ // We're assuming here we can read at least the page containing the PC.
+ // I suppose it is possible that the page is mapped executable but not readable?
+ pc := c.sigpc()
+ if n > physPageSize-pc%physPageSize {
+ n = physPageSize - pc%physPageSize
+ }
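+ // For example (illustrative numbers): if pc is 12 bytes below a page
+ // boundary, n is clamped from 16 down to 12.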
+ print("instruction bytes:")
+ b := (*[maxN]byte)(unsafe.Pointer(pc))
+ for i := uintptr(0); i < n; i++ {
+ print(" ", hex(b[i]))
+ }
+ println()
+ }
+ print("\n")
+
+ level, _, docrash := gotraceback()
+ if level > 0 {
+ goroutineheader(gp)
+ tracebacktrap(c.sigpc(), c.sigsp(), c.siglr(), gp)
+ if crashing > 0 && gp != _g_.m.curg && _g_.m.curg != nil && readgstatus(_g_.m.curg)&^_Gscan == _Grunning {
+ // tracebackothers on original m skipped this one; trace it now.
+ goroutineheader(_g_.m.curg)
+ traceback(^uintptr(0), ^uintptr(0), 0, _g_.m.curg)
+ } else if crashing == 0 {
+ tracebackothers(gp)
+ print("\n")
+ }
+ dumpregs(c)
+ }
+
+ if docrash {
+ crashing++
+ if crashing < mcount()-int32(extraMCount) {
+ // There are other m's that need to dump their stacks.
+ // Relay SIGQUIT to the next m by sending it to the current process.
+ // All m's that have already received SIGQUIT have signal masks blocking
+ // receipt of any signals, so the SIGQUIT will go to an m that hasn't seen it yet.
+ // When the last m receives the SIGQUIT, it will fall through to the call to
+ // crash below. Just in case the relaying gets botched, each m involved in
+ // the relay sleeps for 5 seconds and then does the crash/exit itself.
+ // In expected operation, the last m has received the SIGQUIT and run
+ // crash/exit and the process is gone, all long before any of the
+ // 5-second sleeps have finished.
+ print("\n-----\n\n")
+ raiseproc(_SIGQUIT)
+ usleep(5 * 1000 * 1000)
+ }
+ crash()
+ }
+
+ printDebugLog()
+
+ exit(2)
+}
+
+// sigpanic turns a synchronous signal into a run-time panic.
+// If the signal handler sees a synchronous panic, it arranges the
+// stack to look like the function where the signal occurred called
+// sigpanic, sets the signal's PC value to sigpanic, and returns from
+// the signal handler. The effect is that the program will act as
+// though the function that got the signal simply called sigpanic
+// instead.
+//
+// This must NOT be nosplit because the linker doesn't know where
+// sigpanic calls can be injected.
+//
+// The signal handler must not inject a call to sigpanic if
+// getg().throwsplit, since sigpanic may need to grow the stack.
+//
+// This is exported via linkname to assembly in runtime/cgo.
+//
+//go:linkname sigpanic
+func sigpanic() {
+ g := getg()
+ if !canpanic(g) {
+ throw("unexpected signal during runtime execution")
+ }
+
+ switch g.sig {
+ case _SIGBUS:
+ if g.sigcode0 == _BUS_ADRERR && g.sigcode1 < 0x1000 {
+ panicmem()
+ }
+ // Support runtime/debug.SetPanicOnFault.
+ if g.paniconfault {
+ panicmemAddr(g.sigcode1)
+ }
+ print("unexpected fault address ", hex(g.sigcode1), "\n")
+ throw("fault")
+ case _SIGSEGV:
+ if (g.sigcode0 == 0 || g.sigcode0 == _SEGV_MAPERR || g.sigcode0 == _SEGV_ACCERR) && g.sigcode1 < 0x1000 {
+ panicmem()
+ }
+ // Support runtime/debug.SetPanicOnFault.
+ if g.paniconfault {
+ panicmemAddr(g.sigcode1)
+ }
+ print("unexpected fault address ", hex(g.sigcode1), "\n")
+ throw("fault")
+ case _SIGFPE:
+ switch g.sigcode0 {
+ case _FPE_INTDIV:
+ panicdivide()
+ case _FPE_INTOVF:
+ panicoverflow()
+ }
+ panicfloat()
+ }
+
+ if g.sig >= uint32(len(sigtable)) {
+ // can't happen: we looked up g.sig in sigtable to decide to call sigpanic
+ throw("unexpected signal value")
+ }
+ panic(errorString(sigtable[g.sig].name))
+}
+
+// dieFromSignal kills the program with a signal.
+// This provides the expected exit status for the shell.
+// This is only called with fatal signals expected to kill the process.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func dieFromSignal(sig uint32) {
+ unblocksig(sig)
+ // Mark the signal as unhandled to ensure it is forwarded.
+ atomic.Store(&handlingSig[sig], 0)
+ raise(sig)
+
+ // That should have killed us. On some systems, though, raise
+ // sends the signal to the whole process rather than to just
+ // the current thread, which means that the signal may not yet
+ // have been delivered. Give other threads a chance to run and
+ // pick up the signal.
+ osyield()
+ osyield()
+ osyield()
+
+ // If that didn't work, try _SIG_DFL.
+ setsig(sig, _SIG_DFL)
+ raise(sig)
+
+ osyield()
+ osyield()
+ osyield()
+
+ // If we are still somehow running, just exit with the wrong status.
+ exit(2)
+}
+
+// raisebadsignal is called when a signal is received on a non-Go
+// thread, and the Go program does not want to handle it (that is, the
+// program has not called os/signal.Notify for the signal).
+func raisebadsignal(sig uint32, c *sigctxt) {
+ if sig == _SIGPROF {
+ // Ignore profiling signals that arrive on non-Go threads.
+ return
+ }
+
+ var handler uintptr
+ if sig >= _NSIG {
+ handler = _SIG_DFL
+ } else {
+ handler = atomic.Loaduintptr(&fwdSig[sig])
+ }
+
+ // Reset the signal handler and raise the signal.
+ // We are currently running inside a signal handler, so the
+ // signal is blocked. We need to unblock it before raising the
+ // signal, or the signal we raise will be ignored until we return
+ // from the signal handler. We know that the signal was unblocked
+ // before entering the handler, or else we would not have received
+ // it. That means that we don't have to worry about blocking it
+ // again.
+ unblocksig(sig)
+ setsig(sig, handler)
+
+ // If we're linked into a non-Go program we want to try to
+ // avoid modifying the original context in which the signal
+ // was raised. If the handler is the default, we know it
+ // is non-recoverable, so we don't have to worry about
+ // re-installing sighandler. At this point we can just
+ // return and the signal will be re-raised and caught by
+ // the default handler with the correct context.
+ //
+ // On FreeBSD, the libthr sigaction code prevents
+ // this from working so we fall through to raise.
+ if GOOS != "freebsd" && (isarchive || islibrary) && handler == _SIG_DFL && c.sigcode() != _SI_USER {
+ return
+ }
+
+ raise(sig)
+
+ // Give the signal a chance to be delivered.
+ // In almost all real cases the program is about to crash,
+ // so sleeping here is not a waste of time.
+ usleep(1000)
+
+ // If the signal didn't cause the program to exit, restore the
+ // Go signal handler and carry on.
+ //
+ // We may receive another instance of the signal before we
+ // restore the Go handler, but that is not so bad: we know
+ // that the Go program has been ignoring the signal.
+ setsig(sig, abi.FuncPCABIInternal(sighandler))
+}
+
+//go:nosplit
+func crash() {
+ // OS X core dumps are linear dumps of the mapped memory,
+ // from the first virtual byte to the last, with zeros in the gaps.
+ // Because of the way we arrange the address space on 64-bit systems,
+ // this means the OS X core file will be >128 GB and even on a zippy
+ // workstation can take OS X well over an hour to write (uninterruptible).
+ // Save users from making that mistake.
+ if GOOS == "darwin" && GOARCH == "amd64" {
+ return
+ }
+
+ dieFromSignal(_SIGABRT)
+}
+
+// ensureSigM starts one global, sleeping thread to make sure at least one thread
+// is available to catch signals enabled for os/signal.
+func ensureSigM() {
+ if maskUpdatedChan != nil {
+ return
+ }
+ maskUpdatedChan = make(chan struct{})
+ disableSigChan = make(chan uint32)
+ enableSigChan = make(chan uint32)
+ go func() {
+ // Signal masks are per-thread, so make sure this goroutine stays on one
+ // thread.
+ LockOSThread()
+ defer UnlockOSThread()
+ // The sigBlocked mask contains the signals not active for os/signal,
+ // initially all signals except the essential ones. When signal.Notify/Stop is called,
+ // sigenable/sigdisable in turn notify this thread to update its signal
+ // mask accordingly.
+ sigBlocked := sigset_all
+ for i := range sigtable {
+ if !blockableSig(uint32(i)) {
+ sigdelset(&sigBlocked, i)
+ }
+ }
+ sigprocmask(_SIG_SETMASK, &sigBlocked, nil)
+ for {
+ select {
+ case sig := <-enableSigChan:
+ if sig > 0 {
+ sigdelset(&sigBlocked, int(sig))
+ }
+ case sig := <-disableSigChan:
+ if sig > 0 && blockableSig(sig) {
+ sigaddset(&sigBlocked, int(sig))
+ }
+ }
+ sigprocmask(_SIG_SETMASK, &sigBlocked, nil)
+ maskUpdatedChan <- struct{}{}
+ }
+ }()
+}
+
+// This is called when we receive a signal when there is no signal stack.
+// This can only happen if non-Go code calls sigaltstack to disable the
+// signal stack.
+func noSignalStack(sig uint32) {
+ println("signal", sig, "received on thread with no signal stack")
+ throw("non-Go code disabled sigaltstack")
+}
+
+// This is called if we receive a signal when there is a signal stack
+// but we are not on it. This can only happen if non-Go code called
+// sigaction without setting the SS_ONSTACK flag.
+func sigNotOnStack(sig uint32) {
+ println("signal", sig, "received but handler not on signal stack")
+ throw("non-Go code set up signal handler without SA_ONSTACK flag")
+}
+
+// signalDuringFork is called if we receive a signal while doing a fork.
+// We do not want signals at that time, as a signal sent to the process
+// group may be delivered to the child process, causing confusion.
+// This should never be called, because we block signals across the fork;
+// this function is just a safety check. See issue 18600 for background.
+func signalDuringFork(sig uint32) {
+ println("signal", sig, "received during fork")
+ throw("signal received during fork")
+}
+
+var badginsignalMsg = "fatal: bad g in signal handler\n"
+
+// This runs on a foreign stack, without an m or a g. No stack split.
+//
+//go:nosplit
+//go:norace
+//go:nowritebarrierrec
+func badsignal(sig uintptr, c *sigctxt) {
+ if !iscgo && !cgoHasExtraM {
+ // There is no extra M. needm will not be able to grab
+ // an M. Instead of hanging, just crash.
+ // Cannot call split-stack function as there is no G.
+ s := stringStructOf(&badginsignalMsg)
+ write(2, s.str, int32(s.len))
+ exit(2)
+ *(*uintptr)(unsafe.Pointer(uintptr(123))) = 2
+ }
+ needm()
+ if !sigsend(uint32(sig)) {
+ // A foreign thread received the signal sig, and the
+ // Go code does not want to handle it.
+ raisebadsignal(uint32(sig), c)
+ }
+ dropm()
+}
+
+//go:noescape
+func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer)
+
+// Determines if the signal should be handled by Go and if not, forwards the
+// signal to the handler that was installed before Go's. Returns whether the
+// signal was forwarded.
+// This is called by the signal handler, and the world may be stopped.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func sigfwdgo(sig uint32, info *siginfo, ctx unsafe.Pointer) bool {
+ if sig >= uint32(len(sigtable)) {
+ return false
+ }
+ fwdFn := atomic.Loaduintptr(&fwdSig[sig])
+ flags := sigtable[sig].flags
+
+ // If we aren't handling the signal, forward it.
+ if atomic.Load(&handlingSig[sig]) == 0 || !signalsOK {
+ // If the signal is ignored, doing nothing is the same as forwarding.
+ if fwdFn == _SIG_IGN || (fwdFn == _SIG_DFL && flags&_SigIgn != 0) {
+ return true
+ }
+ // We are not handling the signal and there is no other handler to forward to.
+ // Crash with the default behavior.
+ if fwdFn == _SIG_DFL {
+ setsig(sig, _SIG_DFL)
+ dieFromSignal(sig)
+ return false
+ }
+
+ sigfwd(fwdFn, sig, info, ctx)
+ return true
+ }
+
+ // This function and its caller sigtrampgo assume SIGPIPE is delivered on the
+ // originating thread. This property does not hold on macOS (golang.org/issue/33384),
+ // so we have no choice but to ignore SIGPIPE.
+ if (GOOS == "darwin" || GOOS == "ios") && sig == _SIGPIPE {
+ return true
+ }
+
+ // If there is no handler to forward to, no need to forward.
+ if fwdFn == _SIG_DFL {
+ return false
+ }
+
+ c := &sigctxt{info, ctx}
+ // Only forward synchronous signals and SIGPIPE.
+ // Unfortunately, user-generated SIGPIPEs will also be forwarded, because si_code
+ // is set to _SI_USER even for a SIGPIPE raised from a write to a closed socket
+ // or pipe.
+ if (c.sigcode() == _SI_USER || flags&_SigPanic == 0) && sig != _SIGPIPE {
+ return false
+ }
+ // Determine if the signal occurred inside Go code. We test that:
+ // (1) we weren't in the VDSO page,
+ // (2) we were in a goroutine (i.e., m.curg != nil), and
+ // (3) we weren't in cgo.
+ g := sigFetchG(c)
+ if g != nil && g.m != nil && g.m.curg != nil && !g.m.incgo {
+ return false
+ }
+
+ // Signal not handled by Go, forward it.
+ if fwdFn != _SIG_IGN {
+ sigfwd(fwdFn, sig, info, ctx)
+ }
+
+ return true
+}
+
+// sigsave saves the current thread's signal mask into *p.
+// This is used to preserve the non-Go signal mask when a non-Go
+// thread calls a Go function.
+// This is nosplit and nowritebarrierrec because it is called by needm
+// which may be called on a non-Go thread with no g available.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func sigsave(p *sigset) {
+ sigprocmask(_SIG_SETMASK, nil, p)
+}
+
+// msigrestore sets the current thread's signal mask to sigmask.
+// This is used to restore the non-Go signal mask when a non-Go thread
+// calls a Go function.
+// This is nosplit and nowritebarrierrec because it is called by dropm
+// after g has been cleared.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func msigrestore(sigmask sigset) {
+ sigprocmask(_SIG_SETMASK, &sigmask, nil)
+}
+
+// sigsetAllExiting is used by sigblock(true) when a thread is
+// exiting. sigset_all is defined in OS specific code, and per GOOS
+// behavior may override this default for sigsetAllExiting: see
+// osinit().
+var sigsetAllExiting = sigset_all
+
+// sigblock blocks signals in the current thread's signal mask.
+// This is used to block signals while setting up and tearing down g
+// when a non-Go thread calls a Go function. When a thread is exiting
+// we use the sigsetAllExiting value, otherwise the OS specific
+// definition of sigset_all is used.
+// This is nosplit and nowritebarrierrec because it is called by needm
+// which may be called on a non-Go thread with no g available.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func sigblock(exiting bool) {
+ if exiting {
+ sigprocmask(_SIG_SETMASK, &sigsetAllExiting, nil)
+ return
+ }
+ sigprocmask(_SIG_SETMASK, &sigset_all, nil)
+}
+
+// unblocksig removes sig from the current thread's signal mask.
+// This is nosplit and nowritebarrierrec because it is called from
+// dieFromSignal, which can be called by sigfwdgo while running in the
+// signal handler, on the signal stack, with no g available.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func unblocksig(sig uint32) {
+ var set sigset
+ sigaddset(&set, int(sig))
+ sigprocmask(_SIG_UNBLOCK, &set, nil)
+}
+
+// minitSignals is called when initializing a new m to set the
+// thread's alternate signal stack and signal mask.
+func minitSignals() {
+ minitSignalStack()
+ minitSignalMask()
+}
+
+// minitSignalStack is called when initializing a new m to set the
+// alternate signal stack. If the alternate signal stack is not set
+// for the thread (the normal case) then set the alternate signal
+// stack to the gsignal stack. If the alternate signal stack is set
+// for the thread (the case when a non-Go thread sets the alternate
+// signal stack and then calls a Go function) then set the gsignal
+// stack to the alternate signal stack. We also set the alternate
+// signal stack to the gsignal stack if cgo is not used (regardless
+// of whether it is already set). Record which choice was made in
+// newSigstack, so that it can be undone in unminit.
+func minitSignalStack() {
+ _g_ := getg()
+ var st stackt
+ sigaltstack(nil, &st)
+ if st.ss_flags&_SS_DISABLE != 0 || !iscgo {
+ signalstack(&_g_.m.gsignal.stack)
+ _g_.m.newSigstack = true
+ } else {
+ setGsignalStack(&st, &_g_.m.goSigStack)
+ _g_.m.newSigstack = false
+ }
+}
+
+// minitSignalMask is called when initializing a new m to set the
+// thread's signal mask. When this is called all signals have been
+// blocked for the thread. This starts with m.sigmask, which was set
+// either from initSigmask for a newly created thread or by calling
+// sigsave if this is a non-Go thread calling a Go function. It
+// removes all essential signals from the mask, thus causing those
+// signals to not be blocked. Then it sets the thread's signal mask.
+// After this is called the thread can receive signals.
+func minitSignalMask() {
+ nmask := getg().m.sigmask
+ for i := range sigtable {
+ if !blockableSig(uint32(i)) {
+ sigdelset(&nmask, i)
+ }
+ }
+ sigprocmask(_SIG_SETMASK, &nmask, nil)
+}
+
+// unminitSignals is called from dropm, via unminit, to undo the
+// effect of calling minit on a non-Go thread.
+//
+//go:nosplit
+func unminitSignals() {
+ if getg().m.newSigstack {
+ st := stackt{ss_flags: _SS_DISABLE}
+ sigaltstack(&st, nil)
+ } else {
+ // We got the signal stack from someone else. Restore
+ // the Go-allocated stack in case this M gets reused
+ // for another thread (e.g., it's an extram). Also, on
+ // Android, libc allocates a signal stack for all
+ // threads, so it's important to restore the Go stack
+ // even on Go-created threads so we can free it.
+ restoreGsignalStack(&getg().m.goSigStack)
+ }
+}
+
+// blockableSig reports whether sig may be blocked by the signal mask.
+// We never want to block the signals marked _SigUnblock;
+// these are the synchronous signals that turn into a Go panic.
+// We never want to block the preemption signal if it is being used.
+// In a Go program--not a c-archive/c-shared--we never want to block
+// the signals marked _SigKill or _SigThrow, as otherwise it's possible
+// for all running threads to block them and delay their delivery until
+// we start a new thread. When linked into a C program we let the C code
+// decide on the disposition of those signals.
+func blockableSig(sig uint32) bool {
+ flags := sigtable[sig].flags
+ if flags&_SigUnblock != 0 {
+ return false
+ }
+ if sig == sigPreempt && preemptMSupported && debug.asyncpreemptoff == 0 {
+ return false
+ }
+ if isarchive || islibrary {
+ return true
+ }
+ return flags&(_SigKill|_SigThrow) == 0
+}
+
+// gsignalStack saves the fields of the gsignal stack changed by
+// setGsignalStack.
+type gsignalStack struct {
+ stack stack
+ stackguard0 uintptr
+ stackguard1 uintptr
+ stktopsp uintptr
+}
+
+// setGsignalStack sets the gsignal stack of the current m to an
+// alternate signal stack returned from the sigaltstack system call.
+// It saves the old values in *old for use by restoreGsignalStack.
+// This is used when handling a signal if non-Go code has set the
+// alternate signal stack.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func setGsignalStack(st *stackt, old *gsignalStack) {
+ g := getg()
+ if old != nil {
+ old.stack = g.m.gsignal.stack
+ old.stackguard0 = g.m.gsignal.stackguard0
+ old.stackguard1 = g.m.gsignal.stackguard1
+ old.stktopsp = g.m.gsignal.stktopsp
+ }
+ stsp := uintptr(unsafe.Pointer(st.ss_sp))
+ g.m.gsignal.stack.lo = stsp
+ g.m.gsignal.stack.hi = stsp + st.ss_size
+ g.m.gsignal.stackguard0 = stsp + _StackGuard
+ g.m.gsignal.stackguard1 = stsp + _StackGuard
+}
+
+// restoreGsignalStack restores the gsignal stack to the value it had
+// before entering the signal handler.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func restoreGsignalStack(st *gsignalStack) {
+ gp := getg().m.gsignal
+ gp.stack = st.stack
+ gp.stackguard0 = st.stackguard0
+ gp.stackguard1 = st.stackguard1
+ gp.stktopsp = st.stktopsp
+}
+
+// signalstack sets the current thread's alternate signal stack to s.
+//
+//go:nosplit
+func signalstack(s *stack) {
+ st := stackt{ss_size: s.hi - s.lo}
+ setSignalstackSP(&st, s.lo)
+ sigaltstack(&st, nil)
+}
+
+// setsigsegv is used on darwin/arm64 to fake a segmentation fault.
+//
+// This is exported via linkname to assembly in runtime/cgo.
+//
+//go:nosplit
+//go:linkname setsigsegv
+func setsigsegv(pc uintptr) {
+ g := getg()
+ g.sig = _SIGSEGV
+ g.sigpc = pc
+ g.sigcode0 = _SEGV_MAPERR
+ g.sigcode1 = 0 // TODO: emulate si_addr
+}
diff --git a/contrib/go/_std_1.19/src/runtime/sigqueue.go b/contrib/go/_std_1.19/src/runtime/sigqueue.go
new file mode 100644
index 0000000000..49502cbed3
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/sigqueue.go
@@ -0,0 +1,275 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements runtime support for signal handling.
+//
+// Most synchronization primitives are not available from
+// the signal handler (it cannot block, allocate memory, or use locks)
+// so the handler communicates with a processing goroutine
+// via struct sig, below.
+//
+// sigsend is called by the signal handler to queue a new signal.
+// signal_recv is called by the Go program to receive a newly queued signal.
+//
+// Synchronization between sigsend and signal_recv is based on the sig.state
+// variable. It can be in three states:
+// * sigReceiving means that signal_recv is blocked on sig.Note and there are
+// no new pending signals.
+// * sigSending means that sig.mask *may* contain new pending signals,
+// signal_recv can't be blocked in this state.
+// * sigIdle means that there are no new pending signals and signal_recv is not
+// blocked.
+//
+// Transitions between states are done atomically with CAS.
+//
+// When signal_recv is unblocked, it resets sig.Note and rechecks sig.mask.
+// If several sigsends and signal_recv execute concurrently, it can lead to
+// unnecessary rechecks of sig.mask, but it cannot lead to missed signals
+// nor deadlocks.
+
+//go:build !plan9
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ _ "unsafe" // for go:linkname
+)
+
+// sig handles communication between the signal handler and os/signal.
+// Other than the inuse and recv fields, the fields are accessed atomically.
+//
+// The wanted and ignored fields are only written by one goroutine at
+// a time; access is controlled by the handlers Mutex in os/signal.
+// The fields are only read by that one goroutine and by the signal handler.
+// We access them atomically to minimize the race between setting them
+// in the goroutine calling os/signal and the signal handler,
+// which may be running in a different thread. That race is unavoidable,
+// as there is no connection between handling a signal and receiving one,
+// but atomic instructions should minimize it.
+var sig struct {
+ note note
+ mask [(_NSIG + 31) / 32]uint32
+ wanted [(_NSIG + 31) / 32]uint32
+ ignored [(_NSIG + 31) / 32]uint32
+ recv [(_NSIG + 31) / 32]uint32
+ state uint32
+ delivering uint32
+ inuse bool
+}
+
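+// Bit-layout note: signal s occupies bit s%32 of word s/32 in the mask,
+// wanted, ignored, and recv arrays above; e.g. signal 33 (illustrative)
+// is bit 1 of word 1. sigsend's bit computation below relies on this.
+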
+const (
+ sigIdle = iota
+ sigReceiving
+ sigSending
+)
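+
+// A sketch of the CAS transitions between these states, assuming a single
+// receiver and any number of senders (see sigsend and signal_recv below):
+//
+//	sigsend:     sigIdle      -> sigSending   (bit queued; no wakeup needed)
+//	sigsend:     sigReceiving -> sigIdle      (then wake the receiver's note)
+//	signal_recv: sigIdle      -> sigReceiving (then sleep on sig.note)
+//	signal_recv: sigSending   -> sigIdle      (new bits pending; don't sleep)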
+
+// sigsend delivers a signal from sighandler to the internal signal delivery queue.
+// It reports whether the signal was sent. If not, the caller typically crashes the program.
+// It runs from the signal handler, so it's limited in what it can do.
+func sigsend(s uint32) bool {
+ bit := uint32(1) << uint(s&31)
+ if s >= uint32(32*len(sig.wanted)) {
+ return false
+ }
+
+ atomic.Xadd(&sig.delivering, 1)
+ // We are running in the signal handler; defer is not available.
+
+ if w := atomic.Load(&sig.wanted[s/32]); w&bit == 0 {
+ atomic.Xadd(&sig.delivering, -1)
+ return false
+ }
+
+ // Add signal to outgoing queue.
+ for {
+ mask := sig.mask[s/32]
+ if mask&bit != 0 {
+ atomic.Xadd(&sig.delivering, -1)
+ return true // signal already in queue
+ }
+ if atomic.Cas(&sig.mask[s/32], mask, mask|bit) {
+ break
+ }
+ }
+
+ // Notify receiver that queue has new bit.
+Send:
+ for {
+ switch atomic.Load(&sig.state) {
+ default:
+ throw("sigsend: inconsistent state")
+ case sigIdle:
+ if atomic.Cas(&sig.state, sigIdle, sigSending) {
+ break Send
+ }
+ case sigSending:
+ // notification already pending
+ break Send
+ case sigReceiving:
+ if atomic.Cas(&sig.state, sigReceiving, sigIdle) {
+ if GOOS == "darwin" || GOOS == "ios" {
+ sigNoteWakeup(&sig.note)
+ break Send
+ }
+ notewakeup(&sig.note)
+ break Send
+ }
+ }
+ }
+
+ atomic.Xadd(&sig.delivering, -1)
+ return true
+}
+
+// Called to receive the next queued signal.
+// Must only be called from a single goroutine at a time.
+//
+//go:linkname signal_recv os/signal.signal_recv
+func signal_recv() uint32 {
+ for {
+ // Serve any signals from local copy.
+ for i := uint32(0); i < _NSIG; i++ {
+ if sig.recv[i/32]&(1<<(i&31)) != 0 {
+ sig.recv[i/32] &^= 1 << (i & 31)
+ return i
+ }
+ }
+
+ // Wait for updates to be available from signal sender.
+ Receive:
+ for {
+ switch atomic.Load(&sig.state) {
+ default:
+ throw("signal_recv: inconsistent state")
+ case sigIdle:
+ if atomic.Cas(&sig.state, sigIdle, sigReceiving) {
+ if GOOS == "darwin" || GOOS == "ios" {
+ sigNoteSleep(&sig.note)
+ break Receive
+ }
+ notetsleepg(&sig.note, -1)
+ noteclear(&sig.note)
+ break Receive
+ }
+ case sigSending:
+ if atomic.Cas(&sig.state, sigSending, sigIdle) {
+ break Receive
+ }
+ }
+ }
+
+ // Incorporate updates from sender into local copy.
+ for i := range sig.mask {
+ sig.recv[i] = atomic.Xchg(&sig.mask[i], 0)
+ }
+ }
+}
+
+// signalWaitUntilIdle waits until the signal delivery mechanism is idle.
+// This is used to ensure that we do not drop a signal notification due
+// to a race between disabling a signal and receiving a signal.
+// This assumes that signal delivery has already been disabled for
+// the signal(s) in question, and here we are just waiting to make sure
+// that all the signals have been delivered to the user channels
+// by the os/signal package.
+//
+//go:linkname signalWaitUntilIdle os/signal.signalWaitUntilIdle
+func signalWaitUntilIdle() {
+ // Although the signals we care about have been removed from
+ // sig.wanted, it is possible that another thread has received
+ // a signal, has read from sig.wanted, is now updating sig.mask,
+ // and has not yet woken up the processor thread. We need to wait
+ // until all current signal deliveries have completed.
+ for atomic.Load(&sig.delivering) != 0 {
+ Gosched()
+ }
+
+ // Although WaitUntilIdle seems like the right name for this
+ // function, the state we are looking for is sigReceiving, not
+ // sigIdle. The sigIdle state is really more like sigProcessing.
+ for atomic.Load(&sig.state) != sigReceiving {
+ Gosched()
+ }
+}
+
+// Must only be called from a single goroutine at a time.
+//
+//go:linkname signal_enable os/signal.signal_enable
+func signal_enable(s uint32) {
+ if !sig.inuse {
+ // This is the first call to signal_enable. Initialize.
+ sig.inuse = true // enable reception of signals; cannot disable
+ if GOOS == "darwin" || GOOS == "ios" {
+ sigNoteSetup(&sig.note)
+ } else {
+ noteclear(&sig.note)
+ }
+ }
+
+ if s >= uint32(len(sig.wanted)*32) {
+ return
+ }
+
+ w := sig.wanted[s/32]
+ w |= 1 << (s & 31)
+ atomic.Store(&sig.wanted[s/32], w)
+
+ i := sig.ignored[s/32]
+ i &^= 1 << (s & 31)
+ atomic.Store(&sig.ignored[s/32], i)
+
+ sigenable(s)
+}
+
+// Must only be called from a single goroutine at a time.
+//
+//go:linkname signal_disable os/signal.signal_disable
+func signal_disable(s uint32) {
+ if s >= uint32(len(sig.wanted)*32) {
+ return
+ }
+ sigdisable(s)
+
+ w := sig.wanted[s/32]
+ w &^= 1 << (s & 31)
+ atomic.Store(&sig.wanted[s/32], w)
+}
+
+// Must only be called from a single goroutine at a time.
+//
+//go:linkname signal_ignore os/signal.signal_ignore
+func signal_ignore(s uint32) {
+ if s >= uint32(len(sig.wanted)*32) {
+ return
+ }
+ sigignore(s)
+
+ w := sig.wanted[s/32]
+ w &^= 1 << (s & 31)
+ atomic.Store(&sig.wanted[s/32], w)
+
+ i := sig.ignored[s/32]
+ i |= 1 << (s & 31)
+ atomic.Store(&sig.ignored[s/32], i)
+}
+
+// sigInitIgnored marks the signal as already ignored. This is called at
+// program start by initsig. In a shared library initsig is called by
+// libpreinit, so the runtime may not be initialized yet.
+//
+//go:nosplit
+func sigInitIgnored(s uint32) {
+ i := sig.ignored[s/32]
+ i |= 1 << (s & 31)
+ atomic.Store(&sig.ignored[s/32], i)
+}
+
+// Checked by signal handlers.
+//
+//go:linkname signal_ignored os/signal.signal_ignored
+func signal_ignored(s uint32) bool {
+ i := atomic.Load(&sig.ignored[s/32])
+ return i&(1<<(s&31)) != 0
+}
diff --git a/contrib/go/_std_1.18/src/runtime/sigqueue_note.go b/contrib/go/_std_1.19/src/runtime/sigqueue_note.go
index fb1a517fa5..fb1a517fa5 100644
--- a/contrib/go/_std_1.18/src/runtime/sigqueue_note.go
+++ b/contrib/go/_std_1.19/src/runtime/sigqueue_note.go
diff --git a/contrib/go/_std_1.18/src/runtime/sigtab_linux_generic.go b/contrib/go/_std_1.19/src/runtime/sigtab_linux_generic.go
index fe93bbafb5..fe93bbafb5 100644
--- a/contrib/go/_std_1.18/src/runtime/sigtab_linux_generic.go
+++ b/contrib/go/_std_1.19/src/runtime/sigtab_linux_generic.go
diff --git a/contrib/go/_std_1.18/src/runtime/sizeclasses.go b/contrib/go/_std_1.19/src/runtime/sizeclasses.go
index 067871eaf3..067871eaf3 100644
--- a/contrib/go/_std_1.18/src/runtime/sizeclasses.go
+++ b/contrib/go/_std_1.19/src/runtime/sizeclasses.go
diff --git a/contrib/go/_std_1.19/src/runtime/slice.go b/contrib/go/_std_1.19/src/runtime/slice.go
new file mode 100644
index 0000000000..2413a46d6a
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/slice.go
@@ -0,0 +1,344 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/abi"
+ "internal/goarch"
+ "runtime/internal/math"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+type slice struct {
+ array unsafe.Pointer
+ len int
+ cap int
+}
+
+// A notInHeapSlice is a slice backed by go:notinheap memory.
+type notInHeapSlice struct {
+ array *notInHeap
+ len int
+ cap int
+}
+
+func panicmakeslicelen() {
+ panic(errorString("makeslice: len out of range"))
+}
+
+func panicmakeslicecap() {
+ panic(errorString("makeslice: cap out of range"))
+}
+
+// makeslicecopy allocates a slice of "tolen" elements of type "et",
+// then copies "fromlen" elements of type "et" into that new allocation from "from".
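+// The compiler emits calls to makeslicecopy for the fused pattern
+// "s := make([]T, n); copy(s, from)", so allocation and copy happen in
+// a single runtime call.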
+func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer {
+ var tomem, copymem uintptr
+ if uintptr(tolen) > uintptr(fromlen) {
+ var overflow bool
+ tomem, overflow = math.MulUintptr(et.size, uintptr(tolen))
+ if overflow || tomem > maxAlloc || tolen < 0 {
+ panicmakeslicelen()
+ }
+ copymem = et.size * uintptr(fromlen)
+ } else {
+ // fromlen is a known good length that is equal to or greater than tolen,
+ // which makes tolen a good slice length too, since the from and to slices
+ // have the same element width.
+ tomem = et.size * uintptr(tolen)
+ copymem = tomem
+ }
+
+ var to unsafe.Pointer
+ if et.ptrdata == 0 {
+ to = mallocgc(tomem, nil, false)
+ if copymem < tomem {
+ memclrNoHeapPointers(add(to, copymem), tomem-copymem)
+ }
+ } else {
+ // Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
+ to = mallocgc(tomem, et, true)
+ if copymem > 0 && writeBarrier.enabled {
+ // Only shade the pointers in old.array since we know the destination slice to
+ // only contains nil pointers because it has been cleared during alloc.
+ bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem)
+ }
+ }
+
+ if raceenabled {
+ callerpc := getcallerpc()
+ pc := abi.FuncPCABIInternal(makeslicecopy)
+ racereadrangepc(from, copymem, callerpc, pc)
+ }
+ if msanenabled {
+ msanread(from, copymem)
+ }
+ if asanenabled {
+ asanread(from, copymem)
+ }
+
+ memmove(to, from, copymem)
+
+ return to
+}
+
+func makeslice(et *_type, len, cap int) unsafe.Pointer {
+ mem, overflow := math.MulUintptr(et.size, uintptr(cap))
+ if overflow || mem > maxAlloc || len < 0 || len > cap {
+ // NOTE: Produce a 'len out of range' error instead of a
+ // 'cap out of range' error when someone does make([]T, bignumber).
+ // 'cap out of range' is true too, but since the cap is only being
+ // supplied implicitly, saying len is clearer.
+ // See golang.org/issue/4085.
+ mem, overflow := math.MulUintptr(et.size, uintptr(len))
+ if overflow || mem > maxAlloc || len < 0 {
+ panicmakeslicelen()
+ }
+ panicmakeslicecap()
+ }
+
+ return mallocgc(mem, et, true)
+}
+
+func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer {
+ len := int(len64)
+ if int64(len) != len64 {
+ panicmakeslicelen()
+ }
+
+ cap := int(cap64)
+ if int64(cap) != cap64 {
+ panicmakeslicecap()
+ }
+
+ return makeslice(et, len, cap)
+}
+
+// This is a wrapper over runtime/internal/math.MulUintptr,
+// so the compiler can recognize and treat it as an intrinsic.
+func mulUintptr(a, b uintptr) (uintptr, bool) {
+ return math.MulUintptr(a, b)
+}
+
+// Keep this code in sync with cmd/compile/internal/walk/builtin.go:walkUnsafeSlice
+func unsafeslice(et *_type, ptr unsafe.Pointer, len int) {
+ if len < 0 {
+ panicunsafeslicelen()
+ }
+
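+ // Note: -uintptr(ptr) is the number of addressable bytes from ptr to
+ // the top of the address space (unsigned negation wraps), so the check
+ // below also rejects a slice that would wrap past the end of memory.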
+ mem, overflow := math.MulUintptr(et.size, uintptr(len))
+ if overflow || mem > -uintptr(ptr) {
+ if ptr == nil {
+ panicunsafeslicenilptr()
+ }
+ panicunsafeslicelen()
+ }
+}
+
+// Keep this code in sync with cmd/compile/internal/walk/builtin.go:walkUnsafeSlice
+func unsafeslice64(et *_type, ptr unsafe.Pointer, len64 int64) {
+ len := int(len64)
+ if int64(len) != len64 {
+ panicunsafeslicelen()
+ }
+ unsafeslice(et, ptr, len)
+}
+
+func unsafeslicecheckptr(et *_type, ptr unsafe.Pointer, len64 int64) {
+ unsafeslice64(et, ptr, len64)
+
+ // Check that underlying array doesn't straddle multiple heap objects.
+ // unsafeslice64 has already checked for overflow.
+ if checkptrStraddles(ptr, uintptr(len64)*et.size) {
+ throw("checkptr: unsafe.Slice result straddles multiple allocations")
+ }
+}
+
+func panicunsafeslicelen() {
+ panic(errorString("unsafe.Slice: len out of range"))
+}
+
+func panicunsafeslicenilptr() {
+ panic(errorString("unsafe.Slice: ptr is nil and len is not zero"))
+}
+
+// growslice handles slice growth during append.
+// It is passed the slice element type, the old slice, and the desired new minimum capacity,
+// and it returns a new slice with at least that capacity, with the old data
+// copied into it.
+// The new slice's length is set to the old slice's length,
+// NOT to the new requested capacity.
+// This is for codegen convenience. The old slice's length is used immediately
+// to calculate where to write new values during an append.
+// TODO: When the old backend is gone, reconsider this decision.
+// The SSA backend might prefer the new length or to return only ptr/cap and save stack space.
+func growslice(et *_type, old slice, cap int) slice {
+ if raceenabled {
+ callerpc := getcallerpc()
+ racereadrangepc(old.array, uintptr(old.len*int(et.size)), callerpc, abi.FuncPCABIInternal(growslice))
+ }
+ if msanenabled {
+ msanread(old.array, uintptr(old.len*int(et.size)))
+ }
+ if asanenabled {
+ asanread(old.array, uintptr(old.len*int(et.size)))
+ }
+
+ if cap < old.cap {
+ panic(errorString("growslice: cap out of range"))
+ }
+
+ if et.size == 0 {
+ // append should not create a slice with nil pointer but non-zero len.
+ // We assume that append doesn't need to preserve old.array in this case.
+ return slice{unsafe.Pointer(&zerobase), old.len, cap}
+ }
+
+ newcap := old.cap
+ doublecap := newcap + newcap
+ if cap > doublecap {
+ newcap = cap
+ } else {
+ const threshold = 256
+ if old.cap < threshold {
+ newcap = doublecap
+ } else {
+ // Check 0 < newcap to detect overflow
+ // and prevent an infinite loop.
+ for 0 < newcap && newcap < cap {
+ // Transition from growing 2x for small slices
+ // to growing 1.25x for large slices. This formula
+ // gives a smooth-ish transition between the two.
+ newcap += (newcap + 3*threshold) / 4
+ }
+ // Set newcap to the requested cap when
+ // the newcap calculation overflowed.
+ if newcap <= 0 {
+ newcap = cap
+ }
+ }
+ }
+
+ var overflow bool
+ var lenmem, newlenmem, capmem uintptr
+ // Specialize for common values of et.size.
+ // For 1 we don't need any division/multiplication.
+ // For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
+ // For powers of 2, use a variable shift.
+ switch {
+ case et.size == 1:
+ lenmem = uintptr(old.len)
+ newlenmem = uintptr(cap)
+ capmem = roundupsize(uintptr(newcap))
+ overflow = uintptr(newcap) > maxAlloc
+ newcap = int(capmem)
+ case et.size == goarch.PtrSize:
+ lenmem = uintptr(old.len) * goarch.PtrSize
+ newlenmem = uintptr(cap) * goarch.PtrSize
+ capmem = roundupsize(uintptr(newcap) * goarch.PtrSize)
+ overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
+ newcap = int(capmem / goarch.PtrSize)
+ case isPowerOfTwo(et.size):
+ var shift uintptr
+ if goarch.PtrSize == 8 {
+ // Mask shift for better code generation.
+ shift = uintptr(sys.Ctz64(uint64(et.size))) & 63
+ } else {
+ shift = uintptr(sys.Ctz32(uint32(et.size))) & 31
+ }
+ lenmem = uintptr(old.len) << shift
+ newlenmem = uintptr(cap) << shift
+ capmem = roundupsize(uintptr(newcap) << shift)
+ overflow = uintptr(newcap) > (maxAlloc >> shift)
+ newcap = int(capmem >> shift)
+ default:
+ lenmem = uintptr(old.len) * et.size
+ newlenmem = uintptr(cap) * et.size
+ capmem, overflow = math.MulUintptr(et.size, uintptr(newcap))
+ capmem = roundupsize(capmem)
+ newcap = int(capmem / et.size)
+ }
+
+ // The check of overflow in addition to capmem > maxAlloc is needed
+ // to prevent an overflow which can be used to trigger a segfault
+ // on 32-bit architectures with this example program:
+ //
+ // type T [1<<27 + 1]int64
+ //
+ // var d T
+ // var s []T
+ //
+ // func main() {
+ // s = append(s, d, d, d, d)
+ // print(len(s), "\n")
+ // }
+ if overflow || capmem > maxAlloc {
+ panic(errorString("growslice: cap out of range"))
+ }
+
+ var p unsafe.Pointer
+ if et.ptrdata == 0 {
+ p = mallocgc(capmem, nil, false)
+ // The append() that calls growslice is going to overwrite from old.len to cap (which will be the new length).
+ // Only clear the part that will not be overwritten.
+ memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
+ } else {
+ // Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
+ p = mallocgc(capmem, et, true)
+ if lenmem > 0 && writeBarrier.enabled {
+ // Only shade the pointers in old.array since we know the destination slice p
+ // only contains nil pointers because it has been cleared during alloc.
+ bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(old.array), lenmem-et.size+et.ptrdata)
+ }
+ }
+ memmove(p, old.array, lenmem)
+
+ return slice{p, old.len, newcap}
+}
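+
+// A worked example of the growth policy above (illustrative; it ignores
+// roundupsize, which additionally rounds capacities up to a size class):
+// appending elements one at a time, capacities evolve as
+//
+//	1, 2, 4, ..., 256   (doubling below the 256-element threshold)
+//	256 -> 256 + (256+768)/4 = 512
+//	512 -> 512 + (512+768)/4 = 832
+//	832 -> 832 + (832+768)/4 = 1232
+//
+// so growth tapers from 2x toward 1.25x as slices get large.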
+
+func isPowerOfTwo(x uintptr) bool {
+ return x&(x-1) == 0
+}
+
+// slicecopy is used to copy from a string or slice of pointerless elements into a slice.
+func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int {
+ if fromLen == 0 || toLen == 0 {
+ return 0
+ }
+
+ n := fromLen
+ if toLen < n {
+ n = toLen
+ }
+
+ if width == 0 {
+ return n
+ }
+
+ size := uintptr(n) * width
+ if raceenabled {
+ callerpc := getcallerpc()
+ pc := abi.FuncPCABIInternal(slicecopy)
+ racereadrangepc(fromPtr, size, callerpc, pc)
+ racewriterangepc(toPtr, size, callerpc, pc)
+ }
+ if msanenabled {
+ msanread(fromPtr, size)
+ msanwrite(toPtr, size)
+ }
+ if asanenabled {
+ asanread(fromPtr, size)
+ asanwrite(toPtr, size)
+ }
+
+ if size == 1 { // common case worth about 2x to do here
+ // TODO: is this still worth it with new memmove impl?
+ *(*byte)(toPtr) = *(*byte)(fromPtr) // known to be a byte pointer
+ } else {
+ memmove(toPtr, fromPtr, size)
+ }
+ return n
+}
diff --git a/contrib/go/_std_1.18/src/runtime/softfloat64.go b/contrib/go/_std_1.19/src/runtime/softfloat64.go
index 42ef009297..42ef009297 100644
--- a/contrib/go/_std_1.18/src/runtime/softfloat64.go
+++ b/contrib/go/_std_1.19/src/runtime/softfloat64.go
diff --git a/contrib/go/_std_1.19/src/runtime/stack.go b/contrib/go/_std_1.19/src/runtime/stack.go
new file mode 100644
index 0000000000..2a7f0bd1c3
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/stack.go
@@ -0,0 +1,1484 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/abi"
+ "internal/cpu"
+ "internal/goarch"
+ "internal/goos"
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+/*
+Stack layout parameters.
+Included both by runtime (compiled via 6c) and linkers (compiled via gcc).
+
+The per-goroutine g->stackguard is set to point StackGuard bytes
+above the bottom of the stack. Each function compares its stack
+pointer against g->stackguard to check for overflow. To cut one
+instruction from the check sequence for functions with tiny frames,
+the stack is allowed to protrude StackSmall bytes below the stack
+guard. Functions with large frames don't bother with the check and
+always call morestack. The sequences are (for amd64, others are
+similar):
+
+ guard = g->stackguard
+ frame = function's stack frame size
+ argsize = size of function arguments (call + return)
+
+ stack frame size <= StackSmall:
+ CMPQ guard, SP
+ JHI 3(PC)
+ MOVQ m->morearg, $(argsize << 32)
+ CALL morestack(SB)
+
+ stack frame size > StackSmall but < StackBig
+ LEAQ (frame-StackSmall)(SP), R0
+ CMPQ guard, R0
+ JHI 3(PC)
+ MOVQ m->morearg, $(argsize << 32)
+ CALL morestack(SB)
+
+ stack frame size >= StackBig:
+ MOVQ m->morearg, $((argsize << 32) | frame)
+ CALL morestack(SB)
+
+The bottom StackGuard - StackSmall bytes are important: there has
+to be enough room to execute functions that refuse to check for
+stack overflow, either because they need to be adjacent to the
+actual caller's frame (deferproc) or because they handle the imminent
+stack overflow (morestack).
+
+For example, deferproc might call malloc, which does one of the
+above checks (without allocating a full frame), which might trigger
+a call to morestack. This sequence needs to fit in the bottom
+section of the stack. On amd64, morestack's frame is 40 bytes, and
+deferproc's frame is 56 bytes. That fits well within the
+StackGuard - StackSmall bytes at the bottom.
+The linkers explore all possible call traces involving non-splitting
+functions to make sure that this limit cannot be violated.
+*/
+
+const (
+ // StackSystem is a number of additional bytes to add
+ // to each stack below the usual guard area for OS-specific
+ // purposes like signal handling. Used on Windows, Plan 9,
+ // and iOS because they do not use a separate stack.
+ _StackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024
+
+ // The minimum size of stack used by Go code
+ _StackMin = 2048
+
+ // The minimum stack size to allocate.
+ // The hackery here rounds FixedStack0 up to a power of 2.
+ _FixedStack0 = _StackMin + _StackSystem
+ _FixedStack1 = _FixedStack0 - 1
+ _FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
+ _FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
+ _FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
+ _FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
+ _FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
+ _FixedStack = _FixedStack6 + 1
+
+ // Functions that need frames bigger than this use an extra
+ // instruction to do the stack split check, to avoid overflow
+ // in case SP - framesize wraps below zero.
+ // This value can be no bigger than the size of the unmapped
+ // space at zero.
+ _StackBig = 4096
+
+ // The stack guard is a pointer this many bytes above the
+ // bottom of the stack.
+ //
+ // The guard leaves enough room for one _StackSmall frame plus
+ // a _StackLimit chain of NOSPLIT calls plus _StackSystem
+ // bytes for the OS.
+ _StackGuard = 928*sys.StackGuardMultiplier + _StackSystem
+
+ // After a stack split check the SP is allowed to be this
+ // many bytes below the stack guard. This saves an instruction
+ // in the checking sequence for tiny frames.
+ _StackSmall = 128
+
+ // The maximum number of bytes that a chain of NOSPLIT
+ // functions can use.
+ _StackLimit = _StackGuard - _StackSystem - _StackSmall
+)
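+
+// For concreteness (illustrative; linux/amd64 with the default
+// StackGuardMultiplier of 1): _StackSystem is 0, so _StackGuard is 928,
+// _StackLimit is 928 - 0 - 128 = 800, and _FixedStack stays at the
+// 2048-byte _StackMin, since 2048 is already a power of two.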
+
+const (
+ // stackDebug == 0: no logging
+ // == 1: logging of per-stack operations
+ // == 2: logging of per-frame operations
+ // == 3: logging of per-word updates
+ // == 4: logging of per-word reads
+ stackDebug = 0
+ stackFromSystem = 0 // allocate stacks from system memory instead of the heap
+ stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
+ stackPoisonCopy = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
+ stackNoCache = 0 // disable per-P small stack caches
+
+ // check the BP links during traceback.
+ debugCheckBP = false
+)
+
+const (
+ uintptrMask = 1<<(8*goarch.PtrSize) - 1
+
+ // The values below can be stored to g.stackguard0 to force
+ // the next stack check to fail.
+ // These are all larger than any real SP.
+
+ // Goroutine preemption request.
+ // 0xfffffade in hex.
+ stackPreempt = uintptrMask & -1314
+
+ // Thread is forking. Causes a split stack check failure.
+ // 0xfffffb2e in hex.
+ stackFork = uintptrMask & -1234
+
+ // Force a stack movement. Used for debugging.
+ // 0xfffffeed in hex.
+ stackForceMove = uintptrMask & -275
+
+ // stackPoisonMin is the lowest allowed stack poison value.
+ stackPoisonMin = uintptrMask & -4096
+)
+
+// Global pool of spans that have free stacks.
+// Stacks are assigned an order according to size.
+//
+// order = log_2(size/FixedStack)
+//
+// There is a free list for each order.
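+//
+// E.g. with _FixedStack == 2048 and _NumStackOrders == 4 (illustrative,
+// linux/amd64): order 0 holds 2 KiB stacks, order 1 4 KiB, order 2 8 KiB,
+// and order 3 16 KiB.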
+var stackpool [_NumStackOrders]struct {
+ item stackpoolItem
+ _ [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte
+}
+
+//go:notinheap
+type stackpoolItem struct {
+ mu mutex
+ span mSpanList
+}
+
+// Global pool of large stack spans.
+var stackLarge struct {
+ lock mutex
+ free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
+}
+
+func stackinit() {
+ if _StackCacheSize&_PageMask != 0 {
+ throw("cache size must be a multiple of page size")
+ }
+ for i := range stackpool {
+ stackpool[i].item.span.init()
+ lockInit(&stackpool[i].item.mu, lockRankStackpool)
+ }
+ for i := range stackLarge.free {
+ stackLarge.free[i].init()
+ lockInit(&stackLarge.lock, lockRankStackLarge)
+ }
+}
+
+// stacklog2 returns ⌊log_2(n)⌋.
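+// E.g. stacklog2(1) == 0, stacklog2(8) == 3, and stacklog2(12) == 3
+// (the result is floored).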
+func stacklog2(n uintptr) int {
+ log2 := 0
+ for n > 1 {
+ n >>= 1
+ log2++
+ }
+ return log2
+}
+
+// Allocates a stack from the free pool. Must be called with
+// stackpool[order].item.mu held.
+func stackpoolalloc(order uint8) gclinkptr {
+ list := &stackpool[order].item.span
+ s := list.first
+ lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
+ if s == nil {
+ // no free stacks. Allocate another span worth.
+ s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
+ if s == nil {
+ throw("out of memory")
+ }
+ if s.allocCount != 0 {
+ throw("bad allocCount")
+ }
+ if s.manualFreeList.ptr() != nil {
+ throw("bad manualFreeList")
+ }
+ osStackAlloc(s)
+ s.elemsize = _FixedStack << order
+ for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
+ x := gclinkptr(s.base() + i)
+ x.ptr().next = s.manualFreeList
+ s.manualFreeList = x
+ }
+ list.insert(s)
+ }
+ x := s.manualFreeList
+ if x.ptr() == nil {
+ throw("span has no free stacks")
+ }
+ s.manualFreeList = x.ptr().next
+ s.allocCount++
+ if s.manualFreeList.ptr() == nil {
+ // all stacks in s are allocated.
+ list.remove(s)
+ }
+ return x
+}
+
+// Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
+func stackpoolfree(x gclinkptr, order uint8) {
+ s := spanOfUnchecked(uintptr(x))
+ if s.state.get() != mSpanManual {
+ throw("freeing stack not in a stack span")
+ }
+ if s.manualFreeList.ptr() == nil {
+ // s will now have a free stack
+ stackpool[order].item.span.insert(s)
+ }
+ x.ptr().next = s.manualFreeList
+ s.manualFreeList = x
+ s.allocCount--
+ if gcphase == _GCoff && s.allocCount == 0 {
+ // Span is completely free. Return it to the heap
+ // immediately if we're sweeping.
+ //
+ // If GC is active, we delay the free until the end of
+ // GC to avoid the following type of situation:
+ //
+ // 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
+ // 2) The stack that pointer points to is copied
+ // 3) The old stack is freed
+ // 4) The containing span is marked free
+ // 5) GC attempts to mark the SudoG.elem pointer. The
+ // marking fails because the pointer looks like a
+ // pointer into a free span.
+ //
+ // By not freeing, we prevent step #4 until GC is done.
+ stackpool[order].item.span.remove(s)
+ s.manualFreeList = 0
+ osStackFree(s)
+ mheap_.freeManual(s, spanAllocStack)
+ }
+}
+
+// stackcacherefill/stackcacherelease implement a global pool of stack segments.
+// The pool is required to prevent unlimited growth of per-thread caches.
+//
+//go:systemstack
+func stackcacherefill(c *mcache, order uint8) {
+ if stackDebug >= 1 {
+ print("stackcacherefill order=", order, "\n")
+ }
+
+ // Grab some stacks from the global cache.
+ // Grab half of the allowed capacity (to prevent thrashing).
+ var list gclinkptr
+ var size uintptr
+ lock(&stackpool[order].item.mu)
+ for size < _StackCacheSize/2 {
+ x := stackpoolalloc(order)
+ x.ptr().next = list
+ list = x
+ size += _FixedStack << order
+ }
+ unlock(&stackpool[order].item.mu)
+ c.stackcache[order].list = list
+ c.stackcache[order].size = size
+}
+
+//go:systemstack
+func stackcacherelease(c *mcache, order uint8) {
+ if stackDebug >= 1 {
+ print("stackcacherelease order=", order, "\n")
+ }
+ x := c.stackcache[order].list
+ size := c.stackcache[order].size
+ lock(&stackpool[order].item.mu)
+ for size > _StackCacheSize/2 {
+ y := x.ptr().next
+ stackpoolfree(x, order)
+ x = y
+ size -= _FixedStack << order
+ }
+ unlock(&stackpool[order].item.mu)
+ c.stackcache[order].list = x
+ c.stackcache[order].size = size
+}
+
+//go:systemstack
+func stackcache_clear(c *mcache) {
+ if stackDebug >= 1 {
+ print("stackcache clear\n")
+ }
+ for order := uint8(0); order < _NumStackOrders; order++ {
+ lock(&stackpool[order].item.mu)
+ x := c.stackcache[order].list
+ for x.ptr() != nil {
+ y := x.ptr().next
+ stackpoolfree(x, order)
+ x = y
+ }
+ c.stackcache[order].list = 0
+ c.stackcache[order].size = 0
+ unlock(&stackpool[order].item.mu)
+ }
+}
+
+// stackalloc allocates an n byte stack.
+//
+// stackalloc must run on the system stack because it uses per-P
+// resources and must not split the stack.
+//
+//go:systemstack
+func stackalloc(n uint32) stack {
+ // Stackalloc must be called on scheduler stack, so that we
+ // never try to grow the stack during the code that stackalloc runs.
+ // Doing so would cause a deadlock (issue 1547).
+ thisg := getg()
+ if thisg != thisg.m.g0 {
+ throw("stackalloc not on scheduler stack")
+ }
+ if n&(n-1) != 0 {
+ throw("stack size not a power of 2")
+ }
+ if stackDebug >= 1 {
+ print("stackalloc ", n, "\n")
+ }
+
+ if debug.efence != 0 || stackFromSystem != 0 {
+ n = uint32(alignUp(uintptr(n), physPageSize))
+ v := sysAlloc(uintptr(n), &memstats.stacks_sys)
+ if v == nil {
+ throw("out of memory (stackalloc)")
+ }
+ return stack{uintptr(v), uintptr(v) + uintptr(n)}
+ }
+
+ // Small stacks are allocated with a fixed-size free-list allocator.
+ // If we need a stack of a bigger size, we fall back on allocating
+ // a dedicated span.
+ var v unsafe.Pointer
+ if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
+ order := uint8(0)
+ n2 := n
+ for n2 > _FixedStack {
+ order++
+ n2 >>= 1
+ }
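+ // E.g. with _FixedStack == 2048 (illustrative): n == 2048 gives
+ // order 0, 4096 gives order 1, 8192 gives order 2, and so on.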
+ var x gclinkptr
+ if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
+ // thisg.m.p == 0 can happen in the guts of exitsyscall
+ // or procresize. Just get a stack from the global pool.
+ // Also don't touch stackcache during gc
+ // as it's flushed concurrently.
+ lock(&stackpool[order].item.mu)
+ x = stackpoolalloc(order)
+ unlock(&stackpool[order].item.mu)
+ } else {
+ c := thisg.m.p.ptr().mcache
+ x = c.stackcache[order].list
+ if x.ptr() == nil {
+ stackcacherefill(c, order)
+ x = c.stackcache[order].list
+ }
+ c.stackcache[order].list = x.ptr().next
+ c.stackcache[order].size -= uintptr(n)
+ }
+ v = unsafe.Pointer(x)
+ } else {
+ var s *mspan
+ npage := uintptr(n) >> _PageShift
+ log2npage := stacklog2(npage)
+
+ // Try to get a stack from the large stack cache.
+ lock(&stackLarge.lock)
+ if !stackLarge.free[log2npage].isEmpty() {
+ s = stackLarge.free[log2npage].first
+ stackLarge.free[log2npage].remove(s)
+ }
+ unlock(&stackLarge.lock)
+
+ lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
+
+ if s == nil {
+ // Allocate a new stack from the heap.
+ s = mheap_.allocManual(npage, spanAllocStack)
+ if s == nil {
+ throw("out of memory")
+ }
+ osStackAlloc(s)
+ s.elemsize = uintptr(n)
+ }
+ v = unsafe.Pointer(s.base())
+ }
+
+ if raceenabled {
+ racemalloc(v, uintptr(n))
+ }
+ if msanenabled {
+ msanmalloc(v, uintptr(n))
+ }
+ if asanenabled {
+ asanunpoison(v, uintptr(n))
+ }
+ if stackDebug >= 1 {
+ print(" allocated ", v, "\n")
+ }
+ return stack{uintptr(v), uintptr(v) + uintptr(n)}
+}
+
+// stackfree frees an n byte stack allocation at stk.
+//
+// stackfree must run on the system stack because it uses per-P
+// resources and must not split the stack.
+//
+//go:systemstack
+func stackfree(stk stack) {
+ gp := getg()
+ v := unsafe.Pointer(stk.lo)
+ n := stk.hi - stk.lo
+ if n&(n-1) != 0 {
+ throw("stack not a power of 2")
+ }
+ if stk.lo+n < stk.hi {
+ throw("bad stack size")
+ }
+ if stackDebug >= 1 {
+ println("stackfree", v, n)
+ memclrNoHeapPointers(v, n) // for testing, clobber stack data
+ }
+ if debug.efence != 0 || stackFromSystem != 0 {
+ if debug.efence != 0 || stackFaultOnFree != 0 {
+ sysFault(v, n)
+ } else {
+ sysFree(v, n, &memstats.stacks_sys)
+ }
+ return
+ }
+ if msanenabled {
+ msanfree(v, n)
+ }
+ if asanenabled {
+ asanpoison(v, n)
+ }
+ if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
+ order := uint8(0)
+ n2 := n
+ for n2 > _FixedStack {
+ order++
+ n2 >>= 1
+ }
+ x := gclinkptr(v)
+ if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
+ lock(&stackpool[order].item.mu)
+ stackpoolfree(x, order)
+ unlock(&stackpool[order].item.mu)
+ } else {
+ c := gp.m.p.ptr().mcache
+ if c.stackcache[order].size >= _StackCacheSize {
+ stackcacherelease(c, order)
+ }
+ x.ptr().next = c.stackcache[order].list
+ c.stackcache[order].list = x
+ c.stackcache[order].size += n
+ }
+ } else {
+ s := spanOfUnchecked(uintptr(v))
+ if s.state.get() != mSpanManual {
+ println(hex(s.base()), v)
+ throw("bad span state")
+ }
+ if gcphase == _GCoff {
+ // Free the stack immediately if we're
+ // sweeping.
+ osStackFree(s)
+ mheap_.freeManual(s, spanAllocStack)
+ } else {
+ // If the GC is running, we can't return a
+ // stack span to the heap because it could be
+ // reused as a heap span, and this state
+ // change would race with GC. Add it to the
+ // large stack cache instead.
+ log2npage := stacklog2(s.npages)
+ lock(&stackLarge.lock)
+ stackLarge.free[log2npage].insert(s)
+ unlock(&stackLarge.lock)
+ }
+ }
+}
+
+var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
+
+var maxstackceiling = maxstacksize
+
+var ptrnames = []string{
+ 0: "scalar",
+ 1: "ptr",
+}
+
+// Stack frame layout
+//
+// (x86)
+// +------------------+
+// | args from caller |
+// +------------------+ <- frame->argp
+// | return address |
+// +------------------+
+// | caller's BP (*) | (*) if framepointer_enabled && varp < sp
+// +------------------+ <- frame->varp
+// | locals |
+// +------------------+
+// | args to callee |
+// +------------------+ <- frame->sp
+//
+// (arm)
+// +------------------+
+// | args from caller |
+// +------------------+ <- frame->argp
+// | caller's retaddr |
+// +------------------+ <- frame->varp
+// | locals |
+// +------------------+
+// | args to callee |
+// +------------------+
+// | return address |
+// +------------------+ <- frame->sp
+
+type adjustinfo struct {
+ old stack
+ delta uintptr // ptr distance from old to new stack (newbase - oldbase)
+ cache pcvalueCache
+
+ // sghi is the highest sudog.elem on the stack.
+ sghi uintptr
+}
+
+// adjustpointer checks whether *vpp is in the old stack described by adjinfo.
+// If so, it rewrites *vpp to point into the new stack.
+func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
+ pp := (*uintptr)(vpp)
+ p := *pp
+ if stackDebug >= 4 {
+ print(" ", pp, ":", hex(p), "\n")
+ }
+ if adjinfo.old.lo <= p && p < adjinfo.old.hi {
+ *pp = p + adjinfo.delta
+ if stackDebug >= 3 {
+ print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
+ }
+ }
+}
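+
+// exampleAdjust is an editor's illustration, not part of the upstream file:
+// the interval test and delta arithmetic adjustpointer applies. If a stack
+// moves from [0x1000, 0x3000) to [0x5000, 0x7000), delta is 0x4000 and an
+// old-stack pointer 0x2100 is rewritten to 0x6100; anything outside the old
+// stack is left untouched.
+func exampleAdjust(p, oldLo, oldHi, delta uintptr) uintptr {
+ if oldLo <= p && p < oldHi {
+ return p + delta
+ }
+ return p
+}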
+
+// Information from the compiler about the layout of stack frames.
+// Note: this type must agree with reflect.bitVector.
+type bitvector struct {
+ n int32 // # of bits
+ bytedata *uint8
+}
+
+// ptrbit returns the i'th bit in bv.
+// ptrbit is less efficient than iterating directly over bitvector bits,
+// and should only be used in non-performance-critical code.
+// See adjustpointers for an example of a high-efficiency walk of a bitvector.
+func (bv *bitvector) ptrbit(i uintptr) uint8 {
+ b := *(addb(bv.bytedata, i/8))
+ return (b >> (i % 8)) & 1
+}
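+
+// exampleBitmapBit is an editor's illustration, not part of the upstream
+// file: the little-endian bit indexing ptrbit performs, written against a
+// plain byte slice. For bitmap []byte{0b00000101}, bits 0 and 2 are set,
+// marking stack slots 0 and 2 as pointers.
+func exampleBitmapBit(bitmap []byte, i uintptr) uint8 {
+ return (bitmap[i/8] >> (i % 8)) & 1
+}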
+
+// bv describes the memory starting at address scanp.
+// Adjust any pointers contained therein.
+func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
+ minp := adjinfo.old.lo
+ maxp := adjinfo.old.hi
+ delta := adjinfo.delta
+ num := uintptr(bv.n)
+ // If this frame might contain channel receive slots, use CAS
+ // to adjust pointers. If the slot hasn't been received into
+ // yet, it may contain stack pointers and a concurrent send
+ // could race with adjusting those pointers. (The sent value
+ // itself can never contain stack pointers.)
+ useCAS := uintptr(scanp) < adjinfo.sghi
+ for i := uintptr(0); i < num; i += 8 {
+ if stackDebug >= 4 {
+ for j := uintptr(0); j < 8; j++ {
+ print(" ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
+ }
+ }
+ b := *(addb(bv.bytedata, i/8))
+ for b != 0 {
+ j := uintptr(sys.Ctz8(b))
+ b &= b - 1
+ pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
+ retry:
+ p := *pp
+ if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
+ // Looks like a junk value in a pointer slot.
+ // Live analysis wrong?
+ getg().m.traceback = 2
+ print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
+ throw("invalid pointer found on stack")
+ }
+ if minp <= p && p < maxp {
+ if stackDebug >= 3 {
+ print("adjust ptr ", hex(p), " ", funcname(f), "\n")
+ }
+ if useCAS {
+ ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
+ if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
+ goto retry
+ }
+ } else {
+ *pp = p + delta
+ }
+ }
+ }
+ }
+}
+
+// Note: the argument/return area is adjusted by the callee.
+func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
+ adjinfo := (*adjustinfo)(arg)
+ if frame.continpc == 0 {
+ // Frame is dead.
+ return true
+ }
+ f := frame.fn
+ if stackDebug >= 2 {
+ print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
+ }
+ if f.funcID == funcID_systemstack_switch {
+ // A special routine at the bottom of the stack of a goroutine
+ // that does a systemstack call. We allow it to be copied even
+ // though we don't have full GC info for it (because it is
+ // written in asm).
+ return true
+ }
+
+ locals, args, objs := getStackMap(frame, &adjinfo.cache, true)
+
+ // Adjust local variables if stack frame has been allocated.
+ if locals.n > 0 {
+ size := uintptr(locals.n) * goarch.PtrSize
+ adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
+ }
+
+ // Adjust saved base pointer if there is one.
+ // TODO what about arm64 frame pointer adjustment?
+ if goarch.ArchFamily == goarch.AMD64 && frame.argp-frame.varp == 2*goarch.PtrSize {
+ if stackDebug >= 3 {
+ print(" saved bp\n")
+ }
+ if debugCheckBP {
+ // Frame pointers should always point to the next higher frame on
+ // the Go stack (or be nil, for the top frame on the stack).
+ bp := *(*uintptr)(unsafe.Pointer(frame.varp))
+ if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
+ println("runtime: found invalid frame pointer")
+ print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
+ throw("bad frame pointer")
+ }
+ }
+ adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
+ }
+
+ // Adjust arguments.
+ if args.n > 0 {
+ if stackDebug >= 3 {
+ print(" args\n")
+ }
+ adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
+ }
+
+ // Adjust pointers in all stack objects (whether they are live or not).
+ // See comments in mgcmark.go:scanframeworker.
+ if frame.varp != 0 {
+ for i := range objs {
+ obj := &objs[i]
+ off := obj.off
+ base := frame.varp // locals base pointer
+ if off >= 0 {
+ base = frame.argp // arguments and return values base pointer
+ }
+ p := base + uintptr(off)
+ if p < frame.sp {
+ // Object hasn't been allocated in the frame yet.
+ // (Happens when the stack bounds check fails and
+ // we call into morestack.)
+ continue
+ }
+ ptrdata := obj.ptrdata()
+ gcdata := obj.gcdata()
+ var s *mspan
+ if obj.useGCProg() {
+ // See comments in mgcmark.go:scanstack
+ s = materializeGCProg(ptrdata, gcdata)
+ gcdata = (*byte)(unsafe.Pointer(s.startAddr))
+ }
+ for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
+ if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
+ adjustpointer(adjinfo, unsafe.Pointer(p+i))
+ }
+ }
+ if s != nil {
+ dematerializeGCProg(s)
+ }
+ }
+ }
+
+ return true
+}
+
+func adjustctxt(gp *g, adjinfo *adjustinfo) {
+ adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
+ if !framepointer_enabled {
+ return
+ }
+ if debugCheckBP {
+ bp := gp.sched.bp
+ if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
+ println("runtime: found invalid top frame pointer")
+ print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
+ throw("bad top frame pointer")
+ }
+ }
+ adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
+}
+
+func adjustdefers(gp *g, adjinfo *adjustinfo) {
+ // Adjust pointers in the Defer structs.
+ // We need to do this first because we need to adjust the
+ // defer.link fields so we always work on the new stack.
+ adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
+ for d := gp._defer; d != nil; d = d.link {
+ adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
+ adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
+ adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
+ adjustpointer(adjinfo, unsafe.Pointer(&d.link))
+ adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
+ adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
+ }
+}
+
+func adjustpanics(gp *g, adjinfo *adjustinfo) {
+ // Panics are on stack and already adjusted.
+ // Update pointer to head of list in G.
+ adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
+}
+
+func adjustsudogs(gp *g, adjinfo *adjustinfo) {
+ // The data elements pointed to by a SudoG structure
+ // might be on the stack.
+ for s := gp.waiting; s != nil; s = s.waitlink {
+ adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
+ }
+}
+
+func fillstack(stk stack, b byte) {
+ for p := stk.lo; p < stk.hi; p++ {
+ *(*byte)(unsafe.Pointer(p)) = b
+ }
+}
+
+func findsghi(gp *g, stk stack) uintptr {
+ var sghi uintptr
+ for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+ p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
+ if stk.lo <= p && p < stk.hi && p > sghi {
+ sghi = p
+ }
+ }
+ return sghi
+}
+
+// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
+// stack they refer to while synchronizing with concurrent channel
+// operations. It returns the number of bytes of stack copied.
+func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
+ if gp.waiting == nil {
+ return 0
+ }
+
+ // Lock channels to prevent concurrent send/receive.
+ var lastc *hchan
+ for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+ if sg.c != lastc {
+ // There is a ranking cycle here between gscan bit and
+ // hchan locks. Normally, we only allow acquiring hchan
+ // locks and then getting a gscan bit. In this case, we
+ // already have the gscan bit. We allow acquiring hchan
+ // locks here as a special case, since a deadlock can't
+ // happen because the G involved must already be
+ // suspended. So, we get a special hchan lock rank here
+ // that is lower than gscan, but doesn't allow acquiring
+ // any other locks other than hchan.
+ lockWithRank(&sg.c.lock, lockRankHchanLeaf)
+ }
+ lastc = sg.c
+ }
+
+ // Adjust sudogs.
+ adjustsudogs(gp, adjinfo)
+
+ // Copy the part of the stack the sudogs point into
+ // while holding the lock to prevent races on
+ // send/receive slots.
+ var sgsize uintptr
+ if adjinfo.sghi != 0 {
+ oldBot := adjinfo.old.hi - used
+ newBot := oldBot + adjinfo.delta
+ sgsize = adjinfo.sghi - oldBot
+ memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
+ }
+
+ // Unlock channels.
+ lastc = nil
+ for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+ if sg.c != lastc {
+ unlock(&sg.c.lock)
+ }
+ lastc = sg.c
+ }
+
+ return sgsize
+}
+
+// Copies gp's stack to a new stack of a different size.
+// Caller must have changed gp status to Gcopystack.
+func copystack(gp *g, newsize uintptr) {
+ if gp.syscallsp != 0 {
+ throw("stack growth not allowed in system call")
+ }
+ old := gp.stack
+ if old.lo == 0 {
+ throw("nil stackbase")
+ }
+ used := old.hi - gp.sched.sp
+ // Add just the difference to gcController.addScannableStack.
+ // g0 stacks never move, so this will never account for them.
+ // It's also fine if we have no P, addScannableStack can deal with
+ // that case.
+ gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))
+
+ // allocate new stack
+ new := stackalloc(uint32(newsize))
+ if stackPoisonCopy != 0 {
+ fillstack(new, 0xfd)
+ }
+ if stackDebug >= 1 {
+ print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
+ }
+
+ // Compute adjustment.
+ var adjinfo adjustinfo
+ adjinfo.old = old
+ adjinfo.delta = new.hi - old.hi
+
+ // Adjust sudogs, synchronizing with channel ops if necessary.
+ ncopy := used
+ if !gp.activeStackChans {
+ if newsize < old.hi-old.lo && atomic.Load8(&gp.parkingOnChan) != 0 {
+ // It's not safe for someone to shrink this stack while we're actively
+ // parking on a channel, but it is safe to grow since we do that
+ // ourselves and explicitly don't want to synchronize with channels
+ // since we could self-deadlock.
+ throw("racy sudog adjustment due to parking on channel")
+ }
+ adjustsudogs(gp, &adjinfo)
+ } else {
+ // sudogs may be pointing in to the stack and gp has
+ // released channel locks, so other goroutines could
+ // be writing to gp's stack. Find the highest such
+ // pointer so we can handle everything there and below
+ // carefully. (This shouldn't be far from the bottom
+ // of the stack, so there's little cost in handling
+ // everything below it carefully.)
+ adjinfo.sghi = findsghi(gp, old)
+
+ // Synchronize with channel ops and copy the part of
+ // the stack they may interact with.
+ ncopy -= syncadjustsudogs(gp, used, &adjinfo)
+ }
+
+ // Copy the stack (or the rest of it) to the new location
+ memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)
+
+ // Adjust remaining structures that have pointers into stacks.
+ // We have to do most of these before we traceback the new
+ // stack because gentraceback uses them.
+ adjustctxt(gp, &adjinfo)
+ adjustdefers(gp, &adjinfo)
+ adjustpanics(gp, &adjinfo)
+ if adjinfo.sghi != 0 {
+ adjinfo.sghi += adjinfo.delta
+ }
+
+ // Swap out old stack for new one
+ gp.stack = new
+ gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
+ gp.sched.sp = new.hi - used
+ gp.stktopsp += adjinfo.delta
+
+ // Adjust pointers in the new stack.
+ gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)
+
+ // free old stack
+ if stackPoisonCopy != 0 {
+ fillstack(old, 0xfc)
+ }
+ stackfree(old)
+}
+
+// round x up to a power of 2.
+func round2(x int32) int32 {
+ s := uint(0)
+ for 1<<s < x {
+ s++
+ }
+ return 1 << s
+}
+
+// Called from runtime·morestack when more stack is needed.
+// Allocate larger stack and relocate to new stack.
+// Stack growth is multiplicative, for constant amortized cost.
+//
+// g->atomicstatus will be Grunning or Gscanrunning upon entry.
+// If the scheduler is trying to stop this g, then it will set preemptStop.
+//
+// This must be nowritebarrierrec because it can be called as part of
+// stack growth from other nowritebarrierrec functions, but the
+// compiler doesn't check this.
+//
+//go:nowritebarrierrec
+func newstack() {
+ thisg := getg()
+ // TODO: double check all gp. shouldn't be getg().
+ if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
+ throw("stack growth after fork")
+ }
+ if thisg.m.morebuf.g.ptr() != thisg.m.curg {
+ print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
+ morebuf := thisg.m.morebuf
+ traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
+ throw("runtime: wrong goroutine in newstack")
+ }
+
+ gp := thisg.m.curg
+
+ if thisg.m.curg.throwsplit {
+ // Update syscallsp, syscallpc in case traceback uses them.
+ morebuf := thisg.m.morebuf
+ gp.syscallsp = morebuf.sp
+ gp.syscallpc = morebuf.pc
+ pcname, pcoff := "(unknown)", uintptr(0)
+ f := findfunc(gp.sched.pc)
+ if f.valid() {
+ pcname = funcname(f)
+ pcoff = gp.sched.pc - f.entry()
+ }
+ print("runtime: newstack at ", pcname, "+", hex(pcoff),
+ " sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
+ "\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
+ "\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
+
+ thisg.m.traceback = 2 // Include runtime frames
+ traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
+ throw("runtime: stack split at bad time")
+ }
+
+ morebuf := thisg.m.morebuf
+ thisg.m.morebuf.pc = 0
+ thisg.m.morebuf.lr = 0
+ thisg.m.morebuf.sp = 0
+ thisg.m.morebuf.g = 0
+
+ // NOTE: stackguard0 may change underfoot, if another thread
+ // is about to try to preempt gp. Read it just once and use that same
+ // value now and below.
+ stackguard0 := atomic.Loaduintptr(&gp.stackguard0)
+
+ // Be conservative about where we preempt.
+ // We are interested in preempting user Go code, not runtime code.
+ // If we're holding locks, mallocing, or preemption is disabled, don't
+ // preempt.
+ // This check is very early in newstack so that even the status change
+ // from Grunning to Gwaiting and back doesn't happen in this case.
+ // That status change by itself can be viewed as a small preemption,
+ // because the GC might change Gwaiting to Gscanwaiting, and then
+ // this goroutine has to wait for the GC to finish before continuing.
+ // If the GC is in some way dependent on this goroutine (for example,
+ // it needs a lock held by the goroutine), that small preemption turns
+ // into a real deadlock.
+ preempt := stackguard0 == stackPreempt
+ if preempt {
+ if !canPreemptM(thisg.m) {
+ // Let the goroutine keep running for now.
+ // gp->preempt is set, so it will be preempted next time.
+ gp.stackguard0 = gp.stack.lo + _StackGuard
+ gogo(&gp.sched) // never return
+ }
+ }
+
+ if gp.stack.lo == 0 {
+ throw("missing stack in newstack")
+ }
+ sp := gp.sched.sp
+ if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
+ // The call to morestack costs a word.
+ sp -= goarch.PtrSize
+ }
+ if stackDebug >= 1 || sp < gp.stack.lo {
+ print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
+ "\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
+ "\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
+ }
+ if sp < gp.stack.lo {
+ print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
+ print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
+ throw("runtime: split stack overflow")
+ }
+
+ if preempt {
+ if gp == thisg.m.g0 {
+ throw("runtime: preempt g0")
+ }
+ if thisg.m.p == 0 && thisg.m.locks == 0 {
+ throw("runtime: g is running but p is not")
+ }
+
+ if gp.preemptShrink {
+ // We're at a synchronous safe point now, so
+ // do the pending stack shrink.
+ gp.preemptShrink = false
+ shrinkstack(gp)
+ }
+
+ if gp.preemptStop {
+ preemptPark(gp) // never returns
+ }
+
+ // Act like goroutine called runtime.Gosched.
+ gopreempt_m(gp) // never return
+ }
+
+ // Allocate a bigger segment and move the stack.
+ oldsize := gp.stack.hi - gp.stack.lo
+ newsize := oldsize * 2
+
+ // Make sure we grow at least as much as needed to fit the new frame.
+ // (This is just an optimization - the caller of morestack will
+ // recheck the bounds on return.)
+ if f := findfunc(gp.sched.pc); f.valid() {
+ max := uintptr(funcMaxSPDelta(f))
+ needed := max + _StackGuard
+ used := gp.stack.hi - gp.sched.sp
+ for newsize-used < needed {
+ newsize *= 2
+ }
+ }
+
+ if stackguard0 == stackForceMove {
+ // Forced stack movement used for debugging.
+ // Don't double the stack (or we may quickly run out
+ // if this is done repeatedly).
+ newsize = oldsize
+ }
+
+ if newsize > maxstacksize || newsize > maxstackceiling {
+ if maxstacksize < maxstackceiling {
+ print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
+ } else {
+ print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
+ }
+ print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
+ throw("stack overflow")
+ }
+
+ // The goroutine must be executing in order to call newstack,
+ // so it must be Grunning (or Gscanrunning).
+ casgstatus(gp, _Grunning, _Gcopystack)
+
+ // The concurrent GC will not scan the stack while we are doing the copy since
+ // the gp is in a Gcopystack status.
+ copystack(gp, newsize)
+ if stackDebug >= 1 {
+ print("stack grow done\n")
+ }
+ casgstatus(gp, _Gcopystack, _Grunning)
+ gogo(&gp.sched)
+}
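+
+// exampleGrowthCopies is an editor's illustration, not part of the upstream
+// file: why the doubling above has constant amortized cost. Growing from
+// 8 KiB to 1 MiB copies at most 8+16+...+512 KiB, i.e. just under 1 MiB in
+// total, so copying work stays proportional to the final stack size.
+func exampleGrowthCopies(oldsize, target uintptr) (copied uintptr) {
+ for size := oldsize; size < target; size *= 2 {
+ copied += size // each doubling copies at most the current stack
+ }
+ return copied
+}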
+
+//go:nosplit
+func nilfunc() {
+ *(*uint8)(nil) = 0
+}
+
+// adjust Gobuf as if it executed a call to fn
+// and then stopped before the first instruction in fn.
+func gostartcallfn(gobuf *gobuf, fv *funcval) {
+ var fn unsafe.Pointer
+ if fv != nil {
+ fn = unsafe.Pointer(fv.fn)
+ } else {
+ fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
+ }
+ gostartcall(gobuf, fn, unsafe.Pointer(fv))
+}
+
+// isShrinkStackSafe returns whether it's safe to attempt to shrink
+// gp's stack. Shrinking the stack is only safe when we have precise
+// pointer maps for all frames on the stack.
+func isShrinkStackSafe(gp *g) bool {
+ // We can't copy the stack if we're in a syscall.
+ // The syscall might have pointers into the stack and
+ // often we don't have precise pointer maps for the innermost
+ // frames.
+ //
+ // We also can't copy the stack if we're at an asynchronous
+ // safe-point because we don't have precise pointer maps for
+ // all frames.
+ //
+ // We also can't *shrink* the stack in the window between the
+ // goroutine calling gopark to park on a channel and
+ // gp.activeStackChans being set.
+ return gp.syscallsp == 0 && !gp.asyncSafePoint && atomic.Load8(&gp.parkingOnChan) == 0
+}
+
+// Maybe shrink the stack being used by gp.
+//
+// gp must be stopped and we must own its stack. It may be in
+// _Grunning, but only if this is our own user G.
+func shrinkstack(gp *g) {
+ if gp.stack.lo == 0 {
+ throw("missing stack in shrinkstack")
+ }
+ if s := readgstatus(gp); s&_Gscan == 0 {
+ // We don't own the stack via _Gscan. We could still
+ // own it if this is our own user G and we're on the
+ // system stack.
+ if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
+ // We don't own the stack.
+ throw("bad status in shrinkstack")
+ }
+ }
+ if !isShrinkStackSafe(gp) {
+ throw("shrinkstack at bad time")
+ }
+ // Check for self-shrinks while in a libcall. These may have
+ // pointers into the stack disguised as uintptrs, but these
+ // code paths should all be nosplit.
+ if gp == getg().m.curg && gp.m.libcallsp != 0 {
+ throw("shrinking stack in libcall")
+ }
+
+ if debug.gcshrinkstackoff > 0 {
+ return
+ }
+ f := findfunc(gp.startpc)
+ if f.valid() && f.funcID == funcID_gcBgMarkWorker {
+ // We're not allowed to shrink the gcBgMarkWorker
+ // stack (see gcBgMarkWorker for explanation).
+ return
+ }
+
+ oldsize := gp.stack.hi - gp.stack.lo
+ newsize := oldsize / 2
+ // Don't shrink the allocation below the minimum-sized stack
+ // allocation.
+ if newsize < _FixedStack {
+ return
+ }
+ // Compute how much of the stack is currently in use and only
+ // shrink the stack if gp is using less than a quarter of its
+ // current stack. The currently used stack includes everything
+ // down to the SP plus the stack guard space that ensures
+ // there's room for nosplit functions.
+ avail := gp.stack.hi - gp.stack.lo
+ if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
+ return
+ }
+
+ if stackDebug > 0 {
+ print("shrinking stack ", oldsize, "->", newsize, "\n")
+ }
+
+ copystack(gp, newsize)
+}
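+
+// exampleShouldShrink is an editor's illustration, not part of the upstream
+// file: the quarter-usage test above. A goroutine holding a 64 KiB stack but
+// using only 10 KiB (SP depth plus guard space) passes the test, so its
+// stack is halved to 32 KiB.
+func exampleShouldShrink(avail, used uintptr) bool {
+ return used < avail/4
+}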
+
+// freeStackSpans frees unused stack spans at the end of GC.
+func freeStackSpans() {
+ // Scan stack pools for empty stack spans.
+ for order := range stackpool {
+ lock(&stackpool[order].item.mu)
+ list := &stackpool[order].item.span
+ for s := list.first; s != nil; {
+ next := s.next
+ if s.allocCount == 0 {
+ list.remove(s)
+ s.manualFreeList = 0
+ osStackFree(s)
+ mheap_.freeManual(s, spanAllocStack)
+ }
+ s = next
+ }
+ unlock(&stackpool[order].item.mu)
+ }
+
+ // Free large stack spans.
+ lock(&stackLarge.lock)
+ for i := range stackLarge.free {
+ for s := stackLarge.free[i].first; s != nil; {
+ next := s.next
+ stackLarge.free[i].remove(s)
+ osStackFree(s)
+ mheap_.freeManual(s, spanAllocStack)
+ s = next
+ }
+ }
+ unlock(&stackLarge.lock)
+}
+
+// getStackMap returns the locals and arguments live pointer maps, and
+// stack object list for frame.
+func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
+ targetpc := frame.continpc
+ if targetpc == 0 {
+ // Frame is dead. Return empty bitvectors.
+ return
+ }
+
+ f := frame.fn
+ pcdata := int32(-1)
+ if targetpc != f.entry() {
+ // Back up to the CALL. If we're at the function entry
+ // point, we want to use the entry map (-1), even if
+ // the first instruction of the function changes the
+ // stack map.
+ targetpc--
+ pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
+ }
+ if pcdata == -1 {
+ // We do not have a valid pcdata value but there might be a
+ // stackmap for this function. It is likely that we are looking
+ // at the function prologue, assume so and hope for the best.
+ pcdata = 0
+ }
+
+ // Local variables.
+ size := frame.varp - frame.sp
+ var minsize uintptr
+ switch goarch.ArchFamily {
+ case goarch.ARM64:
+ minsize = sys.StackAlign
+ default:
+ minsize = sys.MinFrameSize
+ }
+ if size > minsize {
+ stackid := pcdata
+ stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
+ if stkmap == nil || stkmap.n <= 0 {
+ print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
+ throw("missing stackmap")
+ }
+ // If nbit == 0, there's no work to do.
+ if stkmap.nbit > 0 {
+ if stackid < 0 || stackid >= stkmap.n {
+ // don't know where we are
+ print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
+ throw("bad symbol table")
+ }
+ locals = stackmapdata(stkmap, stackid)
+ if stackDebug >= 3 && debug {
+ print(" locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
+ }
+ } else if stackDebug >= 3 && debug {
+ print(" no locals to adjust\n")
+ }
+ }
+
+ // Arguments.
+ if frame.arglen > 0 {
+ if frame.argmap != nil {
+ // argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
+ // In this case, arglen specifies how much of the args section is actually live.
+ // (It could be either all the args + results, or just the args.)
+ args = *frame.argmap
+ n := int32(frame.arglen / goarch.PtrSize)
+ if n < args.n {
+ args.n = n // Don't use more of the arguments than arglen.
+ }
+ } else {
+ stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
+ if stackmap == nil || stackmap.n <= 0 {
+ print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
+ throw("missing stackmap")
+ }
+ if pcdata < 0 || pcdata >= stackmap.n {
+ // don't know where we are
+ print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
+ throw("bad symbol table")
+ }
+ if stackmap.nbit > 0 {
+ args = stackmapdata(stackmap, pcdata)
+ }
+ }
+ }
+
+ // stack objects.
+ if (GOARCH == "amd64" || GOARCH == "arm64" || GOARCH == "ppc64" || GOARCH == "ppc64le" || GOARCH == "riscv64") &&
+ unsafe.Sizeof(abi.RegArgs{}) > 0 && frame.argmap != nil {
+ // argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
+ // We don't actually use argmap in this case, but we need to fake the stack object
+ // record for these frames which contain an internal/abi.RegArgs at a hard-coded offset.
+ // This offset matches the assembly code on amd64 and arm64.
+ objs = methodValueCallFrameObjs[:]
+ } else {
+ p := funcdata(f, _FUNCDATA_StackObjects)
+ if p != nil {
+ n := *(*uintptr)(p)
+ p = add(p, goarch.PtrSize)
+ r0 := (*stackObjectRecord)(noescape(p))
+ objs = unsafe.Slice(r0, int(n))
+ // Note: the noescape above is needed to keep
+ // getStackMap from "leaking param content:
+ // frame". That leak propagates up to getgcmask, then
+ // GCMask, then verifyGCInfo, which converts the stack
+ // gcinfo tests into heap gcinfo tests :(
+ }
+ }
+
+ return
+}
+
+var methodValueCallFrameObjs [1]stackObjectRecord // initialized in stackobjectinit
+
+func stkobjinit() {
+ var abiRegArgsEface any = abi.RegArgs{}
+ abiRegArgsType := efaceOf(&abiRegArgsEface)._type
+ if abiRegArgsType.kind&kindGCProg != 0 {
+ throw("abiRegArgsType needs GC Prog, update methodValueCallFrameObjs")
+ }
+ // Set methodValueCallFrameObjs[0].gcdataoff so that
+ // stackObjectRecord.gcdata() will work correctly with it.
+ ptr := uintptr(unsafe.Pointer(&methodValueCallFrameObjs[0]))
+ var mod *moduledata
+ for datap := &firstmoduledata; datap != nil; datap = datap.next {
+ if datap.gofunc <= ptr && ptr < datap.end {
+ mod = datap
+ break
+ }
+ }
+ if mod == nil {
+ throw("methodValueCallFrameObjs is not in a module")
+ }
+ methodValueCallFrameObjs[0] = stackObjectRecord{
+ off: -int32(alignUp(abiRegArgsType.size, 8)), // It's always the highest address local.
+ size: int32(abiRegArgsType.size),
+ _ptrdata: int32(abiRegArgsType.ptrdata),
+ gcdataoff: uint32(uintptr(unsafe.Pointer(abiRegArgsType.gcdata)) - mod.rodata),
+ }
+}
+
+// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
+// This record must match the generator code in cmd/compile/internal/liveness/plive.go:emitStackObjects.
+type stackObjectRecord struct {
+ // offset in frame
+ // if negative, offset from varp
+ // if non-negative, offset from argp
+ off int32
+ size int32
+ _ptrdata int32 // ptrdata, or -ptrdata if a GC prog is used
+ gcdataoff uint32 // offset to gcdata from moduledata.rodata
+}
+
+func (r *stackObjectRecord) useGCProg() bool {
+ return r._ptrdata < 0
+}
+
+func (r *stackObjectRecord) ptrdata() uintptr {
+ x := r._ptrdata
+ if x < 0 {
+ return uintptr(-x)
+ }
+ return uintptr(x)
+}
+
+// gcdata returns pointer map or GC prog of the type.
+func (r *stackObjectRecord) gcdata() *byte {
+ ptr := uintptr(unsafe.Pointer(r))
+ var mod *moduledata
+ for datap := &firstmoduledata; datap != nil; datap = datap.next {
+ if datap.gofunc <= ptr && ptr < datap.end {
+ mod = datap
+ break
+ }
+ }
+ // If you get a panic here due to a nil mod,
+ // you may have made a copy of a stackObjectRecord.
+ // You must use the original pointer.
+ res := mod.rodata + uintptr(r.gcdataoff)
+ return (*byte)(unsafe.Pointer(res))
+}
+
+// This is exported as ABI0 via linkname so obj can call it.
+//
+//go:nosplit
+//go:linkname morestackc
+func morestackc() {
+ throw("attempt to execute system stack code on user stack")
+}
+
+// startingStackSize is the amount of stack that new goroutines start with.
+// It is a power of 2, and between _FixedStack and maxstacksize, inclusive.
+// startingStackSize is updated every GC by tracking the average size of
+// stacks scanned during the GC.
+var startingStackSize uint32 = _FixedStack
+
+func gcComputeStartingStackSize() {
+ if debug.adaptivestackstart == 0 {
+ return
+ }
+ // For details, see the design doc at
+ // https://docs.google.com/document/d/1YDlGIdVTPnmUiTAavlZxBI1d9pwGQgZT7IKFKlIXohQ/edit?usp=sharing
+ // The basic algorithm is to track the average size of stacks
+ // and start goroutines with stack equal to that average size.
+ // Starting at the average size uses at most 2x the space that
+ // an ideal algorithm would have used.
+ // This is just a heuristic to avoid excessive stack growth work
+ // early in a goroutine's lifetime. See issue 18138. Stacks that
+ // are allocated too small can still grow, and stacks allocated
+ // too large can still shrink.
+ var scannedStackSize uint64
+ var scannedStacks uint64
+ for _, p := range allp {
+ scannedStackSize += p.scannedStackSize
+ scannedStacks += p.scannedStacks
+ // Reset for next time
+ p.scannedStackSize = 0
+ p.scannedStacks = 0
+ }
+ if scannedStacks == 0 {
+ startingStackSize = _FixedStack
+ return
+ }
+ avg := scannedStackSize/scannedStacks + _StackGuard
+ // Note: we add _StackGuard to ensure that a goroutine that
+ // uses the average space will not trigger a growth.
+ if avg > uint64(maxstacksize) {
+ avg = uint64(maxstacksize)
+ }
+ if avg < _FixedStack {
+ avg = _FixedStack
+ }
+ // Note: maxstacksize fits in 30 bits, so avg also does.
+ startingStackSize = uint32(round2(int32(avg)))
+}
diff --git a/contrib/go/_std_1.19/src/runtime/string.go b/contrib/go/_std_1.19/src/runtime/string.go
new file mode 100644
index 0000000000..359a5658c5
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/string.go
@@ -0,0 +1,589 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/abi"
+ "internal/bytealg"
+ "internal/goarch"
+ "unsafe"
+)
+
+// The constant is known to the compiler.
+// There is no fundamental theory behind this number.
+const tmpStringBufSize = 32
+
+type tmpBuf [tmpStringBufSize]byte
+
+// concatstrings implements a Go string concatenation x+y+z+...
+// The operands are passed in the slice a.
+// If buf != nil, the compiler has determined that the result does not
+// escape the calling function, so the string data can be stored in buf
+// if small enough.
+func concatstrings(buf *tmpBuf, a []string) string {
+ idx := 0
+ l := 0
+ count := 0
+ for i, x := range a {
+ n := len(x)
+ if n == 0 {
+ continue
+ }
+ if l+n < l {
+ throw("string concatenation too long")
+ }
+ l += n
+ count++
+ idx = i
+ }
+ if count == 0 {
+ return ""
+ }
+
+ // If there is just one string and either it is not on the stack
+ // or our result does not escape the calling frame (buf != nil),
+ // then we can return that string directly.
+ if count == 1 && (buf != nil || !stringDataOnStack(a[idx])) {
+ return a[idx]
+ }
+ s, b := rawstringtmp(buf, l)
+ for _, x := range a {
+ copy(b, x)
+ b = b[len(x):]
+ }
+ return s
+}
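+
+// exampleConcatFastPath is an editor's illustration, not part of the
+// upstream file: when all but one operand is empty, count == 1, and if the
+// surviving string is not stack-backed (or buf != nil) concatstrings
+// returns it directly without copying.
+func exampleConcatFastPath(buf *tmpBuf, s string) string {
+ return concatstrings(buf, []string{"", s, ""})
+}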
+
+func concatstring2(buf *tmpBuf, a0, a1 string) string {
+ return concatstrings(buf, []string{a0, a1})
+}
+
+func concatstring3(buf *tmpBuf, a0, a1, a2 string) string {
+ return concatstrings(buf, []string{a0, a1, a2})
+}
+
+func concatstring4(buf *tmpBuf, a0, a1, a2, a3 string) string {
+ return concatstrings(buf, []string{a0, a1, a2, a3})
+}
+
+func concatstring5(buf *tmpBuf, a0, a1, a2, a3, a4 string) string {
+ return concatstrings(buf, []string{a0, a1, a2, a3, a4})
+}
+
+// slicebytetostring converts a byte slice to a string.
+// It is inserted by the compiler into generated code.
+// ptr is a pointer to the first element of the slice;
+// n is the length of the slice.
+// buf is a fixed-size buffer for the result;
+// it is not nil if the result does not escape.
+func slicebytetostring(buf *tmpBuf, ptr *byte, n int) (str string) {
+ if n == 0 {
+ // Turns out to be a relatively common case.
+ // Consider that you want to parse out data between parens in "foo()bar":
+ // you find the indices and convert the subslice to string.
+ return ""
+ }
+ if raceenabled {
+ racereadrangepc(unsafe.Pointer(ptr),
+ uintptr(n),
+ getcallerpc(),
+ abi.FuncPCABIInternal(slicebytetostring))
+ }
+ if msanenabled {
+ msanread(unsafe.Pointer(ptr), uintptr(n))
+ }
+ if asanenabled {
+ asanread(unsafe.Pointer(ptr), uintptr(n))
+ }
+ if n == 1 {
+ p := unsafe.Pointer(&staticuint64s[*ptr])
+ if goarch.BigEndian {
+ p = add(p, 7)
+ }
+ stringStructOf(&str).str = p
+ stringStructOf(&str).len = 1
+ return
+ }
+
+ var p unsafe.Pointer
+ if buf != nil && n <= len(buf) {
+ p = unsafe.Pointer(buf)
+ } else {
+ p = mallocgc(uintptr(n), nil, false)
+ }
+ stringStructOf(&str).str = p
+ stringStructOf(&str).len = n
+ memmove(p, unsafe.Pointer(ptr), uintptr(n))
+ return
+}
+
+// stringDataOnStack reports whether the string's data is
+// stored on the current goroutine's stack.
+func stringDataOnStack(s string) bool {
+ ptr := uintptr(stringStructOf(&s).str)
+ stk := getg().stack
+ return stk.lo <= ptr && ptr < stk.hi
+}
+
+func rawstringtmp(buf *tmpBuf, l int) (s string, b []byte) {
+ if buf != nil && l <= len(buf) {
+ b = buf[:l]
+ s = slicebytetostringtmp(&b[0], len(b))
+ } else {
+ s, b = rawstring(l)
+ }
+ return
+}
+
+// slicebytetostringtmp returns a "string" referring to the actual []byte bytes.
+//
+// Callers need to ensure that the returned string will not be used after
+// the calling goroutine modifies the original slice or synchronizes with
+// another goroutine.
+//
+// The function is only called when instrumenting
+// and otherwise intrinsified by the compiler.
+//
+// Some internal compiler optimizations use this function.
+// - Used for m[T1{... Tn{..., string(k), ...} ...}] and m[string(k)]
+// where k is []byte, T1 to Tn is a nesting of struct and array literals.
+// - Used for "<"+string(b)+">" concatenation where b is []byte.
+// - Used for string(b)=="foo" comparison where b is []byte.
+func slicebytetostringtmp(ptr *byte, n int) (str string) {
+ if raceenabled && n > 0 {
+ racereadrangepc(unsafe.Pointer(ptr),
+ uintptr(n),
+ getcallerpc(),
+ abi.FuncPCABIInternal(slicebytetostringtmp))
+ }
+ if msanenabled && n > 0 {
+ msanread(unsafe.Pointer(ptr), uintptr(n))
+ }
+ if asanenabled && n > 0 {
+ asanread(unsafe.Pointer(ptr), uintptr(n))
+ }
+ stringStructOf(&str).str = unsafe.Pointer(ptr)
+ stringStructOf(&str).len = n
+ return
+}
+
+func stringtoslicebyte(buf *tmpBuf, s string) []byte {
+ var b []byte
+ if buf != nil && len(s) <= len(buf) {
+ *buf = tmpBuf{}
+ b = buf[:len(s)]
+ } else {
+ b = rawbyteslice(len(s))
+ }
+ copy(b, s)
+ return b
+}
+
+func stringtoslicerune(buf *[tmpStringBufSize]rune, s string) []rune {
+ // Two passes.
+ // Unlike slicerunetostring, there is no race because strings are immutable.
+ n := 0
+ for range s {
+ n++
+ }
+
+ var a []rune
+ if buf != nil && n <= len(buf) {
+ *buf = [tmpStringBufSize]rune{}
+ a = buf[:n]
+ } else {
+ a = rawruneslice(n)
+ }
+
+ n = 0
+ for _, r := range s {
+ a[n] = r
+ n++
+ }
+ return a
+}
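+
+// exampleRuneCount is an editor's illustration, not part of the upstream
+// file: the counting pass above. Ranging over a string yields runes, not
+// bytes, so a 6-byte UTF-8 string such as "héllo" counts 5 runes and the
+// result slice is sized accordingly.
+func exampleRuneCount(s string) int {
+ n := 0
+ for range s {
+ n++
+ }
+ return n
+}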
+
+func slicerunetostring(buf *tmpBuf, a []rune) string {
+ if raceenabled && len(a) > 0 {
+ racereadrangepc(unsafe.Pointer(&a[0]),
+ uintptr(len(a))*unsafe.Sizeof(a[0]),
+ getcallerpc(),
+ abi.FuncPCABIInternal(slicerunetostring))
+ }
+ if msanenabled && len(a) > 0 {
+ msanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
+ }
+ if asanenabled && len(a) > 0 {
+ asanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
+ }
+ var dum [4]byte
+ size1 := 0
+ for _, r := range a {
+ size1 += encoderune(dum[:], r)
+ }
+ s, b := rawstringtmp(buf, size1+3)
+ size2 := 0
+ for _, r := range a {
+ // check for race
+ if size2 >= size1 {
+ break
+ }
+ size2 += encoderune(b[size2:], r)
+ }
+ return s[:size2]
+}
+
+type stringStruct struct {
+ str unsafe.Pointer
+ len int
+}
+
+// Variant with *byte pointer type for DWARF debugging.
+type stringStructDWARF struct {
+ str *byte
+ len int
+}
+
+func stringStructOf(sp *string) *stringStruct {
+ return (*stringStruct)(unsafe.Pointer(sp))
+}
+
+func intstring(buf *[4]byte, v int64) (s string) {
+ var b []byte
+ if buf != nil {
+ b = buf[:]
+ s = slicebytetostringtmp(&b[0], len(b))
+ } else {
+ s, b = rawstring(4)
+ }
+ if int64(rune(v)) != v {
+ v = runeError
+ }
+ n := encoderune(b, rune(v))
+ return s[:n]
+}
+
+// rawstring allocates storage for a new string. The returned
+// string and byte slice both refer to the same storage.
+// The storage is not zeroed. Callers should use
+// b to set the string contents and then drop b.
+func rawstring(size int) (s string, b []byte) {
+ p := mallocgc(uintptr(size), nil, false)
+
+ stringStructOf(&s).str = p
+ stringStructOf(&s).len = size
+
+ *(*slice)(unsafe.Pointer(&b)) = slice{p, size, size}
+
+ return
+}
+
+// rawbyteslice allocates a new byte slice. The byte slice is not zeroed.
+func rawbyteslice(size int) (b []byte) {
+ cap := roundupsize(uintptr(size))
+ p := mallocgc(cap, nil, false)
+ if cap != uintptr(size) {
+ memclrNoHeapPointers(add(p, uintptr(size)), cap-uintptr(size))
+ }
+
+ *(*slice)(unsafe.Pointer(&b)) = slice{p, size, int(cap)}
+ return
+}
+
+// rawruneslice allocates a new rune slice. The rune slice is not zeroed.
+func rawruneslice(size int) (b []rune) {
+ if uintptr(size) > maxAlloc/4 {
+ throw("out of memory")
+ }
+ mem := roundupsize(uintptr(size) * 4)
+ p := mallocgc(mem, nil, false)
+ if mem != uintptr(size)*4 {
+ memclrNoHeapPointers(add(p, uintptr(size)*4), mem-uintptr(size)*4)
+ }
+
+ *(*slice)(unsafe.Pointer(&b)) = slice{p, size, int(mem / 4)}
+ return
+}
+
+// used by cmd/cgo
+func gobytes(p *byte, n int) (b []byte) {
+ if n == 0 {
+ return make([]byte, 0)
+ }
+
+ if n < 0 || uintptr(n) > maxAlloc {
+ panic(errorString("gobytes: length out of range"))
+ }
+
+ bp := mallocgc(uintptr(n), nil, false)
+ memmove(bp, unsafe.Pointer(p), uintptr(n))
+
+ *(*slice)(unsafe.Pointer(&b)) = slice{bp, n, n}
+ return
+}
+
+// This is exported via linkname to assembly in syscall (for Plan9).
+//
+//go:linkname gostring
+func gostring(p *byte) string {
+ l := findnull(p)
+ if l == 0 {
+ return ""
+ }
+ s, b := rawstring(l)
+ memmove(unsafe.Pointer(&b[0]), unsafe.Pointer(p), uintptr(l))
+ return s
+}
+
+func gostringn(p *byte, l int) string {
+ if l == 0 {
+ return ""
+ }
+ s, b := rawstring(l)
+ memmove(unsafe.Pointer(&b[0]), unsafe.Pointer(p), uintptr(l))
+ return s
+}
+
+func hasPrefix(s, prefix string) bool {
+ return len(s) >= len(prefix) && s[:len(prefix)] == prefix
+}
+
+const (
+ maxUint64 = ^uint64(0)
+ maxInt64 = int64(maxUint64 >> 1)
+)
+
+// atoi64 parses an int64 from a string s.
+// The bool result reports whether s is a number
+// representable by a value of type int64.
+func atoi64(s string) (int64, bool) {
+ if s == "" {
+ return 0, false
+ }
+
+ neg := false
+ if s[0] == '-' {
+ neg = true
+ s = s[1:]
+ }
+
+ un := uint64(0)
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c < '0' || c > '9' {
+ return 0, false
+ }
+ if un > maxUint64/10 {
+ // overflow
+ return 0, false
+ }
+ un *= 10
+ un1 := un + uint64(c) - '0'
+ if un1 < un {
+ // overflow
+ return 0, false
+ }
+ un = un1
+ }
+
+ if !neg && un > uint64(maxInt64) {
+ return 0, false
+ }
+ if neg && un > uint64(maxInt64)+1 {
+ return 0, false
+ }
+
+ n := int64(un)
+ if neg {
+ n = -n
+ }
+
+ return n, true
+}
+
+// atoi is like atoi64 but for integers
+// that fit into an int.
+func atoi(s string) (int, bool) {
+ if n, ok := atoi64(s); n == int64(int(n)) {
+ return int(n), ok
+ }
+ return 0, false
+}
+
+// atoi32 is like atoi but for integers
+// that fit into an int32.
+func atoi32(s string) (int32, bool) {
+ if n, ok := atoi64(s); n == int64(int32(n)) {
+ return int32(n), ok
+ }
+ return 0, false
+}
+
+// parseByteCount parses a string that represents a count of bytes.
+//
+// s must match the following regular expression:
+//
+// ^[0-9]+(([KMGT]i)?B)?$
+//
+// In other words, an integer byte count with an optional unit
+// suffix. Acceptable suffixes include one of
+// - KiB, MiB, GiB, TiB which represent binary IEC/ISO 80000 units, or
+// - B, which just represents bytes.
+//
+// Returns an int64 because that's what its callers want and receive,
+// but the result is always non-negative.
+func parseByteCount(s string) (int64, bool) {
+ // The empty string is not valid.
+ if s == "" {
+ return 0, false
+ }
+ // Handle the easy non-suffix case.
+ last := s[len(s)-1]
+ if last >= '0' && last <= '9' {
+ n, ok := atoi64(s)
+ if !ok || n < 0 {
+ return 0, false
+ }
+ return n, ok
+ }
+ // Failing a trailing digit, this must always end in 'B'.
+ // Also at this point there must be at least one digit before
+ // that B.
+ if last != 'B' || len(s) < 2 {
+ return 0, false
+ }
+ // The one before that must always be a digit or 'i'.
+ if c := s[len(s)-2]; c >= '0' && c <= '9' {
+ // Trivial 'B' suffix.
+ n, ok := atoi64(s[:len(s)-1])
+ if !ok || n < 0 {
+ return 0, false
+ }
+ return n, ok
+ } else if c != 'i' {
+ return 0, false
+ }
+ // Finally, we need at least 4 characters now, for the unit
+ // prefix and at least one digit.
+ if len(s) < 4 {
+ return 0, false
+ }
+ power := 0
+ switch s[len(s)-3] {
+ case 'K':
+ power = 1
+ case 'M':
+ power = 2
+ case 'G':
+ power = 3
+ case 'T':
+ power = 4
+ default:
+ // Invalid suffix.
+ return 0, false
+ }
+ m := uint64(1)
+ for i := 0; i < power; i++ {
+ m *= 1024
+ }
+ n, ok := atoi64(s[:len(s)-3])
+ if !ok || n < 0 {
+ return 0, false
+ }
+ un := uint64(n)
+ if un > maxUint64/m {
+ // Overflow.
+ return 0, false
+ }
+ un *= m
+ if un > uint64(maxInt64) {
+ // Overflow.
+ return 0, false
+ }
+ return int64(un), true
+}
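+
+// exampleParseByteCount is an editor's illustration, not part of the
+// upstream file: inputs accepted and rejected by parseByteCount. Plain
+// digit strings and IEC suffixes parse; the SI-style "KB" (missing the 'i')
+// and a bare suffix do not.
+func exampleParseByteCount() {
+ for _, s := range []string{"1024", "1KiB", "2MiB", "1KB", "KiB"} {
+ n, ok := parseByteCount(s)
+ println(s, n, ok) // 1024 true, 1024 true, 2097152 true, 0 false, 0 false
+ }
+}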
+
+//go:nosplit
+func findnull(s *byte) int {
+ if s == nil {
+ return 0
+ }
+
+ // Avoid IndexByteString on Plan 9 because it uses SSE instructions
+ // on x86 machines, and those are classified as floating point instructions,
+ // which are illegal in a note handler.
+ if GOOS == "plan9" {
+ p := (*[maxAlloc/2 - 1]byte)(unsafe.Pointer(s))
+ l := 0
+ for p[l] != 0 {
+ l++
+ }
+ return l
+ }
+
+ // pageSize is the unit we scan at a time looking for NULL.
+ // It must be the minimum page size for any architecture Go
+ // runs on. It's okay (just a minor performance loss) if the
+ // actual system page size is larger than this value.
+ const pageSize = 4096
+
+ offset := 0
+ ptr := unsafe.Pointer(s)
+ // IndexByteString uses wide reads, so we need to be careful
+ // with page boundaries. Call IndexByteString on
+ // [ptr, endOfPage) interval.
+ safeLen := int(pageSize - uintptr(ptr)%pageSize)
+
+ for {
+ t := *(*string)(unsafe.Pointer(&stringStruct{ptr, safeLen}))
+ // Check one page at a time.
+ if i := bytealg.IndexByteString(t, 0); i != -1 {
+ return offset + i
+ }
+ // Move to next page
+ ptr = unsafe.Pointer(uintptr(ptr) + uintptr(safeLen))
+ offset += safeLen
+ safeLen = pageSize
+ }
+}
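+
+// exampleFirstSafeLen is an editor's illustration, not part of the upstream
+// file: the page-clamped span findnull scans first. A pointer 4000 bytes
+// into a 4096-byte page may be read for at most 96 bytes before crossing a
+// page boundary; after that first call, whole pages are scanned.
+func exampleFirstSafeLen(ptr uintptr) int {
+ const pageSize = 4096
+ return int(pageSize - ptr%pageSize)
+}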
+
+func findnullw(s *uint16) int {
+ if s == nil {
+ return 0
+ }
+ p := (*[maxAlloc/2/2 - 1]uint16)(unsafe.Pointer(s))
+ l := 0
+ for p[l] != 0 {
+ l++
+ }
+ return l
+}
+
+//go:nosplit
+func gostringnocopy(str *byte) string {
+ ss := stringStruct{str: unsafe.Pointer(str), len: findnull(str)}
+ s := *(*string)(unsafe.Pointer(&ss))
+ return s
+}
+
+func gostringw(strw *uint16) string {
+ var buf [8]byte
+ str := (*[maxAlloc/2/2 - 1]uint16)(unsafe.Pointer(strw))
+ n1 := 0
+ for i := 0; str[i] != 0; i++ {
+ n1 += encoderune(buf[:], rune(str[i]))
+ }
+ s, b := rawstring(n1 + 4)
+ n2 := 0
+ for i := 0; str[i] != 0; i++ {
+ // check for race
+ if n2 >= n1 {
+ break
+ }
+ n2 += encoderune(b[n2:], rune(str[i]))
+ }
+ b[n2] = 0 // for luck
+ return s[:n2]
+}
diff --git a/contrib/go/_std_1.19/src/runtime/stubs.go b/contrib/go/_std_1.19/src/runtime/stubs.go
new file mode 100644
index 0000000000..929f8fadca
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/stubs.go
@@ -0,0 +1,480 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/abi"
+ "internal/goarch"
+ "runtime/internal/math"
+ "unsafe"
+)
+
+// Should be a built-in for unsafe.Pointer?
+//
+//go:nosplit
+func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(p) + x)
+}
+
+// getg returns the pointer to the current g.
+// The compiler rewrites calls to this function into instructions
+// that fetch the g directly (from TLS or from the dedicated register).
+func getg() *g
+
+// mcall switches from the g to the g0 stack and invokes fn(g),
+// where g is the goroutine that made the call.
+// mcall saves g's current PC/SP in g->sched so that it can be restored later.
+// It is up to fn to arrange for that later execution, typically by recording
+// g in a data structure, causing something to call ready(g) later.
+// mcall returns to the original goroutine g later, when g has been rescheduled.
+// fn must not return at all; typically it ends by calling schedule, to let the m
+// run other goroutines.
+//
+// mcall can only be called from g stacks (not g0, not gsignal).
+//
+// This must NOT be go:noescape: if fn is a stack-allocated closure,
+// fn puts g on a run queue, and g executes before fn returns, the
+// closure will be invalidated while it is still executing.
+func mcall(fn func(*g))
+
+// systemstack runs fn on a system stack.
+// If systemstack is called from the per-OS-thread (g0) stack, or
+// if systemstack is called from the signal handling (gsignal) stack,
+// systemstack calls fn directly and returns.
+// Otherwise, systemstack is being called from the limited stack
+// of an ordinary goroutine. In this case, systemstack switches
+// to the per-OS-thread stack, calls fn, and switches back.
+// It is common to use a func literal as the argument, in order
+// to share inputs and outputs with the code around the call
+// to system stack:
+//
+// ... set up y ...
+// systemstack(func() {
+// x = bigcall(y)
+// })
+// ... use x ...
+//
+//go:noescape
+func systemstack(fn func())
+
+var badsystemstackMsg = "fatal: systemstack called from unexpected goroutine"
+
+//go:nosplit
+//go:nowritebarrierrec
+func badsystemstack() {
+ sp := stringStructOf(&badsystemstackMsg)
+ write(2, sp.str, int32(sp.len))
+}
+
+// memclrNoHeapPointers clears n bytes starting at ptr.
+//
+// Usually you should use typedmemclr. memclrNoHeapPointers should be
+// used only when the caller knows that *ptr contains no heap pointers
+// because either:
+//
+// *ptr is initialized memory and its type is pointer-free, or
+//
+// *ptr is uninitialized memory (e.g., memory that's being reused
+// for a new allocation) and hence contains only "junk".
+//
+// memclrNoHeapPointers ensures that if ptr is pointer-aligned, and n
+// is a multiple of the pointer size, then any pointer-aligned,
+// pointer-sized portion is cleared atomically. Despite the function
+// name, this is necessary because this function is the underlying
+// implementation of typedmemclr and memclrHasPointers. See the doc of
+// memmove for more details.
+//
+// The (CPU-specific) implementations of this function are in memclr_*.s.
+//
+//go:noescape
+func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
+
+//go:linkname reflect_memclrNoHeapPointers reflect.memclrNoHeapPointers
+func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
+ memclrNoHeapPointers(ptr, n)
+}
+
+// memmove copies n bytes from "from" to "to".
+//
+// memmove ensures that any pointer in "from" is written to "to" with
+// an indivisible write, so that racy reads cannot observe a
+// half-written pointer. This is necessary to prevent the garbage
+// collector from observing invalid pointers, and differs from memmove
+// in unmanaged languages. However, memmove is only required to do
+// this if "from" and "to" may contain pointers, which can only be the
+// case if "from", "to", and "n" are all be word-aligned.
+//
+// Implementations are in memmove_*.s.
+//
+//go:noescape
+func memmove(to, from unsafe.Pointer, n uintptr)
+
+// Outside assembly calls memmove. Make sure it has ABI wrappers.
+//
+//go:linkname memmove
+
+//go:linkname reflect_memmove reflect.memmove
+func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
+ memmove(to, from, n)
+}
+
+// exported value for testing
+const hashLoad = float32(loadFactorNum) / float32(loadFactorDen)
+
+//go:nosplit
+func fastrand() uint32 {
+ mp := getg().m
+ // Implement wyrand: https://github.com/wangyi-fudan/wyhash
+ // Only platforms where math.Mul64 can be lowered
+ // by the compiler should be in this list.
+ if goarch.IsAmd64|goarch.IsArm64|goarch.IsPpc64|
+ goarch.IsPpc64le|goarch.IsMips64|goarch.IsMips64le|
+ goarch.IsS390x|goarch.IsRiscv64 == 1 {
+ mp.fastrand += 0xa0761d6478bd642f
+ hi, lo := math.Mul64(mp.fastrand, mp.fastrand^0xe7037ed1a0b428db)
+ return uint32(hi ^ lo)
+ }
+
+ // Implement xorshift64+: 2 32-bit xorshift sequences added together.
+ // Shift triplet [17,7,16] was calculated as indicated in Marsaglia's
+ // Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+ // This generator passes the SmallCrush suite, part of TestU01 framework:
+ // http://simul.iro.umontreal.ca/testu01/tu01.html
+ t := (*[2]uint32)(unsafe.Pointer(&mp.fastrand))
+ s1, s0 := t[0], t[1]
+ s1 ^= s1 << 17
+ s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
+ t[0], t[1] = s0, s1
+ return s0 + s1
+}
+
+//go:nosplit
+func fastrandn(n uint32) uint32 {
+ // This is similar to fastrand() % n, but faster.
+ // See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
+ return uint32(uint64(fastrand()) * uint64(n) >> 32)
+}
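+
+// exampleFastrandn is an editor's illustration, not part of the upstream
+// file: Lemire's multiply-shift reduction used by fastrandn. For x uniform
+// in [0, 2^32), (uint64(x)*uint64(n))>>32 lands in [0, n) using one multiply
+// instead of the slower modulo.
+func exampleFastrandn(x, n uint32) uint32 {
+ return uint32(uint64(x) * uint64(n) >> 32)
+}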
+
+func fastrand64() uint64 {
+ mp := getg().m
+ // Implement wyrand: https://github.com/wangyi-fudan/wyhash
+ // Only platforms where math.Mul64 can be lowered
+ // by the compiler should be in this list.
+ if goarch.IsAmd64|goarch.IsArm64|goarch.IsPpc64|
+ goarch.IsPpc64le|goarch.IsMips64|goarch.IsMips64le|
+ goarch.IsS390x|goarch.IsRiscv64 == 1 {
+ mp.fastrand += 0xa0761d6478bd642f
+ hi, lo := math.Mul64(mp.fastrand, mp.fastrand^0xe7037ed1a0b428db)
+ return hi ^ lo
+ }
+
+ // Implement xorshift64+: 2 32-bit xorshift sequences added together.
+ // Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+ // This generator passes the SmallCrush suite, part of TestU01 framework:
+ // http://simul.iro.umontreal.ca/testu01/tu01.html
+ t := (*[2]uint32)(unsafe.Pointer(&mp.fastrand))
+ s1, s0 := t[0], t[1]
+ s1 ^= s1 << 17
+ s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
+ r := uint64(s0 + s1)
+
+ s0, s1 = s1, s0
+ s1 ^= s1 << 17
+ s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
+ r += uint64(s0+s1) << 32
+
+ t[0], t[1] = s0, s1
+ return r
+}
+
+func fastrandu() uint {
+ if goarch.PtrSize == 4 {
+ return uint(fastrand())
+ }
+ return uint(fastrand64())
+}
+
+//go:linkname sync_fastrandn sync.fastrandn
+func sync_fastrandn(n uint32) uint32 { return fastrandn(n) }
+
+//go:linkname net_fastrandu net.fastrandu
+func net_fastrandu() uint { return fastrandu() }
+
+//go:linkname os_fastrand os.fastrand
+func os_fastrand() uint32 { return fastrand() }
+
+// in internal/bytealg/equal_*.s
+//
+//go:noescape
+func memequal(a, b unsafe.Pointer, size uintptr) bool
+
+// noescape hides a pointer from escape analysis. noescape is
+// the identity function but escape analysis doesn't think the
+// output depends on the input. noescape is inlined and currently
+// compiles down to zero instructions.
+// USE CAREFULLY!
+//
+//go:nosplit
+func noescape(p unsafe.Pointer) unsafe.Pointer {
+ x := uintptr(p)
+ return unsafe.Pointer(x ^ 0)
+}
+
+// Not all cgocallback frames are actually cgocallback,
+// so not all have these arguments. Mark them uintptr so that the GC
+// does not misinterpret memory when the arguments are not present.
+// cgocallback is not called from Go, only from crosscall2.
+// This in turn calls cgocallbackg, which is where we'll find
+// pointer-declared arguments.
+func cgocallback(fn, frame, ctxt uintptr)
+
+func gogo(buf *gobuf)
+
+func asminit()
+func setg(gg *g)
+func breakpoint()
+
+// reflectcall calls fn with arguments described by stackArgs, stackArgsSize,
+// frameSize, and regArgs.
+//
+// Arguments passed on the stack and space for return values passed on the stack
+// must be laid out at the space pointed to by stackArgs (with total length
+// stackArgsSize) according to the ABI.
+//
+// stackRetOffset must be some value <= stackArgsSize that indicates the
+// offset within stackArgs where the return value space begins.
+//
+// frameSize is the total size of the argument frame at stackArgs and must
+// therefore be >= stackArgsSize. It must include additional space for spilling
+// register arguments for stack growth and preemption.
+//
+// TODO(mknyszek): Once we don't need the additional spill space, remove frameSize,
+// since frameSize will be redundant with stackArgsSize.
+//
+// Arguments passed in registers must be laid out in regArgs according to the ABI.
+// regArgs will hold any return values passed in registers after the call.
+//
+// reflectcall copies stack arguments from stackArgs to the goroutine stack, and
+// then copies stackArgsSize-stackRetOffset bytes back to the return space
+// in stackArgs once fn has completed. It also "unspills" argument registers from
+// regArgs before calling fn, and spills them back into regArgs immediately
+// following the call to fn. If there are results being returned on the stack,
+// the caller should pass the argument frame type as stackArgsType so that
+// reflectcall can execute appropriate write barriers during the copy.
+//
+// reflectcall expects regArgs.ReturnIsPtr to be populated indicating which
+// registers on the return path will contain Go pointers. It will then store
+// these pointers in regArgs.Ptrs such that they are visible to the GC.
+//
+// Package reflect passes a frame type. In package runtime, there is only
+// one call that copies results back, in callbackWrap in syscall_windows.go, and it
+// does NOT pass a frame type, meaning there are no write barriers invoked. See that
+// call site for justification.
+//
+// Package reflect accesses this symbol through a linkname.
+//
+// Arguments passed through to reflectcall do not escape. The type is used
+// only in a very limited callee of reflectcall, the stackArgs are copied, and
+// regArgs is only used in the reflectcall frame.
+//
+//go:noescape
+func reflectcall(stackArgsType *_type, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+
+func procyield(cycles uint32)
+
+type neverCallThisFunction struct{}
+
+// goexit is the return stub at the top of every goroutine call stack.
+// Each goroutine stack is constructed as if goexit called the
+// goroutine's entry point function, so that when the entry point
+// function returns, it will return to goexit, which will call goexit1
+// to perform the actual exit.
+//
+// This function must never be called directly. Call goexit1 instead.
+// gentraceback assumes that goexit terminates the stack. A direct
+// call on the stack will cause gentraceback to stop walking the stack
+// prematurely and if there is leftover state it may panic.
+func goexit(neverCallThisFunction)
+
+// publicationBarrier performs a store/store barrier (a "publication"
+// or "export" barrier). Some form of synchronization is required
+// between initializing an object and making that object accessible to
+// another processor. Without synchronization, the initialization
+// writes and the "publication" write may be reordered, allowing the
+// other processor to follow the pointer and observe an uninitialized
+// object. In general, higher-level synchronization should be used,
+// such as locking or an atomic pointer write. publicationBarrier is
+// for when those aren't an option, such as in the implementation of
+// the memory manager.
+//
+// There's no corresponding barrier for the read side because the read
+// side naturally has a data dependency order. All architectures that
+// Go supports or seems likely to ever support automatically enforce
+// data dependency ordering.
+func publicationBarrier()
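+
+// A sketch of the publish pattern this barrier enables (hypothetical names):
+//
+//	obj := alloc()          // allocate a new object
+//	obj.field = 42          // initialization writes
+//	publicationBarrier()    // order initialization before the publish
+//	shared = obj            // publish; readers may now follow the pointer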
+
+// getcallerpc returns the program counter (PC) of its caller's caller.
+// getcallersp returns the stack pointer (SP) of its caller's caller.
+// The implementation may be a compiler intrinsic; there is not
+// necessarily code implementing this on every platform.
+//
+// For example:
+//
+// func f(arg1, arg2, arg3 int) {
+// pc := getcallerpc()
+// sp := getcallersp()
+// }
+//
+// These two lines find the PC and SP immediately following
+// the call to f (where f will return).
+//
+// The call to getcallerpc and getcallersp must be done in the
+// frame being asked about.
+//
+// The result of getcallersp is correct at the time of the return,
+// but it may be invalidated by any subsequent call to a function
+// that might relocate the stack in order to grow or shrink it.
+// A general rule is that the result of getcallersp should be used
+// immediately and can only be passed to nosplit functions.
+
+//go:noescape
+func getcallerpc() uintptr
+
+//go:noescape
+func getcallersp() uintptr // implemented as an intrinsic on all platforms
+
+// getclosureptr returns the pointer to the current closure.
+// getclosureptr can only be used in an assignment statement
+// at the entry of a function. Moreover, go:nosplit directive
+// must be specified at the declaration of caller function,
+// so that the function prolog does not clobber the closure register.
+// for example:
+//
+// //go:nosplit
+// func f(arg1, arg2, arg3 int) {
+// dx := getclosureptr()
+// }
+//
+// The compiler rewrites calls to this function into instructions that fetch the
+// pointer from a well-known register (DX on x86 architecture, etc.) directly.
+func getclosureptr() uintptr
+
+//go:noescape
+func asmcgocall(fn, arg unsafe.Pointer) int32
+
+func morestack()
+func morestack_noctxt()
+func rt0_go()
+
+// return0 is a stub used to return 0 from deferproc.
+// It is called at the very end of deferproc to signal
+// the calling Go function that it should not jump
+// to deferreturn.
+// in asm_*.s
+func return0()
+
+// in asm_*.s
+// not called directly; definitions here supply type information for traceback.
+// These must have the same signature (arg pointer map) as reflectcall.
+func call16(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call32(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call64(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call128(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call256(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call512(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call1024(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call2048(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call4096(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call8192(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call16384(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call32768(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call65536(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call131072(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call262144(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call524288(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call1048576(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call2097152(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call4194304(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call8388608(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call16777216(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call33554432(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call67108864(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call134217728(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call268435456(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call536870912(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call1073741824(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+
+func systemstack_switch()
+
+// alignUp rounds n up to a multiple of a. a must be a power of 2.
+func alignUp(n, a uintptr) uintptr {
+ return (n + a - 1) &^ (a - 1)
+}
+
+// alignDown rounds n down to a multiple of a. a must be a power of 2.
+func alignDown(n, a uintptr) uintptr {
+ return n &^ (a - 1)
+}
+
+// divRoundUp returns ceil(n / a).
+func divRoundUp(n, a uintptr) uintptr {
+ // a is generally a power of two. This will get inlined and
+ // the compiler will optimize the division.
+ return (n + a - 1) / a
+}
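+
+// Worked examples (a must be a power of 2 for alignUp/alignDown):
+//
+//	alignUp(13, 8) == 16   // (13+7) &^ 7
+//	alignDown(13, 8) == 8  // 13 &^ 7
+//	divRoundUp(13, 8) == 2 // ceil(13/8)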
+
+// checkASM reports whether assembly runtime checks have passed.
+func checkASM() bool
+
+func memequal_varlen(a, b unsafe.Pointer) bool
+
+// bool2int returns 0 if x is false or 1 if x is true.
+func bool2int(x bool) int {
+ // Avoid branches. In the SSA compiler, this compiles to
+ // exactly what you would want it to.
+ return int(uint8(*(*uint8)(unsafe.Pointer(&x))))
+}
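+
+// For example, bool2int(false) == 0 and bool2int(true) == 1; reading the
+// byte representation of x directly avoids a conditional branch.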
+
+// abort crashes the runtime in situations where even throw might not
+// work. In general it should do something a debugger will recognize
+// (e.g., an INT3 on x86). A crash in abort is recognized by the
+// signal handler, which will attempt to tear down the runtime
+// immediately.
+func abort()
+
+// Called from compiled code; declared for vet; do NOT call from Go.
+func gcWriteBarrier()
+func duffzero()
+func duffcopy()
+
+// Called from linker-generated .initarray; declared for go vet; do NOT call from Go.
+func addmoduledata()
+
+// Injected by the signal handler for panicking signals.
+// Initializes any registers that have fixed meaning at calls but
+// are scratch in bodies and calls sigpanic.
+// On many platforms it just jumps to sigpanic.
+func sigpanic0()
+
+// intArgRegs is used by the various register assignment
+// algorithm implementations in the runtime. These include:
+// - Finalizers (mfinal.go)
+// - Windows callbacks (syscall_windows.go)
+//
+// Both are stripped-down versions of the algorithm since they
+// only have to deal with a subset of cases (finalizers only
+// take a pointer or interface argument, Go Windows callbacks
+// don't support floating point).
+//
+// It should be modified with care and is generally only
+// modified when testing this package.
+//
+// It should never be set higher than its internal/abi
+// constant counterparts, because the system relies on a
+// structure that is at least large enough to hold the
+// registers the system supports.
+//
+// Protected by finlock.
+var intArgRegs = abi.IntArgRegs
diff --git a/contrib/go/_std_1.19/src/runtime/stubs2.go b/contrib/go/_std_1.19/src/runtime/stubs2.go
new file mode 100644
index 0000000000..94a888dec6
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/stubs2.go
@@ -0,0 +1,41 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !aix && !darwin && !js && !openbsd && !plan9 && !solaris && !windows
+
+package runtime
+
+import "unsafe"
+
+// read calls the read system call.
+// It returns a non-negative number of bytes read or a negative errno value.
+func read(fd int32, p unsafe.Pointer, n int32) int32
+
+func closefd(fd int32) int32
+
+func exit(code int32)
+func usleep(usec uint32)
+
+//go:nosplit
+func usleep_no_g(usec uint32) {
+ usleep(usec)
+}
+
+// write calls the write system call.
+// It returns a non-negative number of bytes written or a negative errno value.
+//
+//go:noescape
+func write1(fd uintptr, p unsafe.Pointer, n int32) int32
+
+//go:noescape
+func open(name *byte, mode, perm int32) int32
+
+// return value is only set on linux to be used in osinit()
+func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32
+
+// exitThread terminates the current thread, writing *wait = 0 when
+// the stack is safe to reclaim.
+//
+//go:noescape
+func exitThread(wait *uint32)
diff --git a/contrib/go/_std_1.18/src/runtime/stubs3.go b/contrib/go/_std_1.19/src/runtime/stubs3.go
index 891663b110..891663b110 100644
--- a/contrib/go/_std_1.18/src/runtime/stubs3.go
+++ b/contrib/go/_std_1.19/src/runtime/stubs3.go
diff --git a/contrib/go/_std_1.18/src/runtime/stubs_amd64.go b/contrib/go/_std_1.19/src/runtime/stubs_amd64.go
index 687a506cdd..687a506cdd 100644
--- a/contrib/go/_std_1.18/src/runtime/stubs_amd64.go
+++ b/contrib/go/_std_1.19/src/runtime/stubs_amd64.go
diff --git a/contrib/go/_std_1.19/src/runtime/stubs_linux.go b/contrib/go/_std_1.19/src/runtime/stubs_linux.go
new file mode 100644
index 0000000000..2367dc2bd0
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/stubs_linux.go
@@ -0,0 +1,20 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux
+
+package runtime
+
+import "unsafe"
+
+func sbrk0() uintptr
+
+// Called from write_err_android.go only, but defined in sys_linux_*.s;
+// declared here (instead of in write_err_android.go) for go vet on non-android builds.
+// The return value is the raw syscall result, which may encode an error number.
+//
+//go:noescape
+func access(name *byte, mode int32) int32
+func connect(fd int32, addr unsafe.Pointer, len int32) int32
+func socket(domain int32, typ int32, prot int32) int32
diff --git a/contrib/go/_std_1.18/src/runtime/stubs_nonlinux.go b/contrib/go/_std_1.19/src/runtime/stubs_nonlinux.go
index 1a06d7cc1d..1a06d7cc1d 100644
--- a/contrib/go/_std_1.18/src/runtime/stubs_nonlinux.go
+++ b/contrib/go/_std_1.19/src/runtime/stubs_nonlinux.go
diff --git a/contrib/go/_std_1.19/src/runtime/symtab.go b/contrib/go/_std_1.19/src/runtime/symtab.go
new file mode 100644
index 0000000000..ad34b68c7d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/symtab.go
@@ -0,0 +1,1183 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+// Frames may be used to get function/file/line information for a
+// slice of PC values returned by Callers.
+type Frames struct {
+ // callers is a slice of PCs that have not yet been expanded to frames.
+ callers []uintptr
+
+ // frames is a slice of Frames that have yet to be returned.
+ frames []Frame
+ frameStore [2]Frame
+}
+
+// Frame is the information returned by Frames for each call frame.
+type Frame struct {
+ // PC is the program counter for the location in this frame.
+ // For a frame that calls another frame, this will be the
+ // program counter of a call instruction. Because of inlining,
+ // multiple frames may have the same PC value, but different
+ // symbolic information.
+ PC uintptr
+
+ // Func is the Func value of this call frame. This may be nil
+ // for non-Go code or fully inlined functions.
+ Func *Func
+
+ // Function is the package path-qualified function name of
+ // this call frame. If non-empty, this string uniquely
+ // identifies a single function in the program.
+ // This may be the empty string if not known.
+ // If Func is not nil then Function == Func.Name().
+ Function string
+
+ // File and Line are the file name and line number of the
+ // location in this frame. For non-leaf frames, this will be
+ // the location of a call. These may be the empty string and
+ // zero, respectively, if not known.
+ File string
+ Line int
+
+ // Entry point program counter for the function; may be zero
+ // if not known. If Func is not nil then Entry ==
+ // Func.Entry().
+ Entry uintptr
+
+ // The runtime's internal view of the function. This field
+ // is set (funcInfo.valid() returns true) only for Go functions,
+ // not for C functions.
+ funcInfo funcInfo
+}
+
+// CallersFrames takes a slice of PC values returned by Callers and
+// prepares to return function/file/line information.
+// Do not change the slice until you are done with the Frames.
+func CallersFrames(callers []uintptr) *Frames {
+ f := &Frames{callers: callers}
+ f.frames = f.frameStore[:0]
+ return f
+}
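+
+// Typical usage, following the package-level example for Frames
+// (written from a caller's perspective):
+//
+//	pc := make([]uintptr, 16)
+//	n := runtime.Callers(0, pc)
+//	frames := runtime.CallersFrames(pc[:n])
+//	for {
+//		frame, more := frames.Next()
+//		// use frame.Function, frame.File, frame.Line
+//		if !more {
+//			break
+//		}
+//	}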
+
+// Next returns a Frame representing the next call frame in the slice
+// of PC values. If it has already returned all call frames, Next
+// returns a zero Frame.
+//
+// The more result indicates whether the next call to Next will return
+// a valid Frame. It does not necessarily indicate whether this call
+// returned one.
+//
+// See the Frames example for idiomatic usage.
+func (ci *Frames) Next() (frame Frame, more bool) {
+ for len(ci.frames) < 2 {
+ // Find the next frame.
+ // We need to look for 2 frames so we know what
+ // to return for the "more" result.
+ if len(ci.callers) == 0 {
+ break
+ }
+ pc := ci.callers[0]
+ ci.callers = ci.callers[1:]
+ funcInfo := findfunc(pc)
+ if !funcInfo.valid() {
+ if cgoSymbolizer != nil {
+ // Pre-expand cgo frames. We could do this
+ // incrementally, too, but there's no way to
+ // avoid allocation in this case anyway.
+ ci.frames = append(ci.frames, expandCgoFrames(pc)...)
+ }
+ continue
+ }
+ f := funcInfo._Func()
+ entry := f.Entry()
+ if pc > entry {
+ // We store the pc of the start of the instruction following
+ // the instruction in question (the call or the inline mark).
+ // This is done for historical reasons, and to make FuncForPC
+ // work correctly for entries in the result of runtime.Callers.
+ pc--
+ }
+ name := funcname(funcInfo)
+ if inldata := funcdata(funcInfo, _FUNCDATA_InlTree); inldata != nil {
+ inltree := (*[1 << 20]inlinedCall)(inldata)
+ // Non-strict as cgoTraceback may have added bogus PCs
+ // with a valid funcInfo but invalid PCDATA.
+ ix := pcdatavalue1(funcInfo, _PCDATA_InlTreeIndex, pc, nil, false)
+ if ix >= 0 {
+ // Note: entry is not modified. It always refers to a real frame, not an inlined one.
+ f = nil
+ name = funcnameFromNameoff(funcInfo, inltree[ix].func_)
+ // File/line is already correct.
+ // TODO: remove file/line from InlinedCall?
+ }
+ }
+ ci.frames = append(ci.frames, Frame{
+ PC: pc,
+ Func: f,
+ Function: name,
+ Entry: entry,
+ funcInfo: funcInfo,
+ // Note: File,Line set below
+ })
+ }
+
+ // Pop one frame from the frame list. Keep the rest.
+ // Avoid allocation in the common case, which is 1 or 2 frames.
+ switch len(ci.frames) {
+ case 0: // In the rare case when there are no frames at all, we return Frame{}.
+ return
+ case 1:
+ frame = ci.frames[0]
+ ci.frames = ci.frameStore[:0]
+ case 2:
+ frame = ci.frames[0]
+ ci.frameStore[0] = ci.frames[1]
+ ci.frames = ci.frameStore[:1]
+ default:
+ frame = ci.frames[0]
+ ci.frames = ci.frames[1:]
+ }
+ more = len(ci.frames) > 0
+ if frame.funcInfo.valid() {
+ // Compute file/line just before we need to return it,
+ // as it can be expensive. This avoids computing file/line
+ // for the Frame we find but don't return. See issue 32093.
+ file, line := funcline1(frame.funcInfo, frame.PC, false)
+ frame.File, frame.Line = file, int(line)
+ }
+ return
+}
+
+// runtime_expandFinalInlineFrame expands the final pc in stk to include all
+// "callers" if pc is inline.
+//
+//go:linkname runtime_expandFinalInlineFrame runtime/pprof.runtime_expandFinalInlineFrame
+func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr {
+ if len(stk) == 0 {
+ return stk
+ }
+ pc := stk[len(stk)-1]
+ tracepc := pc - 1
+
+ f := findfunc(tracepc)
+ if !f.valid() {
+ // Not a Go function.
+ return stk
+ }
+
+ inldata := funcdata(f, _FUNCDATA_InlTree)
+ if inldata == nil {
+ // Nothing inline in f.
+ return stk
+ }
+
+ // Treat the previous func as normal. We haven't actually checked, but
+ // since this pc was included in the stack, we know it shouldn't be
+ // elided.
+ lastFuncID := funcID_normal
+
+ // Remove pc from stk; we'll re-add it below.
+ stk = stk[:len(stk)-1]
+
+ // See inline expansion in gentraceback.
+ var cache pcvalueCache
+ inltree := (*[1 << 20]inlinedCall)(inldata)
+ for {
+ // Non-strict as cgoTraceback may have added bogus PCs
+ // with a valid funcInfo but invalid PCDATA.
+ ix := pcdatavalue1(f, _PCDATA_InlTreeIndex, tracepc, &cache, false)
+ if ix < 0 {
+ break
+ }
+ if inltree[ix].funcID == funcID_wrapper && elideWrapperCalling(lastFuncID) {
+ // ignore wrappers
+ } else {
+ stk = append(stk, pc)
+ }
+ lastFuncID = inltree[ix].funcID
+ // Back up to an instruction in the "caller".
+ tracepc = f.entry() + uintptr(inltree[ix].parentPc)
+ pc = tracepc + 1
+ }
+
+ // N.B. we want to keep the last parentPC which is not inline.
+ stk = append(stk, pc)
+
+ return stk
+}
+
+// expandCgoFrames expands frame information for pc, known to be
+// a non-Go function, using the cgoSymbolizer hook. expandCgoFrames
+// returns nil if pc could not be expanded.
+func expandCgoFrames(pc uintptr) []Frame {
+ arg := cgoSymbolizerArg{pc: pc}
+ callCgoSymbolizer(&arg)
+
+ if arg.file == nil && arg.funcName == nil {
+ // No useful information from symbolizer.
+ return nil
+ }
+
+ var frames []Frame
+ for {
+ frames = append(frames, Frame{
+ PC: pc,
+ Func: nil,
+ Function: gostring(arg.funcName),
+ File: gostring(arg.file),
+ Line: int(arg.lineno),
+ Entry: arg.entry,
+ // funcInfo is zero, which implies !funcInfo.valid().
+ // That ensures that we use the File/Line info given here.
+ })
+ if arg.more == 0 {
+ break
+ }
+ callCgoSymbolizer(&arg)
+ }
+
+ // No more frames for this PC. Tell the symbolizer we are done.
+ // We don't try to maintain a single cgoSymbolizerArg for the
+ // whole use of Frames, because there would be no good way to tell
+ // the symbolizer when we are done.
+ arg.pc = 0
+ callCgoSymbolizer(&arg)
+
+ return frames
+}
+
+// NOTE: Func does not expose the actual unexported fields, because we return *Func
+// values to users, and we want to keep them from being able to overwrite the data
+// with (say) *f = Func{}.
+// All code operating on a *Func must call raw() to get the *_func
+// or funcInfo() to get the funcInfo instead.
+
+// A Func represents a Go function in the running binary.
+type Func struct {
+ opaque struct{} // unexported field to disallow conversions
+}
+
+func (f *Func) raw() *_func {
+ return (*_func)(unsafe.Pointer(f))
+}
+
+func (f *Func) funcInfo() funcInfo {
+ return f.raw().funcInfo()
+}
+
+func (f *_func) funcInfo() funcInfo {
+	// Find the module containing f. f is located in the pclntable.
+ // The unsafe.Pointer to uintptr conversions and arithmetic
+ // are safe because we are working with module addresses.
+ ptr := uintptr(unsafe.Pointer(f))
+ var mod *moduledata
+ for datap := &firstmoduledata; datap != nil; datap = datap.next {
+ if len(datap.pclntable) == 0 {
+ continue
+ }
+ base := uintptr(unsafe.Pointer(&datap.pclntable[0]))
+ if base <= ptr && ptr < base+uintptr(len(datap.pclntable)) {
+ mod = datap
+ break
+ }
+ }
+ return funcInfo{f, mod}
+}
+
+// PCDATA and FUNCDATA table indexes.
+//
+// See funcdata.h and ../cmd/internal/objabi/funcdata.go.
+const (
+ _PCDATA_UnsafePoint = 0
+ _PCDATA_StackMapIndex = 1
+ _PCDATA_InlTreeIndex = 2
+ _PCDATA_ArgLiveIndex = 3
+
+ _FUNCDATA_ArgsPointerMaps = 0
+ _FUNCDATA_LocalsPointerMaps = 1
+ _FUNCDATA_StackObjects = 2
+ _FUNCDATA_InlTree = 3
+ _FUNCDATA_OpenCodedDeferInfo = 4
+ _FUNCDATA_ArgInfo = 5
+ _FUNCDATA_ArgLiveInfo = 6
+ _FUNCDATA_WrapInfo = 7
+
+ _ArgsSizeUnknown = -0x80000000
+)
+
+const (
+ // PCDATA_UnsafePoint values.
+ _PCDATA_UnsafePointSafe = -1 // Safe for async preemption
+ _PCDATA_UnsafePointUnsafe = -2 // Unsafe for async preemption
+
+	// _PCDATA_Restart1(2) apply to a sequence of instructions, within
+	// which if an async preemption happens, we should back off the PC
+	// to the start of the sequence when resuming.
+	// We need two so we can distinguish the start/end of the sequence
+	// in case two sequences are next to each other.
+ _PCDATA_Restart1 = -3
+ _PCDATA_Restart2 = -4
+
+	// Like _PCDATA_Restart1, but back to function entry if async
+ // preempted.
+ _PCDATA_RestartAtEntry = -5
+)
+
+// A FuncID identifies particular functions that need to be treated
+// specially by the runtime.
+// Note that in some situations involving plugins, there may be multiple
+// copies of a particular special runtime function.
+// Note: this list must match the list in cmd/internal/objabi/funcid.go.
+type funcID uint8
+
+const (
+ funcID_normal funcID = iota // not a special function
+ funcID_abort
+ funcID_asmcgocall
+ funcID_asyncPreempt
+ funcID_cgocallback
+ funcID_debugCallV2
+ funcID_gcBgMarkWorker
+ funcID_goexit
+ funcID_gogo
+ funcID_gopanic
+ funcID_handleAsyncEvent
+ funcID_mcall
+ funcID_morestack
+ funcID_mstart
+ funcID_panicwrap
+ funcID_rt0_go
+ funcID_runfinq
+ funcID_runtime_main
+ funcID_sigpanic
+ funcID_systemstack
+ funcID_systemstack_switch
+ funcID_wrapper // any autogenerated code (hash/eq algorithms, method wrappers, etc.)
+)
+
+// A FuncFlag holds bits about a function.
+// This list must match the list in cmd/internal/objabi/funcid.go.
+type funcFlag uint8
+
+const (
+ // TOPFRAME indicates a function that appears at the top of its stack.
+	// The traceback routine stops at such a function and considers that a
+ // successful, complete traversal of the stack.
+ // Examples of TOPFRAME functions include goexit, which appears
+ // at the top of a user goroutine stack, and mstart, which appears
+ // at the top of a system goroutine stack.
+ funcFlag_TOPFRAME funcFlag = 1 << iota
+
+ // SPWRITE indicates a function that writes an arbitrary value to SP
+ // (any write other than adding or subtracting a constant amount).
+ // The traceback routines cannot encode such changes into the
+ // pcsp tables, so the function traceback cannot safely unwind past
+ // SPWRITE functions. Stopping at an SPWRITE function is considered
+ // to be an incomplete unwinding of the stack. In certain contexts
+ // (in particular garbage collector stack scans) that is a fatal error.
+ funcFlag_SPWRITE
+
+ // ASM indicates that a function was implemented in assembly.
+ funcFlag_ASM
+)
+
+// pcHeader holds data used by the pclntab lookups.
+type pcHeader struct {
+ magic uint32 // 0xFFFFFFF0
+ pad1, pad2 uint8 // 0,0
+ minLC uint8 // min instruction size
+ ptrSize uint8 // size of a ptr in bytes
+ nfunc int // number of functions in the module
+ nfiles uint // number of entries in the file tab
+ textStart uintptr // base for function entry PC offsets in this module, equal to moduledata.text
+ funcnameOffset uintptr // offset to the funcnametab variable from pcHeader
+ cuOffset uintptr // offset to the cutab variable from pcHeader
+ filetabOffset uintptr // offset to the filetab variable from pcHeader
+ pctabOffset uintptr // offset to the pctab variable from pcHeader
+ pclnOffset uintptr // offset to the pclntab variable from pcHeader
+}
+
+// moduledata records information about the layout of the executable
+// image. It is written by the linker. Any changes here must be
+// matched by changes to the code in cmd/link/internal/ld/symtab.go:symtab.
+// moduledata is stored in statically allocated non-pointer memory;
+// none of the pointers here are visible to the garbage collector.
+type moduledata struct {
+ pcHeader *pcHeader
+ funcnametab []byte
+ cutab []uint32
+ filetab []byte
+ pctab []byte
+ pclntable []byte
+ ftab []functab
+ findfunctab uintptr
+ minpc, maxpc uintptr
+
+ text, etext uintptr
+ noptrdata, enoptrdata uintptr
+ data, edata uintptr
+ bss, ebss uintptr
+ noptrbss, enoptrbss uintptr
+ end, gcdata, gcbss uintptr
+ types, etypes uintptr
+ rodata uintptr
+ gofunc uintptr // go.func.*
+
+ textsectmap []textsect
+ typelinks []int32 // offsets from types
+ itablinks []*itab
+
+ ptab []ptabEntry
+
+ pluginpath string
+ pkghashes []modulehash
+
+ modulename string
+ modulehashes []modulehash
+
+ hasmain uint8 // 1 if module contains the main function, 0 otherwise
+
+ gcdatamask, gcbssmask bitvector
+
+ typemap map[typeOff]*_type // offset to *_rtype in previous module
+
+ bad bool // module failed to load and should be ignored
+
+ next *moduledata
+}
+
+// A modulehash is used to compare the ABI of a new module or a
+// package in a new module with the loaded program.
+//
+// For each shared library a module links against, the linker creates an entry in the
+// moduledata.modulehashes slice containing the name of the module, the abi hash seen
+// at link time and a pointer to the runtime abi hash. These are checked in
+// moduledataverify1 below.
+//
+// For each loaded plugin, the pkghashes slice has a modulehash of the
+// newly loaded package that can be used to check the plugin's version of
+// a package against any previously loaded version of the package.
+// This is done in plugin.lastmoduleinit.
+type modulehash struct {
+ modulename string
+ linktimehash string
+ runtimehash *string
+}
+
+// pinnedTypemaps are the map[typeOff]*_type from the moduledata objects.
+//
+// These typemap objects are allocated at run time on the heap, but the
+// only direct reference to them is in the moduledata, created by the
+// linker and marked SNOPTRDATA so it is ignored by the GC.
+//
+// To make sure the map isn't collected, we keep a second reference here.
+var pinnedTypemaps []map[typeOff]*_type
+
+var firstmoduledata moduledata // linker symbol
+var lastmoduledatap *moduledata // linker symbol
+var modulesSlice *[]*moduledata // see activeModules
+
+// activeModules returns a slice of active modules.
+//
+// A module is active once its gcdatamask and gcbssmask have been
+// assembled and it is usable by the GC.
+//
+// This is nosplit/nowritebarrier because it is called by the
+// cgo pointer checking code.
+//
+//go:nosplit
+//go:nowritebarrier
+func activeModules() []*moduledata {
+ p := (*[]*moduledata)(atomic.Loadp(unsafe.Pointer(&modulesSlice)))
+ if p == nil {
+ return nil
+ }
+ return *p
+}
+
+// modulesinit creates the active modules slice out of all loaded modules.
+//
+// When a module is first loaded by the dynamic linker, an .init_array
+// function (written by cmd/link) is invoked to call addmoduledata,
+// appending the module to the linked list that starts with
+// firstmoduledata.
+//
+// There are two times this can happen in the lifecycle of a Go
+// program. First, if compiled with -linkshared, a number of modules
+// built with -buildmode=shared can be loaded at program initialization.
+// Second, a Go program can load a module while running that was built
+// with -buildmode=plugin.
+//
+// After loading, this function is called which initializes the
+// moduledata so it is usable by the GC and creates a new activeModules
+// list.
+//
+// Only one goroutine may call modulesinit at a time.
+func modulesinit() {
+ modules := new([]*moduledata)
+ for md := &firstmoduledata; md != nil; md = md.next {
+ if md.bad {
+ continue
+ }
+ *modules = append(*modules, md)
+ if md.gcdatamask == (bitvector{}) {
+ scanDataSize := md.edata - md.data
+ md.gcdatamask = progToPointerMask((*byte)(unsafe.Pointer(md.gcdata)), scanDataSize)
+ scanBSSSize := md.ebss - md.bss
+ md.gcbssmask = progToPointerMask((*byte)(unsafe.Pointer(md.gcbss)), scanBSSSize)
+ gcController.addGlobals(int64(scanDataSize + scanBSSSize))
+ }
+ }
+
+ // Modules appear in the moduledata linked list in the order they are
+ // loaded by the dynamic loader, with one exception: the
+	// firstmoduledata itself is the module that contains the runtime. This
+ // is not always the first module (when using -buildmode=shared, it
+ // is typically libstd.so, the second module). The order matters for
+ // typelinksinit, so we swap the first module with whatever module
+ // contains the main function.
+ //
+ // See Issue #18729.
+ for i, md := range *modules {
+ if md.hasmain != 0 {
+ (*modules)[0] = md
+ (*modules)[i] = &firstmoduledata
+ break
+ }
+ }
+
+ atomicstorep(unsafe.Pointer(&modulesSlice), unsafe.Pointer(modules))
+}
+
+type functab struct {
+ entryoff uint32 // relative to runtime.text
+ funcoff uint32
+}
+
+// Mapping information for secondary text sections
+
+type textsect struct {
+ vaddr uintptr // prelinked section vaddr
+ end uintptr // vaddr + section length
+ baseaddr uintptr // relocated section address
+}
+
+const minfunc = 16 // minimum function size
+const pcbucketsize = 256 * minfunc // size of bucket in the pc->func lookup table
+
+// findfunctab is an array of these structures.
+// Each bucket represents 4096 bytes of the text segment.
+// Each subbucket represents 256 bytes of the text segment.
+// To find a function given a pc, locate the bucket and subbucket for
+// that pc. Add together the idx and subbucket value to obtain a
+// function index. Then scan the functab array starting at that
+// index to find the target function.
+// This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead.
+type findfuncbucket struct {
+ idx uint32
+ subbuckets [16]byte
+}
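+
+// For example, a (hypothetical) pc at text offset 0x1234 falls in bucket
+// 0x1234/4096 = 1 and subbucket (0x1234%4096)/256 = 2, so the scan starts
+// at function index bucket[1].idx + bucket[1].subbuckets[2].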
+
+func moduledataverify() {
+ for datap := &firstmoduledata; datap != nil; datap = datap.next {
+ moduledataverify1(datap)
+ }
+}
+
+const debugPcln = false
+
+func moduledataverify1(datap *moduledata) {
+ // Check that the pclntab's format is valid.
+ hdr := datap.pcHeader
+ if hdr.magic != 0xfffffff0 || hdr.pad1 != 0 || hdr.pad2 != 0 ||
+ hdr.minLC != sys.PCQuantum || hdr.ptrSize != goarch.PtrSize || hdr.textStart != datap.text {
+ println("runtime: pcHeader: magic=", hex(hdr.magic), "pad1=", hdr.pad1, "pad2=", hdr.pad2,
+ "minLC=", hdr.minLC, "ptrSize=", hdr.ptrSize, "pcHeader.textStart=", hex(hdr.textStart),
+ "text=", hex(datap.text), "pluginpath=", datap.pluginpath)
+ throw("invalid function symbol table")
+ }
+
+	// ftab is a lookup table for functions by program counter.
+ nftab := len(datap.ftab) - 1
+ for i := 0; i < nftab; i++ {
+ // NOTE: ftab[nftab].entry is legal; it is the address beyond the final function.
+ if datap.ftab[i].entryoff > datap.ftab[i+1].entryoff {
+ f1 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i].funcoff])), datap}
+ f2 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i+1].funcoff])), datap}
+ f2name := "end"
+ if i+1 < nftab {
+ f2name = funcname(f2)
+ }
+ println("function symbol table not sorted by PC offset:", hex(datap.ftab[i].entryoff), funcname(f1), ">", hex(datap.ftab[i+1].entryoff), f2name, ", plugin:", datap.pluginpath)
+ for j := 0; j <= i; j++ {
+ println("\t", hex(datap.ftab[j].entryoff), funcname(funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[j].funcoff])), datap}))
+ }
+ if GOOS == "aix" && isarchive {
+ println("-Wl,-bnoobjreorder is mandatory on aix/ppc64 with c-archive")
+ }
+ throw("invalid runtime symbol table")
+ }
+ }
+
+ min := datap.textAddr(datap.ftab[0].entryoff)
+ max := datap.textAddr(datap.ftab[nftab].entryoff)
+ if datap.minpc != min || datap.maxpc != max {
+ println("minpc=", hex(datap.minpc), "min=", hex(min), "maxpc=", hex(datap.maxpc), "max=", hex(max))
+ throw("minpc or maxpc invalid")
+ }
+
+ for _, modulehash := range datap.modulehashes {
+ if modulehash.linktimehash != *modulehash.runtimehash {
+ println("abi mismatch detected between", datap.modulename, "and", modulehash.modulename)
+ throw("abi mismatch")
+ }
+ }
+}
+
+// textAddr returns md.text + off, with special handling for multiple text sections.
+// off is a (virtual) offset computed at internal linking time,
+// before the external linker adjusts the sections' base addresses.
+//
+// The text, or instruction stream, is generated as one large buffer.
+// The off (offset) for a function is its offset within this buffer.
+// If the total text size gets too large, there can be issues on platforms like ppc64
+// if the targets of calls are too far away for the call instruction.
+// To resolve the large text issue, the text is split into multiple text sections
+// to allow the linker to generate long calls when necessary.
+// When this happens, the vaddr for each text section is set to its offset within the text.
+// Each function's offset is compared against the section vaddrs and ends to determine the containing section.
+// Then the section relative offset is added to the section's
+// relocated baseaddr to compute the function address.
+//
+// It is nosplit because it is part of the findfunc implementation.
+//
+//go:nosplit
+func (md *moduledata) textAddr(off32 uint32) uintptr {
+ off := uintptr(off32)
+ res := md.text + off
+ if len(md.textsectmap) > 1 {
+ for i, sect := range md.textsectmap {
+ // For the last section, include the end address (etext), as it is included in the functab.
+ if off >= sect.vaddr && off < sect.end || (i == len(md.textsectmap)-1 && off == sect.end) {
+ res = sect.baseaddr + off - sect.vaddr
+ break
+ }
+ }
+ if res > md.etext && GOARCH != "wasm" { // on wasm, functions do not live in the same address space as the linear memory
+ println("runtime: textAddr", hex(res), "out of range", hex(md.text), "-", hex(md.etext))
+ throw("runtime: text offset out of range")
+ }
+ }
+ return res
+}
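+
+// A worked example with two hypothetical sections:
+//
+//	sect0: vaddr=0x0      end=0x100000 baseaddr=md.text
+//	sect1: vaddr=0x100000 end=0x180000 baseaddr=md.text+0x110000
+//
+// textAddr(0x100010) matches sect1, giving
+// md.text + 0x110000 + (0x100010 - 0x100000) = md.text + 0x110010.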
+
+// textOff is the opposite of textAddr. It converts a PC to a (virtual) offset
+// to md.text, and reports whether the PC is in any Go text section.
+//
+// It is nosplit because it is part of the findfunc implementation.
+//
+//go:nosplit
+func (md *moduledata) textOff(pc uintptr) (uint32, bool) {
+ res := uint32(pc - md.text)
+ if len(md.textsectmap) > 1 {
+ for i, sect := range md.textsectmap {
+ if sect.baseaddr > pc {
+ // pc is not in any section.
+ return 0, false
+ }
+ end := sect.baseaddr + (sect.end - sect.vaddr)
+ // For the last section, include the end address (etext), as it is included in the functab.
+			if i == len(md.textsectmap)-1 {
+ end++
+ }
+ if pc < end {
+ res = uint32(pc - sect.baseaddr + sect.vaddr)
+ break
+ }
+ }
+ }
+ return res, true
+}
+
+// FuncForPC returns a *Func describing the function that contains the
+// given program counter address, or else nil.
+//
+// If pc represents multiple functions because of inlining, it returns
+// the *Func describing the innermost function, but with an entry of
+// the outermost function.
+func FuncForPC(pc uintptr) *Func {
+ f := findfunc(pc)
+ if !f.valid() {
+ return nil
+ }
+ if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
+ // Note: strict=false so bad PCs (those between functions) don't crash the runtime.
+ // We just report the preceding function in that situation. See issue 29735.
+ // TODO: Perhaps we should report no function at all in that case.
+ // The runtime currently doesn't have function end info, alas.
+ if ix := pcdatavalue1(f, _PCDATA_InlTreeIndex, pc, nil, false); ix >= 0 {
+ inltree := (*[1 << 20]inlinedCall)(inldata)
+ name := funcnameFromNameoff(f, inltree[ix].func_)
+ file, line := funcline(f, pc)
+ fi := &funcinl{
+ ones: ^uint32(0),
+ entry: f.entry(), // entry of the real (the outermost) function.
+ name: name,
+ file: file,
+ line: int(line),
+ }
+ return (*Func)(unsafe.Pointer(fi))
+ }
+ }
+ return f._Func()
+}
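+
+// A minimal usage sketch from a caller's perspective:
+//
+//	pc, _, _, ok := runtime.Caller(0)
+//	if ok {
+//		println(runtime.FuncForPC(pc).Name())
+//	}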
+
+// Name returns the name of the function.
+func (f *Func) Name() string {
+ if f == nil {
+ return ""
+ }
+ fn := f.raw()
+ if fn.isInlined() { // inlined version
+ fi := (*funcinl)(unsafe.Pointer(fn))
+ return fi.name
+ }
+ return funcname(f.funcInfo())
+}
+
+// Entry returns the entry address of the function.
+func (f *Func) Entry() uintptr {
+ fn := f.raw()
+ if fn.isInlined() { // inlined version
+ fi := (*funcinl)(unsafe.Pointer(fn))
+ return fi.entry
+ }
+ return fn.funcInfo().entry()
+}
+
+// FileLine returns the file name and line number of the
+// source code corresponding to the program counter pc.
+// The result will not be accurate if pc is not a program
+// counter within f.
+func (f *Func) FileLine(pc uintptr) (file string, line int) {
+ fn := f.raw()
+ if fn.isInlined() { // inlined version
+ fi := (*funcinl)(unsafe.Pointer(fn))
+ return fi.file, fi.line
+ }
+ // Pass strict=false here, because anyone can call this function,
+ // and they might just be wrong about targetpc belonging to f.
+ file, line32 := funcline1(f.funcInfo(), pc, false)
+ return file, int(line32)
+}
+
+// findmoduledatap looks up the moduledata for a PC.
+//
+// It is nosplit because it's part of the isgoexception
+// implementation.
+//
+//go:nosplit
+func findmoduledatap(pc uintptr) *moduledata {
+ for datap := &firstmoduledata; datap != nil; datap = datap.next {
+ if datap.minpc <= pc && pc < datap.maxpc {
+ return datap
+ }
+ }
+ return nil
+}
+
+type funcInfo struct {
+ *_func
+ datap *moduledata
+}
+
+func (f funcInfo) valid() bool {
+ return f._func != nil
+}
+
+func (f funcInfo) _Func() *Func {
+ return (*Func)(unsafe.Pointer(f._func))
+}
+
+// isInlined reports whether f should be re-interpreted as a *funcinl.
+func (f *_func) isInlined() bool {
+ return f.entryoff == ^uint32(0) // see comment for funcinl.ones
+}
+
+// entry returns the entry PC for f.
+func (f funcInfo) entry() uintptr {
+ return f.datap.textAddr(f.entryoff)
+}
+
+// findfunc looks up function metadata for a PC.
+//
+// It is nosplit because it's part of the isgoexception
+// implementation.
+//
+//go:nosplit
+func findfunc(pc uintptr) funcInfo {
+ datap := findmoduledatap(pc)
+ if datap == nil {
+ return funcInfo{}
+ }
+ const nsub = uintptr(len(findfuncbucket{}.subbuckets))
+
+ pcOff, ok := datap.textOff(pc)
+ if !ok {
+ return funcInfo{}
+ }
+
+ x := uintptr(pcOff) + datap.text - datap.minpc // TODO: are datap.text and datap.minpc always equal?
+ b := x / pcbucketsize
+ i := x % pcbucketsize / (pcbucketsize / nsub)
+
+ ffb := (*findfuncbucket)(add(unsafe.Pointer(datap.findfunctab), b*unsafe.Sizeof(findfuncbucket{})))
+ idx := ffb.idx + uint32(ffb.subbuckets[i])
+
+ // Find the ftab entry.
+ for datap.ftab[idx+1].entryoff <= pcOff {
+ idx++
+ }
+
+ funcoff := datap.ftab[idx].funcoff
+ return funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[funcoff])), datap}
+}
+
+type pcvalueCache struct {
+ entries [2][8]pcvalueCacheEnt
+}
+
+type pcvalueCacheEnt struct {
+ // targetpc and off together are the key of this cache entry.
+ targetpc uintptr
+ off uint32
+ // val is the value of this cached pcvalue entry.
+ val int32
+}
+
+// pcvalueCacheKey returns the outermost index in a pcvalueCache to use for targetpc.
+// It must be very cheap to calculate.
+// For now, align to goarch.PtrSize and reduce mod the number of entries.
+// In practice, this appears to be fairly randomly and evenly distributed.
+func pcvalueCacheKey(targetpc uintptr) uintptr {
+ return (targetpc / goarch.PtrSize) % uintptr(len(pcvalueCache{}.entries))
+}
+
+// Returns the PCData value, and the PC where this value starts.
+// TODO: the start PC is returned only when cache is nil.
+func pcvalue(f funcInfo, off uint32, targetpc uintptr, cache *pcvalueCache, strict bool) (int32, uintptr) {
+ if off == 0 {
+ return -1, 0
+ }
+
+ // Check the cache. This speeds up walks of deep stacks, which
+ // tend to have the same recursive functions over and over.
+ //
+ // This cache is small enough that full associativity is
+ // cheaper than doing the hashing for a less associative
+ // cache.
+ if cache != nil {
+ x := pcvalueCacheKey(targetpc)
+ for i := range cache.entries[x] {
+ // We check off first because we're more
+ // likely to have multiple entries with
+ // different offsets for the same targetpc
+ // than the other way around, so we'll usually
+ // fail in the first clause.
+ ent := &cache.entries[x][i]
+ if ent.off == off && ent.targetpc == targetpc {
+ return ent.val, 0
+ }
+ }
+ }
+
+ if !f.valid() {
+ if strict && panicking == 0 {
+ println("runtime: no module data for", hex(f.entry()))
+ throw("no module data")
+ }
+ return -1, 0
+ }
+ datap := f.datap
+ p := datap.pctab[off:]
+ pc := f.entry()
+ prevpc := pc
+ val := int32(-1)
+ for {
+ var ok bool
+ p, ok = step(p, &pc, &val, pc == f.entry())
+ if !ok {
+ break
+ }
+ if targetpc < pc {
+ // Replace a random entry in the cache. Random
+ // replacement prevents a performance cliff if
+ // a recursive stack's cycle is slightly
+ // larger than the cache.
+ // Put the new element at the beginning,
+ // since it is the most likely to be newly used.
+ if cache != nil {
+ x := pcvalueCacheKey(targetpc)
+ e := &cache.entries[x]
+ ci := fastrandn(uint32(len(cache.entries[x])))
+ e[ci] = e[0]
+ e[0] = pcvalueCacheEnt{
+ targetpc: targetpc,
+ off: off,
+ val: val,
+ }
+ }
+
+ return val, prevpc
+ }
+ prevpc = pc
+ }
+
+ // If there was a table, it should have covered all program counters.
+ // If not, something is wrong.
+ if panicking != 0 || !strict {
+ return -1, 0
+ }
+
+ print("runtime: invalid pc-encoded table f=", funcname(f), " pc=", hex(pc), " targetpc=", hex(targetpc), " tab=", p, "\n")
+
+ p = datap.pctab[off:]
+ pc = f.entry()
+ val = -1
+ for {
+ var ok bool
+ p, ok = step(p, &pc, &val, pc == f.entry())
+ if !ok {
+ break
+ }
+ print("\tvalue=", val, " until pc=", hex(pc), "\n")
+ }
+
+ throw("invalid runtime symbol table")
+ return -1, 0
+}
+
+func cfuncname(f funcInfo) *byte {
+ if !f.valid() || f.nameoff == 0 {
+ return nil
+ }
+ return &f.datap.funcnametab[f.nameoff]
+}
+
+func funcname(f funcInfo) string {
+ return gostringnocopy(cfuncname(f))
+}
+
+func funcpkgpath(f funcInfo) string {
+ name := funcname(f)
+ i := len(name) - 1
+ for ; i > 0; i-- {
+ if name[i] == '/' {
+ break
+ }
+ }
+ for ; i < len(name); i++ {
+ if name[i] == '.' {
+ break
+ }
+ }
+ return name[:i]
+}
+
+func cfuncnameFromNameoff(f funcInfo, nameoff int32) *byte {
+ if !f.valid() {
+ return nil
+ }
+ return &f.datap.funcnametab[nameoff]
+}
+
+func funcnameFromNameoff(f funcInfo, nameoff int32) string {
+ return gostringnocopy(cfuncnameFromNameoff(f, nameoff))
+}
+
+func funcfile(f funcInfo, fileno int32) string {
+ datap := f.datap
+ if !f.valid() {
+ return "?"
+ }
+ // Make sure the cu index and file offset are valid
+ if fileoff := datap.cutab[f.cuOffset+uint32(fileno)]; fileoff != ^uint32(0) {
+ return gostringnocopy(&datap.filetab[fileoff])
+ }
+ // pcln section is corrupt.
+ return "?"
+}
+
+func funcline1(f funcInfo, targetpc uintptr, strict bool) (file string, line int32) {
+ datap := f.datap
+ if !f.valid() {
+ return "?", 0
+ }
+ fileno, _ := pcvalue(f, f.pcfile, targetpc, nil, strict)
+ line, _ = pcvalue(f, f.pcln, targetpc, nil, strict)
+ if fileno == -1 || line == -1 || int(fileno) >= len(datap.filetab) {
+ // print("looking for ", hex(targetpc), " in ", funcname(f), " got file=", fileno, " line=", lineno, "\n")
+ return "?", 0
+ }
+ file = funcfile(f, fileno)
+ return
+}
+
+func funcline(f funcInfo, targetpc uintptr) (file string, line int32) {
+ return funcline1(f, targetpc, true)
+}
+
+func funcspdelta(f funcInfo, targetpc uintptr, cache *pcvalueCache) int32 {
+ x, _ := pcvalue(f, f.pcsp, targetpc, cache, true)
+ if debugPcln && x&(goarch.PtrSize-1) != 0 {
+ print("invalid spdelta ", funcname(f), " ", hex(f.entry()), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
+ throw("bad spdelta")
+ }
+ return x
+}
+
+// funcMaxSPDelta returns the maximum spdelta at any point in f.
+func funcMaxSPDelta(f funcInfo) int32 {
+ datap := f.datap
+ p := datap.pctab[f.pcsp:]
+ pc := f.entry()
+ val := int32(-1)
+ max := int32(0)
+ for {
+ var ok bool
+ p, ok = step(p, &pc, &val, pc == f.entry())
+ if !ok {
+ return max
+ }
+ if val > max {
+ max = val
+ }
+ }
+}
+
+func pcdatastart(f funcInfo, table uint32) uint32 {
+ return *(*uint32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4))
+}
+
+func pcdatavalue(f funcInfo, table uint32, targetpc uintptr, cache *pcvalueCache) int32 {
+ if table >= f.npcdata {
+ return -1
+ }
+ r, _ := pcvalue(f, pcdatastart(f, table), targetpc, cache, true)
+ return r
+}
+
+func pcdatavalue1(f funcInfo, table uint32, targetpc uintptr, cache *pcvalueCache, strict bool) int32 {
+ if table >= f.npcdata {
+ return -1
+ }
+ r, _ := pcvalue(f, pcdatastart(f, table), targetpc, cache, strict)
+ return r
+}
+
+// Like pcdatavalue, but also return the start PC of this PCData value.
+// It doesn't take a cache.
+func pcdatavalue2(f funcInfo, table uint32, targetpc uintptr) (int32, uintptr) {
+ if table >= f.npcdata {
+ return -1, 0
+ }
+ return pcvalue(f, pcdatastart(f, table), targetpc, nil, true)
+}
+
+// funcdata returns a pointer to the ith funcdata for f.
+// funcdata should be kept in sync with cmd/link:writeFuncs.
+func funcdata(f funcInfo, i uint8) unsafe.Pointer {
+ if i < 0 || i >= f.nfuncdata {
+ return nil
+ }
+ base := f.datap.gofunc // load gofunc address early so that we calculate during cache misses
+ p := uintptr(unsafe.Pointer(&f.nfuncdata)) + unsafe.Sizeof(f.nfuncdata) + uintptr(f.npcdata)*4 + uintptr(i)*4
+ off := *(*uint32)(unsafe.Pointer(p))
+ // Return off == ^uint32(0) ? 0 : f.datap.gofunc + uintptr(off), but without branches.
+ // The compiler calculates mask on most architectures using conditional assignment.
+ var mask uintptr
+ if off == ^uint32(0) {
+ mask = 1
+ }
+ mask--
+ raw := base + uintptr(off)
+ return unsafe.Pointer(raw & mask)
+}
+
+// step advances to the next pc, value pair in the encoded table.
+func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool) {
+ // For both uvdelta and pcdelta, the common case (~70%)
+ // is that they are a single byte. If so, avoid calling readvarint.
+ uvdelta := uint32(p[0])
+ if uvdelta == 0 && !first {
+ return nil, false
+ }
+ n := uint32(1)
+ if uvdelta&0x80 != 0 {
+ n, uvdelta = readvarint(p)
+ }
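+	// uvdelta is zigzag-encoded: 0, 1, 2, 3, ... decode to 0, -1, 1, -2, ...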
+ *val += int32(-(uvdelta & 1) ^ (uvdelta >> 1))
+ p = p[n:]
+
+ pcdelta := uint32(p[0])
+ n = 1
+ if pcdelta&0x80 != 0 {
+ n, pcdelta = readvarint(p)
+ }
+ p = p[n:]
+ *pc += uintptr(pcdelta * sys.PCQuantum)
+ return p, true
+}
+
+// readvarint reads a varint from p.
+func readvarint(p []byte) (read uint32, val uint32) {
+ var v, shift, n uint32
+ for {
+ b := p[n]
+ n++
+ v |= uint32(b&0x7F) << (shift & 31)
+ if b&0x80 == 0 {
+ break
+ }
+ shift += 7
+ }
+ return n, v
+}
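+
+// For example, readvarint([]byte{0x96, 0x01}) returns (2, 150): 0x96
+// contributes its low 7 bits (22) and sets the continuation bit, and
+// 0x01 contributes 1<<7 (128).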
+
+type stackmap struct {
+ n int32 // number of bitmaps
+ nbit int32 // number of bits in each bitmap
+ bytedata [1]byte // bitmaps, each starting on a byte boundary
+}
+
+//go:nowritebarrier
+func stackmapdata(stkmap *stackmap, n int32) bitvector {
+ // Check this invariant only when stackDebug is on at all.
+ // The invariant is already checked by many of stackmapdata's callers,
+ // and disabling it by default allows stackmapdata to be inlined.
+ if stackDebug > 0 && (n < 0 || n >= stkmap.n) {
+ throw("stackmapdata: index out of range")
+ }
+ return bitvector{stkmap.nbit, addb(&stkmap.bytedata[0], uintptr(n*((stkmap.nbit+7)>>3)))}
+}
+
+// inlinedCall is the encoding of entries in the FUNCDATA_InlTree table.
+type inlinedCall struct {
+ parent int16 // index of parent in the inltree, or < 0
+ funcID funcID // type of the called function
+ _ byte
+ file int32 // perCU file index for inlined call. See cmd/link:pcln.go
+ line int32 // line number of the call site
+ func_ int32 // offset into pclntab for name of called function
+ parentPc int32 // position of an instruction whose source position is the call site (offset from entry)
+}
diff --git a/contrib/go/_std_1.19/src/runtime/sys_darwin.go b/contrib/go/_std_1.19/src/runtime/sys_darwin.go
new file mode 100644
index 0000000000..1547fdceb0
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/sys_darwin.go
@@ -0,0 +1,537 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/abi"
+ "unsafe"
+)
+
+// The X versions of syscall expect the libc call to return a 64-bit result.
+// The non-X versions expect a 32-bit result.
+// This distinction is required because an error is indicated by returning -1,
+// and we need to know whether to check 32 or 64 bits of the result.
+// (Some libc functions that return 32 bits put junk in the upper 32 bits of AX.)
+
+//go:linkname syscall_syscall syscall.syscall
+//go:nosplit
+func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
+ entersyscall()
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&args))
+ exitsyscall()
+ return args.r1, args.r2, args.err
+}
+func syscall()
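+
+// syscall_syscall above shows the shape shared by the wrappers in this
+// file: the Go arguments are packed into a struct, libcCall runs an
+// assembly stub (here syscall, defined in sys_darwin_$ARCH.s) that
+// unpacks them for the C calling convention, and the results are read
+// back out of the same struct.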
+
+//go:linkname syscall_syscallX syscall.syscallX
+//go:nosplit
+func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
+ entersyscall()
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallX)), unsafe.Pointer(&args))
+ exitsyscall()
+ return args.r1, args.r2, args.err
+}
+func syscallX()
+
+//go:linkname syscall_syscall6 syscall.syscall6
+//go:nosplit
+func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err}
+ entersyscall()
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&args))
+ exitsyscall()
+ return args.r1, args.r2, args.err
+}
+func syscall6()
+
+//go:linkname syscall_syscall6X syscall.syscall6X
+//go:nosplit
+func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err}
+ entersyscall()
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&args))
+ exitsyscall()
+ return args.r1, args.r2, args.err
+}
+func syscall6X()
+
+//go:linkname syscall_syscallPtr syscall.syscallPtr
+//go:nosplit
+func syscall_syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
+ entersyscall()
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallPtr)), unsafe.Pointer(&args))
+ exitsyscall()
+ return args.r1, args.r2, args.err
+}
+func syscallPtr()
+
+//go:linkname syscall_rawSyscall syscall.rawSyscall
+//go:nosplit
+func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&args))
+ return args.r1, args.r2, args.err
+}
+
+//go:linkname syscall_rawSyscall6 syscall.rawSyscall6
+//go:nosplit
+func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err}
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&args))
+ return args.r1, args.r2, args.err
+}
+
+// syscallNoErr is used in crypto/x509 to call into Security.framework and CF.
+
+//go:linkname crypto_x509_syscall crypto/x509/internal/macos.syscall
+//go:nosplit
+func crypto_x509_syscall(fn, a1, a2, a3, a4, a5 uintptr, f1 float64) (r1 uintptr) {
+ args := struct {
+ fn, a1, a2, a3, a4, a5 uintptr
+ f1 float64
+ r1 uintptr
+ }{fn, a1, a2, a3, a4, a5, f1, r1}
+ entersyscall()
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall_x509)), unsafe.Pointer(&args))
+ exitsyscall()
+ return args.r1
+}
+func syscall_x509()
+
+// The *_trampoline functions convert from the Go calling convention to the C calling convention
+// and then call the underlying libc function. They are defined in sys_darwin_$ARCH.s.
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_attr_init(attr *pthreadattr) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_init_trampoline)), unsafe.Pointer(&attr))
+ KeepAlive(attr)
+ return ret
+}
+func pthread_attr_init_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_attr_getstacksize(attr *pthreadattr, size *uintptr) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_getstacksize_trampoline)), unsafe.Pointer(&attr))
+ KeepAlive(attr)
+ KeepAlive(size)
+ return ret
+}
+func pthread_attr_getstacksize_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_attr_setdetachstate(attr *pthreadattr, state int) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_setdetachstate_trampoline)), unsafe.Pointer(&attr))
+ KeepAlive(attr)
+ return ret
+}
+func pthread_attr_setdetachstate_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_create(attr *pthreadattr, start uintptr, arg unsafe.Pointer) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_create_trampoline)), unsafe.Pointer(&attr))
+ KeepAlive(attr)
+ KeepAlive(arg) // Just for consistency. Arg of course needs to be kept alive for the start function.
+ return ret
+}
+func pthread_create_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func raise(sig uint32) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(raise_trampoline)), unsafe.Pointer(&sig))
+}
+func raise_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_self() (t pthread) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_self_trampoline)), unsafe.Pointer(&t))
+ return
+}
+func pthread_self_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_kill(t pthread, sig uint32) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_kill_trampoline)), unsafe.Pointer(&t))
+ return
+}
+func pthread_kill_trampoline()
+
+// mmap is used to do low-level memory allocation via mmap. Don't allow stack
+// splits, since this function (used by sysAlloc) is called in a lot of low-level
+// parts of the runtime and callers often assume it won't acquire any locks.
+//
+//go:nosplit
+func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int) {
+ args := struct {
+ addr unsafe.Pointer
+ n uintptr
+ prot, flags, fd int32
+ off uint32
+ ret1 unsafe.Pointer
+ ret2 int
+ }{addr, n, prot, flags, fd, off, nil, 0}
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(mmap_trampoline)), unsafe.Pointer(&args))
+ return args.ret1, args.ret2
+}
+func mmap_trampoline()
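+
+// A sketch of a typical call site for the wrapper above (constants come
+// from the defs_darwin_*.go files; the error handling is illustrative,
+// not a quote of any particular caller):
+//
+//	p, errno := mmap(nil, 4096, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
+//	if errno != 0 {
+//		// e.g. _ENOMEM; the caller decides whether to throw or retry
+//	}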
+
+//go:nosplit
+//go:cgo_unsafe_args
+func munmap(addr unsafe.Pointer, n uintptr) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(munmap_trampoline)), unsafe.Pointer(&addr))
+ KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address.
+}
+func munmap_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func madvise(addr unsafe.Pointer, n uintptr, flags int32) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(madvise_trampoline)), unsafe.Pointer(&addr))
+ KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address.
+}
+func madvise_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func mlock(addr unsafe.Pointer, n uintptr) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(mlock_trampoline)), unsafe.Pointer(&addr))
+ KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address.
+}
+func mlock_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func read(fd int32, p unsafe.Pointer, n int32) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(read_trampoline)), unsafe.Pointer(&fd))
+ KeepAlive(p)
+ return ret
+}
+func read_trampoline()
+
+func pipe() (r, w int32, errno int32) {
+ var p [2]int32
+ errno = libcCall(unsafe.Pointer(abi.FuncPCABI0(pipe_trampoline)), noescape(unsafe.Pointer(&p)))
+ return p[0], p[1], errno
+}
+func pipe_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func closefd(fd int32) int32 {
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(close_trampoline)), unsafe.Pointer(&fd))
+}
+func close_trampoline()
+
+// This is exported via linkname to assembly in runtime/cgo.
+//
+//go:nosplit
+//go:cgo_unsafe_args
+//go:linkname exit
+func exit(code int32) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(exit_trampoline)), unsafe.Pointer(&code))
+}
+func exit_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func usleep(usec uint32) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec))
+}
+func usleep_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func usleep_no_g(usec uint32) {
+ asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec))
+}
+
+//go:nosplit
+//go:cgo_unsafe_args
+func write1(fd uintptr, p unsafe.Pointer, n int32) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(write_trampoline)), unsafe.Pointer(&fd))
+ KeepAlive(p)
+ return ret
+}
+func write_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func open(name *byte, mode, perm int32) (ret int32) {
+ ret = libcCall(unsafe.Pointer(abi.FuncPCABI0(open_trampoline)), unsafe.Pointer(&name))
+ KeepAlive(name)
+ return
+}
+func open_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func nanotime1() int64 {
+ var r struct {
+ t int64 // raw timer
+ numer, denom uint32 // conversion factors. nanoseconds = t * numer / denom.
+ }
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(nanotime_trampoline)), unsafe.Pointer(&r))
+ // Note: Apple seems unconcerned about overflow here. See
+ // https://developer.apple.com/library/content/qa/qa1398/_index.html
+ // Note also, numer == denom == 1 is common.
+ t := r.t
+ if r.numer != 1 {
+ t *= int64(r.numer)
+ }
+ if r.denom != 1 {
+ t /= int64(r.denom)
+ }
+ return t
+}
+func nanotime_trampoline()
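+
+// Worked example of the conversion above. On amd64 Macs the timebase is
+// typically numer == denom == 1, so t is already in nanoseconds. On Apple
+// silicon the counter commonly runs at 24 MHz with numer = 125 and
+// denom = 3 (values observed in practice, not guaranteed by Apple):
+//
+//	t = 24000000 raw ticks (one second of counter)
+//	24000000 * 125 / 3 = 1000000000 ns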
+
+//go:nosplit
+//go:cgo_unsafe_args
+func walltime() (int64, int32) {
+ var t timespec
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(walltime_trampoline)), unsafe.Pointer(&t))
+ return t.tv_sec, int32(t.tv_nsec)
+}
+func walltime_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func sigaction(sig uint32, new *usigactiont, old *usigactiont) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaction_trampoline)), unsafe.Pointer(&sig))
+ KeepAlive(new)
+ KeepAlive(old)
+}
+func sigaction_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func sigprocmask(how uint32, new *sigset, old *sigset) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sigprocmask_trampoline)), unsafe.Pointer(&how))
+ KeepAlive(new)
+ KeepAlive(old)
+}
+func sigprocmask_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func sigaltstack(new *stackt, old *stackt) {
+ if new != nil && new.ss_flags&_SS_DISABLE != 0 && new.ss_size == 0 {
+ // Although Darwin's sigaltstack man page says it ignores the size
+ // when SS_DISABLE is set, it doesn't: sigaltstack returns ENOMEM
+ // if we don't give it a reasonable size.
+ // ref: http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20140421/214296.html
+ new.ss_size = 32768
+ }
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaltstack_trampoline)), unsafe.Pointer(&new))
+ KeepAlive(new)
+ KeepAlive(old)
+}
+func sigaltstack_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func raiseproc(sig uint32) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(raiseproc_trampoline)), unsafe.Pointer(&sig))
+}
+func raiseproc_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func setitimer(mode int32, new, old *itimerval) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(setitimer_trampoline)), unsafe.Pointer(&mode))
+ KeepAlive(new)
+ KeepAlive(old)
+}
+func setitimer_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func sysctl(mib *uint32, miblen uint32, oldp *byte, oldlenp *uintptr, newp *byte, newlen uintptr) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(sysctl_trampoline)), unsafe.Pointer(&mib))
+ KeepAlive(mib)
+ KeepAlive(oldp)
+ KeepAlive(oldlenp)
+ KeepAlive(newp)
+ return ret
+}
+func sysctl_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func sysctlbyname(name *byte, oldp *byte, oldlenp *uintptr, newp *byte, newlen uintptr) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(sysctlbyname_trampoline)), unsafe.Pointer(&name))
+ KeepAlive(name)
+ KeepAlive(oldp)
+ KeepAlive(oldlenp)
+ KeepAlive(newp)
+ return ret
+}
+func sysctlbyname_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func fcntl(fd, cmd, arg int32) int32 {
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(fcntl_trampoline)), unsafe.Pointer(&fd))
+}
+func fcntl_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func kqueue() int32 {
+ v := libcCall(unsafe.Pointer(abi.FuncPCABI0(kqueue_trampoline)), nil)
+ return v
+}
+func kqueue_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(kevent_trampoline)), unsafe.Pointer(&kq))
+ KeepAlive(ch)
+ KeepAlive(ev)
+ KeepAlive(ts)
+ return ret
+}
+func kevent_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_mutex_init(m *pthreadmutex, attr *pthreadmutexattr) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_mutex_init_trampoline)), unsafe.Pointer(&m))
+ KeepAlive(m)
+ KeepAlive(attr)
+ return ret
+}
+func pthread_mutex_init_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_mutex_lock(m *pthreadmutex) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_mutex_lock_trampoline)), unsafe.Pointer(&m))
+ KeepAlive(m)
+ return ret
+}
+func pthread_mutex_lock_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_mutex_unlock(m *pthreadmutex) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_mutex_unlock_trampoline)), unsafe.Pointer(&m))
+ KeepAlive(m)
+ return ret
+}
+func pthread_mutex_unlock_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_cond_init(c *pthreadcond, attr *pthreadcondattr) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_cond_init_trampoline)), unsafe.Pointer(&c))
+ KeepAlive(c)
+ KeepAlive(attr)
+ return ret
+}
+func pthread_cond_init_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_cond_wait(c *pthreadcond, m *pthreadmutex) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_cond_wait_trampoline)), unsafe.Pointer(&c))
+ KeepAlive(c)
+ KeepAlive(m)
+ return ret
+}
+func pthread_cond_wait_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_cond_timedwait_relative_np(c *pthreadcond, m *pthreadmutex, t *timespec) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_cond_timedwait_relative_np_trampoline)), unsafe.Pointer(&c))
+ KeepAlive(c)
+ KeepAlive(m)
+ KeepAlive(t)
+ return ret
+}
+func pthread_cond_timedwait_relative_np_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_cond_signal(c *pthreadcond) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_cond_signal_trampoline)), unsafe.Pointer(&c))
+ KeepAlive(c)
+ return ret
+}
+func pthread_cond_signal_trampoline()
+
+// Not used on Darwin, but must be defined.
+func exitThread(wait *uint32) {
+}
+
+//go:nosplit
+func closeonexec(fd int32) {
+ fcntl(fd, _F_SETFD, _FD_CLOEXEC)
+}
+
+//go:nosplit
+func setNonblock(fd int32) {
+ flags := fcntl(fd, _F_GETFL, 0)
+ fcntl(fd, _F_SETFL, flags|_O_NONBLOCK)
+}
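+
+// Outside the runtime, the same two fcntl idioms are usually written with
+// golang.org/x/sys/unix. A sketch, assuming that module (it is not used
+// by this package):
+//
+//	import "golang.org/x/sys/unix"
+//
+//	func setNonblockUser(fd int) error {
+//		flags, err := unix.FcntlInt(uintptr(fd), unix.F_GETFL, 0)
+//		if err != nil {
+//			return err
+//		}
+//		_, err = unix.FcntlInt(uintptr(fd), unix.F_SETFL, flags|unix.O_NONBLOCK)
+//		return err
+//	}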
+
+// Tell the linker that the libc_* functions are to be found
+// in a system library, with the libc_ prefix missing.
+
+//go:cgo_import_dynamic libc_pthread_attr_init pthread_attr_init "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_pthread_attr_getstacksize pthread_attr_getstacksize "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_pthread_attr_setdetachstate pthread_attr_setdetachstate "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_pthread_create pthread_create "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_pthread_self pthread_self "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_pthread_kill pthread_kill "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_exit _exit "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_raise raise "/usr/lib/libSystem.B.dylib"
+
+//go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib"
+
+//go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_madvise madvise "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_error __error "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_usleep usleep "/usr/lib/libSystem.B.dylib"
+
+//go:cgo_import_dynamic libc_mach_timebase_info mach_timebase_info "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_mach_absolute_time mach_absolute_time "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_sigaction sigaction "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_pthread_sigmask pthread_sigmask "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_sigaltstack sigaltstack "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_setitimer setitimer "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib"
+
+//go:cgo_import_dynamic libc_pthread_mutex_init pthread_mutex_init "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_pthread_mutex_lock pthread_mutex_lock "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_pthread_mutex_unlock pthread_mutex_unlock "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_pthread_cond_init pthread_cond_init "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_pthread_cond_wait pthread_cond_wait "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_pthread_cond_timedwait_relative_np pthread_cond_timedwait_relative_np "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_pthread_cond_signal pthread_cond_signal "/usr/lib/libSystem.B.dylib"
diff --git a/contrib/go/_std_1.19/src/runtime/sys_darwin_amd64.s b/contrib/go/_std_1.19/src/runtime/sys_darwin_amd64.s
new file mode 100644
index 0000000000..ba81fcc35c
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/sys_darwin_amd64.s
@@ -0,0 +1,867 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// System calls and other sys.stuff for AMD64, Darwin.
+// System calls are implemented in libSystem; this file contains
+// trampolines that convert from the Go to the C calling convention.
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "textflag.h"
+#include "cgo/abi_amd64.h"
+
+#define CLOCK_REALTIME 0
+
+// Exit the entire program (like C exit)
+TEXT runtime·exit_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 0(DI), DI // arg 1 exit status
+ CALL libc_exit(SB)
+ MOVL $0xf1, 0xf1 // crash
+ POPQ BP
+ RET
+
+TEXT runtime·open_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 8(DI), SI // arg 2 flags
+ MOVL 12(DI), DX // arg 3 mode
+ MOVQ 0(DI), DI // arg 1 pathname
+ XORL AX, AX // vararg: say "no float args"
+ CALL libc_open(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·close_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 0(DI), DI // arg 1 fd
+ CALL libc_close(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·read_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 buf
+ MOVL 16(DI), DX // arg 3 count
+ MOVL 0(DI), DI // arg 1 fd
+ CALL libc_read(SB)
+ TESTL AX, AX
+ JGE noerr
+ CALL libc_error(SB)
+ MOVL (AX), AX
+ NEGL AX // caller expects negative errno value
+noerr:
+ POPQ BP
+ RET
+
+TEXT runtime·write_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 buf
+ MOVL 16(DI), DX // arg 3 count
+ MOVQ 0(DI), DI // arg 1 fd
+ CALL libc_write(SB)
+ TESTL AX, AX
+ JGE noerr
+ CALL libc_error(SB)
+ MOVL (AX), AX
+ NEGL AX // caller expects negative errno value
+noerr:
+ POPQ BP
+ RET
+
+TEXT runtime·pipe_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ CALL libc_pipe(SB) // pointer already in DI
+ TESTL AX, AX
+ JEQ 3(PC)
+ CALL libc_error(SB) // return negative errno value
+ NEGL AX
+ POPQ BP
+ RET
+
+TEXT runtime·setitimer_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 new
+ MOVQ 16(DI), DX // arg 3 old
+ MOVL 0(DI), DI // arg 1 which
+ CALL libc_setitimer(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·madvise_trampoline(SB), NOSPLIT, $0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 len
+ MOVL 16(DI), DX // arg 3 advice
+ MOVQ 0(DI), DI // arg 1 addr
+ CALL libc_madvise(SB)
+ // ignore failure - maybe pages are locked
+ POPQ BP
+ RET
+
+TEXT runtime·mlock_trampoline(SB), NOSPLIT, $0
+ UNDEF // unimplemented
+
+GLOBL timebase<>(SB),NOPTR,$(machTimebaseInfo__size)
+
+TEXT runtime·nanotime_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ DI, BX
+ CALL libc_mach_absolute_time(SB)
+ MOVQ AX, 0(BX)
+ MOVL timebase<>+machTimebaseInfo_numer(SB), SI
+ MOVL timebase<>+machTimebaseInfo_denom(SB), DI // atomic read
+ TESTL DI, DI
+ JNE initialized
+
+ SUBQ $(machTimebaseInfo__size+15)/16*16, SP
+ MOVQ SP, DI
+ CALL libc_mach_timebase_info(SB)
+ MOVL machTimebaseInfo_numer(SP), SI
+ MOVL machTimebaseInfo_denom(SP), DI
+ ADDQ $(machTimebaseInfo__size+15)/16*16, SP
+
+ MOVL SI, timebase<>+machTimebaseInfo_numer(SB)
+ MOVL DI, AX
+ XCHGL AX, timebase<>+machTimebaseInfo_denom(SB) // atomic write
+
+initialized:
+ MOVL SI, 8(BX)
+ MOVL DI, 12(BX)
+ MOVQ BP, SP
+ POPQ BP
+ RET
+
+TEXT runtime·walltime_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP // make a frame; keep stack aligned
+ MOVQ SP, BP
+ MOVQ DI, SI // arg 2 timespec
+ MOVL $CLOCK_REALTIME, DI // arg 1 clock_id
+ CALL libc_clock_gettime(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·sigaction_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 new
+ MOVQ 16(DI), DX // arg 3 old
+ MOVL 0(DI), DI // arg 1 sig
+ CALL libc_sigaction(SB)
+ TESTL AX, AX
+ JEQ 2(PC)
+ MOVL $0xf1, 0xf1 // crash
+ POPQ BP
+ RET
+
+TEXT runtime·sigprocmask_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 new
+ MOVQ 16(DI), DX // arg 3 old
+ MOVL 0(DI), DI // arg 1 how
+ CALL libc_pthread_sigmask(SB)
+ TESTL AX, AX
+ JEQ 2(PC)
+ MOVL $0xf1, 0xf1 // crash
+ POPQ BP
+ RET
+
+TEXT runtime·sigaltstack_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 old
+ MOVQ 0(DI), DI // arg 1 new
+ CALL libc_sigaltstack(SB)
+ TESTQ AX, AX
+ JEQ 2(PC)
+ MOVL $0xf1, 0xf1 // crash
+ POPQ BP
+ RET
+
+TEXT runtime·raiseproc_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 0(DI), BX // signal
+ CALL libc_getpid(SB)
+ MOVL AX, DI // arg 1 pid
+ MOVL BX, SI // arg 2 signal
+ CALL libc_kill(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
+ MOVQ fn+0(FP), AX
+ MOVL sig+8(FP), DI
+ MOVQ info+16(FP), SI
+ MOVQ ctx+24(FP), DX
+ PUSHQ BP
+ MOVQ SP, BP
+ ANDQ $~15, SP // alignment for x86_64 ABI
+ CALL AX
+ MOVQ BP, SP
+ POPQ BP
+ RET
+
+// This is the function registered during sigaction and is invoked when
+// a signal is received. It just redirects to the Go function sigtrampgo.
+// Called using C ABI.
+TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME,$0
+ // Transition from C ABI to Go ABI.
+ PUSH_REGS_HOST_TO_ABI0()
+
+ // Set up ABIInternal environment: g in R14, cleared X15.
+ get_tls(R12)
+ MOVQ g(R12), R14
+ PXOR X15, X15
+
+ // Reserve space for spill slots.
+ NOP SP // disable vet stack checking
+ ADJSP $24
+
+ // Call into the Go signal handler
+ MOVQ DI, AX // sig
+ MOVQ SI, BX // info
+ MOVQ DX, CX // ctx
+ CALL ·sigtrampgo<ABIInternal>(SB)
+
+ ADJSP $-24
+
+ POP_REGS_HOST_TO_ABI0()
+ RET
+
+// Called using C ABI.
+TEXT runtime·sigprofNonGoWrapper<>(SB),NOSPLIT,$0
+ // Transition from C ABI to Go ABI.
+ PUSH_REGS_HOST_TO_ABI0()
+
+ // Call into the Go signal handler
+ NOP SP // disable vet stack checking
+ ADJSP $24
+ MOVL DI, 0(SP) // sig
+ MOVQ SI, 8(SP) // info
+ MOVQ DX, 16(SP) // ctx
+ CALL ·sigprofNonGo(SB)
+ ADJSP $-24
+
+ POP_REGS_HOST_TO_ABI0()
+ RET
+
+// Used instead of sigtramp in programs that use cgo.
+// Arguments from kernel are in DI, SI, DX.
+TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
+ // If no traceback function, do usual sigtramp.
+ MOVQ runtime·cgoTraceback(SB), AX
+ TESTQ AX, AX
+ JZ sigtramp
+
+ // If no traceback support function, which means that
+ // runtime/cgo was not linked in, do usual sigtramp.
+ MOVQ _cgo_callers(SB), AX
+ TESTQ AX, AX
+ JZ sigtramp
+
+ // Figure out if we are currently in a cgo call.
+ // If not, just do usual sigtramp.
+ get_tls(CX)
+ MOVQ g(CX),AX
+ TESTQ AX, AX
+ JZ sigtrampnog // g == nil
+ MOVQ g_m(AX), AX
+ TESTQ AX, AX
+ JZ sigtramp // g.m == nil
+ MOVL m_ncgo(AX), CX
+ TESTL CX, CX
+ JZ sigtramp // g.m.ncgo == 0
+ MOVQ m_curg(AX), CX
+ TESTQ CX, CX
+ JZ sigtramp // g.m.curg == nil
+ MOVQ g_syscallsp(CX), CX
+ TESTQ CX, CX
+ JZ sigtramp // g.m.curg.syscallsp == 0
+ MOVQ m_cgoCallers(AX), R8
+ TESTQ R8, R8
+ JZ sigtramp // g.m.cgoCallers == nil
+ MOVL m_cgoCallersUse(AX), CX
+ TESTL CX, CX
+ JNZ sigtramp // g.m.cgoCallersUse != 0
+
+ // Jump to a function in runtime/cgo.
+ // That function, written in C, will call the user's traceback
+ // function with proper unwind info, and will then call back here.
+ // The first three arguments, and the fifth, are already in registers.
+ // Set the two remaining arguments now.
+ MOVQ runtime·cgoTraceback(SB), CX
+ MOVQ $runtime·sigtramp(SB), R9
+ MOVQ _cgo_callers(SB), AX
+ JMP AX
+
+sigtramp:
+ JMP runtime·sigtramp(SB)
+
+sigtrampnog:
+ // Signal arrived on a non-Go thread. If this is SIGPROF, get a
+ // stack trace.
+ CMPL DI, $27 // 27 == SIGPROF
+ JNZ sigtramp
+
+ // Lock sigprofCallersUse.
+ MOVL $0, AX
+ MOVL $1, CX
+ MOVQ $runtime·sigprofCallersUse(SB), R11
+ LOCK
+ CMPXCHGL CX, 0(R11)
+ JNZ sigtramp // Skip stack trace if already locked.
+
+ // Jump to the traceback function in runtime/cgo.
+ // It will call back to sigprofNonGo, via sigprofNonGoWrapper, to convert
+ // the arguments to the Go calling convention.
+ // First three arguments to traceback function are in registers already.
+ MOVQ runtime·cgoTraceback(SB), CX
+ MOVQ $runtime·sigprofCallers(SB), R8
+ MOVQ $runtime·sigprofNonGoWrapper<>(SB), R9
+ MOVQ _cgo_callers(SB), AX
+ JMP AX
+
+TEXT runtime·mmap_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP // make a frame; keep stack aligned
+ MOVQ SP, BP
+ MOVQ DI, BX
+ MOVQ 0(BX), DI // arg 1 addr
+ MOVQ 8(BX), SI // arg 2 len
+ MOVL 16(BX), DX // arg 3 prot
+ MOVL 20(BX), CX // arg 4 flags
+ MOVL 24(BX), R8 // arg 5 fd
+ MOVL 28(BX), R9 // arg 6 offset
+ CALL libc_mmap(SB)
+ XORL DX, DX
+ CMPQ AX, $-1
+ JNE ok
+ CALL libc_error(SB)
+ MOVLQSX (AX), DX // errno
+ XORL AX, AX
+ok:
+ MOVQ AX, 32(BX)
+ MOVQ DX, 40(BX)
+ POPQ BP
+ RET
+
+TEXT runtime·munmap_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 len
+ MOVQ 0(DI), DI // arg 1 addr
+ CALL libc_munmap(SB)
+ TESTQ AX, AX
+ JEQ 2(PC)
+ MOVL $0xf1, 0xf1 // crash
+ POPQ BP
+ RET
+
+TEXT runtime·usleep_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 0(DI), DI // arg 1 usec
+ CALL libc_usleep(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·settls(SB),NOSPLIT,$32
+ // Nothing to do on Darwin; pthread has already set up thread-local storage.
+ RET
+
+TEXT runtime·sysctl_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 8(DI), SI // arg 2 miblen
+ MOVQ 16(DI), DX // arg 3 oldp
+ MOVQ 24(DI), CX // arg 4 oldlenp
+ MOVQ 32(DI), R8 // arg 5 newp
+ MOVQ 40(DI), R9 // arg 6 newlen
+ MOVQ 0(DI), DI // arg 1 mib
+ CALL libc_sysctl(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·sysctlbyname_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 oldp
+ MOVQ 16(DI), DX // arg 3 oldlenp
+ MOVQ 24(DI), CX // arg 4 newp
+ MOVQ 32(DI), R8 // arg 5 newlen
+ MOVQ 0(DI), DI // arg 1 name
+ CALL libc_sysctlbyname(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·kqueue_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ CALL libc_kqueue(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·kevent_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 ch
+ MOVL 16(DI), DX // arg 3 nch
+ MOVQ 24(DI), CX // arg 4 ev
+ MOVL 32(DI), R8 // arg 5 nev
+ MOVQ 40(DI), R9 // arg 6 ts
+ MOVL 0(DI), DI // arg 1 kq
+ CALL libc_kevent(SB)
+ CMPL AX, $-1
+ JNE ok
+ CALL libc_error(SB)
+ MOVLQSX (AX), AX // errno
+ NEGQ AX // caller wants it as a negative error code
+ok:
+ POPQ BP
+ RET
+
+TEXT runtime·fcntl_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 4(DI), SI // arg 2 cmd
+ MOVL 8(DI), DX // arg 3 arg
+ MOVL 0(DI), DI // arg 1 fd
+ XORL AX, AX // vararg: say "no float args"
+ CALL libc_fcntl(SB)
+ POPQ BP
+ RET
+
+// mstart_stub is the first function executed on a new thread started by pthread_create.
+// It just does some low-level setup and then calls mstart.
+// Note: called with the C calling convention.
+TEXT runtime·mstart_stub(SB),NOSPLIT,$0
+ // DI points to the m.
+ // We are already on m's g0 stack.
+
+ // Transition from C ABI to Go ABI.
+ PUSH_REGS_HOST_TO_ABI0()
+
+ MOVQ m_g0(DI), DX // g
+
+ // Initialize TLS entry.
+ // See cmd/link/internal/ld/sym.go:computeTLSOffset.
+ MOVQ DX, 0x30(GS)
+
+ CALL runtime·mstart(SB)
+
+ POP_REGS_HOST_TO_ABI0()
+
+ // Go is all done with this OS thread.
+ // Tell pthread everything is ok (we never join with this thread, so
+ // the value here doesn't really matter).
+ XORL AX, AX
+ RET
+
+// These trampolines convert from the Go calling convention to the C calling convention.
+// They should be called with asmcgocall.
+// A pointer to the arguments is passed in DI.
+// A single int32 result is returned in AX.
+// (For more results, make an args/results structure.)
+TEXT runtime·pthread_attr_init_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP // make frame, keep stack 16-byte aligned.
+ MOVQ SP, BP
+ MOVQ 0(DI), DI // arg 1 attr
+ CALL libc_pthread_attr_init(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·pthread_attr_getstacksize_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 size
+ MOVQ 0(DI), DI // arg 1 attr
+ CALL libc_pthread_attr_getstacksize(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·pthread_attr_setdetachstate_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 state
+ MOVQ 0(DI), DI // arg 1 attr
+ CALL libc_pthread_attr_setdetachstate(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·pthread_create_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ SUBQ $16, SP
+ MOVQ 0(DI), SI // arg 2 attr
+ MOVQ 8(DI), DX // arg 3 start
+ MOVQ 16(DI), CX // arg 4 arg
+ MOVQ SP, DI // arg 1 &threadid (which we throw away)
+ CALL libc_pthread_create(SB)
+ MOVQ BP, SP
+ POPQ BP
+ RET
+
+TEXT runtime·raise_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 0(DI), DI // arg 1 signal
+ CALL libc_raise(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·pthread_mutex_init_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 attr
+ MOVQ 0(DI), DI // arg 1 mutex
+ CALL libc_pthread_mutex_init(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·pthread_mutex_lock_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 0(DI), DI // arg 1 mutex
+ CALL libc_pthread_mutex_lock(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·pthread_mutex_unlock_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 0(DI), DI // arg 1 mutex
+ CALL libc_pthread_mutex_unlock(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·pthread_cond_init_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 attr
+ MOVQ 0(DI), DI // arg 1 cond
+ CALL libc_pthread_cond_init(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·pthread_cond_wait_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 mutex
+ MOVQ 0(DI), DI // arg 1 cond
+ CALL libc_pthread_cond_wait(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·pthread_cond_timedwait_relative_np_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 mutex
+ MOVQ 16(DI), DX // arg 3 timeout
+ MOVQ 0(DI), DI // arg 1 cond
+ CALL libc_pthread_cond_timedwait_relative_np(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·pthread_cond_signal_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 0(DI), DI // arg 1 cond
+ CALL libc_pthread_cond_signal(SB)
+ POPQ BP
+ RET
+
+TEXT runtime·pthread_self_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ DI, BX // BX is caller-save
+ CALL libc_pthread_self(SB)
+ MOVQ AX, 0(BX) // return value
+ POPQ BP
+ RET
+
+TEXT runtime·pthread_kill_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 sig
+ MOVQ 0(DI), DI // arg 1 thread
+ CALL libc_pthread_kill(SB)
+ POPQ BP
+ RET
+
+// syscall calls a function in libc on behalf of the syscall package.
+// syscall takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscall must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall expects a 32-bit result and tests for 32-bit -1
+// to decide there was an error.
+TEXT runtime·syscall(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ SUBQ $16, SP
+ MOVQ (0*8)(DI), CX // fn
+ MOVQ (2*8)(DI), SI // a2
+ MOVQ (3*8)(DI), DX // a3
+ MOVQ DI, (SP)
+ MOVQ (1*8)(DI), DI // a1
+ XORL AX, AX // vararg: say "no float args"
+
+ CALL CX
+
+ MOVQ (SP), DI
+ MOVQ AX, (4*8)(DI) // r1
+ MOVQ DX, (5*8)(DI) // r2
+
+ // Standard libc functions return -1 on error
+ // and set errno.
+ CMPL AX, $-1 // Note: high 32 bits are junk
+ JNE ok
+
+ // Get error code from libc.
+ CALL libc_error(SB)
+ MOVLQSX (AX), AX
+ MOVQ (SP), DI
+ MOVQ AX, (6*8)(DI) // err
+
+ok:
+ XORL AX, AX // no error (it's ignored anyway)
+ MOVQ BP, SP
+ POPQ BP
+ RET
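+
+// For reference, the struct offsets used above (all fields 8 bytes):
+//	 0(DI) fn    32(DI) r1
+//	 8(DI) a1    40(DI) r2
+//	16(DI) a2    48(DI) err
+//	24(DI) a3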
+
+// syscallX calls a function in libc on behalf of the syscall package.
+// syscallX takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscallX must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscallX is like syscall but expects a 64-bit result
+// and tests for 64-bit -1 to decide there was an error.
+TEXT runtime·syscallX(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ SUBQ $16, SP
+ MOVQ (0*8)(DI), CX // fn
+ MOVQ (2*8)(DI), SI // a2
+ MOVQ (3*8)(DI), DX // a3
+ MOVQ DI, (SP)
+ MOVQ (1*8)(DI), DI // a1
+ XORL AX, AX // vararg: say "no float args"
+
+ CALL CX
+
+ MOVQ (SP), DI
+ MOVQ AX, (4*8)(DI) // r1
+ MOVQ DX, (5*8)(DI) // r2
+
+ // Standard libc functions return -1 on error
+ // and set errno.
+ CMPQ AX, $-1
+ JNE ok
+
+ // Get error code from libc.
+ CALL libc_error(SB)
+ MOVLQSX (AX), AX
+ MOVQ (SP), DI
+ MOVQ AX, (6*8)(DI) // err
+
+ok:
+ XORL AX, AX // no error (it's ignored anyway)
+ MOVQ BP, SP
+ POPQ BP
+ RET
+
+// syscallPtr is like syscallX except that the libc function reports an
+// error by returning NULL and setting errno.
+TEXT runtime·syscallPtr(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ SUBQ $16, SP
+ MOVQ (0*8)(DI), CX // fn
+ MOVQ (2*8)(DI), SI // a2
+ MOVQ (3*8)(DI), DX // a3
+ MOVQ DI, (SP)
+ MOVQ (1*8)(DI), DI // a1
+ XORL AX, AX // vararg: say "no float args"
+
+ CALL CX
+
+ MOVQ (SP), DI
+ MOVQ AX, (4*8)(DI) // r1
+ MOVQ DX, (5*8)(DI) // r2
+
+ // syscallPtr libc functions return NULL on error
+ // and set errno.
+ TESTQ AX, AX
+ JNE ok
+
+ // Get error code from libc.
+ CALL libc_error(SB)
+ MOVLQSX (AX), AX
+ MOVQ (SP), DI
+ MOVQ AX, (6*8)(DI) // err
+
+ok:
+ XORL AX, AX // no error (it's ignored anyway)
+ MOVQ BP, SP
+ POPQ BP
+ RET
+
+// syscall6 calls a function in libc on behalf of the syscall package.
+// syscall6 takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// a4 uintptr
+// a5 uintptr
+// a6 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscall6 must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall6 expects a 32-bit result and tests for 32-bit -1
+// to decide there was an error.
+TEXT runtime·syscall6(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ SUBQ $16, SP
+ MOVQ (0*8)(DI), R11 // fn
+ MOVQ (2*8)(DI), SI // a2
+ MOVQ (3*8)(DI), DX // a3
+ MOVQ (4*8)(DI), CX // a4
+ MOVQ (5*8)(DI), R8 // a5
+ MOVQ (6*8)(DI), R9 // a6
+ MOVQ DI, (SP)
+ MOVQ (1*8)(DI), DI // a1
+ XORL AX, AX // vararg: say "no float args"
+
+ CALL R11
+
+ MOVQ (SP), DI
+ MOVQ AX, (7*8)(DI) // r1
+ MOVQ DX, (8*8)(DI) // r2
+
+ CMPL AX, $-1
+ JNE ok
+
+ CALL libc_error(SB)
+ MOVLQSX (AX), AX
+ MOVQ (SP), DI
+ MOVQ AX, (9*8)(DI) // err
+
+ok:
+ XORL AX, AX // no error (it's ignored anyway)
+ MOVQ BP, SP
+ POPQ BP
+ RET
+
+// syscall6X calls a function in libc on behalf of the syscall package.
+// syscall6X takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// a4 uintptr
+// a5 uintptr
+// a6 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscall6X must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall6X is like syscall6 but expects a 64-bit result
+// and tests for 64-bit -1 to decide there was an error.
+TEXT runtime·syscall6X(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ SUBQ $16, SP
+ MOVQ (0*8)(DI), R11 // fn
+ MOVQ (2*8)(DI), SI // a2
+ MOVQ (3*8)(DI), DX // a3
+ MOVQ (4*8)(DI), CX // a4
+ MOVQ (5*8)(DI), R8 // a5
+ MOVQ (6*8)(DI), R9 // a6
+ MOVQ DI, (SP)
+ MOVQ (1*8)(DI), DI // a1
+ XORL AX, AX // vararg: say "no float args"
+
+ CALL R11
+
+ MOVQ (SP), DI
+ MOVQ AX, (7*8)(DI) // r1
+ MOVQ DX, (8*8)(DI) // r2
+
+ CMPQ AX, $-1
+ JNE ok
+
+ CALL libc_error(SB)
+ MOVLQSX (AX), AX
+ MOVQ (SP), DI
+ MOVQ AX, (9*8)(DI) // err
+
+ok:
+ XORL AX, AX // no error (it's ignored anyway)
+ MOVQ BP, SP
+ POPQ BP
+ RET
+
+// syscall_x509 is for crypto/x509. It is like syscall6 but does not check for errors,
+// takes 5 uintptrs and 1 float64, and only returns one value,
+// for use with standard C ABI functions.
+TEXT runtime·syscall_x509(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ SUBQ $16, SP
+ MOVQ (0*8)(DI), R11 // fn
+ MOVQ (2*8)(DI), SI // a2
+ MOVQ (3*8)(DI), DX // a3
+ MOVQ (4*8)(DI), CX // a4
+ MOVQ (5*8)(DI), R8 // a5
+ MOVQ (6*8)(DI), X0 // f1
+ MOVQ DI, (SP)
+ MOVQ (1*8)(DI), DI // a1
+ XORL AX, AX // vararg: say "no float args"
+
+ CALL R11
+
+ MOVQ (SP), DI
+ MOVQ AX, (7*8)(DI) // r1
+
+ XORL AX, AX // no error (it's ignored anyway)
+ MOVQ BP, SP
+ POPQ BP
+ RET
diff --git a/contrib/go/_std_1.19/src/runtime/sys_libc.go b/contrib/go/_std_1.19/src/runtime/sys_libc.go
new file mode 100644
index 0000000000..0c6f13ca9f
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/sys_libc.go
@@ -0,0 +1,54 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || (openbsd && !mips64)
+
+package runtime
+
+import "unsafe"
+
+// Call fn with arg as its argument. Return what fn returns.
+// fn is the raw pc value of the entry point of the desired function.
+// Switches to the system stack, if not already there.
+// Preserves the calling point as the location where a profiler traceback will begin.
+//
+//go:nosplit
+func libcCall(fn, arg unsafe.Pointer) int32 {
+ // Leave caller's PC/SP/G around for traceback.
+ gp := getg()
+ var mp *m
+ if gp != nil {
+ mp = gp.m
+ }
+ if mp != nil && mp.libcallsp == 0 {
+ mp.libcallg.set(gp)
+ mp.libcallpc = getcallerpc()
+ // sp must be the last, because once async cpu profiler finds
+ // all three values to be non-zero, it will use them
+ mp.libcallsp = getcallersp()
+ } else {
+ // Make sure we don't reset libcallsp. This makes
+ // libcCall reentrant; we remember the g/pc/sp for the
+ // first call on an M, until that libcCall instance
+ // returns. Reentrance only matters for signals, as
+ // libc never calls back into Go. The tricky case is
+ // where we call libcX from an M and record g/pc/sp.
+ // Before that call returns, a signal arrives on the
+ // same M and the signal handling code calls another
+ // libc function. We don't want that second libcCall
+ // from within the handler to be recorded, and we
+ // don't want that call's completion to zero
+ // libcallsp.
+ // We don't need to set libcall* while we're in a sighandler
+ // (even if we're not currently in libc) because we block all
+ // signals while we're handling a signal. That includes the
+ // profile signal, which is the one that uses the libcall* info.
+ mp = nil
+ }
+ res := asmcgocall(fn, arg)
+ if mp != nil {
+ mp.libcallsp = 0
+ }
+ return res
+}
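+
+// A sketch of the reentrant case described above, on a single M:
+//
+//	libcCall(write)              // records libcallg/pc/sp
+//	  signal arrives
+//	  handler -> libcCall(...)   // libcallsp != 0, so mp = nil:
+//	  handler returns            // record nothing, clear nothing
+//	write returns                // outer call clears libcallsp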
diff --git a/contrib/go/_std_1.19/src/runtime/sys_linux_amd64.s b/contrib/go/_std_1.19/src/runtime/sys_linux_amd64.s
new file mode 100644
index 0000000000..ca6ecb13eb
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/sys_linux_amd64.s
@@ -0,0 +1,757 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//
+// System calls and other sys.stuff for AMD64, Linux
+//
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "textflag.h"
+#include "cgo/abi_amd64.h"
+
+#define AT_FDCWD -100
+
+#define SYS_read 0
+#define SYS_write 1
+#define SYS_close 3
+#define SYS_mmap 9
+#define SYS_munmap 11
+#define SYS_brk 12
+#define SYS_rt_sigaction 13
+#define SYS_rt_sigprocmask 14
+#define SYS_rt_sigreturn 15
+#define SYS_sched_yield 24
+#define SYS_mincore 27
+#define SYS_madvise 28
+#define SYS_nanosleep 35
+#define SYS_setitimer 38
+#define SYS_getpid 39
+#define SYS_socket 41
+#define SYS_connect 42
+#define SYS_clone 56
+#define SYS_exit 60
+#define SYS_kill 62
+#define SYS_fcntl 72
+#define SYS_sigaltstack 131
+#define SYS_arch_prctl 158
+#define SYS_gettid 186
+#define SYS_futex 202
+#define SYS_sched_getaffinity 204
+#define SYS_epoll_create 213
+#define SYS_timer_create 222
+#define SYS_timer_settime 223
+#define SYS_timer_delete 226
+#define SYS_clock_gettime 228
+#define SYS_exit_group 231
+#define SYS_epoll_ctl 233
+#define SYS_tgkill 234
+#define SYS_openat 257
+#define SYS_faccessat 269
+#define SYS_epoll_pwait 281
+#define SYS_epoll_create1 291
+#define SYS_pipe2 293
+
+TEXT runtime·exit(SB),NOSPLIT,$0-4
+ MOVL code+0(FP), DI
+ MOVL $SYS_exit_group, AX
+ SYSCALL
+ RET
+
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-8
+ MOVQ wait+0(FP), AX
+ // We're done using the stack.
+ MOVL $0, (AX)
+ MOVL $0, DI // exit code
+ MOVL $SYS_exit, AX
+ SYSCALL
+ // We may not even have a stack any more.
+ INT $3
+ JMP 0(PC)
+
+TEXT runtime·open(SB),NOSPLIT,$0-20
+ // This uses openat instead of open, because Android O blocks open.
+ MOVL $AT_FDCWD, DI // AT_FDCWD, so this acts like open
+ MOVQ name+0(FP), SI
+ MOVL mode+8(FP), DX
+ MOVL perm+12(FP), R10
+ MOVL $SYS_openat, AX
+ SYSCALL
+ CMPQ AX, $0xfffffffffffff001
+ JLS 2(PC)
+ MOVL $-1, AX
+ MOVL AX, ret+16(FP)
+ RET
+
+TEXT runtime·closefd(SB),NOSPLIT,$0-12
+ MOVL fd+0(FP), DI
+ MOVL $SYS_close, AX
+ SYSCALL
+ CMPQ AX, $0xfffffffffffff001
+ JLS 2(PC)
+ MOVL $-1, AX
+ MOVL AX, ret+8(FP)
+ RET
+
+TEXT runtime·write1(SB),NOSPLIT,$0-28
+ MOVQ fd+0(FP), DI
+ MOVQ p+8(FP), SI
+ MOVL n+16(FP), DX
+ MOVL $SYS_write, AX
+ SYSCALL
+ MOVL AX, ret+24(FP)
+ RET
+
+TEXT runtime·read(SB),NOSPLIT,$0-28
+ MOVL fd+0(FP), DI
+ MOVQ p+8(FP), SI
+ MOVL n+16(FP), DX
+ MOVL $SYS_read, AX
+ SYSCALL
+ MOVL AX, ret+24(FP)
+ RET
+
+// func pipe2(flags int32) (r, w int32, errno int32)
+TEXT runtime·pipe2(SB),NOSPLIT,$0-20
+ LEAQ r+8(FP), DI
+ MOVL flags+0(FP), SI
+ MOVL $SYS_pipe2, AX
+ SYSCALL
+ MOVL AX, errno+16(FP)
+ RET
+
+TEXT runtime·usleep(SB),NOSPLIT,$16
+ MOVL $0, DX
+ MOVL usec+0(FP), AX
+ MOVL $1000000, CX
+ DIVL CX
+ MOVQ AX, 0(SP)
+ MOVL $1000, AX // usec to nsec
+ MULL DX
+ MOVQ AX, 8(SP)
+
+ // nanosleep(&ts, 0)
+ MOVQ SP, DI
+ MOVL $0, SI
+ MOVL $SYS_nanosleep, AX
+ SYSCALL
+ RET
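+
+// Worked example of the split above, for usec = 1500000:
+//	1500000 / 1000000 = 1 -> tv_sec at 0(SP), remainder 500000 in DX
+//	500000 * 1000 = 500000000 -> tv_nsec at 8(SP)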
+
+TEXT runtime·gettid(SB),NOSPLIT,$0-4
+ MOVL $SYS_gettid, AX
+ SYSCALL
+ MOVL AX, ret+0(FP)
+ RET
+
+TEXT runtime·raise(SB),NOSPLIT,$0
+ MOVL $SYS_getpid, AX
+ SYSCALL
+ MOVL AX, R12
+ MOVL $SYS_gettid, AX
+ SYSCALL
+ MOVL AX, SI // arg 2 tid
+ MOVL R12, DI // arg 1 pid
+ MOVL sig+0(FP), DX // arg 3
+ MOVL $SYS_tgkill, AX
+ SYSCALL
+ RET
+
+TEXT runtime·raiseproc(SB),NOSPLIT,$0
+ MOVL $SYS_getpid, AX
+ SYSCALL
+ MOVL AX, DI // arg 1 pid
+ MOVL sig+0(FP), SI // arg 2
+ MOVL $SYS_kill, AX
+ SYSCALL
+ RET
+
+TEXT ·getpid(SB),NOSPLIT,$0-8
+ MOVL $SYS_getpid, AX
+ SYSCALL
+ MOVQ AX, ret+0(FP)
+ RET
+
+TEXT ·tgkill(SB),NOSPLIT,$0
+ MOVQ tgid+0(FP), DI
+ MOVQ tid+8(FP), SI
+ MOVQ sig+16(FP), DX
+ MOVL $SYS_tgkill, AX
+ SYSCALL
+ RET
+
+TEXT runtime·setitimer(SB),NOSPLIT,$0-24
+ MOVL mode+0(FP), DI
+ MOVQ new+8(FP), SI
+ MOVQ old+16(FP), DX
+ MOVL $SYS_setitimer, AX
+ SYSCALL
+ RET
+
+TEXT runtime·timer_create(SB),NOSPLIT,$0-28
+ MOVL clockid+0(FP), DI
+ MOVQ sevp+8(FP), SI
+ MOVQ timerid+16(FP), DX
+ MOVL $SYS_timer_create, AX
+ SYSCALL
+ MOVL AX, ret+24(FP)
+ RET
+
+TEXT runtime·timer_settime(SB),NOSPLIT,$0-28
+ MOVL timerid+0(FP), DI
+ MOVL flags+4(FP), SI
+ MOVQ new+8(FP), DX
+ MOVQ old+16(FP), R10
+ MOVL $SYS_timer_settime, AX
+ SYSCALL
+ MOVL AX, ret+24(FP)
+ RET
+
+TEXT runtime·timer_delete(SB),NOSPLIT,$0-12
+ MOVL timerid+0(FP), DI
+ MOVL $SYS_timer_delete, AX
+ SYSCALL
+ MOVL AX, ret+8(FP)
+ RET
+
+TEXT runtime·mincore(SB),NOSPLIT,$0-28
+ MOVQ addr+0(FP), DI
+ MOVQ n+8(FP), SI
+ MOVQ dst+16(FP), DX
+ MOVL $SYS_mincore, AX
+ SYSCALL
+ MOVL AX, ret+24(FP)
+ RET
+
+// func nanotime1() int64
+TEXT runtime·nanotime1(SB),NOSPLIT,$16-8
+ // We don't know how much stack space the VDSO code will need,
+ // so switch to g0.
+ // In particular, a kernel configured with CONFIG_OPTIMIZE_INLINING=n
+ // and hardening can use a full page of stack space in gettime_sym
+ // due to stack probes inserted to avoid stack/heap collisions.
+ // See issue #20427.
+
+ MOVQ SP, R12 // Save old SP; R12 unchanged by C code.
+
+ MOVQ g_m(R14), BX // BX unchanged by C code.
+
+ // Set vdsoPC and vdsoSP for SIGPROF traceback.
+ // Save the old values on stack and restore them on exit,
+ // so this function is reentrant.
+ MOVQ m_vdsoPC(BX), CX
+ MOVQ m_vdsoSP(BX), DX
+ MOVQ CX, 0(SP)
+ MOVQ DX, 8(SP)
+
+ LEAQ ret+0(FP), DX
+ MOVQ -8(DX), CX
+ MOVQ CX, m_vdsoPC(BX)
+ MOVQ DX, m_vdsoSP(BX)
+
+ CMPQ R14, m_curg(BX) // Only switch if on curg.
+ JNE noswitch
+
+ MOVQ m_g0(BX), DX
+ MOVQ (g_sched+gobuf_sp)(DX), SP // Set SP to g0 stack
+
+noswitch:
+ SUBQ $16, SP // Space for results
+ ANDQ $~15, SP // Align for C code
+
+ MOVL $1, DI // CLOCK_MONOTONIC
+ LEAQ 0(SP), SI
+ MOVQ runtime·vdsoClockgettimeSym(SB), AX
+ CMPQ AX, $0
+ JEQ fallback
+ CALL AX
+ret:
+ MOVQ 0(SP), AX // sec
+ MOVQ 8(SP), DX // nsec
+ MOVQ R12, SP // Restore real SP
+ // Restore vdsoPC, vdsoSP
+ // We don't worry about being signaled between the two stores.
+ // If we are not in a signal handler, we'll restore vdsoSP to 0,
+ // and no one will care about vdsoPC. If we are in a signal handler,
+ // we cannot receive another signal.
+ MOVQ 8(SP), CX
+ MOVQ CX, m_vdsoSP(BX)
+ MOVQ 0(SP), CX
+ MOVQ CX, m_vdsoPC(BX)
+ // sec is in AX, nsec in DX
+ // return nsec in AX
+ IMULQ $1000000000, AX
+ ADDQ DX, AX
+ MOVQ AX, ret+0(FP)
+ RET
+fallback:
+ MOVQ $SYS_clock_gettime, AX
+ SYSCALL
+ JMP ret
+
+TEXT runtime·rtsigprocmask(SB),NOSPLIT,$0-28
+ MOVL how+0(FP), DI
+ MOVQ new+8(FP), SI
+ MOVQ old+16(FP), DX
+ MOVL size+24(FP), R10
+ MOVL $SYS_rt_sigprocmask, AX
+ SYSCALL
+ CMPQ AX, $0xfffffffffffff001
+ JLS 2(PC)
+ MOVL $0xf1, 0xf1 // crash
+ RET
+
+TEXT runtime·rt_sigaction(SB),NOSPLIT,$0-36
+ MOVQ sig+0(FP), DI
+ MOVQ new+8(FP), SI
+ MOVQ old+16(FP), DX
+ MOVQ size+24(FP), R10
+ MOVL $SYS_rt_sigaction, AX
+ SYSCALL
+ MOVL AX, ret+32(FP)
+ RET
+
+// Call the function stored in _cgo_sigaction using the GCC calling convention.
+TEXT runtime·callCgoSigaction(SB),NOSPLIT,$16
+ MOVQ sig+0(FP), DI
+ MOVQ new+8(FP), SI
+ MOVQ old+16(FP), DX
+ MOVQ _cgo_sigaction(SB), AX
+ MOVQ SP, BX // callee-saved
+ ANDQ $~15, SP // alignment as per amd64 psABI
+ CALL AX
+ MOVQ BX, SP
+ MOVL AX, ret+24(FP)
+ RET
+
+TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
+ MOVQ fn+0(FP), AX
+ MOVL sig+8(FP), DI
+ MOVQ info+16(FP), SI
+ MOVQ ctx+24(FP), DX
+ PUSHQ BP
+ MOVQ SP, BP
+ ANDQ $~15, SP // alignment for x86_64 ABI
+ CALL AX
+ MOVQ BP, SP
+ POPQ BP
+ RET
+
+// Called using C ABI.
+TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME,$0
+ // Transition from C ABI to Go ABI.
+ PUSH_REGS_HOST_TO_ABI0()
+
+ // Set up ABIInternal environment: g in R14, cleared X15.
+ get_tls(R12)
+ MOVQ g(R12), R14
+ PXOR X15, X15
+
+ // Reserve space for spill slots.
+ NOP SP // disable vet stack checking
+ ADJSP $24
+
+ // Call into the Go signal handler
+ MOVQ DI, AX // sig
+ MOVQ SI, BX // info
+ MOVQ DX, CX // ctx
+ CALL ·sigtrampgo<ABIInternal>(SB)
+
+ ADJSP $-24
+
+ POP_REGS_HOST_TO_ABI0()
+ RET
+
+// Called using C ABI.
+TEXT runtime·sigprofNonGoWrapper<>(SB),NOSPLIT,$0
+ // Transition from C ABI to Go ABI.
+ PUSH_REGS_HOST_TO_ABI0()
+
+ // Set up ABIInternal environment: g in R14, cleared X15.
+ get_tls(R12)
+ MOVQ g(R12), R14
+ PXOR X15, X15
+
+ // Reserve space for spill slots.
+ NOP SP // disable vet stack checking
+ ADJSP $24
+
+ // Call into the Go signal handler
+ MOVQ DI, AX // sig
+ MOVQ SI, BX // info
+ MOVQ DX, CX // ctx
+ CALL ·sigprofNonGo<ABIInternal>(SB)
+
+ ADJSP $-24
+
+ POP_REGS_HOST_TO_ABI0()
+ RET
+
+// Used instead of sigtramp in programs that use cgo.
+// Arguments from kernel are in DI, SI, DX.
+TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
+ // If no traceback function, do usual sigtramp.
+ MOVQ runtime·cgoTraceback(SB), AX
+ TESTQ AX, AX
+ JZ sigtramp
+
+ // If no traceback support function, which means that
+ // runtime/cgo was not linked in, do usual sigtramp.
+ MOVQ _cgo_callers(SB), AX
+ TESTQ AX, AX
+ JZ sigtramp
+
+ // Figure out if we are currently in a cgo call.
+ // If not, just do usual sigtramp.
+ get_tls(CX)
+ MOVQ g(CX),AX
+ TESTQ AX, AX
+ JZ sigtrampnog // g == nil
+ MOVQ g_m(AX), AX
+ TESTQ AX, AX
+ JZ sigtramp // g.m == nil
+ MOVL m_ncgo(AX), CX
+ TESTL CX, CX
+ JZ sigtramp // g.m.ncgo == 0
+ MOVQ m_curg(AX), CX
+ TESTQ CX, CX
+ JZ sigtramp // g.m.curg == nil
+ MOVQ g_syscallsp(CX), CX
+ TESTQ CX, CX
+ JZ sigtramp // g.m.curg.syscallsp == 0
+ MOVQ m_cgoCallers(AX), R8
+ TESTQ R8, R8
+ JZ sigtramp // g.m.cgoCallers == nil
+ MOVL m_cgoCallersUse(AX), CX
+ TESTL CX, CX
+ JNZ sigtramp // g.m.cgoCallersUse != 0
+
+ // Jump to a function in runtime/cgo.
+ // That function, written in C, will call the user's traceback
+ // function with proper unwind info, and will then call back here.
+ // The first three arguments, and the fifth, are already in registers.
+ // Set the two remaining arguments now.
+ MOVQ runtime·cgoTraceback(SB), CX
+ MOVQ $runtime·sigtramp(SB), R9
+ MOVQ _cgo_callers(SB), AX
+ JMP AX
+
+sigtramp:
+ JMP runtime·sigtramp(SB)
+
+sigtrampnog:
+ // Signal arrived on a non-Go thread. If this is SIGPROF, get a
+ // stack trace.
+ CMPL DI, $27 // 27 == SIGPROF
+ JNZ sigtramp
+
+ // Lock sigprofCallersUse.
+ MOVL $0, AX
+ MOVL $1, CX
+ MOVQ $runtime·sigprofCallersUse(SB), R11
+ LOCK
+ CMPXCHGL CX, 0(R11)
+ JNZ sigtramp // Skip stack trace if already locked.
+
+ // Jump to the traceback function in runtime/cgo.
+ // It will call back to sigprofNonGo, via sigprofNonGoWrapper, to convert
+ // the arguments to the Go calling convention.
+ // First three arguments to traceback function are in registers already.
+ MOVQ runtime·cgoTraceback(SB), CX
+ MOVQ $runtime·sigprofCallers(SB), R8
+ MOVQ $runtime·sigprofNonGoWrapper<>(SB), R9
+ MOVQ _cgo_callers(SB), AX
+ JMP AX
+
+// For cgo unwinding to work, this function must look precisely like
+// the one in glibc. The glibc source code is:
+// https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/unix/sysv/linux/x86_64/sigaction.c
+// The code that cares about the precise instructions used is:
+// https://gcc.gnu.org/viewcvs/gcc/trunk/libgcc/config/i386/linux-unwind.h?revision=219188&view=markup
+TEXT runtime·sigreturn(SB),NOSPLIT,$0
+ MOVQ $SYS_rt_sigreturn, AX
+ SYSCALL
+ INT $3 // not reached
+
+TEXT runtime·sysMmap(SB),NOSPLIT,$0
+ MOVQ addr+0(FP), DI
+ MOVQ n+8(FP), SI
+ MOVL prot+16(FP), DX
+ MOVL flags+20(FP), R10
+ MOVL fd+24(FP), R8
+ MOVL off+28(FP), R9
+
+ MOVL $SYS_mmap, AX
+ SYSCALL
+ CMPQ AX, $0xfffffffffffff001
+ JLS ok
+ NOTQ AX
+ INCQ AX
+ MOVQ $0, p+32(FP)
+ MOVQ AX, err+40(FP)
+ RET
+ok:
+ MOVQ AX, p+32(FP)
+ MOVQ $0, err+40(FP)
+ RET
+
+// Call the function stored in _cgo_mmap using the GCC calling convention.
+// This must be called on the system stack.
+TEXT runtime·callCgoMmap(SB),NOSPLIT,$16
+ MOVQ addr+0(FP), DI
+ MOVQ n+8(FP), SI
+ MOVL prot+16(FP), DX
+ MOVL flags+20(FP), CX
+ MOVL fd+24(FP), R8
+ MOVL off+28(FP), R9
+ MOVQ _cgo_mmap(SB), AX
+ MOVQ SP, BX
+ ANDQ $~15, SP // alignment as per amd64 psABI
+ MOVQ BX, 0(SP)
+ CALL AX
+ MOVQ 0(SP), SP
+ MOVQ AX, ret+32(FP)
+ RET
+
+TEXT runtime·sysMunmap(SB),NOSPLIT,$0
+ MOVQ addr+0(FP), DI
+ MOVQ n+8(FP), SI
+ MOVQ $SYS_munmap, AX
+ SYSCALL
+ CMPQ AX, $0xfffffffffffff001
+ JLS 2(PC)
+ MOVL $0xf1, 0xf1 // crash
+ RET
+
+// Call the function stored in _cgo_munmap using the GCC calling convention.
+// This must be called on the system stack.
+TEXT runtime·callCgoMunmap(SB),NOSPLIT,$16-16
+ MOVQ addr+0(FP), DI
+ MOVQ n+8(FP), SI
+ MOVQ _cgo_munmap(SB), AX
+ MOVQ SP, BX
+ ANDQ $~15, SP // alignment as per amd64 psABI
+ MOVQ BX, 0(SP)
+ CALL AX
+ MOVQ 0(SP), SP
+ RET
+
+TEXT runtime·madvise(SB),NOSPLIT,$0
+ MOVQ addr+0(FP), DI
+ MOVQ n+8(FP), SI
+ MOVL flags+16(FP), DX
+ MOVQ $SYS_madvise, AX
+ SYSCALL
+ MOVL AX, ret+24(FP)
+ RET
+
+// int64 futex(int32 *uaddr, int32 op, int32 val,
+// struct timespec *timeout, int32 *uaddr2, int32 val2);
+TEXT runtime·futex(SB),NOSPLIT,$0
+ MOVQ addr+0(FP), DI
+ MOVL op+8(FP), SI
+ MOVL val+12(FP), DX
+ MOVQ ts+16(FP), R10
+ MOVQ addr2+24(FP), R8
+ MOVL val3+32(FP), R9
+ MOVL $SYS_futex, AX
+ SYSCALL
+ MOVL AX, ret+40(FP)
+ RET
+
+// int32 clone(int32 flags, void *stk, M *mp, G *gp, void (*fn)(void));
+TEXT runtime·clone(SB),NOSPLIT,$0
+ MOVL flags+0(FP), DI
+ MOVQ stk+8(FP), SI
+ MOVQ $0, DX
+ MOVQ $0, R10
+ MOVQ $0, R8
+ // Copy mp, gp, fn off parent stack for use by child.
+ // Careful: Linux system call clobbers CX and R11.
+ MOVQ mp+16(FP), R13
+ MOVQ gp+24(FP), R9
+ MOVQ fn+32(FP), R12
+ CMPQ R13, $0 // m
+ JEQ nog1
+ CMPQ R9, $0 // g
+ JEQ nog1
+ LEAQ m_tls(R13), R8
+#ifdef GOOS_android
+ // Android stores the TLS offset in runtime·tls_g.
+ SUBQ runtime·tls_g(SB), R8
+#else
+ ADDQ $8, R8 // ELF wants to use -8(FS)
+#endif
+ ORQ $0x00080000, DI // add the CLONE_SETTLS (0x00080000) flag for the clone call
+nog1:
+ MOVL $SYS_clone, AX
+ SYSCALL
+
+ // In parent, return.
+ CMPQ AX, $0
+ JEQ 3(PC)
+ MOVL AX, ret+40(FP)
+ RET
+
+ // In child, on new stack.
+ MOVQ SI, SP
+
+ // If g or m are nil, skip Go-related setup.
+ CMPQ R13, $0 // m
+ JEQ nog2
+ CMPQ R9, $0 // g
+ JEQ nog2
+
+ // Initialize m->procid to Linux tid
+ MOVL $SYS_gettid, AX
+ SYSCALL
+ MOVQ AX, m_procid(R13)
+
+ // In child, set up new stack
+ get_tls(CX)
+ MOVQ R13, g_m(R9)
+ MOVQ R9, g(CX)
+ MOVQ R9, R14 // set g register
+ CALL runtime·stackcheck(SB)
+
+nog2:
+ // Call fn. This is the PC of an ABI0 function.
+ CALL R12
+
+ // It shouldn't return. If it does, exit that thread.
+ MOVL $111, DI
+ MOVL $SYS_exit, AX
+ SYSCALL
+ JMP -3(PC) // keep exiting
+
+TEXT runtime·sigaltstack(SB),NOSPLIT,$-8
+ MOVQ new+0(FP), DI
+ MOVQ old+8(FP), SI
+ MOVQ $SYS_sigaltstack, AX
+ SYSCALL
+ CMPQ AX, $0xfffffffffffff001
+ JLS 2(PC)
+ MOVL $0xf1, 0xf1 // crash
+ RET
+
+// set tls base to DI
+TEXT runtime·settls(SB),NOSPLIT,$32
+#ifdef GOOS_android
+ // Android stores the TLS offset in runtime·tls_g.
+ SUBQ runtime·tls_g(SB), DI
+#else
+ ADDQ $8, DI // ELF wants to use -8(FS)
+#endif
+ MOVQ DI, SI
+ MOVQ $0x1002, DI // ARCH_SET_FS
+ MOVQ $SYS_arch_prctl, AX
+ SYSCALL
+ CMPQ AX, $0xfffffffffffff001
+ JLS 2(PC)
+ MOVL $0xf1, 0xf1 // crash
+ RET
+
+TEXT runtime·osyield(SB),NOSPLIT,$0
+ MOVL $SYS_sched_yield, AX
+ SYSCALL
+ RET
+
+TEXT runtime·sched_getaffinity(SB),NOSPLIT,$0
+ MOVQ pid+0(FP), DI
+ MOVQ len+8(FP), SI
+ MOVQ buf+16(FP), DX
+ MOVL $SYS_sched_getaffinity, AX
+ SYSCALL
+ MOVL AX, ret+24(FP)
+ RET
+
+// int32 runtime·epollcreate(int32 size);
+TEXT runtime·epollcreate(SB),NOSPLIT,$0
+ MOVL size+0(FP), DI
+ MOVL $SYS_epoll_create, AX
+ SYSCALL
+ MOVL AX, ret+8(FP)
+ RET
+
+// int32 runtime·epollcreate1(int32 flags);
+TEXT runtime·epollcreate1(SB),NOSPLIT,$0
+ MOVL flags+0(FP), DI
+ MOVL $SYS_epoll_create1, AX
+ SYSCALL
+ MOVL AX, ret+8(FP)
+ RET
+
+// func epollctl(epfd, op, fd int32, ev *epollEvent) int
+TEXT runtime·epollctl(SB),NOSPLIT,$0
+ MOVL epfd+0(FP), DI
+ MOVL op+4(FP), SI
+ MOVL fd+8(FP), DX
+ MOVQ ev+16(FP), R10
+ MOVL $SYS_epoll_ctl, AX
+ SYSCALL
+ MOVL AX, ret+24(FP)
+ RET
+
+// int32 runtime·epollwait(int32 epfd, EpollEvent *ev, int32 nev, int32 timeout);
+TEXT runtime·epollwait(SB),NOSPLIT,$0
+ // This uses pwait instead of wait, because Android O blocks wait.
+ MOVL epfd+0(FP), DI
+ MOVQ ev+8(FP), SI
+ MOVL nev+16(FP), DX
+ MOVL timeout+20(FP), R10
+ MOVQ $0, R8
+ MOVL $SYS_epoll_pwait, AX
+ SYSCALL
+ MOVL AX, ret+24(FP)
+ RET
+
+// void runtime·closeonexec(int32 fd);
+TEXT runtime·closeonexec(SB),NOSPLIT,$0
+ MOVL fd+0(FP), DI // fd
+ MOVQ $2, SI // F_SETFD
+ MOVQ $1, DX // FD_CLOEXEC
+ MOVL $SYS_fcntl, AX
+ SYSCALL
+ RET
+
+// int access(const char *name, int mode)
+TEXT runtime·access(SB),NOSPLIT,$0
+ // This uses faccessat instead of access, because Android O blocks access.
+ MOVL $AT_FDCWD, DI // AT_FDCWD, so this acts like access
+ MOVQ name+0(FP), SI
+ MOVL mode+8(FP), DX
+ MOVL $0, R10
+ MOVL $SYS_faccessat, AX
+ SYSCALL
+ MOVL AX, ret+16(FP)
+ RET
+
+// int connect(int fd, const struct sockaddr *addr, socklen_t addrlen)
+TEXT runtime·connect(SB),NOSPLIT,$0-28
+ MOVL fd+0(FP), DI
+ MOVQ addr+8(FP), SI
+ MOVL len+16(FP), DX
+ MOVL $SYS_connect, AX
+ SYSCALL
+ MOVL AX, ret+24(FP)
+ RET
+
+// int socket(int domain, int type, int protocol)
+TEXT runtime·socket(SB),NOSPLIT,$0-20
+ MOVL domain+0(FP), DI
+ MOVL typ+4(FP), SI
+ MOVL prot+8(FP), DX
+ MOVL $SYS_socket, AX
+ SYSCALL
+ MOVL AX, ret+16(FP)
+ RET
+
+// func sbrk0() uintptr
+TEXT runtime·sbrk0(SB),NOSPLIT,$0-8
+ // Implemented as brk(NULL).
+ MOVQ $0, DI
+ MOVL $SYS_brk, AX
+ SYSCALL
+ MOVQ AX, ret+0(FP)
+ RET
diff --git a/contrib/go/_std_1.18/src/runtime/sys_nonppc64x.go b/contrib/go/_std_1.19/src/runtime/sys_nonppc64x.go
index 653f1c999f..653f1c999f 100644
--- a/contrib/go/_std_1.18/src/runtime/sys_nonppc64x.go
+++ b/contrib/go/_std_1.19/src/runtime/sys_nonppc64x.go
diff --git a/contrib/go/_std_1.18/src/runtime/sys_x86.go b/contrib/go/_std_1.19/src/runtime/sys_x86.go
index 9fb36c2a66..9fb36c2a66 100644
--- a/contrib/go/_std_1.18/src/runtime/sys_x86.go
+++ b/contrib/go/_std_1.19/src/runtime/sys_x86.go
diff --git a/contrib/go/_std_1.18/src/runtime/textflag.h b/contrib/go/_std_1.19/src/runtime/textflag.h
index 214075e360..214075e360 100644
--- a/contrib/go/_std_1.18/src/runtime/textflag.h
+++ b/contrib/go/_std_1.19/src/runtime/textflag.h
diff --git a/contrib/go/_std_1.19/src/runtime/time.go b/contrib/go/_std_1.19/src/runtime/time.go
new file mode 100644
index 0000000000..80b0bfb72c
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/time.go
@@ -0,0 +1,1142 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Time-related runtime and pieces of package time.
+
+package runtime
+
+import (
+ "internal/abi"
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+// Package time knows the layout of this structure.
+// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
+type timer struct {
+ // If this timer is on a heap, which P's heap it is on.
+ // puintptr rather than *p to match uintptr in the versions
+ // of this struct defined in other packages.
+ pp puintptr
+
+ // Timer wakes up at when, and then at when+period, ... (period > 0 only)
+ // each time calling f(arg, now) in the timer goroutine, so f must be
+ // a well-behaved function and not block.
+ //
+ // when must be positive on an active timer.
+ when int64
+ period int64
+ f func(any, uintptr)
+ arg any
+ seq uintptr
+
+ // What to set the when field to in timerModifiedXX status.
+ nextwhen int64
+
+ // The status field holds one of the values below.
+ status uint32
+}
+
+// Code outside this file has to be careful in using a timer value.
+//
+// The pp, status, and nextwhen fields may only be used by code in this file.
+//
+// Code that creates a new timer value can set the when, period, f,
+// arg, and seq fields.
+// A new timer value may be passed to addtimer (called by time.startTimer).
+// After doing that no fields may be touched.
+//
+// An active timer (one that has been passed to addtimer) may be
+// passed to deltimer (time.stopTimer), after which it is no longer an
+// active timer. It is an inactive timer.
+// In an inactive timer the period, f, arg, and seq fields may be modified,
+// but not the when field.
+// It's OK to just drop an inactive timer and let the GC collect it.
+// It's not OK to pass an inactive timer to addtimer.
+// Only newly allocated timer values may be passed to addtimer.
+//
+// An active timer may be passed to modtimer. No fields may be touched.
+// It remains an active timer.
+//
+// An inactive timer may be passed to resettimer to turn into an
+// active timer with an updated when field.
+// It's OK to pass a newly allocated timer value to resettimer.
+//
+// Timer operations are addtimer, deltimer, modtimer, resettimer,
+// cleantimers, adjusttimers, and runtimer.
+//
+// We don't permit calling addtimer/deltimer/modtimer/resettimer simultaneously,
+// but adjusttimers and runtimer can be called at the same time as any of those.
+//
+// Active timers live in heaps attached to P, in the timers field.
+// Inactive timers live there too temporarily, until they are removed.
+//
+// addtimer:
+// timerNoStatus -> timerWaiting
+// anything else -> panic: invalid value
+// deltimer:
+// timerWaiting -> timerModifying -> timerDeleted
+// timerModifiedEarlier -> timerModifying -> timerDeleted
+// timerModifiedLater -> timerModifying -> timerDeleted
+// timerNoStatus -> do nothing
+// timerDeleted -> do nothing
+// timerRemoving -> do nothing
+// timerRemoved -> do nothing
+// timerRunning -> wait until status changes
+// timerMoving -> wait until status changes
+// timerModifying -> wait until status changes
+// modtimer:
+// timerWaiting -> timerModifying -> timerModifiedXX
+// timerModifiedXX -> timerModifying -> timerModifiedYY
+// timerNoStatus -> timerModifying -> timerWaiting
+// timerRemoved -> timerModifying -> timerWaiting
+// timerDeleted -> timerModifying -> timerModifiedXX
+// timerRunning -> wait until status changes
+// timerMoving -> wait until status changes
+// timerRemoving -> wait until status changes
+// timerModifying -> wait until status changes
+// cleantimers (looks in P's timer heap):
+// timerDeleted -> timerRemoving -> timerRemoved
+// timerModifiedXX -> timerMoving -> timerWaiting
+// adjusttimers (looks in P's timer heap):
+// timerDeleted -> timerRemoving -> timerRemoved
+// timerModifiedXX -> timerMoving -> timerWaiting
+// runtimer (looks in P's timer heap):
+// timerNoStatus -> panic: uninitialized timer
+// timerWaiting -> timerWaiting or
+// timerWaiting -> timerRunning -> timerNoStatus or
+// timerWaiting -> timerRunning -> timerWaiting
+// timerModifying -> wait until status changes
+// timerModifiedXX -> timerMoving -> timerWaiting
+// timerDeleted -> timerRemoving -> timerRemoved
+// timerRunning -> panic: concurrent runtimer calls
+// timerRemoved -> panic: inconsistent timer heap
+// timerRemoving -> panic: inconsistent timer heap
+// timerMoving -> panic: inconsistent timer heap
+
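+// For orientation: these transitions are driven indirectly by package time.
+// A hedged sketch in ordinary user code, with the runtime entry points noted
+// alongside:
+//
+//	t := time.NewTimer(time.Second) // startTimer -> addtimer: timerNoStatus -> timerWaiting
+//	if !t.Stop() {                  // stopTimer -> deltimer: timerWaiting -> timerDeleted
+//		<-t.C // drain the channel if the timer already fired
+//	}
+//	t.Reset(time.Minute) // resetTimer -> resettimer: the timer becomes active again
+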
+// Values for the timer status field.
+const (
+ // Timer has no status set yet.
+ timerNoStatus = iota
+
+ // Waiting for timer to fire.
+ // The timer is in some P's heap.
+ timerWaiting
+
+ // Running the timer function.
+ // A timer will only have this status briefly.
+ timerRunning
+
+ // The timer is deleted and should be removed.
+ // It should not be run, but it is still in some P's heap.
+ timerDeleted
+
+ // The timer is being removed.
+ // The timer will only have this status briefly.
+ timerRemoving
+
+ // The timer has been stopped.
+ // It is not in any P's heap.
+ timerRemoved
+
+ // The timer is being modified.
+ // The timer will only have this status briefly.
+ timerModifying
+
+ // The timer has been modified to an earlier time.
+ // The new when value is in the nextwhen field.
+ // The timer is in some P's heap, possibly in the wrong place.
+ timerModifiedEarlier
+
+ // The timer has been modified to the same or a later time.
+ // The new when value is in the nextwhen field.
+ // The timer is in some P's heap, possibly in the wrong place.
+ timerModifiedLater
+
+ // The timer has been modified and is being moved.
+ // The timer will only have this status briefly.
+ timerMoving
+)
+
+// maxWhen is the maximum value for timer's when field.
+const maxWhen = 1<<63 - 1
+
+// verifyTimers can be set to true to add debugging checks that the
+// timer heaps are valid.
+const verifyTimers = false
+
+// Package time APIs.
+// Godoc uses the comments in package time, not these.
+
+// time.now is implemented in assembly.
+
+// timeSleep puts the current goroutine to sleep for at least ns nanoseconds.
+//
+//go:linkname timeSleep time.Sleep
+func timeSleep(ns int64) {
+ if ns <= 0 {
+ return
+ }
+
+ gp := getg()
+ t := gp.timer
+ if t == nil {
+ t = new(timer)
+ gp.timer = t
+ }
+ t.f = goroutineReady
+ t.arg = gp
+ t.nextwhen = nanotime() + ns
+ if t.nextwhen < 0 { // check for overflow.
+ t.nextwhen = maxWhen
+ }
+ gopark(resetForSleep, unsafe.Pointer(t), waitReasonSleep, traceEvGoSleep, 1)
+}
+
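+// A hedged numeric example of the clamp above: if nanotime() plus ns wraps
+// negative, nextwhen is pinned to maxWhen (about 292 years of nanoseconds),
+// so an unrepresentable deadline degrades to "sleep practically forever"
+// rather than firing immediately.
+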
+// resetForSleep is called after the goroutine is parked for timeSleep.
+// We can't call resettimer in timeSleep itself because if this is a short
+// sleep and there are many goroutines then the P can wind up running the
+// timer function, goroutineReady, before the goroutine has been parked.
+func resetForSleep(gp *g, ut unsafe.Pointer) bool {
+ t := (*timer)(ut)
+ resettimer(t, t.nextwhen)
+ return true
+}
+
+// startTimer adds t to the timer heap.
+//
+//go:linkname startTimer time.startTimer
+func startTimer(t *timer) {
+ if raceenabled {
+ racerelease(unsafe.Pointer(t))
+ }
+ addtimer(t)
+}
+
+// stopTimer stops a timer.
+// It reports whether t was stopped before being run.
+//
+//go:linkname stopTimer time.stopTimer
+func stopTimer(t *timer) bool {
+ return deltimer(t)
+}
+
+// resetTimer resets an inactive timer, adding it to the heap.
+//
+// Reports whether the timer was modified before it was run.
+//
+//go:linkname resetTimer time.resetTimer
+func resetTimer(t *timer, when int64) bool {
+ if raceenabled {
+ racerelease(unsafe.Pointer(t))
+ }
+ return resettimer(t, when)
+}
+
+// modTimer modifies an existing timer.
+//
+//go:linkname modTimer time.modTimer
+func modTimer(t *timer, when, period int64, f func(any, uintptr), arg any, seq uintptr) {
+ modtimer(t, when, period, f, arg, seq)
+}
+
+// Go runtime.
+
+// Ready the goroutine arg.
+func goroutineReady(arg any, seq uintptr) {
+ goready(arg.(*g), 0)
+}
+
+// addtimer adds a timer to the current P.
+// This should only be called with a newly created timer.
+// That avoids the risk of changing the when field of a timer in some P's heap,
+// which could cause the heap to become unsorted.
+func addtimer(t *timer) {
+ // when must be positive. A negative value will cause runtimer to
+ // overflow during its delta calculation and never expire other runtime
+ // timers. Zero will cause checkTimers to fail to notice the timer.
+ if t.when <= 0 {
+ throw("timer when must be positive")
+ }
+ if t.period < 0 {
+ throw("timer period must be non-negative")
+ }
+ if t.status != timerNoStatus {
+ throw("addtimer called with initialized timer")
+ }
+ t.status = timerWaiting
+
+ when := t.when
+
+ // Disable preemption while using pp to avoid changing another P's heap.
+ mp := acquirem()
+
+ pp := getg().m.p.ptr()
+ lock(&pp.timersLock)
+ cleantimers(pp)
+ doaddtimer(pp, t)
+ unlock(&pp.timersLock)
+
+ wakeNetPoller(when)
+
+ releasem(mp)
+}
+
+// doaddtimer adds t to the current P's heap.
+// The caller must have locked the timers for pp.
+func doaddtimer(pp *p, t *timer) {
+ // Timers rely on the network poller, so make sure the poller
+ // has started.
+ if netpollInited == 0 {
+ netpollGenericInit()
+ }
+
+ if t.pp != 0 {
+ throw("doaddtimer: P already set in timer")
+ }
+ t.pp.set(pp)
+ i := len(pp.timers)
+ pp.timers = append(pp.timers, t)
+ siftupTimer(pp.timers, i)
+ if t == pp.timers[0] {
+ atomic.Store64(&pp.timer0When, uint64(t.when))
+ }
+ atomic.Xadd(&pp.numTimers, 1)
+}
+
+// deltimer deletes the timer t. It may be on some other P, so we can't
+// actually remove it from the timers heap. We can only mark it as deleted.
+// It will be removed in due course by the P whose heap it is on.
+// Reports whether the timer was removed before it was run.
+func deltimer(t *timer) bool {
+ for {
+ switch s := atomic.Load(&t.status); s {
+ case timerWaiting, timerModifiedLater:
+ // Prevent preemption while the timer is in timerModifying.
+ // This could lead to a self-deadlock. See #38070.
+ mp := acquirem()
+ if atomic.Cas(&t.status, s, timerModifying) {
+ // Must fetch t.pp before changing status,
+ // as cleantimers in another goroutine
+ // can clear t.pp of a timerDeleted timer.
+ tpp := t.pp.ptr()
+ if !atomic.Cas(&t.status, timerModifying, timerDeleted) {
+ badTimer()
+ }
+ releasem(mp)
+ atomic.Xadd(&tpp.deletedTimers, 1)
+ // Timer was not yet run.
+ return true
+ } else {
+ releasem(mp)
+ }
+ case timerModifiedEarlier:
+ // Prevent preemption while the timer is in timerModifying.
+ // This could lead to a self-deadlock. See #38070.
+ mp := acquirem()
+ if atomic.Cas(&t.status, s, timerModifying) {
+ // Must fetch t.pp before setting status
+ // to timerDeleted.
+ tpp := t.pp.ptr()
+ if !atomic.Cas(&t.status, timerModifying, timerDeleted) {
+ badTimer()
+ }
+ releasem(mp)
+ atomic.Xadd(&tpp.deletedTimers, 1)
+ // Timer was not yet run.
+ return true
+ } else {
+ releasem(mp)
+ }
+ case timerDeleted, timerRemoving, timerRemoved:
+ // Timer was already run.
+ return false
+ case timerRunning, timerMoving:
+ // The timer is being run or moved, by a different P.
+ // Wait for it to complete.
+ osyield()
+ case timerNoStatus:
+ // Removing timer that was never added or
+ // has already been run. Also see issue 21874.
+ return false
+ case timerModifying:
+ // Simultaneous calls to deltimer and modtimer.
+ // Wait for the other call to complete.
+ osyield()
+ default:
+ badTimer()
+ }
+ }
+}
+
+// dodeltimer removes timer i from the current P's heap.
+// We are locked on the P when this is called.
+// It returns the smallest changed index in pp.timers.
+// The caller must have locked the timers for pp.
+func dodeltimer(pp *p, i int) int {
+ if t := pp.timers[i]; t.pp.ptr() != pp {
+ throw("dodeltimer: wrong P")
+ } else {
+ t.pp = 0
+ }
+ last := len(pp.timers) - 1
+ if i != last {
+ pp.timers[i] = pp.timers[last]
+ }
+ pp.timers[last] = nil
+ pp.timers = pp.timers[:last]
+ smallestChanged := i
+ if i != last {
+ // Moving to i may have moved the last timer to a new parent,
+ // so sift up to preserve the heap guarantee.
+ smallestChanged = siftupTimer(pp.timers, i)
+ siftdownTimer(pp.timers, i)
+ }
+ if i == 0 {
+ updateTimer0When(pp)
+ }
+ n := atomic.Xadd(&pp.numTimers, -1)
+ if n == 0 {
+ // If there are no timers, then clearly none are modified.
+ atomic.Store64(&pp.timerModifiedEarliest, 0)
+ }
+ return smallestChanged
+}
+
+// dodeltimer0 removes timer 0 from the current P's heap.
+// We are locked on the P when this is called.
+// The caller must have locked the timers for pp.
+func dodeltimer0(pp *p) {
+ if t := pp.timers[0]; t.pp.ptr() != pp {
+ throw("dodeltimer0: wrong P")
+ } else {
+ t.pp = 0
+ }
+ last := len(pp.timers) - 1
+ if last > 0 {
+ pp.timers[0] = pp.timers[last]
+ }
+ pp.timers[last] = nil
+ pp.timers = pp.timers[:last]
+ if last > 0 {
+ siftdownTimer(pp.timers, 0)
+ }
+ updateTimer0When(pp)
+ n := atomic.Xadd(&pp.numTimers, -1)
+ if n == 0 {
+ // If there are no timers, then clearly none are modified.
+ atomic.Store64(&pp.timerModifiedEarliest, 0)
+ }
+}
+
+// modtimer modifies an existing timer.
+// This is called by the netpoll code or time.Ticker.Reset or time.Timer.Reset.
+// Reports whether the timer was modified before it was run.
+func modtimer(t *timer, when, period int64, f func(any, uintptr), arg any, seq uintptr) bool {
+ if when <= 0 {
+ throw("timer when must be positive")
+ }
+ if period < 0 {
+ throw("timer period must be non-negative")
+ }
+
+ status := uint32(timerNoStatus)
+ wasRemoved := false
+ var pending bool
+ var mp *m
+loop:
+ for {
+ switch status = atomic.Load(&t.status); status {
+ case timerWaiting, timerModifiedEarlier, timerModifiedLater:
+ // Prevent preemption while the timer is in timerModifying.
+ // This could lead to a self-deadlock. See #38070.
+ mp = acquirem()
+ if atomic.Cas(&t.status, status, timerModifying) {
+ pending = true // timer not yet run
+ break loop
+ }
+ releasem(mp)
+ case timerNoStatus, timerRemoved:
+ // Prevent preemption while the timer is in timerModifying.
+ // This could lead to a self-deadlock. See #38070.
+ mp = acquirem()
+
+ // Timer was already run and t is no longer in a heap.
+ // Act like addtimer.
+ if atomic.Cas(&t.status, status, timerModifying) {
+ wasRemoved = true
+ pending = false // timer already run or stopped
+ break loop
+ }
+ releasem(mp)
+ case timerDeleted:
+ // Prevent preemption while the timer is in timerModifying.
+ // This could lead to a self-deadlock. See #38070.
+ mp = acquirem()
+ if atomic.Cas(&t.status, status, timerModifying) {
+ atomic.Xadd(&t.pp.ptr().deletedTimers, -1)
+ pending = false // timer already stopped
+ break loop
+ }
+ releasem(mp)
+ case timerRunning, timerRemoving, timerMoving:
+ // The timer is being run or moved, by a different P.
+ // Wait for it to complete.
+ osyield()
+ case timerModifying:
+ // Multiple simultaneous calls to modtimer.
+ // Wait for the other call to complete.
+ osyield()
+ default:
+ badTimer()
+ }
+ }
+
+ t.period = period
+ t.f = f
+ t.arg = arg
+ t.seq = seq
+
+ if wasRemoved {
+ t.when = when
+ pp := getg().m.p.ptr()
+ lock(&pp.timersLock)
+ doaddtimer(pp, t)
+ unlock(&pp.timersLock)
+ if !atomic.Cas(&t.status, timerModifying, timerWaiting) {
+ badTimer()
+ }
+ releasem(mp)
+ wakeNetPoller(when)
+ } else {
+ // The timer is in some other P's heap, so we can't change
+ // the when field. If we did, the other P's heap would
+ // be out of order. So we put the new when value in the
+ // nextwhen field, and let the other P set the when field
+ // when it is prepared to resort the heap.
+ t.nextwhen = when
+
+ newStatus := uint32(timerModifiedLater)
+ if when < t.when {
+ newStatus = timerModifiedEarlier
+ }
+
+ tpp := t.pp.ptr()
+
+ if newStatus == timerModifiedEarlier {
+ updateTimerModifiedEarliest(tpp, when)
+ }
+
+ // Set the new status of the timer.
+ if !atomic.Cas(&t.status, timerModifying, newStatus) {
+ badTimer()
+ }
+ releasem(mp)
+
+ // If the new status is earlier, wake up the poller.
+ if newStatus == timerModifiedEarlier {
+ wakeNetPoller(when)
+ }
+ }
+
+ return pending
+}
+
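+// The pending result ultimately surfaces to users as the return value of
+// time.Timer.Reset (via resettimer and the linknamed resetTimer). A hedged
+// sketch of how user code observes it:
+//
+//	wasActive := t.Reset(time.Second) // true iff t had been waiting to fire
+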
+// resettimer resets the time when a timer should fire.
+// If used for an inactive timer, the timer will become active.
+// This should be called instead of addtimer if the timer value has been,
+// or may have been, used previously.
+// Reports whether the timer was modified before it was run.
+func resettimer(t *timer, when int64) bool {
+ return modtimer(t, when, t.period, t.f, t.arg, t.seq)
+}
+
+// cleantimers cleans up the head of the timer queue. This speeds up
+// programs that create and delete timers; leaving them in the heap
+// slows down addtimer.
+// The caller must have locked the timers for pp.
+func cleantimers(pp *p) {
+ gp := getg()
+ for {
+ if len(pp.timers) == 0 {
+ return
+ }
+
+ // This loop can theoretically run for a while, and because
+ // it is holding timersLock it cannot be preempted.
+ // If someone is trying to preempt us, just return.
+ // We can clean the timers later.
+ if gp.preemptStop {
+ return
+ }
+
+ t := pp.timers[0]
+ if t.pp.ptr() != pp {
+ throw("cleantimers: bad p")
+ }
+ switch s := atomic.Load(&t.status); s {
+ case timerDeleted:
+ if !atomic.Cas(&t.status, s, timerRemoving) {
+ continue
+ }
+ dodeltimer0(pp)
+ if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
+ badTimer()
+ }
+ atomic.Xadd(&pp.deletedTimers, -1)
+ case timerModifiedEarlier, timerModifiedLater:
+ if !atomic.Cas(&t.status, s, timerMoving) {
+ continue
+ }
+ // Now we can change the when field.
+ t.when = t.nextwhen
+ // Move t to the right position.
+ dodeltimer0(pp)
+ doaddtimer(pp, t)
+ if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
+ badTimer()
+ }
+ default:
+ // Head of timers does not need adjustment.
+ return
+ }
+ }
+}
+
+// moveTimers moves a slice of timers to pp. The slice has been taken
+// from a different P.
+// This is currently called when the world is stopped, but the caller
+// is expected to have locked the timers for pp.
+func moveTimers(pp *p, timers []*timer) {
+ for _, t := range timers {
+ loop:
+ for {
+ switch s := atomic.Load(&t.status); s {
+ case timerWaiting:
+ if !atomic.Cas(&t.status, s, timerMoving) {
+ continue
+ }
+ t.pp = 0
+ doaddtimer(pp, t)
+ if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
+ badTimer()
+ }
+ break loop
+ case timerModifiedEarlier, timerModifiedLater:
+ if !atomic.Cas(&t.status, s, timerMoving) {
+ continue
+ }
+ t.when = t.nextwhen
+ t.pp = 0
+ doaddtimer(pp, t)
+ if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
+ badTimer()
+ }
+ break loop
+ case timerDeleted:
+ if !atomic.Cas(&t.status, s, timerRemoved) {
+ continue
+ }
+ t.pp = 0
+ // We no longer need this timer in the heap.
+ break loop
+ case timerModifying:
+ // Loop until the modification is complete.
+ osyield()
+ case timerNoStatus, timerRemoved:
+ // We should not see these status values in a timers heap.
+ badTimer()
+ case timerRunning, timerRemoving, timerMoving:
+ // Some other P thinks it owns this timer,
+ // which should not happen.
+ badTimer()
+ default:
+ badTimer()
+ }
+ }
+ }
+}
+
+// adjusttimers looks through the timers in the current P's heap for
+// any timers that have been modified to run earlier, and puts them in
+// the correct place in the heap. While looking for those timers,
+// it also moves timers that have been modified to run later,
+// and removes deleted timers. The caller must have locked the timers for pp.
+func adjusttimers(pp *p, now int64) {
+ // If we haven't yet reached the time of the first timerModifiedEarlier
+ // timer, don't do anything. This speeds up programs that adjust
+ // a lot of timers back and forth if the timers rarely expire.
+ // We'll postpone looking through all the adjusted timers until
+ // one would actually expire.
+ first := atomic.Load64(&pp.timerModifiedEarliest)
+ if first == 0 || int64(first) > now {
+ if verifyTimers {
+ verifyTimerHeap(pp)
+ }
+ return
+ }
+
+ // We are going to clear all timerModifiedEarlier timers.
+ atomic.Store64(&pp.timerModifiedEarliest, 0)
+
+ var moved []*timer
+ for i := 0; i < len(pp.timers); i++ {
+ t := pp.timers[i]
+ if t.pp.ptr() != pp {
+ throw("adjusttimers: bad p")
+ }
+ switch s := atomic.Load(&t.status); s {
+ case timerDeleted:
+ if atomic.Cas(&t.status, s, timerRemoving) {
+ changed := dodeltimer(pp, i)
+ if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
+ badTimer()
+ }
+ atomic.Xadd(&pp.deletedTimers, -1)
+ // Go back to the earliest changed heap entry.
+ // "- 1" because the loop will add 1.
+ i = changed - 1
+ }
+ case timerModifiedEarlier, timerModifiedLater:
+ if atomic.Cas(&t.status, s, timerMoving) {
+ // Now we can change the when field.
+ t.when = t.nextwhen
+ // Take t off the heap, and hold onto it.
+ // We don't add it back yet because the
+ // heap manipulation could cause our
+ // loop to skip some other timer.
+ changed := dodeltimer(pp, i)
+ moved = append(moved, t)
+ // Go back to the earliest changed heap entry.
+ // "- 1" because the loop will add 1.
+ i = changed - 1
+ }
+ case timerNoStatus, timerRunning, timerRemoving, timerRemoved, timerMoving:
+ badTimer()
+ case timerWaiting:
+ // OK, nothing to do.
+ case timerModifying:
+ // Check again after modification is complete.
+ osyield()
+ i--
+ default:
+ badTimer()
+ }
+ }
+
+ if len(moved) > 0 {
+ addAdjustedTimers(pp, moved)
+ }
+
+ if verifyTimers {
+ verifyTimerHeap(pp)
+ }
+}
+
+// addAdjustedTimers adds any timers we adjusted in adjusttimers
+// back to the timer heap.
+func addAdjustedTimers(pp *p, moved []*timer) {
+ for _, t := range moved {
+ doaddtimer(pp, t)
+ if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
+ badTimer()
+ }
+ }
+}
+
+// nobarrierWakeTime looks at P's timers and returns the time when we
+// should wake up the netpoller. It returns 0 if there are no timers.
+// This function is invoked when dropping a P, and must run without
+// any write barriers.
+//
+//go:nowritebarrierrec
+func nobarrierWakeTime(pp *p) int64 {
+ next := int64(atomic.Load64(&pp.timer0When))
+ nextAdj := int64(atomic.Load64(&pp.timerModifiedEarliest))
+ if next == 0 || (nextAdj != 0 && nextAdj < next) {
+ next = nextAdj
+ }
+ return next
+}
+
+// runtimer examines the first timer in timers. If it is ready based on now,
+// it runs the timer and removes or updates it.
+// Returns 0 if it ran a timer, -1 if there are no more timers, or the time
+// when the first timer should run.
+// The caller must have locked the timers for pp.
+// If a timer is run, this will temporarily unlock the timers.
+//
+//go:systemstack
+func runtimer(pp *p, now int64) int64 {
+ for {
+ t := pp.timers[0]
+ if t.pp.ptr() != pp {
+ throw("runtimer: bad p")
+ }
+ switch s := atomic.Load(&t.status); s {
+ case timerWaiting:
+ if t.when > now {
+ // Not ready to run.
+ return t.when
+ }
+
+ if !atomic.Cas(&t.status, s, timerRunning) {
+ continue
+ }
+ // Note that runOneTimer may temporarily unlock
+ // pp.timersLock.
+ runOneTimer(pp, t, now)
+ return 0
+
+ case timerDeleted:
+ if !atomic.Cas(&t.status, s, timerRemoving) {
+ continue
+ }
+ dodeltimer0(pp)
+ if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
+ badTimer()
+ }
+ atomic.Xadd(&pp.deletedTimers, -1)
+ if len(pp.timers) == 0 {
+ return -1
+ }
+
+ case timerModifiedEarlier, timerModifiedLater:
+ if !atomic.Cas(&t.status, s, timerMoving) {
+ continue
+ }
+ t.when = t.nextwhen
+ dodeltimer0(pp)
+ doaddtimer(pp, t)
+ if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
+ badTimer()
+ }
+
+ case timerModifying:
+ // Wait for modification to complete.
+ osyield()
+
+ case timerNoStatus, timerRemoved:
+ // Should not see a new or inactive timer on the heap.
+ badTimer()
+ case timerRunning, timerRemoving, timerMoving:
+ // These should only be set when timers are locked,
+ // and we didn't do it.
+ badTimer()
+ default:
+ badTimer()
+ }
+ }
+}
+
+// runOneTimer runs a single timer.
+// The caller must have locked the timers for pp.
+// This will temporarily unlock the timers while running the timer function.
+//
+//go:systemstack
+func runOneTimer(pp *p, t *timer, now int64) {
+ if raceenabled {
+ ppcur := getg().m.p.ptr()
+ if ppcur.timerRaceCtx == 0 {
+ ppcur.timerRaceCtx = racegostart(abi.FuncPCABIInternal(runtimer) + sys.PCQuantum)
+ }
+ raceacquirectx(ppcur.timerRaceCtx, unsafe.Pointer(t))
+ }
+
+ f := t.f
+ arg := t.arg
+ seq := t.seq
+
+ if t.period > 0 {
+ // Leave in heap but adjust next time to fire.
+ delta := t.when - now
+ t.when += t.period * (1 + -delta/t.period)
+ if t.when < 0 { // check for overflow.
+ t.when = maxWhen
+ }
+ siftdownTimer(pp.timers, 0)
+ if !atomic.Cas(&t.status, timerRunning, timerWaiting) {
+ badTimer()
+ }
+ updateTimer0When(pp)
+ } else {
+ // Remove from heap.
+ dodeltimer0(pp)
+ if !atomic.Cas(&t.status, timerRunning, timerNoStatus) {
+ badTimer()
+ }
+ }
+
+ if raceenabled {
+ // Temporarily use the current P's racectx for g0.
+ gp := getg()
+ if gp.racectx != 0 {
+ throw("runOneTimer: unexpected racectx")
+ }
+ gp.racectx = gp.m.p.ptr().timerRaceCtx
+ }
+
+ unlock(&pp.timersLock)
+
+ f(arg, seq)
+
+ lock(&pp.timersLock)
+
+ if raceenabled {
+ gp := getg()
+ gp.racectx = 0
+ }
+}
+
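+// A hedged worked example of the periodic rescheduling above: with when=100,
+// period=30, and now=175, delta is -75, so when += 30*(1 + 75/30) = 30*3 = 90,
+// giving when=190: the first when+k*period strictly after now. Intervals that
+// were missed while the timer could not run are coalesced rather than fired
+// late in a burst.
+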
+// clearDeletedTimers removes all deleted timers from the P's timer heap.
+// This is used to avoid clogging up the heap if the program
+// starts a lot of long-running timers and then stops them.
+// For example, this can happen via context.WithTimeout.
+//
+// This is the only function that walks through the entire timer heap,
+// other than moveTimers which only runs when the world is stopped.
+//
+// The caller must have locked the timers for pp.
+func clearDeletedTimers(pp *p) {
+ // We are going to clear all timerModifiedEarlier timers.
+ // Do this now in case new ones show up while we are looping.
+ atomic.Store64(&pp.timerModifiedEarliest, 0)
+
+ cdel := int32(0)
+ to := 0
+ changedHeap := false
+ timers := pp.timers
+nextTimer:
+ for _, t := range timers {
+ for {
+ switch s := atomic.Load(&t.status); s {
+ case timerWaiting:
+ if changedHeap {
+ timers[to] = t
+ siftupTimer(timers, to)
+ }
+ to++
+ continue nextTimer
+ case timerModifiedEarlier, timerModifiedLater:
+ if atomic.Cas(&t.status, s, timerMoving) {
+ t.when = t.nextwhen
+ timers[to] = t
+ siftupTimer(timers, to)
+ to++
+ changedHeap = true
+ if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
+ badTimer()
+ }
+ continue nextTimer
+ }
+ case timerDeleted:
+ if atomic.Cas(&t.status, s, timerRemoving) {
+ t.pp = 0
+ cdel++
+ if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
+ badTimer()
+ }
+ changedHeap = true
+ continue nextTimer
+ }
+ case timerModifying:
+ // Loop until modification complete.
+ osyield()
+ case timerNoStatus, timerRemoved:
+ // We should not see these status values in a timer heap.
+ badTimer()
+ case timerRunning, timerRemoving, timerMoving:
+ // Some other P thinks it owns this timer,
+ // which should not happen.
+ badTimer()
+ default:
+ badTimer()
+ }
+ }
+ }
+
+ // Set remaining slots in timers slice to nil,
+ // so that the timer values can be garbage collected.
+ for i := to; i < len(timers); i++ {
+ timers[i] = nil
+ }
+
+ atomic.Xadd(&pp.deletedTimers, -cdel)
+ atomic.Xadd(&pp.numTimers, -cdel)
+
+ timers = timers[:to]
+ pp.timers = timers
+ updateTimer0When(pp)
+
+ if verifyTimers {
+ verifyTimerHeap(pp)
+ }
+}
+
+// verifyTimerHeap verifies that the timer heap is in a valid state.
+// This is only for debugging, and is only called if verifyTimers is true.
+// The caller must have locked the timers.
+func verifyTimerHeap(pp *p) {
+ for i, t := range pp.timers {
+ if i == 0 {
+ // First timer has no parent.
+ continue
+ }
+
+ // The heap is 4-ary. See siftupTimer and siftdownTimer.
+ p := (i - 1) / 4
+ if t.when < pp.timers[p].when {
+ print("bad timer heap at ", i, ": ", p, ": ", pp.timers[p].when, ", ", i, ": ", t.when, "\n")
+ throw("bad timer heap")
+ }
+ }
+ if numTimers := int(atomic.Load(&pp.numTimers)); len(pp.timers) != numTimers {
+ println("timer heap len", len(pp.timers), "!= numTimers", numTimers)
+ throw("bad timer heap len")
+ }
+}
+
+// updateTimer0When sets the P's timer0When field.
+// The caller must have locked the timers for pp.
+func updateTimer0When(pp *p) {
+ if len(pp.timers) == 0 {
+ atomic.Store64(&pp.timer0When, 0)
+ } else {
+ atomic.Store64(&pp.timer0When, uint64(pp.timers[0].when))
+ }
+}
+
+// updateTimerModifiedEarliest updates pp.timerModifiedEarliest, which records
+// the earliest nextwhen among timerModifiedEarlier timers, lowering it to
+// nextwhen if that is earlier.
+// The timers for pp will not be locked.
+func updateTimerModifiedEarliest(pp *p, nextwhen int64) {
+ for {
+ old := atomic.Load64(&pp.timerModifiedEarliest)
+ if old != 0 && int64(old) < nextwhen {
+ return
+ }
+ if atomic.Cas64(&pp.timerModifiedEarliest, old, uint64(nextwhen)) {
+ return
+ }
+ }
+}
+
+// timeSleepUntil returns the time when the next timer should fire. Returns
+// maxWhen if there are no timers.
+// This is only called by sysmon and checkdead.
+func timeSleepUntil() int64 {
+ next := int64(maxWhen)
+
+ // Prevent allp slice changes. This is like retake.
+ lock(&allpLock)
+ for _, pp := range allp {
+ if pp == nil {
+ // This can happen if procresize has grown
+ // allp but not yet created new Ps.
+ continue
+ }
+
+ w := int64(atomic.Load64(&pp.timer0When))
+ if w != 0 && w < next {
+ next = w
+ }
+
+ w = int64(atomic.Load64(&pp.timerModifiedEarliest))
+ if w != 0 && w < next {
+ next = w
+ }
+ }
+ unlock(&allpLock)
+
+ return next
+}
+
+// Heap maintenance algorithms.
+// These algorithms check for slice index errors manually.
+// A slice index error can happen if the program makes racy
+// use of timers. We don't want to panic here, because
+// it will cause the program to crash with a mysterious
+// "panic holding locks" message. Instead, we panic while not
+// holding a lock.
+
+// siftupTimer puts the timer at position i in the right place
+// in the heap by moving it up toward the top of the heap.
+// It returns the smallest changed index.
+func siftupTimer(t []*timer, i int) int {
+ if i >= len(t) {
+ badTimer()
+ }
+ when := t[i].when
+ if when <= 0 {
+ badTimer()
+ }
+ tmp := t[i]
+ for i > 0 {
+ p := (i - 1) / 4 // parent
+ if when >= t[p].when {
+ break
+ }
+ t[i] = t[p]
+ i = p
+ }
+ if tmp != t[i] {
+ t[i] = tmp
+ }
+ return i
+}
+
+// siftdownTimer puts the timer at position i in the right place
+// in the heap by moving it down toward the bottom of the heap.
+func siftdownTimer(t []*timer, i int) {
+ n := len(t)
+ if i >= n {
+ badTimer()
+ }
+ when := t[i].when
+ if when <= 0 {
+ badTimer()
+ }
+ tmp := t[i]
+ for {
+ c := i*4 + 1 // left child
+ c3 := c + 2 // mid child
+ if c >= n {
+ break
+ }
+ w := t[c].when
+ if c+1 < n && t[c+1].when < w {
+ w = t[c+1].when
+ c++
+ }
+ if c3 < n {
+ w3 := t[c3].when
+ if c3+1 < n && t[c3+1].when < w3 {
+ w3 = t[c3+1].when
+ c3++
+ }
+ if w3 < w {
+ w = w3
+ c = c3
+ }
+ }
+ if w >= when {
+ break
+ }
+ t[i] = t[c]
+ i = c
+ }
+ if tmp != t[i] {
+ t[i] = tmp
+ }
+}
+
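+// Both functions above implement a 4-ary min-heap keyed on when: the parent
+// of node i is (i-1)/4 and its children are i*4+1 through i*4+4. A hedged,
+// self-contained sketch of the same siftup on plain int64 keys:
+//
+//	func siftup4(h []int64, i int) {
+//		v := h[i]
+//		for i > 0 {
+//			p := (i - 1) / 4 // parent index
+//			if v >= h[p] {
+//				break
+//			}
+//			h[i] = h[p] // pull the parent down one level
+//			i = p
+//		}
+//		h[i] = v
+//	}
+//
+// The wider fan-out makes the heap shallower than a binary heap, trading a
+// few extra comparisons per level for fewer levels on the hot paths.
+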
+// badTimer is called if the timer data structures have been corrupted,
+// presumably due to racy use by the program. We panic here rather than
+// panicking due to invalid slice access while holding locks.
+// See issue #25686.
+func badTimer() {
+ throw("timer data corruption")
+}
diff --git a/contrib/go/_std_1.18/src/runtime/time_linux_amd64.s b/contrib/go/_std_1.19/src/runtime/time_linux_amd64.s
index 1416d23230..1416d23230 100644
--- a/contrib/go/_std_1.18/src/runtime/time_linux_amd64.s
+++ b/contrib/go/_std_1.19/src/runtime/time_linux_amd64.s
diff --git a/contrib/go/_std_1.18/src/runtime/time_nofake.go b/contrib/go/_std_1.19/src/runtime/time_nofake.go
index 70a2102b22..70a2102b22 100644
--- a/contrib/go/_std_1.18/src/runtime/time_nofake.go
+++ b/contrib/go/_std_1.19/src/runtime/time_nofake.go
diff --git a/contrib/go/_std_1.18/src/runtime/timeasm.go b/contrib/go/_std_1.19/src/runtime/timeasm.go
index 0421388686..0421388686 100644
--- a/contrib/go/_std_1.18/src/runtime/timeasm.go
+++ b/contrib/go/_std_1.19/src/runtime/timeasm.go
diff --git a/contrib/go/_std_1.18/src/runtime/timestub.go b/contrib/go/_std_1.19/src/runtime/timestub.go
index 1d2926b43d..1d2926b43d 100644
--- a/contrib/go/_std_1.18/src/runtime/timestub.go
+++ b/contrib/go/_std_1.19/src/runtime/timestub.go
diff --git a/contrib/go/_std_1.18/src/runtime/tls_stub.go b/contrib/go/_std_1.19/src/runtime/tls_stub.go
index 7bdfc6b89a..7bdfc6b89a 100644
--- a/contrib/go/_std_1.18/src/runtime/tls_stub.go
+++ b/contrib/go/_std_1.19/src/runtime/tls_stub.go
diff --git a/contrib/go/_std_1.19/src/runtime/trace.go b/contrib/go/_std_1.19/src/runtime/trace.go
new file mode 100644
index 0000000000..10436d80c2
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/trace.go
@@ -0,0 +1,1442 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Go execution tracer.
+// The tracer captures a wide range of execution events like goroutine
+// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
+// changes of heap size, processor start/stop, etc., and writes them to a buffer
+// in a compact form. A precise nanosecond-precision timestamp and a stack
+// trace are captured for most events.
+// See https://golang.org/s/go15trace for more info.
+
+package runtime
+
+import (
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+// Event types in the trace, args are given in square brackets.
+const (
+ traceEvNone = 0 // unused
+ traceEvBatch = 1 // start of per-P batch of events [pid, timestamp]
+ traceEvFrequency = 2 // contains tracer timer frequency [frequency (ticks per second)]
+ traceEvStack = 3 // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
+ traceEvGomaxprocs = 4 // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
+ traceEvProcStart = 5 // start of P [timestamp, thread id]
+ traceEvProcStop = 6 // stop of P [timestamp]
+ traceEvGCStart = 7 // GC start [timestamp, seq, stack id]
+ traceEvGCDone = 8 // GC done [timestamp]
+ traceEvGCSTWStart = 9 // GC STW start [timestamp, kind]
+ traceEvGCSTWDone = 10 // GC STW done [timestamp]
+ traceEvGCSweepStart = 11 // GC sweep start [timestamp, stack id]
+ traceEvGCSweepDone = 12 // GC sweep done [timestamp, swept, reclaimed]
+ traceEvGoCreate = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
+ traceEvGoStart = 14 // goroutine starts running [timestamp, goroutine id, seq]
+ traceEvGoEnd = 15 // goroutine ends [timestamp]
+ traceEvGoStop = 16 // goroutine stops (like in select{}) [timestamp, stack]
+ traceEvGoSched = 17 // goroutine calls Gosched [timestamp, stack]
+ traceEvGoPreempt = 18 // goroutine is preempted [timestamp, stack]
+ traceEvGoSleep = 19 // goroutine calls Sleep [timestamp, stack]
+ traceEvGoBlock = 20 // goroutine blocks [timestamp, stack]
+ traceEvGoUnblock = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
+ traceEvGoBlockSend = 22 // goroutine blocks on chan send [timestamp, stack]
+ traceEvGoBlockRecv = 23 // goroutine blocks on chan recv [timestamp, stack]
+ traceEvGoBlockSelect = 24 // goroutine blocks on select [timestamp, stack]
+ traceEvGoBlockSync = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
+ traceEvGoBlockCond = 26 // goroutine blocks on Cond [timestamp, stack]
+ traceEvGoBlockNet = 27 // goroutine blocks on network [timestamp, stack]
+ traceEvGoSysCall = 28 // syscall enter [timestamp, stack]
+ traceEvGoSysExit = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
+ traceEvGoSysBlock = 30 // syscall blocks [timestamp]
+ traceEvGoWaiting = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
+ traceEvGoInSyscall = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
+ traceEvHeapAlloc = 33 // gcController.heapLive change [timestamp, heap_alloc]
+ traceEvHeapGoal = 34 // gcController.heapGoal() (formerly next_gc) change [timestamp, heap goal in bytes]
+ traceEvTimerGoroutine = 35 // not currently used; previously denoted timer goroutine [timer goroutine id]
+ traceEvFutileWakeup = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
+ traceEvString = 37 // string dictionary entry [ID, length, string]
+ traceEvGoStartLocal = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
+ traceEvGoUnblockLocal = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
+ traceEvGoSysExitLocal = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
+ traceEvGoStartLabel = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
+ traceEvGoBlockGC = 42 // goroutine blocks on GC assist [timestamp, stack]
+ traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
+ traceEvGCMarkAssistDone = 44 // GC mark assist done [timestamp]
+ traceEvUserTaskCreate = 45 // trace.NewContext [timestamp, internal task id, internal parent task id, stack, name string]
+ traceEvUserTaskEnd = 46 // end of a task [timestamp, internal task id, stack]
+ traceEvUserRegion = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), stack, name string]
+ traceEvUserLog = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
+ traceEvCPUSample = 49 // CPU profiling sample [timestamp, stack, real timestamp, real P id (-1 when absent), goroutine id]
+ traceEvCount = 50
+ // Byte is used but only 6 bits are available for event type.
+ // The remaining 2 bits are used to specify the number of arguments.
+ // That means, the max event type value is 63.
+)
+
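+// A hedged decoding sketch for the first byte of an event (the encoder is
+// traceEventLocked below):
+//
+//	evType := b & (1<<traceArgCountShift - 1) // low 6 bits: event type, at most 63
+//	narg := b >> traceArgCountShift           // top 2 bits: 3 means a length byte follows
+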
+const (
+ // Timestamps in trace are cputicks/traceTickDiv.
+ // This makes absolute values of timestamp diffs smaller,
+	// and so they are encoded in fewer bytes.
+ // 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
+ // The suggested increment frequency for PowerPC's time base register is
+ // 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
+ // and ppc64le.
+ // Tracing won't work reliably for architectures where cputicks is emulated
+ // by nanotime, so the value doesn't matter for those architectures.
+ traceTickDiv = 16 + 48*(goarch.Is386|goarch.IsAmd64)
+ // Maximum number of PCs in a single stack trace.
+ // Since events contain only stack id rather than whole stack trace,
+ // we can allow quite large values here.
+ traceStackSize = 128
+ // Identifier of a fake P that is used when we trace without a real P.
+ traceGlobProc = -1
+ // Maximum number of bytes to encode uint64 in base-128.
+ traceBytesPerNumber = 10
+ // Shift of the number of arguments in the first event byte.
+ traceArgCountShift = 6
+ // Flag passed to traceGoPark to denote that the previous wakeup of this
+ // goroutine was futile. For example, a goroutine was unblocked on a mutex,
+	// but another goroutine got ahead and acquired the mutex before the first
+	// goroutine was scheduled, so the first goroutine had to block again.
+	// Such wakeups happen on buffered channels and sync.Mutex,
+	// but are generally not interesting to the end user.
+ traceFutileWakeup byte = 128
+)
+
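+// A worked example for traceTickDiv: on amd64 the expression above is
+// 16 + 48*1 = 64, so at ~3GHz one trace tick covers roughly 21ns of cputicks;
+// on ppc64/ppc64le it evaluates to 16, about 31ns at the 512MHz time base.
+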
+// trace is global tracing context.
+var trace struct {
+ lock mutex // protects the following members
+	lockOwner *g // to avoid deadlocks during recursive lock acquisition
+ enabled bool // when set runtime traces events
+ shutdown bool // set when we are waiting for trace reader to finish after setting enabled to false
+ headerWritten bool // whether ReadTrace has emitted trace header
+ footerWritten bool // whether ReadTrace has emitted trace footer
+ shutdownSema uint32 // used to wait for ReadTrace completion
+ seqStart uint64 // sequence number when tracing was started
+ ticksStart int64 // cputicks when tracing was started
+ ticksEnd int64 // cputicks when tracing was stopped
+ timeStart int64 // nanotime when tracing was started
+ timeEnd int64 // nanotime when tracing was stopped
+ seqGC uint64 // GC start/done sequencer
+ reading traceBufPtr // buffer currently handed off to user
+ empty traceBufPtr // stack of empty buffers
+ fullHead traceBufPtr // queue of full buffers
+ fullTail traceBufPtr
+ reader guintptr // goroutine that called ReadTrace, or nil
+ stackTab traceStackTable // maps stack traces to unique ids
+ // cpuLogRead accepts CPU profile samples from the signal handler where
+ // they're generated. It uses a two-word header to hold the IDs of the P and
+ // G (respectively) that were active at the time of the sample. Because
+ // profBuf uses a record with all zeros in its header to indicate overflow,
+ // we make sure to make the P field always non-zero: The ID of a real P will
+ // start at bit 1, and bit 0 will be set. Samples that arrive while no P is
+ // running (such as near syscalls) will set the first header field to 0b10.
+ // This careful handling of the first header field allows us to store ID of
+ // the active G directly in the second field, even though that will be 0
+ // when sampling g0.
+ cpuLogRead *profBuf
+ // cpuLogBuf is a trace buffer to hold events corresponding to CPU profile
+ // samples, which arrive out of band and not directly connected to a
+ // specific P.
+ cpuLogBuf traceBufPtr
+
+ signalLock atomic.Uint32 // protects use of the following member, only usable in signal handlers
+ cpuLogWrite *profBuf // copy of cpuLogRead for use in signal handlers, set without signalLock
+
+ // Dictionary for traceEvString.
+ //
+ // TODO: central lock to access the map is not ideal.
+ // option: pre-assign ids to all user annotation region names and tags
+ // option: per-P cache
+ // option: sync.Map like data structure
+ stringsLock mutex
+ strings map[string]uint64
+ stringSeq uint64
+
+ // markWorkerLabels maps gcMarkWorkerMode to string ID.
+ markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64
+
+ bufLock mutex // protects buf
+ buf traceBufPtr // global trace buffer, used when running without a p
+}
+
+// traceBufHeader is per-P tracing buffer.
+type traceBufHeader struct {
+ link traceBufPtr // in trace.empty/full
+ lastTicks uint64 // when we wrote the last event
+ pos int // next write offset in arr
+ stk [traceStackSize]uintptr // scratch buffer for traceback
+}
+
+// traceBuf is per-P tracing buffer.
+//
+//go:notinheap
+type traceBuf struct {
+ traceBufHeader
+	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for trace event data
+}
+
+// traceBufPtr is a *traceBuf that is not traced by the garbage
+// collector and doesn't have write barriers. traceBufs are not
+// allocated from the GC'd heap, so this is safe, and are often
+// manipulated in contexts where write barriers are not allowed, so
+// this is necessary.
+//
+// TODO: Since traceBuf is now go:notinheap, this isn't necessary.
+type traceBufPtr uintptr
+
+func (tp traceBufPtr) ptr() *traceBuf { return (*traceBuf)(unsafe.Pointer(tp)) }
+func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
+func traceBufPtrOf(b *traceBuf) traceBufPtr {
+ return traceBufPtr(unsafe.Pointer(b))
+}
+
+// StartTrace enables tracing for the current process.
+// While tracing, the data will be buffered and available via ReadTrace.
+// StartTrace returns an error if tracing is already enabled.
+// Most clients should use the runtime/trace package or the testing package's
+// -test.trace flag instead of calling StartTrace directly.
+func StartTrace() error {
+ // Stop the world so that we can take a consistent snapshot
+ // of all goroutines at the beginning of the trace.
+ // Do not stop the world during GC so we ensure we always see
+ // a consistent view of GC-related events (e.g. a start is always
+ // paired with an end).
+ stopTheWorldGC("start tracing")
+
+ // Prevent sysmon from running any code that could generate events.
+ lock(&sched.sysmonlock)
+
+ // We are in stop-the-world, but syscalls can finish and write to trace concurrently.
+ // Exitsyscall could check trace.enabled long before and then suddenly wake up
+ // and decide to write to trace at a random point in time.
+ // However, such syscall will use the global trace.buf buffer, because we've
+ // acquired all p's by doing stop-the-world. So this protects us from such races.
+ lock(&trace.bufLock)
+
+ if trace.enabled || trace.shutdown {
+ unlock(&trace.bufLock)
+ unlock(&sched.sysmonlock)
+ startTheWorldGC()
+ return errorString("tracing is already enabled")
+ }
+
+ // Can't set trace.enabled yet. While the world is stopped, exitsyscall could
+ // already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
+ // That would lead to an inconsistent trace:
+ // - either GoSysExit appears before EvGoInSyscall,
+ // - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
+ // To instruct traceEvent that it must not ignore events below, we set startingtrace.
+ // trace.enabled is set afterwards once we have emitted all preliminary events.
+ _g_ := getg()
+ _g_.m.startingtrace = true
+
+ // Obtain current stack ID to use in all traceEvGoCreate events below.
+ mp := acquirem()
+ stkBuf := make([]uintptr, traceStackSize)
+ stackID := traceStackID(mp, stkBuf, 2)
+ releasem(mp)
+
+ profBuf := newProfBuf(2, profBufWordCount, profBufTagCount) // after the timestamp, header is [pp.id, gp.goid]
+ trace.cpuLogRead = profBuf
+
+ // We must not acquire trace.signalLock outside of a signal handler: a
+ // profiling signal may arrive at any time and try to acquire it, leading to
+ // deadlock. Because we can't use that lock to protect updates to
+ // trace.cpuLogWrite (only use of the structure it references), reads and
+ // writes of the pointer must be atomic. (And although this field is never
+ // the sole pointer to the profBuf value, it's best to allow a write barrier
+ // here.)
+ atomicstorep(unsafe.Pointer(&trace.cpuLogWrite), unsafe.Pointer(profBuf))
+
+ // World is stopped, no need to lock.
+ forEachGRace(func(gp *g) {
+ status := readgstatus(gp)
+ if status != _Gdead {
+ gp.traceseq = 0
+ gp.tracelastp = getg().m.p
+ // +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
+ id := trace.stackTab.put([]uintptr{startPCforTrace(gp.startpc) + sys.PCQuantum})
+ traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
+ }
+ if status == _Gwaiting {
+ // traceEvGoWaiting is implied to have seq=1.
+ gp.traceseq++
+ traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
+ }
+ if status == _Gsyscall {
+ gp.traceseq++
+ traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
+ } else {
+ gp.sysblocktraced = false
+ }
+ })
+ traceProcStart()
+ traceGoStart()
+ // Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
+ // If we do it the other way around, it is possible that exitsyscall will
+ // query sysexitticks after ticksStart but before traceEvGoInSyscall timestamp.
+ // It will lead to a false conclusion that cputicks is broken.
+ trace.ticksStart = cputicks()
+ trace.timeStart = nanotime()
+ trace.headerWritten = false
+ trace.footerWritten = false
+
+ // string to id mapping
+ // 0 : reserved for an empty string
+ // remaining: other strings registered by traceString
+ trace.stringSeq = 0
+ trace.strings = make(map[string]uint64)
+
+ trace.seqGC = 0
+ _g_.m.startingtrace = false
+ trace.enabled = true
+
+ // Register runtime goroutine labels.
+ _, pid, bufp := traceAcquireBuffer()
+ for i, label := range gcMarkWorkerModeStrings[:] {
+ trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
+ }
+ traceReleaseBuffer(pid)
+
+ unlock(&trace.bufLock)
+
+ unlock(&sched.sysmonlock)
+
+ startTheWorldGC()
+ return nil
+}
+
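+// A hedged usage sketch: most programs should not call StartTrace directly
+// but go through runtime/trace, which pairs it with a ReadTrace reader
+// (imports of os, log, and runtime/trace elided):
+//
+//	f, err := os.Create("trace.out")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer f.Close()
+//	if err := trace.Start(f); err != nil {
+//		log.Fatal(err)
+//	}
+//	defer trace.Stop()
+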
+// StopTrace stops tracing, if it was previously enabled.
+// StopTrace only returns after all the reads for the trace have completed.
+func StopTrace() {
+ // Stop the world so that we can collect the trace buffers from all p's below,
+ // and also to avoid races with traceEvent.
+ stopTheWorldGC("stop tracing")
+
+ // See the comment in StartTrace.
+ lock(&sched.sysmonlock)
+
+ // See the comment in StartTrace.
+ lock(&trace.bufLock)
+
+ if !trace.enabled {
+ unlock(&trace.bufLock)
+ unlock(&sched.sysmonlock)
+ startTheWorldGC()
+ return
+ }
+
+ traceGoSched()
+
+ atomicstorep(unsafe.Pointer(&trace.cpuLogWrite), nil)
+ trace.cpuLogRead.close()
+ traceReadCPU()
+
+ // Loop over all allocated Ps because dead Ps may still have
+ // trace buffers.
+ for _, p := range allp[:cap(allp)] {
+ buf := p.tracebuf
+ if buf != 0 {
+ traceFullQueue(buf)
+ p.tracebuf = 0
+ }
+ }
+ if trace.buf != 0 {
+ buf := trace.buf
+ trace.buf = 0
+ if buf.ptr().pos != 0 {
+ traceFullQueue(buf)
+ }
+ }
+ if trace.cpuLogBuf != 0 {
+ buf := trace.cpuLogBuf
+ trace.cpuLogBuf = 0
+ if buf.ptr().pos != 0 {
+ traceFullQueue(buf)
+ }
+ }
+
+ for {
+ trace.ticksEnd = cputicks()
+ trace.timeEnd = nanotime()
+ // Windows time can tick only every 15ms, wait for at least one tick.
+ if trace.timeEnd != trace.timeStart {
+ break
+ }
+ osyield()
+ }
+
+ trace.enabled = false
+ trace.shutdown = true
+ unlock(&trace.bufLock)
+
+ unlock(&sched.sysmonlock)
+
+ startTheWorldGC()
+
+ // The world is started but we've set trace.shutdown, so new tracing can't start.
+ // Wait for the trace reader to flush pending buffers and stop.
+ semacquire(&trace.shutdownSema)
+ if raceenabled {
+ raceacquire(unsafe.Pointer(&trace.shutdownSema))
+ }
+
+ // The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
+ lock(&trace.lock)
+ for _, p := range allp[:cap(allp)] {
+ if p.tracebuf != 0 {
+ throw("trace: non-empty trace buffer in proc")
+ }
+ }
+ if trace.buf != 0 {
+ throw("trace: non-empty global trace buffer")
+ }
+ if trace.fullHead != 0 || trace.fullTail != 0 {
+ throw("trace: non-empty full trace buffer")
+ }
+ if trace.reading != 0 || trace.reader != 0 {
+ throw("trace: reading after shutdown")
+ }
+ for trace.empty != 0 {
+ buf := trace.empty
+ trace.empty = buf.ptr().link
+ sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
+ }
+ trace.strings = nil
+ trace.shutdown = false
+ trace.cpuLogRead = nil
+ unlock(&trace.lock)
+}
+
+// ReadTrace returns the next chunk of binary tracing data, blocking until data
+// is available. If tracing is turned off and all the data accumulated while it
+// was on has been returned, ReadTrace returns nil. The caller must copy the
+// returned data before calling ReadTrace again.
+// ReadTrace must be called from one goroutine at a time.
+func ReadTrace() []byte {
+ // This function may need to lock trace.lock recursively
+ // (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
+ // To allow this we use trace.lockOwner.
+ // Also this function must not allocate while holding trace.lock:
+ // allocation can call heap allocate, which will try to emit a trace
+ // event while holding heap lock.
+ lock(&trace.lock)
+ trace.lockOwner = getg()
+
+ if trace.reader != 0 {
+		// More than one goroutine reads trace. This is bad.
+		// But we'd rather not crash the program because of tracing,
+		// since tracing can be enabled at runtime on production servers.
+ trace.lockOwner = nil
+ unlock(&trace.lock)
+ println("runtime: ReadTrace called from multiple goroutines simultaneously")
+ return nil
+ }
+ // Recycle the old buffer.
+ if buf := trace.reading; buf != 0 {
+ buf.ptr().link = trace.empty
+ trace.empty = buf
+ trace.reading = 0
+ }
+ // Write trace header.
+ if !trace.headerWritten {
+ trace.headerWritten = true
+ trace.lockOwner = nil
+ unlock(&trace.lock)
+ return []byte("go 1.19 trace\x00\x00\x00")
+ }
+ // Optimistically look for CPU profile samples. This may write new stack
+ // records, and may write new tracing buffers.
+ if !trace.footerWritten && !trace.shutdown {
+ traceReadCPU()
+ }
+ // Wait for new data.
+ if trace.fullHead == 0 && !trace.shutdown {
+ trace.reader.set(getg())
+ goparkunlock(&trace.lock, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
+ lock(&trace.lock)
+ }
+ // Write a buffer.
+ if trace.fullHead != 0 {
+ buf := traceFullDequeue()
+ trace.reading = buf
+ trace.lockOwner = nil
+ unlock(&trace.lock)
+ return buf.ptr().arr[:buf.ptr().pos]
+ }
+
+ // Write footer with timer frequency.
+ if !trace.footerWritten {
+ trace.footerWritten = true
+ // Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
+ freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
+ if freq <= 0 {
+ throw("trace: ReadTrace got invalid frequency")
+ }
+ trace.lockOwner = nil
+ unlock(&trace.lock)
+ var data []byte
+ data = append(data, traceEvFrequency|0<<traceArgCountShift)
+ data = traceAppend(data, uint64(freq))
+ // This will emit a bunch of full buffers, we will pick them up
+ // on the next iteration.
+ trace.stackTab.dump()
+ return data
+ }
+ // Done.
+ if trace.shutdown {
+ trace.lockOwner = nil
+ unlock(&trace.lock)
+ if raceenabled {
+ // Model synchronization on trace.shutdownSema, which race
+ // detector does not see. This is required to avoid false
+ // race reports on writer passed to trace.Start.
+ racerelease(unsafe.Pointer(&trace.shutdownSema))
+ }
+		// trace.enabled is already reset, so we can call traceable functions.
+ semrelease(&trace.shutdownSema)
+ return nil
+ }
+ // Also bad, but see the comment above.
+ trace.lockOwner = nil
+ unlock(&trace.lock)
+ println("runtime: spurious wakeup of trace reader")
+ return nil
+}
+
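+// A hedged sketch of a direct ReadTrace consumer, roughly what the reader
+// goroutine started by runtime/trace.Start does (w is some io.Writer):
+//
+//	for {
+//		data := runtime.ReadTrace()
+//		if data == nil {
+//			break // tracing stopped and all buffers were drained
+//		}
+//		w.Write(data) // consume data before the next ReadTrace call
+//	}
+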
+// traceReader returns the trace reader that should be woken up, if any.
+// Callers should first check that trace.enabled or trace.shutdown is set.
+func traceReader() *g {
+ if !traceReaderAvailable() {
+ return nil
+ }
+ lock(&trace.lock)
+ if !traceReaderAvailable() {
+ unlock(&trace.lock)
+ return nil
+ }
+ gp := trace.reader.ptr()
+ trace.reader.set(nil)
+ unlock(&trace.lock)
+ return gp
+}
+
+// traceReaderAvailable returns true if the trace reader is not currently
+// scheduled and should be. Callers should first check that trace.enabled
+// or trace.shutdown is set.
+func traceReaderAvailable() bool {
+ return trace.reader != 0 && (trace.fullHead != 0 || trace.shutdown)
+}
+
+// traceProcFree frees trace buffer associated with pp.
+func traceProcFree(pp *p) {
+ buf := pp.tracebuf
+ pp.tracebuf = 0
+ if buf == 0 {
+ return
+ }
+ lock(&trace.lock)
+ traceFullQueue(buf)
+ unlock(&trace.lock)
+}
+
+// traceFullQueue queues buf into queue of full buffers.
+func traceFullQueue(buf traceBufPtr) {
+ buf.ptr().link = 0
+ if trace.fullHead == 0 {
+ trace.fullHead = buf
+ } else {
+ trace.fullTail.ptr().link = buf
+ }
+ trace.fullTail = buf
+}
+
+// traceFullDequeue dequeues from queue of full buffers.
+func traceFullDequeue() traceBufPtr {
+ buf := trace.fullHead
+ if buf == 0 {
+ return 0
+ }
+ trace.fullHead = buf.ptr().link
+ if trace.fullHead == 0 {
+ trace.fullTail = 0
+ }
+ buf.ptr().link = 0
+ return buf
+}
+
+// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
+// ev is event type.
+// If skip > 0, write current stack id as the last argument (skipping skip top frames).
+// If skip = 0, this event type should contain a stack, but we don't want
+// to collect and remember it for this particular call.
+func traceEvent(ev byte, skip int, args ...uint64) {
+ mp, pid, bufp := traceAcquireBuffer()
+ // Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
+ // This protects from races between traceEvent and StartTrace/StopTrace.
+
+ // The caller checked that trace.enabled == true, but trace.enabled might have been
+ // turned off between the check and now. Check again. traceLockBuffer did mp.locks++,
+ // StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
+ // so if we see trace.enabled == true now, we know it's true for the rest of the function.
+ // Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
+ // during tracing in exitsyscall is resolved by locking trace.bufLock in traceLockBuffer.
+ //
+ // Note trace_userTaskCreate runs the same check.
+ if !trace.enabled && !mp.startingtrace {
+ traceReleaseBuffer(pid)
+ return
+ }
+
+ if skip > 0 {
+ if getg() == mp.curg {
+ skip++ // +1 because stack is captured in traceEventLocked.
+ }
+ }
+ traceEventLocked(0, mp, pid, bufp, ev, 0, skip, args...)
+ traceReleaseBuffer(pid)
+}
+
+// traceEventLocked writes a single event of type ev to the trace buffer bufp,
+// flushing the buffer if necessary. pid is the id of the current P, or
+// traceGlobProc if we're tracing without a real P.
+//
+// Preemption is disabled, and if running without a real P the global tracing
+// buffer is locked.
+//
+// Event types that do not include a stack set skip to -1. Event types that
+// include a stack may explicitly reference a stackID from the trace.stackTab
+// (obtained by an earlier call to traceStackID). Without an explicit stackID,
+// this function will automatically capture the stack of the goroutine currently
+// running on mp, skipping skip top frames or, if skip is 0, writing out an
+// empty stack record.
+//
+// It records the event's args to the traceBuf, and also makes an effort to
+// reserve extraBytes bytes of additional space immediately following the event,
+// in the same traceBuf.
+func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, stackID uint32, skip int, args ...uint64) {
+ buf := bufp.ptr()
+ // TODO: test on non-zero extraBytes param.
+	maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two additional params
+ if buf == nil || len(buf.arr)-buf.pos < maxSize {
+ buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
+ bufp.set(buf)
+ }
+
+	// NOTE: ticks might be the same after tick division, even though the
+	// real cputicks values keep increasing.
+ ticks := uint64(cputicks()) / traceTickDiv
+ tickDiff := ticks - buf.lastTicks
+ if tickDiff == 0 {
+ ticks = buf.lastTicks + 1
+ tickDiff = 1
+ }
+
+ buf.lastTicks = ticks
+ narg := byte(len(args))
+ if stackID != 0 || skip >= 0 {
+ narg++
+ }
+ // We have only 2 bits for number of arguments.
+ // If number is >= 3, then the event type is followed by event length in bytes.
+ if narg > 3 {
+ narg = 3
+ }
+ startPos := buf.pos
+ buf.byte(ev | narg<<traceArgCountShift)
+ var lenp *byte
+ if narg == 3 {
+ // Reserve the byte for length assuming that length < 128.
+ buf.varint(0)
+ lenp = &buf.arr[buf.pos-1]
+ }
+ buf.varint(tickDiff)
+ for _, a := range args {
+ buf.varint(a)
+ }
+ if stackID != 0 {
+ buf.varint(uint64(stackID))
+ } else if skip == 0 {
+ buf.varint(0)
+ } else if skip > 0 {
+ buf.varint(traceStackID(mp, buf.stk[:], skip))
+ }
+ evSize := buf.pos - startPos
+ if evSize > maxSize {
+ throw("invalid length of trace event")
+ }
+ if lenp != nil {
+ // Fill in actual length.
+ *lenp = byte(evSize - 2)
+ }
+}
+
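+// The varints written above use base-128 encoding with a continuation bit in
+// the high bit, which is why traceBytesPerNumber is 10 for a uint64. A hedged
+// sketch of such an encoder:
+//
+//	for ; v >= 0x80; v >>= 7 {
+//		out = append(out, byte(v)|0x80) // 7 payload bits, continuation bit set
+//	}
+//	out = append(out, byte(v)) // final byte, high bit clear
+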
+// traceCPUSample writes a CPU profile sample stack to the execution tracer's
+// profiling buffer. It is called from a signal handler, so is limited in what
+// it can do.
+func traceCPUSample(gp *g, pp *p, stk []uintptr) {
+ if !trace.enabled {
+ // Tracing is usually turned off; don't spend time acquiring the signal
+ // lock unless it's active.
+ return
+ }
+
+ // Match the clock used in traceEventLocked
+ now := cputicks()
+ // The "header" here is the ID of the P that was running the profiled code,
+ // followed by the ID of the goroutine. (For normal CPU profiling, it's
+ // usually the number of samples with the given stack.) Near syscalls, pp
+ // may be nil. Reporting goid of 0 is fine for either g0 or a nil gp.
+ var hdr [2]uint64
+ if pp != nil {
+ // Overflow records in profBuf have all header values set to zero. Make
+ // sure that real headers have at least one bit set.
+ hdr[0] = uint64(pp.id)<<1 | 0b1
+ } else {
+ hdr[0] = 0b10
+ }
+ if gp != nil {
+ hdr[1] = uint64(gp.goid)
+ }
+
+ // Allow only one writer at a time
+ for !trace.signalLock.CompareAndSwap(0, 1) {
+ // TODO: Is it safe to osyield here? https://go.dev/issue/52672
+ osyield()
+ }
+
+ if log := (*profBuf)(atomic.Loadp(unsafe.Pointer(&trace.cpuLogWrite))); log != nil {
+ // Note: we don't pass a tag pointer here (how should profiling tags
+ // interact with the execution tracer?), but if we did we'd need to be
+ // careful about write barriers. See the long comment in profBuf.write.
+ log.write(nil, now, hdr[:], stk)
+ }
+
+ trace.signalLock.Store(0)
+}
+
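+// The header encoding above round-trips with the decoder in traceReadCPU
+// below. A minimal sketch (illustrative only, not part of the original
+// source):
+//
+//    enc := uint64(pp.id)<<1 | 0b1 // low bit set: a real P was running
+//    ppid := enc >> 1              // the decoder recovers the P's id
+//    hasP := enc&0b1 != 0          // and whether there was a real P
+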
+func traceReadCPU() {
+ bufp := &trace.cpuLogBuf
+
+ for {
+ data, tags, _ := trace.cpuLogRead.read(profBufNonBlocking)
+ if len(data) == 0 {
+ break
+ }
+ for len(data) > 0 {
+ if len(data) < 4 || data[0] > uint64(len(data)) {
+ break // truncated profile
+ }
+ if data[0] < 4 || tags != nil && len(tags) < 1 {
+ break // malformed profile
+ }
+ if len(tags) < 1 {
+ break // mismatched profile records and tags
+ }
+ timestamp := data[1]
+ ppid := data[2] >> 1
+ if hasP := (data[2] & 0b1) != 0; !hasP {
+ ppid = ^uint64(0)
+ }
+ goid := data[3]
+ stk := data[4:data[0]]
+ empty := len(stk) == 1 && data[2] == 0 && data[3] == 0
+ data = data[data[0]:]
+ // No support here for reporting goroutine tags at the moment; if
+ // that information is to be part of the execution trace, we'd
+ // probably want to see when the tags are applied and when they
+ // change, instead of only seeing them when we get a CPU sample.
+ tags = tags[1:]
+
+ if empty {
+ // Looks like an overflow record from the profBuf. Not much to
+ // do here, we only want to report full records.
+ //
+ // TODO: should we start a goroutine to drain the profBuf,
+ // rather than relying on a high-enough volume of tracing events
+ // to keep ReadTrace busy? https://go.dev/issue/52674
+ continue
+ }
+
+ buf := bufp.ptr()
+ if buf == nil {
+ *bufp = traceFlush(*bufp, 0)
+ buf = bufp.ptr()
+ }
+ for i := range stk {
+ if i >= len(buf.stk) {
+ break
+ }
+ buf.stk[i] = uintptr(stk[i])
+ }
+ stackID := trace.stackTab.put(buf.stk[:len(stk)])
+
+ traceEventLocked(0, nil, 0, bufp, traceEvCPUSample, stackID, 1, timestamp/traceTickDiv, ppid, goid)
+ }
+ }
+}
+
+func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
+ _g_ := getg()
+ gp := mp.curg
+ var nstk int
+ if gp == _g_ {
+ nstk = callers(skip+1, buf)
+ } else if gp != nil {
+ gp = mp.curg
+ nstk = gcallers(gp, skip, buf)
+ }
+ if nstk > 0 {
+ nstk-- // skip runtime.goexit
+ }
+ if nstk > 0 && gp.goid == 1 {
+ nstk-- // skip runtime.main
+ }
+ id := trace.stackTab.put(buf[:nstk])
+ return uint64(id)
+}
+
+// traceAcquireBuffer returns the trace buffer to use and, if necessary, locks it.
+func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
+ mp = acquirem()
+ if p := mp.p.ptr(); p != nil {
+ return mp, p.id, &p.tracebuf
+ }
+ lock(&trace.bufLock)
+ return mp, traceGlobProc, &trace.buf
+}
+
+// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
+func traceReleaseBuffer(pid int32) {
+ if pid == traceGlobProc {
+ unlock(&trace.bufLock)
+ }
+ releasem(getg().m)
+}
+
+// traceFlush puts buf onto the stack of full buffers and returns an empty buffer.
+func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
+ owner := trace.lockOwner
+ dolock := owner == nil || owner != getg().m.curg
+ if dolock {
+ lock(&trace.lock)
+ }
+ if buf != 0 {
+ traceFullQueue(buf)
+ }
+ if trace.empty != 0 {
+ buf = trace.empty
+ trace.empty = buf.ptr().link
+ } else {
+ buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
+ if buf == 0 {
+ throw("trace: out of memory")
+ }
+ }
+ bufp := buf.ptr()
+ bufp.link.set(nil)
+ bufp.pos = 0
+
+ // initialize the buffer for a new batch
+ ticks := uint64(cputicks()) / traceTickDiv
+ if ticks == bufp.lastTicks {
+ ticks = bufp.lastTicks + 1
+ }
+ bufp.lastTicks = ticks
+ bufp.byte(traceEvBatch | 1<<traceArgCountShift)
+ bufp.varint(uint64(pid))
+ bufp.varint(ticks)
+
+ if dolock {
+ unlock(&trace.lock)
+ }
+ return buf
+}
+
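+// Every fresh buffer returned by traceFlush therefore begins with a batch
+// header (an illustrative summary of the code above, not part of the
+// original source):
+//
+//    traceEvBatch|1<<traceArgCountShift  pid (varint)  ticks (varint)
+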
+// traceString adds a string to trace.strings and returns its id.
+func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
+ if s == "" {
+ return 0, bufp
+ }
+
+ lock(&trace.stringsLock)
+ if raceenabled {
+ // raceacquire is necessary because the map access
+ // below is race annotated.
+ raceacquire(unsafe.Pointer(&trace.stringsLock))
+ }
+
+ if id, ok := trace.strings[s]; ok {
+ if raceenabled {
+ racerelease(unsafe.Pointer(&trace.stringsLock))
+ }
+ unlock(&trace.stringsLock)
+
+ return id, bufp
+ }
+
+ trace.stringSeq++
+ id := trace.stringSeq
+ trace.strings[s] = id
+
+ if raceenabled {
+ racerelease(unsafe.Pointer(&trace.stringsLock))
+ }
+ unlock(&trace.stringsLock)
+
+ // Memory allocation above may trigger tracing and cause *bufp to
+ // change. The code below works with *bufp directly, so there must be
+ // no memory allocation or any other activity that can trigger tracing
+ // after this point.
+
+ buf := bufp.ptr()
+ size := 1 + 2*traceBytesPerNumber + len(s)
+ if buf == nil || len(buf.arr)-buf.pos < size {
+ buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
+ bufp.set(buf)
+ }
+ buf.byte(traceEvString)
+ buf.varint(id)
+
+ // double-check the string and the length can fit.
+ // Otherwise, truncate the string.
+ slen := len(s)
+ if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
+ slen = room
+ }
+
+ buf.varint(uint64(slen))
+ buf.pos += copy(buf.arr[buf.pos:], s[:slen])
+
+ bufp.set(buf)
+ return id, bufp
+}
+
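+// A string table entry, as emitted above, is laid out as follows (an
+// illustrative summary of the code above, not part of the original source):
+//
+//    traceEvString  id (varint)  len (varint)  len bytes of string data
+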
+// traceAppend appends v to buf in little-endian-base-128 encoding.
+func traceAppend(buf []byte, v uint64) []byte {
+ for ; v >= 0x80; v >>= 7 {
+ buf = append(buf, 0x80|byte(v))
+ }
+ buf = append(buf, byte(v))
+ return buf
+}
+
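+// For example (an illustrative sketch, not part of the original source),
+// v = 300 = 0b10_0101100 is emitted as two bytes: the low 7 bits with the
+// continuation bit set, then the remaining bits:
+//
+//    traceAppend(nil, 300) // -> []byte{0xAC, 0x02}; 0x2C + 0x02<<7 == 300
+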
+// varint appends v to buf in little-endian-base-128 encoding.
+func (buf *traceBuf) varint(v uint64) {
+ pos := buf.pos
+ for ; v >= 0x80; v >>= 7 {
+ buf.arr[pos] = 0x80 | byte(v)
+ pos++
+ }
+ buf.arr[pos] = byte(v)
+ pos++
+ buf.pos = pos
+}
+
+// byte appends v to buf.
+func (buf *traceBuf) byte(v byte) {
+ buf.arr[buf.pos] = v
+ buf.pos++
+}
+
+// traceStackTable maps stack traces (arrays of PCs) to unique uint32 ids.
+// It is lock-free for reading.
+type traceStackTable struct {
+ lock mutex
+ seq uint32
+ mem traceAlloc
+ tab [1 << 13]traceStackPtr
+}
+
+// traceStack is a single stack in traceStackTable.
+type traceStack struct {
+ link traceStackPtr
+ hash uintptr
+ id uint32
+ n int
+ stk [0]uintptr // real type [n]uintptr
+}
+
+type traceStackPtr uintptr
+
+func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }
+
+// stack returns slice of PCs.
+func (ts *traceStack) stack() []uintptr {
+ return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
+}
+
+// put returns a unique id for the stack trace pcs and caches it in the table
+// if it sees the trace for the first time.
+func (tab *traceStackTable) put(pcs []uintptr) uint32 {
+ if len(pcs) == 0 {
+ return 0
+ }
+ hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
+ // First, search the hashtable without taking the mutex.
+ if id := tab.find(pcs, hash); id != 0 {
+ return id
+ }
+ // Now, double check under the mutex.
+ lock(&tab.lock)
+ if id := tab.find(pcs, hash); id != 0 {
+ unlock(&tab.lock)
+ return id
+ }
+ // Create new record.
+ tab.seq++
+ stk := tab.newStack(len(pcs))
+ stk.hash = hash
+ stk.id = tab.seq
+ stk.n = len(pcs)
+ stkpc := stk.stack()
+ for i, pc := range pcs {
+ stkpc[i] = pc
+ }
+ part := int(hash % uintptr(len(tab.tab)))
+ stk.link = tab.tab[part]
+ atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
+ unlock(&tab.lock)
+ return stk.id
+}
+
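+// put follows the classic double-checked pattern: an optimistic lock-free
+// find, then a locked re-check before inserting. Lock-free readers stay
+// safe because each traceStack is fully initialized before atomicstorep
+// publishes it at the head of its bucket, and entries are only released
+// in bulk by dump.
+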
+// find checks if the stack trace pcs is already present in the table.
+func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
+ part := int(hash % uintptr(len(tab.tab)))
+Search:
+ for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
+ if stk.hash == hash && stk.n == len(pcs) {
+ for i, stkpc := range stk.stack() {
+ if stkpc != pcs[i] {
+ continue Search
+ }
+ }
+ return stk.id
+ }
+ }
+ return 0
+}
+
+// newStack allocates a new stack of size n.
+func (tab *traceStackTable) newStack(n int) *traceStack {
+ return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*goarch.PtrSize))
+}
+
+// allFrames returns all of the Frames corresponding to pcs.
+func allFrames(pcs []uintptr) []Frame {
+ frames := make([]Frame, 0, len(pcs))
+ ci := CallersFrames(pcs)
+ for {
+ f, more := ci.Next()
+ frames = append(frames, f)
+ if !more {
+ return frames
+ }
+ }
+}
+
+// dump writes all previously cached stacks to trace buffers,
+// releases all memory and resets state.
+func (tab *traceStackTable) dump() {
+ var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
+ bufp := traceFlush(0, 0)
+ for _, stk := range tab.tab {
+ stk := stk.ptr()
+ for ; stk != nil; stk = stk.link.ptr() {
+ tmpbuf := tmp[:0]
+ tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
+ frames := allFrames(stk.stack())
+ tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
+ for _, f := range frames {
+ var frame traceFrame
+ frame, bufp = traceFrameForPC(bufp, 0, f)
+ tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
+ tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
+ tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
+ tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
+ }
+ // Now copy to the buffer.
+ size := 1 + traceBytesPerNumber + len(tmpbuf)
+ if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
+ bufp = traceFlush(bufp, 0)
+ }
+ buf := bufp.ptr()
+ buf.byte(traceEvStack | 3<<traceArgCountShift)
+ buf.varint(uint64(len(tmpbuf)))
+ buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
+ }
+ }
+
+ lock(&trace.lock)
+ traceFullQueue(bufp)
+ unlock(&trace.lock)
+
+ tab.mem.drop()
+ *tab = traceStackTable{}
+ lockInit(&((*tab).lock), lockRankTraceStackTab)
+}
+
+type traceFrame struct {
+ funcID uint64
+ fileID uint64
+ line uint64
+}
+
+// traceFrameForPC records the frame information.
+// It may allocate memory.
+func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
+ bufp := &buf
+ var frame traceFrame
+
+ fn := f.Function
+ const maxLen = 1 << 10
+ if len(fn) > maxLen {
+ fn = fn[len(fn)-maxLen:]
+ }
+ frame.funcID, bufp = traceString(bufp, pid, fn)
+ frame.line = uint64(f.Line)
+ file := f.File
+ if len(file) > maxLen {
+ file = file[len(file)-maxLen:]
+ }
+ frame.fileID, bufp = traceString(bufp, pid, file)
+ return frame, (*bufp)
+}
+
+// traceAlloc is a non-thread-safe region allocator.
+// It holds a linked list of traceAllocBlock.
+type traceAlloc struct {
+ head traceAllocBlockPtr
+ off uintptr
+}
+
+// traceAllocBlock is a block in traceAlloc.
+//
+// traceAllocBlock is allocated from non-GC'd memory, so it must not
+// contain heap pointers. Writes to pointers to traceAllocBlocks do
+// not need write barriers.
+//
+//go:notinheap
+type traceAllocBlock struct {
+ next traceAllocBlockPtr
+ data [64<<10 - goarch.PtrSize]byte
+}
+
+// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
+type traceAllocBlockPtr uintptr
+
+func (p traceAllocBlockPtr) ptr() *traceAllocBlock { return (*traceAllocBlock)(unsafe.Pointer(p)) }
+func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }
+
+// alloc allocates an n-byte block.
+func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
+ n = alignUp(n, goarch.PtrSize)
+ if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
+ if n > uintptr(len(a.head.ptr().data)) {
+ throw("trace: alloc too large")
+ }
+ block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
+ if block == nil {
+ throw("trace: out of memory")
+ }
+ block.next.set(a.head.ptr())
+ a.head.set(block)
+ a.off = 0
+ }
+ p := &a.head.ptr().data[a.off]
+ a.off += n
+ return unsafe.Pointer(p)
+}
+
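+// For instance (illustrative, not part of the original source), a 13-byte
+// request on a 64-bit platform is rounded up to the pointer size:
+//
+//    alignUp(13, 8) == 16
+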
+// drop frees all previously allocated memory and resets the allocator.
+func (a *traceAlloc) drop() {
+ for a.head != 0 {
+ block := a.head.ptr()
+ a.head.set(block.next.ptr())
+ sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
+ }
+}
+
+// The following functions write specific events to the trace.
+
+func traceGomaxprocs(procs int32) {
+ traceEvent(traceEvGomaxprocs, 1, uint64(procs))
+}
+
+func traceProcStart() {
+ traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
+}
+
+func traceProcStop(pp *p) {
+ // Sysmon and stopTheWorld can stop Ps blocked in syscalls;
+ // to handle this we temporarily adopt the P.
+ mp := acquirem()
+ oldp := mp.p
+ mp.p.set(pp)
+ traceEvent(traceEvProcStop, -1)
+ mp.p = oldp
+ releasem(mp)
+}
+
+func traceGCStart() {
+ traceEvent(traceEvGCStart, 3, trace.seqGC)
+ trace.seqGC++
+}
+
+func traceGCDone() {
+ traceEvent(traceEvGCDone, -1)
+}
+
+func traceGCSTWStart(kind int) {
+ traceEvent(traceEvGCSTWStart, -1, uint64(kind))
+}
+
+func traceGCSTWDone() {
+ traceEvent(traceEvGCSTWDone, -1)
+}
+
+// traceGCSweepStart prepares to trace a sweep loop. This does not
+// emit any events until traceGCSweepSpan is called.
+//
+// traceGCSweepStart must be paired with traceGCSweepDone and there
+// must be no preemption points between these two calls.
+func traceGCSweepStart() {
+ // Delay the actual GCSweepStart event until the first span
+ // sweep. If we don't sweep anything, don't emit any events.
+ _p_ := getg().m.p.ptr()
+ if _p_.traceSweep {
+ throw("double traceGCSweepStart")
+ }
+ _p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
+}
+
+// traceGCSweepSpan traces the sweep of a single page.
+//
+// This may be called outside a traceGCSweepStart/traceGCSweepDone
+// pair; however, it will not emit any trace events in this case.
+func traceGCSweepSpan(bytesSwept uintptr) {
+ _p_ := getg().m.p.ptr()
+ if _p_.traceSweep {
+ if _p_.traceSwept == 0 {
+ traceEvent(traceEvGCSweepStart, 1)
+ }
+ _p_.traceSwept += bytesSwept
+ }
+}
+
+func traceGCSweepDone() {
+ _p_ := getg().m.p.ptr()
+ if !_p_.traceSweep {
+ throw("missing traceGCSweepStart")
+ }
+ if _p_.traceSwept != 0 {
+ traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
+ }
+ _p_.traceSweep = false
+}
+
+func traceGCMarkAssistStart() {
+ traceEvent(traceEvGCMarkAssistStart, 1)
+}
+
+func traceGCMarkAssistDone() {
+ traceEvent(traceEvGCMarkAssistDone, -1)
+}
+
+func traceGoCreate(newg *g, pc uintptr) {
+ newg.traceseq = 0
+ newg.tracelastp = getg().m.p
+ // +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
+ id := trace.stackTab.put([]uintptr{startPCforTrace(pc) + sys.PCQuantum})
+ traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
+}
+
+func traceGoStart() {
+ _g_ := getg().m.curg
+ _p_ := _g_.m.p
+ _g_.traceseq++
+ if _p_.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
+ traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
+ } else if _g_.tracelastp == _p_ {
+ traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
+ } else {
+ _g_.tracelastp = _p_
+ traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
+ }
+}
+
+func traceGoEnd() {
+ traceEvent(traceEvGoEnd, -1)
+}
+
+func traceGoSched() {
+ _g_ := getg()
+ _g_.tracelastp = _g_.m.p
+ traceEvent(traceEvGoSched, 1)
+}
+
+func traceGoPreempt() {
+ _g_ := getg()
+ _g_.tracelastp = _g_.m.p
+ traceEvent(traceEvGoPreempt, 1)
+}
+
+func traceGoPark(traceEv byte, skip int) {
+ if traceEv&traceFutileWakeup != 0 {
+ traceEvent(traceEvFutileWakeup, -1)
+ }
+ traceEvent(traceEv & ^traceFutileWakeup, skip)
+}
+
+func traceGoUnpark(gp *g, skip int) {
+ _p_ := getg().m.p
+ gp.traceseq++
+ if gp.tracelastp == _p_ {
+ traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
+ } else {
+ gp.tracelastp = _p_
+ traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
+ }
+}
+
+func traceGoSysCall() {
+ traceEvent(traceEvGoSysCall, 1)
+}
+
+func traceGoSysExit(ts int64) {
+ if ts != 0 && ts < trace.ticksStart {
+ // There is a race between the code that initializes sysexitticks
+ // (in exitsyscall, which runs without a P, and therefore is not
+ // stopped with the rest of the world) and the code that initializes
+ // a new trace. The recorded sysexitticks must therefore be treated
+ // as "best effort". If they are valid for this trace, then great,
+ // use them for greater accuracy. But if they're not valid for this
+ // trace, assume that the trace was started after the actual syscall
+ // exit (but before we actually managed to start the goroutine,
+ // aka right now), and assign a fresh time stamp to keep the log consistent.
+ ts = 0
+ }
+ _g_ := getg().m.curg
+ _g_.traceseq++
+ _g_.tracelastp = _g_.m.p
+ traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
+}
+
+func traceGoSysBlock(pp *p) {
+ // Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked;
+ // to handle this we temporarily adopt the P.
+ mp := acquirem()
+ oldp := mp.p
+ mp.p.set(pp)
+ traceEvent(traceEvGoSysBlock, -1)
+ mp.p = oldp
+ releasem(mp)
+}
+
+func traceHeapAlloc() {
+ traceEvent(traceEvHeapAlloc, -1, gcController.heapLive)
+}
+
+func traceHeapGoal() {
+ heapGoal := gcController.heapGoal()
+ if heapGoal == ^uint64(0) {
+ // Heap-based triggering is disabled.
+ traceEvent(traceEvHeapGoal, -1, 0)
+ } else {
+ traceEvent(traceEvHeapGoal, -1, heapGoal)
+ }
+}
+
+// To access runtime functions from runtime/trace.
+// See runtime/trace/annotation.go
+
+//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
+func trace_userTaskCreate(id, parentID uint64, taskType string) {
+ if !trace.enabled {
+ return
+ }
+
+ // Same as in traceEvent.
+ mp, pid, bufp := traceAcquireBuffer()
+ if !trace.enabled && !mp.startingtrace {
+ traceReleaseBuffer(pid)
+ return
+ }
+
+ typeStringID, bufp := traceString(bufp, pid, taskType)
+ traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 0, 3, id, parentID, typeStringID)
+ traceReleaseBuffer(pid)
+}
+
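+// The public entry point for this hook is runtime/trace.NewTask. A minimal
+// usage sketch (illustrative, not part of the original source):
+//
+//    ctx, task := trace.NewTask(ctx, "makeRequest")
+//    defer task.End()
+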
+//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
+func trace_userTaskEnd(id uint64) {
+ traceEvent(traceEvUserTaskEnd, 2, id)
+}
+
+//go:linkname trace_userRegion runtime/trace.userRegion
+func trace_userRegion(id, mode uint64, name string) {
+ if !trace.enabled {
+ return
+ }
+
+ mp, pid, bufp := traceAcquireBuffer()
+ if !trace.enabled && !mp.startingtrace {
+ traceReleaseBuffer(pid)
+ return
+ }
+
+ nameStringID, bufp := traceString(bufp, pid, name)
+ traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 0, 3, id, mode, nameStringID)
+ traceReleaseBuffer(pid)
+}
+
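+// The public entry points for this hook are runtime/trace.WithRegion and
+// runtime/trace.StartRegion. A minimal usage sketch (illustrative; doWork
+// is a placeholder, not part of the original source):
+//
+//    trace.WithRegion(ctx, "encodeResponse", func() { doWork() })
+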
+//go:linkname trace_userLog runtime/trace.userLog
+func trace_userLog(id uint64, category, message string) {
+ if !trace.enabled {
+ return
+ }
+
+ mp, pid, bufp := traceAcquireBuffer()
+ if !trace.enabled && !mp.startingtrace {
+ traceReleaseBuffer(pid)
+ return
+ }
+
+ categoryID, bufp := traceString(bufp, pid, category)
+
+ extraSpace := traceBytesPerNumber + len(message) // extraSpace for the value string
+ traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 0, 3, id, categoryID)
+ // traceEventLocked reserved extra space for val and len(val)
+ // in buf, so buf now has room for the following.
+ buf := bufp.ptr()
+
+ // double-check the message and its length can fit.
+ // Otherwise, truncate the message.
+ slen := len(message)
+ if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
+ slen = room
+ }
+ buf.varint(uint64(slen))
+ buf.pos += copy(buf.arr[buf.pos:], message[:slen])
+
+ traceReleaseBuffer(pid)
+}
+
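+// The public entry point for this hook is runtime/trace.Log. A minimal
+// usage sketch (illustrative, not part of the original source):
+//
+//    trace.Log(ctx, "http", "handler started")
+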
+// startPCforTrace returns the start PC of a goroutine for tracing purposes.
+// If pc is a wrapper, it returns the PC of the wrapped function. Otherwise it
+// returns pc.
+func startPCforTrace(pc uintptr) uintptr {
+ f := findfunc(pc)
+ if !f.valid() {
+ return pc // should not happen, but don't care
+ }
+ w := funcdata(f, _FUNCDATA_WrapInfo)
+ if w == nil {
+ return pc // not a wrapper
+ }
+ return f.datap.textAddr(*(*uint32)(w))
+}
diff --git a/contrib/go/_std_1.19/src/runtime/traceback.go b/contrib/go/_std_1.19/src/runtime/traceback.go
new file mode 100644
index 0000000000..49147ff838
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/traceback.go
@@ -0,0 +1,1447 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/bytealg"
+ "internal/goarch"
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+// The code in this file implements stack trace walking for all architectures.
+// The most important fact about a given architecture is whether it uses a link register.
+// On systems with link registers, the prologue for a non-leaf function stores the
+// incoming value of LR at the bottom of the newly allocated stack frame.
+// On systems without link registers (x86), the architecture pushes a return PC during
+// the call instruction, so the return PC ends up above the stack frame.
+// In this file, the return PC is always called LR, no matter how it was found.
+
+const usesLR = sys.MinFrameSize > 0
+
+// Generic traceback. Handles runtime stack prints (pcbuf == nil),
+// the runtime.Callers function (pcbuf != nil), as well as the garbage
+// collector (callback != nil). A little clunky to merge these, but avoids
+// duplicating the code and all its subtlety.
+//
+// The skip argument is only valid with pcbuf != nil and counts the number
+// of logical frames to skip rather than physical frames (with inlining, a
+// PC in pcbuf can represent multiple calls).
+func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max int, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer, flags uint) int {
+ if skip > 0 && callback != nil {
+ throw("gentraceback callback cannot be used with non-zero skip")
+ }
+
+ // Don't call this "g"; it's too easy to get "g" and "gp" confused.
+ if ourg := getg(); ourg == gp && ourg == ourg.m.curg {
+ // The starting sp has been passed in as a uintptr, and the caller may
+ // have other uintptr-typed stack references as well.
+ // If during one of the calls that got us here or during one of the
+ // callbacks below the stack must be grown, all these uintptr references
+ // to the stack will not be updated, and gentraceback will continue
+ // to inspect the old stack memory, which may no longer be valid.
+ // Even if all the variables were updated correctly, it is not clear that
+ // we want to expose a traceback that begins on one stack and ends
+ // on another stack. That could confuse callers quite a bit.
+ // Instead, we require that gentraceback and any other function that
+ // accepts an sp for the current goroutine (typically obtained by
+ // calling getcallersp) must not run on that goroutine's stack but
+ // instead on the g0 stack.
+ throw("gentraceback cannot trace user goroutine on its own stack")
+ }
+ level, _, _ := gotraceback()
+
+ var ctxt *funcval // Context pointer for unstarted goroutines. See issue #25897.
+
+ if pc0 == ^uintptr(0) && sp0 == ^uintptr(0) { // Signal to fetch saved values from gp.
+ if gp.syscallsp != 0 {
+ pc0 = gp.syscallpc
+ sp0 = gp.syscallsp
+ if usesLR {
+ lr0 = 0
+ }
+ } else {
+ pc0 = gp.sched.pc
+ sp0 = gp.sched.sp
+ if usesLR {
+ lr0 = gp.sched.lr
+ }
+ ctxt = (*funcval)(gp.sched.ctxt)
+ }
+ }
+
+ nprint := 0
+ var frame stkframe
+ frame.pc = pc0
+ frame.sp = sp0
+ if usesLR {
+ frame.lr = lr0
+ }
+ waspanic := false
+ cgoCtxt := gp.cgoCtxt
+ stack := gp.stack
+ printing := pcbuf == nil && callback == nil
+
+ // If the PC is zero, it's likely a nil function call.
+ // Start in the caller's frame.
+ if frame.pc == 0 {
+ if usesLR {
+ frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp))
+ frame.lr = 0
+ } else {
+ frame.pc = uintptr(*(*uintptr)(unsafe.Pointer(frame.sp)))
+ frame.sp += goarch.PtrSize
+ }
+ }
+
+ // runtime/internal/atomic functions call into kernel helpers on
+ // arm < 7. See runtime/internal/atomic/sys_linux_arm.s.
+ //
+ // Start in the caller's frame.
+ if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && frame.pc&0xffff0000 == 0xffff0000 {
+ // Note that the calls are simple BL without pushing the return
+ // address, so we use LR directly.
+ //
+ // The kernel helpers are frameless leaf functions, so SP and
+ // LR are not touched.
+ frame.pc = frame.lr
+ frame.lr = 0
+ }
+
+ f := findfunc(frame.pc)
+ if !f.valid() {
+ if callback != nil || printing {
+ print("runtime: g ", gp.goid, ": unknown pc ", hex(frame.pc), "\n")
+ tracebackHexdump(stack, &frame, 0)
+ }
+ if callback != nil {
+ throw("unknown pc")
+ }
+ return 0
+ }
+ frame.fn = f
+
+ var cache pcvalueCache
+
+ lastFuncID := funcID_normal
+ n := 0
+ for n < max {
+ // Typically:
+ // pc is the PC of the running function.
+ // sp is the stack pointer at that program counter.
+ // fp is the frame pointer (caller's stack pointer) at that program counter, or nil if unknown.
+ // stk is the stack containing sp.
+ // The caller's program counter is lr, unless lr is zero, in which case it is *(uintptr*)sp.
+ f = frame.fn
+ if f.pcsp == 0 {
+ // No frame information, must be external function, like race support.
+ // See golang.org/issue/13568.
+ break
+ }
+
+ // Compute function info flags.
+ flag := f.flag
+ if f.funcID == funcID_cgocallback {
+ // cgocallback does write SP to switch from the g0 to the curg stack,
+ // but it carefully arranges that during the transition BOTH stacks
+ // have cgocallback frame valid for unwinding through.
+ // So we don't need to exclude it with the other SP-writing functions.
+ flag &^= funcFlag_SPWRITE
+ }
+ if frame.pc == pc0 && frame.sp == sp0 && pc0 == gp.syscallpc && sp0 == gp.syscallsp {
+ // Some Syscall functions write to SP, but they do so only after
+ // saving the entry PC/SP using entersyscall.
+ // Since we are using the entry PC/SP, the later SP write doesn't matter.
+ flag &^= funcFlag_SPWRITE
+ }
+
+ // Found an actual function.
+ // Derive frame pointer and link register.
+ if frame.fp == 0 {
+ // Jump over system stack transitions. If we're on g0 and there's a user
+ // goroutine, try to jump. Otherwise this is a regular call.
+ if flags&_TraceJumpStack != 0 && gp == gp.m.g0 && gp.m.curg != nil {
+ switch f.funcID {
+ case funcID_morestack:
+ // morestack does not return normally -- newstack()
+ // gogo's to curg.sched. Match that.
+ // This keeps morestack() from showing up in the backtrace,
+ // but that makes some sense since it'll never be returned
+ // to.
+ frame.pc = gp.m.curg.sched.pc
+ frame.fn = findfunc(frame.pc)
+ f = frame.fn
+ flag = f.flag
+ frame.lr = gp.m.curg.sched.lr
+ frame.sp = gp.m.curg.sched.sp
+ stack = gp.m.curg.stack
+ cgoCtxt = gp.m.curg.cgoCtxt
+ case funcID_systemstack:
+ // systemstack returns normally, so just follow the
+ // stack transition.
+ frame.sp = gp.m.curg.sched.sp
+ stack = gp.m.curg.stack
+ cgoCtxt = gp.m.curg.cgoCtxt
+ flag &^= funcFlag_SPWRITE
+ }
+ }
+ frame.fp = frame.sp + uintptr(funcspdelta(f, frame.pc, &cache))
+ if !usesLR {
+ // On x86, call instruction pushes return PC before entering new function.
+ frame.fp += goarch.PtrSize
+ }
+ }
+ var flr funcInfo
+ if flag&funcFlag_TOPFRAME != 0 {
+ // This function marks the top of the stack. Stop the traceback.
+ frame.lr = 0
+ flr = funcInfo{}
+ } else if flag&funcFlag_SPWRITE != 0 && (callback == nil || n > 0) {
+ // The function we are in does a write to SP that we don't know
+ // how to encode in the spdelta table. Examples include context
+ // switch routines like runtime.gogo but also any code that switches
+ // to the g0 stack to run host C code. Since we can't reliably unwind
+ // the SP (we might not even be on the stack we think we are),
+ // we stop the traceback here.
+ // This only applies for profiling signals (callback == nil).
+ //
+ // For a GC stack traversal (callback != nil), we should only see
+ // a function when it has voluntarily preempted itself on entry
+ // during the stack growth check. In that case, the function has
+ // not yet had a chance to do any writes to SP and is safe to unwind.
+ // isAsyncSafePoint does not allow assembly functions to be async preempted,
+ // and preemptPark double-checks that SPWRITE functions are not async preempted.
+ // So for GC stack traversal we leave things alone (this if body does not execute for n == 0)
+ // at the bottom frame of the stack. But farther up the stack we'd better not
+ // find any.
+ if callback != nil {
+ println("traceback: unexpected SPWRITE function", funcname(f))
+ throw("traceback")
+ }
+ frame.lr = 0
+ flr = funcInfo{}
+ } else {
+ var lrPtr uintptr
+ if usesLR {
+ if n == 0 && frame.sp < frame.fp || frame.lr == 0 {
+ lrPtr = frame.sp
+ frame.lr = *(*uintptr)(unsafe.Pointer(lrPtr))
+ }
+ } else {
+ if frame.lr == 0 {
+ lrPtr = frame.fp - goarch.PtrSize
+ frame.lr = uintptr(*(*uintptr)(unsafe.Pointer(lrPtr)))
+ }
+ }
+ flr = findfunc(frame.lr)
+ if !flr.valid() {
+ // This happens if you get a profiling interrupt at just the wrong time.
+ // In that context it is okay to stop early.
+ // But if callback is set, we're doing a garbage collection and must
+ // get everything, so crash loudly.
+ doPrint := printing
+ if doPrint && gp.m.incgo && f.funcID == funcID_sigpanic {
+ // We can inject sigpanic
+ // calls directly into C code,
+ // in which case we'll see a C
+ // return PC. Don't complain.
+ doPrint = false
+ }
+ if callback != nil || doPrint {
+ print("runtime: g ", gp.goid, ": unexpected return pc for ", funcname(f), " called from ", hex(frame.lr), "\n")
+ tracebackHexdump(stack, &frame, lrPtr)
+ }
+ if callback != nil {
+ throw("unknown caller pc")
+ }
+ }
+ }
+
+ frame.varp = frame.fp
+ if !usesLR {
+ // On x86, call instruction pushes return PC before entering new function.
+ frame.varp -= goarch.PtrSize
+ }
+
+ // For architectures with frame pointers, if there's
+ // a frame, then there's a saved frame pointer here.
+ //
+ // NOTE: This code is not as general as it looks.
+ // On x86, the ABI is to save the frame pointer word at the
+ // top of the stack frame, so we have to back down over it.
+ // On arm64, the frame pointer should be at the bottom of
+ // the stack (with R29 (aka FP) = RSP), in which case we would
+ // not want to do the subtraction here. But we started out without
+ // any frame pointer, and when we wanted to add it, we didn't
+ // want to break all the assembly doing direct writes to 8(RSP)
+ // to set the first parameter to a called function.
+ // So we decided to write the FP link *below* the stack pointer
+ // (with R29 = RSP - 8 in Go functions).
+ // This is technically ABI-compatible but not standard.
+ // And it happens to end up mimicking the x86 layout.
+ // Other architectures may make different decisions.
+ if frame.varp > frame.sp && framepointer_enabled {
+ frame.varp -= goarch.PtrSize
+ }
+
+ // Derive size of arguments.
+ // Most functions have a fixed-size argument block,
+ // so we can use metadata about the function f.
+ // Not all, though: there are some variadic functions
+ // in package runtime and reflect, and for those we use call-specific
+ // metadata recorded by f's caller.
+ if callback != nil || printing {
+ frame.argp = frame.fp + sys.MinFrameSize
+ var ok bool
+ frame.arglen, frame.argmap, ok = getArgInfoFast(f, callback != nil)
+ if !ok {
+ frame.arglen, frame.argmap = getArgInfo(&frame, f, callback != nil, ctxt)
+ }
+ }
+ ctxt = nil // ctxt is only needed to get arg maps for the topmost frame
+
+ // Determine frame's 'continuation PC', where it can continue.
+ // Normally this is the return address on the stack, but if sigpanic
+ // is immediately below this function on the stack, then the frame
+ // stopped executing due to a trap, and frame.pc is probably not
+ // a safe point for looking up liveness information. In this panicking case,
+ // the function either doesn't return at all (if it has no defers or if the
+ // defers do not recover) or it returns from one of the calls to
+ // deferproc a second time (if the corresponding deferred func recovers).
+ // In the latter case, use a deferreturn call site as the continuation pc.
+ frame.continpc = frame.pc
+ if waspanic {
+ if frame.fn.deferreturn != 0 {
+ frame.continpc = frame.fn.entry() + uintptr(frame.fn.deferreturn) + 1
+ // Note: this may perhaps keep return variables alive longer than
+ // strictly necessary, as we are using "function has a defer statement"
+ // as a proxy for "function actually deferred something". It seems
+ // to be a minor drawback. (We used to actually look through the
+ // gp._defer for a defer corresponding to this function, but that
+ // is hard to do with defer records on the stack during a stack copy.)
+ // Note: the +1 is to offset the -1 that
+ // stack.go:getStackMap does to back up a return
+ // address to make sure the pc is in the CALL instruction.
+ } else {
+ frame.continpc = 0
+ }
+ }
+
+ if callback != nil {
+ if !callback((*stkframe)(noescape(unsafe.Pointer(&frame))), v) {
+ return n
+ }
+ }
+
+ if pcbuf != nil {
+ pc := frame.pc
+ // backup to CALL instruction to read inlining info (same logic as below)
+ tracepc := pc
+ // Normally, pc is a return address. In that case, we want to look up
+ // file/line information using pc-1, because that is the pc of the
+ // call instruction (more precisely, the last byte of the call instruction).
+ // Callers expect the pc buffer to contain return addresses and do the
+ // same -1 themselves, so we keep pc unchanged.
+ // When the pc is from a signal (e.g. profiler or segv) then we want
+ // to look up file/line information using pc, and we store pc+1 in the
+ // pc buffer so callers can unconditionally subtract 1 before looking up.
+ // See issue 34123.
+ // The pc can be at function entry when the frame is initialized without
+ // actually running code, like runtime.mstart.
+ if (n == 0 && flags&_TraceTrap != 0) || waspanic || pc == f.entry() {
+ pc++
+ } else {
+ tracepc--
+ }
+
+ // If there is inlining info, record the inner frames.
+ if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
+ inltree := (*[1 << 20]inlinedCall)(inldata)
+ for {
+ ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, &cache)
+ if ix < 0 {
+ break
+ }
+ if inltree[ix].funcID == funcID_wrapper && elideWrapperCalling(lastFuncID) {
+ // ignore wrappers
+ } else if skip > 0 {
+ skip--
+ } else if n < max {
+ (*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc
+ n++
+ }
+ lastFuncID = inltree[ix].funcID
+ // Back up to an instruction in the "caller".
+ tracepc = frame.fn.entry() + uintptr(inltree[ix].parentPc)
+ pc = tracepc + 1
+ }
+ }
+ // Record the main frame.
+ if f.funcID == funcID_wrapper && elideWrapperCalling(lastFuncID) {
+ // Ignore wrapper functions (except when they trigger panics).
+ } else if skip > 0 {
+ skip--
+ } else if n < max {
+ (*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc
+ n++
+ }
+ lastFuncID = f.funcID
+ n-- // offset n++ below
+ }
+
+ if printing {
+ // assume skip=0 for printing.
+ //
+ // Never elide wrappers if we haven't printed
+ // any frames. And don't elide wrappers that
+ // called panic rather than the wrapped
+ // function. Otherwise, leave them out.
+
+ // backup to CALL instruction to read inlining info (same logic as above)
+ tracepc := frame.pc
+ if (n > 0 || flags&_TraceTrap == 0) && frame.pc > f.entry() && !waspanic {
+ tracepc--
+ }
+ // If there is inlining info, print the inner frames.
+ if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
+ inltree := (*[1 << 20]inlinedCall)(inldata)
+ var inlFunc _func
+ inlFuncInfo := funcInfo{&inlFunc, f.datap}
+ for {
+ ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, nil)
+ if ix < 0 {
+ break
+ }
+
+ // Create a fake _func for the
+ // inlined function.
+ inlFunc.nameoff = inltree[ix].func_
+ inlFunc.funcID = inltree[ix].funcID
+
+ if (flags&_TraceRuntimeFrames) != 0 || showframe(inlFuncInfo, gp, nprint == 0, inlFuncInfo.funcID, lastFuncID) {
+ name := funcname(inlFuncInfo)
+ file, line := funcline(f, tracepc)
+ print(name, "(...)\n")
+ print("\t", file, ":", line, "\n")
+ nprint++
+ }
+ lastFuncID = inltree[ix].funcID
+ // Back up to an instruction in the "caller".
+ tracepc = frame.fn.entry() + uintptr(inltree[ix].parentPc)
+ }
+ }
+ if (flags&_TraceRuntimeFrames) != 0 || showframe(f, gp, nprint == 0, f.funcID, lastFuncID) {
+ // Print during crash.
+ // main(0x1, 0x2, 0x3)
+ // /home/rsc/go/src/runtime/x.go:23 +0xf
+ //
+ name := funcname(f)
+ file, line := funcline(f, tracepc)
+ if name == "runtime.gopanic" {
+ name = "panic"
+ }
+ print(name, "(")
+ argp := unsafe.Pointer(frame.argp)
+ printArgs(f, argp, tracepc)
+ print(")\n")
+ print("\t", file, ":", line)
+ if frame.pc > f.entry() {
+ print(" +", hex(frame.pc-f.entry()))
+ }
+ if gp.m != nil && gp.m.throwing >= throwTypeRuntime && gp == gp.m.curg || level >= 2 {
+ print(" fp=", hex(frame.fp), " sp=", hex(frame.sp), " pc=", hex(frame.pc))
+ }
+ print("\n")
+ nprint++
+ }
+ lastFuncID = f.funcID
+ }
+ n++
+
+ if f.funcID == funcID_cgocallback && len(cgoCtxt) > 0 {
+ ctxt := cgoCtxt[len(cgoCtxt)-1]
+ cgoCtxt = cgoCtxt[:len(cgoCtxt)-1]
+
+ // skip only applies to Go frames.
+ // callback != nil only used when we only care
+ // about Go frames.
+ if skip == 0 && callback == nil {
+ n = tracebackCgoContext(pcbuf, printing, ctxt, n, max)
+ }
+ }
+
+ waspanic = f.funcID == funcID_sigpanic
+ injectedCall := waspanic || f.funcID == funcID_asyncPreempt || f.funcID == funcID_debugCallV2
+
+ // Do not unwind past the bottom of the stack.
+ if !flr.valid() {
+ break
+ }
+
+ if frame.pc == frame.lr && frame.sp == frame.fp {
+ // If the next frame is identical to the current frame, we cannot make progress.
+ print("runtime: traceback stuck. pc=", hex(frame.pc), " sp=", hex(frame.sp), "\n")
+ tracebackHexdump(stack, &frame, frame.sp)
+ throw("traceback stuck")
+ }
+
+ // Unwind to next frame.
+ frame.fn = flr
+ frame.pc = frame.lr
+ frame.lr = 0
+ frame.sp = frame.fp
+ frame.fp = 0
+ frame.argmap = nil
+
+ // On link register architectures, sighandler saves the LR on stack
+ // before faking a call.
+ if usesLR && injectedCall {
+ x := *(*uintptr)(unsafe.Pointer(frame.sp))
+ frame.sp += alignUp(sys.MinFrameSize, sys.StackAlign)
+ f = findfunc(frame.pc)
+ frame.fn = f
+ if !f.valid() {
+ frame.pc = x
+ } else if funcspdelta(f, frame.pc, &cache) == 0 {
+ frame.lr = x
+ }
+ }
+ }
+
+ if printing {
+ n = nprint
+ }
+
+ // Note that panic != nil is okay here: there can be leftover panics,
+ // because the defers on the panic stack do not nest in frame order as
+ // they do on the defer stack. If you have:
+ //
+ // frame 1 defers d1
+ // frame 2 defers d2
+ // frame 3 defers d3
+ // frame 4 panics
+ // frame 4's panic starts running defers
+ // frame 5, running d3, defers d4
+ // frame 5 panics
+ // frame 5's panic starts running defers
+ // frame 6, running d4, garbage collects
+ // frame 6, running d2, garbage collects
+ //
+ // During the execution of d4, the panic stack is d4 -> d3, which
+ // is nested properly, and we'll treat frame 3 as resumable, because we
+ // can find d3. (And in fact frame 3 is resumable. If d4 recovers
+ // and frame 5 continues running d3, d3 can recover and we'll
+ // resume execution in (returning from) frame 3.)
+ //
+ // During the execution of d2, however, the panic stack is d2 -> d3,
+ // which is inverted. The scan will match d2 to frame 2 but having
+ // d2 on the stack until then means it will not match d3 to frame 3.
+ // This is okay: if we're running d2, then all the defers after d2 have
+ // completed and their corresponding frames are dead. Not finding d3
+ // for frame 3 means we'll set frame 3's continpc == 0, which is correct
+ // (frame 3 is dead). At the end of the walk the panic stack can thus
+ // contain defers (d3 in this case) for dead frames. The inversion here
+ // always indicates a dead frame, and the effect of the inversion on the
+ // scan is to hide those dead frames, so the scan is still okay:
+ // what's left on the panic stack are exactly (and only) the dead frames.
+ //
+ // We require callback != nil here because only when callback != nil
+ // do we know that gentraceback is being called in a "must be correct"
+ // context as opposed to a "best effort" context. The tracebacks with
+ // callbacks only happen when everything is stopped nicely.
+ // At other times, such as when gathering a stack for a profiling signal
+ // or when printing a traceback during a crash, everything may not be
+ // stopped nicely, and the stack walk may not be able to complete.
+ if callback != nil && n < max && frame.sp != gp.stktopsp {
+ print("runtime: g", gp.goid, ": frame.sp=", hex(frame.sp), " top=", hex(gp.stktopsp), "\n")
+ print("\tstack=[", hex(gp.stack.lo), "-", hex(gp.stack.hi), "] n=", n, " max=", max, "\n")
+ throw("traceback did not unwind completely")
+ }
+
+ return n
+}
+
+// printArgs prints function arguments in traceback.
+func printArgs(f funcInfo, argp unsafe.Pointer, pc uintptr) {
+ // The "instruction" of argument printing is encoded in _FUNCDATA_ArgInfo.
+ // See cmd/compile/internal/ssagen.emitArgInfo for the description of the
+ // encoding.
+ // These constants need to be in sync with the compiler.
+ const (
+ _endSeq = 0xff
+ _startAgg = 0xfe
+ _endAgg = 0xfd
+ _dotdotdot = 0xfc
+ _offsetTooLarge = 0xfb
+ )
+
+ const (
+ limit = 10 // print no more than 10 args/components
+ maxDepth = 5 // no more than 5 layers of nesting
+ maxLen = (maxDepth*3+2)*limit + 1 // max length of _FUNCDATA_ArgInfo (see the compiler side for reasoning)
+ )
+
+ p := (*[maxLen]uint8)(funcdata(f, _FUNCDATA_ArgInfo))
+ if p == nil {
+ return
+ }
+
+ liveInfo := funcdata(f, _FUNCDATA_ArgLiveInfo)
+ liveIdx := pcdatavalue(f, _PCDATA_ArgLiveIndex, pc, nil)
+ startOffset := uint8(0xff) // smallest offset that needs liveness info (slots with a lower offset are always live)
+ if liveInfo != nil {
+ startOffset = *(*uint8)(liveInfo)
+ }
+
+ isLive := func(off, slotIdx uint8) bool {
+ if liveInfo == nil || liveIdx <= 0 {
+ return true // no liveness info, always live
+ }
+ if off < startOffset {
+ return true
+ }
+ bits := *(*uint8)(add(liveInfo, uintptr(liveIdx)+uintptr(slotIdx/8)))
+ return bits&(1<<(slotIdx%8)) != 0
+ }
+
+ print1 := func(off, sz, slotIdx uint8) {
+ x := readUnaligned64(add(argp, uintptr(off)))
+ // mask out irrelevant bits
+ if sz < 8 {
+ shift := 64 - sz*8
+ if goarch.BigEndian {
+ x = x >> shift
+ } else {
+ x = x << shift >> shift
+ }
+ }
+ print(hex(x))
+ if !isLive(off, slotIdx) {
+ print("?")
+ }
+ }
+
+ start := true
+ printcomma := func() {
+ if !start {
+ print(", ")
+ }
+ }
+ pi := 0
+ slotIdx := uint8(0) // register arg spill slot index
+printloop:
+ for {
+ o := p[pi]
+ pi++
+ switch o {
+ case _endSeq:
+ break printloop
+ case _startAgg:
+ printcomma()
+ print("{")
+ start = true
+ continue
+ case _endAgg:
+ print("}")
+ case _dotdotdot:
+ printcomma()
+ print("...")
+ case _offsetTooLarge:
+ printcomma()
+ print("_")
+ default:
+ printcomma()
+ sz := p[pi]
+ pi++
+ print1(o, sz, slotIdx)
+ if o >= startOffset {
+ slotIdx++
+ }
+ }
+ start = false
+ }
+}
+
+// reflectMethodValue is a partial duplicate of reflect.makeFuncImpl
+// and reflect.methodValue.
+type reflectMethodValue struct {
+ fn uintptr
+ stack *bitvector // ptrmap for both args and results
+ argLen uintptr // just args
+}
+
+// getArgInfoFast returns the argument frame information for a call to f.
+// It is short and inlineable. However, it does not handle all functions.
+// If ok reports false, you must call getArgInfo instead.
+// TODO(josharian): once we do mid-stack inlining,
+// call getArgInfo directly from getArgInfoFast and stop returning an ok bool.
+func getArgInfoFast(f funcInfo, needArgMap bool) (arglen uintptr, argmap *bitvector, ok bool) {
+ return uintptr(f.args), nil, !(needArgMap && f.args == _ArgsSizeUnknown)
+}
+
+// getArgInfo returns the argument frame information for a call to f
+// with call frame frame.
+//
+// This is used for both actual calls with active stack frames and for
+// deferred calls or goroutines that are not yet executing. If this is an actual
+// call, ctxt must be nil (getArgInfo will retrieve what it needs from
+// the active stack frame). If this is a deferred call or unstarted goroutine,
+// ctxt must be the function object that was deferred or go'd.
+func getArgInfo(frame *stkframe, f funcInfo, needArgMap bool, ctxt *funcval) (arglen uintptr, argmap *bitvector) {
+ arglen = uintptr(f.args)
+ if needArgMap && f.args == _ArgsSizeUnknown {
+ // Extract argument bitmaps for reflect stubs from the calls they made to reflect.
+ switch funcname(f) {
+ case "reflect.makeFuncStub", "reflect.methodValueCall":
+ // These take a *reflect.methodValue as their
+ // context register.
+ var mv *reflectMethodValue
+ var retValid bool
+ if ctxt != nil {
+ // This is not an actual call, but a
+ // deferred call or an unstarted goroutine.
+ // The function value is itself the *reflect.methodValue.
+ mv = (*reflectMethodValue)(unsafe.Pointer(ctxt))
+ } else {
+ // This is a real call that took the
+ // *reflect.methodValue as its context
+ // register and immediately saved it
+ // to 0(SP). Get the methodValue from
+ // 0(SP).
+ arg0 := frame.sp + sys.MinFrameSize
+ mv = *(**reflectMethodValue)(unsafe.Pointer(arg0))
+ // Figure out whether the return values are valid.
+ // Reflect will update this value after it copies
+ // in the return values.
+ retValid = *(*bool)(unsafe.Pointer(arg0 + 4*goarch.PtrSize))
+ }
+ if mv.fn != f.entry() {
+ print("runtime: confused by ", funcname(f), "\n")
+ throw("reflect mismatch")
+ }
+ bv := mv.stack
+ arglen = uintptr(bv.n * goarch.PtrSize)
+ if !retValid {
+ arglen = uintptr(mv.argLen) &^ (goarch.PtrSize - 1)
+ }
+ argmap = bv
+ }
+ }
+ return
+}
+
+// tracebackCgoContext handles tracing back a cgo context value, from
+// the context argument to setCgoTraceback, for the gentraceback
+// function. It returns the new value of n.
+func tracebackCgoContext(pcbuf *uintptr, printing bool, ctxt uintptr, n, max int) int {
+ var cgoPCs [32]uintptr
+ cgoContextPCs(ctxt, cgoPCs[:])
+ var arg cgoSymbolizerArg
+ anySymbolized := false
+ for _, pc := range cgoPCs {
+ if pc == 0 || n >= max {
+ break
+ }
+ if pcbuf != nil {
+ (*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc
+ }
+ if printing {
+ if cgoSymbolizer == nil {
+ print("non-Go function at pc=", hex(pc), "\n")
+ } else {
+ c := printOneCgoTraceback(pc, max-n, &arg)
+ n += c - 1 // +1 a few lines down
+ anySymbolized = true
+ }
+ }
+ n++
+ }
+ if anySymbolized {
+ arg.pc = 0
+ callCgoSymbolizer(&arg)
+ }
+ return n
+}
+
+func printcreatedby(gp *g) {
+ // Show what created goroutine, except main goroutine (goid 1).
+ pc := gp.gopc
+ f := findfunc(pc)
+ if f.valid() && showframe(f, gp, false, funcID_normal, funcID_normal) && gp.goid != 1 {
+ printcreatedby1(f, pc)
+ }
+}
+
+func printcreatedby1(f funcInfo, pc uintptr) {
+ print("created by ", funcname(f), "\n")
+ tracepc := pc // back up to CALL instruction for funcline.
+ if pc > f.entry() {
+ tracepc -= sys.PCQuantum
+ }
+ file, line := funcline(f, tracepc)
+ print("\t", file, ":", line)
+ if pc > f.entry() {
+ print(" +", hex(pc-f.entry()))
+ }
+ print("\n")
+}
+
+func traceback(pc, sp, lr uintptr, gp *g) {
+ traceback1(pc, sp, lr, gp, 0)
+}
+
+// tracebacktrap is like traceback but expects that the PC and SP were obtained
+// from a trap, not from gp->sched or gp->syscallpc/gp->syscallsp or getcallerpc/getcallersp.
+// Because they are from a trap instead of from a saved pair,
+// the initial PC must not be rewound to the previous instruction.
+// (All the saved pairs record a PC that is a return address, so we
+// rewind it into the CALL instruction.)
+// If gp.m.libcall{g,pc,sp} information is available, it uses that information in preference to
+// the pc/sp/lr passed in.
+func tracebacktrap(pc, sp, lr uintptr, gp *g) {
+ if gp.m.libcallsp != 0 {
+ // We're in C code somewhere, traceback from the saved position.
+ traceback1(gp.m.libcallpc, gp.m.libcallsp, 0, gp.m.libcallg.ptr(), 0)
+ return
+ }
+ traceback1(pc, sp, lr, gp, _TraceTrap)
+}
+
+func traceback1(pc, sp, lr uintptr, gp *g, flags uint) {
+ // If the goroutine is in cgo, and we have a cgo traceback, print that.
+ if iscgo && gp.m != nil && gp.m.ncgo > 0 && gp.syscallsp != 0 && gp.m.cgoCallers != nil && gp.m.cgoCallers[0] != 0 {
+ // Lock cgoCallers so that a signal handler won't
+ // change it, copy the array, reset it, unlock it.
+ // We are locked to the thread and are not running
+ // concurrently with a signal handler.
+ // We just have to stop a signal handler from interrupting
+ // in the middle of our copy.
+ atomic.Store(&gp.m.cgoCallersUse, 1)
+ cgoCallers := *gp.m.cgoCallers
+ gp.m.cgoCallers[0] = 0
+ atomic.Store(&gp.m.cgoCallersUse, 0)
+
+ printCgoTraceback(&cgoCallers)
+ }
+
+ if readgstatus(gp)&^_Gscan == _Gsyscall {
+ // Override registers if blocked in system call.
+ pc = gp.syscallpc
+ sp = gp.syscallsp
+ flags &^= _TraceTrap
+ }
+ if gp.m != nil && gp.m.vdsoSP != 0 {
+ // Override registers if running in VDSO. This comes after the
+ // _Gsyscall check to cover VDSO calls after entersyscall.
+ pc = gp.m.vdsoPC
+ sp = gp.m.vdsoSP
+ flags &^= _TraceTrap
+ }
+
+ // Print traceback. By default, omits runtime frames.
+ // If that means we print nothing at all, repeat forcing all frames printed.
+ n := gentraceback(pc, sp, lr, gp, 0, nil, _TracebackMaxFrames, nil, nil, flags)
+ if n == 0 && (flags&_TraceRuntimeFrames) == 0 {
+ n = gentraceback(pc, sp, lr, gp, 0, nil, _TracebackMaxFrames, nil, nil, flags|_TraceRuntimeFrames)
+ }
+ if n == _TracebackMaxFrames {
+ print("...additional frames elided...\n")
+ }
+ printcreatedby(gp)
+
+ if gp.ancestors == nil {
+ return
+ }
+ for _, ancestor := range *gp.ancestors {
+ printAncestorTraceback(ancestor)
+ }
+}
+
+// printAncestorTraceback prints the traceback of the given ancestor.
+// TODO: Unify this with gentraceback and CallersFrames.
+func printAncestorTraceback(ancestor ancestorInfo) {
+ print("[originating from goroutine ", ancestor.goid, "]:\n")
+ for fidx, pc := range ancestor.pcs {
+ f := findfunc(pc) // f previously validated
+ if showfuncinfo(f, fidx == 0, funcID_normal, funcID_normal) {
+ printAncestorTracebackFuncInfo(f, pc)
+ }
+ }
+ if len(ancestor.pcs) == _TracebackMaxFrames {
+ print("...additional frames elided...\n")
+ }
+ // Show what created goroutine, except main goroutine (goid 1).
+ f := findfunc(ancestor.gopc)
+ if f.valid() && showfuncinfo(f, false, funcID_normal, funcID_normal) && ancestor.goid != 1 {
+ printcreatedby1(f, ancestor.gopc)
+ }
+}
+
+// printAncestorTracebackFuncInfo prints the given function info at a given pc
+// within an ancestor traceback. The precision of this info is reduced
+// because it only has access to the pcs recorded at the time the caller
+// goroutine was created.
+func printAncestorTracebackFuncInfo(f funcInfo, pc uintptr) {
+ name := funcname(f)
+ if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
+ inltree := (*[1 << 20]inlinedCall)(inldata)
+ ix := pcdatavalue(f, _PCDATA_InlTreeIndex, pc, nil)
+ if ix >= 0 {
+ name = funcnameFromNameoff(f, inltree[ix].func_)
+ }
+ }
+ file, line := funcline(f, pc)
+ if name == "runtime.gopanic" {
+ name = "panic"
+ }
+ print(name, "(...)\n")
+ print("\t", file, ":", line)
+ if pc > f.entry() {
+ print(" +", hex(pc-f.entry()))
+ }
+ print("\n")
+}
+
+func callers(skip int, pcbuf []uintptr) int {
+ sp := getcallersp()
+ pc := getcallerpc()
+ gp := getg()
+ var n int
+ systemstack(func() {
+ n = gentraceback(pc, sp, 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0)
+ })
+ return n
+}
+
+func gcallers(gp *g, skip int, pcbuf []uintptr) int {
+ return gentraceback(^uintptr(0), ^uintptr(0), 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0)
+}
+
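+// These helpers back the public runtime.Callers API. A typical caller-side
+// sketch (illustrative, not part of the original source; assumes the fmt
+// and runtime packages are imported):
+//
+//    pcs := make([]uintptr, 32)
+//    n := runtime.Callers(2, pcs) // 2: skip runtime.Callers and this function
+//    frames := runtime.CallersFrames(pcs[:n])
+//    for {
+//        f, more := frames.Next()
+//        fmt.Println(f.Function, f.File, f.Line)
+//        if !more {
+//            break
+//        }
+//    }
+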
+// showframe reports whether the frame with the given characteristics should
+// be printed during a traceback.
+func showframe(f funcInfo, gp *g, firstFrame bool, funcID, childID funcID) bool {
+ g := getg()
+ if g.m.throwing >= throwTypeRuntime && gp != nil && (gp == g.m.curg || gp == g.m.caughtsig.ptr()) {
+ return true
+ }
+ return showfuncinfo(f, firstFrame, funcID, childID)
+}
+
+// showfuncinfo reports whether a function with the given characteristics should
+// be printed during a traceback.
+func showfuncinfo(f funcInfo, firstFrame bool, funcID, childID funcID) bool {
+ // Note that f may be a synthesized funcInfo for an inlined
+ // function, in which case only nameoff and funcID are set.
+
+ level, _, _ := gotraceback()
+ if level > 1 {
+ // Show all frames.
+ return true
+ }
+
+ if !f.valid() {
+ return false
+ }
+
+ if funcID == funcID_wrapper && elideWrapperCalling(childID) {
+ return false
+ }
+
+ name := funcname(f)
+
+ // Special case: always show runtime.gopanic frame
+ // in the middle of a stack trace, so that we can
+ // see the boundary between ordinary code and
+ // panic-induced deferred code.
+ // See golang.org/issue/5832.
+ if name == "runtime.gopanic" && !firstFrame {
+ return true
+ }
+
+ return bytealg.IndexByteString(name, '.') >= 0 && (!hasPrefix(name, "runtime.") || isExportedRuntime(name))
+}
+
+// isExportedRuntime reports whether name is an exported runtime function.
+// It is only for runtime functions, so ASCII A-Z is fine.
+func isExportedRuntime(name string) bool {
+ const n = len("runtime.")
+ return len(name) > n && name[:n] == "runtime." && 'A' <= name[n] && name[n] <= 'Z'
+}
+
+// elideWrapperCalling reports whether a wrapper function that called
+// function id should be elided from stack traces.
+func elideWrapperCalling(id funcID) bool {
+ // If the wrapper called a panic function instead of the
+ // wrapped function, we want to include it in stacks.
+ return !(id == funcID_gopanic || id == funcID_sigpanic || id == funcID_panicwrap)
+}
+
+var gStatusStrings = [...]string{
+ _Gidle: "idle",
+ _Grunnable: "runnable",
+ _Grunning: "running",
+ _Gsyscall: "syscall",
+ _Gwaiting: "waiting",
+ _Gdead: "dead",
+ _Gcopystack: "copystack",
+ _Gpreempted: "preempted",
+}
+
+func goroutineheader(gp *g) {
+ gpstatus := readgstatus(gp)
+
+ isScan := gpstatus&_Gscan != 0
+ gpstatus &^= _Gscan // drop the scan bit
+
+ // Basic string status
+ var status string
+ if 0 <= gpstatus && gpstatus < uint32(len(gStatusStrings)) {
+ status = gStatusStrings[gpstatus]
+ } else {
+ status = "???"
+ }
+
+ // Override.
+ if gpstatus == _Gwaiting && gp.waitreason != waitReasonZero {
+ status = gp.waitreason.String()
+ }
+
+ // approx time the G is blocked, in minutes
+ var waitfor int64
+ if (gpstatus == _Gwaiting || gpstatus == _Gsyscall) && gp.waitsince != 0 {
+ waitfor = (nanotime() - gp.waitsince) / 60e9
+ }
+ print("goroutine ", gp.goid, " [", status)
+ if isScan {
+ print(" (scan)")
+ }
+ if waitfor >= 1 {
+ print(", ", waitfor, " minutes")
+ }
+ if gp.lockedm != 0 {
+ print(", locked to thread")
+ }
+ print("]:\n")
+}
+
+func tracebackothers(me *g) {
+ level, _, _ := gotraceback()
+
+ // Show the current goroutine first, if we haven't already.
+ curgp := getg().m.curg
+ if curgp != nil && curgp != me {
+ print("\n")
+ goroutineheader(curgp)
+ traceback(^uintptr(0), ^uintptr(0), 0, curgp)
+ }
+
+ // We can't call locking forEachG here because this may be during fatal
+ // throw/panic, where locking could be out-of-order or a direct
+ // deadlock.
+ //
+ // Instead, use forEachGRace, which requires no locking. We don't lock
+ // against concurrent creation of new Gs, but even with allglock we may
+ // miss Gs created after this loop.
+ forEachGRace(func(gp *g) {
+ if gp == me || gp == curgp || readgstatus(gp) == _Gdead || isSystemGoroutine(gp, false) && level < 2 {
+ return
+ }
+ print("\n")
+ goroutineheader(gp)
+ // Note: gp.m == g.m occurs when tracebackothers is
+ // called from a signal handler initiated during a
+ // systemstack call. The original G is still in the
+ // running state, and we want to print its stack.
+ if gp.m != getg().m && readgstatus(gp)&^_Gscan == _Grunning {
+ print("\tgoroutine running on other thread; stack unavailable\n")
+ printcreatedby(gp)
+ } else {
+ traceback(^uintptr(0), ^uintptr(0), 0, gp)
+ }
+ })
+}
+
+// tracebackHexdump hexdumps part of stk around frame.sp and frame.fp
+// for debugging purposes. If the address bad is included in the
+// hexdumped range, it will mark it as well.
+func tracebackHexdump(stk stack, frame *stkframe, bad uintptr) {
+ const expand = 32 * goarch.PtrSize
+ const maxExpand = 256 * goarch.PtrSize
+ // Start around frame.sp.
+ lo, hi := frame.sp, frame.sp
+ // Expand to include frame.fp.
+ if frame.fp != 0 && frame.fp < lo {
+ lo = frame.fp
+ }
+ if frame.fp != 0 && frame.fp > hi {
+ hi = frame.fp
+ }
+ // Expand a bit more.
+ lo, hi = lo-expand, hi+expand
+ // But don't go too far from frame.sp.
+ if lo < frame.sp-maxExpand {
+ lo = frame.sp - maxExpand
+ }
+ if hi > frame.sp+maxExpand {
+ hi = frame.sp + maxExpand
+ }
+ // And don't go outside the stack bounds.
+ if lo < stk.lo {
+ lo = stk.lo
+ }
+ if hi > stk.hi {
+ hi = stk.hi
+ }
+
+ // Print the hex dump.
+ print("stack: frame={sp:", hex(frame.sp), ", fp:", hex(frame.fp), "} stack=[", hex(stk.lo), ",", hex(stk.hi), ")\n")
+ hexdumpWords(lo, hi, func(p uintptr) byte {
+ switch p {
+ case frame.fp:
+ return '>'
+ case frame.sp:
+ return '<'
+ case bad:
+ return '!'
+ }
+ return 0
+ })
+}
+
+// isSystemGoroutine reports whether the goroutine gp must be omitted
+// from stack dumps and the deadlock detector. This is any goroutine that
+// starts at a runtime.* entry point, except for runtime.main,
+// runtime.handleAsyncEvent (wasm only) and sometimes runtime.runfinq.
+//
+// If fixed is true, any goroutine that can vary between user and
+// system (that is, the finalizer goroutine) is considered a user
+// goroutine.
+func isSystemGoroutine(gp *g, fixed bool) bool {
+ // Keep this in sync with internal/trace.IsSystemGoroutine.
+ f := findfunc(gp.startpc)
+ if !f.valid() {
+ return false
+ }
+ if f.funcID == funcID_runtime_main || f.funcID == funcID_handleAsyncEvent {
+ return false
+ }
+ if f.funcID == funcID_runfinq {
+ // We include the finalizer goroutine if it's calling
+ // back into user code.
+ if fixed {
+ // This goroutine can vary. In fixed mode,
+ // always consider it a user goroutine.
+ return false
+ }
+ return !fingRunning
+ }
+ return hasPrefix(funcname(f), "runtime.")
+}
+
+// SetCgoTraceback records three C functions to use to gather
+// traceback information from C code and to convert that traceback
+// information into symbolic information. These are used when printing
+// stack traces for a program that uses cgo.
+//
+// The traceback and context functions may be called from a signal
+// handler, and must therefore use only async-signal safe functions.
+// The symbolizer function may be called while the program is
+// crashing, and so must be cautious about using memory. None of the
+// functions may call back into Go.
+//
+// The context function will be called with a single argument, a
+// pointer to a struct:
+//
+// struct {
+// Context uintptr
+// }
+//
+// In C syntax, this struct will be
+//
+// struct {
+// uintptr_t Context;
+// };
+//
+// If the Context field is 0, the context function is being called to
+// record the current traceback context. It should record in the
+// Context field whatever information is needed about the current
+// point of execution to later produce a stack trace, probably the
+// stack pointer and PC. In this case the context function will be
+// called from C code.
+//
+// If the Context field is not 0, then it is a value returned by a
+// previous call to the context function. This case is called when the
+// context is no longer needed; that is, when the Go code is returning
+// to its C code caller. This permits the context function to release
+// any associated resources.
+//
+// While it would be correct for the context function to record a
+// complete stack trace whenever it is called, and simply copy that
+// out in the traceback function, in a typical program the context
+// function will be called many times without ever recording a
+// traceback for that context. Recording a complete stack trace in a
+// call to the context function is likely to be inefficient.
+//
+// The traceback function will be called with a single argument, a
+// pointer to a struct:
+//
+// struct {
+// Context uintptr
+// SigContext uintptr
+// Buf *uintptr
+// Max uintptr
+// }
+//
+// In C syntax, this struct will be
+//
+// struct {
+// uintptr_t Context;
+// uintptr_t SigContext;
+// uintptr_t* Buf;
+// uintptr_t Max;
+// };
+//
+// The Context field will be zero to gather a traceback from the
+// current program execution point. In this case, the traceback
+// function will be called from C code.
+//
+// Otherwise Context will be a value previously returned by a call to
+// the context function. The traceback function should gather a stack
+// trace from that saved point in the program execution. The traceback
+// function may be called from an execution thread other than the one
+// that recorded the context, but only when the context is known to be
+// valid and unchanging. The traceback function may also be called
+// deeper in the call stack on the same thread that recorded the
+// context. The traceback function may be called multiple times with
+// the same Context value; it will usually be appropriate to cache the
+// result, if possible, the first time this is called for a specific
+// context value.
+//
+// If the traceback function is called from a signal handler on a Unix
+// system, SigContext will be the signal context argument passed to
+// the signal handler (a C ucontext_t* cast to uintptr_t). This may be
+// used to start tracing at the point where the signal occurred. If
+// the traceback function is not called from a signal handler,
+// SigContext will be zero.
+//
+// Buf is where the traceback information should be stored. It should
+// be PC values, such that Buf[0] is the PC of the caller, Buf[1] is
+// the PC of that function's caller, and so on. Max is the maximum
+// number of entries to store. The function should store a zero to
+// indicate the top of the stack, or that the caller is on a different
+// stack, presumably a Go stack.
+//
+// Unlike runtime.Callers, the PC values returned should, when passed
+// to the symbolizer function, return the file/line of the call
+// instruction. No additional subtraction is required or appropriate.
+//
+// On all platforms, the traceback function is invoked when a call from
+// Go to C to Go requests a stack trace. On linux/amd64, linux/ppc64le,
+// linux/arm64, and freebsd/amd64, the traceback function is also invoked
+// when a signal is received by a thread that is executing a cgo call.
+// The traceback function should not make assumptions about when it is
+// called, as future versions of Go may make additional calls.
+//
+// The symbolizer function will be called with a single argument, a
+// pointer to a struct:
+//
+// struct {
+// PC uintptr // program counter to fetch information for
+// File *byte // file name (NUL terminated)
+// Lineno uintptr // line number
+// Func *byte // function name (NUL terminated)
+// Entry uintptr // function entry point
+// More uintptr // set non-zero if more info for this PC
+// Data uintptr // unused by runtime, available for function
+// }
+//
+// In C syntax, this struct will be
+//
+// struct {
+// uintptr_t PC;
+// char* File;
+// uintptr_t Lineno;
+// char* Func;
+// uintptr_t Entry;
+// uintptr_t More;
+// uintptr_t Data;
+// };
+//
+// The PC field will be a value returned by a call to the traceback
+// function.
+//
+// The first time the function is called for a particular traceback,
+// all the fields except PC will be 0. The function should fill in the
+// other fields if possible, setting them to 0/nil if the information
+// is not available. The Data field may be used to store any useful
+// information across calls. The More field should be set to non-zero
+// if there is more information for this PC, zero otherwise. If More
+// is set non-zero, the function will be called again with the same
+// PC, and may return different information (this is intended for use
+// with inlined functions). If More is zero, the function will be
+// called with the next PC value in the traceback. When the traceback
+// is complete, the function will be called once more with PC set to
+// zero; this may be used to free any information. Each call will
+// leave the fields of the struct set to the same values they had upon
+// return, except for the PC field when the More field is zero. The
+// function must not keep a copy of the struct pointer between calls.
+//
+// When calling SetCgoTraceback, the version argument is the version
+// number of the structs that the functions expect to receive.
+// Currently this must be zero.
+//
+// The symbolizer function may be nil, in which case the results of
+// the traceback function will be displayed as numbers. If the
+// traceback function is nil, the symbolizer function will never be
+// called. The context function may be nil, in which case the
+// traceback function will only be called with the context field set
+// to zero. If the context function is nil, then calls from Go to C
+// to Go will not show a traceback for the C portion of the call stack.
+//
+// SetCgoTraceback should be called only once, ideally from an init function.
+func SetCgoTraceback(version int, traceback, context, symbolizer unsafe.Pointer) {
+ if version != 0 {
+ panic("unsupported version")
+ }
+
+ if cgoTraceback != nil && cgoTraceback != traceback ||
+ cgoContext != nil && cgoContext != context ||
+ cgoSymbolizer != nil && cgoSymbolizer != symbolizer {
+ panic("call SetCgoTraceback only once")
+ }
+
+ cgoTraceback = traceback
+ cgoContext = context
+ cgoSymbolizer = symbolizer
+
+ // The context function is called when a C function calls a Go
+ // function. As such it is only called by C code in runtime/cgo.
+ if _cgo_set_context_function != nil {
+ cgocall(_cgo_set_context_function, context)
+ }
+}
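+
+// Illustrative sketch, not part of the upstream source: registering a cgo
+// traceback function. The C function myTraceback and its stub body are
+// hypothetical; a real implementation would walk the C stack (for example
+// with libunwind) and fill arg->Buf. Assumes imports "runtime" and "unsafe".
+//
+// /*
+// #include <stdint.h>
+// struct tbArg {
+// uintptr_t Context;
+// uintptr_t SigContext;
+// uintptr_t* Buf;
+// uintptr_t Max;
+// };
+// void myTraceback(void* p) {
+// struct tbArg* arg = (struct tbArg*)(p);
+// arg->Buf[0] = 0; // stub: record no frames
+// }
+// */
+// import "C"
+//
+// func init() {
+// runtime.SetCgoTraceback(0, unsafe.Pointer(C.myTraceback), nil, nil)
+// }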
+
+var cgoTraceback unsafe.Pointer
+var cgoContext unsafe.Pointer
+var cgoSymbolizer unsafe.Pointer
+
+// cgoTracebackArg is the type passed to cgoTraceback.
+type cgoTracebackArg struct {
+ context uintptr
+ sigContext uintptr
+ buf *uintptr
+ max uintptr
+}
+
+// cgoContextArg is the type passed to the context function.
+type cgoContextArg struct {
+ context uintptr
+}
+
+// cgoSymbolizerArg is the type passed to cgoSymbolizer.
+type cgoSymbolizerArg struct {
+ pc uintptr
+ file *byte
+ lineno uintptr
+ funcName *byte
+ entry uintptr
+ more uintptr
+ data uintptr
+}
+
+// printCgoTraceback prints a traceback of cgo callers.
+func printCgoTraceback(callers *cgoCallers) {
+ if cgoSymbolizer == nil {
+ for _, c := range callers {
+ if c == 0 {
+ break
+ }
+ print("non-Go function at pc=", hex(c), "\n")
+ }
+ return
+ }
+
+ var arg cgoSymbolizerArg
+ for _, c := range callers {
+ if c == 0 {
+ break
+ }
+ printOneCgoTraceback(c, 0x7fffffff, &arg)
+ }
+ arg.pc = 0
+ callCgoSymbolizer(&arg)
+}
+
+// printOneCgoTraceback prints the traceback of a single cgo caller.
+// This can print more than one line because of inlining.
+// Returns the number of frames printed.
+func printOneCgoTraceback(pc uintptr, max int, arg *cgoSymbolizerArg) int {
+ c := 0
+ arg.pc = pc
+ for c <= max {
+ callCgoSymbolizer(arg)
+ if arg.funcName != nil {
+ // Note that we don't print any argument
+ // information here, not even parentheses.
+ // The symbolizer must add that if appropriate.
+ println(gostringnocopy(arg.funcName))
+ } else {
+ println("non-Go function")
+ }
+ print("\t")
+ if arg.file != nil {
+ print(gostringnocopy(arg.file), ":", arg.lineno, " ")
+ }
+ print("pc=", hex(pc), "\n")
+ c++
+ if arg.more == 0 {
+ break
+ }
+ }
+ return c
+}
+
+// callCgoSymbolizer calls the cgoSymbolizer function.
+func callCgoSymbolizer(arg *cgoSymbolizerArg) {
+ call := cgocall
+ if panicking > 0 || getg().m.curg != getg() {
+ // We do not want to call into the scheduler when panicking
+ // or when on the system stack.
+ call = asmcgocall
+ }
+ if msanenabled {
+ msanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
+ }
+ if asanenabled {
+ asanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
+ }
+ call(cgoSymbolizer, noescape(unsafe.Pointer(arg)))
+}
+
+// cgoContextPCs gets the PC values from a cgo traceback.
+func cgoContextPCs(ctxt uintptr, buf []uintptr) {
+ if cgoTraceback == nil {
+ return
+ }
+ call := cgocall
+ if panicking > 0 || getg().m.curg != getg() {
+ // We do not want to call into the scheduler when panicking
+ // or when on the system stack.
+ call = asmcgocall
+ }
+ arg := cgoTracebackArg{
+ context: ctxt,
+ buf: (*uintptr)(noescape(unsafe.Pointer(&buf[0]))),
+ max: uintptr(len(buf)),
+ }
+ if msanenabled {
+ msanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
+ }
+ if asanenabled {
+ asanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
+ }
+ call(cgoTraceback, noescape(unsafe.Pointer(&arg)))
+}
diff --git a/contrib/go/_std_1.19/src/runtime/type.go b/contrib/go/_std_1.19/src/runtime/type.go
new file mode 100644
index 0000000000..e8e7819ecf
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/type.go
@@ -0,0 +1,719 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Runtime type representation.
+
+package runtime
+
+import (
+ "internal/abi"
+ "unsafe"
+)
+
+// tflag is documented in reflect/type.go.
+//
+// tflag values must be kept in sync with copies in:
+//
+// cmd/compile/internal/reflectdata/reflect.go
+// cmd/link/internal/ld/decodesym.go
+// reflect/type.go
+// internal/reflectlite/type.go
+type tflag uint8
+
+const (
+ tflagUncommon tflag = 1 << 0
+ tflagExtraStar tflag = 1 << 1
+ tflagNamed tflag = 1 << 2
+ tflagRegularMemory tflag = 1 << 3 // equal and hash can treat values of this type as a single region of t.size bytes
+)
+
+// Needs to be in sync with ../cmd/link/internal/ld/decodesym.go:/^func.commonsize,
+// ../cmd/compile/internal/reflectdata/reflect.go:/^func.dcommontype and
+// ../reflect/type.go:/^type.rtype.
+// ../internal/reflectlite/type.go:/^type.rtype.
+type _type struct {
+ size uintptr
+ ptrdata uintptr // size of memory prefix holding all pointers
+ hash uint32
+ tflag tflag
+ align uint8
+ fieldAlign uint8
+ kind uint8
+ // function for comparing objects of this type
+ // (ptr to object A, ptr to object B) -> ==?
+ equal func(unsafe.Pointer, unsafe.Pointer) bool
+ // gcdata stores the GC type data for the garbage collector.
+ // If the KindGCProg bit is set in kind, gcdata is a GC program.
+ // Otherwise it is a ptrmask bitmap. See mbitmap.go for details.
+ gcdata *byte
+ str nameOff
+ ptrToThis typeOff
+}
+
+func (t *_type) string() string {
+ s := t.nameOff(t.str).name()
+ if t.tflag&tflagExtraStar != 0 {
+ return s[1:]
+ }
+ return s
+}
+
+func (t *_type) uncommon() *uncommontype {
+ if t.tflag&tflagUncommon == 0 {
+ return nil
+ }
+ switch t.kind & kindMask {
+ case kindStruct:
+ type u struct {
+ structtype
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case kindPtr:
+ type u struct {
+ ptrtype
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case kindFunc:
+ type u struct {
+ functype
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case kindSlice:
+ type u struct {
+ slicetype
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case kindArray:
+ type u struct {
+ arraytype
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case kindChan:
+ type u struct {
+ chantype
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case kindMap:
+ type u struct {
+ maptype
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case kindInterface:
+ type u struct {
+ interfacetype
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ default:
+ type u struct {
+ _type
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ }
+}
+
+func (t *_type) name() string {
+ if t.tflag&tflagNamed == 0 {
+ return ""
+ }
+ s := t.string()
+ i := len(s) - 1
+ sqBrackets := 0
+ for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
+ switch s[i] {
+ case ']':
+ sqBrackets++
+ case '[':
+ sqBrackets--
+ }
+ i--
+ }
+ return s[i+1:]
+}
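+
+// Illustrative sketch, not part of the upstream source: the same scan as a
+// standalone function (shortName is a hypothetical helper). It finds the
+// last '.' outside square brackets, so type arguments keep their package
+// qualifiers:
+//
+// func shortName(s string) string {
+// i, sq := len(s)-1, 0
+// for i >= 0 && (s[i] != '.' || sq != 0) {
+// switch s[i] {
+// case ']':
+// sq++
+// case '[':
+// sq--
+// }
+// i--
+// }
+// return s[i+1:]
+// }
+//
+// shortName("sort.StringSlice") == "StringSlice"
+// shortName("main.Pair[sort.StringSlice]") == "Pair[sort.StringSlice]"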
+
+// pkgpath returns the path of the package where t was defined, if
+// available. This is not the same as the reflect package's PkgPath
+// method, in that it returns the package path for struct and interface
+// types, not just named types.
+func (t *_type) pkgpath() string {
+ if u := t.uncommon(); u != nil {
+ return t.nameOff(u.pkgpath).name()
+ }
+ switch t.kind & kindMask {
+ case kindStruct:
+ st := (*structtype)(unsafe.Pointer(t))
+ return st.pkgPath.name()
+ case kindInterface:
+ it := (*interfacetype)(unsafe.Pointer(t))
+ return it.pkgpath.name()
+ }
+ return ""
+}
+
+// reflectOffs holds type offsets defined at run time by the reflect package.
+//
+// When a type is defined at run time, its *rtype data lives on the heap.
+// There are a wide range of possible addresses the heap may use, that
+// may not be representable as a 32-bit offset. Moreover the GC may
+// one day start moving heap memory, in which case there is no stable
+// offset that can be defined.
+//
+// To provide stable offsets, we pin *rtype objects in a global map
+// and treat the offset as an identifier. We use negative offsets that
+// do not overlap with any compile-time module offsets.
+//
+// Entries are created by reflect.addReflectOff.
+var reflectOffs struct {
+ lock mutex
+ next int32
+ m map[int32]unsafe.Pointer
+ minv map[unsafe.Pointer]int32
+}
+
+func reflectOffsLock() {
+ lock(&reflectOffs.lock)
+ if raceenabled {
+ raceacquire(unsafe.Pointer(&reflectOffs.lock))
+ }
+}
+
+func reflectOffsUnlock() {
+ if raceenabled {
+ racerelease(unsafe.Pointer(&reflectOffs.lock))
+ }
+ unlock(&reflectOffs.lock)
+}
+
+func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
+ if off == 0 {
+ return name{}
+ }
+ base := uintptr(ptrInModule)
+ for md := &firstmoduledata; md != nil; md = md.next {
+ if base >= md.types && base < md.etypes {
+ res := md.types + uintptr(off)
+ if res > md.etypes {
+ println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
+ throw("runtime: name offset out of range")
+ }
+ return name{(*byte)(unsafe.Pointer(res))}
+ }
+ }
+
+ // No module found. See if it is a run-time name.
+ reflectOffsLock()
+ res, found := reflectOffs.m[int32(off)]
+ reflectOffsUnlock()
+ if !found {
+ println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:")
+ for next := &firstmoduledata; next != nil; next = next.next {
+ println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
+ }
+ throw("runtime: name offset base pointer out of range")
+ }
+ return name{(*byte)(res)}
+}
+
+func (t *_type) nameOff(off nameOff) name {
+ return resolveNameOff(unsafe.Pointer(t), off)
+}
+
+func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type {
+ if off == 0 || off == -1 {
+ // -1 is the sentinel value for unreachable code.
+ // See cmd/link/internal/ld/data.go:relocsym.
+ return nil
+ }
+ base := uintptr(ptrInModule)
+ var md *moduledata
+ for next := &firstmoduledata; next != nil; next = next.next {
+ if base >= next.types && base < next.etypes {
+ md = next
+ break
+ }
+ }
+ if md == nil {
+ reflectOffsLock()
+ res := reflectOffs.m[int32(off)]
+ reflectOffsUnlock()
+ if res == nil {
+ println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
+ for next := &firstmoduledata; next != nil; next = next.next {
+ println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
+ }
+ throw("runtime: type offset base pointer out of range")
+ }
+ return (*_type)(res)
+ }
+ if t := md.typemap[off]; t != nil {
+ return t
+ }
+ res := md.types + uintptr(off)
+ if res > md.etypes {
+ println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
+ throw("runtime: type offset out of range")
+ }
+ return (*_type)(unsafe.Pointer(res))
+}
+
+func (t *_type) typeOff(off typeOff) *_type {
+ return resolveTypeOff(unsafe.Pointer(t), off)
+}
+
+func (t *_type) textOff(off textOff) unsafe.Pointer {
+ if off == -1 {
+ // -1 is the sentinel value for unreachable code.
+ // See cmd/link/internal/ld/data.go:relocsym.
+ return unsafe.Pointer(abi.FuncPCABIInternal(unreachableMethod))
+ }
+ base := uintptr(unsafe.Pointer(t))
+ var md *moduledata
+ for next := &firstmoduledata; next != nil; next = next.next {
+ if base >= next.types && base < next.etypes {
+ md = next
+ break
+ }
+ }
+ if md == nil {
+ reflectOffsLock()
+ res := reflectOffs.m[int32(off)]
+ reflectOffsUnlock()
+ if res == nil {
+ println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
+ for next := &firstmoduledata; next != nil; next = next.next {
+ println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
+ }
+ throw("runtime: text offset base pointer out of range")
+ }
+ return res
+ }
+ res := md.textAddr(uint32(off))
+ return unsafe.Pointer(res)
+}
+
+func (t *functype) in() []*_type {
+ // See funcType in reflect/type.go for details on data layout.
+ uadd := uintptr(unsafe.Sizeof(functype{}))
+ if t.typ.tflag&tflagUncommon != 0 {
+ uadd += unsafe.Sizeof(uncommontype{})
+ }
+ return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[:t.inCount]
+}
+
+func (t *functype) out() []*_type {
+ // See funcType in reflect/type.go for details on data layout.
+ uadd := uintptr(unsafe.Sizeof(functype{}))
+ if t.typ.tflag&tflagUncommon != 0 {
+ uadd += unsafe.Sizeof(uncommontype{})
+ }
+ outCount := t.outCount & (1<<15 - 1)
+ return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[t.inCount : t.inCount+outCount]
+}
+
+func (t *functype) dotdotdot() bool {
+ return t.outCount&(1<<15) != 0
+}
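+
+// Illustrative sketch, not part of the upstream source: how the variadic
+// flag is packed into the high bit of outCount, mirroring out() and
+// dotdotdot() above:
+//
+// outCount := uint16(2) | 1<<15 // two results, variadic signature
+// n := outCount & (1<<15 - 1) // n == 2, the real result count
+// variadic := outCount&(1<<15) != 0 // true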
+
+type nameOff int32
+type typeOff int32
+type textOff int32
+
+type method struct {
+ name nameOff
+ mtyp typeOff
+ ifn textOff
+ tfn textOff
+}
+
+type uncommontype struct {
+ pkgpath nameOff
+ mcount uint16 // number of methods
+ xcount uint16 // number of exported methods
+ moff uint32 // offset from this uncommontype to [mcount]method
+ _ uint32 // unused
+}
+
+type imethod struct {
+ name nameOff
+ ityp typeOff
+}
+
+type interfacetype struct {
+ typ _type
+ pkgpath name
+ mhdr []imethod
+}
+
+type maptype struct {
+ typ _type
+ key *_type
+ elem *_type
+ bucket *_type // internal type representing a hash bucket
+ // function for hashing keys (ptr to key, seed) -> hash
+ hasher func(unsafe.Pointer, uintptr) uintptr
+ keysize uint8 // size of key slot
+ elemsize uint8 // size of elem slot
+ bucketsize uint16 // size of bucket
+ flags uint32
+}
+
+// Note: flag values must match those used in the TMAP case
+// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
+func (mt *maptype) indirectkey() bool { // store ptr to key instead of key itself
+ return mt.flags&1 != 0
+}
+func (mt *maptype) indirectelem() bool { // store ptr to elem instead of elem itself
+ return mt.flags&2 != 0
+}
+func (mt *maptype) reflexivekey() bool { // true if k==k for all keys
+ return mt.flags&4 != 0
+}
+func (mt *maptype) needkeyupdate() bool { // true if we need to update key on an overwrite
+ return mt.flags&8 != 0
+}
+func (mt *maptype) hashMightPanic() bool { // true if hash function might panic
+ return mt.flags&16 != 0
+}
+
+type arraytype struct {
+ typ _type
+ elem *_type
+ slice *_type
+ len uintptr
+}
+
+type chantype struct {
+ typ _type
+ elem *_type
+ dir uintptr
+}
+
+type slicetype struct {
+ typ _type
+ elem *_type
+}
+
+type functype struct {
+ typ _type
+ inCount uint16
+ outCount uint16
+}
+
+type ptrtype struct {
+ typ _type
+ elem *_type
+}
+
+type structfield struct {
+ name name
+ typ *_type
+ offset uintptr
+}
+
+type structtype struct {
+ typ _type
+ pkgPath name
+ fields []structfield
+}
+
+// name is an encoded type name with optional extra data.
+// See reflect/type.go for details.
+type name struct {
+ bytes *byte
+}
+
+func (n name) data(off int) *byte {
+ return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off)))
+}
+
+func (n name) isExported() bool {
+ return (*n.bytes)&(1<<0) != 0
+}
+
+func (n name) isEmbedded() bool {
+ return (*n.bytes)&(1<<3) != 0
+}
+
+func (n name) readvarint(off int) (int, int) {
+ v := 0
+ for i := 0; ; i++ {
+ x := *n.data(off + i)
+ v += int(x&0x7f) << (7 * i)
+ if x&0x80 == 0 {
+ return i + 1, v
+ }
+ }
+}
+
+func (n name) name() (s string) {
+ if n.bytes == nil {
+ return ""
+ }
+ i, l := n.readvarint(1)
+ if l == 0 {
+ return ""
+ }
+ hdr := (*stringStruct)(unsafe.Pointer(&s))
+ hdr.str = unsafe.Pointer(n.data(1 + i))
+ hdr.len = l
+ return
+}
+
+func (n name) tag() (s string) {
+ if *n.data(0)&(1<<1) == 0 {
+ return ""
+ }
+ i, l := n.readvarint(1)
+ i2, l2 := n.readvarint(1 + i + l)
+ hdr := (*stringStruct)(unsafe.Pointer(&s))
+ hdr.str = unsafe.Pointer(n.data(1 + i + l + i2))
+ hdr.len = l2
+ return
+}
+
+func (n name) pkgPath() string {
+ if n.bytes == nil || *n.data(0)&(1<<2) == 0 {
+ return ""
+ }
+ i, l := n.readvarint(1)
+ off := 1 + i + l
+ if *n.data(0)&(1<<1) != 0 {
+ i2, l2 := n.readvarint(off)
+ off += i2 + l2
+ }
+ var nameOff nameOff
+ copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off)))[:])
+ pkgPathName := resolveNameOff(unsafe.Pointer(n.bytes), nameOff)
+ return pkgPathName.name()
+}
+
+func (n name) isBlank() bool {
+ if n.bytes == nil {
+ return false
+ }
+ _, l := n.readvarint(1)
+ return l == 1 && *n.data(2) == '_'
+}
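+
+// Illustrative sketch, not part of the upstream source, of the layout that
+// readvarint, name and tag decode: a flags byte, a varint name length, the
+// name bytes, then (when flags bit 1 is set) a varint tag length and the
+// tag bytes. readVarint stands for a hypothetical standalone copy of
+// readvarint operating on a plain []byte.
+//
+// enc := []byte{1 << 1, 3, 'F', 'o', 'o', 4, 'j', 's', 'o', 'n'}
+// i, l := readVarint(enc, 1) // i == 1, l == 3
+// _ = string(enc[1+i : 1+i+l]) // "Foo"
+// i2, l2 := readVarint(enc, 1+i+l) // i2 == 1, l2 == 4
+// _ = string(enc[1+i+l+i2 : 1+i+l+i2+l2]) // "json"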
+
+// typelinksinit scans the types from extra modules and builds the
+// moduledata typemap used to de-duplicate type pointers.
+func typelinksinit() {
+ if firstmoduledata.next == nil {
+ return
+ }
+ typehash := make(map[uint32][]*_type, len(firstmoduledata.typelinks))
+
+ modules := activeModules()
+ prev := modules[0]
+ for _, md := range modules[1:] {
+ // Collect types from the previous module into typehash.
+ collect:
+ for _, tl := range prev.typelinks {
+ var t *_type
+ if prev.typemap == nil {
+ t = (*_type)(unsafe.Pointer(prev.types + uintptr(tl)))
+ } else {
+ t = prev.typemap[typeOff(tl)]
+ }
+ // Add to typehash if not seen before.
+ tlist := typehash[t.hash]
+ for _, tcur := range tlist {
+ if tcur == t {
+ continue collect
+ }
+ }
+ typehash[t.hash] = append(tlist, t)
+ }
+
+ if md.typemap == nil {
+ // If any of this module's typelinks match a type from a
+ // prior module, prefer that prior type by adding the offset
+ // to this module's typemap.
+ tm := make(map[typeOff]*_type, len(md.typelinks))
+ pinnedTypemaps = append(pinnedTypemaps, tm)
+ md.typemap = tm
+ for _, tl := range md.typelinks {
+ t := (*_type)(unsafe.Pointer(md.types + uintptr(tl)))
+ for _, candidate := range typehash[t.hash] {
+ seen := map[_typePair]struct{}{}
+ if typesEqual(t, candidate, seen) {
+ t = candidate
+ break
+ }
+ }
+ md.typemap[typeOff(tl)] = t
+ }
+ }
+
+ prev = md
+ }
+}
+
+type _typePair struct {
+ t1 *_type
+ t2 *_type
+}
+
+// typesEqual reports whether two types are equal.
+//
+// Everywhere in the runtime and reflect packages, it is assumed that
+// there is exactly one *_type per Go type, so that pointer equality
+// can be used to test if types are equal. There is one place that
+// breaks this assumption: buildmode=shared. In this case a type can
+// appear as two different pieces of memory. This is hidden from the
+// runtime and reflect package by the per-module typemap built in
+// typelinksinit. It uses typesEqual to map types from later modules
+// back into earlier ones.
+//
+// Only typelinksinit needs this function.
+func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
+ tp := _typePair{t, v}
+ if _, ok := seen[tp]; ok {
+ return true
+ }
+
+ // Mark these types as seen and thus equivalent, which prevents an
+ // infinite loop if the two types are identical but recursively
+ // defined and loaded from different modules.
+ seen[tp] = struct{}{}
+
+ if t == v {
+ return true
+ }
+ kind := t.kind & kindMask
+ if kind != v.kind&kindMask {
+ return false
+ }
+ if t.string() != v.string() {
+ return false
+ }
+ ut := t.uncommon()
+ uv := v.uncommon()
+ if ut != nil || uv != nil {
+ if ut == nil || uv == nil {
+ return false
+ }
+ pkgpatht := t.nameOff(ut.pkgpath).name()
+ pkgpathv := v.nameOff(uv.pkgpath).name()
+ if pkgpatht != pkgpathv {
+ return false
+ }
+ }
+ if kindBool <= kind && kind <= kindComplex128 {
+ return true
+ }
+ switch kind {
+ case kindString, kindUnsafePointer:
+ return true
+ case kindArray:
+ at := (*arraytype)(unsafe.Pointer(t))
+ av := (*arraytype)(unsafe.Pointer(v))
+ return typesEqual(at.elem, av.elem, seen) && at.len == av.len
+ case kindChan:
+ ct := (*chantype)(unsafe.Pointer(t))
+ cv := (*chantype)(unsafe.Pointer(v))
+ return ct.dir == cv.dir && typesEqual(ct.elem, cv.elem, seen)
+ case kindFunc:
+ ft := (*functype)(unsafe.Pointer(t))
+ fv := (*functype)(unsafe.Pointer(v))
+ if ft.outCount != fv.outCount || ft.inCount != fv.inCount {
+ return false
+ }
+ tin, vin := ft.in(), fv.in()
+ for i := 0; i < len(tin); i++ {
+ if !typesEqual(tin[i], vin[i], seen) {
+ return false
+ }
+ }
+ tout, vout := ft.out(), fv.out()
+ for i := 0; i < len(tout); i++ {
+ if !typesEqual(tout[i], vout[i], seen) {
+ return false
+ }
+ }
+ return true
+ case kindInterface:
+ it := (*interfacetype)(unsafe.Pointer(t))
+ iv := (*interfacetype)(unsafe.Pointer(v))
+ if it.pkgpath.name() != iv.pkgpath.name() {
+ return false
+ }
+ if len(it.mhdr) != len(iv.mhdr) {
+ return false
+ }
+ for i := range it.mhdr {
+ tm := &it.mhdr[i]
+ vm := &iv.mhdr[i]
+ // Note the mhdr array can be relocated from
+ // another module. See #17724.
+ tname := resolveNameOff(unsafe.Pointer(tm), tm.name)
+ vname := resolveNameOff(unsafe.Pointer(vm), vm.name)
+ if tname.name() != vname.name() {
+ return false
+ }
+ if tname.pkgPath() != vname.pkgPath() {
+ return false
+ }
+ tityp := resolveTypeOff(unsafe.Pointer(tm), tm.ityp)
+ vityp := resolveTypeOff(unsafe.Pointer(vm), vm.ityp)
+ if !typesEqual(tityp, vityp, seen) {
+ return false
+ }
+ }
+ return true
+ case kindMap:
+ mt := (*maptype)(unsafe.Pointer(t))
+ mv := (*maptype)(unsafe.Pointer(v))
+ return typesEqual(mt.key, mv.key, seen) && typesEqual(mt.elem, mv.elem, seen)
+ case kindPtr:
+ pt := (*ptrtype)(unsafe.Pointer(t))
+ pv := (*ptrtype)(unsafe.Pointer(v))
+ return typesEqual(pt.elem, pv.elem, seen)
+ case kindSlice:
+ st := (*slicetype)(unsafe.Pointer(t))
+ sv := (*slicetype)(unsafe.Pointer(v))
+ return typesEqual(st.elem, sv.elem, seen)
+ case kindStruct:
+ st := (*structtype)(unsafe.Pointer(t))
+ sv := (*structtype)(unsafe.Pointer(v))
+ if len(st.fields) != len(sv.fields) {
+ return false
+ }
+ if st.pkgPath.name() != sv.pkgPath.name() {
+ return false
+ }
+ for i := range st.fields {
+ tf := &st.fields[i]
+ vf := &sv.fields[i]
+ if tf.name.name() != vf.name.name() {
+ return false
+ }
+ if !typesEqual(tf.typ, vf.typ, seen) {
+ return false
+ }
+ if tf.name.tag() != vf.name.tag() {
+ return false
+ }
+ if tf.offset != vf.offset {
+ return false
+ }
+ if tf.name.isEmbedded() != vf.name.isEmbedded() {
+ return false
+ }
+ }
+ return true
+ default:
+ println("runtime: impossible type kind", kind)
+ throw("runtime: impossible type kind")
+ return false
+ }
+}
diff --git a/contrib/go/_std_1.18/src/runtime/typekind.go b/contrib/go/_std_1.19/src/runtime/typekind.go
index 7087a9b046..7087a9b046 100644
--- a/contrib/go/_std_1.18/src/runtime/typekind.go
+++ b/contrib/go/_std_1.19/src/runtime/typekind.go
diff --git a/contrib/go/_std_1.18/src/runtime/utf8.go b/contrib/go/_std_1.19/src/runtime/utf8.go
index 52b757662d..52b757662d 100644
--- a/contrib/go/_std_1.18/src/runtime/utf8.go
+++ b/contrib/go/_std_1.19/src/runtime/utf8.go
diff --git a/contrib/go/_std_1.19/src/runtime/vdso_elf64.go b/contrib/go/_std_1.19/src/runtime/vdso_elf64.go
new file mode 100644
index 0000000000..d41d25e770
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/vdso_elf64.go
@@ -0,0 +1,79 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x)
+
+package runtime
+
+// ELF64 structure definitions for use by the vDSO loader
+
+type elfSym struct {
+ st_name uint32
+ st_info byte
+ st_other byte
+ st_shndx uint16
+ st_value uint64
+ st_size uint64
+}
+
+type elfVerdef struct {
+ vd_version uint16 /* Version revision */
+ vd_flags uint16 /* Version information */
+ vd_ndx uint16 /* Version Index */
+ vd_cnt uint16 /* Number of associated aux entries */
+ vd_hash uint32 /* Version name hash value */
+ vd_aux uint32 /* Offset in bytes to verdaux array */
+ vd_next uint32 /* Offset in bytes to next verdef entry */
+}
+
+type elfEhdr struct {
+ e_ident [_EI_NIDENT]byte /* Magic number and other info */
+ e_type uint16 /* Object file type */
+ e_machine uint16 /* Architecture */
+ e_version uint32 /* Object file version */
+ e_entry uint64 /* Entry point virtual address */
+ e_phoff uint64 /* Program header table file offset */
+ e_shoff uint64 /* Section header table file offset */
+ e_flags uint32 /* Processor-specific flags */
+ e_ehsize uint16 /* ELF header size in bytes */
+ e_phentsize uint16 /* Program header table entry size */
+ e_phnum uint16 /* Program header table entry count */
+ e_shentsize uint16 /* Section header table entry size */
+ e_shnum uint16 /* Section header table entry count */
+ e_shstrndx uint16 /* Section header string table index */
+}
+
+type elfPhdr struct {
+ p_type uint32 /* Segment type */
+ p_flags uint32 /* Segment flags */
+ p_offset uint64 /* Segment file offset */
+ p_vaddr uint64 /* Segment virtual address */
+ p_paddr uint64 /* Segment physical address */
+ p_filesz uint64 /* Segment size in file */
+ p_memsz uint64 /* Segment size in memory */
+ p_align uint64 /* Segment alignment */
+}
+
+type elfShdr struct {
+ sh_name uint32 /* Section name (string tbl index) */
+ sh_type uint32 /* Section type */
+ sh_flags uint64 /* Section flags */
+ sh_addr uint64 /* Section virtual addr at execution */
+ sh_offset uint64 /* Section file offset */
+ sh_size uint64 /* Section size in bytes */
+ sh_link uint32 /* Link to another section */
+ sh_info uint32 /* Additional section information */
+ sh_addralign uint64 /* Section alignment */
+ sh_entsize uint64 /* Entry size if section holds table */
+}
+
+type elfDyn struct {
+ d_tag int64 /* Dynamic entry type */
+ d_val uint64 /* Integer value */
+}
+
+type elfVerdaux struct {
+ vda_name uint32 /* Version or dependency names */
+ vda_next uint32 /* Offset in bytes to next verdaux entry */
+}
diff --git a/contrib/go/_std_1.19/src/runtime/vdso_in_none.go b/contrib/go/_std_1.19/src/runtime/vdso_in_none.go
new file mode 100644
index 0000000000..3a6ee6f049
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/vdso_in_none.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !riscv64 && !s390x) || !linux
+
+package runtime
+
+// A dummy version of inVDSOPage for targets that don't use a VDSO.
+
+func inVDSOPage(pc uintptr) bool {
+ return false
+}
diff --git a/contrib/go/_std_1.19/src/runtime/vdso_linux.go b/contrib/go/_std_1.19/src/runtime/vdso_linux.go
new file mode 100644
index 0000000000..4523615711
--- /dev/null
+++ b/contrib/go/_std_1.19/src/runtime/vdso_linux.go
@@ -0,0 +1,295 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (386 || amd64 || arm || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x)
+
+package runtime
+
+import "unsafe"
+
+// Look up symbols in the Linux vDSO.
+
+// This code was originally based on the sample Linux vDSO parser at
+// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/tools/testing/selftests/vDSO/parse_vdso.c
+
+// This implements the ELF dynamic linking spec at
+// http://sco.com/developers/gabi/latest/ch5.dynamic.html
+
+// The version section is documented at
+// https://refspecs.linuxfoundation.org/LSB_3.2.0/LSB-Core-generic/LSB-Core-generic/symversion.html
+
+const (
+ _AT_SYSINFO_EHDR = 33
+
+ _PT_LOAD = 1 /* Loadable program segment */
+ _PT_DYNAMIC = 2 /* Dynamic linking information */
+
+ _DT_NULL = 0 /* Marks end of dynamic section */
+ _DT_HASH = 4 /* Dynamic symbol hash table */
+ _DT_STRTAB = 5 /* Address of string table */
+ _DT_SYMTAB = 6 /* Address of symbol table */
+ _DT_GNU_HASH = 0x6ffffef5 /* GNU-style dynamic symbol hash table */
+ _DT_VERSYM = 0x6ffffff0
+ _DT_VERDEF = 0x6ffffffc
+
+ _VER_FLG_BASE = 0x1 /* Version definition of file itself */
+
+ _SHN_UNDEF = 0 /* Undefined section */
+
+ _SHT_DYNSYM = 11 /* Dynamic linker symbol table */
+
+ _STT_FUNC = 2 /* Symbol is a code object */
+
+ _STT_NOTYPE = 0 /* Symbol type is not specified */
+
+ _STB_GLOBAL = 1 /* Global symbol */
+ _STB_WEAK = 2 /* Weak symbol */
+
+ _EI_NIDENT = 16
+
+ // Maximum indices for the array types used when traversing the vDSO ELF structures.
+ // Computed from architecture-specific max provided by vdso_linux_*.go
+ vdsoSymTabSize = vdsoArrayMax / unsafe.Sizeof(elfSym{})
+ vdsoDynSize = vdsoArrayMax / unsafe.Sizeof(elfDyn{})
+ vdsoSymStringsSize = vdsoArrayMax // byte
+ vdsoVerSymSize = vdsoArrayMax / 2 // uint16
+ vdsoHashSize = vdsoArrayMax / 4 // uint32
+
+ // vdsoBloomSizeScale is a scaling factor for gnuhash tables, which are
+ // uint32-indexed but contain uintptrs.
+ vdsoBloomSizeScale = unsafe.Sizeof(uintptr(0)) / 4 // uint32
+)
+
+/* How to extract and insert information held in the st_info field. */
+func _ELF_ST_BIND(val byte) byte { return val >> 4 }
+func _ELF_ST_TYPE(val byte) byte { return val & 0xf }
+
+type vdsoSymbolKey struct {
+ name string
+ symHash uint32
+ gnuHash uint32
+ ptr *uintptr
+}
+
+type vdsoVersionKey struct {
+ version string
+ verHash uint32
+}
+
+type vdsoInfo struct {
+ valid bool
+
+ /* Load information */
+ loadAddr uintptr
+ loadOffset uintptr /* loadAddr - recorded vaddr */
+
+ /* Symbol table */
+ symtab *[vdsoSymTabSize]elfSym
+ symstrings *[vdsoSymStringsSize]byte
+ chain []uint32
+ bucket []uint32
+ symOff uint32
+ isGNUHash bool
+
+ /* Version table */
+ versym *[vdsoVerSymSize]uint16
+ verdef *elfVerdef
+}
+
+// See vdso_linux_*.go for vdsoSymbolKeys[] and the vdso*Sym vars.
+
+func vdsoInitFromSysinfoEhdr(info *vdsoInfo, hdr *elfEhdr) {
+ info.valid = false
+ info.loadAddr = uintptr(unsafe.Pointer(hdr))
+
+ pt := unsafe.Pointer(info.loadAddr + uintptr(hdr.e_phoff))
+
+ // We need two things from the segment table: the load offset
+ // and the dynamic table.
+ var foundVaddr bool
+ var dyn *[vdsoDynSize]elfDyn
+ for i := uint16(0); i < hdr.e_phnum; i++ {
+ pt := (*elfPhdr)(add(pt, uintptr(i)*unsafe.Sizeof(elfPhdr{})))
+ switch pt.p_type {
+ case _PT_LOAD:
+ if !foundVaddr {
+ foundVaddr = true
+ info.loadOffset = info.loadAddr + uintptr(pt.p_offset-pt.p_vaddr)
+ }
+
+ case _PT_DYNAMIC:
+ dyn = (*[vdsoDynSize]elfDyn)(unsafe.Pointer(info.loadAddr + uintptr(pt.p_offset)))
+ }
+ }
+
+ if !foundVaddr || dyn == nil {
+ return // Failed
+ }
+
+ // Fish out the useful bits of the dynamic table.
+
+ var hash, gnuhash *[vdsoHashSize]uint32
+ info.symstrings = nil
+ info.symtab = nil
+ info.versym = nil
+ info.verdef = nil
+ for i := 0; dyn[i].d_tag != _DT_NULL; i++ {
+ dt := &dyn[i]
+ p := info.loadOffset + uintptr(dt.d_val)
+ switch dt.d_tag {
+ case _DT_STRTAB:
+ info.symstrings = (*[vdsoSymStringsSize]byte)(unsafe.Pointer(p))
+ case _DT_SYMTAB:
+ info.symtab = (*[vdsoSymTabSize]elfSym)(unsafe.Pointer(p))
+ case _DT_HASH:
+ hash = (*[vdsoHashSize]uint32)(unsafe.Pointer(p))
+ case _DT_GNU_HASH:
+ gnuhash = (*[vdsoHashSize]uint32)(unsafe.Pointer(p))
+ case _DT_VERSYM:
+ info.versym = (*[vdsoVerSymSize]uint16)(unsafe.Pointer(p))
+ case _DT_VERDEF:
+ info.verdef = (*elfVerdef)(unsafe.Pointer(p))
+ }
+ }
+
+ if info.symstrings == nil || info.symtab == nil || (hash == nil && gnuhash == nil) {
+ return // Failed
+ }
+
+ if info.verdef == nil {
+ info.versym = nil
+ }
+
+ if gnuhash != nil {
+ // Parse the GNU hash table header.
+ nbucket := gnuhash[0]
+ info.symOff = gnuhash[1]
+ bloomSize := gnuhash[2]
+ info.bucket = gnuhash[4+bloomSize*uint32(vdsoBloomSizeScale):][:nbucket]
+ info.chain = gnuhash[4+bloomSize*uint32(vdsoBloomSizeScale)+nbucket:]
+ info.isGNUHash = true
+ } else {
+ // Parse the hash table header.
+ nbucket := hash[0]
+ nchain := hash[1]
+ info.bucket = hash[2 : 2+nbucket]
+ info.chain = hash[2+nbucket : 2+nbucket+nchain]
+ }
+
+ // That's all we need.
+ info.valid = true
+}
+
+func vdsoFindVersion(info *vdsoInfo, ver *vdsoVersionKey) int32 {
+ if !info.valid {
+ return 0
+ }
+
+ def := info.verdef
+ for {
+ if def.vd_flags&_VER_FLG_BASE == 0 {
+ aux := (*elfVerdaux)(add(unsafe.Pointer(def), uintptr(def.vd_aux)))
+ if def.vd_hash == ver.verHash && ver.version == gostringnocopy(&info.symstrings[aux.vda_name]) {
+ return int32(def.vd_ndx & 0x7fff)
+ }
+ }
+
+ if def.vd_next == 0 {
+ break
+ }
+ def = (*elfVerdef)(add(unsafe.Pointer(def), uintptr(def.vd_next)))
+ }
+
+ return -1 // cannot match any version
+}
+
+func vdsoParseSymbols(info *vdsoInfo, version int32) {
+ if !info.valid {
+ return
+ }
+
+ apply := func(symIndex uint32, k vdsoSymbolKey) bool {
+ sym := &info.symtab[symIndex]
+ typ := _ELF_ST_TYPE(sym.st_info)
+ bind := _ELF_ST_BIND(sym.st_info)
+ // On ppc64x, VDSO functions are of type _STT_NOTYPE.
+ if typ != _STT_FUNC && typ != _STT_NOTYPE || bind != _STB_GLOBAL && bind != _STB_WEAK || sym.st_shndx == _SHN_UNDEF {
+ return false
+ }
+ if k.name != gostringnocopy(&info.symstrings[sym.st_name]) {
+ return false
+ }
+ // Check symbol version.
+ if info.versym != nil && version != 0 && int32(info.versym[symIndex]&0x7fff) != version {
+ return false
+ }
+
+ *k.ptr = info.loadOffset + uintptr(sym.st_value)
+ return true
+ }
+
+ if !info.isGNUHash {
+ // Old-style DT_HASH table.
+ for _, k := range vdsoSymbolKeys {
+ if len(info.bucket) > 0 {
+ for chain := info.bucket[k.symHash%uint32(len(info.bucket))]; chain != 0; chain = info.chain[chain] {
+ if apply(chain, k) {
+ break
+ }
+ }
+ }
+ }
+ return
+ }
+
+ // New-style DT_GNU_HASH table.
+ for _, k := range vdsoSymbolKeys {
+ symIndex := info.bucket[k.gnuHash%uint32(len(info.bucket))]
+ if symIndex < info.symOff {
+ continue
+ }
+ for ; ; symIndex++ {
+ hash := info.chain[symIndex-info.symOff]
+ if hash|1 == k.gnuHash|1 {
+ // Found a hash match.
+ if apply(symIndex, k) {
+ break
+ }
+ }
+ if hash&1 != 0 {
+ // End of chain.
+ break
+ }
+ }
+ }
+}
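+
+// Illustrative sketch, not part of the upstream source: the precomputed
+// symHash and gnuHash fields of vdsoSymbolKeys (see vdso_linux_*.go) are,
+// as far as we can tell, the classic SysV-ELF and GNU symbol hashes, which
+// can be reproduced as:
+//
+// func elfHash(name string) uint32 { // SysV hash, for DT_HASH tables
+// var h uint32
+// for i := 0; i < len(name); i++ {
+// h = h<<4 + uint32(name[i])
+// g := h & 0xf0000000
+// h ^= g >> 24
+// h &^= g
+// }
+// return h
+// }
+//
+// func gnuHash(name string) uint32 { // DJB hash, for DT_GNU_HASH tables
+// h := uint32(5381)
+// for i := 0; i < len(name); i++ {
+// h = h*33 + uint32(name[i])
+// }
+// return h
+// }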
+
+func vdsoauxv(tag, val uintptr) {
+ switch tag {
+ case _AT_SYSINFO_EHDR:
+ if val == 0 {
+ // Something went wrong
+ return
+ }
+ var info vdsoInfo
+ // TODO(rsc): I don't understand why the compiler thinks info escapes
+ // when passed to the three functions below.
+ info1 := (*vdsoInfo)(noescape(unsafe.Pointer(&info)))
+ vdsoInitFromSysinfoEhdr(info1, (*elfEhdr)(unsafe.Pointer(val)))
+ vdsoParseSymbols(info1, vdsoFindVersion(info1, &vdsoLinuxVersion))
+ }
+}
+
+// inVDSOPage reports whether pc is on the VDSO page.
+//
+//go:nosplit
+func inVDSOPage(pc uintptr) bool {
+ for _, k := range vdsoSymbolKeys {
+ if *k.ptr != 0 {
+ page := *k.ptr &^ (physPageSize - 1)
+ return pc >= page && pc < page+physPageSize
+ }
+ }
+ return false
+}
diff --git a/contrib/go/_std_1.18/src/runtime/vdso_linux_amd64.go b/contrib/go/_std_1.19/src/runtime/vdso_linux_amd64.go
index 4e9f748f4a..4e9f748f4a 100644
--- a/contrib/go/_std_1.18/src/runtime/vdso_linux_amd64.go
+++ b/contrib/go/_std_1.19/src/runtime/vdso_linux_amd64.go
diff --git a/contrib/go/_std_1.18/src/runtime/write_err.go b/contrib/go/_std_1.19/src/runtime/write_err.go
index 81ae872e9c..81ae872e9c 100644
--- a/contrib/go/_std_1.18/src/runtime/write_err.go
+++ b/contrib/go/_std_1.19/src/runtime/write_err.go
diff --git a/contrib/go/_std_1.19/src/sort/search.go b/contrib/go/_std_1.19/src/sort/search.go
new file mode 100644
index 0000000000..874e40813d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/sort/search.go
@@ -0,0 +1,150 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements binary search.
+
+package sort
+
+// Search uses binary search to find and return the smallest index i
+// in [0, n) at which f(i) is true, assuming that on the range [0, n),
+// f(i) == true implies f(i+1) == true. That is, Search requires that
+// f is false for some (possibly empty) prefix of the input range [0, n)
+// and then true for the (possibly empty) remainder; Search returns
+// the first true index. If there is no such index, Search returns n.
+// (Note that the "not found" return value is not -1 as in, for instance,
+// strings.Index.)
+// Search calls f(i) only for i in the range [0, n).
+//
+// A common use of Search is to find the index i for a value x in
+// a sorted, indexable data structure such as an array or slice.
+// In this case, the argument f, typically a closure, captures the value
+// to be searched for, and how the data structure is indexed and
+// ordered.
+//
+// For instance, given a slice data sorted in ascending order,
+// the call Search(len(data), func(i int) bool { return data[i] >= 23 })
+// returns the smallest index i such that data[i] >= 23. If the caller
+// wants to find whether 23 is in the slice, it must test data[i] == 23
+// separately.
+//
+// Searching data sorted in descending order would use the <=
+// operator instead of the >= operator.
+//
+// To complete the example above, the following code tries to find the value
+// x in an integer slice data sorted in ascending order:
+//
+// x := 23
+// i := sort.Search(len(data), func(i int) bool { return data[i] >= x })
+// if i < len(data) && data[i] == x {
+// // x is present at data[i]
+// } else {
+// // x is not present in data,
+// // but i is the index where it would be inserted.
+// }
+//
+// As a more whimsical example, this program guesses your number:
+//
+// func GuessingGame() {
+// var s string
+// fmt.Printf("Pick an integer from 0 to 100.\n")
+// answer := sort.Search(100, func(i int) bool {
+// fmt.Printf("Is your number <= %d? ", i)
+// fmt.Scanf("%s", &s)
+// return s != "" && s[0] == 'y'
+// })
+// fmt.Printf("Your number is %d.\n", answer)
+// }
+func Search(n int, f func(int) bool) int {
+ // Define f(-1) == false and f(n) == true.
+ // Invariant: f(i-1) == false, f(j) == true.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if !f(h) {
+ i = h + 1 // preserves f(i-1) == false
+ } else {
+ j = h // preserves f(j) == true
+ }
+ }
+ // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
+ return i
+}
+
+// Find uses binary search to find and return the smallest index i in [0, n)
+// at which cmp(i) <= 0. If there is no such index i, Find returns i = n.
+// The found result is true if i < n and cmp(i) == 0.
+// Find calls cmp(i) only for i in the range [0, n).
+//
+// To permit binary search, Find requires that cmp(i) > 0 for a leading
+// prefix of the range, cmp(i) == 0 in the middle, and cmp(i) < 0 for
+// the final suffix of the range. (Each subrange could be empty.)
+// The usual way to establish this condition is to interpret cmp(i)
+// as a comparison of a desired target value t against entry i in an
+// underlying indexed data structure x, returning <0, 0, and >0
+// when t < x[i], t == x[i], and t > x[i], respectively.
+//
+// For example, to look for a particular string in a sorted, random-access
+// list of strings:
+//
+// i, found := sort.Find(x.Len(), func(i int) int {
+// return strings.Compare(target, x.At(i))
+// })
+// if found {
+// fmt.Printf("found %s at entry %d\n", target, i)
+// } else {
+// fmt.Printf("%s not found, would insert at %d", target, i)
+// }
+func Find(n int, cmp func(int) int) (i int, found bool) {
+ // The invariants here are similar to the ones in Search.
+ // Define cmp(-1) > 0 and cmp(n) <= 0
+ // Invariant: cmp(i-1) > 0, cmp(j) <= 0
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmp(h) > 0 {
+ i = h + 1 // preserves cmp(i-1) > 0
+ } else {
+ j = h // preserves cmp(j) <= 0
+ }
+ }
+ // i == j, cmp(i-1) > 0 and cmp(j) <= 0
+ return i, i < n && cmp(i) == 0
+}
+
+// Convenience wrappers for common cases.
+
+// SearchInts searches for x in a sorted slice of ints and returns the index
+// as specified by Search. The return value is the index to insert x if x is
+// not present (it could be len(a)).
+// The slice must be sorted in ascending order.
+func SearchInts(a []int, x int) int {
+ return Search(len(a), func(i int) bool { return a[i] >= x })
+}
+
+// SearchFloat64s searches for x in a sorted slice of float64s and returns the index
+// as specified by Search. The return value is the index to insert x if x is not
+// present (it could be len(a)).
+// The slice must be sorted in ascending order.
+func SearchFloat64s(a []float64, x float64) int {
+ return Search(len(a), func(i int) bool { return a[i] >= x })
+}
+
+// SearchStrings searches for x in a sorted slice of strings and returns the index
+// as specified by Search. The return value is the index to insert x if x is not
+// present (it could be len(a)).
+// The slice must be sorted in ascending order.
+func SearchStrings(a []string, x string) int {
+ return Search(len(a), func(i int) bool { return a[i] >= x })
+}
+
+// Search returns the result of applying SearchInts to the receiver and x.
+func (p IntSlice) Search(x int) int { return SearchInts(p, x) }
+
+// Search returns the result of applying SearchFloat64s to the receiver and x.
+func (p Float64Slice) Search(x float64) int { return SearchFloat64s(p, x) }
+
+// Search returns the result of applying SearchStrings to the receiver and x.
+func (p StringSlice) Search(x string) int { return SearchStrings(p, x) }
diff --git a/contrib/go/_std_1.19/src/sort/slice.go b/contrib/go/_std_1.19/src/sort/slice.go
new file mode 100644
index 0000000000..443182b42e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/sort/slice.go
@@ -0,0 +1,49 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sort
+
+import "math/bits"
+
+// Slice sorts the slice x given the provided less function.
+// It panics if x is not a slice.
+//
+// The sort is not guaranteed to be stable: equal elements
+// may be reversed from their original order.
+// For a stable sort, use SliceStable.
+//
+// The less function must satisfy the same requirements as
+// the Interface type's Less method.
+func Slice(x any, less func(i, j int) bool) {
+ rv := reflectValueOf(x)
+ swap := reflectSwapper(x)
+ length := rv.Len()
+ limit := bits.Len(uint(length))
+ pdqsort_func(lessSwap{less, swap}, 0, length, limit)
+}
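+
+// A usage sketch (editorial, not part of the upstream source):
+//
+// people := []struct {
+// Name string
+// Age int
+// }{{"Gopher", 13}, {"Alice", 55}, {"Bob", 24}}
+// sort.Slice(people, func(i, j int) bool { return people[i].Age < people[j].Age })
+// // people is now ordered Gopher(13), Bob(24), Alice(55).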
+
+// SliceStable sorts the slice x using the provided less
+// function, keeping equal elements in their original order.
+// It panics if x is not a slice.
+//
+// The less function must satisfy the same requirements as
+// the Interface type's Less method.
+func SliceStable(x any, less func(i, j int) bool) {
+ rv := reflectValueOf(x)
+ swap := reflectSwapper(x)
+ stable_func(lessSwap{less, swap}, rv.Len())
+}
+
+// SliceIsSorted reports whether the slice x is sorted according to the provided less function.
+// It panics if x is not a slice.
+func SliceIsSorted(x any, less func(i, j int) bool) bool {
+ rv := reflectValueOf(x)
+ n := rv.Len()
+ for i := n - 1; i > 0; i-- {
+ if less(i, i-1) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/contrib/go/_std_1.18/src/sort/slice_go113.go b/contrib/go/_std_1.19/src/sort/slice_go113.go
index 53542dbd1a..53542dbd1a 100644
--- a/contrib/go/_std_1.18/src/sort/slice_go113.go
+++ b/contrib/go/_std_1.19/src/sort/slice_go113.go
diff --git a/contrib/go/_std_1.19/src/sort/sort.go b/contrib/go/_std_1.19/src/sort/sort.go
new file mode 100644
index 0000000000..68e2f0d082
--- /dev/null
+++ b/contrib/go/_std_1.19/src/sort/sort.go
@@ -0,0 +1,262 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run gen_sort_variants.go
+
+// Package sort provides primitives for sorting slices and user-defined collections.
+package sort
+
+import "math/bits"
+
+// An implementation of Interface can be sorted by the routines in this package.
+// The methods refer to elements of the underlying collection by integer index.
+type Interface interface {
+ // Len is the number of elements in the collection.
+ Len() int
+
+ // Less reports whether the element with index i
+ // must sort before the element with index j.
+ //
+ // If both Less(i, j) and Less(j, i) are false,
+ // then the elements at index i and j are considered equal.
+ // Sort may place equal elements in any order in the final result,
+ // while Stable preserves the original input order of equal elements.
+ //
+ // Less must describe a transitive ordering:
+ // - if both Less(i, j) and Less(j, k) are true, then Less(i, k) must be true as well.
+ // - if both Less(i, j) and Less(j, k) are false, then Less(i, k) must be false as well.
+ //
+ // Note that floating-point comparison (the < operator on float32 or float64 values)
+ // is not a transitive ordering when not-a-number (NaN) values are involved.
+ // See Float64Slice.Less for a correct implementation for floating-point values.
+ Less(i, j int) bool
+
+ // Swap swaps the elements with indexes i and j.
+ Swap(i, j int)
+}
+
+// Sort sorts data in ascending order as determined by the Less method.
+// It makes one call to data.Len to determine n and O(n*log(n)) calls to
+// data.Less and data.Swap. The sort is not guaranteed to be stable.
+func Sort(data Interface) {
+ n := data.Len()
+ if n <= 1 {
+ return
+ }
+ limit := bits.Len(uint(n))
+ pdqsort(data, 0, n, limit)
+}
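+
+// A usage sketch (editorial, not part of the upstream source): sorting a
+// user-defined collection through Interface; the ByLen type is hypothetical.
+//
+// type ByLen []string
+//
+// func (x ByLen) Len() int { return len(x) }
+// func (x ByLen) Less(i, j int) bool { return len(x[i]) < len(x[j]) }
+// func (x ByLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+//
+// sort.Sort(ByLen([]string{"banana", "kiwi", "apple"}))
+// // -> ["kiwi", "apple", "banana"]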
+
+type sortedHint int // hint for pdqsort when choosing the pivot
+
+const (
+ unknownHint sortedHint = iota
+ increasingHint
+ decreasingHint
+)
+
+// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+type xorshift uint64
+
+func (r *xorshift) Next() uint64 {
+ *r ^= *r << 13
+ *r ^= *r >> 17
+ *r ^= *r << 5
+ return uint64(*r)
+}
+
+func nextPowerOfTwo(length int) uint {
+ shift := uint(bits.Len(uint(length)))
+ return uint(1 << shift)
+}
+
+// lessSwap is a pair of Less and Swap functions for use with the
+// auto-generated func-optimized variant of sort.go in
+// zfuncversion.go.
+type lessSwap struct {
+ Less func(i, j int) bool
+ Swap func(i, j int)
+}
+
+type reverse struct {
+ // This embedded Interface permits Reverse to use the methods of
+ // another Interface implementation.
+ Interface
+}
+
+// Less returns the opposite of the embedded implementation's Less method.
+func (r reverse) Less(i, j int) bool {
+ return r.Interface.Less(j, i)
+}
+
+// Reverse returns the reverse order for data.
+func Reverse(data Interface) Interface {
+ return &reverse{data}
+}
+
+// IsSorted reports whether data is sorted.
+func IsSorted(data Interface) bool {
+ n := data.Len()
+ for i := n - 1; i > 0; i-- {
+ if data.Less(i, i-1) {
+ return false
+ }
+ }
+ return true
+}
+
+// Convenience types for common cases
+
+// IntSlice attaches the methods of Interface to []int, sorting in increasing order.
+type IntSlice []int
+
+func (x IntSlice) Len() int { return len(x) }
+func (x IntSlice) Less(i, j int) bool { return x[i] < x[j] }
+func (x IntSlice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+// Sort is a convenience method: x.Sort() calls Sort(x).
+func (x IntSlice) Sort() { Sort(x) }
+
+// Float64Slice implements Interface for a []float64, sorting in increasing order,
+// with not-a-number (NaN) values ordered before other values.
+type Float64Slice []float64
+
+func (x Float64Slice) Len() int { return len(x) }
+
+// Less reports whether x[i] should be ordered before x[j], as required by the sort Interface.
+// Note that floating-point comparison by itself is not a transitive relation: it does not
+// report a consistent ordering for not-a-number (NaN) values.
+// This implementation of Less places NaN values before any others, by using:
+//
+// x[i] < x[j] || (math.IsNaN(x[i]) && !math.IsNaN(x[j]))
+func (x Float64Slice) Less(i, j int) bool { return x[i] < x[j] || (isNaN(x[i]) && !isNaN(x[j])) }
+func (x Float64Slice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+// isNaN is a copy of math.IsNaN to avoid a dependency on the math package.
+func isNaN(f float64) bool {
+ return f != f
+}
+
+// Sort is a convenience method: x.Sort() calls Sort(x).
+func (x Float64Slice) Sort() { Sort(x) }
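+
+// A usage sketch (editorial, not part of the upstream source) of the NaN
+// ordering described above; assumes import "math":
+//
+// xs := []float64{3, math.NaN(), 1, math.Inf(-1)}
+// sort.Float64s(xs)
+// // xs is now [NaN, -Inf, 1, 3]: NaNs sort before all other values.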
+
+// StringSlice attaches the methods of Interface to []string, sorting in increasing order.
+type StringSlice []string
+
+func (x StringSlice) Len() int { return len(x) }
+func (x StringSlice) Less(i, j int) bool { return x[i] < x[j] }
+func (x StringSlice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+// Sort is a convenience method: x.Sort() calls Sort(x).
+func (x StringSlice) Sort() { Sort(x) }
+
+// Convenience wrappers for common cases
+
+// Ints sorts a slice of ints in increasing order.
+func Ints(x []int) { Sort(IntSlice(x)) }
+
+// Float64s sorts a slice of float64s in increasing order.
+// Not-a-number (NaN) values are ordered before other values.
+func Float64s(x []float64) { Sort(Float64Slice(x)) }
+
+// Strings sorts a slice of strings in increasing order.
+func Strings(x []string) { Sort(StringSlice(x)) }
+
+// IntsAreSorted reports whether the slice x is sorted in increasing order.
+func IntsAreSorted(x []int) bool { return IsSorted(IntSlice(x)) }
+
+// Float64sAreSorted reports whether the slice x is sorted in increasing order,
+// with not-a-number (NaN) values before any other values.
+func Float64sAreSorted(x []float64) bool { return IsSorted(Float64Slice(x)) }
+
+// StringsAreSorted reports whether the slice x is sorted in increasing order.
+func StringsAreSorted(x []string) bool { return IsSorted(StringSlice(x)) }
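+
+// A short usage sketch of the wrappers above, as a client package would
+// call them (illustrative only):
+//
+//	s := []int{3, 1, 2}
+//	sort.Ints(s)                // s is now [1 2 3]
+//	ok := sort.IntsAreSorted(s) // ok is true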
+
+// Notes on stable sorting:
+// The algorithms used are simple and provably correct on all inputs, and
+// use only logarithmic additional stack space. They perform well when
+// compared experimentally to other stable in-place sorting algorithms.
+//
+// Remarks on other algorithms evaluated:
+// - GCC's 4.6.3 stable_sort with merge_without_buffer from libstdc++:
+// Not faster.
+// - GCC's __rotate for block rotations: Not faster.
+// - "Practical in-place mergesort" from Jyrki Katajainen, Tomi A. Pasanen
+// and Jukka Teuhola; Nordic Journal of Computing 3,1 (1996), 27-40:
+// The given algorithms are in-place; the number of Swaps and assignments
+// grows as n log n, but the algorithm is not stable.
+// - "Fast Stable In-Place Sorting with O(n) Data Moves" J.I. Munro and
+// V. Raman in Algorithmica (1996) 16, 115-160:
+// This algorithm either needs an additional 2n bits or works only if
+// there are enough distinct elements available to encode some
+// permutations which have to be undone later (so it is not stable on
+// arbitrary input).
+// - All the optimal in-place sorting/merging algorithms I found are either
+// unstable or rely on enough different elements in each step to encode the
+// performed block rearrangements. See also "In-Place Merging Algorithms",
+// Denham Coates-Evely, Department of Computer Science, Kings College,
+// January 2004 and the references in there.
+// - Often "optimal" algorithms are optimal in the number of assignments,
+// but Interface offers only Swap as an operation.
+
+// Stable sorts data in ascending order as determined by the Less method,
+// while keeping the original order of equal elements.
+//
+// It makes one call to data.Len to determine n, O(n*log(n)) calls to
+// data.Less and O(n*log(n)*log(n)) calls to data.Swap.
+func Stable(data Interface) {
+ stable(data, data.Len())
+}
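+
+// A minimal sketch of when Stable matters (the record type byAge and the
+// data are illustrative): sorting by one key keeps the original relative
+// order of records that compare equal, which Sort does not guarantee.
+//
+//	// ann and bob compare equal on age; Stable keeps ann before bob.
+//	people := byAge{{"ann", 30}, {"bob", 30}, {"cat", 25}}
+//	sort.Stable(people)
+//	// people is now [{cat 25} {ann 30} {bob 30}]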
+
+/*
+Complexity of Stable Sorting
+
+
+Complexity of block swapping rotation
+
+Each Swap puts one new element into its correct, final position.
+Elements which reach their final position are no longer moved.
+Thus block swapping rotation needs |u|+|v| calls to Swap.
+This is best possible as each element might need a move.
+
+Pay attention when comparing to other optimal algorithms which
+typically count the number of assignments instead of swaps:
+E.g. the optimal algorithm of Dudzinski and Dydek for in-place
+rotations uses O(u + v + gcd(u,v)) assignments which is
+better than our O(3 * (u+v)) as gcd(u,v) <= u.
+
+
+Stable sorting by SymMerge and BlockSwap rotations
+
+SymMerge complexity for same-size input M = N:
+Calls to Less: O(M*log(N/M+1)) = O(N*log(2)) = O(N)
+Calls to Swap: O((M+N)*log(M)) = O(2*N*log(N)) = O(N*log(N))
+
+(The following argument glosses over a missing -1 and other details
+which do not impact the final result.)
+
+Let n = data.Len(). Assume n = 2^k.
+
+Plain merge sort performs log(n) = k iterations.
+On iteration i the algorithm merges 2^(k-i) blocks, each of size 2^i.
+
+Thus iteration i of merge sort performs:
+Calls to Less O(2^(k-i) * 2^i) = O(2^k) = O(2^log(n)) = O(n)
+Calls to Swap O(2^(k-i) * 2^i * log(2^i)) = O(2^k * i) = O(n*i)
+
+In total k = log(n) iterations are performed; so in total:
+Calls to Less O(log(n) * n)
+Calls to Swap O(n + 2*n + 3*n + ... + (k-1)*n + k*n)
+ = O((k/2) * k * n) = O(n * k^2) = O(n * log^2(n))
+
+
+The above results should generalize to arbitrary n = 2^k + p
+and should not be influenced by the initial insertion sort phase:
+Insertion sort is O(n^2) on Swap and Less, thus O(bs^2) per block of
+size bs at n/bs blocks: O(bs*n) Swaps and Less during insertion sort.
+Merge sort iterations start at i = log(bs). With t = log(bs) constant:
+Calls to Less O((log(n)-t) * n + bs*n) = O(log(n)*n + (bs-t)*n)
+ = O(n * log(n))
+Calls to Swap O(n * log^2(n) - (t^2+t)/2*n) = O(n * log^2(n))
+
+*/
diff --git a/contrib/go/_std_1.19/src/sort/zsortfunc.go b/contrib/go/_std_1.19/src/sort/zsortfunc.go
new file mode 100644
index 0000000000..49b6169b97
--- /dev/null
+++ b/contrib/go/_std_1.19/src/sort/zsortfunc.go
@@ -0,0 +1,479 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sort
+
+// insertionSort_func sorts data[a:b] using insertion sort.
+func insertionSort_func(data lessSwap, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && data.Less(j, j-1); j-- {
+ data.Swap(j, j-1)
+ }
+ }
+}
+
+// siftDown_func implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDown_func(data lessSwap, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && data.Less(first+child, first+child+1) {
+ child++
+ }
+ if !data.Less(first+root, first+child) {
+ return
+ }
+ data.Swap(first+root, first+child)
+ root = child
+ }
+}
+
+func heapSort_func(data lessSwap, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDown_func(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data.Swap(first, first+i)
+ siftDown_func(data, lo, i, first)
+ }
+}
+
+// pdqsort_func sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsort_func(data lessSwap, a, b, limit int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSort_func(data, a, b)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSort_func(data, a, b)
+ return
+ }
+
+ // If the last partitioning was imbalanced, try breaking patterns.
+ if !wasBalanced {
+ breakPatterns_func(data, a, b)
+ limit--
+ }
+
+ pivot, hint := choosePivot_func(data, a, b)
+ if hint == decreasingHint {
+ reverseRange_func(data, a, b)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSort_func(data, a, b) {
+ return
+ }
+ }
+
+ // The slice probably contains many duplicate elements; partition it into
+ // elements equal to the pivot and elements greater than the pivot.
+ if a > 0 && !data.Less(a-1, pivot) {
+ mid := partitionEqual_func(data, a, b, pivot)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partition_func(data, a, b, pivot)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsort_func(data, a, mid, limit)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsort_func(data, mid+1, b, limit)
+ b = mid
+ }
+ }
+}
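+
+// A caller sketch (illustrative): the recursion budget is expected to
+// start near log2(n), so that after roughly log2(n) badly unbalanced
+// partitions the sort degrades gracefully to heapsort:
+//
+//	// with data a lessSwap and n the number of elements:
+//	pdqsort_func(data, 0, n, bits.Len(uint(n)))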
+
+// partition_func does one quicksort partition.
+// Let p = data[pivot].
+// It moves elements in data[a:b] around so that data[i] < p for i < newpivot
+// and data[j] >= p for j > newpivot.
+// On return, data[newpivot] = p.
+func partition_func(data lessSwap, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+ data.Swap(a, pivot)
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && data.Less(i, a) {
+ i++
+ }
+ for i <= j && !data.Less(j, a) {
+ j--
+ }
+ if i > j {
+ data.Swap(j, a)
+ return j, true
+ }
+ data.Swap(i, j)
+ i++
+ j--
+
+ for {
+ for i <= j && data.Less(i, a) {
+ i++
+ }
+ for i <= j && !data.Less(j, a) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data.Swap(i, j)
+ i++
+ j--
+ }
+ data.Swap(j, a)
+ return j, false
+}
+
+// partitionEqual_func partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqual_func(data lessSwap, a, b, pivot int) (newpivot int) {
+ data.Swap(a, pivot)
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !data.Less(a, i) {
+ i++
+ }
+ for i <= j && data.Less(a, j) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data.Swap(i, j)
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSort_func partially sorts a slice and returns true if the slice is sorted at the end.
+func partialInsertionSort_func(data lessSwap, a, b int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !data.Less(i, i-1) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data.Swap(i, i-1)
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !data.Less(j, j-1) {
+ break
+ }
+ data.Swap(j, j-1)
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !data.Less(j, j-1) {
+ break
+ }
+ data.Swap(j, j-1)
+ }
+ }
+ }
+ return false
+}
+
+// breakPatterns_func scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatterns_func(data lessSwap, a, b int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data.Swap(idx, a+other)
+ }
+ }
+}
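+
+// Worked numbers for the masking above (illustrative): with length == 100,
+// modulus == 128, so random.Next() & 127 lies in [0, 128); results in
+// [100, 128) are folded back by subtracting length once, which suffices
+// because modulus <= 2*length always holds.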
+
+// choosePivot_func chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivot_func(data lessSwap, a, b int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacent_func(data, i, &swaps)
+ j = medianAdjacent_func(data, j, &swaps)
+ k = medianAdjacent_func(data, k, &swaps)
+ }
+ // Find the median among i, j, k and store it in j.
+ j = median_func(data, i, j, k, &swaps)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2_func returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2_func(data lessSwap, a, b int, swaps *int) (int, int) {
+ if data.Less(b, a) {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// median_func returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func median_func(data lessSwap, a, b, c int, swaps *int) int {
+ a, b = order2_func(data, a, b, swaps)
+ b, c = order2_func(data, b, c, swaps)
+ a, b = order2_func(data, a, b, swaps)
+ return b
+}
+
+// medianAdjacent_func finds the median of data[a - 1], data[a], data[a + 1] and returns its index.
+func medianAdjacent_func(data lessSwap, a int, swaps *int) int {
+ return median_func(data, a-1, a, a+1, swaps)
+}
+
+func reverseRange_func(data lessSwap, a, b int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data.Swap(i, j)
+ i++
+ j--
+ }
+}
+
+func swapRange_func(data lessSwap, a, b, n int) {
+ for i := 0; i < n; i++ {
+ data.Swap(a+i, b+i)
+ }
+}
+
+func stable_func(data lessSwap, n int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSort_func(data, a, b)
+ a = b
+ b += blockSize
+ }
+ insertionSort_func(data, a, n)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMerge_func(data, a, a+blockSize, b)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMerge_func(data, a, m, n)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMerge_func merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Without loss of generality, M < N.
+// The recursion depth is bounded by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M,N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMerge_func(data lessSwap, a, m, b int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if data.Less(h, a) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data.Swap(k, k+1)
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !data.Less(m, h) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data.Swap(k, k-1)
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !data.Less(p-c, c) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotate_func(data, start, m, end)
+ }
+ if a < start && start < mid {
+ symMerge_func(data, a, start, mid)
+ }
+ if mid < end && end < b {
+ symMerge_func(data, mid, end, b)
+ }
+}
+
+// rotate_func rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotate_func(data lessSwap, a, m, b int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRange_func(data, m-i, m, j)
+ i -= j
+ } else {
+ swapRange_func(data, m-i, m+j-i, i)
+ j -= i
+ }
+ }
+ // i == j
+ swapRange_func(data, m-i, m, i)
+}
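+
+// A short worked trace (illustrative): rotating u = [1 2] and v = [3 4 5]
+// inside data = [1 2 3 4 5] with a=0, m=2, b=5:
+//
+//	[1 2 3 4 5] -> [4 5 3 1 2]  swapRange_func(data, 0, 3, 2); j: 3 -> 1
+//	[4 5 3 1 2] -> [3 5 4 1 2]  swapRange_func(data, 0, 2, 1); i: 2 -> 1
+//	[3 5 4 1 2] -> [3 4 5 1 2]  final swapRange_func(data, 1, 2, 1)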
diff --git a/contrib/go/_std_1.19/src/sort/zsortinterface.go b/contrib/go/_std_1.19/src/sort/zsortinterface.go
new file mode 100644
index 0000000000..51fa5032e9
--- /dev/null
+++ b/contrib/go/_std_1.19/src/sort/zsortinterface.go
@@ -0,0 +1,479 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sort
+
+// insertionSort sorts data[a:b] using insertion sort.
+func insertionSort(data Interface, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && data.Less(j, j-1); j-- {
+ data.Swap(j, j-1)
+ }
+ }
+}
+
+// siftDown implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDown(data Interface, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && data.Less(first+child, first+child+1) {
+ child++
+ }
+ if !data.Less(first+root, first+child) {
+ return
+ }
+ data.Swap(first+root, first+child)
+ root = child
+ }
+}
+
+func heapSort(data Interface, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDown(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data.Swap(first, first+i)
+ siftDown(data, lo, i, first)
+ }
+}
+
+// pdqsort sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsort(data Interface, a, b, limit int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSort(data, a, b)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSort(data, a, b)
+ return
+ }
+
+ // If the last partitioning was imbalanced, try breaking patterns.
+ if !wasBalanced {
+ breakPatterns(data, a, b)
+ limit--
+ }
+
+ pivot, hint := choosePivot(data, a, b)
+ if hint == decreasingHint {
+ reverseRange(data, a, b)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSort(data, a, b) {
+ return
+ }
+ }
+
+ // The slice probably contains many duplicate elements; partition it into
+ // elements equal to the pivot and elements greater than the pivot.
+ if a > 0 && !data.Less(a-1, pivot) {
+ mid := partitionEqual(data, a, b, pivot)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partition(data, a, b, pivot)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsort(data, a, mid, limit)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsort(data, mid+1, b, limit)
+ b = mid
+ }
+ }
+}
+
+// partition does one quicksort partition.
+// Let p = data[pivot].
+// It moves elements in data[a:b] around so that data[i] < p for i < newpivot
+// and data[j] >= p for j > newpivot.
+// On return, data[newpivot] = p.
+func partition(data Interface, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+ data.Swap(a, pivot)
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && data.Less(i, a) {
+ i++
+ }
+ for i <= j && !data.Less(j, a) {
+ j--
+ }
+ if i > j {
+ data.Swap(j, a)
+ return j, true
+ }
+ data.Swap(i, j)
+ i++
+ j--
+
+ for {
+ for i <= j && data.Less(i, a) {
+ i++
+ }
+ for i <= j && !data.Less(j, a) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data.Swap(i, j)
+ i++
+ j--
+ }
+ data.Swap(j, a)
+ return j, false
+}
+
+// partitionEqual partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqual(data Interface, a, b, pivot int) (newpivot int) {
+ data.Swap(a, pivot)
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !data.Less(a, i) {
+ i++
+ }
+ for i <= j && data.Less(a, j) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data.Swap(i, j)
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSort partially sorts a slice and returns true if the slice is sorted at the end.
+func partialInsertionSort(data Interface, a, b int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !data.Less(i, i-1) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data.Swap(i, i-1)
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !data.Less(j, j-1) {
+ break
+ }
+ data.Swap(j, j-1)
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !data.Less(j, j-1) {
+ break
+ }
+ data.Swap(j, j-1)
+ }
+ }
+ }
+ return false
+}
+
+// breakPatterns scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatterns(data Interface, a, b int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data.Swap(idx, a+other)
+ }
+ }
+}
+
+// choosePivot chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivot(data Interface, a, b int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacent(data, i, &swaps)
+ j = medianAdjacent(data, j, &swaps)
+ k = medianAdjacent(data, k, &swaps)
+ }
+ // Find the median among i, j, k and store it in j.
+ j = median(data, i, j, k, &swaps)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2 returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2(data Interface, a, b int, swaps *int) (int, int) {
+ if data.Less(b, a) {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// median returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func median(data Interface, a, b, c int, swaps *int) int {
+ a, b = order2(data, a, b, swaps)
+ b, c = order2(data, b, c, swaps)
+ a, b = order2(data, a, b, swaps)
+ return b
+}
+
+// medianAdjacent finds the median of data[a - 1], data[a], data[a + 1] and returns its index.
+func medianAdjacent(data Interface, a int, swaps *int) int {
+ return median(data, a-1, a, a+1, swaps)
+}
+
+func reverseRange(data Interface, a, b int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data.Swap(i, j)
+ i++
+ j--
+ }
+}
+
+func swapRange(data Interface, a, b, n int) {
+ for i := 0; i < n; i++ {
+ data.Swap(a+i, b+i)
+ }
+}
+
+func stable(data Interface, n int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSort(data, a, b)
+ a = b
+ b += blockSize
+ }
+ insertionSort(data, a, n)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMerge(data, a, a+blockSize, b)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMerge(data, a, m, n)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMerge merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Without loss of generality, M < N.
+// The recursion depth is bounded by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M,N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMerge(data Interface, a, m, b int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if data.Less(h, a) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data.Swap(k, k+1)
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !data.Less(m, h) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data.Swap(k, k-1)
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !data.Less(p-c, c) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotate(data, start, m, end)
+ }
+ if a < start && start < mid {
+ symMerge(data, a, start, mid)
+ }
+ if mid < end && end < b {
+ symMerge(data, mid, end, b)
+ }
+}
+
+// rotate rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotate(data Interface, a, m, b int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRange(data, m-i, m, j)
+ i -= j
+ } else {
+ swapRange(data, m-i, m+j-i, i)
+ j -= i
+ }
+ }
+ // i == j
+ swapRange(data, m-i, m, i)
+}
diff --git a/contrib/go/_std_1.18/src/strconv/atob.go b/contrib/go/_std_1.19/src/strconv/atob.go
index 0a495008d7..0a495008d7 100644
--- a/contrib/go/_std_1.18/src/strconv/atob.go
+++ b/contrib/go/_std_1.19/src/strconv/atob.go
diff --git a/contrib/go/_std_1.18/src/strconv/atoc.go b/contrib/go/_std_1.19/src/strconv/atoc.go
index 85c7bafefa..85c7bafefa 100644
--- a/contrib/go/_std_1.18/src/strconv/atoc.go
+++ b/contrib/go/_std_1.19/src/strconv/atoc.go
diff --git a/contrib/go/_std_1.19/src/strconv/atof.go b/contrib/go/_std_1.19/src/strconv/atof.go
new file mode 100644
index 0000000000..8fc90425f6
--- /dev/null
+++ b/contrib/go/_std_1.19/src/strconv/atof.go
@@ -0,0 +1,709 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package strconv
+
+// decimal to binary floating point conversion.
+// Algorithm:
+// 1) Store input in multiprecision decimal.
+// 2) Multiply/divide decimal by powers of two until in range [0.5, 1)
+// 3) Multiply by 2^precision and round to get mantissa.
+
+import "math"
+
+var optimize = true // set to false to force slow-path conversions for testing
+
+// commonPrefixLenIgnoreCase returns the length of the common
+// prefix of s and prefix, with the character case of s ignored.
+// The prefix argument must be all lower-case.
+func commonPrefixLenIgnoreCase(s, prefix string) int {
+ n := len(prefix)
+ if n > len(s) {
+ n = len(s)
+ }
+ for i := 0; i < n; i++ {
+ c := s[i]
+ if 'A' <= c && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ if c != prefix[i] {
+ return i
+ }
+ }
+ return n
+}
+
+// special returns the floating-point value for the special,
+// possibly signed floating-point representations inf, infinity,
+// and NaN. The result is ok if a prefix of s contains one
+// of these representations and n is the length of that prefix.
+// The character case is ignored.
+func special(s string) (f float64, n int, ok bool) {
+ if len(s) == 0 {
+ return 0, 0, false
+ }
+ sign := 1
+ nsign := 0
+ switch s[0] {
+ case '+', '-':
+ if s[0] == '-' {
+ sign = -1
+ }
+ nsign = 1
+ s = s[1:]
+ fallthrough
+ case 'i', 'I':
+ n := commonPrefixLenIgnoreCase(s, "infinity")
+ // Anything longer than "inf" is ok, but if we
+ // don't have "infinity", only consume "inf".
+ if 3 < n && n < 8 {
+ n = 3
+ }
+ if n == 3 || n == 8 {
+ return math.Inf(sign), nsign + n, true
+ }
+ case 'n', 'N':
+ if commonPrefixLenIgnoreCase(s, "nan") == 3 {
+ return math.NaN(), 3, true
+ }
+ }
+ return 0, 0, false
+}
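+
+// Illustrative results: special("inf") == (+Inf, 3, true),
+// special("-Infinity") == (-Inf, 9, true), and special("nan") == (NaN, 3, true);
+// inputs that match none of these prefixes return ok == false.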
+
+func (b *decimal) set(s string) (ok bool) {
+ i := 0
+ b.neg = false
+ b.trunc = false
+
+ // optional sign
+ if i >= len(s) {
+ return
+ }
+ switch {
+ case s[i] == '+':
+ i++
+ case s[i] == '-':
+ b.neg = true
+ i++
+ }
+
+ // digits
+ sawdot := false
+ sawdigits := false
+ for ; i < len(s); i++ {
+ switch {
+ case s[i] == '_':
+ // readFloat already checked underscores
+ continue
+ case s[i] == '.':
+ if sawdot {
+ return
+ }
+ sawdot = true
+ b.dp = b.nd
+ continue
+
+ case '0' <= s[i] && s[i] <= '9':
+ sawdigits = true
+ if s[i] == '0' && b.nd == 0 { // ignore leading zeros
+ b.dp--
+ continue
+ }
+ if b.nd < len(b.d) {
+ b.d[b.nd] = s[i]
+ b.nd++
+ } else if s[i] != '0' {
+ b.trunc = true
+ }
+ continue
+ }
+ break
+ }
+ if !sawdigits {
+ return
+ }
+ if !sawdot {
+ b.dp = b.nd
+ }
+
+ // optional exponent moves decimal point.
+ // if we read a very large, very long number,
+ // just be sure to move the decimal point by
+ // a lot (say, 100000). it doesn't matter if it's
+ // not the exact number.
+ if i < len(s) && lower(s[i]) == 'e' {
+ i++
+ if i >= len(s) {
+ return
+ }
+ esign := 1
+ if s[i] == '+' {
+ i++
+ } else if s[i] == '-' {
+ i++
+ esign = -1
+ }
+ if i >= len(s) || s[i] < '0' || s[i] > '9' {
+ return
+ }
+ e := 0
+ for ; i < len(s) && ('0' <= s[i] && s[i] <= '9' || s[i] == '_'); i++ {
+ if s[i] == '_' {
+ // readFloat already checked underscores
+ continue
+ }
+ if e < 10000 {
+ e = e*10 + int(s[i]) - '0'
+ }
+ }
+ b.dp += e * esign
+ }
+
+ if i != len(s) {
+ return
+ }
+
+ ok = true
+ return
+}
+
+// readFloat reads a decimal or hexadecimal mantissa and exponent from a float
+// string representation in s; the number may be followed by other characters.
+// readFloat reports the number of bytes consumed (i), and whether the number
+// is valid (ok).
+func readFloat(s string) (mantissa uint64, exp int, neg, trunc, hex bool, i int, ok bool) {
+ underscores := false
+
+ // optional sign
+ if i >= len(s) {
+ return
+ }
+ switch {
+ case s[i] == '+':
+ i++
+ case s[i] == '-':
+ neg = true
+ i++
+ }
+
+ // digits
+ base := uint64(10)
+ maxMantDigits := 19 // 10^19 fits in uint64
+ expChar := byte('e')
+ if i+2 < len(s) && s[i] == '0' && lower(s[i+1]) == 'x' {
+ base = 16
+ maxMantDigits = 16 // 16^16 - 1 fits in uint64
+ i += 2
+ expChar = 'p'
+ hex = true
+ }
+ sawdot := false
+ sawdigits := false
+ nd := 0
+ ndMant := 0
+ dp := 0
+loop:
+ for ; i < len(s); i++ {
+ switch c := s[i]; true {
+ case c == '_':
+ underscores = true
+ continue
+
+ case c == '.':
+ if sawdot {
+ break loop
+ }
+ sawdot = true
+ dp = nd
+ continue
+
+ case '0' <= c && c <= '9':
+ sawdigits = true
+ if c == '0' && nd == 0 { // ignore leading zeros
+ dp--
+ continue
+ }
+ nd++
+ if ndMant < maxMantDigits {
+ mantissa *= base
+ mantissa += uint64(c - '0')
+ ndMant++
+ } else if c != '0' {
+ trunc = true
+ }
+ continue
+
+ case base == 16 && 'a' <= lower(c) && lower(c) <= 'f':
+ sawdigits = true
+ nd++
+ if ndMant < maxMantDigits {
+ mantissa *= 16
+ mantissa += uint64(lower(c) - 'a' + 10)
+ ndMant++
+ } else {
+ trunc = true
+ }
+ continue
+ }
+ break
+ }
+ if !sawdigits {
+ return
+ }
+ if !sawdot {
+ dp = nd
+ }
+
+ if base == 16 {
+ dp *= 4
+ ndMant *= 4
+ }
+
+ // optional exponent moves decimal point.
+ // if we read a very large, very long number,
+ // just be sure to move the decimal point by
+ // a lot (say, 100000). it doesn't matter if it's
+ // not the exact number.
+ if i < len(s) && lower(s[i]) == expChar {
+ i++
+ if i >= len(s) {
+ return
+ }
+ esign := 1
+ if s[i] == '+' {
+ i++
+ } else if s[i] == '-' {
+ i++
+ esign = -1
+ }
+ if i >= len(s) || s[i] < '0' || s[i] > '9' {
+ return
+ }
+ e := 0
+ for ; i < len(s) && ('0' <= s[i] && s[i] <= '9' || s[i] == '_'); i++ {
+ if s[i] == '_' {
+ underscores = true
+ continue
+ }
+ if e < 10000 {
+ e = e*10 + int(s[i]) - '0'
+ }
+ }
+ dp += e * esign
+ } else if base == 16 {
+ // Must have exponent.
+ return
+ }
+
+ if mantissa != 0 {
+ exp = dp - ndMant
+ }
+
+ if underscores && !underscoreOK(s[:i]) {
+ return
+ }
+
+ ok = true
+ return
+}
+
+// decimal power of ten to binary power of two.
+var powtab = []int{1, 3, 6, 9, 13, 16, 19, 23, 26}
+
+func (d *decimal) floatBits(flt *floatInfo) (b uint64, overflow bool) {
+ var exp int
+ var mant uint64
+
+ // Zero is always a special case.
+ if d.nd == 0 {
+ mant = 0
+ exp = flt.bias
+ goto out
+ }
+
+ // Obvious overflow/underflow.
+ // These bounds are for 64-bit floats.
+ // Will have to change if we want to support 80-bit floats in the future.
+ if d.dp > 310 {
+ goto overflow
+ }
+ if d.dp < -330 {
+ // zero
+ mant = 0
+ exp = flt.bias
+ goto out
+ }
+
+ // Scale by powers of two until in range [0.5, 1.0)
+ exp = 0
+ for d.dp > 0 {
+ var n int
+ if d.dp >= len(powtab) {
+ n = 27
+ } else {
+ n = powtab[d.dp]
+ }
+ d.Shift(-n)
+ exp += n
+ }
+ for d.dp < 0 || d.dp == 0 && d.d[0] < '5' {
+ var n int
+ if -d.dp >= len(powtab) {
+ n = 27
+ } else {
+ n = powtab[-d.dp]
+ }
+ d.Shift(n)
+ exp -= n
+ }
+
+ // Our range is [0.5,1) but floating point range is [1,2).
+ exp--
+
+ // Minimum representable exponent is flt.bias+1.
+ // If the exponent is smaller, move it up and
+ // adjust d accordingly.
+ if exp < flt.bias+1 {
+ n := flt.bias + 1 - exp
+ d.Shift(-n)
+ exp += n
+ }
+
+ if exp-flt.bias >= 1<<flt.expbits-1 {
+ goto overflow
+ }
+
+ // Extract 1+flt.mantbits bits.
+ d.Shift(int(1 + flt.mantbits))
+ mant = d.RoundedInteger()
+
+ // Rounding might have added a bit; shift down.
+ if mant == 2<<flt.mantbits {
+ mant >>= 1
+ exp++
+ if exp-flt.bias >= 1<<flt.expbits-1 {
+ goto overflow
+ }
+ }
+
+ // Denormalized?
+ if mant&(1<<flt.mantbits) == 0 {
+ exp = flt.bias
+ }
+ goto out
+
+overflow:
+ // ±Inf
+ mant = 0
+ exp = 1<<flt.expbits - 1 + flt.bias
+ overflow = true
+
+out:
+ // Assemble bits.
+ bits := mant & (uint64(1)<<flt.mantbits - 1)
+ bits |= uint64((exp-flt.bias)&(1<<flt.expbits-1)) << flt.mantbits
+ if d.neg {
+ bits |= 1 << flt.mantbits << flt.expbits
+ }
+ return bits, overflow
+}
+
+// Exact powers of 10.
+var float64pow10 = []float64{
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+ 1e20, 1e21, 1e22,
+}
+var float32pow10 = []float32{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10}
+
+// If it is possible to convert the decimal representation to a 64-bit float f
+// exactly, entirely in floating-point math, do so, avoiding the expense of
+// decimalToFloatBits. Three common cases:
+//
+// value is exact integer
+// value is exact integer * exact power of ten
+// value is exact integer / exact power of ten
+//
+// These all produce potentially inexact but correctly rounded answers.
+func atof64exact(mantissa uint64, exp int, neg bool) (f float64, ok bool) {
+ if mantissa>>float64info.mantbits != 0 {
+ return
+ }
+ f = float64(mantissa)
+ if neg {
+ f = -f
+ }
+ switch {
+ case exp == 0:
+ // an integer.
+ return f, true
+ // Exact integers are <= 10^15.
+ // Exact powers of ten are <= 10^22.
+ case exp > 0 && exp <= 15+22: // int * 10^k
+ // If exponent is big but number of digits is not,
+ // can move a few zeros into the integer part.
+ if exp > 22 {
+ f *= float64pow10[exp-22]
+ exp = 22
+ }
+ if f > 1e15 || f < -1e15 {
+ // the exponent was really too large.
+ return
+ }
+ return f * float64pow10[exp], true
+ case exp < 0 && exp >= -22: // int / 10^k
+ return f / float64pow10[-exp], true
+ }
+ return
+}
+
+// If it is possible to compute mantissa*10^exp as a 32-bit float f exactly,
+// entirely in floating-point math, do so, avoiding the machinery above.
+func atof32exact(mantissa uint64, exp int, neg bool) (f float32, ok bool) {
+ if mantissa>>float32info.mantbits != 0 {
+ return
+ }
+ f = float32(mantissa)
+ if neg {
+ f = -f
+ }
+ switch {
+ case exp == 0:
+ return f, true
+ // Exact integers are <= 10^7.
+ // Exact powers of ten are <= 10^10.
+ case exp > 0 && exp <= 7+10: // int * 10^k
+ // If exponent is big but number of digits is not,
+ // can move a few zeros into the integer part.
+ if exp > 10 {
+ f *= float32pow10[exp-10]
+ exp = 10
+ }
+ if f > 1e7 || f < -1e7 {
+ // the exponent was really too large.
+ return
+ }
+ return f * float32pow10[exp], true
+ case exp < 0 && exp >= -10: // int / 10^k
+ return f / float32pow10[-exp], true
+ }
+ return
+}
+
+// atofHex converts the hex floating-point string s
+// to a rounded float32 or float64 value (depending on flt==&float32info or flt==&float64info)
+// and returns it as a float64.
+// The string s has already been parsed into a mantissa, exponent, and sign (neg==true for negative).
+// If trunc is true, trailing non-zero bits have been omitted from the mantissa.
+func atofHex(s string, flt *floatInfo, mantissa uint64, exp int, neg, trunc bool) (float64, error) {
+ maxExp := 1<<flt.expbits + flt.bias - 2
+ minExp := flt.bias + 1
+ exp += int(flt.mantbits) // mantissa now implicitly divided by 2^mantbits.
+
+ // Shift mantissa and exponent to bring representation into float range.
+ // Eventually we want a mantissa with a leading 1-bit followed by mantbits other bits.
+ // For rounding, we need two more, where the bottom bit represents
+ // whether that bit or any later bit was non-zero.
+ // (If the mantissa has already lost non-zero bits, trunc is true,
+ // and we OR in a 1 below after shifting left appropriately.)
+ for mantissa != 0 && mantissa>>(flt.mantbits+2) == 0 {
+ mantissa <<= 1
+ exp--
+ }
+ if trunc {
+ mantissa |= 1
+ }
+ for mantissa>>(1+flt.mantbits+2) != 0 {
+ mantissa = mantissa>>1 | mantissa&1
+ exp++
+ }
+
+ // If exponent is too negative,
+ // denormalize in hopes of making it representable.
+ // (The -2 is for the rounding bits.)
+ for mantissa > 1 && exp < minExp-2 {
+ mantissa = mantissa>>1 | mantissa&1
+ exp++
+ }
+
+ // Round using two bottom bits.
+ round := mantissa & 3
+ mantissa >>= 2
+ round |= mantissa & 1 // round to even (round up if mantissa is odd)
+ exp += 2
+ if round == 3 {
+ mantissa++
+ if mantissa == 1<<(1+flt.mantbits) {
+ mantissa >>= 1
+ exp++
+ }
+ }
+
+ if mantissa>>flt.mantbits == 0 { // Denormal or zero.
+ exp = flt.bias
+ }
+ var err error
+ if exp > maxExp { // infinity and range error
+ mantissa = 1 << flt.mantbits
+ exp = maxExp + 1
+ err = rangeError(fnParseFloat, s)
+ }
+
+ bits := mantissa & (1<<flt.mantbits - 1)
+ bits |= uint64((exp-flt.bias)&(1<<flt.expbits-1)) << flt.mantbits
+ if neg {
+ bits |= 1 << flt.mantbits << flt.expbits
+ }
+ if flt == &float32info {
+ return float64(math.Float32frombits(uint32(bits))), err
+ }
+ return math.Float64frombits(bits), err
+}
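+
+// A sketch of the effect through the public API (values illustrative):
+// strconv.ParseFloat("0x1.8p1", 64) is exactly 1.5 * 2^1 == 3, needing no
+// rounding, while a hex mantissa with more significant bits than the format
+// holds is rounded to nearest, ties to even, using the two guard bits kept
+// above.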
+
+const fnParseFloat = "ParseFloat"
+
+func atof32(s string) (f float32, n int, err error) {
+ if val, n, ok := special(s); ok {
+ return float32(val), n, nil
+ }
+
+ mantissa, exp, neg, trunc, hex, n, ok := readFloat(s)
+ if !ok {
+ return 0, n, syntaxError(fnParseFloat, s)
+ }
+
+ if hex {
+ f, err := atofHex(s[:n], &float32info, mantissa, exp, neg, trunc)
+ return float32(f), n, err
+ }
+
+ if optimize {
+ // Try pure floating-point arithmetic conversion, and if that fails,
+ // the Eisel-Lemire algorithm.
+ if !trunc {
+ if f, ok := atof32exact(mantissa, exp, neg); ok {
+ return f, n, nil
+ }
+ }
+ f, ok := eiselLemire32(mantissa, exp, neg)
+ if ok {
+ if !trunc {
+ return f, n, nil
+ }
+ // Even if the mantissa was truncated, we may
+ // have found the correct result. Confirm by
+ // converting the upper mantissa bound.
+ fUp, ok := eiselLemire32(mantissa+1, exp, neg)
+ if ok && f == fUp {
+ return f, n, nil
+ }
+ }
+ }
+
+ // Slow fallback.
+ var d decimal
+ if !d.set(s[:n]) {
+ return 0, n, syntaxError(fnParseFloat, s)
+ }
+ b, ovf := d.floatBits(&float32info)
+ f = math.Float32frombits(uint32(b))
+ if ovf {
+ err = rangeError(fnParseFloat, s)
+ }
+ return f, n, err
+}
+
+func atof64(s string) (f float64, n int, err error) {
+ if val, n, ok := special(s); ok {
+ return val, n, nil
+ }
+
+ mantissa, exp, neg, trunc, hex, n, ok := readFloat(s)
+ if !ok {
+ return 0, n, syntaxError(fnParseFloat, s)
+ }
+
+ if hex {
+ f, err := atofHex(s[:n], &float64info, mantissa, exp, neg, trunc)
+ return f, n, err
+ }
+
+ if optimize {
+ // Try pure floating-point arithmetic conversion, and if that fails,
+ // the Eisel-Lemire algorithm.
+ if !trunc {
+ if f, ok := atof64exact(mantissa, exp, neg); ok {
+ return f, n, nil
+ }
+ }
+ f, ok := eiselLemire64(mantissa, exp, neg)
+ if ok {
+ if !trunc {
+ return f, n, nil
+ }
+ // Even if the mantissa was truncated, we may
+ // have found the correct result. Confirm by
+ // converting the upper mantissa bound.
+ fUp, ok := eiselLemire64(mantissa+1, exp, neg)
+ if ok && f == fUp {
+ return f, n, nil
+ }
+ }
+ }
+
+ // Slow fallback.
+ var d decimal
+ if !d.set(s[:n]) {
+ return 0, n, syntaxError(fnParseFloat, s)
+ }
+ b, ovf := d.floatBits(&float64info)
+ f = math.Float64frombits(b)
+ if ovf {
+ err = rangeError(fnParseFloat, s)
+ }
+ return f, n, err
+}
+
+// ParseFloat converts the string s to a floating-point number
+// with the precision specified by bitSize: 32 for float32, or 64 for float64.
+// When bitSize=32, the result still has type float64, but it will be
+// convertible to float32 without changing its value.
+//
+// ParseFloat accepts decimal and hexadecimal floating-point numbers
+// as defined by the Go syntax for [floating-point literals].
+// If s is well-formed and near a valid floating-point number,
+// ParseFloat returns the nearest floating-point number rounded
+// using IEEE754 unbiased rounding.
+// (Parsing a hexadecimal floating-point value only rounds when
+// there are more bits in the hexadecimal representation than
+// will fit in the mantissa.)
+//
+// The errors that ParseFloat returns have concrete type *NumError
+// and include err.Num = s.
+//
+// If s is not syntactically well-formed, ParseFloat returns err.Err = ErrSyntax.
+//
+// If s is syntactically well-formed but is more than 1/2 ULP
+// away from the largest floating point number of the given size,
+// ParseFloat returns f = ±Inf, err.Err = ErrRange.
+//
+// ParseFloat recognizes the string "NaN", and the (possibly signed) strings "Inf" and "Infinity"
+// as their respective special floating point values. It ignores case when matching.
+//
+// [floating-point literals]: https://go.dev/ref/spec#Floating-point_literals
+func ParseFloat(s string, bitSize int) (float64, error) {
+ f, n, err := parseFloatPrefix(s, bitSize)
+ if n != len(s) && (err == nil || err.(*NumError).Err != ErrSyntax) {
+ return 0, syntaxError(fnParseFloat, s)
+ }
+ return f, err
+}
+
+func parseFloatPrefix(s string, bitSize int) (float64, int, error) {
+ if bitSize == 32 {
+ f, n, err := atof32(s)
+ return float64(f), n, err
+ }
+ return atof64(s)
+}
diff --git a/contrib/go/_std_1.19/src/strconv/atoi.go b/contrib/go/_std_1.19/src/strconv/atoi.go
new file mode 100644
index 0000000000..be08f93356
--- /dev/null
+++ b/contrib/go/_std_1.19/src/strconv/atoi.go
@@ -0,0 +1,316 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package strconv
+
+import "errors"
+
+// lower(c) is a lower-case letter if and only if
+// c is either that lower-case letter or the equivalent upper-case letter.
+// Instead of writing c == 'x' || c == 'X' one can write lower(c) == 'x'.
+// Note that lower of non-letters can produce other non-letters.
+func lower(c byte) byte {
+ return c | ('x' - 'X')
+}
+
+// ErrRange indicates that a value is out of range for the target type.
+var ErrRange = errors.New("value out of range")
+
+// ErrSyntax indicates that a value does not have the right syntax for the target type.
+var ErrSyntax = errors.New("invalid syntax")
+
+// A NumError records a failed conversion.
+type NumError struct {
+ Func string // the failing function (ParseBool, ParseInt, ParseUint, ParseFloat, ParseComplex)
+ Num string // the input
+ Err error // the reason the conversion failed (e.g. ErrRange, ErrSyntax, etc.)
+}
+
+func (e *NumError) Error() string {
+ return "strconv." + e.Func + ": " + "parsing " + Quote(e.Num) + ": " + e.Err.Error()
+}
+
+func (e *NumError) Unwrap() error { return e.Err }
+
+func syntaxError(fn, str string) *NumError {
+ return &NumError{fn, str, ErrSyntax}
+}
+
+func rangeError(fn, str string) *NumError {
+ return &NumError{fn, str, ErrRange}
+}
+
+func baseError(fn, str string, base int) *NumError {
+ return &NumError{fn, str, errors.New("invalid base " + Itoa(base))}
+}
+
+func bitSizeError(fn, str string, bitSize int) *NumError {
+ return &NumError{fn, str, errors.New("invalid bit size " + Itoa(bitSize))}
+}
+
+const intSize = 32 << (^uint(0) >> 63)
+
+// IntSize is the size in bits of an int or uint value.
+const IntSize = intSize
+
+const maxUint64 = 1<<64 - 1
+
+// ParseUint is like ParseInt but for unsigned numbers.
+//
+// A sign prefix is not permitted.
+func ParseUint(s string, base int, bitSize int) (uint64, error) {
+ const fnParseUint = "ParseUint"
+
+ if s == "" {
+ return 0, syntaxError(fnParseUint, s)
+ }
+
+ base0 := base == 0
+
+ s0 := s
+ switch {
+ case 2 <= base && base <= 36:
+ // valid base; nothing to do
+
+ case base == 0:
+ // Look for octal, hex prefix.
+ base = 10
+ if s[0] == '0' {
+ switch {
+ case len(s) >= 3 && lower(s[1]) == 'b':
+ base = 2
+ s = s[2:]
+ case len(s) >= 3 && lower(s[1]) == 'o':
+ base = 8
+ s = s[2:]
+ case len(s) >= 3 && lower(s[1]) == 'x':
+ base = 16
+ s = s[2:]
+ default:
+ base = 8
+ s = s[1:]
+ }
+ }
+
+ default:
+ return 0, baseError(fnParseUint, s0, base)
+ }
+
+ if bitSize == 0 {
+ bitSize = IntSize
+ } else if bitSize < 0 || bitSize > 64 {
+ return 0, bitSizeError(fnParseUint, s0, bitSize)
+ }
+
+ // Cutoff is the smallest number such that cutoff*base > maxUint64.
+ // Use compile-time constants for common cases.
+ var cutoff uint64
+ switch base {
+ case 10:
+ cutoff = maxUint64/10 + 1
+ case 16:
+ cutoff = maxUint64/16 + 1
+ default:
+ cutoff = maxUint64/uint64(base) + 1
+ }
+
+ maxVal := uint64(1)<<uint(bitSize) - 1
+
+ underscores := false
+ var n uint64
+ for _, c := range []byte(s) {
+ var d byte
+ switch {
+ case c == '_' && base0:
+ underscores = true
+ continue
+ case '0' <= c && c <= '9':
+ d = c - '0'
+ case 'a' <= lower(c) && lower(c) <= 'z':
+ d = lower(c) - 'a' + 10
+ default:
+ return 0, syntaxError(fnParseUint, s0)
+ }
+
+ if d >= byte(base) {
+ return 0, syntaxError(fnParseUint, s0)
+ }
+
+ if n >= cutoff {
+ // n*base overflows
+ return maxVal, rangeError(fnParseUint, s0)
+ }
+ n *= uint64(base)
+
+ n1 := n + uint64(d)
+ if n1 < n || n1 > maxVal {
+ // n+d overflows
+ return maxVal, rangeError(fnParseUint, s0)
+ }
+ n = n1
+ }
+
+ if underscores && !underscoreOK(s0) {
+ return 0, syntaxError(fnParseUint, s0)
+ }
+
+ return n, nil
+}
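+
+// A worked overflow check (illustrative): in base 10,
+// cutoff == maxUint64/10 + 1 == 1844674407370955162. Once n reaches that
+// value, n*10 would exceed 1<<64 - 1, so the parse stops with ErrRange
+// instead of silently wrapping around.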
+
+// ParseInt interprets a string s in the given base (0, 2 to 36) and
+// bit size (0 to 64) and returns the corresponding value i.
+//
+// The string may begin with a leading sign: "+" or "-".
+//
+// If the base argument is 0, the true base is implied by the string's
+// prefix following the sign (if present): 2 for "0b", 8 for "0" or "0o",
+// 16 for "0x", and 10 otherwise. Also, for argument base 0 only,
+// underscore characters are permitted as defined by the Go syntax for
+// [integer literals].
+//
+// The bitSize argument specifies the integer type
+// that the result must fit into. Bit sizes 0, 8, 16, 32, and 64
+// correspond to int, int8, int16, int32, and int64.
+// If bitSize is below 0 or above 64, an error is returned.
+//
+// The errors that ParseInt returns have concrete type *NumError
+// and include err.Num = s. If s is empty or contains invalid
+// digits, err.Err = ErrSyntax and the returned value is 0;
+// if the value corresponding to s cannot be represented by a
+// signed integer of the given size, err.Err = ErrRange and the
+// returned value is the maximum magnitude integer of the
+// appropriate bitSize and sign.
+//
+// [integer literals]: https://go.dev/ref/spec#Integer_literals
+func ParseInt(s string, base int, bitSize int) (i int64, err error) {
+ const fnParseInt = "ParseInt"
+
+ if s == "" {
+ return 0, syntaxError(fnParseInt, s)
+ }
+
+ // Pick off leading sign.
+ s0 := s
+ neg := false
+ if s[0] == '+' {
+ s = s[1:]
+ } else if s[0] == '-' {
+ neg = true
+ s = s[1:]
+ }
+
+ // Convert unsigned and check range.
+ var un uint64
+ un, err = ParseUint(s, base, bitSize)
+ if err != nil && err.(*NumError).Err != ErrRange {
+ err.(*NumError).Func = fnParseInt
+ err.(*NumError).Num = s0
+ return 0, err
+ }
+
+ if bitSize == 0 {
+ bitSize = IntSize
+ }
+
+ cutoff := uint64(1 << uint(bitSize-1))
+ if !neg && un >= cutoff {
+ return int64(cutoff - 1), rangeError(fnParseInt, s0)
+ }
+ if neg && un > cutoff {
+ return -int64(cutoff), rangeError(fnParseInt, s0)
+ }
+ n := int64(un)
+ if neg {
+ n = -n
+ }
+ return n, nil
+}
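+
+// Illustrative calls via the public API: strconv.ParseInt("0x1f", 0, 64) == 31
+// with the base inferred from the "0x" prefix; strconv.ParseInt("-128", 10, 8)
+// == -128 (the minimum int8); strconv.ParseInt("128", 10, 8) returns 127
+// together with an ErrRange error.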
+
+// Atoi is equivalent to ParseInt(s, 10, 0), converted to type int.
+func Atoi(s string) (int, error) {
+ const fnAtoi = "Atoi"
+
+ sLen := len(s)
+ if intSize == 32 && (0 < sLen && sLen < 10) ||
+ intSize == 64 && (0 < sLen && sLen < 19) {
+ // Fast path for small integers that fit int type.
+ s0 := s
+ if s[0] == '-' || s[0] == '+' {
+ s = s[1:]
+ if len(s) < 1 {
+ return 0, &NumError{fnAtoi, s0, ErrSyntax}
+ }
+ }
+
+ n := 0
+ for _, ch := range []byte(s) {
+ ch -= '0'
+ if ch > 9 {
+ return 0, &NumError{fnAtoi, s0, ErrSyntax}
+ }
+ n = n*10 + int(ch)
+ }
+ if s0[0] == '-' {
+ n = -n
+ }
+ return n, nil
+ }
+
+ // Slow path for invalid, big, or underscored integers.
+ i64, err := ParseInt(s, 10, 0)
+ if nerr, ok := err.(*NumError); ok {
+ nerr.Func = fnAtoi
+ }
+ return int(i64), err
+}
+
+// underscoreOK reports whether the underscores in s are allowed.
+// Checking them in this one function lets all the parsers skip over them simply.
+// Underscore must appear only between digits or between a base prefix and a digit.
+func underscoreOK(s string) bool {
+ // saw tracks the last character (class) we saw:
+ // ^ for beginning of number,
+ // 0 for a digit or base prefix,
+ // _ for an underscore,
+ // ! for none of the above.
+ saw := '^'
+ i := 0
+
+ // Optional sign.
+ if len(s) >= 1 && (s[0] == '-' || s[0] == '+') {
+ s = s[1:]
+ }
+
+ // Optional base prefix.
+ hex := false
+ if len(s) >= 2 && s[0] == '0' && (lower(s[1]) == 'b' || lower(s[1]) == 'o' || lower(s[1]) == 'x') {
+ i = 2
+ saw = '0' // base prefix counts as a digit for "underscore as digit separator"
+ hex = lower(s[1]) == 'x'
+ }
+
+ // Number proper.
+ for ; i < len(s); i++ {
+ // Digits are always okay.
+ if '0' <= s[i] && s[i] <= '9' || hex && 'a' <= lower(s[i]) && lower(s[i]) <= 'f' {
+ saw = '0'
+ continue
+ }
+ // Underscore must follow digit.
+ if s[i] == '_' {
+ if saw != '0' {
+ return false
+ }
+ saw = '_'
+ continue
+ }
+ // Underscore must also be followed by digit.
+ if saw == '_' {
+ return false
+ }
+ // Saw non-digit, non-underscore.
+ saw = '!'
+ }
+ return saw != '_'
+}
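+
+// Illustrative inputs: underscoreOK("1_000") and underscoreOK("0x_ff") are
+// true (the base prefix counts as a digit), while underscoreOK("_1000"),
+// underscoreOK("1__0"), and underscoreOK("1000_") are all false.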
diff --git a/contrib/go/_std_1.18/src/strconv/bytealg.go b/contrib/go/_std_1.19/src/strconv/bytealg.go
index a2bb12c5f2..a2bb12c5f2 100644
--- a/contrib/go/_std_1.18/src/strconv/bytealg.go
+++ b/contrib/go/_std_1.19/src/strconv/bytealg.go
diff --git a/contrib/go/_std_1.18/src/strconv/ctoa.go b/contrib/go/_std_1.19/src/strconv/ctoa.go
index c16a2e579c..c16a2e579c 100644
--- a/contrib/go/_std_1.18/src/strconv/ctoa.go
+++ b/contrib/go/_std_1.19/src/strconv/ctoa.go
diff --git a/contrib/go/_std_1.18/src/strconv/decimal.go b/contrib/go/_std_1.19/src/strconv/decimal.go
index b58001888e..b58001888e 100644
--- a/contrib/go/_std_1.18/src/strconv/decimal.go
+++ b/contrib/go/_std_1.19/src/strconv/decimal.go
diff --git a/contrib/go/_std_1.19/src/strconv/doc.go b/contrib/go/_std_1.19/src/strconv/doc.go
new file mode 100644
index 0000000000..769ecd9a21
--- /dev/null
+++ b/contrib/go/_std_1.19/src/strconv/doc.go
@@ -0,0 +1,56 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package strconv implements conversions to and from string representations
+// of basic data types.
+//
+// # Numeric Conversions
+//
+// The most common numeric conversions are Atoi (string to int) and Itoa (int to string).
+//
+// i, err := strconv.Atoi("-42")
+// s := strconv.Itoa(-42)
+//
+// These assume decimal and the Go int type.
+//
+// ParseBool, ParseFloat, ParseInt, and ParseUint convert strings to values:
+//
+// b, err := strconv.ParseBool("true")
+// f, err := strconv.ParseFloat("3.1415", 64)
+// i, err := strconv.ParseInt("-42", 10, 64)
+// u, err := strconv.ParseUint("42", 10, 64)
+//
+// The parse functions return the widest type (float64, int64, and uint64),
+// but if the size argument specifies a narrower width the result can be
+// converted to that narrower type without data loss:
+//
+// s := "2147483647" // biggest int32
+// i64, err := strconv.ParseInt(s, 10, 32)
+// ...
+// i := int32(i64)
+//
+// FormatBool, FormatFloat, FormatInt, and FormatUint convert values to strings:
+//
+// s := strconv.FormatBool(true)
+// s := strconv.FormatFloat(3.1415, 'E', -1, 64)
+// s := strconv.FormatInt(-42, 16)
+// s := strconv.FormatUint(42, 16)
+//
+// AppendBool, AppendFloat, AppendInt, and AppendUint are similar but
+// append the formatted value to a destination slice.
+//
+// # String Conversions
+//
+// Quote and QuoteToASCII convert strings to quoted Go string literals.
+// The latter guarantees that the result is an ASCII string, by escaping
+// any non-ASCII Unicode with \u:
+//
+// q := strconv.Quote("Hello, 世界")
+// q := strconv.QuoteToASCII("Hello, 世界")
+//
+// QuoteRune and QuoteRuneToASCII are similar but accept runes and
+// return quoted Go rune literals.
+//
+// Unquote and UnquoteChar unquote Go string and rune literals.
+package strconv
diff --git a/contrib/go/_std_1.19/src/strconv/eisel_lemire.go b/contrib/go/_std_1.19/src/strconv/eisel_lemire.go
new file mode 100644
index 0000000000..03842e5079
--- /dev/null
+++ b/contrib/go/_std_1.19/src/strconv/eisel_lemire.go
@@ -0,0 +1,884 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package strconv
+
+// This file implements the Eisel-Lemire ParseFloat algorithm, published in
+// 2020 and discussed extensively at
+// https://nigeltao.github.io/blog/2020/eisel-lemire.html
+//
+// The original C++ implementation is at
+// https://github.com/lemire/fast_double_parser/blob/644bef4306059d3be01a04e77d3cc84b379c596f/include/fast_double_parser.h#L840
+//
+// This Go re-implementation closely follows the C re-implementation at
+// https://github.com/google/wuffs/blob/ba3818cb6b473a2ed0b38ecfc07dbbd3a97e8ae7/internal/cgen/base/floatconv-submodule-code.c#L990
+//
+// Additional testing (on several million test strings) is done by
+// https://github.com/nigeltao/parse-number-fxx-test-data/blob/5280dcfccf6d0b02a65ae282dad0b6d9de50e039/script/test-go-strconv.go
+
+import (
+ "math"
+ "math/bits"
+)
+
+func eiselLemire64(man uint64, exp10 int, neg bool) (f float64, ok bool) {
+ // The terse comments in this function body refer to sections of the
+ // https://nigeltao.github.io/blog/2020/eisel-lemire.html blog post.
+
+ // Exp10 Range.
+ if man == 0 {
+ if neg {
+ f = math.Float64frombits(0x8000000000000000) // Negative zero.
+ }
+ return f, true
+ }
+ if exp10 < detailedPowersOfTenMinExp10 || detailedPowersOfTenMaxExp10 < exp10 {
+ return 0, false
+ }
+
+ // Normalization.
+ clz := bits.LeadingZeros64(man)
+ man <<= uint(clz)
+ const float64ExponentBias = 1023
+ retExp2 := uint64(217706*exp10>>16+64+float64ExponentBias) - uint64(clz)
+
+ // Multiplication.
+ xHi, xLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][1])
+
+ // Wider Approximation.
+ if xHi&0x1FF == 0x1FF && xLo+man < man {
+ yHi, yLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][0])
+ mergedHi, mergedLo := xHi, xLo+yHi
+ if mergedLo < xLo {
+ mergedHi++
+ }
+ if mergedHi&0x1FF == 0x1FF && mergedLo+1 == 0 && yLo+man < man {
+ return 0, false
+ }
+ xHi, xLo = mergedHi, mergedLo
+ }
+
+ // Shifting to 54 Bits.
+ msb := xHi >> 63
+ retMantissa := xHi >> (msb + 9)
+ retExp2 -= 1 ^ msb
+
+ // Half-way Ambiguity.
+ if xLo == 0 && xHi&0x1FF == 0 && retMantissa&3 == 1 {
+ return 0, false
+ }
+
+ // From 54 to 53 Bits.
+ retMantissa += retMantissa & 1
+ retMantissa >>= 1
+ if retMantissa>>53 > 0 {
+ retMantissa >>= 1
+ retExp2 += 1
+ }
+ // retExp2 is a uint64. Zero or underflow means that we're in subnormal
+ // float64 space. 0x7FF or above means that we're in Inf/NaN float64 space.
+ //
+ // The if block is equivalent to (but has fewer branches than):
+ // if retExp2 <= 0 || retExp2 >= 0x7FF { etc }
+ if retExp2-1 >= 0x7FF-1 {
+ return 0, false
+ }
+ retBits := retExp2<<52 | retMantissa&0x000FFFFFFFFFFFFF
+ if neg {
+ retBits |= 0x8000000000000000
+ }
+ return math.Float64frombits(retBits), true
+}
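One step worth unpacking: the `217706*exp10>>16` term in retExp2 is a fixed-point estimate of exp10*log2(10), since 217706/65536 ≈ 3.321928 ≈ log2(10), and Go's arithmetic right shift floors negative values. A small stand-alone check of that approximation over the table's exponent range (an illustration only, not part of the package):

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		for _, exp10 := range []int{-348, -100, -1, 0, 1, 100, 347} {
			approx := 217706 * exp10 >> 16 // arithmetic shift: floor division by 2^16
			exact := int(math.Floor(float64(exp10) * math.Log2(10)))
			fmt.Println(exp10, approx, exact, approx == exact)
		}
	}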
+
+func eiselLemire32(man uint64, exp10 int, neg bool) (f float32, ok bool) {
+ // The terse comments in this function body refer to sections of the
+ // https://nigeltao.github.io/blog/2020/eisel-lemire.html blog post.
+ //
+ // That blog post discusses the float64 flavor (11 exponent bits with a
+ // -1023 bias, 52 mantissa bits) of the algorithm, but the same approach
+ // applies to the float32 flavor (8 exponent bits with a -127 bias, 23
+ // mantissa bits). The computation here happens with 64-bit values (e.g.
+ // man, xHi, retMantissa) before finally converting to a 32-bit float.
+
+ // Exp10 Range.
+ if man == 0 {
+ if neg {
+ f = math.Float32frombits(0x80000000) // Negative zero.
+ }
+ return f, true
+ }
+ if exp10 < detailedPowersOfTenMinExp10 || detailedPowersOfTenMaxExp10 < exp10 {
+ return 0, false
+ }
+
+ // Normalization.
+ clz := bits.LeadingZeros64(man)
+ man <<= uint(clz)
+ const float32ExponentBias = 127
+ retExp2 := uint64(217706*exp10>>16+64+float32ExponentBias) - uint64(clz)
+
+ // Multiplication.
+ xHi, xLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][1])
+
+ // Wider Approximation.
+ if xHi&0x3FFFFFFFFF == 0x3FFFFFFFFF && xLo+man < man {
+ yHi, yLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][0])
+ mergedHi, mergedLo := xHi, xLo+yHi
+ if mergedLo < xLo {
+ mergedHi++
+ }
+ if mergedHi&0x3FFFFFFFFF == 0x3FFFFFFFFF && mergedLo+1 == 0 && yLo+man < man {
+ return 0, false
+ }
+ xHi, xLo = mergedHi, mergedLo
+ }
+
+ // Shifting to 54 Bits (and for float32, it's shifting to 25 bits).
+ msb := xHi >> 63
+ retMantissa := xHi >> (msb + 38)
+ retExp2 -= 1 ^ msb
+
+ // Half-way Ambiguity.
+ if xLo == 0 && xHi&0x3FFFFFFFFF == 0 && retMantissa&3 == 1 {
+ return 0, false
+ }
+
+ // From 54 to 53 Bits (and for float32, it's from 25 to 24 bits).
+ retMantissa += retMantissa & 1
+ retMantissa >>= 1
+ if retMantissa>>24 > 0 {
+ retMantissa >>= 1
+ retExp2 += 1
+ }
+ // retExp2 is a uint64. Zero or underflow means that we're in subnormal
+ // float32 space. 0xFF or above means that we're in Inf/NaN float32 space.
+ //
+ // The if block is equivalent to (but has fewer branches than):
+ // if retExp2 <= 0 || retExp2 >= 0xFF { etc }
+ if retExp2-1 >= 0xFF-1 {
+ return 0, false
+ }
+ retBits := retExp2<<23 | retMantissa&0x007FFFFF
+ if neg {
+ retBits |= 0x80000000
+ }
+ return math.Float32frombits(uint32(retBits)), true
+}
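Both flavors special-case man == 0 so that a negative zero input survives as IEEE negative zero. The bit patterns used above are observable directly (illustration only):

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		nz64 := math.Float64frombits(0x8000000000000000)
		nz32 := math.Float32frombits(0x80000000)
		fmt.Println(nz64, nz64 == 0, math.Signbit(nz64))          // -0 true true
		fmt.Println(nz32, nz32 == 0, math.Signbit(float64(nz32))) // -0 true true
	}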
+
+// detailedPowersOfTen{Min,Max}Exp10 are the powers of 10 represented by the
+// first and last rows of detailedPowersOfTen. Both bounds are inclusive.
+const (
+ detailedPowersOfTenMinExp10 = -348
+ detailedPowersOfTenMaxExp10 = +347
+)
+
+// detailedPowersOfTen contains 128-bit mantissa approximations (rounded down)
+// to the powers of 10. For example:
+//
+// - 1e43 ≈ (0xE596B7B0_C643C719 * (2 ** 79))
+// - 1e43 = (0xE596B7B0_C643C719_6D9CCD05_D0000000 * (2 ** 15))
+//
+// The mantissas are explicitly listed. The exponents are implied by a linear
+// expression with slope 217706.0/65536.0 ≈ log(10)/log(2).
+//
+// The table was generated by
+// https://github.com/google/wuffs/blob/ba3818cb6b473a2ed0b38ecfc07dbbd3a97e8ae7/script/print-mpb-powers-of-10.go
+var detailedPowersOfTen = [...][2]uint64{
+ {0x1732C869CD60E453, 0xFA8FD5A0081C0288}, // 1e-348
+ {0x0E7FBD42205C8EB4, 0x9C99E58405118195}, // 1e-347
+ {0x521FAC92A873B261, 0xC3C05EE50655E1FA}, // 1e-346
+ {0xE6A797B752909EF9, 0xF4B0769E47EB5A78}, // 1e-345
+ {0x9028BED2939A635C, 0x98EE4A22ECF3188B}, // 1e-344
+ {0x7432EE873880FC33, 0xBF29DCABA82FDEAE}, // 1e-343
+ {0x113FAA2906A13B3F, 0xEEF453D6923BD65A}, // 1e-342
+ {0x4AC7CA59A424C507, 0x9558B4661B6565F8}, // 1e-341
+ {0x5D79BCF00D2DF649, 0xBAAEE17FA23EBF76}, // 1e-340
+ {0xF4D82C2C107973DC, 0xE95A99DF8ACE6F53}, // 1e-339
+ {0x79071B9B8A4BE869, 0x91D8A02BB6C10594}, // 1e-338
+ {0x9748E2826CDEE284, 0xB64EC836A47146F9}, // 1e-337
+ {0xFD1B1B2308169B25, 0xE3E27A444D8D98B7}, // 1e-336
+ {0xFE30F0F5E50E20F7, 0x8E6D8C6AB0787F72}, // 1e-335
+ {0xBDBD2D335E51A935, 0xB208EF855C969F4F}, // 1e-334
+ {0xAD2C788035E61382, 0xDE8B2B66B3BC4723}, // 1e-333
+ {0x4C3BCB5021AFCC31, 0x8B16FB203055AC76}, // 1e-332
+ {0xDF4ABE242A1BBF3D, 0xADDCB9E83C6B1793}, // 1e-331
+ {0xD71D6DAD34A2AF0D, 0xD953E8624B85DD78}, // 1e-330
+ {0x8672648C40E5AD68, 0x87D4713D6F33AA6B}, // 1e-329
+ {0x680EFDAF511F18C2, 0xA9C98D8CCB009506}, // 1e-328
+ {0x0212BD1B2566DEF2, 0xD43BF0EFFDC0BA48}, // 1e-327
+ {0x014BB630F7604B57, 0x84A57695FE98746D}, // 1e-326
+ {0x419EA3BD35385E2D, 0xA5CED43B7E3E9188}, // 1e-325
+ {0x52064CAC828675B9, 0xCF42894A5DCE35EA}, // 1e-324
+ {0x7343EFEBD1940993, 0x818995CE7AA0E1B2}, // 1e-323
+ {0x1014EBE6C5F90BF8, 0xA1EBFB4219491A1F}, // 1e-322
+ {0xD41A26E077774EF6, 0xCA66FA129F9B60A6}, // 1e-321
+ {0x8920B098955522B4, 0xFD00B897478238D0}, // 1e-320
+ {0x55B46E5F5D5535B0, 0x9E20735E8CB16382}, // 1e-319
+ {0xEB2189F734AA831D, 0xC5A890362FDDBC62}, // 1e-318
+ {0xA5E9EC7501D523E4, 0xF712B443BBD52B7B}, // 1e-317
+ {0x47B233C92125366E, 0x9A6BB0AA55653B2D}, // 1e-316
+ {0x999EC0BB696E840A, 0xC1069CD4EABE89F8}, // 1e-315
+ {0xC00670EA43CA250D, 0xF148440A256E2C76}, // 1e-314
+ {0x380406926A5E5728, 0x96CD2A865764DBCA}, // 1e-313
+ {0xC605083704F5ECF2, 0xBC807527ED3E12BC}, // 1e-312
+ {0xF7864A44C633682E, 0xEBA09271E88D976B}, // 1e-311
+ {0x7AB3EE6AFBE0211D, 0x93445B8731587EA3}, // 1e-310
+ {0x5960EA05BAD82964, 0xB8157268FDAE9E4C}, // 1e-309
+ {0x6FB92487298E33BD, 0xE61ACF033D1A45DF}, // 1e-308
+ {0xA5D3B6D479F8E056, 0x8FD0C16206306BAB}, // 1e-307
+ {0x8F48A4899877186C, 0xB3C4F1BA87BC8696}, // 1e-306
+ {0x331ACDABFE94DE87, 0xE0B62E2929ABA83C}, // 1e-305
+ {0x9FF0C08B7F1D0B14, 0x8C71DCD9BA0B4925}, // 1e-304
+ {0x07ECF0AE5EE44DD9, 0xAF8E5410288E1B6F}, // 1e-303
+ {0xC9E82CD9F69D6150, 0xDB71E91432B1A24A}, // 1e-302
+ {0xBE311C083A225CD2, 0x892731AC9FAF056E}, // 1e-301
+ {0x6DBD630A48AAF406, 0xAB70FE17C79AC6CA}, // 1e-300
+ {0x092CBBCCDAD5B108, 0xD64D3D9DB981787D}, // 1e-299
+ {0x25BBF56008C58EA5, 0x85F0468293F0EB4E}, // 1e-298
+ {0xAF2AF2B80AF6F24E, 0xA76C582338ED2621}, // 1e-297
+ {0x1AF5AF660DB4AEE1, 0xD1476E2C07286FAA}, // 1e-296
+ {0x50D98D9FC890ED4D, 0x82CCA4DB847945CA}, // 1e-295
+ {0xE50FF107BAB528A0, 0xA37FCE126597973C}, // 1e-294
+ {0x1E53ED49A96272C8, 0xCC5FC196FEFD7D0C}, // 1e-293
+ {0x25E8E89C13BB0F7A, 0xFF77B1FCBEBCDC4F}, // 1e-292
+ {0x77B191618C54E9AC, 0x9FAACF3DF73609B1}, // 1e-291
+ {0xD59DF5B9EF6A2417, 0xC795830D75038C1D}, // 1e-290
+ {0x4B0573286B44AD1D, 0xF97AE3D0D2446F25}, // 1e-289
+ {0x4EE367F9430AEC32, 0x9BECCE62836AC577}, // 1e-288
+ {0x229C41F793CDA73F, 0xC2E801FB244576D5}, // 1e-287
+ {0x6B43527578C1110F, 0xF3A20279ED56D48A}, // 1e-286
+ {0x830A13896B78AAA9, 0x9845418C345644D6}, // 1e-285
+ {0x23CC986BC656D553, 0xBE5691EF416BD60C}, // 1e-284
+ {0x2CBFBE86B7EC8AA8, 0xEDEC366B11C6CB8F}, // 1e-283
+ {0x7BF7D71432F3D6A9, 0x94B3A202EB1C3F39}, // 1e-282
+ {0xDAF5CCD93FB0CC53, 0xB9E08A83A5E34F07}, // 1e-281
+ {0xD1B3400F8F9CFF68, 0xE858AD248F5C22C9}, // 1e-280
+ {0x23100809B9C21FA1, 0x91376C36D99995BE}, // 1e-279
+ {0xABD40A0C2832A78A, 0xB58547448FFFFB2D}, // 1e-278
+ {0x16C90C8F323F516C, 0xE2E69915B3FFF9F9}, // 1e-277
+ {0xAE3DA7D97F6792E3, 0x8DD01FAD907FFC3B}, // 1e-276
+ {0x99CD11CFDF41779C, 0xB1442798F49FFB4A}, // 1e-275
+ {0x40405643D711D583, 0xDD95317F31C7FA1D}, // 1e-274
+ {0x482835EA666B2572, 0x8A7D3EEF7F1CFC52}, // 1e-273
+ {0xDA3243650005EECF, 0xAD1C8EAB5EE43B66}, // 1e-272
+ {0x90BED43E40076A82, 0xD863B256369D4A40}, // 1e-271
+ {0x5A7744A6E804A291, 0x873E4F75E2224E68}, // 1e-270
+ {0x711515D0A205CB36, 0xA90DE3535AAAE202}, // 1e-269
+ {0x0D5A5B44CA873E03, 0xD3515C2831559A83}, // 1e-268
+ {0xE858790AFE9486C2, 0x8412D9991ED58091}, // 1e-267
+ {0x626E974DBE39A872, 0xA5178FFF668AE0B6}, // 1e-266
+ {0xFB0A3D212DC8128F, 0xCE5D73FF402D98E3}, // 1e-265
+ {0x7CE66634BC9D0B99, 0x80FA687F881C7F8E}, // 1e-264
+ {0x1C1FFFC1EBC44E80, 0xA139029F6A239F72}, // 1e-263
+ {0xA327FFB266B56220, 0xC987434744AC874E}, // 1e-262
+ {0x4BF1FF9F0062BAA8, 0xFBE9141915D7A922}, // 1e-261
+ {0x6F773FC3603DB4A9, 0x9D71AC8FADA6C9B5}, // 1e-260
+ {0xCB550FB4384D21D3, 0xC4CE17B399107C22}, // 1e-259
+ {0x7E2A53A146606A48, 0xF6019DA07F549B2B}, // 1e-258
+ {0x2EDA7444CBFC426D, 0x99C102844F94E0FB}, // 1e-257
+ {0xFA911155FEFB5308, 0xC0314325637A1939}, // 1e-256
+ {0x793555AB7EBA27CA, 0xF03D93EEBC589F88}, // 1e-255
+ {0x4BC1558B2F3458DE, 0x96267C7535B763B5}, // 1e-254
+ {0x9EB1AAEDFB016F16, 0xBBB01B9283253CA2}, // 1e-253
+ {0x465E15A979C1CADC, 0xEA9C227723EE8BCB}, // 1e-252
+ {0x0BFACD89EC191EC9, 0x92A1958A7675175F}, // 1e-251
+ {0xCEF980EC671F667B, 0xB749FAED14125D36}, // 1e-250
+ {0x82B7E12780E7401A, 0xE51C79A85916F484}, // 1e-249
+ {0xD1B2ECB8B0908810, 0x8F31CC0937AE58D2}, // 1e-248
+ {0x861FA7E6DCB4AA15, 0xB2FE3F0B8599EF07}, // 1e-247
+ {0x67A791E093E1D49A, 0xDFBDCECE67006AC9}, // 1e-246
+ {0xE0C8BB2C5C6D24E0, 0x8BD6A141006042BD}, // 1e-245
+ {0x58FAE9F773886E18, 0xAECC49914078536D}, // 1e-244
+ {0xAF39A475506A899E, 0xDA7F5BF590966848}, // 1e-243
+ {0x6D8406C952429603, 0x888F99797A5E012D}, // 1e-242
+ {0xC8E5087BA6D33B83, 0xAAB37FD7D8F58178}, // 1e-241
+ {0xFB1E4A9A90880A64, 0xD5605FCDCF32E1D6}, // 1e-240
+ {0x5CF2EEA09A55067F, 0x855C3BE0A17FCD26}, // 1e-239
+ {0xF42FAA48C0EA481E, 0xA6B34AD8C9DFC06F}, // 1e-238
+ {0xF13B94DAF124DA26, 0xD0601D8EFC57B08B}, // 1e-237
+ {0x76C53D08D6B70858, 0x823C12795DB6CE57}, // 1e-236
+ {0x54768C4B0C64CA6E, 0xA2CB1717B52481ED}, // 1e-235
+ {0xA9942F5DCF7DFD09, 0xCB7DDCDDA26DA268}, // 1e-234
+ {0xD3F93B35435D7C4C, 0xFE5D54150B090B02}, // 1e-233
+ {0xC47BC5014A1A6DAF, 0x9EFA548D26E5A6E1}, // 1e-232
+ {0x359AB6419CA1091B, 0xC6B8E9B0709F109A}, // 1e-231
+ {0xC30163D203C94B62, 0xF867241C8CC6D4C0}, // 1e-230
+ {0x79E0DE63425DCF1D, 0x9B407691D7FC44F8}, // 1e-229
+ {0x985915FC12F542E4, 0xC21094364DFB5636}, // 1e-228
+ {0x3E6F5B7B17B2939D, 0xF294B943E17A2BC4}, // 1e-227
+ {0xA705992CEECF9C42, 0x979CF3CA6CEC5B5A}, // 1e-226
+ {0x50C6FF782A838353, 0xBD8430BD08277231}, // 1e-225
+ {0xA4F8BF5635246428, 0xECE53CEC4A314EBD}, // 1e-224
+ {0x871B7795E136BE99, 0x940F4613AE5ED136}, // 1e-223
+ {0x28E2557B59846E3F, 0xB913179899F68584}, // 1e-222
+ {0x331AEADA2FE589CF, 0xE757DD7EC07426E5}, // 1e-221
+ {0x3FF0D2C85DEF7621, 0x9096EA6F3848984F}, // 1e-220
+ {0x0FED077A756B53A9, 0xB4BCA50B065ABE63}, // 1e-219
+ {0xD3E8495912C62894, 0xE1EBCE4DC7F16DFB}, // 1e-218
+ {0x64712DD7ABBBD95C, 0x8D3360F09CF6E4BD}, // 1e-217
+ {0xBD8D794D96AACFB3, 0xB080392CC4349DEC}, // 1e-216
+ {0xECF0D7A0FC5583A0, 0xDCA04777F541C567}, // 1e-215
+ {0xF41686C49DB57244, 0x89E42CAAF9491B60}, // 1e-214
+ {0x311C2875C522CED5, 0xAC5D37D5B79B6239}, // 1e-213
+ {0x7D633293366B828B, 0xD77485CB25823AC7}, // 1e-212
+ {0xAE5DFF9C02033197, 0x86A8D39EF77164BC}, // 1e-211
+ {0xD9F57F830283FDFC, 0xA8530886B54DBDEB}, // 1e-210
+ {0xD072DF63C324FD7B, 0xD267CAA862A12D66}, // 1e-209
+ {0x4247CB9E59F71E6D, 0x8380DEA93DA4BC60}, // 1e-208
+ {0x52D9BE85F074E608, 0xA46116538D0DEB78}, // 1e-207
+ {0x67902E276C921F8B, 0xCD795BE870516656}, // 1e-206
+ {0x00BA1CD8A3DB53B6, 0x806BD9714632DFF6}, // 1e-205
+ {0x80E8A40ECCD228A4, 0xA086CFCD97BF97F3}, // 1e-204
+ {0x6122CD128006B2CD, 0xC8A883C0FDAF7DF0}, // 1e-203
+ {0x796B805720085F81, 0xFAD2A4B13D1B5D6C}, // 1e-202
+ {0xCBE3303674053BB0, 0x9CC3A6EEC6311A63}, // 1e-201
+ {0xBEDBFC4411068A9C, 0xC3F490AA77BD60FC}, // 1e-200
+ {0xEE92FB5515482D44, 0xF4F1B4D515ACB93B}, // 1e-199
+ {0x751BDD152D4D1C4A, 0x991711052D8BF3C5}, // 1e-198
+ {0xD262D45A78A0635D, 0xBF5CD54678EEF0B6}, // 1e-197
+ {0x86FB897116C87C34, 0xEF340A98172AACE4}, // 1e-196
+ {0xD45D35E6AE3D4DA0, 0x9580869F0E7AAC0E}, // 1e-195
+ {0x8974836059CCA109, 0xBAE0A846D2195712}, // 1e-194
+ {0x2BD1A438703FC94B, 0xE998D258869FACD7}, // 1e-193
+ {0x7B6306A34627DDCF, 0x91FF83775423CC06}, // 1e-192
+ {0x1A3BC84C17B1D542, 0xB67F6455292CBF08}, // 1e-191
+ {0x20CABA5F1D9E4A93, 0xE41F3D6A7377EECA}, // 1e-190
+ {0x547EB47B7282EE9C, 0x8E938662882AF53E}, // 1e-189
+ {0xE99E619A4F23AA43, 0xB23867FB2A35B28D}, // 1e-188
+ {0x6405FA00E2EC94D4, 0xDEC681F9F4C31F31}, // 1e-187
+ {0xDE83BC408DD3DD04, 0x8B3C113C38F9F37E}, // 1e-186
+ {0x9624AB50B148D445, 0xAE0B158B4738705E}, // 1e-185
+ {0x3BADD624DD9B0957, 0xD98DDAEE19068C76}, // 1e-184
+ {0xE54CA5D70A80E5D6, 0x87F8A8D4CFA417C9}, // 1e-183
+ {0x5E9FCF4CCD211F4C, 0xA9F6D30A038D1DBC}, // 1e-182
+ {0x7647C3200069671F, 0xD47487CC8470652B}, // 1e-181
+ {0x29ECD9F40041E073, 0x84C8D4DFD2C63F3B}, // 1e-180
+ {0xF468107100525890, 0xA5FB0A17C777CF09}, // 1e-179
+ {0x7182148D4066EEB4, 0xCF79CC9DB955C2CC}, // 1e-178
+ {0xC6F14CD848405530, 0x81AC1FE293D599BF}, // 1e-177
+ {0xB8ADA00E5A506A7C, 0xA21727DB38CB002F}, // 1e-176
+ {0xA6D90811F0E4851C, 0xCA9CF1D206FDC03B}, // 1e-175
+ {0x908F4A166D1DA663, 0xFD442E4688BD304A}, // 1e-174
+ {0x9A598E4E043287FE, 0x9E4A9CEC15763E2E}, // 1e-173
+ {0x40EFF1E1853F29FD, 0xC5DD44271AD3CDBA}, // 1e-172
+ {0xD12BEE59E68EF47C, 0xF7549530E188C128}, // 1e-171
+ {0x82BB74F8301958CE, 0x9A94DD3E8CF578B9}, // 1e-170
+ {0xE36A52363C1FAF01, 0xC13A148E3032D6E7}, // 1e-169
+ {0xDC44E6C3CB279AC1, 0xF18899B1BC3F8CA1}, // 1e-168
+ {0x29AB103A5EF8C0B9, 0x96F5600F15A7B7E5}, // 1e-167
+ {0x7415D448F6B6F0E7, 0xBCB2B812DB11A5DE}, // 1e-166
+ {0x111B495B3464AD21, 0xEBDF661791D60F56}, // 1e-165
+ {0xCAB10DD900BEEC34, 0x936B9FCEBB25C995}, // 1e-164
+ {0x3D5D514F40EEA742, 0xB84687C269EF3BFB}, // 1e-163
+ {0x0CB4A5A3112A5112, 0xE65829B3046B0AFA}, // 1e-162
+ {0x47F0E785EABA72AB, 0x8FF71A0FE2C2E6DC}, // 1e-161
+ {0x59ED216765690F56, 0xB3F4E093DB73A093}, // 1e-160
+ {0x306869C13EC3532C, 0xE0F218B8D25088B8}, // 1e-159
+ {0x1E414218C73A13FB, 0x8C974F7383725573}, // 1e-158
+ {0xE5D1929EF90898FA, 0xAFBD2350644EEACF}, // 1e-157
+ {0xDF45F746B74ABF39, 0xDBAC6C247D62A583}, // 1e-156
+ {0x6B8BBA8C328EB783, 0x894BC396CE5DA772}, // 1e-155
+ {0x066EA92F3F326564, 0xAB9EB47C81F5114F}, // 1e-154
+ {0xC80A537B0EFEFEBD, 0xD686619BA27255A2}, // 1e-153
+ {0xBD06742CE95F5F36, 0x8613FD0145877585}, // 1e-152
+ {0x2C48113823B73704, 0xA798FC4196E952E7}, // 1e-151
+ {0xF75A15862CA504C5, 0xD17F3B51FCA3A7A0}, // 1e-150
+ {0x9A984D73DBE722FB, 0x82EF85133DE648C4}, // 1e-149
+ {0xC13E60D0D2E0EBBA, 0xA3AB66580D5FDAF5}, // 1e-148
+ {0x318DF905079926A8, 0xCC963FEE10B7D1B3}, // 1e-147
+ {0xFDF17746497F7052, 0xFFBBCFE994E5C61F}, // 1e-146
+ {0xFEB6EA8BEDEFA633, 0x9FD561F1FD0F9BD3}, // 1e-145
+ {0xFE64A52EE96B8FC0, 0xC7CABA6E7C5382C8}, // 1e-144
+ {0x3DFDCE7AA3C673B0, 0xF9BD690A1B68637B}, // 1e-143
+ {0x06BEA10CA65C084E, 0x9C1661A651213E2D}, // 1e-142
+ {0x486E494FCFF30A62, 0xC31BFA0FE5698DB8}, // 1e-141
+ {0x5A89DBA3C3EFCCFA, 0xF3E2F893DEC3F126}, // 1e-140
+ {0xF89629465A75E01C, 0x986DDB5C6B3A76B7}, // 1e-139
+ {0xF6BBB397F1135823, 0xBE89523386091465}, // 1e-138
+ {0x746AA07DED582E2C, 0xEE2BA6C0678B597F}, // 1e-137
+ {0xA8C2A44EB4571CDC, 0x94DB483840B717EF}, // 1e-136
+ {0x92F34D62616CE413, 0xBA121A4650E4DDEB}, // 1e-135
+ {0x77B020BAF9C81D17, 0xE896A0D7E51E1566}, // 1e-134
+ {0x0ACE1474DC1D122E, 0x915E2486EF32CD60}, // 1e-133
+ {0x0D819992132456BA, 0xB5B5ADA8AAFF80B8}, // 1e-132
+ {0x10E1FFF697ED6C69, 0xE3231912D5BF60E6}, // 1e-131
+ {0xCA8D3FFA1EF463C1, 0x8DF5EFABC5979C8F}, // 1e-130
+ {0xBD308FF8A6B17CB2, 0xB1736B96B6FD83B3}, // 1e-129
+ {0xAC7CB3F6D05DDBDE, 0xDDD0467C64BCE4A0}, // 1e-128
+ {0x6BCDF07A423AA96B, 0x8AA22C0DBEF60EE4}, // 1e-127
+ {0x86C16C98D2C953C6, 0xAD4AB7112EB3929D}, // 1e-126
+ {0xE871C7BF077BA8B7, 0xD89D64D57A607744}, // 1e-125
+ {0x11471CD764AD4972, 0x87625F056C7C4A8B}, // 1e-124
+ {0xD598E40D3DD89BCF, 0xA93AF6C6C79B5D2D}, // 1e-123
+ {0x4AFF1D108D4EC2C3, 0xD389B47879823479}, // 1e-122
+ {0xCEDF722A585139BA, 0x843610CB4BF160CB}, // 1e-121
+ {0xC2974EB4EE658828, 0xA54394FE1EEDB8FE}, // 1e-120
+ {0x733D226229FEEA32, 0xCE947A3DA6A9273E}, // 1e-119
+ {0x0806357D5A3F525F, 0x811CCC668829B887}, // 1e-118
+ {0xCA07C2DCB0CF26F7, 0xA163FF802A3426A8}, // 1e-117
+ {0xFC89B393DD02F0B5, 0xC9BCFF6034C13052}, // 1e-116
+ {0xBBAC2078D443ACE2, 0xFC2C3F3841F17C67}, // 1e-115
+ {0xD54B944B84AA4C0D, 0x9D9BA7832936EDC0}, // 1e-114
+ {0x0A9E795E65D4DF11, 0xC5029163F384A931}, // 1e-113
+ {0x4D4617B5FF4A16D5, 0xF64335BCF065D37D}, // 1e-112
+ {0x504BCED1BF8E4E45, 0x99EA0196163FA42E}, // 1e-111
+ {0xE45EC2862F71E1D6, 0xC06481FB9BCF8D39}, // 1e-110
+ {0x5D767327BB4E5A4C, 0xF07DA27A82C37088}, // 1e-109
+ {0x3A6A07F8D510F86F, 0x964E858C91BA2655}, // 1e-108
+ {0x890489F70A55368B, 0xBBE226EFB628AFEA}, // 1e-107
+ {0x2B45AC74CCEA842E, 0xEADAB0ABA3B2DBE5}, // 1e-106
+ {0x3B0B8BC90012929D, 0x92C8AE6B464FC96F}, // 1e-105
+ {0x09CE6EBB40173744, 0xB77ADA0617E3BBCB}, // 1e-104
+ {0xCC420A6A101D0515, 0xE55990879DDCAABD}, // 1e-103
+ {0x9FA946824A12232D, 0x8F57FA54C2A9EAB6}, // 1e-102
+ {0x47939822DC96ABF9, 0xB32DF8E9F3546564}, // 1e-101
+ {0x59787E2B93BC56F7, 0xDFF9772470297EBD}, // 1e-100
+ {0x57EB4EDB3C55B65A, 0x8BFBEA76C619EF36}, // 1e-99
+ {0xEDE622920B6B23F1, 0xAEFAE51477A06B03}, // 1e-98
+ {0xE95FAB368E45ECED, 0xDAB99E59958885C4}, // 1e-97
+ {0x11DBCB0218EBB414, 0x88B402F7FD75539B}, // 1e-96
+ {0xD652BDC29F26A119, 0xAAE103B5FCD2A881}, // 1e-95
+ {0x4BE76D3346F0495F, 0xD59944A37C0752A2}, // 1e-94
+ {0x6F70A4400C562DDB, 0x857FCAE62D8493A5}, // 1e-93
+ {0xCB4CCD500F6BB952, 0xA6DFBD9FB8E5B88E}, // 1e-92
+ {0x7E2000A41346A7A7, 0xD097AD07A71F26B2}, // 1e-91
+ {0x8ED400668C0C28C8, 0x825ECC24C873782F}, // 1e-90
+ {0x728900802F0F32FA, 0xA2F67F2DFA90563B}, // 1e-89
+ {0x4F2B40A03AD2FFB9, 0xCBB41EF979346BCA}, // 1e-88
+ {0xE2F610C84987BFA8, 0xFEA126B7D78186BC}, // 1e-87
+ {0x0DD9CA7D2DF4D7C9, 0x9F24B832E6B0F436}, // 1e-86
+ {0x91503D1C79720DBB, 0xC6EDE63FA05D3143}, // 1e-85
+ {0x75A44C6397CE912A, 0xF8A95FCF88747D94}, // 1e-84
+ {0xC986AFBE3EE11ABA, 0x9B69DBE1B548CE7C}, // 1e-83
+ {0xFBE85BADCE996168, 0xC24452DA229B021B}, // 1e-82
+ {0xFAE27299423FB9C3, 0xF2D56790AB41C2A2}, // 1e-81
+ {0xDCCD879FC967D41A, 0x97C560BA6B0919A5}, // 1e-80
+ {0x5400E987BBC1C920, 0xBDB6B8E905CB600F}, // 1e-79
+ {0x290123E9AAB23B68, 0xED246723473E3813}, // 1e-78
+ {0xF9A0B6720AAF6521, 0x9436C0760C86E30B}, // 1e-77
+ {0xF808E40E8D5B3E69, 0xB94470938FA89BCE}, // 1e-76
+ {0xB60B1D1230B20E04, 0xE7958CB87392C2C2}, // 1e-75
+ {0xB1C6F22B5E6F48C2, 0x90BD77F3483BB9B9}, // 1e-74
+ {0x1E38AEB6360B1AF3, 0xB4ECD5F01A4AA828}, // 1e-73
+ {0x25C6DA63C38DE1B0, 0xE2280B6C20DD5232}, // 1e-72
+ {0x579C487E5A38AD0E, 0x8D590723948A535F}, // 1e-71
+ {0x2D835A9DF0C6D851, 0xB0AF48EC79ACE837}, // 1e-70
+ {0xF8E431456CF88E65, 0xDCDB1B2798182244}, // 1e-69
+ {0x1B8E9ECB641B58FF, 0x8A08F0F8BF0F156B}, // 1e-68
+ {0xE272467E3D222F3F, 0xAC8B2D36EED2DAC5}, // 1e-67
+ {0x5B0ED81DCC6ABB0F, 0xD7ADF884AA879177}, // 1e-66
+ {0x98E947129FC2B4E9, 0x86CCBB52EA94BAEA}, // 1e-65
+ {0x3F2398D747B36224, 0xA87FEA27A539E9A5}, // 1e-64
+ {0x8EEC7F0D19A03AAD, 0xD29FE4B18E88640E}, // 1e-63
+ {0x1953CF68300424AC, 0x83A3EEEEF9153E89}, // 1e-62
+ {0x5FA8C3423C052DD7, 0xA48CEAAAB75A8E2B}, // 1e-61
+ {0x3792F412CB06794D, 0xCDB02555653131B6}, // 1e-60
+ {0xE2BBD88BBEE40BD0, 0x808E17555F3EBF11}, // 1e-59
+ {0x5B6ACEAEAE9D0EC4, 0xA0B19D2AB70E6ED6}, // 1e-58
+ {0xF245825A5A445275, 0xC8DE047564D20A8B}, // 1e-57
+ {0xEED6E2F0F0D56712, 0xFB158592BE068D2E}, // 1e-56
+ {0x55464DD69685606B, 0x9CED737BB6C4183D}, // 1e-55
+ {0xAA97E14C3C26B886, 0xC428D05AA4751E4C}, // 1e-54
+ {0xD53DD99F4B3066A8, 0xF53304714D9265DF}, // 1e-53
+ {0xE546A8038EFE4029, 0x993FE2C6D07B7FAB}, // 1e-52
+ {0xDE98520472BDD033, 0xBF8FDB78849A5F96}, // 1e-51
+ {0x963E66858F6D4440, 0xEF73D256A5C0F77C}, // 1e-50
+ {0xDDE7001379A44AA8, 0x95A8637627989AAD}, // 1e-49
+ {0x5560C018580D5D52, 0xBB127C53B17EC159}, // 1e-48
+ {0xAAB8F01E6E10B4A6, 0xE9D71B689DDE71AF}, // 1e-47
+ {0xCAB3961304CA70E8, 0x9226712162AB070D}, // 1e-46
+ {0x3D607B97C5FD0D22, 0xB6B00D69BB55C8D1}, // 1e-45
+ {0x8CB89A7DB77C506A, 0xE45C10C42A2B3B05}, // 1e-44
+ {0x77F3608E92ADB242, 0x8EB98A7A9A5B04E3}, // 1e-43
+ {0x55F038B237591ED3, 0xB267ED1940F1C61C}, // 1e-42
+ {0x6B6C46DEC52F6688, 0xDF01E85F912E37A3}, // 1e-41
+ {0x2323AC4B3B3DA015, 0x8B61313BBABCE2C6}, // 1e-40
+ {0xABEC975E0A0D081A, 0xAE397D8AA96C1B77}, // 1e-39
+ {0x96E7BD358C904A21, 0xD9C7DCED53C72255}, // 1e-38
+ {0x7E50D64177DA2E54, 0x881CEA14545C7575}, // 1e-37
+ {0xDDE50BD1D5D0B9E9, 0xAA242499697392D2}, // 1e-36
+ {0x955E4EC64B44E864, 0xD4AD2DBFC3D07787}, // 1e-35
+ {0xBD5AF13BEF0B113E, 0x84EC3C97DA624AB4}, // 1e-34
+ {0xECB1AD8AEACDD58E, 0xA6274BBDD0FADD61}, // 1e-33
+ {0x67DE18EDA5814AF2, 0xCFB11EAD453994BA}, // 1e-32
+ {0x80EACF948770CED7, 0x81CEB32C4B43FCF4}, // 1e-31
+ {0xA1258379A94D028D, 0xA2425FF75E14FC31}, // 1e-30
+ {0x096EE45813A04330, 0xCAD2F7F5359A3B3E}, // 1e-29
+ {0x8BCA9D6E188853FC, 0xFD87B5F28300CA0D}, // 1e-28
+ {0x775EA264CF55347D, 0x9E74D1B791E07E48}, // 1e-27
+ {0x95364AFE032A819D, 0xC612062576589DDA}, // 1e-26
+ {0x3A83DDBD83F52204, 0xF79687AED3EEC551}, // 1e-25
+ {0xC4926A9672793542, 0x9ABE14CD44753B52}, // 1e-24
+ {0x75B7053C0F178293, 0xC16D9A0095928A27}, // 1e-23
+ {0x5324C68B12DD6338, 0xF1C90080BAF72CB1}, // 1e-22
+ {0xD3F6FC16EBCA5E03, 0x971DA05074DA7BEE}, // 1e-21
+ {0x88F4BB1CA6BCF584, 0xBCE5086492111AEA}, // 1e-20
+ {0x2B31E9E3D06C32E5, 0xEC1E4A7DB69561A5}, // 1e-19
+ {0x3AFF322E62439FCF, 0x9392EE8E921D5D07}, // 1e-18
+ {0x09BEFEB9FAD487C2, 0xB877AA3236A4B449}, // 1e-17
+ {0x4C2EBE687989A9B3, 0xE69594BEC44DE15B}, // 1e-16
+ {0x0F9D37014BF60A10, 0x901D7CF73AB0ACD9}, // 1e-15
+ {0x538484C19EF38C94, 0xB424DC35095CD80F}, // 1e-14
+ {0x2865A5F206B06FB9, 0xE12E13424BB40E13}, // 1e-13
+ {0xF93F87B7442E45D3, 0x8CBCCC096F5088CB}, // 1e-12
+ {0xF78F69A51539D748, 0xAFEBFF0BCB24AAFE}, // 1e-11
+ {0xB573440E5A884D1B, 0xDBE6FECEBDEDD5BE}, // 1e-10
+ {0x31680A88F8953030, 0x89705F4136B4A597}, // 1e-9
+ {0xFDC20D2B36BA7C3D, 0xABCC77118461CEFC}, // 1e-8
+ {0x3D32907604691B4C, 0xD6BF94D5E57A42BC}, // 1e-7
+ {0xA63F9A49C2C1B10F, 0x8637BD05AF6C69B5}, // 1e-6
+ {0x0FCF80DC33721D53, 0xA7C5AC471B478423}, // 1e-5
+ {0xD3C36113404EA4A8, 0xD1B71758E219652B}, // 1e-4
+ {0x645A1CAC083126E9, 0x83126E978D4FDF3B}, // 1e-3
+ {0x3D70A3D70A3D70A3, 0xA3D70A3D70A3D70A}, // 1e-2
+ {0xCCCCCCCCCCCCCCCC, 0xCCCCCCCCCCCCCCCC}, // 1e-1
+ {0x0000000000000000, 0x8000000000000000}, // 1e0
+ {0x0000000000000000, 0xA000000000000000}, // 1e1
+ {0x0000000000000000, 0xC800000000000000}, // 1e2
+ {0x0000000000000000, 0xFA00000000000000}, // 1e3
+ {0x0000000000000000, 0x9C40000000000000}, // 1e4
+ {0x0000000000000000, 0xC350000000000000}, // 1e5
+ {0x0000000000000000, 0xF424000000000000}, // 1e6
+ {0x0000000000000000, 0x9896800000000000}, // 1e7
+ {0x0000000000000000, 0xBEBC200000000000}, // 1e8
+ {0x0000000000000000, 0xEE6B280000000000}, // 1e9
+ {0x0000000000000000, 0x9502F90000000000}, // 1e10
+ {0x0000000000000000, 0xBA43B74000000000}, // 1e11
+ {0x0000000000000000, 0xE8D4A51000000000}, // 1e12
+ {0x0000000000000000, 0x9184E72A00000000}, // 1e13
+ {0x0000000000000000, 0xB5E620F480000000}, // 1e14
+ {0x0000000000000000, 0xE35FA931A0000000}, // 1e15
+ {0x0000000000000000, 0x8E1BC9BF04000000}, // 1e16
+ {0x0000000000000000, 0xB1A2BC2EC5000000}, // 1e17
+ {0x0000000000000000, 0xDE0B6B3A76400000}, // 1e18
+ {0x0000000000000000, 0x8AC7230489E80000}, // 1e19
+ {0x0000000000000000, 0xAD78EBC5AC620000}, // 1e20
+ {0x0000000000000000, 0xD8D726B7177A8000}, // 1e21
+ {0x0000000000000000, 0x878678326EAC9000}, // 1e22
+ {0x0000000000000000, 0xA968163F0A57B400}, // 1e23
+ {0x0000000000000000, 0xD3C21BCECCEDA100}, // 1e24
+ {0x0000000000000000, 0x84595161401484A0}, // 1e25
+ {0x0000000000000000, 0xA56FA5B99019A5C8}, // 1e26
+ {0x0000000000000000, 0xCECB8F27F4200F3A}, // 1e27
+ {0x4000000000000000, 0x813F3978F8940984}, // 1e28
+ {0x5000000000000000, 0xA18F07D736B90BE5}, // 1e29
+ {0xA400000000000000, 0xC9F2C9CD04674EDE}, // 1e30
+ {0x4D00000000000000, 0xFC6F7C4045812296}, // 1e31
+ {0xF020000000000000, 0x9DC5ADA82B70B59D}, // 1e32
+ {0x6C28000000000000, 0xC5371912364CE305}, // 1e33
+ {0xC732000000000000, 0xF684DF56C3E01BC6}, // 1e34
+ {0x3C7F400000000000, 0x9A130B963A6C115C}, // 1e35
+ {0x4B9F100000000000, 0xC097CE7BC90715B3}, // 1e36
+ {0x1E86D40000000000, 0xF0BDC21ABB48DB20}, // 1e37
+ {0x1314448000000000, 0x96769950B50D88F4}, // 1e38
+ {0x17D955A000000000, 0xBC143FA4E250EB31}, // 1e39
+ {0x5DCFAB0800000000, 0xEB194F8E1AE525FD}, // 1e40
+ {0x5AA1CAE500000000, 0x92EFD1B8D0CF37BE}, // 1e41
+ {0xF14A3D9E40000000, 0xB7ABC627050305AD}, // 1e42
+ {0x6D9CCD05D0000000, 0xE596B7B0C643C719}, // 1e43
+ {0xE4820023A2000000, 0x8F7E32CE7BEA5C6F}, // 1e44
+ {0xDDA2802C8A800000, 0xB35DBF821AE4F38B}, // 1e45
+ {0xD50B2037AD200000, 0xE0352F62A19E306E}, // 1e46
+ {0x4526F422CC340000, 0x8C213D9DA502DE45}, // 1e47
+ {0x9670B12B7F410000, 0xAF298D050E4395D6}, // 1e48
+ {0x3C0CDD765F114000, 0xDAF3F04651D47B4C}, // 1e49
+ {0xA5880A69FB6AC800, 0x88D8762BF324CD0F}, // 1e50
+ {0x8EEA0D047A457A00, 0xAB0E93B6EFEE0053}, // 1e51
+ {0x72A4904598D6D880, 0xD5D238A4ABE98068}, // 1e52
+ {0x47A6DA2B7F864750, 0x85A36366EB71F041}, // 1e53
+ {0x999090B65F67D924, 0xA70C3C40A64E6C51}, // 1e54
+ {0xFFF4B4E3F741CF6D, 0xD0CF4B50CFE20765}, // 1e55
+ {0xBFF8F10E7A8921A4, 0x82818F1281ED449F}, // 1e56
+ {0xAFF72D52192B6A0D, 0xA321F2D7226895C7}, // 1e57
+ {0x9BF4F8A69F764490, 0xCBEA6F8CEB02BB39}, // 1e58
+ {0x02F236D04753D5B4, 0xFEE50B7025C36A08}, // 1e59
+ {0x01D762422C946590, 0x9F4F2726179A2245}, // 1e60
+ {0x424D3AD2B7B97EF5, 0xC722F0EF9D80AAD6}, // 1e61
+ {0xD2E0898765A7DEB2, 0xF8EBAD2B84E0D58B}, // 1e62
+ {0x63CC55F49F88EB2F, 0x9B934C3B330C8577}, // 1e63
+ {0x3CBF6B71C76B25FB, 0xC2781F49FFCFA6D5}, // 1e64
+ {0x8BEF464E3945EF7A, 0xF316271C7FC3908A}, // 1e65
+ {0x97758BF0E3CBB5AC, 0x97EDD871CFDA3A56}, // 1e66
+ {0x3D52EEED1CBEA317, 0xBDE94E8E43D0C8EC}, // 1e67
+ {0x4CA7AAA863EE4BDD, 0xED63A231D4C4FB27}, // 1e68
+ {0x8FE8CAA93E74EF6A, 0x945E455F24FB1CF8}, // 1e69
+ {0xB3E2FD538E122B44, 0xB975D6B6EE39E436}, // 1e70
+ {0x60DBBCA87196B616, 0xE7D34C64A9C85D44}, // 1e71
+ {0xBC8955E946FE31CD, 0x90E40FBEEA1D3A4A}, // 1e72
+ {0x6BABAB6398BDBE41, 0xB51D13AEA4A488DD}, // 1e73
+ {0xC696963C7EED2DD1, 0xE264589A4DCDAB14}, // 1e74
+ {0xFC1E1DE5CF543CA2, 0x8D7EB76070A08AEC}, // 1e75
+ {0x3B25A55F43294BCB, 0xB0DE65388CC8ADA8}, // 1e76
+ {0x49EF0EB713F39EBE, 0xDD15FE86AFFAD912}, // 1e77
+ {0x6E3569326C784337, 0x8A2DBF142DFCC7AB}, // 1e78
+ {0x49C2C37F07965404, 0xACB92ED9397BF996}, // 1e79
+ {0xDC33745EC97BE906, 0xD7E77A8F87DAF7FB}, // 1e80
+ {0x69A028BB3DED71A3, 0x86F0AC99B4E8DAFD}, // 1e81
+ {0xC40832EA0D68CE0C, 0xA8ACD7C0222311BC}, // 1e82
+ {0xF50A3FA490C30190, 0xD2D80DB02AABD62B}, // 1e83
+ {0x792667C6DA79E0FA, 0x83C7088E1AAB65DB}, // 1e84
+ {0x577001B891185938, 0xA4B8CAB1A1563F52}, // 1e85
+ {0xED4C0226B55E6F86, 0xCDE6FD5E09ABCF26}, // 1e86
+ {0x544F8158315B05B4, 0x80B05E5AC60B6178}, // 1e87
+ {0x696361AE3DB1C721, 0xA0DC75F1778E39D6}, // 1e88
+ {0x03BC3A19CD1E38E9, 0xC913936DD571C84C}, // 1e89
+ {0x04AB48A04065C723, 0xFB5878494ACE3A5F}, // 1e90
+ {0x62EB0D64283F9C76, 0x9D174B2DCEC0E47B}, // 1e91
+ {0x3BA5D0BD324F8394, 0xC45D1DF942711D9A}, // 1e92
+ {0xCA8F44EC7EE36479, 0xF5746577930D6500}, // 1e93
+ {0x7E998B13CF4E1ECB, 0x9968BF6ABBE85F20}, // 1e94
+ {0x9E3FEDD8C321A67E, 0xBFC2EF456AE276E8}, // 1e95
+ {0xC5CFE94EF3EA101E, 0xEFB3AB16C59B14A2}, // 1e96
+ {0xBBA1F1D158724A12, 0x95D04AEE3B80ECE5}, // 1e97
+ {0x2A8A6E45AE8EDC97, 0xBB445DA9CA61281F}, // 1e98
+ {0xF52D09D71A3293BD, 0xEA1575143CF97226}, // 1e99
+ {0x593C2626705F9C56, 0x924D692CA61BE758}, // 1e100
+ {0x6F8B2FB00C77836C, 0xB6E0C377CFA2E12E}, // 1e101
+ {0x0B6DFB9C0F956447, 0xE498F455C38B997A}, // 1e102
+ {0x4724BD4189BD5EAC, 0x8EDF98B59A373FEC}, // 1e103
+ {0x58EDEC91EC2CB657, 0xB2977EE300C50FE7}, // 1e104
+ {0x2F2967B66737E3ED, 0xDF3D5E9BC0F653E1}, // 1e105
+ {0xBD79E0D20082EE74, 0x8B865B215899F46C}, // 1e106
+ {0xECD8590680A3AA11, 0xAE67F1E9AEC07187}, // 1e107
+ {0xE80E6F4820CC9495, 0xDA01EE641A708DE9}, // 1e108
+ {0x3109058D147FDCDD, 0x884134FE908658B2}, // 1e109
+ {0xBD4B46F0599FD415, 0xAA51823E34A7EEDE}, // 1e110
+ {0x6C9E18AC7007C91A, 0xD4E5E2CDC1D1EA96}, // 1e111
+ {0x03E2CF6BC604DDB0, 0x850FADC09923329E}, // 1e112
+ {0x84DB8346B786151C, 0xA6539930BF6BFF45}, // 1e113
+ {0xE612641865679A63, 0xCFE87F7CEF46FF16}, // 1e114
+ {0x4FCB7E8F3F60C07E, 0x81F14FAE158C5F6E}, // 1e115
+ {0xE3BE5E330F38F09D, 0xA26DA3999AEF7749}, // 1e116
+ {0x5CADF5BFD3072CC5, 0xCB090C8001AB551C}, // 1e117
+ {0x73D9732FC7C8F7F6, 0xFDCB4FA002162A63}, // 1e118
+ {0x2867E7FDDCDD9AFA, 0x9E9F11C4014DDA7E}, // 1e119
+ {0xB281E1FD541501B8, 0xC646D63501A1511D}, // 1e120
+ {0x1F225A7CA91A4226, 0xF7D88BC24209A565}, // 1e121
+ {0x3375788DE9B06958, 0x9AE757596946075F}, // 1e122
+ {0x0052D6B1641C83AE, 0xC1A12D2FC3978937}, // 1e123
+ {0xC0678C5DBD23A49A, 0xF209787BB47D6B84}, // 1e124
+ {0xF840B7BA963646E0, 0x9745EB4D50CE6332}, // 1e125
+ {0xB650E5A93BC3D898, 0xBD176620A501FBFF}, // 1e126
+ {0xA3E51F138AB4CEBE, 0xEC5D3FA8CE427AFF}, // 1e127
+ {0xC66F336C36B10137, 0x93BA47C980E98CDF}, // 1e128
+ {0xB80B0047445D4184, 0xB8A8D9BBE123F017}, // 1e129
+ {0xA60DC059157491E5, 0xE6D3102AD96CEC1D}, // 1e130
+ {0x87C89837AD68DB2F, 0x9043EA1AC7E41392}, // 1e131
+ {0x29BABE4598C311FB, 0xB454E4A179DD1877}, // 1e132
+ {0xF4296DD6FEF3D67A, 0xE16A1DC9D8545E94}, // 1e133
+ {0x1899E4A65F58660C, 0x8CE2529E2734BB1D}, // 1e134
+ {0x5EC05DCFF72E7F8F, 0xB01AE745B101E9E4}, // 1e135
+ {0x76707543F4FA1F73, 0xDC21A1171D42645D}, // 1e136
+ {0x6A06494A791C53A8, 0x899504AE72497EBA}, // 1e137
+ {0x0487DB9D17636892, 0xABFA45DA0EDBDE69}, // 1e138
+ {0x45A9D2845D3C42B6, 0xD6F8D7509292D603}, // 1e139
+ {0x0B8A2392BA45A9B2, 0x865B86925B9BC5C2}, // 1e140
+ {0x8E6CAC7768D7141E, 0xA7F26836F282B732}, // 1e141
+ {0x3207D795430CD926, 0xD1EF0244AF2364FF}, // 1e142
+ {0x7F44E6BD49E807B8, 0x8335616AED761F1F}, // 1e143
+ {0x5F16206C9C6209A6, 0xA402B9C5A8D3A6E7}, // 1e144
+ {0x36DBA887C37A8C0F, 0xCD036837130890A1}, // 1e145
+ {0xC2494954DA2C9789, 0x802221226BE55A64}, // 1e146
+ {0xF2DB9BAA10B7BD6C, 0xA02AA96B06DEB0FD}, // 1e147
+ {0x6F92829494E5ACC7, 0xC83553C5C8965D3D}, // 1e148
+ {0xCB772339BA1F17F9, 0xFA42A8B73ABBF48C}, // 1e149
+ {0xFF2A760414536EFB, 0x9C69A97284B578D7}, // 1e150
+ {0xFEF5138519684ABA, 0xC38413CF25E2D70D}, // 1e151
+ {0x7EB258665FC25D69, 0xF46518C2EF5B8CD1}, // 1e152
+ {0xEF2F773FFBD97A61, 0x98BF2F79D5993802}, // 1e153
+ {0xAAFB550FFACFD8FA, 0xBEEEFB584AFF8603}, // 1e154
+ {0x95BA2A53F983CF38, 0xEEAABA2E5DBF6784}, // 1e155
+ {0xDD945A747BF26183, 0x952AB45CFA97A0B2}, // 1e156
+ {0x94F971119AEEF9E4, 0xBA756174393D88DF}, // 1e157
+ {0x7A37CD5601AAB85D, 0xE912B9D1478CEB17}, // 1e158
+ {0xAC62E055C10AB33A, 0x91ABB422CCB812EE}, // 1e159
+ {0x577B986B314D6009, 0xB616A12B7FE617AA}, // 1e160
+ {0xED5A7E85FDA0B80B, 0xE39C49765FDF9D94}, // 1e161
+ {0x14588F13BE847307, 0x8E41ADE9FBEBC27D}, // 1e162
+ {0x596EB2D8AE258FC8, 0xB1D219647AE6B31C}, // 1e163
+ {0x6FCA5F8ED9AEF3BB, 0xDE469FBD99A05FE3}, // 1e164
+ {0x25DE7BB9480D5854, 0x8AEC23D680043BEE}, // 1e165
+ {0xAF561AA79A10AE6A, 0xADA72CCC20054AE9}, // 1e166
+ {0x1B2BA1518094DA04, 0xD910F7FF28069DA4}, // 1e167
+ {0x90FB44D2F05D0842, 0x87AA9AFF79042286}, // 1e168
+ {0x353A1607AC744A53, 0xA99541BF57452B28}, // 1e169
+ {0x42889B8997915CE8, 0xD3FA922F2D1675F2}, // 1e170
+ {0x69956135FEBADA11, 0x847C9B5D7C2E09B7}, // 1e171
+ {0x43FAB9837E699095, 0xA59BC234DB398C25}, // 1e172
+ {0x94F967E45E03F4BB, 0xCF02B2C21207EF2E}, // 1e173
+ {0x1D1BE0EEBAC278F5, 0x8161AFB94B44F57D}, // 1e174
+ {0x6462D92A69731732, 0xA1BA1BA79E1632DC}, // 1e175
+ {0x7D7B8F7503CFDCFE, 0xCA28A291859BBF93}, // 1e176
+ {0x5CDA735244C3D43E, 0xFCB2CB35E702AF78}, // 1e177
+ {0x3A0888136AFA64A7, 0x9DEFBF01B061ADAB}, // 1e178
+ {0x088AAA1845B8FDD0, 0xC56BAEC21C7A1916}, // 1e179
+ {0x8AAD549E57273D45, 0xF6C69A72A3989F5B}, // 1e180
+ {0x36AC54E2F678864B, 0x9A3C2087A63F6399}, // 1e181
+ {0x84576A1BB416A7DD, 0xC0CB28A98FCF3C7F}, // 1e182
+ {0x656D44A2A11C51D5, 0xF0FDF2D3F3C30B9F}, // 1e183
+ {0x9F644AE5A4B1B325, 0x969EB7C47859E743}, // 1e184
+ {0x873D5D9F0DDE1FEE, 0xBC4665B596706114}, // 1e185
+ {0xA90CB506D155A7EA, 0xEB57FF22FC0C7959}, // 1e186
+ {0x09A7F12442D588F2, 0x9316FF75DD87CBD8}, // 1e187
+ {0x0C11ED6D538AEB2F, 0xB7DCBF5354E9BECE}, // 1e188
+ {0x8F1668C8A86DA5FA, 0xE5D3EF282A242E81}, // 1e189
+ {0xF96E017D694487BC, 0x8FA475791A569D10}, // 1e190
+ {0x37C981DCC395A9AC, 0xB38D92D760EC4455}, // 1e191
+ {0x85BBE253F47B1417, 0xE070F78D3927556A}, // 1e192
+ {0x93956D7478CCEC8E, 0x8C469AB843B89562}, // 1e193
+ {0x387AC8D1970027B2, 0xAF58416654A6BABB}, // 1e194
+ {0x06997B05FCC0319E, 0xDB2E51BFE9D0696A}, // 1e195
+ {0x441FECE3BDF81F03, 0x88FCF317F22241E2}, // 1e196
+ {0xD527E81CAD7626C3, 0xAB3C2FDDEEAAD25A}, // 1e197
+ {0x8A71E223D8D3B074, 0xD60B3BD56A5586F1}, // 1e198
+ {0xF6872D5667844E49, 0x85C7056562757456}, // 1e199
+ {0xB428F8AC016561DB, 0xA738C6BEBB12D16C}, // 1e200
+ {0xE13336D701BEBA52, 0xD106F86E69D785C7}, // 1e201
+ {0xECC0024661173473, 0x82A45B450226B39C}, // 1e202
+ {0x27F002D7F95D0190, 0xA34D721642B06084}, // 1e203
+ {0x31EC038DF7B441F4, 0xCC20CE9BD35C78A5}, // 1e204
+ {0x7E67047175A15271, 0xFF290242C83396CE}, // 1e205
+ {0x0F0062C6E984D386, 0x9F79A169BD203E41}, // 1e206
+ {0x52C07B78A3E60868, 0xC75809C42C684DD1}, // 1e207
+ {0xA7709A56CCDF8A82, 0xF92E0C3537826145}, // 1e208
+ {0x88A66076400BB691, 0x9BBCC7A142B17CCB}, // 1e209
+ {0x6ACFF893D00EA435, 0xC2ABF989935DDBFE}, // 1e210
+ {0x0583F6B8C4124D43, 0xF356F7EBF83552FE}, // 1e211
+ {0xC3727A337A8B704A, 0x98165AF37B2153DE}, // 1e212
+ {0x744F18C0592E4C5C, 0xBE1BF1B059E9A8D6}, // 1e213
+ {0x1162DEF06F79DF73, 0xEDA2EE1C7064130C}, // 1e214
+ {0x8ADDCB5645AC2BA8, 0x9485D4D1C63E8BE7}, // 1e215
+ {0x6D953E2BD7173692, 0xB9A74A0637CE2EE1}, // 1e216
+ {0xC8FA8DB6CCDD0437, 0xE8111C87C5C1BA99}, // 1e217
+ {0x1D9C9892400A22A2, 0x910AB1D4DB9914A0}, // 1e218
+ {0x2503BEB6D00CAB4B, 0xB54D5E4A127F59C8}, // 1e219
+ {0x2E44AE64840FD61D, 0xE2A0B5DC971F303A}, // 1e220
+ {0x5CEAECFED289E5D2, 0x8DA471A9DE737E24}, // 1e221
+ {0x7425A83E872C5F47, 0xB10D8E1456105DAD}, // 1e222
+ {0xD12F124E28F77719, 0xDD50F1996B947518}, // 1e223
+ {0x82BD6B70D99AAA6F, 0x8A5296FFE33CC92F}, // 1e224
+ {0x636CC64D1001550B, 0xACE73CBFDC0BFB7B}, // 1e225
+ {0x3C47F7E05401AA4E, 0xD8210BEFD30EFA5A}, // 1e226
+ {0x65ACFAEC34810A71, 0x8714A775E3E95C78}, // 1e227
+ {0x7F1839A741A14D0D, 0xA8D9D1535CE3B396}, // 1e228
+ {0x1EDE48111209A050, 0xD31045A8341CA07C}, // 1e229
+ {0x934AED0AAB460432, 0x83EA2B892091E44D}, // 1e230
+ {0xF81DA84D5617853F, 0xA4E4B66B68B65D60}, // 1e231
+ {0x36251260AB9D668E, 0xCE1DE40642E3F4B9}, // 1e232
+ {0xC1D72B7C6B426019, 0x80D2AE83E9CE78F3}, // 1e233
+ {0xB24CF65B8612F81F, 0xA1075A24E4421730}, // 1e234
+ {0xDEE033F26797B627, 0xC94930AE1D529CFC}, // 1e235
+ {0x169840EF017DA3B1, 0xFB9B7CD9A4A7443C}, // 1e236
+ {0x8E1F289560EE864E, 0x9D412E0806E88AA5}, // 1e237
+ {0xF1A6F2BAB92A27E2, 0xC491798A08A2AD4E}, // 1e238
+ {0xAE10AF696774B1DB, 0xF5B5D7EC8ACB58A2}, // 1e239
+ {0xACCA6DA1E0A8EF29, 0x9991A6F3D6BF1765}, // 1e240
+ {0x17FD090A58D32AF3, 0xBFF610B0CC6EDD3F}, // 1e241
+ {0xDDFC4B4CEF07F5B0, 0xEFF394DCFF8A948E}, // 1e242
+ {0x4ABDAF101564F98E, 0x95F83D0A1FB69CD9}, // 1e243
+ {0x9D6D1AD41ABE37F1, 0xBB764C4CA7A4440F}, // 1e244
+ {0x84C86189216DC5ED, 0xEA53DF5FD18D5513}, // 1e245
+ {0x32FD3CF5B4E49BB4, 0x92746B9BE2F8552C}, // 1e246
+ {0x3FBC8C33221DC2A1, 0xB7118682DBB66A77}, // 1e247
+ {0x0FABAF3FEAA5334A, 0xE4D5E82392A40515}, // 1e248
+ {0x29CB4D87F2A7400E, 0x8F05B1163BA6832D}, // 1e249
+ {0x743E20E9EF511012, 0xB2C71D5BCA9023F8}, // 1e250
+ {0x914DA9246B255416, 0xDF78E4B2BD342CF6}, // 1e251
+ {0x1AD089B6C2F7548E, 0x8BAB8EEFB6409C1A}, // 1e252
+ {0xA184AC2473B529B1, 0xAE9672ABA3D0C320}, // 1e253
+ {0xC9E5D72D90A2741E, 0xDA3C0F568CC4F3E8}, // 1e254
+ {0x7E2FA67C7A658892, 0x8865899617FB1871}, // 1e255
+ {0xDDBB901B98FEEAB7, 0xAA7EEBFB9DF9DE8D}, // 1e256
+ {0x552A74227F3EA565, 0xD51EA6FA85785631}, // 1e257
+ {0xD53A88958F87275F, 0x8533285C936B35DE}, // 1e258
+ {0x8A892ABAF368F137, 0xA67FF273B8460356}, // 1e259
+ {0x2D2B7569B0432D85, 0xD01FEF10A657842C}, // 1e260
+ {0x9C3B29620E29FC73, 0x8213F56A67F6B29B}, // 1e261
+ {0x8349F3BA91B47B8F, 0xA298F2C501F45F42}, // 1e262
+ {0x241C70A936219A73, 0xCB3F2F7642717713}, // 1e263
+ {0xED238CD383AA0110, 0xFE0EFB53D30DD4D7}, // 1e264
+ {0xF4363804324A40AA, 0x9EC95D1463E8A506}, // 1e265
+ {0xB143C6053EDCD0D5, 0xC67BB4597CE2CE48}, // 1e266
+ {0xDD94B7868E94050A, 0xF81AA16FDC1B81DA}, // 1e267
+ {0xCA7CF2B4191C8326, 0x9B10A4E5E9913128}, // 1e268
+ {0xFD1C2F611F63A3F0, 0xC1D4CE1F63F57D72}, // 1e269
+ {0xBC633B39673C8CEC, 0xF24A01A73CF2DCCF}, // 1e270
+ {0xD5BE0503E085D813, 0x976E41088617CA01}, // 1e271
+ {0x4B2D8644D8A74E18, 0xBD49D14AA79DBC82}, // 1e272
+ {0xDDF8E7D60ED1219E, 0xEC9C459D51852BA2}, // 1e273
+ {0xCABB90E5C942B503, 0x93E1AB8252F33B45}, // 1e274
+ {0x3D6A751F3B936243, 0xB8DA1662E7B00A17}, // 1e275
+ {0x0CC512670A783AD4, 0xE7109BFBA19C0C9D}, // 1e276
+ {0x27FB2B80668B24C5, 0x906A617D450187E2}, // 1e277
+ {0xB1F9F660802DEDF6, 0xB484F9DC9641E9DA}, // 1e278
+ {0x5E7873F8A0396973, 0xE1A63853BBD26451}, // 1e279
+ {0xDB0B487B6423E1E8, 0x8D07E33455637EB2}, // 1e280
+ {0x91CE1A9A3D2CDA62, 0xB049DC016ABC5E5F}, // 1e281
+ {0x7641A140CC7810FB, 0xDC5C5301C56B75F7}, // 1e282
+ {0xA9E904C87FCB0A9D, 0x89B9B3E11B6329BA}, // 1e283
+ {0x546345FA9FBDCD44, 0xAC2820D9623BF429}, // 1e284
+ {0xA97C177947AD4095, 0xD732290FBACAF133}, // 1e285
+ {0x49ED8EABCCCC485D, 0x867F59A9D4BED6C0}, // 1e286
+ {0x5C68F256BFFF5A74, 0xA81F301449EE8C70}, // 1e287
+ {0x73832EEC6FFF3111, 0xD226FC195C6A2F8C}, // 1e288
+ {0xC831FD53C5FF7EAB, 0x83585D8FD9C25DB7}, // 1e289
+ {0xBA3E7CA8B77F5E55, 0xA42E74F3D032F525}, // 1e290
+ {0x28CE1BD2E55F35EB, 0xCD3A1230C43FB26F}, // 1e291
+ {0x7980D163CF5B81B3, 0x80444B5E7AA7CF85}, // 1e292
+ {0xD7E105BCC332621F, 0xA0555E361951C366}, // 1e293
+ {0x8DD9472BF3FEFAA7, 0xC86AB5C39FA63440}, // 1e294
+ {0xB14F98F6F0FEB951, 0xFA856334878FC150}, // 1e295
+ {0x6ED1BF9A569F33D3, 0x9C935E00D4B9D8D2}, // 1e296
+ {0x0A862F80EC4700C8, 0xC3B8358109E84F07}, // 1e297
+ {0xCD27BB612758C0FA, 0xF4A642E14C6262C8}, // 1e298
+ {0x8038D51CB897789C, 0x98E7E9CCCFBD7DBD}, // 1e299
+ {0xE0470A63E6BD56C3, 0xBF21E44003ACDD2C}, // 1e300
+ {0x1858CCFCE06CAC74, 0xEEEA5D5004981478}, // 1e301
+ {0x0F37801E0C43EBC8, 0x95527A5202DF0CCB}, // 1e302
+ {0xD30560258F54E6BA, 0xBAA718E68396CFFD}, // 1e303
+ {0x47C6B82EF32A2069, 0xE950DF20247C83FD}, // 1e304
+ {0x4CDC331D57FA5441, 0x91D28B7416CDD27E}, // 1e305
+ {0xE0133FE4ADF8E952, 0xB6472E511C81471D}, // 1e306
+ {0x58180FDDD97723A6, 0xE3D8F9E563A198E5}, // 1e307
+ {0x570F09EAA7EA7648, 0x8E679C2F5E44FF8F}, // 1e308
+ {0x2CD2CC6551E513DA, 0xB201833B35D63F73}, // 1e309
+ {0xF8077F7EA65E58D1, 0xDE81E40A034BCF4F}, // 1e310
+ {0xFB04AFAF27FAF782, 0x8B112E86420F6191}, // 1e311
+ {0x79C5DB9AF1F9B563, 0xADD57A27D29339F6}, // 1e312
+ {0x18375281AE7822BC, 0xD94AD8B1C7380874}, // 1e313
+ {0x8F2293910D0B15B5, 0x87CEC76F1C830548}, // 1e314
+ {0xB2EB3875504DDB22, 0xA9C2794AE3A3C69A}, // 1e315
+ {0x5FA60692A46151EB, 0xD433179D9C8CB841}, // 1e316
+ {0xDBC7C41BA6BCD333, 0x849FEEC281D7F328}, // 1e317
+ {0x12B9B522906C0800, 0xA5C7EA73224DEFF3}, // 1e318
+ {0xD768226B34870A00, 0xCF39E50FEAE16BEF}, // 1e319
+ {0xE6A1158300D46640, 0x81842F29F2CCE375}, // 1e320
+ {0x60495AE3C1097FD0, 0xA1E53AF46F801C53}, // 1e321
+ {0x385BB19CB14BDFC4, 0xCA5E89B18B602368}, // 1e322
+ {0x46729E03DD9ED7B5, 0xFCF62C1DEE382C42}, // 1e323
+ {0x6C07A2C26A8346D1, 0x9E19DB92B4E31BA9}, // 1e324
+ {0xC7098B7305241885, 0xC5A05277621BE293}, // 1e325
+ {0xB8CBEE4FC66D1EA7, 0xF70867153AA2DB38}, // 1e326
+ {0x737F74F1DC043328, 0x9A65406D44A5C903}, // 1e327
+ {0x505F522E53053FF2, 0xC0FE908895CF3B44}, // 1e328
+ {0x647726B9E7C68FEF, 0xF13E34AABB430A15}, // 1e329
+ {0x5ECA783430DC19F5, 0x96C6E0EAB509E64D}, // 1e330
+ {0xB67D16413D132072, 0xBC789925624C5FE0}, // 1e331
+ {0xE41C5BD18C57E88F, 0xEB96BF6EBADF77D8}, // 1e332
+ {0x8E91B962F7B6F159, 0x933E37A534CBAAE7}, // 1e333
+ {0x723627BBB5A4ADB0, 0xB80DC58E81FE95A1}, // 1e334
+ {0xCEC3B1AAA30DD91C, 0xE61136F2227E3B09}, // 1e335
+ {0x213A4F0AA5E8A7B1, 0x8FCAC257558EE4E6}, // 1e336
+ {0xA988E2CD4F62D19D, 0xB3BD72ED2AF29E1F}, // 1e337
+ {0x93EB1B80A33B8605, 0xE0ACCFA875AF45A7}, // 1e338
+ {0xBC72F130660533C3, 0x8C6C01C9498D8B88}, // 1e339
+ {0xEB8FAD7C7F8680B4, 0xAF87023B9BF0EE6A}, // 1e340
+ {0xA67398DB9F6820E1, 0xDB68C2CA82ED2A05}, // 1e341
+ {0x88083F8943A1148C, 0x892179BE91D43A43}, // 1e342
+ {0x6A0A4F6B948959B0, 0xAB69D82E364948D4}, // 1e343
+ {0x848CE34679ABB01C, 0xD6444E39C3DB9B09}, // 1e344
+ {0xF2D80E0C0C0B4E11, 0x85EAB0E41A6940E5}, // 1e345
+ {0x6F8E118F0F0E2195, 0xA7655D1D2103911F}, // 1e346
+ {0x4B7195F2D2D1A9FB, 0xD13EB46469447567}, // 1e347
+}
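The 1e43 identity claimed in the table's header comment can be checked against the 1e43 row directly; a sketch with math/big (not part of the package):

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		// Row for 1e43: {0x6D9CCD05D0000000, 0xE596B7B0C643C719}.
		// The comment claims 1e43 == 0xE596B7B0_C643C719_6D9CCD05_D0000000 * 2^15.
		mant := new(big.Int).SetUint64(0xE596B7B0C643C719)
		mant.Lsh(mant, 64)
		mant.Add(mant, new(big.Int).SetUint64(0x6D9CCD05D0000000))
		got := mant.Lsh(mant, 15) // mantissa * 2^15
		want := new(big.Int).Exp(big.NewInt(10), big.NewInt(43), nil)
		fmt.Println(got.Cmp(want) == 0) // true
	}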
diff --git a/contrib/go/_std_1.19/src/strconv/ftoa.go b/contrib/go/_std_1.19/src/strconv/ftoa.go
new file mode 100644
index 0000000000..f602d0ffe6
--- /dev/null
+++ b/contrib/go/_std_1.19/src/strconv/ftoa.go
@@ -0,0 +1,585 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Binary to decimal floating point conversion.
+// Algorithm:
+// 1) store mantissa in multiprecision decimal
+// 2) shift decimal by exponent
+// 3) read digits out & format
+
+package strconv
+
+import "math"
+
+// TODO: move elsewhere?
+type floatInfo struct {
+ mantbits uint
+ expbits uint
+ bias int
+}
+
+var float32info = floatInfo{23, 8, -127}
+var float64info = floatInfo{52, 11, -1023}
+
+// FormatFloat converts the floating-point number f to a string,
+// according to the format fmt and precision prec. It rounds the
+// result assuming that the original was obtained from a floating-point
+// value of bitSize bits (32 for float32, 64 for float64).
+//
+// The format fmt is one of
+// 'b' (-ddddp±ddd, a binary exponent),
+// 'e' (-d.dddde±dd, a decimal exponent),
+// 'E' (-d.ddddE±dd, a decimal exponent),
+// 'f' (-ddd.dddd, no exponent),
+// 'g' ('e' for large exponents, 'f' otherwise),
+// 'G' ('E' for large exponents, 'f' otherwise),
+// 'x' (-0xd.ddddp±ddd, a hexadecimal fraction and binary exponent), or
+// 'X' (-0Xd.ddddP±ddd, a hexadecimal fraction and binary exponent).
+//
+// The precision prec controls the number of digits (excluding the exponent)
+// printed by the 'e', 'E', 'f', 'g', 'G', 'x', and 'X' formats.
+// For 'e', 'E', 'f', 'x', and 'X', it is the number of digits after the decimal point.
+// For 'g' and 'G' it is the maximum number of significant digits (trailing
+// zeros are removed).
+// The special precision -1 uses the smallest number of digits
+// necessary such that ParseFloat will return f exactly.
+func FormatFloat(f float64, fmt byte, prec, bitSize int) string {
+ return string(genericFtoa(make([]byte, 0, max(prec+4, 24)), f, fmt, prec, bitSize))
+}
+
+// AppendFloat appends the string form of the floating-point number f,
+// as generated by FormatFloat, to dst and returns the extended buffer.
+func AppendFloat(dst []byte, f float64, fmt byte, prec, bitSize int) []byte {
+ return genericFtoa(dst, f, fmt, prec, bitSize)
+}
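FormatFloat's fmt/prec combinations, exercised; the expected outputs follow from the doc comment above (illustration only):

	package main

	import (
		"fmt"
		"strconv"
	)

	func main() {
		f := 1.0 / 3.0
		fmt.Println(strconv.FormatFloat(f, 'e', 3, 64))  // 3.333e-01
		fmt.Println(strconv.FormatFloat(f, 'f', 3, 64))  // 0.333
		fmt.Println(strconv.FormatFloat(f, 'x', -1, 64)) // 0x1.5555555555555p-02
		// prec -1: shortest string that parses back to exactly f.
		s := strconv.FormatFloat(f, 'g', -1, 64)
		g, _ := strconv.ParseFloat(s, 64)
		fmt.Println(s, g == f) // 0.3333333333333333 true
	}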
+
+func genericFtoa(dst []byte, val float64, fmt byte, prec, bitSize int) []byte {
+ var bits uint64
+ var flt *floatInfo
+ switch bitSize {
+ case 32:
+ bits = uint64(math.Float32bits(float32(val)))
+ flt = &float32info
+ case 64:
+ bits = math.Float64bits(val)
+ flt = &float64info
+ default:
+ panic("strconv: illegal AppendFloat/FormatFloat bitSize")
+ }
+
+ neg := bits>>(flt.expbits+flt.mantbits) != 0
+ exp := int(bits>>flt.mantbits) & (1<<flt.expbits - 1)
+ mant := bits & (uint64(1)<<flt.mantbits - 1)
+
+ switch exp {
+ case 1<<flt.expbits - 1:
+ // Inf, NaN
+ var s string
+ switch {
+ case mant != 0:
+ s = "NaN"
+ case neg:
+ s = "-Inf"
+ default:
+ s = "+Inf"
+ }
+ return append(dst, s...)
+
+ case 0:
+ // denormalized
+ exp++
+
+ default:
+ // add implicit top bit
+ mant |= uint64(1) << flt.mantbits
+ }
+ exp += flt.bias
+
+ // Pick off easy binary, hex formats.
+ if fmt == 'b' {
+ return fmtB(dst, neg, mant, exp, flt)
+ }
+ if fmt == 'x' || fmt == 'X' {
+ return fmtX(dst, prec, fmt, neg, mant, exp, flt)
+ }
+
+ if !optimize {
+ return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
+ }
+
+ var digs decimalSlice
+ ok := false
+ // Negative precision means "only as much as needed to be exact."
+ shortest := prec < 0
+ if shortest {
+ // Use Ryu algorithm.
+ var buf [32]byte
+ digs.d = buf[:]
+ ryuFtoaShortest(&digs, mant, exp-int(flt.mantbits), flt)
+ ok = true
+ // Precision for shortest representation mode.
+ switch fmt {
+ case 'e', 'E':
+ prec = max(digs.nd-1, 0)
+ case 'f':
+ prec = max(digs.nd-digs.dp, 0)
+ case 'g', 'G':
+ prec = digs.nd
+ }
+ } else if fmt != 'f' {
+ // Fixed number of digits.
+ digits := prec
+ switch fmt {
+ case 'e', 'E':
+ digits++
+ case 'g', 'G':
+ if prec == 0 {
+ prec = 1
+ }
+ digits = prec
+ default:
+ // Invalid mode.
+ digits = 1
+ }
+ var buf [24]byte
+ if bitSize == 32 && digits <= 9 {
+ digs.d = buf[:]
+ ryuFtoaFixed32(&digs, uint32(mant), exp-int(flt.mantbits), digits)
+ ok = true
+ } else if digits <= 18 {
+ digs.d = buf[:]
+ ryuFtoaFixed64(&digs, mant, exp-int(flt.mantbits), digits)
+ ok = true
+ }
+ }
+ if !ok {
+ return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
+ }
+ return formatDigits(dst, shortest, neg, digs, prec, fmt)
+}
+
+// bigFtoa uses multiprecision computations to format a float.
+func bigFtoa(dst []byte, prec int, fmt byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte {
+ d := new(decimal)
+ d.Assign(mant)
+ d.Shift(exp - int(flt.mantbits))
+ var digs decimalSlice
+ shortest := prec < 0
+ if shortest {
+ roundShortest(d, mant, exp, flt)
+ digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
+ // Precision for shortest representation mode.
+ switch fmt {
+ case 'e', 'E':
+ prec = digs.nd - 1
+ case 'f':
+ prec = max(digs.nd-digs.dp, 0)
+ case 'g', 'G':
+ prec = digs.nd
+ }
+ } else {
+ // Round appropriately.
+ switch fmt {
+ case 'e', 'E':
+ d.Round(prec + 1)
+ case 'f':
+ d.Round(d.dp + prec)
+ case 'g', 'G':
+ if prec == 0 {
+ prec = 1
+ }
+ d.Round(prec)
+ }
+ digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
+ }
+ return formatDigits(dst, shortest, neg, digs, prec, fmt)
+}
+
+func formatDigits(dst []byte, shortest bool, neg bool, digs decimalSlice, prec int, fmt byte) []byte {
+ switch fmt {
+ case 'e', 'E':
+ return fmtE(dst, neg, digs, prec, fmt)
+ case 'f':
+ return fmtF(dst, neg, digs, prec)
+ case 'g', 'G':
+ // trailing fractional zeros in 'e' form will be trimmed.
+ eprec := prec
+ if eprec > digs.nd && digs.nd >= digs.dp {
+ eprec = digs.nd
+ }
+ // %e is used if the exponent from the conversion
+ // is less than -4 or greater than or equal to the precision.
+ // if precision was the shortest possible, use precision 6 for this decision.
+ if shortest {
+ eprec = 6
+ }
+ exp := digs.dp - 1
+ if exp < -4 || exp >= eprec {
+ if prec > digs.nd {
+ prec = digs.nd
+ }
+ return fmtE(dst, neg, digs, prec-1, fmt+'e'-'g')
+ }
+ if prec > digs.dp {
+ prec = digs.nd
+ }
+ return fmtF(dst, neg, digs, max(prec-digs.dp, 0))
+ }
+
+ // unknown format
+ return append(dst, '%', fmt)
+}
+
+// roundShortest rounds d (= mant * 2^exp) to the shortest number of digits
+// that will let the original floating point value be precisely reconstructed.
+func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) {
+ // If mantissa is zero, the number is zero; stop now.
+ if mant == 0 {
+ d.nd = 0
+ return
+ }
+
+ // Compute upper and lower such that any decimal number
+ // between upper and lower (possibly inclusive)
+ // will round to the original floating point number.
+
+ // We may see at once that the number is already shortest.
+ //
+ // Suppose d is not denormal, so that 2^exp <= d < 10^dp.
+ // The closest shorter number is at least 10^(dp-nd) away.
+ // The lower/upper bounds computed below are at distance
+ // at most 2^(exp-mantbits).
+ //
+ // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits),
+ // or equivalently log2(10)*(dp-nd) > exp-mantbits.
+ // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32).
+ minexp := flt.bias + 1 // minimum possible exponent
+ if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) {
+ // The number is already shortest.
+ return
+ }
+
+ // d = mant << (exp - mantbits)
+ // Next highest floating point number is mant+1 << exp-mantbits.
+ // Our upper bound is halfway between, mant*2+1 << exp-mantbits-1.
+ upper := new(decimal)
+ upper.Assign(mant*2 + 1)
+ upper.Shift(exp - int(flt.mantbits) - 1)
+
+ // d = mant << (exp - mantbits)
+ // Next lowest floating point number is mant-1 << exp-mantbits,
+ // unless mant-1 drops the significant bit and exp is not the minimum exp,
+ // in which case the next lowest is mant*2-1 << exp-mantbits-1.
+ // Either way, call it mantlo << explo-mantbits.
+ // Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1.
+ var mantlo uint64
+ var explo int
+ if mant > 1<<flt.mantbits || exp == minexp {
+ mantlo = mant - 1
+ explo = exp
+ } else {
+ mantlo = mant*2 - 1
+ explo = exp - 1
+ }
+ lower := new(decimal)
+ lower.Assign(mantlo*2 + 1)
+ lower.Shift(explo - int(flt.mantbits) - 1)
+
+ // The upper and lower bounds are possible outputs only if
+ // the original mantissa is even, so that IEEE round-to-even
+ // would round to the original mantissa and not the neighbors.
+ inclusive := mant%2 == 0
+
+ // As we walk the digits we want to know whether rounding up would fall
+ // within the upper bound. This is tracked by upperdelta:
+ //
+ // If upperdelta == 0, the digits of d and upper are the same so far.
+ //
+ // If upperdelta == 1, we saw a difference of 1 between d and upper on a
+ // previous digit and subsequently only 9s for d and 0s for upper.
+ // (Thus rounding up may fall outside the bound, if it is exclusive.)
+ //
+ // If upperdelta == 2, then the difference is greater than 1
+ // and we know that rounding up falls within the bound.
+ var upperdelta uint8
+
+ // Now we can figure out the minimum number of digits required.
+ // Walk along until d has distinguished itself from upper and lower.
+ for ui := 0; ; ui++ {
+ // lower, d, and upper may have the decimal points at different
+ // places. In this case upper is the longest, so we iterate from
+ // ui==0 and start li and mi at (possibly) -1.
+ mi := ui - upper.dp + d.dp
+ if mi >= d.nd {
+ break
+ }
+ li := ui - upper.dp + lower.dp
+ l := byte('0') // lower digit
+ if li >= 0 && li < lower.nd {
+ l = lower.d[li]
+ }
+ m := byte('0') // middle digit
+ if mi >= 0 {
+ m = d.d[mi]
+ }
+ u := byte('0') // upper digit
+ if ui < upper.nd {
+ u = upper.d[ui]
+ }
+
+ // Okay to round down (truncate) if lower has a different digit
+ // or if lower is inclusive and is exactly the result of rounding
+ // down (i.e., we have reached the final digit of lower).
+ okdown := l != m || inclusive && li+1 == lower.nd
+
+ switch {
+ case upperdelta == 0 && m+1 < u:
+ // Example:
+ // m = 12345xxx
+ // u = 12347xxx
+ upperdelta = 2
+ case upperdelta == 0 && m != u:
+ // Example:
+ // m = 12345xxx
+ // u = 12346xxx
+ upperdelta = 1
+ case upperdelta == 1 && (m != '9' || u != '0'):
+ // Example:
+ // m = 1234598x
+ // u = 1234600x
+ upperdelta = 2
+ }
+ // Okay to round up if upper has a different digit and either upper
+ // is inclusive or upper is bigger than the result of rounding up.
+ okup := upperdelta > 0 && (inclusive || upperdelta > 1 || ui+1 < upper.nd)
+
+ // If it's okay to do either, then round to the nearest one.
+ // If it's okay to do only one, do it.
+ switch {
+ case okdown && okup:
+ d.Round(mi + 1)
+ return
+ case okdown:
+ d.RoundDown(mi + 1)
+ return
+ case okup:
+ d.RoundUp(mi + 1)
+ return
+ }
+ }
+}
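roundShortest is why prec -1 prints the familiar decimal rather than the stored binary value; for example (illustrative):

	package main

	import (
		"fmt"
		"strconv"
	)

	func main() {
		// The nearest float64 to 0.1 is slightly above 0.1, yet "0.1" is
		// the shortest decimal that rounds back to that exact float64.
		fmt.Println(strconv.FormatFloat(0.1, 'g', -1, 64)) // 0.1
		fmt.Println(strconv.FormatFloat(0.1, 'f', 20, 64)) // 0.10000000000000000555
	}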
+
+type decimalSlice struct {
+ d []byte
+ nd, dp int
+ neg bool
+}
+
+// %e: -d.ddddde±dd
+func fmtE(dst []byte, neg bool, d decimalSlice, prec int, fmt byte) []byte {
+ // sign
+ if neg {
+ dst = append(dst, '-')
+ }
+
+ // first digit
+ ch := byte('0')
+ if d.nd != 0 {
+ ch = d.d[0]
+ }
+ dst = append(dst, ch)
+
+ // .moredigits
+ if prec > 0 {
+ dst = append(dst, '.')
+ i := 1
+ m := min(d.nd, prec+1)
+ if i < m {
+ dst = append(dst, d.d[i:m]...)
+ i = m
+ }
+ for ; i <= prec; i++ {
+ dst = append(dst, '0')
+ }
+ }
+
+ // e±
+ dst = append(dst, fmt)
+ exp := d.dp - 1
+ if d.nd == 0 { // special case: 0 has exponent 0
+ exp = 0
+ }
+ if exp < 0 {
+ ch = '-'
+ exp = -exp
+ } else {
+ ch = '+'
+ }
+ dst = append(dst, ch)
+
+ // dd or ddd
+ switch {
+ case exp < 10:
+ dst = append(dst, '0', byte(exp)+'0')
+ case exp < 100:
+ dst = append(dst, byte(exp/10)+'0', byte(exp%10)+'0')
+ default:
+ dst = append(dst, byte(exp/100)+'0', byte(exp/10)%10+'0', byte(exp%10)+'0')
+ }
+
+ return dst
+}
+
+// %f: -ddddddd.ddddd
+func fmtF(dst []byte, neg bool, d decimalSlice, prec int) []byte {
+ // sign
+ if neg {
+ dst = append(dst, '-')
+ }
+
+ // integer, padded with zeros as needed.
+ if d.dp > 0 {
+ m := min(d.nd, d.dp)
+ dst = append(dst, d.d[:m]...)
+ for ; m < d.dp; m++ {
+ dst = append(dst, '0')
+ }
+ } else {
+ dst = append(dst, '0')
+ }
+
+ // fraction
+ if prec > 0 {
+ dst = append(dst, '.')
+ for i := 0; i < prec; i++ {
+ ch := byte('0')
+ if j := d.dp + i; 0 <= j && j < d.nd {
+ ch = d.d[j]
+ }
+ dst = append(dst, ch)
+ }
+ }
+
+ return dst
+}
+
+// %b: -ddddddddp±ddd
+func fmtB(dst []byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte {
+ // sign
+ if neg {
+ dst = append(dst, '-')
+ }
+
+ // mantissa
+ dst, _ = formatBits(dst, mant, 10, false, true)
+
+ // p
+ dst = append(dst, 'p')
+
+ // ±exponent
+ exp -= int(flt.mantbits)
+ if exp >= 0 {
+ dst = append(dst, '+')
+ }
+ dst, _ = formatBits(dst, uint64(exp), 10, exp < 0, true)
+
+ return dst
+}
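fmtB emits the mantissa and binary exponent exactly as stored, so the 'b' form is lossless by construction; for instance (illustrative):

	package main

	import (
		"fmt"
		"strconv"
	)

	func main() {
		// 0.5 is stored as mantissa 2^52 with exponent -53 (after the
		// implicit bit is materialized), and 'b' prints exactly that.
		fmt.Println(strconv.FormatFloat(0.5, 'b', -1, 64)) // 4503599627370496p-53
	}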
+
+// %x: -0x1.yyyyyyyyp±ddd or -0x0p+0. (y is hex digit, d is decimal digit)
+func fmtX(dst []byte, prec int, fmt byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte {
+ if mant == 0 {
+ exp = 0
+ }
+
+ // Shift digits so leading 1 (if any) is at bit 1<<60.
+ mant <<= 60 - flt.mantbits
+ for mant != 0 && mant&(1<<60) == 0 {
+ mant <<= 1
+ exp--
+ }
+
+ // Round if requested.
+ if prec >= 0 && prec < 15 {
+ shift := uint(prec * 4)
+ extra := (mant << shift) & (1<<60 - 1)
+ mant >>= 60 - shift
+ if extra|(mant&1) > 1<<59 {
+ mant++
+ }
+ mant <<= 60 - shift
+ if mant&(1<<61) != 0 {
+ // Wrapped around.
+ mant >>= 1
+ exp++
+ }
+ }
+
+ hex := lowerhex
+ if fmt == 'X' {
+ hex = upperhex
+ }
+
+ // sign, 0x, leading digit
+ if neg {
+ dst = append(dst, '-')
+ }
+ dst = append(dst, '0', fmt, '0'+byte((mant>>60)&1))
+
+ // .fraction
+ mant <<= 4 // remove leading 0 or 1
+ if prec < 0 && mant != 0 {
+ dst = append(dst, '.')
+ for mant != 0 {
+ dst = append(dst, hex[(mant>>60)&15])
+ mant <<= 4
+ }
+ } else if prec > 0 {
+ dst = append(dst, '.')
+ for i := 0; i < prec; i++ {
+ dst = append(dst, hex[(mant>>60)&15])
+ mant <<= 4
+ }
+ }
+
+ // p±
+ ch := byte('P')
+ if fmt == lower(fmt) {
+ ch = 'p'
+ }
+ dst = append(dst, ch)
+ if exp < 0 {
+ ch = '-'
+ exp = -exp
+ } else {
+ ch = '+'
+ }
+ dst = append(dst, ch)
+
+ // dd or ddd or dddd
+ switch {
+ case exp < 100:
+ dst = append(dst, byte(exp/10)+'0', byte(exp%10)+'0')
+ case exp < 1000:
+ dst = append(dst, byte(exp/100)+'0', byte((exp/10)%10)+'0', byte(exp%10)+'0')
+ default:
+ dst = append(dst, byte(exp/1000)+'0', byte(exp/100)%10+'0', byte((exp/10)%10)+'0', byte(exp%10)+'0')
+ }
+
+ return dst
+}
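
A few probes of the hex-float format from the public API (editor's sketch): note that the fraction is hexadecimal but the exponent stays decimal, and that prec rounds the fraction to the requested number of hex digits.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	fmt.Println(strconv.FormatFloat(1, 'x', -1, 64))    // 0x1p+00
	fmt.Println(strconv.FormatFloat(10, 'x', -1, 64))   // 0x1.4p+03 (10 = 1.25 × 2^3)
	fmt.Println(strconv.FormatFloat(1.0/3, 'x', 2, 64)) // 0x1.55p-02 (rounded to two hex digits)
	fmt.Println(strconv.FormatFloat(1024, 'X', -1, 64)) // 0X1P+10 ('X' selects upper-case hex; the exponent is decimal)
}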
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
diff --git a/contrib/go/_std_1.19/src/strconv/ftoaryu.go b/contrib/go/_std_1.19/src/strconv/ftoaryu.go
new file mode 100644
index 0000000000..b975cdc9b9
--- /dev/null
+++ b/contrib/go/_std_1.19/src/strconv/ftoaryu.go
@@ -0,0 +1,569 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package strconv
+
+import (
+ "math/bits"
+)
+
+// binary to decimal conversion using the Ryū algorithm.
+//
+// See Ulf Adams, "Ryū: Fast Float-to-String Conversion" (doi:10.1145/3192366.3192369)
+//
+// Fixed precision formatting is a variant of the original paper's
+// algorithm, where a single multiplication by 10^k is required,
+// sharing the same rounding guarantees.
+
+// ryuFtoaFixed32 formats mant*(2^exp) with prec decimal digits.
+func ryuFtoaFixed32(d *decimalSlice, mant uint32, exp int, prec int) {
+ if prec < 0 {
+ panic("ryuFtoaFixed32 called with negative prec")
+ }
+ if prec > 9 {
+ panic("ryuFtoaFixed32 called with prec > 9")
+ }
+ // Zero input.
+ if mant == 0 {
+ d.nd, d.dp = 0, 0
+ return
+ }
+ // Renormalize to a 25-bit mantissa.
+ e2 := exp
+ if b := bits.Len32(mant); b < 25 {
+ mant <<= uint(25 - b)
+ e2 += int(b) - 25
+ }
+ // Choose an exponent such that rounded mant*(2^e2)*(10^q) has
+	// at least prec decimal digits, i.e.
+ // mant*(2^e2)*(10^q) >= 10^(prec-1)
+ // Because mant >= 2^24, it is enough to choose:
+ // 2^(e2+24) >= 10^(-q+prec-1)
+ // or q = -mulByLog2Log10(e2+24) + prec - 1
+ q := -mulByLog2Log10(e2+24) + prec - 1
+
+ // Now compute mant*(2^e2)*(10^q).
+ // Is it an exact computation?
+ // Only small positive powers of 10 are exact (5^28 has 66 bits).
+ exact := q <= 27 && q >= 0
+
+ di, dexp2, d0 := mult64bitPow10(mant, e2, q)
+ if dexp2 >= 0 {
+ panic("not enough significant bits after mult64bitPow10")
+ }
+	// As a special case, the computation might still be exact if the
+	// exponent was negative and it amounts to computing an exact division.
+ // In that case, we ignore all lower bits.
+ // Note that division by 10^11 cannot be exact as 5^11 has 26 bits.
+ if q < 0 && q >= -10 && divisibleByPower5(uint64(mant), -q) {
+ exact = true
+ d0 = true
+ }
+ // Remove extra lower bits and keep rounding info.
+ extra := uint(-dexp2)
+ extraMask := uint32(1<<extra - 1)
+
+ di, dfrac := di>>extra, di&extraMask
+ roundUp := false
+ if exact {
+ // If we computed an exact product, d + 1/2
+ // should round to d+1 if 'd' is odd.
+ roundUp = dfrac > 1<<(extra-1) ||
+ (dfrac == 1<<(extra-1) && !d0) ||
+ (dfrac == 1<<(extra-1) && d0 && di&1 == 1)
+ } else {
+ // otherwise, d+1/2 always rounds up because
+ // we truncated below.
+ roundUp = dfrac>>(extra-1) == 1
+ }
+ if dfrac != 0 {
+ d0 = false
+ }
+ // Proceed to the requested number of digits
+ formatDecimal(d, uint64(di), !d0, roundUp, prec)
+ // Adjust exponent
+ d.dp -= q
+}
+
+// ryuFtoaFixed64 formats mant*(2^exp) with prec decimal digits.
+func ryuFtoaFixed64(d *decimalSlice, mant uint64, exp int, prec int) {
+ if prec > 18 {
+ panic("ryuFtoaFixed64 called with prec > 18")
+ }
+ // Zero input.
+ if mant == 0 {
+ d.nd, d.dp = 0, 0
+ return
+ }
+ // Renormalize to a 55-bit mantissa.
+ e2 := exp
+ if b := bits.Len64(mant); b < 55 {
+ mant = mant << uint(55-b)
+ e2 += int(b) - 55
+ }
+ // Choose an exponent such that rounded mant*(2^e2)*(10^q) has
+	// at least prec decimal digits, i.e.
+ // mant*(2^e2)*(10^q) >= 10^(prec-1)
+ // Because mant >= 2^54, it is enough to choose:
+ // 2^(e2+54) >= 10^(-q+prec-1)
+ // or q = -mulByLog2Log10(e2+54) + prec - 1
+ //
+ // The minimal required exponent is -mulByLog2Log10(1025)+18 = -291
+ // The maximal required exponent is mulByLog2Log10(1074)+18 = 342
+ q := -mulByLog2Log10(e2+54) + prec - 1
+
+ // Now compute mant*(2^e2)*(10^q).
+ // Is it an exact computation?
+ // Only small positive powers of 10 are exact (5^55 has 128 bits).
+ exact := q <= 55 && q >= 0
+
+ di, dexp2, d0 := mult128bitPow10(mant, e2, q)
+ if dexp2 >= 0 {
+ panic("not enough significant bits after mult128bitPow10")
+ }
+	// As a special case, the computation might still be exact if the
+	// exponent was negative and it amounts to computing an exact division.
+ // In that case, we ignore all lower bits.
+ // Note that division by 10^23 cannot be exact as 5^23 has 54 bits.
+ if q < 0 && q >= -22 && divisibleByPower5(mant, -q) {
+ exact = true
+ d0 = true
+ }
+ // Remove extra lower bits and keep rounding info.
+ extra := uint(-dexp2)
+ extraMask := uint64(1<<extra - 1)
+
+ di, dfrac := di>>extra, di&extraMask
+ roundUp := false
+ if exact {
+ // If we computed an exact product, d + 1/2
+ // should round to d+1 if 'd' is odd.
+ roundUp = dfrac > 1<<(extra-1) ||
+ (dfrac == 1<<(extra-1) && !d0) ||
+ (dfrac == 1<<(extra-1) && d0 && di&1 == 1)
+ } else {
+ // otherwise, d+1/2 always rounds up because
+ // we truncated below.
+ roundUp = dfrac>>(extra-1) == 1
+ }
+ if dfrac != 0 {
+ d0 = false
+ }
+ // Proceed to the requested number of digits
+ formatDecimal(d, di, !d0, roundUp, prec)
+ // Adjust exponent
+ d.dp -= q
+}
+
+var uint64pow10 = [...]uint64{
+ 1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+}
+
+// formatDecimal fills d with at most prec decimal digits
+// of mantissa m. The boolean trunc indicates whether m
+// is truncated compared to the original number being formatted.
+func formatDecimal(d *decimalSlice, m uint64, trunc bool, roundUp bool, prec int) {
+ max := uint64pow10[prec]
+ trimmed := 0
+ for m >= max {
+ a, b := m/10, m%10
+ m = a
+ trimmed++
+ if b > 5 {
+ roundUp = true
+ } else if b < 5 {
+ roundUp = false
+ } else { // b == 5
+ // round up if there are trailing digits,
+ // or if the new value of m is odd (round-to-even convention)
+ roundUp = trunc || m&1 == 1
+ }
+ if b != 0 {
+ trunc = true
+ }
+ }
+ if roundUp {
+ m++
+ }
+ if m >= max {
+ // Happens if di was originally 99999....xx
+ m /= 10
+ trimmed++
+ }
+ // render digits (similar to formatBits)
+ n := uint(prec)
+ d.nd = int(prec)
+ v := m
+ for v >= 100 {
+ var v1, v2 uint64
+ if v>>32 == 0 {
+ v1, v2 = uint64(uint32(v)/100), uint64(uint32(v)%100)
+ } else {
+ v1, v2 = v/100, v%100
+ }
+ n -= 2
+ d.d[n+1] = smallsString[2*v2+1]
+ d.d[n+0] = smallsString[2*v2+0]
+ v = v1
+ }
+ if v > 0 {
+ n--
+ d.d[n] = smallsString[2*v+1]
+ }
+ if v >= 10 {
+ n--
+ d.d[n] = smallsString[2*v]
+ }
+ for d.d[d.nd-1] == '0' {
+ d.nd--
+ trimmed++
+ }
+ d.dp = d.nd + trimmed
+}
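
The round-to-even convention in formatDecimal (the b == 5 case above) is visible at exact halfway points; a quick check through the public API, using values that are exactly representable in binary so the ties are genuine:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// 0.5, 1.5 and 2.5 are exact in float64: ties go to the even neighbor.
	fmt.Println(strconv.FormatFloat(0.5, 'f', 0, 64)) // 0
	fmt.Println(strconv.FormatFloat(1.5, 'f', 0, 64)) // 2
	fmt.Println(strconv.FormatFloat(2.5, 'f', 0, 64)) // 2
}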
+
+// ryuFtoaShortest formats mant*2^exp with prec decimal digits.
+func ryuFtoaShortest(d *decimalSlice, mant uint64, exp int, flt *floatInfo) {
+ if mant == 0 {
+ d.nd, d.dp = 0, 0
+ return
+ }
+ // If input is an exact integer with fewer bits than the mantissa,
+ // the previous and next integer are not admissible representations.
+ if exp <= 0 && bits.TrailingZeros64(mant) >= -exp {
+ mant >>= uint(-exp)
+ ryuDigits(d, mant, mant, mant, true, false)
+ return
+ }
+ ml, mc, mu, e2 := computeBounds(mant, exp, flt)
+ if e2 == 0 {
+ ryuDigits(d, ml, mc, mu, true, false)
+ return
+ }
+ // Find 10^q *larger* than 2^-e2
+ q := mulByLog2Log10(-e2) + 1
+
+ // We are going to multiply by 10^q using 128-bit arithmetic.
+ // The exponent is the same for all 3 numbers.
+ var dl, dc, du uint64
+ var dl0, dc0, du0 bool
+ if flt == &float32info {
+ var dl32, dc32, du32 uint32
+ dl32, _, dl0 = mult64bitPow10(uint32(ml), e2, q)
+ dc32, _, dc0 = mult64bitPow10(uint32(mc), e2, q)
+ du32, e2, du0 = mult64bitPow10(uint32(mu), e2, q)
+ dl, dc, du = uint64(dl32), uint64(dc32), uint64(du32)
+ } else {
+ dl, _, dl0 = mult128bitPow10(ml, e2, q)
+ dc, _, dc0 = mult128bitPow10(mc, e2, q)
+ du, e2, du0 = mult128bitPow10(mu, e2, q)
+ }
+ if e2 >= 0 {
+ panic("not enough significant bits after mult128bitPow10")
+ }
+ // Is it an exact computation?
+ if q > 55 {
+ // Large positive powers of ten are not exact
+ dl0, dc0, du0 = false, false, false
+ }
+ if q < 0 && q >= -24 {
+ // Division by a power of ten may be exact.
+ // (note that 5^25 is a 59-bit number so division by 5^25 is never exact).
+ if divisibleByPower5(ml, -q) {
+ dl0 = true
+ }
+ if divisibleByPower5(mc, -q) {
+ dc0 = true
+ }
+ if divisibleByPower5(mu, -q) {
+ du0 = true
+ }
+ }
+ // Express the results (dl, dc, du)*2^e2 as integers.
+ // Extra bits must be removed and rounding hints computed.
+ extra := uint(-e2)
+ extraMask := uint64(1<<extra - 1)
+ // Now compute the floored, integral base 10 mantissas.
+ dl, fracl := dl>>extra, dl&extraMask
+ dc, fracc := dc>>extra, dc&extraMask
+ du, fracu := du>>extra, du&extraMask
+ // Is it allowed to use 'du' as a result?
+ // It is always allowed when it is truncated, but also
+	// if it is exact and the original binary mantissa is even.
+ // When disallowed, we can subtract 1.
+ uok := !du0 || fracu > 0
+ if du0 && fracu == 0 {
+ uok = mant&1 == 0
+ }
+ if !uok {
+ du--
+ }
+ // Is 'dc' the correctly rounded base 10 mantissa?
+ // The correct rounding might be dc+1
+ cup := false // don't round up.
+ if dc0 {
+ // If we computed an exact product, the half integer
+ // should round to next (even) integer if 'dc' is odd.
+ cup = fracc > 1<<(extra-1) ||
+ (fracc == 1<<(extra-1) && dc&1 == 1)
+ } else {
+ // otherwise, the result is a lower truncation of the ideal
+ // result.
+ cup = fracc>>(extra-1) == 1
+ }
+ // Is 'dl' an allowed representation?
+ // Only if it is an exact value, and if the original binary mantissa
+ // was even.
+ lok := dl0 && fracl == 0 && (mant&1 == 0)
+ if !lok {
+ dl++
+ }
+ // We need to remember whether the trimmed digits of 'dc' are zero.
+ c0 := dc0 && fracc == 0
+ // render digits
+ ryuDigits(d, dl, dc, du, c0, cup)
+ d.dp -= q
+}
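
Shortest mode is what prec = -1 selects in the exported API: the fewest digits that still round-trip to the same bits, which is why adjacent float64 values get visibly different strings. A usage sketch:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	fmt.Println(strconv.FormatFloat(0.3, 'g', -1, 64))     // 0.3
	fmt.Println(strconv.FormatFloat(0.1+0.2, 'g', -1, 64)) // 0.30000000000000004 (a different float64)
	// Round trip: parsing the shortest form recovers the exact bits.
	v, _ := strconv.ParseFloat("0.3", 64)
	fmt.Println(v == 0.3) // true
}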
+
+// mulByLog2Log10 returns math.Floor(x * log(2)/log(10)) for an integer x in
+// the range -1600 <= x && x <= +1600.
+//
+// The range restriction lets us work in faster integer arithmetic instead of
+// slower floating point arithmetic. Correctness is verified by unit tests.
+func mulByLog2Log10(x int) int {
+ // log(2)/log(10) ≈ 0.30102999566 ≈ 78913 / 2^18
+ return (x * 78913) >> 18
+}
+
+// mulByLog10Log2 returns math.Floor(x * log(10)/log(2)) for an integer x in
+// the range -500 <= x && x <= +500.
+//
+// The range restriction lets us work in faster integer arithmetic instead of
+// slower floating point arithmetic. Correctness is verified by unit tests.
+func mulByLog10Log2(x int) int {
+ // log(10)/log(2) ≈ 3.32192809489 ≈ 108853 / 2^15
+ return (x * 108853) >> 15
+}
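
Both fixed-point constants can be checked exhaustively over their documented ranges; a standalone verification sketch (independent of the vendored file, mirroring the shifts above). Go's >> on a negative signed value is an arithmetic shift, so it floors toward minus infinity as required.

package main

import (
	"fmt"
	"math"
)

func main() {
	ok := true
	for x := -1600; x <= 1600; x++ { // documented range for mulByLog2Log10
		if (x*78913)>>18 != int(math.Floor(float64(x)*math.Ln2/math.Ln10)) {
			ok = false
		}
	}
	for x := -500; x <= 500; x++ { // documented range for mulByLog10Log2
		if (x*108853)>>15 != int(math.Floor(float64(x)*math.Ln10/math.Ln2)) {
			ok = false
		}
	}
	fmt.Println(ok) // true
}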
+
+// computeBounds returns a floating-point vector (l, c, u)×2^e2
+// where the mantissas are 55-bit (or 26-bit) integers, describing the interval
+// represented by the input float64 or float32.
+func computeBounds(mant uint64, exp int, flt *floatInfo) (lower, central, upper uint64, e2 int) {
+ if mant != 1<<flt.mantbits || exp == flt.bias+1-int(flt.mantbits) {
+ // regular case (or denormals)
+ lower, central, upper = 2*mant-1, 2*mant, 2*mant+1
+ e2 = exp - 1
+ return
+ } else {
+ // border of an exponent
+ lower, central, upper = 4*mant-1, 4*mant, 4*mant+2
+ e2 = exp - 2
+ return
+ }
+}
+
+func ryuDigits(d *decimalSlice, lower, central, upper uint64,
+ c0, cup bool) {
+ lhi, llo := divmod1e9(lower)
+ chi, clo := divmod1e9(central)
+ uhi, ulo := divmod1e9(upper)
+ if uhi == 0 {
+ // only low digits (for denormals)
+ ryuDigits32(d, llo, clo, ulo, c0, cup, 8)
+ } else if lhi < uhi {
+ // truncate 9 digits at once.
+ if llo != 0 {
+ lhi++
+ }
+ c0 = c0 && clo == 0
+ cup = (clo > 5e8) || (clo == 5e8 && cup)
+ ryuDigits32(d, lhi, chi, uhi, c0, cup, 8)
+ d.dp += 9
+ } else {
+ d.nd = 0
+ // emit high part
+ n := uint(9)
+ for v := chi; v > 0; {
+ v1, v2 := v/10, v%10
+ v = v1
+ n--
+ d.d[n] = byte(v2 + '0')
+ }
+ d.d = d.d[n:]
+ d.nd = int(9 - n)
+ // emit low part
+ ryuDigits32(d, llo, clo, ulo,
+ c0, cup, d.nd+8)
+ }
+ // trim trailing zeros
+ for d.nd > 0 && d.d[d.nd-1] == '0' {
+ d.nd--
+ }
+ // trim initial zeros
+ for d.nd > 0 && d.d[0] == '0' {
+ d.nd--
+ d.dp--
+ d.d = d.d[1:]
+ }
+}
+
+// ryuDigits32 emits decimal digits for a number less than 1e9.
+func ryuDigits32(d *decimalSlice, lower, central, upper uint32,
+ c0, cup bool, endindex int) {
+ if upper == 0 {
+ d.dp = endindex + 1
+ return
+ }
+ trimmed := 0
+ // Remember last trimmed digit to check for round-up.
+ // c0 will be used to remember zeroness of following digits.
+ cNextDigit := 0
+ for upper > 0 {
+ // Repeatedly compute:
+ // l = Ceil(lower / 10^k)
+ // c = Round(central / 10^k)
+ // u = Floor(upper / 10^k)
+ // and stop when c goes out of the (l, u) interval.
+ l := (lower + 9) / 10
+ c, cdigit := central/10, central%10
+ u := upper / 10
+ if l > u {
+			// don't trim this digit: doing so could take the result
+			// below l, which is forbidden. Stop trimming and exit now.
+ break
+ }
+ // Check that we didn't cross the lower boundary.
+ // The case where l < u but c == l-1 is essentially impossible,
+ // but may happen if:
+ // lower = ..11
+ // central = ..19
+ // upper = ..31
+ // and means that 'central' is very close but less than
+ // an integer ending with many zeros, and usually
+ // the "round-up" logic hides the problem.
+ if l == c+1 && c < u {
+ c++
+ cdigit = 0
+ cup = false
+ }
+ trimmed++
+ // Remember trimmed digits of c
+ c0 = c0 && cNextDigit == 0
+ cNextDigit = int(cdigit)
+ lower, central, upper = l, c, u
+ }
+ // should we round up?
+ if trimmed > 0 {
+ cup = cNextDigit > 5 ||
+ (cNextDigit == 5 && !c0) ||
+ (cNextDigit == 5 && c0 && central&1 == 1)
+ }
+ if central < upper && cup {
+ central++
+ }
+ // We know where the number ends, fill directly
+ endindex -= trimmed
+ v := central
+ n := endindex
+ for n > d.nd {
+ v1, v2 := v/100, v%100
+ d.d[n] = smallsString[2*v2+1]
+ d.d[n-1] = smallsString[2*v2+0]
+ n -= 2
+ v = v1
+ }
+ if n == d.nd {
+ d.d[n] = byte(v + '0')
+ }
+ d.nd = endindex + 1
+ d.dp = d.nd + trimmed
+}
+
+// mult64bitPow10 takes a floating-point input with a 25-bit
+// mantissa and multiplies it with 10^q. The resulting mantissa
+// is m*P >> 57 where P is a 64-bit element of the detailedPowersOfTen tables.
+// It is typically 31 or 32 bits wide.
+// The returned boolean is true if all trimmed bits were zero.
+//
+// That is:
+//
+// m*2^e2 * round(10^q) = resM * 2^resE + ε
+// exact = ε == 0
+func mult64bitPow10(m uint32, e2, q int) (resM uint32, resE int, exact bool) {
+ if q == 0 {
+ // P == 1<<63
+ return m << 6, e2 - 6, true
+ }
+ if q < detailedPowersOfTenMinExp10 || detailedPowersOfTenMaxExp10 < q {
+ // This never happens due to the range of float32/float64 exponent
+ panic("mult64bitPow10: power of 10 is out of range")
+ }
+ pow := detailedPowersOfTen[q-detailedPowersOfTenMinExp10][1]
+ if q < 0 {
+ // Inverse powers of ten must be rounded up.
+ pow += 1
+ }
+ hi, lo := bits.Mul64(uint64(m), pow)
+ e2 += mulByLog10Log2(q) - 63 + 57
+ return uint32(hi<<7 | lo>>57), e2, lo<<7 == 0
+}
+
+// mult128bitPow10 takes a floating-point input with a 55-bit
+// mantissa and multiplies it with 10^q. The resulting mantissa
+// is m*P >> 119 where P is a 128-bit element of the detailedPowersOfTen tables.
+// It is typically 63 or 64 bits wide.
+// The returned boolean is true if all trimmed bits were zero.
+//
+// That is:
+//
+// m*2^e2 * round(10^q) = resM * 2^resE + ε
+// exact = ε == 0
+func mult128bitPow10(m uint64, e2, q int) (resM uint64, resE int, exact bool) {
+ if q == 0 {
+ // P == 1<<127
+ return m << 8, e2 - 8, true
+ }
+ if q < detailedPowersOfTenMinExp10 || detailedPowersOfTenMaxExp10 < q {
+ // This never happens due to the range of float32/float64 exponent
+ panic("mult128bitPow10: power of 10 is out of range")
+ }
+ pow := detailedPowersOfTen[q-detailedPowersOfTenMinExp10]
+ if q < 0 {
+ // Inverse powers of ten must be rounded up.
+ pow[0] += 1
+ }
+ e2 += mulByLog10Log2(q) - 127 + 119
+
+ // long multiplication
+ l1, l0 := bits.Mul64(m, pow[0])
+ h1, h0 := bits.Mul64(m, pow[1])
+ mid, carry := bits.Add64(l1, h0, 0)
+ h1 += carry
+ return h1<<9 | mid>>55, e2, mid<<9 == 0 && l0 == 0
+}
+
+func divisibleByPower5(m uint64, k int) bool {
+ if m == 0 {
+ return true
+ }
+ for i := 0; i < k; i++ {
+ if m%5 != 0 {
+ return false
+ }
+ m /= 5
+ }
+ return true
+}
+
+// divmod1e9 computes quotient and remainder of division by 1e9,
+// avoiding runtime uint64 division on 32-bit platforms.
+func divmod1e9(x uint64) (uint32, uint32) {
+ if !host32bit {
+ return uint32(x / 1e9), uint32(x % 1e9)
+ }
+ // Use the same sequence of operations as the amd64 compiler.
+ hi, _ := bits.Mul64(x>>1, 0x89705f4136b4a598) // binary digits of 1e-9
+ q := hi >> 28
+ return uint32(q), uint32(x - q*1e9)
+}
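
The reciprocal-multiply trick can be sanity-checked against plain / and %; below is a standalone copy of the helper (same constants as in the patch) under test with a few edge values:

package main

import (
	"fmt"
	"math/bits"
)

// Same sequence as the patch: a 64-bit reciprocal multiply replaces the
// runtime division call that 32-bit targets would otherwise need.
func divmod1e9(x uint64) (uint32, uint32) {
	hi, _ := bits.Mul64(x>>1, 0x89705f4136b4a598)
	q := hi >> 28
	return uint32(q), uint32(x - q*1e9)
}

func main() {
	for _, x := range []uint64{0, 1, 999999999, 1e9, 1e9 + 1, 1e18, 1<<64 - 1} {
		q, r := divmod1e9(x)
		fmt.Println(q == uint32(x/1e9) && r == uint32(x%1e9)) // true for each
	}
}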
diff --git a/contrib/go/_std_1.18/src/strconv/isprint.go b/contrib/go/_std_1.19/src/strconv/isprint.go
index 994a8e423c..994a8e423c 100644
--- a/contrib/go/_std_1.18/src/strconv/isprint.go
+++ b/contrib/go/_std_1.19/src/strconv/isprint.go
diff --git a/contrib/go/_std_1.19/src/strconv/itoa.go b/contrib/go/_std_1.19/src/strconv/itoa.go
new file mode 100644
index 0000000000..b0c2666e7c
--- /dev/null
+++ b/contrib/go/_std_1.19/src/strconv/itoa.go
@@ -0,0 +1,205 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package strconv
+
+import "math/bits"
+
+const fastSmalls = true // enable fast path for small integers
+
+// FormatUint returns the string representation of i in the given base,
+// for 2 <= base <= 36. The result uses the lower-case letters 'a' to 'z'
+// for digit values >= 10.
+func FormatUint(i uint64, base int) string {
+ if fastSmalls && i < nSmalls && base == 10 {
+ return small(int(i))
+ }
+ _, s := formatBits(nil, i, base, false, false)
+ return s
+}
+
+// FormatInt returns the string representation of i in the given base,
+// for 2 <= base <= 36. The result uses the lower-case letters 'a' to 'z'
+// for digit values >= 10.
+func FormatInt(i int64, base int) string {
+ if fastSmalls && 0 <= i && i < nSmalls && base == 10 {
+ return small(int(i))
+ }
+ _, s := formatBits(nil, uint64(i), base, i < 0, false)
+ return s
+}
+
+// Itoa is equivalent to FormatInt(int64(i), 10).
+func Itoa(i int) string {
+ return FormatInt(int64(i), 10)
+}
+
+// AppendInt appends the string form of the integer i,
+// as generated by FormatInt, to dst and returns the extended buffer.
+func AppendInt(dst []byte, i int64, base int) []byte {
+ if fastSmalls && 0 <= i && i < nSmalls && base == 10 {
+ return append(dst, small(int(i))...)
+ }
+ dst, _ = formatBits(dst, uint64(i), base, i < 0, true)
+ return dst
+}
+
+// AppendUint appends the string form of the unsigned integer i,
+// as generated by FormatUint, to dst and returns the extended buffer.
+func AppendUint(dst []byte, i uint64, base int) []byte {
+ if fastSmalls && i < nSmalls && base == 10 {
+ return append(dst, small(int(i))...)
+ }
+ dst, _ = formatBits(dst, i, base, false, true)
+ return dst
+}
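
Typical use of these exported entry points, including the Append form that writes into an existing buffer instead of allocating a new string (editor's usage sketch):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	fmt.Println(strconv.Itoa(42))            // 42 (the i < nSmalls fast path)
	fmt.Println(strconv.FormatInt(-255, 16)) // -ff
	buf := []byte("x=")
	buf = strconv.AppendUint(buf, 1<<20, 10)
	fmt.Println(string(buf)) // x=1048576
}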
+
+// small returns the string for an i with 0 <= i < nSmalls.
+func small(i int) string {
+ if i < 10 {
+ return digits[i : i+1]
+ }
+ return smallsString[i*2 : i*2+2]
+}
+
+const nSmalls = 100
+
+const smallsString = "00010203040506070809" +
+ "10111213141516171819" +
+ "20212223242526272829" +
+ "30313233343536373839" +
+ "40414243444546474849" +
+ "50515253545556575859" +
+ "60616263646566676869" +
+ "70717273747576777879" +
+ "80818283848586878889" +
+ "90919293949596979899"
+
+const host32bit = ^uint(0)>>32 == 0
+
+const digits = "0123456789abcdefghijklmnopqrstuvwxyz"
+
+// formatBits computes the string representation of u in the given base.
+// If neg is set, u is treated as a negative int64 value. If append_ is
+// set, the string is appended to dst and the resulting byte slice is
+// returned as the first result value; otherwise the string is returned
+// as the second result value.
+func formatBits(dst []byte, u uint64, base int, neg, append_ bool) (d []byte, s string) {
+ if base < 2 || base > len(digits) {
+ panic("strconv: illegal AppendInt/FormatInt base")
+ }
+ // 2 <= base && base <= len(digits)
+
+ var a [64 + 1]byte // +1 for sign of 64bit value in base 2
+ i := len(a)
+
+ if neg {
+ u = -u
+ }
+
+ // convert bits
+ // We use uint values where we can because those will
+ // fit into a single register even on a 32bit machine.
+ if base == 10 {
+ // common case: use constants for / because
+ // the compiler can optimize it into a multiply+shift
+
+ if host32bit {
+ // convert the lower digits using 32bit operations
+ for u >= 1e9 {
+ // Avoid using r = a%b in addition to q = a/b
+ // since 64bit division and modulo operations
+ // are calculated by runtime functions on 32bit machines.
+ q := u / 1e9
+ us := uint(u - q*1e9) // u % 1e9 fits into a uint
+ for j := 4; j > 0; j-- {
+ is := us % 100 * 2
+ us /= 100
+ i -= 2
+ a[i+1] = smallsString[is+1]
+ a[i+0] = smallsString[is+0]
+ }
+
+ // us < 10, since it contains the last digit
+ // from the initial 9-digit us.
+ i--
+ a[i] = smallsString[us*2+1]
+
+ u = q
+ }
+ // u < 1e9
+ }
+
+ // u guaranteed to fit into a uint
+ us := uint(u)
+ for us >= 100 {
+ is := us % 100 * 2
+ us /= 100
+ i -= 2
+ a[i+1] = smallsString[is+1]
+ a[i+0] = smallsString[is+0]
+ }
+
+ // us < 100
+ is := us * 2
+ i--
+ a[i] = smallsString[is+1]
+ if us >= 10 {
+ i--
+ a[i] = smallsString[is]
+ }
+
+ } else if isPowerOfTwo(base) {
+ // Use shifts and masks instead of / and %.
+ // Base is a power of 2 and 2 <= base <= len(digits) where len(digits) is 36.
+ // The largest power of 2 below or equal to 36 is 32, which is 1 << 5;
+	// i.e., the largest possible shift count is 5. By &-ing that value with
+ // the constant 7 we tell the compiler that the shift count is always
+ // less than 8 which is smaller than any register width. This allows
+ // the compiler to generate better code for the shift operation.
+ shift := uint(bits.TrailingZeros(uint(base))) & 7
+ b := uint64(base)
+ m := uint(base) - 1 // == 1<<shift - 1
+ for u >= b {
+ i--
+ a[i] = digits[uint(u)&m]
+ u >>= shift
+ }
+ // u < base
+ i--
+ a[i] = digits[uint(u)]
+ } else {
+ // general case
+ b := uint64(base)
+ for u >= b {
+ i--
+ // Avoid using r = a%b in addition to q = a/b
+ // since 64bit division and modulo operations
+ // are calculated by runtime functions on 32bit machines.
+ q := u / b
+ a[i] = digits[uint(u-q*b)]
+ u = q
+ }
+ // u < base
+ i--
+ a[i] = digits[uint(u)]
+ }
+
+ // add sign, if any
+ if neg {
+ i--
+ a[i] = '-'
+ }
+
+ if append_ {
+ d = append(dst, a[i:]...)
+ return
+ }
+ s = string(a[i:])
+ return
+}
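
The three code paths in formatBits (decimal via smallsString pairs, power-of-two via shift and mask, and the general divide loop) are selected purely by the base; a public-API sketch:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	fmt.Println(strconv.FormatUint(1000, 10)) // 1000 (decimal fast path)
	fmt.Println(strconv.FormatUint(1000, 2))  // 1111101000 (shift-and-mask path)
	fmt.Println(strconv.FormatUint(1000, 32)) // v8 (shift-and-mask path; digits run 0-9 then a-v)
	fmt.Println(strconv.FormatUint(1000, 36)) // rs (general divide loop)
}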
+
+func isPowerOfTwo(x int) bool {
+ return x&(x-1) == 0
+}
diff --git a/contrib/go/_std_1.19/src/strconv/quote.go b/contrib/go/_std_1.19/src/strconv/quote.go
new file mode 100644
index 0000000000..1b5bddfeae
--- /dev/null
+++ b/contrib/go/_std_1.19/src/strconv/quote.go
@@ -0,0 +1,604 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run makeisprint.go -output isprint.go
+
+package strconv
+
+import (
+ "unicode/utf8"
+)
+
+const (
+ lowerhex = "0123456789abcdef"
+ upperhex = "0123456789ABCDEF"
+)
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+ return index(s, c) != -1
+}
+
+func quoteWith(s string, quote byte, ASCIIonly, graphicOnly bool) string {
+ return string(appendQuotedWith(make([]byte, 0, 3*len(s)/2), s, quote, ASCIIonly, graphicOnly))
+}
+
+func quoteRuneWith(r rune, quote byte, ASCIIonly, graphicOnly bool) string {
+ return string(appendQuotedRuneWith(nil, r, quote, ASCIIonly, graphicOnly))
+}
+
+func appendQuotedWith(buf []byte, s string, quote byte, ASCIIonly, graphicOnly bool) []byte {
+ // Often called with big strings, so preallocate. If there's quoting,
+ // this is conservative but still helps a lot.
+ if cap(buf)-len(buf) < len(s) {
+ nBuf := make([]byte, len(buf), len(buf)+1+len(s)+1)
+ copy(nBuf, buf)
+ buf = nBuf
+ }
+ buf = append(buf, quote)
+ for width := 0; len(s) > 0; s = s[width:] {
+ r := rune(s[0])
+ width = 1
+ if r >= utf8.RuneSelf {
+ r, width = utf8.DecodeRuneInString(s)
+ }
+ if width == 1 && r == utf8.RuneError {
+ buf = append(buf, `\x`...)
+ buf = append(buf, lowerhex[s[0]>>4])
+ buf = append(buf, lowerhex[s[0]&0xF])
+ continue
+ }
+ buf = appendEscapedRune(buf, r, quote, ASCIIonly, graphicOnly)
+ }
+ buf = append(buf, quote)
+ return buf
+}
+
+func appendQuotedRuneWith(buf []byte, r rune, quote byte, ASCIIonly, graphicOnly bool) []byte {
+ buf = append(buf, quote)
+ if !utf8.ValidRune(r) {
+ r = utf8.RuneError
+ }
+ buf = appendEscapedRune(buf, r, quote, ASCIIonly, graphicOnly)
+ buf = append(buf, quote)
+ return buf
+}
+
+func appendEscapedRune(buf []byte, r rune, quote byte, ASCIIonly, graphicOnly bool) []byte {
+ var runeTmp [utf8.UTFMax]byte
+ if r == rune(quote) || r == '\\' { // always backslashed
+ buf = append(buf, '\\')
+ buf = append(buf, byte(r))
+ return buf
+ }
+ if ASCIIonly {
+ if r < utf8.RuneSelf && IsPrint(r) {
+ buf = append(buf, byte(r))
+ return buf
+ }
+ } else if IsPrint(r) || graphicOnly && isInGraphicList(r) {
+ n := utf8.EncodeRune(runeTmp[:], r)
+ buf = append(buf, runeTmp[:n]...)
+ return buf
+ }
+ switch r {
+ case '\a':
+ buf = append(buf, `\a`...)
+ case '\b':
+ buf = append(buf, `\b`...)
+ case '\f':
+ buf = append(buf, `\f`...)
+ case '\n':
+ buf = append(buf, `\n`...)
+ case '\r':
+ buf = append(buf, `\r`...)
+ case '\t':
+ buf = append(buf, `\t`...)
+ case '\v':
+ buf = append(buf, `\v`...)
+ default:
+ switch {
+ case r < ' ' || r == 0x7f:
+ buf = append(buf, `\x`...)
+ buf = append(buf, lowerhex[byte(r)>>4])
+ buf = append(buf, lowerhex[byte(r)&0xF])
+ case !utf8.ValidRune(r):
+ r = 0xFFFD
+ fallthrough
+ case r < 0x10000:
+ buf = append(buf, `\u`...)
+ for s := 12; s >= 0; s -= 4 {
+ buf = append(buf, lowerhex[r>>uint(s)&0xF])
+ }
+ default:
+ buf = append(buf, `\U`...)
+ for s := 28; s >= 0; s -= 4 {
+ buf = append(buf, lowerhex[r>>uint(s)&0xF])
+ }
+ }
+ }
+ return buf
+}
+
+// Quote returns a double-quoted Go string literal representing s. The
+// returned string uses Go escape sequences (\t, \n, \xFF, \u0100) for
+// control characters and non-printable characters as defined by
+// IsPrint.
+func Quote(s string) string {
+ return quoteWith(s, '"', false, false)
+}
+
+// AppendQuote appends a double-quoted Go string literal representing s,
+// as generated by Quote, to dst and returns the extended buffer.
+func AppendQuote(dst []byte, s string) []byte {
+ return appendQuotedWith(dst, s, '"', false, false)
+}
+
+// QuoteToASCII returns a double-quoted Go string literal representing s.
+// The returned string uses Go escape sequences (\t, \n, \xFF, \u0100) for
+// non-ASCII characters and non-printable characters as defined by IsPrint.
+func QuoteToASCII(s string) string {
+ return quoteWith(s, '"', true, false)
+}
+
+// AppendQuoteToASCII appends a double-quoted Go string literal representing s,
+// as generated by QuoteToASCII, to dst and returns the extended buffer.
+func AppendQuoteToASCII(dst []byte, s string) []byte {
+ return appendQuotedWith(dst, s, '"', true, false)
+}
+
+// QuoteToGraphic returns a double-quoted Go string literal representing s.
+// The returned string leaves Unicode graphic characters, as defined by
+// IsGraphic, unchanged and uses Go escape sequences (\t, \n, \xFF, \u0100)
+// for non-graphic characters.
+func QuoteToGraphic(s string) string {
+ return quoteWith(s, '"', false, true)
+}
+
+// AppendQuoteToGraphic appends a double-quoted Go string literal representing s,
+// as generated by QuoteToGraphic, to dst and returns the extended buffer.
+func AppendQuoteToGraphic(dst []byte, s string) []byte {
+ return appendQuotedWith(dst, s, '"', false, true)
+}
+
+// QuoteRune returns a single-quoted Go character literal representing the
+// rune. The returned string uses Go escape sequences (\t, \n, \xFF, \u0100)
+// for control characters and non-printable characters as defined by IsPrint.
+// If r is not a valid Unicode code point, it is interpreted as the Unicode
+// replacement character U+FFFD.
+func QuoteRune(r rune) string {
+ return quoteRuneWith(r, '\'', false, false)
+}
+
+// AppendQuoteRune appends a single-quoted Go character literal representing the rune,
+// as generated by QuoteRune, to dst and returns the extended buffer.
+func AppendQuoteRune(dst []byte, r rune) []byte {
+ return appendQuotedRuneWith(dst, r, '\'', false, false)
+}
+
+// QuoteRuneToASCII returns a single-quoted Go character literal representing
+// the rune. The returned string uses Go escape sequences (\t, \n, \xFF,
+// \u0100) for non-ASCII characters and non-printable characters as defined
+// by IsPrint.
+// If r is not a valid Unicode code point, it is interpreted as the Unicode
+// replacement character U+FFFD.
+func QuoteRuneToASCII(r rune) string {
+ return quoteRuneWith(r, '\'', true, false)
+}
+
+// AppendQuoteRuneToASCII appends a single-quoted Go character literal representing the rune,
+// as generated by QuoteRuneToASCII, to dst and returns the extended buffer.
+func AppendQuoteRuneToASCII(dst []byte, r rune) []byte {
+ return appendQuotedRuneWith(dst, r, '\'', true, false)
+}
+
+// QuoteRuneToGraphic returns a single-quoted Go character literal representing
+// the rune. If the rune is not a Unicode graphic character,
+// as defined by IsGraphic, the returned string will use a Go escape sequence
+// (\t, \n, \xFF, \u0100).
+// If r is not a valid Unicode code point, it is interpreted as the Unicode
+// replacement character U+FFFD.
+func QuoteRuneToGraphic(r rune) string {
+ return quoteRuneWith(r, '\'', false, true)
+}
+
+// AppendQuoteRuneToGraphic appends a single-quoted Go character literal representing the rune,
+// as generated by QuoteRuneToGraphic, to dst and returns the extended buffer.
+func AppendQuoteRuneToGraphic(dst []byte, r rune) []byte {
+ return appendQuotedRuneWith(dst, r, '\'', false, true)
+}
+
+// CanBackquote reports whether the string s can be represented
+// unchanged as a single-line backquoted string without control
+// characters other than tab.
+func CanBackquote(s string) bool {
+ for len(s) > 0 {
+ r, wid := utf8.DecodeRuneInString(s)
+ s = s[wid:]
+ if wid > 1 {
+ if r == '\ufeff' {
+ return false // BOMs are invisible and should not be quoted.
+ }
+ continue // All other multibyte runes are correctly encoded and assumed printable.
+ }
+ if r == utf8.RuneError {
+ return false
+ }
+ if (r < ' ' && r != '\t') || r == '`' || r == '\u007F' {
+ return false
+ }
+ }
+ return true
+}
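
A few probes of the rules above: tab is the only control character allowed, and backquotes and BOMs are rejected (usage sketch).

package main

import (
	"fmt"
	"strconv"
)

func main() {
	fmt.Println(strconv.CanBackquote("a\tb"))     // true: tab is allowed
	fmt.Println(strconv.CanBackquote("a\nb"))     // false: newline is a control character
	fmt.Println(strconv.CanBackquote("say `hi`")) // false: contains a backquote
	fmt.Println(strconv.CanBackquote("\ufeffx"))  // false: BOM
}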
+
+func unhex(b byte) (v rune, ok bool) {
+ c := rune(b)
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0', true
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10, true
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10, true
+ }
+ return
+}
+
+// UnquoteChar decodes the first character or byte in the escaped string
+// or character literal represented by the string s.
+// It returns four values:
+//
+// 1. value, the decoded Unicode code point or byte value;
+// 2. multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
+// 3. tail, the remainder of the string after the character; and
+// 4. an error that will be nil if the character is syntactically valid.
+//
+// The second argument, quote, specifies the type of literal being parsed
+// and therefore which escaped quote character is permitted.
+// If set to a single quote, it permits the sequence \' and disallows unescaped '.
+// If set to a double quote, it permits \" and disallows unescaped ".
+// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped.
+func UnquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
+ // easy cases
+ if len(s) == 0 {
+ err = ErrSyntax
+ return
+ }
+ switch c := s[0]; {
+ case c == quote && (quote == '\'' || quote == '"'):
+ err = ErrSyntax
+ return
+ case c >= utf8.RuneSelf:
+ r, size := utf8.DecodeRuneInString(s)
+ return r, true, s[size:], nil
+ case c != '\\':
+ return rune(s[0]), false, s[1:], nil
+ }
+
+ // hard case: c is backslash
+ if len(s) <= 1 {
+ err = ErrSyntax
+ return
+ }
+ c := s[1]
+ s = s[2:]
+
+ switch c {
+ case 'a':
+ value = '\a'
+ case 'b':
+ value = '\b'
+ case 'f':
+ value = '\f'
+ case 'n':
+ value = '\n'
+ case 'r':
+ value = '\r'
+ case 't':
+ value = '\t'
+ case 'v':
+ value = '\v'
+ case 'x', 'u', 'U':
+ n := 0
+ switch c {
+ case 'x':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ var v rune
+ if len(s) < n {
+ err = ErrSyntax
+ return
+ }
+ for j := 0; j < n; j++ {
+ x, ok := unhex(s[j])
+ if !ok {
+ err = ErrSyntax
+ return
+ }
+ v = v<<4 | x
+ }
+ s = s[n:]
+ if c == 'x' {
+ // single-byte string, possibly not UTF-8
+ value = v
+ break
+ }
+ if !utf8.ValidRune(v) {
+ err = ErrSyntax
+ return
+ }
+ value = v
+ multibyte = true
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ v := rune(c) - '0'
+ if len(s) < 2 {
+ err = ErrSyntax
+ return
+ }
+ for j := 0; j < 2; j++ { // one digit already; two more
+ x := rune(s[j]) - '0'
+ if x < 0 || x > 7 {
+ err = ErrSyntax
+ return
+ }
+ v = (v << 3) | x
+ }
+ s = s[2:]
+ if v > 255 {
+ err = ErrSyntax
+ return
+ }
+ value = v
+ case '\\':
+ value = '\\'
+ case '\'', '"':
+ if c != quote {
+ err = ErrSyntax
+ return
+ }
+ value = rune(c)
+ default:
+ err = ErrSyntax
+ return
+ }
+ tail = s
+ return
+}
+
+// QuotedPrefix returns the quoted string (as understood by Unquote) at the prefix of s.
+// If s does not start with a valid quoted string, QuotedPrefix returns an error.
+func QuotedPrefix(s string) (string, error) {
+ out, _, err := unquote(s, false)
+ return out, err
+}
+
+// Unquote interprets s as a single-quoted, double-quoted,
+// or backquoted Go string literal, returning the string value
+// that s quotes. (If s is single-quoted, it would be a Go
+// character literal; Unquote returns the corresponding
+// one-character string.)
+func Unquote(s string) (string, error) {
+ out, rem, err := unquote(s, true)
+ if len(rem) > 0 {
+ return "", ErrSyntax
+ }
+ return out, err
+}
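
Quote and Unquote are inverses for the escape forms handled above, and raw (backquoted) literals pass through without escape processing; a usage sketch:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	q := strconv.Quote("héllo\nworld") // é is printable and kept; \n is escaped
	fmt.Println(q)                     // "héllo\nworld"
	s, err := strconv.Unquote(q)
	fmt.Println(s == "héllo\nworld", err)     // true <nil>
	fmt.Println(strconv.Unquote("`raw \\n`")) // raw \n <nil>: backquotes do no escape processing
}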
+
+// unquote parses a quoted string at the start of the input,
+// returning the parsed prefix, the remaining suffix, and any parse errors.
+// If unescape is true, the parsed prefix is unescaped,
+// otherwise the input prefix is provided verbatim.
+func unquote(in string, unescape bool) (out, rem string, err error) {
+ // Determine the quote form and optimistically find the terminating quote.
+ if len(in) < 2 {
+ return "", in, ErrSyntax
+ }
+ quote := in[0]
+ end := index(in[1:], quote)
+ if end < 0 {
+ return "", in, ErrSyntax
+ }
+ end += 2 // position after terminating quote; may be wrong if escape sequences are present
+
+ switch quote {
+ case '`':
+ switch {
+ case !unescape:
+ out = in[:end] // include quotes
+ case !contains(in[:end], '\r'):
+ out = in[len("`") : end-len("`")] // exclude quotes
+ default:
+ // Carriage return characters ('\r') inside raw string literals
+ // are discarded from the raw string value.
+ buf := make([]byte, 0, end-len("`")-len("\r")-len("`"))
+ for i := len("`"); i < end-len("`"); i++ {
+ if in[i] != '\r' {
+ buf = append(buf, in[i])
+ }
+ }
+ out = string(buf)
+ }
+ // NOTE: Prior implementations did not verify that raw strings consist
+	// of valid UTF-8 characters, and we continue not to verify that here.
+	// The Go specification does not explicitly require valid UTF-8,
+	// but only mentions that it is implicitly valid for Go source code
+ // (which must be valid UTF-8).
+ return out, in[end:], nil
+ case '"', '\'':
+ // Handle quoted strings without any escape sequences.
+ if !contains(in[:end], '\\') && !contains(in[:end], '\n') {
+ var valid bool
+ switch quote {
+ case '"':
+ valid = utf8.ValidString(in[len(`"`) : end-len(`"`)])
+ case '\'':
+ r, n := utf8.DecodeRuneInString(in[len("'") : end-len("'")])
+ valid = len("'")+n+len("'") == end && (r != utf8.RuneError || n != 1)
+ }
+ if valid {
+ out = in[:end]
+ if unescape {
+ out = out[1 : end-1] // exclude quotes
+ }
+ return out, in[end:], nil
+ }
+ }
+
+ // Handle quoted strings with escape sequences.
+ var buf []byte
+ in0 := in
+ in = in[1:] // skip starting quote
+ if unescape {
+ buf = make([]byte, 0, 3*end/2) // try to avoid more allocations
+ }
+ for len(in) > 0 && in[0] != quote {
+ // Process the next character,
+ // rejecting any unescaped newline characters which are invalid.
+ r, multibyte, rem, err := UnquoteChar(in, quote)
+ if in[0] == '\n' || err != nil {
+ return "", in0, ErrSyntax
+ }
+ in = rem
+
+ // Append the character if unescaping the input.
+ if unescape {
+ if r < utf8.RuneSelf || !multibyte {
+ buf = append(buf, byte(r))
+ } else {
+ var arr [utf8.UTFMax]byte
+ n := utf8.EncodeRune(arr[:], r)
+ buf = append(buf, arr[:n]...)
+ }
+ }
+
+ // Single quoted strings must be a single character.
+ if quote == '\'' {
+ break
+ }
+ }
+
+ // Verify that the string ends with a terminating quote.
+ if !(len(in) > 0 && in[0] == quote) {
+ return "", in0, ErrSyntax
+ }
+ in = in[1:] // skip terminating quote
+
+ if unescape {
+ return string(buf), in, nil
+ }
+ return in0[:len(in0)-len(in)], in, nil
+ default:
+ return "", in, ErrSyntax
+ }
+}
+
+// bsearch16 returns the smallest i such that a[i] >= x.
+// If there is no such i, bsearch16 returns len(a).
+func bsearch16(a []uint16, x uint16) int {
+ i, j := 0, len(a)
+ for i < j {
+ h := i + (j-i)>>1
+ if a[h] < x {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ return i
+}
+
+// bsearch32 returns the smallest i such that a[i] >= x.
+// If there is no such i, bsearch32 returns len(a).
+func bsearch32(a []uint32, x uint32) int {
+ i, j := 0, len(a)
+ for i < j {
+ h := i + (j-i)>>1
+ if a[h] < x {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ return i
+}
+
+// TODO: IsPrint is a local implementation of unicode.IsPrint, verified by the tests
+// to give the same answer. It allows this package not to depend on unicode,
+// and therefore not pull in all the Unicode tables. If the linker were better
+// at tossing unused tables, we could get rid of this implementation.
+// That would be nice.
+
+// IsPrint reports whether the rune is defined as printable by Go, with
+// the same definition as unicode.IsPrint: letters, numbers, punctuation,
+// symbols and ASCII space.
+func IsPrint(r rune) bool {
+ // Fast check for Latin-1
+ if r <= 0xFF {
+ if 0x20 <= r && r <= 0x7E {
+			// All ASCII from space through DEL-1 is printable.
+ return true
+ }
+ if 0xA1 <= r && r <= 0xFF {
+ // Similarly for ¡ through ÿ...
+ return r != 0xAD // ...except for the bizarre soft hyphen.
+ }
+ return false
+ }
+
+ // Same algorithm, either on uint16 or uint32 value.
+ // First, find first i such that isPrint[i] >= x.
+ // This is the index of either the start or end of a pair that might span x.
+ // The start is even (isPrint[i&^1]) and the end is odd (isPrint[i|1]).
+ // If we find x in a range, make sure x is not in isNotPrint list.
+
+ if 0 <= r && r < 1<<16 {
+ rr, isPrint, isNotPrint := uint16(r), isPrint16, isNotPrint16
+ i := bsearch16(isPrint, rr)
+ if i >= len(isPrint) || rr < isPrint[i&^1] || isPrint[i|1] < rr {
+ return false
+ }
+ j := bsearch16(isNotPrint, rr)
+ return j >= len(isNotPrint) || isNotPrint[j] != rr
+ }
+
+ rr, isPrint, isNotPrint := uint32(r), isPrint32, isNotPrint32
+ i := bsearch32(isPrint, rr)
+ if i >= len(isPrint) || rr < isPrint[i&^1] || isPrint[i|1] < rr {
+ return false
+ }
+ if r >= 0x20000 {
+ return true
+ }
+ r -= 0x10000
+ j := bsearch16(isNotPrint, uint16(r))
+ return j >= len(isNotPrint) || isNotPrint[j] != uint16(r)
+}
+
+// IsGraphic reports whether the rune is defined as a Graphic by Unicode. Such
+// characters include letters, marks, numbers, punctuation, symbols, and
+// spaces, from categories L, M, N, P, S, and Zs.
+func IsGraphic(r rune) bool {
+ if IsPrint(r) {
+ return true
+ }
+ return isInGraphicList(r)
+}
+
+// isInGraphicList reports whether the rune is in the isGraphic list. This separation
+// from IsGraphic allows quoteWith to avoid two calls to IsPrint.
+// Should be called only if IsPrint fails.
+func isInGraphicList(r rune) bool {
+ // We know r must fit in 16 bits - see makeisprint.go.
+ if r > 0xFFFF {
+ return false
+ }
+ rr := uint16(r)
+ i := bsearch16(isGraphic, rr)
+ return i < len(isGraphic) && rr == isGraphic[i]
+}
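
The difference between the two predicates is exactly the non-ASCII spaces and similar code points kept in the isGraphic list, for example U+00A0 (no-break space), which is category Zs. A quick check:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	fmt.Println(strconv.IsPrint('x'), strconv.IsGraphic('x'))           // true true
	fmt.Println(strconv.IsPrint('\u00a0'), strconv.IsGraphic('\u00a0')) // false true: NBSP is graphic, not print
	fmt.Println(strconv.IsPrint('\n'), strconv.IsGraphic('\n'))         // false false
}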
diff --git a/contrib/go/_std_1.19/src/strings/builder.go b/contrib/go/_std_1.19/src/strings/builder.go
new file mode 100644
index 0000000000..3caddabd4e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/strings/builder.go
@@ -0,0 +1,126 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package strings
+
+import (
+ "unicode/utf8"
+ "unsafe"
+)
+
+// A Builder is used to efficiently build a string using Write methods.
+// It minimizes memory copying. The zero value is ready to use.
+// Do not copy a non-zero Builder.
+type Builder struct {
+ addr *Builder // of receiver, to detect copies by value
+ buf []byte
+}
+
+// noescape hides a pointer from escape analysis. It is the identity function
+// but escape analysis doesn't think the output depends on the input.
+// noescape is inlined and currently compiles down to zero instructions.
+// USE CAREFULLY!
+// This was copied from the runtime; see issues 23382 and 7921.
+//
+//go:nosplit
+//go:nocheckptr
+func noescape(p unsafe.Pointer) unsafe.Pointer {
+ x := uintptr(p)
+ return unsafe.Pointer(x ^ 0)
+}
+
+func (b *Builder) copyCheck() {
+ if b.addr == nil {
+ // This hack works around a failing of Go's escape analysis
+ // that was causing b to escape and be heap allocated.
+ // See issue 23382.
+ // TODO: once issue 7921 is fixed, this should be reverted to
+ // just "b.addr = b".
+ b.addr = (*Builder)(noescape(unsafe.Pointer(b)))
+ } else if b.addr != b {
+ panic("strings: illegal use of non-zero Builder copied by value")
+ }
+}
+
+// String returns the accumulated string.
+func (b *Builder) String() string {
+ return *(*string)(unsafe.Pointer(&b.buf))
+}
+
+// Len returns the number of accumulated bytes; b.Len() == len(b.String()).
+func (b *Builder) Len() int { return len(b.buf) }
+
+// Cap returns the capacity of the builder's underlying byte slice. It is the
+// total space allocated for the string being built and includes any bytes
+// already written.
+func (b *Builder) Cap() int { return cap(b.buf) }
+
+// Reset resets the Builder to be empty.
+func (b *Builder) Reset() {
+ b.addr = nil
+ b.buf = nil
+}
+
+// grow copies the buffer to a new, larger buffer so that there are at least n
+// bytes of capacity beyond len(b.buf).
+func (b *Builder) grow(n int) {
+ buf := make([]byte, len(b.buf), 2*cap(b.buf)+n)
+ copy(buf, b.buf)
+ b.buf = buf
+}
+
+// Grow grows b's capacity, if necessary, to guarantee space for
+// another n bytes. After Grow(n), at least n bytes can be written to b
+// without another allocation. If n is negative, Grow panics.
+func (b *Builder) Grow(n int) {
+ b.copyCheck()
+ if n < 0 {
+ panic("strings.Builder.Grow: negative count")
+ }
+ if cap(b.buf)-len(b.buf) < n {
+ b.grow(n)
+ }
+}
+
+// Write appends the contents of p to b's buffer.
+// Write always returns len(p), nil.
+func (b *Builder) Write(p []byte) (int, error) {
+ b.copyCheck()
+ b.buf = append(b.buf, p...)
+ return len(p), nil
+}
+
+// WriteByte appends the byte c to b's buffer.
+// The returned error is always nil.
+func (b *Builder) WriteByte(c byte) error {
+ b.copyCheck()
+ b.buf = append(b.buf, c)
+ return nil
+}
+
+// WriteRune appends the UTF-8 encoding of Unicode code point r to b's buffer.
+// It returns the length of r and a nil error.
+func (b *Builder) WriteRune(r rune) (int, error) {
+ b.copyCheck()
+ // Compare as uint32 to correctly handle negative runes.
+ if uint32(r) < utf8.RuneSelf {
+ b.buf = append(b.buf, byte(r))
+ return 1, nil
+ }
+ l := len(b.buf)
+ if cap(b.buf)-l < utf8.UTFMax {
+ b.grow(utf8.UTFMax)
+ }
+ n := utf8.EncodeRune(b.buf[l:l+utf8.UTFMax], r)
+ b.buf = b.buf[:l+n]
+ return n, nil
+}
+
+// WriteString appends the contents of s to b's buffer.
+// It returns the length of s and a nil error.
+func (b *Builder) WriteString(s string) (int, error) {
+ b.copyCheck()
+ b.buf = append(b.buf, s...)
+ return len(s), nil
+}
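
Builder also satisfies io.Writer, so it composes with fmt.Fprintf; pre-sizing with Grow avoids the copy in the internal grow. A usage sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	var b strings.Builder
	b.Grow(32) // one up-front allocation covers everything below
	for i := 0; i < 3; i++ {
		fmt.Fprintf(&b, "%d,", i) // Builder is an io.Writer
	}
	b.WriteString("done")
	fmt.Println(b.String())             // 0,1,2,done
	fmt.Println(b.Len(), b.Cap() >= 32) // 10 true
}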
diff --git a/contrib/go/_std_1.18/src/strings/clone.go b/contrib/go/_std_1.19/src/strings/clone.go
index edd1497d9e..edd1497d9e 100644
--- a/contrib/go/_std_1.18/src/strings/clone.go
+++ b/contrib/go/_std_1.19/src/strings/clone.go
diff --git a/contrib/go/_std_1.18/src/strings/compare.go b/contrib/go/_std_1.19/src/strings/compare.go
index 2bd4a243db..2bd4a243db 100644
--- a/contrib/go/_std_1.18/src/strings/compare.go
+++ b/contrib/go/_std_1.19/src/strings/compare.go
diff --git a/contrib/go/_std_1.18/src/strings/reader.go b/contrib/go/_std_1.19/src/strings/reader.go
index 6f069a62ca..6f069a62ca 100644
--- a/contrib/go/_std_1.18/src/strings/reader.go
+++ b/contrib/go/_std_1.19/src/strings/reader.go
diff --git a/contrib/go/_std_1.19/src/strings/replace.go b/contrib/go/_std_1.19/src/strings/replace.go
new file mode 100644
index 0000000000..73bc78a07e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/strings/replace.go
@@ -0,0 +1,569 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package strings
+
+import (
+ "io"
+ "sync"
+)
+
+// Replacer replaces a list of strings with replacements.
+// It is safe for concurrent use by multiple goroutines.
+type Replacer struct {
+ once sync.Once // guards buildOnce method
+ r replacer
+ oldnew []string
+}
+
+// replacer is the interface that a replacement algorithm needs to implement.
+type replacer interface {
+ Replace(s string) string
+ WriteString(w io.Writer, s string) (n int, err error)
+}
+
+// NewReplacer returns a new Replacer from a list of old, new string
+// pairs. Replacements are performed in the order they appear in the
+// target string, without overlapping matches. The old string
+// comparisons are done in argument order.
+//
+// NewReplacer panics if given an odd number of arguments.
+func NewReplacer(oldnew ...string) *Replacer {
+ if len(oldnew)%2 == 1 {
+ panic("strings.NewReplacer: odd argument count")
+ }
+ return &Replacer{oldnew: append([]string(nil), oldnew...)}
+}
+
+func (r *Replacer) buildOnce() {
+ r.r = r.build()
+ r.oldnew = nil
+}
+
+func (b *Replacer) build() replacer {
+ oldnew := b.oldnew
+ if len(oldnew) == 2 && len(oldnew[0]) > 1 {
+ return makeSingleStringReplacer(oldnew[0], oldnew[1])
+ }
+
+ allNewBytes := true
+ for i := 0; i < len(oldnew); i += 2 {
+ if len(oldnew[i]) != 1 {
+ return makeGenericReplacer(oldnew)
+ }
+ if len(oldnew[i+1]) != 1 {
+ allNewBytes = false
+ }
+ }
+
+ if allNewBytes {
+ r := byteReplacer{}
+ for i := range r {
+ r[i] = byte(i)
+ }
+		// The first old->new pair takes precedence
+		// over later pairs with the same old string.
+ for i := len(oldnew) - 2; i >= 0; i -= 2 {
+ o := oldnew[i][0]
+ n := oldnew[i+1][0]
+ r[o] = n
+ }
+ return &r
+ }
+
+ r := byteStringReplacer{toReplace: make([]string, 0, len(oldnew)/2)}
+	// The first old->new pair takes precedence
+	// over later pairs with the same old string.
+ for i := len(oldnew) - 2; i >= 0; i -= 2 {
+ o := oldnew[i][0]
+ n := oldnew[i+1]
+ // To avoid counting repetitions multiple times.
+ if r.replacements[o] == nil {
+ // We need to use string([]byte{o}) instead of string(o),
+ // to avoid utf8 encoding of o.
+ // E. g. byte(150) produces string of length 2.
+ r.toReplace = append(r.toReplace, string([]byte{o}))
+ }
+		r.replacements[o] = []byte(n)
+	}
+ return &r
+}
+
+// Replace returns a copy of s with all replacements performed.
+func (r *Replacer) Replace(s string) string {
+ r.once.Do(r.buildOnce)
+ return r.r.Replace(s)
+}
+
+// WriteString writes s to w with all replacements performed.
+func (r *Replacer) WriteString(w io.Writer, s string) (n int, err error) {
+ r.once.Do(r.buildOnce)
+ return r.r.WriteString(w, s)
+}
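
Usage sketch; note the documented precedence: matches are found left to right in the target string, and when two patterns match at the same position the pair listed first wins.

package main

import (
	"fmt"
	"strings"
)

func main() {
	r := strings.NewReplacer("<", "&lt;", ">", "&gt;")
	fmt.Println(r.Replace("<b>bold</b>")) // &lt;b&gt;bold&lt;/b&gt;

	// "a" is listed before "aa", so it wins at every position.
	fmt.Println(strings.NewReplacer("a", "1", "aa", "2").Replace("aaa")) // 111
}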
+
+// trieNode is a node in a lookup trie for prioritized key/value pairs. Keys
+// and values may be empty. For example, the trie containing keys "ax", "ay",
+// "bcbc", "x" and "xy" could have eight nodes:
+//
+// n0 -
+// n1 a-
+// n2 .x+
+// n3 .y+
+// n4 b-
+// n5 .cbc+
+// n6 x+
+// n7 .y+
+//
+// n0 is the root node, and its children are n1, n4 and n6; n1's children are
+// n2 and n3; n4's child is n5; n6's child is n7. Nodes n0, n1 and n4 (marked
+// with a trailing "-") are partial keys, and nodes n2, n3, n5, n6 and n7
+// (marked with a trailing "+") are complete keys.
+type trieNode struct {
+ // value is the value of the trie node's key/value pair. It is empty if
+ // this node is not a complete key.
+ value string
+ // priority is the priority (higher is more important) of the trie node's
+ // key/value pair; keys are not necessarily matched shortest- or longest-
+ // first. Priority is positive if this node is a complete key, and zero
+ // otherwise. In the example above, positive/zero priorities are marked
+ // with a trailing "+" or "-".
+ priority int
+
+ // A trie node may have zero, one or more child nodes:
+ // * if the remaining fields are zero, there are no children.
+ // * if prefix and next are non-zero, there is one child in next.
+ // * if table is non-zero, it defines all the children.
+ //
+ // Prefixes are preferred over tables when there is one child, but the
+ // root node always uses a table for lookup efficiency.
+
+ // prefix is the difference in keys between this trie node and the next.
+ // In the example above, node n4 has prefix "cbc" and n4's next node is n5.
+ // Node n5 has no children and so has zero prefix, next and table fields.
+ prefix string
+ next *trieNode
+
+ // table is a lookup table indexed by the next byte in the key, after
+ // remapping that byte through genericReplacer.mapping to create a dense
+ // index. In the example above, the keys only use 'a', 'b', 'c', 'x' and
+ // 'y', which remap to 0, 1, 2, 3 and 4. All other bytes remap to 5, and
+ // genericReplacer.tableSize will be 5. Node n0's table will be
+ // []*trieNode{ 0:n1, 1:n4, 3:n6 }, where the 0, 1 and 3 are the remapped
+ // 'a', 'b' and 'x'.
+ table []*trieNode
+}
+
+func (t *trieNode) add(key, val string, priority int, r *genericReplacer) {
+ if key == "" {
+ if t.priority == 0 {
+ t.value = val
+ t.priority = priority
+ }
+ return
+ }
+
+ if t.prefix != "" {
+ // Need to split the prefix among multiple nodes.
+ var n int // length of the longest common prefix
+ for ; n < len(t.prefix) && n < len(key); n++ {
+ if t.prefix[n] != key[n] {
+ break
+ }
+ }
+ if n == len(t.prefix) {
+ t.next.add(key[n:], val, priority, r)
+ } else if n == 0 {
+ // First byte differs, start a new lookup table here. Looking up
+ // what is currently t.prefix[0] will lead to prefixNode, and
+ // looking up key[0] will lead to keyNode.
+ var prefixNode *trieNode
+ if len(t.prefix) == 1 {
+ prefixNode = t.next
+ } else {
+ prefixNode = &trieNode{
+ prefix: t.prefix[1:],
+ next: t.next,
+ }
+ }
+ keyNode := new(trieNode)
+ t.table = make([]*trieNode, r.tableSize)
+ t.table[r.mapping[t.prefix[0]]] = prefixNode
+ t.table[r.mapping[key[0]]] = keyNode
+ t.prefix = ""
+ t.next = nil
+ keyNode.add(key[1:], val, priority, r)
+ } else {
+ // Insert new node after the common section of the prefix.
+ next := &trieNode{
+ prefix: t.prefix[n:],
+ next: t.next,
+ }
+ t.prefix = t.prefix[:n]
+ t.next = next
+ next.add(key[n:], val, priority, r)
+ }
+ } else if t.table != nil {
+ // Insert into existing table.
+ m := r.mapping[key[0]]
+ if t.table[m] == nil {
+ t.table[m] = new(trieNode)
+ }
+ t.table[m].add(key[1:], val, priority, r)
+ } else {
+ t.prefix = key
+ t.next = new(trieNode)
+ t.next.add("", val, priority, r)
+ }
+}
+
+func (r *genericReplacer) lookup(s string, ignoreRoot bool) (val string, keylen int, found bool) {
+ // Iterate down the trie to the end, and grab the value and keylen with
+ // the highest priority.
+ bestPriority := 0
+ node := &r.root
+ n := 0
+ for node != nil {
+ if node.priority > bestPriority && !(ignoreRoot && node == &r.root) {
+ bestPriority = node.priority
+ val = node.value
+ keylen = n
+ found = true
+ }
+
+ if s == "" {
+ break
+ }
+ if node.table != nil {
+ index := r.mapping[s[0]]
+ if int(index) == r.tableSize {
+ break
+ }
+ node = node.table[index]
+ s = s[1:]
+ n++
+ } else if node.prefix != "" && HasPrefix(s, node.prefix) {
+ n += len(node.prefix)
+ s = s[len(node.prefix):]
+ node = node.next
+ } else {
+ break
+ }
+ }
+ return
+}
+
+// genericReplacer is the fully generic algorithm.
+// It's used as a fallback when nothing faster can be used.
+type genericReplacer struct {
+ root trieNode
+ // tableSize is the size of a trie node's lookup table. It is the number
+ // of unique key bytes.
+ tableSize int
+ // mapping maps from key bytes to a dense index for trieNode.table.
+ mapping [256]byte
+}
+
+func makeGenericReplacer(oldnew []string) *genericReplacer {
+ r := new(genericReplacer)
+ // Find each byte used, then assign them each an index.
+ for i := 0; i < len(oldnew); i += 2 {
+ key := oldnew[i]
+ for j := 0; j < len(key); j++ {
+ r.mapping[key[j]] = 1
+ }
+ }
+
+ for _, b := range r.mapping {
+ r.tableSize += int(b)
+ }
+
+ var index byte
+ for i, b := range r.mapping {
+ if b == 0 {
+ r.mapping[i] = byte(r.tableSize)
+ } else {
+ r.mapping[i] = index
+ index++
+ }
+ }
+ // Ensure root node uses a lookup table (for performance).
+ r.root.table = make([]*trieNode, r.tableSize)
+
+ for i := 0; i < len(oldnew); i += 2 {
+ r.root.add(oldnew[i], oldnew[i+1], len(oldnew)-i, r)
+ }
+ return r
+}
+
+type appendSliceWriter []byte
+
+// Write writes to the buffer to satisfy io.Writer.
+func (w *appendSliceWriter) Write(p []byte) (int, error) {
+ *w = append(*w, p...)
+ return len(p), nil
+}
+
+// WriteString writes to the buffer without string->[]byte->string allocations.
+func (w *appendSliceWriter) WriteString(s string) (int, error) {
+ *w = append(*w, s...)
+ return len(s), nil
+}
+
+type stringWriter struct {
+ w io.Writer
+}
+
+func (w stringWriter) WriteString(s string) (int, error) {
+ return w.w.Write([]byte(s))
+}
+
+func getStringWriter(w io.Writer) io.StringWriter {
+ sw, ok := w.(io.StringWriter)
+ if !ok {
+ sw = stringWriter{w}
+ }
+ return sw
+}
+
+func (r *genericReplacer) Replace(s string) string {
+ buf := make(appendSliceWriter, 0, len(s))
+ r.WriteString(&buf, s)
+ return string(buf)
+}
+
+func (r *genericReplacer) WriteString(w io.Writer, s string) (n int, err error) {
+ sw := getStringWriter(w)
+ var last, wn int
+ var prevMatchEmpty bool
+ for i := 0; i <= len(s); {
+ // Fast path: s[i] is not a prefix of any pattern.
+ if i != len(s) && r.root.priority == 0 {
+ index := int(r.mapping[s[i]])
+ if index == r.tableSize || r.root.table[index] == nil {
+ i++
+ continue
+ }
+ }
+
+ // Ignore the empty match iff the previous loop found the empty match.
+ val, keylen, match := r.lookup(s[i:], prevMatchEmpty)
+ prevMatchEmpty = match && keylen == 0
+ if match {
+ wn, err = sw.WriteString(s[last:i])
+ n += wn
+ if err != nil {
+ return
+ }
+ wn, err = sw.WriteString(val)
+ n += wn
+ if err != nil {
+ return
+ }
+ i += keylen
+ last = i
+ continue
+ }
+ i++
+ }
+ if last != len(s) {
+ wn, err = sw.WriteString(s[last:])
+ n += wn
+ }
+ return
+}
+
+// singleStringReplacer is the implementation that's used when there is only
+// one string to replace (and that string has more than one byte).
+type singleStringReplacer struct {
+ finder *stringFinder
+ // value is the new string that replaces that pattern when it's found.
+ value string
+}
+
+func makeSingleStringReplacer(pattern string, value string) *singleStringReplacer {
+ return &singleStringReplacer{finder: makeStringFinder(pattern), value: value}
+}
+
+func (r *singleStringReplacer) Replace(s string) string {
+ var buf Builder
+ i, matched := 0, false
+ for {
+ match := r.finder.next(s[i:])
+ if match == -1 {
+ break
+ }
+ matched = true
+ buf.Grow(match + len(r.value))
+ buf.WriteString(s[i : i+match])
+ buf.WriteString(r.value)
+ i += match + len(r.finder.pattern)
+ }
+ if !matched {
+ return s
+ }
+ buf.WriteString(s[i:])
+ return buf.String()
+}
+
+func (r *singleStringReplacer) WriteString(w io.Writer, s string) (n int, err error) {
+ sw := getStringWriter(w)
+ var i, wn int
+ for {
+ match := r.finder.next(s[i:])
+ if match == -1 {
+ break
+ }
+ wn, err = sw.WriteString(s[i : i+match])
+ n += wn
+ if err != nil {
+ return
+ }
+ wn, err = sw.WriteString(r.value)
+ n += wn
+ if err != nil {
+ return
+ }
+ i += match + len(r.finder.pattern)
+ }
+ wn, err = sw.WriteString(s[i:])
+ n += wn
+ return
+}
+
+// byteReplacer is the implementation that's used when all the "old"
+// and "new" values are single ASCII bytes.
+// The array contains replacement bytes indexed by old byte.
+type byteReplacer [256]byte
+
+func (r *byteReplacer) Replace(s string) string {
+ var buf []byte // lazily allocated
+ for i := 0; i < len(s); i++ {
+ b := s[i]
+ if r[b] != b {
+ if buf == nil {
+ buf = []byte(s)
+ }
+ buf[i] = r[b]
+ }
+ }
+ if buf == nil {
+ return s
+ }
+ return string(buf)
+}
+
+func (r *byteReplacer) WriteString(w io.Writer, s string) (n int, err error) {
+ // TODO(bradfitz): use io.WriteString with slices of s, avoiding allocation.
+ bufsize := 32 << 10
+ if len(s) < bufsize {
+ bufsize = len(s)
+ }
+ buf := make([]byte, bufsize)
+
+ for len(s) > 0 {
+ ncopy := copy(buf, s)
+ s = s[ncopy:]
+ for i, b := range buf[:ncopy] {
+ buf[i] = r[b]
+ }
+ wn, err := w.Write(buf[:ncopy])
+ n += wn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+}
+
+// byteStringReplacer is the implementation that's used when all the
+// "old" values are single ASCII bytes but the "new" values vary in size.
+type byteStringReplacer struct {
+ // replacements contains replacement byte slices indexed by old byte.
+ // A nil []byte means that the old byte should not be replaced.
+ replacements [256][]byte
+ // toReplace keeps a list of bytes to replace. Depending on the length of
+ // toReplace and the length of the target string, it may be faster to use
+ // Count or a plain loop. We store each single byte as a string,
+ // because Count takes a string.
+ toReplace []string
+}
+
+// countCutOff controls the ratio of a string length to a number of replacements
+// at which (*byteStringReplacer).Replace switches algorithms.
+// For strings with a higher ratio of length to replacements than that value,
+// we call Count once for each replacement from toReplace.
+// For strings with a lower ratio we use a simple loop, because of Count's overhead.
+// countCutOff is an empirically determined overhead multiplier.
+// TODO(tocarip) revisit once we have register-based abi/mid-stack inlining.
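+// For example, with four entries in toReplace, the Count path below is taken
+// only once len(s) >= 32 (from the check len(toReplace)*countCutOff <= len(s)).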
+const countCutOff = 8
+
+func (r *byteStringReplacer) Replace(s string) string {
+ newSize := len(s)
+ anyChanges := false
+ // Is it faster to use Count?
+ if len(r.toReplace)*countCutOff <= len(s) {
+ for _, x := range r.toReplace {
+ if c := Count(s, x); c != 0 {
+ // The -1 is because we are replacing 1 byte with len(replacements[b]) bytes.
+ newSize += c * (len(r.replacements[x[0]]) - 1)
+ anyChanges = true
+ }
+
+ }
+ } else {
+ for i := 0; i < len(s); i++ {
+ b := s[i]
+ if r.replacements[b] != nil {
+ // See above for explanation of -1
+ newSize += len(r.replacements[b]) - 1
+ anyChanges = true
+ }
+ }
+ }
+ if !anyChanges {
+ return s
+ }
+ buf := make([]byte, newSize)
+ j := 0
+ for i := 0; i < len(s); i++ {
+ b := s[i]
+ if r.replacements[b] != nil {
+ j += copy(buf[j:], r.replacements[b])
+ } else {
+ buf[j] = b
+ j++
+ }
+ }
+ return string(buf)
+}
+
+func (r *byteStringReplacer) WriteString(w io.Writer, s string) (n int, err error) {
+ sw := getStringWriter(w)
+ last := 0
+ for i := 0; i < len(s); i++ {
+ b := s[i]
+ if r.replacements[b] == nil {
+ continue
+ }
+ if last != i {
+ nw, err := sw.WriteString(s[last:i])
+ n += nw
+ if err != nil {
+ return n, err
+ }
+ }
+ last = i + 1
+ nw, err := w.Write(r.replacements[b])
+ n += nw
+ if err != nil {
+ return n, err
+ }
+ }
+ if last != len(s) {
+ var nw int
+ nw, err = sw.WriteString(s[last:])
+ n += nw
+ }
+ return
+}
diff --git a/contrib/go/_std_1.18/src/strings/search.go b/contrib/go/_std_1.19/src/strings/search.go
index e5bffbbfe8..e5bffbbfe8 100644
--- a/contrib/go/_std_1.18/src/strings/search.go
+++ b/contrib/go/_std_1.19/src/strings/search.go
diff --git a/contrib/go/_std_1.19/src/strings/strings.go b/contrib/go/_std_1.19/src/strings/strings.go
new file mode 100644
index 0000000000..1dc4238522
--- /dev/null
+++ b/contrib/go/_std_1.19/src/strings/strings.go
@@ -0,0 +1,1192 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package strings implements simple functions to manipulate UTF-8 encoded strings.
+//
+// For information about UTF-8 strings in Go, see https://blog.golang.org/strings.
+package strings
+
+import (
+ "internal/bytealg"
+ "unicode"
+ "unicode/utf8"
+)
+
+// explode splits s into a slice of UTF-8 strings,
+// one string per Unicode character up to a maximum of n (n < 0 means no limit).
+// Invalid UTF-8 sequences become correct encodings of U+FFFD.
+func explode(s string, n int) []string {
+ l := utf8.RuneCountInString(s)
+ if n < 0 || n > l {
+ n = l
+ }
+ a := make([]string, n)
+ for i := 0; i < n-1; i++ {
+ ch, size := utf8.DecodeRuneInString(s)
+ a[i] = s[:size]
+ s = s[size:]
+ if ch == utf8.RuneError {
+ a[i] = string(utf8.RuneError)
+ }
+ }
+ if n > 0 {
+ a[n-1] = s
+ }
+ return a
+}
+
+// Count counts the number of non-overlapping instances of substr in s.
+// If substr is an empty string, Count returns 1 + the number of Unicode code points in s.
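+//
+// For example (values follow directly from the definition above):
+//
+// Count("cheese", "e") == 3
+// Count("five", "") == 5 (one more than the rune count of "five")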
+func Count(s, substr string) int {
+ // special case
+ if len(substr) == 0 {
+ return utf8.RuneCountInString(s) + 1
+ }
+ if len(substr) == 1 {
+ return bytealg.CountString(s, substr[0])
+ }
+ n := 0
+ for {
+ i := Index(s, substr)
+ if i == -1 {
+ return n
+ }
+ n++
+ s = s[i+len(substr):]
+ }
+}
+
+// Contains reports whether substr is within s.
+func Contains(s, substr string) bool {
+ return Index(s, substr) >= 0
+}
+
+// ContainsAny reports whether any Unicode code points in chars are within s.
+func ContainsAny(s, chars string) bool {
+ return IndexAny(s, chars) >= 0
+}
+
+// ContainsRune reports whether the Unicode code point r is within s.
+func ContainsRune(s string, r rune) bool {
+ return IndexRune(s, r) >= 0
+}
+
+// LastIndex returns the index of the last instance of substr in s, or -1 if substr is not present in s.
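+//
+// For example, LastIndex("go gopher", "go") is 3, whereas Index("go gopher", "go") is 0.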
+func LastIndex(s, substr string) int {
+ n := len(substr)
+ switch {
+ case n == 0:
+ return len(s)
+ case n == 1:
+ return LastIndexByte(s, substr[0])
+ case n == len(s):
+ if substr == s {
+ return 0
+ }
+ return -1
+ case n > len(s):
+ return -1
+ }
+ // Rabin-Karp search from the end of the string
+ hashss, pow := bytealg.HashStrRev(substr)
+ last := len(s) - n
+ var h uint32
+ for i := len(s) - 1; i >= last; i-- {
+ h = h*bytealg.PrimeRK + uint32(s[i])
+ }
+ if h == hashss && s[last:] == substr {
+ return last
+ }
+ for i := last - 1; i >= 0; i-- {
+ h *= bytealg.PrimeRK
+ h += uint32(s[i])
+ h -= pow * uint32(s[i+n])
+ if h == hashss && s[i:i+n] == substr {
+ return i
+ }
+ }
+ return -1
+}
+
+// IndexByte returns the index of the first instance of c in s, or -1 if c is not present in s.
+func IndexByte(s string, c byte) int {
+ return bytealg.IndexByteString(s, c)
+}
+
+// IndexRune returns the index of the first instance of the Unicode code point
+// r, or -1 if r is not present in s.
+// If r is utf8.RuneError, it returns the first instance of any
+// invalid UTF-8 byte sequence.
+func IndexRune(s string, r rune) int {
+ switch {
+ case 0 <= r && r < utf8.RuneSelf:
+ return IndexByte(s, byte(r))
+ case r == utf8.RuneError:
+ for i, r := range s {
+ if r == utf8.RuneError {
+ return i
+ }
+ }
+ return -1
+ case !utf8.ValidRune(r):
+ return -1
+ default:
+ return Index(s, string(r))
+ }
+}
+
+// IndexAny returns the index of the first instance of any Unicode code point
+// from chars in s, or -1 if no Unicode code point from chars is present in s.
+func IndexAny(s, chars string) int {
+ if chars == "" {
+ // Avoid scanning all of s.
+ return -1
+ }
+ if len(chars) == 1 {
+ // Avoid scanning all of s.
+ r := rune(chars[0])
+ if r >= utf8.RuneSelf {
+ r = utf8.RuneError
+ }
+ return IndexRune(s, r)
+ }
+ if len(s) > 8 {
+ if as, isASCII := makeASCIISet(chars); isASCII {
+ for i := 0; i < len(s); i++ {
+ if as.contains(s[i]) {
+ return i
+ }
+ }
+ return -1
+ }
+ }
+ for i, c := range s {
+ if IndexRune(chars, c) >= 0 {
+ return i
+ }
+ }
+ return -1
+}
+
+// LastIndexAny returns the index of the last instance of any Unicode code
+// point from chars in s, or -1 if no Unicode code point from chars is
+// present in s.
+func LastIndexAny(s, chars string) int {
+ if chars == "" {
+ // Avoid scanning all of s.
+ return -1
+ }
+ if len(s) == 1 {
+ rc := rune(s[0])
+ if rc >= utf8.RuneSelf {
+ rc = utf8.RuneError
+ }
+ if IndexRune(chars, rc) >= 0 {
+ return 0
+ }
+ return -1
+ }
+ if len(s) > 8 {
+ if as, isASCII := makeASCIISet(chars); isASCII {
+ for i := len(s) - 1; i >= 0; i-- {
+ if as.contains(s[i]) {
+ return i
+ }
+ }
+ return -1
+ }
+ }
+ if len(chars) == 1 {
+ rc := rune(chars[0])
+ if rc >= utf8.RuneSelf {
+ rc = utf8.RuneError
+ }
+ for i := len(s); i > 0; {
+ r, size := utf8.DecodeLastRuneInString(s[:i])
+ i -= size
+ if rc == r {
+ return i
+ }
+ }
+ return -1
+ }
+ for i := len(s); i > 0; {
+ r, size := utf8.DecodeLastRuneInString(s[:i])
+ i -= size
+ if IndexRune(chars, r) >= 0 {
+ return i
+ }
+ }
+ return -1
+}
+
+// LastIndexByte returns the index of the last instance of c in s, or -1 if c is not present in s.
+func LastIndexByte(s string, c byte) int {
+ for i := len(s) - 1; i >= 0; i-- {
+ if s[i] == c {
+ return i
+ }
+ }
+ return -1
+}
+
+// Generic split: splits after each instance of sep,
+// including sepSave bytes of sep in the substrings.
+func genSplit(s, sep string, sepSave, n int) []string {
+ if n == 0 {
+ return nil
+ }
+ if sep == "" {
+ return explode(s, n)
+ }
+ if n < 0 {
+ n = Count(s, sep) + 1
+ }
+
+ if n > len(s)+1 {
+ n = len(s) + 1
+ }
+ a := make([]string, n)
+ n--
+ i := 0
+ for i < n {
+ m := Index(s, sep)
+ if m < 0 {
+ break
+ }
+ a[i] = s[:m+sepSave]
+ s = s[m+len(sep):]
+ i++
+ }
+ a[i] = s
+ return a[:i+1]
+}
+
+// SplitN slices s into substrings separated by sep and returns a slice of
+// the substrings between those separators.
+//
+// The count determines the number of substrings to return:
+//
+// n > 0: at most n substrings; the last substring will be the unsplit remainder.
+// n == 0: the result is nil (zero substrings)
+// n < 0: all substrings
+//
+// Edge cases for s and sep (for example, empty strings) are handled
+// as described in the documentation for Split.
+//
+// To split around the first instance of a separator, see Cut.
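+//
+// For example (per the count rules above):
+//
+// SplitN("a,b,c", ",", 2) == []string{"a", "b,c"}
+// SplitN("a,b,c", ",", -1) == []string{"a", "b", "c"}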
+func SplitN(s, sep string, n int) []string { return genSplit(s, sep, 0, n) }
+
+// SplitAfterN slices s into substrings after each instance of sep and
+// returns a slice of those substrings.
+//
+// The count determines the number of substrings to return:
+//
+// n > 0: at most n substrings; the last substring will be the unsplit remainder.
+// n == 0: the result is nil (zero substrings)
+// n < 0: all substrings
+//
+// Edge cases for s and sep (for example, empty strings) are handled
+// as described in the documentation for SplitAfter.
+func SplitAfterN(s, sep string, n int) []string {
+ return genSplit(s, sep, len(sep), n)
+}
+
+// Split slices s into all substrings separated by sep and returns a slice of
+// the substrings between those separators.
+//
+// If s does not contain sep and sep is not empty, Split returns a
+// slice of length 1 whose only element is s.
+//
+// If sep is empty, Split splits after each UTF-8 sequence. If both s
+// and sep are empty, Split returns an empty slice.
+//
+// It is equivalent to SplitN with a count of -1.
+//
+// To split around the first instance of a separator, see Cut.
+func Split(s, sep string) []string { return genSplit(s, sep, 0, -1) }
+
+// SplitAfter slices s into all substrings after each instance of sep and
+// returns a slice of those substrings.
+//
+// If s does not contain sep and sep is not empty, SplitAfter returns
+// a slice of length 1 whose only element is s.
+//
+// If sep is empty, SplitAfter splits after each UTF-8 sequence. If
+// both s and sep are empty, SplitAfter returns an empty slice.
+//
+// It is equivalent to SplitAfterN with a count of -1.
+func SplitAfter(s, sep string) []string {
+ return genSplit(s, sep, len(sep), -1)
+}
+
+var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
+
+// Fields splits the string s around each instance of one or more consecutive white space
+// characters, as defined by unicode.IsSpace, returning a slice of substrings of s or an
+// empty slice if s contains only white space.
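+//
+// For example, Fields("  foo bar  baz   ") returns []string{"foo", "bar", "baz"}.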
+func Fields(s string) []string {
+ // First count the fields.
+ // This is an exact count if s is ASCII, otherwise it is an approximation.
+ n := 0
+ wasSpace := 1
+ // setBits is used to track which bits are set in the bytes of s.
+ setBits := uint8(0)
+ for i := 0; i < len(s); i++ {
+ r := s[i]
+ setBits |= r
+ isSpace := int(asciiSpace[r])
+ n += wasSpace & ^isSpace
+ wasSpace = isSpace
+ }
+
+ if setBits >= utf8.RuneSelf {
+ // Some runes in the input string are not ASCII.
+ return FieldsFunc(s, unicode.IsSpace)
+ }
+ // ASCII fast path
+ a := make([]string, n)
+ na := 0
+ fieldStart := 0
+ i := 0
+ // Skip spaces in the front of the input.
+ for i < len(s) && asciiSpace[s[i]] != 0 {
+ i++
+ }
+ fieldStart = i
+ for i < len(s) {
+ if asciiSpace[s[i]] == 0 {
+ i++
+ continue
+ }
+ a[na] = s[fieldStart:i]
+ na++
+ i++
+ // Skip spaces in between fields.
+ for i < len(s) && asciiSpace[s[i]] != 0 {
+ i++
+ }
+ fieldStart = i
+ }
+ if fieldStart < len(s) { // Last field might end at EOF.
+ a[na] = s[fieldStart:]
+ }
+ return a
+}
+
+// FieldsFunc splits the string s at each run of Unicode code points c satisfying f(c)
+// and returns a slice of substrings of s. If all code points in s satisfy f(c) or the
+// string is empty, an empty slice is returned.
+//
+// FieldsFunc makes no guarantees about the order in which it calls f(c)
+// and assumes that f always returns the same value for a given c.
+func FieldsFunc(s string, f func(rune) bool) []string {
+ // A span is used to record a slice of s of the form s[start:end].
+ // The start index is inclusive and the end index is exclusive.
+ type span struct {
+ start int
+ end int
+ }
+ spans := make([]span, 0, 32)
+
+ // Find the field start and end indices.
+ // Doing this in a separate pass (rather than slicing the string s
+ // and collecting the result substrings right away) is significantly
+ // more efficient, possibly due to cache effects.
+ start := -1 // valid span start if >= 0
+ for end, rune := range s {
+ if f(rune) {
+ if start >= 0 {
+ spans = append(spans, span{start, end})
+ // Set start to a negative value.
+ // Note: using -1 here consistently and reproducibly
+ // slows down this code by several percent on amd64.
+ start = ^start
+ }
+ } else {
+ if start < 0 {
+ start = end
+ }
+ }
+ }
+
+ // Last field might end at EOF.
+ if start >= 0 {
+ spans = append(spans, span{start, len(s)})
+ }
+
+ // Create strings from recorded field indices.
+ a := make([]string, len(spans))
+ for i, span := range spans {
+ a[i] = s[span.start:span.end]
+ }
+
+ return a
+}
+
+// Join concatenates the elements of its first argument to create a single string. The separator
+// string sep is placed between elements in the resulting string.
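+//
+// For example, Join([]string{"a", "b", "c"}, ", ") returns "a, b, c".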
+func Join(elems []string, sep string) string {
+ switch len(elems) {
+ case 0:
+ return ""
+ case 1:
+ return elems[0]
+ }
+ n := len(sep) * (len(elems) - 1)
+ for i := 0; i < len(elems); i++ {
+ n += len(elems[i])
+ }
+
+ var b Builder
+ b.Grow(n)
+ b.WriteString(elems[0])
+ for _, s := range elems[1:] {
+ b.WriteString(sep)
+ b.WriteString(s)
+ }
+ return b.String()
+}
+
+// HasPrefix tests whether the string s begins with prefix.
+func HasPrefix(s, prefix string) bool {
+ return len(s) >= len(prefix) && s[0:len(prefix)] == prefix
+}
+
+// HasSuffix tests whether the string s ends with suffix.
+func HasSuffix(s, suffix string) bool {
+ return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
+}
+
+// Map returns a copy of the string s with all its characters modified
+// according to the mapping function. If mapping returns a negative value, the character is
+// dropped from the string with no replacement.
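+//
+// For example, Map(func(r rune) rune { return r + 1 }, "abc") returns "bcd",
+// while a mapping that returns -1 for spaces would drop them from the result.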
+func Map(mapping func(rune) rune, s string) string {
+ // In the worst case, the string can grow when mapped, making
+ // things unpleasant. But it's so rare we barge in assuming it's
+ // fine. It could also shrink but that falls out naturally.
+
+ // The output buffer b is initialized on demand, the first
+ // time a character differs.
+ var b Builder
+
+ for i, c := range s {
+ r := mapping(c)
+ if r == c && c != utf8.RuneError {
+ continue
+ }
+
+ var width int
+ if c == utf8.RuneError {
+ c, width = utf8.DecodeRuneInString(s[i:])
+ if width != 1 && r == c {
+ continue
+ }
+ } else {
+ width = utf8.RuneLen(c)
+ }
+
+ b.Grow(len(s) + utf8.UTFMax)
+ b.WriteString(s[:i])
+ if r >= 0 {
+ b.WriteRune(r)
+ }
+
+ s = s[i+width:]
+ break
+ }
+
+ // Fast path for unchanged input
+ if b.Cap() == 0 { // didn't call b.Grow above
+ return s
+ }
+
+ for _, c := range s {
+ r := mapping(c)
+
+ if r >= 0 {
+ // common case
+ // Due to inlining, it is more performant to determine if WriteByte should be
+ // invoked rather than always call WriteRune
+ if r < utf8.RuneSelf {
+ b.WriteByte(byte(r))
+ } else {
+ // r is not an ASCII rune.
+ b.WriteRune(r)
+ }
+ }
+ }
+
+ return b.String()
+}
+
+// Repeat returns a new string consisting of count copies of the string s.
+//
+// It panics if count is negative or if
+// the result of (len(s) * count) overflows.
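+//
+// For example, Repeat("na", 2) returns "nana", while Repeat("na", -1) panics.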
+func Repeat(s string, count int) string {
+ if count == 0 {
+ return ""
+ }
+
+ // Since we cannot return an error on overflow,
+ // we should panic if the repeat will generate
+ // an overflow.
+ // See Issue golang.org/issue/16237
+ if count < 0 {
+ panic("strings: negative Repeat count")
+ } else if len(s)*count/count != len(s) {
+ panic("strings: Repeat count causes overflow")
+ }
+
+ n := len(s) * count
+ var b Builder
+ b.Grow(n)
+ b.WriteString(s)
+ for b.Len() < n {
+ if b.Len() <= n/2 {
+ b.WriteString(b.String())
+ } else {
+ b.WriteString(b.String()[:n-b.Len()])
+ break
+ }
+ }
+ return b.String()
+}
+
+// ToUpper returns s with all Unicode letters mapped to their upper case.
+func ToUpper(s string) string {
+ isASCII, hasLower := true, false
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c >= utf8.RuneSelf {
+ isASCII = false
+ break
+ }
+ hasLower = hasLower || ('a' <= c && c <= 'z')
+ }
+
+ if isASCII { // optimize for ASCII-only strings.
+ if !hasLower {
+ return s
+ }
+ var b Builder
+ b.Grow(len(s))
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if 'a' <= c && c <= 'z' {
+ c -= 'a' - 'A'
+ }
+ b.WriteByte(c)
+ }
+ return b.String()
+ }
+ return Map(unicode.ToUpper, s)
+}
+
+// ToLower returns s with all Unicode letters mapped to their lower case.
+func ToLower(s string) string {
+ isASCII, hasUpper := true, false
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c >= utf8.RuneSelf {
+ isASCII = false
+ break
+ }
+ hasUpper = hasUpper || ('A' <= c && c <= 'Z')
+ }
+
+ if isASCII { // optimize for ASCII-only strings.
+ if !hasUpper {
+ return s
+ }
+ var b Builder
+ b.Grow(len(s))
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if 'A' <= c && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ b.WriteByte(c)
+ }
+ return b.String()
+ }
+ return Map(unicode.ToLower, s)
+}
+
+// ToTitle returns a copy of the string s with all Unicode letters mapped to
+// their Unicode title case.
+func ToTitle(s string) string { return Map(unicode.ToTitle, s) }
+
+// ToUpperSpecial returns a copy of the string s with all Unicode letters mapped to their
+// upper case using the case mapping specified by c.
+func ToUpperSpecial(c unicode.SpecialCase, s string) string {
+ return Map(c.ToUpper, s)
+}
+
+// ToLowerSpecial returns a copy of the string s with all Unicode letters mapped to their
+// lower case using the case mapping specified by c.
+func ToLowerSpecial(c unicode.SpecialCase, s string) string {
+ return Map(c.ToLower, s)
+}
+
+// ToTitleSpecial returns a copy of the string s with all Unicode letters mapped to their
+// Unicode title case, giving priority to the special casing rules.
+func ToTitleSpecial(c unicode.SpecialCase, s string) string {
+ return Map(c.ToTitle, s)
+}
+
+// ToValidUTF8 returns a copy of the string s with each run of invalid UTF-8 byte sequences
+// replaced by the replacement string, which may be empty.
+func ToValidUTF8(s, replacement string) string {
+ var b Builder
+
+ for i, c := range s {
+ if c != utf8.RuneError {
+ continue
+ }
+
+ _, wid := utf8.DecodeRuneInString(s[i:])
+ if wid == 1 {
+ b.Grow(len(s) + len(replacement))
+ b.WriteString(s[:i])
+ s = s[i:]
+ break
+ }
+ }
+
+ // Fast path for unchanged input
+ if b.Cap() == 0 { // didn't call b.Grow above
+ return s
+ }
+
+ invalid := false // previous byte was from an invalid UTF-8 sequence
+ for i := 0; i < len(s); {
+ c := s[i]
+ if c < utf8.RuneSelf {
+ i++
+ invalid = false
+ b.WriteByte(c)
+ continue
+ }
+ _, wid := utf8.DecodeRuneInString(s[i:])
+ if wid == 1 {
+ i++
+ if !invalid {
+ invalid = true
+ b.WriteString(replacement)
+ }
+ continue
+ }
+ invalid = false
+ b.WriteString(s[i : i+wid])
+ i += wid
+ }
+
+ return b.String()
+}
+
+// isSeparator reports whether the rune could mark a word boundary.
+// TODO: update when package unicode captures more of the properties.
+func isSeparator(r rune) bool {
+ // ASCII alphanumerics and underscore are not separators
+ if r <= 0x7F {
+ switch {
+ case '0' <= r && r <= '9':
+ return false
+ case 'a' <= r && r <= 'z':
+ return false
+ case 'A' <= r && r <= 'Z':
+ return false
+ case r == '_':
+ return false
+ }
+ return true
+ }
+ // Letters and digits are not separators
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ return false
+ }
+ // Otherwise, all we can do for now is treat spaces as separators.
+ return unicode.IsSpace(r)
+}
+
+// Title returns a copy of the string s with all Unicode letters that begin words
+// mapped to their Unicode title case.
+//
+// Deprecated: The rule Title uses for word boundaries does not handle Unicode
+// punctuation properly. Use golang.org/x/text/cases instead.
+func Title(s string) string {
+ // Use a closure here to remember state.
+ // Hackish but effective. Depends on Map scanning in order and calling
+ // the closure once per rune.
+ prev := ' '
+ return Map(
+ func(r rune) rune {
+ if isSeparator(prev) {
+ prev = r
+ return unicode.ToTitle(r)
+ }
+ prev = r
+ return r
+ },
+ s)
+}
+
+// TrimLeftFunc returns a slice of the string s with all leading
+// Unicode code points c satisfying f(c) removed.
+func TrimLeftFunc(s string, f func(rune) bool) string {
+ i := indexFunc(s, f, false)
+ if i == -1 {
+ return ""
+ }
+ return s[i:]
+}
+
+// TrimRightFunc returns a slice of the string s with all trailing
+// Unicode code points c satisfying f(c) removed.
+func TrimRightFunc(s string, f func(rune) bool) string {
+ i := lastIndexFunc(s, f, false)
+ if i >= 0 && s[i] >= utf8.RuneSelf {
+ _, wid := utf8.DecodeRuneInString(s[i:])
+ i += wid
+ } else {
+ i++
+ }
+ return s[0:i]
+}
+
+// TrimFunc returns a slice of the string s with all leading
+// and trailing Unicode code points c satisfying f(c) removed.
+func TrimFunc(s string, f func(rune) bool) string {
+ return TrimRightFunc(TrimLeftFunc(s, f), f)
+}
+
+// IndexFunc returns the index into s of the first Unicode
+// code point satisfying f(c), or -1 if none do.
+func IndexFunc(s string, f func(rune) bool) int {
+ return indexFunc(s, f, true)
+}
+
+// LastIndexFunc returns the index into s of the last
+// Unicode code point satisfying f(c), or -1 if none do.
+func LastIndexFunc(s string, f func(rune) bool) int {
+ return lastIndexFunc(s, f, true)
+}
+
+// indexFunc is the same as IndexFunc except that if
+// truth==false, the sense of the predicate function is
+// inverted.
+func indexFunc(s string, f func(rune) bool, truth bool) int {
+ for i, r := range s {
+ if f(r) == truth {
+ return i
+ }
+ }
+ return -1
+}
+
+// lastIndexFunc is the same as LastIndexFunc except that if
+// truth==false, the sense of the predicate function is
+// inverted.
+func lastIndexFunc(s string, f func(rune) bool, truth bool) int {
+ for i := len(s); i > 0; {
+ r, size := utf8.DecodeLastRuneInString(s[0:i])
+ i -= size
+ if f(r) == truth {
+ return i
+ }
+ }
+ return -1
+}
+
+// asciiSet is a 32-byte value, where each bit represents the presence of a
+// given ASCII character in the set. The 128 bits of the lower 16 bytes,
+// starting with the least-significant bit of the lowest word to the
+// most-significant bit of the highest word, map to the full range of all
+// 128 ASCII characters. The 128 bits of the upper 16 bytes are always zero,
+// ensuring that any non-ASCII character will be reported as not in the set.
+// The full 32 bytes are allocated, even though the upper half is unused,
+// so that asciiSet.contains can avoid bounds checks.
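+//
+// For example, the byte 'a' (0x61 == 97) maps to bit 97%32 == 1 of word 97/32 == 3.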
+type asciiSet [8]uint32
+
+// makeASCIISet creates a set of ASCII characters and reports whether all
+// characters in chars are ASCII.
+func makeASCIISet(chars string) (as asciiSet, ok bool) {
+ for i := 0; i < len(chars); i++ {
+ c := chars[i]
+ if c >= utf8.RuneSelf {
+ return as, false
+ }
+ as[c/32] |= 1 << (c % 32)
+ }
+ return as, true
+}
+
+// contains reports whether c is inside the set.
+func (as *asciiSet) contains(c byte) bool {
+ return (as[c/32] & (1 << (c % 32))) != 0
+}
+
+// Trim returns a slice of the string s with all leading and
+// trailing Unicode code points contained in cutset removed.
+func Trim(s, cutset string) string {
+ if s == "" || cutset == "" {
+ return s
+ }
+ if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
+ return trimLeftByte(trimRightByte(s, cutset[0]), cutset[0])
+ }
+ if as, ok := makeASCIISet(cutset); ok {
+ return trimLeftASCII(trimRightASCII(s, &as), &as)
+ }
+ return trimLeftUnicode(trimRightUnicode(s, cutset), cutset)
+}
+
+// TrimLeft returns a slice of the string s with all leading
+// Unicode code points contained in cutset removed.
+//
+// To remove a prefix, use TrimPrefix instead.
+func TrimLeft(s, cutset string) string {
+ if s == "" || cutset == "" {
+ return s
+ }
+ if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
+ return trimLeftByte(s, cutset[0])
+ }
+ if as, ok := makeASCIISet(cutset); ok {
+ return trimLeftASCII(s, &as)
+ }
+ return trimLeftUnicode(s, cutset)
+}
+
+func trimLeftByte(s string, c byte) string {
+ for len(s) > 0 && s[0] == c {
+ s = s[1:]
+ }
+ return s
+}
+
+func trimLeftASCII(s string, as *asciiSet) string {
+ for len(s) > 0 {
+ if !as.contains(s[0]) {
+ break
+ }
+ s = s[1:]
+ }
+ return s
+}
+
+func trimLeftUnicode(s, cutset string) string {
+ for len(s) > 0 {
+ r, n := rune(s[0]), 1
+ if r >= utf8.RuneSelf {
+ r, n = utf8.DecodeRuneInString(s)
+ }
+ if !ContainsRune(cutset, r) {
+ break
+ }
+ s = s[n:]
+ }
+ return s
+}
+
+// TrimRight returns a slice of the string s, with all trailing
+// Unicode code points contained in cutset removed.
+//
+// To remove a suffix, use TrimSuffix instead.
+func TrimRight(s, cutset string) string {
+ if s == "" || cutset == "" {
+ return s
+ }
+ if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
+ return trimRightByte(s, cutset[0])
+ }
+ if as, ok := makeASCIISet(cutset); ok {
+ return trimRightASCII(s, &as)
+ }
+ return trimRightUnicode(s, cutset)
+}
+
+func trimRightByte(s string, c byte) string {
+ for len(s) > 0 && s[len(s)-1] == c {
+ s = s[:len(s)-1]
+ }
+ return s
+}
+
+func trimRightASCII(s string, as *asciiSet) string {
+ for len(s) > 0 {
+ if !as.contains(s[len(s)-1]) {
+ break
+ }
+ s = s[:len(s)-1]
+ }
+ return s
+}
+
+func trimRightUnicode(s, cutset string) string {
+ for len(s) > 0 {
+ r, n := rune(s[len(s)-1]), 1
+ if r >= utf8.RuneSelf {
+ r, n = utf8.DecodeLastRuneInString(s)
+ }
+ if !ContainsRune(cutset, r) {
+ break
+ }
+ s = s[:len(s)-n]
+ }
+ return s
+}
+
+// TrimSpace returns a slice of the string s, with all leading
+// and trailing white space removed, as defined by Unicode.
+func TrimSpace(s string) string {
+ // Fast path for ASCII: look for the first ASCII non-space byte
+ start := 0
+ for ; start < len(s); start++ {
+ c := s[start]
+ if c >= utf8.RuneSelf {
+ // If we run into a non-ASCII byte, fall back to the
+ // slower unicode-aware method on the remaining bytes
+ return TrimFunc(s[start:], unicode.IsSpace)
+ }
+ if asciiSpace[c] == 0 {
+ break
+ }
+ }
+
+ // Now look for the first ASCII non-space byte from the end
+ stop := len(s)
+ for ; stop > start; stop-- {
+ c := s[stop-1]
+ if c >= utf8.RuneSelf {
+ // start has already been trimmed above; trim the end only
+ return TrimRightFunc(s[start:stop], unicode.IsSpace)
+ }
+ if asciiSpace[c] == 0 {
+ break
+ }
+ }
+
+ // At this point s[start:stop] starts and ends with ASCII
+ // non-space bytes, so we're done. Non-ASCII cases have already
+ // been handled above.
+ return s[start:stop]
+}
+
+// TrimPrefix returns s without the provided leading prefix string.
+// If s doesn't start with prefix, s is returned unchanged.
+func TrimPrefix(s, prefix string) string {
+ if HasPrefix(s, prefix) {
+ return s[len(prefix):]
+ }
+ return s
+}
+
+// TrimSuffix returns s without the provided trailing suffix string.
+// If s doesn't end with suffix, s is returned unchanged.
+func TrimSuffix(s, suffix string) string {
+ if HasSuffix(s, suffix) {
+ return s[:len(s)-len(suffix)]
+ }
+ return s
+}
+
+// Replace returns a copy of the string s with the first n
+// non-overlapping instances of old replaced by new.
+// If old is empty, it matches at the beginning of the string
+// and after each UTF-8 sequence, yielding up to k+1 replacements
+// for a k-rune string.
+// If n < 0, there is no limit on the number of replacements.
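+//
+// For example, Replace("oink oink oink", "k", "ky", 2) returns "oinky oinky oink".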
+func Replace(s, old, new string, n int) string {
+ if old == new || n == 0 {
+ return s // avoid allocation
+ }
+
+ // Compute number of replacements.
+ if m := Count(s, old); m == 0 {
+ return s // avoid allocation
+ } else if n < 0 || m < n {
+ n = m
+ }
+
+ // Apply replacements to buffer.
+ var b Builder
+ b.Grow(len(s) + n*(len(new)-len(old)))
+ start := 0
+ for i := 0; i < n; i++ {
+ j := start
+ if len(old) == 0 {
+ if i > 0 {
+ _, wid := utf8.DecodeRuneInString(s[start:])
+ j += wid
+ }
+ } else {
+ j += Index(s[start:], old)
+ }
+ b.WriteString(s[start:j])
+ b.WriteString(new)
+ start = j + len(old)
+ }
+ b.WriteString(s[start:])
+ return b.String()
+}
+
+// ReplaceAll returns a copy of the string s with all
+// non-overlapping instances of old replaced by new.
+// If old is empty, it matches at the beginning of the string
+// and after each UTF-8 sequence, yielding up to k+1 replacements
+// for a k-rune string.
+func ReplaceAll(s, old, new string) string {
+ return Replace(s, old, new, -1)
+}
+
+// EqualFold reports whether s and t, interpreted as UTF-8 strings,
+// are equal under simple Unicode case-folding, which is a more general
+// form of case-insensitivity.
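+//
+// For example, EqualFold("Go", "GO") is true.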
+func EqualFold(s, t string) bool {
+ for s != "" && t != "" {
+ // Extract first rune from each string.
+ var sr, tr rune
+ if s[0] < utf8.RuneSelf {
+ sr, s = rune(s[0]), s[1:]
+ } else {
+ r, size := utf8.DecodeRuneInString(s)
+ sr, s = r, s[size:]
+ }
+ if t[0] < utf8.RuneSelf {
+ tr, t = rune(t[0]), t[1:]
+ } else {
+ r, size := utf8.DecodeRuneInString(t)
+ tr, t = r, t[size:]
+ }
+
+ // If they match, keep going; if not, return false.
+
+ // Easy case.
+ if tr == sr {
+ continue
+ }
+
+ // Make sr < tr to simplify what follows.
+ if tr < sr {
+ tr, sr = sr, tr
+ }
+ // Fast check for ASCII.
+ if tr < utf8.RuneSelf {
+ // ASCII only, sr/tr must be upper/lower case
+ if 'A' <= sr && sr <= 'Z' && tr == sr+'a'-'A' {
+ continue
+ }
+ return false
+ }
+
+ // General case. SimpleFold(x) returns the next equivalent rune > x
+ // or wraps around to smaller values.
+ r := unicode.SimpleFold(sr)
+ for r != sr && r < tr {
+ r = unicode.SimpleFold(r)
+ }
+ if r == tr {
+ continue
+ }
+ return false
+ }
+
+ // One string is empty. Are both?
+ return s == t
+}
+
+// Index returns the index of the first instance of substr in s, or -1 if substr is not present in s.
+func Index(s, substr string) int {
+ n := len(substr)
+ switch {
+ case n == 0:
+ return 0
+ case n == 1:
+ return IndexByte(s, substr[0])
+ case n == len(s):
+ if substr == s {
+ return 0
+ }
+ return -1
+ case n > len(s):
+ return -1
+ case n <= bytealg.MaxLen:
+ // Use brute force when both s and substr are small
+ if len(s) <= bytealg.MaxBruteForce {
+ return bytealg.IndexString(s, substr)
+ }
+ c0 := substr[0]
+ c1 := substr[1]
+ i := 0
+ t := len(s) - n + 1
+ fails := 0
+ for i < t {
+ if s[i] != c0 {
+ // IndexByte is faster than bytealg.IndexString, so use it as long as
+ // we're not getting lots of false positives.
+ o := IndexByte(s[i+1:t], c0)
+ if o < 0 {
+ return -1
+ }
+ i += o + 1
+ }
+ if s[i+1] == c1 && s[i:i+n] == substr {
+ return i
+ }
+ fails++
+ i++
+ // Switch to bytealg.IndexString when IndexByte produces too many false positives.
+ if fails > bytealg.Cutover(i) {
+ r := bytealg.IndexString(s[i:], substr)
+ if r >= 0 {
+ return r + i
+ }
+ return -1
+ }
+ }
+ return -1
+ }
+ c0 := substr[0]
+ c1 := substr[1]
+ i := 0
+ t := len(s) - n + 1
+ fails := 0
+ for i < t {
+ if s[i] != c0 {
+ o := IndexByte(s[i+1:t], c0)
+ if o < 0 {
+ return -1
+ }
+ i += o + 1
+ }
+ if s[i+1] == c1 && s[i:i+n] == substr {
+ return i
+ }
+ i++
+ fails++
+ if fails >= 4+i>>4 && i < t {
+ // See comment in ../bytes/bytes.go.
+ j := bytealg.IndexRabinKarp(s[i:], substr)
+ if j < 0 {
+ return -1
+ }
+ return i + j
+ }
+ }
+ return -1
+}
+
+// Cut slices s around the first instance of sep,
+// returning the text before and after sep.
+// The found result reports whether sep appears in s.
+// If sep does not appear in s, Cut returns s, "", false.
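+//
+// For example, Cut("key=value", "=") returns ("key", "value", true),
+// while Cut("key", "=") returns ("key", "", false).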
+func Cut(s, sep string) (before, after string, found bool) {
+ if i := Index(s, sep); i >= 0 {
+ return s[:i], s[i+len(sep):], true
+ }
+ return s, "", false
+}
diff --git a/contrib/go/_std_1.18/src/sync/atomic/asm.s b/contrib/go/_std_1.19/src/sync/atomic/asm.s
index 2022304665..2022304665 100644
--- a/contrib/go/_std_1.18/src/sync/atomic/asm.s
+++ b/contrib/go/_std_1.19/src/sync/atomic/asm.s
diff --git a/contrib/go/_std_1.19/src/sync/atomic/doc.go b/contrib/go/_std_1.19/src/sync/atomic/doc.go
new file mode 100644
index 0000000000..7977d13168
--- /dev/null
+++ b/contrib/go/_std_1.19/src/sync/atomic/doc.go
@@ -0,0 +1,153 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package atomic provides low-level atomic memory primitives
+// useful for implementing synchronization algorithms.
+//
+// These functions require great care to be used correctly.
+// Except for special, low-level applications, synchronization is better
+// done with channels or the facilities of the sync package.
+// Share memory by communicating;
+// don't communicate by sharing memory.
+//
+// The swap operation, implemented by the SwapT functions, is the atomic
+// equivalent of:
+//
+// old = *addr
+// *addr = new
+// return old
+//
+// The compare-and-swap operation, implemented by the CompareAndSwapT
+// functions, is the atomic equivalent of:
+//
+// if *addr == old {
+// *addr = new
+// return true
+// }
+// return false
+//
+// The add operation, implemented by the AddT functions, is the atomic
+// equivalent of:
+//
+// *addr += delta
+// return *addr
+//
+// The load and store operations, implemented by the LoadT and StoreT
+// functions, are the atomic equivalents of "return *addr" and
+// "*addr = val".
+//
+// In the terminology of the Go memory model, if the effect of
+// an atomic operation A is observed by atomic operation B,
+// then A “synchronizes before” B.
+// Additionally, all the atomic operations executed in a program
+// behave as though executed in some sequentially consistent order.
+// This definition provides the same semantics as
+// C++'s sequentially consistent atomics and Java's volatile variables.
+package atomic
+
+import (
+ "unsafe"
+)
+
+// BUG(rsc): On 386, the 64-bit functions use instructions unavailable before the Pentium MMX.
+//
+// On non-Linux ARM, the 64-bit functions use instructions unavailable before the ARMv6k core.
+//
+// On ARM, 386, and 32-bit MIPS, it is the caller's responsibility to arrange
+// for 64-bit alignment of 64-bit words accessed atomically via the primitive
+// atomic functions (types Int64 and Uint64 are automatically aligned).
+// The first word in an allocated struct, array, or slice; in a global
+// variable; or in a local variable (because the subject of all atomic operations
+// will escape to the heap) can be relied upon to be 64-bit aligned.
+
+// SwapInt32 atomically stores new into *addr and returns the previous *addr value.
+func SwapInt32(addr *int32, new int32) (old int32)
+
+// SwapInt64 atomically stores new into *addr and returns the previous *addr value.
+func SwapInt64(addr *int64, new int64) (old int64)
+
+// SwapUint32 atomically stores new into *addr and returns the previous *addr value.
+func SwapUint32(addr *uint32, new uint32) (old uint32)
+
+// SwapUint64 atomically stores new into *addr and returns the previous *addr value.
+func SwapUint64(addr *uint64, new uint64) (old uint64)
+
+// SwapUintptr atomically stores new into *addr and returns the previous *addr value.
+func SwapUintptr(addr *uintptr, new uintptr) (old uintptr)
+
+// SwapPointer atomically stores new into *addr and returns the previous *addr value.
+func SwapPointer(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer)
+
+// CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.
+func CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)
+
+// CompareAndSwapInt64 executes the compare-and-swap operation for an int64 value.
+func CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)
+
+// CompareAndSwapUint32 executes the compare-and-swap operation for a uint32 value.
+func CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)
+
+// CompareAndSwapUint64 executes the compare-and-swap operation for a uint64 value.
+func CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)
+
+// CompareAndSwapUintptr executes the compare-and-swap operation for a uintptr value.
+func CompareAndSwapUintptr(addr *uintptr, old, new uintptr) (swapped bool)
+
+// CompareAndSwapPointer executes the compare-and-swap operation for an unsafe.Pointer value.
+func CompareAndSwapPointer(addr *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool)
+
+// AddInt32 atomically adds delta to *addr and returns the new value.
+func AddInt32(addr *int32, delta int32) (new int32)
+
+// AddUint32 atomically adds delta to *addr and returns the new value.
+// To subtract a signed positive constant value c from x, do AddUint32(&x, ^uint32(c-1)).
+// In particular, to decrement x, do AddUint32(&x, ^uint32(0)).
+func AddUint32(addr *uint32, delta uint32) (new uint32)
+
+// AddInt64 atomically adds delta to *addr and returns the new value.
+func AddInt64(addr *int64, delta int64) (new int64)
+
+// AddUint64 atomically adds delta to *addr and returns the new value.
+// To subtract a signed positive constant value c from x, do AddUint64(&x, ^uint64(c-1)).
+// In particular, to decrement x, do AddUint64(&x, ^uint64(0)).
+func AddUint64(addr *uint64, delta uint64) (new uint64)
+
+// AddUintptr atomically adds delta to *addr and returns the new value.
+func AddUintptr(addr *uintptr, delta uintptr) (new uintptr)
+
+// LoadInt32 atomically loads *addr.
+func LoadInt32(addr *int32) (val int32)
+
+// LoadInt64 atomically loads *addr.
+func LoadInt64(addr *int64) (val int64)
+
+// LoadUint32 atomically loads *addr.
+func LoadUint32(addr *uint32) (val uint32)
+
+// LoadUint64 atomically loads *addr.
+func LoadUint64(addr *uint64) (val uint64)
+
+// LoadUintptr atomically loads *addr.
+func LoadUintptr(addr *uintptr) (val uintptr)
+
+// LoadPointer atomically loads *addr.
+func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
+
+// StoreInt32 atomically stores val into *addr.
+func StoreInt32(addr *int32, val int32)
+
+// StoreInt64 atomically stores val into *addr.
+func StoreInt64(addr *int64, val int64)
+
+// StoreUint32 atomically stores val into *addr.
+func StoreUint32(addr *uint32, val uint32)
+
+// StoreUint64 atomically stores val into *addr.
+func StoreUint64(addr *uint64, val uint64)
+
+// StoreUintptr atomically stores val into *addr.
+func StoreUintptr(addr *uintptr, val uintptr)
+
+// StorePointer atomically stores val into *addr.
+func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer)
diff --git a/contrib/go/_std_1.19/src/sync/atomic/type.go b/contrib/go/_std_1.19/src/sync/atomic/type.go
new file mode 100644
index 0000000000..f7b8f5a3b7
--- /dev/null
+++ b/contrib/go/_std_1.19/src/sync/atomic/type.go
@@ -0,0 +1,191 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+import "unsafe"
+
+// A Bool is an atomic boolean value.
+// The zero value is false.
+type Bool struct {
+ _ noCopy
+ v uint32
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Bool) Load() bool { return LoadUint32(&x.v) != 0 }
+
+// Store atomically stores val into x.
+func (x *Bool) Store(val bool) { StoreUint32(&x.v, b32(val)) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Bool) Swap(new bool) (old bool) { return SwapUint32(&x.v, b32(new)) != 0 }
+
+// CompareAndSwap executes the compare-and-swap operation for the boolean value x.
+func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) {
+ return CompareAndSwapUint32(&x.v, b32(old), b32(new))
+}
+
+// b32 returns a uint32 0 or 1 representing b.
+func b32(b bool) uint32 {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// A Pointer is an atomic pointer of type *T. The zero value is a nil *T.
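+//
+// A minimal usage sketch (the config type here is illustrative, not part of
+// the package):
+//
+// type config struct{ addr string }
+// var p Pointer[config]
+// p.Store(&config{addr: "a"})
+// old := p.Swap(&config{addr: "b"}) // old.addr == "a"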
+type Pointer[T any] struct {
+ _ noCopy
+ v unsafe.Pointer
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Pointer[T]) Load() *T { return (*T)(LoadPointer(&x.v)) }
+
+// Store atomically stores val into x.
+func (x *Pointer[T]) Store(val *T) { StorePointer(&x.v, unsafe.Pointer(val)) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Pointer[T]) Swap(new *T) (old *T) { return (*T)(SwapPointer(&x.v, unsafe.Pointer(new))) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
+ return CompareAndSwapPointer(&x.v, unsafe.Pointer(old), unsafe.Pointer(new))
+}
+
+// An Int32 is an atomic int32. The zero value is zero.
+type Int32 struct {
+ _ noCopy
+ v int32
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Int32) Load() int32 { return LoadInt32(&x.v) }
+
+// Store atomically stores val into x.
+func (x *Int32) Store(val int32) { StoreInt32(&x.v, val) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Int32) Swap(new int32) (old int32) { return SwapInt32(&x.v, new) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Int32) CompareAndSwap(old, new int32) (swapped bool) {
+ return CompareAndSwapInt32(&x.v, old, new)
+}
+
+// Add atomically adds delta to x and returns the new value.
+func (x *Int32) Add(delta int32) (new int32) { return AddInt32(&x.v, delta) }
+
+// An Int64 is an atomic int64. The zero value is zero.
+type Int64 struct {
+ _ noCopy
+ _ align64
+ v int64
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Int64) Load() int64 { return LoadInt64(&x.v) }
+
+// Store atomically stores val into x.
+func (x *Int64) Store(val int64) { StoreInt64(&x.v, val) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Int64) Swap(new int64) (old int64) { return SwapInt64(&x.v, new) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Int64) CompareAndSwap(old, new int64) (swapped bool) {
+ return CompareAndSwapInt64(&x.v, old, new)
+}
+
+// Add atomically adds delta to x and returns the new value.
+func (x *Int64) Add(delta int64) (new int64) { return AddInt64(&x.v, delta) }
+
+// A Uint32 is an atomic uint32. The zero value is zero.
+type Uint32 struct {
+ _ noCopy
+ v uint32
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Uint32) Load() uint32 { return LoadUint32(&x.v) }
+
+// Store atomically stores val into x.
+func (x *Uint32) Store(val uint32) { StoreUint32(&x.v, val) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Uint32) Swap(new uint32) (old uint32) { return SwapUint32(&x.v, new) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Uint32) CompareAndSwap(old, new uint32) (swapped bool) {
+ return CompareAndSwapUint32(&x.v, old, new)
+}
+
+// Add atomically adds delta to x and returns the new value.
+func (x *Uint32) Add(delta uint32) (new uint32) { return AddUint32(&x.v, delta) }
+
+// A Uint64 is an atomic uint64. The zero value is zero.
+type Uint64 struct {
+ _ noCopy
+ _ align64
+ v uint64
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Uint64) Load() uint64 { return LoadUint64(&x.v) }
+
+// Store atomically stores val into x.
+func (x *Uint64) Store(val uint64) { StoreUint64(&x.v, val) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Uint64) Swap(new uint64) (old uint64) { return SwapUint64(&x.v, new) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Uint64) CompareAndSwap(old, new uint64) (swapped bool) {
+ return CompareAndSwapUint64(&x.v, old, new)
+}
+
+// Add atomically adds delta to x and returns the new value.
+func (x *Uint64) Add(delta uint64) (new uint64) { return AddUint64(&x.v, delta) }
+
+// A Uintptr is an atomic uintptr. The zero value is zero.
+type Uintptr struct {
+ _ noCopy
+ v uintptr
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Uintptr) Load() uintptr { return LoadUintptr(&x.v) }
+
+// Store atomically stores val into x.
+func (x *Uintptr) Store(val uintptr) { StoreUintptr(&x.v, val) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Uintptr) Swap(new uintptr) (old uintptr) { return SwapUintptr(&x.v, new) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) {
+ return CompareAndSwapUintptr(&x.v, old, new)
+}
+
+// Add atomically adds delta to x and returns the new value.
+func (x *Uintptr) Add(delta uintptr) (new uintptr) { return AddUintptr(&x.v, delta) }
+
+// noCopy may be added to structs which must not be copied
+// after the first use.
+//
+// See https://golang.org/issues/8005#issuecomment-190753527
+// for details.
+//
+// Note that it must not be embedded, due to the Lock and Unlock methods.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
+func (*noCopy) Unlock() {}
+
+// align64 may be added to structs that must be 64-bit aligned.
+// This struct is recognized by a special case in the compiler
+// and will not work if copied to any other package.
+type align64 struct{}
diff --git a/contrib/go/_std_1.18/src/sync/atomic/value.go b/contrib/go/_std_1.19/src/sync/atomic/value.go
index 88315f2d88..88315f2d88 100644
--- a/contrib/go/_std_1.18/src/sync/atomic/value.go
+++ b/contrib/go/_std_1.19/src/sync/atomic/value.go
diff --git a/contrib/go/_std_1.19/src/sync/cond.go b/contrib/go/_std_1.19/src/sync/cond.go
new file mode 100644
index 0000000000..cbf5ba6071
--- /dev/null
+++ b/contrib/go/_std_1.19/src/sync/cond.go
@@ -0,0 +1,117 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// Cond implements a condition variable, a rendezvous point
+// for goroutines waiting for or announcing the occurrence
+// of an event.
+//
+// Each Cond has an associated Locker L (often a *Mutex or *RWMutex),
+// which must be held when changing the condition and
+// when calling the Wait method.
+//
+// A Cond must not be copied after first use.
+//
+// In the terminology of the Go memory model, Cond arranges that
+// a call to Broadcast or Signal “synchronizes before” any Wait call
+// that it unblocks.
+//
+// For many simple use cases, users will be better off using channels than a
+// Cond (Broadcast corresponds to closing a channel, and Signal corresponds to
+// sending on a channel).
+//
+// For more on replacements for sync.Cond, see [Roberto Clapis's series on
+// advanced concurrency patterns], as well as [Bryan Mills's talk on concurrency
+// patterns].
+//
+// [Roberto Clapis's series on advanced concurrency patterns]: https://blogtitle.github.io/categories/concurrency/
+// [Bryan Mills's talk on concurrency patterns]: https://drive.google.com/file/d/1nPdvhB0PutEJzdCq5ms6UI58dp50fcAN/view
+type Cond struct {
+ noCopy noCopy
+
+ // L is held while observing or changing the condition
+ L Locker
+
+ notify notifyList
+ checker copyChecker
+}
+
+// NewCond returns a new Cond with Locker l.
+func NewCond(l Locker) *Cond {
+ return &Cond{L: l}
+}
+
+// Wait atomically unlocks c.L and suspends execution
+// of the calling goroutine. After later resuming execution,
+// Wait locks c.L before returning. Unlike in other systems,
+// Wait cannot return unless awoken by Broadcast or Signal.
+//
+// Because c.L is not locked when Wait first resumes, the caller
+// typically cannot assume that the condition is true when
+// Wait returns. Instead, the caller should Wait in a loop:
+//
+// c.L.Lock()
+// for !condition() {
+// c.Wait()
+// }
+// ... make use of condition ...
+// c.L.Unlock()
+func (c *Cond) Wait() {
+ c.checker.check()
+ t := runtime_notifyListAdd(&c.notify)
+ c.L.Unlock()
+ runtime_notifyListWait(&c.notify, t)
+ c.L.Lock()
+}
+
+// Signal wakes one goroutine waiting on c, if there is any.
+//
+// It is allowed but not required for the caller to hold c.L
+// during the call.
+//
+// Signal() does not affect goroutine scheduling priority; if other goroutines
+// are attempting to lock c.L, they may be awoken before a "waiting" goroutine.
+func (c *Cond) Signal() {
+ c.checker.check()
+ runtime_notifyListNotifyOne(&c.notify)
+}
+
+// Broadcast wakes all goroutines waiting on c.
+//
+// It is allowed but not required for the caller to hold c.L
+// during the call.
+func (c *Cond) Broadcast() {
+ c.checker.check()
+ runtime_notifyListNotifyAll(&c.notify)
+}
+
+// copyChecker holds a back pointer to itself to detect object copying.
+type copyChecker uintptr
+
+func (c *copyChecker) check() {
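+ // The clauses are evaluated in order, and any failure means c was not copied:
+ // either *c already holds this checker's own address (the common case),
+ // or *c is still zero and the CAS initializes it on first use, or another
+ // goroutine won that race and the re-read now sees our own address.
+ // Only when all three clauses hold has the Cond been copied after first use.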
+ if uintptr(*c) != uintptr(unsafe.Pointer(c)) &&
+ !atomic.CompareAndSwapUintptr((*uintptr)(c), 0, uintptr(unsafe.Pointer(c))) &&
+ uintptr(*c) != uintptr(unsafe.Pointer(c)) {
+ panic("sync.Cond is copied")
+ }
+}
+
+// noCopy may be added to structs which must not be copied
+// after the first use.
+//
+// See https://golang.org/issues/8005#issuecomment-190753527
+// for details.
+//
+// Note that it must not be embedded, due to the Lock and Unlock methods.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
+func (*noCopy) Unlock() {}
diff --git a/contrib/go/_std_1.19/src/sync/map.go b/contrib/go/_std_1.19/src/sync/map.go
new file mode 100644
index 0000000000..ec529e056b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/sync/map.go
@@ -0,0 +1,393 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// Map is like a Go map[interface{}]interface{} but is safe for concurrent use
+// by multiple goroutines without additional locking or coordination.
+// Loads, stores, and deletes run in amortized constant time.
+//
+// The Map type is specialized. Most code should use a plain Go map instead,
+// with separate locking or coordination, for better type safety and to make it
+// easier to maintain other invariants along with the map content.
+//
+// The Map type is optimized for two common use cases: (1) when the entry for a given
+// key is only ever written once but read many times, as in caches that only grow,
+// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
+// sets of keys. In these two cases, use of a Map may significantly reduce lock
+// contention compared to a Go map paired with a separate Mutex or RWMutex.
+//
+// The zero Map is empty and ready for use. A Map must not be copied after first use.
+//
+// In the terminology of the Go memory model, Map arranges that a write operation
+// “synchronizes before” any read operation that observes the effect of the write, where
+// read and write operations are defined as follows.
+// Load, LoadAndDelete, LoadOrStore are read operations;
+// Delete, LoadAndDelete, and Store are write operations;
+// and LoadOrStore is a write operation when it returns loaded set to false.
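+//
+// A minimal usage sketch (the key and value here are arbitrary):
+//
+// var m Map
+// m.Store("lang", "Go")
+// if v, ok := m.Load("lang"); ok {
+//  _ = v.(string) // "Go"
+// }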
+type Map struct {
+ mu Mutex
+
+ // read contains the portion of the map's contents that are safe for
+ // concurrent access (with or without mu held).
+ //
+ // The read field itself is always safe to load, but must only be stored with
+ // mu held.
+ //
+ // Entries stored in read may be updated concurrently without mu, but updating
+ // a previously-expunged entry requires that the entry be copied to the dirty
+ // map and unexpunged with mu held.
+ read atomic.Value // readOnly
+
+ // dirty contains the portion of the map's contents that require mu to be
+ // held. To ensure that the dirty map can be promoted to the read map quickly,
+ // it also includes all of the non-expunged entries in the read map.
+ //
+ // Expunged entries are not stored in the dirty map. An expunged entry in the
+ // clean map must be unexpunged and added to the dirty map before a new value
+ // can be stored to it.
+ //
+ // If the dirty map is nil, the next write to the map will initialize it by
+ // making a shallow copy of the clean map, omitting stale entries.
+ dirty map[any]*entry
+
+ // misses counts the number of loads since the read map was last updated that
+ // needed to lock mu to determine whether the key was present.
+ //
+ // Once enough misses have occurred to cover the cost of copying the dirty
+ // map, the dirty map will be promoted to the read map (in the unamended
+ // state) and the next store to the map will make a new dirty copy.
+ misses int
+}
+
+// readOnly is an immutable struct stored atomically in the Map.read field.
+type readOnly struct {
+ m map[any]*entry
+ amended bool // true if the dirty map contains some key not in m.
+}
+
+// expunged is an arbitrary pointer that marks entries which have been deleted
+// from the dirty map.
+var expunged = unsafe.Pointer(new(any))
+
+// An entry is a slot in the map corresponding to a particular key.
+type entry struct {
+ // p points to the interface{} value stored for the entry.
+ //
+ // If p == nil, the entry has been deleted, and either m.dirty == nil or
+ // m.dirty[key] is e.
+ //
+ // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
+ // is missing from m.dirty.
+ //
+ // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
+ // != nil, in m.dirty[key].
+ //
+ // An entry can be deleted by atomic replacement with nil: when m.dirty is
+ // next created, it will atomically replace nil with expunged and leave
+ // m.dirty[key] unset.
+ //
+ // An entry's associated value can be updated by atomic replacement, provided
+ // p != expunged. If p == expunged, an entry's associated value can be updated
+ // only after first setting m.dirty[key] = e so that lookups using the dirty
+ // map find the entry.
+ p unsafe.Pointer // *interface{}
+}
+
+func newEntry(i any) *entry {
+ return &entry{p: unsafe.Pointer(&i)}
+}
+
+// Load returns the value stored in the map for a key, or nil if no
+// value is present.
+// The ok result indicates whether value was found in the map.
+func (m *Map) Load(key any) (value any, ok bool) {
+ read, _ := m.read.Load().(readOnly)
+ e, ok := read.m[key]
+ if !ok && read.amended {
+ m.mu.Lock()
+ // Avoid reporting a spurious miss if m.dirty got promoted while we were
+ // blocked on m.mu. (If further loads of the same key will not miss, it's
+ // not worth copying the dirty map for this key.)
+ read, _ = m.read.Load().(readOnly)
+ e, ok = read.m[key]
+ if !ok && read.amended {
+ e, ok = m.dirty[key]
+ // Regardless of whether the entry was present, record a miss: this key
+ // will take the slow path until the dirty map is promoted to the read
+ // map.
+ m.missLocked()
+ }
+ m.mu.Unlock()
+ }
+ if !ok {
+ return nil, false
+ }
+ return e.load()
+}
+
+func (e *entry) load() (value any, ok bool) {
+ p := atomic.LoadPointer(&e.p)
+ if p == nil || p == expunged {
+ return nil, false
+ }
+ return *(*any)(p), true
+}
+
+// Store sets the value for a key.
+func (m *Map) Store(key, value any) {
+ read, _ := m.read.Load().(readOnly)
+ if e, ok := read.m[key]; ok && e.tryStore(&value) {
+ return
+ }
+
+ m.mu.Lock()
+ read, _ = m.read.Load().(readOnly)
+ if e, ok := read.m[key]; ok {
+ if e.unexpungeLocked() {
+ // The entry was previously expunged, which implies that there is a
+ // non-nil dirty map and this entry is not in it.
+ m.dirty[key] = e
+ }
+ e.storeLocked(&value)
+ } else if e, ok := m.dirty[key]; ok {
+ e.storeLocked(&value)
+ } else {
+ if !read.amended {
+ // We're adding the first new key to the dirty map.
+ // Make sure it is allocated and mark the read-only map as incomplete.
+ m.dirtyLocked()
+ m.read.Store(readOnly{m: read.m, amended: true})
+ }
+ m.dirty[key] = newEntry(value)
+ }
+ m.mu.Unlock()
+}
+
+// tryStore stores a value if the entry has not been expunged.
+//
+// If the entry is expunged, tryStore returns false and leaves the entry
+// unchanged.
+func (e *entry) tryStore(i *any) bool {
+ for {
+ p := atomic.LoadPointer(&e.p)
+ if p == expunged {
+ return false
+ }
+ if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
+ return true
+ }
+ }
+}
+
+// unexpungeLocked ensures that the entry is not marked as expunged.
+//
+// If the entry was previously expunged, it must be added to the dirty map
+// before m.mu is unlocked.
+func (e *entry) unexpungeLocked() (wasExpunged bool) {
+ return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
+}
+
+// storeLocked unconditionally stores a value to the entry.
+//
+// The entry must be known not to be expunged.
+func (e *entry) storeLocked(i *any) {
+ atomic.StorePointer(&e.p, unsafe.Pointer(i))
+}
+
+// LoadOrStore returns the existing value for the key if present.
+// Otherwise, it stores and returns the given value.
+// The loaded result is true if the value was loaded, false if stored.
+func (m *Map) LoadOrStore(key, value any) (actual any, loaded bool) {
+ // Avoid locking if it's a clean hit.
+ read, _ := m.read.Load().(readOnly)
+ if e, ok := read.m[key]; ok {
+ actual, loaded, ok := e.tryLoadOrStore(value)
+ if ok {
+ return actual, loaded
+ }
+ }
+
+ m.mu.Lock()
+ read, _ = m.read.Load().(readOnly)
+ if e, ok := read.m[key]; ok {
+ if e.unexpungeLocked() {
+ m.dirty[key] = e
+ }
+ actual, loaded, _ = e.tryLoadOrStore(value)
+ } else if e, ok := m.dirty[key]; ok {
+ actual, loaded, _ = e.tryLoadOrStore(value)
+ m.missLocked()
+ } else {
+ if !read.amended {
+ // We're adding the first new key to the dirty map.
+ // Make sure it is allocated and mark the read-only map as incomplete.
+ m.dirtyLocked()
+ m.read.Store(readOnly{m: read.m, amended: true})
+ }
+ m.dirty[key] = newEntry(value)
+ actual, loaded = value, false
+ }
+ m.mu.Unlock()
+
+ return actual, loaded
+}
+
+// tryLoadOrStore atomically loads or stores a value if the entry is not
+// expunged.
+//
+// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
+// returns with ok==false.
+func (e *entry) tryLoadOrStore(i any) (actual any, loaded, ok bool) {
+ p := atomic.LoadPointer(&e.p)
+ if p == expunged {
+ return nil, false, false
+ }
+ if p != nil {
+ return *(*any)(p), true, true
+ }
+
+ // Copy the interface after the first load to make this method more amenable
+ // to escape analysis: if we hit the "load" path or the entry is expunged, we
+ // shouldn't bother heap-allocating.
+ ic := i
+ for {
+ if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
+ return i, false, true
+ }
+ p = atomic.LoadPointer(&e.p)
+ if p == expunged {
+ return nil, false, false
+ }
+ if p != nil {
+ return *(*any)(p), true, true
+ }
+ }
+}
+
+// LoadAndDelete deletes the value for a key, returning the previous value if any.
+// The loaded result reports whether the key was present.
+func (m *Map) LoadAndDelete(key any) (value any, loaded bool) {
+ read, _ := m.read.Load().(readOnly)
+ e, ok := read.m[key]
+ if !ok && read.amended {
+ m.mu.Lock()
+ read, _ = m.read.Load().(readOnly)
+ e, ok = read.m[key]
+ if !ok && read.amended {
+ e, ok = m.dirty[key]
+ delete(m.dirty, key)
+ // Regardless of whether the entry was present, record a miss: this key
+ // will take the slow path until the dirty map is promoted to the read
+ // map.
+ m.missLocked()
+ }
+ m.mu.Unlock()
+ }
+ if ok {
+ return e.delete()
+ }
+ return nil, false
+}
+
+// Delete deletes the value for a key.
+func (m *Map) Delete(key any) {
+ m.LoadAndDelete(key)
+}
+
+func (e *entry) delete() (value any, ok bool) {
+ for {
+ p := atomic.LoadPointer(&e.p)
+ if p == nil || p == expunged {
+ return nil, false
+ }
+ if atomic.CompareAndSwapPointer(&e.p, p, nil) {
+ return *(*any)(p), true
+ }
+ }
+}
+
+// Range calls f sequentially for each key and value present in the map.
+// If f returns false, range stops the iteration.
+//
+// Range does not necessarily correspond to any consistent snapshot of the Map's
+// contents: no key will be visited more than once, but if the value for any key
+// is stored or deleted concurrently (including by f), Range may reflect any
+// mapping for that key from any point during the Range call. Range does not
+// block other methods on the receiver; even f itself may call any method on m.
+//
+// Range may be O(N) with the number of elements in the map even if f returns
+// false after a constant number of calls.
+func (m *Map) Range(f func(key, value any) bool) {
+ // We need to be able to iterate over all of the keys that were already
+ // present at the start of the call to Range.
+ // If read.amended is false, then read.m satisfies that property without
+ // requiring us to hold m.mu for a long time.
+ read, _ := m.read.Load().(readOnly)
+ if read.amended {
+ // m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
+ // (assuming the caller does not break out early), so a call to Range
+ // amortizes an entire copy of the map: we can promote the dirty copy
+ // immediately!
+ m.mu.Lock()
+ read, _ = m.read.Load().(readOnly)
+ if read.amended {
+ read = readOnly{m: m.dirty}
+ m.read.Store(read)
+ m.dirty = nil
+ m.misses = 0
+ }
+ m.mu.Unlock()
+ }
+
+ for k, e := range read.m {
+ v, ok := e.load()
+ if !ok {
+ continue
+ }
+ if !f(k, v) {
+ break
+ }
+ }
+}
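+
+// Usage sketch (illustrative only): collecting a best-effort snapshot of
+// the keys present during the call, per the consistency notes above.
+//
+//	var keys []any
+//	m.Range(func(key, value any) bool {
+//		keys = append(keys, key)
+//		return true
+//	})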
+
+func (m *Map) missLocked() {
+ m.misses++
+ if m.misses < len(m.dirty) {
+ return
+ }
+ m.read.Store(readOnly{m: m.dirty})
+ m.dirty = nil
+ m.misses = 0
+}
+
+func (m *Map) dirtyLocked() {
+ if m.dirty != nil {
+ return
+ }
+
+ read, _ := m.read.Load().(readOnly)
+ m.dirty = make(map[any]*entry, len(read.m))
+ for k, e := range read.m {
+ if !e.tryExpungeLocked() {
+ m.dirty[k] = e
+ }
+ }
+}
+
+func (e *entry) tryExpungeLocked() (isExpunged bool) {
+ p := atomic.LoadPointer(&e.p)
+ for p == nil {
+ if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
+ return true
+ }
+ p = atomic.LoadPointer(&e.p)
+ }
+ return p == expunged
+}
diff --git a/contrib/go/_std_1.19/src/sync/mutex.go b/contrib/go/_std_1.19/src/sync/mutex.go
new file mode 100644
index 0000000000..2ea024e585
--- /dev/null
+++ b/contrib/go/_std_1.19/src/sync/mutex.go
@@ -0,0 +1,259 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sync provides basic synchronization primitives such as mutual
+// exclusion locks. Other than the Once and WaitGroup types, most are intended
+// for use by low-level library routines. Higher-level synchronization is
+// better done via channels and communication.
+//
+// Values containing the types defined in this package should not be copied.
+package sync
+
+import (
+ "internal/race"
+ "sync/atomic"
+ "unsafe"
+)
+
+// Provided by runtime via linkname.
+func throw(string)
+func fatal(string)
+
+// A Mutex is a mutual exclusion lock.
+// The zero value for a Mutex is an unlocked mutex.
+//
+// A Mutex must not be copied after first use.
+//
+// In the terminology of the Go memory model,
+// the n'th call to Unlock “synchronizes before” the m'th call to Lock
+// for any n < m.
+// A successful call to TryLock is equivalent to a call to Lock.
+// A failed call to TryLock does not establish any “synchronizes before”
+// relation at all.
+type Mutex struct {
+ state int32
+ sema uint32
+}
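+
+// Usage sketch (illustrative only): the zero-value Mutex guards shared
+// state without any initialization.
+//
+//	type counter struct {
+//		mu Mutex
+//		n  int
+//	}
+//
+//	func (c *counter) Inc() {
+//		c.mu.Lock()
+//		defer c.mu.Unlock()
+//		c.n++
+//	}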
+
+// A Locker represents an object that can be locked and unlocked.
+type Locker interface {
+ Lock()
+ Unlock()
+}
+
+const (
+ mutexLocked = 1 << iota // mutex is locked
+ mutexWoken
+ mutexStarving
+ mutexWaiterShift = iota
+
+ // Mutex fairness.
+ //
+ // Mutex can be in 2 modes of operations: normal and starvation.
+ // In normal mode waiters are queued in FIFO order, but a woken-up waiter
+ // does not own the mutex and competes with newly arriving goroutines over
+ // ownership. Newly arriving goroutines have an advantage -- they are
+ // already running on a CPU and there can be lots of them, so a woken-up
+ // waiter has a good chance of losing. In such a case it is queued at the
+ // front of the wait queue. If a waiter fails to acquire the mutex for more
+ // than 1ms, it switches the mutex to starvation mode.
+ //
+ // In starvation mode ownership of the mutex is directly handed off from
+ // the unlocking goroutine to the waiter at the front of the queue.
+ // Newly arriving goroutines don't try to acquire the mutex even if it appears
+ // to be unlocked, and don't try to spin. Instead they queue themselves at
+ // the tail of the wait queue.
+ //
+ // If a waiter receives ownership of the mutex and sees that either
+ // (1) it is the last waiter in the queue, or (2) it waited for less than 1 ms,
+ // it switches mutex back to normal operation mode.
+ //
+ // Normal mode has considerably better performance as a goroutine can acquire
+ // a mutex several times in a row even if there are blocked waiters.
+ // Starvation mode is important to prevent pathological cases of tail latency.
+ starvationThresholdNs = 1e6
+)
+
+// Lock locks m.
+// If the lock is already in use, the calling goroutine
+// blocks until the mutex is available.
+func (m *Mutex) Lock() {
+ // Fast path: grab unlocked mutex.
+ if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
+ if race.Enabled {
+ race.Acquire(unsafe.Pointer(m))
+ }
+ return
+ }
+ // Slow path (outlined so that the fast path can be inlined)
+ m.lockSlow()
+}
+
+// TryLock tries to lock m and reports whether it succeeded.
+//
+// Note that while correct uses of TryLock do exist, they are rare,
+// and use of TryLock is often a sign of a deeper problem
+// in a particular use of mutexes.
+func (m *Mutex) TryLock() bool {
+ old := m.state
+ if old&(mutexLocked|mutexStarving) != 0 {
+ return false
+ }
+
+ // There may be a goroutine waiting for the mutex, but we are
+ // running now and can try to grab the mutex before that
+ // goroutine wakes up.
+ if !atomic.CompareAndSwapInt32(&m.state, old, old|mutexLocked) {
+ return false
+ }
+
+ if race.Enabled {
+ race.Acquire(unsafe.Pointer(m))
+ }
+ return true
+}
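+
+// Usage sketch (illustrative only; m is a Mutex and refresh is a
+// hypothetical best-effort task): TryLock lets a goroutine skip optional
+// work instead of blocking when someone else already holds the lock.
+//
+//	if m.TryLock() {
+//		refresh()
+//		m.Unlock()
+//	}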
+
+func (m *Mutex) lockSlow() {
+ var waitStartTime int64
+ starving := false
+ awoke := false
+ iter := 0
+ old := m.state
+ for {
+ // Don't spin in starvation mode, ownership is handed off to waiters
+ // so we won't be able to acquire the mutex anyway.
+ if old&(mutexLocked|mutexStarving) == mutexLocked && runtime_canSpin(iter) {
+ // Active spinning makes sense.
+ // Try to set mutexWoken flag to inform Unlock
+ // to not wake other blocked goroutines.
+ if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
+ atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
+ awoke = true
+ }
+ runtime_doSpin()
+ iter++
+ old = m.state
+ continue
+ }
+ new := old
+ // Don't try to acquire a starving mutex; newly arriving goroutines must queue.
+ if old&mutexStarving == 0 {
+ new |= mutexLocked
+ }
+ if old&(mutexLocked|mutexStarving) != 0 {
+ new += 1 << mutexWaiterShift
+ }
+ // The current goroutine switches mutex to starvation mode.
+ // But if the mutex is currently unlocked, don't do the switch.
+ // Unlock expects that starving mutex has waiters, which will not
+ // be true in this case.
+ if starving && old&mutexLocked != 0 {
+ new |= mutexStarving
+ }
+ if awoke {
+ // The goroutine has been woken from sleep,
+ // so we need to reset the flag in either case.
+ if new&mutexWoken == 0 {
+ throw("sync: inconsistent mutex state")
+ }
+ new &^= mutexWoken
+ }
+ if atomic.CompareAndSwapInt32(&m.state, old, new) {
+ if old&(mutexLocked|mutexStarving) == 0 {
+ break // locked the mutex with CAS
+ }
+ // If we were already waiting before, queue at the front of the queue.
+ queueLifo := waitStartTime != 0
+ if waitStartTime == 0 {
+ waitStartTime = runtime_nanotime()
+ }
+ runtime_SemacquireMutex(&m.sema, queueLifo, 1)
+ starving = starving || runtime_nanotime()-waitStartTime > starvationThresholdNs
+ old = m.state
+ if old&mutexStarving != 0 {
+ // If this goroutine was woken and mutex is in starvation mode,
+ // ownership was handed off to us but mutex is in somewhat
+ // inconsistent state: mutexLocked is not set and we are still
+ // accounted as waiter. Fix that.
+ if old&(mutexLocked|mutexWoken) != 0 || old>>mutexWaiterShift == 0 {
+ throw("sync: inconsistent mutex state")
+ }
+ delta := int32(mutexLocked - 1<<mutexWaiterShift)
+ if !starving || old>>mutexWaiterShift == 1 {
+ // Exit starvation mode.
+ // It is critical to do this here and to consider the wait time.
+ // Starvation mode is so inefficient that two goroutines
+ // can stay in lock-step indefinitely once they switch the
+ // mutex to starvation mode.
+ delta -= mutexStarving
+ }
+ atomic.AddInt32(&m.state, delta)
+ break
+ }
+ awoke = true
+ iter = 0
+ } else {
+ old = m.state
+ }
+ }
+
+ if race.Enabled {
+ race.Acquire(unsafe.Pointer(m))
+ }
+}
+
+// Unlock unlocks m.
+// It is a run-time error if m is not locked on entry to Unlock.
+//
+// A locked Mutex is not associated with a particular goroutine.
+// It is allowed for one goroutine to lock a Mutex and then
+// arrange for another goroutine to unlock it.
+func (m *Mutex) Unlock() {
+ if race.Enabled {
+ _ = m.state
+ race.Release(unsafe.Pointer(m))
+ }
+
+ // Fast path: drop lock bit.
+ new := atomic.AddInt32(&m.state, -mutexLocked)
+ if new != 0 {
+ // Outlined slow path to allow inlining the fast path.
+ // To hide unlockSlow during tracing we skip one extra frame when tracing GoUnblock.
+ m.unlockSlow(new)
+ }
+}
+
+func (m *Mutex) unlockSlow(new int32) {
+ if (new+mutexLocked)&mutexLocked == 0 {
+ fatal("sync: unlock of unlocked mutex")
+ }
+ if new&mutexStarving == 0 {
+ old := new
+ for {
+ // If there are no waiters or a goroutine has already
+ // been woken or grabbed the lock, no need to wake anyone.
+ // In starvation mode ownership is directly handed off from the unlocking
+ // goroutine to the next waiter. We are not part of this chain,
+ // since we did not observe mutexStarving when we unlocked the mutex above.
+ // So get out of the way.
+ if old>>mutexWaiterShift == 0 || old&(mutexLocked|mutexWoken|mutexStarving) != 0 {
+ return
+ }
+ // Grab the right to wake someone.
+ new = (old - 1<<mutexWaiterShift) | mutexWoken
+ if atomic.CompareAndSwapInt32(&m.state, old, new) {
+ runtime_Semrelease(&m.sema, false, 1)
+ return
+ }
+ old = m.state
+ }
+ } else {
+ // Starving mode: hand off mutex ownership to the next waiter, and yield
+ // our time slice so that the next waiter can start to run immediately.
+ // Note: mutexLocked is not set; the waiter will set it after wakeup.
+ // But the mutex is still considered locked if mutexStarving is set,
+ // so newly arriving goroutines won't acquire it.
+ runtime_Semrelease(&m.sema, true, 1)
+ }
+}
diff --git a/contrib/go/_std_1.19/src/sync/once.go b/contrib/go/_std_1.19/src/sync/once.go
new file mode 100644
index 0000000000..b6399cfc3d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/sync/once.go
@@ -0,0 +1,76 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import (
+ "sync/atomic"
+)
+
+// Once is an object that will perform exactly one action.
+//
+// A Once must not be copied after first use.
+//
+// In the terminology of the Go memory model,
+// the return from f “synchronizes before”
+// the return from any call of once.Do(f).
+type Once struct {
+ // done indicates whether the action has been performed.
+ // It is first in the struct because it is used in the hot path.
+ // The hot path is inlined at every call site.
+ // Placing done first allows more compact instructions on some architectures (amd64/386),
+ // and fewer instructions (to calculate offset) on other architectures.
+ done uint32
+ m Mutex
+}
+
+// Do calls the function f if and only if Do is being called for the
+// first time for this instance of Once. In other words, given
+//
+// var once Once
+//
+// if once.Do(f) is called multiple times, only the first call will invoke f,
+// even if f has a different value in each invocation. A new instance of
+// Once is required for each function to execute.
+//
+// Do is intended for initialization that must be run exactly once. Since f
+// is niladic, it may be necessary to use a function literal to capture the
+// arguments to a function to be invoked by Do:
+//
+// config.once.Do(func() { config.init(filename) })
+//
+// Because no call to Do returns until the one call to f returns, if f causes
+// Do to be called, it will deadlock.
+//
+// If f panics, Do considers it to have returned; future calls of Do return
+// without calling f.
+func (o *Once) Do(f func()) {
+ // Note: Here is an incorrect implementation of Do:
+ //
+ // if atomic.CompareAndSwapUint32(&o.done, 0, 1) {
+ // f()
+ // }
+ //
+ // Do guarantees that when it returns, f has finished.
+ // This implementation would not implement that guarantee:
+ // given two simultaneous calls, the winner of the cas would
+ // call f, and the second would return immediately, without
+ // waiting for the first's call to f to complete.
+ // This is why the slow path falls back to a mutex, and why
+ // the atomic.StoreUint32 must be delayed until after f returns.
+
+ if atomic.LoadUint32(&o.done) == 0 {
+ // Outlined slow-path to allow inlining of the fast-path.
+ o.doSlow(f)
+ }
+}
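+
+// Usage sketch (illustrative only; Config and loadConfig are hypothetical):
+// lazily initializing a package-level value exactly once.
+//
+//	var (
+//		cfgOnce Once
+//		cfg     *Config
+//	)
+//
+//	func Cfg() *Config {
+//		cfgOnce.Do(func() { cfg = loadConfig() })
+//		return cfg
+//	}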
+
+func (o *Once) doSlow(f func()) {
+ o.m.Lock()
+ defer o.m.Unlock()
+ if o.done == 0 {
+ defer atomic.StoreUint32(&o.done, 1)
+ f()
+ }
+}
diff --git a/contrib/go/_std_1.19/src/sync/pool.go b/contrib/go/_std_1.19/src/sync/pool.go
new file mode 100644
index 0000000000..cf01e2e189
--- /dev/null
+++ b/contrib/go/_std_1.19/src/sync/pool.go
@@ -0,0 +1,297 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import (
+ "internal/race"
+ "runtime"
+ "sync/atomic"
+ "unsafe"
+)
+
+// A Pool is a set of temporary objects that may be individually saved and
+// retrieved.
+//
+// Any item stored in the Pool may be removed automatically at any time without
+// notification. If the Pool holds the only reference when this happens, the
+// item might be deallocated.
+//
+// A Pool is safe for use by multiple goroutines simultaneously.
+//
+// Pool's purpose is to cache allocated but unused items for later reuse,
+// relieving pressure on the garbage collector. That is, it makes it easy to
+// build efficient, thread-safe free lists. However, it is not suitable for all
+// free lists.
+//
+// An appropriate use of a Pool is to manage a group of temporary items
+// silently shared among and potentially reused by concurrent independent
+// clients of a package. Pool provides a way to amortize allocation overhead
+// across many clients.
+//
+// An example of good use of a Pool is in the fmt package, which maintains a
+// dynamically-sized store of temporary output buffers. The store scales under
+// load (when many goroutines are actively printing) and shrinks when
+// quiescent.
+//
+// On the other hand, a free list maintained as part of a short-lived object is
+// not a suitable use for a Pool, since the overhead does not amortize well in
+// that scenario. It is more efficient to have such objects implement their own
+// free list.
+//
+// A Pool must not be copied after first use.
+//
+// In the terminology of the Go memory model, a call to Put(x) “synchronizes before”
+// a call to Get returning that same value x.
+// Similarly, a call to New returning x “synchronizes before”
+// a call to Get returning that same value x.
+type Pool struct {
+ noCopy noCopy
+
+ local unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal
+ localSize uintptr // size of the local array
+
+ victim unsafe.Pointer // local from previous cycle
+ victimSize uintptr // size of victims array
+
+ // New optionally specifies a function to generate
+ // a value when Get would otherwise return nil.
+ // It may not be changed concurrently with calls to Get.
+ New func() any
+}
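+
+// Usage sketch (illustrative only; buffer is a hypothetical type with a
+// Reset method): amortizing allocations across requests, in the spirit of
+// the fmt example above.
+//
+//	var bufPool = Pool{
+//		New: func() any { return new(buffer) },
+//	}
+//
+//	func handle() {
+//		b := bufPool.Get().(*buffer)
+//		defer func() { b.Reset(); bufPool.Put(b) }()
+//		// ... use b ...
+//	}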
+
+// Local per-P Pool appendix.
+type poolLocalInternal struct {
+ private any // Can be used only by the respective P.
+ shared poolChain // Local P can pushHead/popHead; any P can popTail.
+}
+
+type poolLocal struct {
+ poolLocalInternal
+
+ // Prevents false sharing on widespread platforms with
+ // 128 mod (cache line size) = 0.
+ pad [128 - unsafe.Sizeof(poolLocalInternal{})%128]byte
+}
+
+// from runtime
+func fastrandn(n uint32) uint32
+
+var poolRaceHash [128]uint64
+
+// poolRaceAddr returns an address to use as the synchronization point
+// for race detector logic. We don't use the actual pointer stored in x
+// directly, for fear of conflicting with other synchronization on that address.
+// Instead, we hash the pointer to get an index into poolRaceHash.
+// See discussion on golang.org/cl/31589.
+func poolRaceAddr(x any) unsafe.Pointer {
+ ptr := uintptr((*[2]unsafe.Pointer)(unsafe.Pointer(&x))[1])
+ h := uint32((uint64(uint32(ptr)) * 0x85ebca6b) >> 16)
+ return unsafe.Pointer(&poolRaceHash[h%uint32(len(poolRaceHash))])
+}
+
+// Put adds x to the pool.
+func (p *Pool) Put(x any) {
+ if x == nil {
+ return
+ }
+ if race.Enabled {
+ if fastrandn(4) == 0 {
+ // Randomly drop x on floor.
+ return
+ }
+ race.ReleaseMerge(poolRaceAddr(x))
+ race.Disable()
+ }
+ l, _ := p.pin()
+ if l.private == nil {
+ l.private = x
+ } else {
+ l.shared.pushHead(x)
+ }
+ runtime_procUnpin()
+ if race.Enabled {
+ race.Enable()
+ }
+}
+
+// Get selects an arbitrary item from the Pool, removes it from the
+// Pool, and returns it to the caller.
+// Get may choose to ignore the pool and treat it as empty.
+// Callers should not assume any relation between values passed to Put and
+// the values returned by Get.
+//
+// If Get would otherwise return nil and p.New is non-nil, Get returns
+// the result of calling p.New.
+func (p *Pool) Get() any {
+ if race.Enabled {
+ race.Disable()
+ }
+ l, pid := p.pin()
+ x := l.private
+ l.private = nil
+ if x == nil {
+ // Try to pop the head of the local shard. We prefer
+ // the head over the tail for temporal locality of
+ // reuse.
+ x, _ = l.shared.popHead()
+ if x == nil {
+ x = p.getSlow(pid)
+ }
+ }
+ runtime_procUnpin()
+ if race.Enabled {
+ race.Enable()
+ if x != nil {
+ race.Acquire(poolRaceAddr(x))
+ }
+ }
+ if x == nil && p.New != nil {
+ x = p.New()
+ }
+ return x
+}
+
+func (p *Pool) getSlow(pid int) any {
+ // See the comment in pin regarding ordering of the loads.
+ size := runtime_LoadAcquintptr(&p.localSize) // load-acquire
+ locals := p.local // load-consume
+ // Try to steal one element from other procs.
+ for i := 0; i < int(size); i++ {
+ l := indexLocal(locals, (pid+i+1)%int(size))
+ if x, _ := l.shared.popTail(); x != nil {
+ return x
+ }
+ }
+
+ // Try the victim cache. We do this after attempting to steal
+ // from all primary caches because we want objects in the
+ // victim cache to age out if at all possible.
+ size = atomic.LoadUintptr(&p.victimSize)
+ if uintptr(pid) >= size {
+ return nil
+ }
+ locals = p.victim
+ l := indexLocal(locals, pid)
+ if x := l.private; x != nil {
+ l.private = nil
+ return x
+ }
+ for i := 0; i < int(size); i++ {
+ l := indexLocal(locals, (pid+i)%int(size))
+ if x, _ := l.shared.popTail(); x != nil {
+ return x
+ }
+ }
+
+ // Mark the victim cache as empty so that future gets don't bother
+ // with it.
+ atomic.StoreUintptr(&p.victimSize, 0)
+
+ return nil
+}
+
+// pin pins the current goroutine to P, disables preemption, and
+// returns the poolLocal pool for the P and the P's id.
+// The caller must call runtime_procUnpin() when done with the pool.
+func (p *Pool) pin() (*poolLocal, int) {
+ pid := runtime_procPin()
+ // In pinSlow we store to local and then to localSize; here we load in the opposite order.
+ // Since we've disabled preemption, GC cannot happen in between.
+ // Thus here we must observe local at least as large as localSize.
+ // We can observe a newer/larger local, which is fine (we must observe its zero-initialized-ness).
+ s := runtime_LoadAcquintptr(&p.localSize) // load-acquire
+ l := p.local // load-consume
+ if uintptr(pid) < s {
+ return indexLocal(l, pid), pid
+ }
+ return p.pinSlow()
+}
+
+func (p *Pool) pinSlow() (*poolLocal, int) {
+ // Retry under the mutex.
+ // Can not lock the mutex while pinned.
+ runtime_procUnpin()
+ allPoolsMu.Lock()
+ defer allPoolsMu.Unlock()
+ pid := runtime_procPin()
+ // poolCleanup won't be called while we are pinned.
+ s := p.localSize
+ l := p.local
+ if uintptr(pid) < s {
+ return indexLocal(l, pid), pid
+ }
+ if p.local == nil {
+ allPools = append(allPools, p)
+ }
+ // If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
+ size := runtime.GOMAXPROCS(0)
+ local := make([]poolLocal, size)
+ atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
+ runtime_StoreReluintptr(&p.localSize, uintptr(size)) // store-release
+ return &local[pid], pid
+}
+
+func poolCleanup() {
+ // This function is called with the world stopped, at the beginning of a garbage collection.
+ // It must not allocate and probably should not call any runtime functions.
+
+ // Because the world is stopped, no pool user can be in a
+ // pinned section (in effect, this has all Ps pinned).
+
+ // Drop victim caches from all pools.
+ for _, p := range oldPools {
+ p.victim = nil
+ p.victimSize = 0
+ }
+
+ // Move primary cache to victim cache.
+ for _, p := range allPools {
+ p.victim = p.local
+ p.victimSize = p.localSize
+ p.local = nil
+ p.localSize = 0
+ }
+
+ // The pools with non-empty primary caches now have non-empty
+ // victim caches and no pools have primary caches.
+ oldPools, allPools = allPools, nil
+}
+
+var (
+ allPoolsMu Mutex
+
+ // allPools is the set of pools that have non-empty primary
+ // caches. Protected by either 1) allPoolsMu and pinning or 2)
+ // STW.
+ allPools []*Pool
+
+ // oldPools is the set of pools that may have non-empty victim
+ // caches. Protected by STW.
+ oldPools []*Pool
+)
+
+func init() {
+ runtime_registerPoolCleanup(poolCleanup)
+}
+
+func indexLocal(l unsafe.Pointer, i int) *poolLocal {
+ lp := unsafe.Pointer(uintptr(l) + uintptr(i)*unsafe.Sizeof(poolLocal{}))
+ return (*poolLocal)(lp)
+}
+
+// Implemented in runtime.
+func runtime_registerPoolCleanup(cleanup func())
+func runtime_procPin() int
+func runtime_procUnpin()
+
+// The below are implemented in runtime/internal/atomic and the
+// compiler also knows to intrinsify the symbol we linkname into this
+// package.
+
+//go:linkname runtime_LoadAcquintptr runtime/internal/atomic.LoadAcquintptr
+func runtime_LoadAcquintptr(ptr *uintptr) uintptr
+
+//go:linkname runtime_StoreReluintptr runtime/internal/atomic.StoreReluintptr
+func runtime_StoreReluintptr(ptr *uintptr, val uintptr) uintptr
diff --git a/contrib/go/_std_1.18/src/sync/poolqueue.go b/contrib/go/_std_1.19/src/sync/poolqueue.go
index 631f2c15fd..631f2c15fd 100644
--- a/contrib/go/_std_1.18/src/sync/poolqueue.go
+++ b/contrib/go/_std_1.19/src/sync/poolqueue.go
diff --git a/contrib/go/_std_1.18/src/sync/runtime.go b/contrib/go/_std_1.19/src/sync/runtime.go
index de2b0a3ccd..de2b0a3ccd 100644
--- a/contrib/go/_std_1.18/src/sync/runtime.go
+++ b/contrib/go/_std_1.19/src/sync/runtime.go
diff --git a/contrib/go/_std_1.18/src/sync/runtime2.go b/contrib/go/_std_1.19/src/sync/runtime2.go
index 9b7e9922fb..9b7e9922fb 100644
--- a/contrib/go/_std_1.18/src/sync/runtime2.go
+++ b/contrib/go/_std_1.19/src/sync/runtime2.go
diff --git a/contrib/go/_std_1.19/src/sync/rwmutex.go b/contrib/go/_std_1.19/src/sync/rwmutex.go
new file mode 100644
index 0000000000..e914f3eba0
--- /dev/null
+++ b/contrib/go/_std_1.19/src/sync/rwmutex.go
@@ -0,0 +1,231 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import (
+ "internal/race"
+ "sync/atomic"
+ "unsafe"
+)
+
+// There is a modified copy of this file in runtime/rwmutex.go.
+// If you make any changes here, see if you should make them there.
+
+// A RWMutex is a reader/writer mutual exclusion lock.
+// The lock can be held by an arbitrary number of readers or a single writer.
+// The zero value for a RWMutex is an unlocked mutex.
+//
+// A RWMutex must not be copied after first use.
+//
+// If a goroutine holds a RWMutex for reading and another goroutine might
+// call Lock, no goroutine should expect to be able to acquire a read lock
+// until the initial read lock is released. In particular, this prohibits
+// recursive read locking. This is to ensure that the lock eventually becomes
+// available; a blocked Lock call excludes new readers from acquiring the
+// lock.
+//
+// In the terminology of the Go memory model,
+// the n'th call to Unlock “synchronizes before” the m'th call to Lock
+// for any n < m, just as for Mutex.
+// For any call to RLock, there exists an n such that
+// the n'th call to Unlock “synchronizes before” that call to RLock,
+// and the corresponding call to RUnlock “synchronizes before”
+// the n+1'th call to Lock.
+type RWMutex struct {
+ w Mutex // held if there are pending writers
+ writerSem uint32 // semaphore for writers to wait for completing readers
+ readerSem uint32 // semaphore for readers to wait for completing writers
+ readerCount int32 // number of pending readers
+ readerWait int32 // number of departing readers
+}
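+
+// Usage sketch (illustrative only): a read-mostly map guarded by a RWMutex,
+// so concurrent readers never block one another.
+//
+//	type store struct {
+//		mu RWMutex
+//		m  map[string]string
+//	}
+//
+//	func (s *store) get(k string) (string, bool) {
+//		s.mu.RLock()
+//		defer s.mu.RUnlock()
+//		v, ok := s.m[k]
+//		return v, ok
+//	}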
+
+const rwmutexMaxReaders = 1 << 30
+
+// Happens-before relationships are indicated to the race detector via:
+// - Unlock -> Lock: readerSem
+// - Unlock -> RLock: readerSem
+// - RUnlock -> Lock: writerSem
+//
+// The methods below temporarily disable handling of race synchronization
+// events in order to provide the more precise model above to the race
+// detector.
+//
+// For example, atomic.AddInt32 in RLock should not appear to provide
+// acquire-release semantics, which would incorrectly synchronize racing
+// readers, thus potentially missing races.
+
+// RLock locks rw for reading.
+//
+// It should not be used for recursive read locking; a blocked Lock
+// call excludes new readers from acquiring the lock. See the
+// documentation on the RWMutex type.
+func (rw *RWMutex) RLock() {
+ if race.Enabled {
+ _ = rw.w.state
+ race.Disable()
+ }
+ if atomic.AddInt32(&rw.readerCount, 1) < 0 {
+ // A writer is pending, wait for it.
+ runtime_SemacquireMutex(&rw.readerSem, false, 0)
+ }
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(&rw.readerSem))
+ }
+}
+
+// TryRLock tries to lock rw for reading and reports whether it succeeded.
+//
+// Note that while correct uses of TryRLock do exist, they are rare,
+// and use of TryRLock is often a sign of a deeper problem
+// in a particular use of mutexes.
+func (rw *RWMutex) TryRLock() bool {
+ if race.Enabled {
+ _ = rw.w.state
+ race.Disable()
+ }
+ for {
+ c := atomic.LoadInt32(&rw.readerCount)
+ if c < 0 {
+ if race.Enabled {
+ race.Enable()
+ }
+ return false
+ }
+ if atomic.CompareAndSwapInt32(&rw.readerCount, c, c+1) {
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(&rw.readerSem))
+ }
+ return true
+ }
+ }
+}
+
+// RUnlock undoes a single RLock call;
+// it does not affect other simultaneous readers.
+// It is a run-time error if rw is not locked for reading
+// on entry to RUnlock.
+func (rw *RWMutex) RUnlock() {
+ if race.Enabled {
+ _ = rw.w.state
+ race.ReleaseMerge(unsafe.Pointer(&rw.writerSem))
+ race.Disable()
+ }
+ if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
+ // Outlined slow-path to allow the fast-path to be inlined
+ rw.rUnlockSlow(r)
+ }
+ if race.Enabled {
+ race.Enable()
+ }
+}
+
+func (rw *RWMutex) rUnlockSlow(r int32) {
+ if r+1 == 0 || r+1 == -rwmutexMaxReaders {
+ race.Enable()
+ fatal("sync: RUnlock of unlocked RWMutex")
+ }
+ // A writer is pending.
+ if atomic.AddInt32(&rw.readerWait, -1) == 0 {
+ // The last reader unblocks the writer.
+ runtime_Semrelease(&rw.writerSem, false, 1)
+ }
+}
+
+// Lock locks rw for writing.
+// If the lock is already locked for reading or writing,
+// Lock blocks until the lock is available.
+func (rw *RWMutex) Lock() {
+ if race.Enabled {
+ _ = rw.w.state
+ race.Disable()
+ }
+ // First, resolve competition with other writers.
+ rw.w.Lock()
+ // Announce to readers there is a pending writer.
+ r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
+ // Wait for active readers.
+ if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
+ runtime_SemacquireMutex(&rw.writerSem, false, 0)
+ }
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(&rw.readerSem))
+ race.Acquire(unsafe.Pointer(&rw.writerSem))
+ }
+}
+
+// TryLock tries to lock rw for writing and reports whether it succeeded.
+//
+// Note that while correct uses of TryLock do exist, they are rare,
+// and use of TryLock is often a sign of a deeper problem
+// in a particular use of mutexes.
+func (rw *RWMutex) TryLock() bool {
+ if race.Enabled {
+ _ = rw.w.state
+ race.Disable()
+ }
+ if !rw.w.TryLock() {
+ if race.Enabled {
+ race.Enable()
+ }
+ return false
+ }
+ if !atomic.CompareAndSwapInt32(&rw.readerCount, 0, -rwmutexMaxReaders) {
+ rw.w.Unlock()
+ if race.Enabled {
+ race.Enable()
+ }
+ return false
+ }
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(&rw.readerSem))
+ race.Acquire(unsafe.Pointer(&rw.writerSem))
+ }
+ return true
+}
+
+// Unlock unlocks rw for writing. It is a run-time error if rw is
+// not locked for writing on entry to Unlock.
+//
+// As with Mutexes, a locked RWMutex is not associated with a particular
+// goroutine. One goroutine may RLock (Lock) a RWMutex and then
+// arrange for another goroutine to RUnlock (Unlock) it.
+func (rw *RWMutex) Unlock() {
+ if race.Enabled {
+ _ = rw.w.state
+ race.Release(unsafe.Pointer(&rw.readerSem))
+ race.Disable()
+ }
+
+ // Announce to readers there is no active writer.
+ r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
+ if r >= rwmutexMaxReaders {
+ race.Enable()
+ fatal("sync: Unlock of unlocked RWMutex")
+ }
+ // Unblock blocked readers, if any.
+ for i := 0; i < int(r); i++ {
+ runtime_Semrelease(&rw.readerSem, false, 0)
+ }
+ // Allow other writers to proceed.
+ rw.w.Unlock()
+ if race.Enabled {
+ race.Enable()
+ }
+}
+
+// RLocker returns a Locker interface that implements
+// the Lock and Unlock methods by calling rw.RLock and rw.RUnlock.
+func (rw *RWMutex) RLocker() Locker {
+ return (*rlocker)(rw)
+}
+
+type rlocker RWMutex
+
+func (r *rlocker) Lock() { (*RWMutex)(r).RLock() }
+func (r *rlocker) Unlock() { (*RWMutex)(r).RUnlock() }
diff --git a/contrib/go/_std_1.19/src/sync/waitgroup.go b/contrib/go/_std_1.19/src/sync/waitgroup.go
new file mode 100644
index 0000000000..9f26ae106c
--- /dev/null
+++ b/contrib/go/_std_1.19/src/sync/waitgroup.go
@@ -0,0 +1,150 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import (
+ "internal/race"
+ "sync/atomic"
+ "unsafe"
+)
+
+// A WaitGroup waits for a collection of goroutines to finish.
+// The main goroutine calls Add to set the number of
+// goroutines to wait for. Then each of the goroutines
+// runs and calls Done when finished. At the same time,
+// Wait can be used to block until all goroutines have finished.
+//
+// A WaitGroup must not be copied after first use.
+//
+// In the terminology of the Go memory model, a call to Done
+// “synchronizes before” the return of any Wait call that it unblocks.
+type WaitGroup struct {
+ noCopy noCopy
+
+ // 64-bit value: high 32 bits are counter, low 32 bits are waiter count.
+ // 64-bit atomic operations require 64-bit alignment, but 32-bit
+ // compilers only guarantee that 64-bit fields are 32-bit aligned.
+ // For this reason on 32-bit architectures we need to check in state()
+ // if state1 is aligned or not, and dynamically "swap" the field order if
+ // needed.
+ state1 uint64
+ state2 uint32
+}
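+
+// Usage sketch (illustrative only; urls and fetch are hypothetical):
+// Add is called before each goroutine starts, as Add's documentation
+// below requires.
+//
+//	var wg WaitGroup
+//	for _, u := range urls {
+//		u := u
+//		wg.Add(1)
+//		go func() {
+//			defer wg.Done()
+//			fetch(u)
+//		}()
+//	}
+//	wg.Wait()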
+
+// state returns pointers to the state and sema fields stored within wg.state*.
+func (wg *WaitGroup) state() (statep *uint64, semap *uint32) {
+ if unsafe.Alignof(wg.state1) == 8 || uintptr(unsafe.Pointer(&wg.state1))%8 == 0 {
+ // state1 is 64-bit aligned: nothing to do.
+ return &wg.state1, &wg.state2
+ } else {
+ // state1 is 32-bit aligned but not 64-bit aligned: this means that
+ // (&state1)+4 is 64-bit aligned.
+ state := (*[3]uint32)(unsafe.Pointer(&wg.state1))
+ return (*uint64)(unsafe.Pointer(&state[1])), &state[0]
+ }
+}
+
+// Add adds delta, which may be negative, to the WaitGroup counter.
+// If the counter becomes zero, all goroutines blocked on Wait are released.
+// If the counter goes negative, Add panics.
+//
+// Note that calls with a positive delta that occur when the counter is zero
+// must happen before a Wait. Calls with a negative delta, or calls with a
+// positive delta that start when the counter is greater than zero, may happen
+// at any time.
+// Typically this means the calls to Add should execute before the statement
+// creating the goroutine or other event to be waited for.
+// If a WaitGroup is reused to wait for several independent sets of events,
+// new Add calls must happen after all previous Wait calls have returned.
+// See the WaitGroup example.
+func (wg *WaitGroup) Add(delta int) {
+ statep, semap := wg.state()
+ if race.Enabled {
+ _ = *statep // trigger nil deref early
+ if delta < 0 {
+ // Synchronize decrements with Wait.
+ race.ReleaseMerge(unsafe.Pointer(wg))
+ }
+ race.Disable()
+ defer race.Enable()
+ }
+ state := atomic.AddUint64(statep, uint64(delta)<<32)
+ v := int32(state >> 32)
+ w := uint32(state)
+ if race.Enabled && delta > 0 && v == int32(delta) {
+ // The first increment must be synchronized with Wait.
+ // Need to model this as a read, because there can be
+ // several concurrent wg.counter transitions from 0.
+ race.Read(unsafe.Pointer(semap))
+ }
+ if v < 0 {
+ panic("sync: negative WaitGroup counter")
+ }
+ if w != 0 && delta > 0 && v == int32(delta) {
+ panic("sync: WaitGroup misuse: Add called concurrently with Wait")
+ }
+ if v > 0 || w == 0 {
+ return
+ }
+ // This goroutine has set counter to 0 when waiters > 0.
+ // Now there can't be concurrent mutations of state:
+ // - Adds must not happen concurrently with Wait,
+ // - Wait does not increment waiters if it sees counter == 0.
+ // Still do a cheap sanity check to detect WaitGroup misuse.
+ if *statep != state {
+ panic("sync: WaitGroup misuse: Add called concurrently with Wait")
+ }
+ // Reset waiters count to 0.
+ *statep = 0
+ for ; w != 0; w-- {
+ runtime_Semrelease(semap, false, 0)
+ }
+}
+
+// Done decrements the WaitGroup counter by one.
+func (wg *WaitGroup) Done() {
+ wg.Add(-1)
+}
+
+// Wait blocks until the WaitGroup counter is zero.
+func (wg *WaitGroup) Wait() {
+ statep, semap := wg.state()
+ if race.Enabled {
+ _ = *statep // trigger nil deref early
+ race.Disable()
+ }
+ for {
+ state := atomic.LoadUint64(statep)
+ v := int32(state >> 32)
+ w := uint32(state)
+ if v == 0 {
+ // Counter is 0, no need to wait.
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(wg))
+ }
+ return
+ }
+ // Increment waiters count.
+ if atomic.CompareAndSwapUint64(statep, state, state+1) {
+ if race.Enabled && w == 0 {
+ // Wait must be synchronized with the first Add.
+ // Need to model this as a write to race with the read in Add.
+ // As a consequence, can do the write only for the first waiter,
+ // otherwise concurrent Waits will race with each other.
+ race.Write(unsafe.Pointer(semap))
+ }
+ runtime_Semacquire(semap)
+ if *statep != 0 {
+ panic("sync: WaitGroup is reused before previous Wait has returned")
+ }
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(wg))
+ }
+ return
+ }
+ }
+}
diff --git a/contrib/go/_std_1.18/src/syscall/asan0.go b/contrib/go/_std_1.19/src/syscall/asan0.go
index 08bc44dea1..08bc44dea1 100644
--- a/contrib/go/_std_1.18/src/syscall/asan0.go
+++ b/contrib/go/_std_1.19/src/syscall/asan0.go
diff --git a/contrib/go/_std_1.19/src/syscall/asm_darwin_amd64.s b/contrib/go/_std_1.19/src/syscall/asm_darwin_amd64.s
new file mode 100644
index 0000000000..77b58e051b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/asm_darwin_amd64.s
@@ -0,0 +1,134 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+//
+// System call support for AMD64, Darwin
+//
+
+// Trap # in AX, args in DI SI DX, return in AX DX
+
+// func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno);
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ CALL runtime·entersyscall<ABIInternal>(SB)
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ trap+0(FP), AX // syscall entry
+ ADDQ $0x2000000, AX
+ SYSCALL
+ JCC ok
+ MOVQ $-1, r1+32(FP)
+ MOVQ $0, r2+40(FP)
+ MOVQ AX, err+48(FP)
+ CALL runtime·exitsyscall<ABIInternal>(SB)
+ RET
+ok:
+ MOVQ AX, r1+32(FP)
+ MOVQ DX, r2+40(FP)
+ MOVQ $0, err+48(FP)
+ CALL runtime·exitsyscall<ABIInternal>(SB)
+ RET
+
+// func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno);
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ CALL runtime·entersyscall<ABIInternal>(SB)
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ a4+32(FP), R10
+ MOVQ a5+40(FP), R8
+ MOVQ a6+48(FP), R9
+ MOVQ trap+0(FP), AX // syscall entry
+ ADDQ $0x2000000, AX
+ SYSCALL
+ JCC ok6
+ MOVQ $-1, r1+56(FP)
+ MOVQ $0, r2+64(FP)
+ MOVQ AX, err+72(FP)
+ CALL runtime·exitsyscall<ABIInternal>(SB)
+ RET
+ok6:
+ MOVQ AX, r1+56(FP)
+ MOVQ DX, r2+64(FP)
+ MOVQ $0, err+72(FP)
+ CALL runtime·exitsyscall<ABIInternal>(SB)
+ RET
+
+// func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno)
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ CALL runtime·entersyscall<ABIInternal>(SB)
+ MOVQ trap+0(FP), AX // syscall entry
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ a4+32(FP), R10
+ MOVQ a5+40(FP), R8
+ MOVQ a6+48(FP), R9
+ MOVQ a7+56(FP), R11
+ MOVQ a8+64(FP), R12
+ MOVQ a9+72(FP), R13
+ SUBQ $32, SP
+ MOVQ R11, 8(SP)
+ MOVQ R12, 16(SP)
+ MOVQ R13, 24(SP)
+ ADDQ $0x2000000, AX
+ SYSCALL
+ JCC ok9
+ ADDQ $32, SP
+ MOVQ $-1, r1+80(FP)
+ MOVQ $0, r2+88(FP)
+ MOVQ AX, err+96(FP)
+ CALL runtime·exitsyscall<ABIInternal>(SB)
+ RET
+ok9:
+ ADDQ $32, SP
+ MOVQ AX, r1+80(FP)
+ MOVQ DX, r2+88(FP)
+ MOVQ $0, err+96(FP)
+ CALL runtime·exitsyscall<ABIInternal>(SB)
+ RET
+
+// func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ trap+0(FP), AX // syscall entry
+ ADDQ $0x2000000, AX
+ SYSCALL
+ JCC ok1
+ MOVQ $-1, r1+32(FP)
+ MOVQ $0, r2+40(FP)
+ MOVQ AX, err+48(FP)
+ RET
+ok1:
+ MOVQ AX, r1+32(FP)
+ MOVQ DX, r2+40(FP)
+ MOVQ $0, err+48(FP)
+ RET
+
+// func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ a4+32(FP), R10
+ MOVQ a5+40(FP), R8
+ MOVQ a6+48(FP), R9
+ MOVQ trap+0(FP), AX // syscall entry
+ ADDQ $0x2000000, AX
+ SYSCALL
+ JCC ok2
+ MOVQ $-1, r1+56(FP)
+ MOVQ $0, r2+64(FP)
+ MOVQ AX, err+72(FP)
+ RET
+ok2:
+ MOVQ AX, r1+56(FP)
+ MOVQ DX, r2+64(FP)
+ MOVQ $0, err+72(FP)
+ RET
diff --git a/contrib/go/_std_1.19/src/syscall/asm_linux_amd64.s b/contrib/go/_std_1.19/src/syscall/asm_linux_amd64.s
new file mode 100644
index 0000000000..3206a45d5d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/asm_linux_amd64.s
@@ -0,0 +1,68 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+//
+// System calls for AMD64, Linux
+//
+
+#define SYS_gettimeofday 96
+
+// func rawVforkSyscall(trap, a1 uintptr) (r1, err uintptr)
+TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-32
+ MOVQ a1+8(FP), DI
+ MOVQ $0, SI
+ MOVQ $0, DX
+ MOVQ $0, R10
+ MOVQ $0, R8
+ MOVQ $0, R9
+ MOVQ trap+0(FP), AX // syscall entry
+ POPQ R12 // preserve return address
+ SYSCALL
+ PUSHQ R12
+ CMPQ AX, $0xfffffffffffff001
+ JLS ok2
+ MOVQ $-1, r1+16(FP)
+ NEGQ AX
+ MOVQ AX, err+24(FP)
+ RET
+ok2:
+ MOVQ AX, r1+16(FP)
+ MOVQ $0, err+24(FP)
+ RET
+
+// func rawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr)
+TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ trap+0(FP), AX // syscall entry
+ SYSCALL
+ MOVQ AX, r1+32(FP)
+ MOVQ DX, r2+40(FP)
+ RET
+
+// func gettimeofday(tv *Timeval) (err uintptr)
+TEXT ·gettimeofday(SB),NOSPLIT,$0-16
+ MOVQ tv+0(FP), DI
+ MOVQ $0, SI
+ MOVQ runtime·vdsoGettimeofdaySym(SB), AX
+ TESTQ AX, AX
+ JZ fallback
+ CALL AX
+ret:
+ CMPQ AX, $0xfffffffffffff001
+ JLS ok7
+ NEGQ AX
+ MOVQ AX, err+8(FP)
+ RET
+fallback:
+ MOVL $SYS_gettimeofday, AX
+ SYSCALL
+ JMP ret
+ok7:
+ MOVQ $0, err+8(FP)
+ RET
diff --git a/contrib/go/_std_1.18/src/syscall/bpf_darwin.go b/contrib/go/_std_1.19/src/syscall/bpf_darwin.go
index fb86049ae9..fb86049ae9 100644
--- a/contrib/go/_std_1.18/src/syscall/bpf_darwin.go
+++ b/contrib/go/_std_1.19/src/syscall/bpf_darwin.go
diff --git a/contrib/go/_std_1.19/src/syscall/dirent.go b/contrib/go/_std_1.19/src/syscall/dirent.go
new file mode 100644
index 0000000000..b10608a662
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/dirent.go
@@ -0,0 +1,102 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm)
+
+package syscall
+
+import "unsafe"
+
+// readInt returns the unsigned integer of the given size, in native byte order, at offset off in b.
+func readInt(b []byte, off, size uintptr) (u uint64, ok bool) {
+ if len(b) < int(off+size) {
+ return 0, false
+ }
+ if isBigEndian {
+ return readIntBE(b[off:], size), true
+ }
+ return readIntLE(b[off:], size), true
+}
+
+func readIntBE(b []byte, size uintptr) uint64 {
+ switch size {
+ case 1:
+ return uint64(b[0])
+ case 2:
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[1]) | uint64(b[0])<<8
+ case 4:
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24
+ case 8:
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+ default:
+ panic("syscall: readInt with unsupported size")
+ }
+}
+
+func readIntLE(b []byte, size uintptr) uint64 {
+ switch size {
+ case 1:
+ return uint64(b[0])
+ case 2:
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8
+ case 4:
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
+ case 8:
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ default:
+ panic("syscall: readInt with unsupported size")
+ }
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ count = 0
+ for max != 0 && len(buf) > 0 {
+ reclen, ok := direntReclen(buf)
+ if !ok || reclen > uint64(len(buf)) {
+ return origlen, count, names
+ }
+ rec := buf[:reclen]
+ buf = buf[reclen:]
+ ino, ok := direntIno(rec)
+ if !ok {
+ break
+ }
+ if ino == 0 { // File absent in directory.
+ continue
+ }
+ const namoff = uint64(unsafe.Offsetof(Dirent{}.Name))
+ namlen, ok := direntNamlen(rec)
+ if !ok || namoff+namlen > uint64(len(rec)) {
+ break
+ }
+ name := rec[namoff : namoff+namlen]
+ for i, c := range name {
+ if c == 0 {
+ name = name[:i]
+ break
+ }
+ }
+ // Check for useless names before allocating a string.
+ if string(name) == "." || string(name) == ".." {
+ continue
+ }
+ max--
+ count++
+ names = append(names, string(name))
+ }
+ return origlen - len(buf), count, names
+}
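+
+// Usage sketch (illustrative only): draining a directory by feeding
+// ReadDirent output to ParseDirent until the buffer comes back empty;
+// max = -1 requests all entries.
+//
+//	fd, _ := Open(".", O_RDONLY, 0)
+//	defer Close(fd)
+//	buf := make([]byte, 4096)
+//	var names []string
+//	for {
+//		n, err := ReadDirent(fd, buf)
+//		if n <= 0 || err != nil {
+//			break
+//		}
+//		_, _, names = ParseDirent(buf[:n], -1, names)
+//	}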
diff --git a/contrib/go/_std_1.19/src/syscall/endian_little.go b/contrib/go/_std_1.19/src/syscall/endian_little.go
new file mode 100644
index 0000000000..f5fcb58db4
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/endian_little.go
@@ -0,0 +1,9 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+//go:build 386 || amd64 || arm || arm64 || loong64 || ppc64le || mips64le || mipsle || riscv64 || wasm
+
+package syscall
+
+const isBigEndian = false
diff --git a/contrib/go/_std_1.19/src/syscall/env_unix.go b/contrib/go/_std_1.19/src/syscall/env_unix.go
new file mode 100644
index 0000000000..67e6c5fbe2
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/env_unix.go
@@ -0,0 +1,155 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm) || plan9
+
+// Unix environment variables.
+
+package syscall
+
+import (
+ "runtime"
+ "sync"
+)
+
+var (
+ // envOnce guards initialization by copyenv, which populates env.
+ envOnce sync.Once
+
+ // envLock guards env and envs.
+ envLock sync.RWMutex
+
+ // env maps from an environment variable to its first occurrence in envs.
+ env map[string]int
+
+ // envs is provided by the runtime. Elements are expected to
+ // be of the form "key=value". An empty string means deleted
+ // (or a duplicate to be ignored).
+ envs []string = runtime_envs()
+)
+
+func runtime_envs() []string // in package runtime
+
+// setenv_c and unsetenv_c are provided by the runtime but are no-ops
+// if cgo isn't loaded.
+func setenv_c(k, v string)
+func unsetenv_c(k string)
+
+func copyenv() {
+ env = make(map[string]int)
+ for i, s := range envs {
+ for j := 0; j < len(s); j++ {
+ if s[j] == '=' {
+ key := s[:j]
+ if _, ok := env[key]; !ok {
+ env[key] = i // first mention of key
+ } else {
+ // Clear duplicate keys. This permits Unsetenv to
+ // safely delete only the first item without
+ // worrying about unshadowing a later one,
+ // which might be a security problem.
+ envs[i] = ""
+ }
+ break
+ }
+ }
+ }
+}
+
+func Unsetenv(key string) error {
+ envOnce.Do(copyenv)
+
+ envLock.Lock()
+ defer envLock.Unlock()
+
+ if i, ok := env[key]; ok {
+ envs[i] = ""
+ delete(env, key)
+ }
+ unsetenv_c(key)
+ return nil
+}
+
+func Getenv(key string) (value string, found bool) {
+ envOnce.Do(copyenv)
+ if len(key) == 0 {
+ return "", false
+ }
+
+ envLock.RLock()
+ defer envLock.RUnlock()
+
+ i, ok := env[key]
+ if !ok {
+ return "", false
+ }
+ s := envs[i]
+ for i := 0; i < len(s); i++ {
+ if s[i] == '=' {
+ return s[i+1:], true
+ }
+ }
+ return "", false
+}
+
+func Setenv(key, value string) error {
+ envOnce.Do(copyenv)
+ if len(key) == 0 {
+ return EINVAL
+ }
+ for i := 0; i < len(key); i++ {
+ if key[i] == '=' || key[i] == 0 {
+ return EINVAL
+ }
+ }
+ // On Plan 9, null is used as a separator, e.g. in $path.
+ if runtime.GOOS != "plan9" {
+ for i := 0; i < len(value); i++ {
+ if value[i] == 0 {
+ return EINVAL
+ }
+ }
+ }
+
+ envLock.Lock()
+ defer envLock.Unlock()
+
+ i, ok := env[key]
+ kv := key + "=" + value
+ if ok {
+ envs[i] = kv
+ } else {
+ i = len(envs)
+ envs = append(envs, kv)
+ }
+ env[key] = i
+ setenv_c(key, value)
+ return nil
+}
+
+func Clearenv() {
+ envOnce.Do(copyenv) // prevent copyenv in Getenv/Setenv
+
+ envLock.Lock()
+ defer envLock.Unlock()
+
+ for k := range env {
+ unsetenv_c(k)
+ }
+ env = make(map[string]int)
+ envs = []string{}
+}
+
+func Environ() []string {
+ envOnce.Do(copyenv)
+ envLock.RLock()
+ defer envLock.RUnlock()
+ a := make([]string, 0, len(envs))
+ for _, env := range envs {
+ if env != "" {
+ a = append(a, env)
+ }
+ }
+ return a
+}
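+
+// Usage sketch (illustrative only): Setenv and Getenv round-trip through
+// the env index maintained above.
+//
+//	_ = Setenv("MY_FLAG", "1")
+//	if v, ok := Getenv("MY_FLAG"); ok {
+//		println(v) // "1"
+//	}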
diff --git a/contrib/go/_std_1.19/src/syscall/exec_libc2.go b/contrib/go/_std_1.19/src/syscall/exec_libc2.go
new file mode 100644
index 0000000000..9eb61a5d35
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/exec_libc2.go
@@ -0,0 +1,281 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || (openbsd && !mips64)
+
+package syscall
+
+import (
+ "internal/abi"
+ "runtime"
+ "unsafe"
+)
+
+type SysProcAttr struct {
+ Chroot string // Chroot.
+ Credential *Credential // Credential.
+ Ptrace bool // Enable tracing.
+ Setsid bool // Create session.
+ // Setpgid sets the process group ID of the child to Pgid,
+ // or, if Pgid == 0, to the new child's process ID.
+ Setpgid bool
+ // Setctty sets the controlling terminal of the child to
+ // file descriptor Ctty. Ctty must be a descriptor number
+ // in the child process: an index into ProcAttr.Files.
+ // This is only meaningful if Setsid is true.
+ Setctty bool
+ Noctty bool // Detach fd 0 from controlling terminal
+ Ctty int // Controlling TTY fd
+ // Foreground places the child process group in the foreground.
+ // This implies Setpgid. The Ctty field must be set to
+ // the descriptor of the controlling TTY.
+ // Unlike Setctty, in this case Ctty must be a descriptor
+ // number in the parent process.
+ Foreground bool
+ Pgid int // Child's process group ID if Setpgid.
+}
+
+// Implemented in runtime package.
+func runtime_BeforeFork()
+func runtime_AfterFork()
+func runtime_AfterForkInChild()
+
+// Fork, dup fd onto 0..len(fd), and exec(argv0, argvv, envv) in child.
+// If a dup or exec fails, write the errno error to pipe.
+// (Pipe is close-on-exec so if exec succeeds, it will be closed.)
+// In the child, this function must not acquire any locks, because
+// they might have been locked at the time of the fork. This means
+// no rescheduling, no malloc calls, and no new stack segments.
+// For the same reason the compiler does not race-instrument it.
+// The calls to rawSyscall are okay because they are assembly
+// functions that do not grow the stack.
+//
+//go:norace
+func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr *ProcAttr, sys *SysProcAttr, pipe int) (pid int, err Errno) {
+ // Declare all variables at top in case any
+ // declarations require heap allocation (e.g., err1).
+ var (
+ r1 uintptr
+ err1 Errno
+ nextfd int
+ i int
+ )
+
+ // Guard against side effects of shuffling fds below.
+ // Make sure that nextfd is beyond any currently open files so
+ // that we can't run the risk of overwriting any of them.
+ fd := make([]int, len(attr.Files))
+ nextfd = len(attr.Files)
+ for i, ufd := range attr.Files {
+ if nextfd < int(ufd) {
+ nextfd = int(ufd)
+ }
+ fd[i] = int(ufd)
+ }
+ nextfd++
+
+ // About to call fork.
+ // No more allocation or calls of non-assembly functions.
+ runtime_BeforeFork()
+ r1, _, err1 = rawSyscall(abi.FuncPCABI0(libc_fork_trampoline), 0, 0, 0)
+ if err1 != 0 {
+ runtime_AfterFork()
+ return 0, err1
+ }
+
+ if r1 != 0 {
+ // parent; return PID
+ runtime_AfterFork()
+ return int(r1), 0
+ }
+
+ // Fork succeeded, now in child.
+
+ // Enable tracing if requested.
+ if sys.Ptrace {
+ if err := ptrace(PTRACE_TRACEME, 0, 0, 0); err != nil {
+ err1 = err.(Errno)
+ goto childerror
+ }
+ }
+
+ // Session ID
+ if sys.Setsid {
+ _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_setsid_trampoline), 0, 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Set process group
+ if sys.Setpgid || sys.Foreground {
+ // Place child in process group.
+ _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_setpgid_trampoline), 0, uintptr(sys.Pgid), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ if sys.Foreground {
+		// This should really be pid_t; however, _C_int (aka int32) is
+		// generally equivalent.
+ pgrp := _C_int(sys.Pgid)
+ if pgrp == 0 {
+ r1, _, err1 = rawSyscall(abi.FuncPCABI0(libc_getpid_trampoline), 0, 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ pgrp = _C_int(r1)
+ }
+
+ // Place process group in foreground.
+ _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(sys.Ctty), uintptr(TIOCSPGRP), uintptr(unsafe.Pointer(&pgrp)))
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Restore the signal mask. We do this after TIOCSPGRP to avoid
+ // having the kernel send a SIGTTOU signal to the process group.
+ runtime_AfterForkInChild()
+
+ // Chroot
+ if chroot != nil {
+ _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_chroot_trampoline), uintptr(unsafe.Pointer(chroot)), 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // User and groups
+ if cred := sys.Credential; cred != nil {
+ ngroups := uintptr(len(cred.Groups))
+ groups := uintptr(0)
+ if ngroups > 0 {
+ groups = uintptr(unsafe.Pointer(&cred.Groups[0]))
+ }
+ if !cred.NoSetGroups {
+ _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_setgroups_trampoline), ngroups, groups, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+ _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_setgid_trampoline), uintptr(cred.Gid), 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_setuid_trampoline), uintptr(cred.Uid), 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Chdir
+ if dir != nil {
+ _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_chdir_trampoline), uintptr(unsafe.Pointer(dir)), 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Pass 1: look for fd[i] < i and move those up above len(fd)
+ // so that pass 2 won't stomp on an fd it needs later.
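+	// For example (illustrative): with attr.Files = [4, 0], pass 2 alone
+	// would dup fd 4 onto 0 and then the already-clobbered fd 0 onto 1;
+	// pass 1 first moves fd 0 up to a slot >= len(fd) so it survives.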
+ if pipe < nextfd {
+ if runtime.GOOS == "openbsd" {
+ _, _, err1 = rawSyscall(dupTrampoline, uintptr(pipe), uintptr(nextfd), O_CLOEXEC)
+ } else {
+ _, _, err1 = rawSyscall(dupTrampoline, uintptr(pipe), uintptr(nextfd), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(nextfd), F_SETFD, FD_CLOEXEC)
+ }
+ if err1 != 0 {
+ goto childerror
+ }
+ pipe = nextfd
+ nextfd++
+ }
+ for i = 0; i < len(fd); i++ {
+ if fd[i] >= 0 && fd[i] < int(i) {
+ if nextfd == pipe { // don't stomp on pipe
+ nextfd++
+ }
+ if runtime.GOOS == "openbsd" {
+ _, _, err1 = rawSyscall(dupTrampoline, uintptr(fd[i]), uintptr(nextfd), O_CLOEXEC)
+ } else {
+ _, _, err1 = rawSyscall(dupTrampoline, uintptr(fd[i]), uintptr(nextfd), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(nextfd), F_SETFD, FD_CLOEXEC)
+ }
+ if err1 != 0 {
+ goto childerror
+ }
+ fd[i] = nextfd
+ nextfd++
+ }
+ }
+
+ // Pass 2: dup fd[i] down onto i.
+ for i = 0; i < len(fd); i++ {
+ if fd[i] == -1 {
+ rawSyscall(abi.FuncPCABI0(libc_close_trampoline), uintptr(i), 0, 0)
+ continue
+ }
+ if fd[i] == int(i) {
+			// dup2(i, i) won't clear the close-on-exec flag on Linux,
+			// and probably not elsewhere either.
+ _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd[i]), F_SETFD, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ continue
+ }
+ // The new fd is created NOT close-on-exec,
+ // which is exactly what we want.
+ _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_dup2_trampoline), uintptr(fd[i]), uintptr(i), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // By convention, we don't close-on-exec the fds we are
+ // started with, so if len(fd) < 3, close 0, 1, 2 as needed.
+ // Programs that know they inherit fds >= 3 will need
+ // to set them close-on-exec.
+ for i = len(fd); i < 3; i++ {
+ rawSyscall(abi.FuncPCABI0(libc_close_trampoline), uintptr(i), 0, 0)
+ }
+
+ // Detach fd 0 from tty
+ if sys.Noctty {
+ _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), 0, uintptr(TIOCNOTTY), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Set the controlling TTY to Ctty
+ if sys.Setctty {
+ _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(sys.Ctty), uintptr(TIOCSCTTY), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Time to exec.
+ _, _, err1 = rawSyscall(abi.FuncPCABI0(libc_execve_trampoline),
+ uintptr(unsafe.Pointer(argv0)),
+ uintptr(unsafe.Pointer(&argv[0])),
+ uintptr(unsafe.Pointer(&envv[0])))
+
+childerror:
+ // send error code on pipe
+ rawSyscall(abi.FuncPCABI0(libc_write_trampoline), uintptr(pipe), uintptr(unsafe.Pointer(&err1)), unsafe.Sizeof(err1))
+ for {
+ rawSyscall(abi.FuncPCABI0(libc_exit_trampoline), 253, 0, 0)
+ }
+}
diff --git a/contrib/go/_std_1.19/src/syscall/exec_linux.go b/contrib/go/_std_1.19/src/syscall/exec_linux.go
new file mode 100644
index 0000000000..554aad412d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/exec_linux.go
@@ -0,0 +1,626 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux
+
+package syscall
+
+import (
+ "internal/itoa"
+ "runtime"
+ "unsafe"
+)
+
+// SysProcIDMap holds Container ID to Host ID mappings used for User Namespaces in Linux.
+// See user_namespaces(7).
+type SysProcIDMap struct {
+ ContainerID int // Container ID.
+ HostID int // Host ID.
+	Size        int // Size of the mapped range.
+}
+
+type SysProcAttr struct {
+ Chroot string // Chroot.
+ Credential *Credential // Credential.
+ // Ptrace tells the child to call ptrace(PTRACE_TRACEME).
+ // Call runtime.LockOSThread before starting a process with this set,
+ // and don't call UnlockOSThread until done with PtraceSyscall calls.
+ Ptrace bool
+ Setsid bool // Create session.
+ // Setpgid sets the process group ID of the child to Pgid,
+ // or, if Pgid == 0, to the new child's process ID.
+ Setpgid bool
+ // Setctty sets the controlling terminal of the child to
+ // file descriptor Ctty. Ctty must be a descriptor number
+ // in the child process: an index into ProcAttr.Files.
+ // This is only meaningful if Setsid is true.
+ Setctty bool
+ Noctty bool // Detach fd 0 from controlling terminal
+ Ctty int // Controlling TTY fd
+ // Foreground places the child process group in the foreground.
+ // This implies Setpgid. The Ctty field must be set to
+ // the descriptor of the controlling TTY.
+ // Unlike Setctty, in this case Ctty must be a descriptor
+ // number in the parent process.
+ Foreground bool
+ Pgid int // Child's process group ID if Setpgid.
+ // Pdeathsig, if non-zero, is a signal that the kernel will send to
+ // the child process when the creating thread dies. Note that the signal
+ // is sent on thread termination, which may happen before process termination.
+ // There are more details at https://go.dev/issue/27505.
+ Pdeathsig Signal
+ Cloneflags uintptr // Flags for clone calls (Linux only)
+ Unshareflags uintptr // Flags for unshare calls (Linux only)
+ UidMappings []SysProcIDMap // User ID mappings for user namespaces.
+ GidMappings []SysProcIDMap // Group ID mappings for user namespaces.
+	// GidMappingsEnableSetgroups enables the setgroups syscall.
+	// If false, the setgroups syscall will be disabled for the child process.
+	// This parameter is a no-op if GidMappings == nil. Otherwise, for
+	// unprivileged users, it should be set to false for the mappings to work.
+ GidMappingsEnableSetgroups bool
+ AmbientCaps []uintptr // Ambient capabilities (Linux only)
+}
+
+var (
+ none = [...]byte{'n', 'o', 'n', 'e', 0}
+ slash = [...]byte{'/', 0}
+)
+
+// Implemented in runtime package.
+func runtime_BeforeFork()
+func runtime_AfterFork()
+func runtime_AfterForkInChild()
+
+// Fork, dup fd onto 0..len(fd), and exec(argv0, argv, envv) in child.
+// If a dup or exec fails, write the errno error to pipe.
+// (Pipe is close-on-exec so if exec succeeds, it will be closed.)
+// In the child, this function must not acquire any locks, because
+// they might have been locked at the time of the fork. This means
+// no rescheduling, no malloc calls, and no new stack segments.
+// For the same reason, the compiler does not race-instrument it.
+// The calls to RawSyscall are okay because they are assembly
+// functions that do not grow the stack.
+//
+//go:norace
+func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr *ProcAttr, sys *SysProcAttr, pipe int) (pid int, err Errno) {
+ // Set up and fork. This returns immediately in the parent or
+ // if there's an error.
+ r1, err1, p, locked := forkAndExecInChild1(argv0, argv, envv, chroot, dir, attr, sys, pipe)
+ if locked {
+ runtime_AfterFork()
+ }
+ if err1 != 0 {
+ return 0, err1
+ }
+
+ // parent; return PID
+ pid = int(r1)
+
+ if sys.UidMappings != nil || sys.GidMappings != nil {
+ Close(p[0])
+ var err2 Errno
+ // uid/gid mappings will be written after fork and unshare(2) for user
+ // namespaces.
+ if sys.Unshareflags&CLONE_NEWUSER == 0 {
+ if err := writeUidGidMappings(pid, sys); err != nil {
+ err2 = err.(Errno)
+ }
+ }
+ RawSyscall(SYS_WRITE, uintptr(p[1]), uintptr(unsafe.Pointer(&err2)), unsafe.Sizeof(err2))
+ Close(p[1])
+ }
+
+ return pid, 0
+}
+
+const _LINUX_CAPABILITY_VERSION_3 = 0x20080522
+
+type capHeader struct {
+ version uint32
+ pid int32
+}
+
+type capData struct {
+ effective uint32
+ permitted uint32
+ inheritable uint32
+}
+type caps struct {
+ hdr capHeader
+ data [2]capData
+}
+
+// See CAP_TO_INDEX in linux/capability.h:
+func capToIndex(cap uintptr) uintptr { return cap >> 5 }
+
+// See CAP_TO_MASK in linux/capability.h:
+func capToMask(cap uintptr) uint32 { return 1 << uint(cap&31) }
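+
+// For example, capability number 33 is tracked at caps.data[1]
+// (33 >> 5 == 1) under the bit mask 1<<1 (33 & 31 == 1).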
+
+// forkAndExecInChild1 implements the body of forkAndExecInChild up to
+// the parent's post-fork path. This is a separate function so we can
+// separate the child's and parent's stack frames if we're using
+// vfork.
+//
+// This is go:noinline because the point is to keep the stack frames
+// of this and forkAndExecInChild separate.
+//
+//go:noinline
+//go:norace
+func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr *ProcAttr, sys *SysProcAttr, pipe int) (r1 uintptr, err1 Errno, p [2]int, locked bool) {
+ // Defined in linux/prctl.h starting with Linux 4.3.
+ const (
+ PR_CAP_AMBIENT = 0x2f
+ PR_CAP_AMBIENT_RAISE = 0x2
+ )
+
+ // vfork requires that the child not touch any of the parent's
+ // active stack frames. Hence, the child does all post-fork
+ // processing in this stack frame and never returns, while the
+ // parent returns immediately from this frame and does all
+ // post-fork processing in the outer frame.
+ // Declare all variables at top in case any
+ // declarations require heap allocation (e.g., err1).
+ var (
+ err2 Errno
+ nextfd int
+ i int
+ caps caps
+ fd1 uintptr
+ puid, psetgroups, pgid []byte
+ uidmap, setgroups, gidmap []byte
+ )
+
+ if sys.UidMappings != nil {
+ puid = []byte("/proc/self/uid_map\000")
+ uidmap = formatIDMappings(sys.UidMappings)
+ }
+
+ if sys.GidMappings != nil {
+ psetgroups = []byte("/proc/self/setgroups\000")
+ pgid = []byte("/proc/self/gid_map\000")
+
+ if sys.GidMappingsEnableSetgroups {
+ setgroups = []byte("allow\000")
+ } else {
+ setgroups = []byte("deny\000")
+ }
+ gidmap = formatIDMappings(sys.GidMappings)
+ }
+
+ // Record parent PID so child can test if it has died.
+ ppid, _ := rawSyscallNoError(SYS_GETPID, 0, 0, 0)
+
+ // Guard against side effects of shuffling fds below.
+ // Make sure that nextfd is beyond any currently open files so
+ // that we can't run the risk of overwriting any of them.
+ fd := make([]int, len(attr.Files))
+ nextfd = len(attr.Files)
+ for i, ufd := range attr.Files {
+ if nextfd < int(ufd) {
+ nextfd = int(ufd)
+ }
+ fd[i] = int(ufd)
+ }
+ nextfd++
+
+ // Allocate another pipe for parent to child communication for
+ // synchronizing writing of User ID/Group ID mappings.
+ if sys.UidMappings != nil || sys.GidMappings != nil {
+ if err := forkExecPipe(p[:]); err != nil {
+ err1 = err.(Errno)
+ return
+ }
+ }
+
+ // About to call fork.
+ // No more allocation or calls of non-assembly functions.
+ runtime_BeforeFork()
+ locked = true
+ switch {
+ case sys.Cloneflags&CLONE_NEWUSER == 0 && sys.Unshareflags&CLONE_NEWUSER == 0:
+ r1, err1 = rawVforkSyscall(SYS_CLONE, uintptr(SIGCHLD|CLONE_VFORK|CLONE_VM)|sys.Cloneflags)
+ case runtime.GOARCH == "s390x":
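+		// On s390x the kernel's clone takes its first two arguments in
+		// the opposite order (stack pointer before flags), so the flags
+		// go in the second slot here.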
+ r1, _, err1 = RawSyscall6(SYS_CLONE, 0, uintptr(SIGCHLD)|sys.Cloneflags, 0, 0, 0, 0)
+ default:
+ r1, _, err1 = RawSyscall6(SYS_CLONE, uintptr(SIGCHLD)|sys.Cloneflags, 0, 0, 0, 0, 0)
+ }
+ if err1 != 0 || r1 != 0 {
+ // If we're in the parent, we must return immediately
+ // so we're not in the same stack frame as the child.
+ // This can at most use the return PC, which the child
+ // will not modify, and the results of
+ // rawVforkSyscall, which must have been written after
+ // the child was replaced.
+ return
+ }
+
+ // Fork succeeded, now in child.
+
+ // Enable the "keep capabilities" flag to set ambient capabilities later.
+ if len(sys.AmbientCaps) > 0 {
+ _, _, err1 = RawSyscall6(SYS_PRCTL, PR_SET_KEEPCAPS, 1, 0, 0, 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Wait for User ID/Group ID mappings to be written.
+ if sys.UidMappings != nil || sys.GidMappings != nil {
+ if _, _, err1 = RawSyscall(SYS_CLOSE, uintptr(p[1]), 0, 0); err1 != 0 {
+ goto childerror
+ }
+ r1, _, err1 = RawSyscall(SYS_READ, uintptr(p[0]), uintptr(unsafe.Pointer(&err2)), unsafe.Sizeof(err2))
+ if err1 != 0 {
+ goto childerror
+ }
+ if r1 != unsafe.Sizeof(err2) {
+ err1 = EINVAL
+ goto childerror
+ }
+ if err2 != 0 {
+ err1 = err2
+ goto childerror
+ }
+ }
+
+ // Session ID
+ if sys.Setsid {
+ _, _, err1 = RawSyscall(SYS_SETSID, 0, 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Set process group
+ if sys.Setpgid || sys.Foreground {
+ // Place child in process group.
+ _, _, err1 = RawSyscall(SYS_SETPGID, 0, uintptr(sys.Pgid), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ if sys.Foreground {
+ pgrp := int32(sys.Pgid)
+ if pgrp == 0 {
+ r1, _ = rawSyscallNoError(SYS_GETPID, 0, 0, 0)
+
+ pgrp = int32(r1)
+ }
+
+ // Place process group in foreground.
+ _, _, err1 = RawSyscall(SYS_IOCTL, uintptr(sys.Ctty), uintptr(TIOCSPGRP), uintptr(unsafe.Pointer(&pgrp)))
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Restore the signal mask. We do this after TIOCSPGRP to avoid
+ // having the kernel send a SIGTTOU signal to the process group.
+ runtime_AfterForkInChild()
+
+ // Unshare
+ if sys.Unshareflags != 0 {
+ _, _, err1 = RawSyscall(SYS_UNSHARE, sys.Unshareflags, 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+
+ if sys.Unshareflags&CLONE_NEWUSER != 0 && sys.GidMappings != nil {
+ dirfd := int(_AT_FDCWD)
+ if fd1, _, err1 = RawSyscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(&psetgroups[0])), uintptr(O_WRONLY), 0, 0, 0); err1 != 0 {
+ goto childerror
+ }
+ r1, _, err1 = RawSyscall(SYS_WRITE, uintptr(fd1), uintptr(unsafe.Pointer(&setgroups[0])), uintptr(len(setgroups)))
+ if err1 != 0 {
+ goto childerror
+ }
+ if _, _, err1 = RawSyscall(SYS_CLOSE, uintptr(fd1), 0, 0); err1 != 0 {
+ goto childerror
+ }
+
+ if fd1, _, err1 = RawSyscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(&pgid[0])), uintptr(O_WRONLY), 0, 0, 0); err1 != 0 {
+ goto childerror
+ }
+ r1, _, err1 = RawSyscall(SYS_WRITE, uintptr(fd1), uintptr(unsafe.Pointer(&gidmap[0])), uintptr(len(gidmap)))
+ if err1 != 0 {
+ goto childerror
+ }
+ if _, _, err1 = RawSyscall(SYS_CLOSE, uintptr(fd1), 0, 0); err1 != 0 {
+ goto childerror
+ }
+ }
+
+ if sys.Unshareflags&CLONE_NEWUSER != 0 && sys.UidMappings != nil {
+ dirfd := int(_AT_FDCWD)
+ if fd1, _, err1 = RawSyscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(&puid[0])), uintptr(O_WRONLY), 0, 0, 0); err1 != 0 {
+ goto childerror
+ }
+ r1, _, err1 = RawSyscall(SYS_WRITE, uintptr(fd1), uintptr(unsafe.Pointer(&uidmap[0])), uintptr(len(uidmap)))
+ if err1 != 0 {
+ goto childerror
+ }
+ if _, _, err1 = RawSyscall(SYS_CLOSE, uintptr(fd1), 0, 0); err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // The unshare system call in Linux doesn't unshare mount points
+ // mounted with --shared. Systemd mounts / with --shared. For a
+ // long discussion of the pros and cons of this see debian bug 739593.
+ // The Go model of unsharing is more like Plan 9, where you ask
+ // to unshare and the namespaces are unconditionally unshared.
+ // To make this model work we must further mark / as MS_PRIVATE.
+ // This is what the standard unshare command does.
+ if sys.Unshareflags&CLONE_NEWNS == CLONE_NEWNS {
+ _, _, err1 = RawSyscall6(SYS_MOUNT, uintptr(unsafe.Pointer(&none[0])), uintptr(unsafe.Pointer(&slash[0])), 0, MS_REC|MS_PRIVATE, 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+ }
+
+ // Chroot
+ if chroot != nil {
+ _, _, err1 = RawSyscall(SYS_CHROOT, uintptr(unsafe.Pointer(chroot)), 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // User and groups
+ if cred := sys.Credential; cred != nil {
+ ngroups := uintptr(len(cred.Groups))
+ groups := uintptr(0)
+ if ngroups > 0 {
+ groups = uintptr(unsafe.Pointer(&cred.Groups[0]))
+ }
+ if !(sys.GidMappings != nil && !sys.GidMappingsEnableSetgroups && ngroups == 0) && !cred.NoSetGroups {
+ _, _, err1 = RawSyscall(_SYS_setgroups, ngroups, groups, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+ _, _, err1 = RawSyscall(sys_SETGID, uintptr(cred.Gid), 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ _, _, err1 = RawSyscall(sys_SETUID, uintptr(cred.Uid), 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ if len(sys.AmbientCaps) != 0 {
+ // Ambient capabilities were added in the 4.3 kernel,
+ // so it is safe to always use _LINUX_CAPABILITY_VERSION_3.
+ caps.hdr.version = _LINUX_CAPABILITY_VERSION_3
+
+ if _, _, err1 := RawSyscall(SYS_CAPGET, uintptr(unsafe.Pointer(&caps.hdr)), uintptr(unsafe.Pointer(&caps.data[0])), 0); err1 != 0 {
+ goto childerror
+ }
+
+ for _, c := range sys.AmbientCaps {
+			// Add the c capability to the permitted and inheritable capability
+			// masks; otherwise we will not be able to add it to the ambient
+			// capability mask.
+ caps.data[capToIndex(c)].permitted |= capToMask(c)
+ caps.data[capToIndex(c)].inheritable |= capToMask(c)
+ }
+
+ if _, _, err1 := RawSyscall(SYS_CAPSET, uintptr(unsafe.Pointer(&caps.hdr)), uintptr(unsafe.Pointer(&caps.data[0])), 0); err1 != 0 {
+ goto childerror
+ }
+
+ for _, c := range sys.AmbientCaps {
+ _, _, err1 = RawSyscall6(SYS_PRCTL, PR_CAP_AMBIENT, uintptr(PR_CAP_AMBIENT_RAISE), c, 0, 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+ }
+
+ // Chdir
+ if dir != nil {
+ _, _, err1 = RawSyscall(SYS_CHDIR, uintptr(unsafe.Pointer(dir)), 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Parent death signal
+ if sys.Pdeathsig != 0 {
+ _, _, err1 = RawSyscall6(SYS_PRCTL, PR_SET_PDEATHSIG, uintptr(sys.Pdeathsig), 0, 0, 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+
+ // Signal self if parent is already dead. This might cause a
+ // duplicate signal in rare cases, but it won't matter when
+ // using SIGKILL.
+ r1, _ = rawSyscallNoError(SYS_GETPPID, 0, 0, 0)
+ if r1 != ppid {
+ pid, _ := rawSyscallNoError(SYS_GETPID, 0, 0, 0)
+ _, _, err1 := RawSyscall(SYS_KILL, pid, uintptr(sys.Pdeathsig), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+ }
+
+ // Pass 1: look for fd[i] < i and move those up above len(fd)
+ // so that pass 2 won't stomp on an fd it needs later.
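+	// Each temporary copy is made with dup3+O_CLOEXEC (atomically
+	// close-on-exec), so the copies vanish on exec; pass 2 clears the
+	// flag only on the final descriptors.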
+ if pipe < nextfd {
+ _, _, err1 = RawSyscall(SYS_DUP3, uintptr(pipe), uintptr(nextfd), O_CLOEXEC)
+ if err1 != 0 {
+ goto childerror
+ }
+ pipe = nextfd
+ nextfd++
+ }
+ for i = 0; i < len(fd); i++ {
+ if fd[i] >= 0 && fd[i] < int(i) {
+ if nextfd == pipe { // don't stomp on pipe
+ nextfd++
+ }
+ _, _, err1 = RawSyscall(SYS_DUP3, uintptr(fd[i]), uintptr(nextfd), O_CLOEXEC)
+ if err1 != 0 {
+ goto childerror
+ }
+ fd[i] = nextfd
+ nextfd++
+ }
+ }
+
+ // Pass 2: dup fd[i] down onto i.
+ for i = 0; i < len(fd); i++ {
+ if fd[i] == -1 {
+ RawSyscall(SYS_CLOSE, uintptr(i), 0, 0)
+ continue
+ }
+ if fd[i] == int(i) {
+			// dup2(i, i) won't clear the close-on-exec flag on Linux,
+			// and probably not elsewhere either.
+ _, _, err1 = RawSyscall(fcntl64Syscall, uintptr(fd[i]), F_SETFD, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ continue
+ }
+ // The new fd is created NOT close-on-exec,
+ // which is exactly what we want.
+ _, _, err1 = RawSyscall(SYS_DUP3, uintptr(fd[i]), uintptr(i), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // By convention, we don't close-on-exec the fds we are
+ // started with, so if len(fd) < 3, close 0, 1, 2 as needed.
+ // Programs that know they inherit fds >= 3 will need
+ // to set them close-on-exec.
+ for i = len(fd); i < 3; i++ {
+ RawSyscall(SYS_CLOSE, uintptr(i), 0, 0)
+ }
+
+ // Detach fd 0 from tty
+ if sys.Noctty {
+ _, _, err1 = RawSyscall(SYS_IOCTL, 0, uintptr(TIOCNOTTY), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Set the controlling TTY to Ctty
+ if sys.Setctty {
+ _, _, err1 = RawSyscall(SYS_IOCTL, uintptr(sys.Ctty), uintptr(TIOCSCTTY), 1)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Enable tracing if requested.
+ // Do this right before exec so that we don't unnecessarily trace the runtime
+ // setting up after the fork. See issue #21428.
+ if sys.Ptrace {
+ _, _, err1 = RawSyscall(SYS_PTRACE, uintptr(PTRACE_TRACEME), 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Time to exec.
+ _, _, err1 = RawSyscall(SYS_EXECVE,
+ uintptr(unsafe.Pointer(argv0)),
+ uintptr(unsafe.Pointer(&argv[0])),
+ uintptr(unsafe.Pointer(&envv[0])))
+
+childerror:
+ // send error code on pipe
+ RawSyscall(SYS_WRITE, uintptr(pipe), uintptr(unsafe.Pointer(&err1)), unsafe.Sizeof(err1))
+ for {
+ RawSyscall(SYS_EXIT, 253, 0, 0)
+ }
+}
+
+// Try to open a pipe with O_CLOEXEC set on both file descriptors.
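+// Pipe2 sets the flag atomically at creation, so there is no window in
+// which a forked child could inherit the descriptors without CLOEXEC.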
+func forkExecPipe(p []int) (err error) {
+ return Pipe2(p, O_CLOEXEC)
+}
+
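+// formatIDMappings renders one "containerID hostID size" line per mapping;
+// e.g. (illustrative) SysProcIDMap{ContainerID: 0, HostID: 1000, Size: 1}
+// becomes "0 1000 1\n", the format expected by the /proc uid_map and
+// gid_map files.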
+func formatIDMappings(idMap []SysProcIDMap) []byte {
+ var data []byte
+ for _, im := range idMap {
+ data = append(data, []byte(itoa.Itoa(im.ContainerID)+" "+itoa.Itoa(im.HostID)+" "+itoa.Itoa(im.Size)+"\n")...)
+ }
+ return data
+}
+
+// writeIDMappings writes the user namespace User ID or Group ID mappings to the specified path.
+func writeIDMappings(path string, idMap []SysProcIDMap) error {
+ fd, err := Open(path, O_RDWR, 0)
+ if err != nil {
+ return err
+ }
+
+ if _, err := Write(fd, formatIDMappings(idMap)); err != nil {
+ Close(fd)
+ return err
+ }
+
+ if err := Close(fd); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// writeSetgroups writes to /proc/PID/setgroups "deny" if enable is false
+// and "allow" if enable is true.
+// This is needed since kernel 3.19, because you can't write to gid_map
+// without disabling the setgroups() system call.
+func writeSetgroups(pid int, enable bool) error {
+ sgf := "/proc/" + itoa.Itoa(pid) + "/setgroups"
+ fd, err := Open(sgf, O_RDWR, 0)
+ if err != nil {
+ return err
+ }
+
+ var data []byte
+ if enable {
+ data = []byte("allow")
+ } else {
+ data = []byte("deny")
+ }
+
+ if _, err := Write(fd, data); err != nil {
+ Close(fd)
+ return err
+ }
+
+ return Close(fd)
+}
+
+// writeUidGidMappings writes User ID and Group ID mappings for user namespaces
+// for a process; it is called from the parent process.
+func writeUidGidMappings(pid int, sys *SysProcAttr) error {
+ if sys.UidMappings != nil {
+ uidf := "/proc/" + itoa.Itoa(pid) + "/uid_map"
+ if err := writeIDMappings(uidf, sys.UidMappings); err != nil {
+ return err
+ }
+ }
+
+ if sys.GidMappings != nil {
+		// If the kernel is too old to support /proc/PID/setgroups, writeSetgroups will return ENOENT; this is OK.
+ if err := writeSetgroups(pid, sys.GidMappingsEnableSetgroups); err != nil && err != ENOENT {
+ return err
+ }
+ gidf := "/proc/" + itoa.Itoa(pid) + "/gid_map"
+ if err := writeIDMappings(gidf, sys.GidMappings); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/contrib/go/_std_1.19/src/syscall/exec_unix.go b/contrib/go/_std_1.19/src/syscall/exec_unix.go
new file mode 100644
index 0000000000..286be454d8
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/exec_unix.go
@@ -0,0 +1,306 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+// Fork, exec, wait, etc.
+
+package syscall
+
+import (
+ errorspkg "errors"
+ "internal/bytealg"
+ "runtime"
+ "sync"
+ "unsafe"
+)
+
+// Lock synchronizing creation of new file descriptors with fork.
+//
+// We want the child in a fork/exec sequence to inherit only the
+// file descriptors we intend. To do that, we mark all file
+// descriptors close-on-exec and then, in the child, explicitly
+// unmark the ones we want the exec'ed program to keep.
+// Unix doesn't make this easy: there is, in general, no way to
+// allocate a new file descriptor close-on-exec. Instead you
+// have to allocate the descriptor and then mark it close-on-exec.
+// If a fork happens between those two events, the child's exec
+// will inherit an unwanted file descriptor.
+//
+// This lock solves that race: the create new fd/mark close-on-exec
+// operation is done holding ForkLock for reading, and the fork itself
+// is done holding ForkLock for writing. At least, that's the idea.
+// There are some complications.
+//
+// Some system calls that create new file descriptors can block
+// for arbitrarily long times: open on a hung NFS server or named
+// pipe, accept on a socket, and so on. We can't reasonably grab
+// the lock across those operations.
+//
+// It is worse to inherit some file descriptors than others.
+// If a non-malicious child accidentally inherits an open ordinary file,
+// that's not a big deal. On the other hand, if a long-lived child
+// accidentally inherits the write end of a pipe, then the reader
+// of that pipe will not see EOF until that child exits, potentially
+// causing the parent program to hang. This is a common problem
+// in threaded C programs that use popen.
+//
+// Luckily, the file descriptors that are most important not to
+// inherit are not the ones that can take an arbitrarily long time
+// to create: pipe returns instantly, and the net package uses
+// non-blocking I/O to accept on a listening socket.
+// The rules for which file descriptor-creating operations use the
+// ForkLock are as follows:
+//
+// 1) Pipe. Does not block. Use the ForkLock.
+// 2) Socket. Does not block. Use the ForkLock.
+// 3) Accept. If using non-blocking mode, use the ForkLock.
+// Otherwise, live with the race.
+// 4) Open. Can block. Use O_CLOEXEC if available (Linux).
+// Otherwise, live with the race.
+// 5) Dup. Does not block. Use the ForkLock.
+// On Linux, could use fcntl F_DUPFD_CLOEXEC
+// instead of the ForkLock, but only for dup(fd, -1).
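+//
+// A typical caller-side pattern (an illustrative sketch, not code quoted
+// from this package) is:
+//
+//	ForkLock.RLock()
+//	fd, err := Dup(oldfd)
+//	if err == nil {
+//		CloseOnExec(fd)
+//	}
+//	ForkLock.RUnlock()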
+
+var ForkLock sync.RWMutex
+
+// StringSlicePtr converts a slice of strings to a slice of pointers
+// to NUL-terminated byte arrays. If any string contains a NUL byte,
+// this function panics instead of returning an error.
+//
+// Deprecated: Use SlicePtrFromStrings instead.
+func StringSlicePtr(ss []string) []*byte {
+ bb := make([]*byte, len(ss)+1)
+ for i := 0; i < len(ss); i++ {
+ bb[i] = StringBytePtr(ss[i])
+ }
+ bb[len(ss)] = nil
+ return bb
+}
+
+// SlicePtrFromStrings converts a slice of strings to a slice of
+// pointers to NUL-terminated byte arrays. If any string contains
+// a NUL byte, it returns (nil, EINVAL).
+func SlicePtrFromStrings(ss []string) ([]*byte, error) {
+ n := 0
+ for _, s := range ss {
+ if bytealg.IndexByteString(s, 0) != -1 {
+ return nil, EINVAL
+ }
+ n += len(s) + 1 // +1 for NUL
+ }
+ bb := make([]*byte, len(ss)+1)
+ b := make([]byte, n)
+ n = 0
+ for i, s := range ss {
+ bb[i] = &b[n]
+ copy(b[n:], s)
+ n += len(s) + 1
+ }
+ return bb, nil
+}
+
+func CloseOnExec(fd int) { fcntl(fd, F_SETFD, FD_CLOEXEC) }
+
+func SetNonblock(fd int, nonblocking bool) (err error) {
+ flag, err := fcntl(fd, F_GETFL, 0)
+ if err != nil {
+ return err
+ }
+ if nonblocking {
+ flag |= O_NONBLOCK
+ } else {
+ flag &^= O_NONBLOCK
+ }
+ _, err = fcntl(fd, F_SETFL, flag)
+ return err
+}
+
+// Credential holds user and group identities to be assumed
+// by a child process started by StartProcess.
+type Credential struct {
+ Uid uint32 // User ID.
+ Gid uint32 // Group ID.
+ Groups []uint32 // Supplementary group IDs.
+ NoSetGroups bool // If true, don't set supplementary groups
+}
+
+// ProcAttr holds attributes that will be applied to a new process started
+// by StartProcess.
+type ProcAttr struct {
+ Dir string // Current working directory.
+ Env []string // Environment.
+ Files []uintptr // File descriptors.
+ Sys *SysProcAttr
+}
+
+var zeroProcAttr ProcAttr
+var zeroSysProcAttr SysProcAttr
+
+func forkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error) {
+ var p [2]int
+ var n int
+ var err1 Errno
+ var wstatus WaitStatus
+
+ if attr == nil {
+ attr = &zeroProcAttr
+ }
+ sys := attr.Sys
+ if sys == nil {
+ sys = &zeroSysProcAttr
+ }
+
+ // Convert args to C form.
+ argv0p, err := BytePtrFromString(argv0)
+ if err != nil {
+ return 0, err
+ }
+ argvp, err := SlicePtrFromStrings(argv)
+ if err != nil {
+ return 0, err
+ }
+ envvp, err := SlicePtrFromStrings(attr.Env)
+ if err != nil {
+ return 0, err
+ }
+
+ if (runtime.GOOS == "freebsd" || runtime.GOOS == "dragonfly") && len(argv[0]) > len(argv0) {
+ argvp[0] = argv0p
+ }
+
+ var chroot *byte
+ if sys.Chroot != "" {
+ chroot, err = BytePtrFromString(sys.Chroot)
+ if err != nil {
+ return 0, err
+ }
+ }
+ var dir *byte
+ if attr.Dir != "" {
+ dir, err = BytePtrFromString(attr.Dir)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ // Both Setctty and Foreground use the Ctty field,
+ // but they give it slightly different meanings.
+ if sys.Setctty && sys.Foreground {
+ return 0, errorspkg.New("both Setctty and Foreground set in SysProcAttr")
+ }
+ if sys.Setctty && sys.Ctty >= len(attr.Files) {
+ return 0, errorspkg.New("Setctty set but Ctty not valid in child")
+ }
+
+ // Acquire the fork lock so that no other threads
+ // create new fds that are not yet close-on-exec
+ // before we fork.
+ ForkLock.Lock()
+
+ // Allocate child status pipe close on exec.
+ if err = forkExecPipe(p[:]); err != nil {
+ ForkLock.Unlock()
+ return 0, err
+ }
+
+ // Kick off child.
+ pid, err1 = forkAndExecInChild(argv0p, argvp, envvp, chroot, dir, attr, sys, p[1])
+ if err1 != 0 {
+ Close(p[0])
+ Close(p[1])
+ ForkLock.Unlock()
+ return 0, Errno(err1)
+ }
+ ForkLock.Unlock()
+
+ // Read child error status from pipe.
+ Close(p[1])
+ for {
+ n, err = readlen(p[0], (*byte)(unsafe.Pointer(&err1)), int(unsafe.Sizeof(err1)))
+ if err != EINTR {
+ break
+ }
+ }
+ Close(p[0])
+ if err != nil || n != 0 {
+ if n == int(unsafe.Sizeof(err1)) {
+ err = Errno(err1)
+ }
+ if err == nil {
+ err = EPIPE
+ }
+
+ // Child failed; wait for it to exit, to make sure
+ // the zombies don't accumulate.
+ _, err1 := Wait4(pid, &wstatus, 0, nil)
+ for err1 == EINTR {
+ _, err1 = Wait4(pid, &wstatus, 0, nil)
+ }
+ return 0, err
+ }
+
+ // Read got EOF, so pipe closed on exec, so exec succeeded.
+ return pid, nil
+}
+
+// Combination of fork and exec, careful to be thread safe.
+func ForkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error) {
+ return forkExec(argv0, argv, attr)
+}
+
+// StartProcess wraps ForkExec for package os.
+func StartProcess(argv0 string, argv []string, attr *ProcAttr) (pid int, handle uintptr, err error) {
+ pid, err = forkExec(argv0, argv, attr)
+ return pid, 0, err
+}
+
+// Implemented in runtime package.
+func runtime_BeforeExec()
+func runtime_AfterExec()
+
+// execveLibc is non-nil on OSes that use libc-based syscalls; it is set to
+// execve in exec_libc.go. This avoids a build dependency for other platforms.
+var execveLibc func(path uintptr, argv uintptr, envp uintptr) Errno
+var execveDarwin func(path *byte, argv **byte, envp **byte) error
+var execveOpenBSD func(path *byte, argv **byte, envp **byte) error
+
+// Exec invokes the execve(2) system call.
+func Exec(argv0 string, argv []string, envv []string) (err error) {
+ argv0p, err := BytePtrFromString(argv0)
+ if err != nil {
+ return err
+ }
+ argvp, err := SlicePtrFromStrings(argv)
+ if err != nil {
+ return err
+ }
+ envvp, err := SlicePtrFromStrings(envv)
+ if err != nil {
+ return err
+ }
+ runtime_BeforeExec()
+
+ var err1 error
+ if runtime.GOOS == "solaris" || runtime.GOOS == "illumos" || runtime.GOOS == "aix" {
+ // RawSyscall should never be used on Solaris, illumos, or AIX.
+ err1 = execveLibc(
+ uintptr(unsafe.Pointer(argv0p)),
+ uintptr(unsafe.Pointer(&argvp[0])),
+ uintptr(unsafe.Pointer(&envvp[0])))
+ } else if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
+ // Similarly on Darwin.
+ err1 = execveDarwin(argv0p, &argvp[0], &envvp[0])
+ } else if runtime.GOOS == "openbsd" && (runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") {
+ // Similarly on OpenBSD.
+ err1 = execveOpenBSD(argv0p, &argvp[0], &envvp[0])
+ } else {
+ _, _, err1 = RawSyscall(SYS_EXECVE,
+ uintptr(unsafe.Pointer(argv0p)),
+ uintptr(unsafe.Pointer(&argvp[0])),
+ uintptr(unsafe.Pointer(&envvp[0])))
+ }
+ runtime_AfterExec()
+ return err1
+}
diff --git a/contrib/go/_std_1.18/src/syscall/flock.go b/contrib/go/_std_1.19/src/syscall/flock.go
index 8cb8f16153..8cb8f16153 100644
--- a/contrib/go/_std_1.18/src/syscall/flock.go
+++ b/contrib/go/_std_1.19/src/syscall/flock.go
diff --git a/contrib/go/_std_1.18/src/syscall/flock_darwin.go b/contrib/go/_std_1.19/src/syscall/flock_darwin.go
index d2bd84130c..d2bd84130c 100644
--- a/contrib/go/_std_1.18/src/syscall/flock_darwin.go
+++ b/contrib/go/_std_1.19/src/syscall/flock_darwin.go
diff --git a/contrib/go/_std_1.19/src/syscall/forkpipe.go b/contrib/go/_std_1.19/src/syscall/forkpipe.go
new file mode 100644
index 0000000000..5082abc41c
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/forkpipe.go
@@ -0,0 +1,21 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin
+
+package syscall
+
+// Try to open a pipe with O_CLOEXEC set on both file descriptors.
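+// Unlike the pipe2-based version on Linux, this sets FD_CLOEXEC only after
+// the pipe exists, so it relies on the caller holding ForkLock to keep the
+// descriptors from leaking into a concurrently forked child.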
+func forkExecPipe(p []int) error {
+ err := Pipe(p)
+ if err != nil {
+ return err
+ }
+ _, err = fcntl(p[0], F_SETFD, FD_CLOEXEC)
+ if err != nil {
+ return err
+ }
+ _, err = fcntl(p[1], F_SETFD, FD_CLOEXEC)
+ return err
+}
diff --git a/contrib/go/_std_1.18/src/syscall/lsf_linux.go b/contrib/go/_std_1.19/src/syscall/lsf_linux.go
index 28e96d54e6..28e96d54e6 100644
--- a/contrib/go/_std_1.18/src/syscall/lsf_linux.go
+++ b/contrib/go/_std_1.19/src/syscall/lsf_linux.go
diff --git a/contrib/go/_std_1.18/src/syscall/msan0.go b/contrib/go/_std_1.19/src/syscall/msan0.go
index fba8a5f716..fba8a5f716 100644
--- a/contrib/go/_std_1.18/src/syscall/msan0.go
+++ b/contrib/go/_std_1.19/src/syscall/msan0.go
diff --git a/contrib/go/_std_1.18/src/syscall/net.go b/contrib/go/_std_1.19/src/syscall/net.go
index 531fa80d8f..531fa80d8f 100644
--- a/contrib/go/_std_1.18/src/syscall/net.go
+++ b/contrib/go/_std_1.19/src/syscall/net.go
diff --git a/contrib/go/_std_1.18/src/syscall/netlink_linux.go b/contrib/go/_std_1.19/src/syscall/netlink_linux.go
index 2d810705bf..2d810705bf 100644
--- a/contrib/go/_std_1.18/src/syscall/netlink_linux.go
+++ b/contrib/go/_std_1.19/src/syscall/netlink_linux.go
diff --git a/contrib/go/_std_1.18/src/syscall/ptrace_darwin.go b/contrib/go/_std_1.19/src/syscall/ptrace_darwin.go
index 519e451c73..519e451c73 100644
--- a/contrib/go/_std_1.18/src/syscall/ptrace_darwin.go
+++ b/contrib/go/_std_1.19/src/syscall/ptrace_darwin.go
diff --git a/contrib/go/_std_1.18/src/syscall/route_bsd.go b/contrib/go/_std_1.19/src/syscall/route_bsd.go
index 8e47ff888e..8e47ff888e 100644
--- a/contrib/go/_std_1.18/src/syscall/route_bsd.go
+++ b/contrib/go/_std_1.19/src/syscall/route_bsd.go
diff --git a/contrib/go/_std_1.18/src/syscall/route_darwin.go b/contrib/go/_std_1.19/src/syscall/route_darwin.go
index b0636ed07c..b0636ed07c 100644
--- a/contrib/go/_std_1.18/src/syscall/route_darwin.go
+++ b/contrib/go/_std_1.19/src/syscall/route_darwin.go
diff --git a/contrib/go/_std_1.18/src/syscall/setuidgid_linux.go b/contrib/go/_std_1.19/src/syscall/setuidgid_linux.go
index c995d258eb..c995d258eb 100644
--- a/contrib/go/_std_1.18/src/syscall/setuidgid_linux.go
+++ b/contrib/go/_std_1.19/src/syscall/setuidgid_linux.go
diff --git a/contrib/go/_std_1.18/src/syscall/sock_cloexec_linux.go b/contrib/go/_std_1.19/src/syscall/sock_cloexec_linux.go
index 600cf25c15..600cf25c15 100644
--- a/contrib/go/_std_1.18/src/syscall/sock_cloexec_linux.go
+++ b/contrib/go/_std_1.19/src/syscall/sock_cloexec_linux.go
diff --git a/contrib/go/_std_1.18/src/syscall/sockcmsg_linux.go b/contrib/go/_std_1.19/src/syscall/sockcmsg_linux.go
index d97667cf7e..d97667cf7e 100644
--- a/contrib/go/_std_1.18/src/syscall/sockcmsg_linux.go
+++ b/contrib/go/_std_1.19/src/syscall/sockcmsg_linux.go
diff --git a/contrib/go/_std_1.19/src/syscall/sockcmsg_unix.go b/contrib/go/_std_1.19/src/syscall/sockcmsg_unix.go
new file mode 100644
index 0000000000..6ade73e87e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/sockcmsg_unix.go
@@ -0,0 +1,92 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+// Socket control messages
+
+package syscall
+
+import (
+ "unsafe"
+)
+
+// CmsgLen returns the value to store in the Len field of the Cmsghdr
+// structure, taking into account any necessary alignment.
+func CmsgLen(datalen int) int {
+ return cmsgAlignOf(SizeofCmsghdr) + datalen
+}
+
+// CmsgSpace returns the number of bytes an ancillary element with
+// payload of the passed data length occupies.
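+// For example (illustrative), on 64-bit Linux, where SizeofCmsghdr is 16 and
+// cmsgAlignOf rounds up to 8 bytes, CmsgSpace(4) returns 16 + 8 = 24.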
+func CmsgSpace(datalen int) int {
+ return cmsgAlignOf(SizeofCmsghdr) + cmsgAlignOf(datalen)
+}
+
+func (h *Cmsghdr) data(offset uintptr) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(SizeofCmsghdr)) + offset)
+}
+
+// SocketControlMessage represents a socket control message.
+type SocketControlMessage struct {
+ Header Cmsghdr
+ Data []byte
+}
+
+// ParseSocketControlMessage parses b as an array of socket control
+// messages.
+func ParseSocketControlMessage(b []byte) ([]SocketControlMessage, error) {
+ var msgs []SocketControlMessage
+ i := 0
+ for i+CmsgLen(0) <= len(b) {
+ h, dbuf, err := socketControlMessageHeaderAndData(b[i:])
+ if err != nil {
+ return nil, err
+ }
+ m := SocketControlMessage{Header: *h, Data: dbuf}
+ msgs = append(msgs, m)
+ i += cmsgAlignOf(int(h.Len))
+ }
+ return msgs, nil
+}
+
+func socketControlMessageHeaderAndData(b []byte) (*Cmsghdr, []byte, error) {
+ h := (*Cmsghdr)(unsafe.Pointer(&b[0]))
+ if h.Len < SizeofCmsghdr || uint64(h.Len) > uint64(len(b)) {
+ return nil, nil, EINVAL
+ }
+ return h, b[cmsgAlignOf(SizeofCmsghdr):h.Len], nil
+}
+
+// UnixRights encodes a set of open file descriptors into a socket
+// control message for sending to another process.
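+//
+// A minimal sending sketch (illustrative; connFd is assumed to be a
+// connected Unix-domain socket descriptor, fdToSend the fd being passed):
+//
+//	oob := UnixRights(fdToSend)
+//	err := Sendmsg(connFd, []byte{0}, oob, nil, 0)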
+func UnixRights(fds ...int) []byte {
+ datalen := len(fds) * 4
+ b := make([]byte, CmsgSpace(datalen))
+ h := (*Cmsghdr)(unsafe.Pointer(&b[0]))
+ h.Level = SOL_SOCKET
+ h.Type = SCM_RIGHTS
+ h.SetLen(CmsgLen(datalen))
+ for i, fd := range fds {
+ *(*int32)(h.data(4 * uintptr(i))) = int32(fd)
+ }
+ return b
+}
+
+// ParseUnixRights decodes a socket control message that contains an
+// integer array of open file descriptors from another process.
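+//
+// A minimal receiving sketch (illustrative; connFd as in UnixRights):
+//
+//	buf := make([]byte, 1)
+//	oob := make([]byte, 128)
+//	_, oobn, _, _, err := Recvmsg(connFd, buf, oob, 0)
+//	// on success:
+//	msgs, _ := ParseSocketControlMessage(oob[:oobn])
+//	fds, _ := ParseUnixRights(&msgs[0])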
+func ParseUnixRights(m *SocketControlMessage) ([]int, error) {
+ if m.Header.Level != SOL_SOCKET {
+ return nil, EINVAL
+ }
+ if m.Header.Type != SCM_RIGHTS {
+ return nil, EINVAL
+ }
+ fds := make([]int, len(m.Data)>>2)
+ for i, j := 0, 0; i < len(m.Data); i += 4 {
+ fds[j] = int(*(*int32)(unsafe.Pointer(&m.Data[i])))
+ j++
+ }
+ return fds, nil
+}
diff --git a/contrib/go/_std_1.18/src/syscall/sockcmsg_unix_other.go b/contrib/go/_std_1.19/src/syscall/sockcmsg_unix_other.go
index 845bd9df99..845bd9df99 100644
--- a/contrib/go/_std_1.18/src/syscall/sockcmsg_unix_other.go
+++ b/contrib/go/_std_1.19/src/syscall/sockcmsg_unix_other.go
diff --git a/contrib/go/_std_1.19/src/syscall/syscall.go b/contrib/go/_std_1.19/src/syscall/syscall.go
new file mode 100644
index 0000000000..62bfa449cf
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/syscall.go
@@ -0,0 +1,102 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package syscall contains an interface to the low-level operating system
+// primitives. The details vary depending on the underlying system, and
+// by default, godoc will display the syscall documentation for the current
+// system. If you want godoc to display syscall documentation for another
+// system, set $GOOS and $GOARCH to the desired system. For example, if
+// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS
+// to freebsd and $GOARCH to arm.
+// The primary use of syscall is inside other packages that provide a more
+// portable interface to the system, such as "os", "time" and "net". Use
+// those packages rather than this one if you can.
+// For details of the functions and data types in this package consult
+// the manuals for the appropriate operating system.
+// These calls return err == nil to indicate success; otherwise
+// err is an operating system error describing the failure.
+// On most systems, that error has type syscall.Errno.
+//
+// Deprecated: this package is locked down. Callers should use the
+// corresponding package in the golang.org/x/sys repository instead.
+// That is also where updates required by new systems or versions
+// should be applied. See https://golang.org/s/go1.4-syscall for more
+// information.
+package syscall
+
+import "internal/bytealg"
+
+//go:generate go run ./mksyscall_windows.go -systemdll -output zsyscall_windows.go syscall_windows.go security_windows.go
+
+// StringByteSlice converts a string to a NUL-terminated []byte.
+// If s contains a NUL byte, this function panics instead of
+// returning an error.
+//
+// Deprecated: Use ByteSliceFromString instead.
+func StringByteSlice(s string) []byte {
+ a, err := ByteSliceFromString(s)
+ if err != nil {
+ panic("syscall: string with NUL passed to StringByteSlice")
+ }
+ return a
+}
+
+// ByteSliceFromString returns a NUL-terminated slice of bytes
+// containing the text of s. If s contains a NUL byte at any
+// location, it returns (nil, EINVAL).
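+// For example, ByteSliceFromString("ab") returns []byte{'a', 'b', 0}.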
+func ByteSliceFromString(s string) ([]byte, error) {
+ if bytealg.IndexByteString(s, 0) != -1 {
+ return nil, EINVAL
+ }
+ a := make([]byte, len(s)+1)
+ copy(a, s)
+ return a, nil
+}
+
+// StringBytePtr returns a pointer to a NUL-terminated array of bytes.
+// If s contains a NUL byte, this function panics instead of returning
+// an error.
+//
+// Deprecated: Use BytePtrFromString instead.
+func StringBytePtr(s string) *byte { return &StringByteSlice(s)[0] }
+
+// BytePtrFromString returns a pointer to a NUL-terminated array of
+// bytes containing the text of s. If s contains a NUL byte at any
+// location, it returns (nil, EINVAL).
+func BytePtrFromString(s string) (*byte, error) {
+ a, err := ByteSliceFromString(s)
+ if err != nil {
+ return nil, err
+ }
+ return &a[0], nil
+}
+
+// Single-word zero for use when we need a valid pointer to 0 bytes.
+// See mksyscall.pl.
+var _zero uintptr
+
+// Unix returns the time stored in ts as seconds plus nanoseconds.
+func (ts *Timespec) Unix() (sec int64, nsec int64) {
+ return int64(ts.Sec), int64(ts.Nsec)
+}
+
+// Unix returns the time stored in tv as seconds plus nanoseconds.
+func (tv *Timeval) Unix() (sec int64, nsec int64) {
+ return int64(tv.Sec), int64(tv.Usec) * 1000
+}
+
+// Nano returns the time stored in ts as nanoseconds.
+func (ts *Timespec) Nano() int64 {
+ return int64(ts.Sec)*1e9 + int64(ts.Nsec)
+}
+
+// Nano returns the time stored in tv as nanoseconds.
+func (tv *Timeval) Nano() int64 {
+ return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000
+}
+
+// Getpagesize and Exit are provided by the runtime.
+
+func Getpagesize() int
+func Exit(code int)
diff --git a/contrib/go/_std_1.18/src/syscall/syscall_bsd.go b/contrib/go/_std_1.19/src/syscall/syscall_bsd.go
index 5e636d5258..5e636d5258 100644
--- a/contrib/go/_std_1.18/src/syscall/syscall_bsd.go
+++ b/contrib/go/_std_1.19/src/syscall/syscall_bsd.go
diff --git a/contrib/go/_std_1.19/src/syscall/syscall_darwin.go b/contrib/go/_std_1.19/src/syscall/syscall_darwin.go
new file mode 100644
index 0000000000..663bd98c10
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/syscall_darwin.go
@@ -0,0 +1,339 @@
+// Copyright 2009,2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Darwin system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and wrap
+// it in our own nicer implementation, either here or in
+// syscall_bsd.go or syscall_unix.go.
+
+package syscall
+
+import (
+ "internal/abi"
+ "unsafe"
+)
+
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
+func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
+func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+
+var dupTrampoline = abi.FuncPCABI0(libc_dup2_trampoline)
+
+type SockaddrDatalink struct {
+ Len uint8
+ Family uint8
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [12]int8
+ raw RawSockaddrDatalink
+}
+
+// Translate "kern.hostname" to []_C_int{0,1,2,3}.
+func nametomib(name string) (mib []_C_int, err error) {
+ const siz = unsafe.Sizeof(mib[0])
+
+ // NOTE(rsc): It seems strange to set the buffer to have
+ // size CTL_MAXNAME+2 but use only CTL_MAXNAME
+ // as the size. I don't know why the +2 is here, but the
+ // kernel uses +2 for its own implementation of this function.
+ // I am scared that if we don't include the +2 here, the kernel
+ // will silently write 2 words farther than we specify
+ // and we'll get memory corruption.
+ var buf [CTL_MAXNAME + 2]_C_int
+ n := uintptr(CTL_MAXNAME) * siz
+
+ p := (*byte)(unsafe.Pointer(&buf[0]))
+ bytes, err := ByteSliceFromString(name)
+ if err != nil {
+ return nil, err
+ }
+
+	// Magic sysctl: "setting" 0.3 to a string name
+	// lets you read back the array-of-integers form.
+ if err = sysctl([]_C_int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil {
+ return nil, err
+ }
+ return buf[0 : n/siz], nil
+}
+
+func direntIno(buf []byte) (uint64, bool) {
+ return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
+}
+
+func direntReclen(buf []byte) (uint64, bool) {
+ return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
+}
+
+func direntNamlen(buf []byte) (uint64, bool) {
+ return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
+}
+
+func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) }
+func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) }
+
+//sysnb pipe(p *[2]int32) (err error)
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var q [2]int32
+ err = pipe(&q)
+ if err == nil {
+ p[0] = int(q[0])
+ p[1] = int(q[1])
+ }
+ return
+}
+
+func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
+ var _p0 unsafe.Pointer
+ var bufsize uintptr
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
+ }
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_getfsstat_trampoline), uintptr(_p0), bufsize, uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func libc_getfsstat_trampoline()
+
+//go:cgo_import_dynamic libc_getfsstat getfsstat "/usr/lib/libSystem.B.dylib"
+
+//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error)
+
+/*
+ * Wrapped
+ */
+
+//sys kill(pid int, signum int, posix int) (err error)
+
+func Kill(pid int, signum Signal) (err error) { return kill(pid, int(signum), 1) }
+
+/*
+ * Exposed directly
+ */
+//sys Access(path string, mode uint32) (err error)
+//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error)
+//sys Chdir(path string) (err error)
+//sys Chflags(path string, flags int) (err error)
+//sys Chmod(path string, mode uint32) (err error)
+//sys Chown(path string, uid int, gid int) (err error)
+//sys Chroot(path string) (err error)
+//sys Close(fd int) (err error)
+//sys closedir(dir uintptr) (err error)
+//sys Dup(fd int) (nfd int, err error)
+//sys Dup2(from int, to int) (err error)
+//sys Exchangedata(path1 string, path2 string, options int) (err error)
+//sys Fchdir(fd int) (err error)
+//sys Fchflags(fd int, flags int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Flock(fd int, how int) (err error)
+//sys Fpathconf(fd int, name int) (val int, err error)
+//sys Fsync(fd int) (err error)
+// Fsync is not called for os.File.Sync(). Please see internal/poll/fd_fsync_darwin.go
+//sys Ftruncate(fd int, length int64) (err error)
+//sys Getdtablesize() (size int)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (uid int)
+//sysnb Getgid() (gid int)
+//sysnb Getpgid(pid int) (pgid int, err error)
+//sysnb Getpgrp() (pgrp int)
+//sysnb Getpid() (pid int)
+//sysnb Getppid() (ppid int)
+//sys Getpriority(which int, who int) (prio int, err error)
+//sysnb Getrlimit(which int, lim *Rlimit) (err error)
+//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Getsid(pid int) (sid int, err error)
+//sysnb Getuid() (uid int)
+//sysnb Issetugid() (tainted bool)
+//sys Kqueue() (fd int, err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Link(path string, link string) (err error)
+//sys Listen(s int, backlog int) (err error)
+//sys Mkdir(path string, mode uint32) (err error)
+//sys Mkfifo(path string, mode uint32) (err error)
+//sys Mknod(path string, mode uint32, dev int) (err error)
+//sys Mlock(b []byte) (err error)
+//sys Mlockall(flags int) (err error)
+//sys Mprotect(b []byte, prot int) (err error)
+//sys Munlock(b []byte) (err error)
+//sys Munlockall() (err error)
+//sys Open(path string, mode int, perm uint32) (fd int, err error)
+//sys Pathconf(path string, name int) (val int, err error)
+//sys pread(fd int, p []byte, offset int64) (n int, err error)
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys read(fd int, p []byte) (n int, err error)
+//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno)
+//sys Readlink(path string, buf []byte) (n int, err error)
+//sys Rename(from string, to string) (err error)
+//sys Revoke(path string) (err error)
+//sys Rmdir(path string) (err error)
+//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_lseek
+//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
+//sys Setegid(egid int) (err error)
+//sysnb Seteuid(euid int) (err error)
+//sysnb Setgid(gid int) (err error)
+//sys Setlogin(name string) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sys Setpriority(which int, who int, prio int) (err error)
+//sys Setprivexec(flag int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sysnb Setrlimit(which int, lim *Rlimit) (err error)
+//sysnb Setsid() (pid int, err error)
+//sysnb Settimeofday(tp *Timeval) (err error)
+//sysnb Setuid(uid int) (err error)
+//sys Symlink(path string, link string) (err error)
+//sys Sync() (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys Umask(newmask int) (oldmask int)
+//sys Undelete(path string) (err error)
+//sys Unlink(path string) (err error)
+//sys Unmount(path string, flags int) (err error)
+//sys write(fd int, p []byte) (n int, err error)
+//sys writev(fd int, iovecs []Iovec) (cnt uintptr, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
+//sys munmap(addr uintptr, length uintptr) (err error)
+//sysnb fork() (pid int, err error)
+//sysnb ioctl(fd int, req int, arg int) (err error)
+//sysnb ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_ioctl
+//sysnb execve(path *byte, argv **byte, envp **byte) (err error)
+//sysnb exit(res int) (err error)
+//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error)
+//sys fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) = SYS_fcntl
+//sys unlinkat(fd int, path string, flags int) (err error)
+//sys openat(fd int, path string, flags int, perm uint32) (fdret int, err error)
+//sys getcwd(buf []byte) (n int, err error)
+
+func init() {
+ execveDarwin = execve
+}
+
+func fdopendir(fd int) (dir uintptr, err error) {
+ r0, _, e1 := syscallPtr(abi.FuncPCABI0(libc_fdopendir_trampoline), uintptr(fd), 0, 0)
+ dir = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_fdopendir_trampoline()
+
+//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib"
+
+func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_read_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_write_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
+ // Simulate Getdirentries using fdopendir/readdir_r/closedir.
+ // We store the number of entries to skip in the seek
+ // offset of fd. See issue #31368.
+ // It's not the full required semantics, but should handle the case
+ // of calling Getdirentries or ReadDirent repeatedly.
+ // It won't handle assigning the results of lseek to *basep, or handle
+ // the directory being edited underfoot.
+ skip, err := Seek(fd, 0, 1 /* SEEK_CUR */)
+ if err != nil {
+ return 0, err
+ }
+
+ // We need to duplicate the incoming file descriptor
+ // because the caller expects to retain control of it, but
+ // fdopendir expects to take control of its argument.
+ // Just Dup'ing the file descriptor is not enough, as the
+ // result shares underlying state. Use openat to make a really
+ // new file descriptor referring to the same directory.
+ fd2, err := openat(fd, ".", O_RDONLY, 0)
+ if err != nil {
+ return 0, err
+ }
+ d, err := fdopendir(fd2)
+ if err != nil {
+ Close(fd2)
+ return 0, err
+ }
+ defer closedir(d)
+
+ var cnt int64
+ for {
+ var entry Dirent
+ var entryp *Dirent
+ e := readdir_r(d, &entry, &entryp)
+ if e != 0 {
+ return n, errnoErr(e)
+ }
+ if entryp == nil {
+ break
+ }
+ if skip > 0 {
+ skip--
+ cnt++
+ continue
+ }
+ reclen := int(entry.Reclen)
+ if reclen > len(buf) {
+ // Not enough room. Return for now.
+ // The counter will let us know where we should start up again.
+ // Note: this strategy for suspending in the middle and
+ // restarting is O(n^2) in the length of the directory. Oh well.
+ break
+ }
+ // Copy entry into return buffer.
+ s := struct {
+ ptr unsafe.Pointer
+ siz int
+ cap int
+ }{ptr: unsafe.Pointer(&entry), siz: reclen, cap: reclen}
+ copy(buf, *(*[]byte)(unsafe.Pointer(&s)))
+ buf = buf[reclen:]
+ n += reclen
+ cnt++
+ }
+ // Set the seek offset of the input fd to record
+ // how many files we've already returned.
+ _, err = Seek(fd, cnt, 0 /* SEEK_SET */)
+ if err != nil {
+ return n, err
+ }
+
+ return n, nil
+}
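+
+// Illustrative read loop over this emulation (a sketch; fd is assumed to be
+// an open directory descriptor):
+//
+//	buf := make([]byte, 4096)
+//	var base uintptr
+//	for {
+//		n, err := Getdirentries(fd, buf, &base)
+//		if err != nil || n == 0 {
+//			break
+//		}
+//		// parse Dirent records from buf[:n]
+//	}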
+
+// Implemented in the runtime package (runtime/sys_darwin.go)
+func syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
+func syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+func syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+func rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
+func rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+func syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
diff --git a/contrib/go/_std_1.18/src/syscall/syscall_darwin_amd64.go b/contrib/go/_std_1.19/src/syscall/syscall_darwin_amd64.go
index ef3c1998aa..ef3c1998aa 100644
--- a/contrib/go/_std_1.18/src/syscall/syscall_darwin_amd64.go
+++ b/contrib/go/_std_1.19/src/syscall/syscall_darwin_amd64.go
diff --git a/contrib/go/_std_1.19/src/syscall/syscall_linux.go b/contrib/go/_std_1.19/src/syscall/syscall_linux.go
new file mode 100644
index 0000000000..e1837b91a7
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/syscall_linux.go
@@ -0,0 +1,1238 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Linux system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and
+// wrap it in our own nicer implementation.
+
+package syscall
+
+import (
+ "internal/itoa"
+ "unsafe"
+)
+
+// N.B. RawSyscall6 is provided via linkname by runtime/internal/syscall.
+//
+// Errno is uintptr and thus compatible with the runtime/internal/syscall
+// definition.
+
+func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+
+// Pull in entersyscall/exitsyscall for Syscall/Syscall6.
+//
+// Note that this can't be a push linkname because the runtime already has a
+// nameless linkname to export to assembly here and in x/sys. Additionally,
+// entersyscall fetches the caller PC and SP and thus can't have a wrapper
+// in between.
+
+//go:linkname runtime_entersyscall runtime.entersyscall
+func runtime_entersyscall()
+
+//go:linkname runtime_exitsyscall runtime.exitsyscall
+func runtime_exitsyscall()
+
+// N.B. For the Syscall functions below:
+//
+// //go:uintptrkeepalive because the uintptr arguments may be pointers
+// converted to uintptr that need to be kept alive in the caller (this is
+// implied for RawSyscall6 since it has no body).
+//
+// //go:nosplit because stack copying does not account for uintptrkeepalive, so
+// the stack must not grow. Stack copying cannot blindly assume that all
+// uintptr arguments are pointers, because some values may look like pointers,
+// but not really be pointers, and adjusting their value would break the call.
+//
+// //go:norace, on RawSyscall, to avoid race instrumentation if RawSyscall is
+// called after fork, or from a signal handler.
+//
+// //go:linkname to ensure ABI wrappers are generated for external callers
+// (notably x/sys/unix assembly).
+
+//go:uintptrkeepalive
+//go:nosplit
+//go:norace
+//go:linkname RawSyscall
+func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) {
+ return RawSyscall6(trap, a1, a2, a3, 0, 0, 0)
+}
+
+//go:uintptrkeepalive
+//go:nosplit
+//go:linkname Syscall
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) {
+ runtime_entersyscall()
+ // N.B. Calling RawSyscall here is unsafe with atomic coverage
+ // instrumentation and race mode.
+ //
+ // Coverage instrumentation will add a sync/atomic call to RawSyscall.
+ // Race mode will add race instrumentation to sync/atomic. Race
+ // instrumentation requires a P, which we no longer have.
+ //
+ // RawSyscall6 is fine because it is implemented in assembly and thus
+ // has no coverage instrumentation.
+ //
+ // This is typically not a problem in the runtime because cmd/go avoids
+ // adding coverage instrumentation to the runtime in race mode.
+ r1, r2, err = RawSyscall6(trap, a1, a2, a3, 0, 0, 0)
+ runtime_exitsyscall()
+ return
+}
+
+//go:uintptrkeepalive
+//go:nosplit
+//go:linkname Syscall6
+func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) {
+ runtime_entersyscall()
+ r1, r2, err = RawSyscall6(trap, a1, a2, a3, a4, a5, a6)
+ runtime_exitsyscall()
+ return
+}
+
+func rawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr)
+
+/*
+ * Wrapped
+ */
+
+func Access(path string, mode uint32) (err error) {
+ return Faccessat(_AT_FDCWD, path, mode, 0)
+}
+
+func Chmod(path string, mode uint32) (err error) {
+ return Fchmodat(_AT_FDCWD, path, mode, 0)
+}
+
+func Chown(path string, uid int, gid int) (err error) {
+ return Fchownat(_AT_FDCWD, path, uid, gid, 0)
+}
+
+func Creat(path string, mode uint32) (fd int, err error) {
+ return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode)
+}
+
+func isGroupMember(gid int) bool {
+ groups, err := Getgroups()
+ if err != nil {
+ return false
+ }
+
+ for _, g := range groups {
+ if g == gid {
+ return true
+ }
+ }
+ return false
+}
+
+//sys faccessat(dirfd int, path string, mode uint32) (err error)
+
+func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
+ if flags & ^(_AT_SYMLINK_NOFOLLOW|_AT_EACCESS) != 0 {
+ return EINVAL
+ }
+
+ // The Linux kernel faccessat system call does not take any flags.
+ // The glibc faccessat implements the flags itself; see
+ // https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/unix/sysv/linux/faccessat.c;hb=HEAD
+ // Because people naturally expect syscall.Faccessat to act
+ // like C faccessat, we do the same.
+
+ if flags == 0 {
+ return faccessat(dirfd, path, mode)
+ }
+
+ var st Stat_t
+ if err := fstatat(dirfd, path, &st, flags&_AT_SYMLINK_NOFOLLOW); err != nil {
+ return err
+ }
+
+ mode &= 7
+ if mode == 0 {
+ return nil
+ }
+
+ var uid int
+ if flags&_AT_EACCESS != 0 {
+ uid = Geteuid()
+ } else {
+ uid = Getuid()
+ }
+
+ if uid == 0 {
+ if mode&1 == 0 {
+ // Root can read and write any file.
+ return nil
+ }
+ if st.Mode&0111 != 0 {
+ // Root can execute any file that anybody can execute.
+ return nil
+ }
+ return EACCES
+ }
+
+ var fmode uint32
+ if uint32(uid) == st.Uid {
+ fmode = (st.Mode >> 6) & 7
+ } else {
+ var gid int
+ if flags&_AT_EACCESS != 0 {
+ gid = Getegid()
+ } else {
+ gid = Getgid()
+ }
+
+ if uint32(gid) == st.Gid || isGroupMember(int(st.Gid)) {
+ fmode = (st.Mode >> 3) & 7
+ } else {
+ fmode = st.Mode & 7
+ }
+ }
+
+ if fmode&mode == mode {
+ return nil
+ }
+
+ return EACCES
+}
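+
+// Worked example of the check above (assumed values, for illustration): for a
+// file with st.Mode = 0754, a non-root caller whose uid matches st.Uid gets
+// fmode = (0754 >> 6) & 7 = 7, so a request for mode = 7 (rwx) succeeds; a
+// caller matching only st.Gid gets fmode = 5, and the same request fails
+// with EACCES.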
+
+//sys fchmodat(dirfd int, path string, mode uint32) (err error)
+
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+	// Linux fchmodat doesn't support the flags parameter. Mimic glibc's behavior
+	// and check the flags. Otherwise the mode would be applied to the symlink
+	// destination, which is not what the user expects.
+ if flags&^_AT_SYMLINK_NOFOLLOW != 0 {
+ return EINVAL
+ } else if flags&_AT_SYMLINK_NOFOLLOW != 0 {
+ return EOPNOTSUPP
+ }
+ return fchmodat(dirfd, path, mode)
+}
+
+//sys linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error)
+
+func Link(oldpath string, newpath string) (err error) {
+ return linkat(_AT_FDCWD, oldpath, _AT_FDCWD, newpath, 0)
+}
+
+func Mkdir(path string, mode uint32) (err error) {
+ return Mkdirat(_AT_FDCWD, path, mode)
+}
+
+func Mknod(path string, mode uint32, dev int) (err error) {
+ return Mknodat(_AT_FDCWD, path, mode, dev)
+}
+
+func Open(path string, mode int, perm uint32) (fd int, err error) {
+ return openat(_AT_FDCWD, path, mode|O_LARGEFILE, perm)
+}
+
+//sys openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
+
+func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
+ return openat(dirfd, path, flags|O_LARGEFILE, mode)
+}
+
+func Pipe(p []int) error {
+ return Pipe2(p, 0)
+}
+
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe2(p []int, flags int) error {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err := pipe2(&pp, flags)
+ if err == nil {
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ }
+ return err
+}
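+
+// Typical use (illustrative sketch): create a close-on-exec pipe.
+//
+//	p := make([]int, 2)
+//	if err := Pipe2(p, O_CLOEXEC); err != nil {
+//		// handle error
+//	}
+//	r, w := p[0], p[1] // read end, write end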
+
+//sys readlinkat(dirfd int, path string, buf []byte) (n int, err error)
+
+func Readlink(path string, buf []byte) (n int, err error) {
+ return readlinkat(_AT_FDCWD, path, buf)
+}
+
+func Rename(oldpath string, newpath string) (err error) {
+ return Renameat(_AT_FDCWD, oldpath, _AT_FDCWD, newpath)
+}
+
+func Rmdir(path string) error {
+ return unlinkat(_AT_FDCWD, path, _AT_REMOVEDIR)
+}
+
+//sys symlinkat(oldpath string, newdirfd int, newpath string) (err error)
+
+func Symlink(oldpath string, newpath string) (err error) {
+ return symlinkat(oldpath, _AT_FDCWD, newpath)
+}
+
+func Unlink(path string) error {
+ return unlinkat(_AT_FDCWD, path, 0)
+}
+
+//sys unlinkat(dirfd int, path string, flags int) (err error)
+
+func Unlinkat(dirfd int, path string) error {
+ return unlinkat(dirfd, path, 0)
+}
+
+func Utimes(path string, tv []Timeval) (err error) {
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+//sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error)
+
+func UtimesNano(path string, ts []Timespec) (err error) {
+ if len(ts) != 2 {
+ return EINVAL
+ }
+ return utimensat(_AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
+}
+
+func Futimesat(dirfd int, path string, tv []Timeval) (err error) {
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ return futimesat(dirfd, path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+func Futimes(fd int, tv []Timeval) (err error) {
+ // Believe it or not, this is the best we can do on Linux
+ // (and is what glibc does).
+ return Utimes("/proc/self/fd/"+itoa.Itoa(fd), tv)
+}
+
+const ImplementsGetwd = true
+
+//sys Getcwd(buf []byte) (n int, err error)
+
+func Getwd() (wd string, err error) {
+ var buf [PathMax]byte
+ n, err := Getcwd(buf[0:])
+ if err != nil {
+ return "", err
+ }
+ // Getcwd returns the number of bytes written to buf, including the NUL.
+ if n < 1 || n > len(buf) || buf[n-1] != 0 {
+ return "", EINVAL
+ }
+ // In some cases, Linux can return a path that starts with the
+ // "(unreachable)" prefix, which can potentially be a valid relative
+ // path. To work around that, return ENOENT if path is not absolute.
+ if buf[0] != '/' {
+ return "", ENOENT
+ }
+
+ return string(buf[0 : n-1]), nil
+}
+
+func Getgroups() (gids []int, err error) {
+ n, err := getgroups(0, nil)
+ if err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ return nil, nil
+ }
+
+ // Sanity check group count. Max is 1<<16 on Linux.
+ if n < 0 || n > 1<<20 {
+ return nil, EINVAL
+ }
+
+ a := make([]_Gid_t, n)
+ n, err = getgroups(n, &a[0])
+ if err != nil {
+ return nil, err
+ }
+ gids = make([]int, n)
+ for i, v := range a[0:n] {
+ gids[i] = int(v)
+ }
+ return
+}
+
+var cgo_libc_setgroups unsafe.Pointer // non-nil if cgo linked.
+
+func Setgroups(gids []int) (err error) {
+ n := uintptr(len(gids))
+ if n == 0 {
+ if cgo_libc_setgroups == nil {
+ if _, _, e1 := AllThreadsSyscall(_SYS_setgroups, 0, 0, 0); e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+ }
+ if ret := cgocaller(cgo_libc_setgroups, 0, 0); ret != 0 {
+ err = errnoErr(Errno(ret))
+ }
+ return
+ }
+
+ a := make([]_Gid_t, len(gids))
+ for i, v := range gids {
+ a[i] = _Gid_t(v)
+ }
+ if cgo_libc_setgroups == nil {
+ if _, _, e1 := AllThreadsSyscall(_SYS_setgroups, n, uintptr(unsafe.Pointer(&a[0])), 0); e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+ }
+ if ret := cgocaller(cgo_libc_setgroups, n, uintptr(unsafe.Pointer(&a[0]))); ret != 0 {
+ err = errnoErr(Errno(ret))
+ }
+ return
+}
+
+type WaitStatus uint32
+
+// Wait status is 7 bits at bottom, either 0 (exited),
+// 0x7F (stopped), or a signal number that caused an exit.
+// The 0x80 bit is whether there was a core dump.
+// An extra number (exit code, signal causing a stop)
+// is in the high bits. At least that's the idea.
+// There are various irregularities. For example, the
+// "continued" status is 0xFFFF, distinguishing itself
+// from stopped via the core dump bit.
+
+const (
+ mask = 0x7F
+ core = 0x80
+ exited = 0x00
+ stopped = 0x7F
+ shift = 8
+)
+
+func (w WaitStatus) Exited() bool { return w&mask == exited }
+
+func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != exited }
+
+func (w WaitStatus) Stopped() bool { return w&0xFF == stopped }
+
+func (w WaitStatus) Continued() bool { return w == 0xFFFF }
+
+func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 }
+
+func (w WaitStatus) ExitStatus() int {
+ if !w.Exited() {
+ return -1
+ }
+ return int(w>>shift) & 0xFF
+}
+
+func (w WaitStatus) Signal() Signal {
+ if !w.Signaled() {
+ return -1
+ }
+ return Signal(w & mask)
+}
+
+func (w WaitStatus) StopSignal() Signal {
+ if !w.Stopped() {
+ return -1
+ }
+ return Signal(w>>shift) & 0xFF
+}
+
+func (w WaitStatus) TrapCause() int {
+ if w.StopSignal() != SIGTRAP {
+ return -1
+ }
+ return int(w>>shift) >> 8
+}
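+
+// Decoding examples for the layout described above (illustrative values):
+//
+//	WaitStatus(0x2a00).Exited()     // true; ExitStatus() == 42 (high byte)
+//	WaitStatus(0x000b).Signaled()   // true; Signal() == SIGSEGV (11)
+//	WaitStatus(0x137f).Stopped()    // true; StopSignal() == SIGSTOP (19)
+//	WaitStatus(0xffff).Continued()  // true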
+
+//sys wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error)
+
+func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) {
+ var status _C_int
+ wpid, err = wait4(pid, &status, options, rusage)
+ if wstatus != nil {
+ *wstatus = WaitStatus(status)
+ }
+ return
+}
+
+func Mkfifo(path string, mode uint32) (err error) {
+ return Mknod(path, mode|S_IFIFO, 0)
+}
+
+func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_INET
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ sa.raw.Addr = sa.Addr
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil
+}
+
+func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_INET6
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ sa.raw.Scope_id = sa.ZoneId
+ sa.raw.Addr = sa.Addr
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil
+}
+
+func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ name := sa.Name
+ n := len(name)
+ if n > len(sa.raw.Path) {
+ return nil, 0, EINVAL
+ }
+ if n == len(sa.raw.Path) && name[0] != '@' {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_UNIX
+ for i := 0; i < n; i++ {
+ sa.raw.Path[i] = int8(name[i])
+ }
+ // length is family (uint16), name, NUL.
+ sl := _Socklen(2)
+ if n > 0 {
+ sl += _Socklen(n) + 1
+ }
+ if sa.raw.Path[0] == '@' {
+ sa.raw.Path[0] = 0
+ // Don't count trailing NUL for abstract address.
+ sl--
+ }
+
+ return unsafe.Pointer(&sa.raw), sl, nil
+}
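+
+// For example (illustrative): an abstract address with Name = "@demo" has
+// n = 5, so sl = 2 (family) + 5 (name) + 1 (NUL) = 8; the leading '@' is then
+// rewritten to NUL and the trailing NUL dropped, leaving sl = 7.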
+
+type SockaddrLinklayer struct {
+ Protocol uint16
+ Ifindex int
+ Hatype uint16
+ Pkttype uint8
+ Halen uint8
+ Addr [8]byte
+ raw RawSockaddrLinklayer
+}
+
+func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_PACKET
+ sa.raw.Protocol = sa.Protocol
+ sa.raw.Ifindex = int32(sa.Ifindex)
+ sa.raw.Hatype = sa.Hatype
+ sa.raw.Pkttype = sa.Pkttype
+ sa.raw.Halen = sa.Halen
+ sa.raw.Addr = sa.Addr
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil
+}
+
+type SockaddrNetlink struct {
+ Family uint16
+ Pad uint16
+ Pid uint32
+ Groups uint32
+ raw RawSockaddrNetlink
+}
+
+func (sa *SockaddrNetlink) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ sa.raw.Family = AF_NETLINK
+ sa.raw.Pad = sa.Pad
+ sa.raw.Pid = sa.Pid
+ sa.raw.Groups = sa.Groups
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrNetlink, nil
+}
+
+func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
+ switch rsa.Addr.Family {
+ case AF_NETLINK:
+ pp := (*RawSockaddrNetlink)(unsafe.Pointer(rsa))
+ sa := new(SockaddrNetlink)
+ sa.Family = pp.Family
+ sa.Pad = pp.Pad
+ sa.Pid = pp.Pid
+ sa.Groups = pp.Groups
+ return sa, nil
+
+ case AF_PACKET:
+ pp := (*RawSockaddrLinklayer)(unsafe.Pointer(rsa))
+ sa := new(SockaddrLinklayer)
+ sa.Protocol = pp.Protocol
+ sa.Ifindex = int(pp.Ifindex)
+ sa.Hatype = pp.Hatype
+ sa.Pkttype = pp.Pkttype
+ sa.Halen = pp.Halen
+ sa.Addr = pp.Addr
+ return sa, nil
+
+ case AF_UNIX:
+ pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
+ sa := new(SockaddrUnix)
+ if pp.Path[0] == 0 {
+ // "Abstract" Unix domain socket.
+ // Rewrite leading NUL as @ for textual display.
+ // (This is the standard convention.)
+ // Not friendly to overwrite in place,
+ // but the callers below don't care.
+ pp.Path[0] = '@'
+ }
+
+ // Assume path ends at NUL.
+ // This is not technically the Linux semantics for
+ // abstract Unix domain sockets--they are supposed
+ // to be uninterpreted fixed-size binary blobs--but
+ // everyone uses this convention.
+ n := 0
+ for n < len(pp.Path) && pp.Path[n] != 0 {
+ n++
+ }
+ bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
+ sa.Name = string(bytes)
+ return sa, nil
+
+ case AF_INET:
+ pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet4)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ sa.Addr = pp.Addr
+ return sa, nil
+
+ case AF_INET6:
+ pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet6)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ sa.ZoneId = pp.Scope_id
+ sa.Addr = pp.Addr
+ return sa, nil
+ }
+ return nil, EAFNOSUPPORT
+}
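+
+// Note on the port decoding above: the raw sockaddr carries the port in
+// network byte order, so int(p[0])<<8 + int(p[1]) reconstructs, for example,
+// port 8080 from the bytes {0x1f, 0x90}.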
+
+func Accept(fd int) (nfd int, sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ nfd, err = accept4(fd, &rsa, &len, 0)
+ if err != nil {
+ return
+ }
+ sa, err = anyToSockaddr(&rsa)
+ if err != nil {
+ Close(nfd)
+ nfd = 0
+ }
+ return
+}
+
+func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ nfd, err = accept4(fd, &rsa, &len, flags)
+ if err != nil {
+ return
+ }
+ if len > SizeofSockaddrAny {
+ panic("RawSockaddrAny too small")
+ }
+ sa, err = anyToSockaddr(&rsa)
+ if err != nil {
+ Close(nfd)
+ nfd = 0
+ }
+ return
+}
+
+func Getsockname(fd int) (sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ if err = getsockname(fd, &rsa, &len); err != nil {
+ return
+ }
+ return anyToSockaddr(&rsa)
+}
+
+func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) {
+ vallen := _Socklen(4)
+ err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+ return value, err
+}
+
+func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) {
+ var value IPMreq
+ vallen := _Socklen(SizeofIPMreq)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) {
+ var value IPMreqn
+ vallen := _Socklen(SizeofIPMreqn)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) {
+ var value IPv6Mreq
+ vallen := _Socklen(SizeofIPv6Mreq)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) {
+ var value IPv6MTUInfo
+ vallen := _Socklen(SizeofIPv6MTUInfo)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) {
+ var value ICMPv6Filter
+ vallen := _Socklen(SizeofICMPv6Filter)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptUcred(fd, level, opt int) (*Ucred, error) {
+ var value Ucred
+ vallen := _Socklen(SizeofUcred)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq))
+}
+
+func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) {
+ var msg Msghdr
+ msg.Name = (*byte)(unsafe.Pointer(rsa))
+ msg.Namelen = uint32(SizeofSockaddrAny)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = &p[0]
+ iov.SetLen(len(p))
+ }
+ var dummy byte
+ if len(oob) > 0 {
+ if len(p) == 0 {
+ var sockType int
+ sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE)
+ if err != nil {
+ return
+ }
+ // receive at least one normal byte
+ if sockType != SOCK_DGRAM {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ }
+ msg.Control = &oob[0]
+ msg.SetControllen(len(oob))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = recvmsg(fd, &msg, flags); err != nil {
+ return
+ }
+ oobn = int(msg.Controllen)
+ recvflags = int(msg.Flags)
+ return
+}
+
+func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) {
+ var msg Msghdr
+ msg.Name = (*byte)(ptr)
+ msg.Namelen = uint32(salen)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = &p[0]
+ iov.SetLen(len(p))
+ }
+ var dummy byte
+ if len(oob) > 0 {
+ if len(p) == 0 {
+ var sockType int
+ sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE)
+ if err != nil {
+ return 0, err
+ }
+ // send at least one normal byte
+ if sockType != SOCK_DGRAM {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ }
+ msg.Control = &oob[0]
+ msg.SetControllen(len(oob))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = sendmsg(fd, &msg, flags); err != nil {
+ return 0, err
+ }
+ if len(oob) > 0 && len(p) == 0 {
+ n = 0
+ }
+ return n, nil
+}
+
+// BindToDevice binds the socket associated with fd to device.
+func BindToDevice(fd int, device string) (err error) {
+ return SetsockoptString(fd, SOL_SOCKET, SO_BINDTODEVICE, device)
+}
+
+//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error)
+
+func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) {
+	// The peek requests are machine-word oriented, so we wrap them
+	// to retrieve arbitrary-length data.
+
+	// The ptrace syscall differs from glibc's ptrace:
+	// a peek returns the word in *data, not as the return value.
+
+ var buf [sizeofPtr]byte
+
+ // Leading edge. PEEKTEXT/PEEKDATA don't require aligned
+ // access (PEEKUSER warns that it might), but if we don't
+ // align our reads, we might straddle an unmapped page
+ // boundary and not get the bytes leading up to the page
+ // boundary.
+ n := 0
+ if addr%sizeofPtr != 0 {
+ err = ptrace(req, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
+ if err != nil {
+ return 0, err
+ }
+ n += copy(out, buf[addr%sizeofPtr:])
+ out = out[n:]
+ }
+
+ // Remainder.
+ for len(out) > 0 {
+ // We use an internal buffer to guarantee alignment.
+		// It's not documented whether this is necessary, but we're paranoid.
+ err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
+ if err != nil {
+ return n, err
+ }
+ copied := copy(out, buf[0:])
+ n += copied
+ out = out[copied:]
+ }
+
+ return n, nil
+}
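+
+// Alignment example for the leading edge above (assumed values): with
+// sizeofPtr = 8 and addr = 0x1003, the first peek reads the word at 0x1000
+// and copies out buf[3:], so the transfer never starts mid-word.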
+
+func PtracePeekText(pid int, addr uintptr, out []byte) (count int, err error) {
+ return ptracePeek(PTRACE_PEEKTEXT, pid, addr, out)
+}
+
+func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) {
+ return ptracePeek(PTRACE_PEEKDATA, pid, addr, out)
+}
+
+func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (count int, err error) {
+ // As for ptracePeek, we need to align our accesses to deal
+ // with the possibility of straddling an invalid page.
+
+ // Leading edge.
+ n := 0
+ if addr%sizeofPtr != 0 {
+ var buf [sizeofPtr]byte
+ err = ptrace(peekReq, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
+ if err != nil {
+ return 0, err
+ }
+ n += copy(buf[addr%sizeofPtr:], data)
+ word := *((*uintptr)(unsafe.Pointer(&buf[0])))
+ err = ptrace(pokeReq, pid, addr-addr%sizeofPtr, word)
+ if err != nil {
+ return 0, err
+ }
+ data = data[n:]
+ }
+
+ // Interior.
+ for len(data) > sizeofPtr {
+ word := *((*uintptr)(unsafe.Pointer(&data[0])))
+ err = ptrace(pokeReq, pid, addr+uintptr(n), word)
+ if err != nil {
+ return n, err
+ }
+ n += sizeofPtr
+ data = data[sizeofPtr:]
+ }
+
+ // Trailing edge.
+ if len(data) > 0 {
+ var buf [sizeofPtr]byte
+ err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
+ if err != nil {
+ return n, err
+ }
+ copy(buf[0:], data)
+ word := *((*uintptr)(unsafe.Pointer(&buf[0])))
+ err = ptrace(pokeReq, pid, addr+uintptr(n), word)
+ if err != nil {
+ return n, err
+ }
+ n += len(data)
+ }
+
+ return n, nil
+}
+
+func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) {
+ return ptracePoke(PTRACE_POKETEXT, PTRACE_PEEKTEXT, pid, addr, data)
+}
+
+func PtracePokeData(pid int, addr uintptr, data []byte) (count int, err error) {
+ return ptracePoke(PTRACE_POKEDATA, PTRACE_PEEKDATA, pid, addr, data)
+}
+
+func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) {
+ return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+}
+
+func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) {
+ return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+}
+
+func PtraceSetOptions(pid int, options int) (err error) {
+ return ptrace(PTRACE_SETOPTIONS, pid, 0, uintptr(options))
+}
+
+func PtraceGetEventMsg(pid int) (msg uint, err error) {
+ var data _C_long
+ err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data)))
+ msg = uint(data)
+ return
+}
+
+func PtraceCont(pid int, signal int) (err error) {
+ return ptrace(PTRACE_CONT, pid, 0, uintptr(signal))
+}
+
+func PtraceSyscall(pid int, signal int) (err error) {
+ return ptrace(PTRACE_SYSCALL, pid, 0, uintptr(signal))
+}
+
+func PtraceSingleStep(pid int) (err error) { return ptrace(PTRACE_SINGLESTEP, pid, 0, 0) }
+
+func PtraceAttach(pid int) (err error) { return ptrace(PTRACE_ATTACH, pid, 0, 0) }
+
+func PtraceDetach(pid int) (err error) { return ptrace(PTRACE_DETACH, pid, 0, 0) }
+
+//sys reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error)
+
+func Reboot(cmd int) (err error) {
+ return reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, "")
+}
+
+func ReadDirent(fd int, buf []byte) (n int, err error) {
+ return Getdents(fd, buf)
+}
+
+func direntIno(buf []byte) (uint64, bool) {
+ return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
+}
+
+func direntReclen(buf []byte) (uint64, bool) {
+ return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
+}
+
+func direntNamlen(buf []byte) (uint64, bool) {
+ reclen, ok := direntReclen(buf)
+ if !ok {
+ return 0, false
+ }
+ return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true
+}
+
+//sys mount(source string, target string, fstype string, flags uintptr, data *byte) (err error)
+
+func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
+ // Certain file systems get rather angry and EINVAL if you give
+ // them an empty string of data, rather than NULL.
+ if data == "" {
+ return mount(source, target, fstype, flags, nil)
+ }
+ datap, err := BytePtrFromString(data)
+ if err != nil {
+ return err
+ }
+ return mount(source, target, fstype, flags, datap)
+}
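+
+// Usage sketch (assumed mount point; requires privilege):
+//
+//	// Mount a tmpfs, passing options through the data string.
+//	err := Mount("tmpfs", "/mnt/scratch", "tmpfs", 0, "size=64m")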
+
+// Sendto
+// Recvfrom
+// Socketpair
+
+/*
+ * Direct access
+ */
+//sys Acct(path string) (err error)
+//sys Adjtimex(buf *Timex) (state int, err error)
+//sys Chdir(path string) (err error)
+//sys Chroot(path string) (err error)
+//sys Close(fd int) (err error)
+//sys Dup(oldfd int) (fd int, err error)
+//sys Dup3(oldfd int, newfd int, flags int) (err error)
+//sysnb EpollCreate1(flag int) (fd int, err error)
+//sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error)
+//sys Fallocate(fd int, mode uint32, off int64, len int64) (err error)
+//sys Fchdir(fd int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error)
+//sys fcntl(fd int, cmd int, arg int) (val int, err error)
+//sys Fdatasync(fd int) (err error)
+//sys Flock(fd int, how int) (err error)
+//sys Fsync(fd int) (err error)
+//sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64
+//sysnb Getpgid(pid int) (pgid int, err error)
+
+func Getpgrp() (pid int) {
+ pid, _ = Getpgid(0)
+ return
+}
+
+//sysnb Getpid() (pid int)
+//sysnb Getppid() (ppid int)
+//sys Getpriority(which int, who int) (prio int, err error)
+//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Gettid() (tid int)
+//sys Getxattr(path string, attr string, dest []byte) (sz int, err error)
+//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error)
+//sysnb InotifyInit1(flags int) (fd int, err error)
+//sysnb InotifyRmWatch(fd int, watchdesc uint32) (success int, err error)
+//sysnb Kill(pid int, sig Signal) (err error)
+//sys Klogctl(typ int, buf []byte) (n int, err error) = SYS_SYSLOG
+//sys Listxattr(path string, dest []byte) (sz int, err error)
+//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
+//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
+//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
+//sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT
+//sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64
+//sys read(fd int, p []byte) (n int, err error)
+//sys Removexattr(path string, attr string) (err error)
+//sys Setdomainname(p []byte) (err error)
+//sys Sethostname(p []byte) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sysnb Setsid() (pid int, err error)
+//sysnb Settimeofday(tv *Timeval) (err error)
+
+// Provided by runtime.syscall_runtime_doAllThreadsSyscall, which stops the
+// world and invokes the syscall on each OS thread. Once this function returns,
+// all threads are in sync.
+//
+//go:uintptrescapes
+func runtime_doAllThreadsSyscall(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
+
+// AllThreadsSyscall performs a syscall on each OS thread of the Go
+// runtime. It first invokes the syscall on one thread. Should that
+// invocation fail, it returns immediately with the error status.
+// Otherwise, it invokes the syscall on all of the remaining threads
+// in parallel. It will terminate the program if it observes that any
+// invoked syscall's return value differs from that of the first
+// invocation.
+//
+// AllThreadsSyscall is intended for emulating simultaneous
+// process-wide state changes that require consistently modifying
+// per-thread state of the Go runtime.
+//
+// AllThreadsSyscall is unaware of any threads that are launched
+// explicitly by cgo linked code, so the function always returns
+// ENOTSUP in binaries that use cgo.
+//
+//go:uintptrescapes
+func AllThreadsSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) {
+ if cgo_libc_setegid != nil {
+ return minus1, minus1, ENOTSUP
+ }
+ r1, r2, errno := runtime_doAllThreadsSyscall(trap, a1, a2, a3, 0, 0, 0)
+ return r1, r2, Errno(errno)
+}
+
+// AllThreadsSyscall6 is like AllThreadsSyscall, but extended to six
+// arguments.
+//
+//go:uintptrescapes
+func AllThreadsSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) {
+ if cgo_libc_setegid != nil {
+ return minus1, minus1, ENOTSUP
+ }
+ r1, r2, errno := runtime_doAllThreadsSyscall(trap, a1, a2, a3, a4, a5, a6)
+ return r1, r2, Errno(errno)
+}
+
+// linked by runtime/cgocall.go
+//
+//go:uintptrescapes
+func cgocaller(unsafe.Pointer, ...uintptr) uintptr
+
+var cgo_libc_setegid unsafe.Pointer // non-nil if cgo linked.
+
+const minus1 = ^uintptr(0)
+
+func Setegid(egid int) (err error) {
+ if cgo_libc_setegid == nil {
+ if _, _, e1 := AllThreadsSyscall(SYS_SETRESGID, minus1, uintptr(egid), minus1); e1 != 0 {
+ err = errnoErr(e1)
+ }
+ } else if ret := cgocaller(cgo_libc_setegid, uintptr(egid)); ret != 0 {
+ err = errnoErr(Errno(ret))
+ }
+ return
+}
+
+var cgo_libc_seteuid unsafe.Pointer // non-nil if cgo linked.
+
+func Seteuid(euid int) (err error) {
+ if cgo_libc_seteuid == nil {
+ if _, _, e1 := AllThreadsSyscall(SYS_SETRESUID, minus1, uintptr(euid), minus1); e1 != 0 {
+ err = errnoErr(e1)
+ }
+ } else if ret := cgocaller(cgo_libc_seteuid, uintptr(euid)); ret != 0 {
+ err = errnoErr(Errno(ret))
+ }
+ return
+}
+
+var cgo_libc_setgid unsafe.Pointer // non-nil if cgo linked.
+
+func Setgid(gid int) (err error) {
+ if cgo_libc_setgid == nil {
+ if _, _, e1 := AllThreadsSyscall(sys_SETGID, uintptr(gid), 0, 0); e1 != 0 {
+ err = errnoErr(e1)
+ }
+ } else if ret := cgocaller(cgo_libc_setgid, uintptr(gid)); ret != 0 {
+ err = errnoErr(Errno(ret))
+ }
+ return
+}
+
+var cgo_libc_setregid unsafe.Pointer // non-nil if cgo linked.
+
+func Setregid(rgid, egid int) (err error) {
+ if cgo_libc_setregid == nil {
+ if _, _, e1 := AllThreadsSyscall(sys_SETREGID, uintptr(rgid), uintptr(egid), 0); e1 != 0 {
+ err = errnoErr(e1)
+ }
+ } else if ret := cgocaller(cgo_libc_setregid, uintptr(rgid), uintptr(egid)); ret != 0 {
+ err = errnoErr(Errno(ret))
+ }
+ return
+}
+
+var cgo_libc_setresgid unsafe.Pointer // non-nil if cgo linked.
+
+func Setresgid(rgid, egid, sgid int) (err error) {
+ if cgo_libc_setresgid == nil {
+ if _, _, e1 := AllThreadsSyscall(sys_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)); e1 != 0 {
+ err = errnoErr(e1)
+ }
+ } else if ret := cgocaller(cgo_libc_setresgid, uintptr(rgid), uintptr(egid), uintptr(sgid)); ret != 0 {
+ err = errnoErr(Errno(ret))
+ }
+ return
+}
+
+var cgo_libc_setresuid unsafe.Pointer // non-nil if cgo linked.
+
+func Setresuid(ruid, euid, suid int) (err error) {
+ if cgo_libc_setresuid == nil {
+ if _, _, e1 := AllThreadsSyscall(sys_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)); e1 != 0 {
+ err = errnoErr(e1)
+ }
+ } else if ret := cgocaller(cgo_libc_setresuid, uintptr(ruid), uintptr(euid), uintptr(suid)); ret != 0 {
+ err = errnoErr(Errno(ret))
+ }
+ return
+}
+
+var cgo_libc_setreuid unsafe.Pointer // non-nil if cgo linked.
+
+func Setreuid(ruid, euid int) (err error) {
+ if cgo_libc_setreuid == nil {
+ if _, _, e1 := AllThreadsSyscall(sys_SETREUID, uintptr(ruid), uintptr(euid), 0); e1 != 0 {
+ err = errnoErr(e1)
+ }
+ } else if ret := cgocaller(cgo_libc_setreuid, uintptr(ruid), uintptr(euid)); ret != 0 {
+ err = errnoErr(Errno(ret))
+ }
+ return
+}
+
+var cgo_libc_setuid unsafe.Pointer // non-nil if cgo linked.
+
+func Setuid(uid int) (err error) {
+ if cgo_libc_setuid == nil {
+ if _, _, e1 := AllThreadsSyscall(sys_SETUID, uintptr(uid), 0, 0); e1 != 0 {
+ err = errnoErr(e1)
+ }
+ } else if ret := cgocaller(cgo_libc_setuid, uintptr(uid)); ret != 0 {
+ err = errnoErr(Errno(ret))
+ }
+ return
+}
+
+//sys Setpriority(which int, who int, prio int) (err error)
+//sys Setxattr(path string, attr string, data []byte, flags int) (err error)
+//sys Sync()
+//sysnb Sysinfo(info *Sysinfo_t) (err error)
+//sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error)
+//sysnb Tgkill(tgid int, tid int, sig Signal) (err error)
+//sysnb Times(tms *Tms) (ticks uintptr, err error)
+//sysnb Umask(mask int) (oldmask int)
+//sysnb Uname(buf *Utsname) (err error)
+//sys Unmount(target string, flags int) (err error) = SYS_UMOUNT2
+//sys Unshare(flags int) (err error)
+//sys write(fd int, p []byte) (n int, err error)
+//sys exitThread(code int) (err error) = SYS_EXIT
+//sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ
+//sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE
+
+// mmap varies by architecture; see syscall_linux_*.go.
+//sys munmap(addr uintptr, length uintptr) (err error)
+
+var mapper = &mmapper{
+ active: make(map[*byte][]byte),
+ mmap: mmap,
+ munmap: munmap,
+}
+
+func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+ return mapper.Mmap(fd, offset, length, prot, flags)
+}
+
+func Munmap(b []byte) (err error) {
+ return mapper.Munmap(b)
+}
+
+//sys Madvise(b []byte, advice int) (err error)
+//sys Mprotect(b []byte, prot int) (err error)
+//sys Mlock(b []byte) (err error)
+//sys Munlock(b []byte) (err error)
+//sys Mlockall(flags int) (err error)
+//sys Munlockall() (err error)
diff --git a/contrib/go/_std_1.19/src/syscall/syscall_linux_amd64.go b/contrib/go/_std_1.19/src/syscall/syscall_linux_amd64.go
new file mode 100644
index 0000000000..0bcc664d32
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/syscall_linux_amd64.go
@@ -0,0 +1,122 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syscall
+
+const _SYS_setgroups = SYS_SETGROUPS
+
+//sys Dup2(oldfd int, newfd int) (err error)
+//sysnb EpollCreate(size int) (fd int, err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatfs(fd int, buf *Statfs_t) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (euid int)
+//sysnb Getgid() (gid int)
+//sysnb Getrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Getuid() (uid int)
+//sysnb InotifyInit() (fd int, err error)
+//sys Ioperm(from int, num int, on int) (err error)
+//sys Iopl(level int) (err error)
+//sys Listen(s int, n int) (err error)
+//sys Pause() (err error)
+//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
+//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
+//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
+//sys Setfsgid(gid int) (err error)
+//sys Setfsuid(uid int) (err error)
+//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
+//sys Shutdown(fd int, how int) (err error)
+//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+//sys Statfs(path string, buf *Statfs_t) (err error)
+//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys Ustat(dev int, ubuf *Ustat_t) (err error)
+//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_NEWFSTATAT
+//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb socket(domain int, typ int, proto int) (fd int, err error)
+//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
+//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
+
+func Stat(path string, stat *Stat_t) (err error) {
+ return fstatat(_AT_FDCWD, path, stat, 0)
+}
+
+func Lchown(path string, uid int, gid int) (err error) {
+ return Fchownat(_AT_FDCWD, path, uid, gid, _AT_SYMLINK_NOFOLLOW)
+}
+
+func Lstat(path string, stat *Stat_t) (err error) {
+ return fstatat(_AT_FDCWD, path, stat, _AT_SYMLINK_NOFOLLOW)
+}
+
+//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error)
+
+//go:noescape
+func gettimeofday(tv *Timeval) (err Errno)
+
+func Gettimeofday(tv *Timeval) (err error) {
+ errno := gettimeofday(tv)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
+
+func Time(t *Time_t) (tt Time_t, err error) {
+ var tv Timeval
+ errno := gettimeofday(&tv)
+ if errno != 0 {
+ return 0, errno
+ }
+ if t != nil {
+ *t = Time_t(tv.Sec)
+ }
+ return Time_t(tv.Sec), nil
+}
+
+//sys Utime(path string, buf *Utimbuf) (err error)
+//sys utimes(path string, times *[2]Timeval) (err error)
+
+func setTimespec(sec, nsec int64) Timespec {
+ return Timespec{Sec: sec, Nsec: nsec}
+}
+
+func setTimeval(sec, usec int64) Timeval {
+ return Timeval{Sec: sec, Usec: usec}
+}
+
+func (r *PtraceRegs) PC() uint64 { return r.Rip }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Rip = pc }
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint64(length)
+}
+
+func rawVforkSyscall(trap, a1 uintptr) (r1 uintptr, err Errno)
diff --git a/contrib/go/_std_1.19/src/syscall/syscall_unix.go b/contrib/go/_std_1.19/src/syscall/syscall_unix.go
new file mode 100644
index 0000000000..cf0e238e2f
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/syscall_unix.go
@@ -0,0 +1,518 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package syscall
+
+import (
+ "internal/bytealg"
+ "internal/itoa"
+ "internal/oserror"
+ "internal/race"
+ "internal/unsafeheader"
+ "runtime"
+ "sync"
+ "unsafe"
+)
+
+var (
+ Stdin = 0
+ Stdout = 1
+ Stderr = 2
+)
+
+const (
+ darwin64Bit = (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && sizeofPtr == 8
+ netbsd32Bit = runtime.GOOS == "netbsd" && sizeofPtr == 4
+)
+
+// clen returns the index of the first NULL byte in n or len(n) if n contains no NULL byte.
+func clen(n []byte) int {
+ if i := bytealg.IndexByte(n, 0); i != -1 {
+ return i
+ }
+ return len(n)
+}
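+
+// For example, clen([]byte{'a', 'b', 0, 'c'}) == 2 and clen([]byte("abc")) == 3.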
+
+// Mmap manager, for use by operating system-specific implementations.
+
+type mmapper struct {
+ sync.Mutex
+ active map[*byte][]byte // active mappings; key is last byte in mapping
+ mmap func(addr, length uintptr, prot, flags, fd int, offset int64) (uintptr, error)
+ munmap func(addr uintptr, length uintptr) error
+}
+
+func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+ if length <= 0 {
+ return nil, EINVAL
+ }
+
+ // Map the requested memory.
+ addr, errno := m.mmap(0, uintptr(length), prot, flags, fd, offset)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Use unsafe to turn addr into a []byte.
+ var b []byte
+ hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b))
+ hdr.Data = unsafe.Pointer(addr)
+ hdr.Cap = length
+ hdr.Len = length
+
+ // Register mapping in m and return it.
+ p := &b[cap(b)-1]
+ m.Lock()
+ defer m.Unlock()
+ m.active[p] = b
+ return b, nil
+}
+
+func (m *mmapper) Munmap(data []byte) (err error) {
+ if len(data) == 0 || len(data) != cap(data) {
+ return EINVAL
+ }
+
+ // Find the base of the mapping.
+ p := &data[cap(data)-1]
+ m.Lock()
+ defer m.Unlock()
+ b := m.active[p]
+ if b == nil || &b[0] != &data[0] {
+ return EINVAL
+ }
+
+ // Unmap the memory and update m.
+ if errno := m.munmap(uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))); errno != nil {
+ return errno
+ }
+ delete(m.active, p)
+ return nil
+}
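+
+// The map above is keyed by the address of the mapping's last byte, which
+// lets Munmap look a mapping up from the slice's capacity end; the caller
+// must pass back the original full-length slice. Illustrative round trip
+// (fd is assumed to refer to a file of at least 4096 bytes):
+//
+//	b, err := mapper.Mmap(fd, 0, 4096, PROT_READ, MAP_SHARED)
+//	if err == nil {
+//		err = mapper.Munmap(b)
+//	}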
+
+// An Errno is an unsigned number describing an error condition.
+// It implements the error interface. The zero Errno is by convention
+// a non-error, so code to convert from Errno to error should use:
+//
+// err = nil
+// if errno != 0 {
+// err = errno
+// }
+//
+// Errno values can be tested against error values from the os package
+// using errors.Is. For example:
+//
+// _, _, err := syscall.Syscall(...)
+// if errors.Is(err, fs.ErrNotExist) ...
+type Errno uintptr
+
+func (e Errno) Error() string {
+ if 0 <= int(e) && int(e) < len(errors) {
+ s := errors[e]
+ if s != "" {
+ return s
+ }
+ }
+ return "errno " + itoa.Itoa(int(e))
+}
+
+func (e Errno) Is(target error) bool {
+ switch target {
+ case oserror.ErrPermission:
+ return e == EACCES || e == EPERM
+ case oserror.ErrExist:
+ return e == EEXIST || e == ENOTEMPTY
+ case oserror.ErrNotExist:
+ return e == ENOENT
+ }
+ return false
+}
+
+func (e Errno) Temporary() bool {
+ return e == EINTR || e == EMFILE || e == ENFILE || e.Timeout()
+}
+
+func (e Errno) Timeout() bool {
+ return e == EAGAIN || e == EWOULDBLOCK || e == ETIMEDOUT
+}
+
+// Do the interface allocations only once for common
+// Errno values.
+var (
+ errEAGAIN error = EAGAIN
+ errEINVAL error = EINVAL
+ errENOENT error = ENOENT
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e Errno) error {
+ switch e {
+ case 0:
+ return nil
+ case EAGAIN:
+ return errEAGAIN
+ case EINVAL:
+ return errEINVAL
+ case ENOENT:
+ return errENOENT
+ }
+ return e
+}
+
+// A Signal is a number describing a process signal.
+// It implements the os.Signal interface.
+type Signal int
+
+func (s Signal) Signal() {}
+
+func (s Signal) String() string {
+ if 0 <= s && int(s) < len(signals) {
+ str := signals[s]
+ if str != "" {
+ return str
+ }
+ }
+ return "signal " + itoa.Itoa(int(s))
+}
+
+func Read(fd int, p []byte) (n int, err error) {
+ n, err = read(fd, p)
+ if race.Enabled {
+ if n > 0 {
+ race.WriteRange(unsafe.Pointer(&p[0]), n)
+ }
+ if err == nil {
+ race.Acquire(unsafe.Pointer(&ioSync))
+ }
+ }
+ if msanenabled && n > 0 {
+ msanWrite(unsafe.Pointer(&p[0]), n)
+ }
+ if asanenabled && n > 0 {
+ asanWrite(unsafe.Pointer(&p[0]), n)
+ }
+ return
+}
+
+func Write(fd int, p []byte) (n int, err error) {
+ if race.Enabled {
+ race.ReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ if faketime && (fd == 1 || fd == 2) {
+ n = faketimeWrite(fd, p)
+ if n < 0 {
+ n, err = 0, errnoErr(Errno(-n))
+ }
+ } else {
+ n, err = write(fd, p)
+ }
+ if race.Enabled && n > 0 {
+ race.ReadRange(unsafe.Pointer(&p[0]), n)
+ }
+ if msanenabled && n > 0 {
+ msanRead(unsafe.Pointer(&p[0]), n)
+ }
+ if asanenabled && n > 0 {
+ asanRead(unsafe.Pointer(&p[0]), n)
+ }
+ return
+}
+
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+ n, err = pread(fd, p, offset)
+ if race.Enabled {
+ if n > 0 {
+ race.WriteRange(unsafe.Pointer(&p[0]), n)
+ }
+ if err == nil {
+ race.Acquire(unsafe.Pointer(&ioSync))
+ }
+ }
+ if msanenabled && n > 0 {
+ msanWrite(unsafe.Pointer(&p[0]), n)
+ }
+ if asanenabled && n > 0 {
+ asanWrite(unsafe.Pointer(&p[0]), n)
+ }
+ return
+}
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ if race.Enabled {
+ race.ReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ n, err = pwrite(fd, p, offset)
+ if race.Enabled && n > 0 {
+ race.ReadRange(unsafe.Pointer(&p[0]), n)
+ }
+ if msanenabled && n > 0 {
+ msanRead(unsafe.Pointer(&p[0]), n)
+ }
+ if asanenabled && n > 0 {
+ asanRead(unsafe.Pointer(&p[0]), n)
+ }
+ return
+}
+
+// For testing: clients can set this flag to force
+// creation of IPv6 sockets to return EAFNOSUPPORT.
+var SocketDisableIPv6 bool
+
+type Sockaddr interface {
+ sockaddr() (ptr unsafe.Pointer, len _Socklen, err error) // lowercase; only we can define Sockaddrs
+}
+
+type SockaddrInet4 struct {
+ Port int
+ Addr [4]byte
+ raw RawSockaddrInet4
+}
+
+type SockaddrInet6 struct {
+ Port int
+ ZoneId uint32
+ Addr [16]byte
+ raw RawSockaddrInet6
+}
+
+type SockaddrUnix struct {
+ Name string
+ raw RawSockaddrUnix
+}
+
+func Bind(fd int, sa Sockaddr) (err error) {
+ ptr, n, err := sa.sockaddr()
+ if err != nil {
+ return err
+ }
+ return bind(fd, ptr, n)
+}
+
+func Connect(fd int, sa Sockaddr) (err error) {
+ ptr, n, err := sa.sockaddr()
+ if err != nil {
+ return err
+ }
+ return connect(fd, ptr, n)
+}
+
+func Getpeername(fd int) (sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ if err = getpeername(fd, &rsa, &len); err != nil {
+ return
+ }
+ return anyToSockaddr(&rsa)
+}
+
+func GetsockoptInt(fd, level, opt int) (value int, err error) {
+ var n int32
+ vallen := _Socklen(4)
+ err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen)
+ return int(n), err
+}
+
+func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ if n, err = recvfrom(fd, p, flags, &rsa, &len); err != nil {
+ return
+ }
+ if rsa.Addr.Family != AF_UNSPEC {
+ from, err = anyToSockaddr(&rsa)
+ }
+ return
+}
+
+func recvfromInet4(fd int, p []byte, flags int, from *SockaddrInet4) (n int, err error) {
+ var rsa RawSockaddrAny
+ var socklen _Socklen = SizeofSockaddrAny
+ if n, err = recvfrom(fd, p, flags, &rsa, &socklen); err != nil {
+ return
+ }
+ pp := (*RawSockaddrInet4)(unsafe.Pointer(&rsa))
+ port := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ from.Port = int(port[0])<<8 + int(port[1])
+ from.Addr = pp.Addr
+ return
+}
+
+func recvfromInet6(fd int, p []byte, flags int, from *SockaddrInet6) (n int, err error) {
+ var rsa RawSockaddrAny
+ var socklen _Socklen = SizeofSockaddrAny
+ if n, err = recvfrom(fd, p, flags, &rsa, &socklen); err != nil {
+ return
+ }
+ pp := (*RawSockaddrInet6)(unsafe.Pointer(&rsa))
+ port := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ from.Port = int(port[0])<<8 + int(port[1])
+ from.ZoneId = pp.Scope_id
+ from.Addr = pp.Addr
+ return
+}
+
+func recvmsgInet4(fd int, p, oob []byte, flags int, from *SockaddrInet4) (n, oobn int, recvflags int, err error) {
+ var rsa RawSockaddrAny
+ n, oobn, recvflags, err = recvmsgRaw(fd, p, oob, flags, &rsa)
+ if err != nil {
+ return
+ }
+ pp := (*RawSockaddrInet4)(unsafe.Pointer(&rsa))
+ port := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ from.Port = int(port[0])<<8 + int(port[1])
+ from.Addr = pp.Addr
+ return
+}
+
+func recvmsgInet6(fd int, p, oob []byte, flags int, from *SockaddrInet6) (n, oobn int, recvflags int, err error) {
+ var rsa RawSockaddrAny
+ n, oobn, recvflags, err = recvmsgRaw(fd, p, oob, flags, &rsa)
+ if err != nil {
+ return
+ }
+ pp := (*RawSockaddrInet6)(unsafe.Pointer(&rsa))
+ port := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ from.Port = int(port[0])<<8 + int(port[1])
+ from.ZoneId = pp.Scope_id
+ from.Addr = pp.Addr
+ return
+}
+
+func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ n, oobn, recvflags, err = recvmsgRaw(fd, p, oob, flags, &rsa)
+ // source address is only specified if the socket is unconnected
+ if rsa.Addr.Family != AF_UNSPEC {
+ from, err = anyToSockaddr(&rsa)
+ }
+ return
+}
+
+func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
+ _, err = SendmsgN(fd, p, oob, to, flags)
+ return
+}
+
+func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
+ var ptr unsafe.Pointer
+ var salen _Socklen
+ if to != nil {
+ ptr, salen, err = to.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ }
+ return sendmsgN(fd, p, oob, ptr, salen, flags)
+}
+
+func sendmsgNInet4(fd int, p, oob []byte, to *SockaddrInet4, flags int) (n int, err error) {
+ ptr, salen, err := to.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ return sendmsgN(fd, p, oob, ptr, salen, flags)
+}
+
+func sendmsgNInet6(fd int, p, oob []byte, to *SockaddrInet6, flags int) (n int, err error) {
+ ptr, salen, err := to.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ return sendmsgN(fd, p, oob, ptr, salen, flags)
+}
+
+func sendtoInet4(fd int, p []byte, flags int, to *SockaddrInet4) (err error) {
+ ptr, n, err := to.sockaddr()
+ if err != nil {
+ return err
+ }
+ return sendto(fd, p, flags, ptr, n)
+}
+
+func sendtoInet6(fd int, p []byte, flags int, to *SockaddrInet6) (err error) {
+ ptr, n, err := to.sockaddr()
+ if err != nil {
+ return err
+ }
+ return sendto(fd, p, flags, ptr, n)
+}
+
+func Sendto(fd int, p []byte, flags int, to Sockaddr) (err error) {
+ ptr, n, err := to.sockaddr()
+ if err != nil {
+ return err
+ }
+ return sendto(fd, p, flags, ptr, n)
+}
+
+func SetsockoptByte(fd, level, opt int, value byte) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(&value), 1)
+}
+
+func SetsockoptInt(fd, level, opt int, value int) (err error) {
+ var n = int32(value)
+ return setsockopt(fd, level, opt, unsafe.Pointer(&n), 4)
+}
+
+func SetsockoptInet4Addr(fd, level, opt int, value [4]byte) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(&value[0]), 4)
+}
+
+func SetsockoptIPMreq(fd, level, opt int, mreq *IPMreq) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPMreq)
+}
+
+func SetsockoptIPv6Mreq(fd, level, opt int, mreq *IPv6Mreq) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPv6Mreq)
+}
+
+func SetsockoptICMPv6Filter(fd, level, opt int, filter *ICMPv6Filter) error {
+ return setsockopt(fd, level, opt, unsafe.Pointer(filter), SizeofICMPv6Filter)
+}
+
+func SetsockoptLinger(fd, level, opt int, l *Linger) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(l), SizeofLinger)
+}
+
+func SetsockoptString(fd, level, opt int, s string) (err error) {
+ var p unsafe.Pointer
+ if len(s) > 0 {
+ p = unsafe.Pointer(&[]byte(s)[0])
+ }
+ return setsockopt(fd, level, opt, p, uintptr(len(s)))
+}
+
+func SetsockoptTimeval(fd, level, opt int, tv *Timeval) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(tv), unsafe.Sizeof(*tv))
+}
+
+func Socket(domain, typ, proto int) (fd int, err error) {
+ if domain == AF_INET6 && SocketDisableIPv6 {
+ return -1, EAFNOSUPPORT
+ }
+ fd, err = socket(domain, typ, proto)
+ return
+}
+
+func Socketpair(domain, typ, proto int) (fd [2]int, err error) {
+ var fdx [2]int32
+ err = socketpair(domain, typ, proto, &fdx)
+ if err == nil {
+ fd[0] = int(fdx[0])
+ fd[1] = int(fdx[1])
+ }
+ return
+}
+
+func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ if race.Enabled {
+ race.ReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ return sendfile(outfd, infd, offset, count)
+}
+
+var ioSync int64
diff --git a/contrib/go/_std_1.18/src/syscall/time_nofake.go b/contrib/go/_std_1.19/src/syscall/time_nofake.go
index 231875d8c3..231875d8c3 100644
--- a/contrib/go/_std_1.18/src/syscall/time_nofake.go
+++ b/contrib/go/_std_1.19/src/syscall/time_nofake.go
diff --git a/contrib/go/_std_1.19/src/syscall/timestruct.go b/contrib/go/_std_1.19/src/syscall/timestruct.go
new file mode 100644
index 0000000000..8a03171ee5
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/timestruct.go
@@ -0,0 +1,36 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm)
+
+package syscall
+
+// TimespecToNsec returns the time stored in ts as nanoseconds.
+func TimespecToNsec(ts Timespec) int64 { return ts.Nano() }
+
+// NsecToTimespec converts a number of nanoseconds into a Timespec.
+func NsecToTimespec(nsec int64) Timespec {
+ sec := nsec / 1e9
+ nsec = nsec % 1e9
+ if nsec < 0 {
+ nsec += 1e9
+ sec--
+ }
+ return setTimespec(sec, nsec)
+}
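+
+// For example, NsecToTimespec(-1) yields Timespec{Sec: -1, Nsec: 999999999},
+// keeping Nsec in [0, 1e9).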
+
+// TimevalToNsec returns the time stored in tv as nanoseconds.
+func TimevalToNsec(tv Timeval) int64 { return tv.Nano() }
+
+// NsecToTimeval converts a number of nanoseconds into a Timeval.
+func NsecToTimeval(nsec int64) Timeval {
+ nsec += 999 // round up to microsecond
+ usec := nsec % 1e9 / 1e3
+ sec := nsec / 1e9
+ if usec < 0 {
+ usec += 1e6
+ sec--
+ }
+ return setTimeval(sec, usec)
+}
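+
+// For example, NsecToTimeval(1500) rounds up to Timeval{Sec: 0, Usec: 2}.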
diff --git a/contrib/go/_std_1.18/src/syscall/zerrors_darwin_amd64.go b/contrib/go/_std_1.19/src/syscall/zerrors_darwin_amd64.go
index ecbe89c547..ecbe89c547 100644
--- a/contrib/go/_std_1.18/src/syscall/zerrors_darwin_amd64.go
+++ b/contrib/go/_std_1.19/src/syscall/zerrors_darwin_amd64.go
diff --git a/contrib/go/_std_1.18/src/syscall/zerrors_linux_amd64.go b/contrib/go/_std_1.19/src/syscall/zerrors_linux_amd64.go
index 3ff6e498e7..3ff6e498e7 100644
--- a/contrib/go/_std_1.18/src/syscall/zerrors_linux_amd64.go
+++ b/contrib/go/_std_1.19/src/syscall/zerrors_linux_amd64.go
diff --git a/contrib/go/_std_1.19/src/syscall/zsyscall_darwin_amd64.go b/contrib/go/_std_1.19/src/syscall/zsyscall_darwin_amd64.go
new file mode 100644
index 0000000000..ee78a572fc
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/zsyscall_darwin_amd64.go
@@ -0,0 +1,2004 @@
+// mksyscall.pl -darwin -tags darwin,amd64 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go
+// Code generated by the command above; DO NOT EDIT.
+
+//go:build darwin && amd64
+
+package syscall
+
+import "unsafe"
+import "internal/abi"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
+ r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_getgroups_trampoline()
+
+//go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setgroups(ngid int, gid *_Gid_t) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_setgroups_trampoline()
+
+//go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
+ r0, _, e1 := syscall6(abi.FuncPCABI0(libc_wait4_trampoline), uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
+ wpid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_wait4_trampoline()
+
+//go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_accept_trampoline), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_accept_trampoline()
+
+//go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_bind_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_bind_trampoline()
+
+//go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_connect_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_connect_trampoline()
+
+//go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socket(domain int, typ int, proto int) (fd int, err error) {
+ r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_socket_trampoline), uintptr(domain), uintptr(typ), uintptr(proto))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_socket_trampoline()
+
+//go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
+ _, _, e1 := syscall6(abi.FuncPCABI0(libc_getsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_getsockopt_trampoline()
+
+//go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
+ _, _, e1 := syscall6(abi.FuncPCABI0(libc_setsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_setsockopt_trampoline()
+
+//go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getpeername_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_getpeername_trampoline()
+
+//go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getsockname_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_getsockname_trampoline()
+
+//go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Shutdown(s int, how int) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_shutdown_trampoline), uintptr(s), uintptr(how), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_shutdown_trampoline()
+
+//go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
+ _, _, e1 := rawSyscall6(abi.FuncPCABI0(libc_socketpair_trampoline), uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_socketpair_trampoline()
+
+//go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall6(abi.FuncPCABI0(libc_recvfrom_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_recvfrom_trampoline()
+
+//go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := syscall6(abi.FuncPCABI0(libc_sendto_trampoline), uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_sendto_trampoline()
+
+//go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_recvmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_recvmsg_trampoline()
+
+//go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_sendmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_sendmsg_trampoline()
+
+//go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) {
+ r0, _, e1 := syscall6(abi.FuncPCABI0(libc_kevent_trampoline), uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_kevent_trampoline()
+
+//go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimes(path string, timeval *[2]Timeval) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_utimes_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_utimes_trampoline()
+
+//go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func futimes(fd int, timeval *[2]Timeval) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_futimes_trampoline), uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_futimes_trampoline()
+
+//go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntl(fd int, cmd int, arg int) (val int, err error) {
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
+ val = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_fcntl_trampoline()
+
+//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe(p *[2]int32) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_pipe_trampoline), uintptr(unsafe.Pointer(p)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_pipe_trampoline()
+
+//go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(abi.FuncPCABI0(libc_utimensat_trampoline), uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_utimensat_trampoline()
+
+//go:cgo_import_dynamic libc_utimensat utimensat "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func kill(pid int, signum int, posix int) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_kill_trampoline()
+
+//go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Access(path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_access_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_access_trampoline()
+
+//go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_adjtime_trampoline), uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_adjtime_trampoline()
+
+//go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_chdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_chdir_trampoline()
+
+//go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chflags(path string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_chflags_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_chflags_trampoline()
+
+//go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chmod(path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_chmod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_chmod_trampoline()
+
+//go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chown(path string, uid int, gid int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_chown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_chown_trampoline()
+
+//go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chroot(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_chroot_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_chroot_trampoline()
+
+//go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_close_trampoline), uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_close_trampoline()
+
+//go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func closedir(dir uintptr) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_closedir_trampoline), uintptr(dir), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_closedir_trampoline()
+
+//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(fd int) (nfd int, err error) {
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_dup_trampoline), uintptr(fd), 0, 0)
+ nfd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_dup_trampoline()
+
+//go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup2(from int, to int) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_dup2_trampoline), uintptr(from), uintptr(to), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_dup2_trampoline()
+
+//go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Exchangedata(path1 string, path2 string, options int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path1)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(path2)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_exchangedata_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_exchangedata_trampoline()
+
+//go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchdir(fd int) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_fchdir_trampoline), uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_fchdir_trampoline()
+
+//go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchflags(fd int, flags int) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_fchflags_trampoline), uintptr(fd), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_fchflags_trampoline()
+
+//go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmod(fd int, mode uint32) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_fchmod_trampoline), uintptr(fd), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_fchmod_trampoline()
+
+//go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchown(fd int, uid int, gid int) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_fchown_trampoline), uintptr(fd), uintptr(uid), uintptr(gid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_fchown_trampoline()
+
+//go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Flock(fd int, how int) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_flock_trampoline), uintptr(fd), uintptr(how), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_flock_trampoline()
+
+//go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fpathconf(fd int, name int) (val int, err error) {
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_fpathconf_trampoline), uintptr(fd), uintptr(name), 0)
+ val = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_fpathconf_trampoline()
+
+//go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fsync(fd int) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_fsync_trampoline), uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_fsync_trampoline()
+
+//go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ftruncate(fd int, length int64) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_ftruncate_trampoline), uintptr(fd), uintptr(length), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_ftruncate_trampoline()
+
+//go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getdtablesize() (size int) {
+ r0, _, _ := syscall(abi.FuncPCABI0(libc_getdtablesize_trampoline), 0, 0, 0)
+ size = int(r0)
+ return
+}
+
+func libc_getdtablesize_trampoline()
+
+//go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getegid() (egid int) {
+ r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getegid_trampoline), 0, 0, 0)
+ egid = int(r0)
+ return
+}
+
+func libc_getegid_trampoline()
+
+//go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Geteuid() (uid int) {
+ r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_geteuid_trampoline), 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+func libc_geteuid_trampoline()
+
+//go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getgid() (gid int) {
+ r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getgid_trampoline), 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+func libc_getgid_trampoline()
+
+//go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpgid(pid int) (pgid int, err error) {
+ r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getpgid_trampoline), uintptr(pid), 0, 0)
+ pgid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_getpgid_trampoline()
+
+//go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpgrp() (pgrp int) {
+ r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getpgrp_trampoline), 0, 0, 0)
+ pgrp = int(r0)
+ return
+}
+
+func libc_getpgrp_trampoline()
+
+//go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpid() (pid int) {
+ r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getpid_trampoline), 0, 0, 0)
+ pid = int(r0)
+ return
+}
+
+func libc_getpid_trampoline()
+
+//go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getppid() (ppid int) {
+ r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getppid_trampoline), 0, 0, 0)
+ ppid = int(r0)
+ return
+}
+
+func libc_getppid_trampoline()
+
+//go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpriority(which int, who int) (prio int, err error) {
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_getpriority_trampoline), uintptr(which), uintptr(who), 0)
+ prio = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_getpriority_trampoline()
+
+//go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrlimit(which int, lim *Rlimit) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_getrlimit_trampoline()
+
+//go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrusage(who int, rusage *Rusage) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getrusage_trampoline), uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_getrusage_trampoline()
+
+//go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getsid(pid int) (sid int, err error) {
+ r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_getsid_trampoline), uintptr(pid), 0, 0)
+ sid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_getsid_trampoline()
+
+//go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getuid() (uid int) {
+ r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_getuid_trampoline), 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+func libc_getuid_trampoline()
+
+//go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Issetugid() (tainted bool) {
+ r0, _, _ := rawSyscall(abi.FuncPCABI0(libc_issetugid_trampoline), 0, 0, 0)
+ tainted = bool(r0 != 0)
+ return
+}
+
+func libc_issetugid_trampoline()
+
+//go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Kqueue() (fd int, err error) {
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_kqueue_trampoline), 0, 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_kqueue_trampoline()
+
+//go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lchown(path string, uid int, gid int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_lchown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_lchown_trampoline()
+
+//go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Link(path string, link string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(link)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_link_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_link_trampoline()
+
+//go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listen(s int, backlog int) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_listen_trampoline), uintptr(s), uintptr(backlog), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_listen_trampoline()
+
+//go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkdir(path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_mkdir_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_mkdir_trampoline()
+
+//go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkfifo(path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_mkfifo_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_mkfifo_trampoline()
+
+//go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mknod(path string, mode uint32, dev int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_mknod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_mknod_trampoline()
+
+//go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlock(b []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_mlock_trampoline), uintptr(_p0), uintptr(len(b)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_mlock_trampoline()
+
+//go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlockall(flags int) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_mlockall_trampoline), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_mlockall_trampoline()
+
+//go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mprotect(b []byte, prot int) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_mprotect_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(prot))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_mprotect_trampoline()
+
+//go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlock(b []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_munlock_trampoline), uintptr(_p0), uintptr(len(b)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_munlock_trampoline()
+
+//go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlockall() (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_munlockall_trampoline), 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_munlockall_trampoline()
+
+//go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Open(path string, mode int, perm uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_open_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_open_trampoline()
+
+//go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pathconf(path string, name int) (val int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_pathconf_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
+ val = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_pathconf_trampoline()
+
+//go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pread(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall6(abi.FuncPCABI0(libc_pread_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_pread_trampoline()
+
+//go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall6(abi.FuncPCABI0(libc_pwrite_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_pwrite_trampoline()
+
+//go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func read(fd int, p []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_read_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_read_trampoline()
+
+//go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
+ r0, _, _ := syscall(abi.FuncPCABI0(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result)))
+ res = Errno(r0)
+ return
+}
+
+func libc_readdir_r_trampoline()
+
+//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Readlink(path string, buf []byte) (n int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(buf) > 0 {
+ _p1 = unsafe.Pointer(&buf[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_readlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_readlink_trampoline()
+
+//go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Rename(from string, to string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(from)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(to)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_rename_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_rename_trampoline()
+
+//go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Revoke(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_revoke_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_revoke_trampoline()
+
+//go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Rmdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_rmdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_rmdir_trampoline()
+
+//go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
+ r0, _, e1 := syscallX(abi.FuncPCABI0(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(whence))
+ newoffset = int64(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_lseek_trampoline()
+
+//go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) {
+ _, _, e1 := syscall6(abi.FuncPCABI0(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_select_trampoline()
+
+//go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setegid(egid int) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_setegid_trampoline), uintptr(egid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_setegid_trampoline()
+
+//go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seteuid(euid int) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_seteuid_trampoline), uintptr(euid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_seteuid_trampoline()
+
+//go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setgid(gid int) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setgid_trampoline), uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_setgid_trampoline()
+
+//go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setlogin(name string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_setlogin_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_setlogin_trampoline()
+
+//go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpgid(pid int, pgid int) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setpgid_trampoline), uintptr(pid), uintptr(pgid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_setpgid_trampoline()
+
+//go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpriority(which int, who int, prio int) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_setpriority_trampoline), uintptr(which), uintptr(who), uintptr(prio))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_setpriority_trampoline()
+
+//go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setprivexec(flag int) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_setprivexec_trampoline), uintptr(flag), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_setprivexec_trampoline()
+
+//go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setregid(rgid int, egid int) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setregid_trampoline), uintptr(rgid), uintptr(egid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_setregid_trampoline()
+
+//go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setreuid(ruid int, euid int) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setreuid_trampoline), uintptr(ruid), uintptr(euid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_setreuid_trampoline()
+
+//go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setrlimit(which int, lim *Rlimit) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_setrlimit_trampoline()
+
+//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setsid() (pid int, err error) {
+ r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setsid_trampoline), 0, 0, 0)
+ pid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_setsid_trampoline()
+
+//go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Settimeofday(tp *Timeval) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_settimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_settimeofday_trampoline()
+
+//go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setuid(uid int) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_setuid_trampoline), uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_setuid_trampoline()
+
+//go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Symlink(path string, link string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(link)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_symlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_symlink_trampoline()
+
+//go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sync() (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_sync_trampoline), 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_sync_trampoline()
+
+//go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Truncate(path string, length int64) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_truncate_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_truncate_trampoline()
+
+//go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Umask(newmask int) (oldmask int) {
+ r0, _, _ := syscall(abi.FuncPCABI0(libc_umask_trampoline), uintptr(newmask), 0, 0)
+ oldmask = int(r0)
+ return
+}
+
+func libc_umask_trampoline()
+
+//go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Undelete(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_undelete_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_undelete_trampoline()
+
+//go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unlink(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_unlink_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_unlink_trampoline()
+
+//go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unmount(path string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_unmount_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_unmount_trampoline()
+
+//go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func write(fd int, p []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_write_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_write_trampoline()
+
+//go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writev(fd int, iovecs []Iovec) (cnt uintptr, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscallX(abi.FuncPCABI0(libc_writev_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+ cnt = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_writev_trampoline()
+
+//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
+ r0, _, e1 := syscall6X(abi.FuncPCABI0(libc_mmap_trampoline), uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos))
+ ret = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_mmap_trampoline()
+
+//go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func munmap(addr uintptr, length uintptr) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_munmap_trampoline), uintptr(addr), uintptr(length), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_munmap_trampoline()
+
+//go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fork() (pid int, err error) {
+ r0, _, e1 := rawSyscall(abi.FuncPCABI0(libc_fork_trampoline), 0, 0, 0)
+ pid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_fork_trampoline()
+
+//go:cgo_import_dynamic libc_fork fork "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctl(fd int, req int, arg int) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_ioctl_trampoline()
+
+//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func execve(path *byte, argv **byte, envp **byte) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_execve_trampoline), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(argv)), uintptr(unsafe.Pointer(envp)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_execve_trampoline()
+
+//go:cgo_import_dynamic libc_execve execve "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func exit(res int) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_exit_trampoline), uintptr(res), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_exit_trampoline()
+
+//go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+ var _p0 unsafe.Pointer
+ if len(mib) > 0 {
+ _p0 = unsafe.Pointer(&mib[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := syscall6(abi.FuncPCABI0(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_sysctl_trampoline()
+
+//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) {
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg))
+ val = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func unlinkat(fd int, path string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_unlinkat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_unlinkat_trampoline()
+
+//go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openat(fd int, path string, flags int, perm uint32) (fdret int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := syscall6(abi.FuncPCABI0(libc_openat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(perm), 0, 0)
+ fdret = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_openat_trampoline()
+
+//go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getcwd(buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall(abi.FuncPCABI0(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_getcwd_trampoline()
+
+//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, stat *Stat_t) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_fstat64_trampoline()
+
+//go:cgo_import_dynamic libc_fstat64 fstat64 "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstatfs(fd int, stat *Statfs_t) (err error) {
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_fstatfs64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_fstatfs64_trampoline()
+
+//go:cgo_import_dynamic libc_fstatfs64 fstatfs64 "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettimeofday(tp *Timeval) (err error) {
+ _, _, e1 := rawSyscall(abi.FuncPCABI0(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_gettimeofday_trampoline()
+
+//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lstat(path string, stat *Stat_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_lstat64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_lstat64_trampoline()
+
+//go:cgo_import_dynamic libc_lstat64 lstat64 "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Stat(path string, stat *Stat_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_stat64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_stat64_trampoline()
+
+//go:cgo_import_dynamic libc_stat64 stat64 "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Statfs(path string, stat *Statfs_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall(abi.FuncPCABI0(libc_statfs64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_statfs64_trampoline()
+
+//go:cgo_import_dynamic libc_statfs64 statfs64 "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall6(abi.FuncPCABI0(libc_fstatat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_fstatat64_trampoline()
+
+//go:cgo_import_dynamic libc_fstatat64 fstatat64 "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+//go:nosplit
+func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) {
+ _, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func libc_ptrace_trampoline()
+
+//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib"
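
Every wrapper in this generated file has the same shape: the Go function dispatches through abi.FuncPCABI0 to an assembly trampoline, and the //go:cgo_import_dynamic pragma binds that trampoline to the matching symbol in libSystem.B.dylib, so darwin never issues raw syscall numbers. From user code the indirection is invisible; a minimal sketch:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// On darwin/amd64 these calls resolve through the libc trampolines above
	// (libc_getpid_trampoline, libc_getuid_trampoline, libc_getcwd_trampoline)
	// rather than raw syscall numbers.
	fmt.Println(syscall.Getpid(), syscall.Getuid())
	if wd, err := syscall.Getwd(); err == nil {
		fmt.Println(wd)
	}
}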
diff --git a/contrib/go/_std_1.18/src/syscall/zsyscall_darwin_amd64.s b/contrib/go/_std_1.19/src/syscall/zsyscall_darwin_amd64.s
index 563083d441..563083d441 100644
--- a/contrib/go/_std_1.18/src/syscall/zsyscall_darwin_amd64.s
+++ b/contrib/go/_std_1.19/src/syscall/zsyscall_darwin_amd64.s
diff --git a/contrib/go/_std_1.19/src/syscall/zsyscall_linux_amd64.go b/contrib/go/_std_1.19/src/syscall/zsyscall_linux_amd64.go
new file mode 100644
index 0000000000..07f328e1e2
--- /dev/null
+++ b/contrib/go/_std_1.19/src/syscall/zsyscall_linux_amd64.go
@@ -0,0 +1,1644 @@
+// mksyscall.pl -tags linux,amd64 syscall_linux.go syscall_linux_amd64.go
+// Code generated by the command above; DO NOT EDIT.
+
+//go:build linux && amd64
+
+package syscall
+
+import "unsafe"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func faccessat(dirfd int, path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fchmodat(dirfd int, path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe2(p *[2]_C_int, flags int) (err error) {
+ _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(buf) > 0 {
+ _p1 = unsafe.Pointer(&buf[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func unlinkat(dirfd int, path string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getcwd(buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
+ r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
+ wpid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
+ _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(arg)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(source)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(target)
+ if err != nil {
+ return
+ }
+ var _p2 *byte
+ _p2, err = BytePtrFromString(fstype)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Acct(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Adjtimex(buf *Timex) (state int, err error) {
+ r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0)
+ state = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chroot(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(oldfd int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup3(oldfd int, newfd int, flags int) (err error) {
+ _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCreate1(flag int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
+ _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fallocate(fd int, mode uint32, off int64, len int64) (err error) {
+ _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchdir(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmod(fd int, mode uint32) (err error) {
+ _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntl(fd int, cmd int, arg int) (val int, err error) {
+ r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+ val = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fdatasync(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Flock(fd int, how int) (err error) {
+ _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fsync(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getdents(fd int, buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpgid(pid int) (pgid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
+ pgid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpid() (pid int) {
+ r0, _ := rawSyscallNoError(SYS_GETPID, 0, 0, 0)
+ pid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getppid() (ppid int) {
+ r0, _ := rawSyscallNoError(SYS_GETPPID, 0, 0, 0)
+ ppid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpriority(which int, who int) (prio int, err error) {
+ r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
+ prio = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrusage(who int, rusage *Rusage) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettid() (tid int) {
+ r0, _ := rawSyscallNoError(SYS_GETTID, 0, 0, 0)
+ tid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getxattr(path string, attr string, dest []byte) (sz int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attr)
+ if err != nil {
+ return
+ }
+ var _p2 unsafe.Pointer
+ if len(dest) > 0 {
+ _p2 = unsafe.Pointer(&dest[0])
+ } else {
+ _p2 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
+ sz = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(pathname)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask))
+ watchdesc = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyInit1(flags int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) {
+ r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0)
+ success = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Kill(pid int, sig Signal) (err error) {
+ _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Klogctl(typ int, buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listxattr(path string, dest []byte) (sz int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(dest) > 0 {
+ _p1 = unsafe.Pointer(&dest[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
+ sz = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkdirat(dirfd int, path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func PivotRoot(newroot string, putold string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(newroot)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(putold)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) {
+ _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func read(fd int, p []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Removexattr(path string, attr string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attr)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setdomainname(p []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sethostname(p []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpgid(pid int, pgid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setsid() (pid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
+ pid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Settimeofday(tv *Timeval) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpriority(which int, who int, prio int) (err error) {
+ _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setxattr(path string, attr string, data []byte, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attr)
+ if err != nil {
+ return
+ }
+ var _p2 unsafe.Pointer
+ if len(data) > 0 {
+ _p2 = unsafe.Pointer(&data[0])
+ } else {
+ _p2 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sync() {
+ Syscall(SYS_SYNC, 0, 0, 0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sysinfo(info *Sysinfo_t) (err error) {
+ _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
+ r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0)
+ n = int64(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Tgkill(tgid int, tid int, sig Signal) (err error) {
+ _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Times(tms *Tms) (ticks uintptr, err error) {
+ r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0)
+ ticks = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Umask(mask int) (oldmask int) {
+ r0, _ := rawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0)
+ oldmask = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Uname(buf *Utsname) (err error) {
+ _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unmount(target string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(target)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unshare(flags int) (err error) {
+ _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func write(fd int, p []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func exitThread(code int) (err error) {
+ _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readlen(fd int, p *byte, np int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writelen(fd int, p *byte, np int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func munmap(addr uintptr, length uintptr) (err error) {
+ _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Madvise(b []byte, advice int) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mprotect(b []byte, prot int) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlock(b []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlock(b []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlockall(flags int) (err error) {
+ _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlockall() (err error) {
+ _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup2(oldfd int, newfd int) (err error) {
+ _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCreate(size int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchown(fd int, uid int, gid int) (err error) {
+ _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, stat *Stat_t) (err error) {
+ _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstatfs(fd int, buf *Statfs_t) (err error) {
+ _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ftruncate(fd int, length int64) (err error) {
+ _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getegid() (egid int) {
+ r0, _ := rawSyscallNoError(SYS_GETEGID, 0, 0, 0)
+ egid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Geteuid() (euid int) {
+ r0, _ := rawSyscallNoError(SYS_GETEUID, 0, 0, 0)
+ euid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getgid() (gid int) {
+ r0, _ := rawSyscallNoError(SYS_GETGID, 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrlimit(resource int, rlim *Rlimit) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getuid() (uid int) {
+ r0, _ := rawSyscallNoError(SYS_GETUID, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyInit() (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ioperm(from int, num int, on int) (err error) {
+ _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Iopl(level int) (err error) {
+ _, _, e1 := Syscall(SYS_IOPL, uintptr(level), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listen(s int, n int) (err error) {
+ _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pause() (err error) {
+ _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pread(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seek(fd int, offset int64, whence int) (off int64, err error) {
+ r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
+ off = int64(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
+ r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
+ written = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setfsgid(gid int) (err error) {
+ _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setfsuid(uid int) (err error) {
+ _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setrlimit(resource int, rlim *Rlimit) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Shutdown(fd int, how int) (err error) {
+ _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) {
+ r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
+ n = int64(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Statfs(path string, buf *Statfs_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func SyncFileRange(fd int, off int64, n int64, flags int) (err error) {
+ _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Truncate(path string, length int64) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ustat(dev int, ubuf *Ustat_t) (err error) {
+ _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
+ r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_NEWFSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getgroups(n int, list *_Gid_t) (nn int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
+ nn = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
+ _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
+ _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socket(domain int, typ int, proto int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
+ _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
+ r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset))
+ xaddr = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(events) > 0 {
+ _p0 = unsafe.Pointer(&events[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func futimesat(dirfd int, path string, times *[2]Timeval) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Utime(path string, buf *Utimbuf) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimes(path string, times *[2]Timeval) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/contrib/go/_std_1.18/src/syscall/zsysnum_darwin_amd64.go b/contrib/go/_std_1.19/src/syscall/zsysnum_darwin_amd64.go
index 08e003f292..08e003f292 100644
--- a/contrib/go/_std_1.18/src/syscall/zsysnum_darwin_amd64.go
+++ b/contrib/go/_std_1.19/src/syscall/zsysnum_darwin_amd64.go
diff --git a/contrib/go/_std_1.18/src/syscall/zsysnum_linux_amd64.go b/contrib/go/_std_1.19/src/syscall/zsysnum_linux_amd64.go
index 576c7c36a6..576c7c36a6 100644
--- a/contrib/go/_std_1.18/src/syscall/zsysnum_linux_amd64.go
+++ b/contrib/go/_std_1.19/src/syscall/zsysnum_linux_amd64.go
diff --git a/contrib/go/_std_1.18/src/syscall/ztypes_darwin_amd64.go b/contrib/go/_std_1.19/src/syscall/ztypes_darwin_amd64.go
index 551edc7025..551edc7025 100644
--- a/contrib/go/_std_1.18/src/syscall/ztypes_darwin_amd64.go
+++ b/contrib/go/_std_1.19/src/syscall/ztypes_darwin_amd64.go
diff --git a/contrib/go/_std_1.18/src/syscall/ztypes_linux_amd64.go b/contrib/go/_std_1.19/src/syscall/ztypes_linux_amd64.go
index 1bab13bf43..1bab13bf43 100644
--- a/contrib/go/_std_1.18/src/syscall/ztypes_linux_amd64.go
+++ b/contrib/go/_std_1.19/src/syscall/ztypes_linux_amd64.go
diff --git a/contrib/go/_std_1.19/src/text/template/doc.go b/contrib/go/_std_1.19/src/text/template/doc.go
new file mode 100644
index 0000000000..58cc97371b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/text/template/doc.go
@@ -0,0 +1,464 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package template implements data-driven templates for generating textual output.
+
+To generate HTML output, see package html/template, which has the same interface
+as this package but automatically secures HTML output against certain attacks.
+
+Templates are executed by applying them to a data structure. Annotations in the
+template refer to elements of the data structure (typically a field of a struct
+or a key in a map) to control execution and derive values to be displayed.
+Execution of the template walks the structure and sets the cursor, represented
+by a period '.' and called "dot", to the value at the current location in the
+structure as execution proceeds.
+
+The input text for a template is UTF-8-encoded text in any format.
+"Actions"--data evaluations or control structures--are delimited by
+"{{" and "}}"; all text outside actions is copied to the output unchanged.
+
+Once parsed, a template may be executed safely in parallel, although if parallel
+executions share a Writer the output may be interleaved.
+
+Here is a trivial example that prints "17 items are made of wool".
+
+ type Inventory struct {
+ Material string
+ Count uint
+ }
+ sweaters := Inventory{"wool", 17}
+ tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
+ if err != nil { panic(err) }
+ err = tmpl.Execute(os.Stdout, sweaters)
+ if err != nil { panic(err) }
+
+More intricate examples appear below.
+
+Text and spaces
+
+By default, all text between actions is copied verbatim when the template is
+executed. For example, the string " items are made of " in the example above
+appears on standard output when the program is run.
+
+However, to aid in formatting template source code, if an action's left
+delimiter (by default "{{") is followed immediately by a minus sign and white
+space, all trailing white space is trimmed from the immediately preceding text.
+Similarly, if the right delimiter ("}}") is preceded by white space and a minus
+sign, all leading white space is trimmed from the immediately following text.
+In these trim markers, the white space must be present:
+"{{- 3}}" is like "{{3}}" but trims the immediately preceding text, while
+"{{-3}}" parses as an action containing the number -3.
+
+For instance, when executing the template whose source is
+
+ "{{23 -}} < {{- 45}}"
+
+the generated output would be
+
+ "23<45"
+
+For this trimming, the definition of white space characters is the same as in Go:
+space, horizontal tab, carriage return, and newline.
+
+Actions
+
+Here is the list of actions. "Arguments" and "pipelines" are evaluations of
+data, defined in detail in the corresponding sections that follow.
+
+*/
+// {{/* a comment */}}
+// {{- /* a comment with white space trimmed from preceding and following text */ -}}
+// A comment; discarded. May contain newlines.
+// Comments do not nest and must start and end at the
+// delimiters, as shown here.
+/*
+
+ {{pipeline}}
+ The default textual representation (the same as would be
+ printed by fmt.Print) of the value of the pipeline is copied
+ to the output.
+
+ {{if pipeline}} T1 {{end}}
+ If the value of the pipeline is empty, no output is generated;
+ otherwise, T1 is executed. The empty values are false, 0, any
+ nil pointer or interface value, and any array, slice, map, or
+ string of length zero.
+ Dot is unaffected.
+
+ {{if pipeline}} T1 {{else}} T0 {{end}}
+ If the value of the pipeline is empty, T0 is executed;
+ otherwise, T1 is executed. Dot is unaffected.
+
+ {{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
+ To simplify the appearance of if-else chains, the else action
+ of an if may include another if directly; the effect is exactly
+ the same as writing
+ {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
+
+ {{range pipeline}} T1 {{end}}
+ The value of the pipeline must be an array, slice, map, or channel.
+ If the value of the pipeline has length zero, nothing is output;
+ otherwise, dot is set to the successive elements of the array,
+ slice, or map and T1 is executed. If the value is a map and the
+ keys are of basic type with a defined order, the elements will be
+ visited in sorted key order.
+
+ {{range pipeline}} T1 {{else}} T0 {{end}}
+ The value of the pipeline must be an array, slice, map, or channel.
+ If the value of the pipeline has length zero, dot is unaffected and
+ T0 is executed; otherwise, dot is set to the successive elements
+ of the array, slice, or map and T1 is executed.
+
+ {{break}}
+ The innermost {{range pipeline}} loop is ended early, stopping the
+ current iteration and bypassing all remaining iterations.
+
+ {{continue}}
+ The current iteration of the innermost {{range pipeline}} loop is
+ stopped, and the loop starts the next iteration.
+
+ {{template "name"}}
+ The template with the specified name is executed with nil data.
+
+ {{template "name" pipeline}}
+ The template with the specified name is executed with dot set
+ to the value of the pipeline.
+
+ {{block "name" pipeline}} T1 {{end}}
+ A block is shorthand for defining a template
+ {{define "name"}} T1 {{end}}
+ and then executing it in place
+ {{template "name" pipeline}}
+ The typical use is to define a set of root templates that are
+ then customized by redefining the block templates within.
+
+ {{with pipeline}} T1 {{end}}
+ If the value of the pipeline is empty, no output is generated;
+ otherwise, dot is set to the value of the pipeline and T1 is
+ executed.
+
+ {{with pipeline}} T1 {{else}} T0 {{end}}
+ If the value of the pipeline is empty, dot is unaffected and T0
+ is executed; otherwise, dot is set to the value of the pipeline
+ and T1 is executed.
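+
+For instance, several of these actions can be combined. In the sketch
+below, the field names Items and Name are hypothetical:
+
+ {{if .Items}}{{range .Items}}{{.Name}} {{end}}{{else}}no items{{end}}
+
+If .Items is a non-empty slice, the Name of each element is printed;
+otherwise the text "no items" is output.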
+
+Arguments
+
+An argument is a simple value, denoted by one of the following.
+
+ - A boolean, string, character, integer, floating-point, imaginary,
+ or complex constant in Go syntax. These behave like Go's untyped
+ constants. Note that, as in Go, whether a large integer constant
+ overflows when assigned or passed to a function can depend on whether
+ the host machine's ints are 32 or 64 bits.
+ - The keyword nil, representing an untyped Go nil.
+ - The character '.' (period):
+ .
+ The result is the value of dot.
+ - A variable name, which is a (possibly empty) alphanumeric string
+ preceded by a dollar sign, such as
+ $piOver2
+ or
+ $
+ The result is the value of the variable.
+ Variables are described below.
+ - The name of a field of the data, which must be a struct, preceded
+ by a period, such as
+ .Field
+ The result is the value of the field. Field invocations may be
+ chained:
+ .Field1.Field2
+ Fields can also be evaluated on variables, including chaining:
+ $x.Field1.Field2
+ - The name of a key of the data, which must be a map, preceded
+ by a period, such as
+ .Key
+ The result is the map element value indexed by the key.
+ Key invocations may be chained and combined with fields to any
+ depth:
+ .Field1.Key1.Field2.Key2
+ Although keys must be alphanumeric identifiers, unlike field
+ names they do not need to start with an upper case letter.
+ Keys can also be evaluated on variables, including chaining:
+ $x.key1.key2
+ - The name of a niladic method of the data, preceded by a period,
+ such as
+ .Method
+ The result is the value of invoking the method with dot as the
+ receiver, dot.Method(). Such a method must have one return value (of
+ any type) or two return values, the second of which is an error.
+ If it has two and the returned error is non-nil, execution terminates
+ and an error is returned to the caller as the value of Execute.
+ Method invocations may be chained and combined with fields and keys
+ to any depth:
+ .Field1.Key1.Method1.Field2.Key2.Method2
+ Methods can also be evaluated on variables, including chaining:
+ $x.Method1.Field
+ - The name of a niladic function, such as
+ fun
+ The result is the value of invoking the function, fun(). The return
+ types and values behave as in methods. Functions and function
+ names are described below.
+ - A parenthesized instance of one of the above, for grouping. The result
+ may be accessed by a field or map key invocation.
+ print (.F1 arg1) (.F2 arg2)
+ (.StructValuedMethod "arg").Field
+
+Arguments may evaluate to any type; if they are pointers the implementation
+automatically indirects to the base type when required.
+If an evaluation yields a function value, such as a function-valued
+field of a struct, the function is not invoked automatically, but it
+can be used as a truth value for an if action and the like. To invoke
+it, use the call function, defined below.
+
+Pipelines
+
+A pipeline is a possibly chained sequence of "commands". A command is a simple
+value (argument) or a function or method call, possibly with multiple arguments:
+
+ Argument
+ The result is the value of evaluating the argument.
+ .Method [Argument...]
+ The method can be alone or the last element of a chain but,
+ unlike methods in the middle of a chain, it can take arguments.
+ The result is the value of calling the method with the
+ arguments:
+ dot.Method(Argument1, etc.)
+ functionName [Argument...]
+ The result is the value of calling the function associated
+ with the name:
+ function(Argument1, etc.)
+ Functions and function names are described below.
+
+A pipeline may be "chained" by separating a sequence of commands with pipeline
+characters '|'. In a chained pipeline, the result of each command is
+passed as the last argument of the following command. The output of the final
+command in the pipeline is the value of the pipeline.
+
+The output of a command will be either one value or two values, the second of
+which has type error. If that second value is present and evaluates to
+non-nil, execution terminates and the error is returned to the caller of
+Execute.
+
+Variables
+
+A pipeline inside an action may initialize a variable to capture the result.
+The initialization has syntax
+
+ $variable := pipeline
+
+where $variable is the name of the variable. An action that declares a
+variable produces no output.
+
+Variables previously declared can also be assigned, using the syntax
+
+ $variable = pipeline
+
+If a "range" action initializes a variable, the variable is set to the
+successive elements of the iteration. Also, a "range" may declare two
+variables, separated by a comma:
+
+ range $index, $element := pipeline
+
+in which case $index and $element are set to the successive values of the
+array/slice index or map key and element, respectively. Note that if there is
+only one variable, it is assigned the element; this is opposite to the
+convention in Go range clauses.
+
+A variable's scope extends to the "end" action of the control structure ("if",
+"with", or "range") in which it is declared, or to the end of the template if
+there is no such control structure. A template invocation does not inherit
+variables from the point of its invocation.
+
+When execution begins, $ is set to the data argument passed to Execute, that is,
+to the starting value of dot.
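+
+As a sketch, a "range" that declares both variables (the field name
+Items is hypothetical) might read
+
+ {{range $i, $v := .Items}}{{$i}}: {{$v}} {{end}}
+
+which prints each element of .Items preceded by its index.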
+
+Examples
+
+Here are some example one-line templates demonstrating pipelines and variables.
+All produce the quoted word "output":
+
+ {{"\"output\""}}
+ A string constant.
+ {{`"output"`}}
+ A raw string constant.
+ {{printf "%q" "output"}}
+ A function call.
+ {{"output" | printf "%q"}}
+ A function call whose final argument comes from the previous
+ command.
+ {{printf "%q" (print "out" "put")}}
+ A parenthesized argument.
+ {{"put" | printf "%s%s" "out" | printf "%q"}}
+ A more elaborate call.
+ {{"output" | printf "%s" | printf "%q"}}
+ A longer chain.
+ {{with "output"}}{{printf "%q" .}}{{end}}
+ A with action using dot.
+ {{with $x := "output" | printf "%q"}}{{$x}}{{end}}
+ A with action that creates and uses a variable.
+ {{with $x := "output"}}{{printf "%q" $x}}{{end}}
+ A with action that uses the variable in another action.
+ {{with $x := "output"}}{{$x | printf "%q"}}{{end}}
+ The same, but pipelined.
+
+Functions
+
+During execution functions are found in two function maps: first in the
+template, then in the global function map. By default, no functions are defined
+in the template but the Funcs method can be used to add them.
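+
+As an illustrative sketch, a template-local function can be installed
+with Funcs and then called by name (the name "upper" is arbitrary):
+
+ fm := template.FuncMap{"upper": strings.ToUpper}
+ tmpl := template.Must(template.New("t").Funcs(fm).Parse("{{upper .}}"))
+ err := tmpl.Execute(os.Stdout, "shout") // prints SHOUT
+ if err != nil { panic(err) }
+
+Note that Funcs is called before Parse: functions must be added before
+the template text that calls them is parsed.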
+
+Predefined global functions are named as follows.
+
+ and
+ Returns the boolean AND of its arguments by returning the
+ first empty argument or the last argument. That is,
+ "and x y" behaves as "if x then y else x."
+ Evaluation proceeds through the arguments left to right
+ and returns when the result is determined.
+ call
+ Returns the result of calling the first argument, which
+ must be a function, with the remaining arguments as parameters.
+ Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
+ Y is a func-valued field, map entry, or the like.
+ The first argument must be the result of an evaluation
+ that yields a value of function type (as distinct from
+ a predefined function such as print). The function must
+ return either one or two result values, the second of which
+ is of type error. If the arguments don't match the function
+ or the returned error value is non-nil, execution stops.
+ html
+ Returns the escaped HTML equivalent of the textual
+ representation of its arguments. This function is unavailable
+ in html/template, with a few exceptions.
+ index
+ Returns the result of indexing its first argument by the
+ following arguments. Thus "index x 1 2 3" is, in Go syntax,
+ x[1][2][3]. Each indexed item must be a map, slice, or array.
+ slice
+ slice returns the result of slicing its first argument by the
+ remaining arguments. Thus "slice x 1 2" is, in Go syntax, x[1:2],
+ while "slice x" is x[:], "slice x 1" is x[1:], and "slice x 1 2 3"
+ is x[1:2:3]. The first argument must be a string, slice, or array.
+ js
+ Returns the escaped JavaScript equivalent of the textual
+ representation of its arguments.
+ len
+ Returns the integer length of its argument.
+ not
+ Returns the boolean negation of its single argument.
+ or
+ Returns the boolean OR of its arguments by returning the
+ first non-empty argument or the last argument, that is,
+ "or x y" behaves as "if x then x else y".
+ Evaluation proceeds through the arguments left to right
+ and returns when the result is determined.
+ print
+ An alias for fmt.Sprint
+ printf
+ An alias for fmt.Sprintf
+ println
+ An alias for fmt.Sprintln
+ urlquery
+ Returns the escaped value of the textual representation of
+ its arguments in a form suitable for embedding in a URL query.
+ This function is unavailable in html/template, with a few
+ exceptions.
+
+The boolean functions take any zero value to be false and a non-zero
+value to be true.
+
+There is also a set of binary comparison operators defined as
+functions:
+
+ eq
+ Returns the boolean truth of arg1 == arg2
+ ne
+ Returns the boolean truth of arg1 != arg2
+ lt
+ Returns the boolean truth of arg1 < arg2
+ le
+ Returns the boolean truth of arg1 <= arg2
+ gt
+ Returns the boolean truth of arg1 > arg2
+ ge
+ Returns the boolean truth of arg1 >= arg2
+
+For simpler multi-way equality tests, eq (only) accepts two or more
+arguments and compares the second and subsequent to the first,
+returning in effect
+
+ arg1==arg2 || arg1==arg3 || arg1==arg4 ...
+
+(Unlike with || in Go, however, eq is a function call and all the
+arguments will be evaluated.)
+
+The comparison functions work on any values whose type Go defines as
+comparable. For basic types such as integers, the rules are relaxed:
+size and exact type are ignored, so any integer value, signed or unsigned,
+may be compared with any other integer value. (The arithmetic value is compared,
+not the bit pattern, so all negative integers are less than all unsigned integers.)
+However, as usual, one may not compare an int with a float32 and so on.
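+
+For example (the field name Color is hypothetical), the action
+
+ {{if eq .Color "red" "blue"}}primary{{end}}
+
+emits "primary" when .Color is equal to either "red" or "blue".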
+
+Associated templates
+
+Each template is named by a string specified when it is created. Also, each
+template is associated with zero or more other templates that it may invoke by
+name; such associations are transitive and form a name space of templates.
+
+A template may use a template invocation to instantiate another associated
+template; see the explanation of the "template" action above. The name must be
+that of a template associated with the template that contains the invocation.
+
+Nested template definitions
+
+When parsing a template, another template may be defined and associated with the
+template being parsed. Template definitions must appear at the top level of the
+template, much like global variables in a Go program.
+
+The syntax of such definitions is to surround each template declaration with a
+"define" and "end" action.
+
+The define action names the template being created by providing a string
+constant. Here is a simple example:
+
+ `{{define "T1"}}ONE{{end}}
+ {{define "T2"}}TWO{{end}}
+ {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
+ {{template "T3"}}`
+
+This defines two templates, T1 and T2, and a third T3 that invokes the other two
+when it is executed. Finally it invokes T3. If executed this template will
+produce the text
+
+ ONE TWO
+
+By construction, a template may reside in only one association. If it's
+necessary to have a template addressable from multiple associations, the
+template definition must be parsed multiple times to create distinct *Template
+values, or must be copied with the Clone or AddParseTree method.
+
+Parse may be called multiple times to assemble the various associated templates;
+see the ParseFiles and ParseGlob functions and methods for simple ways to parse
+related templates stored in files.
+
+A template may be executed directly or through ExecuteTemplate, which executes
+an associated template identified by name. To invoke our example above, we
+might write,
+
+ err := tmpl.Execute(os.Stdout, "no data needed")
+ if err != nil {
+ log.Fatalf("execution failed: %s", err)
+ }
+
+or to invoke a particular template explicitly by name,
+
+ err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
+ if err != nil {
+ log.Fatalf("execution failed: %s", err)
+ }
+
+*/
+package template
diff --git a/contrib/go/_std_1.18/src/text/template/exec.go b/contrib/go/_std_1.19/src/text/template/exec.go
index 37984cf91a..37984cf91a 100644
--- a/contrib/go/_std_1.18/src/text/template/exec.go
+++ b/contrib/go/_std_1.19/src/text/template/exec.go
diff --git a/contrib/go/_std_1.19/src/text/template/funcs.go b/contrib/go/_std_1.19/src/text/template/funcs.go
new file mode 100644
index 0000000000..390d47ebbb
--- /dev/null
+++ b/contrib/go/_std_1.19/src/text/template/funcs.go
@@ -0,0 +1,777 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+ "reflect"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// FuncMap is the type of the map defining the mapping from names to functions.
+// Each function must have either a single return value, or two return values of
+// which the second has type error. In that case, if the second (error)
+// return value evaluates to non-nil during execution, execution terminates and
+// Execute returns that error.
+//
+// Errors returned by Execute wrap the underlying error; call errors.As to
+// uncover them.
+//
+// When template execution invokes a function with an argument list, that list
+// must be assignable to the function's parameter types. Functions meant to
+// apply to arguments of arbitrary type can use parameters of type interface{} or
+// of type reflect.Value. Similarly, functions meant to return a result of arbitrary
+// type can return interface{} or reflect.Value.
+type FuncMap map[string]any
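+
+// A minimal illustrative FuncMap (the names are examples, not part of
+// this package; assumes the strings and strconv packages):
+//
+//	FuncMap{
+//		"upper": strings.ToUpper, // one result
+//		"atoi":  strconv.Atoi,    // two results, the second an error
+//	}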
+
+// builtins returns the FuncMap.
+// It is not a global variable so the linker can dead code eliminate
+// more when this isn't called. See golang.org/issue/36021.
+// TODO: revert this back to a global map once golang.org/issue/2559 is fixed.
+func builtins() FuncMap {
+ return FuncMap{
+ "and": and,
+ "call": call,
+ "html": HTMLEscaper,
+ "index": index,
+ "slice": slice,
+ "js": JSEscaper,
+ "len": length,
+ "not": not,
+ "or": or,
+ "print": fmt.Sprint,
+ "printf": fmt.Sprintf,
+ "println": fmt.Sprintln,
+ "urlquery": URLQueryEscaper,
+
+ // Comparisons
+ "eq": eq, // ==
+ "ge": ge, // >=
+ "gt": gt, // >
+ "le": le, // <=
+ "lt": lt, // <
+ "ne": ne, // !=
+ }
+}
+
+var builtinFuncsOnce struct {
+ sync.Once
+ v map[string]reflect.Value
+}
+
+// builtinFuncs lazily computes & caches the builtinFuncs map.
+// TODO: revert this back to a global map once golang.org/issue/2559 is fixed.
+func builtinFuncs() map[string]reflect.Value {
+ builtinFuncsOnce.Do(func() {
+ builtinFuncsOnce.v = createValueFuncs(builtins())
+ })
+ return builtinFuncsOnce.v
+}
+
+// createValueFuncs turns a FuncMap into a map[string]reflect.Value
+func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
+ m := make(map[string]reflect.Value)
+ addValueFuncs(m, funcMap)
+ return m
+}
+
+// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
+func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
+ for name, fn := range in {
+ if !goodName(name) {
+ panic(fmt.Errorf("function name %q is not a valid identifier", name))
+ }
+ v := reflect.ValueOf(fn)
+ if v.Kind() != reflect.Func {
+ panic("value for " + name + " not a function")
+ }
+ if !goodFunc(v.Type()) {
+ panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
+ }
+ out[name] = v
+ }
+}
+
+// addFuncs adds to out the functions in in. It does no checking of the input;
+// call addValueFuncs first.
+func addFuncs(out, in FuncMap) {
+ for name, fn := range in {
+ out[name] = fn
+ }
+}
+
+// goodFunc reports whether the function or method has the right result signature.
+func goodFunc(typ reflect.Type) bool {
+ // We allow functions with 1 result or 2 results where the second is an error.
+ switch {
+ case typ.NumOut() == 1:
+ return true
+ case typ.NumOut() == 2 && typ.Out(1) == errorType:
+ return true
+ }
+ return false
+}
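+
+// Illustrative checks (not exercised by this file):
+//
+//	goodFunc(reflect.TypeOf(strings.ToUpper)) // true: one result
+//	goodFunc(reflect.TypeOf(strconv.Atoi))    // true: (int, error)
+//	goodFunc(reflect.TypeOf(func() (int, int) { return 0, 0 })) // false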
+
+// goodName reports whether the function name is a valid identifier.
+func goodName(name string) bool {
+ if name == "" {
+ return false
+ }
+ for i, r := range name {
+ switch {
+ case r == '_':
+ case i == 0 && !unicode.IsLetter(r):
+ return false
+ case !unicode.IsLetter(r) && !unicode.IsDigit(r):
+ return false
+ }
+ }
+ return true
+}
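+
+// For example, "title" and "do_it" are valid names, while "foo-bar" and
+// "2x" are not.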
+
+// findFunction looks for a function in the template's function map and then in the global map.
+func findFunction(name string, tmpl *Template) (v reflect.Value, isBuiltin, ok bool) {
+ if tmpl != nil && tmpl.common != nil {
+ tmpl.muFuncs.RLock()
+ defer tmpl.muFuncs.RUnlock()
+ if fn := tmpl.execFuncs[name]; fn.IsValid() {
+ return fn, false, true
+ }
+ }
+ if fn := builtinFuncs()[name]; fn.IsValid() {
+ return fn, true, true
+ }
+ return reflect.Value{}, false, false
+}
+
+// prepareArg checks whether value can be used as an argument of type argType,
+// and converts an invalid value to the appropriate zero value if possible.
+func prepareArg(value reflect.Value, argType reflect.Type) (reflect.Value, error) {
+ if !value.IsValid() {
+ if !canBeNil(argType) {
+ return reflect.Value{}, fmt.Errorf("value is nil; should be of type %s", argType)
+ }
+ value = reflect.Zero(argType)
+ }
+ if value.Type().AssignableTo(argType) {
+ return value, nil
+ }
+ if intLike(value.Kind()) && intLike(argType.Kind()) && value.Type().ConvertibleTo(argType) {
+ value = value.Convert(argType)
+ return value, nil
+ }
+ return reflect.Value{}, fmt.Errorf("value has type %s; should be %s", value.Type(), argType)
+}
+
+func intLike(typ reflect.Kind) bool {
+ switch typ {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return true
+ }
+ return false
+}
+
+// indexArg checks if a reflect.Value can be used as an index, and converts it to int if possible.
+func indexArg(index reflect.Value, cap int) (int, error) {
+ var x int64
+ switch index.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x = index.Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ x = int64(index.Uint())
+ case reflect.Invalid:
+ return 0, fmt.Errorf("cannot index slice/array with nil")
+ default:
+ return 0, fmt.Errorf("cannot index slice/array with type %s", index.Type())
+ }
+ if x < 0 || int(x) < 0 || int(x) > cap {
+ return 0, fmt.Errorf("index out of range: %d", x)
+ }
+ return int(x), nil
+}
+
+// Indexing.
+
+// index returns the result of indexing its first argument by the following
+// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
+// indexed item must be a map, slice, or array.
+func index(item reflect.Value, indexes ...reflect.Value) (reflect.Value, error) {
+ item = indirectInterface(item)
+ if !item.IsValid() {
+ return reflect.Value{}, fmt.Errorf("index of untyped nil")
+ }
+ for _, index := range indexes {
+ index = indirectInterface(index)
+ var isNil bool
+ if item, isNil = indirect(item); isNil {
+ return reflect.Value{}, fmt.Errorf("index of nil pointer")
+ }
+ switch item.Kind() {
+ case reflect.Array, reflect.Slice, reflect.String:
+ x, err := indexArg(index, item.Len())
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ item = item.Index(x)
+ case reflect.Map:
+ index, err := prepareArg(index, item.Type().Key())
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ if x := item.MapIndex(index); x.IsValid() {
+ item = x
+ } else {
+ item = reflect.Zero(item.Type().Elem())
+ }
+ case reflect.Invalid:
+ // the loop holds invariant: item.IsValid()
+ panic("unreachable")
+ default:
+ return reflect.Value{}, fmt.Errorf("can't index item of type %s", item.Type())
+ }
+ }
+ return item, nil
+}
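+
+// Illustrative template usage (field names are hypothetical):
+//
+//	{{index .Users 0 "name"}} // .Users[0]["name"], given a slice of maps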
+
+// Slicing.
+
+// slice returns the result of slicing its first argument by the remaining
+// arguments. Thus "slice x 1 2" is, in Go syntax, x[1:2], while "slice x"
+// is x[:], "slice x 1" is x[1:], and "slice x 1 2 3" is x[1:2:3]. The first
+// argument must be a string, slice, or array.
+func slice(item reflect.Value, indexes ...reflect.Value) (reflect.Value, error) {
+ item = indirectInterface(item)
+ if !item.IsValid() {
+ return reflect.Value{}, fmt.Errorf("slice of untyped nil")
+ }
+ if len(indexes) > 3 {
+ return reflect.Value{}, fmt.Errorf("too many slice indexes: %d", len(indexes))
+ }
+ var cap int
+ switch item.Kind() {
+ case reflect.String:
+ if len(indexes) == 3 {
+ return reflect.Value{}, fmt.Errorf("cannot 3-index slice a string")
+ }
+ cap = item.Len()
+ case reflect.Array, reflect.Slice:
+ cap = item.Cap()
+ default:
+ return reflect.Value{}, fmt.Errorf("can't slice item of type %s", item.Type())
+ }
+ // set default values for cases item[:], item[i:].
+ idx := [3]int{0, item.Len()}
+ for i, index := range indexes {
+ x, err := indexArg(index, cap)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ idx[i] = x
+ }
+ // given item[i:j], make sure i <= j.
+ if idx[0] > idx[1] {
+ return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[0], idx[1])
+ }
+ if len(indexes) < 3 {
+ return item.Slice(idx[0], idx[1]), nil
+ }
+ // given item[i:j:k], make sure i <= j <= k.
+ if idx[1] > idx[2] {
+ return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[1], idx[2])
+ }
+ return item.Slice3(idx[0], idx[1], idx[2]), nil
+}
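+
+// Illustrative template usage:
+//
+//	{{slice "hello" 1 3}} // yields "el", i.e. "hello"[1:3]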
+
+// Length
+
+// length returns the length of the item, with an error if it has no defined length.
+func length(item reflect.Value) (int, error) {
+ item, isNil := indirect(item)
+ if isNil {
+ return 0, fmt.Errorf("len of nil pointer")
+ }
+ switch item.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+ return item.Len(), nil
+ }
+ return 0, fmt.Errorf("len of type %s", item.Type())
+}
+
+// Function invocation
+
+// call returns the result of evaluating the first argument as a function.
+// The function must return 1 result, or 2 results, the second of which is an error.
+func call(fn reflect.Value, args ...reflect.Value) (reflect.Value, error) {
+ fn = indirectInterface(fn)
+ if !fn.IsValid() {
+ return reflect.Value{}, fmt.Errorf("call of nil")
+ }
+ typ := fn.Type()
+ if typ.Kind() != reflect.Func {
+ return reflect.Value{}, fmt.Errorf("non-function of type %s", typ)
+ }
+ if !goodFunc(typ) {
+ return reflect.Value{}, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
+ }
+ numIn := typ.NumIn()
+ var dddType reflect.Type
+ if typ.IsVariadic() {
+ if len(args) < numIn-1 {
+ return reflect.Value{}, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
+ }
+ dddType = typ.In(numIn - 1).Elem()
+ } else {
+ if len(args) != numIn {
+ return reflect.Value{}, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
+ }
+ }
+ argv := make([]reflect.Value, len(args))
+ for i, arg := range args {
+ arg = indirectInterface(arg)
+ // Compute the expected type. Clumsy because of variadics.
+ argType := dddType
+ if !typ.IsVariadic() || i < numIn-1 {
+ argType = typ.In(i)
+ }
+
+ var err error
+ if argv[i], err = prepareArg(arg, argType); err != nil {
+ return reflect.Value{}, fmt.Errorf("arg %d: %w", i, err)
+ }
+ }
+ return safeCall(fn, argv)
+}
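+
+// Illustrative template usage, assuming .Double holds a func(int) int:
+//
+//	{{call .Double 21}} // yields 42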
+
+// safeCall runs fun.Call(args), and returns the resulting value and error, if
+// any. If the call panics, the panic value is returned as an error.
+func safeCall(fun reflect.Value, args []reflect.Value) (val reflect.Value, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if e, ok := r.(error); ok {
+ err = e
+ } else {
+ err = fmt.Errorf("%v", r)
+ }
+ }
+ }()
+ ret := fun.Call(args)
+ if len(ret) == 2 && !ret[1].IsNil() {
+ return ret[0], ret[1].Interface().(error)
+ }
+ return ret[0], nil
+}
+
+// Boolean logic.
+
+func truth(arg reflect.Value) bool {
+ t, _ := isTrue(indirectInterface(arg))
+ return t
+}
+
+// and computes the Boolean AND of its arguments, returning
+// the first false argument it encounters, or the last argument.
+func and(arg0 reflect.Value, args ...reflect.Value) reflect.Value {
+ panic("unreachable") // implemented as a special case in evalCall
+}
+
+// or computes the Boolean OR of its arguments, returning
+// the first true argument it encounters, or the last argument.
+func or(arg0 reflect.Value, args ...reflect.Value) reflect.Value {
+ panic("unreachable") // implemented as a special case in evalCall
+}
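+
+// Because both are special-cased in evalCall, evaluation is lazy: in an
+// illustrative action {{or .Cached (expensive .)}}, the hypothetical
+// "expensive" function is called only when .Cached is empty.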
+
+// not returns the Boolean negation of its argument.
+func not(arg reflect.Value) bool {
+ return !truth(arg)
+}
+
+// Comparison.
+
+// TODO: Perhaps allow comparison between signed and unsigned integers.
+
+var (
+ errBadComparisonType = errors.New("invalid type for comparison")
+ errBadComparison = errors.New("incompatible types for comparison")
+ errNoComparison = errors.New("missing argument for comparison")
+)
+
+type kind int
+
+const (
+ invalidKind kind = iota
+ boolKind
+ complexKind
+ intKind
+ floatKind
+ stringKind
+ uintKind
+)
+
+func basicKind(v reflect.Value) (kind, error) {
+ switch v.Kind() {
+ case reflect.Bool:
+ return boolKind, nil
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return intKind, nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return uintKind, nil
+ case reflect.Float32, reflect.Float64:
+ return floatKind, nil
+ case reflect.Complex64, reflect.Complex128:
+ return complexKind, nil
+ case reflect.String:
+ return stringKind, nil
+ }
+ return invalidKind, errBadComparisonType
+}
+
+// isNil returns true if v is the zero reflect.Value, or nil of its type.
+func isNil(v reflect.Value) bool {
+ if v == zero {
+ return true
+ }
+ switch v.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Pointer, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// canCompare reports whether v1 and v2 are both the same kind, or one is nil.
+// Called only when dealing with nillable types, or there's about to be an error.
+func canCompare(v1, v2 reflect.Value) bool {
+ k1 := v1.Kind()
+ k2 := v2.Kind()
+ if k1 == k2 {
+ return true
+ }
+ // We know the type can be compared to nil.
+ return k1 == reflect.Invalid || k2 == reflect.Invalid
+}
+
+// eq evaluates the comparison a == b || a == c || ...
+func eq(arg1 reflect.Value, arg2 ...reflect.Value) (bool, error) {
+ arg1 = indirectInterface(arg1)
+ if len(arg2) == 0 {
+ return false, errNoComparison
+ }
+ k1, _ := basicKind(arg1)
+ for _, arg := range arg2 {
+ arg = indirectInterface(arg)
+ k2, _ := basicKind(arg)
+ truth := false
+ if k1 != k2 {
+ // Special case: Can compare integer values regardless of type's sign.
+ switch {
+ case k1 == intKind && k2 == uintKind:
+ truth = arg1.Int() >= 0 && uint64(arg1.Int()) == arg.Uint()
+ case k1 == uintKind && k2 == intKind:
+ truth = arg.Int() >= 0 && arg1.Uint() == uint64(arg.Int())
+ default:
+ if arg1 != zero && arg != zero {
+ return false, errBadComparison
+ }
+ }
+ } else {
+ switch k1 {
+ case boolKind:
+ truth = arg1.Bool() == arg.Bool()
+ case complexKind:
+ truth = arg1.Complex() == arg.Complex()
+ case floatKind:
+ truth = arg1.Float() == arg.Float()
+ case intKind:
+ truth = arg1.Int() == arg.Int()
+ case stringKind:
+ truth = arg1.String() == arg.String()
+ case uintKind:
+ truth = arg1.Uint() == arg.Uint()
+ default:
+ if !canCompare(arg1, arg) {
+ return false, fmt.Errorf("non-comparable types %s: %v, %s: %v", arg1, arg1.Type(), arg.Type(), arg)
+ }
+ if isNil(arg1) || isNil(arg) {
+ truth = isNil(arg) == isNil(arg1)
+ } else {
+ if !arg.Type().Comparable() {
+ return false, fmt.Errorf("non-comparable type %s: %v", arg, arg.Type())
+ }
+ truth = arg1.Interface() == arg.Interface()
+ }
+ }
+ }
+ if truth {
+ return true, nil
+ }
+ }
+ return false, nil
+}
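+
+// Under the relaxed integer rule, an illustrative call:
+//
+//	eq(reflect.ValueOf(int8(3)), reflect.ValueOf(uint64(3))) // true, nil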
+
+// ne evaluates the comparison a != b.
+func ne(arg1, arg2 reflect.Value) (bool, error) {
+ // != is the inverse of ==.
+ equal, err := eq(arg1, arg2)
+ return !equal, err
+}
+
+// lt evaluates the comparison a < b.
+func lt(arg1, arg2 reflect.Value) (bool, error) {
+ arg1 = indirectInterface(arg1)
+ k1, err := basicKind(arg1)
+ if err != nil {
+ return false, err
+ }
+ arg2 = indirectInterface(arg2)
+ k2, err := basicKind(arg2)
+ if err != nil {
+ return false, err
+ }
+ truth := false
+ if k1 != k2 {
+ // Special case: Can compare integer values regardless of type's sign.
+ switch {
+ case k1 == intKind && k2 == uintKind:
+ truth = arg1.Int() < 0 || uint64(arg1.Int()) < arg2.Uint()
+ case k1 == uintKind && k2 == intKind:
+ truth = arg2.Int() >= 0 && arg1.Uint() < uint64(arg2.Int())
+ default:
+ return false, errBadComparison
+ }
+ } else {
+ switch k1 {
+ case boolKind, complexKind:
+ return false, errBadComparisonType
+ case floatKind:
+ truth = arg1.Float() < arg2.Float()
+ case intKind:
+ truth = arg1.Int() < arg2.Int()
+ case stringKind:
+ truth = arg1.String() < arg2.String()
+ case uintKind:
+ truth = arg1.Uint() < arg2.Uint()
+ default:
+ panic("invalid kind")
+ }
+ }
+ return truth, nil
+}
+
+// le evaluates the comparison a <= b.
+func le(arg1, arg2 reflect.Value) (bool, error) {
+ // <= is < or ==.
+ lessThan, err := lt(arg1, arg2)
+ if lessThan || err != nil {
+ return lessThan, err
+ }
+ return eq(arg1, arg2)
+}
+
+// gt evaluates the comparison a > b.
+func gt(arg1, arg2 reflect.Value) (bool, error) {
+ // > is the inverse of <=.
+ lessOrEqual, err := le(arg1, arg2)
+ if err != nil {
+ return false, err
+ }
+ return !lessOrEqual, nil
+}
+
+// ge evaluates the comparison a >= b.
+func ge(arg1, arg2 reflect.Value) (bool, error) {
+ // >= is the inverse of <.
+ lessThan, err := lt(arg1, arg2)
+ if err != nil {
+ return false, err
+ }
+ return !lessThan, nil
+}
+
+// HTML escaping.
+
+var (
+ htmlQuot = []byte("&#34;") // shorter than "&quot;"
+ htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
+ htmlAmp = []byte("&amp;")
+ htmlLt = []byte("&lt;")
+ htmlGt = []byte("&gt;")
+ htmlNull = []byte("\uFFFD")
+)
+
+// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
+func HTMLEscape(w io.Writer, b []byte) {
+ last := 0
+ for i, c := range b {
+ var html []byte
+ switch c {
+ case '\000':
+ html = htmlNull
+ case '"':
+ html = htmlQuot
+ case '\'':
+ html = htmlApos
+ case '&':
+ html = htmlAmp
+ case '<':
+ html = htmlLt
+ case '>':
+ html = htmlGt
+ default:
+ continue
+ }
+ w.Write(b[last:i])
+ w.Write(html)
+ last = i + 1
+ }
+ w.Write(b[last:])
+}
+
+// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
+func HTMLEscapeString(s string) string {
+ // Avoid allocation if we can.
+ if !strings.ContainsAny(s, "'\"&<>\000") {
+ return s
+ }
+ var b bytes.Buffer
+ HTMLEscape(&b, []byte(s))
+ return b.String()
+}
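+
+// Illustrative escaping:
+//
+//	HTMLEscapeString(`<a href="x">`) // yields `&lt;a href=&#34;x&#34;&gt;`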
+
+// HTMLEscaper returns the escaped HTML equivalent of the textual
+// representation of its arguments.
+func HTMLEscaper(args ...any) string {
+ return HTMLEscapeString(evalArgs(args))
+}
+
+// JavaScript escaping.
+
+var (
+ jsLowUni = []byte(`\u00`)
+ hex = []byte("0123456789ABCDEF")
+
+ jsBackslash = []byte(`\\`)
+ jsApos = []byte(`\'`)
+ jsQuot = []byte(`\"`)
+ jsLt = []byte(`\u003C`)
+ jsGt = []byte(`\u003E`)
+ jsAmp = []byte(`\u0026`)
+ jsEq = []byte(`\u003D`)
+)
+
+// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
+func JSEscape(w io.Writer, b []byte) {
+ last := 0
+ for i := 0; i < len(b); i++ {
+ c := b[i]
+
+ if !jsIsSpecial(rune(c)) {
+ // fast path: nothing to do
+ continue
+ }
+ w.Write(b[last:i])
+
+ if c < utf8.RuneSelf {
+ // Quotes, slashes and angle brackets get quoted.
+ // Control characters get written as \u00XX.
+ switch c {
+ case '\\':
+ w.Write(jsBackslash)
+ case '\'':
+ w.Write(jsApos)
+ case '"':
+ w.Write(jsQuot)
+ case '<':
+ w.Write(jsLt)
+ case '>':
+ w.Write(jsGt)
+ case '&':
+ w.Write(jsAmp)
+ case '=':
+ w.Write(jsEq)
+ default:
+ w.Write(jsLowUni)
+ t, b := c>>4, c&0x0f
+ w.Write(hex[t : t+1])
+ w.Write(hex[b : b+1])
+ }
+ } else {
+ // Unicode rune.
+ r, size := utf8.DecodeRune(b[i:])
+ if unicode.IsPrint(r) {
+ w.Write(b[i : i+size])
+ } else {
+ fmt.Fprintf(w, "\\u%04X", r)
+ }
+ i += size - 1
+ }
+ last = i + 1
+ }
+ w.Write(b[last:])
+}
+
+// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
+func JSEscapeString(s string) string {
+ // Avoid allocation if we can.
+ if strings.IndexFunc(s, jsIsSpecial) < 0 {
+ return s
+ }
+ var b bytes.Buffer
+ JSEscape(&b, []byte(s))
+ return b.String()
+}
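+
+// Illustrative escaping:
+//
+//	JSEscapeString(`a = "b"`) // yields `a \u003D \"b\"`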
+
+func jsIsSpecial(r rune) bool {
+ switch r {
+ case '\\', '\'', '"', '<', '>', '&', '=':
+ return true
+ }
+ return r < ' ' || utf8.RuneSelf <= r
+}
+
+// JSEscaper returns the escaped JavaScript equivalent of the textual
+// representation of its arguments.
+func JSEscaper(args ...any) string {
+ return JSEscapeString(evalArgs(args))
+}
+
+// URLQueryEscaper returns the escaped value of the textual representation of
+// its arguments in a form suitable for embedding in a URL query.
+func URLQueryEscaper(args ...any) string {
+ return url.QueryEscape(evalArgs(args))
+}
+
+// evalArgs formats the list of arguments into a string. It is therefore equivalent to
+//
+// fmt.Sprint(args...)
+//
+// except that each argument is indirected (if a pointer), as required,
+// using the same rules as the default string evaluation during template
+// execution.
+func evalArgs(args []any) string {
+ ok := false
+ var s string
+ // Fast path for simple common case.
+ if len(args) == 1 {
+ s, ok = args[0].(string)
+ }
+ if !ok {
+ for i, arg := range args {
+ a, ok := printableValue(reflect.ValueOf(arg))
+ if ok {
+ args[i] = a
+ } // else let fmt do its thing
+ }
+ s = fmt.Sprint(args...)
+ }
+ return s
+}
diff --git a/contrib/go/_std_1.19/src/text/template/helper.go b/contrib/go/_std_1.19/src/text/template/helper.go
new file mode 100644
index 0000000000..48af3928b3
--- /dev/null
+++ b/contrib/go/_std_1.19/src/text/template/helper.go
@@ -0,0 +1,178 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Helper functions to make constructing templates easier.
+
+package template
+
+import (
+ "fmt"
+ "io/fs"
+ "os"
+ "path"
+ "path/filepath"
+)
+
+// Functions and methods to parse templates.
+
+// Must is a helper that wraps a call to a function returning (*Template, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initializations such as
+//
+// var t = template.Must(template.New("name").Parse("text"))
+func Must(t *Template, err error) *Template {
+ if err != nil {
+ panic(err)
+ }
+ return t
+}
+
+// ParseFiles creates a new Template and parses the template definitions from
+// the named files. The returned template's name will have the base name and
+// parsed contents of the first file. There must be at least one file.
+// If an error occurs, parsing stops and the returned *Template is nil.
+//
+// When parsing multiple files with the same name in different directories,
+// the last one mentioned will be the one that results.
+// For instance, ParseFiles("a/foo", "b/foo") stores "b/foo" as the template
+// named "foo", while "a/foo" is unavailable.
+func ParseFiles(filenames ...string) (*Template, error) {
+ return parseFiles(nil, readFileOS, filenames...)
+}
+
+// ParseFiles parses the named files and associates the resulting templates with
+// t. If an error occurs, parsing stops and the returned template is nil;
+// otherwise it is t. There must be at least one file.
+// Since the templates created by ParseFiles are named by the base
+// names of the argument files, t should usually have the name of one
+// of the (base) names of the files. If it does not, depending on t's
+// contents before calling ParseFiles, t.Execute may fail. In that
+// case use t.ExecuteTemplate to execute a valid template.
+//
+// When parsing multiple files with the same name in different directories,
+// the last one mentioned will be the one that results.
+func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
+ t.init()
+ return parseFiles(t, readFileOS, filenames...)
+}
+
+// parseFiles is the helper for the method and function. If the argument
+// template is nil, it is created from the first file.
+func parseFiles(t *Template, readFile func(string) (string, []byte, error), filenames ...string) (*Template, error) {
+ if len(filenames) == 0 {
+ // Not really a problem, but be consistent.
+ return nil, fmt.Errorf("template: no files named in call to ParseFiles")
+ }
+ for _, filename := range filenames {
+ name, b, err := readFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ s := string(b)
+ // First template becomes return value if not already defined,
+ // and we use that one for subsequent New calls to associate
+ // all the templates together. Also, if this file has the same name
+ // as t, this file becomes the contents of t, so
+ // t, err := New(name).Funcs(xxx).ParseFiles(name)
+ // works. Otherwise we create a new template associated with t.
+ var tmpl *Template
+ if t == nil {
+ t = New(name)
+ }
+ if name == t.Name() {
+ tmpl = t
+ } else {
+ tmpl = t.New(name)
+ }
+ _, err = tmpl.Parse(s)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return t, nil
+}
+
+// ParseGlob creates a new Template and parses the template definitions from
+// the files identified by the pattern. The files are matched according to the
+// semantics of filepath.Match, and the pattern must match at least one file.
+// The returned template will have the (base) name and (parsed) contents of the
+// first file matched by the pattern. ParseGlob is equivalent to calling
+// ParseFiles with the list of files matched by the pattern.
+//
+// When parsing multiple files with the same name in different directories,
+// the last one mentioned will be the one that results.
+func ParseGlob(pattern string) (*Template, error) {
+ return parseGlob(nil, pattern)
+}
+
+// ParseGlob parses the template definitions in the files identified by the
+// pattern and associates the resulting templates with t. The files are matched
+// according to the semantics of filepath.Match, and the pattern must match at
+// least one file. ParseGlob is equivalent to calling t.ParseFiles with the
+// list of files matched by the pattern.
+//
+// When parsing multiple files with the same name in different directories,
+// the last one mentioned will be the one that results.
+func (t *Template) ParseGlob(pattern string) (*Template, error) {
+ t.init()
+ return parseGlob(t, pattern)
+}
+
+// parseGlob is the implementation of the function and method ParseGlob.
+func parseGlob(t *Template, pattern string) (*Template, error) {
+ filenames, err := filepath.Glob(pattern)
+ if err != nil {
+ return nil, err
+ }
+ if len(filenames) == 0 {
+ return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
+ }
+ return parseFiles(t, readFileOS, filenames...)
+}
+
+// ParseFS is like ParseFiles or ParseGlob but reads from the file system fsys
+// instead of the host operating system's file system.
+// It accepts a list of glob patterns.
+// (Note that most file names serve as glob patterns matching only themselves.)
+func ParseFS(fsys fs.FS, patterns ...string) (*Template, error) {
+ return parseFS(nil, fsys, patterns)
+}
+
+// ParseFS is like ParseFiles or ParseGlob but reads from the file system fsys
+// instead of the host operating system's file system.
+// It accepts a list of glob patterns.
+// (Note that most file names serve as glob patterns matching only themselves.)
+func (t *Template) ParseFS(fsys fs.FS, patterns ...string) (*Template, error) {
+ t.init()
+ return parseFS(t, fsys, patterns)
+}
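+
+// Illustrative usage with an embedded file system (paths are hypothetical):
+//
+//	//go:embed templates/*.tmpl
+//	var content embed.FS
+//
+//	t := template.Must(template.ParseFS(content, "templates/*.tmpl"))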
+
+func parseFS(t *Template, fsys fs.FS, patterns []string) (*Template, error) {
+ var filenames []string
+ for _, pattern := range patterns {
+ list, err := fs.Glob(fsys, pattern)
+ if err != nil {
+ return nil, err
+ }
+ if len(list) == 0 {
+ return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
+ }
+ filenames = append(filenames, list...)
+ }
+ return parseFiles(t, readFileFS(fsys), filenames...)
+}
+
+func readFileOS(file string) (name string, b []byte, err error) {
+ name = filepath.Base(file)
+ b, err = os.ReadFile(file)
+ return
+}
+
+func readFileFS(fsys fs.FS) func(string) (string, []byte, error) {
+ return func(file string) (name string, b []byte, err error) {
+ name = path.Base(file)
+ b, err = fs.ReadFile(fsys, file)
+ return
+ }
+}
diff --git a/contrib/go/_std_1.19/src/text/template/option.go b/contrib/go/_std_1.19/src/text/template/option.go
new file mode 100644
index 0000000000..ea2fd80c06
--- /dev/null
+++ b/contrib/go/_std_1.19/src/text/template/option.go
@@ -0,0 +1,72 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the code to handle template options.
+
+package template
+
+import "strings"
+
+// missingKeyAction defines how to respond to indexing a map with a key that is not present.
+type missingKeyAction int
+
+const (
+ mapInvalid missingKeyAction = iota // Return an invalid reflect.Value.
+ mapZeroValue // Return the zero value for the map element.
+ mapError // Error out
+)
+
+type option struct {
+ missingKey missingKeyAction
+}
+
+// Option sets options for the template. Options are described by
+// strings, either a simple string or "key=value". There can be at
+// most one equals sign in an option string. If the option string
+// is unrecognized or otherwise invalid, Option panics.
+//
+// Known options:
+//
+// missingkey: Control the behavior during execution if a map is
+// indexed with a key that is not present in the map.
+//
+// "missingkey=default" or "missingkey=invalid"
+// The default behavior: Do nothing and continue execution.
+// If printed, the result of the index operation is the string
+// "<no value>".
+// "missingkey=zero"
+// The operation returns the zero value for the map type's element.
+// "missingkey=error"
+// Execution stops immediately with an error.
+func (t *Template) Option(opt ...string) *Template {
+ t.init()
+ for _, s := range opt {
+ t.setOption(s)
+ }
+ return t
+}
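+
+// Illustrative usage:
+//
+//	t := template.Must(template.New("t").Option("missingkey=error").Parse(`{{.gone}}`))
+//	// Executing t against a map without the key "gone" now returns an error.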
+
+func (t *Template) setOption(opt string) {
+ if opt == "" {
+ panic("empty option string")
+ }
+ // key=value
+ if key, value, ok := strings.Cut(opt, "="); ok {
+ switch key {
+ case "missingkey":
+ switch value {
+ case "invalid", "default":
+ t.option.missingKey = mapInvalid
+ return
+ case "zero":
+ t.option.missingKey = mapZeroValue
+ return
+ case "error":
+ t.option.missingKey = mapError
+ return
+ }
+ }
+ }
+ panic("unrecognized option: " + opt)
+}
diff --git a/contrib/go/_std_1.19/src/text/template/parse/lex.go b/contrib/go/_std_1.19/src/text/template/parse/lex.go
new file mode 100644
index 0000000000..29403dd947
--- /dev/null
+++ b/contrib/go/_std_1.19/src/text/template/parse/lex.go
@@ -0,0 +1,679 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// item represents a token or text string returned from the scanner.
+type item struct {
+ typ itemType // The type of this item.
+ pos Pos // The starting position, in bytes, of this item in the input string.
+ val string // The value of this item.
+ line int // The line number at the start of this item.
+}
+
+func (i item) String() string {
+ switch {
+ case i.typ == itemEOF:
+ return "EOF"
+ case i.typ == itemError:
+ return i.val
+ case i.typ > itemKeyword:
+ return fmt.Sprintf("<%s>", i.val)
+ case len(i.val) > 10:
+ return fmt.Sprintf("%.10q...", i.val)
+ }
+ return fmt.Sprintf("%q", i.val)
+}
+
+// itemType identifies the type of lex items.
+type itemType int
+
+const (
+ itemError itemType = iota // error occurred; value is text of error
+ itemBool // boolean constant
+ itemChar // printable ASCII character; grab bag for comma etc.
+ itemCharConstant // character constant
+ itemComment // comment text
+ itemComplex // complex constant (1+2i); imaginary is just a number
+ itemAssign // equals ('=') introducing an assignment
+ itemDeclare // colon-equals (':=') introducing a declaration
+ itemEOF
+ itemField // alphanumeric identifier starting with '.'
+ itemIdentifier // alphanumeric identifier not starting with '.'
+ itemLeftDelim // left action delimiter
+ itemLeftParen // '(' inside action
+ itemNumber // simple number, including imaginary
+ itemPipe // pipe symbol
+ itemRawString // raw quoted string (includes quotes)
+ itemRightDelim // right action delimiter
+ itemRightParen // ')' inside action
+ itemSpace // run of spaces separating arguments
+ itemString // quoted string (includes quotes)
+ itemText // plain text
+ itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'
+ // Keywords appear after all the rest.
+ itemKeyword // used only to delimit the keywords
+ itemBlock // block keyword
+ itemBreak // break keyword
+ itemContinue // continue keyword
+ itemDot // the cursor, spelled '.'
+ itemDefine // define keyword
+ itemElse // else keyword
+ itemEnd // end keyword
+ itemIf // if keyword
+ itemNil // the untyped nil constant, easiest to treat as a keyword
+ itemRange // range keyword
+ itemTemplate // template keyword
+ itemWith // with keyword
+)
+
+var key = map[string]itemType{
+ ".": itemDot,
+ "block": itemBlock,
+ "break": itemBreak,
+ "continue": itemContinue,
+ "define": itemDefine,
+ "else": itemElse,
+ "end": itemEnd,
+ "if": itemIf,
+ "range": itemRange,
+ "nil": itemNil,
+ "template": itemTemplate,
+ "with": itemWith,
+}
+
+const eof = -1
+
+// Trimming spaces.
+// If the action begins "{{- " rather than "{{", then all space/tab/newlines
+// preceding the action are trimmed; conversely if it ends " -}}" all
+// space/tab/newlines following the action are trimmed. This is done
+// entirely in the lexer; the
+// parser never sees it happen. We require an ASCII space (' ', \t, \r, \n)
+// to be present to avoid ambiguity with things like "{{-3}}". It reads
+// better with the space present anyway. For simplicity, only ASCII
+// does the job.
+const (
+ spaceChars = " \t\r\n" // These are the space characters defined by Go itself.
+ trimMarker = '-' // Attached to left/right delimiter, trims trailing spaces from preceding/following text.
+ trimMarkerLen = Pos(1 + 1) // marker plus space before or after
+)
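+
+// For example, with the default delimiters the input "a  {{- 3 -}}  b"
+// lexes as the text "a", an action containing the number 3, and the
+// text "b"; the spaces around the action are consumed by the trim markers.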
+
+// stateFn represents the state of the scanner as a function that returns the next state.
+type stateFn func(*lexer) stateFn
+
+// lexer holds the state of the scanner.
+type lexer struct {
+ name string // the name of the input; used only for error reports
+ input string // the string being scanned
+ leftDelim string // start of action
+ rightDelim string // end of action
+ emitComment bool // emit itemComment tokens.
+ pos Pos // current position in the input
+ start Pos // start position of this item
+ atEOF bool // we have hit the end of input and returned eof
+ items chan item // channel of scanned items
+ parenDepth int // nesting depth of ( ) exprs
+ line int // 1+number of newlines seen
+ startLine int // start line of this item
+ breakOK bool // break keyword allowed
+ continueOK bool // continue keyword allowed
+}
+
+// next returns the next rune in the input.
+func (l *lexer) next() rune {
+ if int(l.pos) >= len(l.input) {
+ l.atEOF = true
+ return eof
+ }
+ r, w := utf8.DecodeRuneInString(l.input[l.pos:])
+ l.pos += Pos(w)
+ if r == '\n' {
+ l.line++
+ }
+ return r
+}
+
+// peek returns but does not consume the next rune in the input.
+func (l *lexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+// backup steps back one rune.
+func (l *lexer) backup() {
+ if !l.atEOF && l.pos > 0 {
+ r, w := utf8.DecodeLastRuneInString(l.input[:l.pos])
+ l.pos -= Pos(w)
+ // Correct newline count.
+ if r == '\n' {
+ l.line--
+ }
+ }
+}
+
+// emit passes an item back to the client.
+func (l *lexer) emit(t itemType) {
+ l.items <- item{t, l.start, l.input[l.start:l.pos], l.startLine}
+ l.start = l.pos
+ l.startLine = l.line
+}
+
+// ignore skips over the pending input before this point.
+func (l *lexer) ignore() {
+ l.line += strings.Count(l.input[l.start:l.pos], "\n")
+ l.start = l.pos
+ l.startLine = l.line
+}
+
+// accept consumes the next rune if it's from the valid set.
+func (l *lexer) accept(valid string) bool {
+ if strings.ContainsRune(valid, l.next()) {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+// acceptRun consumes a run of runes from the valid set.
+func (l *lexer) acceptRun(valid string) {
+ for strings.ContainsRune(valid, l.next()) {
+ }
+ l.backup()
+}
+
+// errorf returns an error token and terminates the scan by passing
+// back a nil pointer that will be the next state, terminating l.nextItem.
+func (l *lexer) errorf(format string, args ...any) stateFn {
+ l.items <- item{itemError, l.start, fmt.Sprintf(format, args...), l.startLine}
+ return nil
+}
+
+// nextItem returns the next item from the input.
+// Called by the parser, not in the lexing goroutine.
+func (l *lexer) nextItem() item {
+ return <-l.items
+}
+
+// drain drains the output so the lexing goroutine will exit.
+// Called by the parser, not in the lexing goroutine.
+func (l *lexer) drain() {
+ for range l.items {
+ }
+}
+
+// lex creates a new scanner for the input string.
+func lex(name, input, left, right string, emitComment, breakOK, continueOK bool) *lexer {
+ if left == "" {
+ left = leftDelim
+ }
+ if right == "" {
+ right = rightDelim
+ }
+ l := &lexer{
+ name: name,
+ input: input,
+ leftDelim: left,
+ rightDelim: right,
+ emitComment: emitComment,
+ breakOK: breakOK,
+ continueOK: continueOK,
+ items: make(chan item),
+ line: 1,
+ startLine: 1,
+ }
+ go l.run()
+ return l
+}
+
+// run runs the state machine for the lexer.
+func (l *lexer) run() {
+ for state := lexText; state != nil; {
+ state = state(l)
+ }
+ close(l.items)
+}
+
+// state functions
+
+const (
+ leftDelim = "{{"
+ rightDelim = "}}"
+ leftComment = "/*"
+ rightComment = "*/"
+)
+
+// lexText scans until an opening action delimiter, "{{".
+func lexText(l *lexer) stateFn {
+ if x := strings.Index(l.input[l.pos:], l.leftDelim); x >= 0 {
+ ldn := Pos(len(l.leftDelim))
+ l.pos += Pos(x)
+ trimLength := Pos(0)
+ if hasLeftTrimMarker(l.input[l.pos+ldn:]) {
+ trimLength = rightTrimLength(l.input[l.start:l.pos])
+ }
+ l.pos -= trimLength
+ if l.pos > l.start {
+ l.line += strings.Count(l.input[l.start:l.pos], "\n")
+ l.emit(itemText)
+ }
+ l.pos += trimLength
+ l.ignore()
+ return lexLeftDelim
+ }
+ l.pos = Pos(len(l.input))
+ // Correctly reached EOF.
+ if l.pos > l.start {
+ l.line += strings.Count(l.input[l.start:l.pos], "\n")
+ l.emit(itemText)
+ }
+ l.emit(itemEOF)
+ return nil
+}
+
+// rightTrimLength returns the length of the spaces at the end of the string.
+func rightTrimLength(s string) Pos {
+ return Pos(len(s) - len(strings.TrimRight(s, spaceChars)))
+}
+
+// atRightDelim reports whether the lexer is at a right delimiter, possibly preceded by a trim marker.
+func (l *lexer) atRightDelim() (delim, trimSpaces bool) {
+ if hasRightTrimMarker(l.input[l.pos:]) && strings.HasPrefix(l.input[l.pos+trimMarkerLen:], l.rightDelim) { // With trim marker.
+ return true, true
+ }
+ if strings.HasPrefix(l.input[l.pos:], l.rightDelim) { // Without trim marker.
+ return true, false
+ }
+ return false, false
+}
+
+// leftTrimLength returns the length of the spaces at the beginning of the string.
+func leftTrimLength(s string) Pos {
+ return Pos(len(s) - len(strings.TrimLeft(s, spaceChars)))
+}
+
+// lexLeftDelim scans the left delimiter, which is known to be present, possibly with a trim marker.
+func lexLeftDelim(l *lexer) stateFn {
+ l.pos += Pos(len(l.leftDelim))
+ trimSpace := hasLeftTrimMarker(l.input[l.pos:])
+ afterMarker := Pos(0)
+ if trimSpace {
+ afterMarker = trimMarkerLen
+ }
+ if strings.HasPrefix(l.input[l.pos+afterMarker:], leftComment) {
+ l.pos += afterMarker
+ l.ignore()
+ return lexComment
+ }
+ l.emit(itemLeftDelim)
+ l.pos += afterMarker
+ l.ignore()
+ l.parenDepth = 0
+ return lexInsideAction
+}
+
+// lexComment scans a comment. The left comment marker is known to be present.
+func lexComment(l *lexer) stateFn {
+ l.pos += Pos(len(leftComment))
+ i := strings.Index(l.input[l.pos:], rightComment)
+ if i < 0 {
+ return l.errorf("unclosed comment")
+ }
+ l.pos += Pos(i + len(rightComment))
+ delim, trimSpace := l.atRightDelim()
+ if !delim {
+ return l.errorf("comment ends before closing delimiter")
+ }
+ if l.emitComment {
+ l.emit(itemComment)
+ }
+ if trimSpace {
+ l.pos += trimMarkerLen
+ }
+ l.pos += Pos(len(l.rightDelim))
+ if trimSpace {
+ l.pos += leftTrimLength(l.input[l.pos:])
+ }
+ l.ignore()
+ return lexText
+}
+
+// lexRightDelim scans the right delimiter, which is known to be present, possibly with a trim marker.
+func lexRightDelim(l *lexer) stateFn {
+ trimSpace := hasRightTrimMarker(l.input[l.pos:])
+ if trimSpace {
+ l.pos += trimMarkerLen
+ l.ignore()
+ }
+ l.pos += Pos(len(l.rightDelim))
+ l.emit(itemRightDelim)
+ if trimSpace {
+ l.pos += leftTrimLength(l.input[l.pos:])
+ l.ignore()
+ }
+ return lexText
+}
+
+// lexInsideAction scans the elements inside action delimiters.
+func lexInsideAction(l *lexer) stateFn {
+ // Either number, quoted string, or identifier.
+ // Spaces separate arguments; runs of spaces turn into itemSpace.
+ // Pipe symbols separate and are emitted.
+ delim, _ := l.atRightDelim()
+ if delim {
+ if l.parenDepth == 0 {
+ return lexRightDelim
+ }
+ return l.errorf("unclosed left paren")
+ }
+ switch r := l.next(); {
+ case r == eof:
+ return l.errorf("unclosed action")
+ case isSpace(r):
+ l.backup() // Put space back in case we have " -}}".
+ return lexSpace
+ case r == '=':
+ l.emit(itemAssign)
+ case r == ':':
+ if l.next() != '=' {
+ return l.errorf("expected :=")
+ }
+ l.emit(itemDeclare)
+ case r == '|':
+ l.emit(itemPipe)
+ case r == '"':
+ return lexQuote
+ case r == '`':
+ return lexRawQuote
+ case r == '$':
+ return lexVariable
+ case r == '\'':
+ return lexChar
+ case r == '.':
+ // special look-ahead for ".field" so we don't break l.backup().
+ if l.pos < Pos(len(l.input)) {
+ r := l.input[l.pos]
+ if r < '0' || '9' < r {
+ return lexField
+ }
+ }
+ fallthrough // '.' can start a number.
+ case r == '+' || r == '-' || ('0' <= r && r <= '9'):
+ l.backup()
+ return lexNumber
+ case isAlphaNumeric(r):
+ l.backup()
+ return lexIdentifier
+ case r == '(':
+ l.emit(itemLeftParen)
+ l.parenDepth++
+ case r == ')':
+ l.emit(itemRightParen)
+ l.parenDepth--
+ if l.parenDepth < 0 {
+ return l.errorf("unexpected right paren %#U", r)
+ }
+ case r <= unicode.MaxASCII && unicode.IsPrint(r):
+ l.emit(itemChar)
+ default:
+ return l.errorf("unrecognized character in action: %#U", r)
+ }
+ return lexInsideAction
+}
+
+// lexSpace scans a run of space characters.
+// We have not consumed the first space, which is known to be present.
+// Take care if there is a trim-marked right delimiter, which starts with a space.
+func lexSpace(l *lexer) stateFn {
+ var r rune
+ var numSpaces int
+ for {
+ r = l.peek()
+ if !isSpace(r) {
+ break
+ }
+ l.next()
+ numSpaces++
+ }
+ // Be careful about a trim-marked closing delimiter, which has a minus
+ // after a space. We know there is a space, so check for the '-' that might follow.
+ if hasRightTrimMarker(l.input[l.pos-1:]) && strings.HasPrefix(l.input[l.pos-1+trimMarkerLen:], l.rightDelim) {
+ l.backup() // Before the space.
+ if numSpaces == 1 {
+ return lexRightDelim // On the delim, so go right to that.
+ }
+ }
+ l.emit(itemSpace)
+ return lexInsideAction
+}
+
+// lexIdentifier scans an alphanumeric.
+func lexIdentifier(l *lexer) stateFn {
+Loop:
+ for {
+ switch r := l.next(); {
+ case isAlphaNumeric(r):
+ // absorb.
+ default:
+ l.backup()
+ word := l.input[l.start:l.pos]
+ if !l.atTerminator() {
+ return l.errorf("bad character %#U", r)
+ }
+ switch {
+ case key[word] > itemKeyword:
+ item := key[word]
+ if item == itemBreak && !l.breakOK || item == itemContinue && !l.continueOK {
+ l.emit(itemIdentifier)
+ } else {
+ l.emit(item)
+ }
+ case word[0] == '.':
+ l.emit(itemField)
+ case word == "true", word == "false":
+ l.emit(itemBool)
+ default:
+ l.emit(itemIdentifier)
+ }
+ break Loop
+ }
+ }
+ return lexInsideAction
+}
+
+// lexField scans a field: .Alphanumeric.
+// The . has been scanned.
+func lexField(l *lexer) stateFn {
+ return lexFieldOrVariable(l, itemField)
+}
+
+// lexVariable scans a Variable: $Alphanumeric.
+// The $ has been scanned.
+func lexVariable(l *lexer) stateFn {
+ if l.atTerminator() { // Nothing interesting follows -> "$".
+ l.emit(itemVariable)
+ return lexInsideAction
+ }
+ return lexFieldOrVariable(l, itemVariable)
+}
+
+// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
+// The . or $ has been scanned.
+func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
+ if l.atTerminator() { // Nothing interesting follows -> "." or "$".
+ if typ == itemVariable {
+ l.emit(itemVariable)
+ } else {
+ l.emit(itemDot)
+ }
+ return lexInsideAction
+ }
+ var r rune
+ for {
+ r = l.next()
+ if !isAlphaNumeric(r) {
+ l.backup()
+ break
+ }
+ }
+ if !l.atTerminator() {
+ return l.errorf("bad character %#U", r)
+ }
+ l.emit(typ)
+ return lexInsideAction
+}
+
+// atTerminator reports whether the input is at a valid termination character to
+// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
+// like "$x+2" not being acceptable without a space, in case we decide one
+// day to implement arithmetic.
+func (l *lexer) atTerminator() bool {
+ r := l.peek()
+ if isSpace(r) {
+ return true
+ }
+ switch r {
+ case eof, '.', ',', '|', ':', ')', '(':
+ return true
+ }
+ return strings.HasPrefix(l.input[l.pos:], l.rightDelim)
+}
+
+// lexChar scans a character constant. The initial quote is already
+// scanned. Syntax checking is done by the parser.
+func lexChar(l *lexer) stateFn {
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != eof && r != '\n' {
+ break
+ }
+ fallthrough
+ case eof, '\n':
+ return l.errorf("unterminated character constant")
+ case '\'':
+ break Loop
+ }
+ }
+ l.emit(itemCharConstant)
+ return lexInsideAction
+}
+
+// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
+// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
+// and "089" - but when it's wrong the input is invalid and the parser (via
+// strconv) will notice.
+func lexNumber(l *lexer) stateFn {
+ if !l.scanNumber() {
+ return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+ }
+ if sign := l.peek(); sign == '+' || sign == '-' {
+ // Complex: 1+2i. No spaces, must end in 'i'.
+ if !l.scanNumber() || l.input[l.pos-1] != 'i' {
+ return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+ }
+ l.emit(itemComplex)
+ } else {
+ l.emit(itemNumber)
+ }
+ return lexInsideAction
+}
+
+func (l *lexer) scanNumber() bool {
+ // Optional leading sign.
+ l.accept("+-")
+ // Is it hex?
+ digits := "0123456789_"
+ if l.accept("0") {
+ // Note: Leading 0 does not mean octal in floats.
+ if l.accept("xX") {
+ digits = "0123456789abcdefABCDEF_"
+ } else if l.accept("oO") {
+ digits = "01234567_"
+ } else if l.accept("bB") {
+ digits = "01_"
+ }
+ }
+ l.acceptRun(digits)
+ if l.accept(".") {
+ l.acceptRun(digits)
+ }
+ if len(digits) == 10+1 && l.accept("eE") {
+ l.accept("+-")
+ l.acceptRun("0123456789_")
+ }
+ if len(digits) == 16+6+1 && l.accept("pP") {
+ l.accept("+-")
+ l.acceptRun("0123456789_")
+ }
+ // Is it imaginary?
+ l.accept("i")
+ // Next thing mustn't be alphanumeric.
+ if isAlphaNumeric(l.peek()) {
+ l.next()
+ return false
+ }
+ return true
+}
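+
+// Illustrative forms this scanner accepts: 12, 1_000, 0x1F, 0o17, 0b101,
+// 3.14, 1e6, 0x1p-2, and imaginary constants such as 2i.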
+
+// lexQuote scans a quoted string.
+func lexQuote(l *lexer) stateFn {
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != eof && r != '\n' {
+ break
+ }
+ fallthrough
+ case eof, '\n':
+ return l.errorf("unterminated quoted string")
+ case '"':
+ break Loop
+ }
+ }
+ l.emit(itemString)
+ return lexInsideAction
+}
+
+// lexRawQuote scans a raw quoted string.
+func lexRawQuote(l *lexer) stateFn {
+Loop:
+ for {
+ switch l.next() {
+ case eof:
+ return l.errorf("unterminated raw quoted string")
+ case '`':
+ break Loop
+ }
+ }
+ l.emit(itemRawString)
+ return lexInsideAction
+}
+
+// isSpace reports whether r is a space character.
+func isSpace(r rune) bool {
+ return r == ' ' || r == '\t' || r == '\r' || r == '\n'
+}
+
+// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
+func isAlphaNumeric(r rune) bool {
+ return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
+}
+
+func hasLeftTrimMarker(s string) bool {
+ return len(s) >= 2 && s[0] == trimMarker && isSpace(rune(s[1]))
+}
+
+func hasRightTrimMarker(s string) bool {
+ return len(s) >= 2 && isSpace(rune(s[0])) && s[1] == trimMarker
+}
diff --git a/contrib/go/_std_1.18/src/text/template/parse/node.go b/contrib/go/_std_1.19/src/text/template/parse/node.go
index 47268225c8..47268225c8 100644
--- a/contrib/go/_std_1.18/src/text/template/parse/node.go
+++ b/contrib/go/_std_1.19/src/text/template/parse/node.go
diff --git a/contrib/go/_std_1.19/src/text/template/parse/parse.go b/contrib/go/_std_1.19/src/text/template/parse/parse.go
new file mode 100644
index 0000000000..00c258ad5d
--- /dev/null
+++ b/contrib/go/_std_1.19/src/text/template/parse/parse.go
@@ -0,0 +1,826 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package parse builds parse trees for templates as defined by text/template
+// and html/template. Clients should use those packages to construct templates
+// rather than this one, which provides shared internal data structures not
+// intended for general use.
+package parse
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// Tree is the representation of a single parsed template.
+type Tree struct {
+ Name string // name of the template represented by the tree.
+ ParseName string // name of the top-level template during parsing, for error messages.
+ Root *ListNode // top-level root of the tree.
+ Mode Mode // parsing mode.
+ text string // text parsed to create the template (or its parent)
+ // Parsing only; cleared after parse.
+ funcs []map[string]any
+ lex *lexer
+ token [3]item // three-token lookahead for parser.
+ peekCount int
+ vars []string // variables defined at the moment.
+ treeSet map[string]*Tree
+ actionLine int // line of left delim starting action
+ rangeDepth int
+}
+
+// A mode value is a set of flags (or 0). Modes control parser behavior.
+type Mode uint
+
+const (
+ ParseComments Mode = 1 << iota // parse comments and add them to AST
+ SkipFuncCheck // do not check that functions are defined
+)
+
+// Copy returns a copy of the Tree. Any parsing state is discarded.
+func (t *Tree) Copy() *Tree {
+ if t == nil {
+ return nil
+ }
+ return &Tree{
+ Name: t.Name,
+ ParseName: t.ParseName,
+ Root: t.Root.CopyList(),
+ text: t.text,
+ }
+}
+
+// Parse returns a map from template name to parse.Tree, created by parsing the
+// templates described in the argument string. The top-level template will be
+// given the specified name. If an error is encountered, parsing stops and an
+// empty map is returned with the error.
+func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]any) (map[string]*Tree, error) {
+ treeSet := make(map[string]*Tree)
+ t := New(name)
+ t.text = text
+ _, err := t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
+ return treeSet, err
+}
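+
+// Illustrative call:
+//
+//	trees, err := Parse("page", "Hello, {{.Name}}!", "", "")
+//	// On success, trees["page"] holds the tree for the top-level template.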
+
+// next returns the next token.
+func (t *Tree) next() item {
+ if t.peekCount > 0 {
+ t.peekCount--
+ } else {
+ t.token[0] = t.lex.nextItem()
+ }
+ return t.token[t.peekCount]
+}
+
+// backup backs the input stream up one token.
+func (t *Tree) backup() {
+ t.peekCount++
+}
+
+// backup2 backs the input stream up two tokens.
+// The zeroth token is already there.
+func (t *Tree) backup2(t1 item) {
+ t.token[1] = t1
+ t.peekCount = 2
+}
+
+// backup3 backs the input stream up three tokens.
+// The zeroth token is already there.
+func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
+ t.token[1] = t1
+ t.token[2] = t2
+ t.peekCount = 3
+}
+
+// peek returns but does not consume the next token.
+func (t *Tree) peek() item {
+ if t.peekCount > 0 {
+ return t.token[t.peekCount-1]
+ }
+ t.peekCount = 1
+ t.token[0] = t.lex.nextItem()
+ return t.token[0]
+}
+
+// nextNonSpace returns the next non-space token.
+func (t *Tree) nextNonSpace() (token item) {
+ for {
+ token = t.next()
+ if token.typ != itemSpace {
+ break
+ }
+ }
+ return token
+}
+
+// peekNonSpace returns but does not consume the next non-space token.
+func (t *Tree) peekNonSpace() item {
+ token := t.nextNonSpace()
+ t.backup()
+ return token
+}
+
+// Parsing.
+
+// New allocates a new parse tree with the given name.
+func New(name string, funcs ...map[string]any) *Tree {
+ return &Tree{
+ Name: name,
+ funcs: funcs,
+ }
+}
+
+// ErrorContext returns a textual representation of the location of the node in the input text.
+// The receiver is only used when the node does not have a pointer to the tree inside,
+// which can occur in old code.
+func (t *Tree) ErrorContext(n Node) (location, context string) {
+ pos := int(n.Position())
+ tree := n.tree()
+ if tree == nil {
+ tree = t
+ }
+ text := tree.text[:pos]
+ byteNum := strings.LastIndex(text, "\n")
+ if byteNum == -1 {
+ byteNum = pos // On first line.
+ } else {
+ byteNum++ // After the newline.
+ byteNum = pos - byteNum
+ }
+ lineNum := 1 + strings.Count(text, "\n")
+ context = n.String()
+ return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
+}
+
+// errorf formats the error and terminates processing.
+func (t *Tree) errorf(format string, args ...any) {
+ t.Root = nil
+ format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.token[0].line, format)
+ panic(fmt.Errorf(format, args...))
+}
+
+// error terminates processing.
+func (t *Tree) error(err error) {
+ t.errorf("%s", err)
+}
+
+// expect consumes the next token and guarantees it has the required type.
+func (t *Tree) expect(expected itemType, context string) item {
+ token := t.nextNonSpace()
+ if token.typ != expected {
+ t.unexpected(token, context)
+ }
+ return token
+}
+
+// expectOneOf consumes the next token and guarantees it has one of the required types.
+func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
+ token := t.nextNonSpace()
+ if token.typ != expected1 && token.typ != expected2 {
+ t.unexpected(token, context)
+ }
+ return token
+}
+
+// unexpected complains about the token and terminates processing.
+func (t *Tree) unexpected(token item, context string) {
+ if token.typ == itemError {
+ extra := ""
+ if t.actionLine != 0 && t.actionLine != token.line {
+ extra = fmt.Sprintf(" in action started at %s:%d", t.ParseName, t.actionLine)
+ if strings.HasSuffix(token.val, " action") {
+ extra = extra[len(" in action"):] // avoid "action in action"
+ }
+ }
+ t.errorf("%s%s", token, extra)
+ }
+ t.errorf("unexpected %s in %s", token, context)
+}
+
+// recover is the handler that turns panics into returns from the top level of Parse.
+func (t *Tree) recover(errp *error) {
+ e := recover()
+ if e != nil {
+ if _, ok := e.(runtime.Error); ok {
+ panic(e)
+ }
+ if t != nil {
+ t.lex.drain()
+ t.stopParse()
+ }
+ *errp = e.(error)
+ }
+}
+
+// startParse initializes the parser, using the lexer.
+func (t *Tree) startParse(funcs []map[string]any, lex *lexer, treeSet map[string]*Tree) {
+ t.Root = nil
+ t.lex = lex
+ t.vars = []string{"$"}
+ t.funcs = funcs
+ t.treeSet = treeSet
+}
+
+// stopParse terminates parsing.
+func (t *Tree) stopParse() {
+ t.lex = nil
+ t.vars = nil
+ t.funcs = nil
+ t.treeSet = nil
+}
+
+// Parse parses the template definition string to construct a representation of
+// the template for execution. If either action delimiter string is empty, the
+// default ("{{" or "}}") is used. Embedded template definitions are added to
+// the treeSet map.
+func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]any) (tree *Tree, err error) {
+ defer t.recover(&err)
+ t.ParseName = t.Name
+ emitComment := t.Mode&ParseComments != 0
+ breakOK := !t.hasFunction("break")
+ continueOK := !t.hasFunction("continue")
+ lexer := lex(t.Name, text, leftDelim, rightDelim, emitComment, breakOK, continueOK)
+ t.startParse(funcs, lexer, treeSet)
+ t.text = text
+ t.parse()
+ t.add()
+ t.stopParse()
+ return t, nil
+}
+
+// add adds tree to t.treeSet.
+func (t *Tree) add() {
+ tree := t.treeSet[t.Name]
+ if tree == nil || IsEmptyTree(tree.Root) {
+ t.treeSet[t.Name] = t
+ return
+ }
+ if !IsEmptyTree(t.Root) {
+ t.errorf("template: multiple definition of template %q", t.Name)
+ }
+}
+
+// IsEmptyTree reports whether this tree (node) is empty of everything but space or comments.
+func IsEmptyTree(n Node) bool {
+ switch n := n.(type) {
+ case nil:
+ return true
+ case *ActionNode:
+ case *CommentNode:
+ return true
+ case *IfNode:
+ case *ListNode:
+ for _, node := range n.Nodes {
+ if !IsEmptyTree(node) {
+ return false
+ }
+ }
+ return true
+ case *RangeNode:
+ case *TemplateNode:
+ case *TextNode:
+ return len(bytes.TrimSpace(n.Text)) == 0
+ case *WithNode:
+ default:
+ panic("unknown node: " + n.String())
+ }
+ return false
+}
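+
+// For illustration, IsEmptyTree reports true for the trees of "", "  \t\n",
+// and "{{/* comment */}}", and false as soon as any text or action appears.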
+
+// parse is the top-level parser for a template, essentially the same
+// as itemList except it also parses {{define}} actions.
+// It runs to EOF.
+func (t *Tree) parse() {
+ t.Root = t.newList(t.peek().pos)
+ for t.peek().typ != itemEOF {
+ if t.peek().typ == itemLeftDelim {
+ delim := t.next()
+ if t.nextNonSpace().typ == itemDefine {
+ newT := New("definition") // name will be updated once we know it.
+ newT.text = t.text
+ newT.Mode = t.Mode
+ newT.ParseName = t.ParseName
+ newT.startParse(t.funcs, t.lex, t.treeSet)
+ newT.parseDefinition()
+ continue
+ }
+ t.backup2(delim)
+ }
+ switch n := t.textOrAction(); n.Type() {
+ case nodeEnd, nodeElse:
+ t.errorf("unexpected %s", n)
+ default:
+ t.Root.append(n)
+ }
+ }
+}
+
+// parseDefinition parses a {{define}} ... {{end}} template definition and
+// installs the definition in t.treeSet. The "define" keyword has already
+// been scanned.
+func (t *Tree) parseDefinition() {
+ const context = "define clause"
+ name := t.expectOneOf(itemString, itemRawString, context)
+ var err error
+ t.Name, err = strconv.Unquote(name.val)
+ if err != nil {
+ t.error(err)
+ }
+ t.expect(itemRightDelim, context)
+ var end Node
+ t.Root, end = t.itemList()
+ if end.Type() != nodeEnd {
+ t.errorf("unexpected %s in %s", end, context)
+ }
+ t.add()
+ t.stopParse()
+}
+
+// itemList:
+//
+// textOrAction*
+//
+// Terminates at {{end}} or {{else}}, returned separately.
+func (t *Tree) itemList() (list *ListNode, next Node) {
+ list = t.newList(t.peekNonSpace().pos)
+ for t.peekNonSpace().typ != itemEOF {
+ n := t.textOrAction()
+ switch n.Type() {
+ case nodeEnd, nodeElse:
+ return list, n
+ }
+ list.append(n)
+ }
+ t.errorf("unexpected EOF")
+ return
+}
+
+// textOrAction:
+//
+// text | comment | action
+func (t *Tree) textOrAction() Node {
+ switch token := t.nextNonSpace(); token.typ {
+ case itemText:
+ return t.newText(token.pos, token.val)
+ case itemLeftDelim:
+ t.actionLine = token.line
+ defer t.clearActionLine()
+ return t.action()
+ case itemComment:
+ return t.newComment(token.pos, token.val)
+ default:
+ t.unexpected(token, "input")
+ }
+ return nil
+}
+
+func (t *Tree) clearActionLine() {
+ t.actionLine = 0
+}
+
+// Action:
+//
+// control
+// command ("|" command)*
+//
+// Left delim is past. Now get actions.
+// First word could be a keyword such as range.
+func (t *Tree) action() (n Node) {
+ switch token := t.nextNonSpace(); token.typ {
+ case itemBlock:
+ return t.blockControl()
+ case itemBreak:
+ return t.breakControl(token.pos, token.line)
+ case itemContinue:
+ return t.continueControl(token.pos, token.line)
+ case itemElse:
+ return t.elseControl()
+ case itemEnd:
+ return t.endControl()
+ case itemIf:
+ return t.ifControl()
+ case itemRange:
+ return t.rangeControl()
+ case itemTemplate:
+ return t.templateControl()
+ case itemWith:
+ return t.withControl()
+ }
+ t.backup()
+ token := t.peek()
+ // Do not pop variables; they persist until "end".
+ return t.newAction(token.pos, token.line, t.pipeline("command", itemRightDelim))
+}
+
+// Break:
+//
+// {{break}}
+//
+// Break keyword is past.
+func (t *Tree) breakControl(pos Pos, line int) Node {
+ if token := t.nextNonSpace(); token.typ != itemRightDelim {
+ t.unexpected(token, "{{break}}")
+ }
+ if t.rangeDepth == 0 {
+ t.errorf("{{break}} outside {{range}}")
+ }
+ return t.newBreak(pos, line)
+}
+
+// Continue:
+//
+// {{continue}}
+//
+// Continue keyword is past.
+func (t *Tree) continueControl(pos Pos, line int) Node {
+ if token := t.nextNonSpace(); token.typ != itemRightDelim {
+ t.unexpected(token, "{{continue}}")
+ }
+ if t.rangeDepth == 0 {
+ t.errorf("{{continue}} outside {{range}}")
+ }
+ return t.newContinue(pos, line)
+}
+
+// Pipeline:
+//
+// declarations? command ('|' command)*
+func (t *Tree) pipeline(context string, end itemType) (pipe *PipeNode) {
+ token := t.peekNonSpace()
+ pipe = t.newPipeline(token.pos, token.line, nil)
+ // Are there declarations or assignments?
+decls:
+ if v := t.peekNonSpace(); v.typ == itemVariable {
+ t.next()
+ // Since space is a token, we need 3-token look-ahead here in the worst case:
+ // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
+ // argument variable rather than a declaration. So remember the token
+ // adjacent to the variable so we can push it back if necessary.
+ tokenAfterVariable := t.peek()
+ next := t.peekNonSpace()
+ switch {
+ case next.typ == itemAssign, next.typ == itemDeclare:
+ pipe.IsAssign = next.typ == itemAssign
+ t.nextNonSpace()
+ pipe.Decl = append(pipe.Decl, t.newVariable(v.pos, v.val))
+ t.vars = append(t.vars, v.val)
+ case next.typ == itemChar && next.val == ",":
+ t.nextNonSpace()
+ pipe.Decl = append(pipe.Decl, t.newVariable(v.pos, v.val))
+ t.vars = append(t.vars, v.val)
+ if context == "range" && len(pipe.Decl) < 2 {
+ switch t.peekNonSpace().typ {
+ case itemVariable, itemRightDelim, itemRightParen:
+ // second initialized variable in a range pipeline
+ goto decls
+ default:
+ t.errorf("range can only initialize variables")
+ }
+ }
+ t.errorf("too many declarations in %s", context)
+ case tokenAfterVariable.typ == itemSpace:
+ t.backup3(v, tokenAfterVariable)
+ default:
+ t.backup2(v)
+ }
+ }
+ for {
+ switch token := t.nextNonSpace(); token.typ {
+ case end:
+ // At this point, the pipeline is complete
+ t.checkPipeline(pipe, context)
+ return
+ case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
+ itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
+ t.backup()
+ pipe.append(t.command())
+ default:
+ t.unexpected(token, context)
+ }
+ }
+}
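+
+// For illustration, the declaration forms accepted above include:
+//
+//	{{$x := .Count}}            declaration
+//	{{$x = .Count}}             assignment (IsAssign is set)
+//	{{range $i, $v := .Items}}  two variables, "range" context only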
+
+func (t *Tree) checkPipeline(pipe *PipeNode, context string) {
+ // Reject empty pipelines
+ if len(pipe.Cmds) == 0 {
+ t.errorf("missing value for %s", context)
+ }
+ // Only the first command of a pipeline can start with a non-executable operand.
+ for i, c := range pipe.Cmds[1:] {
+ switch c.Args[0].Type() {
+ case NodeBool, NodeDot, NodeNil, NodeNumber, NodeString:
+ // With A|B|C, pipeline stage 2 is B
+ t.errorf("non executable command in pipeline stage %d", i+2)
+ }
+ }
+}
+
+func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
+ defer t.popVars(len(t.vars))
+ pipe = t.pipeline(context, itemRightDelim)
+ if context == "range" {
+ t.rangeDepth++
+ }
+ var next Node
+ list, next = t.itemList()
+ if context == "range" {
+ t.rangeDepth--
+ }
+ switch next.Type() {
+ case nodeEnd: // done
+ case nodeElse:
+ if allowElseIf {
+ // Special case for "else if". If the "else" is followed immediately by an "if",
+ // the elseControl will have left the "if" token pending. Treat
+ // {{if a}}_{{else if b}}_{{end}}
+ // as
+ // {{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
+ // To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
+ // is assumed. This technique works even for long if-else-if chains.
+ // TODO: Should we allow else-if in with and range?
+ if t.peek().typ == itemIf {
+ t.next() // Consume the "if" token.
+ elseList = t.newList(next.Position())
+ elseList.append(t.ifControl())
+ // Do not consume the next item - only one {{end}} required.
+ break
+ }
+ }
+ elseList, next = t.itemList()
+ if next.Type() != nodeEnd {
+ t.errorf("expected end; found %s", next)
+ }
+ }
+ return pipe.Position(), pipe.Line, pipe, list, elseList
+}
+
+// If:
+//
+// {{if pipeline}} itemList {{end}}
+// {{if pipeline}} itemList {{else}} itemList {{end}}
+//
+// If keyword is past.
+func (t *Tree) ifControl() Node {
+ return t.newIf(t.parseControl(true, "if"))
+}
+
+// Range:
+//
+// {{range pipeline}} itemList {{end}}
+// {{range pipeline}} itemList {{else}} itemList {{end}}
+//
+// Range keyword is past.
+func (t *Tree) rangeControl() Node {
+ r := t.newRange(t.parseControl(false, "range"))
+ return r
+}
+
+// With:
+//
+// {{with pipeline}} itemList {{end}}
+// {{with pipeline}} itemList {{else}} itemList {{end}}
+//
+// With keyword is past.
+func (t *Tree) withControl() Node {
+ return t.newWith(t.parseControl(false, "with"))
+}
+
+// End:
+//
+// {{end}}
+//
+// End keyword is past.
+func (t *Tree) endControl() Node {
+ return t.newEnd(t.expect(itemRightDelim, "end").pos)
+}
+
+// Else:
+//
+// {{else}}
+//
+// Else keyword is past.
+func (t *Tree) elseControl() Node {
+ // Special case for "else if".
+ peek := t.peekNonSpace()
+ if peek.typ == itemIf {
+ // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
+ return t.newElse(peek.pos, peek.line)
+ }
+ token := t.expect(itemRightDelim, "else")
+ return t.newElse(token.pos, token.line)
+}
+
+// Block:
+//
+// {{block stringValue pipeline}}
+//
+// Block keyword is past.
+// The name must be something that can evaluate to a string.
+// The pipeline is mandatory.
+func (t *Tree) blockControl() Node {
+ const context = "block clause"
+
+ token := t.nextNonSpace()
+ name := t.parseTemplateName(token, context)
+ pipe := t.pipeline(context, itemRightDelim)
+
+ block := New(name) // name is already known here, unlike in parse above.
+ block.text = t.text
+ block.Mode = t.Mode
+ block.ParseName = t.ParseName
+ block.startParse(t.funcs, t.lex, t.treeSet)
+ var end Node
+ block.Root, end = block.itemList()
+ if end.Type() != nodeEnd {
+ t.errorf("unexpected %s in %s", end, context)
+ }
+ block.add()
+ block.stopParse()
+
+ return t.newTemplate(token.pos, token.line, name, pipe)
+}
+
+// Template:
+//
+// {{template stringValue pipeline}}
+//
+// Template keyword is past. The name must be something that can evaluate
+// to a string.
+func (t *Tree) templateControl() Node {
+ const context = "template clause"
+ token := t.nextNonSpace()
+ name := t.parseTemplateName(token, context)
+ var pipe *PipeNode
+ if t.nextNonSpace().typ != itemRightDelim {
+ t.backup()
+ // Do not pop variables; they persist until "end".
+ pipe = t.pipeline(context, itemRightDelim)
+ }
+ return t.newTemplate(token.pos, token.line, name, pipe)
+}
+
+func (t *Tree) parseTemplateName(token item, context string) (name string) {
+ switch token.typ {
+ case itemString, itemRawString:
+ s, err := strconv.Unquote(token.val)
+ if err != nil {
+ t.error(err)
+ }
+ name = s
+ default:
+ t.unexpected(token, context)
+ }
+ return
+}
+
+// command:
+//
+// operand (space operand)*
+//
+// Space-separated arguments up to a pipeline character or right delimiter.
+// We consume the pipe character but leave the right delim to terminate the action.
+func (t *Tree) command() *CommandNode {
+ cmd := t.newCommand(t.peekNonSpace().pos)
+ for {
+ t.peekNonSpace() // skip leading spaces.
+ operand := t.operand()
+ if operand != nil {
+ cmd.append(operand)
+ }
+ switch token := t.next(); token.typ {
+ case itemSpace:
+ continue
+ case itemRightDelim, itemRightParen:
+ t.backup()
+ case itemPipe:
+ // nothing here; break loop below
+ default:
+ t.unexpected(token, "operand")
+ }
+ break
+ }
+ if len(cmd.Args) == 0 {
+ t.errorf("empty command")
+ }
+ return cmd
+}
+
+// operand:
+//
+// term .Field*
+//
+// An operand is a space-separated component of a command,
+// a term possibly followed by field accesses.
+// A nil return means the next item is not an operand.
+func (t *Tree) operand() Node {
+ node := t.term()
+ if node == nil {
+ return nil
+ }
+ if t.peek().typ == itemField {
+ chain := t.newChain(t.peek().pos, node)
+ for t.peek().typ == itemField {
+ chain.Add(t.next().val)
+ }
+ // Compatibility with original API: If the term is of type NodeField
+ // or NodeVariable, just put more fields on the original.
+ // Otherwise, keep the Chain node.
+ // Obvious parsing errors involving literal values are detected here.
+ // More complex error cases will have to be handled at execution time.
+ switch node.Type() {
+ case NodeField:
+ node = t.newField(chain.Position(), chain.String())
+ case NodeVariable:
+ node = t.newVariable(chain.Position(), chain.String())
+ case NodeBool, NodeString, NodeNumber, NodeNil, NodeDot:
+ t.errorf("unexpected . after term %q", node.String())
+ default:
+ node = chain
+ }
+ }
+ return node
+}
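+
+// For illustration: ".A.B" is folded back into a single field node, while
+// "(.A).B" keeps the Chain node because its term is a parenthesized pipeline.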
+
+// term:
+//
+// literal (number, string, nil, boolean)
+// function (identifier)
+// .
+// .Field
+// $
+// '(' pipeline ')'
+//
+// A term is a simple "expression".
+// A nil return means the next item is not a term.
+func (t *Tree) term() Node {
+ switch token := t.nextNonSpace(); token.typ {
+ case itemIdentifier:
+ checkFunc := t.Mode&SkipFuncCheck == 0
+ if checkFunc && !t.hasFunction(token.val) {
+ t.errorf("function %q not defined", token.val)
+ }
+ return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
+ case itemDot:
+ return t.newDot(token.pos)
+ case itemNil:
+ return t.newNil(token.pos)
+ case itemVariable:
+ return t.useVar(token.pos, token.val)
+ case itemField:
+ return t.newField(token.pos, token.val)
+ case itemBool:
+ return t.newBool(token.pos, token.val == "true")
+ case itemCharConstant, itemComplex, itemNumber:
+ number, err := t.newNumber(token.pos, token.val, token.typ)
+ if err != nil {
+ t.error(err)
+ }
+ return number
+ case itemLeftParen:
+ return t.pipeline("parenthesized pipeline", itemRightParen)
+ case itemString, itemRawString:
+ s, err := strconv.Unquote(token.val)
+ if err != nil {
+ t.error(err)
+ }
+ return t.newString(token.pos, token.val, s)
+ }
+ t.backup()
+ return nil
+}
+
+// hasFunction reports whether a function name exists in the Tree's maps.
+func (t *Tree) hasFunction(name string) bool {
+ for _, funcMap := range t.funcs {
+ if funcMap == nil {
+ continue
+ }
+ if funcMap[name] != nil {
+ return true
+ }
+ }
+ return false
+}
+
+// popVars trims the variable list to the specified length.
+func (t *Tree) popVars(n int) {
+ t.vars = t.vars[:n]
+}
+
+// useVar returns a node for a variable reference. It errors if the
+// variable is not defined.
+func (t *Tree) useVar(pos Pos, name string) Node {
+ v := t.newVariable(pos, name)
+ for _, varName := range t.vars {
+ if varName == v.Ident[0] {
+ return v
+ }
+ }
+ t.errorf("undefined variable %q", v.Ident[0])
+ return nil
+}
diff --git a/contrib/go/_std_1.18/src/text/template/template.go b/contrib/go/_std_1.19/src/text/template/template.go
index 776be9cd07..776be9cd07 100644
--- a/contrib/go/_std_1.18/src/text/template/template.go
+++ b/contrib/go/_std_1.19/src/text/template/template.go
diff --git a/contrib/go/_std_1.19/src/time/format.go b/contrib/go/_std_1.19/src/time/format.go
new file mode 100644
index 0000000000..8431ff89b4
--- /dev/null
+++ b/contrib/go/_std_1.19/src/time/format.go
@@ -0,0 +1,1619 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package time
+
+import "errors"
+
+// These are predefined layouts for use in Time.Format and time.Parse.
+// The reference time used in these layouts is the specific time stamp:
+//
+// 01/02 03:04:05PM '06 -0700
+//
+// (January 2, 15:04:05, 2006, in time zone seven hours west of GMT).
+// That value is recorded as the constant named Layout, listed below. As a Unix
+// time, this is 1136239445. Since MST is GMT-0700, the reference would be
+// printed by the Unix date command as:
+//
+// Mon Jan 2 15:04:05 MST 2006
+//
+// It is a regrettable historic error that the date uses the American convention
+// of putting the numerical month before the day.
+//
+// The example for Time.Format demonstrates the working of the layout string
+// in detail and is a good reference.
+//
+// Note that the RFC822, RFC850, and RFC1123 formats should be applied
+// only to local times. Applying them to UTC times will use "UTC" as the
+// time zone abbreviation, while strictly speaking those RFCs require the
+// use of "GMT" in that case.
+// In general RFC1123Z should be used instead of RFC1123 for servers
+// that insist on that format, and RFC3339 should be preferred for new protocols.
+// RFC3339, RFC822, RFC822Z, RFC1123, and RFC1123Z are useful for formatting;
+// when used with time.Parse they do not accept all the time formats
+// permitted by the RFCs and they do accept time formats not formally defined.
+// The RFC3339Nano format removes trailing zeros from the seconds field
+// and thus may not sort correctly once formatted.
+//
+// Most programs can use one of the defined constants as the layout passed to
+// Format or Parse. The rest of this comment can be ignored unless you are
+// creating a custom layout string.
+//
+// To define your own format, write down what the reference time would look like
+// formatted your way; see the values of constants like ANSIC, StampMicro or
+// Kitchen for examples. The model is to demonstrate what the reference time
+// looks like so that the Format and Parse methods can apply the same
+// transformation to a general time value.
+//
+// Here is a summary of the components of a layout string. Each element shows by
+// example the formatting of an element of the reference time. Only these values
+// are recognized. Text in the layout string that is not recognized as part of
+// the reference time is echoed verbatim during Format and expected to appear
+// verbatim in the input to Parse.
+//
+// Year: "2006" "06"
+// Month: "Jan" "January" "01" "1"
+// Day of the week: "Mon" "Monday"
+// Day of the month: "2" "_2" "02"
+// Day of the year: "__2" "002"
+// Hour: "15" "3" "03" (PM or AM)
+// Minute: "4" "04"
+// Second: "5" "05"
+// AM/PM mark: "PM"
+//
+// Numeric time zone offsets format as follows:
+//
+// "-0700" ±hhmm
+// "-07:00" ±hh:mm
+// "-07" ±hh
+// "-070000" ±hhmmss
+// "-07:00:00" ±hh:mm:ss
+//
+// Replacing the sign in the format with a Z triggers
+// the ISO 8601 behavior of printing Z instead of an
+// offset for the UTC zone. Thus:
+//
+// "Z0700" Z or ±hhmm
+// "Z07:00" Z or ±hh:mm
+// "Z07" Z or ±hh
+// "Z070000" Z or ±hhmmss
+// "Z07:00:00" Z or ±hh:mm:ss
+//
+// Within the format string, the underscores in "_2" and "__2" represent spaces
+// that may be replaced by digits if the following number has multiple digits,
+// for compatibility with fixed-width Unix time formats. A leading zero represents
+// a zero-padded value.
+//
+// The formats __2 and 002 are space-padded and zero-padded
+// three-character day of year; there is no unpadded day of year format.
+//
+// A comma or decimal point followed by one or more zeros represents
+// a fractional second, printed to the given number of decimal places.
+// A comma or decimal point followed by one or more nines represents
+// a fractional second, printed to the given number of decimal places, with
+// trailing zeros removed.
+// For example "15:04:05,000" or "15:04:05.000" formats or parses with
+// millisecond precision.
+//
+// Some valid layouts are invalid time values for time.Parse, due to formats
+// such as _ for space padding and Z for zone information.
+const (
+ Layout = "01/02 03:04:05PM '06 -0700" // The reference time, in numerical order.
+ ANSIC = "Mon Jan _2 15:04:05 2006"
+ UnixDate = "Mon Jan _2 15:04:05 MST 2006"
+ RubyDate = "Mon Jan 02 15:04:05 -0700 2006"
+ RFC822 = "02 Jan 06 15:04 MST"
+ RFC822Z = "02 Jan 06 15:04 -0700" // RFC822 with numeric zone
+ RFC850 = "Monday, 02-Jan-06 15:04:05 MST"
+ RFC1123 = "Mon, 02 Jan 2006 15:04:05 MST"
+ RFC1123Z = "Mon, 02 Jan 2006 15:04:05 -0700" // RFC1123 with numeric zone
+ RFC3339 = "2006-01-02T15:04:05Z07:00"
+ RFC3339Nano = "2006-01-02T15:04:05.999999999Z07:00"
+ Kitchen = "3:04PM"
+ // Handy time stamps.
+ Stamp = "Jan _2 15:04:05"
+ StampMilli = "Jan _2 15:04:05.000"
+ StampMicro = "Jan _2 15:04:05.000000"
+ StampNano = "Jan _2 15:04:05.000000000"
+)
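+
+// For illustration (names are unqualified because this is package time;
+// callers would write time.Date, time.Kitchen, and so on):
+//
+//	t := Date(2006, January, 2, 15, 4, 5, 0, UTC)
+//	t.Format("2006-01-02 15:04:05") // "2006-01-02 15:04:05"
+//	t.Format(Kitchen)               // "3:04PM"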
+
+const (
+ _ = iota
+ stdLongMonth = iota + stdNeedDate // "January"
+ stdMonth // "Jan"
+ stdNumMonth // "1"
+ stdZeroMonth // "01"
+ stdLongWeekDay // "Monday"
+ stdWeekDay // "Mon"
+ stdDay // "2"
+ stdUnderDay // "_2"
+ stdZeroDay // "02"
+ stdUnderYearDay // "__2"
+ stdZeroYearDay // "002"
+ stdHour = iota + stdNeedClock // "15"
+ stdHour12 // "3"
+ stdZeroHour12 // "03"
+ stdMinute // "4"
+ stdZeroMinute // "04"
+ stdSecond // "5"
+ stdZeroSecond // "05"
+ stdLongYear = iota + stdNeedDate // "2006"
+ stdYear // "06"
+ stdPM = iota + stdNeedClock // "PM"
+ stdpm // "pm"
+ stdTZ = iota // "MST"
+ stdISO8601TZ // "Z0700" // prints Z for UTC
+ stdISO8601SecondsTZ // "Z070000"
+ stdISO8601ShortTZ // "Z07"
+ stdISO8601ColonTZ // "Z07:00" // prints Z for UTC
+ stdISO8601ColonSecondsTZ // "Z07:00:00"
+ stdNumTZ // "-0700" // always numeric
+ stdNumSecondsTz // "-070000"
+ stdNumShortTZ // "-07" // always numeric
+ stdNumColonTZ // "-07:00" // always numeric
+ stdNumColonSecondsTZ // "-07:00:00"
+ stdFracSecond0 // ".0", ".00", ..., trailing zeros included
+ stdFracSecond9 // ".9", ".99", ..., trailing zeros omitted
+
+ stdNeedDate = 1 << 8 // need month, day, year
+ stdNeedClock = 2 << 8 // need hour, minute, second
+ stdArgShift = 16 // extra argument in high bits, above low stdArgShift
+ stdSeparatorShift = 28 // extra argument in high 4 bits for fractional second separators
+ stdMask = 1<<stdArgShift - 1 // mask out argument
+)
+
+// std0x records the std values for "01", "02", ..., "06".
+var std0x = [...]int{stdZeroMonth, stdZeroDay, stdZeroHour12, stdZeroMinute, stdZeroSecond, stdYear}
+
+// startsWithLowerCase reports whether the string has a lower-case letter at the beginning.
+// Its purpose is to prevent matching strings like "Month" when looking for "Mon".
+func startsWithLowerCase(str string) bool {
+ if len(str) == 0 {
+ return false
+ }
+ c := str[0]
+ return 'a' <= c && c <= 'z'
+}
+
+// nextStdChunk finds the first occurrence of a std string in
+// layout and returns the text before, the std string, and the text after.
+func nextStdChunk(layout string) (prefix string, std int, suffix string) {
+ for i := 0; i < len(layout); i++ {
+ switch c := int(layout[i]); c {
+ case 'J': // January, Jan
+ if len(layout) >= i+3 && layout[i:i+3] == "Jan" {
+ if len(layout) >= i+7 && layout[i:i+7] == "January" {
+ return layout[0:i], stdLongMonth, layout[i+7:]
+ }
+ if !startsWithLowerCase(layout[i+3:]) {
+ return layout[0:i], stdMonth, layout[i+3:]
+ }
+ }
+
+ case 'M': // Monday, Mon, MST
+ if len(layout) >= i+3 {
+ if layout[i:i+3] == "Mon" {
+ if len(layout) >= i+6 && layout[i:i+6] == "Monday" {
+ return layout[0:i], stdLongWeekDay, layout[i+6:]
+ }
+ if !startsWithLowerCase(layout[i+3:]) {
+ return layout[0:i], stdWeekDay, layout[i+3:]
+ }
+ }
+ if layout[i:i+3] == "MST" {
+ return layout[0:i], stdTZ, layout[i+3:]
+ }
+ }
+
+ case '0': // 01, 02, 03, 04, 05, 06, 002
+ if len(layout) >= i+2 && '1' <= layout[i+1] && layout[i+1] <= '6' {
+ return layout[0:i], std0x[layout[i+1]-'1'], layout[i+2:]
+ }
+ if len(layout) >= i+3 && layout[i+1] == '0' && layout[i+2] == '2' {
+ return layout[0:i], stdZeroYearDay, layout[i+3:]
+ }
+
+ case '1': // 15, 1
+ if len(layout) >= i+2 && layout[i+1] == '5' {
+ return layout[0:i], stdHour, layout[i+2:]
+ }
+ return layout[0:i], stdNumMonth, layout[i+1:]
+
+ case '2': // 2006, 2
+ if len(layout) >= i+4 && layout[i:i+4] == "2006" {
+ return layout[0:i], stdLongYear, layout[i+4:]
+ }
+ return layout[0:i], stdDay, layout[i+1:]
+
+ case '_': // _2, _2006, __2
+ if len(layout) >= i+2 && layout[i+1] == '2' {
+ // _2006 is really a literal _, followed by stdLongYear
+ if len(layout) >= i+5 && layout[i+1:i+5] == "2006" {
+ return layout[0 : i+1], stdLongYear, layout[i+5:]
+ }
+ return layout[0:i], stdUnderDay, layout[i+2:]
+ }
+ if len(layout) >= i+3 && layout[i+1] == '_' && layout[i+2] == '2' {
+ return layout[0:i], stdUnderYearDay, layout[i+3:]
+ }
+
+ case '3':
+ return layout[0:i], stdHour12, layout[i+1:]
+
+ case '4':
+ return layout[0:i], stdMinute, layout[i+1:]
+
+ case '5':
+ return layout[0:i], stdSecond, layout[i+1:]
+
+ case 'P': // PM
+ if len(layout) >= i+2 && layout[i+1] == 'M' {
+ return layout[0:i], stdPM, layout[i+2:]
+ }
+
+ case 'p': // pm
+ if len(layout) >= i+2 && layout[i+1] == 'm' {
+ return layout[0:i], stdpm, layout[i+2:]
+ }
+
+ case '-': // -070000, -07:00:00, -0700, -07:00, -07
+ if len(layout) >= i+7 && layout[i:i+7] == "-070000" {
+ return layout[0:i], stdNumSecondsTz, layout[i+7:]
+ }
+ if len(layout) >= i+9 && layout[i:i+9] == "-07:00:00" {
+ return layout[0:i], stdNumColonSecondsTZ, layout[i+9:]
+ }
+ if len(layout) >= i+5 && layout[i:i+5] == "-0700" {
+ return layout[0:i], stdNumTZ, layout[i+5:]
+ }
+ if len(layout) >= i+6 && layout[i:i+6] == "-07:00" {
+ return layout[0:i], stdNumColonTZ, layout[i+6:]
+ }
+ if len(layout) >= i+3 && layout[i:i+3] == "-07" {
+ return layout[0:i], stdNumShortTZ, layout[i+3:]
+ }
+
+ case 'Z': // Z070000, Z07:00:00, Z0700, Z07:00,
+ if len(layout) >= i+7 && layout[i:i+7] == "Z070000" {
+ return layout[0:i], stdISO8601SecondsTZ, layout[i+7:]
+ }
+ if len(layout) >= i+9 && layout[i:i+9] == "Z07:00:00" {
+ return layout[0:i], stdISO8601ColonSecondsTZ, layout[i+9:]
+ }
+ if len(layout) >= i+5 && layout[i:i+5] == "Z0700" {
+ return layout[0:i], stdISO8601TZ, layout[i+5:]
+ }
+ if len(layout) >= i+6 && layout[i:i+6] == "Z07:00" {
+ return layout[0:i], stdISO8601ColonTZ, layout[i+6:]
+ }
+ if len(layout) >= i+3 && layout[i:i+3] == "Z07" {
+ return layout[0:i], stdISO8601ShortTZ, layout[i+3:]
+ }
+
+ case '.', ',': // ,000, or .000, or ,999, or .999 - repeated digits for fractional seconds.
+ if i+1 < len(layout) && (layout[i+1] == '0' || layout[i+1] == '9') {
+ ch := layout[i+1]
+ j := i + 1
+ for j < len(layout) && layout[j] == ch {
+ j++
+ }
+ // String of digits must end here - only fractional second is all digits.
+ if !isDigit(layout, j) {
+ code := stdFracSecond0
+ if layout[i+1] == '9' {
+ code = stdFracSecond9
+ }
+ std := stdFracSecond(code, j-(i+1), c)
+ return layout[0:i], std, layout[j:]
+ }
+ }
+ }
+ }
+ return layout, 0, ""
+}
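+
+// For illustration, repeated calls decompose a layout into chunks:
+//
+//	nextStdChunk("2006-01-02") // "", stdLongYear, "-01-02"
+//	nextStdChunk("-01-02")     // "-", stdZeroMonth, "-02"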
+
+var longDayNames = []string{
+ "Sunday",
+ "Monday",
+ "Tuesday",
+ "Wednesday",
+ "Thursday",
+ "Friday",
+ "Saturday",
+}
+
+var shortDayNames = []string{
+ "Sun",
+ "Mon",
+ "Tue",
+ "Wed",
+ "Thu",
+ "Fri",
+ "Sat",
+}
+
+var shortMonthNames = []string{
+ "Jan",
+ "Feb",
+ "Mar",
+ "Apr",
+ "May",
+ "Jun",
+ "Jul",
+ "Aug",
+ "Sep",
+ "Oct",
+ "Nov",
+ "Dec",
+}
+
+var longMonthNames = []string{
+ "January",
+ "February",
+ "March",
+ "April",
+ "May",
+ "June",
+ "July",
+ "August",
+ "September",
+ "October",
+ "November",
+ "December",
+}
+
+// match reports whether s1 and s2 match ignoring case.
+// It is assumed s1 and s2 are the same length.
+func match(s1, s2 string) bool {
+ for i := 0; i < len(s1); i++ {
+ c1 := s1[i]
+ c2 := s2[i]
+ if c1 != c2 {
+ // Switch to lower-case; 'a'-'A' is known to be a single bit.
+ c1 |= 'a' - 'A'
+ c2 |= 'a' - 'A'
+ if c1 != c2 || c1 < 'a' || c1 > 'z' {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func lookup(tab []string, val string) (int, string, error) {
+ for i, v := range tab {
+ if len(val) >= len(v) && match(val[0:len(v)], v) {
+ return i, val[len(v):], nil
+ }
+ }
+ return -1, val, errBad
+}
+
+// appendInt appends the decimal form of x to b and returns the result.
+// If the decimal form (excluding sign) is shorter than width, the result is padded with leading 0's.
+// Duplicates functionality in strconv, but avoids dependency.
+func appendInt(b []byte, x int, width int) []byte {
+ u := uint(x)
+ if x < 0 {
+ b = append(b, '-')
+ u = uint(-x)
+ }
+
+ // Assemble decimal in reverse order.
+ var buf [20]byte
+ i := len(buf)
+ for u >= 10 {
+ i--
+ q := u / 10
+ buf[i] = byte('0' + u - q*10)
+ u = q
+ }
+ i--
+ buf[i] = byte('0' + u)
+
+ // Add 0-padding.
+ for w := len(buf) - i; w < width; w++ {
+ b = append(b, '0')
+ }
+
+ return append(b, buf[i:]...)
+}
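+
+// For illustration: appendInt(nil, 7, 3) yields "007" and appendInt(nil, -7, 3)
+// yields "-007"; the sign is not counted toward the width.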
+
+// Never printed, just needs to be non-nil for return by atoi.
+var atoiError = errors.New("time: invalid number")
+
+// Duplicates functionality in strconv, but avoids dependency.
+func atoi(s string) (x int, err error) {
+ neg := false
+ if s != "" && (s[0] == '-' || s[0] == '+') {
+ neg = s[0] == '-'
+ s = s[1:]
+ }
+ q, rem, err := leadingInt(s)
+ x = int(q)
+ if err != nil || rem != "" {
+ return 0, atoiError
+ }
+ if neg {
+ x = -x
+ }
+ return x, nil
+}
+
+// The "std" value passed to formatNano contains two packed fields: the number of
+// digits after the decimal and the separator character (period or comma).
+// These functions pack and unpack that variable.
+func stdFracSecond(code, n, c int) int {
+ // Use 0xfff to make the failure case even more absurd.
+ if c == '.' {
+ return code | ((n & 0xfff) << stdArgShift)
+ }
+ return code | ((n & 0xfff) << stdArgShift) | 1<<stdSeparatorShift
+}
+
+func digitsLen(std int) int {
+ return (std >> stdArgShift) & 0xfff
+}
+
+func separator(std int) byte {
+ if (std >> stdSeparatorShift) == 0 {
+ return '.'
+ }
+ return ','
+}
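+
+// For illustration: std := stdFracSecond(stdFracSecond9, 3, ',') packs a value
+// for which digitsLen(std) == 3 and separator(std) == ','.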
+
+// formatNano appends a fractional second, as nanoseconds, to b
+// and returns the result.
+func formatNano(b []byte, nanosec uint, std int) []byte {
+ var (
+ n = digitsLen(std)
+ separator = separator(std)
+ trim = std&stdMask == stdFracSecond9
+ )
+ u := nanosec
+ var buf [9]byte
+ for start := len(buf); start > 0; {
+ start--
+ buf[start] = byte(u%10 + '0')
+ u /= 10
+ }
+
+ if n > 9 {
+ n = 9
+ }
+ if trim {
+ for n > 0 && buf[n-1] == '0' {
+ n--
+ }
+ if n == 0 {
+ return b
+ }
+ }
+ b = append(b, separator)
+ return append(b, buf[:n]...)
+}
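+
+// For illustration, with nanosec = 123000000 (that is, 0.123s):
+//
+//	formatNano(nil, 123000000, stdFracSecond(stdFracSecond0, 9, '.')) // ".123000000"
+//	formatNano(nil, 123000000, stdFracSecond(stdFracSecond9, 9, '.')) // ".123"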
+
+// String returns the time formatted using the format string
+//
+// "2006-01-02 15:04:05.999999999 -0700 MST"
+//
+// If the time has a monotonic clock reading, the returned string
+// includes a final field "m=±<value>", where value is the monotonic
+// clock reading formatted as a decimal number of seconds.
+//
+// The returned string is meant for debugging; for a stable serialized
+// representation, use t.MarshalText, t.MarshalBinary, or t.Format
+// with an explicit format string.
+func (t Time) String() string {
+ s := t.Format("2006-01-02 15:04:05.999999999 -0700 MST")
+
+ // Format monotonic clock reading as m=±ddd.nnnnnnnnn.
+ if t.wall&hasMonotonic != 0 {
+ m2 := uint64(t.ext)
+ sign := byte('+')
+ if t.ext < 0 {
+ sign = '-'
+ m2 = -m2
+ }
+ m1, m2 := m2/1e9, m2%1e9
+ m0, m1 := m1/1e9, m1%1e9
+ buf := make([]byte, 0, 24)
+ buf = append(buf, " m="...)
+ buf = append(buf, sign)
+ wid := 0
+ if m0 != 0 {
+ buf = appendInt(buf, int(m0), 0)
+ wid = 9
+ }
+ buf = appendInt(buf, int(m1), wid)
+ buf = append(buf, '.')
+ buf = appendInt(buf, int(m2), 9)
+ s += string(buf)
+ }
+ return s
+}
+
+// GoString implements fmt.GoStringer and formats t to be printed in Go source
+// code.
+func (t Time) GoString() string {
+ buf := make([]byte, 0, 70)
+ buf = append(buf, "time.Date("...)
+ buf = appendInt(buf, t.Year(), 0)
+ month := t.Month()
+ if January <= month && month <= December {
+ buf = append(buf, ", time."...)
+ buf = append(buf, t.Month().String()...)
+ } else {
+ // It's difficult to construct a time.Time with a date outside the
+ // standard range but we might as well try to handle the case.
+ buf = appendInt(buf, int(month), 0)
+ }
+ buf = append(buf, ", "...)
+ buf = appendInt(buf, t.Day(), 0)
+ buf = append(buf, ", "...)
+ buf = appendInt(buf, t.Hour(), 0)
+ buf = append(buf, ", "...)
+ buf = appendInt(buf, t.Minute(), 0)
+ buf = append(buf, ", "...)
+ buf = appendInt(buf, t.Second(), 0)
+ buf = append(buf, ", "...)
+ buf = appendInt(buf, t.Nanosecond(), 0)
+ buf = append(buf, ", "...)
+ switch loc := t.Location(); loc {
+ case UTC, nil:
+ buf = append(buf, "time.UTC"...)
+ case Local:
+ buf = append(buf, "time.Local"...)
+ default:
+ // there are several options for how we could display this, none of
+ // which are great:
+ //
+ // - use Location(loc.name), which is not technically valid syntax
+ // - use LoadLocation(loc.name), which will cause a syntax error when
+ // embedded and also would require us to escape the string without
+ // importing fmt or strconv
+ // - try to use FixedZone, which would also require escaping the name
+ // and would represent e.g. "America/Los_Angeles" daylight saving time
+ // shifts inaccurately
+ // - use the pointer format, which is no worse than you'd get with the
+ // old fmt.Sprintf("%#v", t) format.
+ //
+ // Of these, Location(loc.name) is the least disruptive. This is an edge
+ // case we hope not to hit too often.
+ buf = append(buf, `time.Location(`...)
+ buf = append(buf, []byte(quote(loc.name))...)
+ buf = append(buf, `)`...)
+ }
+ buf = append(buf, ')')
+ return string(buf)
+}
+
+// Format returns a textual representation of the time value formatted according
+// to the layout defined by the argument. See the documentation for the
+// constant called Layout to see how to represent the layout format.
+//
+// The executable example for Time.Format demonstrates the working
+// of the layout string in detail and is a good reference.
+func (t Time) Format(layout string) string {
+ const bufSize = 64
+ var b []byte
+ max := len(layout) + 10
+ if max < bufSize {
+ var buf [bufSize]byte
+ b = buf[:0]
+ } else {
+ b = make([]byte, 0, max)
+ }
+ b = t.AppendFormat(b, layout)
+ return string(b)
+}
+
+// AppendFormat is like Format but appends the textual
+// representation to b and returns the extended buffer.
+func (t Time) AppendFormat(b []byte, layout string) []byte {
+ var (
+ name, offset, abs = t.locabs()
+
+ year int = -1
+ month Month
+ day int
+ yday int
+ hour int = -1
+ min int
+ sec int
+ )
+ // Each iteration generates one std value.
+ for layout != "" {
+ prefix, std, suffix := nextStdChunk(layout)
+ if prefix != "" {
+ b = append(b, prefix...)
+ }
+ if std == 0 {
+ break
+ }
+ layout = suffix
+
+ // Compute year, month, day if needed.
+ if year < 0 && std&stdNeedDate != 0 {
+ year, month, day, yday = absDate(abs, true)
+ yday++
+ }
+
+ // Compute hour, minute, second if needed.
+ if hour < 0 && std&stdNeedClock != 0 {
+ hour, min, sec = absClock(abs)
+ }
+
+ switch std & stdMask {
+ case stdYear:
+ y := year
+ if y < 0 {
+ y = -y
+ }
+ b = appendInt(b, y%100, 2)
+ case stdLongYear:
+ b = appendInt(b, year, 4)
+ case stdMonth:
+ b = append(b, month.String()[:3]...)
+ case stdLongMonth:
+ m := month.String()
+ b = append(b, m...)
+ case stdNumMonth:
+ b = appendInt(b, int(month), 0)
+ case stdZeroMonth:
+ b = appendInt(b, int(month), 2)
+ case stdWeekDay:
+ b = append(b, absWeekday(abs).String()[:3]...)
+ case stdLongWeekDay:
+ s := absWeekday(abs).String()
+ b = append(b, s...)
+ case stdDay:
+ b = appendInt(b, day, 0)
+ case stdUnderDay:
+ if day < 10 {
+ b = append(b, ' ')
+ }
+ b = appendInt(b, day, 0)
+ case stdZeroDay:
+ b = appendInt(b, day, 2)
+ case stdUnderYearDay:
+ if yday < 100 {
+ b = append(b, ' ')
+ if yday < 10 {
+ b = append(b, ' ')
+ }
+ }
+ b = appendInt(b, yday, 0)
+ case stdZeroYearDay:
+ b = appendInt(b, yday, 3)
+ case stdHour:
+ b = appendInt(b, hour, 2)
+ case stdHour12:
+ // Noon is 12PM, midnight is 12AM.
+ hr := hour % 12
+ if hr == 0 {
+ hr = 12
+ }
+ b = appendInt(b, hr, 0)
+ case stdZeroHour12:
+ // Noon is 12PM, midnight is 12AM.
+ hr := hour % 12
+ if hr == 0 {
+ hr = 12
+ }
+ b = appendInt(b, hr, 2)
+ case stdMinute:
+ b = appendInt(b, min, 0)
+ case stdZeroMinute:
+ b = appendInt(b, min, 2)
+ case stdSecond:
+ b = appendInt(b, sec, 0)
+ case stdZeroSecond:
+ b = appendInt(b, sec, 2)
+ case stdPM:
+ if hour >= 12 {
+ b = append(b, "PM"...)
+ } else {
+ b = append(b, "AM"...)
+ }
+ case stdpm:
+ if hour >= 12 {
+ b = append(b, "pm"...)
+ } else {
+ b = append(b, "am"...)
+ }
+ case stdISO8601TZ, stdISO8601ColonTZ, stdISO8601SecondsTZ, stdISO8601ShortTZ, stdISO8601ColonSecondsTZ, stdNumTZ, stdNumColonTZ, stdNumSecondsTz, stdNumShortTZ, stdNumColonSecondsTZ:
+ // Ugly special case. We cheat and take the "Z" variants
+ // to mean "the time zone as formatted for ISO 8601".
+ if offset == 0 && (std == stdISO8601TZ || std == stdISO8601ColonTZ || std == stdISO8601SecondsTZ || std == stdISO8601ShortTZ || std == stdISO8601ColonSecondsTZ) {
+ b = append(b, 'Z')
+ break
+ }
+ zone := offset / 60 // convert to minutes
+ absoffset := offset
+ if zone < 0 {
+ b = append(b, '-')
+ zone = -zone
+ absoffset = -absoffset
+ } else {
+ b = append(b, '+')
+ }
+ b = appendInt(b, zone/60, 2)
+ if std == stdISO8601ColonTZ || std == stdNumColonTZ || std == stdISO8601ColonSecondsTZ || std == stdNumColonSecondsTZ {
+ b = append(b, ':')
+ }
+ if std != stdNumShortTZ && std != stdISO8601ShortTZ {
+ b = appendInt(b, zone%60, 2)
+ }
+
+ // Append seconds if appropriate.
+ if std == stdISO8601SecondsTZ || std == stdNumSecondsTz || std == stdNumColonSecondsTZ || std == stdISO8601ColonSecondsTZ {
+ if std == stdNumColonSecondsTZ || std == stdISO8601ColonSecondsTZ {
+ b = append(b, ':')
+ }
+ b = appendInt(b, absoffset%60, 2)
+ }
+
+ case stdTZ:
+ if name != "" {
+ b = append(b, name...)
+ break
+ }
+ // No time zone known for this time, but we must print one.
+ // Use the -0700 format.
+ zone := offset / 60 // convert to minutes
+ if zone < 0 {
+ b = append(b, '-')
+ zone = -zone
+ } else {
+ b = append(b, '+')
+ }
+ b = appendInt(b, zone/60, 2)
+ b = appendInt(b, zone%60, 2)
+ case stdFracSecond0, stdFracSecond9:
+ b = formatNano(b, uint(t.Nanosecond()), std)
+ }
+ }
+ return b
+}
+
+var errBad = errors.New("bad value for field") // placeholder not passed to user
+
+// ParseError describes a problem parsing a time string.
+type ParseError struct {
+ Layout string
+ Value string
+ LayoutElem string
+ ValueElem string
+ Message string
+}
+
+// These are borrowed from unicode/utf8 and strconv and replicate behavior in
+// those packages, since we can't take a dependency on either.
+const (
+ lowerhex = "0123456789abcdef"
+ runeSelf = 0x80
+ runeError = '\uFFFD'
+)
+
+func quote(s string) string {
+ buf := make([]byte, 1, len(s)+2) // slice will be at least len(s) + quotes
+ buf[0] = '"'
+ for i, c := range s {
+ if c >= runeSelf || c < ' ' {
+ // This means you are asking us to parse a time.Duration or
+ // time.Location with unprintable or non-ASCII characters in it.
+ // We don't expect to hit this case very often. We could try to
+ // reproduce strconv.Quote's behavior with full fidelity but
+ // given how rarely we expect to hit these edge cases, speed and
+ // conciseness are better.
+ var width int
+ if c == runeError {
+ width = 1
+ if i+2 < len(s) && s[i:i+3] == string(runeError) {
+ width = 3
+ }
+ } else {
+ width = len(string(c))
+ }
+ for j := 0; j < width; j++ {
+ buf = append(buf, `\x`...)
+ buf = append(buf, lowerhex[s[i+j]>>4])
+ buf = append(buf, lowerhex[s[i+j]&0xF])
+ }
+ } else {
+ if c == '"' || c == '\\' {
+ buf = append(buf, '\\')
+ }
+ buf = append(buf, string(c)...)
+ }
+ }
+ buf = append(buf, '"')
+ return string(buf)
+}
+
+// Error returns the string representation of a ParseError.
+func (e *ParseError) Error() string {
+ if e.Message == "" {
+ return "parsing time " +
+ quote(e.Value) + " as " +
+ quote(e.Layout) + ": cannot parse " +
+ quote(e.ValueElem) + " as " +
+ quote(e.LayoutElem)
+ }
+ return "parsing time " +
+ quote(e.Value) + e.Message
+}
+
+// isDigit reports whether s[i] is in range and is a decimal digit.
+func isDigit(s string, i int) bool {
+ if len(s) <= i {
+ return false
+ }
+ c := s[i]
+ return '0' <= c && c <= '9'
+}
+
+// getnum parses s[0:1] or s[0:2] (fixed forces s[0:2])
+// as a decimal integer and returns the integer and the
+// remainder of the string.
+func getnum(s string, fixed bool) (int, string, error) {
+ if !isDigit(s, 0) {
+ return 0, s, errBad
+ }
+ if !isDigit(s, 1) {
+ if fixed {
+ return 0, s, errBad
+ }
+ return int(s[0] - '0'), s[1:], nil
+ }
+ return int(s[0]-'0')*10 + int(s[1]-'0'), s[2:], nil
+}
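+
+// For illustration: getnum("2:04", false) returns (2, ":04", nil), while
+// getnum("2:04", true) fails because exactly two digits are required.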
+
+// getnum3 parses s[0:1], s[0:2], or s[0:3] (fixed forces s[0:3])
+// as a decimal integer and returns the integer and the remainder
+// of the string.
+func getnum3(s string, fixed bool) (int, string, error) {
+ var n, i int
+ for i = 0; i < 3 && isDigit(s, i); i++ {
+ n = n*10 + int(s[i]-'0')
+ }
+ if i == 0 || fixed && i != 3 {
+ return 0, s, errBad
+ }
+ return n, s[i:], nil
+}
+
+func cutspace(s string) string {
+ for len(s) > 0 && s[0] == ' ' {
+ s = s[1:]
+ }
+ return s
+}
+
+// skip removes the given prefix from value,
+// treating runs of space characters as equivalent.
+func skip(value, prefix string) (string, error) {
+ for len(prefix) > 0 {
+ if prefix[0] == ' ' {
+ if len(value) > 0 && value[0] != ' ' {
+ return value, errBad
+ }
+ prefix = cutspace(prefix)
+ value = cutspace(value)
+ continue
+ }
+ if len(value) == 0 || value[0] != prefix[0] {
+ return value, errBad
+ }
+ prefix = prefix[1:]
+ value = value[1:]
+ }
+ return value, nil
+}
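+
+// For illustration: skip("Mon,  Jan", "Mon, Jan") returns ("", nil); the run
+// of spaces in the value matches the single space in the prefix.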
+
+// Parse parses a formatted string and returns the time value it represents.
+// See the documentation for the constant called Layout to see how to
+// represent the format. The second argument must be parseable using
+// the format string (layout) provided as the first argument.
+//
+// The example for Time.Format demonstrates the working of the layout string
+// in detail and is a good reference.
+//
+// When parsing (only), the input may contain a fractional second
+// field immediately after the seconds field, even if the layout does not
+// signify its presence. In that case either a comma or a decimal point
+// followed by a maximal series of digits is parsed as a fractional second.
+// Fractional seconds are truncated to nanosecond precision.
+//
+// Elements omitted from the layout are assumed to be zero or, when
+// zero is impossible, one, so parsing "3:04pm" returns the time
+// corresponding to Jan 1, year 0, 15:04:00 UTC (note that because the year is
+// 0, this time is before the zero Time).
+// Years must be in the range 0000..9999. The day of the week is checked
+// for syntax but it is otherwise ignored.
+//
+// For layouts specifying the two-digit year 06, a value NN >= 69 will be treated
+// as 19NN and a value NN < 69 will be treated as 20NN.
+//
+// The remainder of this comment describes the handling of time zones.
+//
+// In the absence of a time zone indicator, Parse returns a time in UTC.
+//
+// When parsing a time with a zone offset like -0700, if the offset corresponds
+// to a time zone used by the current location (Local), then Parse uses that
+// location and zone in the returned time. Otherwise it records the time as
+// being in a fabricated location with time fixed at the given zone offset.
+//
+// When parsing a time with a zone abbreviation like MST, if the zone abbreviation
+// has a defined offset in the current location, then that offset is used.
+// The zone abbreviation "UTC" is recognized as UTC regardless of location.
+// If the zone abbreviation is unknown, Parse records the time as being
+// in a fabricated location with the given zone abbreviation and a zero offset.
+// This choice means that such a time can be parsed and reformatted with the
+// same layout losslessly, but the exact instant used in the representation will
+// differ by the actual zone offset. To avoid such problems, prefer time layouts
+// that use a numeric zone offset, or use ParseInLocation.
+func Parse(layout, value string) (Time, error) {
+ return parse(layout, value, UTC, Local)
+}
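+
+// For illustration:
+//
+//	Parse("2006-01-02 15:04 MST", "2022-09-30 11:15 UTC")     // zone UTC
+//	Parse("2006-01-02 15:04", "2022-09-30 11:15")             // no zone: UTC
+//	Parse("2006-01-02 15:04 -0700", "2022-09-30 11:15 +0300")
+//	        // +03:00 offset; Local is used only if its offset matches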
+
+// ParseInLocation is like Parse but differs in two important ways.
+// First, in the absence of time zone information, Parse interprets a time as UTC;
+// ParseInLocation interprets the time as in the given location.
+// Second, when given a zone offset or abbreviation, Parse tries to match it
+// against the Local location; ParseInLocation uses the given location.
+func ParseInLocation(layout, value string, loc *Location) (Time, error) {
+ return parse(layout, value, loc, loc)
+}
+
+func parse(layout, value string, defaultLocation, local *Location) (Time, error) {
+ alayout, avalue := layout, value
+ rangeErrString := "" // set if a value is out of range
+ amSet := false // do we need to subtract 12 from the hour for midnight?
+ pmSet := false // do we need to add 12 to the hour?
+
+ // Time being constructed.
+ var (
+ year int
+ month int = -1
+ day int = -1
+ yday int = -1
+ hour int
+ min int
+ sec int
+ nsec int
+ z *Location
+ zoneOffset int = -1
+ zoneName string
+ )
+
+ // Each iteration processes one std value.
+ for {
+ var err error
+ prefix, std, suffix := nextStdChunk(layout)
+ stdstr := layout[len(prefix) : len(layout)-len(suffix)]
+ value, err = skip(value, prefix)
+ if err != nil {
+ return Time{}, &ParseError{alayout, avalue, prefix, value, ""}
+ }
+ if std == 0 {
+ if len(value) != 0 {
+ return Time{}, &ParseError{alayout, avalue, "", value, ": extra text: " + quote(value)}
+ }
+ break
+ }
+ layout = suffix
+ var p string
+ switch std & stdMask {
+ case stdYear:
+ if len(value) < 2 {
+ err = errBad
+ break
+ }
+ hold := value
+ p, value = value[0:2], value[2:]
+ year, err = atoi(p)
+ if err != nil {
+ value = hold
+ } else if year >= 69 { // Unix time starts Dec 31 1969 in some time zones
+ year += 1900
+ } else {
+ year += 2000
+ }
+ case stdLongYear:
+ if len(value) < 4 || !isDigit(value, 0) {
+ err = errBad
+ break
+ }
+ p, value = value[0:4], value[4:]
+ year, err = atoi(p)
+ case stdMonth:
+ month, value, err = lookup(shortMonthNames, value)
+ month++
+ case stdLongMonth:
+ month, value, err = lookup(longMonthNames, value)
+ month++
+ case stdNumMonth, stdZeroMonth:
+ month, value, err = getnum(value, std == stdZeroMonth)
+ if err == nil && (month <= 0 || 12 < month) {
+ rangeErrString = "month"
+ }
+ case stdWeekDay:
+ // Ignore weekday except for error checking.
+ _, value, err = lookup(shortDayNames, value)
+ case stdLongWeekDay:
+ _, value, err = lookup(longDayNames, value)
+ case stdDay, stdUnderDay, stdZeroDay:
+ if std == stdUnderDay && len(value) > 0 && value[0] == ' ' {
+ value = value[1:]
+ }
+ day, value, err = getnum(value, std == stdZeroDay)
+ // Note that we allow any one- or two-digit day here.
+ // The month, day, year combination is validated after we've completed parsing.
+ case stdUnderYearDay, stdZeroYearDay:
+ for i := 0; i < 2; i++ {
+ if std == stdUnderYearDay && len(value) > 0 && value[0] == ' ' {
+ value = value[1:]
+ }
+ }
+ yday, value, err = getnum3(value, std == stdZeroYearDay)
+ // Note that we allow any one-, two-, or three-digit year-day here.
+ // The year-day, year combination is validated after we've completed parsing.
+ case stdHour:
+ hour, value, err = getnum(value, false)
+ if hour < 0 || 24 <= hour {
+ rangeErrString = "hour"
+ }
+ case stdHour12, stdZeroHour12:
+ hour, value, err = getnum(value, std == stdZeroHour12)
+ if hour < 0 || 12 < hour {
+ rangeErrString = "hour"
+ }
+ case stdMinute, stdZeroMinute:
+ min, value, err = getnum(value, std == stdZeroMinute)
+ if min < 0 || 60 <= min {
+ rangeErrString = "minute"
+ }
+ case stdSecond, stdZeroSecond:
+ sec, value, err = getnum(value, std == stdZeroSecond)
+ if sec < 0 || 60 <= sec {
+ rangeErrString = "second"
+ break
+ }
+ // Special case: do we have a fractional second but no
+ // fractional second in the format?
+ if len(value) >= 2 && commaOrPeriod(value[0]) && isDigit(value, 1) {
+ _, std, _ = nextStdChunk(layout)
+ std &= stdMask
+ if std == stdFracSecond0 || std == stdFracSecond9 {
+ // Fractional second in the layout; proceed normally
+ break
+ }
+ // No fractional second in the layout but we have one in the input.
+ n := 2
+ for ; n < len(value) && isDigit(value, n); n++ {
+ }
+ nsec, rangeErrString, err = parseNanoseconds(value, n)
+ value = value[n:]
+ }
+ case stdPM:
+ if len(value) < 2 {
+ err = errBad
+ break
+ }
+ p, value = value[0:2], value[2:]
+ switch p {
+ case "PM":
+ pmSet = true
+ case "AM":
+ amSet = true
+ default:
+ err = errBad
+ }
+ case stdpm:
+ if len(value) < 2 {
+ err = errBad
+ break
+ }
+ p, value = value[0:2], value[2:]
+ switch p {
+ case "pm":
+ pmSet = true
+ case "am":
+ amSet = true
+ default:
+ err = errBad
+ }
+ case stdISO8601TZ, stdISO8601ColonTZ, stdISO8601SecondsTZ, stdISO8601ShortTZ, stdISO8601ColonSecondsTZ, stdNumTZ, stdNumShortTZ, stdNumColonTZ, stdNumSecondsTz, stdNumColonSecondsTZ:
+ if (std == stdISO8601TZ || std == stdISO8601ShortTZ || std == stdISO8601ColonTZ) && len(value) >= 1 && value[0] == 'Z' {
+ value = value[1:]
+ z = UTC
+ break
+ }
+ var sign, hour, min, seconds string
+ if std == stdISO8601ColonTZ || std == stdNumColonTZ {
+ if len(value) < 6 {
+ err = errBad
+ break
+ }
+ if value[3] != ':' {
+ err = errBad
+ break
+ }
+ sign, hour, min, seconds, value = value[0:1], value[1:3], value[4:6], "00", value[6:]
+ } else if std == stdNumShortTZ || std == stdISO8601ShortTZ {
+ if len(value) < 3 {
+ err = errBad
+ break
+ }
+ sign, hour, min, seconds, value = value[0:1], value[1:3], "00", "00", value[3:]
+ } else if std == stdISO8601ColonSecondsTZ || std == stdNumColonSecondsTZ {
+ if len(value) < 9 {
+ err = errBad
+ break
+ }
+ if value[3] != ':' || value[6] != ':' {
+ err = errBad
+ break
+ }
+ sign, hour, min, seconds, value = value[0:1], value[1:3], value[4:6], value[7:9], value[9:]
+ } else if std == stdISO8601SecondsTZ || std == stdNumSecondsTz {
+ if len(value) < 7 {
+ err = errBad
+ break
+ }
+ sign, hour, min, seconds, value = value[0:1], value[1:3], value[3:5], value[5:7], value[7:]
+ } else {
+ if len(value) < 5 {
+ err = errBad
+ break
+ }
+ sign, hour, min, seconds, value = value[0:1], value[1:3], value[3:5], "00", value[5:]
+ }
+ var hr, mm, ss int
+ hr, err = atoi(hour)
+ if err == nil {
+ mm, err = atoi(min)
+ }
+ if err == nil {
+ ss, err = atoi(seconds)
+ }
+ zoneOffset = (hr*60+mm)*60 + ss // offset is in seconds
+ switch sign[0] {
+ case '+':
+ case '-':
+ zoneOffset = -zoneOffset
+ default:
+ err = errBad
+ }
+ case stdTZ:
+ // Does it look like a time zone?
+ if len(value) >= 3 && value[0:3] == "UTC" {
+ z = UTC
+ value = value[3:]
+ break
+ }
+ n, ok := parseTimeZone(value)
+ if !ok {
+ err = errBad
+ break
+ }
+ zoneName, value = value[:n], value[n:]
+
+ case stdFracSecond0:
+ // stdFracSecond0 requires the exact number of digits as specified in
+ // the layout.
+ ndigit := 1 + digitsLen(std)
+ if len(value) < ndigit {
+ err = errBad
+ break
+ }
+ nsec, rangeErrString, err = parseNanoseconds(value, ndigit)
+ value = value[ndigit:]
+
+ case stdFracSecond9:
+ if len(value) < 2 || !commaOrPeriod(value[0]) || value[1] < '0' || '9' < value[1] {
+ // Fractional second omitted.
+ break
+ }
+ // Take any number of digits, even more than asked for,
+ // because it is what the stdSecond case would do.
+ i := 0
+ for i < 9 && i+1 < len(value) && '0' <= value[i+1] && value[i+1] <= '9' {
+ i++
+ }
+ nsec, rangeErrString, err = parseNanoseconds(value, 1+i)
+ value = value[1+i:]
+ }
+ if rangeErrString != "" {
+ return Time{}, &ParseError{alayout, avalue, stdstr, value, ": " + rangeErrString + " out of range"}
+ }
+ if err != nil {
+ return Time{}, &ParseError{alayout, avalue, stdstr, value, ""}
+ }
+ }
+ if pmSet && hour < 12 {
+ hour += 12
+ } else if amSet && hour == 12 {
+ hour = 0
+ }
+
+ // Convert yday to day, month.
+ if yday >= 0 {
+ var d int
+ var m int
+ if isLeap(year) {
+ if yday == 31+29 {
+ m = int(February)
+ d = 29
+ } else if yday > 31+29 {
+ yday--
+ }
+ }
+ if yday < 1 || yday > 365 {
+ return Time{}, &ParseError{alayout, avalue, "", value, ": day-of-year out of range"}
+ }
+ if m == 0 {
+ m = (yday-1)/31 + 1
+ if int(daysBefore[m]) < yday {
+ m++
+ }
+ d = yday - int(daysBefore[m-1])
+ }
+ // If month, day already seen, yday's m, d must match.
+ // Otherwise, set them from m, d.
+ if month >= 0 && month != m {
+ return Time{}, &ParseError{alayout, avalue, "", value, ": day-of-year does not match month"}
+ }
+ month = m
+ if day >= 0 && day != d {
+ return Time{}, &ParseError{alayout, avalue, "", value, ": day-of-year does not match day"}
+ }
+ day = d
+ } else {
+ if month < 0 {
+ month = int(January)
+ }
+ if day < 0 {
+ day = 1
+ }
+ }
+
+ // Validate the day of the month.
+ if day < 1 || day > daysIn(Month(month), year) {
+ return Time{}, &ParseError{alayout, avalue, "", value, ": day out of range"}
+ }
+
+ if z != nil {
+ return Date(year, Month(month), day, hour, min, sec, nsec, z), nil
+ }
+
+ if zoneOffset != -1 {
+ t := Date(year, Month(month), day, hour, min, sec, nsec, UTC)
+ t.addSec(-int64(zoneOffset))
+
+ // Look for local zone with the given offset.
+ // If that zone was in effect at the given time, use it.
+ name, offset, _, _, _ := local.lookup(t.unixSec())
+ if offset == zoneOffset && (zoneName == "" || name == zoneName) {
+ t.setLoc(local)
+ return t, nil
+ }
+
+ // Otherwise create fake zone to record offset.
+ t.setLoc(FixedZone(zoneName, zoneOffset))
+ return t, nil
+ }
+
+ if zoneName != "" {
+ t := Date(year, Month(month), day, hour, min, sec, nsec, UTC)
+ // Look for local zone with the given offset.
+ // If that zone was in effect at the given time, use it.
+ offset, ok := local.lookupName(zoneName, t.unixSec())
+ if ok {
+ t.addSec(-int64(offset))
+ t.setLoc(local)
+ return t, nil
+ }
+
+ // Otherwise, create fake zone with unknown offset.
+ if len(zoneName) > 3 && zoneName[:3] == "GMT" {
+ offset, _ = atoi(zoneName[3:]) // Guaranteed OK by parseGMT.
+ offset *= 3600
+ }
+ t.setLoc(FixedZone(zoneName, offset))
+ return t, nil
+ }
+
+ // Otherwise, fall back to default.
+ return Date(year, Month(month), day, hour, min, sec, nsec, defaultLocation), nil
+}
+
+// parseTimeZone parses a time zone string and returns its length. Time zones
+// are human-generated and unpredictable. We can't do precise error checking.
+// On the other hand, for a correct parse there must be a time zone at the
+// beginning of the string, so it's almost always true that there's one
+// there. We look at the beginning of the string for a run of upper-case letters.
+// If there are more than 5, it's an error.
+// If there are 4 or 5 and the last is a T, it's a time zone.
+// If there are 3, it's a time zone.
+// Otherwise, other than special cases, it's not a time zone.
+// GMT is special because it can have an hour offset.
+func parseTimeZone(value string) (length int, ok bool) {
+ if len(value) < 3 {
+ return 0, false
+ }
+ // Special case 1: ChST and MeST are the only zones with a lower-case letter.
+ if len(value) >= 4 && (value[:4] == "ChST" || value[:4] == "MeST") {
+ return 4, true
+ }
+ // Special case 2: GMT may have an hour offset; treat it specially.
+ if value[:3] == "GMT" {
+ length = parseGMT(value)
+ return length, true
+ }
+ // Special case 3: Some time zones are not named, but have the +/-00 format.
+ if value[0] == '+' || value[0] == '-' {
+ length = parseSignedOffset(value)
+ ok := length > 0 // parseSignedOffset returns 0 in case of bad input
+ return length, ok
+ }
+ // How many upper-case letters are there? Need at least three, at most five.
+ var nUpper int
+ for nUpper = 0; nUpper < 6; nUpper++ {
+ if nUpper >= len(value) {
+ break
+ }
+ if c := value[nUpper]; c < 'A' || 'Z' < c {
+ break
+ }
+ }
+ switch nUpper {
+ case 0, 1, 2, 6:
+ return 0, false
+ case 5: // Must end in T to match.
+ if value[4] == 'T' {
+ return 5, true
+ }
+ case 4:
+ // Must end in T, except one special case.
+ if value[3] == 'T' || value[:4] == "WITA" {
+ return 4, true
+ }
+ case 3:
+ return 3, true
+ }
+ return 0, false
+}
+
+// parseGMT parses a GMT time zone. The input string is known to start with "GMT".
+// The function checks whether that is followed by a sign and a number in the
+// range -23 through +23 excluding zero.
+func parseGMT(value string) int {
+ value = value[3:]
+ if len(value) == 0 {
+ return 3
+ }
+
+ return 3 + parseSignedOffset(value)
+}
+
+// parseSignedOffset parses a signed timezone offset (e.g. "+03" or "-04").
+// The function checks for a signed number in the range -23 through +23 excluding zero.
+// It returns the length of the offset string consumed, or 0 if the input does not begin with a valid signed offset.
+func parseSignedOffset(value string) int {
+ sign := value[0]
+ if sign != '-' && sign != '+' {
+ return 0
+ }
+ x, rem, err := leadingInt(value[1:])
+
+ // fail if nothing consumed by leadingInt
+ if err != nil || value[1:] == rem {
+ return 0
+ }
+ if x > 23 {
+ return 0
+ }
+ return len(value) - len(rem)
+}
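+
+// Editor's sketch (not part of the upstream source): how these three helpers
+// classify a few inputs, per the rules documented above:
+//
+//	parseTimeZone("UTC-5")   // 3, true  ("UTC"; the offset is parsed separately)
+//	parseTimeZone("GMT-5")   // 5, true  (GMT may absorb an hour offset)
+//	parseTimeZone("CEST")    // 4, true  (four upper-case letters ending in T)
+//	parseSignedOffset("+03") // 3        (sign plus a value in 1..23)
+//	parseSignedOffset("+24") // 0        (out of range)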
+
+func commaOrPeriod(b byte) bool {
+ return b == '.' || b == ','
+}
+
+func parseNanoseconds(value string, nbytes int) (ns int, rangeErrString string, err error) {
+ if !commaOrPeriod(value[0]) {
+ err = errBad
+ return
+ }
+ if nbytes > 10 {
+ value = value[:10]
+ nbytes = 10
+ }
+ if ns, err = atoi(value[1:nbytes]); err != nil {
+ return
+ }
+ if ns < 0 {
+ rangeErrString = "fractional second"
+ return
+ }
+ // We need nanoseconds, which means scaling by the number
+ // of missing digits in the format, maximum length 10.
+ scaleDigits := 10 - nbytes
+ for i := 0; i < scaleDigits; i++ {
+ ns *= 10
+ }
+ return
+}
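+
+// Editor's sketch (not part of the upstream source): parsing ".123" with
+// nbytes=4 reads the integer 123 and scales it by 10^(10-4) missing digits:
+//
+//	ns, _, _ := parseNanoseconds(".123", 4) // ns == 123000000 (0.123s)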
+
+var errLeadingInt = errors.New("time: bad [0-9]*") // never printed
+
+// leadingInt consumes the leading [0-9]* from s.
+func leadingInt(s string) (x uint64, rem string, err error) {
+ i := 0
+ for ; i < len(s); i++ {
+ c := s[i]
+ if c < '0' || c > '9' {
+ break
+ }
+ if x > 1<<63/10 {
+ // overflow
+ return 0, "", errLeadingInt
+ }
+ x = x*10 + uint64(c) - '0'
+ if x > 1<<63 {
+ // overflow
+ return 0, "", errLeadingInt
+ }
+ }
+ return x, s[i:], nil
+}
+
+// leadingFraction consumes the leading [0-9]* from s.
+// It is used only for fractions, so does not return an error on overflow,
+// it just stops accumulating precision.
+func leadingFraction(s string) (x uint64, scale float64, rem string) {
+ i := 0
+ scale = 1
+ overflow := false
+ for ; i < len(s); i++ {
+ c := s[i]
+ if c < '0' || c > '9' {
+ break
+ }
+ if overflow {
+ continue
+ }
+ if x > (1<<63-1)/10 {
+ // It's possible for overflow to give a positive number, so take care.
+ overflow = true
+ continue
+ }
+ y := x*10 + uint64(c) - '0'
+ if y > 1<<63 {
+ overflow = true
+ continue
+ }
+ x = y
+ scale *= 10
+ }
+ return x, scale, s[i:]
+}
+
+var unitMap = map[string]uint64{
+ "ns": uint64(Nanosecond),
+ "us": uint64(Microsecond),
+ "µs": uint64(Microsecond), // U+00B5 = micro symbol
+ "μs": uint64(Microsecond), // U+03BC = Greek letter mu
+ "ms": uint64(Millisecond),
+ "s": uint64(Second),
+ "m": uint64(Minute),
+ "h": uint64(Hour),
+}
+
+// ParseDuration parses a duration string.
+// A duration string is a possibly signed sequence of
+// decimal numbers, each with optional fraction and a unit suffix,
+// such as "300ms", "-1.5h" or "2h45m".
+// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+func ParseDuration(s string) (Duration, error) {
+ // [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
+ orig := s
+ var d uint64
+ neg := false
+
+ // Consume [-+]?
+ if s != "" {
+ c := s[0]
+ if c == '-' || c == '+' {
+ neg = c == '-'
+ s = s[1:]
+ }
+ }
+ // Special case: if all that is left is "0", this is zero.
+ if s == "0" {
+ return 0, nil
+ }
+ if s == "" {
+ return 0, errors.New("time: invalid duration " + quote(orig))
+ }
+ for s != "" {
+ var (
+ v, f uint64 // integers before, after decimal point
+ scale float64 = 1 // value = v + f/scale
+ )
+
+ var err error
+
+ // The next character must be [0-9.]
+ if !(s[0] == '.' || '0' <= s[0] && s[0] <= '9') {
+ return 0, errors.New("time: invalid duration " + quote(orig))
+ }
+ // Consume [0-9]*
+ pl := len(s)
+ v, s, err = leadingInt(s)
+ if err != nil {
+ return 0, errors.New("time: invalid duration " + quote(orig))
+ }
+ pre := pl != len(s) // whether we consumed anything before a period
+
+ // Consume (\.[0-9]*)?
+ post := false
+ if s != "" && s[0] == '.' {
+ s = s[1:]
+ pl := len(s)
+ f, scale, s = leadingFraction(s)
+ post = pl != len(s)
+ }
+ if !pre && !post {
+ // no digits (e.g. ".s" or "-.s")
+ return 0, errors.New("time: invalid duration " + quote(orig))
+ }
+
+ // Consume unit.
+ i := 0
+ for ; i < len(s); i++ {
+ c := s[i]
+ if c == '.' || '0' <= c && c <= '9' {
+ break
+ }
+ }
+ if i == 0 {
+ return 0, errors.New("time: missing unit in duration " + quote(orig))
+ }
+ u := s[:i]
+ s = s[i:]
+ unit, ok := unitMap[u]
+ if !ok {
+ return 0, errors.New("time: unknown unit " + quote(u) + " in duration " + quote(orig))
+ }
+ if v > 1<<63/unit {
+ // overflow
+ return 0, errors.New("time: invalid duration " + quote(orig))
+ }
+ v *= unit
+ if f > 0 {
+ // float64 is needed to be nanosecond accurate for fractions of hours.
+ // v >= 0 && (f*unit/scale) <= 3.6e+12 (ns/h, h is the largest unit)
+ v += uint64(float64(f) * (float64(unit) / scale))
+ if v > 1<<63 {
+ // overflow
+ return 0, errors.New("time: invalid duration " + quote(orig))
+ }
+ }
+ d += v
+ if d > 1<<63 {
+ return 0, errors.New("time: invalid duration " + quote(orig))
+ }
+ }
+ if neg {
+ return -Duration(d), nil
+ }
+ if d > 1<<63-1 {
+ return 0, errors.New("time: invalid duration " + quote(orig))
+ }
+ return Duration(d), nil
+}
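+
+// Editor's sketch (not part of the upstream source): typical usage from a
+// caller, assuming the fmt package is imported:
+//
+//	d, err := time.ParseDuration("1h30m")
+//	if err != nil {
+//		// handle the malformed input
+//	}
+//	fmt.Println(d)           // 1h30m0s
+//	fmt.Println(d.Minutes()) // 90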
diff --git a/contrib/go/_std_1.19/src/time/sleep.go b/contrib/go/_std_1.19/src/time/sleep.go
new file mode 100644
index 0000000000..cdab4782ad
--- /dev/null
+++ b/contrib/go/_std_1.19/src/time/sleep.go
@@ -0,0 +1,177 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package time
+
+// Sleep pauses the current goroutine for at least the duration d.
+// A negative or zero duration causes Sleep to return immediately.
+func Sleep(d Duration)
+
+// Interface to timers implemented in package runtime.
+// Must be in sync with ../runtime/time.go:/^type timer
+type runtimeTimer struct {
+ pp uintptr
+ when int64
+ period int64
+ f func(any, uintptr) // NOTE: must not be closure
+ arg any
+ seq uintptr
+ nextwhen int64
+ status uint32
+}
+
+// when is a helper function for setting the 'when' field of a runtimeTimer.
+// It returns what the runtime clock will read, in nanoseconds, Duration d in the future.
+// If d is negative, it is ignored. If the returned value would be less than
+// zero because of an overflow, MaxInt64 is returned.
+func when(d Duration) int64 {
+ if d <= 0 {
+ return runtimeNano()
+ }
+ t := runtimeNano() + int64(d)
+ if t < 0 {
+ // N.B. runtimeNano() and d are always positive, so addition
+ // (including overflow) will never result in t == 0.
+ t = 1<<63 - 1 // math.MaxInt64
+ }
+ return t
+}
+
+func startTimer(*runtimeTimer)
+func stopTimer(*runtimeTimer) bool
+func resetTimer(*runtimeTimer, int64) bool
+func modTimer(t *runtimeTimer, when, period int64, f func(any, uintptr), arg any, seq uintptr)
+
+// The Timer type represents a single event.
+// When the Timer expires, the current time will be sent on C,
+// unless the Timer was created by AfterFunc.
+// A Timer must be created with NewTimer or AfterFunc.
+type Timer struct {
+ C <-chan Time
+ r runtimeTimer
+}
+
+// Stop prevents the Timer from firing.
+// It returns true if the call stops the timer, false if the timer has already
+// expired or been stopped.
+// Stop does not close the channel, to prevent a read from the channel succeeding
+// incorrectly.
+//
+// To ensure the channel is empty after a call to Stop, check the
+// return value and drain the channel.
+// For example, assuming the program has not received from t.C already:
+//
+// if !t.Stop() {
+// <-t.C
+// }
+//
+// This cannot be done concurrent to other receives from the Timer's
+// channel or other calls to the Timer's Stop method.
+//
+// For a timer created with AfterFunc(d, f), if t.Stop returns false, then the timer
+// has already expired and the function f has been started in its own goroutine;
+// Stop does not wait for f to complete before returning.
+// If the caller needs to know whether f is completed, it must coordinate
+// with f explicitly.
+func (t *Timer) Stop() bool {
+ if t.r.f == nil {
+ panic("time: Stop called on uninitialized Timer")
+ }
+ return stopTimer(&t.r)
+}
+
+// NewTimer creates a new Timer that will send
+// the current time on its channel after at least duration d.
+func NewTimer(d Duration) *Timer {
+ c := make(chan Time, 1)
+ t := &Timer{
+ C: c,
+ r: runtimeTimer{
+ when: when(d),
+ f: sendTime,
+ arg: c,
+ },
+ }
+ startTimer(&t.r)
+ return t
+}
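+
+// Editor's sketch (not part of the upstream source): waiting on a one-shot
+// timer from a caller:
+//
+//	t := time.NewTimer(100 * time.Millisecond)
+//	<-t.C // blocks until the timer fires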
+
+// Reset changes the timer to expire after duration d.
+// It returns true if the timer had been active, false if the timer had
+// expired or been stopped.
+//
+// For a Timer created with NewTimer, Reset should be invoked only on
+// stopped or expired timers with drained channels.
+//
+// If a program has already received a value from t.C, the timer is known
+// to have expired and the channel drained, so t.Reset can be used directly.
+// If a program has not yet received a value from t.C, however,
+// the timer must be stopped and—if Stop reports that the timer expired
+// before being stopped—the channel explicitly drained:
+//
+// if !t.Stop() {
+// <-t.C
+// }
+// t.Reset(d)
+//
+// This should not be done concurrent to other receives from the Timer's
+// channel.
+//
+// Note that it is not possible to use Reset's return value correctly, as there
+// is a race condition between draining the channel and the new timer expiring.
+// Reset should always be invoked on stopped or expired timers with drained channels, as described above.
+// The return value exists to preserve compatibility with existing programs.
+//
+// For a Timer created with AfterFunc(d, f), Reset either reschedules
+// when f will run, in which case Reset returns true, or schedules f
+// to run again, in which case it returns false.
+// When Reset returns false, Reset neither waits for the prior f to
+// complete before returning nor does it guarantee that the subsequent
+// goroutine running f does not run concurrently with the prior
+// one. If the caller needs to know whether the prior execution of
+// f is completed, it must coordinate with f explicitly.
+func (t *Timer) Reset(d Duration) bool {
+ if t.r.f == nil {
+ panic("time: Reset called on uninitialized Timer")
+ }
+ w := when(d)
+ return resetTimer(&t.r, w)
+}
+
+// sendTime does a non-blocking send of the current time on c.
+func sendTime(c any, seq uintptr) {
+ select {
+ case c.(chan Time) <- Now():
+ default:
+ }
+}
+
+// After waits for the duration to elapse and then sends the current time
+// on the returned channel.
+// It is equivalent to NewTimer(d).C.
+// The underlying Timer is not recovered by the garbage collector
+// until the timer fires. If efficiency is a concern, use NewTimer
+// instead and call Timer.Stop if the timer is no longer needed.
+func After(d Duration) <-chan Time {
+ return NewTimer(d).C
+}
+
+// AfterFunc waits for the duration to elapse and then calls f
+// in its own goroutine. It returns a Timer that can
+// be used to cancel the call using its Stop method.
+func AfterFunc(d Duration, f func()) *Timer {
+ t := &Timer{
+ r: runtimeTimer{
+ when: when(d),
+ f: goFunc,
+ arg: f,
+ },
+ }
+ startTimer(&t.r)
+ return t
+}
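+
+// Editor's sketch (not part of the upstream source): scheduling and
+// cancelling a callback from a caller, assuming fmt is imported:
+//
+//	t := time.AfterFunc(time.Second, func() { fmt.Println("fired") })
+//	if t.Stop() {
+//		// the callback was cancelled before it started
+//	}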
+
+func goFunc(arg any, seq uintptr) {
+ go arg.(func())()
+}
diff --git a/contrib/go/_std_1.19/src/time/sys_unix.go b/contrib/go/_std_1.19/src/time/sys_unix.go
new file mode 100644
index 0000000000..0f06aa6ccd
--- /dev/null
+++ b/contrib/go/_std_1.19/src/time/sys_unix.go
@@ -0,0 +1,54 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix || (js && wasm)
+
+package time
+
+import (
+ "errors"
+ "syscall"
+)
+
+// for testing: whatever interrupts a sleep
+func interrupt() {
+ syscall.Kill(syscall.Getpid(), syscall.SIGCHLD)
+}
+
+func open(name string) (uintptr, error) {
+ fd, err := syscall.Open(name, syscall.O_RDONLY, 0)
+ if err != nil {
+ return 0, err
+ }
+ return uintptr(fd), nil
+}
+
+func read(fd uintptr, buf []byte) (int, error) {
+ return syscall.Read(int(fd), buf)
+}
+
+func closefd(fd uintptr) {
+ syscall.Close(int(fd))
+}
+
+func preadn(fd uintptr, buf []byte, off int) error {
+ whence := seekStart
+ if off < 0 {
+ whence = seekEnd
+ }
+ if _, err := syscall.Seek(int(fd), int64(off), whence); err != nil {
+ return err
+ }
+ for len(buf) > 0 {
+ m, err := syscall.Read(int(fd), buf)
+ if m <= 0 {
+ if err == nil {
+ return errors.New("short read")
+ }
+ return err
+ }
+ buf = buf[m:]
+ }
+ return nil
+}
diff --git a/contrib/go/_std_1.19/src/time/tick.go b/contrib/go/_std_1.19/src/time/tick.go
new file mode 100644
index 0000000000..dcfeca8783
--- /dev/null
+++ b/contrib/go/_std_1.19/src/time/tick.go
@@ -0,0 +1,73 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package time
+
+import "errors"
+
+// A Ticker holds a channel that delivers “ticks” of a clock
+// at intervals.
+type Ticker struct {
+ C <-chan Time // The channel on which the ticks are delivered.
+ r runtimeTimer
+}
+
+// NewTicker returns a new Ticker containing a channel that will send
+// the current time on the channel after each tick. The period of the
+// ticks is specified by the duration argument. The ticker will adjust
+// the time interval or drop ticks to make up for slow receivers.
+// The duration d must be greater than zero; if not, NewTicker will
+// panic. Stop the ticker to release associated resources.
+func NewTicker(d Duration) *Ticker {
+ if d <= 0 {
+ panic(errors.New("non-positive interval for NewTicker"))
+ }
+ // Give the channel a 1-element time buffer.
+ // If the client falls behind while reading, we drop ticks
+ // on the floor until the client catches up.
+ c := make(chan Time, 1)
+ t := &Ticker{
+ C: c,
+ r: runtimeTimer{
+ when: when(d),
+ period: int64(d),
+ f: sendTime,
+ arg: c,
+ },
+ }
+ startTimer(&t.r)
+ return t
+}
+
+// Stop turns off a ticker. After Stop, no more ticks will be sent.
+// Stop does not close the channel, to prevent a concurrent goroutine
+// reading from the channel from seeing an erroneous "tick".
+func (t *Ticker) Stop() {
+ stopTimer(&t.r)
+}
+
+// Reset stops a ticker and resets its period to the specified duration.
+// The next tick will arrive after the new period elapses. The duration d
+// must be greater than zero; if not, Reset will panic.
+func (t *Ticker) Reset(d Duration) {
+ if d <= 0 {
+ panic("non-positive interval for Ticker.Reset")
+ }
+ if t.r.f == nil {
+ panic("time: Reset called on uninitialized Ticker")
+ }
+ modTimer(&t.r, when(d), int64(d), t.r.f, t.r.arg, t.r.seq)
+}
+
+// Tick is a convenience wrapper for NewTicker providing access to the ticking
+// channel only. While Tick is useful for clients that have no need to shut down
+// the Ticker, be aware that without a way to shut it down the underlying
+// Ticker cannot be recovered by the garbage collector; it "leaks".
+// Unlike NewTicker, Tick will return nil if d <= 0.
+func Tick(d Duration) <-chan Time {
+ if d <= 0 {
+ return nil
+ }
+ return NewTicker(d).C
+}
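+
+// Editor's sketch (not part of the upstream source): the common Ticker loop
+// from a caller, stopping the ticker to release its resources:
+//
+//	ticker := time.NewTicker(time.Second)
+//	defer ticker.Stop()
+//	for t := range ticker.C {
+//		fmt.Println("tick at", t)
+//	}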
diff --git a/contrib/go/_std_1.19/src/time/time.go b/contrib/go/_std_1.19/src/time/time.go
new file mode 100644
index 0000000000..47b26e39a8
--- /dev/null
+++ b/contrib/go/_std_1.19/src/time/time.go
@@ -0,0 +1,1619 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package time provides functionality for measuring and displaying time.
+//
+// The calendrical calculations always assume a Gregorian calendar, with
+// no leap seconds.
+//
+// # Monotonic Clocks
+//
+// Operating systems provide both a “wall clock,” which is subject to
+// changes for clock synchronization, and a “monotonic clock,” which is
+// not. The general rule is that the wall clock is for telling time and
+// the monotonic clock is for measuring time. Rather than split the API,
+// in this package the Time returned by time.Now contains both a wall
+// clock reading and a monotonic clock reading; later time-telling
+// operations use the wall clock reading, but later time-measuring
+// operations, specifically comparisons and subtractions, use the
+// monotonic clock reading.
+//
+// For example, this code always computes a positive elapsed time of
+// approximately 20 milliseconds, even if the wall clock is changed during
+// the operation being timed:
+//
+// start := time.Now()
+// ... operation that takes 20 milliseconds ...
+// t := time.Now()
+// elapsed := t.Sub(start)
+//
+// Other idioms, such as time.Since(start), time.Until(deadline), and
+// time.Now().Before(deadline), are similarly robust against wall clock
+// resets.
+//
+// The rest of this section gives the precise details of how operations
+// use monotonic clocks, but understanding those details is not required
+// to use this package.
+//
+// The Time returned by time.Now contains a monotonic clock reading.
+// If Time t has a monotonic clock reading, t.Add adds the same duration to
+// both the wall clock and monotonic clock readings to compute the result.
+// Because t.AddDate(y, m, d), t.Round(d), and t.Truncate(d) are wall time
+// computations, they always strip any monotonic clock reading from their results.
+// Because t.In, t.Local, and t.UTC are used for their effect on the interpretation
+// of the wall time, they also strip any monotonic clock reading from their results.
+// The canonical way to strip a monotonic clock reading is to use t = t.Round(0).
+//
+// If Times t and u both contain monotonic clock readings, the operations
+// t.After(u), t.Before(u), t.Equal(u), and t.Sub(u) are carried out
+// using the monotonic clock readings alone, ignoring the wall clock
+// readings. If either t or u contains no monotonic clock reading, these
+// operations fall back to using the wall clock readings.
+//
+// On some systems the monotonic clock will stop if the computer goes to sleep.
+// On such a system, t.Sub(u) may not accurately reflect the actual
+// time that passed between t and u.
+//
+// Because the monotonic clock reading has no meaning outside
+// the current process, the serialized forms generated by t.GobEncode,
+// t.MarshalBinary, t.MarshalJSON, and t.MarshalText omit the monotonic
+// clock reading, and t.Format provides no format for it. Similarly, the
+// constructors time.Date, time.Parse, time.ParseInLocation, and time.Unix,
+// as well as the unmarshalers t.GobDecode, t.UnmarshalBinary,
+// t.UnmarshalJSON, and t.UnmarshalText always create times with
+// no monotonic clock reading.
+//
+// The monotonic clock reading exists only in Time values. It is not
+// a part of Duration values or the Unix times returned by t.Unix and
+// friends.
+//
+// Note that the Go == operator compares not just the time instant but
+// also the Location and the monotonic clock reading. See the
+// documentation for the Time type for a discussion of equality
+// testing for Time values.
+//
+// For debugging, the result of t.String does include the monotonic
+// clock reading if present. If t != u because of different monotonic clock readings,
+// that difference will be visible when printing t.String() and u.String().
+package time
+
+import (
+ "errors"
+ _ "unsafe" // for go:linkname
+)
+
+// A Time represents an instant in time with nanosecond precision.
+//
+// Programs using times should typically store and pass them as values,
+// not pointers. That is, time variables and struct fields should be of
+// type time.Time, not *time.Time.
+//
+// A Time value can be used by multiple goroutines simultaneously except
+// that the methods GobDecode, UnmarshalBinary, UnmarshalJSON and
+// UnmarshalText are not concurrency-safe.
+//
+// Time instants can be compared using the Before, After, and Equal methods.
+// The Sub method subtracts two instants, producing a Duration.
+// The Add method adds a Time and a Duration, producing a Time.
+//
+// The zero value of type Time is January 1, year 1, 00:00:00.000000000 UTC.
+// As this time is unlikely to come up in practice, the IsZero method gives
+// a simple way of detecting a time that has not been initialized explicitly.
+//
+// Each Time has associated with it a Location, consulted when computing the
+// presentation form of the time, such as in the Format, Hour, and Year methods.
+// The methods Local, UTC, and In return a Time with a specific location.
+// Changing the location in this way changes only the presentation; it does not
+// change the instant in time being denoted and therefore does not affect the
+// computations described in earlier paragraphs.
+//
+// Representations of a Time value saved by the GobEncode, MarshalBinary,
+// MarshalJSON, and MarshalText methods store the Time.Location's offset, but not
+// the location name. They therefore lose information about Daylight Saving Time.
+//
+// In addition to the required “wall clock” reading, a Time may contain an optional
+// reading of the current process's monotonic clock, to provide additional precision
+// for comparison or subtraction.
+// See the “Monotonic Clocks” section in the package documentation for details.
+//
+// Note that the Go == operator compares not just the time instant but also the
+// Location and the monotonic clock reading. Therefore, Time values should not
+// be used as map or database keys without first guaranteeing that the
+// identical Location has been set for all values, which can be achieved
+// through use of the UTC or Local method, and that the monotonic clock reading
+// has been stripped by setting t = t.Round(0). In general, prefer t.Equal(u)
+// to t == u, since t.Equal uses the most accurate comparison available and
+// correctly handles the case when only one of its arguments has a monotonic
+// clock reading.
+type Time struct {
+ // wall and ext encode the wall time seconds, wall time nanoseconds,
+ // and optional monotonic clock reading in nanoseconds.
+ //
+ // From high to low bit position, wall encodes a 1-bit flag (hasMonotonic),
+ // a 33-bit seconds field, and a 30-bit wall time nanoseconds field.
+ // The nanoseconds field is in the range [0, 999999999].
+ // If the hasMonotonic bit is 0, then the 33-bit field must be zero
+ // and the full signed 64-bit wall seconds since Jan 1 year 1 is stored in ext.
+ // If the hasMonotonic bit is 1, then the 33-bit field holds a 33-bit
+ // unsigned wall seconds since Jan 1 year 1885, and ext holds a
+ // signed 64-bit monotonic clock reading, nanoseconds since process start.
+ wall uint64
+ ext int64
+
+ // loc specifies the Location that should be used to
+ // determine the minute, hour, month, day, and year
+ // that correspond to this Time.
+ // The nil location means UTC.
+ // All UTC times are represented with loc==nil, never loc==&utcLoc.
+ loc *Location
+}
+
+const (
+ hasMonotonic = 1 << 63
+ maxWall = wallToInternal + (1<<33 - 1) // year 2157
+ minWall = wallToInternal // year 1885
+ nsecMask = 1<<30 - 1
+ nsecShift = 30
+)
+
+// These helpers for manipulating the wall and monotonic clock readings
+// take pointer receivers, even when they don't modify the time,
+// to make them cheaper to call.
+
+// nsec returns the time's nanoseconds.
+func (t *Time) nsec() int32 {
+ return int32(t.wall & nsecMask)
+}
+
+// sec returns the time's seconds since Jan 1 year 1.
+func (t *Time) sec() int64 {
+ if t.wall&hasMonotonic != 0 {
+ return wallToInternal + int64(t.wall<<1>>(nsecShift+1))
+ }
+ return t.ext
+}
+
+// unixSec returns the time's seconds since Jan 1 1970 (Unix time).
+func (t *Time) unixSec() int64 { return t.sec() + internalToUnix }
+
+// addSec adds d seconds to the time.
+func (t *Time) addSec(d int64) {
+ if t.wall&hasMonotonic != 0 {
+ sec := int64(t.wall << 1 >> (nsecShift + 1))
+ dsec := sec + d
+ if 0 <= dsec && dsec <= 1<<33-1 {
+ t.wall = t.wall&nsecMask | uint64(dsec)<<nsecShift | hasMonotonic
+ return
+ }
+ // Wall second now out of range for packed field.
+ // Move to ext.
+ t.stripMono()
+ }
+
+ // Check if the sum of t.ext and d overflows and handle it properly.
+ sum := t.ext + d
+ if (sum > t.ext) == (d > 0) {
+ t.ext = sum
+ } else if d > 0 {
+ t.ext = 1<<63 - 1
+ } else {
+ t.ext = -(1<<63 - 1)
+ }
+}
+
+// setLoc sets the location associated with the time.
+func (t *Time) setLoc(loc *Location) {
+ if loc == &utcLoc {
+ loc = nil
+ }
+ t.stripMono()
+ t.loc = loc
+}
+
+// stripMono strips the monotonic clock reading in t.
+func (t *Time) stripMono() {
+ if t.wall&hasMonotonic != 0 {
+ t.ext = t.sec()
+ t.wall &= nsecMask
+ }
+}
+
+// setMono sets the monotonic clock reading in t.
+// If t cannot hold a monotonic clock reading,
+// because its wall time is too large,
+// setMono is a no-op.
+func (t *Time) setMono(m int64) {
+ if t.wall&hasMonotonic == 0 {
+ sec := t.ext
+ if sec < minWall || maxWall < sec {
+ return
+ }
+ t.wall |= hasMonotonic | uint64(sec-minWall)<<nsecShift
+ }
+ t.ext = m
+}
+
+// mono returns t's monotonic clock reading.
+// It returns 0 for a missing reading.
+// This function is used only for testing,
+// so it's OK that technically 0 is a valid
+// monotonic clock reading as well.
+func (t *Time) mono() int64 {
+ if t.wall&hasMonotonic == 0 {
+ return 0
+ }
+ return t.ext
+}
+
+// After reports whether the time instant t is after u.
+func (t Time) After(u Time) bool {
+ if t.wall&u.wall&hasMonotonic != 0 {
+ return t.ext > u.ext
+ }
+ ts := t.sec()
+ us := u.sec()
+ return ts > us || ts == us && t.nsec() > u.nsec()
+}
+
+// Before reports whether the time instant t is before u.
+func (t Time) Before(u Time) bool {
+ if t.wall&u.wall&hasMonotonic != 0 {
+ return t.ext < u.ext
+ }
+ ts := t.sec()
+ us := u.sec()
+ return ts < us || ts == us && t.nsec() < u.nsec()
+}
+
+// Equal reports whether t and u represent the same time instant.
+// Two times can be equal even if they are in different locations.
+// For example, 6:00 +0200 and 4:00 UTC are Equal.
+// See the documentation on the Time type for the pitfalls of using == with
+// Time values; most code should use Equal instead.
+func (t Time) Equal(u Time) bool {
+ if t.wall&u.wall&hasMonotonic != 0 {
+ return t.ext == u.ext
+ }
+ return t.sec() == u.sec() && t.nsec() == u.nsec()
+}
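+
+// Editor's sketch (not part of the upstream source): the doc comment's
+// 6:00 +0200 vs 4:00 UTC example, spelled out (the zone name "X" is
+// arbitrary):
+//
+//	t1 := time.Date(2000, 1, 1, 6, 0, 0, 0, time.FixedZone("X", 2*3600))
+//	t2 := time.Date(2000, 1, 1, 4, 0, 0, 0, time.UTC)
+//	fmt.Println(t1.Equal(t2)) // true  (same instant)
+//	fmt.Println(t1 == t2)     // false (different Locations)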
+
+// A Month specifies a month of the year (January = 1, ...).
+type Month int
+
+const (
+ January Month = 1 + iota
+ February
+ March
+ April
+ May
+ June
+ July
+ August
+ September
+ October
+ November
+ December
+)
+
+// String returns the English name of the month ("January", "February", ...).
+func (m Month) String() string {
+ if January <= m && m <= December {
+ return longMonthNames[m-1]
+ }
+ buf := make([]byte, 20)
+ n := fmtInt(buf, uint64(m))
+ return "%!Month(" + string(buf[n:]) + ")"
+}
+
+// A Weekday specifies a day of the week (Sunday = 0, ...).
+type Weekday int
+
+const (
+ Sunday Weekday = iota
+ Monday
+ Tuesday
+ Wednesday
+ Thursday
+ Friday
+ Saturday
+)
+
+// String returns the English name of the day ("Sunday", "Monday", ...).
+func (d Weekday) String() string {
+ if Sunday <= d && d <= Saturday {
+ return longDayNames[d]
+ }
+ buf := make([]byte, 20)
+ n := fmtInt(buf, uint64(d))
+ return "%!Weekday(" + string(buf[n:]) + ")"
+}
+
+// Computations on time.
+//
+// The zero value for a Time is defined to be
+// January 1, year 1, 00:00:00.000000000 UTC
+// which (1) looks like a zero, or as close as you can get in a date
+// (1-1-1 00:00:00 UTC), (2) is unlikely enough to arise in practice to
+// be a suitable "not set" sentinel, unlike Jan 1 1970, and (3) has a
+// non-negative year even in time zones west of UTC, unlike 1-1-0
+// 00:00:00 UTC, which would be 12-31-(-1) 19:00:00 in New York.
+//
+// The zero Time value does not force a specific epoch for the time
+// representation. For example, to use the Unix epoch internally, we
+// could define that to distinguish a zero value from Jan 1 1970, that
+// time would be represented by sec=-1, nsec=1e9. However, it does
+// suggest a representation, namely using 1-1-1 00:00:00 UTC as the
+// epoch, and that's what we do.
+//
+// The Add and Sub computations are oblivious to the choice of epoch.
+//
+// The presentation computations - year, month, minute, and so on - all
+// rely heavily on division and modulus by positive constants. For
+// calendrical calculations we want these divisions to round down, even
+// for negative values, so that the remainder is always positive, but
+// Go's division (like most hardware division instructions) rounds to
+// zero. We can still do those computations and then adjust the result
+// for a negative numerator, but it's annoying to write the adjustment
+// over and over. Instead, we can change to a different epoch so long
+// ago that all the times we care about will be positive, and then round
+// to zero and round down coincide. These presentation routines already
+// have to add the zone offset, so adding the translation to the
+// alternate epoch is cheap. For example, having a non-negative time t
+// means that we can write
+//
+// sec = t % 60
+//
+// instead of
+//
+// sec = t % 60
+// if sec < 0 {
+// sec += 60
+// }
+//
+// everywhere.
+//
+// The calendar runs on an exact 400 year cycle: a 400-year calendar
+// printed for 1970-2369 will apply as well to 2370-2769. Even the days
+// of the week match up. It simplifies the computations to choose the
+// cycle boundaries so that the exceptional years are always delayed as
+// long as possible. That means choosing a year equal to 1 mod 400, so
+// that the first leap year is the 4th year, the first missed leap year
+// is the 100th year, and the missed missed leap year is the 400th year.
+// So we'd prefer instead to print a calendar for 2001-2400 and reuse it
+// for 2401-2800.
+//
+// Finally, it's convenient if the delta between the Unix epoch and
+// long-ago epoch is representable by an int64 constant.
+//
+// These three considerations—choose an epoch as early as possible, that
+// uses a year equal to 1 mod 400, and that is no more than 2⁶³ seconds
+// earlier than 1970—bring us to the year -292277022399. We refer to
+// this year as the absolute zero year, and to times measured as a uint64
+// seconds since this year as absolute times.
+//
+// Times measured as an int64 seconds since the year 1—the representation
+// used for Time's sec field—are called internal times.
+//
+// Times measured as an int64 seconds since the year 1970 are called Unix
+// times.
+//
+// It is tempting to just use the year 1 as the absolute epoch, defining
+// that the routines are only valid for years >= 1. However, the
+// routines would then be invalid when displaying the epoch in time zones
+// west of UTC, since it is year 0. It doesn't seem tenable to say that
+// printing the zero time correctly isn't supported in half the time
+// zones. By comparison, it's reasonable to mishandle some times in
+// the year -292277022399.
+//
+// All this is opaque to clients of the API and can be changed if a
+// better implementation presents itself.
+
+const (
+ // The unsigned zero year for internal calculations.
+ // Must be 1 mod 400, and times before it will not compute correctly,
+ // but otherwise can be changed at will.
+ absoluteZeroYear = -292277022399
+
+ // The year of the zero Time.
+ // Assumed by the unixToInternal computation below.
+ internalYear = 1
+
+ // Offsets to convert between internal and absolute or Unix times.
+ absoluteToInternal int64 = (absoluteZeroYear - internalYear) * 365.2425 * secondsPerDay
+ internalToAbsolute = -absoluteToInternal
+
+ unixToInternal int64 = (1969*365 + 1969/4 - 1969/100 + 1969/400) * secondsPerDay
+ internalToUnix int64 = -unixToInternal
+
+ wallToInternal int64 = (1884*365 + 1884/4 - 1884/100 + 1884/400) * secondsPerDay
+)
+
+// IsZero reports whether t represents the zero time instant,
+// January 1, year 1, 00:00:00 UTC.
+func (t Time) IsZero() bool {
+ return t.sec() == 0 && t.nsec() == 0
+}
+
+// abs returns the time t as an absolute time, adjusted by the zone offset.
+// It is called when computing a presentation property like Month or Hour.
+func (t Time) abs() uint64 {
+ l := t.loc
+ // Avoid function calls when possible.
+ if l == nil || l == &localLoc {
+ l = l.get()
+ }
+ sec := t.unixSec()
+ if l != &utcLoc {
+ if l.cacheZone != nil && l.cacheStart <= sec && sec < l.cacheEnd {
+ sec += int64(l.cacheZone.offset)
+ } else {
+ _, offset, _, _, _ := l.lookup(sec)
+ sec += int64(offset)
+ }
+ }
+ return uint64(sec + (unixToInternal + internalToAbsolute))
+}
+
+// locabs is a combination of the Zone and abs methods,
+// extracting both return values from a single zone lookup.
+func (t Time) locabs() (name string, offset int, abs uint64) {
+ l := t.loc
+ if l == nil || l == &localLoc {
+ l = l.get()
+ }
+ // Avoid function call if we hit the local time cache.
+ sec := t.unixSec()
+ if l != &utcLoc {
+ if l.cacheZone != nil && l.cacheStart <= sec && sec < l.cacheEnd {
+ name = l.cacheZone.name
+ offset = l.cacheZone.offset
+ } else {
+ name, offset, _, _, _ = l.lookup(sec)
+ }
+ sec += int64(offset)
+ } else {
+ name = "UTC"
+ }
+ abs = uint64(sec + (unixToInternal + internalToAbsolute))
+ return
+}
+
+// Date returns the year, month, and day in which t occurs.
+func (t Time) Date() (year int, month Month, day int) {
+ year, month, day, _ = t.date(true)
+ return
+}
+
+// Year returns the year in which t occurs.
+func (t Time) Year() int {
+ year, _, _, _ := t.date(false)
+ return year
+}
+
+// Month returns the month of the year specified by t.
+func (t Time) Month() Month {
+ _, month, _, _ := t.date(true)
+ return month
+}
+
+// Day returns the day of the month specified by t.
+func (t Time) Day() int {
+ _, _, day, _ := t.date(true)
+ return day
+}
+
+// Weekday returns the day of the week specified by t.
+func (t Time) Weekday() Weekday {
+ return absWeekday(t.abs())
+}
+
+// absWeekday is like Weekday but operates on an absolute time.
+func absWeekday(abs uint64) Weekday {
+ // January 1 of the absolute year, like January 1 of 2001, was a Monday.
+ sec := (abs + uint64(Monday)*secondsPerDay) % secondsPerWeek
+ return Weekday(int(sec) / secondsPerDay)
+}
+
+// ISOWeek returns the ISO 8601 year and week number in which t occurs.
+// Week ranges from 1 to 53. Jan 01 to Jan 03 of year n might belong to
+// week 52 or 53 of year n-1, and Dec 29 to Dec 31 might belong to week 1
+// of year n+1.
+func (t Time) ISOWeek() (year, week int) {
+ // According to the rule that the first calendar week of a calendar year is
+ // the week including the first Thursday of that year, and that the last one is
+ // the week immediately preceding the first calendar week of the next calendar year.
+ // See https://www.iso.org/obp/ui#iso:std:iso:8601:-1:ed-1:v1:en:term:3.1.1.23 for details.
+
+ // weeks start with Monday
+ // Monday Tuesday Wednesday Thursday Friday Saturday Sunday
+ // 1 2 3 4 5 6 7
+ // +3 +2 +1 0 -1 -2 -3
+ // the offset to Thursday
+ abs := t.abs()
+ d := Thursday - absWeekday(abs)
+ // handle Sunday
+ if d == 4 {
+ d = -3
+ }
+ // find the Thursday of the calendar week
+ abs += uint64(d) * secondsPerDay
+ year, _, _, yday := absDate(abs, false)
+ return year, yday/7 + 1
+}
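+
+// Editor's sketch (not part of the upstream source): January 1, 2021 fell on
+// a Friday, so it belongs to the last ISO week of 2020:
+//
+//	y, w := time.Date(2021, time.January, 1, 0, 0, 0, 0, time.UTC).ISOWeek()
+//	fmt.Println(y, w) // 2020 53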
+
+// Clock returns the hour, minute, and second within the day specified by t.
+func (t Time) Clock() (hour, min, sec int) {
+ return absClock(t.abs())
+}
+
+// absClock is like clock but operates on an absolute time.
+func absClock(abs uint64) (hour, min, sec int) {
+ sec = int(abs % secondsPerDay)
+ hour = sec / secondsPerHour
+ sec -= hour * secondsPerHour
+ min = sec / secondsPerMinute
+ sec -= min * secondsPerMinute
+ return
+}
+
+// Hour returns the hour within the day specified by t, in the range [0, 23].
+func (t Time) Hour() int {
+ return int(t.abs()%secondsPerDay) / secondsPerHour
+}
+
+// Minute returns the minute offset within the hour specified by t, in the range [0, 59].
+func (t Time) Minute() int {
+ return int(t.abs()%secondsPerHour) / secondsPerMinute
+}
+
+// Second returns the second offset within the minute specified by t, in the range [0, 59].
+func (t Time) Second() int {
+ return int(t.abs() % secondsPerMinute)
+}
+
+// Nanosecond returns the nanosecond offset within the second specified by t,
+// in the range [0, 999999999].
+func (t Time) Nanosecond() int {
+ return int(t.nsec())
+}
+
+// YearDay returns the day of the year specified by t, in the range [1,365] for non-leap years,
+// and [1,366] in leap years.
+func (t Time) YearDay() int {
+ _, _, _, yday := t.date(false)
+ return yday + 1
+}
+
+// A Duration represents the elapsed time between two instants
+// as an int64 nanosecond count. The representation limits the
+// largest representable duration to approximately 290 years.
+type Duration int64
+
+const (
+ minDuration Duration = -1 << 63
+ maxDuration Duration = 1<<63 - 1
+)
+
+// Common durations. There is no definition for units of Day or larger
+// to avoid confusion across daylight saving time transitions.
+//
+// To count the number of units in a Duration, divide:
+//
+// second := time.Second
+// fmt.Print(int64(second/time.Millisecond)) // prints 1000
+//
+// To convert an integer number of units to a Duration, multiply:
+//
+// seconds := 10
+// fmt.Print(time.Duration(seconds)*time.Second) // prints 10s
+const (
+ Nanosecond Duration = 1
+ Microsecond = 1000 * Nanosecond
+ Millisecond = 1000 * Microsecond
+ Second = 1000 * Millisecond
+ Minute = 60 * Second
+ Hour = 60 * Minute
+)
+
+// String returns a string representing the duration in the form "72h3m0.5s".
+// Leading zero units are omitted. As a special case, durations less than one
+// second are formatted with a smaller unit (milli-, micro-, or nanoseconds)
+// to ensure that the leading digit is non-zero. The zero duration formats as 0s.
+func (d Duration) String() string {
+ // Largest time is 2540400h10m10.000000000s
+ var buf [32]byte
+ w := len(buf)
+
+ u := uint64(d)
+ neg := d < 0
+ if neg {
+ u = -u
+ }
+
+ if u < uint64(Second) {
+ // Special case: if duration is smaller than a second,
+ // use smaller units, like 1.2ms
+ var prec int
+ w--
+ buf[w] = 's'
+ w--
+ switch {
+ case u == 0:
+ return "0s"
+ case u < uint64(Microsecond):
+ // print nanoseconds
+ prec = 0
+ buf[w] = 'n'
+ case u < uint64(Millisecond):
+ // print microseconds
+ prec = 3
+ // U+00B5 'µ' micro sign == 0xC2 0xB5
+ w-- // Need room for two bytes.
+ copy(buf[w:], "µ")
+ default:
+ // print milliseconds
+ prec = 6
+ buf[w] = 'm'
+ }
+ w, u = fmtFrac(buf[:w], u, prec)
+ w = fmtInt(buf[:w], u)
+ } else {
+ w--
+ buf[w] = 's'
+
+ w, u = fmtFrac(buf[:w], u, 9)
+
+ // u is now integer seconds
+ w = fmtInt(buf[:w], u%60)
+ u /= 60
+
+ // u is now integer minutes
+ if u > 0 {
+ w--
+ buf[w] = 'm'
+ w = fmtInt(buf[:w], u%60)
+ u /= 60
+
+ // u is now integer hours
+ // Stop at hours because days can be different lengths.
+ if u > 0 {
+ w--
+ buf[w] = 'h'
+ w = fmtInt(buf[:w], u)
+ }
+ }
+ }
+
+ if neg {
+ w--
+ buf[w] = '-'
+ }
+
+ return string(buf[w:])
+}
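+
+// Editor's sketch (not part of the upstream source): a few representative
+// outputs of String:
+//
+//	fmt.Println(1500 * time.Millisecond) // 1.5s
+//	fmt.Println(90 * time.Minute)        // 1h30m0s
+//	fmt.Println(300 * time.Microsecond)  // 300µs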
+
+// fmtFrac formats the fraction of v/10**prec (e.g., ".12345") into the
+// tail of buf, omitting trailing zeros. It omits the decimal
+// point too when the fraction is 0. It returns the index where the
+// output bytes begin and the value v/10**prec.
+func fmtFrac(buf []byte, v uint64, prec int) (nw int, nv uint64) {
+ // Omit trailing zeros up to and including decimal point.
+ w := len(buf)
+ print := false
+ for i := 0; i < prec; i++ {
+ digit := v % 10
+ print = print || digit != 0
+ if print {
+ w--
+ buf[w] = byte(digit) + '0'
+ }
+ v /= 10
+ }
+ if print {
+ w--
+ buf[w] = '.'
+ }
+ return w, v
+}
+
+// fmtInt formats v into the tail of buf.
+// It returns the index where the output begins.
+func fmtInt(buf []byte, v uint64) int {
+ w := len(buf)
+ if v == 0 {
+ w--
+ buf[w] = '0'
+ } else {
+ for v > 0 {
+ w--
+ buf[w] = byte(v%10) + '0'
+ v /= 10
+ }
+ }
+ return w
+}
+
+// Nanoseconds returns the duration as an integer nanosecond count.
+func (d Duration) Nanoseconds() int64 { return int64(d) }
+
+// Microseconds returns the duration as an integer microsecond count.
+func (d Duration) Microseconds() int64 { return int64(d) / 1e3 }
+
+// Milliseconds returns the duration as an integer millisecond count.
+func (d Duration) Milliseconds() int64 { return int64(d) / 1e6 }
+
+// These methods return float64 because the dominant
+// use case is for printing a floating point number like 1.5s, and
+// a truncation to integer would make them not useful in those cases.
+// Splitting the integer and fraction ourselves guarantees that
+// converting the returned float64 to an integer rounds the same
+// way that a pure integer conversion would have, even in cases
+// where, say, float64(d.Nanoseconds())/1e9 would have rounded
+// differently.
+
+// Seconds returns the duration as a floating point number of seconds.
+func (d Duration) Seconds() float64 {
+ sec := d / Second
+ nsec := d % Second
+ return float64(sec) + float64(nsec)/1e9
+}
+
+// Minutes returns the duration as a floating point number of minutes.
+func (d Duration) Minutes() float64 {
+ min := d / Minute
+ nsec := d % Minute
+ return float64(min) + float64(nsec)/(60*1e9)
+}
+
+// Hours returns the duration as a floating point number of hours.
+func (d Duration) Hours() float64 {
+ hour := d / Hour
+ nsec := d % Hour
+ return float64(hour) + float64(nsec)/(60*60*1e9)
+}
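+
+// Editor's sketch (not part of the upstream source): the float64 forms keep
+// the fractional part that integer division would drop:
+//
+//	d := 90 * time.Minute
+//	fmt.Println(d.Hours())            // 1.5
+//	fmt.Println(int64(d / time.Hour)) // 1 (integer division truncates)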
+
+// Truncate returns the result of rounding d toward zero to a multiple of m.
+// If m <= 0, Truncate returns d unchanged.
+func (d Duration) Truncate(m Duration) Duration {
+ if m <= 0 {
+ return d
+ }
+ return d - d%m
+}
+
+// lessThanHalf reports whether x+x < y but avoids overflow,
+// assuming x and y are both positive (Duration is signed).
+func lessThanHalf(x, y Duration) bool {
+ return uint64(x)+uint64(x) < uint64(y)
+}
+
+// Round returns the result of rounding d to the nearest multiple of m.
+// The rounding behavior for halfway values is to round away from zero.
+// If the result exceeds the maximum (or minimum)
+// value that can be stored in a Duration,
+// Round returns the maximum (or minimum) duration.
+// If m <= 0, Round returns d unchanged.
+func (d Duration) Round(m Duration) Duration {
+ if m <= 0 {
+ return d
+ }
+ r := d % m
+ if d < 0 {
+ r = -r
+ if lessThanHalf(r, m) {
+ return d + r
+ }
+ if d1 := d - m + r; d1 < d {
+ return d1
+ }
+ return minDuration // overflow
+ }
+ if lessThanHalf(r, m) {
+ return d - r
+ }
+ if d1 := d + m - r; d1 > d {
+ return d1
+ }
+ return maxDuration // overflow
+}
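+
+// Editor's sketch (not part of the upstream source): Truncate rounds toward
+// zero, Round to the nearest multiple with halves away from zero:
+//
+//	d := 90 * time.Second
+//	fmt.Println(d.Truncate(time.Minute)) // 1m0s
+//	fmt.Println(d.Round(time.Minute))    // 2m0s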
+
+// Abs returns the absolute value of d.
+// As a special case, math.MinInt64 is converted to math.MaxInt64.
+func (d Duration) Abs() Duration {
+ switch {
+ case d >= 0:
+ return d
+ case d == minDuration:
+ return maxDuration
+ default:
+ return -d
+ }
+}
+
+// Add returns the time t+d.
+func (t Time) Add(d Duration) Time {
+ dsec := int64(d / 1e9)
+ nsec := t.nsec() + int32(d%1e9)
+ if nsec >= 1e9 {
+ dsec++
+ nsec -= 1e9
+ } else if nsec < 0 {
+ dsec--
+ nsec += 1e9
+ }
+ t.wall = t.wall&^nsecMask | uint64(nsec) // update nsec
+ t.addSec(dsec)
+ if t.wall&hasMonotonic != 0 {
+ te := t.ext + int64(d)
+ if d < 0 && te > t.ext || d > 0 && te < t.ext {
+ // Monotonic clock reading now out of range; degrade to wall-only.
+ t.stripMono()
+ } else {
+ t.ext = te
+ }
+ }
+ return t
+}
+
+// Sub returns the duration t-u. If the result exceeds the maximum (or minimum)
+// value that can be stored in a Duration, the maximum (or minimum) duration
+// will be returned.
+// To compute t-d for a duration d, use t.Add(-d).
+func (t Time) Sub(u Time) Duration {
+ if t.wall&u.wall&hasMonotonic != 0 {
+ te := t.ext
+ ue := u.ext
+ d := Duration(te - ue)
+ if d < 0 && te > ue {
+ return maxDuration // t - u is positive out of range
+ }
+ if d > 0 && te < ue {
+ return minDuration // t - u is negative out of range
+ }
+ return d
+ }
+ d := Duration(t.sec()-u.sec())*Second + Duration(t.nsec()-u.nsec())
+ // Check for overflow or underflow.
+ switch {
+ case u.Add(d).Equal(t):
+ return d // d is correct
+ case t.Before(u):
+ return minDuration // t - u is negative out of range
+ default:
+ return maxDuration // t - u is positive out of range
+ }
+}
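+
+// Editor's sketch (not part of the upstream source): Sub saturates rather
+// than wrapping when the gap exceeds the Duration range:
+//
+//	lo := time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)
+//	hi := time.Date(9999, time.January, 1, 0, 0, 0, 0, time.UTC)
+//	fmt.Println(hi.Sub(lo)) // 2562047h47m16.854775807s (maximum Duration)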
+
+// Since returns the time elapsed since t.
+// It is shorthand for time.Now().Sub(t).
+func Since(t Time) Duration {
+ var now Time
+ if t.wall&hasMonotonic != 0 {
+ // Common case optimization: if t has monotonic time, then Sub will use only it.
+ now = Time{hasMonotonic, runtimeNano() - startNano, nil}
+ } else {
+ now = Now()
+ }
+ return now.Sub(t)
+}
+
+// Until returns the duration until t.
+// It is shorthand for t.Sub(time.Now()).
+func Until(t Time) Duration {
+ var now Time
+ if t.wall&hasMonotonic != 0 {
+ // Common case optimization: if t has monotonic time, then Sub will use only it.
+ now = Time{hasMonotonic, runtimeNano() - startNano, nil}
+ } else {
+ now = Now()
+ }
+ return t.Sub(now)
+}
+
+// AddDate returns the time corresponding to adding the
+// given number of years, months, and days to t.
+// For example, AddDate(-1, 2, 3) applied to January 1, 2011
+// returns March 4, 2010.
+//
+// AddDate normalizes its result in the same way that Date does,
+// so, for example, adding one month to October 31 yields
+// December 1, the normalized form for November 31.
+func (t Time) AddDate(years int, months int, days int) Time {
+ year, month, day := t.Date()
+ hour, min, sec := t.Clock()
+ return Date(year+years, month+Month(months), day+days, hour, min, sec, int(t.nsec()), t.Location())
+}
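+
+// Editor's sketch (not part of the upstream source): the normalization in
+// the doc comment, spelled out:
+//
+//	t := time.Date(2010, time.October, 31, 0, 0, 0, 0, time.UTC)
+//	fmt.Println(t.AddDate(0, 1, 0).Format("2006-01-02")) // 2010-12-01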
+
+const (
+ secondsPerMinute = 60
+ secondsPerHour = 60 * secondsPerMinute
+ secondsPerDay = 24 * secondsPerHour
+ secondsPerWeek = 7 * secondsPerDay
+ daysPer400Years = 365*400 + 97
+ daysPer100Years = 365*100 + 24
+ daysPer4Years = 365*4 + 1
+)
+
+// date computes the year, day of year, and when full=true,
+// the month and day in which t occurs.
+func (t Time) date(full bool) (year int, month Month, day int, yday int) {
+ return absDate(t.abs(), full)
+}
+
+// absDate is like date but operates on an absolute time.
+func absDate(abs uint64, full bool) (year int, month Month, day int, yday int) {
+ // Split into time and day.
+ d := abs / secondsPerDay
+
+ // Account for 400 year cycles.
+ n := d / daysPer400Years
+ y := 400 * n
+ d -= daysPer400Years * n
+
+ // Cut off 100-year cycles.
+ // The last cycle has one extra leap year, so on the last day
+ // of that year, day / daysPer100Years will be 4 instead of 3.
+ // Cut it back down to 3 by subtracting n>>2.
+ n = d / daysPer100Years
+ n -= n >> 2
+ y += 100 * n
+ d -= daysPer100Years * n
+
+ // Cut off 4-year cycles.
+ // The last cycle has a missing leap year, which does not
+ // affect the computation.
+ n = d / daysPer4Years
+ y += 4 * n
+ d -= daysPer4Years * n
+
+ // Cut off years within a 4-year cycle.
+ // The last year is a leap year, so on the last day of that year,
+ // day / 365 will be 4 instead of 3. Cut it back down to 3
+ // by subtracting n>>2.
+ n = d / 365
+ n -= n >> 2
+ y += n
+ d -= 365 * n
+
+ year = int(int64(y) + absoluteZeroYear)
+ yday = int(d)
+
+ if !full {
+ return
+ }
+
+ day = yday
+ if isLeap(year) {
+ // Leap year
+ switch {
+ case day > 31+29-1:
+ // After leap day; pretend it wasn't there.
+ day--
+ case day == 31+29-1:
+ // Leap day.
+ month = February
+ day = 29
+ return
+ }
+ }
+
+ // Estimate month on assumption that every month has 31 days.
+ // The estimate may be too low by at most one month, so adjust.
+ month = Month(day / 31)
+ end := int(daysBefore[month+1])
+ var begin int
+ if day >= end {
+ month++
+ begin = end
+ } else {
+ begin = int(daysBefore[month])
+ }
+
+ month++ // because January is 1
+ day = day - begin + 1
+ return
+}
+
+// daysBefore[m] counts the number of days in a non-leap year
+// before month m begins. There is an entry for m=12, counting
+// the number of days before January of next year (365).
+var daysBefore = [...]int32{
+ 0,
+ 31,
+ 31 + 28,
+ 31 + 28 + 31,
+ 31 + 28 + 31 + 30,
+ 31 + 28 + 31 + 30 + 31,
+ 31 + 28 + 31 + 30 + 31 + 30,
+ 31 + 28 + 31 + 30 + 31 + 30 + 31,
+ 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31,
+ 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30,
+ 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31,
+ 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30,
+ 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31,
+}
+
+func daysIn(m Month, year int) int {
+ if m == February && isLeap(year) {
+ return 29
+ }
+ return int(daysBefore[m] - daysBefore[m-1])
+}
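+
+// Editor's sketch (not part of the upstream source): daysIn subtracts
+// adjacent daysBefore entries, with February special-cased for leap years:
+//
+//	daysIn(April, 2023)    // 30
+//	daysIn(February, 2023) // 28
+//	daysIn(February, 2024) // 29 (2024 is a leap year)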
+
+// daysSinceEpoch takes a year and returns the number of days from
+// the absolute epoch to the start of that year.
+// This is basically (year - zeroYear) * 365, but accounting for leap days.
+func daysSinceEpoch(year int) uint64 {
+ y := uint64(int64(year) - absoluteZeroYear)
+
+ // Add in days from 400-year cycles.
+ n := y / 400
+ y -= 400 * n
+ d := daysPer400Years * n
+
+ // Add in 100-year cycles.
+ n = y / 100
+ y -= 100 * n
+ d += daysPer100Years * n
+
+ // Add in 4-year cycles.
+ n = y / 4
+ y -= 4 * n
+ d += daysPer4Years * n
+
+ // Add in non-leap years.
+ n = y
+ d += 365 * n
+
+ return d
+}
+
+// Provided by package runtime.
+func now() (sec int64, nsec int32, mono int64)
+
+// runtimeNano returns the current value of the runtime clock in nanoseconds.
+//
+//go:linkname runtimeNano runtime.nanotime
+func runtimeNano() int64
+
+// Monotonic times are reported as offsets from startNano.
+// We initialize startNano to runtimeNano() - 1 so that on systems where
+// monotonic time resolution is fairly low (e.g. Windows 2008,
+// which appears to have a default resolution of 15ms),
+// we avoid ever reporting a monotonic time of 0.
+// (Callers may want to use 0 as "time not set".)
+var startNano int64 = runtimeNano() - 1
+
+// Now returns the current local time.
+func Now() Time {
+ sec, nsec, mono := now()
+ mono -= startNano
+ sec += unixToInternal - minWall
+ if uint64(sec)>>33 != 0 {
+ return Time{uint64(nsec), sec + minWall, Local}
+ }
+ return Time{hasMonotonic | uint64(sec)<<nsecShift | uint64(nsec), mono, Local}
+}
+
+func unixTime(sec int64, nsec int32) Time {
+ return Time{uint64(nsec), sec + unixToInternal, Local}
+}
+
+// UTC returns t with the location set to UTC.
+func (t Time) UTC() Time {
+ t.setLoc(&utcLoc)
+ return t
+}
+
+// Local returns t with the location set to local time.
+func (t Time) Local() Time {
+ t.setLoc(Local)
+ return t
+}
+
+// In returns a copy of t representing the same time instant, but
+// with the copy's location information set to loc for display
+// purposes.
+//
+// In panics if loc is nil.
+func (t Time) In(loc *Location) Time {
+ if loc == nil {
+ panic("time: missing Location in call to Time.In")
+ }
+ t.setLoc(loc)
+ return t
+}
+
+// Location returns the time zone information associated with t.
+func (t Time) Location() *Location {
+ l := t.loc
+ if l == nil {
+ l = UTC
+ }
+ return l
+}
+
+// Zone computes the time zone in effect at time t, returning the abbreviated
+// name of the zone (such as "CET") and its offset in seconds east of UTC.
+func (t Time) Zone() (name string, offset int) {
+ name, offset, _, _, _ = t.loc.lookup(t.unixSec())
+ return
+}
+
+// ZoneBounds returns the bounds of the time zone in effect at time t.
+// The zone begins at start and the next zone begins at end.
+// If the zone begins at the beginning of time, start will be returned as a zero Time.
+// If the zone goes on forever, end will be returned as a zero Time.
+// The Location of the returned times will be the same as t.
+func (t Time) ZoneBounds() (start, end Time) {
+ _, _, startSec, endSec, _ := t.loc.lookup(t.unixSec())
+ if startSec != alpha {
+ start = unixTime(startSec, 0)
+ start.setLoc(t.loc)
+ }
+ if endSec != omega {
+ end = unixTime(endSec, 0)
+ end.setLoc(t.loc)
+ }
+ return
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC. The result does not depend on the
+// location associated with t.
+// Unix-like operating systems often record time as a 32-bit
+// count of seconds, but since the method here returns a 64-bit
+// value it is valid for billions of years into the past or future.
+func (t Time) Unix() int64 {
+ return t.unixSec()
+}
+
+// UnixMilli returns t as a Unix time, the number of milliseconds elapsed since
+// January 1, 1970 UTC. The result is undefined if the Unix time in
+// milliseconds cannot be represented by an int64 (a date more than 292 million
+// years before or after 1970). The result does not depend on the
+// location associated with t.
+func (t Time) UnixMilli() int64 {
+ return t.unixSec()*1e3 + int64(t.nsec())/1e6
+}
+
+// UnixMicro returns t as a Unix time, the number of microseconds elapsed since
+// January 1, 1970 UTC. The result is undefined if the Unix time in
+// microseconds cannot be represented by an int64 (a date before year -290307 or
+// after year 294246). The result does not depend on the location associated
+// with t.
+func (t Time) UnixMicro() int64 {
+ return t.unixSec()*1e6 + int64(t.nsec())/1e3
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC. The result is undefined if the Unix time
+// in nanoseconds cannot be represented by an int64 (a date before the year
+// 1678 or after 2262). Note that this means the result of calling UnixNano
+// on the zero Time is undefined. The result does not depend on the
+// location associated with t.
+func (t Time) UnixNano() int64 {
+ return (t.unixSec())*1e9 + int64(t.nsec())
+}
+
+const (
+ timeBinaryVersionV1 byte = iota + 1 // For the general case
+ timeBinaryVersionV2 // For LMT only
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (t Time) MarshalBinary() ([]byte, error) {
+ var offsetMin int16 // minutes east of UTC. -1 is UTC.
+ var offsetSec int8
+ version := timeBinaryVersionV1
+
+ if t.Location() == UTC {
+ offsetMin = -1
+ } else {
+ _, offset := t.Zone()
+ if offset%60 != 0 {
+ version = timeBinaryVersionV2
+ offsetSec = int8(offset % 60)
+ }
+
+ offset /= 60
+ if offset < -32768 || offset == -1 || offset > 32767 {
+ return nil, errors.New("Time.MarshalBinary: unexpected zone offset")
+ }
+ offsetMin = int16(offset)
+ }
+
+ sec := t.sec()
+ nsec := t.nsec()
+ enc := []byte{
+ version, // byte 0 : version
+ byte(sec >> 56), // bytes 1-8: seconds
+ byte(sec >> 48),
+ byte(sec >> 40),
+ byte(sec >> 32),
+ byte(sec >> 24),
+ byte(sec >> 16),
+ byte(sec >> 8),
+ byte(sec),
+ byte(nsec >> 24), // bytes 9-12: nanoseconds
+ byte(nsec >> 16),
+ byte(nsec >> 8),
+ byte(nsec),
+ byte(offsetMin >> 8), // bytes 13-14: zone offset in minutes
+ byte(offsetMin),
+ }
+ if version == timeBinaryVersionV2 {
+ enc = append(enc, byte(offsetSec))
+ }
+
+ return enc, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (t *Time) UnmarshalBinary(data []byte) error {
+ buf := data
+ if len(buf) == 0 {
+ return errors.New("Time.UnmarshalBinary: no data")
+ }
+
+ version := buf[0]
+ if version != timeBinaryVersionV1 && version != timeBinaryVersionV2 {
+ return errors.New("Time.UnmarshalBinary: unsupported version")
+ }
+
+ wantLen := /*version*/ 1 + /*sec*/ 8 + /*nsec*/ 4 + /*zone offset*/ 2
+ if version == timeBinaryVersionV2 {
+ wantLen++
+ }
+ if len(buf) != wantLen {
+ return errors.New("Time.UnmarshalBinary: invalid length")
+ }
+
+ buf = buf[1:]
+ sec := int64(buf[7]) | int64(buf[6])<<8 | int64(buf[5])<<16 | int64(buf[4])<<24 |
+ int64(buf[3])<<32 | int64(buf[2])<<40 | int64(buf[1])<<48 | int64(buf[0])<<56
+
+ buf = buf[8:]
+ nsec := int32(buf[3]) | int32(buf[2])<<8 | int32(buf[1])<<16 | int32(buf[0])<<24
+
+ buf = buf[4:]
+ offset := int(int16(buf[1])|int16(buf[0])<<8) * 60
+ if version == timeBinaryVersionV2 {
+ offset += int(buf[2])
+ }
+
+ *t = Time{}
+ t.wall = uint64(nsec)
+ t.ext = sec
+
+ if offset == -1*60 {
+ t.setLoc(&utcLoc)
+ } else if _, localoff, _, _, _ := Local.lookup(t.unixSec()); offset == localoff {
+ t.setLoc(Local)
+ } else {
+ t.setLoc(FixedZone("", offset))
+ }
+
+ return nil
+}
+
+// TODO(rsc): Remove GobEncoder, GobDecoder, MarshalJSON, UnmarshalJSON in Go 2.
+// The same semantics will be provided by the generic MarshalBinary, MarshalText,
+// UnmarshalBinary, UnmarshalText.
+
+// GobEncode implements the gob.GobEncoder interface.
+func (t Time) GobEncode() ([]byte, error) {
+ return t.MarshalBinary()
+}
+
+// GobDecode implements the gob.GobDecoder interface.
+func (t *Time) GobDecode(data []byte) error {
+ return t.UnmarshalBinary(data)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+// The time is a quoted string in RFC 3339 format, with sub-second precision added if present.
+func (t Time) MarshalJSON() ([]byte, error) {
+ if y := t.Year(); y < 0 || y >= 10000 {
+ // RFC 3339 is clear that years are 4 digits exactly.
+ // See golang.org/issue/4556#c15 for more discussion.
+ return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]")
+ }
+
+ b := make([]byte, 0, len(RFC3339Nano)+2)
+ b = append(b, '"')
+ b = t.AppendFormat(b, RFC3339Nano)
+ b = append(b, '"')
+ return b, nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+// The time is expected to be a quoted string in RFC 3339 format.
+func (t *Time) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+ // Fractional seconds are handled implicitly by Parse.
+ var err error
+ *t, err = Parse(`"`+RFC3339+`"`, string(data))
+ return err
+}
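A JSON round trip, showing the quoted RFC 3339 form with trailing fractional zeros trimmed:

    package main

    import (
        "encoding/json"
        "fmt"
        "time"
    )

    func main() {
        t := time.Date(2009, time.November, 10, 23, 0, 0, 500000000, time.UTC)
        b, _ := json.Marshal(t)
        fmt.Println(string(b)) // "2009-11-10T23:00:00.5Z"
        var u time.Time
        _ = json.Unmarshal(b, &u)
        fmt.Println(u.Equal(t)) // true
    }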
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The time is formatted in RFC 3339 format, with sub-second precision added if present.
+func (t Time) MarshalText() ([]byte, error) {
+ if y := t.Year(); y < 0 || y >= 10000 {
+ return nil, errors.New("Time.MarshalText: year outside of range [0,9999]")
+ }
+
+ b := make([]byte, 0, len(RFC3339Nano))
+ return t.AppendFormat(b, RFC3339Nano), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The time is expected to be in RFC 3339 format.
+func (t *Time) UnmarshalText(data []byte) error {
+ // Fractional seconds are handled implicitly by Parse.
+ var err error
+ *t, err = Parse(RFC3339, string(data))
+ return err
+}
+
+// Unix returns the local Time corresponding to the given Unix time,
+// sec seconds and nsec nanoseconds since January 1, 1970 UTC.
+// It is valid to pass nsec outside the range [0, 999999999].
+// Not all sec values have a corresponding time value. One such
+// value is 1<<63-1 (the largest int64 value).
+func Unix(sec int64, nsec int64) Time {
+ if nsec < 0 || nsec >= 1e9 {
+ n := nsec / 1e9
+ sec += n
+ nsec -= n * 1e9
+ if nsec < 0 {
+ nsec += 1e9
+ sec--
+ }
+ }
+ return unixTime(sec, int32(nsec))
+}
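The normalization above means an out-of-range nsec simply carries into sec; a small sketch:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        t := time.Unix(0, -1) // one nanosecond before the epoch
        fmt.Println(t.UTC())  // 1969-12-31 23:59:59.999999999 +0000 UTC
        u := time.Unix(1, 2500000000) // 2.5e9 ns carries 2s into sec
        fmt.Println(u.Unix(), u.Nanosecond()) // 3 500000000
    }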
+
+// UnixMilli returns the local Time corresponding to the given Unix time,
+// msec milliseconds since January 1, 1970 UTC.
+func UnixMilli(msec int64) Time {
+ return Unix(msec/1e3, (msec%1e3)*1e6)
+}
+
+// UnixMicro returns the local Time corresponding to the given Unix time,
+// usec microseconds since January 1, 1970 UTC.
+func UnixMicro(usec int64) Time {
+ return Unix(usec/1e6, (usec%1e6)*1e3)
+}
+
+// IsDST reports whether the time in the configured location is in Daylight Saving Time.
+func (t Time) IsDST() bool {
+ _, _, _, _, isDST := t.loc.lookup(t.Unix())
+ return isDST
+}
+
+func isLeap(year int) bool {
+ return year%4 == 0 && (year%100 != 0 || year%400 == 0)
+}
+
+// norm returns nhi, nlo such that
+//
+// hi * base + lo == nhi * base + nlo
+// 0 <= nlo < base
+func norm(hi, lo, base int) (nhi, nlo int) {
+ if lo < 0 {
+ n := (-lo-1)/base + 1
+ hi -= n
+ lo += n * base
+ }
+ if lo >= base {
+ n := lo / base
+ hi += n
+ lo -= n * base
+ }
+ return hi, lo
+}
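Two worked instances of norm, reading hi/lo as minutes/seconds with base 60 (illustration only):

    norm(2, 75, 60)  == (3, 15)   // 2m75s normalizes to 3m15s
    norm(2, -15, 60) == (1, 45)   // 2m-15s normalizes to 1m45s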
+
+// Date returns the Time corresponding to
+//
+// yyyy-mm-dd hh:mm:ss + nsec nanoseconds
+//
+// in the appropriate zone for that time in the given location.
+//
+// The month, day, hour, min, sec, and nsec values may be outside
+// their usual ranges and will be normalized during the conversion.
+// For example, October 32 converts to November 1.
+//
+// A daylight saving time transition skips or repeats times.
+// For example, in the United States, March 13, 2011 2:15am never occurred,
+// while November 6, 2011 1:15am occurred twice. In such cases, the
+// choice of time zone, and therefore the time, is not well-defined.
+// Date returns a time that is correct in one of the two zones involved
+// in the transition, but it does not guarantee which.
+//
+// Date panics if loc is nil.
+func Date(year int, month Month, day, hour, min, sec, nsec int, loc *Location) Time {
+ if loc == nil {
+ panic("time: missing Location in call to Date")
+ }
+
+ // Normalize month, overflowing into year.
+ m := int(month) - 1
+ year, m = norm(year, m, 12)
+ month = Month(m) + 1
+
+ // Normalize nsec, sec, min, hour, overflowing into day.
+ sec, nsec = norm(sec, nsec, 1e9)
+ min, sec = norm(min, sec, 60)
+ hour, min = norm(hour, min, 60)
+ day, hour = norm(day, hour, 24)
+
+ // Compute days since the absolute epoch.
+ d := daysSinceEpoch(year)
+
+ // Add in days before this month.
+ d += uint64(daysBefore[month-1])
+ if isLeap(year) && month >= March {
+ d++ // February 29
+ }
+
+ // Add in days before today.
+ d += uint64(day - 1)
+
+ // Add in time elapsed today.
+ abs := d * secondsPerDay
+ abs += uint64(hour*secondsPerHour + min*secondsPerMinute + sec)
+
+ unix := int64(abs) + (absoluteToInternal + internalToUnix)
+
+ // Look for zone offset for expected time, so we can adjust to UTC.
+ // The lookup function expects UTC, so first we pass unix in the
+ // hope that it will not be too close to a zone transition,
+ // and then adjust if it is.
+ _, offset, start, end, _ := loc.lookup(unix)
+ if offset != 0 {
+ utc := unix - int64(offset)
+ // If utc is valid for the time zone we found, then we have the right offset.
+ // If not, we get the correct offset by looking up utc in the location.
+ if utc < start || utc >= end {
+ _, offset, _, _, _ = loc.lookup(utc)
+ }
+ unix -= int64(offset)
+ }
+
+ t := unixTime(unix, int32(nsec))
+ t.setLoc(loc)
+ return t
+}
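The field normalization is visible from the public API; for instance, the October 32 case from the comment above:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        t := time.Date(2011, time.October, 32, 0, 0, 0, 0, time.UTC)
        fmt.Println(t) // 2011-11-01 00:00:00 +0000 UTC
    }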
+
+// Truncate returns the result of rounding t down to a multiple of d (since the zero time).
+// If d <= 0, Truncate returns t stripped of any monotonic clock reading but otherwise unchanged.
+//
+// Truncate operates on the time as an absolute duration since the
+// zero time; it does not operate on the presentation form of the
+// time. Thus, Truncate(Hour) may return a time with a non-zero
+// minute, depending on the time's Location.
+func (t Time) Truncate(d Duration) Time {
+ t.stripMono()
+ if d <= 0 {
+ return t
+ }
+ _, r := div(t, d)
+ return t.Add(-r)
+}
+
+// Round returns the result of rounding t to the nearest multiple of d (since the zero time).
+// The rounding behavior for halfway values is to round up.
+// If d <= 0, Round returns t stripped of any monotonic clock reading but otherwise unchanged.
+//
+// Round operates on the time as an absolute duration since the
+// zero time; it does not operate on the presentation form of the
+// time. Thus, Round(Hour) may return a time with a non-zero
+// minute, depending on the time's Location.
+func (t Time) Round(d Duration) Time {
+ t.stripMono()
+ if d <= 0 {
+ return t
+ }
+ _, r := div(t, d)
+ if lessThanHalf(r, d) {
+ return t.Add(-r)
+ }
+ return t.Add(d - r)
+}
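Truncate rounds down and Round rounds half up, both on the absolute duration since the zero time; a sketch:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        t := time.Date(2012, time.December, 7, 12, 15, 30, 918273645, time.UTC)
        fmt.Println(t.Truncate(time.Minute)) // 2012-12-07 12:15:00 +0000 UTC
        fmt.Println(t.Round(time.Minute))    // 2012-12-07 12:16:00 +0000 UTC
    }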
+
+// div divides t by d and returns the quotient parity and remainder.
+// We don't use the quotient parity anymore (round half up instead of round to even)
+// but it's still here in case we change our minds.
+func div(t Time, d Duration) (qmod2 int, r Duration) {
+ neg := false
+ nsec := t.nsec()
+ sec := t.sec()
+ if sec < 0 {
+ // Operate on absolute value.
+ neg = true
+ sec = -sec
+ nsec = -nsec
+ if nsec < 0 {
+ nsec += 1e9
+ sec-- // sec >= 1 before the -- so safe
+ }
+ }
+
+ switch {
+ // Special case: 2d divides 1 second.
+ case d < Second && Second%(d+d) == 0:
+ qmod2 = int(nsec/int32(d)) & 1
+ r = Duration(nsec % int32(d))
+
+ // Special case: d is a multiple of 1 second.
+ case d%Second == 0:
+ d1 := int64(d / Second)
+ qmod2 = int(sec/d1) & 1
+ r = Duration(sec%d1)*Second + Duration(nsec)
+
+ // General case.
+ // This could be faster if more cleverness were applied,
+ // but it's really only here to avoid special case restrictions in the API.
+ // No one will care about these cases.
+ default:
+ // Compute nanoseconds as 128-bit number.
+ sec := uint64(sec)
+ tmp := (sec >> 32) * 1e9
+ u1 := tmp >> 32
+ u0 := tmp << 32
+ tmp = (sec & 0xFFFFFFFF) * 1e9
+ u0x, u0 := u0, u0+tmp
+ if u0 < u0x {
+ u1++
+ }
+ u0x, u0 = u0, u0+uint64(nsec)
+ if u0 < u0x {
+ u1++
+ }
+
+ // Compute remainder by subtracting r<<k for decreasing k.
+ // Quotient parity is whether we subtract on last round.
+ d1 := uint64(d)
+ for d1>>63 != 1 {
+ d1 <<= 1
+ }
+ d0 := uint64(0)
+ for {
+ qmod2 = 0
+ if u1 > d1 || u1 == d1 && u0 >= d0 {
+ // subtract
+ qmod2 = 1
+ u0x, u0 = u0, u0-d0
+ if u0 > u0x {
+ u1--
+ }
+ u1 -= d1
+ }
+ if d1 == 0 && d0 == uint64(d) {
+ break
+ }
+ d0 >>= 1
+ d0 |= (d1 & 1) << 63
+ d1 >>= 1
+ }
+ r = Duration(u0)
+ }
+
+ if neg && r != 0 {
+ // If input was negative and not an exact multiple of d, we computed q, r such that
+ // q*d + r = -t
+ // But the right answers are given by -(q-1), d-r:
+ // q*d + r = -t
+ // -q*d - r = t
+ // -(q-1)*d + (d - r) = t
+ qmod2 ^= 1
+ r = d - r
+ }
+ return
+}
diff --git a/contrib/go/_std_1.19/src/time/zoneinfo.go b/contrib/go/_std_1.19/src/time/zoneinfo.go
new file mode 100644
index 0000000000..b3313583d8
--- /dev/null
+++ b/contrib/go/_std_1.19/src/time/zoneinfo.go
@@ -0,0 +1,687 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package time
+
+import (
+ "errors"
+ "sync"
+ "syscall"
+)
+
+//go:generate env ZONEINFO=$GOROOT/lib/time/zoneinfo.zip go run genzabbrs.go -output zoneinfo_abbrs_windows.go
+
+// A Location maps time instants to the zone in use at that time.
+// Typically, the Location represents the collection of time offsets
+// in use in a geographical area. For many Locations the time offset varies
+// depending on whether daylight saving time is in use at the time instant.
+type Location struct {
+ name string
+ zone []zone
+ tx []zoneTrans
+
+ // The tzdata information can be followed by a string that describes
+ // how to handle DST transitions not recorded in zoneTrans.
+ // The format is the TZ environment variable without a colon; see
+ // https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap08.html.
+ // Example string, for America/Los_Angeles: PST8PDT,M3.2.0,M11.1.0
+ extend string
+
+ // Most lookups will be for the current time.
+ // To avoid the binary search through tx, keep a
+ // static one-element cache that gives the correct
+ // zone for the time when the Location was created.
+ // if cacheStart <= t < cacheEnd,
+ // lookup can return cacheZone.
+ // The units for cacheStart and cacheEnd are seconds
+ // since January 1, 1970 UTC, to match the argument
+ // to lookup.
+ cacheStart int64
+ cacheEnd int64
+ cacheZone *zone
+}
+
+// A zone represents a single time zone such as CET.
+type zone struct {
+ name string // abbreviated name, "CET"
+ offset int // seconds east of UTC
+ isDST bool // is this zone Daylight Saving Time?
+}
+
+// A zoneTrans represents a single time zone transition.
+type zoneTrans struct {
+ when int64 // transition time, in seconds since 1970 GMT
+ index uint8 // the index of the zone that goes into effect at that time
+ isstd, isutc bool // ignored - tzfile(5) standard/wall-clock and UT/local indicators
+}
+
+// alpha and omega are the beginning and end of time for zone
+// transitions.
+const (
+ alpha = -1 << 63 // math.MinInt64
+ omega = 1<<63 - 1 // math.MaxInt64
+)
+
+// UTC represents Universal Coordinated Time (UTC).
+var UTC *Location = &utcLoc
+
+// utcLoc is separate so that get can refer to &utcLoc
+// and ensure that it never returns a nil *Location,
+// even if a badly behaved client has changed UTC.
+var utcLoc = Location{name: "UTC"}
+
+// Local represents the system's local time zone.
+// On Unix systems, Local consults the TZ environment
+// variable to find the time zone to use. No TZ means
+// use the system default /etc/localtime.
+// TZ="" means use UTC.
+// TZ="foo" means use file foo in the system timezone directory.
+var Local *Location = &localLoc
+
+// localLoc is separate so that initLocal can initialize
+// it even if a client has changed Local.
+var localLoc Location
+var localOnce sync.Once
+
+func (l *Location) get() *Location {
+ if l == nil {
+ return &utcLoc
+ }
+ if l == &localLoc {
+ localOnce.Do(initLocal)
+ }
+ return l
+}
+
+// String returns a descriptive name for the time zone information,
+// corresponding to the name argument to LoadLocation or FixedZone.
+func (l *Location) String() string {
+ return l.get().name
+}
+
+// FixedZone returns a Location that always uses
+// the given zone name and offset (seconds east of UTC).
+func FixedZone(name string, offset int) *Location {
+ l := &Location{
+ name: name,
+ zone: []zone{{name, offset, false}},
+ tx: []zoneTrans{{alpha, 0, false, false}},
+ cacheStart: alpha,
+ cacheEnd: omega,
+ }
+ l.cacheZone = &l.zone[0]
+ return l
+}
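FixedZone is the simplest way to obtain a Location with a constant offset and no transitions; a sketch:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        ist := time.FixedZone("IST", 5*3600+1800) // UTC+05:30
        t := time.Date(2009, time.November, 10, 23, 0, 0, 0, ist)
        fmt.Println(t) // 2009-11-10 23:00:00 +0530 IST
    }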
+
+// lookup returns information about the time zone in use at an
+// instant in time expressed as seconds since January 1, 1970 00:00:00 UTC.
+//
+// The returned information gives the name of the zone (such as "CET"),
+// the start and end times bracketing sec when that zone is in effect,
+// the offset in seconds east of UTC (such as -5*60*60), and whether
+// daylight saving time is in effect at that time.
+func (l *Location) lookup(sec int64) (name string, offset int, start, end int64, isDST bool) {
+ l = l.get()
+
+ if len(l.zone) == 0 {
+ name = "UTC"
+ offset = 0
+ start = alpha
+ end = omega
+ isDST = false
+ return
+ }
+
+ if zone := l.cacheZone; zone != nil && l.cacheStart <= sec && sec < l.cacheEnd {
+ name = zone.name
+ offset = zone.offset
+ start = l.cacheStart
+ end = l.cacheEnd
+ isDST = zone.isDST
+ return
+ }
+
+ if len(l.tx) == 0 || sec < l.tx[0].when {
+ zone := &l.zone[l.lookupFirstZone()]
+ name = zone.name
+ offset = zone.offset
+ start = alpha
+ if len(l.tx) > 0 {
+ end = l.tx[0].when
+ } else {
+ end = omega
+ }
+ isDST = zone.isDST
+ return
+ }
+
+ // Binary search for entry with largest time <= sec.
+ // Not using sort.Search to avoid dependencies.
+ tx := l.tx
+ end = omega
+ lo := 0
+ hi := len(tx)
+ for hi-lo > 1 {
+ m := lo + (hi-lo)/2
+ lim := tx[m].when
+ if sec < lim {
+ end = lim
+ hi = m
+ } else {
+ lo = m
+ }
+ }
+ zone := &l.zone[tx[lo].index]
+ name = zone.name
+ offset = zone.offset
+ start = tx[lo].when
+ // end = maintained during the search
+ isDST = zone.isDST
+
+ // If we're at the end of the known zone transitions,
+ // try the extend string.
+ if lo == len(tx)-1 && l.extend != "" {
+ if ename, eoffset, estart, eend, eisDST, ok := tzset(l.extend, end, sec); ok {
+ return ename, eoffset, estart, eend, eisDST
+ }
+ }
+
+ return
+}
+
+// lookupFirstZone returns the index of the time zone to use for times
+// before the first transition time, or when there are no transition
+// times.
+//
+// The reference implementation in localtime.c from
+// https://www.iana.org/time-zones/repository/releases/tzcode2013g.tar.gz
+// implements the following algorithm for these cases:
+// 1. If the first zone is unused by the transitions, use it.
+// 2. Otherwise, if there are transition times, and the first
+// transition is to a zone in daylight time, find the first
+// non-daylight-time zone before and closest to the first transition
+// zone.
+// 3. Otherwise, use the first zone that is not daylight time, if
+// there is one.
+// 4. Otherwise, use the first zone.
+func (l *Location) lookupFirstZone() int {
+ // Case 1.
+ if !l.firstZoneUsed() {
+ return 0
+ }
+
+ // Case 2.
+ if len(l.tx) > 0 && l.zone[l.tx[0].index].isDST {
+ for zi := int(l.tx[0].index) - 1; zi >= 0; zi-- {
+ if !l.zone[zi].isDST {
+ return zi
+ }
+ }
+ }
+
+ // Case 3.
+ for zi := range l.zone {
+ if !l.zone[zi].isDST {
+ return zi
+ }
+ }
+
+ // Case 4.
+ return 0
+}
+
+// firstZoneUsed reports whether the first zone is used by some
+// transition.
+func (l *Location) firstZoneUsed() bool {
+ for _, tx := range l.tx {
+ if tx.index == 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// tzset takes a timezone string like the one found in the TZ environment
+// variable, the end of the last time zone transition expressed as seconds
+// since January 1, 1970 00:00:00 UTC, and a time expressed the same way.
+// We call this a tzset string since in C the function tzset reads TZ.
+// The return values are as for lookup, plus ok which reports whether the
+// parse succeeded.
+func tzset(s string, initEnd, sec int64) (name string, offset int, start, end int64, isDST, ok bool) {
+ var (
+ stdName, dstName string
+ stdOffset, dstOffset int
+ )
+
+ stdName, s, ok = tzsetName(s)
+ if ok {
+ stdOffset, s, ok = tzsetOffset(s)
+ }
+ if !ok {
+ return "", 0, 0, 0, false, false
+ }
+
+ // The numbers in the tzset string are added to local time to get UTC,
+ // but our offsets are added to UTC to get local time,
+ // so we negate the number we see here.
+ stdOffset = -stdOffset
+
+ if len(s) == 0 || s[0] == ',' {
+ // No daylight saving time.
+ return stdName, stdOffset, initEnd, omega, false, true
+ }
+
+ dstName, s, ok = tzsetName(s)
+ if ok {
+ if len(s) == 0 || s[0] == ',' {
+ dstOffset = stdOffset + secondsPerHour
+ } else {
+ dstOffset, s, ok = tzsetOffset(s)
+ dstOffset = -dstOffset // as with stdOffset, above
+ }
+ }
+ if !ok {
+ return "", 0, 0, 0, false, false
+ }
+
+ if len(s) == 0 {
+ // Default DST rules per tzcode.
+ s = ",M3.2.0,M11.1.0"
+ }
+ // The TZ definition does not mention ';' here but tzcode accepts it.
+ if s[0] != ',' && s[0] != ';' {
+ return "", 0, 0, 0, false, false
+ }
+ s = s[1:]
+
+ var startRule, endRule rule
+ startRule, s, ok = tzsetRule(s)
+ if !ok || len(s) == 0 || s[0] != ',' {
+ return "", 0, 0, 0, false, false
+ }
+ s = s[1:]
+ endRule, s, ok = tzsetRule(s)
+ if !ok || len(s) > 0 {
+ return "", 0, 0, 0, false, false
+ }
+
+ year, _, _, yday := absDate(uint64(sec+unixToInternal+internalToAbsolute), false)
+
+ ysec := int64(yday*secondsPerDay) + sec%secondsPerDay
+
+ // Compute start of year in seconds since Unix epoch.
+ d := daysSinceEpoch(year)
+ abs := int64(d * secondsPerDay)
+ abs += absoluteToInternal + internalToUnix
+
+ startSec := int64(tzruleTime(year, startRule, stdOffset))
+ endSec := int64(tzruleTime(year, endRule, dstOffset))
+ dstIsDST, stdIsDST := true, false
+ // Note: this flips "DST" and "STD" while retaining the labels.
+ // It happens for southern-hemisphere zones, so the labelling of the
+ // variables here is a little inconsistent with their contents.
+ if endSec < startSec {
+ startSec, endSec = endSec, startSec
+ stdName, dstName = dstName, stdName
+ stdOffset, dstOffset = dstOffset, stdOffset
+ stdIsDST, dstIsDST = dstIsDST, stdIsDST
+ }
+
+ // The start and end values that we return are accurate
+// close to a daylight saving transition, but are otherwise
+ // just the start and end of the year. That suffices for
+ // the only caller that cares, which is Date.
+ if ysec < startSec {
+ return stdName, stdOffset, abs, startSec + abs, stdIsDST, true
+ } else if ysec >= endSec {
+ return stdName, stdOffset, endSec + abs, abs + 365*secondsPerDay, stdIsDST, true
+ } else {
+ return dstName, dstOffset, startSec + abs, endSec + abs, dstIsDST, true
+ }
+}
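Decomposing the America/Los_Angeles extend string quoted earlier makes the pieces concrete (illustration only):

    PST8PDT,M3.2.0,M11.1.0
    PST      standard zone name
    8        standard offset, 8 hours west of UTC (stored negated, -8*3600)
    PDT      DST zone name; no offset given, so standard + 1 hour
    M3.2.0   DST starts month 3, week 2, day 0: the second Sunday in March
    M11.1.0  DST ends month 11, week 1, day 0: the first Sunday in November
             both transitions at the default 02:00 local time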
+
+// tzsetName returns the timezone name at the start of the tzset string s,
+// and the remainder of s, and reports whether the parsing is OK.
+func tzsetName(s string) (string, string, bool) {
+ if len(s) == 0 {
+ return "", "", false
+ }
+ if s[0] != '<' {
+ for i, r := range s {
+ switch r {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ',', '-', '+':
+ if i < 3 {
+ return "", "", false
+ }
+ return s[:i], s[i:], true
+ }
+ }
+ if len(s) < 3 {
+ return "", "", false
+ }
+ return s, "", true
+ } else {
+ for i, r := range s {
+ if r == '>' {
+ return s[1:i], s[i+1:], true
+ }
+ }
+ return "", "", false
+ }
+}
+
+// tzsetOffset returns the timezone offset at the start of the tzset string s,
+// and the remainder of s, and reports whether the parsing is OK.
+// The timezone offset is returned as a number of seconds.
+func tzsetOffset(s string) (offset int, rest string, ok bool) {
+ if len(s) == 0 {
+ return 0, "", false
+ }
+ neg := false
+ if s[0] == '+' {
+ s = s[1:]
+ } else if s[0] == '-' {
+ s = s[1:]
+ neg = true
+ }
+
+ // The tzdata code permits values up to 24 * 7 here,
+ // although POSIX does not.
+ var hours int
+ hours, s, ok = tzsetNum(s, 0, 24*7)
+ if !ok {
+ return 0, "", false
+ }
+ off := hours * secondsPerHour
+ if len(s) == 0 || s[0] != ':' {
+ if neg {
+ off = -off
+ }
+ return off, s, true
+ }
+
+ var mins int
+ mins, s, ok = tzsetNum(s[1:], 0, 59)
+ if !ok {
+ return 0, "", false
+ }
+ off += mins * secondsPerMinute
+ if len(s) == 0 || s[0] != ':' {
+ if neg {
+ off = -off
+ }
+ return off, s, true
+ }
+
+ var secs int
+ secs, s, ok = tzsetNum(s[1:], 0, 59)
+ if !ok {
+ return 0, "", false
+ }
+ off += secs
+
+ if neg {
+ off = -off
+ }
+ return off, s, true
+}
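Worked instances of this parser, with the remainder empty and ok true in each case (illustration only):

    tzsetOffset("8")        -> 8*3600            = 28800
    tzsetOffset("-5:30")    -> -(5*3600 + 30*60) = -19800
    tzsetOffset("12:34:56") -> 12*3600+34*60+56  = 45296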
+
+// ruleKind is the kinds of rules that can be seen in a tzset string.
+type ruleKind int
+
+const (
+ ruleJulian ruleKind = iota
+ ruleDOY
+ ruleMonthWeekDay
+)
+
+// rule is a rule read from a tzset string.
+type rule struct {
+ kind ruleKind
+ day int
+ week int
+ mon int
+ time int // transition time
+}
+
+// tzsetRule parses a rule from a tzset string.
+// It returns the rule, and the remainder of the string, and reports success.
+func tzsetRule(s string) (rule, string, bool) {
+ var r rule
+ if len(s) == 0 {
+ return rule{}, "", false
+ }
+ ok := false
+ if s[0] == 'J' {
+ var jday int
+ jday, s, ok = tzsetNum(s[1:], 1, 365)
+ if !ok {
+ return rule{}, "", false
+ }
+ r.kind = ruleJulian
+ r.day = jday
+ } else if s[0] == 'M' {
+ var mon int
+ mon, s, ok = tzsetNum(s[1:], 1, 12)
+ if !ok || len(s) == 0 || s[0] != '.' {
+ return rule{}, "", false
+ }
+ var week int
+ week, s, ok = tzsetNum(s[1:], 1, 5)
+ if !ok || len(s) == 0 || s[0] != '.' {
+ return rule{}, "", false
+ }
+ var day int
+ day, s, ok = tzsetNum(s[1:], 0, 6)
+ if !ok {
+ return rule{}, "", false
+ }
+ r.kind = ruleMonthWeekDay
+ r.day = day
+ r.week = week
+ r.mon = mon
+ } else {
+ var day int
+ day, s, ok = tzsetNum(s, 0, 365)
+ if !ok {
+ return rule{}, "", false
+ }
+ r.kind = ruleDOY
+ r.day = day
+ }
+
+ if len(s) == 0 || s[0] != '/' {
+ r.time = 2 * secondsPerHour // 2am is the default
+ return r, s, true
+ }
+
+ offset, s, ok := tzsetOffset(s[1:])
+ if !ok {
+ return rule{}, "", false
+ }
+ r.time = offset
+
+ return r, s, true
+}
+
+// tzsetNum parses a number from a tzset string.
+// It returns the number, and the remainder of the string, and reports success.
+// The number must be between min and max.
+func tzsetNum(s string, min, max int) (num int, rest string, ok bool) {
+ if len(s) == 0 {
+ return 0, "", false
+ }
+ num = 0
+ for i, r := range s {
+ if r < '0' || r > '9' {
+ if i == 0 || num < min {
+ return 0, "", false
+ }
+ return num, s[i:], true
+ }
+ num *= 10
+ num += int(r) - '0'
+ if num > max {
+ return 0, "", false
+ }
+ }
+ if num < min {
+ return 0, "", false
+ }
+ return num, "", true
+}
+
+// tzruleTime takes a year, a rule, and a timezone offset,
+// and returns the number of seconds since the start of the year
+// that the rule takes effect.
+func tzruleTime(year int, r rule, off int) int {
+ var s int
+ switch r.kind {
+ case ruleJulian:
+ s = (r.day - 1) * secondsPerDay
+ if isLeap(year) && r.day >= 60 {
+ s += secondsPerDay
+ }
+ case ruleDOY:
+ s = r.day * secondsPerDay
+ case ruleMonthWeekDay:
+ // Zeller's Congruence.
+ m1 := (r.mon+9)%12 + 1
+ yy0 := year
+ if r.mon <= 2 {
+ yy0--
+ }
+ yy1 := yy0 / 100
+ yy2 := yy0 % 100
+ dow := ((26*m1-2)/10 + 1 + yy2 + yy2/4 + yy1/4 - 2*yy1) % 7
+ if dow < 0 {
+ dow += 7
+ }
+ // Now dow is the day-of-week of the first day of r.mon.
+ // Get the day-of-month of the first "dow" day.
+ d := r.day - dow
+ if d < 0 {
+ d += 7
+ }
+ for i := 1; i < r.week; i++ {
+ if d+7 >= daysIn(Month(r.mon), year) {
+ break
+ }
+ d += 7
+ }
+ d += int(daysBefore[r.mon-1])
+ if isLeap(year) && r.mon > 2 {
+ d++
+ }
+ s = d * secondsPerDay
+ }
+
+ return s + r.time - off
+}
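A worked pass through the ruleMonthWeekDay arm for M3.2.0 (second Sunday in March) in 2011:

    March 1, 2011 is a Tuesday, so the congruence yields dow = 2.
    d = 0 - 2 = -2, then d += 7 = 5      -> first Sunday is March 6
    one more week (r.week = 2): d = 12   -> second Sunday is March 13
    d += daysBefore[2] = 59, giving 0-based day-of-year 71, i.e. March 13,
    matching the skipped 2:15am example in the Date comment above.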
+
+// lookupName returns information about the time zone with
+// the given name (such as "EST") at the given pseudo-Unix time
+// (what the given time of day would be in UTC).
+func (l *Location) lookupName(name string, unix int64) (offset int, ok bool) {
+ l = l.get()
+
+ // First try for a zone with the right name that was actually
+ // in effect at the given time. (In Sydney, Australia, both standard
+// and daylight-saving time are abbreviated "EST". Using the
+ // offset helps us pick the right one for the given time.
+ // It's not perfect: during the backward transition we might pick
+ // either one.)
+ for i := range l.zone {
+ zone := &l.zone[i]
+ if zone.name == name {
+ nam, offset, _, _, _ := l.lookup(unix - int64(zone.offset))
+ if nam == zone.name {
+ return offset, true
+ }
+ }
+ }
+
+ // Otherwise fall back to an ordinary name match.
+ for i := range l.zone {
+ zone := &l.zone[i]
+ if zone.name == name {
+ return zone.offset, true
+ }
+ }
+
+ // Otherwise, give up.
+ return
+}
+
+// NOTE(rsc): Eventually we will need to accept the POSIX TZ environment
+// syntax too, but I don't feel like implementing it today.
+
+var errLocation = errors.New("time: invalid location name")
+
+var zoneinfo *string
+var zoneinfoOnce sync.Once
+
+// LoadLocation returns the Location with the given name.
+//
+// If the name is "" or "UTC", LoadLocation returns UTC.
+// If the name is "Local", LoadLocation returns Local.
+//
+// Otherwise, the name is taken to be a location name corresponding to a file
+// in the IANA Time Zone database, such as "America/New_York".
+//
+// LoadLocation looks for the IANA Time Zone database in the following
+// locations in order:
+//
+// - the directory or uncompressed zip file named by the ZONEINFO environment variable
+// - on a Unix system, the system standard installation location
+// - $GOROOT/lib/time/zoneinfo.zip
+// - the time/tzdata package, if it was imported
+func LoadLocation(name string) (*Location, error) {
+ if name == "" || name == "UTC" {
+ return UTC, nil
+ }
+ if name == "Local" {
+ return Local, nil
+ }
+ if containsDotDot(name) || name[0] == '/' || name[0] == '\\' {
+ // No valid IANA Time Zone name contains a single dot,
+ // much less dot dot. Likewise, none begin with a slash.
+ return nil, errLocation
+ }
+ zoneinfoOnce.Do(func() {
+ env, _ := syscall.Getenv("ZONEINFO")
+ zoneinfo = &env
+ })
+ var firstErr error
+ if *zoneinfo != "" {
+ if zoneData, err := loadTzinfoFromDirOrZip(*zoneinfo, name); err == nil {
+ if z, err := LoadLocationFromTZData(name, zoneData); err == nil {
+ return z, nil
+ }
+ firstErr = err
+ } else if err != syscall.ENOENT {
+ firstErr = err
+ }
+ }
+ if z, err := loadLocation(name, platformZoneSources); err == nil {
+ return z, nil
+ } else if firstErr == nil {
+ firstErr = err
+ }
+ return nil, firstErr
+}
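Typical use, assuming the IANA database is present in one of the sources above (otherwise the returned error reports the first failure):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        loc, err := time.LoadLocation("America/New_York")
        if err != nil {
            fmt.Println(err)
            return
        }
        t := time.Date(2009, time.November, 10, 23, 0, 0, 0, loc)
        fmt.Println(t) // 2009-11-10 23:00:00 -0500 EST
    }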
+
+// containsDotDot reports whether s contains "..".
+func containsDotDot(s string) bool {
+ if len(s) < 2 {
+ return false
+ }
+ for i := 0; i < len(s)-1; i++ {
+ if s[i] == '.' && s[i+1] == '.' {
+ return true
+ }
+ }
+ return false
+}
diff --git a/contrib/go/_std_1.19/src/time/zoneinfo_goroot.go b/contrib/go/_std_1.19/src/time/zoneinfo_goroot.go
new file mode 100644
index 0000000000..92bdcf4afe
--- /dev/null
+++ b/contrib/go/_std_1.19/src/time/zoneinfo_goroot.go
@@ -0,0 +1,14 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !ios && !android
+
+package time
+
+func gorootZoneSource(goroot string) (string, bool) {
+ if goroot == "" {
+ return "", false
+ }
+ return goroot + "/lib/time/zoneinfo.zip", true
+}
diff --git a/contrib/go/_std_1.19/src/time/zoneinfo_read.go b/contrib/go/_std_1.19/src/time/zoneinfo_read.go
new file mode 100644
index 0000000000..90814ad36a
--- /dev/null
+++ b/contrib/go/_std_1.19/src/time/zoneinfo_read.go
@@ -0,0 +1,597 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Parse "zoneinfo" time zone file.
+// This is a fairly standard file format used on OS X, Linux, BSD, Sun, and others.
+// See tzfile(5), https://en.wikipedia.org/wiki/Zoneinfo,
+// and ftp://munnari.oz.au/pub/oldtz/
+
+package time
+
+import (
+ "errors"
+ "runtime"
+ "syscall"
+)
+
+// registerLoadFromEmbeddedTZData is called by the time/tzdata package,
+// if it is imported.
+func registerLoadFromEmbeddedTZData(f func(string) (string, error)) {
+ loadFromEmbeddedTZData = f
+}
+
+// loadFromEmbeddedTZData is used to load a specific tzdata file
+// from tzdata information embedded in the binary itself.
+// This is set when the time/tzdata package is imported,
+// via registerLoadFromEmbeddedTZData.
+var loadFromEmbeddedTZData func(zipname string) (string, error)
+
+// maxFileSize is the max permitted size of files read by readFile.
+// As reference, the zoneinfo.zip distributed by Go is ~350 KB,
+// so 10MB is overkill.
+const maxFileSize = 10 << 20
+
+type fileSizeError string
+
+func (f fileSizeError) Error() string {
+ return "time: file " + string(f) + " is too large"
+}
+
+// Copies of io.Seek* constants to avoid importing "io":
+const (
+ seekStart = 0
+ seekCurrent = 1
+ seekEnd = 2
+)
+
+// Simple I/O interface to binary blob of data.
+type dataIO struct {
+ p []byte
+ error bool
+}
+
+func (d *dataIO) read(n int) []byte {
+ if len(d.p) < n {
+ d.p = nil
+ d.error = true
+ return nil
+ }
+ p := d.p[0:n]
+ d.p = d.p[n:]
+ return p
+}
+
+func (d *dataIO) big4() (n uint32, ok bool) {
+ p := d.read(4)
+ if len(p) < 4 {
+ d.error = true
+ return 0, false
+ }
+ return uint32(p[3]) | uint32(p[2])<<8 | uint32(p[1])<<16 | uint32(p[0])<<24, true
+}
+
+func (d *dataIO) big8() (n uint64, ok bool) {
+ n1, ok1 := d.big4()
+ n2, ok2 := d.big4()
+ if !ok1 || !ok2 {
+ d.error = true
+ return 0, false
+ }
+ return (uint64(n1) << 32) | uint64(n2), true
+}
+
+func (d *dataIO) byte() (n byte, ok bool) {
+ p := d.read(1)
+ if len(p) < 1 {
+ d.error = true
+ return 0, false
+ }
+ return p[0], true
+}
+
+// rest returns the remaining data in the buffer.
+func (d *dataIO) rest() []byte {
+ r := d.p
+ d.p = nil
+ return r
+}
+
+// Make a string by stopping at the first NUL
+func byteString(p []byte) string {
+ for i := 0; i < len(p); i++ {
+ if p[i] == 0 {
+ return string(p[0:i])
+ }
+ }
+ return string(p)
+}
+
+var badData = errors.New("malformed time zone information")
+
+// LoadLocationFromTZData returns a Location with the given name
+// initialized from the IANA Time Zone database-formatted data.
+// The data should be in the format of a standard IANA time zone file
+// (for example, the content of /etc/localtime on Unix systems).
+func LoadLocationFromTZData(name string, data []byte) (*Location, error) {
+ d := dataIO{data, false}
+
+ // 4-byte magic "TZif"
+ if magic := d.read(4); string(magic) != "TZif" {
+ return nil, badData
+ }
+
+ // 1-byte version, then 15 bytes of padding
+ var version int
+ var p []byte
+ if p = d.read(16); len(p) != 16 {
+ return nil, badData
+ } else {
+ switch p[0] {
+ case 0:
+ version = 1
+ case '2':
+ version = 2
+ case '3':
+ version = 3
+ default:
+ return nil, badData
+ }
+ }
+
+ // six big-endian 32-bit integers:
+ // number of UTC/local indicators
+ // number of standard/wall indicators
+ // number of leap seconds
+ // number of transition times
+ // number of local time zones
+ // number of characters of time zone abbrev strings
+ const (
+ NUTCLocal = iota
+ NStdWall
+ NLeap
+ NTime
+ NZone
+ NChar
+ )
+ var n [6]int
+ for i := 0; i < 6; i++ {
+ nn, ok := d.big4()
+ if !ok {
+ return nil, badData
+ }
+ if uint32(int(nn)) != nn {
+ return nil, badData
+ }
+ n[i] = int(nn)
+ }
+
+ // If we have version 2 or 3, then the data is first written out
+ // in a 32-bit format, then written out again in a 64-bit format.
+ // Skip the 32-bit format and read the 64-bit one, as it can
+ // describe a broader range of dates.
+
+ is64 := false
+ if version > 1 {
+ // Skip the 32-bit data.
+ skip := n[NTime]*4 +
+ n[NTime] +
+ n[NZone]*6 +
+ n[NChar] +
+ n[NLeap]*8 +
+ n[NStdWall] +
+ n[NUTCLocal]
+ // Skip the version 2 header that we just read.
+ skip += 4 + 16
+ d.read(skip)
+
+ is64 = true
+
+ // Read the counts again, they can differ.
+ for i := 0; i < 6; i++ {
+ nn, ok := d.big4()
+ if !ok {
+ return nil, badData
+ }
+ if uint32(int(nn)) != nn {
+ return nil, badData
+ }
+ n[i] = int(nn)
+ }
+ }
+
+ size := 4
+ if is64 {
+ size = 8
+ }
+
+ // Transition times.
+ txtimes := dataIO{d.read(n[NTime] * size), false}
+
+ // Time zone indices for transition times.
+ txzones := d.read(n[NTime])
+
+ // Zone info structures
+ zonedata := dataIO{d.read(n[NZone] * 6), false}
+
+ // Time zone abbreviations.
+ abbrev := d.read(n[NChar])
+
+ // Leap-second time pairs
+ d.read(n[NLeap] * (size + 4))
+
+ // Whether tx times associated with local time types
+ // are specified as standard time or wall time.
+ isstd := d.read(n[NStdWall])
+
+ // Whether tx times associated with local time types
+ // are specified as UTC or local time.
+ isutc := d.read(n[NUTCLocal])
+
+ if d.error { // ran out of data
+ return nil, badData
+ }
+
+ var extend string
+ rest := d.rest()
+ if len(rest) > 2 && rest[0] == '\n' && rest[len(rest)-1] == '\n' {
+ extend = string(rest[1 : len(rest)-1])
+ }
+
+ // Now we can build up a useful data structure.
+ // First the zone information.
+ // utcoff[4] isdst[1] nameindex[1]
+ nzone := n[NZone]
+ if nzone == 0 {
+ // Reject tzdata files with no zones. There's nothing useful in them.
+ // This also avoids a panic later when we add and then use a fake transition (golang.org/issue/29437).
+ return nil, badData
+ }
+ zones := make([]zone, nzone)
+ for i := range zones {
+ var ok bool
+ var n uint32
+ if n, ok = zonedata.big4(); !ok {
+ return nil, badData
+ }
+ if uint32(int(n)) != n {
+ return nil, badData
+ }
+ zones[i].offset = int(int32(n))
+ var b byte
+ if b, ok = zonedata.byte(); !ok {
+ return nil, badData
+ }
+ zones[i].isDST = b != 0
+ if b, ok = zonedata.byte(); !ok || int(b) >= len(abbrev) {
+ return nil, badData
+ }
+ zones[i].name = byteString(abbrev[b:])
+ if runtime.GOOS == "aix" && len(name) > 8 && (name[:8] == "Etc/GMT+" || name[:8] == "Etc/GMT-") {
+ // There is a bug with AIX 7.2 TL 0 with files in Etc,
+ // GMT+1 will return GMT-1 instead of GMT+1 or -01.
+ if name != "Etc/GMT+0" {
+ // GMT+0 is OK
+ zones[i].name = name[4:]
+ }
+ }
+ }
+
+ // Now the transition time info.
+ tx := make([]zoneTrans, n[NTime])
+ for i := range tx {
+ var n int64
+ if !is64 {
+ if n4, ok := txtimes.big4(); !ok {
+ return nil, badData
+ } else {
+ n = int64(int32(n4))
+ }
+ } else {
+ if n8, ok := txtimes.big8(); !ok {
+ return nil, badData
+ } else {
+ n = int64(n8)
+ }
+ }
+ tx[i].when = n
+ if int(txzones[i]) >= len(zones) {
+ return nil, badData
+ }
+ tx[i].index = txzones[i]
+ if i < len(isstd) {
+ tx[i].isstd = isstd[i] != 0
+ }
+ if i < len(isutc) {
+ tx[i].isutc = isutc[i] != 0
+ }
+ }
+
+ if len(tx) == 0 {
+ // Build fake transition to cover all time.
+ // This happens in fixed locations like "Etc/GMT0".
+ tx = append(tx, zoneTrans{when: alpha, index: 0})
+ }
+
+ // Committed to succeed.
+ l := &Location{zone: zones, tx: tx, name: name, extend: extend}
+
+ // Fill in the cache with information about right now,
+ // since that will be the most common lookup.
+ sec, _, _ := now()
+ for i := range tx {
+ if tx[i].when <= sec && (i+1 == len(tx) || sec < tx[i+1].when) {
+ l.cacheStart = tx[i].when
+ l.cacheEnd = omega
+ l.cacheZone = &l.zone[tx[i].index]
+ if i+1 < len(tx) {
+ l.cacheEnd = tx[i+1].when
+ } else if l.extend != "" {
+ // If we're at the end of the known zone transitions,
+ // try the extend string.
+ if name, offset, estart, eend, isDST, ok := tzset(l.extend, l.cacheEnd, sec); ok {
+ l.cacheStart = estart
+ l.cacheEnd = eend
+ // Find the zone that is returned by tzset to avoid allocation if possible.
+ if zoneIdx := findZone(l.zone, name, offset, isDST); zoneIdx != -1 {
+ l.cacheZone = &l.zone[zoneIdx]
+ } else {
+ l.cacheZone = &zone{
+ name: name,
+ offset: offset,
+ isDST: isDST,
+ }
+ }
+ }
+ }
+ break
+ }
+ }
+
+ return l, nil
+}
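LoadLocationFromTZData is the exported entry point for raw TZif bytes; a sketch that parses a system zone file directly, assuming a Unix host with /etc/localtime:

    package main

    import (
        "fmt"
        "os"
        "time"
    )

    func main() {
        data, err := os.ReadFile("/etc/localtime") // raw "TZif" bytes
        if err != nil {
            fmt.Println(err)
            return
        }
        loc, err := time.LoadLocationFromTZData("local-copy", data)
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(time.Now().In(loc))
    }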
+
+func findZone(zones []zone, name string, offset int, isDST bool) int {
+ for i, z := range zones {
+ if z.name == name && z.offset == offset && z.isDST == isDST {
+ return i
+ }
+ }
+ return -1
+}
+
+// loadTzinfoFromDirOrZip returns the contents of the file with the given name
+// in dir. dir can either be an uncompressed zip file, or a directory.
+func loadTzinfoFromDirOrZip(dir, name string) ([]byte, error) {
+ if len(dir) > 4 && dir[len(dir)-4:] == ".zip" {
+ return loadTzinfoFromZip(dir, name)
+ }
+ if dir != "" {
+ name = dir + "/" + name
+ }
+ return readFile(name)
+}
+
+// There are 500+ zoneinfo files. Rather than distribute them all
+// individually, we ship them in an uncompressed zip file.
+// Used this way, the zip file format serves as a commonly readable
+// container for the individual small files. We choose zip over tar
+// because zip files have a contiguous table of contents, making
+// individual file lookups faster, and because the per-file overhead
+// in a zip file is considerably less than tar's 512 bytes.
+
+// get4 returns the little-endian 32-bit value in b.
+func get4(b []byte) int {
+ if len(b) < 4 {
+ return 0
+ }
+ return int(b[0]) | int(b[1])<<8 | int(b[2])<<16 | int(b[3])<<24
+}
+
+// get2 returns the little-endian 16-bit value in b.
+func get2(b []byte) int {
+ if len(b) < 2 {
+ return 0
+ }
+ return int(b[0]) | int(b[1])<<8
+}
+
+// loadTzinfoFromZip returns the contents of the file with the given name
+// in the given uncompressed zip file.
+func loadTzinfoFromZip(zipfile, name string) ([]byte, error) {
+ fd, err := open(zipfile)
+ if err != nil {
+ return nil, err
+ }
+ defer closefd(fd)
+
+ const (
+ zecheader = 0x06054b50
+ zcheader = 0x02014b50
+ ztailsize = 22
+
+ zheadersize = 30
+ zheader = 0x04034b50
+ )
+
+ buf := make([]byte, ztailsize)
+ if err := preadn(fd, buf, -ztailsize); err != nil || get4(buf) != zecheader {
+ return nil, errors.New("corrupt zip file " + zipfile)
+ }
+ n := get2(buf[10:])
+ size := get4(buf[12:])
+ off := get4(buf[16:])
+
+ buf = make([]byte, size)
+ if err := preadn(fd, buf, off); err != nil {
+ return nil, errors.New("corrupt zip file " + zipfile)
+ }
+
+ for i := 0; i < n; i++ {
+ // zip entry layout:
+ // 0 magic[4]
+ // 4 madevers[1]
+ // 5 madeos[1]
+ // 6 extvers[1]
+ // 7 extos[1]
+ // 8 flags[2]
+ // 10 meth[2]
+ // 12 modtime[2]
+ // 14 moddate[2]
+ // 16 crc[4]
+ // 20 csize[4]
+ // 24 uncsize[4]
+ // 28 namelen[2]
+ // 30 xlen[2]
+ // 32 fclen[2]
+ // 34 disknum[2]
+ // 36 iattr[2]
+ // 38 eattr[4]
+ // 42 off[4]
+ // 46 name[namelen]
+ // 46+namelen+xlen+fclen - next header
+ //
+ if get4(buf) != zcheader {
+ break
+ }
+ meth := get2(buf[10:])
+ size := get4(buf[24:])
+ namelen := get2(buf[28:])
+ xlen := get2(buf[30:])
+ fclen := get2(buf[32:])
+ off := get4(buf[42:])
+ zname := buf[46 : 46+namelen]
+ buf = buf[46+namelen+xlen+fclen:]
+ if string(zname) != name {
+ continue
+ }
+ if meth != 0 {
+ return nil, errors.New("unsupported compression for " + name + " in " + zipfile)
+ }
+
+ // zip per-file header layout:
+ // 0 magic[4]
+ // 4 extvers[1]
+ // 5 extos[1]
+ // 6 flags[2]
+ // 8 meth[2]
+ // 10 modtime[2]
+ // 12 moddate[2]
+ // 14 crc[4]
+ // 18 csize[4]
+ // 22 uncsize[4]
+ // 26 namelen[2]
+ // 28 xlen[2]
+ // 30 name[namelen]
+ // 30+namelen+xlen - file data
+ //
+ buf = make([]byte, zheadersize+namelen)
+ if err := preadn(fd, buf, off); err != nil ||
+ get4(buf) != zheader ||
+ get2(buf[8:]) != meth ||
+ get2(buf[26:]) != namelen ||
+ string(buf[30:30+namelen]) != name {
+ return nil, errors.New("corrupt zip file " + zipfile)
+ }
+ xlen = get2(buf[28:])
+
+ buf = make([]byte, size)
+ if err := preadn(fd, buf, off+30+namelen+xlen); err != nil {
+ return nil, errors.New("corrupt zip file " + zipfile)
+ }
+
+ return buf, nil
+ }
+
+ return nil, syscall.ENOENT
+}
+
+// loadTzinfoFromTzdata returns the time zone information of the time zone
+// with the given name, from a tzdata database file as they are typically
+// found on android.
+var loadTzinfoFromTzdata func(file, name string) ([]byte, error)
+
+// loadTzinfo returns the time zone information of the time zone
+// with the given name, from a given source. A source may be a
+// timezone database directory, tzdata database file or an uncompressed
+// zip file, containing the contents of such a directory.
+func loadTzinfo(name string, source string) ([]byte, error) {
+ if len(source) >= 6 && source[len(source)-6:] == "tzdata" {
+ return loadTzinfoFromTzdata(source, name)
+ }
+ return loadTzinfoFromDirOrZip(source, name)
+}
+
+// loadLocation returns the Location with the given name from one of
+// the specified sources. See loadTzinfo for a list of supported sources.
+// The first timezone data matching the given name that is successfully loaded
+// and parsed is returned as a Location.
+func loadLocation(name string, sources []string) (z *Location, firstErr error) {
+ for _, source := range sources {
+ zoneData, err := loadTzinfo(name, source)
+ if err == nil {
+ if z, err = LoadLocationFromTZData(name, zoneData); err == nil {
+ return z, nil
+ }
+ }
+ if firstErr == nil && err != syscall.ENOENT {
+ firstErr = err
+ }
+ }
+ if loadFromEmbeddedTZData != nil {
+ zoneData, err := loadFromEmbeddedTZData(name)
+ if err == nil {
+ if z, err = LoadLocationFromTZData(name, []byte(zoneData)); err == nil {
+ return z, nil
+ }
+ }
+ if firstErr == nil && err != syscall.ENOENT {
+ firstErr = err
+ }
+ }
+ if source, ok := gorootZoneSource(runtime.GOROOT()); ok {
+ zoneData, err := loadTzinfo(name, source)
+ if err == nil {
+ if z, err = LoadLocationFromTZData(name, zoneData); err == nil {
+ return z, nil
+ }
+ }
+ if firstErr == nil && err != syscall.ENOENT {
+ firstErr = err
+ }
+ }
+ if firstErr != nil {
+ return nil, firstErr
+ }
+ return nil, errors.New("unknown time zone " + name)
+}
+
+// readFile reads and returns the content of the named file.
+// It is a trivial implementation of os.ReadFile, reimplemented
+// here to avoid depending on io/ioutil or os.
+// It returns an error if the file exceeds maxFileSize bytes.
+func readFile(name string) ([]byte, error) {
+ f, err := open(name)
+ if err != nil {
+ return nil, err
+ }
+ defer closefd(f)
+ var (
+ buf [4096]byte
+ ret []byte
+ n int
+ )
+ for {
+ n, err = read(f, buf[:])
+ if n > 0 {
+ ret = append(ret, buf[:n]...)
+ }
+ if n == 0 || err != nil {
+ break
+ }
+ if len(ret) > maxFileSize {
+ return nil, fileSizeError(name)
+ }
+ }
+ return ret, err
+}
diff --git a/contrib/go/_std_1.19/src/time/zoneinfo_unix.go b/contrib/go/_std_1.19/src/time/zoneinfo_unix.go
new file mode 100644
index 0000000000..67b8beb47b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/time/zoneinfo_unix.go
@@ -0,0 +1,67 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix && !ios && !android
+
+// Parse "zoneinfo" time zone file.
+// This is a fairly standard file format used on OS X, Linux, BSD, Sun, and others.
+// See tzfile(5), https://en.wikipedia.org/wiki/Zoneinfo,
+// and ftp://munnari.oz.au/pub/oldtz/
+
+package time
+
+import (
+ "syscall"
+)
+
+// Many systems use /usr/share/zoneinfo, Solaris 2 has
+// /usr/share/lib/zoneinfo, IRIX 6 has /usr/lib/locale/TZ.
+var platformZoneSources = []string{
+ "/usr/share/zoneinfo/",
+ "/usr/share/lib/zoneinfo/",
+ "/usr/lib/locale/TZ/",
+}
+
+func initLocal() {
+ // consult $TZ to find the time zone to use.
+ // no $TZ means use the system default /etc/localtime.
+ // $TZ="" means use UTC.
+ // $TZ="foo" or $TZ=":foo" if foo is an absolute path, then the file pointed
+ // by foo will be used to initialize timezone; otherwise, file
+ // /usr/share/zoneinfo/foo will be used.
+
+ tz, ok := syscall.Getenv("TZ")
+ switch {
+ case !ok:
+ z, err := loadLocation("localtime", []string{"/etc"})
+ if err == nil {
+ localLoc = *z
+ localLoc.name = "Local"
+ return
+ }
+ case tz != "":
+ if tz[0] == ':' {
+ tz = tz[1:]
+ }
+ if tz != "" && tz[0] == '/' {
+ if z, err := loadLocation(tz, []string{""}); err == nil {
+ localLoc = *z
+ if tz == "/etc/localtime" {
+ localLoc.name = "Local"
+ } else {
+ localLoc.name = tz
+ }
+ return
+ }
+ } else if tz != "" && tz != "UTC" {
+ if z, err := loadLocation(tz, platformZoneSources); err == nil {
+ localLoc = *z
+ return
+ }
+ }
+ }
+
+ // Fall back to UTC.
+ localLoc.name = "UTC"
+}
diff --git a/contrib/go/_std_1.18/src/unicode/casetables.go b/contrib/go/_std_1.19/src/unicode/casetables.go
index 29bf167e56..29bf167e56 100644
--- a/contrib/go/_std_1.18/src/unicode/casetables.go
+++ b/contrib/go/_std_1.19/src/unicode/casetables.go
diff --git a/contrib/go/_std_1.18/src/unicode/digit.go b/contrib/go/_std_1.19/src/unicode/digit.go
index 53171b3969..53171b3969 100644
--- a/contrib/go/_std_1.18/src/unicode/digit.go
+++ b/contrib/go/_std_1.19/src/unicode/digit.go
diff --git a/contrib/go/_std_1.19/src/unicode/graphic.go b/contrib/go/_std_1.19/src/unicode/graphic.go
new file mode 100644
index 0000000000..2af29778bf
--- /dev/null
+++ b/contrib/go/_std_1.19/src/unicode/graphic.go
@@ -0,0 +1,146 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unicode
+
+// Bit masks for each code point under U+0100, for fast lookup.
+const (
+ pC = 1 << iota // a control character.
+ pP // a punctuation character.
+ pN // a numeral.
+ pS // a symbolic character.
+ pZ // a spacing character.
+ pLu // an upper-case letter.
+ pLl // a lower-case letter.
+ pp // a printable character according to Go's definition.
+ pg = pp | pZ // a graphical character according to the Unicode definition.
+ pLo = pLl | pLu // a letter that is neither upper nor lower case.
+ pLmask = pLo
+)
+
+// GraphicRanges defines the set of graphic characters according to Unicode.
+var GraphicRanges = []*RangeTable{
+ L, M, N, P, S, Zs,
+}
+
+// PrintRanges defines the set of printable characters according to Go.
+// ASCII space, U+0020, is handled separately.
+var PrintRanges = []*RangeTable{
+ L, M, N, P, S,
+}
+
+// IsGraphic reports whether the rune is defined as a Graphic by Unicode.
+// Such characters include letters, marks, numbers, punctuation, symbols, and
+// spaces, from categories L, M, N, P, S, Zs.
+func IsGraphic(r rune) bool {
+ // We convert to uint32 to avoid the extra test for negative,
+ // and in the index we convert to uint8 to avoid the range check.
+ if uint32(r) <= MaxLatin1 {
+ return properties[uint8(r)]&pg != 0
+ }
+ return In(r, GraphicRanges...)
+}
+
+// IsPrint reports whether the rune is defined as printable by Go. Such
+// characters include letters, marks, numbers, punctuation, and symbols from
+// categories L, M, N, P, S, plus the ASCII space character, U+0020. This
+// categorization is the same as IsGraphic except that the only spacing
+// character is ASCII space, U+0020.
+func IsPrint(r rune) bool {
+ if uint32(r) <= MaxLatin1 {
+ return properties[uint8(r)]&pp != 0
+ }
+ return In(r, PrintRanges...)
+}
+
+// IsOneOf reports whether the rune is a member of one of the ranges.
+// The function "In" provides a nicer signature and should be used in preference to IsOneOf.
+func IsOneOf(ranges []*RangeTable, r rune) bool {
+ for _, inside := range ranges {
+ if Is(inside, r) {
+ return true
+ }
+ }
+ return false
+}
+
+// In reports whether the rune is a member of one of the ranges.
+func In(r rune, ranges ...*RangeTable) bool {
+ for _, inside := range ranges {
+ if Is(inside, r) {
+ return true
+ }
+ }
+ return false
+}
+
+// IsControl reports whether the rune is a control character.
+// The C (Other) Unicode category includes more code points
+// such as surrogates; use Is(C, r) to test for them.
+func IsControl(r rune) bool {
+ if uint32(r) <= MaxLatin1 {
+ return properties[uint8(r)]&pC != 0
+ }
+ // All control characters are < MaxLatin1.
+ return false
+}
+
+// IsLetter reports whether the rune is a letter (category L).
+func IsLetter(r rune) bool {
+ if uint32(r) <= MaxLatin1 {
+ return properties[uint8(r)]&(pLmask) != 0
+ }
+ return isExcludingLatin(Letter, r)
+}
+
+// IsMark reports whether the rune is a mark character (category M).
+func IsMark(r rune) bool {
+ // There are no mark characters in Latin-1.
+ return isExcludingLatin(Mark, r)
+}
+
+// IsNumber reports whether the rune is a number (category N).
+func IsNumber(r rune) bool {
+ if uint32(r) <= MaxLatin1 {
+ return properties[uint8(r)]&pN != 0
+ }
+ return isExcludingLatin(Number, r)
+}
+
+// IsPunct reports whether the rune is a Unicode punctuation character
+// (category P).
+func IsPunct(r rune) bool {
+ if uint32(r) <= MaxLatin1 {
+ return properties[uint8(r)]&pP != 0
+ }
+ return Is(Punct, r)
+}
+
+// IsSpace reports whether the rune is a space character as defined
+// by Unicode's White Space property; in the Latin-1 space
+// this is
+//
+// '\t', '\n', '\v', '\f', '\r', ' ', U+0085 (NEL), U+00A0 (NBSP).
+//
+// Other definitions of spacing characters are set by category
+// Z and property Pattern_White_Space.
+func IsSpace(r rune) bool {
+ // This property isn't the same as Z; special-case it.
+ if uint32(r) <= MaxLatin1 {
+ switch r {
+ case '\t', '\n', '\v', '\f', '\r', ' ', 0x85, 0xA0:
+ return true
+ }
+ return false
+ }
+ return isExcludingLatin(White_Space, r)
+}
+
+// IsSymbol reports whether the rune is a symbolic character.
+func IsSymbol(r rune) bool {
+ if uint32(r) <= MaxLatin1 {
+ return properties[uint8(r)]&pS != 0
+ }
+ return isExcludingLatin(Symbol, r)
+}
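The classification predicates in this file differ mainly in how they treat spacing and control characters; a small sketch:

    package main

    import (
        "fmt"
        "unicode"
    )

    func main() {
        for _, r := range []rune{'A', '€', ' ', '\u00A0', '\t'} {
            fmt.Printf("%q graphic=%-5v print=%-5v space=%v\n",
                r, unicode.IsGraphic(r), unicode.IsPrint(r), unicode.IsSpace(r))
        }
        // NBSP (U+00A0) is graphic but not printable by Go's definition;
        // tab is neither, though both count as space.
    }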
diff --git a/contrib/go/_std_1.19/src/unicode/letter.go b/contrib/go/_std_1.19/src/unicode/letter.go
new file mode 100644
index 0000000000..f3f8e52964
--- /dev/null
+++ b/contrib/go/_std_1.19/src/unicode/letter.go
@@ -0,0 +1,371 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package unicode provides data and functions to test some properties of
+// Unicode code points.
+package unicode
+
+const (
+ MaxRune = '\U0010FFFF' // Maximum valid Unicode code point.
+ ReplacementChar = '\uFFFD' // Represents invalid code points.
+ MaxASCII = '\u007F' // maximum ASCII value.
+ MaxLatin1 = '\u00FF' // maximum Latin-1 value.
+)
+
+// RangeTable defines a set of Unicode code points by listing the ranges of
+// code points within the set. The ranges are listed in two slices
+// to save space: a slice of 16-bit ranges and a slice of 32-bit ranges.
+// The two slices must be in sorted order and non-overlapping.
+// Also, R32 should contain only values >= 0x10000 (1<<16).
+type RangeTable struct {
+ R16 []Range16
+ R32 []Range32
+ LatinOffset int // number of entries in R16 with Hi <= MaxLatin1
+}
+
+// Range16 represents a range of 16-bit Unicode code points. The range runs from Lo to Hi
+// inclusive and has the specified stride.
+type Range16 struct {
+ Lo uint16
+ Hi uint16
+ Stride uint16
+}
+
+// Range32 represents a range of Unicode code points and is used when one or
+// more of the values will not fit in 16 bits. The range runs from Lo to Hi
+// inclusive and has the specified stride. Lo and Hi must always be >= 1<<16.
+type Range32 struct {
+ Lo uint32
+ Hi uint32
+ Stride uint32
+}
+
+// CaseRange represents a range of Unicode code points for simple (one
+// code point to one code point) case conversion.
+// The range runs from Lo to Hi inclusive, with a fixed stride of 1. Deltas
+// are the number to add to the code point to reach the code point for a
+// different case for that character. They may be negative. If zero, it
+// means the character is in the corresponding case. There is a special
+// case representing sequences of alternating corresponding Upper and Lower
+// pairs. It appears with a fixed Delta of
+//
+// {UpperLower, UpperLower, UpperLower}
+//
+// The constant UpperLower has an otherwise impossible delta value.
+type CaseRange struct {
+ Lo uint32
+ Hi uint32
+ Delta d
+}
+
+// SpecialCase represents language-specific case mappings such as Turkish.
+// Methods of SpecialCase customize (by overriding) the standard mappings.
+type SpecialCase []CaseRange
+
+// BUG(r): There is no mechanism for full case folding, that is, for
+// characters that involve multiple runes in the input or output.
+
+// Indices into the Delta arrays inside CaseRanges for case mapping.
+const (
+ UpperCase = iota
+ LowerCase
+ TitleCase
+ MaxCase
+)
+
+type d [MaxCase]rune // to make the CaseRanges text shorter
+
+// If the Delta field of a CaseRange is UpperLower, it means
+// this CaseRange represents a sequence of the form (say)
+// Upper Lower Upper Lower.
+const (
+ UpperLower = MaxRune + 1 // (Cannot be a valid delta.)
+)
+
+// linearMax is the maximum size of a ranges slice searched linearly
+// for a non-Latin1 rune. Derived by running 'go test -calibrate'.
+const linearMax = 18
+
+// is16 reports whether r is in the sorted slice of 16-bit ranges.
+func is16(ranges []Range16, r uint16) bool {
+ if len(ranges) <= linearMax || r <= MaxLatin1 {
+ for i := range ranges {
+ range_ := &ranges[i]
+ if r < range_.Lo {
+ return false
+ }
+ if r <= range_.Hi {
+ return range_.Stride == 1 || (r-range_.Lo)%range_.Stride == 0
+ }
+ }
+ return false
+ }
+
+ // binary search over ranges
+ lo := 0
+ hi := len(ranges)
+ for lo < hi {
+ m := lo + (hi-lo)/2
+ range_ := &ranges[m]
+ if range_.Lo <= r && r <= range_.Hi {
+ return range_.Stride == 1 || (r-range_.Lo)%range_.Stride == 0
+ }
+ if r < range_.Lo {
+ hi = m
+ } else {
+ lo = m + 1
+ }
+ }
+ return false
+}
+
+// is32 reports whether r is in the sorted slice of 32-bit ranges.
+func is32(ranges []Range32, r uint32) bool {
+ if len(ranges) <= linearMax {
+ for i := range ranges {
+ range_ := &ranges[i]
+ if r < range_.Lo {
+ return false
+ }
+ if r <= range_.Hi {
+ return range_.Stride == 1 || (r-range_.Lo)%range_.Stride == 0
+ }
+ }
+ return false
+ }
+
+ // binary search over ranges
+ lo := 0
+ hi := len(ranges)
+ for lo < hi {
+ m := lo + (hi-lo)/2
+ range_ := ranges[m]
+ if range_.Lo <= r && r <= range_.Hi {
+ return range_.Stride == 1 || (r-range_.Lo)%range_.Stride == 0
+ }
+ if r < range_.Lo {
+ hi = m
+ } else {
+ lo = m + 1
+ }
+ }
+ return false
+}
+
+// Is reports whether the rune is in the specified table of ranges.
+func Is(rangeTab *RangeTable, r rune) bool {
+ r16 := rangeTab.R16
+ // Compare as uint32 to correctly handle negative runes.
+ if len(r16) > 0 && uint32(r) <= uint32(r16[len(r16)-1].Hi) {
+ return is16(r16, uint16(r))
+ }
+ r32 := rangeTab.R32
+ if len(r32) > 0 && r >= rune(r32[0].Lo) {
+ return is32(r32, uint32(r))
+ }
+ return false
+}
+
+func isExcludingLatin(rangeTab *RangeTable, r rune) bool {
+ r16 := rangeTab.R16
+ // Compare as uint32 to correctly handle negative runes.
+ if off := rangeTab.LatinOffset; len(r16) > off && uint32(r) <= uint32(r16[len(r16)-1].Hi) {
+ return is16(r16[off:], uint16(r))
+ }
+ r32 := rangeTab.R32
+ if len(r32) > 0 && r >= rune(r32[0].Lo) {
+ return is32(r32, uint32(r))
+ }
+ return false
+}
+
+// IsUpper reports whether the rune is an upper case letter.
+func IsUpper(r rune) bool {
+ // See comment in IsGraphic.
+ if uint32(r) <= MaxLatin1 {
+ return properties[uint8(r)]&pLmask == pLu
+ }
+ return isExcludingLatin(Upper, r)
+}
+
+// IsLower reports whether the rune is a lower case letter.
+func IsLower(r rune) bool {
+ // See comment in IsGraphic.
+ if uint32(r) <= MaxLatin1 {
+ return properties[uint8(r)]&pLmask == pLl
+ }
+ return isExcludingLatin(Lower, r)
+}
+
+// IsTitle reports whether the rune is a title case letter.
+func IsTitle(r rune) bool {
+ if r <= MaxLatin1 {
+ return false
+ }
+ return isExcludingLatin(Title, r)
+}
+
+// to maps the rune using the specified case mapping.
+// It additionally reports whether caseRange contained a mapping for r.
+func to(_case int, r rune, caseRange []CaseRange) (mappedRune rune, foundMapping bool) {
+ if _case < 0 || MaxCase <= _case {
+ return ReplacementChar, false // as reasonable an error as any
+ }
+ // binary search over ranges
+ lo := 0
+ hi := len(caseRange)
+ for lo < hi {
+ m := lo + (hi-lo)/2
+ cr := caseRange[m]
+ if rune(cr.Lo) <= r && r <= rune(cr.Hi) {
+ delta := cr.Delta[_case]
+ if delta > MaxRune {
+ // In an Upper-Lower sequence, which always starts with
+ // an UpperCase letter, the real deltas always look like:
+ // {0, 1, 0} UpperCase (Lower is next)
+ // {-1, 0, -1} LowerCase (Upper, Title are previous)
+ // The characters at even offsets from the beginning of the
+ // sequence are upper case; the ones at odd offsets are lower.
+ // The correct mapping can be done by clearing or setting the low
+ // bit in the sequence offset.
+ // The constants UpperCase and TitleCase are even while LowerCase
+ // is odd so we take the low bit from _case.
+ return rune(cr.Lo) + ((r-rune(cr.Lo))&^1 | rune(_case&1)), true
+ }
+ return r + delta, true
+ }
+ if r < rune(cr.Lo) {
+ hi = m
+ } else {
+ lo = m + 1
+ }
+ }
+ return r, false
+}
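
The low-bit trick described in the comment above can be seen in isolation. A hedged sketch — loBitCaseMap is a hypothetical helper, not part of the package — using the alternating Upper-Lower run U+0139..U+0148 as an example:

package main

import "fmt"

// loBitCaseMap reproduces the UpperLower delta trick: within a run that
// alternates Upper, Lower, Upper, Lower..., clearing the low bit of the
// offset selects the upper-case member and setting it selects the lower.
func loBitCaseMap(lo, r rune, _case int) rune {
	// UpperCase (0) and TitleCase (2) are even; LowerCase (1) is odd.
	return lo + ((r-lo)&^1 | rune(_case&1))
}

func main() {
	const lo = 'Ĺ' // U+0139, start of an Upper-Lower run
	fmt.Printf("%c\n", loBitCaseMap(lo, 'ĺ', 0)) // Ĺ
	fmt.Printf("%c\n", loBitCaseMap(lo, 'Ĺ', 1)) // ĺ
}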
+
+// To maps the rune to the specified case: UpperCase, LowerCase, or TitleCase.
+func To(_case int, r rune) rune {
+ r, _ = to(_case, r, CaseRanges)
+ return r
+}
+
+// ToUpper maps the rune to upper case.
+func ToUpper(r rune) rune {
+ if r <= MaxASCII {
+ if 'a' <= r && r <= 'z' {
+ r -= 'a' - 'A'
+ }
+ return r
+ }
+ return To(UpperCase, r)
+}
+
+// ToLower maps the rune to lower case.
+func ToLower(r rune) rune {
+ if r <= MaxASCII {
+ if 'A' <= r && r <= 'Z' {
+ r += 'a' - 'A'
+ }
+ return r
+ }
+ return To(LowerCase, r)
+}
+
+// ToTitle maps the rune to title case.
+func ToTitle(r rune) rune {
+ if r <= MaxASCII {
+ if 'a' <= r && r <= 'z' { // title case is upper case for ASCII
+ r -= 'a' - 'A'
+ }
+ return r
+ }
+ return To(TitleCase, r)
+}
+
+// ToUpper maps the rune to upper case giving priority to the special mapping.
+func (special SpecialCase) ToUpper(r rune) rune {
+ r1, hadMapping := to(UpperCase, r, []CaseRange(special))
+ if r1 == r && !hadMapping {
+ r1 = ToUpper(r)
+ }
+ return r1
+}
+
+// ToTitle maps the rune to title case giving priority to the special mapping.
+func (special SpecialCase) ToTitle(r rune) rune {
+ r1, hadMapping := to(TitleCase, r, []CaseRange(special))
+ if r1 == r && !hadMapping {
+ r1 = ToTitle(r)
+ }
+ return r1
+}
+
+// ToLower maps the rune to lower case giving priority to the special mapping.
+func (special SpecialCase) ToLower(r rune) rune {
+ r1, hadMapping := to(LowerCase, r, []CaseRange(special))
+ if r1 == r && !hadMapping {
+ r1 = ToLower(r)
+ }
+ return r1
+}
+
+// caseOrbit is defined in tables.go as []foldPair. Right now all the
+// entries fit in uint16, so use uint16. If that changes, compilation
+// will fail (the constants in the composite literal will not fit in uint16)
+// and the types here can change to uint32.
+type foldPair struct {
+ From uint16
+ To uint16
+}
+
+// SimpleFold iterates over Unicode code points equivalent under
+// the Unicode-defined simple case folding. Among the code points
+// equivalent to rune (including rune itself), SimpleFold returns the
+// smallest rune > r if one exists, or else the smallest rune >= 0.
+// If r is not a valid Unicode code point, SimpleFold(r) returns r.
+//
+// For example:
+//
+// SimpleFold('A') = 'a'
+// SimpleFold('a') = 'A'
+//
+// SimpleFold('K') = 'k'
+// SimpleFold('k') = '\u212A' (Kelvin symbol, K)
+// SimpleFold('\u212A') = 'K'
+//
+// SimpleFold('1') = '1'
+//
+// SimpleFold(-2) = -2
+func SimpleFold(r rune) rune {
+ if r < 0 || r > MaxRune {
+ return r
+ }
+
+ if int(r) < len(asciiFold) {
+ return rune(asciiFold[r])
+ }
+
+ // Consult caseOrbit table for special cases.
+ lo := 0
+ hi := len(caseOrbit)
+ for lo < hi {
+ m := lo + (hi-lo)/2
+ if rune(caseOrbit[m].From) < r {
+ lo = m + 1
+ } else {
+ hi = m
+ }
+ }
+ if lo < len(caseOrbit) && rune(caseOrbit[lo].From) == r {
+ return rune(caseOrbit[lo].To)
+ }
+
+ // No folding specified. This is a one- or two-element
+ // equivalence class containing rune and ToLower(rune)
+ // and ToUpper(rune) if they are different from rune.
+ if l := ToLower(r); l != r {
+ return l
+ }
+ return ToUpper(r)
+}
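
A short usage sketch for SimpleFold, walking the whole fold orbit of 'K' until it wraps back to the starting rune:

package main

import (
	"fmt"
	"unicode"
)

func main() {
	start := rune('K')
	fmt.Printf("%#U\n", start)
	for r := unicode.SimpleFold(start); r != start; r = unicode.SimpleFold(r) {
		fmt.Printf("%#U\n", r) // U+006B 'k', then U+212A 'K' (Kelvin)
	}
}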
diff --git a/contrib/go/_std_1.18/src/unicode/tables.go b/contrib/go/_std_1.19/src/unicode/tables.go
index a9b23bfacd..a9b23bfacd 100644
--- a/contrib/go/_std_1.18/src/unicode/tables.go
+++ b/contrib/go/_std_1.19/src/unicode/tables.go
diff --git a/contrib/go/_std_1.18/src/unicode/utf16/utf16.go b/contrib/go/_std_1.19/src/unicode/utf16/utf16.go
index 1a881aa769..1a881aa769 100644
--- a/contrib/go/_std_1.18/src/unicode/utf16/utf16.go
+++ b/contrib/go/_std_1.19/src/unicode/utf16/utf16.go
diff --git a/contrib/go/_std_1.19/src/unicode/utf8/utf8.go b/contrib/go/_std_1.19/src/unicode/utf8/utf8.go
new file mode 100644
index 0000000000..1e9f666e23
--- /dev/null
+++ b/contrib/go/_std_1.19/src/unicode/utf8/utf8.go
@@ -0,0 +1,583 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package utf8 implements functions and constants to support text encoded in
+// UTF-8. It includes functions to translate between runes and UTF-8 byte sequences.
+// See https://en.wikipedia.org/wiki/UTF-8
+package utf8
+
+// The conditions RuneError==unicode.ReplacementChar and
+// MaxRune==unicode.MaxRune are verified in the tests.
+// Defining them locally avoids this package depending on package unicode.
+
+// Numbers fundamental to the encoding.
+const (
+ RuneError = '\uFFFD' // the "error" Rune or "Unicode replacement character"
+ RuneSelf = 0x80 // characters below RuneSelf are represented as themselves in a single byte.
+ MaxRune = '\U0010FFFF' // Maximum valid Unicode code point.
+ UTFMax = 4 // maximum number of bytes of a UTF-8 encoded Unicode character.
+)
+
+// Code points in the surrogate range are not valid for UTF-8.
+const (
+ surrogateMin = 0xD800
+ surrogateMax = 0xDFFF
+)
+
+const (
+ t1 = 0b00000000
+ tx = 0b10000000
+ t2 = 0b11000000
+ t3 = 0b11100000
+ t4 = 0b11110000
+ t5 = 0b11111000
+
+ maskx = 0b00111111
+ mask2 = 0b00011111
+ mask3 = 0b00001111
+ mask4 = 0b00000111
+
+ rune1Max = 1<<7 - 1
+ rune2Max = 1<<11 - 1
+ rune3Max = 1<<16 - 1
+
+ // The default lowest and highest continuation byte.
+ locb = 0b10000000
+ hicb = 0b10111111
+
+	// The names of these constants are chosen to give nice alignment in the
+ // table below. The first nibble is an index into acceptRanges or F for
+ // special one-byte cases. The second nibble is the Rune length or the
+ // Status for the special one-byte case.
+ xx = 0xF1 // invalid: size 1
+ as = 0xF0 // ASCII: size 1
+ s1 = 0x02 // accept 0, size 2
+ s2 = 0x13 // accept 1, size 3
+ s3 = 0x03 // accept 0, size 3
+ s4 = 0x23 // accept 2, size 3
+ s5 = 0x34 // accept 3, size 4
+ s6 = 0x04 // accept 0, size 4
+ s7 = 0x44 // accept 4, size 4
+)
+
+// first is information about the first byte in a UTF-8 sequence.
+var first = [256]uint8{
+ // 1 2 3 4 5 6 7 8 9 A B C D E F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F
+ as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F
+ // 1 2 3 4 5 6 7 8 9 A B C D E F
+ xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F
+ xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F
+ xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF
+ xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF
+ xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF
+ s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF
+ s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF
+ s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF
+}
+
+// acceptRange gives the range of valid values for the second byte in a UTF-8
+// sequence.
+type acceptRange struct {
+ lo uint8 // lowest value for second byte.
+ hi uint8 // highest value for second byte.
+}
+
+// acceptRanges has size 16 to avoid bounds checks in the code that uses it.
+var acceptRanges = [16]acceptRange{
+ 0: {locb, hicb},
+ 1: {0xA0, hicb},
+ 2: {locb, 0x9F},
+ 3: {0x90, hicb},
+ 4: {locb, 0x8F},
+}
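
To make the nibble packing concrete, here is a small sketch of the lookup for a leading byte 0xE0; the constant value is copied from the table above so the fragment stands alone:

package main

import "fmt"

func main() {
	const s2 = 0x13 // first[0xE0], copied from the table above
	size := s2 & 7  // 3: a three-byte sequence
	idx := s2 >> 4  // 1: acceptRanges[1] == {0xA0, 0xBF}
	fmt.Println(size, idx) // the narrowed range rejects overlong E0 80 xx
}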
+
+// FullRune reports whether the bytes in p begin with a full UTF-8 encoding of a rune.
+// An invalid encoding is considered a full Rune since it will convert as a width-1 error rune.
+func FullRune(p []byte) bool {
+ n := len(p)
+ if n == 0 {
+ return false
+ }
+ x := first[p[0]]
+ if n >= int(x&7) {
+ return true // ASCII, invalid or valid.
+ }
+ // Must be short or invalid.
+ accept := acceptRanges[x>>4]
+ if n > 1 && (p[1] < accept.lo || accept.hi < p[1]) {
+ return true
+ } else if n > 2 && (p[2] < locb || hicb < p[2]) {
+ return true
+ }
+ return false
+}
+
+// FullRuneInString is like FullRune but its input is a string.
+func FullRuneInString(s string) bool {
+ n := len(s)
+ if n == 0 {
+ return false
+ }
+ x := first[s[0]]
+ if n >= int(x&7) {
+ return true // ASCII, invalid, or valid.
+ }
+ // Must be short or invalid.
+ accept := acceptRanges[x>>4]
+ if n > 1 && (s[1] < accept.lo || accept.hi < s[1]) {
+ return true
+ } else if n > 2 && (s[2] < locb || hicb < s[2]) {
+ return true
+ }
+ return false
+}
+
+// DecodeRune unpacks the first UTF-8 encoding in p and returns the rune and
+// its width in bytes. If p is empty it returns (RuneError, 0). Otherwise, if
+// the encoding is invalid, it returns (RuneError, 1). Both are impossible
+// results for correct, non-empty UTF-8.
+//
+// An encoding is invalid if it is incorrect UTF-8, encodes a rune that is
+// out of range, or is not the shortest possible UTF-8 encoding for the
+// value. No other validation is performed.
+func DecodeRune(p []byte) (r rune, size int) {
+ n := len(p)
+ if n < 1 {
+ return RuneError, 0
+ }
+ p0 := p[0]
+ x := first[p0]
+ if x >= as {
+ // The following code simulates an additional check for x == xx and
+ // handling the ASCII and invalid cases accordingly. This mask-and-or
+ // approach prevents an additional branch.
+ mask := rune(x) << 31 >> 31 // Create 0x0000 or 0xFFFF.
+ return rune(p[0])&^mask | RuneError&mask, 1
+ }
+ sz := int(x & 7)
+ accept := acceptRanges[x>>4]
+ if n < sz {
+ return RuneError, 1
+ }
+ b1 := p[1]
+ if b1 < accept.lo || accept.hi < b1 {
+ return RuneError, 1
+ }
+ if sz <= 2 { // <= instead of == to help the compiler eliminate some bounds checks
+ return rune(p0&mask2)<<6 | rune(b1&maskx), 2
+ }
+ b2 := p[2]
+ if b2 < locb || hicb < b2 {
+ return RuneError, 1
+ }
+ if sz <= 3 {
+ return rune(p0&mask3)<<12 | rune(b1&maskx)<<6 | rune(b2&maskx), 3
+ }
+ b3 := p[3]
+ if b3 < locb || hicb < b3 {
+ return RuneError, 1
+ }
+ return rune(p0&mask4)<<18 | rune(b1&maskx)<<12 | rune(b2&maskx)<<6 | rune(b3&maskx), 4
+}
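
A minimal forward-iteration sketch using DecodeRune; since size is always at least 1, the loop terminates even on invalid input:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	b := []byte("héllo")
	for i := 0; i < len(b); {
		r, size := utf8.DecodeRune(b[i:])
		fmt.Printf("%#U starts at byte %d\n", r, i)
		i += size
	}
}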
+
+// DecodeRuneInString is like DecodeRune but its input is a string. If s is
+// empty it returns (RuneError, 0). Otherwise, if the encoding is invalid, it
+// returns (RuneError, 1). Both are impossible results for correct, non-empty
+// UTF-8.
+//
+// An encoding is invalid if it is incorrect UTF-8, encodes a rune that is
+// out of range, or is not the shortest possible UTF-8 encoding for the
+// value. No other validation is performed.
+func DecodeRuneInString(s string) (r rune, size int) {
+ n := len(s)
+ if n < 1 {
+ return RuneError, 0
+ }
+ s0 := s[0]
+ x := first[s0]
+ if x >= as {
+ // The following code simulates an additional check for x == xx and
+ // handling the ASCII and invalid cases accordingly. This mask-and-or
+ // approach prevents an additional branch.
+ mask := rune(x) << 31 >> 31 // Create 0x0000 or 0xFFFF.
+ return rune(s[0])&^mask | RuneError&mask, 1
+ }
+ sz := int(x & 7)
+ accept := acceptRanges[x>>4]
+ if n < sz {
+ return RuneError, 1
+ }
+ s1 := s[1]
+ if s1 < accept.lo || accept.hi < s1 {
+ return RuneError, 1
+ }
+ if sz <= 2 { // <= instead of == to help the compiler eliminate some bounds checks
+ return rune(s0&mask2)<<6 | rune(s1&maskx), 2
+ }
+ s2 := s[2]
+ if s2 < locb || hicb < s2 {
+ return RuneError, 1
+ }
+ if sz <= 3 {
+ return rune(s0&mask3)<<12 | rune(s1&maskx)<<6 | rune(s2&maskx), 3
+ }
+ s3 := s[3]
+ if s3 < locb || hicb < s3 {
+ return RuneError, 1
+ }
+ return rune(s0&mask4)<<18 | rune(s1&maskx)<<12 | rune(s2&maskx)<<6 | rune(s3&maskx), 4
+}
+
+// DecodeLastRune unpacks the last UTF-8 encoding in p and returns the rune and
+// its width in bytes. If p is empty it returns (RuneError, 0). Otherwise, if
+// the encoding is invalid, it returns (RuneError, 1). Both are impossible
+// results for correct, non-empty UTF-8.
+//
+// An encoding is invalid if it is incorrect UTF-8, encodes a rune that is
+// out of range, or is not the shortest possible UTF-8 encoding for the
+// value. No other validation is performed.
+func DecodeLastRune(p []byte) (r rune, size int) {
+ end := len(p)
+ if end == 0 {
+ return RuneError, 0
+ }
+ start := end - 1
+ r = rune(p[start])
+ if r < RuneSelf {
+ return r, 1
+ }
+ // guard against O(n^2) behavior when traversing
+ // backwards through strings with long sequences of
+ // invalid UTF-8.
+ lim := end - UTFMax
+ if lim < 0 {
+ lim = 0
+ }
+ for start--; start >= lim; start-- {
+ if RuneStart(p[start]) {
+ break
+ }
+ }
+ if start < 0 {
+ start = 0
+ }
+ r, size = DecodeRune(p[start:end])
+ if start+size != end {
+ return RuneError, 1
+ }
+ return r, size
+}
+
+// DecodeLastRuneInString is like DecodeLastRune but its input is a string. If
+// s is empty it returns (RuneError, 0). Otherwise, if the encoding is invalid,
+// it returns (RuneError, 1). Both are impossible results for correct,
+// non-empty UTF-8.
+//
+// An encoding is invalid if it is incorrect UTF-8, encodes a rune that is
+// out of range, or is not the shortest possible UTF-8 encoding for the
+// value. No other validation is performed.
+func DecodeLastRuneInString(s string) (r rune, size int) {
+ end := len(s)
+ if end == 0 {
+ return RuneError, 0
+ }
+ start := end - 1
+ r = rune(s[start])
+ if r < RuneSelf {
+ return r, 1
+ }
+ // guard against O(n^2) behavior when traversing
+ // backwards through strings with long sequences of
+ // invalid UTF-8.
+ lim := end - UTFMax
+ if lim < 0 {
+ lim = 0
+ }
+ for start--; start >= lim; start-- {
+ if RuneStart(s[start]) {
+ break
+ }
+ }
+ if start < 0 {
+ start = 0
+ }
+ r, size = DecodeRuneInString(s[start:end])
+ if start+size != end {
+ return RuneError, 1
+ }
+ return r, size
+}
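
The mirror image for backward traversal, a small sketch using DecodeLastRuneInString:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	s := "héllo"
	for len(s) > 0 {
		r, size := utf8.DecodeLastRuneInString(s)
		fmt.Printf("%#U\n", r) // o, l, l, é, h
		s = s[:len(s)-size]
	}
}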
+
+// RuneLen returns the number of bytes required to encode the rune.
+// It returns -1 if the rune is not a valid value to encode in UTF-8.
+func RuneLen(r rune) int {
+ switch {
+ case r < 0:
+ return -1
+ case r <= rune1Max:
+ return 1
+ case r <= rune2Max:
+ return 2
+ case surrogateMin <= r && r <= surrogateMax:
+ return -1
+ case r <= rune3Max:
+ return 3
+ case r <= MaxRune:
+ return 4
+ }
+ return -1
+}
+
+// EncodeRune writes into p (which must be large enough) the UTF-8 encoding of the rune.
+// If the rune is out of range, it writes the encoding of RuneError.
+// It returns the number of bytes written.
+func EncodeRune(p []byte, r rune) int {
+ // Negative values are erroneous. Making it unsigned addresses the problem.
+ switch i := uint32(r); {
+ case i <= rune1Max:
+ p[0] = byte(r)
+ return 1
+ case i <= rune2Max:
+ _ = p[1] // eliminate bounds checks
+ p[0] = t2 | byte(r>>6)
+ p[1] = tx | byte(r)&maskx
+ return 2
+ case i > MaxRune, surrogateMin <= i && i <= surrogateMax:
+ r = RuneError
+ fallthrough
+ case i <= rune3Max:
+ _ = p[2] // eliminate bounds checks
+ p[0] = t3 | byte(r>>12)
+ p[1] = tx | byte(r>>6)&maskx
+ p[2] = tx | byte(r)&maskx
+ return 3
+ default:
+ _ = p[3] // eliminate bounds checks
+ p[0] = t4 | byte(r>>18)
+ p[1] = tx | byte(r>>12)&maskx
+ p[2] = tx | byte(r>>6)&maskx
+ p[3] = tx | byte(r)&maskx
+ return 4
+ }
+}
+
+// AppendRune appends the UTF-8 encoding of r to the end of p and
+// returns the extended buffer. If the rune is out of range,
+// it appends the encoding of RuneError.
+func AppendRune(p []byte, r rune) []byte {
+ // This function is inlineable for fast handling of ASCII.
+ if uint32(r) <= rune1Max {
+ return append(p, byte(r))
+ }
+ return appendRuneNonASCII(p, r)
+}
+
+func appendRuneNonASCII(p []byte, r rune) []byte {
+ // Negative values are erroneous. Making it unsigned addresses the problem.
+ switch i := uint32(r); {
+ case i <= rune2Max:
+ return append(p, t2|byte(r>>6), tx|byte(r)&maskx)
+ case i > MaxRune, surrogateMin <= i && i <= surrogateMax:
+ r = RuneError
+ fallthrough
+ case i <= rune3Max:
+ return append(p, t3|byte(r>>12), tx|byte(r>>6)&maskx, tx|byte(r)&maskx)
+ default:
+ return append(p, t4|byte(r>>18), tx|byte(r>>12)&maskx, tx|byte(r>>6)&maskx, tx|byte(r)&maskx)
+ }
+}
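
Usage sketch for the two encoders; EncodeRune needs a caller-managed buffer of at least UTFMax bytes, while AppendRune grows the slice itself:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	buf := make([]byte, utf8.UTFMax)
	n := utf8.EncodeRune(buf, '世')
	fmt.Printf("% x\n", buf[:n]) // e4 b8 96

	out := utf8.AppendRune([]byte("x = "), '世')
	fmt.Println(string(out)) // x = 世
}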
+
+// RuneCount returns the number of runes in p. Erroneous and short
+// encodings are treated as single runes of width 1 byte.
+func RuneCount(p []byte) int {
+ np := len(p)
+ var n int
+ for i := 0; i < np; {
+ n++
+ c := p[i]
+ if c < RuneSelf {
+ // ASCII fast path
+ i++
+ continue
+ }
+ x := first[c]
+ if x == xx {
+ i++ // invalid.
+ continue
+ }
+ size := int(x & 7)
+ if i+size > np {
+ i++ // Short or invalid.
+ continue
+ }
+ accept := acceptRanges[x>>4]
+ if c := p[i+1]; c < accept.lo || accept.hi < c {
+ size = 1
+ } else if size == 2 {
+ } else if c := p[i+2]; c < locb || hicb < c {
+ size = 1
+ } else if size == 3 {
+ } else if c := p[i+3]; c < locb || hicb < c {
+ size = 1
+ }
+ i += size
+ }
+ return n
+}
+
+// RuneCountInString is like RuneCount but its input is a string.
+func RuneCountInString(s string) (n int) {
+ ns := len(s)
+ for i := 0; i < ns; n++ {
+ c := s[i]
+ if c < RuneSelf {
+ // ASCII fast path
+ i++
+ continue
+ }
+ x := first[c]
+ if x == xx {
+ i++ // invalid.
+ continue
+ }
+ size := int(x & 7)
+ if i+size > ns {
+ i++ // Short or invalid.
+ continue
+ }
+ accept := acceptRanges[x>>4]
+ if c := s[i+1]; c < accept.lo || accept.hi < c {
+ size = 1
+ } else if size == 2 {
+ } else if c := s[i+2]; c < locb || hicb < c {
+ size = 1
+ } else if size == 3 {
+ } else if c := s[i+3]; c < locb || hicb < c {
+ size = 1
+ }
+ i += size
+ }
+ return n
+}
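
A quick sketch contrasting byte length with rune count, and showing the width-1 treatment of erroneous bytes:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	s := "héllo"
	fmt.Println(len(s))                                 // 6 bytes
	fmt.Println(utf8.RuneCountInString(s))              // 5 runes
	fmt.Println(utf8.RuneCount([]byte{'a', 0xff, 'b'})) // 3: 0xff counts as one rune
}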
+
+// RuneStart reports whether the byte could be the first byte of an encoded,
+// possibly invalid rune. Second and subsequent bytes always have the top two
+// bits set to 10.
+func RuneStart(b byte) bool { return b&0xC0 != 0x80 }
+
+// Valid reports whether p consists entirely of valid UTF-8-encoded runes.
+func Valid(p []byte) bool {
+ // This optimization avoids the need to recompute the capacity
+ // when generating code for p[8:], bringing it to parity with
+ // ValidString, which was 20% faster on long ASCII strings.
+ p = p[:len(p):len(p)]
+
+ // Fast path. Check for and skip 8 bytes of ASCII characters per iteration.
+ for len(p) >= 8 {
+ // Combining two 32 bit loads allows the same code to be used
+ // for 32 and 64 bit platforms.
+ // The compiler can generate a 32bit load for first32 and second32
+ // on many platforms. See test/codegen/memcombine.go.
+ first32 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
+ second32 := uint32(p[4]) | uint32(p[5])<<8 | uint32(p[6])<<16 | uint32(p[7])<<24
+ if (first32|second32)&0x80808080 != 0 {
+ // Found a non ASCII byte (>= RuneSelf).
+ break
+ }
+ p = p[8:]
+ }
+ n := len(p)
+ for i := 0; i < n; {
+ pi := p[i]
+ if pi < RuneSelf {
+ i++
+ continue
+ }
+ x := first[pi]
+ if x == xx {
+ return false // Illegal starter byte.
+ }
+ size := int(x & 7)
+ if i+size > n {
+ return false // Short or invalid.
+ }
+ accept := acceptRanges[x>>4]
+ if c := p[i+1]; c < accept.lo || accept.hi < c {
+ return false
+ } else if size == 2 {
+ } else if c := p[i+2]; c < locb || hicb < c {
+ return false
+ } else if size == 3 {
+ } else if c := p[i+3]; c < locb || hicb < c {
+ return false
+ }
+ i += size
+ }
+ return true
+}
+
+// ValidString reports whether s consists entirely of valid UTF-8-encoded runes.
+func ValidString(s string) bool {
+ // Fast path. Check for and skip 8 bytes of ASCII characters per iteration.
+ for len(s) >= 8 {
+ // Combining two 32 bit loads allows the same code to be used
+ // for 32 and 64 bit platforms.
+ // The compiler can generate a 32bit load for first32 and second32
+ // on many platforms. See test/codegen/memcombine.go.
+ first32 := uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 | uint32(s[3])<<24
+ second32 := uint32(s[4]) | uint32(s[5])<<8 | uint32(s[6])<<16 | uint32(s[7])<<24
+ if (first32|second32)&0x80808080 != 0 {
+ // Found a non ASCII byte (>= RuneSelf).
+ break
+ }
+ s = s[8:]
+ }
+ n := len(s)
+ for i := 0; i < n; {
+ si := s[i]
+ if si < RuneSelf {
+ i++
+ continue
+ }
+ x := first[si]
+ if x == xx {
+ return false // Illegal starter byte.
+ }
+ size := int(x & 7)
+ if i+size > n {
+ return false // Short or invalid.
+ }
+ accept := acceptRanges[x>>4]
+ if c := s[i+1]; c < accept.lo || accept.hi < c {
+ return false
+ } else if size == 2 {
+ } else if c := s[i+2]; c < locb || hicb < c {
+ return false
+ } else if size == 3 {
+ } else if c := s[i+3]; c < locb || hicb < c {
+ return false
+ }
+ i += size
+ }
+ return true
+}
+
+// ValidRune reports whether r can be legally encoded as UTF-8.
+// Code points that are out of range or a surrogate half are illegal.
+func ValidRune(r rune) bool {
+ switch {
+ case 0 <= r && r < surrogateMin:
+ return true
+ case surrogateMax < r && r <= MaxRune:
+ return true
+ }
+ return false
+}
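
Validation in action — a minimal sketch; overlong encodings and surrogate halves are rejected, as the doc comments above describe:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	fmt.Println(utf8.Valid([]byte("héllo")))    // true
	fmt.Println(utf8.Valid([]byte{0xc0, 0xaf})) // false: overlong encoding of '/'
	fmt.Println(utf8.ValidString("a\x80b"))     // false: stray continuation byte
	fmt.Println(utf8.ValidRune(0xD800))         // false: surrogate half
}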
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
index a2ecf5c325..a2ecf5c325 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go
index 025b49897e..025b49897e 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20/xor.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20/xor.go
index c2d04851e0..c2d04851e0 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20/xor.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20/xor.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go
index 93da7322bc..93da7322bc 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go
index 25959b9a6e..25959b9a6e 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s
index 867c181a14..867c181a14 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go
index 96b2fd898b..96b2fd898b 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go
index 1cebfe946f..1cebfe946f 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go
index 3a1674a1e5..3a1674a1e5 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
index cda8e3edfd..cda8e3edfd 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
diff --git a/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/cryptobyte/builder.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/cryptobyte/builder.go
new file mode 100644
index 0000000000..c7ded75771
--- /dev/null
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/cryptobyte/builder.go
@@ -0,0 +1,337 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cryptobyte
+
+import (
+ "errors"
+ "fmt"
+)
+
+// A Builder builds byte strings from fixed-length and length-prefixed values.
+// Builders either allocate space as needed, or are ‘fixed’, which means that
+// they write into a given buffer and produce an error if it's exhausted.
+//
+// The zero value is a usable Builder that allocates space as needed.
+//
+// Simple values are marshaled and appended to a Builder using methods on the
+// Builder. Length-prefixed values are marshaled by providing a
+// BuilderContinuation, which is a function that writes the inner contents of
+// the value to a given Builder. See the documentation for BuilderContinuation
+// for details.
+type Builder struct {
+ err error
+ result []byte
+ fixedSize bool
+ child *Builder
+ offset int
+ pendingLenLen int
+ pendingIsASN1 bool
+ inContinuation *bool
+}
+
+// NewBuilder creates a Builder that appends its output to the given buffer.
+// Like append(), the slice will be reallocated if its capacity is exceeded.
+// Use Bytes to get the final buffer.
+func NewBuilder(buffer []byte) *Builder {
+ return &Builder{
+ result: buffer,
+ }
+}
+
+// NewFixedBuilder creates a Builder that appends its output into the given
+// buffer. This builder does not reallocate the output buffer. Writes that
+// would exceed the buffer's capacity are treated as an error.
+func NewFixedBuilder(buffer []byte) *Builder {
+ return &Builder{
+ result: buffer,
+ fixedSize: true,
+ }
+}
+
+// SetError sets the value to be returned as the error from Bytes. Writes
+// performed after calling SetError are ignored.
+func (b *Builder) SetError(err error) {
+ b.err = err
+}
+
+// Bytes returns the bytes written by the builder or an error if one has
+// occurred during building.
+func (b *Builder) Bytes() ([]byte, error) {
+ if b.err != nil {
+ return nil, b.err
+ }
+ return b.result[b.offset:], nil
+}
+
+// BytesOrPanic returns the bytes written by the builder or panics if an error
+// has occurred during building.
+func (b *Builder) BytesOrPanic() []byte {
+ if b.err != nil {
+ panic(b.err)
+ }
+ return b.result[b.offset:]
+}
+
+// AddUint8 appends an 8-bit value to the byte string.
+func (b *Builder) AddUint8(v uint8) {
+ b.add(byte(v))
+}
+
+// AddUint16 appends a big-endian, 16-bit value to the byte string.
+func (b *Builder) AddUint16(v uint16) {
+ b.add(byte(v>>8), byte(v))
+}
+
+// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest
+// byte of the 32-bit input value is silently truncated.
+func (b *Builder) AddUint24(v uint32) {
+ b.add(byte(v>>16), byte(v>>8), byte(v))
+}
+
+// AddUint32 appends a big-endian, 32-bit value to the byte string.
+func (b *Builder) AddUint32(v uint32) {
+ b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+// AddBytes appends a sequence of bytes to the byte string.
+func (b *Builder) AddBytes(v []byte) {
+ b.add(v...)
+}
+
+// BuilderContinuation is a continuation-passing interface for building
+// length-prefixed byte sequences. Builder methods for length-prefixed
+// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation
+// supplied to them. The child builder passed to the continuation can be used
+// to build the content of the length-prefixed sequence. For example:
+//
+// parent := cryptobyte.NewBuilder()
+//	parent.AddUint8LengthPrefixed(func(child *Builder) {
+//		child.AddUint8(42)
+//		child.AddUint8LengthPrefixed(func(grandchild *Builder) {
+// grandchild.AddUint8(5)
+// })
+// })
+//
+// It is an error to write more bytes to the child than allowed by the reserved
+// length prefix. After the continuation returns, the child must be considered
+// invalid, i.e. users must not store any copies or references of the child
+// that outlive the continuation.
+//
+// If the continuation panics with a value of type BuildError then the inner
+// error will be returned as the error from Bytes. If the child panics
+// otherwise then Bytes will repanic with the same value.
+type BuilderContinuation func(child *Builder)
+
+// BuildError wraps an error. If a BuilderContinuation panics with this value,
+// the panic will be recovered and the inner error will be returned from
+// Builder.Bytes.
+type BuildError struct {
+ Err error
+}
+
+// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence.
+func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) {
+ b.addLengthPrefixed(1, false, f)
+}
+
+// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence.
+func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) {
+ b.addLengthPrefixed(2, false, f)
+}
+
+// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence.
+func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) {
+ b.addLengthPrefixed(3, false, f)
+}
+
+// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence.
+func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) {
+ b.addLengthPrefixed(4, false, f)
+}
+
+func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) {
+ if !*b.inContinuation {
+ *b.inContinuation = true
+
+ defer func() {
+ *b.inContinuation = false
+
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ if buildError, ok := r.(BuildError); ok {
+ b.err = buildError.Err
+ } else {
+ panic(r)
+ }
+ }()
+ }
+
+ f(arg)
+}
+
+func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) {
+ // Subsequent writes can be ignored if the builder has encountered an error.
+ if b.err != nil {
+ return
+ }
+
+ offset := len(b.result)
+ b.add(make([]byte, lenLen)...)
+
+ if b.inContinuation == nil {
+ b.inContinuation = new(bool)
+ }
+
+ b.child = &Builder{
+ result: b.result,
+ fixedSize: b.fixedSize,
+ offset: offset,
+ pendingLenLen: lenLen,
+ pendingIsASN1: isASN1,
+ inContinuation: b.inContinuation,
+ }
+
+ b.callContinuation(f, b.child)
+ b.flushChild()
+ if b.child != nil {
+ panic("cryptobyte: internal error")
+ }
+}
+
+func (b *Builder) flushChild() {
+ if b.child == nil {
+ return
+ }
+ b.child.flushChild()
+ child := b.child
+ b.child = nil
+
+ if child.err != nil {
+ b.err = child.err
+ return
+ }
+
+ length := len(child.result) - child.pendingLenLen - child.offset
+
+ if length < 0 {
+ panic("cryptobyte: internal error") // result unexpectedly shrunk
+ }
+
+ if child.pendingIsASN1 {
+ // For ASN.1, we reserved a single byte for the length. If that turned out
+ // to be incorrect, we have to move the contents along in order to make
+ // space.
+ if child.pendingLenLen != 1 {
+ panic("cryptobyte: internal error")
+ }
+ var lenLen, lenByte uint8
+ if int64(length) > 0xfffffffe {
+ b.err = errors.New("pending ASN.1 child too long")
+ return
+ } else if length > 0xffffff {
+ lenLen = 5
+ lenByte = 0x80 | 4
+ } else if length > 0xffff {
+ lenLen = 4
+ lenByte = 0x80 | 3
+ } else if length > 0xff {
+ lenLen = 3
+ lenByte = 0x80 | 2
+ } else if length > 0x7f {
+ lenLen = 2
+ lenByte = 0x80 | 1
+ } else {
+ lenLen = 1
+ lenByte = uint8(length)
+ length = 0
+ }
+
+ // Insert the initial length byte, make space for successive length bytes,
+ // and adjust the offset.
+ child.result[child.offset] = lenByte
+ extraBytes := int(lenLen - 1)
+ if extraBytes != 0 {
+ child.add(make([]byte, extraBytes)...)
+ childStart := child.offset + child.pendingLenLen
+ copy(child.result[childStart+extraBytes:], child.result[childStart:])
+ }
+ child.offset++
+ child.pendingLenLen = extraBytes
+ }
+
+ l := length
+ for i := child.pendingLenLen - 1; i >= 0; i-- {
+ child.result[child.offset+i] = uint8(l)
+ l >>= 8
+ }
+ if l != 0 {
+ b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen)
+ return
+ }
+
+ if b.fixedSize && &b.result[0] != &child.result[0] {
+ panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer")
+ }
+
+ b.result = child.result
+}
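
The ASN.1 branch above implements DER definite-length encoding. A standalone sketch of the same rule — derLength is a hypothetical helper, not part of the package:

package main

import "fmt"

// derLength encodes a DER definite length: values up to 0x7f fit in one
// byte; longer values get a 0x80|n prefix followed by n big-endian bytes.
func derLength(length int) []byte {
	if length <= 0x7f {
		return []byte{byte(length)}
	}
	var tmp []byte
	for l := length; l > 0; l >>= 8 {
		tmp = append([]byte{byte(l)}, tmp...)
	}
	return append([]byte{0x80 | byte(len(tmp))}, tmp...)
}

func main() {
	fmt.Printf("% x\n", derLength(5))   // 05
	fmt.Printf("% x\n", derLength(300)) // 82 01 2c
}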
+
+func (b *Builder) add(bytes ...byte) {
+ if b.err != nil {
+ return
+ }
+ if b.child != nil {
+ panic("cryptobyte: attempted write while child is pending")
+ }
+ if len(b.result)+len(bytes) < len(bytes) {
+ b.err = errors.New("cryptobyte: length overflow")
+ }
+ if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) {
+ b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer")
+ return
+ }
+ b.result = append(b.result, bytes...)
+}
+
+// Unwrite rolls back n bytes written directly to the Builder. An attempt by a
+// child builder passed to a continuation to unwrite bytes from its parent will
+// panic.
+func (b *Builder) Unwrite(n int) {
+ if b.err != nil {
+ return
+ }
+ if b.child != nil {
+ panic("cryptobyte: attempted unwrite while child is pending")
+ }
+ length := len(b.result) - b.pendingLenLen - b.offset
+ if length < 0 {
+ panic("cryptobyte: internal error")
+ }
+ if n > length {
+ panic("cryptobyte: attempted to unwrite more than was written")
+ }
+ b.result = b.result[:len(b.result)-n]
+}
+
+// A MarshalingValue marshals itself into a Builder.
+type MarshalingValue interface {
+ // Marshal is called by Builder.AddValue. It receives a pointer to a builder
+ // to marshal itself into. It may return an error that occurred during
+ // marshaling, such as unset or invalid values.
+ Marshal(b *Builder) error
+}
+
+// AddValue calls Marshal on v, passing a pointer to the builder to append to.
+// If Marshal returns an error, it is set on the Builder so that subsequent
+// appends don't have an effect.
+func (b *Builder) AddValue(v MarshalingValue) {
+ err := v.Marshal(b)
+ if err != nil {
+ b.err = err
+ }
+}
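
End-to-end usage of the Builder — a minimal sketch assuming the exported API above; the 16-bit length prefix is filled in by flushChild once the continuation returns:

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	var b cryptobyte.Builder
	b.AddUint16(0x0301)
	b.AddUint16LengthPrefixed(func(child *cryptobyte.Builder) {
		child.AddBytes([]byte("hi"))
	})
	out, err := b.Bytes()
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", out) // 03 01 00 02 68 69
}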
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/cryptobyte/string.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/cryptobyte/string.go
index 589d297e6b..589d297e6b 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/cryptobyte/string.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/cryptobyte/string.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/curve25519.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/curve25519.go
index cda3fdd354..cda3fdd354 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/curve25519.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/curve25519.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go
index ca841ad99e..ca841ad99e 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go
diff --git a/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go
new file mode 100644
index 0000000000..edcf163c4e
--- /dev/null
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go
@@ -0,0 +1,16 @@
+// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego
+// +build amd64,gc,!purego
+
+package field
+
+// feMul sets out = a * b. It works like feMulGeneric.
+//
+//go:noescape
+func feMul(out *Element, a *Element, b *Element)
+
+// feSquare sets out = a * a. It works like feSquareGeneric.
+//
+//go:noescape
+func feSquare(out *Element, a *Element)
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s
index 293f013c94..293f013c94 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go
index 234a5b2e5d..234a5b2e5d 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go
index 7b5b78cbd6..7b5b78cbd6 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/hkdf/hkdf.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/hkdf/hkdf.go
index dda3f143be..dda3f143be 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/hkdf/hkdf.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/hkdf/hkdf.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
index ed52b3418a..ed52b3418a 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go
index 4aaea810a2..4aaea810a2 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
index 6d522333f2..6d522333f2 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
index 1d74f0f881..1d74f0f881 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
diff --git a/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
new file mode 100644
index 0000000000..e041da5ea3
--- /dev/null
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
@@ -0,0 +1,309 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides the generic implementation of Sum and MAC. Other files
+// might provide optimized assembly implementations of some of this code.
+
+package poly1305
+
+import "encoding/binary"
+
+// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
+// for a 64-byte message is approximately
+//
+// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5
+//
+// for some secret r and s. It can be computed sequentially like
+//
+// for len(msg) > 0:
+// h += read(msg, 16)
+// h *= r
+// h %= 2¹³⁰ - 5
+// return h + s
+//
+// All the complexity is about doing performant constant-time math on numbers
+// larger than any available numeric type.
+
+func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
+ h := newMACGeneric(key)
+ h.Write(msg)
+ h.Sum(out)
+}
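
The recurrence in the comment above can be cross-checked against a direct math/big transcription. A hedged reference sketch, not constant time and purely for checking the limb arithmetic below; poly1305Ref and leBytesToInt are hypothetical names:

package main

import (
	"fmt"
	"math/big"
)

// leBytesToInt interprets b as a little-endian unsigned integer.
func leBytesToInt(b []byte) *big.Int {
	be := make([]byte, len(b))
	for i := range b {
		be[len(b)-1-i] = b[i]
	}
	return new(big.Int).SetBytes(be)
}

// poly1305Ref computes h = (h + m_i) * r mod 2¹³⁰-5 per 16-byte chunk,
// then tag = h + s mod 2¹²⁸, exactly as the comment above describes.
func poly1305Ref(msg []byte, key [32]byte) []byte {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))
	clamp, _ := new(big.Int).SetString("0ffffffc0ffffffc0ffffffc0fffffff", 16)
	r := new(big.Int).And(leBytesToInt(key[:16]), clamp) // rMask0/rMask1 combined
	s := leBytesToInt(key[16:])
	h := new(big.Int)
	for len(msg) > 0 {
		n := 16
		if len(msg) < n {
			n = len(msg)
		}
		block := leBytesToInt(msg[:n])
		block.Add(block, new(big.Int).Lsh(big.NewInt(1), uint(8*n))) // the high bit
		h.Add(h, block).Mul(h, r).Mod(h, p)
		msg = msg[n:]
	}
	h.Add(h, s)
	tag := make([]byte, 16) // low 128 bits of h, little-endian
	hb := h.Bytes()         // big-endian
	for i := 0; i < len(hb) && i < 16; i++ {
		tag[i] = hb[len(hb)-1-i]
	}
	return tag
}

func main() {
	var key [32]byte // all-zero key: r == 0 and s == 0, so the tag is zero
	fmt.Printf("% x\n", poly1305Ref([]byte("message"), key))
}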
+
+func newMACGeneric(key *[32]byte) macGeneric {
+ m := macGeneric{}
+ initialize(key, &m.macState)
+ return m
+}
+
+// macState holds numbers in saturated 64-bit little-endian limbs. That is,
+// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸.
+type macState struct {
+ // h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but
+ // can grow larger during and after rounds. It must, however, remain below
+ // 2 * (2¹³⁰ - 5).
+ h [3]uint64
+ // r and s are the private key components.
+ r [2]uint64
+ s [2]uint64
+}
+
+type macGeneric struct {
+ macState
+
+ buffer [TagSize]byte
+ offset int
+}
+
+// Write splits the incoming message into TagSize chunks, and passes them to
+// update. It buffers incomplete chunks.
+func (h *macGeneric) Write(p []byte) (int, error) {
+ nn := len(p)
+ if h.offset > 0 {
+ n := copy(h.buffer[h.offset:], p)
+ if h.offset+n < TagSize {
+ h.offset += n
+ return nn, nil
+ }
+ p = p[n:]
+ h.offset = 0
+ updateGeneric(&h.macState, h.buffer[:])
+ }
+ if n := len(p) - (len(p) % TagSize); n > 0 {
+ updateGeneric(&h.macState, p[:n])
+ p = p[n:]
+ }
+ if len(p) > 0 {
+ h.offset += copy(h.buffer[h.offset:], p)
+ }
+ return nn, nil
+}
+
+// Sum flushes the last incomplete chunk from the buffer, if any, and generates
+// the MAC output. It does not modify its state, in order to allow for multiple
+// calls to Sum, even if no Write is allowed after Sum.
+func (h *macGeneric) Sum(out *[TagSize]byte) {
+ state := h.macState
+ if h.offset > 0 {
+ updateGeneric(&state, h.buffer[:h.offset])
+ }
+ finalize(out, &state.h, &state.s)
+}
+
+// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It
+// clears some bits of the secret coefficient to make it possible to implement
+// multiplication more efficiently.
+const (
+ rMask0 = 0x0FFFFFFC0FFFFFFF
+ rMask1 = 0x0FFFFFFC0FFFFFFC
+)
+
+// initialize loads the 256-bit key into the two 128-bit secret values r and s.
+func initialize(key *[32]byte, m *macState) {
+ m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
+ m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
+ m.s[0] = binary.LittleEndian.Uint64(key[16:24])
+ m.s[1] = binary.LittleEndian.Uint64(key[24:32])
+}
+
+// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
+// bits.Mul64 and bits.Add64 intrinsics.
+type uint128 struct {
+ lo, hi uint64
+}
+
+func mul64(a, b uint64) uint128 {
+ hi, lo := bitsMul64(a, b)
+ return uint128{lo, hi}
+}
+
+func add128(a, b uint128) uint128 {
+ lo, c := bitsAdd64(a.lo, b.lo, 0)
+ hi, c := bitsAdd64(a.hi, b.hi, c)
+ if c != 0 {
+ panic("poly1305: unexpected overflow")
+ }
+ return uint128{lo, hi}
+}
+
+func shiftRightBy2(a uint128) uint128 {
+ a.lo = a.lo>>2 | (a.hi&3)<<62
+ a.hi = a.hi >> 2
+ return a
+}
+
+// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of
+// 128 bits of message, it computes
+//
+// h₊ = (h + m) * r mod 2¹³⁰ - 5
+//
+// If the msg length is not a multiple of TagSize, it assumes the last
+// incomplete chunk is the final one.
+func updateGeneric(state *macState, msg []byte) {
+ h0, h1, h2 := state.h[0], state.h[1], state.h[2]
+ r0, r1 := state.r[0], state.r[1]
+
+ for len(msg) > 0 {
+ var c uint64
+
+ // For the first step, h + m, we use a chain of bits.Add64 intrinsics.
+ // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially
+ // reduced at the end of the multiplication below.
+ //
+ // The spec requires us to set a bit just above the message size, not to
+ // hide leading zeroes. For full chunks, that's 1 << 128, so we can just
+ // add 1 to the most significant (2¹²⁸) limb, h2.
+ if len(msg) >= TagSize {
+ h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
+ h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
+ h2 += c + 1
+
+ msg = msg[TagSize:]
+ } else {
+ var buf [TagSize]byte
+ copy(buf[:], msg)
+ buf[len(msg)] = 1
+
+ h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
+ h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
+ h2 += c
+
+ msg = nil
+ }
+
+ // Multiplication of big number limbs is similar to elementary school
+ // columnar multiplication. Instead of digits, there are 64-bit limbs.
+ //
+ // We are multiplying a 3 limbs number, h, by a 2 limbs number, r.
+ //
+ // h2 h1 h0 x
+ // r1 r0 =
+ // ----------------
+ // h2r0 h1r0 h0r0 <-- individual 128-bit products
+ // + h2r1 h1r1 h0r1
+ // ------------------------
+ // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs
+ // ------------------------
+ // m3.hi m2.hi m1.hi m0.hi <-- carry propagation
+ // + m3.lo m2.lo m1.lo m0.lo
+ // -------------------------------
+ // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs
+ //
+ // The main difference from pen-and-paper multiplication is that we do
+ // carry propagation in a separate step, as if we wrote two digit sums
+ // at first (the 128-bit limbs), and then carried the tens all at once.
+
+ h0r0 := mul64(h0, r0)
+ h1r0 := mul64(h1, r0)
+ h2r0 := mul64(h2, r0)
+ h0r1 := mul64(h0, r1)
+ h1r1 := mul64(h1, r1)
+ h2r1 := mul64(h2, r1)
+
+ // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their
+ // top 4 bits cleared by rMask{0,1}, we know that their product is not going
+ // to overflow 64 bits, so we can ignore the high part of the products.
+ //
+ // This also means that the product doesn't have a fifth limb (t4).
+ if h2r0.hi != 0 {
+ panic("poly1305: unexpected overflow")
+ }
+ if h2r1.hi != 0 {
+ panic("poly1305: unexpected overflow")
+ }
+
+ m0 := h0r0
+ m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again
+ m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1.
+ m3 := h2r1
+
+ t0 := m0.lo
+ t1, c := bitsAdd64(m1.lo, m0.hi, 0)
+ t2, c := bitsAdd64(m2.lo, m1.hi, c)
+ t3, _ := bitsAdd64(m3.lo, m2.hi, c)
+
+ // Now we have the result as 4 64-bit limbs, and we need to reduce it
+ // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do
+ // a cheap partial reduction according to the reduction identity
+ //
+ // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5
+ //
+ // because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is
+ // likely to be larger than 2¹³⁰ - 5, but still small enough to fit the
+ // assumptions we make about h in the rest of the code.
+ //
+ // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23
+
+ // We split the final result at the 2¹³⁰ mark into h and cc, the carry.
+ // Note that the carry bits are effectively shifted left by 2, in other
+ // words, cc = c * 4 for the c in the reduction identity.
+ h0, h1, h2 = t0, t1, t2&maskLow2Bits
+ cc := uint128{t2 & maskNotLow2Bits, t3}
+
+ // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c.
+
+ h0, c = bitsAdd64(h0, cc.lo, 0)
+ h1, c = bitsAdd64(h1, cc.hi, c)
+ h2 += c
+
+ cc = shiftRightBy2(cc)
+
+ h0, c = bitsAdd64(h0, cc.lo, 0)
+ h1, c = bitsAdd64(h1, cc.hi, c)
+ h2 += c
+
+ // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most
+ //
+ // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1
+ }
+
+ state.h[0], state.h[1], state.h[2] = h0, h1, h2
+}
+
+const (
+ maskLow2Bits uint64 = 0x0000000000000003
+ maskNotLow2Bits uint64 = ^maskLow2Bits
+)
+
+// select64 returns x if v == 1 and y if v == 0, in constant time.
+func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }
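
Why this is branch-free: for v == 1, v-1 is zero so ^(v-1) is all ones and x survives; for v == 0, v-1 is all ones and y survives. A tiny self-contained check, re-declaring the unexported helper:

package main

import "fmt"

func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }

func main() {
	fmt.Println(select64(1, 7, 9)) // 7
	fmt.Println(select64(0, 7, 9)) // 9
}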
+
+// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order.
+const (
+ p0 = 0xFFFFFFFFFFFFFFFB
+ p1 = 0xFFFFFFFFFFFFFFFF
+ p2 = 0x0000000000000003
+)
+
+// finalize completes the modular reduction of h and computes
+//
+// out = h + s mod 2¹²⁸
+func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
+ h0, h1, h2 := h[0], h[1], h[2]
+
+ // After the partial reduction in updateGeneric, h might be more than
+ // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction
+ // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the
+ // result if the subtraction underflows, and t otherwise.
+
+ hMinusP0, b := bitsSub64(h0, p0, 0)
+ hMinusP1, b := bitsSub64(h1, p1, b)
+ _, b = bitsSub64(h2, p2, b)
+
+ // h = h if h < p else h - p
+ h0 = select64(b, h0, hMinusP0)
+ h1 = select64(b, h1, hMinusP1)
+
+ // Finally, we compute the last Poly1305 step
+ //
+ // tag = h + s mod 2¹²⁸
+ //
+ // by just doing a wide addition with the 128 low bits of h and discarding
+ // the overflow.
+ h0, c := bitsAdd64(h0, s[0], 0)
+ h1, _ = bitsAdd64(h1, s[1], c)
+
+ binary.LittleEndian.PutUint64(out[0:8], h0)
+ binary.LittleEndian.PutUint64(out[8:16], h1)
+}
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/subtle/aliasing.go
index 4fad24f8dc..4fad24f8dc 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/crypto/internal/subtle/aliasing.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/crypto/internal/subtle/aliasing.go
diff --git a/contrib/go/_std_1.19/src/vendor/golang.org/x/net/dns/dnsmessage/message.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/dns/dnsmessage/message.go
new file mode 100644
index 0000000000..0cdf89f9ff
--- /dev/null
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/dns/dnsmessage/message.go
@@ -0,0 +1,2665 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package dnsmessage provides a mostly RFC 1035 compliant implementation of
+// DNS message packing and unpacking.
+//
+// The package also supports messages with Extension Mechanisms for DNS
+// (EDNS(0)) as defined in RFC 6891.
+//
+// This implementation is designed to minimize heap allocations and avoid
+// unnecessary packing and unpacking as much as possible.
+package dnsmessage
+
+import (
+ "errors"
+)
+
+// Message formats
+
+// A Type is a type of DNS request and response.
+type Type uint16
+
+const (
+ // ResourceHeader.Type and Question.Type
+ TypeA Type = 1
+ TypeNS Type = 2
+ TypeCNAME Type = 5
+ TypeSOA Type = 6
+ TypePTR Type = 12
+ TypeMX Type = 15
+ TypeTXT Type = 16
+ TypeAAAA Type = 28
+ TypeSRV Type = 33
+ TypeOPT Type = 41
+
+ // Question.Type
+ TypeWKS Type = 11
+ TypeHINFO Type = 13
+ TypeMINFO Type = 14
+ TypeAXFR Type = 252
+ TypeALL Type = 255
+)
+
+var typeNames = map[Type]string{
+ TypeA: "TypeA",
+ TypeNS: "TypeNS",
+ TypeCNAME: "TypeCNAME",
+ TypeSOA: "TypeSOA",
+ TypePTR: "TypePTR",
+ TypeMX: "TypeMX",
+ TypeTXT: "TypeTXT",
+ TypeAAAA: "TypeAAAA",
+ TypeSRV: "TypeSRV",
+ TypeOPT: "TypeOPT",
+ TypeWKS: "TypeWKS",
+ TypeHINFO: "TypeHINFO",
+ TypeMINFO: "TypeMINFO",
+ TypeAXFR: "TypeAXFR",
+ TypeALL: "TypeALL",
+}
+
+// String implements fmt.Stringer.String.
+func (t Type) String() string {
+ if n, ok := typeNames[t]; ok {
+ return n
+ }
+ return printUint16(uint16(t))
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (t Type) GoString() string {
+ if n, ok := typeNames[t]; ok {
+ return "dnsmessage." + n
+ }
+ return printUint16(uint16(t))
+}
+
+// A Class is a type of network.
+type Class uint16
+
+const (
+ // ResourceHeader.Class and Question.Class
+ ClassINET Class = 1
+ ClassCSNET Class = 2
+ ClassCHAOS Class = 3
+ ClassHESIOD Class = 4
+
+ // Question.Class
+ ClassANY Class = 255
+)
+
+var classNames = map[Class]string{
+ ClassINET: "ClassINET",
+ ClassCSNET: "ClassCSNET",
+ ClassCHAOS: "ClassCHAOS",
+ ClassHESIOD: "ClassHESIOD",
+ ClassANY: "ClassANY",
+}
+
+// String implements fmt.Stringer.String.
+func (c Class) String() string {
+ if n, ok := classNames[c]; ok {
+ return n
+ }
+ return printUint16(uint16(c))
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (c Class) GoString() string {
+ if n, ok := classNames[c]; ok {
+ return "dnsmessage." + n
+ }
+ return printUint16(uint16(c))
+}
+
+// An OpCode is a DNS operation code.
+type OpCode uint16
+
+// GoString implements fmt.GoStringer.GoString.
+func (o OpCode) GoString() string {
+ return printUint16(uint16(o))
+}
+
+// An RCode is a DNS response status code.
+type RCode uint16
+
+// Header.RCode values.
+const (
+ RCodeSuccess RCode = 0 // NoError
+ RCodeFormatError RCode = 1 // FormErr
+ RCodeServerFailure RCode = 2 // ServFail
+ RCodeNameError RCode = 3 // NXDomain
+ RCodeNotImplemented RCode = 4 // NotImp
+ RCodeRefused RCode = 5 // Refused
+)
+
+var rCodeNames = map[RCode]string{
+ RCodeSuccess: "RCodeSuccess",
+ RCodeFormatError: "RCodeFormatError",
+ RCodeServerFailure: "RCodeServerFailure",
+ RCodeNameError: "RCodeNameError",
+ RCodeNotImplemented: "RCodeNotImplemented",
+ RCodeRefused: "RCodeRefused",
+}
+
+// String implements fmt.Stringer.String.
+func (r RCode) String() string {
+ if n, ok := rCodeNames[r]; ok {
+ return n
+ }
+ return printUint16(uint16(r))
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (r RCode) GoString() string {
+ if n, ok := rCodeNames[r]; ok {
+ return "dnsmessage." + n
+ }
+ return printUint16(uint16(r))
+}
+
+func printPaddedUint8(i uint8) string {
+ b := byte(i)
+ return string([]byte{
+ b/100 + '0',
+ b/10%10 + '0',
+ b%10 + '0',
+ })
+}
+
+func printUint8Bytes(buf []byte, i uint8) []byte {
+ b := byte(i)
+ if i >= 100 {
+ buf = append(buf, b/100+'0')
+ }
+ if i >= 10 {
+ buf = append(buf, b/10%10+'0')
+ }
+ return append(buf, b%10+'0')
+}
+
+func printByteSlice(b []byte) string {
+ if len(b) == 0 {
+ return ""
+ }
+ buf := make([]byte, 0, 5*len(b))
+ buf = printUint8Bytes(buf, uint8(b[0]))
+ for _, n := range b[1:] {
+ buf = append(buf, ',', ' ')
+ buf = printUint8Bytes(buf, uint8(n))
+ }
+ return string(buf)
+}
+
+const hexDigits = "0123456789abcdef"
+
+func printString(str []byte) string {
+ buf := make([]byte, 0, len(str))
+ for i := 0; i < len(str); i++ {
+ c := str[i]
+ if c == '.' || c == '-' || c == ' ' ||
+ 'A' <= c && c <= 'Z' ||
+ 'a' <= c && c <= 'z' ||
+ '0' <= c && c <= '9' {
+ buf = append(buf, c)
+ continue
+ }
+
+ upper := c >> 4
+ lower := (c << 4) >> 4
+ buf = append(
+ buf,
+ '\\',
+ 'x',
+ hexDigits[upper],
+ hexDigits[lower],
+ )
+ }
+ return string(buf)
+}
+
+func printUint16(i uint16) string {
+ return printUint32(uint32(i))
+}
+
+func printUint32(i uint32) string {
+ // Max value is 4294967295.
+ buf := make([]byte, 10)
+ for b, d := buf, uint32(1000000000); d > 0; d /= 10 {
+ b[0] = byte(i/d%10 + '0')
+ if b[0] == '0' && len(b) == len(buf) && len(buf) > 1 {
+ buf = buf[1:]
+ }
+ b = b[1:]
+ i %= d
+ }
+ return string(buf)
+}
+
+func printBool(b bool) string {
+ if b {
+ return "true"
+ }
+ return "false"
+}
+
+var (
+ // ErrNotStarted indicates that the prerequisite information isn't
+ // available yet because the previous records haven't been appropriately
+ // parsed, skipped or finished.
+ ErrNotStarted = errors.New("parsing/packing of this type isn't available yet")
+
+	// ErrSectionDone indicates that all records in the section have been
+ // parsed or finished.
+ ErrSectionDone = errors.New("parsing/packing of this section has completed")
+
+ errBaseLen = errors.New("insufficient data for base length type")
+ errCalcLen = errors.New("insufficient data for calculated length type")
+ errReserved = errors.New("segment prefix is reserved")
+ errTooManyPtr = errors.New("too many pointers (>10)")
+ errInvalidPtr = errors.New("invalid pointer")
+ errNilResourceBody = errors.New("nil resource body")
+ errResourceLen = errors.New("insufficient data for resource body length")
+ errSegTooLong = errors.New("segment length too long")
+ errZeroSegLen = errors.New("zero length segment")
+ errResTooLong = errors.New("resource length too long")
+ errTooManyQuestions = errors.New("too many Questions to pack (>65535)")
+ errTooManyAnswers = errors.New("too many Answers to pack (>65535)")
+ errTooManyAuthorities = errors.New("too many Authorities to pack (>65535)")
+ errTooManyAdditionals = errors.New("too many Additionals to pack (>65535)")
+ errNonCanonicalName = errors.New("name is not in canonical format (it must end with a .)")
+ errStringTooLong = errors.New("character string exceeds maximum length (255)")
+ errCompressedSRV = errors.New("compressed name in SRV resource data")
+)
+
+// Internal constants.
+const (
+ // packStartingCap is the default initial buffer size allocated during
+ // packing.
+ //
+ // The starting capacity doesn't matter too much, but most DNS responses
+ // will be <= 512 bytes, as that is the limit for DNS over UDP.
+ packStartingCap = 512
+
+ // uint16Len is the length (in bytes) of a uint16.
+ uint16Len = 2
+
+ // uint32Len is the length (in bytes) of a uint32.
+ uint32Len = 4
+
+ // headerLen is the length (in bytes) of a DNS header.
+ //
+ // A header consists of 6 uint16s with no padding.
+ headerLen = 6 * uint16Len
+)
+
+type nestedError struct {
+ // s is the current level's error message.
+ s string
+
+ // err is the nested error.
+ err error
+}
+
+// Error implements the error interface.
+func (e *nestedError) Error() string {
+ return e.s + ": " + e.err.Error()
+}
+
+// Header is a representation of a DNS message header.
+type Header struct {
+ ID uint16
+ Response bool
+ OpCode OpCode
+ Authoritative bool
+ Truncated bool
+ RecursionDesired bool
+ RecursionAvailable bool
+ RCode RCode
+}
+
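+// pack returns the message ID and the packed flags word (OpCode, RCode and
+// the header bits) for use in the wire-format header.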
+func (m *Header) pack() (id uint16, bits uint16) {
+ id = m.ID
+ bits = uint16(m.OpCode)<<11 | uint16(m.RCode)
+ if m.RecursionAvailable {
+ bits |= headerBitRA
+ }
+ if m.RecursionDesired {
+ bits |= headerBitRD
+ }
+ if m.Truncated {
+ bits |= headerBitTC
+ }
+ if m.Authoritative {
+ bits |= headerBitAA
+ }
+ if m.Response {
+ bits |= headerBitQR
+ }
+ return
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (m *Header) GoString() string {
+ return "dnsmessage.Header{" +
+ "ID: " + printUint16(m.ID) + ", " +
+ "Response: " + printBool(m.Response) + ", " +
+ "OpCode: " + m.OpCode.GoString() + ", " +
+ "Authoritative: " + printBool(m.Authoritative) + ", " +
+ "Truncated: " + printBool(m.Truncated) + ", " +
+ "RecursionDesired: " + printBool(m.RecursionDesired) + ", " +
+ "RecursionAvailable: " + printBool(m.RecursionAvailable) + ", " +
+ "RCode: " + m.RCode.GoString() + "}"
+}
+
+// Message is a representation of a DNS message.
+type Message struct {
+ Header
+ Questions []Question
+ Answers []Resource
+ Authorities []Resource
+ Additionals []Resource
+}
+
+type section uint8
+
+const (
+ sectionNotStarted section = iota
+ sectionHeader
+ sectionQuestions
+ sectionAnswers
+ sectionAuthorities
+ sectionAdditionals
+ sectionDone
+
+ headerBitQR = 1 << 15 // query/response (response=1)
+ headerBitAA = 1 << 10 // authoritative
+ headerBitTC = 1 << 9 // truncated
+ headerBitRD = 1 << 8 // recursion desired
+ headerBitRA = 1 << 7 // recursion available
+)
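+
+// As a rough sketch (per RFC 1035 section 4.1.1), the flag bits above live in
+// the second uint16 of the header, together with the OpCode and RCode:
+//
+//	QR | OpCode(4) | AA | TC | RD | RA | Z(3) | RCode(4)
+//
+// so a plain recursive query, for example, packs its flags word as
+// uint16(opCode)<<11 | headerBitRD, with all other bits left clear.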
+
+var sectionNames = map[section]string{
+ sectionHeader: "header",
+ sectionQuestions: "Question",
+ sectionAnswers: "Answer",
+ sectionAuthorities: "Authority",
+ sectionAdditionals: "Additional",
+}
+
+// header is the wire format for a DNS message header.
+type header struct {
+ id uint16
+ bits uint16
+ questions uint16
+ answers uint16
+ authorities uint16
+ additionals uint16
+}
+
+func (h *header) count(sec section) uint16 {
+ switch sec {
+ case sectionQuestions:
+ return h.questions
+ case sectionAnswers:
+ return h.answers
+ case sectionAuthorities:
+ return h.authorities
+ case sectionAdditionals:
+ return h.additionals
+ }
+ return 0
+}
+
+// pack appends the wire format of the header to msg.
+func (h *header) pack(msg []byte) []byte {
+ msg = packUint16(msg, h.id)
+ msg = packUint16(msg, h.bits)
+ msg = packUint16(msg, h.questions)
+ msg = packUint16(msg, h.answers)
+ msg = packUint16(msg, h.authorities)
+ return packUint16(msg, h.additionals)
+}
+
+func (h *header) unpack(msg []byte, off int) (int, error) {
+ newOff := off
+ var err error
+ if h.id, newOff, err = unpackUint16(msg, newOff); err != nil {
+ return off, &nestedError{"id", err}
+ }
+ if h.bits, newOff, err = unpackUint16(msg, newOff); err != nil {
+ return off, &nestedError{"bits", err}
+ }
+ if h.questions, newOff, err = unpackUint16(msg, newOff); err != nil {
+ return off, &nestedError{"questions", err}
+ }
+ if h.answers, newOff, err = unpackUint16(msg, newOff); err != nil {
+ return off, &nestedError{"answers", err}
+ }
+ if h.authorities, newOff, err = unpackUint16(msg, newOff); err != nil {
+ return off, &nestedError{"authorities", err}
+ }
+ if h.additionals, newOff, err = unpackUint16(msg, newOff); err != nil {
+ return off, &nestedError{"additionals", err}
+ }
+ return newOff, nil
+}
+
+func (h *header) header() Header {
+ return Header{
+ ID: h.id,
+ Response: (h.bits & headerBitQR) != 0,
+ OpCode: OpCode(h.bits>>11) & 0xF,
+ Authoritative: (h.bits & headerBitAA) != 0,
+ Truncated: (h.bits & headerBitTC) != 0,
+ RecursionDesired: (h.bits & headerBitRD) != 0,
+ RecursionAvailable: (h.bits & headerBitRA) != 0,
+ RCode: RCode(h.bits & 0xF),
+ }
+}
+
+// A Resource is a DNS resource record.
+type Resource struct {
+ Header ResourceHeader
+ Body ResourceBody
+}
+
+func (r *Resource) GoString() string {
+ return "dnsmessage.Resource{" +
+ "Header: " + r.Header.GoString() +
+ ", Body: &" + r.Body.GoString() +
+ "}"
+}
+
+// A ResourceBody is a DNS resource record minus the header.
+type ResourceBody interface {
+ // pack packs a Resource except for its header.
+ pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error)
+
+ // realType returns the actual type of the Resource. This is used to
+ // fill in the header Type field.
+ realType() Type
+
+ // GoString implements fmt.GoStringer.GoString.
+ GoString() string
+}
+
+// pack appends the wire format of the Resource to msg.
+func (r *Resource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+ if r.Body == nil {
+ return msg, errNilResourceBody
+ }
+ oldMsg := msg
+ r.Header.Type = r.Body.realType()
+ msg, lenOff, err := r.Header.pack(msg, compression, compressionOff)
+ if err != nil {
+ return msg, &nestedError{"ResourceHeader", err}
+ }
+ preLen := len(msg)
+ msg, err = r.Body.pack(msg, compression, compressionOff)
+ if err != nil {
+ return msg, &nestedError{"content", err}
+ }
+ if err := r.Header.fixLen(msg, lenOff, preLen); err != nil {
+ return oldMsg, err
+ }
+ return msg, nil
+}
+
+// A Parser allows incrementally parsing a DNS message.
+//
+// When parsing is started, the Header is parsed. Next, each Question can be
+// either parsed or skipped. Alternatively, all Questions can be skipped at
+// once. When all Questions have been parsed, attempting to parse or skip
+// another Question will return ErrSectionDone. After all Questions have been
+// either parsed or skipped, all
+// Answers, Authorities and Additionals can be either parsed or skipped in the
+// same way, and each type of Resource must be fully parsed or skipped before
+// proceeding to the next type of Resource.
+//
+// Note that there is no requirement to fully skip or parse the message.
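+//
+// A minimal usage sketch (assuming msg holds a valid DNS message):
+//
+//	var p Parser
+//	h, err := p.Start(msg)      // h is the parsed Header; check err
+//	err = p.SkipAllQuestions()  // check err
+//	for {
+//		a, err := p.Answer()
+//		if err == ErrSectionDone {
+//			break
+//		}
+//		// check err, then use a.Header and a.Body
+//	}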
+type Parser struct {
+ msg []byte
+ header header
+
+ section section
+ off int
+ index int
+ resHeaderValid bool
+ resHeader ResourceHeader
+}
+
+// Start parses the header and enables the parsing of Questions.
+func (p *Parser) Start(msg []byte) (Header, error) {
+ if p.msg != nil {
+ *p = Parser{}
+ }
+ p.msg = msg
+ var err error
+ if p.off, err = p.header.unpack(msg, 0); err != nil {
+ return Header{}, &nestedError{"unpacking header", err}
+ }
+ p.section = sectionQuestions
+ return p.header.header(), nil
+}
+
+func (p *Parser) checkAdvance(sec section) error {
+ if p.section < sec {
+ return ErrNotStarted
+ }
+ if p.section > sec {
+ return ErrSectionDone
+ }
+ p.resHeaderValid = false
+ if p.index == int(p.header.count(sec)) {
+ p.index = 0
+ p.section++
+ return ErrSectionDone
+ }
+ return nil
+}
+
+func (p *Parser) resource(sec section) (Resource, error) {
+ var r Resource
+ var err error
+ r.Header, err = p.resourceHeader(sec)
+ if err != nil {
+ return r, err
+ }
+ p.resHeaderValid = false
+ r.Body, p.off, err = unpackResourceBody(p.msg, p.off, r.Header)
+ if err != nil {
+ return Resource{}, &nestedError{"unpacking " + sectionNames[sec], err}
+ }
+ p.index++
+ return r, nil
+}
+
+func (p *Parser) resourceHeader(sec section) (ResourceHeader, error) {
+ if p.resHeaderValid {
+ return p.resHeader, nil
+ }
+ if err := p.checkAdvance(sec); err != nil {
+ return ResourceHeader{}, err
+ }
+ var hdr ResourceHeader
+ off, err := hdr.unpack(p.msg, p.off)
+ if err != nil {
+ return ResourceHeader{}, err
+ }
+ p.resHeaderValid = true
+ p.resHeader = hdr
+ p.off = off
+ return hdr, nil
+}
+
+func (p *Parser) skipResource(sec section) error {
+ if p.resHeaderValid {
+ newOff := p.off + int(p.resHeader.Length)
+ if newOff > len(p.msg) {
+ return errResourceLen
+ }
+ p.off = newOff
+ p.resHeaderValid = false
+ p.index++
+ return nil
+ }
+ if err := p.checkAdvance(sec); err != nil {
+ return err
+ }
+ var err error
+ p.off, err = skipResource(p.msg, p.off)
+ if err != nil {
+ return &nestedError{"skipping: " + sectionNames[sec], err}
+ }
+ p.index++
+ return nil
+}
+
+// Question parses a single Question.
+func (p *Parser) Question() (Question, error) {
+ if err := p.checkAdvance(sectionQuestions); err != nil {
+ return Question{}, err
+ }
+ var name Name
+ off, err := name.unpack(p.msg, p.off)
+ if err != nil {
+ return Question{}, &nestedError{"unpacking Question.Name", err}
+ }
+ typ, off, err := unpackType(p.msg, off)
+ if err != nil {
+ return Question{}, &nestedError{"unpacking Question.Type", err}
+ }
+ class, off, err := unpackClass(p.msg, off)
+ if err != nil {
+ return Question{}, &nestedError{"unpacking Question.Class", err}
+ }
+ p.off = off
+ p.index++
+ return Question{name, typ, class}, nil
+}
+
+// AllQuestions parses all Questions.
+func (p *Parser) AllQuestions() ([]Question, error) {
+ // Multiple questions are valid according to the spec,
+ // but servers don't actually support them. There will
+ // be at most one question here.
+ //
+ // Do not pre-allocate based on info in p.header, since
+ // the data is untrusted.
+ qs := []Question{}
+ for {
+ q, err := p.Question()
+ if err == ErrSectionDone {
+ return qs, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ qs = append(qs, q)
+ }
+}
+
+// SkipQuestion skips a single Question.
+func (p *Parser) SkipQuestion() error {
+ if err := p.checkAdvance(sectionQuestions); err != nil {
+ return err
+ }
+ off, err := skipName(p.msg, p.off)
+ if err != nil {
+ return &nestedError{"skipping Question Name", err}
+ }
+ if off, err = skipType(p.msg, off); err != nil {
+ return &nestedError{"skipping Question Type", err}
+ }
+ if off, err = skipClass(p.msg, off); err != nil {
+ return &nestedError{"skipping Question Class", err}
+ }
+ p.off = off
+ p.index++
+ return nil
+}
+
+// SkipAllQuestions skips all Questions.
+func (p *Parser) SkipAllQuestions() error {
+ for {
+ if err := p.SkipQuestion(); err == ErrSectionDone {
+ return nil
+ } else if err != nil {
+ return err
+ }
+ }
+}
+
+// AnswerHeader parses a single Answer ResourceHeader.
+func (p *Parser) AnswerHeader() (ResourceHeader, error) {
+ return p.resourceHeader(sectionAnswers)
+}
+
+// Answer parses a single Answer Resource.
+func (p *Parser) Answer() (Resource, error) {
+ return p.resource(sectionAnswers)
+}
+
+// AllAnswers parses all Answer Resources.
+func (p *Parser) AllAnswers() ([]Resource, error) {
+ // The most common query is for A/AAAA, which usually returns
+ // a handful of IPs.
+ //
+ // Pre-allocate up to a certain limit, since p.header is
+ // untrusted data.
+ n := int(p.header.answers)
+ if n > 20 {
+ n = 20
+ }
+ as := make([]Resource, 0, n)
+ for {
+ a, err := p.Answer()
+ if err == ErrSectionDone {
+ return as, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ as = append(as, a)
+ }
+}
+
+// SkipAnswer skips a single Answer Resource.
+func (p *Parser) SkipAnswer() error {
+ return p.skipResource(sectionAnswers)
+}
+
+// SkipAllAnswers skips all Answer Resources.
+func (p *Parser) SkipAllAnswers() error {
+ for {
+ if err := p.SkipAnswer(); err == ErrSectionDone {
+ return nil
+ } else if err != nil {
+ return err
+ }
+ }
+}
+
+// AuthorityHeader parses a single Authority ResourceHeader.
+func (p *Parser) AuthorityHeader() (ResourceHeader, error) {
+ return p.resourceHeader(sectionAuthorities)
+}
+
+// Authority parses a single Authority Resource.
+func (p *Parser) Authority() (Resource, error) {
+ return p.resource(sectionAuthorities)
+}
+
+// AllAuthorities parses all Authority Resources.
+func (p *Parser) AllAuthorities() ([]Resource, error) {
+ // Authorities contains SOA in case of NXDOMAIN and friends,
+ // otherwise it is empty.
+ //
+ // Pre-allocate up to a certain limit, since p.header is
+ // untrusted data.
+ n := int(p.header.authorities)
+ if n > 10 {
+ n = 10
+ }
+ as := make([]Resource, 0, n)
+ for {
+ a, err := p.Authority()
+ if err == ErrSectionDone {
+ return as, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ as = append(as, a)
+ }
+}
+
+// SkipAuthority skips a single Authority Resource.
+func (p *Parser) SkipAuthority() error {
+ return p.skipResource(sectionAuthorities)
+}
+
+// SkipAllAuthorities skips all Authority Resources.
+func (p *Parser) SkipAllAuthorities() error {
+ for {
+ if err := p.SkipAuthority(); err == ErrSectionDone {
+ return nil
+ } else if err != nil {
+ return err
+ }
+ }
+}
+
+// AdditionalHeader parses a single Additional ResourceHeader.
+func (p *Parser) AdditionalHeader() (ResourceHeader, error) {
+ return p.resourceHeader(sectionAdditionals)
+}
+
+// Additional parses a single Additional Resource.
+func (p *Parser) Additional() (Resource, error) {
+ return p.resource(sectionAdditionals)
+}
+
+// AllAdditionals parses all Additional Resources.
+func (p *Parser) AllAdditionals() ([]Resource, error) {
+ // Additionals usually contain OPT, and sometimes A/AAAA
+ // glue records.
+ //
+ // Pre-allocate up to a certain limit, since p.header is
+ // untrusted data.
+ n := int(p.header.additionals)
+ if n > 10 {
+ n = 10
+ }
+ as := make([]Resource, 0, n)
+ for {
+ a, err := p.Additional()
+ if err == ErrSectionDone {
+ return as, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ as = append(as, a)
+ }
+}
+
+// SkipAdditional skips a single Additional Resource.
+func (p *Parser) SkipAdditional() error {
+ return p.skipResource(sectionAdditionals)
+}
+
+// SkipAllAdditionals skips all Additional Resources.
+func (p *Parser) SkipAllAdditionals() error {
+ for {
+ if err := p.SkipAdditional(); err == ErrSectionDone {
+ return nil
+ } else if err != nil {
+ return err
+ }
+ }
+}
+
+// CNAMEResource parses a single CNAMEResource.
+//
+// One of the XXXHeader methods must have been called before calling this
+// method.
+func (p *Parser) CNAMEResource() (CNAMEResource, error) {
+ if !p.resHeaderValid || p.resHeader.Type != TypeCNAME {
+ return CNAMEResource{}, ErrNotStarted
+ }
+ r, err := unpackCNAMEResource(p.msg, p.off)
+ if err != nil {
+ return CNAMEResource{}, err
+ }
+ p.off += int(p.resHeader.Length)
+ p.resHeaderValid = false
+ p.index++
+ return r, nil
+}
+
+// MXResource parses a single MXResource.
+//
+// One of the XXXHeader methods must have been called before calling this
+// method.
+func (p *Parser) MXResource() (MXResource, error) {
+ if !p.resHeaderValid || p.resHeader.Type != TypeMX {
+ return MXResource{}, ErrNotStarted
+ }
+ r, err := unpackMXResource(p.msg, p.off)
+ if err != nil {
+ return MXResource{}, err
+ }
+ p.off += int(p.resHeader.Length)
+ p.resHeaderValid = false
+ p.index++
+ return r, nil
+}
+
+// NSResource parses a single NSResource.
+//
+// One of the XXXHeader methods must have been called before calling this
+// method.
+func (p *Parser) NSResource() (NSResource, error) {
+ if !p.resHeaderValid || p.resHeader.Type != TypeNS {
+ return NSResource{}, ErrNotStarted
+ }
+ r, err := unpackNSResource(p.msg, p.off)
+ if err != nil {
+ return NSResource{}, err
+ }
+ p.off += int(p.resHeader.Length)
+ p.resHeaderValid = false
+ p.index++
+ return r, nil
+}
+
+// PTRResource parses a single PTRResource.
+//
+// One of the XXXHeader methods must have been called before calling this
+// method.
+func (p *Parser) PTRResource() (PTRResource, error) {
+ if !p.resHeaderValid || p.resHeader.Type != TypePTR {
+ return PTRResource{}, ErrNotStarted
+ }
+ r, err := unpackPTRResource(p.msg, p.off)
+ if err != nil {
+ return PTRResource{}, err
+ }
+ p.off += int(p.resHeader.Length)
+ p.resHeaderValid = false
+ p.index++
+ return r, nil
+}
+
+// SOAResource parses a single SOAResource.
+//
+// One of the XXXHeader methods must have been called before calling this
+// method.
+func (p *Parser) SOAResource() (SOAResource, error) {
+ if !p.resHeaderValid || p.resHeader.Type != TypeSOA {
+ return SOAResource{}, ErrNotStarted
+ }
+ r, err := unpackSOAResource(p.msg, p.off)
+ if err != nil {
+ return SOAResource{}, err
+ }
+ p.off += int(p.resHeader.Length)
+ p.resHeaderValid = false
+ p.index++
+ return r, nil
+}
+
+// TXTResource parses a single TXTResource.
+//
+// One of the XXXHeader methods must have been called before calling this
+// method.
+func (p *Parser) TXTResource() (TXTResource, error) {
+ if !p.resHeaderValid || p.resHeader.Type != TypeTXT {
+ return TXTResource{}, ErrNotStarted
+ }
+ r, err := unpackTXTResource(p.msg, p.off, p.resHeader.Length)
+ if err != nil {
+ return TXTResource{}, err
+ }
+ p.off += int(p.resHeader.Length)
+ p.resHeaderValid = false
+ p.index++
+ return r, nil
+}
+
+// SRVResource parses a single SRVResource.
+//
+// One of the XXXHeader methods must have been called before calling this
+// method.
+func (p *Parser) SRVResource() (SRVResource, error) {
+ if !p.resHeaderValid || p.resHeader.Type != TypeSRV {
+ return SRVResource{}, ErrNotStarted
+ }
+ r, err := unpackSRVResource(p.msg, p.off)
+ if err != nil {
+ return SRVResource{}, err
+ }
+ p.off += int(p.resHeader.Length)
+ p.resHeaderValid = false
+ p.index++
+ return r, nil
+}
+
+// AResource parses a single AResource.
+//
+// One of the XXXHeader methods must have been called before calling this
+// method.
+func (p *Parser) AResource() (AResource, error) {
+ if !p.resHeaderValid || p.resHeader.Type != TypeA {
+ return AResource{}, ErrNotStarted
+ }
+ r, err := unpackAResource(p.msg, p.off)
+ if err != nil {
+ return AResource{}, err
+ }
+ p.off += int(p.resHeader.Length)
+ p.resHeaderValid = false
+ p.index++
+ return r, nil
+}
+
+// AAAAResource parses a single AAAAResource.
+//
+// One of the XXXHeader methods must have been called before calling this
+// method.
+func (p *Parser) AAAAResource() (AAAAResource, error) {
+ if !p.resHeaderValid || p.resHeader.Type != TypeAAAA {
+ return AAAAResource{}, ErrNotStarted
+ }
+ r, err := unpackAAAAResource(p.msg, p.off)
+ if err != nil {
+ return AAAAResource{}, err
+ }
+ p.off += int(p.resHeader.Length)
+ p.resHeaderValid = false
+ p.index++
+ return r, nil
+}
+
+// OPTResource parses a single OPTResource.
+//
+// One of the XXXHeader methods must have been called before calling this
+// method.
+func (p *Parser) OPTResource() (OPTResource, error) {
+ if !p.resHeaderValid || p.resHeader.Type != TypeOPT {
+ return OPTResource{}, ErrNotStarted
+ }
+ r, err := unpackOPTResource(p.msg, p.off, p.resHeader.Length)
+ if err != nil {
+ return OPTResource{}, err
+ }
+ p.off += int(p.resHeader.Length)
+ p.resHeaderValid = false
+ p.index++
+ return r, nil
+}
+
+// UnknownResource parses a single UnknownResource.
+//
+// One of the XXXHeader methods must have been called before calling this
+// method.
+func (p *Parser) UnknownResource() (UnknownResource, error) {
+ if !p.resHeaderValid {
+ return UnknownResource{}, ErrNotStarted
+ }
+ r, err := unpackUnknownResource(p.resHeader.Type, p.msg, p.off, p.resHeader.Length)
+ if err != nil {
+ return UnknownResource{}, err
+ }
+ p.off += int(p.resHeader.Length)
+ p.resHeaderValid = false
+ p.index++
+ return r, nil
+}
+
+// Unpack parses a full Message.
+func (m *Message) Unpack(msg []byte) error {
+ var p Parser
+ var err error
+ if m.Header, err = p.Start(msg); err != nil {
+ return err
+ }
+ if m.Questions, err = p.AllQuestions(); err != nil {
+ return err
+ }
+ if m.Answers, err = p.AllAnswers(); err != nil {
+ return err
+ }
+ if m.Authorities, err = p.AllAuthorities(); err != nil {
+ return err
+ }
+ if m.Additionals, err = p.AllAdditionals(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Pack packs a full Message.
+func (m *Message) Pack() ([]byte, error) {
+ return m.AppendPack(make([]byte, 0, packStartingCap))
+}
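+
+// As an illustrative sketch (not a prescribed API pattern), Pack and Unpack
+// are symmetric:
+//
+//	m := Message{
+//		Header: Header{Response: true},
+//		Questions: []Question{{
+//			Name:  MustNewName("example.com."),
+//			Type:  TypeA,
+//			Class: ClassINET,
+//		}},
+//	}
+//	buf, err := m.Pack() // check err
+//	var m2 Message
+//	err = m2.Unpack(buf) // check err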
+
+// AppendPack is like Pack but appends the full Message to b and returns the
+// extended buffer.
+func (m *Message) AppendPack(b []byte) ([]byte, error) {
+ // Validate the lengths. It is very unlikely that anyone will try to
+ // pack more than 65535 of any particular type, but it is possible and
+ // we should fail gracefully.
+ if len(m.Questions) > int(^uint16(0)) {
+ return nil, errTooManyQuestions
+ }
+ if len(m.Answers) > int(^uint16(0)) {
+ return nil, errTooManyAnswers
+ }
+ if len(m.Authorities) > int(^uint16(0)) {
+ return nil, errTooManyAuthorities
+ }
+ if len(m.Additionals) > int(^uint16(0)) {
+ return nil, errTooManyAdditionals
+ }
+
+ var h header
+ h.id, h.bits = m.Header.pack()
+
+ h.questions = uint16(len(m.Questions))
+ h.answers = uint16(len(m.Answers))
+ h.authorities = uint16(len(m.Authorities))
+ h.additionals = uint16(len(m.Additionals))
+
+ compressionOff := len(b)
+ msg := h.pack(b)
+
+ // RFC 1035 allows (but does not require) compression for packing. RFC
+ // 1035 requires unpacking implementations to support compression, so
+ // unconditionally enabling it is fine.
+ //
+ // DNS lookups are typically done over UDP, and RFC 1035 states that UDP
+ // DNS messages can be a maximum of 512 bytes long. Without compression,
+ // many DNS response messages are over this limit, so enabling
+ // compression will help ensure compliance.
+ compression := map[string]int{}
+
+ for i := range m.Questions {
+ var err error
+ if msg, err = m.Questions[i].pack(msg, compression, compressionOff); err != nil {
+ return nil, &nestedError{"packing Question", err}
+ }
+ }
+ for i := range m.Answers {
+ var err error
+ if msg, err = m.Answers[i].pack(msg, compression, compressionOff); err != nil {
+ return nil, &nestedError{"packing Answer", err}
+ }
+ }
+ for i := range m.Authorities {
+ var err error
+ if msg, err = m.Authorities[i].pack(msg, compression, compressionOff); err != nil {
+ return nil, &nestedError{"packing Authority", err}
+ }
+ }
+ for i := range m.Additionals {
+ var err error
+ if msg, err = m.Additionals[i].pack(msg, compression, compressionOff); err != nil {
+ return nil, &nestedError{"packing Additional", err}
+ }
+ }
+
+ return msg, nil
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (m *Message) GoString() string {
+ s := "dnsmessage.Message{Header: " + m.Header.GoString() + ", " +
+ "Questions: []dnsmessage.Question{"
+ if len(m.Questions) > 0 {
+ s += m.Questions[0].GoString()
+ for _, q := range m.Questions[1:] {
+ s += ", " + q.GoString()
+ }
+ }
+ s += "}, Answers: []dnsmessage.Resource{"
+ if len(m.Answers) > 0 {
+ s += m.Answers[0].GoString()
+ for _, a := range m.Answers[1:] {
+ s += ", " + a.GoString()
+ }
+ }
+ s += "}, Authorities: []dnsmessage.Resource{"
+ if len(m.Authorities) > 0 {
+ s += m.Authorities[0].GoString()
+ for _, a := range m.Authorities[1:] {
+ s += ", " + a.GoString()
+ }
+ }
+ s += "}, Additionals: []dnsmessage.Resource{"
+ if len(m.Additionals) > 0 {
+ s += m.Additionals[0].GoString()
+ for _, a := range m.Additionals[1:] {
+ s += ", " + a.GoString()
+ }
+ }
+ return s + "}}"
+}
+
+// A Builder allows incrementally packing a DNS message.
+//
+// Example usage:
+//
+// buf := make([]byte, 2, 514)
+// b := NewBuilder(buf, Header{...})
+// b.EnableCompression()
+// // Optionally start a section and add things to that section.
+// // Repeat adding sections as necessary.
+// buf, err := b.Finish()
+// // If err is nil, buf[2:] will contain the built bytes.
+type Builder struct {
+ // msg is the storage for the message being built.
+ msg []byte
+
+ // section keeps track of the current section being built.
+ section section
+
+ // header keeps track of what should go in the header when Finish is
+ // called.
+ header header
+
+ // start is the starting index of the bytes allocated in msg for header.
+ start int
+
+ // compression is a mapping from name suffixes to their starting index
+ // in msg.
+ compression map[string]int
+}
+
+// NewBuilder creates a new builder with compression disabled.
+//
+// Note: Most users will want to immediately enable compression with the
+// EnableCompression method. See that method's comment for why you may or may
+// not want to enable compression.
+//
+// The DNS message is appended to the provided initial buffer buf (which may be
+// nil) as it is built. The final message is returned by the (*Builder).Finish
+// method, which includes buf[:len(buf)] and may return the same underlying
+// array if there was sufficient capacity in the slice.
+func NewBuilder(buf []byte, h Header) Builder {
+ if buf == nil {
+ buf = make([]byte, 0, packStartingCap)
+ }
+ b := Builder{msg: buf, start: len(buf)}
+ b.header.id, b.header.bits = h.pack()
+ var hb [headerLen]byte
+ b.msg = append(b.msg, hb[:]...)
+ b.section = sectionHeader
+ return b
+}
+
+// EnableCompression enables compression in the Builder.
+//
+// Leaving compression disabled avoids compression-related allocations, but can
+// result in larger message sizes. Be careful with this mode, as it can cause
+// messages to exceed the UDP size limit.
+//
+// According to RFC 1035, section 4.1.4, the use of compression is optional, but
+// all implementations must accept both compressed and uncompressed DNS
+// messages.
+//
+// Compression should be enabled before any sections are added for best results.
+func (b *Builder) EnableCompression() {
+ b.compression = map[string]int{}
+}
+
+func (b *Builder) startCheck(s section) error {
+ if b.section <= sectionNotStarted {
+ return ErrNotStarted
+ }
+ if b.section > s {
+ return ErrSectionDone
+ }
+ return nil
+}
+
+// StartQuestions prepares the builder for packing Questions.
+func (b *Builder) StartQuestions() error {
+ if err := b.startCheck(sectionQuestions); err != nil {
+ return err
+ }
+ b.section = sectionQuestions
+ return nil
+}
+
+// StartAnswers prepares the builder for packing Answers.
+func (b *Builder) StartAnswers() error {
+ if err := b.startCheck(sectionAnswers); err != nil {
+ return err
+ }
+ b.section = sectionAnswers
+ return nil
+}
+
+// StartAuthorities prepares the builder for packing Authorities.
+func (b *Builder) StartAuthorities() error {
+ if err := b.startCheck(sectionAuthorities); err != nil {
+ return err
+ }
+ b.section = sectionAuthorities
+ return nil
+}
+
+// StartAdditionals prepares the builder for packing Additionals.
+func (b *Builder) StartAdditionals() error {
+ if err := b.startCheck(sectionAdditionals); err != nil {
+ return err
+ }
+ b.section = sectionAdditionals
+ return nil
+}
+
+func (b *Builder) incrementSectionCount() error {
+ var count *uint16
+ var err error
+ switch b.section {
+ case sectionQuestions:
+ count = &b.header.questions
+ err = errTooManyQuestions
+ case sectionAnswers:
+ count = &b.header.answers
+ err = errTooManyAnswers
+ case sectionAuthorities:
+ count = &b.header.authorities
+ err = errTooManyAuthorities
+ case sectionAdditionals:
+ count = &b.header.additionals
+ err = errTooManyAdditionals
+ }
+ if *count == ^uint16(0) {
+ return err
+ }
+ *count++
+ return nil
+}
+
+// Question adds a single Question.
+func (b *Builder) Question(q Question) error {
+ if b.section < sectionQuestions {
+ return ErrNotStarted
+ }
+ if b.section > sectionQuestions {
+ return ErrSectionDone
+ }
+ msg, err := q.pack(b.msg, b.compression, b.start)
+ if err != nil {
+ return err
+ }
+ if err := b.incrementSectionCount(); err != nil {
+ return err
+ }
+ b.msg = msg
+ return nil
+}
+
+func (b *Builder) checkResourceSection() error {
+ if b.section < sectionAnswers {
+ return ErrNotStarted
+ }
+ if b.section > sectionAdditionals {
+ return ErrSectionDone
+ }
+ return nil
+}
+
+// CNAMEResource adds a single CNAMEResource.
+func (b *Builder) CNAMEResource(h ResourceHeader, r CNAMEResource) error {
+ if err := b.checkResourceSection(); err != nil {
+ return err
+ }
+ h.Type = r.realType()
+ msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
+ if err != nil {
+ return &nestedError{"ResourceHeader", err}
+ }
+ preLen := len(msg)
+ if msg, err = r.pack(msg, b.compression, b.start); err != nil {
+ return &nestedError{"CNAMEResource body", err}
+ }
+ if err := h.fixLen(msg, lenOff, preLen); err != nil {
+ return err
+ }
+ if err := b.incrementSectionCount(); err != nil {
+ return err
+ }
+ b.msg = msg
+ return nil
+}
+
+// MXResource adds a single MXResource.
+func (b *Builder) MXResource(h ResourceHeader, r MXResource) error {
+ if err := b.checkResourceSection(); err != nil {
+ return err
+ }
+ h.Type = r.realType()
+ msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
+ if err != nil {
+ return &nestedError{"ResourceHeader", err}
+ }
+ preLen := len(msg)
+ if msg, err = r.pack(msg, b.compression, b.start); err != nil {
+ return &nestedError{"MXResource body", err}
+ }
+ if err := h.fixLen(msg, lenOff, preLen); err != nil {
+ return err
+ }
+ if err := b.incrementSectionCount(); err != nil {
+ return err
+ }
+ b.msg = msg
+ return nil
+}
+
+// NSResource adds a single NSResource.
+func (b *Builder) NSResource(h ResourceHeader, r NSResource) error {
+ if err := b.checkResourceSection(); err != nil {
+ return err
+ }
+ h.Type = r.realType()
+ msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
+ if err != nil {
+ return &nestedError{"ResourceHeader", err}
+ }
+ preLen := len(msg)
+ if msg, err = r.pack(msg, b.compression, b.start); err != nil {
+ return &nestedError{"NSResource body", err}
+ }
+ if err := h.fixLen(msg, lenOff, preLen); err != nil {
+ return err
+ }
+ if err := b.incrementSectionCount(); err != nil {
+ return err
+ }
+ b.msg = msg
+ return nil
+}
+
+// PTRResource adds a single PTRResource.
+func (b *Builder) PTRResource(h ResourceHeader, r PTRResource) error {
+ if err := b.checkResourceSection(); err != nil {
+ return err
+ }
+ h.Type = r.realType()
+ msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
+ if err != nil {
+ return &nestedError{"ResourceHeader", err}
+ }
+ preLen := len(msg)
+ if msg, err = r.pack(msg, b.compression, b.start); err != nil {
+ return &nestedError{"PTRResource body", err}
+ }
+ if err := h.fixLen(msg, lenOff, preLen); err != nil {
+ return err
+ }
+ if err := b.incrementSectionCount(); err != nil {
+ return err
+ }
+ b.msg = msg
+ return nil
+}
+
+// SOAResource adds a single SOAResource.
+func (b *Builder) SOAResource(h ResourceHeader, r SOAResource) error {
+ if err := b.checkResourceSection(); err != nil {
+ return err
+ }
+ h.Type = r.realType()
+ msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
+ if err != nil {
+ return &nestedError{"ResourceHeader", err}
+ }
+ preLen := len(msg)
+ if msg, err = r.pack(msg, b.compression, b.start); err != nil {
+ return &nestedError{"SOAResource body", err}
+ }
+ if err := h.fixLen(msg, lenOff, preLen); err != nil {
+ return err
+ }
+ if err := b.incrementSectionCount(); err != nil {
+ return err
+ }
+ b.msg = msg
+ return nil
+}
+
+// TXTResource adds a single TXTResource.
+func (b *Builder) TXTResource(h ResourceHeader, r TXTResource) error {
+ if err := b.checkResourceSection(); err != nil {
+ return err
+ }
+ h.Type = r.realType()
+ msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
+ if err != nil {
+ return &nestedError{"ResourceHeader", err}
+ }
+ preLen := len(msg)
+ if msg, err = r.pack(msg, b.compression, b.start); err != nil {
+ return &nestedError{"TXTResource body", err}
+ }
+ if err := h.fixLen(msg, lenOff, preLen); err != nil {
+ return err
+ }
+ if err := b.incrementSectionCount(); err != nil {
+ return err
+ }
+ b.msg = msg
+ return nil
+}
+
+// SRVResource adds a single SRVResource.
+func (b *Builder) SRVResource(h ResourceHeader, r SRVResource) error {
+ if err := b.checkResourceSection(); err != nil {
+ return err
+ }
+ h.Type = r.realType()
+ msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
+ if err != nil {
+ return &nestedError{"ResourceHeader", err}
+ }
+ preLen := len(msg)
+ if msg, err = r.pack(msg, b.compression, b.start); err != nil {
+ return &nestedError{"SRVResource body", err}
+ }
+ if err := h.fixLen(msg, lenOff, preLen); err != nil {
+ return err
+ }
+ if err := b.incrementSectionCount(); err != nil {
+ return err
+ }
+ b.msg = msg
+ return nil
+}
+
+// AResource adds a single AResource.
+func (b *Builder) AResource(h ResourceHeader, r AResource) error {
+ if err := b.checkResourceSection(); err != nil {
+ return err
+ }
+ h.Type = r.realType()
+ msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
+ if err != nil {
+ return &nestedError{"ResourceHeader", err}
+ }
+ preLen := len(msg)
+ if msg, err = r.pack(msg, b.compression, b.start); err != nil {
+ return &nestedError{"AResource body", err}
+ }
+ if err := h.fixLen(msg, lenOff, preLen); err != nil {
+ return err
+ }
+ if err := b.incrementSectionCount(); err != nil {
+ return err
+ }
+ b.msg = msg
+ return nil
+}
+
+// AAAAResource adds a single AAAAResource.
+func (b *Builder) AAAAResource(h ResourceHeader, r AAAAResource) error {
+ if err := b.checkResourceSection(); err != nil {
+ return err
+ }
+ h.Type = r.realType()
+ msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
+ if err != nil {
+ return &nestedError{"ResourceHeader", err}
+ }
+ preLen := len(msg)
+ if msg, err = r.pack(msg, b.compression, b.start); err != nil {
+ return &nestedError{"AAAAResource body", err}
+ }
+ if err := h.fixLen(msg, lenOff, preLen); err != nil {
+ return err
+ }
+ if err := b.incrementSectionCount(); err != nil {
+ return err
+ }
+ b.msg = msg
+ return nil
+}
+
+// OPTResource adds a single OPTResource.
+func (b *Builder) OPTResource(h ResourceHeader, r OPTResource) error {
+ if err := b.checkResourceSection(); err != nil {
+ return err
+ }
+ h.Type = r.realType()
+ msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
+ if err != nil {
+ return &nestedError{"ResourceHeader", err}
+ }
+ preLen := len(msg)
+ if msg, err = r.pack(msg, b.compression, b.start); err != nil {
+ return &nestedError{"OPTResource body", err}
+ }
+ if err := h.fixLen(msg, lenOff, preLen); err != nil {
+ return err
+ }
+ if err := b.incrementSectionCount(); err != nil {
+ return err
+ }
+ b.msg = msg
+ return nil
+}
+
+// UnknownResource adds a single UnknownResource.
+func (b *Builder) UnknownResource(h ResourceHeader, r UnknownResource) error {
+ if err := b.checkResourceSection(); err != nil {
+ return err
+ }
+ h.Type = r.realType()
+ msg, lenOff, err := h.pack(b.msg, b.compression, b.start)
+ if err != nil {
+ return &nestedError{"ResourceHeader", err}
+ }
+ preLen := len(msg)
+ if msg, err = r.pack(msg, b.compression, b.start); err != nil {
+ return &nestedError{"UnknownResource body", err}
+ }
+ if err := h.fixLen(msg, lenOff, preLen); err != nil {
+ return err
+ }
+ if err := b.incrementSectionCount(); err != nil {
+ return err
+ }
+ b.msg = msg
+ return nil
+}
+
+// Finish ends message building and generates a binary message.
+func (b *Builder) Finish() ([]byte, error) {
+ if b.section < sectionHeader {
+ return nil, ErrNotStarted
+ }
+ b.section = sectionDone
+ // Space for the header was allocated in NewBuilder.
+ b.header.pack(b.msg[b.start:b.start])
+ return b.msg, nil
+}
+
+// A ResourceHeader is the header of a DNS resource record. There are
+// many types of DNS resource records, but they all share the same header.
+type ResourceHeader struct {
+ // Name is the domain name to which this resource record pertains.
+ Name Name
+
+ // Type is the type of DNS resource record.
+ //
+ // This field will be set automatically during packing.
+ Type Type
+
+ // Class is the class of network to which this DNS resource record
+ // pertains.
+ Class Class
+
+ // TTL is the length of time (measured in seconds) for which this resource
+ // record is valid (time to live). All Resources in a set should
+ // have the same TTL (RFC 2181 Section 5.2).
+ TTL uint32
+
+ // Length is the length of data in the resource record after the header.
+ //
+ // This field will be set automatically during packing.
+ Length uint16
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (h *ResourceHeader) GoString() string {
+ return "dnsmessage.ResourceHeader{" +
+ "Name: " + h.Name.GoString() + ", " +
+ "Type: " + h.Type.GoString() + ", " +
+ "Class: " + h.Class.GoString() + ", " +
+ "TTL: " + printUint32(h.TTL) + ", " +
+ "Length: " + printUint16(h.Length) + "}"
+}
+
+// pack appends the wire format of the ResourceHeader to oldMsg.
+//
+// lenOff is the offset in msg where the Length field was packed.
+func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]int, compressionOff int) (msg []byte, lenOff int, err error) {
+ msg = oldMsg
+ if msg, err = h.Name.pack(msg, compression, compressionOff); err != nil {
+ return oldMsg, 0, &nestedError{"Name", err}
+ }
+ msg = packType(msg, h.Type)
+ msg = packClass(msg, h.Class)
+ msg = packUint32(msg, h.TTL)
+ lenOff = len(msg)
+ msg = packUint16(msg, h.Length)
+ return msg, lenOff, nil
+}
+
+func (h *ResourceHeader) unpack(msg []byte, off int) (int, error) {
+ newOff := off
+ var err error
+ if newOff, err = h.Name.unpack(msg, newOff); err != nil {
+ return off, &nestedError{"Name", err}
+ }
+ if h.Type, newOff, err = unpackType(msg, newOff); err != nil {
+ return off, &nestedError{"Type", err}
+ }
+ if h.Class, newOff, err = unpackClass(msg, newOff); err != nil {
+ return off, &nestedError{"Class", err}
+ }
+ if h.TTL, newOff, err = unpackUint32(msg, newOff); err != nil {
+ return off, &nestedError{"TTL", err}
+ }
+ if h.Length, newOff, err = unpackUint16(msg, newOff); err != nil {
+ return off, &nestedError{"Length", err}
+ }
+ return newOff, nil
+}
+
+// fixLen updates a packed ResourceHeader to include the length of the
+// ResourceBody.
+//
+// lenOff is the offset of the ResourceHeader.Length field in msg.
+//
+// preLen is the length that msg was before the ResourceBody was packed.
+func (h *ResourceHeader) fixLen(msg []byte, lenOff int, preLen int) error {
+ conLen := len(msg) - preLen
+ if conLen > int(^uint16(0)) {
+ return errResTooLong
+ }
+
+ // Fill in the length now that we know how long the content is.
+ packUint16(msg[lenOff:lenOff], uint16(conLen))
+ h.Length = uint16(conLen)
+
+ return nil
+}
+
+// EDNS(0) wire constants.
+const (
+ edns0Version = 0
+
+ edns0DNSSECOK = 0x00008000
+ ednsVersionMask = 0x00ff0000
+ edns0DNSSECOKMask = 0x00ff8000
+)
+
+// SetEDNS0 configures h for EDNS(0).
+//
+// The provided extRCode must be an extended RCode.
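+//
+// A hedged usage sketch (the 4096-byte payload size is illustrative):
+//
+//	var h ResourceHeader
+//	err := h.SetEDNS0(4096, RCodeSuccess, false) // check err
+//	// h may then be passed to (*Builder).OPTResource.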
+func (h *ResourceHeader) SetEDNS0(udpPayloadLen int, extRCode RCode, dnssecOK bool) error {
+ h.Name = Name{Data: [nameLen]byte{'.'}, Length: 1} // RFC 6891 section 6.1.2
+ h.Type = TypeOPT
+ h.Class = Class(udpPayloadLen)
+ h.TTL = uint32(extRCode) >> 4 << 24
+ if dnssecOK {
+ h.TTL |= edns0DNSSECOK
+ }
+ return nil
+}
+
+// DNSSECAllowed reports whether the DNSSEC OK bit is set.
+func (h *ResourceHeader) DNSSECAllowed() bool {
+ return h.TTL&edns0DNSSECOKMask == edns0DNSSECOK // RFC 6891 section 6.1.3
+}
+
+// ExtendedRCode returns an extended RCode.
+//
+// The provided rcode must be the RCode from the DNS message header.
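+//
+// For example (a sketch per RFC 6891): if the top byte of the OPT TTL is 1
+// and the header RCode is 0, ExtendedRCode returns RCode(1)<<4 | 0 == 16
+// (BADVERS).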
+func (h *ResourceHeader) ExtendedRCode(rcode RCode) RCode {
+ if h.TTL&ednsVersionMask == edns0Version { // RFC 6891 section 6.1.3
+ return RCode(h.TTL>>24<<4) | rcode
+ }
+ return rcode
+}
+
+func skipResource(msg []byte, off int) (int, error) {
+ newOff, err := skipName(msg, off)
+ if err != nil {
+ return off, &nestedError{"Name", err}
+ }
+ if newOff, err = skipType(msg, newOff); err != nil {
+ return off, &nestedError{"Type", err}
+ }
+ if newOff, err = skipClass(msg, newOff); err != nil {
+ return off, &nestedError{"Class", err}
+ }
+ if newOff, err = skipUint32(msg, newOff); err != nil {
+ return off, &nestedError{"TTL", err}
+ }
+ length, newOff, err := unpackUint16(msg, newOff)
+ if err != nil {
+ return off, &nestedError{"Length", err}
+ }
+ if newOff += int(length); newOff > len(msg) {
+ return off, errResourceLen
+ }
+ return newOff, nil
+}
+
+// packUint16 appends the wire format of field to msg.
+func packUint16(msg []byte, field uint16) []byte {
+ return append(msg, byte(field>>8), byte(field))
+}
+
+func unpackUint16(msg []byte, off int) (uint16, int, error) {
+ if off+uint16Len > len(msg) {
+ return 0, off, errBaseLen
+ }
+ return uint16(msg[off])<<8 | uint16(msg[off+1]), off + uint16Len, nil
+}
+
+func skipUint16(msg []byte, off int) (int, error) {
+ if off+uint16Len > len(msg) {
+ return off, errBaseLen
+ }
+ return off + uint16Len, nil
+}
+
+// packType appends the wire format of field to msg.
+func packType(msg []byte, field Type) []byte {
+ return packUint16(msg, uint16(field))
+}
+
+func unpackType(msg []byte, off int) (Type, int, error) {
+ t, o, err := unpackUint16(msg, off)
+ return Type(t), o, err
+}
+
+func skipType(msg []byte, off int) (int, error) {
+ return skipUint16(msg, off)
+}
+
+// packClass appends the wire format of field to msg.
+func packClass(msg []byte, field Class) []byte {
+ return packUint16(msg, uint16(field))
+}
+
+func unpackClass(msg []byte, off int) (Class, int, error) {
+ c, o, err := unpackUint16(msg, off)
+ return Class(c), o, err
+}
+
+func skipClass(msg []byte, off int) (int, error) {
+ return skipUint16(msg, off)
+}
+
+// packUint32 appends the wire format of field to msg.
+func packUint32(msg []byte, field uint32) []byte {
+ return append(
+ msg,
+ byte(field>>24),
+ byte(field>>16),
+ byte(field>>8),
+ byte(field),
+ )
+}
+
+func unpackUint32(msg []byte, off int) (uint32, int, error) {
+ if off+uint32Len > len(msg) {
+ return 0, off, errBaseLen
+ }
+ v := uint32(msg[off])<<24 | uint32(msg[off+1])<<16 | uint32(msg[off+2])<<8 | uint32(msg[off+3])
+ return v, off + uint32Len, nil
+}
+
+func skipUint32(msg []byte, off int) (int, error) {
+ if off+uint32Len > len(msg) {
+ return off, errBaseLen
+ }
+ return off + uint32Len, nil
+}
+
+// packText appends the wire format of field to msg.
+func packText(msg []byte, field string) ([]byte, error) {
+ l := len(field)
+ if l > 255 {
+ return nil, errStringTooLong
+ }
+ msg = append(msg, byte(l))
+ msg = append(msg, field...)
+
+ return msg, nil
+}
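+
+// As a sketch of the RFC 1035 character-string encoding: packText prefixes
+// the data with a single length octet, so packText(nil, "hi") yields
+// []byte{2, 'h', 'i'}, and strings longer than 255 bytes are rejected.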
+
+func unpackText(msg []byte, off int) (string, int, error) {
+ if off >= len(msg) {
+ return "", off, errBaseLen
+ }
+ beginOff := off + 1
+ endOff := beginOff + int(msg[off])
+ if endOff > len(msg) {
+ return "", off, errCalcLen
+ }
+ return string(msg[beginOff:endOff]), endOff, nil
+}
+
+// packBytes appends the wire format of field to msg.
+func packBytes(msg []byte, field []byte) []byte {
+ return append(msg, field...)
+}
+
+func unpackBytes(msg []byte, off int, field []byte) (int, error) {
+ newOff := off + len(field)
+ if newOff > len(msg) {
+ return off, errBaseLen
+ }
+ copy(field, msg[off:newOff])
+ return newOff, nil
+}
+
+const nameLen = 255
+
+// A Name is a non-encoded domain name. It is used instead of strings to avoid
+// allocations.
+type Name struct {
+ Data [nameLen]byte // 255 bytes
+ Length uint8
+}
+
+// NewName creates a new Name from a string.
+func NewName(name string) (Name, error) {
+ if len([]byte(name)) > nameLen {
+ return Name{}, errCalcLen
+ }
+ n := Name{Length: uint8(len(name))}
+ copy(n.Data[:], []byte(name))
+ return n, nil
+}
+
+// MustNewName creates a new Name from a string and panics on error.
+func MustNewName(name string) Name {
+ n, err := NewName(name)
+ if err != nil {
+ panic("creating name: " + err.Error())
+ }
+ return n
+}
+
+// String implements fmt.Stringer.String.
+func (n Name) String() string {
+ return string(n.Data[:n.Length])
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (n *Name) GoString() string {
+ return `dnsmessage.MustNewName("` + printString(n.Data[:n.Length]) + `")`
+}
+
+// pack appends the wire format of the Name to msg.
+//
+// Domain names are a sequence of counted strings split at the dots. They end
+// with a zero-length string. Compression can be used to reuse domain suffixes.
+//
+// The compression map will be updated with new domain suffixes. If compression
+// is nil, compression will not be used.
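+//
+// As a worked example (illustrative only), "golang.org." is emitted as the
+// counted strings
+//
+//	6 g o l a n g 3 o r g 0
+//
+// and a later name ending in "org." may instead emit a two-byte pointer to
+// that earlier suffix, with the pointer's top two bits set (0xC0).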
+func (n *Name) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+ oldMsg := msg
+
+ // Reject names that are not canonical (they must end with a trailing dot).
+ if n.Length == 0 || n.Data[n.Length-1] != '.' {
+ return oldMsg, errNonCanonicalName
+ }
+
+ // Allow root domain.
+ if n.Data[0] == '.' && n.Length == 1 {
+ return append(msg, 0), nil
+ }
+
+ // Emit sequence of counted strings, chopping at dots.
+ for i, begin := 0, 0; i < int(n.Length); i++ {
+ // Check for the end of the segment.
+ if n.Data[i] == '.' {
+ // The two most significant bits have special meaning.
+ // It isn't allowed for segments to be long enough to
+ // need them.
+ if i-begin >= 1<<6 {
+ return oldMsg, errSegTooLong
+ }
+
+ // Segments must have a non-zero length.
+ if i-begin == 0 {
+ return oldMsg, errZeroSegLen
+ }
+
+ msg = append(msg, byte(i-begin))
+
+ for j := begin; j < i; j++ {
+ msg = append(msg, n.Data[j])
+ }
+
+ begin = i + 1
+ continue
+ }
+
+ // We can only compress domain suffixes starting with a new
+ // segment. A pointer is two bytes with the two most significant
+ // bits set to 1 to indicate that it is a pointer.
+ if (i == 0 || n.Data[i-1] == '.') && compression != nil {
+ if ptr, ok := compression[string(n.Data[i:])]; ok {
+ // Hit. Emit a pointer instead of the rest of
+ // the domain.
+ return append(msg, byte(ptr>>8|0xC0), byte(ptr)), nil
+ }
+
+ // Miss. Add the suffix to the compression table if the
+ // offset can be stored in the available 14 bits.
+ if len(msg) <= int(^uint16(0)>>2) {
+ compression[string(n.Data[i:])] = len(msg) - compressionOff
+ }
+ }
+ }
+ return append(msg, 0), nil
+}
+
+// unpack unpacks a domain name.
+func (n *Name) unpack(msg []byte, off int) (int, error) {
+ return n.unpackCompressed(msg, off, true /* allowCompression */)
+}
+
+func (n *Name) unpackCompressed(msg []byte, off int, allowCompression bool) (int, error) {
+ // currOff is the current working offset.
+ currOff := off
+
+ // newOff is the offset where the next record will start. Pointers lead
+ // to data that belongs to other names and thus doesn't count toward
+ // the usage of this name.
+ newOff := off
+
+ // ptr is the number of pointers followed.
+ var ptr int
+
+ // Name is a slice representation of the name data.
+ name := n.Data[:0]
+
+Loop:
+ for {
+ if currOff >= len(msg) {
+ return off, errBaseLen
+ }
+ c := int(msg[currOff])
+ currOff++
+ switch c & 0xC0 {
+ case 0x00: // String segment
+ if c == 0x00 {
+ // A zero length signals the end of the name.
+ break Loop
+ }
+ endOff := currOff + c
+ if endOff > len(msg) {
+ return off, errCalcLen
+ }
+ name = append(name, msg[currOff:endOff]...)
+ name = append(name, '.')
+ currOff = endOff
+ case 0xC0: // Pointer
+ if !allowCompression {
+ return off, errCompressedSRV
+ }
+ if currOff >= len(msg) {
+ return off, errInvalidPtr
+ }
+ c1 := msg[currOff]
+ currOff++
+ if ptr == 0 {
+ newOff = currOff
+ }
+ // Don't follow too many pointers, maybe there's a loop.
+ if ptr++; ptr > 10 {
+ return off, errTooManyPtr
+ }
+ currOff = (c^0xC0)<<8 | int(c1)
+ default:
+ // Prefixes 0x80 and 0x40 are reserved.
+ return off, errReserved
+ }
+ }
+ if len(name) == 0 {
+ name = append(name, '.')
+ }
+ if len(name) > len(n.Data) {
+ return off, errCalcLen
+ }
+ n.Length = uint8(len(name))
+ if ptr == 0 {
+ newOff = currOff
+ }
+ return newOff, nil
+}
+
+func skipName(msg []byte, off int) (int, error) {
+ // newOff is the offset where the next record will start. Pointers lead
+ // to data that belongs to other names and thus doesn't count toward
+ // the usage of this name.
+ newOff := off
+
+Loop:
+ for {
+ if newOff >= len(msg) {
+ return off, errBaseLen
+ }
+ c := int(msg[newOff])
+ newOff++
+ switch c & 0xC0 {
+ case 0x00:
+ if c == 0x00 {
+ // A zero length signals the end of the name.
+ break Loop
+ }
+ // literal string
+ newOff += c
+ if newOff > len(msg) {
+ return off, errCalcLen
+ }
+ case 0xC0:
+ // Pointer to somewhere else in msg.
+
+ // Pointers are two bytes.
+ newOff++
+
+ // Don't follow the pointer as the data here has ended.
+ break Loop
+ default:
+ // Prefixes 0x80 and 0x40 are reserved.
+ return off, errReserved
+ }
+ }
+
+ return newOff, nil
+}
+
+// A Question is a DNS query.
+type Question struct {
+ Name Name
+ Type Type
+ Class Class
+}
+
+// pack appends the wire format of the Question to msg.
+func (q *Question) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+ msg, err := q.Name.pack(msg, compression, compressionOff)
+ if err != nil {
+ return msg, &nestedError{"Name", err}
+ }
+ msg = packType(msg, q.Type)
+ return packClass(msg, q.Class), nil
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (q *Question) GoString() string {
+ return "dnsmessage.Question{" +
+ "Name: " + q.Name.GoString() + ", " +
+ "Type: " + q.Type.GoString() + ", " +
+ "Class: " + q.Class.GoString() + "}"
+}
+
+func unpackResourceBody(msg []byte, off int, hdr ResourceHeader) (ResourceBody, int, error) {
+ var (
+ r ResourceBody
+ err error
+ name string
+ )
+ switch hdr.Type {
+ case TypeA:
+ var rb AResource
+ rb, err = unpackAResource(msg, off)
+ r = &rb
+ name = "A"
+ case TypeNS:
+ var rb NSResource
+ rb, err = unpackNSResource(msg, off)
+ r = &rb
+ name = "NS"
+ case TypeCNAME:
+ var rb CNAMEResource
+ rb, err = unpackCNAMEResource(msg, off)
+ r = &rb
+ name = "CNAME"
+ case TypeSOA:
+ var rb SOAResource
+ rb, err = unpackSOAResource(msg, off)
+ r = &rb
+ name = "SOA"
+ case TypePTR:
+ var rb PTRResource
+ rb, err = unpackPTRResource(msg, off)
+ r = &rb
+ name = "PTR"
+ case TypeMX:
+ var rb MXResource
+ rb, err = unpackMXResource(msg, off)
+ r = &rb
+ name = "MX"
+ case TypeTXT:
+ var rb TXTResource
+ rb, err = unpackTXTResource(msg, off, hdr.Length)
+ r = &rb
+ name = "TXT"
+ case TypeAAAA:
+ var rb AAAAResource
+ rb, err = unpackAAAAResource(msg, off)
+ r = &rb
+ name = "AAAA"
+ case TypeSRV:
+ var rb SRVResource
+ rb, err = unpackSRVResource(msg, off)
+ r = &rb
+ name = "SRV"
+ case TypeOPT:
+ var rb OPTResource
+ rb, err = unpackOPTResource(msg, off, hdr.Length)
+ r = &rb
+ name = "OPT"
+ default:
+ var rb UnknownResource
+ rb, err = unpackUnknownResource(hdr.Type, msg, off, hdr.Length)
+ r = &rb
+ name = "Unknown"
+ }
+ if err != nil {
+ return nil, off, &nestedError{name + " record", err}
+ }
+ return r, off + int(hdr.Length), nil
+}
+
+// A CNAMEResource is a CNAME Resource record.
+type CNAMEResource struct {
+ CNAME Name
+}
+
+func (r *CNAMEResource) realType() Type {
+ return TypeCNAME
+}
+
+// pack appends the wire format of the CNAMEResource to msg.
+func (r *CNAMEResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+ return r.CNAME.pack(msg, compression, compressionOff)
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (r *CNAMEResource) GoString() string {
+ return "dnsmessage.CNAMEResource{CNAME: " + r.CNAME.GoString() + "}"
+}
+
+func unpackCNAMEResource(msg []byte, off int) (CNAMEResource, error) {
+ var cname Name
+ if _, err := cname.unpack(msg, off); err != nil {
+ return CNAMEResource{}, err
+ }
+ return CNAMEResource{cname}, nil
+}
+
+// An MXResource is an MX Resource record.
+type MXResource struct {
+ Pref uint16
+ MX Name
+}
+
+func (r *MXResource) realType() Type {
+ return TypeMX
+}
+
+// pack appends the wire format of the MXResource to msg.
+func (r *MXResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+ oldMsg := msg
+ msg = packUint16(msg, r.Pref)
+ msg, err := r.MX.pack(msg, compression, compressionOff)
+ if err != nil {
+ return oldMsg, &nestedError{"MXResource.MX", err}
+ }
+ return msg, nil
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (r *MXResource) GoString() string {
+ return "dnsmessage.MXResource{" +
+ "Pref: " + printUint16(r.Pref) + ", " +
+ "MX: " + r.MX.GoString() + "}"
+}
+
+func unpackMXResource(msg []byte, off int) (MXResource, error) {
+ pref, off, err := unpackUint16(msg, off)
+ if err != nil {
+ return MXResource{}, &nestedError{"Pref", err}
+ }
+ var mx Name
+ if _, err := mx.unpack(msg, off); err != nil {
+ return MXResource{}, &nestedError{"MX", err}
+ }
+ return MXResource{pref, mx}, nil
+}
+
+// An NSResource is an NS Resource record.
+type NSResource struct {
+ NS Name
+}
+
+func (r *NSResource) realType() Type {
+ return TypeNS
+}
+
+// pack appends the wire format of the NSResource to msg.
+func (r *NSResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+ return r.NS.pack(msg, compression, compressionOff)
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (r *NSResource) GoString() string {
+ return "dnsmessage.NSResource{NS: " + r.NS.GoString() + "}"
+}
+
+func unpackNSResource(msg []byte, off int) (NSResource, error) {
+ var ns Name
+ if _, err := ns.unpack(msg, off); err != nil {
+ return NSResource{}, err
+ }
+ return NSResource{ns}, nil
+}
+
+// A PTRResource is a PTR Resource record.
+type PTRResource struct {
+ PTR Name
+}
+
+func (r *PTRResource) realType() Type {
+ return TypePTR
+}
+
+// pack appends the wire format of the PTRResource to msg.
+func (r *PTRResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+ return r.PTR.pack(msg, compression, compressionOff)
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (r *PTRResource) GoString() string {
+ return "dnsmessage.PTRResource{PTR: " + r.PTR.GoString() + "}"
+}
+
+func unpackPTRResource(msg []byte, off int) (PTRResource, error) {
+ var ptr Name
+ if _, err := ptr.unpack(msg, off); err != nil {
+ return PTRResource{}, err
+ }
+ return PTRResource{ptr}, nil
+}
+
+// An SOAResource is an SOA Resource record.
+type SOAResource struct {
+ NS Name
+ MBox Name
+ Serial uint32
+ Refresh uint32
+ Retry uint32
+ Expire uint32
+
+ // MinTTL is the default TTL of resource records which did not
+ // contain a TTL value, and the TTL of negative responses. (RFC 2308
+ // Section 4)
+ MinTTL uint32
+}
+
+func (r *SOAResource) realType() Type {
+ return TypeSOA
+}
+
+// pack appends the wire format of the SOAResource to msg.
+func (r *SOAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+ oldMsg := msg
+ msg, err := r.NS.pack(msg, compression, compressionOff)
+ if err != nil {
+ return oldMsg, &nestedError{"SOAResource.NS", err}
+ }
+ msg, err = r.MBox.pack(msg, compression, compressionOff)
+ if err != nil {
+ return oldMsg, &nestedError{"SOAResource.MBox", err}
+ }
+ msg = packUint32(msg, r.Serial)
+ msg = packUint32(msg, r.Refresh)
+ msg = packUint32(msg, r.Retry)
+ msg = packUint32(msg, r.Expire)
+ return packUint32(msg, r.MinTTL), nil
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (r *SOAResource) GoString() string {
+ return "dnsmessage.SOAResource{" +
+ "NS: " + r.NS.GoString() + ", " +
+ "MBox: " + r.MBox.GoString() + ", " +
+ "Serial: " + printUint32(r.Serial) + ", " +
+ "Refresh: " + printUint32(r.Refresh) + ", " +
+ "Retry: " + printUint32(r.Retry) + ", " +
+ "Expire: " + printUint32(r.Expire) + ", " +
+ "MinTTL: " + printUint32(r.MinTTL) + "}"
+}
+
+func unpackSOAResource(msg []byte, off int) (SOAResource, error) {
+ var ns Name
+ off, err := ns.unpack(msg, off)
+ if err != nil {
+ return SOAResource{}, &nestedError{"NS", err}
+ }
+ var mbox Name
+ if off, err = mbox.unpack(msg, off); err != nil {
+ return SOAResource{}, &nestedError{"MBox", err}
+ }
+ serial, off, err := unpackUint32(msg, off)
+ if err != nil {
+ return SOAResource{}, &nestedError{"Serial", err}
+ }
+ refresh, off, err := unpackUint32(msg, off)
+ if err != nil {
+ return SOAResource{}, &nestedError{"Refresh", err}
+ }
+ retry, off, err := unpackUint32(msg, off)
+ if err != nil {
+ return SOAResource{}, &nestedError{"Retry", err}
+ }
+ expire, off, err := unpackUint32(msg, off)
+ if err != nil {
+ return SOAResource{}, &nestedError{"Expire", err}
+ }
+ minTTL, _, err := unpackUint32(msg, off)
+ if err != nil {
+ return SOAResource{}, &nestedError{"MinTTL", err}
+ }
+ return SOAResource{ns, mbox, serial, refresh, retry, expire, minTTL}, nil
+}
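
The helpers above are internal; callers reach them through the public golang.org/x/net/dns/dnsmessage API, which this vendored copy mirrors. A minimal round-trip sketch (the zone data is illustrative, not part of this diff) showing MinTTL carried as the RFC 2308 negative-caching TTL:

    package main

    import (
        "fmt"

        "golang.org/x/net/dns/dnsmessage"
    )

    func main() {
        msg := dnsmessage.Message{
            Header: dnsmessage.Header{Response: true, Authoritative: true},
            Answers: []dnsmessage.Resource{{
                Header: dnsmessage.ResourceHeader{
                    Name:  dnsmessage.MustNewName("example.com."),
                    Type:  dnsmessage.TypeSOA,
                    Class: dnsmessage.ClassINET,
                    TTL:   3600,
                },
                Body: &dnsmessage.SOAResource{
                    NS:      dnsmessage.MustNewName("ns1.example.com."),
                    MBox:    dnsmessage.MustNewName("hostmaster.example.com."),
                    Serial:  2022093001,
                    Refresh: 7200,
                    Retry:   3600,
                    Expire:  1209600,
                    MinTTL:  300, // negative-caching TTL (RFC 2308)
                },
            }},
        }
        wire, err := msg.Pack() // drives SOAResource.pack
        if err != nil {
            panic(err)
        }
        var parsed dnsmessage.Message
        if err := parsed.Unpack(wire); err != nil { // drives unpackSOAResource
            panic(err)
        }
        fmt.Println(parsed.Answers[0].Body.GoString())
    }
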
+
+// A TXTResource is a TXT Resource record.
+type TXTResource struct {
+ TXT []string
+}
+
+func (r *TXTResource) realType() Type {
+ return TypeTXT
+}
+
+// pack appends the wire format of the TXTResource to msg.
+func (r *TXTResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+ oldMsg := msg
+ for _, s := range r.TXT {
+ var err error
+ msg, err = packText(msg, s)
+ if err != nil {
+ return oldMsg, err
+ }
+ }
+ return msg, nil
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (r *TXTResource) GoString() string {
+ s := "dnsmessage.TXTResource{TXT: []string{"
+ if len(r.TXT) == 0 {
+ return s + "}}"
+ }
+ s += `"` + printString([]byte(r.TXT[0]))
+ for _, t := range r.TXT[1:] {
+ s += `", "` + printString([]byte(t))
+ }
+ return s + `"}}`
+}
+
+func unpackTXTResource(msg []byte, off int, length uint16) (TXTResource, error) {
+ txts := make([]string, 0, 1)
+ for n := uint16(0); n < length; {
+ var t string
+ var err error
+ if t, off, err = unpackText(msg, off); err != nil {
+ return TXTResource{}, &nestedError{"text", err}
+ }
+ // Check if we got too many bytes.
+ if length-n < uint16(len(t))+1 {
+ return TXTResource{}, errCalcLen
+ }
+ n += uint16(len(t)) + 1
+ txts = append(txts, t)
+ }
+ return TXTResource{txts}, nil
+}
+
+// An SRVResource is an SRV Resource record.
+type SRVResource struct {
+ Priority uint16
+ Weight uint16
+ Port uint16
+ Target Name // Not compressed as per RFC 2782.
+}
+
+func (r *SRVResource) realType() Type {
+ return TypeSRV
+}
+
+// pack appends the wire format of the SRVResource to msg.
+func (r *SRVResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+ oldMsg := msg
+ msg = packUint16(msg, r.Priority)
+ msg = packUint16(msg, r.Weight)
+ msg = packUint16(msg, r.Port)
+ msg, err := r.Target.pack(msg, nil, compressionOff)
+ if err != nil {
+ return oldMsg, &nestedError{"SRVResource.Target", err}
+ }
+ return msg, nil
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (r *SRVResource) GoString() string {
+ return "dnsmessage.SRVResource{" +
+ "Priority: " + printUint16(r.Priority) + ", " +
+ "Weight: " + printUint16(r.Weight) + ", " +
+ "Port: " + printUint16(r.Port) + ", " +
+ "Target: " + r.Target.GoString() + "}"
+}
+
+func unpackSRVResource(msg []byte, off int) (SRVResource, error) {
+ priority, off, err := unpackUint16(msg, off)
+ if err != nil {
+ return SRVResource{}, &nestedError{"Priority", err}
+ }
+ weight, off, err := unpackUint16(msg, off)
+ if err != nil {
+ return SRVResource{}, &nestedError{"Weight", err}
+ }
+ port, off, err := unpackUint16(msg, off)
+ if err != nil {
+ return SRVResource{}, &nestedError{"Port", err}
+ }
+ var target Name
+ if _, err := target.unpackCompressed(msg, off, false /* allowCompression */); err != nil {
+ return SRVResource{}, &nestedError{"Target", err}
+ }
+ return SRVResource{priority, weight, port, target}, nil
+}
+
+// An AResource is an A Resource record.
+type AResource struct {
+ A [4]byte
+}
+
+func (r *AResource) realType() Type {
+ return TypeA
+}
+
+// pack appends the wire format of the AResource to msg.
+func (r *AResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+ return packBytes(msg, r.A[:]), nil
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (r *AResource) GoString() string {
+ return "dnsmessage.AResource{" +
+ "A: [4]byte{" + printByteSlice(r.A[:]) + "}}"
+}
+
+func unpackAResource(msg []byte, off int) (AResource, error) {
+ var a [4]byte
+ if _, err := unpackBytes(msg, off, a[:]); err != nil {
+ return AResource{}, err
+ }
+ return AResource{a}, nil
+}
+
+// An AAAAResource is an AAAA Resource record.
+type AAAAResource struct {
+ AAAA [16]byte
+}
+
+func (r *AAAAResource) realType() Type {
+ return TypeAAAA
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (r *AAAAResource) GoString() string {
+ return "dnsmessage.AAAAResource{" +
+ "AAAA: [16]byte{" + printByteSlice(r.AAAA[:]) + "}}"
+}
+
+// pack appends the wire format of the AAAAResource to msg.
+func (r *AAAAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+ return packBytes(msg, r.AAAA[:]), nil
+}
+
+func unpackAAAAResource(msg []byte, off int) (AAAAResource, error) {
+ var aaaa [16]byte
+ if _, err := unpackBytes(msg, off, aaaa[:]); err != nil {
+ return AAAAResource{}, err
+ }
+ return AAAAResource{aaaa}, nil
+}
+
+// An OPTResource is an OPT pseudo Resource record.
+//
+// The pseudo resource record is part of the extension mechanisms for DNS
+// as defined in RFC 6891.
+type OPTResource struct {
+ Options []Option
+}
+
+// An Option represents a DNS message option within OPTResource.
+//
+// The message option is part of the extension mechanisms for DNS as
+// defined in RFC 6891.
+type Option struct {
+ Code uint16 // option code
+ Data []byte
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (o *Option) GoString() string {
+ return "dnsmessage.Option{" +
+ "Code: " + printUint16(o.Code) + ", " +
+ "Data: []byte{" + printByteSlice(o.Data) + "}}"
+}
+
+func (r *OPTResource) realType() Type {
+ return TypeOPT
+}
+
+func (r *OPTResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+ for _, opt := range r.Options {
+ msg = packUint16(msg, opt.Code)
+ l := uint16(len(opt.Data))
+ msg = packUint16(msg, l)
+ msg = packBytes(msg, opt.Data)
+ }
+ return msg, nil
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (r *OPTResource) GoString() string {
+ s := "dnsmessage.OPTResource{Options: []dnsmessage.Option{"
+ if len(r.Options) == 0 {
+ return s + "}}"
+ }
+ s += r.Options[0].GoString()
+ for _, o := range r.Options[1:] {
+ s += ", " + o.GoString()
+ }
+ return s + "}}"
+}
+
+func unpackOPTResource(msg []byte, off int, length uint16) (OPTResource, error) {
+ var opts []Option
+ for oldOff := off; off < oldOff+int(length); {
+ var err error
+ var o Option
+ o.Code, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return OPTResource{}, &nestedError{"Code", err}
+ }
+ var l uint16
+ l, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return OPTResource{}, &nestedError{"Data", err}
+ }
+ o.Data = make([]byte, l)
+ if copy(o.Data, msg[off:]) != int(l) {
+ return OPTResource{}, &nestedError{"Data", errCalcLen}
+ }
+ off += int(l)
+ opts = append(opts, o)
+ }
+ return OPTResource{opts}, nil
+}
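
In the public package an OPT record is normally assembled with ResourceHeader.SetEDNS0, which fills in the RFC 6891 pseudo-header fields. A sketch under the assumption that the public module matches this copy; option code 10 is the EDNS COOKIE code from RFC 7873, and the payload bytes are made up:

    package main

    import "golang.org/x/net/dns/dnsmessage"

    func main() {
        var hdr dnsmessage.ResourceHeader
        // SetEDNS0 sets the root name, TypeOPT, and the UDP payload
        // size (carried in the Class field) as RFC 6891 requires.
        if err := hdr.SetEDNS0(4096, dnsmessage.RCodeSuccess, false); err != nil {
            panic(err)
        }
        msg := dnsmessage.Message{
            Questions: []dnsmessage.Question{{
                Name:  dnsmessage.MustNewName("example.com."),
                Type:  dnsmessage.TypeA,
                Class: dnsmessage.ClassINET,
            }},
            Additionals: []dnsmessage.Resource{{
                Header: hdr,
                Body: &dnsmessage.OPTResource{
                    Options: []dnsmessage.Option{
                        // Code 10 = EDNS COOKIE (RFC 7873); example payload.
                        {Code: 10, Data: []byte{1, 2, 3, 4, 5, 6, 7, 8}},
                    },
                },
            }},
        }
        if _, err := msg.Pack(); err != nil { // drives OPTResource.pack
            panic(err)
        }
    }
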
+
+// An UnknownResource is a catch-all container for unknown record types.
+type UnknownResource struct {
+ Type Type
+ Data []byte
+}
+
+func (r *UnknownResource) realType() Type {
+ return r.Type
+}
+
+// pack appends the wire format of the UnknownResource to msg.
+func (r *UnknownResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
+ return packBytes(msg, r.Data[:]), nil
+}
+
+// GoString implements fmt.GoStringer.GoString.
+func (r *UnknownResource) GoString() string {
+ return "dnsmessage.UnknownResource{" +
+ "Type: " + r.Type.GoString() + ", " +
+ "Data: []byte{" + printByteSlice(r.Data) + "}}"
+}
+
+func unpackUnknownResource(recordType Type, msg []byte, off int, length uint16) (UnknownResource, error) {
+ parsed := UnknownResource{
+ Type: recordType,
+ Data: make([]byte, length),
+ }
+ if _, err := unpackBytes(msg, off, parsed.Data); err != nil {
+ return UnknownResource{}, err
+ }
+ return parsed, nil
+}
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/http/httpguts/guts.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/http/httpguts/guts.go
index e6cd0ced39..e6cd0ced39 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/http/httpguts/guts.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/http/httpguts/guts.go
diff --git a/contrib/go/_std_1.19/src/vendor/golang.org/x/net/http/httpguts/httplex.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/http/httpguts/httplex.go
new file mode 100644
index 0000000000..6e071e8524
--- /dev/null
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/http/httpguts/httplex.go
@@ -0,0 +1,352 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httpguts
+
+import (
+ "net"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/net/idna"
+)
+
+var isTokenTable = [127]bool{
+ '!': true,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'V': true,
+ 'W': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '|': true,
+ '~': true,
+}
+
+func IsTokenRune(r rune) bool {
+ i := int(r)
+ return i < len(isTokenTable) && isTokenTable[i]
+}
+
+func isNotToken(r rune) bool {
+ return !IsTokenRune(r)
+}
+
+// HeaderValuesContainsToken reports whether any string in values
+// contains the provided token, ASCII case-insensitively.
+func HeaderValuesContainsToken(values []string, token string) bool {
+ for _, v := range values {
+ if headerValueContainsToken(v, token) {
+ return true
+ }
+ }
+ return false
+}
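
A quick illustration of the comma-splitting and case-insensitive matching this performs, via the public golang.org/x/net/http/httpguts mirror of this vendored package (the header values are illustrative):

    package main

    import (
        "fmt"

        "golang.org/x/net/http/httpguts"
    )

    func main() {
        // A typical Connection header sent by a WebSocket client.
        values := []string{"keep-alive, Upgrade"}
        fmt.Println(httpguts.HeaderValuesContainsToken(values, "upgrade")) // true: ASCII case-insensitive
        fmt.Println(httpguts.HeaderValuesContainsToken(values, "close"))   // false
    }
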
+
+// isOWS reports whether b is an optional whitespace byte, as defined
+// by RFC 7230 section 3.2.3.
+func isOWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// trimOWS returns x with all optional whitespace removed from the
+// beginning and end.
+func trimOWS(x string) string {
+ // TODO: consider using strings.Trim(x, " \t") instead,
+ // if and when it's fast enough. See issue 10292.
+ // But this ASCII-only code will probably always beat UTF-8
+ // aware code.
+ for len(x) > 0 && isOWS(x[0]) {
+ x = x[1:]
+ }
+ for len(x) > 0 && isOWS(x[len(x)-1]) {
+ x = x[:len(x)-1]
+ }
+ return x
+}
+
+// headerValueContainsToken reports whether v (assumed to be a
+// 0#element, in the ABNF extension described in RFC 7230 section 7)
+// contains token amongst its comma-separated tokens, ASCII
+// case-insensitively.
+func headerValueContainsToken(v string, token string) bool {
+ for comma := strings.IndexByte(v, ','); comma != -1; comma = strings.IndexByte(v, ',') {
+ if tokenEqual(trimOWS(v[:comma]), token) {
+ return true
+ }
+ v = v[comma+1:]
+ }
+ return tokenEqual(trimOWS(v), token)
+}
+
+// lowerASCII returns the ASCII lowercase version of b.
+func lowerASCII(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+}
+
+// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
+func tokenEqual(t1, t2 string) bool {
+ if len(t1) != len(t2) {
+ return false
+ }
+ for i, b := range t1 {
+ if b >= utf8.RuneSelf {
+ // No UTF-8 or non-ASCII allowed in tokens.
+ return false
+ }
+ if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// isLWS reports whether b is linear white space, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+//
+// LWS = [CRLF] 1*( SP | HT )
+func isLWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// isCTL reports whether b is a control byte, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+//
+// CTL = <any US-ASCII control character
+// (octets 0 - 31) and DEL (127)>
+func isCTL(b byte) bool {
+ const del = 0x7f // a CTL
+ return b < ' ' || b == del
+}
+
+// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
+// HTTP/2 imposes the additional restriction that uppercase ASCII
+// letters are not allowed.
+//
+// RFC 7230 says:
+//
+// header-field = field-name ":" OWS field-value OWS
+// field-name = token
+// token = 1*tchar
+// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
+// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
+func ValidHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+ }
+ for _, r := range v {
+ if !IsTokenRune(r) {
+ return false
+ }
+ }
+ return true
+}
+
+// ValidHostHeader reports whether h is a valid host header.
+func ValidHostHeader(h string) bool {
+ // The latest spec is actually this:
+ //
+ // http://tools.ietf.org/html/rfc7230#section-5.4
+ // Host = uri-host [ ":" port ]
+ //
+ // Where uri-host is:
+ // http://tools.ietf.org/html/rfc3986#section-3.2.2
+ //
+ // But we're going to be much more lenient for now and just
+ // search for any byte that's not a valid byte in any of those
+ // expressions.
+ for i := 0; i < len(h); i++ {
+ if !validHostByte[h[i]] {
+ return false
+ }
+ }
+ return true
+}
+
+// See the ValidHostHeader comment.
+var validHostByte = [256]bool{
+ '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
+ '8': true, '9': true,
+
+ 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
+ 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
+ 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
+ 'y': true, 'z': true,
+
+ 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
+ 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
+ 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
+ 'Y': true, 'Z': true,
+
+ '!': true, // sub-delims
+ '$': true, // sub-delims
+ '%': true, // pct-encoded (and used in IPv6 zones)
+ '&': true, // sub-delims
+ '(': true, // sub-delims
+ ')': true, // sub-delims
+ '*': true, // sub-delims
+ '+': true, // sub-delims
+ ',': true, // sub-delims
+ '-': true, // unreserved
+ '.': true, // unreserved
+ ':': true, // IPv6address + Host expression's optional port
+ ';': true, // sub-delims
+ '=': true, // sub-delims
+ '[': true,
+ '\'': true, // sub-delims
+ ']': true,
+ '_': true, // unreserved
+ '~': true, // unreserved
+}
+
+// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
+//
+// message-header = field-name ":" [ field-value ]
+// field-value = *( field-content | LWS )
+// field-content = <the OCTETs making up the field-value
+// and consisting of either *TEXT or combinations
+// of token, separators, and quoted-string>
+//
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
+//
+// TEXT = <any OCTET except CTLs,
+// but including LWS>
+// LWS = [CRLF] 1*( SP | HT )
+// CTL = <any US-ASCII control character
+// (octets 0 - 31) and DEL (127)>
+//
+// RFC 7230 says:
+//
+// field-value = *( field-content / obs-fold )
+// obs-fold = N/A to http2, and deprecated
+// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+// field-vchar = VCHAR / obs-text
+// obs-text = %x80-FF
+// VCHAR = "any visible [USASCII] character"
+//
+// http2 further says: "Similarly, HTTP/2 allows header field values
+// that are not valid. While most of the values that can be encoded
+// will not alter header field parsing, carriage return (CR, ASCII
+// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
+// 0x0) might be exploited by an attacker if they are translated
+// verbatim. Any request or response that contains a character not
+// permitted in a header field value MUST be treated as malformed
+// (Section 8.1.2.6). Valid characters are defined by the
+// field-content ABNF rule in Section 3.2 of [RFC7230]."
+//
+// This function does not (yet?) properly handle the rejection of
+// strings that begin or end with SP or HTAB.
+func ValidHeaderFieldValue(v string) bool {
+ for i := 0; i < len(v); i++ {
+ b := v[i]
+ if isCTL(b) && !isLWS(b) {
+ return false
+ }
+ }
+ return true
+}
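
Side by side, the name and value validators behave as follows; a small sketch with illustrative inputs:

    package main

    import (
        "fmt"

        "golang.org/x/net/http/httpguts"
    )

    func main() {
        fmt.Println(httpguts.ValidHeaderFieldName("X-Request-Id"))  // true: every byte is a tchar
        fmt.Println(httpguts.ValidHeaderFieldName("X Request Id"))  // false: SP is not a tchar
        fmt.Println(httpguts.ValidHeaderFieldValue("a b\tc"))       // true: SP and HTAB are LWS
        fmt.Println(httpguts.ValidHeaderFieldValue("bad\r\nvalue")) // false: CR and LF are CTLs
    }
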
+
+func isASCII(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= utf8.RuneSelf {
+ return false
+ }
+ }
+ return true
+}
+
+// PunycodeHostPort returns the IDNA Punycode version
+// of the provided "host" or "host:port" string.
+func PunycodeHostPort(v string) (string, error) {
+ if isASCII(v) {
+ return v, nil
+ }
+
+ host, port, err := net.SplitHostPort(v)
+ if err != nil {
+ // The input 'v' argument was just a "host" argument,
+ // without a port. This error should not be returned
+ // to the caller.
+ host = v
+ port = ""
+ }
+ host, err = idna.ToASCII(host)
+ if err != nil {
+ // Non-UTF-8? Not representable in Punycode, in any
+ // case.
+ return "", err
+ }
+ if port == "" {
+ return host, nil
+ }
+ return net.JoinHostPort(host, port), nil
+}
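
A short usage sketch, assuming the public httpguts module matches this copy; "bücher" is the customary IDNA example and maps to the Punycode label shown:

    package main

    import (
        "fmt"

        "golang.org/x/net/http/httpguts"
    )

    func main() {
        got, err := httpguts.PunycodeHostPort("bücher.example:8080")
        if err != nil {
            panic(err)
        }
        fmt.Println(got) // xn--bcher-kva.example:8080
    }
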
diff --git a/contrib/go/_std_1.19/src/vendor/golang.org/x/net/http/httpproxy/proxy.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/http/httpproxy/proxy.go
new file mode 100644
index 0000000000..16994ac134
--- /dev/null
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/http/httpproxy/proxy.go
@@ -0,0 +1,371 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httpproxy provides support for HTTP proxy determination
+// based on environment variables, as provided by net/http's
+// ProxyFromEnvironment function.
+//
+// The API is not subject to the Go 1 compatibility promise and may change at
+// any time.
+package httpproxy
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "os"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/net/idna"
+)
+
+// Config holds configuration for HTTP proxy settings. See
+// FromEnvironment for details.
+type Config struct {
+ // HTTPProxy represents the value of the HTTP_PROXY or
+ // http_proxy environment variable. It will be used as the proxy
+ // URL for HTTP requests unless overridden by NoProxy.
+ HTTPProxy string
+
+ // HTTPSProxy represents the HTTPS_PROXY or https_proxy
+ // environment variable. It will be used as the proxy URL for
+ // HTTPS requests unless overridden by NoProxy.
+ HTTPSProxy string
+
+ // NoProxy represents the NO_PROXY or no_proxy environment
+ // variable. It specifies a comma-separated list of hosts that should be
+ // excluded from proxying. Each value is
+ // represented by an IP address prefix (1.2.3.4), an IP address prefix in
+ // CIDR notation (1.2.3.4/8), a domain name, or a special DNS label (*).
+ // An IP address prefix and domain name can also include a literal port
+ // number (1.2.3.4:80).
+ // A domain name matches that name and all subdomains. A domain name with
+ // a leading "." matches subdomains only. For example "foo.com" matches
+ // "foo.com" and "bar.foo.com"; ".y.com" matches "x.y.com" but not "y.com".
+ // A single asterisk (*) indicates that no proxying should be done.
+ // A best effort is made to parse the string and errors are
+ // ignored.
+ NoProxy string
+
+ // CGI holds whether the current process is running
+ // as a CGI handler (FromEnvironment infers this from the
+ // presence of a REQUEST_METHOD environment variable).
+ // When this is set, ProxyForURL will return an error
+ // when HTTPProxy applies, because a client could be
+ // setting HTTP_PROXY maliciously. See https://golang.org/s/cgihttpproxy.
+ CGI bool
+}
+
+// config holds the parsed configuration for HTTP proxy settings.
+type config struct {
+ // Config represents the original configuration as defined above.
+ Config
+
+ // httpsProxy is the parsed URL of the HTTPSProxy if defined.
+ httpsProxy *url.URL
+
+ // httpProxy is the parsed URL of the HTTPProxy if defined.
+ httpProxy *url.URL
+
+ // ipMatchers represent all values in the NoProxy that are IP address
+ // prefixes or an IP address in CIDR notation.
+ ipMatchers []matcher
+
+ // domainMatchers represent all values in the NoProxy that are a domain
+ // name or hostname & domain name
+ domainMatchers []matcher
+}
+
+// FromEnvironment returns a Config instance populated from the
+// environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the
+// lowercase versions thereof). HTTPS_PROXY takes precedence over
+// HTTP_PROXY for https requests.
+//
+// The environment values may be either a complete URL or a
+// "host[:port]", in which case the "http" scheme is assumed. An error
+// is returned if the value is a different form.
+func FromEnvironment() *Config {
+ return &Config{
+ HTTPProxy: getEnvAny("HTTP_PROXY", "http_proxy"),
+ HTTPSProxy: getEnvAny("HTTPS_PROXY", "https_proxy"),
+ NoProxy: getEnvAny("NO_PROXY", "no_proxy"),
+ CGI: os.Getenv("REQUEST_METHOD") != "",
+ }
+}
+
+func getEnvAny(names ...string) string {
+ for _, n := range names {
+ if val := os.Getenv(n); val != "" {
+ return val
+ }
+ }
+ return ""
+}
+
+// ProxyFunc returns a function that determines the proxy URL to use for
+// a given request URL. Changing the contents of cfg will not affect
+// proxy functions created earlier.
+//
+// A nil URL and nil error are returned if no proxy is defined in the
+// environment, or a proxy should not be used for the given request, as
+// defined by NO_PROXY.
+//
+// As a special case, if req.URL.Host is "localhost" or a loopback address
+// (with or without a port number), then a nil URL and nil error will be returned.
+func (cfg *Config) ProxyFunc() func(reqURL *url.URL) (*url.URL, error) {
+ // Preprocess the Config settings for more efficient evaluation.
+ cfg1 := &config{
+ Config: *cfg,
+ }
+ cfg1.init()
+ return cfg1.proxyForURL
+}
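
End to end, Config and ProxyFunc combine like this; a minimal sketch with made-up proxy and host names:

    package main

    import (
        "fmt"
        "net/url"

        "golang.org/x/net/http/httpproxy"
    )

    func main() {
        cfg := &httpproxy.Config{
            HTTPSProxy: "http://proxy.corp.internal:3128",
            NoProxy:    ".example.com,10.0.0.0/8",
        }
        proxy := cfg.ProxyFunc()

        for _, raw := range []string{
            "https://api.example.com/v1", // nil proxy: matches .example.com in NoProxy
            "https://golang.org/dl/",     // proxied
        } {
            u, err := url.Parse(raw)
            if err != nil {
                panic(err)
            }
            p, err := proxy(u)
            fmt.Println(raw, "->", p, err)
        }
    }
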
+
+func (cfg *config) proxyForURL(reqURL *url.URL) (*url.URL, error) {
+ var proxy *url.URL
+ if reqURL.Scheme == "https" {
+ proxy = cfg.httpsProxy
+ } else if reqURL.Scheme == "http" {
+ proxy = cfg.httpProxy
+ if proxy != nil && cfg.CGI {
+ return nil, errors.New("refusing to use HTTP_PROXY value in CGI environment; see golang.org/s/cgihttpproxy")
+ }
+ }
+ if proxy == nil {
+ return nil, nil
+ }
+ if !cfg.useProxy(canonicalAddr(reqURL)) {
+ return nil, nil
+ }
+
+ return proxy, nil
+}
+
+func parseProxy(proxy string) (*url.URL, error) {
+ if proxy == "" {
+ return nil, nil
+ }
+
+ proxyURL, err := url.Parse(proxy)
+ if err != nil ||
+ (proxyURL.Scheme != "http" &&
+ proxyURL.Scheme != "https" &&
+ proxyURL.Scheme != "socks5") {
+ // proxy was bogus. Try prepending "http://" to it and
+ // see if that parses correctly. If not, we fall
+ // through and complain about the original one.
+ if proxyURL, err := url.Parse("http://" + proxy); err == nil {
+ return proxyURL, nil
+ }
+ }
+ if err != nil {
+ return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err)
+ }
+ return proxyURL, nil
+}
+
+// useProxy reports whether requests to addr should use a proxy,
+// according to the NO_PROXY or no_proxy environment variable.
+// addr is always a canonicalAddr with a host and port.
+func (cfg *config) useProxy(addr string) bool {
+ if len(addr) == 0 {
+ return true
+ }
+ host, port, err := net.SplitHostPort(addr)
+ if err != nil {
+ return false
+ }
+ if host == "localhost" {
+ return false
+ }
+ ip := net.ParseIP(host)
+ if ip != nil {
+ if ip.IsLoopback() {
+ return false
+ }
+ }
+
+ addr = strings.ToLower(strings.TrimSpace(host))
+
+ if ip != nil {
+ for _, m := range cfg.ipMatchers {
+ if m.match(addr, port, ip) {
+ return false
+ }
+ }
+ }
+ for _, m := range cfg.domainMatchers {
+ if m.match(addr, port, ip) {
+ return false
+ }
+ }
+ return true
+}
+
+func (c *config) init() {
+ if parsed, err := parseProxy(c.HTTPProxy); err == nil {
+ c.httpProxy = parsed
+ }
+ if parsed, err := parseProxy(c.HTTPSProxy); err == nil {
+ c.httpsProxy = parsed
+ }
+
+ for _, p := range strings.Split(c.NoProxy, ",") {
+ p = strings.ToLower(strings.TrimSpace(p))
+ if len(p) == 0 {
+ continue
+ }
+
+ if p == "*" {
+ c.ipMatchers = []matcher{allMatch{}}
+ c.domainMatchers = []matcher{allMatch{}}
+ return
+ }
+
+ // IPv4/CIDR, IPv6/CIDR
+ if _, pnet, err := net.ParseCIDR(p); err == nil {
+ c.ipMatchers = append(c.ipMatchers, cidrMatch{cidr: pnet})
+ continue
+ }
+
+ // IPv4:port, [IPv6]:port
+ phost, pport, err := net.SplitHostPort(p)
+ if err == nil {
+ if len(phost) == 0 {
+ // There is no host part, likely the entry is malformed; ignore.
+ continue
+ }
+ if phost[0] == '[' && phost[len(phost)-1] == ']' {
+ phost = phost[1 : len(phost)-1]
+ }
+ } else {
+ phost = p
+ }
+ // IPv4, IPv6
+ if pip := net.ParseIP(phost); pip != nil {
+ c.ipMatchers = append(c.ipMatchers, ipMatch{ip: pip, port: pport})
+ continue
+ }
+
+ if len(phost) == 0 {
+ // There is no host part, likely the entry is malformed; ignore.
+ continue
+ }
+
+ // domain.com or domain.com:80
+ // foo.com matches bar.foo.com
+ // .domain.com or .domain.com:port
+ // *.domain.com or *.domain.com:port
+ if strings.HasPrefix(phost, "*.") {
+ phost = phost[1:]
+ }
+ matchHost := false
+ if phost[0] != '.' {
+ matchHost = true
+ phost = "." + phost
+ }
+ if v, err := idnaASCII(phost); err == nil {
+ phost = v
+ }
+ c.domainMatchers = append(c.domainMatchers, domainMatch{host: phost, port: pport, matchHost: matchHost})
+ }
+}
+
+var portMap = map[string]string{
+ "http": "80",
+ "https": "443",
+ "socks5": "1080",
+}
+
+// canonicalAddr returns url.Host but always with a ":port" suffix.
+func canonicalAddr(url *url.URL) string {
+ addr := url.Hostname()
+ if v, err := idnaASCII(addr); err == nil {
+ addr = v
+ }
+ port := url.Port()
+ if port == "" {
+ port = portMap[url.Scheme]
+ }
+ return net.JoinHostPort(addr, port)
+}
+
+// hasPort reports whether s, a string of the form "host", "host:port",
+// or "[ipv6::address]:port", includes a port.
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
+
+func idnaASCII(v string) (string, error) {
+ // TODO: Consider removing this check after verifying performance is okay.
+ // Right now punycode verification, length checks, context checks, and the
+ // permissible character tests are all omitted. It also prevents the ToASCII
+ // call from salvaging an invalid IDN, when possible. As a result it may be
+ // possible to have two IDNs that appear identical to the user where the
+ // ASCII-only version causes an error downstream whereas the non-ASCII
+ // version does not.
+ // Note that for correct ASCII IDNs ToASCII will only do considerably more
+ // work, but it will not cause an allocation.
+ if isASCII(v) {
+ return v, nil
+ }
+ return idna.Lookup.ToASCII(v)
+}
+
+func isASCII(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= utf8.RuneSelf {
+ return false
+ }
+ }
+ return true
+}
+
+// matcher represents the matching rule for a given value in the NO_PROXY list
+type matcher interface {
+ // match returns true if the host and optional port or ip and optional port
+ // are allowed
+ match(host, port string, ip net.IP) bool
+}
+
+// allMatch matches on all possible inputs
+type allMatch struct{}
+
+func (a allMatch) match(host, port string, ip net.IP) bool {
+ return true
+}
+
+type cidrMatch struct {
+ cidr *net.IPNet
+}
+
+func (m cidrMatch) match(host, port string, ip net.IP) bool {
+ return m.cidr.Contains(ip)
+}
+
+type ipMatch struct {
+ ip net.IP
+ port string
+}
+
+func (m ipMatch) match(host, port string, ip net.IP) bool {
+ if m.ip.Equal(ip) {
+ return m.port == "" || m.port == port
+ }
+ return false
+}
+
+type domainMatch struct {
+ host string
+ port string
+
+ matchHost bool
+}
+
+func (m domainMatch) match(host, port string, ip net.IP) bool {
+ if strings.HasSuffix(host, m.host) || (m.matchHost && host == m.host[1:]) {
+ return m.port == "" || m.port == port
+ }
+ return false
+}
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/http2/hpack/encode.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/http2/hpack/encode.go
index 97f17831fc..97f17831fc 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/http2/hpack/encode.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/http2/hpack/encode.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/http2/hpack/hpack.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/http2/hpack/hpack.go
index 85f18a2b0a..85f18a2b0a 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/http2/hpack/hpack.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/http2/hpack/hpack.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/http2/hpack/huffman.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/http2/hpack/huffman.go
index fe0b84ccd4..fe0b84ccd4 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/http2/hpack/huffman.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/http2/hpack/huffman.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/http2/hpack/tables.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/http2/hpack/tables.go
index a66cfbea69..a66cfbea69 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/http2/hpack/tables.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/http2/hpack/tables.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/go118.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/go118.go
index c5c4338dbe..c5c4338dbe 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/go118.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/go118.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/idna10.0.0.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/idna10.0.0.go
index 64ccf85feb..64ccf85feb 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/idna10.0.0.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/idna10.0.0.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/punycode.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/punycode.go
index e8e3ac11a9..e8e3ac11a9 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/punycode.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/punycode.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/tables13.0.0.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/tables13.0.0.go
index 390c5e56d2..390c5e56d2 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/tables13.0.0.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/tables13.0.0.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/trie.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/trie.go
index c4ef847e7a..c4ef847e7a 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/idna/trie.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/trie.go
diff --git a/contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/trieval.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/trieval.go
new file mode 100644
index 0000000000..9c070a44b3
--- /dev/null
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/idna/trieval.go
@@ -0,0 +1,119 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package idna
+
+// This file contains definitions for interpreting the trie value of the idna
+// trie generated by "go run gen*.go". It is shared by both the generator
+// program and the resultant package. Sharing is achieved by the generator
+// copying gen_trieval.go to trieval.go and changing what's above this comment.
+
+// info holds information from the IDNA mapping table for a single rune. It is
+// the value returned by a trie lookup. In most cases, all information fits in
+// a 16-bit value. For mappings, this value may contain an index into a slice
+// with the mapped string. Such mappings can consist of the actual mapped value
+// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the
+// input rune. This technique is used by the cases packages and reduces the
+// table size significantly.
+//
+// The per-rune values have the following format:
+//
+// if mapped {
+// if inlinedXOR {
+// 15..13 inline XOR marker
+// 12..11 unused
+// 10..3 inline XOR mask
+// } else {
+// 15..3 index into xor or mapping table
+// }
+// } else {
+// 15..14 unused
+// 13 mayNeedNorm
+// 12..11 attributes
+// 10..8 joining type
+// 7..3 category type
+// }
+// 2 use xor pattern
+// 1..0 mapped category
+//
+// See the definitions below for a more detailed description of the various
+// bits.
+type info uint16
+
+const (
+ catSmallMask = 0x3
+ catBigMask = 0xF8
+ indexShift = 3
+ xorBit = 0x4 // interpret the index as an xor pattern
+ inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined.
+
+ joinShift = 8
+ joinMask = 0x07
+
+ // Attributes
+ attributesMask = 0x1800
+ viramaModifier = 0x1800
+ modifier = 0x1000
+ rtl = 0x0800
+
+ mayNeedNorm = 0x2000
+)
+
+// A category corresponds to a category defined in the IDNA mapping table.
+type category uint16
+
+const (
+ unknown category = 0 // not currently defined in unicode.
+ mapped category = 1
+ disallowedSTD3Mapped category = 2
+ deviation category = 3
+)
+
+const (
+ valid category = 0x08
+ validNV8 category = 0x18
+ validXV8 category = 0x28
+ disallowed category = 0x40
+ disallowedSTD3Valid category = 0x80
+ ignored category = 0xC0
+)
+
+// join types and additional rune information
+const (
+ joiningL = (iota + 1)
+ joiningD
+ joiningT
+ joiningR
+
+ // the following types are derived during processing
+ joinZWJ
+ joinZWNJ
+ joinVirama
+ numJoinTypes
+)
+
+func (c info) isMapped() bool {
+ return c&0x3 != 0
+}
+
+func (c info) category() category {
+ small := c & catSmallMask
+ if small != 0 {
+ return category(small)
+ }
+ return category(c & catBigMask)
+}
+
+func (c info) joinType() info {
+ if c.isMapped() {
+ return 0
+ }
+ return (c >> joinShift) & joinMask
+}
+
+func (c info) isModifier() bool {
+ return c&(modifier|catSmallMask) == modifier
+}
+
+func (c info) isViramaModifier() bool {
+ return c&(attributesMask|catSmallMask) == viramaModifier
+}
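
The bit layout documented above can be decoded by hand. A standalone sketch that mirrors category() and joinType(); the sample value 0x0208 is hypothetical:

    package main

    import "fmt"

    // Masks and shifts copied from the layout described in trieval.go.
    const (
        catSmallMask = 0x3
        catBigMask   = 0xF8
        joinShift    = 8
        joinMask     = 0x07
    )

    func main() {
        // Hypothetical unmapped rune: bits 7..3 hold category 0x08
        // ("valid"), bits 10..8 hold joining type 2.
        v := uint16(0x0208)

        cat := v & catSmallMask
        if cat == 0 {
            cat = v & catBigMask
        }
        fmt.Printf("mapped=%v category=%#x joinType=%d\n",
            v&catSmallMask != 0, cat, (v>>joinShift)&joinMask)
    }
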
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/address.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/address.go
index 1898ed0fad..1898ed0fad 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/address.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/address.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/binary.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/binary.go
index a5e28f1e9c..a5e28f1e9c 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/binary.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/binary.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/empty.s b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/empty.s
index 90ab4ca3d8..90ab4ca3d8 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/empty.s
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/empty.s
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/interface.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/interface.go
index 9e9407830c..9e9407830c 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/interface.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/interface.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/interface_classic.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/interface_classic.go
index 85b7e993bb..85b7e993bb 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/interface_classic.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/interface_classic.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/interface_multicast.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/interface_multicast.go
index dd0b214baa..dd0b214baa 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/interface_multicast.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/interface_multicast.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/message.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/message.go
index 456a8363fe..456a8363fe 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/message.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/message.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/route.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/route.go
index fd0019ecc5..fd0019ecc5 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/route.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/route.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/route_classic.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/route_classic.go
index d6ee42f1b1..d6ee42f1b1 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/route_classic.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/route_classic.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/sys.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/sys.go
index 537484ae5a..537484ae5a 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/sys.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/sys.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/sys_darwin.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/sys_darwin.go
index d2daf5c05a..d2daf5c05a 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/sys_darwin.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/sys_darwin.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/syscall.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/syscall.go
index 68d37c9621..68d37c9621 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/syscall.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/syscall.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/zsys_darwin.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/zsys_darwin.go
index 19e4133f7d..19e4133f7d 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/net/route/zsys_darwin.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/net/route/zsys_darwin.go
diff --git a/contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/byteorder.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/byteorder.go
new file mode 100644
index 0000000000..271055be0b
--- /dev/null
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/byteorder.go
@@ -0,0 +1,66 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+ "runtime"
+)
+
+// byteOrder is a subset of encoding/binary.ByteOrder.
+type byteOrder interface {
+ Uint32([]byte) uint32
+ Uint64([]byte) uint64
+}
+
+type littleEndian struct{}
+type bigEndian struct{}
+
+func (littleEndian) Uint32(b []byte) uint32 {
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func (littleEndian) Uint64(b []byte) uint64 {
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func (bigEndian) Uint32(b []byte) uint32 {
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+}
+
+func (bigEndian) Uint64(b []byte) uint64 {
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+}
+
+// hostByteOrder returns littleEndian on little-endian machines and
+// bigEndian on big-endian machines.
+func hostByteOrder() byteOrder {
+ switch runtime.GOARCH {
+ case "386", "amd64", "amd64p32",
+ "alpha",
+ "arm", "arm64",
+ "loong64",
+ "mipsle", "mips64le", "mips64p32le",
+ "nios2",
+ "ppc64le",
+ "riscv", "riscv64",
+ "sh":
+ return littleEndian{}
+ case "armbe", "arm64be",
+ "m68k",
+ "mips", "mips64", "mips64p32",
+ "ppc", "ppc64",
+ "s390", "s390x",
+ "shbe",
+ "sparc", "sparc64":
+ return bigEndian{}
+ }
+ panic("unknown architecture")
+}
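
These unexported helpers mirror encoding/binary, so their behavior can be sanity-checked against the standard library:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        b := []byte{0x01, 0x02, 0x03, 0x04}
        fmt.Printf("little-endian: %#x\n", binary.LittleEndian.Uint32(b)) // 0x4030201
        fmt.Printf("big-endian:    %#x\n", binary.BigEndian.Uint32(b))    // 0x1020304
    }
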
diff --git a/contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/cpu.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/cpu.go
new file mode 100644
index 0000000000..83f112c4c8
--- /dev/null
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/cpu.go
@@ -0,0 +1,287 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cpu implements processor feature detection for
+// various CPU architectures.
+package cpu
+
+import (
+ "os"
+ "strings"
+)
+
+// Initialized reports whether the CPU features were initialized.
+//
+// For some GOOS/GOARCH combinations initialization of the CPU features depends
+// on reading an operating-system-specific file, e.g. /proc/self/auxv on
+// linux/arm. Initialized will report false if reading the file fails.
+var Initialized bool
+
+// CacheLinePad is used to pad structs to avoid false sharing.
+type CacheLinePad struct{ _ [cacheLineSize]byte }
+
+// X86 contains the supported CPU features of the
+// current X86/AMD64 platform. If the current platform
+// is not X86/AMD64 then all feature flags are false.
+//
+// X86 is padded to avoid false sharing. Further the HasAVX
+// and HasAVX2 are only set if the OS supports XMM and YMM
+// registers in addition to the CPUID feature bit being set.
+var X86 struct {
+ _ CacheLinePad
+ HasAES bool // AES hardware implementation (AES NI)
+ HasADX bool // Multi-precision add-carry instruction extensions
+ HasAVX bool // Advanced vector extension
+ HasAVX2 bool // Advanced vector extension 2
+ HasAVX512 bool // Advanced vector extension 512
+ HasAVX512F bool // Advanced vector extension 512 Foundation Instructions
+ HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions
+ HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions
+ HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions
+ HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions
+ HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions
+ HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions
+ HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add
+ HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions
+ HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision
+ HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision
+ HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions
+ HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations
+ HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions
+ HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions
+ HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions
+ HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2
+ HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms
+ HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions
+ HasBMI1 bool // Bit manipulation instruction set 1
+ HasBMI2 bool // Bit manipulation instruction set 2
+ HasCX16 bool // Compare and exchange 16 Bytes
+ HasERMS bool // Enhanced REP for MOVSB and STOSB
+ HasFMA bool // Fused-multiply-add instructions
+ HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers.
+ HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM
+ HasPOPCNT bool // Hamming weight instruction POPCNT.
+ HasRDRAND bool // RDRAND instruction (on-chip random number generator)
+ HasRDSEED bool // RDSEED instruction (on-chip random number generator)
+ HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64)
+ HasSSE3 bool // Streaming SIMD extension 3
+ HasSSSE3 bool // Supplemental streaming SIMD extension 3
+ HasSSE41 bool // Streaming SIMD extension 4 and 4.1
+ HasSSE42 bool // Streaming SIMD extension 4 and 4.2
+ _ CacheLinePad
+}
+
+// ARM64 contains the supported CPU features of the
+// current ARMv8(aarch64) platform. If the current platform
+// is not arm64 then all feature flags are false.
+var ARM64 struct {
+ _ CacheLinePad
+ HasFP bool // Floating-point instruction set (always available)
+ HasASIMD bool // Advanced SIMD (always available)
+ HasEVTSTRM bool // Event stream support
+ HasAES bool // AES hardware implementation
+ HasPMULL bool // Polynomial multiplication instruction set
+ HasSHA1 bool // SHA1 hardware implementation
+ HasSHA2 bool // SHA2 hardware implementation
+ HasCRC32 bool // CRC32 hardware implementation
+ HasATOMICS bool // Atomic memory operation instruction set
+ HasFPHP bool // Half precision floating-point instruction set
+ HasASIMDHP bool // Advanced SIMD half precision instruction set
+ HasCPUID bool // CPUID identification scheme registers
+ HasASIMDRDM bool // Rounding double multiply add/subtract instruction set
+ HasJSCVT bool // Javascript conversion from floating-point to integer
+ HasFCMA bool // Floating-point multiplication and addition of complex numbers
+ HasLRCPC bool // Release Consistent processor consistent support
+ HasDCPOP bool // Persistent memory support
+ HasSHA3 bool // SHA3 hardware implementation
+ HasSM3 bool // SM3 hardware implementation
+ HasSM4 bool // SM4 hardware implementation
+ HasASIMDDP bool // Advanced SIMD double precision instruction set
+ HasSHA512 bool // SHA512 hardware implementation
+ HasSVE bool // Scalable Vector Extensions
+ HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32
+ _ CacheLinePad
+}
+
+// ARM contains the supported CPU features of the current ARM (32-bit) platform.
+// All feature flags are false if:
+// 1. the current platform is not arm, or
+// 2. the current operating system is not Linux.
+var ARM struct {
+ _ CacheLinePad
+ HasSWP bool // SWP instruction support
+ HasHALF bool // Half-word load and store support
+ HasTHUMB bool // ARM Thumb instruction set
+ Has26BIT bool // Address space limited to 26-bits
+ HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support
+ HasFPA bool // Floating point arithmetic support
+ HasVFP bool // Vector floating point support
+ HasEDSP bool // DSP Extensions support
+ HasJAVA bool // Java instruction set
+ HasIWMMXT bool // Intel Wireless MMX technology support
+ HasCRUNCH bool // MaverickCrunch context switching and handling
+ HasTHUMBEE bool // Thumb EE instruction set
+ HasNEON bool // NEON instruction set
+ HasVFPv3 bool // Vector floating point version 3 support
+ HasVFPv3D16 bool // Vector floating point version 3 D8-D15
+ HasTLS bool // Thread local storage support
+ HasVFPv4 bool // Vector floating point version 4 support
+ HasIDIVA bool // Integer divide instruction support in ARM mode
+ HasIDIVT bool // Integer divide instruction support in Thumb mode
+ HasVFPD32 bool // Vector floating point version 3 D15-D31
+ HasLPAE bool // Large Physical Address Extensions
+ HasEVTSTRM bool // Event stream support
+ HasAES bool // AES hardware implementation
+ HasPMULL bool // Polynomial multiplication instruction set
+ HasSHA1 bool // SHA1 hardware implementation
+ HasSHA2 bool // SHA2 hardware implementation
+ HasCRC32 bool // CRC32 hardware implementation
+ _ CacheLinePad
+}
+
+// MIPS64X contains the supported CPU features of the current mips64/mips64le
+// platforms. If the current platform is not mips64/mips64le or the current
+// operating system is not Linux then all feature flags are false.
+var MIPS64X struct {
+ _ CacheLinePad
+ HasMSA bool // MIPS SIMD architecture
+ _ CacheLinePad
+}
+
+// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms.
+// If the current platform is not ppc64/ppc64le then all feature flags are false.
+//
+// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00,
+// since there are no optional categories. There are some exceptions that also
+// require kernel support to work (DARN, SCV), so there are feature bits for
+// those as well. The struct is padded to avoid false sharing.
+var PPC64 struct {
+ _ CacheLinePad
+ HasDARN bool // Hardware random number generator (requires kernel enablement)
+ HasSCV bool // Syscall vectored (requires kernel enablement)
+ IsPOWER8 bool // ISA v2.07 (POWER8)
+ IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8
+ _ CacheLinePad
+}
+
+// S390X contains the supported CPU features of the current IBM Z
+// (s390x) platform. If the current platform is not IBM Z then all
+// feature flags are false.
+//
+// S390X is padded to avoid false sharing. Further HasVX is only set
+// if the OS supports vector registers in addition to the STFLE
+// feature bit being set.
+var S390X struct {
+ _ CacheLinePad
+ HasZARCH bool // z/Architecture mode is active [mandatory]
+ HasSTFLE bool // store facility list extended
+ HasLDISP bool // long (20-bit) displacements
+ HasEIMM bool // 32-bit immediates
+ HasDFP bool // decimal floating point
+ HasETF3EH bool // ETF-3 enhanced
+ HasMSA bool // message security assist (CPACF)
+ HasAES bool // KM-AES{128,192,256} functions
+ HasAESCBC bool // KMC-AES{128,192,256} functions
+ HasAESCTR bool // KMCTR-AES{128,192,256} functions
+ HasAESGCM bool // KMA-GCM-AES{128,192,256} functions
+ HasGHASH bool // KIMD-GHASH function
+ HasSHA1 bool // K{I,L}MD-SHA-1 functions
+ HasSHA256 bool // K{I,L}MD-SHA-256 functions
+ HasSHA512 bool // K{I,L}MD-SHA-512 functions
+ HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions
+ HasVX bool // vector facility
+ HasVXE bool // vector-enhancements facility 1
+ _ CacheLinePad
+}
+
+func init() {
+ archInit()
+ initOptions()
+ processOptions()
+}
+
+// options contains the cpu debug options that can be used in GODEBUG.
+// Options are arch dependent and are added by the arch specific initOptions functions.
+// Features that are mandatory for the specific GOARCH should have the Required field set
+// (e.g. SSE2 on amd64).
+var options []option
+
+// Option names should be lower case, e.g. avx instead of AVX.
+type option struct {
+ Name string
+ Feature *bool
+ Specified bool // whether feature value was specified in GODEBUG
+ Enable bool // whether feature should be enabled
+ Required bool // whether feature is mandatory and can not be disabled
+}
+
+func processOptions() {
+ env := os.Getenv("GODEBUG")
+field:
+ for env != "" {
+ field := ""
+ i := strings.IndexByte(env, ',')
+ if i < 0 {
+ field, env = env, ""
+ } else {
+ field, env = env[:i], env[i+1:]
+ }
+ if len(field) < 4 || field[:4] != "cpu." {
+ continue
+ }
+ i = strings.IndexByte(field, '=')
+ if i < 0 {
+ print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n")
+ continue
+ }
+ key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on"
+
+ var enable bool
+ switch value {
+ case "on":
+ enable = true
+ case "off":
+ enable = false
+ default:
+ print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n")
+ continue field
+ }
+
+ if key == "all" {
+ for i := range options {
+ options[i].Specified = true
+ options[i].Enable = enable || options[i].Required
+ }
+ continue field
+ }
+
+ for i := range options {
+ if options[i].Name == key {
+ options[i].Specified = true
+ options[i].Enable = enable
+ continue field
+ }
+ }
+
+ print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n")
+ }
+
+ for _, o := range options {
+ if !o.Specified {
+ continue
+ }
+
+ if o.Enable && !*o.Feature {
+ print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n")
+ continue
+ }
+
+ if !o.Enable && o.Required {
+ print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n")
+ continue
+ }
+
+ *o.Feature = o.Enable
+ }
+}
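
The GODEBUG plumbing above is observable through the exported feature flags. A minimal sketch against the public golang.org/x/sys/cpu module, which this vendored copy tracks; output depends on the host CPU:

    package main

    import (
        "fmt"

        "golang.org/x/sys/cpu"
    )

    func main() {
        // Try: GODEBUG=cpu.avx2=off go run .
        // processOptions clears HasAVX2 in that case.
        fmt.Println("initialized:", cpu.Initialized)
        fmt.Println("avx2:", cpu.X86.HasAVX2)
        fmt.Println("sse2:", cpu.X86.HasSSE2) // Required on amd64, cannot be disabled
    }
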
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
index fa7cdb9bcd..fa7cdb9bcd 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
index f4992b1a59..f4992b1a59 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/cpu_x86.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/cpu_x86.go
index f5aacfc825..f5aacfc825 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/cpu_x86.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/cpu_x86.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/cpu_x86.s b/contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/cpu_x86.s
index 39acab2ff5..39acab2ff5 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/cpu_x86.s
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/cpu_x86.s
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/hwcap_linux.go
index f3baa37932..f3baa37932 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/sys/cpu/hwcap_linux.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/sys/cpu/hwcap_linux.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/secure/bidirule/bidirule.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/secure/bidirule/bidirule.go
index e2b70f76c2..e2b70f76c2 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/secure/bidirule/bidirule.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/secure/bidirule/bidirule.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go
index 8a7392c4a1..8a7392c4a1 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/transform/transform.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/transform/transform.go
index 48ec64b40c..48ec64b40c 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/transform/transform.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/transform/transform.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/bidi.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/bidi.go
index fd057601bd..fd057601bd 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/bidi.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/bidi.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/bracket.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/bracket.go
index 1853939791..1853939791 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/bracket.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/bracket.go
diff --git a/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/core.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/core.go
new file mode 100644
index 0000000000..9d2ae547b5
--- /dev/null
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/core.go
@@ -0,0 +1,1071 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bidi
+
+import (
+ "fmt"
+ "log"
+)
+
+// This implementation is a port based on the reference implementation found at:
+// https://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/
+//
+// which is described in Unicode Bidirectional Algorithm (UAX #9).
+//
+// Input:
+// There are two levels of input to the algorithm, since clients may prefer to
+// supply some information from out-of-band sources rather than relying on the
+// default behavior.
+//
+// - Bidi class array
+// - Bidi class array, with externally supplied base line direction
+//
+// Output:
+// Output is separated into several stages:
+//
+// - levels array over entire paragraph
+// - reordering array over entire paragraph
+// - levels array over line
+// - reordering array over line
+//
+// Note that for conformance to the Unicode Bidirectional Algorithm,
+// implementations are only required to generate correct reordering and
+// character directionality (odd or even levels) over a line. Generating
+// identical level arrays over a line is not required. Bidi explicit format
+// codes (LRE, RLE, LRO, RLO, PDF) and BN can be assigned arbitrary levels and
+// positions as long as the rest of the input is properly reordered.
+//
+// As the algorithm is defined to operate on a single paragraph at a time, this
+// implementation is written to handle single paragraphs. Thus rule P1 is
+// presumed by this implementation: the data provided to the implementation is
+// assumed to be a single paragraph, and either contains no 'B' codes, or a
+// single 'B' code at the end of the input. 'B' is allowed as input to
+// illustrate how the algorithm assigns it a level.
+//
+// Also note that rules L3 and L4 depend on the rendering engine that uses the
+// result of the bidi algorithm. This implementation assumes that the rendering
+// engine expects combining marks in visual order (e.g. to the left of their
+// base character in RTL runs) and that it adjusts the glyphs used to render
+// mirrored characters that are in RTL runs so that they render appropriately.
+
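+// A minimal usage sketch (the Class values are chosen only for illustration;
+// newParagraph and the accessors used here are defined later in this file):
+//
+//	types := []Class{L, L, R, R}
+//	pairTypes := []bracketType{bpNone, bpNone, bpNone, bpNone}
+//	pairValues := []rune{0, 0, 0, 0}
+//	p, _ := newParagraph(types, pairTypes, pairValues, implicitLevel)
+//	levels := p.getLevels([]int{len(types)})    // level per input position
+//	order := p.getReordering([]int{len(types)}) // visual-to-logical map
+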
+// level is the embedding level of a character. Even embedding levels indicate
+// left-to-right order and odd levels indicate right-to-left order. The special
+// level of -1 is reserved for undefined order.
+type level int8
+
+const implicitLevel level = -1
+
+// in reports whether c is equal to any of the values in set.
+func (c Class) in(set ...Class) bool {
+ for _, s := range set {
+ if c == s {
+ return true
+ }
+ }
+ return false
+}
+
+// A paragraph contains the state of a paragraph.
+type paragraph struct {
+ initialTypes []Class
+
+ // Arrays of properties needed for paired bracket evaluation in N0
+	pairTypes []bracketType // paired bracket types for the paragraph
+	pairValues []rune // rune of the opening bracket for bpOpen and bpClose; 0 for bpNone
+
+	embeddingLevel level // default: implicitLevel
+
+	// working types and levels over the whole paragraph
+ resultTypes []Class
+ resultLevels []level
+
+ // Index of matching PDI for isolate initiator characters. For other
+ // characters, the value of matchingPDI will be set to -1. For isolate
+ // initiators with no matching PDI, matchingPDI will be set to the length of
+ // the input string.
+ matchingPDI []int
+
+ // Index of matching isolate initiator for PDI characters. For other
+ // characters, and for PDIs with no matching isolate initiator, the value of
+ // matchingIsolateInitiator will be set to -1.
+ matchingIsolateInitiator []int
+}
+
+// newParagraph initializes a paragraph. The user needs to supply a few arrays
+// corresponding to the preprocessed text input. The types correspond to the
+// Unicode BiDi classes for each rune. pairTypes indicates the bracket type for
+// each rune. pairValues provides a unique bracket class identifier for each
+// rune (the suggested identifier is the rune of the opening bracket, used for
+// both the opening and the matching closing bracket, after normalization).
+// The embedding levels are optional, but
+// may be supplied to encode embedding levels of styled text.
+func newParagraph(types []Class, pairTypes []bracketType, pairValues []rune, levels level) (*paragraph, error) {
+ var err error
+ if err = validateTypes(types); err != nil {
+ return nil, err
+ }
+ if err = validatePbTypes(pairTypes); err != nil {
+ return nil, err
+ }
+ if err = validatePbValues(pairValues, pairTypes); err != nil {
+ return nil, err
+ }
+ if err = validateParagraphEmbeddingLevel(levels); err != nil {
+ return nil, err
+ }
+
+ p := &paragraph{
+ initialTypes: append([]Class(nil), types...),
+ embeddingLevel: levels,
+
+ pairTypes: pairTypes,
+ pairValues: pairValues,
+
+ resultTypes: append([]Class(nil), types...),
+ }
+ p.run()
+ return p, nil
+}
+
+func (p *paragraph) Len() int { return len(p.initialTypes) }
+
+// The algorithm. Does not include line-based processing (Rules L1, L2).
+// These are applied later in the line-based phase of the algorithm.
+func (p *paragraph) run() {
+ p.determineMatchingIsolates()
+
+ // 1) determining the paragraph level
+ // Rule P1 is the requirement for entering this algorithm.
+ // Rules P2, P3.
+ // If no externally supplied paragraph embedding level, use default.
+ if p.embeddingLevel == implicitLevel {
+ p.embeddingLevel = p.determineParagraphEmbeddingLevel(0, p.Len())
+ }
+
+ // Initialize result levels to paragraph embedding level.
+ p.resultLevels = make([]level, p.Len())
+ setLevels(p.resultLevels, p.embeddingLevel)
+
+ // 2) Explicit levels and directions
+ // Rules X1-X8.
+ p.determineExplicitEmbeddingLevels()
+
+ // Rule X9.
+ // We do not remove the embeddings, the overrides, the PDFs, and the BNs
+ // from the string explicitly. But they are not copied into isolating run
+ // sequences when they are created, so they are removed for all
+ // practical purposes.
+
+ // Rule X10.
+ // Run remainder of algorithm one isolating run sequence at a time
+ for _, seq := range p.determineIsolatingRunSequences() {
+ // 3) resolving weak types
+ // Rules W1-W7.
+ seq.resolveWeakTypes()
+
+ // 4a) resolving paired brackets
+ // Rule N0
+ resolvePairedBrackets(seq)
+
+ // 4b) resolving neutral types
+		// Rules N1-N2.
+ seq.resolveNeutralTypes()
+
+ // 5) resolving implicit embedding levels
+ // Rules I1, I2.
+ seq.resolveImplicitLevels()
+
+ // Apply the computed levels and types
+ seq.applyLevelsAndTypes()
+ }
+
+ // Assign appropriate levels to 'hide' LREs, RLEs, LROs, RLOs, PDFs, and
+ // BNs. This is for convenience, so the resulting level array will have
+ // a value for every character.
+ p.assignLevelsToCharactersRemovedByX9()
+}
+
+// determineMatchingIsolates determines the matching PDI for each isolate
+// initiator and vice versa.
+//
+// Definition BD9.
+//
+// At the end of this function:
+//
+// - The member variable matchingPDI is set to point to the index of the
+// matching PDI character for each isolate initiator character. If there is
+// no matching PDI, it is set to the length of the input text. For other
+// characters, it is set to -1.
+// - The member variable matchingIsolateInitiator is set to point to the
+// index of the matching isolate initiator character for each PDI character.
+// If there is no matching isolate initiator, or the character is not a PDI,
+// it is set to -1.
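+//
+// For example, for resultTypes [LRI, L, PDI, L] this sets matchingPDI to
+// [2, -1, -1, -1] and matchingIsolateInitiator to [-1, -1, 0, -1].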
+func (p *paragraph) determineMatchingIsolates() {
+ p.matchingPDI = make([]int, p.Len())
+ p.matchingIsolateInitiator = make([]int, p.Len())
+
+ for i := range p.matchingIsolateInitiator {
+ p.matchingIsolateInitiator[i] = -1
+ }
+
+ for i := range p.matchingPDI {
+ p.matchingPDI[i] = -1
+
+ if t := p.resultTypes[i]; t.in(LRI, RLI, FSI) {
+ depthCounter := 1
+ for j := i + 1; j < p.Len(); j++ {
+ if u := p.resultTypes[j]; u.in(LRI, RLI, FSI) {
+ depthCounter++
+ } else if u == PDI {
+ if depthCounter--; depthCounter == 0 {
+ p.matchingPDI[i] = j
+ p.matchingIsolateInitiator[j] = i
+ break
+ }
+ }
+ }
+ if p.matchingPDI[i] == -1 {
+ p.matchingPDI[i] = p.Len()
+ }
+ }
+ }
+}
+
+// determineParagraphEmbeddingLevel reports the resolved paragraph direction of
+// the substring limited by the given range [start, end).
+//
+// Determines the paragraph level based on rules P2, P3. This is also used
+// in rule X5c to determine whether an FSI should resolve to LRI or RLI.
+func (p *paragraph) determineParagraphEmbeddingLevel(start, end int) level {
+ var strongType Class = unknownClass
+
+ // Rule P2.
+ for i := start; i < end; i++ {
+ if t := p.resultTypes[i]; t.in(L, AL, R) {
+ strongType = t
+ break
+ } else if t.in(FSI, LRI, RLI) {
+ i = p.matchingPDI[i] // skip over to the matching PDI
+ if i > end {
+ log.Panic("assert (i <= end)")
+ }
+ }
+ }
+ // Rule P3.
+ switch strongType {
+ case unknownClass: // none found
+		// The default embedding level when no strong types are found is 0.
+ return 0
+ case L:
+ return 0
+ default: // AL, R
+ return 1
+ }
+}
+
+const maxDepth = 125
+
+// This stack will store the embedding levels and override and isolate
+// statuses.
+type directionalStatusStack struct {
+ stackCounter int
+ embeddingLevelStack [maxDepth + 1]level
+ overrideStatusStack [maxDepth + 1]Class
+ isolateStatusStack [maxDepth + 1]bool
+}
+
+func (s *directionalStatusStack) empty() { s.stackCounter = 0 }
+func (s *directionalStatusStack) pop() { s.stackCounter-- }
+func (s *directionalStatusStack) depth() int { return s.stackCounter }
+
+func (s *directionalStatusStack) push(level level, overrideStatus Class, isolateStatus bool) {
+ s.embeddingLevelStack[s.stackCounter] = level
+ s.overrideStatusStack[s.stackCounter] = overrideStatus
+ s.isolateStatusStack[s.stackCounter] = isolateStatus
+ s.stackCounter++
+}
+
+func (s *directionalStatusStack) lastEmbeddingLevel() level {
+ return s.embeddingLevelStack[s.stackCounter-1]
+}
+
+func (s *directionalStatusStack) lastDirectionalOverrideStatus() Class {
+ return s.overrideStatusStack[s.stackCounter-1]
+}
+
+func (s *directionalStatusStack) lastDirectionalIsolateStatus() bool {
+ return s.isolateStatusStack[s.stackCounter-1]
+}
+
+// Determine explicit levels using rules X1 - X8
+func (p *paragraph) determineExplicitEmbeddingLevels() {
+ var stack directionalStatusStack
+ var overflowIsolateCount, overflowEmbeddingCount, validIsolateCount int
+
+ // Rule X1.
+ stack.push(p.embeddingLevel, ON, false)
+
+ for i, t := range p.resultTypes {
+ // Rules X2, X3, X4, X5, X5a, X5b, X5c
+ switch t {
+ case RLE, LRE, RLO, LRO, RLI, LRI, FSI:
+ isIsolate := t.in(RLI, LRI, FSI)
+ isRTL := t.in(RLE, RLO, RLI)
+
+ // override if this is an FSI that resolves to RLI
+ if t == FSI {
+ isRTL = (p.determineParagraphEmbeddingLevel(i+1, p.matchingPDI[i]) == 1)
+ }
+ if isIsolate {
+ p.resultLevels[i] = stack.lastEmbeddingLevel()
+ if stack.lastDirectionalOverrideStatus() != ON {
+ p.resultTypes[i] = stack.lastDirectionalOverrideStatus()
+ }
+ }
+
+ var newLevel level
+ if isRTL {
+ // least greater odd
+ newLevel = (stack.lastEmbeddingLevel() + 1) | 1
+ } else {
+ // least greater even
+ newLevel = (stack.lastEmbeddingLevel() + 2) &^ 1
+ }
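+			// e.g. from level 0, RTL yields (0+1)|1 = 1 and LTR yields
+			// (0+2)&^1 = 2; from level 1, RTL yields 3 and LTR yields 2.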
+
+ if newLevel <= maxDepth && overflowIsolateCount == 0 && overflowEmbeddingCount == 0 {
+ if isIsolate {
+ validIsolateCount++
+ }
+ // Push new embedding level, override status, and isolated
+ // status.
+ // No check for valid stack counter, since the level check
+ // suffices.
+ switch t {
+ case LRO:
+ stack.push(newLevel, L, isIsolate)
+ case RLO:
+ stack.push(newLevel, R, isIsolate)
+ default:
+ stack.push(newLevel, ON, isIsolate)
+ }
+ // Not really part of the spec
+ if !isIsolate {
+ p.resultLevels[i] = newLevel
+ }
+ } else {
+ // This is an invalid explicit formatting character,
+ // so apply the "Otherwise" part of rules X2-X5b.
+ if isIsolate {
+ overflowIsolateCount++
+ } else { // !isIsolate
+ if overflowIsolateCount == 0 {
+ overflowEmbeddingCount++
+ }
+ }
+ }
+
+ // Rule X6a
+ case PDI:
+ if overflowIsolateCount > 0 {
+ overflowIsolateCount--
+ } else if validIsolateCount == 0 {
+ // do nothing
+ } else {
+ overflowEmbeddingCount = 0
+ for !stack.lastDirectionalIsolateStatus() {
+ stack.pop()
+ }
+ stack.pop()
+ validIsolateCount--
+ }
+ p.resultLevels[i] = stack.lastEmbeddingLevel()
+
+ // Rule X7
+ case PDF:
+ // Not really part of the spec
+ p.resultLevels[i] = stack.lastEmbeddingLevel()
+
+ if overflowIsolateCount > 0 {
+ // do nothing
+ } else if overflowEmbeddingCount > 0 {
+ overflowEmbeddingCount--
+ } else if !stack.lastDirectionalIsolateStatus() && stack.depth() >= 2 {
+ stack.pop()
+ }
+
+ case B: // paragraph separator.
+ // Rule X8.
+
+			// These values are reset for clarity; in this implementation B
+ // can only occur as the last code in the array.
+ stack.empty()
+ overflowIsolateCount = 0
+ overflowEmbeddingCount = 0
+ validIsolateCount = 0
+ p.resultLevels[i] = p.embeddingLevel
+
+ default:
+ p.resultLevels[i] = stack.lastEmbeddingLevel()
+ if stack.lastDirectionalOverrideStatus() != ON {
+ p.resultTypes[i] = stack.lastDirectionalOverrideStatus()
+ }
+ }
+ }
+}
+
+type isolatingRunSequence struct {
+ p *paragraph
+
+ indexes []int // indexes to the original string
+
+ types []Class // type of each character using the index
+ resolvedLevels []level // resolved levels after application of rules
+ level level
+ sos, eos Class
+}
+
+func (i *isolatingRunSequence) Len() int { return len(i.indexes) }
+
+func maxLevel(a, b level) level {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types,
+// either L or R, for each isolating run sequence.
+func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
+ length := len(indexes)
+ types := make([]Class, length)
+ for i, x := range indexes {
+ types[i] = p.resultTypes[x]
+ }
+
+ // assign level, sos and eos
+ prevChar := indexes[0] - 1
+ for prevChar >= 0 && isRemovedByX9(p.initialTypes[prevChar]) {
+ prevChar--
+ }
+ prevLevel := p.embeddingLevel
+ if prevChar >= 0 {
+ prevLevel = p.resultLevels[prevChar]
+ }
+
+ var succLevel level
+ lastType := types[length-1]
+ if lastType.in(LRI, RLI, FSI) {
+ succLevel = p.embeddingLevel
+ } else {
+		// the first character after the end of the run sequence
+ limit := indexes[length-1] + 1
+ for ; limit < p.Len() && isRemovedByX9(p.initialTypes[limit]); limit++ {
+		}
+ succLevel = p.embeddingLevel
+ if limit < p.Len() {
+ succLevel = p.resultLevels[limit]
+ }
+ }
+ level := p.resultLevels[indexes[0]]
+ return &isolatingRunSequence{
+ p: p,
+ indexes: indexes,
+ types: types,
+ level: level,
+ sos: typeForLevel(maxLevel(prevLevel, level)),
+ eos: typeForLevel(maxLevel(succLevel, level)),
+ }
+}
+
+// Resolving weak types. Rules W1-W7.
+//
+// Note that some weak types (EN, AN) remain after this processing is
+// complete.
+func (s *isolatingRunSequence) resolveWeakTypes() {
+
+ // on entry, only these types remain
+ s.assertOnly(L, R, AL, EN, ES, ET, AN, CS, B, S, WS, ON, NSM, LRI, RLI, FSI, PDI)
+
+ // Rule W1.
+ // Changes all NSMs.
+ precedingCharacterType := s.sos
+ for i, t := range s.types {
+ if t == NSM {
+ s.types[i] = precedingCharacterType
+ } else {
+ // if t.in(LRI, RLI, FSI, PDI) {
+ // precedingCharacterType = ON
+ // }
+ precedingCharacterType = t
+ }
+ }
+
+ // Rule W2.
+ // EN does not change at the start of the run, because sos != AL.
+ for i, t := range s.types {
+ if t == EN {
+ for j := i - 1; j >= 0; j-- {
+ if t := s.types[j]; t.in(L, R, AL) {
+ if t == AL {
+ s.types[i] = AN
+ }
+ break
+ }
+ }
+ }
+ }
+
+ // Rule W3.
+ for i, t := range s.types {
+ if t == AL {
+ s.types[i] = R
+ }
+ }
+
+ // Rule W4.
+ // Since there must be values on both sides for this rule to have an
+ // effect, the scan skips the first and last value.
+ //
+ // Although the scan proceeds left to right, and changes the type
+ // values in a way that would appear to affect the computations
+ // later in the scan, there is actually no problem. A change in the
+ // current value can only affect the value to its immediate right,
+ // and only affect it if it is ES or CS. But the current value can
+ // only change if the value to its right is not ES or CS. Thus
+ // either the current value will not change, or its change will have
+ // no effect on the remainder of the analysis.
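+	//
+	// For example, EN ES EN resolves to EN EN EN, and AN CS AN
+	// resolves to AN AN AN.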
+
+ for i := 1; i < s.Len()-1; i++ {
+ t := s.types[i]
+ if t == ES || t == CS {
+ prevSepType := s.types[i-1]
+ succSepType := s.types[i+1]
+ if prevSepType == EN && succSepType == EN {
+ s.types[i] = EN
+ } else if s.types[i] == CS && prevSepType == AN && succSepType == AN {
+ s.types[i] = AN
+ }
+ }
+ }
+
+ // Rule W5.
+ for i, t := range s.types {
+ if t == ET {
+ // locate end of sequence
+ runStart := i
+ runEnd := s.findRunLimit(runStart, ET)
+
+ // check values at ends of sequence
+ t := s.sos
+ if runStart > 0 {
+ t = s.types[runStart-1]
+ }
+ if t != EN {
+ t = s.eos
+ if runEnd < len(s.types) {
+ t = s.types[runEnd]
+ }
+ }
+ if t == EN {
+ setTypes(s.types[runStart:runEnd], EN)
+ }
+ // continue at end of sequence
+ i = runEnd
+ }
+ }
+
+ // Rule W6.
+ for i, t := range s.types {
+ if t.in(ES, ET, CS) {
+ s.types[i] = ON
+ }
+ }
+
+ // Rule W7.
+ for i, t := range s.types {
+ if t == EN {
+ // set default if we reach start of run
+ prevStrongType := s.sos
+ for j := i - 1; j >= 0; j-- {
+ t = s.types[j]
+ if t == L || t == R { // AL's have been changed to R
+ prevStrongType = t
+ break
+ }
+ }
+ if prevStrongType == L {
+ s.types[i] = L
+ }
+ }
+ }
+}
+
+// 6) resolving neutral types. Rules N1-N2.
+func (s *isolatingRunSequence) resolveNeutralTypes() {
+
+ // on entry, only these types can be in resultTypes
+ s.assertOnly(L, R, EN, AN, B, S, WS, ON, RLI, LRI, FSI, PDI)
+
+ for i, t := range s.types {
+ switch t {
+ case WS, ON, B, S, RLI, LRI, FSI, PDI:
+ // find bounds of run of neutrals
+ runStart := i
+ runEnd := s.findRunLimit(runStart, B, S, WS, ON, RLI, LRI, FSI, PDI)
+
+ // determine effective types at ends of run
+ var leadType, trailType Class
+
+ // Note that the character found can only be L, R, AN, or
+ // EN.
+ if runStart == 0 {
+ leadType = s.sos
+ } else {
+ leadType = s.types[runStart-1]
+ if leadType.in(AN, EN) {
+ leadType = R
+ }
+ }
+ if runEnd == len(s.types) {
+ trailType = s.eos
+ } else {
+ trailType = s.types[runEnd]
+ if trailType.in(AN, EN) {
+ trailType = R
+ }
+ }
+
+ var resolvedType Class
+ if leadType == trailType {
+ // Rule N1.
+ resolvedType = leadType
+ } else {
+ // Rule N2.
+ // Notice the embedding level of the run is used, not
+ // the paragraph embedding level.
+ resolvedType = typeForLevel(s.level)
+ }
+
+ setTypes(s.types[runStart:runEnd], resolvedType)
+
+ // skip over run of (former) neutrals
+ i = runEnd
+ }
+ }
+}
+
+func setLevels(levels []level, newLevel level) {
+ for i := range levels {
+ levels[i] = newLevel
+ }
+}
+
+func setTypes(types []Class, newType Class) {
+ for i := range types {
+ types[i] = newType
+ }
+}
+
+// 7) resolving implicit embedding levels. Rules I1, I2.
+func (s *isolatingRunSequence) resolveImplicitLevels() {
+
+ // on entry, only these types can be in resultTypes
+ s.assertOnly(L, R, EN, AN)
+
+ s.resolvedLevels = make([]level, len(s.types))
+ setLevels(s.resolvedLevels, s.level)
+
+ if (s.level & 1) == 0 { // even level
+ for i, t := range s.types {
+ // Rule I1.
+ if t == L {
+ // no change
+ } else if t == R {
+ s.resolvedLevels[i] += 1
+ } else { // t == AN || t == EN
+ s.resolvedLevels[i] += 2
+ }
+ }
+ } else { // odd level
+ for i, t := range s.types {
+ // Rule I2.
+ if t == R {
+ // no change
+ } else { // t == L || t == AN || t == EN
+ s.resolvedLevels[i] += 1
+ }
+ }
+ }
+}
+
+// Applies the levels and types resolved in rules W1-I2 to the
+// resultLevels array.
+func (s *isolatingRunSequence) applyLevelsAndTypes() {
+ for i, x := range s.indexes {
+ s.p.resultTypes[x] = s.types[i]
+ s.p.resultLevels[x] = s.resolvedLevels[i]
+ }
+}
+
+// Return the limit of the run consisting only of the types in validSet
+// starting at index. This checks the value at index, and will return
+// index if that value is not in validSet.
+func (s *isolatingRunSequence) findRunLimit(index int, validSet ...Class) int {
+loop:
+ for ; index < len(s.types); index++ {
+ t := s.types[index]
+ for _, valid := range validSet {
+ if t == valid {
+ continue loop
+ }
+ }
+ return index // didn't find a match in validSet
+ }
+ return len(s.types)
+}
+
+// Algorithm validation. Assert that all values in types are in the
+// provided set.
+func (s *isolatingRunSequence) assertOnly(codes ...Class) {
+loop:
+ for i, t := range s.types {
+ for _, c := range codes {
+ if t == c {
+ continue loop
+ }
+ }
+ log.Panicf("invalid bidi code %v present in assertOnly at position %d", t, s.indexes[i])
+ }
+}
+
+// determineLevelRuns returns an array of level runs. Each level run is
+// described as an array of indexes into the input string.
+//
+// Determines the level runs. Rule X9 is applied while determining the runs,
+// so that the characters that are supposed to be removed are not included
+// in the runs.
+func (p *paragraph) determineLevelRuns() [][]int {
+ run := []int{}
+ allRuns := [][]int{}
+ currentLevel := implicitLevel
+
+ for i := range p.initialTypes {
+ if !isRemovedByX9(p.initialTypes[i]) {
+ if p.resultLevels[i] != currentLevel {
+ // we just encountered a new run; wrap up last run
+ if currentLevel >= 0 { // only wrap it up if there was a run
+ allRuns = append(allRuns, run)
+ run = nil
+ }
+ // Start new run
+ currentLevel = p.resultLevels[i]
+ }
+ run = append(run, i)
+ }
+ }
+ // Wrap up the final run, if any
+ if len(run) > 0 {
+ allRuns = append(allRuns, run)
+ }
+ return allRuns
+}
+
+// Definition BD13. Determine isolating run sequences.
+func (p *paragraph) determineIsolatingRunSequences() []*isolatingRunSequence {
+ levelRuns := p.determineLevelRuns()
+
+ // Compute the run that each character belongs to
+ runForCharacter := make([]int, p.Len())
+ for i, run := range levelRuns {
+ for _, index := range run {
+ runForCharacter[index] = i
+ }
+ }
+
+ sequences := []*isolatingRunSequence{}
+
+ var currentRunSequence []int
+
+ for _, run := range levelRuns {
+ first := run[0]
+ if p.initialTypes[first] != PDI || p.matchingIsolateInitiator[first] == -1 {
+ currentRunSequence = nil
+ for {
+ // Copy this level run into currentRunSequence
+ currentRunSequence = append(currentRunSequence, run...)
+
+ last := currentRunSequence[len(currentRunSequence)-1]
+ lastT := p.initialTypes[last]
+ if lastT.in(LRI, RLI, FSI) && p.matchingPDI[last] != p.Len() {
+ run = levelRuns[runForCharacter[p.matchingPDI[last]]]
+ } else {
+ break
+ }
+ }
+ sequences = append(sequences, p.isolatingRunSequence(currentRunSequence))
+ }
+ }
+ return sequences
+}
+
+// Assign level information to characters removed by rule X9. This is for
+// ease of relating the level information to the original input data. Note
+// that the levels assigned to these codes are arbitrary; they're chosen so
+// as to avoid breaking level runs.
+func (p *paragraph) assignLevelsToCharactersRemovedByX9() {
+ for i, t := range p.initialTypes {
+ if t.in(LRE, RLE, LRO, RLO, PDF, BN) {
+ p.resultTypes[i] = t
+ p.resultLevels[i] = -1
+ }
+ }
+ // now propagate forward the levels information (could have
+ // propagated backward, the main thing is not to introduce a level
+ // break where one doesn't already exist).
+
+ if p.resultLevels[0] == -1 {
+ p.resultLevels[0] = p.embeddingLevel
+ }
+ for i := 1; i < len(p.initialTypes); i++ {
+ if p.resultLevels[i] == -1 {
+ p.resultLevels[i] = p.resultLevels[i-1]
+ }
+ }
+ // Embedding information is for informational purposes only so need not be
+ // adjusted.
+}
+
+//
+// Output
+//
+
+// getLevels computes levels array breaking lines at offsets in linebreaks.
+// Rule L1.
+//
+// The linebreaks array must include at least one value. The values must be
+// in strictly increasing order (no duplicates) between 1 and the length of
+// the text, inclusive. The last value must be the length of the text.
+func (p *paragraph) getLevels(linebreaks []int) []level {
+ // Note that since the previous processing has removed all
+ // P, S, and WS values from resultTypes, the values referred to
+ // in these rules are the initial types, before any processing
+ // has been applied (including processing of overrides).
+ //
+ // This example implementation has reinserted explicit format codes
+ // and BN, in order that the levels array correspond to the
+ // initial text. Their final placement is not normative.
+ // These codes are treated like WS in this implementation,
+ // so they don't interrupt sequences of WS.
+
+ validateLineBreaks(linebreaks, p.Len())
+
+ result := append([]level(nil), p.resultLevels...)
+
+ // don't worry about linebreaks since if there is a break within
+ // a series of WS values preceding S, the linebreak itself
+ // causes the reset.
+ for i, t := range p.initialTypes {
+ if t.in(B, S) {
+ // Rule L1, clauses one and two.
+ result[i] = p.embeddingLevel
+
+ // Rule L1, clause three.
+ for j := i - 1; j >= 0; j-- {
+ if isWhitespace(p.initialTypes[j]) { // including format codes
+ result[j] = p.embeddingLevel
+ } else {
+ break
+ }
+ }
+ }
+ }
+
+ // Rule L1, clause four.
+ start := 0
+ for _, limit := range linebreaks {
+ for j := limit - 1; j >= start; j-- {
+ if isWhitespace(p.initialTypes[j]) { // including format codes
+ result[j] = p.embeddingLevel
+ } else {
+ break
+ }
+ }
+ start = limit
+ }
+
+ return result
+}
+
+// getReordering returns the reordering of lines from a visual index to a
+// logical index for line breaks at the given offsets.
+//
+// Lines are concatenated from left to right. So for example, the fifth
+// character from the left on the third line is
+//
+// getReordering(linebreaks)[linebreaks[1] + 4]
+//
+// (linebreaks[1] is the position after the last character of the second
+// line, which is also the index of the first character on the third line,
+// and adding four gets the fifth character from the left).
+//
+// The linebreaks array must include at least one value. The values must be
+// in strictly increasing order (no duplicates) between 1 and the length of
+// the text, inclusive. The last value must be the length of the text.
+func (p *paragraph) getReordering(linebreaks []int) []int {
+ validateLineBreaks(linebreaks, p.Len())
+
+ return computeMultilineReordering(p.getLevels(linebreaks), linebreaks)
+}
+
+// Return multiline reordering array for a given level array. Reordering
+// does not occur across a line break.
+func computeMultilineReordering(levels []level, linebreaks []int) []int {
+ result := make([]int, len(levels))
+
+ start := 0
+ for _, limit := range linebreaks {
+ tempLevels := make([]level, limit-start)
+ copy(tempLevels, levels[start:])
+
+ for j, order := range computeReordering(tempLevels) {
+ result[start+j] = order + start
+ }
+ start = limit
+ }
+ return result
+}
+
+// Return reordering array for a given level array. This reorders a single
+// line. The reordering is a visual to logical map: for example, the
+// leftmost character in the display is the one at index order[0] in the
+// input. Rule L2.
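+// As an illustration, the level array [0 1 1 0] yields the order [0 2 1 3].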
+func computeReordering(levels []level) []int {
+ result := make([]int, len(levels))
+ // initialize order
+ for i := range result {
+ result[i] = i
+ }
+
+ // locate highest level found on line.
+ // Note the rules say text, but no reordering across line bounds is
+ // performed, so this is sufficient.
+ highestLevel := level(0)
+ lowestOddLevel := level(maxDepth + 2)
+ for _, level := range levels {
+ if level > highestLevel {
+ highestLevel = level
+ }
+ if level&1 != 0 && level < lowestOddLevel {
+ lowestOddLevel = level
+ }
+ }
+
+ for level := highestLevel; level >= lowestOddLevel; level-- {
+ for i := 0; i < len(levels); i++ {
+ if levels[i] >= level {
+ // find range of text at or above this level
+ start := i
+ limit := i + 1
+ for limit < len(levels) && levels[limit] >= level {
+ limit++
+ }
+
+ for j, k := start, limit-1; j < k; j, k = j+1, k-1 {
+ result[j], result[k] = result[k], result[j]
+ }
+ // skip to end of level run
+ i = limit
+ }
+ }
+ }
+
+ return result
+}
+
+// isWhitespace reports whether the type is considered a whitespace type for the
+// line break rules.
+func isWhitespace(c Class) bool {
+ switch c {
+ case LRE, RLE, LRO, RLO, PDF, LRI, RLI, FSI, PDI, BN, WS:
+ return true
+ }
+ return false
+}
+
+// isRemovedByX9 reports whether the type is one of the types removed in X9.
+func isRemovedByX9(c Class) bool {
+ switch c {
+ case LRE, RLE, LRO, RLO, PDF, BN:
+ return true
+ }
+ return false
+}
+
+// typeForLevel reports the strong type (L or R) corresponding to the level.
+func typeForLevel(level level) Class {
+ if (level & 0x1) == 0 {
+ return L
+ }
+ return R
+}
+
+func validateTypes(types []Class) error {
+ if len(types) == 0 {
+ return fmt.Errorf("types is null")
+ }
+ for i, t := range types[:len(types)-1] {
+ if t == B {
+ return fmt.Errorf("B type before end of paragraph at index: %d", i)
+ }
+ }
+ return nil
+}
+
+func validateParagraphEmbeddingLevel(embeddingLevel level) error {
+ if embeddingLevel != implicitLevel &&
+ embeddingLevel != 0 &&
+ embeddingLevel != 1 {
+ return fmt.Errorf("illegal paragraph embedding level: %d", embeddingLevel)
+ }
+ return nil
+}
+
+func validateLineBreaks(linebreaks []int, textLength int) error {
+ prev := 0
+ for i, next := range linebreaks {
+ if next <= prev {
+ return fmt.Errorf("bad linebreak: %d at index: %d", next, i)
+ }
+ prev = next
+ }
+ if prev != textLength {
+ return fmt.Errorf("last linebreak was %d, want %d", prev, textLength)
+ }
+ return nil
+}
+
+func validatePbTypes(pairTypes []bracketType) error {
+ if len(pairTypes) == 0 {
+ return fmt.Errorf("pairTypes is null")
+ }
+ for i, pt := range pairTypes {
+ switch pt {
+ case bpNone, bpOpen, bpClose:
+ default:
+ return fmt.Errorf("illegal pairType value at %d: %v", i, pairTypes[i])
+ }
+ }
+ return nil
+}
+
+func validatePbValues(pairValues []rune, pairTypes []bracketType) error {
+ if pairValues == nil {
+ return fmt.Errorf("pairValues is null")
+ }
+ if len(pairTypes) != len(pairValues) {
+		return fmt.Errorf("pairTypes and pairValues have different lengths")
+ }
+ return nil
+}
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/prop.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/prop.go
index 7c9484e1f5..7c9484e1f5 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/prop.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/prop.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go
index f248effae1..f248effae1 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/trieval.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/trieval.go
index 4c459c4b72..4c459c4b72 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/bidi/trieval.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/bidi/trieval.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/composition.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/composition.go
index e2087bce52..e2087bce52 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/composition.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/composition.go
diff --git a/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/forminfo.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/forminfo.go
new file mode 100644
index 0000000000..d69ccb4f97
--- /dev/null
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/forminfo.go
@@ -0,0 +1,279 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package norm
+
+import "encoding/binary"
+
+// This file contains Form-specific logic and wrappers for data in tables.go.
+
+// Rune info is stored in a separate trie per composing form. A composing form
+// and its corresponding decomposing form share the same trie. Each trie maps
+// a rune to a uint16. The values take two forms. For v >= 0x8000:
+// bits
+// 15: 1 (inverse of NFD_QC bit of qcInfo)
+// 13..7: qcInfo (see below). isYesD is always true (no decomposition).
+// 6..0: ccc (compressed CCC value).
+// For v < 0x8000, the respective rune has a decomposition and v is an index
+// into a byte array of UTF-8 decomposition sequences and additional info and
+// has the form:
+// <header> <decomp_byte>* [<tccc> [<lccc>]]
+// The header contains the number of bytes in the decomposition (excluding this
+// length byte). The two most significant bits of this length byte correspond
+// to bit 5 and 4 of qcInfo (see below). The byte sequence itself starts at v+1.
+// The byte sequence is followed by a trailing and leading CCC if the values
+// for these are not zero. The value of v determines which ccc are appended
+// to the sequences. For v < firstCCC, there are none, for v >= firstCCC,
+// the sequence is followed by a trailing ccc, and for v >= firstLeadingCCC
+// there is an additional leading ccc. The value of tccc itself is the
+// trailing CCC shifted left 2 bits. The two least-significant bits of tccc
+// are the number of trailing non-starters.
+
+const (
+ qcInfoMask = 0x3F // to clear all but the relevant bits in a qcInfo
+ headerLenMask = 0x3F // extract the length value from the header byte
+ headerFlagsMask = 0xC0 // extract the qcInfo bits from the header byte
+)
+
+// Properties provides access to normalization properties of a rune.
+type Properties struct {
+ pos uint8 // start position in reorderBuffer; used in composition.go
+ size uint8 // length of UTF-8 encoding of this rune
+ ccc uint8 // leading canonical combining class (ccc if not decomposition)
+ tccc uint8 // trailing canonical combining class (ccc if not decomposition)
+ nLead uint8 // number of leading non-starters.
+ flags qcInfo // quick check flags
+ index uint16
+}
+
+// functions dispatchable per form
+type lookupFunc func(b input, i int) Properties
+
+// formInfo holds Form-specific functions and tables.
+type formInfo struct {
+ form Form
+ composing, compatibility bool // form type
+ info lookupFunc
+ nextMain iterFunc
+}
+
+var formTable = []*formInfo{{
+ form: NFC,
+ composing: true,
+ compatibility: false,
+ info: lookupInfoNFC,
+ nextMain: nextComposed,
+}, {
+ form: NFD,
+ composing: false,
+ compatibility: false,
+ info: lookupInfoNFC,
+ nextMain: nextDecomposed,
+}, {
+ form: NFKC,
+ composing: true,
+ compatibility: true,
+ info: lookupInfoNFKC,
+ nextMain: nextComposed,
+}, {
+ form: NFKD,
+ composing: false,
+ compatibility: true,
+ info: lookupInfoNFKC,
+ nextMain: nextDecomposed,
+}}
+
+// We do not distinguish between boundaries for NFC, NFD, etc. to avoid
+// unexpected behavior for the user. For example, in NFD, there is a boundary
+// after 'a'. However, 'a' might combine with modifiers, so from the application's
+// perspective it is not a good boundary. We will therefore always use the
+// boundaries for the combining variants.
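+//
+// For example, a combining acute accent (U+0301) has a non-zero CCC, so
+// BoundaryBefore below reports false for it under every form.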
+
+// BoundaryBefore returns true if this rune starts a new segment and
+// cannot combine with any rune on the left.
+func (p Properties) BoundaryBefore() bool {
+ if p.ccc == 0 && !p.combinesBackward() {
+ return true
+ }
+ // We assume that the CCC of the first character in a decomposition
+ // is always non-zero if different from info.ccc and that we can return
+ // false at this point. This is verified by maketables.
+ return false
+}
+
+// BoundaryAfter returns true if runes cannot combine with or otherwise
+// interact with this or previous runes.
+func (p Properties) BoundaryAfter() bool {
+ // TODO: loosen these conditions.
+ return p.isInert()
+}
+
+// We pack quick check data in 6 bits:
+//
+// 5: Combines forward (0 == false, 1 == true)
+// 4..3: NFC_QC Yes(00), No (10), or Maybe (11)
+// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition.
+// 1..0: Number of trailing non-starters.
+//
+// When all 6 bits are zero, the character is inert, meaning it is never
+// influenced by normalization.
+type qcInfo uint8
+
+func (p Properties) isYesC() bool { return p.flags&0x10 == 0 }
+func (p Properties) isYesD() bool { return p.flags&0x4 == 0 }
+
+func (p Properties) combinesForward() bool { return p.flags&0x20 != 0 }
+func (p Properties) combinesBackward() bool { return p.flags&0x8 != 0 } // == isMaybe
+func (p Properties) hasDecomposition() bool { return p.flags&0x4 != 0 } // == isNoD
+
+func (p Properties) isInert() bool {
+ return p.flags&qcInfoMask == 0 && p.ccc == 0
+}
+
+func (p Properties) multiSegment() bool {
+ return p.index >= firstMulti && p.index < endMulti
+}
+
+func (p Properties) nLeadingNonStarters() uint8 {
+ return p.nLead
+}
+
+func (p Properties) nTrailingNonStarters() uint8 {
+ return uint8(p.flags & 0x03)
+}
+
+// Decomposition returns the decomposition for the underlying rune
+// or nil if there is none.
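+// For example, under NFD the decomposition of '\u00e9' is "e\u0301".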
+func (p Properties) Decomposition() []byte {
+ // TODO: create the decomposition for Hangul?
+ if p.index == 0 {
+ return nil
+ }
+ i := p.index
+ n := decomps[i] & headerLenMask
+ i++
+ return decomps[i : i+uint16(n)]
+}
+
+// Size returns the length of UTF-8 encoding of the rune.
+func (p Properties) Size() int {
+ return int(p.size)
+}
+
+// CCC returns the canonical combining class of the underlying rune.
+func (p Properties) CCC() uint8 {
+ if p.index >= firstCCCZeroExcept {
+ return 0
+ }
+ return ccc[p.ccc]
+}
+
+// LeadCCC returns the CCC of the first rune in the decomposition.
+// If there is no decomposition, LeadCCC equals CCC.
+func (p Properties) LeadCCC() uint8 {
+ return ccc[p.ccc]
+}
+
+// TrailCCC returns the CCC of the last rune in the decomposition.
+// If there is no decomposition, TrailCCC equals CCC.
+func (p Properties) TrailCCC() uint8 {
+ return ccc[p.tccc]
+}
+
+func buildRecompMap() {
+ recompMap = make(map[uint32]rune, len(recompMapPacked)/8)
+ var buf [8]byte
+ for i := 0; i < len(recompMapPacked); i += 8 {
+ copy(buf[:], recompMapPacked[i:i+8])
+ key := binary.BigEndian.Uint32(buf[:4])
+ val := binary.BigEndian.Uint32(buf[4:])
+ recompMap[key] = rune(val)
+ }
+}
+
+// Recomposition
+// We use 32-bit keys instead of 64-bit for the two codepoint keys.
+// This clips off the bits of three entries, but we know this will not
+// result in a collision. In the unlikely event that changes to
+// UnicodeData.txt introduce collisions, the compiler will catch it.
+// Note that the recomposition map for NFC and NFKC are identical.
+
+// combine returns the combined rune or 0 if it doesn't exist.
+//
+// The caller is responsible for calling
+// recompMapOnce.Do(buildRecompMap) sometime before this is called.
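+//
+// For example, once the map is built, combine('A', '\u0300') yields
+// '\u00C0' (A with grave accent).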
+func combine(a, b rune) rune {
+ key := uint32(uint16(a))<<16 + uint32(uint16(b))
+ if recompMap == nil {
+ panic("caller error") // see func comment
+ }
+ return recompMap[key]
+}
+
+func lookupInfoNFC(b input, i int) Properties {
+ v, sz := b.charinfoNFC(i)
+ return compInfo(v, sz)
+}
+
+func lookupInfoNFKC(b input, i int) Properties {
+ v, sz := b.charinfoNFKC(i)
+ return compInfo(v, sz)
+}
+
+// Properties returns properties for the first rune in s.
+func (f Form) Properties(s []byte) Properties {
+ if f == NFC || f == NFD {
+ return compInfo(nfcData.lookup(s))
+ }
+ return compInfo(nfkcData.lookup(s))
+}
+
+// PropertiesString returns properties for the first rune in s.
+func (f Form) PropertiesString(s string) Properties {
+ if f == NFC || f == NFD {
+ return compInfo(nfcData.lookupString(s))
+ }
+ return compInfo(nfkcData.lookupString(s))
+}
+
+// compInfo converts the information contained in v and sz
+// to a Properties. See the comment at the top of the file
+// for more information on the format.
+func compInfo(v uint16, sz int) Properties {
+ if v == 0 {
+ return Properties{size: uint8(sz)}
+ } else if v >= 0x8000 {
+ p := Properties{
+ size: uint8(sz),
+ ccc: uint8(v),
+ tccc: uint8(v),
+ flags: qcInfo(v >> 8),
+ }
+ if p.ccc > 0 || p.combinesBackward() {
+ p.nLead = uint8(p.flags & 0x3)
+ }
+ return p
+ }
+ // has decomposition
+ h := decomps[v]
+ f := (qcInfo(h&headerFlagsMask) >> 2) | 0x4
+ p := Properties{size: uint8(sz), flags: f, index: v}
+ if v >= firstCCC {
+ v += uint16(h&headerLenMask) + 1
+ c := decomps[v]
+ p.tccc = c >> 2
+ p.flags |= qcInfo(c & 0x3)
+ if v >= firstLeadingCCC {
+ p.nLead = c & 0x3
+ if v >= firstStarterWithNLead {
+ // We were tricked. Remove the decomposition.
+ p.flags &= 0x03
+ p.index = 0
+ return p
+ }
+ p.ccc = decomps[v+1]
+ }
+ }
+ return p
+}
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/input.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/input.go
index 479e35bc25..479e35bc25 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/input.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/input.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/iter.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/iter.go
index 417c6b2689..417c6b2689 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/iter.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/iter.go
diff --git a/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/normalize.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/normalize.go
new file mode 100644
index 0000000000..4747ad07a8
--- /dev/null
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/normalize.go
@@ -0,0 +1,610 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Note: the file data_test.go that is generated should not be checked in.
+//go:generate go run maketables.go triegen.go
+//go:generate go test -tags test
+
+// Package norm contains types and functions for normalizing Unicode strings.
+package norm // import "golang.org/x/text/unicode/norm"
+
+import (
+ "unicode/utf8"
+
+ "golang.org/x/text/transform"
+)
+
+// A Form denotes a canonical representation of Unicode code points.
+// The Unicode-defined normalization and equivalence forms are:
+//
+// NFC Unicode Normalization Form C
+// NFD Unicode Normalization Form D
+// NFKC Unicode Normalization Form KC
+// NFKD Unicode Normalization Form KD
+//
+// For a Form f, this documentation uses the notation f(x) to mean
+// the bytes or string x converted to the given form.
+// A position n in x is called a boundary if conversion to the form can
+// proceed independently on both sides:
+//
+// f(x) == append(f(x[0:n]), f(x[n:])...)
+//
+// References: https://unicode.org/reports/tr15/ and
+// https://unicode.org/notes/tn5/.
+type Form int
+
+const (
+ NFC Form = iota
+ NFD
+ NFKC
+ NFKD
+)
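+
+// For example, NFC composes a base letter and a following combining accent
+// into the precomposed rune:
+//
+//	NFC.String("e\u0301") == "\u00e9"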
+
+// Bytes returns f(b). May return b if f(b) = b.
+func (f Form) Bytes(b []byte) []byte {
+ src := inputBytes(b)
+ ft := formTable[f]
+ n, ok := ft.quickSpan(src, 0, len(b), true)
+ if ok {
+ return b
+ }
+ out := make([]byte, n, len(b))
+ copy(out, b[0:n])
+ rb := reorderBuffer{f: *ft, src: src, nsrc: len(b), out: out, flushF: appendFlush}
+ return doAppendInner(&rb, n)
+}
+
+// String returns f(s).
+func (f Form) String(s string) string {
+ src := inputString(s)
+ ft := formTable[f]
+ n, ok := ft.quickSpan(src, 0, len(s), true)
+ if ok {
+ return s
+ }
+ out := make([]byte, n, len(s))
+ copy(out, s[0:n])
+ rb := reorderBuffer{f: *ft, src: src, nsrc: len(s), out: out, flushF: appendFlush}
+ return string(doAppendInner(&rb, n))
+}
+
+// IsNormal returns true if b == f(b).
+func (f Form) IsNormal(b []byte) bool {
+ src := inputBytes(b)
+ ft := formTable[f]
+ bp, ok := ft.quickSpan(src, 0, len(b), true)
+ if ok {
+ return true
+ }
+ rb := reorderBuffer{f: *ft, src: src, nsrc: len(b)}
+ rb.setFlusher(nil, cmpNormalBytes)
+ for bp < len(b) {
+ rb.out = b[bp:]
+ if bp = decomposeSegment(&rb, bp, true); bp < 0 {
+ return false
+ }
+ bp, _ = rb.f.quickSpan(rb.src, bp, len(b), true)
+ }
+ return true
+}
+
+func cmpNormalBytes(rb *reorderBuffer) bool {
+ b := rb.out
+ for i := 0; i < rb.nrune; i++ {
+ info := rb.rune[i]
+ if int(info.size) > len(b) {
+ return false
+ }
+ p := info.pos
+ pe := p + info.size
+ for ; p < pe; p++ {
+ if b[0] != rb.byte[p] {
+ return false
+ }
+ b = b[1:]
+ }
+ }
+ return true
+}
+
+// IsNormalString returns true if s == f(s).
+func (f Form) IsNormalString(s string) bool {
+ src := inputString(s)
+ ft := formTable[f]
+ bp, ok := ft.quickSpan(src, 0, len(s), true)
+ if ok {
+ return true
+ }
+ rb := reorderBuffer{f: *ft, src: src, nsrc: len(s)}
+ rb.setFlusher(nil, func(rb *reorderBuffer) bool {
+ for i := 0; i < rb.nrune; i++ {
+ info := rb.rune[i]
+ if bp+int(info.size) > len(s) {
+ return false
+ }
+ p := info.pos
+ pe := p + info.size
+ for ; p < pe; p++ {
+ if s[bp] != rb.byte[p] {
+ return false
+ }
+ bp++
+ }
+ }
+ return true
+ })
+ for bp < len(s) {
+ if bp = decomposeSegment(&rb, bp, true); bp < 0 {
+ return false
+ }
+ bp, _ = rb.f.quickSpan(rb.src, bp, len(s), true)
+ }
+ return true
+}
+
+// patchTail fixes a case where a rune may be incorrectly normalized
+// if it is followed by illegal continuation bytes. It reports whether
+// merging with subsequent input may proceed.
+func patchTail(rb *reorderBuffer) bool {
+ info, p := lastRuneStart(&rb.f, rb.out)
+ if p == -1 || info.size == 0 {
+ return true
+ }
+ end := p + int(info.size)
+ extra := len(rb.out) - end
+ if extra > 0 {
+ // Potentially allocating memory. However, this only
+ // happens with ill-formed UTF-8.
+ x := make([]byte, 0)
+ x = append(x, rb.out[len(rb.out)-extra:]...)
+ rb.out = rb.out[:end]
+ decomposeToLastBoundary(rb)
+ rb.doFlush()
+ rb.out = append(rb.out, x...)
+ return false
+ }
+ buf := rb.out[p:]
+ rb.out = rb.out[:p]
+ decomposeToLastBoundary(rb)
+ if s := rb.ss.next(info); s == ssStarter {
+ rb.doFlush()
+ rb.ss.first(info)
+ } else if s == ssOverflow {
+ rb.doFlush()
+ rb.insertCGJ()
+ rb.ss = 0
+ }
+ rb.insertUnsafe(inputBytes(buf), 0, info)
+ return true
+}
+
+func appendQuick(rb *reorderBuffer, i int) int {
+ if rb.nsrc == i {
+ return i
+ }
+ end, _ := rb.f.quickSpan(rb.src, i, rb.nsrc, true)
+ rb.out = rb.src.appendSlice(rb.out, i, end)
+ return end
+}
+
+// Append returns f(append(out, b...)).
+// The buffer out must be nil, empty, or equal to f(out).
+func (f Form) Append(out []byte, src ...byte) []byte {
+ return f.doAppend(out, inputBytes(src), len(src))
+}
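+
+// A sketch of incremental use (chunks stands for any [][]byte source); each
+// call returns the normalized concatenation, so the invariant on out holds:
+//
+//	var out []byte
+//	for _, chunk := range chunks {
+//		out = NFC.Append(out, chunk...)
+//	}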
+
+func (f Form) doAppend(out []byte, src input, n int) []byte {
+ if n == 0 {
+ return out
+ }
+ ft := formTable[f]
+ // Attempt to do a quickSpan first so we can avoid initializing the reorderBuffer.
+ if len(out) == 0 {
+ p, _ := ft.quickSpan(src, 0, n, true)
+ out = src.appendSlice(out, 0, p)
+ if p == n {
+ return out
+ }
+ rb := reorderBuffer{f: *ft, src: src, nsrc: n, out: out, flushF: appendFlush}
+ return doAppendInner(&rb, p)
+ }
+ rb := reorderBuffer{f: *ft, src: src, nsrc: n}
+ return doAppend(&rb, out, 0)
+}
+
+func doAppend(rb *reorderBuffer, out []byte, p int) []byte {
+ rb.setFlusher(out, appendFlush)
+ src, n := rb.src, rb.nsrc
+ doMerge := len(out) > 0
+ if q := src.skipContinuationBytes(p); q > p {
+ // Move leading non-starters to destination.
+ rb.out = src.appendSlice(rb.out, p, q)
+ p = q
+ doMerge = patchTail(rb)
+ }
+ fd := &rb.f
+ if doMerge {
+ var info Properties
+ if p < n {
+ info = fd.info(src, p)
+ if !info.BoundaryBefore() || info.nLeadingNonStarters() > 0 {
+ if p == 0 {
+ decomposeToLastBoundary(rb)
+ }
+ p = decomposeSegment(rb, p, true)
+ }
+ }
+ if info.size == 0 {
+ rb.doFlush()
+ // Append incomplete UTF-8 encoding.
+ return src.appendSlice(rb.out, p, n)
+ }
+ if rb.nrune > 0 {
+ return doAppendInner(rb, p)
+ }
+ }
+ p = appendQuick(rb, p)
+ return doAppendInner(rb, p)
+}
+
+func doAppendInner(rb *reorderBuffer, p int) []byte {
+ for n := rb.nsrc; p < n; {
+ p = decomposeSegment(rb, p, true)
+ p = appendQuick(rb, p)
+ }
+ return rb.out
+}
+
+// AppendString returns f(append(out, []byte(s))).
+// The buffer out must be nil, empty, or equal to f(out).
+func (f Form) AppendString(out []byte, src string) []byte {
+ return f.doAppend(out, inputString(src), len(src))
+}
+
+// QuickSpan returns a boundary n such that b[0:n] == f(b[0:n]).
+// It is not guaranteed to return the largest such n.
+func (f Form) QuickSpan(b []byte) int {
+ n, _ := formTable[f].quickSpan(inputBytes(b), 0, len(b), true)
+ return n
+}
+
+// Span implements transform.SpanningTransformer. It returns a boundary n such
+// that b[0:n] == f(b[0:n]). It is not guaranteed to return the largest such n.
+func (f Form) Span(b []byte, atEOF bool) (n int, err error) {
+ n, ok := formTable[f].quickSpan(inputBytes(b), 0, len(b), atEOF)
+ if n < len(b) {
+ if !ok {
+ err = transform.ErrEndOfSpan
+ } else {
+ err = transform.ErrShortSrc
+ }
+ }
+ return n, err
+}
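+
+// A minimal sketch of using Span to validate a complete input (atEOF is true
+// because no more bytes follow):
+//
+//	n, err := NFC.Span(b, true)
+//	// b[:n] is in NFC; err == transform.ErrEndOfSpan if b[n:] is not.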
+
+// SpanString returns a boundary n such that s[0:n] == f(s[0:n]).
+// It is not guaranteed to return the largest such n.
+func (f Form) SpanString(s string, atEOF bool) (n int, err error) {
+ n, ok := formTable[f].quickSpan(inputString(s), 0, len(s), atEOF)
+ if n < len(s) {
+ if !ok {
+ err = transform.ErrEndOfSpan
+ } else {
+ err = transform.ErrShortSrc
+ }
+ }
+ return n, err
+}
+
+// quickSpan returns a boundary n such that src[0:n] == f(src[0:n]) and
+// whether any non-normalized parts were found. If atEOF is false, n will
+// not point past the last segment if this segment might become
+// non-normalized by appending other runes.
+func (f *formInfo) quickSpan(src input, i, end int, atEOF bool) (n int, ok bool) {
+ var lastCC uint8
+ ss := streamSafe(0)
+ lastSegStart := i
+ for n = end; i < n; {
+ if j := src.skipASCII(i, n); i != j {
+ i = j
+ lastSegStart = i - 1
+ lastCC = 0
+ ss = 0
+ continue
+ }
+ info := f.info(src, i)
+ if info.size == 0 {
+ if atEOF {
+ // include incomplete runes
+ return n, true
+ }
+ return lastSegStart, true
+ }
+ // This block needs to be before the next, because it is possible to
+ // have an overflow for runes that are starters (e.g. with U+FF9E).
+ switch ss.next(info) {
+ case ssStarter:
+ lastSegStart = i
+ case ssOverflow:
+ return lastSegStart, false
+ case ssSuccess:
+ if lastCC > info.ccc {
+ return lastSegStart, false
+ }
+ }
+ if f.composing {
+ if !info.isYesC() {
+ break
+ }
+ } else {
+ if !info.isYesD() {
+ break
+ }
+ }
+ lastCC = info.ccc
+ i += int(info.size)
+ }
+ if i == n {
+ if !atEOF {
+ n = lastSegStart
+ }
+ return n, true
+ }
+ return lastSegStart, false
+}
+
+// QuickSpanString returns a boundary n such that s[0:n] == f(s[0:n]).
+// It is not guaranteed to return the largest such n.
+func (f Form) QuickSpanString(s string) int {
+ n, _ := formTable[f].quickSpan(inputString(s), 0, len(s), true)
+ return n
+}
+
+// FirstBoundary returns the position i of the first boundary in b
+// or -1 if b contains no boundary.
+func (f Form) FirstBoundary(b []byte) int {
+ return f.firstBoundary(inputBytes(b), len(b))
+}
+
+func (f Form) firstBoundary(src input, nsrc int) int {
+ i := src.skipContinuationBytes(0)
+ if i >= nsrc {
+ return -1
+ }
+ fd := formTable[f]
+ ss := streamSafe(0)
+ // We should call ss.first here, but we can't as the first rune is
+ // skipped already. This means FirstBoundary can't really determine
+ // CGJ insertion points correctly. Luckily it doesn't have to.
+ for {
+ info := fd.info(src, i)
+ if info.size == 0 {
+ return -1
+ }
+ if s := ss.next(info); s != ssSuccess {
+ return i
+ }
+ i += int(info.size)
+ if i >= nsrc {
+ if !info.BoundaryAfter() && !ss.isMax() {
+ return -1
+ }
+ return nsrc
+ }
+ }
+}
+
+// FirstBoundaryInString returns the position i of the first boundary in s
+// or -1 if s contains no boundary.
+func (f Form) FirstBoundaryInString(s string) int {
+ return f.firstBoundary(inputString(s), len(s))
+}
+
+// NextBoundary reports the index of the boundary between the first and next
+// segment in b or -1 if atEOF is false and there are not enough bytes to
+// determine this boundary.
+func (f Form) NextBoundary(b []byte, atEOF bool) int {
+ return f.nextBoundary(inputBytes(b), len(b), atEOF)
+}
+
+// NextBoundaryInString reports the index of the boundary between the first and
+// next segment in b or -1 if atEOF is false and there are not enough bytes to
+// determine this boundary.
+func (f Form) NextBoundaryInString(s string, atEOF bool) int {
+ return f.nextBoundary(inputString(s), len(s), atEOF)
+}
+
+func (f Form) nextBoundary(src input, nsrc int, atEOF bool) int {
+ if nsrc == 0 {
+ if atEOF {
+ return 0
+ }
+ return -1
+ }
+ fd := formTable[f]
+ info := fd.info(src, 0)
+ if info.size == 0 {
+ if atEOF {
+ return 1
+ }
+ return -1
+ }
+ ss := streamSafe(0)
+ ss.first(info)
+
+ for i := int(info.size); i < nsrc; i += int(info.size) {
+ info = fd.info(src, i)
+ if info.size == 0 {
+ if atEOF {
+ return i
+ }
+ return -1
+ }
+ // TODO: Using streamSafe to determine the boundary isn't the same as
+ // using BoundaryBefore. Determine which should be used.
+ if s := ss.next(info); s != ssSuccess {
+ return i
+ }
+ }
+ if !atEOF && !info.BoundaryAfter() && !ss.isMax() {
+ return -1
+ }
+ return nsrc
+}
+
+// LastBoundary returns the position i of the last boundary in b
+// or -1 if b contains no boundary.
+func (f Form) LastBoundary(b []byte) int {
+ return lastBoundary(formTable[f], b)
+}
+
+func lastBoundary(fd *formInfo, b []byte) int {
+ i := len(b)
+ info, p := lastRuneStart(fd, b)
+ if p == -1 {
+ return -1
+ }
+ if info.size == 0 { // ends with incomplete rune
+ if p == 0 { // starts with incomplete rune
+ return -1
+ }
+ i = p
+ info, p = lastRuneStart(fd, b[:i])
+ if p == -1 { // incomplete UTF-8 encoding or non-starter bytes without a starter
+ return i
+ }
+ }
+ if p+int(info.size) != i { // trailing non-starter bytes: illegal UTF-8
+ return i
+ }
+ if info.BoundaryAfter() {
+ return i
+ }
+ ss := streamSafe(0)
+ v := ss.backwards(info)
+ for i = p; i >= 0 && v != ssStarter; i = p {
+ info, p = lastRuneStart(fd, b[:i])
+ if v = ss.backwards(info); v == ssOverflow {
+ break
+ }
+ if p+int(info.size) != i {
+ if p == -1 { // no boundary found
+ return -1
+ }
+ return i // boundary after an illegal UTF-8 encoding
+ }
+ }
+ return i
+}
+
+// decomposeSegment scans the first segment in src into rb. It inserts 0x034f
+// (Combining Grapheme Joiner) when it encounters a sequence of more than 30
+// non-starters and returns the position in src just past the segment, or a
+// negative error value (iShortDst or iShortSrc).
+func decomposeSegment(rb *reorderBuffer, sp int, atEOF bool) int {
+ // Force one character to be consumed.
+ info := rb.f.info(rb.src, sp)
+ if info.size == 0 {
+ return 0
+ }
+ if s := rb.ss.next(info); s == ssStarter {
+ // TODO: this could be removed if we don't support merging.
+ if rb.nrune > 0 {
+ goto end
+ }
+ } else if s == ssOverflow {
+ rb.insertCGJ()
+ goto end
+ }
+ if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
+ return int(err)
+ }
+ for {
+ sp += int(info.size)
+ if sp >= rb.nsrc {
+ if !atEOF && !info.BoundaryAfter() {
+ return int(iShortSrc)
+ }
+ break
+ }
+ info = rb.f.info(rb.src, sp)
+ if info.size == 0 {
+ if !atEOF {
+ return int(iShortSrc)
+ }
+ break
+ }
+ if s := rb.ss.next(info); s == ssStarter {
+ break
+ } else if s == ssOverflow {
+ rb.insertCGJ()
+ break
+ }
+ if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
+ return int(err)
+ }
+ }
+end:
+ if !rb.doFlush() {
+ return int(iShortDst)
+ }
+ return sp
+}
+
+// lastRuneStart returns the runeInfo and position of the last
+// rune in buf or the zero runeInfo and -1 if no rune was found.
+func lastRuneStart(fd *formInfo, buf []byte) (Properties, int) {
+ p := len(buf) - 1
+ for ; p >= 0 && !utf8.RuneStart(buf[p]); p-- {
+ }
+ if p < 0 {
+ return Properties{}, -1
+ }
+ return fd.info(inputBytes(buf), p), p
+}
+
+// decomposeToLastBoundary finds an open segment at the end of the buffer
+// and scans it into rb, truncating rb.out to exclude that segment.
+func decomposeToLastBoundary(rb *reorderBuffer) {
+ fd := &rb.f
+ info, i := lastRuneStart(fd, rb.out)
+ if int(info.size) != len(rb.out)-i {
+ // illegal trailing continuation bytes
+ return
+ }
+ if info.BoundaryAfter() {
+ return
+ }
+ var add [maxNonStarters + 1]Properties // stores runeInfo in reverse order
+ padd := 0
+ ss := streamSafe(0)
+ p := len(rb.out)
+ for {
+ add[padd] = info
+ v := ss.backwards(info)
+ if v == ssOverflow {
+			// Note that if we have an overflow, the string we are appending to
+ // is not correctly normalized. In this case the behavior is undefined.
+ break
+ }
+ padd++
+ p -= int(info.size)
+ if v == ssStarter || p < 0 {
+ break
+ }
+ info, i = lastRuneStart(fd, rb.out[:p])
+ if int(info.size) != p-i {
+ break
+ }
+ }
+ rb.ss = ss
+ // Copy bytes for insertion as we may need to overwrite rb.out.
+ var buf [maxBufferSize * utf8.UTFMax]byte
+ cp := buf[:copy(buf[:], rb.out[p:])]
+ rb.out = rb.out[:p]
+ for padd--; padd >= 0; padd-- {
+ info = add[padd]
+ rb.insertUnsafe(inputBytes(cp), 0, info)
+ cp = cp[info.size:]
+ }
+}
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/readwriter.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/readwriter.go
index b38096f5ca..b38096f5ca 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/readwriter.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/readwriter.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go
index 96a130d30e..96a130d30e 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/transform.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/transform.go
index a1d366ae48..a1d366ae48 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/transform.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/transform.go
diff --git a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/trie.go b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/trie.go
index 423386bf43..423386bf43 100644
--- a/contrib/go/_std_1.18/src/vendor/golang.org/x/text/unicode/norm/trie.go
+++ b/contrib/go/_std_1.19/src/vendor/golang.org/x/text/unicode/norm/trie.go